From 0fb12a0fd78f818dd03e98b41509d8bb4af59f37 Mon Sep 17 00:00:00 2001
From: Venkata Charan Sunku
Date: Thu, 12 Feb 2026 00:41:58 +0530
Subject: [PATCH 1/2] OCPBUGS-76586: terraform/provider/google: updated google
 provider to 6.10.0 using openshift fork

Fixes the google provider inconsistencies seen during service account
creation.
---
 terraform/providers/google/go.mod | 98 +- terraform/providers/google/go.sum | 208 +- .../google/vendor/cel.dev/expr/.bazelversion | 2 + .../google/vendor/cel.dev/expr/.gitattributes | 2 + .../google/vendor/cel.dev/expr/.gitignore | 1 + .../google/vendor/cel.dev/expr/BUILD.bazel | 3 + .../vendor/cel.dev/expr/CODE_OF_CONDUCT.md | 25 + .../vendor/cel.dev/expr/CONTRIBUTING.md | 32 + .../google/vendor/cel.dev/expr/GOVERNANCE.md | 43 + .../google/vendor/cel.dev/expr/LICENSE | 202 + .../google/vendor/cel.dev/expr/MAINTAINERS.md | 13 + .../google/vendor/cel.dev/expr/README.md | 65 + .../google/vendor/cel.dev/expr/WORKSPACE | 145 + .../google/vendor/cel.dev/expr/checked.pb.go | 1432 ++ .../vendor/cel.dev/expr/cloudbuild.yaml | 9 + .../google/vendor/cel.dev/expr/eval.pb.go | 490 + .../google/vendor/cel.dev/expr/explain.pb.go | 236 + .../vendor/cel.dev/expr/regen_go_proto.sh | 9 + .../expr/regen_go_proto_canonical_protos.sh | 10 + .../google/vendor/cel.dev/expr/syntax.pb.go | 1633 ++ .../google/vendor/cel.dev/expr/value.pb.go | 653 + .../.release-please-manifest-individual.json | 28 +- .../.release-please-manifest-submodules.json | 294 +- .../go/.release-please-manifest.json | 2 +- .../vendor/cloud.google.com/go/CHANGES.md | 14 + .../vendor/cloud.google.com/go/README.md | 16 +- .../cloud.google.com/go/auth/CHANGES.md | 150 + .../vendor/cloud.google.com/go/auth/README.md | 42 +- .../vendor/cloud.google.com/go/auth/auth.go | 181 +- .../go/auth/credentials/compute.go | 7 +- .../go/auth/credentials/detect.go | 26 +- .../go/auth/credentials/filetypes.go | 18 +- .../go/auth/credentials/idtoken/cache.go | 10 +- .../go/auth/credentials/idtoken/compute.go | 6 +- .../go/auth/credentials/idtoken/idtoken.go | 10 +- .../go/auth/credentials/idtoken/validate.go | 4 +- .../auth/credentials/impersonate/idtoken.go | 11 +- .../credentials/impersonate/impersonate.go | 12 +- .../go/auth/credentials/impersonate/user.go | 22 +- .../internal/externalaccount/aws_provider.go | 97 +- .../externalaccount/externalaccount.go | 44 +- .../internal/externalaccount/url_provider.go | 17 +- .../internal/externalaccount/x509_provider.go | 63 + .../go/auth/credentials/internal/gdch/gdch.go | 10 +- .../internal/impersonate/impersonate.go | 7 +- .../internal/stsexchange/sts_exchange.go | 8 +- .../go/auth/credentials/selfsignedjwt.go | 4 + .../go/auth/grpctransport/directpath.go | 13 +- .../go/auth/grpctransport/grpctransport.go | 149 +- .../go/auth/httptransport/httptransport.go | 11 + .../go/auth/httptransport/transport.go | 61 +- .../go/auth/internal/compute/compute.go | 66 + .../go/auth/internal/compute/manufacturer.go | 22 + .../internal/compute/manufacturer_linux.go | 23 + .../internal/compute/manufacturer_windows.go | 46 + .../go/auth/internal/credsfile/filetype.go | 30 +- .../go/auth/internal/internal.go | 48 +- .../go/auth/internal/transport/cba.go | 96 +- .../internal/transport/cert/default_cert.go | 9 +- .../transport/cert/secureconnect_cert.go | 10 +- .../internal/transport/cert/workload_cert.go | 2 +- .../go/auth/internal/transport/s2a.go | 100 +- .../go/auth/internal/transport/transport.go | 33 +- .../go/auth/oauth2adapt/CHANGES.md | 14 + .../cloud.google.com/go/auth/threelegged.go | 21 +-
.../cloud.google.com/go/bigtable/CHANGES.md | 126 + .../cloud.google.com/go/bigtable/admin.go | 75 +- .../adminpb}/bigtable_instance_admin.pb.go | 188 +- .../apiv2/adminpb}/bigtable_table_admin.pb.go | 346 +- .../admin/apiv2/adminpb}/common.pb.go | 35 +- .../admin/apiv2/adminpb}/instance.pb.go | 421 +- .../bigtable/admin/apiv2/adminpb}/table.pb.go | 405 +- .../bigtable/admin/apiv2/adminpb/types.pb.go | 2225 ++ .../cloud.google.com/go/bigtable/aliasshim.go | 24 + .../bigtable/apiv2/bigtablepb}/bigtable.pb.go | 1225 +- .../go/bigtable/apiv2/bigtablepb}/data.pb.go | 1973 +- .../apiv2/bigtablepb}/feature_flags.pb.go | 32 +- .../apiv2/bigtablepb}/request_stats.pb.go | 40 +- .../apiv2/bigtablepb}/response_params.pb.go | 34 +- .../go/bigtable/apiv2/bigtablepb/types.pb.go | 2211 ++ .../cloud.google.com/go/bigtable/bigtable.go | 396 +- .../go/bigtable/conformance_test.sh | 8 +- .../cloud.google.com/go/bigtable/filter.go | 2 +- .../vendor/cloud.google.com/go/bigtable/gc.go | 2 +- .../go/bigtable/internal/option/option.go | 23 - .../go/bigtable/internal/version.go | 2 +- .../cloud.google.com/go/bigtable/metrics.go | 417 + .../bigtable/metrics_monitoring_exporter.go | 354 + .../go/bigtable/metrics_util.go | 99 + .../cloud.google.com/go/bigtable/reader.go | 2 +- .../cloud.google.com/go/bigtable/type.go | 223 +- .../go/compute/metadata/CHANGES.md | 33 + .../go/compute/metadata/metadata.go | 384 +- .../go/compute/metadata/retry_linux.go | 9 +- .../go/compute/metadata/syscheck.go | 26 + .../go/compute/metadata/syscheck_linux.go | 28 + .../go/compute/metadata/syscheck_windows.go | 38 + .../google/vendor/cloud.google.com/go/doc.go | 9 +- .../google/vendor/cloud.google.com/go/go.work | 6 +- .../vendor/cloud.google.com/go/go.work.sum | 16 +- .../vendor/cloud.google.com/go/iam/CHANGES.md | 49 + .../go/iam/apiv1/iampb/iam_policy.pb.go | 14 +- .../go/iam/apiv1/iampb/options.pb.go | 8 +- .../go/iam/apiv1/iampb/policy.pb.go | 20 +- .../go/longrunning/CHANGES.md | 49 + .../go/longrunning/autogen/auxiliary_go123.go | 32 + .../autogen/longrunningpb/operations.pb.go | 24 +- .../longrunning/autogen/operations_client.go | 14 +- .../cloud.google.com/go/monitoring/LICENSE | 202 + .../apiv3/v2/alert_policy_client.go | 399 + .../go/monitoring/apiv3/v2/auxiliary.go | 682 + .../go/monitoring/apiv3/v2/auxiliary_go123.go | 112 + .../go/monitoring/apiv3/v2/doc.go | 124 + .../monitoring/apiv3/v2/gapic_metadata.json | 336 + .../go/monitoring/apiv3/v2/group_client.go | 466 + .../go/monitoring/apiv3/v2/metric_client.go | 578 + .../apiv3/v2/monitoringpb/alert.pb.go | 2432 ++ .../apiv3/v2/monitoringpb/alert_service.pb.go | 1045 + .../apiv3/v2/monitoringpb/common.pb.go | 1165 + .../v2/monitoringpb/dropped_labels.pb.go | 197 + .../apiv3/v2/monitoringpb/group.pb.go | 265 + .../apiv3/v2/monitoringpb/group_service.pb.go | 1319 ++ .../apiv3/v2/monitoringpb/metric.pb.go | 1194 + .../v2/monitoringpb/metric_service.pb.go | 2502 ++ .../v2/monitoringpb/mutation_record.pb.go | 192 + .../apiv3/v2/monitoringpb/notification.pb.go | 647 + .../monitoringpb/notification_service.pb.go | 2002 ++ .../apiv3/v2/monitoringpb/query_service.pb.go | 212 + .../apiv3/v2/monitoringpb/service.pb.go | 3107 +++ .../v2/monitoringpb/service_service.pb.go | 1796 ++ .../apiv3/v2/monitoringpb/snooze.pb.go | 315 + .../v2/monitoringpb/snooze_service.pb.go | 867 + .../apiv3/v2/monitoringpb/span_context.pb.go | 188 + .../apiv3/v2/monitoringpb/uptime.pb.go | 2726 +++ .../v2/monitoringpb/uptime_service.pb.go | 1226 + .../apiv3/v2/notification_channel_client.go | 618 + 
.../go/monitoring/apiv3/v2/query_client.go | 233 + .../apiv3/v2/service_monitoring_client.go | 565 + .../go/monitoring/apiv3/v2/snooze_client.go | 343 + .../apiv3/v2/uptime_check_client.go | 450 + .../go/monitoring/apiv3/v2/version.go | 23 + .../go/monitoring/internal/version.go | 18 + ...elease-please-config-yoshi-submodules.json | 6 + .../google/assuredworkloads/workload.go | 81 +- .../google/assuredworkloads/workload.yaml | 47 +- .../assuredworkloads/workload_internal.go | 385 + .../assuredworkloads/workload_schema.go | 40 +- .../assuredworkloads/workload_yaml_embed.go | 6 +- .../google/clouddeploy/delivery_pipeline.go | 6 + .../google/clouddeploy/delivery_pipeline.yaml | 14 + .../clouddeploy/delivery_pipeline_internal.go | 38 + .../clouddeploy/delivery_pipeline_schema.go | 10 + .../delivery_pipeline_yaml_embed.go | 6 +- .../services/google/clouddeploy/target.go | 3 + .../services/google/clouddeploy/target.yaml | 5 + .../google/clouddeploy/target_internal.go | 19 + .../google/clouddeploy/target_schema.go | 5 + .../google/clouddeploy/target_yaml_embed.go | 6 +- .../google/compute/instance_template.yaml | 2 +- .../services/google/containeraws/node_pool.go | 84 + .../google/containeraws/node_pool.yaml | 38 + .../google/containeraws/node_pool_internal.go | 422 + .../google/containeraws/node_pool_schema.go | 42 + .../containeraws/node_pool_yaml_embed.go | 6 +- .../google/gkehub/feature_membership.go | 33 + .../google/gkehub/feature_membership.yaml | 29 +- .../gkehub/feature_membership_internal.go | 87 + .../gkehub/feature_membership_schema.go | 21 +- .../gkehub/feature_membership_yaml_embed.go | 6 +- .../google/networkconnectivity/client.go | 32 - .../google/networkconnectivity/hub.go | 450 - .../google/networkconnectivity/hub.yaml | 155 - .../networkconnectivity/hub_internal.go | 1113 - .../google/networkconnectivity/hub_schema.go | 203 - .../networkconnectivity/hub_yaml_embed.go | 23 - .../google/networkconnectivity/spoke.go | 661 - .../google/networkconnectivity/spoke.yaml | 303 - .../networkconnectivity/spoke_internal.go | 2607 --- .../networkconnectivity/spoke_schema.go | 391 - .../networkconnectivity/spoke_yaml_embed.go | 23 - .../go-crypto/openpgp/packet/signature.go | 7 - .../ProtonMail/go-crypto/openpgp/read.go | 45 +- .../github.com/cespare/xxhash/v2/README.md | 2 + .../github.com/cespare/xxhash/v2/xxhash.go | 29 +- .../cespare/xxhash/v2/xxhash_asm.go | 2 +- .../cespare/xxhash/v2/xxhash_other.go | 2 +- .../cespare/xxhash/v2/xxhash_safe.go | 2 +- .../cespare/xxhash/v2/xxhash_unsafe.go | 2 +- .../xds/go/udpa/annotations/migrate.pb.go | 4 +- .../xds/go/udpa/annotations/security.pb.go | 4 +- .../xds/go/udpa/annotations/sensitive.pb.go | 4 +- .../cncf/xds/go/udpa/annotations/status.pb.go | 4 +- .../xds/go/udpa/annotations/versioning.pb.go | 4 +- .../xds/go/udpa/type/v1/typed_struct.pb.go | 16 +- .../xds/go/xds/annotations/v3/migrate.pb.go | 4 +- .../xds/go/xds/annotations/v3/security.pb.go | 4 +- .../xds/go/xds/annotations/v3/sensitive.pb.go | 4 +- .../xds/go/xds/annotations/v3/status.pb.go | 4 +- .../go/xds/annotations/v3/versioning.pb.go | 4 +- .../cncf/xds/go/xds/core/v3/authority.pb.go | 4 +- .../cncf/xds/go/xds/core/v3/cidr.pb.go | 16 +- .../xds/go/xds/core/v3/collection_entry.pb.go | 16 +- .../xds/go/xds/core/v3/context_params.pb.go | 4 +- .../cncf/xds/go/xds/core/v3/extension.pb.go | 14 +- .../cncf/xds/go/xds/core/v3/resource.pb.go | 12 +- .../xds/go/xds/core/v3/resource_locator.pb.go | 4 +- .../xds/go/xds/core/v3/resource_name.pb.go | 4 +- 
.../xds/data/orca/v3/orca_load_report.pb.go | 4 +- .../xds/go/xds/service/orca/v3/orca.pb.go | 125 +- .../go/xds/service/orca/v3/orca_grpc.pb.go | 135 + .../cncf/xds/go/xds/type/matcher/v3/cel.pb.go | 42 +- .../xds/go/xds/type/matcher/v3/domain.pb.go | 4 +- .../go/xds/type/matcher/v3/http_inputs.pb.go | 27 +- .../cncf/xds/go/xds/type/matcher/v3/ip.pb.go | 4 +- .../xds/go/xds/type/matcher/v3/matcher.pb.go | 270 +- .../xds/go/xds/type/matcher/v3/range.pb.go | 4 +- .../xds/go/xds/type/matcher/v3/regex.pb.go | 4 +- .../xds/go/xds/type/matcher/v3/string.pb.go | 4 +- .../cncf/xds/go/xds/type/v3/cel.pb.go | 142 +- .../xds/go/xds/type/v3/cel.pb.validate.go | 71 +- .../cncf/xds/go/xds/type/v3/range.pb.go | 4 +- .../xds/go/xds/type/v3/typed_struct.pb.go | 16 +- .../envoy/admin/v3/certs.pb.go | 22 +- .../envoy/admin/v3/certs.pb.validate.go | 1 + .../envoy/admin/v3/certs_vtproto.pb.go | 504 + .../envoy/admin/v3/clusters.pb.go | 2 +- .../envoy/admin/v3/clusters.pb.validate.go | 1 + .../envoy/admin/v3/clusters_vtproto.pb.go | 656 + .../envoy/admin/v3/config_dump.pb.go | 34 +- .../envoy/admin/v3/config_dump.pb.validate.go | 1 + .../envoy/admin/v3/config_dump_shared.pb.go | 106 +- .../v3/config_dump_shared.pb.validate.go | 1 + .../admin/v3/config_dump_shared_vtproto.pb.go | 1715 ++ .../envoy/admin/v3/config_dump_vtproto.pb.go | 466 + .../envoy/admin/v3/init_dump.pb.go | 2 +- .../envoy/admin/v3/init_dump.pb.validate.go | 1 + .../envoy/admin/v3/init_dump_vtproto.pb.go | 149 + .../envoy/admin/v3/listeners.pb.go | 2 +- .../envoy/admin/v3/listeners.pb.validate.go | 1 + .../envoy/admin/v3/listeners_vtproto.pb.go | 203 + .../envoy/admin/v3/memory.pb.go | 2 +- .../envoy/admin/v3/memory.pb.validate.go | 1 + .../envoy/admin/v3/memory_vtproto.pb.go | 110 + .../envoy/admin/v3/metrics.pb.go | 2 +- .../envoy/admin/v3/metrics.pb.validate.go | 1 + .../envoy/admin/v3/metrics_vtproto.pb.go | 89 + .../envoy/admin/v3/mutex_stats.pb.go | 2 +- .../envoy/admin/v3/mutex_stats.pb.validate.go | 1 + .../envoy/admin/v3/mutex_stats_vtproto.pb.go | 86 + .../envoy/admin/v3/server_info.pb.go | 295 +- .../envoy/admin/v3/server_info.pb.validate.go | 5 + .../envoy/admin/v3/server_info_vtproto.pb.go | 671 + .../go-control-plane/envoy/admin/v3/tap.pb.go | 2 +- .../envoy/admin/v3/tap.pb.validate.go | 1 + .../envoy/admin/v3/tap_vtproto.pb.go | 106 + .../envoy/annotations/deprecation.pb.go | 2 +- .../annotations/deprecation.pb.validate.go | 1 + .../envoy/annotations/resource.pb.go | 2 +- .../envoy/annotations/resource.pb.validate.go | 1 + .../envoy/annotations/resource_vtproto.pb.go | 73 + .../envoy/config/accesslog/v3/accesslog.pb.go | 200 +- .../accesslog/v3/accesslog.pb.validate.go | 4 +- .../accesslog/v3/accesslog_vtproto.pb.go | 1751 ++ .../envoy/config/bootstrap/v3/bootstrap.pb.go | 1061 +- .../bootstrap/v3/bootstrap.pb.validate.go | 163 + .../bootstrap/v3/bootstrap_vtproto.pb.go | 3128 +++ .../config/cluster/v3/circuit_breaker.pb.go | 30 +- .../cluster/v3/circuit_breaker.pb.validate.go | 1 + .../cluster/v3/circuit_breaker_vtproto.pb.go | 337 + .../envoy/config/cluster/v3/cluster.pb.go | 1510 +- .../config/cluster/v3/cluster.pb.validate.go | 156 + .../config/cluster/v3/cluster_vtproto.pb.go | 3439 +++ .../envoy/config/cluster/v3/filter.pb.go | 10 +- .../config/cluster/v3/filter.pb.validate.go | 1 + .../config/cluster/v3/filter_vtproto.pb.go | 121 + .../config/cluster/v3/outlier_detection.pb.go | 468 +- .../v3/outlier_detection.pb.validate.go | 64 + .../v3/outlier_detection_vtproto.pb.go | 456 + .../config/common/matcher/v3/matcher.pb.go | 2 +-
.../common/matcher/v3/matcher.pb.validate.go | 1 + .../common/matcher/v3/matcher_vtproto.pb.go | 2035 ++ .../envoy/config/core/v3/address.pb.go | 52 +- .../config/core/v3/address.pb.validate.go | 1 + .../config/core/v3/address_vtproto.pb.go | 859 + .../envoy/config/core/v3/backoff.pb.go | 16 +- .../config/core/v3/backoff.pb.validate.go | 1 + .../config/core/v3/backoff_vtproto.pb.go | 91 + .../envoy/config/core/v3/base.pb.go | 1353 +- .../envoy/config/core/v3/base.pb.validate.go | 1242 +- .../envoy/config/core/v3/base_vtproto.pb.go | 2497 ++ .../envoy/config/core/v3/config_source.pb.go | 75 +- .../core/v3/config_source.pb.validate.go | 1 + .../core/v3/config_source_vtproto.pb.go | 831 + .../config/core/v3/event_service_config.pb.go | 2 +- .../v3/event_service_config.pb.validate.go | 1 + .../v3/event_service_config_vtproto.pb.go | 110 + .../envoy/config/core/v3/extension.pb.go | 10 +- .../config/core/v3/extension.pb.validate.go | 1 + .../config/core/v3/extension_vtproto.pb.go | 88 + .../config/core/v3/grpc_method_list.pb.go | 2 +- .../core/v3/grpc_method_list.pb.validate.go | 1 + .../core/v3/grpc_method_list_vtproto.pb.go | 149 + .../envoy/config/core/v3/grpc_service.pb.go | 642 +- .../core/v3/grpc_service.pb.validate.go | 61 + .../config/core/v3/grpc_service_vtproto.pb.go | 1648 ++ .../envoy/config/core/v3/health_check.pb.go | 702 +- .../core/v3/health_check.pb.validate.go | 40 +- .../config/core/v3/health_check_vtproto.pb.go | 1384 ++ .../envoy/config/core/v3/http_service.pb.go | 2 +- .../core/v3/http_service.pb.validate.go | 1 + .../config/core/v3/http_service_vtproto.pb.go | 94 + .../envoy/config/core/v3/http_uri.pb.go | 12 +- .../config/core/v3/http_uri.pb.validate.go | 1 + .../config/core/v3/http_uri_vtproto.pb.go | 123 + .../envoy/config/core/v3/protocol.pb.go | 968 +- .../config/core/v3/protocol.pb.validate.go | 36 + .../config/core/v3/protocol_vtproto.pb.go | 1718 ++ .../envoy/config/core/v3/proxy_protocol.pb.go | 2 +- .../core/v3/proxy_protocol.pb.validate.go | 1 + .../core/v3/proxy_protocol_vtproto.pb.go | 162 + .../envoy/config/core/v3/resolver.pb.go | 2 +- .../config/core/v3/resolver.pb.validate.go | 1 + .../config/core/v3/resolver_vtproto.pb.go | 163 + .../envoy/config/core/v3/socket_option.pb.go | 285 +- .../core/v3/socket_option.pb.validate.go | 398 + .../core/v3/socket_option_vtproto.pb.go | 391 + .../core/v3/substitution_format_string.pb.go | 10 +- .../substitution_format_string.pb.validate.go | 1 + .../substitution_format_string_vtproto.pb.go | 298 + .../config/core/v3/udp_socket_config.pb.go | 18 +- .../core/v3/udp_socket_config.pb.validate.go | 1 + .../core/v3/udp_socket_config_vtproto.pb.go | 91 + .../envoy/config/endpoint/v3/endpoint.pb.go | 24 +- .../endpoint/v3/endpoint.pb.validate.go | 1 + .../endpoint/v3/endpoint_components.pb.go | 161 +- .../v3/endpoint_components.pb.validate.go | 30 + .../v3/endpoint_components_vtproto.pb.go | 896 + .../config/endpoint/v3/endpoint_vtproto.pb.go | 331 + .../config/endpoint/v3/load_report.pb.go | 552 +- .../endpoint/v3/load_report.pb.validate.go | 201 + .../endpoint/v3/load_report_vtproto.pb.go | 696 + .../config/listener/v3/api_listener.pb.go | 10 +- .../listener/v3/api_listener.pb.validate.go | 1 + .../listener/v3/api_listener_vtproto.pb.go | 77 + .../envoy/config/listener/v3/listener.pb.go | 179 +- .../listener/v3/listener.pb.validate.go | 3 + .../listener/v3/listener_components.pb.go | 44 +- .../v3/listener_components.pb.validate.go | 1 + .../v3/listener_components_vtproto.pb.go | 1213 +
.../config/listener/v3/listener_vtproto.pb.go | 1297 ++ .../config/listener/v3/quic_config.pb.go | 122 +- .../listener/v3/quic_config.pb.validate.go | 59 + .../listener/v3/quic_config_vtproto.pb.go | 345 + .../listener/v3/udp_listener_config.pb.go | 2 +- .../v3/udp_listener_config.pb.validate.go | 1 + .../v3/udp_listener_config_vtproto.pb.go | 184 + .../config/metrics/v3/metrics_service.pb.go | 11 +- .../metrics/v3/metrics_service.pb.validate.go | 1 + .../metrics/v3/metrics_service_vtproto.pb.go | 139 + .../envoy/config/metrics/v3/stats.pb.go | 24 +- .../config/metrics/v3/stats.pb.validate.go | 1 + .../config/metrics/v3/stats_vtproto.pb.go | 976 + .../envoy/config/overload/v3/overload.pb.go | 26 +- .../overload/v3/overload.pb.validate.go | 1 + .../config/overload/v3/overload_vtproto.pb.go | 938 + .../envoy/config/rbac/v3/rbac.pb.go | 265 +- .../envoy/config/rbac/v3/rbac.pb.validate.go | 43 + .../envoy/config/rbac/v3/rbac_vtproto.pb.go | 2103 ++ .../envoy/config/route/v3/route.pb.go | 37 +- .../config/route/v3/route.pb.validate.go | 1 + .../config/route/v3/route_components.pb.go | 2321 +- .../route/v3/route_components.pb.validate.go | 32 + .../route/v3/route_components_vtproto.pb.go | 8302 +++++++ .../envoy/config/route/v3/route_vtproto.pb.go | 474 + .../envoy/config/route/v3/scoped_route.pb.go | 2 +- .../route/v3/scoped_route.pb.validate.go | 1 + .../route/v3/scoped_route_vtproto.pb.go | 263 + .../envoy/config/tap/v3/common.pb.go | 22 +- .../envoy/config/tap/v3/common.pb.validate.go | 1 + .../envoy/config/tap/v3/common_vtproto.pb.go | 1591 ++ .../envoy/config/trace/v3/datadog.pb.go | 169 +- .../config/trace/v3/datadog.pb.validate.go | 161 + .../config/trace/v3/datadog_vtproto.pb.go | 167 + .../envoy/config/trace/v3/dynamic_ot.pb.go | 61 +- .../config/trace/v3/dynamic_ot.pb.validate.go | 1 + .../config/trace/v3/dynamic_ot_vtproto.pb.go | 88 + .../envoy/config/trace/v3/http_tracer.pb.go | 10 +- .../trace/v3/http_tracer.pb.validate.go | 1 + .../config/trace/v3/http_tracer_vtproto.pb.go | 178 + .../envoy/config/trace/v3/lightstep.pb.go | 2 +- .../config/trace/v3/lightstep.pb.validate.go | 1 + .../config/trace/v3/lightstep_vtproto.pb.go | 145 + .../envoy/config/trace/v3/opencensus.pb.go | 175 +- .../config/trace/v3/opencensus.pb.validate.go | 1 + .../config/trace/v3/opencensus_vtproto.pb.go | 311 + .../envoy/config/trace/v3/opentelemetry.pb.go | 2 +- .../trace/v3/opentelemetry.pb.validate.go | 1 + .../trace/v3/opentelemetry_vtproto.pb.go | 206 + .../envoy/config/trace/v3/service.pb.go | 2 +- .../config/trace/v3/service.pb.validate.go | 1 + .../config/trace/v3/service_vtproto.pb.go | 95 + .../envoy/config/trace/v3/skywalking.pb.go | 16 +- .../config/trace/v3/skywalking.pb.validate.go | 1 + .../config/trace/v3/skywalking_vtproto.pb.go | 224 + .../envoy/config/trace/v3/trace.pb.go | 2 +- .../config/trace/v3/trace.pb.validate.go | 1 + .../envoy/config/trace/v3/xray.pb.go | 10 +- .../envoy/config/trace/v3/xray.pb.validate.go | 1 + .../envoy/config/trace/v3/xray_vtproto.pb.go | 221 + .../envoy/config/trace/v3/zipkin.pb.go | 71 +- .../config/trace/v3/zipkin.pb.validate.go | 1 + .../config/trace/v3/zipkin_vtproto.pb.go | 144 + .../envoy/data/accesslog/v3/accesslog.pb.go | 233 +- .../accesslog/v3/accesslog.pb.validate.go | 5 + .../data/accesslog/v3/accesslog_vtproto.pb.go | 2040 ++ .../clusters/aggregate/v3/cluster.pb.go | 2 +- .../aggregate/v3/cluster.pb.validate.go | 1 + .../aggregate/v3/cluster_vtproto.pb.go | 77 + .../filters/common/fault/v3/fault.pb.go | 10 +- 
.../common/fault/v3/fault.pb.validate.go | 1 + .../common/fault/v3/fault_vtproto.pb.go | 491 + .../filters/http/fault/v3/fault.pb.go | 18 +- .../http/fault/v3/fault.pb.validate.go | 1 + .../filters/http/fault/v3/fault_vtproto.pb.go | 546 + .../filters/http/rbac/v3/rbac.pb.go | 122 +- .../filters/http/rbac/v3/rbac.pb.validate.go | 5 + .../filters/http/rbac/v3/rbac_vtproto.pb.go | 283 + .../filters/http/router/v3/router.pb.go | 18 +- .../http/router/v3/router.pb.validate.go | 1 + .../http/router/v3/router_vtproto.pb.go | 302 + .../v3/http_connection_manager.pb.go | 693 +- .../v3/http_connection_manager.pb.validate.go | 5 + .../v3/http_connection_manager_vtproto.pb.go | 3470 +++ .../v3/client_side_weighted_round_robin.pb.go | 36 +- ...t_side_weighted_round_robin.pb.validate.go | 1 + ...nt_side_weighted_round_robin_vtproto.pb.go | 148 + .../common/v3/common.pb.go | 24 +- .../common/v3/common.pb.validate.go | 1 + .../common/v3/common_vtproto.pb.go | 492 + .../least_request/v3/least_request.pb.go | 248 +- .../v3/least_request.pb.validate.go | 12 + .../v3/least_request_vtproto.pb.go | 196 + .../pick_first/v3/pick_first.pb.go | 2 +- .../pick_first/v3/pick_first.pb.validate.go | 1 + .../pick_first/v3/pick_first_vtproto.pb.go | 74 + .../ring_hash/v3/ring_hash.pb.go | 133 +- .../ring_hash/v3/ring_hash.pb.validate.go | 5 +- .../ring_hash/v3/ring_hash_vtproto.pb.go | 191 + .../wrr_locality/v3/wrr_locality.pb.go | 2 +- .../v3/wrr_locality.pb.validate.go | 1 + .../v3/wrr_locality_vtproto.pb.go | 95 + .../rbac/audit_loggers/stream/v3/stream.pb.go | 2 +- .../stream/v3/stream.pb.validate.go | 1 + .../stream/v3/stream_vtproto.pb.go | 61 + .../transport_sockets/tls/v3/cert.pb.go | 2 +- .../tls/v3/cert.pb.validate.go | 1 + .../transport_sockets/tls/v3/common.pb.go | 417 +- .../tls/v3/common.pb.validate.go | 139 + .../tls/v3/common_vtproto.pb.go | 1155 + .../transport_sockets/tls/v3/secret.pb.go | 2 +- .../tls/v3/secret.pb.validate.go | 1 + .../tls/v3/secret_vtproto.pb.go | 415 + .../transport_sockets/tls/v3/tls.pb.go | 565 +- .../tls/v3/tls.pb.validate.go | 32 + .../tls/v3/tls_spiffe_validator_config.pb.go | 2 +- ...tls_spiffe_validator_config.pb.validate.go | 1 + .../tls_spiffe_validator_config_vtproto.pb.go | 167 + .../tls/v3/tls_vtproto.pb.go | 1265 + .../envoy/service/discovery/v3/ads.pb.go | 188 +- .../service/discovery/v3/ads.pb.validate.go | 1 + .../envoy/service/discovery/v3/ads_grpc.pb.go | 210 + .../service/discovery/v3/ads_vtproto.pb.go | 61 + .../service/discovery/v3/discovery.pb.go | 22 +- .../discovery/v3/discovery.pb.validate.go | 1 + .../discovery/v3/discovery_vtproto.pb.go | 1546 ++ .../envoy/service/load_stats/v3/lrs.pb.go | 192 +- .../service/load_stats/v3/lrs.pb.validate.go | 1 + .../service/load_stats/v3/lrs_grpc.pb.go | 197 + .../service/load_stats/v3/lrs_vtproto.pb.go | 230 + .../envoy/service/status/v3/csds.pb.go | 348 +- .../service/status/v3/csds.pb.validate.go | 3 + .../envoy/service/status/v3/csds_grpc.pb.go | 177 + .../service/status/v3/csds_vtproto.pb.go | 866 + .../envoy/type/http/v3/cookie.pb.go | 14 +- .../envoy/type/http/v3/cookie.pb.validate.go | 1 + .../envoy/type/http/v3/cookie_vtproto.pb.go | 99 + .../type/http/v3/path_transformation.pb.go | 2 +- .../v3/path_transformation.pb.validate.go | 1 + .../http/v3/path_transformation_vtproto.pb.go | 300 + .../envoy/type/matcher/v3/filter_state.pb.go | 2 +- .../matcher/v3/filter_state.pb.validate.go | 1 + .../matcher/v3/filter_state_vtproto.pb.go | 121 + .../envoy/type/matcher/v3/http_inputs.pb.go | 2 +- 
.../matcher/v3/http_inputs.pb.validate.go | 1 + .../type/matcher/v3/http_inputs_vtproto.pb.go | 289 + .../envoy/type/matcher/v3/metadata.pb.go | 2 +- .../type/matcher/v3/metadata.pb.validate.go | 1 + .../type/matcher/v3/metadata_vtproto.pb.go | 195 + .../envoy/type/matcher/v3/node.pb.go | 2 +- .../envoy/type/matcher/v3/node.pb.validate.go | 1 + .../envoy/type/matcher/v3/node_vtproto.pb.go | 94 + .../envoy/type/matcher/v3/number.pb.go | 2 +- .../type/matcher/v3/number.pb.validate.go | 1 + .../type/matcher/v3/number_vtproto.pb.go | 160 + .../envoy/type/matcher/v3/path.pb.go | 2 +- .../envoy/type/matcher/v3/path.pb.validate.go | 1 + .../envoy/type/matcher/v3/path_vtproto.pb.go | 110 + .../envoy/type/matcher/v3/regex.pb.go | 10 +- .../type/matcher/v3/regex.pb.validate.go | 1 + .../envoy/type/matcher/v3/regex_vtproto.pb.go | 246 + .../type/matcher/v3/status_code_input.pb.go | 2 +- .../v3/status_code_input.pb.validate.go | 1 + .../v3/status_code_input_vtproto.pb.go | 104 + .../envoy/type/matcher/v3/string.pb.go | 138 +- .../type/matcher/v3/string.pb.validate.go | 43 + .../type/matcher/v3/string_vtproto.pb.go | 370 + .../envoy/type/matcher/v3/struct.pb.go | 2 +- .../type/matcher/v3/struct.pb.validate.go | 1 + .../type/matcher/v3/struct_vtproto.pb.go | 171 + .../envoy/type/matcher/v3/value.pb.go | 2 +- .../type/matcher/v3/value.pb.validate.go | 1 + .../envoy/type/matcher/v3/value_vtproto.pb.go | 545 + .../envoy/type/metadata/v3/metadata.pb.go | 2 +- .../type/metadata/v3/metadata.pb.validate.go | 1 + .../type/metadata/v3/metadata_vtproto.pb.go | 563 + .../envoy/type/tracing/v3/custom_tag.pb.go | 2 +- .../type/tracing/v3/custom_tag.pb.validate.go | 1 + .../type/tracing/v3/custom_tag_vtproto.pb.go | 556 + .../envoy/type/v3/hash_policy.pb.go | 2 +- .../envoy/type/v3/hash_policy.pb.validate.go | 1 + .../envoy/type/v3/hash_policy_vtproto.pb.go | 251 + .../go-control-plane/envoy/type/v3/http.pb.go | 2 +- .../envoy/type/v3/http.pb.validate.go | 1 + .../envoy/type/v3/http_status.pb.go | 2 +- .../envoy/type/v3/http_status.pb.validate.go | 1 + .../envoy/type/v3/http_status_vtproto.pb.go | 70 + .../envoy/type/v3/percent.pb.go | 2 +- .../envoy/type/v3/percent.pb.validate.go | 1 + .../envoy/type/v3/percent_vtproto.pb.go | 132 + .../envoy/type/v3/range.pb.go | 2 +- .../envoy/type/v3/range.pb.validate.go | 1 + .../envoy/type/v3/range_vtproto.pb.go | 200 + .../envoy/type/v3/ratelimit_strategy.pb.go | 2 +- .../type/v3/ratelimit_strategy.pb.validate.go | 1 + .../type/v3/ratelimit_strategy_vtproto.pb.go | 241 + .../envoy/type/v3/ratelimit_unit.pb.go | 2 +- .../type/v3/ratelimit_unit.pb.validate.go | 1 + .../envoy/type/v3/semantic_version.pb.go | 2 +- .../type/v3/semantic_version.pb.validate.go | 1 + .../type/v3/semantic_version_vtproto.pb.go | 86 + .../envoy/type/v3/token_bucket.pb.go | 20 +- .../envoy/type/v3/token_bucket.pb.validate.go | 1 + .../envoy/type/v3/token_bucket_vtproto.pb.go | 100 + .../validate/validate.pb.go | 10 +- .../vendor/github.com/go-logr/logr/README.md | 1 + .../github.com/go-logr/logr/funcr/funcr.go | 169 +- .../github.com/golang/glog/glog_file.go | 31 +- .../golang/glog/glog_file_nonwindows.go | 12 + .../golang/glog/glog_file_windows.go | 30 + .../golang/protobuf/ptypes/any/any.pb.go | 62 - .../protobuf/ptypes/duration/duration.pb.go | 63 - .../protobuf/ptypes/struct/struct.pb.go | 78 - .../protobuf/ptypes/timestamp/timestamp.pb.go | 64 - .../protobuf/ptypes/wrappers/wrappers.pb.go | 71 - .../proto/common_go_proto/common.pb.go | 99 +- .../s2a_context_go_proto/s2a_context.pb.go | 6 +- 
.../internal/proto/s2a_go_proto/s2a.pb.go | 28 +- .../proto/s2a_go_proto/s2a_grpc.pb.go | 13 +- .../proto/v2/common_go_proto/common.pb.go | 296 +- .../v2/s2a_context_go_proto/s2a_context.pb.go | 73 +- .../internal/proto/v2/s2a_go_proto/s2a.pb.go | 772 +- .../proto/v2/s2a_go_proto/s2a_grpc.pb.go | 13 +- .../google/s2a-go/internal/record/record.go | 34 +- .../internal/tokenmanager/tokenmanager.go | 15 +- .../google/s2a-go/internal/v2/s2av2.go | 40 +- .../v2/tlsconfigstore/tlsconfigstore.go | 15 +- .../vendor/github.com/google/s2a-go/s2a.go | 103 +- .../github.com/google/s2a-go/s2a_options.go | 28 +- .../gax-go/v2/.release-please-manifest.json | 2 +- .../googleapis/gax-go/v2/CHANGES.md | 14 + .../googleapis/gax-go/v2/apierror/apierror.go | 4 +- .../github.com/googleapis/gax-go/v2/header.go | 31 +- .../googleapis/gax-go/v2/internal/version.go | 2 +- .../googleapis/gax-go/v2/iterator/iterator.go | 63 + .../github.com/hashicorp/go-hclog/README.md | 5 +- .../hashicorp/go-hclog/intlogger.go | 135 +- .../github.com/hashicorp/go-hclog/logger.go | 22 + .../hashicorp/hc-install/version/VERSION | 2 +- .../github.com/hashicorp/hcl/v2/CHANGELOG.md | 24 +- .../github.com/hashicorp/hcl/v2/Makefile | 18 + .../hashicorp/hcl/v2/hclsyntax/expression.go | 117 + .../hcl/v2/hclsyntax/expression_vars.go | 6 +- .../hashicorp/hcl/v2/hclsyntax/generate.go | 2 +- .../hashicorp/hcl/v2/hclsyntax/parser.go | 76 +- .../hcl/v2/hclsyntax/scan_string_lit.go | 25 +- .../hcl/v2/hclsyntax/scan_string_lit.rl | 2 + .../hashicorp/hcl/v2/hclsyntax/scan_tokens.go | 1101 +- .../hashicorp/hcl/v2/hclsyntax/scan_tokens.rl | 4 + .../hashicorp/hcl/v2/hclsyntax/spec.md | 2 +- .../hashicorp/hcl/v2/hclsyntax/token.go | 5 +- .../hcl/v2/hclsyntax/token_type_string.go | 12 +- .../github.com/hashicorp/hcl/v2/tools.go | 11 + .../internal/version/version.go | 2 +- .../hashicorp/terraform-exec/tfexec/apply.go | 29 +- .../terraform-exec/tfexec/options.go | 12 + .../hashicorp/terraform-exec/tfexec/plan.go | 48 +- .../terraform-exec/tfexec/version.go | 17 + .../hashicorp/terraform-json/plan.go | 21 + .../internal/logging/keys.go | 6 + .../tfprotov5/client_capabilities.go | 49 + .../tfprotov5/data_source.go | 8 + .../terraform-plugin-go/tfprotov5/deferred.go | 44 + .../internal/fromproto/client_capabilities.go | 69 + .../internal/fromproto/data_source.go | 7 +- .../tfprotov5/internal/fromproto/provider.go | 5 +- .../tfprotov5/internal/fromproto/resource.go | 27 +- .../tf5serverlogging/client_capabilities.go | 81 + .../internal/tf5serverlogging/deferred.go | 24 + .../internal/tfplugin5/tfplugin5.pb.go | 2384 +- .../internal/tfplugin5/tfplugin5.proto | 50 +- .../internal/tfplugin5/tfplugin5_grpc.pb.go | 6 +- .../tfprotov5/internal/toproto/data_source.go | 1 + .../tfprotov5/internal/toproto/deferred.go | 21 + .../tfprotov5/internal/toproto/resource.go | 3 + .../terraform-plugin-go/tfprotov5/provider.go | 9 +- .../terraform-plugin-go/tfprotov5/resource.go | 40 +- .../tfprotov5/tf5server/server.go | 109 +- .../tfprotov6/client_capabilities.go | 49 + .../tfprotov6/data_source.go | 8 + .../terraform-plugin-go/tfprotov6/deferred.go | 44 + .../internal/fromproto/client_capabilities.go | 69 + .../internal/fromproto/data_source.go | 7 +- .../tfprotov6/internal/fromproto/provider.go | 5 +- .../tfprotov6/internal/fromproto/resource.go | 27 +- .../tf6serverlogging/client_capabilities.go | 81 + .../internal/tf6serverlogging/deferred.go | 24 + .../internal/tfplugin6/tfplugin6.pb.go | 2055 +- .../internal/tfplugin6/tfplugin6.proto | 50 +- 
.../internal/tfplugin6/tfplugin6_grpc.pb.go | 6 +- .../tfprotov6/internal/toproto/data_source.go | 1 + .../tfprotov6/internal/toproto/deferred.go | 21 + .../tfprotov6/internal/toproto/resource.go | 3 + .../terraform-plugin-go/tfprotov6/provider.go | 9 +- .../terraform-plugin-go/tfprotov6/resource.go | 40 +- .../tfprotov6/tf6server/server.go | 109 +- .../tftypes/value_msgpack.go | 2 +- .../v2/helper/resource/aliases.go | 145 - .../v2/helper/resource/teststep_validate.go | 128 - .../v2/internal/plugintest/util.go | 70 - .../terraform-plugin-testing/LICENSE | 375 + .../terraform-plugin-testing/config/config.go | 38 + .../config/directory.go | 63 + .../terraform-plugin-testing/config/doc.go | 6 + .../terraform-plugin-testing/config/file.go | 63 + .../config/variable.go | 326 + .../helper/resource/environment_variables.go | 0 .../helper/resource/error.go | 132 + .../helper/resource/id.go | 62 + .../helper/resource/json.go | 0 .../helper/resource/plan_checks.go | 32 + .../helper/resource/plugin.go | 7 +- .../helper/resource/state.go | 292 + .../helper/resource/state_shim.go | 41 +- .../helper/resource/testcase_providers.go | 0 .../helper/resource/testcase_validate.go | 34 +- .../helper/resource/testing.go | 225 +- .../helper/resource/testing_config.go | 5 +- .../helper/resource/testing_new.go | 225 +- .../helper/resource/testing_new_config.go | 106 +- .../resource/testing_new_import_state.go | 61 +- .../resource/testing_new_refresh_state.go | 19 +- .../helper/resource/testing_sets.go | 2 +- .../helper/resource/teststep_providers.go | 28 +- .../helper/resource/teststep_validate.go | 239 + .../helper/resource/tfversion_checks.go | 31 + .../helper/resource/wait.go | 135 + .../internal/addrs/doc.go | 20 + .../internal/addrs/instance_key.go | 50 + .../internal/addrs/module.go | 16 + .../internal/addrs/module_instance.go | 242 + .../configs/configschema/coerce_value.go | 253 + .../internal/configs/configschema/doc.go | 17 + .../configs/configschema/empty_value.go | 62 + .../configs/configschema/implied_type.go | 71 + .../configschema/nestingmode_string.go | 28 + .../internal/configs/configschema/schema.go | 161 + .../internal/configs/hcl2shim/flatmap.go | 426 + .../internal/configs/hcl2shim/paths.go | 279 + .../internal/configs/hcl2shim/values.go | 233 + .../internal/configs/hcl2shim/values_equiv.go | 217 + .../internal/errorshim/error_join_shim.go | 47 + .../internal/logging/context.go | 78 + .../internal/logging/environment_variables.go | 27 + .../internal/logging/helper_resource.go | 35 + .../internal/logging/helper_schema.go | 35 + .../internal/logging/keys.go | 63 + .../internal/plugintest/config.go | 3 +- .../internal/plugintest/doc.go | 0 .../plugintest/environment_variables.go | 8 + .../internal/plugintest/guard.go | 0 .../internal/plugintest/helper.go | 42 +- .../internal/plugintest/util.go | 186 + .../internal/plugintest/working_dir.go | 79 +- .../internal/teststep/config.go | 241 + .../internal/teststep/directory.go | 94 + .../internal/teststep/file.go | 94 + .../internal/teststep/string.go | 61 + .../internal/tfdiags/config_traversals.go | 59 + .../internal/tfdiags/contextual.go | 84 + .../internal/tfdiags/diagnostic.go | 26 + .../internal/tfdiags/diagnostic_base.go | 34 + .../internal/tfdiags/diagnostics.go | 196 + .../internal/tfdiags/doc.go | 19 + .../internal/tfdiags/error.go | 27 + .../internal/tfdiags/severity_string.go | 29 + .../internal/tfdiags/simple_warning.go | 23 + .../terraform-plugin-testing/plancheck/doc.go | 5 + .../plancheck/expect_empty_plan.go | 36 + 
.../plancheck/expect_non_empty_plan.go | 29 + .../plancheck/expect_resource_action.go | 90 + .../plancheck/expect_sensitive_value.go | 61 + .../plancheck/expect_unknown_value.go | 61 + .../plancheck/plan_check.go | 30 + .../plancheck/resource_action.go | 50 + .../terraform/diff.go | 1055 + .../terraform/instancetype.go | 19 + .../terraform/instancetype_string.go | 26 + .../terraform/resource.go | 372 + .../terraform/resource_address.go | 229 + .../terraform/resource_mode.go | 18 + .../terraform/resource_mode_string.go | 24 + .../terraform/resource_provider.go | 37 + .../terraform/schemas.go | 37 + .../terraform/state.go | 1824 ++ .../terraform/state_filter.go | 273 + .../terraform/util.go | 25 + .../tfjsonpath/doc.go | 6 + .../tfjsonpath/path.go | 107 + .../tfjsonpath/step.go | 14 + .../terraform-plugin-testing/tfversion/all.go | 45 + .../terraform-plugin-testing/tfversion/any.go | 59 + .../terraform-plugin-testing/tfversion/doc.go | 5 + .../tfversion/require_above.go | 35 + .../tfversion/require_below.go | 35 + .../tfversion/require_between.go | 38 + .../tfversion/require_not.go | 32 + .../tfversion/skip_above.go | 35 + .../tfversion/skip_below.go | 35 + .../tfversion/skip_between.go | 38 + .../tfversion/skip_if.go | 32 + .../tfversion/version_check.go | 39 + .../tfversion/versions.go | 32 + .../terraform-provider-google/CHANGELOG.md | 613 +- .../google/envvar/envvar_utils.go | 8 + .../google/fwmodels/provider_model.go | 10 +- ...source_provider_config_plugin_framework.go | 233 + .../google/fwprovider/framework_provider.go | 54 +- .../google/fwresource/field_helpers.go | 2 +- .../google/fwtransport/framework_config.go | 92 +- .../data_source_provider_config_sdk.go | 166 + .../google/provider/provider.go | 38 +- .../google/provider/provider_dcl_resources.go | 55 +- .../provider/provider_mmv1_resources.go | 467 +- ...ce_access_context_manager_access_policy.go | 6 +- ...ccess_context_manager_service_perimeter.go | 5 +- ...service_perimeter_dry_run_egress_policy.go | 984 + ...ervice_perimeter_dry_run_ingress_policy.go | 970 + ...manager_service_perimeter_egress_policy.go | 94 +- ...anager_service_perimeter_ingress_policy.go | 99 +- .../resource_active_directory_domain.go | 20 + .../alloydb/resource_alloydb_cluster.go | 146 +- .../alloydb/resource_alloydb_instance.go | 40 + .../google/services/apigee/apigee_utils.go | 49 + .../apigee/resource_apigee_app_group.go | 567 + .../resource_apigee_app_group_sweeper.go} | 14 +- .../apigee/resource_apigee_developer.go | 568 + .../resource_apigee_developer_sweeper.go | 143 + ...esource_apigee_environment_keyvaluemaps.go | 253 + ...apigee_environment_keyvaluemaps_entries.go | 253 + .../apigee/resource_apigee_instance.go | 83 +- .../apigee/resource_apigee_nat_address.go | 132 + ...esource_app_engine_flexible_app_version.go | 4 +- ...esource_app_engine_standard_app_version.go | 4 +- .../apphub/resource_apphub_application.go | 25 +- ...ource_apphub_service_project_attachment.go | 4 +- ...data_source_artifact_registry_locations.go | 91 + .../resource_assured_workloads_workload.go | 75 +- .../data_source_google_bigquery_tables.go | 149 + .../services/bigquery/iam_bigquery_dataset.go | 4 +- .../bigquery/iam_bigquery_member_dataset.go | 330 + .../services/bigquery/iam_bigquery_table.go | 5 - .../bigquery/resource_bigquery_dataset.go | 58 +- .../resource_bigquery_dataset_access.go | 48 +- .../bigquery/resource_bigquery_job.go | 56 + .../bigquery/resource_bigquery_table.go | 133 +- ...ce_bigquery_analytics_hub_data_exchange.go | 131 + 
...resource_bigquery_analytics_hub_listing.go | 108 +- ...esource_bigquery_datapolicy_data_policy.go | 17 +- .../resource_bigquery_data_transfer_config.go | 104 +- .../resource_bigquery_reservation.go | 33 - .../bigtable/iam_bigtable_instance.go | 7 +- .../bigtable/resource_bigtable_app_profile.go | 87 + .../bigtable/resource_bigtable_instance.go | 2 +- .../bigtable/resource_bigtable_table.go | 191 +- ...google_certificate_manager_certificates.go | 140 + ...esource_certificate_manager_certificate.go | 15 + .../resource_clouddeploy_delivery_pipeline.go | 16 + .../resource_clouddeploy_target.go | 8 + .../resource_clouddomains_registration.go | 1 - .../resource_cloudfunctions_function.go | 27 +- .../resource_cloudfunctions2_function.go | 35 +- ...d_identity_group_transitive_memberships.go | 177 + .../resource_cloud_quotas_quota_preference.go | 2 +- .../resource_cloud_run_domain_mapping.go | 4 +- .../cloudrun/resource_cloud_run_service.go | 245 +- .../cloudrunv2/resource_cloud_run_v2_job.go | 349 +- .../resource_cloud_run_v2_service.go | 282 +- .../cloudtasks/resource_cloud_tasks_queue.go | 697 + .../composer/resource_composer_environment.go | 83 +- .../compute/compute_instance_helpers.go | 25 +- ...=> data_source_compute_security_policy.go} | 4 +- .../data_source_google_compute_instance.go | 10 + ...oogle_compute_instance_guest_attributes.go | 133 + ...e_compute_region_instance_group_manager.go | 65 + .../google/services/compute/metadata.go | 2 +- .../compute/resource_compute_attached_disk.go | 8 + .../resource_compute_backend_service.go | 470 +- .../services/compute/resource_compute_disk.go | 79 +- .../resource_compute_external_vpn_gateway.go | 31 +- .../compute/resource_compute_firewall.go | 4 +- .../resource_compute_forwarding_rule.go | 35 +- .../resource_compute_global_address.go | 242 + ...resource_compute_global_forwarding_rule.go | 2 +- .../resource_compute_ha_vpn_gateway.go | 33 +- .../compute/resource_compute_health_check.go | 49 + .../compute/resource_compute_instance.go | 259 +- ...resource_compute_instance_from_template.go | 20 +- ...resource_compute_instance_group_manager.go | 8 +- .../resource_compute_instance_template.go | 87 +- .../compute/resource_compute_interconnect.go | 291 +- ...esource_compute_managed_ssl_certificate.go | 1 - .../compute/resource_compute_network.go | 2 +- .../resource_compute_network_endpoints.go | 4 +- ...ute_network_firewall_policy_association.go | 261 +- ...ork_firewall_policy_association_sweeper.go | 139 + ...ce_compute_network_firewall_policy_rule.go | 1498 +- ...e_network_firewall_policy_rule_sweeper.go} | 12 +- .../compute/resource_compute_node_template.go | 111 + .../resource_compute_per_instance_config.go | 8 +- .../resource_compute_project_metadata_item.go | 21 +- ...source_compute_public_advertised_prefix.go | 12 + ...resource_compute_region_backend_service.go | 333 +- .../resource_compute_region_commitment.go | 24 + ...e_compute_region_instance_group_manager.go | 47 +- ...source_compute_region_instance_template.go | 60 +- ...ion_network_firewall_policy_association.go | 278 +- ...ork_firewall_policy_association_sweeper.go | 139 + ...ute_region_network_firewall_policy_rule.go | 1515 +- ...on_network_firewall_policy_rule_sweeper.go | 139 + ...urce_compute_region_per_instance_config.go | 8 +- ...resource_compute_region_ssl_certificate.go | 13 +- ...source_compute_region_target_http_proxy.go | 40 + ...ource_compute_region_target_https_proxy.go | 137 +- .../resource_compute_resize_request.go | 1300 ++ .../resource_compute_resource_policy.go | 151 +-
.../compute/resource_compute_router.go | 2 +- .../compute/resource_compute_router_nat.go | 72 +- .../resource_compute_router_nat_address.go | 814 + .../compute/resource_compute_router_peer.go | 123 +- .../resource_compute_security_policy_rule.go | 258 + .../resource_compute_service_attachment.go | 109 +- .../resource_compute_ssl_certificate.go | 13 +- .../resource_compute_storage_pool_sweeper.go | 98 + .../compute/resource_compute_subnetwork.go | 236 +- .../resource_compute_target_http_proxy.go | 11 +- .../resource_compute_target_https_proxy.go | 108 +- .../data_source_google_container_cluster.go | 9 + .../google/services/container/node_config.go | 736 +- .../container/resource_container_cluster.go | 503 +- .../resource_container_cluster_migratev1.go | 26 +- .../container/resource_container_node_pool.go | 604 +- .../resource_container_attached_cluster.go | 87 +- .../resource_container_aws_node_pool.go | 85 + .../resource_billing_project_info.go | 4 +- ...se_migration_service_connection_profile.go | 103 +- ...atabase_migration_service_migration_job.go | 1224 + ...migration_service_migration_job_sweeper.go | 143 + ...e_data_loss_prevention_discovery_config.go | 1356 +- ...oss_prevention_discovery_config_sweeper.go | 47 +- ...source_data_loss_prevention_job_trigger.go | 1 - .../dataplex/resource_dataplex_datascan.go | 2 +- .../dataplex/resource_dataplex_task.go | 2 +- .../dataproc_operation.go} | 31 +- .../dataproc/resource_dataproc_batch.go | 2011 ++ .../resource_dataproc_batch_sweeper.go | 143 + .../dataproc/resource_dataproc_cluster.go | 7 +- .../resource_dataproc_metastore_service.go | 32 + .../datastore/resource_datastore_index.go | 425 - .../resource_datastream_connection_profile.go | 188 +- .../resource_datastream_private_connection.go | 6 + .../datastream/resource_datastream_stream.go | 3218 ++- .../resource_dialogflow_cx_agent.go | 260 +- .../resource_dialogflow_cx_entity_type.go | 4 - .../resource_dialogflow_cx_environment.go | 4 - .../resource_dialogflow_cx_flow.go | 256 +- .../resource_dialogflow_cx_intent.go | 4 - .../resource_dialogflow_cx_page.go | 4 - ...esource_dialogflow_cx_security_settings.go | 9 + .../resource_dialogflow_cx_test_case.go | 4 - .../resource_dialogflow_cx_version.go | 4 - .../resource_dialogflow_cx_webhook.go | 4 - .../resource_discovery_engine_chat_engine.go | 44 +- .../resource_discovery_engine_data_store.go | 263 +- .../resource_discovery_engine_schema.go | 319 + ...esource_discovery_engine_schema_sweeper.go | 143 + .../resource_discovery_engine_target_site.go | 540 + ...ce_discovery_engine_target_site_sweeper.go | 139 + .../services/dns/resource_dns_managed_zone.go | 3 +- .../services/dns/resource_dns_record_set.go | 39 +- .../resource_edgenetwork_network.go | 97 +- .../resource_edgenetwork_subnet.go | 97 +- .../resource_essential_contacts_contact.go | 7 + .../filestore/resource_filestore_instance.go | 96 + .../firestore/resource_firestore_database.go | 112 +- .../resource_firestore_database_sweeper.go | 10 +- .../firestore/resource_firestore_document.go | 3 + .../firestore/resource_firestore_field.go | 38 +- .../resource_gke_hub_feature_membership.go | 21 +- .../gkehub/resource_gke_hub_membership.go | 4 +- ...ource_google_gke_hub_membership_binding.go | 53 + .../gkehub2/resource_gke_hub_feature.go | 29 +- ...urce_gkeonprem_bare_metal_admin_cluster.go | 14 +- .../resource_gkeonprem_bare_metal_cluster.go | 28 +- ...resource_gkeonprem_bare_metal_node_pool.go | 8 +- .../resource_gkeonprem_vmware_node_pool.go | 2 +- .../resource_healthcare_pipeline_job.go | 1148 +
.../resource_healthcare_workspace.go | 464 + ...rce_iam_workload_identity_pool_provider.go | 254 +- .../services/iap/resource_iap_client.go | 2 +- .../services/iap/resource_iap_settings.go | 1367 ++ ...dentity_platform_project_default_config.go | 810 - ...ource_integration_connectors_connection.go | 20 +- .../resource_integrations_client.go | 46 +- ...ce_google_kms_crypto_key_latest_version.go | 181 + ...a_source_google_kms_crypto_key_versions.go | 257 + .../services/kms/iam_kms_ekm_connection.go | 249 + .../resource_kms_autokey_config_sweeper.go | 126 + .../services/logging/iam_logging_log_view.go | 246 + .../logging/resource_logging_log_scope.go | 411 + .../resource_logging_log_scope_sweeper.go} | 14 +- .../logging/resource_logging_log_view.go | 5 + .../resource_logging_project_bucket_config.go | 8 +- .../services/logging/resource_logging_sink.go | 7 +- .../looker/resource_looker_instance.go | 301 + .../resource_migration_center_group.go | 2 +- ...esource_migration_center_preference_set.go | 78 +- .../resource_monitoring_alert_policy.go | 26 + ...esource_monitoring_notification_channel.go | 2 +- .../monitoring/resource_monitoring_slo.go | 4 +- .../resource_netapp_active_directory.go | 304 +- ...esource_netapp_active_directory_sweeper.go | 162 +- .../services/netapp/resource_netapp_backup.go | 152 +- .../netapp/resource_netapp_backup_policy.go | 166 +- .../resource_netapp_backup_policy_sweeper.go | 162 +- .../netapp/resource_netapp_backup_sweeper.go | 162 +- .../netapp/resource_netapp_backup_vault.go | 112 +- .../resource_netapp_backup_vault_sweeper.go | 162 +- .../resource_netapp_kmsconfig_sweeper.go | 156 +- .../netapp/resource_netapp_storage_pool.go | 273 +- .../resource_netapp_storage_pool_sweeper.go | 162 +- .../services/netapp/resource_netapp_volume.go | 220 +- .../resource_netapp_volume_replication.go | 18 +- ...resource_netapp_volume_snapshot_sweeper.go | 156 +- .../resource_network_connectivity_group.go | 598 + .../resource_network_connectivity_hub.go | 564 +- ...source_network_connectivity_hub_sweeper.go | 110 +- ...rce_network_connectivity_internal_range.go | 97 +- .../resource_network_connectivity_spoke.go | 1416 +- ...urce_network_connectivity_spoke_sweeper.go | 110 +- ...urce_network_security_client_tls_policy.go | 885 + ...work_security_client_tls_policy_sweeper.go | 139 + ...etwork_security_gateway_security_policy.go | 26 + ...urce_network_security_server_tls_policy.go | 984 + ...work_security_server_tls_policy_sweeper.go | 139 + .../resource_network_services_gateway.go | 36 +- .../notebooks/resource_notebooks_instance.go | 3 +- ...rce_oracle_database_autonomous_database.go | 43 + ...ce_oracle_database_autonomous_databases.go | 114 + ...e_database_cloud_exadata_infrastructure.go | 38 + ..._database_cloud_exadata_infrastructures.go | 111 + ...source_oracle_database_cloud_vm_cluster.go | 38 + ...ource_oracle_database_cloud_vm_clusters.go | 115 + .../data_source_oracle_database_db_nodes.go | 216 + .../data_source_oracle_database_db_servers.go | 249 + .../oracle_database_operation.go | 92 + ...rce_oracle_database_autonomous_database.go | 3381 +++ ...le_database_autonomous_database_sweeper.go | 143 + ...e_database_cloud_exadata_infrastructure.go | 1623 ++ ...se_cloud_exadata_infrastructure_sweeper.go | 143 + ...source_oracle_database_cloud_vm_cluster.go | 1556 ++ ...racle_database_cloud_vm_cluster_sweeper.go | 143 + .../orgpolicy/resource_org_policy_policy.go | 2 +- ...esource_privateca_certificate_authority.go | 9 +- ...e_privileged_access_manager_entitlement.go | 46 +
.../privileged_access_manager_operation.go | 89 + ...e_privileged_access_manager_entitlement.go | 1355 ++ ...eged_access_manager_entitlement_sweeper.go | 143 + .../pubsub/resource_pubsub_subscription.go | 81 +- .../services/pubsub/resource_pubsub_topic.go | 343 +- .../services/redis/resource_redis_cluster.go | 495 + .../data_source_google_client_config.go | 22 +- .../data_source_google_folder.go | 8 + .../data_source_google_service_account.go | 7 + .../data_source_google_service_accounts.go | 105 + .../resourcemanager/resource_google_folder.go | 51 +- .../resource_google_project.go | 44 +- .../resource_google_project_service.go | 3 +- .../resource_google_service_account.go | 66 +- ...ta_source_secret_manager_secret_version.go | 19 +- ...ce_secret_manager_secret_version_access.go | 19 +- .../data_source_secret_manager_secrets.go | 26 +- .../resource_secret_manager_secret_version.go | 2 +- ...a_source_secret_manager_regional_secret.go | 49 + ..._secret_manager_regional_secret_version.go | 219 + ..._manager_regional_secret_version_access.go | 174 + ..._source_secret_manager_regional_secrets.go | 178 + .../iam_secret_manager_regional_secret.go | 249 + ...resource_secret_manager_regional_secret.go | 936 + ..._secret_manager_regional_secret_sweeper.go | 143 + ..._secret_manager_regional_secret_version.go | 610 + ...ource_secure_source_manager_branch_rule.go | 660 + ...cure_source_manager_branch_rule_sweeper.go | 143 + ...resource_scc_folder_notification_config.go | 475 + ..._scc_folder_notification_config_sweeper.go | 143 + ...esource_scc_folder_scc_big_query_export.go | 462 + ...scc_folder_scc_big_query_export_sweeper.go | 143 + .../resource_scc_notification_config.go | 16 +- ...e_scc_organization_scc_big_query_export.go | 463 + ...ganization_scc_big_query_export_sweeper.go | 143 + ...esource_scc_project_notification_config.go | 519 + ...cc_project_notification_config_sweeper.go} | 14 +- ...source_scc_project_scc_big_query_export.go | 496 + ...cc_project_scc_big_query_export_sweeper.go | 143 + .../iam_scc_v2_organization_source.go | 202 + .../resource_scc_v2_folder_mute_config.go | 435 + ...ource_scc_v2_folder_mute_config_sweeper.go | 143 + ...ource_scc_v2_folder_notification_config.go | 482 + ...c_v2_folder_notification_config_sweeper.go | 143 + ...urce_scc_v2_folder_scc_big_query_export.go | 472 + ..._v2_folder_scc_big_query_export_sweeper.go | 143 + ...esource_scc_v2_organization_mute_config.go | 435 + ...scc_v2_organization_mute_config_sweeper.go | 143 + ...scc_v2_organization_notification_config.go | 16 +- ...cc_v2_organization_scc_big_query_export.go | 500 + ...ganization_scc_big_query_export_sweeper.go | 143 + ...c_v2_organization_scc_big_query_exports.go | 502 + ...anization_scc_big_query_exports_sweeper.go | 143 + .../resource_scc_v2_organization_source.go | 340 + .../resource_scc_v2_project_mute_config.go | 468 + ...urce_scc_v2_project_mute_config_sweeper.go | 143 + ...urce_scc_v2_project_notification_config.go | 526 + ..._v2_project_notification_config_sweeper.go | 139 + ...rce_scc_v2_project_scc_big_query_export.go | 503 + ...v2_project_scc_big_query_export_sweeper.go | 143 + .../resource_service_networking_connection.go | 21 +- ...a_source_google_site_verification_token.go | 149 + .../resource_site_verification_owner.go | 277 + ...resource_site_verification_web_resource.go | 344 + ..._site_verification_web_resource_sweeper.go | 143 + .../resource_sourcerepo_repository.go | 83 +- .../resource_spanner_backup_schedule.go | 663 + ...source_spanner_backup_schedule_sweeper.go} | 14 +-
.../spanner/resource_spanner_database.go | 64 +- .../spanner/resource_spanner_instance.go | 306 +- .../services/sql/resource_sql_database.go | 10 +- .../sql/resource_sql_database_instance.go | 74 +- .../google/services/sql/resource_sql_user.go | 32 +- ...ata_source_google_storage_bucket_object.go | 20 + .../storage/resource_storage_bucket.go | 138 +- .../resource_storage_bucket_600_migration.go | 1058 + .../storage/resource_storage_bucket_object.go | 9 + .../resource_storage_managed_folder.go | 41 +- .../resource_tags_location_tag_bindings.go | 30 +- .../services/tags/resource_tags_tag_key.go | 4 +- .../services/tags/resource_tags_tag_value.go | 4 +- .../transcoder/resource_transcoder_job.go | 2976 +++ .../resource_transcoder_job_sweeper.go} | 12 +- .../resource_transcoder_job_template.go | 2927 +++ ...esource_transcoder_job_template_sweeper.go | 143 + ...resource_vertex_ai_feature_online_store.go | 2 +- .../resource_vertex_ai_featurestore.go | 2 +- ...vertex_ai_index_endpoint_deployed_index.go | 1163 + .../resource_vmwareengine_cluster.go | 603 + .../resource_vmwareengine_cluster_sweeper.go | 57 +- ...wareengine_external_access_rule_sweeper.go | 190 +- ...e_vmwareengine_external_address_sweeper.go | 190 +- .../resource_vmwareengine_private_cloud.go | 767 +- ...urce_vmwareengine_private_cloud_sweeper.go | 2 +- .../resource_vpc_access_connector.go | 15 +- .../workbench/resource_workbench_instance.go | 131 +- .../workflows/resource_workflows_workflow.go | 7 +- .../google/sweeper/gcp_sweeper.go | 43 +- .../google/tpgresource/datasource_helpers.go | 6 + .../google/tpgresource/field_helpers.go | 3 + .../google/tpgresource/labels.go | 27 +- .../google/tpgresource/resource_test_utils.go | 2 +- .../google/tpgresource/self_link_helpers.go | 11 + .../google/tpgresource/utils.go | 33 + .../google/transport/config.go | 69 +- .../transport/error_retry_predicates.go | 10 + .../transport/provider_dcl_client_creation.go | 24 - .../google/verify/validation.go | 35 +- .../terraform-provider-google/main.go | 14 +- .../version/version.go | 2 +- .../github.com/planetscale/vtprotobuf/LICENSE | 29 + .../vtprotobuf/protohelpers/protohelpers.go | 122 + .../types/known/anypb/any_vtproto.pb.go | 389 + .../known/durationpb/duration_vtproto.pb.go | 317 + .../types/known/emptypb/empty_vtproto.pb.go | 207 + .../types/known/structpb/struct_vtproto.pb.go | 2004 ++ .../known/timestamppb/timestamp_vtproto.pb.go | 317 + .../known/wrapperspb/wrappers_vtproto.pb.go | 2240 ++ .../go-cty/cty/function/stdlib/collection.go | 4 +- .../go-cty/cty/function/stdlib/conversion.go | 5 +- .../zclconf/go-cty/cty/json/marshal.go | 12 +- .../zclconf/go-cty/cty/primitive_type.go | 16 +- .../google.golang.org/grpc/otelgrpc/config.go | 86 +- .../google.golang.org/grpc/otelgrpc/doc.go | 13 +- .../grpc/otelgrpc/interceptor.go | 34 +- .../grpc/otelgrpc/interceptorinfo.go | 13 +- .../grpc/otelgrpc/internal/parse.go | 13 +- .../grpc/otelgrpc/metadata_supplier.go | 13 +- .../grpc/otelgrpc/semconv.go | 13 +- .../grpc/otelgrpc/stats_handler.go | 46 +- .../grpc/otelgrpc/version.go | 15 +- .../net/http/otelhttp/client.go | 15 +- .../net/http/otelhttp/common.go | 20 +- .../net/http/otelhttp/config.go | 30 +- .../instrumentation/net/http/otelhttp/doc.go | 13 +- .../net/http/otelhttp/handler.go | 140 +- .../otelhttp/internal/request/body_wrapper.go | 75 + .../internal/request/resp_writer_wrapper.go | 112 + .../net/http/otelhttp/internal/semconv/env.go | 165 + .../otelhttp/internal/semconv/httpconv.go | 348 + .../http/otelhttp/internal/semconv/util.go | 98 + 
.../http/otelhttp/internal/semconv/v1.20.0.go | 192 + .../http/otelhttp/internal/semconvutil/gen.go | 13 +- .../otelhttp/internal/semconvutil/httpconv.go | 13 +- .../otelhttp/internal/semconvutil/netconv.go | 18 +- .../net/http/otelhttp/labeler.go | 21 +- .../net/http/otelhttp/transport.go | 87 +- .../net/http/otelhttp/version.go | 15 +- .../instrumentation/net/http/otelhttp/wrap.go | 100 - .../go.opentelemetry.io/otel/.codespellignore | 2 + .../go.opentelemetry.io/otel/.codespellrc | 2 +- .../go.opentelemetry.io/otel/.gitmodules | 3 - .../go.opentelemetry.io/otel/.golangci.yml | 8 + .../go.opentelemetry.io/otel/CHANGELOG.md | 237 +- .../go.opentelemetry.io/otel/CODEOWNERS | 6 +- .../go.opentelemetry.io/otel/CONTRIBUTING.md | 25 +- .../vendor/go.opentelemetry.io/otel/Makefile | 96 +- .../vendor/go.opentelemetry.io/otel/README.md | 54 +- .../go.opentelemetry.io/otel/RELEASING.md | 7 + .../otel/attribute/README.md | 3 + .../go.opentelemetry.io/otel/attribute/doc.go | 13 +- .../otel/attribute/encoder.go | 13 +- .../otel/attribute/filter.go | 13 +- .../otel/attribute/iterator.go | 13 +- .../go.opentelemetry.io/otel/attribute/key.go | 13 +- .../go.opentelemetry.io/otel/attribute/kv.go | 13 +- .../go.opentelemetry.io/otel/attribute/set.go | 139 +- .../otel/attribute/value.go | 31 +- .../otel/baggage/README.md | 3 + .../otel/baggage/baggage.go | 392 +- .../otel/baggage/context.go | 13 +- .../go.opentelemetry.io/otel/baggage/doc.go | 13 +- .../go.opentelemetry.io/otel/codes/README.md | 3 + .../go.opentelemetry.io/otel/codes/codes.go | 15 +- .../go.opentelemetry.io/otel/codes/doc.go | 13 +- .../vendor/go.opentelemetry.io/otel/doc.go | 15 +- .../go.opentelemetry.io/otel/error_handler.go | 13 +- .../go.opentelemetry.io/otel/get_main_pkgs.sh | 13 +- .../go.opentelemetry.io/otel/handler.go | 25 +- .../otel/internal/attribute/attribute.go | 37 +- .../otel/internal/baggage/baggage.go | 13 +- .../otel/internal/baggage/context.go | 13 +- .../go.opentelemetry.io/otel/internal/gen.go | 13 +- .../otel/internal/global/handler.go | 84 +- .../otel/internal/global/instruments.go | 65 +- .../otel/internal/global/internal_logging.go | 39 +- .../otel/internal/global/meter.go | 36 +- .../otel/internal/global/propagator.go | 13 +- .../otel/internal/global/state.go | 67 +- .../otel/internal/global/trace.go | 22 +- .../otel/internal/rawhelpers.go | 22 +- .../otel/internal_logging.go | 13 +- .../vendor/go.opentelemetry.io/otel/metric.go | 13 +- .../go.opentelemetry.io/otel/metric/README.md | 3 + .../otel/metric/asyncfloat64.go | 19 +- .../otel/metric/asyncint64.go | 13 +- .../go.opentelemetry.io/otel/metric/config.go | 13 +- .../go.opentelemetry.io/otel/metric/doc.go | 31 +- .../otel/metric/embedded/README.md | 3 + .../otel/metric/embedded/embedded.go | 33 +- .../otel/metric/instrument.go | 35 +- .../go.opentelemetry.io/otel/metric/meter.go | 90 +- .../otel/metric/noop/README.md | 3 + .../otel/metric/noop/noop.go | 41 +- .../otel/metric/syncfloat64.go | 73 +- .../otel/metric/syncint64.go | 67 +- .../go.opentelemetry.io/otel/propagation.go | 13 +- .../otel/propagation/README.md | 3 + .../otel/propagation/baggage.go | 13 +- .../otel/propagation/doc.go | 13 +- .../otel/propagation/propagation.go | 13 +- .../otel/propagation/trace_context.go | 15 +- .../go.opentelemetry.io/otel/renovate.json | 24 + .../go.opentelemetry.io/otel/requirements.txt | 2 +- .../go.opentelemetry.io/otel/sdk/LICENSE | 201 + .../go.opentelemetry.io/otel/sdk/README.md | 3 + .../otel/sdk/instrumentation/README.md | 3 + 
.../otel/sdk/instrumentation/doc.go | 13 + .../otel/sdk/instrumentation/library.go | 9 + .../otel/sdk/instrumentation/scope.go | 15 + .../otel/sdk/internal/x/README.md | 46 + .../otel/sdk/internal/x/x.go | 66 + .../otel/sdk/metric/LICENSE | 201 + .../otel/sdk/metric/README.md | 3 + .../otel/sdk/metric/aggregation.go | 189 + .../otel/sdk/metric/cache.go | 83 + .../otel/sdk/metric/config.go | 137 + .../otel/sdk/metric/doc.go | 47 + .../otel/sdk/metric/env.go | 39 + .../otel/sdk/metric/exemplar.go | 81 + .../otel/sdk/metric/exporter.go | 77 + .../otel/sdk/metric/instrument.go | 347 + .../otel/sdk/metric/instrumentkind_string.go | 30 + .../metric/internal/aggregate/aggregate.go | 154 + .../otel/sdk/metric/internal/aggregate/doc.go | 7 + .../sdk/metric/internal/aggregate/exemplar.go | 42 + .../aggregate/exponential_histogram.go | 444 + .../metric/internal/aggregate/histogram.go | 233 + .../metric/internal/aggregate/lastvalue.go | 162 + .../sdk/metric/internal/aggregate/limit.go | 42 + .../otel/sdk/metric/internal/aggregate/sum.go | 238 + .../otel/sdk/metric/internal/exemplar/doc.go | 6 + .../otel/sdk/metric/internal/exemplar/drop.go | 23 + .../sdk/metric/internal/exemplar/exemplar.go | 29 + .../sdk/metric/internal/exemplar/filter.go | 29 + .../internal/exemplar/filtered_reservoir.go | 49 + .../otel/sdk/metric/internal/exemplar/hist.go | 46 + .../otel/sdk/metric/internal/exemplar/rand.go | 191 + .../sdk/metric/internal/exemplar/reservoir.go | 32 + .../sdk/metric/internal/exemplar/storage.go | 95 + .../sdk/metric/internal/exemplar/value.go | 58 + .../otel/sdk/metric/internal/reuse_slice.go | 13 + .../otel/sdk/metric/internal/x/README.md | 112 + .../otel/sdk/metric/internal/x/x.go | 85 + .../otel/sdk/metric/manual_reader.go | 203 + .../otel/sdk/metric/meter.go | 729 + .../otel/sdk/metric/metricdata/README.md | 3 + .../otel/sdk/metric/metricdata/data.go | 296 + .../otel/sdk/metric/metricdata/temporality.go | 30 + .../metric/metricdata/temporality_string.go | 25 + .../otel/sdk/metric/periodic_reader.go | 370 + .../otel/sdk/metric/pipeline.go | 655 + .../otel/sdk/metric/provider.go | 143 + .../otel/sdk/metric/reader.go | 189 + .../otel/sdk/metric/version.go | 9 + .../otel/sdk/metric/view.go | 117 + .../otel/sdk/resource/README.md | 3 + .../otel/sdk/resource/auto.go | 118 + .../otel/sdk/resource/builtin.go | 118 + .../otel/sdk/resource/config.go | 195 + .../otel/sdk/resource/container.go | 89 + .../otel/sdk/resource/doc.go | 20 + .../otel/sdk/resource/env.go | 95 + .../otel/sdk/resource/host_id.go | 109 + .../otel/sdk/resource/host_id_bsd.go | 12 + .../otel/sdk/resource/host_id_darwin.go | 8 + .../otel/sdk/resource/host_id_exec.go | 18 + .../otel/sdk/resource/host_id_linux.go | 11 + .../otel/sdk/resource/host_id_readfile.go | 17 + .../otel/sdk/resource/host_id_unsupported.go | 19 + .../otel/sdk/resource/host_id_windows.go | 37 + .../otel/sdk/resource/os.go | 89 + .../otel/sdk/resource/os_release_darwin.go | 91 + .../otel/sdk/resource/os_release_unix.go | 143 + .../otel/sdk/resource/os_unix.go | 79 + .../otel/sdk/resource/os_unsupported.go | 15 + .../otel/sdk/resource/os_windows.go | 90 + .../otel/sdk/resource/process.go | 173 + .../otel/sdk/resource/resource.go | 294 + .../go.opentelemetry.io/otel/sdk/version.go | 9 + .../otel/semconv/v1.17.0/README.md | 3 + .../otel/semconv/v1.17.0/doc.go | 13 +- .../otel/semconv/v1.17.0/event.go | 13 +- .../otel/semconv/v1.17.0/exception.go | 13 +- .../otel/semconv/v1.17.0/http.go | 13 +- .../otel/semconv/v1.17.0/resource.go | 13 +- 
.../otel/semconv/v1.17.0/schema.go | 13 +- .../otel/semconv/v1.17.0/trace.go | 13 +- .../otel/semconv/v1.20.0/README.md | 3 + .../otel/semconv/v1.20.0/attribute_group.go | 13 +- .../otel/semconv/v1.20.0/doc.go | 13 +- .../otel/semconv/v1.20.0/event.go | 13 +- .../otel/semconv/v1.20.0/exception.go | 13 +- .../otel/semconv/v1.20.0/http.go | 13 +- .../otel/semconv/v1.20.0/resource.go | 13 +- .../otel/semconv/v1.20.0/schema.go | 13 +- .../otel/semconv/v1.20.0/trace.go | 13 +- .../otel/semconv/v1.26.0/README.md | 3 + .../otel/semconv/v1.26.0/attribute_group.go | 8996 ++++++++ .../otel/semconv/v1.26.0/doc.go | 9 + .../otel/semconv/v1.26.0/exception.go | 9 + .../otel/semconv/v1.26.0/metric.go | 1307 ++ .../otel/semconv/v1.26.0/schema.go | 9 + .../vendor/go.opentelemetry.io/otel/trace.go | 13 +- .../go.opentelemetry.io/otel/trace/README.md | 3 + .../go.opentelemetry.io/otel/trace/config.go | 13 +- .../go.opentelemetry.io/otel/trace/context.go | 17 +- .../go.opentelemetry.io/otel/trace/doc.go | 13 +- .../otel/trace/embedded/README.md | 3 + .../otel/trace/embedded/embedded.go | 13 +- .../otel/trace/nonrecording.go | 13 +- .../go.opentelemetry.io/otel/trace/noop.go | 20 +- .../otel/trace/provider.go | 59 + .../go.opentelemetry.io/otel/trace/span.go | 177 + .../go.opentelemetry.io/otel/trace/trace.go | 256 +- .../go.opentelemetry.io/otel/trace/tracer.go | 37 + .../otel/trace/tracestate.go | 23 +- .../otel/verify_examples.sh | 13 +- .../otel/verify_readmes.sh | 21 + .../otel/verify_released_changelog.sh | 42 + .../go.opentelemetry.io/otel/version.go | 15 +- .../go.opentelemetry.io/otel/versions.yaml | 25 +- .../google/vendor/golang.org/x/crypto/LICENSE | 4 +- .../golang.org/x/crypto/argon2/blamka_amd64.s | 2972 ++- .../x/crypto/blake2b/blake2bAVX2_amd64.s | 5167 ++++- .../x/crypto/blake2b/blake2b_amd64.s | 1681 +- .../vendor/golang.org/x/crypto/cast5/cast5.go | 2 +- .../chacha20poly1305/chacha20poly1305.go | 2 +- .../chacha20poly1305/chacha20poly1305_amd64.s | 11503 ++++++++-- .../x/crypto/cryptobyte/asn1/asn1.go | 2 +- .../golang.org/x/crypto/cryptobyte/string.go | 2 +- .../vendor/golang.org/x/crypto/hkdf/hkdf.go | 2 +- .../x/crypto/internal/poly1305/sum_amd64.s | 133 +- .../vendor/golang.org/x/crypto/sha3/doc.go | 2 +- .../vendor/golang.org/x/crypto/sha3/hashes.go | 8 + .../golang.org/x/crypto/sha3/keccakf_amd64.s | 5787 ++++- .../golang.org/x/crypto/sha3/register.go | 18 - .../vendor/golang.org/x/crypto/sha3/shake.go | 4 +- .../x/exp/constraints/constraints.go | 50 + .../google/vendor/golang.org/x/net/LICENSE | 4 +- .../vendor/golang.org/x/net/http2/config.go | 122 + .../golang.org/x/net/http2/config_go124.go | 61 + .../x/net/http2/config_pre_go124.go | 16 + .../vendor/golang.org/x/net/http2/http2.go | 53 +- .../vendor/golang.org/x/net/http2/server.go | 181 +- .../golang.org/x/net/http2/transport.go | 147 +- .../vendor/golang.org/x/net/http2/write.go | 10 + .../google/vendor/golang.org/x/oauth2/LICENSE | 4 +- .../vendor/golang.org/x/oauth2/token.go | 7 + .../google/vendor/golang.org/x/sync/LICENSE | 4 +- .../golang.org/x/sync/errgroup/errgroup.go | 135 + .../golang.org/x/sync/errgroup/go120.go | 13 + .../golang.org/x/sync/errgroup/pre_go120.go | 14 + .../google/vendor/golang.org/x/sys/LICENSE | 4 +- .../google/vendor/golang.org/x/sys/cpu/cpu.go | 21 + .../vendor/golang.org/x/sys/cpu/cpu_arm64.go | 12 + .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 5 + .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 2 +- .../golang.org/x/sys/cpu/cpu_linux_riscv64.go | 137 + .../golang.org/x/sys/cpu/cpu_riscv64.go | 11 +-
.../vendor/golang.org/x/sys/unix/README.md | 2 +- .../vendor/golang.org/x/sys/unix/mkerrors.sh | 6 +- .../vendor/golang.org/x/sys/unix/mremap.go | 5 + .../golang.org/x/sys/unix/syscall_aix.go | 2 +- .../golang.org/x/sys/unix/syscall_darwin.go | 61 + .../golang.org/x/sys/unix/syscall_hurd.go | 1 + .../golang.org/x/sys/unix/syscall_linux.go | 64 +- .../x/sys/unix/syscall_linux_arm64.go | 2 + .../x/sys/unix/syscall_linux_loong64.go | 2 + .../x/sys/unix/syscall_linux_riscv64.go | 2 + .../golang.org/x/sys/unix/syscall_openbsd.go | 1 + .../golang.org/x/sys/unix/syscall_unix.go | 9 + .../golang.org/x/sys/unix/vgetrandom_linux.go | 13 + .../x/sys/unix/vgetrandom_unsupported.go | 11 + .../x/sys/unix/zerrors_darwin_amd64.go | 12 + .../x/sys/unix/zerrors_darwin_arm64.go | 12 + .../golang.org/x/sys/unix/zerrors_linux.go | 51 +- .../x/sys/unix/zerrors_linux_386.go | 7 + .../x/sys/unix/zerrors_linux_amd64.go | 7 + .../x/sys/unix/zerrors_linux_arm.go | 7 + .../x/sys/unix/zerrors_linux_arm64.go | 7 + .../x/sys/unix/zerrors_linux_loong64.go | 7 + .../x/sys/unix/zerrors_linux_mips.go | 7 + .../x/sys/unix/zerrors_linux_mips64.go | 7 + .../x/sys/unix/zerrors_linux_mips64le.go | 7 + .../x/sys/unix/zerrors_linux_mipsle.go | 7 + .../x/sys/unix/zerrors_linux_ppc.go | 7 + .../x/sys/unix/zerrors_linux_ppc64.go | 7 + .../x/sys/unix/zerrors_linux_ppc64le.go | 7 + .../x/sys/unix/zerrors_linux_riscv64.go | 7 + .../x/sys/unix/zerrors_linux_s390x.go | 7 + .../x/sys/unix/zerrors_linux_sparc64.go | 7 + .../x/sys/unix/zerrors_zos_s390x.go | 2 + .../x/sys/unix/zsyscall_darwin_amd64.go | 101 + .../x/sys/unix/zsyscall_darwin_amd64.s | 25 + .../x/sys/unix/zsyscall_darwin_arm64.go | 101 + .../x/sys/unix/zsyscall_darwin_arm64.s | 25 + .../golang.org/x/sys/unix/zsyscall_linux.go | 33 +- .../x/sys/unix/zsyscall_openbsd_386.go | 24 + .../x/sys/unix/zsyscall_openbsd_386.s | 5 + .../x/sys/unix/zsyscall_openbsd_amd64.go | 24 + .../x/sys/unix/zsyscall_openbsd_amd64.s | 5 + .../x/sys/unix/zsyscall_openbsd_arm.go | 24 + .../x/sys/unix/zsyscall_openbsd_arm.s | 5 + .../x/sys/unix/zsyscall_openbsd_arm64.go | 24 + .../x/sys/unix/zsyscall_openbsd_arm64.s | 5 + .../x/sys/unix/zsyscall_openbsd_mips64.go | 24 + .../x/sys/unix/zsyscall_openbsd_mips64.s | 5 + .../x/sys/unix/zsyscall_openbsd_ppc64.go | 24 + .../x/sys/unix/zsyscall_openbsd_ppc64.s | 6 + .../x/sys/unix/zsyscall_openbsd_riscv64.go | 24 + .../x/sys/unix/zsyscall_openbsd_riscv64.s | 5 + .../x/sys/unix/zsysnum_linux_386.go | 1 + .../x/sys/unix/zsysnum_linux_amd64.go | 2 + .../x/sys/unix/zsysnum_linux_arm.go | 1 + .../x/sys/unix/zsysnum_linux_arm64.go | 3 +- .../x/sys/unix/zsysnum_linux_loong64.go | 3 + .../x/sys/unix/zsysnum_linux_mips.go | 1 + .../x/sys/unix/zsysnum_linux_mips64.go | 1 + .../x/sys/unix/zsysnum_linux_mips64le.go | 1 + .../x/sys/unix/zsysnum_linux_mipsle.go | 1 + .../x/sys/unix/zsysnum_linux_ppc.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 1 + .../x/sys/unix/zsysnum_linux_riscv64.go | 3 +- .../x/sys/unix/zsysnum_linux_s390x.go | 1 + .../x/sys/unix/zsysnum_linux_sparc64.go | 1 + .../x/sys/unix/ztypes_darwin_amd64.go | 13 + .../x/sys/unix/ztypes_darwin_arm64.go | 13 + .../x/sys/unix/ztypes_freebsd_386.go | 1 + .../x/sys/unix/ztypes_freebsd_amd64.go | 1 + .../x/sys/unix/ztypes_freebsd_arm.go | 1 + .../x/sys/unix/ztypes_freebsd_arm64.go | 1 + .../x/sys/unix/ztypes_freebsd_riscv64.go | 1 + .../golang.org/x/sys/unix/ztypes_linux.go | 96 +- .../x/sys/unix/ztypes_linux_riscv64.go | 33 +
.../golang.org/x/sys/windows/dll_windows.go | 2 +- .../golang.org/x/sys/windows/registry/key.go | 205 + .../x/sys/windows/registry/mksyscall.go | 9 + .../x/sys/windows/registry/syscall.go | 32 + .../x/sys/windows/registry/value.go | 386 + .../sys/windows/registry/zsyscall_windows.go | 117 + .../x/sys/windows/security_windows.go | 24 +- .../x/sys/windows/syscall_windows.go | 16 +- .../golang.org/x/sys/windows/types_windows.go | 72 +- .../x/sys/windows/zsyscall_windows.go | 80 + .../google/vendor/golang.org/x/text/LICENSE | 4 +- .../google/vendor/golang.org/x/time/LICENSE | 4 +- .../vendor/golang.org/x/time/rate/rate.go | 17 +- .../google/vendor/golang.org/x/tools/LICENSE | 27 + .../google/vendor/golang.org/x/tools/PATENTS | 22 + .../x/tools/cmd/stringer/stringer.go | 660 + .../x/tools/go/gcexportdata/gcexportdata.go | 186 + .../x/tools/go/gcexportdata/importer.go | 75 + .../tools/go/internal/packagesdriver/sizes.go | 53 + .../golang.org/x/tools/go/packages/doc.go | 250 + .../x/tools/go/packages/external.go | 140 + .../golang.org/x/tools/go/packages/golist.go | 1106 + .../x/tools/go/packages/golist_overlay.go | 83 + .../x/tools/go/packages/loadmode_string.go | 57 + .../x/tools/go/packages/packages.go | 1445 ++ .../golang.org/x/tools/go/packages/visit.go | 59 + .../x/tools/go/types/objectpath/objectpath.go | 753 + .../x/tools/internal/aliases/aliases.go | 32 + .../x/tools/internal/aliases/aliases_go121.go | 31 + .../x/tools/internal/aliases/aliases_go122.go | 63 + .../x/tools/internal/event/core/event.go | 85 + .../x/tools/internal/event/core/export.go | 70 + .../x/tools/internal/event/core/fast.go | 77 + .../golang.org/x/tools/internal/event/doc.go | 7 + .../x/tools/internal/event/event.go | 127 + .../x/tools/internal/event/keys/keys.go | 564 + .../x/tools/internal/event/keys/standard.go | 22 + .../x/tools/internal/event/keys/util.go | 21 + .../x/tools/internal/event/label/label.go | 215 + .../x/tools/internal/gcimporter/bimport.go | 150 + .../x/tools/internal/gcimporter/exportdata.go | 99 + .../x/tools/internal/gcimporter/gcimporter.go | 266 + .../x/tools/internal/gcimporter/iexport.go | 1332 ++ .../x/tools/internal/gcimporter/iimport.go | 1100 + .../internal/gcimporter/newInterface10.go | 22 + .../internal/gcimporter/newInterface11.go | 14 + .../internal/gcimporter/support_go118.go | 34 + .../x/tools/internal/gcimporter/unified_no.go | 10 + .../tools/internal/gcimporter/unified_yes.go | 10 + .../tools/internal/gcimporter/ureader_yes.go | 728 + .../x/tools/internal/gocommand/invoke.go | 470 + .../x/tools/internal/gocommand/vendor.go | 163 + .../x/tools/internal/gocommand/version.go | 71 + .../internal/packagesinternal/packages.go | 22 + .../x/tools/internal/pkgbits/codes.go | 77 + .../x/tools/internal/pkgbits/decoder.go | 521 + .../x/tools/internal/pkgbits/doc.go | 32 + .../x/tools/internal/pkgbits/encoder.go | 383 + .../x/tools/internal/pkgbits/flags.go | 9 + .../x/tools/internal/pkgbits/frames_go1.go | 21 + .../x/tools/internal/pkgbits/frames_go17.go | 28 + .../x/tools/internal/pkgbits/reloc.go | 42 + .../x/tools/internal/pkgbits/support.go | 17 + .../x/tools/internal/pkgbits/sync.go | 113 + .../internal/pkgbits/syncmarker_string.go | 89 + .../x/tools/internal/stdlib/manifest.go | 17320 ++++++++++++++ .../x/tools/internal/stdlib/stdlib.go | 97 + .../internal/tokeninternal/tokeninternal.go | 137 + .../tools/internal/typesinternal/errorcode.go | 1560 ++ .../typesinternal/errorcode_string.go | 179 + .../x/tools/internal/typesinternal/recv.go | 43 + 
.../x/tools/internal/typesinternal/toonew.go | 89 + .../x/tools/internal/typesinternal/types.go | 50 + .../x/tools/internal/versions/features.go | 43 + .../x/tools/internal/versions/gover.go | 172 + .../x/tools/internal/versions/toolchain.go | 14 + .../internal/versions/toolchain_go119.go | 14 + .../internal/versions/toolchain_go120.go | 14 + .../internal/versions/toolchain_go121.go | 14 + .../x/tools/internal/versions/types.go | 19 + .../x/tools/internal/versions/types_go121.go | 30 + .../x/tools/internal/versions/types_go122.go | 41 + .../x/tools/internal/versions/versions.go | 57 + .../api/appengine/v1/appengine-api.json | 105 +- .../api/appengine/v1/appengine-gen.go | 492 +- .../api/bigquery/v2/bigquery-api.json | 229 +- .../api/bigquery/v2/bigquery-gen.go | 1319 +- .../bigtableadmin/v2/bigtableadmin-api.json | 249 +- .../api/bigtableadmin/v2/bigtableadmin-gen.go | 705 +- .../v1/certificatemanager-api.json | 2308 ++ .../v1/certificatemanager-gen.go | 5674 +++++ .../api/cloudbilling/v1/cloudbilling-gen.go | 96 +- .../api/cloudbuild/v1/cloudbuild-api.json | 40 +- .../api/cloudbuild/v1/cloudbuild-gen.go | 476 +- .../cloudfunctions/v1/cloudfunctions-api.json | 609 +- .../cloudfunctions/v1/cloudfunctions-gen.go | 626 +- .../cloudidentity/v1/cloudidentity-api.json | 14 +- .../api/cloudidentity/v1/cloudidentity-gen.go | 332 +- .../api/cloudkms/v1/cloudkms-api.json | 149 +- .../api/cloudkms/v1/cloudkms-gen.go | 364 +- .../v1/cloudresourcemanager-gen.go | 176 +- .../v3/cloudresourcemanager-gen.go | 168 +- .../api/composer/v1/composer-api.json | 74 +- .../api/composer/v1/composer-gen.go | 234 +- .../api/compute/v1/compute-api.json | 393 +- .../api/compute/v1/compute-gen.go | 4974 ++-- .../api/compute/v1/compute2-gen.go | 93 +- .../api/compute/v1/compute3-gen.go | 155 +- .../api/container/v1/container-api.json | 529 +- .../api/container/v1/container-gen.go | 1288 +- .../api/dataflow/v1b3/dataflow-api.json | 255 +- .../api/dataflow/v1b3/dataflow-gen.go | 839 +- .../api/dataproc/v1/dataproc-api.json | 14252 ++++++++---- .../api/dataproc/v1/dataproc-gen.go | 19170 ++++++++++++---- .../api/datastream/v1/datastream-api.json | 24 +- .../api/datastream/v1/datastream-gen.go | 328 +- .../google.golang.org/api/dns/v1/dns-api.json | 17 +- .../google.golang.org/api/dns/v1/dns-gen.go | 277 +- .../api/healthcare/v1/healthcare-api.json | 521 +- .../api/healthcare/v1/healthcare-gen.go | 2341 +- .../google.golang.org/api/iam/v1/iam-api.json | 71 +- .../google.golang.org/api/iam/v1/iam-gen.go | 488 +- .../iamcredentials/v1/iamcredentials-gen.go | 32 +- .../google.golang.org/api/internal/creds.go | 19 +- .../api/internal/gensupport/resumable.go | 4 + .../api/internal/settings.go | 5 +- .../google.golang.org/api/internal/version.go | 2 +- .../api/logging/v2/logging-api.json | 918 +- .../api/logging/v2/logging-gen.go | 4419 +++- .../api/pubsub/v1/pubsub-api.json | 195 +- .../api/pubsub/v1/pubsub-gen.go | 371 +- .../google.golang.org/api/run/v2/run-api.json | 297 +- .../google.golang.org/api/run/v2/run-gen.go | 928 +- .../v1/servicemanagement-api.json | 62 +- .../v1/servicemanagement-gen.go | 485 +- .../v1/servicenetworking-api.json | 68 +- .../v1/servicenetworking-gen.go | 528 +- .../api/serviceusage/v1/serviceusage-api.json | 164 +- .../api/serviceusage/v1/serviceusage-gen.go | 594 +- .../api/spanner/v1/spanner-api.json | 641 +- .../api/spanner/v1/spanner-gen.go | 2253 +- .../api/sqladmin/v1beta4/sqladmin-api.json | 356 +- .../api/sqladmin/v1beta4/sqladmin-gen.go | 1090 +- .../api/storage/v1/storage-api.json | 336 +-
.../api/storage/v1/storage-gen.go | 810 +- .../v1/storagetransfer-api.json | 24 +- .../storagetransfer/v1/storagetransfer-gen.go | 209 +- .../api/transport/grpc/dial.go | 36 +- .../api/transport/http/dial.go | 18 +- .../googleapis/api/annotations/client.pb.go | 454 +- .../api/annotations/field_info.pb.go | 159 +- .../googleapis/api/annotations/http.pb.go | 48 +- .../googleapis/api/annotations/resource.pb.go | 9 +- .../api/distribution/distribution.pb.go | 892 + .../googleapis/api/expr/v1alpha1/syntax.pb.go | 298 +- .../genproto/googleapis/api/label/label.pb.go | 249 + .../googleapis/api/metric/metric.pb.go | 771 + .../api/monitoredres/monitored_resource.pb.go | 476 + .../googleapis/bigtable/admin/v2/types.pb.go | 1166 - .../type/calendarperiod/calendar_period.pb.go | 189 + .../genproto/googleapis/type/date/date.pb.go | 200 + .../genproto/protobuf/api/api.go | 25 + .../google.golang.org/grpc/MAINTAINERS.md | 33 +- .../vendor/google.golang.org/grpc/README.md | 2 +- .../vendor/google.golang.org/grpc/SECURITY.md | 2 +- .../google.golang.org/grpc/backoff/backoff.go | 2 +- .../grpc/balancer/balancer.go | 21 + .../grpc/balancer/base/balancer.go | 4 +- .../grpclb/grpc_lb_v1/load_balancer.pb.go | 28 +- .../grpc_lb_v1/load_balancer_grpc.pb.go | 85 +- .../grpc/balancer/grpclb/grpclb.go | 2 +- .../grpc/balancer/grpclb/grpclb_config.go | 4 +- .../grpc/balancer/grpclb/grpclb_picker.go | 6 +- .../balancer/leastrequest/leastrequest.go | 12 +- .../{ => balancer/pickfirst}/pickfirst.go | 36 +- .../grpc/balancer/rls/balancer.go | 87 +- .../grpc/balancer/rls/cache.go | 58 +- .../rls/internal/adaptive/adaptive.go | 5 +- .../rls/internal/adaptive/lookback.go | 7 - .../balancer/rls/internal/keys/builder.go | 2 +- .../grpc/balancer/rls/picker.go | 104 +- .../grpc/balancer/roundrobin/roundrobin.go | 4 +- .../balancer/weightedroundrobin/balancer.go | 137 +- .../balancer/weightedroundrobin/scheduler.go | 18 +- .../weightedaggregator/aggregator.go | 2 +- .../balancer/weightedtarget/weightedtarget.go | 13 +- .../grpc/balancer_wrapper.go | 54 +- .../grpc_binarylog_v1/binarylog.pb.go | 24 +- .../google.golang.org/grpc/clientconn.go | 174 +- .../vendor/google.golang.org/grpc/codec.go | 69 +- .../alts/internal/conn/aeadrekey.go | 2 +- .../alts/internal/conn/aes128gcmrekey.go | 2 +- .../credentials/alts/internal/conn/record.go | 7 - .../alts/internal/handshaker/handshaker.go | 4 +- .../internal/handshaker/service/service.go | 4 +- .../internal/proto/grpc_gcp/altscontext.pb.go | 8 +- .../internal/proto/grpc_gcp/handshaker.pb.go | 299 +- .../proto/grpc_gcp/handshaker_grpc.pb.go | 23 +- .../grpc_gcp/transport_security_common.pb.go | 10 +- .../grpc/credentials/insecure/insecure.go | 2 +- .../grpc/credentials/oauth/oauth.go | 6 +- .../google.golang.org/grpc/credentials/tls.go | 34 +- .../credentials/tls/certprovider/store.go | 44 +- .../google.golang.org/grpc/dialoptions.go | 69 +- .../vendor/google.golang.org/grpc/doc.go | 2 +- .../grpc/encoding/encoding.go | 5 +- .../grpc/encoding/encoding_v2.go | 81 + .../grpc/encoding/proto/proto.go | 44 +- .../grpc/experimental/stats/metricregistry.go | 269 + .../grpc/experimental/stats/metrics.go | 114 + .../grpc/grpclog/component.go | 10 +- .../google.golang.org/grpc/grpclog/grpclog.go | 104 +- .../grpc/grpclog/internal/grpclog.go | 26 + .../grpc/grpclog/internal/logger.go | 87 + .../internal/loggerv2.go} | 178 +- .../google.golang.org/grpc/grpclog/logger.go | 59 +- .../grpc/grpclog/loggerv2.go | 181 +- .../grpc/health/grpc_health_v1/health.pb.go | 10 +-
.../health/grpc_health_v1/health_grpc.pb.go | 79 +- .../google.golang.org/grpc/health/server.go | 2 +- .../grpc/internal/backoff/backoff.go | 4 +- .../internal/balancergroup/balancergroup.go | 97 +- .../grpc/internal/binarylog/method_logger.go | 2 +- .../grpc/internal/channelz/channelmap.go | 9 +- .../grpc/internal/channelz/funcs.go | 2 +- .../internal/channelz/syscall_nonlinux.go | 4 +- .../grpc/internal/envconfig/envconfig.go | 10 + .../grpc/internal/experimental.go | 8 +- .../grpc/internal/googlecloud/googlecloud.go | 6 +- .../{prefixLogger.go => prefix_logger.go} | 40 +- .../grpc/internal/grpcrand/grpcrand.go | 100 - .../grpc/internal/grpcrand/grpcrand_go1.21.go | 73 - .../internal/grpcsync/callback_serializer.go | 24 +- .../grpc/internal/grpcsync/pubsub.go | 4 +- .../grpc/internal/internal.go | 61 +- .../internal/proto/grpc_lookup_v1/rls.pb.go | 10 +- .../proto/grpc_lookup_v1/rls_config.pb.go | 135 +- .../proto/grpc_lookup_v1/rls_grpc.pb.go | 23 +- .../internal/resolver/dns/dns_resolver.go | 14 +- .../resolver/dns/internal/internal.go | 13 +- .../resolver/passthrough/passthrough.go | 2 +- .../internal/stats/metrics_recorder_list.go | 95 + .../grpc/internal/status/status.go | 4 +- .../grpc/internal/syscall/syscall_nonlinux.go | 6 +- .../grpc/internal/tcp_keepalive_unix.go | 2 +- .../grpc/internal/tcp_keepalive_windows.go | 2 +- .../grpc/internal/transport/controlbuf.go | 256 +- .../grpc/internal/transport/handler_server.go | 47 +- .../grpc/internal/transport/http2_client.go | 79 +- .../grpc/internal/transport/http2_server.go | 53 +- .../grpc/internal/transport/http_util.go | 24 +- .../grpc/internal/transport/proxy.go | 10 +- .../grpc/internal/transport/transport.go | 240 +- .../grpc/internal/wrr/random.go | 9 +- .../grpc/internal/xds/bootstrap/bootstrap.go | 1002 +- .../grpc/internal/xds/bootstrap/template.go | 2 +- .../grpc/internal/xds/rbac/converter.go | 4 +- .../grpc/internal/xds/rbac/matchers.go | 2 +- .../grpc/internal/xds/rbac/rbac_engine.go | 18 +- .../grpc/keepalive/keepalive.go | 20 +- .../google.golang.org/grpc/mem/buffer_pool.go | 194 + .../grpc/mem/buffer_slice.go | 226 + .../google.golang.org/grpc/mem/buffers.go | 252 + .../grpc/metadata/metadata.go | 22 +- .../grpc/orca/call_metrics.go | 2 +- .../google.golang.org/grpc/orca/producer.go | 4 +- .../grpc/orca/server_metrics.go | 2 +- .../google.golang.org/grpc/picker_wrapper.go | 81 +- .../google.golang.org/grpc/preloader.go | 28 +- .../grpc_reflection_v1/reflection.pb.go | 26 +- .../grpc_reflection_v1/reflection_grpc.pb.go | 85 +- .../grpc_reflection_v1alpha/reflection.pb.go | 28 +- .../reflection_grpc.pb.go | 85 +- .../grpc/reflection/internal/internal.go | 2 +- .../google.golang.org/grpc/regenerate.sh | 123 - .../grpc/resolver/manual/manual.go | 4 +- .../grpc/resolver_wrapper.go | 11 +- .../vendor/google.golang.org/grpc/rpc_util.go | 331 +- .../vendor/google.golang.org/grpc/server.go | 99 +- .../google.golang.org/grpc/service_config.go | 24 +- .../grpc/shared_buffer_pool.go | 154 - .../google.golang.org/grpc/stats/stats.go | 6 - .../vendor/google.golang.org/grpc/stream.go | 217 +- .../grpc/stream_interfaces.go | 86 + .../vendor/google.golang.org/grpc/version.go | 2 +- .../grpc/xds/bootstrap/credentials.go | 2 +- .../google.golang.org/grpc/xds/csds/csds.go | 89 +- .../grpc/xds/googledirectpath/googlec2p.go | 120 +- .../balancer/cdsbalancer/cdsbalancer.go | 26 +- .../balancer/cdsbalancer/cluster_watcher.go | 21 +- .../balancer/clusterimpl/clusterimpl.go | 206 +- .../internal/balancer/clusterimpl/picker.go | 25 +- 
.../clustermanager/balancerstateaggregator.go | 126 +- .../balancer/clustermanager/clustermanager.go | 106 +- .../clusterresolver/clusterresolver.go | 9 +- .../clusterresolver/resource_resolver.go | 50 +- .../clusterresolver/resource_resolver_dns.go | 8 +- .../clusterresolver/resource_resolver_eds.go | 16 +- .../balancer/loadstore/load_store_wrapper.go | 2 +- .../balancer/outlierdetection/balancer.go | 20 +- .../internal/balancer/priority/balancer.go | 1 + .../balancer/priority/balancer_priority.go | 12 +- .../xds/internal/balancer/ringhash/picker.go | 25 - .../xds/internal/balancer/ringhash/ring.go | 55 +- .../internal/balancer/ringhash/ringhash.go | 45 +- .../xds/internal/clusterspecifier/rls/rls.go | 4 +- .../xds/internal/httpfilter/fault/fault.go | 14 +- .../grpc/xds/internal/httpfilter/rbac/rbac.go | 14 +- .../xds/internal/httpfilter/router/router.go | 4 +- .../grpc/xds/internal/internal.go | 12 + .../internal/resolver/internal/internal.go | 2 +- .../xds/internal/resolver/serviceconfig.go | 10 +- .../xds/internal/resolver/watch_service.go | 45 +- .../xds/internal/resolver/xds_resolver.go | 28 +- .../grpc/xds/internal/server/conn_wrapper.go | 5 +- .../xds/internal/server/listener_wrapper.go | 43 +- .../grpc/xds/internal/server/rds_handler.go | 9 +- .../grpc/xds/internal/xdsclient/authority.go | 133 +- .../grpc/xds/internal/xdsclient/client.go | 4 - .../grpc/xds/internal/xdsclient/client_new.go | 160 +- .../internal/xdsclient/client_refcounted.go | 104 + .../grpc/xds/internal/xdsclient/clientimpl.go | 18 +- .../xdsclient/clientimpl_authority.go | 14 +- .../xds/internal/xdsclient/clientimpl_dump.go | 46 +- .../xdsclient/clientimpl_loadreport.go | 2 +- .../internal/xdsclient/clientimpl_watchers.go | 5 +- .../internal/xdsclient/internal/internal.go | 25 + .../grpc/xds/internal/xdsclient/load/store.go | 17 +- .../grpc/xds/internal/xdsclient/singleton.go | 115 - .../xdsclient/transport/internal/internal.go | 25 + .../xdsclient/transport/loadreport.go | 1 + .../internal/xdsclient/transport/transport.go | 152 +- .../xdslbregistry/converter/converter.go | 4 +- .../xdsresource/cluster_resource_type.go | 18 +- .../xdsresource/endpoints_resource_type.go | 20 +- .../xdsclient/xdsresource/filter_chain.go | 8 +- .../xdsresource/listener_resource_type.go | 22 +- .../internal/xdsclient/xdsresource/matcher.go | 6 +- .../xdsclient/xdsresource/resource_type.go | 34 +- .../xdsresource/route_config_resource_type.go | 20 +- .../xdsclient/xdsresource/unmarshal_cds.go | 25 +- .../google.golang.org/grpc/xds/server.go | 17 +- .../protobuf/encoding/protojson/decode.go | 2 +- .../protobuf/encoding/protojson/encode.go | 4 +- .../protobuf/internal/descopts/options.go | 20 +- .../internal/editionssupport/editions.go | 2 +- .../protobuf/internal/filedesc/desc.go | 4 + .../protobuf/internal/filedesc/desc_init.go | 2 + .../protobuf/internal/filedesc/desc_lazy.go | 2 + .../protobuf/internal/filedesc/editions.go | 2 +- .../protobuf/internal/genid/doc.go | 2 +- .../internal/genid/go_features_gen.go | 15 +- .../protobuf/internal/genid/map_entry.go | 2 +- .../protobuf/internal/genid/wrappers.go | 2 +- .../protobuf/internal/impl/codec_extension.go | 11 +- .../protobuf/internal/impl/codec_field.go | 3 + .../protobuf/internal/impl/codec_message.go | 3 + .../protobuf/internal/impl/codec_reflect.go | 210 - .../protobuf/internal/impl/codec_unsafe.go | 3 - .../protobuf/internal/impl/convert.go | 2 +- .../protobuf/internal/impl/encode.go | 2 +- .../protobuf/internal/impl/equal.go | 224 + .../internal/impl/legacy_extension.go | 1 + 
.../protobuf/internal/impl/message.go | 4 +- .../protobuf/internal/impl/pointer_reflect.go | 215 - .../protobuf/internal/impl/pointer_unsafe.go | 3 - .../protobuf/internal/strs/strings_pure.go | 28 - .../internal/strs/strings_unsafe_go120.go | 3 +- .../internal/strs/strings_unsafe_go121.go | 3 +- .../protobuf/internal/version/version.go | 4 +- .../google.golang.org/protobuf/proto/equal.go | 9 + .../protobuf/proto/extension.go | 71 + .../protobuf/reflect/protodesc/desc_init.go | 4 + .../protobuf/reflect/protodesc/editions.go | 2 +- .../protobuf/reflect/protoreflect/methods.go | 10 + .../reflect/protoreflect/value_pure.go | 60 - .../protoreflect/value_unsafe_go120.go | 3 +- .../protoreflect/value_unsafe_go121.go | 3 +- .../protobuf/runtime/protoiface/methods.go | 18 + .../types/descriptorpb/descriptor.pb.go | 748 +- .../types/gofeaturespb/go_features.pb.go | 24 +- .../protobuf/types/known/anypb/any.pb.go | 24 +- .../protobuf/types/known/apipb/api.pb.go | 531 + .../types/known/durationpb/duration.pb.go | 24 +- .../protobuf/types/known/emptypb/empty.pb.go | 24 +- .../types/known/fieldmaskpb/field_mask.pb.go | 24 +- .../sourcecontextpb/source_context.pb.go | 160 + .../types/known/structpb/struct.pb.go | 110 +- .../types/known/timestamppb/timestamp.pb.go | 24 +- .../protobuf/types/known/typepb/type.pb.go | 918 + .../types/known/wrapperspb/wrappers.pb.go | 200 +- terraform/providers/google/vendor/modules.txt | 244 +- 1842 files changed, 370593 insertions(+), 61772 deletions(-) create mode 100644 terraform/providers/google/vendor/cel.dev/expr/.bazelversion create mode 100644 terraform/providers/google/vendor/cel.dev/expr/.gitattributes create mode 100644 terraform/providers/google/vendor/cel.dev/expr/.gitignore create mode 100644 terraform/providers/google/vendor/cel.dev/expr/BUILD.bazel create mode 100644 terraform/providers/google/vendor/cel.dev/expr/CODE_OF_CONDUCT.md create mode 100644 terraform/providers/google/vendor/cel.dev/expr/CONTRIBUTING.md create mode 100644 terraform/providers/google/vendor/cel.dev/expr/GOVERNANCE.md create mode 100644 terraform/providers/google/vendor/cel.dev/expr/LICENSE create mode 100644 terraform/providers/google/vendor/cel.dev/expr/MAINTAINERS.md create mode 100644 terraform/providers/google/vendor/cel.dev/expr/README.md create mode 100644 terraform/providers/google/vendor/cel.dev/expr/WORKSPACE create mode 100644 terraform/providers/google/vendor/cel.dev/expr/checked.pb.go create mode 100644 terraform/providers/google/vendor/cel.dev/expr/cloudbuild.yaml create mode 100644 terraform/providers/google/vendor/cel.dev/expr/eval.pb.go create mode 100644 terraform/providers/google/vendor/cel.dev/expr/explain.pb.go create mode 100644 terraform/providers/google/vendor/cel.dev/expr/regen_go_proto.sh create mode 100644 terraform/providers/google/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh create mode 100644 terraform/providers/google/vendor/cel.dev/expr/syntax.pb.go create mode 100644 terraform/providers/google/vendor/cel.dev/expr/value.pb.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/auth/internal/compute/compute.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go create mode 100644 
terraform/providers/google/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go rename terraform/providers/google/vendor/{google.golang.org/genproto/googleapis/bigtable/admin/v2 => cloud.google.com/go/bigtable/admin/apiv2/adminpb}/bigtable_instance_admin.pb.go (96%) rename terraform/providers/google/vendor/{google.golang.org/genproto/googleapis/bigtable/admin/v2 => cloud.google.com/go/bigtable/admin/apiv2/adminpb}/bigtable_table_admin.pb.go (96%) rename terraform/providers/google/vendor/{google.golang.org/genproto/googleapis/bigtable/admin/v2 => cloud.google.com/go/bigtable/admin/apiv2/adminpb}/common.pb.go (88%) rename terraform/providers/google/vendor/{google.golang.org/genproto/googleapis/bigtable/admin/v2 => cloud.google.com/go/bigtable/admin/apiv2/adminpb}/instance.pb.go (80%) rename terraform/providers/google/vendor/{google.golang.org/genproto/googleapis/bigtable/admin/v2 => cloud.google.com/go/bigtable/admin/apiv2/adminpb}/table.pb.go (87%) create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/types.pb.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/bigtable/aliasshim.go rename terraform/providers/google/vendor/{google.golang.org/genproto/googleapis/bigtable/v2 => cloud.google.com/go/bigtable/apiv2/bigtablepb}/bigtable.pb.go (79%) rename terraform/providers/google/vendor/{google.golang.org/genproto/googleapis/bigtable/v2 => cloud.google.com/go/bigtable/apiv2/bigtablepb}/data.pb.go (57%) rename terraform/providers/google/vendor/{google.golang.org/genproto/googleapis/bigtable/v2 => cloud.google.com/go/bigtable/apiv2/bigtablepb}/feature_flags.pb.go (89%) rename terraform/providers/google/vendor/{google.golang.org/genproto/googleapis/bigtable/v2 => cloud.google.com/go/bigtable/apiv2/bigtablepb}/request_stats.pb.go (93%) rename terraform/providers/google/vendor/{google.golang.org/genproto/googleapis/bigtable/v2 => cloud.google.com/go/bigtable/apiv2/bigtablepb}/response_params.pb.go (85%) create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/types.pb.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/bigtable/metrics.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/bigtable/metrics_monitoring_exporter.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/bigtable/metrics_util.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/compute/metadata/syscheck.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/longrunning/autogen/auxiliary_go123.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/LICENSE create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary_go123.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/gapic_metadata.json create mode 100644 
terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/version.go create mode 100644 terraform/providers/google/vendor/cloud.google.com/go/monitoring/internal/version.go delete mode 100644 terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/client.go delete mode 100644 
terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/hub.go delete mode 100644 terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/hub.yaml delete mode 100644 terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/hub_internal.go delete mode 100644 terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/hub_schema.go delete mode 100644 terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/hub_yaml_embed.go delete mode 100644 terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/spoke.go delete mode 100644 terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/spoke.yaml delete mode 100644 terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/spoke_internal.go delete mode 100644 terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/spoke_schema.go delete mode 100644 terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/spoke_yaml_embed.go create mode 100644 terraform/providers/google/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca_grpc.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog_vtproto.pb.go create mode 100644 
terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_vtproto.pb.go create mode 100644 
terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog_vtproto.pb.go create mode 100644 
terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads_grpc.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs_grpc.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs_vtproto.pb.go create mode 100644 
terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds_grpc.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/golang/glog/glog_file_nonwindows.go create mode 100644 terraform/providers/google/vendor/github.com/golang/glog/glog_file_windows.go delete mode 100644 terraform/providers/google/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go delete mode 100644 
terraform/providers/google/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go delete mode 100644 terraform/providers/google/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go delete mode 100644 terraform/providers/google/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go delete mode 100644 terraform/providers/google/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go create mode 100644 terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/iterator/iterator.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/Makefile create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/tools.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/client_capabilities.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/deferred.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/client_capabilities.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/client_capabilities.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/deferred.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/deferred.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/client_capabilities.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/deferred.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/client_capabilities.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/client_capabilities.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/deferred.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/deferred.go delete mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/aliases.go delete mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_validate.go delete mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/util.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/LICENSE create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/config/config.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/config/directory.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/config/doc.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/config/file.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/config/variable.go
rename terraform/providers/google/vendor/github.com/hashicorp/{terraform-plugin-sdk/v2 => terraform-plugin-testing}/helper/resource/environment_variables.go (100%) create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/error.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/id.go rename terraform/providers/google/vendor/github.com/hashicorp/{terraform-plugin-sdk/v2 => terraform-plugin-testing}/helper/resource/json.go (100%) create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/plan_checks.go rename terraform/providers/google/vendor/github.com/hashicorp/{terraform-plugin-sdk/v2 => terraform-plugin-testing}/helper/resource/plugin.go (98%) create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/state.go rename terraform/providers/google/vendor/github.com/hashicorp/{terraform-plugin-sdk/v2 => terraform-plugin-testing}/helper/resource/state_shim.go (77%) rename terraform/providers/google/vendor/github.com/hashicorp/{terraform-plugin-sdk/v2 => terraform-plugin-testing}/helper/resource/testcase_providers.go (100%) rename terraform/providers/google/vendor/github.com/hashicorp/{terraform-plugin-sdk/v2 => terraform-plugin-testing}/helper/resource/testcase_validate.go (66%) rename terraform/providers/google/vendor/github.com/hashicorp/{terraform-plugin-sdk/v2 => terraform-plugin-testing}/helper/resource/testing.go (83%) rename terraform/providers/google/vendor/github.com/hashicorp/{terraform-plugin-sdk/v2 => terraform-plugin-testing}/helper/resource/testing_config.go (78%) rename terraform/providers/google/vendor/github.com/hashicorp/{terraform-plugin-sdk/v2 => terraform-plugin-testing}/helper/resource/testing_new.go (64%) rename terraform/providers/google/vendor/github.com/hashicorp/{terraform-plugin-sdk/v2 => terraform-plugin-testing}/helper/resource/testing_new_config.go (67%) rename terraform/providers/google/vendor/github.com/hashicorp/{terraform-plugin-sdk/v2 => terraform-plugin-testing}/helper/resource/testing_new_import_state.go (78%) rename terraform/providers/google/vendor/github.com/hashicorp/{terraform-plugin-sdk/v2 => terraform-plugin-testing}/helper/resource/testing_new_refresh_state.go (78%) rename terraform/providers/google/vendor/github.com/hashicorp/{terraform-plugin-sdk/v2 => terraform-plugin-testing}/helper/resource/testing_sets.go (99%) rename terraform/providers/google/vendor/github.com/hashicorp/{terraform-plugin-sdk/v2 => terraform-plugin-testing}/helper/resource/teststep_providers.go (72%) create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/teststep_validate.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/tfversion_checks.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/wait.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/addrs/doc.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/addrs/instance_key.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/addrs/module.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/addrs/module_instance.go create mode 100644 
terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema/coerce_value.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema/doc.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema/empty_value.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema/implied_type.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema/nestingmode_string.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema/schema.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/hcl2shim/flatmap.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/hcl2shim/paths.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/hcl2shim/values.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/hcl2shim/values_equiv.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/errorshim/error_join_shim.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/logging/context.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/logging/environment_variables.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/logging/helper_resource.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/logging/helper_schema.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/logging/keys.go rename terraform/providers/google/vendor/github.com/hashicorp/{terraform-plugin-sdk/v2 => terraform-plugin-testing}/internal/plugintest/config.go (97%) rename terraform/providers/google/vendor/github.com/hashicorp/{terraform-plugin-sdk/v2 => terraform-plugin-testing}/internal/plugintest/doc.go (100%) rename terraform/providers/google/vendor/github.com/hashicorp/{terraform-plugin-sdk/v2 => terraform-plugin-testing}/internal/plugintest/environment_variables.go (91%) rename terraform/providers/google/vendor/github.com/hashicorp/{terraform-plugin-sdk/v2 => terraform-plugin-testing}/internal/plugintest/guard.go (100%) rename terraform/providers/google/vendor/github.com/hashicorp/{terraform-plugin-sdk/v2 => terraform-plugin-testing}/internal/plugintest/helper.go (90%) create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/plugintest/util.go rename terraform/providers/google/vendor/github.com/hashicorp/{terraform-plugin-sdk/v2 => terraform-plugin-testing}/internal/plugintest/working_dir.go (88%) create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/teststep/config.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/teststep/directory.go create mode 100644 
terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/teststep/file.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/teststep/string.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/config_traversals.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/contextual.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/diagnostic.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/diagnostic_base.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/diagnostics.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/doc.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/error.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/severity_string.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/simple_warning.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/doc.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_empty_plan.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_non_empty_plan.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_resource_action.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_sensitive_value.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_unknown_value.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/plan_check.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/resource_action.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/diff.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/instancetype.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/instancetype_string.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/resource.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/resource_address.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/resource_mode.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/resource_mode_string.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/resource_provider.go create mode 100644 
terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/schemas.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/state.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/state_filter.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/util.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfjsonpath/doc.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfjsonpath/path.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfjsonpath/step.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/all.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/any.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/doc.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_above.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_below.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_between.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_not.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_above.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_below.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_between.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_if.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/version_check.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/versions.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwprovider/data_source_provider_config_plugin_framework.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/data_source_provider_config_sdk.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_dry_run_egress_policy.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_dry_run_ingress_policy.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/apigee_utils.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_app_group.go rename 
terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/{datastore/resource_datastore_index_sweeper.go => apigee/resource_apigee_app_group_sweeper.go} (90%) create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_developer.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_developer_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_environment_keyvaluemaps.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_environment_keyvaluemaps_entries.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/artifactregistry/data_source_artifact_registry_locations.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/data_source_google_bigquery_tables.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/iam_bigquery_member_dataset.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/data_source_google_certificate_manager_certificates.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudidentity/data_source_cloud_identity_group_transitive_memberships.go rename terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/{data_source_compute_secutity_policy.go => data_source_compute_security_policy.go} (93%) create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_instance_guest_attributes.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_region_instance_group_manager.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy_association_sweeper.go rename terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/{resource_compute_security_policy_rule_sweeper.go => resource_compute_network_firewall_policy_rule_sweeper.go} (90%) create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_association_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_rule_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_resize_request.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_nat_address.go create mode 100644 
terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_storage_pool_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/databasemigrationservice/resource_database_migration_service_migration_job.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/databasemigrationservice/resource_database_migration_service_migration_job_sweeper.go rename terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/{datastore/datastore_operation.go => dataproc/dataproc_operation.go} (62%) create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_batch.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_batch_sweeper.go delete mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastore/resource_datastore_index.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/discoveryengine/resource_discovery_engine_schema.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/discoveryengine/resource_discovery_engine_schema_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/discoveryengine/resource_discovery_engine_target_site.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/discoveryengine/resource_discovery_engine_target_site_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub2/data_source_google_gke_hub_membership_binding.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_pipeline_job.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_workspace.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/resource_iap_settings.go delete mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_project_default_config.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_crypto_key_latest_version.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_crypto_key_versions.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/iam_kms_ekm_connection.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/resource_kms_autokey_config_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/iam_logging_log_view.go create mode 100644 
terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_log_scope.go rename terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/{cloudrunv2/resource_cloud_run_v2_service_sweeper.go => logging/resource_logging_log_scope_sweeper.go} (89%) create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_group.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_client_tls_policy.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_client_tls_policy_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_server_tls_policy.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_server_tls_policy_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_autonomous_database.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_autonomous_databases.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_cloud_exadata_infrastructure.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_cloud_exadata_infrastructures.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_cloud_vm_cluster.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_cloud_vm_clusters.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_db_nodes.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_db_servers.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/oracle_database_operation.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/resource_oracle_database_autonomous_database.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/resource_oracle_database_autonomous_database_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/resource_oracle_database_cloud_exadata_infrastructure.go create mode 100644 
terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/resource_oracle_database_cloud_exadata_infrastructure_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/resource_oracle_database_cloud_vm_cluster.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/resource_oracle_database_cloud_vm_cluster_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privilegedaccessmanager/data_source_privileged_access_manager_entitlement.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privilegedaccessmanager/privileged_access_manager_operation.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privilegedaccessmanager/resource_privileged_access_manager_entitlement.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privilegedaccessmanager/resource_privileged_access_manager_entitlement_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_service_accounts.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/data_source_secret_manager_regional_secret.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/data_source_secret_manager_regional_secret_version.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/data_source_secret_manager_regional_secret_version_access.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/data_source_secret_manager_regional_secrets.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/iam_secret_manager_regional_secret.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/resource_secret_manager_regional_secret.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/resource_secret_manager_regional_secret_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/resource_secret_manager_regional_secret_version.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securesourcemanager/resource_secure_source_manager_branch_rule.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securesourcemanager/resource_secure_source_manager_branch_rule_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_folder_notification_config.go create mode 100644 
terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_folder_notification_config_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_folder_scc_big_query_export.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_folder_scc_big_query_export_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_organization_scc_big_query_export.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_organization_scc_big_query_export_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_project_notification_config.go rename terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/{identityplatform/resource_identity_platform_project_default_config_sweeper.go => securitycenter/resource_scc_project_notification_config_sweeper.go} (87%) create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_project_scc_big_query_export.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_project_scc_big_query_export_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/iam_scc_v2_organization_source.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_folder_mute_config.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_folder_mute_config_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_folder_notification_config.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_folder_notification_config_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_folder_scc_big_query_export.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_folder_scc_big_query_export_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_mute_config.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_mute_config_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_scc_big_query_export.go create mode 100644 
terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_scc_big_query_export_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_scc_big_query_exports.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_scc_big_query_exports_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_source.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_project_mute_config.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_project_mute_config_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_project_notification_config.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_project_notification_config_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_project_scc_big_query_export.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_project_scc_big_query_export_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/siteverification/data_source_google_site_verification_token.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/siteverification/resource_site_verification_owner.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/siteverification/resource_site_verification_web_resource.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/siteverification/resource_site_verification_web_resource_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/resource_spanner_backup_schedule.go rename terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/{activedirectory/resource_active_directory_domain_sweeper.go => spanner/resource_spanner_backup_schedule_sweeper.go} (87%) create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket_600_migration.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/transcoder/resource_transcoder_job.go rename terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/{cloudrunv2/resource_cloud_run_v2_job_sweeper.go => transcoder/resource_transcoder_job_sweeper.go} (90%) create mode 100644 
terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/transcoder/resource_transcoder_job_template.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/transcoder/resource_transcoder_job_template_sweeper.go create mode 100644 terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_index_endpoint_deployed_index.go create mode 100644 terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/LICENSE create mode 100644 terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/protohelpers/protohelpers.go create mode 100644 terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/types/known/anypb/any_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/types/known/durationpb/duration_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/types/known/emptypb/empty_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/types/known/structpb/struct_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/types/known/timestamppb/timestamp_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/types/known/wrapperspb/wrappers_vtproto.pb.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go delete mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go delete mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/.gitmodules create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/README.md create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/baggage/README.md create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/codes/README.md create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/README.md create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/embedded/README.md create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/noop/README.md create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/propagation/README.md create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/renovate.json create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/LICENSE create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/README.md create mode 100644 
terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/README.md create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/config.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/env.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go create mode 100644 
terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/README.md create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/version.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/view.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/README.md create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/config.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/container.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/env.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go create mode 100644 
terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/os.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/process.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/version.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/README.md create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/embedded/README.md create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/provider.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/span.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/tracer.go create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/verify_readmes.sh create mode 100644 terraform/providers/google/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh delete mode 100644 terraform/providers/google/vendor/golang.org/x/crypto/sha3/register.go create mode 100644 terraform/providers/google/vendor/golang.org/x/exp/constraints/constraints.go create mode 100644 terraform/providers/google/vendor/golang.org/x/net/http2/config.go create mode 100644 terraform/providers/google/vendor/golang.org/x/net/http2/config_go124.go create mode 100644 terraform/providers/google/vendor/golang.org/x/net/http2/config_pre_go124.go create mode 100644 terraform/providers/google/vendor/golang.org/x/sync/errgroup/errgroup.go create mode 100644 terraform/providers/google/vendor/golang.org/x/sync/errgroup/go120.go create mode 100644 terraform/providers/google/vendor/golang.org/x/sync/errgroup/pre_go120.go create mode 100644 terraform/providers/google/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go create mode 100644 terraform/providers/google/vendor/golang.org/x/sys/unix/vgetrandom_linux.go create mode 100644 terraform/providers/google/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go create mode 100644 
terraform/providers/google/vendor/golang.org/x/sys/windows/registry/key.go create mode 100644 terraform/providers/google/vendor/golang.org/x/sys/windows/registry/mksyscall.go create mode 100644 terraform/providers/google/vendor/golang.org/x/sys/windows/registry/syscall.go create mode 100644 terraform/providers/google/vendor/golang.org/x/sys/windows/registry/value.go create mode 100644 terraform/providers/google/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/LICENSE create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/PATENTS create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/cmd/stringer/stringer.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/go/gcexportdata/importer.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/go/packages/doc.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/go/packages/external.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/go/packages/golist.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/go/packages/golist_overlay.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/go/packages/loadmode_string.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/go/packages/packages.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/go/packages/visit.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/aliases/aliases.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/event/core/event.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/event/core/export.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/event/core/fast.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/event/doc.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/event/event.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/event/keys/keys.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/event/keys/standard.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/event/keys/util.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/event/label/label.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/bimport.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/iexport.go create mode 100644 
terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/iimport.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/gocommand/invoke.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/gocommand/vendor.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/gocommand/version.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/packagesinternal/packages.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/codes.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/decoder.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/doc.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/encoder.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/flags.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/reloc.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/support.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/sync.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/stdlib/manifest.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/stdlib/stdlib.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/typesinternal/recv.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/typesinternal/toonew.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/typesinternal/types.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/versions/features.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/versions/gover.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/versions/toolchain.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go create mode 100644 
terraform/providers/google/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/versions/types.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/versions/types_go121.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/versions/types_go122.go create mode 100644 terraform/providers/google/vendor/golang.org/x/tools/internal/versions/versions.go create mode 100644 terraform/providers/google/vendor/google.golang.org/api/certificatemanager/v1/certificatemanager-api.json create mode 100644 terraform/providers/google/vendor/google.golang.org/api/certificatemanager/v1/certificatemanager-gen.go create mode 100644 terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go create mode 100644 terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go create mode 100644 terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go create mode 100644 terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go delete mode 100644 terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/types.pb.go create mode 100644 terraform/providers/google/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go create mode 100644 terraform/providers/google/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go create mode 100644 terraform/providers/google/vendor/google.golang.org/genproto/protobuf/api/api.go rename terraform/providers/google/vendor/google.golang.org/grpc/{ => balancer/pickfirst}/pickfirst.go (87%) create mode 100644 terraform/providers/google/vendor/google.golang.org/grpc/encoding/encoding_v2.go create mode 100644 terraform/providers/google/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go create mode 100644 terraform/providers/google/vendor/google.golang.org/grpc/experimental/stats/metrics.go create mode 100644 terraform/providers/google/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go create mode 100644 terraform/providers/google/vendor/google.golang.org/grpc/grpclog/internal/logger.go rename terraform/providers/google/vendor/google.golang.org/grpc/{internal/grpclog/grpclog.go => grpclog/internal/loggerv2.go} (52%) rename terraform/providers/google/vendor/google.golang.org/grpc/internal/grpclog/{prefixLogger.go => prefix_logger.go} (63%) delete mode 100644 terraform/providers/google/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go delete mode 100644 terraform/providers/google/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go create mode 100644 terraform/providers/google/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go create mode 100644 terraform/providers/google/vendor/google.golang.org/grpc/mem/buffer_pool.go create mode 100644 terraform/providers/google/vendor/google.golang.org/grpc/mem/buffer_slice.go create mode 100644 terraform/providers/google/vendor/google.golang.org/grpc/mem/buffers.go delete mode 100644 terraform/providers/google/vendor/google.golang.org/grpc/regenerate.sh delete mode 100644 terraform/providers/google/vendor/google.golang.org/grpc/shared_buffer_pool.go create mode 100644 
terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go create mode 100644 terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go delete mode 100644 terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go create mode 100644 terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go delete mode 100644 terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go create mode 100644 terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/equal.go delete mode 100644 terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go delete mode 100644 terraform/providers/google/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go delete mode 100644 terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go create mode 100644 terraform/providers/google/vendor/google.golang.org/protobuf/types/known/apipb/api.pb.go create mode 100644 terraform/providers/google/vendor/google.golang.org/protobuf/types/known/sourcecontextpb/source_context.pb.go create mode 100644 terraform/providers/google/vendor/google.golang.org/protobuf/types/known/typepb/type.pb.go diff --git a/terraform/providers/google/go.mod b/terraform/providers/google/go.mod index 0ffed21f085..5b5c10de649 100644 --- a/terraform/providers/google/go.mod +++ b/terraform/providers/google/go.mod @@ -4,66 +4,69 @@ go 1.21 toolchain go1.22.4 -require github.com/hashicorp/terraform-provider-google v1.20.1-0.20240708170355-9d7e7459a11c // v5.37.0 +require github.com/hashicorp/terraform-provider-google v1.20.1-0.20240708170355-9d7e7459a11c // v6.10.0 require ( bitbucket.org/creachadair/stringset v0.0.8 // indirect - cloud.google.com/go v0.115.0 // indirect - cloud.google.com/go/auth v0.5.1 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect - cloud.google.com/go/bigtable v1.24.0 // indirect - cloud.google.com/go/compute/metadata v0.3.0 // indirect - cloud.google.com/go/iam v1.1.8 // indirect - cloud.google.com/go/longrunning v0.5.7 // indirect - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.68.0 // indirect - github.com/ProtonMail/go-crypto v1.1.0-alpha.0 // indirect + cel.dev/expr v0.16.0 // indirect + cloud.google.com/go v0.116.0 // indirect + cloud.google.com/go/auth v0.9.8 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go/bigtable v1.33.0 // indirect + cloud.google.com/go/compute/metadata v0.5.2 // indirect + cloud.google.com/go/iam v1.2.1 // indirect + cloud.google.com/go/longrunning v0.6.1 // indirect + cloud.google.com/go/monitoring v1.21.1 // indirect + github.com/GoogleCloudPlatform/declarative-resource-client-library v1.75.0 // indirect + github.com/ProtonMail/go-crypto v1.1.0-alpha.2 // indirect github.com/agext/levenshtein v1.2.2 // indirect github.com/apparentlymart/go-cidr v1.1.0 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudflare/circl v1.3.7 // indirect - github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 // indirect + github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59 // 
indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/envoyproxy/go-control-plane v0.12.0 // indirect - github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect + github.com/envoyproxy/go-control-plane v0.13.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect github.com/fatih/color v1.16.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/gammazero/deque v0.0.0-20180920172122-f6adf94963e4 // indirect github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang/glog v1.2.0 // indirect + github.com/golang/glog v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-cpy v0.0.0-20211218193943-a9c933c06932 // indirect - github.com/google/s2a-go v0.1.7 // indirect + github.com/google/s2a-go v0.1.8 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.4 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect + github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 // indirect - github.com/hashicorp/go-hclog v1.5.0 // indirect + github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-plugin v1.6.0 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.6.0 // indirect - github.com/hashicorp/hc-install v0.6.3 // indirect - github.com/hashicorp/hcl/v2 v2.19.1 // indirect + github.com/hashicorp/hc-install v0.6.4 // indirect + github.com/hashicorp/hcl/v2 v2.20.1 // indirect github.com/hashicorp/logutils v1.0.0 // indirect - github.com/hashicorp/terraform-exec v0.20.0 // indirect - github.com/hashicorp/terraform-json v0.21.0 // indirect + github.com/hashicorp/terraform-exec v0.21.0 // indirect + github.com/hashicorp/terraform-json v0.22.1 // indirect github.com/hashicorp/terraform-plugin-framework v1.7.0 // indirect github.com/hashicorp/terraform-plugin-framework-validators v0.9.0 // indirect - github.com/hashicorp/terraform-plugin-go v0.22.1 // indirect + github.com/hashicorp/terraform-plugin-go v0.23.0 // indirect github.com/hashicorp/terraform-plugin-log v0.9.0 // indirect github.com/hashicorp/terraform-plugin-mux v0.15.0 // indirect github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0 // indirect + github.com/hashicorp/terraform-plugin-testing v1.5.1 // indirect github.com/hashicorp/terraform-registry-address v0.2.3 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect github.com/hashicorp/yamux v0.1.1 // indirect @@ -78,34 +81,41 @@ require ( github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/oklog/run v1.0.0 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/sirupsen/logrus v1.8.1 // indirect github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect 
github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect - github.com/zclconf/go-cty v1.14.2 // indirect + github.com/zclconf/go-cty v1.14.4 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect - go.opentelemetry.io/otel v1.24.0 // indirect - go.opentelemetry.io/otel/metric v1.24.0 // indirect - go.opentelemetry.io/otel/trace v1.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect + go.opentelemetry.io/otel v1.29.0 // indirect + go.opentelemetry.io/otel/metric v1.29.0 // indirect + go.opentelemetry.io/otel/sdk v1.29.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.29.0 // indirect + go.opentelemetry.io/otel/trace v1.29.0 // indirect go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect - golang.org/x/crypto v0.24.0 // indirect + golang.org/x/crypto v0.28.0 // indirect golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8 // indirect golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.26.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.21.0 // indirect - golang.org/x/text v0.16.0 // indirect - golang.org/x/time v0.5.0 // indirect - google.golang.org/api v0.185.0 // indirect + golang.org/x/net v0.30.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/text v0.19.0 // indirect + golang.org/x/time v0.7.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect + google.golang.org/api v0.201.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 // indirect - google.golang.org/grpc v1.64.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect + google.golang.org/genproto v0.0.0-20241007155032-5fefd90f89a9 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 // indirect + google.golang.org/grpc v1.67.1 // indirect + google.golang.org/protobuf v1.35.1 // indirect ) replace bitbucket.org/creachadair/stringset => bitbucket.org/creachadair/stringset v0.0.11 + +// https://issues.redhat.com/browse/OCPBUGS-76586 +replace github.com/hashicorp/terraform-provider-google => github.com/openshift/terraform-providers-terraform-provider-google v1.20.1-0.20260210200830-91107ee8cc5d diff --git a/terraform/providers/google/go.sum b/terraform/providers/google/go.sum index d9ae901a17e..71e1a691b22 100644 --- a/terraform/providers/google/go.sum +++ b/terraform/providers/google/go.sum @@ -1,29 +1,33 @@ bitbucket.org/creachadair/stringset v0.0.11 h1:6Sv4CCv14Wm+OipW4f3tWOb0SQVpBDLW0knnJqUnmZ8= bitbucket.org/creachadair/stringset v0.0.11/go.mod h1:wh0BHewFe+j0HrzWz7KcGbSNpFzWwnpmgPRlB57U5jU= +cel.dev/expr v0.16.0 h1:yloc84fytn4zmJX2GU3TkXGsaieaV7dQ057Qs4sIG2Y= +cel.dev/expr v0.16.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= cloud.google.com/go 
v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= -cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= -cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw= -cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s= -cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= -cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= -cloud.google.com/go/bigtable v1.24.0 h1:RtBERIoZZsQm3LUExDGFWgOwMEHCO04O9/pDA0KoAZI= -cloud.google.com/go/bigtable v1.24.0/go.mod h1:NlsITD7sKXo97kKIfF83ROd6P1bw8J4zsAUUYqk167Q= -cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0= -cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE= -cloud.google.com/go/longrunning v0.5.7 h1:WLbHekDbjK1fVFD3ibpFFVoyizlLRl73I7YKuAKilhU= -cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng= +cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= +cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= +cloud.google.com/go/auth v0.9.8 h1:+CSJ0Gw9iVeSENVCKJoLHhdUykDgXSc4Qn+gu2BRtR8= +cloud.google.com/go/auth v0.9.8/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= +cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= +cloud.google.com/go/bigtable v1.33.0 h1:2BDaWLRAwXO14DJL/u8crbV2oUbMZkIa2eGq8Yao1bk= +cloud.google.com/go/bigtable v1.33.0/go.mod h1:HtpnH4g25VT1pejHRtInlFPnN5sjTxbQlsYBjh9t5l0= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go/iam v1.2.1 h1:QFct02HRb7H12J/3utj0qf5tobFh9V4vR6h9eX5EBRU= +cloud.google.com/go/iam v1.2.1/go.mod h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g= +cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTSYjyMc= +cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0= +cloud.google.com/go/monitoring v1.21.1 h1:zWtbIoBMnU5LP9A/fz8LmWMGHpk4skdfeiaa66QdFGc= +cloud.google.com/go/monitoring v1.21.1/go.mod h1:Rj++LKrlht9uBi8+Eb530dIrzG/cU/lB8mt+lbeFK1c= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.68.0 h1:LIPIYi4hy7ttUSrziY/TYwMDuEvvV593n80kRmz6nZ4= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.68.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.75.0 h1:7tFkHNjfjm7dYnjqyuzMon+31lPaMTjca3OuamWd0Oo= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.75.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= github.com/Microsoft/go-winio v0.6.1 
h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/ProtonMail/go-crypto v1.1.0-alpha.0 h1:nHGfwXmFvJrSR9xu8qL7BkO4DqTHXE9N5vPhgY2I+j0= -github.com/ProtonMail/go-crypto v1.1.0-alpha.0/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= +github.com/ProtonMail/go-crypto v1.1.0-alpha.2 h1:bkyFVUP+ROOARdgCiJzNQo2V2kiB97LyUpzH9P6Hrlg= +github.com/ProtonMail/go-crypto v1.1.0-alpha.2/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= @@ -38,14 +42,14 @@ github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc= -github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM= +github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59 h1:fLZ97KE86ELjEYJCEUVzmbhfzDxHHGwBrDVMd4XL6Bs= +github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -58,11 +62,11 @@ github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FM github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= +github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= 
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= @@ -76,13 +80,13 @@ github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66D github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= -github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= -github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= +github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= +github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -91,8 +95,8 @@ github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3a github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= -github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -112,8 +116,8 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf 
v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -125,15 +129,15 @@ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cpy v0.0.0-20211218193943-a9c933c06932 h1:5/4TSDzpDnHQ8rKEEQBjRlYx77mHOvXu08oGchxej7o= github.com/google/go-cpy v0.0.0-20211218193943-a9c933c06932/go.mod h1:cC6EdPbj/17GFCPDK39NRarlMI+kt+O60S12cNB5J9Y= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= -github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= +github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= @@ -145,8 +149,8 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= -github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= -github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= 
+github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= @@ -156,30 +160,30 @@ github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/C github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.6.3 h1:yE/r1yJvWbtrJ0STwScgEnCanb0U9v7zp0Gbkmcoxqs= -github.com/hashicorp/hc-install v0.6.3/go.mod h1:KamGdbodYzlufbWh4r9NRo8y6GLHWZP2GBtdnms1Ln0= -github.com/hashicorp/hcl/v2 v2.19.1 h1://i05Jqznmb2EXqa39Nsvyan2o5XyMowW5fnCKW5RPI= -github.com/hashicorp/hcl/v2 v2.19.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= +github.com/hashicorp/hc-install v0.6.4 h1:QLqlM56/+SIIGvGcfFiwMY3z5WGXT066suo/v9Km8e0= +github.com/hashicorp/hc-install v0.6.4/go.mod h1:05LWLy8TD842OtgcfBbOT0WMoInBMUSHjmDx10zuBIA= +github.com/hashicorp/hcl/v2 v2.20.1 h1:M6hgdyz7HYt1UN9e61j+qKJBqR3orTWbI1HKBJEdxtc= +github.com/hashicorp/hcl/v2 v2.20.1/go.mod h1:TZDqQ4kNKCbh1iJp99FdPiUaVDDUPivbqxZulxDYqL4= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/terraform-exec v0.20.0 h1:DIZnPsqzPGuUnq6cH8jWcPunBfY+C+M8JyYF3vpnuEo= -github.com/hashicorp/terraform-exec v0.20.0/go.mod h1:ckKGkJWbsNqFKV1itgMnE0hY9IYf1HoiekpuN0eWoDw= -github.com/hashicorp/terraform-json v0.21.0 h1:9NQxbLNqPbEMze+S6+YluEdXgJmhQykRyRNd+zTI05U= -github.com/hashicorp/terraform-json v0.21.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= +github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ= +github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg= +github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec= +github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A= github.com/hashicorp/terraform-plugin-framework v1.7.0 h1:wOULbVmfONnJo9iq7/q+iBOBJul5vRovaYJIu2cY/Pw= github.com/hashicorp/terraform-plugin-framework v1.7.0/go.mod h1:jY9Id+3KbZ17OMpulgnWLSfwxNVYSoYBQFTgsx044CI= github.com/hashicorp/terraform-plugin-framework-validators v0.9.0 h1:LYz4bXh3t7bTEydXOmPDPupRRnA480B/9+jV8yZvxBA= github.com/hashicorp/terraform-plugin-framework-validators v0.9.0/go.mod h1:+BVERsnfdlhYR2YkXMBtPnmn9UsL19U3qUtSZ+Y/5MY= -github.com/hashicorp/terraform-plugin-go v0.22.1 h1:iTS7WHNVrn7uhe3cojtvWWn83cm2Z6ryIUDTRO0EV7w= -github.com/hashicorp/terraform-plugin-go v0.22.1/go.mod h1:qrjnqRghvQ6KnDbB12XeZ4FluclYwptntoWCr9QaXTI= +github.com/hashicorp/terraform-plugin-go v0.23.0 h1:AALVuU1gD1kPb48aPQUjug9Ir/125t+AAurhqphJ2Co= +github.com/hashicorp/terraform-plugin-go v0.23.0/go.mod h1:1E3Cr9h2vMlahWMbsSEcNrOCxovCZhOOIXjFHbjc/lQ= github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow= 
github.com/hashicorp/terraform-plugin-mux v0.15.0 h1:+/+lDx0WUsIOpkAmdwBIoFU8UP9o2eZASoOnLsWbKME= github.com/hashicorp/terraform-plugin-mux v0.15.0/go.mod h1:9ezplb1Dyq394zQ+ldB0nvy/qbNAz3mMoHHseMTMaKo= github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0 h1:qHprzXy/As0rxedphECBEQAh3R4yp6pKksKHcqZx5G8= github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0/go.mod h1:H+8tjs9TjV2w57QFVSMBQacf8k/E1XwLXGCARgViC6A= -github.com/hashicorp/terraform-provider-google v1.20.1-0.20240708170355-9d7e7459a11c h1:rTfbrGJsKJ+ciBhJkBRYcT6tCPpeI1yspoC1KpPSdYE= -github.com/hashicorp/terraform-provider-google v1.20.1-0.20240708170355-9d7e7459a11c/go.mod h1:VHiRA994xbf2/Xi++bmSfimZHe/4rbfPM3eprIU1Oq0= +github.com/hashicorp/terraform-plugin-testing v1.5.1 h1:T4aQh9JAhmWo4+t1A7x+rnxAJHCDIYW9kXyo4sVO92c= +github.com/hashicorp/terraform-plugin-testing v1.5.1/go.mod h1:dg8clO6K59rZ8w9EshBmDp1CxTIPu3yA4iaDpX1h5u0= github.com/hashicorp/terraform-registry-address v0.2.3 h1:2TAiKJ1A3MAkZlH1YI/aTVcLZRu7JseiXNRHbOAyoTI= github.com/hashicorp/terraform-registry-address v0.2.3/go.mod h1:lFHA76T8jfQteVfT7caREqguFrW3c4MFSPhZB7HHgUM= github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ= @@ -229,22 +233,26 @@ github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zx github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/openshift/terraform-providers-terraform-provider-google v1.20.1-0.20260210200830-91107ee8cc5d h1:2BNEi6M+dHAEz9lS3ghEqMKfi4CV422Zhlfci2mFhY4= +github.com/openshift/terraform-providers-terraform-provider-google v1.20.1-0.20260210200830-91107ee8cc5d/go.mod h1:AUv8iMXsDhlPHwQqUCNpc+yKmXdjW6ciqZRA3pilbJE= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= -github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus 
v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= -github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A= +github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -269,22 +277,26 @@ github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zclconf/go-cty v1.14.2 h1:kTG7lqmBou0Zkx35r6HJHUQTvaRPr5bIAf3AoHS0izI= -github.com/zclconf/go-cty v1.14.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= +github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b h1:FosyBZYxY34Wul7O/MSKey3txpPYyCqVO5ZyceuQJEI= +github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= -go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= -go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= -go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= -go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= -go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= -go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= 
+go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= +go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY= +go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= @@ -294,8 +306,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8 h1:ESSUROHIBHg7USnszlcdmjBEwdMj9VUvU+OPk4yl2mc= golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI= @@ -318,19 +330,19 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -348,8 +360,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -357,10 +369,10 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -376,8 +388,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.185.0 h1:ENEKk1k4jW8SmmaT6RE+ZasxmxezCrD5Vw4npvr+pAU= -google.golang.org/api v0.185.0/go.mod h1:HNfvIkJGlgrIlrbYkAm9W9IdkmKZjOTVh33YltygGbg= +google.golang.org/api v0.201.0 h1:+7AD9JNM3tREtawRMu8sOjSbb8VYcYXJG/2eEOmfDu0= +google.golang.org/api v0.201.0/go.mod h1:HVY0FCHVs89xIW9fzf/pBvOEm+OolHa86G/txFezyq4= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= @@ -386,20 +398,20 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4 h1:CUiCqkPw1nNrNQzCCG4WA65m0nAmQiwXHpub3dNyruU= -google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4/go.mod h1:EvuUDCulqGgV80RvP1BHuom+smhX4qtlhnNatHuroGQ= -google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3 h1:QW9+G6Fir4VcRXVH8x3LilNAb6cxBGLa6+GM4hRwexE= -google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3/go.mod h1:kdrSS/OiLkPrNUpzD4aHgCq2rVuC/YRxok32HXZ4vRE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 h1:Di6ANFilr+S60a4S61ZM00vLdw0IrQOSMS2/6mrnOU0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto v0.0.0-20241007155032-5fefd90f89a9 h1:nFS3IivktIU5Mk6KQa+v6RKkHUpdQpphqGNLxqNnbEk= +google.golang.org/genproto v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:tEzYTYZxbmVNOu0OAFH9HzdJtLn6h4Aj89zzlBCdHms= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f h1:jTm13A2itBi3La6yTGqn8bVSrc3ZZ1r8ENHlIXBfnRA= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 h1:QCqS/PdaHTSWGvupk2F/ehwHtGc0/GYkT+3GAcR1CCc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= 
+google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -411,8 +423,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/terraform/providers/google/vendor/cel.dev/expr/.bazelversion b/terraform/providers/google/vendor/cel.dev/expr/.bazelversion new file mode 100644 index 00000000000..579c9d21e7d --- /dev/null +++ b/terraform/providers/google/vendor/cel.dev/expr/.bazelversion @@ -0,0 +1,2 @@ +6.4.0 +# Keep this pinned version in parity with cel-go diff --git a/terraform/providers/google/vendor/cel.dev/expr/.gitattributes b/terraform/providers/google/vendor/cel.dev/expr/.gitattributes new file mode 100644 index 00000000000..3de1ec213ae --- /dev/null +++ b/terraform/providers/google/vendor/cel.dev/expr/.gitattributes @@ -0,0 +1,2 @@ +*.pb.go linguist-generated=true +*.pb.go -diff -merge diff --git a/terraform/providers/google/vendor/cel.dev/expr/.gitignore b/terraform/providers/google/vendor/cel.dev/expr/.gitignore new file mode 100644 index 00000000000..ac51a054d2d --- /dev/null +++ b/terraform/providers/google/vendor/cel.dev/expr/.gitignore @@ -0,0 +1 @@ +bazel-* diff --git a/terraform/providers/google/vendor/cel.dev/expr/BUILD.bazel b/terraform/providers/google/vendor/cel.dev/expr/BUILD.bazel new file mode 100644 index 00000000000..f631b6df06d --- /dev/null +++ b/terraform/providers/google/vendor/cel.dev/expr/BUILD.bazel @@ -0,0 +1,3 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 diff --git a/terraform/providers/google/vendor/cel.dev/expr/CODE_OF_CONDUCT.md b/terraform/providers/google/vendor/cel.dev/expr/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..59908e2d8e8 --- /dev/null +++ b/terraform/providers/google/vendor/cel.dev/expr/CODE_OF_CONDUCT.md @@ -0,0 +1,25 @@ +# Contributor Code of Conduct +## Version 0.1.1 (adapted from 0.3b-angular) + +As contributors and maintainers of the Common Expression Language +(CEL) project, we pledge to respect everyone who contributes by +posting issues, updating documentation, submitting pull requests, +providing feedback in comments, and any other activities. 
+
+Communication through any of CEL's channels (GitHub, Gitter, IRC,
+mailing lists, Google+, Twitter, etc.) must be constructive and never
+resort to personal attacks, trolling, public or private harassment,
+insults, or other unprofessional conduct.
+
+We promise to extend courtesy and respect to everyone involved in this
+project regardless of gender, gender identity, sexual orientation,
+disability, age, race, ethnicity, religion, or level of experience. We
+expect anyone contributing to the project to do the same.
+
+If any member of the community violates this code of conduct, the
+maintainers of the CEL project may take action, removing issues,
+comments, and PRs or blocking accounts as deemed appropriate.
+
+If you are subject to or witness unacceptable behavior, or have any
+other concerns, please email us at
+[cel-conduct@google.com](mailto:cel-conduct@google.com).
diff --git a/terraform/providers/google/vendor/cel.dev/expr/CONTRIBUTING.md b/terraform/providers/google/vendor/cel.dev/expr/CONTRIBUTING.md
new file mode 100644
index 00000000000..8f5fd5c31fd
--- /dev/null
+++ b/terraform/providers/google/vendor/cel.dev/expr/CONTRIBUTING.md
@@ -0,0 +1,32 @@
+# How to Contribute
+
+We'd love to accept your patches and contributions to this project. There are a
+few guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution;
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to <https://cla.developers.google.com/> to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult
+[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
+information on using pull requests.
+
+## What to expect from maintainers
+
+Expect maintainers to respond to new issues or pull requests within a week.
+For outstanding and ongoing issues and particularly for long-running
+pull requests, expect the maintainers to review within a week of a
+contributor asking for a new review. There is no commitment to resolution --
+merging or closing a pull request, or fixing or closing an issue -- because some
+issues will require more discussion than others.
diff --git a/terraform/providers/google/vendor/cel.dev/expr/GOVERNANCE.md b/terraform/providers/google/vendor/cel.dev/expr/GOVERNANCE.md
new file mode 100644
index 00000000000..0a525bc17de
--- /dev/null
+++ b/terraform/providers/google/vendor/cel.dev/expr/GOVERNANCE.md
@@ -0,0 +1,43 @@
+# Project Governance
+
+This document defines the governance process for the CEL language. CEL is
+Google-developed, but openly governed. Major contributors to the CEL
+specification and its corresponding implementations constitute the CEL
+Language Council. New members may be added by a unanimous vote of the
+Council.
+
+The MAINTAINERS.md file lists the members of the CEL Language Council, and
+unofficially indicates the "areas of expertise" of each member with respect
+to the publicly available CEL repos.
+
+## Code Changes
+
+Code changes must follow the standard pull request (PR) model documented in the
+CONTRIBUTING.md for each CEL repo. All fixes and features must be reviewed by a
+maintainer. The maintainer reserves the right to request that any feature
+request (FR) or PR be reviewed by the language council.
+
+## Syntax and Semantic Changes
+
+Syntactic and semantic changes must be reviewed by the CEL Language Council.
+Maintainers may also request language council review at their discretion.
+
+The review process is as follows:
+
+- Create a Feature Request in the CEL-Spec repo. The feature description will
+  serve as an abstract for the detailed design document.
+- Co-develop a design document with the Language Council.
+- Once the proposer gives the design document approval, the document will be
+  linked to the FR in the CEL-Spec repo and opened for comments to members of
+  cel-lang-discuss@googlegroups.com.
+- The Language Council will review the design doc at the next council meeting
+  (once every three weeks) and the council decision will be included in the document.
+
+If the proposal is approved, the spec will be updated by a maintainer (if
+applicable) and a rationale will be included in the CEL-Spec wiki to ensure
+future developers may follow CEL's growth and direction over time.
+
+Approved proposals may be implemented by the proposer or by the maintainers as
+the parties see fit. At the discretion of the maintainer, changes from the
+approved design are permitted during implementation if they improve the user
+experience and clarity of the feature.
diff --git a/terraform/providers/google/vendor/cel.dev/expr/LICENSE b/terraform/providers/google/vendor/cel.dev/expr/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/terraform/providers/google/vendor/cel.dev/expr/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/terraform/providers/google/vendor/cel.dev/expr/MAINTAINERS.md b/terraform/providers/google/vendor/cel.dev/expr/MAINTAINERS.md
new file mode 100644
index 00000000000..1ed2eb8ab35
--- /dev/null
+++ b/terraform/providers/google/vendor/cel.dev/expr/MAINTAINERS.md
@@ -0,0 +1,13 @@
+# CEL Language Council
+
+| Name            | Company      | Area of Expertise |
+|-----------------|--------------|-------------------|
+| Alfred Fuller   | Facebook     | cel-cpp, cel-spec |
+| Jim Larson      | Google       | cel-go, cel-spec  |
+| Matthias Blume  | Google       | cel-spec          |
+| Tristan Swadell | Google       | cel-go, cel-spec  |
+
+## Emeritus
+
+* Sanjay Ghemawat (Google)
+* Wolfgang Grieskamp (Facebook)
diff --git a/terraform/providers/google/vendor/cel.dev/expr/README.md b/terraform/providers/google/vendor/cel.dev/expr/README.md
new file mode 100644
index 00000000000..2da1e7f2fa2
--- /dev/null
+++ b/terraform/providers/google/vendor/cel.dev/expr/README.md
@@ -0,0 +1,65 @@
+# Common Expression Language
+
+The Common Expression Language (CEL) implements common semantics for expression
+evaluation, enabling different applications to more easily interoperate.
+
+Key Applications
+
+* Security policy: organizations have complex infrastructure and need common
+  tooling to reason about the system as a whole.
+* Protocols: expressions are a useful data type and require interoperability
+  across programming languages and platforms.
+
+Guiding philosophy:
+
+1. Keep it small & fast.
+   * CEL evaluates in linear time, is mutation-free, and is not
+     Turing-complete. This limitation is a feature of the language design,
+     which allows the implementation to evaluate orders of magnitude faster
+     than equivalently sandboxed JavaScript.
+2. Make it extensible.
+   * CEL is designed to be embedded in applications and is extensible via its
+     context, which lets the embedding software provide functions and data.
+3. Developer-friendly.
+   * The language is approachable to developers. The initial spec was based
+     on the experience of developing Firebase Rules and usability testing
+     many prior iterations.
+   * The library itself and accompanying tooling should be easy to adopt by
+     teams that seek to integrate CEL into their platforms.
+
+The required components of a system that supports CEL are:
+
+* The textual representation of an expression as written by a developer. Its
+  syntax is similar to expressions in C/C++/Java/JavaScript.
+* A binary representation of an expression. It is an abstract syntax tree
+  (AST).
+* A compiler library that converts the textual representation to the binary
+  representation. This can be done ahead of time (in the control plane) or
+  just before evaluation (in the data plane).
+* A context containing one or more typed variables, often protobuf messages.
+  Most use cases will use `attribute_context.proto`.
+* An evaluator library that evaluates the binary representation against the
+  context and produces a result, usually a Boolean. (A minimal end-to-end
+  sketch in Go appears at the end of this file.)
+
+Examples of a Boolean condition and object construction:
+
+``` c
+// Condition
+account.balance >= transaction.withdrawal
+    || (account.overdraftProtection
+    && account.overdraftLimit >= transaction.withdrawal - account.balance)
+
+// Object construction
+common.GeoPoint{ latitude: 10.0, longitude: -5.5 }
+```
+
+For more detail, see:
+
+* [Introduction](doc/intro.md)
+* [Language Definition](doc/langdef.md)
+
+Released under the [Apache License](LICENSE).
+
+Disclaimer: This is not an official Google product.
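+
+The following is a minimal sketch of the compile-and-evaluate flow described
+above, written against `cel-go` (the Go implementation of CEL, a separate
+repository from these protos). The `account` variable and its map-typed
+declaration are illustrative assumptions, not part of this package:
+
+``` go
+package main
+
+import (
+	"fmt"
+
+	"github.com/google/cel-go/cel"
+)
+
+func main() {
+	// Declare the typed context: a single map-typed `account` variable.
+	env, err := cel.NewEnv(
+		cel.Variable("account", cel.MapType(cel.StringType, cel.DynType)),
+	)
+	if err != nil {
+		panic(err)
+	}
+
+	// Compile the textual representation into a checked AST (the binary form).
+	ast, iss := env.Compile(`account.balance >= 100.0`)
+	if iss != nil && iss.Err() != nil {
+		panic(iss.Err())
+	}
+
+	// Plan an evaluable program from the AST.
+	prg, err := env.Program(ast)
+	if err != nil {
+		panic(err)
+	}
+
+	// Evaluate against a concrete context; the result is a CEL Boolean.
+	out, _, err := prg.Eval(map[string]interface{}{
+		"account": map[string]interface{}{"balance": 500.0},
+	})
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(out.Value()) // true
+}
+```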
diff --git a/terraform/providers/google/vendor/cel.dev/expr/WORKSPACE b/terraform/providers/google/vendor/cel.dev/expr/WORKSPACE new file mode 100644 index 00000000000..bb4c469adbb --- /dev/null +++ b/terraform/providers/google/vendor/cel.dev/expr/WORKSPACE @@ -0,0 +1,145 @@ +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +http_archive( + name = "io_bazel_rules_go", + sha256 = "099a9fb96a376ccbbb7d291ed4ecbdfd42f6bc822ab77ae6f1b5cb9e914e94fa", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip", + "https://github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip", + ], +) + +http_archive( + name = "bazel_gazelle", + sha256 = "ecba0f04f96b4960a5b250c8e8eeec42281035970aa8852dda73098274d14a1d", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz", + "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz", + ], +) + +http_archive( + name = "rules_proto", + sha256 = "e017528fd1c91c5a33f15493e3a398181a9e821a804eb7ff5acdd1d2d6c2b18d", + strip_prefix = "rules_proto-4.0.0-3.20.0", + urls = [ + "https://github.com/bazelbuild/rules_proto/archive/refs/tags/4.0.0-3.20.0.tar.gz", + ], +) + +# googleapis as of 05/26/2023 +http_archive( + name = "com_google_googleapis", + strip_prefix = "googleapis-07c27163ac591955d736f3057b1619ece66f5b99", + sha256 = "bd8e735d881fb829751ecb1a77038dda4a8d274c45490cb9fcf004583ee10571", + urls = [ + "https://github.com/googleapis/googleapis/archive/07c27163ac591955d736f3057b1619ece66f5b99.tar.gz", + ], +) + +# protobuf +http_archive( + name = "com_google_protobuf", + sha256 = "8242327e5df8c80ba49e4165250b8f79a76bd11765facefaaecfca7747dc8da2", + strip_prefix = "protobuf-3.21.5", + urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.21.5.zip"], +) + +# googletest +http_archive( + name = "com_google_googletest", + urls = ["https://github.com/google/googletest/archive/master.zip"], + strip_prefix = "googletest-master", +) + +# gflags +http_archive( + name = "com_github_gflags_gflags", + sha256 = "6e16c8bc91b1310a44f3965e616383dbda48f83e8c1eaa2370a215057b00cabe", + strip_prefix = "gflags-77592648e3f3be87d6c7123eb81cbad75f9aef5a", + urls = [ + "https://mirror.bazel.build/github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz", + "https://github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz", + ], +) + +# glog +http_archive( + name = "com_google_glog", + sha256 = "1ee310e5d0a19b9d584a855000434bb724aa744745d5b8ab1855c85bff8a8e21", + strip_prefix = "glog-028d37889a1e80e8a07da1b8945ac706259e5fd8", + urls = [ + "https://mirror.bazel.build/github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz", + "https://github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz", + ], +) + +# absl +http_archive( + name = "com_google_absl", + strip_prefix = "abseil-cpp-master", + urls = ["https://github.com/abseil/abseil-cpp/archive/master.zip"], +) + +load("@io_bazel_rules_go//go:deps.bzl", "go_rules_dependencies", "go_register_toolchains") +load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository") +load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_language") +load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies", "rules_proto_toolchains") +load("@com_google_protobuf//:protobuf_deps.bzl", 
"protobuf_deps") + +switched_rules_by_language( + name = "com_google_googleapis_imports", + cc = True, +) + +# Do *not* call *_dependencies(), etc, yet. See comment at the end. + +# Generated Google APIs protos for Golang +# Generated Google APIs protos for Golang 05/25/2023 +go_repository( + name = "org_golang_google_genproto_googleapis_api", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/genproto/googleapis/api", + sum = "h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ=", + version = "v0.0.0-20230525234035-dd9d682886f9", +) + +# Generated Google APIs protos for Golang 05/25/2023 +go_repository( + name = "org_golang_google_genproto_googleapis_rpc", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/genproto/googleapis/rpc", + sum = "h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM=", + version = "v0.0.0-20230525234030-28d5490b6b19", +) + +# gRPC deps +go_repository( + name = "org_golang_google_grpc", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/grpc", + tag = "v1.49.0", +) + +go_repository( + name = "org_golang_x_net", + importpath = "golang.org/x/net", + sum = "h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=", + version = "v0.0.0-20190311183353-d8887717615a", +) + +go_repository( + name = "org_golang_x_text", + importpath = "golang.org/x/text", + sum = "h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=", + version = "v0.3.2", +) + +# Run the dependencies at the end. These will silently try to import some +# of the above repositories but at different versions, so ours must come first. +go_rules_dependencies() +go_register_toolchains(version = "1.19.1") +gazelle_dependencies() +rules_proto_dependencies() +rules_proto_toolchains() +protobuf_deps() diff --git a/terraform/providers/google/vendor/cel.dev/expr/checked.pb.go b/terraform/providers/google/vendor/cel.dev/expr/checked.pb.go new file mode 100644 index 00000000000..bb225c8ab3e --- /dev/null +++ b/terraform/providers/google/vendor/cel.dev/expr/checked.pb.go @@ -0,0 +1,1432 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/checked.proto + +package expr + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Type_PrimitiveType int32 + +const ( + Type_PRIMITIVE_TYPE_UNSPECIFIED Type_PrimitiveType = 0 + Type_BOOL Type_PrimitiveType = 1 + Type_INT64 Type_PrimitiveType = 2 + Type_UINT64 Type_PrimitiveType = 3 + Type_DOUBLE Type_PrimitiveType = 4 + Type_STRING Type_PrimitiveType = 5 + Type_BYTES Type_PrimitiveType = 6 +) + +// Enum value maps for Type_PrimitiveType. 
+var ( + Type_PrimitiveType_name = map[int32]string{ + 0: "PRIMITIVE_TYPE_UNSPECIFIED", + 1: "BOOL", + 2: "INT64", + 3: "UINT64", + 4: "DOUBLE", + 5: "STRING", + 6: "BYTES", + } + Type_PrimitiveType_value = map[string]int32{ + "PRIMITIVE_TYPE_UNSPECIFIED": 0, + "BOOL": 1, + "INT64": 2, + "UINT64": 3, + "DOUBLE": 4, + "STRING": 5, + "BYTES": 6, + } +) + +func (x Type_PrimitiveType) Enum() *Type_PrimitiveType { + p := new(Type_PrimitiveType) + *p = x + return p +} + +func (x Type_PrimitiveType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Type_PrimitiveType) Descriptor() protoreflect.EnumDescriptor { + return file_cel_expr_checked_proto_enumTypes[0].Descriptor() +} + +func (Type_PrimitiveType) Type() protoreflect.EnumType { + return &file_cel_expr_checked_proto_enumTypes[0] +} + +func (x Type_PrimitiveType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Type_PrimitiveType.Descriptor instead. +func (Type_PrimitiveType) EnumDescriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0} +} + +type Type_WellKnownType int32 + +const ( + Type_WELL_KNOWN_TYPE_UNSPECIFIED Type_WellKnownType = 0 + Type_ANY Type_WellKnownType = 1 + Type_TIMESTAMP Type_WellKnownType = 2 + Type_DURATION Type_WellKnownType = 3 +) + +// Enum value maps for Type_WellKnownType. +var ( + Type_WellKnownType_name = map[int32]string{ + 0: "WELL_KNOWN_TYPE_UNSPECIFIED", + 1: "ANY", + 2: "TIMESTAMP", + 3: "DURATION", + } + Type_WellKnownType_value = map[string]int32{ + "WELL_KNOWN_TYPE_UNSPECIFIED": 0, + "ANY": 1, + "TIMESTAMP": 2, + "DURATION": 3, + } +) + +func (x Type_WellKnownType) Enum() *Type_WellKnownType { + p := new(Type_WellKnownType) + *p = x + return p +} + +func (x Type_WellKnownType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Type_WellKnownType) Descriptor() protoreflect.EnumDescriptor { + return file_cel_expr_checked_proto_enumTypes[1].Descriptor() +} + +func (Type_WellKnownType) Type() protoreflect.EnumType { + return &file_cel_expr_checked_proto_enumTypes[1] +} + +func (x Type_WellKnownType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Type_WellKnownType.Descriptor instead. 
+func (Type_WellKnownType) EnumDescriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1} +} + +type CheckedExpr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ReferenceMap map[int64]*Reference `protobuf:"bytes,2,rep,name=reference_map,json=referenceMap,proto3" json:"reference_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TypeMap map[int64]*Type `protobuf:"bytes,3,rep,name=type_map,json=typeMap,proto3" json:"type_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + SourceInfo *SourceInfo `protobuf:"bytes,5,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"` + ExprVersion string `protobuf:"bytes,6,opt,name=expr_version,json=exprVersion,proto3" json:"expr_version,omitempty"` + Expr *Expr `protobuf:"bytes,4,opt,name=expr,proto3" json:"expr,omitempty"` +} + +func (x *CheckedExpr) Reset() { + *x = CheckedExpr{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CheckedExpr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckedExpr) ProtoMessage() {} + +func (x *CheckedExpr) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckedExpr.ProtoReflect.Descriptor instead. +func (*CheckedExpr) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{0} +} + +func (x *CheckedExpr) GetReferenceMap() map[int64]*Reference { + if x != nil { + return x.ReferenceMap + } + return nil +} + +func (x *CheckedExpr) GetTypeMap() map[int64]*Type { + if x != nil { + return x.TypeMap + } + return nil +} + +func (x *CheckedExpr) GetSourceInfo() *SourceInfo { + if x != nil { + return x.SourceInfo + } + return nil +} + +func (x *CheckedExpr) GetExprVersion() string { + if x != nil { + return x.ExprVersion + } + return "" +} + +func (x *CheckedExpr) GetExpr() *Expr { + if x != nil { + return x.Expr + } + return nil +} + +type Type struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to TypeKind: + // + // *Type_Dyn + // *Type_Null + // *Type_Primitive + // *Type_Wrapper + // *Type_WellKnown + // *Type_ListType_ + // *Type_MapType_ + // *Type_Function + // *Type_MessageType + // *Type_TypeParam + // *Type_Type + // *Type_Error + // *Type_AbstractType_ + TypeKind isType_TypeKind `protobuf_oneof:"type_kind"` +} + +func (x *Type) Reset() { + *x = Type{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type) ProtoMessage() {} + +func (x *Type) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// 
Deprecated: Use Type.ProtoReflect.Descriptor instead. +func (*Type) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1} +} + +func (m *Type) GetTypeKind() isType_TypeKind { + if m != nil { + return m.TypeKind + } + return nil +} + +func (x *Type) GetDyn() *emptypb.Empty { + if x, ok := x.GetTypeKind().(*Type_Dyn); ok { + return x.Dyn + } + return nil +} + +func (x *Type) GetNull() structpb.NullValue { + if x, ok := x.GetTypeKind().(*Type_Null); ok { + return x.Null + } + return structpb.NullValue(0) +} + +func (x *Type) GetPrimitive() Type_PrimitiveType { + if x, ok := x.GetTypeKind().(*Type_Primitive); ok { + return x.Primitive + } + return Type_PRIMITIVE_TYPE_UNSPECIFIED +} + +func (x *Type) GetWrapper() Type_PrimitiveType { + if x, ok := x.GetTypeKind().(*Type_Wrapper); ok { + return x.Wrapper + } + return Type_PRIMITIVE_TYPE_UNSPECIFIED +} + +func (x *Type) GetWellKnown() Type_WellKnownType { + if x, ok := x.GetTypeKind().(*Type_WellKnown); ok { + return x.WellKnown + } + return Type_WELL_KNOWN_TYPE_UNSPECIFIED +} + +func (x *Type) GetListType() *Type_ListType { + if x, ok := x.GetTypeKind().(*Type_ListType_); ok { + return x.ListType + } + return nil +} + +func (x *Type) GetMapType() *Type_MapType { + if x, ok := x.GetTypeKind().(*Type_MapType_); ok { + return x.MapType + } + return nil +} + +func (x *Type) GetFunction() *Type_FunctionType { + if x, ok := x.GetTypeKind().(*Type_Function); ok { + return x.Function + } + return nil +} + +func (x *Type) GetMessageType() string { + if x, ok := x.GetTypeKind().(*Type_MessageType); ok { + return x.MessageType + } + return "" +} + +func (x *Type) GetTypeParam() string { + if x, ok := x.GetTypeKind().(*Type_TypeParam); ok { + return x.TypeParam + } + return "" +} + +func (x *Type) GetType() *Type { + if x, ok := x.GetTypeKind().(*Type_Type); ok { + return x.Type + } + return nil +} + +func (x *Type) GetError() *emptypb.Empty { + if x, ok := x.GetTypeKind().(*Type_Error); ok { + return x.Error + } + return nil +} + +func (x *Type) GetAbstractType() *Type_AbstractType { + if x, ok := x.GetTypeKind().(*Type_AbstractType_); ok { + return x.AbstractType + } + return nil +} + +type isType_TypeKind interface { + isType_TypeKind() +} + +type Type_Dyn struct { + Dyn *emptypb.Empty `protobuf:"bytes,1,opt,name=dyn,proto3,oneof"` +} + +type Type_Null struct { + Null structpb.NullValue `protobuf:"varint,2,opt,name=null,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Type_Primitive struct { + Primitive Type_PrimitiveType `protobuf:"varint,3,opt,name=primitive,proto3,enum=cel.expr.Type_PrimitiveType,oneof"` +} + +type Type_Wrapper struct { + Wrapper Type_PrimitiveType `protobuf:"varint,4,opt,name=wrapper,proto3,enum=cel.expr.Type_PrimitiveType,oneof"` +} + +type Type_WellKnown struct { + WellKnown Type_WellKnownType `protobuf:"varint,5,opt,name=well_known,json=wellKnown,proto3,enum=cel.expr.Type_WellKnownType,oneof"` +} + +type Type_ListType_ struct { + ListType *Type_ListType `protobuf:"bytes,6,opt,name=list_type,json=listType,proto3,oneof"` +} + +type Type_MapType_ struct { + MapType *Type_MapType `protobuf:"bytes,7,opt,name=map_type,json=mapType,proto3,oneof"` +} + +type Type_Function struct { + Function *Type_FunctionType `protobuf:"bytes,8,opt,name=function,proto3,oneof"` +} + +type Type_MessageType struct { + MessageType string `protobuf:"bytes,9,opt,name=message_type,json=messageType,proto3,oneof"` +} + +type Type_TypeParam struct { + TypeParam string 
`protobuf:"bytes,10,opt,name=type_param,json=typeParam,proto3,oneof"` +} + +type Type_Type struct { + Type *Type `protobuf:"bytes,11,opt,name=type,proto3,oneof"` +} + +type Type_Error struct { + Error *emptypb.Empty `protobuf:"bytes,12,opt,name=error,proto3,oneof"` +} + +type Type_AbstractType_ struct { + AbstractType *Type_AbstractType `protobuf:"bytes,14,opt,name=abstract_type,json=abstractType,proto3,oneof"` +} + +func (*Type_Dyn) isType_TypeKind() {} + +func (*Type_Null) isType_TypeKind() {} + +func (*Type_Primitive) isType_TypeKind() {} + +func (*Type_Wrapper) isType_TypeKind() {} + +func (*Type_WellKnown) isType_TypeKind() {} + +func (*Type_ListType_) isType_TypeKind() {} + +func (*Type_MapType_) isType_TypeKind() {} + +func (*Type_Function) isType_TypeKind() {} + +func (*Type_MessageType) isType_TypeKind() {} + +func (*Type_TypeParam) isType_TypeKind() {} + +func (*Type_Type) isType_TypeKind() {} + +func (*Type_Error) isType_TypeKind() {} + +func (*Type_AbstractType_) isType_TypeKind() {} + +type Decl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to DeclKind: + // + // *Decl_Ident + // *Decl_Function + DeclKind isDecl_DeclKind `protobuf_oneof:"decl_kind"` +} + +func (x *Decl) Reset() { + *x = Decl{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl) ProtoMessage() {} + +func (x *Decl) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl.ProtoReflect.Descriptor instead. 
+func (*Decl) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{2} +} + +func (x *Decl) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *Decl) GetDeclKind() isDecl_DeclKind { + if m != nil { + return m.DeclKind + } + return nil +} + +func (x *Decl) GetIdent() *Decl_IdentDecl { + if x, ok := x.GetDeclKind().(*Decl_Ident); ok { + return x.Ident + } + return nil +} + +func (x *Decl) GetFunction() *Decl_FunctionDecl { + if x, ok := x.GetDeclKind().(*Decl_Function); ok { + return x.Function + } + return nil +} + +type isDecl_DeclKind interface { + isDecl_DeclKind() +} + +type Decl_Ident struct { + Ident *Decl_IdentDecl `protobuf:"bytes,2,opt,name=ident,proto3,oneof"` +} + +type Decl_Function struct { + Function *Decl_FunctionDecl `protobuf:"bytes,3,opt,name=function,proto3,oneof"` +} + +func (*Decl_Ident) isDecl_DeclKind() {} + +func (*Decl_Function) isDecl_DeclKind() {} + +type Reference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + OverloadId []string `protobuf:"bytes,3,rep,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"` + Value *Constant `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Reference) Reset() { + *x = Reference{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Reference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Reference) ProtoMessage() {} + +func (x *Reference) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Reference.ProtoReflect.Descriptor instead. +func (*Reference) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{3} +} + +func (x *Reference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Reference) GetOverloadId() []string { + if x != nil { + return x.OverloadId + } + return nil +} + +func (x *Reference) GetValue() *Constant { + if x != nil { + return x.Value + } + return nil +} + +type Type_ListType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ElemType *Type `protobuf:"bytes,1,opt,name=elem_type,json=elemType,proto3" json:"elem_type,omitempty"` +} + +func (x *Type_ListType) Reset() { + *x = Type_ListType{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_ListType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_ListType) ProtoMessage() {} + +func (x *Type_ListType) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_ListType.ProtoReflect.Descriptor instead. 
+func (*Type_ListType) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *Type_ListType) GetElemType() *Type { + if x != nil { + return x.ElemType + } + return nil +} + +type Type_MapType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + KeyType *Type `protobuf:"bytes,1,opt,name=key_type,json=keyType,proto3" json:"key_type,omitempty"` + ValueType *Type `protobuf:"bytes,2,opt,name=value_type,json=valueType,proto3" json:"value_type,omitempty"` +} + +func (x *Type_MapType) Reset() { + *x = Type_MapType{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_MapType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_MapType) ProtoMessage() {} + +func (x *Type_MapType) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_MapType.ProtoReflect.Descriptor instead. +func (*Type_MapType) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *Type_MapType) GetKeyType() *Type { + if x != nil { + return x.KeyType + } + return nil +} + +func (x *Type_MapType) GetValueType() *Type { + if x != nil { + return x.ValueType + } + return nil +} + +type Type_FunctionType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ResultType *Type `protobuf:"bytes,1,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"` + ArgTypes []*Type `protobuf:"bytes,2,rep,name=arg_types,json=argTypes,proto3" json:"arg_types,omitempty"` +} + +func (x *Type_FunctionType) Reset() { + *x = Type_FunctionType{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_FunctionType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_FunctionType) ProtoMessage() {} + +func (x *Type_FunctionType) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_FunctionType.ProtoReflect.Descriptor instead. 
+func (*Type_FunctionType) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 2} +} + +func (x *Type_FunctionType) GetResultType() *Type { + if x != nil { + return x.ResultType + } + return nil +} + +func (x *Type_FunctionType) GetArgTypes() []*Type { + if x != nil { + return x.ArgTypes + } + return nil +} + +type Type_AbstractType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + ParameterTypes []*Type `protobuf:"bytes,2,rep,name=parameter_types,json=parameterTypes,proto3" json:"parameter_types,omitempty"` +} + +func (x *Type_AbstractType) Reset() { + *x = Type_AbstractType{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_AbstractType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_AbstractType) ProtoMessage() {} + +func (x *Type_AbstractType) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_AbstractType.ProtoReflect.Descriptor instead. +func (*Type_AbstractType) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 3} +} + +func (x *Type_AbstractType) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Type_AbstractType) GetParameterTypes() []*Type { + if x != nil { + return x.ParameterTypes + } + return nil +} + +type Decl_IdentDecl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type *Type `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Value *Constant `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Doc string `protobuf:"bytes,3,opt,name=doc,proto3" json:"doc,omitempty"` +} + +func (x *Decl_IdentDecl) Reset() { + *x = Decl_IdentDecl{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl_IdentDecl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl_IdentDecl) ProtoMessage() {} + +func (x *Decl_IdentDecl) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl_IdentDecl.ProtoReflect.Descriptor instead. 
+func (*Decl_IdentDecl) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *Decl_IdentDecl) GetType() *Type { + if x != nil { + return x.Type + } + return nil +} + +func (x *Decl_IdentDecl) GetValue() *Constant { + if x != nil { + return x.Value + } + return nil +} + +func (x *Decl_IdentDecl) GetDoc() string { + if x != nil { + return x.Doc + } + return "" +} + +type Decl_FunctionDecl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Overloads []*Decl_FunctionDecl_Overload `protobuf:"bytes,1,rep,name=overloads,proto3" json:"overloads,omitempty"` +} + +func (x *Decl_FunctionDecl) Reset() { + *x = Decl_FunctionDecl{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl_FunctionDecl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl_FunctionDecl) ProtoMessage() {} + +func (x *Decl_FunctionDecl) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl_FunctionDecl.ProtoReflect.Descriptor instead. +func (*Decl_FunctionDecl) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1} +} + +func (x *Decl_FunctionDecl) GetOverloads() []*Decl_FunctionDecl_Overload { + if x != nil { + return x.Overloads + } + return nil +} + +type Decl_FunctionDecl_Overload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OverloadId string `protobuf:"bytes,1,opt,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"` + Params []*Type `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty"` + TypeParams []string `protobuf:"bytes,3,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"` + ResultType *Type `protobuf:"bytes,4,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"` + IsInstanceFunction bool `protobuf:"varint,5,opt,name=is_instance_function,json=isInstanceFunction,proto3" json:"is_instance_function,omitempty"` + Doc string `protobuf:"bytes,6,opt,name=doc,proto3" json:"doc,omitempty"` +} + +func (x *Decl_FunctionDecl_Overload) Reset() { + *x = Decl_FunctionDecl_Overload{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl_FunctionDecl_Overload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl_FunctionDecl_Overload) ProtoMessage() {} + +func (x *Decl_FunctionDecl_Overload) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl_FunctionDecl_Overload.ProtoReflect.Descriptor instead. 
+func (*Decl_FunctionDecl_Overload) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1, 0} +} + +func (x *Decl_FunctionDecl_Overload) GetOverloadId() string { + if x != nil { + return x.OverloadId + } + return "" +} + +func (x *Decl_FunctionDecl_Overload) GetParams() []*Type { + if x != nil { + return x.Params + } + return nil +} + +func (x *Decl_FunctionDecl_Overload) GetTypeParams() []string { + if x != nil { + return x.TypeParams + } + return nil +} + +func (x *Decl_FunctionDecl_Overload) GetResultType() *Type { + if x != nil { + return x.ResultType + } + return nil +} + +func (x *Decl_FunctionDecl_Overload) GetIsInstanceFunction() bool { + if x != nil { + return x.IsInstanceFunction + } + return false +} + +func (x *Decl_FunctionDecl_Overload) GetDoc() string { + if x != nil { + return x.Doc + } + return "" +} + +var File_cel_expr_checked_proto protoreflect.FileDescriptor + +var file_cel_expr_checked_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x1a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, + 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xba, 0x03, 0x0a, 0x0b, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, + 0x45, 0x78, 0x70, 0x72, 0x12, 0x4c, 0x0a, 0x0d, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, + 0x70, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, + 0x61, 0x70, 0x12, 0x3d, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, + 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x4d, 0x61, + 0x70, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x72, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x65, 0x78, 0x70, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x65, + 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x1a, + 0x54, 0x0a, 0x11, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45, + 0x6e, 0x74, 0x72, 
0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4a, 0x0a, 0x0c, 0x54, 0x79, 0x70, 0x65, 0x4d, 0x61, 0x70, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0xe6, 0x09, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x03, 0x64, 0x79, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, + 0x00, 0x52, 0x03, 0x64, 0x79, 0x6e, 0x12, 0x30, 0x0a, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x48, 0x00, 0x52, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x12, 0x3c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6d, + 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, + 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x70, 0x72, 0x69, + 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, + 0x12, 0x3d, 0x0a, 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x54, 0x79, 0x70, 0x65, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x77, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x12, + 0x36, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x6c, + 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x61, 0x70, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, + 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x08, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 
0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0a, + 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x12, 0x24, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x12, 0x42, 0x0a, 0x0d, 0x61, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x62, 0x73, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x62, 0x73, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x37, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x54, 0x79, 0x70, 0x65, + 0x1a, 0x63, 0x0a, 0x07, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x6b, + 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x6c, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x61, 0x72, 0x67, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x1a, 0x5b, 0x0a, 0x0c, 0x41, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 
0x2e, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x0e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x73, + 0x22, 0x73, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x52, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x49, + 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, + 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x59, + 0x54, 0x45, 0x53, 0x10, 0x06, 0x22, 0x56, 0x0a, 0x0d, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, + 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x57, 0x45, 0x4c, 0x4c, 0x5f, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59, 0x10, 0x01, + 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x02, 0x12, + 0x0c, 0x0a, 0x08, 0x44, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x42, 0x0b, 0x0a, + 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc2, 0x04, 0x0a, 0x04, 0x44, + 0x65, 0x63, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, 0x6c, + 0x48, 0x00, 0x52, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x08, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x63, 0x6c, 0x48, 0x00, 0x52, 0x08, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6b, 0x0a, 0x09, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, + 0x6c, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6f, + 0x63, 0x1a, 0xbe, 0x02, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x63, 0x6c, 0x12, 0x42, 0x0a, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x63, 0x6c, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x09, 0x6f, 0x76, 0x65, + 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x1a, 0xe9, 0x01, 0x0a, 0x08, 0x4f, 0x76, 0x65, 0x72, 
0x6c, + 0x6f, 0x61, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, + 0x61, 0x64, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x1f, 0x0a, 0x0b, + 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2f, 0x0a, + 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30, + 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, + 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, + 0x6f, 0x63, 0x42, 0x0b, 0x0a, 0x09, 0x64, 0x65, 0x63, 0x6c, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, + 0x6a, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x49, + 0x64, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2c, 0x0a, 0x0c, 0x64, + 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x44, 0x65, 0x63, + 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, + 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_cel_expr_checked_proto_rawDescOnce sync.Once + file_cel_expr_checked_proto_rawDescData = file_cel_expr_checked_proto_rawDesc +) + +func file_cel_expr_checked_proto_rawDescGZIP() []byte { + file_cel_expr_checked_proto_rawDescOnce.Do(func() { + file_cel_expr_checked_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_checked_proto_rawDescData) + }) + return file_cel_expr_checked_proto_rawDescData +} + +var file_cel_expr_checked_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_cel_expr_checked_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_cel_expr_checked_proto_goTypes = []interface{}{ + (Type_PrimitiveType)(0), // 0: cel.expr.Type.PrimitiveType + (Type_WellKnownType)(0), // 1: cel.expr.Type.WellKnownType + (*CheckedExpr)(nil), // 2: cel.expr.CheckedExpr + (*Type)(nil), // 3: cel.expr.Type + (*Decl)(nil), // 4: cel.expr.Decl + (*Reference)(nil), // 5: cel.expr.Reference + nil, // 6: cel.expr.CheckedExpr.ReferenceMapEntry + nil, // 7: cel.expr.CheckedExpr.TypeMapEntry + (*Type_ListType)(nil), // 8: cel.expr.Type.ListType + 
(*Type_MapType)(nil), // 9: cel.expr.Type.MapType + (*Type_FunctionType)(nil), // 10: cel.expr.Type.FunctionType + (*Type_AbstractType)(nil), // 11: cel.expr.Type.AbstractType + (*Decl_IdentDecl)(nil), // 12: cel.expr.Decl.IdentDecl + (*Decl_FunctionDecl)(nil), // 13: cel.expr.Decl.FunctionDecl + (*Decl_FunctionDecl_Overload)(nil), // 14: cel.expr.Decl.FunctionDecl.Overload + (*SourceInfo)(nil), // 15: cel.expr.SourceInfo + (*Expr)(nil), // 16: cel.expr.Expr + (*emptypb.Empty)(nil), // 17: google.protobuf.Empty + (structpb.NullValue)(0), // 18: google.protobuf.NullValue + (*Constant)(nil), // 19: cel.expr.Constant +} +var file_cel_expr_checked_proto_depIdxs = []int32{ + 6, // 0: cel.expr.CheckedExpr.reference_map:type_name -> cel.expr.CheckedExpr.ReferenceMapEntry + 7, // 1: cel.expr.CheckedExpr.type_map:type_name -> cel.expr.CheckedExpr.TypeMapEntry + 15, // 2: cel.expr.CheckedExpr.source_info:type_name -> cel.expr.SourceInfo + 16, // 3: cel.expr.CheckedExpr.expr:type_name -> cel.expr.Expr + 17, // 4: cel.expr.Type.dyn:type_name -> google.protobuf.Empty + 18, // 5: cel.expr.Type.null:type_name -> google.protobuf.NullValue + 0, // 6: cel.expr.Type.primitive:type_name -> cel.expr.Type.PrimitiveType + 0, // 7: cel.expr.Type.wrapper:type_name -> cel.expr.Type.PrimitiveType + 1, // 8: cel.expr.Type.well_known:type_name -> cel.expr.Type.WellKnownType + 8, // 9: cel.expr.Type.list_type:type_name -> cel.expr.Type.ListType + 9, // 10: cel.expr.Type.map_type:type_name -> cel.expr.Type.MapType + 10, // 11: cel.expr.Type.function:type_name -> cel.expr.Type.FunctionType + 3, // 12: cel.expr.Type.type:type_name -> cel.expr.Type + 17, // 13: cel.expr.Type.error:type_name -> google.protobuf.Empty + 11, // 14: cel.expr.Type.abstract_type:type_name -> cel.expr.Type.AbstractType + 12, // 15: cel.expr.Decl.ident:type_name -> cel.expr.Decl.IdentDecl + 13, // 16: cel.expr.Decl.function:type_name -> cel.expr.Decl.FunctionDecl + 19, // 17: cel.expr.Reference.value:type_name -> cel.expr.Constant + 5, // 18: cel.expr.CheckedExpr.ReferenceMapEntry.value:type_name -> cel.expr.Reference + 3, // 19: cel.expr.CheckedExpr.TypeMapEntry.value:type_name -> cel.expr.Type + 3, // 20: cel.expr.Type.ListType.elem_type:type_name -> cel.expr.Type + 3, // 21: cel.expr.Type.MapType.key_type:type_name -> cel.expr.Type + 3, // 22: cel.expr.Type.MapType.value_type:type_name -> cel.expr.Type + 3, // 23: cel.expr.Type.FunctionType.result_type:type_name -> cel.expr.Type + 3, // 24: cel.expr.Type.FunctionType.arg_types:type_name -> cel.expr.Type + 3, // 25: cel.expr.Type.AbstractType.parameter_types:type_name -> cel.expr.Type + 3, // 26: cel.expr.Decl.IdentDecl.type:type_name -> cel.expr.Type + 19, // 27: cel.expr.Decl.IdentDecl.value:type_name -> cel.expr.Constant + 14, // 28: cel.expr.Decl.FunctionDecl.overloads:type_name -> cel.expr.Decl.FunctionDecl.Overload + 3, // 29: cel.expr.Decl.FunctionDecl.Overload.params:type_name -> cel.expr.Type + 3, // 30: cel.expr.Decl.FunctionDecl.Overload.result_type:type_name -> cel.expr.Type + 31, // [31:31] is the sub-list for method output_type + 31, // [31:31] is the sub-list for method input_type + 31, // [31:31] is the sub-list for extension type_name + 31, // [31:31] is the sub-list for extension extendee + 0, // [0:31] is the sub-list for field type_name +} + +func init() { file_cel_expr_checked_proto_init() } +func file_cel_expr_checked_proto_init() { + if File_cel_expr_checked_proto != nil { + return + } + file_cel_expr_syntax_proto_init() + if !protoimpl.UnsafeEnabled { + 
file_cel_expr_checked_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckedExpr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Reference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_ListType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_MapType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_FunctionType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_AbstractType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl_IdentDecl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl_FunctionDecl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl_FunctionDecl_Overload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cel_expr_checked_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*Type_Dyn)(nil), + (*Type_Null)(nil), + (*Type_Primitive)(nil), + (*Type_Wrapper)(nil), + (*Type_WellKnown)(nil), + (*Type_ListType_)(nil), + (*Type_MapType_)(nil), + (*Type_Function)(nil), + (*Type_MessageType)(nil), + (*Type_TypeParam)(nil), + (*Type_Type)(nil), + (*Type_Error)(nil), + (*Type_AbstractType_)(nil), + } + file_cel_expr_checked_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Decl_Ident)(nil), + (*Decl_Function)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_cel_expr_checked_proto_rawDesc,
+			NumEnums:      2,
+			NumMessages:   13,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_cel_expr_checked_proto_goTypes,
+		DependencyIndexes: file_cel_expr_checked_proto_depIdxs,
+		EnumInfos:         file_cel_expr_checked_proto_enumTypes,
+		MessageInfos:      file_cel_expr_checked_proto_msgTypes,
+	}.Build()
+	File_cel_expr_checked_proto = out.File
+	file_cel_expr_checked_proto_rawDesc = nil
+	file_cel_expr_checked_proto_goTypes = nil
+	file_cel_expr_checked_proto_depIdxs = nil
+}
diff --git a/terraform/providers/google/vendor/cel.dev/expr/cloudbuild.yaml b/terraform/providers/google/vendor/cel.dev/expr/cloudbuild.yaml
new file mode 100644
index 00000000000..8a8ea3763f6
--- /dev/null
+++ b/terraform/providers/google/vendor/cel.dev/expr/cloudbuild.yaml
@@ -0,0 +1,9 @@
+steps:
+- name: 'gcr.io/cloud-builders/bazel:6.4.0'
+  entrypoint: bazel
+  args: ['test', '--test_output=errors', '...']
+  id: bazel-test
+  waitFor: ['-']
+timeout: 15m
+options:
+  machineType: 'N1_HIGHCPU_32'
diff --git a/terraform/providers/google/vendor/cel.dev/expr/eval.pb.go b/terraform/providers/google/vendor/cel.dev/expr/eval.pb.go
new file mode 100644
index 00000000000..8f651f9cc6a
--- /dev/null
+++ b/terraform/providers/google/vendor/cel.dev/expr/eval.pb.go
@@ -0,0 +1,490 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.28.1
+// 	protoc        v3.21.5
+// source: cel/expr/eval.proto
+
+package expr
+
+import (
+	status "google.golang.org/genproto/googleapis/rpc/status"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type EvalState struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Values  []*ExprValue        `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+	Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"`
+}
+
+func (x *EvalState) Reset() {
+	*x = EvalState{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_cel_expr_eval_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *EvalState) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EvalState) ProtoMessage() {}
+
+func (x *EvalState) ProtoReflect() protoreflect.Message {
+	mi := &file_cel_expr_eval_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use EvalState.ProtoReflect.Descriptor instead.
+func (*EvalState) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{0} +} + +func (x *EvalState) GetValues() []*ExprValue { + if x != nil { + return x.Values + } + return nil +} + +func (x *EvalState) GetResults() []*EvalState_Result { + if x != nil { + return x.Results + } + return nil +} + +type ExprValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Kind: + // + // *ExprValue_Value + // *ExprValue_Error + // *ExprValue_Unknown + Kind isExprValue_Kind `protobuf_oneof:"kind"` +} + +func (x *ExprValue) Reset() { + *x = ExprValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExprValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExprValue) ProtoMessage() {} + +func (x *ExprValue) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExprValue.ProtoReflect.Descriptor instead. +func (*ExprValue) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{1} +} + +func (m *ExprValue) GetKind() isExprValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *ExprValue) GetValue() *Value { + if x, ok := x.GetKind().(*ExprValue_Value); ok { + return x.Value + } + return nil +} + +func (x *ExprValue) GetError() *ErrorSet { + if x, ok := x.GetKind().(*ExprValue_Error); ok { + return x.Error + } + return nil +} + +func (x *ExprValue) GetUnknown() *UnknownSet { + if x, ok := x.GetKind().(*ExprValue_Unknown); ok { + return x.Unknown + } + return nil +} + +type isExprValue_Kind interface { + isExprValue_Kind() +} + +type ExprValue_Value struct { + Value *Value `protobuf:"bytes,1,opt,name=value,proto3,oneof"` +} + +type ExprValue_Error struct { + Error *ErrorSet `protobuf:"bytes,2,opt,name=error,proto3,oneof"` +} + +type ExprValue_Unknown struct { + Unknown *UnknownSet `protobuf:"bytes,3,opt,name=unknown,proto3,oneof"` +} + +func (*ExprValue_Value) isExprValue_Kind() {} + +func (*ExprValue_Error) isExprValue_Kind() {} + +func (*ExprValue_Unknown) isExprValue_Kind() {} + +type ErrorSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Errors []*status.Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"` +} + +func (x *ErrorSet) Reset() { + *x = ErrorSet{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorSet) ProtoMessage() {} + +func (x *ErrorSet) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorSet.ProtoReflect.Descriptor instead. 
+func (*ErrorSet) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{2} +} + +func (x *ErrorSet) GetErrors() []*status.Status { + if x != nil { + return x.Errors + } + return nil +} + +type UnknownSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"` +} + +func (x *UnknownSet) Reset() { + *x = UnknownSet{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UnknownSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnknownSet) ProtoMessage() {} + +func (x *UnknownSet) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnknownSet.ProtoReflect.Descriptor instead. +func (*UnknownSet) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{3} +} + +func (x *UnknownSet) GetExprs() []int64 { + if x != nil { + return x.Exprs + } + return nil +} + +type EvalState_Result struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *EvalState_Result) Reset() { + *x = EvalState_Result{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EvalState_Result) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EvalState_Result) ProtoMessage() {} + +func (x *EvalState_Result) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EvalState_Result.ProtoReflect.Descriptor instead. 
+func (*EvalState_Result) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *EvalState_Result) GetExpr() int64 { + if x != nil { + return x.Expr + } + return 0 +} + +func (x *EvalState_Result) GetValue() int64 { + if x != nil { + return x.Value + } + return 0 +} + +var File_cel_expr_eval_proto protoreflect.FileDescriptor + +var file_cel_expr_eval_proto_rawDesc = []byte{ + 0x0a, 0x13, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x1a, + 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, + 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa2, + 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, 0x0a, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, + 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, + 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x78, 0x70, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, + 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, + 0x22, 0x36, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x06, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c, + 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 
0x45, 0x76, + 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, + 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_cel_expr_eval_proto_rawDescOnce sync.Once + file_cel_expr_eval_proto_rawDescData = file_cel_expr_eval_proto_rawDesc +) + +func file_cel_expr_eval_proto_rawDescGZIP() []byte { + file_cel_expr_eval_proto_rawDescOnce.Do(func() { + file_cel_expr_eval_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_eval_proto_rawDescData) + }) + return file_cel_expr_eval_proto_rawDescData +} + +var file_cel_expr_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_cel_expr_eval_proto_goTypes = []interface{}{ + (*EvalState)(nil), // 0: cel.expr.EvalState + (*ExprValue)(nil), // 1: cel.expr.ExprValue + (*ErrorSet)(nil), // 2: cel.expr.ErrorSet + (*UnknownSet)(nil), // 3: cel.expr.UnknownSet + (*EvalState_Result)(nil), // 4: cel.expr.EvalState.Result + (*Value)(nil), // 5: cel.expr.Value + (*status.Status)(nil), // 6: google.rpc.Status +} +var file_cel_expr_eval_proto_depIdxs = []int32{ + 1, // 0: cel.expr.EvalState.values:type_name -> cel.expr.ExprValue + 4, // 1: cel.expr.EvalState.results:type_name -> cel.expr.EvalState.Result + 5, // 2: cel.expr.ExprValue.value:type_name -> cel.expr.Value + 2, // 3: cel.expr.ExprValue.error:type_name -> cel.expr.ErrorSet + 3, // 4: cel.expr.ExprValue.unknown:type_name -> cel.expr.UnknownSet + 6, // 5: cel.expr.ErrorSet.errors:type_name -> google.rpc.Status + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_cel_expr_eval_proto_init() } +func file_cel_expr_eval_proto_init() { + if File_cel_expr_eval_proto != nil { + return + } + file_cel_expr_value_proto_init() + if !protoimpl.UnsafeEnabled { + file_cel_expr_eval_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EvalState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_eval_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExprValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_eval_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_eval_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UnknownSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_eval_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EvalState_Result); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cel_expr_eval_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*ExprValue_Value)(nil), + (*ExprValue_Error)(nil), + (*ExprValue_Unknown)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + 
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_cel_expr_eval_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   5,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_cel_expr_eval_proto_goTypes,
+		DependencyIndexes: file_cel_expr_eval_proto_depIdxs,
+		MessageInfos:      file_cel_expr_eval_proto_msgTypes,
+	}.Build()
+	File_cel_expr_eval_proto = out.File
+	file_cel_expr_eval_proto_rawDesc = nil
+	file_cel_expr_eval_proto_goTypes = nil
+	file_cel_expr_eval_proto_depIdxs = nil
+}
diff --git a/terraform/providers/google/vendor/cel.dev/expr/explain.pb.go b/terraform/providers/google/vendor/cel.dev/expr/explain.pb.go
new file mode 100644
index 00000000000..79fd5443b96
--- /dev/null
+++ b/terraform/providers/google/vendor/cel.dev/expr/explain.pb.go
@@ -0,0 +1,236 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.28.1
+// 	protoc        v3.21.5
+// source: cel/expr/explain.proto
+
+package expr
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Deprecated: Do not use.
+type Explain struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Values    []*Value            `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+	ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"`
+}
+
+func (x *Explain) Reset() {
+	*x = Explain{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_cel_expr_explain_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Explain) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Explain) ProtoMessage() {}
+
+func (x *Explain) ProtoReflect() protoreflect.Message {
+	mi := &file_cel_expr_explain_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Explain.ProtoReflect.Descriptor instead.
+func (*Explain) Descriptor() ([]byte, []int) { + return file_cel_expr_explain_proto_rawDescGZIP(), []int{0} +} + +func (x *Explain) GetValues() []*Value { + if x != nil { + return x.Values + } + return nil +} + +func (x *Explain) GetExprSteps() []*Explain_ExprStep { + if x != nil { + return x.ExprSteps + } + return nil +} + +type Explain_ExprStep struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"` +} + +func (x *Explain_ExprStep) Reset() { + *x = Explain_ExprStep{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_explain_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Explain_ExprStep) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Explain_ExprStep) ProtoMessage() {} + +func (x *Explain_ExprStep) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_explain_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Explain_ExprStep.ProtoReflect.Descriptor instead. +func (*Explain_ExprStep) Descriptor() ([]byte, []int) { + return file_cel_expr_explain_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Explain_ExprStep) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *Explain_ExprStep) GetValueIndex() int32 { + if x != nil { + return x.ValueIndex + } + return 0 +} + +var File_cel_expr_explain_proto protoreflect.FileDescriptor + +var file_cel_expr_explain_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x78, 0x70, 0x6c, 0x61, + 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x70, + 0x6c, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x39, 0x0a, + 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, + 0x6c, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x52, 0x09, 0x65, + 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x3b, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x72, + 0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x02, 0x18, 0x01, 0x42, 0x2f, 0x0a, 0x0c, 0x64, 0x65, 0x76, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0c, 0x45, 0x78, 0x70, 0x6c, 0x61, + 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, + 0x65, 0x76, 0x2f, 
0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x33,
+}
+
+var (
+	file_cel_expr_explain_proto_rawDescOnce sync.Once
+	file_cel_expr_explain_proto_rawDescData = file_cel_expr_explain_proto_rawDesc
+)
+
+func file_cel_expr_explain_proto_rawDescGZIP() []byte {
+	file_cel_expr_explain_proto_rawDescOnce.Do(func() {
+		file_cel_expr_explain_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_explain_proto_rawDescData)
+	})
+	return file_cel_expr_explain_proto_rawDescData
+}
+
+var file_cel_expr_explain_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_cel_expr_explain_proto_goTypes = []interface{}{
+	(*Explain)(nil),          // 0: cel.expr.Explain
+	(*Explain_ExprStep)(nil), // 1: cel.expr.Explain.ExprStep
+	(*Value)(nil),            // 2: cel.expr.Value
+}
+var file_cel_expr_explain_proto_depIdxs = []int32{
+	2, // 0: cel.expr.Explain.values:type_name -> cel.expr.Value
+	1, // 1: cel.expr.Explain.expr_steps:type_name -> cel.expr.Explain.ExprStep
+	2, // [2:2] is the sub-list for method output_type
+	2, // [2:2] is the sub-list for method input_type
+	2, // [2:2] is the sub-list for extension type_name
+	2, // [2:2] is the sub-list for extension extendee
+	0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_explain_proto_init() }
+func file_cel_expr_explain_proto_init() {
+	if File_cel_expr_explain_proto != nil {
+		return
+	}
+	file_cel_expr_value_proto_init()
+	if !protoimpl.UnsafeEnabled {
+		file_cel_expr_explain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Explain); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_cel_expr_explain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Explain_ExprStep); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_cel_expr_explain_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   2,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_cel_expr_explain_proto_goTypes,
+		DependencyIndexes: file_cel_expr_explain_proto_depIdxs,
+		MessageInfos:      file_cel_expr_explain_proto_msgTypes,
+	}.Build()
+	File_cel_expr_explain_proto = out.File
+	file_cel_expr_explain_proto_rawDesc = nil
+	file_cel_expr_explain_proto_goTypes = nil
+	file_cel_expr_explain_proto_depIdxs = nil
+}
diff --git a/terraform/providers/google/vendor/cel.dev/expr/regen_go_proto.sh b/terraform/providers/google/vendor/cel.dev/expr/regen_go_proto.sh
new file mode 100644
index 00000000000..abf2f9788ea
--- /dev/null
+++ b/terraform/providers/google/vendor/cel.dev/expr/regen_go_proto.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+bazel build //proto/test/...
+files=($(bazel aquery 'kind(proto, //proto/...)' | grep Outputs | grep "[.]pb[.]go" | sed 's/Outputs: \[//' | sed 's/\]//' | tr "," "\n"))
+for src in ${files[@]};
+do
+  dst=$(echo $src | sed 's/\(.*\%\/github.com\/google\/cel-spec\/\(.*\)\)/\2/')
+  echo "copying $dst"
+  $(cp $src $dst)
+done
diff --git a/terraform/providers/google/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh b/terraform/providers/google/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh
new file mode 100644
index 00000000000..9a13479e401
--- /dev/null
+++ b/terraform/providers/google/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+bazel build //proto/cel/expr:all
+
+rm -vf ./*.pb.go
+
+files=( $(bazel cquery //proto/cel/expr:expr_go_proto --output=starlark --starlark:expr="'\n'.join([f.path for f in target.output_groups.go_generated_srcs.to_list()])") )
+for src in "${files[@]}";
+do
+  cp -v "${src}" ./
+done
diff --git a/terraform/providers/google/vendor/cel.dev/expr/syntax.pb.go b/terraform/providers/google/vendor/cel.dev/expr/syntax.pb.go
new file mode 100644
index 00000000000..48a952872e8
--- /dev/null
+++ b/terraform/providers/google/vendor/cel.dev/expr/syntax.pb.go
@@ -0,0 +1,1633 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.28.1
+// 	protoc        v3.21.5
+// source: cel/expr/syntax.proto
+
+package expr
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	durationpb "google.golang.org/protobuf/types/known/durationpb"
+	structpb "google.golang.org/protobuf/types/known/structpb"
+	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type SourceInfo_Extension_Component int32
+
+const (
+	SourceInfo_Extension_COMPONENT_UNSPECIFIED  SourceInfo_Extension_Component = 0
+	SourceInfo_Extension_COMPONENT_PARSER       SourceInfo_Extension_Component = 1
+	SourceInfo_Extension_COMPONENT_TYPE_CHECKER SourceInfo_Extension_Component = 2
+	SourceInfo_Extension_COMPONENT_RUNTIME      SourceInfo_Extension_Component = 3
+)
+
+// Enum value maps for SourceInfo_Extension_Component.
+var ( + SourceInfo_Extension_Component_name = map[int32]string{ + 0: "COMPONENT_UNSPECIFIED", + 1: "COMPONENT_PARSER", + 2: "COMPONENT_TYPE_CHECKER", + 3: "COMPONENT_RUNTIME", + } + SourceInfo_Extension_Component_value = map[string]int32{ + "COMPONENT_UNSPECIFIED": 0, + "COMPONENT_PARSER": 1, + "COMPONENT_TYPE_CHECKER": 2, + "COMPONENT_RUNTIME": 3, + } +) + +func (x SourceInfo_Extension_Component) Enum() *SourceInfo_Extension_Component { + p := new(SourceInfo_Extension_Component) + *p = x + return p +} + +func (x SourceInfo_Extension_Component) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SourceInfo_Extension_Component) Descriptor() protoreflect.EnumDescriptor { + return file_cel_expr_syntax_proto_enumTypes[0].Descriptor() +} + +func (SourceInfo_Extension_Component) Type() protoreflect.EnumType { + return &file_cel_expr_syntax_proto_enumTypes[0] +} + +func (x SourceInfo_Extension_Component) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SourceInfo_Extension_Component.Descriptor instead. +func (SourceInfo_Extension_Component) EnumDescriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0} +} + +type ParsedExpr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Expr *Expr `protobuf:"bytes,2,opt,name=expr,proto3" json:"expr,omitempty"` + SourceInfo *SourceInfo `protobuf:"bytes,3,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"` +} + +func (x *ParsedExpr) Reset() { + *x = ParsedExpr{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ParsedExpr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ParsedExpr) ProtoMessage() {} + +func (x *ParsedExpr) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ParsedExpr.ProtoReflect.Descriptor instead. 
+func (*ParsedExpr) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{0} +} + +func (x *ParsedExpr) GetExpr() *Expr { + if x != nil { + return x.Expr + } + return nil +} + +func (x *ParsedExpr) GetSourceInfo() *SourceInfo { + if x != nil { + return x.SourceInfo + } + return nil +} + +type Expr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // Types that are assignable to ExprKind: + // + // *Expr_ConstExpr + // *Expr_IdentExpr + // *Expr_SelectExpr + // *Expr_CallExpr + // *Expr_ListExpr + // *Expr_StructExpr + // *Expr_ComprehensionExpr + ExprKind isExpr_ExprKind `protobuf_oneof:"expr_kind"` +} + +func (x *Expr) Reset() { + *x = Expr{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr) ProtoMessage() {} + +func (x *Expr) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr.ProtoReflect.Descriptor instead. +func (*Expr) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1} +} + +func (x *Expr) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (m *Expr) GetExprKind() isExpr_ExprKind { + if m != nil { + return m.ExprKind + } + return nil +} + +func (x *Expr) GetConstExpr() *Constant { + if x, ok := x.GetExprKind().(*Expr_ConstExpr); ok { + return x.ConstExpr + } + return nil +} + +func (x *Expr) GetIdentExpr() *Expr_Ident { + if x, ok := x.GetExprKind().(*Expr_IdentExpr); ok { + return x.IdentExpr + } + return nil +} + +func (x *Expr) GetSelectExpr() *Expr_Select { + if x, ok := x.GetExprKind().(*Expr_SelectExpr); ok { + return x.SelectExpr + } + return nil +} + +func (x *Expr) GetCallExpr() *Expr_Call { + if x, ok := x.GetExprKind().(*Expr_CallExpr); ok { + return x.CallExpr + } + return nil +} + +func (x *Expr) GetListExpr() *Expr_CreateList { + if x, ok := x.GetExprKind().(*Expr_ListExpr); ok { + return x.ListExpr + } + return nil +} + +func (x *Expr) GetStructExpr() *Expr_CreateStruct { + if x, ok := x.GetExprKind().(*Expr_StructExpr); ok { + return x.StructExpr + } + return nil +} + +func (x *Expr) GetComprehensionExpr() *Expr_Comprehension { + if x, ok := x.GetExprKind().(*Expr_ComprehensionExpr); ok { + return x.ComprehensionExpr + } + return nil +} + +type isExpr_ExprKind interface { + isExpr_ExprKind() +} + +type Expr_ConstExpr struct { + ConstExpr *Constant `protobuf:"bytes,3,opt,name=const_expr,json=constExpr,proto3,oneof"` +} + +type Expr_IdentExpr struct { + IdentExpr *Expr_Ident `protobuf:"bytes,4,opt,name=ident_expr,json=identExpr,proto3,oneof"` +} + +type Expr_SelectExpr struct { + SelectExpr *Expr_Select `protobuf:"bytes,5,opt,name=select_expr,json=selectExpr,proto3,oneof"` +} + +type Expr_CallExpr struct { + CallExpr *Expr_Call `protobuf:"bytes,6,opt,name=call_expr,json=callExpr,proto3,oneof"` +} + +type Expr_ListExpr struct { + ListExpr *Expr_CreateList `protobuf:"bytes,7,opt,name=list_expr,json=listExpr,proto3,oneof"` +} + +type Expr_StructExpr struct { + StructExpr 
*Expr_CreateStruct `protobuf:"bytes,8,opt,name=struct_expr,json=structExpr,proto3,oneof"` +} + +type Expr_ComprehensionExpr struct { + ComprehensionExpr *Expr_Comprehension `protobuf:"bytes,9,opt,name=comprehension_expr,json=comprehensionExpr,proto3,oneof"` +} + +func (*Expr_ConstExpr) isExpr_ExprKind() {} + +func (*Expr_IdentExpr) isExpr_ExprKind() {} + +func (*Expr_SelectExpr) isExpr_ExprKind() {} + +func (*Expr_CallExpr) isExpr_ExprKind() {} + +func (*Expr_ListExpr) isExpr_ExprKind() {} + +func (*Expr_StructExpr) isExpr_ExprKind() {} + +func (*Expr_ComprehensionExpr) isExpr_ExprKind() {} + +type Constant struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to ConstantKind: + // + // *Constant_NullValue + // *Constant_BoolValue + // *Constant_Int64Value + // *Constant_Uint64Value + // *Constant_DoubleValue + // *Constant_StringValue + // *Constant_BytesValue + // *Constant_DurationValue + // *Constant_TimestampValue + ConstantKind isConstant_ConstantKind `protobuf_oneof:"constant_kind"` +} + +func (x *Constant) Reset() { + *x = Constant{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Constant) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Constant) ProtoMessage() {} + +func (x *Constant) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Constant.ProtoReflect.Descriptor instead. +func (*Constant) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{2} +} + +func (m *Constant) GetConstantKind() isConstant_ConstantKind { + if m != nil { + return m.ConstantKind + } + return nil +} + +func (x *Constant) GetNullValue() structpb.NullValue { + if x, ok := x.GetConstantKind().(*Constant_NullValue); ok { + return x.NullValue + } + return structpb.NullValue(0) +} + +func (x *Constant) GetBoolValue() bool { + if x, ok := x.GetConstantKind().(*Constant_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *Constant) GetInt64Value() int64 { + if x, ok := x.GetConstantKind().(*Constant_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (x *Constant) GetUint64Value() uint64 { + if x, ok := x.GetConstantKind().(*Constant_Uint64Value); ok { + return x.Uint64Value + } + return 0 +} + +func (x *Constant) GetDoubleValue() float64 { + if x, ok := x.GetConstantKind().(*Constant_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (x *Constant) GetStringValue() string { + if x, ok := x.GetConstantKind().(*Constant_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *Constant) GetBytesValue() []byte { + if x, ok := x.GetConstantKind().(*Constant_BytesValue); ok { + return x.BytesValue + } + return nil +} + +// Deprecated: Do not use. +func (x *Constant) GetDurationValue() *durationpb.Duration { + if x, ok := x.GetConstantKind().(*Constant_DurationValue); ok { + return x.DurationValue + } + return nil +} + +// Deprecated: Do not use. 
+func (x *Constant) GetTimestampValue() *timestamppb.Timestamp { + if x, ok := x.GetConstantKind().(*Constant_TimestampValue); ok { + return x.TimestampValue + } + return nil +} + +type isConstant_ConstantKind interface { + isConstant_ConstantKind() +} + +type Constant_NullValue struct { + NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Constant_BoolValue struct { + BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Constant_Int64Value struct { + Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type Constant_Uint64Value struct { + Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"` +} + +type Constant_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type Constant_StringValue struct { + StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Constant_BytesValue struct { + BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +type Constant_DurationValue struct { + // Deprecated: Do not use. + DurationValue *durationpb.Duration `protobuf:"bytes,8,opt,name=duration_value,json=durationValue,proto3,oneof"` +} + +type Constant_TimestampValue struct { + // Deprecated: Do not use. + TimestampValue *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=timestamp_value,json=timestampValue,proto3,oneof"` +} + +func (*Constant_NullValue) isConstant_ConstantKind() {} + +func (*Constant_BoolValue) isConstant_ConstantKind() {} + +func (*Constant_Int64Value) isConstant_ConstantKind() {} + +func (*Constant_Uint64Value) isConstant_ConstantKind() {} + +func (*Constant_DoubleValue) isConstant_ConstantKind() {} + +func (*Constant_StringValue) isConstant_ConstantKind() {} + +func (*Constant_BytesValue) isConstant_ConstantKind() {} + +func (*Constant_DurationValue) isConstant_ConstantKind() {} + +func (*Constant_TimestampValue) isConstant_ConstantKind() {} + +type SourceInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SyntaxVersion string `protobuf:"bytes,1,opt,name=syntax_version,json=syntaxVersion,proto3" json:"syntax_version,omitempty"` + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + LineOffsets []int32 `protobuf:"varint,3,rep,packed,name=line_offsets,json=lineOffsets,proto3" json:"line_offsets,omitempty"` + Positions map[int64]int32 `protobuf:"bytes,4,rep,name=positions,proto3" json:"positions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + MacroCalls map[int64]*Expr `protobuf:"bytes,5,rep,name=macro_calls,json=macroCalls,proto3" json:"macro_calls,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Extensions []*SourceInfo_Extension `protobuf:"bytes,6,rep,name=extensions,proto3" json:"extensions,omitempty"` +} + +func (x *SourceInfo) Reset() { + *x = SourceInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceInfo) ProtoMessage() {} + +func (x *SourceInfo) ProtoReflect() protoreflect.Message { + 
mi := &file_cel_expr_syntax_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceInfo.ProtoReflect.Descriptor instead. +func (*SourceInfo) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3} +} + +func (x *SourceInfo) GetSyntaxVersion() string { + if x != nil { + return x.SyntaxVersion + } + return "" +} + +func (x *SourceInfo) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *SourceInfo) GetLineOffsets() []int32 { + if x != nil { + return x.LineOffsets + } + return nil +} + +func (x *SourceInfo) GetPositions() map[int64]int32 { + if x != nil { + return x.Positions + } + return nil +} + +func (x *SourceInfo) GetMacroCalls() map[int64]*Expr { + if x != nil { + return x.MacroCalls + } + return nil +} + +func (x *SourceInfo) GetExtensions() []*SourceInfo_Extension { + if x != nil { + return x.Extensions + } + return nil +} + +type Expr_Ident struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *Expr_Ident) Reset() { + *x = Expr_Ident{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Ident) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Ident) ProtoMessage() {} + +func (x *Expr_Ident) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Ident.ProtoReflect.Descriptor instead. +func (*Expr_Ident) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *Expr_Ident) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type Expr_Select struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Operand *Expr `protobuf:"bytes,1,opt,name=operand,proto3" json:"operand,omitempty"` + Field string `protobuf:"bytes,2,opt,name=field,proto3" json:"field,omitempty"` + TestOnly bool `protobuf:"varint,3,opt,name=test_only,json=testOnly,proto3" json:"test_only,omitempty"` +} + +func (x *Expr_Select) Reset() { + *x = Expr_Select{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Select) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Select) ProtoMessage() {} + +func (x *Expr_Select) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Select.ProtoReflect.Descriptor instead. 
+func (*Expr_Select) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *Expr_Select) GetOperand() *Expr { + if x != nil { + return x.Operand + } + return nil +} + +func (x *Expr_Select) GetField() string { + if x != nil { + return x.Field + } + return "" +} + +func (x *Expr_Select) GetTestOnly() bool { + if x != nil { + return x.TestOnly + } + return false +} + +type Expr_Call struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Target *Expr `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` + Function string `protobuf:"bytes,2,opt,name=function,proto3" json:"function,omitempty"` + Args []*Expr `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"` +} + +func (x *Expr_Call) Reset() { + *x = Expr_Call{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Call) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Call) ProtoMessage() {} + +func (x *Expr_Call) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Call.ProtoReflect.Descriptor instead. +func (*Expr_Call) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 2} +} + +func (x *Expr_Call) GetTarget() *Expr { + if x != nil { + return x.Target + } + return nil +} + +func (x *Expr_Call) GetFunction() string { + if x != nil { + return x.Function + } + return "" +} + +func (x *Expr_Call) GetArgs() []*Expr { + if x != nil { + return x.Args + } + return nil +} + +type Expr_CreateList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Elements []*Expr `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"` + OptionalIndices []int32 `protobuf:"varint,2,rep,packed,name=optional_indices,json=optionalIndices,proto3" json:"optional_indices,omitempty"` +} + +func (x *Expr_CreateList) Reset() { + *x = Expr_CreateList{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_CreateList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_CreateList) ProtoMessage() {} + +func (x *Expr_CreateList) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_CreateList.ProtoReflect.Descriptor instead. 
+func (*Expr_CreateList) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 3} +} + +func (x *Expr_CreateList) GetElements() []*Expr { + if x != nil { + return x.Elements + } + return nil +} + +func (x *Expr_CreateList) GetOptionalIndices() []int32 { + if x != nil { + return x.OptionalIndices + } + return nil +} + +type Expr_CreateStruct struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MessageName string `protobuf:"bytes,1,opt,name=message_name,json=messageName,proto3" json:"message_name,omitempty"` + Entries []*Expr_CreateStruct_Entry `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *Expr_CreateStruct) Reset() { + *x = Expr_CreateStruct{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_CreateStruct) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_CreateStruct) ProtoMessage() {} + +func (x *Expr_CreateStruct) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_CreateStruct.ProtoReflect.Descriptor instead. +func (*Expr_CreateStruct) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4} +} + +func (x *Expr_CreateStruct) GetMessageName() string { + if x != nil { + return x.MessageName + } + return "" +} + +func (x *Expr_CreateStruct) GetEntries() []*Expr_CreateStruct_Entry { + if x != nil { + return x.Entries + } + return nil +} + +type Expr_Comprehension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"` + IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"` + AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"` + AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"` + LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"` + LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"` + Result *Expr `protobuf:"bytes,7,opt,name=result,proto3" json:"result,omitempty"` +} + +func (x *Expr_Comprehension) Reset() { + *x = Expr_Comprehension{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Comprehension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Comprehension) ProtoMessage() {} + +func (x *Expr_Comprehension) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Comprehension.ProtoReflect.Descriptor instead. 
+func (*Expr_Comprehension) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 5} +} + +func (x *Expr_Comprehension) GetIterVar() string { + if x != nil { + return x.IterVar + } + return "" +} + +func (x *Expr_Comprehension) GetIterRange() *Expr { + if x != nil { + return x.IterRange + } + return nil +} + +func (x *Expr_Comprehension) GetAccuVar() string { + if x != nil { + return x.AccuVar + } + return "" +} + +func (x *Expr_Comprehension) GetAccuInit() *Expr { + if x != nil { + return x.AccuInit + } + return nil +} + +func (x *Expr_Comprehension) GetLoopCondition() *Expr { + if x != nil { + return x.LoopCondition + } + return nil +} + +func (x *Expr_Comprehension) GetLoopStep() *Expr { + if x != nil { + return x.LoopStep + } + return nil +} + +func (x *Expr_Comprehension) GetResult() *Expr { + if x != nil { + return x.Result + } + return nil +} + +type Expr_CreateStruct_Entry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // Types that are assignable to KeyKind: + // + // *Expr_CreateStruct_Entry_FieldKey + // *Expr_CreateStruct_Entry_MapKey + KeyKind isExpr_CreateStruct_Entry_KeyKind `protobuf_oneof:"key_kind"` + Value *Expr `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` + OptionalEntry bool `protobuf:"varint,5,opt,name=optional_entry,json=optionalEntry,proto3" json:"optional_entry,omitempty"` +} + +func (x *Expr_CreateStruct_Entry) Reset() { + *x = Expr_CreateStruct_Entry{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_CreateStruct_Entry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_CreateStruct_Entry) ProtoMessage() {} + +func (x *Expr_CreateStruct_Entry) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_CreateStruct_Entry.ProtoReflect.Descriptor instead. 
+func (*Expr_CreateStruct_Entry) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4, 0} +} + +func (x *Expr_CreateStruct_Entry) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (m *Expr_CreateStruct_Entry) GetKeyKind() isExpr_CreateStruct_Entry_KeyKind { + if m != nil { + return m.KeyKind + } + return nil +} + +func (x *Expr_CreateStruct_Entry) GetFieldKey() string { + if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_FieldKey); ok { + return x.FieldKey + } + return "" +} + +func (x *Expr_CreateStruct_Entry) GetMapKey() *Expr { + if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_MapKey); ok { + return x.MapKey + } + return nil +} + +func (x *Expr_CreateStruct_Entry) GetValue() *Expr { + if x != nil { + return x.Value + } + return nil +} + +func (x *Expr_CreateStruct_Entry) GetOptionalEntry() bool { + if x != nil { + return x.OptionalEntry + } + return false +} + +type isExpr_CreateStruct_Entry_KeyKind interface { + isExpr_CreateStruct_Entry_KeyKind() +} + +type Expr_CreateStruct_Entry_FieldKey struct { + FieldKey string `protobuf:"bytes,2,opt,name=field_key,json=fieldKey,proto3,oneof"` +} + +type Expr_CreateStruct_Entry_MapKey struct { + MapKey *Expr `protobuf:"bytes,3,opt,name=map_key,json=mapKey,proto3,oneof"` +} + +func (*Expr_CreateStruct_Entry_FieldKey) isExpr_CreateStruct_Entry_KeyKind() {} + +func (*Expr_CreateStruct_Entry_MapKey) isExpr_CreateStruct_Entry_KeyKind() {} + +type SourceInfo_Extension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + AffectedComponents []SourceInfo_Extension_Component `protobuf:"varint,2,rep,packed,name=affected_components,json=affectedComponents,proto3,enum=cel.expr.SourceInfo_Extension_Component" json:"affected_components,omitempty"` + Version *SourceInfo_Extension_Version `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *SourceInfo_Extension) Reset() { + *x = SourceInfo_Extension{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceInfo_Extension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceInfo_Extension) ProtoMessage() {} + +func (x *SourceInfo_Extension) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceInfo_Extension.ProtoReflect.Descriptor instead. 
+func (*SourceInfo_Extension) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2} +} + +func (x *SourceInfo_Extension) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *SourceInfo_Extension) GetAffectedComponents() []SourceInfo_Extension_Component { + if x != nil { + return x.AffectedComponents + } + return nil +} + +func (x *SourceInfo_Extension) GetVersion() *SourceInfo_Extension_Version { + if x != nil { + return x.Version + } + return nil +} + +type SourceInfo_Extension_Version struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Major int64 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` + Minor int64 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` +} + +func (x *SourceInfo_Extension_Version) Reset() { + *x = SourceInfo_Extension_Version{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceInfo_Extension_Version) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceInfo_Extension_Version) ProtoMessage() {} + +func (x *SourceInfo_Extension_Version) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceInfo_Extension_Version.ProtoReflect.Descriptor instead. +func (*SourceInfo_Extension_Version) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0} +} + +func (x *SourceInfo_Extension_Version) GetMajor() int64 { + if x != nil { + return x.Major + } + return 0 +} + +func (x *SourceInfo_Extension_Version) GetMinor() int64 { + if x != nil { + return x.Minor + } + return 0 +} + +var File_cel_expr_syntax_proto protoreflect.FileDescriptor + +var file_cel_expr_syntax_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61, + 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x67, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x22, + 0x0a, 0x04, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, + 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, + 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, + 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, + 0x6f, 0x75, 0x72, 0x63, 
0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xfd, 0x0a, 0x0a, 0x04, 0x45, 0x78, + 0x70, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x33, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x48, 0x00, 0x52, 0x09, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38, + 0x0a, 0x0b, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, + 0x78, 0x70, 0x72, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x32, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, + 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x61, 0x6c, 0x6c, + 0x48, 0x00, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38, 0x0a, 0x09, + 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x6c, 0x69, + 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x3e, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4d, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, + 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, + 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x48, 0x00, 0x52, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x45, 0x78, 0x70, 0x72, 0x1a, 0x1b, 0x0a, 0x05, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x1a, 0x65, 0x0a, 0x06, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x28, 0x0a, 0x07, + 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x07, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x09, + 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 
0x79, 0x1a, 0x6e, 0x0a, 0x04, 0x43, 0x61, 0x6c, + 0x6c, 0x12, 0x26, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, + 0x72, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, + 0x78, 0x70, 0x72, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x1a, 0x63, 0x0a, 0x0a, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x69, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x1a, 0xab, + 0x02, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, + 0x21, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, + 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, + 0xba, 0x01, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x09, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x61, 0x70, + 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, + 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0d, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0xad, 0x02, 0x0a, + 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, + 0x0a, 0x08, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x2d, 0x0a, 0x0a, 0x69, 0x74, 0x65, + 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x0e, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x69, + 0x74, 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x75, + 0x5f, 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x63, 0x75, + 0x56, 0x61, 0x72, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, 0x69, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, 0x69, 0x74, + 0x12, 0x35, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, + 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, 0x43, 0x6f, + 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x09, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, + 0x73, 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, 0x6f, 0x70, + 0x53, 0x74, 0x65, 0x70, 0x12, 0x26, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x45, 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, 0x0a, 0x09, + 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, 0x43, 0x6f, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, + 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, + 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, + 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, + 0x0b, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, + 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 
0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01, + 0x48, 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xac, 0x06, + 0x0a, 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e, + 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x21, 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x73, 0x12, 0x41, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x63, + 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x3e, 0x0a, 0x0a, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x0e, + 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4d, 0x0a, 0x0f, 0x4d, 0x61, + 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 
0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xe0, 0x02, 0x0a, 0x09, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x59, 0x0a, 0x13, 0x61, 0x66, 0x66, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x12, + 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, + 0x74, 0x73, 0x12, 0x40, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, + 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x22, 0x6f, 0x0a, 0x09, 0x43, + 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, 0x4d, 0x50, + 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, + 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4d, + 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43, + 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, + 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x42, 0x2e, 0x0a, 0x0c, + 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0b, 0x53, 0x79, + 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, + 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_cel_expr_syntax_proto_rawDescOnce sync.Once + file_cel_expr_syntax_proto_rawDescData = file_cel_expr_syntax_proto_rawDesc +) + +func file_cel_expr_syntax_proto_rawDescGZIP() []byte { + file_cel_expr_syntax_proto_rawDescOnce.Do(func() { + file_cel_expr_syntax_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_syntax_proto_rawDescData) + }) + return file_cel_expr_syntax_proto_rawDescData +} + +var file_cel_expr_syntax_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_cel_expr_syntax_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_cel_expr_syntax_proto_goTypes = []interface{}{ + (SourceInfo_Extension_Component)(0), // 0: cel.expr.SourceInfo.Extension.Component + (*ParsedExpr)(nil), // 1: cel.expr.ParsedExpr + (*Expr)(nil), // 2: cel.expr.Expr + (*Constant)(nil), // 3: cel.expr.Constant + (*SourceInfo)(nil), // 4: cel.expr.SourceInfo + (*Expr_Ident)(nil), // 5: cel.expr.Expr.Ident + (*Expr_Select)(nil), // 6: cel.expr.Expr.Select + 
(*Expr_Call)(nil), // 7: cel.expr.Expr.Call + (*Expr_CreateList)(nil), // 8: cel.expr.Expr.CreateList + (*Expr_CreateStruct)(nil), // 9: cel.expr.Expr.CreateStruct + (*Expr_Comprehension)(nil), // 10: cel.expr.Expr.Comprehension + (*Expr_CreateStruct_Entry)(nil), // 11: cel.expr.Expr.CreateStruct.Entry + nil, // 12: cel.expr.SourceInfo.PositionsEntry + nil, // 13: cel.expr.SourceInfo.MacroCallsEntry + (*SourceInfo_Extension)(nil), // 14: cel.expr.SourceInfo.Extension + (*SourceInfo_Extension_Version)(nil), // 15: cel.expr.SourceInfo.Extension.Version + (structpb.NullValue)(0), // 16: google.protobuf.NullValue + (*durationpb.Duration)(nil), // 17: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 18: google.protobuf.Timestamp +} +var file_cel_expr_syntax_proto_depIdxs = []int32{ + 2, // 0: cel.expr.ParsedExpr.expr:type_name -> cel.expr.Expr + 4, // 1: cel.expr.ParsedExpr.source_info:type_name -> cel.expr.SourceInfo + 3, // 2: cel.expr.Expr.const_expr:type_name -> cel.expr.Constant + 5, // 3: cel.expr.Expr.ident_expr:type_name -> cel.expr.Expr.Ident + 6, // 4: cel.expr.Expr.select_expr:type_name -> cel.expr.Expr.Select + 7, // 5: cel.expr.Expr.call_expr:type_name -> cel.expr.Expr.Call + 8, // 6: cel.expr.Expr.list_expr:type_name -> cel.expr.Expr.CreateList + 9, // 7: cel.expr.Expr.struct_expr:type_name -> cel.expr.Expr.CreateStruct + 10, // 8: cel.expr.Expr.comprehension_expr:type_name -> cel.expr.Expr.Comprehension + 16, // 9: cel.expr.Constant.null_value:type_name -> google.protobuf.NullValue + 17, // 10: cel.expr.Constant.duration_value:type_name -> google.protobuf.Duration + 18, // 11: cel.expr.Constant.timestamp_value:type_name -> google.protobuf.Timestamp + 12, // 12: cel.expr.SourceInfo.positions:type_name -> cel.expr.SourceInfo.PositionsEntry + 13, // 13: cel.expr.SourceInfo.macro_calls:type_name -> cel.expr.SourceInfo.MacroCallsEntry + 14, // 14: cel.expr.SourceInfo.extensions:type_name -> cel.expr.SourceInfo.Extension + 2, // 15: cel.expr.Expr.Select.operand:type_name -> cel.expr.Expr + 2, // 16: cel.expr.Expr.Call.target:type_name -> cel.expr.Expr + 2, // 17: cel.expr.Expr.Call.args:type_name -> cel.expr.Expr + 2, // 18: cel.expr.Expr.CreateList.elements:type_name -> cel.expr.Expr + 11, // 19: cel.expr.Expr.CreateStruct.entries:type_name -> cel.expr.Expr.CreateStruct.Entry + 2, // 20: cel.expr.Expr.Comprehension.iter_range:type_name -> cel.expr.Expr + 2, // 21: cel.expr.Expr.Comprehension.accu_init:type_name -> cel.expr.Expr + 2, // 22: cel.expr.Expr.Comprehension.loop_condition:type_name -> cel.expr.Expr + 2, // 23: cel.expr.Expr.Comprehension.loop_step:type_name -> cel.expr.Expr + 2, // 24: cel.expr.Expr.Comprehension.result:type_name -> cel.expr.Expr + 2, // 25: cel.expr.Expr.CreateStruct.Entry.map_key:type_name -> cel.expr.Expr + 2, // 26: cel.expr.Expr.CreateStruct.Entry.value:type_name -> cel.expr.Expr + 2, // 27: cel.expr.SourceInfo.MacroCallsEntry.value:type_name -> cel.expr.Expr + 0, // 28: cel.expr.SourceInfo.Extension.affected_components:type_name -> cel.expr.SourceInfo.Extension.Component + 15, // 29: cel.expr.SourceInfo.Extension.version:type_name -> cel.expr.SourceInfo.Extension.Version + 30, // [30:30] is the sub-list for method output_type + 30, // [30:30] is the sub-list for method input_type + 30, // [30:30] is the sub-list for extension type_name + 30, // [30:30] is the sub-list for extension extendee + 0, // [0:30] is the sub-list for field type_name +} + +func init() { file_cel_expr_syntax_proto_init() } +func file_cel_expr_syntax_proto_init() { + 
if File_cel_expr_syntax_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cel_expr_syntax_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParsedExpr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Constant); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Ident); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Select); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Call); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_CreateList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_CreateStruct); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Comprehension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_CreateStruct_Entry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceInfo_Extension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceInfo_Extension_Version); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cel_expr_syntax_proto_msgTypes[1].OneofWrappers = []interface{}{ + 
(*Expr_ConstExpr)(nil), + (*Expr_IdentExpr)(nil), + (*Expr_SelectExpr)(nil), + (*Expr_CallExpr)(nil), + (*Expr_ListExpr)(nil), + (*Expr_StructExpr)(nil), + (*Expr_ComprehensionExpr)(nil), + } + file_cel_expr_syntax_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Constant_NullValue)(nil), + (*Constant_BoolValue)(nil), + (*Constant_Int64Value)(nil), + (*Constant_Uint64Value)(nil), + (*Constant_DoubleValue)(nil), + (*Constant_StringValue)(nil), + (*Constant_BytesValue)(nil), + (*Constant_DurationValue)(nil), + (*Constant_TimestampValue)(nil), + } + file_cel_expr_syntax_proto_msgTypes[10].OneofWrappers = []interface{}{ + (*Expr_CreateStruct_Entry_FieldKey)(nil), + (*Expr_CreateStruct_Entry_MapKey)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cel_expr_syntax_proto_rawDesc, + NumEnums: 1, + NumMessages: 15, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_syntax_proto_goTypes, + DependencyIndexes: file_cel_expr_syntax_proto_depIdxs, + EnumInfos: file_cel_expr_syntax_proto_enumTypes, + MessageInfos: file_cel_expr_syntax_proto_msgTypes, + }.Build() + File_cel_expr_syntax_proto = out.File + file_cel_expr_syntax_proto_rawDesc = nil + file_cel_expr_syntax_proto_goTypes = nil + file_cel_expr_syntax_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/cel.dev/expr/value.pb.go b/terraform/providers/google/vendor/cel.dev/expr/value.pb.go new file mode 100644 index 00000000000..e5e29228c2c --- /dev/null +++ b/terraform/providers/google/vendor/cel.dev/expr/value.pb.go @@ -0,0 +1,653 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/value.proto + +package expr + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Kind: + // + // *Value_NullValue + // *Value_BoolValue + // *Value_Int64Value + // *Value_Uint64Value + // *Value_DoubleValue + // *Value_StringValue + // *Value_BytesValue + // *Value_EnumValue + // *Value_ObjectValue + // *Value_MapValue + // *Value_ListValue + // *Value_TypeValue + Kind isValue_Kind `protobuf_oneof:"kind"` +} + +func (x *Value) Reset() { + *x = Value{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Value) ProtoMessage() {} + +func (x *Value) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Value.ProtoReflect.Descriptor instead. 
+func (*Value) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{0} +} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *Value) GetNullValue() structpb.NullValue { + if x, ok := x.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return structpb.NullValue(0) +} + +func (x *Value) GetBoolValue() bool { + if x, ok := x.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *Value) GetInt64Value() int64 { + if x, ok := x.GetKind().(*Value_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (x *Value) GetUint64Value() uint64 { + if x, ok := x.GetKind().(*Value_Uint64Value); ok { + return x.Uint64Value + } + return 0 +} + +func (x *Value) GetDoubleValue() float64 { + if x, ok := x.GetKind().(*Value_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (x *Value) GetStringValue() string { + if x, ok := x.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *Value) GetBytesValue() []byte { + if x, ok := x.GetKind().(*Value_BytesValue); ok { + return x.BytesValue + } + return nil +} + +func (x *Value) GetEnumValue() *EnumValue { + if x, ok := x.GetKind().(*Value_EnumValue); ok { + return x.EnumValue + } + return nil +} + +func (x *Value) GetObjectValue() *anypb.Any { + if x, ok := x.GetKind().(*Value_ObjectValue); ok { + return x.ObjectValue + } + return nil +} + +func (x *Value) GetMapValue() *MapValue { + if x, ok := x.GetKind().(*Value_MapValue); ok { + return x.MapValue + } + return nil +} + +func (x *Value) GetListValue() *ListValue { + if x, ok := x.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +func (x *Value) GetTypeValue() string { + if x, ok := x.GetKind().(*Value_TypeValue); ok { + return x.TypeValue + } + return "" +} + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Value_Int64Value struct { + Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type Value_Uint64Value struct { + Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"` +} + +type Value_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type Value_StringValue struct { + StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Value_BytesValue struct { + BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +type Value_EnumValue struct { + EnumValue *EnumValue `protobuf:"bytes,9,opt,name=enum_value,json=enumValue,proto3,oneof"` +} + +type Value_ObjectValue struct { + ObjectValue *anypb.Any `protobuf:"bytes,10,opt,name=object_value,json=objectValue,proto3,oneof"` +} + +type Value_MapValue struct { + MapValue *MapValue `protobuf:"bytes,11,opt,name=map_value,json=mapValue,proto3,oneof"` +} + +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,12,opt,name=list_value,json=listValue,proto3,oneof"` +} + +type Value_TypeValue struct { + TypeValue string `protobuf:"bytes,15,opt,name=type_value,json=typeValue,proto3,oneof"` +} + +func 
(*Value_NullValue) isValue_Kind() {} + +func (*Value_BoolValue) isValue_Kind() {} + +func (*Value_Int64Value) isValue_Kind() {} + +func (*Value_Uint64Value) isValue_Kind() {} + +func (*Value_DoubleValue) isValue_Kind() {} + +func (*Value_StringValue) isValue_Kind() {} + +func (*Value_BytesValue) isValue_Kind() {} + +func (*Value_EnumValue) isValue_Kind() {} + +func (*Value_ObjectValue) isValue_Kind() {} + +func (*Value_MapValue) isValue_Kind() {} + +func (*Value_ListValue) isValue_Kind() {} + +func (*Value_TypeValue) isValue_Kind() {} + +type EnumValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *EnumValue) Reset() { + *x = EnumValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumValue) ProtoMessage() {} + +func (x *EnumValue) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumValue.ProtoReflect.Descriptor instead. +func (*EnumValue) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{1} +} + +func (x *EnumValue) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *EnumValue) GetValue() int32 { + if x != nil { + return x.Value + } + return 0 +} + +type ListValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +func (x *ListValue) Reset() { + *x = ListValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListValue) ProtoMessage() {} + +func (x *ListValue) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListValue.ProtoReflect.Descriptor instead. 
+func (*ListValue) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{2} +} + +func (x *ListValue) GetValues() []*Value { + if x != nil { + return x.Values + } + return nil +} + +type MapValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *MapValue) Reset() { + *x = MapValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MapValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MapValue) ProtoMessage() {} + +func (x *MapValue) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MapValue.ProtoReflect.Descriptor instead. +func (*MapValue) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{3} +} + +func (x *MapValue) GetEntries() []*MapValue_Entry { + if x != nil { + return x.Entries + } + return nil +} + +type MapValue_Entry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *MapValue_Entry) Reset() { + *x = MapValue_Entry{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MapValue_Entry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MapValue_Entry) ProtoMessage() {} + +func (x *MapValue_Entry) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MapValue_Entry.ProtoReflect.Descriptor instead. 
+func (*MapValue_Entry) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *MapValue_Entry) GetKey() *Value { + if x != nil { + return x.Key + } + return nil +} + +func (x *MapValue_Entry) GetValue() *Value { + if x != nil { + return x.Value + } + return nil +} + +var File_cel_expr_value_proto protoreflect.FileDescriptor + +var file_cel_expr_value_proto_rawDesc = []byte{ + 0x0a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x04, 0x0a, 0x05, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x69, + 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75, + 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48, + 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, + 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, + 0x00, 0x52, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0c, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 
0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, + 0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x6c, 0x69, + 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x1f, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0f, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x09, 0x45, 0x6e, 0x75, + 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x22, 0x34, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, + 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, + 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x51, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x21, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2d, 0x0a, 0x0c, 0x64, 0x65, + 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, + 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_cel_expr_value_proto_rawDescOnce sync.Once + file_cel_expr_value_proto_rawDescData = file_cel_expr_value_proto_rawDesc +) + +func file_cel_expr_value_proto_rawDescGZIP() []byte { + file_cel_expr_value_proto_rawDescOnce.Do(func() { + file_cel_expr_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_value_proto_rawDescData) + }) + return file_cel_expr_value_proto_rawDescData +} + +var file_cel_expr_value_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_cel_expr_value_proto_goTypes = []interface{}{ + (*Value)(nil), // 0: cel.expr.Value + (*EnumValue)(nil), // 1: cel.expr.EnumValue + (*ListValue)(nil), // 2: cel.expr.ListValue + (*MapValue)(nil), // 3: cel.expr.MapValue + (*MapValue_Entry)(nil), // 4: cel.expr.MapValue.Entry + (structpb.NullValue)(0), // 5: google.protobuf.NullValue + (*anypb.Any)(nil), // 6: google.protobuf.Any +} +var 
file_cel_expr_value_proto_depIdxs = []int32{ + 5, // 0: cel.expr.Value.null_value:type_name -> google.protobuf.NullValue + 1, // 1: cel.expr.Value.enum_value:type_name -> cel.expr.EnumValue + 6, // 2: cel.expr.Value.object_value:type_name -> google.protobuf.Any + 3, // 3: cel.expr.Value.map_value:type_name -> cel.expr.MapValue + 2, // 4: cel.expr.Value.list_value:type_name -> cel.expr.ListValue + 0, // 5: cel.expr.ListValue.values:type_name -> cel.expr.Value + 4, // 6: cel.expr.MapValue.entries:type_name -> cel.expr.MapValue.Entry + 0, // 7: cel.expr.MapValue.Entry.key:type_name -> cel.expr.Value + 0, // 8: cel.expr.MapValue.Entry.value:type_name -> cel.expr.Value + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_cel_expr_value_proto_init() } +func file_cel_expr_value_proto_init() { + if File_cel_expr_value_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cel_expr_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Value); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MapValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_value_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MapValue_Entry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cel_expr_value_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Value_NullValue)(nil), + (*Value_BoolValue)(nil), + (*Value_Int64Value)(nil), + (*Value_Uint64Value)(nil), + (*Value_DoubleValue)(nil), + (*Value_StringValue)(nil), + (*Value_BytesValue)(nil), + (*Value_EnumValue)(nil), + (*Value_ObjectValue)(nil), + (*Value_MapValue)(nil), + (*Value_ListValue)(nil), + (*Value_TypeValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cel_expr_value_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_value_proto_goTypes, + DependencyIndexes: file_cel_expr_value_proto_depIdxs, + MessageInfos: file_cel_expr_value_proto_msgTypes, + }.Build() + File_cel_expr_value_proto = out.File + file_cel_expr_value_proto_rawDesc = nil + file_cel_expr_value_proto_goTypes = nil + file_cel_expr_value_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/.release-please-manifest-individual.json 
b/terraform/providers/google/vendor/cloud.google.com/go/.release-please-manifest-individual.json index 6a2b8ff17a0..39ed1f94745 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/.release-please-manifest-individual.json +++ b/terraform/providers/google/vendor/cloud.google.com/go/.release-please-manifest-individual.json @@ -1,18 +1,18 @@ { - "ai": "0.6.0", + "ai": "0.8.2", "aiplatform": "1.68.0", - "auth": "0.5.1", - "auth/oauth2adapt": "0.2.2", - "bigquery": "1.61.0", - "bigtable": "1.24.0", - "datastore": "1.17.1", - "errorreporting": "0.3.0", - "firestore": "1.15.0", - "logging": "1.10.0", - "profiler": "0.4.0", - "pubsub": "1.38.0", + "auth": "0.9.7", + "auth/oauth2adapt": "0.2.4", + "bigquery": "1.63.1", + "bigtable": "1.33.0", + "datastore": "1.19.0", + "errorreporting": "0.3.1", + "firestore": "1.17.0", + "logging": "1.11.0", + "profiler": "0.4.1", + "pubsub": "1.44.0", "pubsublite": "1.8.2", - "spanner": "1.63.0", - "storage": "1.42.0", - "vertexai": "0.11.0" + "spanner": "1.69.0", + "storage": "1.44.0", + "vertexai": "0.13.1" } diff --git a/terraform/providers/google/vendor/cloud.google.com/go/.release-please-manifest-submodules.json b/terraform/providers/google/vendor/cloud.google.com/go/.release-please-manifest-submodules.json index 408a92956ac..edbdcf47fd9 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/.release-please-manifest-submodules.json +++ b/terraform/providers/google/vendor/cloud.google.com/go/.release-please-manifest-submodules.json @@ -1,148 +1,150 @@ { - "accessapproval": "1.7.7", - "accesscontextmanager": "1.8.7", - "advisorynotifications": "1.4.1", - "alloydb": "1.10.2", - "analytics": "0.23.2", - "apigateway": "1.6.7", - "apigeeconnect": "1.6.7", - "apigeeregistry": "0.8.5", - "apikeys": "1.1.7", - "appengine": "1.8.7", - "apphub": "0.1.1", - "apps": "0.4.2", - "area120": "0.8.7", - "artifactregistry": "1.14.9", - "asset": "1.19.1", - "assuredworkloads": "1.11.7", - "automl": "1.13.7", - "backupdr": "0.1.1", - "baremetalsolution": "1.2.6", - "batch": "1.8.7", - "beyondcorp": "1.0.6", - "billing": "1.18.5", - "binaryauthorization": "1.8.3", - "certificatemanager": "1.8.1", - "channel": "1.17.7", - "chat": "0.1.1", - "cloudbuild": "1.16.1", - "cloudcontrolspartner": "0.2.1", - "clouddms": "1.7.6", - "cloudprofiler": "0.3.2", - "cloudquotas": "0.2.1", - "cloudtasks": "1.12.8", - "commerce": "1.0.0", - "compute": "1.27.0", - "compute/metadata": "0.3.0", - "confidentialcomputing": "1.5.1", - "config": "1.0.0", - "contactcenterinsights": "1.13.2", - "container": "1.37.0", - "containeranalysis": "0.11.6", - "datacatalog": "1.20.1", - "dataflow": "0.9.7", - "dataform": "0.9.4", - "datafusion": "1.7.7", - "datalabeling": "0.8.7", - "dataplex": "1.16.0", - "dataproc": "2.4.2", - "dataqna": "0.8.7", - "datastream": "1.10.6", - "deploy": "1.19.0", - "developerconnect": "0.0.0", - "dialogflow": "1.54.0", - "discoveryengine": "1.8.0", - "dlp": "1.14.0", - "documentai": "1.30.0", - "domains": "0.9.7", - "edgecontainer": "1.2.1", - "edgenetwork": "0.2.4", - "essentialcontacts": "1.6.8", - "eventarc": "1.13.6", - "filestore": "1.8.3", - "functions": "1.16.2", - "gkebackup": "1.5.0", - "gkeconnect": "0.8.7", - "gkehub": "0.14.7", - "gkemulticloud": "1.2.0", - "grafeas": "0.3.6", - "gsuiteaddons": "1.6.7", - "iam": "1.1.8", - "iap": "1.9.6", - "identitytoolkit": "0.0.0", - "ids": "1.4.7", - "iot": "1.7.7", - "kms": "1.17.1", - "language": "1.12.5", - "lifesciences": "0.9.7", - "longrunning": "0.5.7", - "managedidentities": "1.6.7", - 
"managedkafka": "0.1.0", - "maps": "1.11.1", - "mediatranslation": "0.8.7", - "memcache": "1.10.7", - "metastore": "1.13.6", - "migrationcenter": "1.0.0", - "monitoring": "1.19.0", - "netapp": "1.1.0", - "networkconnectivity": "1.14.6", - "networkmanagement": "1.13.2", - "networksecurity": "0.9.7", - "networkservices": "0.1.1", - "notebooks": "1.11.5", - "optimization": "1.6.5", - "orchestration": "1.9.2", - "orgpolicy": "1.12.3", - "osconfig": "1.12.7", - "oslogin": "1.13.3", - "parallelstore": "0.3.0", - "phishingprotection": "0.8.7", - "policysimulator": "0.2.5", - "policytroubleshooter": "1.10.5", - "privatecatalog": "0.9.7", - "rapidmigrationassessment": "1.0.7", - "recaptchaenterprise": "2.13.0", - "recommendationengine": "0.8.7", - "recommender": "1.12.3", - "redis": "1.16.0", - "resourcemanager": "1.9.7", - "resourcesettings": "1.7.0", - "retail": "1.17.0", - "run": "1.3.7", - "scheduler": "1.10.8", - "secretmanager": "1.13.1", - "securesourcemanager": "0.1.5", - "security": "1.17.0", - "securitycenter": "1.30.0", - "securitycentermanagement": "0.2.1", - "securityposture": "0.1.3", - "servicecontrol": "1.13.2", - "servicedirectory": "1.11.7", - "servicehealth": "1.0.0", - "servicemanagement": "1.9.8", - "serviceusage": "1.8.6", - "shell": "1.7.7", - "shopping": "0.8.1", - "speech": "1.23.1", - "storageinsights": "1.0.7", - "storagetransfer": "1.10.6", - "streetview": "0.1.0", - "support": "1.0.6", - "talent": "1.6.8", - "telcoautomation": "0.2.2", - "texttospeech": "1.7.7", - "tpu": "1.6.7", - "trace": "1.10.7", - "translate": "1.10.3", - "video": "1.21.0", - "videointelligence": "1.11.7", - "vision": "2.8.2", - "visionai": "0.2.0", - "vmmigration": "1.7.7", - "vmwareengine": "1.1.3", - "vpcaccess": "1.7.7", - "webrisk": "1.9.7", - "websecurityscanner": "1.6.7", - "workflows": "1.12.6", - "workstations": "1.0.0" + "accessapproval": "1.8.1", + "accesscontextmanager": "1.9.1", + "advisorynotifications": "1.5.1", + "alloydb": "1.12.1", + "analytics": "0.25.1", + "apigateway": "1.7.1", + "apigeeconnect": "1.7.1", + "apigeeregistry": "0.9.1", + "apihub": "0.1.1", + "apikeys": "1.2.1", + "appengine": "1.9.1", + "apphub": "0.2.1", + "apps": "0.5.1", + "area120": "0.9.1", + "artifactregistry": "1.15.1", + "asset": "1.20.2", + "assuredworkloads": "1.12.1", + "automl": "1.14.1", + "backupdr": "1.1.1", + "baremetalsolution": "1.3.1", + "batch": "1.11.0", + "beyondcorp": "1.1.1", + "billing": "1.19.1", + "binaryauthorization": "1.9.1", + "certificatemanager": "1.9.1", + "channel": "1.18.1", + "chat": "0.6.0", + "cloudbuild": "1.18.0", + "cloudcontrolspartner": "1.2.0", + "clouddms": "1.8.1", + "cloudprofiler": "0.4.1", + "cloudquotas": "1.1.1", + "cloudtasks": "1.13.1", + "commerce": "1.1.1", + "compute": "1.28.1", + "compute/metadata": "0.5.2", + "confidentialcomputing": "1.7.1", + "config": "1.1.1", + "contactcenterinsights": "1.14.1", + "container": "1.40.0", + "containeranalysis": "0.13.1", + "datacatalog": "1.22.1", + "dataflow": "0.10.1", + "dataform": "0.10.1", + "datafusion": "1.8.1", + "datalabeling": "0.9.1", + "dataplex": "1.19.1", + "dataproc": "2.9.0", + "dataqna": "0.9.1", + "datastream": "1.11.1", + "deploy": "1.22.1", + "developerconnect": "0.2.1", + "dialogflow": "1.58.0", + "discoveryengine": "1.14.0", + "dlp": "1.19.0", + "documentai": "1.34.0", + "domains": "0.10.1", + "edgecontainer": "1.3.1", + "edgenetwork": "1.2.1", + "essentialcontacts": "1.7.1", + "eventarc": "1.14.1", + "filestore": "1.9.1", + "functions": "1.19.1", + "gkebackup": "1.6.1", + "gkeconnect": "0.11.1", + 
"gkehub": "0.15.1", + "gkemulticloud": "1.4.0", + "grafeas": "0.3.11", + "gsuiteaddons": "1.7.1", + "iam": "1.2.1", + "iap": "1.10.1", + "identitytoolkit": "0.2.1", + "ids": "1.5.1", + "iot": "1.8.1", + "kms": "1.20.0", + "language": "1.14.1", + "lifesciences": "0.10.1", + "longrunning": "0.6.1", + "managedidentities": "1.7.1", + "managedkafka": "0.2.1", + "maps": "1.14.0", + "mediatranslation": "0.9.1", + "memcache": "1.11.1", + "metastore": "1.14.1", + "migrationcenter": "1.1.1", + "monitoring": "1.21.1", + "netapp": "1.4.0", + "networkconnectivity": "1.15.1", + "networkmanagement": "1.14.1", + "networksecurity": "0.10.1", + "networkservices": "0.2.1", + "notebooks": "1.12.1", + "optimization": "1.7.1", + "orchestration": "1.11.0", + "orgpolicy": "1.14.0", + "osconfig": "1.14.1", + "oslogin": "1.14.1", + "parallelstore": "0.6.1", + "phishingprotection": "0.9.1", + "policysimulator": "0.3.1", + "policytroubleshooter": "1.11.1", + "privatecatalog": "0.10.1", + "privilegedaccessmanager": "0.2.1", + "rapidmigrationassessment": "1.1.1", + "recaptchaenterprise": "2.17.1", + "recommendationengine": "0.9.1", + "recommender": "1.13.1", + "redis": "1.17.1", + "resourcemanager": "1.10.1", + "resourcesettings": "1.8.1", + "retail": "1.18.1", + "run": "1.5.1", + "scheduler": "1.11.1", + "secretmanager": "1.14.1", + "securesourcemanager": "1.2.1", + "security": "1.18.1", + "securitycenter": "1.35.1", + "securitycentermanagement": "1.1.1", + "securityposture": "0.2.1", + "servicecontrol": "1.14.1", + "servicedirectory": "1.12.1", + "servicehealth": "1.1.1", + "servicemanagement": "1.10.1", + "serviceusage": "1.9.1", + "shell": "1.8.1", + "shopping": "0.10.0", + "speech": "1.25.1", + "storageinsights": "1.1.1", + "storagetransfer": "1.11.1", + "streetview": "0.2.1", + "support": "1.1.1", + "talent": "1.7.1", + "telcoautomation": "1.1.1", + "texttospeech": "1.8.1", + "tpu": "1.7.1", + "trace": "1.11.1", + "translate": "1.12.1", + "video": "1.23.1", + "videointelligence": "1.12.1", + "vision": "2.9.1", + "visionai": "0.4.1", + "vmmigration": "1.8.1", + "vmwareengine": "1.3.1", + "vpcaccess": "1.8.1", + "webrisk": "1.10.1", + "websecurityscanner": "1.7.1", + "workflows": "1.13.1", + "workstations": "1.1.1" } diff --git a/terraform/providers/google/vendor/cloud.google.com/go/.release-please-manifest.json b/terraform/providers/google/vendor/cloud.google.com/go/.release-please-manifest.json index 82876bd850d..c8f1da56d86 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/.release-please-manifest.json +++ b/terraform/providers/google/vendor/cloud.google.com/go/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.115.0" + ".": "0.116.0" } diff --git a/terraform/providers/google/vendor/cloud.google.com/go/CHANGES.md b/terraform/providers/google/vendor/cloud.google.com/go/CHANGES.md index 47eeeb266d4..adc725ca1a7 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/CHANGES.md +++ b/terraform/providers/google/vendor/cloud.google.com/go/CHANGES.md @@ -1,5 +1,19 @@ # Changes +## [0.116.0](https://github.com/googleapis/google-cloud-go/compare/v0.115.1...v0.116.0) (2024-10-09) + + +### Features + +* **genai:** Add tokenizer package ([#10699](https://github.com/googleapis/google-cloud-go/issues/10699)) ([214af16](https://github.com/googleapis/google-cloud-go/commit/214af1604bf3837f68e96dbf81c1331b90c9375f)) + +## [0.115.1](https://github.com/googleapis/google-cloud-go/compare/v0.115.0...v0.115.1) (2024-08-13) + + +### Bug Fixes + +* **cloud.google.com/go:** Bump 
google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5)) + ## [0.115.0](https://github.com/googleapis/google-cloud-go/compare/v0.114.0...v0.115.0) (2024-06-12) diff --git a/terraform/providers/google/vendor/cloud.google.com/go/README.md b/terraform/providers/google/vendor/cloud.google.com/go/README.md index 99514979018..63db0209c7d 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/README.md +++ b/terraform/providers/google/vendor/cloud.google.com/go/README.md @@ -28,12 +28,16 @@ For an updated list of all of our released APIs please see our ## [Go Versions Supported](#supported-versions) +**Note:** As of Jan 1, 2025 the Cloud Client Libraries for Go will support the +two most-recent major Go releases -- the same [policy](https://go.dev/doc/devel/release#policy) +the Go programming language follows. + Our libraries are compatible with at least the three most recent, major Go releases. They are currently compatible with: +- Go 1.23 - Go 1.22 - Go 1.21 -- Go 1.20 ## Authorization @@ -56,14 +60,14 @@ client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfil ``` You can exert more control over authorization by using the -[`golang.org/x/oauth2`](https://pkg.go.dev/golang.org/x/oauth2) package to -create an `oauth2.TokenSource`. Then pass -[`option.WithTokenSource`](https://pkg.go.dev/google.golang.org/api/option#WithTokenSource) +[credentials](https://pkg.go.dev/cloud.google.com/go/auth/credentials) package to +create an [auth.Credentials](https://pkg.go.dev/cloud.google.com/go/auth#Credentials). +Then pass [`option.WithAuthCredentials`](https://pkg.go.dev/google.golang.org/api/option#WithAuthCredentials) to the `NewClient` function: ```go -tokenSource := ... -client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource)) +creds := ... 
+client, err := storage.NewClient(ctx, option.WithAuthCredentials(creds)) ``` ## Contributing diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/CHANGES.md b/terraform/providers/google/vendor/cloud.google.com/go/auth/CHANGES.md index 7ef5fc0def9..c81df739278 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/CHANGES.md +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/CHANGES.md @@ -1,5 +1,155 @@ # Changelog +## [0.9.8](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.7...auth/v0.9.8) (2024-10-09) + + +### Bug Fixes + +* **auth:** Restore OpenTelemetry handling in transports ([#10968](https://github.com/googleapis/google-cloud-go/issues/10968)) ([08c6d04](https://github.com/googleapis/google-cloud-go/commit/08c6d04901c1a20e219b2d86df41dbaa6d7d7b55)), refs [#10962](https://github.com/googleapis/google-cloud-go/issues/10962) +* **auth:** Try talk to plaintext S2A if credentials can not be found for mTLS-S2A ([#10941](https://github.com/googleapis/google-cloud-go/issues/10941)) ([0f0bf2d](https://github.com/googleapis/google-cloud-go/commit/0f0bf2d18c97dd8b65bcf0099f0802b5631c6287)) + +## [0.9.7](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.6...auth/v0.9.7) (2024-10-01) + + +### Bug Fixes + +* **auth:** Restore support for non-default service accounts for DirectPath ([#10937](https://github.com/googleapis/google-cloud-go/issues/10937)) ([a38650e](https://github.com/googleapis/google-cloud-go/commit/a38650edbf420223077498cafa537aec74b37aad)), refs [#10907](https://github.com/googleapis/google-cloud-go/issues/10907) + +## [0.9.6](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.5...auth/v0.9.6) (2024-09-30) + + +### Bug Fixes + +* **auth:** Make aws credentials provider retrieve fresh credentials ([#10920](https://github.com/googleapis/google-cloud-go/issues/10920)) ([250fbf8](https://github.com/googleapis/google-cloud-go/commit/250fbf87d858d865e399a241b7e537c4ff0c3dd8)) + +## [0.9.5](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.4...auth/v0.9.5) (2024-09-25) + + +### Bug Fixes + +* **auth:** Restore support for GOOGLE_CLOUD_UNIVERSE_DOMAIN env ([#10915](https://github.com/googleapis/google-cloud-go/issues/10915)) ([94caaaa](https://github.com/googleapis/google-cloud-go/commit/94caaaa061362d0e00ef6214afcc8a0a3e7ebfb2)) +* **auth:** Skip directpath credentials overwrite when it's not on GCE ([#10833](https://github.com/googleapis/google-cloud-go/issues/10833)) ([7e5e8d1](https://github.com/googleapis/google-cloud-go/commit/7e5e8d10b761b0a6e43e19a028528db361bc07b1)) +* **auth:** Use new context for non-blocking token refresh ([#10919](https://github.com/googleapis/google-cloud-go/issues/10919)) ([cf7102d](https://github.com/googleapis/google-cloud-go/commit/cf7102d33a21be1e5a9d47a49456b3a57c43b350)) + +## [0.9.4](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.3...auth/v0.9.4) (2024-09-11) + + +### Bug Fixes + +* **auth:** Enable self-signed JWT for non-GDU universe domain ([#10831](https://github.com/googleapis/google-cloud-go/issues/10831)) ([f9869f7](https://github.com/googleapis/google-cloud-go/commit/f9869f7903cfd34d1b97c25d0dc5669d2c5138e6)) + +## [0.9.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.2...auth/v0.9.3) (2024-09-03) + + +### Bug Fixes + +* **auth:** Choose quota project envvar over file when both present ([#10807](https://github.com/googleapis/google-cloud-go/issues/10807)) 
([2d8dd77](https://github.com/googleapis/google-cloud-go/commit/2d8dd7700eff92d4b95027be55e26e1e7aa79181)), refs [#10804](https://github.com/googleapis/google-cloud-go/issues/10804) + +## [0.9.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.1...auth/v0.9.2) (2024-08-30) + + +### Bug Fixes + +* **auth:** Handle non-Transport DefaultTransport ([#10733](https://github.com/googleapis/google-cloud-go/issues/10733)) ([98d91dc](https://github.com/googleapis/google-cloud-go/commit/98d91dc8316b247498fab41ab35e57a0446fe556)), refs [#10742](https://github.com/googleapis/google-cloud-go/issues/10742) +* **auth:** Make sure quota option takes precedence over env/file ([#10797](https://github.com/googleapis/google-cloud-go/issues/10797)) ([f1b050d](https://github.com/googleapis/google-cloud-go/commit/f1b050d56d804b245cab048c2980d32b0eaceb4e)), refs [#10795](https://github.com/googleapis/google-cloud-go/issues/10795) + + +### Documentation + +* **auth:** Fix Go doc comment link ([#10751](https://github.com/googleapis/google-cloud-go/issues/10751)) ([015acfa](https://github.com/googleapis/google-cloud-go/commit/015acfab4d172650928bb1119bc2cd6307b9a437)) + +## [0.9.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.0...auth/v0.9.1) (2024-08-22) + + +### Bug Fixes + +* **auth:** Setting expireEarly to default when the value is 0 ([#10732](https://github.com/googleapis/google-cloud-go/issues/10732)) ([5e67869](https://github.com/googleapis/google-cloud-go/commit/5e67869a31e9e8ecb4eeebd2cfa11a761c3b1948)) + +## [0.9.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.8.1...auth/v0.9.0) (2024-08-16) + + +### Features + +* **auth:** Auth library can talk to S2A over mTLS ([#10634](https://github.com/googleapis/google-cloud-go/issues/10634)) ([5250a13](https://github.com/googleapis/google-cloud-go/commit/5250a13ec95b8d4eefbe0158f82857ff2189cb45)) + +## [0.8.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.8.0...auth/v0.8.1) (2024-08-13) + + +### Bug Fixes + +* **auth:** Make default client creation more lenient ([#10669](https://github.com/googleapis/google-cloud-go/issues/10669)) ([1afb9ee](https://github.com/googleapis/google-cloud-go/commit/1afb9ee1ee9de9810722800018133304a0ca34d1)), refs [#10638](https://github.com/googleapis/google-cloud-go/issues/10638) + +## [0.8.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.3...auth/v0.8.0) (2024-08-07) + + +### Features + +* **auth:** Adds support for X509 workload identity federation ([#10373](https://github.com/googleapis/google-cloud-go/issues/10373)) ([5d07505](https://github.com/googleapis/google-cloud-go/commit/5d075056cbe27bb1da4072a26070c41f8999eb9b)) + +## [0.7.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.2...auth/v0.7.3) (2024-08-01) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) +* **auth:** Disable automatic universe domain check for MDS ([#10620](https://github.com/googleapis/google-cloud-go/issues/10620)) ([7cea5ed](https://github.com/googleapis/google-cloud-go/commit/7cea5edd5a0c1e6bca558696f5607879141910e8)) +* **auth:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) + +## [0.7.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.1...auth/v0.7.2) (2024-07-22) + + +### Bug Fixes + +* **auth:** Use default client for universe metadata 
lookup ([#10551](https://github.com/googleapis/google-cloud-go/issues/10551)) ([d9046fd](https://github.com/googleapis/google-cloud-go/commit/d9046fdd1435d1ce48f374806c1def4cb5ac6cd3)), refs [#10544](https://github.com/googleapis/google-cloud-go/issues/10544) + +## [0.7.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.0...auth/v0.7.1) (2024-07-10) + + +### Bug Fixes + +* **auth:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5)) + +## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.6.1...auth/v0.7.0) (2024-07-09) + + +### Features + +* **auth:** Add workload X509 cert provider as a default cert provider ([#10479](https://github.com/googleapis/google-cloud-go/issues/10479)) ([c51ee6c](https://github.com/googleapis/google-cloud-go/commit/c51ee6cf65ce05b4d501083e49d468c75ac1ea63)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) +* **auth:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) +* **auth:** Check len of slices, not non-nil ([#10483](https://github.com/googleapis/google-cloud-go/issues/10483)) ([0a966a1](https://github.com/googleapis/google-cloud-go/commit/0a966a183e5f0e811977216d736d875b7233e942)) + +## [0.6.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.6.0...auth/v0.6.1) (2024-07-01) + + +### Bug Fixes + +* **auth:** Support gRPC API keys ([#10460](https://github.com/googleapis/google-cloud-go/issues/10460)) ([daa6646](https://github.com/googleapis/google-cloud-go/commit/daa6646d2af5d7fb5b30489f4934c7db89868c7c)) +* **auth:** Update http and grpc transports to support token exchange over mTLS ([#10397](https://github.com/googleapis/google-cloud-go/issues/10397)) ([c6dfdcf](https://github.com/googleapis/google-cloud-go/commit/c6dfdcf893c3f971eba15026c12db0a960ae81f2)) + +## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.2...auth/v0.6.0) (2024-06-25) + + +### Features + +* **auth:** Add non-blocking token refresh for compute MDS ([#10263](https://github.com/googleapis/google-cloud-go/issues/10263)) ([9ac350d](https://github.com/googleapis/google-cloud-go/commit/9ac350da11a49b8e2174d3fc5b1a5070fec78b4e)) + + +### Bug Fixes + +* **auth:** Return error if envvar detected file returns an error ([#10431](https://github.com/googleapis/google-cloud-go/issues/10431)) ([e52b9a7](https://github.com/googleapis/google-cloud-go/commit/e52b9a7c45468827f5d220ab00965191faeb9d05)) + +## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.1...auth/v0.5.2) (2024-06-24) + + +### Bug Fixes + +* **auth:** Fetch initial token when CachedTokenProviderOptions.DisableAutoRefresh is true ([#10415](https://github.com/googleapis/google-cloud-go/issues/10415)) ([3266763](https://github.com/googleapis/google-cloud-go/commit/32667635ca2efad05cd8c087c004ca07d7406913)), refs [#10414](https://github.com/googleapis/google-cloud-go/issues/10414) + ## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.0...auth/v0.5.1) (2024-05-31) diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/README.md b/terraform/providers/google/vendor/cloud.google.com/go/auth/README.md index 36de276a074..6fe4f0763e3 100644 --- 
a/terraform/providers/google/vendor/cloud.google.com/go/auth/README.md +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/README.md @@ -1,4 +1,40 @@ -# auth +# Google Auth Library for Go -This module is currently EXPERIMENTAL and under active development. It is not -yet intended to be used. +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/auth.svg)](https://pkg.go.dev/cloud.google.com/go/auth) + +## Install + +``` bash +go get cloud.google.com/go/auth@latest +``` + +## Usage + +The most common way this library is used is transitively, by default, from any +of our Go client libraries. + +### Notable use-cases + +- To create a credential directly please see examples in the + [credentials](https://pkg.go.dev/cloud.google.com/go/auth/credentials) + package. +- To create an authenticated HTTP client please see examples in the + [httptransport](https://pkg.go.dev/cloud.google.com/go/auth/httptransport) + package. +- To create an authenticated gRPC connection please see examples in the + [grpctransport](https://pkg.go.dev/cloud.google.com/go/auth/grpctransport) + package. +- To create an ID token please see examples in the + [idtoken](https://pkg.go.dev/cloud.google.com/go/auth/credentials/idtoken) + package. + +## Contributing + +Contributions are welcome. Please see the +[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. +See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/auth.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/auth.go index d579e482e89..314bd292e3f 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/auth.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/auth.go @@ -12,6 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package auth provides utilities for managing Google Cloud credentials, +// including functionality for creating, caching, and refreshing OAuth2 tokens. +// It offers customizable options for different OAuth2 flows, such as 2-legged +// (2LO) and 3-legged (3LO) OAuth, along with support for PKCE and automatic +// token management. package auth import ( @@ -44,6 +49,21 @@ const ( universeDomainDefault = "googleapis.com" ) +// tokenState represents different states for a [Token]. +type tokenState int + +const ( + // fresh indicates that the [Token] is valid. It is not expired or close to + // expired, or the token has no expiry. + fresh tokenState = iota + // stale indicates that the [Token] is close to expired, and should be + // refreshed. The token can be used normally. + stale + // invalid indicates that the [Token] is expired or invalid. The token + // cannot be used for a normal operation. + invalid +) + var ( defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" defaultHeader = &jwt.Header{Algorithm: jwt.HeaderAlgRSA256, Type: jwt.HeaderType} @@ -81,13 +101,27 @@ type Token struct { // IsValid reports that a [Token] is non-nil, has a [Token.Value], and has not // expired. A token is considered expired if [Token.Expiry] has passed or will -// pass in the next 10 seconds. +// pass in the next 225 seconds.
func (t *Token) IsValid() bool { return t.isValidWithEarlyExpiry(defaultExpiryDelta) } +// MetadataString is a convenience method for accessing string values in the +// token's metadata. Returns an empty string if the metadata is nil or the value +// for the given key cannot be cast to a string. +func (t *Token) MetadataString(k string) string { + if t.Metadata == nil { + return "" + } + s, ok := t.Metadata[k].(string) + if !ok { + return "" + } + return s +} + func (t *Token) isValidWithEarlyExpiry(earlyExpiry time.Duration) bool { - if t == nil || t.Value == "" { + if t.isEmpty() { return false } if t.Expiry.IsZero() { @@ -96,8 +130,14 @@ func (t *Token) isValidWithEarlyExpiry(earlyExpiry time.Duration) bool { return !t.Expiry.Round(0).Add(-earlyExpiry).Before(timeNow()) } +func (t *Token) isEmpty() bool { + return t == nil || t.Value == "" +} + // Credentials holds Google credentials, including -// [Application Default Credentials](https://developers.google.com/accounts/docs/application-default-credentials). +// [Application Default Credentials]. +// +// [Application Default Credentials]: https://developers.google.com/accounts/docs/application-default-credentials type Credentials struct { json []byte projectID CredentialsPropertyProvider @@ -206,11 +246,15 @@ func NewCredentials(opts *CredentialsOptions) *Credentials { // CachedTokenProvider. type CachedTokenProviderOptions struct { // DisableAutoRefresh makes the TokenProvider always return the same token, - // even if it is expired. + // even if it is expired. The default is false. Optional. DisableAutoRefresh bool // ExpireEarly configures the amount of time before a token expires, that it - // should be refreshed. If unset, the default value is 10 seconds. + // should be refreshed. If unset, the default value is 3 minutes and 45 + // seconds. Optional. ExpireEarly time.Duration + // DisableAsyncRefresh configures a synchronous workflow that refreshes + // stale tokens while blocking. The default is false. Optional. + DisableAsyncRefresh bool } func (ctpo *CachedTokenProviderOptions) autoRefresh() bool { @@ -221,40 +265,135 @@ func (ctpo *CachedTokenProviderOptions) autoRefresh() bool { } func (ctpo *CachedTokenProviderOptions) expireEarly() time.Duration { - if ctpo == nil { + if ctpo == nil || ctpo.ExpireEarly == 0 { return defaultExpiryDelta } return ctpo.ExpireEarly } +func (ctpo *CachedTokenProviderOptions) blockingRefresh() bool { + if ctpo == nil { + return false + } + return ctpo.DisableAsyncRefresh +} + // NewCachedTokenProvider wraps a [TokenProvider] to cache the tokens returned -// by the underlying provider. By default it will refresh tokens ten seconds -// before they expire, but this time can be configured with the optional -// options. +// by the underlying provider. By default it will refresh tokens asynchronously +// (non-blocking mode) within a window that starts 3 minutes and 45 seconds +// before they expire. The asynchronous (non-blocking) refresh can be changed to +// a synchronous (blocking) refresh using the +// CachedTokenProviderOptions.DisableAsyncRefresh option. The time-before-expiry +// duration can be configured using the CachedTokenProviderOptions.ExpireEarly +// option. 
func NewCachedTokenProvider(tp TokenProvider, opts *CachedTokenProviderOptions) TokenProvider { if ctp, ok := tp.(*cachedTokenProvider); ok { return ctp } return &cachedTokenProvider{ - tp: tp, - autoRefresh: opts.autoRefresh(), - expireEarly: opts.expireEarly(), + tp: tp, + autoRefresh: opts.autoRefresh(), + expireEarly: opts.expireEarly(), + blockingRefresh: opts.blockingRefresh(), } } type cachedTokenProvider struct { - tp TokenProvider - autoRefresh bool - expireEarly time.Duration + tp TokenProvider + autoRefresh bool + expireEarly time.Duration + blockingRefresh bool mu sync.Mutex cachedToken *Token + // isRefreshRunning ensures that the non-blocking refresh will only be + // attempted once, even if multiple callers enter the Token method. + isRefreshRunning bool + // isRefreshErr ensures that the non-blocking refresh will only be attempted + // once per refresh window if an error is encountered. + isRefreshErr bool } func (c *cachedTokenProvider) Token(ctx context.Context) (*Token, error) { + if c.blockingRefresh { + return c.tokenBlocking(ctx) + } + return c.tokenNonBlocking(ctx) +} + +func (c *cachedTokenProvider) tokenNonBlocking(ctx context.Context) (*Token, error) { + switch c.tokenState() { + case fresh: + c.mu.Lock() + defer c.mu.Unlock() + return c.cachedToken, nil + case stale: + // Call tokenAsync with a new Context because the user-provided context + // may have a short timeout incompatible with async token refresh. + c.tokenAsync(context.Background()) + // Return the stale token immediately to not block customer requests to Cloud services. + c.mu.Lock() + defer c.mu.Unlock() + return c.cachedToken, nil + default: // invalid + return c.tokenBlocking(ctx) + } +} + +// tokenState reports the token's validity. +func (c *cachedTokenProvider) tokenState() tokenState { + c.mu.Lock() + defer c.mu.Unlock() + t := c.cachedToken + now := timeNow() + if t == nil || t.Value == "" { + return invalid + } else if t.Expiry.IsZero() { + return fresh + } else if now.After(t.Expiry.Round(0)) { + return invalid + } else if now.After(t.Expiry.Round(0).Add(-c.expireEarly)) { + return stale + } + return fresh +} + +// tokenAsync uses a bool to ensure that only one non-blocking token refresh +// happens at a time, even if multiple callers have entered this function +// concurrently. This avoids creating an arbitrary number of concurrent +// goroutines. Retries should be attempted and managed within the Token method. +// If the refresh attempt fails, no further attempts are made until the refresh +// window expires and the token enters the invalid state, at which point the +// blocking call to Token should likely return the same error on the main goroutine. +func (c *cachedTokenProvider) tokenAsync(ctx context.Context) { + fn := func() { + c.mu.Lock() + c.isRefreshRunning = true + c.mu.Unlock() + t, err := c.tp.Token(ctx) + c.mu.Lock() + defer c.mu.Unlock() + c.isRefreshRunning = false + if err != nil { + // Discard errors from the non-blocking refresh, but prevent further + // attempts. 
+ c.isRefreshErr = true + return + } + c.cachedToken = t + } + c.mu.Lock() + defer c.mu.Unlock() + if !c.isRefreshRunning && !c.isRefreshErr { + go fn() + } +} + +func (c *cachedTokenProvider) tokenBlocking(ctx context.Context) (*Token, error) { c.mu.Lock() defer c.mu.Unlock() - if c.cachedToken.IsValid() || !c.autoRefresh { + c.isRefreshErr = false + if c.cachedToken.IsValid() || (!c.autoRefresh && !c.cachedToken.isEmpty()) { return c.cachedToken, nil } t, err := c.tp.Token(ctx) @@ -364,7 +503,7 @@ func (o *Options2LO) client() *http.Client { if o.Client != nil { return o.Client } - return internal.CloneDefaultClient() + return internal.DefaultClient() } func (o *Options2LO) validate() error { @@ -423,12 +562,12 @@ func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) { v := url.Values{} v.Set("grant_type", defaultGrantType) v.Set("assertion", payload) - resp, err := tp.Client.PostForm(tp.opts.TokenURL, v) + req, err := http.NewRequestWithContext(ctx, "POST", tp.opts.TokenURL, strings.NewReader(v.Encode())) if err != nil { - return nil, fmt.Errorf("auth: cannot fetch token: %w", err) + return nil, err } - defer resp.Body.Close() - body, err := internal.ReadAll(resp.Body) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + resp, body, err := internal.DoRequest(tp.Client, req) if err != nil { return nil, fmt.Errorf("auth: cannot fetch token: %w", err) } diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/compute.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/compute.go index f3ec8882424..6f70fa353b0 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/compute.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/compute.go @@ -37,9 +37,10 @@ var ( // computeTokenProvider creates a [cloud.google.com/go/auth.TokenProvider] that // uses the metadata service to retrieve tokens. -func computeTokenProvider(earlyExpiry time.Duration, scope ...string) auth.TokenProvider { - return auth.NewCachedTokenProvider(computeProvider{scopes: scope}, &auth.CachedTokenProviderOptions{ - ExpireEarly: earlyExpiry, +func computeTokenProvider(opts *DetectOptions) auth.TokenProvider { + return auth.NewCachedTokenProvider(computeProvider{scopes: opts.Scopes}, &auth.CachedTokenProviderOptions{ + ExpireEarly: opts.EarlyTokenRefresh, + DisableAsyncRefresh: opts.DisableAsyncRefresh, }) } diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/detect.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/detect.go index cb3f44f5873..010afc37c8f 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/detect.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/detect.go @@ -37,6 +37,9 @@ const ( googleAuthURL = "https://accounts.google.com/o/oauth2/auth" googleTokenURL = "https://oauth2.googleapis.com/token" + // GoogleMTLSTokenURL is Google's default OAuth2.0 mTLS endpoint. 
+ GoogleMTLSTokenURL = "https://oauth2.mtls.googleapis.com/token" + // Help on default credentials adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" ) @@ -73,16 +76,18 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { if err := opts.validate(); err != nil { return nil, err } - if opts.CredentialsJSON != nil { + if len(opts.CredentialsJSON) > 0 { return readCredentialsFileJSON(opts.CredentialsJSON, opts) } if opts.CredentialsFile != "" { return readCredentialsFile(opts.CredentialsFile, opts) } if filename := os.Getenv(credsfile.GoogleAppCredsEnvVar); filename != "" { - if creds, err := readCredentialsFile(filename, opts); err == nil { - return creds, err + creds, err := readCredentialsFile(filename, opts) + if err != nil { + return nil, err } + return creds, nil } fileName := credsfile.GetWellKnownFileName() @@ -92,9 +97,9 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { if OnGCE() { return auth.NewCredentials(&auth.CredentialsOptions{ - TokenProvider: computeTokenProvider(opts.EarlyTokenRefresh, opts.Scopes...), - ProjectIDProvider: auth.CredentialsPropertyFunc(func(context.Context) (string, error) { - return metadata.ProjectID() + TokenProvider: computeTokenProvider(opts), + ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return metadata.ProjectIDWithContext(ctx) }), UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{}, }), nil @@ -116,8 +121,13 @@ type DetectOptions struct { // Optional. Subject string // EarlyTokenRefresh configures how early before a token expires that it - // should be refreshed. + // should be refreshed. Once the token’s time until expiration has entered + // this refresh window the token is considered valid but stale. If unset, + // the default value is 3 minutes and 45 seconds. Optional. EarlyTokenRefresh time.Duration + // DisableAsyncRefresh configures a synchronous workflow that refreshes + // stale tokens while blocking. The default is false. Optional. + DisableAsyncRefresh bool // AuthHandlerOptions configures an authorization handler and other options // for 3LO flows. It is required, and only used, for client credential // flows. 
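For readers tracing the new refresh behavior, here is a minimal usage sketch of the two knobs added to `DetectOptions` in the hunk above. It is illustrative only, not part of the vendored patch: it assumes the exported `credentials.DetectDefault` API and field names exactly as they appear in this diff, and the 4-minute window is an arbitrary example value.

```go
package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/auth/credentials"
)

func main() {
	// Detect Application Default Credentials with an explicit refresh window:
	// tokens are treated as stale (and refreshed) 4 minutes before expiry, and
	// DisableAsyncRefresh forces the refresh to happen synchronously while
	// blocking, instead of in a background goroutine.
	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
		Scopes:              []string{"https://www.googleapis.com/auth/cloud-platform"},
		EarlyTokenRefresh:   4 * time.Minute,
		DisableAsyncRefresh: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	tok, err := creds.Token(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("token expires at %v", tok.Expiry)
}
```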
@@ -180,7 +190,7 @@ func (o *DetectOptions) client() *http.Client { if o.Client != nil { return o.Client } - return internal.CloneDefaultClient() + return internal.DefaultClient() } func readCredentialsFile(filename string, opts *DetectOptions) (*auth.Credentials, error) { diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/filetypes.go index fe93557389d..6591b181132 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/filetypes.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/filetypes.go @@ -33,7 +33,7 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { return nil, err } - var projectID, quotaProjectID, universeDomain string + var projectID, universeDomain string var tp auth.TokenProvider switch fileType { case credsfile.ServiceAccountKey: @@ -56,7 +56,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { if err != nil { return nil, err } - quotaProjectID = f.QuotaProjectID universeDomain = f.UniverseDomain case credsfile.ExternalAccountKey: f, err := credsfile.ParseExternalAccount(b) @@ -67,7 +66,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { if err != nil { return nil, err } - quotaProjectID = f.QuotaProjectID universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) case credsfile.ExternalAccountAuthorizedUserKey: f, err := credsfile.ParseExternalAccountAuthorizedUser(b) @@ -78,7 +76,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { if err != nil { return nil, err } - quotaProjectID = f.QuotaProjectID universeDomain = f.UniverseDomain case credsfile.ImpersonatedServiceAccountKey: f, err := credsfile.ParseImpersonatedServiceAccount(b) @@ -108,9 +105,9 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{ ExpireEarly: opts.EarlyTokenRefresh, }), - JSON: b, - ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID), - QuotaProjectIDProvider: internalauth.StaticCredentialsProperty(quotaProjectID), + JSON: b, + ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID), + // TODO(codyoss): only set quota project here if there was a user override UniverseDomainProvider: internalauth.StaticCredentialsProperty(universeDomain), }), nil } @@ -127,8 +124,14 @@ func resolveUniverseDomain(optsUniverseDomain, fileUniverseDomain string) string } func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) if opts.UseSelfSignedJWT { return configureSelfSignedJWT(f, opts) + } else if ud != "" && ud != internalauth.DefaultUniverseDomain { + // For non-GDU universe domains, token exchange is impossible and services + // must support self-signed JWTs. 
+ opts.UseSelfSignedJWT = true + return configureSelfSignedJWT(f, opts) } opts2LO := &auth.Options2LO{ Email: f.ClientEmail, @@ -174,6 +177,7 @@ func handleExternalAccount(f *credsfile.ExternalAccountFile, opts *DetectOptions Scopes: opts.scopes(), WorkforcePoolUserProject: f.WorkforcePoolUserProject, Client: opts.client(), + IsDefaultClient: opts.Client == nil, } if f.ServiceAccountImpersonation != nil { externalOpts.ServiceAccountImpersonationLifetimeSeconds = f.ServiceAccountImpersonation.TokenLifetimeSeconds diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/idtoken/cache.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/idtoken/cache.go index 6eb6d3b4445..e6f4ff81160 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/idtoken/cache.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/idtoken/cache.go @@ -23,6 +23,8 @@ import ( "strings" "sync" "time" + + "cloud.google.com/go/auth/internal" ) type cachingClient struct { @@ -52,22 +54,20 @@ func (c *cachingClient) getCert(ctx context.Context, url string) (*certResponse, if response, ok := c.get(url); ok { return response, nil } - req, err := http.NewRequest(http.MethodGet, url, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) if err != nil { return nil, err } - req = req.WithContext(ctx) - resp, err := c.client.Do(req) + resp, body, err := internal.DoRequest(c.client, req) if err != nil { return nil, err } - defer resp.Body.Close() if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("idtoken: unable to retrieve cert, got status code %d", resp.StatusCode) } certResp := &certResponse{} - if err := json.NewDecoder(resp.Body).Decode(certResp); err != nil { + if err := json.Unmarshal(body, &certResp); err != nil { return nil, err } diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/idtoken/compute.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/idtoken/compute.go index d6757b60f87..dced1ec4044 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/idtoken/compute.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/idtoken/compute.go @@ -43,8 +43,8 @@ func computeCredentials(opts *Options) (*auth.Credentials, error) { TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{ ExpireEarly: 5 * time.Minute, }), - ProjectIDProvider: auth.CredentialsPropertyFunc(func(context.Context) (string, error) { - return metadata.ProjectID() + ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return metadata.ProjectIDWithContext(ctx) }), UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{}, }), nil @@ -66,7 +66,7 @@ func (c computeIDTokenProvider) Token(ctx context.Context) (*auth.Token, error) v.Set("licenses", "TRUE") } urlSuffix := identitySuffix + "?" 
+ v.Encode() - res, err := c.client.Get(urlSuffix) + res, err := c.client.GetWithContext(ctx, urlSuffix) if err != nil { return nil, err } diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/idtoken/idtoken.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/idtoken/idtoken.go index fc26505547e..b66c6551e6e 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/idtoken/idtoken.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/idtoken/idtoken.go @@ -12,6 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package idtoken provides functionality for generating and validating ID +// tokens, with configurable options for audience, custom claims, and token +// formats. +// +// For more information on ID tokens, see +// https://cloud.google.com/docs/authentication/token-types#id. package idtoken import ( @@ -72,7 +78,7 @@ type Options struct { func (o *Options) client() *http.Client { if o == nil || o.Client == nil { - return internal.CloneDefaultClient() + return internal.DefaultClient() } return o.Client } @@ -104,7 +110,7 @@ func NewCredentials(opts *Options) (*auth.Credentials, error) { } func (o *Options) jsonBytes() []byte { - if o.CredentialsJSON != nil { + if len(o.CredentialsJSON) > 0 { return o.CredentialsJSON } var fnOverride string diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/idtoken/validate.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/idtoken/validate.go index d653bf2c189..4b17af20211 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/idtoken/validate.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/idtoken/validate.go @@ -40,7 +40,7 @@ const ( ) var ( - defaultValidator = &Validator{client: newCachingClient(internal.CloneDefaultClient())} + defaultValidator = &Validator{client: newCachingClient(internal.DefaultClient())} // now aliases time.Now for testing. 
now = time.Now ) @@ -83,7 +83,7 @@ func NewValidator(opts *ValidatorOptions) (*Validator, error) { if opts != nil && opts.Client != nil { client = opts.Client } else { - client = internal.CloneDefaultClient() + client = internal.DefaultClient() } return &Validator{client: newCachingClient(client)}, nil } diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/impersonate/idtoken.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/impersonate/idtoken.go index 95a4c492ebf..e51bee7d876 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/impersonate/idtoken.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/impersonate/idtoken.go @@ -103,7 +103,7 @@ func NewIDTokenCredentials(opts *IDTokenOptions) (*auth.Credentials, error) { } } else if opts.Client == nil { creds = opts.Credentials - client = internal.CloneDefaultClient() + client = internal.DefaultClient() if err := httptransport.AddAuthorizationMiddleware(client, opts.Credentials); err != nil { return nil, err } @@ -162,20 +162,15 @@ func (i impersonatedIDTokenProvider) Token(ctx context.Context) (*auth.Token, er } url := fmt.Sprintf("%s/v1/%s:generateIdToken", iamCredentialsEndpoint, formatIAMServiceAccountName(i.targetPrincipal)) - req, err := http.NewRequest("POST", url, bytes.NewReader(bodyBytes)) + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(bodyBytes)) if err != nil { return nil, fmt.Errorf("impersonate: unable to create request: %w", err) } req.Header.Set("Content-Type", "application/json") - resp, err := i.client.Do(req) + resp, body, err := internal.DoRequest(i.client, req) if err != nil { return nil, fmt.Errorf("impersonate: unable to generate ID token: %w", err) } - defer resp.Body.Close() - body, err := internal.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to read body: %w", err) - } if c := resp.StatusCode; c < 200 || c > 299 { return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) } diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/impersonate/impersonate.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/impersonate/impersonate.go index a0045db45fd..91b42bc3f7f 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/impersonate/impersonate.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/impersonate/impersonate.go @@ -79,7 +79,7 @@ func NewCredentials(opts *CredentialsOptions) (*auth.Credentials, error) { } } else if opts.Credentials != nil { creds = opts.Credentials - client = internal.CloneDefaultClient() + client = internal.DefaultClient() if err := httptransport.AddAuthorizationMiddleware(client, opts.Credentials); err != nil { return nil, err } @@ -238,21 +238,15 @@ func (i impersonatedTokenProvider) Token(ctx context.Context) (*auth.Token, erro return nil, fmt.Errorf("impersonate: unable to marshal request: %w", err) } url := fmt.Sprintf("%s/v1/%s:generateAccessToken", iamCredentialsEndpoint, formatIAMServiceAccountName(i.targetPrincipal)) - req, err := http.NewRequest("POST", url, bytes.NewReader(b)) + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(b)) if err != nil { return nil, fmt.Errorf("impersonate: unable to create request: %w", err) } req.Header.Set("Content-Type", "application/json") - - resp, err := i.client.Do(req) + resp, body, err := internal.DoRequest(i.client, req) if err != 
nil { return nil, fmt.Errorf("impersonate: unable to generate access token: %w", err) } - defer resp.Body.Close() - body, err := internal.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to read body: %w", err) - } if c := resp.StatusCode; c < 200 || c > 299 { return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) } diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/impersonate/user.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/impersonate/user.go index 5aefa2a8e30..1acaaa922d9 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/impersonate/user.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/impersonate/user.go @@ -92,14 +92,14 @@ type userTokenProvider struct { } func (u userTokenProvider) Token(ctx context.Context) (*auth.Token, error) { - signedJWT, err := u.signJWT() + signedJWT, err := u.signJWT(ctx) if err != nil { return nil, err } return u.exchangeToken(ctx, signedJWT) } -func (u userTokenProvider) signJWT() (string, error) { +func (u userTokenProvider) signJWT(ctx context.Context) (string, error) { now := time.Now() exp := now.Add(u.lifetime) claims := claimSet{ @@ -124,20 +124,16 @@ func (u userTokenProvider) signJWT() (string, error) { return "", fmt.Errorf("impersonate: unable to marshal request: %w", err) } reqURL := fmt.Sprintf("%s/v1/%s:signJwt", iamCredentialsEndpoint, formatIAMServiceAccountName(u.targetPrincipal)) - req, err := http.NewRequest("POST", reqURL, bytes.NewReader(bodyBytes)) + req, err := http.NewRequestWithContext(ctx, "POST", reqURL, bytes.NewReader(bodyBytes)) if err != nil { return "", fmt.Errorf("impersonate: unable to create request: %w", err) } req.Header.Set("Content-Type", "application/json") - rawResp, err := u.client.Do(req) + resp, body, err := internal.DoRequest(u.client, req) if err != nil { return "", fmt.Errorf("impersonate: unable to sign JWT: %w", err) } - body, err := internal.ReadAll(rawResp.Body) - if err != nil { - return "", fmt.Errorf("impersonate: unable to read body: %w", err) - } - if c := rawResp.StatusCode; c < 200 || c > 299 { + if c := resp.StatusCode; c < 200 || c > 299 { return "", fmt.Errorf("impersonate: status code %d: %s", c, body) } @@ -157,15 +153,11 @@ func (u userTokenProvider) exchangeToken(ctx context.Context, signedJWT string) if err != nil { return nil, err } - rawResp, err := u.client.Do(req) + resp, body, err := internal.DoRequest(u.client, req) if err != nil { return nil, fmt.Errorf("impersonate: unable to exchange token: %w", err) } - body, err := internal.ReadAll(rawResp.Body) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to read body: %w", err) - } - if c := rawResp.StatusCode; c < 200 || c > 299 { + if c := resp.StatusCode; c < 200 || c > 299 { return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) } diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go index d9e1dcddf64..d8b5d4fdeb9 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go @@ -94,35 +94,33 @@ func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) if 
sp.RegionalCredVerificationURL == "" { sp.RegionalCredVerificationURL = defaultRegionalCredentialVerificationURL } - if sp.requestSigner == nil { - headers := make(map[string]string) - if sp.shouldUseMetadataServer() { - awsSessionToken, err := sp.getAWSSessionToken(ctx) - if err != nil { - return "", err - } - - if awsSessionToken != "" { - headers[awsIMDSv2SessionTokenHeader] = awsSessionToken - } - } - - awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers) + headers := make(map[string]string) + if sp.shouldUseMetadataServer() { + awsSessionToken, err := sp.getAWSSessionToken(ctx) if err != nil { return "", err } - if sp.region, err = sp.getRegion(ctx, headers); err != nil { - return "", err - } - sp.requestSigner = &awsRequestSigner{ - RegionName: sp.region, - AwsSecurityCredentials: awsSecurityCredentials, + + if awsSessionToken != "" { + headers[awsIMDSv2SessionTokenHeader] = awsSessionToken } } + awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers) + if err != nil { + return "", err + } + if sp.region, err = sp.getRegion(ctx, headers); err != nil { + return "", err + } + sp.requestSigner = &awsRequestSigner{ + RegionName: sp.region, + AwsSecurityCredentials: awsSecurityCredentials, + } + // Generate the signed request to AWS STS GetCallerIdentity API. // Use the required regional endpoint. Otherwise, the request will fail. - req, err := http.NewRequest("POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil) + req, err := http.NewRequestWithContext(ctx, "POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil) if err != nil { return "", err } @@ -194,20 +192,14 @@ func (sp *awsSubjectProvider) getAWSSessionToken(ctx context.Context) (string, e } req.Header.Set(awsIMDSv2SessionTTLHeader, awsIMDSv2SessionTTL) - resp, err := sp.Client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - respBody, err := internal.ReadAll(resp.Body) + resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", err } if resp.StatusCode != http.StatusOK { - return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", respBody) + return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", body) } - return string(respBody), nil + return string(body), nil } func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string]string) (string, error) { @@ -233,29 +225,21 @@ func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string] for name, value := range headers { req.Header.Add(name, value) } - - resp, err := sp.Client.Do(req) + resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", err } - defer resp.Body.Close() - - respBody, err := internal.ReadAll(resp.Body) - if err != nil { - return "", err - } - if resp.StatusCode != http.StatusOK { - return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", respBody) + return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", body) } // This endpoint will return the region in format: us-east-2b. // Only the us-east-2 part should be used. 
- bodyLen := len(respBody) + bodyLen := len(body) if bodyLen == 0 { return "", nil } - return string(respBody[:bodyLen-1]), nil + return string(body[:bodyLen-1]), nil } func (sp *awsSubjectProvider) getSecurityCredentials(ctx context.Context, headers map[string]string) (result *AwsSecurityCredentials, err error) { @@ -299,22 +283,17 @@ func (sp *awsSubjectProvider) getMetadataSecurityCredentials(ctx context.Context for name, value := range headers { req.Header.Add(name, value) } - - resp, err := sp.Client.Do(req) - if err != nil { - return result, err - } - defer resp.Body.Close() - - respBody, err := internal.ReadAll(resp.Body) + resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return result, err } if resp.StatusCode != http.StatusOK { - return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", respBody) + return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", body) + } + if err := json.Unmarshal(body, &result); err != nil { + return nil, err } - err = json.Unmarshal(respBody, &result) - return result, err + return result, nil } func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers map[string]string) (string, error) { @@ -329,20 +308,14 @@ func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers m req.Header.Add(name, value) } - resp, err := sp.Client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - respBody, err := internal.ReadAll(resp.Body) + resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", err } if resp.StatusCode != http.StatusOK { - return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", respBody) + return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", body) } - return string(respBody), nil + return string(body), nil } // awsRequestSigner is a utility class to sign http requests using an AWS V4 signature. diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go index b19c6edeae5..112186a9e6e 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go @@ -100,6 +100,10 @@ type Options struct { AwsSecurityCredentialsProvider AwsSecurityCredentialsProvider // Client for token request. Client *http.Client + // IsDefaultClient marks whether the client passed in is a default client that can be overridden. + // This is important for X509 credentials which should create a new client if the default was used + // but should respect a client explicitly passed in by the user. + IsDefaultClient bool } // SubjectTokenProvider can be used to supply a subject token to exchange for a @@ -181,6 +185,26 @@ func (o *Options) validate() error { return nil } +// client returns the http client that should be used for the token exchange. If a non-default client +// is provided, then the client configured in the options will always be returned. If a default client +// is provided and the options are configured for X509 credentials, a new client will be created.
+func (o *Options) client() (*http.Client, error) { + // If a client was provided and no override certificate config location was provided, use the provided client. + if o.CredentialSource == nil || o.CredentialSource.Certificate == nil || (!o.IsDefaultClient && o.CredentialSource.Certificate.CertificateConfigLocation == "") { + return o.Client, nil + } + + // If a new client should be created, validate and use the certificate source to create a new mTLS client. + cert := o.CredentialSource.Certificate + if !cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation == "" { + return nil, errors.New("credentials: \"certificate\" object must either specify a certificate_config_location or use_default_certificate_config should be true") + } + if cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation != "" { + return nil, errors.New("credentials: \"certificate\" object cannot specify both a certificate_config_location and use_default_certificate_config=true") + } + return createX509Client(cert.CertificateConfigLocation) +} + // resolveTokenURL sets the default STS token endpoint with the configured // universe domain. func (o *Options) resolveTokenURL() { @@ -204,11 +228,18 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { if err != nil { return nil, err } + + client, err := opts.client() + if err != nil { + return nil, err + } + tp := &tokenProvider{ - client: opts.Client, + client: client, opts: opts, stp: stp, } + if opts.ServiceAccountImpersonationURL == "" { return auth.NewCachedTokenProvider(tp, nil), nil } @@ -218,7 +249,7 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { // needed for impersonation tp.opts.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"} imp, err := impersonate.NewTokenProvider(&impersonate.Options{ - Client: opts.Client, + Client: client, URL: opts.ServiceAccountImpersonationURL, Scopes: scopes, Tp: auth.NewCachedTokenProvider(tp, nil), @@ -353,6 +384,15 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { execProvider.opts = o execProvider.env = runtimeEnvironment{} return execProvider, nil + } else if o.CredentialSource.Certificate != nil { + cert := o.CredentialSource.Certificate + if !cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation == "" { + return nil, errors.New("credentials: \"certificate\" object must either specify a certificate_config_location or use_default_certificate_config should be true") + } + if cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation != "" { + return nil, errors.New("credentials: \"certificate\" object cannot specify both a certificate_config_location and use_default_certificate_config=true") + } + return &x509Provider{}, nil } return nil, errors.New("credentials: unable to parse credential source") } diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go index 22b8af1c11b..0a020599e07 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go @@ -30,6 +30,7 @@ const ( fileTypeJSON = "json" urlProviderType = "url" programmaticProviderType = "programmatic" + x509ProviderType = "x509" ) type urlSubjectProvider struct { @@ -48,27 +49,21 @@ func 
(sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) for key, val := range sp.Headers { req.Header.Add(key, val) } - resp, err := sp.Client.Do(req) + resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", fmt.Errorf("credentials: invalid response when retrieving subject token: %w", err) } - defer resp.Body.Close() - - respBody, err := internal.ReadAll(resp.Body) - if err != nil { - return "", fmt.Errorf("credentials: invalid body in subject token URL query: %w", err) - } if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { - return "", fmt.Errorf("credentials: status code %d: %s", c, respBody) + return "", fmt.Errorf("credentials: status code %d: %s", c, body) } if sp.Format == nil { - return string(respBody), nil + return string(body), nil } switch sp.Format.Type { case "json": jsonData := make(map[string]interface{}) - err = json.Unmarshal(respBody, &jsonData) + err = json.Unmarshal(body, &jsonData) if err != nil { return "", fmt.Errorf("credentials: failed to unmarshal subject token file: %w", err) } @@ -82,7 +77,7 @@ func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) } return token, nil case fileTypeText: - return string(respBody), nil + return string(body), nil default: return "", errors.New("credentials: invalid credential_source file format type: " + sp.Format.Type) } diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go new file mode 100644 index 00000000000..115df5881f1 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go @@ -0,0 +1,63 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package externalaccount + +import ( + "context" + "crypto/tls" + "net/http" + "time" + + "cloud.google.com/go/auth/internal/transport/cert" +) + +// x509Provider implements the subjectTokenProvider type for +// x509 workload identity credentials. Because x509 credentials +// rely on an mTLS connection to represent the 3rd party identity +// rather than a subject token, this provider will always return +// an empty string when a subject token is requested by the external account +// token provider. +type x509Provider struct { +} + +func (xp *x509Provider) providerType() string { + return x509ProviderType +} + +func (xp *x509Provider) subjectToken(ctx context.Context) (string, error) { + return "", nil +} + +// createX509Client creates a new client that is configured with mTLS, using the +// certificate configuration specified in the credential source. 
+func createX509Client(certificateConfigLocation string) (*http.Client, error) { + certProvider, err := cert.NewWorkloadX509CertProvider(certificateConfigLocation) + if err != nil { + return nil, err + } + trans := http.DefaultTransport.(*http.Transport).Clone() + + trans.TLSClientConfig = &tls.Config{ + GetClientCertificate: certProvider, + } + + // Create a client with default settings plus the X509 workload cert and key. + client := &http.Client{ + Transport: trans, + Timeout: 30 * time.Second, + } + + return client, nil +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go index 467edb9088e..720045d3b07 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go @@ -25,6 +25,7 @@ import ( "net/http" "net/url" "os" + "strings" "time" "cloud.google.com/go/auth" @@ -129,12 +130,13 @@ func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { v.Set("requested_token_type", requestTokenType) v.Set("subject_token", payload) v.Set("subject_token_type", subjectTokenType) - resp, err := g.client.PostForm(g.tokenURL, v) + + req, err := http.NewRequestWithContext(ctx, "POST", g.tokenURL, strings.NewReader(v.Encode())) if err != nil { - return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) + return nil, err } - defer resp.Body.Close() - body, err := internal.ReadAll(resp.Body) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + resp, body, err := internal.DoRequest(g.client, req) if err != nil { return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) } diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go index 3ceab873b8e..ed53afa519e 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go @@ -109,15 +109,10 @@ func (o *Options) Token(ctx context.Context) (*auth.Token, error) { if err := setAuthHeader(ctx, o.Tp, req); err != nil { return nil, err } - resp, err := o.Client.Do(req) + resp, body, err := internal.DoRequest(o.Client, req) if err != nil { return nil, fmt.Errorf("credentials: unable to generate access token: %w", err) } - defer resp.Body.Close() - body, err := internal.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("credentials: unable to read body: %w", err) - } if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { return nil, fmt.Errorf("credentials: status code %d: %s", c, body) } diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go index f70e0aef48f..768a9dafc13 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go @@ -93,16 +93,10 @@ func doRequest(ctx context.Context, opts *Options, data url.Values) 
(*TokenRespo } req.Header.Set("Content-Length", strconv.Itoa(len(encodedData))) - resp, err := opts.Client.Do(req) + resp, body, err := internal.DoRequest(opts.Client, req) if err != nil { return nil, fmt.Errorf("credentials: invalid response from Secure Token Server: %w", err) } - defer resp.Body.Close() - - body, err := internal.ReadAll(resp.Body) - if err != nil { - return nil, err - } if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices { return nil, fmt.Errorf("credentials: status code %d: %s", c, body) } diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go index b62a8ae4d5d..6ae29de6c27 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go @@ -17,6 +17,7 @@ package credentials import ( "context" "crypto/rsa" + "errors" "fmt" "strings" "time" @@ -35,6 +36,9 @@ var ( // configureSelfSignedJWT uses the private key in the service account to create // a JWT without making a network call. func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + if len(opts.scopes()) == 0 && opts.Audience == "" { + return nil, errors.New("credentials: both scopes and audience are empty") + } pk, err := internal.ParseKey([]byte(f.PrivateKey)) if err != nil { return nil, fmt.Errorf("credentials: could not parse key: %w", err) diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/grpctransport/directpath.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/grpctransport/directpath.go index 8dbfa7ef7e9..8696df1487f 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/grpctransport/directpath.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/grpctransport/directpath.go @@ -22,7 +22,7 @@ import ( "strings" "cloud.google.com/go/auth" - "cloud.google.com/go/compute/metadata" + "cloud.google.com/go/auth/internal/compute" "google.golang.org/grpc" grpcgoogle "google.golang.org/grpc/credentials/google" ) @@ -55,7 +55,7 @@ func checkDirectPathEndPoint(endpoint string) bool { return true } -func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool { +func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, o *Options) bool { if tp == nil { return false } @@ -66,10 +66,13 @@ func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool if tok == nil { return false } - if source, _ := tok.Metadata["auth.google.tokenSource"].(string); source != "compute-metadata" { + if o.InternalOptions != nil && o.InternalOptions.EnableNonDefaultSAForDirectPath { + return true + } + if tok.MetadataString("auth.google.tokenSource") != "compute-metadata" { return false } - if acct, _ := tok.Metadata["auth.google.serviceAccount"].(string); acct != "default" { + if tok.MetadataString("auth.google.serviceAccount") != "default" { return false } return true @@ -91,7 +94,7 @@ func isDirectPathXdsUsed(o *Options) bool { // configuration allows the use of direct path. If it does not the provided // grpcOpts and endpoint are returned. 
func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint string, creds *auth.Credentials) ([]grpc.DialOption, string) { - if isDirectPathEnabled(endpoint, opts) && metadata.OnGCE() && isTokenProviderDirectPathCompatible(creds, opts) { + if isDirectPathEnabled(endpoint, opts) && compute.OnComputeEngine() && isTokenProviderDirectPathCompatible(creds, opts) { // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates. grpcOpts = []grpc.DialOption{ grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{PerRPCCreds: &grpcCredentialsProvider{creds: creds}}))} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go index 75bda4c6389..42d4cbe3062 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go @@ -12,22 +12,29 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package grpctransport provides functionality for managing gRPC client +// connections to Google Cloud services. package grpctransport import ( "context" + "crypto/tls" "errors" "fmt" "net/http" + "os" + "sync" "cloud.google.com/go/auth" "cloud.google.com/go/auth/credentials" "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/transport" "go.opencensus.io/plugin/ocgrpc" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "google.golang.org/grpc" grpccreds "google.golang.org/grpc/credentials" grpcinsecure "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/stats" ) const ( @@ -37,7 +44,7 @@ const ( // Check env to decide if using google-c2p resolver for DirectPath traffic. enableDirectPathXdsEnvVar = "GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS" - quotaProjectHeaderKey = "X-Goog-User-Project" + quotaProjectHeaderKey = "X-goog-user-project" ) var ( @@ -45,6 +52,32 @@ var ( timeoutDialerOption grpc.DialOption ) +// otelStatsHandler is a singleton otelgrpc.clientHandler to be used across +// all dial connections to avoid the memory leak documented in +// https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4226 +// +// TODO: When this module depends on a version of otelgrpc containing the fix, +// replace this singleton with inline usage for simplicity. +// The fix should be in https://github.com/open-telemetry/opentelemetry-go/pull/5797. +var ( + initOtelStatsHandlerOnce sync.Once + otelStatsHandler stats.Handler +) + +// otelGRPCStatsHandler returns singleton otelStatsHandler for reuse across all +// dial connections. +func otelGRPCStatsHandler() stats.Handler { + initOtelStatsHandlerOnce.Do(func() { + otelStatsHandler = otelgrpc.NewClientHandler() + }) + return otelStatsHandler +} + +// ClientCertProvider is a function that returns a TLS client certificate to be +// used when opening TLS connections. It follows the same semantics as +// [crypto/tls.Config.GetClientCertificate]. +type ClientCertProvider = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + // Options used to configure a [GRPCClientConnPool] from [Dial]. type Options struct { // DisableTelemetry disables default telemetry (OpenTelemetry). 
An example @@ -69,6 +102,10 @@ type Options struct { // Credentials used to add Authorization metadata to all requests. If set // DetectOpts are ignored. Credentials *auth.Credentials + // ClientCertProvider is a function that returns a TLS client certificate to + // be used when opening TLS connections. It follows the same semantics as + // crypto/tls.Config.GetClientCertificate. + ClientCertProvider ClientCertProvider // DetectOpts configures settings for detect Application Default // Credentials. DetectOpts *credentials.DetectOptions @@ -77,6 +114,9 @@ type Options struct { // configured for the client, which will be compared to the universe domain // that is separately configured for the credentials. UniverseDomain string + // APIKey specifies an API key to be used as the basis for authentication. + // If set DetectOpts are ignored. + APIKey string // InternalOptions are NOT meant to be set directly by consumers of this // package, they should only be set by generated client code. @@ -99,7 +139,8 @@ func (o *Options) validate() error { if o.InternalOptions != nil && o.InternalOptions.SkipValidation { return nil } - hasCreds := o.Credentials != nil || + hasCreds := o.APIKey != "" || + o.Credentials != nil || (o.DetectOpts != nil && len(o.DetectOpts.CredentialsJSON) > 0) || (o.DetectOpts != nil && o.DetectOpts.CredentialsFile != "") if o.DisableAuthentication && hasCreds { @@ -125,6 +166,13 @@ func (o *Options) resolveDetectOptions() *credentials.DetectOptions { if len(do.Scopes) == 0 && do.Audience == "" && io != nil { do.Audience = o.InternalOptions.DefaultAudience } + if o.ClientCertProvider != nil { + tlsConfig := &tls.Config{ + GetClientCertificate: o.ClientCertProvider, + } + do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig) + do.TokenURL = credentials.GoogleMTLSTokenURL + } return do } @@ -189,9 +237,10 @@ func Dial(ctx context.Context, secure bool, opts *Options) (GRPCClientConnPool, // return a GRPCClientConnPool if pool == 1 or else a pool of of them if >1 func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, error) { tOpts := &transport.Options{ - Endpoint: opts.Endpoint, - Client: opts.client(), - UniverseDomain: opts.UniverseDomain, + Endpoint: opts.Endpoint, + ClientCertProvider: opts.ClientCertProvider, + Client: opts.client(), + UniverseDomain: opts.UniverseDomain, } if io := opts.InternalOptions; io != nil { tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate @@ -213,8 +262,21 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er grpc.WithTransportCredentials(transportCreds), } - // Authentication can only be sent when communicating over a secure connection. - if !opts.DisableAuthentication { + // Ensure the token exchange HTTP transport uses the same ClientCertProvider as the GRPC API transport. 
+ opts.ClientCertProvider, err = transport.GetClientCertificateProvider(tOpts) + if err != nil { + return nil, err + } + + if opts.APIKey != "" { + grpcOpts = append(grpcOpts, + grpc.WithPerRPCCredentials(&grpcKeyProvider{ + apiKey: opts.APIKey, + metadata: opts.Metadata, + secure: secure, + }), + ) + } else if !opts.DisableAuthentication { metadata := opts.Metadata var creds *auth.Credentials @@ -236,7 +298,10 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er if metadata == nil { metadata = make(map[string]string, 1) } - metadata[quotaProjectHeaderKey] = qp + // Don't overwrite user specified quota + if _, ok := metadata[quotaProjectHeaderKey]; !ok { + metadata[quotaProjectHeaderKey] = qp + } } grpcOpts = append(grpcOpts, grpc.WithPerRPCCredentials(&grpcCredentialsProvider{ @@ -254,9 +319,30 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er // gRPC stats handler. // This assumes that gRPC options are processed in order, left to right. grpcOpts = addOCStatsHandler(grpcOpts, opts) + grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, opts) grpcOpts = append(grpcOpts, opts.GRPCDialOpts...) - return grpc.DialContext(ctx, endpoint, grpcOpts...) + return grpc.NewClient(endpoint, grpcOpts...) +} + +// grpcKeyProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials. +type grpcKeyProvider struct { + apiKey string + metadata map[string]string + secure bool +} + +func (g *grpcKeyProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + metadata := make(map[string]string, len(g.metadata)+1) + metadata["X-goog-api-key"] = g.apiKey + for k, v := range g.metadata { + metadata[k] = v + } + return metadata, nil +} + +func (g *grpcKeyProvider) RequireTransportSecurity() bool { + return g.secure } // grpcCredentialsProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials. @@ -270,29 +356,39 @@ type grpcCredentialsProvider struct { clientUniverseDomain string } -// getClientUniverseDomain returns the default service domain for a given Cloud universe. -// The default value is "googleapis.com". This is the universe domain -// configured for the client, which will be compared to the universe domain -// that is separately configured for the credentials. +// getClientUniverseDomain returns the default service domain for a given Cloud +// universe, with the following precedence: +// +// 1. A non-empty option.WithUniverseDomain or similar client option. +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN. +// 3. The default value "googleapis.com". +// +// This is the universe domain configured for the client, which will be compared +// to the universe domain that is separately configured for the credentials. 
func (c *grpcCredentialsProvider) getClientUniverseDomain() string { - if c.clientUniverseDomain == "" { - return internal.DefaultUniverseDomain + if c.clientUniverseDomain != "" { + return c.clientUniverseDomain + } + if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" { + return envUD } - return c.clientUniverseDomain + return internal.DefaultUniverseDomain } func (c *grpcCredentialsProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - credentialsUniverseDomain, err := c.creds.UniverseDomain(ctx) - if err != nil { - return nil, err - } - if err := transport.ValidateUniverseDomain(c.getClientUniverseDomain(), credentialsUniverseDomain); err != nil { - return nil, err - } token, err := c.creds.Token(ctx) if err != nil { return nil, err } + if token.MetadataString("auth.google.tokenSource") != "compute-metadata" { + credentialsUniverseDomain, err := c.creds.UniverseDomain(ctx) + if err != nil { + return nil, err + } + if err := transport.ValidateUniverseDomain(c.getClientUniverseDomain(), credentialsUniverseDomain); err != nil { + return nil, err + } + } if c.secure { ri, _ := grpccreds.RequestInfoFromContext(ctx) if err = grpccreds.CheckSecurityLevel(ri.AuthInfo, grpccreds.PrivacyAndIntegrity); err != nil { @@ -327,3 +423,10 @@ func addOCStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOpt } return append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) } + +func addOpenTelemetryStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption { + if opts.DisableTelemetry { + return dialOpts + } + return append(dialOpts, grpc.WithStatsHandler(otelGRPCStatsHandler())) +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/httptransport/httptransport.go index ef09c1b7523..30fedf9562f 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/httptransport/httptransport.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/httptransport/httptransport.go @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package httptransport provides functionality for managing HTTP client +// connections to Google Cloud services. package httptransport import ( @@ -116,6 +118,13 @@ func (o *Options) resolveDetectOptions() *detect.DetectOptions { if len(do.Scopes) == 0 && do.Audience == "" && io != nil { do.Audience = o.InternalOptions.DefaultAudience } + if o.ClientCertProvider != nil { + tlsConfig := &tls.Config{ + GetClientCertificate: o.ClientCertProvider, + } + do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig) + do.TokenURL = detect.GoogleMTLSTokenURL + } return do } @@ -195,6 +204,8 @@ func NewClient(opts *Options) (*http.Client, error) { if baseRoundTripper == nil { baseRoundTripper = defaultBaseTransport(clientCertProvider, dialTLSContext) } + // Ensure the token exchange transport uses the same ClientCertProvider as the API transport. 
+ opts.ClientCertProvider = clientCertProvider trans, err := newTransport(baseRoundTripper, opts) if err != nil { return nil, err diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/httptransport/transport.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/httptransport/transport.go index 94caeb00f0a..63498ee792b 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/httptransport/transport.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/httptransport/transport.go @@ -19,6 +19,7 @@ import ( "crypto/tls" "net" "net/http" + "os" "time" "cloud.google.com/go/auth" @@ -27,11 +28,12 @@ import ( "cloud.google.com/go/auth/internal/transport" "cloud.google.com/go/auth/internal/transport/cert" "go.opencensus.io/plugin/ochttp" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "golang.org/x/net/http2" ) const ( - quotaProjectHeaderKey = "X-Goog-User-Project" + quotaProjectHeaderKey = "X-goog-user-project" ) func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, error) { @@ -41,6 +43,9 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err headers: headers, } var trans http.RoundTripper = ht + // Give OpenTelemetry precedence over OpenCensus in case user configuration + // causes both to write the same header (`X-Cloud-Trace-Context`). + trans = addOpenTelemetryTransport(trans, opts) trans = addOCTransport(trans, opts) switch { case opts.DisableAuthentication: @@ -76,7 +81,10 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err if headers == nil { headers = make(map[string][]string, 1) } - headers.Set(quotaProjectHeaderKey, qp) + // Don't overwrite user specified quota + if v := headers.Get(quotaProjectHeaderKey); v == "" { + headers.Set(quotaProjectHeaderKey, qp) + } } creds.TokenProvider = auth.NewCachedTokenProvider(creds.TokenProvider, nil) trans = &authTransport{ @@ -94,7 +102,11 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err // http.DefaultTransport. // If TLSCertificate is available, set TLSClientConfig as well. func defaultBaseTransport(clientCertSource cert.Provider, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper { - trans := http.DefaultTransport.(*http.Transport).Clone() + defaultTransport, ok := http.DefaultTransport.(*http.Transport) + if !ok { + defaultTransport = transport.BaseTransport() + } + trans := defaultTransport.Clone() trans.MaxIdleConnsPerHost = 100 if clientCertSource != nil { @@ -155,6 +167,13 @@ func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) { return rt.RoundTrip(&newReq) } +func addOpenTelemetryTransport(trans http.RoundTripper, opts *Options) http.RoundTripper { + if opts.DisableTelemetry { + return trans + } + return otelhttp.NewTransport(trans) +} + func addOCTransport(trans http.RoundTripper, opts *Options) http.RoundTripper { if opts.DisableTelemetry { return trans @@ -171,13 +190,23 @@ type authTransport struct { clientUniverseDomain string } -// getClientUniverseDomain returns the universe domain configured for the client. -// The default value is "googleapis.com". +// getClientUniverseDomain returns the default service domain for a given Cloud +// universe, with the following precedence: +// +// 1. A non-empty option.WithUniverseDomain or similar client option. +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN. +// 3. The default value "googleapis.com". 
+// +// This is the universe domain configured for the client, which will be compared +// to the universe domain that is separately configured for the credentials. func (t *authTransport) getClientUniverseDomain() string { - if t.clientUniverseDomain == "" { - return internal.DefaultUniverseDomain + if t.clientUniverseDomain != "" { + return t.clientUniverseDomain + } + if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" { + return envUD } - return t.clientUniverseDomain + return internal.DefaultUniverseDomain } // RoundTrip authorizes and authenticates the request with an @@ -193,17 +222,19 @@ func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) { } }() } - credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context()) - if err != nil { - return nil, err - } - if err := transport.ValidateUniverseDomain(t.getClientUniverseDomain(), credentialsUniverseDomain); err != nil { - return nil, err - } token, err := t.creds.Token(req.Context()) if err != nil { return nil, err } + if token.MetadataString("auth.google.tokenSource") != "compute-metadata" { + credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context()) + if err != nil { + return nil, err + } + if err := transport.ValidateUniverseDomain(t.getClientUniverseDomain(), credentialsUniverseDomain); err != nil { + return nil, err + } + } req2 := req.Clone(req.Context()) SetAuthHeader(token, req2) reqBodyClosed = true diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/compute/compute.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/compute/compute.go new file mode 100644 index 00000000000..651bd61fbbc --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/compute/compute.go @@ -0,0 +1,66 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +import ( + "log" + "runtime" + "strings" + "sync" +) + +var ( + vmOnGCEOnce sync.Once + vmOnGCE bool +) + +// OnComputeEngine returns whether the client is running on GCE. +// +// This is a copy of the gRPC internal googlecloud.OnGCE() func at: +// https://github.com/grpc/grpc-go/blob/master/internal/googlecloud/googlecloud.go +// The functionality is similar to the metadata.OnGCE() func at: +// https://github.com/xmenxk/google-cloud-go/blob/main/compute/metadata/metadata.go +// +// The difference is that OnComputeEngine() does not perform HTTP or DNS check on the metadata server. +// In particular, OnComputeEngine() will return false on Serverless. +func OnComputeEngine() bool { + vmOnGCEOnce.Do(func() { + mf, err := manufacturer() + if err != nil { + log.Printf("Failed to read manufacturer, vmOnGCE=false: %v", err) + return + } + vmOnGCE = isRunningOnGCE(mf, runtime.GOOS) + }) + return vmOnGCE +} + +// isRunningOnGCE checks whether the local system, without doing a network request, is +// running on GCP. 
+func isRunningOnGCE(manufacturer []byte, goos string) bool { + name := string(manufacturer) + switch goos { + case "linux": + name = strings.TrimSpace(name) + return name == "Google" || name == "Google Compute Engine" + case "windows": + name = strings.Replace(name, " ", "", -1) + name = strings.Replace(name, "\n", "", -1) + name = strings.Replace(name, "\r", "", -1) + return name == "Google" + default: + return false + } +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go new file mode 100644 index 00000000000..af490bf4f49 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go @@ -0,0 +1,22 @@ +//go:build !(linux || windows) +// +build !linux,!windows + +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +func manufacturer() ([]byte, error) { + return nil, nil +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go new file mode 100644 index 00000000000..d92178df86c --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go @@ -0,0 +1,23 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +import "os" + +const linuxProductNameFile = "/sys/class/dmi/id/product_name" + +func manufacturer() ([]byte, error) { + return os.ReadFile(linuxProductNameFile) +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go new file mode 100644 index 00000000000..16be9df3064 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go @@ -0,0 +1,46 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +import ( + "errors" + "os/exec" + "regexp" + "strings" +) + +const ( + windowsCheckCommand = "powershell.exe" + windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" + powershellOutputFilter = "Manufacturer" + windowsManufacturerRegex = ":(.*)" +) + +func manufacturer() ([]byte, error) { + cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) + out, err := cmd.Output() + if err != nil { + return nil, err + } + for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { + if strings.HasPrefix(line, powershellOutputFilter) { + re := regexp.MustCompile(windowsManufacturerRegex) + name := re.FindString(line) + name = strings.TrimLeft(name, ":") + return []byte(name), nil + } + } + return nil, errors.New("cannot determine the machine's manufacturer") +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go index 69e30779f98..3be6e5bbb41 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go @@ -90,19 +90,20 @@ type ExternalAccountAuthorizedUserFile struct { // CredentialSource stores the information necessary to retrieve the credentials for the STS exchange. // -// One field amongst File, URL, and Executable should be filled, depending on the kind of credential in question. +// One field amongst File, URL, Certificate, and Executable should be filled, depending on the kind of credential in question. // The EnvironmentID should start with AWS if being used for an AWS credential. type CredentialSource struct { - File string `json:"file"` - URL string `json:"url"` - Headers map[string]string `json:"headers"` - Executable *ExecutableConfig `json:"executable,omitempty"` - EnvironmentID string `json:"environment_id"` - RegionURL string `json:"region_url"` - RegionalCredVerificationURL string `json:"regional_cred_verification_url"` - CredVerificationURL string `json:"cred_verification_url"` - IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"` - Format *Format `json:"format,omitempty"` + File string `json:"file"` + URL string `json:"url"` + Headers map[string]string `json:"headers"` + Executable *ExecutableConfig `json:"executable,omitempty"` + Certificate *CertificateConfig `json:"certificate"` + EnvironmentID string `json:"environment_id"` // TODO: Make type for this + RegionURL string `json:"region_url"` + RegionalCredVerificationURL string `json:"regional_cred_verification_url"` + CredVerificationURL string `json:"cred_verification_url"` + IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"` + Format *Format `json:"format,omitempty"` } // Format describes the format of a [CredentialSource]. 
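For reference, here is a minimal sketch of what the new "certificate" credential source looks like on the wire and how it maps onto the CertificateConfig type added in the next hunk. The struct definitions below are standalone mirrors of the vendored credsfile types (the real ones live in the internal package cloud.google.com/go/auth/internal/credsfile and cannot be imported), and the config path is hypothetical:

package main

import (
	"encoding/json"
	"fmt"
)

// Standalone mirrors of the vendored credsfile types, re-declared here only
// for illustration.
type certificateConfig struct {
	UseDefaultCertificateConfig bool   `json:"use_default_certificate_config"`
	CertificateConfigLocation   string `json:"certificate_config_location"`
}

type credentialSource struct {
	Certificate *certificateConfig `json:"certificate"`
}

func main() {
	// Hypothetical credential_source stanza for X509 workload identity.
	raw := `{"certificate": {"certificate_config_location": "/path/to/certificate_config.json"}}`
	var cs credentialSource
	if err := json.Unmarshal([]byte(raw), &cs); err != nil {
		panic(err)
	}
	fmt.Println(cs.Certificate.CertificateConfigLocation)
}

Exactly one of certificate_config_location and use_default_certificate_config should be set; the validation added in externalaccount.go rejects both the both-empty and both-set combinations.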
@@ -121,6 +122,13 @@ type ExecutableConfig struct { OutputFile string `json:"output_file"` } +// CertificateConfig represents the options used to set up X509 based workload +// [CredentialSource] +type CertificateConfig struct { + UseDefaultCertificateConfig bool `json:"use_default_certificate_config"` + CertificateConfigLocation string `json:"certificate_config_location"` +} + // ServiceAccountImpersonationInfo has impersonation configuration. type ServiceAccountImpersonationInfo struct { TokenLifetimeSeconds int `json:"token_lifetime_seconds"` diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/internal.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/internal.go index 70534e809a4..66a51f19c73 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/internal.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/internal.go @@ -38,18 +38,35 @@ const ( // QuotaProjectEnvVar is the environment variable for setting the quota // project. QuotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT" - projectEnvVar = "GOOGLE_CLOUD_PROJECT" - maxBodySize = 1 << 20 + // UniverseDomainEnvVar is the environment variable for setting the default + // service domain for a given Cloud universe. + UniverseDomainEnvVar = "GOOGLE_CLOUD_UNIVERSE_DOMAIN" + projectEnvVar = "GOOGLE_CLOUD_PROJECT" + maxBodySize = 1 << 20 // DefaultUniverseDomain is the default value for universe domain. // Universe domain is the default service domain for a given Cloud universe. DefaultUniverseDomain = "googleapis.com" ) -// CloneDefaultClient returns a [http.Client] with some good defaults. -func CloneDefaultClient() *http.Client { +type clonableTransport interface { + Clone() *http.Transport +} + +// DefaultClient returns an [http.Client] with some defaults set. If +// the current [http.DefaultTransport] is a [clonableTransport], as +// is the case for an [*http.Transport], the clone will be used. +// Otherwise the [http.DefaultTransport] is used directly. +func DefaultClient() *http.Client { + if transport, ok := http.DefaultTransport.(clonableTransport); ok { + return &http.Client{ + Transport: transport.Clone(), + Timeout: 30 * time.Second, + } + } + return &http.Client{ - Transport: http.DefaultTransport.(*http.Transport).Clone(), + Transport: http.DefaultTransport, Timeout: 30 * time.Second, } } @@ -124,6 +141,21 @@ func GetProjectID(b []byte, override string) string { return v.Project } +// DoRequest executes the provided req with the client. It reads the response +// body, closes it, and returns it. +func DoRequest(client *http.Client, req *http.Request) (*http.Response, []byte, error) { + resp, err := client.Do(req) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + body, err := ReadAll(io.LimitReader(resp.Body, maxBodySize)) + if err != nil { + return nil, nil, err + } + return resp, body, nil +} + // ReadAll consumes the whole reader and safely reads the content of its body // with some overflow protection. func ReadAll(r io.Reader) ([]byte, error) { @@ -166,9 +198,9 @@ func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string // httpGetMetadataUniverseDomain is a package var for unit test substitution. 
var httpGetMetadataUniverseDomain = func(ctx context.Context) (string, error) { - client := metadata.NewClient(&http.Client{Timeout: time.Second}) - // TODO(quartzmo): set ctx on request - return client.Get("universe/universe_domain") + ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + return metadata.GetWithContext(ctx, "universe/universe_domain") } func getMetadataUniverseDomain(ctx context.Context) (string, error) { diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/transport/cba.go index 6ef88311a24..f606888f120 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/transport/cba.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/transport/cba.go @@ -17,7 +17,9 @@ package transport import ( "context" "crypto/tls" + "crypto/x509" "errors" + "log" "net" "net/http" "net/url" @@ -44,10 +46,12 @@ const ( googleAPIUseMTLSOld = "GOOGLE_API_USE_MTLS" universeDomainPlaceholder = "UNIVERSE_DOMAIN" + + mtlsMDSRoot = "/run/google-mds-mtls/root.crt" + mtlsMDSKey = "/run/google-mds-mtls/client.key" ) var ( - mdsMTLSAutoConfigSource mtlsConfigSource errUniverseNotSupportedMTLS = errors.New("mTLS is not supported in any universe other than googleapis.com") ) @@ -120,7 +124,24 @@ func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCrede defaultTransportCreds := credentials.NewTLS(&tls.Config{ GetClientCertificate: config.clientCertSource, }) - if config.s2aAddress == "" { + + var s2aAddr string + var transportCredsForS2A credentials.TransportCredentials + + if config.mtlsS2AAddress != "" { + s2aAddr = config.mtlsS2AAddress + transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey) + if err != nil { + log.Printf("Loading MTLS MDS credentials failed: %v", err) + if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { + return defaultTransportCreds, config.endpoint, nil + } + } + } else if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { return defaultTransportCreds, config.endpoint, nil } @@ -133,8 +154,9 @@ func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCrede } s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{ - S2AAddress: config.s2aAddress, - FallbackOpts: fallbackOpts, + S2AAddress: s2aAddr, + TransportCreds: transportCredsForS2A, + FallbackOpts: fallbackOpts, }) if err != nil { // Use default if we cannot initialize S2A client transport credentials. 
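The recurring refactor throughout this patch replaces the repeated Client.Do / ReadAll / Body.Close boilerplate with the single internal.DoRequest helper introduced above. A minimal standalone sketch of that helper's shape follows; the function and constant names mirror the vendored internal package but are re-declared here for illustration:

package main

import (
	"fmt"
	"io"
	"net/http"
)

const maxBodySize = 1 << 20 // same 1 MiB cap the vendored DoRequest applies via io.LimitReader

// doRequest executes req, reads at most maxBodySize bytes of the response
// body, closes the body, and returns the response together with the bytes.
func doRequest(client *http.Client, req *http.Request) (*http.Response, []byte, error) {
	resp, err := client.Do(req)
	if err != nil {
		return nil, nil, err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(io.LimitReader(resp.Body, maxBodySize))
	if err != nil {
		return nil, nil, err
	}
	return resp, body, nil
}

func main() {
	req, err := http.NewRequest("GET", "https://example.com", nil)
	if err != nil {
		panic(err)
	}
	resp, body, err := doRequest(http.DefaultClient, req)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.StatusCode, len(body))
}

Centralizing the read-and-close in one place is what lets call sites such as the AWS, URL, GDCH, impersonation, and STS providers in this patch shrink to a single error check.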
@@ -151,7 +173,23 @@ func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, return nil, nil, err } - if config.s2aAddress == "" { + var s2aAddr string + var transportCredsForS2A credentials.TransportCredentials + + if config.mtlsS2AAddress != "" { + s2aAddr = config.mtlsS2AAddress + transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey) + if err != nil { + log.Printf("Loading MTLS MDS credentials failed: %v", err) + if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { + return config.clientCertSource, nil, nil + } + } + } else if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { return config.clientCertSource, nil, nil } @@ -169,14 +207,40 @@ func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, } dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{ - S2AAddress: config.s2aAddress, - FallbackOpts: fallbackOpts, + S2AAddress: s2aAddr, + TransportCreds: transportCredsForS2A, + FallbackOpts: fallbackOpts, }) return nil, dialTLSContextFunc, nil } +func loadMTLSMDSTransportCreds(mtlsMDSRootFile, mtlsMDSKeyFile string) (credentials.TransportCredentials, error) { + rootPEM, err := os.ReadFile(mtlsMDSRootFile) + if err != nil { + return nil, err + } + caCertPool := x509.NewCertPool() + ok := caCertPool.AppendCertsFromPEM(rootPEM) + if !ok { + return nil, errors.New("failed to load MTLS MDS root certificate") + } + // The mTLS MDS credentials are formatted as the concatenation of a PEM-encoded certificate chain + // followed by a PEM-encoded private key. For this reason, the concatenation is passed in to the + // tls.X509KeyPair function as both the certificate chain and private key arguments. + cert, err := tls.LoadX509KeyPair(mtlsMDSKeyFile, mtlsMDSKeyFile) + if err != nil { + return nil, err + } + tlsConfig := tls.Config{ + RootCAs: caCertPool, + Certificates: []tls.Certificate{cert}, + MinVersion: tls.VersionTLS13, + } + return credentials.NewTLS(&tlsConfig), nil +} + func getTransportConfig(opts *Options) (*transportConfig, error) { - clientCertSource, err := getClientCertificateSource(opts) + clientCertSource, err := GetClientCertificateProvider(opts) if err != nil { return nil, err } @@ -196,27 +260,27 @@ func getTransportConfig(opts *Options) (*transportConfig, error) { return nil, errUniverseNotSupportedMTLS } - s2aMTLSEndpoint := opts.DefaultMTLSEndpoint - s2aAddress := GetS2AAddress() - if s2aAddress == "" { + mtlsS2AAddress := GetMTLSS2AAddress() + if s2aAddress == "" && mtlsS2AAddress == "" { return &defaultTransportConfig, nil } return &transportConfig{ clientCertSource: clientCertSource, endpoint: endpoint, s2aAddress: s2aAddress, - s2aMTLSEndpoint: s2aMTLSEndpoint, + mtlsS2AAddress: mtlsS2AAddress, + s2aMTLSEndpoint: opts.DefaultMTLSEndpoint, }, nil } -// getClientCertificateSource returns a default client certificate source, if +// GetClientCertificateProvider returns a default client certificate source, if // not provided by the user. // // A nil default source can be returned if the source does not exist. Any exceptions // encountered while initializing the default source will be reported as client // error (ex. corrupt metadata file). 
-func getClientCertificateSource(opts *Options) (cert.Provider, error) { +func GetClientCertificateProvider(opts *Options) (cert.Provider, error) { if !isClientCertificateEnabled(opts) { return nil, nil } else if opts.ClientCertProvider != nil { @@ -241,8 +305,10 @@ type transportConfig struct { clientCertSource cert.Provider // The corresponding endpoint to use based on client certificate source. endpoint string - // The S2A address if it can be used, otherwise an empty string. + // The plaintext S2A address if it can be used, otherwise an empty string. s2aAddress string + // The MTLS S2A address if it can be used, otherwise an empty string. + mtlsS2AAddress string // The MTLS endpoint to use with S2A. s2aMTLSEndpoint string } diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go index 96582ce7b6a..5cedc50f1e8 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go @@ -50,11 +50,14 @@ var errSourceUnavailable = errors.New("certificate source is unavailable") // returned to indicate that a default certificate source is unavailable. func DefaultProvider() (Provider, error) { defaultCert.once.Do(func() { - defaultCert.provider, defaultCert.err = NewEnterpriseCertificateProxyProvider("") + defaultCert.provider, defaultCert.err = NewWorkloadX509CertProvider("") if errors.Is(defaultCert.err, errSourceUnavailable) { - defaultCert.provider, defaultCert.err = NewSecureConnectProvider("") + defaultCert.provider, defaultCert.err = NewEnterpriseCertificateProxyProvider("") if errors.Is(defaultCert.err, errSourceUnavailable) { - defaultCert.provider, defaultCert.err = nil, nil + defaultCert.provider, defaultCert.err = NewSecureConnectProvider("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.provider, defaultCert.err = nil, nil + } } } }) diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go index 3227aba280c..738cb21618e 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go @@ -62,11 +62,11 @@ func NewSecureConnectProvider(configFilePath string) (Provider, error) { file, err := os.ReadFile(configFilePath) if err != nil { - if errors.Is(err, os.ErrNotExist) { - // Config file missing means Secure Connect is not supported. - return nil, errSourceUnavailable - } - return nil, err + // Config file missing means Secure Connect is not supported. + // There are non-os.ErrNotExist errors that may be returned. + // (e.g. 
if the home directory is /dev/null, *nix systems will + // return ENOTDIR instead of ENOENT) + return nil, errSourceUnavailable } var metadata secureConnectMetadata diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go index ea1e1febbc2..e8675bf824b 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go @@ -99,7 +99,7 @@ func getCertAndKeyFiles(configFilePath string) (string, string, error) { } if config.CertConfigs.Workload == nil { - return "", "", errors.New("no Workload Identity Federation certificate information found in the certificate configuration file") + return "", "", errSourceUnavailable } certFile := config.CertConfigs.Workload.CertPath diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/transport/s2a.go index 2ed532deb7a..37894bfcd01 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/transport/s2a.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/transport/s2a.go @@ -15,12 +15,13 @@ package transport import ( + "context" "encoding/json" + "fmt" "log" "os" "strconv" "sync" - "time" "cloud.google.com/go/auth/internal/transport/cert" "cloud.google.com/go/compute/metadata" @@ -31,41 +32,38 @@ const ( ) var ( - // The period an MTLS config can be reused before needing refresh. - configExpiry = time.Hour + mtlsConfiguration *mtlsConfig - // mdsMTLSAutoConfigSource is an instance of reuseMTLSConfigSource, with metadataMTLSAutoConfig as its config source. mtlsOnce sync.Once ) // GetS2AAddress returns the S2A address to be reached via plaintext connection. // Returns empty string if not set or invalid. func GetS2AAddress() string { - c, err := getMetadataMTLSAutoConfig().Config() - if err != nil { - return "" - } - if !c.Valid() { + getMetadataMTLSAutoConfig() + if !mtlsConfiguration.valid() { return "" } - return c.S2A.PlaintextAddress + return mtlsConfiguration.S2A.PlaintextAddress } -type mtlsConfigSource interface { - Config() (*mtlsConfig, error) +// GetMTLSS2AAddress returns the S2A address to be reached via MTLS connection. +// Returns empty string if not set or invalid. +func GetMTLSS2AAddress() string { + getMetadataMTLSAutoConfig() + if !mtlsConfiguration.valid() { + return "" + } + return mtlsConfiguration.S2A.MTLSAddress } // mtlsConfig contains the configuration for establishing MTLS connections with Google APIs. type mtlsConfig struct { - S2A *s2aAddresses `json:"s2a"` - Expiry time.Time + S2A *s2aAddresses `json:"s2a"` } -func (c *mtlsConfig) Valid() bool { - return c != nil && c.S2A != nil && !c.expired() -} -func (c *mtlsConfig) expired() bool { - return c.Expiry.Before(time.Now()) +func (c *mtlsConfig) valid() bool { + return c != nil && c.S2A != nil } // s2aAddresses contains the plaintext and/or MTLS S2A addresses. @@ -76,80 +74,36 @@ type s2aAddresses struct { MTLSAddress string `json:"mtls_address"` } -// getMetadataMTLSAutoConfig returns mdsMTLSAutoConfigSource, which is backed by config from MDS with auto-refresh. 
-func getMetadataMTLSAutoConfig() mtlsConfigSource { +func getMetadataMTLSAutoConfig() { + var err error mtlsOnce.Do(func() { - mdsMTLSAutoConfigSource = &reuseMTLSConfigSource{ - src: &metadataMTLSAutoConfig{}, + mtlsConfiguration, err = queryConfig() + if err != nil { + log.Printf("Getting MTLS config failed: %v", err) } }) - return mdsMTLSAutoConfigSource -} - -// reuseMTLSConfigSource caches a valid version of mtlsConfig, and uses `src` to refresh upon config expiry. -// It implements the mtlsConfigSource interface, so calling Config() on it returns an mtlsConfig. -type reuseMTLSConfigSource struct { - src mtlsConfigSource // src.Config() is called when config is expired - mu sync.Mutex // mutex guards config - config *mtlsConfig // cached config } -func (cs *reuseMTLSConfigSource) Config() (*mtlsConfig, error) { - cs.mu.Lock() - defer cs.mu.Unlock() - - if cs.config.Valid() { - return cs.config, nil - } - c, err := cs.src.Config() - if err != nil { - return nil, err - } - cs.config = c - return c, nil -} - -// metadataMTLSAutoConfig is an implementation of the interface mtlsConfigSource -// It has the logic to query MDS and return an mtlsConfig -type metadataMTLSAutoConfig struct{} - var httpGetMetadataMTLSConfig = func() (string, error) { - return metadata.Get(configEndpointSuffix) + return metadata.GetWithContext(context.Background(), configEndpointSuffix) } -func (cs *metadataMTLSAutoConfig) Config() (*mtlsConfig, error) { +func queryConfig() (*mtlsConfig, error) { resp, err := httpGetMetadataMTLSConfig() if err != nil { - log.Printf("querying MTLS config from MDS endpoint failed: %v", err) - return defaultMTLSConfig(), nil + return nil, fmt.Errorf("querying MTLS config from MDS endpoint failed: %w", err) } var config mtlsConfig err = json.Unmarshal([]byte(resp), &config) if err != nil { - log.Printf("unmarshalling MTLS config from MDS endpoint failed: %v", err) - return defaultMTLSConfig(), nil + return nil, fmt.Errorf("unmarshalling MTLS config from MDS endpoint failed: %w", err) } - if config.S2A == nil { - log.Printf("returned MTLS config from MDS endpoint is invalid: %v", config) - return defaultMTLSConfig(), nil + return nil, fmt.Errorf("returned MTLS config from MDS endpoint is invalid: %v", config) } - - // set new expiry - config.Expiry = time.Now().Add(configExpiry) return &config, nil } -func defaultMTLSConfig() *mtlsConfig { - return &mtlsConfig{ - S2A: &s2aAddresses{ - PlaintextAddress: "", - MTLSAddress: "", - }, - Expiry: time.Now().Add(configExpiry), - } -} - func shouldUseS2A(clientCertSource cert.Provider, opts *Options) bool { // If client cert is found, use that over S2A. if clientCertSource != nil { diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/transport/transport.go index b76386d3c0d..cc586ec5b1a 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/transport/transport.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/internal/transport/transport.go @@ -17,7 +17,11 @@ package transport import ( + "crypto/tls" "fmt" + "net" + "net/http" + "time" "cloud.google.com/go/auth/credentials" ) @@ -49,11 +53,11 @@ func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOpt } // Smartly size this memory and copy below. 
- if oldDo.CredentialsJSON != nil { + if len(oldDo.CredentialsJSON) > 0 { newDo.CredentialsJSON = make([]byte, len(oldDo.CredentialsJSON)) copy(newDo.CredentialsJSON, oldDo.CredentialsJSON) } - if oldDo.Scopes != nil { + if len(oldDo.Scopes) > 0 { newDo.Scopes = make([]string, len(oldDo.Scopes)) copy(newDo.Scopes, oldDo.Scopes) } @@ -74,3 +78,28 @@ func ValidateUniverseDomain(clientUniverseDomain, credentialsUniverseDomain stri } return nil } + +// DefaultHTTPClientWithTLS constructs an HTTPClient using the provided tlsConfig, to support mTLS. +func DefaultHTTPClientWithTLS(tlsConfig *tls.Config) *http.Client { + trans := BaseTransport() + trans.TLSClientConfig = tlsConfig + return &http.Client{Transport: trans} +} + +// BaseTransport returns a default [http.Transport] which can be used if +// [http.DefaultTransport] has been overwritten. +func BaseTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + MaxIdleConnsPerHost: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md b/terraform/providers/google/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md index ff9747beda0..7faf6e0c985 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md @@ -1,5 +1,19 @@ # Changelog +## [0.2.4](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.3...auth/oauth2adapt/v0.2.4) (2024-08-08) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) + +## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.2...auth/oauth2adapt/v0.2.3) (2024-07-10) + + +### Bug Fixes + +* **auth/oauth2adapt:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) + ## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.1...auth/oauth2adapt/v0.2.2) (2024-04-23) diff --git a/terraform/providers/google/vendor/cloud.google.com/go/auth/threelegged.go b/terraform/providers/google/vendor/cloud.google.com/go/auth/threelegged.go index 1b8d83c4b4f..97a57f4694b 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/auth/threelegged.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/auth/threelegged.go @@ -62,7 +62,8 @@ type Options3LO struct { // Optional. Client *http.Client // EarlyTokenExpiry is the time before the token expires that it should be - // refreshed. If not set the default value is 10 seconds. Optional. + // refreshed. If not set the default value is 3 minutes and 45 seconds. + // Optional. EarlyTokenExpiry time.Duration // AuthHandlerOpts provides a set of options for doing a @@ -127,7 +128,7 @@ func (o *Options3LO) client() *http.Client { if o.Client != nil { return o.Client } - return internal.CloneDefaultClient() + return internal.DefaultClient() } // authCodeURL returns a URL that points to a OAuth2 consent page. 
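The rename of CloneDefaultClient to DefaultClient above also changes behavior: the old code unconditionally type-asserted http.DefaultTransport to *http.Transport, which panics if another package has replaced the default transport, while the new code probes for a Clone method and falls back to using the transport as-is (defaultBaseTransport in httptransport gains the related BaseTransport fallback). A compilable sketch of that guard, with names mirroring the vendored internal package:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// clonableTransport is satisfied by *http.Transport and by any custom
// transport that chooses to expose a Clone method.
type clonableTransport interface {
	Clone() *http.Transport
}

// defaultClient mirrors the shape of the vendored internal.DefaultClient:
// clone http.DefaultTransport when possible, otherwise use it directly so a
// process-wide custom RoundTripper keeps working instead of panicking.
func defaultClient() *http.Client {
	if t, ok := http.DefaultTransport.(clonableTransport); ok {
		return &http.Client{Transport: t.Clone(), Timeout: 30 * time.Second}
	}
	return &http.Client{Transport: http.DefaultTransport, Timeout: 30 * time.Second}
}

func main() {
	fmt.Printf("%T\n", defaultClient().Transport) // *http.Transport by default
}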
@@ -284,7 +285,7 @@ func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, strin v.Set("client_secret", o.ClientSecret) } } - req, err := http.NewRequest("POST", o.TokenURL, strings.NewReader(v.Encode())) + req, err := http.NewRequestWithContext(ctx, "POST", o.TokenURL, strings.NewReader(v.Encode())) if err != nil { return nil, refreshToken, err } @@ -294,25 +295,19 @@ func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, strin } // Make request - r, err := o.client().Do(req.WithContext(ctx)) + resp, body, err := internal.DoRequest(o.client(), req) if err != nil { return nil, refreshToken, err } - body, err := internal.ReadAll(r.Body) - r.Body.Close() - if err != nil { - return nil, refreshToken, fmt.Errorf("auth: cannot fetch token: %w", err) - } - - failureStatus := r.StatusCode < 200 || r.StatusCode > 299 + failureStatus := resp.StatusCode < 200 || resp.StatusCode > 299 tokError := &Error{ - Response: r, + Response: resp, Body: body, } var token *Token // errors ignored because of default switch on content - content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + content, _, _ := mime.ParseMediaType(resp.Header.Get("Content-Type")) switch content { case "application/x-www-form-urlencoded", "text/plain": // some endpoints return a query string diff --git a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/CHANGES.md b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/CHANGES.md index 8a216391c12..2c6917bbe28 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/CHANGES.md +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/CHANGES.md @@ -1,5 +1,131 @@ # Changes +## [1.33.0](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.32.0...bigtable/v1.33.0) (2024-09-23) + + +### Features + +* **bigtable/admin:** Add support for Cloud Bigtable Row Affinity in App Profiles ([b9dfce5](https://github.com/googleapis/google-cloud-go/commit/b9dfce5e509d0c795e89c66b7f6a6bb356e3a172)) + + +### Bug Fixes + +* **bigtable:** Rollback new auth library ([#10906](https://github.com/googleapis/google-cloud-go/issues/10906)) ([8109157](https://github.com/googleapis/google-cloud-go/commit/8109157cb2bfb700fde04361e0fa7c1345608fce)) + +## [1.32.0](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.31.0...bigtable/v1.32.0) (2024-09-09) + + +### Features + +* **bigtable:** Add "TypeUnspecified" to represent an unspecified type ([#10820](https://github.com/googleapis/google-cloud-go/issues/10820)) ([8d008de](https://github.com/googleapis/google-cloud-go/commit/8d008def4378d33ab66ca0ec346534be87155576)) +* **bigtable:** Add client side metrics to feature flag ([#10678](https://github.com/googleapis/google-cloud-go/issues/10678)) ([02b2d12](https://github.com/googleapis/google-cloud-go/commit/02b2d12d51f774ea9ce6985b3f03006ef3d23e50)) +* **bigtable:** Add update value type test. 
([#10771](https://github.com/googleapis/google-cloud-go/issues/10771)) ([210f022](https://github.com/googleapis/google-cloud-go/commit/210f0228e68452c23cbf6bf42862974303f54450)) +* **bigtable:** Wrapping errors on Export ([#10836](https://github.com/googleapis/google-cloud-go/issues/10836)) ([fc6d6a8](https://github.com/googleapis/google-cloud-go/commit/fc6d6a8b6bb90714e92bfb09762cc5a99930a6a8)) + + +### Bug Fixes + +* **bigtable:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04)) +* **bigtable:** Error logging for client side metrics ([#10658](https://github.com/googleapis/google-cloud-go/issues/10658)) ([9a94ff8](https://github.com/googleapis/google-cloud-go/commit/9a94ff87b83f37472aa94b6e0d1cc69bbb83c3bc)) + +## [1.31.0](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.30.0...bigtable/v1.31.0) (2024-08-28) + + +### Features + +* **bigtable:** Using new auth library ([#10766](https://github.com/googleapis/google-cloud-go/issues/10766)) ([8daf304](https://github.com/googleapis/google-cloud-go/commit/8daf304faf3808690996ad3a45d2890b107a0939)) + + +### Performance Improvements + +* **bigtable:** Use RecvMsg instead of Recv ([#10674](https://github.com/googleapis/google-cloud-go/issues/10674)) ([7e4fbc5](https://github.com/googleapis/google-cloud-go/commit/7e4fbc5612441c59bfaa1e5b9bbd06e3387b5c02)) + +## [1.30.0](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.29.0...bigtable/v1.30.0) (2024-08-26) + + +### Features + +* **bigtable:** Add MarshalJSON to allow clients to get a stringified version of the protobuf ([#10679](https://github.com/googleapis/google-cloud-go/issues/10679)) ([663f399](https://github.com/googleapis/google-cloud-go/commit/663f3996ced66c312c8202535574d3ffcb72d283)) +* **bigtable:** Add nil checks to Equal method ([#10758](https://github.com/googleapis/google-cloud-go/issues/10758)) ([f1aad7f](https://github.com/googleapis/google-cloud-go/commit/f1aad7f3a05a959d0dd973e026026391deda7657)) +* **bigtable:** Add UpdateFamily to allow updating a family type ([#10759](https://github.com/googleapis/google-cloud-go/issues/10759)) ([ec0cbb2](https://github.com/googleapis/google-cloud-go/commit/ec0cbb20ba42b7ef03688a06dc0a380e9b27e394)) +* **bigtable:** Update go version for conformance tests ([#10743](https://github.com/googleapis/google-cloud-go/issues/10743)) ([74cf45e](https://github.com/googleapis/google-cloud-go/commit/74cf45efe7dc6f74cadac3f015a705f8dbf69622)) + + +### Bug Fixes + +* **bigtable:** Use new auth library ([#10670](https://github.com/googleapis/google-cloud-go/issues/10670)) ([fab520d](https://github.com/googleapis/google-cloud-go/commit/fab520d226340bbf1aedc001dcb7384651e075a0)) + +## [1.29.0](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.28.0...bigtable/v1.29.0) (2024-08-09) + + +### Features + +* **bigtable/admin:** Add fields and the BackupType proto for Hot Backups ([649c075](https://github.com/googleapis/google-cloud-go/commit/649c075d5310e2fac64a0b65ec445e7caef42cb0)) +* **bigtable:** Remove deprecated Bytes from BigEndianBytesEncoding ([#10659](https://github.com/googleapis/google-cloud-go/issues/10659)) ([0bb1a6d](https://github.com/googleapis/google-cloud-go/commit/0bb1a6de1fba3307cc770e2bcaebe93c8fe9d628)) + + +### Bug Fixes + +* **bigtable:** Update google.golang.org/api to v0.191.0 ([5b32644](https://github.com/googleapis/google-cloud-go/commit/5b32644eb82eb6bd6021f80b4fad471c60fb9d73)) + + +### Documentation + +* 
**bigtable/admin:** Clarify comments and fix typos ([649c075](https://github.com/googleapis/google-cloud-go/commit/649c075d5310e2fac64a0b65ec445e7caef42cb0)) + +## [1.28.0](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.27.1...bigtable/v1.28.0) (2024-08-03) + + +### Features + +* **bigtable:** Add MergeToCell support to the bigtable emulator and client ([#10366](https://github.com/googleapis/google-cloud-go/issues/10366)) ([0211c95](https://github.com/googleapis/google-cloud-go/commit/0211c95e0404aad31be5bec6d5855f0bc5358161)) +* **bigtable:** Add support for new functions ([#10582](https://github.com/googleapis/google-cloud-go/issues/10582)) ([a49ab59](https://github.com/googleapis/google-cloud-go/commit/a49ab593a7495c2cfff106594762b9a6c79eb8b2)) +* **bigtable:** Expose protoToType ([#10602](https://github.com/googleapis/google-cloud-go/issues/10602)) ([643a8e3](https://github.com/googleapis/google-cloud-go/commit/643a8e356632160c143e94f905c72b4e6452f5a6)) + + +### Bug Fixes + +* **bigtable/emulator:** Sending empty row in SampleRowKeys response ([#10611](https://github.com/googleapis/google-cloud-go/issues/10611)) ([928f1a7](https://github.com/googleapis/google-cloud-go/commit/928f1a77191fbf4736051305e0ad67b69bae11fb)) +* **bigtable:** Move usage to local proto definitions ([#10598](https://github.com/googleapis/google-cloud-go/issues/10598)) ([ce31365](https://github.com/googleapis/google-cloud-go/commit/ce31365acc54fdf0970fc9552b1758c8fef4762f)) + +## [1.27.1](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.27.0...bigtable/v1.27.1) (2024-07-25) + + +### Bug Fixes + +* **bigtable:** Start generating proto sources ([5b4b0f7](https://github.com/googleapis/google-cloud-go/commit/5b4b0f7878276ab5709011778b1b4a6ffd30a60b)) + +## [1.27.0](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.26.0...bigtable/v1.27.0) (2024-07-25) + + +### Features + +* **bigtable:** Built-in client side metrics ([#10046](https://github.com/googleapis/google-cloud-go/issues/10046)) ([a747f0a](https://github.com/googleapis/google-cloud-go/commit/a747f0a49b79c0fe3034f7374b47ca56fc5ce0f5)) + +## [1.26.0](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.25.0...bigtable/v1.26.0) (2024-07-23) + + +### Features + +* **bigtable/emulator:** Allow listening on Unix Domain Sockets ([#9665](https://github.com/googleapis/google-cloud-go/issues/9665)) ([424494c](https://github.com/googleapis/google-cloud-go/commit/424494ce23db13468a4ea3e3be6ed1dee028ecdb)) +* **bigtable:** Add column family type to FamilyInfo in TableInfo ([#10520](https://github.com/googleapis/google-cloud-go/issues/10520)) ([fd16a17](https://github.com/googleapis/google-cloud-go/commit/fd16a1785df6f1378aecb3cd6a7f2c9bcc40c6c7)) +* **bigtable:** Mark CBT Authorized View admin APIs as unimplemented in the emulator ([#10562](https://github.com/googleapis/google-cloud-go/issues/10562)) ([6b32871](https://github.com/googleapis/google-cloud-go/commit/6b328715c83c8fa2bfd1c3b6b64acd8f1bd486f2)) + + +### Bug Fixes + +* **bigtable:** Add quotes to end of range ([#10488](https://github.com/googleapis/google-cloud-go/issues/10488)) ([142b153](https://github.com/googleapis/google-cloud-go/commit/142b15384d4d818faf30f3bae4567c7f579f4079)) +* **bigtable:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) +* **bigtable:** Bump google.golang.org/grpc@v1.64.1 
([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5)) +* **bigtable:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) + +## [1.25.0](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.24.0...bigtable/v1.25.0) (2024-06-20) + + +### Features + +* **bigtable:** Add string type to supported Bigtable type ([#10306](https://github.com/googleapis/google-cloud-go/issues/10306)) ([18fa7e4](https://github.com/googleapis/google-cloud-go/commit/18fa7e4961d055939078833d0442a415fac96ae6)) + ## [1.24.0](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.23.0...bigtable/v1.24.0) (2024-05-28) diff --git a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin.go index 3e5974f1d72..69f57f0efa7 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin.go @@ -26,6 +26,7 @@ import ( "strings" "time" + btapb "cloud.google.com/go/bigtable/admin/apiv2/adminpb" btopt "cloud.google.com/go/bigtable/internal/option" "cloud.google.com/go/iam" "cloud.google.com/go/internal/optional" @@ -36,7 +37,6 @@ import ( "google.golang.org/api/iterator" "google.golang.org/api/option" gtransport "google.golang.org/api/transport/grpc" - btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/metadata" "google.golang.org/protobuf/types/known/durationpb" @@ -599,6 +599,7 @@ type FamilyInfo struct { Name string GCPolicy string FullGCPolicy GCPolicy + ValueType Type } func (ac *AdminClient) getTable(ctx context.Context, table string, view btapb.Table_View) (*btapb.Table, error) { @@ -638,6 +639,7 @@ func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo, Name: name, GCPolicy: GCRuleToString(fam.GcRule), FullGCPolicy: gcRuleToPolicy(fam.GcRule), + ValueType: ProtoToType(fam.ValueType), }) } // we expect DeletionProtection to be in the response because Table_SCHEMA_VIEW is being used in this function @@ -665,41 +667,82 @@ func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo, return ti, nil } -type gcPolicySettings struct { +type updateFamilyOption struct { ignoreWarnings bool } -// GCPolicyOption is the interface to change GC policy settings +// GCPolicyOption is deprecated, kept for backwards compatibility, use UpdateFamilyOption in new code type GCPolicyOption interface { - apply(s *gcPolicySettings) + apply(s *updateFamilyOption) } +// UpdateFamilyOption is the interface to update family settings +type UpdateFamilyOption GCPolicyOption + type ignoreWarnings bool -func (w ignoreWarnings) apply(s *gcPolicySettings) { +func (w ignoreWarnings) apply(s *updateFamilyOption) { s.ignoreWarnings = bool(w) } -// IgnoreWarnings returns a gcPolicyOption that ignores safety checks when modifying the column families +// IgnoreWarnings returns an updateFamilyOption that ignores safety checks when modifying the column families func IgnoreWarnings() GCPolicyOption { return ignoreWarnings(true) } -func (ac *AdminClient) setGCPolicy(ctx context.Context, table, family string, policy GCPolicy, opts ...GCPolicyOption) error { +// SetGCPolicy specifies which cells in a column family should be garbage collected. 
+// GC executes opportunistically in the background; table reads may return data +// matching the GC policy. +func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, policy GCPolicy) error { + return ac.UpdateFamily(ctx, table, family, Family{GCPolicy: policy}) +} + +// SetGCPolicyWithOptions is similar to SetGCPolicy but allows passing options +func (ac *AdminClient) SetGCPolicyWithOptions(ctx context.Context, table, family string, policy GCPolicy, opts ...GCPolicyOption) error { + familyOpts := []UpdateFamilyOption{} + for _, opt := range opts { + if opt != nil { + familyOpts = append(familyOpts, opt.(UpdateFamilyOption)) + } + } + return ac.UpdateFamily(ctx, table, family, Family{GCPolicy: policy}, familyOpts...) +} + +// UpdateFamily updates column families' garbage collection policies and value type. +func (ac *AdminClient) UpdateFamily(ctx context.Context, table, familyName string, family Family, opts ...UpdateFamilyOption) error { ctx = mergeOutgoingMetadata(ctx, ac.md) prefix := ac.instancePrefix() - s := gcPolicySettings{} + s := updateFamilyOption{} for _, opt := range opts { if opt != nil { opt.apply(&s) } } + + cf := &btapb.ColumnFamily{} + mask := &field_mask.FieldMask{} + if family.GCPolicy != nil { + cf.GcRule = family.GCPolicy.proto() + mask.Paths = append(mask.Paths, "gc_rule") + + } + if family.ValueType != nil { + cf.ValueType = family.ValueType.proto() + mask.Paths = append(mask.Paths, "value_type") + } + + // No update + if len(mask.Paths) == 0 { + return nil + } + req := &btapb.ModifyColumnFamiliesRequest{ Name: prefix + "/tables/" + table, Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ - Id: family, - Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{Update: &btapb.ColumnFamily{GcRule: policy.proto()}}, + Id: familyName, + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{Update: cf}, + UpdateMask: mask, }}, IgnoreWarnings: s.ignoreWarnings, } @@ -707,18 +750,6 @@ func (ac *AdminClient) setGCPolicy(ctx context.Context, table, family string, po return err } -// SetGCPolicy specifies which cells in a column family should be garbage collected. -// GC executes opportunistically in the background; table reads may return data -// matching the GC policy. -func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, policy GCPolicy) error { - return ac.SetGCPolicyWithOptions(ctx, table, family, policy) -} - -// SetGCPolicyWithOptions is similar to SetGCPolicy but allows passing options -func (ac *AdminClient) SetGCPolicyWithOptions(ctx context.Context, table, family string, policy GCPolicy, opts ...GCPolicyOption) error { - return ac.setGCPolicy(ctx, table, family, policy, opts...) -} -
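// A usage sketch for the UpdateFamily API introduced above: SetGCPolicy is now
// a thin wrapper over it, and a single call can change both the GC policy and
// the value type, with the field mask derived from whichever Family fields are
// non-nil. The project, instance, table, and family names here are
// hypothetical; MaxVersionsPolicy, StringType, and IgnoreWarnings are existing
// bigtable helpers.
package main

import (
	"context"
	"log"

	"cloud.google.com/go/bigtable"
)

func main() {
	ctx := context.Background()
	admin, err := bigtable.NewAdminClient(ctx, "my-project", "my-instance")
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	// One ModifyColumnFamilies call updates both gc_rule and value_type;
	// IgnoreWarnings skips the server-side safety checks, matching the old
	// SetGCPolicyWithOptions behaviour.
	err = admin.UpdateFamily(ctx, "my-table", "my-family", bigtable.Family{
		GCPolicy:  bigtable.MaxVersionsPolicy(2),
		ValueType: bigtable.StringType{},
	}, bigtable.IgnoreWarnings())
	if err != nil {
		log.Fatal(err)
	}
}

 // DropRowRange permanently deletes a row range from the specified table. 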
func (ac *AdminClient) DropRowRange(ctx context.Context, table, rowKeyPrefix string) error { ctx = mergeOutgoingMetadata(ctx, ac.md) diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/bigtable_instance_admin.pb.go similarity index 96% rename from terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go rename to terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/bigtable_instance_admin.pb.go index 92a1e66360e..ca912acd58f 100644 --- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/bigtable_instance_admin.pb.go @@ -14,11 +14,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v4.24.4 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/bigtable/admin/v2/bigtable_instance_admin.proto -package admin +package adminpb import ( context "context" @@ -2235,10 +2235,10 @@ var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDesc = []byte 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x24, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2c, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x26, 0x22, 0x21, - 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, - 0x73, 0x3a, 0x01, 0x2a, 0x12, 0x91, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x26, 0x3a, 0x01, + 0x2a, 0x22, 0x21, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x73, 0x12, 0x91, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, @@ -2264,9 +2264,9 @@ var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDesc = []byte 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x22, 0x2c, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x26, 0x1a, 0x21, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x01, 0x2a, 0x12, 0xe8, 0x01, 0x0a, 0x15, 0x50, 0x61, 0x72, + 0x02, 0x26, 0x3a, 0x01, 0x2a, 0x1a, 0x21, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 
0x2f, 0x69, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xe8, 0x01, 0x0a, 0x15, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, @@ -2277,11 +2277,11 @@ var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDesc = []byte 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x14, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2c, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x36, 0x32, 0x2a, 0x2f, - 0x76, 0x32, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x6e, 0x61, 0x6d, - 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x08, 0x69, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x12, 0x8b, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6e, + 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x36, 0x3a, 0x08, 0x69, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x32, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x2f, 0x2a, 0x7d, 0x12, 0x8b, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, @@ -2300,10 +2300,10 @@ var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDesc = []byte 0x12, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x19, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x2c, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x22, 0x2c, 0x2f, 0x76, 0x32, 0x2f, 0x7b, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x3a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x74, 0x65, 0x72, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x3a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x22, 0x2c, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x99, 0x01, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, @@ -2332,10 
+2332,10 @@ var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDesc = []byte 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x5a, 0xca, 0x41, 0x20, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x31, 0x1a, 0x2c, 0x2f, 0x76, 0x32, 0x2f, - 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x01, 0x2a, 0x12, 0xf4, 0x01, 0x0a, 0x14, + 0x64, 0x61, 0x74, 0x61, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x31, 0x3a, 0x01, 0x2a, 0x1a, 0x2c, 0x2f, + 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xf4, 0x01, 0x0a, 0x14, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, @@ -2347,11 +2347,11 @@ var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDesc = []byte 0x61, 0x6c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x13, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x3f, 0x32, 0x34, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x12, 0x94, 0x01, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x75, + 0x93, 0x02, 0x3f, 0x3a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x32, 0x34, 0x2f, 0x76, + 0x32, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, + 0x2a, 0x7d, 0x12, 0x94, 0x01, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, @@ -2370,11 +2370,11 @@ var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDesc = []byte 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x22, 0x68, 0xda, 0x41, 0x21, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x3e, 0x22, 0x2f, 0x2f, 0x76, 
0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, - 0x69, 0x6c, 0x65, 0x73, 0x3a, 0x0b, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, - 0x65, 0x12, 0xa5, 0x01, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, + 0x93, 0x02, 0x3e, 0x3a, 0x0b, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, + 0x22, 0x2f, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, + 0x73, 0x12, 0xa5, 0x01, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, @@ -2407,12 +2407,12 @@ var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDesc = []byte 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x17, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, - 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x4a, 0x32, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x61, 0x70, - 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, - 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x0b, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, - 0x65, 0x12, 0x9d, 0x01, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, + 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x4a, 0x3a, 0x0b, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, + 0x66, 0x69, 0x6c, 0x65, 0x32, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x61, 0x70, 0x70, 0x5f, 0x70, + 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x2f, 0x2a, 0x2f, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x2a, + 0x7d, 0x12, 0x9d, 0x01, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, @@ -2428,20 +2428,20 @@ var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDesc = []byte 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x48, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, - 0x22, 0x32, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 
0x63, 0x65, 0x3d, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x9a, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, + 0x3a, 0x01, 0x2a, 0x22, 0x32, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, + 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x9a, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x4f, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x22, 0x32, 0x2f, - 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, - 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x3a, 0x01, 0x2a, 0x12, 0xc5, 0x01, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, + 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x3a, 0x01, 0x2a, + 0x22, 0x32, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x12, 0xc5, 0x01, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, @@ -2449,11 +2449,11 @@ var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDesc = []byte 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5a, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, - 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3d, 0x22, - 0x38, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, - 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0x12, 0xbf, 0x01, 0x0a, + 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3d, 0x3a, + 0x01, 0x2a, 0x22, 0x38, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x3d, 0x70, 
0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, + 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xbf, 0x01, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, @@ -2491,22 +2491,22 @@ var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDesc = []byte 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, - 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x42, 0xe2, 0x01, 0x0a, 0x1c, + 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x42, 0xdd, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x1a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x41, 0x64, - 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, - 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x61, + 0x70, 0x69, 0x76, 0x32, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x70, 0x62, 0x3b, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x70, 0x62, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, + 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, + 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x3a, 
0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -2523,7 +2523,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDescGZIP() [ var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes = make([]protoimpl.MessageInfo, 29) -var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_goTypes = []interface{}{ +var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_goTypes = []any{ (CreateClusterMetadata_TableProgress_State)(0), // 0: google.bigtable.admin.v2.CreateClusterMetadata.TableProgress.State (*CreateInstanceRequest)(nil), // 1: google.bigtable.admin.v2.CreateInstanceRequest (*GetInstanceRequest)(nil), // 2: google.bigtable.admin.v2.GetInstanceRequest @@ -2660,7 +2660,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { } file_google_bigtable_admin_v2_instance_proto_init() if !protoimpl.UnsafeEnabled { - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*CreateInstanceRequest); i { case 0: return &v.state @@ -2672,7 +2672,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*GetInstanceRequest); i { case 0: return &v.state @@ -2684,7 +2684,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ListInstancesRequest); i { case 0: return &v.state @@ -2696,7 +2696,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ListInstancesResponse); i { case 0: return &v.state @@ -2708,7 +2708,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*PartialUpdateInstanceRequest); i { case 0: return &v.state @@ -2720,7 +2720,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*DeleteInstanceRequest); i { case 0: return &v.state @@ -2732,7 +2732,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { 
return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*CreateClusterRequest); i { case 0: return &v.state @@ -2744,7 +2744,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*GetClusterRequest); i { case 0: return &v.state @@ -2756,7 +2756,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*ListClustersRequest); i { case 0: return &v.state @@ -2768,7 +2768,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*ListClustersResponse); i { case 0: return &v.state @@ -2780,7 +2780,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*DeleteClusterRequest); i { case 0: return &v.state @@ -2792,7 +2792,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*CreateInstanceMetadata); i { case 0: return &v.state @@ -2804,7 +2804,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*UpdateInstanceMetadata); i { case 0: return &v.state @@ -2816,7 +2816,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*CreateClusterMetadata); i { case 0: return &v.state @@ -2828,7 +2828,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[14].Exporter = 
func(v any, i int) any { switch v := v.(*UpdateClusterMetadata); i { case 0: return &v.state @@ -2840,7 +2840,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*PartialUpdateClusterMetadata); i { case 0: return &v.state @@ -2852,7 +2852,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*PartialUpdateClusterRequest); i { case 0: return &v.state @@ -2864,7 +2864,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[17].Exporter = func(v any, i int) any { switch v := v.(*CreateAppProfileRequest); i { case 0: return &v.state @@ -2876,7 +2876,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[18].Exporter = func(v any, i int) any { switch v := v.(*GetAppProfileRequest); i { case 0: return &v.state @@ -2888,7 +2888,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[19].Exporter = func(v any, i int) any { switch v := v.(*ListAppProfilesRequest); i { case 0: return &v.state @@ -2900,7 +2900,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[20].Exporter = func(v any, i int) any { switch v := v.(*ListAppProfilesResponse); i { case 0: return &v.state @@ -2912,7 +2912,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[21].Exporter = func(v any, i int) any { switch v := v.(*UpdateAppProfileRequest); i { case 0: return &v.state @@ -2924,7 +2924,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[22].Exporter = func(v any, i int) any { switch v := v.(*DeleteAppProfileRequest); i { case 0: return &v.state @@ -2936,7 +2936,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return 
nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[23].Exporter = func(v any, i int) any { switch v := v.(*UpdateAppProfileMetadata); i { case 0: return &v.state @@ -2948,7 +2948,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[24].Exporter = func(v any, i int) any { switch v := v.(*ListHotTabletsRequest); i { case 0: return &v.state @@ -2960,7 +2960,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[25].Exporter = func(v any, i int) any { switch v := v.(*ListHotTabletsResponse); i { case 0: return &v.state @@ -2972,7 +2972,7 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[27].Exporter = func(v any, i int) any { switch v := v.(*CreateClusterMetadata_TableProgress); i { case 0: return &v.state diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/bigtable_table_admin.pb.go similarity index 96% rename from terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go rename to terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/bigtable_table_admin.pb.go index 63901312428..f765d7acdee 100644 --- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/bigtable_table_admin.pb.go @@ -14,11 +14,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v4.24.4 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/bigtable/admin/v2/bigtable_table_admin.proto -package admin +package adminpb import ( context "context" @@ -2575,7 +2575,7 @@ type CopyBackupRequest struct { unknownFields protoimpl.UnknownFields // Required. The name of the destination cluster that will contain the backup - // copy. The cluster must already exists. Values are of the form: + // copy. The cluster must already exist. Values are of the form: // `projects/{project}/instances/{instance}/clusters/{cluster}`. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // Required. The id of the new backup. 
The `backup_id` along with `parent` @@ -4029,10 +4029,10 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x4d, 0xda, 0x41, 0x15, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x2f, 0x22, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x3a, - 0x01, 0x2a, 0x12, 0x8a, 0x02, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, + 0x93, 0x02, 0x2f, 0x3a, 0x01, 0x2a, 0x22, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x12, 0x8a, 0x02, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, @@ -4044,11 +4044,11 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x1f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, - 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x42, 0x22, 0x3d, 0x2f, - 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, - 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x3a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x3a, 0x01, 0x2a, 0x12, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x42, 0x3a, 0x01, 0x2a, + 0x22, 0x3d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x3a, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0xa4, 0x01, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, @@ -4078,10 +4078,10 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x62, 0x6c, 0x65, 0x12, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x11, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, - 
0x02, 0x39, 0x32, 0x30, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x6e, - 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x8e, 0x01, 0x0a, 0x0b, + 0x02, 0x39, 0x3a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x32, 0x30, 0x2f, 0x76, 0x32, 0x2f, 0x7b, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, + 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x8e, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, @@ -4099,11 +4099,11 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x66, 0xca, 0x41, 0x1e, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x15, 0x55, 0x6e, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x22, 0x33, 0x2f, - 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x75, 0x6e, 0x64, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0xa1, 0x02, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x3a, 0x01, 0x2a, + 0x22, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x75, 0x6e, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0xa1, 0x02, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x12, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, @@ -4116,12 +4116,12 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x29, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x2c, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, - 0x77, 0x5f, 0x69, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x22, 0x3c, 0x2f, 0x76, 0x32, 0x2f, - 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, - 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 
0x77, 0x73, 0x3a, 0x0f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x12, 0xd1, 0x01, 0x0a, 0x13, 0x4c, 0x69, + 0x77, 0x5f, 0x69, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x3a, 0x0f, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x22, 0x3c, 0x2f, 0x76, 0x32, + 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x73, 0x12, 0xd1, 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x73, 0x12, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, @@ -4159,13 +4159,13 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x1b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x5f, 0x32, 0x4c, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x61, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x2e, 0x6e, 0x61, 0x6d, 0x65, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, - 0x2f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x73, - 0x2f, 0x2a, 0x7d, 0x3a, 0x0f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, - 0x76, 0x69, 0x65, 0x77, 0x12, 0xb2, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, + 0xd3, 0xe4, 0x93, 0x02, 0x5f, 0x3a, 0x0f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, + 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x32, 0x4c, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x61, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x2e, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, + 0x2a, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, + 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xb2, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x12, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, @@ -4185,21 +4185,21 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x5f, 0xda, 0x41, 0x12, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x44, 0x22, 0x3f, 0x2f, 0x76, 0x32, 
0x2f, 0x7b, 0x6e, 0x61, - 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, - 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, - 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x3a, 0x01, 0x2a, 0x12, 0x99, 0x01, 0x0a, 0x0c, + 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x44, 0x3a, 0x01, 0x2a, 0x22, 0x3f, 0x2f, 0x76, 0x32, 0x2f, + 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, + 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x12, 0x99, 0x01, 0x0a, 0x0c, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x22, 0x42, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3c, 0x22, 0x37, 0x2f, 0x76, 0x32, - 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x64, 0x72, 0x6f, 0x70, 0x52, 0x6f, 0x77, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0xe8, 0x01, 0x0a, 0x18, 0x47, 0x65, 0x6e, 0x65, + 0x70, 0x74, 0x79, 0x22, 0x42, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3c, 0x3a, 0x01, 0x2a, 0x22, 0x37, + 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, + 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x64, 0x72, 0x6f, 0x70, 0x52, + 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0xe8, 0x01, 0x0a, 0x18, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, @@ -4209,12 +4209,12 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x55, 0xda, 0x41, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x48, 0x22, 0x43, 0x2f, 0x76, 0x32, 0x2f, - 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, - 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x3a, - 0x01, 0x2a, 0x12, 
0xda, 0x01, 0x0a, 0x10, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, + 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x48, 0x3a, 0x01, 0x2a, 0x22, 0x43, 0x2f, + 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x12, 0xda, 0x01, 0x0a, 0x10, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, @@ -4223,11 +4223,11 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5f, 0xda, 0x41, 0x16, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x63, 0x79, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x22, - 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, - 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x63, 0x68, 0x65, 0x63, - 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, + 0x6e, 0x63, 0x79, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x3a, + 0x01, 0x2a, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x12, 0xea, 0x01, 0x0a, 0x0d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x6e, 0x61, @@ -4239,10 +4239,10 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x24, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x38, 0x22, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, - 0x3a, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x3a, 0x01, 0x2a, 0x12, 0xa8, 0x01, 0x0a, + 0xe4, 0x93, 0x02, 0x38, 0x3a, 0x01, 0x2a, 0x22, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, + 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 
0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, + 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0xa8, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, @@ -4285,11 +4285,11 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x06, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, - 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x22, 0x36, 0x2f, - 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, - 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x62, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x73, 0x3a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0xa0, 0x01, + 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x3a, 0x06, 0x62, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0x36, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0xa0, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, @@ -4307,12 +4307,12 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x1a, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0x62, 0xda, 0x41, 0x12, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2c, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x32, - 0x3d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x6e, 0x61, 0x6d, - 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x06, - 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x9c, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x3a, + 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x32, 0x3d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x62, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 
0x2a, + 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x9c, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, @@ -4342,10 +4342,10 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x5d, 0xca, 0x41, 0x1d, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x37, 0x22, 0x32, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x3a, 0x72, - 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0xed, 0x01, 0x0a, 0x0a, 0x43, 0x6f, + 0x02, 0x37, 0x3a, 0x01, 0x2a, 0x22, 0x32, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x73, 0x3a, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x12, 0xed, 0x01, 0x0a, 0x0a, 0x43, 0x6f, 0x70, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, @@ -4356,41 +4356,41 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x2a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x2c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2c, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x73, 0x3a, 0x63, 0x6f, 0x70, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0xec, 0x01, 0x0a, 0x0c, 0x47, 0x65, + 0x69, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x3a, 0x01, 0x2a, 0x22, 0x3b, 0x2f, 0x76, + 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, + 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x62, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x73, 0x3a, 0x63, 0x6f, 0x70, 0x79, 0x12, 0xec, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 
0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xa0, 0x01, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x8e, 0x01, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, - 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, - 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, - 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x5a, 0x4c, 0x22, 0x47, 0x2f, 0x76, - 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, - 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0xf3, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, + 0x72, 0x63, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x8e, 0x01, 0x3a, 0x01, 0x2a, 0x5a, 0x4c, 0x3a, + 0x01, 0x2a, 0x22, 0x47, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, + 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x3b, 0x2f, 0x76, 0x32, + 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, + 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, + 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xf3, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xa7, 0x01, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x8e, 0x01, - 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, - 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, - 0x5a, 0x4c, 0x22, 0x47, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 
0x7d, 0x3a, 0x73, - 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0xa4, + 0x3a, 0x01, 0x2a, 0x5a, 0x4c, 0x3a, 0x01, 0x2a, 0x22, 0x47, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, + 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xa4, 0x02, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, @@ -4399,17 +4399,17 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb8, 0x01, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x9a, 0x01, 0x22, 0x41, 0x2f, 0x76, 0x32, - 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, - 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, - 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, - 0x2a, 0x5a, 0x52, 0x22, 0x4d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x69, 0x6f, 0x6e, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x9a, 0x01, 0x3a, 0x01, 0x2a, 0x5a, 0x52, + 0x3a, 0x01, 0x2a, 0x22, 0x4d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0x1a, 0xde, 0x02, 0xca, 0x41, 0x1c, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x6e, 0x73, 0x22, 0x41, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xde, 0x02, 0xca, 0x41, 0x1c, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 
0x63, 0x6f, 0x6d, 0xd2, 0x41, 0xbb, 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, @@ -4431,21 +4431,21 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, - 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x42, 0xdf, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x42, 0xda, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x17, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, - 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, - 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, - 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, - 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, - 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, - 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x50, 0x01, 0x5a, 0x38, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x70, 0x62, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x70, 0x62, 0xaa, 0x02, 0x1e, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, + 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, + 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, + 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -4461,7 +4461,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP() []by } var file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes = make([]protoimpl.MessageInfo, 47) -var file_google_bigtable_admin_v2_bigtable_table_admin_proto_goTypes = []interface{}{ +var file_google_bigtable_admin_v2_bigtable_table_admin_proto_goTypes = []any{ 
(*RestoreTableRequest)(nil), // 0: google.bigtable.admin.v2.RestoreTableRequest (*RestoreTableMetadata)(nil), // 1: google.bigtable.admin.v2.RestoreTableMetadata (*OptimizeRestoredTableMetadata)(nil), // 2: google.bigtable.admin.v2.OptimizeRestoredTableMetadata @@ -4656,7 +4656,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { file_google_bigtable_admin_v2_common_proto_init() file_google_bigtable_admin_v2_table_proto_init() if !protoimpl.UnsafeEnabled { - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*RestoreTableRequest); i { case 0: return &v.state @@ -4668,7 +4668,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*RestoreTableMetadata); i { case 0: return &v.state @@ -4680,7 +4680,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*OptimizeRestoredTableMetadata); i { case 0: return &v.state @@ -4692,7 +4692,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*CreateTableRequest); i { case 0: return &v.state @@ -4704,7 +4704,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*CreateTableFromSnapshotRequest); i { case 0: return &v.state @@ -4716,7 +4716,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*DropRowRangeRequest); i { case 0: return &v.state @@ -4728,7 +4728,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*ListTablesRequest); i { case 0: return &v.state @@ -4740,7 +4740,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[7].Exporter = func(v any, i int) 
any { switch v := v.(*ListTablesResponse); i { case 0: return &v.state @@ -4752,7 +4752,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*GetTableRequest); i { case 0: return &v.state @@ -4764,7 +4764,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*UpdateTableRequest); i { case 0: return &v.state @@ -4776,7 +4776,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*UpdateTableMetadata); i { case 0: return &v.state @@ -4788,7 +4788,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*DeleteTableRequest); i { case 0: return &v.state @@ -4800,7 +4800,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*UndeleteTableRequest); i { case 0: return &v.state @@ -4812,7 +4812,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*UndeleteTableMetadata); i { case 0: return &v.state @@ -4824,7 +4824,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*ModifyColumnFamiliesRequest); i { case 0: return &v.state @@ -4836,7 +4836,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*GenerateConsistencyTokenRequest); i { case 0: return &v.state @@ -4848,7 +4848,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[16].Exporter = func(v interface{}, i int) 
interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*GenerateConsistencyTokenResponse); i { case 0: return &v.state @@ -4860,7 +4860,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[17].Exporter = func(v any, i int) any { switch v := v.(*CheckConsistencyRequest); i { case 0: return &v.state @@ -4872,7 +4872,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[18].Exporter = func(v any, i int) any { switch v := v.(*StandardReadRemoteWrites); i { case 0: return &v.state @@ -4884,7 +4884,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[19].Exporter = func(v any, i int) any { switch v := v.(*DataBoostReadLocalWrites); i { case 0: return &v.state @@ -4896,7 +4896,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[20].Exporter = func(v any, i int) any { switch v := v.(*CheckConsistencyResponse); i { case 0: return &v.state @@ -4908,7 +4908,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[21].Exporter = func(v any, i int) any { switch v := v.(*SnapshotTableRequest); i { case 0: return &v.state @@ -4920,7 +4920,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[22].Exporter = func(v any, i int) any { switch v := v.(*GetSnapshotRequest); i { case 0: return &v.state @@ -4932,7 +4932,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[23].Exporter = func(v any, i int) any { switch v := v.(*ListSnapshotsRequest); i { case 0: return &v.state @@ -4944,7 +4944,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[24].Exporter = func(v any, i int) any { switch v := v.(*ListSnapshotsResponse); i { case 0: return &v.state @@ -4956,7 +4956,7 @@ func 
file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[25].Exporter = func(v any, i int) any { switch v := v.(*DeleteSnapshotRequest); i { case 0: return &v.state @@ -4968,7 +4968,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[26].Exporter = func(v any, i int) any { switch v := v.(*SnapshotTableMetadata); i { case 0: return &v.state @@ -4980,7 +4980,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[27].Exporter = func(v any, i int) any { switch v := v.(*CreateTableFromSnapshotMetadata); i { case 0: return &v.state @@ -4992,7 +4992,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[28].Exporter = func(v any, i int) any { switch v := v.(*CreateBackupRequest); i { case 0: return &v.state @@ -5004,7 +5004,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[29].Exporter = func(v any, i int) any { switch v := v.(*CreateBackupMetadata); i { case 0: return &v.state @@ -5016,7 +5016,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[30].Exporter = func(v any, i int) any { switch v := v.(*UpdateBackupRequest); i { case 0: return &v.state @@ -5028,7 +5028,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[31].Exporter = func(v any, i int) any { switch v := v.(*GetBackupRequest); i { case 0: return &v.state @@ -5040,7 +5040,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[32].Exporter = func(v any, i int) any { switch v := v.(*DeleteBackupRequest); i { case 0: return &v.state @@ -5052,7 +5052,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[33].Exporter = 
func(v any, i int) any { switch v := v.(*ListBackupsRequest); i { case 0: return &v.state @@ -5064,7 +5064,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[34].Exporter = func(v any, i int) any { switch v := v.(*ListBackupsResponse); i { case 0: return &v.state @@ -5076,7 +5076,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[35].Exporter = func(v any, i int) any { switch v := v.(*CopyBackupRequest); i { case 0: return &v.state @@ -5088,7 +5088,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[36].Exporter = func(v any, i int) any { switch v := v.(*CopyBackupMetadata); i { case 0: return &v.state @@ -5100,7 +5100,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[37].Exporter = func(v any, i int) any { switch v := v.(*CreateAuthorizedViewRequest); i { case 0: return &v.state @@ -5112,7 +5112,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[38].Exporter = func(v any, i int) any { switch v := v.(*CreateAuthorizedViewMetadata); i { case 0: return &v.state @@ -5124,7 +5124,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[39].Exporter = func(v any, i int) any { switch v := v.(*ListAuthorizedViewsRequest); i { case 0: return &v.state @@ -5136,7 +5136,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[40].Exporter = func(v any, i int) any { switch v := v.(*ListAuthorizedViewsResponse); i { case 0: return &v.state @@ -5148,7 +5148,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[41].Exporter = func(v any, i int) any { switch v := v.(*GetAuthorizedViewRequest); i { case 0: return &v.state @@ -5160,7 +5160,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - 
file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[42].Exporter = func(v any, i int) any { switch v := v.(*UpdateAuthorizedViewRequest); i { case 0: return &v.state @@ -5172,7 +5172,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[43].Exporter = func(v any, i int) any { switch v := v.(*UpdateAuthorizedViewMetadata); i { case 0: return &v.state @@ -5184,7 +5184,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[44].Exporter = func(v any, i int) any { switch v := v.(*DeleteAuthorizedViewRequest); i { case 0: return &v.state @@ -5196,7 +5196,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[45].Exporter = func(v any, i int) any { switch v := v.(*CreateTableRequest_Split); i { case 0: return &v.state @@ -5208,7 +5208,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { return nil } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[46].Exporter = func(v any, i int) any { switch v := v.(*ModifyColumnFamiliesRequest_Modification); i { case 0: return &v.state @@ -5221,21 +5221,21 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[0].OneofWrappers = []any{ (*RestoreTableRequest_Backup)(nil), } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[1].OneofWrappers = []any{ (*RestoreTableMetadata_BackupInfo)(nil), } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[5].OneofWrappers = []interface{}{ + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[5].OneofWrappers = []any{ (*DropRowRangeRequest_RowKeyPrefix)(nil), (*DropRowRangeRequest_DeleteAllDataFromTable)(nil), } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[17].OneofWrappers = []interface{}{ + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[17].OneofWrappers = []any{ (*CheckConsistencyRequest_StandardReadRemoteWrites)(nil), (*CheckConsistencyRequest_DataBoostReadLocalWrites)(nil), } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[46].OneofWrappers = []interface{}{ + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[46].OneofWrappers = []any{ (*ModifyColumnFamiliesRequest_Modification_Create)(nil), (*ModifyColumnFamiliesRequest_Modification_Update)(nil), (*ModifyColumnFamiliesRequest_Modification_Drop)(nil), @@ -5378,7 +5378,7 @@ type 
BigtableTableAdminClient interface { // returned table [long-running operation][google.longrunning.Operation] can // be used to track the progress of the operation, and to cancel it. The // [metadata][google.longrunning.Operation.metadata] field type is - // [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The + // [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. The // [response][google.longrunning.Operation.response] type is // [Table][google.bigtable.admin.v2.Table], if successful. RestoreTable(ctx context.Context, in *RestoreTableRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) @@ -5783,7 +5783,7 @@ type BigtableTableAdminServer interface { // returned table [long-running operation][google.longrunning.Operation] can // be used to track the progress of the operation, and to cancel it. The // [metadata][google.longrunning.Operation.metadata] field type is - // [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The + // [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. The // [response][google.longrunning.Operation.response] type is // [Table][google.bigtable.admin.v2.Table], if successful. RestoreTable(context.Context, *RestoreTableRequest) (*longrunningpb.Operation, error) diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/common.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/common.pb.go similarity index 88% rename from terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/common.pb.go rename to terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/common.pb.go index 5f172013dd4..caa6e944683 100644 --- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/common.pb.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/common.pb.go @@ -14,11 +14,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 -// protoc v4.24.4 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/bigtable/admin/v2/common.proto -package admin +package adminpb import ( reflect "reflect" @@ -183,21 +183,20 @@ var file_google_bigtable_admin_v2_common_proto_rawDesc = []byte{ 0x54, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x53, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x48, - 0x44, 0x44, 0x10, 0x02, 0x42, 0xd3, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, + 0x44, 0x44, 0x10, 0x02, 0x42, 0xce, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, - 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, - 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, - 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, - 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, - 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x70, 0x62, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x70, 0x62, 0xaa, 0x02, + 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, + 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, + 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, + 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -214,7 +213,7 @@ func file_google_bigtable_admin_v2_common_proto_rawDescGZIP() []byte { var file_google_bigtable_admin_v2_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_bigtable_admin_v2_common_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_bigtable_admin_v2_common_proto_goTypes = []interface{}{ +var file_google_bigtable_admin_v2_common_proto_goTypes = []any{ 
(StorageType)(0), // 0: google.bigtable.admin.v2.StorageType (*OperationProgress)(nil), // 1: google.bigtable.admin.v2.OperationProgress (*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp @@ -235,7 +234,7 @@ func file_google_bigtable_admin_v2_common_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_bigtable_admin_v2_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_common_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*OperationProgress); i { case 0: return &v.state diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/instance.pb.go similarity index 80% rename from terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go rename to terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/instance.pb.go index 811a67ff0ab..0fc21d396de 100644 --- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/instance.pb.go @@ -14,11 +14,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v4.24.4 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/bigtable/admin/v2/instance.proto -package admin +package adminpb import ( reflect "reflect" @@ -822,7 +822,7 @@ func (m *AppProfile) GetIsolation() isAppProfile_Isolation { return nil } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in google/bigtable/admin/v2/instance.proto. func (x *AppProfile) GetPriority() AppProfile_Priority { if x, ok := x.GetIsolation().(*AppProfile_Priority_); ok { return x.Priority @@ -872,7 +872,7 @@ type AppProfile_Priority_ struct { // // The priority of requests sent using this app profile. // - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in google/bigtable/admin/v2/instance.proto. Priority AppProfile_Priority `protobuf:"varint,7,opt,name=priority,proto3,enum=google.bigtable.admin.v2.AppProfile_Priority,oneof"` } @@ -1186,6 +1186,19 @@ type AppProfile_MultiClusterRoutingUseAny struct { // The set of clusters to route to. The order is ignored; clusters will be // tried in order of distance. If left empty, all clusters are eligible. ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + // Possible algorithms for routing affinity. If enabled, Bigtable will + // route between equidistant clusters in a deterministic order rather than + // choosing randomly. + // + // This mechanism gives read-your-writes consistency for *most* requests + // under *most* circumstances, without sacrificing availability. Consistency + // is *not* guaranteed, as requests might still fail over between clusters + // in the event of errors or latency. 
+ // + // Types that are assignable to Affinity: + // + // *AppProfile_MultiClusterRoutingUseAny_RowAffinity_ + Affinity isAppProfile_MultiClusterRoutingUseAny_Affinity `protobuf_oneof:"affinity"` } func (x *AppProfile_MultiClusterRoutingUseAny) Reset() { @@ -1227,6 +1240,33 @@ func (x *AppProfile_MultiClusterRoutingUseAny) GetClusterIds() []string { return nil } +func (m *AppProfile_MultiClusterRoutingUseAny) GetAffinity() isAppProfile_MultiClusterRoutingUseAny_Affinity { + if m != nil { + return m.Affinity + } + return nil +} + +func (x *AppProfile_MultiClusterRoutingUseAny) GetRowAffinity() *AppProfile_MultiClusterRoutingUseAny_RowAffinity { + if x, ok := x.GetAffinity().(*AppProfile_MultiClusterRoutingUseAny_RowAffinity_); ok { + return x.RowAffinity + } + return nil +} + +type isAppProfile_MultiClusterRoutingUseAny_Affinity interface { + isAppProfile_MultiClusterRoutingUseAny_Affinity() +} + +type AppProfile_MultiClusterRoutingUseAny_RowAffinity_ struct { + // Row affinity sticky routing based on the row key of the request. + // Requests that span multiple rows are routed non-deterministically. + RowAffinity *AppProfile_MultiClusterRoutingUseAny_RowAffinity `protobuf:"bytes,3,opt,name=row_affinity,json=rowAffinity,proto3,oneof"` +} + +func (*AppProfile_MultiClusterRoutingUseAny_RowAffinity_) isAppProfile_MultiClusterRoutingUseAny_Affinity() { +} + // Unconditionally routes all read/write requests to a specific cluster. // This option preserves read-your-writes consistency but does not improve // availability. @@ -1399,6 +1439,53 @@ func (x *AppProfile_DataBoostIsolationReadOnly) GetComputeBillingOwner() AppProf return AppProfile_DataBoostIsolationReadOnly_COMPUTE_BILLING_OWNER_UNSPECIFIED } +// If enabled, Bigtable will route the request based on the row key of the +// request, rather than randomly. Instead, each row key will be assigned +// to a cluster, and will stick to that cluster. If clusters are added or +// removed, then this may affect which row keys stick to which clusters. +// To avoid this, users can use a cluster group to specify which clusters +// are to be used. In this case, new clusters that are not a part of the +// cluster group will not be routed to, and routing will be unaffected by +// the new cluster. Moreover, clusters specified in the cluster group cannot +// be deleted unless removed from the cluster group. +type AppProfile_MultiClusterRoutingUseAny_RowAffinity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *AppProfile_MultiClusterRoutingUseAny_RowAffinity) Reset() { + *x = AppProfile_MultiClusterRoutingUseAny_RowAffinity{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AppProfile_MultiClusterRoutingUseAny_RowAffinity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AppProfile_MultiClusterRoutingUseAny_RowAffinity) ProtoMessage() {} + +func (x *AppProfile_MultiClusterRoutingUseAny_RowAffinity) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AppProfile_MultiClusterRoutingUseAny_RowAffinity.ProtoReflect.Descriptor instead. 
+func (*AppProfile_MultiClusterRoutingUseAny_RowAffinity) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_instance_proto_rawDescGZIP(), []int{4, 0, 0} +} + var File_google_bigtable_admin_v2_instance_proto protoreflect.FileDescriptor var file_google_bigtable_admin_v2_instance_proto_rawDesc = []byte{ @@ -1543,8 +1630,8 @@ var file_google_bigtable_admin_v2_instance_proto_rawDesc = []byte{ 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x7d, 0x42, 0x08, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xa8, - 0x0b, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, + 0x74, 0x65, 0x72, 0x7d, 0x42, 0x08, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xb5, + 0x0c, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, @@ -1585,107 +1672,116 @@ var file_google_bigtable_admin_v2_instance_proto_rawDesc = []byte{ 0x73, 0x74, 0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x48, 0x01, 0x52, 0x1a, 0x64, 0x61, 0x74, 0x61, 0x42, 0x6f, 0x6f, 0x73, 0x74, 0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, - 0x79, 0x1a, 0x3c, 0x0a, 0x19, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x41, 0x6e, 0x79, 0x12, 0x1f, - 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x1a, - 0x73, 0x0a, 0x14, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x1a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, - 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x61, 0x6c, 0x6c, 0x6f, - 0x77, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x73, 0x1a, 0x5e, 0x0a, 0x11, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, - 0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x08, 0x70, 0x72, 0x69, - 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, - 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, - 0x72, 0x69, 0x74, 0x79, 0x1a, 0x92, 0x02, 0x0a, 0x1a, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6f, 0x6f, - 0x73, 0x74, 0x49, 0x73, 
0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x64, 0x4f, - 0x6e, 0x6c, 0x79, 0x12, 0x8c, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x5f, - 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x53, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, - 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6f, - 0x6f, 0x73, 0x74, 0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x64, - 0x4f, 0x6e, 0x6c, 0x79, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x42, 0x69, 0x6c, 0x6c, - 0x69, 0x6e, 0x67, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x48, 0x00, 0x52, 0x13, 0x63, 0x6f, 0x6d, 0x70, - 0x75, 0x74, 0x65, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x88, - 0x01, 0x01, 0x22, 0x4b, 0x0a, 0x13, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x42, 0x69, 0x6c, - 0x6c, 0x69, 0x6e, 0x67, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x25, 0x0a, 0x21, 0x43, 0x4f, 0x4d, - 0x50, 0x55, 0x54, 0x45, 0x5f, 0x42, 0x49, 0x4c, 0x4c, 0x49, 0x4e, 0x47, 0x5f, 0x4f, 0x57, 0x4e, - 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x4f, 0x53, 0x54, 0x5f, 0x50, 0x41, 0x59, 0x53, 0x10, 0x01, 0x42, - 0x18, 0x0a, 0x16, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x5f, 0x62, 0x69, 0x6c, 0x6c, - 0x69, 0x6e, 0x67, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x22, 0x5e, 0x0a, 0x08, 0x50, 0x72, 0x69, - 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, 0x54, - 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x10, 0x0a, 0x0c, 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4c, 0x4f, 0x57, 0x10, - 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4d, 0x45, - 0x44, 0x49, 0x55, 0x4d, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, - 0x54, 0x59, 0x5f, 0x48, 0x49, 0x47, 0x48, 0x10, 0x03, 0x3a, 0x6f, 0xea, 0x41, 0x6c, 0x0a, 0x27, - 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x70, 0x70, - 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x41, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, - 0x2f, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x70, - 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x7d, 0x42, 0x10, 0x0a, 0x0e, 0x72, 0x6f, - 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x0b, 0x0a, 0x09, - 0x69, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xd4, 0x03, 0x0a, 0x09, 0x48, 0x6f, - 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x0a, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x27, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, - 0x69, 0x6d, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, - 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x17, 0x0a, 0x07, - 0x65, 0x6e, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, - 0x6e, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x38, 0x0a, 0x16, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x70, - 0x75, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x02, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x6e, 0x6f, 0x64, 0x65, - 0x43, 0x70, 0x75, 0x55, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x3a, - 0x7f, 0xea, 0x41, 0x7c, 0x0a, 0x26, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x48, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x52, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, - 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x68, 0x6f, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x7d, - 0x42, 0xd0, 0x02, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x79, 0x1a, 0xc8, 0x01, 0x0a, 0x19, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x41, 0x6e, 0x79, 0x12, + 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, + 0x12, 0x6f, 0x0a, 0x0c, 0x72, 0x6f, 0x77, 0x5f, 0x61, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, - 0x32, 0x42, 0x0d, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, - 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 
0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, - 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, - 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, - 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, - 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, - 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, - 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, - 0x65, 0x79, 0x7d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x4d, 0x75, 0x6c, + 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, + 0x55, 0x73, 0x65, 0x41, 0x6e, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x41, 0x66, 0x66, 0x69, 0x6e, 0x69, + 0x74, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x72, 0x6f, 0x77, 0x41, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, + 0x79, 0x1a, 0x0d, 0x0a, 0x0b, 0x52, 0x6f, 0x77, 0x41, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79, + 0x42, 0x0a, 0x0a, 0x08, 0x61, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79, 0x1a, 0x73, 0x0a, 0x14, + 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x1a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x73, 0x1a, 0x5e, 0x0a, 0x11, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x49, 0x73, 0x6f, + 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, + 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x50, + 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, + 0x79, 0x1a, 0x92, 0x02, 0x0a, 0x1a, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6f, 0x6f, 0x73, 0x74, 0x49, + 0x73, 0x6f, 0x6c, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, + 0x12, 0x8c, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x5f, 0x62, 0x69, 0x6c, + 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x53, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, + 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6f, 0x6f, 0x73, 0x74, + 0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, + 0x79, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, + 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x48, 0x00, 0x52, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, + 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x88, 0x01, 0x01, 0x22, + 0x4b, 0x0a, 0x13, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, + 0x67, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x25, 0x0a, 0x21, 0x43, 0x4f, 0x4d, 0x50, 0x55, 0x54, + 0x45, 0x5f, 0x42, 0x49, 0x4c, 0x4c, 0x49, 0x4e, 0x47, 0x5f, 0x4f, 0x57, 0x4e, 0x45, 0x52, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, + 0x09, 0x48, 0x4f, 0x53, 0x54, 0x5f, 0x50, 0x41, 0x59, 0x53, 0x10, 0x01, 0x42, 0x18, 0x0a, 0x16, + 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x5f, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, + 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x22, 0x5e, 0x0a, 0x08, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, + 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, + 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x13, + 0x0a, 0x0f, 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4d, 0x45, 0x44, 0x49, 0x55, + 0x4d, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, 0x54, 0x59, 0x5f, + 0x48, 0x49, 0x47, 0x48, 0x10, 0x03, 0x3a, 0x6f, 0xea, 0x41, 0x6c, 0x0a, 0x27, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, + 0x66, 0x69, 0x6c, 0x65, 0x12, 0x41, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x61, 0x70, + 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x70, 0x70, 0x5f, 0x70, + 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x7d, 0x42, 0x10, 0x0a, 0x0e, 0x72, 0x6f, 0x75, 0x74, 0x69, + 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x0b, 0x0a, 0x09, 0x69, 0x73, 0x6f, + 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xd4, 0x03, 0x0a, 0x09, 0x48, 0x6f, 0x74, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xfa, 0x41, + 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 
0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x3e, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x6e, 0x64, + 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4b, + 0x65, 0x79, 0x12, 0x38, 0x0a, 0x16, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x75, + 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x02, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x6e, 0x6f, 0x64, 0x65, 0x43, 0x70, 0x75, + 0x55, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x3a, 0x7f, 0xea, 0x41, + 0x7c, 0x0a, 0x26, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x48, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x52, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, + 0x2f, 0x7b, 0x68, 0x6f, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x7d, 0x42, 0xcb, 0x02, + 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, + 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, + 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x0d, 0x49, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 
0x67, 0x6f, 0x2f, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x61, + 0x70, 0x69, 0x76, 0x32, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x70, 0x62, 0x3b, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x70, 0x62, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, + 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, + 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -1701,56 +1797,58 @@ func file_google_bigtable_admin_v2_instance_proto_rawDescGZIP() []byte { } var file_google_bigtable_admin_v2_instance_proto_enumTypes = make([]protoimpl.EnumInfo, 5) -var file_google_bigtable_admin_v2_instance_proto_msgTypes = make([]protoimpl.MessageInfo, 14) -var file_google_bigtable_admin_v2_instance_proto_goTypes = []interface{}{ +var file_google_bigtable_admin_v2_instance_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_google_bigtable_admin_v2_instance_proto_goTypes = []any{ (Instance_State)(0), // 0: google.bigtable.admin.v2.Instance.State (Instance_Type)(0), // 1: google.bigtable.admin.v2.Instance.Type (Cluster_State)(0), // 2: google.bigtable.admin.v2.Cluster.State (AppProfile_Priority)(0), // 3: google.bigtable.admin.v2.AppProfile.Priority (AppProfile_DataBoostIsolationReadOnly_ComputeBillingOwner)(0), // 4: google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner - (*Instance)(nil), // 5: google.bigtable.admin.v2.Instance - (*AutoscalingTargets)(nil), // 6: google.bigtable.admin.v2.AutoscalingTargets - (*AutoscalingLimits)(nil), // 7: google.bigtable.admin.v2.AutoscalingLimits - (*Cluster)(nil), // 8: google.bigtable.admin.v2.Cluster - (*AppProfile)(nil), // 9: google.bigtable.admin.v2.AppProfile - (*HotTablet)(nil), // 10: google.bigtable.admin.v2.HotTablet - nil, // 11: google.bigtable.admin.v2.Instance.LabelsEntry - (*Cluster_ClusterAutoscalingConfig)(nil), // 12: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig - (*Cluster_ClusterConfig)(nil), // 13: google.bigtable.admin.v2.Cluster.ClusterConfig - (*Cluster_EncryptionConfig)(nil), // 14: google.bigtable.admin.v2.Cluster.EncryptionConfig - (*AppProfile_MultiClusterRoutingUseAny)(nil), // 15: google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny - (*AppProfile_SingleClusterRouting)(nil), // 16: google.bigtable.admin.v2.AppProfile.SingleClusterRouting - (*AppProfile_StandardIsolation)(nil), // 17: google.bigtable.admin.v2.AppProfile.StandardIsolation - (*AppProfile_DataBoostIsolationReadOnly)(nil), // 18: google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly - (*timestamppb.Timestamp)(nil), // 19: google.protobuf.Timestamp - (StorageType)(0), // 20: google.bigtable.admin.v2.StorageType + (*Instance)(nil), // 5: google.bigtable.admin.v2.Instance + (*AutoscalingTargets)(nil), // 6: google.bigtable.admin.v2.AutoscalingTargets + (*AutoscalingLimits)(nil), // 7: google.bigtable.admin.v2.AutoscalingLimits + (*Cluster)(nil), // 8: google.bigtable.admin.v2.Cluster + (*AppProfile)(nil), // 9: google.bigtable.admin.v2.AppProfile + 
(*HotTablet)(nil), // 10: google.bigtable.admin.v2.HotTablet + nil, // 11: google.bigtable.admin.v2.Instance.LabelsEntry + (*Cluster_ClusterAutoscalingConfig)(nil), // 12: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig + (*Cluster_ClusterConfig)(nil), // 13: google.bigtable.admin.v2.Cluster.ClusterConfig + (*Cluster_EncryptionConfig)(nil), // 14: google.bigtable.admin.v2.Cluster.EncryptionConfig + (*AppProfile_MultiClusterRoutingUseAny)(nil), // 15: google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny + (*AppProfile_SingleClusterRouting)(nil), // 16: google.bigtable.admin.v2.AppProfile.SingleClusterRouting + (*AppProfile_StandardIsolation)(nil), // 17: google.bigtable.admin.v2.AppProfile.StandardIsolation + (*AppProfile_DataBoostIsolationReadOnly)(nil), // 18: google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly + (*AppProfile_MultiClusterRoutingUseAny_RowAffinity)(nil), // 19: google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny.RowAffinity + (*timestamppb.Timestamp)(nil), // 20: google.protobuf.Timestamp + (StorageType)(0), // 21: google.bigtable.admin.v2.StorageType } var file_google_bigtable_admin_v2_instance_proto_depIdxs = []int32{ 0, // 0: google.bigtable.admin.v2.Instance.state:type_name -> google.bigtable.admin.v2.Instance.State 1, // 1: google.bigtable.admin.v2.Instance.type:type_name -> google.bigtable.admin.v2.Instance.Type 11, // 2: google.bigtable.admin.v2.Instance.labels:type_name -> google.bigtable.admin.v2.Instance.LabelsEntry - 19, // 3: google.bigtable.admin.v2.Instance.create_time:type_name -> google.protobuf.Timestamp + 20, // 3: google.bigtable.admin.v2.Instance.create_time:type_name -> google.protobuf.Timestamp 2, // 4: google.bigtable.admin.v2.Cluster.state:type_name -> google.bigtable.admin.v2.Cluster.State 13, // 5: google.bigtable.admin.v2.Cluster.cluster_config:type_name -> google.bigtable.admin.v2.Cluster.ClusterConfig - 20, // 6: google.bigtable.admin.v2.Cluster.default_storage_type:type_name -> google.bigtable.admin.v2.StorageType + 21, // 6: google.bigtable.admin.v2.Cluster.default_storage_type:type_name -> google.bigtable.admin.v2.StorageType 14, // 7: google.bigtable.admin.v2.Cluster.encryption_config:type_name -> google.bigtable.admin.v2.Cluster.EncryptionConfig 15, // 8: google.bigtable.admin.v2.AppProfile.multi_cluster_routing_use_any:type_name -> google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny 16, // 9: google.bigtable.admin.v2.AppProfile.single_cluster_routing:type_name -> google.bigtable.admin.v2.AppProfile.SingleClusterRouting 3, // 10: google.bigtable.admin.v2.AppProfile.priority:type_name -> google.bigtable.admin.v2.AppProfile.Priority 17, // 11: google.bigtable.admin.v2.AppProfile.standard_isolation:type_name -> google.bigtable.admin.v2.AppProfile.StandardIsolation 18, // 12: google.bigtable.admin.v2.AppProfile.data_boost_isolation_read_only:type_name -> google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly - 19, // 13: google.bigtable.admin.v2.HotTablet.start_time:type_name -> google.protobuf.Timestamp - 19, // 14: google.bigtable.admin.v2.HotTablet.end_time:type_name -> google.protobuf.Timestamp + 20, // 13: google.bigtable.admin.v2.HotTablet.start_time:type_name -> google.protobuf.Timestamp + 20, // 14: google.bigtable.admin.v2.HotTablet.end_time:type_name -> google.protobuf.Timestamp 7, // 15: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig.autoscaling_limits:type_name -> google.bigtable.admin.v2.AutoscalingLimits 6, // 16: 
google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig.autoscaling_targets:type_name -> google.bigtable.admin.v2.AutoscalingTargets 12, // 17: google.bigtable.admin.v2.Cluster.ClusterConfig.cluster_autoscaling_config:type_name -> google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig - 3, // 18: google.bigtable.admin.v2.AppProfile.StandardIsolation.priority:type_name -> google.bigtable.admin.v2.AppProfile.Priority - 4, // 19: google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.compute_billing_owner:type_name -> google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner - 20, // [20:20] is the sub-list for method output_type - 20, // [20:20] is the sub-list for method input_type - 20, // [20:20] is the sub-list for extension type_name - 20, // [20:20] is the sub-list for extension extendee - 0, // [0:20] is the sub-list for field type_name + 19, // 18: google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny.row_affinity:type_name -> google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny.RowAffinity + 3, // 19: google.bigtable.admin.v2.AppProfile.StandardIsolation.priority:type_name -> google.bigtable.admin.v2.AppProfile.Priority + 4, // 20: google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.compute_billing_owner:type_name -> google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner + 21, // [21:21] is the sub-list for method output_type + 21, // [21:21] is the sub-list for method input_type + 21, // [21:21] is the sub-list for extension type_name + 21, // [21:21] is the sub-list for extension extendee + 0, // [0:21] is the sub-list for field type_name } func init() { file_google_bigtable_admin_v2_instance_proto_init() } @@ -1760,7 +1858,7 @@ func file_google_bigtable_admin_v2_instance_proto_init() { } file_google_bigtable_admin_v2_common_proto_init() if !protoimpl.UnsafeEnabled { - file_google_bigtable_admin_v2_instance_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_instance_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Instance); i { case 0: return &v.state @@ -1772,7 +1870,7 @@ func file_google_bigtable_admin_v2_instance_proto_init() { return nil } } - file_google_bigtable_admin_v2_instance_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_instance_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*AutoscalingTargets); i { case 0: return &v.state @@ -1784,7 +1882,7 @@ func file_google_bigtable_admin_v2_instance_proto_init() { return nil } } - file_google_bigtable_admin_v2_instance_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_instance_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*AutoscalingLimits); i { case 0: return &v.state @@ -1796,7 +1894,7 @@ func file_google_bigtable_admin_v2_instance_proto_init() { return nil } } - file_google_bigtable_admin_v2_instance_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_instance_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*Cluster); i { case 0: return &v.state @@ -1808,7 +1906,7 @@ func file_google_bigtable_admin_v2_instance_proto_init() { return nil } } - file_google_bigtable_admin_v2_instance_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_instance_proto_msgTypes[4].Exporter = func(v any, i int) any { 
switch v := v.(*AppProfile); i { case 0: return &v.state @@ -1820,7 +1918,7 @@ func file_google_bigtable_admin_v2_instance_proto_init() { return nil } } - file_google_bigtable_admin_v2_instance_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_instance_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*HotTablet); i { case 0: return &v.state @@ -1832,7 +1930,7 @@ func file_google_bigtable_admin_v2_instance_proto_init() { return nil } } - file_google_bigtable_admin_v2_instance_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_instance_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*Cluster_ClusterAutoscalingConfig); i { case 0: return &v.state @@ -1844,7 +1942,7 @@ func file_google_bigtable_admin_v2_instance_proto_init() { return nil } } - file_google_bigtable_admin_v2_instance_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_instance_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*Cluster_ClusterConfig); i { case 0: return &v.state @@ -1856,7 +1954,7 @@ func file_google_bigtable_admin_v2_instance_proto_init() { return nil } } - file_google_bigtable_admin_v2_instance_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_instance_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*Cluster_EncryptionConfig); i { case 0: return &v.state @@ -1868,7 +1966,7 @@ func file_google_bigtable_admin_v2_instance_proto_init() { return nil } } - file_google_bigtable_admin_v2_instance_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_instance_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*AppProfile_MultiClusterRoutingUseAny); i { case 0: return &v.state @@ -1880,7 +1978,7 @@ func file_google_bigtable_admin_v2_instance_proto_init() { return nil } } - file_google_bigtable_admin_v2_instance_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_instance_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*AppProfile_SingleClusterRouting); i { case 0: return &v.state @@ -1892,7 +1990,7 @@ func file_google_bigtable_admin_v2_instance_proto_init() { return nil } } - file_google_bigtable_admin_v2_instance_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_instance_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*AppProfile_StandardIsolation); i { case 0: return &v.state @@ -1904,7 +2002,7 @@ func file_google_bigtable_admin_v2_instance_proto_init() { return nil } } - file_google_bigtable_admin_v2_instance_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_instance_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*AppProfile_DataBoostIsolationReadOnly); i { case 0: return &v.state @@ -1916,26 +2014,41 @@ func file_google_bigtable_admin_v2_instance_proto_init() { return nil } } + file_google_bigtable_admin_v2_instance_proto_msgTypes[14].Exporter = func(v any, i int) any { + switch v := v.(*AppProfile_MultiClusterRoutingUseAny_RowAffinity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } - file_google_bigtable_admin_v2_instance_proto_msgTypes[0].OneofWrappers = 
[]interface{}{} - file_google_bigtable_admin_v2_instance_proto_msgTypes[3].OneofWrappers = []interface{}{ + file_google_bigtable_admin_v2_instance_proto_msgTypes[0].OneofWrappers = []any{} + file_google_bigtable_admin_v2_instance_proto_msgTypes[3].OneofWrappers = []any{ (*Cluster_ClusterConfig_)(nil), } - file_google_bigtable_admin_v2_instance_proto_msgTypes[4].OneofWrappers = []interface{}{ + file_google_bigtable_admin_v2_instance_proto_msgTypes[4].OneofWrappers = []any{ (*AppProfile_MultiClusterRoutingUseAny_)(nil), (*AppProfile_SingleClusterRouting_)(nil), (*AppProfile_Priority_)(nil), (*AppProfile_StandardIsolation_)(nil), (*AppProfile_DataBoostIsolationReadOnly_)(nil), } - file_google_bigtable_admin_v2_instance_proto_msgTypes[13].OneofWrappers = []interface{}{} + file_google_bigtable_admin_v2_instance_proto_msgTypes[10].OneofWrappers = []any{ + (*AppProfile_MultiClusterRoutingUseAny_RowAffinity_)(nil), + } + file_google_bigtable_admin_v2_instance_proto_msgTypes[13].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_bigtable_admin_v2_instance_proto_rawDesc, NumEnums: 5, - NumMessages: 14, + NumMessages: 15, NumExtensions: 0, NumServices: 0, }, diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/table.pb.go similarity index 87% rename from terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go rename to terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/table.pb.go index 01737bff74c..9cd3dbe892c 100644 --- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/table.pb.go @@ -14,11 +14,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v4.24.4 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/bigtable/admin/v2/table.proto -package admin +package adminpb import ( reflect "reflect" @@ -506,6 +506,64 @@ func (Backup_State) EnumDescriptor() ([]byte, []int) { return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{8, 0} } +// The type of the backup. +type Backup_BackupType int32 + +const ( + // Not specified. + Backup_BACKUP_TYPE_UNSPECIFIED Backup_BackupType = 0 + // The default type for Cloud Bigtable managed backups. Supported for + // backups created in both HDD and SSD instances. Requires optimization when + // restored to a table in an SSD instance. + Backup_STANDARD Backup_BackupType = 1 + // A backup type with faster restore to SSD performance. Only supported for + // backups created in SSD instances. A new SSD table restored from a hot + // backup reaches production performance more quickly than a standard + // backup. + Backup_HOT Backup_BackupType = 2 +) + +// Enum value maps for Backup_BackupType. 
+var ( + Backup_BackupType_name = map[int32]string{ + 0: "BACKUP_TYPE_UNSPECIFIED", + 1: "STANDARD", + 2: "HOT", + } + Backup_BackupType_value = map[string]int32{ + "BACKUP_TYPE_UNSPECIFIED": 0, + "STANDARD": 1, + "HOT": 2, + } +) + +func (x Backup_BackupType) Enum() *Backup_BackupType { + p := new(Backup_BackupType) + *p = x + return p +} + +func (x Backup_BackupType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Backup_BackupType) Descriptor() protoreflect.EnumDescriptor { + return file_google_bigtable_admin_v2_table_proto_enumTypes[8].Descriptor() +} + +func (Backup_BackupType) Type() protoreflect.EnumType { + return &file_google_bigtable_admin_v2_table_proto_enumTypes[8] +} + +func (x Backup_BackupType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Backup_BackupType.Descriptor instead. +func (Backup_BackupType) EnumDescriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{8, 1} +} + // Information about a table restore. type RestoreInfo struct { state protoimpl.MessageState @@ -1303,13 +1361,16 @@ type Backup struct { SourceTable string `protobuf:"bytes,2,opt,name=source_table,json=sourceTable,proto3" json:"source_table,omitempty"` // Output only. Name of the backup from which this backup was copied. If a // backup is not created by copying a backup, this field will be empty. Values - // are of the form: projects//instances//backups/. + // are of the form: + // projects//instances//clusters//backups/ SourceBackup string `protobuf:"bytes,10,opt,name=source_backup,json=sourceBackup,proto3" json:"source_backup,omitempty"` - // Required. The expiration time of the backup, with microseconds - // granularity that must be at least 6 hours and at most 90 days - // from the time the request is received. Once the `expire_time` - // has passed, Cloud Bigtable will delete the backup and free the - // resources used by the backup. + // Required. The expiration time of the backup. + // When creating a backup or updating its `expire_time`, the value must be + // greater than the backup creation time by: + // - At least 6 hours + // - At most 90 days + // + // Once the `expire_time` has passed, Cloud Bigtable will delete the backup. ExpireTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` // Output only. `start_time` is the time that the backup was started // (i.e. approximately the time the @@ -1326,6 +1387,17 @@ type Backup struct { State Backup_State `protobuf:"varint,7,opt,name=state,proto3,enum=google.bigtable.admin.v2.Backup_State" json:"state,omitempty"` // Output only. The encryption information for the backup. EncryptionInfo *EncryptionInfo `protobuf:"bytes,9,opt,name=encryption_info,json=encryptionInfo,proto3" json:"encryption_info,omitempty"` + // Indicates the backup type of the backup. + BackupType Backup_BackupType `protobuf:"varint,11,opt,name=backup_type,json=backupType,proto3,enum=google.bigtable.admin.v2.Backup_BackupType" json:"backup_type,omitempty"` + // The time at which the hot backup will be converted to a standard backup. + // Once the `hot_to_standard_time` has passed, Cloud Bigtable will convert the + // hot backup to a standard backup. This value must be greater than the backup + // creation time by: + // - At least 24 hours + // + // This field only applies for hot backups. 
When creating or updating a + // standard backup, attempting to set this field will fail the request. + HotToStandardTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=hot_to_standard_time,json=hotToStandardTime,proto3" json:"hot_to_standard_time,omitempty"` } func (x *Backup) Reset() { @@ -1423,6 +1495,20 @@ func (x *Backup) GetEncryptionInfo() *EncryptionInfo { return nil } +func (x *Backup) GetBackupType() Backup_BackupType { + if x != nil { + return x.BackupType + } + return Backup_BACKUP_TYPE_UNSPECIFIED +} + +func (x *Backup) GetHotToStandardTime() *timestamppb.Timestamp { + if x != nil { + return x.HotToStandardTime + } + return nil +} + // Information about a backup. type BackupInfo struct { state protoimpl.MessageState @@ -1441,7 +1527,8 @@ type BackupInfo struct { SourceTable string `protobuf:"bytes,4,opt,name=source_table,json=sourceTable,proto3" json:"source_table,omitempty"` // Output only. Name of the backup from which this backup was copied. If a // backup is not created by copying a backup, this field will be empty. Values - // are of the form: projects//instances//backups/. + // are of the form: + // projects//instances//clusters//backups/ SourceBackup string `protobuf:"bytes,10,opt,name=source_backup,json=sourceBackup,proto3" json:"source_backup,omitempty"` } @@ -2144,7 +2231,7 @@ var file_google_bigtable_admin_v2_table_proto_rawDesc = []byte{ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x7d, - 0x22, 0x9e, 0x05, 0x0a, 0x06, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x22, 0xfb, 0x06, 0x0a, 0x06, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x05, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x73, @@ -2175,62 +2262,75 @@ var file_google_bigtable_admin_v2_table_proto_rawDesc = []byte{ 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, - 0x6f, 0x22, 0x37, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, - 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, - 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x02, 0x3a, 0x75, 0xea, 0x41, 0x72, 0x0a, - 0x23, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x12, 0x4b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x7d, - 0x2f, 0x62, 0x61, 0x63, 0x6b, 
0x75, 0x70, 0x73, 0x2f, 0x7b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x7d, 0x22, 0xf7, 0x01, 0x0a, 0x0a, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, - 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x3e, 0x0a, - 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3a, 0x0a, - 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, - 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2a, 0x44, 0x0a, 0x11, 0x52, - 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x53, 0x54, 0x4f, 0x52, 0x45, 0x5f, 0x53, 0x4f, 0x55, 0x52, - 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x10, - 0x01, 0x42, 0xfc, 0x02, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x76, 0x32, 0x42, 0x0a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xaa, - 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, - 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, - 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, - 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, - 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, - 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0xea, 0x41, 0xa6, 0x01, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 
0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x7a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, - 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, - 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, - 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x7d, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6f, 0x12, 0x4c, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x4b, 0x0a, 0x14, 0x68, 0x6f, 0x74, 0x5f, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, + 0x72, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x11, 0x68, 0x6f, 0x74, 0x54, 0x6f, + 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x37, 0x0a, 0x05, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, + 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, + 0x41, 0x44, 0x59, 0x10, 0x02, 0x22, 0x40, 0x0a, 0x0a, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x41, 0x4e, 0x44, 0x41, 0x52, 0x44, 0x10, 0x01, 0x12, 0x07, + 0x0a, 0x03, 0x48, 0x4f, 0x54, 0x10, 0x02, 0x3a, 0x75, 0xea, 0x41, 0x72, 0x0a, 0x23, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x12, 0x4b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, + 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x62, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x7b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x7d, 0x22, 0xf7, + 0x01, 0x0a, 0x0a, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1b, 0x0a, + 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x06, 0x62, 
0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x3e, 0x0a, 0x0a, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x6e, + 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x07, 0x65, + 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x28, + 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2a, 0x44, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, + 0x1f, 0x52, 0x45, 0x53, 0x54, 0x4f, 0x52, 0x45, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x10, 0x01, 0x42, 0xf7, + 0x02, 0xea, 0x41, 0xa6, 0x01, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x7a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, + 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, + 0x6b, 0x65, 0x79, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, + 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x0a, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x70, 0x62, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x70, + 0x62, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 
0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, + 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, + 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2245,9 +2345,9 @@ func file_google_bigtable_admin_v2_table_proto_rawDescGZIP() []byte { return file_google_bigtable_admin_v2_table_proto_rawDescData } -var file_google_bigtable_admin_v2_table_proto_enumTypes = make([]protoimpl.EnumInfo, 8) +var file_google_bigtable_admin_v2_table_proto_enumTypes = make([]protoimpl.EnumInfo, 9) var file_google_bigtable_admin_v2_table_proto_msgTypes = make([]protoimpl.MessageInfo, 19) -var file_google_bigtable_admin_v2_table_proto_goTypes = []interface{}{ +var file_google_bigtable_admin_v2_table_proto_goTypes = []any{ (RestoreSourceType)(0), // 0: google.bigtable.admin.v2.RestoreSourceType (Table_TimestampGranularity)(0), // 1: google.bigtable.admin.v2.Table.TimestampGranularity (Table_View)(0), // 2: google.bigtable.admin.v2.Table.View @@ -2256,74 +2356,77 @@ var file_google_bigtable_admin_v2_table_proto_goTypes = []interface{}{ (EncryptionInfo_EncryptionType)(0), // 5: google.bigtable.admin.v2.EncryptionInfo.EncryptionType (Snapshot_State)(0), // 6: google.bigtable.admin.v2.Snapshot.State (Backup_State)(0), // 7: google.bigtable.admin.v2.Backup.State - (*RestoreInfo)(nil), // 8: google.bigtable.admin.v2.RestoreInfo - (*ChangeStreamConfig)(nil), // 9: google.bigtable.admin.v2.ChangeStreamConfig - (*Table)(nil), // 10: google.bigtable.admin.v2.Table - (*AuthorizedView)(nil), // 11: google.bigtable.admin.v2.AuthorizedView - (*ColumnFamily)(nil), // 12: google.bigtable.admin.v2.ColumnFamily - (*GcRule)(nil), // 13: google.bigtable.admin.v2.GcRule - (*EncryptionInfo)(nil), // 14: google.bigtable.admin.v2.EncryptionInfo - (*Snapshot)(nil), // 15: google.bigtable.admin.v2.Snapshot - (*Backup)(nil), // 16: google.bigtable.admin.v2.Backup - (*BackupInfo)(nil), // 17: google.bigtable.admin.v2.BackupInfo - (*Table_ClusterState)(nil), // 18: google.bigtable.admin.v2.Table.ClusterState - (*Table_AutomatedBackupPolicy)(nil), // 19: google.bigtable.admin.v2.Table.AutomatedBackupPolicy - nil, // 20: google.bigtable.admin.v2.Table.ClusterStatesEntry - nil, // 21: google.bigtable.admin.v2.Table.ColumnFamiliesEntry - (*AuthorizedView_FamilySubsets)(nil), // 22: google.bigtable.admin.v2.AuthorizedView.FamilySubsets - (*AuthorizedView_SubsetView)(nil), // 23: google.bigtable.admin.v2.AuthorizedView.SubsetView - nil, // 24: google.bigtable.admin.v2.AuthorizedView.SubsetView.FamilySubsetsEntry - (*GcRule_Intersection)(nil), // 25: google.bigtable.admin.v2.GcRule.Intersection - (*GcRule_Union)(nil), // 26: google.bigtable.admin.v2.GcRule.Union - (*durationpb.Duration)(nil), // 27: google.protobuf.Duration - (*Type)(nil), // 28: google.bigtable.admin.v2.Type - (*status.Status)(nil), // 29: google.rpc.Status - (*timestamppb.Timestamp)(nil), // 30: google.protobuf.Timestamp + (Backup_BackupType)(0), // 8: google.bigtable.admin.v2.Backup.BackupType + (*RestoreInfo)(nil), // 9: google.bigtable.admin.v2.RestoreInfo + (*ChangeStreamConfig)(nil), // 10: google.bigtable.admin.v2.ChangeStreamConfig + (*Table)(nil), // 11: google.bigtable.admin.v2.Table + 
(*AuthorizedView)(nil), // 12: google.bigtable.admin.v2.AuthorizedView + (*ColumnFamily)(nil), // 13: google.bigtable.admin.v2.ColumnFamily + (*GcRule)(nil), // 14: google.bigtable.admin.v2.GcRule + (*EncryptionInfo)(nil), // 15: google.bigtable.admin.v2.EncryptionInfo + (*Snapshot)(nil), // 16: google.bigtable.admin.v2.Snapshot + (*Backup)(nil), // 17: google.bigtable.admin.v2.Backup + (*BackupInfo)(nil), // 18: google.bigtable.admin.v2.BackupInfo + (*Table_ClusterState)(nil), // 19: google.bigtable.admin.v2.Table.ClusterState + (*Table_AutomatedBackupPolicy)(nil), // 20: google.bigtable.admin.v2.Table.AutomatedBackupPolicy + nil, // 21: google.bigtable.admin.v2.Table.ClusterStatesEntry + nil, // 22: google.bigtable.admin.v2.Table.ColumnFamiliesEntry + (*AuthorizedView_FamilySubsets)(nil), // 23: google.bigtable.admin.v2.AuthorizedView.FamilySubsets + (*AuthorizedView_SubsetView)(nil), // 24: google.bigtable.admin.v2.AuthorizedView.SubsetView + nil, // 25: google.bigtable.admin.v2.AuthorizedView.SubsetView.FamilySubsetsEntry + (*GcRule_Intersection)(nil), // 26: google.bigtable.admin.v2.GcRule.Intersection + (*GcRule_Union)(nil), // 27: google.bigtable.admin.v2.GcRule.Union + (*durationpb.Duration)(nil), // 28: google.protobuf.Duration + (*Type)(nil), // 29: google.bigtable.admin.v2.Type + (*status.Status)(nil), // 30: google.rpc.Status + (*timestamppb.Timestamp)(nil), // 31: google.protobuf.Timestamp } var file_google_bigtable_admin_v2_table_proto_depIdxs = []int32{ 0, // 0: google.bigtable.admin.v2.RestoreInfo.source_type:type_name -> google.bigtable.admin.v2.RestoreSourceType - 17, // 1: google.bigtable.admin.v2.RestoreInfo.backup_info:type_name -> google.bigtable.admin.v2.BackupInfo - 27, // 2: google.bigtable.admin.v2.ChangeStreamConfig.retention_period:type_name -> google.protobuf.Duration - 20, // 3: google.bigtable.admin.v2.Table.cluster_states:type_name -> google.bigtable.admin.v2.Table.ClusterStatesEntry - 21, // 4: google.bigtable.admin.v2.Table.column_families:type_name -> google.bigtable.admin.v2.Table.ColumnFamiliesEntry + 18, // 1: google.bigtable.admin.v2.RestoreInfo.backup_info:type_name -> google.bigtable.admin.v2.BackupInfo + 28, // 2: google.bigtable.admin.v2.ChangeStreamConfig.retention_period:type_name -> google.protobuf.Duration + 21, // 3: google.bigtable.admin.v2.Table.cluster_states:type_name -> google.bigtable.admin.v2.Table.ClusterStatesEntry + 22, // 4: google.bigtable.admin.v2.Table.column_families:type_name -> google.bigtable.admin.v2.Table.ColumnFamiliesEntry 1, // 5: google.bigtable.admin.v2.Table.granularity:type_name -> google.bigtable.admin.v2.Table.TimestampGranularity - 8, // 6: google.bigtable.admin.v2.Table.restore_info:type_name -> google.bigtable.admin.v2.RestoreInfo - 9, // 7: google.bigtable.admin.v2.Table.change_stream_config:type_name -> google.bigtable.admin.v2.ChangeStreamConfig - 19, // 8: google.bigtable.admin.v2.Table.automated_backup_policy:type_name -> google.bigtable.admin.v2.Table.AutomatedBackupPolicy - 23, // 9: google.bigtable.admin.v2.AuthorizedView.subset_view:type_name -> google.bigtable.admin.v2.AuthorizedView.SubsetView - 13, // 10: google.bigtable.admin.v2.ColumnFamily.gc_rule:type_name -> google.bigtable.admin.v2.GcRule - 28, // 11: google.bigtable.admin.v2.ColumnFamily.value_type:type_name -> google.bigtable.admin.v2.Type - 27, // 12: google.bigtable.admin.v2.GcRule.max_age:type_name -> google.protobuf.Duration - 25, // 13: google.bigtable.admin.v2.GcRule.intersection:type_name -> 
google.bigtable.admin.v2.GcRule.Intersection - 26, // 14: google.bigtable.admin.v2.GcRule.union:type_name -> google.bigtable.admin.v2.GcRule.Union + 9, // 6: google.bigtable.admin.v2.Table.restore_info:type_name -> google.bigtable.admin.v2.RestoreInfo + 10, // 7: google.bigtable.admin.v2.Table.change_stream_config:type_name -> google.bigtable.admin.v2.ChangeStreamConfig + 20, // 8: google.bigtable.admin.v2.Table.automated_backup_policy:type_name -> google.bigtable.admin.v2.Table.AutomatedBackupPolicy + 24, // 9: google.bigtable.admin.v2.AuthorizedView.subset_view:type_name -> google.bigtable.admin.v2.AuthorizedView.SubsetView + 14, // 10: google.bigtable.admin.v2.ColumnFamily.gc_rule:type_name -> google.bigtable.admin.v2.GcRule + 29, // 11: google.bigtable.admin.v2.ColumnFamily.value_type:type_name -> google.bigtable.admin.v2.Type + 28, // 12: google.bigtable.admin.v2.GcRule.max_age:type_name -> google.protobuf.Duration + 26, // 13: google.bigtable.admin.v2.GcRule.intersection:type_name -> google.bigtable.admin.v2.GcRule.Intersection + 27, // 14: google.bigtable.admin.v2.GcRule.union:type_name -> google.bigtable.admin.v2.GcRule.Union 5, // 15: google.bigtable.admin.v2.EncryptionInfo.encryption_type:type_name -> google.bigtable.admin.v2.EncryptionInfo.EncryptionType - 29, // 16: google.bigtable.admin.v2.EncryptionInfo.encryption_status:type_name -> google.rpc.Status - 10, // 17: google.bigtable.admin.v2.Snapshot.source_table:type_name -> google.bigtable.admin.v2.Table - 30, // 18: google.bigtable.admin.v2.Snapshot.create_time:type_name -> google.protobuf.Timestamp - 30, // 19: google.bigtable.admin.v2.Snapshot.delete_time:type_name -> google.protobuf.Timestamp + 30, // 16: google.bigtable.admin.v2.EncryptionInfo.encryption_status:type_name -> google.rpc.Status + 11, // 17: google.bigtable.admin.v2.Snapshot.source_table:type_name -> google.bigtable.admin.v2.Table + 31, // 18: google.bigtable.admin.v2.Snapshot.create_time:type_name -> google.protobuf.Timestamp + 31, // 19: google.bigtable.admin.v2.Snapshot.delete_time:type_name -> google.protobuf.Timestamp 6, // 20: google.bigtable.admin.v2.Snapshot.state:type_name -> google.bigtable.admin.v2.Snapshot.State - 30, // 21: google.bigtable.admin.v2.Backup.expire_time:type_name -> google.protobuf.Timestamp - 30, // 22: google.bigtable.admin.v2.Backup.start_time:type_name -> google.protobuf.Timestamp - 30, // 23: google.bigtable.admin.v2.Backup.end_time:type_name -> google.protobuf.Timestamp + 31, // 21: google.bigtable.admin.v2.Backup.expire_time:type_name -> google.protobuf.Timestamp + 31, // 22: google.bigtable.admin.v2.Backup.start_time:type_name -> google.protobuf.Timestamp + 31, // 23: google.bigtable.admin.v2.Backup.end_time:type_name -> google.protobuf.Timestamp 7, // 24: google.bigtable.admin.v2.Backup.state:type_name -> google.bigtable.admin.v2.Backup.State - 14, // 25: google.bigtable.admin.v2.Backup.encryption_info:type_name -> google.bigtable.admin.v2.EncryptionInfo - 30, // 26: google.bigtable.admin.v2.BackupInfo.start_time:type_name -> google.protobuf.Timestamp - 30, // 27: google.bigtable.admin.v2.BackupInfo.end_time:type_name -> google.protobuf.Timestamp - 3, // 28: google.bigtable.admin.v2.Table.ClusterState.replication_state:type_name -> google.bigtable.admin.v2.Table.ClusterState.ReplicationState - 14, // 29: google.bigtable.admin.v2.Table.ClusterState.encryption_info:type_name -> google.bigtable.admin.v2.EncryptionInfo - 27, // 30: google.bigtable.admin.v2.Table.AutomatedBackupPolicy.retention_period:type_name -> 
google.protobuf.Duration - 27, // 31: google.bigtable.admin.v2.Table.AutomatedBackupPolicy.frequency:type_name -> google.protobuf.Duration - 18, // 32: google.bigtable.admin.v2.Table.ClusterStatesEntry.value:type_name -> google.bigtable.admin.v2.Table.ClusterState - 12, // 33: google.bigtable.admin.v2.Table.ColumnFamiliesEntry.value:type_name -> google.bigtable.admin.v2.ColumnFamily - 24, // 34: google.bigtable.admin.v2.AuthorizedView.SubsetView.family_subsets:type_name -> google.bigtable.admin.v2.AuthorizedView.SubsetView.FamilySubsetsEntry - 22, // 35: google.bigtable.admin.v2.AuthorizedView.SubsetView.FamilySubsetsEntry.value:type_name -> google.bigtable.admin.v2.AuthorizedView.FamilySubsets - 13, // 36: google.bigtable.admin.v2.GcRule.Intersection.rules:type_name -> google.bigtable.admin.v2.GcRule - 13, // 37: google.bigtable.admin.v2.GcRule.Union.rules:type_name -> google.bigtable.admin.v2.GcRule - 38, // [38:38] is the sub-list for method output_type - 38, // [38:38] is the sub-list for method input_type - 38, // [38:38] is the sub-list for extension type_name - 38, // [38:38] is the sub-list for extension extendee - 0, // [0:38] is the sub-list for field type_name + 15, // 25: google.bigtable.admin.v2.Backup.encryption_info:type_name -> google.bigtable.admin.v2.EncryptionInfo + 8, // 26: google.bigtable.admin.v2.Backup.backup_type:type_name -> google.bigtable.admin.v2.Backup.BackupType + 31, // 27: google.bigtable.admin.v2.Backup.hot_to_standard_time:type_name -> google.protobuf.Timestamp + 31, // 28: google.bigtable.admin.v2.BackupInfo.start_time:type_name -> google.protobuf.Timestamp + 31, // 29: google.bigtable.admin.v2.BackupInfo.end_time:type_name -> google.protobuf.Timestamp + 3, // 30: google.bigtable.admin.v2.Table.ClusterState.replication_state:type_name -> google.bigtable.admin.v2.Table.ClusterState.ReplicationState + 15, // 31: google.bigtable.admin.v2.Table.ClusterState.encryption_info:type_name -> google.bigtable.admin.v2.EncryptionInfo + 28, // 32: google.bigtable.admin.v2.Table.AutomatedBackupPolicy.retention_period:type_name -> google.protobuf.Duration + 28, // 33: google.bigtable.admin.v2.Table.AutomatedBackupPolicy.frequency:type_name -> google.protobuf.Duration + 19, // 34: google.bigtable.admin.v2.Table.ClusterStatesEntry.value:type_name -> google.bigtable.admin.v2.Table.ClusterState + 13, // 35: google.bigtable.admin.v2.Table.ColumnFamiliesEntry.value:type_name -> google.bigtable.admin.v2.ColumnFamily + 25, // 36: google.bigtable.admin.v2.AuthorizedView.SubsetView.family_subsets:type_name -> google.bigtable.admin.v2.AuthorizedView.SubsetView.FamilySubsetsEntry + 23, // 37: google.bigtable.admin.v2.AuthorizedView.SubsetView.FamilySubsetsEntry.value:type_name -> google.bigtable.admin.v2.AuthorizedView.FamilySubsets + 14, // 38: google.bigtable.admin.v2.GcRule.Intersection.rules:type_name -> google.bigtable.admin.v2.GcRule + 14, // 39: google.bigtable.admin.v2.GcRule.Union.rules:type_name -> google.bigtable.admin.v2.GcRule + 40, // [40:40] is the sub-list for method output_type + 40, // [40:40] is the sub-list for method input_type + 40, // [40:40] is the sub-list for extension type_name + 40, // [40:40] is the sub-list for extension extendee + 0, // [0:40] is the sub-list for field type_name } func init() { file_google_bigtable_admin_v2_table_proto_init() } @@ -2333,7 +2436,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { } file_google_bigtable_admin_v2_types_proto_init() if !protoimpl.UnsafeEnabled { - 
file_google_bigtable_admin_v2_table_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_table_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*RestoreInfo); i { case 0: return &v.state @@ -2345,7 +2448,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { return nil } } - file_google_bigtable_admin_v2_table_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_table_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ChangeStreamConfig); i { case 0: return &v.state @@ -2357,7 +2460,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { return nil } } - file_google_bigtable_admin_v2_table_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_table_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Table); i { case 0: return &v.state @@ -2369,7 +2472,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { return nil } } - file_google_bigtable_admin_v2_table_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_table_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*AuthorizedView); i { case 0: return &v.state @@ -2381,7 +2484,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { return nil } } - file_google_bigtable_admin_v2_table_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_table_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*ColumnFamily); i { case 0: return &v.state @@ -2393,7 +2496,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { return nil } } - file_google_bigtable_admin_v2_table_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_table_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*GcRule); i { case 0: return &v.state @@ -2405,7 +2508,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { return nil } } - file_google_bigtable_admin_v2_table_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_table_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*EncryptionInfo); i { case 0: return &v.state @@ -2417,7 +2520,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { return nil } } - file_google_bigtable_admin_v2_table_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_table_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*Snapshot); i { case 0: return &v.state @@ -2429,7 +2532,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { return nil } } - file_google_bigtable_admin_v2_table_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_table_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*Backup); i { case 0: return &v.state @@ -2441,7 +2544,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { return nil } } - file_google_bigtable_admin_v2_table_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_table_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*BackupInfo); i { case 0: return &v.state @@ -2453,7 +2556,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { return nil } } - 
file_google_bigtable_admin_v2_table_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_table_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*Table_ClusterState); i { case 0: return &v.state @@ -2465,7 +2568,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { return nil } } - file_google_bigtable_admin_v2_table_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_table_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*Table_AutomatedBackupPolicy); i { case 0: return &v.state @@ -2477,7 +2580,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { return nil } } - file_google_bigtable_admin_v2_table_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_table_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*AuthorizedView_FamilySubsets); i { case 0: return &v.state @@ -2489,7 +2592,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { return nil } } - file_google_bigtable_admin_v2_table_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_table_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*AuthorizedView_SubsetView); i { case 0: return &v.state @@ -2501,7 +2604,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { return nil } } - file_google_bigtable_admin_v2_table_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_table_proto_msgTypes[17].Exporter = func(v any, i int) any { switch v := v.(*GcRule_Intersection); i { case 0: return &v.state @@ -2513,7 +2616,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { return nil } } - file_google_bigtable_admin_v2_table_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_table_proto_msgTypes[18].Exporter = func(v any, i int) any { switch v := v.(*GcRule_Union); i { case 0: return &v.state @@ -2526,16 +2629,16 @@ func file_google_bigtable_admin_v2_table_proto_init() { } } } - file_google_bigtable_admin_v2_table_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_google_bigtable_admin_v2_table_proto_msgTypes[0].OneofWrappers = []any{ (*RestoreInfo_BackupInfo)(nil), } - file_google_bigtable_admin_v2_table_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_google_bigtable_admin_v2_table_proto_msgTypes[2].OneofWrappers = []any{ (*Table_AutomatedBackupPolicy_)(nil), } - file_google_bigtable_admin_v2_table_proto_msgTypes[3].OneofWrappers = []interface{}{ + file_google_bigtable_admin_v2_table_proto_msgTypes[3].OneofWrappers = []any{ (*AuthorizedView_SubsetView_)(nil), } - file_google_bigtable_admin_v2_table_proto_msgTypes[5].OneofWrappers = []interface{}{ + file_google_bigtable_admin_v2_table_proto_msgTypes[5].OneofWrappers = []any{ (*GcRule_MaxNumVersions)(nil), (*GcRule_MaxAge)(nil), (*GcRule_Intersection_)(nil), @@ -2546,7 +2649,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_bigtable_admin_v2_table_proto_rawDesc, - NumEnums: 8, + NumEnums: 9, NumMessages: 19, NumExtensions: 0, NumServices: 0, diff --git a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/types.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/types.pb.go new file mode 
100644 index 00000000000..9997364f1d2 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/types.pb.go @@ -0,0 +1,2225 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/bigtable/admin/v2/types.proto + +package adminpb + +import ( + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// `Type` represents the type of data that is written to, read from, or stored +// in Bigtable. It is heavily based on the GoogleSQL standard to help maintain +// familiarity and consistency across products and features. +// +// For compatibility with Bigtable's existing untyped APIs, each `Type` includes +// an `Encoding` which describes how to convert to/from the underlying data. +// +// Each encoding also defines the following properties: +// +// - Order-preserving: Does the encoded value sort consistently with the +// original typed value? Note that Bigtable will always sort data based on +// the raw encoded value, *not* the decoded type. +// - Example: BYTES values sort in the same order as their raw encodings. +// - Counterexample: Encoding INT64 as a fixed-width decimal string does +// *not* preserve sort order when dealing with negative numbers. +// `INT64(1) > INT64(-1)`, but `STRING("-00001") > STRING("00001")`. +// - Self-delimiting: If we concatenate two encoded values, can we always tell +// where the first one ends and the second one begins? +// - Example: If we encode INT64s to fixed-width STRINGs, the first value +// will always contain exactly N digits, possibly preceded by a sign. +// - Counterexample: If we concatenate two UTF-8 encoded STRINGs, we have +// no way to tell where the first one ends. +// - Compatibility: Which other systems have matching encoding schemes? For +// example, does this encoding have a GoogleSQL equivalent? HBase? Java? +type Type struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The kind of type that this represents.
+ // + // Types that are assignable to Kind: + // + // *Type_BytesType + // *Type_StringType + // *Type_Int64Type + // *Type_Float32Type + // *Type_Float64Type + // *Type_BoolType + // *Type_TimestampType + // *Type_DateType + // *Type_AggregateType + // *Type_StructType + // *Type_ArrayType + // *Type_MapType + Kind isType_Kind `protobuf_oneof:"kind"` +} + +func (x *Type) Reset() { + *x = Type{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type) ProtoMessage() {} + +func (x *Type) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type.ProtoReflect.Descriptor instead. +func (*Type) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0} +} + +func (m *Type) GetKind() isType_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *Type) GetBytesType() *Type_Bytes { + if x, ok := x.GetKind().(*Type_BytesType); ok { + return x.BytesType + } + return nil +} + +func (x *Type) GetStringType() *Type_String { + if x, ok := x.GetKind().(*Type_StringType); ok { + return x.StringType + } + return nil +} + +func (x *Type) GetInt64Type() *Type_Int64 { + if x, ok := x.GetKind().(*Type_Int64Type); ok { + return x.Int64Type + } + return nil +} + +func (x *Type) GetFloat32Type() *Type_Float32 { + if x, ok := x.GetKind().(*Type_Float32Type); ok { + return x.Float32Type + } + return nil +} + +func (x *Type) GetFloat64Type() *Type_Float64 { + if x, ok := x.GetKind().(*Type_Float64Type); ok { + return x.Float64Type + } + return nil +} + +func (x *Type) GetBoolType() *Type_Bool { + if x, ok := x.GetKind().(*Type_BoolType); ok { + return x.BoolType + } + return nil +} + +func (x *Type) GetTimestampType() *Type_Timestamp { + if x, ok := x.GetKind().(*Type_TimestampType); ok { + return x.TimestampType + } + return nil +} + +func (x *Type) GetDateType() *Type_Date { + if x, ok := x.GetKind().(*Type_DateType); ok { + return x.DateType + } + return nil +} + +func (x *Type) GetAggregateType() *Type_Aggregate { + if x, ok := x.GetKind().(*Type_AggregateType); ok { + return x.AggregateType + } + return nil +} + +func (x *Type) GetStructType() *Type_Struct { + if x, ok := x.GetKind().(*Type_StructType); ok { + return x.StructType + } + return nil +} + +func (x *Type) GetArrayType() *Type_Array { + if x, ok := x.GetKind().(*Type_ArrayType); ok { + return x.ArrayType + } + return nil +} + +func (x *Type) GetMapType() *Type_Map { + if x, ok := x.GetKind().(*Type_MapType); ok { + return x.MapType + } + return nil +} + +type isType_Kind interface { + isType_Kind() +} + +type Type_BytesType struct { + // Bytes + BytesType *Type_Bytes `protobuf:"bytes,1,opt,name=bytes_type,json=bytesType,proto3,oneof"` +} + +type Type_StringType struct { + // String + StringType *Type_String `protobuf:"bytes,2,opt,name=string_type,json=stringType,proto3,oneof"` +} + +type Type_Int64Type struct { + // Int64 + Int64Type *Type_Int64 `protobuf:"bytes,5,opt,name=int64_type,json=int64Type,proto3,oneof"` +} + +type Type_Float32Type struct { + // Float32 + Float32Type *Type_Float32 
`protobuf:"bytes,12,opt,name=float32_type,json=float32Type,proto3,oneof"` +} + +type Type_Float64Type struct { + // Float64 + Float64Type *Type_Float64 `protobuf:"bytes,9,opt,name=float64_type,json=float64Type,proto3,oneof"` +} + +type Type_BoolType struct { + // Bool + BoolType *Type_Bool `protobuf:"bytes,8,opt,name=bool_type,json=boolType,proto3,oneof"` +} + +type Type_TimestampType struct { + // Timestamp + TimestampType *Type_Timestamp `protobuf:"bytes,10,opt,name=timestamp_type,json=timestampType,proto3,oneof"` +} + +type Type_DateType struct { + // Date + DateType *Type_Date `protobuf:"bytes,11,opt,name=date_type,json=dateType,proto3,oneof"` +} + +type Type_AggregateType struct { + // Aggregate + AggregateType *Type_Aggregate `protobuf:"bytes,6,opt,name=aggregate_type,json=aggregateType,proto3,oneof"` +} + +type Type_StructType struct { + // Struct + StructType *Type_Struct `protobuf:"bytes,7,opt,name=struct_type,json=structType,proto3,oneof"` +} + +type Type_ArrayType struct { + // Array + ArrayType *Type_Array `protobuf:"bytes,3,opt,name=array_type,json=arrayType,proto3,oneof"` +} + +type Type_MapType struct { + // Map + MapType *Type_Map `protobuf:"bytes,4,opt,name=map_type,json=mapType,proto3,oneof"` +} + +func (*Type_BytesType) isType_Kind() {} + +func (*Type_StringType) isType_Kind() {} + +func (*Type_Int64Type) isType_Kind() {} + +func (*Type_Float32Type) isType_Kind() {} + +func (*Type_Float64Type) isType_Kind() {} + +func (*Type_BoolType) isType_Kind() {} + +func (*Type_TimestampType) isType_Kind() {} + +func (*Type_DateType) isType_Kind() {} + +func (*Type_AggregateType) isType_Kind() {} + +func (*Type_StructType) isType_Kind() {} + +func (*Type_ArrayType) isType_Kind() {} + +func (*Type_MapType) isType_Kind() {} + +// Bytes +// Values of type `Bytes` are stored in `Value.bytes_value`. +type Type_Bytes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The encoding to use when converting to/from lower level types. + Encoding *Type_Bytes_Encoding `protobuf:"bytes,1,opt,name=encoding,proto3" json:"encoding,omitempty"` +} + +func (x *Type_Bytes) Reset() { + *x = Type_Bytes{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Bytes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Bytes) ProtoMessage() {} + +func (x *Type_Bytes) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Bytes.ProtoReflect.Descriptor instead. +func (*Type_Bytes) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Type_Bytes) GetEncoding() *Type_Bytes_Encoding { + if x != nil { + return x.Encoding + } + return nil +} + +// String +// Values of type `String` are stored in `Value.string_value`. +type Type_String struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The encoding to use when converting to/from lower level types. 
+ Encoding *Type_String_Encoding `protobuf:"bytes,1,opt,name=encoding,proto3" json:"encoding,omitempty"` +} + +func (x *Type_String) Reset() { + *x = Type_String{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_String) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_String) ProtoMessage() {} + +func (x *Type_String) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_String.ProtoReflect.Descriptor instead. +func (*Type_String) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *Type_String) GetEncoding() *Type_String_Encoding { + if x != nil { + return x.Encoding + } + return nil +} + +// Int64 +// Values of type `Int64` are stored in `Value.int_value`. +type Type_Int64 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The encoding to use when converting to/from lower level types. + Encoding *Type_Int64_Encoding `protobuf:"bytes,1,opt,name=encoding,proto3" json:"encoding,omitempty"` +} + +func (x *Type_Int64) Reset() { + *x = Type_Int64{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Int64) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Int64) ProtoMessage() {} + +func (x *Type_Int64) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Int64.ProtoReflect.Descriptor instead. +func (*Type_Int64) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 2} +} + +func (x *Type_Int64) GetEncoding() *Type_Int64_Encoding { + if x != nil { + return x.Encoding + } + return nil +} + +// bool +// Values of type `Bool` are stored in `Value.bool_value`. +type Type_Bool struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Type_Bool) Reset() { + *x = Type_Bool{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Bool) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Bool) ProtoMessage() {} + +func (x *Type_Bool) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Bool.ProtoReflect.Descriptor instead. 
+func (*Type_Bool) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 3} +} + +// Float32 +// Values of type `Float32` are stored in `Value.float_value`. +type Type_Float32 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Type_Float32) Reset() { + *x = Type_Float32{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Float32) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Float32) ProtoMessage() {} + +func (x *Type_Float32) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Float32.ProtoReflect.Descriptor instead. +func (*Type_Float32) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 4} +} + +// Float64 +// Values of type `Float64` are stored in `Value.float_value`. +type Type_Float64 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Type_Float64) Reset() { + *x = Type_Float64{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Float64) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Float64) ProtoMessage() {} + +func (x *Type_Float64) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Float64.ProtoReflect.Descriptor instead. +func (*Type_Float64) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 5} +} + +// Timestamp +// Values of type `Timestamp` are stored in `Value.timestamp_value`. +type Type_Timestamp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Type_Timestamp) Reset() { + *x = Type_Timestamp{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Timestamp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Timestamp) ProtoMessage() {} + +func (x *Type_Timestamp) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Timestamp.ProtoReflect.Descriptor instead. 
+func (*Type_Timestamp) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 6} +} + +// Date +// Values of type `Date` are stored in `Value.date_value`. +type Type_Date struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Type_Date) Reset() { + *x = Type_Date{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Date) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Date) ProtoMessage() {} + +func (x *Type_Date) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Date.ProtoReflect.Descriptor instead. +func (*Type_Date) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 7} +} + +// A structured data value, consisting of fields which map to dynamically +// typed values. +// Values of type `Struct` are stored in `Value.array_value` where entries are +// in the same order and number as `field_types`. +type Type_Struct struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The names and types of the fields in this struct. + Fields []*Type_Struct_Field `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty"` +} + +func (x *Type_Struct) Reset() { + *x = Type_Struct{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Struct) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Struct) ProtoMessage() {} + +func (x *Type_Struct) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Struct.ProtoReflect.Descriptor instead. +func (*Type_Struct) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 8} +} + +func (x *Type_Struct) GetFields() []*Type_Struct_Field { + if x != nil { + return x.Fields + } + return nil +} + +// An ordered list of elements of a given type. +// Values of type `Array` are stored in `Value.array_value`. +type Type_Array struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of the elements in the array. This must not be `Array`. 
+ ElementType *Type `protobuf:"bytes,1,opt,name=element_type,json=elementType,proto3" json:"element_type,omitempty"` +} + +func (x *Type_Array) Reset() { + *x = Type_Array{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Array) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Array) ProtoMessage() {} + +func (x *Type_Array) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Array.ProtoReflect.Descriptor instead. +func (*Type_Array) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 9} +} + +func (x *Type_Array) GetElementType() *Type { + if x != nil { + return x.ElementType + } + return nil +} + +// A mapping of keys to values of a given type. +// Values of type `Map` are stored in a `Value.array_value` where each entry +// is another `Value.array_value` with two elements (the key and the value, +// in that order). +// Normally encoded Map values won't have repeated keys, however, clients are +// expected to handle the case in which they do. If the same key appears +// multiple times, the _last_ value takes precedence. +type Type_Map struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of a map key. + // Only `Bytes`, `String`, and `Int64` are allowed as key types. + KeyType *Type `protobuf:"bytes,1,opt,name=key_type,json=keyType,proto3" json:"key_type,omitempty"` + // The type of the values in a map. + ValueType *Type `protobuf:"bytes,2,opt,name=value_type,json=valueType,proto3" json:"value_type,omitempty"` +} + +func (x *Type_Map) Reset() { + *x = Type_Map{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Map) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Map) ProtoMessage() {} + +func (x *Type_Map) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Map.ProtoReflect.Descriptor instead. +func (*Type_Map) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 10} +} + +func (x *Type_Map) GetKeyType() *Type { + if x != nil { + return x.KeyType + } + return nil +} + +func (x *Type_Map) GetValueType() *Type { + if x != nil { + return x.ValueType + } + return nil +} + +// A value that combines incremental updates into a summarized value. +// +// Data is never directly written or read using type `Aggregate`. Writes will +// provide either the `input_type` or `state_type`, and reads will always +// return the `state_type` . 
+type Type_Aggregate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Type of the inputs that are accumulated by this `Aggregate`, which must + // specify a full encoding. + // Use `AddInput` mutations to accumulate new inputs. + InputType *Type `protobuf:"bytes,1,opt,name=input_type,json=inputType,proto3" json:"input_type,omitempty"` + // Output only. Type that holds the internal accumulator state for the + // `Aggregate`. This is a function of the `input_type` and `aggregator` + // chosen, and will always specify a full encoding. + StateType *Type `protobuf:"bytes,2,opt,name=state_type,json=stateType,proto3" json:"state_type,omitempty"` + // Which aggregator function to use. The configured types must match. + // + // Types that are assignable to Aggregator: + // + // *Type_Aggregate_Sum_ + // *Type_Aggregate_HllppUniqueCount + // *Type_Aggregate_Max_ + // *Type_Aggregate_Min_ + Aggregator isType_Aggregate_Aggregator `protobuf_oneof:"aggregator"` +} + +func (x *Type_Aggregate) Reset() { + *x = Type_Aggregate{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Aggregate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Aggregate) ProtoMessage() {} + +func (x *Type_Aggregate) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Aggregate.ProtoReflect.Descriptor instead. +func (*Type_Aggregate) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 11} +} + +func (x *Type_Aggregate) GetInputType() *Type { + if x != nil { + return x.InputType + } + return nil +} + +func (x *Type_Aggregate) GetStateType() *Type { + if x != nil { + return x.StateType + } + return nil +} + +func (m *Type_Aggregate) GetAggregator() isType_Aggregate_Aggregator { + if m != nil { + return m.Aggregator + } + return nil +} + +func (x *Type_Aggregate) GetSum() *Type_Aggregate_Sum { + if x, ok := x.GetAggregator().(*Type_Aggregate_Sum_); ok { + return x.Sum + } + return nil +} + +func (x *Type_Aggregate) GetHllppUniqueCount() *Type_Aggregate_HyperLogLogPlusPlusUniqueCount { + if x, ok := x.GetAggregator().(*Type_Aggregate_HllppUniqueCount); ok { + return x.HllppUniqueCount + } + return nil +} + +func (x *Type_Aggregate) GetMax() *Type_Aggregate_Max { + if x, ok := x.GetAggregator().(*Type_Aggregate_Max_); ok { + return x.Max + } + return nil +} + +func (x *Type_Aggregate) GetMin() *Type_Aggregate_Min { + if x, ok := x.GetAggregator().(*Type_Aggregate_Min_); ok { + return x.Min + } + return nil +} + +type isType_Aggregate_Aggregator interface { + isType_Aggregate_Aggregator() +} + +type Type_Aggregate_Sum_ struct { + // Sum aggregator. + Sum *Type_Aggregate_Sum `protobuf:"bytes,4,opt,name=sum,proto3,oneof"` +} + +type Type_Aggregate_HllppUniqueCount struct { + // HyperLogLogPlusPlusUniqueCount aggregator. + HllppUniqueCount *Type_Aggregate_HyperLogLogPlusPlusUniqueCount `protobuf:"bytes,5,opt,name=hllpp_unique_count,json=hllppUniqueCount,proto3,oneof"` +} + +type Type_Aggregate_Max_ struct { + // Max aggregator. 
+ Max *Type_Aggregate_Max `protobuf:"bytes,6,opt,name=max,proto3,oneof"` +} + +type Type_Aggregate_Min_ struct { + // Min aggregator. + Min *Type_Aggregate_Min `protobuf:"bytes,7,opt,name=min,proto3,oneof"` +} + +func (*Type_Aggregate_Sum_) isType_Aggregate_Aggregator() {} + +func (*Type_Aggregate_HllppUniqueCount) isType_Aggregate_Aggregator() {} + +func (*Type_Aggregate_Max_) isType_Aggregate_Aggregator() {} + +func (*Type_Aggregate_Min_) isType_Aggregate_Aggregator() {} + +// Rules used to convert to/from lower level types. +type Type_Bytes_Encoding struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Which encoding to use. + // + // Types that are assignable to Encoding: + // + // *Type_Bytes_Encoding_Raw_ + Encoding isType_Bytes_Encoding_Encoding `protobuf_oneof:"encoding"` +} + +func (x *Type_Bytes_Encoding) Reset() { + *x = Type_Bytes_Encoding{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Bytes_Encoding) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Bytes_Encoding) ProtoMessage() {} + +func (x *Type_Bytes_Encoding) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Bytes_Encoding.ProtoReflect.Descriptor instead. +func (*Type_Bytes_Encoding) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 0, 0} +} + +func (m *Type_Bytes_Encoding) GetEncoding() isType_Bytes_Encoding_Encoding { + if m != nil { + return m.Encoding + } + return nil +} + +func (x *Type_Bytes_Encoding) GetRaw() *Type_Bytes_Encoding_Raw { + if x, ok := x.GetEncoding().(*Type_Bytes_Encoding_Raw_); ok { + return x.Raw + } + return nil +} + +type isType_Bytes_Encoding_Encoding interface { + isType_Bytes_Encoding_Encoding() +} + +type Type_Bytes_Encoding_Raw_ struct { + // Use `Raw` encoding. + Raw *Type_Bytes_Encoding_Raw `protobuf:"bytes,1,opt,name=raw,proto3,oneof"` +} + +func (*Type_Bytes_Encoding_Raw_) isType_Bytes_Encoding_Encoding() {} + +// Leaves the value "as-is" +// * Order-preserving? Yes +// * Self-delimiting? No +// * Compatibility? N/A +type Type_Bytes_Encoding_Raw struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Type_Bytes_Encoding_Raw) Reset() { + *x = Type_Bytes_Encoding_Raw{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Bytes_Encoding_Raw) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Bytes_Encoding_Raw) ProtoMessage() {} + +func (x *Type_Bytes_Encoding_Raw) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Bytes_Encoding_Raw.ProtoReflect.Descriptor instead. 
+func (*Type_Bytes_Encoding_Raw) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 0, 0, 0} +} + +// Rules used to convert to/from lower level types. +type Type_String_Encoding struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Which encoding to use. + // + // Types that are assignable to Encoding: + // + // *Type_String_Encoding_Utf8Raw_ + // *Type_String_Encoding_Utf8Bytes_ + Encoding isType_String_Encoding_Encoding `protobuf_oneof:"encoding"` +} + +func (x *Type_String_Encoding) Reset() { + *x = Type_String_Encoding{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_String_Encoding) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_String_Encoding) ProtoMessage() {} + +func (x *Type_String_Encoding) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_String_Encoding.ProtoReflect.Descriptor instead. +func (*Type_String_Encoding) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 1, 0} +} + +func (m *Type_String_Encoding) GetEncoding() isType_String_Encoding_Encoding { + if m != nil { + return m.Encoding + } + return nil +} + +// Deprecated: Marked as deprecated in google/bigtable/admin/v2/types.proto. +func (x *Type_String_Encoding) GetUtf8Raw() *Type_String_Encoding_Utf8Raw { + if x, ok := x.GetEncoding().(*Type_String_Encoding_Utf8Raw_); ok { + return x.Utf8Raw + } + return nil +} + +func (x *Type_String_Encoding) GetUtf8Bytes() *Type_String_Encoding_Utf8Bytes { + if x, ok := x.GetEncoding().(*Type_String_Encoding_Utf8Bytes_); ok { + return x.Utf8Bytes + } + return nil +} + +type isType_String_Encoding_Encoding interface { + isType_String_Encoding_Encoding() +} + +type Type_String_Encoding_Utf8Raw_ struct { + // Deprecated: if set, converts to an empty `utf8_bytes`. + // + // Deprecated: Marked as deprecated in google/bigtable/admin/v2/types.proto. + Utf8Raw *Type_String_Encoding_Utf8Raw `protobuf:"bytes,1,opt,name=utf8_raw,json=utf8Raw,proto3,oneof"` +} + +type Type_String_Encoding_Utf8Bytes_ struct { + // Use `Utf8Bytes` encoding. + Utf8Bytes *Type_String_Encoding_Utf8Bytes `protobuf:"bytes,2,opt,name=utf8_bytes,json=utf8Bytes,proto3,oneof"` +} + +func (*Type_String_Encoding_Utf8Raw_) isType_String_Encoding_Encoding() {} + +func (*Type_String_Encoding_Utf8Bytes_) isType_String_Encoding_Encoding() {} + +// Deprecated: prefer the equivalent `Utf8Bytes`. +// +// Deprecated: Marked as deprecated in google/bigtable/admin/v2/types.proto. 
+type Type_String_Encoding_Utf8Raw struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Type_String_Encoding_Utf8Raw) Reset() { + *x = Type_String_Encoding_Utf8Raw{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_String_Encoding_Utf8Raw) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_String_Encoding_Utf8Raw) ProtoMessage() {} + +func (x *Type_String_Encoding_Utf8Raw) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_String_Encoding_Utf8Raw.ProtoReflect.Descriptor instead. +func (*Type_String_Encoding_Utf8Raw) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 1, 0, 0} +} + +// UTF-8 encoding +// * Order-preserving? Yes (code point order) +// * Self-delimiting? No +// * Compatibility? +// - BigQuery Federation `TEXT` encoding +// - HBase `Bytes.toBytes` +// - Java `String#getBytes(StandardCharsets.UTF_8)` +type Type_String_Encoding_Utf8Bytes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Type_String_Encoding_Utf8Bytes) Reset() { + *x = Type_String_Encoding_Utf8Bytes{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_String_Encoding_Utf8Bytes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_String_Encoding_Utf8Bytes) ProtoMessage() {} + +func (x *Type_String_Encoding_Utf8Bytes) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_String_Encoding_Utf8Bytes.ProtoReflect.Descriptor instead. +func (*Type_String_Encoding_Utf8Bytes) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 1, 0, 1} +} + +// Rules used to convert to/from lower level types. +type Type_Int64_Encoding struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Which encoding to use. 
+ // + // Types that are assignable to Encoding: + // + // *Type_Int64_Encoding_BigEndianBytes_ + Encoding isType_Int64_Encoding_Encoding `protobuf_oneof:"encoding"` +} + +func (x *Type_Int64_Encoding) Reset() { + *x = Type_Int64_Encoding{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Int64_Encoding) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Int64_Encoding) ProtoMessage() {} + +func (x *Type_Int64_Encoding) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Int64_Encoding.ProtoReflect.Descriptor instead. +func (*Type_Int64_Encoding) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 2, 0} +} + +func (m *Type_Int64_Encoding) GetEncoding() isType_Int64_Encoding_Encoding { + if m != nil { + return m.Encoding + } + return nil +} + +func (x *Type_Int64_Encoding) GetBigEndianBytes() *Type_Int64_Encoding_BigEndianBytes { + if x, ok := x.GetEncoding().(*Type_Int64_Encoding_BigEndianBytes_); ok { + return x.BigEndianBytes + } + return nil +} + +type isType_Int64_Encoding_Encoding interface { + isType_Int64_Encoding_Encoding() +} + +type Type_Int64_Encoding_BigEndianBytes_ struct { + // Use `BigEndianBytes` encoding. + BigEndianBytes *Type_Int64_Encoding_BigEndianBytes `protobuf:"bytes,1,opt,name=big_endian_bytes,json=bigEndianBytes,proto3,oneof"` +} + +func (*Type_Int64_Encoding_BigEndianBytes_) isType_Int64_Encoding_Encoding() {} + +// Encodes the value as an 8-byte big endian twos complement `Bytes` +// value. +// * Order-preserving? No (positive values only) +// * Self-delimiting? Yes +// * Compatibility? +// - BigQuery Federation `BINARY` encoding +// - HBase `Bytes.toBytes` +// - Java `ByteBuffer.putLong()` with `ByteOrder.BIG_ENDIAN` +type Type_Int64_Encoding_BigEndianBytes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Deprecated: ignored if set. + BytesType *Type_Bytes `protobuf:"bytes,1,opt,name=bytes_type,json=bytesType,proto3" json:"bytes_type,omitempty"` +} + +func (x *Type_Int64_Encoding_BigEndianBytes) Reset() { + *x = Type_Int64_Encoding_BigEndianBytes{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Int64_Encoding_BigEndianBytes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Int64_Encoding_BigEndianBytes) ProtoMessage() {} + +func (x *Type_Int64_Encoding_BigEndianBytes) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Int64_Encoding_BigEndianBytes.ProtoReflect.Descriptor instead. 
+func (*Type_Int64_Encoding_BigEndianBytes) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 2, 0, 0} +} + +func (x *Type_Int64_Encoding_BigEndianBytes) GetBytesType() *Type_Bytes { + if x != nil { + return x.BytesType + } + return nil +} + +// A struct field and its type. +type Type_Struct_Field struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The field name (optional). Fields without a `field_name` are considered + // anonymous and cannot be referenced by name. + FieldName string `protobuf:"bytes,1,opt,name=field_name,json=fieldName,proto3" json:"field_name,omitempty"` + // The type of values in this field. + Type *Type `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` +} + +func (x *Type_Struct_Field) Reset() { + *x = Type_Struct_Field{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Struct_Field) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Struct_Field) ProtoMessage() {} + +func (x *Type_Struct_Field) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Struct_Field.ProtoReflect.Descriptor instead. +func (*Type_Struct_Field) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 8, 0} +} + +func (x *Type_Struct_Field) GetFieldName() string { + if x != nil { + return x.FieldName + } + return "" +} + +func (x *Type_Struct_Field) GetType() *Type { + if x != nil { + return x.Type + } + return nil +} + +// Computes the sum of the input values. +// Allowed input: `Int64` +// State: same as input +type Type_Aggregate_Sum struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Type_Aggregate_Sum) Reset() { + *x = Type_Aggregate_Sum{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Aggregate_Sum) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Aggregate_Sum) ProtoMessage() {} + +func (x *Type_Aggregate_Sum) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Aggregate_Sum.ProtoReflect.Descriptor instead. +func (*Type_Aggregate_Sum) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 11, 0} +} + +// Computes the max of the input values. 
+// Allowed input: `Int64` +// State: same as input +type Type_Aggregate_Max struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Type_Aggregate_Max) Reset() { + *x = Type_Aggregate_Max{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Aggregate_Max) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Aggregate_Max) ProtoMessage() {} + +func (x *Type_Aggregate_Max) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Aggregate_Max.ProtoReflect.Descriptor instead. +func (*Type_Aggregate_Max) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 11, 1} +} + +// Computes the min of the input values. +// Allowed input: `Int64` +// State: same as input +type Type_Aggregate_Min struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Type_Aggregate_Min) Reset() { + *x = Type_Aggregate_Min{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Aggregate_Min) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Aggregate_Min) ProtoMessage() {} + +func (x *Type_Aggregate_Min) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Aggregate_Min.ProtoReflect.Descriptor instead. +func (*Type_Aggregate_Min) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 11, 2} +} + +// Computes an approximate unique count over the input values. When using +// raw data as input, be careful to use a consistent encoding. Otherwise +// the same value encoded differently could count more than once, or two +// distinct values could count as identical. 
+// Input: Any, or omit for Raw +// State: TBD +// Special state conversions: `Int64` (the unique count estimate) +type Type_Aggregate_HyperLogLogPlusPlusUniqueCount struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Type_Aggregate_HyperLogLogPlusPlusUniqueCount) Reset() { + *x = Type_Aggregate_HyperLogLogPlusPlusUniqueCount{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Aggregate_HyperLogLogPlusPlusUniqueCount) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Aggregate_HyperLogLogPlusPlusUniqueCount) ProtoMessage() {} + +func (x *Type_Aggregate_HyperLogLogPlusPlusUniqueCount) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Aggregate_HyperLogLogPlusPlusUniqueCount.ProtoReflect.Descriptor instead. +func (*Type_Aggregate_HyperLogLogPlusPlusUniqueCount) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 11, 3} +} + +var File_google_bigtable_admin_v2_types_proto protoreflect.FileDescriptor + +var file_google_bigtable_admin_v2_types_proto_rawDesc = []byte{ + 0x0a, 0x24, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0xeb, 0x14, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x42, + 0x79, 0x74, 0x65, 0x73, 0x48, 0x00, 0x52, 0x09, 0x62, 0x79, 0x74, 0x65, 0x73, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x48, 0x00, 0x52, + 0x0a, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x69, + 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, + 0x49, 0x6e, 0x74, 0x36, 0x34, 0x48, 0x00, 0x52, 0x09, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x54, 0x79, + 0x70, 0x65, 0x12, 
0x4b, 0x0a, 0x0c, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x33, 0x32, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x33, 0x32, + 0x48, 0x00, 0x52, 0x0b, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x33, 0x32, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x4b, 0x0a, 0x0c, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x36, 0x34, 0x48, 0x00, 0x52, + 0x0b, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x36, 0x34, 0x54, 0x79, 0x70, 0x65, 0x12, 0x42, 0x0a, 0x09, + 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, + 0x42, 0x6f, 0x6f, 0x6c, 0x48, 0x00, 0x52, 0x08, 0x62, 0x6f, 0x6f, 0x6c, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x51, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x48, 0x00, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x42, 0x0a, 0x09, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x08, 0x64, + 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x51, 0x0a, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, + 0x67, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, + 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x61, 0x67, 0x67, + 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x73, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, + 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x61, 0x72, 0x72, 0x61, 0x79, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 
0x65, 0x2e, 0x41, 0x72, 0x72, 0x61, 0x79, 0x48, 0x00, + 0x52, 0x09, 0x61, 0x72, 0x72, 0x61, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3f, 0x0a, 0x08, 0x6d, + 0x61, 0x70, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x4d, 0x61, + 0x70, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x1a, 0xb8, 0x01, 0x0a, + 0x05, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x49, 0x0a, 0x08, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, + 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x2e, 0x45, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x08, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, + 0x67, 0x1a, 0x64, 0x0a, 0x08, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x45, 0x0a, + 0x03, 0x72, 0x61, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x61, 0x77, 0x48, 0x00, 0x52, + 0x03, 0x72, 0x61, 0x77, 0x1a, 0x05, 0x0a, 0x03, 0x52, 0x61, 0x77, 0x42, 0x0a, 0x0a, 0x08, 0x65, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x1a, 0xbd, 0x02, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x12, 0x4a, 0x0a, 0x08, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x54, 0x79, 0x70, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x52, 0x08, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x1a, 0xe6, + 0x01, 0x0a, 0x08, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x57, 0x0a, 0x08, 0x75, + 0x74, 0x66, 0x38, 0x5f, 0x72, 0x61, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x53, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x2e, 0x55, 0x74, + 0x66, 0x38, 0x52, 0x61, 0x77, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x07, 0x75, 0x74, 0x66, + 0x38, 0x52, 0x61, 0x77, 0x12, 0x59, 0x0a, 0x0a, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x48, 0x00, 0x52, 0x09, 0x75, 0x74, 0x66, 0x38, 0x42, 0x79, 0x74, 0x65, 0x73, 0x1a, + 0x0d, 0x0a, 0x07, 0x55, 0x74, 0x66, 0x38, 0x52, 0x61, 0x77, 0x3a, 0x02, 0x18, 0x01, 0x1a, 0x0b, + 0x0a, 0x09, 0x55, 0x74, 0x66, 0x38, 0x42, 0x79, 0x74, 0x65, 0x73, 
0x42, 0x0a, 0x0a, 0x08, 0x65, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x1a, 0xac, 0x02, 0x0a, 0x05, 0x49, 0x6e, 0x74, 0x36, + 0x34, 0x12, 0x49, 0x0a, 0x08, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, + 0x79, 0x70, 0x65, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, + 0x6e, 0x67, 0x52, 0x08, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x1a, 0xd7, 0x01, 0x0a, + 0x08, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x68, 0x0a, 0x10, 0x62, 0x69, 0x67, + 0x5f, 0x65, 0x6e, 0x64, 0x69, 0x61, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, + 0x79, 0x70, 0x65, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, + 0x6e, 0x67, 0x2e, 0x42, 0x69, 0x67, 0x45, 0x6e, 0x64, 0x69, 0x61, 0x6e, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x48, 0x00, 0x52, 0x0e, 0x62, 0x69, 0x67, 0x45, 0x6e, 0x64, 0x69, 0x61, 0x6e, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x1a, 0x55, 0x0a, 0x0e, 0x42, 0x69, 0x67, 0x45, 0x6e, 0x64, 0x69, 0x61, 0x6e, + 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x43, 0x0a, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, + 0x09, 0x62, 0x79, 0x74, 0x65, 0x73, 0x54, 0x79, 0x70, 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x65, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x6f, 0x6c, 0x1a, 0x09, + 0x0a, 0x07, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x33, 0x32, 0x1a, 0x09, 0x0a, 0x07, 0x46, 0x6c, 0x6f, + 0x61, 0x74, 0x36, 0x34, 0x1a, 0x0b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x1a, 0x06, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x65, 0x1a, 0xa9, 0x01, 0x0a, 0x06, 0x53, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x12, 0x43, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x54, 0x79, 0x70, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x5a, 0x0a, 0x05, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x32, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x4a, 0x0a, 0x05, 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x41, + 0x0a, 0x0c, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 
0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x1a, 0x7f, 0x0a, 0x03, 0x4d, 0x61, 0x70, 0x12, 0x39, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x3d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x1a, 0x92, 0x04, 0x0a, 0x09, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, + 0x12, 0x3d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x42, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, + 0x79, 0x70, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x03, 0x73, 0x75, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, + 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x75, 0x6d, 0x48, 0x00, + 0x52, 0x03, 0x73, 0x75, 0x6d, 0x12, 0x77, 0x0a, 0x12, 0x68, 0x6c, 0x6c, 0x70, 0x70, 0x5f, 0x75, + 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x47, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, + 0x65, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x48, 0x79, 0x70, 0x65, + 0x72, 0x4c, 0x6f, 0x67, 0x4c, 0x6f, 0x67, 0x50, 0x6c, 0x75, 0x73, 0x50, 0x6c, 0x75, 0x73, 0x55, + 0x6e, 0x69, 0x71, 0x75, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x10, 0x68, 0x6c, + 0x6c, 0x70, 0x70, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x40, + 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x67, 0x67, 0x72, + 0x65, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x4d, 0x61, 0x78, 0x48, 0x00, 0x52, 0x03, 0x6d, 0x61, 0x78, + 0x12, 0x40, 0x0a, 0x03, 0x6d, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, + 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x67, + 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x4d, 0x69, 0x6e, 0x48, 0x00, 0x52, 0x03, 0x6d, + 0x69, 0x6e, 0x1a, 0x05, 0x0a, 0x03, 0x53, 0x75, 0x6d, 0x1a, 0x05, 0x0a, 0x03, 0x4d, 0x61, 0x78, + 0x1a, 0x05, 0x0a, 0x03, 0x4d, 0x69, 0x6e, 0x1a, 0x20, 0x0a, 0x1e, 0x48, 0x79, 0x70, 0x65, 0x72, + 0x4c, 0x6f, 0x67, 0x4c, 0x6f, 0x67, 0x50, 0x6c, 0x75, 0x73, 0x50, 0x6c, 0x75, 0x73, 0x55, 0x6e, + 0x69, 0x71, 0x75, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x0c, 0x0a, 0x0a, 0x61, 0x67, 0x67, + 0x72, 0x65, 0x67, 0x61, 0x74, 0x6f, 0x72, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x42, + 0xcd, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x42, 0x0a, 0x54, 0x79, 0x70, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x67, 0x6f, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x70, 0x62, + 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x70, 0x62, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_bigtable_admin_v2_types_proto_rawDescOnce sync.Once + file_google_bigtable_admin_v2_types_proto_rawDescData = file_google_bigtable_admin_v2_types_proto_rawDesc +) + +func file_google_bigtable_admin_v2_types_proto_rawDescGZIP() []byte { + file_google_bigtable_admin_v2_types_proto_rawDescOnce.Do(func() { + file_google_bigtable_admin_v2_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_bigtable_admin_v2_types_proto_rawDescData) + }) + return file_google_bigtable_admin_v2_types_proto_rawDescData +} + +var file_google_bigtable_admin_v2_types_proto_msgTypes = make([]protoimpl.MessageInfo, 25) +var file_google_bigtable_admin_v2_types_proto_goTypes = []any{ + (*Type)(nil), // 0: google.bigtable.admin.v2.Type + (*Type_Bytes)(nil), // 1: google.bigtable.admin.v2.Type.Bytes + (*Type_String)(nil), // 2: google.bigtable.admin.v2.Type.String + (*Type_Int64)(nil), // 3: google.bigtable.admin.v2.Type.Int64 + (*Type_Bool)(nil), // 4: google.bigtable.admin.v2.Type.Bool + (*Type_Float32)(nil), // 5: google.bigtable.admin.v2.Type.Float32 + (*Type_Float64)(nil), // 6: google.bigtable.admin.v2.Type.Float64 + (*Type_Timestamp)(nil), // 7: google.bigtable.admin.v2.Type.Timestamp + (*Type_Date)(nil), // 8: google.bigtable.admin.v2.Type.Date + (*Type_Struct)(nil), // 9: google.bigtable.admin.v2.Type.Struct + (*Type_Array)(nil), // 10: google.bigtable.admin.v2.Type.Array + (*Type_Map)(nil), // 11: google.bigtable.admin.v2.Type.Map + 
(*Type_Aggregate)(nil), // 12: google.bigtable.admin.v2.Type.Aggregate + (*Type_Bytes_Encoding)(nil), // 13: google.bigtable.admin.v2.Type.Bytes.Encoding + (*Type_Bytes_Encoding_Raw)(nil), // 14: google.bigtable.admin.v2.Type.Bytes.Encoding.Raw + (*Type_String_Encoding)(nil), // 15: google.bigtable.admin.v2.Type.String.Encoding + (*Type_String_Encoding_Utf8Raw)(nil), // 16: google.bigtable.admin.v2.Type.String.Encoding.Utf8Raw + (*Type_String_Encoding_Utf8Bytes)(nil), // 17: google.bigtable.admin.v2.Type.String.Encoding.Utf8Bytes + (*Type_Int64_Encoding)(nil), // 18: google.bigtable.admin.v2.Type.Int64.Encoding + (*Type_Int64_Encoding_BigEndianBytes)(nil), // 19: google.bigtable.admin.v2.Type.Int64.Encoding.BigEndianBytes + (*Type_Struct_Field)(nil), // 20: google.bigtable.admin.v2.Type.Struct.Field + (*Type_Aggregate_Sum)(nil), // 21: google.bigtable.admin.v2.Type.Aggregate.Sum + (*Type_Aggregate_Max)(nil), // 22: google.bigtable.admin.v2.Type.Aggregate.Max + (*Type_Aggregate_Min)(nil), // 23: google.bigtable.admin.v2.Type.Aggregate.Min + (*Type_Aggregate_HyperLogLogPlusPlusUniqueCount)(nil), // 24: google.bigtable.admin.v2.Type.Aggregate.HyperLogLogPlusPlusUniqueCount +} +var file_google_bigtable_admin_v2_types_proto_depIdxs = []int32{ + 1, // 0: google.bigtable.admin.v2.Type.bytes_type:type_name -> google.bigtable.admin.v2.Type.Bytes + 2, // 1: google.bigtable.admin.v2.Type.string_type:type_name -> google.bigtable.admin.v2.Type.String + 3, // 2: google.bigtable.admin.v2.Type.int64_type:type_name -> google.bigtable.admin.v2.Type.Int64 + 5, // 3: google.bigtable.admin.v2.Type.float32_type:type_name -> google.bigtable.admin.v2.Type.Float32 + 6, // 4: google.bigtable.admin.v2.Type.float64_type:type_name -> google.bigtable.admin.v2.Type.Float64 + 4, // 5: google.bigtable.admin.v2.Type.bool_type:type_name -> google.bigtable.admin.v2.Type.Bool + 7, // 6: google.bigtable.admin.v2.Type.timestamp_type:type_name -> google.bigtable.admin.v2.Type.Timestamp + 8, // 7: google.bigtable.admin.v2.Type.date_type:type_name -> google.bigtable.admin.v2.Type.Date + 12, // 8: google.bigtable.admin.v2.Type.aggregate_type:type_name -> google.bigtable.admin.v2.Type.Aggregate + 9, // 9: google.bigtable.admin.v2.Type.struct_type:type_name -> google.bigtable.admin.v2.Type.Struct + 10, // 10: google.bigtable.admin.v2.Type.array_type:type_name -> google.bigtable.admin.v2.Type.Array + 11, // 11: google.bigtable.admin.v2.Type.map_type:type_name -> google.bigtable.admin.v2.Type.Map + 13, // 12: google.bigtable.admin.v2.Type.Bytes.encoding:type_name -> google.bigtable.admin.v2.Type.Bytes.Encoding + 15, // 13: google.bigtable.admin.v2.Type.String.encoding:type_name -> google.bigtable.admin.v2.Type.String.Encoding + 18, // 14: google.bigtable.admin.v2.Type.Int64.encoding:type_name -> google.bigtable.admin.v2.Type.Int64.Encoding + 20, // 15: google.bigtable.admin.v2.Type.Struct.fields:type_name -> google.bigtable.admin.v2.Type.Struct.Field + 0, // 16: google.bigtable.admin.v2.Type.Array.element_type:type_name -> google.bigtable.admin.v2.Type + 0, // 17: google.bigtable.admin.v2.Type.Map.key_type:type_name -> google.bigtable.admin.v2.Type + 0, // 18: google.bigtable.admin.v2.Type.Map.value_type:type_name -> google.bigtable.admin.v2.Type + 0, // 19: google.bigtable.admin.v2.Type.Aggregate.input_type:type_name -> google.bigtable.admin.v2.Type + 0, // 20: google.bigtable.admin.v2.Type.Aggregate.state_type:type_name -> google.bigtable.admin.v2.Type + 21, // 21: google.bigtable.admin.v2.Type.Aggregate.sum:type_name -> 
google.bigtable.admin.v2.Type.Aggregate.Sum + 24, // 22: google.bigtable.admin.v2.Type.Aggregate.hllpp_unique_count:type_name -> google.bigtable.admin.v2.Type.Aggregate.HyperLogLogPlusPlusUniqueCount + 22, // 23: google.bigtable.admin.v2.Type.Aggregate.max:type_name -> google.bigtable.admin.v2.Type.Aggregate.Max + 23, // 24: google.bigtable.admin.v2.Type.Aggregate.min:type_name -> google.bigtable.admin.v2.Type.Aggregate.Min + 14, // 25: google.bigtable.admin.v2.Type.Bytes.Encoding.raw:type_name -> google.bigtable.admin.v2.Type.Bytes.Encoding.Raw + 16, // 26: google.bigtable.admin.v2.Type.String.Encoding.utf8_raw:type_name -> google.bigtable.admin.v2.Type.String.Encoding.Utf8Raw + 17, // 27: google.bigtable.admin.v2.Type.String.Encoding.utf8_bytes:type_name -> google.bigtable.admin.v2.Type.String.Encoding.Utf8Bytes + 19, // 28: google.bigtable.admin.v2.Type.Int64.Encoding.big_endian_bytes:type_name -> google.bigtable.admin.v2.Type.Int64.Encoding.BigEndianBytes + 1, // 29: google.bigtable.admin.v2.Type.Int64.Encoding.BigEndianBytes.bytes_type:type_name -> google.bigtable.admin.v2.Type.Bytes + 0, // 30: google.bigtable.admin.v2.Type.Struct.Field.type:type_name -> google.bigtable.admin.v2.Type + 31, // [31:31] is the sub-list for method output_type + 31, // [31:31] is the sub-list for method input_type + 31, // [31:31] is the sub-list for extension type_name + 31, // [31:31] is the sub-list for extension extendee + 0, // [0:31] is the sub-list for field type_name +} + +func init() { file_google_bigtable_admin_v2_types_proto_init() } +func file_google_bigtable_admin_v2_types_proto_init() { + if File_google_bigtable_admin_v2_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_bigtable_admin_v2_types_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Type); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_types_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*Type_Bytes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_types_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*Type_String); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_types_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*Type_Int64); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_types_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*Type_Bool); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_types_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*Type_Float32); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_types_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*Type_Float64); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_bigtable_admin_v2_types_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*Type_Timestamp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_types_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*Type_Date); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_types_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*Type_Struct); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_types_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*Type_Array); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_types_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*Type_Map); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_types_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*Type_Aggregate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_types_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*Type_Bytes_Encoding); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_types_proto_msgTypes[14].Exporter = func(v any, i int) any { + switch v := v.(*Type_Bytes_Encoding_Raw); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_types_proto_msgTypes[15].Exporter = func(v any, i int) any { + switch v := v.(*Type_String_Encoding); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_types_proto_msgTypes[16].Exporter = func(v any, i int) any { + switch v := v.(*Type_String_Encoding_Utf8Raw); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_types_proto_msgTypes[17].Exporter = func(v any, i int) any { + switch v := v.(*Type_String_Encoding_Utf8Bytes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_types_proto_msgTypes[18].Exporter = func(v any, i int) any { + switch v := v.(*Type_Int64_Encoding); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_types_proto_msgTypes[19].Exporter = func(v any, i int) any { + switch v := v.(*Type_Int64_Encoding_BigEndianBytes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_types_proto_msgTypes[20].Exporter = func(v any, i int) any { + switch v := 
v.(*Type_Struct_Field); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_types_proto_msgTypes[21].Exporter = func(v any, i int) any { + switch v := v.(*Type_Aggregate_Sum); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_types_proto_msgTypes[22].Exporter = func(v any, i int) any { + switch v := v.(*Type_Aggregate_Max); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_types_proto_msgTypes[23].Exporter = func(v any, i int) any { + switch v := v.(*Type_Aggregate_Min); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_types_proto_msgTypes[24].Exporter = func(v any, i int) any { + switch v := v.(*Type_Aggregate_HyperLogLogPlusPlusUniqueCount); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_bigtable_admin_v2_types_proto_msgTypes[0].OneofWrappers = []any{ + (*Type_BytesType)(nil), + (*Type_StringType)(nil), + (*Type_Int64Type)(nil), + (*Type_Float32Type)(nil), + (*Type_Float64Type)(nil), + (*Type_BoolType)(nil), + (*Type_TimestampType)(nil), + (*Type_DateType)(nil), + (*Type_AggregateType)(nil), + (*Type_StructType)(nil), + (*Type_ArrayType)(nil), + (*Type_MapType)(nil), + } + file_google_bigtable_admin_v2_types_proto_msgTypes[12].OneofWrappers = []any{ + (*Type_Aggregate_Sum_)(nil), + (*Type_Aggregate_HllppUniqueCount)(nil), + (*Type_Aggregate_Max_)(nil), + (*Type_Aggregate_Min_)(nil), + } + file_google_bigtable_admin_v2_types_proto_msgTypes[13].OneofWrappers = []any{ + (*Type_Bytes_Encoding_Raw_)(nil), + } + file_google_bigtable_admin_v2_types_proto_msgTypes[15].OneofWrappers = []any{ + (*Type_String_Encoding_Utf8Raw_)(nil), + (*Type_String_Encoding_Utf8Bytes_)(nil), + } + file_google_bigtable_admin_v2_types_proto_msgTypes[18].OneofWrappers = []any{ + (*Type_Int64_Encoding_BigEndianBytes_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_bigtable_admin_v2_types_proto_rawDesc, + NumEnums: 0, + NumMessages: 25, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_bigtable_admin_v2_types_proto_goTypes, + DependencyIndexes: file_google_bigtable_admin_v2_types_proto_depIdxs, + MessageInfos: file_google_bigtable_admin_v2_types_proto_msgTypes, + }.Build() + File_google_bigtable_admin_v2_types_proto = out.File + file_google_bigtable_admin_v2_types_proto_rawDesc = nil + file_google_bigtable_admin_v2_types_proto_goTypes = nil + file_google_bigtable_admin_v2_types_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/aliasshim.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/aliasshim.go new file mode 100644 index 00000000000..47c0c37da85 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/aliasshim.go @@ -0,0 +1,24 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by gapicgen. DO NOT EDIT.
+
+//go:build aliasshim
+// +build aliasshim
+
+// Package aliasshim is used to keep the dependency on go-genproto during our
+// go-genproto to google-cloud-go stubs migration window.
+package aliasshim
+
+import _ "google.golang.org/genproto/protobuf/api"
diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/bigtable.pb.go
similarity index 79%
rename from terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go
rename to terraform/providers/google/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/bigtable.pb.go
index 3f2e59d8c50..e0880f480b6 100644
--- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go
+++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/bigtable.pb.go
@@ -14,11 +14,11 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.26.0
-// 	protoc        v4.24.4
+// 	protoc-gen-go v1.34.2
+// 	protoc        v4.25.3
 // source: google/bigtable/v2/bigtable.proto
 
-package bigtable
+package bigtablepb
 
 import (
 	context "context"
@@ -1670,6 +1670,246 @@ func (*ReadChangeStreamResponse_Heartbeat_) isReadChangeStreamResponse_StreamRec
 
 func (*ReadChangeStreamResponse_CloseStream_) isReadChangeStreamResponse_StreamRecord() {}
 
+// Request message for Bigtable.ExecuteQuery
+type ExecuteQueryRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required. The unique name of the instance against which the query should be
+	// executed.
+	// Values are of the form `projects/<project>/instances/<instance>`
+	InstanceName string `protobuf:"bytes,1,opt,name=instance_name,json=instanceName,proto3" json:"instance_name,omitempty"`
+	// Optional. This value specifies routing for replication. If not specified,
+	// the `default` application profile will be used.
+	AppProfileId string `protobuf:"bytes,2,opt,name=app_profile_id,json=appProfileId,proto3" json:"app_profile_id,omitempty"`
+	// Required. The query string.
+	Query string `protobuf:"bytes,3,opt,name=query,proto3" json:"query,omitempty"`
+	// Required. Requested data format for the response.
+	//
+	// Types that are assignable to DataFormat:
+	//
+	//	*ExecuteQueryRequest_ProtoFormat
+	DataFormat isExecuteQueryRequest_DataFormat `protobuf_oneof:"data_format"`
+	// Optional. If this request is resuming a previously interrupted query
+	// execution, `resume_token` should be copied from the last
+	// PartialResultSet yielded before the interruption. Doing this
+	// enables the query execution to resume where the last one left
+	// off.
+	// The rest of the request parameters must exactly match the
+	// request that yielded this token. Otherwise the request will fail.
+	ResumeToken []byte `protobuf:"bytes,8,opt,name=resume_token,json=resumeToken,proto3" json:"resume_token,omitempty"`
+	// Required.
params contains string type keys and Bigtable type values that + // bind to placeholders in the query string. In query string, a parameter + // placeholder consists of the + // `@` character followed by the parameter name (for example, `@firstName`) in + // the query string. + // + // For example, if + // `params["firstName"] = bytes_value: "foo" type {bytes_type {}}` + // + // then `@firstName` will be replaced with googlesql bytes value "foo" in the + // query string during query evaluation. + // + // In case of Value.kind is not set, it will be set to corresponding null + // value in googlesql. + // + // `params["firstName"] = type {string_type {}}` + // then `@firstName` will be replaced with googlesql null string. + // + // Value.type should always be set and no inference of type will be made from + // Value.kind. If Value.type is not set, we will return INVALID_ARGUMENT + // error. + Params map[string]*Value `protobuf:"bytes,7,rep,name=params,proto3" json:"params,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ExecuteQueryRequest) Reset() { + *x = ExecuteQueryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExecuteQueryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExecuteQueryRequest) ProtoMessage() {} + +func (x *ExecuteQueryRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExecuteQueryRequest.ProtoReflect.Descriptor instead. +func (*ExecuteQueryRequest) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{19} +} + +func (x *ExecuteQueryRequest) GetInstanceName() string { + if x != nil { + return x.InstanceName + } + return "" +} + +func (x *ExecuteQueryRequest) GetAppProfileId() string { + if x != nil { + return x.AppProfileId + } + return "" +} + +func (x *ExecuteQueryRequest) GetQuery() string { + if x != nil { + return x.Query + } + return "" +} + +func (m *ExecuteQueryRequest) GetDataFormat() isExecuteQueryRequest_DataFormat { + if m != nil { + return m.DataFormat + } + return nil +} + +func (x *ExecuteQueryRequest) GetProtoFormat() *ProtoFormat { + if x, ok := x.GetDataFormat().(*ExecuteQueryRequest_ProtoFormat); ok { + return x.ProtoFormat + } + return nil +} + +func (x *ExecuteQueryRequest) GetResumeToken() []byte { + if x != nil { + return x.ResumeToken + } + return nil +} + +func (x *ExecuteQueryRequest) GetParams() map[string]*Value { + if x != nil { + return x.Params + } + return nil +} + +type isExecuteQueryRequest_DataFormat interface { + isExecuteQueryRequest_DataFormat() +} + +type ExecuteQueryRequest_ProtoFormat struct { + // Protocol buffer format as described by ProtoSchema and ProtoRows + // messages. 
+ ProtoFormat *ProtoFormat `protobuf:"bytes,4,opt,name=proto_format,json=protoFormat,proto3,oneof"` +} + +func (*ExecuteQueryRequest_ProtoFormat) isExecuteQueryRequest_DataFormat() {} + +// Response message for Bigtable.ExecuteQuery +type ExecuteQueryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The first response streamed from the server is of type `ResultSetMetadata` + // and includes information about the columns and types of the result set. + // From there on, we stream `PartialResultSet` messages with no additional + // information. `PartialResultSet` will contain `resume_token` to restart the + // response if query interrupts. In case of resumption with `resume_token`, + // the server will not resend the ResultSetMetadata. + // + // Types that are assignable to Response: + // + // *ExecuteQueryResponse_Metadata + // *ExecuteQueryResponse_Results + Response isExecuteQueryResponse_Response `protobuf_oneof:"response"` +} + +func (x *ExecuteQueryResponse) Reset() { + *x = ExecuteQueryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExecuteQueryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExecuteQueryResponse) ProtoMessage() {} + +func (x *ExecuteQueryResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExecuteQueryResponse.ProtoReflect.Descriptor instead. +func (*ExecuteQueryResponse) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{20} +} + +func (m *ExecuteQueryResponse) GetResponse() isExecuteQueryResponse_Response { + if m != nil { + return m.Response + } + return nil +} + +func (x *ExecuteQueryResponse) GetMetadata() *ResultSetMetadata { + if x, ok := x.GetResponse().(*ExecuteQueryResponse_Metadata); ok { + return x.Metadata + } + return nil +} + +func (x *ExecuteQueryResponse) GetResults() *PartialResultSet { + if x, ok := x.GetResponse().(*ExecuteQueryResponse_Results); ok { + return x.Results + } + return nil +} + +type isExecuteQueryResponse_Response interface { + isExecuteQueryResponse_Response() +} + +type ExecuteQueryResponse_Metadata struct { + // Structure of rows in this response stream. The first (and only the first) + // response streamed from the server will be of this type. + Metadata *ResultSetMetadata `protobuf:"bytes,1,opt,name=metadata,proto3,oneof"` +} + +type ExecuteQueryResponse_Results struct { + // A partial result set with row data potentially including additional + // instructions on how recent past and future partial responses should be + // interpreted. + Results *PartialResultSet `protobuf:"bytes,2,opt,name=results,proto3,oneof"` +} + +func (*ExecuteQueryResponse_Metadata) isExecuteQueryResponse_Response() {} + +func (*ExecuteQueryResponse_Results) isExecuteQueryResponse_Response() {} + // Specifies a piece of a row's contents returned as part of the read // response stream. 
type ReadRowsResponse_CellChunk struct { @@ -1731,7 +1971,7 @@ type ReadRowsResponse_CellChunk struct { func (x *ReadRowsResponse_CellChunk) Reset() { *x = ReadRowsResponse_CellChunk{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[19] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1744,7 +1984,7 @@ func (x *ReadRowsResponse_CellChunk) String() string { func (*ReadRowsResponse_CellChunk) ProtoMessage() {} func (x *ReadRowsResponse_CellChunk) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[19] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1867,7 +2107,7 @@ type MutateRowsRequest_Entry struct { func (x *MutateRowsRequest_Entry) Reset() { *x = MutateRowsRequest_Entry{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[20] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1880,7 +2120,7 @@ func (x *MutateRowsRequest_Entry) String() string { func (*MutateRowsRequest_Entry) ProtoMessage() {} func (x *MutateRowsRequest_Entry) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[20] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1929,7 +2169,7 @@ type MutateRowsResponse_Entry struct { func (x *MutateRowsResponse_Entry) Reset() { *x = MutateRowsResponse_Entry{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[21] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1942,7 +2182,7 @@ func (x *MutateRowsResponse_Entry) String() string { func (*MutateRowsResponse_Entry) ProtoMessage() {} func (x *MutateRowsResponse_Entry) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[21] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1990,7 +2230,7 @@ type ReadChangeStreamResponse_MutationChunk struct { func (x *ReadChangeStreamResponse_MutationChunk) Reset() { *x = ReadChangeStreamResponse_MutationChunk{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[22] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2003,7 +2243,7 @@ func (x *ReadChangeStreamResponse_MutationChunk) String() string { func (*ReadChangeStreamResponse_MutationChunk) ProtoMessage() {} func (x *ReadChangeStreamResponse_MutationChunk) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[22] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2085,7 +2325,7 @@ type ReadChangeStreamResponse_DataChange struct { func (x *ReadChangeStreamResponse_DataChange) Reset() { *x = 
 	*x = ReadChangeStreamResponse_DataChange{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[23]
+		mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[25]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
 }
@@ -2098,7 +2338,7 @@ func (x *ReadChangeStreamResponse_DataChange) String() string {
 func (*ReadChangeStreamResponse_DataChange) ProtoMessage() {}
 
 func (x *ReadChangeStreamResponse_DataChange) ProtoReflect() protoreflect.Message {
-	mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[23]
+	mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[25]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -2199,7 +2439,7 @@ type ReadChangeStreamResponse_Heartbeat struct {
 func (x *ReadChangeStreamResponse_Heartbeat) Reset() {
 	*x = ReadChangeStreamResponse_Heartbeat{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[24]
+		mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[26]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
 }
@@ -2212,7 +2452,7 @@ func (x *ReadChangeStreamResponse_Heartbeat) String() string {
 func (*ReadChangeStreamResponse_Heartbeat) ProtoMessage() {}
 
 func (x *ReadChangeStreamResponse_Heartbeat) ProtoReflect() protoreflect.Message {
-	mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[24]
+	mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[26]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -2285,7 +2525,7 @@ type ReadChangeStreamResponse_CloseStream struct {
 func (x *ReadChangeStreamResponse_CloseStream) Reset() {
 	*x = ReadChangeStreamResponse_CloseStream{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[25]
+		mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[27]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
 }
@@ -2298,7 +2538,7 @@ func (x *ReadChangeStreamResponse_CloseStream) String() string {
 func (*ReadChangeStreamResponse_CloseStream) ProtoMessage() {}
 
 func (x *ReadChangeStreamResponse_CloseStream) ProtoReflect() protoreflect.Message {
-	mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[25]
+	mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[27]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -2355,7 +2595,7 @@ type ReadChangeStreamResponse_MutationChunk_ChunkInfo struct {
 func (x *ReadChangeStreamResponse_MutationChunk_ChunkInfo) Reset() {
 	*x = ReadChangeStreamResponse_MutationChunk_ChunkInfo{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[26]
+		mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[28]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
 }
@@ -2368,7 +2608,7 @@ func (x *ReadChangeStreamResponse_MutationChunk_ChunkInfo) String() string {
 func (*ReadChangeStreamResponse_MutationChunk_ChunkInfo) ProtoMessage() {}
 
 func (x *ReadChangeStreamResponse_MutationChunk_ChunkInfo) ProtoReflect() protoreflect.Message {
-	mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[26]
+	mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[28]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -2817,57 +3057,161 @@ var
file_google_bigtable_v2_bigtable_proto_rawDesc = []byte{ 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6e, 0x65, 0x77, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x73, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x32, 0xef, 0x21, 0x0a, 0x08, 0x42, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0xdb, 0x03, 0x0a, 0x08, 0x52, 0x65, 0x61, 0x64, - 0x52, 0x6f, 0x77, 0x73, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x6f, - 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, - 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x81, 0x03, 0xda, 0x41, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0xda, - 0x41, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x61, 0x70, 0x70, - 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x9a, 0x01, 0x22, 0x39, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, + 0x65, 0x61, 0x6d, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x22, 0xd4, 0x03, 0x0a, 0x13, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x52, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, + 0x27, 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, + 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x49, + 0x64, 0x12, 0x19, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x44, 0x0a, 0x0c, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x46, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x12, 0x26, 0x0a, 0x0c, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x5f, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x72, + 0x65, 0x73, 0x75, 0x6d, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x50, 0x0a, 0x06, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 
0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x54, 0x0a, 0x0b, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2f, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x42, 0x0d, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x22, 0xa9, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x53, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, + 0x40, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x9d, 0x24, + 0x0a, 0x08, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0xdb, 0x03, 0x0a, 0x08, 0x52, + 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, + 0x64, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x81, 0x03, 0xda, 0x41, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0xda, 0x41, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, + 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x9a, 0x01, 0x3a, 0x01, 0x2a, 0x5a, 0x5a, 0x3a, 0x01, 0x2a, 0x22, 0x55, 0x2f, + 0x76, 0x32, 0x2f, 0x7b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, + 0x69, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, + 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x72, 0x65, 0x61, 0x64, + 0x52, 0x6f, 0x77, 0x73, 0x22, 0x39, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 
0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x72, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x8a, + 0xd3, 0xe4, 0x93, 0x02, 0xb0, 0x01, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x69, 0x64, 0x12, 0x60, 0x0a, 0x14, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, + 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x48, 0x7b, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x72, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x3a, 0x01, 0x2a, - 0x5a, 0x5a, 0x22, 0x55, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, - 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x75, - 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x73, 0x2f, 0x2a, 0x7d, - 0x3a, 0x72, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, 0xe4, 0x93, - 0x02, 0xb0, 0x01, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x2c, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, + 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, + 0x65, 0x77, 0x73, 0x2f, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0xee, 0x03, 0x0a, 0x0d, 0x53, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x85, 0x03, 0xda, 0x41, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0xda, + 0x41, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x61, 0x70, 0x70, + 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x9e, 0x01, 0x5a, 0x5c, 0x12, 0x5a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x61, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x2f, + 0x61, 0x75, 0x74, 0x68, 0x6f, 
0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x73, 0x2f, + 0x2a, 0x7d, 0x3a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, + 0x12, 0x3e, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0x3a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0xb0, 0x01, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, + 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x12, 0x60, 0x0a, 0x14, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, + 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x48, 0x7b, + 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, + 0x69, 0x65, 0x77, 0x73, 0x2f, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x82, 0x04, 0x0a, 0x09, 0x4d, 0x75, + 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xa7, 0x03, 0xda, 0x41, 0x1c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x6d, 0x75, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xda, 0x41, 0x2b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x2c, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x6d, 0x75, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x69, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x9c, 0x01, 0x3a, 0x01, 0x2a, 0x5a, 0x5b, + 0x3a, 0x01, 0x2a, 0x22, 0x56, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, - 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, - 0x64, 0x12, 0x60, 0x0a, 0x14, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, - 0x76, 0x69, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x48, 0x7b, 0x61, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 
0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, - 0x2f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x73, - 0x2f, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0xee, 0x03, 0x0a, 0x0d, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, - 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x61, 0x6d, - 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, - 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x85, 0x03, 0xda, - 0x41, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0xda, 0x41, 0x19, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, - 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x9e, 0x01, 0x12, - 0x3e, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, - 0x7d, 0x3a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x5a, - 0x5c, 0x12, 0x5a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, - 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, + 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x73, 0x2f, 0x2a, + 0x7d, 0x3a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x22, 0x3a, 0x2f, 0x76, 0x32, + 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, - 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x75, 0x74, - 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, - 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x8a, 0xd3, 0xe4, + 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x75, + 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0xb0, 0x01, 0x12, 0x3a, + 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x7b, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, + 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, + 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x12, 0x60, 0x0a, 0x14, + 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x48, 0x7b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 
0x69, 0x7a, 0x65, + 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xf5, + 0x03, 0x0a, 0x0a, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x25, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x95, 0x03, 0xda, + 0x41, 0x12, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x65, 0x6e, 0x74, + 0x72, 0x69, 0x65, 0x73, 0xda, 0x41, 0x21, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x2c, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, + 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x9e, 0x01, 0x3a, + 0x01, 0x2a, 0x5a, 0x5c, 0x3a, 0x01, 0x2a, 0x22, 0x57, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x61, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, + 0x2f, 0x2a, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, + 0x77, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, + 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0x3a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0xb0, 0x01, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, @@ -2879,248 +3223,203 @@ var file_google_bigtable_v2_bigtable_proto_rawDesc = []byte{ 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, - 0x73, 0x2f, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x82, 0x04, 0x0a, 0x09, 0x4d, 0x75, 0x74, 0x61, 0x74, - 0x65, 0x52, 0x6f, 0x77, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x73, 0x2f, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0xf6, 0x04, 0x0a, 0x11, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x41, 0x6e, 0x64, 0x4d, 0x75, 
0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x12, 0x2c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0xa7, 0x03, 0xda, 0x41, 0x1c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x2c, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0xda, 0x41, 0x2b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x2c, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, - 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x9c, 0x01, 0x22, 0x3a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, - 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x75, 0x74, 0x61, 0x74, - 0x65, 0x52, 0x6f, 0x77, 0x3a, 0x01, 0x2a, 0x5a, 0x5b, 0x22, 0x56, 0x2f, 0x76, 0x32, 0x2f, 0x7b, - 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, + 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x83, 0x04, 0xda, 0x41, 0x42, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, + 0x79, 0x2c, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x2c, 0x74, 0x72, 0x75, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2c, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0xda, 0x41, 0x51, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, + 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2c, 0x74, 0x72, 0x75, 0x65, 0x5f, 0x6d, 0x75, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2c, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x5f, 0x6d, 0x75, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0xac, 0x01, 0x3a, 0x01, 0x2a, 0x5a, + 0x63, 0x3a, 0x01, 0x2a, 0x22, 0x5e, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x61, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x2f, + 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x73, 0x2f, + 0x2a, 0x7d, 0x3a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x6f, 0x77, 0x22, 0x42, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 
0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x6e, 0x64, 0x4d, + 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0xb0, 0x01, 0x12, + 0x3a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x7b, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, + 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, + 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x12, 0x60, 0x0a, + 0x14, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x48, 0x7b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, + 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x73, 0x2f, 0x2a, 0x7d, 0x12, + 0xee, 0x01, 0x0a, 0x0b, 0x50, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x64, 0x57, 0x61, 0x72, 0x6d, 0x12, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x64, 0x57, 0x61, 0x72, 0x6d, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x69, 0x6e, + 0x67, 0x41, 0x6e, 0x64, 0x57, 0x61, 0x72, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x8d, 0x01, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0xda, 0x41, 0x13, 0x6e, 0x61, 0x6d, + 0x65, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2b, 0x3a, 0x01, 0x2a, 0x22, 0x26, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, - 0x69, 0x65, 0x77, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, - 0x77, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0xb0, 0x01, 0x12, 0x3a, 0x0a, 0x0a, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x7b, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, - 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x12, 0x60, 0x0a, 0x14, 0x61, 0x75, 0x74, - 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x48, 0x7b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, + 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 
0x70, 0x69, 0x6e, + 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x39, 0x12, 0x25, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x1d, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x10, + 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, + 0x12, 0xa7, 0x04, 0x0a, 0x12, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, + 0x64, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, + 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb1, 0x03, 0xda, 0x41, 0x18, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x72, + 0x75, 0x6c, 0x65, 0x73, 0xda, 0x41, 0x27, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x2c, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x2c, + 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0xae, 0x01, 0x3a, 0x01, 0x2a, 0x5a, 0x64, 0x3a, 0x01, 0x2a, 0x22, 0x5f, 0x2f, + 0x76, 0x32, 0x2f, 0x7b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, - 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xf5, 0x03, 0x0a, 0x0a, - 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x95, 0x03, 0xda, 0x41, 0x12, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, - 0x73, 0xda, 0x41, 0x21, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x65, - 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, - 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x9e, 0x01, 0x22, 0x3b, 0x2f, 0x76, - 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, - 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x3a, 0x01, 0x2a, 0x5a, 0x5c, 0x22, 0x57, - 0x2f, 0x76, 
0x32, 0x2f, 0x7b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, - 0x76, 0x69, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, - 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x75, 0x74, - 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0xb0, - 0x01, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x2c, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, - 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, - 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x12, - 0x60, 0x0a, 0x14, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, - 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x48, 0x7b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x61, - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x73, 0x2f, 0x2a, - 0x7d, 0x30, 0x01, 0x12, 0xf6, 0x04, 0x0a, 0x11, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x6e, 0x64, - 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x83, 0x04, 0xda, 0x41, 0x42, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x70, - 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2c, - 0x74, 0x72, 0x75, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2c, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xda, 0x41, - 0x51, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, 0x6f, 0x77, 0x5f, - 0x6b, 0x65, 0x79, 0x2c, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x2c, 0x74, 0x72, 0x75, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2c, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, - 0x69, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0xac, 0x01, 0x22, 0x42, 0x2f, 0x76, 0x32, 0x2f, 0x7b, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 
0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, - 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x63, 0x68, 0x65, 0x63, - 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x3a, 0x01, 0x2a, - 0x5a, 0x63, 0x22, 0x5e, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, - 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x75, - 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x73, 0x2f, 0x2a, 0x7d, - 0x3a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x6f, 0x77, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0xb0, 0x01, 0x12, 0x3a, 0x0a, 0x0a, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x7b, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, - 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x12, 0x60, 0x0a, 0x14, 0x61, 0x75, - 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x48, 0x7b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, - 0x76, 0x69, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, - 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xee, 0x01, 0x0a, - 0x0b, 0x50, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x64, 0x57, 0x61, 0x72, 0x6d, 0x12, 0x26, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x64, 0x57, 0x61, 0x72, 0x6d, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x41, 0x6e, - 0x64, 0x57, 0x61, 0x72, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8d, 0x01, - 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0xda, 0x41, 0x13, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x61, - 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x2b, 0x22, 0x26, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x70, 0x69, 0x6e, 0x67, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, - 0xe4, 0x93, 0x02, 0x39, 0x12, 0x25, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x7b, 0x6e, - 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, - 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x12, 0xa7, 0x04, - 0x0a, 0x12, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x69, 
0x66, 0x79, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x52, 0x6f, 0x77, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, - 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, - 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0xb1, 0x03, 0xda, 0x41, 0x18, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x2c, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x72, 0x75, 0x6c, 0x65, - 0x73, 0xda, 0x41, 0x27, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, - 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x2c, 0x61, 0x70, 0x70, - 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0xae, 0x01, 0x22, 0x43, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x3a, 0x01, 0x2a, 0x5a, 0x64, 0x22, 0x5f, 0x2f, 0x76, - 0x32, 0x2f, 0x7b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, + 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x72, 0x65, 0x61, 0x64, + 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x22, 0x43, + 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, + 0x3a, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x52, 0x6f, 0x77, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0xb0, 0x01, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, + 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x12, 0x60, 0x0a, 0x14, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x48, 0x7b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, - 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x72, 0x65, 0x61, 0x64, 0x4d, - 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x3a, 0x01, 
0x2a, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0xb0, 0x01, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, - 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, - 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x12, 0x60, 0x0a, 0x14, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, - 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x48, 0x7b, - 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, - 0x69, 0x65, 0x77, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xbb, 0x02, 0x0a, 0x25, 0x47, 0x65, 0x6e, 0x65, + 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xbb, 0x02, 0x0a, 0x25, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x40, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x65, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x41, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x40, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x49, - 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x41, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x65, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8a, 0x01, 0xda, 0x41, 0x0a, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0xda, 0x41, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, - 0x69, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x5b, 0x22, 0x56, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 
0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, - 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x3a, 0x01, 0x2a, 0x30, 0x01, 0x12, 0xe6, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x61, 0x64, 0x43, 0x68, - 0x61, 0x6e, 0x67, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8a, 0x01, 0xda, 0x41, 0x0a, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0xda, 0x41, 0x19, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x5b, 0x3a, 0x01, 0x2a, 0x22, 0x56, + 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, + 0x3a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x61, 0x72, 0x74, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x30, 0x01, 0x12, 0xe6, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x61, + 0x64, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x2b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, - 0x64, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x75, 0xda, 0x41, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0xda, 0x41, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x46, 0x22, 0x41, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x72, 0x65, 0x61, 0x64, 0x43, 0x68, 0x61, - 0x6e, 0x67, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x3a, 0x01, 0x2a, 0x30, 0x01, 0x1a, 0xdb, - 0x02, 0xca, 0x41, 0x17, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0xbd, 0x02, 0x68, - 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 
0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2c, 0x68, 0x74, 0x74, - 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x6f, - 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, - 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, - 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x62, 0x69, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x75, 0xda, 0x41, 0x0a, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0xda, 0x41, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, + 0x5f, 0x69, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x46, 0x3a, 0x01, 0x2a, 0x22, 0x41, 0x2f, 0x76, + 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x72, + 0x65, 0x61, 0x64, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x30, + 0x01, 0x12, 0xab, 0x02, 0x0a, 0x0c, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc5, 0x01, 0xda, 0x41, 0x13, 0x69, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x71, 0x75, 0x65, 0x72, 0x79, 0xda, 0x41, + 0x22, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, + 0x5f, 0x69, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3c, 0x3a, 0x01, 0x2a, 0x22, 0x37, 0x2f, 0x76, + 0x32, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x42, 0x12, 0x2e, 0x0a, 0x0d, 0x69, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x7b, 0x6e, + 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 
0x10, 0x0a, 0x0e, 0x61, + 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x30, 0x01, 0x1a, + 0xdb, 0x02, 0xca, 0x41, 0x17, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0xbd, 0x02, + 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2c, 0x68, 0x74, + 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x72, 0x65, 0x61, + 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, + 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, + 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, - 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, - 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, - 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x42, 0xf6, 0x03, 0x0a, - 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0d, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, - 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x76, 0x32, 0x3b, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0xaa, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, - 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x56, 0x32, 0xca, - 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x1b, 0x47, 0x6f, 0x6f, - 0x67, 
0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x32, 0xea, 0x41, 0x50, 0x0a, 0x25, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x63, 0x65, 0x12, 0x27, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, - 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0xea, 0x41, 0x5c, 0x0a, 0x22, - 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x12, 0x36, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, - 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x7d, 0xea, 0x41, 0x87, 0x01, 0x0a, 0x2b, - 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x75, 0x74, - 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x12, 0x58, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, - 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x7d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, - 0x65, 0x77, 0x73, 0x2f, 0x7b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, - 0x76, 0x69, 0x65, 0x77, 0x7d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x42, 0xf4, 0x03, + 0xea, 0x41, 0x50, 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x27, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x7d, 0xea, 0x41, 0x5c, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x36, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x7d, 0xea, 0x41, 0x87, 0x01, 0x0a, 0x2b, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x75, 0x74, 
0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, + 0x69, 0x65, 0x77, 0x12, 0x58, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x7d, 0x2f, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x73, 0x2f, 0x7b, 0x61, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x7d, 0x0a, 0x16, 0x63, + 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0d, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x70, 0x62, 0x3b, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x70, 0x62, + 0xaa, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x18, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x1b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, + 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -3136,8 +3435,8 @@ func file_google_bigtable_v2_bigtable_proto_rawDescGZIP() []byte { } var file_google_bigtable_v2_bigtable_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_bigtable_v2_bigtable_proto_msgTypes = make([]protoimpl.MessageInfo, 27) -var file_google_bigtable_v2_bigtable_proto_goTypes = []interface{}{ +var file_google_bigtable_v2_bigtable_proto_msgTypes = make([]protoimpl.MessageInfo, 30) +var file_google_bigtable_v2_bigtable_proto_goTypes = []any{ (ReadRowsRequest_RequestStatsView)(0), // 0: google.bigtable.v2.ReadRowsRequest.RequestStatsView (ReadChangeStreamResponse_DataChange_Type)(0), // 1: google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type (*ReadRowsRequest)(nil), // 2: google.bigtable.v2.ReadRowsRequest @@ -3159,92 +3458,106 @@ var file_google_bigtable_v2_bigtable_proto_goTypes = []interface{}{ (*GenerateInitialChangeStreamPartitionsResponse)(nil), // 18: google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse (*ReadChangeStreamRequest)(nil), // 19: google.bigtable.v2.ReadChangeStreamRequest (*ReadChangeStreamResponse)(nil), // 20: google.bigtable.v2.ReadChangeStreamResponse - (*ReadRowsResponse_CellChunk)(nil), // 21: google.bigtable.v2.ReadRowsResponse.CellChunk - (*MutateRowsRequest_Entry)(nil), // 22: google.bigtable.v2.MutateRowsRequest.Entry - (*MutateRowsResponse_Entry)(nil), // 23: google.bigtable.v2.MutateRowsResponse.Entry - (*ReadChangeStreamResponse_MutationChunk)(nil), // 24: google.bigtable.v2.ReadChangeStreamResponse.MutationChunk - (*ReadChangeStreamResponse_DataChange)(nil), // 25: google.bigtable.v2.ReadChangeStreamResponse.DataChange - (*ReadChangeStreamResponse_Heartbeat)(nil), // 26: google.bigtable.v2.ReadChangeStreamResponse.Heartbeat - 
(*ReadChangeStreamResponse_CloseStream)(nil), // 27: google.bigtable.v2.ReadChangeStreamResponse.CloseStream - (*ReadChangeStreamResponse_MutationChunk_ChunkInfo)(nil), // 28: google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo - (*RowSet)(nil), // 29: google.bigtable.v2.RowSet - (*RowFilter)(nil), // 30: google.bigtable.v2.RowFilter - (*RequestStats)(nil), // 31: google.bigtable.v2.RequestStats - (*Mutation)(nil), // 32: google.bigtable.v2.Mutation - (*durationpb.Duration)(nil), // 33: google.protobuf.Duration - (*ReadModifyWriteRule)(nil), // 34: google.bigtable.v2.ReadModifyWriteRule - (*Row)(nil), // 35: google.bigtable.v2.Row - (*StreamPartition)(nil), // 36: google.bigtable.v2.StreamPartition - (*timestamppb.Timestamp)(nil), // 37: google.protobuf.Timestamp - (*StreamContinuationTokens)(nil), // 38: google.bigtable.v2.StreamContinuationTokens - (*wrapperspb.StringValue)(nil), // 39: google.protobuf.StringValue - (*wrapperspb.BytesValue)(nil), // 40: google.protobuf.BytesValue - (*status.Status)(nil), // 41: google.rpc.Status - (*StreamContinuationToken)(nil), // 42: google.bigtable.v2.StreamContinuationToken + (*ExecuteQueryRequest)(nil), // 21: google.bigtable.v2.ExecuteQueryRequest + (*ExecuteQueryResponse)(nil), // 22: google.bigtable.v2.ExecuteQueryResponse + (*ReadRowsResponse_CellChunk)(nil), // 23: google.bigtable.v2.ReadRowsResponse.CellChunk + (*MutateRowsRequest_Entry)(nil), // 24: google.bigtable.v2.MutateRowsRequest.Entry + (*MutateRowsResponse_Entry)(nil), // 25: google.bigtable.v2.MutateRowsResponse.Entry + (*ReadChangeStreamResponse_MutationChunk)(nil), // 26: google.bigtable.v2.ReadChangeStreamResponse.MutationChunk + (*ReadChangeStreamResponse_DataChange)(nil), // 27: google.bigtable.v2.ReadChangeStreamResponse.DataChange + (*ReadChangeStreamResponse_Heartbeat)(nil), // 28: google.bigtable.v2.ReadChangeStreamResponse.Heartbeat + (*ReadChangeStreamResponse_CloseStream)(nil), // 29: google.bigtable.v2.ReadChangeStreamResponse.CloseStream + (*ReadChangeStreamResponse_MutationChunk_ChunkInfo)(nil), // 30: google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + nil, // 31: google.bigtable.v2.ExecuteQueryRequest.ParamsEntry + (*RowSet)(nil), // 32: google.bigtable.v2.RowSet + (*RowFilter)(nil), // 33: google.bigtable.v2.RowFilter + (*RequestStats)(nil), // 34: google.bigtable.v2.RequestStats + (*Mutation)(nil), // 35: google.bigtable.v2.Mutation + (*durationpb.Duration)(nil), // 36: google.protobuf.Duration + (*ReadModifyWriteRule)(nil), // 37: google.bigtable.v2.ReadModifyWriteRule + (*Row)(nil), // 38: google.bigtable.v2.Row + (*StreamPartition)(nil), // 39: google.bigtable.v2.StreamPartition + (*timestamppb.Timestamp)(nil), // 40: google.protobuf.Timestamp + (*StreamContinuationTokens)(nil), // 41: google.bigtable.v2.StreamContinuationTokens + (*ProtoFormat)(nil), // 42: google.bigtable.v2.ProtoFormat + (*ResultSetMetadata)(nil), // 43: google.bigtable.v2.ResultSetMetadata + (*PartialResultSet)(nil), // 44: google.bigtable.v2.PartialResultSet + (*wrapperspb.StringValue)(nil), // 45: google.protobuf.StringValue + (*wrapperspb.BytesValue)(nil), // 46: google.protobuf.BytesValue + (*status.Status)(nil), // 47: google.rpc.Status + (*StreamContinuationToken)(nil), // 48: google.bigtable.v2.StreamContinuationToken + (*Value)(nil), // 49: google.bigtable.v2.Value } var file_google_bigtable_v2_bigtable_proto_depIdxs = []int32{ - 29, // 0: google.bigtable.v2.ReadRowsRequest.rows:type_name -> google.bigtable.v2.RowSet - 30, // 1: 
google.bigtable.v2.ReadRowsRequest.filter:type_name -> google.bigtable.v2.RowFilter + 32, // 0: google.bigtable.v2.ReadRowsRequest.rows:type_name -> google.bigtable.v2.RowSet + 33, // 1: google.bigtable.v2.ReadRowsRequest.filter:type_name -> google.bigtable.v2.RowFilter 0, // 2: google.bigtable.v2.ReadRowsRequest.request_stats_view:type_name -> google.bigtable.v2.ReadRowsRequest.RequestStatsView - 21, // 3: google.bigtable.v2.ReadRowsResponse.chunks:type_name -> google.bigtable.v2.ReadRowsResponse.CellChunk - 31, // 4: google.bigtable.v2.ReadRowsResponse.request_stats:type_name -> google.bigtable.v2.RequestStats - 32, // 5: google.bigtable.v2.MutateRowRequest.mutations:type_name -> google.bigtable.v2.Mutation - 22, // 6: google.bigtable.v2.MutateRowsRequest.entries:type_name -> google.bigtable.v2.MutateRowsRequest.Entry - 23, // 7: google.bigtable.v2.MutateRowsResponse.entries:type_name -> google.bigtable.v2.MutateRowsResponse.Entry + 23, // 3: google.bigtable.v2.ReadRowsResponse.chunks:type_name -> google.bigtable.v2.ReadRowsResponse.CellChunk + 34, // 4: google.bigtable.v2.ReadRowsResponse.request_stats:type_name -> google.bigtable.v2.RequestStats + 35, // 5: google.bigtable.v2.MutateRowRequest.mutations:type_name -> google.bigtable.v2.Mutation + 24, // 6: google.bigtable.v2.MutateRowsRequest.entries:type_name -> google.bigtable.v2.MutateRowsRequest.Entry + 25, // 7: google.bigtable.v2.MutateRowsResponse.entries:type_name -> google.bigtable.v2.MutateRowsResponse.Entry 10, // 8: google.bigtable.v2.MutateRowsResponse.rate_limit_info:type_name -> google.bigtable.v2.RateLimitInfo - 33, // 9: google.bigtable.v2.RateLimitInfo.period:type_name -> google.protobuf.Duration - 30, // 10: google.bigtable.v2.CheckAndMutateRowRequest.predicate_filter:type_name -> google.bigtable.v2.RowFilter - 32, // 11: google.bigtable.v2.CheckAndMutateRowRequest.true_mutations:type_name -> google.bigtable.v2.Mutation - 32, // 12: google.bigtable.v2.CheckAndMutateRowRequest.false_mutations:type_name -> google.bigtable.v2.Mutation - 34, // 13: google.bigtable.v2.ReadModifyWriteRowRequest.rules:type_name -> google.bigtable.v2.ReadModifyWriteRule - 35, // 14: google.bigtable.v2.ReadModifyWriteRowResponse.row:type_name -> google.bigtable.v2.Row - 36, // 15: google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse.partition:type_name -> google.bigtable.v2.StreamPartition - 36, // 16: google.bigtable.v2.ReadChangeStreamRequest.partition:type_name -> google.bigtable.v2.StreamPartition - 37, // 17: google.bigtable.v2.ReadChangeStreamRequest.start_time:type_name -> google.protobuf.Timestamp - 38, // 18: google.bigtable.v2.ReadChangeStreamRequest.continuation_tokens:type_name -> google.bigtable.v2.StreamContinuationTokens - 37, // 19: google.bigtable.v2.ReadChangeStreamRequest.end_time:type_name -> google.protobuf.Timestamp - 33, // 20: google.bigtable.v2.ReadChangeStreamRequest.heartbeat_duration:type_name -> google.protobuf.Duration - 25, // 21: google.bigtable.v2.ReadChangeStreamResponse.data_change:type_name -> google.bigtable.v2.ReadChangeStreamResponse.DataChange - 26, // 22: google.bigtable.v2.ReadChangeStreamResponse.heartbeat:type_name -> google.bigtable.v2.ReadChangeStreamResponse.Heartbeat - 27, // 23: google.bigtable.v2.ReadChangeStreamResponse.close_stream:type_name -> google.bigtable.v2.ReadChangeStreamResponse.CloseStream - 39, // 24: google.bigtable.v2.ReadRowsResponse.CellChunk.family_name:type_name -> google.protobuf.StringValue - 40, // 25: 
google.bigtable.v2.ReadRowsResponse.CellChunk.qualifier:type_name -> google.protobuf.BytesValue - 32, // 26: google.bigtable.v2.MutateRowsRequest.Entry.mutations:type_name -> google.bigtable.v2.Mutation - 41, // 27: google.bigtable.v2.MutateRowsResponse.Entry.status:type_name -> google.rpc.Status - 28, // 28: google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.chunk_info:type_name -> google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo - 32, // 29: google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.mutation:type_name -> google.bigtable.v2.Mutation - 1, // 30: google.bigtable.v2.ReadChangeStreamResponse.DataChange.type:type_name -> google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type - 37, // 31: google.bigtable.v2.ReadChangeStreamResponse.DataChange.commit_timestamp:type_name -> google.protobuf.Timestamp - 24, // 32: google.bigtable.v2.ReadChangeStreamResponse.DataChange.chunks:type_name -> google.bigtable.v2.ReadChangeStreamResponse.MutationChunk - 37, // 33: google.bigtable.v2.ReadChangeStreamResponse.DataChange.estimated_low_watermark:type_name -> google.protobuf.Timestamp - 42, // 34: google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.continuation_token:type_name -> google.bigtable.v2.StreamContinuationToken - 37, // 35: google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.estimated_low_watermark:type_name -> google.protobuf.Timestamp - 41, // 36: google.bigtable.v2.ReadChangeStreamResponse.CloseStream.status:type_name -> google.rpc.Status - 42, // 37: google.bigtable.v2.ReadChangeStreamResponse.CloseStream.continuation_tokens:type_name -> google.bigtable.v2.StreamContinuationToken - 36, // 38: google.bigtable.v2.ReadChangeStreamResponse.CloseStream.new_partitions:type_name -> google.bigtable.v2.StreamPartition - 2, // 39: google.bigtable.v2.Bigtable.ReadRows:input_type -> google.bigtable.v2.ReadRowsRequest - 4, // 40: google.bigtable.v2.Bigtable.SampleRowKeys:input_type -> google.bigtable.v2.SampleRowKeysRequest - 6, // 41: google.bigtable.v2.Bigtable.MutateRow:input_type -> google.bigtable.v2.MutateRowRequest - 8, // 42: google.bigtable.v2.Bigtable.MutateRows:input_type -> google.bigtable.v2.MutateRowsRequest - 11, // 43: google.bigtable.v2.Bigtable.CheckAndMutateRow:input_type -> google.bigtable.v2.CheckAndMutateRowRequest - 13, // 44: google.bigtable.v2.Bigtable.PingAndWarm:input_type -> google.bigtable.v2.PingAndWarmRequest - 15, // 45: google.bigtable.v2.Bigtable.ReadModifyWriteRow:input_type -> google.bigtable.v2.ReadModifyWriteRowRequest - 17, // 46: google.bigtable.v2.Bigtable.GenerateInitialChangeStreamPartitions:input_type -> google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest - 19, // 47: google.bigtable.v2.Bigtable.ReadChangeStream:input_type -> google.bigtable.v2.ReadChangeStreamRequest - 3, // 48: google.bigtable.v2.Bigtable.ReadRows:output_type -> google.bigtable.v2.ReadRowsResponse - 5, // 49: google.bigtable.v2.Bigtable.SampleRowKeys:output_type -> google.bigtable.v2.SampleRowKeysResponse - 7, // 50: google.bigtable.v2.Bigtable.MutateRow:output_type -> google.bigtable.v2.MutateRowResponse - 9, // 51: google.bigtable.v2.Bigtable.MutateRows:output_type -> google.bigtable.v2.MutateRowsResponse - 12, // 52: google.bigtable.v2.Bigtable.CheckAndMutateRow:output_type -> google.bigtable.v2.CheckAndMutateRowResponse - 14, // 53: google.bigtable.v2.Bigtable.PingAndWarm:output_type -> google.bigtable.v2.PingAndWarmResponse - 16, // 54: google.bigtable.v2.Bigtable.ReadModifyWriteRow:output_type -> 
google.bigtable.v2.ReadModifyWriteRowResponse - 18, // 55: google.bigtable.v2.Bigtable.GenerateInitialChangeStreamPartitions:output_type -> google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse - 20, // 56: google.bigtable.v2.Bigtable.ReadChangeStream:output_type -> google.bigtable.v2.ReadChangeStreamResponse - 48, // [48:57] is the sub-list for method output_type - 39, // [39:48] is the sub-list for method input_type - 39, // [39:39] is the sub-list for extension type_name - 39, // [39:39] is the sub-list for extension extendee - 0, // [0:39] is the sub-list for field type_name + 36, // 9: google.bigtable.v2.RateLimitInfo.period:type_name -> google.protobuf.Duration + 33, // 10: google.bigtable.v2.CheckAndMutateRowRequest.predicate_filter:type_name -> google.bigtable.v2.RowFilter + 35, // 11: google.bigtable.v2.CheckAndMutateRowRequest.true_mutations:type_name -> google.bigtable.v2.Mutation + 35, // 12: google.bigtable.v2.CheckAndMutateRowRequest.false_mutations:type_name -> google.bigtable.v2.Mutation + 37, // 13: google.bigtable.v2.ReadModifyWriteRowRequest.rules:type_name -> google.bigtable.v2.ReadModifyWriteRule + 38, // 14: google.bigtable.v2.ReadModifyWriteRowResponse.row:type_name -> google.bigtable.v2.Row + 39, // 15: google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse.partition:type_name -> google.bigtable.v2.StreamPartition + 39, // 16: google.bigtable.v2.ReadChangeStreamRequest.partition:type_name -> google.bigtable.v2.StreamPartition + 40, // 17: google.bigtable.v2.ReadChangeStreamRequest.start_time:type_name -> google.protobuf.Timestamp + 41, // 18: google.bigtable.v2.ReadChangeStreamRequest.continuation_tokens:type_name -> google.bigtable.v2.StreamContinuationTokens + 40, // 19: google.bigtable.v2.ReadChangeStreamRequest.end_time:type_name -> google.protobuf.Timestamp + 36, // 20: google.bigtable.v2.ReadChangeStreamRequest.heartbeat_duration:type_name -> google.protobuf.Duration + 27, // 21: google.bigtable.v2.ReadChangeStreamResponse.data_change:type_name -> google.bigtable.v2.ReadChangeStreamResponse.DataChange + 28, // 22: google.bigtable.v2.ReadChangeStreamResponse.heartbeat:type_name -> google.bigtable.v2.ReadChangeStreamResponse.Heartbeat + 29, // 23: google.bigtable.v2.ReadChangeStreamResponse.close_stream:type_name -> google.bigtable.v2.ReadChangeStreamResponse.CloseStream + 42, // 24: google.bigtable.v2.ExecuteQueryRequest.proto_format:type_name -> google.bigtable.v2.ProtoFormat + 31, // 25: google.bigtable.v2.ExecuteQueryRequest.params:type_name -> google.bigtable.v2.ExecuteQueryRequest.ParamsEntry + 43, // 26: google.bigtable.v2.ExecuteQueryResponse.metadata:type_name -> google.bigtable.v2.ResultSetMetadata + 44, // 27: google.bigtable.v2.ExecuteQueryResponse.results:type_name -> google.bigtable.v2.PartialResultSet + 45, // 28: google.bigtable.v2.ReadRowsResponse.CellChunk.family_name:type_name -> google.protobuf.StringValue + 46, // 29: google.bigtable.v2.ReadRowsResponse.CellChunk.qualifier:type_name -> google.protobuf.BytesValue + 35, // 30: google.bigtable.v2.MutateRowsRequest.Entry.mutations:type_name -> google.bigtable.v2.Mutation + 47, // 31: google.bigtable.v2.MutateRowsResponse.Entry.status:type_name -> google.rpc.Status + 30, // 32: google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.chunk_info:type_name -> google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + 35, // 33: google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.mutation:type_name -> google.bigtable.v2.Mutation + 1, // 34: 
google.bigtable.v2.ReadChangeStreamResponse.DataChange.type:type_name -> google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type + 40, // 35: google.bigtable.v2.ReadChangeStreamResponse.DataChange.commit_timestamp:type_name -> google.protobuf.Timestamp + 26, // 36: google.bigtable.v2.ReadChangeStreamResponse.DataChange.chunks:type_name -> google.bigtable.v2.ReadChangeStreamResponse.MutationChunk + 40, // 37: google.bigtable.v2.ReadChangeStreamResponse.DataChange.estimated_low_watermark:type_name -> google.protobuf.Timestamp + 48, // 38: google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.continuation_token:type_name -> google.bigtable.v2.StreamContinuationToken + 40, // 39: google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.estimated_low_watermark:type_name -> google.protobuf.Timestamp + 47, // 40: google.bigtable.v2.ReadChangeStreamResponse.CloseStream.status:type_name -> google.rpc.Status + 48, // 41: google.bigtable.v2.ReadChangeStreamResponse.CloseStream.continuation_tokens:type_name -> google.bigtable.v2.StreamContinuationToken + 39, // 42: google.bigtable.v2.ReadChangeStreamResponse.CloseStream.new_partitions:type_name -> google.bigtable.v2.StreamPartition + 49, // 43: google.bigtable.v2.ExecuteQueryRequest.ParamsEntry.value:type_name -> google.bigtable.v2.Value + 2, // 44: google.bigtable.v2.Bigtable.ReadRows:input_type -> google.bigtable.v2.ReadRowsRequest + 4, // 45: google.bigtable.v2.Bigtable.SampleRowKeys:input_type -> google.bigtable.v2.SampleRowKeysRequest + 6, // 46: google.bigtable.v2.Bigtable.MutateRow:input_type -> google.bigtable.v2.MutateRowRequest + 8, // 47: google.bigtable.v2.Bigtable.MutateRows:input_type -> google.bigtable.v2.MutateRowsRequest + 11, // 48: google.bigtable.v2.Bigtable.CheckAndMutateRow:input_type -> google.bigtable.v2.CheckAndMutateRowRequest + 13, // 49: google.bigtable.v2.Bigtable.PingAndWarm:input_type -> google.bigtable.v2.PingAndWarmRequest + 15, // 50: google.bigtable.v2.Bigtable.ReadModifyWriteRow:input_type -> google.bigtable.v2.ReadModifyWriteRowRequest + 17, // 51: google.bigtable.v2.Bigtable.GenerateInitialChangeStreamPartitions:input_type -> google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest + 19, // 52: google.bigtable.v2.Bigtable.ReadChangeStream:input_type -> google.bigtable.v2.ReadChangeStreamRequest + 21, // 53: google.bigtable.v2.Bigtable.ExecuteQuery:input_type -> google.bigtable.v2.ExecuteQueryRequest + 3, // 54: google.bigtable.v2.Bigtable.ReadRows:output_type -> google.bigtable.v2.ReadRowsResponse + 5, // 55: google.bigtable.v2.Bigtable.SampleRowKeys:output_type -> google.bigtable.v2.SampleRowKeysResponse + 7, // 56: google.bigtable.v2.Bigtable.MutateRow:output_type -> google.bigtable.v2.MutateRowResponse + 9, // 57: google.bigtable.v2.Bigtable.MutateRows:output_type -> google.bigtable.v2.MutateRowsResponse + 12, // 58: google.bigtable.v2.Bigtable.CheckAndMutateRow:output_type -> google.bigtable.v2.CheckAndMutateRowResponse + 14, // 59: google.bigtable.v2.Bigtable.PingAndWarm:output_type -> google.bigtable.v2.PingAndWarmResponse + 16, // 60: google.bigtable.v2.Bigtable.ReadModifyWriteRow:output_type -> google.bigtable.v2.ReadModifyWriteRowResponse + 18, // 61: google.bigtable.v2.Bigtable.GenerateInitialChangeStreamPartitions:output_type -> google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse + 20, // 62: google.bigtable.v2.Bigtable.ReadChangeStream:output_type -> google.bigtable.v2.ReadChangeStreamResponse + 22, // 63: google.bigtable.v2.Bigtable.ExecuteQuery:output_type -> 
google.bigtable.v2.ExecuteQueryResponse + 54, // [54:64] is the sub-list for method output_type + 44, // [44:54] is the sub-list for method input_type + 44, // [44:44] is the sub-list for extension type_name + 44, // [44:44] is the sub-list for extension extendee + 0, // [0:44] is the sub-list for field type_name } func init() { file_google_bigtable_v2_bigtable_proto_init() } @@ -3255,7 +3568,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { file_google_bigtable_v2_data_proto_init() file_google_bigtable_v2_request_stats_proto_init() if !protoimpl.UnsafeEnabled { - file_google_bigtable_v2_bigtable_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*ReadRowsRequest); i { case 0: return &v.state @@ -3267,7 +3580,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ReadRowsResponse); i { case 0: return &v.state @@ -3279,7 +3592,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*SampleRowKeysRequest); i { case 0: return &v.state @@ -3291,7 +3604,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*SampleRowKeysResponse); i { case 0: return &v.state @@ -3303,7 +3616,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*MutateRowRequest); i { case 0: return &v.state @@ -3315,7 +3628,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*MutateRowResponse); i { case 0: return &v.state @@ -3327,7 +3640,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*MutateRowsRequest); i { case 0: return &v.state @@ -3339,7 +3652,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*MutateRowsResponse); i { case 0: return &v.state @@ -3351,7 +3664,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[8].Exporter = func(v any, i int) 
any { switch v := v.(*RateLimitInfo); i { case 0: return &v.state @@ -3363,7 +3676,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*CheckAndMutateRowRequest); i { case 0: return &v.state @@ -3375,7 +3688,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*CheckAndMutateRowResponse); i { case 0: return &v.state @@ -3387,7 +3700,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*PingAndWarmRequest); i { case 0: return &v.state @@ -3399,7 +3712,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*PingAndWarmResponse); i { case 0: return &v.state @@ -3411,7 +3724,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*ReadModifyWriteRowRequest); i { case 0: return &v.state @@ -3423,7 +3736,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*ReadModifyWriteRowResponse); i { case 0: return &v.state @@ -3435,7 +3748,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*GenerateInitialChangeStreamPartitionsRequest); i { case 0: return &v.state @@ -3447,7 +3760,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*GenerateInitialChangeStreamPartitionsResponse); i { case 0: return &v.state @@ -3459,7 +3772,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[17].Exporter = func(v any, i int) any { switch v := v.(*ReadChangeStreamRequest); i { case 0: return &v.state @@ -3471,7 +3784,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + 
file_google_bigtable_v2_bigtable_proto_msgTypes[18].Exporter = func(v any, i int) any { switch v := v.(*ReadChangeStreamResponse); i { case 0: return &v.state @@ -3483,7 +3796,31 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[19].Exporter = func(v any, i int) any { + switch v := v.(*ExecuteQueryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_bigtable_proto_msgTypes[20].Exporter = func(v any, i int) any { + switch v := v.(*ExecuteQueryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_bigtable_proto_msgTypes[21].Exporter = func(v any, i int) any { switch v := v.(*ReadRowsResponse_CellChunk); i { case 0: return &v.state @@ -3495,7 +3832,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[22].Exporter = func(v any, i int) any { switch v := v.(*MutateRowsRequest_Entry); i { case 0: return &v.state @@ -3507,7 +3844,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[23].Exporter = func(v any, i int) any { switch v := v.(*MutateRowsResponse_Entry); i { case 0: return &v.state @@ -3519,7 +3856,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[24].Exporter = func(v any, i int) any { switch v := v.(*ReadChangeStreamResponse_MutationChunk); i { case 0: return &v.state @@ -3531,7 +3868,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[25].Exporter = func(v any, i int) any { switch v := v.(*ReadChangeStreamResponse_DataChange); i { case 0: return &v.state @@ -3543,7 +3880,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[26].Exporter = func(v any, i int) any { switch v := v.(*ReadChangeStreamResponse_Heartbeat); i { case 0: return &v.state @@ -3555,7 +3892,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[27].Exporter = func(v any, i int) any { switch v := v.(*ReadChangeStreamResponse_CloseStream); i { case 0: return &v.state @@ -3567,7 +3904,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return nil } } - file_google_bigtable_v2_bigtable_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_bigtable_proto_msgTypes[28].Exporter = func(v any, i int) any { switch v := 
v.(*ReadChangeStreamResponse_MutationChunk_ChunkInfo); i { case 0: return &v.state @@ -3580,17 +3917,24 @@ func file_google_bigtable_v2_bigtable_proto_init() { } } } - file_google_bigtable_v2_bigtable_proto_msgTypes[7].OneofWrappers = []interface{}{} - file_google_bigtable_v2_bigtable_proto_msgTypes[17].OneofWrappers = []interface{}{ + file_google_bigtable_v2_bigtable_proto_msgTypes[7].OneofWrappers = []any{} + file_google_bigtable_v2_bigtable_proto_msgTypes[17].OneofWrappers = []any{ (*ReadChangeStreamRequest_StartTime)(nil), (*ReadChangeStreamRequest_ContinuationTokens)(nil), } - file_google_bigtable_v2_bigtable_proto_msgTypes[18].OneofWrappers = []interface{}{ + file_google_bigtable_v2_bigtable_proto_msgTypes[18].OneofWrappers = []any{ (*ReadChangeStreamResponse_DataChange_)(nil), (*ReadChangeStreamResponse_Heartbeat_)(nil), (*ReadChangeStreamResponse_CloseStream_)(nil), } - file_google_bigtable_v2_bigtable_proto_msgTypes[19].OneofWrappers = []interface{}{ + file_google_bigtable_v2_bigtable_proto_msgTypes[19].OneofWrappers = []any{ + (*ExecuteQueryRequest_ProtoFormat)(nil), + } + file_google_bigtable_v2_bigtable_proto_msgTypes[20].OneofWrappers = []any{ + (*ExecuteQueryResponse_Metadata)(nil), + (*ExecuteQueryResponse_Results)(nil), + } + file_google_bigtable_v2_bigtable_proto_msgTypes[21].OneofWrappers = []any{ (*ReadRowsResponse_CellChunk_ResetRow)(nil), (*ReadRowsResponse_CellChunk_CommitRow)(nil), } @@ -3600,7 +3944,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_bigtable_v2_bigtable_proto_rawDesc, NumEnums: 2, - NumMessages: 27, + NumMessages: 30, NumExtensions: 0, NumServices: 1, }, @@ -3666,6 +4010,8 @@ type BigtableClient interface { // reflect both user-initiated mutations and mutations that are caused by // garbage collection. ReadChangeStream(ctx context.Context, in *ReadChangeStreamRequest, opts ...grpc.CallOption) (Bigtable_ReadChangeStreamClient, error) + // Executes a BTQL query against a particular Cloud Bigtable instance. + ExecuteQuery(ctx context.Context, in *ExecuteQueryRequest, opts ...grpc.CallOption) (Bigtable_ExecuteQueryClient, error) } type bigtableClient struct { @@ -3872,6 +4218,38 @@ func (x *bigtableReadChangeStreamClient) Recv() (*ReadChangeStreamResponse, erro return m, nil } +func (c *bigtableClient) ExecuteQuery(ctx context.Context, in *ExecuteQueryRequest, opts ...grpc.CallOption) (Bigtable_ExecuteQueryClient, error) { + stream, err := c.cc.NewStream(ctx, &_Bigtable_serviceDesc.Streams[5], "/google.bigtable.v2.Bigtable/ExecuteQuery", opts...) + if err != nil { + return nil, err + } + x := &bigtableExecuteQueryClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Bigtable_ExecuteQueryClient interface { + Recv() (*ExecuteQueryResponse, error) + grpc.ClientStream +} + +type bigtableExecuteQueryClient struct { + grpc.ClientStream +} + +func (x *bigtableExecuteQueryClient) Recv() (*ExecuteQueryResponse, error) { + m := new(ExecuteQueryResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + // BigtableServer is the server API for Bigtable service. 
type BigtableServer interface { // Streams back the contents of all requested rows in key order, optionally @@ -3913,6 +4291,8 @@ type BigtableServer interface { // reflect both user-initiated mutations and mutations that are caused by // garbage collection. ReadChangeStream(*ReadChangeStreamRequest, Bigtable_ReadChangeStreamServer) error + // Executes a BTQL query against a particular Cloud Bigtable instance. + ExecuteQuery(*ExecuteQueryRequest, Bigtable_ExecuteQueryServer) error } // UnimplementedBigtableServer can be embedded to have forward compatible implementations. @@ -3946,6 +4326,9 @@ func (*UnimplementedBigtableServer) GenerateInitialChangeStreamPartitions(*Gener func (*UnimplementedBigtableServer) ReadChangeStream(*ReadChangeStreamRequest, Bigtable_ReadChangeStreamServer) error { return status1.Errorf(codes.Unimplemented, "method ReadChangeStream not implemented") } +func (*UnimplementedBigtableServer) ExecuteQuery(*ExecuteQueryRequest, Bigtable_ExecuteQueryServer) error { + return status1.Errorf(codes.Unimplemented, "method ExecuteQuery not implemented") +} func RegisterBigtableServer(s *grpc.Server, srv BigtableServer) { s.RegisterService(&_Bigtable_serviceDesc, srv) @@ -4128,6 +4511,27 @@ func (x *bigtableReadChangeStreamServer) Send(m *ReadChangeStreamResponse) error return x.ServerStream.SendMsg(m) } +func _Bigtable_ExecuteQuery_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ExecuteQueryRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(BigtableServer).ExecuteQuery(m, &bigtableExecuteQueryServer{stream}) +} + +type Bigtable_ExecuteQueryServer interface { + Send(*ExecuteQueryResponse) error + grpc.ServerStream +} + +type bigtableExecuteQueryServer struct { + grpc.ServerStream +} + +func (x *bigtableExecuteQueryServer) Send(m *ExecuteQueryResponse) error { + return x.ServerStream.SendMsg(m) +} + var _Bigtable_serviceDesc = grpc.ServiceDesc{ ServiceName: "google.bigtable.v2.Bigtable", HandlerType: (*BigtableServer)(nil), @@ -4175,6 +4579,11 @@ var _Bigtable_serviceDesc = grpc.ServiceDesc{ Handler: _Bigtable_ReadChangeStream_Handler, ServerStreams: true, }, + { + StreamName: "ExecuteQuery", + Handler: _Bigtable_ExecuteQuery_Handler, + ServerStreams: true, + }, }, Metadata: "google/bigtable/v2/bigtable.proto", } diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/v2/data.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/data.pb.go similarity index 57% rename from terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/v2/data.pb.go rename to terraform/providers/google/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/data.pb.go index 1f2ea8d0aa2..0021c1a61e2 100644 --- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/v2/data.pb.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/data.pb.go @@ -14,19 +14,21 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 -// protoc v4.24.4 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/bigtable/v2/data.proto -package bigtable +package bigtablepb import ( reflect "reflect" sync "sync" _ "google.golang.org/genproto/googleapis/api/annotations" + date "google.golang.org/genproto/googleapis/type/date" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -308,6 +310,20 @@ type Value struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // The verified `Type` of this `Value`, if it cannot be inferred. + // + // Read results will never specify the encoding for `type` since the value + // will already have been decoded by the server. Furthermore, the `type` will + // be omitted entirely if it can be inferred from a previous response. The + // exact semantics for inferring `type` will vary, and are therefore + // documented separately for each read method. + // + // When using composite types (Struct, Array, Map) only the outermost `Value` + // will specify the `type`. This top-level `type` will define the types for + // any nested `Struct' fields, `Array` elements, or `Map` key/value pairs. + // If a nested `Value` provides a `type` on write, the request will be + // rejected with INVALID_ARGUMENT. + Type *Type `protobuf:"bytes,7,opt,name=type,proto3" json:"type,omitempty"` // Options for transporting values within the protobuf type system. A given // `kind` may support more than one `type` and vice versa. On write, this is // roughly analogous to a GoogleSQL literal. @@ -319,7 +335,14 @@ type Value struct { // // *Value_RawValue // *Value_RawTimestampMicros + // *Value_BytesValue + // *Value_StringValue // *Value_IntValue + // *Value_BoolValue + // *Value_FloatValue + // *Value_TimestampValue + // *Value_DateValue + // *Value_ArrayValue Kind isValue_Kind `protobuf_oneof:"kind"` } @@ -355,6 +378,13 @@ func (*Value) Descriptor() ([]byte, []int) { return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{4} } +func (x *Value) GetType() *Type { + if x != nil { + return x.Type + } + return nil +} + func (m *Value) GetKind() isValue_Kind { if m != nil { return m.Kind @@ -376,6 +406,20 @@ func (x *Value) GetRawTimestampMicros() int64 { return 0 } +func (x *Value) GetBytesValue() []byte { + if x, ok := x.GetKind().(*Value_BytesValue); ok { + return x.BytesValue + } + return nil +} + +func (x *Value) GetStringValue() string { + if x, ok := x.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + func (x *Value) GetIntValue() int64 { if x, ok := x.GetKind().(*Value_IntValue); ok { return x.IntValue @@ -383,6 +427,41 @@ func (x *Value) GetIntValue() int64 { return 0 } +func (x *Value) GetBoolValue() bool { + if x, ok := x.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *Value) GetFloatValue() float64 { + if x, ok := x.GetKind().(*Value_FloatValue); ok { + return x.FloatValue + } + return 0 +} + +func (x *Value) GetTimestampValue() *timestamppb.Timestamp { + if x, ok := x.GetKind().(*Value_TimestampValue); ok { + return x.TimestampValue + } + return nil +} + +func (x *Value) GetDateValue() *date.Date { + if x, ok := x.GetKind().(*Value_DateValue); ok { + return x.DateValue + } + return nil +} + +func (x *Value) GetArrayValue() *ArrayValue { + if x, ok := x.GetKind().(*Value_ArrayValue); ok { + return x.ArrayValue + } + return nil +} 
+ type isValue_Kind interface { isValue_Kind() } @@ -399,18 +478,119 @@ type Value_RawTimestampMicros struct { RawTimestampMicros int64 `protobuf:"varint,9,opt,name=raw_timestamp_micros,json=rawTimestampMicros,proto3,oneof"` } +type Value_BytesValue struct { + // Represents a typed value transported as a byte sequence. + BytesValue []byte `protobuf:"bytes,2,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +type Value_StringValue struct { + // Represents a typed value transported as a string. + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` +} + type Value_IntValue struct { // Represents a typed value transported as an integer. - // Default type for writes: `Int64` IntValue int64 `protobuf:"varint,6,opt,name=int_value,json=intValue,proto3,oneof"` } +type Value_BoolValue struct { + // Represents a typed value transported as a boolean. + BoolValue bool `protobuf:"varint,10,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Value_FloatValue struct { + // Represents a typed value transported as a floating point number. + FloatValue float64 `protobuf:"fixed64,11,opt,name=float_value,json=floatValue,proto3,oneof"` +} + +type Value_TimestampValue struct { + // Represents a typed value transported as a timestamp. + TimestampValue *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=timestamp_value,json=timestampValue,proto3,oneof"` +} + +type Value_DateValue struct { + // Represents a typed value transported as a date. + DateValue *date.Date `protobuf:"bytes,13,opt,name=date_value,json=dateValue,proto3,oneof"` +} + +type Value_ArrayValue struct { + // Represents a typed value transported as a sequence of values. + // To differentiate between `Struct`, `Array`, and `Map`, the outermost + // `Value` must provide an explicit `type` on write. This `type` will + // apply recursively to the nested `Struct` fields, `Array` elements, + // or `Map` key/value pairs, which *must not* supply their own `type`. + ArrayValue *ArrayValue `protobuf:"bytes,4,opt,name=array_value,json=arrayValue,proto3,oneof"` +} + func (*Value_RawValue) isValue_Kind() {} func (*Value_RawTimestampMicros) isValue_Kind() {} +func (*Value_BytesValue) isValue_Kind() {} + +func (*Value_StringValue) isValue_Kind() {} + func (*Value_IntValue) isValue_Kind() {} +func (*Value_BoolValue) isValue_Kind() {} + +func (*Value_FloatValue) isValue_Kind() {} + +func (*Value_TimestampValue) isValue_Kind() {} + +func (*Value_DateValue) isValue_Kind() {} + +func (*Value_ArrayValue) isValue_Kind() {} + +// `ArrayValue` is an ordered list of `Value`. +type ArrayValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The ordered elements in the array. 
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +func (x *ArrayValue) Reset() { + *x = ArrayValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_data_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ArrayValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ArrayValue) ProtoMessage() {} + +func (x *ArrayValue) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_data_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ArrayValue.ProtoReflect.Descriptor instead. +func (*ArrayValue) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{5} +} + +func (x *ArrayValue) GetValues() []*Value { + if x != nil { + return x.Values + } + return nil +} + // Specifies a contiguous range of rows. type RowRange struct { state protoimpl.MessageState @@ -438,7 +618,7 @@ type RowRange struct { func (x *RowRange) Reset() { *x = RowRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_data_proto_msgTypes[5] + mi := &file_google_bigtable_v2_data_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -451,7 +631,7 @@ func (x *RowRange) String() string { func (*RowRange) ProtoMessage() {} func (x *RowRange) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_data_proto_msgTypes[5] + mi := &file_google_bigtable_v2_data_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -464,7 +644,7 @@ func (x *RowRange) ProtoReflect() protoreflect.Message { // Deprecated: Use RowRange.ProtoReflect.Descriptor instead. func (*RowRange) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{5} + return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{6} } func (m *RowRange) GetStartKey() isRowRange_StartKey { @@ -560,7 +740,7 @@ type RowSet struct { func (x *RowSet) Reset() { *x = RowSet{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_data_proto_msgTypes[6] + mi := &file_google_bigtable_v2_data_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -573,7 +753,7 @@ func (x *RowSet) String() string { func (*RowSet) ProtoMessage() {} func (x *RowSet) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_data_proto_msgTypes[6] + mi := &file_google_bigtable_v2_data_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -586,7 +766,7 @@ func (x *RowSet) ProtoReflect() protoreflect.Message { // Deprecated: Use RowSet.ProtoReflect.Descriptor instead. 
func (*RowSet) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{6} + return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{7} } func (x *RowSet) GetRowKeys() [][]byte { @@ -635,7 +815,7 @@ type ColumnRange struct { func (x *ColumnRange) Reset() { *x = ColumnRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_data_proto_msgTypes[7] + mi := &file_google_bigtable_v2_data_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -648,7 +828,7 @@ func (x *ColumnRange) String() string { func (*ColumnRange) ProtoMessage() {} func (x *ColumnRange) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_data_proto_msgTypes[7] + mi := &file_google_bigtable_v2_data_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -661,7 +841,7 @@ func (x *ColumnRange) ProtoReflect() protoreflect.Message { // Deprecated: Use ColumnRange.ProtoReflect.Descriptor instead. func (*ColumnRange) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{7} + return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{8} } func (x *ColumnRange) GetFamilyName() string { @@ -764,7 +944,7 @@ type TimestampRange struct { func (x *TimestampRange) Reset() { *x = TimestampRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_data_proto_msgTypes[8] + mi := &file_google_bigtable_v2_data_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -777,7 +957,7 @@ func (x *TimestampRange) String() string { func (*TimestampRange) ProtoMessage() {} func (x *TimestampRange) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_data_proto_msgTypes[8] + mi := &file_google_bigtable_v2_data_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -790,7 +970,7 @@ func (x *TimestampRange) ProtoReflect() protoreflect.Message { // Deprecated: Use TimestampRange.ProtoReflect.Descriptor instead. func (*TimestampRange) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{8} + return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{9} } func (x *TimestampRange) GetStartTimestampMicros() int64 { @@ -834,7 +1014,7 @@ type ValueRange struct { func (x *ValueRange) Reset() { *x = ValueRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_data_proto_msgTypes[9] + mi := &file_google_bigtable_v2_data_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -847,7 +1027,7 @@ func (x *ValueRange) String() string { func (*ValueRange) ProtoMessage() {} func (x *ValueRange) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_data_proto_msgTypes[9] + mi := &file_google_bigtable_v2_data_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -860,7 +1040,7 @@ func (x *ValueRange) ProtoReflect() protoreflect.Message { // Deprecated: Use ValueRange.ProtoReflect.Descriptor instead. 
func (*ValueRange) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{9} + return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{10} } func (m *ValueRange) GetStartValue() isValueRange_StartValue { @@ -1009,7 +1189,7 @@ type RowFilter struct { func (x *RowFilter) Reset() { *x = RowFilter{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_data_proto_msgTypes[10] + mi := &file_google_bigtable_v2_data_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1022,7 +1202,7 @@ func (x *RowFilter) String() string { func (*RowFilter) ProtoMessage() {} func (x *RowFilter) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_data_proto_msgTypes[10] + mi := &file_google_bigtable_v2_data_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1035,7 +1215,7 @@ func (x *RowFilter) ProtoReflect() protoreflect.Message { // Deprecated: Use RowFilter.ProtoReflect.Descriptor instead. func (*RowFilter) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{10} + return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{11} } func (m *RowFilter) GetFilter() isRowFilter_Filter { @@ -1432,6 +1612,7 @@ type Mutation struct { // // *Mutation_SetCell_ // *Mutation_AddToCell_ + // *Mutation_MergeToCell_ // *Mutation_DeleteFromColumn_ // *Mutation_DeleteFromFamily_ // *Mutation_DeleteFromRow_ @@ -1441,7 +1622,7 @@ type Mutation struct { func (x *Mutation) Reset() { *x = Mutation{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_data_proto_msgTypes[11] + mi := &file_google_bigtable_v2_data_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1454,7 +1635,7 @@ func (x *Mutation) String() string { func (*Mutation) ProtoMessage() {} func (x *Mutation) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_data_proto_msgTypes[11] + mi := &file_google_bigtable_v2_data_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1467,7 +1648,7 @@ func (x *Mutation) ProtoReflect() protoreflect.Message { // Deprecated: Use Mutation.ProtoReflect.Descriptor instead. func (*Mutation) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{11} + return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{12} } func (m *Mutation) GetMutation() isMutation_Mutation { @@ -1491,6 +1672,13 @@ func (x *Mutation) GetAddToCell() *Mutation_AddToCell { return nil } +func (x *Mutation) GetMergeToCell() *Mutation_MergeToCell { + if x, ok := x.GetMutation().(*Mutation_MergeToCell_); ok { + return x.MergeToCell + } + return nil +} + func (x *Mutation) GetDeleteFromColumn() *Mutation_DeleteFromColumn { if x, ok := x.GetMutation().(*Mutation_DeleteFromColumn_); ok { return x.DeleteFromColumn @@ -1526,6 +1714,11 @@ type Mutation_AddToCell_ struct { AddToCell *Mutation_AddToCell `protobuf:"bytes,5,opt,name=add_to_cell,json=addToCell,proto3,oneof"` } +type Mutation_MergeToCell_ struct { + // Merges accumulated state to an `Aggregate` cell. + MergeToCell *Mutation_MergeToCell `protobuf:"bytes,6,opt,name=merge_to_cell,json=mergeToCell,proto3,oneof"` +} + type Mutation_DeleteFromColumn_ struct { // Deletes cells from a column. 
DeleteFromColumn *Mutation_DeleteFromColumn `protobuf:"bytes,2,opt,name=delete_from_column,json=deleteFromColumn,proto3,oneof"` @@ -1545,6 +1738,8 @@ func (*Mutation_SetCell_) isMutation_Mutation() {} func (*Mutation_AddToCell_) isMutation_Mutation() {} +func (*Mutation_MergeToCell_) isMutation_Mutation() {} + func (*Mutation_DeleteFromColumn_) isMutation_Mutation() {} func (*Mutation_DeleteFromFamily_) isMutation_Mutation() {} @@ -1578,7 +1773,7 @@ type ReadModifyWriteRule struct { func (x *ReadModifyWriteRule) Reset() { *x = ReadModifyWriteRule{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_data_proto_msgTypes[12] + mi := &file_google_bigtable_v2_data_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1591,7 +1786,7 @@ func (x *ReadModifyWriteRule) String() string { func (*ReadModifyWriteRule) ProtoMessage() {} func (x *ReadModifyWriteRule) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_data_proto_msgTypes[12] + mi := &file_google_bigtable_v2_data_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1604,7 +1799,7 @@ func (x *ReadModifyWriteRule) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadModifyWriteRule.ProtoReflect.Descriptor instead. func (*ReadModifyWriteRule) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{12} + return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{13} } func (x *ReadModifyWriteRule) GetFamilyName() string { @@ -1680,7 +1875,7 @@ type StreamPartition struct { func (x *StreamPartition) Reset() { *x = StreamPartition{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_data_proto_msgTypes[13] + mi := &file_google_bigtable_v2_data_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1693,7 +1888,7 @@ func (x *StreamPartition) String() string { func (*StreamPartition) ProtoMessage() {} func (x *StreamPartition) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_data_proto_msgTypes[13] + mi := &file_google_bigtable_v2_data_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1706,7 +1901,7 @@ func (x *StreamPartition) ProtoReflect() protoreflect.Message { // Deprecated: Use StreamPartition.ProtoReflect.Descriptor instead. 
func (*StreamPartition) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{13} + return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{14} } func (x *StreamPartition) GetRowRange() *RowRange { @@ -1731,7 +1926,7 @@ type StreamContinuationTokens struct { func (x *StreamContinuationTokens) Reset() { *x = StreamContinuationTokens{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_data_proto_msgTypes[14] + mi := &file_google_bigtable_v2_data_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1744,7 +1939,7 @@ func (x *StreamContinuationTokens) String() string { func (*StreamContinuationTokens) ProtoMessage() {} func (x *StreamContinuationTokens) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_data_proto_msgTypes[14] + mi := &file_google_bigtable_v2_data_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1757,7 +1952,7 @@ func (x *StreamContinuationTokens) ProtoReflect() protoreflect.Message { // Deprecated: Use StreamContinuationTokens.ProtoReflect.Descriptor instead. func (*StreamContinuationTokens) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{14} + return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{15} } func (x *StreamContinuationTokens) GetTokens() []*StreamContinuationToken { @@ -1784,7 +1979,7 @@ type StreamContinuationToken struct { func (x *StreamContinuationToken) Reset() { *x = StreamContinuationToken{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_data_proto_msgTypes[15] + mi := &file_google_bigtable_v2_data_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1797,7 +1992,7 @@ func (x *StreamContinuationToken) String() string { func (*StreamContinuationToken) ProtoMessage() {} func (x *StreamContinuationToken) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_data_proto_msgTypes[15] + mi := &file_google_bigtable_v2_data_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1810,7 +2005,7 @@ func (x *StreamContinuationToken) ProtoReflect() protoreflect.Message { // Deprecated: Use StreamContinuationToken.ProtoReflect.Descriptor instead. func (*StreamContinuationToken) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{15} + return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{16} } func (x *StreamContinuationToken) GetPartition() *StreamPartition { @@ -1827,35 +2022,31 @@ func (x *StreamContinuationToken) GetToken() string { return "" } -// A RowFilter which sends rows through several RowFilters in sequence. -type RowFilter_Chain struct { +// Protocol buffers format descriptor, as described by Messages ProtoSchema and +// ProtoRows +type ProtoFormat struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - // The elements of "filters" are chained together to process the input row: - // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row - // The full chain is executed atomically. 
- Filters []*RowFilter `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` } -func (x *RowFilter_Chain) Reset() { - *x = RowFilter_Chain{} +func (x *ProtoFormat) Reset() { + *x = ProtoFormat{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_data_proto_msgTypes[16] + mi := &file_google_bigtable_v2_data_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RowFilter_Chain) String() string { +func (x *ProtoFormat) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RowFilter_Chain) ProtoMessage() {} +func (*ProtoFormat) ProtoMessage() {} -func (x *RowFilter_Chain) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_data_proto_msgTypes[16] +func (x *ProtoFormat) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_data_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1866,70 +2057,40 @@ func (x *RowFilter_Chain) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RowFilter_Chain.ProtoReflect.Descriptor instead. -func (*RowFilter_Chain) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{10, 0} -} - -func (x *RowFilter_Chain) GetFilters() []*RowFilter { - if x != nil { - return x.Filters - } - return nil +// Deprecated: Use ProtoFormat.ProtoReflect.Descriptor instead. +func (*ProtoFormat) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{17} } -// A RowFilter which sends each row to each of several component -// RowFilters and interleaves the results. -type RowFilter_Interleave struct { +// Describes a column in a Bigtable Query Language result set. +type ColumnMetadata struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The elements of "filters" all process a copy of the input row, and the - // results are pooled, sorted, and combined into a single output row. - // If multiple cells are produced with the same column and timestamp, - // they will all appear in the output row in an unspecified mutual order. - // Consider the following example, with three filters: - // - // input row - // | - // ----------------------------------------------------- - // | | | - // f(0) f(1) f(2) - // | | | - // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a - // 2: foo,blah,11,z far,blah,5,x far,blah,5,x - // | | | - // ----------------------------------------------------- - // | - // 1: foo,bar,10,z // could have switched with #2 - // 2: foo,bar,10,x // could have switched with #1 - // 3: foo,blah,11,z - // 4: far,bar,7,a - // 5: far,blah,5,x // identical to #6 - // 6: far,blah,5,x // identical to #5 - // - // All interleaved filters are executed atomically. - Filters []*RowFilter `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` + // The name of the column. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The type of the column. 
+ Type *Type `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` } -func (x *RowFilter_Interleave) Reset() { - *x = RowFilter_Interleave{} +func (x *ColumnMetadata) Reset() { + *x = ColumnMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_data_proto_msgTypes[17] + mi := &file_google_bigtable_v2_data_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RowFilter_Interleave) String() string { +func (x *ColumnMetadata) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RowFilter_Interleave) ProtoMessage() {} +func (*ColumnMetadata) ProtoMessage() {} -func (x *RowFilter_Interleave) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_data_proto_msgTypes[17] +func (x *ColumnMetadata) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_data_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1940,46 +2101,523 @@ func (x *RowFilter_Interleave) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RowFilter_Interleave.ProtoReflect.Descriptor instead. -func (*RowFilter_Interleave) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{10, 1} +// Deprecated: Use ColumnMetadata.ProtoReflect.Descriptor instead. +func (*ColumnMetadata) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{18} } -func (x *RowFilter_Interleave) GetFilters() []*RowFilter { +func (x *ColumnMetadata) GetName() string { if x != nil { - return x.Filters + return x.Name + } + return "" +} + +func (x *ColumnMetadata) GetType() *Type { + if x != nil { + return x.Type } return nil } -// A RowFilter which evaluates one of two possible RowFilters, depending on -// whether or not a predicate RowFilter outputs any cells from the input row. -// -// IMPORTANT NOTE: The predicate filter does not execute atomically with the -// true and false filters, which may lead to inconsistent or unexpected -// results. Additionally, Condition filters have poor performance, especially -// when filters are set for the false condition. -type RowFilter_Condition struct { +// ResultSet schema in proto format +type ProtoSchema struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // If `predicate_filter` outputs any cells, then `true_filter` will be - // evaluated on the input row. Otherwise, `false_filter` will be evaluated. - PredicateFilter *RowFilter `protobuf:"bytes,1,opt,name=predicate_filter,json=predicateFilter,proto3" json:"predicate_filter,omitempty"` - // The filter to apply to the input row if `predicate_filter` returns any - // results. If not provided, no results will be returned in the true case. - TrueFilter *RowFilter `protobuf:"bytes,2,opt,name=true_filter,json=trueFilter,proto3" json:"true_filter,omitempty"` - // The filter to apply to the input row if `predicate_filter` does not - // return any results. If not provided, no results will be returned in the - // false case. - FalseFilter *RowFilter `protobuf:"bytes,3,opt,name=false_filter,json=falseFilter,proto3" json:"false_filter,omitempty"` + // The columns in the result set. 
+	Columns []*ColumnMetadata `protobuf:"bytes,1,rep,name=columns,proto3" json:"columns,omitempty"`
+}
+
+func (x *ProtoSchema) Reset() {
+	*x = ProtoSchema{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_bigtable_v2_data_proto_msgTypes[19]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ProtoSchema) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ProtoSchema) ProtoMessage() {}
+
+func (x *ProtoSchema) ProtoReflect() protoreflect.Message {
+	mi := &file_google_bigtable_v2_data_proto_msgTypes[19]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ProtoSchema.ProtoReflect.Descriptor instead.
+func (*ProtoSchema) Descriptor() ([]byte, []int) {
+	return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *ProtoSchema) GetColumns() []*ColumnMetadata {
+	if x != nil {
+		return x.Columns
+	}
+	return nil
+}
+
+// Describes the structure of a Bigtable result set.
+type ResultSetMetadata struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The schema of the ResultSet: an ordered list of column names with
+	// their types.
+	//
+	// Types that are assignable to Schema:
+	//
+	//	*ResultSetMetadata_ProtoSchema
+	Schema isResultSetMetadata_Schema `protobuf_oneof:"schema"`
+}
+
+func (x *ResultSetMetadata) Reset() {
+	*x = ResultSetMetadata{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_bigtable_v2_data_proto_msgTypes[20]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ResultSetMetadata) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResultSetMetadata) ProtoMessage() {}
+
+func (x *ResultSetMetadata) ProtoReflect() protoreflect.Message {
+	mi := &file_google_bigtable_v2_data_proto_msgTypes[20]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResultSetMetadata.ProtoReflect.Descriptor instead.
+func (*ResultSetMetadata) Descriptor() ([]byte, []int) {
+	return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{20}
+}
+
+func (m *ResultSetMetadata) GetSchema() isResultSetMetadata_Schema {
+	if m != nil {
+		return m.Schema
+	}
+	return nil
+}
+
+func (x *ResultSetMetadata) GetProtoSchema() *ProtoSchema {
+	if x, ok := x.GetSchema().(*ResultSetMetadata_ProtoSchema); ok {
+		return x.ProtoSchema
+	}
+	return nil
+}
+
+type isResultSetMetadata_Schema interface {
+	isResultSetMetadata_Schema()
+}
+
+type ResultSetMetadata_ProtoSchema struct {
+	// Schema in proto format
+	ProtoSchema *ProtoSchema `protobuf:"bytes,1,opt,name=proto_schema,json=protoSchema,proto3,oneof"`
+}
+
+func (*ResultSetMetadata_ProtoSchema) isResultSetMetadata_Schema() {}
+
+// Rows represented in proto format.
+//
+// This should be constructed by concatenating the `batch_data` from each
+// of the relevant `ProtoRowsBatch` messages and parsing the result as a
+// `ProtoRows` message.
+type ProtoRows struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A proto rows message consists of a list of values. Every N complete values
+	// defines a row, where N is equal to the number of entries in the
+	// `metadata.proto_schema.columns` value received in the first response.
+	Values []*Value `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+func (x *ProtoRows) Reset() {
+	*x = ProtoRows{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_bigtable_v2_data_proto_msgTypes[21]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ProtoRows) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ProtoRows) ProtoMessage() {}
+
+func (x *ProtoRows) ProtoReflect() protoreflect.Message {
+	mi := &file_google_bigtable_v2_data_proto_msgTypes[21]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ProtoRows.ProtoReflect.Descriptor instead.
+func (*ProtoRows) Descriptor() ([]byte, []int) {
+	return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{21}
+}
+
+func (x *ProtoRows) GetValues() []*Value {
+	if x != nil {
+		return x.Values
+	}
+	return nil
+}
+
+// Batch of serialized ProtoRows.
+type ProtoRowsBatch struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Merge partial results by concatenating these bytes, then parsing the
+	// overall value as a `ProtoRows` message.
+	BatchData []byte `protobuf:"bytes,1,opt,name=batch_data,json=batchData,proto3" json:"batch_data,omitempty"`
+}
+
+func (x *ProtoRowsBatch) Reset() {
+	*x = ProtoRowsBatch{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_bigtable_v2_data_proto_msgTypes[22]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ProtoRowsBatch) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ProtoRowsBatch) ProtoMessage() {}
+
+func (x *ProtoRowsBatch) ProtoReflect() protoreflect.Message {
+	mi := &file_google_bigtable_v2_data_proto_msgTypes[22]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ProtoRowsBatch.ProtoReflect.Descriptor instead.
+func (*ProtoRowsBatch) Descriptor() ([]byte, []int) {
+	return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{22}
+}
+
+func (x *ProtoRowsBatch) GetBatchData() []byte {
+	if x != nil {
+		return x.BatchData
+	}
+	return nil
+}
+
+// A partial result set from the streaming query API.
+// The CBT client will buffer `partial_rows` from `result_sets` until it gets
+// a `resume_token`.
+type PartialResultSet struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Partial rows in one of the supported formats. It may require many
+	// PartialResultSets to stream a batch of rows that can be decoded on the
+	// client. The client should buffer `partial_rows` until it gets a
+	// `resume_token`, at which point the batch is complete and can be decoded
+	// and yielded to the user. Each sub-message documents the appropriate way
+	// to combine results.
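+	//
+	// A minimal sketch of that buffering loop (an editorial illustration, not
+	// generated upstream text; `resp` is a received PartialResultSet, `buf` is
+	// the client's accumulation buffer, and `proto` is
+	// google.golang.org/protobuf/proto):
+	//
+	//	if b := resp.GetProtoRowsBatch(); b != nil {
+	//		buf = append(buf, b.GetBatchData()...)
+	//	}
+	//	if len(resp.GetResumeToken()) > 0 {
+	//		rows := &ProtoRows{}
+	//		if err := proto.Unmarshal(buf, rows); err != nil {
+	//			// handle a malformed batch
+	//		}
+	//		// yield rows.GetValues() to the caller, then reset buf
+	//		buf = buf[:0]
+	//	}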
+	//
+	// Types that are assignable to PartialRows:
+	//
+	//	*PartialResultSet_ProtoRowsBatch
+	PartialRows isPartialResultSet_PartialRows `protobuf_oneof:"partial_rows"`
+	// An opaque token sent by the server to allow query resumption and signal
+	// the client to accumulate `partial_rows` since the last non-empty
+	// `resume_token`. On resumption, the resumed query will return the remaining
+	// rows for this query.
+	//
+	// If there is a batch in progress, a non-empty `resume_token`
+	// means that the batch of `partial_rows` will be complete after merging
+	// the `partial_rows` from this response. The client must only yield
+	// completed batches to the application, and must ensure that any future
+	// retries send the latest token to avoid returning duplicate data.
+	//
+	// The server may set a `resume_token` without a `partial_rows`. If there is
+	// a batch in progress, the client should yield it.
+	//
+	// The server will also send a sentinel `resume_token` when the last batch
+	// of `partial_rows` is sent. If the client retries the ExecuteQueryRequest
+	// with the sentinel `resume_token`, the server will emit it again without
+	// any `partial_rows`, then return OK.
+	ResumeToken []byte `protobuf:"bytes,5,opt,name=resume_token,json=resumeToken,proto3" json:"resume_token,omitempty"`
+	// Estimated size of a new batch. The server will always set this when
+	// returning the first `partial_rows` of a batch, and will not set it at any
+	// other time.
+	//
+	// The client can use this estimate to allocate an initial buffer for the
+	// batched results. This helps minimize the number of allocations required,
+	// though the buffer size may still need to be increased if the estimate is
+	// too low.
+	EstimatedBatchSize int32 `protobuf:"varint,4,opt,name=estimated_batch_size,json=estimatedBatchSize,proto3" json:"estimated_batch_size,omitempty"`
+}
+
+func (x *PartialResultSet) Reset() {
+	*x = PartialResultSet{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_bigtable_v2_data_proto_msgTypes[23]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *PartialResultSet) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PartialResultSet) ProtoMessage() {}
+
+func (x *PartialResultSet) ProtoReflect() protoreflect.Message {
+	mi := &file_google_bigtable_v2_data_proto_msgTypes[23]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use PartialResultSet.ProtoReflect.Descriptor instead.
+func (*PartialResultSet) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{23} +} + +func (m *PartialResultSet) GetPartialRows() isPartialResultSet_PartialRows { + if m != nil { + return m.PartialRows + } + return nil +} + +func (x *PartialResultSet) GetProtoRowsBatch() *ProtoRowsBatch { + if x, ok := x.GetPartialRows().(*PartialResultSet_ProtoRowsBatch); ok { + return x.ProtoRowsBatch + } + return nil +} + +func (x *PartialResultSet) GetResumeToken() []byte { + if x != nil { + return x.ResumeToken + } + return nil +} + +func (x *PartialResultSet) GetEstimatedBatchSize() int32 { + if x != nil { + return x.EstimatedBatchSize + } + return 0 +} + +type isPartialResultSet_PartialRows interface { + isPartialResultSet_PartialRows() +} + +type PartialResultSet_ProtoRowsBatch struct { + // Partial rows in serialized ProtoRows format. + ProtoRowsBatch *ProtoRowsBatch `protobuf:"bytes,3,opt,name=proto_rows_batch,json=protoRowsBatch,proto3,oneof"` +} + +func (*PartialResultSet_ProtoRowsBatch) isPartialResultSet_PartialRows() {} + +// A RowFilter which sends rows through several RowFilters in sequence. +type RowFilter_Chain struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The elements of "filters" are chained together to process the input row: + // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row + // The full chain is executed atomically. + Filters []*RowFilter `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` +} + +func (x *RowFilter_Chain) Reset() { + *x = RowFilter_Chain{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_data_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RowFilter_Chain) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RowFilter_Chain) ProtoMessage() {} + +func (x *RowFilter_Chain) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_data_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RowFilter_Chain.ProtoReflect.Descriptor instead. +func (*RowFilter_Chain) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{11, 0} +} + +func (x *RowFilter_Chain) GetFilters() []*RowFilter { + if x != nil { + return x.Filters + } + return nil +} + +// A RowFilter which sends each row to each of several component +// RowFilters and interleaves the results. +type RowFilter_Interleave struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The elements of "filters" all process a copy of the input row, and the + // results are pooled, sorted, and combined into a single output row. + // If multiple cells are produced with the same column and timestamp, + // they will all appear in the output row in an unspecified mutual order. 
+ // Consider the following example, with three filters: + // + // input row + // | + // ----------------------------------------------------- + // | | | + // f(0) f(1) f(2) + // | | | + // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a + // 2: foo,blah,11,z far,blah,5,x far,blah,5,x + // | | | + // ----------------------------------------------------- + // | + // 1: foo,bar,10,z // could have switched with #2 + // 2: foo,bar,10,x // could have switched with #1 + // 3: foo,blah,11,z + // 4: far,bar,7,a + // 5: far,blah,5,x // identical to #6 + // 6: far,blah,5,x // identical to #5 + // + // All interleaved filters are executed atomically. + Filters []*RowFilter `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` +} + +func (x *RowFilter_Interleave) Reset() { + *x = RowFilter_Interleave{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_data_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RowFilter_Interleave) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RowFilter_Interleave) ProtoMessage() {} + +func (x *RowFilter_Interleave) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_data_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RowFilter_Interleave.ProtoReflect.Descriptor instead. +func (*RowFilter_Interleave) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{11, 1} +} + +func (x *RowFilter_Interleave) GetFilters() []*RowFilter { + if x != nil { + return x.Filters + } + return nil +} + +// A RowFilter which evaluates one of two possible RowFilters, depending on +// whether or not a predicate RowFilter outputs any cells from the input row. +// +// IMPORTANT NOTE: The predicate filter does not execute atomically with the +// true and false filters, which may lead to inconsistent or unexpected +// results. Additionally, Condition filters have poor performance, especially +// when filters are set for the false condition. +type RowFilter_Condition struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If `predicate_filter` outputs any cells, then `true_filter` will be + // evaluated on the input row. Otherwise, `false_filter` will be evaluated. + PredicateFilter *RowFilter `protobuf:"bytes,1,opt,name=predicate_filter,json=predicateFilter,proto3" json:"predicate_filter,omitempty"` + // The filter to apply to the input row if `predicate_filter` returns any + // results. If not provided, no results will be returned in the true case. + TrueFilter *RowFilter `protobuf:"bytes,2,opt,name=true_filter,json=trueFilter,proto3" json:"true_filter,omitempty"` + // The filter to apply to the input row if `predicate_filter` does not + // return any results. If not provided, no results will be returned in the + // false case. 
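+	//
+	// A hypothetical sketch of wiring a Condition together (editorial
+	// illustration only; the oneof wrapper names such as RowFilter_Condition_,
+	// RowFilter_ColumnQualifierRegexFilter, and RowFilter_StripValueTransformer
+	// follow the usual protoc-gen-go convention and are assumptions here):
+	//
+	//	f := &RowFilter{Filter: &RowFilter_Condition_{Condition: &RowFilter_Condition{
+	//		PredicateFilter: &RowFilter{Filter: &RowFilter_ColumnQualifierRegexFilter{
+	//			ColumnQualifierRegexFilter: []byte("marker"),
+	//		}},
+	//		// with no TrueFilter, matching rows pass through unchanged is NOT
+	//		// implied: no results are returned in the true case
+	//		FalseFilter: &RowFilter{Filter: &RowFilter_StripValueTransformer{
+	//			StripValueTransformer: true,
+	//		}},
+	//	}}}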
+ FalseFilter *RowFilter `protobuf:"bytes,3,opt,name=false_filter,json=falseFilter,proto3" json:"false_filter,omitempty"` } func (x *RowFilter_Condition) Reset() { *x = RowFilter_Condition{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_data_proto_msgTypes[18] + mi := &file_google_bigtable_v2_data_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1992,7 +2630,7 @@ func (x *RowFilter_Condition) String() string { func (*RowFilter_Condition) ProtoMessage() {} func (x *RowFilter_Condition) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_data_proto_msgTypes[18] + mi := &file_google_bigtable_v2_data_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2005,7 +2643,7 @@ func (x *RowFilter_Condition) ProtoReflect() protoreflect.Message { // Deprecated: Use RowFilter_Condition.ProtoReflect.Descriptor instead. func (*RowFilter_Condition) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{10, 2} + return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{11, 2} } func (x *RowFilter_Condition) GetPredicateFilter() *RowFilter { @@ -2054,7 +2692,7 @@ type Mutation_SetCell struct { func (x *Mutation_SetCell) Reset() { *x = Mutation_SetCell{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_data_proto_msgTypes[19] + mi := &file_google_bigtable_v2_data_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2067,7 +2705,7 @@ func (x *Mutation_SetCell) String() string { func (*Mutation_SetCell) ProtoMessage() {} func (x *Mutation_SetCell) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_data_proto_msgTypes[19] + mi := &file_google_bigtable_v2_data_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2080,7 +2718,7 @@ func (x *Mutation_SetCell) ProtoReflect() protoreflect.Message { // Deprecated: Use Mutation_SetCell.ProtoReflect.Descriptor instead. func (*Mutation_SetCell) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{11, 0} + return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{12, 0} } func (x *Mutation_SetCell) GetFamilyName() string { @@ -2135,7 +2773,7 @@ type Mutation_AddToCell struct { func (x *Mutation_AddToCell) Reset() { *x = Mutation_AddToCell{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_data_proto_msgTypes[20] + mi := &file_google_bigtable_v2_data_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2148,7 +2786,7 @@ func (x *Mutation_AddToCell) String() string { func (*Mutation_AddToCell) ProtoMessage() {} func (x *Mutation_AddToCell) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_data_proto_msgTypes[20] + mi := &file_google_bigtable_v2_data_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2161,7 +2799,7 @@ func (x *Mutation_AddToCell) ProtoReflect() protoreflect.Message { // Deprecated: Use Mutation_AddToCell.ProtoReflect.Descriptor instead. 
func (*Mutation_AddToCell) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{11, 1} + return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{12, 1} } func (x *Mutation_AddToCell) GetFamilyName() string { @@ -2192,6 +2830,89 @@ func (x *Mutation_AddToCell) GetInput() *Value { return nil } +// A Mutation which merges accumulated state into a cell in an `Aggregate` +// family. +type Mutation_MergeToCell struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the `Aggregate` family into which new data should be added. + // This must be a family with a `value_type` of `Aggregate`. + // Format: `[-_.a-zA-Z0-9]+` + FamilyName string `protobuf:"bytes,1,opt,name=family_name,json=familyName,proto3" json:"family_name,omitempty"` + // The qualifier of the column into which new data should be added. This + // must be a `raw_value`. + ColumnQualifier *Value `protobuf:"bytes,2,opt,name=column_qualifier,json=columnQualifier,proto3" json:"column_qualifier,omitempty"` + // The timestamp of the cell to which new data should be added. This must + // be a `raw_timestamp_micros` that matches the table's `granularity`. + Timestamp *Value `protobuf:"bytes,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // The input value to be merged into the specified cell. This must be + // compatible with the family's `value_type.state_type`. Merging `NULL` is + // allowed, but has no effect. + Input *Value `protobuf:"bytes,4,opt,name=input,proto3" json:"input,omitempty"` +} + +func (x *Mutation_MergeToCell) Reset() { + *x = Mutation_MergeToCell{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_data_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Mutation_MergeToCell) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Mutation_MergeToCell) ProtoMessage() {} + +func (x *Mutation_MergeToCell) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_data_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Mutation_MergeToCell.ProtoReflect.Descriptor instead. +func (*Mutation_MergeToCell) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{12, 2} +} + +func (x *Mutation_MergeToCell) GetFamilyName() string { + if x != nil { + return x.FamilyName + } + return "" +} + +func (x *Mutation_MergeToCell) GetColumnQualifier() *Value { + if x != nil { + return x.ColumnQualifier + } + return nil +} + +func (x *Mutation_MergeToCell) GetTimestamp() *Value { + if x != nil { + return x.Timestamp + } + return nil +} + +func (x *Mutation_MergeToCell) GetInput() *Value { + if x != nil { + return x.Input + } + return nil +} + // A Mutation which deletes cells from the specified column, optionally // restricting the deletions to a given timestamp range. 
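+//
+// An editorial sketch (not upstream text) of building such a mutation; the
+// `TimeRange` field name and the `cutoffMicros` value are assumptions here:
+//
+//	mut := &Mutation{Mutation: &Mutation_DeleteFromColumn_{
+//		DeleteFromColumn: &Mutation_DeleteFromColumn{
+//			FamilyName:      "cf",
+//			ColumnQualifier: []byte("q"),
+//			TimeRange:       &TimestampRange{EndTimestampMicros: cutoffMicros},
+//		},
+//	}}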
type Mutation_DeleteFromColumn struct { @@ -2212,7 +2933,7 @@ type Mutation_DeleteFromColumn struct { func (x *Mutation_DeleteFromColumn) Reset() { *x = Mutation_DeleteFromColumn{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_data_proto_msgTypes[21] + mi := &file_google_bigtable_v2_data_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2225,7 +2946,7 @@ func (x *Mutation_DeleteFromColumn) String() string { func (*Mutation_DeleteFromColumn) ProtoMessage() {} func (x *Mutation_DeleteFromColumn) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_data_proto_msgTypes[21] + mi := &file_google_bigtable_v2_data_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2238,7 +2959,7 @@ func (x *Mutation_DeleteFromColumn) ProtoReflect() protoreflect.Message { // Deprecated: Use Mutation_DeleteFromColumn.ProtoReflect.Descriptor instead. func (*Mutation_DeleteFromColumn) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{11, 2} + return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{12, 3} } func (x *Mutation_DeleteFromColumn) GetFamilyName() string { @@ -2276,7 +2997,7 @@ type Mutation_DeleteFromFamily struct { func (x *Mutation_DeleteFromFamily) Reset() { *x = Mutation_DeleteFromFamily{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_data_proto_msgTypes[22] + mi := &file_google_bigtable_v2_data_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2289,7 +3010,7 @@ func (x *Mutation_DeleteFromFamily) String() string { func (*Mutation_DeleteFromFamily) ProtoMessage() {} func (x *Mutation_DeleteFromFamily) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_data_proto_msgTypes[22] + mi := &file_google_bigtable_v2_data_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2302,7 +3023,7 @@ func (x *Mutation_DeleteFromFamily) ProtoReflect() protoreflect.Message { // Deprecated: Use Mutation_DeleteFromFamily.ProtoReflect.Descriptor instead. func (*Mutation_DeleteFromFamily) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{11, 3} + return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{12, 4} } func (x *Mutation_DeleteFromFamily) GetFamilyName() string { @@ -2322,7 +3043,7 @@ type Mutation_DeleteFromRow struct { func (x *Mutation_DeleteFromRow) Reset() { *x = Mutation_DeleteFromRow{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_data_proto_msgTypes[23] + mi := &file_google_bigtable_v2_data_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2335,7 +3056,7 @@ func (x *Mutation_DeleteFromRow) String() string { func (*Mutation_DeleteFromRow) ProtoMessage() {} func (x *Mutation_DeleteFromRow) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_data_proto_msgTypes[23] + mi := &file_google_bigtable_v2_data_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2348,7 +3069,7 @@ func (x *Mutation_DeleteFromRow) ProtoReflect() protoreflect.Message { // Deprecated: Use Mutation_DeleteFromRow.ProtoReflect.Descriptor instead. 
func (*Mutation_DeleteFromRow) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{11, 4} + return file_google_bigtable_v2_data_proto_rawDescGZIP(), []int{12, 5} } var File_google_bigtable_v2_data_proto protoreflect.FileDescriptor @@ -2359,299 +3080,388 @@ var file_google_bigtable_v2_data_proto_rawDesc = []byte{ 0x12, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4f, 0x0a, 0x03, 0x52, 0x6f, 0x77, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, - 0x08, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x52, 0x08, 0x66, 0x61, 0x6d, - 0x69, 0x6c, 0x69, 0x65, 0x73, 0x22, 0x52, 0x0a, 0x06, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, - 0x52, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x22, 0x56, 0x0a, 0x06, 0x43, 0x6f, 0x6c, - 0x75, 0x6d, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, - 0x72, 0x12, 0x2e, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, - 0x73, 0x22, 0x5f, 0x0a, 0x04, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x74, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x69, 0x63, 0x72, 0x6f, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x69, - 0x63, 0x72, 0x6f, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, - 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x73, 0x22, 0x81, 0x01, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x09, - 0x72, 0x61, 0x77, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x48, - 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x14, 0x72, - 0x61, 0x77, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x69, 0x63, - 0x72, 0x6f, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x12, 0x72, 0x61, 0x77, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x73, 0x12, - 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 
0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, - 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc2, 0x01, 0x0a, 0x08, 0x52, 0x6f, 0x77, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x12, 0x2a, 0x0a, 0x10, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, - 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, - 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x12, - 0x26, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x65, 0x6e, 0x64, 0x5f, 0x6b, - 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, - 0x0a, 0x65, 0x6e, 0x64, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x26, 0x0a, 0x0e, 0x65, - 0x6e, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x0c, 0x65, 0x6e, 0x64, 0x4b, 0x65, 0x79, 0x43, 0x6c, 0x6f, - 0x73, 0x65, 0x64, 0x42, 0x0b, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, - 0x42, 0x09, 0x0a, 0x07, 0x65, 0x6e, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x22, 0x60, 0x0a, 0x06, 0x52, - 0x6f, 0x77, 0x53, 0x65, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x72, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, - 0x12, 0x3b, 0x0a, 0x0a, 0x72, 0x6f, 0x77, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x52, 0x09, 0x72, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x22, 0xa2, 0x02, - 0x0a, 0x0b, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x1f, 0x0a, - 0x0b, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x36, - 0x0a, 0x16, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, - 0x72, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, - 0x52, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, - 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x12, 0x73, 0x74, 0x61, 0x72, 0x74, 0x51, 0x75, 0x61, - 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x14, 0x65, 0x6e, - 0x64, 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x6f, 0x73, - 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x12, 0x65, 0x6e, 0x64, 0x51, - 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x12, 0x2e, - 0x0a, 0x12, 0x65, 0x6e, 0x64, 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, - 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x10, 0x65, 0x6e, - 0x64, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4f, 0x70, 0x65, 0x6e, 0x42, 0x11, - 0x0a, 0x0f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 
0x69, 0x65, - 0x72, 0x42, 0x0f, 0x0a, 0x0d, 0x65, 0x6e, 0x64, 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, - 0x65, 0x72, 0x22, 0x78, 0x0a, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x69, 0x63, 0x72, 0x6f, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, - 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x69, 0x63, 0x72, - 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x73, 0x22, 0xd8, 0x01, 0x0a, - 0x0a, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x10, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x2a, 0x0a, 0x10, 0x65, 0x6e, 0x64, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0c, 0x48, 0x01, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x43, 0x6c, 0x6f, - 0x73, 0x65, 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x65, 0x6e, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x0c, 0x65, - 0x6e, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x65, 0x6e, 0x42, 0x0d, 0x0a, 0x0b, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x65, 0x6e, - 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xfc, 0x0b, 0x0a, 0x09, 0x52, 0x6f, 0x77, 0x46, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x05, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x77, 0x46, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x48, 0x00, 0x52, 0x05, 0x63, 0x68, 0x61, - 0x69, 0x6e, 0x12, 0x4a, 0x0a, 0x0a, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6c, 0x65, 0x61, 0x76, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x2f, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4f, 0x0a, + 0x03, 0x52, 0x6f, 0x77, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 
0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x08, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x69, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x61, + 0x6d, 0x69, 0x6c, 0x79, 0x52, 0x08, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x22, 0x52, + 0x0a, 0x06, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x07, + 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x52, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, + 0x6e, 0x73, 0x22, 0x56, 0x0a, 0x06, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x1c, 0x0a, 0x09, + 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x09, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x05, 0x63, 0x65, + 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x65, 0x6c, 0x6c, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x5f, 0x0a, 0x04, 0x43, 0x65, + 0x6c, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, + 0x6d, 0x69, 0x63, 0x72, 0x6f, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x73, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x22, 0xf9, 0x03, 0x0a, 0x05, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x32, 0x0a, 0x14, 0x72, 0x61, 0x77, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x69, 0x63, 0x72, 0x6f, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x00, 0x52, 0x12, 0x72, 0x61, 0x77, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x73, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, + 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, + 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, + 0x0a, 0x0b, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x45, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x0a, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x48, + 0x00, 0x52, 0x09, 0x64, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x41, 0x0a, 0x0b, + 0x61, 0x72, 0x72, 0x61, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, + 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x3f, 0x0a, 0x0a, 0x41, 0x72, 0x72, 0x61, 0x79, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0xc2, 0x01, 0x0a, 0x08, 0x52, 0x6f, 0x77, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x2a, 0x0a, 0x10, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, + 0x65, 0x79, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, + 0x00, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x43, 0x6c, 0x6f, 0x73, 0x65, + 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x65, 0x6e, 0x64, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x48, + 0x01, 0x52, 0x0a, 0x65, 0x6e, 0x64, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x26, 0x0a, + 0x0e, 0x65, 0x6e, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x0c, 0x65, 0x6e, 0x64, 0x4b, 0x65, 0x79, 0x43, + 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x42, 0x0b, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, + 0x65, 0x79, 0x42, 0x09, 0x0a, 0x07, 0x65, 0x6e, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x22, 0x60, 0x0a, + 0x06, 0x52, 0x6f, 0x77, 0x53, 0x65, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x72, 0x6f, 0x77, 0x5f, 0x6b, + 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x72, 0x6f, 0x77, 0x4b, 0x65, + 0x79, 0x73, 0x12, 0x3b, 0x0a, 0x0a, 0x72, 0x6f, 0x77, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x77, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x52, 0x09, 0x72, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x22, + 0xa2, 0x02, 0x0a, 0x0b, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x1f, 0x0a, 0x0b, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x36, 0x0a, 0x16, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x48, 0x00, 0x52, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x6f, 0x70, 0x65, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x12, 0x73, 0x74, 0x61, 0x72, 0x74, 0x51, + 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x14, + 0x65, 0x6e, 0x64, 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x63, 0x6c, + 0x6f, 0x73, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x12, 0x65, 0x6e, + 0x64, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x64, + 0x12, 0x2e, 0x0a, 0x12, 0x65, 0x6e, 0x64, 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x10, + 0x65, 0x6e, 0x64, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4f, 0x70, 0x65, 0x6e, + 0x42, 0x11, 0x0a, 0x0f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x42, 0x0f, 0x0a, 0x0d, 0x65, 0x6e, 0x64, 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x22, 0x78, 0x0a, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x69, 0x63, 0x72, 0x6f, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x73, 0x12, 0x30, 0x0a, 0x14, + 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x69, + 0x63, 0x72, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x65, 0x6e, 0x64, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x73, 0x22, 0xd8, + 0x01, 0x0a, 0x0a, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x2e, 0x0a, + 0x12, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x63, 0x6c, 0x6f, + 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x10, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x12, 0x2a, 0x0a, + 0x10, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6f, 0x70, 0x65, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x2a, 0x0a, 0x10, 0x65, 0x6e, 0x64, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x18, 
0x03, 0x20, + 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x43, + 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x65, 0x6e, 0x64, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, + 0x0c, 0x65, 0x6e, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x65, 0x6e, 0x42, 0x0d, 0x0a, + 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x0a, 0x09, + 0x65, 0x6e, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xfc, 0x0b, 0x0a, 0x09, 0x52, 0x6f, + 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x05, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x77, 0x46, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6c, 0x65, 0x61, 0x76, 0x65, - 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x12, 0x47, - 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x04, 0x73, 0x69, 0x6e, 0x6b, 0x18, - 0x10, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x04, 0x73, 0x69, 0x6e, 0x6b, 0x12, 0x28, 0x0a, - 0x0f, 0x70, 0x61, 0x73, 0x73, 0x5f, 0x61, 0x6c, 0x6c, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x61, 0x73, 0x73, 0x41, 0x6c, - 0x6c, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2a, 0x0a, 0x10, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x5f, 0x61, 0x6c, 0x6c, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x12, 0x20, 0x01, 0x28, - 0x08, 0x48, 0x00, 0x52, 0x0e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x6c, 0x6c, 0x46, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x12, 0x31, 0x0a, 0x14, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x72, - 0x65, 0x67, 0x65, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0c, 0x48, 0x00, 0x52, 0x11, 0x72, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x65, 0x78, - 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2c, 0x0a, 0x11, 0x72, 0x6f, 0x77, 0x5f, 0x73, 0x61, - 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, - 0x01, 0x48, 0x00, 0x52, 0x0f, 0x72, 0x6f, 0x77, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x46, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x12, 0x39, 0x0a, 0x18, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x15, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, - 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x67, 0x65, 0x78, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, - 0x43, 0x0a, 0x1d, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, - 0x69, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x1a, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, - 0x51, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x65, 0x67, 0x65, 0x78, 0x46, 0x69, - 0x6c, 0x74, 
0x65, 0x72, 0x12, 0x51, 0x0a, 0x13, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x72, - 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x48, 0x00, 0x52, 0x11, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5a, 0x0a, 0x16, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x14, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x46, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x72, 0x65, 0x67, - 0x65, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x48, - 0x00, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x65, 0x67, 0x65, 0x78, 0x46, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x72, 0x61, 0x6e, - 0x67, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, - 0x00, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x46, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x1b, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x70, 0x65, 0x72, - 0x5f, 0x72, 0x6f, 0x77, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x17, 0x63, 0x65, 0x6c, 0x6c, - 0x73, 0x50, 0x65, 0x72, 0x52, 0x6f, 0x77, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x46, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x1a, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x70, 0x65, 0x72, - 0x5f, 0x72, 0x6f, 0x77, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x16, 0x63, 0x65, 0x6c, 0x6c, 0x73, - 0x50, 0x65, 0x72, 0x52, 0x6f, 0x77, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x12, 0x42, 0x0a, 0x1d, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, - 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x19, 0x63, 0x65, 0x6c, 0x6c, - 0x73, 0x50, 0x65, 0x72, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x46, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x38, 0x0a, 0x17, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x65, 0x72, - 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x15, 0x73, 0x74, 0x72, 0x69, 0x70, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x65, 0x72, 0x12, - 0x38, 0x0a, 0x17, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x74, - 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 
0x72, 0x6d, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x00, 0x52, 0x15, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x54, 0x72, - 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x65, 0x72, 0x1a, 0x40, 0x0a, 0x05, 0x43, 0x68, 0x61, - 0x69, 0x6e, 0x12, 0x37, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x48, 0x00, 0x52, 0x05, 0x63, + 0x68, 0x61, 0x69, 0x6e, 0x12, 0x4a, 0x0a, 0x0a, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6c, 0x65, 0x61, + 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, + 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6c, 0x65, 0x61, + 0x76, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6c, 0x65, 0x61, 0x76, 0x65, + 0x12, 0x47, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x45, 0x0a, 0x0a, 0x49, - 0x6e, 0x74, 0x65, 0x72, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x52, 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x73, 0x1a, 0xd7, 0x01, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x48, 0x0a, 0x10, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x52, 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0f, 0x70, 0x72, 0x65, 0x64, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x0b, 0x74, 0x72, - 0x75, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0a, - 0x74, 0x72, 0x75, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x40, 0x0a, 0x0c, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, - 0x0b, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x08, 0x0a, 0x06, - 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x99, 0x08, 0x0a, 0x08, 0x4d, 0x75, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x08, 0x73, 0x65, 0x74, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 
0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x48, 0x00, 0x52, 0x07, 0x73, - 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x48, 0x0a, 0x0b, 0x61, 0x64, 0x64, 0x5f, 0x74, 0x6f, - 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x41, 0x64, 0x64, 0x54, 0x6f, 0x43, - 0x65, 0x6c, 0x6c, 0x48, 0x00, 0x52, 0x09, 0x61, 0x64, 0x64, 0x54, 0x6f, 0x43, 0x65, 0x6c, 0x6c, - 0x12, 0x5d, 0x0a, 0x12, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, - 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, + 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x09, + 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x04, 0x73, 0x69, 0x6e, + 0x6b, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x04, 0x73, 0x69, 0x6e, 0x6b, 0x12, + 0x28, 0x0a, 0x0f, 0x70, 0x61, 0x73, 0x73, 0x5f, 0x61, 0x6c, 0x6c, 0x5f, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x61, 0x73, 0x73, + 0x41, 0x6c, 0x6c, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2a, 0x0a, 0x10, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x5f, 0x61, 0x6c, 0x6c, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x12, 0x20, + 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x6c, 0x6c, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x31, 0x0a, 0x14, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x11, 0x72, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, + 0x65, 0x78, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2c, 0x0a, 0x11, 0x72, 0x6f, 0x77, 0x5f, + 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0f, 0x72, 0x6f, 0x77, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x39, 0x0a, 0x18, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x15, 0x66, 0x61, 0x6d, 0x69, + 0x6c, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x67, 0x65, 0x78, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x12, 0x43, 0x0a, 0x1d, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x71, 0x75, 0x61, 0x6c, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x1a, 0x63, 0x6f, 0x6c, 0x75, + 0x6d, 0x6e, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x65, 0x67, 0x65, 0x78, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x13, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, + 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x11, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5a, 0x0a, 0x16, 0x74, 
0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, + 0x14, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x72, + 0x65, 0x67, 0x65, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0c, 0x48, 0x00, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x65, 0x67, 0x65, 0x78, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x72, + 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x48, 0x00, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x1b, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x70, + 0x65, 0x72, 0x5f, 0x72, 0x6f, 0x77, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x5f, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x17, 0x63, 0x65, + 0x6c, 0x6c, 0x73, 0x50, 0x65, 0x72, 0x52, 0x6f, 0x77, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x1a, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x70, + 0x65, 0x72, 0x5f, 0x72, 0x6f, 0x77, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x16, 0x63, 0x65, 0x6c, + 0x6c, 0x73, 0x50, 0x65, 0x72, 0x52, 0x6f, 0x77, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x1d, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x70, 0x65, 0x72, + 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x19, 0x63, 0x65, + 0x6c, 0x6c, 0x73, 0x50, 0x65, 0x72, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x38, 0x0a, 0x17, 0x73, 0x74, 0x72, 0x69, 0x70, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, + 0x65, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x15, 0x73, 0x74, 0x72, 0x69, + 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x65, + 0x72, 0x12, 0x38, 0x0a, 0x17, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x15, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x65, 0x72, 0x1a, 0x40, 0x0a, 0x05, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x12, 0x37, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x77, 0x46, 0x69, + 0x6c, 0x74, 
0x65, 0x72, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x45, 0x0a, + 0x0a, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x48, 0x00, 0x52, 0x10, 0x64, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x12, - 0x5d, 0x0a, 0x12, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x66, - 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x46, 0x72, 0x6f, 0x6d, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x48, 0x00, 0x52, 0x10, 0x64, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x54, - 0x0a, 0x0f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x72, 0x6f, - 0x77, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x32, 0x2e, 0x52, 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x07, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x73, 0x1a, 0xd7, 0x01, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x10, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x52, 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0f, 0x70, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x0b, + 0x74, 0x72, 0x75, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x52, 0x0a, 0x74, 0x72, 0x75, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x40, 0x0a, 0x0c, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x52, 0x0b, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x08, + 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xca, 0x0a, 0x0a, 0x08, 0x4d, 0x75, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x08, 0x73, 0x65, 0x74, 0x5f, 0x63, 0x65, 0x6c, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x46, 0x72, 0x6f, 0x6d, - 0x52, 0x6f, 0x77, 0x48, 0x00, 0x52, 0x0d, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x46, 0x72, 0x6f, - 0x6d, 0x52, 0x6f, 0x77, 0x1a, 0x96, 0x01, 
0x0a, 0x07, 0x53, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, - 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x71, 0x75, 0x61, 0x6c, - 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x63, 0x6f, 0x6c, - 0x75, 0x6d, 0x6e, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x10, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x69, 0x63, 0x72, 0x6f, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0xdc, 0x01, - 0x0a, 0x09, 0x41, 0x64, 0x64, 0x54, 0x6f, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x66, - 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x10, - 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x0f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, - 0x65, 0x72, 0x12, 0x37, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2f, 0x0a, 0x05, 0x69, - 0x6e, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0xa1, 0x01, 0x0a, - 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x43, 0x6f, 0x6c, 0x75, 0x6d, - 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x71, 0x75, 0x61, - 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x63, 0x6f, - 0x6c, 0x75, 0x6d, 0x6e, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x41, 0x0a, - 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x1a, 0x33, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x46, 0x61, - 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x6e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x43, 0x65, 
0x6c, 0x6c, 0x48, 0x00, 0x52, + 0x07, 0x73, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x48, 0x0a, 0x0b, 0x61, 0x64, 0x64, 0x5f, + 0x74, 0x6f, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x41, 0x64, 0x64, 0x54, + 0x6f, 0x43, 0x65, 0x6c, 0x6c, 0x48, 0x00, 0x52, 0x09, 0x61, 0x64, 0x64, 0x54, 0x6f, 0x43, 0x65, + 0x6c, 0x6c, 0x12, 0x4e, 0x0a, 0x0d, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x63, + 0x65, 0x6c, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, + 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x72, 0x67, 0x65, 0x54, 0x6f, 0x43, + 0x65, 0x6c, 0x6c, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x54, 0x6f, 0x43, 0x65, + 0x6c, 0x6c, 0x12, 0x5d, 0x0a, 0x12, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x66, 0x72, 0x6f, + 0x6d, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x48, 0x00, 0x52, + 0x10, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x43, 0x6f, 0x6c, 0x75, 0x6d, + 0x6e, 0x12, 0x5d, 0x0a, 0x12, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x66, 0x72, 0x6f, 0x6d, + 0x5f, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x48, 0x00, 0x52, 0x10, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, + 0x12, 0x54, 0x0a, 0x0f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, + 0x72, 0x6f, 0x77, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, + 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x46, 0x72, + 0x6f, 0x6d, 0x52, 0x6f, 0x77, 0x48, 0x00, 0x52, 0x0d, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x46, + 0x72, 0x6f, 0x6d, 0x52, 0x6f, 0x77, 0x1a, 0x96, 0x01, 0x0a, 0x07, 0x53, 0x65, 0x74, 0x43, 0x65, + 0x6c, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x71, 0x75, + 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x63, + 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x29, + 0x0a, 0x10, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x69, 0x63, 0x72, + 0x6f, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 
0x6c, + 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, + 0xdc, 0x01, 0x0a, 0x09, 0x41, 0x64, 0x64, 0x54, 0x6f, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x1f, 0x0a, + 0x0b, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x44, + 0x0a, 0x10, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x51, 0x75, 0x61, 0x6c, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2f, 0x0a, + 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0xde, + 0x01, 0x0a, 0x0b, 0x4d, 0x65, 0x72, 0x67, 0x65, 0x54, 0x6f, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x1f, + 0x0a, 0x0b, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x44, 0x0a, 0x10, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x51, 0x75, 0x61, 0x6c, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2f, + 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x1a, + 0xa1, 0x01, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x43, 0x6f, + 0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x66, 0x61, 0x6d, 0x69, 0x6c, - 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x0f, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x46, - 0x72, 0x6f, 0x6d, 0x52, 0x6f, 0x77, 0x42, 0x0a, 0x0a, 0x08, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0xbb, 0x01, 0x0a, 0x13, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x69, 0x66, - 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x61, - 0x6d, 0x69, 0x6c, 
0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, - 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x51, 0x75, 0x61, - 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0c, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0b, - 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2b, 0x0a, 0x10, 0x69, - 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0f, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x41, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x06, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, - 0x22, 0x4c, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x09, 0x72, 0x6f, 0x77, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x77, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x72, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x5f, - 0x0a, 0x18, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x43, 0x0a, 0x06, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x06, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x22, - 0x72, 0x0a, 0x17, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x41, 0x0a, 0x09, 0x70, 0x61, - 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, + 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, + 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x0f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x12, 0x41, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x1a, 0x33, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x46, 0x72, 0x6f, + 0x6d, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x61, 0x6d, 0x69, 0x6c, + 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x66, 0x61, + 0x6d, 0x69, 0x6c, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x0f, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x6f, 0x77, 0x42, 0x0a, 0x0a, 0x08, 0x6d, 0x75, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbb, 
0x01, 0x0a, 0x13, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, + 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x1f, 0x0a, + 0x0b, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, + 0x0a, 0x10, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, + 0x51, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0c, 0x61, 0x70, 0x70, + 0x65, 0x6e, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x48, + 0x00, 0x52, 0x0b, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2b, + 0x0a, 0x10, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x61, 0x6d, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0f, 0x69, 0x6e, 0x63, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x41, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x06, 0x0a, 0x04, 0x72, + 0x75, 0x6c, 0x65, 0x22, 0x4c, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x61, 0x72, + 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x09, 0x72, 0x6f, 0x77, 0x5f, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, + 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x72, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x22, 0x5f, 0x0a, 0x18, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x74, 0x69, + 0x6e, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x43, 0x0a, + 0x06, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, - 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x42, 0xb5, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x09, - 0x44, 0x61, 0x74, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, - 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x76, 0x32, 0x3b, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0xaa, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x56, 0x32, 0xca, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, - 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x1b, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x76, 0x32, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 
0x6d, 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x06, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x73, 0x22, 0x72, 0x0a, 0x17, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x74, + 0x69, 0x6e, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x41, 0x0a, + 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x61, 0x72, 0x74, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x0d, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x52, 0x0a, 0x0e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x4b, 0x0a, 0x0b, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x3c, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, + 0x6d, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x07, 0x63, + 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x22, 0x63, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x53, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x44, 0x0a, 0x0c, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x48, 0x00, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x42, 0x08, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x3e, 0x0a, 0x09, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x31, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x2f, 0x0a, 0x0e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1d, 0x0a, + 0x0a, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x09, 0x62, 0x61, 0x74, 0x63, 0x68, 0x44, 0x61, 0x74, 0x61, 0x22, 0xc7, 0x01, 0x0a, + 0x10, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x53, 0x65, + 0x74, 0x12, 0x4e, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x5f, + 0x62, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x22, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x74, 0x63, 0x68, 0x48, + 0x00, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x74, 0x63, + 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, 0x65, + 0x64, 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x12, 0x65, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, 0x65, 0x64, 0x42, 0x61, 0x74, + 0x63, 0x68, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, + 0x6c, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x42, 0xb3, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, + 0x32, 0x42, 0x09, 0x44, 0x61, 0x74, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x67, 0x6f, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, + 0x76, 0x32, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x70, 0x62, 0x3b, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x70, 0x62, 0xaa, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x2e, 0x56, 0x32, 0xca, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x56, 0x32, 0xea, 0x02, + 0x1b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, + 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2666,66 +3476,92 @@ func file_google_bigtable_v2_data_proto_rawDescGZIP() []byte { return file_google_bigtable_v2_data_proto_rawDescData } -var file_google_bigtable_v2_data_proto_msgTypes = make([]protoimpl.MessageInfo, 24) -var file_google_bigtable_v2_data_proto_goTypes = []interface{}{ +var file_google_bigtable_v2_data_proto_msgTypes = make([]protoimpl.MessageInfo, 33) +var file_google_bigtable_v2_data_proto_goTypes = []any{ (*Row)(nil), // 0: google.bigtable.v2.Row (*Family)(nil), // 1: google.bigtable.v2.Family (*Column)(nil), // 2: google.bigtable.v2.Column (*Cell)(nil), // 3: google.bigtable.v2.Cell (*Value)(nil), // 4: google.bigtable.v2.Value - (*RowRange)(nil), // 5: google.bigtable.v2.RowRange - (*RowSet)(nil), // 6: google.bigtable.v2.RowSet - (*ColumnRange)(nil), // 7: google.bigtable.v2.ColumnRange - (*TimestampRange)(nil), // 8: google.bigtable.v2.TimestampRange - (*ValueRange)(nil), // 9: google.bigtable.v2.ValueRange - (*RowFilter)(nil), // 10: google.bigtable.v2.RowFilter - (*Mutation)(nil), // 11: google.bigtable.v2.Mutation - (*ReadModifyWriteRule)(nil), // 12: google.bigtable.v2.ReadModifyWriteRule - (*StreamPartition)(nil), // 13: google.bigtable.v2.StreamPartition - (*StreamContinuationTokens)(nil), // 14: google.bigtable.v2.StreamContinuationTokens - (*StreamContinuationToken)(nil), // 15: google.bigtable.v2.StreamContinuationToken - (*RowFilter_Chain)(nil), // 16: 
google.bigtable.v2.RowFilter.Chain - (*RowFilter_Interleave)(nil), // 17: google.bigtable.v2.RowFilter.Interleave - (*RowFilter_Condition)(nil), // 18: google.bigtable.v2.RowFilter.Condition - (*Mutation_SetCell)(nil), // 19: google.bigtable.v2.Mutation.SetCell - (*Mutation_AddToCell)(nil), // 20: google.bigtable.v2.Mutation.AddToCell - (*Mutation_DeleteFromColumn)(nil), // 21: google.bigtable.v2.Mutation.DeleteFromColumn - (*Mutation_DeleteFromFamily)(nil), // 22: google.bigtable.v2.Mutation.DeleteFromFamily - (*Mutation_DeleteFromRow)(nil), // 23: google.bigtable.v2.Mutation.DeleteFromRow + (*ArrayValue)(nil), // 5: google.bigtable.v2.ArrayValue + (*RowRange)(nil), // 6: google.bigtable.v2.RowRange + (*RowSet)(nil), // 7: google.bigtable.v2.RowSet + (*ColumnRange)(nil), // 8: google.bigtable.v2.ColumnRange + (*TimestampRange)(nil), // 9: google.bigtable.v2.TimestampRange + (*ValueRange)(nil), // 10: google.bigtable.v2.ValueRange + (*RowFilter)(nil), // 11: google.bigtable.v2.RowFilter + (*Mutation)(nil), // 12: google.bigtable.v2.Mutation + (*ReadModifyWriteRule)(nil), // 13: google.bigtable.v2.ReadModifyWriteRule + (*StreamPartition)(nil), // 14: google.bigtable.v2.StreamPartition + (*StreamContinuationTokens)(nil), // 15: google.bigtable.v2.StreamContinuationTokens + (*StreamContinuationToken)(nil), // 16: google.bigtable.v2.StreamContinuationToken + (*ProtoFormat)(nil), // 17: google.bigtable.v2.ProtoFormat + (*ColumnMetadata)(nil), // 18: google.bigtable.v2.ColumnMetadata + (*ProtoSchema)(nil), // 19: google.bigtable.v2.ProtoSchema + (*ResultSetMetadata)(nil), // 20: google.bigtable.v2.ResultSetMetadata + (*ProtoRows)(nil), // 21: google.bigtable.v2.ProtoRows + (*ProtoRowsBatch)(nil), // 22: google.bigtable.v2.ProtoRowsBatch + (*PartialResultSet)(nil), // 23: google.bigtable.v2.PartialResultSet + (*RowFilter_Chain)(nil), // 24: google.bigtable.v2.RowFilter.Chain + (*RowFilter_Interleave)(nil), // 25: google.bigtable.v2.RowFilter.Interleave + (*RowFilter_Condition)(nil), // 26: google.bigtable.v2.RowFilter.Condition + (*Mutation_SetCell)(nil), // 27: google.bigtable.v2.Mutation.SetCell + (*Mutation_AddToCell)(nil), // 28: google.bigtable.v2.Mutation.AddToCell + (*Mutation_MergeToCell)(nil), // 29: google.bigtable.v2.Mutation.MergeToCell + (*Mutation_DeleteFromColumn)(nil), // 30: google.bigtable.v2.Mutation.DeleteFromColumn + (*Mutation_DeleteFromFamily)(nil), // 31: google.bigtable.v2.Mutation.DeleteFromFamily + (*Mutation_DeleteFromRow)(nil), // 32: google.bigtable.v2.Mutation.DeleteFromRow + (*Type)(nil), // 33: google.bigtable.v2.Type + (*timestamppb.Timestamp)(nil), // 34: google.protobuf.Timestamp + (*date.Date)(nil), // 35: google.type.Date } var file_google_bigtable_v2_data_proto_depIdxs = []int32{ 1, // 0: google.bigtable.v2.Row.families:type_name -> google.bigtable.v2.Family 2, // 1: google.bigtable.v2.Family.columns:type_name -> google.bigtable.v2.Column 3, // 2: google.bigtable.v2.Column.cells:type_name -> google.bigtable.v2.Cell - 5, // 3: google.bigtable.v2.RowSet.row_ranges:type_name -> google.bigtable.v2.RowRange - 16, // 4: google.bigtable.v2.RowFilter.chain:type_name -> google.bigtable.v2.RowFilter.Chain - 17, // 5: google.bigtable.v2.RowFilter.interleave:type_name -> google.bigtable.v2.RowFilter.Interleave - 18, // 6: google.bigtable.v2.RowFilter.condition:type_name -> google.bigtable.v2.RowFilter.Condition - 7, // 7: google.bigtable.v2.RowFilter.column_range_filter:type_name -> google.bigtable.v2.ColumnRange - 8, // 8: 
google.bigtable.v2.RowFilter.timestamp_range_filter:type_name -> google.bigtable.v2.TimestampRange - 9, // 9: google.bigtable.v2.RowFilter.value_range_filter:type_name -> google.bigtable.v2.ValueRange - 19, // 10: google.bigtable.v2.Mutation.set_cell:type_name -> google.bigtable.v2.Mutation.SetCell - 20, // 11: google.bigtable.v2.Mutation.add_to_cell:type_name -> google.bigtable.v2.Mutation.AddToCell - 21, // 12: google.bigtable.v2.Mutation.delete_from_column:type_name -> google.bigtable.v2.Mutation.DeleteFromColumn - 22, // 13: google.bigtable.v2.Mutation.delete_from_family:type_name -> google.bigtable.v2.Mutation.DeleteFromFamily - 23, // 14: google.bigtable.v2.Mutation.delete_from_row:type_name -> google.bigtable.v2.Mutation.DeleteFromRow - 5, // 15: google.bigtable.v2.StreamPartition.row_range:type_name -> google.bigtable.v2.RowRange - 15, // 16: google.bigtable.v2.StreamContinuationTokens.tokens:type_name -> google.bigtable.v2.StreamContinuationToken - 13, // 17: google.bigtable.v2.StreamContinuationToken.partition:type_name -> google.bigtable.v2.StreamPartition - 10, // 18: google.bigtable.v2.RowFilter.Chain.filters:type_name -> google.bigtable.v2.RowFilter - 10, // 19: google.bigtable.v2.RowFilter.Interleave.filters:type_name -> google.bigtable.v2.RowFilter - 10, // 20: google.bigtable.v2.RowFilter.Condition.predicate_filter:type_name -> google.bigtable.v2.RowFilter - 10, // 21: google.bigtable.v2.RowFilter.Condition.true_filter:type_name -> google.bigtable.v2.RowFilter - 10, // 22: google.bigtable.v2.RowFilter.Condition.false_filter:type_name -> google.bigtable.v2.RowFilter - 4, // 23: google.bigtable.v2.Mutation.AddToCell.column_qualifier:type_name -> google.bigtable.v2.Value - 4, // 24: google.bigtable.v2.Mutation.AddToCell.timestamp:type_name -> google.bigtable.v2.Value - 4, // 25: google.bigtable.v2.Mutation.AddToCell.input:type_name -> google.bigtable.v2.Value - 8, // 26: google.bigtable.v2.Mutation.DeleteFromColumn.time_range:type_name -> google.bigtable.v2.TimestampRange - 27, // [27:27] is the sub-list for method output_type - 27, // [27:27] is the sub-list for method input_type - 27, // [27:27] is the sub-list for extension type_name - 27, // [27:27] is the sub-list for extension extendee - 0, // [0:27] is the sub-list for field type_name + 33, // 3: google.bigtable.v2.Value.type:type_name -> google.bigtable.v2.Type + 34, // 4: google.bigtable.v2.Value.timestamp_value:type_name -> google.protobuf.Timestamp + 35, // 5: google.bigtable.v2.Value.date_value:type_name -> google.type.Date + 5, // 6: google.bigtable.v2.Value.array_value:type_name -> google.bigtable.v2.ArrayValue + 4, // 7: google.bigtable.v2.ArrayValue.values:type_name -> google.bigtable.v2.Value + 6, // 8: google.bigtable.v2.RowSet.row_ranges:type_name -> google.bigtable.v2.RowRange + 24, // 9: google.bigtable.v2.RowFilter.chain:type_name -> google.bigtable.v2.RowFilter.Chain + 25, // 10: google.bigtable.v2.RowFilter.interleave:type_name -> google.bigtable.v2.RowFilter.Interleave + 26, // 11: google.bigtable.v2.RowFilter.condition:type_name -> google.bigtable.v2.RowFilter.Condition + 8, // 12: google.bigtable.v2.RowFilter.column_range_filter:type_name -> google.bigtable.v2.ColumnRange + 9, // 13: google.bigtable.v2.RowFilter.timestamp_range_filter:type_name -> google.bigtable.v2.TimestampRange + 10, // 14: google.bigtable.v2.RowFilter.value_range_filter:type_name -> google.bigtable.v2.ValueRange + 27, // 15: google.bigtable.v2.Mutation.set_cell:type_name -> google.bigtable.v2.Mutation.SetCell + 28, // 16: 
google.bigtable.v2.Mutation.add_to_cell:type_name -> google.bigtable.v2.Mutation.AddToCell + 29, // 17: google.bigtable.v2.Mutation.merge_to_cell:type_name -> google.bigtable.v2.Mutation.MergeToCell + 30, // 18: google.bigtable.v2.Mutation.delete_from_column:type_name -> google.bigtable.v2.Mutation.DeleteFromColumn + 31, // 19: google.bigtable.v2.Mutation.delete_from_family:type_name -> google.bigtable.v2.Mutation.DeleteFromFamily + 32, // 20: google.bigtable.v2.Mutation.delete_from_row:type_name -> google.bigtable.v2.Mutation.DeleteFromRow + 6, // 21: google.bigtable.v2.StreamPartition.row_range:type_name -> google.bigtable.v2.RowRange + 16, // 22: google.bigtable.v2.StreamContinuationTokens.tokens:type_name -> google.bigtable.v2.StreamContinuationToken + 14, // 23: google.bigtable.v2.StreamContinuationToken.partition:type_name -> google.bigtable.v2.StreamPartition + 33, // 24: google.bigtable.v2.ColumnMetadata.type:type_name -> google.bigtable.v2.Type + 18, // 25: google.bigtable.v2.ProtoSchema.columns:type_name -> google.bigtable.v2.ColumnMetadata + 19, // 26: google.bigtable.v2.ResultSetMetadata.proto_schema:type_name -> google.bigtable.v2.ProtoSchema + 4, // 27: google.bigtable.v2.ProtoRows.values:type_name -> google.bigtable.v2.Value + 22, // 28: google.bigtable.v2.PartialResultSet.proto_rows_batch:type_name -> google.bigtable.v2.ProtoRowsBatch + 11, // 29: google.bigtable.v2.RowFilter.Chain.filters:type_name -> google.bigtable.v2.RowFilter + 11, // 30: google.bigtable.v2.RowFilter.Interleave.filters:type_name -> google.bigtable.v2.RowFilter + 11, // 31: google.bigtable.v2.RowFilter.Condition.predicate_filter:type_name -> google.bigtable.v2.RowFilter + 11, // 32: google.bigtable.v2.RowFilter.Condition.true_filter:type_name -> google.bigtable.v2.RowFilter + 11, // 33: google.bigtable.v2.RowFilter.Condition.false_filter:type_name -> google.bigtable.v2.RowFilter + 4, // 34: google.bigtable.v2.Mutation.AddToCell.column_qualifier:type_name -> google.bigtable.v2.Value + 4, // 35: google.bigtable.v2.Mutation.AddToCell.timestamp:type_name -> google.bigtable.v2.Value + 4, // 36: google.bigtable.v2.Mutation.AddToCell.input:type_name -> google.bigtable.v2.Value + 4, // 37: google.bigtable.v2.Mutation.MergeToCell.column_qualifier:type_name -> google.bigtable.v2.Value + 4, // 38: google.bigtable.v2.Mutation.MergeToCell.timestamp:type_name -> google.bigtable.v2.Value + 4, // 39: google.bigtable.v2.Mutation.MergeToCell.input:type_name -> google.bigtable.v2.Value + 9, // 40: google.bigtable.v2.Mutation.DeleteFromColumn.time_range:type_name -> google.bigtable.v2.TimestampRange + 41, // [41:41] is the sub-list for method output_type + 41, // [41:41] is the sub-list for method input_type + 41, // [41:41] is the sub-list for extension type_name + 41, // [41:41] is the sub-list for extension extendee + 0, // [0:41] is the sub-list for field type_name } func init() { file_google_bigtable_v2_data_proto_init() } @@ -2733,8 +3569,9 @@ func file_google_bigtable_v2_data_proto_init() { if File_google_bigtable_v2_data_proto != nil { return } + file_google_bigtable_v2_types_proto_init() if !protoimpl.UnsafeEnabled { - file_google_bigtable_v2_data_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_data_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Row); i { case 0: return &v.state @@ -2746,7 +3583,7 @@ func file_google_bigtable_v2_data_proto_init() { return nil } } - file_google_bigtable_v2_data_proto_msgTypes[1].Exporter = func(v interface{}, i 
int) interface{} { + file_google_bigtable_v2_data_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Family); i { case 0: return &v.state @@ -2758,7 +3595,7 @@ func file_google_bigtable_v2_data_proto_init() { return nil } } - file_google_bigtable_v2_data_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_data_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Column); i { case 0: return &v.state @@ -2770,7 +3607,7 @@ func file_google_bigtable_v2_data_proto_init() { return nil } } - file_google_bigtable_v2_data_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_data_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*Cell); i { case 0: return &v.state @@ -2782,7 +3619,7 @@ func file_google_bigtable_v2_data_proto_init() { return nil } } - file_google_bigtable_v2_data_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_data_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*Value); i { case 0: return &v.state @@ -2794,7 +3631,19 @@ func file_google_bigtable_v2_data_proto_init() { return nil } } - file_google_bigtable_v2_data_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_data_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*ArrayValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_data_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*RowRange); i { case 0: return &v.state @@ -2806,7 +3655,7 @@ func file_google_bigtable_v2_data_proto_init() { return nil } } - file_google_bigtable_v2_data_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_data_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*RowSet); i { case 0: return &v.state @@ -2818,7 +3667,7 @@ func file_google_bigtable_v2_data_proto_init() { return nil } } - file_google_bigtable_v2_data_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_data_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*ColumnRange); i { case 0: return &v.state @@ -2830,7 +3679,7 @@ func file_google_bigtable_v2_data_proto_init() { return nil } } - file_google_bigtable_v2_data_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_data_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*TimestampRange); i { case 0: return &v.state @@ -2842,7 +3691,7 @@ func file_google_bigtable_v2_data_proto_init() { return nil } } - file_google_bigtable_v2_data_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_data_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*ValueRange); i { case 0: return &v.state @@ -2854,7 +3703,7 @@ func file_google_bigtable_v2_data_proto_init() { return nil } } - file_google_bigtable_v2_data_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_data_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*RowFilter); i { case 0: return &v.state @@ -2866,7 +3715,7 @@ func file_google_bigtable_v2_data_proto_init() { return nil } } - file_google_bigtable_v2_data_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + 
file_google_bigtable_v2_data_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*Mutation); i { case 0: return &v.state @@ -2878,7 +3727,7 @@ func file_google_bigtable_v2_data_proto_init() { return nil } } - file_google_bigtable_v2_data_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_data_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*ReadModifyWriteRule); i { case 0: return &v.state @@ -2890,7 +3739,7 @@ func file_google_bigtable_v2_data_proto_init() { return nil } } - file_google_bigtable_v2_data_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_data_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*StreamPartition); i { case 0: return &v.state @@ -2902,7 +3751,7 @@ func file_google_bigtable_v2_data_proto_init() { return nil } } - file_google_bigtable_v2_data_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_data_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*StreamContinuationTokens); i { case 0: return &v.state @@ -2914,7 +3763,7 @@ func file_google_bigtable_v2_data_proto_init() { return nil } } - file_google_bigtable_v2_data_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_data_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*StreamContinuationToken); i { case 0: return &v.state @@ -2926,7 +3775,91 @@ func file_google_bigtable_v2_data_proto_init() { return nil } } - file_google_bigtable_v2_data_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_data_proto_msgTypes[17].Exporter = func(v any, i int) any { + switch v := v.(*ProtoFormat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_data_proto_msgTypes[18].Exporter = func(v any, i int) any { + switch v := v.(*ColumnMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_data_proto_msgTypes[19].Exporter = func(v any, i int) any { + switch v := v.(*ProtoSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_data_proto_msgTypes[20].Exporter = func(v any, i int) any { + switch v := v.(*ResultSetMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_data_proto_msgTypes[21].Exporter = func(v any, i int) any { + switch v := v.(*ProtoRows); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_data_proto_msgTypes[22].Exporter = func(v any, i int) any { + switch v := v.(*ProtoRowsBatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_data_proto_msgTypes[23].Exporter = func(v any, i int) any { + switch v := v.(*PartialResultSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_data_proto_msgTypes[24].Exporter = func(v any, i int) any { switch v 
:= v.(*RowFilter_Chain); i { case 0: return &v.state @@ -2938,7 +3871,7 @@ func file_google_bigtable_v2_data_proto_init() { return nil } } - file_google_bigtable_v2_data_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_data_proto_msgTypes[25].Exporter = func(v any, i int) any { switch v := v.(*RowFilter_Interleave); i { case 0: return &v.state @@ -2950,7 +3883,7 @@ func file_google_bigtable_v2_data_proto_init() { return nil } } - file_google_bigtable_v2_data_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_data_proto_msgTypes[26].Exporter = func(v any, i int) any { switch v := v.(*RowFilter_Condition); i { case 0: return &v.state @@ -2962,7 +3895,7 @@ func file_google_bigtable_v2_data_proto_init() { return nil } } - file_google_bigtable_v2_data_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_data_proto_msgTypes[27].Exporter = func(v any, i int) any { switch v := v.(*Mutation_SetCell); i { case 0: return &v.state @@ -2974,7 +3907,7 @@ func file_google_bigtable_v2_data_proto_init() { return nil } } - file_google_bigtable_v2_data_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_data_proto_msgTypes[28].Exporter = func(v any, i int) any { switch v := v.(*Mutation_AddToCell); i { case 0: return &v.state @@ -2986,7 +3919,19 @@ func file_google_bigtable_v2_data_proto_init() { return nil } } - file_google_bigtable_v2_data_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_data_proto_msgTypes[29].Exporter = func(v any, i int) any { + switch v := v.(*Mutation_MergeToCell); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_data_proto_msgTypes[30].Exporter = func(v any, i int) any { switch v := v.(*Mutation_DeleteFromColumn); i { case 0: return &v.state @@ -2998,7 +3943,7 @@ func file_google_bigtable_v2_data_proto_init() { return nil } } - file_google_bigtable_v2_data_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_data_proto_msgTypes[31].Exporter = func(v any, i int) any { switch v := v.(*Mutation_DeleteFromFamily); i { case 0: return &v.state @@ -3010,7 +3955,7 @@ func file_google_bigtable_v2_data_proto_init() { return nil } } - file_google_bigtable_v2_data_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_data_proto_msgTypes[32].Exporter = func(v any, i int) any { switch v := v.(*Mutation_DeleteFromRow); i { case 0: return &v.state @@ -3023,30 +3968,37 @@ func file_google_bigtable_v2_data_proto_init() { } } } - file_google_bigtable_v2_data_proto_msgTypes[4].OneofWrappers = []interface{}{ + file_google_bigtable_v2_data_proto_msgTypes[4].OneofWrappers = []any{ (*Value_RawValue)(nil), (*Value_RawTimestampMicros)(nil), + (*Value_BytesValue)(nil), + (*Value_StringValue)(nil), (*Value_IntValue)(nil), + (*Value_BoolValue)(nil), + (*Value_FloatValue)(nil), + (*Value_TimestampValue)(nil), + (*Value_DateValue)(nil), + (*Value_ArrayValue)(nil), } - file_google_bigtable_v2_data_proto_msgTypes[5].OneofWrappers = []interface{}{ + file_google_bigtable_v2_data_proto_msgTypes[6].OneofWrappers = []any{ (*RowRange_StartKeyClosed)(nil), (*RowRange_StartKeyOpen)(nil), (*RowRange_EndKeyOpen)(nil), (*RowRange_EndKeyClosed)(nil), } - 
file_google_bigtable_v2_data_proto_msgTypes[7].OneofWrappers = []interface{}{ + file_google_bigtable_v2_data_proto_msgTypes[8].OneofWrappers = []any{ (*ColumnRange_StartQualifierClosed)(nil), (*ColumnRange_StartQualifierOpen)(nil), (*ColumnRange_EndQualifierClosed)(nil), (*ColumnRange_EndQualifierOpen)(nil), } - file_google_bigtable_v2_data_proto_msgTypes[9].OneofWrappers = []interface{}{ + file_google_bigtable_v2_data_proto_msgTypes[10].OneofWrappers = []any{ (*ValueRange_StartValueClosed)(nil), (*ValueRange_StartValueOpen)(nil), (*ValueRange_EndValueClosed)(nil), (*ValueRange_EndValueOpen)(nil), } - file_google_bigtable_v2_data_proto_msgTypes[10].OneofWrappers = []interface{}{ + file_google_bigtable_v2_data_proto_msgTypes[11].OneofWrappers = []any{ (*RowFilter_Chain_)(nil), (*RowFilter_Interleave_)(nil), (*RowFilter_Condition_)(nil), @@ -3067,24 +4019,31 @@ func file_google_bigtable_v2_data_proto_init() { (*RowFilter_StripValueTransformer)(nil), (*RowFilter_ApplyLabelTransformer)(nil), } - file_google_bigtable_v2_data_proto_msgTypes[11].OneofWrappers = []interface{}{ + file_google_bigtable_v2_data_proto_msgTypes[12].OneofWrappers = []any{ (*Mutation_SetCell_)(nil), (*Mutation_AddToCell_)(nil), + (*Mutation_MergeToCell_)(nil), (*Mutation_DeleteFromColumn_)(nil), (*Mutation_DeleteFromFamily_)(nil), (*Mutation_DeleteFromRow_)(nil), } - file_google_bigtable_v2_data_proto_msgTypes[12].OneofWrappers = []interface{}{ + file_google_bigtable_v2_data_proto_msgTypes[13].OneofWrappers = []any{ (*ReadModifyWriteRule_AppendValue)(nil), (*ReadModifyWriteRule_IncrementAmount)(nil), } + file_google_bigtable_v2_data_proto_msgTypes[20].OneofWrappers = []any{ + (*ResultSetMetadata_ProtoSchema)(nil), + } + file_google_bigtable_v2_data_proto_msgTypes[23].OneofWrappers = []any{ + (*PartialResultSet_ProtoRowsBatch)(nil), + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_bigtable_v2_data_proto_rawDesc, NumEnums: 0, - NumMessages: 24, + NumMessages: 33, NumExtensions: 0, NumServices: 0, }, diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/v2/feature_flags.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/feature_flags.pb.go similarity index 89% rename from terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/v2/feature_flags.pb.go rename to terraform/providers/google/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/feature_flags.pb.go index d17396cc362..3ecb550dd56 100644 --- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/v2/feature_flags.pb.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/feature_flags.pb.go @@ -14,11 +14,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 -// protoc v4.24.4 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/bigtable/v2/feature_flags.proto -package bigtable +package bigtablepb import ( reflect "reflect" @@ -182,19 +182,19 @@ var file_google_bigtable_v2_feature_flags_proto_rawDesc = []byte{ 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x69, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x42, 0xbd, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x42, 0xbb, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x11, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x3a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x2f, 0x76, 0x32, 0x3b, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0xaa, 0x02, 0x18, 0x47, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, - 0x56, 0x32, 0xea, 0x02, 0x1b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, - 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x32, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x5a, 0x38, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, + 0x70, 0x69, 0x76, 0x32, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x70, 0x62, 0x3b, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x70, 0x62, 0xaa, 0x02, 0x18, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x56, 0x32, + 0xea, 0x02, 0x1b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -210,7 +210,7 @@ func file_google_bigtable_v2_feature_flags_proto_rawDescGZIP() []byte { } var file_google_bigtable_v2_feature_flags_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_bigtable_v2_feature_flags_proto_goTypes = []interface{}{ +var file_google_bigtable_v2_feature_flags_proto_goTypes = []any{ (*FeatureFlags)(nil), // 0: google.bigtable.v2.FeatureFlags } var file_google_bigtable_v2_feature_flags_proto_depIdxs = []int32{ @@ -227,7 +227,7 @@ func file_google_bigtable_v2_feature_flags_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_bigtable_v2_feature_flags_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + 
file_google_bigtable_v2_feature_flags_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*FeatureFlags); i { case 0: return &v.state diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/v2/request_stats.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/request_stats.pb.go similarity index 93% rename from terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/v2/request_stats.pb.go rename to terraform/providers/google/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/request_stats.pb.go index c4702669ff4..a3b26670fe4 100644 --- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/v2/request_stats.pb.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/request_stats.pb.go @@ -14,11 +14,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v4.24.4 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/bigtable/v2/request_stats.proto -package bigtable +package bigtablepb import ( reflect "reflect" @@ -371,19 +371,19 @@ var file_google_bigtable_v2_request_stats_proto_rawDesc = []byte{ 0x32, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x56, 0x69, 0x65, 0x77, 0x48, 0x00, 0x52, 0x11, 0x66, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x56, 0x69, 0x65, 0x77, 0x42, 0x0c, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, - 0x73, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x42, 0xbd, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x73, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x42, 0xbb, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x11, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x76, 0x32, 0x3b, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0xaa, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, - 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x18, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x1b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x70, 0x62, 0x3b, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x70, 0x62, + 0xaa, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x18, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 
0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x1b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, + 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -399,7 +399,7 @@ func file_google_bigtable_v2_request_stats_proto_rawDescGZIP() []byte { } var file_google_bigtable_v2_request_stats_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_google_bigtable_v2_request_stats_proto_goTypes = []interface{}{ +var file_google_bigtable_v2_request_stats_proto_goTypes = []any{ (*ReadIterationStats)(nil), // 0: google.bigtable.v2.ReadIterationStats (*RequestLatencyStats)(nil), // 1: google.bigtable.v2.RequestLatencyStats (*FullReadStatsView)(nil), // 2: google.bigtable.v2.FullReadStatsView @@ -424,7 +424,7 @@ func file_google_bigtable_v2_request_stats_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_bigtable_v2_request_stats_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_request_stats_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*ReadIterationStats); i { case 0: return &v.state @@ -436,7 +436,7 @@ func file_google_bigtable_v2_request_stats_proto_init() { return nil } } - file_google_bigtable_v2_request_stats_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_request_stats_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*RequestLatencyStats); i { case 0: return &v.state @@ -448,7 +448,7 @@ func file_google_bigtable_v2_request_stats_proto_init() { return nil } } - file_google_bigtable_v2_request_stats_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_request_stats_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*FullReadStatsView); i { case 0: return &v.state @@ -460,7 +460,7 @@ func file_google_bigtable_v2_request_stats_proto_init() { return nil } } - file_google_bigtable_v2_request_stats_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_request_stats_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*RequestStats); i { case 0: return &v.state @@ -473,7 +473,7 @@ func file_google_bigtable_v2_request_stats_proto_init() { } } } - file_google_bigtable_v2_request_stats_proto_msgTypes[3].OneofWrappers = []interface{}{ + file_google_bigtable_v2_request_stats_proto_msgTypes[3].OneofWrappers = []any{ (*RequestStats_FullReadStatsView)(nil), } type x struct{} diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/v2/response_params.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/response_params.pb.go similarity index 85% rename from terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/v2/response_params.pb.go rename to terraform/providers/google/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/response_params.pb.go index ce3362f4b51..54caa744fae 100644 --- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/v2/response_params.pb.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/response_params.pb.go @@ -14,11 +14,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 -// protoc v4.24.4 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/bigtable/v2/response_params.proto -package bigtable +package bigtablepb import ( reflect "reflect" @@ -110,20 +110,20 @@ var file_google_bigtable_v2_response_params_proto_rawDesc = []byte{ 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x42, 0x0d, - 0x0a, 0x0b, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x42, 0xbf, 0x01, + 0x0a, 0x0b, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x42, 0xbd, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x13, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x3a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, - 0x76, 0x32, 0x3b, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0xaa, 0x02, 0x18, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, - 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x56, - 0x32, 0xea, 0x02, 0x1b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, - 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x32, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x38, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x70, 0x62, 0x3b, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x70, 0x62, 0xaa, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x56, 0x32, 0xea, + 0x02, 0x1b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, + 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -139,7 +139,7 @@ func file_google_bigtable_v2_response_params_proto_rawDescGZIP() []byte { } var file_google_bigtable_v2_response_params_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_bigtable_v2_response_params_proto_goTypes = []interface{}{ +var file_google_bigtable_v2_response_params_proto_goTypes = []any{ (*ResponseParams)(nil), // 0: google.bigtable.v2.ResponseParams } var file_google_bigtable_v2_response_params_proto_depIdxs = []int32{ @@ -156,7 +156,7 @@ func file_google_bigtable_v2_response_params_proto_init() { return } if !protoimpl.UnsafeEnabled { - 
file_google_bigtable_v2_response_params_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_v2_response_params_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*ResponseParams); i { case 0: return &v.state @@ -169,7 +169,7 @@ func file_google_bigtable_v2_response_params_proto_init() { } } } - file_google_bigtable_v2_response_params_proto_msgTypes[0].OneofWrappers = []interface{}{} + file_google_bigtable_v2_response_params_proto_msgTypes[0].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/types.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/types.pb.go new file mode 100644 index 00000000000..1090862843a --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/types.pb.go @@ -0,0 +1,2211 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/bigtable/v2/types.proto + +package bigtablepb + +import ( + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// `Type` represents the type of data that is written to, read from, or stored +// in Bigtable. It is heavily based on the GoogleSQL standard to help maintain +// familiarity and consistency across products and features. +// +// For compatibility with Bigtable's existing untyped APIs, each `Type` includes +// an `Encoding` which describes how to convert to/from the underlying data. +// +// Each encoding also defines the following properties: +// +// - Order-preserving: Does the encoded value sort consistently with the +// original typed value? Note that Bigtable will always sort data based on +// the raw encoded value, *not* the decoded type. +// - Example: BYTES values sort in the same order as their raw encodings. +// - Counterexample: Encoding INT64 as a fixed-width decimal string does +// *not* preserve sort order when dealing with negative numbers. +// `INT64(1) > INT64(-1)`, but `STRING("-00001") > STRING("00001)`. +// - Self-delimiting: If we concatenate two encoded values, can we always tell +// where the first one ends and the second one begins? +// - Example: If we encode INT64s to fixed-width STRINGs, the first value +// will always contain exactly N digits, possibly preceded by a sign. 
+// - Counterexample: If we concatenate two UTF-8 encoded STRINGs, we have +// no way to tell where the first one ends. +// - Compatibility: Which other systems have matching encoding schemes? For +// example, does this encoding have a GoogleSQL equivalent? HBase? Java? +type Type struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The kind of type that this represents. + // + // Types that are assignable to Kind: + // + // *Type_BytesType + // *Type_StringType + // *Type_Int64Type + // *Type_Float32Type + // *Type_Float64Type + // *Type_BoolType + // *Type_TimestampType + // *Type_DateType + // *Type_AggregateType + // *Type_StructType + // *Type_ArrayType + // *Type_MapType + Kind isType_Kind `protobuf_oneof:"kind"` +} + +func (x *Type) Reset() { + *x = Type{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type) ProtoMessage() {} + +func (x *Type) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type.ProtoReflect.Descriptor instead. +func (*Type) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0} +} + +func (m *Type) GetKind() isType_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *Type) GetBytesType() *Type_Bytes { + if x, ok := x.GetKind().(*Type_BytesType); ok { + return x.BytesType + } + return nil +} + +func (x *Type) GetStringType() *Type_String { + if x, ok := x.GetKind().(*Type_StringType); ok { + return x.StringType + } + return nil +} + +func (x *Type) GetInt64Type() *Type_Int64 { + if x, ok := x.GetKind().(*Type_Int64Type); ok { + return x.Int64Type + } + return nil +} + +func (x *Type) GetFloat32Type() *Type_Float32 { + if x, ok := x.GetKind().(*Type_Float32Type); ok { + return x.Float32Type + } + return nil +} + +func (x *Type) GetFloat64Type() *Type_Float64 { + if x, ok := x.GetKind().(*Type_Float64Type); ok { + return x.Float64Type + } + return nil +} + +func (x *Type) GetBoolType() *Type_Bool { + if x, ok := x.GetKind().(*Type_BoolType); ok { + return x.BoolType + } + return nil +} + +func (x *Type) GetTimestampType() *Type_Timestamp { + if x, ok := x.GetKind().(*Type_TimestampType); ok { + return x.TimestampType + } + return nil +} + +func (x *Type) GetDateType() *Type_Date { + if x, ok := x.GetKind().(*Type_DateType); ok { + return x.DateType + } + return nil +} + +func (x *Type) GetAggregateType() *Type_Aggregate { + if x, ok := x.GetKind().(*Type_AggregateType); ok { + return x.AggregateType + } + return nil +} + +func (x *Type) GetStructType() *Type_Struct { + if x, ok := x.GetKind().(*Type_StructType); ok { + return x.StructType + } + return nil +} + +func (x *Type) GetArrayType() *Type_Array { + if x, ok := x.GetKind().(*Type_ArrayType); ok { + return x.ArrayType + } + return nil +} + +func (x *Type) GetMapType() *Type_Map { + if x, ok := x.GetKind().(*Type_MapType); ok { + return x.MapType + } + return nil +} + +type isType_Kind interface { + isType_Kind() +} + +type Type_BytesType struct { + // Bytes + BytesType *Type_Bytes 
`protobuf:"bytes,1,opt,name=bytes_type,json=bytesType,proto3,oneof"` +} + +type Type_StringType struct { + // String + StringType *Type_String `protobuf:"bytes,2,opt,name=string_type,json=stringType,proto3,oneof"` +} + +type Type_Int64Type struct { + // Int64 + Int64Type *Type_Int64 `protobuf:"bytes,5,opt,name=int64_type,json=int64Type,proto3,oneof"` +} + +type Type_Float32Type struct { + // Float32 + Float32Type *Type_Float32 `protobuf:"bytes,12,opt,name=float32_type,json=float32Type,proto3,oneof"` +} + +type Type_Float64Type struct { + // Float64 + Float64Type *Type_Float64 `protobuf:"bytes,9,opt,name=float64_type,json=float64Type,proto3,oneof"` +} + +type Type_BoolType struct { + // Bool + BoolType *Type_Bool `protobuf:"bytes,8,opt,name=bool_type,json=boolType,proto3,oneof"` +} + +type Type_TimestampType struct { + // Timestamp + TimestampType *Type_Timestamp `protobuf:"bytes,10,opt,name=timestamp_type,json=timestampType,proto3,oneof"` +} + +type Type_DateType struct { + // Date + DateType *Type_Date `protobuf:"bytes,11,opt,name=date_type,json=dateType,proto3,oneof"` +} + +type Type_AggregateType struct { + // Aggregate + AggregateType *Type_Aggregate `protobuf:"bytes,6,opt,name=aggregate_type,json=aggregateType,proto3,oneof"` +} + +type Type_StructType struct { + // Struct + StructType *Type_Struct `protobuf:"bytes,7,opt,name=struct_type,json=structType,proto3,oneof"` +} + +type Type_ArrayType struct { + // Array + ArrayType *Type_Array `protobuf:"bytes,3,opt,name=array_type,json=arrayType,proto3,oneof"` +} + +type Type_MapType struct { + // Map + MapType *Type_Map `protobuf:"bytes,4,opt,name=map_type,json=mapType,proto3,oneof"` +} + +func (*Type_BytesType) isType_Kind() {} + +func (*Type_StringType) isType_Kind() {} + +func (*Type_Int64Type) isType_Kind() {} + +func (*Type_Float32Type) isType_Kind() {} + +func (*Type_Float64Type) isType_Kind() {} + +func (*Type_BoolType) isType_Kind() {} + +func (*Type_TimestampType) isType_Kind() {} + +func (*Type_DateType) isType_Kind() {} + +func (*Type_AggregateType) isType_Kind() {} + +func (*Type_StructType) isType_Kind() {} + +func (*Type_ArrayType) isType_Kind() {} + +func (*Type_MapType) isType_Kind() {} + +// Bytes +// Values of type `Bytes` are stored in `Value.bytes_value`. +type Type_Bytes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The encoding to use when converting to/from lower level types. + Encoding *Type_Bytes_Encoding `protobuf:"bytes,1,opt,name=encoding,proto3" json:"encoding,omitempty"` +} + +func (x *Type_Bytes) Reset() { + *x = Type_Bytes{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Bytes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Bytes) ProtoMessage() {} + +func (x *Type_Bytes) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Bytes.ProtoReflect.Descriptor instead. 
+func (*Type_Bytes) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Type_Bytes) GetEncoding() *Type_Bytes_Encoding { + if x != nil { + return x.Encoding + } + return nil +} + +// String +// Values of type `String` are stored in `Value.string_value`. +type Type_String struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The encoding to use when converting to/from lower level types. + Encoding *Type_String_Encoding `protobuf:"bytes,1,opt,name=encoding,proto3" json:"encoding,omitempty"` +} + +func (x *Type_String) Reset() { + *x = Type_String{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_String) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_String) ProtoMessage() {} + +func (x *Type_String) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_String.ProtoReflect.Descriptor instead. +func (*Type_String) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *Type_String) GetEncoding() *Type_String_Encoding { + if x != nil { + return x.Encoding + } + return nil +} + +// Int64 +// Values of type `Int64` are stored in `Value.int_value`. +type Type_Int64 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The encoding to use when converting to/from lower level types. + Encoding *Type_Int64_Encoding `protobuf:"bytes,1,opt,name=encoding,proto3" json:"encoding,omitempty"` +} + +func (x *Type_Int64) Reset() { + *x = Type_Int64{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Int64) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Int64) ProtoMessage() {} + +func (x *Type_Int64) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Int64.ProtoReflect.Descriptor instead. +func (*Type_Int64) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0, 2} +} + +func (x *Type_Int64) GetEncoding() *Type_Int64_Encoding { + if x != nil { + return x.Encoding + } + return nil +} + +// bool +// Values of type `Bool` are stored in `Value.bool_value`. 
+type Type_Bool struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Type_Bool) Reset() { + *x = Type_Bool{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Bool) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Bool) ProtoMessage() {} + +func (x *Type_Bool) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Bool.ProtoReflect.Descriptor instead. +func (*Type_Bool) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0, 3} +} + +// Float32 +// Values of type `Float32` are stored in `Value.float_value`. +type Type_Float32 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Type_Float32) Reset() { + *x = Type_Float32{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Float32) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Float32) ProtoMessage() {} + +func (x *Type_Float32) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Float32.ProtoReflect.Descriptor instead. +func (*Type_Float32) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0, 4} +} + +// Float64 +// Values of type `Float64` are stored in `Value.float_value`. +type Type_Float64 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Type_Float64) Reset() { + *x = Type_Float64{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Float64) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Float64) ProtoMessage() {} + +func (x *Type_Float64) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Float64.ProtoReflect.Descriptor instead. +func (*Type_Float64) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0, 5} +} + +// Timestamp +// Values of type `Timestamp` are stored in `Value.timestamp_value`. 
+type Type_Timestamp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Type_Timestamp) Reset() { + *x = Type_Timestamp{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Timestamp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Timestamp) ProtoMessage() {} + +func (x *Type_Timestamp) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Timestamp.ProtoReflect.Descriptor instead. +func (*Type_Timestamp) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0, 6} +} + +// Date +// Values of type `Date` are stored in `Value.date_value`. +type Type_Date struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Type_Date) Reset() { + *x = Type_Date{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Date) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Date) ProtoMessage() {} + +func (x *Type_Date) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Date.ProtoReflect.Descriptor instead. +func (*Type_Date) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0, 7} +} + +// A structured data value, consisting of fields which map to dynamically +// typed values. +// Values of type `Struct` are stored in `Value.array_value` where entries are +// in the same order and number as `field_types`. +type Type_Struct struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The names and types of the fields in this struct. + Fields []*Type_Struct_Field `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty"` +} + +func (x *Type_Struct) Reset() { + *x = Type_Struct{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Struct) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Struct) ProtoMessage() {} + +func (x *Type_Struct) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Struct.ProtoReflect.Descriptor instead. 
+func (*Type_Struct) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0, 8} +} + +func (x *Type_Struct) GetFields() []*Type_Struct_Field { + if x != nil { + return x.Fields + } + return nil +} + +// An ordered list of elements of a given type. +// Values of type `Array` are stored in `Value.array_value`. +type Type_Array struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of the elements in the array. This must not be `Array`. + ElementType *Type `protobuf:"bytes,1,opt,name=element_type,json=elementType,proto3" json:"element_type,omitempty"` +} + +func (x *Type_Array) Reset() { + *x = Type_Array{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Array) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Array) ProtoMessage() {} + +func (x *Type_Array) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Array.ProtoReflect.Descriptor instead. +func (*Type_Array) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0, 9} +} + +func (x *Type_Array) GetElementType() *Type { + if x != nil { + return x.ElementType + } + return nil +} + +// A mapping of keys to values of a given type. +// Values of type `Map` are stored in a `Value.array_value` where each entry +// is another `Value.array_value` with two elements (the key and the value, +// in that order). +// Normally encoded Map values won't have repeated keys, however, clients are +// expected to handle the case in which they do. If the same key appears +// multiple times, the _last_ value takes precedence. +type Type_Map struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of a map key. + // Only `Bytes`, `String`, and `Int64` are allowed as key types. + KeyType *Type `protobuf:"bytes,1,opt,name=key_type,json=keyType,proto3" json:"key_type,omitempty"` + // The type of the values in a map. + ValueType *Type `protobuf:"bytes,2,opt,name=value_type,json=valueType,proto3" json:"value_type,omitempty"` +} + +func (x *Type_Map) Reset() { + *x = Type_Map{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Map) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Map) ProtoMessage() {} + +func (x *Type_Map) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Map.ProtoReflect.Descriptor instead. 
+func (*Type_Map) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0, 10} +} + +func (x *Type_Map) GetKeyType() *Type { + if x != nil { + return x.KeyType + } + return nil +} + +func (x *Type_Map) GetValueType() *Type { + if x != nil { + return x.ValueType + } + return nil +} + +// A value that combines incremental updates into a summarized value. +// +// Data is never directly written or read using type `Aggregate`. Writes will +// provide either the `input_type` or `state_type`, and reads will always +// return the `state_type` . +type Type_Aggregate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Type of the inputs that are accumulated by this `Aggregate`, which must + // specify a full encoding. + // Use `AddInput` mutations to accumulate new inputs. + InputType *Type `protobuf:"bytes,1,opt,name=input_type,json=inputType,proto3" json:"input_type,omitempty"` + // Output only. Type that holds the internal accumulator state for the + // `Aggregate`. This is a function of the `input_type` and `aggregator` + // chosen, and will always specify a full encoding. + StateType *Type `protobuf:"bytes,2,opt,name=state_type,json=stateType,proto3" json:"state_type,omitempty"` + // Which aggregator function to use. The configured types must match. + // + // Types that are assignable to Aggregator: + // + // *Type_Aggregate_Sum_ + // *Type_Aggregate_HllppUniqueCount + // *Type_Aggregate_Max_ + // *Type_Aggregate_Min_ + Aggregator isType_Aggregate_Aggregator `protobuf_oneof:"aggregator"` +} + +func (x *Type_Aggregate) Reset() { + *x = Type_Aggregate{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Aggregate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Aggregate) ProtoMessage() {} + +func (x *Type_Aggregate) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Aggregate.ProtoReflect.Descriptor instead. 
+func (*Type_Aggregate) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0, 11} +} + +func (x *Type_Aggregate) GetInputType() *Type { + if x != nil { + return x.InputType + } + return nil +} + +func (x *Type_Aggregate) GetStateType() *Type { + if x != nil { + return x.StateType + } + return nil +} + +func (m *Type_Aggregate) GetAggregator() isType_Aggregate_Aggregator { + if m != nil { + return m.Aggregator + } + return nil +} + +func (x *Type_Aggregate) GetSum() *Type_Aggregate_Sum { + if x, ok := x.GetAggregator().(*Type_Aggregate_Sum_); ok { + return x.Sum + } + return nil +} + +func (x *Type_Aggregate) GetHllppUniqueCount() *Type_Aggregate_HyperLogLogPlusPlusUniqueCount { + if x, ok := x.GetAggregator().(*Type_Aggregate_HllppUniqueCount); ok { + return x.HllppUniqueCount + } + return nil +} + +func (x *Type_Aggregate) GetMax() *Type_Aggregate_Max { + if x, ok := x.GetAggregator().(*Type_Aggregate_Max_); ok { + return x.Max + } + return nil +} + +func (x *Type_Aggregate) GetMin() *Type_Aggregate_Min { + if x, ok := x.GetAggregator().(*Type_Aggregate_Min_); ok { + return x.Min + } + return nil +} + +type isType_Aggregate_Aggregator interface { + isType_Aggregate_Aggregator() +} + +type Type_Aggregate_Sum_ struct { + // Sum aggregator. + Sum *Type_Aggregate_Sum `protobuf:"bytes,4,opt,name=sum,proto3,oneof"` +} + +type Type_Aggregate_HllppUniqueCount struct { + // HyperLogLogPlusPlusUniqueCount aggregator. + HllppUniqueCount *Type_Aggregate_HyperLogLogPlusPlusUniqueCount `protobuf:"bytes,5,opt,name=hllpp_unique_count,json=hllppUniqueCount,proto3,oneof"` +} + +type Type_Aggregate_Max_ struct { + // Max aggregator. + Max *Type_Aggregate_Max `protobuf:"bytes,6,opt,name=max,proto3,oneof"` +} + +type Type_Aggregate_Min_ struct { + // Min aggregator. + Min *Type_Aggregate_Min `protobuf:"bytes,7,opt,name=min,proto3,oneof"` +} + +func (*Type_Aggregate_Sum_) isType_Aggregate_Aggregator() {} + +func (*Type_Aggregate_HllppUniqueCount) isType_Aggregate_Aggregator() {} + +func (*Type_Aggregate_Max_) isType_Aggregate_Aggregator() {} + +func (*Type_Aggregate_Min_) isType_Aggregate_Aggregator() {} + +// Rules used to convert to/from lower level types. +type Type_Bytes_Encoding struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Which encoding to use. + // + // Types that are assignable to Encoding: + // + // *Type_Bytes_Encoding_Raw_ + Encoding isType_Bytes_Encoding_Encoding `protobuf_oneof:"encoding"` +} + +func (x *Type_Bytes_Encoding) Reset() { + *x = Type_Bytes_Encoding{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Bytes_Encoding) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Bytes_Encoding) ProtoMessage() {} + +func (x *Type_Bytes_Encoding) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Bytes_Encoding.ProtoReflect.Descriptor instead. 
+func (*Type_Bytes_Encoding) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0, 0, 0} +} + +func (m *Type_Bytes_Encoding) GetEncoding() isType_Bytes_Encoding_Encoding { + if m != nil { + return m.Encoding + } + return nil +} + +func (x *Type_Bytes_Encoding) GetRaw() *Type_Bytes_Encoding_Raw { + if x, ok := x.GetEncoding().(*Type_Bytes_Encoding_Raw_); ok { + return x.Raw + } + return nil +} + +type isType_Bytes_Encoding_Encoding interface { + isType_Bytes_Encoding_Encoding() +} + +type Type_Bytes_Encoding_Raw_ struct { + // Use `Raw` encoding. + Raw *Type_Bytes_Encoding_Raw `protobuf:"bytes,1,opt,name=raw,proto3,oneof"` +} + +func (*Type_Bytes_Encoding_Raw_) isType_Bytes_Encoding_Encoding() {} + +// Leaves the value "as-is" +// * Order-preserving? Yes +// * Self-delimiting? No +// * Compatibility? N/A +type Type_Bytes_Encoding_Raw struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Type_Bytes_Encoding_Raw) Reset() { + *x = Type_Bytes_Encoding_Raw{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Bytes_Encoding_Raw) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Bytes_Encoding_Raw) ProtoMessage() {} + +func (x *Type_Bytes_Encoding_Raw) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Bytes_Encoding_Raw.ProtoReflect.Descriptor instead. +func (*Type_Bytes_Encoding_Raw) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0, 0, 0, 0} +} + +// Rules used to convert to/from lower level types. +type Type_String_Encoding struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Which encoding to use. + // + // Types that are assignable to Encoding: + // + // *Type_String_Encoding_Utf8Raw_ + // *Type_String_Encoding_Utf8Bytes_ + Encoding isType_String_Encoding_Encoding `protobuf_oneof:"encoding"` +} + +func (x *Type_String_Encoding) Reset() { + *x = Type_String_Encoding{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_String_Encoding) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_String_Encoding) ProtoMessage() {} + +func (x *Type_String_Encoding) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_String_Encoding.ProtoReflect.Descriptor instead. 
+func (*Type_String_Encoding) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0, 1, 0} +} + +func (m *Type_String_Encoding) GetEncoding() isType_String_Encoding_Encoding { + if m != nil { + return m.Encoding + } + return nil +} + +// Deprecated: Marked as deprecated in google/bigtable/v2/types.proto. +func (x *Type_String_Encoding) GetUtf8Raw() *Type_String_Encoding_Utf8Raw { + if x, ok := x.GetEncoding().(*Type_String_Encoding_Utf8Raw_); ok { + return x.Utf8Raw + } + return nil +} + +func (x *Type_String_Encoding) GetUtf8Bytes() *Type_String_Encoding_Utf8Bytes { + if x, ok := x.GetEncoding().(*Type_String_Encoding_Utf8Bytes_); ok { + return x.Utf8Bytes + } + return nil +} + +type isType_String_Encoding_Encoding interface { + isType_String_Encoding_Encoding() +} + +type Type_String_Encoding_Utf8Raw_ struct { + // Deprecated: if set, converts to an empty `utf8_bytes`. + // + // Deprecated: Marked as deprecated in google/bigtable/v2/types.proto. + Utf8Raw *Type_String_Encoding_Utf8Raw `protobuf:"bytes,1,opt,name=utf8_raw,json=utf8Raw,proto3,oneof"` +} + +type Type_String_Encoding_Utf8Bytes_ struct { + // Use `Utf8Bytes` encoding. + Utf8Bytes *Type_String_Encoding_Utf8Bytes `protobuf:"bytes,2,opt,name=utf8_bytes,json=utf8Bytes,proto3,oneof"` +} + +func (*Type_String_Encoding_Utf8Raw_) isType_String_Encoding_Encoding() {} + +func (*Type_String_Encoding_Utf8Bytes_) isType_String_Encoding_Encoding() {} + +// Deprecated: prefer the equivalent `Utf8Bytes`. +// +// Deprecated: Marked as deprecated in google/bigtable/v2/types.proto. +type Type_String_Encoding_Utf8Raw struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Type_String_Encoding_Utf8Raw) Reset() { + *x = Type_String_Encoding_Utf8Raw{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_String_Encoding_Utf8Raw) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_String_Encoding_Utf8Raw) ProtoMessage() {} + +func (x *Type_String_Encoding_Utf8Raw) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_String_Encoding_Utf8Raw.ProtoReflect.Descriptor instead. +func (*Type_String_Encoding_Utf8Raw) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0, 1, 0, 0} +} + +// UTF-8 encoding +// * Order-preserving? Yes (code point order) +// * Self-delimiting? No +// * Compatibility? 
+// - BigQuery Federation `TEXT` encoding +// - HBase `Bytes.toBytes` +// - Java `String#getBytes(StandardCharsets.UTF_8)` +type Type_String_Encoding_Utf8Bytes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Type_String_Encoding_Utf8Bytes) Reset() { + *x = Type_String_Encoding_Utf8Bytes{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_String_Encoding_Utf8Bytes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_String_Encoding_Utf8Bytes) ProtoMessage() {} + +func (x *Type_String_Encoding_Utf8Bytes) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_String_Encoding_Utf8Bytes.ProtoReflect.Descriptor instead. +func (*Type_String_Encoding_Utf8Bytes) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0, 1, 0, 1} +} + +// Rules used to convert to/from lower level types. +type Type_Int64_Encoding struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Which encoding to use. + // + // Types that are assignable to Encoding: + // + // *Type_Int64_Encoding_BigEndianBytes_ + Encoding isType_Int64_Encoding_Encoding `protobuf_oneof:"encoding"` +} + +func (x *Type_Int64_Encoding) Reset() { + *x = Type_Int64_Encoding{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Int64_Encoding) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Int64_Encoding) ProtoMessage() {} + +func (x *Type_Int64_Encoding) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Int64_Encoding.ProtoReflect.Descriptor instead. +func (*Type_Int64_Encoding) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0, 2, 0} +} + +func (m *Type_Int64_Encoding) GetEncoding() isType_Int64_Encoding_Encoding { + if m != nil { + return m.Encoding + } + return nil +} + +func (x *Type_Int64_Encoding) GetBigEndianBytes() *Type_Int64_Encoding_BigEndianBytes { + if x, ok := x.GetEncoding().(*Type_Int64_Encoding_BigEndianBytes_); ok { + return x.BigEndianBytes + } + return nil +} + +type isType_Int64_Encoding_Encoding interface { + isType_Int64_Encoding_Encoding() +} + +type Type_Int64_Encoding_BigEndianBytes_ struct { + // Use `BigEndianBytes` encoding. + BigEndianBytes *Type_Int64_Encoding_BigEndianBytes `protobuf:"bytes,1,opt,name=big_endian_bytes,json=bigEndianBytes,proto3,oneof"` +} + +func (*Type_Int64_Encoding_BigEndianBytes_) isType_Int64_Encoding_Encoding() {} + +// Encodes the value as an 8-byte big endian twos complement `Bytes` +// value. +// * Order-preserving? No (positive values only) +// * Self-delimiting? 
Yes +// * Compatibility? +// - BigQuery Federation `BINARY` encoding +// - HBase `Bytes.toBytes` +// - Java `ByteBuffer.putLong()` with `ByteOrder.BIG_ENDIAN` +type Type_Int64_Encoding_BigEndianBytes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Deprecated: ignored if set. + BytesType *Type_Bytes `protobuf:"bytes,1,opt,name=bytes_type,json=bytesType,proto3" json:"bytes_type,omitempty"` +} + +func (x *Type_Int64_Encoding_BigEndianBytes) Reset() { + *x = Type_Int64_Encoding_BigEndianBytes{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Int64_Encoding_BigEndianBytes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Int64_Encoding_BigEndianBytes) ProtoMessage() {} + +func (x *Type_Int64_Encoding_BigEndianBytes) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Int64_Encoding_BigEndianBytes.ProtoReflect.Descriptor instead. +func (*Type_Int64_Encoding_BigEndianBytes) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0, 2, 0, 0} +} + +func (x *Type_Int64_Encoding_BigEndianBytes) GetBytesType() *Type_Bytes { + if x != nil { + return x.BytesType + } + return nil +} + +// A struct field and its type. +type Type_Struct_Field struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The field name (optional). Fields without a `field_name` are considered + // anonymous and cannot be referenced by name. + FieldName string `protobuf:"bytes,1,opt,name=field_name,json=fieldName,proto3" json:"field_name,omitempty"` + // The type of values in this field. + Type *Type `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` +} + +func (x *Type_Struct_Field) Reset() { + *x = Type_Struct_Field{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Struct_Field) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Struct_Field) ProtoMessage() {} + +func (x *Type_Struct_Field) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Struct_Field.ProtoReflect.Descriptor instead. +func (*Type_Struct_Field) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0, 8, 0} +} + +func (x *Type_Struct_Field) GetFieldName() string { + if x != nil { + return x.FieldName + } + return "" +} + +func (x *Type_Struct_Field) GetType() *Type { + if x != nil { + return x.Type + } + return nil +} + +// Computes the sum of the input values. 
+// Allowed input: `Int64` +// State: same as input +type Type_Aggregate_Sum struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Type_Aggregate_Sum) Reset() { + *x = Type_Aggregate_Sum{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Aggregate_Sum) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Aggregate_Sum) ProtoMessage() {} + +func (x *Type_Aggregate_Sum) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Aggregate_Sum.ProtoReflect.Descriptor instead. +func (*Type_Aggregate_Sum) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0, 11, 0} +} + +// Computes the max of the input values. +// Allowed input: `Int64` +// State: same as input +type Type_Aggregate_Max struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Type_Aggregate_Max) Reset() { + *x = Type_Aggregate_Max{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Aggregate_Max) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Aggregate_Max) ProtoMessage() {} + +func (x *Type_Aggregate_Max) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Aggregate_Max.ProtoReflect.Descriptor instead. +func (*Type_Aggregate_Max) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0, 11, 1} +} + +// Computes the min of the input values. +// Allowed input: `Int64` +// State: same as input +type Type_Aggregate_Min struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Type_Aggregate_Min) Reset() { + *x = Type_Aggregate_Min{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Aggregate_Min) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Aggregate_Min) ProtoMessage() {} + +func (x *Type_Aggregate_Min) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Aggregate_Min.ProtoReflect.Descriptor instead. +func (*Type_Aggregate_Min) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0, 11, 2} +} + +// Computes an approximate unique count over the input values. 
When using +// raw data as input, be careful to use a consistent encoding. Otherwise +// the same value encoded differently could count more than once, or two +// distinct values could count as identical. +// Input: Any, or omit for Raw +// State: TBD +// Special state conversions: `Int64` (the unique count estimate) +type Type_Aggregate_HyperLogLogPlusPlusUniqueCount struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Type_Aggregate_HyperLogLogPlusPlusUniqueCount) Reset() { + *x = Type_Aggregate_HyperLogLogPlusPlusUniqueCount{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_types_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_Aggregate_HyperLogLogPlusPlusUniqueCount) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_Aggregate_HyperLogLogPlusPlusUniqueCount) ProtoMessage() {} + +func (x *Type_Aggregate_HyperLogLogPlusPlusUniqueCount) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_types_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_Aggregate_HyperLogLogPlusPlusUniqueCount.ProtoReflect.Descriptor instead. +func (*Type_Aggregate_HyperLogLogPlusPlusUniqueCount) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_types_proto_rawDescGZIP(), []int{0, 11, 3} +} + +var File_google_bigtable_v2_types_proto protoreflect.FileDescriptor + +var file_google_bigtable_v2_types_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x12, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x76, 0x32, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb1, 0x13, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3f, + 0x0a, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x48, 0x00, 0x52, 0x09, 0x62, 0x79, 0x74, 0x65, 0x73, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x42, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x53, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x3f, 0x0a, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, + 0x65, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x48, 0x00, 0x52, 0x09, 0x69, 0x6e, 0x74, 0x36, 0x34, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x45, 0x0a, 
0x0c, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x33, 0x32, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x54, 0x79, 0x70, 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x33, 0x32, 0x48, 0x00, 0x52, 0x0b, + 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x33, 0x32, 0x54, 0x79, 0x70, 0x65, 0x12, 0x45, 0x0a, 0x0c, 0x66, + 0x6c, 0x6f, 0x61, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x61, + 0x74, 0x36, 0x34, 0x48, 0x00, 0x52, 0x0b, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x36, 0x34, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x3c, 0x0a, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, + 0x42, 0x6f, 0x6f, 0x6c, 0x48, 0x00, 0x52, 0x08, 0x62, 0x6f, 0x6f, 0x6c, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x4b, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x0d, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3c, 0x0a, + 0x09, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x48, + 0x00, 0x52, 0x08, 0x64, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4b, 0x0a, 0x0e, 0x61, + 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x67, + 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x61, 0x67, 0x67, 0x72, 0x65, + 0x67, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x42, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, + 0x52, 0x0a, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3f, 0x0a, 0x0a, + 0x61, 0x72, 0x72, 0x61, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x72, 0x72, 0x61, 0x79, + 0x48, 0x00, 0x52, 0x09, 0x61, 0x72, 0x72, 0x61, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, + 0x08, 0x6d, 0x61, 0x70, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 
0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, + 0x07, 0x6d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x1a, 0xac, 0x01, 0x0a, 0x05, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x12, 0x43, 0x0a, 0x08, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x42, + 0x79, 0x74, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x08, 0x65, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x1a, 0x5e, 0x0a, 0x08, 0x45, 0x6e, 0x63, 0x6f, 0x64, + 0x69, 0x6e, 0x67, 0x12, 0x3f, 0x0a, 0x03, 0x72, 0x61, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x61, 0x77, 0x48, 0x00, 0x52, + 0x03, 0x72, 0x61, 0x77, 0x1a, 0x05, 0x0a, 0x03, 0x52, 0x61, 0x77, 0x42, 0x0a, 0x0a, 0x08, 0x65, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x1a, 0xab, 0x02, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x12, 0x44, 0x0a, 0x08, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x53, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x08, + 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x1a, 0xda, 0x01, 0x0a, 0x08, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x51, 0x0a, 0x08, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x72, 0x61, + 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, + 0x65, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, + 0x67, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x52, 0x61, 0x77, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, + 0x07, 0x75, 0x74, 0x66, 0x38, 0x52, 0x61, 0x77, 0x12, 0x53, 0x0a, 0x0a, 0x75, 0x74, 0x66, 0x38, + 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x48, 0x00, 0x52, 0x09, 0x75, 0x74, 0x66, 0x38, 0x42, 0x79, 0x74, 0x65, 0x73, 0x1a, 0x0d, 0x0a, + 0x07, 0x55, 0x74, 0x66, 0x38, 0x52, 0x61, 0x77, 0x3a, 0x02, 0x18, 0x01, 0x1a, 0x0b, 0x0a, 0x09, + 0x55, 0x74, 0x66, 0x38, 0x42, 0x79, 0x74, 0x65, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x65, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x1a, 0x9a, 0x02, 0x0a, 0x05, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x12, + 0x43, 0x0a, 0x08, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x49, 0x6e, 0x74, 0x36, + 0x34, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x08, 0x65, 0x6e, 0x63, 
0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x1a, 0xcb, 0x01, 0x0a, 0x08, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, + 0x67, 0x12, 0x62, 0x0a, 0x10, 0x62, 0x69, 0x67, 0x5f, 0x65, 0x6e, 0x64, 0x69, 0x61, 0x6e, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x2e, 0x45, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x2e, 0x42, 0x69, 0x67, 0x45, 0x6e, 0x64, 0x69, 0x61, 0x6e, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x62, 0x69, 0x67, 0x45, 0x6e, 0x64, 0x69, 0x61, 0x6e, + 0x42, 0x79, 0x74, 0x65, 0x73, 0x1a, 0x4f, 0x0a, 0x0e, 0x42, 0x69, 0x67, 0x45, 0x6e, 0x64, 0x69, + 0x61, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x09, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x54, 0x79, 0x70, 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, + 0x6e, 0x67, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x6f, 0x6c, 0x1a, 0x09, 0x0a, 0x07, 0x46, 0x6c, + 0x6f, 0x61, 0x74, 0x33, 0x32, 0x1a, 0x09, 0x0a, 0x07, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x36, 0x34, + 0x1a, 0x0b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x1a, 0x06, 0x0a, + 0x04, 0x44, 0x61, 0x74, 0x65, 0x1a, 0x9d, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x12, 0x3d, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, + 0x54, 0x0a, 0x05, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x44, 0x0a, 0x05, 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x3b, + 0x0a, 0x0c, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x73, 0x0a, 0x03, 0x4d, + 0x61, 0x70, 0x12, 0x33, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, + 0x6b, 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x37, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x1a, 0xee, 0x03, 0x0a, 0x09, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x12, 0x37, + 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x69, 0x6e, + 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3c, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3a, 0x0a, 0x03, 0x73, 0x75, 0x6d, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x67, 0x67, + 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x75, 0x6d, 0x48, 0x00, 0x52, 0x03, 0x73, 0x75, + 0x6d, 0x12, 0x71, 0x0a, 0x12, 0x68, 0x6c, 0x6c, 0x70, 0x70, 0x5f, 0x75, 0x6e, 0x69, 0x71, 0x75, + 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, + 0x65, 0x2e, 0x48, 0x79, 0x70, 0x65, 0x72, 0x4c, 0x6f, 0x67, 0x4c, 0x6f, 0x67, 0x50, 0x6c, 0x75, + 0x73, 0x50, 0x6c, 0x75, 0x73, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x48, 0x00, 0x52, 0x10, 0x68, 0x6c, 0x6c, 0x70, 0x70, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3a, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x67, 0x67, 0x72, + 0x65, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x4d, 0x61, 0x78, 0x48, 0x00, 0x52, 0x03, 0x6d, 0x61, 0x78, + 0x12, 0x3a, 0x0a, 0x03, 0x6d, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, + 0x65, 0x2e, 0x4d, 0x69, 0x6e, 0x48, 0x00, 0x52, 0x03, 0x6d, 0x69, 0x6e, 0x1a, 0x05, 0x0a, 0x03, + 0x53, 0x75, 0x6d, 0x1a, 0x05, 0x0a, 0x03, 0x4d, 0x61, 0x78, 0x1a, 0x05, 0x0a, 0x03, 0x4d, 0x69, + 0x6e, 0x1a, 0x20, 0x0a, 0x1e, 0x48, 0x79, 0x70, 0x65, 0x72, 0x4c, 0x6f, 0x67, 0x4c, 0x6f, 0x67, + 0x50, 0x6c, 0x75, 0x73, 0x50, 0x6c, 0x75, 0x73, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x42, 0x0c, 0x0a, 0x0a, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x6f, + 0x72, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x42, 0xb4, 0x01, 0x0a, 0x16, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0a, 0x54, 0x79, 0x70, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x38, 0x63, 0x6c, 0x6f, 
0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x70, + 0x62, 0x3b, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x70, 0x62, 0xaa, 0x02, 0x18, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, + 0x56, 0x32, 0xea, 0x02, 0x1b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x32, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_bigtable_v2_types_proto_rawDescOnce sync.Once + file_google_bigtable_v2_types_proto_rawDescData = file_google_bigtable_v2_types_proto_rawDesc +) + +func file_google_bigtable_v2_types_proto_rawDescGZIP() []byte { + file_google_bigtable_v2_types_proto_rawDescOnce.Do(func() { + file_google_bigtable_v2_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_bigtable_v2_types_proto_rawDescData) + }) + return file_google_bigtable_v2_types_proto_rawDescData +} + +var file_google_bigtable_v2_types_proto_msgTypes = make([]protoimpl.MessageInfo, 25) +var file_google_bigtable_v2_types_proto_goTypes = []any{ + (*Type)(nil), // 0: google.bigtable.v2.Type + (*Type_Bytes)(nil), // 1: google.bigtable.v2.Type.Bytes + (*Type_String)(nil), // 2: google.bigtable.v2.Type.String + (*Type_Int64)(nil), // 3: google.bigtable.v2.Type.Int64 + (*Type_Bool)(nil), // 4: google.bigtable.v2.Type.Bool + (*Type_Float32)(nil), // 5: google.bigtable.v2.Type.Float32 + (*Type_Float64)(nil), // 6: google.bigtable.v2.Type.Float64 + (*Type_Timestamp)(nil), // 7: google.bigtable.v2.Type.Timestamp + (*Type_Date)(nil), // 8: google.bigtable.v2.Type.Date + (*Type_Struct)(nil), // 9: google.bigtable.v2.Type.Struct + (*Type_Array)(nil), // 10: google.bigtable.v2.Type.Array + (*Type_Map)(nil), // 11: google.bigtable.v2.Type.Map + (*Type_Aggregate)(nil), // 12: google.bigtable.v2.Type.Aggregate + (*Type_Bytes_Encoding)(nil), // 13: google.bigtable.v2.Type.Bytes.Encoding + (*Type_Bytes_Encoding_Raw)(nil), // 14: google.bigtable.v2.Type.Bytes.Encoding.Raw + (*Type_String_Encoding)(nil), // 15: google.bigtable.v2.Type.String.Encoding + (*Type_String_Encoding_Utf8Raw)(nil), // 16: google.bigtable.v2.Type.String.Encoding.Utf8Raw + (*Type_String_Encoding_Utf8Bytes)(nil), // 17: google.bigtable.v2.Type.String.Encoding.Utf8Bytes + (*Type_Int64_Encoding)(nil), // 18: google.bigtable.v2.Type.Int64.Encoding + (*Type_Int64_Encoding_BigEndianBytes)(nil), // 19: google.bigtable.v2.Type.Int64.Encoding.BigEndianBytes + (*Type_Struct_Field)(nil), // 20: google.bigtable.v2.Type.Struct.Field + (*Type_Aggregate_Sum)(nil), // 21: google.bigtable.v2.Type.Aggregate.Sum + (*Type_Aggregate_Max)(nil), // 22: google.bigtable.v2.Type.Aggregate.Max + (*Type_Aggregate_Min)(nil), // 23: google.bigtable.v2.Type.Aggregate.Min + (*Type_Aggregate_HyperLogLogPlusPlusUniqueCount)(nil), // 24: google.bigtable.v2.Type.Aggregate.HyperLogLogPlusPlusUniqueCount +} +var file_google_bigtable_v2_types_proto_depIdxs = []int32{ + 1, // 0: google.bigtable.v2.Type.bytes_type:type_name -> google.bigtable.v2.Type.Bytes + 2, // 1: google.bigtable.v2.Type.string_type:type_name -> 
google.bigtable.v2.Type.String + 3, // 2: google.bigtable.v2.Type.int64_type:type_name -> google.bigtable.v2.Type.Int64 + 5, // 3: google.bigtable.v2.Type.float32_type:type_name -> google.bigtable.v2.Type.Float32 + 6, // 4: google.bigtable.v2.Type.float64_type:type_name -> google.bigtable.v2.Type.Float64 + 4, // 5: google.bigtable.v2.Type.bool_type:type_name -> google.bigtable.v2.Type.Bool + 7, // 6: google.bigtable.v2.Type.timestamp_type:type_name -> google.bigtable.v2.Type.Timestamp + 8, // 7: google.bigtable.v2.Type.date_type:type_name -> google.bigtable.v2.Type.Date + 12, // 8: google.bigtable.v2.Type.aggregate_type:type_name -> google.bigtable.v2.Type.Aggregate + 9, // 9: google.bigtable.v2.Type.struct_type:type_name -> google.bigtable.v2.Type.Struct + 10, // 10: google.bigtable.v2.Type.array_type:type_name -> google.bigtable.v2.Type.Array + 11, // 11: google.bigtable.v2.Type.map_type:type_name -> google.bigtable.v2.Type.Map + 13, // 12: google.bigtable.v2.Type.Bytes.encoding:type_name -> google.bigtable.v2.Type.Bytes.Encoding + 15, // 13: google.bigtable.v2.Type.String.encoding:type_name -> google.bigtable.v2.Type.String.Encoding + 18, // 14: google.bigtable.v2.Type.Int64.encoding:type_name -> google.bigtable.v2.Type.Int64.Encoding + 20, // 15: google.bigtable.v2.Type.Struct.fields:type_name -> google.bigtable.v2.Type.Struct.Field + 0, // 16: google.bigtable.v2.Type.Array.element_type:type_name -> google.bigtable.v2.Type + 0, // 17: google.bigtable.v2.Type.Map.key_type:type_name -> google.bigtable.v2.Type + 0, // 18: google.bigtable.v2.Type.Map.value_type:type_name -> google.bigtable.v2.Type + 0, // 19: google.bigtable.v2.Type.Aggregate.input_type:type_name -> google.bigtable.v2.Type + 0, // 20: google.bigtable.v2.Type.Aggregate.state_type:type_name -> google.bigtable.v2.Type + 21, // 21: google.bigtable.v2.Type.Aggregate.sum:type_name -> google.bigtable.v2.Type.Aggregate.Sum + 24, // 22: google.bigtable.v2.Type.Aggregate.hllpp_unique_count:type_name -> google.bigtable.v2.Type.Aggregate.HyperLogLogPlusPlusUniqueCount + 22, // 23: google.bigtable.v2.Type.Aggregate.max:type_name -> google.bigtable.v2.Type.Aggregate.Max + 23, // 24: google.bigtable.v2.Type.Aggregate.min:type_name -> google.bigtable.v2.Type.Aggregate.Min + 14, // 25: google.bigtable.v2.Type.Bytes.Encoding.raw:type_name -> google.bigtable.v2.Type.Bytes.Encoding.Raw + 16, // 26: google.bigtable.v2.Type.String.Encoding.utf8_raw:type_name -> google.bigtable.v2.Type.String.Encoding.Utf8Raw + 17, // 27: google.bigtable.v2.Type.String.Encoding.utf8_bytes:type_name -> google.bigtable.v2.Type.String.Encoding.Utf8Bytes + 19, // 28: google.bigtable.v2.Type.Int64.Encoding.big_endian_bytes:type_name -> google.bigtable.v2.Type.Int64.Encoding.BigEndianBytes + 1, // 29: google.bigtable.v2.Type.Int64.Encoding.BigEndianBytes.bytes_type:type_name -> google.bigtable.v2.Type.Bytes + 0, // 30: google.bigtable.v2.Type.Struct.Field.type:type_name -> google.bigtable.v2.Type + 31, // [31:31] is the sub-list for method output_type + 31, // [31:31] is the sub-list for method input_type + 31, // [31:31] is the sub-list for extension type_name + 31, // [31:31] is the sub-list for extension extendee + 0, // [0:31] is the sub-list for field type_name +} + +func init() { file_google_bigtable_v2_types_proto_init() } +func file_google_bigtable_v2_types_proto_init() { + if File_google_bigtable_v2_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_bigtable_v2_types_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v 
:= v.(*Type); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_types_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*Type_Bytes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_types_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*Type_String); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_types_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*Type_Int64); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_types_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*Type_Bool); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_types_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*Type_Float32); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_types_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*Type_Float64); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_types_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*Type_Timestamp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_types_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*Type_Date); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_types_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*Type_Struct); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_types_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*Type_Array); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_types_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*Type_Map); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_types_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*Type_Aggregate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_types_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*Type_Bytes_Encoding); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_types_proto_msgTypes[14].Exporter = func(v any, i int) any { + switch v := v.(*Type_Bytes_Encoding_Raw); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_types_proto_msgTypes[15].Exporter = func(v any, i int) any { + switch v := v.(*Type_String_Encoding); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_types_proto_msgTypes[16].Exporter = func(v any, i int) any { + switch v := v.(*Type_String_Encoding_Utf8Raw); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_types_proto_msgTypes[17].Exporter = func(v any, i int) any { + switch v := v.(*Type_String_Encoding_Utf8Bytes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_types_proto_msgTypes[18].Exporter = func(v any, i int) any { + switch v := v.(*Type_Int64_Encoding); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_types_proto_msgTypes[19].Exporter = func(v any, i int) any { + switch v := v.(*Type_Int64_Encoding_BigEndianBytes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_types_proto_msgTypes[20].Exporter = func(v any, i int) any { + switch v := v.(*Type_Struct_Field); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_types_proto_msgTypes[21].Exporter = func(v any, i int) any { + switch v := v.(*Type_Aggregate_Sum); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_types_proto_msgTypes[22].Exporter = func(v any, i int) any { + switch v := v.(*Type_Aggregate_Max); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_types_proto_msgTypes[23].Exporter = func(v any, i int) any { + switch v := v.(*Type_Aggregate_Min); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_types_proto_msgTypes[24].Exporter = func(v any, i int) any { + switch v := v.(*Type_Aggregate_HyperLogLogPlusPlusUniqueCount); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_bigtable_v2_types_proto_msgTypes[0].OneofWrappers = []any{ + (*Type_BytesType)(nil), + (*Type_StringType)(nil), + (*Type_Int64Type)(nil), + (*Type_Float32Type)(nil), + (*Type_Float64Type)(nil), + (*Type_BoolType)(nil), + (*Type_TimestampType)(nil), + (*Type_DateType)(nil), + (*Type_AggregateType)(nil), + (*Type_StructType)(nil), + (*Type_ArrayType)(nil), + (*Type_MapType)(nil), + } + file_google_bigtable_v2_types_proto_msgTypes[12].OneofWrappers = []any{ + (*Type_Aggregate_Sum_)(nil), + (*Type_Aggregate_HllppUniqueCount)(nil), + (*Type_Aggregate_Max_)(nil), + (*Type_Aggregate_Min_)(nil), + } + file_google_bigtable_v2_types_proto_msgTypes[13].OneofWrappers = []any{ + (*Type_Bytes_Encoding_Raw_)(nil), + } + 
file_google_bigtable_v2_types_proto_msgTypes[15].OneofWrappers = []any{
+		(*Type_String_Encoding_Utf8Raw_)(nil),
+		(*Type_String_Encoding_Utf8Bytes_)(nil),
+	}
+	file_google_bigtable_v2_types_proto_msgTypes[18].OneofWrappers = []any{
+		(*Type_Int64_Encoding_BigEndianBytes_)(nil),
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_bigtable_v2_types_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   25,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_bigtable_v2_types_proto_goTypes,
+		DependencyIndexes: file_google_bigtable_v2_types_proto_depIdxs,
+		MessageInfos:      file_google_bigtable_v2_types_proto_msgTypes,
+	}.Build()
+	File_google_bigtable_v2_types_proto = out.File
+	file_google_bigtable_v2_types_proto_rawDesc = nil
+	file_google_bigtable_v2_types_proto_goTypes = nil
+	file_google_bigtable_v2_types_proto_depIdxs = nil
+}
diff --git a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/bigtable.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/bigtable.go
index b3fbefa8aa8..b0878565871 100644
--- a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/bigtable.go
+++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/bigtable.go
@@ -18,21 +18,24 @@ package bigtable // import "cloud.google.com/go/bigtable"
 
 import (
 	"context"
+	"encoding/base64"
 	"errors"
 	"fmt"
 	"io"
 	"net/url"
+	"os"
 	"strconv"
 	"strings"
 	"time"
 
+	btpb "cloud.google.com/go/bigtable/apiv2/bigtablepb"
 	btopt "cloud.google.com/go/bigtable/internal/option"
 	"cloud.google.com/go/internal/trace"
 	gax "github.com/googleapis/gax-go/v2"
+	"go.opentelemetry.io/otel/metric"
 	"google.golang.org/api/option"
 	"google.golang.org/api/option/internaloption"
 	gtransport "google.golang.org/api/transport/grpc"
-	btpb "google.golang.org/genproto/googleapis/bigtable/v2"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/metadata"
@@ -47,15 +50,17 @@ import (
 
 const prodAddr = "bigtable.googleapis.com:443"
 const mtlsProdAddr = "bigtable.mtls.googleapis.com:443"
+const featureFlagsHeaderKey = "bigtable-features"
 
 // Client is a client for reading and writing data to tables in an instance.
 //
 // A Client is safe to use concurrently, except for its Close method.
 type Client struct {
-	connPool          gtransport.ConnPool
-	client            btpb.BigtableClient
-	project, instance string
-	appProfile        string
+	connPool             gtransport.ConnPool
+	client               btpb.BigtableClient
+	project, instance    string
+	appProfile           string
+	metricsTracerFactory *builtinMetricsTracerFactory
 }
 
 // ClientConfig has configurations for the client.
@@ -63,8 +68,25 @@ type ClientConfig struct {
 	// The id of the app profile to associate with all data operations sent from this client.
 	// If unspecified, the default app profile for the instance will be used.
 	AppProfile string
+
+	// If not set or set to nil, client-side metrics will be collected and exported.
+	//
+	// To disable client-side metrics, set 'MetricsProvider' to 'NoopMetricsProvider'.
+	//
+	// TODO: support a user-provided meter provider
+	MetricsProvider MetricsProvider
+}
+
+// MetricsProvider is a wrapper for the built-in metrics meter provider.
+type MetricsProvider interface {
+	isMetricsProvider()
 }
 
+// NoopMetricsProvider can be used to disable built-in metrics.
+type NoopMetricsProvider struct{}
+
+func (NoopMetricsProvider) isMetricsProvider() {}
+
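+// NOTE (editorial, not part of the vendored upstream file): a minimal usage
+// sketch of disabling the built-in client-side metrics. It assumes only the
+// NewClientWithConfig, ClientConfig, and NoopMetricsProvider declarations
+// introduced in this diff; the project and instance names are placeholders.
+//
+//	cfg := bigtable.ClientConfig{MetricsProvider: bigtable.NoopMetricsProvider{}}
+//	client, err := bigtable.NewClientWithConfig(ctx, "my-project", "my-instance", cfg)
+//	if err != nil {
+//		// handle the dial/metrics-setup error
+//	}
+//	defer client.Close()
+
 // NewClient creates a new Client for a given project and instance.
 // The default ClientConfig will be used.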
func NewClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*Client, error) {
@@ -95,17 +117,33 @@ func NewClientWithConfig(ctx context.Context, project, instance string, config C
 		return nil, fmt.Errorf("dialing: %w", err)
 	}
 
+	metricsProvider := config.MetricsProvider
+	if emulatorAddr := os.Getenv("BIGTABLE_EMULATOR_HOST"); emulatorAddr != "" {
+		// Do not emit metrics when the emulator is being used
+		metricsProvider = NoopMetricsProvider{}
+	}
+
+	// Create an OpenTelemetry metrics configuration
+	metricsTracerFactory, err := newBuiltinMetricsTracerFactory(ctx, project, instance, config.AppProfile, metricsProvider, opts...)
+	if err != nil {
+		return nil, err
+	}
+
 	return &Client{
-		connPool:   connPool,
-		client:     btpb.NewBigtableClient(connPool),
-		project:    project,
-		instance:   instance,
-		appProfile: config.AppProfile,
+		connPool:             connPool,
+		client:               btpb.NewBigtableClient(connPool),
+		project:              project,
+		instance:             instance,
+		appProfile:           config.AppProfile,
+		metricsTracerFactory: metricsTracerFactory,
 	}, nil
 }
 
 // Close closes the Client.
 func (c *Client) Close() error {
+	if c.metricsTracerFactory != nil {
+		c.metricsTracerFactory.shutdown()
+	}
 	return c.connPool.Close()
 }
@@ -166,19 +204,21 @@ func init() {
 }
 
 // Convert error to grpc status error
-func convertToGrpcStatusErr(err error) error {
-	if err != nil {
-		if errStatus, ok := status.FromError(err); ok {
-			return status.Error(errStatus.Code(), errStatus.Message())
-		}
+func convertToGrpcStatusErr(err error) (codes.Code, error) {
+	if err == nil {
+		return codes.OK, nil
+	}
 
-		ctxStatus := status.FromContextError(err)
-		if ctxStatus.Code() != codes.Unknown {
-			return status.Error(ctxStatus.Code(), ctxStatus.Message())
-		}
+	if errStatus, ok := status.FromError(err); ok {
+		return errStatus.Code(), status.Error(errStatus.Code(), errStatus.Message())
 	}
 
-	return err
+	ctxStatus := status.FromContextError(err)
+	if ctxStatus.Code() != codes.Unknown {
+		return ctxStatus.Code(), status.Error(ctxStatus.Code(), ctxStatus.Message())
+	}
+
+	return codes.Unknown, err
 }
 
 func (c *Client) fullTableName(table string) string {
@@ -228,6 +268,25 @@ type Table struct {
 	authorizedView string
 }
 
+// newFeatureFlags creates the feature flags `bigtable-features` header
+// to be sent on each request. This includes all features supported
+// and enabled on the client.
+func (c *Client) newFeatureFlags() metadata.MD {
+	ff := btpb.FeatureFlags{
+		ReverseScans:             true,
+		LastScannedRowResponses:  true,
+		ClientSideMetricsEnabled: c.metricsTracerFactory.enabled,
+	}
+
+	val := ""
+	b, err := proto.Marshal(&ff)
+	if err == nil {
+		val = base64.URLEncoding.EncodeToString(b)
+	}
+
+	return metadata.Pairs(featureFlagsHeaderKey, val)
+}
+
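+// NOTE (editorial, not part of the vendored upstream file): the
+// `bigtable-features` header built above is a base64url-encoded FeatureFlags
+// proto. A hypothetical debugging helper that reverses the encoding, assuming
+// the same btpb, base64, and proto imports already used in this file:
+//
+//	func decodeFeatureFlags(val string) (*btpb.FeatureFlags, error) {
+//		b, err := base64.URLEncoding.DecodeString(val)
+//		if err != nil {
+//			return nil, err
+//		}
+//		ff := &btpb.FeatureFlags{}
+//		if err := proto.Unmarshal(b, ff); err != nil {
+//			return nil, err
+//		}
+//		return ff, nil
+//	}
+
 // Open opens a table.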
func (c *Client) Open(table string) *Table { return &Table{ @@ -236,7 +295,7 @@ func (c *Client) Open(table string) *Table { md: metadata.Join(metadata.Pairs( resourcePrefixHeader, c.fullTableName(table), requestParamsHeader, c.requestParamsHeaderValue(table), - ), btopt.WithFeatureFlags()), + ), c.newFeatureFlags()), } } @@ -248,7 +307,7 @@ func (c *Client) OpenTable(table string) TableAPI { md: metadata.Join(metadata.Pairs( resourcePrefixHeader, c.fullTableName(table), requestParamsHeader, c.requestParamsHeaderValue(table), - ), btopt.WithFeatureFlags()), + ), c.newFeatureFlags()), }} } @@ -260,7 +319,7 @@ func (c *Client) OpenAuthorizedView(table, authorizedView string) TableAPI { md: metadata.Join(metadata.Pairs( resourcePrefixHeader, c.fullAuthorizedViewName(table, authorizedView), requestParamsHeader, c.requestParamsHeaderValue(table), - ), btopt.WithFeatureFlags()), + ), c.newFeatureFlags()), authorizedView: authorizedView, }} } @@ -285,6 +344,10 @@ func (ti *tableImpl) ApplyReadModifyWrite(ctx context.Context, row string, m *Re return ti.Table.ApplyReadModifyWrite(ctx, row, m) } +func (ti *tableImpl) newBuiltinMetricsTracer(ctx context.Context, isStreaming bool) *builtinMetricsTracer { + return ti.Table.newBuiltinMetricsTracer(ctx, isStreaming) +} + // TODO(dsymonds): Read method that returns a sequence of ReadItems. // ReadRows reads rows from a table. f is called for each row. @@ -299,9 +362,19 @@ func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigtable.ReadRows") defer func() { trace.EndSpan(ctx, err) }() + mt := t.newBuiltinMetricsTracer(ctx, true) + defer recordOperationCompletion(mt) + + err = t.readRows(ctx, arg, f, mt, opts...) + statusCode, statusErr := convertToGrpcStatusErr(err) + mt.currOp.setStatus(statusCode.String()) + return statusErr +} + +func (t *Table) readRows(ctx context.Context, arg RowSet, f func(Row) bool, mt *builtinMetricsTracer, opts ...ReadOption) (err error) { var prevRowKey string attrMap := make(map[string]interface{}) - err = gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { + err = gaxInvokeWithRecorder(ctx, mt, "ReadRows", func(ctx context.Context, headerMD, trailerMD *metadata.MD, _ gax.CallSettings) error { req := &btpb.ReadRowsRequest{ AppProfileId: t.c.appProfile, } @@ -340,12 +413,19 @@ func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts cr = newChunkReader() } + // Ignore error since header is only being used to record builtin metrics + // Failure to record metrics should not fail the operation + *headerMD, _ = stream.Header() + res := new(btpb.ReadRowsResponse) for { - res, err := stream.Recv() + proto.Reset(res) + err := stream.RecvMsg(res) if err == io.EOF { + *trailerMD = stream.Trailer() break } if err != nil { + *trailerMD = stream.Trailer() // Reset arg for next Invoke call. if arg == nil { // Should be lowest possible key value, an empty byte array @@ -380,7 +460,9 @@ func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts // Cancel and drain stream. cancel() for { - if _, err := stream.Recv(); err != nil { + proto.Reset(res) + if err := stream.RecvMsg(res); err != nil { + *trailerMD = stream.Trailer() // The stream has ended. We don't return an error // because the caller has intentionally interrupted the scan. return nil @@ -407,7 +489,7 @@ func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts return err }, retryOptions...) 
- return convertToGrpcStatusErr(err) + return err } // ReadRow is a convenience implementation of a single-row reader. @@ -624,9 +706,9 @@ func (r RowRange) String() string { var endStr string switch r.endBound { case rangeOpen: - endStr = r.end + ")" + endStr = strconv.Quote(r.end) + ")" case rangeClosed: - endStr = r.end + "]" + endStr = strconv.Quote(r.end) + "]" case rangeUnbounded: endStr = "∞)" } @@ -922,7 +1004,16 @@ func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...Appl ctx = mergeOutgoingMetadata(ctx, t.md) ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigtable/Apply") defer func() { trace.EndSpan(ctx, err) }() + mt := t.newBuiltinMetricsTracer(ctx, false) + defer recordOperationCompletion(mt) + err = t.apply(ctx, mt, row, m, opts...) + statusCode, statusErr := convertToGrpcStatusErr(err) + mt.currOp.setStatus(statusCode.String()) + return statusErr +} + +func (t *Table) apply(ctx context.Context, mt *builtinMetricsTracer, row string, m *Mutation, opts ...ApplyOption) (err error) { after := func(res proto.Message) { for _, o := range opts { o.after(res) @@ -945,15 +1036,15 @@ func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...Appl callOptions = retryOptions } var res *btpb.MutateRowResponse - err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { + err := gaxInvokeWithRecorder(ctx, mt, "MutateRow", func(ctx context.Context, headerMD, trailerMD *metadata.MD, _ gax.CallSettings) error { var err error - res, err = t.c.client.MutateRow(ctx, req) + res, err = t.c.client.MutateRow(ctx, req, grpc.Header(headerMD), grpc.Trailer(trailerMD)) return err }, callOptions...) if err == nil { after(res) } - return convertToGrpcStatusErr(err) + return err } req := &btpb.CheckAndMutateRowRequest{ @@ -982,15 +1073,15 @@ func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...Appl callOptions = retryOptions } var cmRes *btpb.CheckAndMutateRowResponse - err = gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { + err = gaxInvokeWithRecorder(ctx, mt, "CheckAndMutateRow", func(ctx context.Context, headerMD, trailerMD *metadata.MD, _ gax.CallSettings) error { var err error - cmRes, err = t.c.client.CheckAndMutateRow(ctx, req) + cmRes, err = t.c.client.CheckAndMutateRow(ctx, req, grpc.Header(headerMD), grpc.Trailer(trailerMD)) return err }, callOptions...) if err == nil { after(cmRes) } - return convertToGrpcStatusErr(err) + return err } // An ApplyOption is an optional argument to Apply. @@ -1100,6 +1191,20 @@ func (m *Mutation) addToCell(family, column string, ts Timestamp, value *btpb.Va }}}) } +// MergeBytesToCell merges a bytes accumulator value to a cell in an aggregate column family. +func (m *Mutation) MergeBytesToCell(family, column string, ts Timestamp, value []byte) { + m.mergeToCell(family, column, ts, &btpb.Value{Kind: &btpb.Value_RawValue{RawValue: value}}) +} + +func (m *Mutation) mergeToCell(family, column string, ts Timestamp, value *btpb.Value) { + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_MergeToCell_{MergeToCell: &btpb.Mutation_MergeToCell{ + FamilyName: family, + ColumnQualifier: &btpb.Value{Kind: &btpb.Value_RawValue{RawValue: []byte(column)}}, + Timestamp: &btpb.Value{Kind: &btpb.Value_RawTimestampMicros{RawTimestampMicros: int64(ts.TruncateToMilliseconds())}}, + Input: value, + }}}) +} + // entryErr is a container that combines an entry with the error that was returned for it. 
// Err may be nil if no error was returned for the Entry, or if the Entry has not yet been processed. type entryErr struct { @@ -1136,23 +1241,7 @@ func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutatio } for _, group := range groupEntries(origEntries, maxMutations) { - attrMap := make(map[string]interface{}) - err = gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { - attrMap["rowCount"] = len(group) - trace.TracePrintf(ctx, attrMap, "Row count in ApplyBulk") - err := t.doApplyBulk(ctx, group, opts...) - if err != nil { - // We want to retry the entire request with the current group - return err - } - group = t.getApplyBulkRetries(group) - if len(group) > 0 && len(idempotentRetryCodes) > 0 { - // We have at least one mutation that needs to be retried. - // Return an arbitrary error that is retryable according to callOptions. - return status.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk") - } - return nil - }, retryOptions...) + err := t.applyGroup(ctx, group, opts...) if err != nil { return nil, err } @@ -1173,6 +1262,33 @@ func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutatio return nil, nil } +func (t *Table) applyGroup(ctx context.Context, group []*entryErr, opts ...ApplyOption) (err error) { + attrMap := make(map[string]interface{}) + mt := t.newBuiltinMetricsTracer(ctx, true) + defer recordOperationCompletion(mt) + + err = gaxInvokeWithRecorder(ctx, mt, "MutateRows", func(ctx context.Context, headerMD, trailerMD *metadata.MD, _ gax.CallSettings) error { + attrMap["rowCount"] = len(group) + trace.TracePrintf(ctx, attrMap, "Row count in ApplyBulk") + err := t.doApplyBulk(ctx, group, headerMD, trailerMD, opts...) + if err != nil { + // We want to retry the entire request with the current group + return err + } + group = t.getApplyBulkRetries(group) + if len(group) > 0 && len(idempotentRetryCodes) > 0 { + // We have at least one mutation that needs to be retried. + // Return an arbitrary error that is retryable according to callOptions. + return status.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk") + } + return nil + }, retryOptions...) + + statusCode, statusErr := convertToGrpcStatusErr(err) + mt.currOp.setStatus(statusCode.String()) + return statusErr +} + // getApplyBulkRetries returns the entries that need to be retried func (t *Table) getApplyBulkRetries(entries []*entryErr) []*entryErr { var retryEntries []*entryErr @@ -1187,7 +1303,7 @@ func (t *Table) getApplyBulkRetries(entries []*entryErr) []*entryErr { } // doApplyBulk does the work of a single ApplyBulk invocation -func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ...ApplyOption) error { +func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, headerMD, trailerMD *metadata.MD, opts ...ApplyOption) error { after := func(res proto.Message) { for _, o := range opts { o.after(res) @@ -1207,16 +1323,23 @@ func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ... 
} else { req.AuthorizedViewName = t.c.fullAuthorizedViewName(t.table, t.authorizedView) } + stream, err := t.c.client.MutateRows(ctx, req) if err != nil { return err } + + // Ignore error since header is only being used to record builtin metrics + // Failure to record metrics should not fail the operation + *headerMD, _ = stream.Header() for { res, err := stream.Recv() if err == io.EOF { + *trailerMD = stream.Trailer() break } if err != nil { + *trailerMD = stream.Trailer() return err } @@ -1288,6 +1411,17 @@ func (ts Timestamp) TruncateToMilliseconds() Timestamp { // It returns the newly written cells. func (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadModifyWrite) (Row, error) { ctx = mergeOutgoingMetadata(ctx, t.md) + + mt := t.newBuiltinMetricsTracer(ctx, false) + defer recordOperationCompletion(mt) + + updatedRow, err := t.applyReadModifyWrite(ctx, mt, row, m) + statusCode, statusErr := convertToGrpcStatusErr(err) + mt.currOp.setStatus(statusCode.String()) + return updatedRow, statusErr +} + +func (t *Table) applyReadModifyWrite(ctx context.Context, mt *builtinMetricsTracer, row string, m *ReadModifyWrite) (Row, error) { req := &btpb.ReadModifyWriteRowRequest{ AppProfileId: t.c.appProfile, RowKey: []byte(row), @@ -1298,18 +1432,23 @@ func (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadMod } else { req.AuthorizedViewName = t.c.fullAuthorizedViewName(t.table, t.authorizedView) } - res, err := t.c.client.ReadModifyWriteRow(ctx, req) - if err != nil { - return nil, err - } - if res.Row == nil { - return nil, errors.New("unable to apply ReadModifyWrite: res.Row=nil") - } - r := make(Row) - for _, fam := range res.Row.Families { // res is *btpb.Row, fam is *btpb.Family - decodeFamilyProto(r, row, fam) - } - return r, nil + + var r Row + err := gaxInvokeWithRecorder(ctx, mt, "ReadModifyWriteRow", func(ctx context.Context, headerMD, trailerMD *metadata.MD, _ gax.CallSettings) error { + res, err := t.c.client.ReadModifyWriteRow(ctx, req, grpc.Header(headerMD), grpc.Trailer(trailerMD)) + if err != nil { + return err + } + if res.Row == nil { + return errors.New("unable to apply ReadModifyWrite: res.Row=nil") + } + r = make(Row) + for _, fam := range res.Row.Families { // res is *btpb.Row, fam is *btpb.Family + decodeFamilyProto(r, row, fam) + } + return nil + }) + return r, err } // ReadModifyWrite represents a set of operations on a single row of a table. @@ -1353,8 +1492,19 @@ func (m *ReadModifyWrite) Increment(family, column string, delta int64) { // the table of approximately equal size, which can be used to break up the data for distributed tasks like mapreduces. 
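+// A minimal usage sketch (tbl and ctx are assumed to be an existing *Table
+// and context.Context; illustrative only):
+//
+//	keys, err := tbl.SampleRowKeys(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	// entries in keys delimit roughly equal-sized sections of the table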
func (t *Table) SampleRowKeys(ctx context.Context) ([]string, error) { ctx = mergeOutgoingMetadata(ctx, t.md) + + mt := t.newBuiltinMetricsTracer(ctx, true) + defer recordOperationCompletion(mt) + + rowKeys, err := t.sampleRowKeys(ctx, mt) + statusCode, statusErr := convertToGrpcStatusErr(err) + mt.currOp.setStatus(statusCode.String()) + return rowKeys, statusErr +} + +func (t *Table) sampleRowKeys(ctx context.Context, mt *builtinMetricsTracer) ([]string, error) { var sampledRowKeys []string - err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { + err := gaxInvokeWithRecorder(ctx, mt, "SampleRowKeys", func(ctx context.Context, headerMD, trailerMD *metadata.MD, _ gax.CallSettings) error { sampledRowKeys = nil req := &btpb.SampleRowKeysRequest{ AppProfileId: t.c.appProfile, @@ -1371,12 +1521,18 @@ func (t *Table) SampleRowKeys(ctx context.Context) ([]string, error) { if err != nil { return err } + + // Ignore error since header is only being used to record builtin metrics + // Failure to record metrics should not fail the operation + *headerMD, _ = stream.Header() for { res, err := stream.Recv() if err == io.EOF { + *trailerMD = stream.Trailer() break } if err != nil { + *trailerMD = stream.Trailer() return err } @@ -1389,5 +1545,111 @@ func (t *Table) SampleRowKeys(ctx context.Context) ([]string, error) { } return nil }, retryOptions...) - return sampledRowKeys, convertToGrpcStatusErr(err) + + return sampledRowKeys, err +} + +func (t *Table) newBuiltinMetricsTracer(ctx context.Context, isStreaming bool) *builtinMetricsTracer { + mt := t.c.metricsTracerFactory.createBuiltinMetricsTracer(ctx, t.table, isStreaming) + return &mt +} + +// recordOperationCompletion records as many operation specific metrics as it can +// Ignores error seen while creating metric attributes since metric can still +// be recorded with rest of the attributes +func recordOperationCompletion(mt *builtinMetricsTracer) { + if !mt.builtInEnabled { + return + } + + // Calculate elapsed time + elapsedTimeMs := convertToMs(time.Since(mt.currOp.startTime)) + + // Record operation_latencies + opLatAttrs, _ := mt.toOtelMetricAttrs(metricNameOperationLatencies) + mt.instrumentOperationLatencies.Record(mt.ctx, elapsedTimeMs, metric.WithAttributes(opLatAttrs...)) + + // Record retry_count + retryCntAttrs, _ := mt.toOtelMetricAttrs(metricNameRetryCount) + if mt.currOp.attemptCount > 1 { + // Only record when retry count is greater than 0 so the retry + // graph will be less confusing + mt.instrumentRetryCount.Add(mt.ctx, mt.currOp.attemptCount-1, metric.WithAttributes(retryCntAttrs...)) + } +} + +// gaxInvokeWithRecorder: +// - wraps 'f' in a new function 'callWrapper' that: +// - updates tracer state and records built in attempt specific metrics +// - does not return errors seen while recording the metrics +// +// - then, calls gax.Invoke with 'callWrapper' as an argument +func gaxInvokeWithRecorder(ctx context.Context, mt *builtinMetricsTracer, method string, + f func(ctx context.Context, headerMD, trailerMD *metadata.MD, _ gax.CallSettings) error, opts ...gax.CallOption) error { + attemptHeaderMD := metadata.New(nil) + attempTrailerMD := metadata.New(nil) + mt.method = method + + var callWrapper func(context.Context, gax.CallSettings) error + if !mt.builtInEnabled { + callWrapper = func(ctx context.Context, callSettings gax.CallSettings) error { + // f makes calls to CBT service + return f(ctx, &attemptHeaderMD, &attempTrailerMD, callSettings) + } + } else { + callWrapper = func(ctx context.Context, 
callSettings gax.CallSettings) error { + // Increment number of attempts + mt.currOp.incrementAttemptCount() + + mt.currOp.currAttempt = attemptTracer{} + + // record start time + mt.currOp.currAttempt.setStartTime(time.Now()) + + // f makes calls to CBT service + err := f(ctx, &attemptHeaderMD, &attempTrailerMD, callSettings) + + // Set attempt status + statusCode, _ := convertToGrpcStatusErr(err) + mt.currOp.currAttempt.setStatus(statusCode.String()) + + // Get location attributes from metadata and set it in tracer + // Ignore get location error since the metric can still be recorded with rest of the attributes + clusterID, zoneID, _ := extractLocation(attemptHeaderMD, attempTrailerMD) + mt.currOp.currAttempt.setClusterID(clusterID) + mt.currOp.currAttempt.setZoneID(zoneID) + + // Set server latency in tracer + serverLatency, serverLatencyErr := extractServerLatency(attemptHeaderMD, attempTrailerMD) + mt.currOp.currAttempt.setServerLatencyErr(serverLatencyErr) + mt.currOp.currAttempt.setServerLatency(serverLatency) + + // Record attempt specific metrics + recordAttemptCompletion(mt) + return err + } + } + return gax.Invoke(ctx, callWrapper, opts...) +} + +// recordAttemptCompletion records as many attempt specific metrics as it can +// Ignore errors seen while creating metric attributes since metric can still +// be recorded with rest of the attributes +func recordAttemptCompletion(mt *builtinMetricsTracer) { + if !mt.builtInEnabled { + return + } + + // Calculate elapsed time + elapsedTime := convertToMs(time.Since(mt.currOp.currAttempt.startTime)) + + // Record attempt_latencies + attemptLatAttrs, _ := mt.toOtelMetricAttrs(metricNameAttemptLatencies) + mt.instrumentAttemptLatencies.Record(mt.ctx, elapsedTime, metric.WithAttributes(attemptLatAttrs...)) + + // Record server_latencies + serverLatAttrs, _ := mt.toOtelMetricAttrs(metricNameServerLatencies) + if mt.currOp.currAttempt.serverLatencyErr == nil { + mt.instrumentServerLatencies.Record(mt.ctx, mt.currOp.currAttempt.serverLatency, metric.WithAttributes(serverLatAttrs...)) + } } diff --git a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/conformance_test.sh b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/conformance_test.sh index 35a126e2f94..bf6f520a6b0 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/conformance_test.sh +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/conformance_test.sh @@ -50,10 +50,10 @@ trap cleanup EXIT # Run the conformance tests cd $conformanceTestsHome -# Tests in https://github.com/googleapis/cloud-bigtable-clients-test/tree/main/tests can only be run on go1.20.2 -go install golang.org/dl/go1.20.2@latest -go1.20.2 download -go1.20.2 test -v -proxy_addr=:$testProxyPort | tee -a $sponge_log +# Tests in https://github.com/googleapis/cloud-bigtable-clients-test/tree/main/tests can only be run on go1.22.5 +go install golang.org/dl/go1.22.5@latest +go1.22.5 download +go1.22.5 test -v -proxy_addr=:$testProxyPort | tee -a $sponge_log RETURN_CODE=$? 
echo "exiting with ${RETURN_CODE}" diff --git a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/filter.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/filter.go index bef378b7316..bd32a6853d6 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/filter.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/filter.go @@ -21,7 +21,7 @@ import ( "strings" "time" - btpb "google.golang.org/genproto/googleapis/bigtable/v2" + btpb "cloud.google.com/go/bigtable/apiv2/bigtablepb" ) // A Filter represents a row filter. diff --git a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/gc.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/gc.go index d2e6a382876..a5149900d16 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/gc.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/gc.go @@ -21,7 +21,7 @@ import ( "strings" "time" - bttdpb "google.golang.org/genproto/googleapis/bigtable/admin/v2" + bttdpb "cloud.google.com/go/bigtable/admin/apiv2/adminpb" "google.golang.org/protobuf/types/known/durationpb" ) diff --git a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/internal/option/option.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/internal/option/option.go index cd4de72cd45..d6c879266e2 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/internal/option/option.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/internal/option/option.go @@ -19,13 +19,9 @@ package option import ( "context" - "encoding/base64" "fmt" "os" - btpb "google.golang.org/genproto/googleapis/bigtable/v2" - "google.golang.org/protobuf/proto" - "cloud.google.com/go/bigtable/internal" "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go/v2" @@ -66,25 +62,6 @@ func withGoogleClientInfo() metadata.MD { return metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) } -func makeFeatureFlags() string { - ff := btpb.FeatureFlags{ReverseScans: true, LastScannedRowResponses: true} - b, err := proto.Marshal(&ff) - if err != nil { - return "" - } - - return base64.URLEncoding.EncodeToString(b) -} - -var featureFlags = makeFeatureFlags() - -// WithFeatureFlags set the feature flags the client supports in the -// `bigtable-features` header sent on each request. Intended for -// use by Google-written clients. -func WithFeatureFlags() metadata.MD { - return metadata.Pairs("bigtable-features", featureFlags) -} - // streamInterceptor intercepts the creation of ClientStream within the bigtable // client to inject Google client information into the context metadata for // streaming RPCs. diff --git a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/internal/version.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/internal/version.go index 291a237fe1c..ba70a43673b 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/internal/version.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "1.24.0" +const Version = "1.33.0" diff --git a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/metrics.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/metrics.go new file mode 100644 index 00000000000..c76ecfa1e0d --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/metrics.go @@ -0,0 +1,417 @@ +/* +Copyright 2024 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "context" + "errors" + "fmt" + "os" + "time" + + "cloud.google.com/go/bigtable/internal" + "github.com/google/uuid" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "google.golang.org/api/option" +) + +const ( + builtInMetricsMeterName = "bigtable.googleapis.com/internal/client/" + + metricsPrefix = "bigtable/" + locationMDKey = "x-goog-ext-425905942-bin" + serverTimingMDKey = "server-timing" + serverTimingValPrefix = "gfet4t7; dur=" + + // Monitored resource labels + monitoredResLabelKeyProject = "project_id" + monitoredResLabelKeyInstance = "instance" + monitoredResLabelKeyTable = "table" + monitoredResLabelKeyCluster = "cluster" + monitoredResLabelKeyZone = "zone" + + // Metric labels + metricLabelKeyAppProfile = "app_profile" + metricLabelKeyMethod = "method" + metricLabelKeyStatus = "status" + metricLabelKeyStreamingOperation = "streaming" + metricLabelKeyClientName = "client_name" + metricLabelKeyClientUID = "client_uid" + + // Metric names + metricNameOperationLatencies = "operation_latencies" + metricNameAttemptLatencies = "attempt_latencies" + metricNameServerLatencies = "server_latencies" + metricNameRetryCount = "retry_count" + + // Metric units + metricUnitMS = "ms" + metricUnitCount = "1" +) + +// These are effectively constant, but for testing purposes they are mutable +var ( + // duration between two metric exports + defaultSamplePeriod = 5 * time.Minute + + metricsErrorPrefix = "bigtable-metrics: " + + clientName = fmt.Sprintf("go-bigtable/%v", internal.Version) + + bucketBounds = []float64{0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, 13.0, 16.0, 20.0, 25.0, 30.0, 40.0, + 50.0, 65.0, 80.0, 100.0, 130.0, 160.0, 200.0, 250.0, 300.0, 400.0, 500.0, 650.0, + 800.0, 1000.0, 2000.0, 5000.0, 10000.0, 20000.0, 50000.0, 100000.0, 200000.0, + 400000.0, 800000.0, 1600000.0, 3200000.0} + + // All the built-in metrics have same attributes except 'status' and 'streaming' + // These attributes need to be added to only few of the metrics + metricsDetails = map[string]metricInfo{ + metricNameOperationLatencies: { + additionalAttrs: []string{ + metricLabelKeyStatus, + metricLabelKeyStreamingOperation, + }, + recordedPerAttempt: false, + }, + metricNameAttemptLatencies: { + additionalAttrs: []string{ + metricLabelKeyStatus, + metricLabelKeyStreamingOperation, + }, + recordedPerAttempt: true, + }, + metricNameServerLatencies: { + additionalAttrs: []string{ + metricLabelKeyStatus, + metricLabelKeyStreamingOperation, + }, + recordedPerAttempt: true, + }, + 
metricNameRetryCount: { + additionalAttrs: []string{ + metricLabelKeyStatus, + }, + recordedPerAttempt: true, + }, + } + + // Generates unique client ID in the format go-<uuid>@<hostname> + generateClientUID = func() (string, error) { + hostname := "localhost" + hostname, err := os.Hostname() + if err != nil { + return "", err + } + return "go-" + uuid.NewString() + "@" + hostname, nil + } + + // GCM exporter should use the same options as Bigtable client + // createExporterOptions takes Bigtable client options and returns exporter options + // Overwritten in tests + createExporterOptions = func(btOpts ...option.ClientOption) []option.ClientOption { + return btOpts + } +) + +type metricInfo struct { + additionalAttrs []string + recordedPerAttempt bool +} + +type builtinMetricsTracerFactory struct { + enabled bool + + // To be called on client close + shutdown func() + + // attributes that are specific to a client instance and + // do not change across different function calls on client + clientAttributes []attribute.KeyValue + + operationLatencies metric.Float64Histogram + serverLatencies metric.Float64Histogram + attemptLatencies metric.Float64Histogram + retryCount metric.Int64Counter +} + +func newBuiltinMetricsTracerFactory(ctx context.Context, project, instance, appProfile string, metricsProvider MetricsProvider, opts ...option.ClientOption) (*builtinMetricsTracerFactory, error) { + clientUID, err := generateClientUID() + if err != nil { + return nil, err + } + + tracerFactory := &builtinMetricsTracerFactory{ + enabled: false, + clientAttributes: []attribute.KeyValue{ + attribute.String(monitoredResLabelKeyProject, project), + attribute.String(monitoredResLabelKeyInstance, instance), + attribute.String(metricLabelKeyAppProfile, appProfile), + attribute.String(metricLabelKeyClientUID, clientUID), + attribute.String(metricLabelKeyClientName, clientName), + }, + shutdown: func() {}, + } + + var meterProvider *sdkmetric.MeterProvider + if metricsProvider == nil { + // Create default meter provider + mpOptions, err := builtInMeterProviderOptions(project, opts...) + if err != nil { + return tracerFactory, err + } + meterProvider = sdkmetric.NewMeterProvider(mpOptions...) + + tracerFactory.enabled = true + tracerFactory.shutdown = func() { meterProvider.Shutdown(ctx) } + } else { + switch metricsProvider.(type) { + case NoopMetricsProvider: + tracerFactory.enabled = false + return tracerFactory, nil + default: + tracerFactory.enabled = false + return tracerFactory, errors.New("unknown MetricsProvider type") + } + } + + // Create meter and instruments + meter := meterProvider.Meter(builtInMetricsMeterName, metric.WithInstrumentationVersion(internal.Version)) + err = tracerFactory.createInstruments(meter) + return tracerFactory, err +} + +func builtInMeterProviderOptions(project string, opts ...option.ClientOption) ([]sdkmetric.Option, error) { + allOpts := createExporterOptions(opts...) + defaultExporter, err := newMonitoringExporter(context.Background(), project, allOpts...)
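+	// The periodic reader configured below pushes these metrics to Cloud
+	// Monitoring every defaultSamplePeriod (5 minutes by default).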
+ if err != nil { + return nil, err + } + + return []sdkmetric.Option{sdkmetric.WithReader( + sdkmetric.NewPeriodicReader( + defaultExporter, + sdkmetric.WithInterval(defaultSamplePeriod), + ), + )}, nil +} + +func (tf *builtinMetricsTracerFactory) createInstruments(meter metric.Meter) error { + var err error + + // Create operation_latencies + tf.operationLatencies, err = meter.Float64Histogram( + metricNameOperationLatencies, + metric.WithDescription("Total time until final operation success or failure, including retries and backoff."), + metric.WithUnit(metricUnitMS), + metric.WithExplicitBucketBoundaries(bucketBounds...), + ) + if err != nil { + return err + } + + // Create attempt_latencies + tf.attemptLatencies, err = meter.Float64Histogram( + metricNameAttemptLatencies, + metric.WithDescription("Client observed latency per RPC attempt."), + metric.WithUnit(metricUnitMS), + metric.WithExplicitBucketBoundaries(bucketBounds...), + ) + if err != nil { + return err + } + + // Create server_latencies + tf.serverLatencies, err = meter.Float64Histogram( + metricNameServerLatencies, + metric.WithDescription("The latency measured from the moment that the RPC entered the Google data center until the RPC was completed."), + metric.WithUnit(metricUnitMS), + metric.WithExplicitBucketBoundaries(bucketBounds...), + ) + if err != nil { + return err + } + + // Create retry_count + tf.retryCount, err = meter.Int64Counter( + metricNameRetryCount, + metric.WithDescription("The number of additional RPCs sent after the initial attempt."), + metric.WithUnit(metricUnitCount), + ) + return err +} + +// builtinMetricsTracer is created one per operation +// It is used to store metric instruments, attribute values +// and other data required to obtain and record them +type builtinMetricsTracer struct { + ctx context.Context + builtInEnabled bool + + // attributes that are specific to a client instance and + // do not change across different operations on client + clientAttributes []attribute.KeyValue + + instrumentOperationLatencies metric.Float64Histogram + instrumentServerLatencies metric.Float64Histogram + instrumentAttemptLatencies metric.Float64Histogram + instrumentRetryCount metric.Int64Counter + + tableName string + method string + isStreaming bool + + currOp opTracer +} + +// opTracer is used to record metrics for the entire operation, including retries. +// Operation is a logical unit that represents a single method invocation on client. +// The method might require multiple attempts/rpcs and backoff logic to complete +type opTracer struct { + attemptCount int64 + + startTime time.Time + + // gRPC status code of last completed attempt + status string + + currAttempt attemptTracer +} + +func (o *opTracer) setStartTime(t time.Time) { + o.startTime = t +} + +func (o *opTracer) setStatus(status string) { + o.status = status +} + +func (o *opTracer) incrementAttemptCount() { + o.attemptCount++ +} + +// attemptTracer is used to record metrics for each individual attempt of the operation. +// Attempt corresponds to an attempt of an RPC. 
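+// For example, an Apply call that succeeds on its second RPC produces one
+// opTracer with attemptCount == 2 (so retry_count records 1) and two
+// successive attemptTracer values, one per RPC attempt.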
+type attemptTracer struct { + startTime time.Time + clusterID string + zoneID string + + // gRPC status code + status string + + // Server latency in ms + serverLatency float64 + + // Error seen while getting server latency from headers + serverLatencyErr error +} + +func (a *attemptTracer) setStartTime(t time.Time) { + a.startTime = t +} + +func (a *attemptTracer) setClusterID(clusterID string) { + a.clusterID = clusterID +} + +func (a *attemptTracer) setZoneID(zoneID string) { + a.zoneID = zoneID +} + +func (a *attemptTracer) setStatus(status string) { + a.status = status +} + +func (a *attemptTracer) setServerLatency(latency float64) { + a.serverLatency = latency +} + +func (a *attemptTracer) setServerLatencyErr(err error) { + a.serverLatencyErr = err +} + +func (tf *builtinMetricsTracerFactory) createBuiltinMetricsTracer(ctx context.Context, tableName string, isStreaming bool) builtinMetricsTracer { + // Operation has started but not the attempt. + // So, create only operation tracer and not attempt tracer + currOpTracer := opTracer{} + currOpTracer.setStartTime(time.Now()) + + return builtinMetricsTracer{ + ctx: ctx, + builtInEnabled: tf.enabled, + + currOp: currOpTracer, + clientAttributes: tf.clientAttributes, + + instrumentOperationLatencies: tf.operationLatencies, + instrumentServerLatencies: tf.serverLatencies, + instrumentAttemptLatencies: tf.attemptLatencies, + instrumentRetryCount: tf.retryCount, + + tableName: tableName, + isStreaming: isStreaming, + } +} + +// toOtelMetricAttrs: +// - converts metric attribute values captured throughout the operation / attempt +// to OpenTelemetry attributes format, +// - combines these with common client attributes and returns +func (mt *builtinMetricsTracer) toOtelMetricAttrs(metricName string) ([]attribute.KeyValue, error) { + // Create attribute key value pairs for attributes common to all metrics + attrKeyValues := []attribute.KeyValue{ + attribute.String(metricLabelKeyMethod, mt.method), + + // Add resource labels to otel metric labels. + // These will be used for creating the monitored resource but exporter + // will not add them to Google Cloud Monitoring metric labels + attribute.String(monitoredResLabelKeyTable, mt.tableName), + + // Irrespective of whether metric is attempt specific or operation specific, + // use last attempt's cluster and zone + attribute.String(monitoredResLabelKeyCluster, mt.currOp.currAttempt.clusterID), + attribute.String(monitoredResLabelKeyZone, mt.currOp.currAttempt.zoneID), + } + attrKeyValues = append(attrKeyValues, mt.clientAttributes...)
+ + // Get metric details + mDetails, found := metricsDetails[metricName] + if !found { + return attrKeyValues, fmt.Errorf("unable to create attributes list for unknown metric: %v", metricName) + } + + status := mt.currOp.status + if mDetails.recordedPerAttempt { + status = mt.currOp.currAttempt.status + } + + // Add additional attributes to metrics + for _, attrKey := range mDetails.additionalAttrs { + switch attrKey { + case metricLabelKeyStatus: + attrKeyValues = append(attrKeyValues, attribute.String(metricLabelKeyStatus, status)) + case metricLabelKeyStreamingOperation: + attrKeyValues = append(attrKeyValues, attribute.Bool(metricLabelKeyStreamingOperation, mt.isStreaming)) + default: + return attrKeyValues, fmt.Errorf("unknown additional attribute: %v", attrKey) + } + } + + return attrKeyValues, nil +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/metrics_monitoring_exporter.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/metrics_monitoring_exporter.go new file mode 100644 index 00000000000..98d63743d05 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/metrics_monitoring_exporter.go @@ -0,0 +1,354 @@ +/* +Copyright 2024 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This is a modified version of https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/blob/exporter/metric/v0.46.0/exporter/metric/metric.go + +package bigtable + +import ( + "context" + "errors" + "fmt" + "math" + "reflect" + "sync" + "time" + + monitoring "cloud.google.com/go/monitoring/apiv3/v2" + "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + "go.opentelemetry.io/otel/attribute" + otelmetric "go.opentelemetry.io/otel/sdk/metric" + otelmetricdata "go.opentelemetry.io/otel/sdk/metric/metricdata" + "google.golang.org/api/option" + "google.golang.org/genproto/googleapis/api/distribution" + googlemetricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + bigtableResourceType = "bigtable_client_raw" + + // The number of time series to send to GCM in a single request. This + // is a hard limit in the GCM API, so we never want to exceed 200. + sendBatchSize = 200 +) + +var ( + monitoredResLabelsSet = map[string]bool{ + monitoredResLabelKeyProject: true, + monitoredResLabelKeyInstance: true, + monitoredResLabelKeyCluster: true, + monitoredResLabelKeyTable: true, + monitoredResLabelKeyZone: true, + } + + errShutdown = fmt.Errorf("exporter is shutdown") +) + +type errUnexpectedAggregationKind struct { + kind string +} + +func (e errUnexpectedAggregationKind) Error() string { + return fmt.Sprintf("the metric kind is unexpected: %v", e.kind) +} + +// monitoringExporter is the implementation of OpenTelemetry metric exporter for +// Google Cloud Monitoring.
+// Default exporter for built-in metrics +type monitoringExporter struct { + shutdown chan struct{} + client *monitoring.MetricClient + shutdownOnce sync.Once + projectID string +} + +func newMonitoringExporter(ctx context.Context, project string, opts ...option.ClientOption) (*monitoringExporter, error) { + client, err := monitoring.NewMetricClient(ctx, opts...) + if err != nil { + return nil, err + } + return &monitoringExporter{ + client: client, + shutdown: make(chan struct{}), + projectID: project, + }, nil +} + +func wrapMetricsError(err error) error { + if err == nil { + return err + } + return fmt.Errorf("%v%w", metricsErrorPrefix, err) +} + +// ForceFlush does nothing; the exporter holds no state. +func (me *monitoringExporter) ForceFlush(ctx context.Context) error { + return wrapMetricsError(ctx.Err()) +} + +// Shutdown shuts down the client connections. +func (me *monitoringExporter) Shutdown(ctx context.Context) error { + err := errShutdown + me.shutdownOnce.Do(func() { + close(me.shutdown) + err = errors.Join(ctx.Err(), me.client.Close()) + }) + return wrapMetricsError(err) +} + +// Export exports OpenTelemetry Metrics to Google Cloud Monitoring. +func (me *monitoringExporter) Export(ctx context.Context, rm *otelmetricdata.ResourceMetrics) error { + select { + case <-me.shutdown: + return wrapMetricsError(errShutdown) + default: + } + + return wrapMetricsError(me.exportTimeSeries(ctx, rm)) +} + +// Temporality returns the Temporality to use for an instrument kind. +func (me *monitoringExporter) Temporality(ik otelmetric.InstrumentKind) otelmetricdata.Temporality { + return otelmetricdata.CumulativeTemporality +} + +// Aggregation returns the Aggregation to use for an instrument kind. +func (me *monitoringExporter) Aggregation(ik otelmetric.InstrumentKind) otelmetric.Aggregation { + return otelmetric.DefaultAggregationSelector(ik) +} + +// exportTimeSeries creates TimeSeries from the records in rm and sends them to +// Google Cloud Monitoring in batches of at most sendBatchSize. +func (me *monitoringExporter) exportTimeSeries(ctx context.Context, rm *otelmetricdata.ResourceMetrics) error { + tss, err := me.recordsToTimeSeriesPbs(rm) + if len(tss) == 0 { + return err + } + + name := fmt.Sprintf("projects/%s", me.projectID) + + errs := []error{err} + for i := 0; i < len(tss); i += sendBatchSize { + j := i + sendBatchSize + if j >= len(tss) { + j = len(tss) + } + + req := &monitoringpb.CreateTimeSeriesRequest{ + Name: name, + TimeSeries: tss[i:j], + } + errs = append(errs, me.client.CreateServiceTimeSeries(ctx, req)) + } + + return errors.Join(errs...) +} + +// recordToMetricAndMonitoredResourcePbs converts data from records to Metric and Monitored resource proto type for Cloud Monitoring.
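+// For example, of the attributes assembled in toOtelMetricAttrs, project_id,
+// instance, table, cluster and zone are keys in monitoredResLabelsSet and so
+// become monitored-resource labels, while method, status, streaming,
+// app_profile, client_name and client_uid stay on the metric itself.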
+func (me *monitoringExporter) recordToMetricAndMonitoredResourcePbs(metrics otelmetricdata.Metrics, attributes attribute.Set) (*googlemetricpb.Metric, *monitoredrespb.MonitoredResource) { + mr := &monitoredrespb.MonitoredResource{ + Type: bigtableResourceType, + Labels: map[string]string{}, + } + labels := make(map[string]string) + addAttributes := func(attr *attribute.Set) { + iter := attr.Iter() + for iter.Next() { + kv := iter.Attribute() + labelKey := string(kv.Key) + + if _, isResLabel := monitoredResLabelsSet[labelKey]; isResLabel { + // Add labels to monitored resource + mr.Labels[labelKey] = kv.Value.Emit() + } else { + // Add labels to metric + labels[labelKey] = kv.Value.Emit() + + } + } + } + addAttributes(&attributes) + return &googlemetricpb.Metric{ + Type: fmt.Sprintf("%v%s", builtInMetricsMeterName, metrics.Name), + Labels: labels, + }, mr +} + +func (me *monitoringExporter) recordsToTimeSeriesPbs(rm *otelmetricdata.ResourceMetrics) ([]*monitoringpb.TimeSeries, error) { + var ( + tss []*monitoringpb.TimeSeries + errs []error + ) + for _, scope := range rm.ScopeMetrics { + if scope.Scope.Name != builtInMetricsMeterName { + // Filter out metric data for instruments that are not part of the bigtable builtin metrics + continue + } + for _, metrics := range scope.Metrics { + ts, err := me.recordToTimeSeriesPb(metrics) + errs = append(errs, err) + tss = append(tss, ts...) + } + } + + return tss, errors.Join(errs...) +} + +// recordToTimeSeriesPb converts record to TimeSeries proto type with common resource. +// ref. https://cloud.google.com/monitoring/api/ref_v3/rest/v3/TimeSeries +func (me *monitoringExporter) recordToTimeSeriesPb(m otelmetricdata.Metrics) ([]*monitoringpb.TimeSeries, error) { + var tss []*monitoringpb.TimeSeries + var errs []error + if m.Data == nil { + return nil, nil + } + switch a := m.Data.(type) { + case otelmetricdata.Histogram[float64]: + for _, point := range a.DataPoints { + metric, mr := me.recordToMetricAndMonitoredResourcePbs(m, point.Attributes) + ts, err := histogramToTimeSeries(point, m, mr) + if err != nil { + errs = append(errs, err) + continue + } + ts.Metric = metric + tss = append(tss, ts) + } + case otelmetricdata.Sum[int64]: + for _, point := range a.DataPoints { + metric, mr := me.recordToMetricAndMonitoredResourcePbs(m, point.Attributes) + var ts *monitoringpb.TimeSeries + var err error + ts, err = sumToTimeSeries[int64](point, m, mr) + if err != nil { + errs = append(errs, err) + continue + } + ts.Metric = metric + tss = append(tss, ts) + } + default: + errs = append(errs, errUnexpectedAggregationKind{kind: reflect.TypeOf(m.Data).String()}) + } + return tss, errors.Join(errs...) 
+} + +func sumToTimeSeries[N int64 | float64](point otelmetricdata.DataPoint[N], metrics otelmetricdata.Metrics, mr *monitoredrespb.MonitoredResource) (*monitoringpb.TimeSeries, error) { + interval, err := toNonemptyTimeIntervalpb(point.StartTime, point.Time) + if err != nil { + return nil, err + } + value, valueType := numberDataPointToValue[N](point) + return &monitoringpb.TimeSeries{ + Resource: mr, + Unit: string(metrics.Unit), + MetricKind: googlemetricpb.MetricDescriptor_CUMULATIVE, + ValueType: valueType, + Points: []*monitoringpb.Point{{ + Interval: interval, + Value: value, + }}, + }, nil +} + +func histogramToTimeSeries[N int64 | float64](point otelmetricdata.HistogramDataPoint[N], metrics otelmetricdata.Metrics, mr *monitoredrespb.MonitoredResource) (*monitoringpb.TimeSeries, error) { + interval, err := toNonemptyTimeIntervalpb(point.StartTime, point.Time) + if err != nil { + return nil, err + } + distributionValue := histToDistribution(point) + return &monitoringpb.TimeSeries{ + Resource: mr, + Unit: string(metrics.Unit), + MetricKind: googlemetricpb.MetricDescriptor_CUMULATIVE, + ValueType: googlemetricpb.MetricDescriptor_DISTRIBUTION, + Points: []*monitoringpb.Point{{ + Interval: interval, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DistributionValue{ + DistributionValue: distributionValue, + }, + }, + }}, + }, nil +} + +func toNonemptyTimeIntervalpb(start, end time.Time) (*monitoringpb.TimeInterval, error) { + // The end time of a new interval must be at least a millisecond after the end time of the + // previous interval, for all non-gauge types. + // https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#timeinterval + if end.Sub(start).Milliseconds() <= 1 { + end = start.Add(time.Millisecond) + } + startpb := timestamppb.New(start) + endpb := timestamppb.New(end) + err := errors.Join( + startpb.CheckValid(), + endpb.CheckValid(), + ) + if err != nil { + return nil, err + } + + return &monitoringpb.TimeInterval{ + StartTime: startpb, + EndTime: endpb, + }, nil +} + +func histToDistribution[N int64 | float64](hist otelmetricdata.HistogramDataPoint[N]) *distribution.Distribution { + counts := make([]int64, len(hist.BucketCounts)) + for i, v := range hist.BucketCounts { + counts[i] = int64(v) + } + var mean float64 + if !math.IsNaN(float64(hist.Sum)) && hist.Count > 0 { // Avoid divide-by-zero + mean = float64(hist.Sum) / float64(hist.Count) + } + return &distribution.Distribution{ + Count: int64(hist.Count), + Mean: mean, + BucketCounts: counts, + BucketOptions: &distribution.Distribution_BucketOptions{ + Options: &distribution.Distribution_BucketOptions_ExplicitBuckets{ + ExplicitBuckets: &distribution.Distribution_BucketOptions_Explicit{ + Bounds: hist.Bounds, + }, + }, + }, + } +} + +func numberDataPointToValue[N int64 | float64]( + point otelmetricdata.DataPoint[N], +) (*monitoringpb.TypedValue, googlemetricpb.MetricDescriptor_ValueType) { + switch v := any(point.Value).(type) { + case int64: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: v, + }}, + googlemetricpb.MetricDescriptor_INT64 + case float64: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: v, + }}, + googlemetricpb.MetricDescriptor_DOUBLE + } + // It is impossible to reach this statement + return nil, googlemetricpb.MetricDescriptor_INT64 +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/metrics_util.go 
b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/metrics_util.go new file mode 100644 index 00000000000..8783f6ff4b2 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/metrics_util.go @@ -0,0 +1,99 @@ +/* +Copyright 2024 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "errors" + "strconv" + "strings" + "time" + + btpb "cloud.google.com/go/bigtable/apiv2/bigtablepb" + "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" +) + +const ( + defaultCluster = "unspecified" + defaultZone = "global" +) + +// get GFE latency in ms from response metadata +func extractServerLatency(headerMD metadata.MD, trailerMD metadata.MD) (float64, error) { + serverTimingStr := "" + + // Check whether server latency available in response header metadata + if headerMD != nil { + headerMDValues := headerMD.Get(serverTimingMDKey) + if len(headerMDValues) != 0 { + serverTimingStr = headerMDValues[0] + } + } + + if len(serverTimingStr) == 0 { + // Check whether server latency available in response trailer metadata + if trailerMD != nil { + trailerMDValues := trailerMD.Get(serverTimingMDKey) + if len(trailerMDValues) != 0 { + serverTimingStr = trailerMDValues[0] + } + } + } + + serverLatencyMillisStr := strings.TrimPrefix(serverTimingStr, serverTimingValPrefix) + serverLatencyMillis, err := strconv.ParseFloat(strings.TrimSpace(serverLatencyMillisStr), 64) + if !strings.HasPrefix(serverTimingStr, serverTimingValPrefix) || err != nil { + return serverLatencyMillis, err + } + + return serverLatencyMillis, nil +} + +// Obtain cluster and zone from response metadata +func extractLocation(headerMD metadata.MD, trailerMD metadata.MD) (string, string, error) { + var locationMetadata []string + + // Check whether location metadata available in response header metadata + if headerMD != nil { + locationMetadata = headerMD.Get(locationMDKey) + } + + if locationMetadata == nil { + // Check whether location metadata available in response trailer metadata + // if none found in response header metadata + if trailerMD != nil { + locationMetadata = trailerMD.Get(locationMDKey) + } + } + + if len(locationMetadata) < 1 { + return defaultCluster, defaultZone, errors.New("failed to get location metadata") + } + + // Unmarshal binary location metadata + responseParams := &btpb.ResponseParams{} + err := proto.Unmarshal([]byte(locationMetadata[0]), responseParams) + if err != nil { + return defaultCluster, defaultZone, err + } + + return responseParams.GetClusterId(), responseParams.GetZoneId(), nil +} + +func convertToMs(d time.Duration) float64 { + return float64(d.Nanoseconds()) / 1000000 +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/reader.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/reader.go index 8f0a4c1cd21..0ca14bc1d59 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/reader.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/reader.go @@ -21,7 
+21,7 @@ import ( "fmt" "strings" - btpb "google.golang.org/genproto/googleapis/bigtable/v2" + btpb "cloud.google.com/go/bigtable/apiv2/bigtablepb" ) // A Row is returned by ReadRows. The map is keyed by column family (the prefix diff --git a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/type.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/type.go index a390e8fd44b..88dd9921969 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/type.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/type.go @@ -16,7 +16,11 @@ limitations under the License. package bigtable -import btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" +import ( + btapb "cloud.google.com/go/bigtable/admin/apiv2/adminpb" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) // Type wraps the protobuf representation of a type. See the protobuf definition // for more details on types. @@ -24,6 +28,49 @@ type Type interface { proto() *btapb.Type } +var marshalOptions = protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} +var unmarshalOptions = protojson.UnmarshalOptions{AllowPartial: true} + +// MarshalJSON returns the string representation of the Type protobuf. +func MarshalJSON(t Type) ([]byte, error) { + return marshalOptions.Marshal(t.proto()) +} + +// UnmarshalJSON returns a Type object from json bytes. +func UnmarshalJSON(data []byte) (Type, error) { + result := &btapb.Type{} + if err := unmarshalOptions.Unmarshal(data, result); err != nil { + return nil, err + } + return ProtoToType(result), nil +} + +// Equal compares Type objects. +func Equal(a, b Type) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + return proto.Equal(a.proto(), b.proto()) +} + +// TypeUnspecified represents the absence of a type. +type TypeUnspecified struct{} + +func (n TypeUnspecified) proto() *btapb.Type { + return &btapb.Type{} +} + +type unknown[T interface{}] struct { + wrapped *T +} + +func (u unknown[T]) proto() *T { + return u.wrapped +} + // BytesEncoding represents the encoding of a Bytes type. type BytesEncoding interface { proto() *btapb.Type_Bytes_Encoding @@ -54,24 +101,59 @@ func (bytes BytesType) proto() *btapb.Type { return &btapb.Type{Kind: &btapb.Type_BytesType{BytesType: &btapb.Type_Bytes{Encoding: encoding}}} } +// StringEncoding represents the encoding of a String. +type StringEncoding interface { + proto() *btapb.Type_String_Encoding +} + +// StringUtf8Encoding represents an UTF-8 raw encoding for a string. +// DEPRECATED: Please use StringUtf8BytesEncoding. +type StringUtf8Encoding struct{} + +func (encoding StringUtf8Encoding) proto() *btapb.Type_String_Encoding { + return &btapb.Type_String_Encoding{ + Encoding: &btapb.Type_String_Encoding_Utf8Raw_{}, + } +} + +// StringUtf8BytesEncoding represents an UTF-8 bytes encoding for a string. 
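+// For example, StringType{Encoding: StringUtf8BytesEncoding{}} yields the
+// Utf8Bytes proto encoding, while a zero-value StringType falls back to the
+// deprecated Utf8Raw encoding (see StringType.proto below).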
+type StringUtf8BytesEncoding struct{} + +func (encoding StringUtf8BytesEncoding) proto() *btapb.Type_String_Encoding { + return &btapb.Type_String_Encoding{ + Encoding: &btapb.Type_String_Encoding_Utf8Bytes_{}, + } +} + +// StringType represents a string +type StringType struct { + Encoding StringEncoding +} + +func (str StringType) proto() *btapb.Type { + var encoding *btapb.Type_String_Encoding + if str.Encoding != nil { + encoding = str.Encoding.proto() + } else { + encoding = StringUtf8Encoding{}.proto() + } + return &btapb.Type{Kind: &btapb.Type_StringType{StringType: &btapb.Type_String{Encoding: encoding}}} +} + // Int64Encoding represents the encoding of an Int64 type. type Int64Encoding interface { proto() *btapb.Type_Int64_Encoding } // BigEndianBytesEncoding represents an Int64 encoding where the value is encoded -// as an 8-byte big-endian value. The byte representation may also have further encoding -// via Bytes. +// as an 8-byte big-endian value. type BigEndianBytesEncoding struct { - Bytes BytesType } func (beb BigEndianBytesEncoding) proto() *btapb.Type_Int64_Encoding { return &btapb.Type_Int64_Encoding{ Encoding: &btapb.Type_Int64_Encoding_BigEndianBytes_{ - BigEndianBytes: &btapb.Type_Int64_Encoding_BigEndianBytes{ - BytesType: beb.Bytes.proto().GetBytesType(), - }, + BigEndianBytes: &btapb.Type_Int64_Encoding_BigEndianBytes{}, }, } } @@ -112,6 +194,38 @@ func (sum SumAggregator) fillProto(proto *btapb.Type_Aggregate) { proto.Aggregator = &btapb.Type_Aggregate_Sum_{Sum: &btapb.Type_Aggregate_Sum{}} } +// MinAggregator is an aggregation function that finds the minimum between the input and the accumulator. +type MinAggregator struct{} + +func (min MinAggregator) fillProto(proto *btapb.Type_Aggregate) { + proto.Aggregator = &btapb.Type_Aggregate_Min_{Min: &btapb.Type_Aggregate_Min{}} +} + +// MaxAggregator is an aggregation function that finds the maximum between the input and the accumulator. +type MaxAggregator struct{} + +func (max MaxAggregator) fillProto(proto *btapb.Type_Aggregate) { + proto.Aggregator = &btapb.Type_Aggregate_Max_{Max: &btapb.Type_Aggregate_Max{}} +} + +// HllppUniqueCountAggregator is an aggregation function that calculates the unique count of inputs and the accumulator. +type HllppUniqueCountAggregator struct{} + +func (hll HllppUniqueCountAggregator) fillProto(proto *btapb.Type_Aggregate) { + proto.Aggregator = &btapb.Type_Aggregate_HllppUniqueCount{HllppUniqueCount: &btapb.Type_Aggregate_HyperLogLogPlusPlusUniqueCount{}} +} + +type unknownAggregator struct { + wrapped *btapb.Type_Aggregate +} + +func (ua unknownAggregator) fillProto(proto *btapb.Type_Aggregate) { + if ua.wrapped == nil { + return + } + proto.Aggregator = ua.wrapped.Aggregator +} + // AggregateType represents an aggregate. See types.proto for more details // on aggregate types. type AggregateType struct { @@ -127,3 +241,98 @@ func (agg AggregateType) proto() *btapb.Type { agg.Aggregator.fillProto(protoAgg) return &btapb.Type{Kind: &btapb.Type_AggregateType{AggregateType: protoAgg}} } + +// ProtoToType converts a protobuf *btapb.Type to an instance of the Type interface, for use of the admin API. 
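+// For example, a *btapb.Type whose Kind is Type_Int64Type maps to Int64Type,
+// a nil Kind maps to TypeUnspecified, and an unrecognized Kind is preserved
+// in an unknown wrapper so that it still round-trips through MarshalJSON.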
+func ProtoToType(pb *btapb.Type) Type { + if pb == nil { + return unknown[btapb.Type]{wrapped: nil} + } + if pb.Kind == nil { + return TypeUnspecified{} + } + switch t := pb.Kind.(type) { + case *btapb.Type_Int64Type: + return int64ProtoToType(t.Int64Type) + case *btapb.Type_BytesType: + return bytesProtoToType(t.BytesType) + case *btapb.Type_StringType: + return stringProtoToType(t.StringType) + case *btapb.Type_AggregateType: + return aggregateProtoToType(t.AggregateType) + default: + return unknown[btapb.Type]{wrapped: pb} + } +} + +func bytesEncodingProtoToType(be *btapb.Type_Bytes_Encoding) BytesEncoding { + if be == nil { + return unknown[btapb.Type_Bytes_Encoding]{wrapped: be} + } + + switch be.Encoding.(type) { + case *btapb.Type_Bytes_Encoding_Raw_: + return RawBytesEncoding{} + default: + return unknown[btapb.Type_Bytes_Encoding]{wrapped: be} + } +} + +func bytesProtoToType(b *btapb.Type_Bytes) BytesType { + return BytesType{Encoding: bytesEncodingProtoToType(b.Encoding)} +} + +func stringEncodingProtoToType(se *btapb.Type_String_Encoding) StringEncoding { + if se == nil { + return unknown[btapb.Type_String_Encoding]{wrapped: se} + } + + switch se.Encoding.(type) { + case *btapb.Type_String_Encoding_Utf8Raw_: + return StringUtf8Encoding{} + default: + return unknown[btapb.Type_String_Encoding]{wrapped: se} + } +} + +func stringProtoToType(s *btapb.Type_String) Type { + return StringType{Encoding: stringEncodingProtoToType(s.Encoding)} +} + +func int64EncodingProtoToEncoding(ie *btapb.Type_Int64_Encoding) Int64Encoding { + if ie == nil { + return unknown[btapb.Type_Int64_Encoding]{wrapped: ie} + } + + switch ie.Encoding.(type) { + case *btapb.Type_Int64_Encoding_BigEndianBytes_: + return BigEndianBytesEncoding{} + default: + return unknown[btapb.Type_Int64_Encoding]{wrapped: ie} + } +} + +func int64ProtoToType(i *btapb.Type_Int64) Type { + return Int64Type{Encoding: int64EncodingProtoToEncoding(i.Encoding)} +} + +func aggregateProtoToType(agg *btapb.Type_Aggregate) AggregateType { + if agg == nil { + return AggregateType{Input: nil, Aggregator: unknownAggregator{wrapped: agg}} + } + + it := ProtoToType(agg.InputType) + var aggregator Aggregator + switch agg.Aggregator.(type) { + case *btapb.Type_Aggregate_Sum_: + aggregator = SumAggregator{} + case *btapb.Type_Aggregate_Min_: + aggregator = MinAggregator{} + case *btapb.Type_Aggregate_Max_: + aggregator = MaxAggregator{} + case *btapb.Type_Aggregate_HllppUniqueCount: + aggregator = HllppUniqueCountAggregator{} + default: + aggregator = unknownAggregator{wrapped: agg} + } + return AggregateType{Input: it, Aggregator: aggregator} +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/terraform/providers/google/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 967e060747f..da7db19b1c6 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/terraform/providers/google/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,38 @@ # Changes +## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.1...compute/metadata/v0.5.2) (2024-09-20) + + +### Bug Fixes + +* **compute/metadata:** Close Response Body for failed request ([#10891](https://github.com/googleapis/google-cloud-go/issues/10891)) ([e91d45e](https://github.com/googleapis/google-cloud-go/commit/e91d45e4757a9e354114509ba9800085d9e0ff1f)) + +## 
[0.5.1](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.0...compute/metadata/v0.5.1) (2024-09-12) + + +### Bug Fixes + +* **compute/metadata:** Check error chain for retryable error ([#10840](https://github.com/googleapis/google-cloud-go/issues/10840)) ([2bdedef](https://github.com/googleapis/google-cloud-go/commit/2bdedeff621b223d63cebc4355fcf83bc68412cd)) + +## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.4.0...compute/metadata/v0.5.0) (2024-07-10) + + +### Features + +* **compute/metadata:** Add sys check for windows OnGCE ([#10521](https://github.com/googleapis/google-cloud-go/issues/10521)) ([3b9a830](https://github.com/googleapis/google-cloud-go/commit/3b9a83063960d2a2ac20beb47cc15818a68bd302)) + +## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.3.0...compute/metadata/v0.4.0) (2024-07-01) + + +### Features + +* **compute/metadata:** Add context for all functions/methods ([#10370](https://github.com/googleapis/google-cloud-go/issues/10370)) ([66b8efe](https://github.com/googleapis/google-cloud-go/commit/66b8efe7ad877e052b2987bb4475477e38c67bb3)) + + +### Documentation + +* **compute/metadata:** Update OnGCE description ([#10408](https://github.com/googleapis/google-cloud-go/issues/10408)) ([6a46dca](https://github.com/googleapis/google-cloud-go/commit/6a46dca4eae4f88ec6f88822e01e5bf8aeca787f)) + ## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.3...compute/metadata/v0.3.0) (2024-04-15) diff --git a/terraform/providers/google/vendor/cloud.google.com/go/compute/metadata/metadata.go b/terraform/providers/google/vendor/cloud.google.com/go/compute/metadata/metadata.go index f67e3c7eeae..c160b4786bb 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -28,7 +28,6 @@ import ( "net/http" "net/url" "os" - "runtime" "strings" "sync" "time" @@ -88,16 +87,16 @@ func (suffix NotDefinedError) Error() string { return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) } -func (c *cachedValue) get(cl *Client) (v string, err error) { +func (c *cachedValue) get(ctx context.Context, cl *Client) (v string, err error) { defer c.mu.Unlock() c.mu.Lock() if c.v != "" { return c.v, nil } if c.trim { - v, err = cl.getTrimmed(context.Background(), c.k) + v, err = cl.getTrimmed(ctx, c.k) } else { - v, err = cl.GetWithContext(context.Background(), c.k) + v, err = cl.GetWithContext(ctx, c.k) } if err == nil { c.v = v @@ -110,7 +109,9 @@ var ( onGCE bool ) -// OnGCE reports whether this process is running on Google Compute Engine. +// OnGCE reports whether this process is running on Google Compute Platforms. +// NOTE: A true value returned by `OnGCE` does not guarantee that the metadata +// server is accessible from this process or that all of its metadata is defined. func OnGCE() bool { onGCEOnce.Do(initOnGCE) return onGCE } @@ -188,21 +189,9 @@ func testOnGCE() bool { return <-resc } -// systemInfoSuggestsGCE reports whether the local system (without -// doing network requests) suggests that we're running on GCE. If this -// returns true, testOnGCE tries a bit harder to reach its metadata -// server. -func systemInfoSuggestsGCE() bool { - if runtime.GOOS != "linux" { - // We don't have any non-Linux clues available, at least yet.
- return false - } - slurp, _ := os.ReadFile("/sys/class/dmi/id/product_name") - name := strings.TrimSpace(string(slurp)) - return name == "Google" || name == "Google Compute Engine" -} - // Subscribe calls Client.SubscribeWithContext on the default client. +// +// Deprecated: Please use the context aware variant [SubscribeWithContext]. func Subscribe(suffix string, fn func(v string, ok bool) error) error { return defaultClient.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) }) } @@ -225,55 +214,188 @@ func GetWithContext(ctx context.Context, suffix string) (string, error) { } // ProjectID returns the current instance's project ID string. -func ProjectID() (string, error) { return defaultClient.ProjectID() } +// +// Deprecated: Please use the context aware variant [ProjectIDWithContext]. +func ProjectID() (string, error) { + return defaultClient.ProjectIDWithContext(context.Background()) +} + +// ProjectIDWithContext returns the current instance's project ID string. +func ProjectIDWithContext(ctx context.Context) (string, error) { + return defaultClient.ProjectIDWithContext(ctx) +} // NumericProjectID returns the current instance's numeric project ID. -func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } +// +// Deprecated: Please use the context aware variant [NumericProjectIDWithContext]. +func NumericProjectID() (string, error) { + return defaultClient.NumericProjectIDWithContext(context.Background()) +} + +// NumericProjectIDWithContext returns the current instance's numeric project ID. +func NumericProjectIDWithContext(ctx context.Context) (string, error) { + return defaultClient.NumericProjectIDWithContext(ctx) +} // InternalIP returns the instance's primary internal IP address. -func InternalIP() (string, error) { return defaultClient.InternalIP() } +// +// Deprecated: Please use the context aware variant [InternalIPWithContext]. +func InternalIP() (string, error) { + return defaultClient.InternalIPWithContext(context.Background()) +} + +// InternalIPWithContext returns the instance's primary internal IP address. +func InternalIPWithContext(ctx context.Context) (string, error) { + return defaultClient.InternalIPWithContext(ctx) +} // ExternalIP returns the instance's primary external (public) IP address. -func ExternalIP() (string, error) { return defaultClient.ExternalIP() } +// +// Deprecated: Please use the context aware variant [ExternalIPWithContext]. +func ExternalIP() (string, error) { + return defaultClient.ExternalIPWithContext(context.Background()) +} -// Email calls Client.Email on the default client. -func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) } +// ExternalIPWithContext returns the instance's primary external (public) IP address. +func ExternalIPWithContext(ctx context.Context) (string, error) { + return defaultClient.ExternalIPWithContext(ctx) +} + +// Email calls Client.EmailWithContext on the default client. +// +// Deprecated: Please use the context aware variant [EmailWithContext]. +func Email(serviceAccount string) (string, error) { + return defaultClient.EmailWithContext(context.Background(), serviceAccount) +} + +// EmailWithContext calls Client.EmailWithContext on the default client. +func EmailWithContext(ctx context.Context, serviceAccount string) (string, error) { + return defaultClient.EmailWithContext(ctx, serviceAccount) +} // Hostname returns the instance's hostname. 
This will be of the form
// "<instanceID>.c.<projID>.internal".
-func Hostname() (string, error) { return defaultClient.Hostname() }
+//
+// Deprecated: Please use the context aware variant [HostnameWithContext].
+func Hostname() (string, error) {
+ return defaultClient.HostnameWithContext(context.Background())
+}
+
+// HostnameWithContext returns the instance's hostname. This will be of the form
+// "<instanceID>.c.<projID>.internal".
+func HostnameWithContext(ctx context.Context) (string, error) {
+ return defaultClient.HostnameWithContext(ctx)
+}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
-func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
+//
+// Deprecated: Please use the context aware variant [InstanceTagsWithContext].
+func InstanceTags() ([]string, error) {
+ return defaultClient.InstanceTagsWithContext(context.Background())
+}
+
+// InstanceTagsWithContext returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTagsWithContext(ctx context.Context) ([]string, error) {
+ return defaultClient.InstanceTagsWithContext(ctx)
+}
// InstanceID returns the current VM's numeric instance ID.
-func InstanceID() (string, error) { return defaultClient.InstanceID() }
+//
+// Deprecated: Please use the context aware variant [InstanceIDWithContext].
+func InstanceID() (string, error) {
+ return defaultClient.InstanceIDWithContext(context.Background())
+}
+
+// InstanceIDWithContext returns the current VM's numeric instance ID.
+func InstanceIDWithContext(ctx context.Context) (string, error) {
+ return defaultClient.InstanceIDWithContext(ctx)
+}
// InstanceName returns the current VM's instance ID string.
-func InstanceName() (string, error) { return defaultClient.InstanceName() }
+//
+// Deprecated: Please use the context aware variant [InstanceNameWithContext].
+func InstanceName() (string, error) {
+ return defaultClient.InstanceNameWithContext(context.Background())
+}
+
+// InstanceNameWithContext returns the current VM's instance ID string.
+func InstanceNameWithContext(ctx context.Context) (string, error) {
+ return defaultClient.InstanceNameWithContext(ctx)
+}
// Zone returns the current VM's zone, such as "us-central1-b".
-func Zone() (string, error) { return defaultClient.Zone() }
+//
+// Deprecated: Please use the context aware variant [ZoneWithContext].
+func Zone() (string, error) {
+ return defaultClient.ZoneWithContext(context.Background())
+}
+
+// ZoneWithContext returns the current VM's zone, such as "us-central1-b".
+func ZoneWithContext(ctx context.Context) (string, error) {
+ return defaultClient.ZoneWithContext(ctx)
+}
-// InstanceAttributes calls Client.InstanceAttributes on the default client.
-func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
+// InstanceAttributes calls Client.InstanceAttributesWithContext on the default client.
+//
+// Deprecated: Please use the context aware variant [InstanceAttributesWithContext].
+func InstanceAttributes() ([]string, error) {
+ return defaultClient.InstanceAttributesWithContext(context.Background())
+}
+
+// InstanceAttributesWithContext calls Client.InstanceAttributesWithContext on the default client.
+func InstanceAttributesWithContext(ctx context.Context) ([]string, error) {
+ return defaultClient.InstanceAttributesWithContext(ctx)
+}
-// ProjectAttributes calls Client.ProjectAttributes on the default client.
-func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } +// ProjectAttributes calls Client.ProjectAttributesWithContext on the default client. +// +// Deprecated: Please use the context aware variant [ProjectAttributesWithContext]. +func ProjectAttributes() ([]string, error) { + return defaultClient.ProjectAttributesWithContext(context.Background()) +} + +// ProjectAttributesWithContext calls Client.ProjectAttributesWithContext on the default client. +func ProjectAttributesWithContext(ctx context.Context) ([]string, error) { + return defaultClient.ProjectAttributesWithContext(ctx) +} -// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. +// InstanceAttributeValue calls Client.InstanceAttributeValueWithContext on the default client. +// +// Deprecated: Please use the context aware variant [InstanceAttributeValueWithContext]. func InstanceAttributeValue(attr string) (string, error) { - return defaultClient.InstanceAttributeValue(attr) + return defaultClient.InstanceAttributeValueWithContext(context.Background(), attr) +} + +// InstanceAttributeValueWithContext calls Client.InstanceAttributeValueWithContext on the default client. +func InstanceAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return defaultClient.InstanceAttributeValueWithContext(ctx, attr) } -// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. +// ProjectAttributeValue calls Client.ProjectAttributeValueWithContext on the default client. +// +// Deprecated: Please use the context aware variant [ProjectAttributeValueWithContext]. func ProjectAttributeValue(attr string) (string, error) { - return defaultClient.ProjectAttributeValue(attr) + return defaultClient.ProjectAttributeValueWithContext(context.Background(), attr) } -// Scopes calls Client.Scopes on the default client. -func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } +// ProjectAttributeValueWithContext calls Client.ProjectAttributeValueWithContext on the default client. +func ProjectAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return defaultClient.ProjectAttributeValueWithContext(ctx, attr) +} + +// Scopes calls Client.ScopesWithContext on the default client. +// +// Deprecated: Please use the context aware variant [ScopesWithContext]. +func Scopes(serviceAccount string) ([]string, error) { + return defaultClient.ScopesWithContext(context.Background(), serviceAccount) +} + +// ScopesWithContext calls Client.ScopesWithContext on the default client. +func ScopesWithContext(ctx context.Context, serviceAccount string) ([]string, error) { + return defaultClient.ScopesWithContext(ctx, serviceAccount) +} func strsContains(ss []string, s string) bool { for _, v := range ss { @@ -296,7 +418,6 @@ func NewClient(c *http.Client) *Client { if c == nil { return defaultClient } - return &Client{hc: c} } @@ -335,6 +456,9 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string code = res.StatusCode } if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry { + if res != nil && res.Body != nil { + res.Body.Close() + } if err := sleep(ctx, delay); err != nil { return "", "", err } @@ -381,6 +505,10 @@ func (c *Client) Get(suffix string) (string, error) { // // If the requested metadata is not defined, the returned error will // be of type NotDefinedError. 
+//
+// NOTE: Without an extra deadline in the context this call can take in the
+// worst case, with internal backoff retries, up to 15 seconds (e.g. when the
+// server is responding slowly). Pass a context with an additional timeout when needed.
func (c *Client) GetWithContext(ctx context.Context, suffix string) (string, error) {
val, _, err := c.getETag(ctx, suffix)
return val, err
@@ -392,8 +520,8 @@ func (c *Client) getTrimmed(ctx context.Context, suffix string) (s string, err e
return
}
-func (c *Client) lines(suffix string) ([]string, error) {
- j, err := c.GetWithContext(context.Background(), suffix)
+func (c *Client) lines(ctx context.Context, suffix string) ([]string, error) {
+ j, err := c.GetWithContext(ctx, suffix)
if err != nil {
return nil, err
}
@@ -405,45 +533,104 @@ func (c *Client) lines(suffix string) ([]string, error) {
}
// ProjectID returns the current instance's project ID string.
-func (c *Client) ProjectID() (string, error) { return projID.get(c) }
+//
+// Deprecated: Please use the context aware variant [Client.ProjectIDWithContext].
+func (c *Client) ProjectID() (string, error) { return c.ProjectIDWithContext(context.Background()) }
+
+// ProjectIDWithContext returns the current instance's project ID string.
+func (c *Client) ProjectIDWithContext(ctx context.Context) (string, error) { return projID.get(ctx, c) }
// NumericProjectID returns the current instance's numeric project ID.
-func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
+//
+// Deprecated: Please use the context aware variant [Client.NumericProjectIDWithContext].
+func (c *Client) NumericProjectID() (string, error) {
+ return c.NumericProjectIDWithContext(context.Background())
+}
+
+// NumericProjectIDWithContext returns the current instance's numeric project ID.
+func (c *Client) NumericProjectIDWithContext(ctx context.Context) (string, error) {
+ return projNum.get(ctx, c)
+}
// InstanceID returns the current VM's numeric instance ID.
-func (c *Client) InstanceID() (string, error) { return instID.get(c) }
+//
+// Deprecated: Please use the context aware variant [Client.InstanceIDWithContext].
+func (c *Client) InstanceID() (string, error) {
+ return c.InstanceIDWithContext(context.Background())
+}
+
+// InstanceIDWithContext returns the current VM's numeric instance ID.
+func (c *Client) InstanceIDWithContext(ctx context.Context) (string, error) {
+ return instID.get(ctx, c)
+}
// InternalIP returns the instance's primary internal IP address.
+//
+// Deprecated: Please use the context aware variant [Client.InternalIPWithContext].
func (c *Client) InternalIP() (string, error) {
- return c.getTrimmed(context.Background(), "instance/network-interfaces/0/ip")
+ return c.InternalIPWithContext(context.Background())
+}
+
+// InternalIPWithContext returns the instance's primary internal IP address.
+func (c *Client) InternalIPWithContext(ctx context.Context) (string, error) {
+ return c.getTrimmed(ctx, "instance/network-interfaces/0/ip")
}
// Email returns the email address associated with the service account.
-// The account may be empty or the string "default" to use the instance's
-// main account.
+//
+// Deprecated: Please use the context aware variant [Client.EmailWithContext].
func (c *Client) Email(serviceAccount string) (string, error) {
+ return c.EmailWithContext(context.Background(), serviceAccount)
+}
+
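[Illustration, not part of the vendored patch: a minimal sketch of migrating a caller to the context-aware variants introduced in this hunk, assuming the package is imported from the vendored path cloud.google.com/go/compute/metadata. The two-second deadline is an arbitrary choice; per the NOTE on GetWithContext above, internal retries can otherwise run for up to ~15 seconds.]

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// Bound the lookup instead of relying on the library's internal retries.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Context-aware replacement for the deprecated package-level ProjectID().
	projID, err := metadata.ProjectIDWithContext(ctx)
	if err != nil {
		// NotDefinedError means the server answered but the entry is absent;
		// anything else is a transport or deadline failure.
		var nde metadata.NotDefinedError
		if errors.As(err, &nde) {
			fmt.Println("metadata entry not defined:", string(nde))
			return
		}
		fmt.Println("metadata lookup failed:", err)
		return
	}
	fmt.Println("project:", projID)
}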
+// EmailWithContext returns the email address associated with the service account.
+// The serviceAccount parameter may be empty or the string "default" to use
+// the instance's main account.
+func (c *Client) EmailWithContext(ctx context.Context, serviceAccount string) (string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
- return c.getTrimmed(context.Background(), "instance/service-accounts/"+serviceAccount+"/email")
+ return c.getTrimmed(ctx, "instance/service-accounts/"+serviceAccount+"/email")
}
// ExternalIP returns the instance's primary external (public) IP address.
+//
+// Deprecated: Please use the context aware variant [Client.ExternalIPWithContext].
func (c *Client) ExternalIP() (string, error) {
- return c.getTrimmed(context.Background(), "instance/network-interfaces/0/access-configs/0/external-ip")
+ return c.ExternalIPWithContext(context.Background())
+}
+
+// ExternalIPWithContext returns the instance's primary external (public) IP address.
+func (c *Client) ExternalIPWithContext(ctx context.Context) (string, error) {
+ return c.getTrimmed(ctx, "instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
+//
+// Deprecated: Please use the context aware variant [Client.HostnameWithContext].
func (c *Client) Hostname() (string, error) {
- return c.getTrimmed(context.Background(), "instance/hostname")
+ return c.HostnameWithContext(context.Background())
}
-// InstanceTags returns the list of user-defined instance tags,
-// assigned when initially creating a GCE instance.
+// HostnameWithContext returns the instance's hostname. This will be of the form
+// "<instanceID>.c.<projID>.internal".
+func (c *Client) HostnameWithContext(ctx context.Context) (string, error) {
+ return c.getTrimmed(ctx, "instance/hostname")
+}
+
+// InstanceTags returns the list of user-defined instance tags.
+//
+// Deprecated: Please use the context aware variant [Client.InstanceTagsWithContext].
func (c *Client) InstanceTags() ([]string, error) {
+ return c.InstanceTagsWithContext(context.Background())
+}
+
+// InstanceTagsWithContext returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func (c *Client) InstanceTagsWithContext(ctx context.Context) ([]string, error) {
var s []string
- j, err := c.GetWithContext(context.Background(), "instance/tags")
+ j, err := c.GetWithContext(ctx, "instance/tags")
if err != nil {
return nil, err
}
@@ -454,13 +641,27 @@ func (c *Client) InstanceTags() ([]string, error) {
}
// InstanceName returns the current VM's instance ID string.
+//
+// Deprecated: Please use the context aware variant [Client.InstanceNameWithContext].
func (c *Client) InstanceName() (string, error) {
- return c.getTrimmed(context.Background(), "instance/name")
+ return c.InstanceNameWithContext(context.Background())
+}
+
+// InstanceNameWithContext returns the current VM's instance ID string.
+func (c *Client) InstanceNameWithContext(ctx context.Context) (string, error) {
+ return c.getTrimmed(ctx, "instance/name")
}
// Zone returns the current VM's zone, such as "us-central1-b".
+//
+// Deprecated: Please use the context aware variant [Client.ZoneWithContext].
func (c *Client) Zone() (string, error) {
- zone, err := c.getTrimmed(context.Background(), "instance/zone")
+ return c.ZoneWithContext(context.Background())
+}
+
+// ZoneWithContext returns the current VM's zone, such as "us-central1-b".
+func (c *Client) ZoneWithContext(ctx context.Context) (string, error) {
+ zone, err := c.getTrimmed(ctx, "instance/zone")
// zone is of the form "projects/<projNum>/zones/<zoneName>".
if err != nil {
return "", err
}
@@ -471,12 +672,34 @@ func (c *Client) Zone() (string, error) {
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
-func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
+//
+// Deprecated: Please use the context aware variant [Client.InstanceAttributesWithContext].
+func (c *Client) InstanceAttributes() ([]string, error) {
+ return c.InstanceAttributesWithContext(context.Background())
+}
+
+// InstanceAttributesWithContext returns the list of user-defined attributes,
+// assigned when initially creating a GCE VM instance. The value of an
+// attribute can be obtained with InstanceAttributeValue.
+func (c *Client) InstanceAttributesWithContext(ctx context.Context) ([]string, error) {
+ return c.lines(ctx, "instance/attributes/")
+}
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
-func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
+//
+// Deprecated: Please use the context aware variant [Client.ProjectAttributesWithContext].
+func (c *Client) ProjectAttributes() ([]string, error) {
+ return c.ProjectAttributesWithContext(context.Background())
+}
+
+// ProjectAttributesWithContext returns the list of user-defined attributes
+// applying to the project as a whole, not just this VM. The value of
+// an attribute can be obtained with ProjectAttributeValue.
+func (c *Client) ProjectAttributesWithContext(ctx context.Context) ([]string, error) {
+ return c.lines(ctx, "project/attributes/")
+}
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
@@ -486,8 +709,22 @@ func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
+//
+// Deprecated: Please use the context aware variant [Client.InstanceAttributeValueWithContext].
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
- return c.GetWithContext(context.Background(), "instance/attributes/"+attr)
+ return c.InstanceAttributeValueWithContext(context.Background(), attr)
+}
+
+// InstanceAttributeValueWithContext returns the value of the provided VM
+// instance attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// InstanceAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func (c *Client) InstanceAttributeValueWithContext(ctx context.Context, attr string) (string, error) {
+ return c.GetWithContext(ctx, "instance/attributes/"+attr)
}
// ProjectAttributeValue returns the value of the provided
@@ -498,18 +735,41 @@ func (c *Client) InstanceAttributeValue(attr string) (string, error) {
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
+//
+// Deprecated: Please use the context aware variant [Client.ProjectAttributeValueWithContext].
func (c *Client) ProjectAttributeValue(attr string) (string, error) { - return c.GetWithContext(context.Background(), "project/attributes/"+attr) + return c.ProjectAttributeValueWithContext(context.Background(), attr) +} + +// ProjectAttributeValueWithContext returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) ProjectAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return c.GetWithContext(ctx, "project/attributes/"+attr) } // Scopes returns the service account scopes for the given account. // The account may be empty or the string "default" to use the instance's // main account. +// +// Deprecated: Please use the context aware variant [Client.ScopesWithContext]. func (c *Client) Scopes(serviceAccount string) ([]string, error) { + return c.ScopesWithContext(context.Background(), serviceAccount) +} + +// ScopesWithContext returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func (c *Client) ScopesWithContext(ctx context.Context, serviceAccount string) ([]string, error) { if serviceAccount == "" { serviceAccount = "default" } - return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") + return c.lines(ctx, "instance/service-accounts/"+serviceAccount+"/scopes") } // Subscribe subscribes to a value from the metadata service. diff --git a/terraform/providers/google/vendor/cloud.google.com/go/compute/metadata/retry_linux.go b/terraform/providers/google/vendor/cloud.google.com/go/compute/metadata/retry_linux.go index bb412f8917e..2e53f012300 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/compute/metadata/retry_linux.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/compute/metadata/retry_linux.go @@ -17,10 +17,15 @@ package metadata -import "syscall" +import ( + "errors" + "syscall" +) func init() { // Initialize syscallRetryable to return true on transient socket-level // errors. These errors are specific to Linux. - syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } + syscallRetryable = func(err error) bool { + return errors.Is(err, syscall.ECONNRESET) || errors.Is(err, syscall.ECONNREFUSED) + } } diff --git a/terraform/providers/google/vendor/cloud.google.com/go/compute/metadata/syscheck.go b/terraform/providers/google/vendor/cloud.google.com/go/compute/metadata/syscheck.go new file mode 100644 index 00000000000..e0704fa6477 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/compute/metadata/syscheck.go @@ -0,0 +1,26 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
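[Illustration, not part of the vendored patch: why the retry_linux.go change above from == to errors.Is matters. Transient socket errors usually arrive wrapped (for example inside *net.OpError), so the old equality check never matched them and retryable failures were treated as fatal; errors.Is walks the wrap chain. A self-contained sketch:]

package main

import (
	"errors"
	"fmt"
	"syscall"
)

func main() {
	// Simulate a wrapped transient socket error, as the net package returns.
	err := fmt.Errorf("read from metadata server: %w", syscall.ECONNRESET)

	fmt.Println(err == syscall.ECONNRESET)          // false: the wrapper hides the errno
	fmt.Println(errors.Is(err, syscall.ECONNRESET)) // true: the whole chain is checked
}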
+ +//go:build !windows && !linux + +package metadata + +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. +func systemInfoSuggestsGCE() bool { + // We don't currently have checks for other GOOS + return false +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go b/terraform/providers/google/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go new file mode 100644 index 00000000000..74689acbbbf --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go @@ -0,0 +1,28 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux + +package metadata + +import ( + "os" + "strings" +) + +func systemInfoSuggestsGCE() bool { + b, _ := os.ReadFile("/sys/class/dmi/id/product_name") + name := strings.TrimSpace(string(b)) + return name == "Google" || name == "Google Compute Engine" +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go b/terraform/providers/google/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go new file mode 100644 index 00000000000..c0ce627872f --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go @@ -0,0 +1,38 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build windows + +package metadata + +import ( + "strings" + + "golang.org/x/sys/windows/registry" +) + +func systemInfoSuggestsGCE() bool { + k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SYSTEM\HardwareConfig\Current`, registry.QUERY_VALUE) + if err != nil { + return false + } + defer k.Close() + + s, _, err := k.GetStringValue("SystemProductName") + if err != nil { + return false + } + s = strings.TrimSpace(s) + return strings.HasPrefix(s, "Google") +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/doc.go b/terraform/providers/google/vendor/cloud.google.com/go/doc.go index 133ff68553f..8644f614c86 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/doc.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/doc.go @@ -79,12 +79,15 @@ are also provided in all auto-generated libraries: for example, cloud.google.com/go/secretmanager/apiv1 provides DefaultAuthScopes. 
Example:
ctx := context.Background()
- // https://pkg.go.dev/golang.org/x/oauth2/google
- creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), secretmanager.DefaultAuthScopes()...)
+ // https://pkg.go.dev/cloud.google.com/go/auth/credentials
+ creds, err := credentials.DetectDefault(&credentials.DetectOptions{
+ Scopes: secretmanager.DefaultAuthScopes(),
+ CredentialsJSON: []byte("JSON creds"),
+ })
if err != nil {
// TODO: handle error.
}
- client, err := secretmanager.NewClient(ctx, option.WithCredentials(creds))
+ client, err := secretmanager.NewClient(ctx, option.WithAuthCredentials(creds))
if err != nil {
// TODO: handle error.
}
diff --git a/terraform/providers/google/vendor/cloud.google.com/go/go.work b/terraform/providers/google/vendor/cloud.google.com/go/go.work
index ccb14f51a81..122a980e6ea 100644
--- a/terraform/providers/google/vendor/cloud.google.com/go/go.work
+++ b/terraform/providers/google/vendor/cloud.google.com/go/go.work
@@ -1,4 +1,6 @@
-go 1.20
+go 1.21.13
+
+toolchain go1.23.0
use (
.
@@ -12,6 +14,7 @@ use (
./apigateway
./apigeeconnect
./apigeeregistry
+ ./apihub
./apikeys
./appengine
./apphub
@@ -124,6 +127,7 @@ use (
./policysimulator
./policytroubleshooter
./privatecatalog
+ ./privilegedaccessmanager
./profiler
./pubsub
./pubsublite
diff --git a/terraform/providers/google/vendor/cloud.google.com/go/go.work.sum b/terraform/providers/google/vendor/cloud.google.com/go/go.work.sum
index 880e5914a44..ed5eb16ae50 100644
--- a/terraform/providers/google/vendor/cloud.google.com/go/go.work.sum
+++ b/terraform/providers/google/vendor/cloud.google.com/go/go.work.sum
@@ -4,12 +4,10 @@ cloud.google.com/go/dataproc v1.12.0 h1:W47qHL3W4BPkAIbk4SWmIERwsWBaNnWm0P2sdx3Y
cloud.google.com/go/gaming v1.9.0 h1:7vEhFnZmd931Mo7sZ6pJy7uQPDxF7m7v8xtBheG08tc=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.18.0 h1:ugYJK/neZQtQeh2jc5xNoDFiMQojlAkoqJMRb7vTu1U=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.18.0/go.mod h1:Xx0VKh7GJ4si3rmElbh19Mejxz68ibWg/J30ZOMrqzU=
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.46.0/go.mod h1:V28hx+cUCZC9e3qcqszMb+Sbt8cQZtHTiXOmyDzoDOg=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.23.0/go.mod h1:p2puVVSKjQ84Qb1gzw2XHLs34WQyHTYFZLaVxypAFYs=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
-github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
-github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/aws/aws-sdk-go-v2 v1.16.10/go.mod h1:WTACcleLz6VZTp7fak4EO5b9Q4foxbn+8PIz3PmyKlo=
github.com/aws/aws-sdk-go-v2/config v1.15.9/go.mod h1:rv/l/TbZo67kp99v/3Kb0qV6Fm1KEtKyruEV2GvVfgs=
github.com/aws/aws-sdk-go-v2/credentials v1.12.12/go.mod h1:vFHC2HifIWHebmoVsfpqliKuqbAY2LaVlvy03JzF4c4=
@@ -29,13 +27,11 @@ github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0V
github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
github.com/chzyer/readline v1.5.1/go.mod
h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= -github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= github.com/fullstorydev/grpcurl v1.8.7/go.mod h1:pVtM4qe3CMoLaIzYS8uvTuDj2jVYmXqMUkZeijnXp/E= -github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8= -github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= +github.com/golang/mock v1.7.0-rc.1 h1:YojYx61/OLFsiv6Rw1Z96LpldJIy31o+UHmwAUMJ6/U= github.com/google/go-jsonnet v0.20.0/go.mod h1:VbgWF9JX7ztlv770x/TolZNGGFfiHEVx9G6ca2eUmeA= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/hoisie/redis v0.0.0-20160730154456-b5c6e81454e0/go.mod h1:pMYMxVaKJqCDC1JUg/XbPJ4/fSazB25zORpFzqsIGIc= @@ -54,13 +50,12 @@ github.com/miekg/dns v1.1.33/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7 github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mmcloughlin/avo v0.5.0/go.mod h1:ChHFdoV7ql95Wi7vuq2YT1bwCJqiWdZrQ1im3VujLYM= -github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +go.opentelemetry.io/contrib/detectors/gcp v1.27.0/go.mod h1:amd+4uZxqJAUx7zI1JvygUtAc2EVWtQeyz8D+3161SQ= go.opentelemetry.io/otel v1.23.1/go.mod h1:Td0134eafDLcTS4y+zQ26GE8u3dEuRBiBCTUIRHaikA= go.opentelemetry.io/otel/bridge/opencensus v0.40.0 h1:pqDiayRhBgoqy1vwnscik+TizcImJ58l053NScJyZso= go.opentelemetry.io/otel/bridge/opencensus v0.40.0/go.mod h1:1NvVHb6tLTe5A9qCYz+eErW0t8iPn4ZfR6tDKcqlGTM= @@ -68,20 +63,19 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0/go.mod go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0/go.mod h1:qcTO4xHAxZLaLxPd60TdE88rxtItPHgHWqOhOGRr0as= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0/go.mod h1:sTt30Evb7hJB/gEk27qLb1+l9n4Tb8HvHkR0Wx3S6CU= go.opentelemetry.io/otel/metric v1.23.1/go.mod h1:mpG2QPlAfnK8yNhNJAxDZruU9Y1/HubbC+KyH8FaCWI= +go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= go.opentelemetry.io/otel/trace v1.23.1/go.mod h1:4IpnpJFwr1mo/6HL8XIPJaE9y0+u1KcVmuW7dwFSVrI= golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/telemetry 
v0.0.0-20240208230135-b75ee8823808/go.mod h1:KG1lNk5ZFNssSZLrpVb4sMXKMpGwGXOxSG3rnu2gZQQ= -golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= golang.org/x/tools v0.10.0 h1:tvDr/iQoUqNdohiYm0LmmKcBk+q86lb9EprIUFhHHGg= golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= google.golang.org/api v0.174.0/go.mod h1:aC7tB6j0HR1Nl0ni5ghpx6iLasmAX78Zkh/wgxAAjLg= google.golang.org/genproto v0.0.0-20230725213213-b022f6e96895/go.mod h1:0ggbjUrZYpy1q+ANUS30SEoGZ53cdfwtbuG7Ptgy108= google.golang.org/genproto/googleapis/api v0.0.0-20230725213213-b022f6e96895/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= -google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda/go.mod h1:AHcE/gZH76Bk/ROZhQphlRoWo5xKDEtz3eVEO1LfA8c= google.golang.org/genproto/googleapis/api v0.0.0-20240515191416-fc5f0ca64291/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g= google.golang.org/genproto/googleapis/bytestream v0.0.0-20231120223509-83a465c0220f/go.mod h1:iIgEblxoG4klcXsG0d9cpoxJ4xndv6+1FkDROCHhPRI= google.golang.org/genproto/googleapis/bytestream v0.0.0-20240102182953-50ed04b92917/go.mod h1:O9TvT7A9NLgdqqF0JJXJ+axpaoYiEb8txGmkvy+AvLc= diff --git a/terraform/providers/google/vendor/cloud.google.com/go/iam/CHANGES.md b/terraform/providers/google/vendor/cloud.google.com/go/iam/CHANGES.md index af5ff374887..498a15a5fcd 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/iam/CHANGES.md +++ b/terraform/providers/google/vendor/cloud.google.com/go/iam/CHANGES.md @@ -1,6 +1,55 @@ # Changes +## [1.2.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.2.0...iam/v1.2.1) (2024-09-12) + + +### Bug Fixes + +* **iam:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04)) + +## [1.2.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.13...iam/v1.2.0) (2024-08-20) + + +### Features + +* **iam:** Add support for Go 1.23 iterators ([84461c0](https://github.com/googleapis/google-cloud-go/commit/84461c0ba464ec2f951987ba60030e37c8a8fc18)) + +## [1.1.13](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.12...iam/v1.1.13) (2024-08-08) + + +### Bug Fixes + +* **iam:** Update google.golang.org/api to v0.191.0 ([5b32644](https://github.com/googleapis/google-cloud-go/commit/5b32644eb82eb6bd6021f80b4fad471c60fb9d73)) + +## [1.1.12](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.11...iam/v1.1.12) (2024-07-24) + + +### Bug Fixes + +* **iam:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) + +## [1.1.11](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.10...iam/v1.1.11) (2024-07-10) + + +### Bug Fixes + +* **iam:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5)) + +## [1.1.10](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.9...iam/v1.1.10) (2024-07-01) + + +### Bug Fixes + +* **iam:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) + +## [1.1.9](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.8...iam/v1.1.9) (2024-06-26) + + +### Bug 
Fixes + +* **iam:** Enable new auth lib ([b95805f](https://github.com/googleapis/google-cloud-go/commit/b95805f4c87d3e8d10ea23bd7a2d68d7a4157568)) + ## [1.1.8](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.7...iam/v1.1.8) (2024-05-01) diff --git a/terraform/providers/google/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go index 3fbf4530d0d..619b4c4fa3f 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.34.2 // protoc v4.25.3 // source: google/iam/v1/iam_policy.proto @@ -388,7 +388,7 @@ func file_google_iam_v1_iam_policy_proto_rawDescGZIP() []byte { } var file_google_iam_v1_iam_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_google_iam_v1_iam_policy_proto_goTypes = []interface{}{ +var file_google_iam_v1_iam_policy_proto_goTypes = []any{ (*SetIamPolicyRequest)(nil), // 0: google.iam.v1.SetIamPolicyRequest (*GetIamPolicyRequest)(nil), // 1: google.iam.v1.GetIamPolicyRequest (*TestIamPermissionsRequest)(nil), // 2: google.iam.v1.TestIamPermissionsRequest @@ -422,7 +422,7 @@ func file_google_iam_v1_iam_policy_proto_init() { file_google_iam_v1_options_proto_init() file_google_iam_v1_policy_proto_init() if !protoimpl.UnsafeEnabled { - file_google_iam_v1_iam_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_iam_policy_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyRequest); i { case 0: return &v.state @@ -434,7 +434,7 @@ func file_google_iam_v1_iam_policy_proto_init() { return nil } } - file_google_iam_v1_iam_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_iam_policy_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyRequest); i { case 0: return &v.state @@ -446,7 +446,7 @@ func file_google_iam_v1_iam_policy_proto_init() { return nil } } - file_google_iam_v1_iam_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_iam_policy_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsRequest); i { case 0: return &v.state @@ -458,7 +458,7 @@ func file_google_iam_v1_iam_policy_proto_init() { return nil } } - file_google_iam_v1_iam_policy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_iam_policy_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsResponse); i { case 0: return &v.state diff --git a/terraform/providers/google/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go index 29738ad1ce3..f1c1c084e34 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the 
Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.34.2 // protoc v4.25.3 // source: google/iam/v1/options.proto @@ -136,7 +136,7 @@ func file_google_iam_v1_options_proto_rawDescGZIP() []byte { } var file_google_iam_v1_options_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_iam_v1_options_proto_goTypes = []interface{}{ +var file_google_iam_v1_options_proto_goTypes = []any{ (*GetPolicyOptions)(nil), // 0: google.iam.v1.GetPolicyOptions } var file_google_iam_v1_options_proto_depIdxs = []int32{ @@ -153,7 +153,7 @@ func file_google_iam_v1_options_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_iam_v1_options_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_options_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*GetPolicyOptions); i { case 0: return &v.state diff --git a/terraform/providers/google/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go index a4e15741b64..4dda5d6d056 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.34.2 // protoc v4.25.3 // source: google/iam/v1/policy.proto @@ -1036,7 +1036,7 @@ func file_google_iam_v1_policy_proto_rawDescGZIP() []byte { var file_google_iam_v1_policy_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_google_iam_v1_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 7) -var file_google_iam_v1_policy_proto_goTypes = []interface{}{ +var file_google_iam_v1_policy_proto_goTypes = []any{ (AuditLogConfig_LogType)(0), // 0: google.iam.v1.AuditLogConfig.LogType (BindingDelta_Action)(0), // 1: google.iam.v1.BindingDelta.Action (AuditConfigDelta_Action)(0), // 2: google.iam.v1.AuditConfigDelta.Action @@ -1073,7 +1073,7 @@ func file_google_iam_v1_policy_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_iam_v1_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Policy); i { case 0: return &v.state @@ -1085,7 +1085,7 @@ func file_google_iam_v1_policy_proto_init() { return nil } } - file_google_iam_v1_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Binding); i { case 0: return &v.state @@ -1097,7 +1097,7 @@ func file_google_iam_v1_policy_proto_init() { return nil } } - file_google_iam_v1_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*AuditConfig); i { case 0: return &v.state @@ -1109,7 +1109,7 @@ func file_google_iam_v1_policy_proto_init() { return nil } } - file_google_iam_v1_policy_proto_msgTypes[3].Exporter = func(v 
interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*AuditLogConfig); i { case 0: return &v.state @@ -1121,7 +1121,7 @@ func file_google_iam_v1_policy_proto_init() { return nil } } - file_google_iam_v1_policy_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*PolicyDelta); i { case 0: return &v.state @@ -1133,7 +1133,7 @@ func file_google_iam_v1_policy_proto_init() { return nil } } - file_google_iam_v1_policy_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*BindingDelta); i { case 0: return &v.state @@ -1145,7 +1145,7 @@ func file_google_iam_v1_policy_proto_init() { return nil } } - file_google_iam_v1_policy_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*AuditConfigDelta); i { case 0: return &v.state diff --git a/terraform/providers/google/vendor/cloud.google.com/go/longrunning/CHANGES.md b/terraform/providers/google/vendor/cloud.google.com/go/longrunning/CHANGES.md index 6c6a7b6661a..d120456cd3b 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/longrunning/CHANGES.md +++ b/terraform/providers/google/vendor/cloud.google.com/go/longrunning/CHANGES.md @@ -1,5 +1,54 @@ # Changes +## [0.6.1](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.6.0...longrunning/v0.6.1) (2024-09-12) + + +### Bug Fixes + +* **longrunning:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04)) + +## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.12...longrunning/v0.6.0) (2024-08-20) + + +### Features + +* **longrunning:** Add support for Go 1.23 iterators ([84461c0](https://github.com/googleapis/google-cloud-go/commit/84461c0ba464ec2f951987ba60030e37c8a8fc18)) + +## [0.5.12](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.11...longrunning/v0.5.12) (2024-08-08) + + +### Bug Fixes + +* **longrunning:** Update google.golang.org/api to v0.191.0 ([5b32644](https://github.com/googleapis/google-cloud-go/commit/5b32644eb82eb6bd6021f80b4fad471c60fb9d73)) + +## [0.5.11](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.10...longrunning/v0.5.11) (2024-07-24) + + +### Bug Fixes + +* **longrunning:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) + +## [0.5.10](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.9...longrunning/v0.5.10) (2024-07-10) + + +### Bug Fixes + +* **longrunning:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5)) + +## [0.5.9](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.8...longrunning/v0.5.9) (2024-07-01) + + +### Bug Fixes + +* **longrunning:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) + +## [0.5.8](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.7...longrunning/v0.5.8) (2024-06-26) + + +### Bug Fixes + +* **longrunning:** Enable new auth lib 
([b95805f](https://github.com/googleapis/google-cloud-go/commit/b95805f4c87d3e8d10ea23bd7a2d68d7a4157568)) + ## [0.5.7](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.6...longrunning/v0.5.7) (2024-05-01) diff --git a/terraform/providers/google/vendor/cloud.google.com/go/longrunning/autogen/auxiliary_go123.go b/terraform/providers/google/vendor/cloud.google.com/go/longrunning/autogen/auxiliary_go123.go new file mode 100644 index 00000000000..eca6d4def1f --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/longrunning/autogen/auxiliary_go123.go @@ -0,0 +1,32 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +//go:build go1.23 + +package longrunning + +import ( + "iter" + + longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb" + "github.com/googleapis/gax-go/v2/iterator" +) + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *OperationIterator) All() iter.Seq2[*longrunningpb.Operation, error] { + return iterator.RangeAdapter(it.Next) +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go index be148ff97a3..0a4d66c6373 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.34.2 // protoc v4.25.3 // source: google/longrunning/operations.proto @@ -765,7 +765,7 @@ func file_google_longrunning_operations_proto_rawDescGZIP() []byte { } var file_google_longrunning_operations_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_google_longrunning_operations_proto_goTypes = []interface{}{ +var file_google_longrunning_operations_proto_goTypes = []any{ (*Operation)(nil), // 0: google.longrunning.Operation (*GetOperationRequest)(nil), // 1: google.longrunning.GetOperationRequest (*ListOperationsRequest)(nil), // 2: google.longrunning.ListOperationsRequest @@ -811,7 +811,7 @@ func file_google_longrunning_operations_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_longrunning_operations_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_longrunning_operations_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Operation); i { case 0: return &v.state @@ -823,7 +823,7 @@ func file_google_longrunning_operations_proto_init() { return nil } } - file_google_longrunning_operations_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_longrunning_operations_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*GetOperationRequest); i { case 0: return &v.state @@ -835,7 +835,7 @@ func file_google_longrunning_operations_proto_init() { return nil } } - file_google_longrunning_operations_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_longrunning_operations_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ListOperationsRequest); i { case 0: return &v.state @@ -847,7 +847,7 @@ func file_google_longrunning_operations_proto_init() { return nil } } - file_google_longrunning_operations_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_longrunning_operations_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ListOperationsResponse); i { case 0: return &v.state @@ -859,7 +859,7 @@ func file_google_longrunning_operations_proto_init() { return nil } } - file_google_longrunning_operations_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_longrunning_operations_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*CancelOperationRequest); i { case 0: return &v.state @@ -871,7 +871,7 @@ func file_google_longrunning_operations_proto_init() { return nil } } - file_google_longrunning_operations_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_longrunning_operations_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*DeleteOperationRequest); i { case 0: return &v.state @@ -883,7 +883,7 @@ func file_google_longrunning_operations_proto_init() { return nil } } - file_google_longrunning_operations_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_longrunning_operations_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*WaitOperationRequest); i { case 0: return &v.state @@ -895,7 +895,7 @@ func file_google_longrunning_operations_proto_init() { return nil } } - file_google_longrunning_operations_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_longrunning_operations_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*OperationInfo); i { case 0: return &v.state @@ -908,7 +908,7 @@ func 
file_google_longrunning_operations_proto_init() { } } } - file_google_longrunning_operations_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_google_longrunning_operations_proto_msgTypes[0].OneofWrappers = []any{ (*Operation_Error)(nil), (*Operation_Response)(nil), } diff --git a/terraform/providers/google/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go b/terraform/providers/google/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go index abdb2d6b638..3be65a155e6 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go @@ -60,6 +60,7 @@ func defaultOperationsGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://longrunning.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -351,7 +352,9 @@ func (c *operationsGRPCClient) Connection() *grpc.ClientConn { func (c *operationsGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when @@ -412,6 +415,7 @@ func defaultOperationsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://longrunning.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -421,7 +425,9 @@ func defaultOperationsRESTClientOptions() []option.ClientOption { func (c *operationsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when @@ -815,11 +821,11 @@ func (c *operationsRESTClient) WaitOperation(ctx context.Context, req *longrunni params.Add("name", fmt.Sprintf("%v", req.GetName())) } if req.GetTimeout() != nil { - timeout, err := protojson.Marshal(req.GetTimeout()) + field, err := protojson.Marshal(req.GetTimeout()) if err != nil { return nil, err } - params.Add("timeout", string(timeout[1:len(timeout)-1])) + params.Add("timeout", string(field[1:len(field)-1])) } baseUrl.RawQuery = params.Encode() diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/LICENSE b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
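[Reviewer note] The files added below vendor the Cloud Monitoring v3 GAPIC surface (alert policies, groups, metrics, notification channels, services, snoozes, uptime checks); per the gapic_metadata.json in this patch, only the gRPC transport is generated. Retries default to exponential backoff on codes.Unavailable (100ms initial, 30s cap, 1.3 multiplier). For orientation only, a minimal usage sketch adapted from the doc.go example included later in this patch — the project name is a placeholder and this snippet is not part of the vendored code:

	package main

	import (
		"context"
		"fmt"
		"log"

		monitoring "cloud.google.com/go/monitoring/apiv3/v2"
		monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
		"google.golang.org/api/iterator"
	)

	func main() {
		ctx := context.Background()
		// Uses default application credentials, as described in doc.go.
		c, err := monitoring.NewAlertPolicyClient(ctx)
		if err != nil {
			log.Fatal(err)
		}
		defer c.Close()

		// List alerting policies for a project and page through the results.
		it := c.ListAlertPolicies(ctx, &monitoringpb.ListAlertPoliciesRequest{
			Name: "projects/my-project", // placeholder parent resource name
		})
		for {
			policy, err := it.Next()
			if err == iterator.Done {
				break
			}
			if err != nil {
				log.Fatal(err)
			}
			fmt.Println(policy.GetDisplayName())
		}
	}
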
diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go new file mode 100644 index 00000000000..ae1dd6b9a23 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go @@ -0,0 +1,399 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" +) + +var newAlertPolicyClientHook clientHook + +// AlertPolicyCallOptions contains the retry settings for each method of AlertPolicyClient. +type AlertPolicyCallOptions struct { + ListAlertPolicies []gax.CallOption + GetAlertPolicy []gax.CallOption + CreateAlertPolicy []gax.CallOption + DeleteAlertPolicy []gax.CallOption + UpdateAlertPolicy []gax.CallOption +} + +func defaultAlertPolicyGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultAlertPolicyCallOptions() *AlertPolicyCallOptions { + return &AlertPolicyCallOptions{ + ListAlertPolicies: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetAlertPolicy: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateAlertPolicy: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + DeleteAlertPolicy: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + 
codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateAlertPolicy: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + } +} + +// internalAlertPolicyClient is an interface that defines the methods available from Cloud Monitoring API. +type internalAlertPolicyClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + ListAlertPolicies(context.Context, *monitoringpb.ListAlertPoliciesRequest, ...gax.CallOption) *AlertPolicyIterator + GetAlertPolicy(context.Context, *monitoringpb.GetAlertPolicyRequest, ...gax.CallOption) (*monitoringpb.AlertPolicy, error) + CreateAlertPolicy(context.Context, *monitoringpb.CreateAlertPolicyRequest, ...gax.CallOption) (*monitoringpb.AlertPolicy, error) + DeleteAlertPolicy(context.Context, *monitoringpb.DeleteAlertPolicyRequest, ...gax.CallOption) error + UpdateAlertPolicy(context.Context, *monitoringpb.UpdateAlertPolicyRequest, ...gax.CallOption) (*monitoringpb.AlertPolicy, error) +} + +// AlertPolicyClient is a client for interacting with Cloud Monitoring API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The AlertPolicyService API is used to manage (list, create, delete, +// edit) alert policies in Cloud Monitoring. An alerting policy is +// a description of the conditions under which some aspect of your +// system is considered to be “unhealthy” and the ways to notify +// people or services about this state. In addition to using this API, alert +// policies can also be managed through +// Cloud Monitoring (at https://cloud.google.com/monitoring/docs/), +// which can be reached by clicking the “Monitoring” tab in +// Cloud console (at https://console.cloud.google.com/). +type AlertPolicyClient struct { + // The internal transport-dependent client. + internalClient internalAlertPolicyClient + + // The call options for this service. + CallOptions *AlertPolicyCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *AlertPolicyClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *AlertPolicyClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *AlertPolicyClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// ListAlertPolicies lists the existing alerting policies for the workspace. +func (c *AlertPolicyClient) ListAlertPolicies(ctx context.Context, req *monitoringpb.ListAlertPoliciesRequest, opts ...gax.CallOption) *AlertPolicyIterator { + return c.internalClient.ListAlertPolicies(ctx, req, opts...) +} + +// GetAlertPolicy gets a single alerting policy. +func (c *AlertPolicyClient) GetAlertPolicy(ctx context.Context, req *monitoringpb.GetAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + return c.internalClient.GetAlertPolicy(ctx, req, opts...) +} + +// CreateAlertPolicy creates a new alerting policy. 
+// +// Design your application to single-thread API calls that modify the state of +// alerting policies in a single project. This includes calls to +// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. +func (c *AlertPolicyClient) CreateAlertPolicy(ctx context.Context, req *monitoringpb.CreateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + return c.internalClient.CreateAlertPolicy(ctx, req, opts...) +} + +// DeleteAlertPolicy deletes an alerting policy. +// +// Design your application to single-thread API calls that modify the state of +// alerting policies in a single project. This includes calls to +// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. +func (c *AlertPolicyClient) DeleteAlertPolicy(ctx context.Context, req *monitoringpb.DeleteAlertPolicyRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteAlertPolicy(ctx, req, opts...) +} + +// UpdateAlertPolicy updates an alerting policy. You can either replace the entire policy with +// a new one or replace only certain fields in the current alerting policy by +// specifying the fields to be updated via updateMask. Returns the +// updated alerting policy. +// +// Design your application to single-thread API calls that modify the state of +// alerting policies in a single project. This includes calls to +// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. +func (c *AlertPolicyClient) UpdateAlertPolicy(ctx context.Context, req *monitoringpb.UpdateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + return c.internalClient.UpdateAlertPolicy(ctx, req, opts...) +} + +// alertPolicyGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type alertPolicyGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing AlertPolicyClient + CallOptions **AlertPolicyCallOptions + + // The gRPC API client. + alertPolicyClient monitoringpb.AlertPolicyServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewAlertPolicyClient creates a new alert policy service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// The AlertPolicyService API is used to manage (list, create, delete, +// edit) alert policies in Cloud Monitoring. An alerting policy is +// a description of the conditions under which some aspect of your +// system is considered to be “unhealthy” and the ways to notify +// people or services about this state. In addition to using this API, alert +// policies can also be managed through +// Cloud Monitoring (at https://cloud.google.com/monitoring/docs/), +// which can be reached by clicking the “Monitoring” tab in +// Cloud console (at https://console.cloud.google.com/). +func NewAlertPolicyClient(ctx context.Context, opts ...option.ClientOption) (*AlertPolicyClient, error) { + clientOpts := defaultAlertPolicyGRPCClientOptions() + if newAlertPolicyClientHook != nil { + hookOpts, err := newAlertPolicyClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) 
+ if err != nil { + return nil, err + } + client := AlertPolicyClient{CallOptions: defaultAlertPolicyCallOptions()} + + c := &alertPolicyGRPCClient{ + connPool: connPool, + alertPolicyClient: monitoringpb.NewAlertPolicyServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *alertPolicyGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *alertPolicyGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *alertPolicyGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *alertPolicyGRPCClient) ListAlertPolicies(ctx context.Context, req *monitoringpb.ListAlertPoliciesRequest, opts ...gax.CallOption) *AlertPolicyIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListAlertPolicies[0:len((*c.CallOptions).ListAlertPolicies):len((*c.CallOptions).ListAlertPolicies)], opts...) + it := &AlertPolicyIterator{} + req = proto.Clone(req).(*monitoringpb.ListAlertPoliciesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.AlertPolicy, string, error) { + resp := &monitoringpb.ListAlertPoliciesResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.ListAlertPolicies(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetAlertPolicies(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *alertPolicyGRPCClient) GetAlertPolicy(ctx context.Context, req *monitoringpb.GetAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) 
+ opts = append((*c.CallOptions).GetAlertPolicy[0:len((*c.CallOptions).GetAlertPolicy):len((*c.CallOptions).GetAlertPolicy)], opts...) + var resp *monitoringpb.AlertPolicy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.GetAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *alertPolicyGRPCClient) CreateAlertPolicy(ctx context.Context, req *monitoringpb.CreateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateAlertPolicy[0:len((*c.CallOptions).CreateAlertPolicy):len((*c.CallOptions).CreateAlertPolicy)], opts...) + var resp *monitoringpb.AlertPolicy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.CreateAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *alertPolicyGRPCClient) DeleteAlertPolicy(ctx context.Context, req *monitoringpb.DeleteAlertPolicyRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteAlertPolicy[0:len((*c.CallOptions).DeleteAlertPolicy):len((*c.CallOptions).DeleteAlertPolicy)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.alertPolicyClient.DeleteAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *alertPolicyGRPCClient) UpdateAlertPolicy(ctx context.Context, req *monitoringpb.UpdateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "alert_policy.name", url.QueryEscape(req.GetAlertPolicy().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateAlertPolicy[0:len((*c.CallOptions).UpdateAlertPolicy):len((*c.CallOptions).UpdateAlertPolicy)], opts...) + var resp *monitoringpb.AlertPolicy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.UpdateAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary.go new file mode 100644 index 00000000000..4de74e773e1 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary.go @@ -0,0 +1,682 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package monitoring + +import ( + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + "google.golang.org/api/iterator" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" +) + +// AlertPolicyIterator manages a stream of *monitoringpb.AlertPolicy. +type AlertPolicyIterator struct { + items []*monitoringpb.AlertPolicy + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.AlertPolicy, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *AlertPolicyIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *AlertPolicyIterator) Next() (*monitoringpb.AlertPolicy, error) { + var item *monitoringpb.AlertPolicy + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *AlertPolicyIterator) bufLen() int { + return len(it.items) +} + +func (it *AlertPolicyIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// GroupIterator manages a stream of *monitoringpb.Group. +type GroupIterator struct { + items []*monitoringpb.Group + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Group, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *GroupIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. 
Once Next returns Done, all subsequent calls will return Done. +func (it *GroupIterator) Next() (*monitoringpb.Group, error) { + var item *monitoringpb.Group + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *GroupIterator) bufLen() int { + return len(it.items) +} + +func (it *GroupIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MetricDescriptorIterator manages a stream of *metricpb.MetricDescriptor. +type MetricDescriptorIterator struct { + items []*metricpb.MetricDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*metricpb.MetricDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MetricDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MetricDescriptorIterator) Next() (*metricpb.MetricDescriptor, error) { + var item *metricpb.MetricDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MetricDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *MetricDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor. +type MonitoredResourceDescriptorIterator struct { + items []*monitoredrespb.MonitoredResourceDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResourceDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) { + var item *monitoredrespb.MonitoredResourceDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MonitoredResourceDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *MonitoredResourceDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MonitoredResourceIterator manages a stream of *monitoredrespb.MonitoredResource. +type MonitoredResourceIterator struct { + items []*monitoredrespb.MonitoredResource + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResource, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MonitoredResourceIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MonitoredResourceIterator) Next() (*monitoredrespb.MonitoredResource, error) { + var item *monitoredrespb.MonitoredResource + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MonitoredResourceIterator) bufLen() int { + return len(it.items) +} + +func (it *MonitoredResourceIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// NotificationChannelDescriptorIterator manages a stream of *monitoringpb.NotificationChannelDescriptor. +type NotificationChannelDescriptorIterator struct { + items []*monitoringpb.NotificationChannelDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannelDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *NotificationChannelDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *NotificationChannelDescriptorIterator) Next() (*monitoringpb.NotificationChannelDescriptor, error) { + var item *monitoringpb.NotificationChannelDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *NotificationChannelDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *NotificationChannelDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// NotificationChannelIterator manages a stream of *monitoringpb.NotificationChannel. +type NotificationChannelIterator struct { + items []*monitoringpb.NotificationChannel + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannel, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *NotificationChannelIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *NotificationChannelIterator) Next() (*monitoringpb.NotificationChannel, error) { + var item *monitoringpb.NotificationChannel + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *NotificationChannelIterator) bufLen() int { + return len(it.items) +} + +func (it *NotificationChannelIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// ServiceIterator manages a stream of *monitoringpb.Service. +type ServiceIterator struct { + items []*monitoringpb.Service + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Service, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ServiceIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *ServiceIterator) Next() (*monitoringpb.Service, error) { + var item *monitoringpb.Service + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ServiceIterator) bufLen() int { + return len(it.items) +} + +func (it *ServiceIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// ServiceLevelObjectiveIterator manages a stream of *monitoringpb.ServiceLevelObjective. +type ServiceLevelObjectiveIterator struct { + items []*monitoringpb.ServiceLevelObjective + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.ServiceLevelObjective, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ServiceLevelObjectiveIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *ServiceLevelObjectiveIterator) Next() (*monitoringpb.ServiceLevelObjective, error) { + var item *monitoringpb.ServiceLevelObjective + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ServiceLevelObjectiveIterator) bufLen() int { + return len(it.items) +} + +func (it *ServiceLevelObjectiveIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// SnoozeIterator manages a stream of *monitoringpb.Snooze. +type SnoozeIterator struct { + items []*monitoringpb.Snooze + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Snooze, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *SnoozeIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *SnoozeIterator) Next() (*monitoringpb.Snooze, error) { + var item *monitoringpb.Snooze + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *SnoozeIterator) bufLen() int { + return len(it.items) +} + +func (it *SnoozeIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// TimeSeriesDataIterator manages a stream of *monitoringpb.TimeSeriesData. +type TimeSeriesDataIterator struct { + items []*monitoringpb.TimeSeriesData + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.TimeSeriesData, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TimeSeriesDataIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *TimeSeriesDataIterator) Next() (*monitoringpb.TimeSeriesData, error) { + var item *monitoringpb.TimeSeriesData + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TimeSeriesDataIterator) bufLen() int { + return len(it.items) +} + +func (it *TimeSeriesDataIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// TimeSeriesIterator manages a stream of *monitoringpb.TimeSeries. +type TimeSeriesIterator struct { + items []*monitoringpb.TimeSeries + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.TimeSeries, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TimeSeriesIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *TimeSeriesIterator) Next() (*monitoringpb.TimeSeries, error) { + var item *monitoringpb.TimeSeries + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TimeSeriesIterator) bufLen() int { + return len(it.items) +} + +func (it *TimeSeriesIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// UptimeCheckConfigIterator manages a stream of *monitoringpb.UptimeCheckConfig. +type UptimeCheckConfigIterator struct { + items []*monitoringpb.UptimeCheckConfig + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckConfig, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *UptimeCheckConfigIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *UptimeCheckConfigIterator) Next() (*monitoringpb.UptimeCheckConfig, error) { + var item *monitoringpb.UptimeCheckConfig + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *UptimeCheckConfigIterator) bufLen() int { + return len(it.items) +} + +func (it *UptimeCheckConfigIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// UptimeCheckIpIterator manages a stream of *monitoringpb.UptimeCheckIp. +type UptimeCheckIpIterator struct { + items []*monitoringpb.UptimeCheckIp + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckIp, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *UptimeCheckIpIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *UptimeCheckIpIterator) Next() (*monitoringpb.UptimeCheckIp, error) { + var item *monitoringpb.UptimeCheckIp + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *UptimeCheckIpIterator) bufLen() int { + return len(it.items) +} + +func (it *UptimeCheckIpIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary_go123.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary_go123.go new file mode 100644 index 00000000000..2982e1f84d5 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary_go123.go @@ -0,0 +1,112 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +//go:build go1.23 + +package monitoring + +import ( + "iter" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + "github.com/googleapis/gax-go/v2/iterator" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" +) + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *AlertPolicyIterator) All() iter.Seq2[*monitoringpb.AlertPolicy, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *GroupIterator) All() iter.Seq2[*monitoringpb.Group, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *MetricDescriptorIterator) All() iter.Seq2[*metricpb.MetricDescriptor, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *MonitoredResourceDescriptorIterator) All() iter.Seq2[*monitoredrespb.MonitoredResourceDescriptor, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *MonitoredResourceIterator) All() iter.Seq2[*monitoredrespb.MonitoredResource, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *NotificationChannelDescriptorIterator) All() iter.Seq2[*monitoringpb.NotificationChannelDescriptor, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. 
+func (it *NotificationChannelIterator) All() iter.Seq2[*monitoringpb.NotificationChannel, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ServiceIterator) All() iter.Seq2[*monitoringpb.Service, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ServiceLevelObjectiveIterator) All() iter.Seq2[*monitoringpb.ServiceLevelObjective, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *SnoozeIterator) All() iter.Seq2[*monitoringpb.Snooze, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *TimeSeriesDataIterator) All() iter.Seq2[*monitoringpb.TimeSeriesData, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *TimeSeriesIterator) All() iter.Seq2[*monitoringpb.TimeSeries, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *UptimeCheckConfigIterator) All() iter.Seq2[*monitoringpb.UptimeCheckConfig, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *UptimeCheckIpIterator) All() iter.Seq2[*monitoringpb.UptimeCheckIp, error] { + return iterator.RangeAdapter(it.Next) +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go new file mode 100644 index 00000000000..e8c40364753 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go @@ -0,0 +1,124 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +// Package monitoring is an auto-generated package for the +// Cloud Monitoring API. +// +// Manages your Cloud Monitoring data and configurations. +// +// # General documentation +// +// For information that is relevant for all client libraries please reference +// https://pkg.go.dev/cloud.google.com/go#pkg-overview. Some information on this +// page includes: +// +// - [Authentication and Authorization] +// - [Timeouts and Cancellation] +// - [Testing against Client Libraries] +// - [Debugging Client Libraries] +// - [Inspecting errors] +// +// # Example usage +// +// To get started with this package, create a client. 
+// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := monitoring.NewAlertPolicyClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// The client will use your default application credentials. Clients should be reused instead of created as needed. +// The methods of Client are safe for concurrent use by multiple goroutines. +// The returned client must be Closed when it is done being used. +// +// # Using the Client +// +// The following is an example of making an API call with the newly created client. +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := monitoring.NewAlertPolicyClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// req := &monitoringpb.CreateAlertPolicyRequest{ +// // TODO: Fill request struct fields. +// // See https://pkg.go.dev/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb#CreateAlertPolicyRequest. +// } +// resp, err := c.CreateAlertPolicy(ctx, req) +// if err != nil { +// // TODO: Handle error. +// } +// // TODO: Use resp. +// _ = resp +// +// # Use of Context +// +// The ctx passed to NewAlertPolicyClient is used for authentication requests and +// for creating the underlying connection, but is not used for subsequent calls. +// Individual methods on the client use the ctx given to them. +// +// To close the open connection, use the Close() method. +// +// [Authentication and Authorization]: https://pkg.go.dev/cloud.google.com/go#hdr-Authentication_and_Authorization +// [Timeouts and Cancellation]: https://pkg.go.dev/cloud.google.com/go#hdr-Timeouts_and_Cancellation +// [Testing against Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Testing +// [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging +// [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors +package monitoring // import "cloud.google.com/go/monitoring/apiv3/v2" + +import ( + "context" + + "google.golang.org/api/option" +) + +// For more information on implementing a client constructor hook, see +// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors. +type clientHookParams struct{} +type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) + +var versionClient string + +func getVersionClient() string { + if versionClient == "" { + return "UNKNOWN" + } + return versionClient +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. 
+func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write", + } +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/gapic_metadata.json b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/gapic_metadata.json new file mode 100644 index 00000000000..a33cb6fcf53 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/gapic_metadata.json @@ -0,0 +1,336 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods.", + "language": "go", + "protoPackage": "google.monitoring.v3", + "libraryPackage": "cloud.google.com/go/monitoring/apiv3/v2", + "services": { + "AlertPolicyService": { + "clients": { + "grpc": { + "libraryClient": "AlertPolicyClient", + "rpcs": { + "CreateAlertPolicy": { + "methods": [ + "CreateAlertPolicy" + ] + }, + "DeleteAlertPolicy": { + "methods": [ + "DeleteAlertPolicy" + ] + }, + "GetAlertPolicy": { + "methods": [ + "GetAlertPolicy" + ] + }, + "ListAlertPolicies": { + "methods": [ + "ListAlertPolicies" + ] + }, + "UpdateAlertPolicy": { + "methods": [ + "UpdateAlertPolicy" + ] + } + } + } + } + }, + "GroupService": { + "clients": { + "grpc": { + "libraryClient": "GroupClient", + "rpcs": { + "CreateGroup": { + "methods": [ + "CreateGroup" + ] + }, + "DeleteGroup": { + "methods": [ + "DeleteGroup" + ] + }, + "GetGroup": { + "methods": [ + "GetGroup" + ] + }, + "ListGroupMembers": { + "methods": [ + "ListGroupMembers" + ] + }, + "ListGroups": { + "methods": [ + "ListGroups" + ] + }, + "UpdateGroup": { + "methods": [ + "UpdateGroup" + ] + } + } + } + } + }, + "MetricService": { + "clients": { + "grpc": { + "libraryClient": "MetricClient", + "rpcs": { + "CreateMetricDescriptor": { + "methods": [ + "CreateMetricDescriptor" + ] + }, + "CreateServiceTimeSeries": { + "methods": [ + "CreateServiceTimeSeries" + ] + }, + "CreateTimeSeries": { + "methods": [ + "CreateTimeSeries" + ] + }, + "DeleteMetricDescriptor": { + "methods": [ + "DeleteMetricDescriptor" + ] + }, + "GetMetricDescriptor": { + "methods": [ + "GetMetricDescriptor" + ] + }, + "GetMonitoredResourceDescriptor": { + "methods": [ + "GetMonitoredResourceDescriptor" + ] + }, + "ListMetricDescriptors": { + "methods": [ + "ListMetricDescriptors" + ] + }, + "ListMonitoredResourceDescriptors": { + "methods": [ + "ListMonitoredResourceDescriptors" + ] + }, + "ListTimeSeries": { + "methods": [ + "ListTimeSeries" + ] + } + } + } + } + }, + "NotificationChannelService": { + "clients": { + "grpc": { + "libraryClient": "NotificationChannelClient", + "rpcs": { + "CreateNotificationChannel": { + "methods": [ + "CreateNotificationChannel" + ] + }, + "DeleteNotificationChannel": { + "methods": [ + "DeleteNotificationChannel" + ] + }, + "GetNotificationChannel": { + "methods": [ + "GetNotificationChannel" + ] + }, + "GetNotificationChannelDescriptor": { + "methods": [ + "GetNotificationChannelDescriptor" + ] + }, + "GetNotificationChannelVerificationCode": { + "methods": [ + "GetNotificationChannelVerificationCode" + ] + }, + "ListNotificationChannelDescriptors": { + "methods": [ + "ListNotificationChannelDescriptors" + ] + }, + "ListNotificationChannels": { + "methods": [ + "ListNotificationChannels" + ] + }, + "SendNotificationChannelVerificationCode": { + "methods": [ + 
"SendNotificationChannelVerificationCode" + ] + }, + "UpdateNotificationChannel": { + "methods": [ + "UpdateNotificationChannel" + ] + }, + "VerifyNotificationChannel": { + "methods": [ + "VerifyNotificationChannel" + ] + } + } + } + } + }, + "QueryService": { + "clients": { + "grpc": { + "libraryClient": "QueryClient", + "rpcs": { + "QueryTimeSeries": { + "methods": [ + "QueryTimeSeries" + ] + } + } + } + } + }, + "ServiceMonitoringService": { + "clients": { + "grpc": { + "libraryClient": "ServiceMonitoringClient", + "rpcs": { + "CreateService": { + "methods": [ + "CreateService" + ] + }, + "CreateServiceLevelObjective": { + "methods": [ + "CreateServiceLevelObjective" + ] + }, + "DeleteService": { + "methods": [ + "DeleteService" + ] + }, + "DeleteServiceLevelObjective": { + "methods": [ + "DeleteServiceLevelObjective" + ] + }, + "GetService": { + "methods": [ + "GetService" + ] + }, + "GetServiceLevelObjective": { + "methods": [ + "GetServiceLevelObjective" + ] + }, + "ListServiceLevelObjectives": { + "methods": [ + "ListServiceLevelObjectives" + ] + }, + "ListServices": { + "methods": [ + "ListServices" + ] + }, + "UpdateService": { + "methods": [ + "UpdateService" + ] + }, + "UpdateServiceLevelObjective": { + "methods": [ + "UpdateServiceLevelObjective" + ] + } + } + } + } + }, + "SnoozeService": { + "clients": { + "grpc": { + "libraryClient": "SnoozeClient", + "rpcs": { + "CreateSnooze": { + "methods": [ + "CreateSnooze" + ] + }, + "GetSnooze": { + "methods": [ + "GetSnooze" + ] + }, + "ListSnoozes": { + "methods": [ + "ListSnoozes" + ] + }, + "UpdateSnooze": { + "methods": [ + "UpdateSnooze" + ] + } + } + } + } + }, + "UptimeCheckService": { + "clients": { + "grpc": { + "libraryClient": "UptimeCheckClient", + "rpcs": { + "CreateUptimeCheckConfig": { + "methods": [ + "CreateUptimeCheckConfig" + ] + }, + "DeleteUptimeCheckConfig": { + "methods": [ + "DeleteUptimeCheckConfig" + ] + }, + "GetUptimeCheckConfig": { + "methods": [ + "GetUptimeCheckConfig" + ] + }, + "ListUptimeCheckConfigs": { + "methods": [ + "ListUptimeCheckConfigs" + ] + }, + "ListUptimeCheckIps": { + "methods": [ + "ListUptimeCheckIps" + ] + }, + "UpdateUptimeCheckConfig": { + "methods": [ + "UpdateUptimeCheckConfig" + ] + } + } + } + } + } + } +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go new file mode 100644 index 00000000000..da216081d5a --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go @@ -0,0 +1,466 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. 
+ +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" +) + +var newGroupClientHook clientHook + +// GroupCallOptions contains the retry settings for each method of GroupClient. +type GroupCallOptions struct { + ListGroups []gax.CallOption + GetGroup []gax.CallOption + CreateGroup []gax.CallOption + UpdateGroup []gax.CallOption + DeleteGroup []gax.CallOption + ListGroupMembers []gax.CallOption +} + +func defaultGroupGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultGroupCallOptions() *GroupCallOptions { + return &GroupCallOptions{ + ListGroups: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetGroup: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateGroup: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + UpdateGroup: []gax.CallOption{ + gax.WithTimeout(180000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + DeleteGroup: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListGroupMembers: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + } +} + +// internalGroupClient is an interface that defines the methods available from Cloud Monitoring API. 
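+// Its concrete implementation in this file is groupGRPCClient; the
+// indirection keeps transport-specific details out of the exported
+// GroupClient surface.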
+type internalGroupClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + ListGroups(context.Context, *monitoringpb.ListGroupsRequest, ...gax.CallOption) *GroupIterator + GetGroup(context.Context, *monitoringpb.GetGroupRequest, ...gax.CallOption) (*monitoringpb.Group, error) + CreateGroup(context.Context, *monitoringpb.CreateGroupRequest, ...gax.CallOption) (*monitoringpb.Group, error) + UpdateGroup(context.Context, *monitoringpb.UpdateGroupRequest, ...gax.CallOption) (*monitoringpb.Group, error) + DeleteGroup(context.Context, *monitoringpb.DeleteGroupRequest, ...gax.CallOption) error + ListGroupMembers(context.Context, *monitoringpb.ListGroupMembersRequest, ...gax.CallOption) *MonitoredResourceIterator +} + +// GroupClient is a client for interacting with Cloud Monitoring API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The Group API lets you inspect and manage your +// groups (at #google.monitoring.v3.Group). +// +// A group is a named filter that is used to identify +// a collection of monitored resources. Groups are typically used to +// mirror the physical and/or logical topology of the environment. +// Because group membership is computed dynamically, monitored +// resources that are started in the future are automatically placed +// in matching groups. By using a group to name monitored resources in, +// for example, an alert policy, the target of that alert policy is +// updated automatically as monitored resources are added and removed +// from the infrastructure. +type GroupClient struct { + // The internal transport-dependent client. + internalClient internalGroupClient + + // The call options for this service. + CallOptions *GroupCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *GroupClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *GroupClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *GroupClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// ListGroups lists the existing groups. +func (c *GroupClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest, opts ...gax.CallOption) *GroupIterator { + return c.internalClient.ListGroups(ctx, req, opts...) +} + +// GetGroup gets a single group. +func (c *GroupClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + return c.internalClient.GetGroup(ctx, req, opts...) +} + +// CreateGroup creates a new group. +func (c *GroupClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + return c.internalClient.CreateGroup(ctx, req, opts...) +} + +// UpdateGroup updates an existing group. +// You can change any group attributes except name. 
+func (c *GroupClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + return c.internalClient.UpdateGroup(ctx, req, opts...) +} + +// DeleteGroup deletes an existing group. +func (c *GroupClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteGroup(ctx, req, opts...) +} + +// ListGroupMembers lists the monitored resources that are members of a group. +func (c *GroupClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest, opts ...gax.CallOption) *MonitoredResourceIterator { + return c.internalClient.ListGroupMembers(ctx, req, opts...) +} + +// groupGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type groupGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing GroupClient + CallOptions **GroupCallOptions + + // The gRPC API client. + groupClient monitoringpb.GroupServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewGroupClient creates a new group service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// The Group API lets you inspect and manage your +// groups (at #google.monitoring.v3.Group). +// +// A group is a named filter that is used to identify +// a collection of monitored resources. Groups are typically used to +// mirror the physical and/or logical topology of the environment. +// Because group membership is computed dynamically, monitored +// resources that are started in the future are automatically placed +// in matching groups. By using a group to name monitored resources in, +// for example, an alert policy, the target of that alert policy is +// updated automatically as monitored resources are added and removed +// from the infrastructure. +func NewGroupClient(ctx context.Context, opts ...option.ClientOption) (*GroupClient, error) { + clientOpts := defaultGroupGRPCClientOptions() + if newGroupClientHook != nil { + hookOpts, err := newGroupClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := GroupClient{CallOptions: defaultGroupCallOptions()} + + c := &groupGRPCClient{ + connPool: connPool, + groupClient: monitoringpb.NewGroupServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *groupGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *groupGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
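+	// kv holds alternating key/value pairs for the x-goog-api-client header:
+	// the Go runtime version ("gl-go"), any caller-supplied pairs, and the
+	// gapic/gax/grpc module versions appended on the next line.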
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *groupGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *groupGRPCClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest, opts ...gax.CallOption) *GroupIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListGroups[0:len((*c.CallOptions).ListGroups):len((*c.CallOptions).ListGroups)], opts...) + it := &GroupIterator{} + req = proto.Clone(req).(*monitoringpb.ListGroupsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Group, string, error) { + resp := &monitoringpb.ListGroupsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.ListGroups(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetGroup(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *groupGRPCClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetGroup[0:len((*c.CallOptions).GetGroup):len((*c.CallOptions).GetGroup)], opts...) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.GetGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *groupGRPCClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateGroup[0:len((*c.CallOptions).CreateGroup):len((*c.CallOptions).CreateGroup)], opts...) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.CreateGroup(ctx, req, settings.GRPC...) + return err + }, opts...) 
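+	// By this point gax.Invoke has applied the CreateGroup timeout (and any
+	// retry policy) configured in GroupCallOptions; err is the final outcome.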
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *groupGRPCClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "group.name", url.QueryEscape(req.GetGroup().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateGroup[0:len((*c.CallOptions).UpdateGroup):len((*c.CallOptions).UpdateGroup)], opts...) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.UpdateGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *groupGRPCClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteGroup[0:len((*c.CallOptions).DeleteGroup):len((*c.CallOptions).DeleteGroup)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.groupClient.DeleteGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *groupGRPCClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest, opts ...gax.CallOption) *MonitoredResourceIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListGroupMembers[0:len((*c.CallOptions).ListGroupMembers):len((*c.CallOptions).ListGroupMembers)], opts...) + it := &MonitoredResourceIterator{} + req = proto.Clone(req).(*monitoringpb.ListGroupMembersRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResource, string, error) { + resp := &monitoringpb.ListGroupMembersResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.ListGroupMembers(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetMembers(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
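+		// Fetched members are buffered on the iterator; Next drains this
+		// buffer before fetch is called again for the following page.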
+ return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go new file mode 100644 index 00000000000..d43d261d185 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go @@ -0,0 +1,578 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" +) + +var newMetricClientHook clientHook + +// MetricCallOptions contains the retry settings for each method of MetricClient. 
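+// These defaults are applied first on each call; options passed by the
+// caller are appended after them, so caller-supplied options take precedence.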
+type MetricCallOptions struct { + ListMonitoredResourceDescriptors []gax.CallOption + GetMonitoredResourceDescriptor []gax.CallOption + ListMetricDescriptors []gax.CallOption + GetMetricDescriptor []gax.CallOption + CreateMetricDescriptor []gax.CallOption + DeleteMetricDescriptor []gax.CallOption + ListTimeSeries []gax.CallOption + CreateTimeSeries []gax.CallOption + CreateServiceTimeSeries []gax.CallOption +} + +func defaultMetricGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultMetricCallOptions() *MetricCallOptions { + return &MetricCallOptions{ + ListMonitoredResourceDescriptors: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetMonitoredResourceDescriptor: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListMetricDescriptors: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetMetricDescriptor: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateMetricDescriptor: []gax.CallOption{ + gax.WithTimeout(12000 * time.Millisecond), + }, + DeleteMetricDescriptor: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListTimeSeries: []gax.CallOption{ + gax.WithTimeout(90000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateTimeSeries: []gax.CallOption{ + gax.WithTimeout(12000 * time.Millisecond), + }, + CreateServiceTimeSeries: []gax.CallOption{}, + } +} + +// internalMetricClient is an interface that defines the methods available from Cloud Monitoring API. 
+type internalMetricClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + ListMonitoredResourceDescriptors(context.Context, *monitoringpb.ListMonitoredResourceDescriptorsRequest, ...gax.CallOption) *MonitoredResourceDescriptorIterator + GetMonitoredResourceDescriptor(context.Context, *monitoringpb.GetMonitoredResourceDescriptorRequest, ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) + ListMetricDescriptors(context.Context, *monitoringpb.ListMetricDescriptorsRequest, ...gax.CallOption) *MetricDescriptorIterator + GetMetricDescriptor(context.Context, *monitoringpb.GetMetricDescriptorRequest, ...gax.CallOption) (*metricpb.MetricDescriptor, error) + CreateMetricDescriptor(context.Context, *monitoringpb.CreateMetricDescriptorRequest, ...gax.CallOption) (*metricpb.MetricDescriptor, error) + DeleteMetricDescriptor(context.Context, *monitoringpb.DeleteMetricDescriptorRequest, ...gax.CallOption) error + ListTimeSeries(context.Context, *monitoringpb.ListTimeSeriesRequest, ...gax.CallOption) *TimeSeriesIterator + CreateTimeSeries(context.Context, *monitoringpb.CreateTimeSeriesRequest, ...gax.CallOption) error + CreateServiceTimeSeries(context.Context, *monitoringpb.CreateTimeSeriesRequest, ...gax.CallOption) error +} + +// MetricClient is a client for interacting with Cloud Monitoring API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// Manages metric descriptors, monitored resource descriptors, and +// time series data. +type MetricClient struct { + // The internal transport-dependent client. + internalClient internalMetricClient + + // The call options for this service. + CallOptions *MetricCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *MetricClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *MetricClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *MetricClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// ListMonitoredResourceDescriptors lists monitored resource descriptors that match a filter. +func (c *MetricClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator { + return c.internalClient.ListMonitoredResourceDescriptors(ctx, req, opts...) +} + +// GetMonitoredResourceDescriptor gets a single monitored resource descriptor. +func (c *MetricClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest, opts ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) { + return c.internalClient.GetMonitoredResourceDescriptor(ctx, req, opts...) +} + +// ListMetricDescriptors lists metric descriptors that match a filter. 
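+// As an illustrative (not exhaustive) example, a filter such as
+//
+//	metric.type = starts_with("custom.googleapis.com/")
+//
+// restricts the listing to user-defined metric types.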
+func (c *MetricClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, opts ...gax.CallOption) *MetricDescriptorIterator { + return c.internalClient.ListMetricDescriptors(ctx, req, opts...) +} + +// GetMetricDescriptor gets a single metric descriptor. +func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { + return c.internalClient.GetMetricDescriptor(ctx, req, opts...) +} + +// CreateMetricDescriptor creates a new metric descriptor. +// The creation is executed asynchronously. +// User-created metric descriptors define +// custom metrics (at https://cloud.google.com/monitoring/custom-metrics). +// The metric descriptor is updated if it already exists, +// except that metric labels are never removed. +func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { + return c.internalClient.CreateMetricDescriptor(ctx, req, opts...) +} + +// DeleteMetricDescriptor deletes a metric descriptor. Only user-created +// custom metrics (at https://cloud.google.com/monitoring/custom-metrics) can be +// deleted. +func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteMetricDescriptor(ctx, req, opts...) +} + +// ListTimeSeries lists time series that match a filter. +func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesIterator { + return c.internalClient.ListTimeSeries(ctx, req, opts...) +} + +// CreateTimeSeries creates or adds data to one or more time series. +// The response is empty if all time series in the request were written. +// If any time series could not be written, a corresponding failure message is +// included in the error response. +// This method does not support +// resource locations constraint of an organization +// policy (at https://cloud.google.com/resource-manager/docs/organization-policy/defining-locations#setting_the_organization_policy). +func (c *MetricClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error { + return c.internalClient.CreateTimeSeries(ctx, req, opts...) +} + +// CreateServiceTimeSeries creates or adds data to one or more service time series. A service time +// series is a time series for a metric from a Google Cloud service. The +// response is empty if all time series in the request were written. If any +// time series could not be written, a corresponding failure message is +// included in the error response. This endpoint rejects writes to +// user-defined metrics. +// This method is only for use by Google Cloud services. Use +// projects.timeSeries.create +// instead. +func (c *MetricClient) CreateServiceTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error { + return c.internalClient.CreateServiceTimeSeries(ctx, req, opts...) +} + +// metricGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type metricGRPCClient struct { + // Connection pool of gRPC connections to the service. 
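+	// Its size can be configured with option.WithGRPCConnectionPool.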
+ connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing MetricClient + CallOptions **MetricCallOptions + + // The gRPC API client. + metricClient monitoringpb.MetricServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewMetricClient creates a new metric service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// Manages metric descriptors, monitored resource descriptors, and +// time series data. +func NewMetricClient(ctx context.Context, opts ...option.ClientOption) (*MetricClient, error) { + clientOpts := defaultMetricGRPCClientOptions() + if newMetricClientHook != nil { + hookOpts, err := newMetricClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := MetricClient{CallOptions: defaultMetricCallOptions()} + + c := &metricGRPCClient{ + connPool: connPool, + metricClient: monitoringpb.NewMetricServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *metricGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *metricGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *metricGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *metricGRPCClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListMonitoredResourceDescriptors[0:len((*c.CallOptions).ListMonitoredResourceDescriptors):len((*c.CallOptions).ListMonitoredResourceDescriptors)], opts...) 
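+	// The full slice expression above caps capacity so that appending the
+	// caller's options allocates a new slice rather than mutating the shared
+	// defaults stored in CallOptions.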
+ it := &MonitoredResourceDescriptorIterator{} + req = proto.Clone(req).(*monitoringpb.ListMonitoredResourceDescriptorsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) { + resp := &monitoringpb.ListMonitoredResourceDescriptorsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.ListMonitoredResourceDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetResourceDescriptors(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *metricGRPCClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest, opts ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetMonitoredResourceDescriptor[0:len((*c.CallOptions).GetMonitoredResourceDescriptor):len((*c.CallOptions).GetMonitoredResourceDescriptor)], opts...) + var resp *monitoredrespb.MonitoredResourceDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.GetMonitoredResourceDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *metricGRPCClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, opts ...gax.CallOption) *MetricDescriptorIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListMetricDescriptors[0:len((*c.CallOptions).ListMetricDescriptors):len((*c.CallOptions).ListMetricDescriptors)], opts...) + it := &MetricDescriptorIterator{} + req = proto.Clone(req).(*monitoringpb.ListMetricDescriptorsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*metricpb.MetricDescriptor, string, error) { + resp := &monitoringpb.ListMetricDescriptorsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.ListMetricDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) 
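+		// An error from fetching this page is returned as-is and surfaces to
+		// the caller through the iterator's Next method.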
+ if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetMetricDescriptors(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *metricGRPCClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetMetricDescriptor[0:len((*c.CallOptions).GetMetricDescriptor):len((*c.CallOptions).GetMetricDescriptor)], opts...) + var resp *metricpb.MetricDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.GetMetricDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *metricGRPCClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateMetricDescriptor[0:len((*c.CallOptions).CreateMetricDescriptor):len((*c.CallOptions).CreateMetricDescriptor)], opts...) + var resp *metricpb.MetricDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.CreateMetricDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *metricGRPCClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteMetricDescriptor[0:len((*c.CallOptions).DeleteMetricDescriptor):len((*c.CallOptions).DeleteMetricDescriptor)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.metricClient.DeleteMetricDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *metricGRPCClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListTimeSeries[0:len((*c.CallOptions).ListTimeSeries):len((*c.CallOptions).ListTimeSeries)], opts...) 
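+	// proto.Clone below copies the request before pagination mutates
+	// PageToken and PageSize, leaving the caller's req untouched.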
+ it := &TimeSeriesIterator{} + req = proto.Clone(req).(*monitoringpb.ListTimeSeriesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeries, string, error) { + resp := &monitoringpb.ListTimeSeriesResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.ListTimeSeries(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetTimeSeries(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *metricGRPCClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateTimeSeries[0:len((*c.CallOptions).CreateTimeSeries):len((*c.CallOptions).CreateTimeSeries)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.metricClient.CreateTimeSeries(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *metricGRPCClient) CreateServiceTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateServiceTimeSeries[0:len((*c.CallOptions).CreateServiceTimeSeries):len((*c.CallOptions).CreateServiceTimeSeries)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.metricClient.CreateServiceTimeSeries(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go new file mode 100644 index 00000000000..e7b3595fa64 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go @@ -0,0 +1,2432 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/alert.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + status "google.golang.org/genproto/googleapis/rpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Operators for combining conditions. +type AlertPolicy_ConditionCombinerType int32 + +const ( + // An unspecified combiner. + AlertPolicy_COMBINE_UNSPECIFIED AlertPolicy_ConditionCombinerType = 0 + // Combine conditions using the logical `AND` operator. An + // incident is created only if all the conditions are met + // simultaneously. This combiner is satisfied if all conditions are + // met, even if they are met on completely different resources. + AlertPolicy_AND AlertPolicy_ConditionCombinerType = 1 + // Combine conditions using the logical `OR` operator. An incident + // is created if any of the listed conditions is met. + AlertPolicy_OR AlertPolicy_ConditionCombinerType = 2 + // Combine conditions using logical `AND` operator, but unlike the regular + // `AND` option, an incident is created only if all conditions are met + // simultaneously on at least one resource. + AlertPolicy_AND_WITH_MATCHING_RESOURCE AlertPolicy_ConditionCombinerType = 3 +) + +// Enum value maps for AlertPolicy_ConditionCombinerType. +var ( + AlertPolicy_ConditionCombinerType_name = map[int32]string{ + 0: "COMBINE_UNSPECIFIED", + 1: "AND", + 2: "OR", + 3: "AND_WITH_MATCHING_RESOURCE", + } + AlertPolicy_ConditionCombinerType_value = map[string]int32{ + "COMBINE_UNSPECIFIED": 0, + "AND": 1, + "OR": 2, + "AND_WITH_MATCHING_RESOURCE": 3, + } +) + +func (x AlertPolicy_ConditionCombinerType) Enum() *AlertPolicy_ConditionCombinerType { + p := new(AlertPolicy_ConditionCombinerType) + *p = x + return p +} + +func (x AlertPolicy_ConditionCombinerType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AlertPolicy_ConditionCombinerType) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_alert_proto_enumTypes[0].Descriptor() +} + +func (AlertPolicy_ConditionCombinerType) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_alert_proto_enumTypes[0] +} + +func (x AlertPolicy_ConditionCombinerType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AlertPolicy_ConditionCombinerType.Descriptor instead. +func (AlertPolicy_ConditionCombinerType) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 0} +} + +// An enumeration of possible severity level for an Alert Policy. +type AlertPolicy_Severity int32 + +const ( + // No severity is specified. This is the default value. + AlertPolicy_SEVERITY_UNSPECIFIED AlertPolicy_Severity = 0 + // This is the highest severity level. 
Use this if the problem could + // cause significant damage or downtime. + AlertPolicy_CRITICAL AlertPolicy_Severity = 1 + // This is the medium severity level. Use this if the problem could + // cause minor damage or downtime. + AlertPolicy_ERROR AlertPolicy_Severity = 2 + // This is the lowest severity level. Use this if the problem is not causing + // any damage or downtime, but could potentially lead to a problem in the + // future. + AlertPolicy_WARNING AlertPolicy_Severity = 3 +) + +// Enum value maps for AlertPolicy_Severity. +var ( + AlertPolicy_Severity_name = map[int32]string{ + 0: "SEVERITY_UNSPECIFIED", + 1: "CRITICAL", + 2: "ERROR", + 3: "WARNING", + } + AlertPolicy_Severity_value = map[string]int32{ + "SEVERITY_UNSPECIFIED": 0, + "CRITICAL": 1, + "ERROR": 2, + "WARNING": 3, + } +) + +func (x AlertPolicy_Severity) Enum() *AlertPolicy_Severity { + p := new(AlertPolicy_Severity) + *p = x + return p +} + +func (x AlertPolicy_Severity) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AlertPolicy_Severity) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_alert_proto_enumTypes[1].Descriptor() +} + +func (AlertPolicy_Severity) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_alert_proto_enumTypes[1] +} + +func (x AlertPolicy_Severity) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AlertPolicy_Severity.Descriptor instead. +func (AlertPolicy_Severity) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1} +} + +// A condition control that determines how metric-threshold conditions +// are evaluated when data stops arriving. +// This control doesn't affect metric-absence policies. +type AlertPolicy_Condition_EvaluationMissingData int32 + +const ( + // An unspecified evaluation missing data option. Equivalent to + // EVALUATION_MISSING_DATA_NO_OP. + AlertPolicy_Condition_EVALUATION_MISSING_DATA_UNSPECIFIED AlertPolicy_Condition_EvaluationMissingData = 0 + // If there is no data to evaluate the condition, then evaluate the + // condition as false. + AlertPolicy_Condition_EVALUATION_MISSING_DATA_INACTIVE AlertPolicy_Condition_EvaluationMissingData = 1 + // If there is no data to evaluate the condition, then evaluate the + // condition as true. + AlertPolicy_Condition_EVALUATION_MISSING_DATA_ACTIVE AlertPolicy_Condition_EvaluationMissingData = 2 + // Do not evaluate the condition to any value if there is no data. + AlertPolicy_Condition_EVALUATION_MISSING_DATA_NO_OP AlertPolicy_Condition_EvaluationMissingData = 3 +) + +// Enum value maps for AlertPolicy_Condition_EvaluationMissingData. 
+var ( + AlertPolicy_Condition_EvaluationMissingData_name = map[int32]string{ + 0: "EVALUATION_MISSING_DATA_UNSPECIFIED", + 1: "EVALUATION_MISSING_DATA_INACTIVE", + 2: "EVALUATION_MISSING_DATA_ACTIVE", + 3: "EVALUATION_MISSING_DATA_NO_OP", + } + AlertPolicy_Condition_EvaluationMissingData_value = map[string]int32{ + "EVALUATION_MISSING_DATA_UNSPECIFIED": 0, + "EVALUATION_MISSING_DATA_INACTIVE": 1, + "EVALUATION_MISSING_DATA_ACTIVE": 2, + "EVALUATION_MISSING_DATA_NO_OP": 3, + } +) + +func (x AlertPolicy_Condition_EvaluationMissingData) Enum() *AlertPolicy_Condition_EvaluationMissingData { + p := new(AlertPolicy_Condition_EvaluationMissingData) + *p = x + return p +} + +func (x AlertPolicy_Condition_EvaluationMissingData) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AlertPolicy_Condition_EvaluationMissingData) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_alert_proto_enumTypes[2].Descriptor() +} + +func (AlertPolicy_Condition_EvaluationMissingData) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_alert_proto_enumTypes[2] +} + +func (x AlertPolicy_Condition_EvaluationMissingData) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AlertPolicy_Condition_EvaluationMissingData.Descriptor instead. +func (AlertPolicy_Condition_EvaluationMissingData) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 0} +} + +// A description of the conditions under which some aspect of your system is +// considered to be "unhealthy" and the ways to notify people or services about +// this state. For an overview of alert policies, see +// [Introduction to Alerting](https://cloud.google.com/monitoring/alerts/). +type AlertPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required if the policy exists. The resource name for this policy. The + // format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + // + // `[ALERT_POLICY_ID]` is assigned by Cloud Monitoring when the policy + // is created. When calling the + // [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy] + // method, do not include the `name` field in the alerting policy passed as + // part of the request. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A short name or phrase used to identify the policy in dashboards, + // notifications, and incidents. To avoid confusion, don't use the same + // display name for multiple policies in the same project. The name is + // limited to 512 Unicode characters. + // + // The convention for the display_name of a PrometheusQueryLanguageCondition + // is "{rule group name}/{alert name}", where the {rule group name} and + // {alert name} should be taken from the corresponding Prometheus + // configuration file. This convention is not enforced. + // In any case the display_name is not a unique key of the AlertPolicy. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Documentation that is included with notifications and incidents related to + // this policy. Best practice is for the documentation to include information + // to help responders understand, mitigate, escalate, and correct the + // underlying problems detected by the alerting policy. 
Notification channels
+ // that have limited capacity might not show this documentation.
+ Documentation *AlertPolicy_Documentation `protobuf:"bytes,13,opt,name=documentation,proto3" json:"documentation,omitempty"`
+ // User-supplied key/value data to be used for organizing and
+ // identifying the `AlertPolicy` objects.
+ //
+ // The field can contain up to 64 entries. Each key and value is limited to
+ // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and
+ // values can contain only lowercase letters, numerals, underscores, and
+ // dashes. Keys must begin with a letter.
+ //
+ // Note that Prometheus {alert name} is a
+ // [valid Prometheus label
+ // name](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels),
+ // whereas Prometheus {rule group} is an unrestricted UTF-8 string.
+ // This means that they cannot be stored as-is in user labels, because
+ // they may contain characters that are not allowed in user-label values.
+ UserLabels map[string]string `protobuf:"bytes,16,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // A list of conditions for the policy. The conditions are combined by AND or
+ // OR according to the `combiner` field. If the combined conditions evaluate
+ // to true, then an incident is created. A policy can have from one to six
+ // conditions.
+ // If `condition_time_series_query_language` is present, it must be the only
+ // `condition`.
+ // If `condition_monitoring_query_language` is present, it must be the only
+ // `condition`.
+ Conditions []*AlertPolicy_Condition `protobuf:"bytes,12,rep,name=conditions,proto3" json:"conditions,omitempty"`
+ // How to combine the results of multiple conditions to determine if an
+ // incident should be opened.
+ // If `condition_time_series_query_language` is present, this must be
+ // `COMBINE_UNSPECIFIED`.
+ Combiner AlertPolicy_ConditionCombinerType `protobuf:"varint,6,opt,name=combiner,proto3,enum=google.monitoring.v3.AlertPolicy_ConditionCombinerType" json:"combiner,omitempty"`
+ // Whether or not the policy is enabled. On write, the default interpretation
+ // if unset is that the policy is enabled. On read, clients should not make
+ // any assumption about the state if it has not been populated. The
+ // field should always be populated on List and Get operations, unless
+ // a field projection has been specified that strips it out.
+ Enabled *wrapperspb.BoolValue `protobuf:"bytes,17,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ // Read-only description of how the alert policy is invalid. This field is
+ // only set when the alert policy is invalid. An invalid alert policy will not
+ // generate incidents.
+ Validity *status.Status `protobuf:"bytes,18,opt,name=validity,proto3" json:"validity,omitempty"`
+ // Identifies the notification channels to which notifications should be sent
+ // when incidents are opened or closed or when new violations occur on
+ // an already opened incident. Each element of this array corresponds to
+ // the `name` field in each of the
+ // [`NotificationChannel`][google.monitoring.v3.NotificationChannel]
+ // objects that are returned from the [`ListNotificationChannels`]
+ // [google.monitoring.v3.NotificationChannelService.ListNotificationChannels]
+ // method.
The format of the entries in this field is: + // + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + NotificationChannels []string `protobuf:"bytes,14,rep,name=notification_channels,json=notificationChannels,proto3" json:"notification_channels,omitempty"` + // A read-only record of the creation of the alerting policy. If provided + // in a call to create or update, this field will be ignored. + CreationRecord *MutationRecord `protobuf:"bytes,10,opt,name=creation_record,json=creationRecord,proto3" json:"creation_record,omitempty"` + // A read-only record of the most recent change to the alerting policy. If + // provided in a call to create or update, this field will be ignored. + MutationRecord *MutationRecord `protobuf:"bytes,11,opt,name=mutation_record,json=mutationRecord,proto3" json:"mutation_record,omitempty"` + // Control over how this alert policy's notification channels are notified. + AlertStrategy *AlertPolicy_AlertStrategy `protobuf:"bytes,21,opt,name=alert_strategy,json=alertStrategy,proto3" json:"alert_strategy,omitempty"` + // Optional. The severity of an alert policy indicates how important incidents + // generated by that policy are. The severity level will be displayed on the + // Incident detail page and in notifications. + Severity AlertPolicy_Severity `protobuf:"varint,22,opt,name=severity,proto3,enum=google.monitoring.v3.AlertPolicy_Severity" json:"severity,omitempty"` +} + +func (x *AlertPolicy) Reset() { + *x = AlertPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy) ProtoMessage() {} + +func (x *AlertPolicy) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy.ProtoReflect.Descriptor instead. 
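+//
+// Illustrative sketch (not part of the generated file): a minimal policy can
+// be assembled from the fields above. The project and channel IDs are
+// placeholders, and `AlertPolicy_OR` is assumed to be the combiner constant
+// generated alongside `AlertPolicy_COMBINE_UNSPECIFIED`:
+//
+//	policy := &AlertPolicy{
+//		DisplayName:          "High CPU",
+//		Combiner:             AlertPolicy_OR,
+//		Severity:             AlertPolicy_CRITICAL,
+//		Enabled:              wrapperspb.Bool(true),
+//		NotificationChannels: []string{"projects/my-project/notificationChannels/12345"},
+//	}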
+func (*AlertPolicy) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *AlertPolicy) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *AlertPolicy) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (x *AlertPolicy) GetDocumentation() *AlertPolicy_Documentation {
+ if x != nil {
+ return x.Documentation
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetUserLabels() map[string]string {
+ if x != nil {
+ return x.UserLabels
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetConditions() []*AlertPolicy_Condition {
+ if x != nil {
+ return x.Conditions
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetCombiner() AlertPolicy_ConditionCombinerType {
+ if x != nil {
+ return x.Combiner
+ }
+ return AlertPolicy_COMBINE_UNSPECIFIED
+}
+
+func (x *AlertPolicy) GetEnabled() *wrapperspb.BoolValue {
+ if x != nil {
+ return x.Enabled
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetValidity() *status.Status {
+ if x != nil {
+ return x.Validity
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetNotificationChannels() []string {
+ if x != nil {
+ return x.NotificationChannels
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetCreationRecord() *MutationRecord {
+ if x != nil {
+ return x.CreationRecord
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetMutationRecord() *MutationRecord {
+ if x != nil {
+ return x.MutationRecord
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetAlertStrategy() *AlertPolicy_AlertStrategy {
+ if x != nil {
+ return x.AlertStrategy
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetSeverity() AlertPolicy_Severity {
+ if x != nil {
+ return x.Severity
+ }
+ return AlertPolicy_SEVERITY_UNSPECIFIED
+}
+
+// Documentation that is included in the notifications and incidents
+// pertaining to this policy.
+type AlertPolicy_Documentation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The body of the documentation, interpreted according to `mime_type`.
+ // The content may not exceed 8,192 Unicode characters or 10,240 bytes when
+ // encoded in UTF-8 format, whichever limit is smaller. This text can be
+ // [templatized by using
+ // variables](https://cloud.google.com/monitoring/alerts/doc-variables).
+ Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"`
+ // The format of the `content` field. Presently, only the value
+ // `"text/markdown"` is supported. See
+ // [Markdown](https://en.wikipedia.org/wiki/Markdown) for more information.
+ MimeType string `protobuf:"bytes,2,opt,name=mime_type,json=mimeType,proto3" json:"mime_type,omitempty"`
+ // Optional. The subject line of the notification. The subject line may not
+ // exceed 10,240 bytes. In notifications generated by this policy, the
+ // contents of the subject line after variable expansion will be truncated
+ // to 255 bytes or shorter at the latest UTF-8 character boundary. The
+ // 255-byte limit is recommended by [this
+ // thread](https://stackoverflow.com/questions/1592291/what-is-the-email-subject-length-limit).
+ // It is both the limit imposed by some third-party ticketing products and
+ // a common length for textual database fields (VARCHAR(255)).
+ //
+ // The contents of the subject line can be [templatized by using
+ // variables](https://cloud.google.com/monitoring/alerts/doc-variables).
+ // If this field is missing or empty, a default subject line will be + // generated. + Subject string `protobuf:"bytes,3,opt,name=subject,proto3" json:"subject,omitempty"` + // Optional. Links to content such as playbooks, repositories, and other + // resources. This field can contain up to 3 entries. + Links []*AlertPolicy_Documentation_Link `protobuf:"bytes,4,rep,name=links,proto3" json:"links,omitempty"` +} + +func (x *AlertPolicy_Documentation) Reset() { + *x = AlertPolicy_Documentation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Documentation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Documentation) ProtoMessage() {} + +func (x *AlertPolicy_Documentation) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Documentation.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Documentation) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *AlertPolicy_Documentation) GetContent() string { + if x != nil { + return x.Content + } + return "" +} + +func (x *AlertPolicy_Documentation) GetMimeType() string { + if x != nil { + return x.MimeType + } + return "" +} + +func (x *AlertPolicy_Documentation) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + +func (x *AlertPolicy_Documentation) GetLinks() []*AlertPolicy_Documentation_Link { + if x != nil { + return x.Links + } + return nil +} + +// A condition is a true/false test that determines when an alerting policy +// should open an incident. If a condition evaluates to true, it signifies +// that something is wrong. +type AlertPolicy_Condition struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required if the condition exists. The unique resource name for this + // condition. Its format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] + // + // `[CONDITION_ID]` is assigned by Cloud Monitoring when the + // condition is created as part of a new or updated alerting policy. + // + // When calling the + // [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy] + // method, do not include the `name` field in the conditions of the + // requested alerting policy. Cloud Monitoring creates the + // condition identifiers and includes them in the new policy. + // + // When calling the + // [alertPolicies.update][google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy] + // method to update a policy, including a condition `name` causes the + // existing condition to be updated. Conditions without names are added to + // the updated policy. Existing conditions are deleted if they are not + // updated. + // + // Best practice is to preserve `[CONDITION_ID]` if you make only small + // changes, such as those to condition thresholds, durations, or trigger + // values. Otherwise, treat the change as a new condition and let the + // existing condition be deleted. 
+ Name string `protobuf:"bytes,12,opt,name=name,proto3" json:"name,omitempty"` + // A short name or phrase used to identify the condition in dashboards, + // notifications, and incidents. To avoid confusion, don't use the same + // display name for multiple conditions in the same policy. + DisplayName string `protobuf:"bytes,6,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Only one of the following condition types will be specified. + // + // Types that are assignable to Condition: + // + // *AlertPolicy_Condition_ConditionThreshold + // *AlertPolicy_Condition_ConditionAbsent + // *AlertPolicy_Condition_ConditionMatchedLog + // *AlertPolicy_Condition_ConditionMonitoringQueryLanguage + // *AlertPolicy_Condition_ConditionPrometheusQueryLanguage + Condition isAlertPolicy_Condition_Condition `protobuf_oneof:"condition"` +} + +func (x *AlertPolicy_Condition) Reset() { + *x = AlertPolicy_Condition{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Condition) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Condition) ProtoMessage() {} + +func (x *AlertPolicy_Condition) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Condition.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Condition) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *AlertPolicy_Condition) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *AlertPolicy_Condition) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (m *AlertPolicy_Condition) GetCondition() isAlertPolicy_Condition_Condition { + if m != nil { + return m.Condition + } + return nil +} + +func (x *AlertPolicy_Condition) GetConditionThreshold() *AlertPolicy_Condition_MetricThreshold { + if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionThreshold); ok { + return x.ConditionThreshold + } + return nil +} + +func (x *AlertPolicy_Condition) GetConditionAbsent() *AlertPolicy_Condition_MetricAbsence { + if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionAbsent); ok { + return x.ConditionAbsent + } + return nil +} + +func (x *AlertPolicy_Condition) GetConditionMatchedLog() *AlertPolicy_Condition_LogMatch { + if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionMatchedLog); ok { + return x.ConditionMatchedLog + } + return nil +} + +func (x *AlertPolicy_Condition) GetConditionMonitoringQueryLanguage() *AlertPolicy_Condition_MonitoringQueryLanguageCondition { + if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionMonitoringQueryLanguage); ok { + return x.ConditionMonitoringQueryLanguage + } + return nil +} + +func (x *AlertPolicy_Condition) GetConditionPrometheusQueryLanguage() *AlertPolicy_Condition_PrometheusQueryLanguageCondition { + if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionPrometheusQueryLanguage); ok { + return x.ConditionPrometheusQueryLanguage + } + return nil +} + +type isAlertPolicy_Condition_Condition interface { + isAlertPolicy_Condition_Condition() +} + +type 
AlertPolicy_Condition_ConditionThreshold struct {
+ // A condition that compares a time series against a threshold.
+ ConditionThreshold *AlertPolicy_Condition_MetricThreshold `protobuf:"bytes,1,opt,name=condition_threshold,json=conditionThreshold,proto3,oneof"`
+}
+
+type AlertPolicy_Condition_ConditionAbsent struct {
+ // A condition that checks that a time series continues to
+ // receive new data points.
+ ConditionAbsent *AlertPolicy_Condition_MetricAbsence `protobuf:"bytes,2,opt,name=condition_absent,json=conditionAbsent,proto3,oneof"`
+}
+
+type AlertPolicy_Condition_ConditionMatchedLog struct {
+ // A condition that checks for log messages matching given constraints. If
+ // set, no other conditions can be present.
+ ConditionMatchedLog *AlertPolicy_Condition_LogMatch `protobuf:"bytes,20,opt,name=condition_matched_log,json=conditionMatchedLog,proto3,oneof"`
+}
+
+type AlertPolicy_Condition_ConditionMonitoringQueryLanguage struct {
+ // A condition that uses the Monitoring Query Language to define
+ // alerts.
+ ConditionMonitoringQueryLanguage *AlertPolicy_Condition_MonitoringQueryLanguageCondition `protobuf:"bytes,19,opt,name=condition_monitoring_query_language,json=conditionMonitoringQueryLanguage,proto3,oneof"`
+}
+
+type AlertPolicy_Condition_ConditionPrometheusQueryLanguage struct {
+ // A condition that uses the Prometheus query language to define alerts.
+ ConditionPrometheusQueryLanguage *AlertPolicy_Condition_PrometheusQueryLanguageCondition `protobuf:"bytes,21,opt,name=condition_prometheus_query_language,json=conditionPrometheusQueryLanguage,proto3,oneof"`
+}
+
+func (*AlertPolicy_Condition_ConditionThreshold) isAlertPolicy_Condition_Condition() {}
+
+func (*AlertPolicy_Condition_ConditionAbsent) isAlertPolicy_Condition_Condition() {}
+
+func (*AlertPolicy_Condition_ConditionMatchedLog) isAlertPolicy_Condition_Condition() {}
+
+func (*AlertPolicy_Condition_ConditionMonitoringQueryLanguage) isAlertPolicy_Condition_Condition() {}
+
+func (*AlertPolicy_Condition_ConditionPrometheusQueryLanguage) isAlertPolicy_Condition_Condition() {}
+
+// Control over how the notification channels in `notification_channels`
+// are notified when this alert fires.
+type AlertPolicy_AlertStrategy struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required for alert policies with a `LogMatch` condition.
+ //
+ // This limit is not implemented for alert policies that are not log-based.
+ NotificationRateLimit *AlertPolicy_AlertStrategy_NotificationRateLimit `protobuf:"bytes,1,opt,name=notification_rate_limit,json=notificationRateLimit,proto3" json:"notification_rate_limit,omitempty"`
+ // If an alert policy that was active has no data for this long, any open
+ // incidents will close.
+ AutoClose *durationpb.Duration `protobuf:"bytes,3,opt,name=auto_close,json=autoClose,proto3" json:"auto_close,omitempty"`
+ // Control how notifications will be sent out, on a per-channel basis.
+ NotificationChannelStrategy []*AlertPolicy_AlertStrategy_NotificationChannelStrategy `protobuf:"bytes,4,rep,name=notification_channel_strategy,json=notificationChannelStrategy,proto3" json:"notification_channel_strategy,omitempty"` +} + +func (x *AlertPolicy_AlertStrategy) Reset() { + *x = AlertPolicy_AlertStrategy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_AlertStrategy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_AlertStrategy) ProtoMessage() {} + +func (x *AlertPolicy_AlertStrategy) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_AlertStrategy.ProtoReflect.Descriptor instead. +func (*AlertPolicy_AlertStrategy) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 2} +} + +func (x *AlertPolicy_AlertStrategy) GetNotificationRateLimit() *AlertPolicy_AlertStrategy_NotificationRateLimit { + if x != nil { + return x.NotificationRateLimit + } + return nil +} + +func (x *AlertPolicy_AlertStrategy) GetAutoClose() *durationpb.Duration { + if x != nil { + return x.AutoClose + } + return nil +} + +func (x *AlertPolicy_AlertStrategy) GetNotificationChannelStrategy() []*AlertPolicy_AlertStrategy_NotificationChannelStrategy { + if x != nil { + return x.NotificationChannelStrategy + } + return nil +} + +// Links to content such as playbooks, repositories, and other resources. +type AlertPolicy_Documentation_Link struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A short display name for the link. The display name must not be empty + // or exceed 63 characters. Example: "playbook". + DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The url of a webpage. + // A url can be templatized by using variables + // in the path or the query parameters. The total length of a URL should + // not exceed 2083 characters before and after variable expansion. + // Example: "https://my_domain.com/playbook?name=${resource.name}" + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` +} + +func (x *AlertPolicy_Documentation_Link) Reset() { + *x = AlertPolicy_Documentation_Link{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Documentation_Link) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Documentation_Link) ProtoMessage() {} + +func (x *AlertPolicy_Documentation_Link) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Documentation_Link.ProtoReflect.Descriptor instead. 
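+//
+// Illustrative sketch (not part of the generated file): documentation with a
+// playbook link, within the size limits described above; the content and URL
+// are placeholders:
+//
+//	doc := &AlertPolicy_Documentation{
+//		Content:  "CPU is saturated. See the linked playbook.",
+//		MimeType: "text/markdown",
+//		Links: []*AlertPolicy_Documentation_Link{{
+//			DisplayName: "playbook",
+//			Url:         "https://example.com/playbook?name=${resource.name}",
+//		}},
+//	}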
+func (*AlertPolicy_Documentation_Link) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 0, 0} +} + +func (x *AlertPolicy_Documentation_Link) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *AlertPolicy_Documentation_Link) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +// Specifies how many time series must fail a predicate to trigger a +// condition. If not specified, then a `{count: 1}` trigger is used. +type AlertPolicy_Condition_Trigger struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A type of trigger. + // + // Types that are assignable to Type: + // + // *AlertPolicy_Condition_Trigger_Count + // *AlertPolicy_Condition_Trigger_Percent + Type isAlertPolicy_Condition_Trigger_Type `protobuf_oneof:"type"` +} + +func (x *AlertPolicy_Condition_Trigger) Reset() { + *x = AlertPolicy_Condition_Trigger{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Condition_Trigger) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Condition_Trigger) ProtoMessage() {} + +func (x *AlertPolicy_Condition_Trigger) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Condition_Trigger.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Condition_Trigger) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 0} +} + +func (m *AlertPolicy_Condition_Trigger) GetType() isAlertPolicy_Condition_Trigger_Type { + if m != nil { + return m.Type + } + return nil +} + +func (x *AlertPolicy_Condition_Trigger) GetCount() int32 { + if x, ok := x.GetType().(*AlertPolicy_Condition_Trigger_Count); ok { + return x.Count + } + return 0 +} + +func (x *AlertPolicy_Condition_Trigger) GetPercent() float64 { + if x, ok := x.GetType().(*AlertPolicy_Condition_Trigger_Percent); ok { + return x.Percent + } + return 0 +} + +type isAlertPolicy_Condition_Trigger_Type interface { + isAlertPolicy_Condition_Trigger_Type() +} + +type AlertPolicy_Condition_Trigger_Count struct { + // The absolute number of time series that must fail + // the predicate for the condition to be triggered. + Count int32 `protobuf:"varint,1,opt,name=count,proto3,oneof"` +} + +type AlertPolicy_Condition_Trigger_Percent struct { + // The percentage of time series that must fail the + // predicate for the condition to be triggered. + Percent float64 `protobuf:"fixed64,2,opt,name=percent,proto3,oneof"` +} + +func (*AlertPolicy_Condition_Trigger_Count) isAlertPolicy_Condition_Trigger_Type() {} + +func (*AlertPolicy_Condition_Trigger_Percent) isAlertPolicy_Condition_Trigger_Type() {} + +// A condition type that compares a collection of time series +// against a threshold. +type AlertPolicy_Condition_MetricThreshold struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. 
A + // [filter](https://cloud.google.com/monitoring/api/v3/filters) that + // identifies which time series should be compared with the threshold. + // + // The filter is similar to the one that is specified in the + // [`ListTimeSeries` + // request](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) + // (that call is useful to verify the time series that will be retrieved / + // processed). The filter must specify the metric type and the resource + // type. Optionally, it can specify resource labels and metric labels. + // This field must not exceed 2048 Unicode characters in length. + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // Specifies the alignment of data points in individual time series as + // well as how to combine the retrieved time series together (such as + // when aggregating multiple streams on each resource to a single + // stream for each resource or when aggregating streams across all + // members of a group of resources). Multiple aggregations + // are applied in the order specified. + // + // This field is similar to the one in the [`ListTimeSeries` + // request](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). + // It is advisable to use the `ListTimeSeries` method when debugging this + // field. + Aggregations []*Aggregation `protobuf:"bytes,8,rep,name=aggregations,proto3" json:"aggregations,omitempty"` + // A [filter](https://cloud.google.com/monitoring/api/v3/filters) that + // identifies a time series that should be used as the denominator of a + // ratio that will be compared with the threshold. If a + // `denominator_filter` is specified, the time series specified by the + // `filter` field will be used as the numerator. + // + // The filter must specify the metric type and optionally may contain + // restrictions on resource type, resource labels, and metric labels. + // This field may not exceed 2048 Unicode characters in length. + DenominatorFilter string `protobuf:"bytes,9,opt,name=denominator_filter,json=denominatorFilter,proto3" json:"denominator_filter,omitempty"` + // Specifies the alignment of data points in individual time series + // selected by `denominatorFilter` as + // well as how to combine the retrieved time series together (such as + // when aggregating multiple streams on each resource to a single + // stream for each resource or when aggregating streams across all + // members of a group of resources). + // + // When computing ratios, the `aggregations` and + // `denominator_aggregations` fields must use the same alignment period + // and produce time series that have the same periodicity and labels. + DenominatorAggregations []*Aggregation `protobuf:"bytes,10,rep,name=denominator_aggregations,json=denominatorAggregations,proto3" json:"denominator_aggregations,omitempty"` + // When this field is present, the `MetricThreshold` condition forecasts + // whether the time series is predicted to violate the threshold within + // the `forecast_horizon`. When this field is not set, the + // `MetricThreshold` tests the current value of the timeseries against the + // threshold. + ForecastOptions *AlertPolicy_Condition_MetricThreshold_ForecastOptions `protobuf:"bytes,12,opt,name=forecast_options,json=forecastOptions,proto3" json:"forecast_options,omitempty"` + // The comparison to apply between the time series (indicated by `filter` + // and `aggregation`) and the threshold (indicated by `threshold_value`). 
+ // The comparison is applied on each time series, with the time series + // on the left-hand side and the threshold on the right-hand side. + // + // Only `COMPARISON_LT` and `COMPARISON_GT` are supported currently. + Comparison ComparisonType `protobuf:"varint,4,opt,name=comparison,proto3,enum=google.monitoring.v3.ComparisonType" json:"comparison,omitempty"` + // A value against which to compare the time series. + ThresholdValue float64 `protobuf:"fixed64,5,opt,name=threshold_value,json=thresholdValue,proto3" json:"threshold_value,omitempty"` + // The amount of time that a time series must violate the + // threshold to be considered failing. Currently, only values + // that are a multiple of a minute--e.g., 0, 60, 120, or 300 + // seconds--are supported. If an invalid value is given, an + // error will be returned. When choosing a duration, it is useful to + // keep in mind the frequency of the underlying time series data + // (which may also be affected by any alignments specified in the + // `aggregations` field); a good duration is long enough so that a single + // outlier does not generate spurious alerts, but short enough that + // unhealthy states are detected and alerted on quickly. + Duration *durationpb.Duration `protobuf:"bytes,6,opt,name=duration,proto3" json:"duration,omitempty"` + // The number/percent of time series for which the comparison must hold + // in order for the condition to trigger. If unspecified, then the + // condition will trigger if the comparison is true for any of the + // time series that have been identified by `filter` and `aggregations`, + // or by the ratio, if `denominator_filter` and `denominator_aggregations` + // are specified. + Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,7,opt,name=trigger,proto3" json:"trigger,omitempty"` + // A condition control that determines how metric-threshold conditions + // are evaluated when data stops arriving. To use this control, the value + // of the `duration` field must be greater than or equal to 60 seconds. + EvaluationMissingData AlertPolicy_Condition_EvaluationMissingData `protobuf:"varint,11,opt,name=evaluation_missing_data,json=evaluationMissingData,proto3,enum=google.monitoring.v3.AlertPolicy_Condition_EvaluationMissingData" json:"evaluation_missing_data,omitempty"` +} + +func (x *AlertPolicy_Condition_MetricThreshold) Reset() { + *x = AlertPolicy_Condition_MetricThreshold{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Condition_MetricThreshold) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Condition_MetricThreshold) ProtoMessage() {} + +func (x *AlertPolicy_Condition_MetricThreshold) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Condition_MetricThreshold.ProtoReflect.Descriptor instead. 
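+//
+// Illustrative sketch (not part of the generated file): a threshold condition
+// wired into the `condition` oneof. The filter and values are placeholders,
+// the duration uses the literal `Seconds` form to avoid extra imports, and
+// `ComparisonType_COMPARISON_GT` is assumed to be the constant generated
+// alongside `ComparisonType_COMPARISON_UNSPECIFIED` in this package:
+//
+//	cond := &AlertPolicy_Condition{
+//		DisplayName: "CPU > 90% for 5 minutes",
+//		Condition: &AlertPolicy_Condition_ConditionThreshold{
+//			ConditionThreshold: &AlertPolicy_Condition_MetricThreshold{
+//				Filter:         `metric.type="compute.googleapis.com/instance/cpu/utilization"`,
+//				Comparison:     ComparisonType_COMPARISON_GT,
+//				ThresholdValue: 0.9,
+//				Duration:       &durationpb.Duration{Seconds: 300},
+//				Trigger:        &AlertPolicy_Condition_Trigger{Type: &AlertPolicy_Condition_Trigger_Count{Count: 1}},
+//			},
+//		},
+//	}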
+func (*AlertPolicy_Condition_MetricThreshold) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 1} +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetAggregations() []*Aggregation { + if x != nil { + return x.Aggregations + } + return nil +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetDenominatorFilter() string { + if x != nil { + return x.DenominatorFilter + } + return "" +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetDenominatorAggregations() []*Aggregation { + if x != nil { + return x.DenominatorAggregations + } + return nil +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetForecastOptions() *AlertPolicy_Condition_MetricThreshold_ForecastOptions { + if x != nil { + return x.ForecastOptions + } + return nil +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetComparison() ComparisonType { + if x != nil { + return x.Comparison + } + return ComparisonType_COMPARISON_UNSPECIFIED +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetThresholdValue() float64 { + if x != nil { + return x.ThresholdValue + } + return 0 +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetDuration() *durationpb.Duration { + if x != nil { + return x.Duration + } + return nil +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetTrigger() *AlertPolicy_Condition_Trigger { + if x != nil { + return x.Trigger + } + return nil +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetEvaluationMissingData() AlertPolicy_Condition_EvaluationMissingData { + if x != nil { + return x.EvaluationMissingData + } + return AlertPolicy_Condition_EVALUATION_MISSING_DATA_UNSPECIFIED +} + +// A condition type that checks that monitored resources +// are reporting data. The configuration defines a metric and +// a set of monitored resources. The predicate is considered in violation +// when a time series for the specified metric of a monitored +// resource does not include any data in the specified `duration`. +type AlertPolicy_Condition_MetricAbsence struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. A + // [filter](https://cloud.google.com/monitoring/api/v3/filters) that + // identifies which time series should be compared with the threshold. + // + // The filter is similar to the one that is specified in the + // [`ListTimeSeries` + // request](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) + // (that call is useful to verify the time series that will be retrieved / + // processed). The filter must specify the metric type and the resource + // type. Optionally, it can specify resource labels and metric labels. + // This field must not exceed 2048 Unicode characters in length. + Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + // Specifies the alignment of data points in individual time series as + // well as how to combine the retrieved time series together (such as + // when aggregating multiple streams on each resource to a single + // stream for each resource or when aggregating streams across all + // members of a group of resources). Multiple aggregations + // are applied in the order specified. + // + // This field is similar to the one in the [`ListTimeSeries` + // request](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). 
+ // It is advisable to use the `ListTimeSeries` method when debugging this + // field. + Aggregations []*Aggregation `protobuf:"bytes,5,rep,name=aggregations,proto3" json:"aggregations,omitempty"` + // The amount of time that a time series must fail to report new + // data to be considered failing. The minimum value of this field + // is 120 seconds. Larger values that are a multiple of a + // minute--for example, 240 or 300 seconds--are supported. + // If an invalid value is given, an + // error will be returned. The `Duration.nanos` field is + // ignored. + Duration *durationpb.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"` + // The number/percent of time series for which the comparison must hold + // in order for the condition to trigger. If unspecified, then the + // condition will trigger if the comparison is true for any of the + // time series that have been identified by `filter` and `aggregations`. + Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,3,opt,name=trigger,proto3" json:"trigger,omitempty"` +} + +func (x *AlertPolicy_Condition_MetricAbsence) Reset() { + *x = AlertPolicy_Condition_MetricAbsence{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Condition_MetricAbsence) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Condition_MetricAbsence) ProtoMessage() {} + +func (x *AlertPolicy_Condition_MetricAbsence) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Condition_MetricAbsence.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Condition_MetricAbsence) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 2} +} + +func (x *AlertPolicy_Condition_MetricAbsence) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *AlertPolicy_Condition_MetricAbsence) GetAggregations() []*Aggregation { + if x != nil { + return x.Aggregations + } + return nil +} + +func (x *AlertPolicy_Condition_MetricAbsence) GetDuration() *durationpb.Duration { + if x != nil { + return x.Duration + } + return nil +} + +func (x *AlertPolicy_Condition_MetricAbsence) GetTrigger() *AlertPolicy_Condition_Trigger { + if x != nil { + return x.Trigger + } + return nil +} + +// A condition type that checks whether a log message in the [scoping +// project](https://cloud.google.com/monitoring/api/v3#project_name) +// satisfies the given filter. Logs from other projects in the metrics +// scope are not evaluated. +type AlertPolicy_Condition_LogMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. A logs-based filter. See [Advanced Logs + // Queries](https://cloud.google.com/logging/docs/view/advanced-queries) + // for how this filter should be constructed. + Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + // Optional. A map from a label key to an extractor expression, which is + // used to extract the value for this label key. 
Each entry in this map is + // a specification for how data should be extracted from log entries that + // match `filter`. Each combination of extracted values is treated as a + // separate rule for the purposes of triggering notifications. Label keys + // and corresponding values can be used in notifications generated by this + // condition. + // + // Please see [the documentation on logs-based metric + // `valueExtractor`s](https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics#LogMetric.FIELDS.value_extractor) + // for syntax and examples. + LabelExtractors map[string]string `protobuf:"bytes,2,rep,name=label_extractors,json=labelExtractors,proto3" json:"label_extractors,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *AlertPolicy_Condition_LogMatch) Reset() { + *x = AlertPolicy_Condition_LogMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Condition_LogMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Condition_LogMatch) ProtoMessage() {} + +func (x *AlertPolicy_Condition_LogMatch) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Condition_LogMatch.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Condition_LogMatch) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 3} +} + +func (x *AlertPolicy_Condition_LogMatch) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *AlertPolicy_Condition_LogMatch) GetLabelExtractors() map[string]string { + if x != nil { + return x.LabelExtractors + } + return nil +} + +// A condition type that allows alert policies to be defined using +// [Monitoring Query Language](https://cloud.google.com/monitoring/mql). +type AlertPolicy_Condition_MonitoringQueryLanguageCondition struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // [Monitoring Query Language](https://cloud.google.com/monitoring/mql) + // query that outputs a boolean stream. + Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + // The amount of time that a time series must violate the + // threshold to be considered failing. Currently, only values + // that are a multiple of a minute--e.g., 0, 60, 120, or 300 + // seconds--are supported. If an invalid value is given, an + // error will be returned. When choosing a duration, it is useful to + // keep in mind the frequency of the underlying time series data + // (which may also be affected by any alignments specified in the + // `aggregations` field); a good duration is long enough so that a single + // outlier does not generate spurious alerts, but short enough that + // unhealthy states are detected and alerted on quickly. + Duration *durationpb.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"` + // The number/percent of time series for which the comparison must hold + // in order for the condition to trigger. 
If unspecified, then the + // condition will trigger if the comparison is true for any of the + // time series that have been identified by `filter` and `aggregations`, + // or by the ratio, if `denominator_filter` and `denominator_aggregations` + // are specified. + Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,3,opt,name=trigger,proto3" json:"trigger,omitempty"` + // A condition control that determines how metric-threshold conditions + // are evaluated when data stops arriving. + EvaluationMissingData AlertPolicy_Condition_EvaluationMissingData `protobuf:"varint,4,opt,name=evaluation_missing_data,json=evaluationMissingData,proto3,enum=google.monitoring.v3.AlertPolicy_Condition_EvaluationMissingData" json:"evaluation_missing_data,omitempty"` +} + +func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) Reset() { + *x = AlertPolicy_Condition_MonitoringQueryLanguageCondition{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Condition_MonitoringQueryLanguageCondition) ProtoMessage() {} + +func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Condition_MonitoringQueryLanguageCondition.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Condition_MonitoringQueryLanguageCondition) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 4} +} + +func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) GetQuery() string { + if x != nil { + return x.Query + } + return "" +} + +func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) GetDuration() *durationpb.Duration { + if x != nil { + return x.Duration + } + return nil +} + +func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) GetTrigger() *AlertPolicy_Condition_Trigger { + if x != nil { + return x.Trigger + } + return nil +} + +func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) GetEvaluationMissingData() AlertPolicy_Condition_EvaluationMissingData { + if x != nil { + return x.EvaluationMissingData + } + return AlertPolicy_Condition_EVALUATION_MISSING_DATA_UNSPECIFIED +} + +// A condition type that allows alert policies to be defined using +// [Prometheus Query Language +// (PromQL)](https://prometheus.io/docs/prometheus/latest/querying/basics/). +// +// The PrometheusQueryLanguageCondition message contains information +// from a Prometheus alerting rule and its associated rule group. +// +// A Prometheus alerting rule is described +// [here](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/). +// The semantics of a Prometheus alerting rule is described +// [here](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#rule). +// +// A Prometheus rule group is described +// [here](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/). 
+// The semantics of a Prometheus rule group is described +// [here](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#rule_group). +// +// Because Cloud Alerting has no representation of a Prometheus rule +// group resource, we must embed the information of the parent rule +// group inside each of the conditions that refer to it. We must also +// update the contents of all Prometheus alerts in case the information +// of their rule group changes. +// +// The PrometheusQueryLanguageCondition protocol buffer combines the +// information of the corresponding rule group and alerting rule. +// The structure of the PrometheusQueryLanguageCondition protocol buffer +// does NOT mimic the structure of the Prometheus rule group and alerting +// rule YAML declarations. The PrometheusQueryLanguageCondition protocol +// buffer may change in the future to support future rule group and/or +// alerting rule features. There are no new such features at the present +// time (2023-06-26). +type AlertPolicy_Condition_PrometheusQueryLanguageCondition struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The PromQL expression to evaluate. Every evaluation cycle + // this expression is evaluated at the current time, and all resultant + // time series become pending/firing alerts. This field must not be empty. + Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + // Optional. Alerts are considered firing once their PromQL expression was + // evaluated to be "true" for this long. + // Alerts whose PromQL expression was not evaluated to be "true" for + // long enough are considered pending. + // Must be a non-negative duration or missing. + // This field is optional. Its default value is zero. + Duration *durationpb.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"` + // Optional. How often this rule should be evaluated. + // Must be a positive multiple of 30 seconds or missing. + // This field is optional. Its default value is 30 seconds. + // If this PrometheusQueryLanguageCondition was generated from a + // Prometheus alerting rule, then this value should be taken from the + // enclosing rule group. + EvaluationInterval *durationpb.Duration `protobuf:"bytes,3,opt,name=evaluation_interval,json=evaluationInterval,proto3" json:"evaluation_interval,omitempty"` + // Optional. Labels to add to or overwrite in the PromQL query result. + // Label names [must be + // valid](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels). + // Label values can be [templatized by using + // variables](https://cloud.google.com/monitoring/alerts/doc-variables). + // The only available variable names are the names of the labels in the + // PromQL result, including "__name__" and "value". "labels" may be empty. + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Optional. The rule group name of this alert in the corresponding + // Prometheus configuration file. + // + // Some external tools may require this field to be populated correctly + // in order to refer to the original Prometheus configuration file. + // The rule group name and the alert name are necessary to update the + // relevant AlertPolicies in case the definition of the rule group changes + // in the future. + // + // This field is optional. 
If this field is not empty, then it must + // contain a valid UTF-8 string. + // This field may not exceed 2048 Unicode characters in length. + RuleGroup string `protobuf:"bytes,5,opt,name=rule_group,json=ruleGroup,proto3" json:"rule_group,omitempty"` + // Optional. The alerting rule name of this alert in the corresponding + // Prometheus configuration file. + // + // Some external tools may require this field to be populated correctly + // in order to refer to the original Prometheus configuration file. + // The rule group name and the alert name are necessary to update the + // relevant AlertPolicies in case the definition of the rule group changes + // in the future. + // + // This field is optional. If this field is not empty, then it must be a + // [valid Prometheus label + // name](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels). + // This field may not exceed 2048 Unicode characters in length. + AlertRule string `protobuf:"bytes,6,opt,name=alert_rule,json=alertRule,proto3" json:"alert_rule,omitempty"` +} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) Reset() { + *x = AlertPolicy_Condition_PrometheusQueryLanguageCondition{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Condition_PrometheusQueryLanguageCondition) ProtoMessage() {} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Condition_PrometheusQueryLanguageCondition.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Condition_PrometheusQueryLanguageCondition) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 5} +} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetQuery() string { + if x != nil { + return x.Query + } + return "" +} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetDuration() *durationpb.Duration { + if x != nil { + return x.Duration + } + return nil +} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetEvaluationInterval() *durationpb.Duration { + if x != nil { + return x.EvaluationInterval + } + return nil +} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetRuleGroup() string { + if x != nil { + return x.RuleGroup + } + return "" +} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetAlertRule() string { + if x != nil { + return x.AlertRule + } + return "" +} + +// Options used when forecasting the time series and testing +// the predicted value against the threshold. +type AlertPolicy_Condition_MetricThreshold_ForecastOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. 
The length of time into the future to forecast whether a + // time series will violate the threshold. If the predicted value is + // found to violate the threshold, and the violation is observed in all + // forecasts made for the configured `duration`, then the time series is + // considered to be failing. + // The forecast horizon can range from 1 hour to 60 hours. + ForecastHorizon *durationpb.Duration `protobuf:"bytes,1,opt,name=forecast_horizon,json=forecastHorizon,proto3" json:"forecast_horizon,omitempty"` +} + +func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) Reset() { + *x = AlertPolicy_Condition_MetricThreshold_ForecastOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Condition_MetricThreshold_ForecastOptions) ProtoMessage() {} + +func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Condition_MetricThreshold_ForecastOptions.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Condition_MetricThreshold_ForecastOptions) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 1, 0} +} + +func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) GetForecastHorizon() *durationpb.Duration { + if x != nil { + return x.ForecastHorizon + } + return nil +} + +// Control over the rate of notifications sent to this alert policy's +// notification channels. +type AlertPolicy_AlertStrategy_NotificationRateLimit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Not more than one notification per `period`. + Period *durationpb.Duration `protobuf:"bytes,1,opt,name=period,proto3" json:"period,omitempty"` +} + +func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) Reset() { + *x = AlertPolicy_AlertStrategy_NotificationRateLimit{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_AlertStrategy_NotificationRateLimit) ProtoMessage() {} + +func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_AlertStrategy_NotificationRateLimit.ProtoReflect.Descriptor instead. 
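+//
+// Illustrative sketch (not part of the generated file): a strategy that
+// limits notifications to one per five minutes and auto-closes incidents
+// after thirty minutes without data; durations use the literal `Seconds`
+// form to avoid extra imports:
+//
+//	strategy := &AlertPolicy_AlertStrategy{
+//		NotificationRateLimit: &AlertPolicy_AlertStrategy_NotificationRateLimit{
+//			Period: &durationpb.Duration{Seconds: 300},
+//		},
+//		AutoClose: &durationpb.Duration{Seconds: 1800},
+//	}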
+func (*AlertPolicy_AlertStrategy_NotificationRateLimit) Descriptor() ([]byte, []int) {
+	return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 2, 0}
+}
+
+func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) GetPeriod() *durationpb.Duration {
+	if x != nil {
+		return x.Period
+	}
+	return nil
+}
+
+// Control over how the notification channels in `notification_channels`
+// are notified when this alert fires, on a per-channel basis.
+type AlertPolicy_AlertStrategy_NotificationChannelStrategy struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The full REST resource names for the notification channels that these
+	// settings apply to. Each of these corresponds to the name field in one
+	// of the NotificationChannel objects referenced in the
+	// notification_channels field of this AlertPolicy.
+	// The format is:
+	//
+	//	projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]
+	NotificationChannelNames []string `protobuf:"bytes,1,rep,name=notification_channel_names,json=notificationChannelNames,proto3" json:"notification_channel_names,omitempty"`
+	// The frequency at which to send reminder notifications for open
+	// incidents.
+	RenotifyInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=renotify_interval,json=renotifyInterval,proto3" json:"renotify_interval,omitempty"`
+}
+
+func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) Reset() {
+	*x = AlertPolicy_AlertStrategy_NotificationChannelStrategy{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_monitoring_v3_alert_proto_msgTypes[16]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_AlertStrategy_NotificationChannelStrategy) ProtoMessage() {}
+
+func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) ProtoReflect() protoreflect.Message {
+	mi := &file_google_monitoring_v3_alert_proto_msgTypes[16]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
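+// Editorial sketch, not generated code: one plausible way the two strategy
+// messages above might be populated together. The project and channel IDs
+// are placeholders, and the durations are illustrative only.
+var _ = &AlertPolicy_AlertStrategy{
+	NotificationRateLimit: &AlertPolicy_AlertStrategy_NotificationRateLimit{
+		Period: &durationpb.Duration{Seconds: 300}, // at most one notification per 5 minutes
+	},
+	NotificationChannelStrategy: []*AlertPolicy_AlertStrategy_NotificationChannelStrategy{{
+		NotificationChannelNames: []string{"projects/my-project/notificationChannels/my-channel"},
+		RenotifyInterval:         &durationpb.Duration{Seconds: 1800}, // remind every 30 minutes while the incident is open
+	}},
+}
+
+// Deprecated: Use AlertPolicy_AlertStrategy_NotificationChannelStrategy.ProtoReflect.Descriptor instead.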
+func (*AlertPolicy_AlertStrategy_NotificationChannelStrategy) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 2, 1} +} + +func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) GetNotificationChannelNames() []string { + if x != nil { + return x.NotificationChannelNames + } + return nil +} + +func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) GetRenotifyInterval() *durationpb.Duration { + if x != nil { + return x.RenotifyInterval + } + return nil +} + +var File_google_monitoring_v3_alert_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_alert_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, + 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x75, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x83, 0x2b, 0x0a, + 0x0b, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x55, 0x0a, 0x0d, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x44, 0x6f, + 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x64, 0x6f, 0x63, + 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x52, 0x0a, 0x0b, 0x75, 0x73, + 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x4b, + 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0c, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x53, 0x0a, 0x08, 0x63, + 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x37, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x62, 0x69, 0x6e, + 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x63, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x72, + 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x08, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x69, + 0x74, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x08, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x15, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x18, + 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0x4d, 0x0a, 0x0f, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x75, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0e, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x4d, 0x0a, 0x0f, 0x6d, 0x75, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0e, 0x6d, 0x75, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x56, 0x0a, 0x0e, 0x61, 0x6c, 0x65, + 0x72, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x15, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 
0x65, 0x72, 0x74, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, + 0x67, 0x79, 0x52, 0x0d, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, + 0x79, 0x12, 0x4b, 0x0a, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x16, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x1a, 0xf3, + 0x01, 0x0a, 0x0d, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, + 0x6d, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, + 0x69, 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x73, + 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x4f, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, + 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3b, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, + 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x75, 0x72, 0x6c, 0x1a, 0x92, 0x1a, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, + 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, + 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x6e, 0x0a, 0x13, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, + 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, + 0x6f, 0x6c, 0x64, 0x48, 0x00, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x66, 0x0a, 0x10, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 
0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, + 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x62, 0x73, 0x65, 0x6e, + 0x74, 0x12, 0x6a, 0x0a, 0x15, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, + 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x13, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x12, 0x9d, 0x01, + 0x0a, 0x23, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x6c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, + 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x20, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x9d, 0x01, + 0x0a, 0x23, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x6d, + 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x6c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, + 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x20, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x1a, 0x45, 0x0a, + 0x07, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x1a, 0x0a, 0x07, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x01, 0x48, 0x00, 0x52, 0x07, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x06, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x1a, 0xc8, 0x06, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, + 0x68, 0x72, 0x65, 
0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x45, 0x0a, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2d, 0x0a, 0x12, + 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, + 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5c, 0x0a, 0x18, 0x64, + 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x67, 0x67, 0x72, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x17, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x41, 0x67, 0x67, + 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x76, 0x0a, 0x10, 0x66, 0x6f, 0x72, + 0x65, 0x63, 0x61, 0x73, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, + 0x2e, 0x46, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x44, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, + 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6d, + 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x68, 0x72, 0x65, 0x73, + 0x68, 0x6f, 0x6c, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x0e, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, + 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 
0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x07, 0x74, + 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x79, 0x0a, 0x17, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x41, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, + 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x52, 0x15, 0x65, 0x76, 0x61, 0x6c, + 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, + 0x61, 0x1a, 0x5c, 0x0a, 0x0f, 0x46, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, + 0x5f, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, + 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x48, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x1a, + 0xf9, 0x01, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x63, + 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x45, + 0x0a, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, + 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x07, + 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, + 0x65, 0x72, 0x52, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x1a, 0xe1, 0x01, 0x0a, 0x08, + 0x4c, 0x6f, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x74, 0x0a, 0x10, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x65, + 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x49, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 
0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x67, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, + 0x63, 0x74, 0x6f, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, + 0xb9, 0x02, 0x0a, 0x20, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x4d, 0x0a, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, + 0x12, 0x79, 0x0a, 0x17, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x41, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, + 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, + 0x44, 0x61, 0x74, 0x61, 0x52, 0x15, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x1a, 0xc4, 0x03, 0x0a, 0x20, + 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x19, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x3a, 0x0a, 0x08, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 
0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4f, 0x0a, 0x13, 0x65, 0x76, 0x61, 0x6c, 0x75, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x12, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x75, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x58, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, + 0x22, 0x0a, 0x0a, 0x72, 0x75, 0x6c, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x72, 0x75, 0x6c, 0x65, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x12, 0x22, 0x0a, 0x0a, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x72, 0x75, 0x6c, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x61, 0x6c, + 0x65, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0xad, 0x01, 0x0a, 0x15, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x12, 0x27, 0x0a, 0x23, + 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, + 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x24, 0x0a, 0x20, 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, + 0x5f, 0x49, 0x4e, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x22, 0x0a, 0x1e, 0x45, + 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, + 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, + 0x21, 0x0a, 0x1d, 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, + 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x4e, 0x4f, 0x5f, 0x4f, 0x50, + 0x10, 0x03, 0x3a, 0x97, 0x02, 0xea, 0x41, 0x93, 0x02, 0x0a, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 
0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x61, 0x6c, + 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, + 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x7d, 0x12, 0x50, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, + 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, + 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x2f, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x7d, 0x12, 0x44, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, + 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x7d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, + 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0x0b, 0x0a, 0x09, + 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcc, 0x04, 0x0a, 0x0d, 0x41, 0x6c, + 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x7d, 0x0a, 0x17, 0x6e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x74, 0x65, + 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, + 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x52, 0x15, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x38, 0x0a, 0x0a, 0x61, 0x75, + 0x74, 0x6f, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x43, + 0x6c, 0x6f, 0x73, 0x65, 0x12, 0x8f, 0x01, 0x0a, 0x1d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x73, 0x74, + 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, + 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, + 0x6c, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x1b, 0x6e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, 0x74, + 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x1a, 
0x4a, 0x0a, 0x15, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, + 0x31, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, + 0x6f, 0x64, 0x1a, 0xa3, 0x01, 0x0a, 0x1b, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, + 0x67, 0x79, 0x12, 0x3c, 0x0a, 0x1a, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x18, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x12, 0x46, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x5f, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, 0x72, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x61, 0x0a, 0x15, 0x43, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x17, 0x0a, 0x13, 0x43, 0x4f, 0x4d, 0x42, 0x49, 0x4e, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x44, + 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x4f, 0x52, 0x10, 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x41, 0x4e, + 0x44, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x49, 0x4e, 0x47, 0x5f, + 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x03, 0x22, 0x4a, 0x0a, 0x08, 0x53, 0x65, + 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, + 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x49, 0x54, 0x49, 0x43, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x09, + 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x57, 0x41, 0x52, + 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x3a, 0xc9, 0x01, 0xea, 0x41, 0xc5, 0x01, 0x0a, 0x25, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x12, 0x39, 0x6f, 0x72, 0x67, 0x61, 
0x6e, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, + 0x12, 0x2d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, + 0x72, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x12, + 0x01, 0x2a, 0x42, 0xc5, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, + 0x0a, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, + 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, + 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_alert_proto_rawDescOnce sync.Once + file_google_monitoring_v3_alert_proto_rawDescData = file_google_monitoring_v3_alert_proto_rawDesc +) + +func file_google_monitoring_v3_alert_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_alert_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_alert_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_alert_proto_rawDescData) + }) + return file_google_monitoring_v3_alert_proto_rawDescData +} + +var file_google_monitoring_v3_alert_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_google_monitoring_v3_alert_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_google_monitoring_v3_alert_proto_goTypes = []any{ + (AlertPolicy_ConditionCombinerType)(0), // 0: google.monitoring.v3.AlertPolicy.ConditionCombinerType + (AlertPolicy_Severity)(0), // 1: google.monitoring.v3.AlertPolicy.Severity + (AlertPolicy_Condition_EvaluationMissingData)(0), // 2: google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData + (*AlertPolicy)(nil), // 3: google.monitoring.v3.AlertPolicy + (*AlertPolicy_Documentation)(nil), // 4: google.monitoring.v3.AlertPolicy.Documentation + (*AlertPolicy_Condition)(nil), // 5: google.monitoring.v3.AlertPolicy.Condition + (*AlertPolicy_AlertStrategy)(nil), // 6: google.monitoring.v3.AlertPolicy.AlertStrategy + nil, // 7: google.monitoring.v3.AlertPolicy.UserLabelsEntry + (*AlertPolicy_Documentation_Link)(nil), // 8: google.monitoring.v3.AlertPolicy.Documentation.Link + (*AlertPolicy_Condition_Trigger)(nil), // 9: google.monitoring.v3.AlertPolicy.Condition.Trigger + 
(*AlertPolicy_Condition_MetricThreshold)(nil), // 10: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold + (*AlertPolicy_Condition_MetricAbsence)(nil), // 11: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence + (*AlertPolicy_Condition_LogMatch)(nil), // 12: google.monitoring.v3.AlertPolicy.Condition.LogMatch + (*AlertPolicy_Condition_MonitoringQueryLanguageCondition)(nil), // 13: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition + (*AlertPolicy_Condition_PrometheusQueryLanguageCondition)(nil), // 14: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition + (*AlertPolicy_Condition_MetricThreshold_ForecastOptions)(nil), // 15: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions + nil, // 16: google.monitoring.v3.AlertPolicy.Condition.LogMatch.LabelExtractorsEntry + nil, // 17: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.LabelsEntry + (*AlertPolicy_AlertStrategy_NotificationRateLimit)(nil), // 18: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit + (*AlertPolicy_AlertStrategy_NotificationChannelStrategy)(nil), // 19: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy + (*wrapperspb.BoolValue)(nil), // 20: google.protobuf.BoolValue + (*status.Status)(nil), // 21: google.rpc.Status + (*MutationRecord)(nil), // 22: google.monitoring.v3.MutationRecord + (*durationpb.Duration)(nil), // 23: google.protobuf.Duration + (*Aggregation)(nil), // 24: google.monitoring.v3.Aggregation + (ComparisonType)(0), // 25: google.monitoring.v3.ComparisonType +} +var file_google_monitoring_v3_alert_proto_depIdxs = []int32{ + 4, // 0: google.monitoring.v3.AlertPolicy.documentation:type_name -> google.monitoring.v3.AlertPolicy.Documentation + 7, // 1: google.monitoring.v3.AlertPolicy.user_labels:type_name -> google.monitoring.v3.AlertPolicy.UserLabelsEntry + 5, // 2: google.monitoring.v3.AlertPolicy.conditions:type_name -> google.monitoring.v3.AlertPolicy.Condition + 0, // 3: google.monitoring.v3.AlertPolicy.combiner:type_name -> google.monitoring.v3.AlertPolicy.ConditionCombinerType + 20, // 4: google.monitoring.v3.AlertPolicy.enabled:type_name -> google.protobuf.BoolValue + 21, // 5: google.monitoring.v3.AlertPolicy.validity:type_name -> google.rpc.Status + 22, // 6: google.monitoring.v3.AlertPolicy.creation_record:type_name -> google.monitoring.v3.MutationRecord + 22, // 7: google.monitoring.v3.AlertPolicy.mutation_record:type_name -> google.monitoring.v3.MutationRecord + 6, // 8: google.monitoring.v3.AlertPolicy.alert_strategy:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy + 1, // 9: google.monitoring.v3.AlertPolicy.severity:type_name -> google.monitoring.v3.AlertPolicy.Severity + 8, // 10: google.monitoring.v3.AlertPolicy.Documentation.links:type_name -> google.monitoring.v3.AlertPolicy.Documentation.Link + 10, // 11: google.monitoring.v3.AlertPolicy.Condition.condition_threshold:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricThreshold + 11, // 12: google.monitoring.v3.AlertPolicy.Condition.condition_absent:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricAbsence + 12, // 13: google.monitoring.v3.AlertPolicy.Condition.condition_matched_log:type_name -> google.monitoring.v3.AlertPolicy.Condition.LogMatch + 13, // 14: google.monitoring.v3.AlertPolicy.Condition.condition_monitoring_query_language:type_name -> google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition + 14, // 15: 
google.monitoring.v3.AlertPolicy.Condition.condition_prometheus_query_language:type_name -> google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition + 18, // 16: google.monitoring.v3.AlertPolicy.AlertStrategy.notification_rate_limit:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit + 23, // 17: google.monitoring.v3.AlertPolicy.AlertStrategy.auto_close:type_name -> google.protobuf.Duration + 19, // 18: google.monitoring.v3.AlertPolicy.AlertStrategy.notification_channel_strategy:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy + 24, // 19: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.aggregations:type_name -> google.monitoring.v3.Aggregation + 24, // 20: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.denominator_aggregations:type_name -> google.monitoring.v3.Aggregation + 15, // 21: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.forecast_options:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions + 25, // 22: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.comparison:type_name -> google.monitoring.v3.ComparisonType + 23, // 23: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.duration:type_name -> google.protobuf.Duration + 9, // 24: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger + 2, // 25: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.evaluation_missing_data:type_name -> google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData + 24, // 26: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.aggregations:type_name -> google.monitoring.v3.Aggregation + 23, // 27: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.duration:type_name -> google.protobuf.Duration + 9, // 28: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger + 16, // 29: google.monitoring.v3.AlertPolicy.Condition.LogMatch.label_extractors:type_name -> google.monitoring.v3.AlertPolicy.Condition.LogMatch.LabelExtractorsEntry + 23, // 30: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.duration:type_name -> google.protobuf.Duration + 9, // 31: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger + 2, // 32: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.evaluation_missing_data:type_name -> google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData + 23, // 33: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.duration:type_name -> google.protobuf.Duration + 23, // 34: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.evaluation_interval:type_name -> google.protobuf.Duration + 17, // 35: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.labels:type_name -> google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.LabelsEntry + 23, // 36: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions.forecast_horizon:type_name -> google.protobuf.Duration + 23, // 37: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit.period:type_name -> google.protobuf.Duration + 23, // 38: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy.renotify_interval:type_name -> 
google.protobuf.Duration + 39, // [39:39] is the sub-list for method output_type + 39, // [39:39] is the sub-list for method input_type + 39, // [39:39] is the sub-list for extension type_name + 39, // [39:39] is the sub-list for extension extendee + 0, // [0:39] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_alert_proto_init() } +func file_google_monitoring_v3_alert_proto_init() { + if File_google_monitoring_v3_alert_proto != nil { + return + } + file_google_monitoring_v3_common_proto_init() + file_google_monitoring_v3_mutation_record_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_alert_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Documentation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Condition); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_AlertStrategy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Documentation_Link); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Condition_Trigger); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Condition_MetricThreshold); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Condition_MetricAbsence); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Condition_LogMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Condition_MonitoringQueryLanguageCondition); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Condition_PrometheusQueryLanguageCondition); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Condition_MetricThreshold_ForecastOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[15].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_AlertStrategy_NotificationRateLimit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[16].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_AlertStrategy_NotificationChannelStrategy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_monitoring_v3_alert_proto_msgTypes[2].OneofWrappers = []any{ + (*AlertPolicy_Condition_ConditionThreshold)(nil), + (*AlertPolicy_Condition_ConditionAbsent)(nil), + (*AlertPolicy_Condition_ConditionMatchedLog)(nil), + (*AlertPolicy_Condition_ConditionMonitoringQueryLanguage)(nil), + (*AlertPolicy_Condition_ConditionPrometheusQueryLanguage)(nil), + } + file_google_monitoring_v3_alert_proto_msgTypes[6].OneofWrappers = []any{ + (*AlertPolicy_Condition_Trigger_Count)(nil), + (*AlertPolicy_Condition_Trigger_Percent)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_alert_proto_rawDesc, + NumEnums: 3, + NumMessages: 17, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_alert_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_alert_proto_depIdxs, + EnumInfos: file_google_monitoring_v3_alert_proto_enumTypes, + MessageInfos: file_google_monitoring_v3_alert_proto_msgTypes, + }.Build() + File_google_monitoring_v3_alert_proto = out.File + file_google_monitoring_v3_alert_proto_rawDesc = nil + file_google_monitoring_v3_alert_proto_goTypes = nil + file_google_monitoring_v3_alert_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go new file mode 100644 index 00000000000..f0e149d16b6 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go @@ -0,0 +1,1045 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions:
+// 	protoc-gen-go v1.34.2
+// 	protoc        v4.25.3
+// source: google/monitoring/v3/alert_service.proto
+
+package monitoringpb
+
+import (
+	context "context"
+	reflect "reflect"
+	sync "sync"
+
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	emptypb "google.golang.org/protobuf/types/known/emptypb"
+	fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The protocol for the `CreateAlertPolicy` request.
+type CreateAlertPolicyRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required. The
+	// [project](https://cloud.google.com/monitoring/api/v3#project_name) in which
+	// to create the alerting policy. The format is:
+	//
+	//	projects/[PROJECT_ID_OR_NUMBER]
+	//
+	// Note that this field names the parent container in which the alerting
+	// policy will be written, not the name of the created policy. |name| must be
+	// a host project of a Metrics Scope; otherwise, an INVALID_ARGUMENT error is
+	// returned. The alerting policy that is returned will have a name that
+	// contains a normalized representation of this name as a prefix but adds a
+	// suffix of the form `/alertPolicies/[ALERT_POLICY_ID]`, identifying the
+	// policy in the container.
+	Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+	// Required. The requested alerting policy. You should omit the `name` field
+	// in this policy. The name will be returned in the new policy, including a
+	// new `[ALERT_POLICY_ID]` value.
+	AlertPolicy *AlertPolicy `protobuf:"bytes,2,opt,name=alert_policy,json=alertPolicy,proto3" json:"alert_policy,omitempty"`
+}
+
+func (x *CreateAlertPolicyRequest) Reset() {
+	*x = CreateAlertPolicyRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *CreateAlertPolicyRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateAlertPolicyRequest) ProtoMessage() {}
+
+func (x *CreateAlertPolicyRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateAlertPolicyRequest.ProtoReflect.Descriptor instead.
+func (*CreateAlertPolicyRequest) Descriptor() ([]byte, []int) {
+	return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CreateAlertPolicyRequest) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *CreateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy {
+	if x != nil {
+		return x.AlertPolicy
+	}
+	return nil
+}
+
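+// Editorial sketch, not generated code: a minimal CreateAlertPolicyRequest
+// for a hypothetical project. `Name` identifies the parent container; the
+// service assigns the `/alertPolicies/[ALERT_POLICY_ID]` suffix in the
+// returned policy. The display name and combiner are illustrative, and the
+// policy's conditions are omitted for brevity.
+var _ = &CreateAlertPolicyRequest{
+	Name: "projects/my-project", // parent container, not the policy name
+	AlertPolicy: &AlertPolicy{
+		DisplayName: "Example policy", // the `Name` field is left unset, per the doc above
+		Combiner:    AlertPolicy_AND,
+	},
+}
+
+// The protocol for the `GetAlertPolicy` request.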
+type GetAlertPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The alerting policy to retrieve. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetAlertPolicyRequest) Reset() { + *x = GetAlertPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetAlertPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAlertPolicyRequest) ProtoMessage() {} + +func (x *GetAlertPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetAlertPolicyRequest.ProtoReflect.Descriptor instead. +func (*GetAlertPolicyRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{1} +} + +func (x *GetAlertPolicyRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The protocol for the `ListAlertPolicies` request. +type ListAlertPoliciesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose + // alert policies are to be listed. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + // + // Note that this field names the parent container in which the alerting + // policies to be listed are stored. To retrieve a single alerting policy + // by name, use the + // [GetAlertPolicy][google.monitoring.v3.AlertPolicyService.GetAlertPolicy] + // operation, instead. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // If provided, this field specifies the criteria that must be met by + // alert policies to be included in the response. + // + // For more details, see [sorting and + // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). + Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + // A comma-separated list of fields by which to sort the result. Supports + // the same set of field references as the `filter` field. Entries can be + // prefixed with a minus sign to sort by the field in descending order. + // + // For more details, see [sorting and + // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). + OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` + // The maximum number of results to return in a single response. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return more results from the previous method call. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListAlertPoliciesRequest) Reset() { + *x = ListAlertPoliciesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListAlertPoliciesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListAlertPoliciesRequest) ProtoMessage() {} + +func (x *ListAlertPoliciesRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListAlertPoliciesRequest.ProtoReflect.Descriptor instead. +func (*ListAlertPoliciesRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{2} +} + +func (x *ListAlertPoliciesRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListAlertPoliciesRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListAlertPoliciesRequest) GetOrderBy() string { + if x != nil { + return x.OrderBy + } + return "" +} + +func (x *ListAlertPoliciesRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListAlertPoliciesRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The protocol for the `ListAlertPolicies` response. +type ListAlertPoliciesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The returned alert policies. + AlertPolicies []*AlertPolicy `protobuf:"bytes,3,rep,name=alert_policies,json=alertPolicies,proto3" json:"alert_policies,omitempty"` + // If there might be more results than were returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of alert policies in all pages. This number is only an + // estimate, and may change in subsequent pages. https://aip.dev/158 + TotalSize int32 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` +} + +func (x *ListAlertPoliciesResponse) Reset() { + *x = ListAlertPoliciesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListAlertPoliciesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListAlertPoliciesResponse) ProtoMessage() {} + +func (x *ListAlertPoliciesResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListAlertPoliciesResponse.ProtoReflect.Descriptor instead. 
+func (*ListAlertPoliciesResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{3} +} + +func (x *ListAlertPoliciesResponse) GetAlertPolicies() []*AlertPolicy { + if x != nil { + return x.AlertPolicies + } + return nil +} + +func (x *ListAlertPoliciesResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListAlertPoliciesResponse) GetTotalSize() int32 { + if x != nil { + return x.TotalSize + } + return 0 +} + +// The protocol for the `UpdateAlertPolicy` request. +type UpdateAlertPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. A list of alerting policy field names. If this field is not + // empty, each listed field in the existing alerting policy is set to the + // value of the corresponding field in the supplied policy (`alert_policy`), + // or to the field's default value if the field is not in the supplied + // alerting policy. Fields not listed retain their previous value. + // + // Examples of valid field masks include `display_name`, `documentation`, + // `documentation.content`, `documentation.mime_type`, `user_labels`, + // `user_label.nameofkey`, `enabled`, `conditions`, `combiner`, etc. + // + // If this field is empty, then the supplied alerting policy replaces the + // existing policy. It is the same as deleting the existing policy and + // adding the supplied policy, except for the following: + // + // - The new policy will have the same `[ALERT_POLICY_ID]` as the former + // policy. This gives you continuity with the former policy in your + // notifications and incidents. + // - Conditions in the new policy will keep their former `[CONDITION_ID]` if + // the supplied condition includes the `name` field with that + // `[CONDITION_ID]`. If the supplied condition omits the `name` field, + // then a new `[CONDITION_ID]` is created. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Required. The updated alerting policy or the updated values for the + // fields listed in `update_mask`. + // If `update_mask` is not empty, any fields in this policy that are + // not in `update_mask` are ignored. + AlertPolicy *AlertPolicy `protobuf:"bytes,3,opt,name=alert_policy,json=alertPolicy,proto3" json:"alert_policy,omitempty"` +} + +func (x *UpdateAlertPolicyRequest) Reset() { + *x = UpdateAlertPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateAlertPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateAlertPolicyRequest) ProtoMessage() {} + +func (x *UpdateAlertPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateAlertPolicyRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateAlertPolicyRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{4} +} + +func (x *UpdateAlertPolicyRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +func (x *UpdateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy { + if x != nil { + return x.AlertPolicy + } + return nil +} + +// The protocol for the `DeleteAlertPolicy` request. +type DeleteAlertPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The alerting policy to delete. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + // + // For more information, see [AlertPolicy][google.monitoring.v3.AlertPolicy]. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *DeleteAlertPolicyRequest) Reset() { + *x = DeleteAlertPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteAlertPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteAlertPolicyRequest) ProtoMessage() {} + +func (x *DeleteAlertPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteAlertPolicyRequest.ProtoReflect.Descriptor instead. +func (*DeleteAlertPolicyRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{5} +} + +func (x *DeleteAlertPolicyRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +var File_google_monitoring_v3_alert_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_alert_service_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, + 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa8, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, + 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x12, 0x25, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x49, 0x0a, 0x0c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x0b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, + 0x5a, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xcc, 0x01, 0x0a, 0x18, + 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x12, 0x25, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, 0x12, 0x1b, + 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, + 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xac, 0x01, 0x0a, 0x19, 0x4c, + 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0e, 0x61, 0x6c, 0x65, 0x72, + 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x52, 0x0d, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, + 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, + 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0xa2, 0x01, 0x0a, 0x18, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, + 0x61, 0x73, 0x6b, 0x12, 0x49, 0x0a, 0x0c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x0b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x5d, + 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, + 0x0a, 0x25, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, + 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x32, 0x9e, 0x08, + 0x0a, 0x12, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0xa8, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, + 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0xda, 0x41, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x12, 0x23, 0x2f, 0x76, 0x33, 0x2f, + 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, + 0x96, 0x01, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x41, 
0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x65, + 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x22, 0x34, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x27, 0x12, 0x25, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xb5, 0x01, 0x0a, 0x11, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, + 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x22, 0x4d, 0xda, 0x41, 0x11, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x61, 0x6c, 0x65, 0x72, 0x74, + 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x33, 0x3a, 0x0c, 0x61, + 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x23, 0x2f, 0x76, 0x33, + 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x12, 0x91, 0x01, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x34, + 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x2a, 0x25, 0x2f, + 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xcb, 0x01, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, + 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 
0x79, 0x22, 0x63, 0xda, + 0x41, 0x18, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2c, 0x61, 0x6c, + 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x42, + 0x3a, 0x0c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x32, 0x32, + 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, + 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, + 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, + 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x42, 0xcc, + 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x11, 0x41, 0x6c, 0x65, + 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, + 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, + 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_alert_service_proto_rawDescOnce sync.Once + file_google_monitoring_v3_alert_service_proto_rawDescData = file_google_monitoring_v3_alert_service_proto_rawDesc +) + +func file_google_monitoring_v3_alert_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_alert_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_alert_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_alert_service_proto_rawDescData) + }) + return 
file_google_monitoring_v3_alert_service_proto_rawDescData +} + +var file_google_monitoring_v3_alert_service_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_google_monitoring_v3_alert_service_proto_goTypes = []any{ + (*CreateAlertPolicyRequest)(nil), // 0: google.monitoring.v3.CreateAlertPolicyRequest + (*GetAlertPolicyRequest)(nil), // 1: google.monitoring.v3.GetAlertPolicyRequest + (*ListAlertPoliciesRequest)(nil), // 2: google.monitoring.v3.ListAlertPoliciesRequest + (*ListAlertPoliciesResponse)(nil), // 3: google.monitoring.v3.ListAlertPoliciesResponse + (*UpdateAlertPolicyRequest)(nil), // 4: google.monitoring.v3.UpdateAlertPolicyRequest + (*DeleteAlertPolicyRequest)(nil), // 5: google.monitoring.v3.DeleteAlertPolicyRequest + (*AlertPolicy)(nil), // 6: google.monitoring.v3.AlertPolicy + (*fieldmaskpb.FieldMask)(nil), // 7: google.protobuf.FieldMask + (*emptypb.Empty)(nil), // 8: google.protobuf.Empty +} +var file_google_monitoring_v3_alert_service_proto_depIdxs = []int32{ + 6, // 0: google.monitoring.v3.CreateAlertPolicyRequest.alert_policy:type_name -> google.monitoring.v3.AlertPolicy + 6, // 1: google.monitoring.v3.ListAlertPoliciesResponse.alert_policies:type_name -> google.monitoring.v3.AlertPolicy + 7, // 2: google.monitoring.v3.UpdateAlertPolicyRequest.update_mask:type_name -> google.protobuf.FieldMask + 6, // 3: google.monitoring.v3.UpdateAlertPolicyRequest.alert_policy:type_name -> google.monitoring.v3.AlertPolicy + 2, // 4: google.monitoring.v3.AlertPolicyService.ListAlertPolicies:input_type -> google.monitoring.v3.ListAlertPoliciesRequest + 1, // 5: google.monitoring.v3.AlertPolicyService.GetAlertPolicy:input_type -> google.monitoring.v3.GetAlertPolicyRequest + 0, // 6: google.monitoring.v3.AlertPolicyService.CreateAlertPolicy:input_type -> google.monitoring.v3.CreateAlertPolicyRequest + 5, // 7: google.monitoring.v3.AlertPolicyService.DeleteAlertPolicy:input_type -> google.monitoring.v3.DeleteAlertPolicyRequest + 4, // 8: google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy:input_type -> google.monitoring.v3.UpdateAlertPolicyRequest + 3, // 9: google.monitoring.v3.AlertPolicyService.ListAlertPolicies:output_type -> google.monitoring.v3.ListAlertPoliciesResponse + 6, // 10: google.monitoring.v3.AlertPolicyService.GetAlertPolicy:output_type -> google.monitoring.v3.AlertPolicy + 6, // 11: google.monitoring.v3.AlertPolicyService.CreateAlertPolicy:output_type -> google.monitoring.v3.AlertPolicy + 8, // 12: google.monitoring.v3.AlertPolicyService.DeleteAlertPolicy:output_type -> google.protobuf.Empty + 6, // 13: google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy:output_type -> google.monitoring.v3.AlertPolicy + 9, // [9:14] is the sub-list for method output_type + 4, // [4:9] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_alert_service_proto_init() } +func file_google_monitoring_v3_alert_service_proto_init() { + if File_google_monitoring_v3_alert_service_proto != nil { + return + } + file_google_monitoring_v3_alert_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_alert_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*CreateAlertPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_monitoring_v3_alert_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*GetAlertPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ListAlertPoliciesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*ListAlertPoliciesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*UpdateAlertPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*DeleteAlertPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_alert_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_alert_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_alert_service_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_alert_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_alert_service_proto = out.File + file_google_monitoring_v3_alert_service_proto_rawDesc = nil + file_google_monitoring_v3_alert_service_proto_goTypes = nil + file_google_monitoring_v3_alert_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// AlertPolicyServiceClient is the client API for AlertPolicyService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type AlertPolicyServiceClient interface { + // Lists the existing alerting policies for the workspace. + ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error) + // Gets a single alerting policy. + GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) + // Creates a new alerting policy. + // + // Design your application to single-thread API calls that modify the state of + // alerting policies in a single project. This includes calls to + // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. + CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) + // Deletes an alerting policy. 
+ // + // Design your application to single-thread API calls that modify the state of + // alerting policies in a single project. This includes calls to + // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. + DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Updates an alerting policy. You can either replace the entire policy with + // a new one or replace only certain fields in the current alerting policy by + // specifying the fields to be updated via `updateMask`. Returns the + // updated alerting policy. + // + // Design your application to single-thread API calls that modify the state of + // alerting policies in a single project. This includes calls to + // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. + UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) +} + +type alertPolicyServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewAlertPolicyServiceClient(cc grpc.ClientConnInterface) AlertPolicyServiceClient { + return &alertPolicyServiceClient{cc} +} + +func (c *alertPolicyServiceClient) ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error) { + out := new(ListAlertPoliciesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) { + out := new(AlertPolicy) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) { + out := new(AlertPolicy) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) { + out := new(AlertPolicy) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// AlertPolicyServiceServer is the server API for AlertPolicyService service. +type AlertPolicyServiceServer interface { + // Lists the existing alerting policies for the workspace. + ListAlertPolicies(context.Context, *ListAlertPoliciesRequest) (*ListAlertPoliciesResponse, error) + // Gets a single alerting policy. + GetAlertPolicy(context.Context, *GetAlertPolicyRequest) (*AlertPolicy, error) + // Creates a new alerting policy. + // + // Design your application to single-thread API calls that modify the state of + // alerting policies in a single project. 
This includes calls to + // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. + CreateAlertPolicy(context.Context, *CreateAlertPolicyRequest) (*AlertPolicy, error) + // Deletes an alerting policy. + // + // Design your application to single-thread API calls that modify the state of + // alerting policies in a single project. This includes calls to + // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. + DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*emptypb.Empty, error) + // Updates an alerting policy. You can either replace the entire policy with + // a new one or replace only certain fields in the current alerting policy by + // specifying the fields to be updated via `updateMask`. Returns the + // updated alerting policy. + // + // Design your application to single-thread API calls that modify the state of + // alerting policies in a single project. This includes calls to + // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. + UpdateAlertPolicy(context.Context, *UpdateAlertPolicyRequest) (*AlertPolicy, error) +} + +// UnimplementedAlertPolicyServiceServer can be embedded to have forward compatible implementations. +type UnimplementedAlertPolicyServiceServer struct { +} + +func (*UnimplementedAlertPolicyServiceServer) ListAlertPolicies(context.Context, *ListAlertPoliciesRequest) (*ListAlertPoliciesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListAlertPolicies not implemented") +} +func (*UnimplementedAlertPolicyServiceServer) GetAlertPolicy(context.Context, *GetAlertPolicyRequest) (*AlertPolicy, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetAlertPolicy not implemented") +} +func (*UnimplementedAlertPolicyServiceServer) CreateAlertPolicy(context.Context, *CreateAlertPolicyRequest) (*AlertPolicy, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateAlertPolicy not implemented") +} +func (*UnimplementedAlertPolicyServiceServer) DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteAlertPolicy not implemented") +} +func (*UnimplementedAlertPolicyServiceServer) UpdateAlertPolicy(context.Context, *UpdateAlertPolicyRequest) (*AlertPolicy, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateAlertPolicy not implemented") +} + +func RegisterAlertPolicyServiceServer(s *grpc.Server, srv AlertPolicyServiceServer) { + s.RegisterService(&_AlertPolicyService_serviceDesc, srv) +} + +func _AlertPolicyService_ListAlertPolicies_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListAlertPoliciesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, req.(*ListAlertPoliciesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_GetAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if 
interceptor == nil { + return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, req.(*GetAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_CreateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, req.(*CreateAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_DeleteAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, req.(*DeleteAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_UpdateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, req.(*UpdateAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _AlertPolicyService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.AlertPolicyService", + HandlerType: (*AlertPolicyServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListAlertPolicies", + Handler: _AlertPolicyService_ListAlertPolicies_Handler, + }, + { + MethodName: "GetAlertPolicy", + Handler: _AlertPolicyService_GetAlertPolicy_Handler, + }, + { + MethodName: "CreateAlertPolicy", + Handler: _AlertPolicyService_CreateAlertPolicy_Handler, + }, + { + MethodName: "DeleteAlertPolicy", + Handler: _AlertPolicyService_DeleteAlertPolicy_Handler, + }, + { + MethodName: "UpdateAlertPolicy", + Handler: _AlertPolicyService_UpdateAlertPolicy_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/alert_service.proto", +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go 
b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go new file mode 100644 index 00000000000..c9aa5a02472 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go @@ -0,0 +1,1165 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/common.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + distribution "google.golang.org/genproto/googleapis/api/distribution" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Specifies an ordering relationship on two arguments, called `left` and +// `right`. +type ComparisonType int32 + +const ( + // No ordering relationship is specified. + ComparisonType_COMPARISON_UNSPECIFIED ComparisonType = 0 + // True if the left argument is greater than the right argument. + ComparisonType_COMPARISON_GT ComparisonType = 1 + // True if the left argument is greater than or equal to the right argument. + ComparisonType_COMPARISON_GE ComparisonType = 2 + // True if the left argument is less than the right argument. + ComparisonType_COMPARISON_LT ComparisonType = 3 + // True if the left argument is less than or equal to the right argument. + ComparisonType_COMPARISON_LE ComparisonType = 4 + // True if the left argument is equal to the right argument. + ComparisonType_COMPARISON_EQ ComparisonType = 5 + // True if the left argument is not equal to the right argument. + ComparisonType_COMPARISON_NE ComparisonType = 6 +) + +// Enum value maps for ComparisonType. 
+var ( + ComparisonType_name = map[int32]string{ + 0: "COMPARISON_UNSPECIFIED", + 1: "COMPARISON_GT", + 2: "COMPARISON_GE", + 3: "COMPARISON_LT", + 4: "COMPARISON_LE", + 5: "COMPARISON_EQ", + 6: "COMPARISON_NE", + } + ComparisonType_value = map[string]int32{ + "COMPARISON_UNSPECIFIED": 0, + "COMPARISON_GT": 1, + "COMPARISON_GE": 2, + "COMPARISON_LT": 3, + "COMPARISON_LE": 4, + "COMPARISON_EQ": 5, + "COMPARISON_NE": 6, + } +) + +func (x ComparisonType) Enum() *ComparisonType { + p := new(ComparisonType) + *p = x + return p +} + +func (x ComparisonType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ComparisonType) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_common_proto_enumTypes[0].Descriptor() +} + +func (ComparisonType) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_common_proto_enumTypes[0] +} + +func (x ComparisonType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ComparisonType.Descriptor instead. +func (ComparisonType) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{0} +} + +// The tier of service for a Metrics Scope. Please see the +// [service tiers +// documentation](https://cloud.google.com/monitoring/workspaces/tiers) for more +// details. +// +// Deprecated: Marked as deprecated in google/monitoring/v3/common.proto. +type ServiceTier int32 + +const ( + // An invalid sentinel value, used to indicate that a tier has not + // been provided explicitly. + ServiceTier_SERVICE_TIER_UNSPECIFIED ServiceTier = 0 + // The Cloud Monitoring Basic tier, a free tier of service that provides basic + // features, a moderate allotment of logs, and access to built-in metrics. + // A number of features are not available in this tier. For more details, + // see [the service tiers + // documentation](https://cloud.google.com/monitoring/workspaces/tiers). + ServiceTier_SERVICE_TIER_BASIC ServiceTier = 1 + // The Cloud Monitoring Premium tier, a higher, more expensive tier of service + // that provides access to all Cloud Monitoring features, lets you use Cloud + // Monitoring with AWS accounts, and has a larger allotments for logs and + // metrics. For more details, see [the service tiers + // documentation](https://cloud.google.com/monitoring/workspaces/tiers). + ServiceTier_SERVICE_TIER_PREMIUM ServiceTier = 2 +) + +// Enum value maps for ServiceTier. +var ( + ServiceTier_name = map[int32]string{ + 0: "SERVICE_TIER_UNSPECIFIED", + 1: "SERVICE_TIER_BASIC", + 2: "SERVICE_TIER_PREMIUM", + } + ServiceTier_value = map[string]int32{ + "SERVICE_TIER_UNSPECIFIED": 0, + "SERVICE_TIER_BASIC": 1, + "SERVICE_TIER_PREMIUM": 2, + } +) + +func (x ServiceTier) Enum() *ServiceTier { + p := new(ServiceTier) + *p = x + return p +} + +func (x ServiceTier) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ServiceTier) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_common_proto_enumTypes[1].Descriptor() +} + +func (ServiceTier) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_common_proto_enumTypes[1] +} + +func (x ServiceTier) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ServiceTier.Descriptor instead. 
+func (ServiceTier) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{1} +} + +// The `Aligner` specifies the operation that will be applied to the data +// points in each alignment period in a time series. Except for +// `ALIGN_NONE`, which specifies that no operation be applied, each alignment +// operation replaces the set of data values in each alignment period with +// a single value: the result of applying the operation to the data values. +// An aligned time series has a single data value at the end of each +// `alignment_period`. +// +// An alignment operation can change the data type of the values, too. For +// example, if you apply a counting operation to boolean values, the data +// `value_type` in the original time series is `BOOLEAN`, but the `value_type` +// in the aligned result is `INT64`. +type Aggregation_Aligner int32 + +const ( + // No alignment. Raw data is returned. Not valid if cross-series reduction + // is requested. The `value_type` of the result is the same as the + // `value_type` of the input. + Aggregation_ALIGN_NONE Aggregation_Aligner = 0 + // Align and convert to + // [DELTA][google.api.MetricDescriptor.MetricKind.DELTA]. + // The output is `delta = y1 - y0`. + // + // This alignment is valid for + // [CUMULATIVE][google.api.MetricDescriptor.MetricKind.CUMULATIVE] and + // `DELTA` metrics. If the selected alignment period results in periods + // with no data, then the aligned value for such a period is created by + // interpolation. The `value_type` of the aligned result is the same as + // the `value_type` of the input. + Aggregation_ALIGN_DELTA Aggregation_Aligner = 1 + // Align and convert to a rate. The result is computed as + // `rate = (y1 - y0)/(t1 - t0)`, or "delta over time". + // Think of this aligner as providing the slope of the line that passes + // through the value at the start and at the end of the `alignment_period`. + // + // This aligner is valid for `CUMULATIVE` + // and `DELTA` metrics with numeric values. If the selected alignment + // period results in periods with no data, then the aligned value for + // such a period is created by interpolation. The output is a `GAUGE` + // metric with `value_type` `DOUBLE`. + // + // If, by "rate", you mean "percentage change", see the + // `ALIGN_PERCENT_CHANGE` aligner instead. + Aggregation_ALIGN_RATE Aggregation_Aligner = 2 + // Align by interpolating between adjacent points around the alignment + // period boundary. This aligner is valid for `GAUGE` metrics with + // numeric values. The `value_type` of the aligned result is the same as the + // `value_type` of the input. + Aggregation_ALIGN_INTERPOLATE Aggregation_Aligner = 3 + // Align by moving the most recent data point before the end of the + // alignment period to the boundary at the end of the alignment + // period. This aligner is valid for `GAUGE` metrics. The `value_type` of + // the aligned result is the same as the `value_type` of the input. + Aggregation_ALIGN_NEXT_OLDER Aggregation_Aligner = 4 + // Align the time series by returning the minimum value in each alignment + // period. This aligner is valid for `GAUGE` and `DELTA` metrics with + // numeric values. The `value_type` of the aligned result is the same as + // the `value_type` of the input. + Aggregation_ALIGN_MIN Aggregation_Aligner = 10 + // Align the time series by returning the maximum value in each alignment + // period. This aligner is valid for `GAUGE` and `DELTA` metrics with + // numeric values. 
The `value_type` of the aligned result is the same as + // the `value_type` of the input. + Aggregation_ALIGN_MAX Aggregation_Aligner = 11 + // Align the time series by returning the mean value in each alignment + // period. This aligner is valid for `GAUGE` and `DELTA` metrics with + // numeric values. The `value_type` of the aligned result is `DOUBLE`. + Aggregation_ALIGN_MEAN Aggregation_Aligner = 12 + // Align the time series by returning the number of values in each alignment + // period. This aligner is valid for `GAUGE` and `DELTA` metrics with + // numeric or Boolean values. The `value_type` of the aligned result is + // `INT64`. + Aggregation_ALIGN_COUNT Aggregation_Aligner = 13 + // Align the time series by returning the sum of the values in each + // alignment period. This aligner is valid for `GAUGE` and `DELTA` + // metrics with numeric and distribution values. The `value_type` of the + // aligned result is the same as the `value_type` of the input. + Aggregation_ALIGN_SUM Aggregation_Aligner = 14 + // Align the time series by returning the standard deviation of the values + // in each alignment period. This aligner is valid for `GAUGE` and + // `DELTA` metrics with numeric values. The `value_type` of the output is + // `DOUBLE`. + Aggregation_ALIGN_STDDEV Aggregation_Aligner = 15 + // Align the time series by returning the number of `True` values in + // each alignment period. This aligner is valid for `GAUGE` metrics with + // Boolean values. The `value_type` of the output is `INT64`. + Aggregation_ALIGN_COUNT_TRUE Aggregation_Aligner = 16 + // Align the time series by returning the number of `False` values in + // each alignment period. This aligner is valid for `GAUGE` metrics with + // Boolean values. The `value_type` of the output is `INT64`. + Aggregation_ALIGN_COUNT_FALSE Aggregation_Aligner = 24 + // Align the time series by returning the ratio of the number of `True` + // values to the total number of values in each alignment period. This + // aligner is valid for `GAUGE` metrics with Boolean values. The output + // value is in the range [0.0, 1.0] and has `value_type` `DOUBLE`. + Aggregation_ALIGN_FRACTION_TRUE Aggregation_Aligner = 17 + // Align the time series by using [percentile + // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting + // data point in each alignment period is the 99th percentile of all data + // points in the period. This aligner is valid for `GAUGE` and `DELTA` + // metrics with distribution values. The output is a `GAUGE` metric with + // `value_type` `DOUBLE`. + Aggregation_ALIGN_PERCENTILE_99 Aggregation_Aligner = 18 + // Align the time series by using [percentile + // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting + // data point in each alignment period is the 95th percentile of all data + // points in the period. This aligner is valid for `GAUGE` and `DELTA` + // metrics with distribution values. The output is a `GAUGE` metric with + // `value_type` `DOUBLE`. + Aggregation_ALIGN_PERCENTILE_95 Aggregation_Aligner = 19 + // Align the time series by using [percentile + // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting + // data point in each alignment period is the 50th percentile of all data + // points in the period. This aligner is valid for `GAUGE` and `DELTA` + // metrics with distribution values. The output is a `GAUGE` metric with + // `value_type` `DOUBLE`. 
+ Aggregation_ALIGN_PERCENTILE_50 Aggregation_Aligner = 20 + // Align the time series by using [percentile + // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting + // data point in each alignment period is the 5th percentile of all data + // points in the period. This aligner is valid for `GAUGE` and `DELTA` + // metrics with distribution values. The output is a `GAUGE` metric with + // `value_type` `DOUBLE`. + Aggregation_ALIGN_PERCENTILE_05 Aggregation_Aligner = 21 + // Align and convert to a percentage change. This aligner is valid for + // `GAUGE` and `DELTA` metrics with numeric values. This alignment returns + // `((current - previous)/previous) * 100`, where the value of `previous` is + // determined based on the `alignment_period`. + // + // If the values of `current` and `previous` are both 0, then the returned + // value is 0. If only `previous` is 0, the returned value is infinity. + // + // A 10-minute moving mean is computed at each point of the alignment period + // prior to the above calculation to smooth the metric and prevent false + // positives from very short-lived spikes. The moving mean is only + // applicable for data whose values are `>= 0`. Any values `< 0` are + // treated as a missing datapoint, and are ignored. While `DELTA` + // metrics are accepted by this alignment, special care should be taken that + // the values for the metric will always be positive. The output is a + // `GAUGE` metric with `value_type` `DOUBLE`. + Aggregation_ALIGN_PERCENT_CHANGE Aggregation_Aligner = 23 +) + +// Enum value maps for Aggregation_Aligner. +var ( + Aggregation_Aligner_name = map[int32]string{ + 0: "ALIGN_NONE", + 1: "ALIGN_DELTA", + 2: "ALIGN_RATE", + 3: "ALIGN_INTERPOLATE", + 4: "ALIGN_NEXT_OLDER", + 10: "ALIGN_MIN", + 11: "ALIGN_MAX", + 12: "ALIGN_MEAN", + 13: "ALIGN_COUNT", + 14: "ALIGN_SUM", + 15: "ALIGN_STDDEV", + 16: "ALIGN_COUNT_TRUE", + 24: "ALIGN_COUNT_FALSE", + 17: "ALIGN_FRACTION_TRUE", + 18: "ALIGN_PERCENTILE_99", + 19: "ALIGN_PERCENTILE_95", + 20: "ALIGN_PERCENTILE_50", + 21: "ALIGN_PERCENTILE_05", + 23: "ALIGN_PERCENT_CHANGE", + } + Aggregation_Aligner_value = map[string]int32{ + "ALIGN_NONE": 0, + "ALIGN_DELTA": 1, + "ALIGN_RATE": 2, + "ALIGN_INTERPOLATE": 3, + "ALIGN_NEXT_OLDER": 4, + "ALIGN_MIN": 10, + "ALIGN_MAX": 11, + "ALIGN_MEAN": 12, + "ALIGN_COUNT": 13, + "ALIGN_SUM": 14, + "ALIGN_STDDEV": 15, + "ALIGN_COUNT_TRUE": 16, + "ALIGN_COUNT_FALSE": 24, + "ALIGN_FRACTION_TRUE": 17, + "ALIGN_PERCENTILE_99": 18, + "ALIGN_PERCENTILE_95": 19, + "ALIGN_PERCENTILE_50": 20, + "ALIGN_PERCENTILE_05": 21, + "ALIGN_PERCENT_CHANGE": 23, + } +) + +func (x Aggregation_Aligner) Enum() *Aggregation_Aligner { + p := new(Aggregation_Aligner) + *p = x + return p +} + +func (x Aggregation_Aligner) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Aggregation_Aligner) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_common_proto_enumTypes[2].Descriptor() +} + +func (Aggregation_Aligner) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_common_proto_enumTypes[2] +} + +func (x Aggregation_Aligner) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Aggregation_Aligner.Descriptor instead. 
+func (Aggregation_Aligner) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{2, 0} +} + +// A Reducer operation describes how to aggregate data points from multiple +// time series into a single time series, where the value of each data point +// in the resulting series is a function of all the already aligned values in +// the input time series. +type Aggregation_Reducer int32 + +const ( + // No cross-time series reduction. The output of the `Aligner` is + // returned. + Aggregation_REDUCE_NONE Aggregation_Reducer = 0 + // Reduce by computing the mean value across time series for each + // alignment period. This reducer is valid for + // [DELTA][google.api.MetricDescriptor.MetricKind.DELTA] and + // [GAUGE][google.api.MetricDescriptor.MetricKind.GAUGE] metrics with + // numeric or distribution values. The `value_type` of the output is + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_REDUCE_MEAN Aggregation_Reducer = 1 + // Reduce by computing the minimum value across time series for each + // alignment period. This reducer is valid for `DELTA` and `GAUGE` metrics + // with numeric values. The `value_type` of the output is the same as the + // `value_type` of the input. + Aggregation_REDUCE_MIN Aggregation_Reducer = 2 + // Reduce by computing the maximum value across time series for each + // alignment period. This reducer is valid for `DELTA` and `GAUGE` metrics + // with numeric values. The `value_type` of the output is the same as the + // `value_type` of the input. + Aggregation_REDUCE_MAX Aggregation_Reducer = 3 + // Reduce by computing the sum across time series for each + // alignment period. This reducer is valid for `DELTA` and `GAUGE` metrics + // with numeric and distribution values. The `value_type` of the output is + // the same as the `value_type` of the input. + Aggregation_REDUCE_SUM Aggregation_Reducer = 4 + // Reduce by computing the standard deviation across time series + // for each alignment period. This reducer is valid for `DELTA` and + // `GAUGE` metrics with numeric or distribution values. The `value_type` + // of the output is `DOUBLE`. + Aggregation_REDUCE_STDDEV Aggregation_Reducer = 5 + // Reduce by computing the number of data points across time series + // for each alignment period. This reducer is valid for `DELTA` and + // `GAUGE` metrics of numeric, Boolean, distribution, and string + // `value_type`. The `value_type` of the output is `INT64`. + Aggregation_REDUCE_COUNT Aggregation_Reducer = 6 + // Reduce by computing the number of `True`-valued data points across time + // series for each alignment period. This reducer is valid for `DELTA` and + // `GAUGE` metrics of Boolean `value_type`. The `value_type` of the output + // is `INT64`. + Aggregation_REDUCE_COUNT_TRUE Aggregation_Reducer = 7 + // Reduce by computing the number of `False`-valued data points across time + // series for each alignment period. This reducer is valid for `DELTA` and + // `GAUGE` metrics of Boolean `value_type`. The `value_type` of the output + // is `INT64`. + Aggregation_REDUCE_COUNT_FALSE Aggregation_Reducer = 15 + // Reduce by computing the ratio of the number of `True`-valued data points + // to the total number of data points for each alignment period. This + // reducer is valid for `DELTA` and `GAUGE` metrics of Boolean `value_type`. + // The output value is in the range [0.0, 1.0] and has `value_type` + // `DOUBLE`. 
+ Aggregation_REDUCE_FRACTION_TRUE Aggregation_Reducer = 8 + // Reduce by computing the [99th + // percentile](https://en.wikipedia.org/wiki/Percentile) of data points + // across time series for each alignment period. This reducer is valid for + // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value + // of the output is `DOUBLE`. + Aggregation_REDUCE_PERCENTILE_99 Aggregation_Reducer = 9 + // Reduce by computing the [95th + // percentile](https://en.wikipedia.org/wiki/Percentile) of data points + // across time series for each alignment period. This reducer is valid for + // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value + // of the output is `DOUBLE`. + Aggregation_REDUCE_PERCENTILE_95 Aggregation_Reducer = 10 + // Reduce by computing the [50th + // percentile](https://en.wikipedia.org/wiki/Percentile) of data points + // across time series for each alignment period. This reducer is valid for + // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value + // of the output is `DOUBLE`. + Aggregation_REDUCE_PERCENTILE_50 Aggregation_Reducer = 11 + // Reduce by computing the [5th + // percentile](https://en.wikipedia.org/wiki/Percentile) of data points + // across time series for each alignment period. This reducer is valid for + // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value + // of the output is `DOUBLE`. + Aggregation_REDUCE_PERCENTILE_05 Aggregation_Reducer = 12 +) + +// Enum value maps for Aggregation_Reducer. +var ( + Aggregation_Reducer_name = map[int32]string{ + 0: "REDUCE_NONE", + 1: "REDUCE_MEAN", + 2: "REDUCE_MIN", + 3: "REDUCE_MAX", + 4: "REDUCE_SUM", + 5: "REDUCE_STDDEV", + 6: "REDUCE_COUNT", + 7: "REDUCE_COUNT_TRUE", + 15: "REDUCE_COUNT_FALSE", + 8: "REDUCE_FRACTION_TRUE", + 9: "REDUCE_PERCENTILE_99", + 10: "REDUCE_PERCENTILE_95", + 11: "REDUCE_PERCENTILE_50", + 12: "REDUCE_PERCENTILE_05", + } + Aggregation_Reducer_value = map[string]int32{ + "REDUCE_NONE": 0, + "REDUCE_MEAN": 1, + "REDUCE_MIN": 2, + "REDUCE_MAX": 3, + "REDUCE_SUM": 4, + "REDUCE_STDDEV": 5, + "REDUCE_COUNT": 6, + "REDUCE_COUNT_TRUE": 7, + "REDUCE_COUNT_FALSE": 15, + "REDUCE_FRACTION_TRUE": 8, + "REDUCE_PERCENTILE_99": 9, + "REDUCE_PERCENTILE_95": 10, + "REDUCE_PERCENTILE_50": 11, + "REDUCE_PERCENTILE_05": 12, + } +) + +func (x Aggregation_Reducer) Enum() *Aggregation_Reducer { + p := new(Aggregation_Reducer) + *p = x + return p +} + +func (x Aggregation_Reducer) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Aggregation_Reducer) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_common_proto_enumTypes[3].Descriptor() +} + +func (Aggregation_Reducer) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_common_proto_enumTypes[3] +} + +func (x Aggregation_Reducer) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Aggregation_Reducer.Descriptor instead. +func (Aggregation_Reducer) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{2, 1} +} + +// A single strongly-typed value. +type TypedValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The typed value field. 
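+ //
+ // A minimal usage sketch (editor's illustration; assumes this vendored
+ // package is imported as `monitoringpb`):
+ //
+ //	v := &monitoringpb.TypedValue{
+ //		Value: &monitoringpb.TypedValue_DoubleValue{DoubleValue: 0.95},
+ //	}
+ //	_ = v.GetDoubleValue() // yields 0.95; the other Get* variants return zero values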
+ //
+ // Types that are assignable to Value:
+ //
+ //	*TypedValue_BoolValue
+ //	*TypedValue_Int64Value
+ //	*TypedValue_DoubleValue
+ //	*TypedValue_StringValue
+ //	*TypedValue_DistributionValue
+ Value isTypedValue_Value `protobuf_oneof:"value"`
+}
+
+func (x *TypedValue) Reset() {
+ *x = TypedValue{}
+ if protoimpl.UnsafeEnabled {
+  mi := &file_google_monitoring_v3_common_proto_msgTypes[0]
+  ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+  ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TypedValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TypedValue) ProtoMessage() {}
+
+func (x *TypedValue) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_common_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+  ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+  if ms.LoadMessageInfo() == nil {
+   ms.StoreMessageInfo(mi)
+  }
+  return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TypedValue.ProtoReflect.Descriptor instead.
+func (*TypedValue) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *TypedValue) GetValue() isTypedValue_Value {
+ if m != nil {
+  return m.Value
+ }
+ return nil
+}
+
+func (x *TypedValue) GetBoolValue() bool {
+ if x, ok := x.GetValue().(*TypedValue_BoolValue); ok {
+  return x.BoolValue
+ }
+ return false
+}
+
+func (x *TypedValue) GetInt64Value() int64 {
+ if x, ok := x.GetValue().(*TypedValue_Int64Value); ok {
+  return x.Int64Value
+ }
+ return 0
+}
+
+func (x *TypedValue) GetDoubleValue() float64 {
+ if x, ok := x.GetValue().(*TypedValue_DoubleValue); ok {
+  return x.DoubleValue
+ }
+ return 0
+}
+
+func (x *TypedValue) GetStringValue() string {
+ if x, ok := x.GetValue().(*TypedValue_StringValue); ok {
+  return x.StringValue
+ }
+ return ""
+}
+
+func (x *TypedValue) GetDistributionValue() *distribution.Distribution {
+ if x, ok := x.GetValue().(*TypedValue_DistributionValue); ok {
+  return x.DistributionValue
+ }
+ return nil
+}
+
+type isTypedValue_Value interface {
+ isTypedValue_Value()
+}
+
+type TypedValue_BoolValue struct {
+ // A Boolean value: `true` or `false`.
+ BoolValue bool `protobuf:"varint,1,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type TypedValue_Int64Value struct {
+ // A 64-bit integer. Its range is approximately ±9.2x10^18.
+ Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type TypedValue_DoubleValue struct {
+ // A 64-bit double-precision floating-point number. Its magnitude
+ // is approximately ±10^±300 and it has 16
+ // significant digits of precision.
+ DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type TypedValue_StringValue struct {
+ // A variable-length string value.
+ StringValue string `protobuf:"bytes,4,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type TypedValue_DistributionValue struct {
+ // A distribution value.
+ DistributionValue *distribution.Distribution `protobuf:"bytes,5,opt,name=distribution_value,json=distributionValue,proto3,oneof"`
+}
+
+func (*TypedValue_BoolValue) isTypedValue_Value() {}
+
+func (*TypedValue_Int64Value) isTypedValue_Value() {}
+
+func (*TypedValue_DoubleValue) isTypedValue_Value() {}
+
+func (*TypedValue_StringValue) isTypedValue_Value() {}
+
+func (*TypedValue_DistributionValue) isTypedValue_Value() {}
+
+// Describes a time interval:
+//
+// - Reads: A half-open time interval.
It includes the end time but +// excludes the start time: `(startTime, endTime]`. The start time +// must be specified, must be earlier than the end time, and should be +// no older than the data retention period for the metric. +// - Writes: A closed time interval. It extends from the start time to the end +// time, +// and includes both: `[startTime, endTime]`. Valid time intervals +// depend on the +// [`MetricKind`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricKind) +// of the metric value. The end time must not be earlier than the start +// time, and the end time must not be more than 25 hours in the past or more +// than five minutes in the future. +// - For `GAUGE` metrics, the `startTime` value is technically optional; if +// no value is specified, the start time defaults to the value of the +// end time, and the interval represents a single point in time. If both +// start and end times are specified, they must be identical. Such an +// interval is valid only for `GAUGE` metrics, which are point-in-time +// measurements. The end time of a new interval must be at least a +// millisecond after the end time of the previous interval. +// - For `DELTA` metrics, the start time and end time must specify a +// non-zero interval, with subsequent points specifying contiguous and +// non-overlapping intervals. For `DELTA` metrics, the start time of +// the next interval must be at least a millisecond after the end time +// of the previous interval. +// - For `CUMULATIVE` metrics, the start time and end time must specify a +// non-zero interval, with subsequent points specifying the same +// start time and increasing end times, until an event resets the +// cumulative value to zero and sets a new start time for the following +// points. The new start time must be at least a millisecond after the +// end time of the previous interval. +// - The start time of a new interval must be at least a millisecond after +// the +// end time of the previous interval because intervals are closed. If the +// start time of a new interval is the same as the end time of the +// previous interval, then data written at the new start time could +// overwrite data written at the previous end time. +type TimeInterval struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The end of the time interval. + EndTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + // Optional. The beginning of the time interval. The default value + // for the start time is the end time. The start time must not be + // later than the end time. 
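+ //
+ // A brief sketch (editor's illustration; `timestamppb` is imported above,
+ // and `monitoringpb` is how external callers would qualify this package)
+ // of a GAUGE-style point-in-time interval, where the start time equals
+ // the end time:
+ //
+ //	now := timestamppb.Now()
+ //	iv := &monitoringpb.TimeInterval{StartTime: now, EndTime: now}
+ //	_ = iv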
+ StartTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` +} + +func (x *TimeInterval) Reset() { + *x = TimeInterval{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeInterval) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeInterval) ProtoMessage() {} + +func (x *TimeInterval) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_common_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeInterval.ProtoReflect.Descriptor instead. +func (*TimeInterval) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{1} +} + +func (x *TimeInterval) GetEndTime() *timestamppb.Timestamp { + if x != nil { + return x.EndTime + } + return nil +} + +func (x *TimeInterval) GetStartTime() *timestamppb.Timestamp { + if x != nil { + return x.StartTime + } + return nil +} + +// Describes how to combine multiple time series to provide a different view of +// the data. Aggregation of time series is done in two steps. First, each time +// series in the set is _aligned_ to the same time interval boundaries, then the +// set of time series is optionally _reduced_ in number. +// +// Alignment consists of applying the `per_series_aligner` operation +// to each time series after its data has been divided into regular +// `alignment_period` time intervals. This process takes _all_ of the data +// points in an alignment period, applies a mathematical transformation such as +// averaging, minimum, maximum, delta, etc., and converts them into a single +// data point per period. +// +// Reduction is when the aligned and transformed time series can optionally be +// combined, reducing the number of time series through similar mathematical +// transformations. Reduction involves applying a `cross_series_reducer` to +// all the time series, optionally sorting the time series into subsets with +// `group_by_fields`, and applying the reducer to each subset. +// +// The raw time series data can contain a huge amount of information from +// multiple sources. Alignment and reduction transforms this mass of data into +// a more manageable and representative collection of data, for example "the +// 95% latency across the average of all tasks in a cluster". This +// representative data can be more easily graphed and comprehended, and the +// individual time series data is still available for later drilldown. For more +// details, see [Filtering and +// aggregation](https://cloud.google.com/monitoring/api/v3/aggregation). +type Aggregation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The `alignment_period` specifies a time interval, in seconds, that is used + // to divide the data in all the + // [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + // time. This will be done before the per-series aligner can be applied to + // the data. + // + // The value must be at least 60 seconds. If a per-series + // aligner other than `ALIGN_NONE` is specified, this field is required or an + // error is returned. 
If no per-series aligner is specified, or the aligner + // `ALIGN_NONE` is specified, then this field is ignored. + // + // The maximum value of the `alignment_period` is 104 weeks (2 years) for + // charts, and 90,000 seconds (25 hours) for alerting policies. + AlignmentPeriod *durationpb.Duration `protobuf:"bytes,1,opt,name=alignment_period,json=alignmentPeriod,proto3" json:"alignment_period,omitempty"` + // An `Aligner` describes how to bring the data points in a single + // time series into temporal alignment. Except for `ALIGN_NONE`, all + // alignments cause all the data points in an `alignment_period` to be + // mathematically grouped together, resulting in a single data point for + // each `alignment_period` with end timestamp at the end of the period. + // + // Not all alignment operations may be applied to all time series. The valid + // choices depend on the `metric_kind` and `value_type` of the original time + // series. Alignment can change the `metric_kind` or the `value_type` of + // the time series. + // + // Time series data must be aligned in order to perform cross-time + // series reduction. If `cross_series_reducer` is specified, then + // `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + // and `alignment_period` must be specified; otherwise, an error is + // returned. + PerSeriesAligner Aggregation_Aligner `protobuf:"varint,2,opt,name=per_series_aligner,json=perSeriesAligner,proto3,enum=google.monitoring.v3.Aggregation_Aligner" json:"per_series_aligner,omitempty"` + // The reduction operation to be used to combine time series into a single + // time series, where the value of each data point in the resulting series is + // a function of all the already aligned values in the input time series. + // + // Not all reducer operations can be applied to all time series. The valid + // choices depend on the `metric_kind` and the `value_type` of the original + // time series. Reduction can yield a time series with a different + // `metric_kind` or `value_type` than the input time series. + // + // Time series data must first be aligned (see `per_series_aligner`) in order + // to perform cross-time series reduction. If `cross_series_reducer` is + // specified, then `per_series_aligner` must be specified, and must not be + // `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + // error is returned. + CrossSeriesReducer Aggregation_Reducer `protobuf:"varint,4,opt,name=cross_series_reducer,json=crossSeriesReducer,proto3,enum=google.monitoring.v3.Aggregation_Reducer" json:"cross_series_reducer,omitempty"` + // The set of fields to preserve when `cross_series_reducer` is + // specified. The `group_by_fields` determine how the time series are + // partitioned into subsets prior to applying the aggregation + // operation. Each subset contains time series that have the same + // value for each of the grouping fields. Each individual time + // series is a member of exactly one subset. The + // `cross_series_reducer` is applied to each subset of time series. + // It is not possible to reduce across different resource types, so + // this field implicitly contains `resource.type`. Fields not + // specified in `group_by_fields` are aggregated away. If + // `group_by_fields` is not specified and all the time series have + // the same resource type, then the time series are aggregated into + // a single output time series. If `cross_series_reducer` is not + // defined, this field is ignored. 
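+ //
+ // A hedged sketch (editor's illustration; `durationpb` is imported above,
+ // and `resource.label.zone` is a hypothetical grouping field) of an
+ // aggregation that aligns each series to a per-minute rate and then sums
+ // the aligned series per zone:
+ //
+ //	agg := &monitoringpb.Aggregation{
+ //		AlignmentPeriod:    &durationpb.Duration{Seconds: 60},
+ //		PerSeriesAligner:   monitoringpb.Aggregation_ALIGN_RATE,
+ //		CrossSeriesReducer: monitoringpb.Aggregation_REDUCE_SUM,
+ //		GroupByFields:      []string{"resource.label.zone"},
+ //	}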
+ GroupByFields []string `protobuf:"bytes,5,rep,name=group_by_fields,json=groupByFields,proto3" json:"group_by_fields,omitempty"` +} + +func (x *Aggregation) Reset() { + *x = Aggregation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_common_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Aggregation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Aggregation) ProtoMessage() {} + +func (x *Aggregation) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_common_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Aggregation.ProtoReflect.Descriptor instead. +func (*Aggregation) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{2} +} + +func (x *Aggregation) GetAlignmentPeriod() *durationpb.Duration { + if x != nil { + return x.AlignmentPeriod + } + return nil +} + +func (x *Aggregation) GetPerSeriesAligner() Aggregation_Aligner { + if x != nil { + return x.PerSeriesAligner + } + return Aggregation_ALIGN_NONE +} + +func (x *Aggregation) GetCrossSeriesReducer() Aggregation_Reducer { + if x != nil { + return x.CrossSeriesReducer + } + return Aggregation_REDUCE_NONE +} + +func (x *Aggregation) GetGroupByFields() []string { + if x != nil { + return x.GroupByFields + } + return nil +} + +var File_google_monitoring_v3_common_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_common_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xee, 0x01, 0x0a, 0x0a, 0x54, 0x79, + 0x70, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, + 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, + 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, + 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, + 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 
0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x49, 0x0a, 0x12, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x11, + 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x80, 0x01, 0x0a, 0x0c, 0x54, + 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x35, 0x0a, 0x08, 0x65, + 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xf3, 0x07, + 0x0a, 0x0b, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, + 0x10, 0x61, 0x6c, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0f, 0x61, 0x6c, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x12, 0x57, 0x0a, 0x12, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x52, 0x10, 0x70, 0x65, 0x72, 0x53, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x14, + 0x63, 0x72, 0x6f, 0x73, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x64, + 0x75, 0x63, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, + 0x64, 0x75, 0x63, 0x65, 0x72, 0x52, 0x12, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x52, 0x65, 0x64, 0x75, 0x63, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0d, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x79, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x22, 0x8b, 0x03, 0x0a, 0x07, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x12, 0x0e, 0x0a, + 0x0a, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0f, 0x0a, + 0x0b, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x10, 0x01, 0x12, 
0x0e, + 0x0a, 0x0a, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x41, 0x54, 0x45, 0x10, 0x02, 0x12, 0x15, + 0x0a, 0x11, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x50, 0x4f, 0x4c, + 0x41, 0x54, 0x45, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x4e, + 0x45, 0x58, 0x54, 0x5f, 0x4f, 0x4c, 0x44, 0x45, 0x52, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, 0x41, + 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x4d, 0x49, 0x4e, 0x10, 0x0a, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x4c, + 0x49, 0x47, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x4c, 0x49, + 0x47, 0x4e, 0x5f, 0x4d, 0x45, 0x41, 0x4e, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x41, 0x4c, 0x49, + 0x47, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x4c, + 0x49, 0x47, 0x4e, 0x5f, 0x53, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x4c, 0x49, + 0x47, 0x4e, 0x5f, 0x53, 0x54, 0x44, 0x44, 0x45, 0x56, 0x10, 0x0f, 0x12, 0x14, 0x0a, 0x10, 0x41, + 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, + 0x10, 0x12, 0x15, 0x0a, 0x11, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, + 0x5f, 0x46, 0x41, 0x4c, 0x53, 0x45, 0x10, 0x18, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x49, 0x47, + 0x4e, 0x5f, 0x46, 0x52, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, + 0x11, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, + 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x39, 0x39, 0x10, 0x12, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, + 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x39, + 0x35, 0x10, 0x13, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x45, 0x52, + 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x35, 0x30, 0x10, 0x14, 0x12, 0x17, 0x0a, 0x13, + 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, + 0x5f, 0x30, 0x35, 0x10, 0x15, 0x12, 0x18, 0x0a, 0x14, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x50, + 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x48, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x17, 0x22, + 0xb1, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x64, 0x75, 0x63, 0x65, 0x72, 0x12, 0x0f, 0x0a, 0x0b, 0x52, + 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, + 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x4d, 0x45, 0x41, 0x4e, 0x10, 0x01, 0x12, 0x0e, 0x0a, + 0x0a, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x4d, 0x49, 0x4e, 0x10, 0x02, 0x12, 0x0e, 0x0a, + 0x0a, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0x03, 0x12, 0x0e, 0x0a, + 0x0a, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x53, 0x55, 0x4d, 0x10, 0x04, 0x12, 0x11, 0x0a, + 0x0d, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x53, 0x54, 0x44, 0x44, 0x45, 0x56, 0x10, 0x05, + 0x12, 0x10, 0x0a, 0x0c, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, + 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x55, + 0x4e, 0x54, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, 0x07, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x44, + 0x55, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x5f, 0x46, 0x41, 0x4c, 0x53, 0x45, 0x10, + 0x0f, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x46, 0x52, 0x41, 0x43, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, 0x08, 0x12, 0x18, 0x0a, 0x14, 0x52, + 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, + 0x5f, 0x39, 0x39, 
0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, + 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x39, 0x35, 0x10, 0x0a, 0x12, + 0x18, 0x0a, 0x14, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, + 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x35, 0x30, 0x10, 0x0b, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x44, + 0x55, 0x43, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x30, + 0x35, 0x10, 0x0c, 0x2a, 0x9e, 0x01, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, + 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, + 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, + 0x5f, 0x47, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, + 0x53, 0x4f, 0x4e, 0x5f, 0x47, 0x45, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, + 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x4c, 0x54, 0x10, 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x43, + 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x11, + 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x45, 0x51, 0x10, + 0x05, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f, + 0x4e, 0x45, 0x10, 0x06, 0x2a, 0x61, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, + 0x69, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x54, + 0x49, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x54, 0x49, 0x45, + 0x52, 0x5f, 0x42, 0x41, 0x53, 0x49, 0x43, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x52, + 0x56, 0x49, 0x43, 0x45, 0x5f, 0x54, 0x49, 0x45, 0x52, 0x5f, 0x50, 0x52, 0x45, 0x4d, 0x49, 0x55, + 0x4d, 0x10, 0x02, 0x1a, 0x02, 0x18, 0x01, 0x42, 0xcd, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xa2, 0x02, 0x04, 0x47, 0x4d, 0x4f, 0x4e, 0xaa, 0x02, 0x1a, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_common_proto_rawDescOnce sync.Once + file_google_monitoring_v3_common_proto_rawDescData = 
file_google_monitoring_v3_common_proto_rawDesc +) + +func file_google_monitoring_v3_common_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_common_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_common_proto_rawDescData) + }) + return file_google_monitoring_v3_common_proto_rawDescData +} + +var file_google_monitoring_v3_common_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_google_monitoring_v3_common_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_google_monitoring_v3_common_proto_goTypes = []any{ + (ComparisonType)(0), // 0: google.monitoring.v3.ComparisonType + (ServiceTier)(0), // 1: google.monitoring.v3.ServiceTier + (Aggregation_Aligner)(0), // 2: google.monitoring.v3.Aggregation.Aligner + (Aggregation_Reducer)(0), // 3: google.monitoring.v3.Aggregation.Reducer + (*TypedValue)(nil), // 4: google.monitoring.v3.TypedValue + (*TimeInterval)(nil), // 5: google.monitoring.v3.TimeInterval + (*Aggregation)(nil), // 6: google.monitoring.v3.Aggregation + (*distribution.Distribution)(nil), // 7: google.api.Distribution + (*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 9: google.protobuf.Duration +} +var file_google_monitoring_v3_common_proto_depIdxs = []int32{ + 7, // 0: google.monitoring.v3.TypedValue.distribution_value:type_name -> google.api.Distribution + 8, // 1: google.monitoring.v3.TimeInterval.end_time:type_name -> google.protobuf.Timestamp + 8, // 2: google.monitoring.v3.TimeInterval.start_time:type_name -> google.protobuf.Timestamp + 9, // 3: google.monitoring.v3.Aggregation.alignment_period:type_name -> google.protobuf.Duration + 2, // 4: google.monitoring.v3.Aggregation.per_series_aligner:type_name -> google.monitoring.v3.Aggregation.Aligner + 3, // 5: google.monitoring.v3.Aggregation.cross_series_reducer:type_name -> google.monitoring.v3.Aggregation.Reducer + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_common_proto_init() } +func file_google_monitoring_v3_common_proto_init() { + if File_google_monitoring_v3_common_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_common_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*TypedValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_common_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*TimeInterval); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_common_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*Aggregation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_monitoring_v3_common_proto_msgTypes[0].OneofWrappers = []any{ + (*TypedValue_BoolValue)(nil), + (*TypedValue_Int64Value)(nil), + (*TypedValue_DoubleValue)(nil), + (*TypedValue_StringValue)(nil), + (*TypedValue_DistributionValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + 
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_common_proto_rawDesc, + NumEnums: 4, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_common_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_common_proto_depIdxs, + EnumInfos: file_google_monitoring_v3_common_proto_enumTypes, + MessageInfos: file_google_monitoring_v3_common_proto_msgTypes, + }.Build() + File_google_monitoring_v3_common_proto = out.File + file_google_monitoring_v3_common_proto_rawDesc = nil + file_google_monitoring_v3_common_proto_goTypes = nil + file_google_monitoring_v3_common_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go new file mode 100644 index 00000000000..7b1dc962da8 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go @@ -0,0 +1,197 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/dropped_labels.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A set of (label, value) pairs that were removed from a Distribution +// time series during aggregation and then added as an attachment to a +// Distribution.Exemplar. +// +// The full label set for the exemplars is constructed by using the dropped +// pairs in combination with the label values that remain on the aggregated +// Distribution time series. The constructed full label set can be used to +// identify the specific entity, such as the instance or job, which might be +// contributing to a long-tail. However, with dropped labels, the storage +// requirements are reduced because only the aggregated distribution values for +// a large group of time series are stored. +// +// Note that there are no guarantees on ordering of the labels from +// exemplar-to-exemplar and from distribution-to-distribution in the same +// stream, and there may be duplicates. It is up to clients to resolve any +// ambiguities. +type DroppedLabels struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map from label to its value, for all labels dropped in any aggregation. 
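+ //
+ // A small sketch (editor's illustration; `dl` is a hypothetical
+ // *DroppedLabels taken from an exemplar attachment) of reading the
+ // dropped pairs:
+ //
+ //	for k, v := range dl.GetLabel() {
+ //		_ = k // a dropped label key, e.g. "instance_id" (hypothetical)
+ //		_ = v // its recorded value
+ //	}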
+ Label map[string]string `protobuf:"bytes,1,rep,name=label,proto3" json:"label,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *DroppedLabels) Reset() { + *x = DroppedLabels{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_dropped_labels_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DroppedLabels) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DroppedLabels) ProtoMessage() {} + +func (x *DroppedLabels) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_dropped_labels_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DroppedLabels.ProtoReflect.Descriptor instead. +func (*DroppedLabels) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_dropped_labels_proto_rawDescGZIP(), []int{0} +} + +func (x *DroppedLabels) GetLabel() map[string]string { + if x != nil { + return x.Label + } + return nil +} + +var File_google_monitoring_v3_dropped_labels_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_dropped_labels_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x22, 0x8f, 0x01, 0x0a, 0x0d, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x12, 0x44, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, + 0x64, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x1a, 0x38, 0x0a, 0x0a, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x42, 0xcd, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x42, 0x12, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 
0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, + 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_dropped_labels_proto_rawDescOnce sync.Once + file_google_monitoring_v3_dropped_labels_proto_rawDescData = file_google_monitoring_v3_dropped_labels_proto_rawDesc +) + +func file_google_monitoring_v3_dropped_labels_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_dropped_labels_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_dropped_labels_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_dropped_labels_proto_rawDescData) + }) + return file_google_monitoring_v3_dropped_labels_proto_rawDescData +} + +var file_google_monitoring_v3_dropped_labels_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_google_monitoring_v3_dropped_labels_proto_goTypes = []any{ + (*DroppedLabels)(nil), // 0: google.monitoring.v3.DroppedLabels + nil, // 1: google.monitoring.v3.DroppedLabels.LabelEntry +} +var file_google_monitoring_v3_dropped_labels_proto_depIdxs = []int32{ + 1, // 0: google.monitoring.v3.DroppedLabels.label:type_name -> google.monitoring.v3.DroppedLabels.LabelEntry + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_dropped_labels_proto_init() } +func file_google_monitoring_v3_dropped_labels_proto_init() { + if File_google_monitoring_v3_dropped_labels_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_dropped_labels_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*DroppedLabels); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_dropped_labels_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_dropped_labels_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_dropped_labels_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_dropped_labels_proto_msgTypes, + }.Build() + File_google_monitoring_v3_dropped_labels_proto = out.File + file_google_monitoring_v3_dropped_labels_proto_rawDesc = nil + file_google_monitoring_v3_dropped_labels_proto_goTypes = nil + file_google_monitoring_v3_dropped_labels_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go new file mode 100644 index 00000000000..dff27f9d8ce --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go @@ -0,0 +1,265 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use 
this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/group.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The description of a dynamic collection of monitored resources. Each group +// has a filter that is matched against monitored resources and their associated +// metadata. If a group's filter matches an available monitored resource, then +// that resource is a member of that group. Groups can contain any number of +// monitored resources, and each monitored resource can be a member of any +// number of groups. +// +// Groups can be nested in parent-child hierarchies. The `parentName` field +// identifies an optional parent for each group. If a group has a parent, then +// the only monitored resources available to be matched by the group's filter +// are the resources contained in the parent group. In other words, a group +// contains the monitored resources that match its filter and the filters of all +// the group's ancestors. A group without a parent can contain any monitored +// resource. +// +// For example, consider an infrastructure running a set of instances with two +// user-defined tags: `"environment"` and `"role"`. A parent group has a filter, +// `environment="production"`. A child of that parent group has a filter, +// `role="transcoder"`. The parent group contains all instances in the +// production environment, regardless of their roles. The child group contains +// instances that have the transcoder role *and* are in the production +// environment. +// +// The monitored resources contained in a group can change at any moment, +// depending on what resources exist and what filters are associated with the +// group and its ancestors. +type Group struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The name of this group. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + // + // When creating a group, this field is ignored and a new name is created + // consisting of the project specified in the call to `CreateGroup` + // and a unique `[GROUP_ID]` that is generated automatically. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A user-assigned name for this group, used only for display purposes. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The name of the group's parent, if it has one. 
The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + // + // For groups with no parent, `parent_name` is the empty string, `""`. + ParentName string `protobuf:"bytes,3,opt,name=parent_name,json=parentName,proto3" json:"parent_name,omitempty"` + // The filter used to determine which monitored resources belong to this + // group. + Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + // If true, the members of this group are considered to be a cluster. + // The system can perform additional analysis on groups that are clusters. + IsCluster bool `protobuf:"varint,6,opt,name=is_cluster,json=isCluster,proto3" json:"is_cluster,omitempty"` +} + +func (x *Group) Reset() { + *x = Group{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Group) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Group) ProtoMessage() {} + +func (x *Group) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Group.ProtoReflect.Descriptor instead. +func (*Group) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_proto_rawDescGZIP(), []int{0} +} + +func (x *Group) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Group) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *Group) GetParentName() string { + if x != nil { + return x.ParentName + } + return "" +} + +func (x *Group) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *Group) GetIsCluster() bool { + if x != nil { + return x.IsCluster + } + return false +} + +var File_google_monitoring_v3_group_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_group_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1d, 0x0a, + 0x0a, 0x69, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x52, 
0x09, 0x69, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x3a, 0x99, 0x01, 0xea, + 0x41, 0x95, 0x01, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, + 0x72, 0x6f, 0x75, 0x70, 0x12, 0x21, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, + 0x7b, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x7d, 0x12, 0x2b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x7b, 0x67, 0x72, + 0x6f, 0x75, 0x70, 0x7d, 0x12, 0x1f, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, + 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x7b, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0xc5, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, + 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_group_proto_rawDescOnce sync.Once + file_google_monitoring_v3_group_proto_rawDescData = file_google_monitoring_v3_group_proto_rawDesc +) + +func file_google_monitoring_v3_group_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_group_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_group_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_group_proto_rawDescData) + }) + return file_google_monitoring_v3_group_proto_rawDescData +} + +var file_google_monitoring_v3_group_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_monitoring_v3_group_proto_goTypes = []any{ + (*Group)(nil), // 0: google.monitoring.v3.Group +} +var file_google_monitoring_v3_group_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_group_proto_init() } +func file_google_monitoring_v3_group_proto_init() { + if File_google_monitoring_v3_group_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
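+		// Exporter funcs are installed only when package unsafe is unavailable:
+		// they give the protoimpl runtime reflective access to the unexported
+		// bookkeeping fields (state, sizeCache, unknownFields) of each message.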
file_google_monitoring_v3_group_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Group); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_group_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_group_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_group_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_group_proto_msgTypes, + }.Build() + File_google_monitoring_v3_group_proto = out.File + file_google_monitoring_v3_group_proto_rawDesc = nil + file_google_monitoring_v3_group_proto_goTypes = nil + file_google_monitoring_v3_group_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go new file mode 100644 index 00000000000..46747d90643 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go @@ -0,0 +1,1319 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/group_service.proto + +package monitoringpb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The `ListGroups` request. +type ListGroupsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose + // groups are to be listed. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` + // An optional filter consisting of a single group name. The filters limit + // the groups returned based on their parent-child relationship with the + // specified group.
If no filter is specified, all groups are returned. + // + // Types that are assignable to Filter: + // + // *ListGroupsRequest_ChildrenOfGroup + // *ListGroupsRequest_AncestorsOfGroup + // *ListGroupsRequest_DescendantsOfGroup + Filter isListGroupsRequest_Filter `protobuf_oneof:"filter"` + // A positive number that is the maximum number of results to return. + PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `next_page_token` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,6,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListGroupsRequest) Reset() { + *x = ListGroupsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListGroupsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListGroupsRequest) ProtoMessage() {} + +func (x *ListGroupsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListGroupsRequest.ProtoReflect.Descriptor instead. +func (*ListGroupsRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{0} +} + +func (x *ListGroupsRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *ListGroupsRequest) GetFilter() isListGroupsRequest_Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (x *ListGroupsRequest) GetChildrenOfGroup() string { + if x, ok := x.GetFilter().(*ListGroupsRequest_ChildrenOfGroup); ok { + return x.ChildrenOfGroup + } + return "" +} + +func (x *ListGroupsRequest) GetAncestorsOfGroup() string { + if x, ok := x.GetFilter().(*ListGroupsRequest_AncestorsOfGroup); ok { + return x.AncestorsOfGroup + } + return "" +} + +func (x *ListGroupsRequest) GetDescendantsOfGroup() string { + if x, ok := x.GetFilter().(*ListGroupsRequest_DescendantsOfGroup); ok { + return x.DescendantsOfGroup + } + return "" +} + +func (x *ListGroupsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListGroupsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +type isListGroupsRequest_Filter interface { + isListGroupsRequest_Filter() +} + +type ListGroupsRequest_ChildrenOfGroup struct { + // A group name. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + // + // Returns groups whose `parent_name` field contains the group + // name. If no groups have this parent, the results are empty. + ChildrenOfGroup string `protobuf:"bytes,2,opt,name=children_of_group,json=childrenOfGroup,proto3,oneof"` +} + +type ListGroupsRequest_AncestorsOfGroup struct { + // A group name. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + // + // Returns groups that are ancestors of the specified group. 
+ // The groups are returned in order, starting with the immediate parent and + // ending with the most distant ancestor. If the specified group has no + // immediate parent, the results are empty. + AncestorsOfGroup string `protobuf:"bytes,3,opt,name=ancestors_of_group,json=ancestorsOfGroup,proto3,oneof"` +} + +type ListGroupsRequest_DescendantsOfGroup struct { + // A group name. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + // + // Returns the descendants of the specified group. This is a superset of + // the results returned by the `children_of_group` filter, and includes + // children-of-children, and so forth. + DescendantsOfGroup string `protobuf:"bytes,4,opt,name=descendants_of_group,json=descendantsOfGroup,proto3,oneof"` +} + +func (*ListGroupsRequest_ChildrenOfGroup) isListGroupsRequest_Filter() {} + +func (*ListGroupsRequest_AncestorsOfGroup) isListGroupsRequest_Filter() {} + +func (*ListGroupsRequest_DescendantsOfGroup) isListGroupsRequest_Filter() {} + +// The `ListGroups` response. +type ListGroupsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The groups that match the specified filters. + Group []*Group `protobuf:"bytes,1,rep,name=group,proto3" json:"group,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListGroupsResponse) Reset() { + *x = ListGroupsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListGroupsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListGroupsResponse) ProtoMessage() {} + +func (x *ListGroupsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListGroupsResponse.ProtoReflect.Descriptor instead. +func (*ListGroupsResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ListGroupsResponse) GetGroup() []*Group { + if x != nil { + return x.Group + } + return nil +} + +func (x *ListGroupsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The `GetGroup` request. +type GetGroupRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The group to retrieve. 
The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetGroupRequest) Reset() { + *x = GetGroupRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetGroupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetGroupRequest) ProtoMessage() {} + +func (x *GetGroupRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetGroupRequest.ProtoReflect.Descriptor instead. +func (*GetGroupRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{2} +} + +func (x *GetGroupRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `CreateGroup` request. +type CreateGroupRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) in which + // to create the group. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // Required. A group definition. It is an error to define the `name` field + // because the system assigns the name. + Group *Group `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"` + // If true, validate this request but do not create the group. + ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"` +} + +func (x *CreateGroupRequest) Reset() { + *x = CreateGroupRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateGroupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateGroupRequest) ProtoMessage() {} + +func (x *CreateGroupRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateGroupRequest.ProtoReflect.Descriptor instead. +func (*CreateGroupRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{3} +} + +func (x *CreateGroupRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CreateGroupRequest) GetGroup() *Group { + if x != nil { + return x.Group + } + return nil +} + +func (x *CreateGroupRequest) GetValidateOnly() bool { + if x != nil { + return x.ValidateOnly + } + return false +} + +// The `UpdateGroup` request. +type UpdateGroupRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The new definition of the group. 
All fields of the existing + // group, except `name`, are replaced with the corresponding fields of this + // group. + Group *Group `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"` + // If true, validate this request but do not update the existing group. + ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"` +} + +func (x *UpdateGroupRequest) Reset() { + *x = UpdateGroupRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateGroupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateGroupRequest) ProtoMessage() {} + +func (x *UpdateGroupRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateGroupRequest.ProtoReflect.Descriptor instead. +func (*UpdateGroupRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{4} +} + +func (x *UpdateGroupRequest) GetGroup() *Group { + if x != nil { + return x.Group + } + return nil +} + +func (x *UpdateGroupRequest) GetValidateOnly() bool { + if x != nil { + return x.ValidateOnly + } + return false +} + +// The `DeleteGroup` request. By default, a group can be deleted only if it +// has no descendants. +type DeleteGroupRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The group to delete. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // If true, delete the group together with all of its descendants. If false + // (the default), the group can be deleted only when it has no descendants. + Recursive bool `protobuf:"varint,4,opt,name=recursive,proto3" json:"recursive,omitempty"` +} + +func (x *DeleteGroupRequest) Reset() { + *x = DeleteGroupRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteGroupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteGroupRequest) ProtoMessage() {} + +func (x *DeleteGroupRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteGroupRequest.ProtoReflect.Descriptor instead. +func (*DeleteGroupRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{5} +} + +func (x *DeleteGroupRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *DeleteGroupRequest) GetRecursive() bool { + if x != nil { + return x.Recursive + } + return false +} + +// The `ListGroupMembers` request.
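+//
+// Illustrative pagination sketch (not generated output; assumes `ctx` is a
+// context.Context and `client` a GroupServiceClient, and the resource name is
+// a placeholder) showing how `next_page_token` threads through calls:
+//
+//	req := &ListGroupMembersRequest{
+//		Name:     "projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]",
+//		PageSize: 100,
+//	}
+//	for {
+//		resp, err := client.ListGroupMembers(ctx, req)
+//		if err != nil {
+//			break // handle the error in real code
+//		}
+//		// consume resp.Members here
+//		if resp.NextPageToken == "" {
+//			break
+//		}
+//		req.PageToken = resp.NextPageToken
+//	}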
+type ListGroupMembersRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The group whose members are listed. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` + // A positive number that is the maximum number of results to return. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `next_page_token` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // An optional [list + // filter](https://cloud.google.com/monitoring/api/learn_more#filtering) + // describing the members to be returned. The filter may reference the type, + // labels, and metadata of monitored resources that comprise the group. For + // example, to return only resources representing Compute Engine VM instances, + // use this filter: + // + // `resource.type = "gce_instance"` + Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + // An optional time interval for which results should be returned. Only + // members that were part of the group during the specified interval are + // included in the response. If no interval is provided then the group + // membership over the last minute is returned. + Interval *TimeInterval `protobuf:"bytes,6,opt,name=interval,proto3" json:"interval,omitempty"` +} + +func (x *ListGroupMembersRequest) Reset() { + *x = ListGroupMembersRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListGroupMembersRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListGroupMembersRequest) ProtoMessage() {} + +func (x *ListGroupMembersRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListGroupMembersRequest.ProtoReflect.Descriptor instead. +func (*ListGroupMembersRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{6} +} + +func (x *ListGroupMembersRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListGroupMembersRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListGroupMembersRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListGroupMembersRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListGroupMembersRequest) GetInterval() *TimeInterval { + if x != nil { + return x.Interval + } + return nil +} + +// The `ListGroupMembers` response. +type ListGroupMembersResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A set of monitored resources in the group. 
+ Members []*monitoredres.MonitoredResource `protobuf:"bytes,1,rep,name=members,proto3" json:"members,omitempty"` + // If there are more results than have been returned, then this field is + // set to a non-empty value. To see the additional results, use that value as + // `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of elements matching this request. + TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` +} + +func (x *ListGroupMembersResponse) Reset() { + *x = ListGroupMembersResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListGroupMembersResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListGroupMembersResponse) ProtoMessage() {} + +func (x *ListGroupMembersResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListGroupMembersResponse.ProtoReflect.Descriptor instead. +func (*ListGroupMembersResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{7} +} + +func (x *ListGroupMembersResponse) GetMembers() []*monitoredres.MonitoredResource { + if x != nil { + return x.Members + } + return nil +} + +func (x *ListGroupMembersResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListGroupMembersResponse) GetTotalSize() int32 { + if x != nil { + return x.TotalSize + } + return 0 +} + +var File_google_monitoring_v3_group_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_group_service_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, + 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x6d, 0x6f, 0x6e, 
0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, + 0x33, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, + 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9a, 0x03, 0x0a, 0x11, 0x4c, + 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x12, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x52, 0x0a, + 0x11, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x5f, 0x6f, 0x66, 0x5f, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x48, 0x00, + 0x52, 0x0f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x4f, 0x66, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x12, 0x54, 0x0a, 0x12, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x5f, 0x6f, + 0x66, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, + 0x41, 0x21, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x48, 0x00, 0x52, 0x10, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, + 0x4f, 0x66, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x58, 0x0a, 0x14, 0x64, 0x65, 0x73, 0x63, 0x65, + 0x6e, 0x64, 0x61, 0x6e, 0x74, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x48, 0x00, 0x52, 0x12, 0x64, + 0x65, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x61, 0x6e, 0x74, 0x73, 0x4f, 0x66, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, + 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x08, 0x0a, + 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x6f, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x47, + 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, + 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, + 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 
0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, + 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x4e, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x47, + 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, + 0x21, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xae, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x21, 0x12, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x05, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x71, 0x0a, 0x12, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x36, 0x0a, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x6f, 0x0a, 0x12, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0xea, 0x01, + 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4d, 0x65, 0x6d, 0x62, 0x65, + 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, + 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x08, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0x9a, 0x01, 0x0a, 0x18, 0x4c, + 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, + 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, + 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, + 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, + 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x32, 0x98, 0x08, 0x0a, 0x0c, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x8c, 0x01, 0x0a, 0x0a, 0x4c, 0x69, 0x73, + 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0xda, 0x41, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x2f, 0x76, 0x33, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, + 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x7d, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x2d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x12, 0x1e, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x8e, 0x01, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x38, 0xda, + 0x41, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x25, 0x3a, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x1c, 0x2f, 0x76, 0x33, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, + 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x91, 0x01, 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x3b, + 0xda, 0x41, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2d, 0x3a, 0x05, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x1a, 0x24, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x7e, 0x0a, 0x0b, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x2d, 0xda, 0x41, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x2a, 0x1e, 0x2f, 0x76, 0x33, + 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa8, 0x01, 0x0a, 0x10, + 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, + 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, + 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x35, 0xda, 0x41, 0x04, 
0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x28, 0x12, 0x26, + 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6d, + 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, + 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, + 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, + 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, + 0x61, 0x64, 0x42, 0xcc, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, + 0x11, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, + 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, + 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_group_service_proto_rawDescOnce sync.Once + file_google_monitoring_v3_group_service_proto_rawDescData = file_google_monitoring_v3_group_service_proto_rawDesc +) + +func file_google_monitoring_v3_group_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_group_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_group_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_group_service_proto_rawDescData) + }) + return file_google_monitoring_v3_group_service_proto_rawDescData +} + +var file_google_monitoring_v3_group_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_google_monitoring_v3_group_service_proto_goTypes = []any{ + (*ListGroupsRequest)(nil), // 0: google.monitoring.v3.ListGroupsRequest + (*ListGroupsResponse)(nil), // 1: 
google.monitoring.v3.ListGroupsResponse + (*GetGroupRequest)(nil), // 2: google.monitoring.v3.GetGroupRequest + (*CreateGroupRequest)(nil), // 3: google.monitoring.v3.CreateGroupRequest + (*UpdateGroupRequest)(nil), // 4: google.monitoring.v3.UpdateGroupRequest + (*DeleteGroupRequest)(nil), // 5: google.monitoring.v3.DeleteGroupRequest + (*ListGroupMembersRequest)(nil), // 6: google.monitoring.v3.ListGroupMembersRequest + (*ListGroupMembersResponse)(nil), // 7: google.monitoring.v3.ListGroupMembersResponse + (*Group)(nil), // 8: google.monitoring.v3.Group + (*TimeInterval)(nil), // 9: google.monitoring.v3.TimeInterval + (*monitoredres.MonitoredResource)(nil), // 10: google.api.MonitoredResource + (*emptypb.Empty)(nil), // 11: google.protobuf.Empty +} +var file_google_monitoring_v3_group_service_proto_depIdxs = []int32{ + 8, // 0: google.monitoring.v3.ListGroupsResponse.group:type_name -> google.monitoring.v3.Group + 8, // 1: google.monitoring.v3.CreateGroupRequest.group:type_name -> google.monitoring.v3.Group + 8, // 2: google.monitoring.v3.UpdateGroupRequest.group:type_name -> google.monitoring.v3.Group + 9, // 3: google.monitoring.v3.ListGroupMembersRequest.interval:type_name -> google.monitoring.v3.TimeInterval + 10, // 4: google.monitoring.v3.ListGroupMembersResponse.members:type_name -> google.api.MonitoredResource + 0, // 5: google.monitoring.v3.GroupService.ListGroups:input_type -> google.monitoring.v3.ListGroupsRequest + 2, // 6: google.monitoring.v3.GroupService.GetGroup:input_type -> google.monitoring.v3.GetGroupRequest + 3, // 7: google.monitoring.v3.GroupService.CreateGroup:input_type -> google.monitoring.v3.CreateGroupRequest + 4, // 8: google.monitoring.v3.GroupService.UpdateGroup:input_type -> google.monitoring.v3.UpdateGroupRequest + 5, // 9: google.monitoring.v3.GroupService.DeleteGroup:input_type -> google.monitoring.v3.DeleteGroupRequest + 6, // 10: google.monitoring.v3.GroupService.ListGroupMembers:input_type -> google.monitoring.v3.ListGroupMembersRequest + 1, // 11: google.monitoring.v3.GroupService.ListGroups:output_type -> google.monitoring.v3.ListGroupsResponse + 8, // 12: google.monitoring.v3.GroupService.GetGroup:output_type -> google.monitoring.v3.Group + 8, // 13: google.monitoring.v3.GroupService.CreateGroup:output_type -> google.monitoring.v3.Group + 8, // 14: google.monitoring.v3.GroupService.UpdateGroup:output_type -> google.monitoring.v3.Group + 11, // 15: google.monitoring.v3.GroupService.DeleteGroup:output_type -> google.protobuf.Empty + 7, // 16: google.monitoring.v3.GroupService.ListGroupMembers:output_type -> google.monitoring.v3.ListGroupMembersResponse + 11, // [11:17] is the sub-list for method output_type + 5, // [5:11] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_group_service_proto_init() } +func file_google_monitoring_v3_group_service_proto_init() { + if File_google_monitoring_v3_group_service_proto != nil { + return + } + file_google_monitoring_v3_common_proto_init() + file_google_monitoring_v3_group_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_group_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*ListGroupsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_monitoring_v3_group_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ListGroupsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_group_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*GetGroupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_group_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*CreateGroupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_group_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*UpdateGroupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_group_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*DeleteGroupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_group_service_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*ListGroupMembersRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_group_service_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*ListGroupMembersResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_monitoring_v3_group_service_proto_msgTypes[0].OneofWrappers = []any{ + (*ListGroupsRequest_ChildrenOfGroup)(nil), + (*ListGroupsRequest_AncestorsOfGroup)(nil), + (*ListGroupsRequest_DescendantsOfGroup)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_group_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_group_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_group_service_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_group_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_group_service_proto = out.File + file_google_monitoring_v3_group_service_proto_rawDesc = nil + file_google_monitoring_v3_group_service_proto_goTypes = nil + file_google_monitoring_v3_group_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// GroupServiceClient is the client API for GroupService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GroupServiceClient interface { + // Lists the existing groups. 
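+	//
+	// A minimal call sketch (illustrative, not generated; assumes `conn` is an
+	// authenticated *grpc.ClientConn and `ctx` a context.Context):
+	//
+	//	client := NewGroupServiceClient(conn)
+	//	resp, err := client.ListGroups(ctx, &ListGroupsRequest{
+	//		Name: "projects/[PROJECT_ID_OR_NUMBER]",
+	//	})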
+ ListGroups(ctx context.Context, in *ListGroupsRequest, opts ...grpc.CallOption) (*ListGroupsResponse, error) + // Gets a single group. + GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*Group, error) + // Creates a new group. + CreateGroup(ctx context.Context, in *CreateGroupRequest, opts ...grpc.CallOption) (*Group, error) + // Updates an existing group. + // You can change any group attributes except `name`. + UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error) + // Deletes an existing group. + DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Lists the monitored resources that are members of a group. + ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error) +} + +type groupServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewGroupServiceClient(cc grpc.ClientConnInterface) GroupServiceClient { + return &groupServiceClient{cc} +} + +func (c *groupServiceClient) ListGroups(ctx context.Context, in *ListGroupsRequest, opts ...grpc.CallOption) (*ListGroupsResponse, error) { + out := new(ListGroupsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/ListGroups", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*Group, error) { + out := new(Group) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/GetGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) CreateGroup(ctx context.Context, in *CreateGroupRequest, opts ...grpc.CallOption) (*Group, error) { + out := new(Group) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/CreateGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error) { + out := new(Group) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/UpdateGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/DeleteGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error) { + out := new(ListGroupMembersResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/ListGroupMembers", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// GroupServiceServer is the server API for GroupService service. +type GroupServiceServer interface { + // Lists the existing groups. + ListGroups(context.Context, *ListGroupsRequest) (*ListGroupsResponse, error) + // Gets a single group. + GetGroup(context.Context, *GetGroupRequest) (*Group, error) + // Creates a new group. + CreateGroup(context.Context, *CreateGroupRequest) (*Group, error) + // Updates an existing group. + // You can change any group attributes except `name`. 
+ UpdateGroup(context.Context, *UpdateGroupRequest) (*Group, error) + // Deletes an existing group. + DeleteGroup(context.Context, *DeleteGroupRequest) (*emptypb.Empty, error) + // Lists the monitored resources that are members of a group. + ListGroupMembers(context.Context, *ListGroupMembersRequest) (*ListGroupMembersResponse, error) +} + +// UnimplementedGroupServiceServer can be embedded to have forward compatible implementations. +type UnimplementedGroupServiceServer struct { +} + +func (*UnimplementedGroupServiceServer) ListGroups(context.Context, *ListGroupsRequest) (*ListGroupsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListGroups not implemented") +} +func (*UnimplementedGroupServiceServer) GetGroup(context.Context, *GetGroupRequest) (*Group, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetGroup not implemented") +} +func (*UnimplementedGroupServiceServer) CreateGroup(context.Context, *CreateGroupRequest) (*Group, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateGroup not implemented") +} +func (*UnimplementedGroupServiceServer) UpdateGroup(context.Context, *UpdateGroupRequest) (*Group, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateGroup not implemented") +} +func (*UnimplementedGroupServiceServer) DeleteGroup(context.Context, *DeleteGroupRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteGroup not implemented") +} +func (*UnimplementedGroupServiceServer) ListGroupMembers(context.Context, *ListGroupMembersRequest) (*ListGroupMembersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListGroupMembers not implemented") +} + +func RegisterGroupServiceServer(s *grpc.Server, srv GroupServiceServer) { + s.RegisterService(&_GroupService_serviceDesc, srv) +} + +func _GroupService_ListGroups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListGroupsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).ListGroups(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/ListGroups", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).ListGroups(ctx, req.(*ListGroupsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_GetGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).GetGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/GetGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).GetGroup(ctx, req.(*GetGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_CreateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).CreateGroup(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/CreateGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).CreateGroup(ctx, req.(*CreateGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_UpdateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).UpdateGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/UpdateGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).UpdateGroup(ctx, req.(*UpdateGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_DeleteGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).DeleteGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/DeleteGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).DeleteGroup(ctx, req.(*DeleteGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_ListGroupMembers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListGroupMembersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).ListGroupMembers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/ListGroupMembers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).ListGroupMembers(ctx, req.(*ListGroupMembersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _GroupService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.GroupService", + HandlerType: (*GroupServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListGroups", + Handler: _GroupService_ListGroups_Handler, + }, + { + MethodName: "GetGroup", + Handler: _GroupService_GetGroup_Handler, + }, + { + MethodName: "CreateGroup", + Handler: _GroupService_CreateGroup_Handler, + }, + { + MethodName: "UpdateGroup", + Handler: _GroupService_UpdateGroup_Handler, + }, + { + MethodName: "DeleteGroup", + Handler: _GroupService_DeleteGroup_Handler, + }, + { + MethodName: "ListGroupMembers", + Handler: _GroupService_ListGroupMembers_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/group_service.proto", +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go new file mode 100644 index 00000000000..b22c22d07e5 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go @@ -0,0 +1,1194 @@ +// 
Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/metric.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + label "google.golang.org/genproto/googleapis/api/label" + metric "google.golang.org/genproto/googleapis/api/metric" + monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A single data point in a time series. +type Point struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The time interval to which the data point applies. For `GAUGE` metrics, + // the start time is optional, but if it is supplied, it must equal the + // end time. For `DELTA` metrics, the start + // and end time should specify a non-zero interval, with subsequent points + // specifying contiguous and non-overlapping intervals. For `CUMULATIVE` + // metrics, the start and end time should specify a non-zero interval, with + // subsequent points specifying the same start time and increasing end times, + // until an event resets the cumulative value to zero and sets a new start + // time for the following points. + Interval *TimeInterval `protobuf:"bytes,1,opt,name=interval,proto3" json:"interval,omitempty"` + // The value of the data point. + Value *TypedValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Point) Reset() { + *x = Point{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Point) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Point) ProtoMessage() {} + +func (x *Point) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Point.ProtoReflect.Descriptor instead. +func (*Point) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{0} +} + +func (x *Point) GetInterval() *TimeInterval { + if x != nil { + return x.Interval + } + return nil +} + +func (x *Point) GetValue() *TypedValue { + if x != nil { + return x.Value + } + return nil +} + +// A collection of data points that describes the time-varying values +// of a metric. 
A time series is identified by a combination of a +// fully-specified monitored resource and a fully-specified metric. +// This type is used for both listing and creating time series. +type TimeSeries struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The associated metric. A fully-specified metric used to identify the time + // series. + Metric *metric.Metric `protobuf:"bytes,1,opt,name=metric,proto3" json:"metric,omitempty"` + // The associated monitored resource. Custom metrics can use only certain + // monitored resource types in their time series data. For more information, + // see [Monitored resources for custom + // metrics](https://cloud.google.com/monitoring/custom-metrics/creating-metrics#custom-metric-resources). + Resource *monitoredres.MonitoredResource `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"` + // Output only. The associated monitored resource metadata. When reading a + // time series, this field will include metadata labels that are explicitly + // named in the reduction. When creating a time series, this field is ignored. + Metadata *monitoredres.MonitoredResourceMetadata `protobuf:"bytes,7,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The metric kind of the time series. When listing time series, this metric + // kind might be different from the metric kind of the associated metric if + // this time series is an alignment or reduction of other time series. + // + // When creating a time series, this field is optional. If present, it must be + // the same as the metric kind of the associated metric. If the associated + // metric's descriptor must be auto-created, then this field specifies the + // metric kind of the new descriptor and must be either `GAUGE` (the default) + // or `CUMULATIVE`. + MetricKind metric.MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"` + // The value type of the time series. When listing time series, this value + // type might be different from the value type of the associated metric if + // this time series is an alignment or reduction of other time series. + // + // When creating a time series, this field is optional. If present, it must be + // the same as the type of the data in the `points` field. + ValueType metric.MetricDescriptor_ValueType `protobuf:"varint,4,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"` + // The data points of this time series. When listing time series, points are + // returned in reverse time order. + // + // When creating a time series, this field must contain exactly one point and + // the point's type must be the same as the value type of the associated + // metric. If the associated metric's descriptor must be auto-created, then + // the value type of the descriptor is determined by the point's type, which + // must be `BOOL`, `INT64`, `DOUBLE`, or `DISTRIBUTION`. + Points []*Point `protobuf:"bytes,5,rep,name=points,proto3" json:"points,omitempty"` + // The units in which the metric value is reported. It is only applicable + // if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` + // defines the representation of the stored metric values. 
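+	//
+	// A minimal sketch of a TimeSeries carrying a single point, as used when
+	// creating time series (illustrative only; the metric type, resource
+	// labels, and the timestamppb import are assumptions):
+	//
+	//	ts := &TimeSeries{
+	//		Metric: &metric.Metric{Type: "custom.googleapis.com/my_metric"},
+	//		Resource: &monitoredres.MonitoredResource{
+	//			Type:   "global",
+	//			Labels: map[string]string{"project_id": "my-project"},
+	//		},
+	//		Unit: "1",
+	//		Points: []*Point{{
+	//			Interval: &TimeInterval{EndTime: timestamppb.Now()},
+	//			Value:    &TypedValue{Value: &TypedValue_DoubleValue{DoubleValue: 0.95}},
+	//		}},
+	//	}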
+ Unit string `protobuf:"bytes,8,opt,name=unit,proto3" json:"unit,omitempty"` +} + +func (x *TimeSeries) Reset() { + *x = TimeSeries{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeSeries) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSeries) ProtoMessage() {} + +func (x *TimeSeries) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeSeries.ProtoReflect.Descriptor instead. +func (*TimeSeries) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{1} +} + +func (x *TimeSeries) GetMetric() *metric.Metric { + if x != nil { + return x.Metric + } + return nil +} + +func (x *TimeSeries) GetResource() *monitoredres.MonitoredResource { + if x != nil { + return x.Resource + } + return nil +} + +func (x *TimeSeries) GetMetadata() *monitoredres.MonitoredResourceMetadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *TimeSeries) GetMetricKind() metric.MetricDescriptor_MetricKind { + if x != nil { + return x.MetricKind + } + return metric.MetricDescriptor_MetricKind(0) +} + +func (x *TimeSeries) GetValueType() metric.MetricDescriptor_ValueType { + if x != nil { + return x.ValueType + } + return metric.MetricDescriptor_ValueType(0) +} + +func (x *TimeSeries) GetPoints() []*Point { + if x != nil { + return x.Points + } + return nil +} + +func (x *TimeSeries) GetUnit() string { + if x != nil { + return x.Unit + } + return "" +} + +// A descriptor for the labels and points in a time series. +type TimeSeriesDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Descriptors for the labels. + LabelDescriptors []*label.LabelDescriptor `protobuf:"bytes,1,rep,name=label_descriptors,json=labelDescriptors,proto3" json:"label_descriptors,omitempty"` + // Descriptors for the point data value columns. + PointDescriptors []*TimeSeriesDescriptor_ValueDescriptor `protobuf:"bytes,5,rep,name=point_descriptors,json=pointDescriptors,proto3" json:"point_descriptors,omitempty"` +} + +func (x *TimeSeriesDescriptor) Reset() { + *x = TimeSeriesDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeSeriesDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSeriesDescriptor) ProtoMessage() {} + +func (x *TimeSeriesDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeSeriesDescriptor.ProtoReflect.Descriptor instead. 
+func (*TimeSeriesDescriptor) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{2} +} + +func (x *TimeSeriesDescriptor) GetLabelDescriptors() []*label.LabelDescriptor { + if x != nil { + return x.LabelDescriptors + } + return nil +} + +func (x *TimeSeriesDescriptor) GetPointDescriptors() []*TimeSeriesDescriptor_ValueDescriptor { + if x != nil { + return x.PointDescriptors + } + return nil +} + +// Represents the values of a time series associated with a +// TimeSeriesDescriptor. +type TimeSeriesData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The values of the labels in the time series identifier, given in the same + // order as the `label_descriptors` field of the TimeSeriesDescriptor + // associated with this object. Each value must have a value of the type + // given in the corresponding entry of `label_descriptors`. + LabelValues []*LabelValue `protobuf:"bytes,1,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` + // The points in the time series. + PointData []*TimeSeriesData_PointData `protobuf:"bytes,2,rep,name=point_data,json=pointData,proto3" json:"point_data,omitempty"` +} + +func (x *TimeSeriesData) Reset() { + *x = TimeSeriesData{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeSeriesData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSeriesData) ProtoMessage() {} + +func (x *TimeSeriesData) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeSeriesData.ProtoReflect.Descriptor instead. +func (*TimeSeriesData) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{3} +} + +func (x *TimeSeriesData) GetLabelValues() []*LabelValue { + if x != nil { + return x.LabelValues + } + return nil +} + +func (x *TimeSeriesData) GetPointData() []*TimeSeriesData_PointData { + if x != nil { + return x.PointData + } + return nil +} + +// A label value. +type LabelValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The label value can be a bool, int64, or string. 
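+	//
+	// A short sketch of reading the oneof through the generated wrapper types
+	// (illustrative only; lv is an assumed *LabelValue):
+	//
+	//	switch v := lv.GetValue().(type) {
+	//	case *LabelValue_BoolValue:
+	//		fmt.Println(v.BoolValue)
+	//	case *LabelValue_Int64Value:
+	//		fmt.Println(v.Int64Value)
+	//	case *LabelValue_StringValue:
+	//		fmt.Println(v.StringValue)
+	//	}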
+ // + // Types that are assignable to Value: + // + // *LabelValue_BoolValue + // *LabelValue_Int64Value + // *LabelValue_StringValue + Value isLabelValue_Value `protobuf_oneof:"value"` +} + +func (x *LabelValue) Reset() { + *x = LabelValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LabelValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LabelValue) ProtoMessage() {} + +func (x *LabelValue) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LabelValue.ProtoReflect.Descriptor instead. +func (*LabelValue) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{4} +} + +func (m *LabelValue) GetValue() isLabelValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (x *LabelValue) GetBoolValue() bool { + if x, ok := x.GetValue().(*LabelValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *LabelValue) GetInt64Value() int64 { + if x, ok := x.GetValue().(*LabelValue_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (x *LabelValue) GetStringValue() string { + if x, ok := x.GetValue().(*LabelValue_StringValue); ok { + return x.StringValue + } + return "" +} + +type isLabelValue_Value interface { + isLabelValue_Value() +} + +type LabelValue_BoolValue struct { + // A bool label value. + BoolValue bool `protobuf:"varint,1,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type LabelValue_Int64Value struct { + // An int64 label value. + Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type LabelValue_StringValue struct { + // A string label value. + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +func (*LabelValue_BoolValue) isLabelValue_Value() {} + +func (*LabelValue_Int64Value) isLabelValue_Value() {} + +func (*LabelValue_StringValue) isLabelValue_Value() {} + +// An error associated with a query in the time series query language format. +type QueryError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The location of the time series query language text that this error applies + // to. + Locator *TextLocator `protobuf:"bytes,1,opt,name=locator,proto3" json:"locator,omitempty"` + // The error message. 
+	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+}
+
+func (x *QueryError) Reset() {
+	*x = QueryError{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_monitoring_v3_metric_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *QueryError) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueryError) ProtoMessage() {}
+
+func (x *QueryError) ProtoReflect() protoreflect.Message {
+	mi := &file_google_monitoring_v3_metric_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryError.ProtoReflect.Descriptor instead.
+func (*QueryError) Descriptor() ([]byte, []int) {
+	return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *QueryError) GetLocator() *TextLocator {
+	if x != nil {
+		return x.Locator
+	}
+	return nil
+}
+
+func (x *QueryError) GetMessage() string {
+	if x != nil {
+		return x.Message
+	}
+	return ""
+}
+
+// A locator for text. Indicates a particular part of the text of a request or
+// of an object referenced in the request.
+//
+// For example, suppose the request field `text` contains:
+//
+//	text: "The quick brown fox jumps over the lazy dog."
+//
+// Then the locator:
+//
+//	source: "text"
+//	start_position {
+//	  line: 1
+//	  column: 17
+//	}
+//	end_position {
+//	  line: 1
+//	  column: 19
+//	}
+//
+// refers to the part of the text: "fox".
+type TextLocator struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The source of the text. The source may be a field in the request, in which
+	// case its format is the format of the
+	// google.rpc.BadRequest.FieldViolation.field field in
+	// https://cloud.google.com/apis/design/errors#error_details. It may also be
+	// a source other than the request field (e.g. a macro definition
+	// referenced in the text of the query), in which case this is the name of
+	// the source (e.g. the macro name).
+	Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"`
+	// The position of the first byte within the text.
+	StartPosition *TextLocator_Position `protobuf:"bytes,2,opt,name=start_position,json=startPosition,proto3" json:"start_position,omitempty"`
+	// The position of the last byte within the text.
+	EndPosition *TextLocator_Position `protobuf:"bytes,3,opt,name=end_position,json=endPosition,proto3" json:"end_position,omitempty"`
+	// If `source`, `start_position`, and `end_position` describe a call on
+	// some object (e.g. a macro in the time series query language text) and a
+	// location is to be designated in that object's text, `nested_locator`
+	// identifies the location within that object.
+	NestedLocator *TextLocator `protobuf:"bytes,4,opt,name=nested_locator,json=nestedLocator,proto3" json:"nested_locator,omitempty"`
+	// When `nested_locator` is set, this field gives the reason for the nesting.
+	// Usually, the reason is a macro invocation. In that case, the macro name
+	// (including the leading '@') signals the location of the macro call
+	// in the text and a macro argument name (including the leading '$') signals
+	// the location of the macro argument inside the macro body that got
+	// substituted away.
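+	//
+	// A sketch of walking a chain of nested locators from a QueryError
+	// (illustrative only; qe is an assumed *QueryError):
+	//
+	//	for loc := qe.GetLocator(); loc != nil; loc = loc.GetNestedLocator() {
+	//		fmt.Printf("%s: %s\n", loc.GetSource(), loc.GetNestingReason())
+	//	}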
+ NestingReason string `protobuf:"bytes,5,opt,name=nesting_reason,json=nestingReason,proto3" json:"nesting_reason,omitempty"` +} + +func (x *TextLocator) Reset() { + *x = TextLocator{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TextLocator) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TextLocator) ProtoMessage() {} + +func (x *TextLocator) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TextLocator.ProtoReflect.Descriptor instead. +func (*TextLocator) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{6} +} + +func (x *TextLocator) GetSource() string { + if x != nil { + return x.Source + } + return "" +} + +func (x *TextLocator) GetStartPosition() *TextLocator_Position { + if x != nil { + return x.StartPosition + } + return nil +} + +func (x *TextLocator) GetEndPosition() *TextLocator_Position { + if x != nil { + return x.EndPosition + } + return nil +} + +func (x *TextLocator) GetNestedLocator() *TextLocator { + if x != nil { + return x.NestedLocator + } + return nil +} + +func (x *TextLocator) GetNestingReason() string { + if x != nil { + return x.NestingReason + } + return "" +} + +// A descriptor for the value columns in a data point. +type TimeSeriesDescriptor_ValueDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The value key. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The value type. + ValueType metric.MetricDescriptor_ValueType `protobuf:"varint,2,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"` + // The value stream kind. + MetricKind metric.MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"` + // The unit in which `time_series` point values are reported. `unit` + // follows the UCUM format for units as seen in + // https://unitsofmeasure.org/ucum.html. + // `unit` is only valid if `value_type` is INTEGER, DOUBLE, DISTRIBUTION. 
+ Unit string `protobuf:"bytes,4,opt,name=unit,proto3" json:"unit,omitempty"` +} + +func (x *TimeSeriesDescriptor_ValueDescriptor) Reset() { + *x = TimeSeriesDescriptor_ValueDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeSeriesDescriptor_ValueDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSeriesDescriptor_ValueDescriptor) ProtoMessage() {} + +func (x *TimeSeriesDescriptor_ValueDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeSeriesDescriptor_ValueDescriptor.ProtoReflect.Descriptor instead. +func (*TimeSeriesDescriptor_ValueDescriptor) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *TimeSeriesDescriptor_ValueDescriptor) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *TimeSeriesDescriptor_ValueDescriptor) GetValueType() metric.MetricDescriptor_ValueType { + if x != nil { + return x.ValueType + } + return metric.MetricDescriptor_ValueType(0) +} + +func (x *TimeSeriesDescriptor_ValueDescriptor) GetMetricKind() metric.MetricDescriptor_MetricKind { + if x != nil { + return x.MetricKind + } + return metric.MetricDescriptor_MetricKind(0) +} + +func (x *TimeSeriesDescriptor_ValueDescriptor) GetUnit() string { + if x != nil { + return x.Unit + } + return "" +} + +// A point's value columns and time interval. Each point has one or more +// point values corresponding to the entries in `point_descriptors` field in +// the TimeSeriesDescriptor associated with this object. +type TimeSeriesData_PointData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The values that make up the point. + Values []*TypedValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + // The time interval associated with the point. + TimeInterval *TimeInterval `protobuf:"bytes,2,opt,name=time_interval,json=timeInterval,proto3" json:"time_interval,omitempty"` +} + +func (x *TimeSeriesData_PointData) Reset() { + *x = TimeSeriesData_PointData{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeSeriesData_PointData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSeriesData_PointData) ProtoMessage() {} + +func (x *TimeSeriesData_PointData) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeSeriesData_PointData.ProtoReflect.Descriptor instead. 
+func (*TimeSeriesData_PointData) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *TimeSeriesData_PointData) GetValues() []*TypedValue { + if x != nil { + return x.Values + } + return nil +} + +func (x *TimeSeriesData_PointData) GetTimeInterval() *TimeInterval { + if x != nil { + return x.TimeInterval + } + return nil +} + +// The position of a byte within the text. +type TextLocator_Position struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The line, starting with 1, where the byte is positioned. + Line int32 `protobuf:"varint,1,opt,name=line,proto3" json:"line,omitempty"` + // The column within the line, starting with 1, where the byte is + // positioned. This is a byte index even though the text is UTF-8. + Column int32 `protobuf:"varint,2,opt,name=column,proto3" json:"column,omitempty"` +} + +func (x *TextLocator_Position) Reset() { + *x = TextLocator_Position{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TextLocator_Position) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TextLocator_Position) ProtoMessage() {} + +func (x *TextLocator_Position) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TextLocator_Position.ProtoReflect.Descriptor instead. +func (*TextLocator_Position) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *TextLocator_Position) GetLine() int32 { + if x != nil { + return x.Line + } + return 0 +} + +func (x *TextLocator_Position) GetColumn() int32 { + if x != nil { + return x.Column + } + return 0 +} + +var File_google_monitoring_v3_metric_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_metric_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, + 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x7f, 0x0a, 0x05, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3e, 0x0a, 0x08, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x36, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x22, 0x90, 0x03, 0x0a, 0x0a, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x39, + 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, + 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x41, 0x0a, 0x08, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x48, 0x0a, 0x0b, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x45, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, + 0x06, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x06, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x22, 0x94, 0x03, 0x0a, 0x14, 0x54, 0x69, 0x6d, 0x65, 0x53, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, + 0x48, 0x0a, 0x11, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x10, 
0x6c, 0x61, 0x62, 0x65, 0x6c, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x67, 0x0a, 0x11, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x52, 0x10, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x73, 0x1a, 0xc8, 0x01, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x45, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x48, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0a, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x22, 0xb5, 0x02, + 0x0a, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, + 0x12, 0x43, 0x0a, 0x0c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x61, + 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x0a, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x2e, + 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x09, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x44, 0x61, 0x74, 0x61, 0x1a, 0x8e, 0x01, 0x0a, 0x09, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x38, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x47, 0x0a, 0x0d, + 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 
0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x49, + 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0x7e, 0x0a, 0x0a, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, + 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x63, 0x0a, 0x0a, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x65, 0x78, 0x74, + 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x07, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, + 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xf0, 0x02, 0x0a, 0x0b, 0x54, + 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x12, 0x51, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x50, 0x6f, + 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x73, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x0c, 0x65, 0x6e, 0x64, 0x5f, 0x70, 0x6f, 0x73, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x50, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x65, 0x6e, 0x64, 0x50, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x0e, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, + 0x0d, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x25, + 0x0a, 0x0e, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x18, 
0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x52, + 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x1a, 0x36, 0x0a, 0x08, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x42, 0xc6, 0x01, + 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, + 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, + 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_metric_proto_rawDescOnce sync.Once + file_google_monitoring_v3_metric_proto_rawDescData = file_google_monitoring_v3_metric_proto_rawDesc +) + +func file_google_monitoring_v3_metric_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_metric_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_metric_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_metric_proto_rawDescData) + }) + return file_google_monitoring_v3_metric_proto_rawDescData +} + +var file_google_monitoring_v3_metric_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_google_monitoring_v3_metric_proto_goTypes = []any{ + (*Point)(nil), // 0: google.monitoring.v3.Point + (*TimeSeries)(nil), // 1: google.monitoring.v3.TimeSeries + (*TimeSeriesDescriptor)(nil), // 2: google.monitoring.v3.TimeSeriesDescriptor + (*TimeSeriesData)(nil), // 3: google.monitoring.v3.TimeSeriesData + (*LabelValue)(nil), // 4: google.monitoring.v3.LabelValue + (*QueryError)(nil), // 5: google.monitoring.v3.QueryError + (*TextLocator)(nil), // 6: google.monitoring.v3.TextLocator + (*TimeSeriesDescriptor_ValueDescriptor)(nil), // 7: google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor + (*TimeSeriesData_PointData)(nil), // 8: google.monitoring.v3.TimeSeriesData.PointData + (*TextLocator_Position)(nil), // 9: google.monitoring.v3.TextLocator.Position + (*TimeInterval)(nil), // 10: google.monitoring.v3.TimeInterval + (*TypedValue)(nil), // 11: google.monitoring.v3.TypedValue + (*metric.Metric)(nil), // 12: google.api.Metric + (*monitoredres.MonitoredResource)(nil), // 13: google.api.MonitoredResource + (*monitoredres.MonitoredResourceMetadata)(nil), // 14: google.api.MonitoredResourceMetadata + (metric.MetricDescriptor_MetricKind)(0), // 15: 
google.api.MetricDescriptor.MetricKind + (metric.MetricDescriptor_ValueType)(0), // 16: google.api.MetricDescriptor.ValueType + (*label.LabelDescriptor)(nil), // 17: google.api.LabelDescriptor +} +var file_google_monitoring_v3_metric_proto_depIdxs = []int32{ + 10, // 0: google.monitoring.v3.Point.interval:type_name -> google.monitoring.v3.TimeInterval + 11, // 1: google.monitoring.v3.Point.value:type_name -> google.monitoring.v3.TypedValue + 12, // 2: google.monitoring.v3.TimeSeries.metric:type_name -> google.api.Metric + 13, // 3: google.monitoring.v3.TimeSeries.resource:type_name -> google.api.MonitoredResource + 14, // 4: google.monitoring.v3.TimeSeries.metadata:type_name -> google.api.MonitoredResourceMetadata + 15, // 5: google.monitoring.v3.TimeSeries.metric_kind:type_name -> google.api.MetricDescriptor.MetricKind + 16, // 6: google.monitoring.v3.TimeSeries.value_type:type_name -> google.api.MetricDescriptor.ValueType + 0, // 7: google.monitoring.v3.TimeSeries.points:type_name -> google.monitoring.v3.Point + 17, // 8: google.monitoring.v3.TimeSeriesDescriptor.label_descriptors:type_name -> google.api.LabelDescriptor + 7, // 9: google.monitoring.v3.TimeSeriesDescriptor.point_descriptors:type_name -> google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor + 4, // 10: google.monitoring.v3.TimeSeriesData.label_values:type_name -> google.monitoring.v3.LabelValue + 8, // 11: google.monitoring.v3.TimeSeriesData.point_data:type_name -> google.monitoring.v3.TimeSeriesData.PointData + 6, // 12: google.monitoring.v3.QueryError.locator:type_name -> google.monitoring.v3.TextLocator + 9, // 13: google.monitoring.v3.TextLocator.start_position:type_name -> google.monitoring.v3.TextLocator.Position + 9, // 14: google.monitoring.v3.TextLocator.end_position:type_name -> google.monitoring.v3.TextLocator.Position + 6, // 15: google.monitoring.v3.TextLocator.nested_locator:type_name -> google.monitoring.v3.TextLocator + 16, // 16: google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor.value_type:type_name -> google.api.MetricDescriptor.ValueType + 15, // 17: google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor.metric_kind:type_name -> google.api.MetricDescriptor.MetricKind + 11, // 18: google.monitoring.v3.TimeSeriesData.PointData.values:type_name -> google.monitoring.v3.TypedValue + 10, // 19: google.monitoring.v3.TimeSeriesData.PointData.time_interval:type_name -> google.monitoring.v3.TimeInterval + 20, // [20:20] is the sub-list for method output_type + 20, // [20:20] is the sub-list for method input_type + 20, // [20:20] is the sub-list for extension type_name + 20, // [20:20] is the sub-list for extension extendee + 0, // [0:20] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_metric_proto_init() } +func file_google_monitoring_v3_metric_proto_init() { + if File_google_monitoring_v3_metric_proto != nil { + return + } + file_google_monitoring_v3_common_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_metric_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Point); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*TimeSeries); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[2].Exporter = func(v any, 
i int) any { + switch v := v.(*TimeSeriesDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*TimeSeriesData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*LabelValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*QueryError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*TextLocator); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*TimeSeriesDescriptor_ValueDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*TimeSeriesData_PointData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*TextLocator_Position); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_monitoring_v3_metric_proto_msgTypes[4].OneofWrappers = []any{ + (*LabelValue_BoolValue)(nil), + (*LabelValue_Int64Value)(nil), + (*LabelValue_StringValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_metric_proto_rawDesc, + NumEnums: 0, + NumMessages: 10, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_metric_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_metric_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_metric_proto_msgTypes, + }.Build() + File_google_monitoring_v3_metric_proto = out.File + file_google_monitoring_v3_metric_proto_rawDesc = nil + file_google_monitoring_v3_metric_proto_goTypes = nil + file_google_monitoring_v3_metric_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go new file mode 100644 index 00000000000..52e1c1e0b9b --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go @@ -0,0 +1,2502 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/metric_service.proto + +package monitoringpb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + metric "google.golang.org/genproto/googleapis/api/metric" + monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" + status "google.golang.org/genproto/googleapis/rpc/status" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status1 "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Controls which fields are returned by `ListTimeSeries*`. +type ListTimeSeriesRequest_TimeSeriesView int32 + +const ( + // Returns the identity of the metric(s), the time series, + // and the time series data. + ListTimeSeriesRequest_FULL ListTimeSeriesRequest_TimeSeriesView = 0 + // Returns the identity of the metric and the time series resource, + // but not the time series data. + ListTimeSeriesRequest_HEADERS ListTimeSeriesRequest_TimeSeriesView = 1 +) + +// Enum value maps for ListTimeSeriesRequest_TimeSeriesView. +var ( + ListTimeSeriesRequest_TimeSeriesView_name = map[int32]string{ + 0: "FULL", + 1: "HEADERS", + } + ListTimeSeriesRequest_TimeSeriesView_value = map[string]int32{ + "FULL": 0, + "HEADERS": 1, + } +) + +func (x ListTimeSeriesRequest_TimeSeriesView) Enum() *ListTimeSeriesRequest_TimeSeriesView { + p := new(ListTimeSeriesRequest_TimeSeriesView) + *p = x + return p +} + +func (x ListTimeSeriesRequest_TimeSeriesView) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ListTimeSeriesRequest_TimeSeriesView) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_metric_service_proto_enumTypes[0].Descriptor() +} + +func (ListTimeSeriesRequest_TimeSeriesView) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_metric_service_proto_enumTypes[0] +} + +func (x ListTimeSeriesRequest_TimeSeriesView) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ListTimeSeriesRequest_TimeSeriesView.Descriptor instead. +func (ListTimeSeriesRequest_TimeSeriesView) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{8, 0} +} + +// The `ListMonitoredResourceDescriptors` request. +type ListMonitoredResourceDescriptorsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. 
The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which + // to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // An optional [filter](https://cloud.google.com/monitoring/api/v3/filters) + // describing the descriptors to be returned. The filter can reference the + // descriptor's type and labels. For example, the following filter returns + // only Google Compute Engine descriptors that have an `id` label: + // + // resource.type = starts_with("gce_") AND resource.label:id + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // A positive number that is the maximum number of results to return. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListMonitoredResourceDescriptorsRequest) Reset() { + *x = ListMonitoredResourceDescriptorsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListMonitoredResourceDescriptorsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListMonitoredResourceDescriptorsRequest) ProtoMessage() {} + +func (x *ListMonitoredResourceDescriptorsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListMonitoredResourceDescriptorsRequest.ProtoReflect.Descriptor instead. +func (*ListMonitoredResourceDescriptorsRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{0} +} + +func (x *ListMonitoredResourceDescriptorsRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListMonitoredResourceDescriptorsRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListMonitoredResourceDescriptorsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListMonitoredResourceDescriptorsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The `ListMonitoredResourceDescriptors` response. +type ListMonitoredResourceDescriptorsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The monitored resource descriptors that are available to this project + // and that match `filter`, if present. + ResourceDescriptors []*monitoredres.MonitoredResourceDescriptor `protobuf:"bytes,1,rep,name=resource_descriptors,json=resourceDescriptors,proto3" json:"resource_descriptors,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. 
To see the additional results, + // use that value as `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListMonitoredResourceDescriptorsResponse) Reset() { + *x = ListMonitoredResourceDescriptorsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListMonitoredResourceDescriptorsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListMonitoredResourceDescriptorsResponse) ProtoMessage() {} + +func (x *ListMonitoredResourceDescriptorsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListMonitoredResourceDescriptorsResponse.ProtoReflect.Descriptor instead. +func (*ListMonitoredResourceDescriptorsResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ListMonitoredResourceDescriptorsResponse) GetResourceDescriptors() []*monitoredres.MonitoredResourceDescriptor { + if x != nil { + return x.ResourceDescriptors + } + return nil +} + +func (x *ListMonitoredResourceDescriptorsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The `GetMonitoredResourceDescriptor` request. +type GetMonitoredResourceDescriptorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The monitored resource descriptor to get. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/monitoredResourceDescriptors/[RESOURCE_TYPE] + // + // The `[RESOURCE_TYPE]` is a predefined type, such as + // `cloudsql_database`. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetMonitoredResourceDescriptorRequest) Reset() { + *x = GetMonitoredResourceDescriptorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMonitoredResourceDescriptorRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMonitoredResourceDescriptorRequest) ProtoMessage() {} + +func (x *GetMonitoredResourceDescriptorRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMonitoredResourceDescriptorRequest.ProtoReflect.Descriptor instead. +func (*GetMonitoredResourceDescriptorRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{2} +} + +func (x *GetMonitoredResourceDescriptorRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `ListMetricDescriptors` request. 
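+//
+// A minimal construction sketch (not from the generated code; the project
+// ID, filter, and page size below are assumed values for illustration):
+//
+//	req := &monitoringpb.ListMetricDescriptorsRequest{
+//		Name:     "projects/my-project",
+//		Filter:   `metric.type = starts_with("custom.googleapis.com/")`,
+//		PageSize: 100,
+//	}
+//
+// Each response carries a `next_page_token`; passing it back as
+// `page_token` retrieves the next page.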
+type ListMetricDescriptorsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which
+ // to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
+ // If this field is empty, all custom and
+ // system-defined metric descriptors are returned.
+ // Otherwise, the [filter](https://cloud.google.com/monitoring/api/v3/filters)
+ // specifies which metric descriptors are to be
+ // returned. For example, the following filter matches all
+ // [custom metrics](https://cloud.google.com/monitoring/custom-metrics):
+ //
+ // metric.type = starts_with("custom.googleapis.com/")
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // A positive number that is the maximum number of results to return. The
+ // default and maximum value is 10,000. If a page_size <= 0 or > 10,000 is
+ // submitted, the request returns at most 10,000 results.
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListMetricDescriptorsRequest) Reset() {
+ *x = ListMetricDescriptorsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListMetricDescriptorsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListMetricDescriptorsRequest) ProtoMessage() {}
+
+func (x *ListMetricDescriptorsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListMetricDescriptorsRequest.ProtoReflect.Descriptor instead.
+func (*ListMetricDescriptorsRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *ListMetricDescriptorsRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ListMetricDescriptorsRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListMetricDescriptorsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListMetricDescriptorsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The `ListMetricDescriptors` response.
+type ListMetricDescriptorsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The metric descriptors that are available to the project
+ // and that match the value of `filter`, if present.
+ MetricDescriptors []*metric.MetricDescriptor `protobuf:"bytes,1,rep,name=metric_descriptors,json=metricDescriptors,proto3" json:"metric_descriptors,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListMetricDescriptorsResponse) Reset() { + *x = ListMetricDescriptorsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListMetricDescriptorsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListMetricDescriptorsResponse) ProtoMessage() {} + +func (x *ListMetricDescriptorsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListMetricDescriptorsResponse.ProtoReflect.Descriptor instead. +func (*ListMetricDescriptorsResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{4} +} + +func (x *ListMetricDescriptorsResponse) GetMetricDescriptors() []*metric.MetricDescriptor { + if x != nil { + return x.MetricDescriptors + } + return nil +} + +func (x *ListMetricDescriptorsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The `GetMetricDescriptor` request. +type GetMetricDescriptorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The metric descriptor on which to execute the request. The format + // is: + // + // projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] + // + // An example value of `[METRIC_ID]` is + // `"compute.googleapis.com/instance/disk/read_bytes_count"`. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetMetricDescriptorRequest) Reset() { + *x = GetMetricDescriptorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetricDescriptorRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetricDescriptorRequest) ProtoMessage() {} + +func (x *GetMetricDescriptorRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetricDescriptorRequest.ProtoReflect.Descriptor instead. +func (*GetMetricDescriptorRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{5} +} + +func (x *GetMetricDescriptorRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `CreateMetricDescriptor` request. 
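+//
+// A minimal construction sketch (assumed values; the custom metric shown
+// is hypothetical, and the enum constants come from the imported `metric`
+// package):
+//
+//	req := &monitoringpb.CreateMetricDescriptorRequest{
+//		Name: "projects/my-project",
+//		MetricDescriptor: &metric.MetricDescriptor{
+//			Type:       "custom.googleapis.com/my_test_metric",
+//			MetricKind: metric.MetricDescriptor_GAUGE,
+//			ValueType:  metric.MetricDescriptor_DOUBLE,
+//		},
+//	}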
+type CreateMetricDescriptorRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which
+ // to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. The new [custom
+ // metric](https://cloud.google.com/monitoring/custom-metrics) descriptor.
+ MetricDescriptor *metric.MetricDescriptor `protobuf:"bytes,2,opt,name=metric_descriptor,json=metricDescriptor,proto3" json:"metric_descriptor,omitempty"`
+}
+
+func (x *CreateMetricDescriptorRequest) Reset() {
+ *x = CreateMetricDescriptorRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CreateMetricDescriptorRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateMetricDescriptorRequest) ProtoMessage() {}
+
+func (x *CreateMetricDescriptorRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateMetricDescriptorRequest.ProtoReflect.Descriptor instead.
+func (*CreateMetricDescriptorRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *CreateMetricDescriptorRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *CreateMetricDescriptorRequest) GetMetricDescriptor() *metric.MetricDescriptor {
+ if x != nil {
+ return x.MetricDescriptor
+ }
+ return nil
+}
+
+// The `DeleteMetricDescriptor` request.
+type DeleteMetricDescriptorRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The metric descriptor on which to execute the request. The format
+ // is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID]
+ //
+ // An example of `[METRIC_ID]` is:
+ // `"custom.googleapis.com/my_test_metric"`.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *DeleteMetricDescriptorRequest) Reset() {
+ *x = DeleteMetricDescriptorRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeleteMetricDescriptorRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteMetricDescriptorRequest) ProtoMessage() {}
+
+func (x *DeleteMetricDescriptorRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteMetricDescriptorRequest.ProtoReflect.Descriptor instead.
+func (*DeleteMetricDescriptorRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *DeleteMetricDescriptorRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The `ListTimeSeries` request.
+type ListTimeSeriesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name),
+ // organization, or folder on which to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ // organizations/[ORGANIZATION_ID]
+ // folders/[FOLDER_ID]
+ Name string `protobuf:"bytes,10,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. A [monitoring
+ // filter](https://cloud.google.com/monitoring/api/v3/filters) that specifies
+ // which time series should be returned. The filter must specify a single
+ // metric type, and can additionally specify metric labels and other
+ // information. For example:
+ //
+ // metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND
+ // metric.labels.instance_name = "my-instance-name"
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Required. The time interval for which results should be returned. Only time
+ // series that contain data points in the specified interval are included in
+ // the response.
+ Interval *TimeInterval `protobuf:"bytes,4,opt,name=interval,proto3" json:"interval,omitempty"`
+ // Specifies the alignment of data points in individual time series as
+ // well as how to combine the retrieved time series across specified labels.
+ //
+ // By default (if no `aggregation` is explicitly specified), the raw time
+ // series data is returned.
+ Aggregation *Aggregation `protobuf:"bytes,5,opt,name=aggregation,proto3" json:"aggregation,omitempty"`
+ // Apply a second aggregation after `aggregation` is applied. May only be
+ // specified if `aggregation` is specified.
+ SecondaryAggregation *Aggregation `protobuf:"bytes,11,opt,name=secondary_aggregation,json=secondaryAggregation,proto3" json:"secondary_aggregation,omitempty"`
+ // Unsupported: must be left blank. The points in each time series are
+ // currently returned in reverse time order (most recent to oldest).
+ OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"`
+ // Required. Specifies which information is returned about the time series.
+ View ListTimeSeriesRequest_TimeSeriesView `protobuf:"varint,7,opt,name=view,proto3,enum=google.monitoring.v3.ListTimeSeriesRequest_TimeSeriesView" json:"view,omitempty"`
+ // A positive number that is the maximum number of results to return. If
+ // `page_size` is empty or greater than 100,000, the effective
+ // `page_size` is 100,000. If `view` is set to `FULL`, this is the
+ // maximum number of `Points` returned. If `view` is set to `HEADERS`, this is
+ // the maximum number of `TimeSeries` returned.
+ PageSize int32 `protobuf:"varint,8,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ PageToken string `protobuf:"bytes,9,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListTimeSeriesRequest) Reset() { + *x = ListTimeSeriesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListTimeSeriesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListTimeSeriesRequest) ProtoMessage() {} + +func (x *ListTimeSeriesRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListTimeSeriesRequest.ProtoReflect.Descriptor instead. +func (*ListTimeSeriesRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{8} +} + +func (x *ListTimeSeriesRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListTimeSeriesRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListTimeSeriesRequest) GetInterval() *TimeInterval { + if x != nil { + return x.Interval + } + return nil +} + +func (x *ListTimeSeriesRequest) GetAggregation() *Aggregation { + if x != nil { + return x.Aggregation + } + return nil +} + +func (x *ListTimeSeriesRequest) GetSecondaryAggregation() *Aggregation { + if x != nil { + return x.SecondaryAggregation + } + return nil +} + +func (x *ListTimeSeriesRequest) GetOrderBy() string { + if x != nil { + return x.OrderBy + } + return "" +} + +func (x *ListTimeSeriesRequest) GetView() ListTimeSeriesRequest_TimeSeriesView { + if x != nil { + return x.View + } + return ListTimeSeriesRequest_FULL +} + +func (x *ListTimeSeriesRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListTimeSeriesRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The `ListTimeSeries` response. +type ListTimeSeriesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // One or more time series that match the filter included in the request. + TimeSeries []*TimeSeries `protobuf:"bytes,1,rep,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // Query execution errors that may have caused the time series data returned + // to be incomplete. + ExecutionErrors []*status.Status `protobuf:"bytes,3,rep,name=execution_errors,json=executionErrors,proto3" json:"execution_errors,omitempty"` + // The unit in which all `time_series` point values are reported. `unit` + // follows the UCUM format for units as seen in + // https://unitsofmeasure.org/ucum.html. + // If different `time_series` have different units (for example, because they + // come from different metric types, or a unit is absent), then `unit` will be + // "{not_a_unit}". 
+ Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` +} + +func (x *ListTimeSeriesResponse) Reset() { + *x = ListTimeSeriesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListTimeSeriesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListTimeSeriesResponse) ProtoMessage() {} + +func (x *ListTimeSeriesResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListTimeSeriesResponse.ProtoReflect.Descriptor instead. +func (*ListTimeSeriesResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{9} +} + +func (x *ListTimeSeriesResponse) GetTimeSeries() []*TimeSeries { + if x != nil { + return x.TimeSeries + } + return nil +} + +func (x *ListTimeSeriesResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListTimeSeriesResponse) GetExecutionErrors() []*status.Status { + if x != nil { + return x.ExecutionErrors + } + return nil +} + +func (x *ListTimeSeriesResponse) GetUnit() string { + if x != nil { + return x.Unit + } + return "" +} + +// The `CreateTimeSeries` request. +type CreateTimeSeriesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which + // to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Required. The new data to be added to a list of time series. + // Adds at most one data point to each of several time series. The new data + // point must be more recent than any other point in its time series. Each + // `TimeSeries` value must fully specify a unique time series by supplying + // all label values for the metric and the monitored resource. + // + // The maximum number of `TimeSeries` objects per `Create` request is 200. + TimeSeries []*TimeSeries `protobuf:"bytes,2,rep,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` +} + +func (x *CreateTimeSeriesRequest) Reset() { + *x = CreateTimeSeriesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateTimeSeriesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateTimeSeriesRequest) ProtoMessage() {} + +func (x *CreateTimeSeriesRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateTimeSeriesRequest.ProtoReflect.Descriptor instead. 
+func (*CreateTimeSeriesRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{10} +} + +func (x *CreateTimeSeriesRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CreateTimeSeriesRequest) GetTimeSeries() []*TimeSeries { + if x != nil { + return x.TimeSeries + } + return nil +} + +// DEPRECATED. Used to hold per-time-series error status. +type CreateTimeSeriesError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // DEPRECATED. Time series ID that resulted in the `status` error. + // + // Deprecated: Marked as deprecated in google/monitoring/v3/metric_service.proto. + TimeSeries *TimeSeries `protobuf:"bytes,1,opt,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` + // DEPRECATED. The status of the requested write operation for `time_series`. + // + // Deprecated: Marked as deprecated in google/monitoring/v3/metric_service.proto. + Status *status.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` +} + +func (x *CreateTimeSeriesError) Reset() { + *x = CreateTimeSeriesError{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateTimeSeriesError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateTimeSeriesError) ProtoMessage() {} + +func (x *CreateTimeSeriesError) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateTimeSeriesError.ProtoReflect.Descriptor instead. +func (*CreateTimeSeriesError) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{11} +} + +// Deprecated: Marked as deprecated in google/monitoring/v3/metric_service.proto. +func (x *CreateTimeSeriesError) GetTimeSeries() *TimeSeries { + if x != nil { + return x.TimeSeries + } + return nil +} + +// Deprecated: Marked as deprecated in google/monitoring/v3/metric_service.proto. +func (x *CreateTimeSeriesError) GetStatus() *status.Status { + if x != nil { + return x.Status + } + return nil +} + +// Summary of the result of a failed request to write data to a time series. +type CreateTimeSeriesSummary struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of points in the request. + TotalPointCount int32 `protobuf:"varint,1,opt,name=total_point_count,json=totalPointCount,proto3" json:"total_point_count,omitempty"` + // The number of points that were successfully written. + SuccessPointCount int32 `protobuf:"varint,2,opt,name=success_point_count,json=successPointCount,proto3" json:"success_point_count,omitempty"` + // The number of points that failed to be written. Order is not guaranteed. 
+ Errors []*CreateTimeSeriesSummary_Error `protobuf:"bytes,3,rep,name=errors,proto3" json:"errors,omitempty"`
+}
+
+func (x *CreateTimeSeriesSummary) Reset() {
+ *x = CreateTimeSeriesSummary{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CreateTimeSeriesSummary) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateTimeSeriesSummary) ProtoMessage() {}
+
+func (x *CreateTimeSeriesSummary) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateTimeSeriesSummary.ProtoReflect.Descriptor instead.
+func (*CreateTimeSeriesSummary) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *CreateTimeSeriesSummary) GetTotalPointCount() int32 {
+ if x != nil {
+ return x.TotalPointCount
+ }
+ return 0
+}
+
+func (x *CreateTimeSeriesSummary) GetSuccessPointCount() int32 {
+ if x != nil {
+ return x.SuccessPointCount
+ }
+ return 0
+}
+
+func (x *CreateTimeSeriesSummary) GetErrors() []*CreateTimeSeriesSummary_Error {
+ if x != nil {
+ return x.Errors
+ }
+ return nil
+}
+
+// The `QueryTimeSeries` request.
+type QueryTimeSeriesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which
+ // to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. The query in the [Monitoring Query
+ // Language](https://cloud.google.com/monitoring/mql/reference) format.
+ // The default time zone is UTC.
+ Query string `protobuf:"bytes,7,opt,name=query,proto3" json:"query,omitempty"`
+ // A positive number that is the maximum number of time_series_data to return.
+ PageSize int32 `protobuf:"varint,9,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ PageToken string `protobuf:"bytes,10,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *QueryTimeSeriesRequest) Reset() {
+ *x = QueryTimeSeriesRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *QueryTimeSeriesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueryTimeSeriesRequest) ProtoMessage() {}
+
+func (x *QueryTimeSeriesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryTimeSeriesRequest.ProtoReflect.Descriptor instead.
+func (*QueryTimeSeriesRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *QueryTimeSeriesRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *QueryTimeSeriesRequest) GetQuery() string {
+ if x != nil {
+ return x.Query
+ }
+ return ""
+}
+
+func (x *QueryTimeSeriesRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *QueryTimeSeriesRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The `QueryTimeSeries` response.
+type QueryTimeSeriesResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The descriptor for the time series data.
+ TimeSeriesDescriptor *TimeSeriesDescriptor `protobuf:"bytes,8,opt,name=time_series_descriptor,json=timeSeriesDescriptor,proto3" json:"time_series_descriptor,omitempty"`
+ // The time series data.
+ TimeSeriesData []*TimeSeriesData `protobuf:"bytes,9,rep,name=time_series_data,json=timeSeriesData,proto3" json:"time_series_data,omitempty"`
+ // If there are more results than have been returned, then this field is set
+ // to a non-empty value. To see the additional results, use that value as
+ // `page_token` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,10,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ // Query execution errors that may have caused the time series data returned
+ // to be incomplete. Any data that is available is included in the
+ // response.
+ PartialErrors []*status.Status `protobuf:"bytes,11,rep,name=partial_errors,json=partialErrors,proto3" json:"partial_errors,omitempty"` +} + +func (x *QueryTimeSeriesResponse) Reset() { + *x = QueryTimeSeriesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryTimeSeriesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryTimeSeriesResponse) ProtoMessage() {} + +func (x *QueryTimeSeriesResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryTimeSeriesResponse.ProtoReflect.Descriptor instead. +func (*QueryTimeSeriesResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{14} +} + +func (x *QueryTimeSeriesResponse) GetTimeSeriesDescriptor() *TimeSeriesDescriptor { + if x != nil { + return x.TimeSeriesDescriptor + } + return nil +} + +func (x *QueryTimeSeriesResponse) GetTimeSeriesData() []*TimeSeriesData { + if x != nil { + return x.TimeSeriesData + } + return nil +} + +func (x *QueryTimeSeriesResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *QueryTimeSeriesResponse) GetPartialErrors() []*status.Status { + if x != nil { + return x.PartialErrors + } + return nil +} + +// This is an error detail intended to be used with INVALID_ARGUMENT errors. +type QueryErrorList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Errors in parsing the time series query language text. The number of errors + // in the response may be limited. + Errors []*QueryError `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"` + // A summary of all the errors. + ErrorSummary string `protobuf:"bytes,2,opt,name=error_summary,json=errorSummary,proto3" json:"error_summary,omitempty"` +} + +func (x *QueryErrorList) Reset() { + *x = QueryErrorList{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryErrorList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryErrorList) ProtoMessage() {} + +func (x *QueryErrorList) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryErrorList.ProtoReflect.Descriptor instead. +func (*QueryErrorList) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{15} +} + +func (x *QueryErrorList) GetErrors() []*QueryError { + if x != nil { + return x.Errors + } + return nil +} + +func (x *QueryErrorList) GetErrorSummary() string { + if x != nil { + return x.ErrorSummary + } + return "" +} + +// Detailed information about an error category. 
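+//
+// A reading sketch (assumes `summary` is a *CreateTimeSeriesSummary pulled
+// from an error's details; the loop and output are illustrative only):
+//
+//	for _, e := range summary.GetErrors() {
+//		// Each entry groups the rejected points that share one status.
+//		fmt.Printf("%d points failed: %v\n", e.GetPointCount(), e.GetStatus())
+//	}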
+type CreateTimeSeriesSummary_Error struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status of the requested write operation. + Status *status.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + // The number of points that couldn't be written because of `status`. + PointCount int32 `protobuf:"varint,2,opt,name=point_count,json=pointCount,proto3" json:"point_count,omitempty"` +} + +func (x *CreateTimeSeriesSummary_Error) Reset() { + *x = CreateTimeSeriesSummary_Error{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateTimeSeriesSummary_Error) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateTimeSeriesSummary_Error) ProtoMessage() {} + +func (x *CreateTimeSeriesSummary_Error) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateTimeSeriesSummary_Error.ProtoReflect.Descriptor instead. +func (*CreateTimeSeriesSummary_Error) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{12, 0} +} + +func (x *CreateTimeSeriesSummary_Error) GetStatus() *status.Status { + if x != nil { + return x.Status + } + return nil +} + +func (x *CreateTimeSeriesSummary_Error) GetPointCount() int32 { + if x != nil { + return x.PointCount + } + return 0 +} + +var File_google_monitoring_v3_metric_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_metric_service_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, + 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 
0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, + 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd0, + 0x01, 0x0a, 0x27, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x51, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x3d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x37, + 0x12, 0x35, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, + 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x22, 0xae, 0x01, 0x0a, 0x28, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, + 0x0a, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x13, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, + 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x22, 0x7a, 0x0a, 0x25, 0x47, 0x65, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x51, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x3d, 0xe0, 0x41, 0x02, 0xfa, 0x41, + 0x37, 0x0a, 0x35, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 
0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xba, + 0x01, 0x0a, 0x1c, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x46, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x32, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x2c, 0x12, 0x2a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, + 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x94, 0x01, 0x0a, 0x1d, + 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, + 0x12, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x11, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, + 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x22, 0x64, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x46, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x32, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2c, 0x0a, 0x2a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xb7, 0x01, 0x0a, 0x1d, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x46, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x32, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2c, + 0x12, 0x2a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 
0x65, 0x12, 0x4e, 0x0a, 0x11, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x10, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x22, 0x67, 0x0a, 0x1d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x46, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x32, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2c, 0x0a, 0x2a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xad, 0x04, 0x0a, 0x15, + 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x2c, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x26, 0x12, 0x24, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x43, 0x0a, 0x0b, 0x61, 0x67, 0x67, + 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x0b, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x56, + 0x0a, 0x15, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x61, 0x67, 0x67, 0x72, + 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x14, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x41, 0x67, 0x67, 0x72, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, + 0x62, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x42, + 0x79, 0x12, 0x53, 0x0a, 0x04, 0x76, 0x69, 0x65, 0x77, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x56, 0x69, 0x65, 0x77, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x04, 0x76, 0x69, 0x65, 0x77, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x22, 0x27, 0x0a, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, + 0x56, 0x69, 0x65, 0x77, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x00, 0x12, 0x0b, + 0x0a, 0x07, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x53, 0x10, 0x01, 0x22, 0xd6, 0x01, 0x0a, 0x16, + 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x74, + 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, + 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x3d, 0x0a, 0x10, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x0f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, + 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x75, 0x6e, 0x69, 0x74, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x47, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x22, 0x8e, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x45, 0x72, 0x72, 
0x6f, 0x72, 0x12, 0x45, 0x0a, 0x0b, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x22, 0x98, 0x02, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x2a, + 0x0a, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, + 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x73, 0x75, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, + 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x1a, 0x54, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, + 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x0a, 0x0b, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0a, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x88, 0x01, + 0x0a, 0x16, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x19, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1b, 0x0a, 0x09, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, + 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, + 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xae, 0x02, 0x0a, 0x17, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x16, 0x74, 0x69, 0x6d, 0x65, 0x5f, 
0x73, 0x65, 0x72, + 0x69, 0x65, 0x73, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x52, 0x14, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x4e, 0x0a, 0x10, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, + 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x39, + 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, + 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0d, 0x70, 0x61, 0x72, 0x74, + 0x69, 0x61, 0x6c, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x6f, 0x0a, 0x0e, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x06, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x06, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x32, 0xbc, 0x0f, 0x0a, 0x0d, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xe4, 0x01, 0x0a, + 0x20, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x73, 0x12, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x41, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, + 0x32, 
0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x73, 0x12, 0xcc, 0x01, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, + 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x44, 0xda, 0x41, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x12, 0x35, 0x2f, 0x76, 0x33, + 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x2a, + 0x2a, 0x7d, 0x12, 0xb8, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x32, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x29, 0x12, 0x27, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0xa0, 0x01, + 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x39, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 
0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x2a, 0x2a, 0x7d, + 0x12, 0xc8, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x33, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x5b, + 0xda, 0x41, 0x16, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3c, 0x3a, + 0x11, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x22, 0x27, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0xa0, 0x01, 0x0a, 0x16, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x22, 0x39, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x2c, 0x2a, 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0x12, 0xfe, + 0x01, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, + 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x90, 0x01, 0xda, + 0x41, 0x19, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2c, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x2c, 0x76, 0x69, 0x65, 0x77, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x6e, 0x5a, 0x27, 0x12, 0x25, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6f, + 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, + 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 
0x73, 0x5a, 0x21, 0x12, 0x1f, 0x2f, 0x76, + 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, + 0x2a, 0x7d, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x20, 0x2f, + 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, + 0x99, 0x01, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, + 0x72, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x3e, 0xda, 0x41, 0x10, + 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x3a, 0x01, 0x2a, 0x22, 0x20, 0x2f, 0x76, 0x33, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, + 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0xae, 0x01, 0x0a, 0x17, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x4c, + 0xda, 0x41, 0x10, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x69, 0x65, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x33, 0x3a, 0x01, 0x2a, 0x22, 0x2e, 0x2f, 0x76, + 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x3a, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0xda, 0x01, 0xca, + 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0xba, 0x01, 0x68, + 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, + 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2c, 0x68, 0x74, 0x74, 0x70, 
0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0x89, 0x08, 0xea, 0x41, 0xf0, 0x01, + 0x0a, 0x2a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x3b, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, + 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x45, 0x6f, 0x72, 0x67, 0x61, 0x6e, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3d, 0x2a, 0x2a, 0x7d, + 0x12, 0x39, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, + 0x72, 0x7d, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x01, 0x2a, 0x20, 0x01, + 0xea, 0x41, 0xb7, 0x02, 0x0a, 0x35, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x4f, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x59, 0x6f, 0x72, + 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, + 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x4d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, + 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, + 0x64, 
0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x01, 0x2a, 0x20, 0x01, 0xea, 0x41, 0x51, 0x0a, 0x23, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x57, 0x6f, 0x72, 0x6b, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x12, 0x12, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x12, 0x16, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0xea, + 0x41, 0xb5, 0x01, 0x0a, 0x24, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, + 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2b, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x74, 0x69, + 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x7d, 0x12, 0x35, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x2f, + 0x7b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x7d, 0x12, 0x29, 0x66, + 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, + 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x69, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x7d, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x42, 0x12, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, + 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_metric_service_proto_rawDescOnce sync.Once + file_google_monitoring_v3_metric_service_proto_rawDescData = file_google_monitoring_v3_metric_service_proto_rawDesc +) + +func file_google_monitoring_v3_metric_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_metric_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_metric_service_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_google_monitoring_v3_metric_service_proto_rawDescData) + }) + return file_google_monitoring_v3_metric_service_proto_rawDescData +} + +var file_google_monitoring_v3_metric_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_monitoring_v3_metric_service_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_google_monitoring_v3_metric_service_proto_goTypes = []any{ + (ListTimeSeriesRequest_TimeSeriesView)(0), // 0: google.monitoring.v3.ListTimeSeriesRequest.TimeSeriesView + (*ListMonitoredResourceDescriptorsRequest)(nil), // 1: google.monitoring.v3.ListMonitoredResourceDescriptorsRequest + (*ListMonitoredResourceDescriptorsResponse)(nil), // 2: google.monitoring.v3.ListMonitoredResourceDescriptorsResponse + (*GetMonitoredResourceDescriptorRequest)(nil), // 3: google.monitoring.v3.GetMonitoredResourceDescriptorRequest + (*ListMetricDescriptorsRequest)(nil), // 4: google.monitoring.v3.ListMetricDescriptorsRequest + (*ListMetricDescriptorsResponse)(nil), // 5: google.monitoring.v3.ListMetricDescriptorsResponse + (*GetMetricDescriptorRequest)(nil), // 6: google.monitoring.v3.GetMetricDescriptorRequest + (*CreateMetricDescriptorRequest)(nil), // 7: google.monitoring.v3.CreateMetricDescriptorRequest + (*DeleteMetricDescriptorRequest)(nil), // 8: google.monitoring.v3.DeleteMetricDescriptorRequest + (*ListTimeSeriesRequest)(nil), // 9: google.monitoring.v3.ListTimeSeriesRequest + (*ListTimeSeriesResponse)(nil), // 10: google.monitoring.v3.ListTimeSeriesResponse + (*CreateTimeSeriesRequest)(nil), // 11: google.monitoring.v3.CreateTimeSeriesRequest + (*CreateTimeSeriesError)(nil), // 12: google.monitoring.v3.CreateTimeSeriesError + (*CreateTimeSeriesSummary)(nil), // 13: google.monitoring.v3.CreateTimeSeriesSummary + (*QueryTimeSeriesRequest)(nil), // 14: google.monitoring.v3.QueryTimeSeriesRequest + (*QueryTimeSeriesResponse)(nil), // 15: google.monitoring.v3.QueryTimeSeriesResponse + (*QueryErrorList)(nil), // 16: google.monitoring.v3.QueryErrorList + (*CreateTimeSeriesSummary_Error)(nil), // 17: google.monitoring.v3.CreateTimeSeriesSummary.Error + (*monitoredres.MonitoredResourceDescriptor)(nil), // 18: google.api.MonitoredResourceDescriptor + (*metric.MetricDescriptor)(nil), // 19: google.api.MetricDescriptor + (*TimeInterval)(nil), // 20: google.monitoring.v3.TimeInterval + (*Aggregation)(nil), // 21: google.monitoring.v3.Aggregation + (*TimeSeries)(nil), // 22: google.monitoring.v3.TimeSeries + (*status.Status)(nil), // 23: google.rpc.Status + (*TimeSeriesDescriptor)(nil), // 24: google.monitoring.v3.TimeSeriesDescriptor + (*TimeSeriesData)(nil), // 25: google.monitoring.v3.TimeSeriesData + (*QueryError)(nil), // 26: google.monitoring.v3.QueryError + (*emptypb.Empty)(nil), // 27: google.protobuf.Empty +} +var file_google_monitoring_v3_metric_service_proto_depIdxs = []int32{ + 18, // 0: google.monitoring.v3.ListMonitoredResourceDescriptorsResponse.resource_descriptors:type_name -> google.api.MonitoredResourceDescriptor + 19, // 1: google.monitoring.v3.ListMetricDescriptorsResponse.metric_descriptors:type_name -> google.api.MetricDescriptor + 19, // 2: google.monitoring.v3.CreateMetricDescriptorRequest.metric_descriptor:type_name -> google.api.MetricDescriptor + 20, // 3: google.monitoring.v3.ListTimeSeriesRequest.interval:type_name -> google.monitoring.v3.TimeInterval + 21, // 4: google.monitoring.v3.ListTimeSeriesRequest.aggregation:type_name -> google.monitoring.v3.Aggregation + 21, // 5: 
google.monitoring.v3.ListTimeSeriesRequest.secondary_aggregation:type_name -> google.monitoring.v3.Aggregation + 0, // 6: google.monitoring.v3.ListTimeSeriesRequest.view:type_name -> google.monitoring.v3.ListTimeSeriesRequest.TimeSeriesView + 22, // 7: google.monitoring.v3.ListTimeSeriesResponse.time_series:type_name -> google.monitoring.v3.TimeSeries + 23, // 8: google.monitoring.v3.ListTimeSeriesResponse.execution_errors:type_name -> google.rpc.Status + 22, // 9: google.monitoring.v3.CreateTimeSeriesRequest.time_series:type_name -> google.monitoring.v3.TimeSeries + 22, // 10: google.monitoring.v3.CreateTimeSeriesError.time_series:type_name -> google.monitoring.v3.TimeSeries + 23, // 11: google.monitoring.v3.CreateTimeSeriesError.status:type_name -> google.rpc.Status + 17, // 12: google.monitoring.v3.CreateTimeSeriesSummary.errors:type_name -> google.monitoring.v3.CreateTimeSeriesSummary.Error + 24, // 13: google.monitoring.v3.QueryTimeSeriesResponse.time_series_descriptor:type_name -> google.monitoring.v3.TimeSeriesDescriptor + 25, // 14: google.monitoring.v3.QueryTimeSeriesResponse.time_series_data:type_name -> google.monitoring.v3.TimeSeriesData + 23, // 15: google.monitoring.v3.QueryTimeSeriesResponse.partial_errors:type_name -> google.rpc.Status + 26, // 16: google.monitoring.v3.QueryErrorList.errors:type_name -> google.monitoring.v3.QueryError + 23, // 17: google.monitoring.v3.CreateTimeSeriesSummary.Error.status:type_name -> google.rpc.Status + 1, // 18: google.monitoring.v3.MetricService.ListMonitoredResourceDescriptors:input_type -> google.monitoring.v3.ListMonitoredResourceDescriptorsRequest + 3, // 19: google.monitoring.v3.MetricService.GetMonitoredResourceDescriptor:input_type -> google.monitoring.v3.GetMonitoredResourceDescriptorRequest + 4, // 20: google.monitoring.v3.MetricService.ListMetricDescriptors:input_type -> google.monitoring.v3.ListMetricDescriptorsRequest + 6, // 21: google.monitoring.v3.MetricService.GetMetricDescriptor:input_type -> google.monitoring.v3.GetMetricDescriptorRequest + 7, // 22: google.monitoring.v3.MetricService.CreateMetricDescriptor:input_type -> google.monitoring.v3.CreateMetricDescriptorRequest + 8, // 23: google.monitoring.v3.MetricService.DeleteMetricDescriptor:input_type -> google.monitoring.v3.DeleteMetricDescriptorRequest + 9, // 24: google.monitoring.v3.MetricService.ListTimeSeries:input_type -> google.monitoring.v3.ListTimeSeriesRequest + 11, // 25: google.monitoring.v3.MetricService.CreateTimeSeries:input_type -> google.monitoring.v3.CreateTimeSeriesRequest + 11, // 26: google.monitoring.v3.MetricService.CreateServiceTimeSeries:input_type -> google.monitoring.v3.CreateTimeSeriesRequest + 2, // 27: google.monitoring.v3.MetricService.ListMonitoredResourceDescriptors:output_type -> google.monitoring.v3.ListMonitoredResourceDescriptorsResponse + 18, // 28: google.monitoring.v3.MetricService.GetMonitoredResourceDescriptor:output_type -> google.api.MonitoredResourceDescriptor + 5, // 29: google.monitoring.v3.MetricService.ListMetricDescriptors:output_type -> google.monitoring.v3.ListMetricDescriptorsResponse + 19, // 30: google.monitoring.v3.MetricService.GetMetricDescriptor:output_type -> google.api.MetricDescriptor + 19, // 31: google.monitoring.v3.MetricService.CreateMetricDescriptor:output_type -> google.api.MetricDescriptor + 27, // 32: google.monitoring.v3.MetricService.DeleteMetricDescriptor:output_type -> google.protobuf.Empty + 10, // 33: google.monitoring.v3.MetricService.ListTimeSeries:output_type -> 
google.monitoring.v3.ListTimeSeriesResponse + 27, // 34: google.monitoring.v3.MetricService.CreateTimeSeries:output_type -> google.protobuf.Empty + 27, // 35: google.monitoring.v3.MetricService.CreateServiceTimeSeries:output_type -> google.protobuf.Empty + 27, // [27:36] is the sub-list for method output_type + 18, // [18:27] is the sub-list for method input_type + 18, // [18:18] is the sub-list for extension type_name + 18, // [18:18] is the sub-list for extension extendee + 0, // [0:18] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_metric_service_proto_init() } +func file_google_monitoring_v3_metric_service_proto_init() { + if File_google_monitoring_v3_metric_service_proto != nil { + return + } + file_google_monitoring_v3_common_proto_init() + file_google_monitoring_v3_metric_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_metric_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*ListMonitoredResourceDescriptorsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ListMonitoredResourceDescriptorsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*GetMonitoredResourceDescriptorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*ListMetricDescriptorsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*ListMetricDescriptorsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*GetMetricDescriptorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*CreateMetricDescriptorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*DeleteMetricDescriptorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*ListTimeSeriesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*ListTimeSeriesResponse); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*CreateTimeSeriesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*CreateTimeSeriesError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*CreateTimeSeriesSummary); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*QueryTimeSeriesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[14].Exporter = func(v any, i int) any { + switch v := v.(*QueryTimeSeriesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[15].Exporter = func(v any, i int) any { + switch v := v.(*QueryErrorList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[16].Exporter = func(v any, i int) any { + switch v := v.(*CreateTimeSeriesSummary_Error); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_metric_service_proto_rawDesc, + NumEnums: 1, + NumMessages: 17, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_metric_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_metric_service_proto_depIdxs, + EnumInfos: file_google_monitoring_v3_metric_service_proto_enumTypes, + MessageInfos: file_google_monitoring_v3_metric_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_metric_service_proto = out.File + file_google_monitoring_v3_metric_service_proto_rawDesc = nil + file_google_monitoring_v3_metric_service_proto_goTypes = nil + file_google_monitoring_v3_metric_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// MetricServiceClient is the client API for MetricService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MetricServiceClient interface { + // Lists monitored resource descriptors that match a filter. 
+ ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) + // Gets a single monitored resource descriptor. + GetMonitoredResourceDescriptor(ctx context.Context, in *GetMonitoredResourceDescriptorRequest, opts ...grpc.CallOption) (*monitoredres.MonitoredResourceDescriptor, error) + // Lists metric descriptors that match a filter. + ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error) + // Gets a single metric descriptor. + GetMetricDescriptor(ctx context.Context, in *GetMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) + // Creates a new metric descriptor. + // The creation is executed asynchronously. + // User-created metric descriptors define + // [custom metrics](https://cloud.google.com/monitoring/custom-metrics). + // The metric descriptor is updated if it already exists, + // except that metric labels are never removed. + CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) + // Deletes a metric descriptor. Only user-created + // [custom metrics](https://cloud.google.com/monitoring/custom-metrics) can be + // deleted. + DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Lists time series that match a filter. + ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error) + // Creates or adds data to one or more time series. + // The response is empty if all time series in the request were written. + // If any time series could not be written, a corresponding failure message is + // included in the error response. + // This method does not support + // [resource locations constraint of an organization + // policy](https://cloud.google.com/resource-manager/docs/organization-policy/defining-locations#setting_the_organization_policy). + CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Creates or adds data to one or more service time series. A service time + // series is a time series for a metric from a Google Cloud service. The + // response is empty if all time series in the request were written. If any + // time series could not be written, a corresponding failure message is + // included in the error response. This endpoint rejects writes to + // user-defined metrics. + // This method is only for use by Google Cloud services. Use + // [projects.timeSeries.create][google.monitoring.v3.MetricService.CreateTimeSeries] + // instead. + CreateServiceTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type metricServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewMetricServiceClient(cc grpc.ClientConnInterface) MetricServiceClient { + return &metricServiceClient{cc} +} + +func (c *metricServiceClient) ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) { + out := new(ListMonitoredResourceDescriptorsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) GetMonitoredResourceDescriptor(ctx context.Context, in *GetMonitoredResourceDescriptorRequest, opts ...grpc.CallOption) (*monitoredres.MonitoredResourceDescriptor, error) { + out := new(monitoredres.MonitoredResourceDescriptor) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error) { + out := new(ListMetricDescriptorsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListMetricDescriptors", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) GetMetricDescriptor(ctx context.Context, in *GetMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) { + out := new(metric.MetricDescriptor) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/GetMetricDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) { + out := new(metric.MetricDescriptor) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateMetricDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error) { + out := new(ListTimeSeriesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListTimeSeries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateTimeSeries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) CreateServiceTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateServiceTimeSeries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MetricServiceServer is the server API for MetricService service. +type MetricServiceServer interface { + // Lists monitored resource descriptors that match a filter. + ListMonitoredResourceDescriptors(context.Context, *ListMonitoredResourceDescriptorsRequest) (*ListMonitoredResourceDescriptorsResponse, error) + // Gets a single monitored resource descriptor. 
+ GetMonitoredResourceDescriptor(context.Context, *GetMonitoredResourceDescriptorRequest) (*monitoredres.MonitoredResourceDescriptor, error) + // Lists metric descriptors that match a filter. + ListMetricDescriptors(context.Context, *ListMetricDescriptorsRequest) (*ListMetricDescriptorsResponse, error) + // Gets a single metric descriptor. + GetMetricDescriptor(context.Context, *GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) + // Creates a new metric descriptor. + // The creation is executed asynchronously. + // User-created metric descriptors define + // [custom metrics](https://cloud.google.com/monitoring/custom-metrics). + // The metric descriptor is updated if it already exists, + // except that metric labels are never removed. + CreateMetricDescriptor(context.Context, *CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) + // Deletes a metric descriptor. Only user-created + // [custom metrics](https://cloud.google.com/monitoring/custom-metrics) can be + // deleted. + DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*emptypb.Empty, error) + // Lists time series that match a filter. + ListTimeSeries(context.Context, *ListTimeSeriesRequest) (*ListTimeSeriesResponse, error) + // Creates or adds data to one or more time series. + // The response is empty if all time series in the request were written. + // If any time series could not be written, a corresponding failure message is + // included in the error response. + // This method does not support + // [resource locations constraint of an organization + // policy](https://cloud.google.com/resource-manager/docs/organization-policy/defining-locations#setting_the_organization_policy). + CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error) + // Creates or adds data to one or more service time series. A service time + // series is a time series for a metric from a Google Cloud service. The + // response is empty if all time series in the request were written. If any + // time series could not be written, a corresponding failure message is + // included in the error response. This endpoint rejects writes to + // user-defined metrics. + // This method is only for use by Google Cloud services. Use + // [projects.timeSeries.create][google.monitoring.v3.MetricService.CreateTimeSeries] + // instead. + CreateServiceTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error) +} + +// UnimplementedMetricServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedMetricServiceServer struct { +} + +func (*UnimplementedMetricServiceServer) ListMonitoredResourceDescriptors(context.Context, *ListMonitoredResourceDescriptorsRequest) (*ListMonitoredResourceDescriptorsResponse, error) { + return nil, status1.Errorf(codes.Unimplemented, "method ListMonitoredResourceDescriptors not implemented") +} +func (*UnimplementedMetricServiceServer) GetMonitoredResourceDescriptor(context.Context, *GetMonitoredResourceDescriptorRequest) (*monitoredres.MonitoredResourceDescriptor, error) { + return nil, status1.Errorf(codes.Unimplemented, "method GetMonitoredResourceDescriptor not implemented") +} +func (*UnimplementedMetricServiceServer) ListMetricDescriptors(context.Context, *ListMetricDescriptorsRequest) (*ListMetricDescriptorsResponse, error) { + return nil, status1.Errorf(codes.Unimplemented, "method ListMetricDescriptors not implemented") +} +func (*UnimplementedMetricServiceServer) GetMetricDescriptor(context.Context, *GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) { + return nil, status1.Errorf(codes.Unimplemented, "method GetMetricDescriptor not implemented") +} +func (*UnimplementedMetricServiceServer) CreateMetricDescriptor(context.Context, *CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) { + return nil, status1.Errorf(codes.Unimplemented, "method CreateMetricDescriptor not implemented") +} +func (*UnimplementedMetricServiceServer) DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*emptypb.Empty, error) { + return nil, status1.Errorf(codes.Unimplemented, "method DeleteMetricDescriptor not implemented") +} +func (*UnimplementedMetricServiceServer) ListTimeSeries(context.Context, *ListTimeSeriesRequest) (*ListTimeSeriesResponse, error) { + return nil, status1.Errorf(codes.Unimplemented, "method ListTimeSeries not implemented") +} +func (*UnimplementedMetricServiceServer) CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error) { + return nil, status1.Errorf(codes.Unimplemented, "method CreateTimeSeries not implemented") +} +func (*UnimplementedMetricServiceServer) CreateServiceTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error) { + return nil, status1.Errorf(codes.Unimplemented, "method CreateServiceTimeSeries not implemented") +} + +func RegisterMetricServiceServer(s *grpc.Server, srv MetricServiceServer) { + s.RegisterService(&_MetricService_serviceDesc, srv) +} + +func _MetricService_ListMonitoredResourceDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMonitoredResourceDescriptorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).ListMonitoredResourceDescriptors(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).ListMonitoredResourceDescriptors(ctx, req.(*ListMonitoredResourceDescriptorsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_GetMonitoredResourceDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetMonitoredResourceDescriptorRequest) + if err := dec(in); err != nil { + return nil, 
err + } + if interceptor == nil { + return srv.(MetricServiceServer).GetMonitoredResourceDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).GetMonitoredResourceDescriptor(ctx, req.(*GetMonitoredResourceDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_ListMetricDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMetricDescriptorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).ListMetricDescriptors(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/ListMetricDescriptors", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).ListMetricDescriptors(ctx, req.(*ListMetricDescriptorsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_GetMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetMetricDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).GetMetricDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/GetMetricDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).GetMetricDescriptor(ctx, req.(*GetMetricDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_CreateMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateMetricDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).CreateMetricDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/CreateMetricDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).CreateMetricDescriptor(ctx, req.(*CreateMetricDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_DeleteMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteMetricDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).DeleteMetricDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).DeleteMetricDescriptor(ctx, req.(*DeleteMetricDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_ListTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTimeSeriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).ListTimeSeries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/ListTimeSeries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).ListTimeSeries(ctx, req.(*ListTimeSeriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_CreateTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateTimeSeriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).CreateTimeSeries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/CreateTimeSeries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).CreateTimeSeries(ctx, req.(*CreateTimeSeriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_CreateServiceTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateTimeSeriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).CreateServiceTimeSeries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/CreateServiceTimeSeries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).CreateServiceTimeSeries(ctx, req.(*CreateTimeSeriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _MetricService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.MetricService", + HandlerType: (*MetricServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListMonitoredResourceDescriptors", + Handler: _MetricService_ListMonitoredResourceDescriptors_Handler, + }, + { + MethodName: "GetMonitoredResourceDescriptor", + Handler: _MetricService_GetMonitoredResourceDescriptor_Handler, + }, + { + MethodName: "ListMetricDescriptors", + Handler: _MetricService_ListMetricDescriptors_Handler, + }, + { + MethodName: "GetMetricDescriptor", + Handler: _MetricService_GetMetricDescriptor_Handler, + }, + { + MethodName: "CreateMetricDescriptor", + Handler: _MetricService_CreateMetricDescriptor_Handler, + }, + { + MethodName: "DeleteMetricDescriptor", + Handler: _MetricService_DeleteMetricDescriptor_Handler, + }, + { + MethodName: "ListTimeSeries", + Handler: _MetricService_ListTimeSeries_Handler, + }, + { + MethodName: "CreateTimeSeries", + Handler: _MetricService_CreateTimeSeries_Handler, + }, + { + MethodName: "CreateServiceTimeSeries", + Handler: _MetricService_CreateServiceTimeSeries_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/metric_service.proto", +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go new file mode 100644 index 00000000000..643b244e4d3 --- 
/dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go @@ -0,0 +1,192 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/mutation_record.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Describes a change made to a configuration. +type MutationRecord struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // When the change occurred. + MutateTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=mutate_time,json=mutateTime,proto3" json:"mutate_time,omitempty"` + // The email address of the user making the change. + MutatedBy string `protobuf:"bytes,2,opt,name=mutated_by,json=mutatedBy,proto3" json:"mutated_by,omitempty"` +} + +func (x *MutationRecord) Reset() { + *x = MutationRecord{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_mutation_record_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MutationRecord) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MutationRecord) ProtoMessage() {} + +func (x *MutationRecord) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_mutation_record_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MutationRecord.ProtoReflect.Descriptor instead. 
+func (*MutationRecord) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_mutation_record_proto_rawDescGZIP(), []int{0} +} + +func (x *MutationRecord) GetMutateTime() *timestamppb.Timestamp { + if x != nil { + return x.MutateTime + } + return nil +} + +func (x *MutationRecord) GetMutatedBy() string { + if x != nil { + return x.MutatedBy + } + return "" +} + +var File_google_monitoring_v3_mutation_record_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_mutation_record_proto_rawDesc = []byte{ + 0x0a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x6c, 0x0a, 0x0e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x3b, 0x0a, 0x0b, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x64, 0x42, + 0x79, 0x42, 0xce, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x13, + 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, + 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, + 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_mutation_record_proto_rawDescOnce sync.Once + file_google_monitoring_v3_mutation_record_proto_rawDescData = file_google_monitoring_v3_mutation_record_proto_rawDesc +) + +func file_google_monitoring_v3_mutation_record_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_mutation_record_proto_rawDescOnce.Do(func() { + 
file_google_monitoring_v3_mutation_record_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_mutation_record_proto_rawDescData) + }) + return file_google_monitoring_v3_mutation_record_proto_rawDescData +} + +var file_google_monitoring_v3_mutation_record_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_monitoring_v3_mutation_record_proto_goTypes = []any{ + (*MutationRecord)(nil), // 0: google.monitoring.v3.MutationRecord + (*timestamppb.Timestamp)(nil), // 1: google.protobuf.Timestamp +} +var file_google_monitoring_v3_mutation_record_proto_depIdxs = []int32{ + 1, // 0: google.monitoring.v3.MutationRecord.mutate_time:type_name -> google.protobuf.Timestamp + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_mutation_record_proto_init() } +func file_google_monitoring_v3_mutation_record_proto_init() { + if File_google_monitoring_v3_mutation_record_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_mutation_record_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*MutationRecord); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_mutation_record_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_mutation_record_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_mutation_record_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_mutation_record_proto_msgTypes, + }.Build() + File_google_monitoring_v3_mutation_record_proto = out.File + file_google_monitoring_v3_mutation_record_proto_rawDesc = nil + file_google_monitoring_v3_mutation_record_proto_goTypes = nil + file_google_monitoring_v3_mutation_record_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go new file mode 100644 index 00000000000..603b5bcdde1 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go @@ -0,0 +1,647 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/notification.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + api "google.golang.org/genproto/googleapis/api" + _ "google.golang.org/genproto/googleapis/api/annotations" + label "google.golang.org/genproto/googleapis/api/label" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Indicates whether the channel has been verified or not. It is illegal +// to specify this field in a +// [`CreateNotificationChannel`][google.monitoring.v3.NotificationChannelService.CreateNotificationChannel] +// or an +// [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel] +// operation. +type NotificationChannel_VerificationStatus int32 + +const ( + // Sentinel value used to indicate that the state is unknown, omitted, or + // is not applicable (as in the case of channels that neither support + // nor require verification in order to function). + NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED NotificationChannel_VerificationStatus = 0 + // The channel has yet to be verified and requires verification to function. + // Note that this state also applies to the case where the verification + // process has been initiated by sending a verification code but where + // the verification code has not been submitted to complete the process. + NotificationChannel_UNVERIFIED NotificationChannel_VerificationStatus = 1 + // It has been proven that notifications can be received on this + // notification channel and that someone on the project has access + // to messages that are delivered to that channel. + NotificationChannel_VERIFIED NotificationChannel_VerificationStatus = 2 +) + +// Enum value maps for NotificationChannel_VerificationStatus. +var ( + NotificationChannel_VerificationStatus_name = map[int32]string{ + 0: "VERIFICATION_STATUS_UNSPECIFIED", + 1: "UNVERIFIED", + 2: "VERIFIED", + } + NotificationChannel_VerificationStatus_value = map[string]int32{ + "VERIFICATION_STATUS_UNSPECIFIED": 0, + "UNVERIFIED": 1, + "VERIFIED": 2, + } +) + +func (x NotificationChannel_VerificationStatus) Enum() *NotificationChannel_VerificationStatus { + p := new(NotificationChannel_VerificationStatus) + *p = x + return p +} + +func (x NotificationChannel_VerificationStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (NotificationChannel_VerificationStatus) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_notification_proto_enumTypes[0].Descriptor() +} + +func (NotificationChannel_VerificationStatus) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_notification_proto_enumTypes[0] +} + +func (x NotificationChannel_VerificationStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use NotificationChannel_VerificationStatus.Descriptor instead. 
+func (NotificationChannel_VerificationStatus) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_proto_rawDescGZIP(), []int{1, 0} +} + +// A description of a notification channel. The descriptor includes +// the properties of the channel and the set of labels or fields that +// must be specified to configure channels of a given type. +type NotificationChannelDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The full REST resource name for this descriptor. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[TYPE] + // + // In the above, `[TYPE]` is the value of the `type` field. + Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + // The type of notification channel, such as "email" and "sms". To view the + // full list of channels, see + // [Channel + // descriptors](https://cloud.google.com/monitoring/alerts/using-channels-api#ncd). + // Notification channel types are globally unique. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // A human-readable name for the notification channel type. This + // form of the name is suitable for a user interface. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // A human-readable description of the notification channel + // type. The description may include a description of the properties + // of the channel and pointers to external documentation. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // The set of labels that must be defined to identify a particular + // channel of the corresponding type. Each label includes a + // description for how that field should be populated. + Labels []*label.LabelDescriptor `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"` + // The tiers that support this notification channel; the project service tier + // must be one of the supported_tiers. + // + // Deprecated: Marked as deprecated in google/monitoring/v3/notification.proto. + SupportedTiers []ServiceTier `protobuf:"varint,5,rep,packed,name=supported_tiers,json=supportedTiers,proto3,enum=google.monitoring.v3.ServiceTier" json:"supported_tiers,omitempty"` + // The product launch stage for channels of this type. + LaunchStage api.LaunchStage `protobuf:"varint,7,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` +} + +func (x *NotificationChannelDescriptor) Reset() { + *x = NotificationChannelDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotificationChannelDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotificationChannelDescriptor) ProtoMessage() {} + +func (x *NotificationChannelDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotificationChannelDescriptor.ProtoReflect.Descriptor instead. 
+func (*NotificationChannelDescriptor) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_proto_rawDescGZIP(), []int{0} +} + +func (x *NotificationChannelDescriptor) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NotificationChannelDescriptor) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *NotificationChannelDescriptor) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *NotificationChannelDescriptor) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *NotificationChannelDescriptor) GetLabels() []*label.LabelDescriptor { + if x != nil { + return x.Labels + } + return nil +} + +// Deprecated: Marked as deprecated in google/monitoring/v3/notification.proto. +func (x *NotificationChannelDescriptor) GetSupportedTiers() []ServiceTier { + if x != nil { + return x.SupportedTiers + } + return nil +} + +func (x *NotificationChannelDescriptor) GetLaunchStage() api.LaunchStage { + if x != nil { + return x.LaunchStage + } + return api.LaunchStage(0) +} + +// A `NotificationChannel` is a medium through which an alert is +// delivered when a policy violation is detected. Examples of channels +// include email, SMS, and third-party messaging applications. Fields +// containing sensitive information like authentication tokens or +// contact info are only partially populated on retrieval. +type NotificationChannel struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of the notification channel. This field matches the + // value of the + // [NotificationChannelDescriptor.type][google.monitoring.v3.NotificationChannelDescriptor.type] + // field. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The full REST resource name for this channel. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + // + // The `[CHANNEL_ID]` is automatically assigned by the server on creation. + Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + // An optional human-readable name for this notification channel. It is + // recommended that you specify a non-empty and unique name in order to + // make it easier to identify the channels in your project, though this is + // not enforced. The display name is limited to 512 Unicode characters. + DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // An optional human-readable description of this notification channel. This + // description may provide additional details, beyond the display + // name, for the channel. This may not exceed 1024 Unicode characters. + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + // Configuration fields that define the channel and its behavior. The + // permissible and required labels are specified in the + // [NotificationChannelDescriptor.labels][google.monitoring.v3.NotificationChannelDescriptor.labels] + // of the `NotificationChannelDescriptor` corresponding to the `type` field. 
+ Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // User-supplied key/value data that does not need to conform to + // the corresponding `NotificationChannelDescriptor`'s schema, unlike + // the `labels` field. This field is intended to be used for organizing + // and identifying the `NotificationChannel` objects. + // + // The field can contain up to 64 entries. Each key and value is limited to + // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and + // values can contain only lowercase letters, numerals, underscores, and + // dashes. Keys must begin with a letter. + UserLabels map[string]string `protobuf:"bytes,8,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Indicates whether this channel has been verified or not. On a + // [`ListNotificationChannels`][google.monitoring.v3.NotificationChannelService.ListNotificationChannels] + // or + // [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel] + // operation, this field is expected to be populated. + // + // If the value is `UNVERIFIED`, then it indicates that the channel is + // non-functioning (it both requires verification and lacks verification); + // otherwise, it is assumed that the channel works. + // + // If the channel is neither `VERIFIED` nor `UNVERIFIED`, it implies that + // the channel is of a type that does not require verification or that + // this specific channel has been exempted from verification because it was + // created prior to verification being required for channels of this type. + // + // This field cannot be modified using a standard + // [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel] + // operation. To change the value of this field, you must call + // [`VerifyNotificationChannel`][google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel]. + VerificationStatus NotificationChannel_VerificationStatus `protobuf:"varint,9,opt,name=verification_status,json=verificationStatus,proto3,enum=google.monitoring.v3.NotificationChannel_VerificationStatus" json:"verification_status,omitempty"` + // Whether notifications are forwarded to the described channel. This makes + // it possible to disable delivery of notifications to a particular channel + // without removing the channel from all alerting policies that reference + // the channel. This is a more convenient approach when the change is + // temporary and you want to receive notifications from the same set + // of alerting policies on the channel at some point in the future. + Enabled *wrapperspb.BoolValue `protobuf:"bytes,11,opt,name=enabled,proto3" json:"enabled,omitempty"` + // Record of the creation of this channel. + CreationRecord *MutationRecord `protobuf:"bytes,12,opt,name=creation_record,json=creationRecord,proto3" json:"creation_record,omitempty"` + // Records of the modification of this channel. 
+ MutationRecords []*MutationRecord `protobuf:"bytes,13,rep,name=mutation_records,json=mutationRecords,proto3" json:"mutation_records,omitempty"` +} + +func (x *NotificationChannel) Reset() { + *x = NotificationChannel{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotificationChannel) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotificationChannel) ProtoMessage() {} + +func (x *NotificationChannel) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotificationChannel.ProtoReflect.Descriptor instead. +func (*NotificationChannel) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_proto_rawDescGZIP(), []int{1} +} + +func (x *NotificationChannel) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *NotificationChannel) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NotificationChannel) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *NotificationChannel) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *NotificationChannel) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *NotificationChannel) GetUserLabels() map[string]string { + if x != nil { + return x.UserLabels + } + return nil +} + +func (x *NotificationChannel) GetVerificationStatus() NotificationChannel_VerificationStatus { + if x != nil { + return x.VerificationStatus + } + return NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED +} + +func (x *NotificationChannel) GetEnabled() *wrapperspb.BoolValue { + if x != nil { + return x.Enabled + } + return nil +} + +func (x *NotificationChannel) GetCreationRecord() *MutationRecord { + if x != nil { + return x.CreationRecord + } + return nil +} + +func (x *NotificationChannel) GetMutationRecords() []*MutationRecord { + if x != nil { + return x.MutationRecords + } + return nil +} + +var File_google_monitoring_v3_notification_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_notification_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, + 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 
0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x75, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xf0, 0x04, 0x0a, 0x1d, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, + 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x33, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x12, 0x4e, 0x0a, 0x0f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, + 0x64, 0x5f, 0x74, 0x69, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x21, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x69, 0x65, 0x72, + 0x42, 0x02, 0x18, 0x01, 0x52, 0x0e, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x54, + 0x69, 0x65, 0x72, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, + 0x74, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, + 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, + 0x3a, 0xa0, 0x02, 0xea, 0x41, 0x9c, 0x02, 0x0a, 0x37, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x12, 0x46, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x50, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x44, 0x66, 0x6f, 0x6c, 0x64, + 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x68, 0x61, + 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, + 0x12, 0x01, 0x2a, 0x22, 0xc6, 0x08, 0x0a, 0x13, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, + 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, + 0x6e, 0x65, 0x6c, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x5a, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x12, 0x6d, 0x0a, 0x13, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x56, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x12, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x4d, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x4f, 0x0a, 0x10, 0x6d, 0x75, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0x57, 0x0a, 0x12, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x1f, 0x56, 0x45, 0x52, 0x49, + 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, + 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, + 0x08, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x02, 0x3a, 0xfe, 0x01, 0xea, 0x41, + 0xfa, 0x01, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, + 0x6c, 0x12, 0x3e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, + 0x7d, 0x12, 0x48, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, + 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, + 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 
0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x7d, 0x12, 0x3c, 0x66, 0x6f, 0x6c, + 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x6e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, + 0x6c, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0xcc, 0x01, 0x0a, + 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x11, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, + 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, + 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, + 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_notification_proto_rawDescOnce sync.Once + file_google_monitoring_v3_notification_proto_rawDescData = file_google_monitoring_v3_notification_proto_rawDesc +) + +func file_google_monitoring_v3_notification_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_notification_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_notification_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_notification_proto_rawDescData) + }) + return file_google_monitoring_v3_notification_proto_rawDescData +} + +var file_google_monitoring_v3_notification_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_monitoring_v3_notification_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_monitoring_v3_notification_proto_goTypes = []any{ + (NotificationChannel_VerificationStatus)(0), // 0: google.monitoring.v3.NotificationChannel.VerificationStatus + (*NotificationChannelDescriptor)(nil), // 1: google.monitoring.v3.NotificationChannelDescriptor + (*NotificationChannel)(nil), // 2: google.monitoring.v3.NotificationChannel + nil, // 3: google.monitoring.v3.NotificationChannel.LabelsEntry + nil, // 4: google.monitoring.v3.NotificationChannel.UserLabelsEntry + (*label.LabelDescriptor)(nil), // 5: google.api.LabelDescriptor + (ServiceTier)(0), // 6: google.monitoring.v3.ServiceTier + (api.LaunchStage)(0), // 7: google.api.LaunchStage + (*wrapperspb.BoolValue)(nil), // 8: google.protobuf.BoolValue + (*MutationRecord)(nil), // 9: google.monitoring.v3.MutationRecord +} +var file_google_monitoring_v3_notification_proto_depIdxs = []int32{ + 5, // 0: google.monitoring.v3.NotificationChannelDescriptor.labels:type_name -> google.api.LabelDescriptor + 6, // 1: 
google.monitoring.v3.NotificationChannelDescriptor.supported_tiers:type_name -> google.monitoring.v3.ServiceTier + 7, // 2: google.monitoring.v3.NotificationChannelDescriptor.launch_stage:type_name -> google.api.LaunchStage + 3, // 3: google.monitoring.v3.NotificationChannel.labels:type_name -> google.monitoring.v3.NotificationChannel.LabelsEntry + 4, // 4: google.monitoring.v3.NotificationChannel.user_labels:type_name -> google.monitoring.v3.NotificationChannel.UserLabelsEntry + 0, // 5: google.monitoring.v3.NotificationChannel.verification_status:type_name -> google.monitoring.v3.NotificationChannel.VerificationStatus + 8, // 6: google.monitoring.v3.NotificationChannel.enabled:type_name -> google.protobuf.BoolValue + 9, // 7: google.monitoring.v3.NotificationChannel.creation_record:type_name -> google.monitoring.v3.MutationRecord + 9, // 8: google.monitoring.v3.NotificationChannel.mutation_records:type_name -> google.monitoring.v3.MutationRecord + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_notification_proto_init() } +func file_google_monitoring_v3_notification_proto_init() { + if File_google_monitoring_v3_notification_proto != nil { + return + } + file_google_monitoring_v3_common_proto_init() + file_google_monitoring_v3_mutation_record_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_notification_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*NotificationChannelDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*NotificationChannel); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_notification_proto_rawDesc, + NumEnums: 1, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_notification_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_notification_proto_depIdxs, + EnumInfos: file_google_monitoring_v3_notification_proto_enumTypes, + MessageInfos: file_google_monitoring_v3_notification_proto_msgTypes, + }.Build() + File_google_monitoring_v3_notification_proto = out.File + file_google_monitoring_v3_notification_proto_rawDesc = nil + file_google_monitoring_v3_notification_proto_goTypes = nil + file_google_monitoring_v3_notification_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go new file mode 100644 index 00000000000..ac7bafd1f10 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go @@ -0,0 +1,2002 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/notification_service.proto + +package monitoringpb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The `ListNotificationChannelDescriptors` request. +type ListNotificationChannelDescriptorsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The REST resource name of the parent from which to retrieve + // the notification channel descriptors. The expected syntax is: + // + // projects/[PROJECT_ID_OR_NUMBER] + // + // Note that this + // [names](https://cloud.google.com/monitoring/api/v3#project_name) the parent + // container in which to look for the descriptors; to retrieve a single + // descriptor by name, use the + // [GetNotificationChannelDescriptor][google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor] + // operation, instead. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // The maximum number of results to return in a single response. If + // not set to a positive number, a reasonable value will be chosen by the + // service. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If non-empty, `page_token` must contain a value returned as the + // `next_page_token` in a previous response to request the next set + // of results. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListNotificationChannelDescriptorsRequest) Reset() { + *x = ListNotificationChannelDescriptorsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListNotificationChannelDescriptorsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListNotificationChannelDescriptorsRequest) ProtoMessage() {} + +func (x *ListNotificationChannelDescriptorsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNotificationChannelDescriptorsRequest.ProtoReflect.Descriptor instead. +func (*ListNotificationChannelDescriptorsRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{0} +} + +func (x *ListNotificationChannelDescriptorsRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListNotificationChannelDescriptorsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListNotificationChannelDescriptorsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The `ListNotificationChannelDescriptors` response. +type ListNotificationChannelDescriptorsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The monitored resource descriptors supported for the specified + // project, optionally filtered. + ChannelDescriptors []*NotificationChannelDescriptor `protobuf:"bytes,1,rep,name=channel_descriptors,json=channelDescriptors,proto3" json:"channel_descriptors,omitempty"` + // If not empty, indicates that there may be more results that match + // the request. Use the value in the `page_token` field in a + // subsequent request to fetch the next set of results. If empty, + // all results have been returned. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListNotificationChannelDescriptorsResponse) Reset() { + *x = ListNotificationChannelDescriptorsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListNotificationChannelDescriptorsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListNotificationChannelDescriptorsResponse) ProtoMessage() {} + +func (x *ListNotificationChannelDescriptorsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNotificationChannelDescriptorsResponse.ProtoReflect.Descriptor instead. 
+func (*ListNotificationChannelDescriptorsResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ListNotificationChannelDescriptorsResponse) GetChannelDescriptors() []*NotificationChannelDescriptor { + if x != nil { + return x.ChannelDescriptors + } + return nil +} + +func (x *ListNotificationChannelDescriptorsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The `GetNotificationChannelDescriptor` response. +type GetNotificationChannelDescriptorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The channel type for which to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[CHANNEL_TYPE] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetNotificationChannelDescriptorRequest) Reset() { + *x = GetNotificationChannelDescriptorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetNotificationChannelDescriptorRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetNotificationChannelDescriptorRequest) ProtoMessage() {} + +func (x *GetNotificationChannelDescriptorRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetNotificationChannelDescriptorRequest.ProtoReflect.Descriptor instead. +func (*GetNotificationChannelDescriptorRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{2} +} + +func (x *GetNotificationChannelDescriptorRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `CreateNotificationChannel` request. +type CreateNotificationChannelRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which + // to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + // + // This names the container into which the channel will be + // written, this does not name the newly created channel. The resulting + // channel's name will have a normalized version of this field as a prefix, + // but will add `/notificationChannels/[CHANNEL_ID]` to identify the channel. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Required. The definition of the `NotificationChannel` to create. 
+ NotificationChannel *NotificationChannel `protobuf:"bytes,2,opt,name=notification_channel,json=notificationChannel,proto3" json:"notification_channel,omitempty"` +} + +func (x *CreateNotificationChannelRequest) Reset() { + *x = CreateNotificationChannelRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateNotificationChannelRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateNotificationChannelRequest) ProtoMessage() {} + +func (x *CreateNotificationChannelRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateNotificationChannelRequest.ProtoReflect.Descriptor instead. +func (*CreateNotificationChannelRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{3} +} + +func (x *CreateNotificationChannelRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CreateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel { + if x != nil { + return x.NotificationChannel + } + return nil +} + +// The `ListNotificationChannels` request. +type ListNotificationChannelsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which + // to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + // + // This names the container + // in which to look for the notification channels; it does not name a + // specific channel. To query a specific channel by REST resource name, use + // the + // [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel] + // operation. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // If provided, this field specifies the criteria that must be met by + // notification channels to be included in the response. + // + // For more details, see [sorting and + // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). + Filter string `protobuf:"bytes,6,opt,name=filter,proto3" json:"filter,omitempty"` + // A comma-separated list of fields by which to sort the result. Supports + // the same set of fields as in `filter`. Entries can be prefixed with + // a minus sign to sort in descending rather than ascending order. + // + // For more details, see [sorting and + // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). + OrderBy string `protobuf:"bytes,7,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` + // The maximum number of results to return in a single response. If + // not set to a positive number, a reasonable value will be chosen by the + // service. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If non-empty, `page_token` must contain a value returned as the + // `next_page_token` in a previous response to request the next set + // of results. 
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListNotificationChannelsRequest) Reset() { + *x = ListNotificationChannelsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListNotificationChannelsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListNotificationChannelsRequest) ProtoMessage() {} + +func (x *ListNotificationChannelsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNotificationChannelsRequest.ProtoReflect.Descriptor instead. +func (*ListNotificationChannelsRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{4} +} + +func (x *ListNotificationChannelsRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListNotificationChannelsRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListNotificationChannelsRequest) GetOrderBy() string { + if x != nil { + return x.OrderBy + } + return "" +} + +func (x *ListNotificationChannelsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListNotificationChannelsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The `ListNotificationChannels` response. +type ListNotificationChannelsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The notification channels defined for the specified project. + NotificationChannels []*NotificationChannel `protobuf:"bytes,3,rep,name=notification_channels,json=notificationChannels,proto3" json:"notification_channels,omitempty"` + // If not empty, indicates that there may be more results that match + // the request. Use the value in the `page_token` field in a + // subsequent request to fetch the next set of results. If empty, + // all results have been returned. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of notification channels in all pages. This number is only + // an estimate, and may change in subsequent pages. 
https://aip.dev/158 + TotalSize int32 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` +} + +func (x *ListNotificationChannelsResponse) Reset() { + *x = ListNotificationChannelsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListNotificationChannelsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListNotificationChannelsResponse) ProtoMessage() {} + +func (x *ListNotificationChannelsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNotificationChannelsResponse.ProtoReflect.Descriptor instead. +func (*ListNotificationChannelsResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{5} +} + +func (x *ListNotificationChannelsResponse) GetNotificationChannels() []*NotificationChannel { + if x != nil { + return x.NotificationChannels + } + return nil +} + +func (x *ListNotificationChannelsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListNotificationChannelsResponse) GetTotalSize() int32 { + if x != nil { + return x.TotalSize + } + return 0 +} + +// The `GetNotificationChannel` request. +type GetNotificationChannelRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The channel for which to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetNotificationChannelRequest) Reset() { + *x = GetNotificationChannelRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetNotificationChannelRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetNotificationChannelRequest) ProtoMessage() {} + +func (x *GetNotificationChannelRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetNotificationChannelRequest.ProtoReflect.Descriptor instead. +func (*GetNotificationChannelRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{6} +} + +func (x *GetNotificationChannelRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `UpdateNotificationChannel` request. +type UpdateNotificationChannelRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The fields to update. 
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Required. A description of the changes to be applied to the specified + // notification channel. The description must provide a definition for + // fields to be updated; the names of these fields should also be + // included in the `update_mask`. + NotificationChannel *NotificationChannel `protobuf:"bytes,3,opt,name=notification_channel,json=notificationChannel,proto3" json:"notification_channel,omitempty"` +} + +func (x *UpdateNotificationChannelRequest) Reset() { + *x = UpdateNotificationChannelRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateNotificationChannelRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateNotificationChannelRequest) ProtoMessage() {} + +func (x *UpdateNotificationChannelRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateNotificationChannelRequest.ProtoReflect.Descriptor instead. +func (*UpdateNotificationChannelRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{7} +} + +func (x *UpdateNotificationChannelRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +func (x *UpdateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel { + if x != nil { + return x.NotificationChannel + } + return nil +} + +// The `DeleteNotificationChannel` request. +type DeleteNotificationChannelRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The channel for which to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // If true, the notification channel will be deleted regardless of its + // use in alert policies (the policies will be updated to remove the + // channel). If false, channels that are still referenced by an existing + // alerting policy will fail to be deleted in a delete operation. 
+ Force bool `protobuf:"varint,5,opt,name=force,proto3" json:"force,omitempty"` +} + +func (x *DeleteNotificationChannelRequest) Reset() { + *x = DeleteNotificationChannelRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteNotificationChannelRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteNotificationChannelRequest) ProtoMessage() {} + +func (x *DeleteNotificationChannelRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteNotificationChannelRequest.ProtoReflect.Descriptor instead. +func (*DeleteNotificationChannelRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{8} +} + +func (x *DeleteNotificationChannelRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *DeleteNotificationChannelRequest) GetForce() bool { + if x != nil { + return x.Force + } + return false +} + +// The `SendNotificationChannelVerificationCode` request. +type SendNotificationChannelVerificationCodeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The notification channel to which to send a verification code. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *SendNotificationChannelVerificationCodeRequest) Reset() { + *x = SendNotificationChannelVerificationCodeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SendNotificationChannelVerificationCodeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendNotificationChannelVerificationCodeRequest) ProtoMessage() {} + +func (x *SendNotificationChannelVerificationCodeRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendNotificationChannelVerificationCodeRequest.ProtoReflect.Descriptor instead. +func (*SendNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{9} +} + +func (x *SendNotificationChannelVerificationCodeRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `GetNotificationChannelVerificationCode` request. +type GetNotificationChannelVerificationCodeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The notification channel for which a verification code is to be + // generated and retrieved. This must name a channel that is already verified; + // if the specified channel is not verified, the request will fail. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The desired expiration time. If specified, the API will guarantee that
+ // the returned code will not be valid after the specified timestamp;
+ // however, the API cannot guarantee that the returned code will be
+ // valid for at least as long as the requested time (the API puts an upper
+ // bound on the amount of time for which a code may be valid). If omitted,
+ // a default expiration will be used, which may be less than the max
+ // permissible expiration (so specifying an expiration may extend the
+ // code's lifetime over omitting an expiration, even though the API does
+ // impose an upper limit on the maximum expiration that is permitted).
+ ExpireTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
+}
+
+func (x *GetNotificationChannelVerificationCodeRequest) Reset() {
+ *x = GetNotificationChannelVerificationCodeRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetNotificationChannelVerificationCodeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetNotificationChannelVerificationCodeRequest) ProtoMessage() {}
+
+func (x *GetNotificationChannelVerificationCodeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetNotificationChannelVerificationCodeRequest.ProtoReflect.Descriptor instead.
+func (*GetNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *GetNotificationChannelVerificationCodeRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *GetNotificationChannelVerificationCodeRequest) GetExpireTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.ExpireTime
+ }
+ return nil
+}
+
+// The `GetNotificationChannelVerificationCode` response.
+type GetNotificationChannelVerificationCodeResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The verification code, which may be used to verify other channels
+ // that have an equivalent identity (i.e. other channels of the same
+ // type with the same fingerprint such as other email channels with
+ // the same email address or other sms channels with the same number).
+ Code string `protobuf:"bytes,1,opt,name=code,proto3" json:"code,omitempty"`
+ // The expiration time associated with the code that was returned. If
+ // an expiration was provided in the request, this is the minimum of the
+ // requested expiration in the request and the max permitted expiration. 
+ ExpireTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` +} + +func (x *GetNotificationChannelVerificationCodeResponse) Reset() { + *x = GetNotificationChannelVerificationCodeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetNotificationChannelVerificationCodeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetNotificationChannelVerificationCodeResponse) ProtoMessage() {} + +func (x *GetNotificationChannelVerificationCodeResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetNotificationChannelVerificationCodeResponse.ProtoReflect.Descriptor instead. +func (*GetNotificationChannelVerificationCodeResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{11} +} + +func (x *GetNotificationChannelVerificationCodeResponse) GetCode() string { + if x != nil { + return x.Code + } + return "" +} + +func (x *GetNotificationChannelVerificationCodeResponse) GetExpireTime() *timestamppb.Timestamp { + if x != nil { + return x.ExpireTime + } + return nil +} + +// The `VerifyNotificationChannel` request. +type VerifyNotificationChannelRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The notification channel to verify. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The verification code that was delivered to the channel as + // a result of invoking the `SendNotificationChannelVerificationCode` API + // method or that was retrieved from a verified channel via + // `GetNotificationChannelVerificationCode`. For example, one might have + // "G-123456" or "TKNZGhhd2EyN3I1MnRnMjRv" (in general, one is only + // guaranteed that the code is valid UTF-8; one should not + // make any assumptions regarding the structure or format of the code). + Code string `protobuf:"bytes,2,opt,name=code,proto3" json:"code,omitempty"` +} + +func (x *VerifyNotificationChannelRequest) Reset() { + *x = VerifyNotificationChannelRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VerifyNotificationChannelRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VerifyNotificationChannelRequest) ProtoMessage() {} + +func (x *VerifyNotificationChannelRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VerifyNotificationChannelRequest.ProtoReflect.Descriptor instead. 
+func (*VerifyNotificationChannelRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{12} +} + +func (x *VerifyNotificationChannelRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *VerifyNotificationChannelRequest) GetCode() string { + if x != nil { + return x.Code + } + return "" +} + +var File_google_monitoring_v3_notification_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_notification_service_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, + 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xbc, 0x01, 0x0a, 0x29, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x53, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x3f, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x39, 0x12, 0x37, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 
0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x22, 0xba, 0x01, 0x0a, 0x2a, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x64, 0x0a, 0x13, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x52, 0x12, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, + 0x7e, 0x0a, 0x27, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x3f, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x39, + 0x0a, 0x37, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, + 0xd0, 0x01, 0x0a, 0x20, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x12, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x61, 0x0a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, 0x6e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, + 0x65, 0x6c, 0x22, 0xdb, 0x01, 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 
0x65, 0x6c, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x12, 0x2d, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72, 0x64, + 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x72, 0x64, + 0x65, 0x72, 0x42, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0xc9, 0x01, 0x0a, 0x20, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x15, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, + 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, + 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, 0x0a, + 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x6a, 0x0a, 0x1d, + 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, + 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc2, 0x01, 0x0a, 0x20, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, + 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x61, 0x0a, 0x14, 0x6e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, + 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, + 0x6e, 0x65, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x83, 0x01, + 0x0a, 0x20, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, + 0x72, 0x63, 0x65, 0x22, 0x7b, 0x0a, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x22, 0xb7, 0x01, 0x0a, 0x2d, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, + 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, + 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x81, 0x01, 0x0a, 0x2e, 0x47, + 0x65, 0x74, 0x4e, 0x6f, 
0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, + 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x6f, 0x64, + 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x86, + 0x01, 0x0a, 0x20, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, + 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x32, 0xea, 0x12, 0x0a, 0x1a, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xec, 0x01, 0x0a, 0x22, 0x4c, 0x69, 0x73, 0x74, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, + 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x3f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x40, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x43, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x36, 0x12, + 0x34, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0xdd, 0x01, 0x0a, 0x20, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 
0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, + 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x45, + 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, + 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xc4, 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, + 0x6c, 0x73, 0x12, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, + 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x39, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, + 0x12, 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0xb5, 0x01, 0x0a, + 0x16, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, + 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, + 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x3b, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x12, 0x2c, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, + 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xe4, 0x01, 0x0a, 0x19, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 
0x68, 0x61, 0x6e, 0x6e, + 0x65, 0x6c, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, + 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, + 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x64, 0xda, 0x41, 0x19, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, + 0x6e, 0x65, 0x6c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x42, 0x3a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, + 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0x83, 0x02, 0x0a, 0x19, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x82, 0x01, 0xda, + 0x41, 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2c, 0x6e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, + 0x65, 0x6c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x59, 0x3a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x32, 0x41, + 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, + 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x19, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, + 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, + 
0x41, 0xda, 0x41, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x2e, 0x2a, 0x2c, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, + 0x2a, 0x7d, 0x12, 0xdc, 0x01, 0x0a, 0x27, 0x53, 0x65, 0x6e, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x44, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x53, 0xda, 0x41, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x46, 0x3a, 0x01, 0x2a, 0x22, 0x41, + 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x6e, + 0x64, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, + 0x65, 0x12, 0x87, 0x02, 0x0a, 0x26, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x43, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x44, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, + 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x45, 0x3a, 0x01, 0x2a, 0x22, 0x40, 0x2f, 0x76, 0x33, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, + 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x12, 0xca, 0x01, 0x0a, 0x19, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x68, 
0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x4a, 0xda, 0x41, + 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x6f, 0x64, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, + 0x3a, 0x01, 0x2a, 0x22, 0x33, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, + 0x7d, 0x3a, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, + 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, + 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x72, 0x65, 0x61, 0x64, 0x42, 0xd3, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x42, 0x18, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, + 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, + 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + 
file_google_monitoring_v3_notification_service_proto_rawDescOnce sync.Once + file_google_monitoring_v3_notification_service_proto_rawDescData = file_google_monitoring_v3_notification_service_proto_rawDesc +) + +func file_google_monitoring_v3_notification_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_notification_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_notification_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_notification_service_proto_rawDescData) + }) + return file_google_monitoring_v3_notification_service_proto_rawDescData +} + +var file_google_monitoring_v3_notification_service_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_google_monitoring_v3_notification_service_proto_goTypes = []any{ + (*ListNotificationChannelDescriptorsRequest)(nil), // 0: google.monitoring.v3.ListNotificationChannelDescriptorsRequest + (*ListNotificationChannelDescriptorsResponse)(nil), // 1: google.monitoring.v3.ListNotificationChannelDescriptorsResponse + (*GetNotificationChannelDescriptorRequest)(nil), // 2: google.monitoring.v3.GetNotificationChannelDescriptorRequest + (*CreateNotificationChannelRequest)(nil), // 3: google.monitoring.v3.CreateNotificationChannelRequest + (*ListNotificationChannelsRequest)(nil), // 4: google.monitoring.v3.ListNotificationChannelsRequest + (*ListNotificationChannelsResponse)(nil), // 5: google.monitoring.v3.ListNotificationChannelsResponse + (*GetNotificationChannelRequest)(nil), // 6: google.monitoring.v3.GetNotificationChannelRequest + (*UpdateNotificationChannelRequest)(nil), // 7: google.monitoring.v3.UpdateNotificationChannelRequest + (*DeleteNotificationChannelRequest)(nil), // 8: google.monitoring.v3.DeleteNotificationChannelRequest + (*SendNotificationChannelVerificationCodeRequest)(nil), // 9: google.monitoring.v3.SendNotificationChannelVerificationCodeRequest + (*GetNotificationChannelVerificationCodeRequest)(nil), // 10: google.monitoring.v3.GetNotificationChannelVerificationCodeRequest + (*GetNotificationChannelVerificationCodeResponse)(nil), // 11: google.monitoring.v3.GetNotificationChannelVerificationCodeResponse + (*VerifyNotificationChannelRequest)(nil), // 12: google.monitoring.v3.VerifyNotificationChannelRequest + (*NotificationChannelDescriptor)(nil), // 13: google.monitoring.v3.NotificationChannelDescriptor + (*NotificationChannel)(nil), // 14: google.monitoring.v3.NotificationChannel + (*fieldmaskpb.FieldMask)(nil), // 15: google.protobuf.FieldMask + (*timestamppb.Timestamp)(nil), // 16: google.protobuf.Timestamp + (*emptypb.Empty)(nil), // 17: google.protobuf.Empty +} +var file_google_monitoring_v3_notification_service_proto_depIdxs = []int32{ + 13, // 0: google.monitoring.v3.ListNotificationChannelDescriptorsResponse.channel_descriptors:type_name -> google.monitoring.v3.NotificationChannelDescriptor + 14, // 1: google.monitoring.v3.CreateNotificationChannelRequest.notification_channel:type_name -> google.monitoring.v3.NotificationChannel + 14, // 2: google.monitoring.v3.ListNotificationChannelsResponse.notification_channels:type_name -> google.monitoring.v3.NotificationChannel + 15, // 3: google.monitoring.v3.UpdateNotificationChannelRequest.update_mask:type_name -> google.protobuf.FieldMask + 14, // 4: google.monitoring.v3.UpdateNotificationChannelRequest.notification_channel:type_name -> google.monitoring.v3.NotificationChannel + 16, // 5: google.monitoring.v3.GetNotificationChannelVerificationCodeRequest.expire_time:type_name -> google.protobuf.Timestamp + 16, 
// 6: google.monitoring.v3.GetNotificationChannelVerificationCodeResponse.expire_time:type_name -> google.protobuf.Timestamp + 0, // 7: google.monitoring.v3.NotificationChannelService.ListNotificationChannelDescriptors:input_type -> google.monitoring.v3.ListNotificationChannelDescriptorsRequest + 2, // 8: google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor:input_type -> google.monitoring.v3.GetNotificationChannelDescriptorRequest + 4, // 9: google.monitoring.v3.NotificationChannelService.ListNotificationChannels:input_type -> google.monitoring.v3.ListNotificationChannelsRequest + 6, // 10: google.monitoring.v3.NotificationChannelService.GetNotificationChannel:input_type -> google.monitoring.v3.GetNotificationChannelRequest + 3, // 11: google.monitoring.v3.NotificationChannelService.CreateNotificationChannel:input_type -> google.monitoring.v3.CreateNotificationChannelRequest + 7, // 12: google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel:input_type -> google.monitoring.v3.UpdateNotificationChannelRequest + 8, // 13: google.monitoring.v3.NotificationChannelService.DeleteNotificationChannel:input_type -> google.monitoring.v3.DeleteNotificationChannelRequest + 9, // 14: google.monitoring.v3.NotificationChannelService.SendNotificationChannelVerificationCode:input_type -> google.monitoring.v3.SendNotificationChannelVerificationCodeRequest + 10, // 15: google.monitoring.v3.NotificationChannelService.GetNotificationChannelVerificationCode:input_type -> google.monitoring.v3.GetNotificationChannelVerificationCodeRequest + 12, // 16: google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel:input_type -> google.monitoring.v3.VerifyNotificationChannelRequest + 1, // 17: google.monitoring.v3.NotificationChannelService.ListNotificationChannelDescriptors:output_type -> google.monitoring.v3.ListNotificationChannelDescriptorsResponse + 13, // 18: google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor:output_type -> google.monitoring.v3.NotificationChannelDescriptor + 5, // 19: google.monitoring.v3.NotificationChannelService.ListNotificationChannels:output_type -> google.monitoring.v3.ListNotificationChannelsResponse + 14, // 20: google.monitoring.v3.NotificationChannelService.GetNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel + 14, // 21: google.monitoring.v3.NotificationChannelService.CreateNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel + 14, // 22: google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel + 17, // 23: google.monitoring.v3.NotificationChannelService.DeleteNotificationChannel:output_type -> google.protobuf.Empty + 17, // 24: google.monitoring.v3.NotificationChannelService.SendNotificationChannelVerificationCode:output_type -> google.protobuf.Empty + 11, // 25: google.monitoring.v3.NotificationChannelService.GetNotificationChannelVerificationCode:output_type -> google.monitoring.v3.GetNotificationChannelVerificationCodeResponse + 14, // 26: google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel + 17, // [17:27] is the sub-list for method output_type + 7, // [7:17] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { 
file_google_monitoring_v3_notification_service_proto_init() } +func file_google_monitoring_v3_notification_service_proto_init() { + if File_google_monitoring_v3_notification_service_proto != nil { + return + } + file_google_monitoring_v3_notification_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_notification_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*ListNotificationChannelDescriptorsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ListNotificationChannelDescriptorsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*GetNotificationChannelDescriptorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*CreateNotificationChannelRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*ListNotificationChannelsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*ListNotificationChannelsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*GetNotificationChannelRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*UpdateNotificationChannelRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*DeleteNotificationChannelRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*SendNotificationChannelVerificationCodeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*GetNotificationChannelVerificationCodeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_monitoring_v3_notification_service_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*GetNotificationChannelVerificationCodeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*VerifyNotificationChannelRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_notification_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 13, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_notification_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_notification_service_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_notification_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_notification_service_proto = out.File + file_google_monitoring_v3_notification_service_proto_rawDesc = nil + file_google_monitoring_v3_notification_service_proto_goTypes = nil + file_google_monitoring_v3_notification_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// NotificationChannelServiceClient is the client API for NotificationChannelService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type NotificationChannelServiceClient interface { + // Lists the descriptors for supported channel types. The use of descriptors + // makes it possible for new channel types to be dynamically added. + ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error) + // Gets a single channel descriptor. The descriptor indicates which fields + // are expected / permitted for a notification channel of the given type. + GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error) + // Lists the notification channels that have been created for the project. + // To list the types of notification channels that are supported, use + // the `ListNotificationChannelDescriptors` method. + ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error) + // Gets a single notification channel. The channel includes the relevant + // configuration details with which the channel was created. However, the + // response may truncate or omit passwords, API keys, or other private key + // matter and thus the response may not be 100% identical to the information + // that was supplied in the call to the create method. 
+ GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) + // Creates a new notification channel, representing a single notification + // endpoint such as an email address, SMS number, or PagerDuty service. + // + // Design your application to single-thread API calls that modify the state of + // notification channels in a single project. This includes calls to + // CreateNotificationChannel, DeleteNotificationChannel and + // UpdateNotificationChannel. + CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) + // Updates a notification channel. Fields not specified in the field mask + // remain unchanged. + // + // Design your application to single-thread API calls that modify the state of + // notification channels in a single project. This includes calls to + // CreateNotificationChannel, DeleteNotificationChannel and + // UpdateNotificationChannel. + UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) + // Deletes a notification channel. + // + // Design your application to single-thread API calls that modify the state of + // notification channels in a single project. This includes calls to + // CreateNotificationChannel, DeleteNotificationChannel and + // UpdateNotificationChannel. + DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Causes a verification code to be delivered to the channel. The code + // can then be supplied in `VerifyNotificationChannel` to verify the channel. + SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Requests a verification code for an already verified channel that can then + // be used in a call to VerifyNotificationChannel() on a different channel + // with an equivalent identity in the same or in a different project. This + // makes it possible to copy a channel between projects without requiring + // manual reverification of the channel. If the channel is not in the + // verified state, this method will fail (in other words, this may only be + // used if the SendNotificationChannelVerificationCode and + // VerifyNotificationChannel paths have already been used to put the given + // channel into the verified state). + // + // There is no guarantee that the verification codes returned by this method + // will be of a similar structure or form as the ones that are delivered + // to the channel via SendNotificationChannelVerificationCode; while + // VerifyNotificationChannel() will recognize both the codes delivered via + // SendNotificationChannelVerificationCode() and returned from + // GetNotificationChannelVerificationCode(), it is typically the case that + // the verification codes delivered via + // SendNotificationChannelVerificationCode() will be shorter and also + // have a shorter expiration (e.g. codes such as "G-123456") whereas + // GetVerificationCode() will typically return a much longer, websafe base + // 64 encoded string that has a longer expiration time. 
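+	//
+	// A hedged sketch of the cross-project copy flow described above
+	// (illustrative only; the channel names are hypothetical):
+	//
+	//	code, err := client.GetNotificationChannelVerificationCode(ctx,
+	//		&GetNotificationChannelVerificationCodeRequest{Name: "projects/p/notificationChannels/111"})
+	//	if err != nil {
+	//		// handle the RPC error
+	//	}
+	//	_, err = client.VerifyNotificationChannel(ctx, &VerifyNotificationChannelRequest{
+	//		Name: "projects/p/notificationChannels/222", // channel with an equivalent identity
+	//		Code: code.GetCode(),
+	//	})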
+	GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error) + // Verifies a `NotificationChannel` by proving receipt of the code + // delivered to the channel as a result of calling + // `SendNotificationChannelVerificationCode`. + VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) +} + +type notificationChannelServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewNotificationChannelServiceClient(cc grpc.ClientConnInterface) NotificationChannelServiceClient { + return &notificationChannelServiceClient{cc} +} + +func (c *notificationChannelServiceClient) ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error) { + out := new(ListNotificationChannelDescriptorsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error) { + out := new(NotificationChannelDescriptor) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error) { + out := new(ListNotificationChannelsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel", in, out, opts...)
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error) { + out := new(GetNotificationChannelVerificationCodeResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// NotificationChannelServiceServer is the server API for NotificationChannelService service. +type NotificationChannelServiceServer interface { + // Lists the descriptors for supported channel types. The use of descriptors + // makes it possible for new channel types to be dynamically added. + ListNotificationChannelDescriptors(context.Context, *ListNotificationChannelDescriptorsRequest) (*ListNotificationChannelDescriptorsResponse, error) + // Gets a single channel descriptor. The descriptor indicates which fields + // are expected / permitted for a notification channel of the given type. + GetNotificationChannelDescriptor(context.Context, *GetNotificationChannelDescriptorRequest) (*NotificationChannelDescriptor, error) + // Lists the notification channels that have been created for the project. + // To list the types of notification channels that are supported, use + // the `ListNotificationChannelDescriptors` method. + ListNotificationChannels(context.Context, *ListNotificationChannelsRequest) (*ListNotificationChannelsResponse, error) + // Gets a single notification channel. The channel includes the relevant + // configuration details with which the channel was created. However, the + // response may truncate or omit passwords, API keys, or other private key + // matter and thus the response may not be 100% identical to the information + // that was supplied in the call to the create method. + GetNotificationChannel(context.Context, *GetNotificationChannelRequest) (*NotificationChannel, error) + // Creates a new notification channel, representing a single notification + // endpoint such as an email address, SMS number, or PagerDuty service. 
+ // + // Design your application to single-thread API calls that modify the state of + // notification channels in a single project. This includes calls to + // CreateNotificationChannel, DeleteNotificationChannel and + // UpdateNotificationChannel. + CreateNotificationChannel(context.Context, *CreateNotificationChannelRequest) (*NotificationChannel, error) + // Updates a notification channel. Fields not specified in the field mask + // remain unchanged. + // + // Design your application to single-thread API calls that modify the state of + // notification channels in a single project. This includes calls to + // CreateNotificationChannel, DeleteNotificationChannel and + // UpdateNotificationChannel. + UpdateNotificationChannel(context.Context, *UpdateNotificationChannelRequest) (*NotificationChannel, error) + // Deletes a notification channel. + // + // Design your application to single-thread API calls that modify the state of + // notification channels in a single project. This includes calls to + // CreateNotificationChannel, DeleteNotificationChannel and + // UpdateNotificationChannel. + DeleteNotificationChannel(context.Context, *DeleteNotificationChannelRequest) (*emptypb.Empty, error) + // Causes a verification code to be delivered to the channel. The code + // can then be supplied in `VerifyNotificationChannel` to verify the channel. + SendNotificationChannelVerificationCode(context.Context, *SendNotificationChannelVerificationCodeRequest) (*emptypb.Empty, error) + // Requests a verification code for an already verified channel that can then + // be used in a call to VerifyNotificationChannel() on a different channel + // with an equivalent identity in the same or in a different project. This + // makes it possible to copy a channel between projects without requiring + // manual reverification of the channel. If the channel is not in the + // verified state, this method will fail (in other words, this may only be + // used if the SendNotificationChannelVerificationCode and + // VerifyNotificationChannel paths have already been used to put the given + // channel into the verified state). + // + // There is no guarantee that the verification codes returned by this method + // will be of a similar structure or form as the ones that are delivered + // to the channel via SendNotificationChannelVerificationCode; while + // VerifyNotificationChannel() will recognize both the codes delivered via + // SendNotificationChannelVerificationCode() and returned from + // GetNotificationChannelVerificationCode(), it is typically the case that + // the verification codes delivered via + // SendNotificationChannelVerificationCode() will be shorter and also + // have a shorter expiration (e.g. codes such as "G-123456") whereas + // GetVerificationCode() will typically return a much longer, websafe base + // 64 encoded string that has a longer expiration time. + GetNotificationChannelVerificationCode(context.Context, *GetNotificationChannelVerificationCodeRequest) (*GetNotificationChannelVerificationCodeResponse, error) + // Verifies a `NotificationChannel` by proving receipt of the code + // delivered to the channel as a result of calling + // `SendNotificationChannelVerificationCode`. + VerifyNotificationChannel(context.Context, *VerifyNotificationChannelRequest) (*NotificationChannel, error) +} + +// UnimplementedNotificationChannelServiceServer can be embedded to have forward compatible implementations. 
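+// For example (a minimal sketch; myNotificationServer is hypothetical):
+//
+//	type myNotificationServer struct {
+//		UnimplementedNotificationChannelServiceServer
+//	}
+//
+// Methods not overridden on myNotificationServer then return
+// codes.Unimplemented instead of breaking compilation when new RPCs are added.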
+type UnimplementedNotificationChannelServiceServer struct { +} + +func (*UnimplementedNotificationChannelServiceServer) ListNotificationChannelDescriptors(context.Context, *ListNotificationChannelDescriptorsRequest) (*ListNotificationChannelDescriptorsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListNotificationChannelDescriptors not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannelDescriptor(context.Context, *GetNotificationChannelDescriptorRequest) (*NotificationChannelDescriptor, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetNotificationChannelDescriptor not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) ListNotificationChannels(context.Context, *ListNotificationChannelsRequest) (*ListNotificationChannelsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListNotificationChannels not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannel(context.Context, *GetNotificationChannelRequest) (*NotificationChannel, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetNotificationChannel not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) CreateNotificationChannel(context.Context, *CreateNotificationChannelRequest) (*NotificationChannel, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateNotificationChannel not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) UpdateNotificationChannel(context.Context, *UpdateNotificationChannelRequest) (*NotificationChannel, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateNotificationChannel not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) DeleteNotificationChannel(context.Context, *DeleteNotificationChannelRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteNotificationChannel not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) SendNotificationChannelVerificationCode(context.Context, *SendNotificationChannelVerificationCodeRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method SendNotificationChannelVerificationCode not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannelVerificationCode(context.Context, *GetNotificationChannelVerificationCodeRequest) (*GetNotificationChannelVerificationCodeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetNotificationChannelVerificationCode not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) VerifyNotificationChannel(context.Context, *VerifyNotificationChannelRequest) (*NotificationChannel, error) { + return nil, status.Errorf(codes.Unimplemented, "method VerifyNotificationChannel not implemented") +} + +func RegisterNotificationChannelServiceServer(s *grpc.Server, srv NotificationChannelServiceServer) { + s.RegisterService(&_NotificationChannelService_serviceDesc, srv) +} + +func _NotificationChannelService_ListNotificationChannelDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNotificationChannelDescriptorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, in) + } + 
info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, req.(*ListNotificationChannelDescriptorsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_GetNotificationChannelDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationChannelDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, req.(*GetNotificationChannelDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_ListNotificationChannels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNotificationChannelsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, req.(*ListNotificationChannelsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_GetNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, req.(*GetNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_CreateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, req.(*CreateNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_UpdateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, req.(*UpdateNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_DeleteNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, req.(*DeleteNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_SendNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SendNotificationChannelVerificationCodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, req.(*SendNotificationChannelVerificationCodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_GetNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationChannelVerificationCodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, req.(*GetNotificationChannelVerificationCodeRequest)) + } + return interceptor(ctx, in, 
info, handler) +} + +func _NotificationChannelService_VerifyNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VerifyNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, req.(*VerifyNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _NotificationChannelService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.NotificationChannelService", + HandlerType: (*NotificationChannelServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListNotificationChannelDescriptors", + Handler: _NotificationChannelService_ListNotificationChannelDescriptors_Handler, + }, + { + MethodName: "GetNotificationChannelDescriptor", + Handler: _NotificationChannelService_GetNotificationChannelDescriptor_Handler, + }, + { + MethodName: "ListNotificationChannels", + Handler: _NotificationChannelService_ListNotificationChannels_Handler, + }, + { + MethodName: "GetNotificationChannel", + Handler: _NotificationChannelService_GetNotificationChannel_Handler, + }, + { + MethodName: "CreateNotificationChannel", + Handler: _NotificationChannelService_CreateNotificationChannel_Handler, + }, + { + MethodName: "UpdateNotificationChannel", + Handler: _NotificationChannelService_UpdateNotificationChannel_Handler, + }, + { + MethodName: "DeleteNotificationChannel", + Handler: _NotificationChannelService_DeleteNotificationChannel_Handler, + }, + { + MethodName: "SendNotificationChannelVerificationCode", + Handler: _NotificationChannelService_SendNotificationChannelVerificationCode_Handler, + }, + { + MethodName: "GetNotificationChannelVerificationCode", + Handler: _NotificationChannelService_GetNotificationChannelVerificationCode_Handler, + }, + { + MethodName: "VerifyNotificationChannel", + Handler: _NotificationChannelService_VerifyNotificationChannel_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/notification_service.proto", +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go new file mode 100644 index 00000000000..e9bfbd68f53 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go @@ -0,0 +1,212 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. 
DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/query_service.proto + +package monitoringpb + +import ( + context "context" + reflect "reflect" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var File_google_monitoring_v3_query_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_query_service_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x32, 0xde, 0x02, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0xa1, 0x01, 0x0a, 0x0f, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2b, 0x3a, 0x01, 0x2a, 0x22, + 0x26, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x3a, 0x71, 0x75, 0x65, 0x72, 0x79, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, + 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 
0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, + 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, + 0x65, 0x61, 0x64, 0x42, 0xcc, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x42, 0x11, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, + 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, + 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_google_monitoring_v3_query_service_proto_goTypes = []any{ + (*QueryTimeSeriesRequest)(nil), // 0: google.monitoring.v3.QueryTimeSeriesRequest + (*QueryTimeSeriesResponse)(nil), // 1: google.monitoring.v3.QueryTimeSeriesResponse +} +var file_google_monitoring_v3_query_service_proto_depIdxs = []int32{ + 0, // 0: google.monitoring.v3.QueryService.QueryTimeSeries:input_type -> google.monitoring.v3.QueryTimeSeriesRequest + 1, // 1: google.monitoring.v3.QueryService.QueryTimeSeries:output_type -> google.monitoring.v3.QueryTimeSeriesResponse + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_query_service_proto_init() } +func file_google_monitoring_v3_query_service_proto_init() { + if File_google_monitoring_v3_query_service_proto != nil { + return + } + file_google_monitoring_v3_metric_service_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_query_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_query_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_query_service_proto_depIdxs, + }.Build() + File_google_monitoring_v3_query_service_proto = out.File + file_google_monitoring_v3_query_service_proto_rawDesc = nil + file_google_monitoring_v3_query_service_proto_goTypes = nil + 
file_google_monitoring_v3_query_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// QueryServiceClient is the client API for QueryService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryServiceClient interface { + // Queries time series using Monitoring Query Language. + QueryTimeSeries(ctx context.Context, in *QueryTimeSeriesRequest, opts ...grpc.CallOption) (*QueryTimeSeriesResponse, error) +} + +type queryServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewQueryServiceClient(cc grpc.ClientConnInterface) QueryServiceClient { + return &queryServiceClient{cc} +} + +func (c *queryServiceClient) QueryTimeSeries(ctx context.Context, in *QueryTimeSeriesRequest, opts ...grpc.CallOption) (*QueryTimeSeriesResponse, error) { + out := new(QueryTimeSeriesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.QueryService/QueryTimeSeries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServiceServer is the server API for QueryService service. +type QueryServiceServer interface { + // Queries time series using Monitoring Query Language. + QueryTimeSeries(context.Context, *QueryTimeSeriesRequest) (*QueryTimeSeriesResponse, error) +} + +// UnimplementedQueryServiceServer can be embedded to have forward compatible implementations. +type UnimplementedQueryServiceServer struct { +} + +func (*UnimplementedQueryServiceServer) QueryTimeSeries(context.Context, *QueryTimeSeriesRequest) (*QueryTimeSeriesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method QueryTimeSeries not implemented") +} + +func RegisterQueryServiceServer(s *grpc.Server, srv QueryServiceServer) { + s.RegisterService(&_QueryService_serviceDesc, srv) +} + +func _QueryService_QueryTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryTimeSeriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServiceServer).QueryTimeSeries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.QueryService/QueryTimeSeries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServiceServer).QueryTimeSeries(ctx, req.(*QueryTimeSeriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _QueryService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.QueryService", + HandlerType: (*QueryServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "QueryTimeSeries", + Handler: _QueryService_QueryTimeSeries_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/query_service.proto", +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go new file mode 100644 index 00000000000..869a3738c09 --- /dev/null +++ 
b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go @@ -0,0 +1,3107 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/service.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + calendarperiod "google.golang.org/genproto/googleapis/type/calendarperiod" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// `ServiceLevelObjective.View` determines what form of +// `ServiceLevelObjective` is returned from `GetServiceLevelObjective`, +// `ListServiceLevelObjectives`, and `ListServiceLevelObjectiveVersions` RPCs. +type ServiceLevelObjective_View int32 + +const ( + // Same as FULL. + ServiceLevelObjective_VIEW_UNSPECIFIED ServiceLevelObjective_View = 0 + // Return the embedded `ServiceLevelIndicator` in the form in which it was + // defined. If it was defined using a `BasicSli`, return that `BasicSli`. + ServiceLevelObjective_FULL ServiceLevelObjective_View = 2 + // For `ServiceLevelIndicator`s using `BasicSli` articulation, instead + // return the `ServiceLevelIndicator` with its mode of computation fully + // spelled out as a `RequestBasedSli`. For `ServiceLevelIndicator`s using + // `RequestBasedSli` or `WindowsBasedSli`, return the + // `ServiceLevelIndicator` as it was provided. + ServiceLevelObjective_EXPLICIT ServiceLevelObjective_View = 1 +) + +// Enum value maps for ServiceLevelObjective_View. +var ( + ServiceLevelObjective_View_name = map[int32]string{ + 0: "VIEW_UNSPECIFIED", + 2: "FULL", + 1: "EXPLICIT", + } + ServiceLevelObjective_View_value = map[string]int32{ + "VIEW_UNSPECIFIED": 0, + "FULL": 2, + "EXPLICIT": 1, + } +) + +func (x ServiceLevelObjective_View) Enum() *ServiceLevelObjective_View { + p := new(ServiceLevelObjective_View) + *p = x + return p +} + +func (x ServiceLevelObjective_View) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ServiceLevelObjective_View) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_service_proto_enumTypes[0].Descriptor() +} + +func (ServiceLevelObjective_View) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_service_proto_enumTypes[0] +} + +func (x ServiceLevelObjective_View) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ServiceLevelObjective_View.Descriptor instead. 
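+//
+// The name/value maps above support plain conversions, e.g. (illustrative):
+//
+//	s := ServiceLevelObjective_View_name[int32(ServiceLevelObjective_EXPLICIT)] // "EXPLICIT"
+//	v := ServiceLevelObjective_View(ServiceLevelObjective_View_value["FULL"])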
+func (ServiceLevelObjective_View) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{1, 0} +} + +// A `Service` is a discrete, autonomous, and network-accessible unit, designed +// to solve an individual concern +// ([Wikipedia](https://en.wikipedia.org/wiki/Service-orientation)). In +// Cloud Monitoring, a `Service` acts as the root resource under which +// operational aspects of the service are accessible. +type Service struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier. Resource name for this Service. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Name used for UI elements listing this Service. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // REQUIRED. Service-identifying atoms specifying the underlying service. + // + // Types that are assignable to Identifier: + // + // *Service_Custom_ + // *Service_AppEngine_ + // *Service_CloudEndpoints_ + // *Service_ClusterIstio_ + // *Service_MeshIstio_ + // *Service_IstioCanonicalService_ + // *Service_CloudRun_ + // *Service_GkeNamespace_ + // *Service_GkeWorkload_ + // *Service_GkeService_ + Identifier isService_Identifier `protobuf_oneof:"identifier"` + // Message that contains the service type and service labels of this service + // if it is a basic service. + // Documentation and examples + // [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). + BasicService *Service_BasicService `protobuf:"bytes,19,opt,name=basic_service,json=basicService,proto3" json:"basic_service,omitempty"` + // Configuration for how to query telemetry on a Service. + Telemetry *Service_Telemetry `protobuf:"bytes,13,opt,name=telemetry,proto3" json:"telemetry,omitempty"` + // Labels which have been used to annotate the service. Label keys must start + // with a letter. Label keys and values may contain lowercase letters, + // numbers, underscores, and dashes. Label keys and values have a maximum + // length of 63 characters, and must be less than 128 bytes in size. Up to 64 + // label entries may be stored. For labels which do not have a semantic value, + // the empty string may be supplied for the label value. + UserLabels map[string]string `protobuf:"bytes,14,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Service) Reset() { + *x = Service{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service) ProtoMessage() {} + +func (x *Service) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service.ProtoReflect.Descriptor instead. 
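+//
+// A minimal sketch of populating the Identifier oneof (the names shown are
+// hypothetical):
+//
+//	svc := &Service{
+//		Name:        "projects/my-project/services/my-service",
+//		DisplayName: "My Service",
+//		Identifier:  &Service_Custom_{Custom: &Service_Custom{}},
+//	}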
+func (*Service) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0} +} + +func (x *Service) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Service) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (m *Service) GetIdentifier() isService_Identifier { + if m != nil { + return m.Identifier + } + return nil +} + +func (x *Service) GetCustom() *Service_Custom { + if x, ok := x.GetIdentifier().(*Service_Custom_); ok { + return x.Custom + } + return nil +} + +func (x *Service) GetAppEngine() *Service_AppEngine { + if x, ok := x.GetIdentifier().(*Service_AppEngine_); ok { + return x.AppEngine + } + return nil +} + +func (x *Service) GetCloudEndpoints() *Service_CloudEndpoints { + if x, ok := x.GetIdentifier().(*Service_CloudEndpoints_); ok { + return x.CloudEndpoints + } + return nil +} + +func (x *Service) GetClusterIstio() *Service_ClusterIstio { + if x, ok := x.GetIdentifier().(*Service_ClusterIstio_); ok { + return x.ClusterIstio + } + return nil +} + +func (x *Service) GetMeshIstio() *Service_MeshIstio { + if x, ok := x.GetIdentifier().(*Service_MeshIstio_); ok { + return x.MeshIstio + } + return nil +} + +func (x *Service) GetIstioCanonicalService() *Service_IstioCanonicalService { + if x, ok := x.GetIdentifier().(*Service_IstioCanonicalService_); ok { + return x.IstioCanonicalService + } + return nil +} + +func (x *Service) GetCloudRun() *Service_CloudRun { + if x, ok := x.GetIdentifier().(*Service_CloudRun_); ok { + return x.CloudRun + } + return nil +} + +func (x *Service) GetGkeNamespace() *Service_GkeNamespace { + if x, ok := x.GetIdentifier().(*Service_GkeNamespace_); ok { + return x.GkeNamespace + } + return nil +} + +func (x *Service) GetGkeWorkload() *Service_GkeWorkload { + if x, ok := x.GetIdentifier().(*Service_GkeWorkload_); ok { + return x.GkeWorkload + } + return nil +} + +func (x *Service) GetGkeService() *Service_GkeService { + if x, ok := x.GetIdentifier().(*Service_GkeService_); ok { + return x.GkeService + } + return nil +} + +func (x *Service) GetBasicService() *Service_BasicService { + if x != nil { + return x.BasicService + } + return nil +} + +func (x *Service) GetTelemetry() *Service_Telemetry { + if x != nil { + return x.Telemetry + } + return nil +} + +func (x *Service) GetUserLabels() map[string]string { + if x != nil { + return x.UserLabels + } + return nil +} + +type isService_Identifier interface { + isService_Identifier() +} + +type Service_Custom_ struct { + // Custom service type. + Custom *Service_Custom `protobuf:"bytes,6,opt,name=custom,proto3,oneof"` +} + +type Service_AppEngine_ struct { + // Type used for App Engine services. + AppEngine *Service_AppEngine `protobuf:"bytes,7,opt,name=app_engine,json=appEngine,proto3,oneof"` +} + +type Service_CloudEndpoints_ struct { + // Type used for Cloud Endpoints services. + CloudEndpoints *Service_CloudEndpoints `protobuf:"bytes,8,opt,name=cloud_endpoints,json=cloudEndpoints,proto3,oneof"` +} + +type Service_ClusterIstio_ struct { + // Type used for Istio services that live in a Kubernetes cluster. + ClusterIstio *Service_ClusterIstio `protobuf:"bytes,9,opt,name=cluster_istio,json=clusterIstio,proto3,oneof"` +} + +type Service_MeshIstio_ struct { + // Type used for Istio services scoped to an Istio mesh. 
+ MeshIstio *Service_MeshIstio `protobuf:"bytes,10,opt,name=mesh_istio,json=meshIstio,proto3,oneof"` +} + +type Service_IstioCanonicalService_ struct { + // Type used for canonical services scoped to an Istio mesh. + // Metrics for Istio are + // [documented here](https://istio.io/latest/docs/reference/config/metrics/) + IstioCanonicalService *Service_IstioCanonicalService `protobuf:"bytes,11,opt,name=istio_canonical_service,json=istioCanonicalService,proto3,oneof"` +} + +type Service_CloudRun_ struct { + // Type used for Cloud Run services. + CloudRun *Service_CloudRun `protobuf:"bytes,12,opt,name=cloud_run,json=cloudRun,proto3,oneof"` +} + +type Service_GkeNamespace_ struct { + // Type used for GKE Namespaces. + GkeNamespace *Service_GkeNamespace `protobuf:"bytes,15,opt,name=gke_namespace,json=gkeNamespace,proto3,oneof"` +} + +type Service_GkeWorkload_ struct { + // Type used for GKE Workloads. + GkeWorkload *Service_GkeWorkload `protobuf:"bytes,16,opt,name=gke_workload,json=gkeWorkload,proto3,oneof"` +} + +type Service_GkeService_ struct { + // Type used for GKE Services (the Kubernetes concept of a service). + GkeService *Service_GkeService `protobuf:"bytes,17,opt,name=gke_service,json=gkeService,proto3,oneof"` +} + +func (*Service_Custom_) isService_Identifier() {} + +func (*Service_AppEngine_) isService_Identifier() {} + +func (*Service_CloudEndpoints_) isService_Identifier() {} + +func (*Service_ClusterIstio_) isService_Identifier() {} + +func (*Service_MeshIstio_) isService_Identifier() {} + +func (*Service_IstioCanonicalService_) isService_Identifier() {} + +func (*Service_CloudRun_) isService_Identifier() {} + +func (*Service_GkeNamespace_) isService_Identifier() {} + +func (*Service_GkeWorkload_) isService_Identifier() {} + +func (*Service_GkeService_) isService_Identifier() {} + +// A Service-Level Objective (SLO) describes a level of desired good service. It +// consists of a service-level indicator (SLI), a performance goal, and a period +// over which the objective is to be evaluated against that goal. The SLO can +// use SLIs defined in a number of different manners. Typical SLOs might include +// "99% of requests in each rolling week have latency below 200 milliseconds" or +// "99.5% of requests in each calendar month return successfully." +type ServiceLevelObjective struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier. Resource name for this `ServiceLevelObjective`. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Name used for UI elements listing this SLO. + DisplayName string `protobuf:"bytes,11,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The definition of good service, used to measure and calculate the quality + // of the `Service`'s performance with respect to a single aspect of service + // quality. + ServiceLevelIndicator *ServiceLevelIndicator `protobuf:"bytes,3,opt,name=service_level_indicator,json=serviceLevelIndicator,proto3" json:"service_level_indicator,omitempty"` + // The fraction of service that must be good in order for this objective to be + // met. `0 < goal <= 0.999`. + Goal float64 `protobuf:"fixed64,4,opt,name=goal,proto3" json:"goal,omitempty"` + // The time period over which the objective will be evaluated. 
+	// + // Types that are assignable to Period: + // + // *ServiceLevelObjective_RollingPeriod + // *ServiceLevelObjective_CalendarPeriod + Period isServiceLevelObjective_Period `protobuf_oneof:"period"` + // Labels which have been used to annotate the service-level objective. Label + // keys must start with a letter. Label keys and values may contain lowercase + // letters, numbers, underscores, and dashes. Label keys and values have a + // maximum length of 63 characters, and must be less than 128 bytes in size. + // Up to 64 label entries may be stored. For labels which do not have a + // semantic value, the empty string may be supplied for the label value. + UserLabels map[string]string `protobuf:"bytes,12,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ServiceLevelObjective) Reset() { + *x = ServiceLevelObjective{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceLevelObjective) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceLevelObjective) ProtoMessage() {} + +func (x *ServiceLevelObjective) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceLevelObjective.ProtoReflect.Descriptor instead. +func (*ServiceLevelObjective) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ServiceLevelObjective) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ServiceLevelObjective) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *ServiceLevelObjective) GetServiceLevelIndicator() *ServiceLevelIndicator { + if x != nil { + return x.ServiceLevelIndicator + } + return nil +} + +func (x *ServiceLevelObjective) GetGoal() float64 { + if x != nil { + return x.Goal + } + return 0 +} + +func (m *ServiceLevelObjective) GetPeriod() isServiceLevelObjective_Period { + if m != nil { + return m.Period + } + return nil +} + +func (x *ServiceLevelObjective) GetRollingPeriod() *durationpb.Duration { + if x, ok := x.GetPeriod().(*ServiceLevelObjective_RollingPeriod); ok { + return x.RollingPeriod + } + return nil +} + +func (x *ServiceLevelObjective) GetCalendarPeriod() calendarperiod.CalendarPeriod { + if x, ok := x.GetPeriod().(*ServiceLevelObjective_CalendarPeriod); ok { + return x.CalendarPeriod + } + return calendarperiod.CalendarPeriod(0) +} + +func (x *ServiceLevelObjective) GetUserLabels() map[string]string { + if x != nil { + return x.UserLabels + } + return nil +} + +type isServiceLevelObjective_Period interface { + isServiceLevelObjective_Period() +} + +type ServiceLevelObjective_RollingPeriod struct { + // A rolling time period, semantically "in the past `<rolling period>`". + // Must be an integer multiple of 1 day no larger than 30 days. + RollingPeriod *durationpb.Duration `protobuf:"bytes,5,opt,name=rolling_period,json=rollingPeriod,proto3,oneof"` +} + +type ServiceLevelObjective_CalendarPeriod struct { + // A calendar period, semantically "since the start of the current + // `<calendar period>`".
At this time, only `DAY`, `WEEK`, `FORTNIGHT`, and + // `MONTH` are supported. + CalendarPeriod calendarperiod.CalendarPeriod `protobuf:"varint,6,opt,name=calendar_period,json=calendarPeriod,proto3,enum=google.type.CalendarPeriod,oneof"` +} + +func (*ServiceLevelObjective_RollingPeriod) isServiceLevelObjective_Period() {} + +func (*ServiceLevelObjective_CalendarPeriod) isServiceLevelObjective_Period() {} + +// A Service-Level Indicator (SLI) describes the "performance" of a service. For +// some services, the SLI is well-defined. In such cases, the SLI can be +// described easily by referencing the well-known SLI and providing the needed +// parameters. Alternatively, a "custom" SLI can be defined with a query to the +// underlying metric store. An SLI is defined to be `good_service / +// total_service` over any queried time interval. The value of performance +// always falls into the range `0 <= performance <= 1`. A custom SLI describes +// how to compute this ratio, whether this is by dividing values from a pair of +// time series, cutting a `Distribution` into good and bad counts, or counting +// time windows in which the service complies with a criterion. For separation +// of concerns, a single Service-Level Indicator measures performance for only +// one aspect of service quality, such as fraction of successful queries or +// fast-enough queries. +type ServiceLevelIndicator struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Service level indicators can be grouped by whether the "unit" of service + // being measured is based on counts of good requests or on counts of good + // time windows + // + // Types that are assignable to Type: + // + // *ServiceLevelIndicator_BasicSli + // *ServiceLevelIndicator_RequestBased + // *ServiceLevelIndicator_WindowsBased + Type isServiceLevelIndicator_Type `protobuf_oneof:"type"` +} + +func (x *ServiceLevelIndicator) Reset() { + *x = ServiceLevelIndicator{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceLevelIndicator) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceLevelIndicator) ProtoMessage() {} + +func (x *ServiceLevelIndicator) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceLevelIndicator.ProtoReflect.Descriptor instead. 
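+//
+// A minimal sketch selecting the basic-SLI form of the Type oneof
+// (illustrative only):
+//
+//	sli := &ServiceLevelIndicator{
+//		Type: &ServiceLevelIndicator_BasicSli{BasicSli: &BasicSli{}},
+//	}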
+func (*ServiceLevelIndicator) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{2} +} + +func (m *ServiceLevelIndicator) GetType() isServiceLevelIndicator_Type { + if m != nil { + return m.Type + } + return nil +} + +func (x *ServiceLevelIndicator) GetBasicSli() *BasicSli { + if x, ok := x.GetType().(*ServiceLevelIndicator_BasicSli); ok { + return x.BasicSli + } + return nil +} + +func (x *ServiceLevelIndicator) GetRequestBased() *RequestBasedSli { + if x, ok := x.GetType().(*ServiceLevelIndicator_RequestBased); ok { + return x.RequestBased + } + return nil +} + +func (x *ServiceLevelIndicator) GetWindowsBased() *WindowsBasedSli { + if x, ok := x.GetType().(*ServiceLevelIndicator_WindowsBased); ok { + return x.WindowsBased + } + return nil +} + +type isServiceLevelIndicator_Type interface { + isServiceLevelIndicator_Type() +} + +type ServiceLevelIndicator_BasicSli struct { + // Basic SLI on a well-known service type. + BasicSli *BasicSli `protobuf:"bytes,4,opt,name=basic_sli,json=basicSli,proto3,oneof"` +} + +type ServiceLevelIndicator_RequestBased struct { + // Request-based SLIs + RequestBased *RequestBasedSli `protobuf:"bytes,1,opt,name=request_based,json=requestBased,proto3,oneof"` +} + +type ServiceLevelIndicator_WindowsBased struct { + // Windows-based SLIs + WindowsBased *WindowsBasedSli `protobuf:"bytes,2,opt,name=windows_based,json=windowsBased,proto3,oneof"` +} + +func (*ServiceLevelIndicator_BasicSli) isServiceLevelIndicator_Type() {} + +func (*ServiceLevelIndicator_RequestBased) isServiceLevelIndicator_Type() {} + +func (*ServiceLevelIndicator_WindowsBased) isServiceLevelIndicator_Type() {} + +// An SLI measuring performance on a well-known service type. Performance will +// be computed on the basis of pre-defined metrics. The type of the +// `service_resource` determines the metrics to use and the +// `service_resource.labels` and `metric_labels` are used to construct a +// monitoring filter to filter that metric down to just the data relevant to +// this service. +type BasicSli struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // OPTIONAL: The set of RPCs to which this SLI is relevant. Telemetry from + // other methods will not be used to calculate performance for this SLI. If + // omitted, this SLI applies to all the Service's methods. For service types + // that don't support breaking down by method, setting this field will result + // in an error. + Method []string `protobuf:"bytes,7,rep,name=method,proto3" json:"method,omitempty"` + // OPTIONAL: The set of locations to which this SLI is relevant. Telemetry + // from other locations will not be used to calculate performance for this + // SLI. If omitted, this SLI applies to all locations in which the Service has + // activity. For service types that don't support breaking down by location, + // setting this field will result in an error. + Location []string `protobuf:"bytes,8,rep,name=location,proto3" json:"location,omitempty"` + // OPTIONAL: The set of API versions to which this SLI is relevant. Telemetry + // from other API versions will not be used to calculate performance for this + // SLI. If omitted, this SLI applies to all API versions. For service types + // that don't support breaking down by version, setting this field will result + // in an error. 
+ Version []string `protobuf:"bytes,9,rep,name=version,proto3" json:"version,omitempty"` + // This SLI can be evaluated on the basis of availability or latency. + // + // Types that are assignable to SliCriteria: + // + // *BasicSli_Availability + // *BasicSli_Latency + SliCriteria isBasicSli_SliCriteria `protobuf_oneof:"sli_criteria"` +} + +func (x *BasicSli) Reset() { + *x = BasicSli{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BasicSli) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BasicSli) ProtoMessage() {} + +func (x *BasicSli) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BasicSli.ProtoReflect.Descriptor instead. +func (*BasicSli) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{3} +} + +func (x *BasicSli) GetMethod() []string { + if x != nil { + return x.Method + } + return nil +} + +func (x *BasicSli) GetLocation() []string { + if x != nil { + return x.Location + } + return nil +} + +func (x *BasicSli) GetVersion() []string { + if x != nil { + return x.Version + } + return nil +} + +func (m *BasicSli) GetSliCriteria() isBasicSli_SliCriteria { + if m != nil { + return m.SliCriteria + } + return nil +} + +func (x *BasicSli) GetAvailability() *BasicSli_AvailabilityCriteria { + if x, ok := x.GetSliCriteria().(*BasicSli_Availability); ok { + return x.Availability + } + return nil +} + +func (x *BasicSli) GetLatency() *BasicSli_LatencyCriteria { + if x, ok := x.GetSliCriteria().(*BasicSli_Latency); ok { + return x.Latency + } + return nil +} + +type isBasicSli_SliCriteria interface { + isBasicSli_SliCriteria() +} + +type BasicSli_Availability struct { + // Good service is defined to be the count of requests made to this service + // that return successfully. + Availability *BasicSli_AvailabilityCriteria `protobuf:"bytes,2,opt,name=availability,proto3,oneof"` +} + +type BasicSli_Latency struct { + // Good service is defined to be the count of requests made to this service + // that are fast enough with respect to `latency.threshold`. + Latency *BasicSli_LatencyCriteria `protobuf:"bytes,3,opt,name=latency,proto3,oneof"` +} + +func (*BasicSli_Availability) isBasicSli_SliCriteria() {} + +func (*BasicSli_Latency) isBasicSli_SliCriteria() {} + +// Range of numerical values within `min` and `max`. +type Range struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Range minimum. + Min float64 `protobuf:"fixed64,1,opt,name=min,proto3" json:"min,omitempty"` + // Range maximum. 
+ Max float64 `protobuf:"fixed64,2,opt,name=max,proto3" json:"max,omitempty"` +} + +func (x *Range) Reset() { + *x = Range{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Range) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Range) ProtoMessage() {} + +func (x *Range) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Range.ProtoReflect.Descriptor instead. +func (*Range) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{4} +} + +func (x *Range) GetMin() float64 { + if x != nil { + return x.Min + } + return 0 +} + +func (x *Range) GetMax() float64 { + if x != nil { + return x.Max + } + return 0 +} + +// Service Level Indicators for which atomic units of service are counted +// directly. +type RequestBasedSli struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The means to compute a ratio of `good_service` to `total_service`. + // + // Types that are assignable to Method: + // + // *RequestBasedSli_GoodTotalRatio + // *RequestBasedSli_DistributionCut + Method isRequestBasedSli_Method `protobuf_oneof:"method"` +} + +func (x *RequestBasedSli) Reset() { + *x = RequestBasedSli{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestBasedSli) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestBasedSli) ProtoMessage() {} + +func (x *RequestBasedSli) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestBasedSli.ProtoReflect.Descriptor instead. +func (*RequestBasedSli) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{5} +} + +func (m *RequestBasedSli) GetMethod() isRequestBasedSli_Method { + if m != nil { + return m.Method + } + return nil +} + +func (x *RequestBasedSli) GetGoodTotalRatio() *TimeSeriesRatio { + if x, ok := x.GetMethod().(*RequestBasedSli_GoodTotalRatio); ok { + return x.GoodTotalRatio + } + return nil +} + +func (x *RequestBasedSli) GetDistributionCut() *DistributionCut { + if x, ok := x.GetMethod().(*RequestBasedSli_DistributionCut); ok { + return x.DistributionCut + } + return nil +} + +type isRequestBasedSli_Method interface { + isRequestBasedSli_Method() +} + +type RequestBasedSli_GoodTotalRatio struct { + // `good_total_ratio` is used when the ratio of `good_service` to + // `total_service` is computed from two `TimeSeries`. 
+ GoodTotalRatio *TimeSeriesRatio `protobuf:"bytes,1,opt,name=good_total_ratio,json=goodTotalRatio,proto3,oneof"` +} + +type RequestBasedSli_DistributionCut struct { + // `distribution_cut` is used when `good_service` is a count of values + // aggregated in a `Distribution` that fall into a good range. The + // `total_service` is the total count of all values aggregated in the + // `Distribution`. + DistributionCut *DistributionCut `protobuf:"bytes,3,opt,name=distribution_cut,json=distributionCut,proto3,oneof"` +} + +func (*RequestBasedSli_GoodTotalRatio) isRequestBasedSli_Method() {} + +func (*RequestBasedSli_DistributionCut) isRequestBasedSli_Method() {} + +// A `TimeSeriesRatio` specifies two `TimeSeries` to use for computing the +// `good_service / total_service` ratio. The specified `TimeSeries` must have +// `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = +// DELTA` or `MetricKind = CUMULATIVE`. The `TimeSeriesRatio` must specify +// exactly two of good, bad, and total, and the relationship `good_service + +// bad_service = total_service` will be assumed. +type TimeSeriesRatio struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifying a `TimeSeries` quantifying good service provided. Must have + // `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = + // DELTA` or `MetricKind = CUMULATIVE`. + GoodServiceFilter string `protobuf:"bytes,4,opt,name=good_service_filter,json=goodServiceFilter,proto3" json:"good_service_filter,omitempty"` + // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifying a `TimeSeries` quantifying bad service, either demanded service + // that was not provided or demanded service that was of inadequate quality. + // Must have `ValueType = DOUBLE` or `ValueType = INT64` and must have + // `MetricKind = DELTA` or `MetricKind = CUMULATIVE`. + BadServiceFilter string `protobuf:"bytes,5,opt,name=bad_service_filter,json=badServiceFilter,proto3" json:"bad_service_filter,omitempty"` + // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifying a `TimeSeries` quantifying total demanded service. Must have + // `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = + // DELTA` or `MetricKind = CUMULATIVE`. + TotalServiceFilter string `protobuf:"bytes,6,opt,name=total_service_filter,json=totalServiceFilter,proto3" json:"total_service_filter,omitempty"` +} + +func (x *TimeSeriesRatio) Reset() { + *x = TimeSeriesRatio{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeSeriesRatio) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSeriesRatio) ProtoMessage() {} + +func (x *TimeSeriesRatio) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeSeriesRatio.ProtoReflect.Descriptor instead. 
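A minimal sketch of wiring a TimeSeriesRatio into a request-based SLI, following the rule above that exactly two of good, bad, and total are specified. The import path and both monitoring filters are assumptions, not values taken from this file:

package main

import (
	"fmt"

	monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
)

func main() {
	sli := &monitoringpb.ServiceLevelIndicator{
		Type: &monitoringpb.ServiceLevelIndicator_RequestBased{
			RequestBased: &monitoringpb.RequestBasedSli{
				Method: &monitoringpb.RequestBasedSli_GoodTotalRatio{
					GoodTotalRatio: &monitoringpb.TimeSeriesRatio{
						// good_service + bad_service = total_service is
						// assumed, so setting good and total lets bad be
						// derived.
						GoodServiceFilter:  `metric.type="serviceruntime.googleapis.com/api/request_count" metric.labels.response_code_class="2xx"`,
						TotalServiceFilter: `metric.type="serviceruntime.googleapis.com/api/request_count"`,
					},
				},
			},
		},
	}
	fmt.Println(sli.GetRequestBased().GetGoodTotalRatio().GetTotalServiceFilter())
}
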
+func (*TimeSeriesRatio) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{6} +} + +func (x *TimeSeriesRatio) GetGoodServiceFilter() string { + if x != nil { + return x.GoodServiceFilter + } + return "" +} + +func (x *TimeSeriesRatio) GetBadServiceFilter() string { + if x != nil { + return x.BadServiceFilter + } + return "" +} + +func (x *TimeSeriesRatio) GetTotalServiceFilter() string { + if x != nil { + return x.TotalServiceFilter + } + return "" +} + +// A `DistributionCut` defines a `TimeSeries` and thresholds used for measuring +// good service and total service. The `TimeSeries` must have `ValueType = +// DISTRIBUTION` and `MetricKind = DELTA` or `MetricKind = CUMULATIVE`. The +// computed `good_service` will be the estimated count of values in the +// `Distribution` that fall within the specified `min` and `max`. +type DistributionCut struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifying a `TimeSeries` aggregating values. Must have `ValueType = + // DISTRIBUTION` and `MetricKind = DELTA` or `MetricKind = CUMULATIVE`. + DistributionFilter string `protobuf:"bytes,4,opt,name=distribution_filter,json=distributionFilter,proto3" json:"distribution_filter,omitempty"` + // Range of values considered "good." For a one-sided range, set one bound to + // an infinite value. + Range *Range `protobuf:"bytes,5,opt,name=range,proto3" json:"range,omitempty"` +} + +func (x *DistributionCut) Reset() { + *x = DistributionCut{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DistributionCut) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DistributionCut) ProtoMessage() {} + +func (x *DistributionCut) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DistributionCut.ProtoReflect.Descriptor instead. +func (*DistributionCut) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{7} +} + +func (x *DistributionCut) GetDistributionFilter() string { + if x != nil { + return x.DistributionFilter + } + return "" +} + +func (x *DistributionCut) GetRange() *Range { + if x != nil { + return x.Range + } + return nil +} + +// A `WindowsBasedSli` defines `good_service` as the count of time windows for +// which the provided service was of good quality. Criteria for determining +// if service was good are embedded in the `window_criterion`. +type WindowsBasedSli struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The criterion to use for evaluating window goodness. + // + // Types that are assignable to WindowCriterion: + // + // *WindowsBasedSli_GoodBadMetricFilter + // *WindowsBasedSli_GoodTotalRatioThreshold + // *WindowsBasedSli_MetricMeanInRange + // *WindowsBasedSli_MetricSumInRange + WindowCriterion isWindowsBasedSli_WindowCriterion `protobuf_oneof:"window_criterion"` + // Duration over which window quality is evaluated. 
Must be an integer + // fraction of a day and at least `60s`. + WindowPeriod *durationpb.Duration `protobuf:"bytes,4,opt,name=window_period,json=windowPeriod,proto3" json:"window_period,omitempty"` +} + +func (x *WindowsBasedSli) Reset() { + *x = WindowsBasedSli{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WindowsBasedSli) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WindowsBasedSli) ProtoMessage() {} + +func (x *WindowsBasedSli) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WindowsBasedSli.ProtoReflect.Descriptor instead. +func (*WindowsBasedSli) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{8} +} + +func (m *WindowsBasedSli) GetWindowCriterion() isWindowsBasedSli_WindowCriterion { + if m != nil { + return m.WindowCriterion + } + return nil +} + +func (x *WindowsBasedSli) GetGoodBadMetricFilter() string { + if x, ok := x.GetWindowCriterion().(*WindowsBasedSli_GoodBadMetricFilter); ok { + return x.GoodBadMetricFilter + } + return "" +} + +func (x *WindowsBasedSli) GetGoodTotalRatioThreshold() *WindowsBasedSli_PerformanceThreshold { + if x, ok := x.GetWindowCriterion().(*WindowsBasedSli_GoodTotalRatioThreshold); ok { + return x.GoodTotalRatioThreshold + } + return nil +} + +func (x *WindowsBasedSli) GetMetricMeanInRange() *WindowsBasedSli_MetricRange { + if x, ok := x.GetWindowCriterion().(*WindowsBasedSli_MetricMeanInRange); ok { + return x.MetricMeanInRange + } + return nil +} + +func (x *WindowsBasedSli) GetMetricSumInRange() *WindowsBasedSli_MetricRange { + if x, ok := x.GetWindowCriterion().(*WindowsBasedSli_MetricSumInRange); ok { + return x.MetricSumInRange + } + return nil +} + +func (x *WindowsBasedSli) GetWindowPeriod() *durationpb.Duration { + if x != nil { + return x.WindowPeriod + } + return nil +} + +type isWindowsBasedSli_WindowCriterion interface { + isWindowsBasedSli_WindowCriterion() +} + +type WindowsBasedSli_GoodBadMetricFilter struct { + // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifying a `TimeSeries` with `ValueType = BOOL`. The window is good if + // any `true` values appear in the window. + GoodBadMetricFilter string `protobuf:"bytes,5,opt,name=good_bad_metric_filter,json=goodBadMetricFilter,proto3,oneof"` +} + +type WindowsBasedSli_GoodTotalRatioThreshold struct { + // A window is good if its `performance` is high enough. + GoodTotalRatioThreshold *WindowsBasedSli_PerformanceThreshold `protobuf:"bytes,2,opt,name=good_total_ratio_threshold,json=goodTotalRatioThreshold,proto3,oneof"` +} + +type WindowsBasedSli_MetricMeanInRange struct { + // A window is good if the metric's value is in a good range, averaged + // across returned streams. + MetricMeanInRange *WindowsBasedSli_MetricRange `protobuf:"bytes,6,opt,name=metric_mean_in_range,json=metricMeanInRange,proto3,oneof"` +} + +type WindowsBasedSli_MetricSumInRange struct { + // A window is good if the metric's value is in a good range, summed across + // returned streams. 
+ MetricSumInRange *WindowsBasedSli_MetricRange `protobuf:"bytes,7,opt,name=metric_sum_in_range,json=metricSumInRange,proto3,oneof"` +} + +func (*WindowsBasedSli_GoodBadMetricFilter) isWindowsBasedSli_WindowCriterion() {} + +func (*WindowsBasedSli_GoodTotalRatioThreshold) isWindowsBasedSli_WindowCriterion() {} + +func (*WindowsBasedSli_MetricMeanInRange) isWindowsBasedSli_WindowCriterion() {} + +func (*WindowsBasedSli_MetricSumInRange) isWindowsBasedSli_WindowCriterion() {} + +// Use a custom service to designate a service that you want to monitor +// when none of the other service types (like App Engine, Cloud Run, or +// a GKE type) matches your intended service. +type Service_Custom struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Service_Custom) Reset() { + *x = Service_Custom{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_Custom) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_Custom) ProtoMessage() {} + +func (x *Service_Custom) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_Custom.ProtoReflect.Descriptor instead. +func (*Service_Custom) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 0} +} + +// App Engine service. Learn more at https://cloud.google.com/appengine. +type Service_AppEngine struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The ID of the App Engine module underlying this service. Corresponds to + // the `module_id` resource label in the [`gae_app` monitored + // resource](https://cloud.google.com/monitoring/api/resources#tag_gae_app). + ModuleId string `protobuf:"bytes,1,opt,name=module_id,json=moduleId,proto3" json:"module_id,omitempty"` +} + +func (x *Service_AppEngine) Reset() { + *x = Service_AppEngine{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_AppEngine) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_AppEngine) ProtoMessage() {} + +func (x *Service_AppEngine) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_AppEngine.ProtoReflect.Descriptor instead. +func (*Service_AppEngine) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *Service_AppEngine) GetModuleId() string { + if x != nil { + return x.ModuleId + } + return "" +} + +// Cloud Endpoints service. Learn more at https://cloud.google.com/endpoints. 
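A minimal sketch of a windows-based SLI built from the pieces above: a window counts as good when the mean of a gauge metric stays inside a Range, evaluated over five-minute windows. The import path, metric filter, and bounds are invented for illustration:

package main

import (
	"fmt"
	"time"

	monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	wb := &monitoringpb.WindowsBasedSli{
		WindowCriterion: &monitoringpb.WindowsBasedSli_MetricMeanInRange{
			MetricMeanInRange: &monitoringpb.WindowsBasedSli_MetricRange{
				// Placeholder filter; must name a GAUGE time series.
				TimeSeries: `metric.type="custom.googleapis.com/latency_ms" resource.type="gce_instance"`,
				Range:      &monitoringpb.Range{Min: 0, Max: 250},
			},
		},
		// Per the comment above: at least 60s and an integer fraction of a day.
		WindowPeriod: durationpb.New(5 * time.Minute),
	}
	fmt.Println(wb.GetMetricMeanInRange().GetRange().GetMax())
}
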
+type Service_CloudEndpoints struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the Cloud Endpoints service underlying this service. + // Corresponds to the `service` resource label in the [`api` monitored + // resource](https://cloud.google.com/monitoring/api/resources#tag_api). + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` +} + +func (x *Service_CloudEndpoints) Reset() { + *x = Service_CloudEndpoints{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_CloudEndpoints) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_CloudEndpoints) ProtoMessage() {} + +func (x *Service_CloudEndpoints) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_CloudEndpoints.ProtoReflect.Descriptor instead. +func (*Service_CloudEndpoints) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 2} +} + +func (x *Service_CloudEndpoints) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +// Istio service scoped to a single Kubernetes cluster. Learn more at +// https://istio.io. Clusters running OSS Istio will have their services +// ingested as this type. +type Service_ClusterIstio struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The location of the Kubernetes cluster in which this Istio service is + // defined. Corresponds to the `location` resource label in `k8s_cluster` + // resources. + Location string `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"` + // The name of the Kubernetes cluster in which this Istio service is + // defined. Corresponds to the `cluster_name` resource label in + // `k8s_cluster` resources. + ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // The namespace of the Istio service underlying this service. Corresponds + // to the `destination_service_namespace` metric label in Istio metrics. + ServiceNamespace string `protobuf:"bytes,3,opt,name=service_namespace,json=serviceNamespace,proto3" json:"service_namespace,omitempty"` + // The name of the Istio service underlying this service. Corresponds to the + // `destination_service_name` metric label in Istio metrics. 
+ ServiceName string `protobuf:"bytes,4,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` +} + +func (x *Service_ClusterIstio) Reset() { + *x = Service_ClusterIstio{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_ClusterIstio) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_ClusterIstio) ProtoMessage() {} + +func (x *Service_ClusterIstio) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_ClusterIstio.ProtoReflect.Descriptor instead. +func (*Service_ClusterIstio) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 3} +} + +func (x *Service_ClusterIstio) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *Service_ClusterIstio) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *Service_ClusterIstio) GetServiceNamespace() string { + if x != nil { + return x.ServiceNamespace + } + return "" +} + +func (x *Service_ClusterIstio) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +// Istio service scoped to an Istio mesh. Anthos clusters running ASM < 1.6.8 +// will have their services ingested as this type. +type Service_MeshIstio struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier for the mesh in which this Istio service is defined. + // Corresponds to the `mesh_uid` metric label in Istio metrics. + MeshUid string `protobuf:"bytes,1,opt,name=mesh_uid,json=meshUid,proto3" json:"mesh_uid,omitempty"` + // The namespace of the Istio service underlying this service. Corresponds + // to the `destination_service_namespace` metric label in Istio metrics. + ServiceNamespace string `protobuf:"bytes,3,opt,name=service_namespace,json=serviceNamespace,proto3" json:"service_namespace,omitempty"` + // The name of the Istio service underlying this service. Corresponds to the + // `destination_service_name` metric label in Istio metrics. + ServiceName string `protobuf:"bytes,4,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` +} + +func (x *Service_MeshIstio) Reset() { + *x = Service_MeshIstio{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_MeshIstio) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_MeshIstio) ProtoMessage() {} + +func (x *Service_MeshIstio) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_MeshIstio.ProtoReflect.Descriptor instead. 
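A minimal sketch of filling in the labels that identify a cluster-scoped Istio service; only the field names come from the message above, and every value is a placeholder:

package main

import (
	"fmt"

	monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
)

func main() {
	ci := &monitoringpb.Service_ClusterIstio{
		Location:         "us-central1-a", // `location` label on k8s_cluster
		ClusterName:      "prod-cluster",  // `cluster_name` label on k8s_cluster
		ServiceNamespace: "default",       // destination_service_namespace
		ServiceName:      "checkout",      // destination_service_name
	}
	fmt.Printf("%s/%s in %s\n", ci.GetServiceNamespace(), ci.GetServiceName(), ci.GetClusterName())
}
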
+func (*Service_MeshIstio) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 4} +} + +func (x *Service_MeshIstio) GetMeshUid() string { + if x != nil { + return x.MeshUid + } + return "" +} + +func (x *Service_MeshIstio) GetServiceNamespace() string { + if x != nil { + return x.ServiceNamespace + } + return "" +} + +func (x *Service_MeshIstio) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +// Canonical service scoped to an Istio mesh. Anthos clusters running ASM >= +// 1.6.8 will have their services ingested as this type. +type Service_IstioCanonicalService struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier for the Istio mesh in which this canonical service is defined. + // Corresponds to the `mesh_uid` metric label in + // [Istio metrics](https://cloud.google.com/monitoring/api/metrics_istio). + MeshUid string `protobuf:"bytes,1,opt,name=mesh_uid,json=meshUid,proto3" json:"mesh_uid,omitempty"` + // The namespace of the canonical service underlying this service. + // Corresponds to the `destination_canonical_service_namespace` metric + // label in [Istio + // metrics](https://cloud.google.com/monitoring/api/metrics_istio). + CanonicalServiceNamespace string `protobuf:"bytes,3,opt,name=canonical_service_namespace,json=canonicalServiceNamespace,proto3" json:"canonical_service_namespace,omitempty"` + // The name of the canonical service underlying this service. + // Corresponds to the `destination_canonical_service_name` metric label in + // label in [Istio + // metrics](https://cloud.google.com/monitoring/api/metrics_istio). + CanonicalService string `protobuf:"bytes,4,opt,name=canonical_service,json=canonicalService,proto3" json:"canonical_service,omitempty"` +} + +func (x *Service_IstioCanonicalService) Reset() { + *x = Service_IstioCanonicalService{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_IstioCanonicalService) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_IstioCanonicalService) ProtoMessage() {} + +func (x *Service_IstioCanonicalService) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_IstioCanonicalService.ProtoReflect.Descriptor instead. +func (*Service_IstioCanonicalService) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 5} +} + +func (x *Service_IstioCanonicalService) GetMeshUid() string { + if x != nil { + return x.MeshUid + } + return "" +} + +func (x *Service_IstioCanonicalService) GetCanonicalServiceNamespace() string { + if x != nil { + return x.CanonicalServiceNamespace + } + return "" +} + +func (x *Service_IstioCanonicalService) GetCanonicalService() string { + if x != nil { + return x.CanonicalService + } + return "" +} + +// Cloud Run service. Learn more at https://cloud.google.com/run. +type Service_CloudRun struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the Cloud Run service. 
Corresponds to the `service_name` + // resource label in the [`cloud_run_revision` monitored + // resource](https://cloud.google.com/monitoring/api/resources#tag_cloud_run_revision). + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + // The location the service is run. Corresponds to the `location` + // resource label in the [`cloud_run_revision` monitored + // resource](https://cloud.google.com/monitoring/api/resources#tag_cloud_run_revision). + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` +} + +func (x *Service_CloudRun) Reset() { + *x = Service_CloudRun{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_CloudRun) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_CloudRun) ProtoMessage() {} + +func (x *Service_CloudRun) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_CloudRun.ProtoReflect.Descriptor instead. +func (*Service_CloudRun) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 6} +} + +func (x *Service_CloudRun) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +func (x *Service_CloudRun) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +// GKE Namespace. The field names correspond to the resource metadata labels +// on monitored resources that fall under a namespace (for example, +// `k8s_container` or `k8s_pod`). +type Service_GkeNamespace struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The project this resource lives in. For legacy services + // migrated from the `Custom` type, this may be a distinct project from the + // one parenting the service itself. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + // The location of the parent cluster. This may be a zone or region. + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + // The name of the parent cluster. + ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // The name of this namespace. 
+ NamespaceName string `protobuf:"bytes,4,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"` +} + +func (x *Service_GkeNamespace) Reset() { + *x = Service_GkeNamespace{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_GkeNamespace) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_GkeNamespace) ProtoMessage() {} + +func (x *Service_GkeNamespace) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_GkeNamespace.ProtoReflect.Descriptor instead. +func (*Service_GkeNamespace) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 7} +} + +func (x *Service_GkeNamespace) GetProjectId() string { + if x != nil { + return x.ProjectId + } + return "" +} + +func (x *Service_GkeNamespace) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *Service_GkeNamespace) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *Service_GkeNamespace) GetNamespaceName() string { + if x != nil { + return x.NamespaceName + } + return "" +} + +// A GKE Workload (Deployment, StatefulSet, etc). The field names correspond +// to the metadata labels on monitored resources that fall under a workload +// (for example, `k8s_container` or `k8s_pod`). +type Service_GkeWorkload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The project this resource lives in. For legacy services + // migrated from the `Custom` type, this may be a distinct project from the + // one parenting the service itself. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + // The location of the parent cluster. This may be a zone or region. + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + // The name of the parent cluster. + ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // The name of the parent namespace. + NamespaceName string `protobuf:"bytes,4,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"` + // The type of this workload (for example, "Deployment" or "DaemonSet") + TopLevelControllerType string `protobuf:"bytes,5,opt,name=top_level_controller_type,json=topLevelControllerType,proto3" json:"top_level_controller_type,omitempty"` + // The name of this workload. 
+ TopLevelControllerName string `protobuf:"bytes,6,opt,name=top_level_controller_name,json=topLevelControllerName,proto3" json:"top_level_controller_name,omitempty"` +} + +func (x *Service_GkeWorkload) Reset() { + *x = Service_GkeWorkload{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_GkeWorkload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_GkeWorkload) ProtoMessage() {} + +func (x *Service_GkeWorkload) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_GkeWorkload.ProtoReflect.Descriptor instead. +func (*Service_GkeWorkload) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 8} +} + +func (x *Service_GkeWorkload) GetProjectId() string { + if x != nil { + return x.ProjectId + } + return "" +} + +func (x *Service_GkeWorkload) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *Service_GkeWorkload) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *Service_GkeWorkload) GetNamespaceName() string { + if x != nil { + return x.NamespaceName + } + return "" +} + +func (x *Service_GkeWorkload) GetTopLevelControllerType() string { + if x != nil { + return x.TopLevelControllerType + } + return "" +} + +func (x *Service_GkeWorkload) GetTopLevelControllerName() string { + if x != nil { + return x.TopLevelControllerName + } + return "" +} + +// GKE Service. The "service" here represents a +// [Kubernetes service +// object](https://kubernetes.io/docs/concepts/services-networking/service). +// The field names correspond to the resource labels on [`k8s_service` +// monitored +// resources](https://cloud.google.com/monitoring/api/resources#tag_k8s_service). +type Service_GkeService struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The project this resource lives in. For legacy services + // migrated from the `Custom` type, this may be a distinct project from the + // one parenting the service itself. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + // The location of the parent cluster. This may be a zone or region. + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + // The name of the parent cluster. + ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // The name of the parent namespace. + NamespaceName string `protobuf:"bytes,4,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"` + // The name of this service. 
+ ServiceName string `protobuf:"bytes,5,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` +} + +func (x *Service_GkeService) Reset() { + *x = Service_GkeService{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_GkeService) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_GkeService) ProtoMessage() {} + +func (x *Service_GkeService) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_GkeService.ProtoReflect.Descriptor instead. +func (*Service_GkeService) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 9} +} + +func (x *Service_GkeService) GetProjectId() string { + if x != nil { + return x.ProjectId + } + return "" +} + +func (x *Service_GkeService) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *Service_GkeService) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *Service_GkeService) GetNamespaceName() string { + if x != nil { + return x.NamespaceName + } + return "" +} + +func (x *Service_GkeService) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +// A well-known service type, defined by its service type and service labels. +// Documentation and examples +// [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). +type Service_BasicService struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of service that this basic service defines, e.g. + // APP_ENGINE service type. + // Documentation and valid values + // [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). + ServiceType string `protobuf:"bytes,1,opt,name=service_type,json=serviceType,proto3" json:"service_type,omitempty"` + // Labels that specify the resource that emits the monitoring data which + // is used for SLO reporting of this `Service`. + // Documentation and valid values for given service types + // [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). 
+ ServiceLabels map[string]string `protobuf:"bytes,2,rep,name=service_labels,json=serviceLabels,proto3" json:"service_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Service_BasicService) Reset() { + *x = Service_BasicService{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_BasicService) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_BasicService) ProtoMessage() {} + +func (x *Service_BasicService) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_BasicService.ProtoReflect.Descriptor instead. +func (*Service_BasicService) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 10} +} + +func (x *Service_BasicService) GetServiceType() string { + if x != nil { + return x.ServiceType + } + return "" +} + +func (x *Service_BasicService) GetServiceLabels() map[string]string { + if x != nil { + return x.ServiceLabels + } + return nil +} + +// Configuration for how to query telemetry on a Service. +type Service_Telemetry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The full name of the resource that defines this service. Formatted as + // described in https://cloud.google.com/apis/design/resource_names. + ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` +} + +func (x *Service_Telemetry) Reset() { + *x = Service_Telemetry{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_Telemetry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_Telemetry) ProtoMessage() {} + +func (x *Service_Telemetry) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_Telemetry.ProtoReflect.Descriptor instead. +func (*Service_Telemetry) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 11} +} + +func (x *Service_Telemetry) GetResourceName() string { + if x != nil { + return x.ResourceName + } + return "" +} + +// Future parameters for the availability SLI. 
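A minimal sketch of a BasicService for a well-known service type. The APP_ENGINE service type and its module_id label follow the documentation linked above, but both values should be treated as assumptions here rather than facts from this file:

package main

import (
	"fmt"

	monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
)

func main() {
	basic := &monitoringpb.Service_BasicService{
		ServiceType:   "APP_ENGINE",
		// Labels select the monitored resource emitting the SLO data.
		ServiceLabels: map[string]string{"module_id": "default"},
	}
	fmt.Println(basic.GetServiceType(), basic.GetServiceLabels()["module_id"])
}
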
+type BasicSli_AvailabilityCriteria struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *BasicSli_AvailabilityCriteria) Reset() { + *x = BasicSli_AvailabilityCriteria{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BasicSli_AvailabilityCriteria) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BasicSli_AvailabilityCriteria) ProtoMessage() {} + +func (x *BasicSli_AvailabilityCriteria) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BasicSli_AvailabilityCriteria.ProtoReflect.Descriptor instead. +func (*BasicSli_AvailabilityCriteria) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{3, 0} +} + +// Parameters for a latency threshold SLI. +type BasicSli_LatencyCriteria struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Good service is defined to be the count of requests made to this service + // that return in no more than `threshold`. + Threshold *durationpb.Duration `protobuf:"bytes,3,opt,name=threshold,proto3" json:"threshold,omitempty"` +} + +func (x *BasicSli_LatencyCriteria) Reset() { + *x = BasicSli_LatencyCriteria{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BasicSli_LatencyCriteria) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BasicSli_LatencyCriteria) ProtoMessage() {} + +func (x *BasicSli_LatencyCriteria) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BasicSli_LatencyCriteria.ProtoReflect.Descriptor instead. +func (*BasicSli_LatencyCriteria) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{3, 1} +} + +func (x *BasicSli_LatencyCriteria) GetThreshold() *durationpb.Duration { + if x != nil { + return x.Threshold + } + return nil +} + +// A `PerformanceThreshold` is used when each window is good when that window +// has a sufficiently high `performance`. +type WindowsBasedSli_PerformanceThreshold struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The means, either a request-based SLI or a basic SLI, by which to compute + // performance over a window. + // + // Types that are assignable to Type: + // + // *WindowsBasedSli_PerformanceThreshold_Performance + // *WindowsBasedSli_PerformanceThreshold_BasicSliPerformance + Type isWindowsBasedSli_PerformanceThreshold_Type `protobuf_oneof:"type"` + // If window `performance >= threshold`, the window is counted as good. 
+ Threshold float64 `protobuf:"fixed64,2,opt,name=threshold,proto3" json:"threshold,omitempty"` +} + +func (x *WindowsBasedSli_PerformanceThreshold) Reset() { + *x = WindowsBasedSli_PerformanceThreshold{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WindowsBasedSli_PerformanceThreshold) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WindowsBasedSli_PerformanceThreshold) ProtoMessage() {} + +func (x *WindowsBasedSli_PerformanceThreshold) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WindowsBasedSli_PerformanceThreshold.ProtoReflect.Descriptor instead. +func (*WindowsBasedSli_PerformanceThreshold) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{8, 0} +} + +func (m *WindowsBasedSli_PerformanceThreshold) GetType() isWindowsBasedSli_PerformanceThreshold_Type { + if m != nil { + return m.Type + } + return nil +} + +func (x *WindowsBasedSli_PerformanceThreshold) GetPerformance() *RequestBasedSli { + if x, ok := x.GetType().(*WindowsBasedSli_PerformanceThreshold_Performance); ok { + return x.Performance + } + return nil +} + +func (x *WindowsBasedSli_PerformanceThreshold) GetBasicSliPerformance() *BasicSli { + if x, ok := x.GetType().(*WindowsBasedSli_PerformanceThreshold_BasicSliPerformance); ok { + return x.BasicSliPerformance + } + return nil +} + +func (x *WindowsBasedSli_PerformanceThreshold) GetThreshold() float64 { + if x != nil { + return x.Threshold + } + return 0 +} + +type isWindowsBasedSli_PerformanceThreshold_Type interface { + isWindowsBasedSli_PerformanceThreshold_Type() +} + +type WindowsBasedSli_PerformanceThreshold_Performance struct { + // `RequestBasedSli` to evaluate to judge window quality. + Performance *RequestBasedSli `protobuf:"bytes,1,opt,name=performance,proto3,oneof"` +} + +type WindowsBasedSli_PerformanceThreshold_BasicSliPerformance struct { + // `BasicSli` to evaluate to judge window quality. + BasicSliPerformance *BasicSli `protobuf:"bytes,3,opt,name=basic_sli_performance,json=basicSliPerformance,proto3,oneof"` +} + +func (*WindowsBasedSli_PerformanceThreshold_Performance) isWindowsBasedSli_PerformanceThreshold_Type() { +} + +func (*WindowsBasedSli_PerformanceThreshold_BasicSliPerformance) isWindowsBasedSli_PerformanceThreshold_Type() { +} + +// A `MetricRange` is used when each window is good when the value x of a +// single `TimeSeries` satisfies `range.min <= x <= range.max`. The provided +// `TimeSeries` must have `ValueType = INT64` or `ValueType = DOUBLE` and +// `MetricKind = GAUGE`. +type WindowsBasedSli_MetricRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifying the `TimeSeries` to use for evaluating window quality. + TimeSeries string `protobuf:"bytes,1,opt,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` + // Range of values considered "good." For a one-sided range, set one bound + // to an infinite value. 
+ Range *Range `protobuf:"bytes,4,opt,name=range,proto3" json:"range,omitempty"` +} + +func (x *WindowsBasedSli_MetricRange) Reset() { + *x = WindowsBasedSli_MetricRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WindowsBasedSli_MetricRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WindowsBasedSli_MetricRange) ProtoMessage() {} + +func (x *WindowsBasedSli_MetricRange) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WindowsBasedSli_MetricRange.ProtoReflect.Descriptor instead. +func (*WindowsBasedSli_MetricRange) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{8, 1} +} + +func (x *WindowsBasedSli_MetricRange) GetTimeSeries() string { + if x != nil { + return x.TimeSeries + } + return "" +} + +func (x *WindowsBasedSli_MetricRange) GetRange() *Range { + if x != nil { + return x.Range + } + return nil +} + +var File_google_monitoring_v3_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_service_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, + 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x5f, 0x70, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa4, 0x16, 0x0a, 0x07, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x12, 0x48, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x41, 0x70, 0x70, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x48, 0x00, + 0x52, 0x09, 0x61, 0x70, 0x70, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x57, 0x0a, 0x0f, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x73, 0x12, 0x51, 0x0a, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x69, 0x73, 0x74, 0x69, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x12, 0x48, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x68, 0x5f, + 0x69, 0x73, 0x74, 0x69, 0x6f, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, 0x73, 0x68, 0x49, + 0x73, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x68, 0x49, 0x73, 0x74, 0x69, + 0x6f, 0x12, 0x6d, 0x0a, 0x17, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x5f, 0x63, 0x61, 0x6e, 0x6f, 0x6e, + 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, 0x15, 0x69, 0x73, 0x74, 0x69, 0x6f, + 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x45, 0x0a, 0x09, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x52, 0x75, 0x6e, 0x48, 0x00, 0x52, 0x08, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x52, 0x75, 0x6e, 0x12, 0x51, 0x0a, 0x0d, 0x67, 0x6b, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6b, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x67, 0x6b, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x4e, 0x0a, 0x0c, 0x67, 0x6b, + 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 
0x61, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x47, 0x6b, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x00, 0x52, 0x0b, 0x67, + 0x6b, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x4b, 0x0a, 0x0b, 0x67, 0x6b, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, + 0x6b, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x67, 0x6b, 0x65, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4f, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x69, 0x63, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x42, 0x61, + 0x73, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x69, + 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x09, 0x74, 0x65, 0x6c, 0x65, + 0x6d, 0x65, 0x74, 0x72, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x54, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x52, 0x09, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x12, + 0x4e, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0e, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, + 0x08, 0x0a, 0x06, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x1a, 0x28, 0x0a, 0x09, 0x41, 0x70, 0x70, + 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, 0x6f, 0x64, 0x75, 0x6c, + 0x65, 0x49, 0x64, 0x1a, 0x2a, 0x0a, 0x0e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, + 0x9d, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, 0x6f, + 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x2b, 0x0a, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 
0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, + 0x76, 0x0a, 0x09, 0x4d, 0x65, 0x73, 0x68, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x12, 0x19, 0x0a, 0x08, + 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6d, 0x65, 0x73, 0x68, 0x55, 0x69, 0x64, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x9f, 0x01, 0x0a, 0x15, 0x49, 0x73, 0x74, 0x69, + 0x6f, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x68, 0x55, 0x69, 0x64, 0x12, 0x3e, 0x0a, 0x1b, + 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x19, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x11, + 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, + 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0x49, 0x0a, 0x08, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x52, 0x75, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x98, 0x01, 0x0a, 0x0c, 0x47, 0x6b, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, + 0x8d, 0x02, 0x0a, 0x0b, 0x47, 0x6b, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, + 
0x22, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x19, 0x74, 0x6f, 0x70, + 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, + 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x74, 0x6f, + 0x70, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x19, 0x74, 0x6f, 0x70, 0x5f, 0x6c, 0x65, 0x76, 0x65, + 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x74, 0x6f, 0x70, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x1a, + 0xb9, 0x01, 0x0a, 0x0a, 0x47, 0x6b, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x22, + 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0xd9, 0x01, 0x0a, 0x0c, + 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x64, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 
0x72, 0x79, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x40, 0x0a, 0x12, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x30, 0x0a, 0x09, 0x54, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, + 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0xa7, 0x01, 0xea, 0x41, 0xa3, 0x01, 0x0a, + 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x12, 0x25, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, 0x2f, 0x6f, 0x72, 0x67, 0x61, 0x6e, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, 0x23, 0x66, 0x6f, 0x6c, 0x64, + 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, + 0x01, 0x2a, 0x42, 0x0c, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x22, 0x82, 0x07, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, + 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x63, 0x0a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x69, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x6f, 0x72, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x67, + 0x6f, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 
0x01, 0x52, 0x04, 0x67, 0x6f, 0x61, 0x6c, 0x12, + 0x42, 0x0a, 0x0e, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, + 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x12, 0x46, 0x0a, 0x0f, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x5f, + 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x43, 0x61, 0x6c, 0x65, 0x6e, + 0x64, 0x61, 0x72, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x61, 0x6c, + 0x65, 0x6e, 0x64, 0x61, 0x72, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x5c, 0x0a, 0x0b, 0x75, + 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, + 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x55, 0x73, + 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, + 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, + 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x34, 0x0a, 0x04, 0x56, 0x69, 0x65, 0x77, + 0x12, 0x14, 0x0a, 0x10, 0x56, 0x49, 0x45, 0x57, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x02, + 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x01, 0x3a, 0xca, + 0x02, 0xea, 0x41, 0xc6, 0x02, 0x0a, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x56, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, + 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, 0x12, 0x60, + 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, + 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 
0x69, 0x63, 0x65, 0x5f, + 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, + 0x12, 0x54, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, + 0x72, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, 0x12, 0x01, 0x2a, 0x20, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x70, + 0x65, 0x72, 0x69, 0x6f, 0x64, 0x22, 0xfa, 0x01, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, + 0x3d, 0x0a, 0x09, 0x62, 0x61, 0x73, 0x69, 0x63, 0x5f, 0x73, 0x6c, 0x69, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, + 0x6c, 0x69, 0x48, 0x00, 0x52, 0x08, 0x62, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x12, 0x4c, + 0x0a, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0c, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x12, 0x4c, 0x0a, 0x0d, + 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, + 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0c, 0x77, 0x69, + 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x22, 0xf3, 0x02, 0x0a, 0x08, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x12, + 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, + 0x0c, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, + 0x53, 0x6c, 0x69, 0x2e, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, + 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x76, 0x61, 0x69, + 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x4a, 0x0a, 0x07, 0x6c, 0x61, 0x74, 0x65, + 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 
0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x2e, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x48, 0x00, 0x52, 0x07, 0x6c, 0x61, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x1a, 0x16, 0x0a, 0x14, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, + 0x6c, 0x69, 0x74, 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x1a, 0x4a, 0x0a, 0x0f, + 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x12, + 0x37, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x74, + 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x0e, 0x0a, 0x0c, 0x73, 0x6c, 0x69, 0x5f, + 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x22, 0x2b, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, + 0x6d, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x03, 0x6d, 0x61, 0x78, 0x22, 0xc2, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x12, 0x51, 0x0a, 0x10, 0x67, 0x6f, 0x6f, + 0x64, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x0e, 0x67, 0x6f, + 0x6f, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x52, 0x0a, 0x10, + 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x75, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x69, + 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, 0x74, 0x48, 0x00, 0x52, + 0x0f, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, 0x74, + 0x42, 0x08, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x22, 0xa1, 0x01, 0x0a, 0x0f, 0x54, + 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x2e, + 0x0a, 0x13, 0x67, 0x6f, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x67, 0x6f, 0x6f, + 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2c, + 0x0a, 0x12, 0x62, 0x61, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x62, 0x61, 0x64, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x30, 0x0a, 0x14, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x74, 0x6f, 0x74, 0x61, + 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x75, + 0x0a, 0x0f, 0x44, 0x69, 
0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, + 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x12, 0x31, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0xa4, 0x06, 0x0a, 0x0f, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, + 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x12, 0x35, 0x0a, 0x16, 0x67, 0x6f, 0x6f, + 0x64, 0x5f, 0x62, 0x61, 0x64, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x13, 0x67, 0x6f, 0x6f, + 0x64, 0x42, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x79, 0x0a, 0x1a, 0x67, 0x6f, 0x6f, 0x64, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, + 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x2e, 0x50, 0x65, 0x72, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, + 0x48, 0x00, 0x52, 0x17, 0x67, 0x6f, 0x6f, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x52, 0x61, 0x74, + 0x69, 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x64, 0x0a, 0x14, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x61, 0x6e, 0x5f, 0x69, 0x6e, 0x5f, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x11, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4d, 0x65, 0x61, 0x6e, 0x49, 0x6e, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x62, 0x0a, 0x13, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x75, 0x6d, 0x5f, + 0x69, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, + 0x65, 0x64, 0x53, 0x6c, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x48, 0x00, 0x52, 0x10, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x75, 0x6d, 0x49, 0x6e, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x3e, 0x0a, 0x0d, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, + 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x50, + 0x65, 0x72, 0x69, 0x6f, 0x64, 0x1a, 0xdd, 0x01, 
0x0a, 0x14, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x49, + 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0b, 0x70, 0x65, + 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x54, 0x0a, 0x15, 0x62, 0x61, 0x73, + 0x69, 0x63, 0x5f, 0x73, 0x6c, 0x69, 0x5f, 0x70, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, + 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x13, 0x62, 0x61, 0x73, 0x69, + 0x63, 0x53, 0x6c, 0x69, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x12, + 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x06, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x61, 0x0a, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x53, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x77, 0x69, 0x6e, 0x64, + 0x6f, 0x77, 0x5f, 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x6f, 0x6e, 0x42, 0xd1, 0x01, 0x0a, + 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, + 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_service_proto_rawDescOnce sync.Once + 
file_google_monitoring_v3_service_proto_rawDescData = file_google_monitoring_v3_service_proto_rawDesc +) + +func file_google_monitoring_v3_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_service_proto_rawDescData) + }) + return file_google_monitoring_v3_service_proto_rawDescData +} + +var file_google_monitoring_v3_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_monitoring_v3_service_proto_msgTypes = make([]protoimpl.MessageInfo, 28) +var file_google_monitoring_v3_service_proto_goTypes = []any{ + (ServiceLevelObjective_View)(0), // 0: google.monitoring.v3.ServiceLevelObjective.View + (*Service)(nil), // 1: google.monitoring.v3.Service + (*ServiceLevelObjective)(nil), // 2: google.monitoring.v3.ServiceLevelObjective + (*ServiceLevelIndicator)(nil), // 3: google.monitoring.v3.ServiceLevelIndicator + (*BasicSli)(nil), // 4: google.monitoring.v3.BasicSli + (*Range)(nil), // 5: google.monitoring.v3.Range + (*RequestBasedSli)(nil), // 6: google.monitoring.v3.RequestBasedSli + (*TimeSeriesRatio)(nil), // 7: google.monitoring.v3.TimeSeriesRatio + (*DistributionCut)(nil), // 8: google.monitoring.v3.DistributionCut + (*WindowsBasedSli)(nil), // 9: google.monitoring.v3.WindowsBasedSli + (*Service_Custom)(nil), // 10: google.monitoring.v3.Service.Custom + (*Service_AppEngine)(nil), // 11: google.monitoring.v3.Service.AppEngine + (*Service_CloudEndpoints)(nil), // 12: google.monitoring.v3.Service.CloudEndpoints + (*Service_ClusterIstio)(nil), // 13: google.monitoring.v3.Service.ClusterIstio + (*Service_MeshIstio)(nil), // 14: google.monitoring.v3.Service.MeshIstio + (*Service_IstioCanonicalService)(nil), // 15: google.monitoring.v3.Service.IstioCanonicalService + (*Service_CloudRun)(nil), // 16: google.monitoring.v3.Service.CloudRun + (*Service_GkeNamespace)(nil), // 17: google.monitoring.v3.Service.GkeNamespace + (*Service_GkeWorkload)(nil), // 18: google.monitoring.v3.Service.GkeWorkload + (*Service_GkeService)(nil), // 19: google.monitoring.v3.Service.GkeService + (*Service_BasicService)(nil), // 20: google.monitoring.v3.Service.BasicService + (*Service_Telemetry)(nil), // 21: google.monitoring.v3.Service.Telemetry + nil, // 22: google.monitoring.v3.Service.UserLabelsEntry + nil, // 23: google.monitoring.v3.Service.BasicService.ServiceLabelsEntry + nil, // 24: google.monitoring.v3.ServiceLevelObjective.UserLabelsEntry + (*BasicSli_AvailabilityCriteria)(nil), // 25: google.monitoring.v3.BasicSli.AvailabilityCriteria + (*BasicSli_LatencyCriteria)(nil), // 26: google.monitoring.v3.BasicSli.LatencyCriteria + (*WindowsBasedSli_PerformanceThreshold)(nil), // 27: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold + (*WindowsBasedSli_MetricRange)(nil), // 28: google.monitoring.v3.WindowsBasedSli.MetricRange + (*durationpb.Duration)(nil), // 29: google.protobuf.Duration + (calendarperiod.CalendarPeriod)(0), // 30: google.type.CalendarPeriod +} +var file_google_monitoring_v3_service_proto_depIdxs = []int32{ + 10, // 0: google.monitoring.v3.Service.custom:type_name -> google.monitoring.v3.Service.Custom + 11, // 1: google.monitoring.v3.Service.app_engine:type_name -> google.monitoring.v3.Service.AppEngine + 12, // 2: google.monitoring.v3.Service.cloud_endpoints:type_name -> google.monitoring.v3.Service.CloudEndpoints + 13, // 3: google.monitoring.v3.Service.cluster_istio:type_name -> 
google.monitoring.v3.Service.ClusterIstio + 14, // 4: google.monitoring.v3.Service.mesh_istio:type_name -> google.monitoring.v3.Service.MeshIstio + 15, // 5: google.monitoring.v3.Service.istio_canonical_service:type_name -> google.monitoring.v3.Service.IstioCanonicalService + 16, // 6: google.monitoring.v3.Service.cloud_run:type_name -> google.monitoring.v3.Service.CloudRun + 17, // 7: google.monitoring.v3.Service.gke_namespace:type_name -> google.monitoring.v3.Service.GkeNamespace + 18, // 8: google.monitoring.v3.Service.gke_workload:type_name -> google.monitoring.v3.Service.GkeWorkload + 19, // 9: google.monitoring.v3.Service.gke_service:type_name -> google.monitoring.v3.Service.GkeService + 20, // 10: google.monitoring.v3.Service.basic_service:type_name -> google.monitoring.v3.Service.BasicService + 21, // 11: google.monitoring.v3.Service.telemetry:type_name -> google.monitoring.v3.Service.Telemetry + 22, // 12: google.monitoring.v3.Service.user_labels:type_name -> google.monitoring.v3.Service.UserLabelsEntry + 3, // 13: google.monitoring.v3.ServiceLevelObjective.service_level_indicator:type_name -> google.monitoring.v3.ServiceLevelIndicator + 29, // 14: google.monitoring.v3.ServiceLevelObjective.rolling_period:type_name -> google.protobuf.Duration + 30, // 15: google.monitoring.v3.ServiceLevelObjective.calendar_period:type_name -> google.type.CalendarPeriod + 24, // 16: google.monitoring.v3.ServiceLevelObjective.user_labels:type_name -> google.monitoring.v3.ServiceLevelObjective.UserLabelsEntry + 4, // 17: google.monitoring.v3.ServiceLevelIndicator.basic_sli:type_name -> google.monitoring.v3.BasicSli + 6, // 18: google.monitoring.v3.ServiceLevelIndicator.request_based:type_name -> google.monitoring.v3.RequestBasedSli + 9, // 19: google.monitoring.v3.ServiceLevelIndicator.windows_based:type_name -> google.monitoring.v3.WindowsBasedSli + 25, // 20: google.monitoring.v3.BasicSli.availability:type_name -> google.monitoring.v3.BasicSli.AvailabilityCriteria + 26, // 21: google.monitoring.v3.BasicSli.latency:type_name -> google.monitoring.v3.BasicSli.LatencyCriteria + 7, // 22: google.monitoring.v3.RequestBasedSli.good_total_ratio:type_name -> google.monitoring.v3.TimeSeriesRatio + 8, // 23: google.monitoring.v3.RequestBasedSli.distribution_cut:type_name -> google.monitoring.v3.DistributionCut + 5, // 24: google.monitoring.v3.DistributionCut.range:type_name -> google.monitoring.v3.Range + 27, // 25: google.monitoring.v3.WindowsBasedSli.good_total_ratio_threshold:type_name -> google.monitoring.v3.WindowsBasedSli.PerformanceThreshold + 28, // 26: google.monitoring.v3.WindowsBasedSli.metric_mean_in_range:type_name -> google.monitoring.v3.WindowsBasedSli.MetricRange + 28, // 27: google.monitoring.v3.WindowsBasedSli.metric_sum_in_range:type_name -> google.monitoring.v3.WindowsBasedSli.MetricRange + 29, // 28: google.monitoring.v3.WindowsBasedSli.window_period:type_name -> google.protobuf.Duration + 23, // 29: google.monitoring.v3.Service.BasicService.service_labels:type_name -> google.monitoring.v3.Service.BasicService.ServiceLabelsEntry + 29, // 30: google.monitoring.v3.BasicSli.LatencyCriteria.threshold:type_name -> google.protobuf.Duration + 6, // 31: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.performance:type_name -> google.monitoring.v3.RequestBasedSli + 4, // 32: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.basic_sli_performance:type_name -> google.monitoring.v3.BasicSli + 5, // 33: google.monitoring.v3.WindowsBasedSli.MetricRange.range:type_name -> 
google.monitoring.v3.Range + 34, // [34:34] is the sub-list for method output_type + 34, // [34:34] is the sub-list for method input_type + 34, // [34:34] is the sub-list for extension type_name + 34, // [34:34] is the sub-list for extension extendee + 0, // [0:34] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_service_proto_init() } +func file_google_monitoring_v3_service_proto_init() { + if File_google_monitoring_v3_service_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Service); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ServiceLevelObjective); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ServiceLevelIndicator); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*BasicSli); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*Range); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*RequestBasedSli); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*TimeSeriesRatio); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*DistributionCut); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*WindowsBasedSli); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*Service_Custom); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*Service_AppEngine); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*Service_CloudEndpoints); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + 
case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*Service_ClusterIstio); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*Service_MeshIstio); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[14].Exporter = func(v any, i int) any { + switch v := v.(*Service_IstioCanonicalService); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[15].Exporter = func(v any, i int) any { + switch v := v.(*Service_CloudRun); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[16].Exporter = func(v any, i int) any { + switch v := v.(*Service_GkeNamespace); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[17].Exporter = func(v any, i int) any { + switch v := v.(*Service_GkeWorkload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[18].Exporter = func(v any, i int) any { + switch v := v.(*Service_GkeService); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[19].Exporter = func(v any, i int) any { + switch v := v.(*Service_BasicService); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[20].Exporter = func(v any, i int) any { + switch v := v.(*Service_Telemetry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[24].Exporter = func(v any, i int) any { + switch v := v.(*BasicSli_AvailabilityCriteria); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[25].Exporter = func(v any, i int) any { + switch v := v.(*BasicSli_LatencyCriteria); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[26].Exporter = func(v any, i int) any { + switch v := v.(*WindowsBasedSli_PerformanceThreshold); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[27].Exporter = func(v any, i int) any { + switch v := v.(*WindowsBasedSli_MetricRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + 
file_google_monitoring_v3_service_proto_msgTypes[0].OneofWrappers = []any{ + (*Service_Custom_)(nil), + (*Service_AppEngine_)(nil), + (*Service_CloudEndpoints_)(nil), + (*Service_ClusterIstio_)(nil), + (*Service_MeshIstio_)(nil), + (*Service_IstioCanonicalService_)(nil), + (*Service_CloudRun_)(nil), + (*Service_GkeNamespace_)(nil), + (*Service_GkeWorkload_)(nil), + (*Service_GkeService_)(nil), + } + file_google_monitoring_v3_service_proto_msgTypes[1].OneofWrappers = []any{ + (*ServiceLevelObjective_RollingPeriod)(nil), + (*ServiceLevelObjective_CalendarPeriod)(nil), + } + file_google_monitoring_v3_service_proto_msgTypes[2].OneofWrappers = []any{ + (*ServiceLevelIndicator_BasicSli)(nil), + (*ServiceLevelIndicator_RequestBased)(nil), + (*ServiceLevelIndicator_WindowsBased)(nil), + } + file_google_monitoring_v3_service_proto_msgTypes[3].OneofWrappers = []any{ + (*BasicSli_Availability)(nil), + (*BasicSli_Latency)(nil), + } + file_google_monitoring_v3_service_proto_msgTypes[5].OneofWrappers = []any{ + (*RequestBasedSli_GoodTotalRatio)(nil), + (*RequestBasedSli_DistributionCut)(nil), + } + file_google_monitoring_v3_service_proto_msgTypes[8].OneofWrappers = []any{ + (*WindowsBasedSli_GoodBadMetricFilter)(nil), + (*WindowsBasedSli_GoodTotalRatioThreshold)(nil), + (*WindowsBasedSli_MetricMeanInRange)(nil), + (*WindowsBasedSli_MetricSumInRange)(nil), + } + file_google_monitoring_v3_service_proto_msgTypes[26].OneofWrappers = []any{ + (*WindowsBasedSli_PerformanceThreshold_Performance)(nil), + (*WindowsBasedSli_PerformanceThreshold_BasicSliPerformance)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_service_proto_rawDesc, + NumEnums: 1, + NumMessages: 28, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_service_proto_depIdxs, + EnumInfos: file_google_monitoring_v3_service_proto_enumTypes, + MessageInfos: file_google_monitoring_v3_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_service_proto = out.File + file_google_monitoring_v3_service_proto_rawDesc = nil + file_google_monitoring_v3_service_proto_goTypes = nil + file_google_monitoring_v3_service_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go new file mode 100644 index 00000000000..15e1f04d6a5 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go @@ -0,0 +1,1796 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
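+// Editor's note (not part of the generated output): this vendored file
+// carries the request/response message types and gRPC stubs for the
+// ServiceMonitoringService API (CreateService, GetService, ListServices,
+// UpdateService, DeleteService, and their ServiceLevelObjective variants).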
+// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/service_service.proto + +package monitoringpb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The `CreateService` request. +type CreateServiceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Resource + // [name](https://cloud.google.com/monitoring/api/v3#project_name) of the + // parent Metrics Scope. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. The Service id to use for this Service. If omitted, an id will be + // generated instead. Must match the pattern `[a-z0-9\-]+` + ServiceId string `protobuf:"bytes,3,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + // Required. The `Service` to create. + Service *Service `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"` +} + +func (x *CreateServiceRequest) Reset() { + *x = CreateServiceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateServiceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateServiceRequest) ProtoMessage() {} + +func (x *CreateServiceRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateServiceRequest.ProtoReflect.Descriptor instead. +func (*CreateServiceRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{0} +} + +func (x *CreateServiceRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateServiceRequest) GetServiceId() string { + if x != nil { + return x.ServiceId + } + return "" +} + +func (x *CreateServiceRequest) GetService() *Service { + if x != nil { + return x.Service + } + return nil +} + +// The `GetService` request. +type GetServiceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Resource name of the `Service`. 
The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetServiceRequest) Reset() { + *x = GetServiceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetServiceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetServiceRequest) ProtoMessage() {} + +func (x *GetServiceRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetServiceRequest.ProtoReflect.Descriptor instead. +func (*GetServiceRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{1} +} + +func (x *GetServiceRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `ListServices` request. +type ListServicesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Resource name of the parent containing the listed services, + // either a [project](https://cloud.google.com/monitoring/api/v3#project_name) + // or a Monitoring Metrics Scope. The formats are: + // + // projects/[PROJECT_ID_OR_NUMBER] + // workspaces/[HOST_PROJECT_ID_OR_NUMBER] + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // A filter specifying what `Service`s to return. The filter supports + // filtering on a particular service-identifier type or one of its attributes. + // + // To filter on a particular service-identifier type, the `identifier_case` + // refers to which option in the `identifier` field is populated. For example, + // the filter `identifier_case = "CUSTOM"` would match all services with a + // value for the `custom` field. Valid options include "CUSTOM", "APP_ENGINE", + // "MESH_ISTIO", and the other options listed at + // https://cloud.google.com/monitoring/api/ref_v3/rest/v3/services#Service + // + // To filter on an attribute of a service-identifier type, apply the filter + // name by using the snake case of the service-identifier type and the + // attribute of that service-identifier type, and join the two with a period. + // For example, to filter by the `meshUid` field of the `MeshIstio` + // service-identifier type, you must filter on `mesh_istio.mesh_uid = + // "123"` to match all services with mesh UID "123". Service-identifier types + // and their attributes are described at + // https://cloud.google.com/monitoring/api/ref_v3/rest/v3/services#Service + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // A non-negative number that is the maximum number of results to return. + // When 0, use default page size. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. 
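+	// An illustrative sketch (not generated output) of the usual pagination
+	// loop, assuming a ServiceMonitoringServiceClient `c` from this package:
+	//
+	//	req := &ListServicesRequest{Parent: "projects/my-project"}
+	//	for {
+	//		resp, err := c.ListServices(ctx, req)
+	//		if err != nil {
+	//			break // handle the error appropriately
+	//		}
+	//		// ... consume resp.Services ...
+	//		if resp.NextPageToken == "" {
+	//			break
+	//		}
+	//		req.PageToken = resp.NextPageToken
+	//	}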
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListServicesRequest) Reset() { + *x = ListServicesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServicesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServicesRequest) ProtoMessage() {} + +func (x *ListServicesRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServicesRequest.ProtoReflect.Descriptor instead. +func (*ListServicesRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{2} +} + +func (x *ListServicesRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListServicesRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListServicesRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListServicesRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The `ListServices` response. +type ListServicesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The `Service`s matching the specified filter. + Services []*Service `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListServicesResponse) Reset() { + *x = ListServicesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServicesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServicesResponse) ProtoMessage() {} + +func (x *ListServicesResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServicesResponse.ProtoReflect.Descriptor instead. +func (*ListServicesResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{3} +} + +func (x *ListServicesResponse) GetServices() []*Service { + if x != nil { + return x.Services + } + return nil +} + +func (x *ListServicesResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The `UpdateService` request. 
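+//
+// Illustrative sketch only (not produced by protoc-gen-go): a partial
+// update pairs the resource with a FieldMask naming the fields to
+// change; the resource name and display_name value here are
+// hypothetical:
+//
+//  req := &UpdateServiceRequest{
+//      Service: &Service{
+//          Name:        "projects/my-project/services/my-service",
+//          DisplayName: "renamed service",
+//      },
+//      UpdateMask: &fieldmaskpb.FieldMask{Paths: []string{"display_name"}},
+//  }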
+type UpdateServiceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The `Service` to draw updates from. + // The given `name` specifies the resource to update. + Service *Service `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + // A set of field paths defining which fields to use for the update. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateServiceRequest) Reset() { + *x = UpdateServiceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateServiceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateServiceRequest) ProtoMessage() {} + +func (x *UpdateServiceRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateServiceRequest.ProtoReflect.Descriptor instead. +func (*UpdateServiceRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{4} +} + +func (x *UpdateServiceRequest) GetService() *Service { + if x != nil { + return x.Service + } + return nil +} + +func (x *UpdateServiceRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// The `DeleteService` request. +type DeleteServiceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Resource name of the `Service` to delete. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *DeleteServiceRequest) Reset() { + *x = DeleteServiceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteServiceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteServiceRequest) ProtoMessage() {} + +func (x *DeleteServiceRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteServiceRequest.ProtoReflect.Descriptor instead. +func (*DeleteServiceRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{5} +} + +func (x *DeleteServiceRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `CreateServiceLevelObjective` request. +type CreateServiceLevelObjectiveRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Resource name of the parent `Service`. 
The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. The ServiceLevelObjective id to use for this + // ServiceLevelObjective. If omitted, an id will be generated instead. Must + // match the pattern `^[a-zA-Z0-9-_:.]+$` + ServiceLevelObjectiveId string `protobuf:"bytes,3,opt,name=service_level_objective_id,json=serviceLevelObjectiveId,proto3" json:"service_level_objective_id,omitempty"` + // Required. The `ServiceLevelObjective` to create. + // The provided `name` will be respected if no `ServiceLevelObjective` exists + // with this name. + ServiceLevelObjective *ServiceLevelObjective `protobuf:"bytes,2,opt,name=service_level_objective,json=serviceLevelObjective,proto3" json:"service_level_objective,omitempty"` +} + +func (x *CreateServiceLevelObjectiveRequest) Reset() { + *x = CreateServiceLevelObjectiveRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateServiceLevelObjectiveRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateServiceLevelObjectiveRequest) ProtoMessage() {} + +func (x *CreateServiceLevelObjectiveRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateServiceLevelObjectiveRequest.ProtoReflect.Descriptor instead. +func (*CreateServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{6} +} + +func (x *CreateServiceLevelObjectiveRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateServiceLevelObjectiveRequest) GetServiceLevelObjectiveId() string { + if x != nil { + return x.ServiceLevelObjectiveId + } + return "" +} + +func (x *CreateServiceLevelObjectiveRequest) GetServiceLevelObjective() *ServiceLevelObjective { + if x != nil { + return x.ServiceLevelObjective + } + return nil +} + +// The `GetServiceLevelObjective` request. +type GetServiceLevelObjectiveRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Resource name of the `ServiceLevelObjective` to get. The format + // is: + // + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // View of the `ServiceLevelObjective` to return. If `DEFAULT`, return the + // `ServiceLevelObjective` as originally defined. If `EXPLICIT` and the + // `ServiceLevelObjective` is defined in terms of a `BasicSli`, replace the + // `BasicSli` with a `RequestBasedSli` spelling out how the SLI is computed. 
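+ //
+ // Illustrative sketch only: requesting the expanded (EXPLICIT) form of
+ // an SLO. The resource name is hypothetical, and the constant name is
+ // assumed to follow the ServiceLevelObjective_<VALUE> pattern used
+ // elsewhere in this file (e.g. ServiceLevelObjective_VIEW_UNSPECIFIED):
+ //
+ //  req := &GetServiceLevelObjectiveRequest{
+ //      Name: "projects/my-project/services/my-service/serviceLevelObjectives/my-slo",
+ //      View: ServiceLevelObjective_EXPLICIT,
+ //  }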
+ View ServiceLevelObjective_View `protobuf:"varint,2,opt,name=view,proto3,enum=google.monitoring.v3.ServiceLevelObjective_View" json:"view,omitempty"` +} + +func (x *GetServiceLevelObjectiveRequest) Reset() { + *x = GetServiceLevelObjectiveRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetServiceLevelObjectiveRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetServiceLevelObjectiveRequest) ProtoMessage() {} + +func (x *GetServiceLevelObjectiveRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetServiceLevelObjectiveRequest.ProtoReflect.Descriptor instead. +func (*GetServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{7} +} + +func (x *GetServiceLevelObjectiveRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *GetServiceLevelObjectiveRequest) GetView() ServiceLevelObjective_View { + if x != nil { + return x.View + } + return ServiceLevelObjective_VIEW_UNSPECIFIED +} + +// The `ListServiceLevelObjectives` request. +type ListServiceLevelObjectivesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Resource name of the parent containing the listed SLOs, either a + // project or a Monitoring Metrics Scope. The formats are: + // + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + // workspaces/[HOST_PROJECT_ID_OR_NUMBER]/services/- + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // A filter specifying what `ServiceLevelObjective`s to return. + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // A non-negative number that is the maximum number of results to return. + // When 0, use default page size. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // View of the `ServiceLevelObjective`s to return. If `DEFAULT`, return each + // `ServiceLevelObjective` as originally defined. If `EXPLICIT` and the + // `ServiceLevelObjective` is defined in terms of a `BasicSli`, replace the + // `BasicSli` with a `RequestBasedSli` spelling out how the SLI is computed. 
+ View ServiceLevelObjective_View `protobuf:"varint,5,opt,name=view,proto3,enum=google.monitoring.v3.ServiceLevelObjective_View" json:"view,omitempty"` +} + +func (x *ListServiceLevelObjectivesRequest) Reset() { + *x = ListServiceLevelObjectivesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServiceLevelObjectivesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServiceLevelObjectivesRequest) ProtoMessage() {} + +func (x *ListServiceLevelObjectivesRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServiceLevelObjectivesRequest.ProtoReflect.Descriptor instead. +func (*ListServiceLevelObjectivesRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{8} +} + +func (x *ListServiceLevelObjectivesRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListServiceLevelObjectivesRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListServiceLevelObjectivesRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListServiceLevelObjectivesRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListServiceLevelObjectivesRequest) GetView() ServiceLevelObjective_View { + if x != nil { + return x.View + } + return ServiceLevelObjective_VIEW_UNSPECIFIED +} + +// The `ListServiceLevelObjectives` response. +type ListServiceLevelObjectivesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The `ServiceLevelObjective`s matching the specified filter. + ServiceLevelObjectives []*ServiceLevelObjective `protobuf:"bytes,1,rep,name=service_level_objectives,json=serviceLevelObjectives,proto3" json:"service_level_objectives,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `page_token` in the next call to this method. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListServiceLevelObjectivesResponse) Reset() { + *x = ListServiceLevelObjectivesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServiceLevelObjectivesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServiceLevelObjectivesResponse) ProtoMessage() {} + +func (x *ListServiceLevelObjectivesResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServiceLevelObjectivesResponse.ProtoReflect.Descriptor instead. +func (*ListServiceLevelObjectivesResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{9} +} + +func (x *ListServiceLevelObjectivesResponse) GetServiceLevelObjectives() []*ServiceLevelObjective { + if x != nil { + return x.ServiceLevelObjectives + } + return nil +} + +func (x *ListServiceLevelObjectivesResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The `UpdateServiceLevelObjective` request. +type UpdateServiceLevelObjectiveRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The `ServiceLevelObjective` to draw updates from. + // The given `name` specifies the resource to update. + ServiceLevelObjective *ServiceLevelObjective `protobuf:"bytes,1,opt,name=service_level_objective,json=serviceLevelObjective,proto3" json:"service_level_objective,omitempty"` + // A set of field paths defining which fields to use for the update. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateServiceLevelObjectiveRequest) Reset() { + *x = UpdateServiceLevelObjectiveRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateServiceLevelObjectiveRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateServiceLevelObjectiveRequest) ProtoMessage() {} + +func (x *UpdateServiceLevelObjectiveRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateServiceLevelObjectiveRequest.ProtoReflect.Descriptor instead. 
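+// It is retained for backward compatibility; the gzipped bytes it
+// returns are derived from file_google_monitoring_v3_service_service_proto_rawDesc
+// defined later in this file.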
+func (*UpdateServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{10} +} + +func (x *UpdateServiceLevelObjectiveRequest) GetServiceLevelObjective() *ServiceLevelObjective { + if x != nil { + return x.ServiceLevelObjective + } + return nil +} + +func (x *UpdateServiceLevelObjectiveRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// The `DeleteServiceLevelObjective` request. +type DeleteServiceLevelObjectiveRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Resource name of the `ServiceLevelObjective` to delete. The + // format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *DeleteServiceLevelObjectiveRequest) Reset() { + *x = DeleteServiceLevelObjectiveRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteServiceLevelObjectiveRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteServiceLevelObjectiveRequest) ProtoMessage() {} + +func (x *DeleteServiceLevelObjectiveRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteServiceLevelObjectiveRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{11} +} + +func (x *DeleteServiceLevelObjectiveRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +var File_google_monitoring_v3_service_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_service_service_proto_rawDesc = []byte{ + 0x0a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, + 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, + 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb6, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x12, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x22, 0x52, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 
0x21, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xac, 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x23, 0x12, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, + 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x79, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x08, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x08, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, + 0x91, 0x01, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, + 0x61, 0x73, 0x6b, 0x22, 0x55, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, + 0x23, 0x0a, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x8e, 0x02, 0x0a, 0x22, 0x43, + 0x72, 
0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x1a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x49, + 0x64, 0x12, 0x68, 0x0a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, + 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x22, 0xb4, 0x01, 0x0a, 0x1f, + 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x4b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x37, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x31, 0x0a, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x04, + 0x76, 0x69, 0x65, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x56, 0x69, 0x65, 0x77, 0x52, 0x04, 0x76, 0x69, + 0x65, 0x77, 0x22, 0x80, 0x02, 0x0a, 0x21, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, + 0x0a, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x12, 0x1b, 
0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, + 0x44, 0x0a, 0x04, 0x76, 0x69, 0x65, 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x56, 0x69, 0x65, 0x77, 0x52, + 0x04, 0x76, 0x69, 0x65, 0x77, 0x22, 0xb3, 0x01, 0x0a, 0x22, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x65, 0x0a, 0x18, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x16, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, + 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xcb, 0x01, 0x0a, 0x22, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x68, 0x0a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, + 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x3b, 0x0a, 0x0b, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x71, 0x0a, 0x22, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x4b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x37, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x31, 0x0a, 0x2f, 0x6d, 0x6f, 
0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x32, 0xea, 0x0f, 0x0a, + 0x18, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x97, 0x01, 0x0a, 0x0d, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x3b, 0xda, 0x41, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24, 0x3a, + 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x19, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x2a, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x12, 0x7e, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x28, 0xda, 0x41, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x12, 0x19, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, + 0x61, 0x6d, 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2f, 0x2a, 0x7d, 0x12, 0x91, 0x01, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2a, 0xda, 0x41, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x12, 0x19, 0x2f, 0x76, + 0x33, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x2a, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x22, 0x3c, 0xda, 0x41, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x3a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x32, + 0x21, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6e, 0x61, + 0x6d, 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0x12, 0x7d, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x28, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x2a, 0x19, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, + 0x7d, 0x12, 0xfa, 0x01, 0x0a, 0x1b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x22, 0x74, 0xda, 0x41, 0x1e, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, + 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x4d, 0x3a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, + 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x22, 0x32, 0x2f, 0x76, 0x33, 0x2f, + 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, + 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x12, 0xc1, + 0x01, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x35, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 
0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x22, + 0x41, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, + 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0x12, 0xd4, 0x01, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x73, 0x12, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x43, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x12, 0x8c, 0x02, 0x0a, 0x1b, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x22, 0x85, 0x01, 0xda, 0x41, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, + 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x65, 0x3a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, + 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x32, 0x4a, 0x2f, 0x76, + 0x33, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, + 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xb2, 0x01, 0x0a, 0x1b, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x53, 0x65, 
0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x41, 0xda, 0x41, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x2a, 0x32, 0x2f, 0x76, 0x33, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x1a, 0xa9, 0x01, + 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, + 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, + 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, + 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x42, 0xd8, 0x01, 0x0a, 0x18, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x1d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, + 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_service_service_proto_rawDescOnce 
sync.Once + file_google_monitoring_v3_service_service_proto_rawDescData = file_google_monitoring_v3_service_service_proto_rawDesc +) + +func file_google_monitoring_v3_service_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_service_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_service_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_service_service_proto_rawDescData) + }) + return file_google_monitoring_v3_service_service_proto_rawDescData +} + +var file_google_monitoring_v3_service_service_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_google_monitoring_v3_service_service_proto_goTypes = []any{ + (*CreateServiceRequest)(nil), // 0: google.monitoring.v3.CreateServiceRequest + (*GetServiceRequest)(nil), // 1: google.monitoring.v3.GetServiceRequest + (*ListServicesRequest)(nil), // 2: google.monitoring.v3.ListServicesRequest + (*ListServicesResponse)(nil), // 3: google.monitoring.v3.ListServicesResponse + (*UpdateServiceRequest)(nil), // 4: google.monitoring.v3.UpdateServiceRequest + (*DeleteServiceRequest)(nil), // 5: google.monitoring.v3.DeleteServiceRequest + (*CreateServiceLevelObjectiveRequest)(nil), // 6: google.monitoring.v3.CreateServiceLevelObjectiveRequest + (*GetServiceLevelObjectiveRequest)(nil), // 7: google.monitoring.v3.GetServiceLevelObjectiveRequest + (*ListServiceLevelObjectivesRequest)(nil), // 8: google.monitoring.v3.ListServiceLevelObjectivesRequest + (*ListServiceLevelObjectivesResponse)(nil), // 9: google.monitoring.v3.ListServiceLevelObjectivesResponse + (*UpdateServiceLevelObjectiveRequest)(nil), // 10: google.monitoring.v3.UpdateServiceLevelObjectiveRequest + (*DeleteServiceLevelObjectiveRequest)(nil), // 11: google.monitoring.v3.DeleteServiceLevelObjectiveRequest + (*Service)(nil), // 12: google.monitoring.v3.Service + (*fieldmaskpb.FieldMask)(nil), // 13: google.protobuf.FieldMask + (*ServiceLevelObjective)(nil), // 14: google.monitoring.v3.ServiceLevelObjective + (ServiceLevelObjective_View)(0), // 15: google.monitoring.v3.ServiceLevelObjective.View + (*emptypb.Empty)(nil), // 16: google.protobuf.Empty +} +var file_google_monitoring_v3_service_service_proto_depIdxs = []int32{ + 12, // 0: google.monitoring.v3.CreateServiceRequest.service:type_name -> google.monitoring.v3.Service + 12, // 1: google.monitoring.v3.ListServicesResponse.services:type_name -> google.monitoring.v3.Service + 12, // 2: google.monitoring.v3.UpdateServiceRequest.service:type_name -> google.monitoring.v3.Service + 13, // 3: google.monitoring.v3.UpdateServiceRequest.update_mask:type_name -> google.protobuf.FieldMask + 14, // 4: google.monitoring.v3.CreateServiceLevelObjectiveRequest.service_level_objective:type_name -> google.monitoring.v3.ServiceLevelObjective + 15, // 5: google.monitoring.v3.GetServiceLevelObjectiveRequest.view:type_name -> google.monitoring.v3.ServiceLevelObjective.View + 15, // 6: google.monitoring.v3.ListServiceLevelObjectivesRequest.view:type_name -> google.monitoring.v3.ServiceLevelObjective.View + 14, // 7: google.monitoring.v3.ListServiceLevelObjectivesResponse.service_level_objectives:type_name -> google.monitoring.v3.ServiceLevelObjective + 14, // 8: google.monitoring.v3.UpdateServiceLevelObjectiveRequest.service_level_objective:type_name -> google.monitoring.v3.ServiceLevelObjective + 13, // 9: google.monitoring.v3.UpdateServiceLevelObjectiveRequest.update_mask:type_name -> google.protobuf.FieldMask + 0, // 10: google.monitoring.v3.ServiceMonitoringService.CreateService:input_type -> 
google.monitoring.v3.CreateServiceRequest + 1, // 11: google.monitoring.v3.ServiceMonitoringService.GetService:input_type -> google.monitoring.v3.GetServiceRequest + 2, // 12: google.monitoring.v3.ServiceMonitoringService.ListServices:input_type -> google.monitoring.v3.ListServicesRequest + 4, // 13: google.monitoring.v3.ServiceMonitoringService.UpdateService:input_type -> google.monitoring.v3.UpdateServiceRequest + 5, // 14: google.monitoring.v3.ServiceMonitoringService.DeleteService:input_type -> google.monitoring.v3.DeleteServiceRequest + 6, // 15: google.monitoring.v3.ServiceMonitoringService.CreateServiceLevelObjective:input_type -> google.monitoring.v3.CreateServiceLevelObjectiveRequest + 7, // 16: google.monitoring.v3.ServiceMonitoringService.GetServiceLevelObjective:input_type -> google.monitoring.v3.GetServiceLevelObjectiveRequest + 8, // 17: google.monitoring.v3.ServiceMonitoringService.ListServiceLevelObjectives:input_type -> google.monitoring.v3.ListServiceLevelObjectivesRequest + 10, // 18: google.monitoring.v3.ServiceMonitoringService.UpdateServiceLevelObjective:input_type -> google.monitoring.v3.UpdateServiceLevelObjectiveRequest + 11, // 19: google.monitoring.v3.ServiceMonitoringService.DeleteServiceLevelObjective:input_type -> google.monitoring.v3.DeleteServiceLevelObjectiveRequest + 12, // 20: google.monitoring.v3.ServiceMonitoringService.CreateService:output_type -> google.monitoring.v3.Service + 12, // 21: google.monitoring.v3.ServiceMonitoringService.GetService:output_type -> google.monitoring.v3.Service + 3, // 22: google.monitoring.v3.ServiceMonitoringService.ListServices:output_type -> google.monitoring.v3.ListServicesResponse + 12, // 23: google.monitoring.v3.ServiceMonitoringService.UpdateService:output_type -> google.monitoring.v3.Service + 16, // 24: google.monitoring.v3.ServiceMonitoringService.DeleteService:output_type -> google.protobuf.Empty + 14, // 25: google.monitoring.v3.ServiceMonitoringService.CreateServiceLevelObjective:output_type -> google.monitoring.v3.ServiceLevelObjective + 14, // 26: google.monitoring.v3.ServiceMonitoringService.GetServiceLevelObjective:output_type -> google.monitoring.v3.ServiceLevelObjective + 9, // 27: google.monitoring.v3.ServiceMonitoringService.ListServiceLevelObjectives:output_type -> google.monitoring.v3.ListServiceLevelObjectivesResponse + 14, // 28: google.monitoring.v3.ServiceMonitoringService.UpdateServiceLevelObjective:output_type -> google.monitoring.v3.ServiceLevelObjective + 16, // 29: google.monitoring.v3.ServiceMonitoringService.DeleteServiceLevelObjective:output_type -> google.protobuf.Empty + 20, // [20:30] is the sub-list for method output_type + 10, // [10:20] is the sub-list for method input_type + 10, // [10:10] is the sub-list for extension type_name + 10, // [10:10] is the sub-list for extension extendee + 0, // [0:10] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_service_service_proto_init() } +func file_google_monitoring_v3_service_service_proto_init() { + if File_google_monitoring_v3_service_service_proto != nil { + return + } + file_google_monitoring_v3_service_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_service_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*CreateServiceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[1].Exporter = func(v any, i int) 
any { + switch v := v.(*GetServiceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ListServicesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*ListServicesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*UpdateServiceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*DeleteServiceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*CreateServiceLevelObjectiveRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*GetServiceLevelObjectiveRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*ListServiceLevelObjectivesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*ListServiceLevelObjectivesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*UpdateServiceLevelObjectiveRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*DeleteServiceLevelObjectiveRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_service_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 12, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_service_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_service_service_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_service_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_service_service_proto = out.File + 
file_google_monitoring_v3_service_service_proto_rawDesc = nil + file_google_monitoring_v3_service_service_proto_goTypes = nil + file_google_monitoring_v3_service_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// ServiceMonitoringServiceClient is the client API for ServiceMonitoringService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ServiceMonitoringServiceClient interface { + // Create a `Service`. + CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*Service, error) + // Get the named `Service`. + GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*Service, error) + // List `Service`s for this Metrics Scope. + ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) + // Update this `Service`. + UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*Service, error) + // Soft delete this `Service`. + DeleteService(ctx context.Context, in *DeleteServiceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Create a `ServiceLevelObjective` for the given `Service`. + CreateServiceLevelObjective(ctx context.Context, in *CreateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) + // Get a `ServiceLevelObjective` by name. + GetServiceLevelObjective(ctx context.Context, in *GetServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) + // List the `ServiceLevelObjective`s for the given `Service`. + ListServiceLevelObjectives(ctx context.Context, in *ListServiceLevelObjectivesRequest, opts ...grpc.CallOption) (*ListServiceLevelObjectivesResponse, error) + // Update the given `ServiceLevelObjective`. + UpdateServiceLevelObjective(ctx context.Context, in *UpdateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) + // Delete the given `ServiceLevelObjective`. + DeleteServiceLevelObjective(ctx context.Context, in *DeleteServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type serviceMonitoringServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewServiceMonitoringServiceClient(cc grpc.ClientConnInterface) ServiceMonitoringServiceClient { + return &serviceMonitoringServiceClient{cc} +} + +func (c *serviceMonitoringServiceClient) CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*Service, error) { + out := new(Service) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/CreateService", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*Service, error) { + out := new(Service) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/GetService", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) { + out := new(ListServicesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/ListServices", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*Service, error) { + out := new(Service) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/UpdateService", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) DeleteService(ctx context.Context, in *DeleteServiceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/DeleteService", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) CreateServiceLevelObjective(ctx context.Context, in *CreateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) { + out := new(ServiceLevelObjective) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/CreateServiceLevelObjective", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) GetServiceLevelObjective(ctx context.Context, in *GetServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) { + out := new(ServiceLevelObjective) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/GetServiceLevelObjective", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) ListServiceLevelObjectives(ctx context.Context, in *ListServiceLevelObjectivesRequest, opts ...grpc.CallOption) (*ListServiceLevelObjectivesResponse, error) { + out := new(ListServiceLevelObjectivesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/ListServiceLevelObjectives", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) UpdateServiceLevelObjective(ctx context.Context, in *UpdateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) { + out := new(ServiceLevelObjective) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/UpdateServiceLevelObjective", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) DeleteServiceLevelObjective(ctx context.Context, in *DeleteServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/DeleteServiceLevelObjective", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ServiceMonitoringServiceServer is the server API for ServiceMonitoringService service. +type ServiceMonitoringServiceServer interface { + // Create a `Service`. + CreateService(context.Context, *CreateServiceRequest) (*Service, error) + // Get the named `Service`. 
+ GetService(context.Context, *GetServiceRequest) (*Service, error) + // List `Service`s for this Metrics Scope. + ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) + // Update this `Service`. + UpdateService(context.Context, *UpdateServiceRequest) (*Service, error) + // Soft delete this `Service`. + DeleteService(context.Context, *DeleteServiceRequest) (*emptypb.Empty, error) + // Create a `ServiceLevelObjective` for the given `Service`. + CreateServiceLevelObjective(context.Context, *CreateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) + // Get a `ServiceLevelObjective` by name. + GetServiceLevelObjective(context.Context, *GetServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) + // List the `ServiceLevelObjective`s for the given `Service`. + ListServiceLevelObjectives(context.Context, *ListServiceLevelObjectivesRequest) (*ListServiceLevelObjectivesResponse, error) + // Update the given `ServiceLevelObjective`. + UpdateServiceLevelObjective(context.Context, *UpdateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) + // Delete the given `ServiceLevelObjective`. + DeleteServiceLevelObjective(context.Context, *DeleteServiceLevelObjectiveRequest) (*emptypb.Empty, error) +} + +// UnimplementedServiceMonitoringServiceServer can be embedded to have forward compatible implementations. +type UnimplementedServiceMonitoringServiceServer struct { +} + +func (*UnimplementedServiceMonitoringServiceServer) CreateService(context.Context, *CreateServiceRequest) (*Service, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateService not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) GetService(context.Context, *GetServiceRequest) (*Service, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetService not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListServices not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) UpdateService(context.Context, *UpdateServiceRequest) (*Service, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateService not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) DeleteService(context.Context, *DeleteServiceRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteService not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) CreateServiceLevelObjective(context.Context, *CreateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateServiceLevelObjective not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) GetServiceLevelObjective(context.Context, *GetServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetServiceLevelObjective not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) ListServiceLevelObjectives(context.Context, *ListServiceLevelObjectivesRequest) (*ListServiceLevelObjectivesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListServiceLevelObjectives not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) UpdateServiceLevelObjective(context.Context, *UpdateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) { + return nil, 
status.Errorf(codes.Unimplemented, "method UpdateServiceLevelObjective not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) DeleteServiceLevelObjective(context.Context, *DeleteServiceLevelObjectiveRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteServiceLevelObjective not implemented") +} + +func RegisterServiceMonitoringServiceServer(s *grpc.Server, srv ServiceMonitoringServiceServer) { + s.RegisterService(&_ServiceMonitoringService_serviceDesc, srv) +} + +func _ServiceMonitoringService_CreateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).CreateService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/CreateService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).CreateService(ctx, req.(*CreateServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_GetService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).GetService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/GetService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).GetService(ctx, req.(*GetServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_ListServices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListServicesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).ListServices(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/ListServices", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).ListServices(ctx, req.(*ListServicesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_UpdateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).UpdateService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/UpdateService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).UpdateService(ctx, req.(*UpdateServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_DeleteService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).DeleteService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/DeleteService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).DeleteService(ctx, req.(*DeleteServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_CreateServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateServiceLevelObjectiveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).CreateServiceLevelObjective(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/CreateServiceLevelObjective", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).CreateServiceLevelObjective(ctx, req.(*CreateServiceLevelObjectiveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_GetServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServiceLevelObjectiveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).GetServiceLevelObjective(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/GetServiceLevelObjective", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).GetServiceLevelObjective(ctx, req.(*GetServiceLevelObjectiveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_ListServiceLevelObjectives_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListServiceLevelObjectivesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).ListServiceLevelObjectives(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/ListServiceLevelObjectives", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).ListServiceLevelObjectives(ctx, req.(*ListServiceLevelObjectivesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_UpdateServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateServiceLevelObjectiveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).UpdateServiceLevelObjective(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.monitoring.v3.ServiceMonitoringService/UpdateServiceLevelObjective", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).UpdateServiceLevelObjective(ctx, req.(*UpdateServiceLevelObjectiveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_DeleteServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteServiceLevelObjectiveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).DeleteServiceLevelObjective(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/DeleteServiceLevelObjective", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).DeleteServiceLevelObjective(ctx, req.(*DeleteServiceLevelObjectiveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ServiceMonitoringService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.ServiceMonitoringService", + HandlerType: (*ServiceMonitoringServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateService", + Handler: _ServiceMonitoringService_CreateService_Handler, + }, + { + MethodName: "GetService", + Handler: _ServiceMonitoringService_GetService_Handler, + }, + { + MethodName: "ListServices", + Handler: _ServiceMonitoringService_ListServices_Handler, + }, + { + MethodName: "UpdateService", + Handler: _ServiceMonitoringService_UpdateService_Handler, + }, + { + MethodName: "DeleteService", + Handler: _ServiceMonitoringService_DeleteService_Handler, + }, + { + MethodName: "CreateServiceLevelObjective", + Handler: _ServiceMonitoringService_CreateServiceLevelObjective_Handler, + }, + { + MethodName: "GetServiceLevelObjective", + Handler: _ServiceMonitoringService_GetServiceLevelObjective_Handler, + }, + { + MethodName: "ListServiceLevelObjectives", + Handler: _ServiceMonitoringService_ListServiceLevelObjectives_Handler, + }, + { + MethodName: "UpdateServiceLevelObjective", + Handler: _ServiceMonitoringService_UpdateServiceLevelObjective_Handler, + }, + { + MethodName: "DeleteServiceLevelObjective", + Handler: _ServiceMonitoringService_DeleteServiceLevelObjective_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/service_service.proto", +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go new file mode 100644 index 00000000000..ab49868045d --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go @@ -0,0 +1,315 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/snooze.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A `Snooze` will prevent any alerts from being opened, and close any that +// are already open. The `Snooze` will work on alerts that match the +// criteria defined in the `Snooze`. The `Snooze` will be active from +// `interval.start_time` through `interval.end_time`. +type Snooze struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the `Snooze`. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID] + // + // The ID of the `Snooze` will be generated by the system. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. This defines the criteria for applying the `Snooze`. See + // `Criteria` for more information. + Criteria *Snooze_Criteria `protobuf:"bytes,3,opt,name=criteria,proto3" json:"criteria,omitempty"` + // Required. The `Snooze` will be active from `interval.start_time` through + // `interval.end_time`. + // `interval.start_time` cannot be in the past. There is a 15 second clock + // skew to account for the time it takes for a request to reach the API from + // the UI. + Interval *TimeInterval `protobuf:"bytes,4,opt,name=interval,proto3" json:"interval,omitempty"` + // Required. A display name for the `Snooze`. This can be, at most, 512 + // unicode characters. + DisplayName string `protobuf:"bytes,5,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` +} + +func (x *Snooze) Reset() { + *x = Snooze{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Snooze) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Snooze) ProtoMessage() {} + +func (x *Snooze) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Snooze.ProtoReflect.Descriptor instead. 
+func (*Snooze) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_proto_rawDescGZIP(), []int{0} +} + +func (x *Snooze) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Snooze) GetCriteria() *Snooze_Criteria { + if x != nil { + return x.Criteria + } + return nil +} + +func (x *Snooze) GetInterval() *TimeInterval { + if x != nil { + return x.Interval + } + return nil +} + +func (x *Snooze) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +// Criteria specific to the `AlertPolicy`s that this `Snooze` applies to. The +// `Snooze` will suppress alerts that come from one of the `AlertPolicy`s +// whose names are supplied. +type Snooze_Criteria struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The specific `AlertPolicy` names for the alert that should be snoozed. + // The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID] + // + // There is a limit of 16 policies per snooze. This limit is checked during + // snooze creation. + Policies []string `protobuf:"bytes,1,rep,name=policies,proto3" json:"policies,omitempty"` +} + +func (x *Snooze_Criteria) Reset() { + *x = Snooze_Criteria{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Snooze_Criteria) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Snooze_Criteria) ProtoMessage() {} + +func (x *Snooze_Criteria) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Snooze_Criteria.ProtoReflect.Descriptor instead. 
+func (*Snooze_Criteria) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Snooze_Criteria) GetPolicies() []string { + if x != nil { + return x.Policies + } + return nil +} + +var File_google_monitoring_v3_snooze_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_snooze_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, + 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf6, 0x02, 0x0a, 0x06, 0x53, 0x6e, 0x6f, + 0x6f, 0x7a, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x08, + 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2e, 0x43, 0x72, 0x69, + 0x74, 0x65, 0x72, 0x69, 0x61, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x63, 0x72, 0x69, 0x74, + 0x65, 0x72, 0x69, 0x61, 0x12, 0x43, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x26, 0x0a, 0x0c, 0x64, 0x69, 0x73, + 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, + 0x65, 0x1a, 0x52, 0x0a, 0x08, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x12, 0x46, 0x0a, + 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, + 0x2a, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x08, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x3a, 0x4a, 0xea, 0x41, 0x47, 0x0a, 0x20, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x23, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, + 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 
0x7a, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, + 0x7d, 0x42, 0xc6, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, + 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, + 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, + 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_snooze_proto_rawDescOnce sync.Once + file_google_monitoring_v3_snooze_proto_rawDescData = file_google_monitoring_v3_snooze_proto_rawDesc +) + +func file_google_monitoring_v3_snooze_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_snooze_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_snooze_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_snooze_proto_rawDescData) + }) + return file_google_monitoring_v3_snooze_proto_rawDescData +} + +var file_google_monitoring_v3_snooze_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_google_monitoring_v3_snooze_proto_goTypes = []any{ + (*Snooze)(nil), // 0: google.monitoring.v3.Snooze + (*Snooze_Criteria)(nil), // 1: google.monitoring.v3.Snooze.Criteria + (*TimeInterval)(nil), // 2: google.monitoring.v3.TimeInterval +} +var file_google_monitoring_v3_snooze_proto_depIdxs = []int32{ + 1, // 0: google.monitoring.v3.Snooze.criteria:type_name -> google.monitoring.v3.Snooze.Criteria + 2, // 1: google.monitoring.v3.Snooze.interval:type_name -> google.monitoring.v3.TimeInterval + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_snooze_proto_init() } +func file_google_monitoring_v3_snooze_proto_init() { + if File_google_monitoring_v3_snooze_proto != nil { + return + } + file_google_monitoring_v3_common_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_snooze_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Snooze); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_snooze_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*Snooze_Criteria); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ 
+ File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_snooze_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_snooze_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_snooze_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_snooze_proto_msgTypes, + }.Build() + File_google_monitoring_v3_snooze_proto = out.File + file_google_monitoring_v3_snooze_proto_rawDesc = nil + file_google_monitoring_v3_snooze_proto_goTypes = nil + file_google_monitoring_v3_snooze_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go new file mode 100644 index 00000000000..39388a99828 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go @@ -0,0 +1,867 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/snooze_service.proto + +package monitoringpb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The message definition for creating a `Snooze`. Users must provide the body +// of the `Snooze` to be created but must omit the `Snooze` field, `name`. +type CreateSnoozeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) in which + // a `Snooze` should be created. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. The `Snooze` to create. Omit the `name` field, as it will be + // filled in by the API. 
+ Snooze *Snooze `protobuf:"bytes,2,opt,name=snooze,proto3" json:"snooze,omitempty"` +} + +func (x *CreateSnoozeRequest) Reset() { + *x = CreateSnoozeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateSnoozeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSnoozeRequest) ProtoMessage() {} + +func (x *CreateSnoozeRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSnoozeRequest.ProtoReflect.Descriptor instead. +func (*CreateSnoozeRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{0} +} + +func (x *CreateSnoozeRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateSnoozeRequest) GetSnooze() *Snooze { + if x != nil { + return x.Snooze + } + return nil +} + +// The message definition for listing `Snooze`s associated with the given +// `parent`, satisfying the optional `filter`. +type ListSnoozesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose + // `Snooze`s should be listed. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. Optional filter to restrict results to the given criteria. The + // following fields are supported. + // + // - `interval.start_time` + // - `interval.end_time` + // + // For example: + // + // ``` + // interval.start_time > "2022-03-11T00:00:00-08:00" AND + // interval.end_time < "2022-03-12T00:00:00-08:00" + // ``` + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // Optional. The maximum number of results to return for a single query. The + // server may further constrain the maximum number of results returned in a + // single page. The value should be in the range [1, 1000]. If the value given + // is outside this range, the server will decide the number of results to be + // returned. + PageSize int32 `protobuf:"varint,4,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional. The `next_page_token` from a previous call to + // `ListSnoozesRequest` to get the next page of results. 
+ PageToken string `protobuf:"bytes,5,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListSnoozesRequest) Reset() { + *x = ListSnoozesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListSnoozesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSnoozesRequest) ProtoMessage() {} + +func (x *ListSnoozesRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSnoozesRequest.ProtoReflect.Descriptor instead. +func (*ListSnoozesRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ListSnoozesRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListSnoozesRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListSnoozesRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListSnoozesRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The results of a successful `ListSnoozes` call, containing the matching +// `Snooze`s. +type ListSnoozesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // `Snooze`s matching this list call. + Snoozes []*Snooze `protobuf:"bytes,1,rep,name=snoozes,proto3" json:"snoozes,omitempty"` + // Page token for repeated calls to `ListSnoozes`, to fetch additional pages + // of results. If this is empty or missing, there are no more pages. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListSnoozesResponse) Reset() { + *x = ListSnoozesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListSnoozesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSnoozesResponse) ProtoMessage() {} + +func (x *ListSnoozesResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSnoozesResponse.ProtoReflect.Descriptor instead. +func (*ListSnoozesResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{2} +} + +func (x *ListSnoozesResponse) GetSnoozes() []*Snooze { + if x != nil { + return x.Snoozes + } + return nil +} + +func (x *ListSnoozesResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The message definition for retrieving a `Snooze`. Users must specify the +// field, `name`, which identifies the `Snooze`. 
+type GetSnoozeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The ID of the `Snooze` to retrieve. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetSnoozeRequest) Reset() { + *x = GetSnoozeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetSnoozeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSnoozeRequest) ProtoMessage() {} + +func (x *GetSnoozeRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSnoozeRequest.ProtoReflect.Descriptor instead. +func (*GetSnoozeRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{3} +} + +func (x *GetSnoozeRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The message definition for updating a `Snooze`. The field, `snooze.name` +// identifies the `Snooze` to be updated. The remainder of `snooze` gives the +// content the `Snooze` in question will be assigned. +// +// What fields can be updated depends on the start time and end time of the +// `Snooze`. +// +// - end time is in the past: These `Snooze`s are considered +// read-only and cannot be updated. +// - start time is in the past and end time is in the future: `display_name` +// and `interval.end_time` can be updated. +// - start time is in the future: `display_name`, `interval.start_time` and +// `interval.end_time` can be updated. +type UpdateSnoozeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The `Snooze` to update. Must have the name field present. + Snooze *Snooze `protobuf:"bytes,1,opt,name=snooze,proto3" json:"snooze,omitempty"` + // Required. The fields to update. + // + // For each field listed in `update_mask`: + // + // - If the `Snooze` object supplied in the `UpdateSnoozeRequest` has a + // value for that field, the value of the field in the existing `Snooze` + // will be set to the value of the field in the supplied `Snooze`. + // - If the field does not have a value in the supplied `Snooze`, the field + // in the existing `Snooze` is set to its default value. + // + // Fields not listed retain their existing value. + // + // The following are the field names that are accepted in `update_mask`: + // + // - `display_name` + // - `interval.start_time` + // - `interval.end_time` + // + // That said, the start time and end time of the `Snooze` determines which + // fields can legally be updated. Before attempting an update, users should + // consult the documentation for `UpdateSnoozeRequest`, which talks about + // which fields can be updated. 
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateSnoozeRequest) Reset() { + *x = UpdateSnoozeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateSnoozeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateSnoozeRequest) ProtoMessage() {} + +func (x *UpdateSnoozeRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateSnoozeRequest.ProtoReflect.Descriptor instead. +func (*UpdateSnoozeRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{4} +} + +func (x *UpdateSnoozeRequest) GetSnooze() *Snooze { + if x != nil { + return x.Snooze + } + return nil +} + +func (x *UpdateSnoozeRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +var File_google_monitoring_v3_snooze_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_snooze_service_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, + 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, + 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x92, 0x01, 0x0a, 0x13, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x40, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x22, 0x12, 0x20, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x06, 0x70, 0x61, 
0x72, + 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, + 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x22, 0xb9, + 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x22, 0x12, 0x20, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, + 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, + 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x75, 0x0a, 0x13, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x36, 0x0a, 0x07, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, + 0x52, 0x07, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, + 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x22, 0x0a, 0x20, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x22, 0x92, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, + 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x06, 0x73, + 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, + 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 
0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x32, 0x98, 0x06, 0x0a, 0x0d, 0x53, 0x6e, 0x6f, + 0x6f, 0x7a, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x29, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, + 0x6f, 0x6f, 0x7a, 0x65, 0x22, 0x3f, 0xda, 0x41, 0x0d, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, + 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x3a, 0x06, 0x73, 0x6e, + 0x6f, 0x6f, 0x7a, 0x65, 0x22, 0x1f, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, + 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x94, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, + 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, + 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0xda, 0x41, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x33, + 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x81, 0x01, 0x0a, + 0x09, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, + 0x22, 0x2e, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, + 0x1f, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x2f, 0x2a, 0x7d, + 0x12, 0xa4, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, + 0x65, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, + 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x22, 0x4b, 0xda, 0x41, 0x12, 0x73, + 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2c, 0x75, 
0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, + 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x30, 0x3a, 0x06, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x32, + 0x26, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2e, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x6f, + 0x6f, 0x7a, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, + 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, + 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, + 0x65, 0x61, 0x64, 0x42, 0xcd, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x42, 0x12, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, + 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_snooze_service_proto_rawDescOnce sync.Once + file_google_monitoring_v3_snooze_service_proto_rawDescData = file_google_monitoring_v3_snooze_service_proto_rawDesc +) + +func file_google_monitoring_v3_snooze_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_snooze_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_snooze_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_snooze_service_proto_rawDescData) + }) + return file_google_monitoring_v3_snooze_service_proto_rawDescData +} + +var file_google_monitoring_v3_snooze_service_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_google_monitoring_v3_snooze_service_proto_goTypes = []any{ + (*CreateSnoozeRequest)(nil), // 0: 
google.monitoring.v3.CreateSnoozeRequest + (*ListSnoozesRequest)(nil), // 1: google.monitoring.v3.ListSnoozesRequest + (*ListSnoozesResponse)(nil), // 2: google.monitoring.v3.ListSnoozesResponse + (*GetSnoozeRequest)(nil), // 3: google.monitoring.v3.GetSnoozeRequest + (*UpdateSnoozeRequest)(nil), // 4: google.monitoring.v3.UpdateSnoozeRequest + (*Snooze)(nil), // 5: google.monitoring.v3.Snooze + (*fieldmaskpb.FieldMask)(nil), // 6: google.protobuf.FieldMask +} +var file_google_monitoring_v3_snooze_service_proto_depIdxs = []int32{ + 5, // 0: google.monitoring.v3.CreateSnoozeRequest.snooze:type_name -> google.monitoring.v3.Snooze + 5, // 1: google.monitoring.v3.ListSnoozesResponse.snoozes:type_name -> google.monitoring.v3.Snooze + 5, // 2: google.monitoring.v3.UpdateSnoozeRequest.snooze:type_name -> google.monitoring.v3.Snooze + 6, // 3: google.monitoring.v3.UpdateSnoozeRequest.update_mask:type_name -> google.protobuf.FieldMask + 0, // 4: google.monitoring.v3.SnoozeService.CreateSnooze:input_type -> google.monitoring.v3.CreateSnoozeRequest + 1, // 5: google.monitoring.v3.SnoozeService.ListSnoozes:input_type -> google.monitoring.v3.ListSnoozesRequest + 3, // 6: google.monitoring.v3.SnoozeService.GetSnooze:input_type -> google.monitoring.v3.GetSnoozeRequest + 4, // 7: google.monitoring.v3.SnoozeService.UpdateSnooze:input_type -> google.monitoring.v3.UpdateSnoozeRequest + 5, // 8: google.monitoring.v3.SnoozeService.CreateSnooze:output_type -> google.monitoring.v3.Snooze + 2, // 9: google.monitoring.v3.SnoozeService.ListSnoozes:output_type -> google.monitoring.v3.ListSnoozesResponse + 5, // 10: google.monitoring.v3.SnoozeService.GetSnooze:output_type -> google.monitoring.v3.Snooze + 5, // 11: google.monitoring.v3.SnoozeService.UpdateSnooze:output_type -> google.monitoring.v3.Snooze + 8, // [8:12] is the sub-list for method output_type + 4, // [4:8] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_snooze_service_proto_init() } +func file_google_monitoring_v3_snooze_service_proto_init() { + if File_google_monitoring_v3_snooze_service_proto != nil { + return + } + file_google_monitoring_v3_snooze_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_snooze_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*CreateSnoozeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_snooze_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ListSnoozesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_snooze_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ListSnoozesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_snooze_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*GetSnoozeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_snooze_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := 
v.(*UpdateSnoozeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_snooze_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_snooze_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_snooze_service_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_snooze_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_snooze_service_proto = out.File + file_google_monitoring_v3_snooze_service_proto_rawDesc = nil + file_google_monitoring_v3_snooze_service_proto_goTypes = nil + file_google_monitoring_v3_snooze_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// SnoozeServiceClient is the client API for SnoozeService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type SnoozeServiceClient interface { + // Creates a `Snooze` that will prevent alerts, which match the provided + // criteria, from being opened. The `Snooze` applies for a specific time + // interval. + CreateSnooze(ctx context.Context, in *CreateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) + // Lists the `Snooze`s associated with a project. Can optionally pass in + // `filter`, which specifies predicates to match `Snooze`s. + ListSnoozes(ctx context.Context, in *ListSnoozesRequest, opts ...grpc.CallOption) (*ListSnoozesResponse, error) + // Retrieves a `Snooze` by `name`. + GetSnooze(ctx context.Context, in *GetSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) + // Updates a `Snooze`, identified by its `name`, with the parameters in the + // given `Snooze` object. + UpdateSnooze(ctx context.Context, in *UpdateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) +} + +type snoozeServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewSnoozeServiceClient(cc grpc.ClientConnInterface) SnoozeServiceClient { + return &snoozeServiceClient{cc} +} + +func (c *snoozeServiceClient) CreateSnooze(ctx context.Context, in *CreateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) { + out := new(Snooze) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/CreateSnooze", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snoozeServiceClient) ListSnoozes(ctx context.Context, in *ListSnoozesRequest, opts ...grpc.CallOption) (*ListSnoozesResponse, error) { + out := new(ListSnoozesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/ListSnoozes", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snoozeServiceClient) GetSnooze(ctx context.Context, in *GetSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) { + out := new(Snooze) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/GetSnooze", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *snoozeServiceClient) UpdateSnooze(ctx context.Context, in *UpdateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) { + out := new(Snooze) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/UpdateSnooze", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SnoozeServiceServer is the server API for SnoozeService service. +type SnoozeServiceServer interface { + // Creates a `Snooze` that will prevent alerts, which match the provided + // criteria, from being opened. The `Snooze` applies for a specific time + // interval. + CreateSnooze(context.Context, *CreateSnoozeRequest) (*Snooze, error) + // Lists the `Snooze`s associated with a project. Can optionally pass in + // `filter`, which specifies predicates to match `Snooze`s. + ListSnoozes(context.Context, *ListSnoozesRequest) (*ListSnoozesResponse, error) + // Retrieves a `Snooze` by `name`. + GetSnooze(context.Context, *GetSnoozeRequest) (*Snooze, error) + // Updates a `Snooze`, identified by its `name`, with the parameters in the + // given `Snooze` object. + UpdateSnooze(context.Context, *UpdateSnoozeRequest) (*Snooze, error) +} + +// UnimplementedSnoozeServiceServer can be embedded to have forward compatible implementations. +type UnimplementedSnoozeServiceServer struct { +} + +func (*UnimplementedSnoozeServiceServer) CreateSnooze(context.Context, *CreateSnoozeRequest) (*Snooze, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateSnooze not implemented") +} +func (*UnimplementedSnoozeServiceServer) ListSnoozes(context.Context, *ListSnoozesRequest) (*ListSnoozesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListSnoozes not implemented") +} +func (*UnimplementedSnoozeServiceServer) GetSnooze(context.Context, *GetSnoozeRequest) (*Snooze, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSnooze not implemented") +} +func (*UnimplementedSnoozeServiceServer) UpdateSnooze(context.Context, *UpdateSnoozeRequest) (*Snooze, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateSnooze not implemented") +} + +func RegisterSnoozeServiceServer(s *grpc.Server, srv SnoozeServiceServer) { + s.RegisterService(&_SnoozeService_serviceDesc, srv) +} + +func _SnoozeService_CreateSnooze_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSnoozeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnoozeServiceServer).CreateSnooze(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.SnoozeService/CreateSnooze", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnoozeServiceServer).CreateSnooze(ctx, req.(*CreateSnoozeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SnoozeService_ListSnoozes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSnoozesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnoozeServiceServer).ListSnoozes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.SnoozeService/ListSnoozes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + 
return srv.(SnoozeServiceServer).ListSnoozes(ctx, req.(*ListSnoozesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SnoozeService_GetSnooze_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSnoozeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnoozeServiceServer).GetSnooze(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.SnoozeService/GetSnooze", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnoozeServiceServer).GetSnooze(ctx, req.(*GetSnoozeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SnoozeService_UpdateSnooze_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateSnoozeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnoozeServiceServer).UpdateSnooze(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.SnoozeService/UpdateSnooze", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnoozeServiceServer).UpdateSnooze(ctx, req.(*UpdateSnoozeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _SnoozeService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.SnoozeService", + HandlerType: (*SnoozeServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateSnooze", + Handler: _SnoozeService_CreateSnooze_Handler, + }, + { + MethodName: "ListSnoozes", + Handler: _SnoozeService_ListSnoozes_Handler, + }, + { + MethodName: "GetSnooze", + Handler: _SnoozeService_GetSnooze_Handler, + }, + { + MethodName: "UpdateSnooze", + Handler: _SnoozeService_UpdateSnooze_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/snooze_service.proto", +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go new file mode 100644 index 00000000000..5a55ecc6650 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go @@ -0,0 +1,188 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/span_context.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The context of a span. This is attached to an +// [Exemplar][google.api.Distribution.Exemplar] +// in [Distribution][google.api.Distribution] values during aggregation. +// +// It contains the name of a span with format: +// +// projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID] +type SpanContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource name of the span. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID] + // + // `[TRACE_ID]` is a unique identifier for a trace within a project; + // it is a 32-character hexadecimal encoding of a 16-byte array. + // + // `[SPAN_ID]` is a unique identifier for a span within a trace; it + // is a 16-character hexadecimal encoding of an 8-byte array. + SpanName string `protobuf:"bytes,1,opt,name=span_name,json=spanName,proto3" json:"span_name,omitempty"` +} + +func (x *SpanContext) Reset() { + *x = SpanContext{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_span_context_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SpanContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SpanContext) ProtoMessage() {} + +func (x *SpanContext) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_span_context_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SpanContext.ProtoReflect.Descriptor instead. 
+func (*SpanContext) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_span_context_proto_rawDescGZIP(), []int{0} +} + +func (x *SpanContext) GetSpanName() string { + if x != nil { + return x.SpanName + } + return "" +} + +var File_google_monitoring_v3_span_context_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_span_context_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x22, + 0x2a, 0x0a, 0x0b, 0x53, 0x70, 0x61, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x73, 0x70, 0x61, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0xcb, 0x01, 0x0a, 0x18, + 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x53, 0x70, 0x61, 0x6e, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, + 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, + 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, + 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_google_monitoring_v3_span_context_proto_rawDescOnce sync.Once + file_google_monitoring_v3_span_context_proto_rawDescData = file_google_monitoring_v3_span_context_proto_rawDesc +) + +func file_google_monitoring_v3_span_context_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_span_context_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_span_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_span_context_proto_rawDescData) + }) + return file_google_monitoring_v3_span_context_proto_rawDescData +} + +var file_google_monitoring_v3_span_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_monitoring_v3_span_context_proto_goTypes = []any{ + (*SpanContext)(nil), // 0: google.monitoring.v3.SpanContext +} +var file_google_monitoring_v3_span_context_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_span_context_proto_init() } 
+func file_google_monitoring_v3_span_context_proto_init() { + if File_google_monitoring_v3_span_context_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_span_context_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*SpanContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_span_context_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_span_context_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_span_context_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_span_context_proto_msgTypes, + }.Build() + File_google_monitoring_v3_span_context_proto = out.File + file_google_monitoring_v3_span_context_proto_rawDesc = nil + file_google_monitoring_v3_span_context_proto_goTypes = nil + file_google_monitoring_v3_span_context_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go new file mode 100644 index 00000000000..e0b9e4a385a --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go @@ -0,0 +1,2726 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/uptime.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The regions from which an Uptime check can be run. +type UptimeCheckRegion int32 + +const ( + // Default value if no region is specified. Will result in Uptime checks + // running from all regions. + UptimeCheckRegion_REGION_UNSPECIFIED UptimeCheckRegion = 0 + // Allows checks to run from locations within the United States of America. + UptimeCheckRegion_USA UptimeCheckRegion = 1 + // Allows checks to run from locations within the continent of Europe. 
+ UptimeCheckRegion_EUROPE UptimeCheckRegion = 2 + // Allows checks to run from locations within the continent of South + // America. + UptimeCheckRegion_SOUTH_AMERICA UptimeCheckRegion = 3 + // Allows checks to run from locations within the Asia Pacific area (ex: + // Singapore). + UptimeCheckRegion_ASIA_PACIFIC UptimeCheckRegion = 4 + // Allows checks to run from locations within the western United States of + // America + UptimeCheckRegion_USA_OREGON UptimeCheckRegion = 5 + // Allows checks to run from locations within the central United States of + // America + UptimeCheckRegion_USA_IOWA UptimeCheckRegion = 6 + // Allows checks to run from locations within the eastern United States of + // America + UptimeCheckRegion_USA_VIRGINIA UptimeCheckRegion = 7 +) + +// Enum value maps for UptimeCheckRegion. +var ( + UptimeCheckRegion_name = map[int32]string{ + 0: "REGION_UNSPECIFIED", + 1: "USA", + 2: "EUROPE", + 3: "SOUTH_AMERICA", + 4: "ASIA_PACIFIC", + 5: "USA_OREGON", + 6: "USA_IOWA", + 7: "USA_VIRGINIA", + } + UptimeCheckRegion_value = map[string]int32{ + "REGION_UNSPECIFIED": 0, + "USA": 1, + "EUROPE": 2, + "SOUTH_AMERICA": 3, + "ASIA_PACIFIC": 4, + "USA_OREGON": 5, + "USA_IOWA": 6, + "USA_VIRGINIA": 7, + } +) + +func (x UptimeCheckRegion) Enum() *UptimeCheckRegion { + p := new(UptimeCheckRegion) + *p = x + return p +} + +func (x UptimeCheckRegion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckRegion) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[0].Descriptor() +} + +func (UptimeCheckRegion) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[0] +} + +func (x UptimeCheckRegion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckRegion.Descriptor instead. +func (UptimeCheckRegion) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{0} +} + +// The supported resource types that can be used as values of +// `group_resource.resource_type`. +// `INSTANCE` includes `gce_instance` and `aws_ec2_instance` resource types. +// The resource types `gae_app` and `uptime_url` are not valid here because +// group checks on App Engine modules and URLs are not allowed. +type GroupResourceType int32 + +const ( + // Default value (not valid). + GroupResourceType_RESOURCE_TYPE_UNSPECIFIED GroupResourceType = 0 + // A group of instances from Google Cloud Platform (GCP) or + // Amazon Web Services (AWS). + GroupResourceType_INSTANCE GroupResourceType = 1 + // A group of Amazon ELB load balancers. + GroupResourceType_AWS_ELB_LOAD_BALANCER GroupResourceType = 2 +) + +// Enum value maps for GroupResourceType. 
+var ( + GroupResourceType_name = map[int32]string{ + 0: "RESOURCE_TYPE_UNSPECIFIED", + 1: "INSTANCE", + 2: "AWS_ELB_LOAD_BALANCER", + } + GroupResourceType_value = map[string]int32{ + "RESOURCE_TYPE_UNSPECIFIED": 0, + "INSTANCE": 1, + "AWS_ELB_LOAD_BALANCER": 2, + } +) + +func (x GroupResourceType) Enum() *GroupResourceType { + p := new(GroupResourceType) + *p = x + return p +} + +func (x GroupResourceType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GroupResourceType) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[1].Descriptor() +} + +func (GroupResourceType) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[1] +} + +func (x GroupResourceType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GroupResourceType.Descriptor instead. +func (GroupResourceType) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1} +} + +// Operational states for an internal checker. +type InternalChecker_State int32 + +const ( + // An internal checker should never be in the unspecified state. + InternalChecker_UNSPECIFIED InternalChecker_State = 0 + // The checker is being created, provisioned, and configured. A checker in + // this state can be returned by `ListInternalCheckers` or + // `GetInternalChecker`, as well as by examining the [long running + // Operation](https://cloud.google.com/apis/design/design_patterns#long_running_operations) + // that created it. + InternalChecker_CREATING InternalChecker_State = 1 + // The checker is running and available for use. A checker in this state + // can be returned by `ListInternalCheckers` or `GetInternalChecker` as + // well as by examining the [long running + // Operation](https://cloud.google.com/apis/design/design_patterns#long_running_operations) + // that created it. + // If a checker is being torn down, it is neither visible nor usable, so + // there is no "deleting" or "down" state. + InternalChecker_RUNNING InternalChecker_State = 2 +) + +// Enum value maps for InternalChecker_State. +var ( + InternalChecker_State_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "CREATING", + 2: "RUNNING", + } + InternalChecker_State_value = map[string]int32{ + "UNSPECIFIED": 0, + "CREATING": 1, + "RUNNING": 2, + } +) + +func (x InternalChecker_State) Enum() *InternalChecker_State { + p := new(InternalChecker_State) + *p = x + return p +} + +func (x InternalChecker_State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (InternalChecker_State) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[2].Descriptor() +} + +func (InternalChecker_State) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[2] +} + +func (x InternalChecker_State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use InternalChecker_State.Descriptor instead. +func (InternalChecker_State) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{0, 0} +} + +// What kind of checkers are available to be used by the check. +type UptimeCheckConfig_CheckerType int32 + +const ( + // The default checker type. Currently converted to `STATIC_IP_CHECKERS` + // on creation, the default conversion behavior may change in the future. 
+ UptimeCheckConfig_CHECKER_TYPE_UNSPECIFIED UptimeCheckConfig_CheckerType = 0 + // `STATIC_IP_CHECKERS` are used for uptime checks that perform egress + // across the public internet. `STATIC_IP_CHECKERS` use the static IP + // addresses returned by `ListUptimeCheckIps`. + UptimeCheckConfig_STATIC_IP_CHECKERS UptimeCheckConfig_CheckerType = 1 + // `VPC_CHECKERS` are used for uptime checks that perform egress using + // Service Directory and private network access. When using `VPC_CHECKERS`, + // the monitored resource type must be `servicedirectory_service`. + UptimeCheckConfig_VPC_CHECKERS UptimeCheckConfig_CheckerType = 3 +) + +// Enum value maps for UptimeCheckConfig_CheckerType. +var ( + UptimeCheckConfig_CheckerType_name = map[int32]string{ + 0: "CHECKER_TYPE_UNSPECIFIED", + 1: "STATIC_IP_CHECKERS", + 3: "VPC_CHECKERS", + } + UptimeCheckConfig_CheckerType_value = map[string]int32{ + "CHECKER_TYPE_UNSPECIFIED": 0, + "STATIC_IP_CHECKERS": 1, + "VPC_CHECKERS": 3, + } +) + +func (x UptimeCheckConfig_CheckerType) Enum() *UptimeCheckConfig_CheckerType { + p := new(UptimeCheckConfig_CheckerType) + *p = x + return p +} + +func (x UptimeCheckConfig_CheckerType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckConfig_CheckerType) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[3].Descriptor() +} + +func (UptimeCheckConfig_CheckerType) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[3] +} + +func (x UptimeCheckConfig_CheckerType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckConfig_CheckerType.Descriptor instead. +func (UptimeCheckConfig_CheckerType) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 0} +} + +// The HTTP request method options. +type UptimeCheckConfig_HttpCheck_RequestMethod int32 + +const ( + // No request method specified. + UptimeCheckConfig_HttpCheck_METHOD_UNSPECIFIED UptimeCheckConfig_HttpCheck_RequestMethod = 0 + // GET request. + UptimeCheckConfig_HttpCheck_GET UptimeCheckConfig_HttpCheck_RequestMethod = 1 + // POST request. + UptimeCheckConfig_HttpCheck_POST UptimeCheckConfig_HttpCheck_RequestMethod = 2 +) + +// Enum value maps for UptimeCheckConfig_HttpCheck_RequestMethod. +var ( + UptimeCheckConfig_HttpCheck_RequestMethod_name = map[int32]string{ + 0: "METHOD_UNSPECIFIED", + 1: "GET", + 2: "POST", + } + UptimeCheckConfig_HttpCheck_RequestMethod_value = map[string]int32{ + "METHOD_UNSPECIFIED": 0, + "GET": 1, + "POST": 2, + } +) + +func (x UptimeCheckConfig_HttpCheck_RequestMethod) Enum() *UptimeCheckConfig_HttpCheck_RequestMethod { + p := new(UptimeCheckConfig_HttpCheck_RequestMethod) + *p = x + return p +} + +func (x UptimeCheckConfig_HttpCheck_RequestMethod) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckConfig_HttpCheck_RequestMethod) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[4].Descriptor() +} + +func (UptimeCheckConfig_HttpCheck_RequestMethod) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[4] +} + +func (x UptimeCheckConfig_HttpCheck_RequestMethod) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_RequestMethod.Descriptor instead. 
+func (UptimeCheckConfig_HttpCheck_RequestMethod) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 0} +} + +// Header options corresponding to the content type of a HTTP request body. +type UptimeCheckConfig_HttpCheck_ContentType int32 + +const ( + // No content type specified. + UptimeCheckConfig_HttpCheck_TYPE_UNSPECIFIED UptimeCheckConfig_HttpCheck_ContentType = 0 + // `body` is in URL-encoded form. Equivalent to setting the `Content-Type` + // to `application/x-www-form-urlencoded` in the HTTP request. + UptimeCheckConfig_HttpCheck_URL_ENCODED UptimeCheckConfig_HttpCheck_ContentType = 1 + // `body` is in `custom_content_type` form. Equivalent to setting the + // `Content-Type` to the contents of `custom_content_type` in the HTTP + // request. + UptimeCheckConfig_HttpCheck_USER_PROVIDED UptimeCheckConfig_HttpCheck_ContentType = 2 +) + +// Enum value maps for UptimeCheckConfig_HttpCheck_ContentType. +var ( + UptimeCheckConfig_HttpCheck_ContentType_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "URL_ENCODED", + 2: "USER_PROVIDED", + } + UptimeCheckConfig_HttpCheck_ContentType_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "URL_ENCODED": 1, + "USER_PROVIDED": 2, + } +) + +func (x UptimeCheckConfig_HttpCheck_ContentType) Enum() *UptimeCheckConfig_HttpCheck_ContentType { + p := new(UptimeCheckConfig_HttpCheck_ContentType) + *p = x + return p +} + +func (x UptimeCheckConfig_HttpCheck_ContentType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckConfig_HttpCheck_ContentType) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[5].Descriptor() +} + +func (UptimeCheckConfig_HttpCheck_ContentType) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[5] +} + +func (x UptimeCheckConfig_HttpCheck_ContentType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_ContentType.Descriptor instead. +func (UptimeCheckConfig_HttpCheck_ContentType) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 1} +} + +// An HTTP status code class. +type UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass int32 + +const ( + // Default value that matches no status codes. + UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_UNSPECIFIED UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 0 + // The class of status codes between 100 and 199. + UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_1XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 100 + // The class of status codes between 200 and 299. + UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_2XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 200 + // The class of status codes between 300 and 399. + UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_3XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 300 + // The class of status codes between 400 and 499. + UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_4XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 400 + // The class of status codes between 500 and 599. + UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_5XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 500 + // The class of all status codes. 
+ UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_ANY UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 1000 +) + +// Enum value maps for UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass. +var ( + UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_name = map[int32]string{ + 0: "STATUS_CLASS_UNSPECIFIED", + 100: "STATUS_CLASS_1XX", + 200: "STATUS_CLASS_2XX", + 300: "STATUS_CLASS_3XX", + 400: "STATUS_CLASS_4XX", + 500: "STATUS_CLASS_5XX", + 1000: "STATUS_CLASS_ANY", + } + UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_value = map[string]int32{ + "STATUS_CLASS_UNSPECIFIED": 0, + "STATUS_CLASS_1XX": 100, + "STATUS_CLASS_2XX": 200, + "STATUS_CLASS_3XX": 300, + "STATUS_CLASS_4XX": 400, + "STATUS_CLASS_5XX": 500, + "STATUS_CLASS_ANY": 1000, + } +) + +func (x UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) Enum() *UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass { + p := new(UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) + *p = x + return p +} + +func (x UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[6].Descriptor() +} + +func (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[6] +} + +func (x UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass.Descriptor instead. +func (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 1, 0} +} + +// Type of authentication. +type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType int32 + +const ( + // Default value, will result in OIDC Authentication. + UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType = 0 + // OIDC Authentication + UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_OIDC_TOKEN UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType = 1 +) + +// Enum value maps for UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType. 
+var ( + UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType_name = map[int32]string{ + 0: "SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED", + 1: "OIDC_TOKEN", + } + UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType_value = map[string]int32{ + "SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED": 0, + "OIDC_TOKEN": 1, + } +) + +func (x UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Enum() *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType { + p := new(UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) + *p = x + return p +} + +func (x UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[7].Descriptor() +} + +func (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[7] +} + +func (x UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType.Descriptor instead. +func (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 2, 0} +} + +// Options to perform content matching. +type UptimeCheckConfig_ContentMatcher_ContentMatcherOption int32 + +const ( + // No content matcher type specified (maintained for backward + // compatibility, but deprecated for future use). + // Treated as `CONTAINS_STRING`. + UptimeCheckConfig_ContentMatcher_CONTENT_MATCHER_OPTION_UNSPECIFIED UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 0 + // Selects substring matching. The match succeeds if the output contains + // the `content` string. This is the default value for checks without + // a `matcher` option, or where the value of `matcher` is + // `CONTENT_MATCHER_OPTION_UNSPECIFIED`. + UptimeCheckConfig_ContentMatcher_CONTAINS_STRING UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 1 + // Selects negation of substring matching. The match succeeds if the + // output does _NOT_ contain the `content` string. + UptimeCheckConfig_ContentMatcher_NOT_CONTAINS_STRING UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 2 + // Selects regular-expression matching. The match succeeds if the output + // matches the regular expression specified in the `content` string. + // Regex matching is only supported for HTTP/HTTPS checks. + UptimeCheckConfig_ContentMatcher_MATCHES_REGEX UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 3 + // Selects negation of regular-expression matching. The match succeeds if + // the output does _NOT_ match the regular expression specified in the + // `content` string. Regex matching is only supported for HTTP/HTTPS + // checks. + UptimeCheckConfig_ContentMatcher_NOT_MATCHES_REGEX UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 4 + // Selects JSONPath matching. 
See `JsonPathMatcher` for details on when + // the match succeeds. JSONPath matching is only supported for HTTP/HTTPS + // checks. + UptimeCheckConfig_ContentMatcher_MATCHES_JSON_PATH UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 5 + // Selects JSONPath matching. See `JsonPathMatcher` for details on when + // the match succeeds. Succeeds when output does _NOT_ match as specified. + // JSONPath is only supported for HTTP/HTTPS checks. + UptimeCheckConfig_ContentMatcher_NOT_MATCHES_JSON_PATH UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 6 +) + +// Enum value maps for UptimeCheckConfig_ContentMatcher_ContentMatcherOption. +var ( + UptimeCheckConfig_ContentMatcher_ContentMatcherOption_name = map[int32]string{ + 0: "CONTENT_MATCHER_OPTION_UNSPECIFIED", + 1: "CONTAINS_STRING", + 2: "NOT_CONTAINS_STRING", + 3: "MATCHES_REGEX", + 4: "NOT_MATCHES_REGEX", + 5: "MATCHES_JSON_PATH", + 6: "NOT_MATCHES_JSON_PATH", + } + UptimeCheckConfig_ContentMatcher_ContentMatcherOption_value = map[string]int32{ + "CONTENT_MATCHER_OPTION_UNSPECIFIED": 0, + "CONTAINS_STRING": 1, + "NOT_CONTAINS_STRING": 2, + "MATCHES_REGEX": 3, + "NOT_MATCHES_REGEX": 4, + "MATCHES_JSON_PATH": 5, + "NOT_MATCHES_JSON_PATH": 6, + } +) + +func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Enum() *UptimeCheckConfig_ContentMatcher_ContentMatcherOption { + p := new(UptimeCheckConfig_ContentMatcher_ContentMatcherOption) + *p = x + return p +} + +func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[8].Descriptor() +} + +func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[8] +} + +func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckConfig_ContentMatcher_ContentMatcherOption.Descriptor instead. +func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4, 0} +} + +// Options to perform JSONPath content matching. +type UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption int32 + +const ( + // No JSONPath matcher type specified (not valid). + UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JSON_PATH_MATCHER_OPTION_UNSPECIFIED UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption = 0 + // Selects 'exact string' matching. The match succeeds if the content at + // the `json_path` within the output is exactly the same as the + // `content` string. + UptimeCheckConfig_ContentMatcher_JsonPathMatcher_EXACT_MATCH UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption = 1 + // Selects regular-expression matching. The match succeeds if the + // content at the `json_path` within the output matches the regular + // expression specified in the `content` string. + UptimeCheckConfig_ContentMatcher_JsonPathMatcher_REGEX_MATCH UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption = 2 +) + +// Enum value maps for UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption. 
+var ( + UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption_name = map[int32]string{ + 0: "JSON_PATH_MATCHER_OPTION_UNSPECIFIED", + 1: "EXACT_MATCH", + 2: "REGEX_MATCH", + } + UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption_value = map[string]int32{ + "JSON_PATH_MATCHER_OPTION_UNSPECIFIED": 0, + "EXACT_MATCH": 1, + "REGEX_MATCH": 2, + } +) + +func (x UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Enum() *UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption { + p := new(UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) + *p = x + return p +} + +func (x UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[9].Descriptor() +} + +func (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[9] +} + +func (x UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption.Descriptor instead. +func (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4, 0, 0} +} + +// An internal checker allows Uptime checks to run on private/internal GCP +// resources. +// +// Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto. +type InternalChecker struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A unique resource name for this InternalChecker. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/internalCheckers/[INTERNAL_CHECKER_ID] + // + // `[PROJECT_ID_OR_NUMBER]` is the Cloud Monitoring Metrics Scope project for + // the Uptime check config associated with the internal checker. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The checker's human-readable name. The display name + // should be unique within a Cloud Monitoring Metrics Scope in order to make + // it easier to identify; however, uniqueness is not enforced. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The [GCP VPC network](https://cloud.google.com/vpc/docs/vpc) where the + // internal resource lives (ex: "default"). + Network string `protobuf:"bytes,3,opt,name=network,proto3" json:"network,omitempty"` + // The GCP zone the Uptime check should egress from. Only respected for + // internal Uptime checks, where internal_network is specified. + GcpZone string `protobuf:"bytes,4,opt,name=gcp_zone,json=gcpZone,proto3" json:"gcp_zone,omitempty"` + // The GCP project ID where the internal checker lives. Not necessary + // the same as the Metrics Scope project. + PeerProjectId string `protobuf:"bytes,6,opt,name=peer_project_id,json=peerProjectId,proto3" json:"peer_project_id,omitempty"` + // The current operational state of the internal checker. 
+ State InternalChecker_State `protobuf:"varint,7,opt,name=state,proto3,enum=google.monitoring.v3.InternalChecker_State" json:"state,omitempty"` +} + +func (x *InternalChecker) Reset() { + *x = InternalChecker{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InternalChecker) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InternalChecker) ProtoMessage() {} + +func (x *InternalChecker) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InternalChecker.ProtoReflect.Descriptor instead. +func (*InternalChecker) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{0} +} + +func (x *InternalChecker) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *InternalChecker) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *InternalChecker) GetNetwork() string { + if x != nil { + return x.Network + } + return "" +} + +func (x *InternalChecker) GetGcpZone() string { + if x != nil { + return x.GcpZone + } + return "" +} + +func (x *InternalChecker) GetPeerProjectId() string { + if x != nil { + return x.PeerProjectId + } + return "" +} + +func (x *InternalChecker) GetState() InternalChecker_State { + if x != nil { + return x.State + } + return InternalChecker_UNSPECIFIED +} + +// Describes a Synthetic Monitor to be invoked by Uptime. +type SyntheticMonitorTarget struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies a Synthetic Monitor's execution stack. + // + // Types that are assignable to Target: + // + // *SyntheticMonitorTarget_CloudFunctionV2 + Target isSyntheticMonitorTarget_Target `protobuf_oneof:"target"` +} + +func (x *SyntheticMonitorTarget) Reset() { + *x = SyntheticMonitorTarget{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SyntheticMonitorTarget) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyntheticMonitorTarget) ProtoMessage() {} + +func (x *SyntheticMonitorTarget) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyntheticMonitorTarget.ProtoReflect.Descriptor instead. 
+func (*SyntheticMonitorTarget) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1} +} + +func (m *SyntheticMonitorTarget) GetTarget() isSyntheticMonitorTarget_Target { + if m != nil { + return m.Target + } + return nil +} + +func (x *SyntheticMonitorTarget) GetCloudFunctionV2() *SyntheticMonitorTarget_CloudFunctionV2Target { + if x, ok := x.GetTarget().(*SyntheticMonitorTarget_CloudFunctionV2); ok { + return x.CloudFunctionV2 + } + return nil +} + +type isSyntheticMonitorTarget_Target interface { + isSyntheticMonitorTarget_Target() +} + +type SyntheticMonitorTarget_CloudFunctionV2 struct { + // Target a Synthetic Monitor GCFv2 instance. + CloudFunctionV2 *SyntheticMonitorTarget_CloudFunctionV2Target `protobuf:"bytes,1,opt,name=cloud_function_v2,json=cloudFunctionV2,proto3,oneof"` +} + +func (*SyntheticMonitorTarget_CloudFunctionV2) isSyntheticMonitorTarget_Target() {} + +// This message configures which resources and services to monitor for +// availability. +type UptimeCheckConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier. A unique resource name for this Uptime check configuration. The + // format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] + // + // `[PROJECT_ID_OR_NUMBER]` is the Workspace host project associated with the + // Uptime check. + // + // This field should be omitted when creating the Uptime check configuration; + // on create, the resource name is assigned by the server and included in the + // response. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A human-friendly name for the Uptime check configuration. The display name + // should be unique within a Cloud Monitoring Workspace in order to make it + // easier to identify; however, uniqueness is not enforced. Required. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The resource the check is checking. Required. + // + // Types that are assignable to Resource: + // + // *UptimeCheckConfig_MonitoredResource + // *UptimeCheckConfig_ResourceGroup_ + // *UptimeCheckConfig_SyntheticMonitor + Resource isUptimeCheckConfig_Resource `protobuf_oneof:"resource"` + // The type of Uptime check request. + // + // Types that are assignable to CheckRequestType: + // + // *UptimeCheckConfig_HttpCheck_ + // *UptimeCheckConfig_TcpCheck_ + CheckRequestType isUptimeCheckConfig_CheckRequestType `protobuf_oneof:"check_request_type"` + // How often, in seconds, the Uptime check is performed. + // Currently, the only supported values are `60s` (1 minute), `300s` + // (5 minutes), `600s` (10 minutes), and `900s` (15 minutes). Optional, + // defaults to `60s`. + Period *durationpb.Duration `protobuf:"bytes,7,opt,name=period,proto3" json:"period,omitempty"` + // The maximum amount of time to wait for the request to complete (must be + // between 1 and 60 seconds). Required. + Timeout *durationpb.Duration `protobuf:"bytes,8,opt,name=timeout,proto3" json:"timeout,omitempty"` + // The content that is expected to appear in the data returned by the target + // server against which the check is run. Currently, only the first entry + // in the `content_matchers` list is supported, and additional entries will + // be ignored. This field is optional and should only be specified if a + // content match is required as part of the/ Uptime check. 
+ ContentMatchers []*UptimeCheckConfig_ContentMatcher `protobuf:"bytes,9,rep,name=content_matchers,json=contentMatchers,proto3" json:"content_matchers,omitempty"` + // The type of checkers to use to execute the Uptime check. + CheckerType UptimeCheckConfig_CheckerType `protobuf:"varint,17,opt,name=checker_type,json=checkerType,proto3,enum=google.monitoring.v3.UptimeCheckConfig_CheckerType" json:"checker_type,omitempty"` + // The list of regions from which the check will be run. + // Some regions contain one location, and others contain more than one. + // If this field is specified, enough regions must be provided to include a + // minimum of 3 locations. Not specifying this field will result in Uptime + // checks running from all available regions. + SelectedRegions []UptimeCheckRegion `protobuf:"varint,10,rep,packed,name=selected_regions,json=selectedRegions,proto3,enum=google.monitoring.v3.UptimeCheckRegion" json:"selected_regions,omitempty"` + // If this is `true`, then checks are made only from the 'internal_checkers'. + // If it is `false`, then checks are made only from the 'selected_regions'. + // It is an error to provide 'selected_regions' when is_internal is `true`, + // or to provide 'internal_checkers' when is_internal is `false`. + // + // Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto. + IsInternal bool `protobuf:"varint,15,opt,name=is_internal,json=isInternal,proto3" json:"is_internal,omitempty"` + // The internal checkers that this check will egress from. If `is_internal` is + // `true` and this list is empty, the check will egress from all the + // InternalCheckers configured for the project that owns this + // `UptimeCheckConfig`. + // + // Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto. + InternalCheckers []*InternalChecker `protobuf:"bytes,14,rep,name=internal_checkers,json=internalCheckers,proto3" json:"internal_checkers,omitempty"` + // User-supplied key/value data to be used for organizing and + // identifying the `UptimeCheckConfig` objects. + // + // The field can contain up to 64 entries. Each key and value is limited to + // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and + // values can contain only lowercase letters, numerals, underscores, and + // dashes. Keys must begin with a letter. + UserLabels map[string]string `protobuf:"bytes,20,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *UptimeCheckConfig) Reset() { + *x = UptimeCheckConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig) ProtoMessage() {} + +func (x *UptimeCheckConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig.ProtoReflect.Descriptor instead. 
+func (*UptimeCheckConfig) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2} +} + +func (x *UptimeCheckConfig) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *UptimeCheckConfig) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (m *UptimeCheckConfig) GetResource() isUptimeCheckConfig_Resource { + if m != nil { + return m.Resource + } + return nil +} + +func (x *UptimeCheckConfig) GetMonitoredResource() *monitoredres.MonitoredResource { + if x, ok := x.GetResource().(*UptimeCheckConfig_MonitoredResource); ok { + return x.MonitoredResource + } + return nil +} + +func (x *UptimeCheckConfig) GetResourceGroup() *UptimeCheckConfig_ResourceGroup { + if x, ok := x.GetResource().(*UptimeCheckConfig_ResourceGroup_); ok { + return x.ResourceGroup + } + return nil +} + +func (x *UptimeCheckConfig) GetSyntheticMonitor() *SyntheticMonitorTarget { + if x, ok := x.GetResource().(*UptimeCheckConfig_SyntheticMonitor); ok { + return x.SyntheticMonitor + } + return nil +} + +func (m *UptimeCheckConfig) GetCheckRequestType() isUptimeCheckConfig_CheckRequestType { + if m != nil { + return m.CheckRequestType + } + return nil +} + +func (x *UptimeCheckConfig) GetHttpCheck() *UptimeCheckConfig_HttpCheck { + if x, ok := x.GetCheckRequestType().(*UptimeCheckConfig_HttpCheck_); ok { + return x.HttpCheck + } + return nil +} + +func (x *UptimeCheckConfig) GetTcpCheck() *UptimeCheckConfig_TcpCheck { + if x, ok := x.GetCheckRequestType().(*UptimeCheckConfig_TcpCheck_); ok { + return x.TcpCheck + } + return nil +} + +func (x *UptimeCheckConfig) GetPeriod() *durationpb.Duration { + if x != nil { + return x.Period + } + return nil +} + +func (x *UptimeCheckConfig) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +func (x *UptimeCheckConfig) GetContentMatchers() []*UptimeCheckConfig_ContentMatcher { + if x != nil { + return x.ContentMatchers + } + return nil +} + +func (x *UptimeCheckConfig) GetCheckerType() UptimeCheckConfig_CheckerType { + if x != nil { + return x.CheckerType + } + return UptimeCheckConfig_CHECKER_TYPE_UNSPECIFIED +} + +func (x *UptimeCheckConfig) GetSelectedRegions() []UptimeCheckRegion { + if x != nil { + return x.SelectedRegions + } + return nil +} + +// Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto. +func (x *UptimeCheckConfig) GetIsInternal() bool { + if x != nil { + return x.IsInternal + } + return false +} + +// Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto. +func (x *UptimeCheckConfig) GetInternalCheckers() []*InternalChecker { + if x != nil { + return x.InternalCheckers + } + return nil +} + +func (x *UptimeCheckConfig) GetUserLabels() map[string]string { + if x != nil { + return x.UserLabels + } + return nil +} + +type isUptimeCheckConfig_Resource interface { + isUptimeCheckConfig_Resource() +} + +type UptimeCheckConfig_MonitoredResource struct { + // The [monitored + // resource](https://cloud.google.com/monitoring/api/resources) associated + // with the configuration. 
+	// The following monitored resource types are valid for this field:
+	//
+	//	`uptime_url`,
+	//	`gce_instance`,
+	//	`gae_app`,
+	//	`aws_ec2_instance`,
+	//	`aws_elb_load_balancer`,
+	//	`k8s_service`,
+	//	`servicedirectory_service`,
+	//	`cloud_run_revision`
+	MonitoredResource *monitoredres.MonitoredResource `protobuf:"bytes,3,opt,name=monitored_resource,json=monitoredResource,proto3,oneof"`
+}
+
+type UptimeCheckConfig_ResourceGroup_ struct {
+	// The group resource associated with the configuration.
+	ResourceGroup *UptimeCheckConfig_ResourceGroup `protobuf:"bytes,4,opt,name=resource_group,json=resourceGroup,proto3,oneof"`
+}
+
+type UptimeCheckConfig_SyntheticMonitor struct {
+	// Specifies a Synthetic Monitor to invoke.
+	SyntheticMonitor *SyntheticMonitorTarget `protobuf:"bytes,21,opt,name=synthetic_monitor,json=syntheticMonitor,proto3,oneof"`
+}
+
+func (*UptimeCheckConfig_MonitoredResource) isUptimeCheckConfig_Resource() {}
+
+func (*UptimeCheckConfig_ResourceGroup_) isUptimeCheckConfig_Resource() {}
+
+func (*UptimeCheckConfig_SyntheticMonitor) isUptimeCheckConfig_Resource() {}
+
+type isUptimeCheckConfig_CheckRequestType interface {
+	isUptimeCheckConfig_CheckRequestType()
+}
+
+type UptimeCheckConfig_HttpCheck_ struct {
+	// Contains information needed to make an HTTP or HTTPS check.
+	HttpCheck *UptimeCheckConfig_HttpCheck `protobuf:"bytes,5,opt,name=http_check,json=httpCheck,proto3,oneof"`
+}
+
+type UptimeCheckConfig_TcpCheck_ struct {
+	// Contains information needed to make a TCP check.
+	TcpCheck *UptimeCheckConfig_TcpCheck `protobuf:"bytes,6,opt,name=tcp_check,json=tcpCheck,proto3,oneof"`
+}
+
+func (*UptimeCheckConfig_HttpCheck_) isUptimeCheckConfig_CheckRequestType() {}
+
+func (*UptimeCheckConfig_TcpCheck_) isUptimeCheckConfig_CheckRequestType() {}
+
+// Contains the region, location, and list of IP
+// addresses where checkers in the location run from.
+type UptimeCheckIp struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A broad region category in which the IP address is located.
+	Region UptimeCheckRegion `protobuf:"varint,1,opt,name=region,proto3,enum=google.monitoring.v3.UptimeCheckRegion" json:"region,omitempty"`
+	// A more specific location within the region that typically encodes
+	// a particular city/town/metro (and its containing state/province or country)
+	// within the broader umbrella region category.
+	Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
+	// The IP address from which the Uptime check originates. This is a fully
+	// specified IP address (not an IP address range). Most IP addresses, as of
+	// this publication, are in IPv4 format; however, one should not rely on the
+	// IP addresses being in IPv4 format indefinitely, and should support
+	// interpreting this field in either IPv4 or IPv6 format.
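+	//
+	// Hedged sketch, not part of the generated file: Go's net/netip package
+	// accepts both address families, so a consumer might parse the value as
+	//
+	//	addr, err := netip.ParseAddr(ip.GetIpAddress()) // works for IPv4 and IPv6
+	//
+	// where `ip` is a hypothetical *UptimeCheckIp and a non-nil err signals
+	// a malformed address.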
+	IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"`
+}
+
+func (x *UptimeCheckIp) Reset() {
+	*x = UptimeCheckIp{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_monitoring_v3_uptime_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *UptimeCheckIp) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UptimeCheckIp) ProtoMessage() {}
+
+func (x *UptimeCheckIp) ProtoReflect() protoreflect.Message {
+	mi := &file_google_monitoring_v3_uptime_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use UptimeCheckIp.ProtoReflect.Descriptor instead.
+func (*UptimeCheckIp) Descriptor() ([]byte, []int) {
+	return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *UptimeCheckIp) GetRegion() UptimeCheckRegion {
+	if x != nil {
+		return x.Region
+	}
+	return UptimeCheckRegion_REGION_UNSPECIFIED
+}
+
+func (x *UptimeCheckIp) GetLocation() string {
+	if x != nil {
+		return x.Location
+	}
+	return ""
+}
+
+func (x *UptimeCheckIp) GetIpAddress() string {
+	if x != nil {
+		return x.IpAddress
+	}
+	return ""
+}
+
+// A Synthetic Monitor deployed to a Cloud Functions V2 instance.
+type SyntheticMonitorTarget_CloudFunctionV2Target struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required. Fully qualified GCFv2 resource name
+	// i.e. `projects/{project}/locations/{location}/functions/{function}`
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Output only. The `cloud_run_revision` Monitored Resource associated with
+	// the GCFv2. The Synthetic Monitor execution results (metrics, logs, and
+	// spans) are reported against this Monitored Resource. This field is output
+	// only.
+	CloudRunRevision *monitoredres.MonitoredResource `protobuf:"bytes,2,opt,name=cloud_run_revision,json=cloudRunRevision,proto3" json:"cloud_run_revision,omitempty"`
+}
+
+func (x *SyntheticMonitorTarget_CloudFunctionV2Target) Reset() {
+	*x = SyntheticMonitorTarget_CloudFunctionV2Target{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_monitoring_v3_uptime_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *SyntheticMonitorTarget_CloudFunctionV2Target) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SyntheticMonitorTarget_CloudFunctionV2Target) ProtoMessage() {}
+
+func (x *SyntheticMonitorTarget_CloudFunctionV2Target) ProtoReflect() protoreflect.Message {
+	mi := &file_google_monitoring_v3_uptime_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use SyntheticMonitorTarget_CloudFunctionV2Target.ProtoReflect.Descriptor instead.
+func (*SyntheticMonitorTarget_CloudFunctionV2Target) Descriptor() ([]byte, []int) {
+	return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *SyntheticMonitorTarget_CloudFunctionV2Target) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *SyntheticMonitorTarget_CloudFunctionV2Target) GetCloudRunRevision() *monitoredres.MonitoredResource {
+	if x != nil {
+		return x.CloudRunRevision
+	}
+	return nil
+}
+
+// The resource submessage for group checks. It can be used instead of a
+// monitored resource when multiple resources are being monitored.
+type UptimeCheckConfig_ResourceGroup struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The group of resources being monitored. Should be only the `[GROUP_ID]`,
+	// and not the full path
+	// `projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]`.
+	GroupId string `protobuf:"bytes,1,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
+	// The resource type of the group members.
+	ResourceType GroupResourceType `protobuf:"varint,2,opt,name=resource_type,json=resourceType,proto3,enum=google.monitoring.v3.GroupResourceType" json:"resource_type,omitempty"`
+}
+
+func (x *UptimeCheckConfig_ResourceGroup) Reset() {
+	*x = UptimeCheckConfig_ResourceGroup{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_monitoring_v3_uptime_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *UptimeCheckConfig_ResourceGroup) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UptimeCheckConfig_ResourceGroup) ProtoMessage() {}
+
+func (x *UptimeCheckConfig_ResourceGroup) ProtoReflect() protoreflect.Message {
+	mi := &file_google_monitoring_v3_uptime_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_ResourceGroup.ProtoReflect.Descriptor instead.
+func (*UptimeCheckConfig_ResourceGroup) Descriptor() ([]byte, []int) {
+	return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *UptimeCheckConfig_ResourceGroup) GetGroupId() string {
+	if x != nil {
+		return x.GroupId
+	}
+	return ""
+}
+
+func (x *UptimeCheckConfig_ResourceGroup) GetResourceType() GroupResourceType {
+	if x != nil {
+		return x.ResourceType
+	}
+	return GroupResourceType_RESOURCE_TYPE_UNSPECIFIED
+}
+
+// Information involved in sending ICMP pings alongside public HTTP/TCP
+// checks. For HTTP, the pings are performed for each part of the redirect
+// chain.
+type UptimeCheckConfig_PingConfig struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Number of ICMP pings. A maximum of 3 ICMP pings is currently supported.
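+	//
+	// Illustrative sketch, not part of the generated file: requesting the
+	// current maximum of 3 pings on a hypothetical
+	// *UptimeCheckConfig_HttpCheck named `httpCheck` (assuming this package
+	// is imported as `monitoringpb`):
+	//
+	//	httpCheck.PingConfig = &monitoringpb.UptimeCheckConfig_PingConfig{
+	//		PingsCount: 3,
+	//	}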
+ PingsCount int32 `protobuf:"varint,1,opt,name=pings_count,json=pingsCount,proto3" json:"pings_count,omitempty"` +} + +func (x *UptimeCheckConfig_PingConfig) Reset() { + *x = UptimeCheckConfig_PingConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_PingConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_PingConfig) ProtoMessage() {} + +func (x *UptimeCheckConfig_PingConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_PingConfig.ProtoReflect.Descriptor instead. +func (*UptimeCheckConfig_PingConfig) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 1} +} + +func (x *UptimeCheckConfig_PingConfig) GetPingsCount() int32 { + if x != nil { + return x.PingsCount + } + return 0 +} + +// Information involved in an HTTP/HTTPS Uptime check request. +type UptimeCheckConfig_HttpCheck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The HTTP request method to use for the check. If set to + // `METHOD_UNSPECIFIED` then `request_method` defaults to `GET`. + RequestMethod UptimeCheckConfig_HttpCheck_RequestMethod `protobuf:"varint,8,opt,name=request_method,json=requestMethod,proto3,enum=google.monitoring.v3.UptimeCheckConfig_HttpCheck_RequestMethod" json:"request_method,omitempty"` + // If `true`, use HTTPS instead of HTTP to run the check. + UseSsl bool `protobuf:"varint,1,opt,name=use_ssl,json=useSsl,proto3" json:"use_ssl,omitempty"` + // Optional (defaults to "/"). The path to the page against which to run + // the check. Will be combined with the `host` (specified within the + // `monitored_resource`) and `port` to construct the full URL. If the + // provided path does not begin with "/", a "/" will be prepended + // automatically. + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + // Optional (defaults to 80 when `use_ssl` is `false`, and 443 when + // `use_ssl` is `true`). The TCP port on the HTTP server against which to + // run the check. Will be combined with host (specified within the + // `monitored_resource`) and `path` to construct the full URL. + Port int32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` + // The authentication information. Optional when creating an HTTP check; + // defaults to empty. + // Do not set both `auth_method` and `auth_info`. + AuthInfo *UptimeCheckConfig_HttpCheck_BasicAuthentication `protobuf:"bytes,4,opt,name=auth_info,json=authInfo,proto3" json:"auth_info,omitempty"` + // Boolean specifying whether to encrypt the header information. + // Encryption should be specified for any headers related to authentication + // that you do not wish to be seen when retrieving the configuration. The + // server will be responsible for encrypting the headers. 
+	// On Get/List calls, if `mask_headers` is set to `true` then the headers
+	// will be obscured with `******`.
+	MaskHeaders bool `protobuf:"varint,5,opt,name=mask_headers,json=maskHeaders,proto3" json:"mask_headers,omitempty"`
+	// The list of headers to send as part of the Uptime check request.
+	// If two headers have the same key and different values, they should
+	// be entered as a single header, with the value being a comma-separated
+	// list of all the desired values as described at
+	// https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31).
+	// Entering two separate headers with the same key in a Create call will
+	// cause the first to be overwritten by the second.
+	// The maximum number of headers allowed is 100.
+	Headers map[string]string `protobuf:"bytes,6,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// The content type header to use for the check. The following
+	// configurations result in errors:
+	//  1. Content type is specified in both the `headers` field and the
+	//     `content_type` field.
+	//  2. Request method is `GET` and `content_type` is not `TYPE_UNSPECIFIED`.
+	//  3. Request method is `POST` and `content_type` is `TYPE_UNSPECIFIED`.
+	//  4. Request method is `POST` and a "Content-Type" header is provided via
+	//     `headers` field. The `content_type` field should be used instead.
+	ContentType UptimeCheckConfig_HttpCheck_ContentType `protobuf:"varint,9,opt,name=content_type,json=contentType,proto3,enum=google.monitoring.v3.UptimeCheckConfig_HttpCheck_ContentType" json:"content_type,omitempty"`
+	// A user provided content type header to use for the check. The invalid
+	// configurations outlined in the `content_type` field apply to
+	// `custom_content_type`, as well as the following:
+	//  1. `content_type` is `URL_ENCODED` and `custom_content_type` is set.
+	//  2. `content_type` is `USER_PROVIDED` and `custom_content_type` is not
+	//     set.
+	CustomContentType string `protobuf:"bytes,13,opt,name=custom_content_type,json=customContentType,proto3" json:"custom_content_type,omitempty"`
+	// Boolean specifying whether to include SSL certificate validation as a
+	// part of the Uptime check. Only applies to checks where
+	// `monitored_resource` is set to `uptime_url`. If `use_ssl` is `false`,
+	// setting `validate_ssl` to `true` has no effect.
+	ValidateSsl bool `protobuf:"varint,7,opt,name=validate_ssl,json=validateSsl,proto3" json:"validate_ssl,omitempty"`
+	// The request body associated with the HTTP POST request. If `content_type`
+	// is `URL_ENCODED`, the body passed in must be URL-encoded. Users can
+	// provide a `Content-Length` header via the `headers` field or the API will
+	// do so. If the `request_method` is `GET` and `body` is not empty, the API
+	// will return an error. The maximum byte size is 1 megabyte.
+	//
+	// Note: If client libraries aren't used (they perform the conversion
+	// automatically), base64-encode your `body` data, since the field is of
+	// `bytes` type.
+	Body []byte `protobuf:"bytes,10,opt,name=body,proto3" json:"body,omitempty"`
+	// If present, the check will only pass if the HTTP response status code is
+	// in this set of status codes. If empty, the check will only pass if the
+	// HTTP status code is in the range 200-299.
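+	//
+	// Illustrative sketch, not part of the generated file: accepting the
+	// whole 2xx class plus 301 (assuming this package is imported as
+	// `monitoringpb`):
+	//
+	//	codes := []*monitoringpb.UptimeCheckConfig_HttpCheck_ResponseStatusCode{
+	//		{StatusCode: &monitoringpb.UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_{
+	//			StatusClass: monitoringpb.UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_2XX,
+	//		}},
+	//		{StatusCode: &monitoringpb.UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue{
+	//			StatusValue: 301,
+	//		}},
+	//	}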
+ AcceptedResponseStatusCodes []*UptimeCheckConfig_HttpCheck_ResponseStatusCode `protobuf:"bytes,11,rep,name=accepted_response_status_codes,json=acceptedResponseStatusCodes,proto3" json:"accepted_response_status_codes,omitempty"` + // Contains information needed to add pings to an HTTP check. + PingConfig *UptimeCheckConfig_PingConfig `protobuf:"bytes,12,opt,name=ping_config,json=pingConfig,proto3" json:"ping_config,omitempty"` + // This field is optional and should be set only by users interested in + // an authenticated uptime check. + // Do not set both `auth_method` and `auth_info`. + // + // Types that are assignable to AuthMethod: + // + // *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ + AuthMethod isUptimeCheckConfig_HttpCheck_AuthMethod `protobuf_oneof:"auth_method"` +} + +func (x *UptimeCheckConfig_HttpCheck) Reset() { + *x = UptimeCheckConfig_HttpCheck{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_HttpCheck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_HttpCheck) ProtoMessage() {} + +func (x *UptimeCheckConfig_HttpCheck) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck.ProtoReflect.Descriptor instead. +func (*UptimeCheckConfig_HttpCheck) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2} +} + +func (x *UptimeCheckConfig_HttpCheck) GetRequestMethod() UptimeCheckConfig_HttpCheck_RequestMethod { + if x != nil { + return x.RequestMethod + } + return UptimeCheckConfig_HttpCheck_METHOD_UNSPECIFIED +} + +func (x *UptimeCheckConfig_HttpCheck) GetUseSsl() bool { + if x != nil { + return x.UseSsl + } + return false +} + +func (x *UptimeCheckConfig_HttpCheck) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *UptimeCheckConfig_HttpCheck) GetPort() int32 { + if x != nil { + return x.Port + } + return 0 +} + +func (x *UptimeCheckConfig_HttpCheck) GetAuthInfo() *UptimeCheckConfig_HttpCheck_BasicAuthentication { + if x != nil { + return x.AuthInfo + } + return nil +} + +func (x *UptimeCheckConfig_HttpCheck) GetMaskHeaders() bool { + if x != nil { + return x.MaskHeaders + } + return false +} + +func (x *UptimeCheckConfig_HttpCheck) GetHeaders() map[string]string { + if x != nil { + return x.Headers + } + return nil +} + +func (x *UptimeCheckConfig_HttpCheck) GetContentType() UptimeCheckConfig_HttpCheck_ContentType { + if x != nil { + return x.ContentType + } + return UptimeCheckConfig_HttpCheck_TYPE_UNSPECIFIED +} + +func (x *UptimeCheckConfig_HttpCheck) GetCustomContentType() string { + if x != nil { + return x.CustomContentType + } + return "" +} + +func (x *UptimeCheckConfig_HttpCheck) GetValidateSsl() bool { + if x != nil { + return x.ValidateSsl + } + return false +} + +func (x *UptimeCheckConfig_HttpCheck) GetBody() []byte { + if x != nil { + return x.Body + } + return nil +} + +func (x *UptimeCheckConfig_HttpCheck) GetAcceptedResponseStatusCodes() []*UptimeCheckConfig_HttpCheck_ResponseStatusCode { + if x != nil { + return x.AcceptedResponseStatusCodes + } + return nil +} + +func (x 
*UptimeCheckConfig_HttpCheck) GetPingConfig() *UptimeCheckConfig_PingConfig { + if x != nil { + return x.PingConfig + } + return nil +} + +func (m *UptimeCheckConfig_HttpCheck) GetAuthMethod() isUptimeCheckConfig_HttpCheck_AuthMethod { + if m != nil { + return m.AuthMethod + } + return nil +} + +func (x *UptimeCheckConfig_HttpCheck) GetServiceAgentAuthentication() *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication { + if x, ok := x.GetAuthMethod().(*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_); ok { + return x.ServiceAgentAuthentication + } + return nil +} + +type isUptimeCheckConfig_HttpCheck_AuthMethod interface { + isUptimeCheckConfig_HttpCheck_AuthMethod() +} + +type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ struct { + // If specified, Uptime will generate and attach an OIDC JWT token for the + // Monitoring service agent service account as an `Authorization` header + // in the HTTP request when probing. + ServiceAgentAuthentication *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication `protobuf:"bytes,14,opt,name=service_agent_authentication,json=serviceAgentAuthentication,proto3,oneof"` +} + +func (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_) isUptimeCheckConfig_HttpCheck_AuthMethod() { +} + +// Information required for a TCP Uptime check request. +type UptimeCheckConfig_TcpCheck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The TCP port on the server against which to run the check. Will be + // combined with host (specified within the `monitored_resource`) to + // construct the full URL. Required. + Port int32 `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"` + // Contains information needed to add pings to a TCP check. + PingConfig *UptimeCheckConfig_PingConfig `protobuf:"bytes,2,opt,name=ping_config,json=pingConfig,proto3" json:"ping_config,omitempty"` +} + +func (x *UptimeCheckConfig_TcpCheck) Reset() { + *x = UptimeCheckConfig_TcpCheck{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_TcpCheck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_TcpCheck) ProtoMessage() {} + +func (x *UptimeCheckConfig_TcpCheck) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_TcpCheck.ProtoReflect.Descriptor instead. +func (*UptimeCheckConfig_TcpCheck) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 3} +} + +func (x *UptimeCheckConfig_TcpCheck) GetPort() int32 { + if x != nil { + return x.Port + } + return 0 +} + +func (x *UptimeCheckConfig_TcpCheck) GetPingConfig() *UptimeCheckConfig_PingConfig { + if x != nil { + return x.PingConfig + } + return nil +} + +// Optional. Used to perform content matching. This allows matching based on +// substrings and regular expressions, together with their negations. Only the +// first 4 MB of an HTTP or HTTPS check's response (and the first +// 1 MB of a TCP check's response) are examined for purposes of content +// matching. 
+type UptimeCheckConfig_ContentMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // String, regex or JSON content to match. Maximum 1024 bytes. An empty + // `content` string indicates no content matching is to be performed. + Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` + // The type of content matcher that will be applied to the server output, + // compared to the `content` string when the check is run. + Matcher UptimeCheckConfig_ContentMatcher_ContentMatcherOption `protobuf:"varint,2,opt,name=matcher,proto3,enum=google.monitoring.v3.UptimeCheckConfig_ContentMatcher_ContentMatcherOption" json:"matcher,omitempty"` + // Certain `ContentMatcherOption` types require additional information. + // `MATCHES_JSON_PATH` or `NOT_MATCHES_JSON_PATH` require a + // `JsonPathMatcher`; not used for other options. + // + // Types that are assignable to AdditionalMatcherInfo: + // + // *UptimeCheckConfig_ContentMatcher_JsonPathMatcher_ + AdditionalMatcherInfo isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo `protobuf_oneof:"additional_matcher_info"` +} + +func (x *UptimeCheckConfig_ContentMatcher) Reset() { + *x = UptimeCheckConfig_ContentMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_ContentMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_ContentMatcher) ProtoMessage() {} + +func (x *UptimeCheckConfig_ContentMatcher) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_ContentMatcher.ProtoReflect.Descriptor instead. 
+func (*UptimeCheckConfig_ContentMatcher) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4} +} + +func (x *UptimeCheckConfig_ContentMatcher) GetContent() string { + if x != nil { + return x.Content + } + return "" +} + +func (x *UptimeCheckConfig_ContentMatcher) GetMatcher() UptimeCheckConfig_ContentMatcher_ContentMatcherOption { + if x != nil { + return x.Matcher + } + return UptimeCheckConfig_ContentMatcher_CONTENT_MATCHER_OPTION_UNSPECIFIED +} + +func (m *UptimeCheckConfig_ContentMatcher) GetAdditionalMatcherInfo() isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo { + if m != nil { + return m.AdditionalMatcherInfo + } + return nil +} + +func (x *UptimeCheckConfig_ContentMatcher) GetJsonPathMatcher() *UptimeCheckConfig_ContentMatcher_JsonPathMatcher { + if x, ok := x.GetAdditionalMatcherInfo().(*UptimeCheckConfig_ContentMatcher_JsonPathMatcher_); ok { + return x.JsonPathMatcher + } + return nil +} + +type isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo interface { + isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo() +} + +type UptimeCheckConfig_ContentMatcher_JsonPathMatcher_ struct { + // Matcher information for `MATCHES_JSON_PATH` and `NOT_MATCHES_JSON_PATH` + JsonPathMatcher *UptimeCheckConfig_ContentMatcher_JsonPathMatcher `protobuf:"bytes,3,opt,name=json_path_matcher,json=jsonPathMatcher,proto3,oneof"` +} + +func (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher_) isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo() { +} + +// The authentication parameters to provide to the specified resource or +// URL that requires a username and password. Currently, only +// [Basic HTTP authentication](https://tools.ietf.org/html/rfc7617) is +// supported in Uptime checks. +type UptimeCheckConfig_HttpCheck_BasicAuthentication struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The username to use when authenticating with the HTTP server. + Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` + // The password to use when authenticating with the HTTP server. + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` +} + +func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) Reset() { + *x = UptimeCheckConfig_HttpCheck_BasicAuthentication{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoMessage() {} + +func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_BasicAuthentication.ProtoReflect.Descriptor instead. 
+func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 0} +} + +func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetUsername() string { + if x != nil { + return x.Username + } + return "" +} + +func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetPassword() string { + if x != nil { + return x.Password + } + return "" +} + +// A status to accept. Either a status code class like "2xx", or an integer +// status code like "200". +type UptimeCheckConfig_HttpCheck_ResponseStatusCode struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Either a specific value or a class of status codes. + // + // Types that are assignable to StatusCode: + // + // *UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue + // *UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_ + StatusCode isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode `protobuf_oneof:"status_code"` +} + +func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) Reset() { + *x = UptimeCheckConfig_HttpCheck_ResponseStatusCode{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode) ProtoMessage() {} + +func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_ResponseStatusCode.ProtoReflect.Descriptor instead. +func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 1} +} + +func (m *UptimeCheckConfig_HttpCheck_ResponseStatusCode) GetStatusCode() isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode { + if m != nil { + return m.StatusCode + } + return nil +} + +func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) GetStatusValue() int32 { + if x, ok := x.GetStatusCode().(*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue); ok { + return x.StatusValue + } + return 0 +} + +func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) GetStatusClass() UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass { + if x, ok := x.GetStatusCode().(*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_); ok { + return x.StatusClass + } + return UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_UNSPECIFIED +} + +type isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode interface { + isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode() +} + +type UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue struct { + // A status code to accept. + StatusValue int32 `protobuf:"varint,1,opt,name=status_value,json=statusValue,proto3,oneof"` +} + +type UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_ struct { + // A class of status codes to accept. 
+ StatusClass UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass `protobuf:"varint,2,opt,name=status_class,json=statusClass,proto3,enum=google.monitoring.v3.UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass,oneof"` +} + +func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue) isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode() { +} + +func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_) isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode() { +} + +// Contains information needed for generating an +// [OpenID Connect +// token](https://developers.google.com/identity/protocols/OpenIDConnect). +// The OIDC token will be generated for the Monitoring service agent service +// account. +type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Type of authentication. + Type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType `protobuf:"varint,1,opt,name=type,proto3,enum=google.monitoring.v3.UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType" json:"type,omitempty"` +} + +func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) Reset() { + *x = UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) ProtoMessage() {} + +func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication.ProtoReflect.Descriptor instead. +func (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 2} +} + +func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) GetType() UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType { + if x != nil { + return x.Type + } + return UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED +} + +// Information needed to perform a JSONPath content match. +// Used for `ContentMatcherOption::MATCHES_JSON_PATH` and +// `ContentMatcherOption::NOT_MATCHES_JSON_PATH`. +type UptimeCheckConfig_ContentMatcher_JsonPathMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // JSONPath within the response output pointing to the expected + // `ContentMatcher::content` to match against. 
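+	//
+	// Illustrative sketch, not part of the generated file: pointing at a
+	// nested field and requiring an exact match (assuming this package is
+	// imported as `monitoringpb`):
+	//
+	//	m := &monitoringpb.UptimeCheckConfig_ContentMatcher_JsonPathMatcher{
+	//		JsonPath:    "$.status.state",
+	//		JsonMatcher: monitoringpb.UptimeCheckConfig_ContentMatcher_JsonPathMatcher_EXACT_MATCH,
+	//	}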
+ JsonPath string `protobuf:"bytes,1,opt,name=json_path,json=jsonPath,proto3" json:"json_path,omitempty"` + // The type of JSONPath match that will be applied to the JSON output + // (`ContentMatcher.content`) + JsonMatcher UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption `protobuf:"varint,2,opt,name=json_matcher,json=jsonMatcher,proto3,enum=google.monitoring.v3.UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption" json:"json_matcher,omitempty"` +} + +func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) Reset() { + *x = UptimeCheckConfig_ContentMatcher_JsonPathMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher) ProtoMessage() {} + +func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_ContentMatcher_JsonPathMatcher.ProtoReflect.Descriptor instead. +func (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4, 0} +} + +func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) GetJsonPath() string { + if x != nil { + return x.JsonPath + } + return "" +} + +func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) GetJsonMatcher() UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption { + if x != nil { + return x.JsonMatcher + } + return UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JSON_PATH_MATCHER_OPTION_UNSPECIFIED +} + +var File_google_monitoring_v3_uptime_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_uptime_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, + 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, + 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa1, 0x02, 0x0a, 0x0f, 0x49, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, + 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, + 0x19, 0x0a, 0x08, 0x67, 0x63, 0x70, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x67, 0x63, 0x70, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x65, + 0x65, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x49, 0x64, 0x12, 0x41, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x33, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, + 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, + 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x3a, 0x02, 0x18, 0x01, 0x22, 0xc4, + 0x02, 0x0a, 0x16, 0x53, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x70, 0x0a, 0x11, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x32, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x79, 0x6e, 0x74, + 0x68, 0x65, 0x74, 0x69, 0x63, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x54, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x56, 0x32, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x48, 0x00, 0x52, 0x0f, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x32, 0x1a, 0xad, 0x01, 0x0a, 0x15, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x32, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x42, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x2e, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x28, 0x0a, 0x26, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x46, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x50, 0x0a, 0x12, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x52, 0x75, 0x6e, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x08, 
0x0a, 0x06, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0x94, 0x23, 0x0a, 0x11, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, + 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x12, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x48, 0x00, 0x52, 0x11, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x5e, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x5b, 0x0a, 0x11, 0x73, 0x79, 0x6e, 0x74, 0x68, + 0x65, 0x74, 0x69, 0x63, 0x5f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x18, 0x15, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x79, 0x6e, 0x74, 0x68, 0x65, + 0x74, 0x69, 0x63, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x48, 0x00, 0x52, 0x10, 0x73, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x12, 0x52, 0x0a, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x01, 0x52, 0x09, 0x68, + 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x4f, 0x0a, 0x09, 0x74, 0x63, 0x70, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x01, 0x52, + 0x08, 0x74, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x31, 0x0a, 0x06, 0x70, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x33, 0x0a, 0x07, + 0x74, 
0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x12, 0x61, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x73, 0x12, 0x56, 0x0a, 0x0c, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x0b, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x52, 0x0a, 0x10, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x52, + 0x0f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x23, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, + 0x0f, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x69, 0x73, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x56, 0x0a, 0x11, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x42, 0x02, 0x18, 0x01, 0x52, 0x10, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x58, 0x0a, + 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x14, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x73, 0x65, 0x72, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, + 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x78, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x49, 0x64, 0x12, 0x4c, 
0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x1a, 0x2d, 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x1f, 0x0a, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x1a, 0xef, 0x0e, 0x0a, 0x09, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x66, + 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x5f, 0x73, 0x73, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x75, 0x73, 0x65, 0x53, 0x73, 0x6c, 0x12, + 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x62, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x42, 0x61, + 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x08, 0x61, 0x75, 0x74, 0x68, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x6d, + 0x61, 0x73, 0x6b, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0b, 0x6d, 0x61, 0x73, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x58, + 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x60, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 
0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x73, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x73, 0x6c, 0x12, 0x12, 0x0a, + 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, + 0x79, 0x12, 0x89, 0x01, 0x0a, 0x1e, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, + 0x52, 0x1b, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x53, 0x0a, + 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x69, 0x6e, 0x67, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x90, 0x01, 0x0a, 0x1c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, + 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x1a, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x4d, 0x0a, 0x13, 0x42, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, + 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, + 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 
0x61, 0x73, 0x73, + 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, + 0x77, 0x6f, 0x72, 0x64, 0x1a, 0xf6, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x75, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x50, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x22, 0xb4, 0x01, 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x54, 0x41, 0x54, 0x55, + 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, + 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x31, 0x58, 0x58, 0x10, 0x64, 0x12, 0x15, 0x0a, 0x10, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x32, 0x58, 0x58, 0x10, + 0xc8, 0x01, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, + 0x53, 0x53, 0x5f, 0x33, 0x58, 0x58, 0x10, 0xac, 0x02, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, + 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x34, 0x58, 0x58, 0x10, 0x90, 0x03, + 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, + 0x5f, 0x35, 0x58, 0x58, 0x10, 0xf4, 0x03, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, + 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x41, 0x4e, 0x59, 0x10, 0xe8, 0x07, 0x42, 0x0d, + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x1a, 0x82, 0x02, + 0x0a, 0x1a, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, + 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7f, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x6b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x63, 0x0a, + 0x1e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, + 0x68, 
0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x31, 0x0a, 0x2d, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x41, 0x47, 0x45, 0x4e, 0x54, + 0x5f, 0x41, 0x55, 0x54, 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4f, 0x49, 0x44, 0x43, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, + 0x10, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3a, + 0x0a, 0x0d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, + 0x16, 0x0a, 0x12, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x54, 0x10, 0x01, + 0x12, 0x08, 0x0a, 0x04, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x02, 0x22, 0x47, 0x0a, 0x0b, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x0f, 0x0a, 0x0b, 0x55, 0x52, 0x4c, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x45, 0x44, 0x10, 0x01, + 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x53, 0x45, 0x52, 0x5f, 0x50, 0x52, 0x4f, 0x56, 0x49, 0x44, 0x45, + 0x44, 0x10, 0x02, 0x42, 0x0d, 0x0a, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x1a, 0x73, 0x0a, 0x08, 0x54, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x12, + 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, + 0x72, 0x74, 0x12, 0x53, 0x0a, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x69, 0x6e, + 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x84, 0x06, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x12, 0x65, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x4b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, + 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x74, 0x0a, 0x11, 0x6a, + 0x73, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x46, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4a, + 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, + 0x52, 0x0f, 0x6a, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x1a, 0x94, 0x02, 0x0a, 0x0f, 0x4a, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x50, 0x61, + 0x74, 0x68, 0x12, 0x7f, 0x0a, 0x0c, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x5c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x22, 0x63, 0x0a, 0x15, 0x4a, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x24, + 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x50, 0x41, 0x54, 0x48, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x45, + 0x52, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x58, 0x41, 0x43, 0x54, 0x5f, + 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x47, 0x45, 0x58, + 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x02, 0x22, 0xc8, 0x01, 0x0a, 0x14, 0x43, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x26, 0x0a, 0x22, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x54, 0x5f, 0x4d, 0x41, 0x54, + 0x43, 0x48, 0x45, 0x52, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x43, 0x4f, 0x4e, + 0x54, 0x41, 0x49, 0x4e, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x17, + 0x0a, 0x13, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x53, 0x5f, 0x53, + 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x4d, 0x41, 0x54, 0x43, 0x48, + 0x45, 0x53, 0x5f, 0x52, 0x45, 0x47, 0x45, 0x58, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, + 0x54, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x45, 0x53, 0x5f, 0x52, 0x45, 0x47, 0x45, 0x58, 0x10, + 0x04, 0x12, 0x15, 0x0a, 0x11, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x45, 0x53, 0x5f, 0x4a, 0x53, 0x4f, + 0x4e, 0x5f, 0x50, 0x41, 0x54, 0x48, 0x10, 0x05, 0x12, 0x19, 0x0a, 0x15, 0x4e, 0x4f, 0x54, 0x5f, + 0x4d, 0x41, 0x54, 0x43, 0x48, 0x45, 0x53, 0x5f, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x50, 0x41, 0x54, + 0x48, 0x10, 0x06, 0x42, 0x19, 0x0a, 0x17, 0x61, 0x64, 
0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x1a, 0x3d, + 0x0a, 0x0f, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x55, 0x0a, + 0x0b, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x18, + 0x43, 0x48, 0x45, 0x43, 0x4b, 0x45, 0x52, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, + 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x54, + 0x41, 0x54, 0x49, 0x43, 0x5f, 0x49, 0x50, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x45, 0x52, 0x53, + 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x56, 0x50, 0x43, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x45, + 0x52, 0x53, 0x10, 0x03, 0x3a, 0xf3, 0x01, 0xea, 0x41, 0xef, 0x01, 0x0a, 0x2b, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x75, 0x70, 0x74, + 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, + 0x7b, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0x45, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x7d, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0x39, 0x66, 0x6f, + 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x75, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x73, 0x2f, 0x7b, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0x0a, 0x0a, 0x08, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x14, 0x0a, 0x12, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0x8b, 0x01, 0x0a, + 0x0d, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x12, 0x3f, + 0x0a, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, + 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x69, + 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 
0x28, 0x09, 0x52, + 0x09, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2a, 0x95, 0x01, 0x0a, 0x11, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x47, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, + 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x53, 0x41, 0x10, + 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x55, 0x52, 0x4f, 0x50, 0x45, 0x10, 0x02, 0x12, 0x11, 0x0a, + 0x0d, 0x53, 0x4f, 0x55, 0x54, 0x48, 0x5f, 0x41, 0x4d, 0x45, 0x52, 0x49, 0x43, 0x41, 0x10, 0x03, + 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x53, 0x49, 0x41, 0x5f, 0x50, 0x41, 0x43, 0x49, 0x46, 0x49, 0x43, + 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x53, 0x41, 0x5f, 0x4f, 0x52, 0x45, 0x47, 0x4f, 0x4e, + 0x10, 0x05, 0x12, 0x0c, 0x0a, 0x08, 0x55, 0x53, 0x41, 0x5f, 0x49, 0x4f, 0x57, 0x41, 0x10, 0x06, + 0x12, 0x10, 0x0a, 0x0c, 0x55, 0x53, 0x41, 0x5f, 0x56, 0x49, 0x52, 0x47, 0x49, 0x4e, 0x49, 0x41, + 0x10, 0x07, 0x2a, 0x5b, 0x0a, 0x11, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x19, 0x52, 0x45, 0x53, 0x4f, 0x55, + 0x52, 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x53, 0x54, 0x41, 0x4e, + 0x43, 0x45, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x57, 0x53, 0x5f, 0x45, 0x4c, 0x42, 0x5f, + 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x45, 0x52, 0x10, 0x02, 0x42, + 0xaf, 0x02, 0xea, 0x41, 0x66, 0x0a, 0x26, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3c, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x7b, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x0a, 0x18, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, + 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, + 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + 
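+ // Editor's note: these two variables implement protoc-gen-go's usual
+ // lazy-compression pattern. rawDescData initially aliases the raw
+ // descriptor above and is replaced, exactly once, by its GZIP-compressed
+ // form the first time a generated Descriptor() method calls rawDescGZIP
+ // below. Once the init function at the bottom of this file has run, the
+ // registered descriptor can also be recovered at runtime, e.g.
+ //
+ //	fd, err := protoregistry.GlobalFiles.FindFileByPath(
+ //		"google/monitoring/v3/uptime.proto")
+ //
+ // (illustrative sketch only; protoregistry is not imported by this file).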
file_google_monitoring_v3_uptime_proto_rawDescOnce sync.Once + file_google_monitoring_v3_uptime_proto_rawDescData = file_google_monitoring_v3_uptime_proto_rawDesc +) + +func file_google_monitoring_v3_uptime_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_uptime_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_uptime_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_uptime_proto_rawDescData) + }) + return file_google_monitoring_v3_uptime_proto_rawDescData +} + +var file_google_monitoring_v3_uptime_proto_enumTypes = make([]protoimpl.EnumInfo, 10) +var file_google_monitoring_v3_uptime_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_google_monitoring_v3_uptime_proto_goTypes = []any{ + (UptimeCheckRegion)(0), // 0: google.monitoring.v3.UptimeCheckRegion + (GroupResourceType)(0), // 1: google.monitoring.v3.GroupResourceType + (InternalChecker_State)(0), // 2: google.monitoring.v3.InternalChecker.State + (UptimeCheckConfig_CheckerType)(0), // 3: google.monitoring.v3.UptimeCheckConfig.CheckerType + (UptimeCheckConfig_HttpCheck_RequestMethod)(0), // 4: google.monitoring.v3.UptimeCheckConfig.HttpCheck.RequestMethod + (UptimeCheckConfig_HttpCheck_ContentType)(0), // 5: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ContentType + (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass)(0), // 6: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.StatusClass + (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType)(0), // 7: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication.ServiceAgentAuthenticationType + (UptimeCheckConfig_ContentMatcher_ContentMatcherOption)(0), // 8: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.ContentMatcherOption + (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption)(0), // 9: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.JsonPathMatcherOption + (*InternalChecker)(nil), // 10: google.monitoring.v3.InternalChecker + (*SyntheticMonitorTarget)(nil), // 11: google.monitoring.v3.SyntheticMonitorTarget + (*UptimeCheckConfig)(nil), // 12: google.monitoring.v3.UptimeCheckConfig + (*UptimeCheckIp)(nil), // 13: google.monitoring.v3.UptimeCheckIp + (*SyntheticMonitorTarget_CloudFunctionV2Target)(nil), // 14: google.monitoring.v3.SyntheticMonitorTarget.CloudFunctionV2Target + (*UptimeCheckConfig_ResourceGroup)(nil), // 15: google.monitoring.v3.UptimeCheckConfig.ResourceGroup + (*UptimeCheckConfig_PingConfig)(nil), // 16: google.monitoring.v3.UptimeCheckConfig.PingConfig + (*UptimeCheckConfig_HttpCheck)(nil), // 17: google.monitoring.v3.UptimeCheckConfig.HttpCheck + (*UptimeCheckConfig_TcpCheck)(nil), // 18: google.monitoring.v3.UptimeCheckConfig.TcpCheck + (*UptimeCheckConfig_ContentMatcher)(nil), // 19: google.monitoring.v3.UptimeCheckConfig.ContentMatcher + nil, // 20: google.monitoring.v3.UptimeCheckConfig.UserLabelsEntry + (*UptimeCheckConfig_HttpCheck_BasicAuthentication)(nil), // 21: google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication + (*UptimeCheckConfig_HttpCheck_ResponseStatusCode)(nil), // 22: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode + (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication)(nil), // 23: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication + nil, // 24: google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry + (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher)(nil), // 25: 
google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher + (*monitoredres.MonitoredResource)(nil), // 26: google.api.MonitoredResource + (*durationpb.Duration)(nil), // 27: google.protobuf.Duration +} +var file_google_monitoring_v3_uptime_proto_depIdxs = []int32{ + 2, // 0: google.monitoring.v3.InternalChecker.state:type_name -> google.monitoring.v3.InternalChecker.State + 14, // 1: google.monitoring.v3.SyntheticMonitorTarget.cloud_function_v2:type_name -> google.monitoring.v3.SyntheticMonitorTarget.CloudFunctionV2Target + 26, // 2: google.monitoring.v3.UptimeCheckConfig.monitored_resource:type_name -> google.api.MonitoredResource + 15, // 3: google.monitoring.v3.UptimeCheckConfig.resource_group:type_name -> google.monitoring.v3.UptimeCheckConfig.ResourceGroup + 11, // 4: google.monitoring.v3.UptimeCheckConfig.synthetic_monitor:type_name -> google.monitoring.v3.SyntheticMonitorTarget + 17, // 5: google.monitoring.v3.UptimeCheckConfig.http_check:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck + 18, // 6: google.monitoring.v3.UptimeCheckConfig.tcp_check:type_name -> google.monitoring.v3.UptimeCheckConfig.TcpCheck + 27, // 7: google.monitoring.v3.UptimeCheckConfig.period:type_name -> google.protobuf.Duration + 27, // 8: google.monitoring.v3.UptimeCheckConfig.timeout:type_name -> google.protobuf.Duration + 19, // 9: google.monitoring.v3.UptimeCheckConfig.content_matchers:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher + 3, // 10: google.monitoring.v3.UptimeCheckConfig.checker_type:type_name -> google.monitoring.v3.UptimeCheckConfig.CheckerType + 0, // 11: google.monitoring.v3.UptimeCheckConfig.selected_regions:type_name -> google.monitoring.v3.UptimeCheckRegion + 10, // 12: google.monitoring.v3.UptimeCheckConfig.internal_checkers:type_name -> google.monitoring.v3.InternalChecker + 20, // 13: google.monitoring.v3.UptimeCheckConfig.user_labels:type_name -> google.monitoring.v3.UptimeCheckConfig.UserLabelsEntry + 0, // 14: google.monitoring.v3.UptimeCheckIp.region:type_name -> google.monitoring.v3.UptimeCheckRegion + 26, // 15: google.monitoring.v3.SyntheticMonitorTarget.CloudFunctionV2Target.cloud_run_revision:type_name -> google.api.MonitoredResource + 1, // 16: google.monitoring.v3.UptimeCheckConfig.ResourceGroup.resource_type:type_name -> google.monitoring.v3.GroupResourceType + 4, // 17: google.monitoring.v3.UptimeCheckConfig.HttpCheck.request_method:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.RequestMethod + 21, // 18: google.monitoring.v3.UptimeCheckConfig.HttpCheck.auth_info:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication + 24, // 19: google.monitoring.v3.UptimeCheckConfig.HttpCheck.headers:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry + 5, // 20: google.monitoring.v3.UptimeCheckConfig.HttpCheck.content_type:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ContentType + 22, // 21: google.monitoring.v3.UptimeCheckConfig.HttpCheck.accepted_response_status_codes:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode + 16, // 22: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ping_config:type_name -> google.monitoring.v3.UptimeCheckConfig.PingConfig + 23, // 23: google.monitoring.v3.UptimeCheckConfig.HttpCheck.service_agent_authentication:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication + 16, // 24: google.monitoring.v3.UptimeCheckConfig.TcpCheck.ping_config:type_name -> 
google.monitoring.v3.UptimeCheckConfig.PingConfig + 8, // 25: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.ContentMatcherOption + 25, // 26: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.json_path_matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher + 6, // 27: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.status_class:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.StatusClass + 7, // 28: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication.type:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication.ServiceAgentAuthenticationType + 9, // 29: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.json_matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.JsonPathMatcherOption + 30, // [30:30] is the sub-list for method output_type + 30, // [30:30] is the sub-list for method input_type + 30, // [30:30] is the sub-list for extension type_name + 30, // [30:30] is the sub-list for extension extendee + 0, // [0:30] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_uptime_proto_init() } +func file_google_monitoring_v3_uptime_proto_init() { + if File_google_monitoring_v3_uptime_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_uptime_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*InternalChecker); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*SyntheticMonitorTarget); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckIp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*SyntheticMonitorTarget_CloudFunctionV2Target); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_ResourceGroup); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_PingConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_HttpCheck); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + 
return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_TcpCheck); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_ContentMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_HttpCheck_BasicAuthentication); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_HttpCheck_ResponseStatusCode); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[15].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_ContentMatcher_JsonPathMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[1].OneofWrappers = []any{ + (*SyntheticMonitorTarget_CloudFunctionV2)(nil), + } + file_google_monitoring_v3_uptime_proto_msgTypes[2].OneofWrappers = []any{ + (*UptimeCheckConfig_MonitoredResource)(nil), + (*UptimeCheckConfig_ResourceGroup_)(nil), + (*UptimeCheckConfig_SyntheticMonitor)(nil), + (*UptimeCheckConfig_HttpCheck_)(nil), + (*UptimeCheckConfig_TcpCheck_)(nil), + } + file_google_monitoring_v3_uptime_proto_msgTypes[7].OneofWrappers = []any{ + (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_)(nil), + } + file_google_monitoring_v3_uptime_proto_msgTypes[9].OneofWrappers = []any{ + (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher_)(nil), + } + file_google_monitoring_v3_uptime_proto_msgTypes[12].OneofWrappers = []any{ + (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue)(nil), + (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_uptime_proto_rawDesc, + NumEnums: 10, + NumMessages: 16, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_uptime_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_uptime_proto_depIdxs, + EnumInfos: file_google_monitoring_v3_uptime_proto_enumTypes, + MessageInfos: file_google_monitoring_v3_uptime_proto_msgTypes, + }.Build() + File_google_monitoring_v3_uptime_proto = out.File + file_google_monitoring_v3_uptime_proto_rawDesc = nil + file_google_monitoring_v3_uptime_proto_goTypes = nil + file_google_monitoring_v3_uptime_proto_depIdxs = nil +} diff --git 
a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go new file mode 100644 index 00000000000..d4ba902fb07 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go @@ -0,0 +1,1226 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/uptime_service.proto + +package monitoringpb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The protocol for the `ListUptimeCheckConfigs` request. +type ListUptimeCheckConfigsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose + // Uptime check configurations are listed. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // If provided, this field specifies the criteria that must be met by + // uptime checks to be included in the response. + // + // For more details, see [Filtering + // syntax](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering#filter_syntax). + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // The maximum number of results to return in a single response. The server + // may further constrain the maximum number of results returned in a single + // page. If the page_size is <=0, the server will decide the number of results + // to be returned. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return more results from the previous method call. 
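+ // For illustration (editor's sketch, not part of the generated file): a
+ // caller pages through results by echoing NextPageToken back into this
+ // field, e.g.
+ //
+ //	req := &ListUptimeCheckConfigsRequest{Parent: "projects/my-project"}
+ //	for {
+ //		resp, err := client.ListUptimeCheckConfigs(ctx, req)
+ //		if err != nil {
+ //			break // handle the error
+ //		}
+ //		// ... use resp.UptimeCheckConfigs ...
+ //		if resp.NextPageToken == "" {
+ //			break
+ //		}
+ //		req.PageToken = resp.NextPageToken
+ //	}
+ //
+ // where client is an UptimeCheckServiceClient and "my-project" is a
+ // placeholder project ID.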
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListUptimeCheckConfigsRequest) Reset() { + *x = ListUptimeCheckConfigsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListUptimeCheckConfigsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListUptimeCheckConfigsRequest) ProtoMessage() {} + +func (x *ListUptimeCheckConfigsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListUptimeCheckConfigsRequest.ProtoReflect.Descriptor instead. +func (*ListUptimeCheckConfigsRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{0} +} + +func (x *ListUptimeCheckConfigsRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListUptimeCheckConfigsRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListUptimeCheckConfigsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListUptimeCheckConfigsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The protocol for the `ListUptimeCheckConfigs` response. +type ListUptimeCheckConfigsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The returned Uptime check configurations. + UptimeCheckConfigs []*UptimeCheckConfig `protobuf:"bytes,1,rep,name=uptime_check_configs,json=uptimeCheckConfigs,proto3" json:"uptime_check_configs,omitempty"` + // This field represents the pagination token to retrieve the next page of + // results. If the value is empty, it means no further results for the + // request. To retrieve the next page of results, the value of the + // next_page_token is passed to the subsequent List method call (in the + // request message's page_token field). + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of Uptime check configurations for the project, + // irrespective of any pagination. 
+ TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` +} + +func (x *ListUptimeCheckConfigsResponse) Reset() { + *x = ListUptimeCheckConfigsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListUptimeCheckConfigsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListUptimeCheckConfigsResponse) ProtoMessage() {} + +func (x *ListUptimeCheckConfigsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListUptimeCheckConfigsResponse.ProtoReflect.Descriptor instead. +func (*ListUptimeCheckConfigsResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ListUptimeCheckConfigsResponse) GetUptimeCheckConfigs() []*UptimeCheckConfig { + if x != nil { + return x.UptimeCheckConfigs + } + return nil +} + +func (x *ListUptimeCheckConfigsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListUptimeCheckConfigsResponse) GetTotalSize() int32 { + if x != nil { + return x.TotalSize + } + return 0 +} + +// The protocol for the `GetUptimeCheckConfig` request. +type GetUptimeCheckConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The Uptime check configuration to retrieve. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetUptimeCheckConfigRequest) Reset() { + *x = GetUptimeCheckConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetUptimeCheckConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetUptimeCheckConfigRequest) ProtoMessage() {} + +func (x *GetUptimeCheckConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetUptimeCheckConfigRequest.ProtoReflect.Descriptor instead. +func (*GetUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{2} +} + +func (x *GetUptimeCheckConfigRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The protocol for the `CreateUptimeCheckConfig` request. +type CreateUptimeCheckConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) in which + // to create the Uptime check. 
The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. The new Uptime check configuration. + UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,2,opt,name=uptime_check_config,json=uptimeCheckConfig,proto3" json:"uptime_check_config,omitempty"` +} + +func (x *CreateUptimeCheckConfigRequest) Reset() { + *x = CreateUptimeCheckConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateUptimeCheckConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateUptimeCheckConfigRequest) ProtoMessage() {} + +func (x *CreateUptimeCheckConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateUptimeCheckConfigRequest.ProtoReflect.Descriptor instead. +func (*CreateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{3} +} + +func (x *CreateUptimeCheckConfigRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateUptimeCheckConfigRequest) GetUptimeCheckConfig() *UptimeCheckConfig { + if x != nil { + return x.UptimeCheckConfig + } + return nil +} + +// The protocol for the `UpdateUptimeCheckConfig` request. +type UpdateUptimeCheckConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. If present, only the listed fields in the current Uptime check + // configuration are updated with values from the new configuration. If this + // field is empty, then the current configuration is completely replaced with + // the new configuration. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Required. If an `updateMask` has been specified, this field gives + // the values for the set of fields mentioned in the `updateMask`. If an + // `updateMask` has not been given, this Uptime check configuration replaces + // the current configuration. If a field is mentioned in `updateMask` but + // the corresponding field is omitted in this partial Uptime check + // configuration, it has the effect of deleting/clearing the field from the + // configuration on the server. + // + // The following fields can be updated: `display_name`, + // `http_check`, `tcp_check`, `timeout`, `content_matchers`, and + // `selected_regions`. 
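+ // For illustration (editor's sketch, not part of the generated file): a
+ // partial update that renames a check while leaving every other field
+ // untouched pairs a one-path mask with a sparsely populated config, e.g.
+ //
+ //	req := &UpdateUptimeCheckConfigRequest{
+ //		UpdateMask: &fieldmaskpb.FieldMask{Paths: []string{"display_name"}},
+ //		UptimeCheckConfig: &UptimeCheckConfig{
+ //			Name:        "projects/my-project/uptimeCheckConfigs/my-check",
+ //			DisplayName: "renamed check",
+ //		},
+ //	}
+ //
+ // "my-project" and "my-check" are placeholders. Omitting UpdateMask
+ // instead replaces the stored configuration wholesale, as described above.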
+ UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,3,opt,name=uptime_check_config,json=uptimeCheckConfig,proto3" json:"uptime_check_config,omitempty"` +} + +func (x *UpdateUptimeCheckConfigRequest) Reset() { + *x = UpdateUptimeCheckConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateUptimeCheckConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateUptimeCheckConfigRequest) ProtoMessage() {} + +func (x *UpdateUptimeCheckConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateUptimeCheckConfigRequest.ProtoReflect.Descriptor instead. +func (*UpdateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{4} +} + +func (x *UpdateUptimeCheckConfigRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +func (x *UpdateUptimeCheckConfigRequest) GetUptimeCheckConfig() *UptimeCheckConfig { + if x != nil { + return x.UptimeCheckConfig + } + return nil +} + +// The protocol for the `DeleteUptimeCheckConfig` request. +type DeleteUptimeCheckConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The Uptime check configuration to delete. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *DeleteUptimeCheckConfigRequest) Reset() { + *x = DeleteUptimeCheckConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteUptimeCheckConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteUptimeCheckConfigRequest) ProtoMessage() {} + +func (x *DeleteUptimeCheckConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteUptimeCheckConfigRequest.ProtoReflect.Descriptor instead. +func (*DeleteUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{5} +} + +func (x *DeleteUptimeCheckConfigRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The protocol for the `ListUptimeCheckIps` request. +type ListUptimeCheckIpsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The maximum number of results to return in a single response. The server + // may further constrain the maximum number of results returned in a single + // page. 
If the page_size is <=0, the server will decide the number of results + // to be returned. + // NOTE: this field is not yet implemented + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return more results from the previous method call. + // NOTE: this field is not yet implemented + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListUptimeCheckIpsRequest) Reset() { + *x = ListUptimeCheckIpsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListUptimeCheckIpsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListUptimeCheckIpsRequest) ProtoMessage() {} + +func (x *ListUptimeCheckIpsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListUptimeCheckIpsRequest.ProtoReflect.Descriptor instead. +func (*ListUptimeCheckIpsRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{6} +} + +func (x *ListUptimeCheckIpsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListUptimeCheckIpsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The protocol for the `ListUptimeCheckIps` response. +type ListUptimeCheckIpsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The returned list of IP addresses (including region and location) that the + // checkers run from. + UptimeCheckIps []*UptimeCheckIp `protobuf:"bytes,1,rep,name=uptime_check_ips,json=uptimeCheckIps,proto3" json:"uptime_check_ips,omitempty"` + // This field represents the pagination token to retrieve the next page of + // results. If the value is empty, it means no further results for the + // request. To retrieve the next page of results, the value of the + // next_page_token is passed to the subsequent List method call (in the + // request message's page_token field). 
+ // NOTE: this field is not yet implemented + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListUptimeCheckIpsResponse) Reset() { + *x = ListUptimeCheckIpsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListUptimeCheckIpsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListUptimeCheckIpsResponse) ProtoMessage() {} + +func (x *ListUptimeCheckIpsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListUptimeCheckIpsResponse.ProtoReflect.Descriptor instead. +func (*ListUptimeCheckIpsResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{7} +} + +func (x *ListUptimeCheckIpsResponse) GetUptimeCheckIps() []*UptimeCheckIp { + if x != nil { + return x.UptimeCheckIps + } + return nil +} + +func (x *ListUptimeCheckIpsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +var File_google_monitoring_v3_uptime_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_uptime_service_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, + 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc0, 0x01, 0x0a, 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 
0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, + 0x12, 0x2b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x70, 0x74, 0x69, + 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, + 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xc2, 0x01, 0x0a, 0x1e, 0x4c, 0x69, + 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x14, + 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x12, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, + 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x66, + 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xcb, 0x01, 0x0a, 0x1e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, + 0x2d, 0x12, 0x2b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x70, 0x74, + 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x5c, 0x0a, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, + 
0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, + 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x11, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x22, 0xbb, 0x01, 0x0a, 0x1e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x5c, 0x0a, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x11, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x22, 0x69, 0x0a, 0x1e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x70, 0x74, 0x69, + 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x57, 0x0a, + 0x19, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x49, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, + 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, + 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x93, 0x01, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x10, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x69, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x49, 0x70, 0x52, 0x0e, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x49, 0x70, 0x73, 
0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, + 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, + 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x32, 0xbd, 0x0a, 0x0a, + 0x12, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0xc0, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, + 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x33, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3b, 0xda, 0x41, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x33, 0x2f, + 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x7d, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0xad, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x55, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x39, 0xda, 0x41, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x33, 0x2f, + 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xde, 0x01, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x22, 0x64, 0xda, 0x41, 0x1a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x75, 0x70, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 
0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x41, 0x3a, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x2a, 0x2f, 0x76, 0x33, + 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0xeb, 0x01, 0x0a, 0x17, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x22, 0x71, 0xda, 0x41, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x55, + 0x3a, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x32, 0x3e, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x75, 0x70, 0x74, 0x69, + 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa2, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, + 0x39, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x2a, 0x2a, + 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x12, 0x4c, + 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, + 0x73, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, + 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 
0x73, 0x74, 0x55, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x14, 0x12, 0x12, 0x2f, 0x76, + 0x33, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x73, + 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, + 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, + 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, + 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, + 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, + 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x42, 0xcd, 0x01, 0x0a, + 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x12, 0x55, 0x70, 0x74, 0x69, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, + 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, + 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_uptime_service_proto_rawDescOnce sync.Once + file_google_monitoring_v3_uptime_service_proto_rawDescData = file_google_monitoring_v3_uptime_service_proto_rawDesc +) + +func file_google_monitoring_v3_uptime_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_uptime_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_uptime_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_uptime_service_proto_rawDescData) + }) + return file_google_monitoring_v3_uptime_service_proto_rawDescData +} + +var file_google_monitoring_v3_uptime_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_google_monitoring_v3_uptime_service_proto_goTypes = []any{ + (*ListUptimeCheckConfigsRequest)(nil), // 0: google.monitoring.v3.ListUptimeCheckConfigsRequest + 
(*ListUptimeCheckConfigsResponse)(nil), // 1: google.monitoring.v3.ListUptimeCheckConfigsResponse + (*GetUptimeCheckConfigRequest)(nil), // 2: google.monitoring.v3.GetUptimeCheckConfigRequest + (*CreateUptimeCheckConfigRequest)(nil), // 3: google.monitoring.v3.CreateUptimeCheckConfigRequest + (*UpdateUptimeCheckConfigRequest)(nil), // 4: google.monitoring.v3.UpdateUptimeCheckConfigRequest + (*DeleteUptimeCheckConfigRequest)(nil), // 5: google.monitoring.v3.DeleteUptimeCheckConfigRequest + (*ListUptimeCheckIpsRequest)(nil), // 6: google.monitoring.v3.ListUptimeCheckIpsRequest + (*ListUptimeCheckIpsResponse)(nil), // 7: google.monitoring.v3.ListUptimeCheckIpsResponse + (*UptimeCheckConfig)(nil), // 8: google.monitoring.v3.UptimeCheckConfig + (*fieldmaskpb.FieldMask)(nil), // 9: google.protobuf.FieldMask + (*UptimeCheckIp)(nil), // 10: google.monitoring.v3.UptimeCheckIp + (*emptypb.Empty)(nil), // 11: google.protobuf.Empty +} +var file_google_monitoring_v3_uptime_service_proto_depIdxs = []int32{ + 8, // 0: google.monitoring.v3.ListUptimeCheckConfigsResponse.uptime_check_configs:type_name -> google.monitoring.v3.UptimeCheckConfig + 8, // 1: google.monitoring.v3.CreateUptimeCheckConfigRequest.uptime_check_config:type_name -> google.monitoring.v3.UptimeCheckConfig + 9, // 2: google.monitoring.v3.UpdateUptimeCheckConfigRequest.update_mask:type_name -> google.protobuf.FieldMask + 8, // 3: google.monitoring.v3.UpdateUptimeCheckConfigRequest.uptime_check_config:type_name -> google.monitoring.v3.UptimeCheckConfig + 10, // 4: google.monitoring.v3.ListUptimeCheckIpsResponse.uptime_check_ips:type_name -> google.monitoring.v3.UptimeCheckIp + 0, // 5: google.monitoring.v3.UptimeCheckService.ListUptimeCheckConfigs:input_type -> google.monitoring.v3.ListUptimeCheckConfigsRequest + 2, // 6: google.monitoring.v3.UptimeCheckService.GetUptimeCheckConfig:input_type -> google.monitoring.v3.GetUptimeCheckConfigRequest + 3, // 7: google.monitoring.v3.UptimeCheckService.CreateUptimeCheckConfig:input_type -> google.monitoring.v3.CreateUptimeCheckConfigRequest + 4, // 8: google.monitoring.v3.UptimeCheckService.UpdateUptimeCheckConfig:input_type -> google.monitoring.v3.UpdateUptimeCheckConfigRequest + 5, // 9: google.monitoring.v3.UptimeCheckService.DeleteUptimeCheckConfig:input_type -> google.monitoring.v3.DeleteUptimeCheckConfigRequest + 6, // 10: google.monitoring.v3.UptimeCheckService.ListUptimeCheckIps:input_type -> google.monitoring.v3.ListUptimeCheckIpsRequest + 1, // 11: google.monitoring.v3.UptimeCheckService.ListUptimeCheckConfigs:output_type -> google.monitoring.v3.ListUptimeCheckConfigsResponse + 8, // 12: google.monitoring.v3.UptimeCheckService.GetUptimeCheckConfig:output_type -> google.monitoring.v3.UptimeCheckConfig + 8, // 13: google.monitoring.v3.UptimeCheckService.CreateUptimeCheckConfig:output_type -> google.monitoring.v3.UptimeCheckConfig + 8, // 14: google.monitoring.v3.UptimeCheckService.UpdateUptimeCheckConfig:output_type -> google.monitoring.v3.UptimeCheckConfig + 11, // 15: google.monitoring.v3.UptimeCheckService.DeleteUptimeCheckConfig:output_type -> google.protobuf.Empty + 7, // 16: google.monitoring.v3.UptimeCheckService.ListUptimeCheckIps:output_type -> google.monitoring.v3.ListUptimeCheckIpsResponse + 11, // [11:17] is the sub-list for method output_type + 5, // [5:11] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { 
file_google_monitoring_v3_uptime_service_proto_init() } +func file_google_monitoring_v3_uptime_service_proto_init() { + if File_google_monitoring_v3_uptime_service_proto != nil { + return + } + file_google_monitoring_v3_uptime_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_uptime_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*ListUptimeCheckConfigsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ListUptimeCheckConfigsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*GetUptimeCheckConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*CreateUptimeCheckConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*UpdateUptimeCheckConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*DeleteUptimeCheckConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_service_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*ListUptimeCheckIpsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_service_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*ListUptimeCheckIpsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_uptime_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_uptime_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_uptime_service_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_uptime_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_uptime_service_proto = out.File + file_google_monitoring_v3_uptime_service_proto_rawDesc = nil + file_google_monitoring_v3_uptime_service_proto_goTypes = nil + file_google_monitoring_v3_uptime_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
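+// (Referencing the constant is what enforces the check: the name
+// grpc.SupportPackageIsVersion6 is only declared by google.golang.org/grpc
+// releases that provide the support APIs this file uses, so building against
+// an older grpc module fails at compile time rather than at runtime.)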
+const _ = grpc.SupportPackageIsVersion6 + +// UptimeCheckServiceClient is the client API for UptimeCheckService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type UptimeCheckServiceClient interface { + // Lists the existing valid Uptime check configurations for the project + // (leaving out any invalid configurations). + ListUptimeCheckConfigs(ctx context.Context, in *ListUptimeCheckConfigsRequest, opts ...grpc.CallOption) (*ListUptimeCheckConfigsResponse, error) + // Gets a single Uptime check configuration. + GetUptimeCheckConfig(ctx context.Context, in *GetUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) + // Creates a new Uptime check configuration. + CreateUptimeCheckConfig(ctx context.Context, in *CreateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) + // Updates an Uptime check configuration. You can either replace the entire + // configuration with a new one or replace only certain fields in the current + // configuration by specifying the fields to be updated via `updateMask`. + // Returns the updated configuration. + UpdateUptimeCheckConfig(ctx context.Context, in *UpdateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) + // Deletes an Uptime check configuration. Note that this method will fail + // if the Uptime check configuration is referenced by an alert policy or + // other dependent configs that would be rendered invalid by the deletion. + DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Returns the list of IP addresses that checkers run from + ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error) +} + +type uptimeCheckServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewUptimeCheckServiceClient(cc grpc.ClientConnInterface) UptimeCheckServiceClient { + return &uptimeCheckServiceClient{cc} +} + +func (c *uptimeCheckServiceClient) ListUptimeCheckConfigs(ctx context.Context, in *ListUptimeCheckConfigsRequest, opts ...grpc.CallOption) (*ListUptimeCheckConfigsResponse, error) { + out := new(ListUptimeCheckConfigsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) GetUptimeCheckConfig(ctx context.Context, in *GetUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) { + out := new(UptimeCheckConfig) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) CreateUptimeCheckConfig(ctx context.Context, in *CreateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) { + out := new(UptimeCheckConfig) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) UpdateUptimeCheckConfig(ctx context.Context, in *UpdateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) { + out := new(UptimeCheckConfig) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error) { + out := new(ListUptimeCheckIpsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// UptimeCheckServiceServer is the server API for UptimeCheckService service. +type UptimeCheckServiceServer interface { + // Lists the existing valid Uptime check configurations for the project + // (leaving out any invalid configurations). + ListUptimeCheckConfigs(context.Context, *ListUptimeCheckConfigsRequest) (*ListUptimeCheckConfigsResponse, error) + // Gets a single Uptime check configuration. + GetUptimeCheckConfig(context.Context, *GetUptimeCheckConfigRequest) (*UptimeCheckConfig, error) + // Creates a new Uptime check configuration. + CreateUptimeCheckConfig(context.Context, *CreateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) + // Updates an Uptime check configuration. You can either replace the entire + // configuration with a new one or replace only certain fields in the current + // configuration by specifying the fields to be updated via `updateMask`. + // Returns the updated configuration. + UpdateUptimeCheckConfig(context.Context, *UpdateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) + // Deletes an Uptime check configuration. Note that this method will fail + // if the Uptime check configuration is referenced by an alert policy or + // other dependent configs that would be rendered invalid by the deletion. + DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*emptypb.Empty, error) + // Returns the list of IP addresses that checkers run from + ListUptimeCheckIps(context.Context, *ListUptimeCheckIpsRequest) (*ListUptimeCheckIpsResponse, error) +} + +// UnimplementedUptimeCheckServiceServer can be embedded to have forward compatible implementations. 
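+//
+// A minimal sketch of that pattern (the myUptimeServer name is illustrative,
+// not part of this package): embed the type and override only the RPCs the
+// server actually implements; any method added to the service later falls
+// through to the embedded stub and returns codes.Unimplemented.
+//
+//	type myUptimeServer struct {
+//		UnimplementedUptimeCheckServiceServer
+//	}
+//
+//	func (s *myUptimeServer) GetUptimeCheckConfig(ctx context.Context, req *GetUptimeCheckConfigRequest) (*UptimeCheckConfig, error) {
+//		return &UptimeCheckConfig{Name: req.GetName()}, nil
+//	}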
+type UnimplementedUptimeCheckServiceServer struct { +} + +func (*UnimplementedUptimeCheckServiceServer) ListUptimeCheckConfigs(context.Context, *ListUptimeCheckConfigsRequest) (*ListUptimeCheckConfigsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListUptimeCheckConfigs not implemented") +} +func (*UnimplementedUptimeCheckServiceServer) GetUptimeCheckConfig(context.Context, *GetUptimeCheckConfigRequest) (*UptimeCheckConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetUptimeCheckConfig not implemented") +} +func (*UnimplementedUptimeCheckServiceServer) CreateUptimeCheckConfig(context.Context, *CreateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateUptimeCheckConfig not implemented") +} +func (*UnimplementedUptimeCheckServiceServer) UpdateUptimeCheckConfig(context.Context, *UpdateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateUptimeCheckConfig not implemented") +} +func (*UnimplementedUptimeCheckServiceServer) DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteUptimeCheckConfig not implemented") +} +func (*UnimplementedUptimeCheckServiceServer) ListUptimeCheckIps(context.Context, *ListUptimeCheckIpsRequest) (*ListUptimeCheckIpsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListUptimeCheckIps not implemented") +} + +func RegisterUptimeCheckServiceServer(s *grpc.Server, srv UptimeCheckServiceServer) { + s.RegisterService(&_UptimeCheckService_serviceDesc, srv) +} + +func _UptimeCheckService_ListUptimeCheckConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListUptimeCheckConfigsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).ListUptimeCheckConfigs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).ListUptimeCheckConfigs(ctx, req.(*ListUptimeCheckConfigsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_GetUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).GetUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).GetUptimeCheckConfig(ctx, req.(*GetUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_CreateUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(UptimeCheckServiceServer).CreateUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).CreateUptimeCheckConfig(ctx, req.(*CreateUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_UpdateUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).UpdateUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).UpdateUptimeCheckConfig(ctx, req.(*UpdateUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_DeleteUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).DeleteUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).DeleteUptimeCheckConfig(ctx, req.(*DeleteUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_ListUptimeCheckIps_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListUptimeCheckIpsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).ListUptimeCheckIps(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).ListUptimeCheckIps(ctx, req.(*ListUptimeCheckIpsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _UptimeCheckService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.UptimeCheckService", + HandlerType: (*UptimeCheckServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListUptimeCheckConfigs", + Handler: _UptimeCheckService_ListUptimeCheckConfigs_Handler, + }, + { + MethodName: "GetUptimeCheckConfig", + Handler: _UptimeCheckService_GetUptimeCheckConfig_Handler, + }, + { + MethodName: "CreateUptimeCheckConfig", + Handler: _UptimeCheckService_CreateUptimeCheckConfig_Handler, + }, + { + MethodName: "UpdateUptimeCheckConfig", + Handler: _UptimeCheckService_UpdateUptimeCheckConfig_Handler, + }, + { + MethodName: "DeleteUptimeCheckConfig", + Handler: _UptimeCheckService_DeleteUptimeCheckConfig_Handler, + }, + { + MethodName: "ListUptimeCheckIps", + Handler: 
_UptimeCheckService_ListUptimeCheckIps_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/uptime_service.proto", +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go new file mode 100644 index 00000000000..6f7fe5d7c49 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go @@ -0,0 +1,618 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" +) + +var newNotificationChannelClientHook clientHook + +// NotificationChannelCallOptions contains the retry settings for each method of NotificationChannelClient. 
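+//
+// Each field carries the default gax.CallOption slice (timeout and retry
+// policy) for one RPC; the per-method variadic opts parameters below are
+// appended after these defaults, so a caller can tighten a single call
+// (sketch; client and req are assumed to be in scope):
+//
+//	resp, err := client.GetNotificationChannel(ctx, req,
+//		gax.WithTimeout(10*time.Second))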
+type NotificationChannelCallOptions struct { + ListNotificationChannelDescriptors []gax.CallOption + GetNotificationChannelDescriptor []gax.CallOption + ListNotificationChannels []gax.CallOption + GetNotificationChannel []gax.CallOption + CreateNotificationChannel []gax.CallOption + UpdateNotificationChannel []gax.CallOption + DeleteNotificationChannel []gax.CallOption + SendNotificationChannelVerificationCode []gax.CallOption + GetNotificationChannelVerificationCode []gax.CallOption + VerifyNotificationChannel []gax.CallOption +} + +func defaultNotificationChannelGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultNotificationChannelCallOptions() *NotificationChannelCallOptions { + return &NotificationChannelCallOptions{ + ListNotificationChannelDescriptors: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetNotificationChannelDescriptor: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListNotificationChannels: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetNotificationChannel: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateNotificationChannel: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + UpdateNotificationChannel: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + DeleteNotificationChannel: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + SendNotificationChannelVerificationCode: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + GetNotificationChannelVerificationCode: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + 
}), + }, + VerifyNotificationChannel: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + } +} + +// internalNotificationChannelClient is an interface that defines the methods available from Cloud Monitoring API. +type internalNotificationChannelClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + ListNotificationChannelDescriptors(context.Context, *monitoringpb.ListNotificationChannelDescriptorsRequest, ...gax.CallOption) *NotificationChannelDescriptorIterator + GetNotificationChannelDescriptor(context.Context, *monitoringpb.GetNotificationChannelDescriptorRequest, ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error) + ListNotificationChannels(context.Context, *monitoringpb.ListNotificationChannelsRequest, ...gax.CallOption) *NotificationChannelIterator + GetNotificationChannel(context.Context, *monitoringpb.GetNotificationChannelRequest, ...gax.CallOption) (*monitoringpb.NotificationChannel, error) + CreateNotificationChannel(context.Context, *monitoringpb.CreateNotificationChannelRequest, ...gax.CallOption) (*monitoringpb.NotificationChannel, error) + UpdateNotificationChannel(context.Context, *monitoringpb.UpdateNotificationChannelRequest, ...gax.CallOption) (*monitoringpb.NotificationChannel, error) + DeleteNotificationChannel(context.Context, *monitoringpb.DeleteNotificationChannelRequest, ...gax.CallOption) error + SendNotificationChannelVerificationCode(context.Context, *monitoringpb.SendNotificationChannelVerificationCodeRequest, ...gax.CallOption) error + GetNotificationChannelVerificationCode(context.Context, *monitoringpb.GetNotificationChannelVerificationCodeRequest, ...gax.CallOption) (*monitoringpb.GetNotificationChannelVerificationCodeResponse, error) + VerifyNotificationChannel(context.Context, *monitoringpb.VerifyNotificationChannelRequest, ...gax.CallOption) (*monitoringpb.NotificationChannel, error) +} + +// NotificationChannelClient is a client for interacting with Cloud Monitoring API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The Notification Channel API provides access to configuration that +// controls how messages related to incidents are sent. +type NotificationChannelClient struct { + // The internal transport-dependent client. + internalClient internalNotificationChannelClient + + // The call options for this service. + CallOptions *NotificationChannelCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *NotificationChannelClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *NotificationChannelClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. 
+func (c *NotificationChannelClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// ListNotificationChannelDescriptors lists the descriptors for supported channel types. The use of descriptors +// makes it possible for new channel types to be dynamically added. +func (c *NotificationChannelClient) ListNotificationChannelDescriptors(ctx context.Context, req *monitoringpb.ListNotificationChannelDescriptorsRequest, opts ...gax.CallOption) *NotificationChannelDescriptorIterator { + return c.internalClient.ListNotificationChannelDescriptors(ctx, req, opts...) +} + +// GetNotificationChannelDescriptor gets a single channel descriptor. The descriptor indicates which fields +// are expected / permitted for a notification channel of the given type. +func (c *NotificationChannelClient) GetNotificationChannelDescriptor(ctx context.Context, req *monitoringpb.GetNotificationChannelDescriptorRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error) { + return c.internalClient.GetNotificationChannelDescriptor(ctx, req, opts...) +} + +// ListNotificationChannels lists the notification channels that have been created for the project. +// To list the types of notification channels that are supported, use +// the ListNotificationChannelDescriptors method. +func (c *NotificationChannelClient) ListNotificationChannels(ctx context.Context, req *monitoringpb.ListNotificationChannelsRequest, opts ...gax.CallOption) *NotificationChannelIterator { + return c.internalClient.ListNotificationChannels(ctx, req, opts...) +} + +// GetNotificationChannel gets a single notification channel. The channel includes the relevant +// configuration details with which the channel was created. However, the +// response may truncate or omit passwords, API keys, or other private key +// matter and thus the response may not be 100% identical to the information +// that was supplied in the call to the create method. +func (c *NotificationChannelClient) GetNotificationChannel(ctx context.Context, req *monitoringpb.GetNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + return c.internalClient.GetNotificationChannel(ctx, req, opts...) +} + +// CreateNotificationChannel creates a new notification channel, representing a single notification +// endpoint such as an email address, SMS number, or PagerDuty service. +// +// Design your application to single-thread API calls that modify the state of +// notification channels in a single project. This includes calls to +// CreateNotificationChannel, DeleteNotificationChannel and +// UpdateNotificationChannel. +func (c *NotificationChannelClient) CreateNotificationChannel(ctx context.Context, req *monitoringpb.CreateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + return c.internalClient.CreateNotificationChannel(ctx, req, opts...) +} + +// UpdateNotificationChannel updates a notification channel. Fields not specified in the field mask +// remain unchanged. +// +// Design your application to single-thread API calls that modify the state of +// notification channels in a single project. This includes calls to +// CreateNotificationChannel, DeleteNotificationChannel and +// UpdateNotificationChannel. 
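+//
+// A partial update names the fields to change in the mask (sketch; fieldmaskpb
+// is google.golang.org/protobuf/types/known/fieldmaskpb, and channel is an
+// existing *monitoringpb.NotificationChannel):
+//
+//	req := &monitoringpb.UpdateNotificationChannelRequest{
+//		UpdateMask:          &fieldmaskpb.FieldMask{Paths: []string{"display_name"}},
+//		NotificationChannel: channel,
+//	}
+//	updated, err := client.UpdateNotificationChannel(ctx, req)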
+func (c *NotificationChannelClient) UpdateNotificationChannel(ctx context.Context, req *monitoringpb.UpdateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + return c.internalClient.UpdateNotificationChannel(ctx, req, opts...) +} + +// DeleteNotificationChannel deletes a notification channel. +// +// Design your application to single-thread API calls that modify the state of +// notification channels in a single project. This includes calls to +// CreateNotificationChannel, DeleteNotificationChannel and +// UpdateNotificationChannel. +func (c *NotificationChannelClient) DeleteNotificationChannel(ctx context.Context, req *monitoringpb.DeleteNotificationChannelRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteNotificationChannel(ctx, req, opts...) +} + +// SendNotificationChannelVerificationCode causes a verification code to be delivered to the channel. The code +// can then be supplied in VerifyNotificationChannel to verify the channel. +func (c *NotificationChannelClient) SendNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.SendNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) error { + return c.internalClient.SendNotificationChannelVerificationCode(ctx, req, opts...) +} + +// GetNotificationChannelVerificationCode requests a verification code for an already verified channel that can then +// be used in a call to VerifyNotificationChannel() on a different channel +// with an equivalent identity in the same or in a different project. This +// makes it possible to copy a channel between projects without requiring +// manual reverification of the channel. If the channel is not in the +// verified state, this method will fail (in other words, this may only be +// used if the SendNotificationChannelVerificationCode and +// VerifyNotificationChannel paths have already been used to put the given +// channel into the verified state). +// +// There is no guarantee that the verification codes returned by this method +// will be of a similar structure or form as the ones that are delivered +// to the channel via SendNotificationChannelVerificationCode; while +// VerifyNotificationChannel() will recognize both the codes delivered via +// SendNotificationChannelVerificationCode() and returned from +// GetNotificationChannelVerificationCode(), it is typically the case that +// the verification codes delivered via +// SendNotificationChannelVerificationCode() will be shorter and also +// have a shorter expiration (e.g. codes such as “G-123456”) whereas +// GetVerificationCode() will typically return a much longer, websafe base +// 64 encoded string that has a longer expiration time. +func (c *NotificationChannelClient) GetNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.GetNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) (*monitoringpb.GetNotificationChannelVerificationCodeResponse, error) { + return c.internalClient.GetNotificationChannelVerificationCode(ctx, req, opts...) +} + +// VerifyNotificationChannel verifies a NotificationChannel by proving receipt of the code +// delivered to the channel as a result of calling +// SendNotificationChannelVerificationCode. +func (c *NotificationChannelClient) VerifyNotificationChannel(ctx context.Context, req *monitoringpb.VerifyNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + return c.internalClient.VerifyNotificationChannel(ctx, req, opts...) 
+} + +// notificationChannelGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type notificationChannelGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing NotificationChannelClient + CallOptions **NotificationChannelCallOptions + + // The gRPC API client. + notificationChannelClient monitoringpb.NotificationChannelServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewNotificationChannelClient creates a new notification channel service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// The Notification Channel API provides access to configuration that +// controls how messages related to incidents are sent. +func NewNotificationChannelClient(ctx context.Context, opts ...option.ClientOption) (*NotificationChannelClient, error) { + clientOpts := defaultNotificationChannelGRPCClientOptions() + if newNotificationChannelClientHook != nil { + hookOpts, err := newNotificationChannelClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := NotificationChannelClient{CallOptions: defaultNotificationChannelCallOptions()} + + c := &notificationChannelGRPCClient{ + connPool: connPool, + notificationChannelClient: monitoringpb.NewNotificationChannelServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *notificationChannelGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *notificationChannelGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *notificationChannelGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *notificationChannelGRPCClient) ListNotificationChannelDescriptors(ctx context.Context, req *monitoringpb.ListNotificationChannelDescriptorsRequest, opts ...gax.CallOption) *NotificationChannelDescriptorIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListNotificationChannelDescriptors[0:len((*c.CallOptions).ListNotificationChannelDescriptors):len((*c.CallOptions).ListNotificationChannelDescriptors)], opts...)
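+ // The three-index slice expression above caps the default option slice at
+ // its length, so this append copies into fresh backing storage instead of
+ // mutating the shared (*c.CallOptions) defaults.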
+ it := &NotificationChannelDescriptorIterator{} + req = proto.Clone(req).(*monitoringpb.ListNotificationChannelDescriptorsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannelDescriptor, string, error) { + resp := &monitoringpb.ListNotificationChannelDescriptorsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.ListNotificationChannelDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetChannelDescriptors(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *notificationChannelGRPCClient) GetNotificationChannelDescriptor(ctx context.Context, req *monitoringpb.GetNotificationChannelDescriptorRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetNotificationChannelDescriptor[0:len((*c.CallOptions).GetNotificationChannelDescriptor):len((*c.CallOptions).GetNotificationChannelDescriptor)], opts...) + var resp *monitoringpb.NotificationChannelDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.GetNotificationChannelDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *notificationChannelGRPCClient) ListNotificationChannels(ctx context.Context, req *monitoringpb.ListNotificationChannelsRequest, opts ...gax.CallOption) *NotificationChannelIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListNotificationChannels[0:len((*c.CallOptions).ListNotificationChannels):len((*c.CallOptions).ListNotificationChannels)], opts...) + it := &NotificationChannelIterator{} + req = proto.Clone(req).(*monitoringpb.ListNotificationChannelsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannel, string, error) { + resp := &monitoringpb.ListNotificationChannelsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.ListNotificationChannels(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetNotificationChannels(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *notificationChannelGRPCClient) GetNotificationChannel(ctx context.Context, req *monitoringpb.GetNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetNotificationChannel[0:len((*c.CallOptions).GetNotificationChannel):len((*c.CallOptions).GetNotificationChannel)], opts...) + var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.GetNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *notificationChannelGRPCClient) CreateNotificationChannel(ctx context.Context, req *monitoringpb.CreateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateNotificationChannel[0:len((*c.CallOptions).CreateNotificationChannel):len((*c.CallOptions).CreateNotificationChannel)], opts...) + var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.CreateNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *notificationChannelGRPCClient) UpdateNotificationChannel(ctx context.Context, req *monitoringpb.UpdateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "notification_channel.name", url.QueryEscape(req.GetNotificationChannel().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateNotificationChannel[0:len((*c.CallOptions).UpdateNotificationChannel):len((*c.CallOptions).UpdateNotificationChannel)], opts...) + var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.UpdateNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *notificationChannelGRPCClient) DeleteNotificationChannel(ctx context.Context, req *monitoringpb.DeleteNotificationChannelRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteNotificationChannel[0:len((*c.CallOptions).DeleteNotificationChannel):len((*c.CallOptions).DeleteNotificationChannel)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.notificationChannelClient.DeleteNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *notificationChannelGRPCClient) SendNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.SendNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).SendNotificationChannelVerificationCode[0:len((*c.CallOptions).SendNotificationChannelVerificationCode):len((*c.CallOptions).SendNotificationChannelVerificationCode)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.notificationChannelClient.SendNotificationChannelVerificationCode(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *notificationChannelGRPCClient) GetNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.GetNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) (*monitoringpb.GetNotificationChannelVerificationCodeResponse, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetNotificationChannelVerificationCode[0:len((*c.CallOptions).GetNotificationChannelVerificationCode):len((*c.CallOptions).GetNotificationChannelVerificationCode)], opts...) + var resp *monitoringpb.GetNotificationChannelVerificationCodeResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.GetNotificationChannelVerificationCode(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *notificationChannelGRPCClient) VerifyNotificationChannel(ctx context.Context, req *monitoringpb.VerifyNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).VerifyNotificationChannel[0:len((*c.CallOptions).VerifyNotificationChannel):len((*c.CallOptions).VerifyNotificationChannel)], opts...) 
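+ // req.Code carries the verification code previously obtained via
+ // GetNotificationChannelVerificationCode or delivered to the channel by
+ // SendNotificationChannelVerificationCode.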
+ var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.VerifyNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go new file mode 100644 index 00000000000..3c111637e19 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go @@ -0,0 +1,233 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + "google.golang.org/grpc" + "google.golang.org/protobuf/proto" +) + +var newQueryClientHook clientHook + +// QueryCallOptions contains the retry settings for each method of QueryClient. +type QueryCallOptions struct { + QueryTimeSeries []gax.CallOption +} + +func defaultQueryGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultQueryCallOptions() *QueryCallOptions { + return &QueryCallOptions{ + QueryTimeSeries: []gax.CallOption{}, + } +} + +// internalQueryClient is an interface that defines the methods available from Cloud Monitoring API. +type internalQueryClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + QueryTimeSeries(context.Context, *monitoringpb.QueryTimeSeriesRequest, ...gax.CallOption) *TimeSeriesDataIterator +} + +// QueryClient is a client for interacting with Cloud Monitoring API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The QueryService API is used to manage time series data in Cloud +// Monitoring. Time series data is a collection of data points that describes +// the time-varying values of a metric. 
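+//
+// A minimal usage sketch (illustrative only; "my-project" and the MQL query
+// below are placeholder values):
+//
+//	ctx := context.Background()
+//	c, err := monitoring.NewQueryClient(ctx)
+//	if err != nil {
+//		// TODO: handle error
+//	}
+//	defer c.Close()
+//
+//	it := c.QueryTimeSeries(ctx, &monitoringpb.QueryTimeSeriesRequest{
+//		Name:  "projects/my-project",
+//		Query: "fetch gce_instance | metric 'compute.googleapis.com/instance/cpu/utilization'",
+//	})
+//	for {
+//		data, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// TODO: handle error
+//		}
+//		_ = data // each item is a *monitoringpb.TimeSeriesData
+//	}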
+type QueryClient struct { + // The internal transport-dependent client. + internalClient internalQueryClient + + // The call options for this service. + CallOptions *QueryCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *QueryClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *QueryClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *QueryClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// QueryTimeSeries queries time series using Monitoring Query Language. +func (c *QueryClient) QueryTimeSeries(ctx context.Context, req *monitoringpb.QueryTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesDataIterator { + return c.internalClient.QueryTimeSeries(ctx, req, opts...) +} + +// queryGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type queryGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing QueryClient + CallOptions **QueryCallOptions + + // The gRPC API client. + queryClient monitoringpb.QueryServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewQueryClient creates a new query service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// The QueryService API is used to manage time series data in Cloud +// Monitoring. Time series data is a collection of data points that describes +// the time-varying values of a metric. +func NewQueryClient(ctx context.Context, opts ...option.ClientOption) (*QueryClient, error) { + clientOpts := defaultQueryGRPCClientOptions() + if newQueryClientHook != nil { + hookOpts, err := newQueryClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := QueryClient{CallOptions: defaultQueryCallOptions()} + + c := &queryGRPCClient{ + connPool: connPool, + queryClient: monitoringpb.NewQueryServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *queryGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. 
+func (c *queryGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *queryGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *queryGRPCClient) QueryTimeSeries(ctx context.Context, req *monitoringpb.QueryTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesDataIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).QueryTimeSeries[0:len((*c.CallOptions).QueryTimeSeries):len((*c.CallOptions).QueryTimeSeries)], opts...) + it := &TimeSeriesDataIterator{} + req = proto.Clone(req).(*monitoringpb.QueryTimeSeriesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeriesData, string, error) { + resp := &monitoringpb.QueryTimeSeriesResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.queryClient.QueryTimeSeries(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetTimeSeriesData(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go new file mode 100644 index 00000000000..7776c425f9f --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go @@ -0,0 +1,565 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. 
+ +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" +) + +var newServiceMonitoringClientHook clientHook + +// ServiceMonitoringCallOptions contains the retry settings for each method of ServiceMonitoringClient. +type ServiceMonitoringCallOptions struct { + CreateService []gax.CallOption + GetService []gax.CallOption + ListServices []gax.CallOption + UpdateService []gax.CallOption + DeleteService []gax.CallOption + CreateServiceLevelObjective []gax.CallOption + GetServiceLevelObjective []gax.CallOption + ListServiceLevelObjectives []gax.CallOption + UpdateServiceLevelObjective []gax.CallOption + DeleteServiceLevelObjective []gax.CallOption +} + +func defaultServiceMonitoringGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultServiceMonitoringCallOptions() *ServiceMonitoringCallOptions { + return &ServiceMonitoringCallOptions{ + CreateService: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + GetService: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListServices: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateService: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + DeleteService: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateServiceLevelObjective: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + GetServiceLevelObjective: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListServiceLevelObjectives: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, 
gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateServiceLevelObjective: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + DeleteServiceLevelObjective: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + } +} + +// internalServiceMonitoringClient is an interface that defines the methods available from Cloud Monitoring API. +type internalServiceMonitoringClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + CreateService(context.Context, *monitoringpb.CreateServiceRequest, ...gax.CallOption) (*monitoringpb.Service, error) + GetService(context.Context, *monitoringpb.GetServiceRequest, ...gax.CallOption) (*monitoringpb.Service, error) + ListServices(context.Context, *monitoringpb.ListServicesRequest, ...gax.CallOption) *ServiceIterator + UpdateService(context.Context, *monitoringpb.UpdateServiceRequest, ...gax.CallOption) (*monitoringpb.Service, error) + DeleteService(context.Context, *monitoringpb.DeleteServiceRequest, ...gax.CallOption) error + CreateServiceLevelObjective(context.Context, *monitoringpb.CreateServiceLevelObjectiveRequest, ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) + GetServiceLevelObjective(context.Context, *monitoringpb.GetServiceLevelObjectiveRequest, ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) + ListServiceLevelObjectives(context.Context, *monitoringpb.ListServiceLevelObjectivesRequest, ...gax.CallOption) *ServiceLevelObjectiveIterator + UpdateServiceLevelObjective(context.Context, *monitoringpb.UpdateServiceLevelObjectiveRequest, ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) + DeleteServiceLevelObjective(context.Context, *monitoringpb.DeleteServiceLevelObjectiveRequest, ...gax.CallOption) error +} + +// ServiceMonitoringClient is a client for interacting with Cloud Monitoring API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The Cloud Monitoring Service-Oriented Monitoring API has endpoints for +// managing and querying aspects of a Metrics Scope’s services. These include +// the Service's monitored resources, its Service-Level Objectives, and a +// taxonomy of categorized Health Metrics. +type ServiceMonitoringClient struct { + // The internal transport-dependent client. + internalClient internalServiceMonitoringClient + + // The call options for this service. + CallOptions *ServiceMonitoringCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ServiceMonitoringClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ServiceMonitoringClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. 
+func (c *ServiceMonitoringClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// CreateService create a Service. +func (c *ServiceMonitoringClient) CreateService(ctx context.Context, req *monitoringpb.CreateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) { + return c.internalClient.CreateService(ctx, req, opts...) +} + +// GetService get the named Service. +func (c *ServiceMonitoringClient) GetService(ctx context.Context, req *monitoringpb.GetServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) { + return c.internalClient.GetService(ctx, req, opts...) +} + +// ListServices list Services for this Metrics Scope. +func (c *ServiceMonitoringClient) ListServices(ctx context.Context, req *monitoringpb.ListServicesRequest, opts ...gax.CallOption) *ServiceIterator { + return c.internalClient.ListServices(ctx, req, opts...) +} + +// UpdateService update this Service. +func (c *ServiceMonitoringClient) UpdateService(ctx context.Context, req *monitoringpb.UpdateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) { + return c.internalClient.UpdateService(ctx, req, opts...) +} + +// DeleteService soft delete this Service. +func (c *ServiceMonitoringClient) DeleteService(ctx context.Context, req *monitoringpb.DeleteServiceRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteService(ctx, req, opts...) +} + +// CreateServiceLevelObjective create a ServiceLevelObjective for the given Service. +func (c *ServiceMonitoringClient) CreateServiceLevelObjective(ctx context.Context, req *monitoringpb.CreateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) { + return c.internalClient.CreateServiceLevelObjective(ctx, req, opts...) +} + +// GetServiceLevelObjective get a ServiceLevelObjective by name. +func (c *ServiceMonitoringClient) GetServiceLevelObjective(ctx context.Context, req *monitoringpb.GetServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) { + return c.internalClient.GetServiceLevelObjective(ctx, req, opts...) +} + +// ListServiceLevelObjectives list the ServiceLevelObjectives for the given Service. +func (c *ServiceMonitoringClient) ListServiceLevelObjectives(ctx context.Context, req *monitoringpb.ListServiceLevelObjectivesRequest, opts ...gax.CallOption) *ServiceLevelObjectiveIterator { + return c.internalClient.ListServiceLevelObjectives(ctx, req, opts...) +} + +// UpdateServiceLevelObjective update the given ServiceLevelObjective. +func (c *ServiceMonitoringClient) UpdateServiceLevelObjective(ctx context.Context, req *monitoringpb.UpdateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) { + return c.internalClient.UpdateServiceLevelObjective(ctx, req, opts...) +} + +// DeleteServiceLevelObjective delete the given ServiceLevelObjective. +func (c *ServiceMonitoringClient) DeleteServiceLevelObjective(ctx context.Context, req *monitoringpb.DeleteServiceLevelObjectiveRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteServiceLevelObjective(ctx, req, opts...) +} + +// serviceMonitoringGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type serviceMonitoringGRPCClient struct { + // Connection pool of gRPC connections to the service. 
+ connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing ServiceMonitoringClient + CallOptions **ServiceMonitoringCallOptions + + // The gRPC API client. + serviceMonitoringClient monitoringpb.ServiceMonitoringServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewServiceMonitoringClient creates a new service monitoring service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// The Cloud Monitoring Service-Oriented Monitoring API has endpoints for +// managing and querying aspects of a Metrics Scope’s services. These include +// the Service's monitored resources, its Service-Level Objectives, and a +// taxonomy of categorized Health Metrics. +func NewServiceMonitoringClient(ctx context.Context, opts ...option.ClientOption) (*ServiceMonitoringClient, error) { + clientOpts := defaultServiceMonitoringGRPCClientOptions() + if newServiceMonitoringClientHook != nil { + hookOpts, err := newServiceMonitoringClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := ServiceMonitoringClient{CallOptions: defaultServiceMonitoringCallOptions()} + + c := &serviceMonitoringGRPCClient{ + connPool: connPool, + serviceMonitoringClient: monitoringpb.NewServiceMonitoringServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *serviceMonitoringGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *serviceMonitoringGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *serviceMonitoringGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *serviceMonitoringGRPCClient) CreateService(ctx context.Context, req *monitoringpb.CreateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateService[0:len((*c.CallOptions).CreateService):len((*c.CallOptions).CreateService)], opts...) + var resp *monitoringpb.Service + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.CreateService(ctx, req, settings.GRPC...) + return err + }, opts...) 
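+ // resp is assigned inside the retried closure, so on success it holds the
+ // Service returned by the final attempt.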
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *serviceMonitoringGRPCClient) GetService(ctx context.Context, req *monitoringpb.GetServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetService[0:len((*c.CallOptions).GetService):len((*c.CallOptions).GetService)], opts...) + var resp *monitoringpb.Service + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.GetService(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *serviceMonitoringGRPCClient) ListServices(ctx context.Context, req *monitoringpb.ListServicesRequest, opts ...gax.CallOption) *ServiceIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListServices[0:len((*c.CallOptions).ListServices):len((*c.CallOptions).ListServices)], opts...) + it := &ServiceIterator{} + req = proto.Clone(req).(*monitoringpb.ListServicesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Service, string, error) { + resp := &monitoringpb.ListServicesResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.ListServices(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetServices(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *serviceMonitoringGRPCClient) UpdateService(ctx context.Context, req *monitoringpb.UpdateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "service.name", url.QueryEscape(req.GetService().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateService[0:len((*c.CallOptions).UpdateService):len((*c.CallOptions).UpdateService)], opts...) + var resp *monitoringpb.Service + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.UpdateService(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *serviceMonitoringGRPCClient) DeleteService(ctx context.Context, req *monitoringpb.DeleteServiceRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteService[0:len((*c.CallOptions).DeleteService):len((*c.CallOptions).DeleteService)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.serviceMonitoringClient.DeleteService(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *serviceMonitoringGRPCClient) CreateServiceLevelObjective(ctx context.Context, req *monitoringpb.CreateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateServiceLevelObjective[0:len((*c.CallOptions).CreateServiceLevelObjective):len((*c.CallOptions).CreateServiceLevelObjective)], opts...) + var resp *monitoringpb.ServiceLevelObjective + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.CreateServiceLevelObjective(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *serviceMonitoringGRPCClient) GetServiceLevelObjective(ctx context.Context, req *monitoringpb.GetServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetServiceLevelObjective[0:len((*c.CallOptions).GetServiceLevelObjective):len((*c.CallOptions).GetServiceLevelObjective)], opts...) + var resp *monitoringpb.ServiceLevelObjective + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.GetServiceLevelObjective(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *serviceMonitoringGRPCClient) ListServiceLevelObjectives(ctx context.Context, req *monitoringpb.ListServiceLevelObjectivesRequest, opts ...gax.CallOption) *ServiceLevelObjectiveIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListServiceLevelObjectives[0:len((*c.CallOptions).ListServiceLevelObjectives):len((*c.CallOptions).ListServiceLevelObjectives)], opts...) 
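+ // The iterator fetches pages on demand as its buffer drains; callers
+ // typically consume it with Next() until iterator.Done.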
+ it := &ServiceLevelObjectiveIterator{} + req = proto.Clone(req).(*monitoringpb.ListServiceLevelObjectivesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.ServiceLevelObjective, string, error) { + resp := &monitoringpb.ListServiceLevelObjectivesResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.ListServiceLevelObjectives(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetServiceLevelObjectives(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *serviceMonitoringGRPCClient) UpdateServiceLevelObjective(ctx context.Context, req *monitoringpb.UpdateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "service_level_objective.name", url.QueryEscape(req.GetServiceLevelObjective().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateServiceLevelObjective[0:len((*c.CallOptions).UpdateServiceLevelObjective):len((*c.CallOptions).UpdateServiceLevelObjective)], opts...) + var resp *monitoringpb.ServiceLevelObjective + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.UpdateServiceLevelObjective(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *serviceMonitoringGRPCClient) DeleteServiceLevelObjective(ctx context.Context, req *monitoringpb.DeleteServiceLevelObjectiveRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteServiceLevelObjective[0:len((*c.CallOptions).DeleteServiceLevelObjective):len((*c.CallOptions).DeleteServiceLevelObjective)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.serviceMonitoringClient.DeleteServiceLevelObjective(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ return err +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go new file mode 100644 index 00000000000..32cad577e3f --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go @@ -0,0 +1,343 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" +) + +var newSnoozeClientHook clientHook + +// SnoozeCallOptions contains the retry settings for each method of SnoozeClient. +type SnoozeCallOptions struct { + CreateSnooze []gax.CallOption + ListSnoozes []gax.CallOption + GetSnooze []gax.CallOption + UpdateSnooze []gax.CallOption +} + +func defaultSnoozeGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultSnoozeCallOptions() *SnoozeCallOptions { + return &SnoozeCallOptions{ + CreateSnooze: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + ListSnoozes: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetSnooze: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateSnooze: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + } +} + +// internalSnoozeClient is an interface that defines the methods available from Cloud Monitoring API. 
+type internalSnoozeClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + CreateSnooze(context.Context, *monitoringpb.CreateSnoozeRequest, ...gax.CallOption) (*monitoringpb.Snooze, error) + ListSnoozes(context.Context, *monitoringpb.ListSnoozesRequest, ...gax.CallOption) *SnoozeIterator + GetSnooze(context.Context, *monitoringpb.GetSnoozeRequest, ...gax.CallOption) (*monitoringpb.Snooze, error) + UpdateSnooze(context.Context, *monitoringpb.UpdateSnoozeRequest, ...gax.CallOption) (*monitoringpb.Snooze, error) +} + +// SnoozeClient is a client for interacting with Cloud Monitoring API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The SnoozeService API is used to temporarily prevent an alert policy from +// generating alerts. A Snooze is a description of the criteria under which one +// or more alert policies should not fire alerts for the specified duration. +type SnoozeClient struct { + // The internal transport-dependent client. + internalClient internalSnoozeClient + + // The call options for this service. + CallOptions *SnoozeCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *SnoozeClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *SnoozeClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *SnoozeClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// CreateSnooze creates a Snooze that will prevent alerts, which match the provided +// criteria, from being opened. The Snooze applies for a specific time +// interval. +func (c *SnoozeClient) CreateSnooze(ctx context.Context, req *monitoringpb.CreateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) { + return c.internalClient.CreateSnooze(ctx, req, opts...) +} + +// ListSnoozes lists the Snoozes associated with a project. Can optionally pass in +// filter, which specifies predicates to match Snoozes. +func (c *SnoozeClient) ListSnoozes(ctx context.Context, req *monitoringpb.ListSnoozesRequest, opts ...gax.CallOption) *SnoozeIterator { + return c.internalClient.ListSnoozes(ctx, req, opts...) +} + +// GetSnooze retrieves a Snooze by name. +func (c *SnoozeClient) GetSnooze(ctx context.Context, req *monitoringpb.GetSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) { + return c.internalClient.GetSnooze(ctx, req, opts...) +} + +// UpdateSnooze updates a Snooze, identified by its name, with the parameters in the +// given Snooze object. +func (c *SnoozeClient) UpdateSnooze(ctx context.Context, req *monitoringpb.UpdateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) { + return c.internalClient.UpdateSnooze(ctx, req, opts...) +} + +// snoozeGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. 
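+//
+// It is constructed by NewSnoozeClient; the exported SnoozeClient wrapper
+// delegates all of its calls here.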
+type snoozeGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing SnoozeClient + CallOptions **SnoozeCallOptions + + // The gRPC API client. + snoozeClient monitoringpb.SnoozeServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewSnoozeClient creates a new snooze service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// The SnoozeService API is used to temporarily prevent an alert policy from +// generating alerts. A Snooze is a description of the criteria under which one +// or more alert policies should not fire alerts for the specified duration. +func NewSnoozeClient(ctx context.Context, opts ...option.ClientOption) (*SnoozeClient, error) { + clientOpts := defaultSnoozeGRPCClientOptions() + if newSnoozeClientHook != nil { + hookOpts, err := newSnoozeClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := SnoozeClient{CallOptions: defaultSnoozeCallOptions()} + + c := &snoozeGRPCClient{ + connPool: connPool, + snoozeClient: monitoringpb.NewSnoozeServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *snoozeGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *snoozeGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *snoozeGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *snoozeGRPCClient) CreateSnooze(ctx context.Context, req *monitoringpb.CreateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateSnooze[0:len((*c.CallOptions).CreateSnooze):len((*c.CallOptions).CreateSnooze)], opts...) + var resp *monitoringpb.Snooze + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.snoozeClient.CreateSnooze(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *snoozeGRPCClient) ListSnoozes(ctx context.Context, req *monitoringpb.ListSnoozesRequest, opts ...gax.CallOption) *SnoozeIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListSnoozes[0:len((*c.CallOptions).ListSnoozes):len((*c.CallOptions).ListSnoozes)], opts...) + it := &SnoozeIterator{} + req = proto.Clone(req).(*monitoringpb.ListSnoozesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Snooze, string, error) { + resp := &monitoringpb.ListSnoozesResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.snoozeClient.ListSnoozes(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetSnoozes(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *snoozeGRPCClient) GetSnooze(ctx context.Context, req *monitoringpb.GetSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetSnooze[0:len((*c.CallOptions).GetSnooze):len((*c.CallOptions).GetSnooze)], opts...) + var resp *monitoringpb.Snooze + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.snoozeClient.GetSnooze(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *snoozeGRPCClient) UpdateSnooze(ctx context.Context, req *monitoringpb.UpdateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "snooze.name", url.QueryEscape(req.GetSnooze().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateSnooze[0:len((*c.CallOptions).UpdateSnooze):len((*c.CallOptions).UpdateSnooze)], opts...) + var resp *monitoringpb.Snooze + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.snoozeClient.UpdateSnooze(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go new file mode 100644 index 00000000000..d3815251374 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go @@ -0,0 +1,450 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" +) + +var newUptimeCheckClientHook clientHook + +// UptimeCheckCallOptions contains the retry settings for each method of UptimeCheckClient. +type UptimeCheckCallOptions struct { + ListUptimeCheckConfigs []gax.CallOption + GetUptimeCheckConfig []gax.CallOption + CreateUptimeCheckConfig []gax.CallOption + UpdateUptimeCheckConfig []gax.CallOption + DeleteUptimeCheckConfig []gax.CallOption + ListUptimeCheckIps []gax.CallOption +} + +func defaultUptimeCheckGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultUptimeCheckCallOptions() *UptimeCheckCallOptions { + return &UptimeCheckCallOptions{ + ListUptimeCheckConfigs: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetUptimeCheckConfig: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateUptimeCheckConfig: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + UpdateUptimeCheckConfig: 
[]gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + DeleteUptimeCheckConfig: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListUptimeCheckIps: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + } +} + +// internalUptimeCheckClient is an interface that defines the methods available from Cloud Monitoring API. +type internalUptimeCheckClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + ListUptimeCheckConfigs(context.Context, *monitoringpb.ListUptimeCheckConfigsRequest, ...gax.CallOption) *UptimeCheckConfigIterator + GetUptimeCheckConfig(context.Context, *monitoringpb.GetUptimeCheckConfigRequest, ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) + CreateUptimeCheckConfig(context.Context, *monitoringpb.CreateUptimeCheckConfigRequest, ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) + UpdateUptimeCheckConfig(context.Context, *monitoringpb.UpdateUptimeCheckConfigRequest, ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) + DeleteUptimeCheckConfig(context.Context, *monitoringpb.DeleteUptimeCheckConfigRequest, ...gax.CallOption) error + ListUptimeCheckIps(context.Context, *monitoringpb.ListUptimeCheckIpsRequest, ...gax.CallOption) *UptimeCheckIpIterator +} + +// UptimeCheckClient is a client for interacting with Cloud Monitoring API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The UptimeCheckService API is used to manage (list, create, delete, edit) +// Uptime check configurations in the Cloud Monitoring product. An Uptime +// check is a piece of configuration that determines which resources and +// services to monitor for availability. These configurations can also be +// configured interactively by navigating to the [Cloud console] +// (https://console.cloud.google.com (at https://console.cloud.google.com)), selecting the appropriate project, +// clicking on “Monitoring” on the left-hand side to navigate to Cloud +// Monitoring, and then clicking on “Uptime”. +type UptimeCheckClient struct { + // The internal transport-dependent client. + internalClient internalUptimeCheckClient + + // The call options for this service. + CallOptions *UptimeCheckCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *UptimeCheckClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *UptimeCheckClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. 
+func (c *UptimeCheckClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// ListUptimeCheckConfigs lists the existing valid Uptime check configurations for the project +// (leaving out any invalid configurations). +func (c *UptimeCheckClient) ListUptimeCheckConfigs(ctx context.Context, req *monitoringpb.ListUptimeCheckConfigsRequest, opts ...gax.CallOption) *UptimeCheckConfigIterator { + return c.internalClient.ListUptimeCheckConfigs(ctx, req, opts...) +} + +// GetUptimeCheckConfig gets a single Uptime check configuration. +func (c *UptimeCheckClient) GetUptimeCheckConfig(ctx context.Context, req *monitoringpb.GetUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + return c.internalClient.GetUptimeCheckConfig(ctx, req, opts...) +} + +// CreateUptimeCheckConfig creates a new Uptime check configuration. +func (c *UptimeCheckClient) CreateUptimeCheckConfig(ctx context.Context, req *monitoringpb.CreateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + return c.internalClient.CreateUptimeCheckConfig(ctx, req, opts...) +} + +// UpdateUptimeCheckConfig updates an Uptime check configuration. You can either replace the entire +// configuration with a new one or replace only certain fields in the current +// configuration by specifying the fields to be updated via updateMask. +// Returns the updated configuration. +func (c *UptimeCheckClient) UpdateUptimeCheckConfig(ctx context.Context, req *monitoringpb.UpdateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + return c.internalClient.UpdateUptimeCheckConfig(ctx, req, opts...) +} + +// DeleteUptimeCheckConfig deletes an Uptime check configuration. Note that this method will fail +// if the Uptime check configuration is referenced by an alert policy or +// other dependent configs that would be rendered invalid by the deletion. +func (c *UptimeCheckClient) DeleteUptimeCheckConfig(ctx context.Context, req *monitoringpb.DeleteUptimeCheckConfigRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteUptimeCheckConfig(ctx, req, opts...) +} + +// ListUptimeCheckIps returns the list of IP addresses that checkers run from +func (c *UptimeCheckClient) ListUptimeCheckIps(ctx context.Context, req *monitoringpb.ListUptimeCheckIpsRequest, opts ...gax.CallOption) *UptimeCheckIpIterator { + return c.internalClient.ListUptimeCheckIps(ctx, req, opts...) +} + +// uptimeCheckGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type uptimeCheckGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing UptimeCheckClient + CallOptions **UptimeCheckCallOptions + + // The gRPC API client. + uptimeCheckClient monitoringpb.UptimeCheckServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewUptimeCheckClient creates a new uptime check service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// The UptimeCheckService API is used to manage (list, create, delete, edit) +// Uptime check configurations in the Cloud Monitoring product. 
An Uptime +// check is a piece of configuration that determines which resources and +// services to monitor for availability. These configurations can also be +// configured interactively by navigating to the [Cloud console] +// (https://console.cloud.google.com (at https://console.cloud.google.com)), selecting the appropriate project, +// clicking on “Monitoring” on the left-hand side to navigate to Cloud +// Monitoring, and then clicking on “Uptime”. +func NewUptimeCheckClient(ctx context.Context, opts ...option.ClientOption) (*UptimeCheckClient, error) { + clientOpts := defaultUptimeCheckGRPCClientOptions() + if newUptimeCheckClientHook != nil { + hookOpts, err := newUptimeCheckClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := UptimeCheckClient{CallOptions: defaultUptimeCheckCallOptions()} + + c := &uptimeCheckGRPCClient{ + connPool: connPool, + uptimeCheckClient: monitoringpb.NewUptimeCheckServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *uptimeCheckGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *uptimeCheckGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *uptimeCheckGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *uptimeCheckGRPCClient) ListUptimeCheckConfigs(ctx context.Context, req *monitoringpb.ListUptimeCheckConfigsRequest, opts ...gax.CallOption) *UptimeCheckConfigIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListUptimeCheckConfigs[0:len((*c.CallOptions).ListUptimeCheckConfigs):len((*c.CallOptions).ListUptimeCheckConfigs)], opts...) + it := &UptimeCheckConfigIterator{} + req = proto.Clone(req).(*monitoringpb.ListUptimeCheckConfigsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckConfig, string, error) { + resp := &monitoringpb.ListUptimeCheckConfigsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.ListUptimeCheckConfigs(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetUptimeCheckConfigs(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *uptimeCheckGRPCClient) GetUptimeCheckConfig(ctx context.Context, req *monitoringpb.GetUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetUptimeCheckConfig[0:len((*c.CallOptions).GetUptimeCheckConfig):len((*c.CallOptions).GetUptimeCheckConfig)], opts...) + var resp *monitoringpb.UptimeCheckConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.GetUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *uptimeCheckGRPCClient) CreateUptimeCheckConfig(ctx context.Context, req *monitoringpb.CreateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateUptimeCheckConfig[0:len((*c.CallOptions).CreateUptimeCheckConfig):len((*c.CallOptions).CreateUptimeCheckConfig)], opts...) + var resp *monitoringpb.UptimeCheckConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.CreateUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *uptimeCheckGRPCClient) UpdateUptimeCheckConfig(ctx context.Context, req *monitoringpb.UpdateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "uptime_check_config.name", url.QueryEscape(req.GetUptimeCheckConfig().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateUptimeCheckConfig[0:len((*c.CallOptions).UpdateUptimeCheckConfig):len((*c.CallOptions).UpdateUptimeCheckConfig)], opts...) + var resp *monitoringpb.UptimeCheckConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.UpdateUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *uptimeCheckGRPCClient) DeleteUptimeCheckConfig(ctx context.Context, req *monitoringpb.DeleteUptimeCheckConfigRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteUptimeCheckConfig[0:len((*c.CallOptions).DeleteUptimeCheckConfig):len((*c.CallOptions).DeleteUptimeCheckConfig)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.uptimeCheckClient.DeleteUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *uptimeCheckGRPCClient) ListUptimeCheckIps(ctx context.Context, req *monitoringpb.ListUptimeCheckIpsRequest, opts ...gax.CallOption) *UptimeCheckIpIterator { + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, c.xGoogHeaders...) + opts = append((*c.CallOptions).ListUptimeCheckIps[0:len((*c.CallOptions).ListUptimeCheckIps):len((*c.CallOptions).ListUptimeCheckIps)], opts...) + it := &UptimeCheckIpIterator{} + req = proto.Clone(req).(*monitoringpb.ListUptimeCheckIpsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckIp, string, error) { + resp := &monitoringpb.ListUptimeCheckIpsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.ListUptimeCheckIps(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetUptimeCheckIps(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/version.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/version.go new file mode 100644 index 00000000000..accff0f5e47 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/apiv3/v2/version.go @@ -0,0 +1,23 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by gapicgen. DO NOT EDIT. 
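The generated client above follows the standard GAPIC shape: the exported wrapper methods delegate to the gRPC transport, the per-method timeout/retry defaults live in defaultUptimeCheckCallOptions, and the List methods hand back iterators driven by google.golang.org/api/iterator. For reviewers, a minimal consumption sketch (not part of this patch): the parent name is a placeholder, the iterator's Next comes from the package's generated auxiliary types, and the per-call gax.WithTimeout simply appends to the 30s default shown above.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	monitoring "cloud.google.com/go/monitoring/apiv3/v2"
	monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()

	// Dials the pooled gRPC transport configured by defaultUptimeCheckGRPCClientOptions
	// (endpoint template, scopes, JWT-with-scope, mTLS endpoint).
	client, err := monitoring.NewUptimeCheckClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// "projects/my-project" is a placeholder parent.
	req := &monitoringpb.ListUptimeCheckConfigsRequest{Parent: "projects/my-project"}

	// Per-call options are appended after the method defaults, so this timeout
	// overrides the generated 30s default for this invocation only.
	it := client.ListUptimeCheckConfigs(ctx, req, gax.WithTimeout(10*time.Second))
	for {
		cfg, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(cfg.GetName())
	}
}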
+ +package monitoring + +import "cloud.google.com/go/monitoring/internal" + +func init() { + versionClient = internal.Version +} diff --git a/terraform/providers/google/vendor/cloud.google.com/go/monitoring/internal/version.go b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/internal/version.go new file mode 100644 index 00000000000..670f0797ee6 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/monitoring/internal/version.go @@ -0,0 +1,18 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +// Version is the current tagged release of the library. +const Version = "1.21.1" diff --git a/terraform/providers/google/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json b/terraform/providers/google/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json index d7ca5aa8a36..73021df5391 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json +++ b/terraform/providers/google/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json @@ -27,6 +27,9 @@ "apigeeregistry": { "component": "apigeeregistry" }, + "apihub": { + "component": "apihub" + }, "apikeys": { "component": "apikeys" }, @@ -306,6 +309,9 @@ "privatecatalog": { "component": "privatecatalog" }, + "privilegedaccessmanager": { + "component": "privilegedaccessmanager" + }, "rapidmigrationassessment": { "component": "rapidmigrationassessment" }, diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload.go index d5734fbf9fe..bfe4f10ef60 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload.go @@ -31,6 +31,7 @@ type Workload struct { ComplianceRegime *WorkloadComplianceRegimeEnum `json:"complianceRegime"` CreateTime *string `json:"createTime"` BillingAccount *string `json:"billingAccount"` + PartnerServicesBillingAccount *string `json:"partnerServicesBillingAccount"` Labels map[string]string `json:"labels"` ProvisionedResourcesParent *string `json:"provisionedResourcesParent"` KmsSettings *WorkloadKmsSettings `json:"kmsSettings"` @@ -42,6 +43,7 @@ type Workload struct { CompliantButDisallowedServices []string `json:"compliantButDisallowedServices"` Partner *WorkloadPartnerEnum `json:"partner"` PartnerPermissions *WorkloadPartnerPermissions `json:"partnerPermissions"` + WorkloadOptions *WorkloadWorkloadOptions `json:"workloadOptions"` EkmProvisioningResponse *WorkloadEkmProvisioningResponse `json:"ekmProvisioningResponse"` ViolationNotificationsEnabled *bool 
`json:"violationNotificationsEnabled"` Organization *string `json:"organization"` @@ -94,7 +96,7 @@ func (v WorkloadComplianceRegimeEnum) Validate() error { // Empty enum is okay. return nil } - for _, s := range []string{"COMPLIANCE_REGIME_UNSPECIFIED", "IL4", "CJIS", "FEDRAMP_HIGH", "FEDRAMP_MODERATE", "US_REGIONAL_ACCESS", "HIPAA", "HITRUST", "EU_REGIONS_AND_SUPPORT", "CA_REGIONS_AND_SUPPORT", "ITAR", "AU_REGIONS_AND_US_SUPPORT", "ASSURED_WORKLOADS_FOR_PARTNERS", "ISR_REGIONS", "ISR_REGIONS_AND_SUPPORT", "CA_PROTECTED_B", "IL5", "IL2", "JP_REGIONS_AND_SUPPORT"} { + for _, s := range []string{"COMPLIANCE_REGIME_UNSPECIFIED", "IL4", "CJIS", "FEDRAMP_HIGH", "FEDRAMP_MODERATE", "US_REGIONAL_ACCESS", "HIPAA", "HITRUST", "EU_REGIONS_AND_SUPPORT", "CA_REGIONS_AND_SUPPORT", "ITAR", "AU_REGIONS_AND_US_SUPPORT", "ASSURED_WORKLOADS_FOR_PARTNERS", "ISR_REGIONS", "ISR_REGIONS_AND_SUPPORT", "CA_PROTECTED_B", "IL5", "IL2", "JP_REGIONS_AND_SUPPORT", "KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS", "REGIONAL_CONTROLS", "HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS", "HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_WITH_US_SUPPORT"} { if string(v) == s { return nil } @@ -229,7 +231,7 @@ func (v WorkloadPartnerEnum) Validate() error { // Empty enum is okay. return nil } - for _, s := range []string{"PARTNER_UNSPECIFIED", "LOCAL_CONTROLS_BY_S3NS", "SOVEREIGN_CONTROLS_BY_T_SYSTEMS", "SOVEREIGN_CONTROLS_BY_SIA_MINSAIT", "SOVEREIGN_CONTROLS_BY_PSN"} { + for _, s := range []string{"PARTNER_UNSPECIFIED", "LOCAL_CONTROLS_BY_S3NS", "SOVEREIGN_CONTROLS_BY_T_SYSTEMS", "SOVEREIGN_CONTROLS_BY_SIA_MINSAIT", "SOVEREIGN_CONTROLS_BY_PSN", "SOVEREIGN_CONTROLS_BY_CNTXT", "SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM"} { if string(v) == s { return nil } @@ -241,6 +243,33 @@ func (v WorkloadPartnerEnum) Validate() error { } } +// The enum WorkloadWorkloadOptionsKajEnrollmentTypeEnum. +type WorkloadWorkloadOptionsKajEnrollmentTypeEnum string + +// WorkloadWorkloadOptionsKajEnrollmentTypeEnumRef returns a *WorkloadWorkloadOptionsKajEnrollmentTypeEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkloadWorkloadOptionsKajEnrollmentTypeEnumRef(s string) *WorkloadWorkloadOptionsKajEnrollmentTypeEnum { + v := WorkloadWorkloadOptionsKajEnrollmentTypeEnum(s) + return &v +} + +func (v WorkloadWorkloadOptionsKajEnrollmentTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"KAJ_ENROLLMENT_TYPE_UNSPECIFIED", "FULL_KAJ", "EKM_ONLY", "KEY_ACCESS_TRANSPARENCY_OFF"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkloadWorkloadOptionsKajEnrollmentTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + // The enum WorkloadEkmProvisioningResponseEkmProvisioningStateEnum. 
type WorkloadEkmProvisioningResponseEkmProvisioningStateEnum string @@ -622,6 +651,52 @@ func (r *WorkloadPartnerPermissions) HashCode() string { return fmt.Sprintf("%x", hash) } +type WorkloadWorkloadOptions struct { + empty bool `json:"-"` + KajEnrollmentType *WorkloadWorkloadOptionsKajEnrollmentTypeEnum `json:"kajEnrollmentType"` +} + +type jsonWorkloadWorkloadOptions WorkloadWorkloadOptions + +func (r *WorkloadWorkloadOptions) UnmarshalJSON(data []byte) error { + var res jsonWorkloadWorkloadOptions + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkloadWorkloadOptions + } else { + + r.KajEnrollmentType = res.KajEnrollmentType + + } + return nil +} + +// This object is used to assert a desired state where this WorkloadWorkloadOptions is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkloadWorkloadOptions *WorkloadWorkloadOptions = &WorkloadWorkloadOptions{empty: true} + +func (r *WorkloadWorkloadOptions) Empty() bool { + return r.empty +} + +func (r *WorkloadWorkloadOptions) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkloadWorkloadOptions) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + type WorkloadEkmProvisioningResponse struct { empty bool `json:"-"` EkmProvisioningState *WorkloadEkmProvisioningResponseEkmProvisioningStateEnum `json:"ekmProvisioningState"` @@ -696,6 +771,7 @@ func (r *Workload) ID() (string, error) { "compliance_regime": dcl.ValueOrEmptyString(nr.ComplianceRegime), "create_time": dcl.ValueOrEmptyString(nr.CreateTime), "billing_account": dcl.ValueOrEmptyString(nr.BillingAccount), + "partner_services_billing_account": dcl.ValueOrEmptyString(nr.PartnerServicesBillingAccount), "labels": dcl.ValueOrEmptyString(nr.Labels), "provisioned_resources_parent": dcl.ValueOrEmptyString(nr.ProvisionedResourcesParent), "kms_settings": dcl.ValueOrEmptyString(nr.KmsSettings), @@ -707,6 +783,7 @@ func (r *Workload) ID() (string, error) { "compliant_but_disallowed_services": dcl.ValueOrEmptyString(nr.CompliantButDisallowedServices), "partner": dcl.ValueOrEmptyString(nr.Partner), "partner_permissions": dcl.ValueOrEmptyString(nr.PartnerPermissions), + "workload_options": dcl.ValueOrEmptyString(nr.WorkloadOptions), "ekm_provisioning_response": dcl.ValueOrEmptyString(nr.EkmProvisioningResponse), "violation_notifications_enabled": dcl.ValueOrEmptyString(nr.ViolationNotificationsEnabled), "organization": dcl.ValueOrEmptyString(nr.Organization), diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload.yaml index 4558f882149..df24c5c5a76 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload.yaml +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload.yaml @@ -99,7 +99,9 @@ components: workload. 
Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, - ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT' + ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, + KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS, + HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_WITH_US_SUPPORT' x-kubernetes-immutable: true enum: - COMPLIANCE_REGIME_UNSPECIFIED @@ -121,6 +123,10 @@ components: - IL5 - IL2 - JP_REGIONS_AND_SUPPORT + - KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS + - REGIONAL_CONTROLS + - HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS + - HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_WITH_US_SUPPORT complianceStatus: type: object x-dcl-go-name: ComplianceStatus @@ -312,7 +318,8 @@ components: x-dcl-go-type: WorkloadPartnerEnum description: 'Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, - SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN' + SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, + SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM' x-kubernetes-immutable: true enum: - PARTNER_UNSPECIFIED @@ -320,6 +327,8 @@ components: - SOVEREIGN_CONTROLS_BY_T_SYSTEMS - SOVEREIGN_CONTROLS_BY_SIA_MINSAIT - SOVEREIGN_CONTROLS_BY_PSN + - SOVEREIGN_CONTROLS_BY_CNTXT + - SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM partnerPermissions: type: object x-dcl-go-name: PartnerPermissions @@ -344,6 +353,15 @@ components: x-dcl-go-name: ServiceAccessApprover description: Optional. Allow partner to view access approval logs. x-kubernetes-immutable: true + partnerServicesBillingAccount: + type: string + x-dcl-go-name: PartnerServicesBillingAccount + description: Optional. Input only. Billing account necessary for purchasing + services from Sovereign Partners. This field is required for creating + SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' + IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. + x-kubernetes-immutable: true + x-dcl-mutable-unreadable: true provisionedResourcesParent: type: string x-dcl-go-name: ProvisionedResourcesParent @@ -476,3 +494,28 @@ components: call will not be honored. This will always be true while creating the workload. x-kubernetes-immutable: true + workloadOptions: + type: object + x-dcl-go-name: WorkloadOptions + x-dcl-go-type: WorkloadWorkloadOptions + description: Optional. Used to specify certain options for a workload during + workload creation - currently only supporting KAT Optionality for Regional + Controls workloads. + x-kubernetes-immutable: true + x-dcl-mutable-unreadable: true + properties: + kajEnrollmentType: + type: string + x-dcl-go-name: KajEnrollmentType + x-dcl-go-type: WorkloadWorkloadOptionsKajEnrollmentTypeEnum + description: 'Indicates type of KAJ enrollment for the workload. Currently, + only specifying KEY_ACCESS_TRANSPARENCY_OFF is implemented to not + enroll in KAT-level KAJ enrollment for Regional Controls workloads.
+ Possible values: KAJ_ENROLLMENT_TYPE_UNSPECIFIED, FULL_KAJ, EKM_ONLY, + KEY_ACCESS_TRANSPARENCY_OFF' + x-kubernetes-immutable: true + enum: + - KAJ_ENROLLMENT_TYPE_UNSPECIFIED + - FULL_KAJ + - EKM_ONLY + - KEY_ACCESS_TRANSPARENCY_OFF diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload_internal.go index 1bf5c71e9b3..3dfeaaccd2c 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload_internal.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload_internal.go @@ -59,6 +59,11 @@ func (r *Workload) validate() error { return err } } + if !dcl.IsEmptyValueIndirect(r.WorkloadOptions) { + if err := r.WorkloadOptions.validate(); err != nil { + return err + } + } if !dcl.IsEmptyValueIndirect(r.EkmProvisioningResponse) { if err := r.EkmProvisioningResponse.validate(); err != nil { return err @@ -90,6 +95,9 @@ func (r *WorkloadComplianceStatus) validate() error { func (r *WorkloadPartnerPermissions) validate() error { return nil } +func (r *WorkloadWorkloadOptions) validate() error { + return nil +} func (r *WorkloadEkmProvisioningResponse) validate() error { return nil } @@ -508,6 +516,7 @@ func canonicalizeWorkloadDesiredState(rawDesired, rawInitial *Workload, opts ... rawDesired.SaaEnrollmentResponse = canonicalizeWorkloadSaaEnrollmentResponse(rawDesired.SaaEnrollmentResponse, nil, opts...) rawDesired.ComplianceStatus = canonicalizeWorkloadComplianceStatus(rawDesired.ComplianceStatus, nil, opts...) rawDesired.PartnerPermissions = canonicalizeWorkloadPartnerPermissions(rawDesired.PartnerPermissions, nil, opts...) + rawDesired.WorkloadOptions = canonicalizeWorkloadWorkloadOptions(rawDesired.WorkloadOptions, nil, opts...) rawDesired.EkmProvisioningResponse = canonicalizeWorkloadEkmProvisioningResponse(rawDesired.EkmProvisioningResponse, nil, opts...) return rawDesired, nil @@ -535,6 +544,11 @@ func canonicalizeWorkloadDesiredState(rawDesired, rawInitial *Workload, opts ... } else { canonicalDesired.BillingAccount = rawDesired.BillingAccount } + if dcl.StringCanonicalize(rawDesired.PartnerServicesBillingAccount, rawInitial.PartnerServicesBillingAccount) { + canonicalDesired.PartnerServicesBillingAccount = rawInitial.PartnerServicesBillingAccount + } else { + canonicalDesired.PartnerServicesBillingAccount = rawDesired.PartnerServicesBillingAccount + } if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) { // Desired and initial values are equivalent, so set canonical desired value to initial value. canonicalDesired.Labels = rawInitial.Labels @@ -560,6 +574,7 @@ func canonicalizeWorkloadDesiredState(rawDesired, rawInitial *Workload, opts ... canonicalDesired.Partner = rawDesired.Partner } canonicalDesired.PartnerPermissions = canonicalizeWorkloadPartnerPermissions(rawDesired.PartnerPermissions, rawInitial.PartnerPermissions, opts...) + canonicalDesired.WorkloadOptions = canonicalizeWorkloadWorkloadOptions(rawDesired.WorkloadOptions, rawInitial.WorkloadOptions, opts...) 
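Since workloadOptions is input only and immutable (x-dcl-mutable-unreadable in the YAML above, Ignore/RequiresRecreate in the diff logic further down), the canonicalize step here keeps the desired value instead of reconciling it against a server echo. A short sketch of the exported surface this adds (not part of this patch; it assumes the library is imported at its upstream path rather than from the vendor tree):

package main

import (
	"fmt"
	"log"

	"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads"
)

func main() {
	// KEY_ACCESS_TRANSPARENCY_OFF is currently the only enrollment type the
	// field description says is honored, for Regional Controls workloads.
	kaj := assuredworkloads.WorkloadWorkloadOptionsKajEnrollmentTypeEnumRef("KEY_ACCESS_TRANSPARENCY_OFF")
	if err := kaj.Validate(); err != nil {
		// Validate rejects any value outside the generated enum list;
		// the empty string is treated as unset and passes.
		log.Fatal(err)
	}

	opts := assuredworkloads.WorkloadWorkloadOptions{KajEnrollmentType: kaj}
	fmt.Println(opts.Empty())    // false: only EmptyWorkloadWorkloadOptions reports true
	fmt.Println(opts.HashCode()) // sha256 over the printed resource body
}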
if dcl.BoolCanonicalize(rawDesired.ViolationNotificationsEnabled, rawInitial.ViolationNotificationsEnabled) { canonicalDesired.ViolationNotificationsEnabled = rawInitial.ViolationNotificationsEnabled } else { @@ -615,6 +630,12 @@ func canonicalizeWorkloadNewState(c *Client, rawNew, rawDesired *Workload) (*Wor rawNew.BillingAccount = rawDesired.BillingAccount } + if dcl.IsEmptyValueIndirect(rawNew.PartnerServicesBillingAccount) && dcl.IsEmptyValueIndirect(rawDesired.PartnerServicesBillingAccount) { + rawNew.PartnerServicesBillingAccount = rawDesired.PartnerServicesBillingAccount + } else { + rawNew.PartnerServicesBillingAccount = rawDesired.PartnerServicesBillingAccount + } + if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) { rawNew.Labels = rawDesired.Labels } else { @@ -670,6 +691,8 @@ func canonicalizeWorkloadNewState(c *Client, rawNew, rawDesired *Workload) (*Wor rawNew.PartnerPermissions = canonicalizeNewWorkloadPartnerPermissions(c, rawDesired.PartnerPermissions, rawNew.PartnerPermissions) } + rawNew.WorkloadOptions = rawDesired.WorkloadOptions + if dcl.IsEmptyValueIndirect(rawNew.EkmProvisioningResponse) && dcl.IsEmptyValueIndirect(rawDesired.EkmProvisioningResponse) { rawNew.EkmProvisioningResponse = rawDesired.EkmProvisioningResponse } else { @@ -1444,6 +1467,121 @@ func canonicalizeNewWorkloadPartnerPermissionsSlice(c *Client, des, nw []Workloa return items } +func canonicalizeWorkloadWorkloadOptions(des, initial *WorkloadWorkloadOptions, opts ...dcl.ApplyOption) *WorkloadWorkloadOptions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkloadWorkloadOptions{} + + if dcl.IsZeroValue(des.KajEnrollmentType) || (dcl.IsEmptyValueIndirect(des.KajEnrollmentType) && dcl.IsEmptyValueIndirect(initial.KajEnrollmentType)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.KajEnrollmentType = initial.KajEnrollmentType + } else { + cDes.KajEnrollmentType = des.KajEnrollmentType + } + + return cDes +} + +func canonicalizeWorkloadWorkloadOptionsSlice(des, initial []WorkloadWorkloadOptions, opts ...dcl.ApplyOption) []WorkloadWorkloadOptions { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkloadWorkloadOptions, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkloadWorkloadOptions(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkloadWorkloadOptions, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkloadWorkloadOptions(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkloadWorkloadOptions(c *Client, des, nw *WorkloadWorkloadOptions) *WorkloadWorkloadOptions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkloadWorkloadOptions while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkloadWorkloadOptionsSet(c *Client, des, nw []WorkloadWorkloadOptions) []WorkloadWorkloadOptions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []WorkloadWorkloadOptions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkloadWorkloadOptionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkloadWorkloadOptions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkloadWorkloadOptionsSlice(c *Client, des, nw []WorkloadWorkloadOptions) []WorkloadWorkloadOptions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkloadWorkloadOptions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkloadWorkloadOptions(c, &d, &n)) + } + + return items +} + func canonicalizeWorkloadEkmProvisioningResponse(des, initial *WorkloadEkmProvisioningResponse, opts ...dcl.ApplyOption) *WorkloadEkmProvisioningResponse { if des == nil { return initial @@ -1631,6 +1769,13 @@ func diffWorkload(c *Client, desired, actual *Workload, opts ...dcl.ApplyOption) newDiffs = append(newDiffs, ds...) } + if ds, err := dcl.Diff(desired.PartnerServicesBillingAccount, actual.PartnerServicesBillingAccount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PartnerServicesBillingAccount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateWorkloadUpdateWorkloadOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { if err != nil { return nil, err @@ -1708,6 +1853,13 @@ func diffWorkload(c *Client, desired, actual *Workload, opts ...dcl.ApplyOption) newDiffs = append(newDiffs, ds...) } + if ds, err := dcl.Diff(desired.WorkloadOptions, actual.WorkloadOptions, dcl.DiffInfo{Ignore: true, ObjectFunction: compareWorkloadWorkloadOptionsNewStyle, EmptyObject: EmptyWorkloadWorkloadOptions, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WorkloadOptions")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + if ds, err := dcl.Diff(desired.EkmProvisioningResponse, actual.EkmProvisioningResponse, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareWorkloadEkmProvisioningResponseNewStyle, EmptyObject: EmptyWorkloadEkmProvisioningResponse, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EkmProvisioningResponse")); len(ds) != 0 || err != nil { if err != nil { return nil, err @@ -1971,6 +2123,35 @@ func compareWorkloadPartnerPermissionsNewStyle(d, a interface{}, fn dcl.FieldNam return diffs, nil } +func compareWorkloadWorkloadOptionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkloadWorkloadOptions) + if !ok { + desiredNotPointer, ok := d.(WorkloadWorkloadOptions) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkloadWorkloadOptions or *WorkloadWorkloadOptions", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkloadWorkloadOptions) + if !ok { + actualNotPointer, ok := a.(WorkloadWorkloadOptions) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkloadWorkloadOptions", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.KajEnrollmentType, actual.KajEnrollmentType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KajEnrollmentType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + func compareWorkloadEkmProvisioningResponseNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { var diffs []*dcl.FieldDiff @@ -2022,6 +2203,7 @@ func (r *Workload) urlNormalized() *Workload { normalized.Name = dcl.SelfLinkToName(r.Name) normalized.DisplayName = dcl.SelfLinkToName(r.DisplayName) normalized.BillingAccount = dcl.SelfLinkToName(r.BillingAccount) + normalized.PartnerServicesBillingAccount = dcl.SelfLinkToName(r.PartnerServicesBillingAccount) normalized.ProvisionedResourcesParent = dcl.SelfLinkToName(r.ProvisionedResourcesParent) normalized.Organization = dcl.SelfLinkToName(r.Organization) normalized.Location = dcl.SelfLinkToName(r.Location) @@ -2092,6 +2274,9 @@ func expandWorkload(c *Client, f *Workload) (map[string]interface{}, error) { if v := f.BillingAccount; dcl.ValueShouldBeSent(v) { m["billingAccount"] = v } + if v := f.PartnerServicesBillingAccount; dcl.ValueShouldBeSent(v) { + m["partnerServicesBillingAccount"] = v + } if v := f.Labels; dcl.ValueShouldBeSent(v) { m["labels"] = v } @@ -2119,6 +2304,11 @@ func expandWorkload(c *Client, f *Workload) (map[string]interface{}, error) { } else if !dcl.IsEmptyValueIndirect(v) { m["partnerPermissions"] = v } + if v, err := expandWorkloadWorkloadOptions(c, f.WorkloadOptions, res); err != nil { + return nil, fmt.Errorf("error expanding WorkloadOptions into workloadOptions: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["workloadOptions"] = v + } if v := f.ViolationNotificationsEnabled; dcl.ValueShouldBeSent(v) { m["violationNotificationsEnabled"] = v } @@ -2154,6 +2344,7 @@ func flattenWorkload(c *Client, i interface{}, res *Workload) *Workload { resultRes.ComplianceRegime = flattenWorkloadComplianceRegimeEnum(m["complianceRegime"]) resultRes.CreateTime = dcl.FlattenString(m["createTime"]) resultRes.BillingAccount = dcl.FlattenString(m["billingAccount"]) + resultRes.PartnerServicesBillingAccount = dcl.FlattenString(m["partnerServicesBillingAccount"]) resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"]) resultRes.ProvisionedResourcesParent = 
dcl.FlattenSecretValue(m["provisionedResourcesParent"]) resultRes.KmsSettings = flattenWorkloadKmsSettings(c, m["kmsSettings"], res) @@ -2165,6 +2356,7 @@ func flattenWorkload(c *Client, i interface{}, res *Workload) *Workload { resultRes.CompliantButDisallowedServices = dcl.FlattenStringSlice(m["compliantButDisallowedServices"]) resultRes.Partner = flattenWorkloadPartnerEnum(m["partner"]) resultRes.PartnerPermissions = flattenWorkloadPartnerPermissions(c, m["partnerPermissions"], res) + resultRes.WorkloadOptions = flattenWorkloadWorkloadOptions(c, m["workloadOptions"], res) resultRes.EkmProvisioningResponse = flattenWorkloadEkmProvisioningResponse(c, m["ekmProvisioningResponse"], res) resultRes.ViolationNotificationsEnabled = dcl.FlattenBool(m["violationNotificationsEnabled"]) resultRes.Organization = dcl.FlattenString(m["organization"]) @@ -2889,6 +3081,120 @@ func flattenWorkloadPartnerPermissions(c *Client, i interface{}, res *Workload) return r } +// expandWorkloadWorkloadOptionsMap expands the contents of WorkloadWorkloadOptions into a JSON +// request object. +func expandWorkloadWorkloadOptionsMap(c *Client, f map[string]WorkloadWorkloadOptions, res *Workload) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkloadWorkloadOptions(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkloadWorkloadOptionsSlice expands the contents of WorkloadWorkloadOptions into a JSON +// request object. +func expandWorkloadWorkloadOptionsSlice(c *Client, f []WorkloadWorkloadOptions, res *Workload) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkloadWorkloadOptions(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkloadWorkloadOptionsMap flattens the contents of WorkloadWorkloadOptions from a JSON +// response object. +func flattenWorkloadWorkloadOptionsMap(c *Client, i interface{}, res *Workload) map[string]WorkloadWorkloadOptions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadWorkloadOptions{} + } + + if len(a) == 0 { + return map[string]WorkloadWorkloadOptions{} + } + + items := make(map[string]WorkloadWorkloadOptions) + for k, item := range a { + items[k] = *flattenWorkloadWorkloadOptions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkloadWorkloadOptionsSlice flattens the contents of WorkloadWorkloadOptions from a JSON +// response object. +func flattenWorkloadWorkloadOptionsSlice(c *Client, i interface{}, res *Workload) []WorkloadWorkloadOptions { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadWorkloadOptions{} + } + + if len(a) == 0 { + return []WorkloadWorkloadOptions{} + } + + items := make([]WorkloadWorkloadOptions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadWorkloadOptions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkloadWorkloadOptions expands an instance of WorkloadWorkloadOptions into a JSON +// request object. 
+func expandWorkloadWorkloadOptions(c *Client, f *WorkloadWorkloadOptions, res *Workload) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.KajEnrollmentType; !dcl.IsEmptyValueIndirect(v) { + m["kajEnrollmentType"] = v + } + + return m, nil +} + +// flattenWorkloadWorkloadOptions flattens an instance of WorkloadWorkloadOptions from a JSON +// response object. +func flattenWorkloadWorkloadOptions(c *Client, i interface{}, res *Workload) *WorkloadWorkloadOptions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkloadWorkloadOptions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkloadWorkloadOptions + } + r.KajEnrollmentType = flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnum(m["kajEnrollmentType"]) + + return r +} + // expandWorkloadEkmProvisioningResponseMap expands the contents of WorkloadEkmProvisioningResponse into a JSON // request object. func expandWorkloadEkmProvisioningResponseMap(c *Client, f map[string]WorkloadEkmProvisioningResponse, res *Workload) (map[string]interface{}, error) { @@ -3368,6 +3674,57 @@ func flattenWorkloadPartnerEnum(i interface{}) *WorkloadPartnerEnum { return WorkloadPartnerEnumRef(s) } +// flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnumMap flattens the contents of WorkloadWorkloadOptionsKajEnrollmentTypeEnum from a JSON +// response object. +func flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnumMap(c *Client, i interface{}, res *Workload) map[string]WorkloadWorkloadOptionsKajEnrollmentTypeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadWorkloadOptionsKajEnrollmentTypeEnum{} + } + + if len(a) == 0 { + return map[string]WorkloadWorkloadOptionsKajEnrollmentTypeEnum{} + } + + items := make(map[string]WorkloadWorkloadOptionsKajEnrollmentTypeEnum) + for k, item := range a { + items[k] = *flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnum(item.(interface{})) + } + + return items +} + +// flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnumSlice flattens the contents of WorkloadWorkloadOptionsKajEnrollmentTypeEnum from a JSON +// response object. +func flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnumSlice(c *Client, i interface{}, res *Workload) []WorkloadWorkloadOptionsKajEnrollmentTypeEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadWorkloadOptionsKajEnrollmentTypeEnum{} + } + + if len(a) == 0 { + return []WorkloadWorkloadOptionsKajEnrollmentTypeEnum{} + } + + items := make([]WorkloadWorkloadOptionsKajEnrollmentTypeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnum asserts that an interface is a string, and returns a +// pointer to a *WorkloadWorkloadOptionsKajEnrollmentTypeEnum with the same value as that string. +func flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnum(i interface{}) *WorkloadWorkloadOptionsKajEnrollmentTypeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkloadWorkloadOptionsKajEnrollmentTypeEnumRef(s) +} + // flattenWorkloadEkmProvisioningResponseEkmProvisioningStateEnumMap flattens the contents of WorkloadEkmProvisioningResponseEkmProvisioningStateEnum from a JSON // response object. 
func flattenWorkloadEkmProvisioningResponseEkmProvisioningStateEnumMap(c *Client, i interface{}, res *Workload) map[string]WorkloadEkmProvisioningResponseEkmProvisioningStateEnum { @@ -3659,6 +4016,17 @@ func extractWorkloadFields(r *Workload) error { if !dcl.IsEmptyValueIndirect(vPartnerPermissions) { r.PartnerPermissions = vPartnerPermissions } + vWorkloadOptions := r.WorkloadOptions + if vWorkloadOptions == nil { + // note: explicitly not the empty object. + vWorkloadOptions = &WorkloadWorkloadOptions{} + } + if err := extractWorkloadWorkloadOptionsFields(r, vWorkloadOptions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWorkloadOptions) { + r.WorkloadOptions = vWorkloadOptions + } vEkmProvisioningResponse := r.EkmProvisioningResponse if vEkmProvisioningResponse == nil { // note: explicitly not the empty object. @@ -3690,6 +4058,9 @@ func extractWorkloadComplianceStatusFields(r *Workload, o *WorkloadComplianceSta func extractWorkloadPartnerPermissionsFields(r *Workload, o *WorkloadPartnerPermissions) error { return nil } +func extractWorkloadWorkloadOptionsFields(r *Workload, o *WorkloadWorkloadOptions) error { + return nil +} func extractWorkloadEkmProvisioningResponseFields(r *Workload, o *WorkloadEkmProvisioningResponse) error { return nil } @@ -3739,6 +4110,17 @@ func postReadExtractWorkloadFields(r *Workload) error { if !dcl.IsEmptyValueIndirect(vPartnerPermissions) { r.PartnerPermissions = vPartnerPermissions } + vWorkloadOptions := r.WorkloadOptions + if vWorkloadOptions == nil { + // note: explicitly not the empty object. + vWorkloadOptions = &WorkloadWorkloadOptions{} + } + if err := postReadExtractWorkloadWorkloadOptionsFields(r, vWorkloadOptions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWorkloadOptions) { + r.WorkloadOptions = vWorkloadOptions + } vEkmProvisioningResponse := r.EkmProvisioningResponse if vEkmProvisioningResponse == nil { // note: explicitly not the empty object. @@ -3770,6 +4152,9 @@ func postReadExtractWorkloadComplianceStatusFields(r *Workload, o *WorkloadCompl func postReadExtractWorkloadPartnerPermissionsFields(r *Workload, o *WorkloadPartnerPermissions) error { return nil } +func postReadExtractWorkloadWorkloadOptionsFields(r *Workload, o *WorkloadWorkloadOptions) error { + return nil +} func postReadExtractWorkloadEkmProvisioningResponseFields(r *Workload, o *WorkloadEkmProvisioningResponse) error { return nil } diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload_schema.go index 95c632cd490..93180338838 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload_schema.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload_schema.go @@ -129,7 +129,7 @@ func DCLWorkloadSchema() *dcl.Schema { Type: "string", GoName: "ComplianceRegime", GoType: "WorkloadComplianceRegimeEnum", - Description: "Required. Immutable. Compliance Regime associated with this workload. 
Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT", + Description: "Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_WITH_US_SUPPORT", Immutable: true, Enum: []string{ "COMPLIANCE_REGIME_UNSPECIFIED", @@ -151,6 +151,10 @@ func DCLWorkloadSchema() *dcl.Schema { "IL5", "IL2", "JP_REGIONS_AND_SUPPORT", + "KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS", + "REGIONAL_CONTROLS", + "HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS", + "HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_WITH_US_SUPPORT", }, }, "complianceStatus": &dcl.Property{ @@ -352,7 +356,7 @@ func DCLWorkloadSchema() *dcl.Schema { Type: "string", GoName: "Partner", GoType: "WorkloadPartnerEnum", - Description: "Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN", + Description: "Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM", Immutable: true, Enum: []string{ "PARTNER_UNSPECIFIED", @@ -360,6 +364,8 @@ func DCLWorkloadSchema() *dcl.Schema { "SOVEREIGN_CONTROLS_BY_T_SYSTEMS", "SOVEREIGN_CONTROLS_BY_SIA_MINSAIT", "SOVEREIGN_CONTROLS_BY_PSN", + "SOVEREIGN_CONTROLS_BY_CNTXT", + "SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM", }, }, "partnerPermissions": &dcl.Property{ @@ -389,6 +395,13 @@ func DCLWorkloadSchema() *dcl.Schema { }, }, }, + "partnerServicesBillingAccount": &dcl.Property{ + Type: "string", + GoName: "PartnerServicesBillingAccount", + Description: "Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC.", + Immutable: true, + Unreadable: true, + }, "provisionedResourcesParent": &dcl.Property{ Type: "string", GoName: "ProvisionedResourcesParent", @@ -519,6 +532,29 @@ func DCLWorkloadSchema() *dcl.Schema { Description: "Optional. Indicates whether the e-mail notification for a violation is enabled for a workload. This value will be by default True, and if not present will be considered as true. This should only be updated via updateWorkload call. Any Changes to this field during the createWorkload call will not be honored. 
This will always be true while creating the workload.", Immutable: true, }, + "workloadOptions": &dcl.Property{ + Type: "object", + GoName: "WorkloadOptions", + GoType: "WorkloadWorkloadOptions", + Description: "Optional. Used to specify certain options for a workload during workload creation - currently only supporting KAT Optionality for Regional Controls workloads.", + Immutable: true, + Unreadable: true, + Properties: map[string]*dcl.Property{ + "kajEnrollmentType": &dcl.Property{ + Type: "string", + GoName: "KajEnrollmentType", + GoType: "WorkloadWorkloadOptionsKajEnrollmentTypeEnum", + Description: "Indicates type of KAJ enrollment for the workload. Currently, only specifying KEY_ACCESS_TRANSPARENCY_OFF is implemented to not enroll in KAT-level KAJ enrollment for Regional Controls workloads. Possible values: KAJ_ENROLLMENT_TYPE_UNSPECIFIED, FULL_KAJ, EKM_ONLY, KEY_ACCESS_TRANSPARENCY_OFF", + Immutable: true, + Enum: []string{ + "KAJ_ENROLLMENT_TYPE_UNSPECIFIED", + "FULL_KAJ", + "EKM_ONLY", + "KEY_ACCESS_TRANSPARENCY_OFF", + }, + }, + }, + }, }, }, }, diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload_yaml_embed.go index 02d056f60bd..14d8356bd5e 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload_yaml_embed.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload_yaml_embed.go @@ -17,7 +17,7 @@ package assuredworkloads // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/assuredworkloads/workload.yaml -var YAML_workload = []byte("info:\n title: AssuredWorkloads/Workload\n description: The AssuredWorkloads Workload resource\n x-dcl-struct-name: Workload\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a Workload\n parameters:\n - name: workload\n required: true\n description: A full instance of a Workload\n apply:\n description: The function used to apply information about a Workload\n parameters:\n - name: workload\n required: true\n description: A full instance of a Workload\n delete:\n description: The function used to delete a Workload\n parameters:\n - name: workload\n required: true\n description: A full instance of a Workload\n deleteAll:\n description: The function used to delete all Workload\n parameters:\n - name: organization\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Workload\n parameters:\n - name: organization\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Workload:\n title: Workload\n x-dcl-id: organizations/{{organization}}/locations/{{location}}/workloads/{{name}}\n x-dcl-uses-state-hint: true\n x-dcl-parent-container: organization\n x-dcl-labels: labels\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - displayName\n - complianceRegime\n - organization\n - location\n properties:\n billingAccount:\n type: string\n x-dcl-go-name:
BillingAccount\n description: Optional. Input only. The billing account used for the resources\n which are direct children of workload. This billing account is initially\n associated with the resources created as part of Workload creation. After\n the initial creation of these resources, the customer can change the assigned\n billing account. The resource name has the form `billingAccounts/{billing_account_id}`.\n For example, `billingAccounts/012345-567890-ABCDEF`.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/BillingAccount\n field: name\n x-dcl-mutable-unreadable: true\n complianceRegime:\n type: string\n x-dcl-go-name: ComplianceRegime\n x-dcl-go-type: WorkloadComplianceRegimeEnum\n description: 'Required. Immutable. Compliance Regime associated with this\n workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH,\n FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT,\n CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS,\n ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT'\n x-kubernetes-immutable: true\n enum:\n - COMPLIANCE_REGIME_UNSPECIFIED\n - IL4\n - CJIS\n - FEDRAMP_HIGH\n - FEDRAMP_MODERATE\n - US_REGIONAL_ACCESS\n - HIPAA\n - HITRUST\n - EU_REGIONS_AND_SUPPORT\n - CA_REGIONS_AND_SUPPORT\n - ITAR\n - AU_REGIONS_AND_US_SUPPORT\n - ASSURED_WORKLOADS_FOR_PARTNERS\n - ISR_REGIONS\n - ISR_REGIONS_AND_SUPPORT\n - CA_PROTECTED_B\n - IL5\n - IL2\n - JP_REGIONS_AND_SUPPORT\n complianceStatus:\n type: object\n x-dcl-go-name: ComplianceStatus\n x-dcl-go-type: WorkloadComplianceStatus\n readOnly: true\n description: Output only. Count of active Violations in the Workload.\n x-kubernetes-immutable: true\n properties:\n acknowledgedViolationCount:\n type: array\n x-dcl-go-name: AcknowledgedViolationCount\n description: Number of current orgPolicy violations which are acknowledged.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n activeViolationCount:\n type: array\n x-dcl-go-name: ActiveViolationCount\n description: Number of current orgPolicy violations which are not acknowledged.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n compliantButDisallowedServices:\n type: array\n x-dcl-go-name: CompliantButDisallowedServices\n readOnly: true\n description: Output only. Urls for services which are compliant for this\n Assured Workload, but which are currently disallowed by the ResourceUsageRestriction\n org policy. Invoke workloads.restrictAllowedResources endpoint to allow\n your project developers to use these services in their environment.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. Immutable. The Workload creation timestamp.\n x-kubernetes-immutable: true\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: 'Required. The user-assigned display name of the Workload.\n When present it must be between 4 to 30 characters. Allowed characters\n are: lowercase and uppercase letters, numbers, hyphen, and spaces. 
Example:\n My Workload'\n ekmProvisioningResponse:\n type: object\n x-dcl-go-name: EkmProvisioningResponse\n x-dcl-go-type: WorkloadEkmProvisioningResponse\n readOnly: true\n description: Optional. Represents the Ekm Provisioning State of the given\n workload.\n x-kubernetes-immutable: true\n properties:\n ekmProvisioningErrorDomain:\n type: string\n x-dcl-go-name: EkmProvisioningErrorDomain\n x-dcl-go-type: WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum\n description: 'Indicates Ekm provisioning error if any. Possible values:\n EKM_PROVISIONING_ERROR_DOMAIN_UNSPECIFIED, UNSPECIFIED_ERROR, GOOGLE_SERVER_ERROR,\n EXTERNAL_USER_ERROR, EXTERNAL_PARTNER_ERROR, TIMEOUT_ERROR'\n x-kubernetes-immutable: true\n enum:\n - EKM_PROVISIONING_ERROR_DOMAIN_UNSPECIFIED\n - UNSPECIFIED_ERROR\n - GOOGLE_SERVER_ERROR\n - EXTERNAL_USER_ERROR\n - EXTERNAL_PARTNER_ERROR\n - TIMEOUT_ERROR\n ekmProvisioningErrorMapping:\n type: string\n x-dcl-go-name: EkmProvisioningErrorMapping\n x-dcl-go-type: WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum\n description: 'Detailed error message if Ekm provisioning fails Possible\n values: EKM_PROVISIONING_ERROR_MAPPING_UNSPECIFIED, INVALID_SERVICE_ACCOUNT,\n MISSING_METRICS_SCOPE_ADMIN_PERMISSION, MISSING_EKM_CONNECTION_ADMIN_PERMISSION'\n x-kubernetes-immutable: true\n enum:\n - EKM_PROVISIONING_ERROR_MAPPING_UNSPECIFIED\n - INVALID_SERVICE_ACCOUNT\n - MISSING_METRICS_SCOPE_ADMIN_PERMISSION\n - MISSING_EKM_CONNECTION_ADMIN_PERMISSION\n ekmProvisioningState:\n type: string\n x-dcl-go-name: EkmProvisioningState\n x-dcl-go-type: WorkloadEkmProvisioningResponseEkmProvisioningStateEnum\n description: 'Indicates Ekm enrollment Provisioning of a given workload.\n Possible values: EKM_PROVISIONING_STATE_UNSPECIFIED, EKM_PROVISIONING_STATE_PENDING,\n EKM_PROVISIONING_STATE_FAILED, EKM_PROVISIONING_STATE_COMPLETED'\n x-kubernetes-immutable: true\n enum:\n - EKM_PROVISIONING_STATE_UNSPECIFIED\n - EKM_PROVISIONING_STATE_PENDING\n - EKM_PROVISIONING_STATE_FAILED\n - EKM_PROVISIONING_STATE_COMPLETED\n enableSovereignControls:\n type: boolean\n x-dcl-go-name: EnableSovereignControls\n description: Optional. Indicates the sovereignty status of the given workload.\n Currently meant to be used by Europe/Canada customers.\n x-kubernetes-immutable: true\n kajEnrollmentState:\n type: string\n x-dcl-go-name: KajEnrollmentState\n x-dcl-go-type: WorkloadKajEnrollmentStateEnum\n readOnly: true\n description: 'Output only. Represents the KAJ enrollment state of the given\n workload. Possible values: KAJ_ENROLLMENT_STATE_UNSPECIFIED, KAJ_ENROLLMENT_STATE_PENDING,\n KAJ_ENROLLMENT_STATE_COMPLETE'\n x-kubernetes-immutable: true\n enum:\n - KAJ_ENROLLMENT_STATE_UNSPECIFIED\n - KAJ_ENROLLMENT_STATE_PENDING\n - KAJ_ENROLLMENT_STATE_COMPLETE\n kmsSettings:\n type: object\n x-dcl-go-name: KmsSettings\n x-dcl-go-type: WorkloadKmsSettings\n description: '**DEPRECATED** Input only. Settings used to create a CMEK\n crypto key. When set, a project with a KMS CMEK key is provisioned. This\n field is deprecated as of Feb 28, 2022. In order to create a Keyring,\n callers should specify, ENCRYPTION_KEYS_PROJECT or KEYRING in ResourceSettings.resource_type\n field.'\n x-kubernetes-immutable: true\n x-dcl-mutable-unreadable: true\n required:\n - nextRotationTime\n - rotationPeriod\n properties:\n nextRotationTime:\n type: string\n format: date-time\n x-dcl-go-name: NextRotationTime\n description: Required. Input only. Immutable. 
The time at which the\n Key Management Service will automatically create a new version of\n the crypto key and mark it as the primary.\n x-kubernetes-immutable: true\n rotationPeriod:\n type: string\n x-dcl-go-name: RotationPeriod\n description: Required. Input only. Immutable. will be advanced by this\n period when the Key Management Service automatically rotates a key.\n Must be at least 24 hours and at most 876,000 hours.\n x-kubernetes-immutable: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Optional. Labels applied to the workload.\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n x-dcl-parameter: true\n name:\n type: string\n x-dcl-go-name: Name\n description: Output only. The resource name of the workload.\n x-kubernetes-immutable: true\n x-dcl-server-generated-parameter: true\n x-dcl-has-long-form: true\n organization:\n type: string\n x-dcl-go-name: Organization\n description: The organization for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Organization\n field: name\n parent: true\n x-dcl-parameter: true\n partner:\n type: string\n x-dcl-go-name: Partner\n x-dcl-go-type: WorkloadPartnerEnum\n description: 'Optional. Partner regime associated with this workload. Possible\n values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS,\n SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN'\n x-kubernetes-immutable: true\n enum:\n - PARTNER_UNSPECIFIED\n - LOCAL_CONTROLS_BY_S3NS\n - SOVEREIGN_CONTROLS_BY_T_SYSTEMS\n - SOVEREIGN_CONTROLS_BY_SIA_MINSAIT\n - SOVEREIGN_CONTROLS_BY_PSN\n partnerPermissions:\n type: object\n x-dcl-go-name: PartnerPermissions\n x-dcl-go-type: WorkloadPartnerPermissions\n description: Optional. Permissions granted to the AW Partner SA account\n for the customer workload\n x-kubernetes-immutable: true\n properties:\n assuredWorkloadsMonitoring:\n type: boolean\n x-dcl-go-name: AssuredWorkloadsMonitoring\n description: Optional. Allow partner to view violation alerts.\n x-kubernetes-immutable: true\n dataLogsViewer:\n type: boolean\n x-dcl-go-name: DataLogsViewer\n description: Allow the partner to view inspectability logs and monitoring\n violations.\n x-kubernetes-immutable: true\n serviceAccessApprover:\n type: boolean\n x-dcl-go-name: ServiceAccessApprover\n description: Optional. Allow partner to view access approval logs.\n x-kubernetes-immutable: true\n provisionedResourcesParent:\n type: string\n x-dcl-go-name: ProvisionedResourcesParent\n description: 'Input only. The parent resource for the resources managed\n by this Assured Workload. May be either empty or a folder resource which\n is a child of the Workload parent. If not specified all resources are\n created under the parent organization. Format: folders/{folder_id}'\n x-kubernetes-immutable: true\n x-dcl-mutable-unreadable: true\n resourceSettings:\n type: array\n x-dcl-go-name: ResourceSettings\n description: Input only. Resource properties that are used to customize\n workload resources. These properties (such as custom project id) will\n be used to create workload resources if possible. 
This field is optional.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkloadResourceSettings\n properties:\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: User-assigned resource display name. If not empty it\n will be used to create a resource with the specified name.\n x-kubernetes-immutable: true\n resourceId:\n type: string\n x-dcl-go-name: ResourceId\n description: Resource identifier. For a project this represents projectId.\n If the project is already taken, the workload creation will fail.\n For KeyRing, this represents the keyring_id. For a folder, don't\n set this value as folder_id is assigned by Google.\n x-kubernetes-immutable: true\n resourceType:\n type: string\n x-dcl-go-name: ResourceType\n x-dcl-go-type: WorkloadResourceSettingsResourceTypeEnum\n description: 'Indicates the type of resource. This field should be\n specified to correspond the id to the right project type (CONSUMER_PROJECT\n or ENCRYPTION_KEYS_PROJECT) Possible values: RESOURCE_TYPE_UNSPECIFIED,\n CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER'\n x-kubernetes-immutable: true\n enum:\n - RESOURCE_TYPE_UNSPECIFIED\n - CONSUMER_PROJECT\n - ENCRYPTION_KEYS_PROJECT\n - KEYRING\n - CONSUMER_FOLDER\n x-dcl-mutable-unreadable: true\n resources:\n type: array\n x-dcl-go-name: Resources\n readOnly: true\n description: Output only. The resources associated with this workload. These\n resources will be created when creating the workload. If any of the projects\n already exist, the workload creation will fail. Always read only.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkloadResources\n properties:\n resourceId:\n type: integer\n format: int64\n x-dcl-go-name: ResourceId\n description: Resource identifier. For a project this represents project_number.\n x-kubernetes-immutable: true\n resourceType:\n type: string\n x-dcl-go-name: ResourceType\n x-dcl-go-type: WorkloadResourcesResourceTypeEnum\n description: 'Indicates the type of resource. Possible values: RESOURCE_TYPE_UNSPECIFIED,\n CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER'\n x-kubernetes-immutable: true\n enum:\n - RESOURCE_TYPE_UNSPECIFIED\n - CONSUMER_PROJECT\n - ENCRYPTION_KEYS_PROJECT\n - KEYRING\n - CONSUMER_FOLDER\n saaEnrollmentResponse:\n type: object\n x-dcl-go-name: SaaEnrollmentResponse\n x-dcl-go-type: WorkloadSaaEnrollmentResponse\n readOnly: true\n description: Output only. Represents the SAA enrollment response of the\n given workload. SAA enrollment response is queried during workloads.get\n call. In failure cases, user friendly error message is shown in SAA details\n page.\n x-kubernetes-immutable: true\n properties:\n setupErrors:\n type: array\n x-dcl-go-name: SetupErrors\n description: Indicates SAA enrollment setup error if any.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: WorkloadSaaEnrollmentResponseSetupErrorsEnum\n enum:\n - SETUP_ERROR_UNSPECIFIED\n - ERROR_INVALID_BASE_SETUP\n - ERROR_MISSING_EXTERNAL_SIGNING_KEY\n - ERROR_NOT_ALL_SERVICES_ENROLLED\n - ERROR_SETUP_CHECK_FAILED\n setupStatus:\n type: string\n x-dcl-go-name: SetupStatus\n x-dcl-go-type: WorkloadSaaEnrollmentResponseSetupStatusEnum\n description: 'Indicates SAA enrollment status of a given workload. 
Possible\n values: SETUP_STATE_UNSPECIFIED, STATUS_PENDING, STATUS_COMPLETE'\n x-kubernetes-immutable: true\n enum:\n - SETUP_STATE_UNSPECIFIED\n - STATUS_PENDING\n - STATUS_COMPLETE\n violationNotificationsEnabled:\n type: boolean\n x-dcl-go-name: ViolationNotificationsEnabled\n description: Optional. Indicates whether the e-mail notification for a violation\n is enabled for a workload. This value will be by default True, and if\n not present will be considered as true. This should only be updated via\n updateWorkload call. Any Changes to this field during the createWorkload\n call will not be honored. This will always be true while creating the\n workload.\n x-kubernetes-immutable: true\n") +var YAML_workload = []byte("info:\n title: AssuredWorkloads/Workload\n description: The AssuredWorkloads Workload resource\n x-dcl-struct-name: Workload\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a Workload\n parameters:\n - name: workload\n required: true\n description: A full instance of a Workload\n apply:\n description: The function used to apply information about a Workload\n parameters:\n - name: workload\n required: true\n description: A full instance of a Workload\n delete:\n description: The function used to delete a Workload\n parameters:\n - name: workload\n required: true\n description: A full instance of a Workload\n deleteAll:\n description: The function used to delete all Workload\n parameters:\n - name: organization\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Workload\n parameters:\n - name: organization\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Workload:\n title: Workload\n x-dcl-id: organizations/{{organization}}/locations/{{location}}/workloads/{{name}}\n x-dcl-uses-state-hint: true\n x-dcl-parent-container: organization\n x-dcl-labels: labels\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - displayName\n - complianceRegime\n - organization\n - location\n properties:\n billingAccount:\n type: string\n x-dcl-go-name: BillingAccount\n description: Optional. Input only. The billing account used for the resources\n which are direct children of workload. This billing account is initially\n associated with the resources created as part of Workload creation. After\n the initial creation of these resources, the customer can change the assigned\n billing account. The resource name has the form `billingAccounts/{billing_account_id}`.\n For example, `billingAccounts/012345-567890-ABCDEF`.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/BillingAccount\n field: name\n x-dcl-mutable-unreadable: true\n complianceRegime:\n type: string\n x-dcl-go-name: ComplianceRegime\n x-dcl-go-type: WorkloadComplianceRegimeEnum\n description: 'Required. Immutable. Compliance Regime associated with this\n workload. 
Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH,\n FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT,\n CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS,\n ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT,\n KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS,\n HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_WITH_US_SUPPORT'\n x-kubernetes-immutable: true\n enum:\n - COMPLIANCE_REGIME_UNSPECIFIED\n - IL4\n - CJIS\n - FEDRAMP_HIGH\n - FEDRAMP_MODERATE\n - US_REGIONAL_ACCESS\n - HIPAA\n - HITRUST\n - EU_REGIONS_AND_SUPPORT\n - CA_REGIONS_AND_SUPPORT\n - ITAR\n - AU_REGIONS_AND_US_SUPPORT\n - ASSURED_WORKLOADS_FOR_PARTNERS\n - ISR_REGIONS\n - ISR_REGIONS_AND_SUPPORT\n - CA_PROTECTED_B\n - IL5\n - IL2\n - JP_REGIONS_AND_SUPPORT\n - KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS\n - REGIONAL_CONTROLS\n - HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS\n - HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_WITH_US_SUPPORT\n complianceStatus:\n type: object\n x-dcl-go-name: ComplianceStatus\n x-dcl-go-type: WorkloadComplianceStatus\n readOnly: true\n description: Output only. Count of active Violations in the Workload.\n x-kubernetes-immutable: true\n properties:\n acknowledgedViolationCount:\n type: array\n x-dcl-go-name: AcknowledgedViolationCount\n description: Number of current orgPolicy violations which are acknowledged.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n activeViolationCount:\n type: array\n x-dcl-go-name: ActiveViolationCount\n description: Number of current orgPolicy violations which are not acknowledged.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n compliantButDisallowedServices:\n type: array\n x-dcl-go-name: CompliantButDisallowedServices\n readOnly: true\n description: Output only. Urls for services which are compliant for this\n Assured Workload, but which are currently disallowed by the ResourceUsageRestriction\n org policy. Invoke workloads.restrictAllowedResources endpoint to allow\n your project developers to use these services in their environment.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. Immutable. The Workload creation timestamp.\n x-kubernetes-immutable: true\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: 'Required. The user-assigned display name of the Workload.\n When present it must be between 4 to 30 characters. Allowed characters\n are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example:\n My Workload'\n ekmProvisioningResponse:\n type: object\n x-dcl-go-name: EkmProvisioningResponse\n x-dcl-go-type: WorkloadEkmProvisioningResponse\n readOnly: true\n description: Optional. Represents the Ekm Provisioning State of the given\n workload.\n x-kubernetes-immutable: true\n properties:\n ekmProvisioningErrorDomain:\n type: string\n x-dcl-go-name: EkmProvisioningErrorDomain\n x-dcl-go-type: WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum\n description: 'Indicates Ekm provisioning error if any. 
Possible values:\n EKM_PROVISIONING_ERROR_DOMAIN_UNSPECIFIED, UNSPECIFIED_ERROR, GOOGLE_SERVER_ERROR,\n EXTERNAL_USER_ERROR, EXTERNAL_PARTNER_ERROR, TIMEOUT_ERROR'\n x-kubernetes-immutable: true\n enum:\n - EKM_PROVISIONING_ERROR_DOMAIN_UNSPECIFIED\n - UNSPECIFIED_ERROR\n - GOOGLE_SERVER_ERROR\n - EXTERNAL_USER_ERROR\n - EXTERNAL_PARTNER_ERROR\n - TIMEOUT_ERROR\n ekmProvisioningErrorMapping:\n type: string\n x-dcl-go-name: EkmProvisioningErrorMapping\n x-dcl-go-type: WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum\n description: 'Detailed error message if Ekm provisioning fails Possible\n values: EKM_PROVISIONING_ERROR_MAPPING_UNSPECIFIED, INVALID_SERVICE_ACCOUNT,\n MISSING_METRICS_SCOPE_ADMIN_PERMISSION, MISSING_EKM_CONNECTION_ADMIN_PERMISSION'\n x-kubernetes-immutable: true\n enum:\n - EKM_PROVISIONING_ERROR_MAPPING_UNSPECIFIED\n - INVALID_SERVICE_ACCOUNT\n - MISSING_METRICS_SCOPE_ADMIN_PERMISSION\n - MISSING_EKM_CONNECTION_ADMIN_PERMISSION\n ekmProvisioningState:\n type: string\n x-dcl-go-name: EkmProvisioningState\n x-dcl-go-type: WorkloadEkmProvisioningResponseEkmProvisioningStateEnum\n description: 'Indicates Ekm enrollment Provisioning of a given workload.\n Possible values: EKM_PROVISIONING_STATE_UNSPECIFIED, EKM_PROVISIONING_STATE_PENDING,\n EKM_PROVISIONING_STATE_FAILED, EKM_PROVISIONING_STATE_COMPLETED'\n x-kubernetes-immutable: true\n enum:\n - EKM_PROVISIONING_STATE_UNSPECIFIED\n - EKM_PROVISIONING_STATE_PENDING\n - EKM_PROVISIONING_STATE_FAILED\n - EKM_PROVISIONING_STATE_COMPLETED\n enableSovereignControls:\n type: boolean\n x-dcl-go-name: EnableSovereignControls\n description: Optional. Indicates the sovereignty status of the given workload.\n Currently meant to be used by Europe/Canada customers.\n x-kubernetes-immutable: true\n kajEnrollmentState:\n type: string\n x-dcl-go-name: KajEnrollmentState\n x-dcl-go-type: WorkloadKajEnrollmentStateEnum\n readOnly: true\n description: 'Output only. Represents the KAJ enrollment state of the given\n workload. Possible values: KAJ_ENROLLMENT_STATE_UNSPECIFIED, KAJ_ENROLLMENT_STATE_PENDING,\n KAJ_ENROLLMENT_STATE_COMPLETE'\n x-kubernetes-immutable: true\n enum:\n - KAJ_ENROLLMENT_STATE_UNSPECIFIED\n - KAJ_ENROLLMENT_STATE_PENDING\n - KAJ_ENROLLMENT_STATE_COMPLETE\n kmsSettings:\n type: object\n x-dcl-go-name: KmsSettings\n x-dcl-go-type: WorkloadKmsSettings\n description: '**DEPRECATED** Input only. Settings used to create a CMEK\n crypto key. When set, a project with a KMS CMEK key is provisioned. This\n field is deprecated as of Feb 28, 2022. In order to create a Keyring,\n callers should specify, ENCRYPTION_KEYS_PROJECT or KEYRING in ResourceSettings.resource_type\n field.'\n x-kubernetes-immutable: true\n x-dcl-mutable-unreadable: true\n required:\n - nextRotationTime\n - rotationPeriod\n properties:\n nextRotationTime:\n type: string\n format: date-time\n x-dcl-go-name: NextRotationTime\n description: Required. Input only. Immutable. The time at which the\n Key Management Service will automatically create a new version of\n the crypto key and mark it as the primary.\n x-kubernetes-immutable: true\n rotationPeriod:\n type: string\n x-dcl-go-name: RotationPeriod\n description: Required. Input only. Immutable. will be advanced by this\n period when the Key Management Service automatically rotates a key.\n Must be at least 24 hours and at most 876,000 hours.\n x-kubernetes-immutable: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Optional. 
Labels applied to the workload.\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n x-dcl-parameter: true\n name:\n type: string\n x-dcl-go-name: Name\n description: Output only. The resource name of the workload.\n x-kubernetes-immutable: true\n x-dcl-server-generated-parameter: true\n x-dcl-has-long-form: true\n organization:\n type: string\n x-dcl-go-name: Organization\n description: The organization for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Organization\n field: name\n parent: true\n x-dcl-parameter: true\n partner:\n type: string\n x-dcl-go-name: Partner\n x-dcl-go-type: WorkloadPartnerEnum\n description: 'Optional. Partner regime associated with this workload. Possible\n values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS,\n SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT,\n SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM'\n x-kubernetes-immutable: true\n enum:\n - PARTNER_UNSPECIFIED\n - LOCAL_CONTROLS_BY_S3NS\n - SOVEREIGN_CONTROLS_BY_T_SYSTEMS\n - SOVEREIGN_CONTROLS_BY_SIA_MINSAIT\n - SOVEREIGN_CONTROLS_BY_PSN\n - SOVEREIGN_CONTROLS_BY_CNTXT\n - SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM\n partnerPermissions:\n type: object\n x-dcl-go-name: PartnerPermissions\n x-dcl-go-type: WorkloadPartnerPermissions\n description: Optional. Permissions granted to the AW Partner SA account\n for the customer workload\n x-kubernetes-immutable: true\n properties:\n assuredWorkloadsMonitoring:\n type: boolean\n x-dcl-go-name: AssuredWorkloadsMonitoring\n description: Optional. Allow partner to view violation alerts.\n x-kubernetes-immutable: true\n dataLogsViewer:\n type: boolean\n x-dcl-go-name: DataLogsViewer\n description: Allow the partner to view inspectability logs and monitoring\n violations.\n x-kubernetes-immutable: true\n serviceAccessApprover:\n type: boolean\n x-dcl-go-name: ServiceAccessApprover\n description: Optional. Allow partner to view access approval logs.\n x-kubernetes-immutable: true\n partnerServicesBillingAccount:\n type: string\n x-dcl-go-name: PartnerServicesBillingAccount\n description: Optional. Input only. Billing account necessary for purchasing\n services from Sovereign Partners. This field is required for creating\n SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create'\n IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC.\n x-kubernetes-immutable: true\n x-dcl-mutable-unreadable: true\n provisionedResourcesParent:\n type: string\n x-dcl-go-name: ProvisionedResourcesParent\n description: 'Input only. The parent resource for the resources managed\n by this Assured Workload. May be either empty or a folder resource which\n is a child of the Workload parent. If not specified all resources are\n created under the parent organization. Format: folders/{folder_id}'\n x-kubernetes-immutable: true\n x-dcl-mutable-unreadable: true\n resourceSettings:\n type: array\n x-dcl-go-name: ResourceSettings\n description: Input only. Resource properties that are used to customize\n workload resources. These properties (such as custom project id) will\n be used to create workload resources if possible. 
This field is optional.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkloadResourceSettings\n properties:\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: User-assigned resource display name. If not empty it\n will be used to create a resource with the specified name.\n x-kubernetes-immutable: true\n resourceId:\n type: string\n x-dcl-go-name: ResourceId\n description: Resource identifier. For a project this represents projectId.\n If the project is already taken, the workload creation will fail.\n For KeyRing, this represents the keyring_id. For a folder, don't\n set this value as folder_id is assigned by Google.\n x-kubernetes-immutable: true\n resourceType:\n type: string\n x-dcl-go-name: ResourceType\n x-dcl-go-type: WorkloadResourceSettingsResourceTypeEnum\n description: 'Indicates the type of resource. This field should be\n specified to correspond the id to the right project type (CONSUMER_PROJECT\n or ENCRYPTION_KEYS_PROJECT) Possible values: RESOURCE_TYPE_UNSPECIFIED,\n CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER'\n x-kubernetes-immutable: true\n enum:\n - RESOURCE_TYPE_UNSPECIFIED\n - CONSUMER_PROJECT\n - ENCRYPTION_KEYS_PROJECT\n - KEYRING\n - CONSUMER_FOLDER\n x-dcl-mutable-unreadable: true\n resources:\n type: array\n x-dcl-go-name: Resources\n readOnly: true\n description: Output only. The resources associated with this workload. These\n resources will be created when creating the workload. If any of the projects\n already exist, the workload creation will fail. Always read only.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkloadResources\n properties:\n resourceId:\n type: integer\n format: int64\n x-dcl-go-name: ResourceId\n description: Resource identifier. For a project this represents project_number.\n x-kubernetes-immutable: true\n resourceType:\n type: string\n x-dcl-go-name: ResourceType\n x-dcl-go-type: WorkloadResourcesResourceTypeEnum\n description: 'Indicates the type of resource. Possible values: RESOURCE_TYPE_UNSPECIFIED,\n CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER'\n x-kubernetes-immutable: true\n enum:\n - RESOURCE_TYPE_UNSPECIFIED\n - CONSUMER_PROJECT\n - ENCRYPTION_KEYS_PROJECT\n - KEYRING\n - CONSUMER_FOLDER\n saaEnrollmentResponse:\n type: object\n x-dcl-go-name: SaaEnrollmentResponse\n x-dcl-go-type: WorkloadSaaEnrollmentResponse\n readOnly: true\n description: Output only. Represents the SAA enrollment response of the\n given workload. SAA enrollment response is queried during workloads.get\n call. In failure cases, user friendly error message is shown in SAA details\n page.\n x-kubernetes-immutable: true\n properties:\n setupErrors:\n type: array\n x-dcl-go-name: SetupErrors\n description: Indicates SAA enrollment setup error if any.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: WorkloadSaaEnrollmentResponseSetupErrorsEnum\n enum:\n - SETUP_ERROR_UNSPECIFIED\n - ERROR_INVALID_BASE_SETUP\n - ERROR_MISSING_EXTERNAL_SIGNING_KEY\n - ERROR_NOT_ALL_SERVICES_ENROLLED\n - ERROR_SETUP_CHECK_FAILED\n setupStatus:\n type: string\n x-dcl-go-name: SetupStatus\n x-dcl-go-type: WorkloadSaaEnrollmentResponseSetupStatusEnum\n description: 'Indicates SAA enrollment status of a given workload. 
Possible\n values: SETUP_STATE_UNSPECIFIED, STATUS_PENDING, STATUS_COMPLETE'\n x-kubernetes-immutable: true\n enum:\n - SETUP_STATE_UNSPECIFIED\n - STATUS_PENDING\n - STATUS_COMPLETE\n violationNotificationsEnabled:\n type: boolean\n x-dcl-go-name: ViolationNotificationsEnabled\n description: Optional. Indicates whether the e-mail notification for a violation\n is enabled for a workload. This value will be by default True, and if\n not present will be considered as true. This should only be updated via\n updateWorkload call. Any Changes to this field during the createWorkload\n call will not be honored. This will always be true while creating the\n workload.\n x-kubernetes-immutable: true\n workloadOptions:\n type: object\n x-dcl-go-name: WorkloadOptions\n x-dcl-go-type: WorkloadWorkloadOptions\n description: Optional. Used to specify certain options for a workload during\n workload creation - currently only supporting KAT Optionality for Regional\n Controls workloads.\n x-kubernetes-immutable: true\n x-dcl-mutable-unreadable: true\n properties:\n kajEnrollmentType:\n type: string\n x-dcl-go-name: KajEnrollmentType\n x-dcl-go-type: WorkloadWorkloadOptionsKajEnrollmentTypeEnum\n description: 'Indicates type of KAJ enrollment for the workload. Currently,\n only specifiying KEY_ACCESS_TRANSPARENCY_OFF is implemented to not\n enroll in KAT-level KAJ enrollment for Regional Controls workloads.\n Possible values: KAJ_ENROLLMENT_TYPE_UNSPECIFIED, FULL_KAJ, EKM_ONLY,\n KEY_ACCESS_TRANSPARENCY_OFF'\n x-kubernetes-immutable: true\n enum:\n - KAJ_ENROLLMENT_TYPE_UNSPECIFIED\n - FULL_KAJ\n - EKM_ONLY\n - KEY_ACCESS_TRANSPARENCY_OFF\n") -// 20173 bytes -// MD5: 3ef13debefcdfb2a577397061b91fdec +// 22541 bytes +// MD5: e0c667c22d1ca26912c3e8fc2bd24c26 diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline.go index ff8653c101d..00fd8791a34 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline.go @@ -496,6 +496,7 @@ type DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGa Deployment *string `json:"deployment"` RouteUpdateWaitTime *string `json:"routeUpdateWaitTime"` StableCutbackDuration *string `json:"stableCutbackDuration"` + PodSelectorLabel *string `json:"podSelectorLabel"` } type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh @@ -523,6 +524,8 @@ func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernet r.StableCutbackDuration = res.StableCutbackDuration + r.PodSelectorLabel = res.PodSelectorLabel + } return nil } @@ -552,6 +555,7 @@ type DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSe Service *string `json:"service"` Deployment *string `json:"deployment"` DisablePodOverprovisioning *bool `json:"disablePodOverprovisioning"` + PodSelectorLabel *string `json:"podSelectorLabel"` } type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking 
DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking @@ -575,6 +579,8 @@ func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernet r.DisablePodOverprovisioning = res.DisablePodOverprovisioning + r.PodSelectorLabel = res.PodSelectorLabel + } return nil } diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline.yaml index 70e164004dc..23c63a40329 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline.yaml +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline.yaml @@ -537,6 +537,13 @@ components: x-dcl-go-name: HttpRoute description: Required. Name of the Gateway API HTTPRoute. + podSelectorLabel: + type: string + x-dcl-go-name: PodSelectorLabel + description: Optional. The label to use when + selecting Pods for the Deployment and Service + resources. This label must already be present + in both resources. routeUpdateWaitTime: type: string x-dcl-go-name: RouteUpdateWaitTime @@ -585,6 +592,13 @@ components: the number of total Pods used for the deployment strategy to the number of Pods the Deployment has on the cluster. + podSelectorLabel: + type: string + x-dcl-go-name: PodSelectorLabel + description: Optional. The label to use when + selecting Pods for the Deployment resource. + This label must already be present in the + Deployment. service: type: string x-dcl-go-name: Service diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline_internal.go index 75865d8b5df..f863fc58d6f 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline_internal.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline_internal.go @@ -1987,6 +1987,11 @@ func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig } else { cDes.StableCutbackDuration = des.StableCutbackDuration } + if dcl.StringCanonicalize(des.PodSelectorLabel, initial.PodSelectorLabel) || dcl.IsZeroValue(des.PodSelectorLabel) { + cDes.PodSelectorLabel = initial.PodSelectorLabel + } else { + cDes.PodSelectorLabel = des.PodSelectorLabel + } return cDes } @@ -2048,6 +2053,9 @@ func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeCon if dcl.StringCanonicalize(des.StableCutbackDuration, nw.StableCutbackDuration) { nw.StableCutbackDuration = des.StableCutbackDuration } + if dcl.StringCanonicalize(des.PodSelectorLabel, nw.PodSelectorLabel) { + nw.PodSelectorLabel = des.PodSelectorLabel + } return nw } @@ -2127,6 +2135,11 @@ func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig } else { cDes.DisablePodOverprovisioning = des.DisablePodOverprovisioning } + if dcl.StringCanonicalize(des.PodSelectorLabel, 
initial.PodSelectorLabel) || dcl.IsZeroValue(des.PodSelectorLabel) { + cDes.PodSelectorLabel = initial.PodSelectorLabel + } else { + cDes.PodSelectorLabel = des.PodSelectorLabel + } return cDes } @@ -2182,6 +2195,9 @@ func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeCon if dcl.BoolCanonicalize(des.DisablePodOverprovisioning, nw.DisablePodOverprovisioning) { nw.DisablePodOverprovisioning = des.DisablePodOverprovisioning } + if dcl.StringCanonicalize(des.PodSelectorLabel, nw.PodSelectorLabel) { + nw.PodSelectorLabel = des.PodSelectorLabel + } return nw } @@ -4348,6 +4364,13 @@ func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKuber } diffs = append(diffs, ds...) } + + if ds, err := dcl.Diff(desired.PodSelectorLabel, actual.PodSelectorLabel, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("PodSelectorLabel")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } return diffs, nil } @@ -4391,6 +4414,13 @@ func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKuber } diffs = append(diffs, ds...) } + + if ds, err := dcl.Diff(desired.PodSelectorLabel, actual.PodSelectorLabel, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("PodSelectorLabel")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } return diffs, nil } @@ -6224,6 +6254,9 @@ func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubern if v := f.StableCutbackDuration; !dcl.IsEmptyValueIndirect(v) { m["stableCutbackDuration"] = v } + if v := f.PodSelectorLabel; !dcl.IsEmptyValueIndirect(v) { + m["podSelectorLabel"] = v + } return m, nil } @@ -6246,6 +6279,7 @@ func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKuber r.Deployment = dcl.FlattenString(m["deployment"]) r.RouteUpdateWaitTime = dcl.FlattenString(m["routeUpdateWaitTime"]) r.StableCutbackDuration = dcl.FlattenString(m["stableCutbackDuration"]) + r.PodSelectorLabel = dcl.FlattenString(m["podSelectorLabel"]) return r } @@ -6348,6 +6382,9 @@ func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubern if v := f.DisablePodOverprovisioning; !dcl.IsEmptyValueIndirect(v) { m["disablePodOverprovisioning"] = v } + if v := f.PodSelectorLabel; !dcl.IsEmptyValueIndirect(v) { + m["podSelectorLabel"] = v + } return m, nil } @@ -6368,6 +6405,7 @@ func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKuber r.Service = dcl.FlattenString(m["service"]) r.Deployment = dcl.FlattenString(m["deployment"]) r.DisablePodOverprovisioning = dcl.FlattenBool(m["disablePodOverprovisioning"]) + r.PodSelectorLabel = dcl.FlattenString(m["podSelectorLabel"]) return r } diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline_schema.go index 8a833859bbf..b1a5b5847c9 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline_schema.go +++ 
b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline_schema.go @@ -584,6 +584,11 @@ func DCLDeliveryPipelineSchema() *dcl.Schema { GoName: "HttpRoute", Description: "Required. Name of the Gateway API HTTPRoute.", }, + "podSelectorLabel": &dcl.Property{ + Type: "string", + GoName: "PodSelectorLabel", + Description: "Optional. The label to use when selecting Pods for the Deployment and Service resources. This label must already be present in both resources.", + }, "routeUpdateWaitTime": &dcl.Property{ Type: "string", GoName: "RouteUpdateWaitTime", @@ -624,6 +629,11 @@ func DCLDeliveryPipelineSchema() *dcl.Schema { GoName: "DisablePodOverprovisioning", Description: "Optional. Whether to disable Pod overprovisioning. If Pod overprovisioning is disabled then Cloud Deploy will limit the number of total Pods used for the deployment strategy to the number of Pods the Deployment has on the cluster.", }, + "podSelectorLabel": &dcl.Property{ + Type: "string", + GoName: "PodSelectorLabel", + Description: "Optional. The label to use when selecting Pods for the Deployment resource. This label must already be present in the Deployment.", + }, "service": &dcl.Property{ Type: "string", GoName: "Service", diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline_yaml_embed.go index 3d461995fb1..0616e0cb1fc 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline_yaml_embed.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline_yaml_embed.go @@ -17,7 +17,7 @@ package clouddeploy // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/clouddeploy/delivery_pipeline.yaml -var YAML_delivery_pipeline = []byte("info:\n title: Clouddeploy/DeliveryPipeline\n description: The Cloud Deploy `DeliveryPipeline` resource\n x-dcl-struct-name: DeliveryPipeline\n x-dcl-has-iam: false\n x-dcl-ref:\n text: REST API\n url: https://cloud.google.com/deploy/docs/api/reference/rest/v1/projects.locations.deliveryPipelines\npaths:\n get:\n description: The function used to get information about a DeliveryPipeline\n parameters:\n - name: deliveryPipeline\n required: true\n description: A full instance of a DeliveryPipeline\n apply:\n description: The function used to apply information about a DeliveryPipeline\n parameters:\n - name: deliveryPipeline\n required: true\n description: A full instance of a DeliveryPipeline\n delete:\n description: The function used to delete a DeliveryPipeline\n parameters:\n - name: deliveryPipeline\n required: true\n description: A full instance of a DeliveryPipeline\n deleteAll:\n description: The function used to delete all DeliveryPipeline\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many DeliveryPipeline\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n 
DeliveryPipeline:\n title: DeliveryPipeline\n x-dcl-id: projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - project\n - location\n properties:\n annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: User annotations. These attributes can only be set and used\n by the user, and not by Google Cloud Deploy. See https://google.aip.dev/128#annotations\n for more details such as format and size limitations.\n condition:\n type: object\n x-dcl-go-name: Condition\n x-dcl-go-type: DeliveryPipelineCondition\n readOnly: true\n description: Output only. Information around the state of the Delivery Pipeline.\n properties:\n pipelineReadyCondition:\n type: object\n x-dcl-go-name: PipelineReadyCondition\n x-dcl-go-type: DeliveryPipelineConditionPipelineReadyCondition\n description: Details around the Pipeline's overall status.\n properties:\n status:\n type: boolean\n x-dcl-go-name: Status\n description: True if the Pipeline is in a valid state. Otherwise\n at least one condition in `PipelineCondition` is in an invalid\n state. Iterate over those conditions and see which condition(s)\n has status = false to find out what is wrong with the Pipeline.\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n description: Last time the condition was updated.\n targetsPresentCondition:\n type: object\n x-dcl-go-name: TargetsPresentCondition\n x-dcl-go-type: DeliveryPipelineConditionTargetsPresentCondition\n description: Details around targets enumerated in the pipeline.\n properties:\n missingTargets:\n type: array\n x-dcl-go-name: MissingTargets\n description: The list of Target names that are missing. For example,\n projects/{project_id}/locations/{location_name}/targets/{target_name}.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Clouddeploy/Target\n field: selfLink\n status:\n type: boolean\n x-dcl-go-name: Status\n description: True if there aren't any missing Targets.\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n description: Last time the condition was updated.\n targetsTypeCondition:\n type: object\n x-dcl-go-name: TargetsTypeCondition\n x-dcl-go-type: DeliveryPipelineConditionTargetsTypeCondition\n description: Details on the whether the targets enumerated in the pipeline\n are of the same type.\n properties:\n errorDetails:\n type: string\n x-dcl-go-name: ErrorDetails\n description: Human readable error message.\n status:\n type: boolean\n x-dcl-go-name: Status\n description: True if the targets are all a comparable type. For\n example this is true if all targets are GKE clusters. This is\n false if some targets are Cloud Run targets and others are GKE\n clusters.\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. Time at which the pipeline was created.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: Description of the `DeliveryPipeline`. 
Max length is 255 characters.\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: This checksum is computed by the server based on the value\n of other fields, and may be sent on update and delete requests to ensure\n the client has an up-to-date value before proceeding.\n x-kubernetes-immutable: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: 'Labels are attributes that can be set and used by both the\n user and by Google Cloud Deploy. Labels must meet the following constraints:\n * Keys and values can contain only lowercase letters, numeric characters,\n underscores, and dashes. * All characters must use UTF-8 encoding, and\n international characters are allowed. * Keys must start with a lowercase\n letter or international character. * Each resource is limited to a maximum\n of 64 labels. Both keys and values are additionally constrained to be\n <= 128 bytes.'\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n x-dcl-parameter: true\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the `DeliveryPipeline`. Format is `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`.\n x-kubernetes-immutable: true\n x-dcl-parameter: true\n x-dcl-has-long-form: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n x-dcl-parameter: true\n serialPipeline:\n type: object\n x-dcl-go-name: SerialPipeline\n x-dcl-go-type: DeliveryPipelineSerialPipeline\n description: SerialPipeline defines a sequential set of stages for a `DeliveryPipeline`.\n properties:\n stages:\n type: array\n x-dcl-go-name: Stages\n description: Each stage specifies configuration for a `Target`. The\n ordering of this list defines the promotion flow.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeliveryPipelineSerialPipelineStages\n properties:\n deployParameters:\n type: array\n x-dcl-go-name: DeployParameters\n description: Optional. The deploy parameters to use for the target\n in this stage.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesDeployParameters\n required:\n - values\n properties:\n matchTargetLabels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: MatchTargetLabels\n description: Optional. Deploy parameters are applied to\n targets with match labels. If unspecified, deploy parameters\n are applied to all targets (including child targets of\n a multi-target).\n values:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Values\n description: Required. Values are deploy parameters in key-value\n pairs.\n profiles:\n type: array\n x-dcl-go-name: Profiles\n description: Skaffold profiles to use when rendering the manifest\n for this stage's `Target`.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n strategy:\n type: object\n x-dcl-go-name: Strategy\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategy\n description: Optional. 
The strategy to use for a `Rollout` to\n this stage.\n properties:\n canary:\n type: object\n x-dcl-go-name: Canary\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanary\n description: Canary deployment strategy provides progressive\n percentage based deployments to a Target.\n properties:\n canaryDeployment:\n type: object\n x-dcl-go-name: CanaryDeployment\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment\n description: Configures the progressive based deployment\n for a Target.\n x-dcl-conflicts:\n - customCanaryDeployment\n required:\n - percentages\n properties:\n percentages:\n type: array\n x-dcl-go-name: Percentages\n description: Required. The percentage based deployments\n that will occur as a part of a `Rollout`. List is\n expected in ascending order and each integer n is\n 0 <= n < 100.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n postdeploy:\n type: object\n x-dcl-go-name: Postdeploy\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy\n description: Optional. Configuration for the postdeploy\n job of the last phase. If this is not configured,\n postdeploy job will not be present.\n properties:\n actions:\n type: array\n x-dcl-go-name: Actions\n description: Optional. A sequence of skaffold\n custom actions to invoke during execution of\n the postdeploy job.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n predeploy:\n type: object\n x-dcl-go-name: Predeploy\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy\n description: Optional. Configuration for the predeploy\n job of the first phase. If this is not configured,\n predeploy job will not be present.\n properties:\n actions:\n type: array\n x-dcl-go-name: Actions\n description: Optional. A sequence of skaffold\n custom actions to invoke during execution of\n the predeploy job.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n verify:\n type: boolean\n x-dcl-go-name: Verify\n description: Whether to run verify tests after each\n percentage deployment.\n customCanaryDeployment:\n type: object\n x-dcl-go-name: CustomCanaryDeployment\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment\n description: Configures the progressive based deployment\n for a Target, but allows customizing at the phase level\n where a phase represents each of the percentage deployments.\n x-dcl-conflicts:\n - canaryDeployment\n required:\n - phaseConfigs\n properties:\n phaseConfigs:\n type: array\n x-dcl-go-name: PhaseConfigs\n description: Required. Configuration for each phase\n in the canary deployment in the order executed.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs\n required:\n - phaseId\n - percentage\n properties:\n percentage:\n type: integer\n format: int64\n x-dcl-go-name: Percentage\n description: Required. Percentage deployment\n for the phase.\n phaseId:\n type: string\n x-dcl-go-name: PhaseId\n description: 'Required. The ID to assign to\n the `Rollout` phase. 
This value must consist\n of lower-case letters, numbers, and hyphens,\n start with a letter and end with a letter\n or a number, and have a max length of 63 characters.\n In other words, it must match the following\n regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`.'\n postdeploy:\n type: object\n x-dcl-go-name: Postdeploy\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy\n description: Optional. Configuration for the\n postdeploy job of this phase. If this is not\n configured, postdeploy job will not be present\n for this phase.\n properties:\n actions:\n type: array\n x-dcl-go-name: Actions\n description: Optional. A sequence of skaffold\n custom actions to invoke during execution\n of the postdeploy job.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n predeploy:\n type: object\n x-dcl-go-name: Predeploy\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy\n description: Optional. Configuration for the\n predeploy job of this phase. If this is not\n configured, predeploy job will not be present\n for this phase.\n properties:\n actions:\n type: array\n x-dcl-go-name: Actions\n description: Optional. A sequence of skaffold\n custom actions to invoke during execution\n of the predeploy job.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n profiles:\n type: array\n x-dcl-go-name: Profiles\n description: Skaffold profiles to use when rendering\n the manifest for this phase. These are in\n addition to the profiles list specified in\n the `DeliveryPipeline` stage.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n verify:\n type: boolean\n x-dcl-go-name: Verify\n description: Whether to run verify tests after\n the deployment.\n runtimeConfig:\n type: object\n x-dcl-go-name: RuntimeConfig\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig\n description: Optional. Runtime specific configurations\n for the deployment strategy. The runtime configuration\n is used to determine how Cloud Deploy will split traffic\n to enable a progressive deployment.\n properties:\n cloudRun:\n type: object\n x-dcl-go-name: CloudRun\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun\n description: Cloud Run runtime configuration.\n x-dcl-conflicts:\n - kubernetes\n properties:\n automaticTrafficControl:\n type: boolean\n x-dcl-go-name: AutomaticTrafficControl\n description: Whether Cloud Deploy should update\n the traffic stanza in a Cloud Run Service on\n the user's behalf to facilitate traffic splitting.\n This is required to be true for CanaryDeployments,\n but optional for CustomCanaryDeployments.\n canaryRevisionTags:\n type: array\n x-dcl-go-name: CanaryRevisionTags\n description: Optional. A list of tags that are\n added to the canary revision while the canary\n phase is in progress.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n priorRevisionTags:\n type: array\n x-dcl-go-name: PriorRevisionTags\n description: Optional. A list of tags that are\n added to the prior revision while the canary\n phase is in progress.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n stableRevisionTags:\n type: array\n x-dcl-go-name: StableRevisionTags\n description: Optional. 
A list of tags that are\n added to the final stable revision when the\n stable phase is applied.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n kubernetes:\n type: object\n x-dcl-go-name: Kubernetes\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes\n description: Kubernetes runtime configuration.\n x-dcl-conflicts:\n - cloudRun\n properties:\n gatewayServiceMesh:\n type: object\n x-dcl-go-name: GatewayServiceMesh\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh\n description: Kubernetes Gateway API service mesh\n configuration.\n x-dcl-conflicts:\n - serviceNetworking\n required:\n - httpRoute\n - service\n - deployment\n properties:\n deployment:\n type: string\n x-dcl-go-name: Deployment\n description: Required. Name of the Kubernetes\n Deployment whose traffic is managed by the\n specified HTTPRoute and Service.\n httpRoute:\n type: string\n x-dcl-go-name: HttpRoute\n description: Required. Name of the Gateway\n API HTTPRoute.\n routeUpdateWaitTime:\n type: string\n x-dcl-go-name: RouteUpdateWaitTime\n description: Optional. The time to wait for\n route updates to propagate. The maximum\n configurable time is 3 hours, in seconds\n format. If unspecified, there is no wait\n time.\n service:\n type: string\n x-dcl-go-name: Service\n description: Required. Name of the Kubernetes\n Service.\n stableCutbackDuration:\n type: string\n x-dcl-go-name: StableCutbackDuration\n description: Optional. The amount of time\n to migrate traffic back from the canary\n Service to the original Service during the\n stable phase deployment. If specified, must\n be between 15s and 3600s. If unspecified,\n there is no cutback time.\n serviceNetworking:\n type: object\n x-dcl-go-name: ServiceNetworking\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking\n description: Kubernetes Service networking configuration.\n x-dcl-conflicts:\n - gatewayServiceMesh\n required:\n - service\n - deployment\n properties:\n deployment:\n type: string\n x-dcl-go-name: Deployment\n description: Required. Name of the Kubernetes\n Deployment whose traffic is managed by the\n specified Service.\n disablePodOverprovisioning:\n type: boolean\n x-dcl-go-name: DisablePodOverprovisioning\n description: Optional. Whether to disable\n Pod overprovisioning. If Pod overprovisioning\n is disabled then Cloud Deploy will limit\n the number of total Pods used for the deployment\n strategy to the number of Pods the Deployment\n has on the cluster.\n service:\n type: string\n x-dcl-go-name: Service\n description: Required. Name of the Kubernetes\n Service.\n standard:\n type: object\n x-dcl-go-name: Standard\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyStandard\n description: Standard deployment strategy executes a single\n deploy and allows verifying the deployment.\n properties:\n postdeploy:\n type: object\n x-dcl-go-name: Postdeploy\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy\n description: Optional. Configuration for the postdeploy\n job. If this is not configured, postdeploy job will\n not be present.\n properties:\n actions:\n type: array\n x-dcl-go-name: Actions\n description: Optional. 
A sequence of skaffold custom\n actions to invoke during execution of the postdeploy\n job.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n predeploy:\n type: object\n x-dcl-go-name: Predeploy\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy\n description: Optional. Configuration for the predeploy\n job. If this is not configured, predeploy job will not\n be present.\n properties:\n actions:\n type: array\n x-dcl-go-name: Actions\n description: Optional. A sequence of skaffold custom\n actions to invoke during execution of the predeploy\n job.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n verify:\n type: boolean\n x-dcl-go-name: Verify\n description: Whether to verify a deployment.\n targetId:\n type: string\n x-dcl-go-name: TargetId\n description: The target_id to which this stage points. This field\n refers exclusively to the last segment of a target name. For\n example, this field would just be `my-target` (rather than `projects/project/locations/location/targets/my-target`).\n The location of the `Target` is inferred to be the same as the\n location of the `DeliveryPipeline` that contains this `Stage`.\n suspended:\n type: boolean\n x-dcl-go-name: Suspended\n description: When suspended, no new releases or rollouts can be created,\n but in-progress ones will complete.\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. Unique identifier of the `DeliveryPipeline`.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. Most recent time at which the pipeline was updated.\n x-kubernetes-immutable: true\n") +var YAML_delivery_pipeline = []byte("info:\n title: Clouddeploy/DeliveryPipeline\n description: The Cloud Deploy `DeliveryPipeline` resource\n x-dcl-struct-name: DeliveryPipeline\n x-dcl-has-iam: false\n x-dcl-ref:\n text: REST API\n url: https://cloud.google.com/deploy/docs/api/reference/rest/v1/projects.locations.deliveryPipelines\npaths:\n get:\n description: The function used to get information about a DeliveryPipeline\n parameters:\n - name: deliveryPipeline\n required: true\n description: A full instance of a DeliveryPipeline\n apply:\n description: The function used to apply information about a DeliveryPipeline\n parameters:\n - name: deliveryPipeline\n required: true\n description: A full instance of a DeliveryPipeline\n delete:\n description: The function used to delete a DeliveryPipeline\n parameters:\n - name: deliveryPipeline\n required: true\n description: A full instance of a DeliveryPipeline\n deleteAll:\n description: The function used to delete all DeliveryPipeline\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many DeliveryPipeline\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n DeliveryPipeline:\n title: DeliveryPipeline\n x-dcl-id: projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - project\n - location\n properties:\n 
annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: User annotations. These attributes can only be set and used\n by the user, and not by Google Cloud Deploy. See https://google.aip.dev/128#annotations\n for more details such as format and size limitations.\n condition:\n type: object\n x-dcl-go-name: Condition\n x-dcl-go-type: DeliveryPipelineCondition\n readOnly: true\n description: Output only. Information around the state of the Delivery Pipeline.\n properties:\n pipelineReadyCondition:\n type: object\n x-dcl-go-name: PipelineReadyCondition\n x-dcl-go-type: DeliveryPipelineConditionPipelineReadyCondition\n description: Details around the Pipeline's overall status.\n properties:\n status:\n type: boolean\n x-dcl-go-name: Status\n description: True if the Pipeline is in a valid state. Otherwise\n at least one condition in `PipelineCondition` is in an invalid\n state. Iterate over those conditions and see which condition(s)\n has status = false to find out what is wrong with the Pipeline.\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n description: Last time the condition was updated.\n targetsPresentCondition:\n type: object\n x-dcl-go-name: TargetsPresentCondition\n x-dcl-go-type: DeliveryPipelineConditionTargetsPresentCondition\n description: Details around targets enumerated in the pipeline.\n properties:\n missingTargets:\n type: array\n x-dcl-go-name: MissingTargets\n description: The list of Target names that are missing. For example,\n projects/{project_id}/locations/{location_name}/targets/{target_name}.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Clouddeploy/Target\n field: selfLink\n status:\n type: boolean\n x-dcl-go-name: Status\n description: True if there aren't any missing Targets.\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n description: Last time the condition was updated.\n targetsTypeCondition:\n type: object\n x-dcl-go-name: TargetsTypeCondition\n x-dcl-go-type: DeliveryPipelineConditionTargetsTypeCondition\n description: Details on the whether the targets enumerated in the pipeline\n are of the same type.\n properties:\n errorDetails:\n type: string\n x-dcl-go-name: ErrorDetails\n description: Human readable error message.\n status:\n type: boolean\n x-dcl-go-name: Status\n description: True if the targets are all a comparable type. For\n example this is true if all targets are GKE clusters. This is\n false if some targets are Cloud Run targets and others are GKE\n clusters.\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. Time at which the pipeline was created.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: Description of the `DeliveryPipeline`. Max length is 255 characters.\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: This checksum is computed by the server based on the value\n of other fields, and may be sent on update and delete requests to ensure\n the client has an up-to-date value before proceeding.\n x-kubernetes-immutable: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: 'Labels are attributes that can be set and used by both the\n user and by Google Cloud Deploy. 
Labels must meet the following constraints:\n * Keys and values can contain only lowercase letters, numeric characters,\n underscores, and dashes. * All characters must use UTF-8 encoding, and\n international characters are allowed. * Keys must start with a lowercase\n letter or international character. * Each resource is limited to a maximum\n of 64 labels. Both keys and values are additionally constrained to be\n <= 128 bytes.'\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n x-dcl-parameter: true\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the `DeliveryPipeline`. Format is `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`.\n x-kubernetes-immutable: true\n x-dcl-parameter: true\n x-dcl-has-long-form: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n x-dcl-parameter: true\n serialPipeline:\n type: object\n x-dcl-go-name: SerialPipeline\n x-dcl-go-type: DeliveryPipelineSerialPipeline\n description: SerialPipeline defines a sequential set of stages for a `DeliveryPipeline`.\n properties:\n stages:\n type: array\n x-dcl-go-name: Stages\n description: Each stage specifies configuration for a `Target`. The\n ordering of this list defines the promotion flow.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeliveryPipelineSerialPipelineStages\n properties:\n deployParameters:\n type: array\n x-dcl-go-name: DeployParameters\n description: Optional. The deploy parameters to use for the target\n in this stage.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesDeployParameters\n required:\n - values\n properties:\n matchTargetLabels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: MatchTargetLabels\n description: Optional. Deploy parameters are applied to\n targets with match labels. If unspecified, deploy parameters\n are applied to all targets (including child targets of\n a multi-target).\n values:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Values\n description: Required. Values are deploy parameters in key-value\n pairs.\n profiles:\n type: array\n x-dcl-go-name: Profiles\n description: Skaffold profiles to use when rendering the manifest\n for this stage's `Target`.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n strategy:\n type: object\n x-dcl-go-name: Strategy\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategy\n description: Optional. The strategy to use for a `Rollout` to\n this stage.\n properties:\n canary:\n type: object\n x-dcl-go-name: Canary\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanary\n description: Canary deployment strategy provides progressive\n percentage based deployments to a Target.\n properties:\n canaryDeployment:\n type: object\n x-dcl-go-name: CanaryDeployment\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment\n description: Configures the progressive based deployment\n for a Target.\n x-dcl-conflicts:\n - customCanaryDeployment\n required:\n - percentages\n properties:\n percentages:\n type: array\n x-dcl-go-name: Percentages\n description: Required. The percentage based deployments\n that will occur as a part of a `Rollout`. 
List is\n expected in ascending order and each integer n is\n 0 <= n < 100.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n postdeploy:\n type: object\n x-dcl-go-name: Postdeploy\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy\n description: Optional. Configuration for the postdeploy\n job of the last phase. If this is not configured,\n postdeploy job will not be present.\n properties:\n actions:\n type: array\n x-dcl-go-name: Actions\n description: Optional. A sequence of skaffold\n custom actions to invoke during execution of\n the postdeploy job.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n predeploy:\n type: object\n x-dcl-go-name: Predeploy\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy\n description: Optional. Configuration for the predeploy\n job of the first phase. If this is not configured,\n predeploy job will not be present.\n properties:\n actions:\n type: array\n x-dcl-go-name: Actions\n description: Optional. A sequence of skaffold\n custom actions to invoke during execution of\n the predeploy job.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n verify:\n type: boolean\n x-dcl-go-name: Verify\n description: Whether to run verify tests after each\n percentage deployment.\n customCanaryDeployment:\n type: object\n x-dcl-go-name: CustomCanaryDeployment\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment\n description: Configures the progressive based deployment\n for a Target, but allows customizing at the phase level\n where a phase represents each of the percentage deployments.\n x-dcl-conflicts:\n - canaryDeployment\n required:\n - phaseConfigs\n properties:\n phaseConfigs:\n type: array\n x-dcl-go-name: PhaseConfigs\n description: Required. Configuration for each phase\n in the canary deployment in the order executed.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs\n required:\n - phaseId\n - percentage\n properties:\n percentage:\n type: integer\n format: int64\n x-dcl-go-name: Percentage\n description: Required. Percentage deployment\n for the phase.\n phaseId:\n type: string\n x-dcl-go-name: PhaseId\n description: 'Required. The ID to assign to\n the `Rollout` phase. This value must consist\n of lower-case letters, numbers, and hyphens,\n start with a letter and end with a letter\n or a number, and have a max length of 63 characters.\n In other words, it must match the following\n regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`.'\n postdeploy:\n type: object\n x-dcl-go-name: Postdeploy\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy\n description: Optional. Configuration for the\n postdeploy job of this phase. If this is not\n configured, postdeploy job will not be present\n for this phase.\n properties:\n actions:\n type: array\n x-dcl-go-name: Actions\n description: Optional. 
A sequence of skaffold\n custom actions to invoke during execution\n of the postdeploy job.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n predeploy:\n type: object\n x-dcl-go-name: Predeploy\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy\n description: Optional. Configuration for the\n predeploy job of this phase. If this is not\n configured, predeploy job will not be present\n for this phase.\n properties:\n actions:\n type: array\n x-dcl-go-name: Actions\n description: Optional. A sequence of skaffold\n custom actions to invoke during execution\n of the predeploy job.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n profiles:\n type: array\n x-dcl-go-name: Profiles\n description: Skaffold profiles to use when rendering\n the manifest for this phase. These are in\n addition to the profiles list specified in\n the `DeliveryPipeline` stage.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n verify:\n type: boolean\n x-dcl-go-name: Verify\n description: Whether to run verify tests after\n the deployment.\n runtimeConfig:\n type: object\n x-dcl-go-name: RuntimeConfig\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig\n description: Optional. Runtime specific configurations\n for the deployment strategy. The runtime configuration\n is used to determine how Cloud Deploy will split traffic\n to enable a progressive deployment.\n properties:\n cloudRun:\n type: object\n x-dcl-go-name: CloudRun\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun\n description: Cloud Run runtime configuration.\n x-dcl-conflicts:\n - kubernetes\n properties:\n automaticTrafficControl:\n type: boolean\n x-dcl-go-name: AutomaticTrafficControl\n description: Whether Cloud Deploy should update\n the traffic stanza in a Cloud Run Service on\n the user's behalf to facilitate traffic splitting.\n This is required to be true for CanaryDeployments,\n but optional for CustomCanaryDeployments.\n canaryRevisionTags:\n type: array\n x-dcl-go-name: CanaryRevisionTags\n description: Optional. A list of tags that are\n added to the canary revision while the canary\n phase is in progress.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n priorRevisionTags:\n type: array\n x-dcl-go-name: PriorRevisionTags\n description: Optional. A list of tags that are\n added to the prior revision while the canary\n phase is in progress.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n stableRevisionTags:\n type: array\n x-dcl-go-name: StableRevisionTags\n description: Optional. 
A list of tags that are\n added to the final stable revision when the\n stable phase is applied.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n kubernetes:\n type: object\n x-dcl-go-name: Kubernetes\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes\n description: Kubernetes runtime configuration.\n x-dcl-conflicts:\n - cloudRun\n properties:\n gatewayServiceMesh:\n type: object\n x-dcl-go-name: GatewayServiceMesh\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh\n description: Kubernetes Gateway API service mesh\n configuration.\n x-dcl-conflicts:\n - serviceNetworking\n required:\n - httpRoute\n - service\n - deployment\n properties:\n deployment:\n type: string\n x-dcl-go-name: Deployment\n description: Required. Name of the Kubernetes\n Deployment whose traffic is managed by the\n specified HTTPRoute and Service.\n httpRoute:\n type: string\n x-dcl-go-name: HttpRoute\n description: Required. Name of the Gateway\n API HTTPRoute.\n podSelectorLabel:\n type: string\n x-dcl-go-name: PodSelectorLabel\n description: Optional. The label to use when\n selecting Pods for the Deployment and Service\n resources. This label must already be present\n in both resources.\n routeUpdateWaitTime:\n type: string\n x-dcl-go-name: RouteUpdateWaitTime\n description: Optional. The time to wait for\n route updates to propagate. The maximum\n configurable time is 3 hours, in seconds\n format. If unspecified, there is no wait\n time.\n service:\n type: string\n x-dcl-go-name: Service\n description: Required. Name of the Kubernetes\n Service.\n stableCutbackDuration:\n type: string\n x-dcl-go-name: StableCutbackDuration\n description: Optional. The amount of time\n to migrate traffic back from the canary\n Service to the original Service during the\n stable phase deployment. If specified, must\n be between 15s and 3600s. If unspecified,\n there is no cutback time.\n serviceNetworking:\n type: object\n x-dcl-go-name: ServiceNetworking\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking\n description: Kubernetes Service networking configuration.\n x-dcl-conflicts:\n - gatewayServiceMesh\n required:\n - service\n - deployment\n properties:\n deployment:\n type: string\n x-dcl-go-name: Deployment\n description: Required. Name of the Kubernetes\n Deployment whose traffic is managed by the\n specified Service.\n disablePodOverprovisioning:\n type: boolean\n x-dcl-go-name: DisablePodOverprovisioning\n description: Optional. Whether to disable\n Pod overprovisioning. If Pod overprovisioning\n is disabled then Cloud Deploy will limit\n the number of total Pods used for the deployment\n strategy to the number of Pods the Deployment\n has on the cluster.\n podSelectorLabel:\n type: string\n x-dcl-go-name: PodSelectorLabel\n description: Optional. The label to use when\n selecting Pods for the Deployment resource.\n This label must already be present in the\n Deployment.\n service:\n type: string\n x-dcl-go-name: Service\n description: Required. 
Name of the Kubernetes\n Service.\n standard:\n type: object\n x-dcl-go-name: Standard\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyStandard\n description: Standard deployment strategy executes a single\n deploy and allows verifying the deployment.\n properties:\n postdeploy:\n type: object\n x-dcl-go-name: Postdeploy\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy\n description: Optional. Configuration for the postdeploy\n job. If this is not configured, postdeploy job will\n not be present.\n properties:\n actions:\n type: array\n x-dcl-go-name: Actions\n description: Optional. A sequence of skaffold custom\n actions to invoke during execution of the postdeploy\n job.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n predeploy:\n type: object\n x-dcl-go-name: Predeploy\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy\n description: Optional. Configuration for the predeploy\n job. If this is not configured, predeploy job will not\n be present.\n properties:\n actions:\n type: array\n x-dcl-go-name: Actions\n description: Optional. A sequence of skaffold custom\n actions to invoke during execution of the predeploy\n job.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n verify:\n type: boolean\n x-dcl-go-name: Verify\n description: Whether to verify a deployment.\n targetId:\n type: string\n x-dcl-go-name: TargetId\n description: The target_id to which this stage points. This field\n refers exclusively to the last segment of a target name. For\n example, this field would just be `my-target` (rather than `projects/project/locations/location/targets/my-target`).\n The location of the `Target` is inferred to be the same as the\n location of the `DeliveryPipeline` that contains this `Stage`.\n suspended:\n type: boolean\n x-dcl-go-name: Suspended\n description: When suspended, no new releases or rollouts can be created,\n but in-progress ones will complete.\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. Unique identifier of the `DeliveryPipeline`.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. 
Most recent time at which the pipeline was updated.\n x-kubernetes-immutable: true\n") -// 35158 bytes -// MD5: 6b63861d228941f3512ec4a9777c6ffe +// 36151 bytes +// MD5: ff459f48fb76c99e0104adec32956cb6 diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target.go index 88ca7c9182c..955088ebcf4 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target.go @@ -82,6 +82,7 @@ type TargetGke struct { empty bool `json:"-"` Cluster *string `json:"cluster"` InternalIP *bool `json:"internalIP"` + ProxyUrl *string `json:"proxyUrl"` } type jsonTargetGke TargetGke @@ -103,6 +104,8 @@ func (r *TargetGke) UnmarshalJSON(data []byte) error { r.InternalIP = res.InternalIP + r.ProxyUrl = res.ProxyUrl + } return nil } diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target.yaml index 1d2d8d05392..a408c1642c7 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target.yaml +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target.yaml @@ -245,6 +245,11 @@ components: is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept). + proxyUrl: + type: string + x-dcl-go-name: ProxyUrl + description: Optional. If set, used to configure a [proxy](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#proxy) + to the Kubernetes server. 
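The new `proxyUrl` string rides alongside `cluster` and `internalIP` in the Target's `gke` block, and the UnmarshalJSON hunk above simply copies it through. A minimal, self-contained sketch of the JSON shape this round-trips; the struct below is a local stand-in for the vendored TargetGke type (not the real one), and the cluster and proxy values are made up:

package main

import (
	"encoding/json"
	"fmt"
)

// Local stand-in mirroring the vendored TargetGke fields, including the new ProxyUrl.
type targetGke struct {
	Cluster    *string `json:"cluster,omitempty"`
	InternalIP *bool   `json:"internalIP,omitempty"`
	ProxyUrl   *string `json:"proxyUrl,omitempty"`
}

func main() {
	cluster := "projects/p/locations/us-central1/clusters/c" // hypothetical cluster name
	proxy := "https://proxy.example.internal:3128"           // hypothetical proxy endpoint
	b, err := json.Marshal(targetGke{Cluster: &cluster, ProxyUrl: &proxy})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"cluster":"projects/p/locations/us-central1/clusters/c","proxyUrl":"https://proxy.example.internal:3128"}
}

Unset pointers stay off the wire, matching how the DCL treats absent optional fields.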
labels: type: object additionalProperties: diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target_internal.go index 0e7c2dd1869..dc5d13c01b7 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target_internal.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target_internal.go @@ -841,6 +841,11 @@ func canonicalizeTargetGke(des, initial *TargetGke, opts ...dcl.ApplyOption) *Ta } else { cDes.InternalIP = des.InternalIP } + if dcl.StringCanonicalize(des.ProxyUrl, initial.ProxyUrl) || dcl.IsZeroValue(des.ProxyUrl) { + cDes.ProxyUrl = initial.ProxyUrl + } else { + cDes.ProxyUrl = des.ProxyUrl + } return cDes } @@ -890,6 +895,9 @@ func canonicalizeNewTargetGke(c *Client, des, nw *TargetGke) *TargetGke { if dcl.BoolCanonicalize(des.InternalIP, nw.InternalIP) { nw.InternalIP = des.InternalIP } + if dcl.StringCanonicalize(des.ProxyUrl, nw.ProxyUrl) { + nw.ProxyUrl = des.ProxyUrl + } return nw } @@ -1749,6 +1757,13 @@ func compareTargetGkeNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldD } diffs = append(diffs, ds...) } + + if ds, err := dcl.Diff(desired.ProxyUrl, actual.ProxyUrl, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("ProxyUrl")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } return diffs, nil } @@ -2191,6 +2206,9 @@ func expandTargetGke(c *Client, f *TargetGke, res *Target) (map[string]interface if v := f.InternalIP; !dcl.IsEmptyValueIndirect(v) { m["internalIp"] = v } + if v := f.ProxyUrl; !dcl.IsEmptyValueIndirect(v) { + m["proxyUrl"] = v + } return m, nil } @@ -2210,6 +2228,7 @@ func flattenTargetGke(c *Client, i interface{}, res *Target) *TargetGke { } r.Cluster = dcl.FlattenString(m["cluster"]) r.InternalIP = dcl.FlattenBool(m["internalIp"]) + r.ProxyUrl = dcl.FlattenString(m["proxyUrl"]) return r } diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target_schema.go index 0ce59b6c0c2..d6b7b9f8135 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target_schema.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target_schema.go @@ -297,6 +297,11 @@ func DCLTargetSchema() *dcl.Schema { GoName: "InternalIP", Description: "Optional. If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. 
Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept).", }, + "proxyUrl": &dcl.Property{ + Type: "string", + GoName: "ProxyUrl", + Description: "Optional. If set, used to configure a [proxy](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#proxy) to the Kubernetes server.", + }, }, }, "labels": &dcl.Property{ diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target_yaml_embed.go index 7d9a4d9f5ca..114186eef31 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target_yaml_embed.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target_yaml_embed.go @@ -17,7 +17,7 @@ package clouddeploy // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/clouddeploy/target.yaml -var YAML_target = []byte("info:\n title: Clouddeploy/Target\n description: The Cloud Deploy `Target` resource\n x-dcl-struct-name: Target\n x-dcl-has-iam: false\n x-dcl-ref:\n text: REST API\n url: https://cloud.google.com/deploy/docs/api/reference/rest/v1/projects.locations.targets\npaths:\n get:\n description: The function used to get information about a Target\n parameters:\n - name: target\n required: true\n description: A full instance of a Target\n apply:\n description: The function used to apply information about a Target\n parameters:\n - name: target\n required: true\n description: A full instance of a Target\n delete:\n description: The function used to delete a Target\n parameters:\n - name: target\n required: true\n description: A full instance of a Target\n deleteAll:\n description: The function used to delete all Target\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Target\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Target:\n title: Target\n x-dcl-id: projects/{{project}}/locations/{{location}}/targets/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - project\n - location\n properties:\n annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: Optional. User annotations. These attributes can only be set\n and used by the user, and not by Google Cloud Deploy. See https://google.aip.dev/128#annotations\n for more details such as format and size limitations.\n anthosCluster:\n type: object\n x-dcl-go-name: AnthosCluster\n x-dcl-go-type: TargetAnthosCluster\n description: Information specifying an Anthos Cluster.\n x-dcl-conflicts:\n - gke\n - run\n - multiTarget\n - customTarget\n properties:\n membership:\n type: string\n x-dcl-go-name: Membership\n description: Membership of the GKE Hub-registered cluster to which to\n apply the Skaffold configuration. 
Format is `projects/{project}/locations/{location}/memberships/{membership_name}`.\n x-dcl-references:\n - resource: Gkehub/Membership\n field: selfLink\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. Time at which the `Target` was created.\n x-kubernetes-immutable: true\n customTarget:\n type: object\n x-dcl-go-name: CustomTarget\n x-dcl-go-type: TargetCustomTarget\n description: Optional. Information specifying a Custom Target.\n x-dcl-conflicts:\n - gke\n - anthosCluster\n - run\n - multiTarget\n required:\n - customTargetType\n properties:\n customTargetType:\n type: string\n x-dcl-go-name: CustomTargetType\n description: Required. The name of the CustomTargetType. Format must\n be `projects/{project}/locations/{location}/customTargetTypes/{custom_target_type}`.\n x-dcl-references:\n - resource: Clouddeploy/CustomTargetType\n field: selfLink\n deployParameters:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DeployParameters\n description: Optional. The deploy parameters to use for this target.\n description:\n type: string\n x-dcl-go-name: Description\n description: Optional. Description of the `Target`. Max length is 255 characters.\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: Optional. This checksum is computed by the server based on\n the value of other fields, and may be sent on update and delete requests\n to ensure the client has an up-to-date value before proceeding.\n x-kubernetes-immutable: true\n executionConfigs:\n type: array\n x-dcl-go-name: ExecutionConfigs\n description: Configurations for all execution that relates to this `Target`.\n Each `ExecutionEnvironmentUsage` value may only be used in a single configuration;\n using the same value multiple times is an error. When one or more configurations\n are specified, they must include the `RENDER` and `DEPLOY` `ExecutionEnvironmentUsage`\n values. When no configurations are specified, execution will use the default\n specified in `DefaultPool`.\n x-dcl-server-default: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: TargetExecutionConfigs\n required:\n - usages\n properties:\n artifactStorage:\n type: string\n x-dcl-go-name: ArtifactStorage\n description: Optional. Cloud Storage location in which to store execution\n outputs. This can either be a bucket (\"gs://my-bucket\") or a path\n within a bucket (\"gs://my-bucket/my-dir\"). If unspecified, a default\n bucket located in the same region will be used.\n x-dcl-server-default: true\n executionTimeout:\n type: string\n x-dcl-go-name: ExecutionTimeout\n description: Optional. Execution timeout for a Cloud Build Execution.\n This must be between 10m and 24h in seconds format. If unspecified,\n a default timeout of 1h is used.\n x-dcl-server-default: true\n serviceAccount:\n type: string\n x-dcl-go-name: ServiceAccount\n description: Optional. Google service account to use for execution.\n If unspecified, the project execution service account (-compute@developer.gserviceaccount.com)\n is used.\n x-dcl-server-default: true\n usages:\n type: array\n x-dcl-go-name: Usages\n description: Required. 
Usages when this configuration should be applied.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: TargetExecutionConfigsUsagesEnum\n enum:\n - EXECUTION_ENVIRONMENT_USAGE_UNSPECIFIED\n - RENDER\n - DEPLOY\n verbose:\n type: boolean\n x-dcl-go-name: Verbose\n description: Optional. If true, additional logging will be enabled\n when running builds in this execution environment.\n workerPool:\n type: string\n x-dcl-go-name: WorkerPool\n description: Optional. The resource name of the `WorkerPool`, with\n the format `projects/{project}/locations/{location}/workerPools/{worker_pool}`.\n If this optional field is unspecified, the default Cloud Build pool\n will be used.\n x-dcl-references:\n - resource: Cloudbuild/WorkerPool\n field: selfLink\n gke:\n type: object\n x-dcl-go-name: Gke\n x-dcl-go-type: TargetGke\n description: Information specifying a GKE Cluster.\n x-dcl-conflicts:\n - anthosCluster\n - run\n - multiTarget\n - customTarget\n properties:\n cluster:\n type: string\n x-dcl-go-name: Cluster\n description: Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}.\n x-dcl-references:\n - resource: Container/Cluster\n field: selfLink\n internalIP:\n type: boolean\n x-dcl-go-name: InternalIP\n description: Optional. If true, `cluster` is accessed using the private\n IP address of the control plane endpoint. Otherwise, the default IP\n address of the control plane endpoint is used. The default IP address\n is the private IP address for clusters with private control-plane\n endpoints and the public IP address otherwise. Only specify this option\n when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept).\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: 'Optional. Labels are attributes that can be set and used by\n both the user and by Google Cloud Deploy. Labels must meet the following\n constraints: * Keys and values can contain only lowercase letters, numeric\n characters, underscores, and dashes. * All characters must use UTF-8 encoding,\n and international characters are allowed. * Keys must start with a lowercase\n letter or international character. * Each resource is limited to a maximum\n of 64 labels. Both keys and values are additionally constrained to be\n <= 128 bytes.'\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n x-dcl-parameter: true\n multiTarget:\n type: object\n x-dcl-go-name: MultiTarget\n x-dcl-go-type: TargetMultiTarget\n description: Information specifying a multiTarget.\n x-dcl-conflicts:\n - gke\n - anthosCluster\n - run\n - customTarget\n required:\n - targetIds\n properties:\n targetIds:\n type: array\n x-dcl-go-name: TargetIds\n description: Required. The target_ids of this multiTarget.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the `Target`. 
Format is `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`.\n x-kubernetes-immutable: true\n x-dcl-parameter: true\n x-dcl-has-long-form: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n x-dcl-parameter: true\n requireApproval:\n type: boolean\n x-dcl-go-name: RequireApproval\n description: Optional. Whether or not the `Target` requires approval.\n run:\n type: object\n x-dcl-go-name: Run\n x-dcl-go-type: TargetRun\n description: Information specifying a Cloud Run deployment target.\n x-dcl-conflicts:\n - gke\n - anthosCluster\n - multiTarget\n - customTarget\n required:\n - location\n properties:\n location:\n type: string\n x-dcl-go-name: Location\n description: Required. The location where the Cloud Run Service should\n be located. Format is `projects/{project}/locations/{location}`.\n targetId:\n type: string\n x-dcl-go-name: TargetId\n readOnly: true\n description: Output only. Resource id of the `Target`.\n x-kubernetes-immutable: true\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. Unique identifier of the `Target`.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. Most recent time at which the `Target` was updated.\n x-kubernetes-immutable: true\n") +var YAML_target = []byte("info:\n title: Clouddeploy/Target\n description: The Cloud Deploy `Target` resource\n x-dcl-struct-name: Target\n x-dcl-has-iam: false\n x-dcl-ref:\n text: REST API\n url: https://cloud.google.com/deploy/docs/api/reference/rest/v1/projects.locations.targets\npaths:\n get:\n description: The function used to get information about a Target\n parameters:\n - name: target\n required: true\n description: A full instance of a Target\n apply:\n description: The function used to apply information about a Target\n parameters:\n - name: target\n required: true\n description: A full instance of a Target\n delete:\n description: The function used to delete a Target\n parameters:\n - name: target\n required: true\n description: A full instance of a Target\n deleteAll:\n description: The function used to delete all Target\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Target\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Target:\n title: Target\n x-dcl-id: projects/{{project}}/locations/{{location}}/targets/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - project\n - location\n properties:\n annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: Optional. User annotations. These attributes can only be set\n and used by the user, and not by Google Cloud Deploy. 
See https://google.aip.dev/128#annotations\n for more details such as format and size limitations.\n anthosCluster:\n type: object\n x-dcl-go-name: AnthosCluster\n x-dcl-go-type: TargetAnthosCluster\n description: Information specifying an Anthos Cluster.\n x-dcl-conflicts:\n - gke\n - run\n - multiTarget\n - customTarget\n properties:\n membership:\n type: string\n x-dcl-go-name: Membership\n description: Membership of the GKE Hub-registered cluster to which to\n apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`.\n x-dcl-references:\n - resource: Gkehub/Membership\n field: selfLink\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. Time at which the `Target` was created.\n x-kubernetes-immutable: true\n customTarget:\n type: object\n x-dcl-go-name: CustomTarget\n x-dcl-go-type: TargetCustomTarget\n description: Optional. Information specifying a Custom Target.\n x-dcl-conflicts:\n - gke\n - anthosCluster\n - run\n - multiTarget\n required:\n - customTargetType\n properties:\n customTargetType:\n type: string\n x-dcl-go-name: CustomTargetType\n description: Required. The name of the CustomTargetType. Format must\n be `projects/{project}/locations/{location}/customTargetTypes/{custom_target_type}`.\n x-dcl-references:\n - resource: Clouddeploy/CustomTargetType\n field: selfLink\n deployParameters:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DeployParameters\n description: Optional. The deploy parameters to use for this target.\n description:\n type: string\n x-dcl-go-name: Description\n description: Optional. Description of the `Target`. Max length is 255 characters.\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: Optional. This checksum is computed by the server based on\n the value of other fields, and may be sent on update and delete requests\n to ensure the client has an up-to-date value before proceeding.\n x-kubernetes-immutable: true\n executionConfigs:\n type: array\n x-dcl-go-name: ExecutionConfigs\n description: Configurations for all execution that relates to this `Target`.\n Each `ExecutionEnvironmentUsage` value may only be used in a single configuration;\n using the same value multiple times is an error. When one or more configurations\n are specified, they must include the `RENDER` and `DEPLOY` `ExecutionEnvironmentUsage`\n values. When no configurations are specified, execution will use the default\n specified in `DefaultPool`.\n x-dcl-server-default: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: TargetExecutionConfigs\n required:\n - usages\n properties:\n artifactStorage:\n type: string\n x-dcl-go-name: ArtifactStorage\n description: Optional. Cloud Storage location in which to store execution\n outputs. This can either be a bucket (\"gs://my-bucket\") or a path\n within a bucket (\"gs://my-bucket/my-dir\"). If unspecified, a default\n bucket located in the same region will be used.\n x-dcl-server-default: true\n executionTimeout:\n type: string\n x-dcl-go-name: ExecutionTimeout\n description: Optional. Execution timeout for a Cloud Build Execution.\n This must be between 10m and 24h in seconds format. If unspecified,\n a default timeout of 1h is used.\n x-dcl-server-default: true\n serviceAccount:\n type: string\n x-dcl-go-name: ServiceAccount\n description: Optional. 
Google service account to use for execution.\n If unspecified, the project execution service account (-compute@developer.gserviceaccount.com)\n is used.\n x-dcl-server-default: true\n usages:\n type: array\n x-dcl-go-name: Usages\n description: Required. Usages when this configuration should be applied.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: TargetExecutionConfigsUsagesEnum\n enum:\n - EXECUTION_ENVIRONMENT_USAGE_UNSPECIFIED\n - RENDER\n - DEPLOY\n verbose:\n type: boolean\n x-dcl-go-name: Verbose\n description: Optional. If true, additional logging will be enabled\n when running builds in this execution environment.\n workerPool:\n type: string\n x-dcl-go-name: WorkerPool\n description: Optional. The resource name of the `WorkerPool`, with\n the format `projects/{project}/locations/{location}/workerPools/{worker_pool}`.\n If this optional field is unspecified, the default Cloud Build pool\n will be used.\n x-dcl-references:\n - resource: Cloudbuild/WorkerPool\n field: selfLink\n gke:\n type: object\n x-dcl-go-name: Gke\n x-dcl-go-type: TargetGke\n description: Information specifying a GKE Cluster.\n x-dcl-conflicts:\n - anthosCluster\n - run\n - multiTarget\n - customTarget\n properties:\n cluster:\n type: string\n x-dcl-go-name: Cluster\n description: Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}.\n x-dcl-references:\n - resource: Container/Cluster\n field: selfLink\n internalIP:\n type: boolean\n x-dcl-go-name: InternalIP\n description: Optional. If true, `cluster` is accessed using the private\n IP address of the control plane endpoint. Otherwise, the default IP\n address of the control plane endpoint is used. The default IP address\n is the private IP address for clusters with private control-plane\n endpoints and the public IP address otherwise. Only specify this option\n when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept).\n proxyUrl:\n type: string\n x-dcl-go-name: ProxyUrl\n description: Optional. If set, used to configure a [proxy](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#proxy)\n to the Kubernetes server.\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: 'Optional. Labels are attributes that can be set and used by\n both the user and by Google Cloud Deploy. Labels must meet the following\n constraints: * Keys and values can contain only lowercase letters, numeric\n characters, underscores, and dashes. * All characters must use UTF-8 encoding,\n and international characters are allowed. * Keys must start with a lowercase\n letter or international character. * Each resource is limited to a maximum\n of 64 labels. Both keys and values are additionally constrained to be\n <= 128 bytes.'\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n x-dcl-parameter: true\n multiTarget:\n type: object\n x-dcl-go-name: MultiTarget\n x-dcl-go-type: TargetMultiTarget\n description: Information specifying a multiTarget.\n x-dcl-conflicts:\n - gke\n - anthosCluster\n - run\n - customTarget\n required:\n - targetIds\n properties:\n targetIds:\n type: array\n x-dcl-go-name: TargetIds\n description: Required. 
The target_ids of this multiTarget.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the `Target`. Format is `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`.\n x-kubernetes-immutable: true\n x-dcl-parameter: true\n x-dcl-has-long-form: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n x-dcl-parameter: true\n requireApproval:\n type: boolean\n x-dcl-go-name: RequireApproval\n description: Optional. Whether or not the `Target` requires approval.\n run:\n type: object\n x-dcl-go-name: Run\n x-dcl-go-type: TargetRun\n description: Information specifying a Cloud Run deployment target.\n x-dcl-conflicts:\n - gke\n - anthosCluster\n - multiTarget\n - customTarget\n required:\n - location\n properties:\n location:\n type: string\n x-dcl-go-name: Location\n description: Required. The location where the Cloud Run Service should\n be located. Format is `projects/{project}/locations/{location}`.\n targetId:\n type: string\n x-dcl-go-name: TargetId\n readOnly: true\n description: Output only. Resource id of the `Target`.\n x-kubernetes-immutable: true\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. Unique identifier of the `Target`.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. Most recent time at which the `Target` was updated.\n x-kubernetes-immutable: true\n") -// 12961 bytes -// MD5: af29d8850ca004a1fffe0f13577c3edd +// 13256 bytes +// MD5: 6af2eccf69e0aafa0d3498bc876d8a28 diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/instance_template.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/instance_template.yaml index 79c82a9f74f..6f7185aa6fe 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/instance_template.yaml +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/instance_template.yaml @@ -630,7 +630,7 @@ components: type: string x-dcl-go-name: Key description: Corresponds to the label key of a reservation resource. - To target a SPECIFIC_RESERVATION by name, specify googleapis.com/reservation-name + To target a SPECIFIC_RESERVATION by name, specify compute.googleapis.com/reservation-name as the key and specify the name of your reservation as its value. 
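The doc fix above corrects the label key for pinning an instance template to a named reservation: the key must be compute.googleapis.com/reservation-name and the value the reservation's name. A rough sketch of the corresponding reservation affinity payload, with a hypothetical reservation name:

package main

import "fmt"

func main() {
	// Hypothetical reservation affinity payload for an instance template,
	// using the corrected label key from the hunk above.
	affinity := map[string]interface{}{
		"consumeReservationType": "SPECIFIC_RESERVATION",
		"key":                    "compute.googleapis.com/reservation-name",
		"values":                 []string{"my-reservation"}, // hypothetical reservation name
	}
	fmt.Printf("%v\n", affinity)
}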
x-kubernetes-immutable: true values: diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool.go index e51aa93c85c..7eca229e0d3 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool.go @@ -39,6 +39,7 @@ type NodePool struct { Annotations map[string]string `json:"annotations"` MaxPodsConstraint *NodePoolMaxPodsConstraint `json:"maxPodsConstraint"` Management *NodePoolManagement `json:"management"` + KubeletConfig *NodePoolKubeletConfig `json:"kubeletConfig"` UpdateSettings *NodePoolUpdateSettings `json:"updateSettings"` Project *string `json:"project"` Location *string `json:"location"` @@ -130,6 +131,33 @@ func (v NodePoolStateEnum) Validate() error { } } +// The enum NodePoolKubeletConfigCpuManagerPolicyEnum. +type NodePoolKubeletConfigCpuManagerPolicyEnum string + +// NodePoolKubeletConfigCpuManagerPolicyEnumRef returns a *NodePoolKubeletConfigCpuManagerPolicyEnum with the value of string s +// If the empty string is provided, nil is returned. +func NodePoolKubeletConfigCpuManagerPolicyEnumRef(s string) *NodePoolKubeletConfigCpuManagerPolicyEnum { + v := NodePoolKubeletConfigCpuManagerPolicyEnum(s) + return &v +} + +func (v NodePoolKubeletConfigCpuManagerPolicyEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"none", "static"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "NodePoolKubeletConfigCpuManagerPolicyEnum", + Value: string(v), + Valid: []string{}, + } +} + type NodePoolConfig struct { empty bool `json:"-"` InstanceType *string `json:"instanceType"` @@ -647,6 +675,61 @@ func (r *NodePoolManagement) HashCode() string { return fmt.Sprintf("%x", hash) } +type NodePoolKubeletConfig struct { + empty bool `json:"-"` + CpuManagerPolicy *NodePoolKubeletConfigCpuManagerPolicyEnum `json:"cpuManagerPolicy"` + CpuCfsQuota *bool `json:"cpuCfsQuota"` + CpuCfsQuotaPeriod *string `json:"cpuCfsQuotaPeriod"` + PodPidsLimit *int64 `json:"podPidsLimit"` +} + +type jsonNodePoolKubeletConfig NodePoolKubeletConfig + +func (r *NodePoolKubeletConfig) UnmarshalJSON(data []byte) error { + var res jsonNodePoolKubeletConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolKubeletConfig + } else { + + r.CpuManagerPolicy = res.CpuManagerPolicy + + r.CpuCfsQuota = res.CpuCfsQuota + + r.CpuCfsQuotaPeriod = res.CpuCfsQuotaPeriod + + r.PodPidsLimit = res.PodPidsLimit + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolKubeletConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolKubeletConfig *NodePoolKubeletConfig = &NodePoolKubeletConfig{empty: true} + +func (r *NodePoolKubeletConfig) Empty() bool { + return r.empty +} + +func (r *NodePoolKubeletConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolKubeletConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + type NodePoolUpdateSettings struct { empty bool `json:"-"` SurgeSettings *NodePoolUpdateSettingsSurgeSettings `json:"surgeSettings"` @@ -772,6 +855,7 @@ func (r *NodePool) ID() (string, error) { "annotations": dcl.ValueOrEmptyString(nr.Annotations), "max_pods_constraint": dcl.ValueOrEmptyString(nr.MaxPodsConstraint), "management": dcl.ValueOrEmptyString(nr.Management), + "kubelet_config": dcl.ValueOrEmptyString(nr.KubeletConfig), "update_settings": dcl.ValueOrEmptyString(nr.UpdateSettings), "project": dcl.ValueOrEmptyString(nr.Project), "location": dcl.ValueOrEmptyString(nr.Location), diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool.yaml index 7d797960d93..da8d9abf369 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool.yaml +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool.yaml @@ -361,6 +361,44 @@ components: optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. x-kubernetes-immutable: true + kubeletConfig: + type: object + x-dcl-go-name: KubeletConfig + x-dcl-go-type: NodePoolKubeletConfig + description: The kubelet configuration for the node pool. + x-kubernetes-immutable: true + x-dcl-server-default: true + properties: + cpuCfsQuota: + type: boolean + x-dcl-go-name: CpuCfsQuota + description: Whether or not to enable CPU CFS quota. Defaults to true. + x-kubernetes-immutable: true + x-dcl-server-default: true + cpuCfsQuotaPeriod: + type: string + x-dcl-go-name: CpuCfsQuotaPeriod + description: Optional. The CPU CFS quota period to use for the node. + Defaults to "100ms". + x-kubernetes-immutable: true + cpuManagerPolicy: + type: string + x-dcl-go-name: CpuManagerPolicy + x-dcl-go-type: NodePoolKubeletConfigCpuManagerPolicyEnum + description: The CpuManagerPolicy to use for the node. Defaults to "none". + x-kubernetes-immutable: true + x-dcl-server-default: true + enum: + - none + - static + podPidsLimit: + type: integer + format: int64 + x-dcl-go-name: PodPidsLimit + description: Optional. The maximum number of PIDs in each pod running + on the node. The limit scales automatically based on underlying machine + size if left unset. 
+ x-kubernetes-immutable: true location: type: string x-dcl-go-name: Location diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool_internal.go index 3e31a879896..d3926f9db0d 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool_internal.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool_internal.go @@ -74,6 +74,11 @@ func (r *NodePool) validate() error { return err } } + if !dcl.IsEmptyValueIndirect(r.KubeletConfig) { + if err := r.KubeletConfig.validate(); err != nil { + return err + } + } if !dcl.IsEmptyValueIndirect(r.UpdateSettings) { if err := r.UpdateSettings.validate(); err != nil { return err @@ -175,6 +180,9 @@ func (r *NodePoolMaxPodsConstraint) validate() error { func (r *NodePoolManagement) validate() error { return nil } +func (r *NodePoolKubeletConfig) validate() error { + return nil +} func (r *NodePoolUpdateSettings) validate() error { if !dcl.IsEmptyValueIndirect(r.SurgeSettings) { if err := r.SurgeSettings.validate(); err != nil { @@ -628,6 +636,7 @@ func canonicalizeNodePoolDesiredState(rawDesired, rawInitial *NodePool, opts ... rawDesired.Autoscaling = canonicalizeNodePoolAutoscaling(rawDesired.Autoscaling, nil, opts...) rawDesired.MaxPodsConstraint = canonicalizeNodePoolMaxPodsConstraint(rawDesired.MaxPodsConstraint, nil, opts...) rawDesired.Management = canonicalizeNodePoolManagement(rawDesired.Management, nil, opts...) + rawDesired.KubeletConfig = canonicalizeNodePoolKubeletConfig(rawDesired.KubeletConfig, nil, opts...) rawDesired.UpdateSettings = canonicalizeNodePoolUpdateSettings(rawDesired.UpdateSettings, nil, opts...) return rawDesired, nil @@ -658,6 +667,7 @@ func canonicalizeNodePoolDesiredState(rawDesired, rawInitial *NodePool, opts ... } canonicalDesired.MaxPodsConstraint = canonicalizeNodePoolMaxPodsConstraint(rawDesired.MaxPodsConstraint, rawInitial.MaxPodsConstraint, opts...) canonicalDesired.Management = canonicalizeNodePoolManagement(rawDesired.Management, rawInitial.Management, opts...) + canonicalDesired.KubeletConfig = canonicalizeNodePoolKubeletConfig(rawDesired.KubeletConfig, rawInitial.KubeletConfig, opts...) canonicalDesired.UpdateSettings = canonicalizeNodePoolUpdateSettings(rawDesired.UpdateSettings, rawInitial.UpdateSettings, opts...) 
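node_pool_internal.go threads the new field through validation and canonicalization. The recurring rule, visible in canonicalizeNodePoolKubeletConfig below, is that a field the user left unset inherits the server-side (initial) value, so server defaults such as cpuCfsQuota=true do not surface as spurious diffs. A minimal sketch of that merge rule, under the simplifying assumption that pointers model set/unset:

package main

import "fmt"

// Sketch of the server-default merge rule: an unset (nil) desired value
// inherits the server's initial value; an explicit value wins.
func canonicalizeBool(desired, initial *bool) *bool {
	if desired == nil {
		return initial
	}
	return desired
}

func main() {
	serverDefault := true
	fmt.Println(*canonicalizeBool(nil, &serverDefault)) // true: server default kept
	explicit := false
	fmt.Println(*canonicalizeBool(&explicit, &serverDefault)) // false: user value wins
}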
if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { canonicalDesired.Project = rawInitial.Project @@ -771,6 +781,12 @@ func canonicalizeNodePoolNewState(c *Client, rawNew, rawDesired *NodePool) (*Nod rawNew.Management = canonicalizeNewNodePoolManagement(c, rawDesired.Management, rawNew.Management) } + if dcl.IsEmptyValueIndirect(rawNew.KubeletConfig) && dcl.IsEmptyValueIndirect(rawDesired.KubeletConfig) { + rawNew.KubeletConfig = rawDesired.KubeletConfig + } else { + rawNew.KubeletConfig = canonicalizeNewNodePoolKubeletConfig(c, rawDesired.KubeletConfig, rawNew.KubeletConfig) + } + if dcl.IsEmptyValueIndirect(rawNew.UpdateSettings) && dcl.IsEmptyValueIndirect(rawDesired.UpdateSettings) { rawNew.UpdateSettings = rawDesired.UpdateSettings } else { @@ -2060,6 +2076,144 @@ func canonicalizeNewNodePoolManagementSlice(c *Client, des, nw []NodePoolManagem return items } +func canonicalizeNodePoolKubeletConfig(des, initial *NodePoolKubeletConfig, opts ...dcl.ApplyOption) *NodePoolKubeletConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolKubeletConfig{} + + if dcl.IsZeroValue(des.CpuManagerPolicy) || (dcl.IsEmptyValueIndirect(des.CpuManagerPolicy) && dcl.IsEmptyValueIndirect(initial.CpuManagerPolicy)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.CpuManagerPolicy = initial.CpuManagerPolicy + } else { + cDes.CpuManagerPolicy = des.CpuManagerPolicy + } + if dcl.BoolCanonicalize(des.CpuCfsQuota, initial.CpuCfsQuota) || dcl.IsZeroValue(des.CpuCfsQuota) { + cDes.CpuCfsQuota = initial.CpuCfsQuota + } else { + cDes.CpuCfsQuota = des.CpuCfsQuota + } + if dcl.StringCanonicalize(des.CpuCfsQuotaPeriod, initial.CpuCfsQuotaPeriod) || dcl.IsZeroValue(des.CpuCfsQuotaPeriod) { + cDes.CpuCfsQuotaPeriod = initial.CpuCfsQuotaPeriod + } else { + cDes.CpuCfsQuotaPeriod = des.CpuCfsQuotaPeriod + } + if dcl.IsZeroValue(des.PodPidsLimit) || (dcl.IsEmptyValueIndirect(des.PodPidsLimit) && dcl.IsEmptyValueIndirect(initial.PodPidsLimit)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.PodPidsLimit = initial.PodPidsLimit + } else { + cDes.PodPidsLimit = des.PodPidsLimit + } + + return cDes +} + +func canonicalizeNodePoolKubeletConfigSlice(des, initial []NodePoolKubeletConfig, opts ...dcl.ApplyOption) []NodePoolKubeletConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolKubeletConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolKubeletConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolKubeletConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolKubeletConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolKubeletConfig(c *Client, des, nw *NodePoolKubeletConfig) *NodePoolKubeletConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolKubeletConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.CpuCfsQuota, nw.CpuCfsQuota) { + nw.CpuCfsQuota = des.CpuCfsQuota + } + if dcl.StringCanonicalize(des.CpuCfsQuotaPeriod, nw.CpuCfsQuotaPeriod) { + nw.CpuCfsQuotaPeriod = des.CpuCfsQuotaPeriod + } + + return nw +} + +func canonicalizeNewNodePoolKubeletConfigSet(c *Client, des, nw []NodePoolKubeletConfig) []NodePoolKubeletConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolKubeletConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolKubeletConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolKubeletConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolKubeletConfigSlice(c *Client, des, nw []NodePoolKubeletConfig) []NodePoolKubeletConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []NodePoolKubeletConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolKubeletConfig(c, &d, &n)) + } + + return items +} + func canonicalizeNodePoolUpdateSettings(des, initial *NodePoolUpdateSettings, opts ...dcl.ApplyOption) *NodePoolUpdateSettings { if des == nil { return initial @@ -2409,6 +2563,13 @@ func diffNodePool(c *Client, desired, actual *NodePool, opts ...dcl.ApplyOption) newDiffs = append(newDiffs, ds...) } + if ds, err := dcl.Diff(desired.KubeletConfig, actual.KubeletConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareNodePoolKubeletConfigNewStyle, EmptyObject: EmptyNodePoolKubeletConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KubeletConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + if ds, err := dcl.Diff(desired.UpdateSettings, actual.UpdateSettings, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareNodePoolUpdateSettingsNewStyle, EmptyObject: EmptyNodePoolUpdateSettings, OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("UpdateSettings")); len(ds) != 0 || err != nil { if err != nil { return nil, err @@ -2865,6 +3026,56 @@ func compareNodePoolManagementNewStyle(d, a interface{}, fn dcl.FieldName) ([]*d return diffs, nil } +func compareNodePoolKubeletConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolKubeletConfig) + if !ok { + desiredNotPointer, ok := d.(NodePoolKubeletConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolKubeletConfig or *NodePoolKubeletConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolKubeletConfig) + if !ok { + actualNotPointer, ok := a.(NodePoolKubeletConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolKubeletConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.CpuManagerPolicy, actual.CpuManagerPolicy, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CpuManagerPolicy")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.CpuCfsQuota, actual.CpuCfsQuota, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CpuCfsQuota")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.CpuCfsQuotaPeriod, actual.CpuCfsQuotaPeriod, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CpuCfsQuotaPeriod")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PodPidsLimit, actual.PodPidsLimit, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PodPidsLimit")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + func compareNodePoolUpdateSettingsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { var diffs []*dcl.FieldDiff @@ -3031,6 +3242,11 @@ func expandNodePool(c *Client, f *NodePool) (map[string]interface{}, error) { } else if !dcl.IsEmptyValueIndirect(v) { m["management"] = v } + if v, err := expandNodePoolKubeletConfig(c, f.KubeletConfig, res); err != nil { + return nil, fmt.Errorf("error expanding KubeletConfig into kubeletConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["kubeletConfig"] = v + } if v, err := expandNodePoolUpdateSettings(c, f.UpdateSettings, res); err != nil { return nil, fmt.Errorf("error expanding UpdateSettings into updateSettings: %w", err) } else if !dcl.IsEmptyValueIndirect(v) { @@ -3081,6 +3297,7 @@ func flattenNodePool(c *Client, i interface{}, res *NodePool) *NodePool { resultRes.Annotations = dcl.FlattenKeyValuePairs(m["annotations"]) resultRes.MaxPodsConstraint = flattenNodePoolMaxPodsConstraint(c, m["maxPodsConstraint"], res) resultRes.Management = flattenNodePoolManagement(c, m["management"], res) + resultRes.KubeletConfig = flattenNodePoolKubeletConfig(c, m["kubeletConfig"], res) resultRes.UpdateSettings = flattenNodePoolUpdateSettings(c, m["updateSettings"], res) resultRes.Project = dcl.FlattenString(m["project"]) resultRes.Location = dcl.FlattenString(m["location"]) @@ -4317,6 +4534,132 @@ func flattenNodePoolManagement(c *Client, i interface{}, res *NodePool) *NodePoo return r } +// expandNodePoolKubeletConfigMap expands the contents of NodePoolKubeletConfig into a JSON +// request object. +func expandNodePoolKubeletConfigMap(c *Client, f map[string]NodePoolKubeletConfig, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolKubeletConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolKubeletConfigSlice expands the contents of NodePoolKubeletConfig into a JSON +// request object. +func expandNodePoolKubeletConfigSlice(c *Client, f []NodePoolKubeletConfig, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolKubeletConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolKubeletConfigMap flattens the contents of NodePoolKubeletConfig from a JSON +// response object. +func flattenNodePoolKubeletConfigMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolKubeletConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolKubeletConfig{} + } + + if len(a) == 0 { + return map[string]NodePoolKubeletConfig{} + } + + items := make(map[string]NodePoolKubeletConfig) + for k, item := range a { + items[k] = *flattenNodePoolKubeletConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolKubeletConfigSlice flattens the contents of NodePoolKubeletConfig from a JSON +// response object. 
+func flattenNodePoolKubeletConfigSlice(c *Client, i interface{}, res *NodePool) []NodePoolKubeletConfig { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolKubeletConfig{} + } + + if len(a) == 0 { + return []NodePoolKubeletConfig{} + } + + items := make([]NodePoolKubeletConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolKubeletConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolKubeletConfig expands an instance of NodePoolKubeletConfig into a JSON +// request object. +func expandNodePoolKubeletConfig(c *Client, f *NodePoolKubeletConfig, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.CpuManagerPolicy; !dcl.IsEmptyValueIndirect(v) { + m["cpuManagerPolicy"] = v + } + if v := f.CpuCfsQuota; !dcl.IsEmptyValueIndirect(v) { + m["cpuCfsQuota"] = v + } + if v := f.CpuCfsQuotaPeriod; !dcl.IsEmptyValueIndirect(v) { + m["cpuCfsQuotaPeriod"] = v + } + if v := f.PodPidsLimit; !dcl.IsEmptyValueIndirect(v) { + m["podPidsLimit"] = v + } + + return m, nil +} + +// flattenNodePoolKubeletConfig flattens an instance of NodePoolKubeletConfig from a JSON +// response object. +func flattenNodePoolKubeletConfig(c *Client, i interface{}, res *NodePool) *NodePoolKubeletConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolKubeletConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolKubeletConfig + } + r.CpuManagerPolicy = flattenNodePoolKubeletConfigCpuManagerPolicyEnum(m["cpuManagerPolicy"]) + r.CpuCfsQuota = dcl.FlattenBool(m["cpuCfsQuota"]) + r.CpuCfsQuotaPeriod = dcl.FlattenString(m["cpuCfsQuotaPeriod"]) + r.PodPidsLimit = dcl.FlattenInteger(m["podPidsLimit"]) + + return r +} + // expandNodePoolUpdateSettingsMap expands the contents of NodePoolUpdateSettings into a JSON // request object. func expandNodePoolUpdateSettingsMap(c *Client, f map[string]NodePoolUpdateSettings, res *NodePool) (map[string]interface{}, error) { @@ -4704,6 +5047,57 @@ func flattenNodePoolStateEnum(i interface{}) *NodePoolStateEnum { return NodePoolStateEnumRef(s) } +// flattenNodePoolKubeletConfigCpuManagerPolicyEnumMap flattens the contents of NodePoolKubeletConfigCpuManagerPolicyEnum from a JSON +// response object. +func flattenNodePoolKubeletConfigCpuManagerPolicyEnumMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolKubeletConfigCpuManagerPolicyEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolKubeletConfigCpuManagerPolicyEnum{} + } + + if len(a) == 0 { + return map[string]NodePoolKubeletConfigCpuManagerPolicyEnum{} + } + + items := make(map[string]NodePoolKubeletConfigCpuManagerPolicyEnum) + for k, item := range a { + items[k] = *flattenNodePoolKubeletConfigCpuManagerPolicyEnum(item.(interface{})) + } + + return items +} + +// flattenNodePoolKubeletConfigCpuManagerPolicyEnumSlice flattens the contents of NodePoolKubeletConfigCpuManagerPolicyEnum from a JSON +// response object. 
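The expand/flatten pair above converts between the typed NodePoolKubeletConfig and the camelCase JSON maps the API speaks, skipping unset fields on the way out. A self-contained approximation of that round trip using encoding/json (the real helpers build the maps by hand and route enum values through the flatten*Enum helpers):

package main

import (
	"encoding/json"
	"fmt"
)

// Approximation of the expand/flatten round trip: pointer fields with
// omitempty mimic "only send fields that are set".
type kubeletConfig struct {
	CpuManagerPolicy  *string `json:"cpuManagerPolicy,omitempty"`
	CpuCfsQuota       *bool   `json:"cpuCfsQuota,omitempty"`
	CpuCfsQuotaPeriod *string `json:"cpuCfsQuotaPeriod,omitempty"`
	PodPidsLimit      *int64  `json:"podPidsLimit,omitempty"`
}

func main() {
	policy, limit := "static", int64(4096)
	out, _ := json.Marshal(kubeletConfig{CpuManagerPolicy: &policy, PodPidsLimit: &limit})
	fmt.Println(string(out)) // {"cpuManagerPolicy":"static","podPidsLimit":4096}

	var back kubeletConfig
	_ = json.Unmarshal(out, &back)
	fmt.Println(*back.CpuManagerPolicy, *back.PodPidsLimit) // static 4096
}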
+func flattenNodePoolKubeletConfigCpuManagerPolicyEnumSlice(c *Client, i interface{}, res *NodePool) []NodePoolKubeletConfigCpuManagerPolicyEnum { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolKubeletConfigCpuManagerPolicyEnum{} + } + + if len(a) == 0 { + return []NodePoolKubeletConfigCpuManagerPolicyEnum{} + } + + items := make([]NodePoolKubeletConfigCpuManagerPolicyEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolKubeletConfigCpuManagerPolicyEnum(item.(interface{}))) + } + + return items +} + +// flattenNodePoolKubeletConfigCpuManagerPolicyEnum asserts that an interface is a string, and returns a +// pointer to a *NodePoolKubeletConfigCpuManagerPolicyEnum with the same value as that string. +func flattenNodePoolKubeletConfigCpuManagerPolicyEnum(i interface{}) *NodePoolKubeletConfigCpuManagerPolicyEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return NodePoolKubeletConfigCpuManagerPolicyEnumRef(s) +} + // This function returns a matcher that checks whether a serialized resource matches this resource // in its parameters (as defined by the fields in a Get, which definitionally define resource // identity). This is useful in extracting the element from a List call. @@ -4850,6 +5244,17 @@ func extractNodePoolFields(r *NodePool) error { if !dcl.IsEmptyValueIndirect(vManagement) { r.Management = vManagement } + vKubeletConfig := r.KubeletConfig + if vKubeletConfig == nil { + // note: explicitly not the empty object. + vKubeletConfig = &NodePoolKubeletConfig{} + } + if err := extractNodePoolKubeletConfigFields(r, vKubeletConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKubeletConfig) { + r.KubeletConfig = vKubeletConfig + } vUpdateSettings := r.UpdateSettings if vUpdateSettings == nil { // note: explicitly not the empty object. @@ -4948,6 +5353,9 @@ func extractNodePoolMaxPodsConstraintFields(r *NodePool, o *NodePoolMaxPodsConst func extractNodePoolManagementFields(r *NodePool, o *NodePoolManagement) error { return nil } +func extractNodePoolKubeletConfigFields(r *NodePool, o *NodePoolKubeletConfig) error { + return nil +} func extractNodePoolUpdateSettingsFields(r *NodePool, o *NodePoolUpdateSettings) error { vSurgeSettings := o.SurgeSettings if vSurgeSettings == nil { @@ -5011,6 +5419,17 @@ func postReadExtractNodePoolFields(r *NodePool) error { if !dcl.IsEmptyValueIndirect(vManagement) { r.Management = vManagement } + vKubeletConfig := r.KubeletConfig + if vKubeletConfig == nil { + // note: explicitly not the empty object. + vKubeletConfig = &NodePoolKubeletConfig{} + } + if err := postReadExtractNodePoolKubeletConfigFields(r, vKubeletConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKubeletConfig) { + r.KubeletConfig = vKubeletConfig + } vUpdateSettings := r.UpdateSettings if vUpdateSettings == nil { // note: explicitly not the empty object. 
@@ -5109,6 +5528,9 @@ func postReadExtractNodePoolMaxPodsConstraintFields(r *NodePool, o *NodePoolMaxP func postReadExtractNodePoolManagementFields(r *NodePool, o *NodePoolManagement) error { return nil } +func postReadExtractNodePoolKubeletConfigFields(r *NodePool, o *NodePoolKubeletConfig) error { + return nil +} func postReadExtractNodePoolUpdateSettingsFields(r *NodePool, o *NodePoolUpdateSettings) error { vSurgeSettings := o.SurgeSettings if vSurgeSettings == nil { diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool_schema.go index d844c24c817..2d795114431 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool_schema.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool_schema.go @@ -422,6 +422,48 @@ func DCLNodePoolSchema() *dcl.Schema { Description: "Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", Immutable: true, }, + "kubeletConfig": &dcl.Property{ + Type: "object", + GoName: "KubeletConfig", + GoType: "NodePoolKubeletConfig", + Description: "The kubelet configuration for the node pool.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "cpuCfsQuota": &dcl.Property{ + Type: "boolean", + GoName: "CpuCfsQuota", + Description: "Whether or not to enable CPU CFS quota. Defaults to true.", + Immutable: true, + ServerDefault: true, + }, + "cpuCfsQuotaPeriod": &dcl.Property{ + Type: "string", + GoName: "CpuCfsQuotaPeriod", + Description: "Optional. The CPU CFS quota period to use for the node. Defaults to \"100ms\".", + Immutable: true, + }, + "cpuManagerPolicy": &dcl.Property{ + Type: "string", + GoName: "CpuManagerPolicy", + GoType: "NodePoolKubeletConfigCpuManagerPolicyEnum", + Description: "The CpuManagerPolicy to use for the node. Defaults to \"none\".", + Immutable: true, + ServerDefault: true, + Enum: []string{ + "none", + "static", + }, + }, + "podPidsLimit": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "PodPidsLimit", + Description: "Optional. The maximum number of PIDs in each pod running on the node. 
The limit scales automatically based on underlying machine size if left unset.", + Immutable: true, + }, + }, + }, "location": &dcl.Property{ Type: "string", GoName: "Location", diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool_yaml_embed.go index 51e204f78fc..93c5119c54d 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool_yaml_embed.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool_yaml_embed.go @@ -17,7 +17,7 @@ package containeraws // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/containeraws/node_pool.yaml -var YAML_node_pool = []byte("info:\n title: ContainerAws/NodePool\n description: An Anthos node pool running on AWS.\n x-dcl-struct-name: NodePool\n x-dcl-has-iam: false\n x-dcl-ref:\n text: API reference\n url: https://cloud.google.com/kubernetes-engine/multi-cloud/docs/reference/rest/v1/projects.locations.awsClusters.awsNodePools\n x-dcl-guides:\n - text: Multicloud overview\n url: https://cloud.google.com/kubernetes-engine/multi-cloud/docs\npaths:\n get:\n description: The function used to get information about a NodePool\n parameters:\n - name: nodePool\n required: true\n description: A full instance of a NodePool\n apply:\n description: The function used to apply information about a NodePool\n parameters:\n - name: nodePool\n required: true\n description: A full instance of a NodePool\n delete:\n description: The function used to delete a NodePool\n parameters:\n - name: nodePool\n required: true\n description: A full instance of a NodePool\n deleteAll:\n description: The function used to delete all NodePool\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: cluster\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many NodePool\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: cluster\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n NodePool:\n title: NodePool\n x-dcl-id: projects/{{project}}/locations/{{location}}/awsClusters/{{cluster}}/awsNodePools/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - version\n - config\n - autoscaling\n - subnetId\n - maxPodsConstraint\n - project\n - location\n - cluster\n properties:\n annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: 'Optional. Annotations on the node pool. This field has the\n same restrictions as Kubernetes annotations. The total size of all keys\n and values combined is limited to 256k. Key can have 2 segments: prefix\n (optional) and name (required), separated by a slash (/). Prefix must\n be a DNS subdomain. 
Name must be 63 characters or less, begin and end\n with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics\n between.'\n autoscaling:\n type: object\n x-dcl-go-name: Autoscaling\n x-dcl-go-type: NodePoolAutoscaling\n description: Autoscaler configuration for this node pool.\n required:\n - minNodeCount\n - maxNodeCount\n properties:\n maxNodeCount:\n type: integer\n format: int64\n x-dcl-go-name: MaxNodeCount\n description: Maximum number of nodes in the NodePool. Must be >= min_node_count.\n minNodeCount:\n type: integer\n format: int64\n x-dcl-go-name: MinNodeCount\n description: Minimum number of nodes in the NodePool. Must be >= 1 and\n <= max_node_count.\n cluster:\n type: string\n x-dcl-go-name: Cluster\n description: The awsCluster for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Gkemulticloud/Cluster\n field: name\n parent: true\n x-dcl-parameter: true\n config:\n type: object\n x-dcl-go-name: Config\n x-dcl-go-type: NodePoolConfig\n description: The configuration of the node pool.\n required:\n - iamInstanceProfile\n - configEncryption\n properties:\n autoscalingMetricsCollection:\n type: object\n x-dcl-go-name: AutoscalingMetricsCollection\n x-dcl-go-type: NodePoolConfigAutoscalingMetricsCollection\n description: Optional. Configuration related to CloudWatch metrics collection\n on the Auto Scaling group of the node pool. When unspecified, metrics\n collection is disabled.\n required:\n - granularity\n properties:\n granularity:\n type: string\n x-dcl-go-name: Granularity\n description: The frequency at which EC2 Auto Scaling sends aggregated\n data to AWS CloudWatch. The only valid value is \"1Minute\".\n metrics:\n type: array\n x-dcl-go-name: Metrics\n description: The metrics to enable. For a list of valid metrics,\n see https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_EnableMetricsCollection.html.\n If you specify granularity and don't specify any metrics, all\n metrics are enabled.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n configEncryption:\n type: object\n x-dcl-go-name: ConfigEncryption\n x-dcl-go-type: NodePoolConfigConfigEncryption\n description: The ARN of the AWS KMS key used to encrypt node pool configuration.\n required:\n - kmsKeyArn\n properties:\n kmsKeyArn:\n type: string\n x-dcl-go-name: KmsKeyArn\n description: The ARN of the AWS KMS key used to encrypt node pool\n configuration.\n iamInstanceProfile:\n type: string\n x-dcl-go-name: IamInstanceProfile\n description: The name of the AWS IAM role assigned to nodes in the pool.\n instanceType:\n type: string\n x-dcl-go-name: InstanceType\n description: Optional. The AWS instance type. When unspecified, it defaults\n to `m5.large`.\n x-dcl-server-default: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: 'Optional. The initial labels assigned to nodes of this\n node pool. An object containing a list of \"key\": value pairs. 
Example:\n { \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.'\n proxyConfig:\n type: object\n x-dcl-go-name: ProxyConfig\n x-dcl-go-type: NodePoolConfigProxyConfig\n description: Proxy configuration for outbound HTTP(S) traffic.\n required:\n - secretArn\n - secretVersion\n properties:\n secretArn:\n type: string\n x-dcl-go-name: SecretArn\n description: The ARN of the AWS Secret Manager secret that contains\n the HTTP(S) proxy configuration.\n secretVersion:\n type: string\n x-dcl-go-name: SecretVersion\n description: The version string of the AWS Secret Manager secret\n that contains the HTTP(S) proxy configuration.\n rootVolume:\n type: object\n x-dcl-go-name: RootVolume\n x-dcl-go-type: NodePoolConfigRootVolume\n description: Optional. Template for the root volume provisioned for\n node pool nodes. Volumes will be provisioned in the availability zone\n assigned to the node pool subnet. When unspecified, it defaults to\n 32 GiB with the GP2 volume type.\n x-dcl-server-default: true\n properties:\n iops:\n type: integer\n format: int64\n x-dcl-go-name: Iops\n description: Optional. The number of I/O operations per second (IOPS)\n to provision for GP3 volume.\n x-dcl-server-default: true\n kmsKeyArn:\n type: string\n x-dcl-go-name: KmsKeyArn\n description: Optional. The Amazon Resource Name (ARN) of the Customer\n Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified,\n the default Amazon managed key associated to the AWS region where\n this cluster runs will be used.\n sizeGib:\n type: integer\n format: int64\n x-dcl-go-name: SizeGib\n description: Optional. The size of the volume, in GiBs. When unspecified,\n a default value is provided. See the specific reference in the\n parent resource.\n x-dcl-server-default: true\n throughput:\n type: integer\n format: int64\n x-dcl-go-name: Throughput\n description: Optional. The throughput to provision for the volume,\n in MiB/s. Only valid if the volume type is GP3. If volume type\n is gp3 and throughput is not specified, the throughput will defaults\n to 125.\n x-dcl-server-default: true\n volumeType:\n type: string\n x-dcl-go-name: VolumeType\n x-dcl-go-type: NodePoolConfigRootVolumeVolumeTypeEnum\n description: 'Optional. Type of the EBS volume. When unspecified,\n it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED,\n GP2, GP3'\n x-dcl-server-default: true\n enum:\n - VOLUME_TYPE_UNSPECIFIED\n - GP2\n - GP3\n securityGroupIds:\n type: array\n x-dcl-go-name: SecurityGroupIds\n description: Optional. The IDs of additional security groups to add\n to nodes in this pool. The manager will automatically create security\n groups with minimum rules needed for a functioning cluster.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n sshConfig:\n type: object\n x-dcl-go-name: SshConfig\n x-dcl-go-type: NodePoolConfigSshConfig\n description: Optional. The SSH configuration.\n required:\n - ec2KeyPair\n properties:\n ec2KeyPair:\n type: string\n x-dcl-go-name: Ec2KeyPair\n description: The name of the EC2 key pair used to login into cluster\n machines.\n tags:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Tags\n description: Optional. Key/value metadata to assign to each underlying\n AWS resource. Specify at most 50 pairs containing alphanumerics, spaces,\n and symbols (.+-=_:@/). 
Keys can be up to 127 Unicode characters.\n Values can be up to 255 Unicode characters.\n taints:\n type: array\n x-dcl-go-name: Taints\n description: Optional. The initial taints assigned to nodes of this\n node pool.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: NodePoolConfigTaints\n required:\n - key\n - value\n - effect\n properties:\n effect:\n type: string\n x-dcl-go-name: Effect\n x-dcl-go-type: NodePoolConfigTaintsEffectEnum\n description: 'The taint effect. Possible values: EFFECT_UNSPECIFIED,\n NO_SCHEDULE, PREFER_NO_SCHEDULE, NO_EXECUTE'\n x-kubernetes-immutable: true\n enum:\n - EFFECT_UNSPECIFIED\n - NO_SCHEDULE\n - PREFER_NO_SCHEDULE\n - NO_EXECUTE\n key:\n type: string\n x-dcl-go-name: Key\n description: Key for the taint.\n x-kubernetes-immutable: true\n value:\n type: string\n x-dcl-go-name: Value\n description: Value for the taint.\n x-kubernetes-immutable: true\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The time at which this node pool was created.\n x-kubernetes-immutable: true\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: Allows clients to perform consistent read-modify-writes through\n optimistic concurrency control. May be sent on update and delete requests\n to ensure the client has an up-to-date value before proceeding.\n x-kubernetes-immutable: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n x-dcl-parameter: true\n management:\n type: object\n x-dcl-go-name: Management\n x-dcl-go-type: NodePoolManagement\n description: The Management configuration for this node pool.\n properties:\n autoRepair:\n type: boolean\n x-dcl-go-name: AutoRepair\n description: Optional. Whether or not the nodes will be automatically\n repaired.\n maxPodsConstraint:\n type: object\n x-dcl-go-name: MaxPodsConstraint\n x-dcl-go-type: NodePoolMaxPodsConstraint\n description: The constraint on the maximum number of pods that can be run\n simultaneously on a node in the node pool.\n x-kubernetes-immutable: true\n required:\n - maxPodsPerNode\n properties:\n maxPodsPerNode:\n type: integer\n format: int64\n x-dcl-go-name: MaxPodsPerNode\n description: The maximum number of pods to schedule on a single node.\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: The name of this resource.\n x-kubernetes-immutable: true\n x-dcl-has-long-form: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n x-dcl-parameter: true\n reconciling:\n type: boolean\n x-dcl-go-name: Reconciling\n readOnly: true\n description: Output only. If set, there are currently changes in flight\n to the node pool.\n x-kubernetes-immutable: true\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: NodePoolStateEnum\n readOnly: true\n description: 'Output only. The lifecycle state of the node pool. 
Possible\n values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING,\n ERROR, DEGRADED'\n x-kubernetes-immutable: true\n enum:\n - STATE_UNSPECIFIED\n - PROVISIONING\n - RUNNING\n - RECONCILING\n - STOPPING\n - ERROR\n - DEGRADED\n subnetId:\n type: string\n x-dcl-go-name: SubnetId\n description: The subnet where the node pool node run.\n x-kubernetes-immutable: true\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. A globally unique identifier for the node pool.\n x-kubernetes-immutable: true\n updateSettings:\n type: object\n x-dcl-go-name: UpdateSettings\n x-dcl-go-type: NodePoolUpdateSettings\n description: Optional. Update settings control the speed and disruption\n of the node pool update.\n x-dcl-server-default: true\n properties:\n surgeSettings:\n type: object\n x-dcl-go-name: SurgeSettings\n x-dcl-go-type: NodePoolUpdateSettingsSurgeSettings\n description: Optional. Settings for surge update.\n x-dcl-server-default: true\n properties:\n maxSurge:\n type: integer\n format: int64\n x-dcl-go-name: MaxSurge\n description: Optional. The maximum number of nodes that can be created\n beyond the current size of the node pool during the update process.\n x-dcl-server-default: true\n maxUnavailable:\n type: integer\n format: int64\n x-dcl-go-name: MaxUnavailable\n description: Optional. The maximum number of nodes that can be simultaneously\n unavailable during the update process. A node is considered unavailable\n if its status is not Ready.\n x-dcl-server-default: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The time at which this node pool was last updated.\n x-kubernetes-immutable: true\n version:\n type: string\n x-dcl-go-name: Version\n description: The Kubernetes version to run on this node pool (e.g. 
`1.19.10-gke.1000`).\n You can list all supported versions on a given Google Cloud region by\n calling GetAwsServerConfig.\n") +var YAML_node_pool = []byte("info:\n title: ContainerAws/NodePool\n description: An Anthos node pool running on AWS.\n x-dcl-struct-name: NodePool\n x-dcl-has-iam: false\n x-dcl-ref:\n text: API reference\n url: https://cloud.google.com/kubernetes-engine/multi-cloud/docs/reference/rest/v1/projects.locations.awsClusters.awsNodePools\n x-dcl-guides:\n - text: Multicloud overview\n url: https://cloud.google.com/kubernetes-engine/multi-cloud/docs\npaths:\n get:\n description: The function used to get information about a NodePool\n parameters:\n - name: nodePool\n required: true\n description: A full instance of a NodePool\n apply:\n description: The function used to apply information about a NodePool\n parameters:\n - name: nodePool\n required: true\n description: A full instance of a NodePool\n delete:\n description: The function used to delete a NodePool\n parameters:\n - name: nodePool\n required: true\n description: A full instance of a NodePool\n deleteAll:\n description: The function used to delete all NodePool\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: cluster\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many NodePool\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: cluster\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n NodePool:\n title: NodePool\n x-dcl-id: projects/{{project}}/locations/{{location}}/awsClusters/{{cluster}}/awsNodePools/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - version\n - config\n - autoscaling\n - subnetId\n - maxPodsConstraint\n - project\n - location\n - cluster\n properties:\n annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: 'Optional. Annotations on the node pool. This field has the\n same restrictions as Kubernetes annotations. The total size of all keys\n and values combined is limited to 256k. Key can have 2 segments: prefix\n (optional) and name (required), separated by a slash (/). Prefix must\n be a DNS subdomain. Name must be 63 characters or less, begin and end\n with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics\n between.'\n autoscaling:\n type: object\n x-dcl-go-name: Autoscaling\n x-dcl-go-type: NodePoolAutoscaling\n description: Autoscaler configuration for this node pool.\n required:\n - minNodeCount\n - maxNodeCount\n properties:\n maxNodeCount:\n type: integer\n format: int64\n x-dcl-go-name: MaxNodeCount\n description: Maximum number of nodes in the NodePool. Must be >= min_node_count.\n minNodeCount:\n type: integer\n format: int64\n x-dcl-go-name: MinNodeCount\n description: Minimum number of nodes in the NodePool. 
Must be >= 1 and\n <= max_node_count.\n cluster:\n type: string\n x-dcl-go-name: Cluster\n description: The awsCluster for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Gkemulticloud/Cluster\n field: name\n parent: true\n x-dcl-parameter: true\n config:\n type: object\n x-dcl-go-name: Config\n x-dcl-go-type: NodePoolConfig\n description: The configuration of the node pool.\n required:\n - iamInstanceProfile\n - configEncryption\n properties:\n autoscalingMetricsCollection:\n type: object\n x-dcl-go-name: AutoscalingMetricsCollection\n x-dcl-go-type: NodePoolConfigAutoscalingMetricsCollection\n description: Optional. Configuration related to CloudWatch metrics collection\n on the Auto Scaling group of the node pool. When unspecified, metrics\n collection is disabled.\n required:\n - granularity\n properties:\n granularity:\n type: string\n x-dcl-go-name: Granularity\n description: The frequency at which EC2 Auto Scaling sends aggregated\n data to AWS CloudWatch. The only valid value is \"1Minute\".\n metrics:\n type: array\n x-dcl-go-name: Metrics\n description: The metrics to enable. For a list of valid metrics,\n see https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_EnableMetricsCollection.html.\n If you specify granularity and don't specify any metrics, all\n metrics are enabled.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n configEncryption:\n type: object\n x-dcl-go-name: ConfigEncryption\n x-dcl-go-type: NodePoolConfigConfigEncryption\n description: The ARN of the AWS KMS key used to encrypt node pool configuration.\n required:\n - kmsKeyArn\n properties:\n kmsKeyArn:\n type: string\n x-dcl-go-name: KmsKeyArn\n description: The ARN of the AWS KMS key used to encrypt node pool\n configuration.\n iamInstanceProfile:\n type: string\n x-dcl-go-name: IamInstanceProfile\n description: The name of the AWS IAM role assigned to nodes in the pool.\n instanceType:\n type: string\n x-dcl-go-name: InstanceType\n description: Optional. The AWS instance type. When unspecified, it defaults\n to `m5.large`.\n x-dcl-server-default: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: 'Optional. The initial labels assigned to nodes of this\n node pool. An object containing a list of \"key\": value pairs. Example:\n { \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.'\n proxyConfig:\n type: object\n x-dcl-go-name: ProxyConfig\n x-dcl-go-type: NodePoolConfigProxyConfig\n description: Proxy configuration for outbound HTTP(S) traffic.\n required:\n - secretArn\n - secretVersion\n properties:\n secretArn:\n type: string\n x-dcl-go-name: SecretArn\n description: The ARN of the AWS Secret Manager secret that contains\n the HTTP(S) proxy configuration.\n secretVersion:\n type: string\n x-dcl-go-name: SecretVersion\n description: The version string of the AWS Secret Manager secret\n that contains the HTTP(S) proxy configuration.\n rootVolume:\n type: object\n x-dcl-go-name: RootVolume\n x-dcl-go-type: NodePoolConfigRootVolume\n description: Optional. Template for the root volume provisioned for\n node pool nodes. Volumes will be provisioned in the availability zone\n assigned to the node pool subnet. When unspecified, it defaults to\n 32 GiB with the GP2 volume type.\n x-dcl-server-default: true\n properties:\n iops:\n type: integer\n format: int64\n x-dcl-go-name: Iops\n description: Optional. 
The number of I/O operations per second (IOPS)\n to provision for GP3 volume.\n x-dcl-server-default: true\n kmsKeyArn:\n type: string\n x-dcl-go-name: KmsKeyArn\n description: Optional. The Amazon Resource Name (ARN) of the Customer\n Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified,\n the default Amazon managed key associated to the AWS region where\n this cluster runs will be used.\n sizeGib:\n type: integer\n format: int64\n x-dcl-go-name: SizeGib\n description: Optional. The size of the volume, in GiBs. When unspecified,\n a default value is provided. See the specific reference in the\n parent resource.\n x-dcl-server-default: true\n throughput:\n type: integer\n format: int64\n x-dcl-go-name: Throughput\n description: Optional. The throughput to provision for the volume,\n in MiB/s. Only valid if the volume type is GP3. If volume type\n is gp3 and throughput is not specified, the throughput will defaults\n to 125.\n x-dcl-server-default: true\n volumeType:\n type: string\n x-dcl-go-name: VolumeType\n x-dcl-go-type: NodePoolConfigRootVolumeVolumeTypeEnum\n description: 'Optional. Type of the EBS volume. When unspecified,\n it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED,\n GP2, GP3'\n x-dcl-server-default: true\n enum:\n - VOLUME_TYPE_UNSPECIFIED\n - GP2\n - GP3\n securityGroupIds:\n type: array\n x-dcl-go-name: SecurityGroupIds\n description: Optional. The IDs of additional security groups to add\n to nodes in this pool. The manager will automatically create security\n groups with minimum rules needed for a functioning cluster.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n sshConfig:\n type: object\n x-dcl-go-name: SshConfig\n x-dcl-go-type: NodePoolConfigSshConfig\n description: Optional. The SSH configuration.\n required:\n - ec2KeyPair\n properties:\n ec2KeyPair:\n type: string\n x-dcl-go-name: Ec2KeyPair\n description: The name of the EC2 key pair used to login into cluster\n machines.\n tags:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Tags\n description: Optional. Key/value metadata to assign to each underlying\n AWS resource. Specify at most 50 pairs containing alphanumerics, spaces,\n and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters.\n Values can be up to 255 Unicode characters.\n taints:\n type: array\n x-dcl-go-name: Taints\n description: Optional. The initial taints assigned to nodes of this\n node pool.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: NodePoolConfigTaints\n required:\n - key\n - value\n - effect\n properties:\n effect:\n type: string\n x-dcl-go-name: Effect\n x-dcl-go-type: NodePoolConfigTaintsEffectEnum\n description: 'The taint effect. Possible values: EFFECT_UNSPECIFIED,\n NO_SCHEDULE, PREFER_NO_SCHEDULE, NO_EXECUTE'\n x-kubernetes-immutable: true\n enum:\n - EFFECT_UNSPECIFIED\n - NO_SCHEDULE\n - PREFER_NO_SCHEDULE\n - NO_EXECUTE\n key:\n type: string\n x-dcl-go-name: Key\n description: Key for the taint.\n x-kubernetes-immutable: true\n value:\n type: string\n x-dcl-go-name: Value\n description: Value for the taint.\n x-kubernetes-immutable: true\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. 
The time at which this node pool was created.\n x-kubernetes-immutable: true\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: Allows clients to perform consistent read-modify-writes through\n optimistic concurrency control. May be sent on update and delete requests\n to ensure the client has an up-to-date value before proceeding.\n x-kubernetes-immutable: true\n kubeletConfig:\n type: object\n x-dcl-go-name: KubeletConfig\n x-dcl-go-type: NodePoolKubeletConfig\n description: The kubelet configuration for the node pool.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n cpuCfsQuota:\n type: boolean\n x-dcl-go-name: CpuCfsQuota\n description: Whether or not to enable CPU CFS quota. Defaults to true.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n cpuCfsQuotaPeriod:\n type: string\n x-dcl-go-name: CpuCfsQuotaPeriod\n description: Optional. The CPU CFS quota period to use for the node.\n Defaults to \"100ms\".\n x-kubernetes-immutable: true\n cpuManagerPolicy:\n type: string\n x-dcl-go-name: CpuManagerPolicy\n x-dcl-go-type: NodePoolKubeletConfigCpuManagerPolicyEnum\n description: The CpuManagerPolicy to use for the node. Defaults to \"none\".\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n enum:\n - none\n - static\n podPidsLimit:\n type: integer\n format: int64\n x-dcl-go-name: PodPidsLimit\n description: Optional. The maximum number of PIDs in each pod running\n on the node. The limit scales automatically based on underlying machine\n size if left unset.\n x-kubernetes-immutable: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n x-dcl-parameter: true\n management:\n type: object\n x-dcl-go-name: Management\n x-dcl-go-type: NodePoolManagement\n description: The Management configuration for this node pool.\n properties:\n autoRepair:\n type: boolean\n x-dcl-go-name: AutoRepair\n description: Optional. Whether or not the nodes will be automatically\n repaired.\n maxPodsConstraint:\n type: object\n x-dcl-go-name: MaxPodsConstraint\n x-dcl-go-type: NodePoolMaxPodsConstraint\n description: The constraint on the maximum number of pods that can be run\n simultaneously on a node in the node pool.\n x-kubernetes-immutable: true\n required:\n - maxPodsPerNode\n properties:\n maxPodsPerNode:\n type: integer\n format: int64\n x-dcl-go-name: MaxPodsPerNode\n description: The maximum number of pods to schedule on a single node.\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: The name of this resource.\n x-kubernetes-immutable: true\n x-dcl-has-long-form: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n x-dcl-parameter: true\n reconciling:\n type: boolean\n x-dcl-go-name: Reconciling\n readOnly: true\n description: Output only. If set, there are currently changes in flight\n to the node pool.\n x-kubernetes-immutable: true\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: NodePoolStateEnum\n readOnly: true\n description: 'Output only. The lifecycle state of the node pool. 
Possible\n values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING,\n ERROR, DEGRADED'\n x-kubernetes-immutable: true\n enum:\n - STATE_UNSPECIFIED\n - PROVISIONING\n - RUNNING\n - RECONCILING\n - STOPPING\n - ERROR\n - DEGRADED\n subnetId:\n type: string\n x-dcl-go-name: SubnetId\n description: The subnet where the node pool node run.\n x-kubernetes-immutable: true\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. A globally unique identifier for the node pool.\n x-kubernetes-immutable: true\n updateSettings:\n type: object\n x-dcl-go-name: UpdateSettings\n x-dcl-go-type: NodePoolUpdateSettings\n description: Optional. Update settings control the speed and disruption\n of the node pool update.\n x-dcl-server-default: true\n properties:\n surgeSettings:\n type: object\n x-dcl-go-name: SurgeSettings\n x-dcl-go-type: NodePoolUpdateSettingsSurgeSettings\n description: Optional. Settings for surge update.\n x-dcl-server-default: true\n properties:\n maxSurge:\n type: integer\n format: int64\n x-dcl-go-name: MaxSurge\n description: Optional. The maximum number of nodes that can be created\n beyond the current size of the node pool during the update process.\n x-dcl-server-default: true\n maxUnavailable:\n type: integer\n format: int64\n x-dcl-go-name: MaxUnavailable\n description: Optional. The maximum number of nodes that can be simultaneously\n unavailable during the update process. A node is considered unavailable\n if its status is not Ready.\n x-dcl-server-default: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The time at which this node pool was last updated.\n x-kubernetes-immutable: true\n version:\n type: string\n x-dcl-go-name: Version\n description: The Kubernetes version to run on this node pool (e.g. `1.19.10-gke.1000`).\n You can list all supported versions on a given Google Cloud region by\n calling GetAwsServerConfig.\n") -// 18893 bytes -// MD5: b441d7a9fd8765145b38ac3c1e28b42d +// 20508 bytes +// MD5: 3e25d675fcc79c1d0c2b71e637d3585a diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/feature_membership.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/feature_membership.go index 2b4b09b5dde..f6bd091b0a2 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/feature_membership.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/feature_membership.go @@ -119,6 +119,33 @@ func (v FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum) } } +// The enum FeatureMembershipConfigmanagementManagementEnum. +type FeatureMembershipConfigmanagementManagementEnum string + +// FeatureMembershipConfigmanagementManagementEnumRef returns a *FeatureMembershipConfigmanagementManagementEnum with the value of string s +// If the empty string is provided, nil is returned. +func FeatureMembershipConfigmanagementManagementEnumRef(s string) *FeatureMembershipConfigmanagementManagementEnum { + v := FeatureMembershipConfigmanagementManagementEnum(s) + return &v +} + +func (v FeatureMembershipConfigmanagementManagementEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"MANAGEMENT_UNSPECIFIED", "MANAGEMENT_AUTOMATIC", "MANAGEMENT_MANUAL"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "FeatureMembershipConfigmanagementManagementEnum", + Value: string(v), + Valid: []string{}, + } +} + // The enum FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum. type FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum string @@ -283,6 +310,7 @@ type FeatureMembershipConfigmanagement struct { Binauthz *FeatureMembershipConfigmanagementBinauthz `json:"binauthz"` HierarchyController *FeatureMembershipConfigmanagementHierarchyController `json:"hierarchyController"` Version *string `json:"version"` + Management *FeatureMembershipConfigmanagementManagementEnum `json:"management"` } type jsonFeatureMembershipConfigmanagement FeatureMembershipConfigmanagement @@ -310,6 +338,8 @@ func (r *FeatureMembershipConfigmanagement) UnmarshalJSON(data []byte) error { r.Version = res.Version + r.Management = res.Management + } return nil } @@ -338,6 +368,7 @@ type FeatureMembershipConfigmanagementConfigSync struct { empty bool `json:"-"` Git *FeatureMembershipConfigmanagementConfigSyncGit `json:"git"` SourceFormat *string `json:"sourceFormat"` + Enabled *bool `json:"enabled"` PreventDrift *bool `json:"preventDrift"` MetricsGcpServiceAccountEmail *string `json:"metricsGcpServiceAccountEmail"` Oci *FeatureMembershipConfigmanagementConfigSyncOci `json:"oci"` @@ -362,6 +393,8 @@ func (r *FeatureMembershipConfigmanagementConfigSync) UnmarshalJSON(data []byte) r.SourceFormat = res.SourceFormat + r.Enabled = res.Enabled + r.PreventDrift = res.PreventDrift r.MetricsGcpServiceAccountEmail = res.MetricsGcpServiceAccountEmail diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/feature_membership.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/feature_membership.yaml index 69064f1b40b..3ac1cdc09b5 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/feature_membership.yaml +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/feature_membership.yaml @@ -110,6 +110,15 @@ components: description: Config Sync configuration for the cluster. x-dcl-send-empty: true properties: + enabled: + type: boolean + x-dcl-go-name: Enabled + description: Enables the installation of ConfigSync. If set to true, + ConfigSync resources will be created and the other ConfigSync + fields will be applied if exist. If set to false, all other ConfigSync + fields will be ignored, ConfigSync resources will be deleted. + If omitted, ConfigSync resources will be managed depends on the + presence of the git or oci field. git: type: object x-dcl-go-name: Git @@ -238,13 +247,29 @@ components: enabled: type: boolean x-dcl-go-name: Enabled - description: Whether Hierarchy Controller is enabled in this cluster. + description: '**DEPRECATED** Configuring Hierarchy Controller through + the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces + instead.' 
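Reviewer note (not part of the vendored diff): the configSync `enabled` field introduced above is deliberately tri-state, which is why the matching Go struct change earlier in this patch models it as `Enabled *bool` rather than `bool`. A minimal sketch of those semantics, with invented names (configSync, effect, boolRef) rather than anything from the DCL:

    package main

    import "fmt"

    // configSync mirrors the tri-state pattern behind the new "enabled" field:
    // a *bool distinguishes explicit true, explicit false, and unset.
    type configSync struct {
        Enabled     *bool
        HasGitOrOci bool
    }

    // effect paraphrases the documented semantics of the field.
    func effect(c configSync) string {
        switch {
        case c.Enabled == nil && c.HasGitOrOci:
            return "omitted + git/oci present: ConfigSync is managed"
        case c.Enabled == nil:
            return "omitted + no git/oci: ConfigSync is left alone"
        case *c.Enabled:
            return "true: install ConfigSync and apply the other fields"
        default:
            return "false: ignore other fields and delete ConfigSync resources"
        }
    }

    func boolRef(b bool) *bool { return &b }

    func main() {
        fmt.Println(effect(configSync{}))
        fmt.Println(effect(configSync{HasGitOrOci: true}))
        fmt.Println(effect(configSync{Enabled: boolRef(true)}))
        fmt.Println(effect(configSync{Enabled: boolRef(false)}))
    }

The pointer is what lets the provider tell "user set false" apart from "user never set the field"; per the schema text above, false actively deletes ConfigSync resources while omission defers to the presence of the git or oci block.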
x-dcl-send-empty: true + management: + type: string + x-dcl-go-name: Management + x-dcl-go-type: FeatureMembershipConfigmanagementManagementEnum + description: Set this field to MANAGEMENT_AUTOMATIC to enable Config + Sync auto-upgrades, and set this field to MANAGEMENT_MANUAL or MANAGEMENT_UNSPECIFIED + to disable Config Sync auto-upgrades. + x-dcl-server-default: true + enum: + - MANAGEMENT_UNSPECIFIED + - MANAGEMENT_AUTOMATIC + - MANAGEMENT_MANUAL policyController: type: object x-dcl-go-name: PolicyController x-dcl-go-type: FeatureMembershipConfigmanagementPolicyController - description: Policy Controller configuration for the cluster. + description: '**DEPRECATED** Configuring Policy Controller through the + configmanagement feature is no longer recommended. Use the policycontroller + feature instead.' properties: auditIntervalSeconds: type: string diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/feature_membership_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/feature_membership_internal.go index 41541f0a0c1..bb4c989865c 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/feature_membership_internal.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/feature_membership_internal.go @@ -604,6 +604,12 @@ func canonicalizeFeatureMembershipConfigmanagement(des, initial *FeatureMembersh } else { cDes.Version = des.Version } + if dcl.IsZeroValue(des.Management) || (dcl.IsEmptyValueIndirect(des.Management) && dcl.IsEmptyValueIndirect(initial.Management)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Management = initial.Management + } else { + cDes.Management = des.Management + } return cDes } @@ -727,6 +733,11 @@ func canonicalizeFeatureMembershipConfigmanagementConfigSync(des, initial *Featu } else { cDes.SourceFormat = des.SourceFormat } + if dcl.BoolCanonicalize(des.Enabled, initial.Enabled) || dcl.IsZeroValue(des.Enabled) { + cDes.Enabled = initial.Enabled + } else { + cDes.Enabled = des.Enabled + } if dcl.BoolCanonicalize(des.PreventDrift, initial.PreventDrift) || dcl.IsZeroValue(des.PreventDrift) { cDes.PreventDrift = initial.PreventDrift } else { @@ -789,6 +800,9 @@ func canonicalizeNewFeatureMembershipConfigmanagementConfigSync(c *Client, des, if dcl.StringCanonicalize(des.SourceFormat, nw.SourceFormat) { nw.SourceFormat = des.SourceFormat } + if dcl.BoolCanonicalize(des.Enabled, nw.Enabled) { + nw.Enabled = des.Enabled + } if dcl.BoolCanonicalize(des.PreventDrift, nw.PreventDrift) { nw.PreventDrift = des.PreventDrift } @@ -3297,6 +3311,13 @@ func compareFeatureMembershipConfigmanagementNewStyle(d, a interface{}, fn dcl.F } diffs = append(diffs, ds...) } + + if ds, err := dcl.Diff(desired.Management, actual.Management, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Management")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } return diffs, nil } @@ -3334,6 +3355,13 @@ func compareFeatureMembershipConfigmanagementConfigSyncNewStyle(d, a interface{} diffs = append(diffs, ds...) 
} + if ds, err := dcl.Diff(desired.Enabled, actual.Enabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Enabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + if ds, err := dcl.Diff(desired.PreventDrift, actual.PreventDrift, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PreventDrift")); len(ds) != 0 || err != nil { if err != nil { return nil, err @@ -4491,6 +4519,9 @@ func expandFeatureMembershipConfigmanagement(c *Client, f *FeatureMembershipConf if v := f.Version; !dcl.IsEmptyValueIndirect(v) { m["version"] = v } + if v := f.Management; !dcl.IsEmptyValueIndirect(v) { + m["management"] = v + } return m, nil } @@ -4513,6 +4544,7 @@ func flattenFeatureMembershipConfigmanagement(c *Client, i interface{}, res *Fea r.Binauthz = flattenFeatureMembershipConfigmanagementBinauthz(c, m["binauthz"], res) r.HierarchyController = flattenHierarchyControllerConfig(c, m["hierarchyController"], res) r.Version = dcl.FlattenString(m["version"]) + r.Management = flattenFeatureMembershipConfigmanagementManagementEnum(m["management"]) return r } @@ -4614,6 +4646,9 @@ func expandFeatureMembershipConfigmanagementConfigSync(c *Client, f *FeatureMemb if v := f.SourceFormat; !dcl.IsEmptyValueIndirect(v) { m["sourceFormat"] = v } + if v := f.Enabled; !dcl.IsEmptyValueIndirect(v) { + m["enabled"] = v + } if v := f.PreventDrift; !dcl.IsEmptyValueIndirect(v) { m["preventDrift"] = v } @@ -4644,6 +4679,7 @@ func flattenFeatureMembershipConfigmanagementConfigSync(c *Client, i interface{} } r.Git = flattenFeatureMembershipConfigmanagementConfigSyncGit(c, m["git"], res) r.SourceFormat = dcl.FlattenString(m["sourceFormat"]) + r.Enabled = dcl.FlattenBool(m["enabled"]) r.PreventDrift = dcl.FlattenBool(m["preventDrift"]) r.MetricsGcpServiceAccountEmail = dcl.FlattenString(m["metricsGcpServiceAccountEmail"]) r.Oci = flattenFeatureMembershipConfigmanagementConfigSyncOci(c, m["oci"], res) @@ -6924,6 +6960,57 @@ func flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsE return FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnumRef(s) } +// flattenFeatureMembershipConfigmanagementManagementEnumMap flattens the contents of FeatureMembershipConfigmanagementManagementEnum from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementManagementEnumMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipConfigmanagementManagementEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipConfigmanagementManagementEnum{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipConfigmanagementManagementEnum{} + } + + items := make(map[string]FeatureMembershipConfigmanagementManagementEnum) + for k, item := range a { + items[k] = *flattenFeatureMembershipConfigmanagementManagementEnum(item.(interface{})) + } + + return items +} + +// flattenFeatureMembershipConfigmanagementManagementEnumSlice flattens the contents of FeatureMembershipConfigmanagementManagementEnum from a JSON +// response object. 
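+// Inputs that are not a []interface{} flatten to an empty slice; elements are
+// assumed to be strings (a non-string element would make the enum helper below
+// return nil and panic on the dereference, as in the Map variant above).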
+func flattenFeatureMembershipConfigmanagementManagementEnumSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipConfigmanagementManagementEnum { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipConfigmanagementManagementEnum{} + } + + if len(a) == 0 { + return []FeatureMembershipConfigmanagementManagementEnum{} + } + + items := make([]FeatureMembershipConfigmanagementManagementEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipConfigmanagementManagementEnum(item.(interface{}))) + } + + return items +} + +// flattenFeatureMembershipConfigmanagementManagementEnum asserts that an interface is a string, and returns a +// pointer to a *FeatureMembershipConfigmanagementManagementEnum with the same value as that string. +func flattenFeatureMembershipConfigmanagementManagementEnum(i interface{}) *FeatureMembershipConfigmanagementManagementEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return FeatureMembershipConfigmanagementManagementEnumRef(s) +} + // flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnumMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum from a JSON // response object. func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnumMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum { diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/feature_membership_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/feature_membership_schema.go index cb8480a787c..45b37014528 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/feature_membership_schema.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/feature_membership_schema.go @@ -153,6 +153,11 @@ func DCLFeatureMembershipSchema() *dcl.Schema { Description: "Config Sync configuration for the cluster.", SendEmpty: true, Properties: map[string]*dcl.Property{ + "enabled": &dcl.Property{ + Type: "boolean", + GoName: "Enabled", + Description: "Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field.", + }, "git": &dcl.Property{ Type: "object", GoName: "Git", @@ -290,16 +295,28 @@ func DCLFeatureMembershipSchema() *dcl.Schema { "enabled": &dcl.Property{ Type: "boolean", GoName: "Enabled", - Description: "Whether Hierarchy Controller is enabled in this cluster.", + Description: "**DEPRECATED** Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. 
Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead.", SendEmpty: true, }, }, }, + "management": &dcl.Property{ + Type: "string", + GoName: "Management", + GoType: "FeatureMembershipConfigmanagementManagementEnum", + Description: "Set this field to MANAGEMENT_AUTOMATIC to enable Config Sync auto-upgrades, and set this field to MANAGEMENT_MANUAL or MANAGEMENT_UNSPECIFIED to disable Config Sync auto-upgrades.", + ServerDefault: true, + Enum: []string{ + "MANAGEMENT_UNSPECIFIED", + "MANAGEMENT_AUTOMATIC", + "MANAGEMENT_MANUAL", + }, + }, "policyController": &dcl.Property{ Type: "object", GoName: "PolicyController", GoType: "FeatureMembershipConfigmanagementPolicyController", - Description: "Policy Controller configuration for the cluster.", + Description: "**DEPRECATED** Configuring Policy Controller through the configmanagement feature is no longer recommended. Use the policycontroller feature instead.", Properties: map[string]*dcl.Property{ "auditIntervalSeconds": &dcl.Property{ Type: "string", diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/feature_membership_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/feature_membership_yaml_embed.go index ff8071149e8..9a090e22de4 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/feature_membership_yaml_embed.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/feature_membership_yaml_embed.go @@ -17,7 +17,7 @@ package gkehub // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/gkehub/feature_membership.yaml -var YAML_feature_membership = []byte("info:\n title: GkeHub/FeatureMembership\n description: The GkeHub FeatureMembership resource\n x-dcl-struct-name: FeatureMembership\n x-dcl-has-iam: false\n x-dcl-mutex: '{{project}}/{{location}}/{{feature}}'\npaths:\n get:\n description: The function used to get information about a FeatureMembership\n parameters:\n - name: featureMembership\n required: true\n description: A full instance of a FeatureMembership\n apply:\n description: The function used to apply information about a FeatureMembership\n parameters:\n - name: featureMembership\n required: true\n description: A full instance of a FeatureMembership\n delete:\n description: The function used to delete a FeatureMembership\n parameters:\n - name: featureMembership\n required: true\n description: A full instance of a FeatureMembership\n deleteAll:\n description: The function used to delete all FeatureMembership\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: feature\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many FeatureMembership\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: feature\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n FeatureMembership:\n title: FeatureMembership\n x-dcl-id: projects/{{project}}/locations/{{location}}/features/{{feature}}/memberships/{{membership}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n 
x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - project\n - location\n - feature\n - membership\n properties:\n configmanagement:\n type: object\n x-dcl-go-name: Configmanagement\n x-dcl-go-type: FeatureMembershipConfigmanagement\n description: Config Management-specific spec.\n properties:\n binauthz:\n type: object\n x-dcl-go-name: Binauthz\n x-dcl-go-type: FeatureMembershipConfigmanagementBinauthz\n description: '**DEPRECATED** Binauthz configuration for the cluster.\n This field will be ignored and should not be set.'\n x-dcl-server-default: true\n properties:\n enabled:\n type: boolean\n x-dcl-go-name: Enabled\n description: Whether binauthz is enabled in this cluster.\n x-dcl-parameter: true\n configSync:\n type: object\n x-dcl-go-name: ConfigSync\n x-dcl-go-type: FeatureMembershipConfigmanagementConfigSync\n description: Config Sync configuration for the cluster.\n x-dcl-send-empty: true\n properties:\n git:\n type: object\n x-dcl-go-name: Git\n x-dcl-go-type: FeatureMembershipConfigmanagementConfigSyncGit\n properties:\n gcpServiceAccountEmail:\n type: string\n x-dcl-go-name: GcpServiceAccountEmail\n description: The GCP Service Account Email used for auth when\n secretType is gcpServiceAccount.\n x-dcl-references:\n - resource: Iam/ServiceAccount\n field: email\n httpsProxy:\n type: string\n x-dcl-go-name: HttpsProxy\n description: URL for the HTTPS proxy to be used when communicating\n with the Git repo.\n policyDir:\n type: string\n x-dcl-go-name: PolicyDir\n description: 'The path within the Git repository that represents\n the top level of the repo to sync. Default: the root directory\n of the repository.'\n secretType:\n type: string\n x-dcl-go-name: SecretType\n description: Type of secret configured for access to the Git\n repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount\n or none. The validation of this is case-sensitive.\n syncBranch:\n type: string\n x-dcl-go-name: SyncBranch\n description: 'The branch of the repository to sync from. Default:\n master.'\n syncRepo:\n type: string\n x-dcl-go-name: SyncRepo\n description: The URL of the Git repository to use as the source\n of truth.\n syncRev:\n type: string\n x-dcl-go-name: SyncRev\n description: Git revision (tag or hash) to check out. Default\n HEAD.\n syncWaitSecs:\n type: string\n x-dcl-go-name: SyncWaitSecs\n description: 'Period in seconds between consecutive syncs. Default:\n 15.'\n metricsGcpServiceAccountEmail:\n type: string\n x-dcl-go-name: MetricsGcpServiceAccountEmail\n description: The Email of the Google Cloud Service Account (GSA)\n used for exporting Config Sync metrics to Cloud Monitoring. The\n GSA should have the Monitoring Metric Writer(roles/monitoring.metricWriter)\n IAM role. The Kubernetes ServiceAccount `default` in the namespace\n `config-management-monitoring` should be bound to the GSA.\n x-dcl-references:\n - resource: Iam/ServiceAccount\n field: email\n oci:\n type: object\n x-dcl-go-name: Oci\n x-dcl-go-type: FeatureMembershipConfigmanagementConfigSyncOci\n properties:\n gcpServiceAccountEmail:\n type: string\n x-dcl-go-name: GcpServiceAccountEmail\n description: 'The GCP Service Account Email used for auth when\n secret_type is gcpserviceaccount. '\n x-dcl-references:\n - resource: Iam/ServiceAccount\n field: email\n policyDir:\n type: string\n x-dcl-go-name: PolicyDir\n description: 'The absolute path of the directory that contains\n the local resources. 
Default: the root directory of the image.'\n secretType:\n type: string\n x-dcl-go-name: SecretType\n description: Type of secret configured for access to the OCI\n Image. Must be one of gcenode, gcpserviceaccount or none.\n The validation of this is case-sensitive.\n syncRepo:\n type: string\n x-dcl-go-name: SyncRepo\n description: The OCI image repository URL for the package to\n sync from. e.g. LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME.\n syncWaitSecs:\n type: string\n x-dcl-go-name: SyncWaitSecs\n description: 'Period in seconds(int64 format) between consecutive\n syncs. Default: 15.'\n preventDrift:\n type: boolean\n x-dcl-go-name: PreventDrift\n description: Set to true to enable the Config Sync admission webhook\n to prevent drifts. If set to `false`, disables the Config Sync\n admission webhook and does not prevent drifts.\n x-dcl-server-default: true\n sourceFormat:\n type: string\n x-dcl-go-name: SourceFormat\n description: Specifies whether the Config Sync Repo is in \"hierarchical\"\n or \"unstructured\" mode.\n hierarchyController:\n type: object\n x-dcl-go-name: HierarchyController\n x-dcl-go-type: FeatureMembershipConfigmanagementHierarchyController\n description: Hierarchy Controller configuration for the cluster.\n x-dcl-send-empty: true\n properties:\n enableHierarchicalResourceQuota:\n type: boolean\n x-dcl-go-name: EnableHierarchicalResourceQuota\n description: Whether hierarchical resource quota is enabled in this\n cluster.\n x-dcl-send-empty: true\n enablePodTreeLabels:\n type: boolean\n x-dcl-go-name: EnablePodTreeLabels\n description: Whether pod tree labels are enabled in this cluster.\n x-dcl-send-empty: true\n enabled:\n type: boolean\n x-dcl-go-name: Enabled\n description: Whether Hierarchy Controller is enabled in this cluster.\n x-dcl-send-empty: true\n policyController:\n type: object\n x-dcl-go-name: PolicyController\n x-dcl-go-type: FeatureMembershipConfigmanagementPolicyController\n description: Policy Controller configuration for the cluster.\n properties:\n auditIntervalSeconds:\n type: string\n x-dcl-go-name: AuditIntervalSeconds\n description: Sets the interval for Policy Controller Audit Scans\n (in seconds). When set to 0, this disables audit functionality\n altogether.\n enabled:\n type: boolean\n x-dcl-go-name: Enabled\n description: Enables the installation of Policy Controller. If false,\n the rest of PolicyController fields take no effect.\n exemptableNamespaces:\n type: array\n x-dcl-go-name: ExemptableNamespaces\n description: The set of namespaces that are excluded from Policy\n Controller checks. Namespaces do not need to currently exist on\n the cluster.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n logDeniesEnabled:\n type: boolean\n x-dcl-go-name: LogDeniesEnabled\n description: Logs all denies and dry run failures.\n monitoring:\n type: object\n x-dcl-go-name: Monitoring\n x-dcl-go-type: FeatureMembershipConfigmanagementPolicyControllerMonitoring\n description: 'Specifies the backends Policy Controller should export\n metrics to. For example, to specify metrics should be exported\n to Cloud Monitoring and Prometheus, specify backends: [\"cloudmonitoring\",\n \"prometheus\"]. Default: [\"cloudmonitoring\", \"prometheus\"]'\n x-dcl-server-default: true\n properties:\n backends:\n type: array\n x-dcl-go-name: Backends\n description: ' Specifies the list of backends Policy Controller\n will export to. 
Specifying an empty value `[]` disables metrics\n export.'\n x-dcl-server-default: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum\n enum:\n - MONITORING_BACKEND_UNSPECIFIED\n - PROMETHEUS\n - CLOUD_MONITORING\n mutationEnabled:\n type: boolean\n x-dcl-go-name: MutationEnabled\n description: Enable or disable mutation in policy controller. If\n true, mutation CRDs, webhook and controller deployment will be\n deployed to the cluster.\n referentialRulesEnabled:\n type: boolean\n x-dcl-go-name: ReferentialRulesEnabled\n description: Enables the ability to use Constraint Templates that\n reference to objects other than the object currently being evaluated.\n templateLibraryInstalled:\n type: boolean\n x-dcl-go-name: TemplateLibraryInstalled\n description: Installs the default template library along with Policy\n Controller.\n version:\n type: string\n x-dcl-go-name: Version\n description: Optional. Version of ACM to install. Defaults to the latest\n version.\n x-dcl-server-default: true\n feature:\n type: string\n x-dcl-go-name: Feature\n description: The name of the feature\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Gkehub/Feature\n field: name\n parent: true\n x-dcl-parameter: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location of the feature\n x-kubernetes-immutable: true\n x-dcl-parameter: true\n membership:\n type: string\n x-dcl-go-name: Membership\n description: The name of the membership\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Gkehub/Membership\n field: name\n x-dcl-parameter: true\n membershipLocation:\n type: string\n x-dcl-go-name: MembershipLocation\n description: The location of the membership\n x-kubernetes-immutable: true\n x-dcl-parameter: true\n mesh:\n type: object\n x-dcl-go-name: Mesh\n x-dcl-go-type: FeatureMembershipMesh\n description: Manage Mesh Features\n properties:\n controlPlane:\n type: string\n x-dcl-go-name: ControlPlane\n x-dcl-go-type: FeatureMembershipMeshControlPlaneEnum\n description: '**DEPRECATED** Whether to automatically manage Service\n Mesh control planes. Possible values: CONTROL_PLANE_MANAGEMENT_UNSPECIFIED,\n AUTOMATIC, MANUAL'\n enum:\n - CONTROL_PLANE_MANAGEMENT_UNSPECIFIED\n - AUTOMATIC\n - MANUAL\n management:\n type: string\n x-dcl-go-name: Management\n x-dcl-go-type: FeatureMembershipMeshManagementEnum\n description: 'Whether to automatically manage Service Mesh. Possible\n values: MANAGEMENT_UNSPECIFIED, MANAGEMENT_AUTOMATIC, MANAGEMENT_MANUAL'\n enum:\n - MANAGEMENT_UNSPECIFIED\n - MANAGEMENT_AUTOMATIC\n - MANAGEMENT_MANUAL\n policycontroller:\n type: object\n x-dcl-go-name: Policycontroller\n x-dcl-go-type: FeatureMembershipPolicycontroller\n description: Policy Controller-specific spec.\n required:\n - policyControllerHubConfig\n properties:\n policyControllerHubConfig:\n type: object\n x-dcl-go-name: PolicyControllerHubConfig\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfig\n description: Policy Controller configuration for the cluster.\n properties:\n auditIntervalSeconds:\n type: integer\n format: int64\n x-dcl-go-name: AuditIntervalSeconds\n description: Sets the interval for Policy Controller Audit Scans\n (in seconds). 
When set to 0, this disables audit functionality\n altogether.\n constraintViolationLimit:\n type: integer\n format: int64\n x-dcl-go-name: ConstraintViolationLimit\n description: The maximum number of audit violations to be stored\n in a constraint. If not set, the internal default of 20 will be\n used.\n deploymentConfigs:\n type: object\n additionalProperties:\n type: object\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs\n properties:\n containerResources:\n type: object\n x-dcl-go-name: ContainerResources\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources\n description: Container resource requirements.\n x-dcl-conflicts:\n - replicaCount\n - podAffinity\n - podTolerations\n properties:\n limits:\n type: object\n x-dcl-go-name: Limits\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits\n description: Limits describes the maximum amount of compute\n resources allowed for use by the running container.\n properties:\n cpu:\n type: string\n x-dcl-go-name: Cpu\n description: CPU requirement expressed in Kubernetes\n resource units.\n memory:\n type: string\n x-dcl-go-name: Memory\n description: Memory requirement expressed in Kubernetes\n resource units.\n requests:\n type: object\n x-dcl-go-name: Requests\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests\n description: Requests describes the amount of compute\n resources reserved for the container by the kube-scheduler.\n properties:\n cpu:\n type: string\n x-dcl-go-name: Cpu\n description: CPU requirement expressed in Kubernetes\n resource units.\n memory:\n type: string\n x-dcl-go-name: Memory\n description: Memory requirement expressed in Kubernetes\n resource units.\n podAffinity:\n type: string\n x-dcl-go-name: PodAffinity\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum\n description: 'Pod affinity configuration. 
Possible values:\n AFFINITY_UNSPECIFIED, NO_AFFINITY, ANTI_AFFINITY'\n x-dcl-conflicts:\n - replicaCount\n - containerResources\n - podTolerations\n enum:\n - AFFINITY_UNSPECIFIED\n - NO_AFFINITY\n - ANTI_AFFINITY\n podTolerations:\n type: array\n x-dcl-go-name: PodTolerations\n description: Pod tolerations of node taints.\n x-dcl-conflicts:\n - replicaCount\n - containerResources\n - podAffinity\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations\n properties:\n effect:\n type: string\n x-dcl-go-name: Effect\n description: Matches a taint effect.\n key:\n type: string\n x-dcl-go-name: Key\n description: Matches a taint key (not necessarily unique).\n operator:\n type: string\n x-dcl-go-name: Operator\n description: Matches a taint operator.\n value:\n type: string\n x-dcl-go-name: Value\n description: Matches a taint value.\n replicaCount:\n type: integer\n format: int64\n x-dcl-go-name: ReplicaCount\n description: Pod replica count.\n x-dcl-conflicts:\n - containerResources\n - podAffinity\n - podTolerations\n x-dcl-go-name: DeploymentConfigs\n description: Map of deployment configs to deployments (\"admission\",\n \"audit\", \"mutation\").\n x-dcl-server-default: true\n exemptableNamespaces:\n type: array\n x-dcl-go-name: ExemptableNamespaces\n description: The set of namespaces that are excluded from Policy\n Controller checks. Namespaces do not need to currently exist on\n the cluster.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n installSpec:\n type: string\n x-dcl-go-name: InstallSpec\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum\n description: 'Configures the mode of the Policy Controller installation.\n Possible values: INSTALL_SPEC_UNSPECIFIED, INSTALL_SPEC_NOT_INSTALLED,\n INSTALL_SPEC_ENABLED, INSTALL_SPEC_SUSPENDED, INSTALL_SPEC_DETACHED'\n enum:\n - INSTALL_SPEC_UNSPECIFIED\n - INSTALL_SPEC_NOT_INSTALLED\n - INSTALL_SPEC_ENABLED\n - INSTALL_SPEC_SUSPENDED\n - INSTALL_SPEC_DETACHED\n logDeniesEnabled:\n type: boolean\n x-dcl-go-name: LogDeniesEnabled\n description: Logs all denies and dry run failures.\n monitoring:\n type: object\n x-dcl-go-name: Monitoring\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring\n description: 'Specifies the backends Policy Controller should export\n metrics to. For example, to specify metrics should be exported\n to Cloud Monitoring and Prometheus, specify backends: [\"cloudmonitoring\",\n \"prometheus\"]. Default: [\"cloudmonitoring\", \"prometheus\"]'\n x-dcl-server-default: true\n properties:\n backends:\n type: array\n x-dcl-go-name: Backends\n description: ' Specifies the list of backends Policy Controller\n will export to. 
Specifying an empty value `[]` disables metrics\n export.'\n x-dcl-server-default: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum\n enum:\n - MONITORING_BACKEND_UNSPECIFIED\n - PROMETHEUS\n - CLOUD_MONITORING\n mutationEnabled:\n type: boolean\n x-dcl-go-name: MutationEnabled\n description: Enables the ability to mutate resources using Policy\n Controller.\n policyContent:\n type: object\n x-dcl-go-name: PolicyContent\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent\n description: Specifies the desired policy content on the cluster.\n x-dcl-server-default: true\n properties:\n bundles:\n type: object\n additionalProperties:\n type: object\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles\n properties:\n exemptedNamespaces:\n type: array\n x-dcl-go-name: ExemptedNamespaces\n description: The set of namespaces to be exempted from\n the bundle.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-go-name: Bundles\n description: map of bundle name to BundleInstallSpec. The bundle\n name maps to the `bundleName` key in the `policycontroller.gke.io/constraintData`\n annotation on a constraint.\n templateLibrary:\n type: object\n x-dcl-go-name: TemplateLibrary\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary\n description: Configures the installation of the Template Library.\n x-dcl-server-default: true\n properties:\n installation:\n type: string\n x-dcl-go-name: Installation\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum\n description: 'Configures the manner in which the template\n library is installed on the cluster. Possible values:\n INSTALLATION_UNSPECIFIED, NOT_INSTALLED, ALL'\n enum:\n - INSTALLATION_UNSPECIFIED\n - NOT_INSTALLED\n - ALL\n referentialRulesEnabled:\n type: boolean\n x-dcl-go-name: ReferentialRulesEnabled\n description: Enables the ability to use Constraint Templates that\n reference to objects other than the object currently being evaluated.\n version:\n type: string\n x-dcl-go-name: Version\n description: Optional. Version of Policy Controller to install. 
Defaults\n to the latest version.\n x-dcl-server-default: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project of the feature\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n x-dcl-parameter: true\n") +var YAML_feature_membership = []byte("info:\n title: GkeHub/FeatureMembership\n description: The GkeHub FeatureMembership resource\n x-dcl-struct-name: FeatureMembership\n x-dcl-has-iam: false\n x-dcl-mutex: '{{project}}/{{location}}/{{feature}}'\npaths:\n get:\n description: The function used to get information about a FeatureMembership\n parameters:\n - name: featureMembership\n required: true\n description: A full instance of a FeatureMembership\n apply:\n description: The function used to apply information about a FeatureMembership\n parameters:\n - name: featureMembership\n required: true\n description: A full instance of a FeatureMembership\n delete:\n description: The function used to delete a FeatureMembership\n parameters:\n - name: featureMembership\n required: true\n description: A full instance of a FeatureMembership\n deleteAll:\n description: The function used to delete all FeatureMembership\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: feature\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many FeatureMembership\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: feature\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n FeatureMembership:\n title: FeatureMembership\n x-dcl-id: projects/{{project}}/locations/{{location}}/features/{{feature}}/memberships/{{membership}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - project\n - location\n - feature\n - membership\n properties:\n configmanagement:\n type: object\n x-dcl-go-name: Configmanagement\n x-dcl-go-type: FeatureMembershipConfigmanagement\n description: Config Management-specific spec.\n properties:\n binauthz:\n type: object\n x-dcl-go-name: Binauthz\n x-dcl-go-type: FeatureMembershipConfigmanagementBinauthz\n description: '**DEPRECATED** Binauthz configuration for the cluster.\n This field will be ignored and should not be set.'\n x-dcl-server-default: true\n properties:\n enabled:\n type: boolean\n x-dcl-go-name: Enabled\n description: Whether binauthz is enabled in this cluster.\n x-dcl-parameter: true\n configSync:\n type: object\n x-dcl-go-name: ConfigSync\n x-dcl-go-type: FeatureMembershipConfigmanagementConfigSync\n description: Config Sync configuration for the cluster.\n x-dcl-send-empty: true\n properties:\n enabled:\n type: boolean\n x-dcl-go-name: Enabled\n description: Enables the installation of ConfigSync. If set to true,\n ConfigSync resources will be created and the other ConfigSync\n fields will be applied if exist. 
If set to false, all other ConfigSync\n fields will be ignored, ConfigSync resources will be deleted.\n If omitted, ConfigSync resources will be managed depends on the\n presence of the git or oci field.\n git:\n type: object\n x-dcl-go-name: Git\n x-dcl-go-type: FeatureMembershipConfigmanagementConfigSyncGit\n properties:\n gcpServiceAccountEmail:\n type: string\n x-dcl-go-name: GcpServiceAccountEmail\n description: The GCP Service Account Email used for auth when\n secretType is gcpServiceAccount.\n x-dcl-references:\n - resource: Iam/ServiceAccount\n field: email\n httpsProxy:\n type: string\n x-dcl-go-name: HttpsProxy\n description: URL for the HTTPS proxy to be used when communicating\n with the Git repo.\n policyDir:\n type: string\n x-dcl-go-name: PolicyDir\n description: 'The path within the Git repository that represents\n the top level of the repo to sync. Default: the root directory\n of the repository.'\n secretType:\n type: string\n x-dcl-go-name: SecretType\n description: Type of secret configured for access to the Git\n repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount\n or none. The validation of this is case-sensitive.\n syncBranch:\n type: string\n x-dcl-go-name: SyncBranch\n description: 'The branch of the repository to sync from. Default:\n master.'\n syncRepo:\n type: string\n x-dcl-go-name: SyncRepo\n description: The URL of the Git repository to use as the source\n of truth.\n syncRev:\n type: string\n x-dcl-go-name: SyncRev\n description: Git revision (tag or hash) to check out. Default\n HEAD.\n syncWaitSecs:\n type: string\n x-dcl-go-name: SyncWaitSecs\n description: 'Period in seconds between consecutive syncs. Default:\n 15.'\n metricsGcpServiceAccountEmail:\n type: string\n x-dcl-go-name: MetricsGcpServiceAccountEmail\n description: The Email of the Google Cloud Service Account (GSA)\n used for exporting Config Sync metrics to Cloud Monitoring. The\n GSA should have the Monitoring Metric Writer(roles/monitoring.metricWriter)\n IAM role. The Kubernetes ServiceAccount `default` in the namespace\n `config-management-monitoring` should be bound to the GSA.\n x-dcl-references:\n - resource: Iam/ServiceAccount\n field: email\n oci:\n type: object\n x-dcl-go-name: Oci\n x-dcl-go-type: FeatureMembershipConfigmanagementConfigSyncOci\n properties:\n gcpServiceAccountEmail:\n type: string\n x-dcl-go-name: GcpServiceAccountEmail\n description: 'The GCP Service Account Email used for auth when\n secret_type is gcpserviceaccount. '\n x-dcl-references:\n - resource: Iam/ServiceAccount\n field: email\n policyDir:\n type: string\n x-dcl-go-name: PolicyDir\n description: 'The absolute path of the directory that contains\n the local resources. Default: the root directory of the image.'\n secretType:\n type: string\n x-dcl-go-name: SecretType\n description: Type of secret configured for access to the OCI\n Image. Must be one of gcenode, gcpserviceaccount or none.\n The validation of this is case-sensitive.\n syncRepo:\n type: string\n x-dcl-go-name: SyncRepo\n description: The OCI image repository URL for the package to\n sync from. e.g. LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME.\n syncWaitSecs:\n type: string\n x-dcl-go-name: SyncWaitSecs\n description: 'Period in seconds(int64 format) between consecutive\n syncs. Default: 15.'\n preventDrift:\n type: boolean\n x-dcl-go-name: PreventDrift\n description: Set to true to enable the Config Sync admission webhook\n to prevent drifts. 
If set to `false`, disables the Config Sync\n admission webhook and does not prevent drifts.\n x-dcl-server-default: true\n sourceFormat:\n type: string\n x-dcl-go-name: SourceFormat\n description: Specifies whether the Config Sync Repo is in \"hierarchical\"\n or \"unstructured\" mode.\n hierarchyController:\n type: object\n x-dcl-go-name: HierarchyController\n x-dcl-go-type: FeatureMembershipConfigmanagementHierarchyController\n description: Hierarchy Controller configuration for the cluster.\n x-dcl-send-empty: true\n properties:\n enableHierarchicalResourceQuota:\n type: boolean\n x-dcl-go-name: EnableHierarchicalResourceQuota\n description: Whether hierarchical resource quota is enabled in this\n cluster.\n x-dcl-send-empty: true\n enablePodTreeLabels:\n type: boolean\n x-dcl-go-name: EnablePodTreeLabels\n description: Whether pod tree labels are enabled in this cluster.\n x-dcl-send-empty: true\n enabled:\n type: boolean\n x-dcl-go-name: Enabled\n description: '**DEPRECATED** Configuring Hierarchy Controller through\n the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces\n instead.'\n x-dcl-send-empty: true\n management:\n type: string\n x-dcl-go-name: Management\n x-dcl-go-type: FeatureMembershipConfigmanagementManagementEnum\n description: Set this field to MANAGEMENT_AUTOMATIC to enable Config\n Sync auto-upgrades, and set this field to MANAGEMENT_MANUAL or MANAGEMENT_UNSPECIFIED\n to disable Config Sync auto-upgrades.\n x-dcl-server-default: true\n enum:\n - MANAGEMENT_UNSPECIFIED\n - MANAGEMENT_AUTOMATIC\n - MANAGEMENT_MANUAL\n policyController:\n type: object\n x-dcl-go-name: PolicyController\n x-dcl-go-type: FeatureMembershipConfigmanagementPolicyController\n description: '**DEPRECATED** Configuring Policy Controller through the\n configmanagement feature is no longer recommended. Use the policycontroller\n feature instead.'\n properties:\n auditIntervalSeconds:\n type: string\n x-dcl-go-name: AuditIntervalSeconds\n description: Sets the interval for Policy Controller Audit Scans\n (in seconds). When set to 0, this disables audit functionality\n altogether.\n enabled:\n type: boolean\n x-dcl-go-name: Enabled\n description: Enables the installation of Policy Controller. If false,\n the rest of PolicyController fields take no effect.\n exemptableNamespaces:\n type: array\n x-dcl-go-name: ExemptableNamespaces\n description: The set of namespaces that are excluded from Policy\n Controller checks. Namespaces do not need to currently exist on\n the cluster.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n logDeniesEnabled:\n type: boolean\n x-dcl-go-name: LogDeniesEnabled\n description: Logs all denies and dry run failures.\n monitoring:\n type: object\n x-dcl-go-name: Monitoring\n x-dcl-go-type: FeatureMembershipConfigmanagementPolicyControllerMonitoring\n description: 'Specifies the backends Policy Controller should export\n metrics to. For example, to specify metrics should be exported\n to Cloud Monitoring and Prometheus, specify backends: [\"cloudmonitoring\",\n \"prometheus\"]. Default: [\"cloudmonitoring\", \"prometheus\"]'\n x-dcl-server-default: true\n properties:\n backends:\n type: array\n x-dcl-go-name: Backends\n description: ' Specifies the list of backends Policy Controller\n will export to. 
Specifying an empty value `[]` disables metrics\n export.'\n x-dcl-server-default: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum\n enum:\n - MONITORING_BACKEND_UNSPECIFIED\n - PROMETHEUS\n - CLOUD_MONITORING\n mutationEnabled:\n type: boolean\n x-dcl-go-name: MutationEnabled\n description: Enable or disable mutation in policy controller. If\n true, mutation CRDs, webhook and controller deployment will be\n deployed to the cluster.\n referentialRulesEnabled:\n type: boolean\n x-dcl-go-name: ReferentialRulesEnabled\n description: Enables the ability to use Constraint Templates that\n reference to objects other than the object currently being evaluated.\n templateLibraryInstalled:\n type: boolean\n x-dcl-go-name: TemplateLibraryInstalled\n description: Installs the default template library along with Policy\n Controller.\n version:\n type: string\n x-dcl-go-name: Version\n description: Optional. Version of ACM to install. Defaults to the latest\n version.\n x-dcl-server-default: true\n feature:\n type: string\n x-dcl-go-name: Feature\n description: The name of the feature\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Gkehub/Feature\n field: name\n parent: true\n x-dcl-parameter: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location of the feature\n x-kubernetes-immutable: true\n x-dcl-parameter: true\n membership:\n type: string\n x-dcl-go-name: Membership\n description: The name of the membership\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Gkehub/Membership\n field: name\n x-dcl-parameter: true\n membershipLocation:\n type: string\n x-dcl-go-name: MembershipLocation\n description: The location of the membership\n x-kubernetes-immutable: true\n x-dcl-parameter: true\n mesh:\n type: object\n x-dcl-go-name: Mesh\n x-dcl-go-type: FeatureMembershipMesh\n description: Manage Mesh Features\n properties:\n controlPlane:\n type: string\n x-dcl-go-name: ControlPlane\n x-dcl-go-type: FeatureMembershipMeshControlPlaneEnum\n description: '**DEPRECATED** Whether to automatically manage Service\n Mesh control planes. Possible values: CONTROL_PLANE_MANAGEMENT_UNSPECIFIED,\n AUTOMATIC, MANUAL'\n enum:\n - CONTROL_PLANE_MANAGEMENT_UNSPECIFIED\n - AUTOMATIC\n - MANUAL\n management:\n type: string\n x-dcl-go-name: Management\n x-dcl-go-type: FeatureMembershipMeshManagementEnum\n description: 'Whether to automatically manage Service Mesh. Possible\n values: MANAGEMENT_UNSPECIFIED, MANAGEMENT_AUTOMATIC, MANAGEMENT_MANUAL'\n enum:\n - MANAGEMENT_UNSPECIFIED\n - MANAGEMENT_AUTOMATIC\n - MANAGEMENT_MANUAL\n policycontroller:\n type: object\n x-dcl-go-name: Policycontroller\n x-dcl-go-type: FeatureMembershipPolicycontroller\n description: Policy Controller-specific spec.\n required:\n - policyControllerHubConfig\n properties:\n policyControllerHubConfig:\n type: object\n x-dcl-go-name: PolicyControllerHubConfig\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfig\n description: Policy Controller configuration for the cluster.\n properties:\n auditIntervalSeconds:\n type: integer\n format: int64\n x-dcl-go-name: AuditIntervalSeconds\n description: Sets the interval for Policy Controller Audit Scans\n (in seconds). 
When set to 0, this disables audit functionality\n altogether.\n constraintViolationLimit:\n type: integer\n format: int64\n x-dcl-go-name: ConstraintViolationLimit\n description: The maximum number of audit violations to be stored\n in a constraint. If not set, the internal default of 20 will be\n used.\n deploymentConfigs:\n type: object\n additionalProperties:\n type: object\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs\n properties:\n containerResources:\n type: object\n x-dcl-go-name: ContainerResources\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources\n description: Container resource requirements.\n x-dcl-conflicts:\n - replicaCount\n - podAffinity\n - podTolerations\n properties:\n limits:\n type: object\n x-dcl-go-name: Limits\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits\n description: Limits describes the maximum amount of compute\n resources allowed for use by the running container.\n properties:\n cpu:\n type: string\n x-dcl-go-name: Cpu\n description: CPU requirement expressed in Kubernetes\n resource units.\n memory:\n type: string\n x-dcl-go-name: Memory\n description: Memory requirement expressed in Kubernetes\n resource units.\n requests:\n type: object\n x-dcl-go-name: Requests\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests\n description: Requests describes the amount of compute\n resources reserved for the container by the kube-scheduler.\n properties:\n cpu:\n type: string\n x-dcl-go-name: Cpu\n description: CPU requirement expressed in Kubernetes\n resource units.\n memory:\n type: string\n x-dcl-go-name: Memory\n description: Memory requirement expressed in Kubernetes\n resource units.\n podAffinity:\n type: string\n x-dcl-go-name: PodAffinity\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum\n description: 'Pod affinity configuration. 
Possible values:\n AFFINITY_UNSPECIFIED, NO_AFFINITY, ANTI_AFFINITY'\n x-dcl-conflicts:\n - replicaCount\n - containerResources\n - podTolerations\n enum:\n - AFFINITY_UNSPECIFIED\n - NO_AFFINITY\n - ANTI_AFFINITY\n podTolerations:\n type: array\n x-dcl-go-name: PodTolerations\n description: Pod tolerations of node taints.\n x-dcl-conflicts:\n - replicaCount\n - containerResources\n - podAffinity\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations\n properties:\n effect:\n type: string\n x-dcl-go-name: Effect\n description: Matches a taint effect.\n key:\n type: string\n x-dcl-go-name: Key\n description: Matches a taint key (not necessarily unique).\n operator:\n type: string\n x-dcl-go-name: Operator\n description: Matches a taint operator.\n value:\n type: string\n x-dcl-go-name: Value\n description: Matches a taint value.\n replicaCount:\n type: integer\n format: int64\n x-dcl-go-name: ReplicaCount\n description: Pod replica count.\n x-dcl-conflicts:\n - containerResources\n - podAffinity\n - podTolerations\n x-dcl-go-name: DeploymentConfigs\n description: Map of deployment configs to deployments (\"admission\",\n \"audit\", \"mutation\").\n x-dcl-server-default: true\n exemptableNamespaces:\n type: array\n x-dcl-go-name: ExemptableNamespaces\n description: The set of namespaces that are excluded from Policy\n Controller checks. Namespaces do not need to currently exist on\n the cluster.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n installSpec:\n type: string\n x-dcl-go-name: InstallSpec\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum\n description: 'Configures the mode of the Policy Controller installation.\n Possible values: INSTALL_SPEC_UNSPECIFIED, INSTALL_SPEC_NOT_INSTALLED,\n INSTALL_SPEC_ENABLED, INSTALL_SPEC_SUSPENDED, INSTALL_SPEC_DETACHED'\n enum:\n - INSTALL_SPEC_UNSPECIFIED\n - INSTALL_SPEC_NOT_INSTALLED\n - INSTALL_SPEC_ENABLED\n - INSTALL_SPEC_SUSPENDED\n - INSTALL_SPEC_DETACHED\n logDeniesEnabled:\n type: boolean\n x-dcl-go-name: LogDeniesEnabled\n description: Logs all denies and dry run failures.\n monitoring:\n type: object\n x-dcl-go-name: Monitoring\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring\n description: 'Specifies the backends Policy Controller should export\n metrics to. For example, to specify metrics should be exported\n to Cloud Monitoring and Prometheus, specify backends: [\"cloudmonitoring\",\n \"prometheus\"]. Default: [\"cloudmonitoring\", \"prometheus\"]'\n x-dcl-server-default: true\n properties:\n backends:\n type: array\n x-dcl-go-name: Backends\n description: ' Specifies the list of backends Policy Controller\n will export to. 
Specifying an empty value `[]` disables metrics\n export.'\n x-dcl-server-default: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum\n enum:\n - MONITORING_BACKEND_UNSPECIFIED\n - PROMETHEUS\n - CLOUD_MONITORING\n mutationEnabled:\n type: boolean\n x-dcl-go-name: MutationEnabled\n description: Enables the ability to mutate resources using Policy\n Controller.\n policyContent:\n type: object\n x-dcl-go-name: PolicyContent\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent\n description: Specifies the desired policy content on the cluster.\n x-dcl-server-default: true\n properties:\n bundles:\n type: object\n additionalProperties:\n type: object\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles\n properties:\n exemptedNamespaces:\n type: array\n x-dcl-go-name: ExemptedNamespaces\n description: The set of namespaces to be exempted from\n the bundle.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-go-name: Bundles\n description: map of bundle name to BundleInstallSpec. The bundle\n name maps to the `bundleName` key in the `policycontroller.gke.io/constraintData`\n annotation on a constraint.\n templateLibrary:\n type: object\n x-dcl-go-name: TemplateLibrary\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary\n description: Configures the installation of the Template Library.\n x-dcl-server-default: true\n properties:\n installation:\n type: string\n x-dcl-go-name: Installation\n x-dcl-go-type: FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum\n description: 'Configures the manner in which the template\n library is installed on the cluster. Possible values:\n INSTALLATION_UNSPECIFIED, NOT_INSTALLED, ALL'\n enum:\n - INSTALLATION_UNSPECIFIED\n - NOT_INSTALLED\n - ALL\n referentialRulesEnabled:\n type: boolean\n x-dcl-go-name: ReferentialRulesEnabled\n description: Enables the ability to use Constraint Templates that\n reference to objects other than the object currently being evaluated.\n version:\n type: string\n x-dcl-go-name: Version\n description: Optional. Version of Policy Controller to install. Defaults\n to the latest version.\n x-dcl-server-default: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project of the feature\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n x-dcl-parameter: true\n") -// 29568 bytes -// MD5: 6b8ad44a9f3a7ea167a5688797017807 +// 31027 bytes +// MD5: b31d3069bf0a62263b6221b906cefc4d diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/client.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/client.go deleted file mode 100644 index be644938ca1..00000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/client.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2024 Google LLC. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Package networkconnectivity defines operations in the declarative SDK. -package networkconnectivity - -import ( - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -// The Client is the base struct of all operations. This will receive the -// Get, Delete, List, and Apply operations on all resources. -type Client struct { - Config *dcl.Config -} - -// NewClient creates a client that retries all operations a few times each. -func NewClient(c *dcl.Config) *Client { - return &Client{ - Config: c, - } -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/hub.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/hub.go deleted file mode 100644 index 25ed7d15a45..00000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/hub.go +++ /dev/null @@ -1,450 +0,0 @@ -// Copyright 2024 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package networkconnectivity - -import ( - "context" - "crypto/sha256" - "encoding/json" - "fmt" - "time" - - "google.golang.org/api/googleapi" - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -type Hub struct { - Name *string `json:"name"` - CreateTime *string `json:"createTime"` - UpdateTime *string `json:"updateTime"` - Labels map[string]string `json:"labels"` - Description *string `json:"description"` - UniqueId *string `json:"uniqueId"` - State *HubStateEnum `json:"state"` - Project *string `json:"project"` - RoutingVpcs []HubRoutingVpcs `json:"routingVpcs"` -} - -func (r *Hub) String() string { - return dcl.SprintResource(r) -} - -// The enum HubStateEnum. -type HubStateEnum string - -// HubStateEnumRef returns a *HubStateEnum with the value of string s -// If the empty string is provided, nil is returned. -func HubStateEnumRef(s string) *HubStateEnum { - v := HubStateEnum(s) - return &v -} - -func (v HubStateEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"STATE_UNSPECIFIED", "CREATING", "ACTIVE", "DELETING"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "HubStateEnum", - Value: string(v), - Valid: []string{}, - } -} - -type HubRoutingVpcs struct { - empty bool `json:"-"` - Uri *string `json:"uri"` -} - -type jsonHubRoutingVpcs HubRoutingVpcs - -func (r *HubRoutingVpcs) UnmarshalJSON(data []byte) error { - var res jsonHubRoutingVpcs - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyHubRoutingVpcs - } else { - - r.Uri = res.Uri - - } - return nil -} - -// This object is used to assert a desired state where this HubRoutingVpcs is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyHubRoutingVpcs *HubRoutingVpcs = &HubRoutingVpcs{empty: true} - -func (r *HubRoutingVpcs) Empty() bool { - return r.empty -} - -func (r *HubRoutingVpcs) String() string { - return dcl.SprintResource(r) -} - -func (r *HubRoutingVpcs) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -// Describe returns a simple description of this resource to ensure that automated tools -// can identify it. -func (r *Hub) Describe() dcl.ServiceTypeVersion { - return dcl.ServiceTypeVersion{ - Service: "network_connectivity", - Type: "Hub", - Version: "networkconnectivity", - } -} - -func (r *Hub) ID() (string, error) { - if err := extractHubFields(r); err != nil { - return "", err - } - nr := r.urlNormalized() - params := map[string]interface{}{ - "name": dcl.ValueOrEmptyString(nr.Name), - "create_time": dcl.ValueOrEmptyString(nr.CreateTime), - "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), - "labels": dcl.ValueOrEmptyString(nr.Labels), - "description": dcl.ValueOrEmptyString(nr.Description), - "unique_id": dcl.ValueOrEmptyString(nr.UniqueId), - "state": dcl.ValueOrEmptyString(nr.State), - "project": dcl.ValueOrEmptyString(nr.Project), - "routing_vpcs": dcl.ValueOrEmptyString(nr.RoutingVpcs), - } - return dcl.Nprintf("projects/{{project}}/locations/global/hubs/{{name}}", params), nil -} - -const HubMaxPage = -1 - -type HubList struct { - Items []*Hub - - nextToken string - - pageSize int32 - - resource *Hub -} - -func (l *HubList) HasNext() bool { - return l.nextToken != "" -} - -func (l *HubList) Next(ctx context.Context, c *Client) error { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if !l.HasNext() { - return fmt.Errorf("no next page") - } - items, token, err := c.listHub(ctx, l.resource, l.nextToken, l.pageSize) - if err != nil { - return err - } - l.Items = items - l.nextToken = token - return err -} - -func (c *Client) ListHub(ctx context.Context, project string) (*HubList, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - return c.ListHubWithMaxResults(ctx, project, HubMaxPage) - -} - -func (c *Client) ListHubWithMaxResults(ctx context.Context, project string, pageSize int32) (*HubList, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // Create a resource object so that we can use proper url 
normalization methods. - r := &Hub{ - Project: &project, - } - items, token, err := c.listHub(ctx, r, "", pageSize) - if err != nil { - return nil, err - } - return &HubList{ - Items: items, - nextToken: token, - pageSize: pageSize, - resource: r, - }, nil -} - -func (c *Client) GetHub(ctx context.Context, r *Hub) (*Hub, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // This is *purposefully* supressing errors. - // This function is used with url-normalized values + not URL normalized values. - // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. - extractHubFields(r) - - b, err := c.getHubRaw(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - return nil, &googleapi.Error{ - Code: 404, - Message: err.Error(), - } - } - return nil, err - } - result, err := unmarshalHub(b, c, r) - if err != nil { - return nil, err - } - result.Project = r.Project - result.Name = r.Name - - c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) - result, err = canonicalizeHubNewState(c, result, r) - if err != nil { - return nil, err - } - if err := postReadExtractHubFields(result); err != nil { - return result, err - } - c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) - - return result, nil -} - -func (c *Client) DeleteHub(ctx context.Context, r *Hub) error { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if r == nil { - return fmt.Errorf("Hub resource is nil") - } - c.Config.Logger.InfoWithContext(ctx, "Deleting Hub...") - deleteOp := deleteHubOperation{} - return deleteOp.do(ctx, r, c) -} - -// DeleteAllHub deletes all resources that the filter functions returns true on. -func (c *Client) DeleteAllHub(ctx context.Context, project string, filter func(*Hub) bool) error { - listObj, err := c.ListHub(ctx, project) - if err != nil { - return err - } - - err = c.deleteAllHub(ctx, filter, listObj.Items) - if err != nil { - return err - } - for listObj.HasNext() { - err = listObj.Next(ctx, c) - if err != nil { - return nil - } - err = c.deleteAllHub(ctx, filter, listObj.Items) - if err != nil { - return err - } - } - return nil -} - -func (c *Client) ApplyHub(ctx context.Context, rawDesired *Hub, opts ...dcl.ApplyOption) (*Hub, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - ctx = dcl.ContextWithRequestID(ctx) - var resultNewState *Hub - err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - newState, err := applyHubHelper(c, ctx, rawDesired, opts...) - resultNewState = newState - if err != nil { - // If the error is 409, there is conflict in resource update. - // Here we want to apply changes based on latest state. - if dcl.IsConflictError(err) { - return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} - } - return nil, err - } - return nil, nil - }, c.Config.RetryProvider) - return resultNewState, err -} - -func applyHubHelper(c *Client, ctx context.Context, rawDesired *Hub, opts ...dcl.ApplyOption) (*Hub, error) { - c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyHub...") - c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) - - // 1.1: Validation of user-specified fields in desired state. 
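	// The numbered comments in this helper trace the DCL apply lifecycle:
	// phase 1 validates and canonicalizes state, phase 2 plans operations,
	// phase 3 re-reads and verifies the result. A minimal caller-side sketch
	// (illustrative only; cfg is an assumed, prepared *dcl.Config):
	//
	//	name, project := "my-hub", "my-project" // hypothetical values
	//	client := NewClient(cfg)
	//	hub, err := client.ApplyHub(ctx, &Hub{Name: &name, Project: &project})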
- if err := rawDesired.validate(); err != nil { - return nil, err - } - - if err := extractHubFields(rawDesired); err != nil { - return nil, err - } - - initial, desired, fieldDiffs, err := c.hubDiffsForRawDesired(ctx, rawDesired, opts...) - if err != nil { - return nil, fmt.Errorf("failed to create a diff: %w", err) - } - - diffs, err := convertFieldDiffsToHubDiffs(c.Config, fieldDiffs, opts) - if err != nil { - return nil, err - } - - // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). - - // 2.3: Lifecycle Directive Check - var create bool - lp := dcl.FetchLifecycleParams(opts) - if initial == nil { - if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} - } - create = true - } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), - } - } else { - for _, d := range diffs { - if d.RequiresRecreate { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), - } - } - if dcl.HasLifecycleParam(lp, dcl.BlockModification) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} - } - } - } - - // 2.4 Imperative Request Planning - var ops []hubApiOperation - if create { - ops = append(ops, &createHubOperation{}) - } else { - for _, d := range diffs { - ops = append(ops, d.UpdateOp) - } - } - c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) - - // 2.5 Request Actuation - for _, op := range ops { - c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) - if err := op.do(ctx, desired, c); err != nil { - c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) - return nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) - } - return applyHubDiff(c, ctx, desired, rawDesired, ops, opts...) -} - -func applyHubDiff(c *Client, ctx context.Context, desired *Hub, rawDesired *Hub, ops []hubApiOperation, opts ...dcl.ApplyOption) (*Hub, error) { - // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") - rawNew, err := c.GetHub(ctx, desired) - if err != nil { - return nil, err - } - // Get additional values from the first response. - // These values should be merged into the newState above. - if len(ops) > 0 { - lastOp := ops[len(ops)-1] - if o, ok := lastOp.(*createHubOperation); ok { - if r, hasR := o.FirstResponse(); hasR { - - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") - - fullResp, err := unmarshalMapHub(r, c, rawDesired) - if err != nil { - return nil, err - } - - rawNew, err = canonicalizeHubNewState(c, rawNew, fullResp) - if err != nil { - return nil, err - } - } - } - } - - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) - // 3.2b Canonicalization of raw new state using raw desired state - newState, err := canonicalizeHubNewState(c, rawNew, rawDesired) - if err != nil { - return rawNew, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) - // 3.3 Comparison of the new state and raw desired state. 
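	// When diffs survive the apply, this helper returns the fetched new state
	// together with a dcl.DiffAfterApplyError. A hedged sketch of detecting
	// that case on the caller side (illustrative, not vendored code):
	//
	//	hub, err := client.ApplyHub(ctx, desired)
	//	var dae dcl.DiffAfterApplyError
	//	if errors.As(err, &dae) {
	//		// hub is the post-apply state; dae.Diffs names the fields that
	//		// still differ from the desired state.
	//	}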
- // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE - newDesired, err := canonicalizeHubDesiredState(rawDesired, newState) - if err != nil { - return newState, err - } - - if err := postReadExtractHubFields(newState); err != nil { - return newState, err - } - - // Need to ensure any transformations made here match acceptably in differ. - if err := postReadExtractHubFields(newDesired); err != nil { - return newState, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) - newDiffs, err := diffHub(c, newDesired, newState) - if err != nil { - return newState, err - } - - if len(newDiffs) == 0 { - c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") - } else { - c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) - diffMessages := make([]string, len(newDiffs)) - for i, d := range newDiffs { - diffMessages[i] = fmt.Sprintf("%v", d) - } - return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} - } - c.Config.Logger.InfoWithContext(ctx, "Done Apply.") - return newState, nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/hub.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/hub.yaml deleted file mode 100644 index c7e4ac3293a..00000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/hub.yaml +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright 2024 Google LLC. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
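# Note on the x-dcl-* keys below (an editorial gloss, not vendored content):
# they are DCL schema extensions. x-kubernetes-immutable marks fields whose
# change forces recreation, readOnly marks output-only fields, and
# x-dcl-references ties a field to another resource type; the generated
# diffHub in hub_internal.go below mirrors these as RequiresRecreate,
# OutputOnly, and ReferenceType diff info.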
-info: - title: NetworkConnectivity/Hub - description: The NetworkConnectivity Hub resource - x-dcl-struct-name: Hub - x-dcl-has-iam: false -paths: - get: - description: The function used to get information about a Hub - parameters: - - name: hub - required: true - description: A full instance of a Hub - apply: - description: The function used to apply information about a Hub - parameters: - - name: hub - required: true - description: A full instance of a Hub - delete: - description: The function used to delete a Hub - parameters: - - name: hub - required: true - description: A full instance of a Hub - deleteAll: - description: The function used to delete all Hub - parameters: - - name: project - required: true - schema: - type: string - list: - description: The function used to list information about many Hub - parameters: - - name: project - required: true - schema: - type: string -components: - schemas: - Hub: - title: Hub - x-dcl-id: projects/{{project}}/locations/global/hubs/{{name}} - x-dcl-locations: - - global - x-dcl-parent-container: project - x-dcl-labels: labels - x-dcl-has-create: true - x-dcl-has-iam: false - x-dcl-read-timeout: 0 - x-dcl-apply-timeout: 0 - x-dcl-delete-timeout: 0 - type: object - required: - - name - - project - properties: - createTime: - type: string - format: date-time - x-dcl-go-name: CreateTime - readOnly: true - description: Output only. The time the hub was created. - x-kubernetes-immutable: true - description: - type: string - x-dcl-go-name: Description - description: An optional description of the hub. - labels: - type: object - additionalProperties: - type: string - x-dcl-go-name: Labels - description: Optional labels in key:value format. For more information about - labels, see [Requirements for labels](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements). - name: - type: string - x-dcl-go-name: Name - description: 'Immutable. The name of the hub. Hub names must be unique. - They use the following form: `projects/{project_number}/locations/global/hubs/{hub_id}`' - x-kubernetes-immutable: true - x-dcl-has-long-form: true - project: - type: string - x-dcl-go-name: Project - description: The project for the resource - x-kubernetes-immutable: true - x-dcl-references: - - resource: Cloudresourcemanager/Project - field: name - parent: true - x-dcl-parameter: true - routingVpcs: - type: array - x-dcl-go-name: RoutingVpcs - readOnly: true - description: The VPC network associated with this hub's spokes. All of the - VPN tunnels, VLAN attachments, and router appliance instances referenced - by this hub's spokes must belong to this VPC network. This field is read-only. - Network Connectivity Center automatically populates it based on the set - of spokes attached to the hub. - x-kubernetes-immutable: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: HubRoutingVpcs - properties: - uri: - type: string - x-dcl-go-name: Uri - description: The URI of the VPC network. - x-dcl-references: - - resource: Compute/Network - field: selfLink - state: - type: string - x-dcl-go-name: State - x-dcl-go-type: HubStateEnum - readOnly: true - description: 'Output only. The current lifecycle state of this hub. Possible - values: STATE_UNSPECIFIED, CREATING, ACTIVE, DELETING' - x-kubernetes-immutable: true - enum: - - STATE_UNSPECIFIED - - CREATING - - ACTIVE - - DELETING - uniqueId: - type: string - x-dcl-go-name: UniqueId - readOnly: true - description: Output only. The Google-generated UUID for the hub. 
This value - is unique across all hub resources. If a hub is deleted and another with - the same name is created, the new hub is assigned a different unique_id. - x-kubernetes-immutable: true - updateTime: - type: string - format: date-time - x-dcl-go-name: UpdateTime - readOnly: true - description: Output only. The time the hub was last updated. - x-kubernetes-immutable: true diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/hub_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/hub_internal.go deleted file mode 100644 index ae16825c2e7..00000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/hub_internal.go +++ /dev/null @@ -1,1113 +0,0 @@ -// Copyright 2024 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package networkconnectivity - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "strings" - - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl/operations" -) - -func (r *Hub) validate() error { - - if err := dcl.Required(r, "name"); err != nil { - return err - } - if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { - return err - } - return nil -} -func (r *HubRoutingVpcs) validate() error { - return nil -} -func (r *Hub) basePath() string { - params := map[string]interface{}{} - return dcl.Nprintf("https://networkconnectivity.googleapis.com/v1/", params) -} - -func (r *Hub) getURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/locations/global/hubs/{{name}}", nr.basePath(), userBasePath, params), nil -} - -func (r *Hub) listURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - } - return dcl.URL("projects/{{project}}/locations/global/hubs", nr.basePath(), userBasePath, params), nil - -} - -func (r *Hub) createURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/locations/global/hubs?hubId={{name}}", nr.basePath(), userBasePath, params), nil - -} - -func (r *Hub) deleteURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/locations/global/hubs/{{name}}", 
nr.basePath(), userBasePath, params), nil -} - -// hubApiOperation represents a mutable operation in the underlying REST -// API such as Create, Update, or Delete. -type hubApiOperation interface { - do(context.Context, *Hub, *Client) error -} - -// newUpdateHubUpdateHubRequest creates a request for an -// Hub resource's UpdateHub update type by filling in the update -// fields based on the intended state of the resource. -func newUpdateHubUpdateHubRequest(ctx context.Context, f *Hub, c *Client) (map[string]interface{}, error) { - req := map[string]interface{}{} - res := f - _ = res - - if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { - req["labels"] = v - } - if v := f.Description; !dcl.IsEmptyValueIndirect(v) { - req["description"] = v - } - return req, nil -} - -// marshalUpdateHubUpdateHubRequest converts the update into -// the final JSON request body. -func marshalUpdateHubUpdateHubRequest(c *Client, m map[string]interface{}) ([]byte, error) { - - return json.Marshal(m) -} - -type updateHubUpdateHubOperation struct { - // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. - // Usually it will be nil - this is to prevent us from accidentally depending on apply - // options, which should usually be unnecessary. - ApplyOptions []dcl.ApplyOption - FieldDiffs []*dcl.FieldDiff -} - -// do creates a request and sends it to the appropriate URL. In most operations, -// do will transcribe a subset of the resource into a request object and send a -// PUT request to a single URL. - -func (op *updateHubUpdateHubOperation) do(ctx context.Context, r *Hub, c *Client) error { - _, err := c.GetHub(ctx, r) - if err != nil { - return err - } - - u, err := r.updateURL(c.Config.BasePath, "UpdateHub") - if err != nil { - return err - } - mask := dcl.UpdateMask(op.FieldDiffs) - u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) - if err != nil { - return err - } - - req, err := newUpdateHubUpdateHubRequest(ctx, r, c) - if err != nil { - return err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) - body, err := marshalUpdateHubUpdateHubRequest(c, req) - if err != nil { - return err - } - resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) - if err != nil { - return err - } - - var o operations.StandardGCPOperation - if err := dcl.ParseResponse(resp.Response, &o); err != nil { - return err - } - err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") - - if err != nil { - return err - } - - return nil -} - -func (c *Client) listHubRaw(ctx context.Context, r *Hub, pageToken string, pageSize int32) ([]byte, error) { - u, err := r.urlNormalized().listURL(c.Config.BasePath) - if err != nil { - return nil, err - } - - m := make(map[string]string) - if pageToken != "" { - m["pageToken"] = pageToken - } - - if pageSize != HubMaxPage { - m["pageSize"] = fmt.Sprintf("%v", pageSize) - } - - u, err = dcl.AddQueryParams(u, m) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - return ioutil.ReadAll(resp.Response.Body) -} - -type listHubOperation struct { - Hubs []map[string]interface{} `json:"hubs"` - Token string `json:"nextPageToken"` -} - -func (c *Client) listHub(ctx context.Context, r *Hub, pageToken string, pageSize int32) ([]*Hub, string, error) { - b, err := c.listHubRaw(ctx, r, 
pageToken, pageSize) - if err != nil { - return nil, "", err - } - - var m listHubOperation - if err := json.Unmarshal(b, &m); err != nil { - return nil, "", err - } - - var l []*Hub - for _, v := range m.Hubs { - res, err := unmarshalMapHub(v, c, r) - if err != nil { - return nil, m.Token, err - } - res.Project = r.Project - l = append(l, res) - } - - return l, m.Token, nil -} - -func (c *Client) deleteAllHub(ctx context.Context, f func(*Hub) bool, resources []*Hub) error { - var errors []string - for _, res := range resources { - if f(res) { - // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. - err := c.DeleteHub(ctx, res) - if err != nil { - errors = append(errors, err.Error()) - } - } - } - if len(errors) > 0 { - return fmt.Errorf("%v", strings.Join(errors, "\n")) - } else { - return nil - } -} - -type deleteHubOperation struct{} - -func (op *deleteHubOperation) do(ctx context.Context, r *Hub, c *Client) error { - r, err := c.GetHub(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - c.Config.Logger.InfoWithContextf(ctx, "Hub not found, returning. Original error: %v", err) - return nil - } - c.Config.Logger.WarningWithContextf(ctx, "GetHub checking for existence. error: %v", err) - return err - } - - u, err := r.deleteURL(c.Config.BasePath) - if err != nil { - return err - } - - // Delete should never have a body - body := &bytes.Buffer{} - resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) - if err != nil { - return err - } - - // wait for object to be deleted. - var o operations.StandardGCPOperation - if err := dcl.ParseResponse(resp.Response, &o); err != nil { - return err - } - if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { - return err - } - - // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. - // This is the reason we are adding retry to handle that case. - retriesRemaining := 10 - dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - _, err := c.GetHub(ctx, r) - if dcl.IsNotFound(err) { - return nil, nil - } - if retriesRemaining > 0 { - retriesRemaining-- - return &dcl.RetryDetails{}, dcl.OperationNotDone{} - } - return nil, dcl.NotDeletedError{ExistingResource: r} - }, c.Config.RetryProvider) - return nil -} - -// Create operations are similar to Update operations, although they do not have -// specific request objects. The Create request object is the json encoding of -// the resource, which is modified by res.marshal to form the base request body. -type createHubOperation struct { - response map[string]interface{} -} - -func (op *createHubOperation) FirstResponse() (map[string]interface{}, bool) { - return op.response, len(op.response) > 0 -} - -func (op *createHubOperation) do(ctx context.Context, r *Hub, c *Client) error { - c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) - u, err := r.createURL(c.Config.BasePath) - if err != nil { - return err - } - - req, err := r.marshal(c) - if err != nil { - return err - } - resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) - if err != nil { - return err - } - // wait for object to be created. 
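	// The create POST returns a long-running operation rather than the Hub
	// itself: ParseResponse below decodes the operation body, and Wait polls
	// until the operation reports done, using the service base path and GET
	// for the polling requests. The delete and update operations above follow
	// the same StandardGCPOperation pattern.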
- var o operations.StandardGCPOperation - if err := dcl.ParseResponse(resp.Response, &o); err != nil { - return err - } - if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { - c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) - return err - } - c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") - op.response, _ = o.FirstResponse() - - if _, err := c.GetHub(ctx, r); err != nil { - c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) - return err - } - - return nil -} - -func (c *Client) getHubRaw(ctx context.Context, r *Hub) ([]byte, error) { - - u, err := r.getURL(c.Config.BasePath) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - b, err := ioutil.ReadAll(resp.Response.Body) - if err != nil { - return nil, err - } - - return b, nil -} - -func (c *Client) hubDiffsForRawDesired(ctx context.Context, rawDesired *Hub, opts ...dcl.ApplyOption) (initial, desired *Hub, diffs []*dcl.FieldDiff, err error) { - c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") - // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. - var fetchState *Hub - if sh := dcl.FetchStateHint(opts); sh != nil { - if r, ok := sh.(*Hub); !ok { - c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Hub, got %T", sh) - } else { - fetchState = r - } - } - if fetchState == nil { - fetchState = rawDesired - } - - // 1.2: Retrieval of raw initial state from API - rawInitial, err := c.GetHub(ctx, fetchState) - if rawInitial == nil { - if !dcl.IsNotFound(err) { - c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Hub resource already exists: %s", err) - return nil, nil, nil, fmt.Errorf("failed to retrieve Hub resource: %v", err) - } - c.Config.Logger.InfoWithContext(ctx, "Found that Hub resource did not exist.") - // Perform canonicalization to pick up defaults. - desired, err = canonicalizeHubDesiredState(rawDesired, rawInitial) - return nil, desired, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Hub: %v", rawInitial) - c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Hub: %v", rawDesired) - - // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. - if err := extractHubFields(rawInitial); err != nil { - return nil, nil, nil, err - } - - // 1.3: Canonicalize raw initial state into initial state. - initial, err = canonicalizeHubInitialState(rawInitial, rawDesired) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Hub: %v", initial) - - // 1.4: Canonicalize raw desired state into desired state. - desired, err = canonicalizeHubDesiredState(rawDesired, rawInitial, opts...) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Hub: %v", desired) - - // 2.1: Comparison of initial and desired state. - diffs, err = diffHub(c, desired, initial, opts...) - return initial, desired, diffs, err -} - -func canonicalizeHubInitialState(rawInitial, rawDesired *Hub) (*Hub, error) { - // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. 
- return rawInitial, nil -} - -/* -* Canonicalizers -* -* These are responsible for converting either a user-specified config or a -* GCP API response to a standard format that can be used for difference checking. -* */ - -func canonicalizeHubDesiredState(rawDesired, rawInitial *Hub, opts ...dcl.ApplyOption) (*Hub, error) { - - if rawInitial == nil { - // Since the initial state is empty, the desired state is all we have. - // We canonicalize the remaining nested objects with nil to pick up defaults. - - return rawDesired, nil - } - canonicalDesired := &Hub{} - if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { - canonicalDesired.Name = rawInitial.Name - } else { - canonicalDesired.Name = rawDesired.Name - } - if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - canonicalDesired.Labels = rawInitial.Labels - } else { - canonicalDesired.Labels = rawDesired.Labels - } - if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) { - canonicalDesired.Description = rawInitial.Description - } else { - canonicalDesired.Description = rawDesired.Description - } - if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { - canonicalDesired.Project = rawInitial.Project - } else { - canonicalDesired.Project = rawDesired.Project - } - return canonicalDesired, nil -} - -func canonicalizeHubNewState(c *Client, rawNew, rawDesired *Hub) (*Hub, error) { - - if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { - rawNew.Name = rawDesired.Name - } else { - if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { - rawNew.Name = rawDesired.Name - } - } - - if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { - rawNew.CreateTime = rawDesired.CreateTime - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { - rawNew.UpdateTime = rawDesired.UpdateTime - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) { - rawNew.Labels = rawDesired.Labels - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) { - rawNew.Description = rawDesired.Description - } else { - if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) { - rawNew.Description = rawDesired.Description - } - } - - if dcl.IsEmptyValueIndirect(rawNew.UniqueId) && dcl.IsEmptyValueIndirect(rawDesired.UniqueId) { - rawNew.UniqueId = rawDesired.UniqueId - } else { - if dcl.StringCanonicalize(rawDesired.UniqueId, rawNew.UniqueId) { - rawNew.UniqueId = rawDesired.UniqueId - } - } - - if dcl.IsEmptyValueIndirect(rawNew.State) && dcl.IsEmptyValueIndirect(rawDesired.State) { - rawNew.State = rawDesired.State - } else { - } - - rawNew.Project = rawDesired.Project - - if dcl.IsEmptyValueIndirect(rawNew.RoutingVpcs) && dcl.IsEmptyValueIndirect(rawDesired.RoutingVpcs) { - rawNew.RoutingVpcs = rawDesired.RoutingVpcs - } else { - rawNew.RoutingVpcs = canonicalizeNewHubRoutingVpcsSlice(c, rawDesired.RoutingVpcs, rawNew.RoutingVpcs) - } - - return rawNew, nil -} - -func canonicalizeHubRoutingVpcs(des, initial *HubRoutingVpcs, opts ...dcl.ApplyOption) *HubRoutingVpcs { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } 
- - cDes := &HubRoutingVpcs{} - - if dcl.IsZeroValue(des.Uri) || (dcl.IsEmptyValueIndirect(des.Uri) && dcl.IsEmptyValueIndirect(initial.Uri)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Uri = initial.Uri - } else { - cDes.Uri = des.Uri - } - - return cDes -} - -func canonicalizeHubRoutingVpcsSlice(des, initial []HubRoutingVpcs, opts ...dcl.ApplyOption) []HubRoutingVpcs { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]HubRoutingVpcs, 0, len(des)) - for _, d := range des { - cd := canonicalizeHubRoutingVpcs(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]HubRoutingVpcs, 0, len(des)) - for i, d := range des { - cd := canonicalizeHubRoutingVpcs(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewHubRoutingVpcs(c *Client, des, nw *HubRoutingVpcs) *HubRoutingVpcs { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for HubRoutingVpcs while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewHubRoutingVpcsSet(c *Client, des, nw []HubRoutingVpcs) []HubRoutingVpcs { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []HubRoutingVpcs - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareHubRoutingVpcsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewHubRoutingVpcs(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewHubRoutingVpcsSlice(c *Client, des, nw []HubRoutingVpcs) []HubRoutingVpcs { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []HubRoutingVpcs - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewHubRoutingVpcs(c, &d, &n)) - } - - return items -} - -// The differ returns a list of diffs, along with a list of operations that should be taken -// to remedy them. Right now, it does not attempt to consolidate operations - if several -// fields can be fixed with a patch update, it will perform the patch several times. -// Diffs on some fields will be ignored if the `desired` state has an empty (nil) -// value. This empty value indicates that the user does not care about the state for -// the field. Empty fields on the actual object will cause diffs. -// TODO(magic-modules-eng): for efficiency in some resources, add batching. 
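// As a concrete illustration of the contract described above (a sketch, not
// part of the vendored file): a change to Labels or Description carries the
// TriggersOperation("updateHubUpdateHubOperation") selector and is fixed by
// a PATCH, while a change to the immutable Name carries RequiresRecreate()
// and makes the apply infeasible unless recreation is permitted.
//
//	desired := &Hub{Name: &name, Project: &project, Description: &newDesc}
//	diffs, err := diffHub(c, desired, actual) // one diff, on "Description"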
-func diffHub(c *Client, desired, actual *Hub, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { - if desired == nil || actual == nil { - return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) - } - - c.Config.Logger.Infof("Diff function called with desired state: %v", desired) - c.Config.Logger.Infof("Diff function called with actual state: %v", actual) - - var fn dcl.FieldName - var newDiffs []*dcl.FieldDiff - // New style diffs. - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateHubUpdateHubOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateHubUpdateHubOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.UniqueId, actual.UniqueId, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UniqueId")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.RoutingVpcs, actual.RoutingVpcs, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareHubRoutingVpcsNewStyle, EmptyObject: EmptyHubRoutingVpcs, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RoutingVpcs")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if len(newDiffs) > 0 { - c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) - } - return newDiffs, nil -} -func compareHubRoutingVpcsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*HubRoutingVpcs) - if !ok { - desiredNotPointer, ok := d.(HubRoutingVpcs) - if !ok { - return nil, fmt.Errorf("obj %v is not a HubRoutingVpcs or *HubRoutingVpcs", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*HubRoutingVpcs) - if !ok { - actualNotPointer, ok := a.(HubRoutingVpcs) - if !ok { - return nil, fmt.Errorf("obj %v is not a HubRoutingVpcs", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Uri, actual.Uri, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateHubUpdateHubOperation")}, fn.AddNest("Uri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -// urlNormalized returns a copy of the resource struct with values normalized -// for URL substitutions. For instance, it converts long-form self-links to -// short-form so they can be substituted in. -func (r *Hub) urlNormalized() *Hub { - normalized := dcl.Copy(*r).(Hub) - normalized.Name = dcl.SelfLinkToName(r.Name) - normalized.Description = dcl.SelfLinkToName(r.Description) - normalized.UniqueId = dcl.SelfLinkToName(r.UniqueId) - normalized.Project = dcl.SelfLinkToName(r.Project) - return &normalized -} - -func (r *Hub) updateURL(userBasePath, updateName string) (string, error) { - nr := r.urlNormalized() - if updateName == "UpdateHub" { - fields := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/locations/global/hubs/{{name}}", nr.basePath(), userBasePath, fields), nil - - } - - return "", fmt.Errorf("unknown update name: %s", updateName) -} - -// marshal encodes the Hub resource into JSON for a Create request, and -// performs transformations from the resource schema to the API schema if -// necessary. -func (r *Hub) marshal(c *Client) ([]byte, error) { - m, err := expandHub(c, r) - if err != nil { - return nil, fmt.Errorf("error marshalling Hub: %w", err) - } - - return json.Marshal(m) -} - -// unmarshalHub decodes JSON responses into the Hub resource schema. -func unmarshalHub(b []byte, c *Client, res *Hub) (*Hub, error) { - var m map[string]interface{} - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return unmarshalMapHub(m, c, res) -} - -func unmarshalMapHub(m map[string]interface{}, c *Client, res *Hub) (*Hub, error) { - - flattened := flattenHub(c, m, res) - if flattened == nil { - return nil, fmt.Errorf("attempted to flatten empty json object") - } - return flattened, nil -} - -// expandHub expands Hub into a JSON request object. 
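// expandHub and the flatten helpers that follow are inverses: expand walks
// the typed struct into the map[string]interface{} request body sent to the
// API, while flatten rebuilds the struct from a decoded response. A minimal
// round-trip sketch (illustrative; c is an assumed, configured *Client):
//
//	m, err := expandHub(c, hub)   // struct -> request body map
//	back := flattenHub(c, m, hub) // response map -> struct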
-func expandHub(c *Client, f *Hub) (map[string]interface{}, error) { - m := make(map[string]interface{}) - res := f - _ = res - if v, err := dcl.DeriveField("projects/%s/locations/global/hubs/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Name)); err != nil { - return nil, fmt.Errorf("error expanding Name into name: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["name"] = v - } - if v := f.Labels; dcl.ValueShouldBeSent(v) { - m["labels"] = v - } - if v := f.Description; dcl.ValueShouldBeSent(v) { - m["description"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Project into project: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["project"] = v - } - - return m, nil -} - -// flattenHub flattens Hub from a JSON request object into the -// Hub type. -func flattenHub(c *Client, i interface{}, res *Hub) *Hub { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - if len(m) == 0 { - return nil - } - - resultRes := &Hub{} - resultRes.Name = dcl.FlattenString(m["name"]) - resultRes.CreateTime = dcl.FlattenString(m["createTime"]) - resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) - resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"]) - resultRes.Description = dcl.FlattenString(m["description"]) - resultRes.UniqueId = dcl.FlattenString(m["uniqueId"]) - resultRes.State = flattenHubStateEnum(m["state"]) - resultRes.Project = dcl.FlattenString(m["project"]) - resultRes.RoutingVpcs = flattenHubRoutingVpcsSlice(c, m["routingVpcs"], res) - - return resultRes -} - -// expandHubRoutingVpcsMap expands the contents of HubRoutingVpcs into a JSON -// request object. -func expandHubRoutingVpcsMap(c *Client, f map[string]HubRoutingVpcs, res *Hub) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandHubRoutingVpcs(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandHubRoutingVpcsSlice expands the contents of HubRoutingVpcs into a JSON -// request object. -func expandHubRoutingVpcsSlice(c *Client, f []HubRoutingVpcs, res *Hub) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandHubRoutingVpcs(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenHubRoutingVpcsMap flattens the contents of HubRoutingVpcs from a JSON -// response object. -func flattenHubRoutingVpcsMap(c *Client, i interface{}, res *Hub) map[string]HubRoutingVpcs { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]HubRoutingVpcs{} - } - - if len(a) == 0 { - return map[string]HubRoutingVpcs{} - } - - items := make(map[string]HubRoutingVpcs) - for k, item := range a { - items[k] = *flattenHubRoutingVpcs(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenHubRoutingVpcsSlice flattens the contents of HubRoutingVpcs from a JSON -// response object. 
-func flattenHubRoutingVpcsSlice(c *Client, i interface{}, res *Hub) []HubRoutingVpcs { - a, ok := i.([]interface{}) - if !ok { - return []HubRoutingVpcs{} - } - - if len(a) == 0 { - return []HubRoutingVpcs{} - } - - items := make([]HubRoutingVpcs, 0, len(a)) - for _, item := range a { - items = append(items, *flattenHubRoutingVpcs(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandHubRoutingVpcs expands an instance of HubRoutingVpcs into a JSON -// request object. -func expandHubRoutingVpcs(c *Client, f *HubRoutingVpcs, res *Hub) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Uri; !dcl.IsEmptyValueIndirect(v) { - m["uri"] = v - } - - return m, nil -} - -// flattenHubRoutingVpcs flattens an instance of HubRoutingVpcs from a JSON -// response object. -func flattenHubRoutingVpcs(c *Client, i interface{}, res *Hub) *HubRoutingVpcs { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &HubRoutingVpcs{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyHubRoutingVpcs - } - r.Uri = dcl.FlattenString(m["uri"]) - - return r -} - -// flattenHubStateEnumMap flattens the contents of HubStateEnum from a JSON -// response object. -func flattenHubStateEnumMap(c *Client, i interface{}, res *Hub) map[string]HubStateEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]HubStateEnum{} - } - - if len(a) == 0 { - return map[string]HubStateEnum{} - } - - items := make(map[string]HubStateEnum) - for k, item := range a { - items[k] = *flattenHubStateEnum(item.(interface{})) - } - - return items -} - -// flattenHubStateEnumSlice flattens the contents of HubStateEnum from a JSON -// response object. -func flattenHubStateEnumSlice(c *Client, i interface{}, res *Hub) []HubStateEnum { - a, ok := i.([]interface{}) - if !ok { - return []HubStateEnum{} - } - - if len(a) == 0 { - return []HubStateEnum{} - } - - items := make([]HubStateEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenHubStateEnum(item.(interface{}))) - } - - return items -} - -// flattenHubStateEnum asserts that an interface is a string, and returns a -// pointer to a *HubStateEnum with the same value as that string. -func flattenHubStateEnum(i interface{}) *HubStateEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return HubStateEnumRef(s) -} - -// This function returns a matcher that checks whether a serialized resource matches this resource -// in its parameters (as defined by the fields in a Get, which definitionally define resource -// identity). This is useful in extracting the element from a List call. 
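// A hedged usage sketch for the matcher below (illustrative only): given the
// raw bytes of one serialized Hub from a List response, the returned closure
// reports whether those bytes denote the same resource identity as r.
//
//	isMine := r.matcher(c)
//	if isMine(rawJSON) {
//		// rawJSON names the same project and hub as r.
//	}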
-func (r *Hub) matcher(c *Client) func([]byte) bool { - return func(b []byte) bool { - cr, err := unmarshalHub(b, c, r) - if err != nil { - c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") - return false - } - nr := r.urlNormalized() - ncr := cr.urlNormalized() - c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) - - if nr.Project == nil && ncr.Project == nil { - c.Config.Logger.Info("Both Project fields null - considering equal.") - } else if nr.Project == nil || ncr.Project == nil { - c.Config.Logger.Info("Only one Project field is null - considering unequal.") - return false - } else if *nr.Project != *ncr.Project { - return false - } - if nr.Name == nil && ncr.Name == nil { - c.Config.Logger.Info("Both Name fields null - considering equal.") - } else if nr.Name == nil || ncr.Name == nil { - c.Config.Logger.Info("Only one Name field is null - considering unequal.") - return false - } else if *nr.Name != *ncr.Name { - return false - } - return true - } -} - -type hubDiff struct { - // The diff should include one or the other of RequiresRecreate or UpdateOp. - RequiresRecreate bool - UpdateOp hubApiOperation - FieldName string // used for error logging -} - -func convertFieldDiffsToHubDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]hubDiff, error) { - opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) - // Map each operation name to the field diffs associated with it. - for _, fd := range fds { - for _, ro := range fd.ResultingOperation { - if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { - fieldDiffs = append(fieldDiffs, fd) - opNamesToFieldDiffs[ro] = fieldDiffs - } else { - config.Logger.Infof("%s required due to diff: %v", ro, fd) - opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} - } - } - } - var diffs []hubDiff - // For each operation name, create a hubDiff which contains the operation. - for opName, fieldDiffs := range opNamesToFieldDiffs { - // Use the first field diff's field name for logging required recreate error. - diff := hubDiff{FieldName: fieldDiffs[0].FieldName} - if opName == "Recreate" { - diff.RequiresRecreate = true - } else { - apiOp, err := convertOpNameToHubApiOperation(opName, fieldDiffs, opts...) 
- if err != nil { - return diffs, err - } - diff.UpdateOp = apiOp - } - diffs = append(diffs, diff) - } - return diffs, nil -} - -func convertOpNameToHubApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (hubApiOperation, error) { - switch opName { - - case "updateHubUpdateHubOperation": - return &updateHubUpdateHubOperation{FieldDiffs: fieldDiffs}, nil - - default: - return nil, fmt.Errorf("no such operation with name: %v", opName) - } -} - -func extractHubFields(r *Hub) error { - return nil -} -func extractHubRoutingVpcsFields(r *Hub, o *HubRoutingVpcs) error { - return nil -} - -func postReadExtractHubFields(r *Hub) error { - return nil -} -func postReadExtractHubRoutingVpcsFields(r *Hub, o *HubRoutingVpcs) error { - return nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/hub_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/hub_schema.go deleted file mode 100644 index 0b5a97be708..00000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/hub_schema.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2024 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
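// DCLHubSchema in this file is the generated, programmatic twin of hub.yaml:
// the same resource description expressed as dcl.Schema values rather than
// YAML. A sketch of reading it (illustrative, not part of the vendored file):
//
//	schema := DCLHubSchema()
//	hubComponent := schema.Components.Schemas["Hub"]
//	_ = hubComponent.SchemaProperty.Properties["name"].Immutable // true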
-package networkconnectivity - -import ( - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func DCLHubSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "NetworkConnectivity/Hub", - Description: "The NetworkConnectivity Hub resource", - StructName: "Hub", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a Hub", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "hub", - Required: true, - Description: "A full instance of a Hub", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a Hub", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "hub", - Required: true, - Description: "A full instance of a Hub", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a Hub", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "hub", - Required: true, - Description: "A full instance of a Hub", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all Hub", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many Hub", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "Hub": &dcl.Component{ - Title: "Hub", - ID: "projects/{{project}}/locations/global/hubs/{{name}}", - Locations: []string{ - "global", - }, - ParentContainer: "project", - LabelsField: "labels", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "project", - }, - Properties: map[string]*dcl.Property{ - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. The time the hub was created.", - Immutable: true, - }, - "description": &dcl.Property{ - Type: "string", - GoName: "Description", - Description: "An optional description of the hub.", - }, - "labels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Labels", - Description: "Optional labels in key:value format. For more information about labels, see [Requirements for labels](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements).", - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Immutable. The name of the hub. Hub names must be unique. They use the following form: `projects/{project_number}/locations/global/hubs/{hub_id}`", - Immutable: true, - HasLongForm: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "routingVpcs": &dcl.Property{ - Type: "array", - GoName: "RoutingVpcs", - ReadOnly: true, - Description: "The VPC network associated with this hub's spokes. All of the VPN tunnels, VLAN attachments, and router appliance instances referenced by this hub's spokes must belong to this VPC network. 
This field is read-only. Network Connectivity Center automatically populates it based on the set of spokes attached to the hub.", - Immutable: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "HubRoutingVpcs", - Properties: map[string]*dcl.Property{ - "uri": &dcl.Property{ - Type: "string", - GoName: "Uri", - Description: "The URI of the VPC network.", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/Network", - Field: "selfLink", - }, - }, - }, - }, - }, - }, - "state": &dcl.Property{ - Type: "string", - GoName: "State", - GoType: "HubStateEnum", - ReadOnly: true, - Description: "Output only. The current lifecycle state of this hub. Possible values: STATE_UNSPECIFIED, CREATING, ACTIVE, DELETING", - Immutable: true, - Enum: []string{ - "STATE_UNSPECIFIED", - "CREATING", - "ACTIVE", - "DELETING", - }, - }, - "uniqueId": &dcl.Property{ - Type: "string", - GoName: "UniqueId", - ReadOnly: true, - Description: "Output only. The Google-generated UUID for the hub. This value is unique across all hub resources. If a hub is deleted and another with the same name is created, the new hub is assigned a different unique_id.", - Immutable: true, - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - ReadOnly: true, - Description: "Output only. The time the hub was last updated.", - Immutable: true, - }, - }, - }, - }, - }, - }, - } -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/hub_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/hub_yaml_embed.go deleted file mode 100644 index 7164548e5b9..00000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/hub_yaml_embed.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2024 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// GENERATED BY gen_go_data.go -// gen_go_data -package networkconnectivity -var YAML_hub blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/networkconnectivity/hub.yaml - -package networkconnectivity - -// blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/networkconnectivity/hub.yaml -var YAML_hub = []byte("info:\n title: NetworkConnectivity/Hub\n description: The NetworkConnectivity Hub resource\n x-dcl-struct-name: Hub\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a Hub\n parameters:\n - name: hub\n required: true\n description: A full instance of a Hub\n apply:\n description: The function used to apply information about a Hub\n parameters:\n - name: hub\n required: true\n description: A full instance of a Hub\n delete:\n description: The function used to delete a Hub\n parameters:\n - name: hub\n required: true\n description: A full instance of a Hub\n deleteAll:\n description: The function used to delete all Hub\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Hub\n parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Hub:\n title: Hub\n x-dcl-id: projects/{{project}}/locations/global/hubs/{{name}}\n x-dcl-locations:\n - global\n x-dcl-parent-container: project\n x-dcl-labels: labels\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - project\n properties:\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The time the hub was created.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: An optional description of the hub.\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Optional labels in key:value format. For more information about\n labels, see [Requirements for labels](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements).\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Immutable. The name of the hub. Hub names must be unique.\n They use the following form: `projects/{project_number}/locations/global/hubs/{hub_id}`'\n x-kubernetes-immutable: true\n x-dcl-has-long-form: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n x-dcl-parameter: true\n routingVpcs:\n type: array\n x-dcl-go-name: RoutingVpcs\n readOnly: true\n description: The VPC network associated with this hub's spokes. All of the\n VPN tunnels, VLAN attachments, and router appliance instances referenced\n by this hub's spokes must belong to this VPC network. This field is read-only.\n Network Connectivity Center automatically populates it based on the set\n of spokes attached to the hub.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: HubRoutingVpcs\n properties:\n uri:\n type: string\n x-dcl-go-name: Uri\n description: The URI of the VPC network.\n x-dcl-references:\n - resource: Compute/Network\n field: selfLink\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: HubStateEnum\n readOnly: true\n description: 'Output only. 
The current lifecycle state of this hub. Possible\n values: STATE_UNSPECIFIED, CREATING, ACTIVE, DELETING'\n x-kubernetes-immutable: true\n enum:\n - STATE_UNSPECIFIED\n - CREATING\n - ACTIVE\n - DELETING\n uniqueId:\n type: string\n x-dcl-go-name: UniqueId\n readOnly: true\n description: Output only. The Google-generated UUID for the hub. This value\n is unique across all hub resources. If a hub is deleted and another with\n the same name is created, the new hub is assigned a different unique_id.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The time the hub was last updated.\n x-kubernetes-immutable: true\n") - -// 4792 bytes -// MD5: f115b00540be96ac6392755af4527e7f diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/spoke.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/spoke.go deleted file mode 100644 index 0e0e3bc7793..00000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/spoke.go +++ /dev/null @@ -1,661 +0,0 @@ -// Copyright 2024 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package networkconnectivity - -import ( - "context" - "crypto/sha256" - "encoding/json" - "fmt" - "time" - - "google.golang.org/api/googleapi" - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -type Spoke struct { - Name *string `json:"name"` - CreateTime *string `json:"createTime"` - UpdateTime *string `json:"updateTime"` - Labels map[string]string `json:"labels"` - Description *string `json:"description"` - Hub *string `json:"hub"` - LinkedVpnTunnels *SpokeLinkedVpnTunnels `json:"linkedVpnTunnels"` - LinkedInterconnectAttachments *SpokeLinkedInterconnectAttachments `json:"linkedInterconnectAttachments"` - LinkedRouterApplianceInstances *SpokeLinkedRouterApplianceInstances `json:"linkedRouterApplianceInstances"` - LinkedVPCNetwork *SpokeLinkedVPCNetwork `json:"linkedVPCNetwork"` - UniqueId *string `json:"uniqueId"` - State *SpokeStateEnum `json:"state"` - Project *string `json:"project"` - Location *string `json:"location"` -} - -func (r *Spoke) String() string { - return dcl.SprintResource(r) -} - -// The enum SpokeStateEnum. -type SpokeStateEnum string - -// SpokeStateEnumRef returns a *SpokeStateEnum with the value of string s -// If the empty string is provided, nil is returned. -func SpokeStateEnumRef(s string) *SpokeStateEnum { - v := SpokeStateEnum(s) - return &v -} - -func (v SpokeStateEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"STATE_UNSPECIFIED", "CREATING", "ACTIVE", "DELETING"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "SpokeStateEnum", - Value: string(v), - Valid: []string{}, - } -} - -type SpokeLinkedVpnTunnels struct { - empty bool `json:"-"` - Uris []string `json:"uris"` - SiteToSiteDataTransfer *bool `json:"siteToSiteDataTransfer"` -} - -type jsonSpokeLinkedVpnTunnels SpokeLinkedVpnTunnels - -func (r *SpokeLinkedVpnTunnels) UnmarshalJSON(data []byte) error { - var res jsonSpokeLinkedVpnTunnels - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptySpokeLinkedVpnTunnels - } else { - - r.Uris = res.Uris - - r.SiteToSiteDataTransfer = res.SiteToSiteDataTransfer - - } - return nil -} - -// This object is used to assert a desired state where this SpokeLinkedVpnTunnels is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptySpokeLinkedVpnTunnels *SpokeLinkedVpnTunnels = &SpokeLinkedVpnTunnels{empty: true} - -func (r *SpokeLinkedVpnTunnels) Empty() bool { - return r.empty -} - -func (r *SpokeLinkedVpnTunnels) String() string { - return dcl.SprintResource(r) -} - -func (r *SpokeLinkedVpnTunnels) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type SpokeLinkedInterconnectAttachments struct { - empty bool `json:"-"` - Uris []string `json:"uris"` - SiteToSiteDataTransfer *bool `json:"siteToSiteDataTransfer"` -} - -type jsonSpokeLinkedInterconnectAttachments SpokeLinkedInterconnectAttachments - -func (r *SpokeLinkedInterconnectAttachments) UnmarshalJSON(data []byte) error { - var res jsonSpokeLinkedInterconnectAttachments - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptySpokeLinkedInterconnectAttachments - } else { - - r.Uris = res.Uris - - r.SiteToSiteDataTransfer = res.SiteToSiteDataTransfer - - } - return nil -} - -// This object is used to assert a desired state where this SpokeLinkedInterconnectAttachments is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
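The Empty* sentinel pattern above, repeated for every nested Spoke type below, exists so that an explicitly empty JSON object can be told apart from an unset field. A minimal sketch of the observable behaviour, not part of the patch; the tunnel URI is invented:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity"
)

func main() {
	var explicit, populated networkconnectivity.SpokeLinkedVpnTunnels
	// `{}` decodes to the EmptySpokeLinkedVpnTunnels sentinel: "explicitly unset".
	_ = json.Unmarshal([]byte(`{}`), &explicit)
	_ = json.Unmarshal([]byte(`{"uris":["projects/p/regions/r/vpnTunnels/t"]}`), &populated)
	fmt.Println(explicit.Empty(), populated.Empty()) // true false
}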
-var EmptySpokeLinkedInterconnectAttachments *SpokeLinkedInterconnectAttachments = &SpokeLinkedInterconnectAttachments{empty: true} - -func (r *SpokeLinkedInterconnectAttachments) Empty() bool { - return r.empty -} - -func (r *SpokeLinkedInterconnectAttachments) String() string { - return dcl.SprintResource(r) -} - -func (r *SpokeLinkedInterconnectAttachments) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type SpokeLinkedRouterApplianceInstances struct { - empty bool `json:"-"` - Instances []SpokeLinkedRouterApplianceInstancesInstances `json:"instances"` - SiteToSiteDataTransfer *bool `json:"siteToSiteDataTransfer"` -} - -type jsonSpokeLinkedRouterApplianceInstances SpokeLinkedRouterApplianceInstances - -func (r *SpokeLinkedRouterApplianceInstances) UnmarshalJSON(data []byte) error { - var res jsonSpokeLinkedRouterApplianceInstances - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptySpokeLinkedRouterApplianceInstances - } else { - - r.Instances = res.Instances - - r.SiteToSiteDataTransfer = res.SiteToSiteDataTransfer - - } - return nil -} - -// This object is used to assert a desired state where this SpokeLinkedRouterApplianceInstances is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptySpokeLinkedRouterApplianceInstances *SpokeLinkedRouterApplianceInstances = &SpokeLinkedRouterApplianceInstances{empty: true} - -func (r *SpokeLinkedRouterApplianceInstances) Empty() bool { - return r.empty -} - -func (r *SpokeLinkedRouterApplianceInstances) String() string { - return dcl.SprintResource(r) -} - -func (r *SpokeLinkedRouterApplianceInstances) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type SpokeLinkedRouterApplianceInstancesInstances struct { - empty bool `json:"-"` - VirtualMachine *string `json:"virtualMachine"` - IPAddress *string `json:"ipAddress"` -} - -type jsonSpokeLinkedRouterApplianceInstancesInstances SpokeLinkedRouterApplianceInstancesInstances - -func (r *SpokeLinkedRouterApplianceInstancesInstances) UnmarshalJSON(data []byte) error { - var res jsonSpokeLinkedRouterApplianceInstancesInstances - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptySpokeLinkedRouterApplianceInstancesInstances - } else { - - r.VirtualMachine = res.VirtualMachine - - r.IPAddress = res.IPAddress - - } - return nil -} - -// This object is used to assert a desired state where this SpokeLinkedRouterApplianceInstancesInstances is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptySpokeLinkedRouterApplianceInstancesInstances *SpokeLinkedRouterApplianceInstancesInstances = &SpokeLinkedRouterApplianceInstancesInstances{empty: true} - -func (r *SpokeLinkedRouterApplianceInstancesInstances) Empty() bool { - return r.empty -} - -func (r *SpokeLinkedRouterApplianceInstancesInstances) String() string { - return dcl.SprintResource(r) -} - -func (r *SpokeLinkedRouterApplianceInstancesInstances) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type SpokeLinkedVPCNetwork struct { - empty bool `json:"-"` - Uri *string `json:"uri"` - ExcludeExportRanges []string `json:"excludeExportRanges"` -} - -type jsonSpokeLinkedVPCNetwork SpokeLinkedVPCNetwork - -func (r *SpokeLinkedVPCNetwork) UnmarshalJSON(data []byte) error { - var res jsonSpokeLinkedVPCNetwork - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptySpokeLinkedVPCNetwork - } else { - - r.Uri = res.Uri - - r.ExcludeExportRanges = res.ExcludeExportRanges - - } - return nil -} - -// This object is used to assert a desired state where this SpokeLinkedVPCNetwork is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptySpokeLinkedVPCNetwork *SpokeLinkedVPCNetwork = &SpokeLinkedVPCNetwork{empty: true} - -func (r *SpokeLinkedVPCNetwork) Empty() bool { - return r.empty -} - -func (r *SpokeLinkedVPCNetwork) String() string { - return dcl.SprintResource(r) -} - -func (r *SpokeLinkedVPCNetwork) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -// Describe returns a simple description of this resource to ensure that automated tools -// can identify it. 
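Equally mechanical is the enum plumbing from earlier in this file. A short sketch of SpokeStateEnumRef and Validate as a caller would exercise them, again not part of the patch:

package main

import (
	"fmt"

	nc "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity"
)

func main() {
	fmt.Println(nc.SpokeStateEnumRef("ACTIVE").Validate()) // <nil>
	fmt.Println(nc.SpokeStateEnumRef("").Validate())       // <nil>; the empty enum is allowed
	fmt.Println(nc.SpokeStateEnumRef("PAUSED").Validate()) // non-nil *dcl.EnumInvalidError
}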
-func (r *Spoke) Describe() dcl.ServiceTypeVersion { - return dcl.ServiceTypeVersion{ - Service: "network_connectivity", - Type: "Spoke", - Version: "networkconnectivity", - } -} - -func (r *Spoke) ID() (string, error) { - if err := extractSpokeFields(r); err != nil { - return "", err - } - nr := r.urlNormalized() - params := map[string]interface{}{ - "name": dcl.ValueOrEmptyString(nr.Name), - "create_time": dcl.ValueOrEmptyString(nr.CreateTime), - "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), - "labels": dcl.ValueOrEmptyString(nr.Labels), - "description": dcl.ValueOrEmptyString(nr.Description), - "hub": dcl.ValueOrEmptyString(nr.Hub), - "linked_vpn_tunnels": dcl.ValueOrEmptyString(nr.LinkedVpnTunnels), - "linked_interconnect_attachments": dcl.ValueOrEmptyString(nr.LinkedInterconnectAttachments), - "linked_router_appliance_instances": dcl.ValueOrEmptyString(nr.LinkedRouterApplianceInstances), - "linked_vpc_network": dcl.ValueOrEmptyString(nr.LinkedVPCNetwork), - "unique_id": dcl.ValueOrEmptyString(nr.UniqueId), - "state": dcl.ValueOrEmptyString(nr.State), - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - } - return dcl.Nprintf("projects/{{project}}/locations/{{location}}/spokes/{{name}}", params), nil -} - -const SpokeMaxPage = -1 - -type SpokeList struct { - Items []*Spoke - - nextToken string - - pageSize int32 - - resource *Spoke -} - -func (l *SpokeList) HasNext() bool { - return l.nextToken != "" -} - -func (l *SpokeList) Next(ctx context.Context, c *Client) error { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if !l.HasNext() { - return fmt.Errorf("no next page") - } - items, token, err := c.listSpoke(ctx, l.resource, l.nextToken, l.pageSize) - if err != nil { - return err - } - l.Items = items - l.nextToken = token - return err -} - -func (c *Client) ListSpoke(ctx context.Context, project, location string) (*SpokeList, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - return c.ListSpokeWithMaxResults(ctx, project, location, SpokeMaxPage) - -} - -func (c *Client) ListSpokeWithMaxResults(ctx context.Context, project, location string, pageSize int32) (*SpokeList, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // Create a resource object so that we can use proper url normalization methods. - r := &Spoke{ - Project: &project, - Location: &location, - } - items, token, err := c.listSpoke(ctx, r, "", pageSize) - if err != nil { - return nil, err - } - return &SpokeList{ - Items: items, - nextToken: token, - pageSize: pageSize, - resource: r, - }, nil -} - -func (c *Client) GetSpoke(ctx context.Context, r *Spoke) (*Spoke, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // This is *purposefully* supressing errors. - // This function is used with url-normalized values + not URL normalized values. - // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. 
- extractSpokeFields(r) - - b, err := c.getSpokeRaw(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - return nil, &googleapi.Error{ - Code: 404, - Message: err.Error(), - } - } - return nil, err - } - result, err := unmarshalSpoke(b, c, r) - if err != nil { - return nil, err - } - result.Project = r.Project - result.Location = r.Location - result.Name = r.Name - - c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) - result, err = canonicalizeSpokeNewState(c, result, r) - if err != nil { - return nil, err - } - if err := postReadExtractSpokeFields(result); err != nil { - return result, err - } - c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) - - return result, nil -} - -func (c *Client) DeleteSpoke(ctx context.Context, r *Spoke) error { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if r == nil { - return fmt.Errorf("Spoke resource is nil") - } - c.Config.Logger.InfoWithContext(ctx, "Deleting Spoke...") - deleteOp := deleteSpokeOperation{} - return deleteOp.do(ctx, r, c) -} - -// DeleteAllSpoke deletes all resources that the filter functions returns true on. -func (c *Client) DeleteAllSpoke(ctx context.Context, project, location string, filter func(*Spoke) bool) error { - listObj, err := c.ListSpoke(ctx, project, location) - if err != nil { - return err - } - - err = c.deleteAllSpoke(ctx, filter, listObj.Items) - if err != nil { - return err - } - for listObj.HasNext() { - err = listObj.Next(ctx, c) - if err != nil { - return nil - } - err = c.deleteAllSpoke(ctx, filter, listObj.Items) - if err != nil { - return err - } - } - return nil -} - -func (c *Client) ApplySpoke(ctx context.Context, rawDesired *Spoke, opts ...dcl.ApplyOption) (*Spoke, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - ctx = dcl.ContextWithRequestID(ctx) - var resultNewState *Spoke - err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - newState, err := applySpokeHelper(c, ctx, rawDesired, opts...) - resultNewState = newState - if err != nil { - // If the error is 409, there is conflict in resource update. - // Here we want to apply changes based on latest state. - if dcl.IsConflictError(err) { - return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} - } - return nil, err - } - return nil, nil - }, c.Config.RetryProvider) - return resultNewState, err -} - -func applySpokeHelper(c *Client, ctx context.Context, rawDesired *Spoke, opts ...dcl.ApplyOption) (*Spoke, error) { - c.Config.Logger.InfoWithContext(ctx, "Beginning ApplySpoke...") - c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) - - // 1.1: Validation of user-specified fields in desired state. - if err := rawDesired.validate(); err != nil { - return nil, err - } - - if err := extractSpokeFields(rawDesired); err != nil { - return nil, err - } - - initial, desired, fieldDiffs, err := c.spokeDiffsForRawDesired(ctx, rawDesired, opts...) - if err != nil { - return nil, fmt.Errorf("failed to create a diff: %w", err) - } - - diffs, err := convertFieldDiffsToSpokeDiffs(c.Config, fieldDiffs, opts) - if err != nil { - return nil, err - } - - // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). 
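Before the lifecycle directive check that follows, a sketch of how those directives look from the caller's side. Everything here is hedged: the helper name, project, and URIs are invented, and dcl.WithLifecycleParam is assumed to be the option constructor matching the dcl.FetchLifecycleParams/dcl.HasLifecycleParam reads below:

// Assumes imports: context, the dcl package, and this networkconnectivity package.
func acquireOnly(ctx context.Context, c *networkconnectivity.Client) (*networkconnectivity.Spoke, error) {
	spoke := &networkconnectivity.Spoke{
		Name:     dcl.String("my-spoke"),
		Hub:      dcl.String("projects/p/locations/global/hubs/my-hub"),
		Project:  dcl.String("p"),
		Location: dcl.String("us-central1"),
		// validate() demands exactly one linked* field, so set just one.
		LinkedVPCNetwork: &networkconnectivity.SpokeLinkedVPCNetwork{
			Uri: dcl.String("projects/p/global/networks/default"),
		},
	}
	// With BlockCreation, a missing spoke yields dcl.ApplyInfeasibleError
	// instead of a create; an existing one is acquired and reconciled.
	return c.ApplySpoke(ctx, spoke, dcl.WithLifecycleParam(dcl.BlockCreation))
}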
- - // 2.3: Lifecycle Directive Check - var create bool - lp := dcl.FetchLifecycleParams(opts) - if initial == nil { - if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} - } - create = true - } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), - } - } else { - for _, d := range diffs { - if d.RequiresRecreate { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), - } - } - if dcl.HasLifecycleParam(lp, dcl.BlockModification) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} - } - } - } - - // 2.4 Imperative Request Planning - var ops []spokeApiOperation - if create { - ops = append(ops, &createSpokeOperation{}) - } else { - for _, d := range diffs { - ops = append(ops, d.UpdateOp) - } - } - c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) - - // 2.5 Request Actuation - for _, op := range ops { - c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) - if err := op.do(ctx, desired, c); err != nil { - c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) - return nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) - } - return applySpokeDiff(c, ctx, desired, rawDesired, ops, opts...) -} - -func applySpokeDiff(c *Client, ctx context.Context, desired *Spoke, rawDesired *Spoke, ops []spokeApiOperation, opts ...dcl.ApplyOption) (*Spoke, error) { - // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") - rawNew, err := c.GetSpoke(ctx, desired) - if err != nil { - return nil, err - } - // Get additional values from the first response. - // These values should be merged into the newState above. - if len(ops) > 0 { - lastOp := ops[len(ops)-1] - if o, ok := lastOp.(*createSpokeOperation); ok { - if r, hasR := o.FirstResponse(); hasR { - - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") - - fullResp, err := unmarshalMapSpoke(r, c, rawDesired) - if err != nil { - return nil, err - } - - rawNew, err = canonicalizeSpokeNewState(c, rawNew, fullResp) - if err != nil { - return nil, err - } - } - } - } - - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) - // 3.2b Canonicalization of raw new state using raw desired state - newState, err := canonicalizeSpokeNewState(c, rawNew, rawDesired) - if err != nil { - return rawNew, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) - // 3.3 Comparison of the new state and raw desired state. - // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE - newDesired, err := canonicalizeSpokeDesiredState(rawDesired, newState) - if err != nil { - return newState, err - } - - if err := postReadExtractSpokeFields(newState); err != nil { - return newState, err - } - - // Need to ensure any transformations made here match acceptably in differ. 
- if err := postReadExtractSpokeFields(newDesired); err != nil { - return newState, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) - newDiffs, err := diffSpoke(c, newDesired, newState) - if err != nil { - return newState, err - } - - if len(newDiffs) == 0 { - c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") - } else { - c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) - diffMessages := make([]string, len(newDiffs)) - for i, d := range newDiffs { - diffMessages[i] = fmt.Sprintf("%v", d) - } - return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} - } - c.Config.Logger.InfoWithContext(ctx, "Done Apply.") - return newState, nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/spoke.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/spoke.yaml deleted file mode 100644 index 31236c16890..00000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/spoke.yaml +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright 2024 Google LLC. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -info: - title: NetworkConnectivity/Spoke - description: The NetworkConnectivity Spoke resource - x-dcl-struct-name: Spoke - x-dcl-has-iam: false -paths: - get: - description: The function used to get information about a Spoke - parameters: - - name: spoke - required: true - description: A full instance of a Spoke - apply: - description: The function used to apply information about a Spoke - parameters: - - name: spoke - required: true - description: A full instance of a Spoke - delete: - description: The function used to delete a Spoke - parameters: - - name: spoke - required: true - description: A full instance of a Spoke - deleteAll: - description: The function used to delete all Spoke - parameters: - - name: project - required: true - schema: - type: string - - name: location - required: true - schema: - type: string - list: - description: The function used to list information about many Spoke - parameters: - - name: project - required: true - schema: - type: string - - name: location - required: true - schema: - type: string -components: - schemas: - Spoke: - title: Spoke - x-dcl-id: projects/{{project}}/locations/{{location}}/spokes/{{name}} - x-dcl-parent-container: project - x-dcl-labels: labels - x-dcl-has-create: true - x-dcl-has-iam: false - x-dcl-read-timeout: 0 - x-dcl-apply-timeout: 0 - x-dcl-delete-timeout: 0 - type: object - required: - - name - - hub - - project - - location - properties: - createTime: - type: string - format: date-time - x-dcl-go-name: CreateTime - readOnly: true - description: Output only. The time the spoke was created. 
- x-kubernetes-immutable: true - description: - type: string - x-dcl-go-name: Description - description: An optional description of the spoke. - hub: - type: string - x-dcl-go-name: Hub - description: Immutable. The URI of the hub that this spoke is attached to. - x-kubernetes-immutable: true - x-dcl-references: - - resource: Networkconnectivity/Hub - field: name - labels: - type: object - additionalProperties: - type: string - x-dcl-go-name: Labels - description: Optional labels in key:value format. For more information about - labels, see [Requirements for labels](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements). - linkedInterconnectAttachments: - type: object - x-dcl-go-name: LinkedInterconnectAttachments - x-dcl-go-type: SpokeLinkedInterconnectAttachments - description: A collection of VLAN attachment resources. These resources - should be redundant attachments that all advertise the same prefixes to - Google Cloud. Alternatively, in active/passive configurations, all attachments - should be capable of advertising the same prefixes. - x-kubernetes-immutable: true - x-dcl-conflicts: - - linkedVpnTunnels - - linkedRouterApplianceInstances - - linkedVPCNetwork - required: - - uris - - siteToSiteDataTransfer - properties: - siteToSiteDataTransfer: - type: boolean - x-dcl-go-name: SiteToSiteDataTransfer - description: A value that controls whether site-to-site data transfer - is enabled for these resources. Note that data transfer is available - only in supported locations. - x-kubernetes-immutable: true - uris: - type: array - x-dcl-go-name: Uris - description: The URIs of linked interconnect attachment resources - x-kubernetes-immutable: true - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - x-dcl-references: - - resource: Compute/InterconnectAttachment - field: selfLink - linkedRouterApplianceInstances: - type: object - x-dcl-go-name: LinkedRouterApplianceInstances - x-dcl-go-type: SpokeLinkedRouterApplianceInstances - description: The URIs of linked Router appliance resources - x-kubernetes-immutable: true - x-dcl-conflicts: - - linkedVpnTunnels - - linkedInterconnectAttachments - - linkedVPCNetwork - required: - - instances - - siteToSiteDataTransfer - properties: - instances: - type: array - x-dcl-go-name: Instances - description: The list of router appliance instances - x-kubernetes-immutable: true - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: SpokeLinkedRouterApplianceInstancesInstances - properties: - ipAddress: - type: string - x-dcl-go-name: IPAddress - description: The IP address on the VM to use for peering. - x-kubernetes-immutable: true - virtualMachine: - type: string - x-dcl-go-name: VirtualMachine - description: The URI of the virtual machine resource - x-kubernetes-immutable: true - x-dcl-references: - - resource: Compute/Instance - field: selfLink - siteToSiteDataTransfer: - type: boolean - x-dcl-go-name: SiteToSiteDataTransfer - description: A value that controls whether site-to-site data transfer - is enabled for these resources. Note that data transfer is available - only in supported locations. - x-kubernetes-immutable: true - linkedVPCNetwork: - type: object - x-dcl-go-name: LinkedVPCNetwork - x-dcl-go-type: SpokeLinkedVPCNetwork - description: VPC network that is associated with the spoke. 
- x-kubernetes-immutable: true - x-dcl-conflicts: - - linkedVpnTunnels - - linkedInterconnectAttachments - - linkedRouterApplianceInstances - required: - - uri - properties: - excludeExportRanges: - type: array - x-dcl-go-name: ExcludeExportRanges - description: IP ranges encompassing the subnets to be excluded from - peering. - x-kubernetes-immutable: true - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - uri: - type: string - x-dcl-go-name: Uri - description: The URI of the VPC network resource. - x-kubernetes-immutable: true - x-dcl-references: - - resource: Compute/Network - field: selfLink - linkedVpnTunnels: - type: object - x-dcl-go-name: LinkedVpnTunnels - x-dcl-go-type: SpokeLinkedVpnTunnels - description: The URIs of linked VPN tunnel resources - x-kubernetes-immutable: true - x-dcl-conflicts: - - linkedInterconnectAttachments - - linkedRouterApplianceInstances - - linkedVPCNetwork - required: - - uris - - siteToSiteDataTransfer - properties: - siteToSiteDataTransfer: - type: boolean - x-dcl-go-name: SiteToSiteDataTransfer - description: A value that controls whether site-to-site data transfer - is enabled for these resources. Note that data transfer is available - only in supported locations. - x-kubernetes-immutable: true - uris: - type: array - x-dcl-go-name: Uris - description: The URIs of linked VPN tunnel resources. - x-kubernetes-immutable: true - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - x-dcl-references: - - resource: Compute/VpnTunnel - field: selfLink - location: - type: string - x-dcl-go-name: Location - description: The location for the resource - x-kubernetes-immutable: true - x-dcl-parameter: true - name: - type: string - x-dcl-go-name: Name - description: Immutable. The name of the spoke. Spoke names must be unique. - x-kubernetes-immutable: true - x-dcl-has-long-form: true - project: - type: string - x-dcl-go-name: Project - description: The project for the resource - x-kubernetes-immutable: true - x-dcl-references: - - resource: Cloudresourcemanager/Project - field: name - parent: true - x-dcl-parameter: true - state: - type: string - x-dcl-go-name: State - x-dcl-go-type: SpokeStateEnum - readOnly: true - description: 'Output only. The current lifecycle state of this spoke. Possible - values: STATE_UNSPECIFIED, CREATING, ACTIVE, DELETING' - x-kubernetes-immutable: true - enum: - - STATE_UNSPECIFIED - - CREATING - - ACTIVE - - DELETING - uniqueId: - type: string - x-dcl-go-name: UniqueId - readOnly: true - description: Output only. The Google-generated UUID for the spoke. This - value is unique across all spoke resources. If a spoke is deleted and - another with the same name is created, the new spoke is assigned a different - unique_id. - x-kubernetes-immutable: true - updateTime: - type: string - format: date-time - x-dcl-go-name: UpdateTime - readOnly: true - description: Output only. The time the spoke was last updated. 
- x-kubernetes-immutable: true diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/spoke_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/spoke_internal.go deleted file mode 100644 index ca74abf4f2b..00000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/spoke_internal.go +++ /dev/null @@ -1,2607 +0,0 @@ -// Copyright 2024 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package networkconnectivity - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "strings" - - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl/operations" -) - -func (r *Spoke) validate() error { - - if err := dcl.ValidateExactlyOneOfFieldsSet([]string{"LinkedVpnTunnels", "LinkedInterconnectAttachments", "LinkedRouterApplianceInstances", "LinkedVPCNetwork"}, r.LinkedVpnTunnels, r.LinkedInterconnectAttachments, r.LinkedRouterApplianceInstances, r.LinkedVPCNetwork); err != nil { - return err - } - if err := dcl.Required(r, "name"); err != nil { - return err - } - if err := dcl.Required(r, "hub"); err != nil { - return err - } - if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { - return err - } - if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.LinkedVpnTunnels) { - if err := r.LinkedVpnTunnels.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.LinkedInterconnectAttachments) { - if err := r.LinkedInterconnectAttachments.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.LinkedRouterApplianceInstances) { - if err := r.LinkedRouterApplianceInstances.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.LinkedVPCNetwork) { - if err := r.LinkedVPCNetwork.validate(); err != nil { - return err - } - } - return nil -} -func (r *SpokeLinkedVpnTunnels) validate() error { - if err := dcl.Required(r, "uris"); err != nil { - return err - } - if err := dcl.Required(r, "siteToSiteDataTransfer"); err != nil { - return err - } - return nil -} -func (r *SpokeLinkedInterconnectAttachments) validate() error { - if err := dcl.Required(r, "uris"); err != nil { - return err - } - if err := dcl.Required(r, "siteToSiteDataTransfer"); err != nil { - return err - } - return nil -} -func (r *SpokeLinkedRouterApplianceInstances) validate() error { - if err := dcl.Required(r, "instances"); err != nil { - return err - } - if err := dcl.Required(r, "siteToSiteDataTransfer"); err != nil { - return err - } - return nil -} -func (r *SpokeLinkedRouterApplianceInstancesInstances) validate() error { - return nil -} -func (r 
*SpokeLinkedVPCNetwork) validate() error { - if err := dcl.Required(r, "uri"); err != nil { - return err - } - return nil -} -func (r *Spoke) basePath() string { - params := map[string]interface{}{} - return dcl.Nprintf("https://networkconnectivity.googleapis.com/v1/", params) -} - -func (r *Spoke) getURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/locations/{{location}}/spokes/{{name}}", nr.basePath(), userBasePath, params), nil -} - -func (r *Spoke) listURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - } - return dcl.URL("projects/{{project}}/locations/{{location}}/spokes", nr.basePath(), userBasePath, params), nil - -} - -func (r *Spoke) createURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/locations/{{location}}/spokes?spokeId={{name}}", nr.basePath(), userBasePath, params), nil - -} - -func (r *Spoke) deleteURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/locations/{{location}}/spokes/{{name}}", nr.basePath(), userBasePath, params), nil -} - -// spokeApiOperation represents a mutable operation in the underlying REST -// API such as Create, Update, or Delete. -type spokeApiOperation interface { - do(context.Context, *Spoke, *Client) error -} - -// newUpdateSpokeUpdateSpokeRequest creates a request for an -// Spoke resource's UpdateSpoke update type by filling in the update -// fields based on the intended state of the resource. -func newUpdateSpokeUpdateSpokeRequest(ctx context.Context, f *Spoke, c *Client) (map[string]interface{}, error) { - req := map[string]interface{}{} - res := f - _ = res - - if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { - req["labels"] = v - } - if v := f.Description; !dcl.IsEmptyValueIndirect(v) { - req["description"] = v - } - return req, nil -} - -// marshalUpdateSpokeUpdateSpokeRequest converts the update into -// the final JSON request body. -func marshalUpdateSpokeUpdateSpokeRequest(c *Client, m map[string]interface{}) ([]byte, error) { - - return json.Marshal(m) -} - -type updateSpokeUpdateSpokeOperation struct { - // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. - // Usually it will be nil - this is to prevent us from accidentally depending on apply - // options, which should usually be unnecessary. - ApplyOptions []dcl.ApplyOption - FieldDiffs []*dcl.FieldDiff -} - -// do creates a request and sends it to the appropriate URL. In most operations, -// do will transcribe a subset of the resource into a request object and send a -// PUT request to a single URL. 
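A note on the update path implemented below: newUpdateSpokeUpdateSpokeRequest above only ever carries labels and description, so those are the only fields a Spoke can change in place; any other diff trips the recreate check in applySpokeHelper. A hedged sketch, with the helper name invented and dcl.String assumed from the dcl package:

func retag(ctx context.Context, c *networkconnectivity.Client, spoke *networkconnectivity.Spoke) (*networkconnectivity.Spoke, error) {
	spoke.Labels = map[string]string{"team": "network"}
	spoke.Description = dcl.String("managed by terraform")
	// ApplySpoke diffs old vs. new state, then the UpdateSpoke operation
	// PATCHes .../spokes/{{name}} with an updateMask built by dcl.UpdateMask.
	return c.ApplySpoke(ctx, spoke)
}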
- -func (op *updateSpokeUpdateSpokeOperation) do(ctx context.Context, r *Spoke, c *Client) error { - _, err := c.GetSpoke(ctx, r) - if err != nil { - return err - } - - u, err := r.updateURL(c.Config.BasePath, "UpdateSpoke") - if err != nil { - return err - } - mask := dcl.UpdateMask(op.FieldDiffs) - u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) - if err != nil { - return err - } - - req, err := newUpdateSpokeUpdateSpokeRequest(ctx, r, c) - if err != nil { - return err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) - body, err := marshalUpdateSpokeUpdateSpokeRequest(c, req) - if err != nil { - return err - } - resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) - if err != nil { - return err - } - - var o operations.StandardGCPOperation - if err := dcl.ParseResponse(resp.Response, &o); err != nil { - return err - } - err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") - - if err != nil { - return err - } - - return nil -} - -func (c *Client) listSpokeRaw(ctx context.Context, r *Spoke, pageToken string, pageSize int32) ([]byte, error) { - u, err := r.urlNormalized().listURL(c.Config.BasePath) - if err != nil { - return nil, err - } - - m := make(map[string]string) - if pageToken != "" { - m["pageToken"] = pageToken - } - - if pageSize != SpokeMaxPage { - m["pageSize"] = fmt.Sprintf("%v", pageSize) - } - - u, err = dcl.AddQueryParams(u, m) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - return ioutil.ReadAll(resp.Response.Body) -} - -type listSpokeOperation struct { - Spokes []map[string]interface{} `json:"spokes"` - Token string `json:"nextPageToken"` -} - -func (c *Client) listSpoke(ctx context.Context, r *Spoke, pageToken string, pageSize int32) ([]*Spoke, string, error) { - b, err := c.listSpokeRaw(ctx, r, pageToken, pageSize) - if err != nil { - return nil, "", err - } - - var m listSpokeOperation - if err := json.Unmarshal(b, &m); err != nil { - return nil, "", err - } - - var l []*Spoke - for _, v := range m.Spokes { - res, err := unmarshalMapSpoke(v, c, r) - if err != nil { - return nil, m.Token, err - } - res.Project = r.Project - res.Location = r.Location - l = append(l, res) - } - - return l, m.Token, nil -} - -func (c *Client) deleteAllSpoke(ctx context.Context, f func(*Spoke) bool, resources []*Spoke) error { - var errors []string - for _, res := range resources { - if f(res) { - // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. - err := c.DeleteSpoke(ctx, res) - if err != nil { - errors = append(errors, err.Error()) - } - } - } - if len(errors) > 0 { - return fmt.Errorf("%v", strings.Join(errors, "\n")) - } else { - return nil - } -} - -type deleteSpokeOperation struct{} - -func (op *deleteSpokeOperation) do(ctx context.Context, r *Spoke, c *Client) error { - r, err := c.GetSpoke(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - c.Config.Logger.InfoWithContextf(ctx, "Spoke not found, returning. Original error: %v", err) - return nil - } - c.Config.Logger.WarningWithContextf(ctx, "GetSpoke checking for existence. 
error: %v", err) - return err - } - - u, err := r.deleteURL(c.Config.BasePath) - if err != nil { - return err - } - - // Delete should never have a body - body := &bytes.Buffer{} - resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) - if err != nil { - return err - } - - // wait for object to be deleted. - var o operations.StandardGCPOperation - if err := dcl.ParseResponse(resp.Response, &o); err != nil { - return err - } - if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { - return err - } - - // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. - // This is the reason we are adding retry to handle that case. - retriesRemaining := 10 - dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - _, err := c.GetSpoke(ctx, r) - if dcl.IsNotFound(err) { - return nil, nil - } - if retriesRemaining > 0 { - retriesRemaining-- - return &dcl.RetryDetails{}, dcl.OperationNotDone{} - } - return nil, dcl.NotDeletedError{ExistingResource: r} - }, c.Config.RetryProvider) - return nil -} - -// Create operations are similar to Update operations, although they do not have -// specific request objects. The Create request object is the json encoding of -// the resource, which is modified by res.marshal to form the base request body. -type createSpokeOperation struct { - response map[string]interface{} -} - -func (op *createSpokeOperation) FirstResponse() (map[string]interface{}, bool) { - return op.response, len(op.response) > 0 -} - -func (op *createSpokeOperation) do(ctx context.Context, r *Spoke, c *Client) error { - c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) - u, err := r.createURL(c.Config.BasePath) - if err != nil { - return err - } - - req, err := r.marshal(c) - if err != nil { - return err - } - resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) - if err != nil { - return err - } - // wait for object to be created. - var o operations.StandardGCPOperation - if err := dcl.ParseResponse(resp.Response, &o); err != nil { - return err - } - if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { - c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) - return err - } - c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") - op.response, _ = o.FirstResponse() - - if _, err := c.GetSpoke(ctx, r); err != nil { - c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) - return err - } - - return nil -} - -func (c *Client) getSpokeRaw(ctx context.Context, r *Spoke) ([]byte, error) { - - u, err := r.getURL(c.Config.BasePath) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - b, err := ioutil.ReadAll(resp.Response.Body) - if err != nil { - return nil, err - } - - return b, nil -} - -func (c *Client) spokeDiffsForRawDesired(ctx context.Context, rawDesired *Spoke, opts ...dcl.ApplyOption) (initial, desired *Spoke, diffs []*dcl.FieldDiff, err error) { - c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") - // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. 
- var fetchState *Spoke - if sh := dcl.FetchStateHint(opts); sh != nil { - if r, ok := sh.(*Spoke); !ok { - c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Spoke, got %T", sh) - } else { - fetchState = r - } - } - if fetchState == nil { - fetchState = rawDesired - } - - // 1.2: Retrieval of raw initial state from API - rawInitial, err := c.GetSpoke(ctx, fetchState) - if rawInitial == nil { - if !dcl.IsNotFound(err) { - c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Spoke resource already exists: %s", err) - return nil, nil, nil, fmt.Errorf("failed to retrieve Spoke resource: %v", err) - } - c.Config.Logger.InfoWithContext(ctx, "Found that Spoke resource did not exist.") - // Perform canonicalization to pick up defaults. - desired, err = canonicalizeSpokeDesiredState(rawDesired, rawInitial) - return nil, desired, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Spoke: %v", rawInitial) - c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Spoke: %v", rawDesired) - - // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. - if err := extractSpokeFields(rawInitial); err != nil { - return nil, nil, nil, err - } - - // 1.3: Canonicalize raw initial state into initial state. - initial, err = canonicalizeSpokeInitialState(rawInitial, rawDesired) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Spoke: %v", initial) - - // 1.4: Canonicalize raw desired state into desired state. - desired, err = canonicalizeSpokeDesiredState(rawDesired, rawInitial, opts...) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Spoke: %v", desired) - - // 2.1: Comparison of initial and desired state. - diffs, err = diffSpoke(c, desired, initial, opts...) - return initial, desired, diffs, err -} - -func canonicalizeSpokeInitialState(rawInitial, rawDesired *Spoke) (*Spoke, error) { - // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. - - if !dcl.IsZeroValue(rawInitial.LinkedVpnTunnels) { - // Check if anything else is set. - if dcl.AnySet(rawInitial.LinkedInterconnectAttachments, rawInitial.LinkedRouterApplianceInstances, rawInitial.LinkedVPCNetwork) { - rawInitial.LinkedVpnTunnels = EmptySpokeLinkedVpnTunnels - } - } - - if !dcl.IsZeroValue(rawInitial.LinkedInterconnectAttachments) { - // Check if anything else is set. - if dcl.AnySet(rawInitial.LinkedVpnTunnels, rawInitial.LinkedRouterApplianceInstances, rawInitial.LinkedVPCNetwork) { - rawInitial.LinkedInterconnectAttachments = EmptySpokeLinkedInterconnectAttachments - } - } - - if !dcl.IsZeroValue(rawInitial.LinkedRouterApplianceInstances) { - // Check if anything else is set. - if dcl.AnySet(rawInitial.LinkedVpnTunnels, rawInitial.LinkedInterconnectAttachments, rawInitial.LinkedVPCNetwork) { - rawInitial.LinkedRouterApplianceInstances = EmptySpokeLinkedRouterApplianceInstances - } - } - - if !dcl.IsZeroValue(rawInitial.LinkedVPCNetwork) { - // Check if anything else is set. 
- if dcl.AnySet(rawInitial.LinkedVpnTunnels, rawInitial.LinkedInterconnectAttachments, rawInitial.LinkedRouterApplianceInstances) { - rawInitial.LinkedVPCNetwork = EmptySpokeLinkedVPCNetwork - } - } - - return rawInitial, nil -} - -/* -* Canonicalizers -* -* These are responsible for converting either a user-specified config or a -* GCP API response to a standard format that can be used for difference checking. -* */ - -func canonicalizeSpokeDesiredState(rawDesired, rawInitial *Spoke, opts ...dcl.ApplyOption) (*Spoke, error) { - - if rawInitial == nil { - // Since the initial state is empty, the desired state is all we have. - // We canonicalize the remaining nested objects with nil to pick up defaults. - rawDesired.LinkedVpnTunnels = canonicalizeSpokeLinkedVpnTunnels(rawDesired.LinkedVpnTunnels, nil, opts...) - rawDesired.LinkedInterconnectAttachments = canonicalizeSpokeLinkedInterconnectAttachments(rawDesired.LinkedInterconnectAttachments, nil, opts...) - rawDesired.LinkedRouterApplianceInstances = canonicalizeSpokeLinkedRouterApplianceInstances(rawDesired.LinkedRouterApplianceInstances, nil, opts...) - rawDesired.LinkedVPCNetwork = canonicalizeSpokeLinkedVPCNetwork(rawDesired.LinkedVPCNetwork, nil, opts...) - - return rawDesired, nil - } - canonicalDesired := &Spoke{} - if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { - canonicalDesired.Name = rawInitial.Name - } else { - canonicalDesired.Name = rawDesired.Name - } - if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - canonicalDesired.Labels = rawInitial.Labels - } else { - canonicalDesired.Labels = rawDesired.Labels - } - if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) { - canonicalDesired.Description = rawInitial.Description - } else { - canonicalDesired.Description = rawDesired.Description - } - if dcl.IsZeroValue(rawDesired.Hub) || (dcl.IsEmptyValueIndirect(rawDesired.Hub) && dcl.IsEmptyValueIndirect(rawInitial.Hub)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - canonicalDesired.Hub = rawInitial.Hub - } else { - canonicalDesired.Hub = rawDesired.Hub - } - canonicalDesired.LinkedVpnTunnels = canonicalizeSpokeLinkedVpnTunnels(rawDesired.LinkedVpnTunnels, rawInitial.LinkedVpnTunnels, opts...) - canonicalDesired.LinkedInterconnectAttachments = canonicalizeSpokeLinkedInterconnectAttachments(rawDesired.LinkedInterconnectAttachments, rawInitial.LinkedInterconnectAttachments, opts...) - canonicalDesired.LinkedRouterApplianceInstances = canonicalizeSpokeLinkedRouterApplianceInstances(rawDesired.LinkedRouterApplianceInstances, rawInitial.LinkedRouterApplianceInstances, opts...) - canonicalDesired.LinkedVPCNetwork = canonicalizeSpokeLinkedVPCNetwork(rawDesired.LinkedVPCNetwork, rawInitial.LinkedVPCNetwork, opts...) - if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { - canonicalDesired.Project = rawInitial.Project - } else { - canonicalDesired.Project = rawDesired.Project - } - if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { - canonicalDesired.Location = rawInitial.Location - } else { - canonicalDesired.Location = rawDesired.Location - } - - if canonicalDesired.LinkedVpnTunnels != nil { - // Check if anything else is set. 
- if dcl.AnySet(rawDesired.LinkedInterconnectAttachments, rawDesired.LinkedRouterApplianceInstances, rawDesired.LinkedVPCNetwork) { - canonicalDesired.LinkedVpnTunnels = EmptySpokeLinkedVpnTunnels - } - } - - if canonicalDesired.LinkedInterconnectAttachments != nil { - // Check if anything else is set. - if dcl.AnySet(rawDesired.LinkedVpnTunnels, rawDesired.LinkedRouterApplianceInstances, rawDesired.LinkedVPCNetwork) { - canonicalDesired.LinkedInterconnectAttachments = EmptySpokeLinkedInterconnectAttachments - } - } - - if canonicalDesired.LinkedRouterApplianceInstances != nil { - // Check if anything else is set. - if dcl.AnySet(rawDesired.LinkedVpnTunnels, rawDesired.LinkedInterconnectAttachments, rawDesired.LinkedVPCNetwork) { - canonicalDesired.LinkedRouterApplianceInstances = EmptySpokeLinkedRouterApplianceInstances - } - } - - if canonicalDesired.LinkedVPCNetwork != nil { - // Check if anything else is set. - if dcl.AnySet(rawDesired.LinkedVpnTunnels, rawDesired.LinkedInterconnectAttachments, rawDesired.LinkedRouterApplianceInstances) { - canonicalDesired.LinkedVPCNetwork = EmptySpokeLinkedVPCNetwork - } - } - - return canonicalDesired, nil -} - -func canonicalizeSpokeNewState(c *Client, rawNew, rawDesired *Spoke) (*Spoke, error) { - - if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { - rawNew.Name = rawDesired.Name - } else { - if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { - rawNew.Name = rawDesired.Name - } - } - - if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { - rawNew.CreateTime = rawDesired.CreateTime - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { - rawNew.UpdateTime = rawDesired.UpdateTime - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) { - rawNew.Labels = rawDesired.Labels - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) { - rawNew.Description = rawDesired.Description - } else { - if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) { - rawNew.Description = rawDesired.Description - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Hub) && dcl.IsEmptyValueIndirect(rawDesired.Hub) { - rawNew.Hub = rawDesired.Hub - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.LinkedVpnTunnels) && dcl.IsEmptyValueIndirect(rawDesired.LinkedVpnTunnels) { - rawNew.LinkedVpnTunnels = rawDesired.LinkedVpnTunnels - } else { - rawNew.LinkedVpnTunnels = canonicalizeNewSpokeLinkedVpnTunnels(c, rawDesired.LinkedVpnTunnels, rawNew.LinkedVpnTunnels) - } - - if dcl.IsEmptyValueIndirect(rawNew.LinkedInterconnectAttachments) && dcl.IsEmptyValueIndirect(rawDesired.LinkedInterconnectAttachments) { - rawNew.LinkedInterconnectAttachments = rawDesired.LinkedInterconnectAttachments - } else { - rawNew.LinkedInterconnectAttachments = canonicalizeNewSpokeLinkedInterconnectAttachments(c, rawDesired.LinkedInterconnectAttachments, rawNew.LinkedInterconnectAttachments) - } - - if dcl.IsEmptyValueIndirect(rawNew.LinkedRouterApplianceInstances) && dcl.IsEmptyValueIndirect(rawDesired.LinkedRouterApplianceInstances) { - rawNew.LinkedRouterApplianceInstances = rawDesired.LinkedRouterApplianceInstances - } else { - rawNew.LinkedRouterApplianceInstances = canonicalizeNewSpokeLinkedRouterApplianceInstances(c, rawDesired.LinkedRouterApplianceInstances, rawNew.LinkedRouterApplianceInstances) - } - - if 
dcl.IsEmptyValueIndirect(rawNew.LinkedVPCNetwork) && dcl.IsEmptyValueIndirect(rawDesired.LinkedVPCNetwork) { - rawNew.LinkedVPCNetwork = rawDesired.LinkedVPCNetwork - } else { - rawNew.LinkedVPCNetwork = canonicalizeNewSpokeLinkedVPCNetwork(c, rawDesired.LinkedVPCNetwork, rawNew.LinkedVPCNetwork) - } - - if dcl.IsEmptyValueIndirect(rawNew.UniqueId) && dcl.IsEmptyValueIndirect(rawDesired.UniqueId) { - rawNew.UniqueId = rawDesired.UniqueId - } else { - if dcl.StringCanonicalize(rawDesired.UniqueId, rawNew.UniqueId) { - rawNew.UniqueId = rawDesired.UniqueId - } - } - - if dcl.IsEmptyValueIndirect(rawNew.State) && dcl.IsEmptyValueIndirect(rawDesired.State) { - rawNew.State = rawDesired.State - } else { - } - - rawNew.Project = rawDesired.Project - - rawNew.Location = rawDesired.Location - - return rawNew, nil -} - -func canonicalizeSpokeLinkedVpnTunnels(des, initial *SpokeLinkedVpnTunnels, opts ...dcl.ApplyOption) *SpokeLinkedVpnTunnels { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &SpokeLinkedVpnTunnels{} - - if dcl.StringArrayCanonicalize(des.Uris, initial.Uris) { - cDes.Uris = initial.Uris - } else { - cDes.Uris = des.Uris - } - if dcl.BoolCanonicalize(des.SiteToSiteDataTransfer, initial.SiteToSiteDataTransfer) || dcl.IsZeroValue(des.SiteToSiteDataTransfer) { - cDes.SiteToSiteDataTransfer = initial.SiteToSiteDataTransfer - } else { - cDes.SiteToSiteDataTransfer = des.SiteToSiteDataTransfer - } - - return cDes -} - -func canonicalizeSpokeLinkedVpnTunnelsSlice(des, initial []SpokeLinkedVpnTunnels, opts ...dcl.ApplyOption) []SpokeLinkedVpnTunnels { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]SpokeLinkedVpnTunnels, 0, len(des)) - for _, d := range des { - cd := canonicalizeSpokeLinkedVpnTunnels(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]SpokeLinkedVpnTunnels, 0, len(des)) - for i, d := range des { - cd := canonicalizeSpokeLinkedVpnTunnels(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewSpokeLinkedVpnTunnels(c *Client, des, nw *SpokeLinkedVpnTunnels) *SpokeLinkedVpnTunnels { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for SpokeLinkedVpnTunnels while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringArrayCanonicalize(des.Uris, nw.Uris) { - nw.Uris = des.Uris - } - if dcl.BoolCanonicalize(des.SiteToSiteDataTransfer, nw.SiteToSiteDataTransfer) { - nw.SiteToSiteDataTransfer = des.SiteToSiteDataTransfer - } - - return nw -} - -func canonicalizeNewSpokeLinkedVpnTunnelsSet(c *Client, des, nw []SpokeLinkedVpnTunnels) []SpokeLinkedVpnTunnels { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []SpokeLinkedVpnTunnels - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareSpokeLinkedVpnTunnelsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewSpokeLinkedVpnTunnels(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
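[editor's note] The four linked* blocks behave as a oneof: when more than one is populated, the dcl.AnySet checks above force the competing blocks to their explicit Empty* sentinels so the API never receives two link types at once. The same idea in reduced form (the link type and enforceOneof are illustrative only):

type link struct{ set bool }

var emptyLink = link{} // stands in for the EmptySpoke* sentinels

// enforceOneof blanks the first half of the pair when both are set,
// mirroring the dcl.AnySet zeroing above.
func enforceOneof(vpn, vpc *link) (*link, *link) {
	if vpn != nil && vpc != nil && vpc.set {
		vpn = &emptyLink
	}
	return vpn, vpc
}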
- } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewSpokeLinkedVpnTunnelsSlice(c *Client, des, nw []SpokeLinkedVpnTunnels) []SpokeLinkedVpnTunnels { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []SpokeLinkedVpnTunnels - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewSpokeLinkedVpnTunnels(c, &d, &n)) - } - - return items -} - -func canonicalizeSpokeLinkedInterconnectAttachments(des, initial *SpokeLinkedInterconnectAttachments, opts ...dcl.ApplyOption) *SpokeLinkedInterconnectAttachments { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &SpokeLinkedInterconnectAttachments{} - - if dcl.StringArrayCanonicalize(des.Uris, initial.Uris) { - cDes.Uris = initial.Uris - } else { - cDes.Uris = des.Uris - } - if dcl.BoolCanonicalize(des.SiteToSiteDataTransfer, initial.SiteToSiteDataTransfer) || dcl.IsZeroValue(des.SiteToSiteDataTransfer) { - cDes.SiteToSiteDataTransfer = initial.SiteToSiteDataTransfer - } else { - cDes.SiteToSiteDataTransfer = des.SiteToSiteDataTransfer - } - - return cDes -} - -func canonicalizeSpokeLinkedInterconnectAttachmentsSlice(des, initial []SpokeLinkedInterconnectAttachments, opts ...dcl.ApplyOption) []SpokeLinkedInterconnectAttachments { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]SpokeLinkedInterconnectAttachments, 0, len(des)) - for _, d := range des { - cd := canonicalizeSpokeLinkedInterconnectAttachments(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]SpokeLinkedInterconnectAttachments, 0, len(des)) - for i, d := range des { - cd := canonicalizeSpokeLinkedInterconnectAttachments(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewSpokeLinkedInterconnectAttachments(c *Client, des, nw *SpokeLinkedInterconnectAttachments) *SpokeLinkedInterconnectAttachments { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for SpokeLinkedInterconnectAttachments while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringArrayCanonicalize(des.Uris, nw.Uris) { - nw.Uris = des.Uris - } - if dcl.BoolCanonicalize(des.SiteToSiteDataTransfer, nw.SiteToSiteDataTransfer) { - nw.SiteToSiteDataTransfer = des.SiteToSiteDataTransfer - } - - return nw -} - -func canonicalizeNewSpokeLinkedInterconnectAttachmentsSet(c *Client, des, nw []SpokeLinkedInterconnectAttachments) []SpokeLinkedInterconnectAttachments { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []SpokeLinkedInterconnectAttachments - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareSpokeLinkedInterconnectAttachmentsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewSpokeLinkedInterconnectAttachments(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
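[editor's note] The *Slice canonicalizers above deliberately give up on positional pairing when the desired and initial slices differ in length: each desired element is then canonicalized against nil and the later diff reports the mismatch. Reduced to its shape, reusing canonString from the earlier sketch:

// canonSlice pairs elements positionally only when the lengths agree;
// otherwise the desired slice passes through unchanged.
func canonSlice(des, initial []string) []string {
	if len(des) != len(initial) {
		return des
	}
	out := make([]string, 0, len(des))
	for i := range des {
		if c := canonString(&des[i], &initial[i]); c != nil {
			out = append(out, *c)
		}
	}
	return out
}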
- } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewSpokeLinkedInterconnectAttachmentsSlice(c *Client, des, nw []SpokeLinkedInterconnectAttachments) []SpokeLinkedInterconnectAttachments { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []SpokeLinkedInterconnectAttachments - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewSpokeLinkedInterconnectAttachments(c, &d, &n)) - } - - return items -} - -func canonicalizeSpokeLinkedRouterApplianceInstances(des, initial *SpokeLinkedRouterApplianceInstances, opts ...dcl.ApplyOption) *SpokeLinkedRouterApplianceInstances { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &SpokeLinkedRouterApplianceInstances{} - - cDes.Instances = canonicalizeSpokeLinkedRouterApplianceInstancesInstancesSlice(des.Instances, initial.Instances, opts...) - if dcl.BoolCanonicalize(des.SiteToSiteDataTransfer, initial.SiteToSiteDataTransfer) || dcl.IsZeroValue(des.SiteToSiteDataTransfer) { - cDes.SiteToSiteDataTransfer = initial.SiteToSiteDataTransfer - } else { - cDes.SiteToSiteDataTransfer = des.SiteToSiteDataTransfer - } - - return cDes -} - -func canonicalizeSpokeLinkedRouterApplianceInstancesSlice(des, initial []SpokeLinkedRouterApplianceInstances, opts ...dcl.ApplyOption) []SpokeLinkedRouterApplianceInstances { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]SpokeLinkedRouterApplianceInstances, 0, len(des)) - for _, d := range des { - cd := canonicalizeSpokeLinkedRouterApplianceInstances(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]SpokeLinkedRouterApplianceInstances, 0, len(des)) - for i, d := range des { - cd := canonicalizeSpokeLinkedRouterApplianceInstances(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewSpokeLinkedRouterApplianceInstances(c *Client, des, nw *SpokeLinkedRouterApplianceInstances) *SpokeLinkedRouterApplianceInstances { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for SpokeLinkedRouterApplianceInstances while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.Instances = canonicalizeNewSpokeLinkedRouterApplianceInstancesInstancesSlice(c, des.Instances, nw.Instances) - if dcl.BoolCanonicalize(des.SiteToSiteDataTransfer, nw.SiteToSiteDataTransfer) { - nw.SiteToSiteDataTransfer = des.SiteToSiteDataTransfer - } - - return nw -} - -func canonicalizeNewSpokeLinkedRouterApplianceInstancesSet(c *Client, des, nw []SpokeLinkedRouterApplianceInstances) []SpokeLinkedRouterApplianceInstances { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
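[editor's note] The *Set variants treat the two slices as unordered sets: each desired element is paired with the first actual element that compares diff-free, the match is consumed from the actual list, and unmatched actual elements are appended at the end. A generic sketch of that matching loop (mergeSet is illustrative, not DCL API):

func mergeSet[T any](des, nw []T, same func(a, b T) bool) []T {
	var items []T
	for _, d := range des {
		for i, n := range nw {
			if same(d, n) {
				items = append(items, n) // the real code canonicalizes the matched pair
				nw = append(nw[:i], nw[i+1:]...)
				break
			}
		}
	}
	// Keep actual elements that no desired element claimed.
	return append(items, nw...)
}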
- var items []SpokeLinkedRouterApplianceInstances - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareSpokeLinkedRouterApplianceInstancesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewSpokeLinkedRouterApplianceInstances(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewSpokeLinkedRouterApplianceInstancesSlice(c *Client, des, nw []SpokeLinkedRouterApplianceInstances) []SpokeLinkedRouterApplianceInstances { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []SpokeLinkedRouterApplianceInstances - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewSpokeLinkedRouterApplianceInstances(c, &d, &n)) - } - - return items -} - -func canonicalizeSpokeLinkedRouterApplianceInstancesInstances(des, initial *SpokeLinkedRouterApplianceInstancesInstances, opts ...dcl.ApplyOption) *SpokeLinkedRouterApplianceInstancesInstances { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &SpokeLinkedRouterApplianceInstancesInstances{} - - if dcl.IsZeroValue(des.VirtualMachine) || (dcl.IsEmptyValueIndirect(des.VirtualMachine) && dcl.IsEmptyValueIndirect(initial.VirtualMachine)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.VirtualMachine = initial.VirtualMachine - } else { - cDes.VirtualMachine = des.VirtualMachine - } - if dcl.StringCanonicalize(des.IPAddress, initial.IPAddress) || dcl.IsZeroValue(des.IPAddress) { - cDes.IPAddress = initial.IPAddress - } else { - cDes.IPAddress = des.IPAddress - } - - return cDes -} - -func canonicalizeSpokeLinkedRouterApplianceInstancesInstancesSlice(des, initial []SpokeLinkedRouterApplianceInstancesInstances, opts ...dcl.ApplyOption) []SpokeLinkedRouterApplianceInstancesInstances { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]SpokeLinkedRouterApplianceInstancesInstances, 0, len(des)) - for _, d := range des { - cd := canonicalizeSpokeLinkedRouterApplianceInstancesInstances(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]SpokeLinkedRouterApplianceInstancesInstances, 0, len(des)) - for i, d := range des { - cd := canonicalizeSpokeLinkedRouterApplianceInstancesInstances(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewSpokeLinkedRouterApplianceInstancesInstances(c *Client, des, nw *SpokeLinkedRouterApplianceInstancesInstances) *SpokeLinkedRouterApplianceInstancesInstances { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for SpokeLinkedRouterApplianceInstancesInstances while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.IPAddress, nw.IPAddress) { - nw.IPAddress = des.IPAddress - } - - return nw -} - -func canonicalizeNewSpokeLinkedRouterApplianceInstancesInstancesSet(c *Client, des, nw []SpokeLinkedRouterApplianceInstancesInstances) []SpokeLinkedRouterApplianceInstancesInstances { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []SpokeLinkedRouterApplianceInstancesInstances - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareSpokeLinkedRouterApplianceInstancesInstancesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewSpokeLinkedRouterApplianceInstancesInstances(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewSpokeLinkedRouterApplianceInstancesInstancesSlice(c *Client, des, nw []SpokeLinkedRouterApplianceInstancesInstances) []SpokeLinkedRouterApplianceInstancesInstances { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []SpokeLinkedRouterApplianceInstancesInstances - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewSpokeLinkedRouterApplianceInstancesInstances(c, &d, &n)) - } - - return items -} - -func canonicalizeSpokeLinkedVPCNetwork(des, initial *SpokeLinkedVPCNetwork, opts ...dcl.ApplyOption) *SpokeLinkedVPCNetwork { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &SpokeLinkedVPCNetwork{} - - if dcl.IsZeroValue(des.Uri) || (dcl.IsEmptyValueIndirect(des.Uri) && dcl.IsEmptyValueIndirect(initial.Uri)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Uri = initial.Uri - } else { - cDes.Uri = des.Uri - } - if dcl.StringArrayCanonicalize(des.ExcludeExportRanges, initial.ExcludeExportRanges) { - cDes.ExcludeExportRanges = initial.ExcludeExportRanges - } else { - cDes.ExcludeExportRanges = des.ExcludeExportRanges - } - - return cDes -} - -func canonicalizeSpokeLinkedVPCNetworkSlice(des, initial []SpokeLinkedVPCNetwork, opts ...dcl.ApplyOption) []SpokeLinkedVPCNetwork { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]SpokeLinkedVPCNetwork, 0, len(des)) - for _, d := range des { - cd := canonicalizeSpokeLinkedVPCNetwork(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]SpokeLinkedVPCNetwork, 0, len(des)) - for i, d := range des { - cd := canonicalizeSpokeLinkedVPCNetwork(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewSpokeLinkedVPCNetwork(c *Client, des, nw *SpokeLinkedVPCNetwork) *SpokeLinkedVPCNetwork { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for SpokeLinkedVPCNetwork while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringArrayCanonicalize(des.ExcludeExportRanges, nw.ExcludeExportRanges) { - nw.ExcludeExportRanges = des.ExcludeExportRanges - } - - return nw -} - -func canonicalizeNewSpokeLinkedVPCNetworkSet(c *Client, des, nw []SpokeLinkedVPCNetwork) []SpokeLinkedVPCNetwork { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []SpokeLinkedVPCNetwork - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareSpokeLinkedVPCNetworkNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewSpokeLinkedVPCNetwork(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewSpokeLinkedVPCNetworkSlice(c *Client, des, nw []SpokeLinkedVPCNetwork) []SpokeLinkedVPCNetwork { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []SpokeLinkedVPCNetwork - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewSpokeLinkedVPCNetwork(c, &d, &n)) - } - - return items -} - -// The differ returns a list of diffs, along with a list of operations that should be taken -// to remedy them. Right now, it does not attempt to consolidate operations - if several -// fields can be fixed with a patch update, it will perform the patch several times. -// Diffs on some fields will be ignored if the `desired` state has an empty (nil) -// value. This empty value indicates that the user does not care about the state for -// the field. Empty fields on the actual object will cause diffs. -// TODO(magic-modules-eng): for efficiency in some resources, add batching. -func diffSpoke(c *Client, desired, actual *Spoke, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { - if desired == nil || actual == nil { - return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) - } - - c.Config.Logger.Infof("Diff function called with desired state: %v", desired) - c.Config.Logger.Infof("Diff function called with actual state: %v", actual) - - var fn dcl.FieldName - var newDiffs []*dcl.FieldDiff - // New style diffs. - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateSpokeUpdateSpokeOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateSpokeUpdateSpokeOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Hub, actual.Hub, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Hub")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.LinkedVpnTunnels, actual.LinkedVpnTunnels, dcl.DiffInfo{ObjectFunction: compareSpokeLinkedVpnTunnelsNewStyle, EmptyObject: EmptySpokeLinkedVpnTunnels, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LinkedVpnTunnels")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.LinkedInterconnectAttachments, actual.LinkedInterconnectAttachments, dcl.DiffInfo{ObjectFunction: compareSpokeLinkedInterconnectAttachmentsNewStyle, EmptyObject: EmptySpokeLinkedInterconnectAttachments, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LinkedInterconnectAttachments")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.LinkedRouterApplianceInstances, actual.LinkedRouterApplianceInstances, dcl.DiffInfo{ObjectFunction: compareSpokeLinkedRouterApplianceInstancesNewStyle, EmptyObject: EmptySpokeLinkedRouterApplianceInstances, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LinkedRouterApplianceInstances")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.LinkedVPCNetwork, actual.LinkedVPCNetwork, dcl.DiffInfo{ObjectFunction: compareSpokeLinkedVPCNetworkNewStyle, EmptyObject: EmptySpokeLinkedVPCNetwork, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LinkedVpcNetwork")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.UniqueId, actual.UniqueId, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UniqueId")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if len(newDiffs) > 0 { - c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) - } - return newDiffs, nil -} -func compareSpokeLinkedVpnTunnelsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*SpokeLinkedVpnTunnels) - if !ok { - desiredNotPointer, ok := d.(SpokeLinkedVpnTunnels) - if !ok { - return nil, fmt.Errorf("obj %v is not a SpokeLinkedVpnTunnels or *SpokeLinkedVpnTunnels", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*SpokeLinkedVpnTunnels) - if !ok { - actualNotPointer, ok := a.(SpokeLinkedVpnTunnels) - if !ok { - return nil, fmt.Errorf("obj %v is not a SpokeLinkedVpnTunnels", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Uris, actual.Uris, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uris")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.SiteToSiteDataTransfer, actual.SiteToSiteDataTransfer, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SiteToSiteDataTransfer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareSpokeLinkedInterconnectAttachmentsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*SpokeLinkedInterconnectAttachments) - if !ok { - desiredNotPointer, ok := d.(SpokeLinkedInterconnectAttachments) - if !ok { - return nil, fmt.Errorf("obj %v is not a SpokeLinkedInterconnectAttachments or *SpokeLinkedInterconnectAttachments", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*SpokeLinkedInterconnectAttachments) - if !ok { - actualNotPointer, ok := a.(SpokeLinkedInterconnectAttachments) - if !ok { - return nil, fmt.Errorf("obj %v is not a SpokeLinkedInterconnectAttachments", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Uris, actual.Uris, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uris")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.SiteToSiteDataTransfer, actual.SiteToSiteDataTransfer, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SiteToSiteDataTransfer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareSpokeLinkedRouterApplianceInstancesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*SpokeLinkedRouterApplianceInstances) - if !ok { - desiredNotPointer, ok := d.(SpokeLinkedRouterApplianceInstances) - if !ok { - return nil, fmt.Errorf("obj %v is not a SpokeLinkedRouterApplianceInstances or *SpokeLinkedRouterApplianceInstances", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*SpokeLinkedRouterApplianceInstances) - if !ok { - actualNotPointer, ok := a.(SpokeLinkedRouterApplianceInstances) - if !ok { - return nil, fmt.Errorf("obj %v is not a SpokeLinkedRouterApplianceInstances", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Instances, actual.Instances, dcl.DiffInfo{ObjectFunction: compareSpokeLinkedRouterApplianceInstancesInstancesNewStyle, EmptyObject: EmptySpokeLinkedRouterApplianceInstancesInstances, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Instances")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.SiteToSiteDataTransfer, actual.SiteToSiteDataTransfer, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SiteToSiteDataTransfer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareSpokeLinkedRouterApplianceInstancesInstancesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*SpokeLinkedRouterApplianceInstancesInstances) - if !ok { - desiredNotPointer, ok := d.(SpokeLinkedRouterApplianceInstancesInstances) - if !ok { - return nil, fmt.Errorf("obj %v is not a SpokeLinkedRouterApplianceInstancesInstances or *SpokeLinkedRouterApplianceInstancesInstances", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*SpokeLinkedRouterApplianceInstancesInstances) - if !ok { - actualNotPointer, ok := a.(SpokeLinkedRouterApplianceInstancesInstances) - if !ok { - return nil, fmt.Errorf("obj %v is not a SpokeLinkedRouterApplianceInstancesInstances", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.VirtualMachine, actual.VirtualMachine, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("VirtualMachine")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.IPAddress, actual.IPAddress, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IpAddress")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
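[editor's note] Each compare*NewStyle helper first normalizes its interface{} arguments, accepting either the struct or a pointer to it before comparing. That normalization in isolation (the tunnels type is a placeholder; assumes fmt is imported):

type tunnels struct{ Uris []string }

func asTunnels(v interface{}) (*tunnels, error) {
	if p, ok := v.(*tunnels); ok {
		return p, nil
	}
	if val, ok := v.(tunnels); ok {
		return &val, nil
	}
	return nil, fmt.Errorf("obj %v is not a tunnels or *tunnels", v)
}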
- } - return diffs, nil -} - -func compareSpokeLinkedVPCNetworkNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*SpokeLinkedVPCNetwork) - if !ok { - desiredNotPointer, ok := d.(SpokeLinkedVPCNetwork) - if !ok { - return nil, fmt.Errorf("obj %v is not a SpokeLinkedVPCNetwork or *SpokeLinkedVPCNetwork", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*SpokeLinkedVPCNetwork) - if !ok { - actualNotPointer, ok := a.(SpokeLinkedVPCNetwork) - if !ok { - return nil, fmt.Errorf("obj %v is not a SpokeLinkedVPCNetwork", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Uri, actual.Uri, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ExcludeExportRanges, actual.ExcludeExportRanges, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ExcludeExportRanges")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -// urlNormalized returns a copy of the resource struct with values normalized -// for URL substitutions. For instance, it converts long-form self-links to -// short-form so they can be substituted in. -func (r *Spoke) urlNormalized() *Spoke { - normalized := dcl.Copy(*r).(Spoke) - normalized.Name = dcl.SelfLinkToName(r.Name) - normalized.Description = dcl.SelfLinkToName(r.Description) - normalized.Hub = dcl.SelfLinkToName(r.Hub) - normalized.UniqueId = dcl.SelfLinkToName(r.UniqueId) - normalized.Project = dcl.SelfLinkToName(r.Project) - normalized.Location = dcl.SelfLinkToName(r.Location) - return &normalized -} - -func (r *Spoke) updateURL(userBasePath, updateName string) (string, error) { - nr := r.urlNormalized() - if updateName == "UpdateSpoke" { - fields := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/locations/{{location}}/spokes/{{name}}", nr.basePath(), userBasePath, fields), nil - - } - - return "", fmt.Errorf("unknown update name: %s", updateName) -} - -// marshal encodes the Spoke resource into JSON for a Create request, and -// performs transformations from the resource schema to the API schema if -// necessary. -func (r *Spoke) marshal(c *Client) ([]byte, error) { - m, err := expandSpoke(c, r) - if err != nil { - return nil, fmt.Errorf("error marshalling Spoke: %w", err) - } - - return json.Marshal(m) -} - -// unmarshalSpoke decodes JSON responses into the Spoke resource schema. -func unmarshalSpoke(b []byte, c *Client, res *Spoke) (*Spoke, error) { - var m map[string]interface{} - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return unmarshalMapSpoke(m, c, res) -} - -func unmarshalMapSpoke(m map[string]interface{}, c *Client, res *Spoke) (*Spoke, error) { - - flattened := flattenSpoke(c, m, res) - if flattened == nil { - return nil, fmt.Errorf("attempted to flatten empty json object") - } - return flattened, nil -} - -// expandSpoke expands Spoke into a JSON request object. 
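[editor's note] updateURL above substitutes the normalized identity fields into a fixed path template; dcl.URL does the real work. A hypothetical stand-in showing the shape of the substitution (assumes strings is imported):

func fillURL(tmpl string, fields map[string]string) string {
	for k, v := range fields {
		tmpl = strings.ReplaceAll(tmpl, "{{"+k+"}}", v)
	}
	return tmpl
}

// fillURL("projects/{{project}}/locations/{{location}}/spokes/{{name}}",
//	map[string]string{"project": "p", "location": "us-west1", "name": "s"})
// yields "projects/p/locations/us-west1/spokes/s".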
-func expandSpoke(c *Client, f *Spoke) (map[string]interface{}, error) { - m := make(map[string]interface{}) - res := f - _ = res - if v, err := dcl.DeriveField("projects/%s/locations/%s/spokes/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Name)); err != nil { - return nil, fmt.Errorf("error expanding Name into name: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["name"] = v - } - if v := f.Labels; dcl.ValueShouldBeSent(v) { - m["labels"] = v - } - if v := f.Description; dcl.ValueShouldBeSent(v) { - m["description"] = v - } - if v := f.Hub; dcl.ValueShouldBeSent(v) { - m["hub"] = v - } - if v, err := expandSpokeLinkedVpnTunnels(c, f.LinkedVpnTunnels, res); err != nil { - return nil, fmt.Errorf("error expanding LinkedVpnTunnels into linkedVpnTunnels: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["linkedVpnTunnels"] = v - } - if v, err := expandSpokeLinkedInterconnectAttachments(c, f.LinkedInterconnectAttachments, res); err != nil { - return nil, fmt.Errorf("error expanding LinkedInterconnectAttachments into linkedInterconnectAttachments: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["linkedInterconnectAttachments"] = v - } - if v, err := expandSpokeLinkedRouterApplianceInstances(c, f.LinkedRouterApplianceInstances, res); err != nil { - return nil, fmt.Errorf("error expanding LinkedRouterApplianceInstances into linkedRouterApplianceInstances: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["linkedRouterApplianceInstances"] = v - } - if v, err := expandSpokeLinkedVPCNetwork(c, f.LinkedVPCNetwork, res); err != nil { - return nil, fmt.Errorf("error expanding LinkedVPCNetwork into linkedVpcNetwork: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["linkedVpcNetwork"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Project into project: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["project"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Location into location: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["location"] = v - } - - return m, nil -} - -// flattenSpoke flattens Spoke from a JSON request object into the -// Spoke type. 
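[editor's note] expandSpoke builds the Create request body field by field and omits empty values so the service applies its defaults; nested objects expand recursively through their own expanders. A reduced sketch of the skip-empty pattern:

func expandSketch(description *string, labels map[string]string) map[string]interface{} {
	m := map[string]interface{}{}
	if description != nil && *description != "" {
		m["description"] = *description
	}
	if len(labels) > 0 {
		m["labels"] = labels
	}
	return m
}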
-func flattenSpoke(c *Client, i interface{}, res *Spoke) *Spoke { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - if len(m) == 0 { - return nil - } - - resultRes := &Spoke{} - resultRes.Name = dcl.FlattenString(m["name"]) - resultRes.CreateTime = dcl.FlattenString(m["createTime"]) - resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) - resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"]) - resultRes.Description = dcl.FlattenString(m["description"]) - resultRes.Hub = dcl.FlattenString(m["hub"]) - resultRes.LinkedVpnTunnels = flattenSpokeLinkedVpnTunnels(c, m["linkedVpnTunnels"], res) - resultRes.LinkedInterconnectAttachments = flattenSpokeLinkedInterconnectAttachments(c, m["linkedInterconnectAttachments"], res) - resultRes.LinkedRouterApplianceInstances = flattenSpokeLinkedRouterApplianceInstances(c, m["linkedRouterApplianceInstances"], res) - resultRes.LinkedVPCNetwork = flattenSpokeLinkedVPCNetwork(c, m["linkedVpcNetwork"], res) - resultRes.UniqueId = dcl.FlattenString(m["uniqueId"]) - resultRes.State = flattenSpokeStateEnum(m["state"]) - resultRes.Project = dcl.FlattenString(m["project"]) - resultRes.Location = dcl.FlattenString(m["location"]) - - return resultRes -} - -// expandSpokeLinkedVpnTunnelsMap expands the contents of SpokeLinkedVpnTunnels into a JSON -// request object. -func expandSpokeLinkedVpnTunnelsMap(c *Client, f map[string]SpokeLinkedVpnTunnels, res *Spoke) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandSpokeLinkedVpnTunnels(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandSpokeLinkedVpnTunnelsSlice expands the contents of SpokeLinkedVpnTunnels into a JSON -// request object. -func expandSpokeLinkedVpnTunnelsSlice(c *Client, f []SpokeLinkedVpnTunnels, res *Spoke) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandSpokeLinkedVpnTunnels(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenSpokeLinkedVpnTunnelsMap flattens the contents of SpokeLinkedVpnTunnels from a JSON -// response object. -func flattenSpokeLinkedVpnTunnelsMap(c *Client, i interface{}, res *Spoke) map[string]SpokeLinkedVpnTunnels { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]SpokeLinkedVpnTunnels{} - } - - if len(a) == 0 { - return map[string]SpokeLinkedVpnTunnels{} - } - - items := make(map[string]SpokeLinkedVpnTunnels) - for k, item := range a { - items[k] = *flattenSpokeLinkedVpnTunnels(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenSpokeLinkedVpnTunnelsSlice flattens the contents of SpokeLinkedVpnTunnels from a JSON -// response object. -func flattenSpokeLinkedVpnTunnelsSlice(c *Client, i interface{}, res *Spoke) []SpokeLinkedVpnTunnels { - a, ok := i.([]interface{}) - if !ok { - return []SpokeLinkedVpnTunnels{} - } - - if len(a) == 0 { - return []SpokeLinkedVpnTunnels{} - } - - items := make([]SpokeLinkedVpnTunnels, 0, len(a)) - for _, item := range a { - items = append(items, *flattenSpokeLinkedVpnTunnels(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandSpokeLinkedVpnTunnels expands an instance of SpokeLinkedVpnTunnels into a JSON -// request object. 
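[editor's note] Flattening is the inverse direction: decode a loosely typed JSON object into the resource struct, returning nil whenever the shape is wrong rather than panicking. The defensive pattern in isolation:

func flattenName(i interface{}) *string {
	m, ok := i.(map[string]interface{})
	if !ok || len(m) == 0 {
		return nil
	}
	if s, ok := m["name"].(string); ok {
		return &s
	}
	return nil
}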
-func expandSpokeLinkedVpnTunnels(c *Client, f *SpokeLinkedVpnTunnels, res *Spoke) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Uris; v != nil { - m["uris"] = v - } - if v := f.SiteToSiteDataTransfer; !dcl.IsEmptyValueIndirect(v) { - m["siteToSiteDataTransfer"] = v - } - - return m, nil -} - -// flattenSpokeLinkedVpnTunnels flattens an instance of SpokeLinkedVpnTunnels from a JSON -// response object. -func flattenSpokeLinkedVpnTunnels(c *Client, i interface{}, res *Spoke) *SpokeLinkedVpnTunnels { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &SpokeLinkedVpnTunnels{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptySpokeLinkedVpnTunnels - } - r.Uris = dcl.FlattenStringSlice(m["uris"]) - r.SiteToSiteDataTransfer = dcl.FlattenBool(m["siteToSiteDataTransfer"]) - - return r -} - -// expandSpokeLinkedInterconnectAttachmentsMap expands the contents of SpokeLinkedInterconnectAttachments into a JSON -// request object. -func expandSpokeLinkedInterconnectAttachmentsMap(c *Client, f map[string]SpokeLinkedInterconnectAttachments, res *Spoke) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandSpokeLinkedInterconnectAttachments(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandSpokeLinkedInterconnectAttachmentsSlice expands the contents of SpokeLinkedInterconnectAttachments into a JSON -// request object. -func expandSpokeLinkedInterconnectAttachmentsSlice(c *Client, f []SpokeLinkedInterconnectAttachments, res *Spoke) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandSpokeLinkedInterconnectAttachments(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenSpokeLinkedInterconnectAttachmentsMap flattens the contents of SpokeLinkedInterconnectAttachments from a JSON -// response object. -func flattenSpokeLinkedInterconnectAttachmentsMap(c *Client, i interface{}, res *Spoke) map[string]SpokeLinkedInterconnectAttachments { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]SpokeLinkedInterconnectAttachments{} - } - - if len(a) == 0 { - return map[string]SpokeLinkedInterconnectAttachments{} - } - - items := make(map[string]SpokeLinkedInterconnectAttachments) - for k, item := range a { - items[k] = *flattenSpokeLinkedInterconnectAttachments(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenSpokeLinkedInterconnectAttachmentsSlice flattens the contents of SpokeLinkedInterconnectAttachments from a JSON -// response object. -func flattenSpokeLinkedInterconnectAttachmentsSlice(c *Client, i interface{}, res *Spoke) []SpokeLinkedInterconnectAttachments { - a, ok := i.([]interface{}) - if !ok { - return []SpokeLinkedInterconnectAttachments{} - } - - if len(a) == 0 { - return []SpokeLinkedInterconnectAttachments{} - } - - items := make([]SpokeLinkedInterconnectAttachments, 0, len(a)) - for _, item := range a { - items = append(items, *flattenSpokeLinkedInterconnectAttachments(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandSpokeLinkedInterconnectAttachments expands an instance of SpokeLinkedInterconnectAttachments into a JSON -// request object. 
-func expandSpokeLinkedInterconnectAttachments(c *Client, f *SpokeLinkedInterconnectAttachments, res *Spoke) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Uris; v != nil { - m["uris"] = v - } - if v := f.SiteToSiteDataTransfer; !dcl.IsEmptyValueIndirect(v) { - m["siteToSiteDataTransfer"] = v - } - - return m, nil -} - -// flattenSpokeLinkedInterconnectAttachments flattens an instance of SpokeLinkedInterconnectAttachments from a JSON -// response object. -func flattenSpokeLinkedInterconnectAttachments(c *Client, i interface{}, res *Spoke) *SpokeLinkedInterconnectAttachments { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &SpokeLinkedInterconnectAttachments{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptySpokeLinkedInterconnectAttachments - } - r.Uris = dcl.FlattenStringSlice(m["uris"]) - r.SiteToSiteDataTransfer = dcl.FlattenBool(m["siteToSiteDataTransfer"]) - - return r -} - -// expandSpokeLinkedRouterApplianceInstancesMap expands the contents of SpokeLinkedRouterApplianceInstances into a JSON -// request object. -func expandSpokeLinkedRouterApplianceInstancesMap(c *Client, f map[string]SpokeLinkedRouterApplianceInstances, res *Spoke) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandSpokeLinkedRouterApplianceInstances(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandSpokeLinkedRouterApplianceInstancesSlice expands the contents of SpokeLinkedRouterApplianceInstances into a JSON -// request object. -func expandSpokeLinkedRouterApplianceInstancesSlice(c *Client, f []SpokeLinkedRouterApplianceInstances, res *Spoke) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandSpokeLinkedRouterApplianceInstances(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenSpokeLinkedRouterApplianceInstancesMap flattens the contents of SpokeLinkedRouterApplianceInstances from a JSON -// response object. -func flattenSpokeLinkedRouterApplianceInstancesMap(c *Client, i interface{}, res *Spoke) map[string]SpokeLinkedRouterApplianceInstances { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]SpokeLinkedRouterApplianceInstances{} - } - - if len(a) == 0 { - return map[string]SpokeLinkedRouterApplianceInstances{} - } - - items := make(map[string]SpokeLinkedRouterApplianceInstances) - for k, item := range a { - items[k] = *flattenSpokeLinkedRouterApplianceInstances(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenSpokeLinkedRouterApplianceInstancesSlice flattens the contents of SpokeLinkedRouterApplianceInstances from a JSON -// response object. 
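[editor's note] The *Slice flatteners all follow one template: assert []interface{}, then flatten element by element, yielding an empty typed slice on any shape mismatch. A sketch over plain strings (the vendored code asserts element types directly; this version checks them):

func flattenStringsSketch(i interface{}) []string {
	a, ok := i.([]interface{})
	if !ok {
		return []string{}
	}
	items := make([]string, 0, len(a))
	for _, item := range a {
		if s, ok := item.(string); ok {
			items = append(items, s)
		}
	}
	return items
}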
-func flattenSpokeLinkedRouterApplianceInstancesSlice(c *Client, i interface{}, res *Spoke) []SpokeLinkedRouterApplianceInstances { - a, ok := i.([]interface{}) - if !ok { - return []SpokeLinkedRouterApplianceInstances{} - } - - if len(a) == 0 { - return []SpokeLinkedRouterApplianceInstances{} - } - - items := make([]SpokeLinkedRouterApplianceInstances, 0, len(a)) - for _, item := range a { - items = append(items, *flattenSpokeLinkedRouterApplianceInstances(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandSpokeLinkedRouterApplianceInstances expands an instance of SpokeLinkedRouterApplianceInstances into a JSON -// request object. -func expandSpokeLinkedRouterApplianceInstances(c *Client, f *SpokeLinkedRouterApplianceInstances, res *Spoke) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandSpokeLinkedRouterApplianceInstancesInstancesSlice(c, f.Instances, res); err != nil { - return nil, fmt.Errorf("error expanding Instances into instances: %w", err) - } else if v != nil { - m["instances"] = v - } - if v := f.SiteToSiteDataTransfer; !dcl.IsEmptyValueIndirect(v) { - m["siteToSiteDataTransfer"] = v - } - - return m, nil -} - -// flattenSpokeLinkedRouterApplianceInstances flattens an instance of SpokeLinkedRouterApplianceInstances from a JSON -// response object. -func flattenSpokeLinkedRouterApplianceInstances(c *Client, i interface{}, res *Spoke) *SpokeLinkedRouterApplianceInstances { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &SpokeLinkedRouterApplianceInstances{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptySpokeLinkedRouterApplianceInstances - } - r.Instances = flattenSpokeLinkedRouterApplianceInstancesInstancesSlice(c, m["instances"], res) - r.SiteToSiteDataTransfer = dcl.FlattenBool(m["siteToSiteDataTransfer"]) - - return r -} - -// expandSpokeLinkedRouterApplianceInstancesInstancesMap expands the contents of SpokeLinkedRouterApplianceInstancesInstances into a JSON -// request object. -func expandSpokeLinkedRouterApplianceInstancesInstancesMap(c *Client, f map[string]SpokeLinkedRouterApplianceInstancesInstances, res *Spoke) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandSpokeLinkedRouterApplianceInstancesInstances(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandSpokeLinkedRouterApplianceInstancesInstancesSlice expands the contents of SpokeLinkedRouterApplianceInstancesInstances into a JSON -// request object. -func expandSpokeLinkedRouterApplianceInstancesInstancesSlice(c *Client, f []SpokeLinkedRouterApplianceInstancesInstances, res *Spoke) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandSpokeLinkedRouterApplianceInstancesInstances(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenSpokeLinkedRouterApplianceInstancesInstancesMap flattens the contents of SpokeLinkedRouterApplianceInstancesInstances from a JSON -// response object. 
-func flattenSpokeLinkedRouterApplianceInstancesInstancesMap(c *Client, i interface{}, res *Spoke) map[string]SpokeLinkedRouterApplianceInstancesInstances { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]SpokeLinkedRouterApplianceInstancesInstances{} - } - - if len(a) == 0 { - return map[string]SpokeLinkedRouterApplianceInstancesInstances{} - } - - items := make(map[string]SpokeLinkedRouterApplianceInstancesInstances) - for k, item := range a { - items[k] = *flattenSpokeLinkedRouterApplianceInstancesInstances(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenSpokeLinkedRouterApplianceInstancesInstancesSlice flattens the contents of SpokeLinkedRouterApplianceInstancesInstances from a JSON -// response object. -func flattenSpokeLinkedRouterApplianceInstancesInstancesSlice(c *Client, i interface{}, res *Spoke) []SpokeLinkedRouterApplianceInstancesInstances { - a, ok := i.([]interface{}) - if !ok { - return []SpokeLinkedRouterApplianceInstancesInstances{} - } - - if len(a) == 0 { - return []SpokeLinkedRouterApplianceInstancesInstances{} - } - - items := make([]SpokeLinkedRouterApplianceInstancesInstances, 0, len(a)) - for _, item := range a { - items = append(items, *flattenSpokeLinkedRouterApplianceInstancesInstances(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandSpokeLinkedRouterApplianceInstancesInstances expands an instance of SpokeLinkedRouterApplianceInstancesInstances into a JSON -// request object. -func expandSpokeLinkedRouterApplianceInstancesInstances(c *Client, f *SpokeLinkedRouterApplianceInstancesInstances, res *Spoke) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.VirtualMachine; !dcl.IsEmptyValueIndirect(v) { - m["virtualMachine"] = v - } - if v := f.IPAddress; !dcl.IsEmptyValueIndirect(v) { - m["ipAddress"] = v - } - - return m, nil -} - -// flattenSpokeLinkedRouterApplianceInstancesInstances flattens an instance of SpokeLinkedRouterApplianceInstancesInstances from a JSON -// response object. -func flattenSpokeLinkedRouterApplianceInstancesInstances(c *Client, i interface{}, res *Spoke) *SpokeLinkedRouterApplianceInstancesInstances { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &SpokeLinkedRouterApplianceInstancesInstances{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptySpokeLinkedRouterApplianceInstancesInstances - } - r.VirtualMachine = dcl.FlattenString(m["virtualMachine"]) - r.IPAddress = dcl.FlattenString(m["ipAddress"]) - - return r -} - -// expandSpokeLinkedVPCNetworkMap expands the contents of SpokeLinkedVPCNetwork into a JSON -// request object. -func expandSpokeLinkedVPCNetworkMap(c *Client, f map[string]SpokeLinkedVPCNetwork, res *Spoke) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandSpokeLinkedVPCNetwork(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandSpokeLinkedVPCNetworkSlice expands the contents of SpokeLinkedVPCNetwork into a JSON -// request object. 
-func expandSpokeLinkedVPCNetworkSlice(c *Client, f []SpokeLinkedVPCNetwork, res *Spoke) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandSpokeLinkedVPCNetwork(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenSpokeLinkedVPCNetworkMap flattens the contents of SpokeLinkedVPCNetwork from a JSON -// response object. -func flattenSpokeLinkedVPCNetworkMap(c *Client, i interface{}, res *Spoke) map[string]SpokeLinkedVPCNetwork { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]SpokeLinkedVPCNetwork{} - } - - if len(a) == 0 { - return map[string]SpokeLinkedVPCNetwork{} - } - - items := make(map[string]SpokeLinkedVPCNetwork) - for k, item := range a { - items[k] = *flattenSpokeLinkedVPCNetwork(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenSpokeLinkedVPCNetworkSlice flattens the contents of SpokeLinkedVPCNetwork from a JSON -// response object. -func flattenSpokeLinkedVPCNetworkSlice(c *Client, i interface{}, res *Spoke) []SpokeLinkedVPCNetwork { - a, ok := i.([]interface{}) - if !ok { - return []SpokeLinkedVPCNetwork{} - } - - if len(a) == 0 { - return []SpokeLinkedVPCNetwork{} - } - - items := make([]SpokeLinkedVPCNetwork, 0, len(a)) - for _, item := range a { - items = append(items, *flattenSpokeLinkedVPCNetwork(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandSpokeLinkedVPCNetwork expands an instance of SpokeLinkedVPCNetwork into a JSON -// request object. -func expandSpokeLinkedVPCNetwork(c *Client, f *SpokeLinkedVPCNetwork, res *Spoke) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Uri; !dcl.IsEmptyValueIndirect(v) { - m["uri"] = v - } - if v := f.ExcludeExportRanges; v != nil { - m["excludeExportRanges"] = v - } - - return m, nil -} - -// flattenSpokeLinkedVPCNetwork flattens an instance of SpokeLinkedVPCNetwork from a JSON -// response object. -func flattenSpokeLinkedVPCNetwork(c *Client, i interface{}, res *Spoke) *SpokeLinkedVPCNetwork { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &SpokeLinkedVPCNetwork{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptySpokeLinkedVPCNetwork - } - r.Uri = dcl.FlattenString(m["uri"]) - r.ExcludeExportRanges = dcl.FlattenStringSlice(m["excludeExportRanges"]) - - return r -} - -// flattenSpokeStateEnumMap flattens the contents of SpokeStateEnum from a JSON -// response object. -func flattenSpokeStateEnumMap(c *Client, i interface{}, res *Spoke) map[string]SpokeStateEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]SpokeStateEnum{} - } - - if len(a) == 0 { - return map[string]SpokeStateEnum{} - } - - items := make(map[string]SpokeStateEnum) - for k, item := range a { - items[k] = *flattenSpokeStateEnum(item.(interface{})) - } - - return items -} - -// flattenSpokeStateEnumSlice flattens the contents of SpokeStateEnum from a JSON -// response object. 
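[editor's note] Enum flattening reduces to a checked string conversion; anything that is not a string becomes nil. Sketch with a placeholder enum type:

type stateEnum string

func flattenStateEnumSketch(i interface{}) *stateEnum {
	s, ok := i.(string)
	if !ok {
		return nil
	}
	e := stateEnum(s)
	return &e
}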
-func flattenSpokeStateEnumSlice(c *Client, i interface{}, res *Spoke) []SpokeStateEnum { - a, ok := i.([]interface{}) - if !ok { - return []SpokeStateEnum{} - } - - if len(a) == 0 { - return []SpokeStateEnum{} - } - - items := make([]SpokeStateEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenSpokeStateEnum(item.(interface{}))) - } - - return items -} - -// flattenSpokeStateEnum asserts that an interface is a string, and returns a -// pointer to a *SpokeStateEnum with the same value as that string. -func flattenSpokeStateEnum(i interface{}) *SpokeStateEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return SpokeStateEnumRef(s) -} - -// This function returns a matcher that checks whether a serialized resource matches this resource -// in its parameters (as defined by the fields in a Get, which definitionally define resource -// identity). This is useful in extracting the element from a List call. -func (r *Spoke) matcher(c *Client) func([]byte) bool { - return func(b []byte) bool { - cr, err := unmarshalSpoke(b, c, r) - if err != nil { - c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") - return false - } - nr := r.urlNormalized() - ncr := cr.urlNormalized() - c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) - - if nr.Project == nil && ncr.Project == nil { - c.Config.Logger.Info("Both Project fields null - considering equal.") - } else if nr.Project == nil || ncr.Project == nil { - c.Config.Logger.Info("Only one Project field is null - considering unequal.") - return false - } else if *nr.Project != *ncr.Project { - return false - } - if nr.Location == nil && ncr.Location == nil { - c.Config.Logger.Info("Both Location fields null - considering equal.") - } else if nr.Location == nil || ncr.Location == nil { - c.Config.Logger.Info("Only one Location field is null - considering unequal.") - return false - } else if *nr.Location != *ncr.Location { - return false - } - if nr.Name == nil && ncr.Name == nil { - c.Config.Logger.Info("Both Name fields null - considering equal.") - } else if nr.Name == nil || ncr.Name == nil { - c.Config.Logger.Info("Only one Name field is null - considering unequal.") - return false - } else if *nr.Name != *ncr.Name { - return false - } - return true - } -} - -type spokeDiff struct { - // The diff should include one or the other of RequiresRecreate or UpdateOp. - RequiresRecreate bool - UpdateOp spokeApiOperation - FieldName string // used for error logging -} - -func convertFieldDiffsToSpokeDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]spokeDiff, error) { - opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) - // Map each operation name to the field diffs associated with it. - for _, fd := range fds { - for _, ro := range fd.ResultingOperation { - if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { - fieldDiffs = append(fieldDiffs, fd) - opNamesToFieldDiffs[ro] = fieldDiffs - } else { - config.Logger.Infof("%s required due to diff: %v", ro, fd) - opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} - } - } - } - var diffs []spokeDiff - // For each operation name, create a spokeDiff which contains the operation. - for opName, fieldDiffs := range opNamesToFieldDiffs { - // Use the first field diff's field name for logging required recreate error. - diff := spokeDiff{FieldName: fieldDiffs[0].FieldName} - if opName == "Recreate" { - diff.RequiresRecreate = true - } else { - apiOp, err := convertOpNameToSpokeApiOperation(opName, fieldDiffs, opts...) 
- if err != nil { - return diffs, err - } - diff.UpdateOp = apiOp - } - diffs = append(diffs, diff) - } - return diffs, nil -} - -func convertOpNameToSpokeApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (spokeApiOperation, error) { - switch opName { - - case "updateSpokeUpdateSpokeOperation": - return &updateSpokeUpdateSpokeOperation{FieldDiffs: fieldDiffs}, nil - - default: - return nil, fmt.Errorf("no such operation with name: %v", opName) - } -} - -func extractSpokeFields(r *Spoke) error { - vLinkedVpnTunnels := r.LinkedVpnTunnels - if vLinkedVpnTunnels == nil { - // note: explicitly not the empty object. - vLinkedVpnTunnels = &SpokeLinkedVpnTunnels{} - } - if err := extractSpokeLinkedVpnTunnelsFields(r, vLinkedVpnTunnels); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLinkedVpnTunnels) { - r.LinkedVpnTunnels = vLinkedVpnTunnels - } - vLinkedInterconnectAttachments := r.LinkedInterconnectAttachments - if vLinkedInterconnectAttachments == nil { - // note: explicitly not the empty object. - vLinkedInterconnectAttachments = &SpokeLinkedInterconnectAttachments{} - } - if err := extractSpokeLinkedInterconnectAttachmentsFields(r, vLinkedInterconnectAttachments); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLinkedInterconnectAttachments) { - r.LinkedInterconnectAttachments = vLinkedInterconnectAttachments - } - vLinkedRouterApplianceInstances := r.LinkedRouterApplianceInstances - if vLinkedRouterApplianceInstances == nil { - // note: explicitly not the empty object. - vLinkedRouterApplianceInstances = &SpokeLinkedRouterApplianceInstances{} - } - if err := extractSpokeLinkedRouterApplianceInstancesFields(r, vLinkedRouterApplianceInstances); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLinkedRouterApplianceInstances) { - r.LinkedRouterApplianceInstances = vLinkedRouterApplianceInstances - } - vLinkedVPCNetwork := r.LinkedVPCNetwork - if vLinkedVPCNetwork == nil { - // note: explicitly not the empty object. - vLinkedVPCNetwork = &SpokeLinkedVPCNetwork{} - } - if err := extractSpokeLinkedVPCNetworkFields(r, vLinkedVPCNetwork); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLinkedVPCNetwork) { - r.LinkedVPCNetwork = vLinkedVPCNetwork - } - return nil -} -func extractSpokeLinkedVpnTunnelsFields(r *Spoke, o *SpokeLinkedVpnTunnels) error { - return nil -} -func extractSpokeLinkedInterconnectAttachmentsFields(r *Spoke, o *SpokeLinkedInterconnectAttachments) error { - return nil -} -func extractSpokeLinkedRouterApplianceInstancesFields(r *Spoke, o *SpokeLinkedRouterApplianceInstances) error { - return nil -} -func extractSpokeLinkedRouterApplianceInstancesInstancesFields(r *Spoke, o *SpokeLinkedRouterApplianceInstancesInstances) error { - return nil -} -func extractSpokeLinkedVPCNetworkFields(r *Spoke, o *SpokeLinkedVPCNetwork) error { - return nil -} - -func postReadExtractSpokeFields(r *Spoke) error { - vLinkedVpnTunnels := r.LinkedVpnTunnels - if vLinkedVpnTunnels == nil { - // note: explicitly not the empty object. - vLinkedVpnTunnels = &SpokeLinkedVpnTunnels{} - } - if err := postReadExtractSpokeLinkedVpnTunnelsFields(r, vLinkedVpnTunnels); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLinkedVpnTunnels) { - r.LinkedVpnTunnels = vLinkedVpnTunnels - } - vLinkedInterconnectAttachments := r.LinkedInterconnectAttachments - if vLinkedInterconnectAttachments == nil { - // note: explicitly not the empty object. 
- vLinkedInterconnectAttachments = &SpokeLinkedInterconnectAttachments{} - } - if err := postReadExtractSpokeLinkedInterconnectAttachmentsFields(r, vLinkedInterconnectAttachments); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLinkedInterconnectAttachments) { - r.LinkedInterconnectAttachments = vLinkedInterconnectAttachments - } - vLinkedRouterApplianceInstances := r.LinkedRouterApplianceInstances - if vLinkedRouterApplianceInstances == nil { - // note: explicitly not the empty object. - vLinkedRouterApplianceInstances = &SpokeLinkedRouterApplianceInstances{} - } - if err := postReadExtractSpokeLinkedRouterApplianceInstancesFields(r, vLinkedRouterApplianceInstances); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLinkedRouterApplianceInstances) { - r.LinkedRouterApplianceInstances = vLinkedRouterApplianceInstances - } - vLinkedVPCNetwork := r.LinkedVPCNetwork - if vLinkedVPCNetwork == nil { - // note: explicitly not the empty object. - vLinkedVPCNetwork = &SpokeLinkedVPCNetwork{} - } - if err := postReadExtractSpokeLinkedVPCNetworkFields(r, vLinkedVPCNetwork); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLinkedVPCNetwork) { - r.LinkedVPCNetwork = vLinkedVPCNetwork - } - return nil -} -func postReadExtractSpokeLinkedVpnTunnelsFields(r *Spoke, o *SpokeLinkedVpnTunnels) error { - return nil -} -func postReadExtractSpokeLinkedInterconnectAttachmentsFields(r *Spoke, o *SpokeLinkedInterconnectAttachments) error { - return nil -} -func postReadExtractSpokeLinkedRouterApplianceInstancesFields(r *Spoke, o *SpokeLinkedRouterApplianceInstances) error { - return nil -} -func postReadExtractSpokeLinkedRouterApplianceInstancesInstancesFields(r *Spoke, o *SpokeLinkedRouterApplianceInstancesInstances) error { - return nil -} -func postReadExtractSpokeLinkedVPCNetworkFields(r *Spoke, o *SpokeLinkedVPCNetwork) error { - return nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/spoke_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/spoke_schema.go deleted file mode 100644 index 79344d3bbaa..00000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/spoke_schema.go +++ /dev/null @@ -1,391 +0,0 @@ -// Copyright 2024 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
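A brief aside on the pattern in the deleted extract/postReadExtract helpers above: a nil nested object is swapped for a scratch empty struct so the per-field extractor can run, but the result is written back to the resource only when extraction actually populated it, which is why the generated code keeps noting "explicitly not the empty object". A minimal self-contained sketch of that guard (illustrative only; isEmpty stands in for dcl.IsEmptyValueIndirect, and the types are invented for the example):

package main

import "fmt"

type nested struct{ uri *string }
type resource struct{ linked *nested }

// isEmpty stands in for dcl.IsEmptyValueIndirect: nil or zero-valued.
func isEmpty(n *nested) bool { return n == nil || *n == (nested{}) }

func extractFields(r *resource) {
	v := r.linked
	if v == nil {
		v = &nested{} // scratch value; deliberately not assigned to r yet
	}
	// ...field-level extraction would mutate v here...
	if !isEmpty(v) {
		r.linked = v // keep it only if extraction produced something
	}
}

func main() {
	r := &resource{}
	extractFields(r)
	fmt.Println(r.linked == nil) // true: an empty extraction leaves the field nil
}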
-package networkconnectivity - -import ( - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func DCLSpokeSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "NetworkConnectivity/Spoke", - Description: "The NetworkConnectivity Spoke resource", - StructName: "Spoke", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a Spoke", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "spoke", - Required: true, - Description: "A full instance of a Spoke", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a Spoke", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "spoke", - Required: true, - Description: "A full instance of a Spoke", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a Spoke", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "spoke", - Required: true, - Description: "A full instance of a Spoke", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all Spoke", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many Spoke", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "Spoke": &dcl.Component{ - Title: "Spoke", - ID: "projects/{{project}}/locations/{{location}}/spokes/{{name}}", - ParentContainer: "project", - LabelsField: "labels", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "hub", - "project", - "location", - }, - Properties: map[string]*dcl.Property{ - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. The time the spoke was created.", - Immutable: true, - }, - "description": &dcl.Property{ - Type: "string", - GoName: "Description", - Description: "An optional description of the spoke.", - }, - "hub": &dcl.Property{ - Type: "string", - GoName: "Hub", - Description: "Immutable. The URI of the hub that this spoke is attached to.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Networkconnectivity/Hub", - Field: "name", - }, - }, - }, - "labels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Labels", - Description: "Optional labels in key:value format. For more information about labels, see [Requirements for labels](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements).", - }, - "linkedInterconnectAttachments": &dcl.Property{ - Type: "object", - GoName: "LinkedInterconnectAttachments", - GoType: "SpokeLinkedInterconnectAttachments", - Description: "A collection of VLAN attachment resources. 
These resources should be redundant attachments that all advertise the same prefixes to Google Cloud. Alternatively, in active/passive configurations, all attachments should be capable of advertising the same prefixes.", - Immutable: true, - Conflicts: []string{ - "linkedVpnTunnels", - "linkedRouterApplianceInstances", - "linkedVPCNetwork", - }, - Required: []string{ - "uris", - "siteToSiteDataTransfer", - }, - Properties: map[string]*dcl.Property{ - "siteToSiteDataTransfer": &dcl.Property{ - Type: "boolean", - GoName: "SiteToSiteDataTransfer", - Description: "A value that controls whether site-to-site data transfer is enabled for these resources. Note that data transfer is available only in supported locations.", - Immutable: true, - }, - "uris": &dcl.Property{ - Type: "array", - GoName: "Uris", - Description: "The URIs of linked interconnect attachment resources", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/InterconnectAttachment", - Field: "selfLink", - }, - }, - }, - }, - }, - }, - "linkedRouterApplianceInstances": &dcl.Property{ - Type: "object", - GoName: "LinkedRouterApplianceInstances", - GoType: "SpokeLinkedRouterApplianceInstances", - Description: "The URIs of linked Router appliance resources", - Immutable: true, - Conflicts: []string{ - "linkedVpnTunnels", - "linkedInterconnectAttachments", - "linkedVPCNetwork", - }, - Required: []string{ - "instances", - "siteToSiteDataTransfer", - }, - Properties: map[string]*dcl.Property{ - "instances": &dcl.Property{ - Type: "array", - GoName: "Instances", - Description: "The list of router appliance instances", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "SpokeLinkedRouterApplianceInstancesInstances", - Properties: map[string]*dcl.Property{ - "ipAddress": &dcl.Property{ - Type: "string", - GoName: "IPAddress", - Description: "The IP address on the VM to use for peering.", - Immutable: true, - }, - "virtualMachine": &dcl.Property{ - Type: "string", - GoName: "VirtualMachine", - Description: "The URI of the virtual machine resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/Instance", - Field: "selfLink", - }, - }, - }, - }, - }, - }, - "siteToSiteDataTransfer": &dcl.Property{ - Type: "boolean", - GoName: "SiteToSiteDataTransfer", - Description: "A value that controls whether site-to-site data transfer is enabled for these resources. 
Note that data transfer is available only in supported locations.", - Immutable: true, - }, - }, - }, - "linkedVPCNetwork": &dcl.Property{ - Type: "object", - GoName: "LinkedVPCNetwork", - GoType: "SpokeLinkedVPCNetwork", - Description: "VPC network that is associated with the spoke.", - Immutable: true, - Conflicts: []string{ - "linkedVpnTunnels", - "linkedInterconnectAttachments", - "linkedRouterApplianceInstances", - }, - Required: []string{ - "uri", - }, - Properties: map[string]*dcl.Property{ - "excludeExportRanges": &dcl.Property{ - Type: "array", - GoName: "ExcludeExportRanges", - Description: "IP ranges encompassing the subnets to be excluded from peering.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "uri": &dcl.Property{ - Type: "string", - GoName: "Uri", - Description: "The URI of the VPC network resource.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/Network", - Field: "selfLink", - }, - }, - }, - }, - }, - "linkedVpnTunnels": &dcl.Property{ - Type: "object", - GoName: "LinkedVpnTunnels", - GoType: "SpokeLinkedVpnTunnels", - Description: "The URIs of linked VPN tunnel resources", - Immutable: true, - Conflicts: []string{ - "linkedInterconnectAttachments", - "linkedRouterApplianceInstances", - "linkedVPCNetwork", - }, - Required: []string{ - "uris", - "siteToSiteDataTransfer", - }, - Properties: map[string]*dcl.Property{ - "siteToSiteDataTransfer": &dcl.Property{ - Type: "boolean", - GoName: "SiteToSiteDataTransfer", - Description: "A value that controls whether site-to-site data transfer is enabled for these resources. Note that data transfer is available only in supported locations.", - Immutable: true, - }, - "uris": &dcl.Property{ - Type: "array", - GoName: "Uris", - Description: "The URIs of linked VPN tunnel resources.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/VpnTunnel", - Field: "selfLink", - }, - }, - }, - }, - }, - }, - "location": &dcl.Property{ - Type: "string", - GoName: "Location", - Description: "The location for the resource", - Immutable: true, - Parameter: true, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Immutable. The name of the spoke. Spoke names must be unique.", - Immutable: true, - HasLongForm: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "state": &dcl.Property{ - Type: "string", - GoName: "State", - GoType: "SpokeStateEnum", - ReadOnly: true, - Description: "Output only. The current lifecycle state of this spoke. Possible values: STATE_UNSPECIFIED, CREATING, ACTIVE, DELETING", - Immutable: true, - Enum: []string{ - "STATE_UNSPECIFIED", - "CREATING", - "ACTIVE", - "DELETING", - }, - }, - "uniqueId": &dcl.Property{ - Type: "string", - GoName: "UniqueId", - ReadOnly: true, - Description: "Output only. The Google-generated UUID for the spoke. This value is unique across all spoke resources. 
If a spoke is deleted and another with the same name is created, the new spoke is assigned a different unique_id.", - Immutable: true, - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - ReadOnly: true, - Description: "Output only. The time the spoke was last updated.", - Immutable: true, - }, - }, - }, - }, - }, - }, - } -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/spoke_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/spoke_yaml_embed.go deleted file mode 100644 index 17a1d0f8834..00000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/spoke_yaml_embed.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2024 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// GENERATED BY gen_go_data.go -// gen_go_data -package networkconnectivity -var YAML_spoke blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/networkconnectivity/spoke.yaml - -package networkconnectivity - -// blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/networkconnectivity/spoke.yaml -var YAML_spoke = []byte("info:\n title: NetworkConnectivity/Spoke\n description: The NetworkConnectivity Spoke resource\n x-dcl-struct-name: Spoke\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a Spoke\n parameters:\n - name: spoke\n required: true\n description: A full instance of a Spoke\n apply:\n description: The function used to apply information about a Spoke\n parameters:\n - name: spoke\n required: true\n description: A full instance of a Spoke\n delete:\n description: The function used to delete a Spoke\n parameters:\n - name: spoke\n required: true\n description: A full instance of a Spoke\n deleteAll:\n description: The function used to delete all Spoke\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Spoke\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Spoke:\n title: Spoke\n x-dcl-id: projects/{{project}}/locations/{{location}}/spokes/{{name}}\n x-dcl-parent-container: project\n x-dcl-labels: labels\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - hub\n - project\n - location\n properties:\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. 
The time the spoke was created.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: An optional description of the spoke.\n hub:\n type: string\n x-dcl-go-name: Hub\n description: Immutable. The URI of the hub that this spoke is attached to.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Networkconnectivity/Hub\n field: name\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Optional labels in key:value format. For more information about\n labels, see [Requirements for labels](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements).\n linkedInterconnectAttachments:\n type: object\n x-dcl-go-name: LinkedInterconnectAttachments\n x-dcl-go-type: SpokeLinkedInterconnectAttachments\n description: A collection of VLAN attachment resources. These resources\n should be redundant attachments that all advertise the same prefixes to\n Google Cloud. Alternatively, in active/passive configurations, all attachments\n should be capable of advertising the same prefixes.\n x-kubernetes-immutable: true\n x-dcl-conflicts:\n - linkedVpnTunnels\n - linkedRouterApplianceInstances\n - linkedVPCNetwork\n required:\n - uris\n - siteToSiteDataTransfer\n properties:\n siteToSiteDataTransfer:\n type: boolean\n x-dcl-go-name: SiteToSiteDataTransfer\n description: A value that controls whether site-to-site data transfer\n is enabled for these resources. Note that data transfer is available\n only in supported locations.\n x-kubernetes-immutable: true\n uris:\n type: array\n x-dcl-go-name: Uris\n description: The URIs of linked interconnect attachment resources\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Compute/InterconnectAttachment\n field: selfLink\n linkedRouterApplianceInstances:\n type: object\n x-dcl-go-name: LinkedRouterApplianceInstances\n x-dcl-go-type: SpokeLinkedRouterApplianceInstances\n description: The URIs of linked Router appliance resources\n x-kubernetes-immutable: true\n x-dcl-conflicts:\n - linkedVpnTunnels\n - linkedInterconnectAttachments\n - linkedVPCNetwork\n required:\n - instances\n - siteToSiteDataTransfer\n properties:\n instances:\n type: array\n x-dcl-go-name: Instances\n description: The list of router appliance instances\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: SpokeLinkedRouterApplianceInstancesInstances\n properties:\n ipAddress:\n type: string\n x-dcl-go-name: IPAddress\n description: The IP address on the VM to use for peering.\n x-kubernetes-immutable: true\n virtualMachine:\n type: string\n x-dcl-go-name: VirtualMachine\n description: The URI of the virtual machine resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Instance\n field: selfLink\n siteToSiteDataTransfer:\n type: boolean\n x-dcl-go-name: SiteToSiteDataTransfer\n description: A value that controls whether site-to-site data transfer\n is enabled for these resources. 
Note that data transfer is available\n only in supported locations.\n x-kubernetes-immutable: true\n linkedVPCNetwork:\n type: object\n x-dcl-go-name: LinkedVPCNetwork\n x-dcl-go-type: SpokeLinkedVPCNetwork\n description: VPC network that is associated with the spoke.\n x-kubernetes-immutable: true\n x-dcl-conflicts:\n - linkedVpnTunnels\n - linkedInterconnectAttachments\n - linkedRouterApplianceInstances\n required:\n - uri\n properties:\n excludeExportRanges:\n type: array\n x-dcl-go-name: ExcludeExportRanges\n description: IP ranges encompassing the subnets to be excluded from\n peering.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n uri:\n type: string\n x-dcl-go-name: Uri\n description: The URI of the VPC network resource.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Network\n field: selfLink\n linkedVpnTunnels:\n type: object\n x-dcl-go-name: LinkedVpnTunnels\n x-dcl-go-type: SpokeLinkedVpnTunnels\n description: The URIs of linked VPN tunnel resources\n x-kubernetes-immutable: true\n x-dcl-conflicts:\n - linkedInterconnectAttachments\n - linkedRouterApplianceInstances\n - linkedVPCNetwork\n required:\n - uris\n - siteToSiteDataTransfer\n properties:\n siteToSiteDataTransfer:\n type: boolean\n x-dcl-go-name: SiteToSiteDataTransfer\n description: A value that controls whether site-to-site data transfer\n is enabled for these resources. Note that data transfer is available\n only in supported locations.\n x-kubernetes-immutable: true\n uris:\n type: array\n x-dcl-go-name: Uris\n description: The URIs of linked VPN tunnel resources.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Compute/VpnTunnel\n field: selfLink\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n x-dcl-parameter: true\n name:\n type: string\n x-dcl-go-name: Name\n description: Immutable. The name of the spoke. Spoke names must be unique.\n x-kubernetes-immutable: true\n x-dcl-has-long-form: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n x-dcl-parameter: true\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: SpokeStateEnum\n readOnly: true\n description: 'Output only. The current lifecycle state of this spoke. Possible\n values: STATE_UNSPECIFIED, CREATING, ACTIVE, DELETING'\n x-kubernetes-immutable: true\n enum:\n - STATE_UNSPECIFIED\n - CREATING\n - ACTIVE\n - DELETING\n uniqueId:\n type: string\n x-dcl-go-name: UniqueId\n readOnly: true\n description: Output only. The Google-generated UUID for the spoke. This\n value is unique across all spoke resources. If a spoke is deleted and\n another with the same name is created, the new spoke is assigned a different\n unique_id.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. 
The time the spoke was last updated.\n x-kubernetes-immutable: true\n") - -// 10468 bytes -// MD5: ddf357667cf6e78a2ba68c1b49f28984 diff --git a/terraform/providers/google/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/terraform/providers/google/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go index ff14da3185d..420625386b5 100644 --- a/terraform/providers/google/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go +++ b/terraform/providers/google/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go @@ -127,13 +127,6 @@ type VerifiableSignature struct { Packet *Signature } -// SaltedHashSpecifier specifies that the given salt and hash are -// used by a v6 signature. -type SaltedHashSpecifier struct { - Hash crypto.Hash - Salt []byte -} - // NewVerifiableSig returns a struct of type VerifiableSignature referencing the input signature. func NewVerifiableSig(signature *Signature) *VerifiableSignature { return &VerifiableSignature{ diff --git a/terraform/providers/google/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go b/terraform/providers/google/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go index ac897d709e7..408506592fc 100644 --- a/terraform/providers/google/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go +++ b/terraform/providers/google/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go @@ -6,7 +6,6 @@ package openpgp // import "github.com/ProtonMail/go-crypto/openpgp" import ( - "bytes" "crypto" _ "crypto/sha256" _ "crypto/sha512" @@ -455,19 +454,13 @@ func (scr *signatureCheckReader) Read(buf []byte) (int, error) { // if any, and a possible signature verification error. // If the signer isn't known, ErrUnknownIssuer is returned. func VerifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { - return verifyDetachedSignature(keyring, signed, signature, nil, nil, false, config) + return verifyDetachedSignature(keyring, signed, signature, nil, false, config) } // VerifyDetachedSignatureAndHash performs the same actions as // VerifyDetachedSignature and checks that the expected hash functions were used. func VerifyDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { - return verifyDetachedSignature(keyring, signed, signature, expectedHashes, nil, true, config) -} - -// VerifyDetachedSignatureAndSaltedHash performs the same actions as -// VerifyDetachedSignature and checks that the expected hash functions and salts were used. -func VerifyDetachedSignatureAndSaltedHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, expectedSaltedHashes []*packet.SaltedHashSpecifier, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { - return verifyDetachedSignature(keyring, signed, signature, expectedHashes, expectedSaltedHashes, true, config) + return verifyDetachedSignature(keyring, signed, signature, expectedHashes, true, config) } // CheckDetachedSignature takes a signed file and a detached signature and @@ -475,25 +468,18 @@ func VerifyDetachedSignatureAndSaltedHash(keyring KeyRing, signed, signature io. // signature verification error. If the signer isn't known, // ErrUnknownIssuer is returned. 
func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) { - _, signer, err = verifyDetachedSignature(keyring, signed, signature, nil, nil, false, config) - return -} - -// CheckDetachedSignatureAndSaltedHash performs the same actions as -// CheckDetachedSignature and checks that the expected hash functions or salted hash functions were used. -func CheckDetachedSignatureAndSaltedHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, expectedSaltedHashes []*packet.SaltedHashSpecifier, config *packet.Config) (signer *Entity, err error) { - _, signer, err = verifyDetachedSignature(keyring, signed, signature, expectedHashes, expectedSaltedHashes, true, config) + _, signer, err = verifyDetachedSignature(keyring, signed, signature, nil, false, config) return } // CheckDetachedSignatureAndHash performs the same actions as // CheckDetachedSignature and checks that the expected hash functions were used. func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (signer *Entity, err error) { - _, signer, err = verifyDetachedSignature(keyring, signed, signature, expectedHashes, nil, true, config) + _, signer, err = verifyDetachedSignature(keyring, signed, signature, expectedHashes, true, config) return } -func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, expectedSaltedHashes []*packet.SaltedHashSpecifier, checkHashes bool, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { +func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, checkHashes bool, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { var issuerKeyId uint64 var hashFunc crypto.Hash var sigType packet.SignatureType @@ -523,22 +509,11 @@ func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expec sigType = sig.SigType if checkHashes { matchFound := false - if sig.Version == 6 { - // check for salted hashes - for _, expectedSaltedHash := range expectedSaltedHashes { - if hashFunc == expectedSaltedHash.Hash && bytes.Equal(sig.Salt(), expectedSaltedHash.Salt) { - matchFound = true - break - } - } - - } else { - // check for hashes - for _, expectedHash := range expectedHashes { - if hashFunc == expectedHash { - matchFound = true - break - } + // check for hashes + for _, expectedHash := range expectedHashes { + if hashFunc == expectedHash { + matchFound = true + break } } if !matchFound { diff --git a/terraform/providers/google/vendor/github.com/cespare/xxhash/v2/README.md b/terraform/providers/google/vendor/github.com/cespare/xxhash/v2/README.md index 8bf0e5b7815..33c88305c46 100644 --- a/terraform/providers/google/vendor/github.com/cespare/xxhash/v2/README.md +++ b/terraform/providers/google/vendor/github.com/cespare/xxhash/v2/README.md @@ -70,3 +70,5 @@ benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') - [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) - [FreeCache](https://github.com/coocood/freecache) - [FastCache](https://github.com/VictoriaMetrics/fastcache) +- [Ristretto](https://github.com/dgraph-io/ristretto) +- [Badger](https://github.com/dgraph-io/badger) diff --git a/terraform/providers/google/vendor/github.com/cespare/xxhash/v2/xxhash.go b/terraform/providers/google/vendor/github.com/cespare/xxhash/v2/xxhash.go index a9e0d45c9dc..78bddf1ceed 100644 --- 
a/terraform/providers/google/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/terraform/providers/google/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -19,10 +19,13 @@ const ( // Store the primes in an array as well. // // The consts are used when possible in Go code to avoid MOVs but we need a -// contiguous array of the assembly code. +// contiguous array for the assembly code. var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. +// +// Note that a zero-valued Digest is not ready to receive writes. +// Call Reset or create a Digest using New before calling other methods. type Digest struct { v1 uint64 v2 uint64 @@ -33,19 +36,31 @@ type Digest struct { n int // how much of mem is used } -// New creates a new Digest that computes the 64-bit xxHash algorithm. +// New creates a new Digest with a zero seed. func New() *Digest { + return NewWithSeed(0) +} + +// NewWithSeed creates a new Digest with the given seed. +func NewWithSeed(seed uint64) *Digest { var d Digest - d.Reset() + d.ResetWithSeed(seed) return &d } // Reset clears the Digest's state so that it can be reused. +// It uses a seed value of zero. func (d *Digest) Reset() { - d.v1 = primes[0] + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -primes[0] + d.ResetWithSeed(0) +} + +// ResetWithSeed clears the Digest's state so that it can be reused. +// It uses the given seed to initialize the state. +func (d *Digest) ResetWithSeed(seed uint64) { + d.v1 = seed + prime1 + prime2 + d.v2 = seed + prime2 + d.v3 = seed + d.v4 = seed - prime1 d.total = 0 d.n = 0 } diff --git a/terraform/providers/google/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/terraform/providers/google/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index 9216e0a40c1..78f95f25610 100644 --- a/terraform/providers/google/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go +++ b/terraform/providers/google/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -6,7 +6,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. // //go:noescape func Sum64(b []byte) uint64 diff --git a/terraform/providers/google/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/terraform/providers/google/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 26df13bba4b..118e49e819e 100644 --- a/terraform/providers/google/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/terraform/providers/google/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -3,7 +3,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. func Sum64(b []byte) uint64 { // A simpler version would be // d := New() diff --git a/terraform/providers/google/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/terraform/providers/google/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index e86f1b5fd8e..05f5e7dfe7b 100644 --- a/terraform/providers/google/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/terraform/providers/google/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -5,7 +5,7 @@ package xxhash -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. 
func Sum64String(s string) uint64 { return Sum64([]byte(s)) } diff --git a/terraform/providers/google/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/terraform/providers/google/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 1c1638fd88a..cf9d42aed53 100644 --- a/terraform/providers/google/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/terraform/providers/google/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -33,7 +33,7 @@ import ( // // See https://github.com/golang/go/issues/42739 for discussion. -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. // It may be faster than Sum64([]byte(s)) by avoiding a copy. func Sum64String(s string) uint64 { b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go index 7d3e1536b36..0281b3ee584 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: udpa/annotations/migrate.proto package annotations diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go index 71957789538..cf858bd9773 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: udpa/annotations/security.proto package annotations diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go index 8631b8568c1..2d5c78dc29a 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: udpa/annotations/sensitive.proto package annotations diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go index f2fdc3ca388..c96818b17cd 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
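For context on the cespare/xxhash hunks above: the new seed API is a strict superset of the old one. New is now NewWithSeed(0), ResetWithSeed initializes the four lanes exactly as shown in the diff (seed+prime1+prime2, seed+prime2, seed, seed-prime1), and Sum64/Sum64String remain the zero-seed shortcuts. A small usage sketch (illustrative, not part of the patch):

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// Zero-seed digest: New() is now equivalent to NewWithSeed(0).
	d := xxhash.New()
	d.Write([]byte("hello"))
	fmt.Println(d.Sum64() == xxhash.Sum64([]byte("hello"))) // true

	// Seeded digest; ResetWithSeed reuses the Digest with a fresh seed.
	s := xxhash.NewWithSeed(42)
	s.Write([]byte("hello"))
	seeded := s.Sum64()
	s.ResetWithSeed(42)
	s.Write([]byte("hello"))
	fmt.Println(seeded == s.Sum64()) // true: same seed, same input, same hash
}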
// versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: udpa/annotations/status.proto package annotations diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go index df83e0a2eb5..b3ab9e346b0 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: udpa/annotations/versioning.proto package annotations diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.go index 493253d4f59..e8f23f7858f 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.go @@ -1,15 +1,15 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: udpa/type/v1/typed_struct.proto package v1 import ( - _struct "github.com/golang/protobuf/ptypes/struct" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" reflect "reflect" sync "sync" ) @@ -26,8 +26,8 @@ type TypedStruct struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` - Value *_struct.Struct `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + Value *structpb.Struct `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } func (x *TypedStruct) Reset() { @@ -69,7 +69,7 @@ func (x *TypedStruct) GetTypeUrl() string { return "" } -func (x *TypedStruct) GetValue() *_struct.Struct { +func (x *TypedStruct) GetValue() *structpb.Struct { if x != nil { return x.Value } @@ -112,8 +112,8 @@ func file_udpa_type_v1_typed_struct_proto_rawDescGZIP() []byte { var file_udpa_type_v1_typed_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_udpa_type_v1_typed_struct_proto_goTypes = []interface{}{ - (*TypedStruct)(nil), // 0: udpa.type.v1.TypedStruct - (*_struct.Struct)(nil), // 1: google.protobuf.Struct + (*TypedStruct)(nil), // 0: udpa.type.v1.TypedStruct + (*structpb.Struct)(nil), // 1: google.protobuf.Struct } var file_udpa_type_v1_typed_struct_proto_depIdxs = []int32{ 1, // 0: udpa.type.v1.TypedStruct.value:type_name -> google.protobuf.Struct diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go index ad24b1f7f6c..705a71e8873 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go @@ -1,7 +1,7 @@ // Code generated 
by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/annotations/v3/migrate.proto package v3 diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go index 61df6890bd3..0278e516589 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/annotations/v3/security.proto package v3 diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go index 274eace058d..57161aab476 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/annotations/v3/sensitive.proto package v3 diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go index 2497e0b2fea..255d109fc51 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/annotations/v3/status.proto package v3 diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go index 2307dc874a4..2de032f159c 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/annotations/v3/versioning.proto package v3 diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go index 3c361216c0d..3058286d575 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/core/v3/authority.proto package v3 diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go index 60587a2fa95..0e339b5899d 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/core/v3/cidr.proto package v3 @@ -9,9 +9,9 @@ package v3 import ( _ "github.com/cncf/xds/go/xds/annotations/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -28,8 +28,8 @@ type CidrRange struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - AddressPrefix string `protobuf:"bytes,1,opt,name=address_prefix,json=addressPrefix,proto3" json:"address_prefix,omitempty"` - PrefixLen *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=prefix_len,json=prefixLen,proto3" json:"prefix_len,omitempty"` + AddressPrefix string `protobuf:"bytes,1,opt,name=address_prefix,json=addressPrefix,proto3" json:"address_prefix,omitempty"` + PrefixLen *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=prefix_len,json=prefixLen,proto3" json:"prefix_len,omitempty"` } func (x *CidrRange) Reset() { @@ -71,7 +71,7 @@ func (x *CidrRange) GetAddressPrefix() string { return "" } -func (x *CidrRange) GetPrefixLen() *wrappers.UInt32Value { +func (x *CidrRange) GetPrefixLen() *wrapperspb.UInt32Value { if x != nil { return x.PrefixLen } @@ -120,8 +120,8 @@ func file_xds_core_v3_cidr_proto_rawDescGZIP() []byte { var file_xds_core_v3_cidr_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_xds_core_v3_cidr_proto_goTypes = []interface{}{ - (*CidrRange)(nil), // 0: xds.core.v3.CidrRange - (*wrappers.UInt32Value)(nil), // 1: google.protobuf.UInt32Value + (*CidrRange)(nil), // 0: xds.core.v3.CidrRange + (*wrapperspb.UInt32Value)(nil), // 1: google.protobuf.UInt32Value } var file_xds_core_v3_cidr_proto_depIdxs = []int32{ 1, // 0: xds.core.v3.CidrRange.prefix_len:type_name -> google.protobuf.UInt32Value diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go index 63e33eeb81f..0d45b961bf2 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/core/v3/collection_entry.proto package v3 @@ -9,9 +9,9 @@ package v3 import ( _ "github.com/cncf/xds/go/xds/annotations/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any1 "github.com/golang/protobuf/ptypes/any" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" sync "sync" ) @@ -109,9 +109,9 @@ type CollectionEntry_InlineEntry struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - Resource *any1.Any `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + Resource *anypb.Any `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` } func (x *CollectionEntry_InlineEntry) Reset() { @@ -160,7 +160,7 @@ func (x *CollectionEntry_InlineEntry) GetVersion() string { return "" } -func (x *CollectionEntry_InlineEntry) GetResource() *any1.Any { +func (x *CollectionEntry_InlineEntry) GetResource() *anypb.Any { if x != nil { return x.Resource } @@ -227,7 +227,7 @@ var file_xds_core_v3_collection_entry_proto_goTypes = []interface{}{ (*CollectionEntry)(nil), // 0: xds.core.v3.CollectionEntry (*CollectionEntry_InlineEntry)(nil), // 1: xds.core.v3.CollectionEntry.InlineEntry (*ResourceLocator)(nil), // 2: xds.core.v3.ResourceLocator - (*any1.Any)(nil), // 3: google.protobuf.Any + (*anypb.Any)(nil), // 3: google.protobuf.Any } var file_xds_core_v3_collection_entry_proto_depIdxs = []int32{ 2, // 0: xds.core.v3.CollectionEntry.locator:type_name -> xds.core.v3.ResourceLocator diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go index 563775a1fb5..714ab436734 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/core/v3/context_params.proto package v3 diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go index 35845e01875..be4ea10c6b2 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go @@ -1,16 +1,16 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/core/v3/extension.proto package v3 import ( _ "github.com/envoyproxy/protoc-gen-validate/validate" - any1 "github.com/golang/protobuf/ptypes/any" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" sync "sync" ) @@ -27,8 +27,8 @@ type TypedExtensionConfig struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - TypedConfig *any1.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + TypedConfig *anypb.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` } func (x *TypedExtensionConfig) Reset() { @@ -70,7 +70,7 @@ func (x *TypedExtensionConfig) GetName() string { return "" } -func (x *TypedExtensionConfig) GetTypedConfig() *any1.Any { +func (x *TypedExtensionConfig) GetTypedConfig() *anypb.Any { if x != nil { return x.TypedConfig } @@ -116,7 +116,7 @@ func file_xds_core_v3_extension_proto_rawDescGZIP() []byte { var file_xds_core_v3_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_xds_core_v3_extension_proto_goTypes = []interface{}{ (*TypedExtensionConfig)(nil), // 0: xds.core.v3.TypedExtensionConfig - (*any1.Any)(nil), // 1: google.protobuf.Any + (*anypb.Any)(nil), // 1: google.protobuf.Any } var file_xds_core_v3_extension_proto_depIdxs = []int32{ 1, // 0: xds.core.v3.TypedExtensionConfig.typed_config:type_name -> google.protobuf.Any diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go index 4cf1f7e7e79..641e3411ac3 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go @@ -1,16 +1,16 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/core/v3/resource.proto package v3 import ( _ "github.com/cncf/xds/go/xds/annotations/v3" - any1 "github.com/golang/protobuf/ptypes/any" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" sync "sync" ) @@ -29,7 +29,7 @@ type Resource struct { Name *ResourceName `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - Resource *any1.Any `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` + Resource *anypb.Any `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` } func (x *Resource) Reset() { @@ -78,7 +78,7 @@ func (x *Resource) GetVersion() string { return "" } -func (x *Resource) GetResource() *any1.Any { +func (x *Resource) GetResource() *anypb.Any { if x != nil { return x.Resource } @@ -129,7 +129,7 @@ var file_xds_core_v3_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_xds_core_v3_resource_proto_goTypes = []interface{}{ (*Resource)(nil), // 0: xds.core.v3.Resource (*ResourceName)(nil), // 1: xds.core.v3.ResourceName - (*any1.Any)(nil), // 2: google.protobuf.Any + (*anypb.Any)(nil), // 2: google.protobuf.Any } var file_xds_core_v3_resource_proto_depIdxs = []int32{ 1, // 0: xds.core.v3.Resource.name:type_name -> xds.core.v3.ResourceName diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go index 50fe599dbfe..3f99d4beeca 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/core/v3/resource_locator.proto package v3 diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go index 92d5fa85395..3d42818b7a3 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/core/v3/resource_name.proto package v3 diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go index 9cc4053a09e..74899339b89 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
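The cncf/xds regenerations above also swap the deprecated github.com/golang/protobuf/ptypes aliases (any1, wrappers, _struct, duration) for the canonical google.golang.org/protobuf/types/known packages (anypb, wrapperspb, structpb, durationpb). Both import paths expose the same generated message types, so the change is source-compatible for callers. A quick sketch with structpb (illustrative; the map contents are invented):

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// structpb replaces github.com/golang/protobuf/ptypes/struct; both are
	// the same google.protobuf.Struct message, so fields like
	// TypedStruct.Value are unaffected by the import swap.
	s, err := structpb.NewStruct(map[string]interface{}{"region": "us-east1"})
	if err != nil {
		panic(err)
	}
	fmt.Println(s.Fields["region"].GetStringValue()) // us-east1
}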
// versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/data/orca/v3/orca_load_report.proto package v3 diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.go index 58eedc08bd5..463f4ed331c 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.go @@ -1,20 +1,16 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/service/orca/v3/orca.proto package v3 import ( - context "context" v3 "github.com/cncf/xds/go/xds/data/orca/v3" - duration "github.com/golang/protobuf/ptypes/duration" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" reflect "reflect" sync "sync" ) @@ -31,8 +27,8 @@ type OrcaLoadReportRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ReportInterval *duration.Duration `protobuf:"bytes,1,opt,name=report_interval,json=reportInterval,proto3" json:"report_interval,omitempty"` - RequestCostNames []string `protobuf:"bytes,2,rep,name=request_cost_names,json=requestCostNames,proto3" json:"request_cost_names,omitempty"` + ReportInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=report_interval,json=reportInterval,proto3" json:"report_interval,omitempty"` + RequestCostNames []string `protobuf:"bytes,2,rep,name=request_cost_names,json=requestCostNames,proto3" json:"request_cost_names,omitempty"` } func (x *OrcaLoadReportRequest) Reset() { @@ -67,7 +63,7 @@ func (*OrcaLoadReportRequest) Descriptor() ([]byte, []int) { return file_xds_service_orca_v3_orca_proto_rawDescGZIP(), []int{0} } -func (x *OrcaLoadReportRequest) GetReportInterval() *duration.Duration { +func (x *OrcaLoadReportRequest) GetReportInterval() *durationpb.Duration { if x != nil { return x.ReportInterval } @@ -132,7 +128,7 @@ func file_xds_service_orca_v3_orca_proto_rawDescGZIP() []byte { var file_xds_service_orca_v3_orca_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_xds_service_orca_v3_orca_proto_goTypes = []interface{}{ (*OrcaLoadReportRequest)(nil), // 0: xds.service.orca.v3.OrcaLoadReportRequest - (*duration.Duration)(nil), // 1: google.protobuf.Duration + (*durationpb.Duration)(nil), // 1: google.protobuf.Duration (*v3.OrcaLoadReport)(nil), // 2: xds.data.orca.v3.OrcaLoadReport } var file_xds_service_orca_v3_orca_proto_depIdxs = []int32{ @@ -184,110 +180,3 @@ func file_xds_service_orca_v3_orca_proto_init() { file_xds_service_orca_v3_orca_proto_goTypes = nil file_xds_service_orca_v3_orca_proto_depIdxs = nil } - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// OpenRcaServiceClient is the client API for OpenRcaService service. 
-// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type OpenRcaServiceClient interface { - StreamCoreMetrics(ctx context.Context, in *OrcaLoadReportRequest, opts ...grpc.CallOption) (OpenRcaService_StreamCoreMetricsClient, error) -} - -type openRcaServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewOpenRcaServiceClient(cc grpc.ClientConnInterface) OpenRcaServiceClient { - return &openRcaServiceClient{cc} -} - -func (c *openRcaServiceClient) StreamCoreMetrics(ctx context.Context, in *OrcaLoadReportRequest, opts ...grpc.CallOption) (OpenRcaService_StreamCoreMetricsClient, error) { - stream, err := c.cc.NewStream(ctx, &_OpenRcaService_serviceDesc.Streams[0], "/xds.service.orca.v3.OpenRcaService/StreamCoreMetrics", opts...) - if err != nil { - return nil, err - } - x := &openRcaServiceStreamCoreMetricsClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type OpenRcaService_StreamCoreMetricsClient interface { - Recv() (*v3.OrcaLoadReport, error) - grpc.ClientStream -} - -type openRcaServiceStreamCoreMetricsClient struct { - grpc.ClientStream -} - -func (x *openRcaServiceStreamCoreMetricsClient) Recv() (*v3.OrcaLoadReport, error) { - m := new(v3.OrcaLoadReport) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// OpenRcaServiceServer is the server API for OpenRcaService service. -type OpenRcaServiceServer interface { - StreamCoreMetrics(*OrcaLoadReportRequest, OpenRcaService_StreamCoreMetricsServer) error -} - -// UnimplementedOpenRcaServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedOpenRcaServiceServer struct { -} - -func (*UnimplementedOpenRcaServiceServer) StreamCoreMetrics(*OrcaLoadReportRequest, OpenRcaService_StreamCoreMetricsServer) error { - return status.Errorf(codes.Unimplemented, "method StreamCoreMetrics not implemented") -} - -func RegisterOpenRcaServiceServer(s *grpc.Server, srv OpenRcaServiceServer) { - s.RegisterService(&_OpenRcaService_serviceDesc, srv) -} - -func _OpenRcaService_StreamCoreMetrics_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(OrcaLoadReportRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(OpenRcaServiceServer).StreamCoreMetrics(m, &openRcaServiceStreamCoreMetricsServer{stream}) -} - -type OpenRcaService_StreamCoreMetricsServer interface { - Send(*v3.OrcaLoadReport) error - grpc.ServerStream -} - -type openRcaServiceStreamCoreMetricsServer struct { - grpc.ServerStream -} - -func (x *openRcaServiceStreamCoreMetricsServer) Send(m *v3.OrcaLoadReport) error { - return x.ServerStream.SendMsg(m) -} - -var _OpenRcaService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "xds.service.orca.v3.OpenRcaService", - HandlerType: (*OpenRcaServiceServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "StreamCoreMetrics", - Handler: _OpenRcaService_StreamCoreMetrics_Handler, - ServerStreams: true, - }, - }, - Metadata: "xds/service/orca/v3/orca.proto", -} diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca_grpc.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca_grpc.pb.go new file mode 100644 index 00000000000..6cecac149af --- /dev/null +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca_grpc.pb.go @@ -0,0 +1,135 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v5.27.0--rc2 +// source: xds/service/orca/v3/orca.proto + +package v3 + +import ( + context "context" + v3 "github.com/cncf/xds/go/xds/data/orca/v3" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + OpenRcaService_StreamCoreMetrics_FullMethodName = "/xds.service.orca.v3.OpenRcaService/StreamCoreMetrics" +) + +// OpenRcaServiceClient is the client API for OpenRcaService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type OpenRcaServiceClient interface { + StreamCoreMetrics(ctx context.Context, in *OrcaLoadReportRequest, opts ...grpc.CallOption) (OpenRcaService_StreamCoreMetricsClient, error) +} + +type openRcaServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewOpenRcaServiceClient(cc grpc.ClientConnInterface) OpenRcaServiceClient { + return &openRcaServiceClient{cc} +} + +func (c *openRcaServiceClient) StreamCoreMetrics(ctx context.Context, in *OrcaLoadReportRequest, opts ...grpc.CallOption) (OpenRcaService_StreamCoreMetricsClient, error) { + stream, err := c.cc.NewStream(ctx, &OpenRcaService_ServiceDesc.Streams[0], OpenRcaService_StreamCoreMetrics_FullMethodName, opts...) 
+ if err != nil { + return nil, err + } + x := &openRcaServiceStreamCoreMetricsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type OpenRcaService_StreamCoreMetricsClient interface { + Recv() (*v3.OrcaLoadReport, error) + grpc.ClientStream +} + +type openRcaServiceStreamCoreMetricsClient struct { + grpc.ClientStream +} + +func (x *openRcaServiceStreamCoreMetricsClient) Recv() (*v3.OrcaLoadReport, error) { + m := new(v3.OrcaLoadReport) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// OpenRcaServiceServer is the server API for OpenRcaService service. +// All implementations should embed UnimplementedOpenRcaServiceServer +// for forward compatibility +type OpenRcaServiceServer interface { + StreamCoreMetrics(*OrcaLoadReportRequest, OpenRcaService_StreamCoreMetricsServer) error +} + +// UnimplementedOpenRcaServiceServer should be embedded to have forward compatible implementations. +type UnimplementedOpenRcaServiceServer struct { +} + +func (UnimplementedOpenRcaServiceServer) StreamCoreMetrics(*OrcaLoadReportRequest, OpenRcaService_StreamCoreMetricsServer) error { + return status.Errorf(codes.Unimplemented, "method StreamCoreMetrics not implemented") +} + +// UnsafeOpenRcaServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to OpenRcaServiceServer will +// result in compilation errors. +type UnsafeOpenRcaServiceServer interface { + mustEmbedUnimplementedOpenRcaServiceServer() +} + +func RegisterOpenRcaServiceServer(s grpc.ServiceRegistrar, srv OpenRcaServiceServer) { + s.RegisterService(&OpenRcaService_ServiceDesc, srv) +} + +func _OpenRcaService_StreamCoreMetrics_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(OrcaLoadReportRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(OpenRcaServiceServer).StreamCoreMetrics(m, &openRcaServiceStreamCoreMetricsServer{stream}) +} + +type OpenRcaService_StreamCoreMetricsServer interface { + Send(*v3.OrcaLoadReport) error + grpc.ServerStream +} + +type openRcaServiceStreamCoreMetricsServer struct { + grpc.ServerStream +} + +func (x *openRcaServiceStreamCoreMetricsServer) Send(m *v3.OrcaLoadReport) error { + return x.ServerStream.SendMsg(m) +} + +// OpenRcaService_ServiceDesc is the grpc.ServiceDesc for OpenRcaService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var OpenRcaService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "xds.service.orca.v3.OpenRcaService", + HandlerType: (*OpenRcaServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamCoreMetrics", + Handler: _OpenRcaService_StreamCoreMetrics_Handler, + ServerStreams: true, + }, + }, + Metadata: "xds/service/orca/v3/orca.proto", +} diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go index 0d9825aff71..7299227a3d9 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go @@ -1,13 +1,12 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/type/matcher/v3/cel.proto package v3 import ( - _ "github.com/cncf/xds/go/xds/annotations/v3" v3 "github.com/cncf/xds/go/xds/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -84,26 +83,23 @@ var file_xds_type_matcher_v3_cel_proto_rawDesc = []byte{ 0x0a, 0x1d, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, - 0x76, 0x33, 0x2f, 0x63, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x73, 0x0a, 0x0a, 0x43, 0x65, 0x6c, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x65, - 0x78, 0x70, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x60, 0xd2, 0xc6, 0xa4, 0xe1, - 0x06, 0x02, 0x08, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x43, 0x65, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, - 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, - 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x15, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x63, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x73, 0x0a, 0x0a, 0x43, 0x65, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x65, 0x78, + 0x70, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x58, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x43, 0x65, 0x6c, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, + 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.go index 28a4655ed8a..5f72c8d1100 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/type/matcher/v3/domain.proto package v3 diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go index 40656caf0c2..4393bb7e292 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go @@ -1,13 +1,12 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/type/matcher/v3/http_inputs.proto package v3 import ( - _ "github.com/cncf/xds/go/xds/annotations/v3" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -65,18 +64,16 @@ var file_xds_type_matcher_v3_http_inputs_proto_rawDesc = []byte{ 0x0a, 0x25, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, - 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, - 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x1d, 0x0a, - 0x1b, 0x48, 0x74, 0x74, 0x70, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, - 0x65, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x42, 0x67, 0xd2, 0xc6, - 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x48, 0x74, 0x74, 0x70, 0x49, 0x6e, 0x70, 0x75, - 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, - 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x22, 0x1d, 0x0a, 0x1b, + 0x48, 0x74, 0x74, 0x70, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x65, + 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x42, 0x5f, 0x0a, 0x1e, 0x63, + 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x48, + 0x74, 0x74, 0x70, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, + 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.go index 7254d3ba57f..fdb6599461d 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/type/matcher/v3/ip.proto package v3 diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go index eff1ce1aa0f..d94b03b5595 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go @@ -1,13 +1,12 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/type/matcher/v3/matcher.proto package v3 import ( - _ "github.com/cncf/xds/go/xds/annotations/v3" v3 "github.com/cncf/xds/go/xds/core/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -698,149 +697,146 @@ var file_xds_type_matcher_v3_matcher_proto_rawDesc = []byte{ 0x0a, 0x21, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x78, 0x64, 0x73, 0x2f, 0x63, - 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, - 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0x80, 0x10, 0x0a, 0x07, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4d, 0x0a, - 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, - 0x0b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x0c, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1b, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xf6, 0x0f, 0x0a, 0x07, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4d, 
0x0a, 0x0c, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x48, 0x00, 0x52, 0x0b, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x6f, - 0x6e, 0x5f, 0x6e, 0x6f, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, - 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x09, 0x6f, 0x6e, 0x4e, 0x6f, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x1a, 0x91, 0x01, 0x0a, 0x07, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x38, 0x0a, - 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, - 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xb6, 0x08, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, - 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, - 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x73, 0x1a, 0x91, 0x06, 0x0a, 0x09, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x12, 0x6f, 0x0a, 0x10, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x78, 0x64, 0x73, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, - 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x53, - 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x00, - 0x52, 0x0f, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x12, 0x61, 0x0a, 0x0a, 0x6f, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x65, 
0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, - 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x09, 0x6f, 0x72, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x12, 0x63, 0x0a, 0x0b, 0x61, 0x6e, 0x64, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x78, 0x64, 0x73, 0x2e, + 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x0c, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x09, 0x6f, 0x6e, 0x4e, 0x6f, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x1a, 0x91, 0x01, 0x0a, 0x07, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x38, 0x0a, 0x07, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x12, + 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xb6, 0x08, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x73, 0x1a, 0x91, 0x06, 0x0a, 0x09, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, + 0x6f, 0x0a, 0x10, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 
0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, - 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, - 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x61, - 0x6e, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0b, 0x6e, 0x6f, 0x74, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, - 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x6e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x1a, 0xf3, 0x01, 0x0a, 0x0f, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x12, 0x41, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, - 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x45, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x78, - 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, - 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x48, 0x00, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x46, - 0x0a, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x6b, 0x0a, 0x0d, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5a, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, - 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, - 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x42, 0x11, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xb5, 0x01, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x5a, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, - 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 
0x74, 0x63, 0x68, 0x65, 0x72, - 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, - 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x08, 0xfa, 0x42, 0x05, - 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xa9, - 0x04, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x41, - 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, - 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, - 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, - 0x74, 0x12, 0x5b, 0x0a, 0x0f, 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, - 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x54, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, - 0x0d, 0x65, 0x78, 0x61, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x5d, - 0x0a, 0x10, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6d, - 0x61, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, + 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x69, + 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, + 0x0f, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x12, 0x61, 0x0a, 0x0a, 0x6f, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, + 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x09, 0x6f, 0x72, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x12, 0x63, 0x0a, 0x0b, 0x61, 0x6e, 0x64, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, - 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x0e, 0x70, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x46, 0x0a, - 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 
0x20, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, + 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x6e, + 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0b, 0x6e, 0x6f, 0x74, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, + 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x48, 0x00, 0x52, 0x0a, 0x6e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x1a, + 0xf3, 0x01, 0x0a, 0x0f, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x12, 0x41, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x45, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x78, 0x64, + 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, + 0x00, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x46, 0x0a, + 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xc0, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, - 0x61, 0x70, 0x12, 0x56, 0x0a, 0x03, 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x3a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, - 0x61, 0x70, 0x2e, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, - 0x9a, 0x01, 0x02, 0x08, 0x01, 0x52, 0x03, 0x6d, 0x61, 0x70, 0x1a, 0x5c, 0x0a, 0x08, 0x4d, 0x61, - 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x6b, 0x0a, 0x0d, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5a, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x18, 
0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, + 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x42, 0x11, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xb5, 0x01, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x5a, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, + 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x12, 0x49, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, + 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xa9, 0x04, + 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x41, 0x0a, + 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, + 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x12, 0x5b, 0x0a, 0x0f, 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, + 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, + 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x0d, + 0x65, 0x78, 0x61, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x5d, 0x0a, + 0x10, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6d, 0x61, + 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x09, 0x74, 0x72, 0x65, 0x65, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, - 0x06, 0x02, 0x08, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x42, 0x5c, 0x0a, 0x1e, 
0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, - 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, - 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, + 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x0e, 0x70, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x46, 0x0a, 0x0c, + 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x1a, 0xc0, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, + 0x70, 0x12, 0x56, 0x0a, 0x03, 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, + 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, + 0x70, 0x2e, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x9a, + 0x01, 0x02, 0x08, 0x01, 0x52, 0x03, 0x6d, 0x61, 0x70, 0x1a, 0x5c, 0x0a, 0x08, 0x4d, 0x61, 0x70, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x09, 0x74, 0x72, 0x65, 0x65, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x42, 0x5c, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, + 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.go index 
d7bc620b91a..2861768daa8 100644
--- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.go
+++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.go
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.32.0
-// 	protoc        v3.21.5
+// 	protoc-gen-go v1.33.0
+// 	protoc        v5.27.0--rc2
 // source: xds/type/matcher/v3/range.proto
 
 package v3
diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.go
index 28d3c8064ff..3dcf303ac24 100644
--- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.go
+++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.go
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.32.0
-// 	protoc        v3.21.5
+// 	protoc-gen-go v1.33.0
+// 	protoc        v5.27.0--rc2
 // source: xds/type/matcher/v3/regex.proto
 
 package v3
diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.go
index 7e1946cb16d..f9067918c73 100644
--- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.go
+++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.go
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.32.0
-// 	protoc        v3.21.5
+// 	protoc-gen-go v1.33.0
+// 	protoc        v5.27.0--rc2
 // source: xds/type/matcher/v3/string.proto
 
 package v3
diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.go
index 367054f024b..c7d42d4a94b 100644
--- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.go
+++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.go
@@ -1,18 +1,19 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.32.0
-// 	protoc        v3.21.5
+// 	protoc-gen-go v1.33.0
+// 	protoc        v5.27.0--rc2
 // source: xds/type/v3/cel.proto
 
 package v3
 
 import (
+	expr "cel.dev/expr"
 	_ "github.com/cncf/xds/go/xds/annotations/v3"
 	_ "github.com/envoyproxy/protoc-gen-validate/validate"
-	wrappers "github.com/golang/protobuf/ptypes/wrappers"
 	v1alpha1 "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
 	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
 	reflect "reflect"
 	sync "sync"
 )
@@ -33,7 +34,9 @@ type CelExpression struct {
 	//
 	//	*CelExpression_ParsedExpr
 	//	*CelExpression_CheckedExpr
-	ExprSpecifier isCelExpression_ExprSpecifier `protobuf_oneof:"expr_specifier"`
+	ExprSpecifier  isCelExpression_ExprSpecifier `protobuf_oneof:"expr_specifier"`
+	CelExprParsed  *expr.ParsedExpr              `protobuf:"bytes,3,opt,name=cel_expr_parsed,json=celExprParsed,proto3" json:"cel_expr_parsed,omitempty"`
+	CelExprChecked *expr.CheckedExpr             `protobuf:"bytes,4,opt,name=cel_expr_checked,json=celExprChecked,proto3" json:"cel_expr_checked,omitempty"`
 }
 
 func (x *CelExpression) Reset() {
@@ -75,6 +78,7 @@ func (m *CelExpression) GetExprSpecifier() isCelExpression_ExprSpecifier {
 	return nil
 }
 
+// Deprecated: Marked as deprecated in xds/type/v3/cel.proto.
 func (x *CelExpression) GetParsedExpr() *v1alpha1.ParsedExpr {
 	if x, ok := x.GetExprSpecifier().(*CelExpression_ParsedExpr); ok {
 		return x.ParsedExpr
@@ -82,6 +86,7 @@ func (x *CelExpression) GetParsedExpr() *v1alpha1.ParsedExpr {
 	return nil
 }
 
+// Deprecated: Marked as deprecated in xds/type/v3/cel.proto.
 func (x *CelExpression) GetCheckedExpr() *v1alpha1.CheckedExpr {
 	if x, ok := x.GetExprSpecifier().(*CelExpression_CheckedExpr); ok {
 		return x.CheckedExpr
@@ -89,15 +94,31 @@ func (x *CelExpression) GetCheckedExpr() *v1alpha1.CheckedExpr {
 	return nil
 }
 
+func (x *CelExpression) GetCelExprParsed() *expr.ParsedExpr {
+	if x != nil {
+		return x.CelExprParsed
+	}
+	return nil
+}
+
+func (x *CelExpression) GetCelExprChecked() *expr.CheckedExpr {
+	if x != nil {
+		return x.CelExprChecked
+	}
+	return nil
+}
+
 type isCelExpression_ExprSpecifier interface {
 	isCelExpression_ExprSpecifier()
 }
 
 type CelExpression_ParsedExpr struct {
+	// Deprecated: Marked as deprecated in xds/type/v3/cel.proto.
 	ParsedExpr *v1alpha1.ParsedExpr `protobuf:"bytes,1,opt,name=parsed_expr,json=parsedExpr,proto3,oneof"`
 }
 
 type CelExpression_CheckedExpr struct {
+	// Deprecated: Marked as deprecated in xds/type/v3/cel.proto.
CheckedExpr *v1alpha1.CheckedExpr `protobuf:"bytes,2,opt,name=checked_expr,json=checkedExpr,proto3,oneof"` } @@ -110,8 +131,8 @@ type CelExtractString struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ExprExtract *CelExpression `protobuf:"bytes,1,opt,name=expr_extract,json=exprExtract,proto3" json:"expr_extract,omitempty"` - DefaultValue *wrappers.StringValue `protobuf:"bytes,2,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + ExprExtract *CelExpression `protobuf:"bytes,1,opt,name=expr_extract,json=exprExtract,proto3" json:"expr_extract,omitempty"` + DefaultValue *wrapperspb.StringValue `protobuf:"bytes,2,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` } func (x *CelExtractString) Reset() { @@ -153,7 +174,7 @@ func (x *CelExtractString) GetExprExtract() *CelExpression { return nil } -func (x *CelExtractString) GetDefaultValue() *wrappers.StringValue { +func (x *CelExtractString) GetDefaultValue() *wrapperspb.StringValue { if x != nil { return x.DefaultValue } @@ -170,40 +191,51 @@ var file_xds_type_v3_cel_proto_rawDesc = []byte{ 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x01, - 0x0a, 0x0d, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x47, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x73, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, - 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x70, 0x61, - 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4a, 0x0a, 0x0c, 0x63, 0x68, 0x65, 0x63, - 0x6b, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, - 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, - 0x64, 0x45, 0x78, 0x70, 0x72, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, - 0x45, 0x78, 0x70, 0x72, 0x42, 0x15, 0x0a, 0x0e, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x70, 0x65, - 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x9e, 0x01, 0x0a, 0x10, - 0x43, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x12, 0x47, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x70, 
0x72, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x65, 0x78, - 0x70, 0x72, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x12, 0x41, 0x0a, 0x0d, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x50, 0xd2, 0xc6, - 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x08, - 0x43, 0x65, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, - 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6f, 0x74, 0x6f, 0x1a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x63, 0x65, 0x6c, + 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbd, 0x02, 0x0a, + 0x0d, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x4b, + 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x73, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, + 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, + 0x0a, 0x70, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4e, 0x0a, 0x0c, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, + 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0b, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x3c, 0x0a, 0x0f, 0x63, + 0x65, 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x70, 0x61, 0x72, 0x73, 0x65, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x63, 0x65, 0x6c, 0x45, + 0x78, 0x70, 0x72, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x12, 0x3f, 0x0a, 0x10, 0x63, 0x65, 0x6c, + 0x5f, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x63, 0x68, 0x65, 
0x63, 0x6b, 0x65, 0x64, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0e, 0x63, 0x65, 0x6c, 0x45, + 0x78, 0x70, 0x72, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x42, 0x10, 0x0a, 0x0e, 0x65, 0x78, + 0x70, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x22, 0x9e, 0x01, 0x0a, + 0x10, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x12, 0x47, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x63, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x65, + 0x78, 0x70, 0x72, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x12, 0x41, 0x0a, 0x0d, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x50, 0xd2, + 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, + 0x08, 0x43, 0x65, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, + 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -220,22 +252,26 @@ func file_xds_type_v3_cel_proto_rawDescGZIP() []byte { var file_xds_type_v3_cel_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_xds_type_v3_cel_proto_goTypes = []interface{}{ - (*CelExpression)(nil), // 0: xds.type.v3.CelExpression - (*CelExtractString)(nil), // 1: xds.type.v3.CelExtractString - (*v1alpha1.ParsedExpr)(nil), // 2: google.api.expr.v1alpha1.ParsedExpr - (*v1alpha1.CheckedExpr)(nil), // 3: google.api.expr.v1alpha1.CheckedExpr - (*wrappers.StringValue)(nil), // 4: google.protobuf.StringValue + (*CelExpression)(nil), // 0: xds.type.v3.CelExpression + (*CelExtractString)(nil), // 1: xds.type.v3.CelExtractString + (*v1alpha1.ParsedExpr)(nil), // 2: google.api.expr.v1alpha1.ParsedExpr + (*v1alpha1.CheckedExpr)(nil), // 3: google.api.expr.v1alpha1.CheckedExpr + (*expr.ParsedExpr)(nil), // 4: cel.expr.ParsedExpr + (*expr.CheckedExpr)(nil), // 5: cel.expr.CheckedExpr + (*wrapperspb.StringValue)(nil), // 6: google.protobuf.StringValue } var file_xds_type_v3_cel_proto_depIdxs = []int32{ 2, // 0: xds.type.v3.CelExpression.parsed_expr:type_name -> google.api.expr.v1alpha1.ParsedExpr 3, // 1: xds.type.v3.CelExpression.checked_expr:type_name -> google.api.expr.v1alpha1.CheckedExpr - 0, // 2: xds.type.v3.CelExtractString.expr_extract:type_name -> xds.type.v3.CelExpression - 4, // 3: xds.type.v3.CelExtractString.default_value:type_name -> google.protobuf.StringValue - 4, // [4:4] is the sub-list for method output_type - 4, // [4:4] is the sub-list for method input_type - 4, // [4:4] is the sub-list for extension type_name - 4, // [4:4] is the 
sub-list for extension extendee
-	0, // [0:4] is the sub-list for field type_name
+	4, // 2: xds.type.v3.CelExpression.cel_expr_parsed:type_name -> cel.expr.ParsedExpr
+	5, // 3: xds.type.v3.CelExpression.cel_expr_checked:type_name -> cel.expr.CheckedExpr
+	0, // 4: xds.type.v3.CelExtractString.expr_extract:type_name -> xds.type.v3.CelExpression
+	6, // 5: xds.type.v3.CelExtractString.default_value:type_name -> google.protobuf.StringValue
+	6, // [6:6] is the sub-list for method output_type
+	6, // [6:6] is the sub-list for method input_type
+	6, // [6:6] is the sub-list for extension type_name
+	6, // [6:6] is the sub-list for extension extendee
+	0, // [0:6] is the sub-list for field type_name
 }
 
 func init() { file_xds_type_v3_cel_proto_init() }
diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.validate.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.validate.go
index 852efc9286e..0855edee9bb 100644
--- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.validate.go
+++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.validate.go
@@ -57,7 +57,64 @@ func (m *CelExpression) validate(all bool) error {
 
 	var errors []error
 
-	oneofExprSpecifierPresent := false
+	if all {
+		switch v := interface{}(m.GetCelExprParsed()).(type) {
+		case interface{ ValidateAll() error }:
+			if err := v.ValidateAll(); err != nil {
+				errors = append(errors, CelExpressionValidationError{
+					field: "CelExprParsed",
+					reason: "embedded message failed validation",
+					cause: err,
+				})
+			}
+		case interface{ Validate() error }:
+			if err := v.Validate(); err != nil {
+				errors = append(errors, CelExpressionValidationError{
+					field: "CelExprParsed",
+					reason: "embedded message failed validation",
+					cause: err,
+				})
+			}
+		}
+	} else if v, ok := interface{}(m.GetCelExprParsed()).(interface{ Validate() error }); ok {
+		if err := v.Validate(); err != nil {
+			return CelExpressionValidationError{
+				field: "CelExprParsed",
+				reason: "embedded message failed validation",
+				cause: err,
+			}
+		}
+	}
+
+	if all {
+		switch v := interface{}(m.GetCelExprChecked()).(type) {
+		case interface{ ValidateAll() error }:
+			if err := v.ValidateAll(); err != nil {
+				errors = append(errors, CelExpressionValidationError{
+					field: "CelExprChecked",
+					reason: "embedded message failed validation",
+					cause: err,
+				})
+			}
+		case interface{ Validate() error }:
+			if err := v.Validate(); err != nil {
+				errors = append(errors, CelExpressionValidationError{
+					field: "CelExprChecked",
+					reason: "embedded message failed validation",
+					cause: err,
+				})
+			}
+		}
+	} else if v, ok := interface{}(m.GetCelExprChecked()).(interface{ Validate() error }); ok {
+		if err := v.Validate(); err != nil {
+			return CelExpressionValidationError{
+				field: "CelExprChecked",
+				reason: "embedded message failed validation",
+				cause: err,
+			}
+		}
+	}
+
 	switch v := m.ExprSpecifier.(type) {
 	case *CelExpression_ParsedExpr:
 		if v == nil {
@@ -70,7 +127,6 @@ func (m *CelExpression) validate(all bool) error {
 			}
 			errors = append(errors, err)
 		}
-		oneofExprSpecifierPresent = true
 
 		if all {
 			switch v := interface{}(m.GetParsedExpr()).(type) {
@@ -112,7 +168,6 @@
 			}
 			errors = append(errors, err)
 		}
-		oneofExprSpecifierPresent = true
 
 		if all {
 			switch v := interface{}(m.GetCheckedExpr()).(type) {
@@ -146,16 +201,6 @@
 	default:
 		_ = v // ensures v is used
 	}
-	if !oneofExprSpecifierPresent {
-		err := CelExpressionValidationError{
-			field: "ExprSpecifier",
-			reason: "value is required",
-		}
-		if !all {
-			return err
-		}
-		errors = append(errors, err)
-	}
 
 	if len(errors) > 0 {
 		return CelExpressionMultiError(errors)
diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.go
index bebf344856b..ca9d3e1b7f8 100644
--- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.go
+++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.go
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// protoc-gen-go v1.32.0
-// protoc v3.21.5
+// protoc-gen-go v1.33.0
+// protoc v5.27.0--rc2
 // source: xds/type/v3/range.proto
 
 package v3
diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.go
index 6c81b2375f7..72ec85ed600 100644
--- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.go
+++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.go
@@ -1,15 +1,15 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// protoc-gen-go v1.32.0
-// protoc v3.21.5
+// protoc-gen-go v1.33.0
+// protoc v5.27.0--rc2
 // source: xds/type/v3/typed_struct.proto
 
 package v3
 
 import (
-	_struct "github.com/golang/protobuf/ptypes/struct"
 	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
 	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	structpb "google.golang.org/protobuf/types/known/structpb"
 	reflect "reflect"
 	sync "sync"
 )
@@ -26,8 +26,8 @@ type TypedStruct struct {
 	sizeCache protoimpl.SizeCache
 	unknownFields protoimpl.UnknownFields
 
-	TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
-	Value *_struct.Struct `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
+	Value *structpb.Struct `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
 }
 
 func (x *TypedStruct) Reset() {
@@ -69,7 +69,7 @@ func (x *TypedStruct) GetTypeUrl() string {
 	return ""
 }
 
-func (x *TypedStruct) GetValue() *_struct.Struct {
+func (x *TypedStruct) GetValue() *structpb.Struct {
 	if x != nil {
 		return x.Value
 	}
@@ -111,8 +111,8 @@ func file_xds_type_v3_typed_struct_proto_rawDescGZIP() []byte {
 
 var file_xds_type_v3_typed_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
 var file_xds_type_v3_typed_struct_proto_goTypes = []interface{}{
-	(*TypedStruct)(nil), // 0: xds.type.v3.TypedStruct
-	(*_struct.Struct)(nil), // 1: google.protobuf.Struct
+	(*TypedStruct)(nil), // 0: xds.type.v3.TypedStruct
+	(*structpb.Struct)(nil), // 1: google.protobuf.Struct
 }
 var file_xds_type_v3_typed_struct_proto_depIdxs = []int32{
 	1, // 0: xds.type.v3.TypedStruct.value:type_name -> google.protobuf.Struct
diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.go
index 85d524d8f86..13d644dba61 100644
--- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.go
+++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.go
@@ -1,16 +1,16 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // protoc-gen-go v1.30.0
-// protoc v4.23.4
+// protoc v5.26.1
 // source: envoy/admin/v3/certs.proto
 
 package adminv3
 
 import (
 	_ "github.com/cncf/xds/go/udpa/annotations"
-	timestamp "github.com/golang/protobuf/ptypes/timestamp"
 	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
 	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
 	reflect "reflect"
 	sync "sync"
 )
@@ -145,9 +145,9 @@ type CertificateDetails struct {
 	// Minimum of days until expiration of certificate and it's chain.
 	DaysUntilExpiration uint64 `protobuf:"varint,4,opt,name=days_until_expiration,json=daysUntilExpiration,proto3" json:"days_until_expiration,omitempty"`
 	// Indicates the time from which the certificate is valid.
-	ValidFrom *timestamp.Timestamp `protobuf:"bytes,5,opt,name=valid_from,json=validFrom,proto3" json:"valid_from,omitempty"`
+	ValidFrom *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=valid_from,json=validFrom,proto3" json:"valid_from,omitempty"`
 	// Indicates the time at which the certificate expires.
-	ExpirationTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=expiration_time,json=expirationTime,proto3" json:"expiration_time,omitempty"`
+	ExpirationTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=expiration_time,json=expirationTime,proto3" json:"expiration_time,omitempty"`
 	// Details related to the OCSP response associated with this certificate, if any.
 	OcspDetails *CertificateDetails_OcspDetails `protobuf:"bytes,7,opt,name=ocsp_details,json=ocspDetails,proto3" json:"ocsp_details,omitempty"`
 }
@@ -212,14 +212,14 @@ func (x *CertificateDetails) GetDaysUntilExpiration() uint64 {
 	return 0
 }
 
-func (x *CertificateDetails) GetValidFrom() *timestamp.Timestamp {
+func (x *CertificateDetails) GetValidFrom() *timestamppb.Timestamp {
 	if x != nil {
 		return x.ValidFrom
 	}
 	return nil
 }
 
-func (x *CertificateDetails) GetExpirationTime() *timestamp.Timestamp {
+func (x *CertificateDetails) GetExpirationTime() *timestamppb.Timestamp {
 	if x != nil {
 		return x.ExpirationTime
 	}
@@ -336,9 +336,9 @@ type CertificateDetails_OcspDetails struct {
 	unknownFields protoimpl.UnknownFields
 
 	// Indicates the time from which the OCSP response is valid.
-	ValidFrom *timestamp.Timestamp `protobuf:"bytes,1,opt,name=valid_from,json=validFrom,proto3" json:"valid_from,omitempty"`
+	ValidFrom *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=valid_from,json=validFrom,proto3" json:"valid_from,omitempty"`
 	// Indicates the time at which the OCSP response expires.
- Expiration *timestamp.Timestamp `protobuf:"bytes,2,opt,name=expiration,proto3" json:"expiration,omitempty"` + Expiration *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expiration,proto3" json:"expiration,omitempty"` } func (x *CertificateDetails_OcspDetails) Reset() { @@ -373,14 +373,14 @@ func (*CertificateDetails_OcspDetails) Descriptor() ([]byte, []int) { return file_envoy_admin_v3_certs_proto_rawDescGZIP(), []int{2, 0} } -func (x *CertificateDetails_OcspDetails) GetValidFrom() *timestamp.Timestamp { +func (x *CertificateDetails_OcspDetails) GetValidFrom() *timestamppb.Timestamp { if x != nil { return x.ValidFrom } return nil } -func (x *CertificateDetails_OcspDetails) GetExpiration() *timestamp.Timestamp { +func (x *CertificateDetails_OcspDetails) GetExpiration() *timestamppb.Timestamp { if x != nil { return x.Expiration } @@ -495,7 +495,7 @@ var file_envoy_admin_v3_certs_proto_goTypes = []interface{}{ (*CertificateDetails)(nil), // 2: envoy.admin.v3.CertificateDetails (*SubjectAlternateName)(nil), // 3: envoy.admin.v3.SubjectAlternateName (*CertificateDetails_OcspDetails)(nil), // 4: envoy.admin.v3.CertificateDetails.OcspDetails - (*timestamp.Timestamp)(nil), // 5: google.protobuf.Timestamp + (*timestamppb.Timestamp)(nil), // 5: google.protobuf.Timestamp } var file_envoy_admin_v3_certs_proto_depIdxs = []int32{ 1, // 0: envoy.admin.v3.Certificates.certificates:type_name -> envoy.admin.v3.Certificate diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.validate.go index 4294f96506b..413895689e7 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/admin/v3/certs.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs_vtproto.pb.go new file mode 100644 index 00000000000..3c325787d2c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs_vtproto.pb.go @@ -0,0 +1,504 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/certs.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + timestamppb "github.com/planetscale/vtprotobuf/types/known/timestamppb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Certificates) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Certificates) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Certificates) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Certificates) > 0 { + for iNdEx := len(m.Certificates) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Certificates[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Certificate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Certificate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Certificate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CertChain) > 0 { + for iNdEx := len(m.CertChain) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.CertChain[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.CaCert) > 0 { + for iNdEx := len(m.CaCert) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.CaCert[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CertificateDetails_OcspDetails) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CertificateDetails_OcspDetails) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CertificateDetails_OcspDetails) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Expiration != nil { + size, err := (*timestamppb.Timestamp)(m.Expiration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.ValidFrom != nil { + size, err := (*timestamppb.Timestamp)(m.ValidFrom).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CertificateDetails) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CertificateDetails) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CertificateDetails) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.OcspDetails != nil { + size, err := m.OcspDetails.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.ExpirationTime != nil { + size, err := (*timestamppb.Timestamp)(m.ExpirationTime).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.ValidFrom != nil { + size, err := (*timestamppb.Timestamp)(m.ValidFrom).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.DaysUntilExpiration != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DaysUntilExpiration)) + i-- + dAtA[i] = 0x20 + } + if len(m.SubjectAltNames) > 0 { + for iNdEx := len(m.SubjectAltNames) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.SubjectAltNames[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.SerialNumber) > 0 { + i -= len(m.SerialNumber) + copy(dAtA[i:], m.SerialNumber) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SerialNumber))) + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SubjectAlternateName) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectAlternateName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubjectAlternateName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Name.(*SubjectAlternateName_IpAddress); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Name.(*SubjectAlternateName_Uri); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Name.(*SubjectAlternateName_Dns); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err 
!= nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *SubjectAlternateName_Dns) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubjectAlternateName_Dns) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Dns) + copy(dAtA[i:], m.Dns) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Dns))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *SubjectAlternateName_Uri) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubjectAlternateName_Uri) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Uri) + copy(dAtA[i:], m.Uri) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Uri))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *SubjectAlternateName_IpAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubjectAlternateName_IpAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.IpAddress) + copy(dAtA[i:], m.IpAddress) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.IpAddress))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *Certificates) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Certificates) > 0 { + for _, e := range m.Certificates { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Certificate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.CaCert) > 0 { + for _, e := range m.CaCert { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.CertChain) > 0 { + for _, e := range m.CertChain { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *CertificateDetails_OcspDetails) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValidFrom != nil { + l = (*timestamppb.Timestamp)(m.ValidFrom).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Expiration != nil { + l = (*timestamppb.Timestamp)(m.Expiration).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CertificateDetails) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.SerialNumber) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.SubjectAltNames) > 0 { + for _, e := range m.SubjectAltNames { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.DaysUntilExpiration != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.DaysUntilExpiration)) + } + if m.ValidFrom != nil { + l = (*timestamppb.Timestamp)(m.ValidFrom).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ExpirationTime != nil { + l = (*timestamppb.Timestamp)(m.ExpirationTime).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OcspDetails != nil { + l = m.OcspDetails.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SubjectAlternateName) SizeVT() (n int) { + if m == nil { 
+ return 0 + } + var l int + _ = l + if vtmsg, ok := m.Name.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *SubjectAlternateName_Dns) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Dns) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *SubjectAlternateName_Uri) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Uri) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *SubjectAlternateName_IpAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.IpAddress) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.go index d3ddba25414..06b79187fd6 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/admin/v3/clusters.proto package adminv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.validate.go index a147a9b7578..d7658a09faf 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/admin/v3/clusters.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters_vtproto.pb.go new file mode 100644 index 00000000000..418581107ce --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters_vtproto.pb.go @@ -0,0 +1,656 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/clusters.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Clusters) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Clusters) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Clusters) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ClusterStatuses) > 0 { + for iNdEx := len(m.ClusterStatuses) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ClusterStatuses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ClusterStatus) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterStatus) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.EdsServiceName) > 0 { + i -= len(m.EdsServiceName) + copy(dAtA[i:], m.EdsServiceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.EdsServiceName))) + i-- + dAtA[i] = 0x42 + } + if len(m.ObservabilityName) > 0 { + i -= len(m.ObservabilityName) + copy(dAtA[i:], m.ObservabilityName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ObservabilityName))) + i-- + dAtA[i] = 0x3a + } + if m.CircuitBreakers != nil { + if vtmsg, ok := interface{}(m.CircuitBreakers).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CircuitBreakers) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + if m.LocalOriginSuccessRateEjectionThreshold != nil { + if vtmsg, ok := interface{}(m.LocalOriginSuccessRateEjectionThreshold).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LocalOriginSuccessRateEjectionThreshold) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + if len(m.HostStatuses) > 0 { + for iNdEx := len(m.HostStatuses) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.HostStatuses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if m.SuccessRateEjectionThreshold != nil { + if vtmsg, ok := interface{}(m.SuccessRateEjectionThreshold).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SuccessRateEjectionThreshold) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.AddedViaApi { + i-- + if m.AddedViaApi { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HostStatus) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostStatus) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HostStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Locality != nil { + if vtmsg, ok := interface{}(m.Locality).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Locality) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + if m.LocalOriginSuccessRate != nil { + if vtmsg, ok := interface{}(m.LocalOriginSuccessRate).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LocalOriginSuccessRate) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + if m.Priority != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x38 + } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x32 + } + if m.Weight != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Weight)) + i-- + dAtA[i] = 0x28 + } + if m.SuccessRate != nil { + if vtmsg, ok := interface{}(m.SuccessRate).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + 
encoded, err := proto.Marshal(m.SuccessRate) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.HealthStatus != nil { + size, err := m.HealthStatus.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Stats) > 0 { + for iNdEx := len(m.Stats) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Stats[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HostHealthStatus) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostHealthStatus) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HostHealthStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ActiveHcTimeout { + i-- + if m.ActiveHcTimeout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.ExcludedViaImmediateHcFail { + i-- + if m.ExcludedViaImmediateHcFail { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.PendingActiveHc { + i-- + if m.PendingActiveHc { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.PendingDynamicRemoval { + i-- + if m.PendingDynamicRemoval { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.FailedActiveDegradedCheck { + i-- + if m.FailedActiveDegradedCheck { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.EdsHealthStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.EdsHealthStatus)) + i-- + dAtA[i] = 0x18 + } + if m.FailedOutlierCheck { + i-- + if m.FailedOutlierCheck { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.FailedActiveHealthCheck { + i-- + if m.FailedActiveHealthCheck { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Clusters) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ClusterStatuses) > 0 { + for _, e := range m.ClusterStatuses { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ClusterStatus) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + if m.AddedViaApi { + n += 2 + } + if m.SuccessRateEjectionThreshold != nil { + if size, ok := interface{}(m.SuccessRateEjectionThreshold).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SuccessRateEjectionThreshold) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.HostStatuses) > 0 { + for _, e := range m.HostStatuses { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.LocalOriginSuccessRateEjectionThreshold != nil { + if size, ok := interface{}(m.LocalOriginSuccessRateEjectionThreshold).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LocalOriginSuccessRateEjectionThreshold) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CircuitBreakers != nil { + if size, ok := interface{}(m.CircuitBreakers).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CircuitBreakers) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ObservabilityName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.EdsServiceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HostStatus) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Stats) > 0 { + for _, e := range m.Stats { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.HealthStatus != nil { + l = m.HealthStatus.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SuccessRate != nil { + if size, ok := interface{}(m.SuccessRate).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SuccessRate) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Weight != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Weight)) + } + l = len(m.Hostname) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Priority != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Priority)) + } + if m.LocalOriginSuccessRate != nil { + if size, ok := interface{}(m.LocalOriginSuccessRate).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LocalOriginSuccessRate) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Locality != nil { + if size, ok := interface{}(m.Locality).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Locality) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HostHealthStatus) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FailedActiveHealthCheck { + n += 2 + } + if m.FailedOutlierCheck { + n += 2 + } + if m.EdsHealthStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.EdsHealthStatus)) + } + if m.FailedActiveDegradedCheck { + n += 2 + } + if m.PendingDynamicRemoval { + n += 2 + } + if m.PendingActiveHc { + n += 2 + } + if m.ExcludedViaImmediateHcFail { + n += 2 + } + if m.ActiveHcTimeout { + n += 2 + } + n += len(m.unknownFields) + return n +} diff --git 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.go index 445fad63ff5..ef711d966f8 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/admin/v3/config_dump.proto package adminv3 @@ -9,10 +9,10 @@ package adminv3 import ( _ "github.com/cncf/xds/go/udpa/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3" - any1 "github.com/golang/protobuf/ptypes/any" - timestamp "github.com/golang/protobuf/ptypes/timestamp" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" ) @@ -58,7 +58,7 @@ type ConfigDump struct { // :ref:`/config_dump?mask={} `, // or :ref:`/config_dump?resource={},mask={} // ` for more information. - Configs []*any1.Any `protobuf:"bytes,1,rep,name=configs,proto3" json:"configs,omitempty"` + Configs []*anypb.Any `protobuf:"bytes,1,rep,name=configs,proto3" json:"configs,omitempty"` } func (x *ConfigDump) Reset() { @@ -93,7 +93,7 @@ func (*ConfigDump) Descriptor() ([]byte, []int) { return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{0} } -func (x *ConfigDump) GetConfigs() []*any1.Any { +func (x *ConfigDump) GetConfigs() []*anypb.Any { if x != nil { return x.Configs } @@ -111,7 +111,7 @@ type BootstrapConfigDump struct { Bootstrap *v3.Bootstrap `protobuf:"bytes,1,opt,name=bootstrap,proto3" json:"bootstrap,omitempty"` // The timestamp when the BootstrapConfig was last updated. - LastUpdated *timestamp.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` } func (x *BootstrapConfigDump) Reset() { @@ -153,7 +153,7 @@ func (x *BootstrapConfigDump) GetBootstrap() *v3.Bootstrap { return nil } -func (x *BootstrapConfigDump) GetLastUpdated() *timestamp.Timestamp { +func (x *BootstrapConfigDump) GetLastUpdated() *timestamppb.Timestamp { if x != nil { return x.LastUpdated } @@ -241,11 +241,11 @@ type SecretsConfigDump_DynamicSecret struct { // This is the per-resource version information. VersionInfo string `protobuf:"bytes,2,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` // The timestamp when the secret was last updated. - LastUpdated *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` // The actual secret information. // Security sensitive information is redacted (replaced with "[redacted]") for // private keys and passwords in TLS certificates. 
- Secret *any1.Any `protobuf:"bytes,4,opt,name=secret,proto3" json:"secret,omitempty"` + Secret *anypb.Any `protobuf:"bytes,4,opt,name=secret,proto3" json:"secret,omitempty"` // Set if the last update failed, cleared after the next successful update. // The *error_state* field contains the rejected version of this particular // resource along with the reason and timestamp. For successfully updated or @@ -303,14 +303,14 @@ func (x *SecretsConfigDump_DynamicSecret) GetVersionInfo() string { return "" } -func (x *SecretsConfigDump_DynamicSecret) GetLastUpdated() *timestamp.Timestamp { +func (x *SecretsConfigDump_DynamicSecret) GetLastUpdated() *timestamppb.Timestamp { if x != nil { return x.LastUpdated } return nil } -func (x *SecretsConfigDump_DynamicSecret) GetSecret() *any1.Any { +func (x *SecretsConfigDump_DynamicSecret) GetSecret() *anypb.Any { if x != nil { return x.Secret } @@ -340,11 +340,11 @@ type SecretsConfigDump_StaticSecret struct { // The name assigned to the secret. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // The timestamp when the secret was last updated. - LastUpdated *timestamp.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` // The actual secret information. // Security sensitive information is redacted (replaced with "[redacted]") for // private keys and passwords in TLS certificates. - Secret *any1.Any `protobuf:"bytes,3,opt,name=secret,proto3" json:"secret,omitempty"` + Secret *anypb.Any `protobuf:"bytes,3,opt,name=secret,proto3" json:"secret,omitempty"` } func (x *SecretsConfigDump_StaticSecret) Reset() { @@ -386,14 +386,14 @@ func (x *SecretsConfigDump_StaticSecret) GetName() string { return "" } -func (x *SecretsConfigDump_StaticSecret) GetLastUpdated() *timestamp.Timestamp { +func (x *SecretsConfigDump_StaticSecret) GetLastUpdated() *timestamppb.Timestamp { if x != nil { return x.LastUpdated } return nil } -func (x *SecretsConfigDump_StaticSecret) GetSecret() *any1.Any { +func (x *SecretsConfigDump_StaticSecret) GetSecret() *anypb.Any { if x != nil { return x.Secret } @@ -527,9 +527,9 @@ var file_envoy_admin_v3_config_dump_proto_goTypes = []interface{}{ (*SecretsConfigDump)(nil), // 2: envoy.admin.v3.SecretsConfigDump (*SecretsConfigDump_DynamicSecret)(nil), // 3: envoy.admin.v3.SecretsConfigDump.DynamicSecret (*SecretsConfigDump_StaticSecret)(nil), // 4: envoy.admin.v3.SecretsConfigDump.StaticSecret - (*any1.Any)(nil), // 5: google.protobuf.Any + (*anypb.Any)(nil), // 5: google.protobuf.Any (*v3.Bootstrap)(nil), // 6: envoy.config.bootstrap.v3.Bootstrap - (*timestamp.Timestamp)(nil), // 7: google.protobuf.Timestamp + (*timestamppb.Timestamp)(nil), // 7: google.protobuf.Timestamp (*UpdateFailureState)(nil), // 8: envoy.admin.v3.UpdateFailureState (ClientResourceStatus)(0), // 9: envoy.admin.v3.ClientResourceStatus } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.validate.go index 57d1a77f06a..6f494af0b66 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.validate.go @@ -1,3 +1,4 
@@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/admin/v3/config_dump.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.go index 9180be6df78..feb0921ae23 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.go @@ -1,17 +1,17 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/admin/v3/config_dump_shared.proto package adminv3 import ( _ "github.com/cncf/xds/go/udpa/annotations" - any1 "github.com/golang/protobuf/ptypes/any" - timestamp "github.com/golang/protobuf/ptypes/timestamp" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" ) @@ -98,9 +98,9 @@ type UpdateFailureState struct { // What the component configuration would have been if the update had succeeded. // This field may not be populated by xDS clients due to storage overhead. - FailedConfiguration *any1.Any `protobuf:"bytes,1,opt,name=failed_configuration,json=failedConfiguration,proto3" json:"failed_configuration,omitempty"` + FailedConfiguration *anypb.Any `protobuf:"bytes,1,opt,name=failed_configuration,json=failedConfiguration,proto3" json:"failed_configuration,omitempty"` // Time of the latest failed update attempt. - LastUpdateAttempt *timestamp.Timestamp `protobuf:"bytes,2,opt,name=last_update_attempt,json=lastUpdateAttempt,proto3" json:"last_update_attempt,omitempty"` + LastUpdateAttempt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_update_attempt,json=lastUpdateAttempt,proto3" json:"last_update_attempt,omitempty"` // Details about the last failed update attempt. Details string `protobuf:"bytes,3,opt,name=details,proto3" json:"details,omitempty"` // This is the version of the rejected resource. @@ -140,14 +140,14 @@ func (*UpdateFailureState) Descriptor() ([]byte, []int) { return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{0} } -func (x *UpdateFailureState) GetFailedConfiguration() *any1.Any { +func (x *UpdateFailureState) GetFailedConfiguration() *anypb.Any { if x != nil { return x.FailedConfiguration } return nil } -func (x *UpdateFailureState) GetLastUpdateAttempt() *timestamp.Timestamp { +func (x *UpdateFailureState) GetLastUpdateAttempt() *timestamppb.Timestamp { if x != nil { return x.LastUpdateAttempt } @@ -565,9 +565,9 @@ type ListenersConfigDump_StaticListener struct { unknownFields protoimpl.UnknownFields // The listener config. - Listener *any1.Any `protobuf:"bytes,1,opt,name=listener,proto3" json:"listener,omitempty"` + Listener *anypb.Any `protobuf:"bytes,1,opt,name=listener,proto3" json:"listener,omitempty"` // The timestamp when the Listener was last successfully updated. 
- LastUpdated *timestamp.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` } func (x *ListenersConfigDump_StaticListener) Reset() { @@ -602,14 +602,14 @@ func (*ListenersConfigDump_StaticListener) Descriptor() ([]byte, []int) { return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{1, 0} } -func (x *ListenersConfigDump_StaticListener) GetListener() *any1.Any { +func (x *ListenersConfigDump_StaticListener) GetListener() *anypb.Any { if x != nil { return x.Listener } return nil } -func (x *ListenersConfigDump_StaticListener) GetLastUpdated() *timestamp.Timestamp { +func (x *ListenersConfigDump_StaticListener) GetLastUpdated() *timestamppb.Timestamp { if x != nil { return x.LastUpdated } @@ -627,9 +627,9 @@ type ListenersConfigDump_DynamicListenerState struct { // by the API. VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` // The listener config. - Listener *any1.Any `protobuf:"bytes,2,opt,name=listener,proto3" json:"listener,omitempty"` + Listener *anypb.Any `protobuf:"bytes,2,opt,name=listener,proto3" json:"listener,omitempty"` // The timestamp when the Listener was last successfully updated. - LastUpdated *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` } func (x *ListenersConfigDump_DynamicListenerState) Reset() { @@ -671,14 +671,14 @@ func (x *ListenersConfigDump_DynamicListenerState) GetVersionInfo() string { return "" } -func (x *ListenersConfigDump_DynamicListenerState) GetListener() *any1.Any { +func (x *ListenersConfigDump_DynamicListenerState) GetListener() *anypb.Any { if x != nil { return x.Listener } return nil } -func (x *ListenersConfigDump_DynamicListenerState) GetLastUpdated() *timestamp.Timestamp { +func (x *ListenersConfigDump_DynamicListenerState) GetLastUpdated() *timestamppb.Timestamp { if x != nil { return x.LastUpdated } @@ -798,9 +798,9 @@ type ClustersConfigDump_StaticCluster struct { unknownFields protoimpl.UnknownFields // The cluster config. - Cluster *any1.Any `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` + Cluster *anypb.Any `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` // The timestamp when the Cluster was last updated. 
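// Note: consumers that previously decoded these Any fields with
// ptypes.UnmarshalAny can use the canonical anypb API instead; a hypothetical
// reader for the static cluster entry above (assuming imports of
// google.golang.org/protobuf/proto and .../types/known/anypb):
//
//	func unpackCluster(c *ClustersConfigDump_StaticCluster) (proto.Message, error) {
//		// UnmarshalNew resolves the Any's type URL against the global type
//		// registry and returns a freshly allocated message of that type;
//		// it returns an error on a nil or empty Any rather than panicking.
//		return anypb.UnmarshalNew(c.GetCluster(), proto.UnmarshalOptions{})
//	}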
- LastUpdated *timestamp.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` } func (x *ClustersConfigDump_StaticCluster) Reset() { @@ -835,14 +835,14 @@ func (*ClustersConfigDump_StaticCluster) Descriptor() ([]byte, []int) { return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{2, 0} } -func (x *ClustersConfigDump_StaticCluster) GetCluster() *any1.Any { +func (x *ClustersConfigDump_StaticCluster) GetCluster() *anypb.Any { if x != nil { return x.Cluster } return nil } -func (x *ClustersConfigDump_StaticCluster) GetLastUpdated() *timestamp.Timestamp { +func (x *ClustersConfigDump_StaticCluster) GetLastUpdated() *timestamppb.Timestamp { if x != nil { return x.LastUpdated } @@ -862,9 +862,9 @@ type ClustersConfigDump_DynamicCluster struct { // the API. VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` // The cluster config. - Cluster *any1.Any `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` + Cluster *anypb.Any `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` // The timestamp when the Cluster was last updated. - LastUpdated *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` // Set if the last update failed, cleared after the next successful update. // The “error_state“ field contains the rejected version of this particular // resource along with the reason and timestamp. For successfully updated or @@ -915,14 +915,14 @@ func (x *ClustersConfigDump_DynamicCluster) GetVersionInfo() string { return "" } -func (x *ClustersConfigDump_DynamicCluster) GetCluster() *any1.Any { +func (x *ClustersConfigDump_DynamicCluster) GetCluster() *anypb.Any { if x != nil { return x.Cluster } return nil } -func (x *ClustersConfigDump_DynamicCluster) GetLastUpdated() *timestamp.Timestamp { +func (x *ClustersConfigDump_DynamicCluster) GetLastUpdated() *timestamppb.Timestamp { if x != nil { return x.LastUpdated } @@ -949,9 +949,9 @@ type RoutesConfigDump_StaticRouteConfig struct { unknownFields protoimpl.UnknownFields // The route config. - RouteConfig *any1.Any `protobuf:"bytes,1,opt,name=route_config,json=routeConfig,proto3" json:"route_config,omitempty"` + RouteConfig *anypb.Any `protobuf:"bytes,1,opt,name=route_config,json=routeConfig,proto3" json:"route_config,omitempty"` // The timestamp when the Route was last updated. 
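// Note: all generated getters in this file are nil-receiver safe, so callers
// can chain through optional submessages without intermediate nil checks; a
// hypothetical example against the types in this file:
//
//	var d *RoutesConfigDump_DynamicRouteConfig    // possibly nil
//	ts := d.GetErrorState().GetLastUpdateAttempt() // nil, no panic
//	_ = ts.GetSeconds()                            // 0, still no panic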
- LastUpdated *timestamp.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` } func (x *RoutesConfigDump_StaticRouteConfig) Reset() { @@ -986,14 +986,14 @@ func (*RoutesConfigDump_StaticRouteConfig) Descriptor() ([]byte, []int) { return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{3, 0} } -func (x *RoutesConfigDump_StaticRouteConfig) GetRouteConfig() *any1.Any { +func (x *RoutesConfigDump_StaticRouteConfig) GetRouteConfig() *anypb.Any { if x != nil { return x.RouteConfig } return nil } -func (x *RoutesConfigDump_StaticRouteConfig) GetLastUpdated() *timestamp.Timestamp { +func (x *RoutesConfigDump_StaticRouteConfig) GetLastUpdated() *timestamppb.Timestamp { if x != nil { return x.LastUpdated } @@ -1011,9 +1011,9 @@ type RoutesConfigDump_DynamicRouteConfig struct { // the route configuration was loaded. VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` // The route config. - RouteConfig *any1.Any `protobuf:"bytes,2,opt,name=route_config,json=routeConfig,proto3" json:"route_config,omitempty"` + RouteConfig *anypb.Any `protobuf:"bytes,2,opt,name=route_config,json=routeConfig,proto3" json:"route_config,omitempty"` // The timestamp when the Route was last updated. - LastUpdated *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` // Set if the last update failed, cleared after the next successful update. // The “error_state“ field contains the rejected version of this particular // resource along with the reason and timestamp. For successfully updated or @@ -1064,14 +1064,14 @@ func (x *RoutesConfigDump_DynamicRouteConfig) GetVersionInfo() string { return "" } -func (x *RoutesConfigDump_DynamicRouteConfig) GetRouteConfig() *any1.Any { +func (x *RoutesConfigDump_DynamicRouteConfig) GetRouteConfig() *anypb.Any { if x != nil { return x.RouteConfig } return nil } -func (x *RoutesConfigDump_DynamicRouteConfig) GetLastUpdated() *timestamp.Timestamp { +func (x *RoutesConfigDump_DynamicRouteConfig) GetLastUpdated() *timestamppb.Timestamp { if x != nil { return x.LastUpdated } @@ -1100,9 +1100,9 @@ type ScopedRoutesConfigDump_InlineScopedRouteConfigs struct { // The name assigned to the scoped route configurations. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // The scoped route configurations. - ScopedRouteConfigs []*any1.Any `protobuf:"bytes,2,rep,name=scoped_route_configs,json=scopedRouteConfigs,proto3" json:"scoped_route_configs,omitempty"` + ScopedRouteConfigs []*anypb.Any `protobuf:"bytes,2,rep,name=scoped_route_configs,json=scopedRouteConfigs,proto3" json:"scoped_route_configs,omitempty"` // The timestamp when the scoped route config set was last updated. 
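// Note: a *timestamppb.Timestamp converts to the standard library with
// AsTime, which is nil-safe (a nil receiver yields the Unix epoch); a
// hypothetical consumer, given a *ScopedRoutesConfigDump named dump with at
// least one inline entry:
//
//	cfg := dump.GetInlineScopedRouteConfigs()[0]
//	if cfg.GetLastUpdated().IsValid() { // false for nil or out-of-range values
//		fmt.Println("updated:", cfg.GetLastUpdated().AsTime().Format(time.RFC3339))
//	}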
- LastUpdated *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` } func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) Reset() { @@ -1144,14 +1144,14 @@ func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) GetName() string { return "" } -func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) GetScopedRouteConfigs() []*any1.Any { +func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) GetScopedRouteConfigs() []*anypb.Any { if x != nil { return x.ScopedRouteConfigs } return nil } -func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) GetLastUpdated() *timestamp.Timestamp { +func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) GetLastUpdated() *timestamppb.Timestamp { if x != nil { return x.LastUpdated } @@ -1171,9 +1171,9 @@ type ScopedRoutesConfigDump_DynamicScopedRouteConfigs struct { // the scoped routes configuration was loaded. VersionInfo string `protobuf:"bytes,2,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` // The scoped route configurations. - ScopedRouteConfigs []*any1.Any `protobuf:"bytes,3,rep,name=scoped_route_configs,json=scopedRouteConfigs,proto3" json:"scoped_route_configs,omitempty"` + ScopedRouteConfigs []*anypb.Any `protobuf:"bytes,3,rep,name=scoped_route_configs,json=scopedRouteConfigs,proto3" json:"scoped_route_configs,omitempty"` // The timestamp when the scoped route config set was last updated. - LastUpdated *timestamp.Timestamp `protobuf:"bytes,4,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` // Set if the last update failed, cleared after the next successful update. // The “error_state“ field contains the rejected version of this particular // resource along with the reason and timestamp. For successfully updated or @@ -1231,14 +1231,14 @@ func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetVersionInfo() stri return "" } -func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetScopedRouteConfigs() []*any1.Any { +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetScopedRouteConfigs() []*anypb.Any { if x != nil { return x.ScopedRouteConfigs } return nil } -func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetLastUpdated() *timestamp.Timestamp { +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetLastUpdated() *timestamppb.Timestamp { if x != nil { return x.LastUpdated } @@ -1265,9 +1265,9 @@ type EndpointsConfigDump_StaticEndpointConfig struct { unknownFields protoimpl.UnknownFields // The endpoint config. - EndpointConfig *any1.Any `protobuf:"bytes,1,opt,name=endpoint_config,json=endpointConfig,proto3" json:"endpoint_config,omitempty"` + EndpointConfig *anypb.Any `protobuf:"bytes,1,opt,name=endpoint_config,json=endpointConfig,proto3" json:"endpoint_config,omitempty"` // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. 
- LastUpdated *timestamp.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` } func (x *EndpointsConfigDump_StaticEndpointConfig) Reset() { @@ -1302,14 +1302,14 @@ func (*EndpointsConfigDump_StaticEndpointConfig) Descriptor() ([]byte, []int) { return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{5, 0} } -func (x *EndpointsConfigDump_StaticEndpointConfig) GetEndpointConfig() *any1.Any { +func (x *EndpointsConfigDump_StaticEndpointConfig) GetEndpointConfig() *anypb.Any { if x != nil { return x.EndpointConfig } return nil } -func (x *EndpointsConfigDump_StaticEndpointConfig) GetLastUpdated() *timestamp.Timestamp { +func (x *EndpointsConfigDump_StaticEndpointConfig) GetLastUpdated() *timestamppb.Timestamp { if x != nil { return x.LastUpdated } @@ -1327,9 +1327,9 @@ type EndpointsConfigDump_DynamicEndpointConfig struct { // the endpoint configuration was loaded. VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` // The endpoint config. - EndpointConfig *any1.Any `protobuf:"bytes,2,opt,name=endpoint_config,json=endpointConfig,proto3" json:"endpoint_config,omitempty"` + EndpointConfig *anypb.Any `protobuf:"bytes,2,opt,name=endpoint_config,json=endpointConfig,proto3" json:"endpoint_config,omitempty"` // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. - LastUpdated *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` // Set if the last update failed, cleared after the next successful update. // The “error_state“ field contains the rejected version of this particular // resource along with the reason and timestamp. For successfully updated or @@ -1380,14 +1380,14 @@ func (x *EndpointsConfigDump_DynamicEndpointConfig) GetVersionInfo() string { return "" } -func (x *EndpointsConfigDump_DynamicEndpointConfig) GetEndpointConfig() *any1.Any { +func (x *EndpointsConfigDump_DynamicEndpointConfig) GetEndpointConfig() *anypb.Any { if x != nil { return x.EndpointConfig } return nil } -func (x *EndpointsConfigDump_DynamicEndpointConfig) GetLastUpdated() *timestamp.Timestamp { +func (x *EndpointsConfigDump_DynamicEndpointConfig) GetLastUpdated() *timestamppb.Timestamp { if x != nil { return x.LastUpdated } @@ -1420,9 +1420,9 @@ type EcdsConfigDump_EcdsFilterConfig struct { // field at the time that the ECDS filter was loaded. VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` // The ECDS filter config. - EcdsFilter *any1.Any `protobuf:"bytes,2,opt,name=ecds_filter,json=ecdsFilter,proto3" json:"ecds_filter,omitempty"` + EcdsFilter *anypb.Any `protobuf:"bytes,2,opt,name=ecds_filter,json=ecdsFilter,proto3" json:"ecds_filter,omitempty"` // The timestamp when the ECDS filter was last updated. - LastUpdated *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` // Set if the last update failed, cleared after the next successful update. 
// The “error_state“ field contains the rejected version of this // particular resource along with the reason and timestamp. For successfully @@ -1473,14 +1473,14 @@ func (x *EcdsConfigDump_EcdsFilterConfig) GetVersionInfo() string { return "" } -func (x *EcdsConfigDump_EcdsFilterConfig) GetEcdsFilter() *any1.Any { +func (x *EcdsConfigDump_EcdsFilterConfig) GetEcdsFilter() *anypb.Any { if x != nil { return x.EcdsFilter } return nil } -func (x *EcdsConfigDump_EcdsFilterConfig) GetLastUpdated() *timestamp.Timestamp { +func (x *EcdsConfigDump_EcdsFilterConfig) GetLastUpdated() *timestamppb.Timestamp { if x != nil { return x.LastUpdated } @@ -1923,8 +1923,8 @@ var file_envoy_admin_v3_config_dump_shared_proto_goTypes = []interface{}{ (*EndpointsConfigDump_StaticEndpointConfig)(nil), // 17: envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig (*EndpointsConfigDump_DynamicEndpointConfig)(nil), // 18: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig (*EcdsConfigDump_EcdsFilterConfig)(nil), // 19: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig - (*any1.Any)(nil), // 20: google.protobuf.Any - (*timestamp.Timestamp)(nil), // 21: google.protobuf.Timestamp + (*anypb.Any)(nil), // 20: google.protobuf.Any + (*timestamppb.Timestamp)(nil), // 21: google.protobuf.Timestamp } var file_envoy_admin_v3_config_dump_shared_proto_depIdxs = []int32{ 20, // 0: envoy.admin.v3.UpdateFailureState.failed_configuration:type_name -> google.protobuf.Any diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.validate.go index 3a78136a987..dd16990ad41 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/admin/v3/config_dump_shared.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared_vtproto.pb.go new file mode 100644 index 00000000000..934de8568bf --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared_vtproto.pb.go @@ -0,0 +1,1715 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/config_dump_shared.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + timestamppb "github.com/planetscale/vtprotobuf/types/known/timestamppb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
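+	// Note: the EnforceVersion assignments in this block are compile-time
+	// guards, not runtime checks: protoimpl.EnforceVersion is an unsigned
+	// type, so whichever untyped constant expression would go negative
+	// fails to compile. `20 - protoimpl.MinVersion` breaks the build once
+	// the linked runtime drops support for generated-code version 20, and
+	// `protoimpl.MaxVersion - 20` breaks it when the runtime is too old to
+	// know that version. A standalone sketch of the same idiom (all names
+	// are illustrative, not from this file):
+	//
+	//	type enforce uint
+	//
+	//	const (
+	//		runtimeMin = 18
+	//		runtimeMax = 21
+	//		genVersion = 20
+	//	)
+	//
+	//	// both lines compile only while runtimeMin <= genVersion <= runtimeMax
+	//	const _ enforce = genVersion - runtimeMin
+	//	const _ enforce = runtimeMax - genVersion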
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *UpdateFailureState) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateFailureState) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UpdateFailureState) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0x22 + } + if len(m.Details) > 0 { + i -= len(m.Details) + copy(dAtA[i:], m.Details) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Details))) + i-- + dAtA[i] = 0x1a + } + if m.LastUpdateAttempt != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdateAttempt).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.FailedConfiguration != nil { + size, err := (*anypb.Any)(m.FailedConfiguration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListenersConfigDump_StaticListener) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenersConfigDump_StaticListener) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenersConfigDump_StaticListener) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Listener != nil { + size, err := (*anypb.Any)(m.Listener).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListenersConfigDump_DynamicListenerState) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenersConfigDump_DynamicListenerState) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenersConfigDump_DynamicListenerState) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == 
nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Listener != nil { + size, err := (*anypb.Any)(m.Listener).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListenersConfigDump_DynamicListener) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenersConfigDump_DynamicListener) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenersConfigDump_DynamicListener) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x30 + } + if m.ErrorState != nil { + size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.DrainingState != nil { + size, err := m.DrainingState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.WarmingState != nil { + size, err := m.WarmingState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.ActiveState != nil { + size, err := m.ActiveState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListenersConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenersConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenersConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], 
m.unknownFields) + } + if len(m.DynamicListeners) > 0 { + for iNdEx := len(m.DynamicListeners) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DynamicListeners[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.StaticListeners) > 0 { + for iNdEx := len(m.StaticListeners) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.StaticListeners[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClustersConfigDump_StaticCluster) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClustersConfigDump_StaticCluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClustersConfigDump_StaticCluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Cluster != nil { + size, err := (*anypb.Any)(m.Cluster).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClustersConfigDump_DynamicCluster) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClustersConfigDump_DynamicCluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClustersConfigDump_DynamicCluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x28 + } + if m.ErrorState != nil { + size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Cluster != nil { + size, err := 
(*anypb.Any)(m.Cluster).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClustersConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClustersConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClustersConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DynamicWarmingClusters) > 0 { + for iNdEx := len(m.DynamicWarmingClusters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DynamicWarmingClusters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.DynamicActiveClusters) > 0 { + for iNdEx := len(m.DynamicActiveClusters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DynamicActiveClusters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.StaticClusters) > 0 { + for iNdEx := len(m.StaticClusters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.StaticClusters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RoutesConfigDump_StaticRouteConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoutesConfigDump_StaticRouteConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RoutesConfigDump_StaticRouteConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.RouteConfig != nil { + size, err := (*anypb.Any)(m.RouteConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa 
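+	// Note on the literal bytes used throughout these marshalers: each is a
+	// protobuf field tag, (field_number << 3) | wire_type, written after the
+	// field payload because MarshalToSizedBufferVTStrict fills dAtA back to
+	// front; writing fields in descending field-number order here is what
+	// makes the forward byte stream come out in ascending order. The matching
+	// SizeVT methods later in the file count 1 tag byte + the varint length
+	// prefix + the payload for every length-delimited field, which is how the
+	// buffer can be presized exactly. A hypothetical helper making the tag
+	// arithmetic explicit (not part of this file):
+	//
+	//	const (
+	//		wireVarint = 0 // e.g. the client_status enums (0x28, 0x30)
+	//		wireBytes  = 2 // nested messages and strings
+	//	)
+	//
+	//	func tag(fieldNumber, wireType int) byte {
+	//		return byte(fieldNumber<<3 | wireType)
+	//	}
+	//
+	//	// tag(1, wireBytes) == 0x0a, tag(2, wireBytes) == 0x12,
+	//	// tag(3, wireBytes) == 0x1a, tag(4, wireBytes) == 0x22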
+ } + return len(dAtA) - i, nil +} + +func (m *RoutesConfigDump_DynamicRouteConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoutesConfigDump_DynamicRouteConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RoutesConfigDump_DynamicRouteConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x28 + } + if m.ErrorState != nil { + size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.RouteConfig != nil { + size, err := (*anypb.Any)(m.RouteConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RoutesConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoutesConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RoutesConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DynamicRouteConfigs) > 0 { + for iNdEx := len(m.DynamicRouteConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DynamicRouteConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.StaticRouteConfigs) > 0 { + for iNdEx := len(m.StaticRouteConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.StaticRouteConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*ScopedRoutesConfigDump_InlineScopedRouteConfigs) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.ScopedRouteConfigs) > 0 { + for iNdEx := len(m.ScopedRouteConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := (*anypb.Any)(m.ScopedRouteConfigs[iNdEx]).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x30 + } + if m.ErrorState != nil { + size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.ScopedRouteConfigs) > 0 { + for iNdEx := len(m.ScopedRouteConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := (*anypb.Any)(m.ScopedRouteConfigs[iNdEx]).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutesConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRoutesConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutesConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DynamicScopedRouteConfigs) > 0 { + for iNdEx := len(m.DynamicScopedRouteConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DynamicScopedRouteConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.InlineScopedRouteConfigs) > 0 { + for iNdEx := len(m.InlineScopedRouteConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.InlineScopedRouteConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *EndpointsConfigDump_StaticEndpointConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointsConfigDump_StaticEndpointConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EndpointsConfigDump_StaticEndpointConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.EndpointConfig != nil { + size, err := (*anypb.Any)(m.EndpointConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EndpointsConfigDump_DynamicEndpointConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointsConfigDump_DynamicEndpointConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EndpointsConfigDump_DynamicEndpointConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x28 + } + if m.ErrorState != nil { + size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if 
err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.EndpointConfig != nil { + size, err := (*anypb.Any)(m.EndpointConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EndpointsConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointsConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EndpointsConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DynamicEndpointConfigs) > 0 { + for iNdEx := len(m.DynamicEndpointConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DynamicEndpointConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.StaticEndpointConfigs) > 0 { + for iNdEx := len(m.StaticEndpointConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.StaticEndpointConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *EcdsConfigDump_EcdsFilterConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EcdsConfigDump_EcdsFilterConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EcdsConfigDump_EcdsFilterConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x28 + } + if m.ErrorState != nil { + size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.EcdsFilter != nil { + size, err := (*anypb.Any)(m.EcdsFilter).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EcdsConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EcdsConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EcdsConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.EcdsFilters) > 0 { + for iNdEx := len(m.EcdsFilters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.EcdsFilters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *UpdateFailureState) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FailedConfiguration != nil { + l = (*anypb.Any)(m.FailedConfiguration).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdateAttempt != nil { + l = (*timestamppb.Timestamp)(m.LastUpdateAttempt).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Details) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ListenersConfigDump_StaticListener) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Listener != nil { + l = (*anypb.Any)(m.Listener).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ListenersConfigDump_DynamicListenerState) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Listener != nil { + l = (*anypb.Any)(m.Listener).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ListenersConfigDump_DynamicListener) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ActiveState != nil { + l = m.ActiveState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.WarmingState != nil { + l = m.WarmingState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DrainingState != nil { + l = 
m.DrainingState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorState != nil { + l = m.ErrorState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + n += len(m.unknownFields) + return n +} + +func (m *ListenersConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.StaticListeners) > 0 { + for _, e := range m.StaticListeners { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicListeners) > 0 { + for _, e := range m.DynamicListeners { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ClustersConfigDump_StaticCluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cluster != nil { + l = (*anypb.Any)(m.Cluster).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClustersConfigDump_DynamicCluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Cluster != nil { + l = (*anypb.Any)(m.Cluster).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorState != nil { + l = m.ErrorState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClustersConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.StaticClusters) > 0 { + for _, e := range m.StaticClusters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicActiveClusters) > 0 { + for _, e := range m.DynamicActiveClusters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicWarmingClusters) > 0 { + for _, e := range m.DynamicWarmingClusters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RoutesConfigDump_StaticRouteConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RouteConfig != nil { + l = (*anypb.Any)(m.RouteConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RoutesConfigDump_DynamicRouteConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RouteConfig != nil { + l = (*anypb.Any)(m.RouteConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = 
(*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorState != nil { + l = m.ErrorState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + n += len(m.unknownFields) + return n +} + +func (m *RoutesConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.StaticRouteConfigs) > 0 { + for _, e := range m.StaticRouteConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicRouteConfigs) > 0 { + for _, e := range m.DynamicRouteConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ScopedRouteConfigs) > 0 { + for _, e := range m.ScopedRouteConfigs { + l = (*anypb.Any)(e).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ScopedRouteConfigs) > 0 { + for _, e := range m.ScopedRouteConfigs { + l = (*anypb.Any)(e).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorState != nil { + l = m.ErrorState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRoutesConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.InlineScopedRouteConfigs) > 0 { + for _, e := range m.InlineScopedRouteConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicScopedRouteConfigs) > 0 { + for _, e := range m.DynamicScopedRouteConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *EndpointsConfigDump_StaticEndpointConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EndpointConfig != nil { + l = (*anypb.Any)(m.EndpointConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *EndpointsConfigDump_DynamicEndpointConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EndpointConfig != nil { + l = (*anypb.Any)(m.EndpointConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + 
l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorState != nil { + l = m.ErrorState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + n += len(m.unknownFields) + return n +} + +func (m *EndpointsConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.StaticEndpointConfigs) > 0 { + for _, e := range m.StaticEndpointConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicEndpointConfigs) > 0 { + for _, e := range m.DynamicEndpointConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *EcdsConfigDump_EcdsFilterConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EcdsFilter != nil { + l = (*anypb.Any)(m.EcdsFilter).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorState != nil { + l = m.ErrorState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + n += len(m.unknownFields) + return n +} + +func (m *EcdsConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.EcdsFilters) > 0 { + for _, e := range m.EcdsFilters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_vtproto.pb.go new file mode 100644 index 00000000000..78e37eec911 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_vtproto.pb.go @@ -0,0 +1,466 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/config_dump.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + timestamppb "github.com/planetscale/vtprotobuf/types/known/timestamppb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Configs) > 0 { + for iNdEx := len(m.Configs) - 1; iNdEx >= 0; iNdEx-- { + size, err := (*anypb.Any)(m.Configs[iNdEx]).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *BootstrapConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BootstrapConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *BootstrapConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Bootstrap != nil { + if vtmsg, ok := interface{}(m.Bootstrap).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Bootstrap) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SecretsConfigDump_DynamicSecret) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretsConfigDump_DynamicSecret) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SecretsConfigDump_DynamicSecret) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x30 + } + if m.ErrorState != nil { + size, err := 
m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.Secret != nil { + size, err := (*anypb.Any)(m.Secret).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SecretsConfigDump_StaticSecret) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretsConfigDump_StaticSecret) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SecretsConfigDump_StaticSecret) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Secret != nil { + size, err := (*anypb.Any)(m.Secret).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SecretsConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretsConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SecretsConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DynamicWarmingSecrets) > 0 { + for iNdEx := len(m.DynamicWarmingSecrets) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DynamicWarmingSecrets[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.DynamicActiveSecrets) > 0 { + for iNdEx := 
len(m.DynamicActiveSecrets) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DynamicActiveSecrets[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.StaticSecrets) > 0 { + for iNdEx := len(m.StaticSecrets) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.StaticSecrets[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Configs) > 0 { + for _, e := range m.Configs { + l = (*anypb.Any)(e).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *BootstrapConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Bootstrap != nil { + if size, ok := interface{}(m.Bootstrap).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Bootstrap) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SecretsConfigDump_DynamicSecret) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Secret != nil { + l = (*anypb.Any)(m.Secret).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorState != nil { + l = m.ErrorState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + n += len(m.unknownFields) + return n +} + +func (m *SecretsConfigDump_StaticSecret) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Secret != nil { + l = (*anypb.Any)(m.Secret).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SecretsConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.StaticSecrets) > 0 { + for _, e := range m.StaticSecrets { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicActiveSecrets) > 0 { + for _, e := range m.DynamicActiveSecrets { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicWarmingSecrets) > 0 { + for _, e := range m.DynamicWarmingSecrets { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.go index 
03f7035926e..388c1de3262 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/admin/v3/init_dump.proto package adminv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.validate.go index 26eebf9092a..f746a12648a 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/admin/v3/init_dump.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump_vtproto.pb.go new file mode 100644 index 00000000000..d957042b88b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump_vtproto.pb.go @@ -0,0 +1,149 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/init_dump.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *UnreadyTargetsDumps_UnreadyTargetsDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UnreadyTargetsDumps_UnreadyTargetsDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UnreadyTargetsDumps_UnreadyTargetsDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TargetNames) > 0 { + for iNdEx := len(m.TargetNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TargetNames[iNdEx]) + copy(dAtA[i:], m.TargetNames[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TargetNames[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UnreadyTargetsDumps) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UnreadyTargetsDumps) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UnreadyTargetsDumps) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.UnreadyTargetsDumps) > 0 { + for iNdEx := len(m.UnreadyTargetsDumps) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.UnreadyTargetsDumps[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *UnreadyTargetsDumps_UnreadyTargetsDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.TargetNames) > 0 { + for _, s := range m.TargetNames { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *UnreadyTargetsDumps) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.UnreadyTargetsDumps) > 0 { + for _, e := range m.UnreadyTargetsDumps { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.go index 593f59f4ccd..ac6015fac52 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.go @@ -1,7 +1,7 @@ // Code 
generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/admin/v3/listeners.proto package adminv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.validate.go index 25fe3f36ac7..02cce263915 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/admin/v3/listeners.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners_vtproto.pb.go new file mode 100644 index 00000000000..816437acfbb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners_vtproto.pb.go @@ -0,0 +1,203 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/listeners.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Listeners) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Listeners) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listeners) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ListenerStatuses) > 0 { + for iNdEx := len(m.ListenerStatuses) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ListenerStatuses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ListenerStatus) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenerStatus) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.AdditionalLocalAddresses) > 0 { + for iNdEx := len(m.AdditionalLocalAddresses) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.AdditionalLocalAddresses[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AdditionalLocalAddresses[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + } + if m.LocalAddress != nil { + if vtmsg, ok := interface{}(m.LocalAddress).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LocalAddress) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Listeners) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ListenerStatuses) > 0 { + for _, e := range m.ListenerStatuses { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ListenerStatus) SizeVT() (n int) { + if m == nil 
{ + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LocalAddress != nil { + if size, ok := interface{}(m.LocalAddress).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LocalAddress) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.AdditionalLocalAddresses) > 0 { + for _, e := range m.AdditionalLocalAddresses { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.go index f54ea8e7dee..32de56ce3b0 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/admin/v3/memory.proto package adminv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.validate.go index 201885308ba..bcb9c1d2018 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/admin/v3/memory.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory_vtproto.pb.go new file mode 100644 index 00000000000..6e3a23688a3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory_vtproto.pb.go @@ -0,0 +1,110 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/memory.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Memory) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Memory) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Memory) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TotalPhysicalBytes != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalPhysicalBytes)) + i-- + dAtA[i] = 0x30 + } + if m.TotalThreadCache != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalThreadCache)) + i-- + dAtA[i] = 0x28 + } + if m.PageheapFree != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.PageheapFree)) + i-- + dAtA[i] = 0x20 + } + if m.PageheapUnmapped != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.PageheapUnmapped)) + i-- + dAtA[i] = 0x18 + } + if m.HeapSize != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HeapSize)) + i-- + dAtA[i] = 0x10 + } + if m.Allocated != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Allocated)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Memory) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Allocated != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Allocated)) + } + if m.HeapSize != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HeapSize)) + } + if m.PageheapUnmapped != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.PageheapUnmapped)) + } + if m.PageheapFree != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.PageheapFree)) + } + if m.TotalThreadCache != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalThreadCache)) + } + if m.TotalPhysicalBytes != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalPhysicalBytes)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.go index b4d35e8f60b..3b718959600 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/admin/v3/metrics.proto package adminv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.validate.go index 96e04a5ab4d..903d70e199e 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. 
// source: envoy/admin/v3/metrics.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics_vtproto.pb.go new file mode 100644 index 00000000000..0c09ae04590 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics_vtproto.pb.go @@ -0,0 +1,89 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/metrics.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *SimpleMetric) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SimpleMetric) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SimpleMetric) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + } + if m.Type != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SimpleMetric) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Type)) + } + if m.Value != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Value)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.go index ddaec87020e..44f35183dd9 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/admin/v3/mutex_stats.proto package adminv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.validate.go index 55d89de2d7d..236524c54a6 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/admin/v3/mutex_stats.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats_vtproto.pb.go new file mode 100644 index 00000000000..4318cbc9939 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats_vtproto.pb.go @@ -0,0 +1,86 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/mutex_stats.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *MutexStats) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MutexStats) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MutexStats) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LifetimeWaitCycles != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.LifetimeWaitCycles)) + i-- + dAtA[i] = 0x18 + } + if m.CurrentWaitCycles != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CurrentWaitCycles)) + i-- + dAtA[i] = 0x10 + } + if m.NumContentions != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.NumContentions)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MutexStats) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NumContentions != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.NumContentions)) + } + if m.CurrentWaitCycles != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.CurrentWaitCycles)) + } + if m.LifetimeWaitCycles != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.LifetimeWaitCycles)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.go 
b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.go index 8e8f5375d13..4538ff30f15 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/admin/v3/server_info.proto package adminv3 @@ -9,9 +9,9 @@ package adminv3 import ( _ "github.com/cncf/xds/go/udpa/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - duration "github.com/golang/protobuf/ptypes/duration" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" reflect "reflect" sync "sync" ) @@ -238,9 +238,9 @@ type ServerInfo struct { // State of the server. State ServerInfo_State `protobuf:"varint,2,opt,name=state,proto3,enum=envoy.admin.v3.ServerInfo_State" json:"state,omitempty"` // Uptime since current epoch was started. - UptimeCurrentEpoch *duration.Duration `protobuf:"bytes,3,opt,name=uptime_current_epoch,json=uptimeCurrentEpoch,proto3" json:"uptime_current_epoch,omitempty"` + UptimeCurrentEpoch *durationpb.Duration `protobuf:"bytes,3,opt,name=uptime_current_epoch,json=uptimeCurrentEpoch,proto3" json:"uptime_current_epoch,omitempty"` // Uptime since the start of the first epoch. - UptimeAllEpochs *duration.Duration `protobuf:"bytes,4,opt,name=uptime_all_epochs,json=uptimeAllEpochs,proto3" json:"uptime_all_epochs,omitempty"` + UptimeAllEpochs *durationpb.Duration `protobuf:"bytes,4,opt,name=uptime_all_epochs,json=uptimeAllEpochs,proto3" json:"uptime_all_epochs,omitempty"` // Hot restart version. HotRestartVersion string `protobuf:"bytes,5,opt,name=hot_restart_version,json=hotRestartVersion,proto3" json:"hot_restart_version,omitempty"` // Command line options the server is currently running with. @@ -295,14 +295,14 @@ func (x *ServerInfo) GetState() ServerInfo_State { return ServerInfo_LIVE } -func (x *ServerInfo) GetUptimeCurrentEpoch() *duration.Duration { +func (x *ServerInfo) GetUptimeCurrentEpoch() *durationpb.Duration { if x != nil { return x.UptimeCurrentEpoch } return nil } -func (x *ServerInfo) GetUptimeAllEpochs() *duration.Duration { +func (x *ServerInfo) GetUptimeAllEpochs() *durationpb.Duration { if x != nil { return x.UptimeAllEpochs } @@ -330,7 +330,7 @@ func (x *ServerInfo) GetNode() *v3.Node { return nil } -// [#next-free-field: 39] +// [#next-free-field: 41] type CommandLineOptions struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -340,6 +340,10 @@ type CommandLineOptions struct { BaseId uint64 `protobuf:"varint,1,opt,name=base_id,json=baseId,proto3" json:"base_id,omitempty"` // See :option:`--use-dynamic-base-id` for details. UseDynamicBaseId bool `protobuf:"varint,31,opt,name=use_dynamic_base_id,json=useDynamicBaseId,proto3" json:"use_dynamic_base_id,omitempty"` + // See :option:`--skip-hot-restart-on-no-parent` for details. + SkipHotRestartOnNoParent bool `protobuf:"varint,39,opt,name=skip_hot_restart_on_no_parent,json=skipHotRestartOnNoParent,proto3" json:"skip_hot_restart_on_no_parent,omitempty"` + // See :option:`--skip-hot-restart-parent-stats` for details. 
+ SkipHotRestartParentStats bool `protobuf:"varint,40,opt,name=skip_hot_restart_parent_stats,json=skipHotRestartParentStats,proto3" json:"skip_hot_restart_parent_stats,omitempty"` // See :option:`--base-id-path` for details. BaseIdPath string `protobuf:"bytes,32,opt,name=base_id_path,json=baseIdPath,proto3" json:"base_id_path,omitempty"` // See :option:`--concurrency` for details. @@ -375,13 +379,13 @@ type CommandLineOptions struct { // See :option:`--service-zone` for details. ServiceZone string `protobuf:"bytes,15,opt,name=service_zone,json=serviceZone,proto3" json:"service_zone,omitempty"` // See :option:`--file-flush-interval-msec` for details. - FileFlushInterval *duration.Duration `protobuf:"bytes,16,opt,name=file_flush_interval,json=fileFlushInterval,proto3" json:"file_flush_interval,omitempty"` + FileFlushInterval *durationpb.Duration `protobuf:"bytes,16,opt,name=file_flush_interval,json=fileFlushInterval,proto3" json:"file_flush_interval,omitempty"` // See :option:`--drain-time-s` for details. - DrainTime *duration.Duration `protobuf:"bytes,17,opt,name=drain_time,json=drainTime,proto3" json:"drain_time,omitempty"` + DrainTime *durationpb.Duration `protobuf:"bytes,17,opt,name=drain_time,json=drainTime,proto3" json:"drain_time,omitempty"` // See :option:`--drain-strategy` for details. DrainStrategy CommandLineOptions_DrainStrategy `protobuf:"varint,33,opt,name=drain_strategy,json=drainStrategy,proto3,enum=envoy.admin.v3.CommandLineOptions_DrainStrategy" json:"drain_strategy,omitempty"` // See :option:`--parent-shutdown-time-s` for details. - ParentShutdownTime *duration.Duration `protobuf:"bytes,18,opt,name=parent_shutdown_time,json=parentShutdownTime,proto3" json:"parent_shutdown_time,omitempty"` + ParentShutdownTime *durationpb.Duration `protobuf:"bytes,18,opt,name=parent_shutdown_time,json=parentShutdownTime,proto3" json:"parent_shutdown_time,omitempty"` // See :option:`--mode` for details. Mode CommandLineOptions_Mode `protobuf:"varint,19,opt,name=mode,proto3,enum=envoy.admin.v3.CommandLineOptions_Mode" json:"mode,omitempty"` // See :option:`--disable-hot-restart` for details. 
@@ -452,6 +456,20 @@ func (x *CommandLineOptions) GetUseDynamicBaseId() bool { return false } +func (x *CommandLineOptions) GetSkipHotRestartOnNoParent() bool { + if x != nil { + return x.SkipHotRestartOnNoParent + } + return false +} + +func (x *CommandLineOptions) GetSkipHotRestartParentStats() bool { + if x != nil { + return x.SkipHotRestartParentStats + } + return false +} + func (x *CommandLineOptions) GetBaseIdPath() string { if x != nil { return x.BaseIdPath @@ -571,14 +589,14 @@ func (x *CommandLineOptions) GetServiceZone() string { return "" } -func (x *CommandLineOptions) GetFileFlushInterval() *duration.Duration { +func (x *CommandLineOptions) GetFileFlushInterval() *durationpb.Duration { if x != nil { return x.FileFlushInterval } return nil } -func (x *CommandLineOptions) GetDrainTime() *duration.Duration { +func (x *CommandLineOptions) GetDrainTime() *durationpb.Duration { if x != nil { return x.DrainTime } @@ -592,7 +610,7 @@ func (x *CommandLineOptions) GetDrainStrategy() CommandLineOptions_DrainStrategy return CommandLineOptions_Gradual } -func (x *CommandLineOptions) GetParentShutdownTime() *duration.Duration { +func (x *CommandLineOptions) GetParentShutdownTime() *durationpb.Duration { if x != nil { return x.ParentShutdownTime } @@ -724,132 +742,141 @@ var file_envoy_admin_v3_server_info_proto_rawDesc = []byte{ 0x41, 0x4c, 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, - 0x22, 0xdb, 0x0e, 0x0a, 0x12, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, + 0x22, 0xde, 0x0f, 0x0a, 0x12, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x62, 0x61, 0x73, 0x65, 0x49, 0x64, 0x12, 0x2d, 0x0a, 0x13, 0x75, 0x73, 0x65, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x75, 0x73, 0x65, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x42, 0x61, 0x73, 0x65, 0x49, 0x64, 0x12, - 0x20, 0x0a, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, - 0x20, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x61, 0x73, 0x65, 0x49, 0x64, 0x50, 0x61, 0x74, - 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, - 0x6e, 0x63, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x50, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x79, - 0x61, 0x6d, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x59, 0x61, 0x6d, 0x6c, 0x12, 0x3d, 0x0a, 0x1b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x75, - 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x61, 0x6c, 0x6c, 0x6f, - 0x77, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x72, 0x65, 
0x6a, 0x65, 0x63, 0x74, 0x5f, 0x75, - 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x72, 0x65, 0x6a, - 0x65, 0x63, 0x74, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, - 0x63, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x69, 0x67, 0x6e, 0x6f, 0x72, - 0x65, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, - 0x63, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, - 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x44, 0x79, 0x6e, - 0x61, 0x6d, 0x69, 0x63, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x61, 0x74, 0x68, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x41, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x50, 0x61, 0x74, 0x68, 0x12, 0x65, 0x0a, 0x18, 0x6c, 0x6f, 0x63, 0x61, - 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x70, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, - 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, - 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x41, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x49, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x1b, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x2e, 0x0a, 0x13, - 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x6c, 0x65, - 0x76, 0x65, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x6f, - 0x6e, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, - 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x6c, - 0x6f, 0x67, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x5f, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, - 0x64, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x6d, - 0x61, 0x74, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, - 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, - 0x50, 0x61, 0x74, 0x68, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x21, 0x0a, - 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x0e, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x6f, 0x64, 0x65, - 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, - 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5a, - 0x6f, 0x6e, 0x65, 0x12, 0x49, 0x0a, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x66, 0x6c, 
0x75, 0x73, - 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x66, 0x69, 0x6c, - 0x65, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x38, - 0x0a, 0x0a, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, + 0x3f, 0x0a, 0x1d, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x68, 0x6f, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x5f, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x18, 0x27, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x73, 0x6b, 0x69, 0x70, 0x48, 0x6f, 0x74, 0x52, + 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4f, 0x6e, 0x4e, 0x6f, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x12, 0x40, 0x0a, 0x1d, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x68, 0x6f, 0x74, 0x5f, 0x72, 0x65, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x73, 0x18, 0x28, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x73, 0x6b, 0x69, 0x70, 0x48, 0x6f, 0x74, + 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x73, 0x12, 0x20, 0x0a, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x20, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x61, 0x73, 0x65, 0x49, 0x64, + 0x50, 0x61, 0x74, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x5f, 0x79, 0x61, 0x6d, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x59, 0x61, 0x6d, 0x6c, 0x12, 0x3d, 0x0a, 0x1b, 0x61, 0x6c, 0x6c, 0x6f, + 0x77, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, + 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x69, + 0x63, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x72, 0x65, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, + 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x69, 0x67, + 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x64, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x1e, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x1a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x2c, 0x0a, + 0x12, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x41, 0x64, 
0x64, 0x72, 0x65, 0x73, 0x73, 0x50, 0x61, 0x74, 0x68, 0x12, 0x65, 0x0a, 0x18, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x70, 0x5f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x49, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x49, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, + 0x2e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x6f, 0x67, + 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x6f, + 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, + 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x2c, + 0x0a, 0x12, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x5f, 0x65, 0x73, 0x63, + 0x61, 0x70, 0x65, 0x64, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x6f, 0x67, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x08, + 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, + 0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x7a, + 0x6f, 0x6e, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x49, 0x0a, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x66, + 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x10, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, + 0x66, 0x69, 0x6c, 0x65, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x12, 0x38, 0x0a, 0x0a, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x09, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x0e, 0x64, + 0x72, 0x61, 0x69, 0x6e, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x21, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 
0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x53, 0x74, 0x72, + 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x0d, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x53, 0x74, 0x72, 0x61, + 0x74, 0x65, 0x67, 0x79, 0x12, 0x4b, 0x0a, 0x14, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, + 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x64, - 0x72, 0x61, 0x69, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x0e, 0x64, 0x72, 0x61, 0x69, - 0x6e, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x21, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, - 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, - 0x67, 0x79, 0x52, 0x0d, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, - 0x79, 0x12, 0x4b, 0x0a, 0x14, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x68, 0x75, 0x74, - 0x64, 0x6f, 0x77, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3b, - 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, - 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x64, - 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x68, 0x6f, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, - 0x65, 0x48, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x14, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x65, 0x78, 0x5f, 0x74, 0x72, 0x61, 0x63, - 0x69, 0x6e, 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x4d, 0x75, 0x74, 0x65, 0x78, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, - 0x0d, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x18, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x45, 0x70, 0x6f, - 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x70, 0x75, 0x73, 0x65, 0x74, 0x5f, 0x74, 0x68, 0x72, - 0x65, 0x61, 0x64, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x63, 0x70, 0x75, 0x73, - 0x65, 0x74, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x69, 0x73, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x1c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x39, 0x0a, 0x19, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6e, 0x65, 0x5f, 
0x67, 0x72, 0x61, 0x69, 0x6e, 0x5f, - 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x22, 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6e, 0x65, 0x47, 0x72, 0x61, 0x69, 0x6e, 0x4c, 0x6f, - 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, - 0x70, 0x61, 0x74, 0x68, 0x18, 0x23, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x63, 0x6b, - 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, - 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x6f, 0x63, - 0x6b, 0x65, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x5f, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x18, 0x25, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x72, 0x65, 0x44, 0x75, 0x6d, - 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x74, 0x61, 0x67, 0x18, 0x26, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x73, 0x54, 0x61, 0x67, 0x22, 0x1b, - 0x0a, 0x09, 0x49, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x06, 0x0a, 0x02, 0x76, - 0x34, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x76, 0x36, 0x10, 0x01, 0x22, 0x2d, 0x0a, 0x04, 0x4d, - 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x65, 0x72, 0x76, 0x65, 0x10, 0x00, 0x12, 0x0c, - 0x0a, 0x08, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, - 0x49, 0x6e, 0x69, 0x74, 0x4f, 0x6e, 0x6c, 0x79, 0x10, 0x02, 0x22, 0x2b, 0x0a, 0x0d, 0x44, 0x72, - 0x61, 0x69, 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x0b, 0x0a, 0x07, 0x47, - 0x72, 0x61, 0x64, 0x75, 0x61, 0x6c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x49, 0x6d, 0x6d, 0x65, - 0x64, 0x69, 0x61, 0x74, 0x65, 0x10, 0x01, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x14, - 0x10, 0x15, 0x4a, 0x04, 0x08, 0x15, 0x10, 0x16, 0x4a, 0x04, 0x08, 0x1d, 0x10, 0x1e, 0x52, 0x09, - 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x52, 0x10, 0x6d, 0x61, 0x78, 0x5f, 0x6f, - 0x62, 0x6a, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x52, 0x11, 0x62, 0x6f, 0x6f, - 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x78, - 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, - 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, - 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, - 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, - 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x54, 
0x69, 0x6d, + 0x65, 0x12, 0x3b, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x2e, + 0x0a, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x68, 0x6f, 0x74, 0x5f, 0x72, 0x65, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x64, 0x69, 0x73, + 0x61, 0x62, 0x6c, 0x65, 0x48, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, + 0x0a, 0x14, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x65, 0x78, 0x5f, 0x74, + 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x75, 0x74, 0x65, 0x78, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, + 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x65, 0x70, 0x6f, 0x63, + 0x68, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x45, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x70, 0x75, 0x73, 0x65, 0x74, 0x5f, + 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x63, + 0x70, 0x75, 0x73, 0x65, 0x74, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x12, 0x2f, 0x0a, 0x13, + 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x1c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x64, 0x69, 0x73, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x39, 0x0a, + 0x19, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6e, 0x65, 0x5f, 0x67, 0x72, 0x61, + 0x69, 0x6e, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x22, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x16, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6e, 0x65, 0x47, 0x72, 0x61, 0x69, + 0x6e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x23, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, + 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x18, 0x25, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x72, 0x65, + 0x44, 0x75, 0x6d, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x74, 0x61, + 0x67, 0x18, 0x26, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x73, 0x54, 0x61, + 0x67, 0x22, 0x1b, 0x0a, 0x09, 0x49, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x06, + 0x0a, 0x02, 0x76, 0x34, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x76, 0x36, 0x10, 0x01, 0x22, 0x2d, + 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x65, 0x72, 0x76, 0x65, 0x10, + 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x10, 0x01, 0x12, + 0x0c, 0x0a, 0x08, 0x49, 0x6e, 0x69, 0x74, 0x4f, 0x6e, 0x6c, 0x79, 0x10, 0x02, 0x22, 0x2b, 0x0a, + 0x0d, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x0b, + 0x0a, 0x07, 
0x47, 0x72, 0x61, 0x64, 0x75, 0x61, 0x6c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x49, + 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x10, 0x01, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, + 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, + 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, + 0x04, 0x08, 0x14, 0x10, 0x15, 0x4a, 0x04, 0x08, 0x15, 0x10, 0x16, 0x4a, 0x04, 0x08, 0x1d, 0x10, + 0x1e, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x52, 0x10, 0x6d, 0x61, + 0x78, 0x5f, 0x6f, 0x62, 0x6a, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x52, 0x11, + 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x42, 0x78, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -873,7 +900,7 @@ var file_envoy_admin_v3_server_info_proto_goTypes = []interface{}{ (CommandLineOptions_DrainStrategy)(0), // 3: envoy.admin.v3.CommandLineOptions.DrainStrategy (*ServerInfo)(nil), // 4: envoy.admin.v3.ServerInfo (*CommandLineOptions)(nil), // 5: envoy.admin.v3.CommandLineOptions - (*duration.Duration)(nil), // 6: google.protobuf.Duration + (*durationpb.Duration)(nil), // 6: google.protobuf.Duration (*v3.Node)(nil), // 7: envoy.config.core.v3.Node } var file_envoy_admin_v3_server_info_proto_depIdxs = []int32{ diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.validate.go index d77b43f64f9..8db097828bd 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. 
// source: envoy/admin/v3/server_info.proto @@ -282,6 +283,10 @@ func (m *CommandLineOptions) validate(all bool) error { // no validation rules for UseDynamicBaseId + // no validation rules for SkipHotRestartOnNoParent + + // no validation rules for SkipHotRestartParentStats + // no validation rules for BaseIdPath // no validation rules for Concurrency diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info_vtproto.pb.go new file mode 100644 index 00000000000..5bf55561e92 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info_vtproto.pb.go @@ -0,0 +1,671 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/server_info.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ServerInfo) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServerInfo) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ServerInfo) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Node != nil { + if vtmsg, ok := interface{}(m.Node).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Node) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + if m.CommandLineOptions != nil { + size, err := m.CommandLineOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.HotRestartVersion) > 0 { + i -= len(m.HotRestartVersion) + copy(dAtA[i:], m.HotRestartVersion) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HotRestartVersion))) + i-- + dAtA[i] = 0x2a + } + if m.UptimeAllEpochs != nil { + size, err := (*durationpb.Duration)(m.UptimeAllEpochs).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.UptimeCurrentEpoch != nil { + size, err := (*durationpb.Duration)(m.UptimeCurrentEpoch).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil 
{ + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.State != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommandLineOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommandLineOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommandLineOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SkipHotRestartParentStats { + i-- + if m.SkipHotRestartParentStats { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc0 + } + if m.SkipHotRestartOnNoParent { + i-- + if m.SkipHotRestartOnNoParent { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb8 + } + if len(m.StatsTag) > 0 { + for iNdEx := len(m.StatsTag) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.StatsTag[iNdEx]) + copy(dAtA[i:], m.StatsTag[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StatsTag[iNdEx]))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb2 + } + } + if m.EnableCoreDump { + i-- + if m.EnableCoreDump { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa8 + } + if m.SocketMode != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.SocketMode)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa0 + } + if len(m.SocketPath) > 0 { + i -= len(m.SocketPath) + copy(dAtA[i:], m.SocketPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SocketPath))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + if m.EnableFineGrainLogging { + i-- + if m.EnableFineGrainLogging { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x90 + } + if m.DrainStrategy != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DrainStrategy)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x88 + } + if len(m.BaseIdPath) > 0 { + i -= len(m.BaseIdPath) + copy(dAtA[i:], m.BaseIdPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.BaseIdPath))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + if m.UseDynamicBaseId { + i-- + if m.UseDynamicBaseId { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf8 + } + if m.IgnoreUnknownDynamicFields { + i-- + if m.IgnoreUnknownDynamicFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf0 + } + if len(m.DisabledExtensions) > 0 { + for iNdEx := len(m.DisabledExtensions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DisabledExtensions[iNdEx]) + copy(dAtA[i:], m.DisabledExtensions[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DisabledExtensions[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + } + if m.LogFormatEscaped { + i-- + if m.LogFormatEscaped { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd8 + } + if 
m.RejectUnknownDynamicFields { + i-- + if m.RejectUnknownDynamicFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd0 + } + if m.CpusetThreads { + i-- + if m.CpusetThreads { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc8 + } + if m.RestartEpoch != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RestartEpoch)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc0 + } + if m.EnableMutexTracing { + i-- + if m.EnableMutexTracing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb8 + } + if m.DisableHotRestart { + i-- + if m.DisableHotRestart { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb0 + } + if m.Mode != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Mode)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if m.ParentShutdownTime != nil { + size, err := (*durationpb.Duration)(m.ParentShutdownTime).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if m.DrainTime != nil { + size, err := (*durationpb.Duration)(m.DrainTime).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.FileFlushInterval != nil { + size, err := (*durationpb.Duration)(m.FileFlushInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if len(m.ServiceZone) > 0 { + i -= len(m.ServiceZone) + copy(dAtA[i:], m.ServiceZone) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceZone))) + i-- + dAtA[i] = 0x7a + } + if len(m.ServiceNode) > 0 { + i -= len(m.ServiceNode) + copy(dAtA[i:], m.ServiceNode) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceNode))) + i-- + dAtA[i] = 0x72 + } + if len(m.ServiceCluster) > 0 { + i -= len(m.ServiceCluster) + copy(dAtA[i:], m.ServiceCluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceCluster))) + i-- + dAtA[i] = 0x6a + } + if len(m.LogPath) > 0 { + i -= len(m.LogPath) + copy(dAtA[i:], m.LogPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LogPath))) + i-- + dAtA[i] = 0x5a + } + if len(m.LogFormat) > 0 { + i -= len(m.LogFormat) + copy(dAtA[i:], m.LogFormat) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LogFormat))) + i-- + dAtA[i] = 0x52 + } + if len(m.ComponentLogLevel) > 0 { + i -= len(m.ComponentLogLevel) + copy(dAtA[i:], m.ComponentLogLevel) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ComponentLogLevel))) + i-- + dAtA[i] = 0x4a + } + if len(m.LogLevel) > 0 { + i -= len(m.LogLevel) + copy(dAtA[i:], m.LogLevel) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LogLevel))) + i-- + dAtA[i] = 0x42 + } + if m.LocalAddressIpVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.LocalAddressIpVersion)) + i-- + dAtA[i] = 0x38 + } + if len(m.AdminAddressPath) > 0 { + i -= len(m.AdminAddressPath) + copy(dAtA[i:], m.AdminAddressPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AdminAddressPath))) + i-- + dAtA[i] = 0x32 + } + if m.AllowUnknownStaticFields { + i-- + if m.AllowUnknownStaticFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.ConfigYaml) > 0 { + i -= len(m.ConfigYaml) + 
copy(dAtA[i:], m.ConfigYaml) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ConfigYaml))) + i-- + dAtA[i] = 0x22 + } + if len(m.ConfigPath) > 0 { + i -= len(m.ConfigPath) + copy(dAtA[i:], m.ConfigPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ConfigPath))) + i-- + dAtA[i] = 0x1a + } + if m.Concurrency != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Concurrency)) + i-- + dAtA[i] = 0x10 + } + if m.BaseId != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.BaseId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ServerInfo) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.State != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.State)) + } + if m.UptimeCurrentEpoch != nil { + l = (*durationpb.Duration)(m.UptimeCurrentEpoch).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UptimeAllEpochs != nil { + l = (*durationpb.Duration)(m.UptimeAllEpochs).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.HotRestartVersion) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CommandLineOptions != nil { + l = m.CommandLineOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Node != nil { + if size, ok := interface{}(m.Node).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Node) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CommandLineOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BaseId != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.BaseId)) + } + if m.Concurrency != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Concurrency)) + } + l = len(m.ConfigPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ConfigYaml) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AllowUnknownStaticFields { + n += 2 + } + l = len(m.AdminAddressPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LocalAddressIpVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.LocalAddressIpVersion)) + } + l = len(m.LogLevel) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ComponentLogLevel) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.LogFormat) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.LogPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ServiceCluster) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ServiceNode) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ServiceZone) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FileFlushInterval != nil { + l = (*durationpb.Duration)(m.FileFlushInterval).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DrainTime != nil { + l = (*durationpb.Duration)(m.DrainTime).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ParentShutdownTime != nil { + l = (*durationpb.Duration)(m.ParentShutdownTime).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Mode != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.Mode)) + } + if m.DisableHotRestart { + n += 3 + } + if m.EnableMutexTracing { + n += 
3 + } + if m.RestartEpoch != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.RestartEpoch)) + } + if m.CpusetThreads { + n += 3 + } + if m.RejectUnknownDynamicFields { + n += 3 + } + if m.LogFormatEscaped { + n += 3 + } + if len(m.DisabledExtensions) > 0 { + for _, s := range m.DisabledExtensions { + l = len(s) + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.IgnoreUnknownDynamicFields { + n += 3 + } + if m.UseDynamicBaseId { + n += 3 + } + l = len(m.BaseIdPath) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DrainStrategy != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.DrainStrategy)) + } + if m.EnableFineGrainLogging { + n += 3 + } + l = len(m.SocketPath) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SocketMode != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.SocketMode)) + } + if m.EnableCoreDump { + n += 3 + } + if len(m.StatsTag) > 0 { + for _, s := range m.StatsTag { + l = len(s) + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.SkipHotRestartOnNoParent { + n += 3 + } + if m.SkipHotRestartParentStats { + n += 3 + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.go index 056ee37b952..f30e4500e46 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/admin/v3/tap.proto package adminv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.validate.go index 2364c9ccef0..d524f2aefe6 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/admin/v3/tap.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap_vtproto.pb.go new file mode 100644 index 00000000000..4524bfb4f70 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap_vtproto.pb.go @@ -0,0 +1,106 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/tap.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TapRequest) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TapRequest) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TapRequest) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TapConfig != nil { + if vtmsg, ok := interface{}(m.TapConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TapConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ConfigId) > 0 { + i -= len(m.ConfigId) + copy(dAtA[i:], m.ConfigId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ConfigId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TapRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConfigId) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TapConfig != nil { + if size, ok := interface{}(m.TapConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TapConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.go index 75ba29582d4..258fcfe2fbd 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/annotations/deprecation.proto package annotations diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.validate.go index 2452fcdd6ed..be58aa52446 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. 
// source: envoy/annotations/deprecation.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.go index 5da50ae67a3..828c87c5e3b 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/annotations/resource.proto package annotations diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.validate.go index 05dc7896820..2929a5813b7 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/annotations/resource.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource_vtproto.pb.go new file mode 100644 index 00000000000..324cb091661 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource_vtproto.pb.go @@ -0,0 +1,73 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/annotations/resource.proto + +package annotations + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ResourceAnnotation) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceAnnotation) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResourceAnnotation) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResourceAnnotation) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.go index 062c8c9dce4..34996d4975f 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/accesslog/v3/accesslog.proto package accesslogv3 @@ -14,10 +14,10 @@ import ( v33 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" v31 "github.com/envoyproxy/go-control-plane/envoy/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any1 "github.com/golang/protobuf/ptypes/any" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -243,7 +243,7 @@ func (m *AccessLog) GetConfigType() isAccessLog_ConfigType { return nil } -func (x *AccessLog) GetTypedConfig() *any1.Any { +func (x *AccessLog) GetTypedConfig() *anypb.Any { if x, ok := x.GetConfigType().(*AccessLog_TypedConfig); ok { return x.TypedConfig } @@ -255,7 +255,7 @@ type isAccessLog_ConfigType interface { } type AccessLog_TypedConfig struct { - TypedConfig *any1.Any `protobuf:"bytes,4,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *anypb.Any `protobuf:"bytes,4,opt,name=typed_config,json=typedConfig,proto3,oneof"` } func (*AccessLog_TypedConfig) isAccessLog_ConfigType() {} @@ -1115,7 +1115,7 @@ type MetadataFilter struct { Matcher *v33.MetadataMatcher `protobuf:"bytes,1,opt,name=matcher,proto3" json:"matcher,omitempty"` // Default result if the key does not exist in dynamic metadata: if unset or // true, then log; if false, then don't log. 
- MatchIfKeyNotFound *wrappers.BoolValue `protobuf:"bytes,2,opt,name=match_if_key_not_found,json=matchIfKeyNotFound,proto3" json:"match_if_key_not_found,omitempty"` + MatchIfKeyNotFound *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=match_if_key_not_found,json=matchIfKeyNotFound,proto3" json:"match_if_key_not_found,omitempty"` } func (x *MetadataFilter) Reset() { @@ -1157,7 +1157,7 @@ func (x *MetadataFilter) GetMatcher() *v33.MetadataMatcher { return nil } -func (x *MetadataFilter) GetMatchIfKeyNotFound() *wrappers.BoolValue { +func (x *MetadataFilter) GetMatchIfKeyNotFound() *wrapperspb.BoolValue { if x != nil { return x.MatchIfKeyNotFound } @@ -1286,7 +1286,7 @@ func (m *ExtensionFilter) GetConfigType() isExtensionFilter_ConfigType { return nil } -func (x *ExtensionFilter) GetTypedConfig() *any1.Any { +func (x *ExtensionFilter) GetTypedConfig() *anypb.Any { if x, ok := x.GetConfigType().(*ExtensionFilter_TypedConfig); ok { return x.TypedConfig } @@ -1298,7 +1298,7 @@ type isExtensionFilter_ConfigType interface { } type ExtensionFilter_TypedConfig struct { - TypedConfig *any1.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` } func (*ExtensionFilter_TypedConfig) isExtensionFilter_ConfigType() {} @@ -1512,97 +1512,97 @@ var file_envoy_config_accesslog_v3_accesslog_proto_rawDesc = []byte{ 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0xa2, 0x01, - 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x8b, 0x01, - 0xfa, 0x42, 0x87, 0x01, 0x92, 0x01, 0x83, 0x01, 0x22, 0x80, 0x01, 0x72, 0x7e, 0x52, 0x02, 0x4c, - 0x48, 0x52, 0x02, 0x55, 0x48, 0x52, 0x02, 0x55, 0x54, 0x52, 0x02, 0x4c, 0x52, 0x52, 0x02, 0x55, - 0x52, 0x52, 0x02, 0x55, 0x46, 0x52, 0x02, 0x55, 0x43, 0x52, 0x02, 0x55, 0x4f, 0x52, 0x02, 0x4e, - 0x52, 0x52, 0x02, 0x44, 0x49, 0x52, 0x02, 0x46, 0x49, 0x52, 0x02, 0x52, 0x4c, 0x52, 0x04, 0x55, - 0x41, 0x45, 0x58, 0x52, 0x04, 0x52, 0x4c, 0x53, 0x45, 0x52, 0x02, 0x44, 0x43, 0x52, 0x03, 0x55, - 0x52, 0x58, 0x52, 0x02, 0x53, 0x49, 0x52, 0x02, 0x49, 0x48, 0x52, 0x03, 0x44, 0x50, 0x45, 0x52, - 0x05, 0x55, 0x4d, 0x53, 0x44, 0x52, 0x52, 0x04, 0x52, 0x46, 0x43, 0x46, 0x52, 0x04, 0x4e, 0x46, - 0x43, 0x46, 0x52, 0x02, 0x44, 0x54, 0x52, 0x03, 0x55, 0x50, 0x45, 0x52, 0x02, 0x4e, 0x43, 0x52, - 0x02, 0x4f, 0x4d, 0x52, 0x02, 0x44, 0x46, 0x52, 0x02, 0x44, 0x4f, 0x52, 0x05, 0x66, 0x6c, 0x61, - 0x67, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x80, - 0x04, 0x0a, 0x10, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x46, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x12, 0x5d, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x65, 0x6e, 
0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, - 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x46, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x92, 0x01, - 0x07, 0x22, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x22, 0xb8, 0x02, 0x0a, - 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x06, 0x0a, 0x02, 0x4f, 0x4b, 0x10, 0x00, 0x12, - 0x0c, 0x0a, 0x08, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, - 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x49, 0x4e, - 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x41, 0x52, 0x47, 0x55, 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x03, - 0x12, 0x15, 0x0a, 0x11, 0x44, 0x45, 0x41, 0x44, 0x4c, 0x49, 0x4e, 0x45, 0x5f, 0x45, 0x58, 0x43, - 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, - 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e, 0x41, 0x4c, 0x52, 0x45, 0x41, 0x44, - 0x59, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x50, 0x45, - 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, - 0x07, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x58, - 0x48, 0x41, 0x55, 0x53, 0x54, 0x45, 0x44, 0x10, 0x08, 0x12, 0x17, 0x0a, 0x13, 0x46, 0x41, 0x49, - 0x4c, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x10, 0x09, 0x12, 0x0b, 0x0a, 0x07, 0x41, 0x42, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, 0x0a, 0x12, - 0x10, 0x0a, 0x0c, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, - 0x0b, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, - 0x45, 0x44, 0x10, 0x0c, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, - 0x10, 0x0d, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, - 0x45, 0x10, 0x0e, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x4c, 0x4f, 0x53, 0x53, - 0x10, 0x0f, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x41, 0x55, 0x54, 0x48, 0x45, 0x4e, 0x54, 0x49, - 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x10, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xfa, 0x01, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0xa7, 0x01, + 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x90, 0x01, + 0xfa, 0x42, 0x8c, 0x01, 0x92, 0x01, 0x88, 0x01, 0x22, 0x85, 0x01, 0x72, 0x82, 0x01, 0x52, 0x02, + 0x4c, 0x48, 0x52, 0x02, 0x55, 0x48, 0x52, 0x02, 0x55, 0x54, 0x52, 0x02, 0x4c, 0x52, 0x52, 0x02, + 0x55, 0x52, 0x52, 0x02, 0x55, 0x46, 0x52, 0x02, 0x55, 0x43, 0x52, 0x02, 0x55, 0x4f, 0x52, 0x02, + 0x4e, 0x52, 0x52, 0x02, 0x44, 0x49, 0x52, 0x02, 0x46, 0x49, 0x52, 0x02, 0x52, 0x4c, 0x52, 0x04, + 0x55, 0x41, 0x45, 0x58, 0x52, 0x04, 0x52, 0x4c, 0x53, 0x45, 0x52, 0x02, 0x44, 0x43, 0x52, 0x03, + 0x55, 0x52, 0x58, 0x52, 0x02, 0x53, 0x49, 0x52, 0x02, 0x49, 0x48, 0x52, 0x03, 0x44, 0x50, 0x45, + 0x52, 0x05, 0x55, 0x4d, 0x53, 0x44, 0x52, 0x52, 0x04, 0x52, 0x46, 0x43, 0x46, 0x52, 
0x04, 0x4e, + 0x46, 0x43, 0x46, 0x52, 0x02, 0x44, 0x54, 0x52, 0x03, 0x55, 0x50, 0x45, 0x52, 0x02, 0x4e, 0x43, + 0x52, 0x02, 0x4f, 0x4d, 0x52, 0x02, 0x44, 0x46, 0x52, 0x02, 0x44, 0x4f, 0x52, 0x02, 0x44, 0x52, + 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, - 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x22, 0xda, 0x01, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x12, 0x40, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x07, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x16, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, - 0x69, 0x66, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x12, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x66, 0x4b, 0x65, 0x79, 0x4e, 0x6f, - 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x3a, 0x36, 0x9a, 0xc5, 0x88, 0x1e, 0x31, 0x0a, 0x2f, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x76, - 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, - 0x4b, 0x0a, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x26, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, - 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x92, 0x01, 0x07, 0x22, 0x05, - 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, - 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, - 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x22, 0xb6, 0x01, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, - 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, - 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, - 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, - 0x76, 0x32, 
0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, - 0x91, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x27, 0x69, 0x6f, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, - 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, - 0x67, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x22, 0x80, 0x04, 0x0a, 0x10, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5d, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x0d, + 0xfa, 0x42, 0x0a, 0x92, 0x01, 0x07, 0x22, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, + 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x22, 0xb8, 0x02, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x06, 0x0a, 0x02, + 0x4f, 0x4b, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x45, 0x44, + 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x02, 0x12, + 0x14, 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x41, 0x52, 0x47, 0x55, 0x4d, + 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x45, 0x41, 0x44, 0x4c, 0x49, 0x4e, + 0x45, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, + 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e, 0x41, + 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x06, 0x12, + 0x15, 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, + 0x4e, 0x49, 0x45, 0x44, 0x10, 0x07, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, + 0x43, 0x45, 0x5f, 0x45, 0x58, 0x48, 0x41, 0x55, 0x53, 0x54, 0x45, 0x44, 0x10, 0x08, 0x12, 0x17, + 0x0a, 0x13, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x43, 0x4f, 0x4e, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x0b, 0x0a, 0x07, 0x41, 0x42, 0x4f, 0x52, 0x54, + 0x45, 0x44, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x55, 0x54, 
0x5f, 0x4f, 0x46, 0x5f, 0x52, + 0x41, 0x4e, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x49, 0x4d, 0x50, 0x4c, + 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x0c, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x54, + 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x0d, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x41, 0x56, 0x41, + 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x0e, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x41, 0x54, 0x41, + 0x5f, 0x4c, 0x4f, 0x53, 0x53, 0x10, 0x0f, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x41, 0x55, 0x54, + 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x10, 0x3a, 0x38, 0x9a, 0xc5, + 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, + 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xda, 0x01, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x40, 0x0a, 0x07, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x16, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x66, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, + 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, + 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x66, + 0x4b, 0x65, 0x79, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x3a, 0x36, 0x9a, 0xc5, 0x88, + 0x1e, 0x31, 0x0a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x22, 0x76, 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4b, 0x0a, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x42, 0x0d, 0xfa, 0x42, 0x0a, + 0x92, 0x01, 0x07, 0x22, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x22, 0xb6, 0x01, 0x0a, 0x0f, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 
0x48, + 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x37, + 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x42, 0x91, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, + 0x27, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1638,12 +1638,12 @@ var file_envoy_config_accesslog_v3_accesslog_proto_goTypes = []interface{}{ (*MetadataFilter)(nil), // 15: envoy.config.accesslog.v3.MetadataFilter (*LogTypeFilter)(nil), // 16: envoy.config.accesslog.v3.LogTypeFilter (*ExtensionFilter)(nil), // 17: envoy.config.accesslog.v3.ExtensionFilter - (*any1.Any)(nil), // 18: google.protobuf.Any + (*anypb.Any)(nil), // 18: google.protobuf.Any (*v3.RuntimeUInt32)(nil), // 19: envoy.config.core.v3.RuntimeUInt32 (*v31.FractionalPercent)(nil), // 20: envoy.type.v3.FractionalPercent (*v32.HeaderMatcher)(nil), // 21: envoy.config.route.v3.HeaderMatcher (*v33.MetadataMatcher)(nil), // 22: envoy.type.matcher.v3.MetadataMatcher - (*wrappers.BoolValue)(nil), // 23: google.protobuf.BoolValue + (*wrapperspb.BoolValue)(nil), // 23: google.protobuf.BoolValue (v34.AccessLogType)(0), // 24: envoy.data.accesslog.v3.AccessLogType } var file_envoy_config_accesslog_v3_accesslog_proto_depIdxs = []int32{ diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.validate.go index 63104a547b6..746f6f2c4c8 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. 
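The goTypes hunk above swaps the deprecated github.com/golang/protobuf ptypes aliases (any1.Any, wrappers.BoolValue) for the canonical google.golang.org/protobuf well-known types, and the new //go:build !disable_pgv constraint lets consumers compile the vendored tree without the protoc-gen-validate output. A minimal sketch of both, assuming a standalone example file (the file, package, and declarations are illustrative, not part of this patch):

// example.go — illustrative only; compiled by default, but dropped when
// building with `go build -tags disable_pgv`, which is how files carrying
// this constraint (and their generated Validate()/ValidateAll() helpers)
// are excluded.
//
//go:build !disable_pgv

package example

import (
	anypb "google.golang.org/protobuf/types/known/anypb"           // replaces github.com/golang/protobuf/ptypes/any
	wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" // replaces github.com/golang/protobuf/ptypes/wrappers
)

// Since golang/protobuf v1.4 the ptypes packages are type aliases of these
// canonical types, so the import swap changes neither the wire format nor
// the generated Go API.
var _ *anypb.Any            // was any1.Any
var _ *wrapperspb.BoolValue // was wrappers.BoolValue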
// source: envoy/config/accesslog/v3/accesslog.proto @@ -2107,7 +2108,7 @@ func (m *ResponseFlagFilter) validate(all bool) error { if _, ok := _ResponseFlagFilter_Flags_InLookup[item]; !ok { err := ResponseFlagFilterValidationError{ field: fmt.Sprintf("Flags[%v]", idx), - reason: "value must be in list [LH UH UT LR UR UF UC UO NR DI FI RL UAEX RLSE DC URX SI IH DPE UMSDR RFCF NFCF DT UPE NC OM DF DO]", + reason: "value must be in list [LH UH UT LR UR UF UC UO NR DI FI RL UAEX RLSE DC URX SI IH DPE UMSDR RFCF NFCF DT UPE NC OM DF DO DR]", } if !all { return err @@ -2226,6 +2227,7 @@ var _ResponseFlagFilter_Flags_InLookup = map[string]struct{}{ "OM": {}, "DF": {}, "DO": {}, + "DR": {}, } // Validate checks the field values on GrpcStatusFilter with the rules defined diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog_vtproto.pb.go new file mode 100644 index 00000000000..e75bf014ac1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog_vtproto.pb.go @@ -0,0 +1,1751 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/accesslog/v3/accesslog.proto + +package accesslogv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *AccessLog) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AccessLog) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLog) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*AccessLog_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Filter != nil { + size, err := m.Filter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AccessLog_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLog_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AccessLogFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_LogTypeFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_MetadataFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_ExtensionFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_GrpcStatusFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_ResponseFlagFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i 
-= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_HeaderFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_OrFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_AndFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_RuntimeFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_TraceableFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_NotHealthCheckFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_DurationFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_StatusCodeFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *AccessLogFilter_StatusCodeFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_StatusCodeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StatusCodeFilter != nil { + size, err := m.StatusCodeFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_DurationFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_DurationFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DurationFilter != nil { + size, err := m.DurationFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_NotHealthCheckFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_NotHealthCheckFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NotHealthCheckFilter != nil { + size, err := m.NotHealthCheckFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_TraceableFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_TraceableFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TraceableFilter != nil { + size, err := m.TraceableFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_RuntimeFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_RuntimeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RuntimeFilter != nil { + size, err := m.RuntimeFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_AndFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_AndFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AndFilter != nil { + size, err := m.AndFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_OrFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_OrFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrFilter != nil { + size, err := m.OrFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_HeaderFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_HeaderFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HeaderFilter != nil { + size, err := m.HeaderFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_ResponseFlagFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_ResponseFlagFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ResponseFlagFilter != nil { + size, err := m.ResponseFlagFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + 
dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_GrpcStatusFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_GrpcStatusFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GrpcStatusFilter != nil { + size, err := m.GrpcStatusFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_ExtensionFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_ExtensionFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExtensionFilter != nil { + size, err := m.ExtensionFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_MetadataFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_MetadataFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MetadataFilter != nil { + size, err := m.MetadataFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x62 + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_LogTypeFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_LogTypeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LogTypeFilter != nil { + size, err := m.LogTypeFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x6a + } + return len(dAtA) - i, nil +} +func (m *ComparisonFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ComparisonFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ComparisonFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value != nil { + if vtmsg, ok := interface{}(m.Value).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(size)) + } else { + encoded, err := proto.Marshal(m.Value) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.Op != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Op)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StatusCodeFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusCodeFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatusCodeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Comparison != nil { + size, err := m.Comparison.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DurationFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DurationFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DurationFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Comparison != nil { + size, err := m.Comparison.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NotHealthCheckFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NotHealthCheckFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *NotHealthCheckFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *TraceableFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TraceableFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TraceableFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m 
== nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *RuntimeFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.UseIndependentRandomness { + i-- + if m.UseIndependentRandomness { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.PercentSampled != nil { + if vtmsg, ok := interface{}(m.PercentSampled).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.PercentSampled) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.RuntimeKey) > 0 { + i -= len(m.RuntimeKey) + copy(dAtA[i:], m.RuntimeKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeKey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AndFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AndFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AndFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Filters) > 0 { + for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Filters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *OrFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OrFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OrFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Filters) > 0 { + for iNdEx := len(m.Filters) - 1; iNdEx >= 
0; iNdEx-- { + size, err := m.Filters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *HeaderFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeaderFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Header != nil { + if vtmsg, ok := interface{}(m.Header).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Header) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResponseFlagFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseFlagFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResponseFlagFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Flags) > 0 { + for iNdEx := len(m.Flags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Flags[iNdEx]) + copy(dAtA[i:], m.Flags[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Flags[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GrpcStatusFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcStatusFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcStatusFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Exclude { + i-- + if m.Exclude { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Statuses) > 0 { + var pksize2 int + for _, num := range m.Statuses { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.Statuses { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = 
uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MetadataFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MatchIfKeyNotFound != nil { + size, err := (*wrapperspb.BoolValue)(m.MatchIfKeyNotFound).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Matcher != nil { + if vtmsg, ok := interface{}(m.Matcher).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Matcher) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LogTypeFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogTypeFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LogTypeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Exclude { + i-- + if m.Exclude { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Types) > 0 { + var pksize2 int + for _, num := range m.Types { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.Types { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExtensionFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExtensionFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ExtensionFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*ExtensionFilter_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExtensionFilter_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ExtensionFilter_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *AccessLog) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Filter != nil { + l = m.Filter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *AccessLog_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.FilterSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *AccessLogFilter_StatusCodeFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StatusCodeFilter != nil { + l = m.StatusCodeFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_DurationFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DurationFilter != nil { + l = m.DurationFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_NotHealthCheckFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotHealthCheckFilter != nil { + l = m.NotHealthCheckFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_TraceableFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TraceableFilter != nil { + l = m.TraceableFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_RuntimeFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RuntimeFilter != nil { + l = m.RuntimeFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_AndFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AndFilter != nil { + l = m.AndFilter.SizeVT() 
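+ // Size accounting for a length-delimited field with a field number below 16:
+ // one tag byte, a varint-encoded length, then the l payload bytes. The bare
+ // `n += 2` in the else branches of these oneof SizeVT methods mirrors the
+ // marshalling functions earlier in this file, which write a set-but-nil
+ // oneof member as its tag byte plus a zero length so the selected oneof
+ // case still round-trips.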
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_OrFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrFilter != nil { + l = m.OrFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_HeaderFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HeaderFilter != nil { + l = m.HeaderFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_ResponseFlagFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ResponseFlagFilter != nil { + l = m.ResponseFlagFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_GrpcStatusFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GrpcStatusFilter != nil { + l = m.GrpcStatusFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_ExtensionFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExtensionFilter != nil { + l = m.ExtensionFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_MetadataFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MetadataFilter != nil { + l = m.MetadataFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_LogTypeFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LogTypeFilter != nil { + l = m.LogTypeFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ComparisonFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Op != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Op)) + } + if m.Value != nil { + if size, ok := interface{}(m.Value).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Value) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *StatusCodeFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Comparison != nil { + l = m.Comparison.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DurationFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Comparison != nil { + l = m.Comparison.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *NotHealthCheckFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *TraceableFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RuntimeFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RuntimeKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PercentSampled != nil { + if size, ok := interface{}(m.PercentSampled).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.PercentSampled) + } + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + if m.UseIndependentRandomness { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *AndFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Filters) > 0 { + for _, e := range m.Filters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *OrFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Filters) > 0 { + for _, e := range m.Filters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *HeaderFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + if size, ok := interface{}(m.Header).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Header) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ResponseFlagFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Flags) > 0 { + for _, s := range m.Flags { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcStatusFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Statuses) > 0 { + l = 0 + for _, e := range m.Statuses { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if m.Exclude { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *MetadataFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Matcher != nil { + if size, ok := interface{}(m.Matcher).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Matcher) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MatchIfKeyNotFound != nil { + l = (*wrapperspb.BoolValue)(m.MatchIfKeyNotFound).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LogTypeFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Types) > 0 { + l = 0 + for _, e := range m.Types { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if m.Exclude { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ExtensionFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ExtensionFilter_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.go index c05715899fe..893acf21e34 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.go +++ 
b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/bootstrap/v3/bootstrap.proto package bootstrapv3 @@ -19,11 +19,11 @@ import ( v38 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" v35 "github.com/envoyproxy/go-control-plane/envoy/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - duration "github.com/golang/protobuf/ptypes/duration" - _struct "github.com/golang/protobuf/ptypes/struct" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -149,7 +149,7 @@ func (CustomInlineHeader_InlineHeaderType) EnumDescriptor() ([]byte, []int) { } // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 41] +// [#next-free-field: 42] type Bootstrap struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -215,7 +215,7 @@ type Bootstrap struct { // seconds). Only one of “stats_flush_interval“ or “stats_flush_on_admin“ // can be set. // Duration must be at least 1ms and at most 5 min. - StatsFlushInterval *duration.Duration `protobuf:"bytes,7,opt,name=stats_flush_interval,json=statsFlushInterval,proto3" json:"stats_flush_interval,omitempty"` + StatsFlushInterval *durationpb.Duration `protobuf:"bytes,7,opt,name=stats_flush_interval,json=statsFlushInterval,proto3" json:"stats_flush_interval,omitempty"` // Types that are assignable to StatsFlush: // // *Bootstrap_StatsFlushOnAdmin @@ -267,7 +267,7 @@ type Bootstrap struct { // Optional proxy version which will be used to set the value of :ref:`server.version statistic // ` if specified. Envoy will not process this value, it will be sent as is to // :ref:`stats sinks `. - StatsServerVersionOverride *wrappers.UInt64Value `protobuf:"bytes,19,opt,name=stats_server_version_override,json=statsServerVersionOverride,proto3" json:"stats_server_version_override,omitempty"` + StatsServerVersionOverride *wrapperspb.UInt64Value `protobuf:"bytes,19,opt,name=stats_server_version_override,json=statsServerVersionOverride,proto3" json:"stats_server_version_override,omitempty"` // Always use TCP queries instead of UDP queries for DNS lookups. // This may be overridden on a per-cluster basis in cds_config, // when :ref:`dns_resolvers ` and @@ -377,6 +377,9 @@ type Bootstrap struct { ApplicationLogConfig *Bootstrap_ApplicationLogConfig `protobuf:"bytes,38,opt,name=application_log_config,json=applicationLogConfig,proto3" json:"application_log_config,omitempty"` // Optional gRPC async manager config. GrpcAsyncClientManagerConfig *Bootstrap_GrpcAsyncClientManagerConfig `protobuf:"bytes,40,opt,name=grpc_async_client_manager_config,json=grpcAsyncClientManagerConfig,proto3" json:"grpc_async_client_manager_config,omitempty"` + // Optional configuration for memory allocation manager. + // Memory releasing is only supported for `tcmalloc allocator `_. 
+ MemoryAllocatorManager *MemoryAllocatorManager `protobuf:"bytes,41,opt,name=memory_allocator_manager,json=memoryAllocatorManager,proto3" json:"memory_allocator_manager,omitempty"` } func (x *Bootstrap) Reset() { @@ -481,7 +484,7 @@ func (x *Bootstrap) GetStatsConfig() *v31.StatsConfig { return nil } -func (x *Bootstrap) GetStatsFlushInterval() *duration.Duration { +func (x *Bootstrap) GetStatsFlushInterval() *durationpb.Duration { if x != nil { return x.StatsFlushInterval } @@ -560,7 +563,7 @@ func (x *Bootstrap) GetHeaderPrefix() string { return "" } -func (x *Bootstrap) GetStatsServerVersionOverride() *wrappers.UInt64Value { +func (x *Bootstrap) GetStatsServerVersionOverride() *wrapperspb.UInt64Value { if x != nil { return x.StatsServerVersionOverride } @@ -688,6 +691,13 @@ func (x *Bootstrap) GetGrpcAsyncClientManagerConfig() *Bootstrap_GrpcAsyncClient return nil } +func (x *Bootstrap) GetMemoryAllocatorManager() *MemoryAllocatorManager { + if x != nil { + return x.MemoryAllocatorManager + } + return nil +} + type isBootstrap_StatsFlush interface { isBootstrap_StatsFlush() } @@ -982,25 +992,25 @@ type Watchdog struct { Actions []*Watchdog_WatchdogAction `protobuf:"bytes,7,rep,name=actions,proto3" json:"actions,omitempty"` // The duration after which Envoy counts a nonresponsive thread in the // “watchdog_miss“ statistic. If not specified the default is 200ms. - MissTimeout *duration.Duration `protobuf:"bytes,1,opt,name=miss_timeout,json=missTimeout,proto3" json:"miss_timeout,omitempty"` + MissTimeout *durationpb.Duration `protobuf:"bytes,1,opt,name=miss_timeout,json=missTimeout,proto3" json:"miss_timeout,omitempty"` // The duration after which Envoy counts a nonresponsive thread in the // “watchdog_mega_miss“ statistic. If not specified the default is // 1000ms. - MegamissTimeout *duration.Duration `protobuf:"bytes,2,opt,name=megamiss_timeout,json=megamissTimeout,proto3" json:"megamiss_timeout,omitempty"` + MegamissTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=megamiss_timeout,json=megamissTimeout,proto3" json:"megamiss_timeout,omitempty"` // If a watched thread has been nonresponsive for this duration, assume a // programming error and kill the entire Envoy process. Set to 0 to disable // kill behavior. If not specified the default is 0 (disabled). - KillTimeout *duration.Duration `protobuf:"bytes,3,opt,name=kill_timeout,json=killTimeout,proto3" json:"kill_timeout,omitempty"` + KillTimeout *durationpb.Duration `protobuf:"bytes,3,opt,name=kill_timeout,json=killTimeout,proto3" json:"kill_timeout,omitempty"` // Defines the maximum jitter used to adjust the “kill_timeout“ if “kill_timeout“ is // enabled. Enabling this feature would help to reduce risk of synchronized // watchdog kill events across proxies due to external triggers. Set to 0 to // disable. If not specified the default is 0 (disabled). - MaxKillTimeoutJitter *duration.Duration `protobuf:"bytes,6,opt,name=max_kill_timeout_jitter,json=maxKillTimeoutJitter,proto3" json:"max_kill_timeout_jitter,omitempty"` + MaxKillTimeoutJitter *durationpb.Duration `protobuf:"bytes,6,opt,name=max_kill_timeout_jitter,json=maxKillTimeoutJitter,proto3" json:"max_kill_timeout_jitter,omitempty"` // If “max(2, ceil(registered_threads * Fraction(*multikill_threshold*)))“ // threads have been nonresponsive for at least this duration kill the entire // Envoy process. Set to 0 to disable this behavior. If not specified the // default is 0 (disabled). 
- MultikillTimeout *duration.Duration `protobuf:"bytes,4,opt,name=multikill_timeout,json=multikillTimeout,proto3" json:"multikill_timeout,omitempty"` + MultikillTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=multikill_timeout,json=multikillTimeout,proto3" json:"multikill_timeout,omitempty"` // Sets the threshold for “multikill_timeout“ in terms of the percentage of // nonresponsive threads required for the “multikill_timeout“. // If not specified the default is 0. @@ -1046,35 +1056,35 @@ func (x *Watchdog) GetActions() []*Watchdog_WatchdogAction { return nil } -func (x *Watchdog) GetMissTimeout() *duration.Duration { +func (x *Watchdog) GetMissTimeout() *durationpb.Duration { if x != nil { return x.MissTimeout } return nil } -func (x *Watchdog) GetMegamissTimeout() *duration.Duration { +func (x *Watchdog) GetMegamissTimeout() *durationpb.Duration { if x != nil { return x.MegamissTimeout } return nil } -func (x *Watchdog) GetKillTimeout() *duration.Duration { +func (x *Watchdog) GetKillTimeout() *durationpb.Duration { if x != nil { return x.KillTimeout } return nil } -func (x *Watchdog) GetMaxKillTimeoutJitter() *duration.Duration { +func (x *Watchdog) GetMaxKillTimeoutJitter() *durationpb.Duration { if x != nil { return x.MaxKillTimeoutJitter } return nil } -func (x *Watchdog) GetMultikillTimeout() *duration.Duration { +func (x *Watchdog) GetMultikillTimeout() *durationpb.Duration { if x != nil { return x.MultikillTimeout } @@ -1173,7 +1183,7 @@ type Runtime struct { // ` by other runtime layers, e.g. // disk or admin. This follows the :ref:`runtime protobuf JSON representation // encoding `. - Base *_struct.Struct `protobuf:"bytes,4,opt,name=base,proto3" json:"base,omitempty"` + Base *structpb.Struct `protobuf:"bytes,4,opt,name=base,proto3" json:"base,omitempty"` } func (x *Runtime) Reset() { @@ -1229,7 +1239,7 @@ func (x *Runtime) GetOverrideSubdirectory() string { return "" } -func (x *Runtime) GetBase() *_struct.Struct { +func (x *Runtime) GetBase() *structpb.Struct { if x != nil { return x.Base } @@ -1300,7 +1310,7 @@ func (m *RuntimeLayer) GetLayerSpecifier() isRuntimeLayer_LayerSpecifier { return nil } -func (x *RuntimeLayer) GetStaticLayer() *_struct.Struct { +func (x *RuntimeLayer) GetStaticLayer() *structpb.Struct { if x, ok := x.GetLayerSpecifier().(*RuntimeLayer_StaticLayer); ok { return x.StaticLayer } @@ -1337,7 +1347,7 @@ type RuntimeLayer_StaticLayer struct { // This follows the :ref:`runtime protobuf JSON representation encoding // `. Unlike static xDS resources, this static // layer is overridable by later layers in the runtime virtual filesystem. - StaticLayer *_struct.Struct `protobuf:"bytes,2,opt,name=static_layer,json=staticLayer,proto3,oneof"` + StaticLayer *structpb.Struct `protobuf:"bytes,2,opt,name=static_layer,json=staticLayer,proto3,oneof"` } type RuntimeLayer_DiskLayer_ struct { @@ -1491,6 +1501,66 @@ func (x *CustomInlineHeader) GetInlineHeaderType() CustomInlineHeader_InlineHead return CustomInlineHeader_REQUEST_HEADER } +type MemoryAllocatorManager struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configures tcmalloc to perform background release of free memory in amount of bytes per “memory_release_interval“ interval. + // If equals to “0“, no memory release will occur. Defaults to “0“. + BytesToRelease uint64 `protobuf:"varint,1,opt,name=bytes_to_release,json=bytesToRelease,proto3" json:"bytes_to_release,omitempty"` + // Interval in milliseconds for memory releasing. 
If specified, during every + // interval Envoy will try to release “bytes_to_release“ of free memory back to operating system for reuse. + // Defaults to 1000 milliseconds. + MemoryReleaseInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=memory_release_interval,json=memoryReleaseInterval,proto3" json:"memory_release_interval,omitempty"` +} + +func (x *MemoryAllocatorManager) Reset() { + *x = MemoryAllocatorManager{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MemoryAllocatorManager) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MemoryAllocatorManager) ProtoMessage() {} + +func (x *MemoryAllocatorManager) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MemoryAllocatorManager.ProtoReflect.Descriptor instead. +func (*MemoryAllocatorManager) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{10} +} + +func (x *MemoryAllocatorManager) GetBytesToRelease() uint64 { + if x != nil { + return x.BytesToRelease + } + return 0 +} + +func (x *MemoryAllocatorManager) GetMemoryReleaseInterval() *durationpb.Duration { + if x != nil { + return x.MemoryReleaseInterval + } + return nil +} + type Bootstrap_StaticResources struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1514,7 +1584,7 @@ type Bootstrap_StaticResources struct { func (x *Bootstrap_StaticResources) Reset() { *x = Bootstrap_StaticResources{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[10] + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1527,7 +1597,7 @@ func (x *Bootstrap_StaticResources) String() string { func (*Bootstrap_StaticResources) ProtoMessage() {} func (x *Bootstrap_StaticResources) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[10] + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1596,7 +1666,7 @@ type Bootstrap_DynamicResources struct { func (x *Bootstrap_DynamicResources) Reset() { *x = Bootstrap_DynamicResources{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[11] + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1609,7 +1679,7 @@ func (x *Bootstrap_DynamicResources) String() string { func (*Bootstrap_DynamicResources) ProtoMessage() {} func (x *Bootstrap_DynamicResources) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[11] + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1674,7 +1744,7 @@ type Bootstrap_ApplicationLogConfig struct { func (x *Bootstrap_ApplicationLogConfig) 
Reset() { *x = Bootstrap_ApplicationLogConfig{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[12] + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1687,7 +1757,7 @@ func (x *Bootstrap_ApplicationLogConfig) String() string { func (*Bootstrap_ApplicationLogConfig) ProtoMessage() {} func (x *Bootstrap_ApplicationLogConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[12] + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1729,7 +1799,7 @@ type Bootstrap_DeferredStatOptions struct { func (x *Bootstrap_DeferredStatOptions) Reset() { *x = Bootstrap_DeferredStatOptions{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[13] + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1742,7 +1812,7 @@ func (x *Bootstrap_DeferredStatOptions) String() string { func (*Bootstrap_DeferredStatOptions) ProtoMessage() {} func (x *Bootstrap_DeferredStatOptions) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[13] + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1772,13 +1842,13 @@ type Bootstrap_GrpcAsyncClientManagerConfig struct { // Optional field to set the expiration time for the cached gRPC client object. // The minimal value is 5s and the default is 50s. 
- MaxCachedEntryIdleDuration *duration.Duration `protobuf:"bytes,1,opt,name=max_cached_entry_idle_duration,json=maxCachedEntryIdleDuration,proto3" json:"max_cached_entry_idle_duration,omitempty"` + MaxCachedEntryIdleDuration *durationpb.Duration `protobuf:"bytes,1,opt,name=max_cached_entry_idle_duration,json=maxCachedEntryIdleDuration,proto3" json:"max_cached_entry_idle_duration,omitempty"` } func (x *Bootstrap_GrpcAsyncClientManagerConfig) Reset() { *x = Bootstrap_GrpcAsyncClientManagerConfig{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[14] + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1791,7 +1861,7 @@ func (x *Bootstrap_GrpcAsyncClientManagerConfig) String() string { func (*Bootstrap_GrpcAsyncClientManagerConfig) ProtoMessage() {} func (x *Bootstrap_GrpcAsyncClientManagerConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[14] + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1807,7 +1877,7 @@ func (*Bootstrap_GrpcAsyncClientManagerConfig) Descriptor() ([]byte, []int) { return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 4} } -func (x *Bootstrap_GrpcAsyncClientManagerConfig) GetMaxCachedEntryIdleDuration() *duration.Duration { +func (x *Bootstrap_GrpcAsyncClientManagerConfig) GetMaxCachedEntryIdleDuration() *durationpb.Duration { if x != nil { return x.MaxCachedEntryIdleDuration } @@ -1829,7 +1899,7 @@ type Bootstrap_ApplicationLogConfig_LogFormat struct { func (x *Bootstrap_ApplicationLogConfig_LogFormat) Reset() { *x = Bootstrap_ApplicationLogConfig_LogFormat{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[16] + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1842,7 +1912,7 @@ func (x *Bootstrap_ApplicationLogConfig_LogFormat) String() string { func (*Bootstrap_ApplicationLogConfig_LogFormat) ProtoMessage() {} func (x *Bootstrap_ApplicationLogConfig_LogFormat) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[16] + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1865,7 +1935,7 @@ func (m *Bootstrap_ApplicationLogConfig_LogFormat) GetLogFormat() isBootstrap_Ap return nil } -func (x *Bootstrap_ApplicationLogConfig_LogFormat) GetJsonFormat() *_struct.Struct { +func (x *Bootstrap_ApplicationLogConfig_LogFormat) GetJsonFormat() *structpb.Struct { if x, ok := x.GetLogFormat().(*Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat); ok { return x.JsonFormat } @@ -1887,7 +1957,7 @@ type Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat struct { // Flush application logs in JSON format. The configured JSON struct can // support all the format flags specified in the :option:`--log-format` // command line options section, except for the ``%v`` and ``%_`` flags.
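The two type changes in the hunks above (duration.Duration to durationpb.Duration, _struct.Struct to structpb.Struct) move the generated code off the deprecated github.com/golang/protobuf ptypes aliases onto the canonical google.golang.org/protobuf well-known types. A sketch of populating both fields under the new API, with the same import-path assumption as the previous example and illustrative values; the nested oneof wrapper names follow the generated types shown in this diff:

package main

import (
	"time"

	bootstrapv3 "github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3"
	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/structpb"
)

func buildBootstrap() (*bootstrapv3.Bootstrap, error) {
	// Per the generated comment above, the minimum is 5s and the default 50s;
	// the PGV rule in the descriptor rejects anything below 5s.
	grpcMgr := &bootstrapv3.Bootstrap_GrpcAsyncClientManagerConfig{
		MaxCachedEntryIdleDuration: durationpb.New(30 * time.Second),
	}

	// json_format carries a google.protobuf.Struct; %v and %_ are not allowed
	// in JSON log formats, so only other --log-format flags appear here.
	jsonFormat, err := structpb.NewStruct(map[string]interface{}{
		"level":  "%l",
		"logger": "%n",
	})
	if err != nil {
		return nil, err
	}

	return &bootstrapv3.Bootstrap{
		GrpcAsyncClientManagerConfig: grpcMgr,
		ApplicationLogConfig: &bootstrapv3.Bootstrap_ApplicationLogConfig{
			LogFormat: &bootstrapv3.Bootstrap_ApplicationLogConfig_LogFormat{
				LogFormat: &bootstrapv3.Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat{
					JsonFormat: jsonFormat,
				},
			},
		},
	}, nil
}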
- JsonFormat *_struct.Struct `protobuf:"bytes,1,opt,name=json_format,json=jsonFormat,proto3,oneof"` + JsonFormat *structpb.Struct `protobuf:"bytes,1,opt,name=json_format,json=jsonFormat,proto3,oneof"` } type Bootstrap_ApplicationLogConfig_LogFormat_TextFormat struct { @@ -1919,7 +1989,7 @@ type ClusterManager_OutlierDetection struct { func (x *ClusterManager_OutlierDetection) Reset() { *x = ClusterManager_OutlierDetection{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[17] + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1932,7 +2002,7 @@ func (x *ClusterManager_OutlierDetection) String() string { func (*ClusterManager_OutlierDetection) ProtoMessage() {} func (x *ClusterManager_OutlierDetection) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[17] + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1975,7 +2045,7 @@ type Watchdog_WatchdogAction struct { func (x *Watchdog_WatchdogAction) Reset() { *x = Watchdog_WatchdogAction{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[18] + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1988,7 +2058,7 @@ func (x *Watchdog_WatchdogAction) String() string { func (*Watchdog_WatchdogAction) ProtoMessage() {} func (x *Watchdog_WatchdogAction) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[18] + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2044,7 +2114,7 @@ type RuntimeLayer_DiskLayer struct { func (x *RuntimeLayer_DiskLayer) Reset() { *x = RuntimeLayer_DiskLayer{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[19] + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2057,7 +2127,7 @@ func (x *RuntimeLayer_DiskLayer) String() string { func (*RuntimeLayer_DiskLayer) ProtoMessage() {} func (x *RuntimeLayer_DiskLayer) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[19] + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2104,7 +2174,7 @@ type RuntimeLayer_AdminLayer struct { func (x *RuntimeLayer_AdminLayer) Reset() { *x = RuntimeLayer_AdminLayer{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[20] + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2117,7 +2187,7 @@ func (x *RuntimeLayer_AdminLayer) String() string { func (*RuntimeLayer_AdminLayer) ProtoMessage() {} func (x *RuntimeLayer_AdminLayer) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[20] + mi := 
&file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2148,7 +2218,7 @@ type RuntimeLayer_RtdsLayer struct { func (x *RuntimeLayer_RtdsLayer) Reset() { *x = RuntimeLayer_RtdsLayer{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[21] + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2161,7 +2231,7 @@ func (x *RuntimeLayer_RtdsLayer) String() string { func (*RuntimeLayer_RtdsLayer) ProtoMessage() {} func (x *RuntimeLayer_RtdsLayer) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[21] + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2254,7 +2324,7 @@ var file_envoy_config_bootstrap_v3_bootstrap_proto_rawDesc = []byte{ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0xac, 0x23, 0x0a, 0x09, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x12, + 0x6f, 0x22, 0x99, 0x24, 0x0a, 0x09, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x12, 0x2e, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, @@ -2451,332 +2521,348 @@ var file_envoy_config_bootstrap_v3_bootstrap_proto_rawDesc = []byte{ 0x41, 0x73, 0x79, 0x6e, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1c, 0x67, 0x72, 0x70, 0x63, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x9a, 0x02, 0x0a, 0x0f, 0x53, 0x74, 0x61, 0x74, 0x69, - 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x09, 0x6c, 0x69, - 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, - 0x72, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x12, 0x3c, 0x0a, 0x08, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x52, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x65, - 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, - 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, - 
0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x07, - 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, - 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, - 0x72, 0x61, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x73, 0x1a, 0x89, 0x03, 0x0a, 0x10, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x41, 0x0a, 0x0a, 0x6c, 0x64, 0x73, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x52, 0x09, 0x6c, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x6c, - 0x64, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x5f, 0x6c, 0x6f, 0x63, - 0x61, 0x74, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x6c, 0x64, 0x73, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, - 0x41, 0x0a, 0x0a, 0x63, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x63, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x63, 0x64, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x13, 0x63, 0x64, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x4c, - 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x61, 0x64, 0x73, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x52, 0x09, 0x61, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x3b, 0x9a, 0xc5, - 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x42, - 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x1a, - 0xf9, 0x01, 0x0a, 0x14, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, - 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x62, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, - 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, - 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, - 0x61, 0x70, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, - 0x67, 0x43, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x61, - 0x74, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0x7d, 0x0a, 0x09, - 0x4c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x3a, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, - 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x46, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x21, 0x0a, 0x0b, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x66, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0a, 0x74, 0x65, - 0x78, 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x11, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, - 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x5a, 0x0a, 0x13, 0x44, - 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x43, 0x0a, 0x1e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, - 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, - 0x74, 0x61, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1b, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x44, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x1a, 0x89, 0x01, 0x0a, 0x1c, 0x47, 0x72, 0x70, 0x63, - 0x41, 0x73, 0x79, 0x6e, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x69, 0x0a, 0x1e, 0x6d, 0x61, 0x78, 0x5f, - 0x63, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x69, 0x64, 0x6c, - 0x65, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, - 0xaa, 0x01, 0x04, 0x32, 0x02, 0x08, 0x05, 0x52, 0x1a, 0x6d, 0x61, 0x78, 0x43, 0x61, 0x63, 0x68, - 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x49, 0x64, 0x6c, 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0x7b, 0x0a, 0x21, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x40, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, - 0x76, 0x32, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x42, 0x0d, 0x0a, 0x0b, - 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x4a, 0x04, 0x08, 0x0a, 0x10, - 0x0b, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x52, 
0x07, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, - 0x22, 0x89, 0x03, 0x0a, 0x05, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, - 0x33, 0x0a, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, - 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, - 0x50, 0x61, 0x74, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, - 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x66, - 0x69, 0x6c, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x37, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x12, 0x49, 0x0a, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x6f, - 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x69, - 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, - 0x6e, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x69, - 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x4c, - 0x69, 0x6d, 0x69, 0x74, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, - 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x22, 0x94, 0x05, 0x0a, - 0x0e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, - 0x2c, 0x0a, 0x12, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x67, 0x0a, - 0x11, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6b, 0x0a, 0x18, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, + 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x18, 0x29, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, - 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 
0x4d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x2e, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x52, 0x0a, 0x14, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x69, 0x6e, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x12, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x42, 0x69, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x51, 0x0a, 0x11, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0f, 0x6c, 0x6f, - 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x47, 0x0a, - 0x20, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, - 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x44, - 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xc9, 0x01, 0x0a, 0x10, 0x4f, 0x75, 0x74, 0x6c, 0x69, - 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0e, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, - 0x68, 0x12, 0x4d, 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x3a, 0x40, 0x9a, 0xc5, 0x88, 0x1e, 0x3b, 0x0a, 0x39, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, - 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x2e, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, - 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x22, 0xb0, 0x01, 0x0a, 0x09, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, - 0x73, 0x12, 0x55, 0x0a, 0x14, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, - 0x5f, 0x77, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 
0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, - 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, - 0x68, 0x64, 0x6f, 0x67, 0x52, 0x12, 0x6d, 0x61, 0x69, 0x6e, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, - 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x12, 0x4c, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, - 0x65, 0x72, 0x5f, 0x77, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x41, 0x6c, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x52, 0x16, 0x6d, 0x65, 0x6d, + 0x6f, 0x72, 0x79, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x1a, 0x9a, 0x02, 0x0a, 0x0f, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x09, + 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x12, 0x3c, 0x0a, 0x08, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x08, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, + 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x07, 0x73, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, + 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x1a, 0x89, 0x03, 0x0a, 0x10, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x41, 0x0a, 0x0a, 0x6c, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x6c, + 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x6c, 0x64, 0x73, 0x5f, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x41, 0x0a, 0x0a, + 0x63, 0x64, 0x73, 0x5f, 
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x63, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x32, 0x0a, 0x15, 0x63, 0x64, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, + 0x63, 0x64, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x6f, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x61, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, + 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, + 0x61, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x3b, 0x9a, 0xc5, 0x88, 0x1e, 0x36, + 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, + 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x6f, 0x6f, 0x74, + 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x1a, 0xf9, 0x01, 0x0a, + 0x14, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x62, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, + 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, + 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x09, + 0x6c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0x7d, 0x0a, 0x09, 0x4c, 0x6f, 0x67, + 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x3a, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x12, 0x21, 0x0a, 0x0b, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0a, 0x74, 0x65, 0x78, 0x74, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x11, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x5a, 0x0a, 0x13, 0x44, 0x65, 0x66, 0x65, + 0x72, 0x72, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x43, 0x0a, 0x1e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, + 0x65, 0x64, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1b, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x44, + 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x43, 
0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x1a, 0x89, 0x01, 0x0a, 0x1c, 0x47, 0x72, 0x70, 0x63, 0x41, 0x73, 0x79, + 0x6e, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x69, 0x0a, 0x1e, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, + 0x32, 0x02, 0x08, 0x05, 0x52, 0x1a, 0x6d, 0x61, 0x78, 0x43, 0x61, 0x63, 0x68, 0x65, 0x64, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x49, 0x64, 0x6c, 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x7b, 0x0a, 0x21, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x40, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x2a, 0x9a, + 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, + 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x42, 0x0d, 0x0a, 0x0b, 0x73, 0x74, 0x61, + 0x74, 0x73, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x4a, 0x04, + 0x08, 0x0b, 0x10, 0x0c, 0x52, 0x07, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x22, 0x89, 0x03, + 0x0a, 0x05, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, + 0x67, 0x52, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x33, 0x0a, 0x0f, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, + 0x18, 0x01, 0x52, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, + 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, + 0x50, 0x61, 0x74, 0x68, 0x12, 0x37, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 
0x73, 0x12, 0x49, 0x0a, + 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x69, 0x67, 0x6e, 0x6f, + 0x72, 0x65, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x69, 0x67, 0x6e, 0x6f, + 0x72, 0x65, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, + 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x22, 0x94, 0x05, 0x0a, 0x0e, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x2c, 0x0a, 0x12, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x67, 0x0a, 0x11, 0x6f, 0x75, + 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2e, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x10, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x52, 0x0a, 0x14, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, + 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x12, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x69, 0x6e, + 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x51, 0x0a, 0x11, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0f, 0x6c, 0x6f, 0x61, 0x64, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x47, 0x0a, 0x20, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x65, + 0x72, 0x72, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, + 
0x69, 0x6f, 0x6e, 0x1a, 0xc9, 0x01, 0x0a, 0x10, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, + 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0e, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x4d, + 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x40, 0x9a, + 0xc5, 0x88, 0x1e, 0x3b, 0x0a, 0x39, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x4f, + 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x3a, + 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, + 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x22, 0xb0, 0x01, 0x0a, 0x09, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x73, 0x12, 0x55, + 0x0a, 0x14, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x61, + 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, + 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, + 0x67, 0x52, 0x12, 0x6d, 0x61, 0x69, 0x6e, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x57, 0x61, 0x74, + 0x63, 0x68, 0x64, 0x6f, 0x67, 0x12, 0x4c, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, + 0x77, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, + 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, + 0x64, 0x6f, 0x67, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x57, 0x61, 0x74, 0x63, 0x68, + 0x64, 0x6f, 0x67, 0x22, 0xba, 0x06, 0x0a, 0x08, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, + 0x12, 0x4c, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, - 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x57, 0x61, - 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x22, 0xba, 0x06, 0x0a, 0x08, 0x57, 0x61, 0x74, 0x63, 0x68, - 0x64, 0x6f, 0x67, 0x12, 0x4c, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, - 0x2e, 0x57, 0x61, 0x74, 
0x63, 0x68, 0x64, 0x6f, 0x67, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, - 0x6f, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x3c, 0x0a, 0x0c, 0x6d, 0x69, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x0b, 0x6d, 0x69, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, - 0x44, 0x0a, 0x10, 0x6d, 0x65, 0x67, 0x61, 0x6d, 0x69, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x6d, 0x65, 0x67, 0x61, 0x6d, 0x69, 0x73, 0x73, 0x54, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3c, 0x0a, 0x0c, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x6b, 0x69, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x12, 0x5a, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x06, + 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, + 0x0a, 0x0c, 0x6d, 0x69, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, - 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x32, 0x00, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x4b, 0x69, - 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, - 0x46, 0x0a, 0x11, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6b, 0x69, 0x6c, 0x6c, - 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x47, 0x0a, 0x13, 0x6d, 0x75, 0x6c, 0x74, 0x69, - 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x12, 0x6d, 0x75, - 0x6c, 0x74, 0x69, 0x6b, 0x69, 0x6c, 0x6c, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, - 0x1a, 0x85, 0x02, 0x0a, 0x0e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 
0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x60, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, - 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x2e, 0x57, 0x61, 0x74, 0x63, - 0x68, 0x64, 0x6f, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, - 0x64, 0x6f, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, - 0x10, 0x01, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x4d, 0x0a, 0x0d, 0x57, 0x61, 0x74, - 0x63, 0x68, 0x64, 0x6f, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4b, 0x49, 0x4c, 0x4c, 0x10, - 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x4b, 0x49, 0x4c, 0x4c, 0x10, 0x02, - 0x12, 0x0c, 0x0a, 0x08, 0x4d, 0x45, 0x47, 0x41, 0x4d, 0x49, 0x53, 0x53, 0x10, 0x03, 0x12, 0x08, - 0x0a, 0x04, 0x4d, 0x49, 0x53, 0x53, 0x10, 0x04, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, - 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, - 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, - 0x64, 0x6f, 0x67, 0x22, 0x51, 0x0a, 0x0b, 0x46, 0x61, 0x74, 0x61, 0x6c, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xdc, 0x01, 0x0a, 0x07, 0x52, 0x75, 0x6e, 0x74, 0x69, - 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x72, 0x6f, - 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, - 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, - 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x33, 0x0a, 0x15, 0x6f, 0x76, 0x65, - 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, - 0x64, 0x65, 0x53, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x2b, - 0x0a, 0x04, 0x62, 0x61, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x62, 0x61, 0x73, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, - 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, - 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x22, 0xdb, 0x06, 0x0a, 0x0c, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, - 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 
0x01, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x6c, 0x61, - 0x79, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0b, 0x6d, 0x69, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x44, 0x0a, 0x10, + 0x6d, 0x65, 0x67, 0x61, 0x6d, 0x69, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x0f, 0x6d, 0x65, 0x67, 0x61, 0x6d, 0x69, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x12, 0x3c, 0x0a, 0x0c, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x6b, 0x69, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x12, 0x5a, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0xaa, 0x01, 0x02, 0x32, 0x00, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x4b, 0x69, 0x6c, 0x6c, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x11, + 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x10, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6b, 0x69, 0x6c, 0x6c, 0x54, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x12, 0x47, 0x0a, 0x13, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6b, 0x69, 0x6c, + 0x6c, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x12, 0x6d, 0x75, 0x6c, 0x74, 0x69, + 0x6b, 0x69, 0x6c, 0x6c, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x1a, 0x85, 0x02, + 0x0a, 0x0e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x42, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x60, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, + 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, + 
0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x4d, 0x0a, 0x0d, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, + 0x6f, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, + 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4b, 0x49, 0x4c, 0x4c, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x4b, 0x49, 0x4c, 0x4c, 0x10, 0x02, 0x12, 0x0c, 0x0a, + 0x08, 0x4d, 0x45, 0x47, 0x41, 0x4d, 0x49, 0x53, 0x53, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x4d, + 0x49, 0x53, 0x53, 0x10, 0x04, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, + 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, + 0x22, 0x51, 0x0a, 0x0b, 0x46, 0x61, 0x74, 0x61, 0x6c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x42, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x22, 0xdc, 0x01, 0x0a, 0x07, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x52, 0x6f, + 0x6f, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x33, 0x0a, 0x15, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x5f, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53, + 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x2b, 0x0a, 0x04, 0x62, + 0x61, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, - 0x63, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x61, 0x79, 0x65, - 0x72, 0x12, 0x52, 0x0a, 0x0a, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x63, 0x74, 0x52, 0x04, 0x62, 0x61, 0x73, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, + 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, + 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x22, 0xdb, 0x06, 0x0a, 0x0c, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, + 0x79, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x3c, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x6c, 0x61, 0x79, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, + 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x52, + 0x0a, 0x0a, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x44, 0x69, 0x73, 0x6b, + 0x4c, 0x61, 0x79, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x4c, 0x61, 0x79, + 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, + 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0a, 0x72, 0x74, 0x64, + 0x73, 0x5f, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, + 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, + 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x52, 0x74, 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, + 0x48, 0x00, 0x52, 0x09, 0x72, 0x74, 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x1a, 0xc1, 0x01, + 0x0a, 0x09, 0x44, 0x69, 0x73, 0x6b, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x73, + 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x22, + 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x79, 0x12, 0x34, 0x0a, 0x16, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x14, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, + 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, + 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x4c, 0x61, 0x79, 0x65, + 0x72, 0x1a, 0x46, 0x0a, 0x0a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x3a, + 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, - 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x44, - 0x69, 0x73, 0x6b, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x64, 0x69, 0x73, 0x6b, - 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x55, 0x0a, 
0x0b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, - 0x61, 0x79, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, + 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x41, + 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x1a, 0x9d, 0x01, 0x0a, 0x09, 0x52, 0x74, + 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x0b, 0x72, + 0x74, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x0a, 0x72, 0x74, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, + 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, + 0x52, 0x74, 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, + 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, + 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, + 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x42, 0x16, 0x0a, 0x0f, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, + 0x22, 0x82, 0x01, 0x0a, 0x0e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x52, 0x75, 0x6e, 0x74, + 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x06, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x61, + 0x79, 0x65, 0x72, 0x73, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, - 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, - 0x79, 0x65, 0x72, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x48, 0x00, - 0x52, 0x0a, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0a, - 0x72, 0x74, 0x64, 0x73, 0x5f, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, - 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x52, 0x74, 0x64, 0x73, 0x4c, 0x61, - 0x79, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x72, 0x74, 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, - 0x1a, 0xc1, 0x01, 0x0a, 0x09, 0x44, 0x69, 0x73, 0x6b, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x21, - 0x0a, 0x0c, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x52, 0x6f, 0x6f, - 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 
0x63, 0x74, 0x6f, 0x72, - 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x34, 0x0a, 0x16, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x3a, 0x37, 0x9a, 0xc5, 0x88, - 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, - 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x4c, - 0x61, 0x79, 0x65, 0x72, 0x1a, 0x46, 0x0a, 0x0a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, - 0x65, 0x72, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, - 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, - 0x72, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x1a, 0x9d, 0x01, 0x0a, - 0x09, 0x52, 0x74, 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x43, - 0x0a, 0x0b, 0x72, 0x74, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0a, 0x72, 0x74, 0x64, 0x73, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, + 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x52, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x22, 0xb1, 0x02, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x12, + 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, + 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x10, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x76, 0x0a, 0x12, 0x69, 0x6e, 0x6c, + 0x69, 0x6e, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x10, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x79, 0x70, + 0x65, 0x22, 0x66, 0x0a, 0x10, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, + 
0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x52, 0x45, 0x51, + 0x55, 0x45, 0x53, 0x54, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x01, 0x12, 0x13, + 0x0a, 0x0f, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, + 0x52, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x5f, + 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x03, 0x22, 0x95, 0x01, 0x0a, 0x16, 0x4d, 0x65, + 0x6d, 0x6f, 0x72, 0x79, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x74, 0x6f, + 0x5f, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x12, 0x51, + 0x0a, 0x17, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, + 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x6d, 0x65, 0x6d, 0x6f, + 0x72, 0x79, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x42, 0x91, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x27, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, - 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, - 0x65, 0x72, 0x2e, 0x52, 0x74, 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x3a, 0x2d, 0x9a, 0xc5, - 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, - 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x42, 0x16, 0x0a, 0x0f, 0x6c, - 0x61, 0x79, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, - 0xf8, 0x42, 0x01, 0x22, 0x82, 0x01, 0x0a, 0x0e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x52, - 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x06, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, - 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x52, - 0x06, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x73, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, - 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x65, - 0x64, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x22, 0xb1, 0x02, 0x0a, 0x12, 0x43, 0x75, 0x73, - 0x74, 0x6f, 0x6d, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, - 0x3b, 0x0a, 0x12, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, - 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x10, 0x69, 0x6e, 0x6c, 0x69, - 0x6e, 0x65, 0x48, 0x65, 
0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x76, 0x0a, 0x12, - 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, - 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x49, 0x6e, 0x6c, 0x69, 0x6e, - 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, - 0x10, 0x01, 0x52, 0x10, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x54, 0x79, 0x70, 0x65, 0x22, 0x66, 0x0a, 0x10, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x51, 0x55, - 0x45, 0x53, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, - 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x5f, 0x48, 0x45, - 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, - 0x53, 0x45, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x03, 0x42, 0x91, 0x01, 0xba, - 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x27, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, - 0x42, 0x0e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, - 0x70, 0x2f, 0x76, 0x33, 0x3b, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x76, 0x33, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x70, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, + 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x62, 0x6f, 0x6f, + 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2f, 0x76, 0x33, 0x3b, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, + 0x72, 0x61, 0x70, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2792,7 +2878,7 @@ func file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP() []byte { } var file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes = make([]protoimpl.MessageInfo, 22) +var file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes = make([]protoimpl.MessageInfo, 23) var file_envoy_config_bootstrap_v3_bootstrap_proto_goTypes = []interface{}{ 
(Watchdog_WatchdogAction_WatchdogEvent)(0), // 0: envoy.config.bootstrap.v3.Watchdog.WatchdogAction.WatchdogEvent (CustomInlineHeader_InlineHeaderType)(0), // 1: envoy.config.bootstrap.v3.CustomInlineHeader.InlineHeaderType @@ -2806,113 +2892,116 @@ var file_envoy_config_bootstrap_v3_bootstrap_proto_goTypes = []interface{}{ (*RuntimeLayer)(nil), // 9: envoy.config.bootstrap.v3.RuntimeLayer (*LayeredRuntime)(nil), // 10: envoy.config.bootstrap.v3.LayeredRuntime (*CustomInlineHeader)(nil), // 11: envoy.config.bootstrap.v3.CustomInlineHeader - (*Bootstrap_StaticResources)(nil), // 12: envoy.config.bootstrap.v3.Bootstrap.StaticResources - (*Bootstrap_DynamicResources)(nil), // 13: envoy.config.bootstrap.v3.Bootstrap.DynamicResources - (*Bootstrap_ApplicationLogConfig)(nil), // 14: envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig - (*Bootstrap_DeferredStatOptions)(nil), // 15: envoy.config.bootstrap.v3.Bootstrap.DeferredStatOptions - (*Bootstrap_GrpcAsyncClientManagerConfig)(nil), // 16: envoy.config.bootstrap.v3.Bootstrap.GrpcAsyncClientManagerConfig - nil, // 17: envoy.config.bootstrap.v3.Bootstrap.CertificateProviderInstancesEntry - (*Bootstrap_ApplicationLogConfig_LogFormat)(nil), // 18: envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig.LogFormat - (*ClusterManager_OutlierDetection)(nil), // 19: envoy.config.bootstrap.v3.ClusterManager.OutlierDetection - (*Watchdog_WatchdogAction)(nil), // 20: envoy.config.bootstrap.v3.Watchdog.WatchdogAction - (*RuntimeLayer_DiskLayer)(nil), // 21: envoy.config.bootstrap.v3.RuntimeLayer.DiskLayer - (*RuntimeLayer_AdminLayer)(nil), // 22: envoy.config.bootstrap.v3.RuntimeLayer.AdminLayer - (*RuntimeLayer_RtdsLayer)(nil), // 23: envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer - (*v3.Node)(nil), // 24: envoy.config.core.v3.Node - (*v3.ApiConfigSource)(nil), // 25: envoy.config.core.v3.ApiConfigSource - (*v31.StatsSink)(nil), // 26: envoy.config.metrics.v3.StatsSink - (*v31.StatsConfig)(nil), // 27: envoy.config.metrics.v3.StatsConfig - (*duration.Duration)(nil), // 28: google.protobuf.Duration - (*v32.Tracing)(nil), // 29: envoy.config.trace.v3.Tracing - (*v33.OverloadManager)(nil), // 30: envoy.config.overload.v3.OverloadManager - (*wrappers.UInt64Value)(nil), // 31: google.protobuf.UInt64Value - (*v3.DnsResolutionConfig)(nil), // 32: envoy.config.core.v3.DnsResolutionConfig - (*v3.TypedExtensionConfig)(nil), // 33: envoy.config.core.v3.TypedExtensionConfig - (*v3.ConfigSource)(nil), // 34: envoy.config.core.v3.ConfigSource - (*v34.AccessLog)(nil), // 35: envoy.config.accesslog.v3.AccessLog - (*v3.Address)(nil), // 36: envoy.config.core.v3.Address - (*v3.SocketOption)(nil), // 37: envoy.config.core.v3.SocketOption - (*v3.BindConfig)(nil), // 38: envoy.config.core.v3.BindConfig - (*v35.Percent)(nil), // 39: envoy.type.v3.Percent - (*_struct.Struct)(nil), // 40: google.protobuf.Struct - (*v36.Listener)(nil), // 41: envoy.config.listener.v3.Listener - (*v37.Cluster)(nil), // 42: envoy.config.cluster.v3.Cluster - (*v38.Secret)(nil), // 43: envoy.extensions.transport_sockets.tls.v3.Secret - (*v3.EventServiceConfig)(nil), // 44: envoy.config.core.v3.EventServiceConfig + (*MemoryAllocatorManager)(nil), // 12: envoy.config.bootstrap.v3.MemoryAllocatorManager + (*Bootstrap_StaticResources)(nil), // 13: envoy.config.bootstrap.v3.Bootstrap.StaticResources + (*Bootstrap_DynamicResources)(nil), // 14: envoy.config.bootstrap.v3.Bootstrap.DynamicResources + (*Bootstrap_ApplicationLogConfig)(nil), // 15: 
envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig + (*Bootstrap_DeferredStatOptions)(nil), // 16: envoy.config.bootstrap.v3.Bootstrap.DeferredStatOptions + (*Bootstrap_GrpcAsyncClientManagerConfig)(nil), // 17: envoy.config.bootstrap.v3.Bootstrap.GrpcAsyncClientManagerConfig + nil, // 18: envoy.config.bootstrap.v3.Bootstrap.CertificateProviderInstancesEntry + (*Bootstrap_ApplicationLogConfig_LogFormat)(nil), // 19: envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig.LogFormat + (*ClusterManager_OutlierDetection)(nil), // 20: envoy.config.bootstrap.v3.ClusterManager.OutlierDetection + (*Watchdog_WatchdogAction)(nil), // 21: envoy.config.bootstrap.v3.Watchdog.WatchdogAction + (*RuntimeLayer_DiskLayer)(nil), // 22: envoy.config.bootstrap.v3.RuntimeLayer.DiskLayer + (*RuntimeLayer_AdminLayer)(nil), // 23: envoy.config.bootstrap.v3.RuntimeLayer.AdminLayer + (*RuntimeLayer_RtdsLayer)(nil), // 24: envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer + (*v3.Node)(nil), // 25: envoy.config.core.v3.Node + (*v3.ApiConfigSource)(nil), // 26: envoy.config.core.v3.ApiConfigSource + (*v31.StatsSink)(nil), // 27: envoy.config.metrics.v3.StatsSink + (*v31.StatsConfig)(nil), // 28: envoy.config.metrics.v3.StatsConfig + (*durationpb.Duration)(nil), // 29: google.protobuf.Duration + (*v32.Tracing)(nil), // 30: envoy.config.trace.v3.Tracing + (*v33.OverloadManager)(nil), // 31: envoy.config.overload.v3.OverloadManager + (*wrapperspb.UInt64Value)(nil), // 32: google.protobuf.UInt64Value + (*v3.DnsResolutionConfig)(nil), // 33: envoy.config.core.v3.DnsResolutionConfig + (*v3.TypedExtensionConfig)(nil), // 34: envoy.config.core.v3.TypedExtensionConfig + (*v3.ConfigSource)(nil), // 35: envoy.config.core.v3.ConfigSource + (*v34.AccessLog)(nil), // 36: envoy.config.accesslog.v3.AccessLog + (*v3.Address)(nil), // 37: envoy.config.core.v3.Address + (*v3.SocketOption)(nil), // 38: envoy.config.core.v3.SocketOption + (*v3.BindConfig)(nil), // 39: envoy.config.core.v3.BindConfig + (*v35.Percent)(nil), // 40: envoy.type.v3.Percent + (*structpb.Struct)(nil), // 41: google.protobuf.Struct + (*v36.Listener)(nil), // 42: envoy.config.listener.v3.Listener + (*v37.Cluster)(nil), // 43: envoy.config.cluster.v3.Cluster + (*v38.Secret)(nil), // 44: envoy.extensions.transport_sockets.tls.v3.Secret + (*v3.EventServiceConfig)(nil), // 45: envoy.config.core.v3.EventServiceConfig } var file_envoy_config_bootstrap_v3_bootstrap_proto_depIdxs = []int32{ - 24, // 0: envoy.config.bootstrap.v3.Bootstrap.node:type_name -> envoy.config.core.v3.Node - 12, // 1: envoy.config.bootstrap.v3.Bootstrap.static_resources:type_name -> envoy.config.bootstrap.v3.Bootstrap.StaticResources - 13, // 2: envoy.config.bootstrap.v3.Bootstrap.dynamic_resources:type_name -> envoy.config.bootstrap.v3.Bootstrap.DynamicResources + 25, // 0: envoy.config.bootstrap.v3.Bootstrap.node:type_name -> envoy.config.core.v3.Node + 13, // 1: envoy.config.bootstrap.v3.Bootstrap.static_resources:type_name -> envoy.config.bootstrap.v3.Bootstrap.StaticResources + 14, // 2: envoy.config.bootstrap.v3.Bootstrap.dynamic_resources:type_name -> envoy.config.bootstrap.v3.Bootstrap.DynamicResources 4, // 3: envoy.config.bootstrap.v3.Bootstrap.cluster_manager:type_name -> envoy.config.bootstrap.v3.ClusterManager - 25, // 4: envoy.config.bootstrap.v3.Bootstrap.hds_config:type_name -> envoy.config.core.v3.ApiConfigSource - 26, // 5: envoy.config.bootstrap.v3.Bootstrap.stats_sinks:type_name -> envoy.config.metrics.v3.StatsSink - 15, // 6: 
envoy.config.bootstrap.v3.Bootstrap.deferred_stat_options:type_name -> envoy.config.bootstrap.v3.Bootstrap.DeferredStatOptions - 27, // 7: envoy.config.bootstrap.v3.Bootstrap.stats_config:type_name -> envoy.config.metrics.v3.StatsConfig - 28, // 8: envoy.config.bootstrap.v3.Bootstrap.stats_flush_interval:type_name -> google.protobuf.Duration + 26, // 4: envoy.config.bootstrap.v3.Bootstrap.hds_config:type_name -> envoy.config.core.v3.ApiConfigSource + 27, // 5: envoy.config.bootstrap.v3.Bootstrap.stats_sinks:type_name -> envoy.config.metrics.v3.StatsSink + 16, // 6: envoy.config.bootstrap.v3.Bootstrap.deferred_stat_options:type_name -> envoy.config.bootstrap.v3.Bootstrap.DeferredStatOptions + 28, // 7: envoy.config.bootstrap.v3.Bootstrap.stats_config:type_name -> envoy.config.metrics.v3.StatsConfig + 29, // 8: envoy.config.bootstrap.v3.Bootstrap.stats_flush_interval:type_name -> google.protobuf.Duration 6, // 9: envoy.config.bootstrap.v3.Bootstrap.watchdog:type_name -> envoy.config.bootstrap.v3.Watchdog 5, // 10: envoy.config.bootstrap.v3.Bootstrap.watchdogs:type_name -> envoy.config.bootstrap.v3.Watchdogs - 29, // 11: envoy.config.bootstrap.v3.Bootstrap.tracing:type_name -> envoy.config.trace.v3.Tracing + 30, // 11: envoy.config.bootstrap.v3.Bootstrap.tracing:type_name -> envoy.config.trace.v3.Tracing 10, // 12: envoy.config.bootstrap.v3.Bootstrap.layered_runtime:type_name -> envoy.config.bootstrap.v3.LayeredRuntime 3, // 13: envoy.config.bootstrap.v3.Bootstrap.admin:type_name -> envoy.config.bootstrap.v3.Admin - 30, // 14: envoy.config.bootstrap.v3.Bootstrap.overload_manager:type_name -> envoy.config.overload.v3.OverloadManager - 31, // 15: envoy.config.bootstrap.v3.Bootstrap.stats_server_version_override:type_name -> google.protobuf.UInt64Value - 32, // 16: envoy.config.bootstrap.v3.Bootstrap.dns_resolution_config:type_name -> envoy.config.core.v3.DnsResolutionConfig - 33, // 17: envoy.config.bootstrap.v3.Bootstrap.typed_dns_resolver_config:type_name -> envoy.config.core.v3.TypedExtensionConfig - 33, // 18: envoy.config.bootstrap.v3.Bootstrap.bootstrap_extensions:type_name -> envoy.config.core.v3.TypedExtensionConfig + 31, // 14: envoy.config.bootstrap.v3.Bootstrap.overload_manager:type_name -> envoy.config.overload.v3.OverloadManager + 32, // 15: envoy.config.bootstrap.v3.Bootstrap.stats_server_version_override:type_name -> google.protobuf.UInt64Value + 33, // 16: envoy.config.bootstrap.v3.Bootstrap.dns_resolution_config:type_name -> envoy.config.core.v3.DnsResolutionConfig + 34, // 17: envoy.config.bootstrap.v3.Bootstrap.typed_dns_resolver_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 34, // 18: envoy.config.bootstrap.v3.Bootstrap.bootstrap_extensions:type_name -> envoy.config.core.v3.TypedExtensionConfig 7, // 19: envoy.config.bootstrap.v3.Bootstrap.fatal_actions:type_name -> envoy.config.bootstrap.v3.FatalAction - 34, // 20: envoy.config.bootstrap.v3.Bootstrap.config_sources:type_name -> envoy.config.core.v3.ConfigSource - 34, // 21: envoy.config.bootstrap.v3.Bootstrap.default_config_source:type_name -> envoy.config.core.v3.ConfigSource - 17, // 22: envoy.config.bootstrap.v3.Bootstrap.certificate_provider_instances:type_name -> envoy.config.bootstrap.v3.Bootstrap.CertificateProviderInstancesEntry + 35, // 20: envoy.config.bootstrap.v3.Bootstrap.config_sources:type_name -> envoy.config.core.v3.ConfigSource + 35, // 21: envoy.config.bootstrap.v3.Bootstrap.default_config_source:type_name -> envoy.config.core.v3.ConfigSource + 18, // 22: 
envoy.config.bootstrap.v3.Bootstrap.certificate_provider_instances:type_name -> envoy.config.bootstrap.v3.Bootstrap.CertificateProviderInstancesEntry 11, // 23: envoy.config.bootstrap.v3.Bootstrap.inline_headers:type_name -> envoy.config.bootstrap.v3.CustomInlineHeader - 33, // 24: envoy.config.bootstrap.v3.Bootstrap.default_regex_engine:type_name -> envoy.config.core.v3.TypedExtensionConfig - 33, // 25: envoy.config.bootstrap.v3.Bootstrap.xds_delegate_extension:type_name -> envoy.config.core.v3.TypedExtensionConfig - 33, // 26: envoy.config.bootstrap.v3.Bootstrap.xds_config_tracker_extension:type_name -> envoy.config.core.v3.TypedExtensionConfig - 33, // 27: envoy.config.bootstrap.v3.Bootstrap.listener_manager:type_name -> envoy.config.core.v3.TypedExtensionConfig - 14, // 28: envoy.config.bootstrap.v3.Bootstrap.application_log_config:type_name -> envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig - 16, // 29: envoy.config.bootstrap.v3.Bootstrap.grpc_async_client_manager_config:type_name -> envoy.config.bootstrap.v3.Bootstrap.GrpcAsyncClientManagerConfig - 35, // 30: envoy.config.bootstrap.v3.Admin.access_log:type_name -> envoy.config.accesslog.v3.AccessLog - 36, // 31: envoy.config.bootstrap.v3.Admin.address:type_name -> envoy.config.core.v3.Address - 37, // 32: envoy.config.bootstrap.v3.Admin.socket_options:type_name -> envoy.config.core.v3.SocketOption - 19, // 33: envoy.config.bootstrap.v3.ClusterManager.outlier_detection:type_name -> envoy.config.bootstrap.v3.ClusterManager.OutlierDetection - 38, // 34: envoy.config.bootstrap.v3.ClusterManager.upstream_bind_config:type_name -> envoy.config.core.v3.BindConfig - 25, // 35: envoy.config.bootstrap.v3.ClusterManager.load_stats_config:type_name -> envoy.config.core.v3.ApiConfigSource - 6, // 36: envoy.config.bootstrap.v3.Watchdogs.main_thread_watchdog:type_name -> envoy.config.bootstrap.v3.Watchdog - 6, // 37: envoy.config.bootstrap.v3.Watchdogs.worker_watchdog:type_name -> envoy.config.bootstrap.v3.Watchdog - 20, // 38: envoy.config.bootstrap.v3.Watchdog.actions:type_name -> envoy.config.bootstrap.v3.Watchdog.WatchdogAction - 28, // 39: envoy.config.bootstrap.v3.Watchdog.miss_timeout:type_name -> google.protobuf.Duration - 28, // 40: envoy.config.bootstrap.v3.Watchdog.megamiss_timeout:type_name -> google.protobuf.Duration - 28, // 41: envoy.config.bootstrap.v3.Watchdog.kill_timeout:type_name -> google.protobuf.Duration - 28, // 42: envoy.config.bootstrap.v3.Watchdog.max_kill_timeout_jitter:type_name -> google.protobuf.Duration - 28, // 43: envoy.config.bootstrap.v3.Watchdog.multikill_timeout:type_name -> google.protobuf.Duration - 39, // 44: envoy.config.bootstrap.v3.Watchdog.multikill_threshold:type_name -> envoy.type.v3.Percent - 33, // 45: envoy.config.bootstrap.v3.FatalAction.config:type_name -> envoy.config.core.v3.TypedExtensionConfig - 40, // 46: envoy.config.bootstrap.v3.Runtime.base:type_name -> google.protobuf.Struct - 40, // 47: envoy.config.bootstrap.v3.RuntimeLayer.static_layer:type_name -> google.protobuf.Struct - 21, // 48: envoy.config.bootstrap.v3.RuntimeLayer.disk_layer:type_name -> envoy.config.bootstrap.v3.RuntimeLayer.DiskLayer - 22, // 49: envoy.config.bootstrap.v3.RuntimeLayer.admin_layer:type_name -> envoy.config.bootstrap.v3.RuntimeLayer.AdminLayer - 23, // 50: envoy.config.bootstrap.v3.RuntimeLayer.rtds_layer:type_name -> envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer - 9, // 51: envoy.config.bootstrap.v3.LayeredRuntime.layers:type_name -> envoy.config.bootstrap.v3.RuntimeLayer - 1, // 52: 
envoy.config.bootstrap.v3.CustomInlineHeader.inline_header_type:type_name -> envoy.config.bootstrap.v3.CustomInlineHeader.InlineHeaderType - 41, // 53: envoy.config.bootstrap.v3.Bootstrap.StaticResources.listeners:type_name -> envoy.config.listener.v3.Listener - 42, // 54: envoy.config.bootstrap.v3.Bootstrap.StaticResources.clusters:type_name -> envoy.config.cluster.v3.Cluster - 43, // 55: envoy.config.bootstrap.v3.Bootstrap.StaticResources.secrets:type_name -> envoy.extensions.transport_sockets.tls.v3.Secret - 34, // 56: envoy.config.bootstrap.v3.Bootstrap.DynamicResources.lds_config:type_name -> envoy.config.core.v3.ConfigSource - 34, // 57: envoy.config.bootstrap.v3.Bootstrap.DynamicResources.cds_config:type_name -> envoy.config.core.v3.ConfigSource - 25, // 58: envoy.config.bootstrap.v3.Bootstrap.DynamicResources.ads_config:type_name -> envoy.config.core.v3.ApiConfigSource - 18, // 59: envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig.log_format:type_name -> envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig.LogFormat - 28, // 60: envoy.config.bootstrap.v3.Bootstrap.GrpcAsyncClientManagerConfig.max_cached_entry_idle_duration:type_name -> google.protobuf.Duration - 33, // 61: envoy.config.bootstrap.v3.Bootstrap.CertificateProviderInstancesEntry.value:type_name -> envoy.config.core.v3.TypedExtensionConfig - 40, // 62: envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig.LogFormat.json_format:type_name -> google.protobuf.Struct - 44, // 63: envoy.config.bootstrap.v3.ClusterManager.OutlierDetection.event_service:type_name -> envoy.config.core.v3.EventServiceConfig - 33, // 64: envoy.config.bootstrap.v3.Watchdog.WatchdogAction.config:type_name -> envoy.config.core.v3.TypedExtensionConfig - 0, // 65: envoy.config.bootstrap.v3.Watchdog.WatchdogAction.event:type_name -> envoy.config.bootstrap.v3.Watchdog.WatchdogAction.WatchdogEvent - 34, // 66: envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer.rtds_config:type_name -> envoy.config.core.v3.ConfigSource - 67, // [67:67] is the sub-list for method output_type - 67, // [67:67] is the sub-list for method input_type - 67, // [67:67] is the sub-list for extension type_name - 67, // [67:67] is the sub-list for extension extendee - 0, // [0:67] is the sub-list for field type_name + 34, // 24: envoy.config.bootstrap.v3.Bootstrap.default_regex_engine:type_name -> envoy.config.core.v3.TypedExtensionConfig + 34, // 25: envoy.config.bootstrap.v3.Bootstrap.xds_delegate_extension:type_name -> envoy.config.core.v3.TypedExtensionConfig + 34, // 26: envoy.config.bootstrap.v3.Bootstrap.xds_config_tracker_extension:type_name -> envoy.config.core.v3.TypedExtensionConfig + 34, // 27: envoy.config.bootstrap.v3.Bootstrap.listener_manager:type_name -> envoy.config.core.v3.TypedExtensionConfig + 15, // 28: envoy.config.bootstrap.v3.Bootstrap.application_log_config:type_name -> envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig + 17, // 29: envoy.config.bootstrap.v3.Bootstrap.grpc_async_client_manager_config:type_name -> envoy.config.bootstrap.v3.Bootstrap.GrpcAsyncClientManagerConfig + 12, // 30: envoy.config.bootstrap.v3.Bootstrap.memory_allocator_manager:type_name -> envoy.config.bootstrap.v3.MemoryAllocatorManager + 36, // 31: envoy.config.bootstrap.v3.Admin.access_log:type_name -> envoy.config.accesslog.v3.AccessLog + 37, // 32: envoy.config.bootstrap.v3.Admin.address:type_name -> envoy.config.core.v3.Address + 38, // 33: envoy.config.bootstrap.v3.Admin.socket_options:type_name -> envoy.config.core.v3.SocketOption + 20, // 34: 
envoy.config.bootstrap.v3.ClusterManager.outlier_detection:type_name -> envoy.config.bootstrap.v3.ClusterManager.OutlierDetection + 39, // 35: envoy.config.bootstrap.v3.ClusterManager.upstream_bind_config:type_name -> envoy.config.core.v3.BindConfig + 26, // 36: envoy.config.bootstrap.v3.ClusterManager.load_stats_config:type_name -> envoy.config.core.v3.ApiConfigSource + 6, // 37: envoy.config.bootstrap.v3.Watchdogs.main_thread_watchdog:type_name -> envoy.config.bootstrap.v3.Watchdog + 6, // 38: envoy.config.bootstrap.v3.Watchdogs.worker_watchdog:type_name -> envoy.config.bootstrap.v3.Watchdog + 21, // 39: envoy.config.bootstrap.v3.Watchdog.actions:type_name -> envoy.config.bootstrap.v3.Watchdog.WatchdogAction + 29, // 40: envoy.config.bootstrap.v3.Watchdog.miss_timeout:type_name -> google.protobuf.Duration + 29, // 41: envoy.config.bootstrap.v3.Watchdog.megamiss_timeout:type_name -> google.protobuf.Duration + 29, // 42: envoy.config.bootstrap.v3.Watchdog.kill_timeout:type_name -> google.protobuf.Duration + 29, // 43: envoy.config.bootstrap.v3.Watchdog.max_kill_timeout_jitter:type_name -> google.protobuf.Duration + 29, // 44: envoy.config.bootstrap.v3.Watchdog.multikill_timeout:type_name -> google.protobuf.Duration + 40, // 45: envoy.config.bootstrap.v3.Watchdog.multikill_threshold:type_name -> envoy.type.v3.Percent + 34, // 46: envoy.config.bootstrap.v3.FatalAction.config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 41, // 47: envoy.config.bootstrap.v3.Runtime.base:type_name -> google.protobuf.Struct + 41, // 48: envoy.config.bootstrap.v3.RuntimeLayer.static_layer:type_name -> google.protobuf.Struct + 22, // 49: envoy.config.bootstrap.v3.RuntimeLayer.disk_layer:type_name -> envoy.config.bootstrap.v3.RuntimeLayer.DiskLayer + 23, // 50: envoy.config.bootstrap.v3.RuntimeLayer.admin_layer:type_name -> envoy.config.bootstrap.v3.RuntimeLayer.AdminLayer + 24, // 51: envoy.config.bootstrap.v3.RuntimeLayer.rtds_layer:type_name -> envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer + 9, // 52: envoy.config.bootstrap.v3.LayeredRuntime.layers:type_name -> envoy.config.bootstrap.v3.RuntimeLayer + 1, // 53: envoy.config.bootstrap.v3.CustomInlineHeader.inline_header_type:type_name -> envoy.config.bootstrap.v3.CustomInlineHeader.InlineHeaderType + 29, // 54: envoy.config.bootstrap.v3.MemoryAllocatorManager.memory_release_interval:type_name -> google.protobuf.Duration + 42, // 55: envoy.config.bootstrap.v3.Bootstrap.StaticResources.listeners:type_name -> envoy.config.listener.v3.Listener + 43, // 56: envoy.config.bootstrap.v3.Bootstrap.StaticResources.clusters:type_name -> envoy.config.cluster.v3.Cluster + 44, // 57: envoy.config.bootstrap.v3.Bootstrap.StaticResources.secrets:type_name -> envoy.extensions.transport_sockets.tls.v3.Secret + 35, // 58: envoy.config.bootstrap.v3.Bootstrap.DynamicResources.lds_config:type_name -> envoy.config.core.v3.ConfigSource + 35, // 59: envoy.config.bootstrap.v3.Bootstrap.DynamicResources.cds_config:type_name -> envoy.config.core.v3.ConfigSource + 26, // 60: envoy.config.bootstrap.v3.Bootstrap.DynamicResources.ads_config:type_name -> envoy.config.core.v3.ApiConfigSource + 19, // 61: envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig.log_format:type_name -> envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig.LogFormat + 29, // 62: envoy.config.bootstrap.v3.Bootstrap.GrpcAsyncClientManagerConfig.max_cached_entry_idle_duration:type_name -> google.protobuf.Duration + 34, // 63: 
envoy.config.bootstrap.v3.Bootstrap.CertificateProviderInstancesEntry.value:type_name -> envoy.config.core.v3.TypedExtensionConfig + 41, // 64: envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig.LogFormat.json_format:type_name -> google.protobuf.Struct + 45, // 65: envoy.config.bootstrap.v3.ClusterManager.OutlierDetection.event_service:type_name -> envoy.config.core.v3.EventServiceConfig + 34, // 66: envoy.config.bootstrap.v3.Watchdog.WatchdogAction.config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 0, // 67: envoy.config.bootstrap.v3.Watchdog.WatchdogAction.event:type_name -> envoy.config.bootstrap.v3.Watchdog.WatchdogAction.WatchdogEvent + 35, // 68: envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer.rtds_config:type_name -> envoy.config.core.v3.ConfigSource + 69, // [69:69] is the sub-list for method output_type + 69, // [69:69] is the sub-list for method input_type + 69, // [69:69] is the sub-list for extension type_name + 69, // [69:69] is the sub-list for extension extendee + 0, // [0:69] is the sub-list for field type_name } func init() { file_envoy_config_bootstrap_v3_bootstrap_proto_init() } @@ -3042,7 +3131,7 @@ func file_envoy_config_bootstrap_v3_bootstrap_proto_init() { } } file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bootstrap_StaticResources); i { + switch v := v.(*MemoryAllocatorManager); i { case 0: return &v.state case 1: @@ -3054,7 +3143,7 @@ func file_envoy_config_bootstrap_v3_bootstrap_proto_init() { } } file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bootstrap_DynamicResources); i { + switch v := v.(*Bootstrap_StaticResources); i { case 0: return &v.state case 1: @@ -3066,7 +3155,7 @@ func file_envoy_config_bootstrap_v3_bootstrap_proto_init() { } } file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bootstrap_ApplicationLogConfig); i { + switch v := v.(*Bootstrap_DynamicResources); i { case 0: return &v.state case 1: @@ -3078,7 +3167,7 @@ func file_envoy_config_bootstrap_v3_bootstrap_proto_init() { } } file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bootstrap_DeferredStatOptions); i { + switch v := v.(*Bootstrap_ApplicationLogConfig); i { case 0: return &v.state case 1: @@ -3090,6 +3179,18 @@ func file_envoy_config_bootstrap_v3_bootstrap_proto_init() { } } file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bootstrap_DeferredStatOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Bootstrap_GrpcAsyncClientManagerConfig); i { case 0: return &v.state @@ -3101,7 +3202,7 @@ func file_envoy_config_bootstrap_v3_bootstrap_proto_init() { return nil } } - file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Bootstrap_ApplicationLogConfig_LogFormat); i { case 0: return &v.state @@ -3113,7 +3214,7 @@ func file_envoy_config_bootstrap_v3_bootstrap_proto_init() { 
return nil } } - file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ClusterManager_OutlierDetection); i { case 0: return &v.state @@ -3125,7 +3226,7 @@ func file_envoy_config_bootstrap_v3_bootstrap_proto_init() { return nil } } - file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Watchdog_WatchdogAction); i { case 0: return &v.state @@ -3137,7 +3238,7 @@ func file_envoy_config_bootstrap_v3_bootstrap_proto_init() { return nil } } - file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RuntimeLayer_DiskLayer); i { case 0: return &v.state @@ -3149,7 +3250,7 @@ func file_envoy_config_bootstrap_v3_bootstrap_proto_init() { return nil } } - file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RuntimeLayer_AdminLayer); i { case 0: return &v.state @@ -3161,7 +3262,7 @@ func file_envoy_config_bootstrap_v3_bootstrap_proto_init() { return nil } } - file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RuntimeLayer_RtdsLayer); i { case 0: return &v.state @@ -3183,7 +3284,7 @@ func file_envoy_config_bootstrap_v3_bootstrap_proto_init() { (*RuntimeLayer_AdminLayer_)(nil), (*RuntimeLayer_RtdsLayer_)(nil), } - file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[16].OneofWrappers = []interface{}{ + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[17].OneofWrappers = []interface{}{ (*Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat)(nil), (*Bootstrap_ApplicationLogConfig_LogFormat_TextFormat)(nil), } @@ -3193,7 +3294,7 @@ func file_envoy_config_bootstrap_v3_bootstrap_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_config_bootstrap_v3_bootstrap_proto_rawDesc, NumEnums: 2, - NumMessages: 22, + NumMessages: 23, NumExtensions: 0, NumServices: 0, }, diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.validate.go index 762370613cf..55724c0957a 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. 
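+// The !disable_pgv constraint above is a standard Go build tag: when a
+// consumer builds with "-tags disable_pgv", this entire generated
+// validation file is excluded from the build, leaving only the message
+// types themselves (presumably the upstream motivation for gating all
+// protoc-gen-validate output behind the tag).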
// source: envoy/config/bootstrap/v3/bootstrap.proto @@ -983,6 +984,35 @@ func (m *Bootstrap) validate(all bool) error { } } + if all { + switch v := interface{}(m.GetMemoryAllocatorManager()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "MemoryAllocatorManager", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "MemoryAllocatorManager", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMemoryAllocatorManager()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "MemoryAllocatorManager", + reason: "embedded message failed validation", + cause: err, + } + } + } + switch v := m.StatsFlush.(type) { case *Bootstrap_StatsFlushOnAdmin: if v == nil { @@ -2771,6 +2801,139 @@ var _ interface { var _CustomInlineHeader_InlineHeaderName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") +// Validate checks the field values on MemoryAllocatorManager with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *MemoryAllocatorManager) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MemoryAllocatorManager with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MemoryAllocatorManagerMultiError, or nil if none found. +func (m *MemoryAllocatorManager) ValidateAll() error { + return m.validate(true) +} + +func (m *MemoryAllocatorManager) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for BytesToRelease + + if all { + switch v := interface{}(m.GetMemoryReleaseInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MemoryAllocatorManagerValidationError{ + field: "MemoryReleaseInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MemoryAllocatorManagerValidationError{ + field: "MemoryReleaseInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMemoryReleaseInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MemoryAllocatorManagerValidationError{ + field: "MemoryReleaseInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return MemoryAllocatorManagerMultiError(errors) + } + + return nil +} + +// MemoryAllocatorManagerMultiError is an error wrapping multiple validation +// errors returned by MemoryAllocatorManager.ValidateAll() if the designated +// constraints aren't met. +type MemoryAllocatorManagerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
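+// A caller that wants the individual violations rather than the joined
+// string might unwrap the result of ValidateAll like so (a sketch; the
+// errors and log imports belong to the caller's code):
+//
+//	if err := m.ValidateAll(); err != nil {
+//		var merr MemoryAllocatorManagerMultiError
+//		if errors.As(err, &merr) {
+//			for _, verr := range merr.AllErrors() {
+//				log.Println(verr)
+//			}
+//		}
+//	}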
+func (m MemoryAllocatorManagerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MemoryAllocatorManagerMultiError) AllErrors() []error { return m } + +// MemoryAllocatorManagerValidationError is the validation error returned by +// MemoryAllocatorManager.Validate if the designated constraints aren't met. +type MemoryAllocatorManagerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MemoryAllocatorManagerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MemoryAllocatorManagerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MemoryAllocatorManagerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MemoryAllocatorManagerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MemoryAllocatorManagerValidationError) ErrorName() string { + return "MemoryAllocatorManagerValidationError" +} + +// Error satisfies the builtin error interface +func (e MemoryAllocatorManagerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMemoryAllocatorManager.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MemoryAllocatorManagerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MemoryAllocatorManagerValidationError{} + // Validate checks the field values on Bootstrap_StaticResources with the rules // defined in the proto definition for this message. If any rules are // violated, the first error encountered is returned, or nil if there are no violations. diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap_vtproto.pb.go new file mode 100644 index 00000000000..51e10e0e08e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap_vtproto.pb.go @@ -0,0 +1,3128 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/bootstrap/v3/bootstrap.proto + +package bootstrapv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
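+	// (Both EnforceVersion expressions are compile-time assertions:
+	// EnforceVersion is an unsigned type, so converting a negative
+	// untyped constant fails the build whenever the linked protobuf
+	// runtime falls outside [MinVersion, MaxVersion] for generated-code
+	// version 20.)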
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Bootstrap_StaticResources) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bootstrap_StaticResources) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_StaticResources) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Secrets) > 0 { + for iNdEx := len(m.Secrets) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Secrets[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Secrets[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Clusters) > 0 { + for iNdEx := len(m.Clusters) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Clusters[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Clusters[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Listeners) > 0 { + for iNdEx := len(m.Listeners) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Listeners[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Listeners[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Bootstrap_DynamicResources) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bootstrap_DynamicResources) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_DynamicResources) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CdsResourcesLocator) > 0 { + i -= len(m.CdsResourcesLocator) + copy(dAtA[i:], m.CdsResourcesLocator) + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CdsResourcesLocator))) + i-- + dAtA[i] = 0x32 + } + if len(m.LdsResourcesLocator) > 0 { + i -= len(m.LdsResourcesLocator) + copy(dAtA[i:], m.LdsResourcesLocator) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LdsResourcesLocator))) + i-- + dAtA[i] = 0x2a + } + if m.AdsConfig != nil { + if vtmsg, ok := interface{}(m.AdsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AdsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.CdsConfig != nil { + if vtmsg, ok := interface{}(m.CdsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CdsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.LdsConfig != nil { + if vtmsg, ok := interface{}(m.LdsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LdsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.LogFormat.(*Bootstrap_ApplicationLogConfig_LogFormat_TextFormat); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.LogFormat.(*Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.JsonFormat != nil { + size, err := 
(*structpb.Struct)(m.JsonFormat).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Bootstrap_ApplicationLogConfig_LogFormat_TextFormat) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat_TextFormat) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.TextFormat) + copy(dAtA[i:], m.TextFormat) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TextFormat))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *Bootstrap_ApplicationLogConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bootstrap_ApplicationLogConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_ApplicationLogConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LogFormat != nil { + size, err := m.LogFormat.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Bootstrap_DeferredStatOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bootstrap_DeferredStatOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_DeferredStatOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.EnableDeferredCreationStats { + i-- + if m.EnableDeferredCreationStats { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Bootstrap_GrpcAsyncClientManagerConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bootstrap_GrpcAsyncClientManagerConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_GrpcAsyncClientManagerConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxCachedEntryIdleDuration != nil { + size, err := 
(*durationpb.Duration)(m.MaxCachedEntryIdleDuration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Bootstrap) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bootstrap) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MemoryAllocatorManager != nil { + size, err := m.MemoryAllocatorManager.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xca + } + if m.GrpcAsyncClientManagerConfig != nil { + size, err := m.GrpcAsyncClientManagerConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc2 + } + if m.DeferredStatOptions != nil { + size, err := m.DeferredStatOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xba + } + if m.ApplicationLogConfig != nil { + size, err := m.ApplicationLogConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb2 + } + if m.ListenerManager != nil { + if vtmsg, ok := interface{}(m.ListenerManager).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ListenerManager) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xaa + } + if m.XdsConfigTrackerExtension != nil { + if vtmsg, ok := interface{}(m.XdsConfigTrackerExtension).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.XdsConfigTrackerExtension) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + if m.XdsDelegateExtension != nil { + if vtmsg, ok := interface{}(m.XdsDelegateExtension).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) 
+ } else { + encoded, err := proto.Marshal(m.XdsDelegateExtension) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + if m.DefaultRegexEngine != nil { + if vtmsg, ok := interface{}(m.DefaultRegexEngine).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DefaultRegexEngine) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + if len(m.PerfTracingFilePath) > 0 { + i -= len(m.PerfTracingFilePath) + copy(dAtA[i:], m.PerfTracingFilePath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PerfTracingFilePath))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + } + if len(m.InlineHeaders) > 0 { + for iNdEx := len(m.InlineHeaders) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.InlineHeaders[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + } + if m.TypedDnsResolverConfig != nil { + if vtmsg, ok := interface{}(m.TypedDnsResolverConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TypedDnsResolverConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa + } + if m.DnsResolutionConfig != nil { + if vtmsg, ok := interface{}(m.DnsResolutionConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DnsResolutionConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 + } + if msg, ok := m.StatsFlush.(*Bootstrap_StatsFlushOnAdmin); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.FatalActions) > 0 { + for iNdEx := len(m.FatalActions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.FatalActions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + } + if m.Watchdogs != nil { + size, err := m.Watchdogs.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + if len(m.NodeContextParams) > 0 { + for iNdEx := len(m.NodeContextParams) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NodeContextParams[iNdEx]) + copy(dAtA[i:], 
m.NodeContextParams[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.NodeContextParams[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + } + if len(m.CertificateProviderInstances) > 0 { + for k := range m.CertificateProviderInstances { + v := m.CertificateProviderInstances[k] + baseI := i + if vtmsg, ok := interface{}(v).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(v) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } + if len(m.DefaultSocketInterface) > 0 { + i -= len(m.DefaultSocketInterface) + copy(dAtA[i:], m.DefaultSocketInterface) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DefaultSocketInterface))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if m.DefaultConfigSource != nil { + if vtmsg, ok := interface{}(m.DefaultConfigSource).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DefaultConfigSource) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if len(m.ConfigSources) > 0 { + for iNdEx := len(m.ConfigSources) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ConfigSources[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConfigSources[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + } + if len(m.BootstrapExtensions) > 0 { + for iNdEx := len(m.BootstrapExtensions) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.BootstrapExtensions[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.BootstrapExtensions[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + } + if m.UseTcpForDnsLookups { + i-- + if m.UseTcpForDnsLookups { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if m.StatsServerVersionOverride != nil { + size, err := (*wrapperspb.UInt64Value)(m.StatsServerVersionOverride).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if len(m.HeaderPrefix) > 0 { + i -= len(m.HeaderPrefix) + copy(dAtA[i:], m.HeaderPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderPrefix))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if m.LayeredRuntime != nil { + size, err := m.LayeredRuntime.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.EnableDispatcherStats { + i-- + if m.EnableDispatcherStats { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.OverloadManager != nil { + if vtmsg, ok := interface{}(m.OverloadManager).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.OverloadManager) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x7a + } + if m.HdsConfig != nil { + if vtmsg, ok := interface{}(m.HdsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HdsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x72 + } + if m.StatsConfig != nil { + if vtmsg, ok := interface{}(m.StatsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.StatsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + if m.Admin != nil { + size, err := m.Admin.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if m.Tracing != nil { + if vtmsg, ok := interface{}(m.Tracing).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Tracing) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + if m.Watchdog != nil { + size, err := m.Watchdog.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.StatsFlushInterval != nil { + size, err := (*durationpb.Duration)(m.StatsFlushInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 
0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if len(m.StatsSinks) > 0 { + for iNdEx := len(m.StatsSinks) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.StatsSinks[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.StatsSinks[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.FlagsPath) > 0 { + i -= len(m.FlagsPath) + copy(dAtA[i:], m.FlagsPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.FlagsPath))) + i-- + dAtA[i] = 0x2a + } + if m.ClusterManager != nil { + size, err := m.ClusterManager.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.DynamicResources != nil { + size, err := m.DynamicResources.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.StaticResources != nil { + size, err := m.StaticResources.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Node != nil { + if vtmsg, ok := interface{}(m.Node).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Node) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Bootstrap_StatsFlushOnAdmin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_StatsFlushOnAdmin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.StatsFlushOnAdmin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe8 + return len(dAtA) - i, nil +} +func (m *Admin) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Admin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Admin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.IgnoreGlobalConnLimit { + i-- + if m.IgnoreGlobalConnLimit { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.AccessLog) > 0 { + for iNdEx := len(m.AccessLog) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := 
interface{}(m.AccessLog[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AccessLog[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.SocketOptions) > 0 { + for iNdEx := len(m.SocketOptions) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.SocketOptions[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SocketOptions[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + } + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if len(m.ProfilePath) > 0 { + i -= len(m.ProfilePath) + copy(dAtA[i:], m.ProfilePath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ProfilePath))) + i-- + dAtA[i] = 0x12 + } + if len(m.AccessLogPath) > 0 { + i -= len(m.AccessLogPath) + copy(dAtA[i:], m.AccessLogPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AccessLogPath))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterManager_OutlierDetection) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterManager_OutlierDetection) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterManager_OutlierDetection) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.EventService != nil { + if vtmsg, ok := interface{}(m.EventService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.EventService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.EventLogPath) > 0 { + i -= len(m.EventLogPath) + copy(dAtA[i:], m.EventLogPath) + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(len(m.EventLogPath))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterManager) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterManager) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.EnableDeferredClusterCreation { + i-- + if m.EnableDeferredClusterCreation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.LoadStatsConfig != nil { + if vtmsg, ok := interface{}(m.LoadStatsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LoadStatsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.UpstreamBindConfig != nil { + if vtmsg, ok := interface{}(m.UpstreamBindConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UpstreamBindConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.OutlierDetection != nil { + size, err := m.OutlierDetection.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.LocalClusterName) > 0 { + i -= len(m.LocalClusterName) + copy(dAtA[i:], m.LocalClusterName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LocalClusterName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Watchdogs) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Watchdogs) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Watchdogs) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.WorkerWatchdog != nil { + size, err := m.WorkerWatchdog.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MainThreadWatchdog != nil { + size, err := 
m.MainThreadWatchdog.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Watchdog_WatchdogAction) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Watchdog_WatchdogAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Watchdog_WatchdogAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Event != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Event)) + i-- + dAtA[i] = 0x10 + } + if m.Config != nil { + if vtmsg, ok := interface{}(m.Config).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Config) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Watchdog) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Watchdog) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Watchdog) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Actions) > 0 { + for iNdEx := len(m.Actions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Actions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + } + if m.MaxKillTimeoutJitter != nil { + size, err := (*durationpb.Duration)(m.MaxKillTimeoutJitter).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.MultikillThreshold != nil { + if vtmsg, ok := interface{}(m.MultikillThreshold).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MultikillThreshold) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + if m.MultikillTimeout != nil { + size, err := (*durationpb.Duration)(m.MultikillTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + 
if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.KillTimeout != nil { + size, err := (*durationpb.Duration)(m.KillTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.MegamissTimeout != nil { + size, err := (*durationpb.Duration)(m.MegamissTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MissTimeout != nil { + size, err := (*durationpb.Duration)(m.MissTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FatalAction) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FatalAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FatalAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Config != nil { + if vtmsg, ok := interface{}(m.Config).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Config) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Runtime) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Runtime) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Runtime) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Base != nil { + size, err := (*structpb.Struct)(m.Base).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.OverrideSubdirectory) > 0 { + i -= len(m.OverrideSubdirectory) + copy(dAtA[i:], m.OverrideSubdirectory) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.OverrideSubdirectory))) + i-- + dAtA[i] = 0x1a + } + if len(m.Subdirectory) > 0 { + i -= len(m.Subdirectory) + copy(dAtA[i:], m.Subdirectory) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Subdirectory))) + i-- + dAtA[i] = 0x12 + } + if len(m.SymlinkRoot) > 0 { + i -= len(m.SymlinkRoot) + 
copy(dAtA[i:], m.SymlinkRoot) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SymlinkRoot))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RuntimeLayer_DiskLayer) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeLayer_DiskLayer) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeLayer_DiskLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Subdirectory) > 0 { + i -= len(m.Subdirectory) + copy(dAtA[i:], m.Subdirectory) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Subdirectory))) + i-- + dAtA[i] = 0x1a + } + if m.AppendServiceCluster { + i-- + if m.AppendServiceCluster { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.SymlinkRoot) > 0 { + i -= len(m.SymlinkRoot) + copy(dAtA[i:], m.SymlinkRoot) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SymlinkRoot))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RuntimeLayer_AdminLayer) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeLayer_AdminLayer) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeLayer_AdminLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *RuntimeLayer_RtdsLayer) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeLayer_RtdsLayer) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeLayer_RtdsLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RtdsConfig != nil { + if vtmsg, ok := interface{}(m.RtdsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RtdsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RuntimeLayer) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeLayer) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.LayerSpecifier.(*RuntimeLayer_RtdsLayer_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.LayerSpecifier.(*RuntimeLayer_AdminLayer_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.LayerSpecifier.(*RuntimeLayer_DiskLayer_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.LayerSpecifier.(*RuntimeLayer_StaticLayer); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RuntimeLayer_StaticLayer) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeLayer_StaticLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StaticLayer != nil { + size, err := (*structpb.Struct)(m.StaticLayer).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *RuntimeLayer_DiskLayer_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeLayer_DiskLayer_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DiskLayer != nil { + size, err := m.DiskLayer.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RuntimeLayer_AdminLayer_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeLayer_AdminLayer_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AdminLayer != nil { + size, err := m.AdminLayer.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *RuntimeLayer_RtdsLayer_) 
MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeLayer_RtdsLayer_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RtdsLayer != nil { + size, err := m.RtdsLayer.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *LayeredRuntime) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LayeredRuntime) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LayeredRuntime) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Layers) > 0 { + for iNdEx := len(m.Layers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Layers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CustomInlineHeader) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomInlineHeader) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomInlineHeader) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.InlineHeaderType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.InlineHeaderType)) + i-- + dAtA[i] = 0x10 + } + if len(m.InlineHeaderName) > 0 { + i -= len(m.InlineHeaderName) + copy(dAtA[i:], m.InlineHeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.InlineHeaderName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MemoryAllocatorManager) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemoryAllocatorManager) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MemoryAllocatorManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MemoryReleaseInterval != nil { + size, err := (*durationpb.Duration)(m.MemoryReleaseInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size 
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.BytesToRelease != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.BytesToRelease)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Bootstrap_StaticResources) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Listeners) > 0 { + for _, e := range m.Listeners { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Clusters) > 0 { + for _, e := range m.Clusters { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Bootstrap_DynamicResources) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LdsConfig != nil { + if size, ok := interface{}(m.LdsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LdsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CdsConfig != nil { + if size, ok := interface{}(m.CdsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CdsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AdsConfig != nil { + if size, ok := interface{}(m.AdsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.AdsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.LdsResourcesLocator) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.CdsResourcesLocator) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.LogFormat.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.JsonFormat != nil { + l = (*structpb.Struct)(m.JsonFormat).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Bootstrap_ApplicationLogConfig_LogFormat_TextFormat) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TextFormat) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *Bootstrap_ApplicationLogConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LogFormat != nil { + l = m.LogFormat.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Bootstrap_DeferredStatOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EnableDeferredCreationStats { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *Bootstrap_GrpcAsyncClientManagerConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if 
m.MaxCachedEntryIdleDuration != nil { + l = (*durationpb.Duration)(m.MaxCachedEntryIdleDuration).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Bootstrap) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Node != nil { + if size, ok := interface{}(m.Node).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Node) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StaticResources != nil { + l = m.StaticResources.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DynamicResources != nil { + l = m.DynamicResources.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClusterManager != nil { + l = m.ClusterManager.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.FlagsPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.StatsSinks) > 0 { + for _, e := range m.StatsSinks { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.StatsFlushInterval != nil { + l = (*durationpb.Duration)(m.StatsFlushInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Watchdog != nil { + l = m.Watchdog.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Tracing != nil { + if size, ok := interface{}(m.Tracing).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Tracing) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Admin != nil { + l = m.Admin.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StatsConfig != nil { + if size, ok := interface{}(m.StatsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.StatsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HdsConfig != nil { + if size, ok := interface{}(m.HdsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.HdsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OverloadManager != nil { + if size, ok := interface{}(m.OverloadManager).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.OverloadManager) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnableDispatcherStats { + n += 3 + } + if m.LayeredRuntime != nil { + l = m.LayeredRuntime.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.HeaderPrefix) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StatsServerVersionOverride != nil { + l = (*wrapperspb.UInt64Value)(m.StatsServerVersionOverride).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UseTcpForDnsLookups { + n += 3 + } + if len(m.BootstrapExtensions) > 0 { + for _, e := range m.BootstrapExtensions { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ConfigSources) > 0 { + for _, e := range m.ConfigSources { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.DefaultConfigSource != nil { + if size, ok := 
interface{}(m.DefaultConfigSource).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DefaultConfigSource) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DefaultSocketInterface) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.CertificateProviderInstances) > 0 { + for k, v := range m.CertificateProviderInstances { + _ = k + _ = v + l = 0 + if v != nil { + if size, ok := interface{}(v).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(v) + } + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 2 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if len(m.NodeContextParams) > 0 { + for _, s := range m.NodeContextParams { + l = len(s) + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Watchdogs != nil { + l = m.Watchdogs.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.FatalActions) > 0 { + for _, e := range m.FatalActions { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if vtmsg, ok := m.StatsFlush.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.DnsResolutionConfig != nil { + if size, ok := interface{}(m.DnsResolutionConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DnsResolutionConfig) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TypedDnsResolverConfig != nil { + if size, ok := interface{}(m.TypedDnsResolverConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TypedDnsResolverConfig) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.InlineHeaders) > 0 { + for _, e := range m.InlineHeaders { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.PerfTracingFilePath) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DefaultRegexEngine != nil { + if size, ok := interface{}(m.DefaultRegexEngine).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DefaultRegexEngine) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.XdsDelegateExtension != nil { + if size, ok := interface{}(m.XdsDelegateExtension).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.XdsDelegateExtension) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.XdsConfigTrackerExtension != nil { + if size, ok := interface{}(m.XdsConfigTrackerExtension).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.XdsConfigTrackerExtension) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ListenerManager != nil { + if size, ok := interface{}(m.ListenerManager).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ListenerManager) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ApplicationLogConfig != nil { + l = m.ApplicationLogConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DeferredStatOptions != nil { + l = m.DeferredStatOptions.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.GrpcAsyncClientManagerConfig != nil { + l = m.GrpcAsyncClientManagerConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MemoryAllocatorManager != nil { + l = 
m.MemoryAllocatorManager.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Bootstrap_StatsFlushOnAdmin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 3 + return n +} +func (m *Admin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AccessLogPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ProfilePath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.SocketOptions) > 0 { + for _, e := range m.SocketOptions { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.AccessLog) > 0 { + for _, e := range m.AccessLog { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.IgnoreGlobalConnLimit { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ClusterManager_OutlierDetection) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.EventLogPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EventService != nil { + if size, ok := interface{}(m.EventService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.EventService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClusterManager) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LocalClusterName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OutlierDetection != nil { + l = m.OutlierDetection.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UpstreamBindConfig != nil { + if size, ok := interface{}(m.UpstreamBindConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UpstreamBindConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LoadStatsConfig != nil { + if size, ok := interface{}(m.LoadStatsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LoadStatsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnableDeferredClusterCreation { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *Watchdogs) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MainThreadWatchdog != nil { + l = m.MainThreadWatchdog.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.WorkerWatchdog != nil { + l = m.WorkerWatchdog.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Watchdog_WatchdogAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Config != nil { + if size, ok := interface{}(m.Config).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Config) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Event != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Event)) + } + n += 
len(m.unknownFields) + return n +} + +func (m *Watchdog) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MissTimeout != nil { + l = (*durationpb.Duration)(m.MissTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MegamissTimeout != nil { + l = (*durationpb.Duration)(m.MegamissTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.KillTimeout != nil { + l = (*durationpb.Duration)(m.KillTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MultikillTimeout != nil { + l = (*durationpb.Duration)(m.MultikillTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MultikillThreshold != nil { + if size, ok := interface{}(m.MultikillThreshold).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MultikillThreshold) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxKillTimeoutJitter != nil { + l = (*durationpb.Duration)(m.MaxKillTimeoutJitter).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Actions) > 0 { + for _, e := range m.Actions { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *FatalAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Config != nil { + if size, ok := interface{}(m.Config).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Config) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Runtime) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SymlinkRoot) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Subdirectory) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.OverrideSubdirectory) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Base != nil { + l = (*structpb.Struct)(m.Base).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RuntimeLayer_DiskLayer) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SymlinkRoot) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AppendServiceCluster { + n += 2 + } + l = len(m.Subdirectory) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RuntimeLayer_AdminLayer) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RuntimeLayer_RtdsLayer) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RtdsConfig != nil { + if size, ok := interface{}(m.RtdsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RtdsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RuntimeLayer) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.LayerSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *RuntimeLayer_StaticLayer) SizeVT() (n int) { + 
if m == nil { + return 0 + } + var l int + _ = l + if m.StaticLayer != nil { + l = (*structpb.Struct)(m.StaticLayer).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RuntimeLayer_DiskLayer_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DiskLayer != nil { + l = m.DiskLayer.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RuntimeLayer_AdminLayer_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AdminLayer != nil { + l = m.AdminLayer.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RuntimeLayer_RtdsLayer_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RtdsLayer != nil { + l = m.RtdsLayer.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *LayeredRuntime) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Layers) > 0 { + for _, e := range m.Layers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *CustomInlineHeader) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.InlineHeaderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.InlineHeaderType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.InlineHeaderType)) + } + n += len(m.unknownFields) + return n +} + +func (m *MemoryAllocatorManager) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BytesToRelease != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.BytesToRelease)) + } + if m.MemoryReleaseInterval != nil { + l = (*durationpb.Duration)(m.MemoryReleaseInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.go index d3d0c93d44f..cffadb9d882 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/cluster/v3/circuit_breaker.proto package clusterv3 @@ -11,9 +11,9 @@ import ( v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v31 "github.com/envoyproxy/go-control-plane/envoy/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -112,18 +112,18 @@ type CircuitBreakers_Thresholds struct { Priority v3.RoutingPriority `protobuf:"varint,1,opt,name=priority,proto3,enum=envoy.config.core.v3.RoutingPriority" json:"priority,omitempty"` // The maximum number of connections that Envoy will make to the upstream // cluster. If not specified, the default is 1024. 
- MaxConnections *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=max_connections,json=maxConnections,proto3" json:"max_connections,omitempty"` + MaxConnections *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=max_connections,json=maxConnections,proto3" json:"max_connections,omitempty"` // The maximum number of pending requests that Envoy will allow to the // upstream cluster. If not specified, the default is 1024. // This limit is applied as a connection limit for non-HTTP traffic. - MaxPendingRequests *wrappers.UInt32Value `protobuf:"bytes,3,opt,name=max_pending_requests,json=maxPendingRequests,proto3" json:"max_pending_requests,omitempty"` + MaxPendingRequests *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=max_pending_requests,json=maxPendingRequests,proto3" json:"max_pending_requests,omitempty"` // The maximum number of parallel requests that Envoy will make to the // upstream cluster. If not specified, the default is 1024. // This limit does not apply to non-HTTP traffic. - MaxRequests *wrappers.UInt32Value `protobuf:"bytes,4,opt,name=max_requests,json=maxRequests,proto3" json:"max_requests,omitempty"` + MaxRequests *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=max_requests,json=maxRequests,proto3" json:"max_requests,omitempty"` // The maximum number of parallel retries that Envoy will allow to the // upstream cluster. If not specified, the default is 3. - MaxRetries *wrappers.UInt32Value `protobuf:"bytes,5,opt,name=max_retries,json=maxRetries,proto3" json:"max_retries,omitempty"` + MaxRetries *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=max_retries,json=maxRetries,proto3" json:"max_retries,omitempty"` // Specifies a limit on concurrent retries in relation to the number of active requests. This // parameter is optional. // @@ -146,7 +146,7 @@ type CircuitBreakers_Thresholds struct { // large number of connection pools. See // :ref:`Circuit Breaking ` for // more details. 
- MaxConnectionPools *wrappers.UInt32Value `protobuf:"bytes,7,opt,name=max_connection_pools,json=maxConnectionPools,proto3" json:"max_connection_pools,omitempty"` + MaxConnectionPools *wrapperspb.UInt32Value `protobuf:"bytes,7,opt,name=max_connection_pools,json=maxConnectionPools,proto3" json:"max_connection_pools,omitempty"` } func (x *CircuitBreakers_Thresholds) Reset() { @@ -188,28 +188,28 @@ func (x *CircuitBreakers_Thresholds) GetPriority() v3.RoutingPriority { return v3.RoutingPriority(0) } -func (x *CircuitBreakers_Thresholds) GetMaxConnections() *wrappers.UInt32Value { +func (x *CircuitBreakers_Thresholds) GetMaxConnections() *wrapperspb.UInt32Value { if x != nil { return x.MaxConnections } return nil } -func (x *CircuitBreakers_Thresholds) GetMaxPendingRequests() *wrappers.UInt32Value { +func (x *CircuitBreakers_Thresholds) GetMaxPendingRequests() *wrapperspb.UInt32Value { if x != nil { return x.MaxPendingRequests } return nil } -func (x *CircuitBreakers_Thresholds) GetMaxRequests() *wrappers.UInt32Value { +func (x *CircuitBreakers_Thresholds) GetMaxRequests() *wrapperspb.UInt32Value { if x != nil { return x.MaxRequests } return nil } -func (x *CircuitBreakers_Thresholds) GetMaxRetries() *wrappers.UInt32Value { +func (x *CircuitBreakers_Thresholds) GetMaxRetries() *wrapperspb.UInt32Value { if x != nil { return x.MaxRetries } @@ -230,7 +230,7 @@ func (x *CircuitBreakers_Thresholds) GetTrackRemaining() bool { return false } -func (x *CircuitBreakers_Thresholds) GetMaxConnectionPools() *wrappers.UInt32Value { +func (x *CircuitBreakers_Thresholds) GetMaxConnectionPools() *wrapperspb.UInt32Value { if x != nil { return x.MaxConnectionPools } @@ -252,7 +252,7 @@ type CircuitBreakers_Thresholds_RetryBudget struct { // number of active retries may never go below this number. // // This parameter is optional. Defaults to 3. 
- MinRetryConcurrency *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=min_retry_concurrency,json=minRetryConcurrency,proto3" json:"min_retry_concurrency,omitempty"` + MinRetryConcurrency *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=min_retry_concurrency,json=minRetryConcurrency,proto3" json:"min_retry_concurrency,omitempty"` } func (x *CircuitBreakers_Thresholds_RetryBudget) Reset() { @@ -294,7 +294,7 @@ func (x *CircuitBreakers_Thresholds_RetryBudget) GetBudgetPercent() *v31.Percent return nil } -func (x *CircuitBreakers_Thresholds_RetryBudget) GetMinRetryConcurrency() *wrappers.UInt32Value { +func (x *CircuitBreakers_Thresholds_RetryBudget) GetMinRetryConcurrency() *wrapperspb.UInt32Value { if x != nil { return x.MinRetryConcurrency } @@ -421,7 +421,7 @@ var file_envoy_config_cluster_v3_circuit_breaker_proto_goTypes = []interface{}{ (*CircuitBreakers_Thresholds)(nil), // 1: envoy.config.cluster.v3.CircuitBreakers.Thresholds (*CircuitBreakers_Thresholds_RetryBudget)(nil), // 2: envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget (v3.RoutingPriority)(0), // 3: envoy.config.core.v3.RoutingPriority - (*wrappers.UInt32Value)(nil), // 4: google.protobuf.UInt32Value + (*wrapperspb.UInt32Value)(nil), // 4: google.protobuf.UInt32Value (*v31.Percent)(nil), // 5: envoy.type.v3.Percent } var file_envoy_config_cluster_v3_circuit_breaker_proto_depIdxs = []int32{ diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.validate.go index feb99f65077..8bf3373beb4 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/cluster/v3/circuit_breaker.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker_vtproto.pb.go new file mode 100644 index 00000000000..14ca0a1f11a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker_vtproto.pb.go @@ -0,0 +1,337 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/cluster/v3/circuit_breaker.proto + +package clusterv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *CircuitBreakers_Thresholds_RetryBudget) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CircuitBreakers_Thresholds_RetryBudget) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CircuitBreakers_Thresholds_RetryBudget) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MinRetryConcurrency != nil { + size, err := (*wrapperspb.UInt32Value)(m.MinRetryConcurrency).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.BudgetPercent != nil { + if vtmsg, ok := interface{}(m.BudgetPercent).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.BudgetPercent) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CircuitBreakers_Thresholds) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CircuitBreakers_Thresholds) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CircuitBreakers_Thresholds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RetryBudget != nil { + size, err := m.RetryBudget.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.MaxConnectionPools != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxConnectionPools).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.TrackRemaining { + i-- + if m.TrackRemaining { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.MaxRetries != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxRetries).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.MaxRequests != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxRequests).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if 
m.MaxPendingRequests != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxPendingRequests).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.MaxConnections != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxConnections).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Priority != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CircuitBreakers) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CircuitBreakers) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CircuitBreakers) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.PerHostThresholds) > 0 { + for iNdEx := len(m.PerHostThresholds) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.PerHostThresholds[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Thresholds) > 0 { + for iNdEx := len(m.Thresholds) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Thresholds[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CircuitBreakers_Thresholds_RetryBudget) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BudgetPercent != nil { + if size, ok := interface{}(m.BudgetPercent).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.BudgetPercent) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MinRetryConcurrency != nil { + l = (*wrapperspb.UInt32Value)(m.MinRetryConcurrency).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CircuitBreakers_Thresholds) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Priority != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Priority)) + } + if m.MaxConnections != nil { + l = (*wrapperspb.UInt32Value)(m.MaxConnections).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxPendingRequests != nil { + l = (*wrapperspb.UInt32Value)(m.MaxPendingRequests).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxRequests != nil { + l = (*wrapperspb.UInt32Value)(m.MaxRequests).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxRetries != nil { + l = (*wrapperspb.UInt32Value)(m.MaxRetries).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TrackRemaining { + n += 2 + } + if m.MaxConnectionPools != nil { + l = (*wrapperspb.UInt32Value)(m.MaxConnectionPools).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RetryBudget 
!= nil { + l = m.RetryBudget.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CircuitBreakers) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Thresholds) > 0 { + for _, e := range m.Thresholds { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.PerHostThresholds) > 0 { + for _, e := range m.PerHostThresholds { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go index 1356af8e474..53c9afecd6e 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/cluster/v3/cluster.proto package clusterv3 @@ -15,12 +15,12 @@ import ( v34 "github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3" v33 "github.com/envoyproxy/go-control-plane/envoy/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any1 "github.com/golang/protobuf/ptypes/any" - duration "github.com/golang/protobuf/ptypes/duration" - _struct "github.com/golang/protobuf/ptypes/struct" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -571,6 +571,57 @@ func (Cluster_RingHashLbConfig_HashFunction) EnumDescriptor() ([]byte, []int) { return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 7, 0} } +type UpstreamConnectionOptions_FirstAddressFamilyVersion int32 + +const ( + // respect the native ranking of destination ip addresses returned from dns + // resolution + UpstreamConnectionOptions_DEFAULT UpstreamConnectionOptions_FirstAddressFamilyVersion = 0 + UpstreamConnectionOptions_V4 UpstreamConnectionOptions_FirstAddressFamilyVersion = 1 + UpstreamConnectionOptions_V6 UpstreamConnectionOptions_FirstAddressFamilyVersion = 2 +) + +// Enum value maps for UpstreamConnectionOptions_FirstAddressFamilyVersion. 
+var ( + UpstreamConnectionOptions_FirstAddressFamilyVersion_name = map[int32]string{ + 0: "DEFAULT", + 1: "V4", + 2: "V6", + } + UpstreamConnectionOptions_FirstAddressFamilyVersion_value = map[string]int32{ + "DEFAULT": 0, + "V4": 1, + "V6": 2, + } +) + +func (x UpstreamConnectionOptions_FirstAddressFamilyVersion) Enum() *UpstreamConnectionOptions_FirstAddressFamilyVersion { + p := new(UpstreamConnectionOptions_FirstAddressFamilyVersion) + *p = x + return p +} + +func (x UpstreamConnectionOptions_FirstAddressFamilyVersion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UpstreamConnectionOptions_FirstAddressFamilyVersion) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_cluster_v3_cluster_proto_enumTypes[8].Descriptor() +} + +func (UpstreamConnectionOptions_FirstAddressFamilyVersion) Type() protoreflect.EnumType { + return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[8] +} + +func (x UpstreamConnectionOptions_FirstAddressFamilyVersion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UpstreamConnectionOptions_FirstAddressFamilyVersion.Descriptor instead. +func (UpstreamConnectionOptions_FirstAddressFamilyVersion) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{3, 0} +} + // Cluster list collections. Entries are “Cluster“ resources or references. // [#not-implemented-hide:] type ClusterCollection struct { @@ -621,18 +672,20 @@ func (x *ClusterCollection) GetEntries() *v3.CollectionEntry { } // Configuration for a single upstream cluster. -// [#next-free-field: 57] +// [#next-free-field: 58] type Cluster struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Configuration to use different transport sockets for different endpoints. - // The entry of “envoy.transport_socket_match“ in the - // :ref:`LbEndpoint.Metadata ` - // is used to match against the transport sockets as they appear in the list. The first - // :ref:`match ` is used. - // For example, with the following match + // Configuration to use different transport sockets for different endpoints. The entry of + // “envoy.transport_socket_match“ in the :ref:`LbEndpoint.Metadata + // ` is used to match against the + // transport sockets as they appear in the list. If a match is not found, the search continues in + // :ref:`LocalityLbEndpoints.Metadata + // `. The first :ref:`match + // ` is used. For example, with + // the following match // // .. code-block:: yaml // @@ -656,8 +709,9 @@ type Cluster struct { // socket match in case above. // // If an endpoint metadata's value under “envoy.transport_socket_match“ does not match any - // “TransportSocketMatch“, socket configuration fallbacks to use the “tls_context“ or - // “transport_socket“ specified in this cluster. + // “TransportSocketMatch“, the locality metadata is then checked for a match. Barring any + // matches in the endpoint or locality metadata, the socket configuration fallbacks to use the + // “tls_context“ or “transport_socket“ specified in this cluster. // // This field allows gradual and flexible transport socket configuration changes. // @@ -700,10 +754,10 @@ type Cluster struct { EdsClusterConfig *Cluster_EdsClusterConfig `protobuf:"bytes,3,opt,name=eds_cluster_config,json=edsClusterConfig,proto3" json:"eds_cluster_config,omitempty"` // The timeout for new network connections to hosts in the cluster. 
// If not set, a default value of 5s will be used. - ConnectTimeout *duration.Duration `protobuf:"bytes,4,opt,name=connect_timeout,json=connectTimeout,proto3" json:"connect_timeout,omitempty"` + ConnectTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=connect_timeout,json=connectTimeout,proto3" json:"connect_timeout,omitempty"` // Soft limit on size of the cluster’s connections read and write buffers. If // unspecified, an implementation defined default is applied (1MiB). - PerConnectionBufferLimitBytes *wrappers.UInt32Value `protobuf:"bytes,5,opt,name=per_connection_buffer_limit_bytes,json=perConnectionBufferLimitBytes,proto3" json:"per_connection_buffer_limit_bytes,omitempty"` + PerConnectionBufferLimitBytes *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=per_connection_buffer_limit_bytes,json=perConnectionBufferLimitBytes,proto3" json:"per_connection_buffer_limit_bytes,omitempty"` // The :ref:`load balancer type ` to use // when picking a host in the cluster. LbPolicy Cluster_LbPolicy `protobuf:"varint,6,opt,name=lb_policy,json=lbPolicy,proto3,enum=envoy.config.cluster.v3.Cluster_LbPolicy" json:"lb_policy,omitempty"` @@ -733,7 +787,7 @@ type Cluster struct { // This field has been deprecated in favor of the :ref:`max_requests_per_connection ` field. // // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. - MaxRequestsPerConnection *wrappers.UInt32Value `protobuf:"bytes,9,opt,name=max_requests_per_connection,json=maxRequestsPerConnection,proto3" json:"max_requests_per_connection,omitempty"` + MaxRequestsPerConnection *wrapperspb.UInt32Value `protobuf:"bytes,9,opt,name=max_requests_per_connection,json=maxRequestsPerConnection,proto3" json:"max_requests_per_connection,omitempty"` // Optional :ref:`circuit breaking ` for the cluster. CircuitBreakers *CircuitBreakers `protobuf:"bytes,10,opt,name=circuit_breakers,json=circuitBreakers,proto3" json:"circuit_breakers,omitempty"` // HTTP protocol options that are applied only to upstream HTTP connections. @@ -794,7 +848,7 @@ type Cluster struct { // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on // specific options. // [#next-major-version: make this a list of typed extensions.] - TypedExtensionProtocolOptions map[string]*any1.Any `protobuf:"bytes,36,rep,name=typed_extension_protocol_options,json=typedExtensionProtocolOptions,proto3" json:"typed_extension_protocol_options,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TypedExtensionProtocolOptions map[string]*anypb.Any `protobuf:"bytes,36,rep,name=typed_extension_protocol_options,json=typedExtensionProtocolOptions,proto3" json:"typed_extension_protocol_options,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // If the DNS refresh rate is specified and the cluster type is either // :ref:`STRICT_DNS`, // or :ref:`LOGICAL_DNS`, @@ -804,7 +858,7 @@ type Cluster struct { // :ref:`STRICT_DNS` // and :ref:`LOGICAL_DNS` // this setting is ignored. 
- DnsRefreshRate *duration.Duration `protobuf:"bytes,16,opt,name=dns_refresh_rate,json=dnsRefreshRate,proto3" json:"dns_refresh_rate,omitempty"` + DnsRefreshRate *durationpb.Duration `protobuf:"bytes,16,opt,name=dns_refresh_rate,json=dnsRefreshRate,proto3" json:"dns_refresh_rate,omitempty"` // If the DNS failure refresh rate is specified and the cluster type is either // :ref:`STRICT_DNS`, // or :ref:`LOGICAL_DNS`, @@ -867,7 +921,7 @@ type Cluster struct { // or :ref:`Redis Cluster`. // If true, cluster readiness blocks on warm-up. If false, the cluster will complete // initialization whether or not warm-up has completed. Defaults to true. - WaitForWarmOnInit *wrappers.BoolValue `protobuf:"bytes,54,opt,name=wait_for_warm_on_init,json=waitForWarmOnInit,proto3" json:"wait_for_warm_on_init,omitempty"` + WaitForWarmOnInit *wrapperspb.BoolValue `protobuf:"bytes,54,opt,name=wait_for_warm_on_init,json=waitForWarmOnInit,proto3" json:"wait_for_warm_on_init,omitempty"` // If specified, outlier detection will be enabled for this upstream cluster. // Each of the configuration values can be overridden via // :ref:`runtime values `. @@ -885,7 +939,7 @@ type Cluster struct { // value defaults to 5000ms. For cluster types other than // :ref:`ORIGINAL_DST` // this setting is ignored. - CleanupInterval *duration.Duration `protobuf:"bytes,20,opt,name=cleanup_interval,json=cleanupInterval,proto3" json:"cleanup_interval,omitempty"` + CleanupInterval *durationpb.Duration `protobuf:"bytes,20,opt,name=cleanup_interval,json=cleanupInterval,proto3" json:"cleanup_interval,omitempty"` // Optional configuration used to bind newly established upstream connections. // This overrides any bind_config specified in the bootstrap proto. // If the address and port are empty, no bind will be performed. @@ -974,6 +1028,21 @@ type Cluster struct { // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation // from the LRS stream here.] LrsServer *v32.ConfigSource `protobuf:"bytes,42,opt,name=lrs_server,json=lrsServer,proto3" json:"lrs_server,omitempty"` + // [#not-implemented-hide:] + // A list of metric names from ORCA load reports to propagate to LRS. + // + // For map fields in the ORCA proto, the string will be of the form “.“. + // For example, the string “named_metrics.foo“ will mean to look for the key “foo“ in the ORCA + // “named_metrics“ field. + // + // The special map key “*“ means to report all entries in the map (e.g., “named_metrics.*“ means to + // report all entries in the ORCA named_metrics field). Note that this should be used only with trusted + // backends. + // + // The metric names in LRS will follow the same semantics as this field. In other words, if this field + // contains “named_metrics.foo“, then the LRS load report will include the data with that same string + // as the key. + LrsReportEndpointMetrics []string `protobuf:"bytes,57,rep,name=lrs_report_endpoint_metrics,json=lrsReportEndpointMetrics,proto3" json:"lrs_report_endpoint_metrics,omitempty"` // If track_timeout_budgets is true, the :ref:`timeout budget histograms // ` will be published for each // request. These show what percentage of a request's per try and global timeout was used. 
A value @@ -1096,14 +1165,14 @@ func (x *Cluster) GetEdsClusterConfig() *Cluster_EdsClusterConfig { return nil } -func (x *Cluster) GetConnectTimeout() *duration.Duration { +func (x *Cluster) GetConnectTimeout() *durationpb.Duration { if x != nil { return x.ConnectTimeout } return nil } -func (x *Cluster) GetPerConnectionBufferLimitBytes() *wrappers.UInt32Value { +func (x *Cluster) GetPerConnectionBufferLimitBytes() *wrapperspb.UInt32Value { if x != nil { return x.PerConnectionBufferLimitBytes } @@ -1132,7 +1201,7 @@ func (x *Cluster) GetHealthChecks() []*v32.HealthCheck { } // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. -func (x *Cluster) GetMaxRequestsPerConnection() *wrappers.UInt32Value { +func (x *Cluster) GetMaxRequestsPerConnection() *wrapperspb.UInt32Value { if x != nil { return x.MaxRequestsPerConnection } @@ -1178,14 +1247,14 @@ func (x *Cluster) GetHttp2ProtocolOptions() *v32.Http2ProtocolOptions { return nil } -func (x *Cluster) GetTypedExtensionProtocolOptions() map[string]*any1.Any { +func (x *Cluster) GetTypedExtensionProtocolOptions() map[string]*anypb.Any { if x != nil { return x.TypedExtensionProtocolOptions } return nil } -func (x *Cluster) GetDnsRefreshRate() *duration.Duration { +func (x *Cluster) GetDnsRefreshRate() *durationpb.Duration { if x != nil { return x.DnsRefreshRate } @@ -1244,7 +1313,7 @@ func (x *Cluster) GetTypedDnsResolverConfig() *v32.TypedExtensionConfig { return nil } -func (x *Cluster) GetWaitForWarmOnInit() *wrappers.BoolValue { +func (x *Cluster) GetWaitForWarmOnInit() *wrapperspb.BoolValue { if x != nil { return x.WaitForWarmOnInit } @@ -1258,7 +1327,7 @@ func (x *Cluster) GetOutlierDetection() *OutlierDetection { return nil } -func (x *Cluster) GetCleanupInterval() *duration.Duration { +func (x *Cluster) GetCleanupInterval() *durationpb.Duration { if x != nil { return x.CleanupInterval } @@ -1392,6 +1461,13 @@ func (x *Cluster) GetLrsServer() *v32.ConfigSource { return nil } +func (x *Cluster) GetLrsReportEndpointMetrics() []string { + if x != nil { + return x.LrsReportEndpointMetrics + } + return nil +} + // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. func (x *Cluster) GetTrackTimeoutBudgets() bool { if x != nil { @@ -1566,6 +1642,10 @@ type UpstreamConnectionOptions struct { // This can be used by extensions during processing of requests. The association mechanism is // implementation specific. Defaults to false due to performance concerns. SetLocalInterfaceNameOnUpstreamConnections bool `protobuf:"varint,2,opt,name=set_local_interface_name_on_upstream_connections,json=setLocalInterfaceNameOnUpstreamConnections,proto3" json:"set_local_interface_name_on_upstream_connections,omitempty"` + // Configurations for happy eyeballs algorithm. + // Add configs for first_address_family_version and first_address_family_count + // when sorting destination ip addresses. 
+ HappyEyeballsConfig *UpstreamConnectionOptions_HappyEyeballsConfig `protobuf:"bytes,3,opt,name=happy_eyeballs_config,json=happyEyeballsConfig,proto3" json:"happy_eyeballs_config,omitempty"` } func (x *UpstreamConnectionOptions) Reset() { @@ -1614,6 +1694,13 @@ func (x *UpstreamConnectionOptions) GetSetLocalInterfaceNameOnUpstreamConnection return false } +func (x *UpstreamConnectionOptions) GetHappyEyeballsConfig() *UpstreamConnectionOptions_HappyEyeballsConfig { + if x != nil { + return x.HappyEyeballsConfig + } + return nil +} + type TrackClusterStats struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1707,12 +1794,12 @@ type Cluster_TransportSocketMatch struct { // The name of the match, used in stats generation. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Optional endpoint metadata match criteria. + // Optional metadata match criteria. // The connection to the endpoint with metadata matching what is set in this field // will use the transport socket configuration specified here. // The endpoint's metadata entry in “envoy.transport_socket_match“ is used to match // against the values specified in this field. - Match *_struct.Struct `protobuf:"bytes,2,opt,name=match,proto3" json:"match,omitempty"` + Match *structpb.Struct `protobuf:"bytes,2,opt,name=match,proto3" json:"match,omitempty"` // The configuration of the transport socket. // [#extension-category: envoy.transport_sockets.upstream] TransportSocket *v32.TransportSocket `protobuf:"bytes,3,opt,name=transport_socket,json=transportSocket,proto3" json:"transport_socket,omitempty"` @@ -1757,7 +1844,7 @@ func (x *Cluster_TransportSocketMatch) GetName() string { return "" } -func (x *Cluster_TransportSocketMatch) GetMatch() *_struct.Struct { +func (x *Cluster_TransportSocketMatch) GetMatch() *structpb.Struct { if x != nil { return x.Match } @@ -1782,7 +1869,7 @@ type Cluster_CustomClusterType struct { // Cluster specific configuration which depends on the cluster being instantiated. // See the supported cluster for further documentation. // [#extension-category: envoy.clusters] - TypedConfig *any1.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` + TypedConfig *anypb.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` } func (x *Cluster_CustomClusterType) Reset() { @@ -1824,7 +1911,7 @@ func (x *Cluster_CustomClusterType) GetName() string { return "" } -func (x *Cluster_CustomClusterType) GetTypedConfig() *any1.Any { +func (x *Cluster_CustomClusterType) GetTypedConfig() *anypb.Any { if x != nil { return x.TypedConfig } @@ -1911,7 +1998,7 @@ type Cluster_LbSubsetConfig struct { // namespace. It is valid for no hosts to match, in which case the behavior // is the same as a fallback_policy of // :ref:`NO_FALLBACK`. - DefaultSubset *_struct.Struct `protobuf:"bytes,2,opt,name=default_subset,json=defaultSubset,proto3" json:"default_subset,omitempty"` + DefaultSubset *structpb.Struct `protobuf:"bytes,2,opt,name=default_subset,json=defaultSubset,proto3" json:"default_subset,omitempty"` // For each entry, LbEndpoint.Metadata's // “envoy.lb“ namespace is traversed and a subset is created for each unique // combination of key and value. 
For example: @@ -2003,7 +2090,7 @@ func (x *Cluster_LbSubsetConfig) GetFallbackPolicy() Cluster_LbSubsetConfig_LbSu return Cluster_LbSubsetConfig_NO_FALLBACK } -func (x *Cluster_LbSubsetConfig) GetDefaultSubset() *_struct.Struct { +func (x *Cluster_LbSubsetConfig) GetDefaultSubset() *structpb.Struct { if x != nil { return x.DefaultSubset } @@ -2061,7 +2148,7 @@ type Cluster_SlowStartConfig struct { // Represents the size of slow start window. // If set, the newly created host remains in slow start mode starting from its creation time // for the duration of slow start window. - SlowStartWindow *duration.Duration `protobuf:"bytes,1,opt,name=slow_start_window,json=slowStartWindow,proto3" json:"slow_start_window,omitempty"` + SlowStartWindow *durationpb.Duration `protobuf:"bytes,1,opt,name=slow_start_window,json=slowStartWindow,proto3" json:"slow_start_window,omitempty"` // This parameter controls the speed of traffic increase over the slow start window. Defaults to 1.0, // so that endpoint would get linearly increasing amount of traffic. // When increasing the value for this parameter, the speed of traffic ramp-up increases non-linearly. @@ -2113,7 +2200,7 @@ func (*Cluster_SlowStartConfig) Descriptor() ([]byte, []int) { return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 4} } -func (x *Cluster_SlowStartConfig) GetSlowStartWindow() *duration.Duration { +func (x *Cluster_SlowStartConfig) GetSlowStartWindow() *durationpb.Duration { if x != nil { return x.SlowStartWindow } @@ -2192,7 +2279,7 @@ type Cluster_LeastRequestLbConfig struct { // The number of random healthy hosts from which the host with the fewest active requests will // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. - ChoiceCount *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=choice_count,json=choiceCount,proto3" json:"choice_count,omitempty"` + ChoiceCount *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=choice_count,json=choiceCount,proto3" json:"choice_count,omitempty"` // The following formula is used to calculate the dynamic weights when hosts have different load // balancing weights: // @@ -2255,7 +2342,7 @@ func (*Cluster_LeastRequestLbConfig) Descriptor() ([]byte, []int) { return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 6} } -func (x *Cluster_LeastRequestLbConfig) GetChoiceCount() *wrappers.UInt32Value { +func (x *Cluster_LeastRequestLbConfig) GetChoiceCount() *wrapperspb.UInt32Value { if x != nil { return x.ChoiceCount } @@ -2287,14 +2374,14 @@ type Cluster_RingHashLbConfig struct { // provided host) the better the request distribution will reflect the desired weights. Defaults // to 1024 entries, and limited to 8M entries. See also // :ref:`maximum_ring_size`. - MinimumRingSize *wrappers.UInt64Value `protobuf:"bytes,1,opt,name=minimum_ring_size,json=minimumRingSize,proto3" json:"minimum_ring_size,omitempty"` + MinimumRingSize *wrapperspb.UInt64Value `protobuf:"bytes,1,opt,name=minimum_ring_size,json=minimumRingSize,proto3" json:"minimum_ring_size,omitempty"` // The hash function used to hash hosts onto the ketama ring. The value defaults to // :ref:`XX_HASH`. HashFunction Cluster_RingHashLbConfig_HashFunction `protobuf:"varint,3,opt,name=hash_function,json=hashFunction,proto3,enum=envoy.config.cluster.v3.Cluster_RingHashLbConfig_HashFunction" json:"hash_function,omitempty"` // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered // to further constrain resource use. 
See also // :ref:`minimum_ring_size`. - MaximumRingSize *wrappers.UInt64Value `protobuf:"bytes,4,opt,name=maximum_ring_size,json=maximumRingSize,proto3" json:"maximum_ring_size,omitempty"` + MaximumRingSize *wrapperspb.UInt64Value `protobuf:"bytes,4,opt,name=maximum_ring_size,json=maximumRingSize,proto3" json:"maximum_ring_size,omitempty"` } func (x *Cluster_RingHashLbConfig) Reset() { @@ -2329,7 +2416,7 @@ func (*Cluster_RingHashLbConfig) Descriptor() ([]byte, []int) { return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 7} } -func (x *Cluster_RingHashLbConfig) GetMinimumRingSize() *wrappers.UInt64Value { +func (x *Cluster_RingHashLbConfig) GetMinimumRingSize() *wrapperspb.UInt64Value { if x != nil { return x.MinimumRingSize } @@ -2343,7 +2430,7 @@ func (x *Cluster_RingHashLbConfig) GetHashFunction() Cluster_RingHashLbConfig_Ha return Cluster_RingHashLbConfig_XX_HASH } -func (x *Cluster_RingHashLbConfig) GetMaximumRingSize() *wrappers.UInt64Value { +func (x *Cluster_RingHashLbConfig) GetMaximumRingSize() *wrapperspb.UInt64Value { if x != nil { return x.MaximumRingSize } @@ -2361,7 +2448,7 @@ type Cluster_MaglevLbConfig struct { // Minimal disruption means that when the set of upstream hosts change, a connection will likely be sent to the same // upstream as it was before. Increasing the table size reduces the amount of disruption. // The table size must be prime number limited to 5000011. If it is not specified, the default is 65537. - TableSize *wrappers.UInt64Value `protobuf:"bytes,1,opt,name=table_size,json=tableSize,proto3" json:"table_size,omitempty"` + TableSize *wrapperspb.UInt64Value `protobuf:"bytes,1,opt,name=table_size,json=tableSize,proto3" json:"table_size,omitempty"` } func (x *Cluster_MaglevLbConfig) Reset() { @@ -2396,7 +2483,7 @@ func (*Cluster_MaglevLbConfig) Descriptor() ([]byte, []int) { return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 8} } -func (x *Cluster_MaglevLbConfig) GetTableSize() *wrappers.UInt64Value { +func (x *Cluster_MaglevLbConfig) GetTableSize() *wrapperspb.UInt64Value { if x != nil { return x.TableSize } @@ -2430,7 +2517,7 @@ type Cluster_OriginalDstLbConfig struct { HttpHeaderName string `protobuf:"bytes,2,opt,name=http_header_name,json=httpHeaderName,proto3" json:"http_header_name,omitempty"` // The port to override for the original dst address. This port // will take precedence over filter state and header override ports - UpstreamPortOverride *wrappers.UInt32Value `protobuf:"bytes,3,opt,name=upstream_port_override,json=upstreamPortOverride,proto3" json:"upstream_port_override,omitempty"` + UpstreamPortOverride *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=upstream_port_override,json=upstreamPortOverride,proto3" json:"upstream_port_override,omitempty"` // The dynamic metadata key to override destination address. // First the request metadata is considered, then the connection one. 
MetadataKey *v34.MetadataKey `protobuf:"bytes,4,opt,name=metadata_key,json=metadataKey,proto3" json:"metadata_key,omitempty"` @@ -2482,7 +2569,7 @@ func (x *Cluster_OriginalDstLbConfig) GetHttpHeaderName() string { return "" } -func (x *Cluster_OriginalDstLbConfig) GetUpstreamPortOverride() *wrappers.UInt32Value { +func (x *Cluster_OriginalDstLbConfig) GetUpstreamPortOverride() *wrapperspb.UInt32Value { if x != nil { return x.UpstreamPortOverride } @@ -2530,7 +2617,7 @@ type Cluster_CommonLbConfig struct { // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is // because merging those updates isn't currently safe. See // https://github.com/envoyproxy/envoy/pull/3941. - UpdateMergeWindow *duration.Duration `protobuf:"bytes,4,opt,name=update_merge_window,json=updateMergeWindow,proto3" json:"update_merge_window,omitempty"` + UpdateMergeWindow *durationpb.Duration `protobuf:"bytes,4,opt,name=update_merge_window,json=updateMergeWindow,proto3" json:"update_merge_window,omitempty"` // If set to true, Envoy will :ref:`exclude ` new hosts // when computing load balancing weights until they have been health checked for the first time. // This will have no effect unless active health checking is also configured. @@ -2609,7 +2696,7 @@ func (x *Cluster_CommonLbConfig) GetLocalityWeightedLbConfig() *Cluster_CommonLb return nil } -func (x *Cluster_CommonLbConfig) GetUpdateMergeWindow() *duration.Duration { +func (x *Cluster_CommonLbConfig) GetUpdateMergeWindow() *durationpb.Duration { if x != nil { return x.UpdateMergeWindow } @@ -2670,12 +2757,12 @@ type Cluster_RefreshRate struct { // Specifies the base interval between refreshes. This parameter is required and must be greater // than zero and less than // :ref:`max_interval `. - BaseInterval *duration.Duration `protobuf:"bytes,1,opt,name=base_interval,json=baseInterval,proto3" json:"base_interval,omitempty"` + BaseInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=base_interval,json=baseInterval,proto3" json:"base_interval,omitempty"` // Specifies the maximum interval between refreshes. This parameter is optional, but must be // greater than or equal to the // :ref:`base_interval ` if set. The default // is 10 times the :ref:`base_interval `. - MaxInterval *duration.Duration `protobuf:"bytes,2,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"` + MaxInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"` } func (x *Cluster_RefreshRate) Reset() { @@ -2710,14 +2797,14 @@ func (*Cluster_RefreshRate) Descriptor() ([]byte, []int) { return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 11} } -func (x *Cluster_RefreshRate) GetBaseInterval() *duration.Duration { +func (x *Cluster_RefreshRate) GetBaseInterval() *durationpb.Duration { if x != nil { return x.BaseInterval } return nil } -func (x *Cluster_RefreshRate) GetMaxInterval() *duration.Duration { +func (x *Cluster_RefreshRate) GetMaxInterval() *durationpb.Duration { if x != nil { return x.MaxInterval } @@ -2753,7 +2840,7 @@ type Cluster_PreconnectPolicy struct { // // This is limited somewhat arbitrarily to 3 because preconnecting too aggressively can // harm latency more than the preconnecting helps. 
- PerUpstreamPreconnectRatio *wrappers.DoubleValue `protobuf:"bytes,1,opt,name=per_upstream_preconnect_ratio,json=perUpstreamPreconnectRatio,proto3" json:"per_upstream_preconnect_ratio,omitempty"` + PerUpstreamPreconnectRatio *wrapperspb.DoubleValue `protobuf:"bytes,1,opt,name=per_upstream_preconnect_ratio,json=perUpstreamPreconnectRatio,proto3" json:"per_upstream_preconnect_ratio,omitempty"` // Indicates how many streams (rounded up) can be anticipated across a cluster for each // stream, useful for low QPS services. This is currently supported for a subset of // deterministic non-hash-based load-balancing algorithms (weighted round robin, random). @@ -2776,7 +2863,7 @@ type Cluster_PreconnectPolicy struct { // If both this and preconnect_ratio are set, Envoy will make sure both predicted needs are met, // basically preconnecting max(predictive-preconnect, per-upstream-preconnect), for each // upstream. - PredictivePreconnectRatio *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=predictive_preconnect_ratio,json=predictivePreconnectRatio,proto3" json:"predictive_preconnect_ratio,omitempty"` + PredictivePreconnectRatio *wrapperspb.DoubleValue `protobuf:"bytes,2,opt,name=predictive_preconnect_ratio,json=predictivePreconnectRatio,proto3" json:"predictive_preconnect_ratio,omitempty"` } func (x *Cluster_PreconnectPolicy) Reset() { @@ -2811,14 +2898,14 @@ func (*Cluster_PreconnectPolicy) Descriptor() ([]byte, []int) { return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 12} } -func (x *Cluster_PreconnectPolicy) GetPerUpstreamPreconnectRatio() *wrappers.DoubleValue { +func (x *Cluster_PreconnectPolicy) GetPerUpstreamPreconnectRatio() *wrapperspb.DoubleValue { if x != nil { return x.PerUpstreamPreconnectRatio } return nil } -func (x *Cluster_PreconnectPolicy) GetPredictivePreconnectRatio() *wrappers.DoubleValue { +func (x *Cluster_PreconnectPolicy) GetPredictivePreconnectRatio() *wrapperspb.DoubleValue { if x != nil { return x.PredictivePreconnectRatio } @@ -2935,7 +3022,7 @@ type Cluster_CommonLbConfig_ZoneAwareLbConfig struct { // even if zone aware routing is configured. If not specified, the default is 6. // * :ref:`runtime values `. // * :ref:`Zone aware routing support `. - MinClusterSize *wrappers.UInt64Value `protobuf:"bytes,2,opt,name=min_cluster_size,json=minClusterSize,proto3" json:"min_cluster_size,omitempty"` + MinClusterSize *wrapperspb.UInt64Value `protobuf:"bytes,2,opt,name=min_cluster_size,json=minClusterSize,proto3" json:"min_cluster_size,omitempty"` // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic // mode`. Instead, the cluster will fail all // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a @@ -2982,7 +3069,7 @@ func (x *Cluster_CommonLbConfig_ZoneAwareLbConfig) GetRoutingEnabled() *v33.Perc return nil } -func (x *Cluster_CommonLbConfig_ZoneAwareLbConfig) GetMinClusterSize() *wrappers.UInt64Value { +func (x *Cluster_CommonLbConfig_ZoneAwareLbConfig) GetMinClusterSize() *wrapperspb.UInt64Value { if x != nil { return x.MinClusterSize } @@ -3063,7 +3150,7 @@ type Cluster_CommonLbConfig_ConsistentHashingLbConfig struct { // // This is an O(N) algorithm, unlike other load balancers. Using a lower “hash_balance_factor“ results in more hosts // being probed, so use a higher value if you require better performance. 
- HashBalanceFactor *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=hash_balance_factor,json=hashBalanceFactor,proto3" json:"hash_balance_factor,omitempty"` + HashBalanceFactor *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=hash_balance_factor,json=hashBalanceFactor,proto3" json:"hash_balance_factor,omitempty"` } func (x *Cluster_CommonLbConfig_ConsistentHashingLbConfig) Reset() { @@ -3105,7 +3192,7 @@ func (x *Cluster_CommonLbConfig_ConsistentHashingLbConfig) GetUseHostnameForHash return false } -func (x *Cluster_CommonLbConfig_ConsistentHashingLbConfig) GetHashBalanceFactor() *wrappers.UInt32Value { +func (x *Cluster_CommonLbConfig_ConsistentHashingLbConfig) GetHashBalanceFactor() *wrapperspb.UInt32Value { if x != nil { return x.HashBalanceFactor } @@ -3160,6 +3247,65 @@ func (x *LoadBalancingPolicy_Policy) GetTypedExtensionConfig() *v32.TypedExtensi return nil } +type UpstreamConnectionOptions_HappyEyeballsConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specify the IP address family to attempt connection first in happy + // eyeballs algorithm according to RFC8305#section-4. + FirstAddressFamilyVersion UpstreamConnectionOptions_FirstAddressFamilyVersion `protobuf:"varint,1,opt,name=first_address_family_version,json=firstAddressFamilyVersion,proto3,enum=envoy.config.cluster.v3.UpstreamConnectionOptions_FirstAddressFamilyVersion" json:"first_address_family_version,omitempty"` + // Specify the number of addresses of the first_address_family_version being + // attempted for connection before the other address family. + FirstAddressFamilyCount *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=first_address_family_count,json=firstAddressFamilyCount,proto3" json:"first_address_family_count,omitempty"` +} + +func (x *UpstreamConnectionOptions_HappyEyeballsConfig) Reset() { + *x = UpstreamConnectionOptions_HappyEyeballsConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpstreamConnectionOptions_HappyEyeballsConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpstreamConnectionOptions_HappyEyeballsConfig) ProtoMessage() {} + +func (x *UpstreamConnectionOptions_HappyEyeballsConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpstreamConnectionOptions_HappyEyeballsConfig.ProtoReflect.Descriptor instead. 
+func (*UpstreamConnectionOptions_HappyEyeballsConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *UpstreamConnectionOptions_HappyEyeballsConfig) GetFirstAddressFamilyVersion() UpstreamConnectionOptions_FirstAddressFamilyVersion { + if x != nil { + return x.FirstAddressFamilyVersion + } + return UpstreamConnectionOptions_DEFAULT +} + +func (x *UpstreamConnectionOptions_HappyEyeballsConfig) GetFirstAddressFamilyCount() *wrapperspb.UInt32Value { + if x != nil { + return x.FirstAddressFamilyCount + } + return nil +} + var File_envoy_config_cluster_v3_cluster_proto protoreflect.FileDescriptor var file_envoy_config_cluster_v3_cluster_proto_rawDesc = []byte{ @@ -3225,7 +3371,7 @@ var file_envoy_config_cluster_v3_cluster_proto_rawDesc = []byte{ 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, - 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x22, 0x97, 0x53, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x22, 0xd6, 0x53, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x6f, 0x0a, 0x18, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x18, 0x2b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, @@ -3488,466 +3634,499 @@ var file_envoy_config_cluster_v3_cluster_proto_rawDesc = []byte{ 0x76, 0x65, 0x72, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x6c, - 0x72, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x63, - 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x75, 0x64, 0x67, 0x65, 0x74, - 0x73, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, - 0x2e, 0x30, 0x18, 0x01, 0x52, 0x13, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x6f, - 0x75, 0x74, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x73, 0x12, 0x53, 0x0a, 0x0f, 0x75, 0x70, 0x73, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x30, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, - 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5a, - 0x0a, 0x13, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, - 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x31, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x11, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x5e, 0x0a, 0x11, 0x70, 0x72, - 
0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, - 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x10, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x58, 0x0a, 0x29, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x70, 0x65, - 0x72, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x33, 0x20, 0x01, 0x28, 0x08, 0x52, 0x25, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x50, 0x65, 0x72, - 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xe6, 0x01, 0x0a, 0x14, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, - 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1b, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, - 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, - 0x63, 0x74, 0x52, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x50, 0x0a, 0x10, 0x74, 0x72, 0x61, - 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, - 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, - 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x3a, 0x30, 0x9a, 0xc5, 0x88, - 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, - 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0x98, 0x01, - 0x0a, 0x11, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x74, 0x79, - 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, - 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x1a, 0xa6, 0x01, 0x0a, 0x10, 0x45, 0x64, 0x73, - 0x43, 0x6c, 0x75, 0x73, 
0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, - 0x0a, 0x65, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x65, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, - 0x61, 0x6d, 0x65, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x2e, 0x45, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x1a, 0xa4, 0x0a, 0x0a, 0x0e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x79, 0x0a, 0x0f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, - 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x46, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, - 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, - 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, - 0x0e, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, - 0x3e, 0x0a, 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x65, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, - 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x12, - 0x6b, 0x0a, 0x10, 0x73, 0x75, 0x62, 0x73, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, - 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, - 0x73, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x0f, 0x73, 0x75, 0x62, - 0x73, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x32, 0x0a, 0x15, - 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, - 0x61, 0x77, 0x61, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x41, 0x77, 0x61, 0x72, 0x65, - 0x12, 0x32, 0x0a, 0x15, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, - 0x74, 0x79, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x13, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, - 0x69, 0x67, 0x68, 0x74, 0x12, 0x24, 0x0a, 0x0e, 
0x70, 0x61, 0x6e, 0x69, 0x63, 0x5f, 0x6d, 0x6f, - 0x64, 0x65, 0x5f, 0x61, 0x6e, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x70, 0x61, - 0x6e, 0x69, 0x63, 0x4d, 0x6f, 0x64, 0x65, 0x41, 0x6e, 0x79, 0x12, 0x1e, 0x0a, 0x0b, 0x6c, 0x69, - 0x73, 0x74, 0x5f, 0x61, 0x73, 0x5f, 0x61, 0x6e, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x09, 0x6c, 0x69, 0x73, 0x74, 0x41, 0x73, 0x41, 0x6e, 0x79, 0x12, 0x92, 0x01, 0x0a, 0x18, 0x6d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, - 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x4e, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, - 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, - 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, - 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x08, 0xfa, - 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x16, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, - 0xda, 0x03, 0x0a, 0x10, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x33, 0x0a, 0x16, 0x73, 0x69, 0x6e, 0x67, - 0x6c, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x73, - 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, - 0x48, 0x6f, 0x73, 0x74, 0x50, 0x65, 0x72, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x12, 0x92, 0x01, - 0x0a, 0x0f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x5f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, - 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, - 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, - 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, - 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, - 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, - 0x10, 0x01, 0x52, 0x0e, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x6b, - 0x65, 0x79, 0x73, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x12, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x4b, 0x65, 0x79, 0x73, 0x53, 0x75, - 0x62, 0x73, 0x65, 0x74, 0x22, 0x79, 0x0a, 0x1e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, - 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x54, 0x5f, 0x44, 0x45, - 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x5f, 0x46, 0x41, - 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 
0x41, 0x4e, 0x59, 0x5f, - 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x45, - 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x45, 0x54, 0x10, 0x03, 0x12, 0x0f, - 0x0a, 0x0b, 0x4b, 0x45, 0x59, 0x53, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x45, 0x54, 0x10, 0x04, 0x3a, - 0x3b, 0x9a, 0xc5, 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, - 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, 0x53, 0x75, - 0x62, 0x73, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x22, 0x4f, 0x0a, 0x16, - 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x5f, 0x46, 0x41, 0x4c, - 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x4e, 0x59, 0x5f, 0x45, - 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x45, 0x46, - 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x45, 0x54, 0x10, 0x02, 0x22, 0x4d, 0x0a, - 0x1e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, - 0x18, 0x0a, 0x14, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x4e, 0x4f, 0x5f, 0x46, - 0x41, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x41, 0x4c, - 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x01, 0x3a, 0x2a, 0x9a, 0xc5, - 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, - 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xe3, 0x01, 0x0a, 0x0f, 0x53, 0x6c, 0x6f, - 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x45, 0x0a, 0x11, - 0x73, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, - 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x57, 0x69, 0x6e, - 0x64, 0x6f, 0x77, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x67, 0x67, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, - 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x0a, 0x61, 0x67, - 0x67, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x12, 0x6d, 0x69, 0x6e, 0x5f, - 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x10, 0x6d, 0x69, - 0x6e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0x72, - 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x4c, 0x62, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5c, 0x0a, 0x11, 0x73, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, - 
0x72, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x2e, 0x53, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x1a, 0xc5, 0x02, 0x0a, 0x14, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x48, 0x0a, 0x0c, 0x63, - 0x68, 0x6f, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, - 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x02, 0x52, 0x0b, 0x63, 0x68, 0x6f, 0x69, 0x63, 0x65, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x53, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, - 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x69, 0x61, 0x73, 0x12, 0x5c, 0x0a, 0x11, 0x73, 0x6c, - 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x53, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, - 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, - 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x91, 0x03, 0x0a, 0x10, 0x52, - 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x54, 0x0a, 0x11, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, - 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, - 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x32, 0x05, 0x18, - 0x80, 0x80, 0x80, 0x04, 0x52, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, - 0x67, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x6d, 0x0a, 0x0d, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x66, 0x75, - 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x65, + 0x72, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x72, 0x73, 0x5f, + 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, + 0x6d, 0x65, 0x74, 0x72, 
0x69, 0x63, 0x73, 0x18, 0x39, 0x20, 0x03, 0x28, 0x09, 0x52, 0x18, 0x6c, + 0x72, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x3f, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x63, 0x6b, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x75, 0x64, 0x67, 0x65, 0x74, 0x73, + 0x18, 0x2f, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, + 0x30, 0x18, 0x01, 0x52, 0x13, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x73, 0x12, 0x53, 0x0a, 0x0f, 0x75, 0x70, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x30, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x75, + 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5a, 0x0a, + 0x13, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x73, 0x18, 0x31, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x11, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x5e, 0x0a, 0x11, 0x70, 0x72, 0x65, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x32, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x10, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x58, 0x0a, 0x29, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x70, 0x65, 0x72, + 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x33, 0x20, 0x01, 0x28, 0x08, 0x52, 0x25, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x50, 0x65, 0x72, 0x44, + 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0xe6, 0x01, 0x0a, 0x14, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1b, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x52, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x50, 0x0a, 0x10, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 
0x63, 0x6b, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, + 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0x98, 0x01, 0x0a, + 0x11, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x37, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x74, 0x79, 0x70, + 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, + 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x1a, 0xa6, 0x01, 0x0a, 0x10, 0x45, 0x64, 0x73, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x0a, + 0x65, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x65, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, + 0x45, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x1a, 0xa4, 0x0a, 0x0a, 0x0e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x79, 0x0a, 0x0f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, - 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x48, 0x61, 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, - 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x68, 0x61, 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x54, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x69, 
0x6d, 0x75, 0x6d, 0x5f, - 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, - 0x42, 0x07, 0x32, 0x05, 0x18, 0x80, 0x80, 0x80, 0x04, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, - 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x2e, 0x0a, 0x0c, 0x48, 0x61, - 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x58, 0x58, - 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4d, 0x55, 0x52, 0x4d, 0x55, - 0x52, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x5f, 0x32, 0x10, 0x01, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, - 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, - 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x1a, 0x59, - 0x0a, 0x0e, 0x4d, 0x61, 0x67, 0x6c, 0x65, 0x76, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x47, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x32, 0x05, 0x18, 0xcb, 0x96, 0xb1, 0x02, 0x52, 0x09, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0xbf, 0x02, 0x0a, 0x13, 0x4f, 0x72, - 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x44, 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x26, 0x0a, 0x0f, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x68, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x75, 0x73, 0x65, 0x48, - 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x68, 0x74, 0x74, - 0x70, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0e, 0x68, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x5d, 0x0a, 0x16, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, - 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04, 0x18, 0xff, 0xff, 0x03, 0x52, 0x14, 0x75, 0x70, - 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x6f, 0x72, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, - 0x64, 0x65, 0x12, 0x46, 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, - 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, - 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x6d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, - 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, - 
0x44, 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xd5, 0x0b, 0x0a, 0x0e, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, - 0x0a, 0x17, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x70, 0x61, 0x6e, 0x69, 0x63, 0x5f, - 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x15, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, - 0x50, 0x61, 0x6e, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x74, - 0x0a, 0x14, 0x7a, 0x6f, 0x6e, 0x65, 0x5f, 0x61, 0x77, 0x61, 0x72, 0x65, 0x5f, 0x6c, 0x62, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x65, + 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, + 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, + 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0e, + 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x3e, + 0x0a, 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x65, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, + 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x12, 0x6b, + 0x0a, 0x10, 0x73, 0x75, 0x62, 0x73, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, + 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, + 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x0f, 0x73, 0x75, 0x62, 0x73, + 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x61, + 0x77, 0x61, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x41, 0x77, 0x61, 0x72, 0x65, 0x12, + 0x32, 0x0a, 0x15, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, + 0x73, 0x63, 0x61, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x6e, 0x69, 0x63, 0x5f, 0x6d, 0x6f, 0x64, + 0x65, 0x5f, 0x61, 0x6e, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x70, 0x61, 0x6e, + 0x69, 0x63, 0x4d, 0x6f, 0x64, 0x65, 0x41, 0x6e, 0x79, 0x12, 0x1e, 0x0a, 0x0b, 0x6c, 0x69, 0x73, + 0x74, 0x5f, 0x61, 0x73, 0x5f, 0x61, 0x6e, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, + 0x6c, 0x69, 0x73, 0x74, 0x41, 0x73, 0x41, 0x6e, 0x79, 0x12, 0x92, 0x01, 0x0a, 0x18, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 
0x74, 0x61, 0x5f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x4e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x5a, 0x6f, - 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, - 0x00, 0x52, 0x11, 0x7a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x89, 0x01, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, - 0x79, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, 0x61, - 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, - 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x49, 0x0a, 0x13, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x65, 0x72, 0x67, 0x65, - 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x4d, 0x65, 0x72, 0x67, 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x43, 0x0a, 0x1f, 0x69, - 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x5f, - 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x68, 0x63, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x4e, 0x65, 0x77, 0x48, - 0x6f, 0x73, 0x74, 0x73, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x46, 0x69, 0x72, 0x73, 0x74, 0x48, 0x63, - 0x12, 0x4d, 0x0a, 0x24, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x6f, 0x6e, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x65, - 0x74, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, - 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x4f, 0x6e, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x8a, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x68, - 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, + 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, + 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x61, + 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 
0x6c, 0x69, 0x63, 0x79, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x16, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0xda, + 0x03, 0x0a, 0x10, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x33, 0x0a, 0x16, 0x73, 0x69, 0x6e, 0x67, 0x6c, + 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x65, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x48, + 0x6f, 0x73, 0x74, 0x50, 0x65, 0x72, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x12, 0x92, 0x01, 0x0a, + 0x0f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x5f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, - 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, - 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x19, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, - 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x57, 0x0a, 0x14, - 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x53, 0x65, - 0x74, 0x52, 0x12, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x48, 0x6f, 0x73, 0x74, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, 0x8d, 0x02, 0x0a, 0x11, 0x5a, 0x6f, 0x6e, 0x65, 0x41, 0x77, - 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x0f, 0x72, - 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x6f, - 0x75, 0x74, 0x69, 0x6e, 0x67, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x46, 0x0a, 0x10, - 0x6d, 0x69, 0x6e, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x69, 0x7a, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x53, 0x69, 0x7a, 0x65, 0x12, 0x31, 0x0a, 0x15, 0x66, 0x61, 0x69, 0x6c, 0x5f, 0x74, 0x72, 0x61, - 0x66, 0x66, 0x69, 0x63, 0x5f, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x6e, 0x69, 0x63, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x12, 0x66, 0x61, 0x69, 0x6c, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, - 0x4f, 0x6e, 0x50, 0x61, 0x6e, 0x69, 0x63, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, 0x0a, 0x35, + 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 
0x75, 0x62, 0x73, 0x65, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, + 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, + 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, + 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, + 0x01, 0x52, 0x0e, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x12, 0x30, 0x0a, 0x14, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x6b, 0x65, + 0x79, 0x73, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x12, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x4b, 0x65, 0x79, 0x73, 0x53, 0x75, 0x62, + 0x73, 0x65, 0x74, 0x22, 0x79, 0x0a, 0x1e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x54, 0x5f, 0x44, 0x45, 0x46, + 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x5f, 0x46, 0x41, 0x4c, + 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x4e, 0x59, 0x5f, 0x45, + 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x45, 0x46, + 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x45, 0x54, 0x10, 0x03, 0x12, 0x0f, 0x0a, + 0x0b, 0x4b, 0x45, 0x59, 0x53, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x45, 0x54, 0x10, 0x04, 0x3a, 0x3b, + 0x9a, 0xc5, 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, + 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, + 0x73, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x22, 0x4f, 0x0a, 0x16, 0x4c, + 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x5f, 0x46, 0x41, 0x4c, 0x4c, + 0x42, 0x41, 0x43, 0x4b, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x4e, 0x59, 0x5f, 0x45, 0x4e, + 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x45, 0x46, 0x41, + 0x55, 0x4c, 0x54, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x45, 0x54, 0x10, 0x02, 0x22, 0x4d, 0x0a, 0x1e, + 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x18, + 0x0a, 0x14, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x4e, 0x4f, 0x5f, 0x46, 0x41, + 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x41, 0x4c, 0x4c, + 0x42, 0x41, 0x43, 0x4b, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x01, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, + 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xe3, 0x01, 0x0a, 0x0f, 0x53, 0x6c, 0x6f, 0x77, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x45, 0x0a, 0x11, 0x73, + 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x57, 0x69, 0x6e, 0x64, + 0x6f, 0x77, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x67, 0x67, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x0a, 0x61, 0x67, 0x67, + 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x12, 0x6d, 0x69, 0x6e, 0x5f, 0x77, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x10, 0x6d, 0x69, 0x6e, + 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0x72, 0x0a, + 0x12, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x5c, 0x0a, 0x11, 0x73, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x53, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x1a, 0xc5, 0x02, 0x0a, 0x14, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x48, 0x0a, 0x0c, 0x63, 0x68, + 0x6f, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x02, 0x52, 0x0b, 0x63, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x53, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, + 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x69, 0x61, 0x73, 0x12, 0x5c, 0x0a, 0x11, 0x73, 0x6c, 0x6f, + 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x53, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 
0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x5f, 0x0a, 0x18, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, - 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x3a, 0x43, 0x9a, 0xc5, 0x88, 0x1e, 0x3e, 0x0a, 0x3c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, - 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xf1, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6e, 0x73, 0x69, - 0x73, 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x18, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, - 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x75, 0x73, 0x65, 0x48, 0x6f, 0x73, 0x74, 0x6e, - 0x61, 0x6d, 0x65, 0x46, 0x6f, 0x72, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x55, 0x0a, - 0x13, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x61, - 0x63, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, - 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, - 0x64, 0x52, 0x11, 0x68, 0x61, 0x73, 0x68, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x61, - 0x63, 0x74, 0x6f, 0x72, 0x3a, 0x44, 0x9a, 0xc5, 0x88, 0x1e, 0x3f, 0x0a, 0x3d, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, - 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, - 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x91, 0x03, 0x0a, 0x10, 0x52, 0x69, + 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x54, + 0x0a, 0x11, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x32, 0x05, 0x18, 0x80, + 0x80, 0x80, 0x04, 0x52, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x6d, 0x0a, 0x0d, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x69, + 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, + 0x61, 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x68, 0x61, 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x54, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x72, + 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, + 0x07, 0x32, 0x05, 0x18, 0x80, 0x80, 0x80, 0x04, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x2e, 0x0a, 0x0c, 0x48, 0x61, 0x73, + 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x58, 0x58, 0x5f, + 0x48, 0x41, 0x53, 0x48, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4d, 0x55, 0x52, 0x4d, 0x55, 0x52, + 0x5f, 0x48, 0x41, 0x53, 0x48, 0x5f, 0x32, 0x10, 0x01, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, + 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, + 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x1a, 0x59, 0x0a, + 0x0e, 0x4d, 0x61, 0x67, 0x6c, 0x65, 0x76, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x47, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x32, 0x05, 0x18, 0xcb, 0x96, 0xb1, 0x02, 0x52, 0x09, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0xbf, 0x02, 0x0a, 0x13, 0x4f, 0x72, 0x69, + 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x44, 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x26, 0x0a, 0x0f, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x75, 0x73, 0x65, 0x48, 0x74, + 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0e, 0x68, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x5d, 0x0a, 0x16, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04, 0x18, 0xff, 0xff, 0x03, 0x52, 0x14, 0x75, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x6f, 0x72, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, + 0x65, 0x12, 0x46, 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 
0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, + 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, + 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x44, + 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xd5, 0x0b, 0x0a, 0x0e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, + 0x17, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x70, 0x61, 0x6e, 0x69, 0x63, 0x5f, 0x74, + 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x15, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x50, + 0x61, 0x6e, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x74, 0x0a, + 0x14, 0x7a, 0x6f, 0x6e, 0x65, 0x5f, 0x61, 0x77, 0x61, 0x72, 0x65, 0x5f, 0x6c, 0x62, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x5a, 0x6f, 0x6e, + 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, + 0x52, 0x11, 0x7a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x89, 0x01, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, + 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x49, 0x0a, 0x13, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x5f, + 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, + 0x65, 0x72, 0x67, 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x43, 0x0a, 0x1f, 0x69, 0x67, + 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x5f, 0x75, + 0x6e, 0x74, 0x69, 0x6c, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x68, 0x63, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x1a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x4e, 0x65, 0x77, 0x48, 0x6f, + 
0x73, 0x74, 0x73, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x46, 0x69, 0x72, 0x73, 0x74, 0x48, 0x63, 0x12, + 0x4d, 0x0a, 0x24, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x6f, 0x6e, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x74, + 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x63, + 0x6c, 0x6f, 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4f, + 0x6e, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x8a, + 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x61, + 0x73, 0x68, 0x69, 0x6e, 0x67, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x1b, 0x0a, 0x19, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, - 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, - 0x69, 0x65, 0x72, 0x1a, 0xd2, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, - 0x61, 0x74, 0x65, 0x12, 0x4e, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0xaa, 0x01, 0x08, 0x08, 0x01, 0x2a, - 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, - 0x76, 0x61, 0x6c, 0x12, 0x4a, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x19, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, + 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x57, 0x0a, 0x14, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x53, 0x65, 0x74, + 0x52, 0x12, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x1a, 0x8d, 0x02, 0x0a, 0x11, 0x5a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, + 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x0f, 0x72, 0x6f, + 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x46, 0x0a, 0x10, 0x6d, + 0x69, 0x6e, 0x5f, 0x63, 
0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x31, 0x0a, 0x15, 0x66, 0x61, 0x69, 0x6c, 0x5f, 0x74, 0x72, 0x61, 0x66, + 0x66, 0x69, 0x63, 0x5f, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x6e, 0x69, 0x63, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x12, 0x66, 0x61, 0x69, 0x6c, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x4f, + 0x6e, 0x50, 0x61, 0x6e, 0x69, 0x63, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, 0x0a, 0x35, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x5f, 0x0a, 0x18, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x3a, 0x43, 0x9a, 0xc5, 0x88, 0x1e, 0x3e, 0x0a, 0x3c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xf1, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x18, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, + 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x75, 0x73, 0x65, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, + 0x6d, 0x65, 0x46, 0x6f, 0x72, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x55, 0x0a, 0x13, + 0x68, 0x61, 0x73, 0x68, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x61, 0x63, + 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x64, + 0x52, 0x11, 0x68, 0x61, 0x73, 0x68, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x61, 0x63, + 0x74, 0x6f, 0x72, 0x3a, 0x44, 0x9a, 0xc5, 0x88, 0x1e, 0x3f, 0x0a, 0x3d, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, + 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, + 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x1b, 0x0a, 0x19, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 
0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x1a, 0xd2, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x61, + 0x74, 0x65, 0x12, 0x4e, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x2a, 0x04, 0x10, 0xc0, - 0x84, 0x3d, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x3a, - 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x66, - 0x72, 0x65, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x1a, 0x83, 0x02, 0x0a, 0x10, 0x50, 0x72, 0x65, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x78, 0x0a, - 0x1d, 0x70, 0x65, 0x72, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x70, 0x72, - 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x08, 0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x52, 0x1a, 0x70, 0x65, 0x72, - 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x75, 0x0a, 0x1b, 0x70, 0x72, 0x65, 0x64, 0x69, - 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, - 0x12, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0xf0, 0x3f, 0x52, 0x19, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x69, 0x76, 0x65, 0x50, - 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x1a, 0x66, - 0x0a, 0x22, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x57, 0x0a, 0x0d, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, - 0x65, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x41, 0x54, 0x49, - 0x43, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x53, 0x54, 0x52, 0x49, 0x43, 0x54, 0x5f, 0x44, 0x4e, - 0x53, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4c, 0x4f, 0x47, 0x49, 0x43, 0x41, 0x4c, 0x5f, 0x44, - 0x4e, 0x53, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x45, 0x44, 0x53, 0x10, 
0x03, 0x12, 0x10, 0x0a, - 0x0c, 0x4f, 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, 0x5f, 0x44, 0x53, 0x54, 0x10, 0x04, 0x22, - 0xa4, 0x01, 0x0a, 0x08, 0x4c, 0x62, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, - 0x52, 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x52, 0x4f, 0x42, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x11, 0x0a, - 0x0d, 0x4c, 0x45, 0x41, 0x53, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x01, - 0x12, 0x0d, 0x0a, 0x09, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x02, 0x12, - 0x0a, 0x0a, 0x06, 0x52, 0x41, 0x4e, 0x44, 0x4f, 0x4d, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x4d, - 0x41, 0x47, 0x4c, 0x45, 0x56, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4c, 0x55, 0x53, 0x54, - 0x45, 0x52, 0x5f, 0x50, 0x52, 0x4f, 0x56, 0x49, 0x44, 0x45, 0x44, 0x10, 0x06, 0x12, 0x20, 0x0a, - 0x1c, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x49, 0x4e, 0x47, 0x5f, - 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x10, 0x07, 0x22, - 0x04, 0x08, 0x04, 0x10, 0x04, 0x2a, 0x0f, 0x4f, 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, 0x5f, - 0x44, 0x53, 0x54, 0x5f, 0x4c, 0x42, 0x22, 0x50, 0x0a, 0x0f, 0x44, 0x6e, 0x73, 0x4c, 0x6f, 0x6f, - 0x6b, 0x75, 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x55, 0x54, - 0x4f, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x56, 0x34, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, - 0x12, 0x0b, 0x0a, 0x07, 0x56, 0x36, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x10, 0x0a, - 0x0c, 0x56, 0x34, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x45, 0x52, 0x52, 0x45, 0x44, 0x10, 0x03, 0x12, - 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x04, 0x22, 0x54, 0x0a, 0x18, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x53, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x46, - 0x49, 0x47, 0x55, 0x52, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, - 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x53, 0x45, 0x5f, 0x44, 0x4f, 0x57, 0x4e, 0x53, 0x54, 0x52, - 0x45, 0x41, 0x4d, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0x01, 0x3a, 0x1b, - 0x9a, 0xc5, 0x88, 0x1e, 0x16, 0x0a, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x4a, 0x04, - 0x08, 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x23, 0x10, 0x24, - 0x52, 0x05, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x52, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, - 0x74, 0x65, 0x78, 0x74, 0x52, 0x1a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x22, 0xda, 0x02, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, - 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4f, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, - 
0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, - 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x1a, 0xc8, 0x01, 0x0a, 0x06, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x12, 0x60, 0x0a, 0x16, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, - 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x52, 0x14, 0x74, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x61, 0x64, - 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x01, - 0x10, 0x02, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xf9, 0x01, - 0x0a, 0x19, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x47, 0x0a, 0x0d, 0x74, - 0x63, 0x70, 0x5f, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x63, 0x70, 0x4b, 0x65, 0x65, - 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x52, 0x0c, 0x74, 0x63, 0x70, 0x4b, 0x65, 0x65, 0x70, 0x61, - 0x6c, 0x69, 0x76, 0x65, 0x12, 0x64, 0x0a, 0x30, 0x73, 0x65, 0x74, 0x5f, 0x6c, 0x6f, 0x63, 0x61, - 0x6c, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x5f, 0x6f, 0x6e, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x2a, - 0x73, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, - 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x4f, 0x6e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, - 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa0, 0x01, 0x0a, 0x11, 0x54, 0x72, - 0x61, 0x63, 0x6b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, - 0x27, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x75, 0x64, 0x67, 0x65, - 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x42, 0x75, 0x64, 
0x67, 0x65, 0x74, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x73, 0x69, 0x7a, - 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x12, 0x2c, - 0x0a, 0x12, 0x70, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x73, - 0x74, 0x61, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x65, 0x72, 0x45, - 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x42, 0x89, 0x01, 0xba, - 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0c, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0xaa, 0x01, 0x08, 0x08, 0x01, 0x2a, 0x04, + 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x12, 0x4a, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x2a, 0x04, 0x10, 0xc0, 0x84, + 0x3d, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x3a, 0x27, + 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x72, + 0x65, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x1a, 0x83, 0x02, 0x0a, 0x10, 0x50, 0x72, 0x65, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x78, 0x0a, 0x1d, + 0x70, 0x65, 0x72, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x70, 0x72, 0x65, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + 0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x52, 0x1a, 0x70, 0x65, 0x72, 0x55, + 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x75, 0x0a, 0x1b, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x6f, + 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, + 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xf0, 0x3f, 0x52, 0x19, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x69, 0x76, 0x65, 0x50, 0x72, + 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x1a, 0x66, 0x0a, + 0x22, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x57, 0x0a, 0x0d, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, + 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x41, 0x54, 0x49, 0x43, + 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x53, 0x54, 0x52, 0x49, 0x43, 0x54, 0x5f, 0x44, 0x4e, 0x53, + 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4c, 0x4f, 0x47, 0x49, 0x43, 0x41, 0x4c, 0x5f, 0x44, 0x4e, + 0x53, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x45, 0x44, 0x53, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, + 0x4f, 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, 0x5f, 0x44, 0x53, 0x54, 0x10, 0x04, 0x22, 0xa4, + 0x01, 0x0a, 0x08, 0x4c, 0x62, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x52, + 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x52, 0x4f, 0x42, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, + 0x4c, 0x45, 0x41, 0x53, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x01, 0x12, + 0x0d, 0x0a, 0x09, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x02, 0x12, 0x0a, + 0x0a, 0x06, 0x52, 0x41, 0x4e, 0x44, 0x4f, 0x4d, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x41, + 0x47, 0x4c, 0x45, 0x56, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, + 0x52, 0x5f, 0x50, 0x52, 0x4f, 0x56, 0x49, 0x44, 0x45, 0x44, 0x10, 0x06, 0x12, 0x20, 0x0a, 0x1c, + 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x49, 0x4e, 0x47, 0x5f, 0x50, + 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x10, 0x07, 0x22, 0x04, + 0x08, 0x04, 0x10, 0x04, 0x2a, 0x0f, 0x4f, 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, 0x5f, 0x44, + 0x53, 0x54, 0x5f, 0x4c, 0x42, 0x22, 0x50, 0x0a, 0x0f, 0x44, 0x6e, 0x73, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x55, 0x54, 0x4f, + 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x56, 0x34, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, + 0x0b, 0x0a, 0x07, 0x56, 0x36, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, + 0x56, 0x34, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x45, 0x52, 0x52, 0x45, 0x44, 0x10, 0x03, 0x12, 0x07, + 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x04, 0x22, 0x54, 0x0a, 0x18, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x53, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, + 0x47, 0x55, 0x52, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 
0x4f, 0x4c, 0x10, 0x00, + 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x53, 0x45, 0x5f, 0x44, 0x4f, 0x57, 0x4e, 0x53, 0x54, 0x52, 0x45, + 0x41, 0x4d, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0x01, 0x3a, 0x1b, 0x9a, + 0xc5, 0x88, 0x1e, 0x16, 0x0a, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x4a, 0x04, 0x08, + 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x23, 0x10, 0x24, 0x52, + 0x05, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x52, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x52, 0x1a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, + 0xda, 0x02, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4f, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x08, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x1a, 0xc8, 0x01, 0x0a, 0x06, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x60, 0x0a, 0x16, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x14, 0x74, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x01, 0x10, + 0x02, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xbb, 0x05, 0x0a, + 0x19, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x47, 0x0a, 0x0d, 0x74, 0x63, + 0x70, 0x5f, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 
0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x63, 0x70, 0x4b, 0x65, 0x65, 0x70, + 0x61, 0x6c, 0x69, 0x76, 0x65, 0x52, 0x0c, 0x74, 0x63, 0x70, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, + 0x69, 0x76, 0x65, 0x12, 0x64, 0x0a, 0x30, 0x73, 0x65, 0x74, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, + 0x6f, 0x6e, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x2a, 0x73, + 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x4f, 0x6e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x7a, 0x0a, 0x15, 0x68, 0x61, 0x70, + 0x70, 0x79, 0x5f, 0x65, 0x79, 0x65, 0x62, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x61, 0x70, + 0x70, 0x79, 0x45, 0x79, 0x65, 0x62, 0x61, 0x6c, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x13, 0x68, 0x61, 0x70, 0x70, 0x79, 0x45, 0x79, 0x65, 0x62, 0x61, 0x6c, 0x6c, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x89, 0x02, 0x0a, 0x13, 0x48, 0x61, 0x70, 0x70, 0x79, 0x45, + 0x79, 0x65, 0x62, 0x61, 0x6c, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x8d, 0x01, + 0x0a, 0x1c, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, + 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x4c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x55, + 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x69, 0x72, 0x73, 0x74, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x19, 0x66, 0x69, 0x72, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x62, 0x0a, + 0x1a, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x66, + 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x17, 0x66, 0x69, 0x72, 0x73, 0x74, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x22, 0x38, 0x0a, 0x19, 0x46, 0x69, 0x72, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0b, + 0x0a, 0x07, 0x44, 0x45, 
0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x56, + 0x34, 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x56, 0x36, 0x10, 0x02, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, + 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa0, 0x01, 0x0a, 0x11, 0x54, + 0x72, 0x61, 0x63, 0x6b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x75, 0x64, 0x67, + 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x12, + 0x2c, 0x0a, 0x12, 0x70, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x65, 0x72, + 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x42, 0x89, 0x01, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, + 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x48, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( @@ -3962,8 +4141,8 @@ func file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP() []byte { return file_envoy_config_cluster_v3_cluster_proto_rawDescData } -var file_envoy_config_cluster_v3_cluster_proto_enumTypes = make([]protoimpl.EnumInfo, 8) -var file_envoy_config_cluster_v3_cluster_proto_msgTypes = make([]protoimpl.MessageInfo, 24) +var file_envoy_config_cluster_v3_cluster_proto_enumTypes = make([]protoimpl.EnumInfo, 9) +var file_envoy_config_cluster_v3_cluster_proto_msgTypes = make([]protoimpl.MessageInfo, 25) var file_envoy_config_cluster_v3_cluster_proto_goTypes = []interface{}{ (Cluster_DiscoveryType)(0), // 0: envoy.config.cluster.v3.Cluster.DiscoveryType (Cluster_LbPolicy)(0), // 1: envoy.config.cluster.v3.Cluster.LbPolicy @@ -3973,149 +4152,154 @@ var file_envoy_config_cluster_v3_cluster_proto_goTypes = []interface{}{ (Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy)(0), // 5: envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetMetadataFallbackPolicy (Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy)(0), // 6: envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.LbSubsetSelectorFallbackPolicy (Cluster_RingHashLbConfig_HashFunction)(0), // 
7: envoy.config.cluster.v3.Cluster.RingHashLbConfig.HashFunction - (*ClusterCollection)(nil), // 8: envoy.config.cluster.v3.ClusterCollection - (*Cluster)(nil), // 9: envoy.config.cluster.v3.Cluster - (*LoadBalancingPolicy)(nil), // 10: envoy.config.cluster.v3.LoadBalancingPolicy - (*UpstreamConnectionOptions)(nil), // 11: envoy.config.cluster.v3.UpstreamConnectionOptions - (*TrackClusterStats)(nil), // 12: envoy.config.cluster.v3.TrackClusterStats - (*Cluster_TransportSocketMatch)(nil), // 13: envoy.config.cluster.v3.Cluster.TransportSocketMatch - (*Cluster_CustomClusterType)(nil), // 14: envoy.config.cluster.v3.Cluster.CustomClusterType - (*Cluster_EdsClusterConfig)(nil), // 15: envoy.config.cluster.v3.Cluster.EdsClusterConfig - (*Cluster_LbSubsetConfig)(nil), // 16: envoy.config.cluster.v3.Cluster.LbSubsetConfig - (*Cluster_SlowStartConfig)(nil), // 17: envoy.config.cluster.v3.Cluster.SlowStartConfig - (*Cluster_RoundRobinLbConfig)(nil), // 18: envoy.config.cluster.v3.Cluster.RoundRobinLbConfig - (*Cluster_LeastRequestLbConfig)(nil), // 19: envoy.config.cluster.v3.Cluster.LeastRequestLbConfig - (*Cluster_RingHashLbConfig)(nil), // 20: envoy.config.cluster.v3.Cluster.RingHashLbConfig - (*Cluster_MaglevLbConfig)(nil), // 21: envoy.config.cluster.v3.Cluster.MaglevLbConfig - (*Cluster_OriginalDstLbConfig)(nil), // 22: envoy.config.cluster.v3.Cluster.OriginalDstLbConfig - (*Cluster_CommonLbConfig)(nil), // 23: envoy.config.cluster.v3.Cluster.CommonLbConfig - (*Cluster_RefreshRate)(nil), // 24: envoy.config.cluster.v3.Cluster.RefreshRate - (*Cluster_PreconnectPolicy)(nil), // 25: envoy.config.cluster.v3.Cluster.PreconnectPolicy - nil, // 26: envoy.config.cluster.v3.Cluster.TypedExtensionProtocolOptionsEntry - (*Cluster_LbSubsetConfig_LbSubsetSelector)(nil), // 27: envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector - (*Cluster_CommonLbConfig_ZoneAwareLbConfig)(nil), // 28: envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig - (*Cluster_CommonLbConfig_LocalityWeightedLbConfig)(nil), // 29: envoy.config.cluster.v3.Cluster.CommonLbConfig.LocalityWeightedLbConfig - (*Cluster_CommonLbConfig_ConsistentHashingLbConfig)(nil), // 30: envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig - (*LoadBalancingPolicy_Policy)(nil), // 31: envoy.config.cluster.v3.LoadBalancingPolicy.Policy - (*v3.CollectionEntry)(nil), // 32: xds.core.v3.CollectionEntry - (*duration.Duration)(nil), // 33: google.protobuf.Duration - (*wrappers.UInt32Value)(nil), // 34: google.protobuf.UInt32Value - (*v31.ClusterLoadAssignment)(nil), // 35: envoy.config.endpoint.v3.ClusterLoadAssignment - (*v32.HealthCheck)(nil), // 36: envoy.config.core.v3.HealthCheck - (*CircuitBreakers)(nil), // 37: envoy.config.cluster.v3.CircuitBreakers - (*v32.UpstreamHttpProtocolOptions)(nil), // 38: envoy.config.core.v3.UpstreamHttpProtocolOptions - (*v32.HttpProtocolOptions)(nil), // 39: envoy.config.core.v3.HttpProtocolOptions - (*v32.Http1ProtocolOptions)(nil), // 40: envoy.config.core.v3.Http1ProtocolOptions - (*v32.Http2ProtocolOptions)(nil), // 41: envoy.config.core.v3.Http2ProtocolOptions - (*v32.Address)(nil), // 42: envoy.config.core.v3.Address - (*v32.DnsResolutionConfig)(nil), // 43: envoy.config.core.v3.DnsResolutionConfig - (*v32.TypedExtensionConfig)(nil), // 44: envoy.config.core.v3.TypedExtensionConfig - (*wrappers.BoolValue)(nil), // 45: google.protobuf.BoolValue - (*OutlierDetection)(nil), // 46: envoy.config.cluster.v3.OutlierDetection - (*v32.BindConfig)(nil), // 47: 
envoy.config.core.v3.BindConfig - (*v32.TransportSocket)(nil), // 48: envoy.config.core.v3.TransportSocket - (*v32.Metadata)(nil), // 49: envoy.config.core.v3.Metadata - (*Filter)(nil), // 50: envoy.config.cluster.v3.Filter - (*v32.ConfigSource)(nil), // 51: envoy.config.core.v3.ConfigSource - (*v32.TcpKeepalive)(nil), // 52: envoy.config.core.v3.TcpKeepalive - (*_struct.Struct)(nil), // 53: google.protobuf.Struct - (*any1.Any)(nil), // 54: google.protobuf.Any - (*v32.RuntimeDouble)(nil), // 55: envoy.config.core.v3.RuntimeDouble - (*v33.Percent)(nil), // 56: envoy.type.v3.Percent - (*wrappers.UInt64Value)(nil), // 57: google.protobuf.UInt64Value - (*v34.MetadataKey)(nil), // 58: envoy.type.metadata.v3.MetadataKey - (*v32.HealthStatusSet)(nil), // 59: envoy.config.core.v3.HealthStatusSet - (*wrappers.DoubleValue)(nil), // 60: google.protobuf.DoubleValue + (UpstreamConnectionOptions_FirstAddressFamilyVersion)(0), // 8: envoy.config.cluster.v3.UpstreamConnectionOptions.FirstAddressFamilyVersion + (*ClusterCollection)(nil), // 9: envoy.config.cluster.v3.ClusterCollection + (*Cluster)(nil), // 10: envoy.config.cluster.v3.Cluster + (*LoadBalancingPolicy)(nil), // 11: envoy.config.cluster.v3.LoadBalancingPolicy + (*UpstreamConnectionOptions)(nil), // 12: envoy.config.cluster.v3.UpstreamConnectionOptions + (*TrackClusterStats)(nil), // 13: envoy.config.cluster.v3.TrackClusterStats + (*Cluster_TransportSocketMatch)(nil), // 14: envoy.config.cluster.v3.Cluster.TransportSocketMatch + (*Cluster_CustomClusterType)(nil), // 15: envoy.config.cluster.v3.Cluster.CustomClusterType + (*Cluster_EdsClusterConfig)(nil), // 16: envoy.config.cluster.v3.Cluster.EdsClusterConfig + (*Cluster_LbSubsetConfig)(nil), // 17: envoy.config.cluster.v3.Cluster.LbSubsetConfig + (*Cluster_SlowStartConfig)(nil), // 18: envoy.config.cluster.v3.Cluster.SlowStartConfig + (*Cluster_RoundRobinLbConfig)(nil), // 19: envoy.config.cluster.v3.Cluster.RoundRobinLbConfig + (*Cluster_LeastRequestLbConfig)(nil), // 20: envoy.config.cluster.v3.Cluster.LeastRequestLbConfig + (*Cluster_RingHashLbConfig)(nil), // 21: envoy.config.cluster.v3.Cluster.RingHashLbConfig + (*Cluster_MaglevLbConfig)(nil), // 22: envoy.config.cluster.v3.Cluster.MaglevLbConfig + (*Cluster_OriginalDstLbConfig)(nil), // 23: envoy.config.cluster.v3.Cluster.OriginalDstLbConfig + (*Cluster_CommonLbConfig)(nil), // 24: envoy.config.cluster.v3.Cluster.CommonLbConfig + (*Cluster_RefreshRate)(nil), // 25: envoy.config.cluster.v3.Cluster.RefreshRate + (*Cluster_PreconnectPolicy)(nil), // 26: envoy.config.cluster.v3.Cluster.PreconnectPolicy + nil, // 27: envoy.config.cluster.v3.Cluster.TypedExtensionProtocolOptionsEntry + (*Cluster_LbSubsetConfig_LbSubsetSelector)(nil), // 28: envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector + (*Cluster_CommonLbConfig_ZoneAwareLbConfig)(nil), // 29: envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig + (*Cluster_CommonLbConfig_LocalityWeightedLbConfig)(nil), // 30: envoy.config.cluster.v3.Cluster.CommonLbConfig.LocalityWeightedLbConfig + (*Cluster_CommonLbConfig_ConsistentHashingLbConfig)(nil), // 31: envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig + (*LoadBalancingPolicy_Policy)(nil), // 32: envoy.config.cluster.v3.LoadBalancingPolicy.Policy + (*UpstreamConnectionOptions_HappyEyeballsConfig)(nil), // 33: envoy.config.cluster.v3.UpstreamConnectionOptions.HappyEyeballsConfig + (*v3.CollectionEntry)(nil), // 34: xds.core.v3.CollectionEntry + (*durationpb.Duration)(nil), // 35: 
google.protobuf.Duration + (*wrapperspb.UInt32Value)(nil), // 36: google.protobuf.UInt32Value + (*v31.ClusterLoadAssignment)(nil), // 37: envoy.config.endpoint.v3.ClusterLoadAssignment + (*v32.HealthCheck)(nil), // 38: envoy.config.core.v3.HealthCheck + (*CircuitBreakers)(nil), // 39: envoy.config.cluster.v3.CircuitBreakers + (*v32.UpstreamHttpProtocolOptions)(nil), // 40: envoy.config.core.v3.UpstreamHttpProtocolOptions + (*v32.HttpProtocolOptions)(nil), // 41: envoy.config.core.v3.HttpProtocolOptions + (*v32.Http1ProtocolOptions)(nil), // 42: envoy.config.core.v3.Http1ProtocolOptions + (*v32.Http2ProtocolOptions)(nil), // 43: envoy.config.core.v3.Http2ProtocolOptions + (*v32.Address)(nil), // 44: envoy.config.core.v3.Address + (*v32.DnsResolutionConfig)(nil), // 45: envoy.config.core.v3.DnsResolutionConfig + (*v32.TypedExtensionConfig)(nil), // 46: envoy.config.core.v3.TypedExtensionConfig + (*wrapperspb.BoolValue)(nil), // 47: google.protobuf.BoolValue + (*OutlierDetection)(nil), // 48: envoy.config.cluster.v3.OutlierDetection + (*v32.BindConfig)(nil), // 49: envoy.config.core.v3.BindConfig + (*v32.TransportSocket)(nil), // 50: envoy.config.core.v3.TransportSocket + (*v32.Metadata)(nil), // 51: envoy.config.core.v3.Metadata + (*Filter)(nil), // 52: envoy.config.cluster.v3.Filter + (*v32.ConfigSource)(nil), // 53: envoy.config.core.v3.ConfigSource + (*v32.TcpKeepalive)(nil), // 54: envoy.config.core.v3.TcpKeepalive + (*structpb.Struct)(nil), // 55: google.protobuf.Struct + (*anypb.Any)(nil), // 56: google.protobuf.Any + (*v32.RuntimeDouble)(nil), // 57: envoy.config.core.v3.RuntimeDouble + (*v33.Percent)(nil), // 58: envoy.type.v3.Percent + (*wrapperspb.UInt64Value)(nil), // 59: google.protobuf.UInt64Value + (*v34.MetadataKey)(nil), // 60: envoy.type.metadata.v3.MetadataKey + (*v32.HealthStatusSet)(nil), // 61: envoy.config.core.v3.HealthStatusSet + (*wrapperspb.DoubleValue)(nil), // 62: google.protobuf.DoubleValue } var file_envoy_config_cluster_v3_cluster_proto_depIdxs = []int32{ - 32, // 0: envoy.config.cluster.v3.ClusterCollection.entries:type_name -> xds.core.v3.CollectionEntry - 13, // 1: envoy.config.cluster.v3.Cluster.transport_socket_matches:type_name -> envoy.config.cluster.v3.Cluster.TransportSocketMatch + 34, // 0: envoy.config.cluster.v3.ClusterCollection.entries:type_name -> xds.core.v3.CollectionEntry + 14, // 1: envoy.config.cluster.v3.Cluster.transport_socket_matches:type_name -> envoy.config.cluster.v3.Cluster.TransportSocketMatch 0, // 2: envoy.config.cluster.v3.Cluster.type:type_name -> envoy.config.cluster.v3.Cluster.DiscoveryType - 14, // 3: envoy.config.cluster.v3.Cluster.cluster_type:type_name -> envoy.config.cluster.v3.Cluster.CustomClusterType - 15, // 4: envoy.config.cluster.v3.Cluster.eds_cluster_config:type_name -> envoy.config.cluster.v3.Cluster.EdsClusterConfig - 33, // 5: envoy.config.cluster.v3.Cluster.connect_timeout:type_name -> google.protobuf.Duration - 34, // 6: envoy.config.cluster.v3.Cluster.per_connection_buffer_limit_bytes:type_name -> google.protobuf.UInt32Value + 15, // 3: envoy.config.cluster.v3.Cluster.cluster_type:type_name -> envoy.config.cluster.v3.Cluster.CustomClusterType + 16, // 4: envoy.config.cluster.v3.Cluster.eds_cluster_config:type_name -> envoy.config.cluster.v3.Cluster.EdsClusterConfig + 35, // 5: envoy.config.cluster.v3.Cluster.connect_timeout:type_name -> google.protobuf.Duration + 36, // 6: envoy.config.cluster.v3.Cluster.per_connection_buffer_limit_bytes:type_name -> google.protobuf.UInt32Value 1, // 7: 
envoy.config.cluster.v3.Cluster.lb_policy:type_name -> envoy.config.cluster.v3.Cluster.LbPolicy - 35, // 8: envoy.config.cluster.v3.Cluster.load_assignment:type_name -> envoy.config.endpoint.v3.ClusterLoadAssignment - 36, // 9: envoy.config.cluster.v3.Cluster.health_checks:type_name -> envoy.config.core.v3.HealthCheck - 34, // 10: envoy.config.cluster.v3.Cluster.max_requests_per_connection:type_name -> google.protobuf.UInt32Value - 37, // 11: envoy.config.cluster.v3.Cluster.circuit_breakers:type_name -> envoy.config.cluster.v3.CircuitBreakers - 38, // 12: envoy.config.cluster.v3.Cluster.upstream_http_protocol_options:type_name -> envoy.config.core.v3.UpstreamHttpProtocolOptions - 39, // 13: envoy.config.cluster.v3.Cluster.common_http_protocol_options:type_name -> envoy.config.core.v3.HttpProtocolOptions - 40, // 14: envoy.config.cluster.v3.Cluster.http_protocol_options:type_name -> envoy.config.core.v3.Http1ProtocolOptions - 41, // 15: envoy.config.cluster.v3.Cluster.http2_protocol_options:type_name -> envoy.config.core.v3.Http2ProtocolOptions - 26, // 16: envoy.config.cluster.v3.Cluster.typed_extension_protocol_options:type_name -> envoy.config.cluster.v3.Cluster.TypedExtensionProtocolOptionsEntry - 33, // 17: envoy.config.cluster.v3.Cluster.dns_refresh_rate:type_name -> google.protobuf.Duration - 24, // 18: envoy.config.cluster.v3.Cluster.dns_failure_refresh_rate:type_name -> envoy.config.cluster.v3.Cluster.RefreshRate + 37, // 8: envoy.config.cluster.v3.Cluster.load_assignment:type_name -> envoy.config.endpoint.v3.ClusterLoadAssignment + 38, // 9: envoy.config.cluster.v3.Cluster.health_checks:type_name -> envoy.config.core.v3.HealthCheck + 36, // 10: envoy.config.cluster.v3.Cluster.max_requests_per_connection:type_name -> google.protobuf.UInt32Value + 39, // 11: envoy.config.cluster.v3.Cluster.circuit_breakers:type_name -> envoy.config.cluster.v3.CircuitBreakers + 40, // 12: envoy.config.cluster.v3.Cluster.upstream_http_protocol_options:type_name -> envoy.config.core.v3.UpstreamHttpProtocolOptions + 41, // 13: envoy.config.cluster.v3.Cluster.common_http_protocol_options:type_name -> envoy.config.core.v3.HttpProtocolOptions + 42, // 14: envoy.config.cluster.v3.Cluster.http_protocol_options:type_name -> envoy.config.core.v3.Http1ProtocolOptions + 43, // 15: envoy.config.cluster.v3.Cluster.http2_protocol_options:type_name -> envoy.config.core.v3.Http2ProtocolOptions + 27, // 16: envoy.config.cluster.v3.Cluster.typed_extension_protocol_options:type_name -> envoy.config.cluster.v3.Cluster.TypedExtensionProtocolOptionsEntry + 35, // 17: envoy.config.cluster.v3.Cluster.dns_refresh_rate:type_name -> google.protobuf.Duration + 25, // 18: envoy.config.cluster.v3.Cluster.dns_failure_refresh_rate:type_name -> envoy.config.cluster.v3.Cluster.RefreshRate 2, // 19: envoy.config.cluster.v3.Cluster.dns_lookup_family:type_name -> envoy.config.cluster.v3.Cluster.DnsLookupFamily - 42, // 20: envoy.config.cluster.v3.Cluster.dns_resolvers:type_name -> envoy.config.core.v3.Address - 43, // 21: envoy.config.cluster.v3.Cluster.dns_resolution_config:type_name -> envoy.config.core.v3.DnsResolutionConfig - 44, // 22: envoy.config.cluster.v3.Cluster.typed_dns_resolver_config:type_name -> envoy.config.core.v3.TypedExtensionConfig - 45, // 23: envoy.config.cluster.v3.Cluster.wait_for_warm_on_init:type_name -> google.protobuf.BoolValue - 46, // 24: envoy.config.cluster.v3.Cluster.outlier_detection:type_name -> envoy.config.cluster.v3.OutlierDetection - 33, // 25: 
envoy.config.cluster.v3.Cluster.cleanup_interval:type_name -> google.protobuf.Duration - 47, // 26: envoy.config.cluster.v3.Cluster.upstream_bind_config:type_name -> envoy.config.core.v3.BindConfig - 16, // 27: envoy.config.cluster.v3.Cluster.lb_subset_config:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig - 20, // 28: envoy.config.cluster.v3.Cluster.ring_hash_lb_config:type_name -> envoy.config.cluster.v3.Cluster.RingHashLbConfig - 21, // 29: envoy.config.cluster.v3.Cluster.maglev_lb_config:type_name -> envoy.config.cluster.v3.Cluster.MaglevLbConfig - 22, // 30: envoy.config.cluster.v3.Cluster.original_dst_lb_config:type_name -> envoy.config.cluster.v3.Cluster.OriginalDstLbConfig - 19, // 31: envoy.config.cluster.v3.Cluster.least_request_lb_config:type_name -> envoy.config.cluster.v3.Cluster.LeastRequestLbConfig - 18, // 32: envoy.config.cluster.v3.Cluster.round_robin_lb_config:type_name -> envoy.config.cluster.v3.Cluster.RoundRobinLbConfig - 23, // 33: envoy.config.cluster.v3.Cluster.common_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig - 48, // 34: envoy.config.cluster.v3.Cluster.transport_socket:type_name -> envoy.config.core.v3.TransportSocket - 49, // 35: envoy.config.cluster.v3.Cluster.metadata:type_name -> envoy.config.core.v3.Metadata + 44, // 20: envoy.config.cluster.v3.Cluster.dns_resolvers:type_name -> envoy.config.core.v3.Address + 45, // 21: envoy.config.cluster.v3.Cluster.dns_resolution_config:type_name -> envoy.config.core.v3.DnsResolutionConfig + 46, // 22: envoy.config.cluster.v3.Cluster.typed_dns_resolver_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 47, // 23: envoy.config.cluster.v3.Cluster.wait_for_warm_on_init:type_name -> google.protobuf.BoolValue + 48, // 24: envoy.config.cluster.v3.Cluster.outlier_detection:type_name -> envoy.config.cluster.v3.OutlierDetection + 35, // 25: envoy.config.cluster.v3.Cluster.cleanup_interval:type_name -> google.protobuf.Duration + 49, // 26: envoy.config.cluster.v3.Cluster.upstream_bind_config:type_name -> envoy.config.core.v3.BindConfig + 17, // 27: envoy.config.cluster.v3.Cluster.lb_subset_config:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig + 21, // 28: envoy.config.cluster.v3.Cluster.ring_hash_lb_config:type_name -> envoy.config.cluster.v3.Cluster.RingHashLbConfig + 22, // 29: envoy.config.cluster.v3.Cluster.maglev_lb_config:type_name -> envoy.config.cluster.v3.Cluster.MaglevLbConfig + 23, // 30: envoy.config.cluster.v3.Cluster.original_dst_lb_config:type_name -> envoy.config.cluster.v3.Cluster.OriginalDstLbConfig + 20, // 31: envoy.config.cluster.v3.Cluster.least_request_lb_config:type_name -> envoy.config.cluster.v3.Cluster.LeastRequestLbConfig + 19, // 32: envoy.config.cluster.v3.Cluster.round_robin_lb_config:type_name -> envoy.config.cluster.v3.Cluster.RoundRobinLbConfig + 24, // 33: envoy.config.cluster.v3.Cluster.common_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig + 50, // 34: envoy.config.cluster.v3.Cluster.transport_socket:type_name -> envoy.config.core.v3.TransportSocket + 51, // 35: envoy.config.cluster.v3.Cluster.metadata:type_name -> envoy.config.core.v3.Metadata 3, // 36: envoy.config.cluster.v3.Cluster.protocol_selection:type_name -> envoy.config.cluster.v3.Cluster.ClusterProtocolSelection - 11, // 37: envoy.config.cluster.v3.Cluster.upstream_connection_options:type_name -> envoy.config.cluster.v3.UpstreamConnectionOptions - 50, // 38: envoy.config.cluster.v3.Cluster.filters:type_name -> envoy.config.cluster.v3.Filter - 10, // 
39: envoy.config.cluster.v3.Cluster.load_balancing_policy:type_name -> envoy.config.cluster.v3.LoadBalancingPolicy - 51, // 40: envoy.config.cluster.v3.Cluster.lrs_server:type_name -> envoy.config.core.v3.ConfigSource - 44, // 41: envoy.config.cluster.v3.Cluster.upstream_config:type_name -> envoy.config.core.v3.TypedExtensionConfig - 12, // 42: envoy.config.cluster.v3.Cluster.track_cluster_stats:type_name -> envoy.config.cluster.v3.TrackClusterStats - 25, // 43: envoy.config.cluster.v3.Cluster.preconnect_policy:type_name -> envoy.config.cluster.v3.Cluster.PreconnectPolicy - 31, // 44: envoy.config.cluster.v3.LoadBalancingPolicy.policies:type_name -> envoy.config.cluster.v3.LoadBalancingPolicy.Policy - 52, // 45: envoy.config.cluster.v3.UpstreamConnectionOptions.tcp_keepalive:type_name -> envoy.config.core.v3.TcpKeepalive - 53, // 46: envoy.config.cluster.v3.Cluster.TransportSocketMatch.match:type_name -> google.protobuf.Struct - 48, // 47: envoy.config.cluster.v3.Cluster.TransportSocketMatch.transport_socket:type_name -> envoy.config.core.v3.TransportSocket - 54, // 48: envoy.config.cluster.v3.Cluster.CustomClusterType.typed_config:type_name -> google.protobuf.Any - 51, // 49: envoy.config.cluster.v3.Cluster.EdsClusterConfig.eds_config:type_name -> envoy.config.core.v3.ConfigSource - 4, // 50: envoy.config.cluster.v3.Cluster.LbSubsetConfig.fallback_policy:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy - 53, // 51: envoy.config.cluster.v3.Cluster.LbSubsetConfig.default_subset:type_name -> google.protobuf.Struct - 27, // 52: envoy.config.cluster.v3.Cluster.LbSubsetConfig.subset_selectors:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector - 5, // 53: envoy.config.cluster.v3.Cluster.LbSubsetConfig.metadata_fallback_policy:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetMetadataFallbackPolicy - 33, // 54: envoy.config.cluster.v3.Cluster.SlowStartConfig.slow_start_window:type_name -> google.protobuf.Duration - 55, // 55: envoy.config.cluster.v3.Cluster.SlowStartConfig.aggression:type_name -> envoy.config.core.v3.RuntimeDouble - 56, // 56: envoy.config.cluster.v3.Cluster.SlowStartConfig.min_weight_percent:type_name -> envoy.type.v3.Percent - 17, // 57: envoy.config.cluster.v3.Cluster.RoundRobinLbConfig.slow_start_config:type_name -> envoy.config.cluster.v3.Cluster.SlowStartConfig - 34, // 58: envoy.config.cluster.v3.Cluster.LeastRequestLbConfig.choice_count:type_name -> google.protobuf.UInt32Value - 55, // 59: envoy.config.cluster.v3.Cluster.LeastRequestLbConfig.active_request_bias:type_name -> envoy.config.core.v3.RuntimeDouble - 17, // 60: envoy.config.cluster.v3.Cluster.LeastRequestLbConfig.slow_start_config:type_name -> envoy.config.cluster.v3.Cluster.SlowStartConfig - 57, // 61: envoy.config.cluster.v3.Cluster.RingHashLbConfig.minimum_ring_size:type_name -> google.protobuf.UInt64Value - 7, // 62: envoy.config.cluster.v3.Cluster.RingHashLbConfig.hash_function:type_name -> envoy.config.cluster.v3.Cluster.RingHashLbConfig.HashFunction - 57, // 63: envoy.config.cluster.v3.Cluster.RingHashLbConfig.maximum_ring_size:type_name -> google.protobuf.UInt64Value - 57, // 64: envoy.config.cluster.v3.Cluster.MaglevLbConfig.table_size:type_name -> google.protobuf.UInt64Value - 34, // 65: envoy.config.cluster.v3.Cluster.OriginalDstLbConfig.upstream_port_override:type_name -> google.protobuf.UInt32Value - 58, // 66: envoy.config.cluster.v3.Cluster.OriginalDstLbConfig.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey 
- 56, // 67: envoy.config.cluster.v3.Cluster.CommonLbConfig.healthy_panic_threshold:type_name -> envoy.type.v3.Percent - 28, // 68: envoy.config.cluster.v3.Cluster.CommonLbConfig.zone_aware_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig - 29, // 69: envoy.config.cluster.v3.Cluster.CommonLbConfig.locality_weighted_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig.LocalityWeightedLbConfig - 33, // 70: envoy.config.cluster.v3.Cluster.CommonLbConfig.update_merge_window:type_name -> google.protobuf.Duration - 30, // 71: envoy.config.cluster.v3.Cluster.CommonLbConfig.consistent_hashing_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig - 59, // 72: envoy.config.cluster.v3.Cluster.CommonLbConfig.override_host_status:type_name -> envoy.config.core.v3.HealthStatusSet - 33, // 73: envoy.config.cluster.v3.Cluster.RefreshRate.base_interval:type_name -> google.protobuf.Duration - 33, // 74: envoy.config.cluster.v3.Cluster.RefreshRate.max_interval:type_name -> google.protobuf.Duration - 60, // 75: envoy.config.cluster.v3.Cluster.PreconnectPolicy.per_upstream_preconnect_ratio:type_name -> google.protobuf.DoubleValue - 60, // 76: envoy.config.cluster.v3.Cluster.PreconnectPolicy.predictive_preconnect_ratio:type_name -> google.protobuf.DoubleValue - 54, // 77: envoy.config.cluster.v3.Cluster.TypedExtensionProtocolOptionsEntry.value:type_name -> google.protobuf.Any - 6, // 78: envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.fallback_policy:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.LbSubsetSelectorFallbackPolicy - 56, // 79: envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig.routing_enabled:type_name -> envoy.type.v3.Percent - 57, // 80: envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig.min_cluster_size:type_name -> google.protobuf.UInt64Value - 34, // 81: envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig.hash_balance_factor:type_name -> google.protobuf.UInt32Value - 44, // 82: envoy.config.cluster.v3.LoadBalancingPolicy.Policy.typed_extension_config:type_name -> envoy.config.core.v3.TypedExtensionConfig - 83, // [83:83] is the sub-list for method output_type - 83, // [83:83] is the sub-list for method input_type - 83, // [83:83] is the sub-list for extension type_name - 83, // [83:83] is the sub-list for extension extendee - 0, // [0:83] is the sub-list for field type_name + 12, // 37: envoy.config.cluster.v3.Cluster.upstream_connection_options:type_name -> envoy.config.cluster.v3.UpstreamConnectionOptions + 52, // 38: envoy.config.cluster.v3.Cluster.filters:type_name -> envoy.config.cluster.v3.Filter + 11, // 39: envoy.config.cluster.v3.Cluster.load_balancing_policy:type_name -> envoy.config.cluster.v3.LoadBalancingPolicy + 53, // 40: envoy.config.cluster.v3.Cluster.lrs_server:type_name -> envoy.config.core.v3.ConfigSource + 46, // 41: envoy.config.cluster.v3.Cluster.upstream_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 13, // 42: envoy.config.cluster.v3.Cluster.track_cluster_stats:type_name -> envoy.config.cluster.v3.TrackClusterStats + 26, // 43: envoy.config.cluster.v3.Cluster.preconnect_policy:type_name -> envoy.config.cluster.v3.Cluster.PreconnectPolicy + 32, // 44: envoy.config.cluster.v3.LoadBalancingPolicy.policies:type_name -> envoy.config.cluster.v3.LoadBalancingPolicy.Policy + 54, // 45: envoy.config.cluster.v3.UpstreamConnectionOptions.tcp_keepalive:type_name -> 
envoy.config.core.v3.TcpKeepalive + 33, // 46: envoy.config.cluster.v3.UpstreamConnectionOptions.happy_eyeballs_config:type_name -> envoy.config.cluster.v3.UpstreamConnectionOptions.HappyEyeballsConfig + 55, // 47: envoy.config.cluster.v3.Cluster.TransportSocketMatch.match:type_name -> google.protobuf.Struct + 50, // 48: envoy.config.cluster.v3.Cluster.TransportSocketMatch.transport_socket:type_name -> envoy.config.core.v3.TransportSocket + 56, // 49: envoy.config.cluster.v3.Cluster.CustomClusterType.typed_config:type_name -> google.protobuf.Any + 53, // 50: envoy.config.cluster.v3.Cluster.EdsClusterConfig.eds_config:type_name -> envoy.config.core.v3.ConfigSource + 4, // 51: envoy.config.cluster.v3.Cluster.LbSubsetConfig.fallback_policy:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy + 55, // 52: envoy.config.cluster.v3.Cluster.LbSubsetConfig.default_subset:type_name -> google.protobuf.Struct + 28, // 53: envoy.config.cluster.v3.Cluster.LbSubsetConfig.subset_selectors:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector + 5, // 54: envoy.config.cluster.v3.Cluster.LbSubsetConfig.metadata_fallback_policy:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetMetadataFallbackPolicy + 35, // 55: envoy.config.cluster.v3.Cluster.SlowStartConfig.slow_start_window:type_name -> google.protobuf.Duration + 57, // 56: envoy.config.cluster.v3.Cluster.SlowStartConfig.aggression:type_name -> envoy.config.core.v3.RuntimeDouble + 58, // 57: envoy.config.cluster.v3.Cluster.SlowStartConfig.min_weight_percent:type_name -> envoy.type.v3.Percent + 18, // 58: envoy.config.cluster.v3.Cluster.RoundRobinLbConfig.slow_start_config:type_name -> envoy.config.cluster.v3.Cluster.SlowStartConfig + 36, // 59: envoy.config.cluster.v3.Cluster.LeastRequestLbConfig.choice_count:type_name -> google.protobuf.UInt32Value + 57, // 60: envoy.config.cluster.v3.Cluster.LeastRequestLbConfig.active_request_bias:type_name -> envoy.config.core.v3.RuntimeDouble + 18, // 61: envoy.config.cluster.v3.Cluster.LeastRequestLbConfig.slow_start_config:type_name -> envoy.config.cluster.v3.Cluster.SlowStartConfig + 59, // 62: envoy.config.cluster.v3.Cluster.RingHashLbConfig.minimum_ring_size:type_name -> google.protobuf.UInt64Value + 7, // 63: envoy.config.cluster.v3.Cluster.RingHashLbConfig.hash_function:type_name -> envoy.config.cluster.v3.Cluster.RingHashLbConfig.HashFunction + 59, // 64: envoy.config.cluster.v3.Cluster.RingHashLbConfig.maximum_ring_size:type_name -> google.protobuf.UInt64Value + 59, // 65: envoy.config.cluster.v3.Cluster.MaglevLbConfig.table_size:type_name -> google.protobuf.UInt64Value + 36, // 66: envoy.config.cluster.v3.Cluster.OriginalDstLbConfig.upstream_port_override:type_name -> google.protobuf.UInt32Value + 60, // 67: envoy.config.cluster.v3.Cluster.OriginalDstLbConfig.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey + 58, // 68: envoy.config.cluster.v3.Cluster.CommonLbConfig.healthy_panic_threshold:type_name -> envoy.type.v3.Percent + 29, // 69: envoy.config.cluster.v3.Cluster.CommonLbConfig.zone_aware_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig + 30, // 70: envoy.config.cluster.v3.Cluster.CommonLbConfig.locality_weighted_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig.LocalityWeightedLbConfig + 35, // 71: envoy.config.cluster.v3.Cluster.CommonLbConfig.update_merge_window:type_name -> google.protobuf.Duration + 31, // 72: 
envoy.config.cluster.v3.Cluster.CommonLbConfig.consistent_hashing_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig + 61, // 73: envoy.config.cluster.v3.Cluster.CommonLbConfig.override_host_status:type_name -> envoy.config.core.v3.HealthStatusSet + 35, // 74: envoy.config.cluster.v3.Cluster.RefreshRate.base_interval:type_name -> google.protobuf.Duration + 35, // 75: envoy.config.cluster.v3.Cluster.RefreshRate.max_interval:type_name -> google.protobuf.Duration + 62, // 76: envoy.config.cluster.v3.Cluster.PreconnectPolicy.per_upstream_preconnect_ratio:type_name -> google.protobuf.DoubleValue + 62, // 77: envoy.config.cluster.v3.Cluster.PreconnectPolicy.predictive_preconnect_ratio:type_name -> google.protobuf.DoubleValue + 56, // 78: envoy.config.cluster.v3.Cluster.TypedExtensionProtocolOptionsEntry.value:type_name -> google.protobuf.Any + 6, // 79: envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.fallback_policy:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.LbSubsetSelectorFallbackPolicy + 58, // 80: envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig.routing_enabled:type_name -> envoy.type.v3.Percent + 59, // 81: envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig.min_cluster_size:type_name -> google.protobuf.UInt64Value + 36, // 82: envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig.hash_balance_factor:type_name -> google.protobuf.UInt32Value + 46, // 83: envoy.config.cluster.v3.LoadBalancingPolicy.Policy.typed_extension_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 8, // 84: envoy.config.cluster.v3.UpstreamConnectionOptions.HappyEyeballsConfig.first_address_family_version:type_name -> envoy.config.cluster.v3.UpstreamConnectionOptions.FirstAddressFamilyVersion + 36, // 85: envoy.config.cluster.v3.UpstreamConnectionOptions.HappyEyeballsConfig.first_address_family_count:type_name -> google.protobuf.UInt32Value + 86, // [86:86] is the sub-list for method output_type + 86, // [86:86] is the sub-list for method input_type + 86, // [86:86] is the sub-list for extension type_name + 86, // [86:86] is the sub-list for extension extendee + 0, // [0:86] is the sub-list for field type_name } func init() { file_envoy_config_cluster_v3_cluster_proto_init() } @@ -4403,6 +4587,18 @@ func file_envoy_config_cluster_v3_cluster_proto_init() { return nil } } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpstreamConnectionOptions_HappyEyeballsConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } file_envoy_config_cluster_v3_cluster_proto_msgTypes[1].OneofWrappers = []interface{}{ (*Cluster_Type)(nil), @@ -4422,8 +4618,8 @@ func file_envoy_config_cluster_v3_cluster_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_config_cluster_v3_cluster_proto_rawDesc, - NumEnums: 8, - NumMessages: 24, + NumEnums: 9, + NumMessages: 25, NumExtensions: 0, NumServices: 0, }, diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.validate.go index e2c95efa0e1..7d33f5e84e8 100644 --- 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/cluster/v3/cluster.proto @@ -1777,6 +1778,35 @@ func (m *UpstreamConnectionOptions) validate(all bool) error { // no validation rules for SetLocalInterfaceNameOnUpstreamConnections + if all { + switch v := interface{}(m.GetHappyEyeballsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamConnectionOptionsValidationError{ + field: "HappyEyeballsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamConnectionOptionsValidationError{ + field: "HappyEyeballsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHappyEyeballsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamConnectionOptionsValidationError{ + field: "HappyEyeballsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + if len(errors) > 0 { return UpstreamConnectionOptionsMultiError(errors) } @@ -4784,3 +4814,129 @@ var _ interface { Cause() error ErrorName() string } = LoadBalancingPolicy_PolicyValidationError{} + +// Validate checks the field values on +// UpstreamConnectionOptions_HappyEyeballsConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *UpstreamConnectionOptions_HappyEyeballsConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// UpstreamConnectionOptions_HappyEyeballsConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// UpstreamConnectionOptions_HappyEyeballsConfigMultiError, or nil if none found. +func (m *UpstreamConnectionOptions_HappyEyeballsConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *UpstreamConnectionOptions_HappyEyeballsConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for FirstAddressFamilyVersion + + if wrapper := m.GetFirstAddressFamilyCount(); wrapper != nil { + + if wrapper.GetValue() < 1 { + err := UpstreamConnectionOptions_HappyEyeballsConfigValidationError{ + field: "FirstAddressFamilyCount", + reason: "value must be greater than or equal to 1", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return UpstreamConnectionOptions_HappyEyeballsConfigMultiError(errors) + } + + return nil +} + +// UpstreamConnectionOptions_HappyEyeballsConfigMultiError is an error wrapping +// multiple validation errors returned by +// UpstreamConnectionOptions_HappyEyeballsConfig.ValidateAll() if the +// designated constraints aren't met. +type UpstreamConnectionOptions_HappyEyeballsConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m UpstreamConnectionOptions_HappyEyeballsConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UpstreamConnectionOptions_HappyEyeballsConfigMultiError) AllErrors() []error { return m } + +// UpstreamConnectionOptions_HappyEyeballsConfigValidationError is the +// validation error returned by +// UpstreamConnectionOptions_HappyEyeballsConfig.Validate if the designated +// constraints aren't met. +type UpstreamConnectionOptions_HappyEyeballsConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UpstreamConnectionOptions_HappyEyeballsConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UpstreamConnectionOptions_HappyEyeballsConfigValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e UpstreamConnectionOptions_HappyEyeballsConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UpstreamConnectionOptions_HappyEyeballsConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UpstreamConnectionOptions_HappyEyeballsConfigValidationError) ErrorName() string { + return "UpstreamConnectionOptions_HappyEyeballsConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e UpstreamConnectionOptions_HappyEyeballsConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUpstreamConnectionOptions_HappyEyeballsConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UpstreamConnectionOptions_HappyEyeballsConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UpstreamConnectionOptions_HappyEyeballsConfigValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster_vtproto.pb.go new file mode 100644 index 00000000000..4e2c50887e5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster_vtproto.pb.go @@ -0,0 +1,3439 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/cluster/v3/cluster.proto + +package clusterv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ClusterCollection) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterCollection) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterCollection) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Entries != nil { + if vtmsg, ok := interface{}(m.Entries).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Entries) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_TransportSocketMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_TransportSocketMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_TransportSocketMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TransportSocket != nil { + if vtmsg, ok := interface{}(m.TransportSocket).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TransportSocket) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.Match != nil { + size, err := (*structpb.Struct)(m.Match).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_CustomClusterType) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_CustomClusterType) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_CustomClusterType) 
MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_EdsClusterConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_EdsClusterConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_EdsClusterConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ServiceName) > 0 { + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x12 + } + if m.EdsConfig != nil { + if vtmsg, ok := interface{}(m.EdsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.EdsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_LbSubsetConfig_LbSubsetSelector) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_LbSubsetConfig_LbSubsetSelector) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_LbSubsetConfig_LbSubsetSelector) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SingleHostPerSubset { + i-- + if m.SingleHostPerSubset { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.FallbackKeysSubset) > 0 { + for iNdEx := len(m.FallbackKeysSubset) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.FallbackKeysSubset[iNdEx]) + copy(dAtA[i:], m.FallbackKeysSubset[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.FallbackKeysSubset[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.FallbackPolicy != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.FallbackPolicy)) + i-- + dAtA[i] = 0x10 + } + if len(m.Keys) > 0 { + for iNdEx := len(m.Keys) - 1; iNdEx >= 0; iNdEx-- { 
+ i -= len(m.Keys[iNdEx]) + copy(dAtA[i:], m.Keys[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Keys[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Cluster_LbSubsetConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_LbSubsetConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_LbSubsetConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MetadataFallbackPolicy != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MetadataFallbackPolicy)) + i-- + dAtA[i] = 0x40 + } + if m.ListAsAny { + i-- + if m.ListAsAny { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.PanicModeAny { + i-- + if m.PanicModeAny { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.ScaleLocalityWeight { + i-- + if m.ScaleLocalityWeight { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.LocalityWeightAware { + i-- + if m.LocalityWeightAware { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.SubsetSelectors) > 0 { + for iNdEx := len(m.SubsetSelectors) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.SubsetSelectors[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.DefaultSubset != nil { + size, err := (*structpb.Struct)(m.DefaultSubset).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.FallbackPolicy != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.FallbackPolicy)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Cluster_SlowStartConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_SlowStartConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_SlowStartConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MinWeightPercent != nil { + if vtmsg, ok := interface{}(m.MinWeightPercent).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MinWeightPercent) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } 
+ if m.Aggression != nil { + if vtmsg, ok := interface{}(m.Aggression).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Aggression) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.SlowStartWindow != nil { + size, err := (*durationpb.Duration)(m.SlowStartWindow).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_RoundRobinLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_RoundRobinLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_RoundRobinLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SlowStartConfig != nil { + size, err := m.SlowStartConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_LeastRequestLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_LeastRequestLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_LeastRequestLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SlowStartConfig != nil { + size, err := m.SlowStartConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.ActiveRequestBias != nil { + if vtmsg, ok := interface{}(m.ActiveRequestBias).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ActiveRequestBias) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.ChoiceCount != nil { + size, err := (*wrapperspb.UInt32Value)(m.ChoiceCount).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_RingHashLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_RingHashLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_RingHashLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaximumRingSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.MaximumRingSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.HashFunction != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HashFunction)) + i-- + dAtA[i] = 0x18 + } + if m.MinimumRingSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.MinimumRingSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_MaglevLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_MaglevLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_MaglevLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TableSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.TableSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_OriginalDstLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_OriginalDstLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_OriginalDstLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MetadataKey != nil { + if vtmsg, ok := interface{}(m.MetadataKey).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) 
+ } else { + encoded, err := proto.Marshal(m.MetadataKey) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.UpstreamPortOverride != nil { + size, err := (*wrapperspb.UInt32Value)(m.UpstreamPortOverride).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.HttpHeaderName) > 0 { + i -= len(m.HttpHeaderName) + copy(dAtA[i:], m.HttpHeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HttpHeaderName))) + i-- + dAtA[i] = 0x12 + } + if m.UseHttpHeader { + i-- + if m.UseHttpHeader { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.FailTrafficOnPanic { + i-- + if m.FailTrafficOnPanic { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.MinClusterSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.MinClusterSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.RoutingEnabled != nil { + if vtmsg, ok := interface{}(m.RoutingEnabled).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RoutingEnabled) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *Cluster_CommonLbConfig_ConsistentHashingLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == 
nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_CommonLbConfig_ConsistentHashingLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_CommonLbConfig_ConsistentHashingLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.HashBalanceFactor != nil { + size, err := (*wrapperspb.UInt32Value)(m.HashBalanceFactor).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.UseHostnameForHashing { + i-- + if m.UseHostnameForHashing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Cluster_CommonLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_CommonLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_CommonLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.OverrideHostStatus != nil { + if vtmsg, ok := interface{}(m.OverrideHostStatus).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.OverrideHostStatus) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + if m.ConsistentHashingLbConfig != nil { + size, err := m.ConsistentHashingLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.CloseConnectionsOnHostSetChange { + i-- + if m.CloseConnectionsOnHostSetChange { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.IgnoreNewHostsUntilFirstHc { + i-- + if m.IgnoreNewHostsUntilFirstHc { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.UpdateMergeWindow != nil { + size, err := (*durationpb.Duration)(m.UpdateMergeWindow).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if msg, ok := m.LocalityConfigSpecifier.(*Cluster_CommonLbConfig_LocalityWeightedLbConfig_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.LocalityConfigSpecifier.(*Cluster_CommonLbConfig_ZoneAwareLbConfig_); ok { + size, err := 
msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.HealthyPanicThreshold != nil { + if vtmsg, ok := interface{}(m.HealthyPanicThreshold).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HealthyPanicThreshold) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ZoneAwareLbConfig != nil { + size, err := m.ZoneAwareLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LocalityWeightedLbConfig != nil { + size, err := m.LocalityWeightedLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Cluster_RefreshRate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_RefreshRate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_RefreshRate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxInterval != nil { + size, err := (*durationpb.Duration)(m.MaxInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.BaseInterval != nil { + size, err := (*durationpb.Duration)(m.BaseInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_PreconnectPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } 
+ return dAtA[:n], nil +} + +func (m *Cluster_PreconnectPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_PreconnectPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PredictivePreconnectRatio != nil { + size, err := (*wrapperspb.DoubleValue)(m.PredictivePreconnectRatio).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.PerUpstreamPreconnectRatio != nil { + size, err := (*wrapperspb.DoubleValue)(m.PerUpstreamPreconnectRatio).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.LrsReportEndpointMetrics) > 0 { + for iNdEx := len(m.LrsReportEndpointMetrics) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.LrsReportEndpointMetrics[iNdEx]) + copy(dAtA[i:], m.LrsReportEndpointMetrics[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LrsReportEndpointMetrics[iNdEx]))) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xca + } + } + if msg, ok := m.LbConfig.(*Cluster_RoundRobinLbConfig_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.TypedDnsResolverConfig != nil { + if vtmsg, ok := interface{}(m.TypedDnsResolverConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TypedDnsResolverConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xba + } + if m.WaitForWarmOnInit != nil { + size, err := (*wrapperspb.BoolValue)(m.WaitForWarmOnInit).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xb2 + } + if m.DnsResolutionConfig != nil { + if vtmsg, ok := interface{}(m.DnsResolutionConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DnsResolutionConfig) + 
if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xaa + } + if msg, ok := m.LbConfig.(*Cluster_MaglevLbConfig_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.ConnectionPoolPerDownstreamConnection { + i-- + if m.ConnectionPoolPerDownstreamConnection { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x98 + } + if m.PreconnectPolicy != nil { + size, err := m.PreconnectPolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x92 + } + if m.TrackClusterStats != nil { + size, err := m.TrackClusterStats.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x8a + } + if m.UpstreamConfig != nil { + if vtmsg, ok := interface{}(m.UpstreamConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UpstreamConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x82 + } + if m.TrackTimeoutBudgets { + i-- + if m.TrackTimeoutBudgets { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xf8 + } + if m.UpstreamHttpProtocolOptions != nil { + if vtmsg, ok := interface{}(m.UpstreamHttpProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UpstreamHttpProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xf2 + } + if m.UseTcpForDnsLookups { + i-- + if m.UseTcpForDnsLookups { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xe8 + } + if m.DnsFailureRefreshRate != nil { + size, err := m.DnsFailureRefreshRate.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xe2 + } + if len(m.TransportSocketMatches) > 0 { + for iNdEx := len(m.TransportSocketMatches) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TransportSocketMatches[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xda + } + } + if m.LrsServer != nil { + if vtmsg, ok := interface{}(m.LrsServer).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + 
encoded, err := proto.Marshal(m.LrsServer) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xd2 + } + if m.LoadBalancingPolicy != nil { + size, err := m.LoadBalancingPolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xca + } + if len(m.Filters) > 0 { + for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Filters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc2 + } + } + if m.RespectDnsTtl { + i-- + if m.RespectDnsTtl { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb8 + } + if msg, ok := m.ClusterDiscoveryType.(*Cluster_ClusterType); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.LbConfig.(*Cluster_LeastRequestLbConfig_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.TypedExtensionProtocolOptions) > 0 { + for k := range m.TypedExtensionProtocolOptions { + v := m.TypedExtensionProtocolOptions[k] + baseI := i + size, err := (*anypb.Any)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + } + if msg, ok := m.LbConfig.(*Cluster_OriginalDstLbConfig_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.LoadAssignment != nil { + if vtmsg, ok := interface{}(m.LoadAssignment).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LoadAssignment) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + } + if m.IgnoreHealthOnHostRemoval { + i-- + if m.IgnoreHealthOnHostRemoval { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x80 + } + if m.CloseConnectionsOnHostHealthFailure { + i-- + if m.CloseConnectionsOnHostHealthFailure { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf8 + } + if m.UpstreamConnectionOptions != nil { + size, err := m.UpstreamConnectionOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 + } + if m.CommonHttpProtocolOptions != nil { + if vtmsg, ok := interface{}(m.CommonHttpProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 
0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CommonHttpProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + } + if len(m.AltStatName) > 0 { + i -= len(m.AltStatName) + copy(dAtA[i:], m.AltStatName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AltStatName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + if m.CommonLbConfig != nil { + size, err := m.CommonLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + if m.ProtocolSelection != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ProtocolSelection)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd0 + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + if m.TransportSocket != nil { + if vtmsg, ok := interface{}(m.TransportSocket).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TransportSocket) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if msg, ok := m.LbConfig.(*Cluster_RingHashLbConfig_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.LbSubsetConfig != nil { + size, err := m.LbSubsetConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if m.UpstreamBindConfig != nil { + if vtmsg, ok := interface{}(m.UpstreamBindConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UpstreamBindConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if m.CleanupInterval != nil { + size, err := (*durationpb.Duration)(m.CleanupInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if m.OutlierDetection != nil { + size, err := m.OutlierDetection.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err 
+ } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if len(m.DnsResolvers) > 0 { + for iNdEx := len(m.DnsResolvers) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.DnsResolvers[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DnsResolvers[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + } + if m.DnsLookupFamily != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DnsLookupFamily)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if m.DnsRefreshRate != nil { + size, err := (*durationpb.Duration)(m.DnsRefreshRate).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.Http2ProtocolOptions != nil { + if vtmsg, ok := interface{}(m.Http2ProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Http2ProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x72 + } + if m.HttpProtocolOptions != nil { + if vtmsg, ok := interface{}(m.HttpProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HttpProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + if m.CircuitBreakers != nil { + size, err := m.CircuitBreakers.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if m.MaxRequestsPerConnection != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxRequestsPerConnection).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if len(m.HealthChecks) > 0 { + for iNdEx := len(m.HealthChecks) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.HealthChecks[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HealthChecks[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + } + if m.LbPolicy != 0 { + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(m.LbPolicy)) + i-- + dAtA[i] = 0x30 + } + if m.PerConnectionBufferLimitBytes != nil { + size, err := (*wrapperspb.UInt32Value)(m.PerConnectionBufferLimitBytes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.ConnectTimeout != nil { + size, err := (*durationpb.Duration)(m.ConnectTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.EdsClusterConfig != nil { + size, err := m.EdsClusterConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if msg, ok := m.ClusterDiscoveryType.(*Cluster_Type); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_Type) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_Type) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x10 + return len(dAtA) - i, nil +} +func (m *Cluster_RingHashLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_RingHashLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RingHashLbConfig != nil { + size, err := m.RingHashLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + return len(dAtA) - i, nil +} +func (m *Cluster_OriginalDstLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_OriginalDstLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OriginalDstLbConfig != nil { + size, err := m.OriginalDstLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + return len(dAtA) - i, nil +} +func (m *Cluster_LeastRequestLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_LeastRequestLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LeastRequestLbConfig != nil { + size, err := m.LeastRequestLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xaa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2 + 
i-- + dAtA[i] = 0xaa + } + return len(dAtA) - i, nil +} +func (m *Cluster_ClusterType) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_ClusterType) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ClusterType != nil { + size, err := m.ClusterType.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb2 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb2 + } + return len(dAtA) - i, nil +} +func (m *Cluster_MaglevLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_MaglevLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MaglevLbConfig != nil { + size, err := m.MaglevLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xa2 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xa2 + } + return len(dAtA) - i, nil +} +func (m *Cluster_RoundRobinLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_RoundRobinLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RoundRobinLbConfig != nil { + size, err := m.RoundRobinLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xc2 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xc2 + } + return len(dAtA) - i, nil +} +func (m *LoadBalancingPolicy_Policy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LoadBalancingPolicy_Policy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LoadBalancingPolicy_Policy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TypedExtensionConfig != nil { + if vtmsg, ok := interface{}(m.TypedExtensionConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TypedExtensionConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} + +func (m *LoadBalancingPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LoadBalancingPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LoadBalancingPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Policies) > 0 { + for iNdEx := len(m.Policies) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Policies[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *UpstreamConnectionOptions_HappyEyeballsConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpstreamConnectionOptions_HappyEyeballsConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UpstreamConnectionOptions_HappyEyeballsConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.FirstAddressFamilyCount != nil { + size, err := (*wrapperspb.UInt32Value)(m.FirstAddressFamilyCount).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.FirstAddressFamilyVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.FirstAddressFamilyVersion)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UpstreamConnectionOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpstreamConnectionOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UpstreamConnectionOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.HappyEyeballsConfig != nil { + size, err := m.HappyEyeballsConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.SetLocalInterfaceNameOnUpstreamConnections { + i-- + if m.SetLocalInterfaceNameOnUpstreamConnections { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.TcpKeepalive != nil { + if vtmsg, ok := interface{}(m.TcpKeepalive).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(size)) + } else { + encoded, err := proto.Marshal(m.TcpKeepalive) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TrackClusterStats) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TrackClusterStats) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TrackClusterStats) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PerEndpointStats { + i-- + if m.PerEndpointStats { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.RequestResponseSizes { + i-- + if m.RequestResponseSizes { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.TimeoutBudgets { + i-- + if m.TimeoutBudgets { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ClusterCollection) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Entries != nil { + if size, ok := interface{}(m.Entries).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Entries) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_TransportSocketMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Match != nil { + l = (*structpb.Struct)(m.Match).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TransportSocket != nil { + if size, ok := interface{}(m.TransportSocket).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TransportSocket) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_CustomClusterType) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_EdsClusterConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EdsConfig != nil { + if size, ok := interface{}(m.EdsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.EdsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ServiceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_LbSubsetConfig_LbSubsetSelector) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Keys) > 0 { + for _, s := range m.Keys { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.FallbackPolicy != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.FallbackPolicy)) + 
} + if len(m.FallbackKeysSubset) > 0 { + for _, s := range m.FallbackKeysSubset { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.SingleHostPerSubset { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_LbSubsetConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FallbackPolicy != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.FallbackPolicy)) + } + if m.DefaultSubset != nil { + l = (*structpb.Struct)(m.DefaultSubset).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.SubsetSelectors) > 0 { + for _, e := range m.SubsetSelectors { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.LocalityWeightAware { + n += 2 + } + if m.ScaleLocalityWeight { + n += 2 + } + if m.PanicModeAny { + n += 2 + } + if m.ListAsAny { + n += 2 + } + if m.MetadataFallbackPolicy != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MetadataFallbackPolicy)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_SlowStartConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SlowStartWindow != nil { + l = (*durationpb.Duration)(m.SlowStartWindow).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Aggression != nil { + if size, ok := interface{}(m.Aggression).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Aggression) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MinWeightPercent != nil { + if size, ok := interface{}(m.MinWeightPercent).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MinWeightPercent) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_RoundRobinLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SlowStartConfig != nil { + l = m.SlowStartConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_LeastRequestLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ChoiceCount != nil { + l = (*wrapperspb.UInt32Value)(m.ChoiceCount).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ActiveRequestBias != nil { + if size, ok := interface{}(m.ActiveRequestBias).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ActiveRequestBias) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SlowStartConfig != nil { + l = m.SlowStartConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_RingHashLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MinimumRingSize != nil { + l = (*wrapperspb.UInt64Value)(m.MinimumRingSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HashFunction != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HashFunction)) + } + if m.MaximumRingSize != nil { + l = (*wrapperspb.UInt64Value)(m.MaximumRingSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_MaglevLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TableSize != nil { + l = (*wrapperspb.UInt64Value)(m.TableSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += 
len(m.unknownFields) + return n +} + +func (m *Cluster_OriginalDstLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UseHttpHeader { + n += 2 + } + l = len(m.HttpHeaderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UpstreamPortOverride != nil { + l = (*wrapperspb.UInt32Value)(m.UpstreamPortOverride).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MetadataKey != nil { + if size, ok := interface{}(m.MetadataKey).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MetadataKey) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RoutingEnabled != nil { + if size, ok := interface{}(m.RoutingEnabled).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RoutingEnabled) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MinClusterSize != nil { + l = (*wrapperspb.UInt64Value)(m.MinClusterSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FailTrafficOnPanic { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *Cluster_CommonLbConfig_ConsistentHashingLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UseHostnameForHashing { + n += 2 + } + if m.HashBalanceFactor != nil { + l = (*wrapperspb.UInt32Value)(m.HashBalanceFactor).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_CommonLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HealthyPanicThreshold != nil { + if size, ok := interface{}(m.HealthyPanicThreshold).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.HealthyPanicThreshold) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.LocalityConfigSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.UpdateMergeWindow != nil { + l = (*durationpb.Duration)(m.UpdateMergeWindow).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IgnoreNewHostsUntilFirstHc { + n += 2 + } + if m.CloseConnectionsOnHostSetChange { + n += 2 + } + if m.ConsistentHashingLbConfig != nil { + l = m.ConsistentHashingLbConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OverrideHostStatus != nil { + if size, ok := interface{}(m.OverrideHostStatus).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.OverrideHostStatus) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ZoneAwareLbConfig != nil { + l = m.ZoneAwareLbConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LocalityWeightedLbConfig != nil { + l = m.LocalityWeightedLbConfig.SizeVT() + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Cluster_RefreshRate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BaseInterval != nil { + l = (*durationpb.Duration)(m.BaseInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxInterval != nil { + l = (*durationpb.Duration)(m.MaxInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_PreconnectPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PerUpstreamPreconnectRatio != nil { + l = (*wrapperspb.DoubleValue)(m.PerUpstreamPreconnectRatio).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PredictivePreconnectRatio != nil { + l = (*wrapperspb.DoubleValue)(m.PredictivePreconnectRatio).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ClusterDiscoveryType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.EdsClusterConfig != nil { + l = m.EdsClusterConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectTimeout != nil { + l = (*durationpb.Duration)(m.ConnectTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PerConnectionBufferLimitBytes != nil { + l = (*wrapperspb.UInt32Value)(m.PerConnectionBufferLimitBytes).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LbPolicy != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.LbPolicy)) + } + if len(m.HealthChecks) > 0 { + for _, e := range m.HealthChecks { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.MaxRequestsPerConnection != nil { + l = (*wrapperspb.UInt32Value)(m.MaxRequestsPerConnection).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CircuitBreakers != nil { + l = m.CircuitBreakers.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HttpProtocolOptions != nil { + if size, ok := interface{}(m.HttpProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.HttpProtocolOptions) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Http2ProtocolOptions != nil { + if size, ok := interface{}(m.Http2ProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Http2ProtocolOptions) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DnsRefreshRate != nil { + l = (*durationpb.Duration)(m.DnsRefreshRate).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DnsLookupFamily != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.DnsLookupFamily)) + } + if len(m.DnsResolvers) > 0 { + for _, e := range m.DnsResolvers { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.OutlierDetection != nil { + l = m.OutlierDetection.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CleanupInterval != nil { + l = (*durationpb.Duration)(m.CleanupInterval).SizeVT() 
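For readers following the generated size arithmetic above and below: every length-delimited field costs its tag bytes, plus a varint length prefix, plus the payload itself, which is why fields numbered 1-15 add "1 + l + SizeOfVarint(l)" while the later Cluster fields (numbers above 15, two-byte tags) add "2 + l + ...". The Marshal*Strict functions walk fields in reverse for the same reason: writing the payload first lets the length prefix be emitted immediately before it without a second pass. A minimal sketch of the size rule, assuming only standard protobuf wire-format rules; sizeOfVarint and fieldSize are illustrative stand-ins, not vendored helpers:

package main

import "fmt"

// sizeOfVarint mirrors what protohelpers.SizeOfVarint computes: the number
// of 7-bit groups needed to encode v.
func sizeOfVarint(v uint64) int {
	n := 1
	for v >= 0x80 {
		v >>= 7
		n++
	}
	return n
}

// fieldSize is an illustrative helper (not vendored code): the cost of one
// length-delimited field is tag bytes + length prefix + payload.
func fieldSize(fieldNumber, payloadLen int) int {
	tag := uint64(fieldNumber<<3 | 2) // wire type 2 = length-delimited
	return sizeOfVarint(tag) + sizeOfVarint(uint64(payloadLen)) + payloadLen
}

func main() {
	fmt.Println(fieldSize(3, 10))  // field 3:  1 + 1 + 10 = 12
	fmt.Println(fieldSize(20, 10)) // field 20: 2 + 1 + 10 = 13
}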
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UpstreamBindConfig != nil { + if size, ok := interface{}(m.UpstreamBindConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UpstreamBindConfig) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LbSubsetConfig != nil { + l = m.LbSubsetConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.LbConfig.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.TransportSocket != nil { + if size, ok := interface{}(m.TransportSocket).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TransportSocket) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ProtocolSelection != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.ProtocolSelection)) + } + if m.CommonLbConfig != nil { + l = m.CommonLbConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.AltStatName) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CommonHttpProtocolOptions != nil { + if size, ok := interface{}(m.CommonHttpProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CommonHttpProtocolOptions) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UpstreamConnectionOptions != nil { + l = m.UpstreamConnectionOptions.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CloseConnectionsOnHostHealthFailure { + n += 3 + } + if m.IgnoreHealthOnHostRemoval { + n += 3 + } + if m.LoadAssignment != nil { + if size, ok := interface{}(m.LoadAssignment).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LoadAssignment) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.TypedExtensionProtocolOptions) > 0 { + for k, v := range m.TypedExtensionProtocolOptions { + _ = k + _ = v + l = 0 + if v != nil { + l = (*anypb.Any)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 2 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if m.RespectDnsTtl { + n += 3 + } + if len(m.Filters) > 0 { + for _, e := range m.Filters { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.LoadBalancingPolicy != nil { + l = m.LoadBalancingPolicy.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LrsServer != nil { + if size, ok := interface{}(m.LrsServer).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LrsServer) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.TransportSocketMatches) > 0 { + for _, e := range m.TransportSocketMatches { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.DnsFailureRefreshRate != nil { + l = m.DnsFailureRefreshRate.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UseTcpForDnsLookups { + n += 3 + } + if m.UpstreamHttpProtocolOptions != nil { + if size, ok := interface{}(m.UpstreamHttpProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UpstreamHttpProtocolOptions) + } + n 
+= 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TrackTimeoutBudgets { + n += 3 + } + if m.UpstreamConfig != nil { + if size, ok := interface{}(m.UpstreamConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UpstreamConfig) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TrackClusterStats != nil { + l = m.TrackClusterStats.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PreconnectPolicy != nil { + l = m.PreconnectPolicy.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectionPoolPerDownstreamConnection { + n += 3 + } + if m.DnsResolutionConfig != nil { + if size, ok := interface{}(m.DnsResolutionConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DnsResolutionConfig) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.WaitForWarmOnInit != nil { + l = (*wrapperspb.BoolValue)(m.WaitForWarmOnInit).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TypedDnsResolverConfig != nil { + if size, ok := interface{}(m.TypedDnsResolverConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TypedDnsResolverConfig) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.LrsReportEndpointMetrics) > 0 { + for _, s := range m.LrsReportEndpointMetrics { + l = len(s) + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_Type) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.Type)) + return n +} +func (m *Cluster_RingHashLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RingHashLbConfig != nil { + l = m.RingHashLbConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *Cluster_OriginalDstLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OriginalDstLbConfig != nil { + l = m.OriginalDstLbConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *Cluster_LeastRequestLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LeastRequestLbConfig != nil { + l = m.LeastRequestLbConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *Cluster_ClusterType) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClusterType != nil { + l = m.ClusterType.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *Cluster_MaglevLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaglevLbConfig != nil { + l = m.MaglevLbConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *Cluster_RoundRobinLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RoundRobinLbConfig != nil { + l = m.RoundRobinLbConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *LoadBalancingPolicy_Policy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedExtensionConfig != nil { + if size, ok := interface{}(m.TypedExtensionConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = 
proto.Size(m.TypedExtensionConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LoadBalancingPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Policies) > 0 { + for _, e := range m.Policies { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *UpstreamConnectionOptions_HappyEyeballsConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FirstAddressFamilyVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.FirstAddressFamilyVersion)) + } + if m.FirstAddressFamilyCount != nil { + l = (*wrapperspb.UInt32Value)(m.FirstAddressFamilyCount).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UpstreamConnectionOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TcpKeepalive != nil { + if size, ok := interface{}(m.TcpKeepalive).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TcpKeepalive) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SetLocalInterfaceNameOnUpstreamConnections { + n += 2 + } + if m.HappyEyeballsConfig != nil { + l = m.HappyEyeballsConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *TrackClusterStats) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TimeoutBudgets { + n += 2 + } + if m.RequestResponseSizes { + n += 2 + } + if m.PerEndpointStats { + n += 2 + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.go index 52a15c0413d..42ddbe2bb35 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/cluster/v3/filter.proto package clusterv3 @@ -10,9 +10,9 @@ import ( _ "github.com/cncf/xds/go/udpa/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any1 "github.com/golang/protobuf/ptypes/any" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" sync "sync" ) @@ -36,7 +36,7 @@ type Filter struct { // Note that Envoy's :ref:`downstream network // filters ` are not valid upstream network filters. // Only one of typed_config or config_discovery can be used. - TypedConfig *any1.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` + TypedConfig *anypb.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` // Configuration source specifier for an extension configuration discovery // service. In case of a failure and without the default configuration, the // listener closes the connections. 
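The filter.pb.go hunks here swap the long-deprecated github.com/golang/protobuf/ptypes/any alias (any1) for the canonical google.golang.org/protobuf/types/known/anypb, which is what lets the new vtprotobuf fast path below operate on the same types. A minimal sketch of how a caller packs a message into an Any-typed field such as Filter.TypedConfig under the new import; durationpb.Duration is only a stand-in for a real filter configuration message:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Stand-in for an actual filter config message.
	cfg := durationpb.New(0)
	// anypb.New wraps the message together with its type URL, which is
	// what an Any-typed field carries on the wire.
	a, err := anypb.New(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(a.GetTypeUrl()) // type.googleapis.com/google.protobuf.Duration
}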
@@ -83,7 +83,7 @@ func (x *Filter) GetName() string {
 	return ""
 }
 
-func (x *Filter) GetTypedConfig() *any1.Any {
+func (x *Filter) GetTypedConfig() *anypb.Any {
 	if x != nil {
 		return x.TypedConfig
 	}
@@ -154,7 +154,7 @@ func file_envoy_config_cluster_v3_filter_proto_rawDescGZIP() []byte {
 var file_envoy_config_cluster_v3_filter_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
 var file_envoy_config_cluster_v3_filter_proto_goTypes = []interface{}{
 	(*Filter)(nil),                   // 0: envoy.config.cluster.v3.Filter
-	(*any1.Any)(nil),                 // 1: google.protobuf.Any
+	(*anypb.Any)(nil),                // 1: google.protobuf.Any
 	(*v3.ExtensionConfigSource)(nil), // 2: envoy.config.core.v3.ExtensionConfigSource
 }
 var file_envoy_config_cluster_v3_filter_proto_depIdxs = []int32{
diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.validate.go
index a9bb71921ce..6de8120e089 100644
--- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.validate.go
+++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.validate.go
@@ -1,3 +1,4 @@
+//go:build !disable_pgv
 // Code generated by protoc-gen-validate. DO NOT EDIT.
 // source: envoy/config/cluster/v3/filter.proto
 
diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter_vtproto.pb.go
new file mode 100644
index 00000000000..f253344e6ae
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter_vtproto.pb.go
@@ -0,0 +1,121 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/config/cluster/v3/filter.proto
+
+package clusterv3
+
+import (
+	protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+	anypb "github.com/planetscale/vtprotobuf/types/known/anypb"
+	proto "google.golang.org/protobuf/proto"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *Filter) MarshalVTStrict() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Filter) MarshalToVTStrict(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Filter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
+	if m.ConfigDiscovery != nil {
+		if vtmsg, ok := interface{}(m.ConfigDiscovery).(interface {
+			MarshalToSizedBufferVTStrict([]byte) (int, error)
+		}); ok {
+			size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+		} else {
+			encoded, err := proto.Marshal(m.ConfigDiscovery)
+			if err != nil {
+				return 0, err
+			}
+			i -= len(encoded)
+			copy(dAtA[i:], encoded)
+			i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.TypedConfig != nil {
+		size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Filter) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+	}
+	if m.TypedConfig != nil {
+		l = (*anypb.Any)(m.TypedConfig).SizeVT()
+		n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+	}
+	if m.ConfigDiscovery != nil {
+		if size, ok := interface{}(m.ConfigDiscovery).(interface {
+			SizeVT() int
+		}); ok {
+			l = size.SizeVT()
+		} else {
+			l = proto.Size(m.ConfigDiscovery)
+		}
+		n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+	}
+	n += len(m.unknownFields)
+	return n
+}
diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.go
index 291ff788358..531cbd0efcd 100644
--- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.go
+++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.go
@@ -1,18 +1,19 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/cluster/v3/outlier_detection.proto package clusterv3 import ( _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - duration "github.com/golang/protobuf/ptypes/duration" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -26,7 +27,7 @@ const ( // See the :ref:`architecture overview ` for // more information on outlier detection. -// [#next-free-field: 24] +// [#next-free-field: 26] type OutlierDetection struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -35,38 +36,38 @@ type OutlierDetection struct { // The number of consecutive server-side error responses (for HTTP traffic, // 5xx responses; for TCP traffic, connection failures; for Redis, failure to // respond PONG; etc.) before a consecutive 5xx ejection occurs. Defaults to 5. - Consecutive_5Xx *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=consecutive_5xx,json=consecutive5xx,proto3" json:"consecutive_5xx,omitempty"` + Consecutive_5Xx *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=consecutive_5xx,json=consecutive5xx,proto3" json:"consecutive_5xx,omitempty"` // The time interval between ejection analysis sweeps. This can result in // both new ejections as well as hosts being returned to service. Defaults // to 10000ms or 10s. - Interval *duration.Duration `protobuf:"bytes,2,opt,name=interval,proto3" json:"interval,omitempty"` + Interval *durationpb.Duration `protobuf:"bytes,2,opt,name=interval,proto3" json:"interval,omitempty"` // The base time that a host is ejected for. The real time is equal to the // base time multiplied by the number of times the host has been ejected and is // capped by :ref:`max_ejection_time`. // Defaults to 30000ms or 30s. - BaseEjectionTime *duration.Duration `protobuf:"bytes,3,opt,name=base_ejection_time,json=baseEjectionTime,proto3" json:"base_ejection_time,omitempty"` - // The maximum % of an upstream cluster that can be ejected due to outlier - // detection. Defaults to 10% but will eject at least one host regardless of the value. - MaxEjectionPercent *wrappers.UInt32Value `protobuf:"bytes,4,opt,name=max_ejection_percent,json=maxEjectionPercent,proto3" json:"max_ejection_percent,omitempty"` + BaseEjectionTime *durationpb.Duration `protobuf:"bytes,3,opt,name=base_ejection_time,json=baseEjectionTime,proto3" json:"base_ejection_time,omitempty"` + // The maximum % of an upstream cluster that can be ejected due to outlier detection. Defaults to 10% . + // Will eject at least one host regardless of the value if :ref:`always_eject_one_host` is enabled. + MaxEjectionPercent *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=max_ejection_percent,json=maxEjectionPercent,proto3" json:"max_ejection_percent,omitempty"` // The % chance that a host will be actually ejected when an outlier status // is detected through consecutive 5xx. This setting can be used to disable // ejection or to ramp it up slowly. Defaults to 100. 
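The reworded max_ejection_percent comment above is the behavioral nuance this regeneration documents: the guarantee of ejecting at least one host is no longer unconditional but tied to the new always_eject_one_host field added further down. A hypothetical helper sketching the documented cap; maxEjectable is illustrative only, not Envoy code:

package example

// maxEjectable illustrates the documented cap: the percentage limit decides
// how many hosts may be ejected, and the floor of one host applies only
// when always_eject_one_host is enabled.
func maxEjectable(totalHosts, maxEjectionPercent int, alwaysEjectOneHost bool) int {
	n := totalHosts * maxEjectionPercent / 100
	if n == 0 && alwaysEjectOneHost {
		n = 1
	}
	return n
}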
- EnforcingConsecutive_5Xx *wrappers.UInt32Value `protobuf:"bytes,5,opt,name=enforcing_consecutive_5xx,json=enforcingConsecutive5xx,proto3" json:"enforcing_consecutive_5xx,omitempty"` + EnforcingConsecutive_5Xx *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=enforcing_consecutive_5xx,json=enforcingConsecutive5xx,proto3" json:"enforcing_consecutive_5xx,omitempty"` // The % chance that a host will be actually ejected when an outlier status // is detected through success rate statistics. This setting can be used to // disable ejection or to ramp it up slowly. Defaults to 100. - EnforcingSuccessRate *wrappers.UInt32Value `protobuf:"bytes,6,opt,name=enforcing_success_rate,json=enforcingSuccessRate,proto3" json:"enforcing_success_rate,omitempty"` + EnforcingSuccessRate *wrapperspb.UInt32Value `protobuf:"bytes,6,opt,name=enforcing_success_rate,json=enforcingSuccessRate,proto3" json:"enforcing_success_rate,omitempty"` // The number of hosts in a cluster that must have enough request volume to // detect success rate outliers. If the number of hosts is less than this // setting, outlier detection via success rate statistics is not performed // for any host in the cluster. Defaults to 5. - SuccessRateMinimumHosts *wrappers.UInt32Value `protobuf:"bytes,7,opt,name=success_rate_minimum_hosts,json=successRateMinimumHosts,proto3" json:"success_rate_minimum_hosts,omitempty"` + SuccessRateMinimumHosts *wrapperspb.UInt32Value `protobuf:"bytes,7,opt,name=success_rate_minimum_hosts,json=successRateMinimumHosts,proto3" json:"success_rate_minimum_hosts,omitempty"` // The minimum number of total requests that must be collected in one // interval (as defined by the interval duration above) to include this host // in success rate based outlier detection. If the volume is lower than this // setting, outlier detection via success rate statistics is not performed // for that host. Defaults to 100. - SuccessRateRequestVolume *wrappers.UInt32Value `protobuf:"bytes,8,opt,name=success_rate_request_volume,json=successRateRequestVolume,proto3" json:"success_rate_request_volume,omitempty"` + SuccessRateRequestVolume *wrapperspb.UInt32Value `protobuf:"bytes,8,opt,name=success_rate_request_volume,json=successRateRequestVolume,proto3" json:"success_rate_request_volume,omitempty"` // This factor is used to determine the ejection threshold for success rate // outlier ejection. The ejection threshold is the difference between the // mean success rate, and the product of this factor and the standard @@ -74,14 +75,14 @@ type OutlierDetection struct { // success_rate_stdev_factor). This factor is divided by a thousand to get a // double. That is, if the desired factor is 1.9, the runtime value should // be 1900. Defaults to 1900. - SuccessRateStdevFactor *wrappers.UInt32Value `protobuf:"bytes,9,opt,name=success_rate_stdev_factor,json=successRateStdevFactor,proto3" json:"success_rate_stdev_factor,omitempty"` + SuccessRateStdevFactor *wrapperspb.UInt32Value `protobuf:"bytes,9,opt,name=success_rate_stdev_factor,json=successRateStdevFactor,proto3" json:"success_rate_stdev_factor,omitempty"` // The number of consecutive gateway failures (502, 503, 504 status codes) // before a consecutive gateway failure ejection occurs. Defaults to 5. 
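The success_rate_stdev_factor comment above compresses a formula that is easier to read written out: ejection_threshold = mean_success_rate - (success_rate_stdev_factor / 1000) * stdev. A hypothetical sketch using the documented default of 1900, i.e. a factor of 1.9; successRateThreshold is illustrative, Envoy computes this internally on each analysis sweep:

package example

// successRateThreshold illustrates the documented formula: the factor is a
// fixed-point value scaled by 1000, so 1900 means 1.9 standard deviations.
func successRateThreshold(meanPct, stdevPct float64, stdevFactor uint32) float64 {
	return meanPct - (float64(stdevFactor)/1000.0)*stdevPct
}

// With mean 98%, stdev 2% and the default factor 1900, hosts whose success
// rate drops below 98 - 1.9*2 = 94.2% are treated as outliers.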
- ConsecutiveGatewayFailure *wrappers.UInt32Value `protobuf:"bytes,10,opt,name=consecutive_gateway_failure,json=consecutiveGatewayFailure,proto3" json:"consecutive_gateway_failure,omitempty"` + ConsecutiveGatewayFailure *wrapperspb.UInt32Value `protobuf:"bytes,10,opt,name=consecutive_gateway_failure,json=consecutiveGatewayFailure,proto3" json:"consecutive_gateway_failure,omitempty"` // The % chance that a host will be actually ejected when an outlier status // is detected through consecutive gateway failures. This setting can be // used to disable ejection or to ramp it up slowly. Defaults to 0. - EnforcingConsecutiveGatewayFailure *wrappers.UInt32Value `protobuf:"bytes,11,opt,name=enforcing_consecutive_gateway_failure,json=enforcingConsecutiveGatewayFailure,proto3" json:"enforcing_consecutive_gateway_failure,omitempty"` + EnforcingConsecutiveGatewayFailure *wrapperspb.UInt32Value `protobuf:"bytes,11,opt,name=enforcing_consecutive_gateway_failure,json=enforcingConsecutiveGatewayFailure,proto3" json:"enforcing_consecutive_gateway_failure,omitempty"` // Determines whether to distinguish local origin failures from external errors. If set to true // the following configuration parameters are taken into account: // :ref:`consecutive_local_origin_failure`, @@ -94,59 +95,65 @@ type OutlierDetection struct { // occurs. Defaults to 5. Parameter takes effect only when // :ref:`split_external_local_origin_errors` // is set to true. - ConsecutiveLocalOriginFailure *wrappers.UInt32Value `protobuf:"bytes,13,opt,name=consecutive_local_origin_failure,json=consecutiveLocalOriginFailure,proto3" json:"consecutive_local_origin_failure,omitempty"` + ConsecutiveLocalOriginFailure *wrapperspb.UInt32Value `protobuf:"bytes,13,opt,name=consecutive_local_origin_failure,json=consecutiveLocalOriginFailure,proto3" json:"consecutive_local_origin_failure,omitempty"` // The % chance that a host will be actually ejected when an outlier status // is detected through consecutive locally originated failures. This setting can be // used to disable ejection or to ramp it up slowly. Defaults to 100. // Parameter takes effect only when // :ref:`split_external_local_origin_errors` // is set to true. - EnforcingConsecutiveLocalOriginFailure *wrappers.UInt32Value `protobuf:"bytes,14,opt,name=enforcing_consecutive_local_origin_failure,json=enforcingConsecutiveLocalOriginFailure,proto3" json:"enforcing_consecutive_local_origin_failure,omitempty"` + EnforcingConsecutiveLocalOriginFailure *wrapperspb.UInt32Value `protobuf:"bytes,14,opt,name=enforcing_consecutive_local_origin_failure,json=enforcingConsecutiveLocalOriginFailure,proto3" json:"enforcing_consecutive_local_origin_failure,omitempty"` // The % chance that a host will be actually ejected when an outlier status // is detected through success rate statistics for locally originated errors. // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. // Parameter takes effect only when // :ref:`split_external_local_origin_errors` // is set to true. 
- EnforcingLocalOriginSuccessRate *wrappers.UInt32Value `protobuf:"bytes,15,opt,name=enforcing_local_origin_success_rate,json=enforcingLocalOriginSuccessRate,proto3" json:"enforcing_local_origin_success_rate,omitempty"` + EnforcingLocalOriginSuccessRate *wrapperspb.UInt32Value `protobuf:"bytes,15,opt,name=enforcing_local_origin_success_rate,json=enforcingLocalOriginSuccessRate,proto3" json:"enforcing_local_origin_success_rate,omitempty"` // The failure percentage to use when determining failure percentage-based outlier detection. If // the failure percentage of a given host is greater than or equal to this value, it will be // ejected. Defaults to 85. - FailurePercentageThreshold *wrappers.UInt32Value `protobuf:"bytes,16,opt,name=failure_percentage_threshold,json=failurePercentageThreshold,proto3" json:"failure_percentage_threshold,omitempty"` + FailurePercentageThreshold *wrapperspb.UInt32Value `protobuf:"bytes,16,opt,name=failure_percentage_threshold,json=failurePercentageThreshold,proto3" json:"failure_percentage_threshold,omitempty"` // The % chance that a host will be actually ejected when an outlier status is detected through // failure percentage statistics. This setting can be used to disable ejection or to ramp it up // slowly. Defaults to 0. // // [#next-major-version: setting this without setting failure_percentage_threshold should be // invalid in v4.] - EnforcingFailurePercentage *wrappers.UInt32Value `protobuf:"bytes,17,opt,name=enforcing_failure_percentage,json=enforcingFailurePercentage,proto3" json:"enforcing_failure_percentage,omitempty"` + EnforcingFailurePercentage *wrapperspb.UInt32Value `protobuf:"bytes,17,opt,name=enforcing_failure_percentage,json=enforcingFailurePercentage,proto3" json:"enforcing_failure_percentage,omitempty"` // The % chance that a host will be actually ejected when an outlier status is detected through // local-origin failure percentage statistics. This setting can be used to disable ejection or to // ramp it up slowly. Defaults to 0. - EnforcingFailurePercentageLocalOrigin *wrappers.UInt32Value `protobuf:"bytes,18,opt,name=enforcing_failure_percentage_local_origin,json=enforcingFailurePercentageLocalOrigin,proto3" json:"enforcing_failure_percentage_local_origin,omitempty"` + EnforcingFailurePercentageLocalOrigin *wrapperspb.UInt32Value `protobuf:"bytes,18,opt,name=enforcing_failure_percentage_local_origin,json=enforcingFailurePercentageLocalOrigin,proto3" json:"enforcing_failure_percentage_local_origin,omitempty"` // The minimum number of hosts in a cluster in order to perform failure percentage-based ejection. // If the total number of hosts in the cluster is less than this value, failure percentage-based // ejection will not be performed. Defaults to 5. - FailurePercentageMinimumHosts *wrappers.UInt32Value `protobuf:"bytes,19,opt,name=failure_percentage_minimum_hosts,json=failurePercentageMinimumHosts,proto3" json:"failure_percentage_minimum_hosts,omitempty"` + FailurePercentageMinimumHosts *wrapperspb.UInt32Value `protobuf:"bytes,19,opt,name=failure_percentage_minimum_hosts,json=failurePercentageMinimumHosts,proto3" json:"failure_percentage_minimum_hosts,omitempty"` // The minimum number of total requests that must be collected in one interval (as defined by the // interval duration above) to perform failure percentage-based ejection for this host. If the // volume is lower than this setting, failure percentage-based ejection will not be performed for // this host. Defaults to 50. 
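The failure-percentage fields above gate each other: ejection by failure percentage is only evaluated once the cluster clears failure_percentage_minimum_hosts and the host clears failure_percentage_request_volume, and the host is then compared against failure_percentage_threshold. A hypothetical sketch using the documented defaults (5 hosts, 50 requests, 85%) as literals; failurePctEjectable is illustrative only:

package example

// failurePctEjectable illustrates the documented gating: with too little
// data, failure-percentage ejection is skipped entirely.
func failurePctEjectable(clusterHosts, hostRequests int, hostFailurePct float64) bool {
	if clusterHosts < 5 || hostRequests < 50 {
		return false // not enough data to judge the host
	}
	return hostFailurePct >= 85.0 // failure_percentage_threshold default
}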
- FailurePercentageRequestVolume *wrappers.UInt32Value `protobuf:"bytes,20,opt,name=failure_percentage_request_volume,json=failurePercentageRequestVolume,proto3" json:"failure_percentage_request_volume,omitempty"` + FailurePercentageRequestVolume *wrapperspb.UInt32Value `protobuf:"bytes,20,opt,name=failure_percentage_request_volume,json=failurePercentageRequestVolume,proto3" json:"failure_percentage_request_volume,omitempty"` // The maximum time that a host is ejected for. See :ref:`base_ejection_time` // for more information. If not specified, the default value (300000ms or 300s) or // :ref:`base_ejection_time` value is applied, whatever is larger. - MaxEjectionTime *duration.Duration `protobuf:"bytes,21,opt,name=max_ejection_time,json=maxEjectionTime,proto3" json:"max_ejection_time,omitempty"` + MaxEjectionTime *durationpb.Duration `protobuf:"bytes,21,opt,name=max_ejection_time,json=maxEjectionTime,proto3" json:"max_ejection_time,omitempty"` // The maximum amount of jitter to add to the ejection time, in order to prevent // a 'thundering herd' effect where all proxies try to reconnect to host at the same time. // See :ref:`max_ejection_time_jitter` // Defaults to 0s. - MaxEjectionTimeJitter *duration.Duration `protobuf:"bytes,22,opt,name=max_ejection_time_jitter,json=maxEjectionTimeJitter,proto3" json:"max_ejection_time_jitter,omitempty"` + MaxEjectionTimeJitter *durationpb.Duration `protobuf:"bytes,22,opt,name=max_ejection_time_jitter,json=maxEjectionTimeJitter,proto3" json:"max_ejection_time_jitter,omitempty"` // If active health checking is enabled and a host is ejected by outlier detection, a successful active health check // unejects the host by default and considers it as healthy. Unejection also clears all the outlier detection counters. // To change this default behavior set this config to “false“ where active health checking will not uneject the host. // Defaults to true. - SuccessfulActiveHealthCheckUnejectHost *wrappers.BoolValue `protobuf:"bytes,23,opt,name=successful_active_health_check_uneject_host,json=successfulActiveHealthCheckUnejectHost,proto3" json:"successful_active_health_check_uneject_host,omitempty"` + SuccessfulActiveHealthCheckUnejectHost *wrapperspb.BoolValue `protobuf:"bytes,23,opt,name=successful_active_health_check_uneject_host,json=successfulActiveHealthCheckUnejectHost,proto3" json:"successful_active_health_check_uneject_host,omitempty"` + // Set of host's passive monitors. + // [#not-implemented-hide:] + Monitors []*v3.TypedExtensionConfig `protobuf:"bytes,24,rep,name=monitors,proto3" json:"monitors,omitempty"` + // If enabled, at least one host is ejected regardless of the value of :ref:`max_ejection_percent`. + // Defaults to false. 
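The ejection-time comments above combine into one rule: the effective ejection time is base_ejection_time scaled by how many times the host has been ejected, capped at max_ejection_time, with up to max_ejection_time_jitter of random slack added to avoid a thundering herd of simultaneous reconnects. A hypothetical sketch; ejectionTime is illustrative, and treating the first ejection as a multiplier of 1 is an assumption here:

package example

import (
	"math/rand"
	"time"
)

// ejectionTime illustrates the documented rule: linear growth per prior
// ejection, a hard cap, then random jitter on top.
func ejectionTime(base, max, jitter time.Duration, priorEjections int) time.Duration {
	d := base * time.Duration(priorEjections+1)
	if d > max {
		d = max
	}
	return d + time.Duration(rand.Int63n(int64(jitter)+1))
}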
+ AlwaysEjectOneHost *wrapperspb.BoolValue `protobuf:"bytes,25,opt,name=always_eject_one_host,json=alwaysEjectOneHost,proto3" json:"always_eject_one_host,omitempty"` } func (x *OutlierDetection) Reset() { @@ -181,77 +188,77 @@ func (*OutlierDetection) Descriptor() ([]byte, []int) { return file_envoy_config_cluster_v3_outlier_detection_proto_rawDescGZIP(), []int{0} } -func (x *OutlierDetection) GetConsecutive_5Xx() *wrappers.UInt32Value { +func (x *OutlierDetection) GetConsecutive_5Xx() *wrapperspb.UInt32Value { if x != nil { return x.Consecutive_5Xx } return nil } -func (x *OutlierDetection) GetInterval() *duration.Duration { +func (x *OutlierDetection) GetInterval() *durationpb.Duration { if x != nil { return x.Interval } return nil } -func (x *OutlierDetection) GetBaseEjectionTime() *duration.Duration { +func (x *OutlierDetection) GetBaseEjectionTime() *durationpb.Duration { if x != nil { return x.BaseEjectionTime } return nil } -func (x *OutlierDetection) GetMaxEjectionPercent() *wrappers.UInt32Value { +func (x *OutlierDetection) GetMaxEjectionPercent() *wrapperspb.UInt32Value { if x != nil { return x.MaxEjectionPercent } return nil } -func (x *OutlierDetection) GetEnforcingConsecutive_5Xx() *wrappers.UInt32Value { +func (x *OutlierDetection) GetEnforcingConsecutive_5Xx() *wrapperspb.UInt32Value { if x != nil { return x.EnforcingConsecutive_5Xx } return nil } -func (x *OutlierDetection) GetEnforcingSuccessRate() *wrappers.UInt32Value { +func (x *OutlierDetection) GetEnforcingSuccessRate() *wrapperspb.UInt32Value { if x != nil { return x.EnforcingSuccessRate } return nil } -func (x *OutlierDetection) GetSuccessRateMinimumHosts() *wrappers.UInt32Value { +func (x *OutlierDetection) GetSuccessRateMinimumHosts() *wrapperspb.UInt32Value { if x != nil { return x.SuccessRateMinimumHosts } return nil } -func (x *OutlierDetection) GetSuccessRateRequestVolume() *wrappers.UInt32Value { +func (x *OutlierDetection) GetSuccessRateRequestVolume() *wrapperspb.UInt32Value { if x != nil { return x.SuccessRateRequestVolume } return nil } -func (x *OutlierDetection) GetSuccessRateStdevFactor() *wrappers.UInt32Value { +func (x *OutlierDetection) GetSuccessRateStdevFactor() *wrapperspb.UInt32Value { if x != nil { return x.SuccessRateStdevFactor } return nil } -func (x *OutlierDetection) GetConsecutiveGatewayFailure() *wrappers.UInt32Value { +func (x *OutlierDetection) GetConsecutiveGatewayFailure() *wrapperspb.UInt32Value { if x != nil { return x.ConsecutiveGatewayFailure } return nil } -func (x *OutlierDetection) GetEnforcingConsecutiveGatewayFailure() *wrappers.UInt32Value { +func (x *OutlierDetection) GetEnforcingConsecutiveGatewayFailure() *wrapperspb.UInt32Value { if x != nil { return x.EnforcingConsecutiveGatewayFailure } @@ -265,83 +272,97 @@ func (x *OutlierDetection) GetSplitExternalLocalOriginErrors() bool { return false } -func (x *OutlierDetection) GetConsecutiveLocalOriginFailure() *wrappers.UInt32Value { +func (x *OutlierDetection) GetConsecutiveLocalOriginFailure() *wrapperspb.UInt32Value { if x != nil { return x.ConsecutiveLocalOriginFailure } return nil } -func (x *OutlierDetection) GetEnforcingConsecutiveLocalOriginFailure() *wrappers.UInt32Value { +func (x *OutlierDetection) GetEnforcingConsecutiveLocalOriginFailure() *wrapperspb.UInt32Value { if x != nil { return x.EnforcingConsecutiveLocalOriginFailure } return nil } -func (x *OutlierDetection) GetEnforcingLocalOriginSuccessRate() *wrappers.UInt32Value { +func (x *OutlierDetection) GetEnforcingLocalOriginSuccessRate() 
*wrapperspb.UInt32Value { if x != nil { return x.EnforcingLocalOriginSuccessRate } return nil } -func (x *OutlierDetection) GetFailurePercentageThreshold() *wrappers.UInt32Value { +func (x *OutlierDetection) GetFailurePercentageThreshold() *wrapperspb.UInt32Value { if x != nil { return x.FailurePercentageThreshold } return nil } -func (x *OutlierDetection) GetEnforcingFailurePercentage() *wrappers.UInt32Value { +func (x *OutlierDetection) GetEnforcingFailurePercentage() *wrapperspb.UInt32Value { if x != nil { return x.EnforcingFailurePercentage } return nil } -func (x *OutlierDetection) GetEnforcingFailurePercentageLocalOrigin() *wrappers.UInt32Value { +func (x *OutlierDetection) GetEnforcingFailurePercentageLocalOrigin() *wrapperspb.UInt32Value { if x != nil { return x.EnforcingFailurePercentageLocalOrigin } return nil } -func (x *OutlierDetection) GetFailurePercentageMinimumHosts() *wrappers.UInt32Value { +func (x *OutlierDetection) GetFailurePercentageMinimumHosts() *wrapperspb.UInt32Value { if x != nil { return x.FailurePercentageMinimumHosts } return nil } -func (x *OutlierDetection) GetFailurePercentageRequestVolume() *wrappers.UInt32Value { +func (x *OutlierDetection) GetFailurePercentageRequestVolume() *wrapperspb.UInt32Value { if x != nil { return x.FailurePercentageRequestVolume } return nil } -func (x *OutlierDetection) GetMaxEjectionTime() *duration.Duration { +func (x *OutlierDetection) GetMaxEjectionTime() *durationpb.Duration { if x != nil { return x.MaxEjectionTime } return nil } -func (x *OutlierDetection) GetMaxEjectionTimeJitter() *duration.Duration { +func (x *OutlierDetection) GetMaxEjectionTimeJitter() *durationpb.Duration { if x != nil { return x.MaxEjectionTimeJitter } return nil } -func (x *OutlierDetection) GetSuccessfulActiveHealthCheckUnejectHost() *wrappers.BoolValue { +func (x *OutlierDetection) GetSuccessfulActiveHealthCheckUnejectHost() *wrapperspb.BoolValue { if x != nil { return x.SuccessfulActiveHealthCheckUnejectHost } return nil } +func (x *OutlierDetection) GetMonitors() []*v3.TypedExtensionConfig { + if x != nil { + return x.Monitors + } + return nil +} + +func (x *OutlierDetection) GetAlwaysEjectOneHost() *wrapperspb.BoolValue { + if x != nil { + return x.AlwaysEjectOneHost + } + return nil +} + var File_envoy_config_cluster_v3_outlier_detection_proto protoreflect.FileDescriptor var file_envoy_config_cluster_v3_outlier_detection_proto_rawDesc = []byte{ @@ -349,171 +370,183 @@ var file_envoy_config_cluster_v3_outlier_detection_proto_rawDesc = []byte{ 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, - 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, - 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8a, 0x12, 0x0a, 0x10, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, - 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x45, 0x0a, 0x0f, 0x63, 0x6f, - 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x35, 0x78, 0x78, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x35, 0x78, - 0x78, 0x12, 0x3f, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, - 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, - 0x61, 0x6c, 0x12, 0x51, 0x0a, 0x12, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x65, 0x6a, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, - 0x02, 0x2a, 0x00, 0x52, 0x10, 0x62, 0x61, 0x73, 0x65, 0x45, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x14, 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x6a, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x12, 0x6d, 0x61, 0x78, 0x45, - 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x61, - 0x0a, 0x19, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x73, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x35, 0x78, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, - 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x17, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, - 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x35, 0x78, - 0x78, 0x12, 0x5b, 0x0a, 0x16, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x73, - 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, - 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x14, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, - 0x69, 0x6e, 0x67, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x12, 0x59, - 0x0a, 0x1a, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 
0x61, 0x74, 0x65, 0x5f, 0x6d, - 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x17, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x4d, 0x69, 0x6e, - 0x69, 0x6d, 0x75, 0x6d, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x12, 0x5b, 0x0a, 0x1b, 0x73, 0x75, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x18, 0x73, 0x75, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x19, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x64, 0x65, 0x76, 0x5f, 0x66, 0x61, 0x63, - 0x74, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, + 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa1, 0x13, 0x0a, 0x10, + 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x45, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x35, 0x78, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x76, 0x65, 0x35, 0x78, 0x78, 0x12, 0x3f, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x08, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x51, 0x0a, 0x12, 0x62, 0x61, 0x73, 
0x65, + 0x5f, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x10, 0x62, 0x61, 0x73, 0x65, 0x45, + 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x14, 0x6d, + 0x61, 0x78, 0x5f, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, - 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x16, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x52, 0x61, 0x74, 0x65, 0x53, 0x74, 0x64, 0x65, 0x76, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, - 0x5c, 0x0a, 0x1b, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, - 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x0a, + 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, + 0x52, 0x12, 0x6d, 0x61, 0x78, 0x45, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, + 0x63, 0x65, 0x6e, 0x74, 0x12, 0x61, 0x0a, 0x19, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x35, 0x78, + 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x17, + 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x76, 0x65, 0x35, 0x78, 0x78, 0x12, 0x5b, 0x0a, 0x16, 0x65, 0x6e, 0x66, 0x6f, 0x72, + 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x14, + 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x52, 0x61, 0x74, 0x65, 0x12, 0x59, 0x0a, 0x1a, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, + 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x17, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, + 0x61, 0x74, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x12, + 0x5b, 0x0a, 0x1b, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x19, 
0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x47, - 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x78, 0x0a, - 0x25, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, 0x66, - 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, - 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, - 0x02, 0x18, 0x64, 0x52, 0x22, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, - 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, - 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x4a, 0x0a, 0x22, 0x73, 0x70, 0x6c, 0x69, 0x74, - 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, - 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x1e, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x45, 0x72, 0x72, - 0x6f, 0x72, 0x73, 0x12, 0x65, 0x0a, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x76, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, - 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1d, 0x63, 0x6f, 0x6e, - 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, - 0x67, 0x69, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x81, 0x01, 0x0a, 0x2a, 0x65, - 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, - 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x75, 0x65, 0x52, 0x18, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x19, + 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x64, + 0x65, 0x76, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, - 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x26, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, - 0x67, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x4c, 0x6f, 0x63, 0x61, - 0x6c, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x73, - 0x0a, 0x23, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x6c, 0x6f, 0x63, 0x61, - 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, - 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, - 0x18, 0x64, 0x52, 0x1f, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x4c, 0x6f, 0x63, - 0x61, 0x6c, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, - 0x61, 0x74, 0x65, 0x12, 0x67, 0x0a, 0x1c, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x70, - 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, - 0x6f, 0x6c, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, - 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, - 0x52, 0x1a, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, - 0x61, 0x67, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x67, 0x0a, 0x1c, - 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, - 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x11, 0x20, 0x01, + 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x16, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x53, 0x74, 0x64, 0x65, 0x76, 0x46, + 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x5c, 0x0a, 0x1b, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, 0x66, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, + 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x19, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, + 0x75, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x46, 0x61, 0x69, 0x6c, + 0x75, 0x72, 0x65, 0x12, 0x78, 0x0a, 0x25, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, + 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x74, + 0x65, 0x77, 0x61, 0x79, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x1a, 0x65, 0x6e, 0x66, 0x6f, 0x72, - 0x63, 0x69, 0x6e, 0x67, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, - 0x6e, 0x74, 0x61, 0x67, 0x65, 0x12, 0x7f, 0x0a, 0x29, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, - 0x6e, 0x67, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, - 0x6e, 0x74, 0x61, 0x67, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, - 0x69, 0x6e, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x22, 0x65, 0x6e, 0x66, 0x6f, 0x72, + 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x47, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x4a, 0x0a, + 0x22, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 
0x65, 0x72, 0x72, + 0x6f, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1e, 0x73, 0x70, 0x6c, 0x69, 0x74, + 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, + 0x67, 0x69, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x65, 0x0a, 0x20, 0x63, 0x6f, 0x6e, + 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, + 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x1d, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x4c, 0x6f, + 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, + 0x12, 0x81, 0x01, 0x0a, 0x2a, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x63, + 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x26, 0x65, 0x6e, + 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x76, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x46, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x12, 0x73, 0x0a, 0x23, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x1f, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, + 0x69, 0x6e, 0x67, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x53, 0x75, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x12, 0x67, 0x0a, 0x1c, 0x66, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x5f, + 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x1a, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x50, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, + 0x6c, 0x64, 0x12, 0x67, 0x0a, 0x1c, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x5f, + 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, + 0x67, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, - 0x25, 
0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, - 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, - 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x65, 0x0a, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, - 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x5f, 0x6d, 0x69, 0x6e, - 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1d, - 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, - 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x12, 0x67, 0x0a, - 0x21, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, - 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x6f, 0x6c, 0x75, - 0x6d, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, - 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1e, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x50, - 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x4f, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x6a, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, - 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x45, 0x6a, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x52, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x5f, 0x65, - 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6a, 0x69, 0x74, - 0x74, 0x65, 0x72, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x45, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x77, 0x0a, 0x2b, 0x73, - 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, - 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x6e, - 0x65, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x26, 0x73, 0x75, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x48, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x6e, 0x65, 0x6a, 0x65, 0x63, 0x74, - 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x2e, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x42, 0x92, 0x01, 
0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x25, 0x69, - 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x2e, 0x76, 0x33, 0x42, 0x15, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, - 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, - 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x1a, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, + 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x12, 0x7f, 0x0a, 0x29, 0x65, + 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, + 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x25, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, + 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, + 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x65, 0x0a, 0x20, + 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, + 0x67, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x73, + 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1d, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x50, 0x65, 0x72, + 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x48, 0x6f, + 0x73, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x21, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x70, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1e, 0x66, 0x61, + 0x69, 0x6c, 0x75, 0x72, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x4f, 0x0a, 0x11, + 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 
0x2a, 0x00, 0x52, 0x0f, 0x6d, 0x61, + 0x78, 0x45, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x52, 0x0a, + 0x18, 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x45, + 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x4a, 0x69, 0x74, 0x74, 0x65, + 0x72, 0x12, 0x77, 0x0a, 0x2b, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x5f, + 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x6e, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, + 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x26, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x41, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, + 0x6e, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x46, 0x0a, 0x08, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x18, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x08, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x73, 0x12, 0x4d, 0x0a, 0x15, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x5f, 0x65, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x6f, 0x6e, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, 0x61, + 0x6c, 0x77, 0x61, 0x79, 0x73, 0x45, 0x6a, 0x65, 0x63, 0x74, 0x4f, 0x6e, 0x65, 0x48, 0x6f, 0x73, + 0x74, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4f, + 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x92, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x42, 0x15, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ 
-530,10 +563,11 @@ func file_envoy_config_cluster_v3_outlier_detection_proto_rawDescGZIP() []byte { var file_envoy_config_cluster_v3_outlier_detection_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_envoy_config_cluster_v3_outlier_detection_proto_goTypes = []interface{}{ - (*OutlierDetection)(nil), // 0: envoy.config.cluster.v3.OutlierDetection - (*wrappers.UInt32Value)(nil), // 1: google.protobuf.UInt32Value - (*duration.Duration)(nil), // 2: google.protobuf.Duration - (*wrappers.BoolValue)(nil), // 3: google.protobuf.BoolValue + (*OutlierDetection)(nil), // 0: envoy.config.cluster.v3.OutlierDetection + (*wrapperspb.UInt32Value)(nil), // 1: google.protobuf.UInt32Value + (*durationpb.Duration)(nil), // 2: google.protobuf.Duration + (*wrapperspb.BoolValue)(nil), // 3: google.protobuf.BoolValue + (*v3.TypedExtensionConfig)(nil), // 4: envoy.config.core.v3.TypedExtensionConfig } var file_envoy_config_cluster_v3_outlier_detection_proto_depIdxs = []int32{ 1, // 0: envoy.config.cluster.v3.OutlierDetection.consecutive_5xx:type_name -> google.protobuf.UInt32Value @@ -558,11 +592,13 @@ var file_envoy_config_cluster_v3_outlier_detection_proto_depIdxs = []int32{ 2, // 19: envoy.config.cluster.v3.OutlierDetection.max_ejection_time:type_name -> google.protobuf.Duration 2, // 20: envoy.config.cluster.v3.OutlierDetection.max_ejection_time_jitter:type_name -> google.protobuf.Duration 3, // 21: envoy.config.cluster.v3.OutlierDetection.successful_active_health_check_uneject_host:type_name -> google.protobuf.BoolValue - 22, // [22:22] is the sub-list for method output_type - 22, // [22:22] is the sub-list for method input_type - 22, // [22:22] is the sub-list for extension type_name - 22, // [22:22] is the sub-list for extension extendee - 0, // [0:22] is the sub-list for field type_name + 4, // 22: envoy.config.cluster.v3.OutlierDetection.monitors:type_name -> envoy.config.core.v3.TypedExtensionConfig + 3, // 23: envoy.config.cluster.v3.OutlierDetection.always_eject_one_host:type_name -> google.protobuf.BoolValue + 24, // [24:24] is the sub-list for method output_type + 24, // [24:24] is the sub-list for method input_type + 24, // [24:24] is the sub-list for extension type_name + 24, // [24:24] is the sub-list for extension extendee + 0, // [0:24] is the sub-list for field type_name } func init() { file_envoy_config_cluster_v3_outlier_detection_proto_init() } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.validate.go index 44f24b5511a..966821457d8 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. 
// source: envoy/config/cluster/v3/outlier_detection.proto @@ -574,6 +575,69 @@ func (m *OutlierDetection) validate(all bool) error { } } + for idx, item := range m.GetMonitors() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: fmt.Sprintf("Monitors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: fmt.Sprintf("Monitors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: fmt.Sprintf("Monitors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetAlwaysEjectOneHost()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "AlwaysEjectOneHost", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "AlwaysEjectOneHost", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAlwaysEjectOneHost()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: "AlwaysEjectOneHost", + reason: "embedded message failed validation", + cause: err, + } + } + } + if len(errors) > 0 { return OutlierDetectionMultiError(errors) } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection_vtproto.pb.go new file mode 100644 index 00000000000..f837d56bd0b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection_vtproto.pb.go @@ -0,0 +1,456 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/cluster/v3/outlier_detection.proto + +package clusterv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *OutlierDetection) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OutlierDetection) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OutlierDetection) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AlwaysEjectOneHost != nil { + size, err := (*wrapperspb.BoolValue)(m.AlwaysEjectOneHost).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + if len(m.Monitors) > 0 { + for iNdEx := len(m.Monitors) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Monitors[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Monitors[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + } + if m.SuccessfulActiveHealthCheckUnejectHost != nil { + size, err := (*wrapperspb.BoolValue)(m.SuccessfulActiveHealthCheckUnejectHost).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if m.MaxEjectionTimeJitter != nil { + size, err := (*durationpb.Duration)(m.MaxEjectionTimeJitter).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if m.MaxEjectionTime != nil { + size, err := (*durationpb.Duration)(m.MaxEjectionTime).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if m.FailurePercentageRequestVolume != nil { + size, err := (*wrapperspb.UInt32Value)(m.FailurePercentageRequestVolume).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if m.FailurePercentageMinimumHosts != nil { + size, err := (*wrapperspb.UInt32Value)(m.FailurePercentageMinimumHosts).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if m.EnforcingFailurePercentageLocalOrigin != nil { + size, err := (*wrapperspb.UInt32Value)(m.EnforcingFailurePercentageLocalOrigin).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } 
+ if m.EnforcingFailurePercentage != nil { + size, err := (*wrapperspb.UInt32Value)(m.EnforcingFailurePercentage).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.FailurePercentageThreshold != nil { + size, err := (*wrapperspb.UInt32Value)(m.FailurePercentageThreshold).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.EnforcingLocalOriginSuccessRate != nil { + size, err := (*wrapperspb.UInt32Value)(m.EnforcingLocalOriginSuccessRate).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + if m.EnforcingConsecutiveLocalOriginFailure != nil { + size, err := (*wrapperspb.UInt32Value)(m.EnforcingConsecutiveLocalOriginFailure).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if m.ConsecutiveLocalOriginFailure != nil { + size, err := (*wrapperspb.UInt32Value)(m.ConsecutiveLocalOriginFailure).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + if m.SplitExternalLocalOriginErrors { + i-- + if m.SplitExternalLocalOriginErrors { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + } + if m.EnforcingConsecutiveGatewayFailure != nil { + size, err := (*wrapperspb.UInt32Value)(m.EnforcingConsecutiveGatewayFailure).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if m.ConsecutiveGatewayFailure != nil { + size, err := (*wrapperspb.UInt32Value)(m.ConsecutiveGatewayFailure).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if m.SuccessRateStdevFactor != nil { + size, err := (*wrapperspb.UInt32Value)(m.SuccessRateStdevFactor).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if m.SuccessRateRequestVolume != nil { + size, err := (*wrapperspb.UInt32Value)(m.SuccessRateRequestVolume).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.SuccessRateMinimumHosts != nil { + size, err := (*wrapperspb.UInt32Value)(m.SuccessRateMinimumHosts).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.EnforcingSuccessRate != nil { + size, err := (*wrapperspb.UInt32Value)(m.EnforcingSuccessRate).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.EnforcingConsecutive_5Xx != nil { + size, err := (*wrapperspb.UInt32Value)(m.EnforcingConsecutive_5Xx).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.MaxEjectionPercent != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxEjectionPercent).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.BaseEjectionTime != nil { + size, err := (*durationpb.Duration)(m.BaseEjectionTime).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Interval != nil { + size, err := (*durationpb.Duration)(m.Interval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Consecutive_5Xx != nil { + size, err := (*wrapperspb.UInt32Value)(m.Consecutive_5Xx).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OutlierDetection) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Consecutive_5Xx != nil { + l = (*wrapperspb.UInt32Value)(m.Consecutive_5Xx).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Interval != nil { + l = (*durationpb.Duration)(m.Interval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.BaseEjectionTime != nil { + l = (*durationpb.Duration)(m.BaseEjectionTime).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxEjectionPercent != nil { + l = (*wrapperspb.UInt32Value)(m.MaxEjectionPercent).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnforcingConsecutive_5Xx != nil { + l = (*wrapperspb.UInt32Value)(m.EnforcingConsecutive_5Xx).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnforcingSuccessRate != nil { + l = (*wrapperspb.UInt32Value)(m.EnforcingSuccessRate).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SuccessRateMinimumHosts != nil { + l = (*wrapperspb.UInt32Value)(m.SuccessRateMinimumHosts).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SuccessRateRequestVolume != nil { + l = (*wrapperspb.UInt32Value)(m.SuccessRateRequestVolume).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SuccessRateStdevFactor != nil { + l = (*wrapperspb.UInt32Value)(m.SuccessRateStdevFactor).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConsecutiveGatewayFailure != nil { + l = (*wrapperspb.UInt32Value)(m.ConsecutiveGatewayFailure).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnforcingConsecutiveGatewayFailure != nil { + l = (*wrapperspb.UInt32Value)(m.EnforcingConsecutiveGatewayFailure).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SplitExternalLocalOriginErrors { + n += 2 + } + if m.ConsecutiveLocalOriginFailure != nil { + l = (*wrapperspb.UInt32Value)(m.ConsecutiveLocalOriginFailure).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnforcingConsecutiveLocalOriginFailure != nil { + l = (*wrapperspb.UInt32Value)(m.EnforcingConsecutiveLocalOriginFailure).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnforcingLocalOriginSuccessRate != nil { + l = (*wrapperspb.UInt32Value)(m.EnforcingLocalOriginSuccessRate).SizeVT() 
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FailurePercentageThreshold != nil { + l = (*wrapperspb.UInt32Value)(m.FailurePercentageThreshold).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnforcingFailurePercentage != nil { + l = (*wrapperspb.UInt32Value)(m.EnforcingFailurePercentage).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnforcingFailurePercentageLocalOrigin != nil { + l = (*wrapperspb.UInt32Value)(m.EnforcingFailurePercentageLocalOrigin).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FailurePercentageMinimumHosts != nil { + l = (*wrapperspb.UInt32Value)(m.FailurePercentageMinimumHosts).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FailurePercentageRequestVolume != nil { + l = (*wrapperspb.UInt32Value)(m.FailurePercentageRequestVolume).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxEjectionTime != nil { + l = (*durationpb.Duration)(m.MaxEjectionTime).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxEjectionTimeJitter != nil { + l = (*durationpb.Duration)(m.MaxEjectionTimeJitter).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SuccessfulActiveHealthCheckUnejectHost != nil { + l = (*wrapperspb.BoolValue)(m.SuccessfulActiveHealthCheckUnejectHost).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Monitors) > 0 { + for _, e := range m.Monitors { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.AlwaysEjectOneHost != nil { + l = (*wrapperspb.BoolValue)(m.AlwaysEjectOneHost).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.go index 75db459f860..659879f9d7e 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/common/matcher/v3/matcher.proto package matcherv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.validate.go index a60724bbcaf..88607c30b17 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. 
// source: envoy/config/common/matcher/v3/matcher.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher_vtproto.pb.go new file mode 100644 index 00000000000..431572ef8e6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher_vtproto.pb.go @@ -0,0 +1,2035 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/common/matcher/v3/matcher.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Matcher_OnMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matcher_OnMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_OnMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.OnMatch.(*Matcher_OnMatch_Action); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.OnMatch.(*Matcher_OnMatch_Matcher); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Matcher_OnMatch_Matcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_OnMatch_Matcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Matcher != nil { + size, err := m.Matcher.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Matcher_OnMatch_Action) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_OnMatch_Action) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Action != nil { + if vtmsg, ok := interface{}(m.Action).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Action) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherList_Predicate_SinglePredicate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Matcher.(*Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Matcher.(*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Input != nil { + if vtmsg, ok := interface{}(m.Input).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Input) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ValueMatch != nil { + if vtmsg, ok := interface{}(m.ValueMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ValueMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.CustomMatch != nil { + if vtmsg, ok := interface{}(m.CustomMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CustomMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherList_Predicate_PredicateList) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matcher_MatcherList_Predicate_PredicateList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate_PredicateList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Predicate) > 0 { + for iNdEx := len(m.Predicate) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Predicate[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Matcher_MatcherList_Predicate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matcher_MatcherList_Predicate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.MatchType.(*Matcher_MatcherList_Predicate_NotMatcher); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchType.(*Matcher_MatcherList_Predicate_AndMatcher); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchType.(*Matcher_MatcherList_Predicate_OrMatcher); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchType.(*Matcher_MatcherList_Predicate_SinglePredicate_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SinglePredicate != nil { + size, err := m.SinglePredicate.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherList_Predicate_OrMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate_OrMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrMatcher != nil { + size, err := m.OrMatcher.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherList_Predicate_AndMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate_AndMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AndMatcher != nil { + size, err := m.AndMatcher.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherList_Predicate_NotMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate_NotMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NotMatcher != nil { + size, err := m.NotMatcher.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherList_FieldMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matcher_MatcherList_FieldMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_FieldMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.OnMatch != nil { + size, err := m.OnMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Predicate != nil { + size, err := m.Predicate.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Matcher_MatcherList) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *Matcher_MatcherList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Matchers) > 0 { + for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Matchers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Matcher_MatcherTree_MatchMap) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matcher_MatcherTree_MatchMap) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherTree_MatchMap) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Map) > 0 { + for k := range m.Map { + v := m.Map[k] + baseI := i + size, err := v.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Matcher_MatcherTree) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matcher_MatcherTree) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherTree) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.TreeType.(*Matcher_MatcherTree_CustomMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.TreeType.(*Matcher_MatcherTree_PrefixMatchMap); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.TreeType.(*Matcher_MatcherTree_ExactMatchMap); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Input != nil { + if vtmsg, ok := interface{}(m.Input).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } 
else { + encoded, err := proto.Marshal(m.Input) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Matcher_MatcherTree_ExactMatchMap) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherTree_ExactMatchMap) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExactMatchMap != nil { + size, err := m.ExactMatchMap.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherTree_PrefixMatchMap) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherTree_PrefixMatchMap) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PrefixMatchMap != nil { + size, err := m.PrefixMatchMap.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherTree_CustomMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherTree_CustomMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.CustomMatch != nil { + if vtmsg, ok := interface{}(m.CustomMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CustomMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Matcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.OnNoMatch != nil { + size, err := m.OnNoMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if msg, ok := m.MatcherType.(*Matcher_MatcherTree_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := 
m.MatcherType.(*Matcher_MatcherList_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Matcher_MatcherList_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MatcherList != nil { + size, err := m.MatcherList.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherTree_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherTree_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MatcherTree != nil { + size, err := m.MatcherTree.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_MatchSet) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchPredicate_MatchSet) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_MatchSet) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Rules[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *MatchPredicate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchPredicate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Rule.(*MatchPredicate_HttpResponseGenericBodyMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpRequestGenericBodyMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := 
m.Rule.(*MatchPredicate_HttpResponseTrailersMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpResponseHeadersMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpRequestTrailersMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpRequestHeadersMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_AnyMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_NotMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_AndMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_OrMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *MatchPredicate_OrMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_OrMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrMatch != nil { + size, err := m.OrMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_AndMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_AndMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AndMatch != nil { + size, err := m.AndMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_NotMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_NotMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NotMatch != nil { + size, err := m.NotMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_AnyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_AnyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.AnyMatch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } 
+ i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpRequestHeadersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpRequestHeadersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpRequestHeadersMatch != nil { + size, err := m.HttpRequestHeadersMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpRequestTrailersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpRequestTrailersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpRequestTrailersMatch != nil { + size, err := m.HttpRequestTrailersMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpResponseHeadersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpResponseHeadersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpResponseHeadersMatch != nil { + size, err := m.HttpResponseHeadersMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpResponseTrailersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpResponseTrailersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpResponseTrailersMatch != nil { + size, err := m.HttpResponseTrailersMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpRequestGenericBodyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpRequestGenericBodyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpRequestGenericBodyMatch != nil { + size, err := m.HttpRequestGenericBodyMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpResponseGenericBodyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpResponseGenericBodyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpResponseGenericBodyMatch != nil { + size, err := m.HttpResponseGenericBodyMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *HttpHeadersMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpHeadersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpHeadersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Headers[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Headers[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Rule.(*HttpGenericBodyMatch_GenericTextMatch_BinaryMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*HttpGenericBodyMatch_GenericTextMatch_StringMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *HttpGenericBodyMatch_GenericTextMatch_StringMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpGenericBodyMatch_GenericTextMatch_StringMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringMatch) + copy(dAtA[i:], m.StringMatch) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StringMatch))) + i-- + dAtA[i] = 0xa + 
return len(dAtA) - i, nil +} +func (m *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.BinaryMatch) + copy(dAtA[i:], m.BinaryMatch) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.BinaryMatch))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *HttpGenericBodyMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpGenericBodyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpGenericBodyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Patterns) > 0 { + for iNdEx := len(m.Patterns) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Patterns[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.BytesLimit != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.BytesLimit)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Matcher_OnMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.OnMatch.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_OnMatch_Matcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Matcher != nil { + l = m.Matcher.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_OnMatch_Action) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Action != nil { + if size, ok := interface{}(m.Action).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Action) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherList_Predicate_SinglePredicate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Input != nil { + if size, ok := interface{}(m.Input).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Input) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.Matcher.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValueMatch != nil { + if size, ok := interface{}(m.ValueMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ValueMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CustomMatch != nil 
{ + if size, ok := interface{}(m.CustomMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CustomMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherList_Predicate_PredicateList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Predicate) > 0 { + for _, e := range m.Predicate { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_MatcherList_Predicate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.MatchType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SinglePredicate != nil { + l = m.SinglePredicate.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherList_Predicate_OrMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrMatcher != nil { + l = m.OrMatcher.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherList_Predicate_AndMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AndMatcher != nil { + l = m.AndMatcher.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherList_Predicate_NotMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotMatcher != nil { + l = m.NotMatcher.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherList_FieldMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Predicate != nil { + l = m.Predicate.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OnMatch != nil { + l = m.OnMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_MatcherList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Matchers) > 0 { + for _, e := range m.Matchers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_MatcherTree_MatchMap) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Map) > 0 { + for k, v := range m.Map { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_MatcherTree) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Input != nil { + if size, ok := interface{}(m.Input).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Input) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.TreeType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_MatcherTree_ExactMatchMap) SizeVT() (n int) { + if m == nil { + return 0 + } + var l 
int + _ = l + if m.ExactMatchMap != nil { + l = m.ExactMatchMap.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherTree_PrefixMatchMap) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PrefixMatchMap != nil { + l = m.PrefixMatchMap.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherTree_CustomMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CustomMatch != nil { + if size, ok := interface{}(m.CustomMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CustomMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.MatcherType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.OnNoMatch != nil { + l = m.OnNoMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_MatcherList_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MatcherList != nil { + l = m.MatcherList.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherTree_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MatcherTree != nil { + l = m.MatcherTree.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_MatchSet) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *MatchPredicate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Rule.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *MatchPredicate_OrMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrMatch != nil { + l = m.OrMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_AndMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AndMatch != nil { + l = m.AndMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_NotMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotMatch != nil { + l = m.NotMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_AnyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *MatchPredicate_HttpRequestHeadersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpRequestHeadersMatch != nil { + l = m.HttpRequestHeadersMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpRequestTrailersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpRequestTrailersMatch != nil { + l = m.HttpRequestTrailersMatch.SizeVT() + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpResponseHeadersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpResponseHeadersMatch != nil { + l = m.HttpResponseHeadersMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpResponseTrailersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpResponseTrailersMatch != nil { + l = m.HttpResponseTrailersMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpRequestGenericBodyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpRequestGenericBodyMatch != nil { + l = m.HttpRequestGenericBodyMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpResponseGenericBodyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpResponseGenericBodyMatch != nil { + l = m.HttpResponseGenericBodyMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HttpHeadersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Headers) > 0 { + for _, e := range m.Headers { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Rule.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *HttpGenericBodyMatch_GenericTextMatch_StringMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StringMatch) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.BinaryMatch) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HttpGenericBodyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BytesLimit != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.BytesLimit)) + } + if len(m.Patterns) > 0 { + for _, e := range m.Patterns { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.go index 6ad40af6423..a0852aa600f 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/core/v3/address.proto package corev3 @@ -10,9 +10,9 @@ import ( _ "github.com/cncf/xds/go/udpa/annotations" _ "github.com/envoyproxy/go-control-plane/envoy/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -357,14 +357,14 @@ type TcpKeepalive struct { // Maximum number of keepalive probes to send without response before deciding // the connection is dead. Default is to use the OS level configuration (unless // overridden, Linux defaults to 9.) - KeepaliveProbes *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=keepalive_probes,json=keepaliveProbes,proto3" json:"keepalive_probes,omitempty"` + KeepaliveProbes *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=keepalive_probes,json=keepaliveProbes,proto3" json:"keepalive_probes,omitempty"` // The number of seconds a connection needs to be idle before keep-alive probes // start being sent. Default is to use the OS level configuration (unless // overridden, Linux defaults to 7200s (i.e., 2 hours.) - KeepaliveTime *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=keepalive_time,json=keepaliveTime,proto3" json:"keepalive_time,omitempty"` + KeepaliveTime *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=keepalive_time,json=keepaliveTime,proto3" json:"keepalive_time,omitempty"` // The number of seconds between keep-alive probes. Default is to use the OS // level configuration (unless overridden, Linux defaults to 75s.) - KeepaliveInterval *wrappers.UInt32Value `protobuf:"bytes,3,opt,name=keepalive_interval,json=keepaliveInterval,proto3" json:"keepalive_interval,omitempty"` + KeepaliveInterval *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=keepalive_interval,json=keepaliveInterval,proto3" json:"keepalive_interval,omitempty"` } func (x *TcpKeepalive) Reset() { @@ -399,21 +399,21 @@ func (*TcpKeepalive) Descriptor() ([]byte, []int) { return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{3} } -func (x *TcpKeepalive) GetKeepaliveProbes() *wrappers.UInt32Value { +func (x *TcpKeepalive) GetKeepaliveProbes() *wrapperspb.UInt32Value { if x != nil { return x.KeepaliveProbes } return nil } -func (x *TcpKeepalive) GetKeepaliveTime() *wrappers.UInt32Value { +func (x *TcpKeepalive) GetKeepaliveTime() *wrapperspb.UInt32Value { if x != nil { return x.KeepaliveTime } return nil } -func (x *TcpKeepalive) GetKeepaliveInterval() *wrappers.UInt32Value { +func (x *TcpKeepalive) GetKeepaliveInterval() *wrapperspb.UInt32Value { if x != nil { return x.KeepaliveInterval } @@ -498,7 +498,7 @@ type BindConfig struct { // to false, the option “IP_FREEBIND“ is disabled on the socket. When this // flag is not set (default), the socket is not modified, i.e. the option is // neither enabled nor disabled. - Freebind *wrappers.BoolValue `protobuf:"bytes,2,opt,name=freebind,proto3" json:"freebind,omitempty"` + Freebind *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=freebind,proto3" json:"freebind,omitempty"` // Additional socket options that may not be present in Envoy source code or // precompiled binaries. 
SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"` @@ -558,7 +558,7 @@ func (x *BindConfig) GetSourceAddress() *SocketAddress { return nil } -func (x *BindConfig) GetFreebind() *wrappers.BoolValue { +func (x *BindConfig) GetFreebind() *wrapperspb.BoolValue { if x != nil { return x.Freebind } @@ -704,7 +704,7 @@ type CidrRange struct { // IPv4 or IPv6 address, e.g. “192.0.0.0“ or “2001:db8::“. AddressPrefix string `protobuf:"bytes,1,opt,name=address_prefix,json=addressPrefix,proto3" json:"address_prefix,omitempty"` // Length of prefix, e.g. 0, 32. Defaults to 0 when unset. - PrefixLen *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=prefix_len,json=prefixLen,proto3" json:"prefix_len,omitempty"` + PrefixLen *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=prefix_len,json=prefixLen,proto3" json:"prefix_len,omitempty"` } func (x *CidrRange) Reset() { @@ -746,7 +746,7 @@ func (x *CidrRange) GetAddressPrefix() string { return "" } -func (x *CidrRange) GetPrefixLen() *wrappers.UInt32Value { +func (x *CidrRange) GetPrefixLen() *wrapperspb.UInt32Value { if x != nil { return x.PrefixLen } @@ -932,20 +932,20 @@ func file_envoy_config_core_v3_address_proto_rawDescGZIP() []byte { var file_envoy_config_core_v3_address_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_envoy_config_core_v3_address_proto_msgTypes = make([]protoimpl.MessageInfo, 8) var file_envoy_config_core_v3_address_proto_goTypes = []interface{}{ - (SocketAddress_Protocol)(0), // 0: envoy.config.core.v3.SocketAddress.Protocol - (*Pipe)(nil), // 1: envoy.config.core.v3.Pipe - (*EnvoyInternalAddress)(nil), // 2: envoy.config.core.v3.EnvoyInternalAddress - (*SocketAddress)(nil), // 3: envoy.config.core.v3.SocketAddress - (*TcpKeepalive)(nil), // 4: envoy.config.core.v3.TcpKeepalive - (*ExtraSourceAddress)(nil), // 5: envoy.config.core.v3.ExtraSourceAddress - (*BindConfig)(nil), // 6: envoy.config.core.v3.BindConfig - (*Address)(nil), // 7: envoy.config.core.v3.Address - (*CidrRange)(nil), // 8: envoy.config.core.v3.CidrRange - (*wrappers.UInt32Value)(nil), // 9: google.protobuf.UInt32Value - (*SocketOptionsOverride)(nil), // 10: envoy.config.core.v3.SocketOptionsOverride - (*wrappers.BoolValue)(nil), // 11: google.protobuf.BoolValue - (*SocketOption)(nil), // 12: envoy.config.core.v3.SocketOption - (*TypedExtensionConfig)(nil), // 13: envoy.config.core.v3.TypedExtensionConfig + (SocketAddress_Protocol)(0), // 0: envoy.config.core.v3.SocketAddress.Protocol + (*Pipe)(nil), // 1: envoy.config.core.v3.Pipe + (*EnvoyInternalAddress)(nil), // 2: envoy.config.core.v3.EnvoyInternalAddress + (*SocketAddress)(nil), // 3: envoy.config.core.v3.SocketAddress + (*TcpKeepalive)(nil), // 4: envoy.config.core.v3.TcpKeepalive + (*ExtraSourceAddress)(nil), // 5: envoy.config.core.v3.ExtraSourceAddress + (*BindConfig)(nil), // 6: envoy.config.core.v3.BindConfig + (*Address)(nil), // 7: envoy.config.core.v3.Address + (*CidrRange)(nil), // 8: envoy.config.core.v3.CidrRange + (*wrapperspb.UInt32Value)(nil), // 9: google.protobuf.UInt32Value + (*SocketOptionsOverride)(nil), // 10: envoy.config.core.v3.SocketOptionsOverride + (*wrapperspb.BoolValue)(nil), // 11: google.protobuf.BoolValue + (*SocketOption)(nil), // 12: envoy.config.core.v3.SocketOption + (*TypedExtensionConfig)(nil), // 13: envoy.config.core.v3.TypedExtensionConfig } var file_envoy_config_core_v3_address_proto_depIdxs = []int32{ 0, // 0: envoy.config.core.v3.SocketAddress.protocol:type_name -> 
envoy.config.core.v3.SocketAddress.Protocol diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.validate.go index 1bf3f99586b..81dea205cd2 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/core/v3/address.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address_vtproto.pb.go new file mode 100644 index 00000000000..cf1777901aa --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address_vtproto.pb.go @@ -0,0 +1,859 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/address.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Pipe) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Pipe) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Pipe) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Mode != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Mode)) + i-- + dAtA[i] = 0x10 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EnvoyInternalAddress) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvoyInternalAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EnvoyInternalAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.EndpointId) > 0 { + i -= len(m.EndpointId) + copy(dAtA[i:], 
m.EndpointId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.EndpointId))) + i-- + dAtA[i] = 0x12 + } + if msg, ok := m.AddressNameSpecifier.(*EnvoyInternalAddress_ServerListenerName); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *EnvoyInternalAddress_ServerListenerName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EnvoyInternalAddress_ServerListenerName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.ServerListenerName) + copy(dAtA[i:], m.ServerListenerName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServerListenerName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *SocketAddress) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SocketAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Ipv4Compat { + i-- + if m.Ipv4Compat { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.ResolverName) > 0 { + i -= len(m.ResolverName) + copy(dAtA[i:], m.ResolverName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResolverName))) + i-- + dAtA[i] = 0x2a + } + if msg, ok := m.PortSpecifier.(*SocketAddress_NamedPort); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PortSpecifier.(*SocketAddress_PortValue); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0x12 + } + if m.Protocol != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Protocol)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SocketAddress_PortValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketAddress_PortValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.PortValue)) + i-- + dAtA[i] = 0x18 + return len(dAtA) - i, nil +} +func (m *SocketAddress_NamedPort) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketAddress_NamedPort) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.NamedPort) + copy(dAtA[i:], m.NamedPort) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.NamedPort))) + i-- + dAtA[i] = 0x22 + return len(dAtA) - i, nil +} +func (m *TcpKeepalive) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err 
!= nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TcpKeepalive) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TcpKeepalive) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.KeepaliveInterval != nil { + size, err := (*wrapperspb.UInt32Value)(m.KeepaliveInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.KeepaliveTime != nil { + size, err := (*wrapperspb.UInt32Value)(m.KeepaliveTime).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.KeepaliveProbes != nil { + size, err := (*wrapperspb.UInt32Value)(m.KeepaliveProbes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExtraSourceAddress) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExtraSourceAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ExtraSourceAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SocketOptions != nil { + size, err := m.SocketOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Address != nil { + size, err := m.Address.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BindConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BindConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *BindConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LocalAddressSelector != nil { + size, err := m.LocalAddressSelector.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.ExtraSourceAddresses) > 0 { + for iNdEx := len(m.ExtraSourceAddresses) - 1; iNdEx >= 0; iNdEx-- { + size, err := 
m.ExtraSourceAddresses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.AdditionalSourceAddresses) > 0 { + for iNdEx := len(m.AdditionalSourceAddresses) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.AdditionalSourceAddresses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.SocketOptions) > 0 { + for iNdEx := len(m.SocketOptions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.SocketOptions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.Freebind != nil { + size, err := (*wrapperspb.BoolValue)(m.Freebind).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.SourceAddress != nil { + size, err := m.SourceAddress.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Address) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Address) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Address) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Address.(*Address_EnvoyInternalAddress); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Address.(*Address_Pipe); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Address.(*Address_SocketAddress); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Address_SocketAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Address_SocketAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SocketAddress != nil { + size, err := m.SocketAddress.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Address_Pipe) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Address_Pipe) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Pipe != nil { + size, err := m.Pipe.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i 
-= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Address_EnvoyInternalAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Address_EnvoyInternalAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.EnvoyInternalAddress != nil { + size, err := m.EnvoyInternalAddress.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *CidrRange) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CidrRange) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CidrRange) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PrefixLen != nil { + size, err := (*wrapperspb.UInt32Value)(m.PrefixLen).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.AddressPrefix) > 0 { + i -= len(m.AddressPrefix) + copy(dAtA[i:], m.AddressPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AddressPrefix))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Pipe) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Mode != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Mode)) + } + n += len(m.unknownFields) + return n +} + +func (m *EnvoyInternalAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.AddressNameSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + l = len(m.EndpointId) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *EnvoyInternalAddress_ServerListenerName) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ServerListenerName) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *SocketAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Protocol != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Protocol)) + } + l = len(m.Address) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.PortSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + l = len(m.ResolverName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Ipv4Compat { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SocketAddress_PortValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.PortValue)) + return n +} 
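Note on the pattern above: every generated MarshalToSizedBufferVTStrict fills dAtA back to front, writing each field's payload first, then its varint-encoded length, then the wire-format tag byte (0x0a, 0x12, 0x1a, ... for fields 1, 2, 3, ...). A minimal standalone sketch of that technique, standard library only — encodeVarint and sizeOfVarint below are local stand-ins mirroring protohelpers, and marshalName is a hypothetical single-field message, not code from this patch:

package main

import "fmt"

// encodeVarint writes v as a protobuf varint so that it ends at
// dAtA[offset-1] and returns the offset of its first byte. This mirrors
// what protohelpers.EncodeVarint does in the generated code above.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sizeOfVarint(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

// sizeOfVarint returns how many bytes the varint encoding of v occupies.
func sizeOfVarint(v uint64) (n int) {
	for {
		n++
		v >>= 7
		if v == 0 {
			return n
		}
	}
}

// marshalName encodes a single string field (field number 1, wire type 2)
// back to front: value bytes, then the length varint, then tag byte 0x0a.
func marshalName(name string) []byte {
	size := 1 + sizeOfVarint(uint64(len(name))) + len(name)
	dAtA := make([]byte, size)
	i := len(dAtA)
	i -= len(name)
	copy(dAtA[i:], name)
	i = encodeVarint(dAtA, i, uint64(len(name)))
	i--
	dAtA[i] = 0x0a
	return dAtA[i:]
}

func main() {
	fmt.Printf("% x\n", marshalName("pipe-path")) // 0a 09 70 69 70 65 2d 70 61 74 68
}

Writing in reverse is what lets SizeVT be computed once up front and every nested message be length-prefixed without shifting bytes afterwards.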
+func (m *SocketAddress_NamedPort) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.NamedPort) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *TcpKeepalive) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.KeepaliveProbes != nil { + l = (*wrapperspb.UInt32Value)(m.KeepaliveProbes).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.KeepaliveTime != nil { + l = (*wrapperspb.UInt32Value)(m.KeepaliveTime).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.KeepaliveInterval != nil { + l = (*wrapperspb.UInt32Value)(m.KeepaliveInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ExtraSourceAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Address != nil { + l = m.Address.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SocketOptions != nil { + l = m.SocketOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *BindConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SourceAddress != nil { + l = m.SourceAddress.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Freebind != nil { + l = (*wrapperspb.BoolValue)(m.Freebind).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.SocketOptions) > 0 { + for _, e := range m.SocketOptions { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.AdditionalSourceAddresses) > 0 { + for _, e := range m.AdditionalSourceAddresses { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ExtraSourceAddresses) > 0 { + for _, e := range m.ExtraSourceAddresses { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.LocalAddressSelector != nil { + l = m.LocalAddressSelector.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Address) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Address.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Address_SocketAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SocketAddress != nil { + l = m.SocketAddress.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Address_Pipe) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pipe != nil { + l = m.Pipe.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Address_EnvoyInternalAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EnvoyInternalAddress != nil { + l = m.EnvoyInternalAddress.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CidrRange) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AddressPrefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PrefixLen != nil { + l = (*wrapperspb.UInt32Value)(m.PrefixLen).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.go index 06b61820c44..68841ad1574 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/core/v3/backoff.proto package corev3 @@ -9,9 +9,9 @@ package corev3 import ( _ "github.com/cncf/xds/go/udpa/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" - duration "github.com/golang/protobuf/ptypes/duration" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" reflect "reflect" sync "sync" ) @@ -32,13 +32,13 @@ type BackoffStrategy struct { // The base interval to be used for the next back off computation. It should // be greater than zero and less than or equal to :ref:`max_interval // `. - BaseInterval *duration.Duration `protobuf:"bytes,1,opt,name=base_interval,json=baseInterval,proto3" json:"base_interval,omitempty"` + BaseInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=base_interval,json=baseInterval,proto3" json:"base_interval,omitempty"` // Specifies the maximum interval between retries. This parameter is optional, // but must be greater than or equal to the :ref:`base_interval // ` if set. The default // is 10 times the :ref:`base_interval // `. 
- MaxInterval *duration.Duration `protobuf:"bytes,2,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"` + MaxInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"` } func (x *BackoffStrategy) Reset() { @@ -73,14 +73,14 @@ func (*BackoffStrategy) Descriptor() ([]byte, []int) { return file_envoy_config_core_v3_backoff_proto_rawDescGZIP(), []int{0} } -func (x *BackoffStrategy) GetBaseInterval() *duration.Duration { +func (x *BackoffStrategy) GetBaseInterval() *durationpb.Duration { if x != nil { return x.BaseInterval } return nil } -func (x *BackoffStrategy) GetMaxInterval() *duration.Duration { +func (x *BackoffStrategy) GetMaxInterval() *durationpb.Duration { if x != nil { return x.MaxInterval } @@ -140,8 +140,8 @@ func file_envoy_config_core_v3_backoff_proto_rawDescGZIP() []byte { var file_envoy_config_core_v3_backoff_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_envoy_config_core_v3_backoff_proto_goTypes = []interface{}{ - (*BackoffStrategy)(nil), // 0: envoy.config.core.v3.BackoffStrategy - (*duration.Duration)(nil), // 1: google.protobuf.Duration + (*BackoffStrategy)(nil), // 0: envoy.config.core.v3.BackoffStrategy + (*durationpb.Duration)(nil), // 1: google.protobuf.Duration } var file_envoy_config_core_v3_backoff_proto_depIdxs = []int32{ 1, // 0: envoy.config.core.v3.BackoffStrategy.base_interval:type_name -> google.protobuf.Duration diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.validate.go index c9b6590ccb0..6c9df76280f 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/core/v3/backoff.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff_vtproto.pb.go new file mode 100644 index 00000000000..3c66ff712af --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff_vtproto.pb.go @@ -0,0 +1,91 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/backoff.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *BackoffStrategy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BackoffStrategy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *BackoffStrategy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxInterval != nil { + size, err := (*durationpb.Duration)(m.MaxInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.BaseInterval != nil { + size, err := (*durationpb.Duration)(m.BaseInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BackoffStrategy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BaseInterval != nil { + l = (*durationpb.Duration)(m.BaseInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxInterval != nil { + l = (*durationpb.Duration)(m.MaxInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.go index 8935f058a45..862e09a8337 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/core/v3/base.proto package corev3 @@ -12,11 +12,11 @@ import ( _ "github.com/envoyproxy/go-control-plane/envoy/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any1 "github.com/golang/protobuf/ptypes/any" - _struct "github.com/golang/protobuf/ptypes/struct" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -205,13 +205,83 @@ func (TrafficDirection) EnumDescriptor() ([]byte, []int) { return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{2} } +// Describes the supported actions types for key/value pair append action. +type KeyValueAppend_KeyValueAppendAction int32 + +const ( + // If the key already exists, this action will result in the following behavior: + // + // - Comma-concatenated value if multiple values are not allowed. 
+ // - New value added to the list of values if multiple values are allowed. + // + // If the key doesn't exist then this will add pair with specified key and value. + KeyValueAppend_APPEND_IF_EXISTS_OR_ADD KeyValueAppend_KeyValueAppendAction = 0 + // This action will add the key/value pair if it doesn't already exist. If the + // key already exists then this will be a no-op. + KeyValueAppend_ADD_IF_ABSENT KeyValueAppend_KeyValueAppendAction = 1 + // This action will overwrite the specified value by discarding any existing + // values if the key already exists. If the key doesn't exist then this will add + // the pair with specified key and value. + KeyValueAppend_OVERWRITE_IF_EXISTS_OR_ADD KeyValueAppend_KeyValueAppendAction = 2 + // This action will overwrite the specified value by discarding any existing + // values if the key already exists. If the key doesn't exist then this will + // be no-op. + KeyValueAppend_OVERWRITE_IF_EXISTS KeyValueAppend_KeyValueAppendAction = 3 +) + +// Enum value maps for KeyValueAppend_KeyValueAppendAction. +var ( + KeyValueAppend_KeyValueAppendAction_name = map[int32]string{ + 0: "APPEND_IF_EXISTS_OR_ADD", + 1: "ADD_IF_ABSENT", + 2: "OVERWRITE_IF_EXISTS_OR_ADD", + 3: "OVERWRITE_IF_EXISTS", + } + KeyValueAppend_KeyValueAppendAction_value = map[string]int32{ + "APPEND_IF_EXISTS_OR_ADD": 0, + "ADD_IF_ABSENT": 1, + "OVERWRITE_IF_EXISTS_OR_ADD": 2, + "OVERWRITE_IF_EXISTS": 3, + } +) + +func (x KeyValueAppend_KeyValueAppendAction) Enum() *KeyValueAppend_KeyValueAppendAction { + p := new(KeyValueAppend_KeyValueAppendAction) + *p = x + return p +} + +func (x KeyValueAppend_KeyValueAppendAction) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (KeyValueAppend_KeyValueAppendAction) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_base_proto_enumTypes[3].Descriptor() +} + +func (KeyValueAppend_KeyValueAppendAction) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_base_proto_enumTypes[3] +} + +func (x KeyValueAppend_KeyValueAppendAction) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use KeyValueAppend_KeyValueAppendAction.Descriptor instead. +func (KeyValueAppend_KeyValueAppendAction) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{10, 0} +} + // Describes the supported actions types for header append action. type HeaderValueOption_HeaderAppendAction int32 const ( - // This action will append the specified value to the existing values if the header - // already exists. If the header doesn't exist then this will add the header with - // specified key and value. + // If the header already exists, this action will result in: + // + // - Comma-concatenated for predefined inline headers. + // - Duplicate header added in the “HeaderMap“ for other headers. + // + // If the header doesn't exist then this will add new header with specified key and value. HeaderValueOption_APPEND_IF_EXISTS_OR_ADD HeaderValueOption_HeaderAppendAction = 0 // This action will add the header if it doesn't already exist. If the header // already exists then this will be a no-op. 
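The KeyValueAppend_KeyValueAppendAction values introduced in this hunk deliberately parallel HeaderValueOption_HeaderAppendAction. A rough sketch of the documented semantics against a plain multi-value map — illustrative only; action, apply, and the map representation are hypothetical, not part of the generated API:

package main

import "fmt"

type action int

const (
	appendIfExistsOrAdd    action = iota // APPEND_IF_EXISTS_OR_ADD
	addIfAbsent                          // ADD_IF_ABSENT
	overwriteIfExistsOrAdd               // OVERWRITE_IF_EXISTS_OR_ADD
	overwriteIfExists                    // OVERWRITE_IF_EXISTS
)

// apply mutates kv per the enum docs above, assuming multiple values
// per key are allowed (the comma-concatenation case is analogous).
func apply(kv map[string][]string, key, value string, a action) {
	_, exists := kv[key]
	switch a {
	case appendIfExistsOrAdd:
		kv[key] = append(kv[key], value) // add to the value list either way
	case addIfAbsent:
		if !exists {
			kv[key] = []string{value} // no-op when the key is present
		}
	case overwriteIfExistsOrAdd:
		kv[key] = []string{value} // discard any existing values
	case overwriteIfExists:
		if exists {
			kv[key] = []string{value} // no-op when the key is absent
		}
	}
}

func main() {
	kv := map[string][]string{"k": {"v1"}}
	apply(kv, "k", "v2", appendIfExistsOrAdd)
	fmt.Println(kv) // map[k:[v1 v2]]
}

Note that OVERWRITE_IF_EXISTS_OR_ADD and OVERWRITE_IF_EXISTS behave identically on an existing key; they differ only when the key is absent (add versus no-op).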
@@ -252,11 +322,11 @@ func (x HeaderValueOption_HeaderAppendAction) String() string { } func (HeaderValueOption_HeaderAppendAction) Descriptor() protoreflect.EnumDescriptor { - return file_envoy_config_core_v3_base_proto_enumTypes[3].Descriptor() + return file_envoy_config_core_v3_base_proto_enumTypes[4].Descriptor() } func (HeaderValueOption_HeaderAppendAction) Type() protoreflect.EnumType { - return &file_envoy_config_core_v3_base_proto_enumTypes[3] + return &file_envoy_config_core_v3_base_proto_enumTypes[4] } func (x HeaderValueOption_HeaderAppendAction) Number() protoreflect.EnumNumber { @@ -265,7 +335,7 @@ func (x HeaderValueOption_HeaderAppendAction) Number() protoreflect.EnumNumber { // Deprecated: Use HeaderValueOption_HeaderAppendAction.Descriptor instead. func (HeaderValueOption_HeaderAppendAction) EnumDescriptor() ([]byte, []int) { - return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{11, 0} + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{14, 0} } // Identifies location of where either Envoy runs or where upstream hosts run. @@ -355,7 +425,7 @@ type BuildVersion struct { Version *v3.SemanticVersion `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` // Free-form build information. // Envoy defines several well known keys in the source/common/version/version.h file - Metadata *_struct.Struct `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + Metadata *structpb.Struct `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` } func (x *BuildVersion) Reset() { @@ -397,7 +467,7 @@ func (x *BuildVersion) GetVersion() *v3.SemanticVersion { return nil } -func (x *BuildVersion) GetMetadata() *_struct.Struct { +func (x *BuildVersion) GetMetadata() *structpb.Struct { if x != nil { return x.Metadata } @@ -542,7 +612,7 @@ type Node struct { Cluster string `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` // Opaque metadata extending the node identifier. Envoy will pass this // directly to the management server. - Metadata *_struct.Struct `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"` + Metadata *structpb.Struct `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"` // Map from xDS resource type URL to dynamic context parameters. These may vary at runtime (unlike // other fields in this message). For example, the xDS client may have a shard identifier that // changes during the lifetime of the xDS client. In Envoy, this would be achieved by updating the @@ -622,7 +692,7 @@ func (x *Node) GetCluster() string { return "" } -func (x *Node) GetMetadata() *_struct.Struct { +func (x *Node) GetMetadata() *structpb.Struct { if x != nil { return x.Metadata } @@ -746,14 +816,14 @@ type Metadata struct { // :ref:`typed_filter_metadata ` // fields are present in the metadata with same keys, // only “typed_filter_metadata“ field will be parsed. - FilterMetadata map[string]*_struct.Struct `protobuf:"bytes,1,rep,name=filter_metadata,json=filterMetadata,proto3" json:"filter_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + FilterMetadata map[string]*structpb.Struct `protobuf:"bytes,1,rep,name=filter_metadata,json=filterMetadata,proto3" json:"filter_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Key is the reverse DNS filter name, e.g. com.acme.widget. The “envoy.*“ // namespace is reserved for Envoy's built-in filters. 
// The value is encoded as google.protobuf.Any. // If both :ref:`filter_metadata ` // and “typed_filter_metadata“ fields are present in the metadata with same keys, // only “typed_filter_metadata“ field will be parsed. - TypedFilterMetadata map[string]*any1.Any `protobuf:"bytes,2,rep,name=typed_filter_metadata,json=typedFilterMetadata,proto3" json:"typed_filter_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TypedFilterMetadata map[string]*anypb.Any `protobuf:"bytes,2,rep,name=typed_filter_metadata,json=typedFilterMetadata,proto3" json:"typed_filter_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *Metadata) Reset() { @@ -788,14 +858,14 @@ func (*Metadata) Descriptor() ([]byte, []int) { return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{4} } -func (x *Metadata) GetFilterMetadata() map[string]*_struct.Struct { +func (x *Metadata) GetFilterMetadata() map[string]*structpb.Struct { if x != nil { return x.FilterMetadata } return nil } -func (x *Metadata) GetTypedFilterMetadata() map[string]*any1.Any { +func (x *Metadata) GetTypedFilterMetadata() map[string]*anypb.Any { if x != nil { return x.TypedFilterMetadata } @@ -983,7 +1053,7 @@ type RuntimeFeatureFlag struct { unknownFields protoimpl.UnknownFields // Default value if runtime value is not available. - DefaultValue *wrappers.BoolValue `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + DefaultValue *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` // Runtime key to get value for comparison. This value is used if defined. The boolean value must // be represented via its // `canonical JSON encoding `_. @@ -1022,7 +1092,7 @@ func (*RuntimeFeatureFlag) Descriptor() ([]byte, []int) { return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{8} } -func (x *RuntimeFeatureFlag) GetDefaultValue() *wrappers.BoolValue { +func (x *RuntimeFeatureFlag) GetDefaultValue() *wrapperspb.BoolValue { if x != nil { return x.DefaultValue } @@ -1036,6 +1106,181 @@ func (x *RuntimeFeatureFlag) GetRuntimeKey() string { return "" } +type KeyValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The key of the key/value pair. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The value of the key/value pair. + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *KeyValue) Reset() { + *x = KeyValue{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyValue) ProtoMessage() {} + +func (x *KeyValue) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead. 
+func (*KeyValue) Descriptor() ([]byte, []int) {
+	return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *KeyValue) GetKey() string {
+	if x != nil {
+		return x.Key
+	}
+	return ""
+}
+
+func (x *KeyValue) GetValue() []byte {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+// Key/value pair plus option to control append behavior. This is used to specify
+// key/value pairs that should be appended to a set of existing key/value pairs.
+type KeyValueAppend struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Key/value pair entry that this option to append or overwrite.
+	Entry *KeyValue `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"`
+	// Describes the action taken to append/overwrite the given value for an existing
+	// key or to only add this key if it's absent.
+	Action KeyValueAppend_KeyValueAppendAction `protobuf:"varint,2,opt,name=action,proto3,enum=envoy.config.core.v3.KeyValueAppend_KeyValueAppendAction" json:"action,omitempty"`
+}
+
+func (x *KeyValueAppend) Reset() {
+	*x = KeyValueAppend{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_envoy_config_core_v3_base_proto_msgTypes[10]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *KeyValueAppend) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KeyValueAppend) ProtoMessage() {}
+
+func (x *KeyValueAppend) ProtoReflect() protoreflect.Message {
+	mi := &file_envoy_config_core_v3_base_proto_msgTypes[10]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use KeyValueAppend.ProtoReflect.Descriptor instead.
+func (*KeyValueAppend) Descriptor() ([]byte, []int) {
+	return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *KeyValueAppend) GetEntry() *KeyValue {
+	if x != nil {
+		return x.Entry
+	}
+	return nil
+}
+
+func (x *KeyValueAppend) GetAction() KeyValueAppend_KeyValueAppendAction {
+	if x != nil {
+		return x.Action
+	}
+	return KeyValueAppend_APPEND_IF_EXISTS_OR_ADD
+}
+
+// Key/value pair to append or remove.
+type KeyValueMutation struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Key/value pair to append or overwrite. Only one of ``append`` or ``remove`` can be set.
+	Append *KeyValueAppend `protobuf:"bytes,1,opt,name=append,proto3" json:"append,omitempty"`
+	// Key to remove. Only one of ``append`` or ``remove`` can be set.
+	Remove string `protobuf:"bytes,2,opt,name=remove,proto3" json:"remove,omitempty"`
+}
+
+func (x *KeyValueMutation) Reset() {
+	*x = KeyValueMutation{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_envoy_config_core_v3_base_proto_msgTypes[11]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *KeyValueMutation) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KeyValueMutation) ProtoMessage() {}
+
+func (x *KeyValueMutation) ProtoReflect() protoreflect.Message {
+	mi := &file_envoy_config_core_v3_base_proto_msgTypes[11]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use KeyValueMutation.ProtoReflect.Descriptor instead.
+func (*KeyValueMutation) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{11} +} + +func (x *KeyValueMutation) GetAppend() *KeyValueAppend { + if x != nil { + return x.Append + } + return nil +} + +func (x *KeyValueMutation) GetRemove() string { + if x != nil { + return x.Remove + } + return "" +} + // Query parameter name/value pair. type QueryParameter struct { state protoimpl.MessageState @@ -1051,7 +1296,7 @@ type QueryParameter struct { func (x *QueryParameter) Reset() { *x = QueryParameter{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_core_v3_base_proto_msgTypes[9] + mi := &file_envoy_config_core_v3_base_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1064,7 +1309,7 @@ func (x *QueryParameter) String() string { func (*QueryParameter) ProtoMessage() {} func (x *QueryParameter) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_core_v3_base_proto_msgTypes[9] + mi := &file_envoy_config_core_v3_base_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1077,7 +1322,7 @@ func (x *QueryParameter) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryParameter.ProtoReflect.Descriptor instead. func (*QueryParameter) Descriptor() ([]byte, []int) { - return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{9} + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{12} } func (x *QueryParameter) GetKey() string { @@ -1118,7 +1363,7 @@ type HeaderValue struct { func (x *HeaderValue) Reset() { *x = HeaderValue{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_core_v3_base_proto_msgTypes[10] + mi := &file_envoy_config_core_v3_base_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1131,7 +1376,7 @@ func (x *HeaderValue) String() string { func (*HeaderValue) ProtoMessage() {} func (x *HeaderValue) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_core_v3_base_proto_msgTypes[10] + mi := &file_envoy_config_core_v3_base_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1144,7 +1389,7 @@ func (x *HeaderValue) ProtoReflect() protoreflect.Message { // Deprecated: Use HeaderValue.ProtoReflect.Descriptor instead. func (*HeaderValue) Descriptor() ([]byte, []int) { - return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{10} + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{13} } func (x *HeaderValue) GetKey() string { @@ -1188,7 +1433,7 @@ type HeaderValueOption struct { // default value (``false``) for this field. // // Deprecated: Marked as deprecated in envoy/config/core/v3/base.proto. - Append *wrappers.BoolValue `protobuf:"bytes,2,opt,name=append,proto3" json:"append,omitempty"` + Append *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=append,proto3" json:"append,omitempty"` // Describes the action taken to append/overwrite the given value for an existing header // or to only add this header if it's absent. 
// Value defaults to :ref:`APPEND_IF_EXISTS_OR_ADD @@ -1202,7 +1447,7 @@ type HeaderValueOption struct { func (x *HeaderValueOption) Reset() { *x = HeaderValueOption{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_core_v3_base_proto_msgTypes[11] + mi := &file_envoy_config_core_v3_base_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1215,7 +1460,7 @@ func (x *HeaderValueOption) String() string { func (*HeaderValueOption) ProtoMessage() {} func (x *HeaderValueOption) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_core_v3_base_proto_msgTypes[11] + mi := &file_envoy_config_core_v3_base_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1228,7 +1473,7 @@ func (x *HeaderValueOption) ProtoReflect() protoreflect.Message { // Deprecated: Use HeaderValueOption.ProtoReflect.Descriptor instead. func (*HeaderValueOption) Descriptor() ([]byte, []int) { - return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{11} + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{14} } func (x *HeaderValueOption) GetHeader() *HeaderValue { @@ -1239,7 +1484,7 @@ func (x *HeaderValueOption) GetHeader() *HeaderValue { } // Deprecated: Marked as deprecated in envoy/config/core/v3/base.proto. -func (x *HeaderValueOption) GetAppend() *wrappers.BoolValue { +func (x *HeaderValueOption) GetAppend() *wrapperspb.BoolValue { if x != nil { return x.Append } @@ -1272,7 +1517,7 @@ type HeaderMap struct { func (x *HeaderMap) Reset() { *x = HeaderMap{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_core_v3_base_proto_msgTypes[12] + mi := &file_envoy_config_core_v3_base_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1285,7 +1530,7 @@ func (x *HeaderMap) String() string { func (*HeaderMap) ProtoMessage() {} func (x *HeaderMap) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_core_v3_base_proto_msgTypes[12] + mi := &file_envoy_config_core_v3_base_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1298,7 +1543,7 @@ func (x *HeaderMap) ProtoReflect() protoreflect.Message { // Deprecated: Use HeaderMap.ProtoReflect.Descriptor instead. 
func (*HeaderMap) Descriptor() ([]byte, []int) { - return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{12} + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{15} } func (x *HeaderMap) GetHeaders() []*HeaderValue { @@ -1322,7 +1567,7 @@ type WatchedDirectory struct { func (x *WatchedDirectory) Reset() { *x = WatchedDirectory{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_core_v3_base_proto_msgTypes[13] + mi := &file_envoy_config_core_v3_base_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1335,7 +1580,7 @@ func (x *WatchedDirectory) String() string { func (*WatchedDirectory) ProtoMessage() {} func (x *WatchedDirectory) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_core_v3_base_proto_msgTypes[13] + mi := &file_envoy_config_core_v3_base_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1348,7 +1593,7 @@ func (x *WatchedDirectory) ProtoReflect() protoreflect.Message { // Deprecated: Use WatchedDirectory.ProtoReflect.Descriptor instead. func (*WatchedDirectory) Descriptor() ([]byte, []int) { - return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{13} + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{16} } func (x *WatchedDirectory) GetPath() string { @@ -1359,6 +1604,7 @@ func (x *WatchedDirectory) GetPath() string { } // Data source consisting of a file, an inline value, or an environment variable. +// [#next-free-field: 6] type DataSource struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1371,12 +1617,30 @@ type DataSource struct { // *DataSource_InlineString // *DataSource_EnvironmentVariable Specifier isDataSource_Specifier `protobuf_oneof:"specifier"` + // Watched directory that is watched for file changes. If this is set explicitly, the file + // specified in the “filename“ field will be reloaded when relevant file move events occur. + // + // .. note:: + // + // This field only makes sense when the ``filename`` field is set. + // + // .. note:: + // + // Envoy only updates when the file is replaced by a file move, and not when the file is + // edited in place. + // + // .. note:: + // + // Not all use cases of ``DataSource`` support watching directories. It depends on the + // specific usage of the ``DataSource``. See the documentation of the parent message for + // details. + WatchedDirectory *WatchedDirectory `protobuf:"bytes,5,opt,name=watched_directory,json=watchedDirectory,proto3" json:"watched_directory,omitempty"` } func (x *DataSource) Reset() { *x = DataSource{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_core_v3_base_proto_msgTypes[14] + mi := &file_envoy_config_core_v3_base_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1389,7 +1653,7 @@ func (x *DataSource) String() string { func (*DataSource) ProtoMessage() {} func (x *DataSource) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_core_v3_base_proto_msgTypes[14] + mi := &file_envoy_config_core_v3_base_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1402,7 +1666,7 @@ func (x *DataSource) ProtoReflect() protoreflect.Message { // Deprecated: Use DataSource.ProtoReflect.Descriptor instead. 
func (*DataSource) Descriptor() ([]byte, []int) { - return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{14} + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{17} } func (m *DataSource) GetSpecifier() isDataSource_Specifier { @@ -1440,6 +1704,13 @@ func (x *DataSource) GetEnvironmentVariable() string { return "" } +func (x *DataSource) GetWatchedDirectory() *WatchedDirectory { + if x != nil { + return x.WatchedDirectory + } + return nil +} + type isDataSource_Specifier interface { isDataSource_Specifier() } @@ -1473,6 +1744,7 @@ func (*DataSource_InlineString) isDataSource_Specifier() {} func (*DataSource_EnvironmentVariable) isDataSource_Specifier() {} // The message specifies the retry policy of remote data source when fetching fails. +// [#next-free-field: 7] type RetryPolicy struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1484,13 +1756,21 @@ type RetryPolicy struct { RetryBackOff *BackoffStrategy `protobuf:"bytes,1,opt,name=retry_back_off,json=retryBackOff,proto3" json:"retry_back_off,omitempty"` // Specifies the allowed number of retries. This parameter is optional and // defaults to 1. - NumRetries *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=num_retries,json=numRetries,proto3" json:"num_retries,omitempty"` + NumRetries *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=num_retries,json=numRetries,proto3" json:"num_retries,omitempty"` + // For details, see :ref:`retry_on `. + RetryOn string `protobuf:"bytes,3,opt,name=retry_on,json=retryOn,proto3" json:"retry_on,omitempty"` + // For details, see :ref:`retry_priority `. + RetryPriority *RetryPolicy_RetryPriority `protobuf:"bytes,4,opt,name=retry_priority,json=retryPriority,proto3" json:"retry_priority,omitempty"` + // For details, see :ref:`RetryHostPredicate `. + RetryHostPredicate []*RetryPolicy_RetryHostPredicate `protobuf:"bytes,5,rep,name=retry_host_predicate,json=retryHostPredicate,proto3" json:"retry_host_predicate,omitempty"` + // For details, see :ref:`host_selection_retry_max_attempts `. + HostSelectionRetryMaxAttempts int64 `protobuf:"varint,6,opt,name=host_selection_retry_max_attempts,json=hostSelectionRetryMaxAttempts,proto3" json:"host_selection_retry_max_attempts,omitempty"` } func (x *RetryPolicy) Reset() { *x = RetryPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_core_v3_base_proto_msgTypes[15] + mi := &file_envoy_config_core_v3_base_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1503,7 +1783,7 @@ func (x *RetryPolicy) String() string { func (*RetryPolicy) ProtoMessage() {} func (x *RetryPolicy) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_core_v3_base_proto_msgTypes[15] + mi := &file_envoy_config_core_v3_base_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1516,7 +1796,7 @@ func (x *RetryPolicy) ProtoReflect() protoreflect.Message { // Deprecated: Use RetryPolicy.ProtoReflect.Descriptor instead. 
func (*RetryPolicy) Descriptor() ([]byte, []int) { - return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{15} + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{18} } func (x *RetryPolicy) GetRetryBackOff() *BackoffStrategy { @@ -1526,13 +1806,41 @@ func (x *RetryPolicy) GetRetryBackOff() *BackoffStrategy { return nil } -func (x *RetryPolicy) GetNumRetries() *wrappers.UInt32Value { +func (x *RetryPolicy) GetNumRetries() *wrapperspb.UInt32Value { if x != nil { return x.NumRetries } return nil } +func (x *RetryPolicy) GetRetryOn() string { + if x != nil { + return x.RetryOn + } + return "" +} + +func (x *RetryPolicy) GetRetryPriority() *RetryPolicy_RetryPriority { + if x != nil { + return x.RetryPriority + } + return nil +} + +func (x *RetryPolicy) GetRetryHostPredicate() []*RetryPolicy_RetryHostPredicate { + if x != nil { + return x.RetryHostPredicate + } + return nil +} + +func (x *RetryPolicy) GetHostSelectionRetryMaxAttempts() int64 { + if x != nil { + return x.HostSelectionRetryMaxAttempts + } + return 0 +} + // The message specifies how to fetch data from remote and how to verify it. type RemoteDataSource struct { state protoimpl.MessageState @@ -1550,7 +1858,7 @@ type RemoteDataSource struct { func (x *RemoteDataSource) Reset() { *x = RemoteDataSource{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_core_v3_base_proto_msgTypes[16] + mi := &file_envoy_config_core_v3_base_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1563,7 +1871,7 @@ func (x *RemoteDataSource) String() string { func (*RemoteDataSource) ProtoMessage() {} func (x *RemoteDataSource) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_core_v3_base_proto_msgTypes[16] + mi := &file_envoy_config_core_v3_base_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1576,7 +1884,7 @@ func (x *RemoteDataSource) ProtoReflect() protoreflect.Message { // Deprecated: Use RemoteDataSource.ProtoReflect.Descriptor instead. func (*RemoteDataSource) Descriptor() ([]byte, []int) { - return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{16} + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{19} } func (x *RemoteDataSource) GetHttpUri() *HttpUri { @@ -1616,7 +1924,7 @@ type AsyncDataSource struct { func (x *AsyncDataSource) Reset() { *x = AsyncDataSource{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_core_v3_base_proto_msgTypes[17] + mi := &file_envoy_config_core_v3_base_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1629,7 +1937,7 @@ func (x *AsyncDataSource) String() string { func (*AsyncDataSource) ProtoMessage() {} func (x *AsyncDataSource) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_core_v3_base_proto_msgTypes[17] + mi := &file_envoy_config_core_v3_base_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1642,7 +1950,7 @@ func (x *AsyncDataSource) ProtoReflect() protoreflect.Message { // Deprecated: Use AsyncDataSource.ProtoReflect.Descriptor instead. 
func (*AsyncDataSource) Descriptor() ([]byte, []int) { - return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{17} + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{20} } func (m *AsyncDataSource) GetSpecifier() isAsyncDataSource_Specifier { @@ -1708,7 +2016,7 @@ type TransportSocket struct { func (x *TransportSocket) Reset() { *x = TransportSocket{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_core_v3_base_proto_msgTypes[18] + mi := &file_envoy_config_core_v3_base_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1721,7 +2029,7 @@ func (x *TransportSocket) String() string { func (*TransportSocket) ProtoMessage() {} func (x *TransportSocket) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_core_v3_base_proto_msgTypes[18] + mi := &file_envoy_config_core_v3_base_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1734,7 +2042,7 @@ func (x *TransportSocket) ProtoReflect() protoreflect.Message { // Deprecated: Use TransportSocket.ProtoReflect.Descriptor instead. func (*TransportSocket) Descriptor() ([]byte, []int) { - return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{18} + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{21} } func (x *TransportSocket) GetName() string { @@ -1751,7 +2059,7 @@ func (m *TransportSocket) GetConfigType() isTransportSocket_ConfigType { return nil } -func (x *TransportSocket) GetTypedConfig() *any1.Any { +func (x *TransportSocket) GetTypedConfig() *anypb.Any { if x, ok := x.GetConfigType().(*TransportSocket_TypedConfig); ok { return x.TypedConfig } @@ -1763,7 +2071,7 @@ type isTransportSocket_ConfigType interface { } type TransportSocket_TypedConfig struct { - TypedConfig *any1.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` } func (*TransportSocket_TypedConfig) isTransportSocket_ConfigType() {} @@ -1792,7 +2100,7 @@ type RuntimeFractionalPercent struct { func (x *RuntimeFractionalPercent) Reset() { *x = RuntimeFractionalPercent{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_core_v3_base_proto_msgTypes[19] + mi := &file_envoy_config_core_v3_base_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1805,7 +2113,7 @@ func (x *RuntimeFractionalPercent) String() string { func (*RuntimeFractionalPercent) ProtoMessage() {} func (x *RuntimeFractionalPercent) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_core_v3_base_proto_msgTypes[19] + mi := &file_envoy_config_core_v3_base_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1818,7 +2126,7 @@ func (x *RuntimeFractionalPercent) ProtoReflect() protoreflect.Message { // Deprecated: Use RuntimeFractionalPercent.ProtoReflect.Descriptor instead. 
func (*RuntimeFractionalPercent) Descriptor() ([]byte, []int) { - return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{19} + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{22} } func (x *RuntimeFractionalPercent) GetDefaultValue() *v3.FractionalPercent { @@ -1850,7 +2158,7 @@ type ControlPlane struct { func (x *ControlPlane) Reset() { *x = ControlPlane{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_core_v3_base_proto_msgTypes[20] + mi := &file_envoy_config_core_v3_base_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1863,7 +2171,7 @@ func (x *ControlPlane) String() string { func (*ControlPlane) ProtoMessage() {} func (x *ControlPlane) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_core_v3_base_proto_msgTypes[20] + mi := &file_envoy_config_core_v3_base_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1876,7 +2184,7 @@ func (x *ControlPlane) ProtoReflect() protoreflect.Message { // Deprecated: Use ControlPlane.ProtoReflect.Descriptor instead. func (*ControlPlane) Descriptor() ([]byte, []int) { - return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{20} + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{23} } func (x *ControlPlane) GetIdentifier() string { @@ -1886,6 +2194,158 @@ func (x *ControlPlane) GetIdentifier() string { return "" } +// See :ref:`RetryPriority `. +type RetryPolicy_RetryPriority struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to ConfigType: + // + // *RetryPolicy_RetryPriority_TypedConfig + ConfigType isRetryPolicy_RetryPriority_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *RetryPolicy_RetryPriority) Reset() { + *x = RetryPolicy_RetryPriority{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryPolicy_RetryPriority) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryPolicy_RetryPriority) ProtoMessage() {} + +func (x *RetryPolicy_RetryPriority) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryPolicy_RetryPriority.ProtoReflect.Descriptor instead. 
+func (*RetryPolicy_RetryPriority) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{18, 0} +} + +func (x *RetryPolicy_RetryPriority) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *RetryPolicy_RetryPriority) GetConfigType() isRetryPolicy_RetryPriority_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *RetryPolicy_RetryPriority) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*RetryPolicy_RetryPriority_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isRetryPolicy_RetryPriority_ConfigType interface { + isRetryPolicy_RetryPriority_ConfigType() +} + +type RetryPolicy_RetryPriority_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*RetryPolicy_RetryPriority_TypedConfig) isRetryPolicy_RetryPriority_ConfigType() {} + +// See :ref:`RetryHostPredicate `. +type RetryPolicy_RetryHostPredicate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to ConfigType: + // + // *RetryPolicy_RetryHostPredicate_TypedConfig + ConfigType isRetryPolicy_RetryHostPredicate_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *RetryPolicy_RetryHostPredicate) Reset() { + *x = RetryPolicy_RetryHostPredicate{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryPolicy_RetryHostPredicate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryPolicy_RetryHostPredicate) ProtoMessage() {} + +func (x *RetryPolicy_RetryHostPredicate) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryPolicy_RetryHostPredicate.ProtoReflect.Descriptor instead. 
+func (*RetryPolicy_RetryHostPredicate) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{18, 1} +} + +func (x *RetryPolicy_RetryHostPredicate) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *RetryPolicy_RetryHostPredicate) GetConfigType() isRetryPolicy_RetryHostPredicate_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *RetryPolicy_RetryHostPredicate) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*RetryPolicy_RetryHostPredicate_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isRetryPolicy_RetryHostPredicate_ConfigType interface { + isRetryPolicy_RetryHostPredicate_ConfigType() +} + +type RetryPolicy_RetryHostPredicate_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*RetryPolicy_RetryHostPredicate_TypedConfig) isRetryPolicy_RetryHostPredicate_ConfigType() {} + var File_envoy_config_core_v3_base_proto protoreflect.FileDescriptor var file_envoy_config_core_v3_base_proto_rawDesc = []byte{ @@ -2007,236 +2467,307 @@ var file_envoy_config_core_v3_base_proto_rawDesc = []byte{ 0x6f, 0x64, 0x65, 0x42, 0x19, 0x0a, 0x17, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x52, 0x0d, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x22, 0xb1, 0x03, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x5b, 0x0a, 0x0f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x69, 0x6f, 0x6e, 0x22, 0xcd, 0x03, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x69, 0x0a, 0x0f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x66, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x6b, 0x0a, - 0x15, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x79, 0x70, - 0x65, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x74, 0x79, 0x70, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x5a, 0x0a, 0x13, 0x46, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x05, 0x76, 0x61, 
0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x5c, 0x0a, 0x18, 0x54, 0x79, 0x70, 0x65, 0x64, 0x46, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x21, 0x9a, 0xc5, 0x88, 0x1e, 0x1c, 0x0a, 0x1a, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x86, 0x01, 0x0a, 0x0d, 0x52, 0x75, 0x6e, 0x74, - 0x69, 0x6d, 0x65, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x28, - 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72, 0x75, - 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, - 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, - 0x22, 0x77, 0x0a, 0x0e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, - 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, - 0x74, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x28, 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72, - 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x22, 0x86, 0x01, 0x0a, 0x0d, 0x52, 0x75, - 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x01, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x0c, 0xfa, + 0x42, 0x09, 0x9a, 0x01, 0x06, 0x22, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0e, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x79, 0x0a, 0x15, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, + 0x74, 0x72, 0x79, 
0x42, 0x0c, 0xfa, 0x42, 0x09, 0x9a, 0x01, 0x06, 0x22, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x13, 0x74, 0x79, 0x70, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x5a, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x1a, 0x5c, 0x0a, 0x18, 0x54, 0x79, 0x70, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x3a, 0x21, 0x9a, 0xc5, 0x88, 0x1e, 0x1c, 0x0a, 0x1a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x22, 0x86, 0x01, 0x0a, 0x0d, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x28, 0x0a, 0x0b, 0x72, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, + 0x65, 0x4b, 0x65, 0x79, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x22, 0x77, 0x0a, 0x0e, + 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x3b, + 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0c, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x28, 0x0a, 0x0b, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x22, 0x86, 0x01, 0x0a, 0x0d, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, + 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x28, 0x0a, 0x0b, + 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 
0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x74, + 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x22, 0xb6, + 0x01, 0x0a, 0x12, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x46, 0x6c, 0x61, 0x67, 0x12, 0x49, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, + 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, + 0x10, 0x01, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x28, 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, - 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, - 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x44, 0x6f, 0x75, 0x62, - 0x6c, 0x65, 0x22, 0xb6, 0x01, 0x0a, 0x12, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x12, 0x49, 0x0a, 0x0d, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, - 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x28, 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, - 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, - 0x10, 0x01, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x3a, 0x2b, - 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x22, 0x41, 0x0a, 0x0e, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x19, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, - 0x02, 0x10, 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xcd, - 0x01, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x11, 0xfa, 0x42, 0x0e, - 0x72, 0x0c, 0x10, 0x01, 0x28, 0x80, 0x80, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x37, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x21, 0xfa, 0x42, 0x0c, 0x72, 0x0a, 0x28, 0x80, 0x80, 
0x01, 0xc8, 0x01, 0x00, - 0xc0, 0x01, 0x02, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0c, 0x12, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x09, - 0x72, 0x61, 0x77, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, - 0x1d, 0xfa, 0x42, 0x08, 0x7a, 0x06, 0x10, 0x00, 0x18, 0x80, 0x80, 0x01, 0xf2, 0x98, 0xfe, 0x8f, - 0x05, 0x0c, 0x12, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x52, 0x08, - 0x72, 0x61, 0x77, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, - 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xd9, - 0x03, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x43, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, + 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x22, 0x3f, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0x10, 0x01, 0x28, 0x80, 0x80, 0x01, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xae, 0x02, 0x0a, 0x0e, 0x4b, 0x65, 0x79, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x12, 0x3e, 0x0a, 0x05, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, + 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x5b, 0x0a, 0x06, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x41, 0x70, 0x70, 0x65, 0x6e, + 0x64, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, + 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x7f, 0x0a, 0x14, 0x4b, 0x65, 0x79, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x1b, 0x0a, 0x17, 0x41, 0x50, 0x50, 0x45, 0x4e, 0x44, 0x5f, 0x49, 0x46, 0x5f, 0x45, 0x58, + 0x49, 0x53, 0x54, 0x53, 0x5f, 0x4f, 0x52, 0x5f, 0x41, 0x44, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, + 0x0d, 0x41, 0x44, 0x44, 0x5f, 0x49, 0x46, 0x5f, 0x41, 0x42, 0x53, 0x45, 0x4e, 0x54, 0x10, 0x01, + 0x12, 0x1e, 0x0a, 0x1a, 0x4f, 0x56, 0x45, 0x52, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x49, 0x46, + 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x5f, 0x4f, 0x52, 0x5f, 0x41, 0x44, 0x44, 0x10, 0x02, + 
0x12, 0x17, 0x0a, 0x13, 0x4f, 0x56, 0x45, 0x52, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x49, 0x46, + 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x03, 0x22, 0x73, 0x0a, 0x10, 0x4b, 0x65, 0x79, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3c, 0x0a, + 0x06, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x41, 0x70, 0x70, + 0x65, 0x6e, 0x64, 0x52, 0x06, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x12, 0x21, 0x0a, 0x06, 0x72, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xfa, 0x42, 0x06, + 0x72, 0x04, 0x28, 0x80, 0x80, 0x01, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x22, 0x41, + 0x0a, 0x0e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x12, 0x19, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0xcd, 0x01, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x23, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x11, + 0xfa, 0x42, 0x0e, 0x72, 0x0c, 0x10, 0x01, 0x28, 0x80, 0x80, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, + 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x37, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x21, 0xfa, 0x42, 0x0c, 0x72, 0x0a, 0x28, 0x80, 0x80, 0x01, + 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0c, 0x12, 0x0a, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x3a, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x42, 0x1d, 0xfa, 0x42, 0x08, 0x7a, 0x06, 0x10, 0x00, 0x18, 0x80, 0x80, 0x01, 0xf2, + 0x98, 0xfe, 0x8f, 0x05, 0x0c, 0x12, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x52, 0x08, 0x72, 0x61, 0x77, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x24, 0x9a, 0xc5, 0x88, + 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0xd9, 0x03, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x43, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, + 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x06, + 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, + 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, + 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x06, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x12, 0x69, 0x0a, + 0x0d, 0x61, 0x70, 0x70, 
0x65, 0x6e, 0x64, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, - 0x01, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x06, 0x61, 0x70, 0x70, - 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, - 0x18, 0x01, 0x52, 0x06, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x12, 0x69, 0x0a, 0x0d, 0x61, 0x70, - 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, - 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x65, 0x6d, - 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0e, 0x6b, 0x65, 0x65, 0x70, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, - 0x7d, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x41, 0x50, 0x50, 0x45, 0x4e, 0x44, 0x5f, - 0x49, 0x46, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x5f, 0x4f, 0x52, 0x5f, 0x41, 0x44, 0x44, - 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x44, 0x44, 0x5f, 0x49, 0x46, 0x5f, 0x41, 0x42, 0x53, - 0x45, 0x4e, 0x54, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x4f, 0x56, 0x45, 0x52, 0x57, 0x52, 0x49, - 0x54, 0x45, 0x5f, 0x49, 0x46, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x5f, 0x4f, 0x52, 0x5f, - 0x41, 0x44, 0x44, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x4f, 0x56, 0x45, 0x52, 0x57, 0x52, 0x49, - 0x54, 0x45, 0x5f, 0x49, 0x46, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x03, 0x3a, 0x2a, - 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6c, 0x0a, 0x09, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x70, 0x12, 0x3b, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x68, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x73, 0x3a, 0x22, 0x9a, 0xc5, 0x88, 0x1e, 0x1d, 0x0a, 0x1b, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x70, 0x22, 0x2f, 0x0a, 0x10, 0x57, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 
0x74, 0x6f, 0x72, 0x79, 0x12, 0x1b, 0x0a, 0x04, - 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, - 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0xf4, 0x01, 0x0a, 0x0a, 0x44, 0x61, - 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, + 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x65, + 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x65, 0x70, + 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0e, 0x6b, 0x65, 0x65, 0x70, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x7d, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x70, 0x70, 0x65, + 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x41, 0x50, 0x50, 0x45, + 0x4e, 0x44, 0x5f, 0x49, 0x46, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x5f, 0x4f, 0x52, 0x5f, + 0x41, 0x44, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x44, 0x44, 0x5f, 0x49, 0x46, 0x5f, + 0x41, 0x42, 0x53, 0x45, 0x4e, 0x54, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x4f, 0x56, 0x45, 0x52, + 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x49, 0x46, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x5f, + 0x4f, 0x52, 0x5f, 0x41, 0x44, 0x44, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x4f, 0x56, 0x45, 0x52, + 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x49, 0x46, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, + 0x03, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6c, 0x0a, + 0x09, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x70, 0x12, 0x3b, 0x0a, 0x07, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x3a, 0x22, 0x9a, 0xc5, 0x88, 0x1e, 0x1d, 0x0a, 0x1b, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x70, 0x22, 0x2f, 0x0a, 0x10, 0x57, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, + 0x1b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0xc9, 0x02, 0x0a, + 0x0a, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x08, 0x66, + 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0b, 0x69, 0x6e, 0x6c, 0x69, + 0x6e, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0d, 0x69, 
0x6e, 0x6c, 0x69, 0x6e, + 0x65, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, + 0x52, 0x0c, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x3c, + 0x0a, 0x14, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x61, + 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x13, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, + 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x53, 0x0a, 0x11, + 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, + 0x10, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x79, 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x10, 0x0a, 0x09, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xee, 0x05, 0x0a, 0x0b, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4b, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x72, + 0x79, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x6f, 0x66, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x53, + 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, + 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x52, 0x0a, 0x0b, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x74, + 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, + 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x13, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0d, + 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x6e, + 0x75, 0x6d, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x5f, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x4f, 0x6e, 0x12, 0x56, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x72, + 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, + 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x72, + 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x66, 0x0a, 0x14, + 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 
0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x52, 0x12, 0x72, 0x65, 0x74, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x12, 0x48, 0x0a, 0x21, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x78, + 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x1d, 0x68, 0x6f, 0x73, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x4d, 0x61, 0x78, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x1a, 0x76, + 0x0a, 0x0d, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, + 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, + 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x7b, 0x0a, 0x12, 0x52, 0x65, 0x74, 0x72, 0x79, 0x48, + 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, - 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x23, 0x0a, 0x0c, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0b, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x42, - 0x79, 0x74, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0d, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x69, - 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x3c, 0x0a, 0x14, 0x65, - 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, - 0x62, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, - 0x10, 0x01, 0x48, 0x00, 0x52, 0x13, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, - 0x74, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, - 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x10, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, + 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 
0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xe8, 0x01, 0x0a, 0x10, 0x52, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x42, + 0x0a, 0x08, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x55, 0x72, 0x69, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x68, 0x74, 0x74, 0x70, 0x55, + 0x72, 0x69, 0x12, 0x1f, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x73, 0x68, 0x61, + 0x32, 0x35, 0x36, 0x12, 0x44, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, + 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, + 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x0f, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x44, 0x61, + 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, + 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x05, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x12, 0x40, 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x06, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, + 0x73, 0x79, 0x6e, 0x63, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x10, 0x0a, 0x09, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, - 0x22, 0xd4, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x12, 0x4b, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x6f, - 0x66, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, - 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x52, 0x0a, - 0x0b, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x42, 0x13, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0d, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, - 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, - 0x73, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, - 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xe8, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x42, 0x0a, 0x08, - 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x55, 0x72, 0x69, 0x42, 0x08, 0xfa, - 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x68, 0x74, 0x74, 0x70, 0x55, 0x72, 0x69, - 0x12, 0x1f, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x73, 0x68, 0x61, 0x32, 0x35, - 0x36, 0x12, 0x44, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, - 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, - 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x0f, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x44, 0x61, 0x74, 0x61, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, - 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, - 0x12, 0x40, 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x61, - 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x73, 0x79, - 0x6e, 0x63, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x10, 0x0a, 0x09, - 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xb0, - 0x01, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, - 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 
0x61, 0x6d, 0x65, 0x12, - 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, - 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, - 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, - 0x63, 0x6b, 0x65, 0x74, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x22, 0xbf, 0x01, 0x0a, 0x18, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x4f, - 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, - 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, - 0x01, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x1f, 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, - 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, - 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, - 0x65, 0x6e, 0x74, 0x22, 0x55, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x6c, - 0x61, 0x6e, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, - 0x69, 0x65, 0x72, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, - 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x65, 0x2a, 0x28, 0x0a, 0x0f, 0x52, 0x6f, - 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x0b, 0x0a, - 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x49, - 0x47, 0x48, 0x10, 0x01, 0x2a, 0x89, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x16, 0x0a, 0x12, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, - 0x0a, 0x03, 0x47, 0x45, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x45, 0x41, 0x44, 0x10, - 0x02, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x50, - 0x55, 0x54, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x05, - 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x10, 0x06, 0x12, 0x0b, 0x0a, - 0x07, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, - 
0x41, 0x43, 0x45, 0x10, 0x08, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x41, 0x54, 0x43, 0x48, 0x10, 0x09, - 0x2a, 0x3e, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x42, 0x4f, 0x55, 0x4e, 0x44, - 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x55, 0x54, 0x42, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x02, - 0x42, 0x7d, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x09, - 0x42, 0x61, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, - 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, - 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x22, 0xb0, 0x01, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, + 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x28, 0x9a, 0xc5, + 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, + 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x22, 0xbf, 0x01, 0x0a, 0x18, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, + 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, + 0x12, 0x4f, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, + 0x02, 0x10, 0x01, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, + 0x65, 0x79, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 
0x6c, 0x50, 0x65, + 0x72, 0x63, 0x65, 0x6e, 0x74, 0x22, 0x55, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x50, 0x6c, 0x61, 0x6e, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x65, 0x2a, 0x28, 0x0a, 0x0f, + 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, + 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, + 0x48, 0x49, 0x47, 0x48, 0x10, 0x01, 0x2a, 0x89, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x16, 0x0a, 0x12, 0x4d, 0x45, 0x54, 0x48, + 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x45, 0x41, + 0x44, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, + 0x03, 0x50, 0x55, 0x54, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, + 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x10, 0x06, 0x12, + 0x0b, 0x0a, 0x07, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, + 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x08, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x41, 0x54, 0x43, 0x48, + 0x10, 0x09, 0x2a, 0x3e, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x42, 0x4f, 0x55, + 0x4e, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x55, 0x54, 0x42, 0x4f, 0x55, 0x4e, 0x44, + 0x10, 0x02, 0x42, 0x7d, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x42, 0x09, 0x42, 0x61, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, + 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2251,83 +2782,97 @@ func file_envoy_config_core_v3_base_proto_rawDescGZIP() []byte { return file_envoy_config_core_v3_base_proto_rawDescData } -var file_envoy_config_core_v3_base_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_envoy_config_core_v3_base_proto_msgTypes = make([]protoimpl.MessageInfo, 24) +var file_envoy_config_core_v3_base_proto_enumTypes = make([]protoimpl.EnumInfo, 5) +var file_envoy_config_core_v3_base_proto_msgTypes = make([]protoimpl.MessageInfo, 29) var file_envoy_config_core_v3_base_proto_goTypes = []interface{}{ (RoutingPriority)(0), // 0: envoy.config.core.v3.RoutingPriority 
(RequestMethod)(0), // 1: envoy.config.core.v3.RequestMethod (TrafficDirection)(0), // 2: envoy.config.core.v3.TrafficDirection - (HeaderValueOption_HeaderAppendAction)(0), // 3: envoy.config.core.v3.HeaderValueOption.HeaderAppendAction - (*Locality)(nil), // 4: envoy.config.core.v3.Locality - (*BuildVersion)(nil), // 5: envoy.config.core.v3.BuildVersion - (*Extension)(nil), // 6: envoy.config.core.v3.Extension - (*Node)(nil), // 7: envoy.config.core.v3.Node - (*Metadata)(nil), // 8: envoy.config.core.v3.Metadata - (*RuntimeUInt32)(nil), // 9: envoy.config.core.v3.RuntimeUInt32 - (*RuntimePercent)(nil), // 10: envoy.config.core.v3.RuntimePercent - (*RuntimeDouble)(nil), // 11: envoy.config.core.v3.RuntimeDouble - (*RuntimeFeatureFlag)(nil), // 12: envoy.config.core.v3.RuntimeFeatureFlag - (*QueryParameter)(nil), // 13: envoy.config.core.v3.QueryParameter - (*HeaderValue)(nil), // 14: envoy.config.core.v3.HeaderValue - (*HeaderValueOption)(nil), // 15: envoy.config.core.v3.HeaderValueOption - (*HeaderMap)(nil), // 16: envoy.config.core.v3.HeaderMap - (*WatchedDirectory)(nil), // 17: envoy.config.core.v3.WatchedDirectory - (*DataSource)(nil), // 18: envoy.config.core.v3.DataSource - (*RetryPolicy)(nil), // 19: envoy.config.core.v3.RetryPolicy - (*RemoteDataSource)(nil), // 20: envoy.config.core.v3.RemoteDataSource - (*AsyncDataSource)(nil), // 21: envoy.config.core.v3.AsyncDataSource - (*TransportSocket)(nil), // 22: envoy.config.core.v3.TransportSocket - (*RuntimeFractionalPercent)(nil), // 23: envoy.config.core.v3.RuntimeFractionalPercent - (*ControlPlane)(nil), // 24: envoy.config.core.v3.ControlPlane - nil, // 25: envoy.config.core.v3.Node.DynamicParametersEntry - nil, // 26: envoy.config.core.v3.Metadata.FilterMetadataEntry - nil, // 27: envoy.config.core.v3.Metadata.TypedFilterMetadataEntry - (*v3.SemanticVersion)(nil), // 28: envoy.type.v3.SemanticVersion - (*_struct.Struct)(nil), // 29: google.protobuf.Struct - (*Address)(nil), // 30: envoy.config.core.v3.Address - (*v3.Percent)(nil), // 31: envoy.type.v3.Percent - (*wrappers.BoolValue)(nil), // 32: google.protobuf.BoolValue - (*BackoffStrategy)(nil), // 33: envoy.config.core.v3.BackoffStrategy - (*wrappers.UInt32Value)(nil), // 34: google.protobuf.UInt32Value - (*HttpUri)(nil), // 35: envoy.config.core.v3.HttpUri - (*any1.Any)(nil), // 36: google.protobuf.Any - (*v3.FractionalPercent)(nil), // 37: envoy.type.v3.FractionalPercent - (*v31.ContextParams)(nil), // 38: xds.core.v3.ContextParams + (KeyValueAppend_KeyValueAppendAction)(0), // 3: envoy.config.core.v3.KeyValueAppend.KeyValueAppendAction + (HeaderValueOption_HeaderAppendAction)(0), // 4: envoy.config.core.v3.HeaderValueOption.HeaderAppendAction + (*Locality)(nil), // 5: envoy.config.core.v3.Locality + (*BuildVersion)(nil), // 6: envoy.config.core.v3.BuildVersion + (*Extension)(nil), // 7: envoy.config.core.v3.Extension + (*Node)(nil), // 8: envoy.config.core.v3.Node + (*Metadata)(nil), // 9: envoy.config.core.v3.Metadata + (*RuntimeUInt32)(nil), // 10: envoy.config.core.v3.RuntimeUInt32 + (*RuntimePercent)(nil), // 11: envoy.config.core.v3.RuntimePercent + (*RuntimeDouble)(nil), // 12: envoy.config.core.v3.RuntimeDouble + (*RuntimeFeatureFlag)(nil), // 13: envoy.config.core.v3.RuntimeFeatureFlag + (*KeyValue)(nil), // 14: envoy.config.core.v3.KeyValue + (*KeyValueAppend)(nil), // 15: envoy.config.core.v3.KeyValueAppend + (*KeyValueMutation)(nil), // 16: envoy.config.core.v3.KeyValueMutation + (*QueryParameter)(nil), // 17: envoy.config.core.v3.QueryParameter + 
(*HeaderValue)(nil), // 18: envoy.config.core.v3.HeaderValue + (*HeaderValueOption)(nil), // 19: envoy.config.core.v3.HeaderValueOption + (*HeaderMap)(nil), // 20: envoy.config.core.v3.HeaderMap + (*WatchedDirectory)(nil), // 21: envoy.config.core.v3.WatchedDirectory + (*DataSource)(nil), // 22: envoy.config.core.v3.DataSource + (*RetryPolicy)(nil), // 23: envoy.config.core.v3.RetryPolicy + (*RemoteDataSource)(nil), // 24: envoy.config.core.v3.RemoteDataSource + (*AsyncDataSource)(nil), // 25: envoy.config.core.v3.AsyncDataSource + (*TransportSocket)(nil), // 26: envoy.config.core.v3.TransportSocket + (*RuntimeFractionalPercent)(nil), // 27: envoy.config.core.v3.RuntimeFractionalPercent + (*ControlPlane)(nil), // 28: envoy.config.core.v3.ControlPlane + nil, // 29: envoy.config.core.v3.Node.DynamicParametersEntry + nil, // 30: envoy.config.core.v3.Metadata.FilterMetadataEntry + nil, // 31: envoy.config.core.v3.Metadata.TypedFilterMetadataEntry + (*RetryPolicy_RetryPriority)(nil), // 32: envoy.config.core.v3.RetryPolicy.RetryPriority + (*RetryPolicy_RetryHostPredicate)(nil), // 33: envoy.config.core.v3.RetryPolicy.RetryHostPredicate + (*v3.SemanticVersion)(nil), // 34: envoy.type.v3.SemanticVersion + (*structpb.Struct)(nil), // 35: google.protobuf.Struct + (*Address)(nil), // 36: envoy.config.core.v3.Address + (*v3.Percent)(nil), // 37: envoy.type.v3.Percent + (*wrapperspb.BoolValue)(nil), // 38: google.protobuf.BoolValue + (*BackoffStrategy)(nil), // 39: envoy.config.core.v3.BackoffStrategy + (*wrapperspb.UInt32Value)(nil), // 40: google.protobuf.UInt32Value + (*HttpUri)(nil), // 41: envoy.config.core.v3.HttpUri + (*anypb.Any)(nil), // 42: google.protobuf.Any + (*v3.FractionalPercent)(nil), // 43: envoy.type.v3.FractionalPercent + (*v31.ContextParams)(nil), // 44: xds.core.v3.ContextParams } var file_envoy_config_core_v3_base_proto_depIdxs = []int32{ - 28, // 0: envoy.config.core.v3.BuildVersion.version:type_name -> envoy.type.v3.SemanticVersion - 29, // 1: envoy.config.core.v3.BuildVersion.metadata:type_name -> google.protobuf.Struct - 5, // 2: envoy.config.core.v3.Extension.version:type_name -> envoy.config.core.v3.BuildVersion - 29, // 3: envoy.config.core.v3.Node.metadata:type_name -> google.protobuf.Struct - 25, // 4: envoy.config.core.v3.Node.dynamic_parameters:type_name -> envoy.config.core.v3.Node.DynamicParametersEntry - 4, // 5: envoy.config.core.v3.Node.locality:type_name -> envoy.config.core.v3.Locality - 5, // 6: envoy.config.core.v3.Node.user_agent_build_version:type_name -> envoy.config.core.v3.BuildVersion - 6, // 7: envoy.config.core.v3.Node.extensions:type_name -> envoy.config.core.v3.Extension - 30, // 8: envoy.config.core.v3.Node.listening_addresses:type_name -> envoy.config.core.v3.Address - 26, // 9: envoy.config.core.v3.Metadata.filter_metadata:type_name -> envoy.config.core.v3.Metadata.FilterMetadataEntry - 27, // 10: envoy.config.core.v3.Metadata.typed_filter_metadata:type_name -> envoy.config.core.v3.Metadata.TypedFilterMetadataEntry - 31, // 11: envoy.config.core.v3.RuntimePercent.default_value:type_name -> envoy.type.v3.Percent - 32, // 12: envoy.config.core.v3.RuntimeFeatureFlag.default_value:type_name -> google.protobuf.BoolValue - 14, // 13: envoy.config.core.v3.HeaderValueOption.header:type_name -> envoy.config.core.v3.HeaderValue - 32, // 14: envoy.config.core.v3.HeaderValueOption.append:type_name -> google.protobuf.BoolValue - 3, // 15: envoy.config.core.v3.HeaderValueOption.append_action:type_name -> 
envoy.config.core.v3.HeaderValueOption.HeaderAppendAction - 14, // 16: envoy.config.core.v3.HeaderMap.headers:type_name -> envoy.config.core.v3.HeaderValue - 33, // 17: envoy.config.core.v3.RetryPolicy.retry_back_off:type_name -> envoy.config.core.v3.BackoffStrategy - 34, // 18: envoy.config.core.v3.RetryPolicy.num_retries:type_name -> google.protobuf.UInt32Value - 35, // 19: envoy.config.core.v3.RemoteDataSource.http_uri:type_name -> envoy.config.core.v3.HttpUri - 19, // 20: envoy.config.core.v3.RemoteDataSource.retry_policy:type_name -> envoy.config.core.v3.RetryPolicy - 18, // 21: envoy.config.core.v3.AsyncDataSource.local:type_name -> envoy.config.core.v3.DataSource - 20, // 22: envoy.config.core.v3.AsyncDataSource.remote:type_name -> envoy.config.core.v3.RemoteDataSource - 36, // 23: envoy.config.core.v3.TransportSocket.typed_config:type_name -> google.protobuf.Any - 37, // 24: envoy.config.core.v3.RuntimeFractionalPercent.default_value:type_name -> envoy.type.v3.FractionalPercent - 38, // 25: envoy.config.core.v3.Node.DynamicParametersEntry.value:type_name -> xds.core.v3.ContextParams - 29, // 26: envoy.config.core.v3.Metadata.FilterMetadataEntry.value:type_name -> google.protobuf.Struct - 36, // 27: envoy.config.core.v3.Metadata.TypedFilterMetadataEntry.value:type_name -> google.protobuf.Any - 28, // [28:28] is the sub-list for method output_type - 28, // [28:28] is the sub-list for method input_type - 28, // [28:28] is the sub-list for extension type_name - 28, // [28:28] is the sub-list for extension extendee - 0, // [0:28] is the sub-list for field type_name + 34, // 0: envoy.config.core.v3.BuildVersion.version:type_name -> envoy.type.v3.SemanticVersion + 35, // 1: envoy.config.core.v3.BuildVersion.metadata:type_name -> google.protobuf.Struct + 6, // 2: envoy.config.core.v3.Extension.version:type_name -> envoy.config.core.v3.BuildVersion + 35, // 3: envoy.config.core.v3.Node.metadata:type_name -> google.protobuf.Struct + 29, // 4: envoy.config.core.v3.Node.dynamic_parameters:type_name -> envoy.config.core.v3.Node.DynamicParametersEntry + 5, // 5: envoy.config.core.v3.Node.locality:type_name -> envoy.config.core.v3.Locality + 6, // 6: envoy.config.core.v3.Node.user_agent_build_version:type_name -> envoy.config.core.v3.BuildVersion + 7, // 7: envoy.config.core.v3.Node.extensions:type_name -> envoy.config.core.v3.Extension + 36, // 8: envoy.config.core.v3.Node.listening_addresses:type_name -> envoy.config.core.v3.Address + 30, // 9: envoy.config.core.v3.Metadata.filter_metadata:type_name -> envoy.config.core.v3.Metadata.FilterMetadataEntry + 31, // 10: envoy.config.core.v3.Metadata.typed_filter_metadata:type_name -> envoy.config.core.v3.Metadata.TypedFilterMetadataEntry + 37, // 11: envoy.config.core.v3.RuntimePercent.default_value:type_name -> envoy.type.v3.Percent + 38, // 12: envoy.config.core.v3.RuntimeFeatureFlag.default_value:type_name -> google.protobuf.BoolValue + 14, // 13: envoy.config.core.v3.KeyValueAppend.entry:type_name -> envoy.config.core.v3.KeyValue + 3, // 14: envoy.config.core.v3.KeyValueAppend.action:type_name -> envoy.config.core.v3.KeyValueAppend.KeyValueAppendAction + 15, // 15: envoy.config.core.v3.KeyValueMutation.append:type_name -> envoy.config.core.v3.KeyValueAppend + 18, // 16: envoy.config.core.v3.HeaderValueOption.header:type_name -> envoy.config.core.v3.HeaderValue + 38, // 17: envoy.config.core.v3.HeaderValueOption.append:type_name -> google.protobuf.BoolValue + 4, // 18: envoy.config.core.v3.HeaderValueOption.append_action:type_name -> 
envoy.config.core.v3.HeaderValueOption.HeaderAppendAction + 18, // 19: envoy.config.core.v3.HeaderMap.headers:type_name -> envoy.config.core.v3.HeaderValue + 21, // 20: envoy.config.core.v3.DataSource.watched_directory:type_name -> envoy.config.core.v3.WatchedDirectory + 39, // 21: envoy.config.core.v3.RetryPolicy.retry_back_off:type_name -> envoy.config.core.v3.BackoffStrategy + 40, // 22: envoy.config.core.v3.RetryPolicy.num_retries:type_name -> google.protobuf.UInt32Value + 32, // 23: envoy.config.core.v3.RetryPolicy.retry_priority:type_name -> envoy.config.core.v3.RetryPolicy.RetryPriority + 33, // 24: envoy.config.core.v3.RetryPolicy.retry_host_predicate:type_name -> envoy.config.core.v3.RetryPolicy.RetryHostPredicate + 41, // 25: envoy.config.core.v3.RemoteDataSource.http_uri:type_name -> envoy.config.core.v3.HttpUri + 23, // 26: envoy.config.core.v3.RemoteDataSource.retry_policy:type_name -> envoy.config.core.v3.RetryPolicy + 22, // 27: envoy.config.core.v3.AsyncDataSource.local:type_name -> envoy.config.core.v3.DataSource + 24, // 28: envoy.config.core.v3.AsyncDataSource.remote:type_name -> envoy.config.core.v3.RemoteDataSource + 42, // 29: envoy.config.core.v3.TransportSocket.typed_config:type_name -> google.protobuf.Any + 43, // 30: envoy.config.core.v3.RuntimeFractionalPercent.default_value:type_name -> envoy.type.v3.FractionalPercent + 44, // 31: envoy.config.core.v3.Node.DynamicParametersEntry.value:type_name -> xds.core.v3.ContextParams + 35, // 32: envoy.config.core.v3.Metadata.FilterMetadataEntry.value:type_name -> google.protobuf.Struct + 42, // 33: envoy.config.core.v3.Metadata.TypedFilterMetadataEntry.value:type_name -> google.protobuf.Any + 42, // 34: envoy.config.core.v3.RetryPolicy.RetryPriority.typed_config:type_name -> google.protobuf.Any + 42, // 35: envoy.config.core.v3.RetryPolicy.RetryHostPredicate.typed_config:type_name -> google.protobuf.Any + 36, // [36:36] is the sub-list for method output_type + 36, // [36:36] is the sub-list for method input_type + 36, // [36:36] is the sub-list for extension type_name + 36, // [36:36] is the sub-list for extension extendee + 0, // [0:36] is the sub-list for field type_name } func init() { file_envoy_config_core_v3_base_proto_init() } @@ -2448,7 +2993,7 @@ func file_envoy_config_core_v3_base_proto_init() { } } file_envoy_config_core_v3_base_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryParameter); i { + switch v := v.(*KeyValue); i { case 0: return &v.state case 1: @@ -2460,7 +3005,7 @@ func file_envoy_config_core_v3_base_proto_init() { } } file_envoy_config_core_v3_base_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HeaderValue); i { + switch v := v.(*KeyValueAppend); i { case 0: return &v.state case 1: @@ -2472,7 +3017,7 @@ func file_envoy_config_core_v3_base_proto_init() { } } file_envoy_config_core_v3_base_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HeaderValueOption); i { + switch v := v.(*KeyValueMutation); i { case 0: return &v.state case 1: @@ -2484,7 +3029,7 @@ func file_envoy_config_core_v3_base_proto_init() { } } file_envoy_config_core_v3_base_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HeaderMap); i { + switch v := v.(*QueryParameter); i { case 0: return &v.state case 1: @@ -2496,7 +3041,7 @@ func file_envoy_config_core_v3_base_proto_init() { } } file_envoy_config_core_v3_base_proto_msgTypes[13].Exporter = func(v interface{}, i int) 
interface{} { - switch v := v.(*WatchedDirectory); i { + switch v := v.(*HeaderValue); i { case 0: return &v.state case 1: @@ -2508,7 +3053,7 @@ func file_envoy_config_core_v3_base_proto_init() { } } file_envoy_config_core_v3_base_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DataSource); i { + switch v := v.(*HeaderValueOption); i { case 0: return &v.state case 1: @@ -2520,7 +3065,7 @@ func file_envoy_config_core_v3_base_proto_init() { } } file_envoy_config_core_v3_base_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RetryPolicy); i { + switch v := v.(*HeaderMap); i { case 0: return &v.state case 1: @@ -2532,7 +3077,7 @@ func file_envoy_config_core_v3_base_proto_init() { } } file_envoy_config_core_v3_base_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoteDataSource); i { + switch v := v.(*WatchedDirectory); i { case 0: return &v.state case 1: @@ -2544,7 +3089,7 @@ func file_envoy_config_core_v3_base_proto_init() { } } file_envoy_config_core_v3_base_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AsyncDataSource); i { + switch v := v.(*DataSource); i { case 0: return &v.state case 1: @@ -2556,7 +3101,7 @@ func file_envoy_config_core_v3_base_proto_init() { } } file_envoy_config_core_v3_base_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TransportSocket); i { + switch v := v.(*RetryPolicy); i { case 0: return &v.state case 1: @@ -2568,7 +3113,7 @@ func file_envoy_config_core_v3_base_proto_init() { } } file_envoy_config_core_v3_base_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RuntimeFractionalPercent); i { + switch v := v.(*RemoteDataSource); i { case 0: return &v.state case 1: @@ -2580,6 +3125,42 @@ func file_envoy_config_core_v3_base_proto_init() { } } file_envoy_config_core_v3_base_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AsyncDataSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransportSocket); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RuntimeFractionalPercent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ControlPlane); i { case 0: return &v.state @@ -2591,31 +3172,61 @@ func file_envoy_config_core_v3_base_proto_init() { return nil } } + file_envoy_config_core_v3_base_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryPolicy_RetryPriority); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryPolicy_RetryHostPredicate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } } file_envoy_config_core_v3_base_proto_msgTypes[3].OneofWrappers = []interface{}{ (*Node_UserAgentVersion)(nil), (*Node_UserAgentBuildVersion)(nil), } - file_envoy_config_core_v3_base_proto_msgTypes[14].OneofWrappers = []interface{}{ + file_envoy_config_core_v3_base_proto_msgTypes[17].OneofWrappers = []interface{}{ (*DataSource_Filename)(nil), (*DataSource_InlineBytes)(nil), (*DataSource_InlineString)(nil), (*DataSource_EnvironmentVariable)(nil), } - file_envoy_config_core_v3_base_proto_msgTypes[17].OneofWrappers = []interface{}{ + file_envoy_config_core_v3_base_proto_msgTypes[20].OneofWrappers = []interface{}{ (*AsyncDataSource_Local)(nil), (*AsyncDataSource_Remote)(nil), } - file_envoy_config_core_v3_base_proto_msgTypes[18].OneofWrappers = []interface{}{ + file_envoy_config_core_v3_base_proto_msgTypes[21].OneofWrappers = []interface{}{ (*TransportSocket_TypedConfig)(nil), } + file_envoy_config_core_v3_base_proto_msgTypes[27].OneofWrappers = []interface{}{ + (*RetryPolicy_RetryPriority_TypedConfig)(nil), + } + file_envoy_config_core_v3_base_proto_msgTypes[28].OneofWrappers = []interface{}{ + (*RetryPolicy_RetryHostPredicate_TypedConfig)(nil), + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_config_core_v3_base_proto_rawDesc, - NumEnums: 4, - NumMessages: 24, + NumEnums: 5, + NumMessages: 29, NumExtensions: 0, NumServices: 0, }, diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.validate.go index 7e2dd5390e4..09d836dac41 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/core/v3/base.proto @@ -801,7 +802,16 @@ func (m *Metadata) validate(all bool) error { val := m.GetFilterMetadata()[key] _ = val - // no validation rules for FilterMetadata[key] + if utf8.RuneCountInString(key) < 1 { + err := MetadataValidationError{ + field: fmt.Sprintf("FilterMetadata[%v]", key), + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(val).(type) { @@ -847,7 +857,16 @@ func (m *Metadata) validate(all bool) error { val := m.GetTypedFilterMetadata()[key] _ = val - // no validation rules for TypedFilterMetadata[key] + if utf8.RuneCountInString(key) < 1 { + err := MetadataValidationError{ + field: fmt.Sprintf("TypedFilterMetadata[%v]", key), + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(val).(type) { @@ -1477,22 +1496,22 @@ var _ interface { ErrorName() string } = RuntimeFeatureFlagValidationError{} -// Validate checks the field values on QueryParameter with the rules defined in -// the proto definition for this message. If any rules are violated, the first +// Validate checks the field values on KeyValue with the rules defined in the +// proto definition for this message. If any rules are violated, the first // error encountered is returned, or nil if there are no violations. 
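For context on the protoc-gen-validate surface regenerated above: every message carries both Validate() (fail-fast, first violation only) and ValidateAll() (collects every violation into a *MultiError type), and the new //go:build !disable_pgv constraint lets consumers strip all of this generated validation out by compiling with -tags disable_pgv. A minimal standalone sketch exercising the new Metadata map-key rule (the empty key and the import alias are illustrative, not taken from this patch):

package main

import (
	"fmt"

	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// An empty map key violates the "at least 1 runes" rule added in this hunk.
	md := &corev3.Metadata{
		FilterMetadata: map[string]*structpb.Struct{"": {}},
	}

	// Validate stops at the first violation ...
	if err := md.Validate(); err != nil {
		fmt.Println("first violation:", err)
	}
	// ... while ValidateAll reports all of them through AllErrors().
	if err := md.ValidateAll(); err != nil {
		if multi, ok := err.(interface{ AllErrors() []error }); ok {
			for _, e := range multi.AllErrors() {
				fmt.Println("violation:", e)
			}
		}
	}
}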
-func (m *QueryParameter) Validate() error { +func (m *KeyValue) Validate() error { return m.validate(false) } -// ValidateAll checks the field values on QueryParameter with the rules defined -// in the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in QueryParameterMultiError, -// or nil if none found. -func (m *QueryParameter) ValidateAll() error { +// ValidateAll checks the field values on KeyValue with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in KeyValueMultiError, or nil +// if none found. +func (m *KeyValue) ValidateAll() error { return m.validate(true) } -func (m *QueryParameter) validate(all bool) error { +func (m *KeyValue) validate(all bool) error { if m == nil { return nil } @@ -1500,7 +1519,7 @@ func (m *QueryParameter) validate(all bool) error { var errors []error if utf8.RuneCountInString(m.GetKey()) < 1 { - err := QueryParameterValidationError{ + err := KeyValueValidationError{ field: "Key", reason: "value length must be at least 1 runes", } @@ -1510,22 +1529,32 @@ func (m *QueryParameter) validate(all bool) error { errors = append(errors, err) } + if len(m.GetKey()) > 16384 { + err := KeyValueValidationError{ + field: "Key", + reason: "value length must be at most 16384 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for Value if len(errors) > 0 { - return QueryParameterMultiError(errors) + return KeyValueMultiError(errors) } return nil } -// QueryParameterMultiError is an error wrapping multiple validation errors -// returned by QueryParameter.ValidateAll() if the designated constraints -// aren't met. -type QueryParameterMultiError []error +// KeyValueMultiError is an error wrapping multiple validation errors returned +// by KeyValue.ValidateAll() if the designated constraints aren't met. +type KeyValueMultiError []error // Error returns a concatenation of all the error messages it wraps. -func (m QueryParameterMultiError) Error() string { +func (m KeyValueMultiError) Error() string { var msgs []string for _, err := range m { msgs = append(msgs, err.Error()) @@ -1534,11 +1563,11 @@ func (m QueryParameterMultiError) Error() string { } // AllErrors returns a list of validation violation errors. -func (m QueryParameterMultiError) AllErrors() []error { return m } +func (m KeyValueMultiError) AllErrors() []error { return m } -// QueryParameterValidationError is the validation error returned by -// QueryParameter.Validate if the designated constraints aren't met. -type QueryParameterValidationError struct { +// KeyValueValidationError is the validation error returned by +// KeyValue.Validate if the designated constraints aren't met. +type KeyValueValidationError struct { field string reason string cause error @@ -1546,22 +1575,22 @@ type QueryParameterValidationError struct { } // Field function returns field value. -func (e QueryParameterValidationError) Field() string { return e.field } +func (e KeyValueValidationError) Field() string { return e.field } // Reason function returns reason value. -func (e QueryParameterValidationError) Reason() string { return e.reason } +func (e KeyValueValidationError) Reason() string { return e.reason } // Cause function returns cause value. -func (e QueryParameterValidationError) Cause() error { return e.cause } +func (e KeyValueValidationError) Cause() error { return e.cause } // Key function returns key value. 
-func (e QueryParameterValidationError) Key() bool { return e.key } +func (e KeyValueValidationError) Key() bool { return e.key } // ErrorName returns error name. -func (e QueryParameterValidationError) ErrorName() string { return "QueryParameterValidationError" } +func (e KeyValueValidationError) ErrorName() string { return "KeyValueValidationError" } // Error satisfies the builtin error interface -func (e QueryParameterValidationError) Error() string { +func (e KeyValueValidationError) Error() string { cause := "" if e.cause != nil { cause = fmt.Sprintf(" | caused by: %v", e.cause) @@ -1573,14 +1602,14 @@ func (e QueryParameterValidationError) Error() string { } return fmt.Sprintf( - "invalid %sQueryParameter.%s: %s%s", + "invalid %sKeyValue.%s: %s%s", key, e.field, e.reason, cause) } -var _ error = QueryParameterValidationError{} +var _ error = KeyValueValidationError{} var _ interface { Field() string @@ -1588,67 +1617,34 @@ var _ interface { Key() bool Cause() error ErrorName() string -} = QueryParameterValidationError{} +} = KeyValueValidationError{} -// Validate checks the field values on HeaderValue with the rules defined in +// Validate checks the field values on KeyValueAppend with the rules defined in // the proto definition for this message. If any rules are violated, the first // error encountered is returned, or nil if there are no violations. -func (m *HeaderValue) Validate() error { +func (m *KeyValueAppend) Validate() error { return m.validate(false) } -// ValidateAll checks the field values on HeaderValue with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in HeaderValueMultiError, or -// nil if none found. -func (m *HeaderValue) ValidateAll() error { +// ValidateAll checks the field values on KeyValueAppend with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in KeyValueAppendMultiError, +// or nil if none found. 
+func (m *KeyValueAppend) ValidateAll() error { return m.validate(true) } -func (m *HeaderValue) validate(all bool) error { +func (m *KeyValueAppend) validate(all bool) error { if m == nil { return nil } var errors []error - if utf8.RuneCountInString(m.GetKey()) < 1 { - err := HeaderValueValidationError{ - field: "Key", - reason: "value length must be at least 1 runes", - } - if !all { - return err - } - errors = append(errors, err) - } - - if len(m.GetKey()) > 16384 { - err := HeaderValueValidationError{ - field: "Key", - reason: "value length must be at most 16384 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - if !_HeaderValue_Key_Pattern.MatchString(m.GetKey()) { - err := HeaderValueValidationError{ - field: "Key", - reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", - } - if !all { - return err - } - errors = append(errors, err) - } - - if len(m.GetValue()) > 16384 { - err := HeaderValueValidationError{ - field: "Value", - reason: "value length must be at most 16384 bytes", + if m.GetEntry() == nil { + err := KeyValueAppendValidationError{ + field: "Entry", + reason: "value is required", } if !all { return err @@ -1656,21 +1652,39 @@ func (m *HeaderValue) validate(all bool) error { errors = append(errors, err) } - if !_HeaderValue_Value_Pattern.MatchString(m.GetValue()) { - err := HeaderValueValidationError{ - field: "Value", - reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + if all { + switch v := interface{}(m.GetEntry()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, KeyValueAppendValidationError{ + field: "Entry", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, KeyValueAppendValidationError{ + field: "Entry", + reason: "embedded message failed validation", + cause: err, + }) + } } - if !all { - return err + } else if v, ok := interface{}(m.GetEntry()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return KeyValueAppendValidationError{ + field: "Entry", + reason: "embedded message failed validation", + cause: err, + } } - errors = append(errors, err) } - if l := len(m.GetRawValue()); l < 0 || l > 16384 { - err := HeaderValueValidationError{ - field: "RawValue", - reason: "value length must be between 0 and 16384 bytes, inclusive", + if _, ok := KeyValueAppend_KeyValueAppendAction_name[int32(m.GetAction())]; !ok { + err := KeyValueAppendValidationError{ + field: "Action", + reason: "value must be one of the defined enum values", } if !all { return err @@ -1679,18 +1693,19 @@ func (m *HeaderValue) validate(all bool) error { } if len(errors) > 0 { - return HeaderValueMultiError(errors) + return KeyValueAppendMultiError(errors) } return nil } -// HeaderValueMultiError is an error wrapping multiple validation errors -// returned by HeaderValue.ValidateAll() if the designated constraints aren't met. -type HeaderValueMultiError []error +// KeyValueAppendMultiError is an error wrapping multiple validation errors +// returned by KeyValueAppend.ValidateAll() if the designated constraints +// aren't met. +type KeyValueAppendMultiError []error // Error returns a concatenation of all the error messages it wraps. 
-func (m HeaderValueMultiError) Error() string { +func (m KeyValueAppendMultiError) Error() string { var msgs []string for _, err := range m { msgs = append(msgs, err.Error()) @@ -1699,11 +1714,11 @@ func (m HeaderValueMultiError) Error() string { } // AllErrors returns a list of validation violation errors. -func (m HeaderValueMultiError) AllErrors() []error { return m } +func (m KeyValueAppendMultiError) AllErrors() []error { return m } -// HeaderValueValidationError is the validation error returned by -// HeaderValue.Validate if the designated constraints aren't met. -type HeaderValueValidationError struct { +// KeyValueAppendValidationError is the validation error returned by +// KeyValueAppend.Validate if the designated constraints aren't met. +type KeyValueAppendValidationError struct { field string reason string cause error @@ -1711,22 +1726,22 @@ type HeaderValueValidationError struct { } // Field function returns field value. -func (e HeaderValueValidationError) Field() string { return e.field } +func (e KeyValueAppendValidationError) Field() string { return e.field } // Reason function returns reason value. -func (e HeaderValueValidationError) Reason() string { return e.reason } +func (e KeyValueAppendValidationError) Reason() string { return e.reason } // Cause function returns cause value. -func (e HeaderValueValidationError) Cause() error { return e.cause } +func (e KeyValueAppendValidationError) Cause() error { return e.cause } // Key function returns key value. -func (e HeaderValueValidationError) Key() bool { return e.key } +func (e KeyValueAppendValidationError) Key() bool { return e.key } // ErrorName returns error name. -func (e HeaderValueValidationError) ErrorName() string { return "HeaderValueValidationError" } +func (e KeyValueAppendValidationError) ErrorName() string { return "KeyValueAppendValidationError" } // Error satisfies the builtin error interface -func (e HeaderValueValidationError) Error() string { +func (e KeyValueAppendValidationError) Error() string { cause := "" if e.cause != nil { cause = fmt.Sprintf(" | caused by: %v", e.cause) @@ -1738,14 +1753,14 @@ func (e HeaderValueValidationError) Error() string { } return fmt.Sprintf( - "invalid %sHeaderValue.%s: %s%s", + "invalid %sKeyValueAppend.%s: %s%s", key, e.field, e.reason, cause) } -var _ error = HeaderValueValidationError{} +var _ error = KeyValueAppendValidationError{} var _ interface { Field() string @@ -1753,79 +1768,35 @@ var _ interface { Key() bool Cause() error ErrorName() string -} = HeaderValueValidationError{} +} = KeyValueAppendValidationError{} -var _HeaderValue_Key_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") - -var _HeaderValue_Value_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") - -// Validate checks the field values on HeaderValueOption with the rules defined +// Validate checks the field values on KeyValueMutation with the rules defined // in the proto definition for this message. If any rules are violated, the // first error encountered is returned, or nil if there are no violations. -func (m *HeaderValueOption) Validate() error { +func (m *KeyValueMutation) Validate() error { return m.validate(false) } -// ValidateAll checks the field values on HeaderValueOption with the rules +// ValidateAll checks the field values on KeyValueMutation with the rules // defined in the proto definition for this message. If any rules are // violated, the result is a list of violation errors wrapped in -// HeaderValueOptionMultiError, or nil if none found. 
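The new KeyValueAppend rules generated in this hunk require entry to be set and action to be a defined enum value. A small sketch that trips both (the field values are illustrative):

package main

import (
	"fmt"

	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
)

func main() {
	kva := &corev3.KeyValueAppend{
		// Entry is nil, violating the "value is required" rule above.
		Entry: nil,
		// 99 is not a defined KeyValueAppendAction, violating the enum rule.
		Action: corev3.KeyValueAppend_KeyValueAppendAction(99),
	}
	// ValidateAll wraps both violations in a KeyValueAppendMultiError.
	fmt.Println(kva.ValidateAll())
}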
-func (m *HeaderValueOption) ValidateAll() error { +// KeyValueMutationMultiError, or nil if none found. +func (m *KeyValueMutation) ValidateAll() error { return m.validate(true) } -func (m *HeaderValueOption) validate(all bool) error { +func (m *KeyValueMutation) validate(all bool) error { if m == nil { return nil } var errors []error - if m.GetHeader() == nil { - err := HeaderValueOptionValidationError{ - field: "Header", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - } - - if all { - switch v := interface{}(m.GetHeader()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HeaderValueOptionValidationError{ - field: "Header", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HeaderValueOptionValidationError{ - field: "Header", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetHeader()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HeaderValueOptionValidationError{ - field: "Header", - reason: "embedded message failed validation", - cause: err, - } - } - } - if all { switch v := interface{}(m.GetAppend()).(type) { case interface{ ValidateAll() error }: if err := v.ValidateAll(); err != nil { - errors = append(errors, HeaderValueOptionValidationError{ + errors = append(errors, KeyValueMutationValidationError{ field: "Append", reason: "embedded message failed validation", cause: err, @@ -1833,7 +1804,7 @@ func (m *HeaderValueOption) validate(all bool) error { } case interface{ Validate() error }: if err := v.Validate(); err != nil { - errors = append(errors, HeaderValueOptionValidationError{ + errors = append(errors, KeyValueMutationValidationError{ field: "Append", reason: "embedded message failed validation", cause: err, @@ -1842,7 +1813,7 @@ func (m *HeaderValueOption) validate(all bool) error { } } else if v, ok := interface{}(m.GetAppend()).(interface{ Validate() error }); ok { if err := v.Validate(); err != nil { - return HeaderValueOptionValidationError{ + return KeyValueMutationValidationError{ field: "Append", reason: "embedded message failed validation", cause: err, @@ -1850,10 +1821,10 @@ func (m *HeaderValueOption) validate(all bool) error { } } - if _, ok := HeaderValueOption_HeaderAppendAction_name[int32(m.GetAppendAction())]; !ok { - err := HeaderValueOptionValidationError{ - field: "AppendAction", - reason: "value must be one of the defined enum values", + if len(m.GetRemove()) > 16384 { + err := KeyValueMutationValidationError{ + field: "Remove", + reason: "value length must be at most 16384 bytes", } if !all { return err @@ -1861,22 +1832,20 @@ func (m *HeaderValueOption) validate(all bool) error { errors = append(errors, err) } - // no validation rules for KeepEmptyValue - if len(errors) > 0 { - return HeaderValueOptionMultiError(errors) + return KeyValueMutationMultiError(errors) } return nil } -// HeaderValueOptionMultiError is an error wrapping multiple validation errors -// returned by HeaderValueOption.ValidateAll() if the designated constraints +// KeyValueMutationMultiError is an error wrapping multiple validation errors +// returned by KeyValueMutation.ValidateAll() if the designated constraints // aren't met. 
-type HeaderValueOptionMultiError []error +type KeyValueMutationMultiError []error // Error returns a concatenation of all the error messages it wraps. -func (m HeaderValueOptionMultiError) Error() string { +func (m KeyValueMutationMultiError) Error() string { var msgs []string for _, err := range m { msgs = append(msgs, err.Error()) @@ -1885,11 +1854,11 @@ func (m HeaderValueOptionMultiError) Error() string { } // AllErrors returns a list of validation violation errors. -func (m HeaderValueOptionMultiError) AllErrors() []error { return m } +func (m KeyValueMutationMultiError) AllErrors() []error { return m } -// HeaderValueOptionValidationError is the validation error returned by -// HeaderValueOption.Validate if the designated constraints aren't met. -type HeaderValueOptionValidationError struct { +// KeyValueMutationValidationError is the validation error returned by +// KeyValueMutation.Validate if the designated constraints aren't met. +type KeyValueMutationValidationError struct { field string reason string cause error @@ -1897,24 +1866,22 @@ type HeaderValueOptionValidationError struct { } // Field function returns field value. -func (e HeaderValueOptionValidationError) Field() string { return e.field } +func (e KeyValueMutationValidationError) Field() string { return e.field } // Reason function returns reason value. -func (e HeaderValueOptionValidationError) Reason() string { return e.reason } +func (e KeyValueMutationValidationError) Reason() string { return e.reason } // Cause function returns cause value. -func (e HeaderValueOptionValidationError) Cause() error { return e.cause } +func (e KeyValueMutationValidationError) Cause() error { return e.cause } // Key function returns key value. -func (e HeaderValueOptionValidationError) Key() bool { return e.key } +func (e KeyValueMutationValidationError) Key() bool { return e.key } // ErrorName returns error name. -func (e HeaderValueOptionValidationError) ErrorName() string { - return "HeaderValueOptionValidationError" -} +func (e KeyValueMutationValidationError) ErrorName() string { return "KeyValueMutationValidationError" } // Error satisfies the builtin error interface -func (e HeaderValueOptionValidationError) Error() string { +func (e KeyValueMutationValidationError) Error() string { cause := "" if e.cause != nil { cause = fmt.Sprintf(" | caused by: %v", e.cause) @@ -1926,14 +1893,14 @@ func (e HeaderValueOptionValidationError) Error() string { } return fmt.Sprintf( - "invalid %sHeaderValueOption.%s: %s%s", + "invalid %sKeyValueMutation.%s: %s%s", key, e.field, e.reason, cause) } -var _ error = HeaderValueOptionValidationError{} +var _ error = KeyValueMutationValidationError{} var _ interface { Field() string @@ -1941,35 +1908,501 @@ var _ interface { Key() bool Cause() error ErrorName() string -} = HeaderValueOptionValidationError{} +} = KeyValueMutationValidationError{} -// Validate checks the field values on HeaderMap with the rules defined in the -// proto definition for this message. If any rules are violated, the first +// Validate checks the field values on QueryParameter with the rules defined in +// the proto definition for this message. If any rules are violated, the first // error encountered is returned, or nil if there are no violations. -func (m *HeaderMap) Validate() error { +func (m *QueryParameter) Validate() error { return m.validate(false) } -// ValidateAll checks the field values on HeaderMap with the rules defined in -// the proto definition for this message. 
If any rules are violated, the -// result is a list of violation errors wrapped in HeaderMapMultiError, or nil -// if none found. -func (m *HeaderMap) ValidateAll() error { +// ValidateAll checks the field values on QueryParameter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in QueryParameterMultiError, +// or nil if none found. +func (m *QueryParameter) ValidateAll() error { return m.validate(true) } -func (m *HeaderMap) validate(all bool) error { +func (m *QueryParameter) validate(all bool) error { if m == nil { return nil } var errors []error - for idx, item := range m.GetHeaders() { - _, _ = idx, item + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := QueryParameterValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } - if all { - switch v := interface{}(item).(type) { + // no validation rules for Value + + if len(errors) > 0 { + return QueryParameterMultiError(errors) + } + + return nil +} + +// QueryParameterMultiError is an error wrapping multiple validation errors +// returned by QueryParameter.ValidateAll() if the designated constraints +// aren't met. +type QueryParameterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m QueryParameterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m QueryParameterMultiError) AllErrors() []error { return m } + +// QueryParameterValidationError is the validation error returned by +// QueryParameter.Validate if the designated constraints aren't met. +type QueryParameterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e QueryParameterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e QueryParameterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e QueryParameterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e QueryParameterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e QueryParameterValidationError) ErrorName() string { return "QueryParameterValidationError" } + +// Error satisfies the builtin error interface +func (e QueryParameterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sQueryParameter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = QueryParameterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = QueryParameterValidationError{} + +// Validate checks the field values on HeaderValue with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HeaderValue) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HeaderValue with the rules defined in +// the proto definition for this message. 
If any rules are violated, the +// result is a list of violation errors wrapped in HeaderValueMultiError, or +// nil if none found. +func (m *HeaderValue) ValidateAll() error { + return m.validate(true) +} + +func (m *HeaderValue) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := HeaderValueValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetKey()) > 16384 { + err := HeaderValueValidationError{ + field: "Key", + reason: "value length must be at most 16384 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_HeaderValue_Key_Pattern.MatchString(m.GetKey()) { + err := HeaderValueValidationError{ + field: "Key", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetValue()) > 16384 { + err := HeaderValueValidationError{ + field: "Value", + reason: "value length must be at most 16384 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_HeaderValue_Value_Pattern.MatchString(m.GetValue()) { + err := HeaderValueValidationError{ + field: "Value", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if l := len(m.GetRawValue()); l < 0 || l > 16384 { + err := HeaderValueValidationError{ + field: "RawValue", + reason: "value length must be between 0 and 16384 bytes, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HeaderValueMultiError(errors) + } + + return nil +} + +// HeaderValueMultiError is an error wrapping multiple validation errors +// returned by HeaderValue.ValidateAll() if the designated constraints aren't met. +type HeaderValueMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HeaderValueMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HeaderValueMultiError) AllErrors() []error { return m } + +// HeaderValueValidationError is the validation error returned by +// HeaderValue.Validate if the designated constraints aren't met. +type HeaderValueValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HeaderValueValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HeaderValueValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HeaderValueValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HeaderValueValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
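HeaderValue keeps the key/value constraints that previously lived higher up in this file: a non-empty key, 16384-byte caps on key, value, and raw_value, and the ^[^\x00\n\r]*$ pattern on both key and value. A sketch showing the regex rule firing (the header contents are illustrative):

package main

import (
	"fmt"

	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
)

func main() {
	// The embedded newline fails the _HeaderValue_Value_Pattern regex above;
	// Validate returns that first violation and stops.
	hv := &corev3.HeaderValue{Key: "x-demo", Value: "bad\nvalue"}
	fmt.Println(hv.Validate())
}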
+func (e HeaderValueValidationError) ErrorName() string { return "HeaderValueValidationError" } + +// Error satisfies the builtin error interface +func (e HeaderValueValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHeaderValue.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HeaderValueValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HeaderValueValidationError{} + +var _HeaderValue_Key_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _HeaderValue_Value_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on HeaderValueOption with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *HeaderValueOption) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HeaderValueOption with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HeaderValueOptionMultiError, or nil if none found. +func (m *HeaderValueOption) ValidateAll() error { + return m.validate(true) +} + +func (m *HeaderValueOption) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetHeader() == nil { + err := HeaderValueOptionValidationError{ + field: "Header", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetHeader()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HeaderValueOptionValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HeaderValueOptionValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeader()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HeaderValueOptionValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetAppend()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HeaderValueOptionValidationError{ + field: "Append", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HeaderValueOptionValidationError{ + field: "Append", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAppend()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HeaderValueOptionValidationError{ + field: "Append", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if _, ok := HeaderValueOption_HeaderAppendAction_name[int32(m.GetAppendAction())]; !ok { + err := HeaderValueOptionValidationError{ + field: "AppendAction", + reason: "value must be one of the defined enum values", + } + if !all { + 
return err + } + errors = append(errors, err) + } + + // no validation rules for KeepEmptyValue + + if len(errors) > 0 { + return HeaderValueOptionMultiError(errors) + } + + return nil +} + +// HeaderValueOptionMultiError is an error wrapping multiple validation errors +// returned by HeaderValueOption.ValidateAll() if the designated constraints +// aren't met. +type HeaderValueOptionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HeaderValueOptionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HeaderValueOptionMultiError) AllErrors() []error { return m } + +// HeaderValueOptionValidationError is the validation error returned by +// HeaderValueOption.Validate if the designated constraints aren't met. +type HeaderValueOptionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HeaderValueOptionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HeaderValueOptionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HeaderValueOptionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HeaderValueOptionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HeaderValueOptionValidationError) ErrorName() string { + return "HeaderValueOptionValidationError" +} + +// Error satisfies the builtin error interface +func (e HeaderValueOptionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHeaderValueOption.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HeaderValueOptionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HeaderValueOptionValidationError{} + +// Validate checks the field values on HeaderMap with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HeaderMap) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HeaderMap with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HeaderMapMultiError, or nil +// if none found. 
+func (m *HeaderMap) ValidateAll() error { + return m.validate(true) +} + +func (m *HeaderMap) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { case interface{ ValidateAll() error }: if err := v.ValidateAll(); err != nil { errors = append(errors, HeaderMapValidationError{ @@ -2209,6 +2642,35 @@ func (m *DataSource) validate(all bool) error { var errors []error + if all { + switch v := interface{}(m.GetWatchedDirectory()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DataSourceValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DataSourceValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWatchedDirectory()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DataSourceValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + } + } + } + oneofSpecifierPresent := false switch v := m.Specifier.(type) { case *DataSource_Filename: @@ -2456,6 +2918,73 @@ func (m *RetryPolicy) validate(all bool) error { } } + // no validation rules for RetryOn + + if all { + switch v := interface{}(m.GetRetryPriority()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "RetryPriority", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "RetryPriority", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryPriority()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: "RetryPriority", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetRetryHostPredicate() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: fmt.Sprintf("RetryHostPredicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: fmt.Sprintf("RetryHostPredicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: fmt.Sprintf("RetryHostPredicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for HostSelectionRetryMaxAttempts + if len(errors) > 0 { return RetryPolicyMultiError(errors) } @@ -3314,3 +3843,322 @@ var _ interface { Cause() error ErrorName() string } = ControlPlaneValidationError{} + +// Validate checks the field values on 
RetryPolicy_RetryPriority with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RetryPolicy_RetryPriority) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RetryPolicy_RetryPriority with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RetryPolicy_RetryPriorityMultiError, or nil if none found. +func (m *RetryPolicy_RetryPriority) ValidateAll() error { + return m.validate(true) +} + +func (m *RetryPolicy_RetryPriority) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := RetryPolicy_RetryPriorityValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + switch v := m.ConfigType.(type) { + case *RetryPolicy_RetryPriority_TypedConfig: + if v == nil { + err := RetryPolicy_RetryPriorityValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicy_RetryPriorityValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicy_RetryPriorityValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicy_RetryPriorityValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return RetryPolicy_RetryPriorityMultiError(errors) + } + + return nil +} + +// RetryPolicy_RetryPriorityMultiError is an error wrapping multiple validation +// errors returned by RetryPolicy_RetryPriority.ValidateAll() if the +// designated constraints aren't met. +type RetryPolicy_RetryPriorityMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RetryPolicy_RetryPriorityMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RetryPolicy_RetryPriorityMultiError) AllErrors() []error { return m } + +// RetryPolicy_RetryPriorityValidationError is the validation error returned by +// RetryPolicy_RetryPriority.Validate if the designated constraints aren't met. +type RetryPolicy_RetryPriorityValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RetryPolicy_RetryPriorityValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RetryPolicy_RetryPriorityValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
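RetryPolicy_RetryPriority (and its sibling RetryPolicy_RetryHostPredicate below) follows the extension-point shape visible throughout this hunk: a required name plus a typed_config oneof that must not be a typed nil when set. A sketch of constructing one that passes these generated checks (the name and the empty Any payload are placeholders, not a real extension config):

package main

import (
	"fmt"

	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	"google.golang.org/protobuf/types/known/anypb"
)

func main() {
	rp := &corev3.RetryPolicy_RetryPriority{
		Name: "example.retry_priority", // placeholder; must be >= 1 rune
		ConfigType: &corev3.RetryPolicy_RetryPriority_TypedConfig{
			TypedConfig: &anypb.Any{}, // non-nil, so the typed-nil rule passes
		},
	}
	fmt.Println(rp.Validate()) // <nil>: all generated constraints are satisfied
}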
+func (e RetryPolicy_RetryPriorityValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RetryPolicy_RetryPriorityValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RetryPolicy_RetryPriorityValidationError) ErrorName() string { + return "RetryPolicy_RetryPriorityValidationError" +} + +// Error satisfies the builtin error interface +func (e RetryPolicy_RetryPriorityValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRetryPolicy_RetryPriority.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RetryPolicy_RetryPriorityValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RetryPolicy_RetryPriorityValidationError{} + +// Validate checks the field values on RetryPolicy_RetryHostPredicate with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RetryPolicy_RetryHostPredicate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RetryPolicy_RetryHostPredicate with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// RetryPolicy_RetryHostPredicateMultiError, or nil if none found. +func (m *RetryPolicy_RetryHostPredicate) ValidateAll() error { + return m.validate(true) +} + +func (m *RetryPolicy_RetryHostPredicate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := RetryPolicy_RetryHostPredicateValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + switch v := m.ConfigType.(type) { + case *RetryPolicy_RetryHostPredicate_TypedConfig: + if v == nil { + err := RetryPolicy_RetryHostPredicateValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicy_RetryHostPredicateValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicy_RetryHostPredicateValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicy_RetryHostPredicateValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return RetryPolicy_RetryHostPredicateMultiError(errors) + } + + return nil +} + +// RetryPolicy_RetryHostPredicateMultiError is an error wrapping multiple +// validation errors returned by RetryPolicy_RetryHostPredicate.ValidateAll() +// if the designated constraints aren't met. 
+type RetryPolicy_RetryHostPredicateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RetryPolicy_RetryHostPredicateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RetryPolicy_RetryHostPredicateMultiError) AllErrors() []error { return m } + +// RetryPolicy_RetryHostPredicateValidationError is the validation error +// returned by RetryPolicy_RetryHostPredicate.Validate if the designated +// constraints aren't met. +type RetryPolicy_RetryHostPredicateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RetryPolicy_RetryHostPredicateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RetryPolicy_RetryHostPredicateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RetryPolicy_RetryHostPredicateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RetryPolicy_RetryHostPredicateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RetryPolicy_RetryHostPredicateValidationError) ErrorName() string { + return "RetryPolicy_RetryHostPredicateValidationError" +} + +// Error satisfies the builtin error interface +func (e RetryPolicy_RetryHostPredicateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRetryPolicy_RetryHostPredicate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RetryPolicy_RetryHostPredicateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RetryPolicy_RetryHostPredicateValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base_vtproto.pb.go new file mode 100644 index 00000000000..fa4823023f7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base_vtproto.pb.go @@ -0,0 +1,2497 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/base.proto + +package corev3 + +import ( + binary "encoding/binary" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
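This new base_vtproto.pb.go file is guarded by the vtprotobuf build tag, so the MarshalVTStrict fast path only exists when the provider is compiled with -tags vtprotobuf; default builds keep using the reflection-based proto.Marshal. The sketch below applies the same defensive runtime probe the generated code itself uses for cross-package messages, here from a hypothetical caller (the Locality values are illustrative):

package main

import (
	"fmt"

	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	"google.golang.org/protobuf/proto"
)

func main() {
	msg := &corev3.Locality{Region: "us-east1", Zone: "us-east1-b"}

	// Probe for the vtprotobuf method at runtime; it is only generated when
	// the vtprotobuf build tag is set, so fall back to proto.Marshal otherwise.
	var raw []byte
	var err error
	if vt, ok := interface{}(msg).(interface{ MarshalVTStrict() ([]byte, error) }); ok {
		raw, err = vt.MarshalVTStrict()
	} else {
		raw, err = proto.Marshal(msg)
	}
	if err != nil {
		panic(err)
	}
	fmt.Printf("marshalled %d bytes\n", len(raw))
}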
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Locality) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Locality) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Locality) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.SubZone) > 0 { + i -= len(m.SubZone) + copy(dAtA[i:], m.SubZone) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SubZone))) + i-- + dAtA[i] = 0x1a + } + if len(m.Zone) > 0 { + i -= len(m.Zone) + copy(dAtA[i:], m.Zone) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Zone))) + i-- + dAtA[i] = 0x12 + } + if len(m.Region) > 0 { + i -= len(m.Region) + copy(dAtA[i:], m.Region) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Region))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BuildVersion) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildVersion) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *BuildVersion) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Metadata != nil { + size, err := (*structpb.Struct)(m.Metadata).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Version != nil { + if vtmsg, ok := interface{}(m.Version).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Version) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Extension) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Extension) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Extension) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TypeUrls) > 0 { + for iNdEx := len(m.TypeUrls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TypeUrls[iNdEx]) + 
copy(dAtA[i:], m.TypeUrls[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrls[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if m.Disabled { + i-- + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.Version != nil { + size, err := m.Version.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.TypeDescriptor) > 0 { + i -= len(m.TypeDescriptor) + copy(dAtA[i:], m.TypeDescriptor) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeDescriptor))) + i-- + dAtA[i] = 0x1a + } + if len(m.Category) > 0 { + i -= len(m.Category) + copy(dAtA[i:], m.Category) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Category))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Node) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Node) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Node) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DynamicParameters) > 0 { + for k := range m.DynamicParameters { + v := m.DynamicParameters[k] + baseI := i + if vtmsg, ok := interface{}(v).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(v) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x62 + } + } + if len(m.ListeningAddresses) > 0 { + for iNdEx := len(m.ListeningAddresses) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ListeningAddresses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + } + if len(m.ClientFeatures) > 0 { + for iNdEx := len(m.ClientFeatures) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ClientFeatures[iNdEx]) + copy(dAtA[i:], m.ClientFeatures[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClientFeatures[iNdEx]))) + i-- + dAtA[i] = 0x52 + } + } + if len(m.Extensions) > 0 { + for iNdEx := len(m.Extensions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Extensions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + } + if msg, ok := m.UserAgentVersionType.(*Node_UserAgentBuildVersion); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + } + if msg, ok := m.UserAgentVersionType.(*Node_UserAgentVersion); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.UserAgentName) > 0 { + i -= len(m.UserAgentName) + copy(dAtA[i:], m.UserAgentName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.UserAgentName))) + i-- + dAtA[i] = 0x32 + } + if m.Locality != nil { + size, err := m.Locality.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.Metadata != nil { + size, err := (*structpb.Struct)(m.Metadata).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Cluster) > 0 { + i -= len(m.Cluster) + copy(dAtA[i:], m.Cluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Cluster))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Node_UserAgentVersion) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Node_UserAgentVersion) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.UserAgentVersion) + copy(dAtA[i:], m.UserAgentVersion) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.UserAgentVersion))) + i-- + dAtA[i] = 0x3a + return len(dAtA) - i, nil +} +func (m *Node_UserAgentBuildVersion) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Node_UserAgentBuildVersion) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.UserAgentBuildVersion != nil { + size, err := m.UserAgentBuildVersion.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *Metadata) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metadata) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Metadata) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TypedFilterMetadata) > 0 { + for k := range m.TypedFilterMetadata { + v := m.TypedFilterMetadata[k] + baseI := i + size, err := (*anypb.Any)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.FilterMetadata) > 0 { + 
for k := range m.FilterMetadata { + v := m.FilterMetadata[k] + baseI := i + size, err := (*structpb.Struct)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RuntimeUInt32) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeUInt32) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeUInt32) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RuntimeKey) > 0 { + i -= len(m.RuntimeKey) + copy(dAtA[i:], m.RuntimeKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeKey))) + i-- + dAtA[i] = 0x1a + } + if m.DefaultValue != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DefaultValue)) + i-- + dAtA[i] = 0x10 + } + return len(dAtA) - i, nil +} + +func (m *RuntimePercent) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimePercent) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimePercent) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RuntimeKey) > 0 { + i -= len(m.RuntimeKey) + copy(dAtA[i:], m.RuntimeKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeKey))) + i-- + dAtA[i] = 0x12 + } + if m.DefaultValue != nil { + if vtmsg, ok := interface{}(m.DefaultValue).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DefaultValue) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RuntimeDouble) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeDouble) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeDouble) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RuntimeKey) > 0 { + i -= len(m.RuntimeKey) + copy(dAtA[i:], m.RuntimeKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeKey))) + i-- + dAtA[i] = 0x12 + } + if m.DefaultValue != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.DefaultValue)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *RuntimeFeatureFlag) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeFeatureFlag) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeFeatureFlag) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RuntimeKey) > 0 { + i -= len(m.RuntimeKey) + copy(dAtA[i:], m.RuntimeKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeKey))) + i-- + dAtA[i] = 0x12 + } + if m.DefaultValue != nil { + size, err := (*wrapperspb.BoolValue)(m.DefaultValue).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *KeyValue) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *KeyValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *KeyValueAppend) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyValueAppend) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *KeyValueAppend) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Action != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Action)) + i-- + dAtA[i] = 0x10 + } + if m.Entry != nil { + size, err := 
m.Entry.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *KeyValueMutation) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyValueMutation) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *KeyValueMutation) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Remove) > 0 { + i -= len(m.Remove) + copy(dAtA[i:], m.Remove) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Remove))) + i-- + dAtA[i] = 0x12 + } + if m.Append != nil { + size, err := m.Append.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryParameter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParameter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *QueryParameter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HeaderValue) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeaderValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RawValue) > 0 { + i -= len(m.RawValue) + copy(dAtA[i:], m.RawValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RawValue))) + i-- + dAtA[i] = 0x1a + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, 
nil +} + +func (m *HeaderValueOption) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeaderValueOption) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderValueOption) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.KeepEmptyValue { + i-- + if m.KeepEmptyValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.AppendAction != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.AppendAction)) + i-- + dAtA[i] = 0x18 + } + if m.Append != nil { + size, err := (*wrapperspb.BoolValue)(m.Append).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Header != nil { + size, err := m.Header.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HeaderMap) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeaderMap) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMap) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Headers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *WatchedDirectory) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchedDirectory) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *WatchedDirectory) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DataSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err 
!= nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DataSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DataSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.WatchedDirectory != nil { + size, err := m.WatchedDirectory.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if msg, ok := m.Specifier.(*DataSource_EnvironmentVariable); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Specifier.(*DataSource_InlineString); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Specifier.(*DataSource_InlineBytes); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Specifier.(*DataSource_Filename); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *DataSource_Filename) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DataSource_Filename) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Filename) + copy(dAtA[i:], m.Filename) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Filename))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *DataSource_InlineBytes) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DataSource_InlineBytes) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.InlineBytes) + copy(dAtA[i:], m.InlineBytes) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.InlineBytes))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *DataSource_InlineString) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DataSource_InlineString) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.InlineString) + copy(dAtA[i:], m.InlineString) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.InlineString))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *DataSource_EnvironmentVariable) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DataSource_EnvironmentVariable) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.EnvironmentVariable) + copy(dAtA[i:], m.EnvironmentVariable) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.EnvironmentVariable))) + i-- + dAtA[i] = 0x22 + return len(dAtA) - i, nil +} +func (m *RetryPolicy_RetryPriority) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*RetryPolicy_RetryPriority) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryPriority) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*RetryPolicy_RetryPriority_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RetryPolicy_RetryPriority_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryPriority_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *RetryPolicy_RetryHostPredicate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryPolicy_RetryHostPredicate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryHostPredicate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*RetryPolicy_RetryHostPredicate_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RetryPolicy_RetryHostPredicate_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryHostPredicate_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *RetryPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryPolicy) MarshalToVTStrict(dAtA 
[]byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.HostSelectionRetryMaxAttempts != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HostSelectionRetryMaxAttempts)) + i-- + dAtA[i] = 0x30 + } + if len(m.RetryHostPredicate) > 0 { + for iNdEx := len(m.RetryHostPredicate) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RetryHostPredicate[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.RetryPriority != nil { + size, err := m.RetryPriority.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.RetryOn) > 0 { + i -= len(m.RetryOn) + copy(dAtA[i:], m.RetryOn) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RetryOn))) + i-- + dAtA[i] = 0x1a + } + if m.NumRetries != nil { + size, err := (*wrapperspb.UInt32Value)(m.NumRetries).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.RetryBackOff != nil { + size, err := m.RetryBackOff.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemoteDataSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoteDataSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RemoteDataSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RetryPolicy != nil { + size, err := m.RetryPolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Sha256) > 0 { + i -= len(m.Sha256) + copy(dAtA[i:], m.Sha256) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Sha256))) + i-- + dAtA[i] = 0x12 + } + if m.HttpUri != nil { + size, err := m.HttpUri.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AsyncDataSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AsyncDataSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AsyncDataSource) 
MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Specifier.(*AsyncDataSource_Remote); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Specifier.(*AsyncDataSource_Local); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *AsyncDataSource_Local) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AsyncDataSource_Local) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Local != nil { + size, err := m.Local.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *AsyncDataSource_Remote) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AsyncDataSource_Remote) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Remote != nil { + size, err := m.Remote.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *TransportSocket) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TransportSocket) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TransportSocket) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*TransportSocket_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TransportSocket_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TransportSocket_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RuntimeFractionalPercent) MarshalVTStrict() (dAtA []byte, err 
error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeFractionalPercent) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeFractionalPercent) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RuntimeKey) > 0 { + i -= len(m.RuntimeKey) + copy(dAtA[i:], m.RuntimeKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeKey))) + i-- + dAtA[i] = 0x12 + } + if m.DefaultValue != nil { + if vtmsg, ok := interface{}(m.DefaultValue).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DefaultValue) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ControlPlane) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControlPlane) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ControlPlane) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Identifier) > 0 { + i -= len(m.Identifier) + copy(dAtA[i:], m.Identifier) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Identifier))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Locality) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Region) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Zone) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.SubZone) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *BuildVersion) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Version != nil { + if size, ok := interface{}(m.Version).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Version) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Metadata != nil { + l = (*structpb.Struct)(m.Metadata).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Extension) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Category) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.TypeDescriptor) + if l > 0 { + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + if m.Version != nil { + l = m.Version.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Disabled { + n += 2 + } + if len(m.TypeUrls) > 0 { + for _, s := range m.TypeUrls { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Node) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Cluster) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Metadata != nil { + l = (*structpb.Struct)(m.Metadata).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Locality != nil { + l = m.Locality.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.UserAgentName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.UserAgentVersionType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if len(m.Extensions) > 0 { + for _, e := range m.Extensions { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ClientFeatures) > 0 { + for _, s := range m.ClientFeatures { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ListeningAddresses) > 0 { + for _, e := range m.ListeningAddresses { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicParameters) > 0 { + for k, v := range m.DynamicParameters { + _ = k + _ = v + l = 0 + if v != nil { + if size, ok := interface{}(v).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(v) + } + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Node_UserAgentVersion) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UserAgentVersion) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *Node_UserAgentBuildVersion) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UserAgentBuildVersion != nil { + l = m.UserAgentBuildVersion.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Metadata) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.FilterMetadata) > 0 { + for k, v := range m.FilterMetadata { + _ = k + _ = v + l = 0 + if v != nil { + l = (*structpb.Struct)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if len(m.TypedFilterMetadata) > 0 { + for k, v := range m.TypedFilterMetadata { + _ = k + _ = v + l = 0 + if v != nil { + l = (*anypb.Any)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RuntimeUInt32) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DefaultValue != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.DefaultValue)) + } + l = len(m.RuntimeKey) + if l > 0 { + n += 1 + 
l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RuntimePercent) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DefaultValue != nil { + if size, ok := interface{}(m.DefaultValue).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DefaultValue) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RuntimeKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RuntimeDouble) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DefaultValue != 0 { + n += 9 + } + l = len(m.RuntimeKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RuntimeFeatureFlag) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DefaultValue != nil { + l = (*wrapperspb.BoolValue)(m.DefaultValue).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RuntimeKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *KeyValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *KeyValueAppend) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Entry != nil { + l = m.Entry.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Action != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Action)) + } + n += len(m.unknownFields) + return n +} + +func (m *KeyValueMutation) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Append != nil { + l = m.Append.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Remove) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *QueryParameter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HeaderValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RawValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HeaderValueOption) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Append != nil { + l = (*wrapperspb.BoolValue)(m.Append).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AppendAction != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.AppendAction)) + } + if m.KeepEmptyValue { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *HeaderMap) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Headers) > 0 { + for _, e := range m.Headers { + l = 
e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *WatchedDirectory) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DataSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Specifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.WatchedDirectory != nil { + l = m.WatchedDirectory.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DataSource_Filename) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Filename) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *DataSource_InlineBytes) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.InlineBytes) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *DataSource_InlineString) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.InlineString) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *DataSource_EnvironmentVariable) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.EnvironmentVariable) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RetryPolicy_RetryPriority) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *RetryPolicy_RetryPriority_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RetryPolicy_RetryHostPredicate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *RetryPolicy_RetryHostPredicate_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RetryPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RetryBackOff != nil { + l = m.RetryBackOff.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.NumRetries != nil { + l = (*wrapperspb.UInt32Value)(m.NumRetries).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RetryOn) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RetryPriority != nil { + l = m.RetryPriority.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RetryHostPredicate) > 0 { + for _, e := range m.RetryHostPredicate { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.HostSelectionRetryMaxAttempts != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HostSelectionRetryMaxAttempts)) + } + n 
+= len(m.unknownFields) + return n +} + +func (m *RemoteDataSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpUri != nil { + l = m.HttpUri.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Sha256) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RetryPolicy != nil { + l = m.RetryPolicy.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *AsyncDataSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Specifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *AsyncDataSource_Local) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Local != nil { + l = m.Local.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AsyncDataSource_Remote) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Remote != nil { + l = m.Remote.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *TransportSocket) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *TransportSocket_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RuntimeFractionalPercent) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DefaultValue != nil { + if size, ok := interface{}(m.DefaultValue).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DefaultValue) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RuntimeKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ControlPlane) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Identifier) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.go index 32fc76d39ae..295398b9f19 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/core/v3/config_source.proto package corev3 @@ -11,11 +11,11 @@ import ( v3 "github.com/cncf/xds/go/xds/core/v3" _ "github.com/envoyproxy/go-control-plane/envoy/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any1 "github.com/golang/protobuf/ptypes/any" - duration "github.com/golang/protobuf/ptypes/duration" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -32,13 +32,9 @@ const ( type ApiVersion int32 const ( - // When not specified, we assume v2, to ease migration to Envoy's stable API - // versioning. If a client does not support v2 (e.g. due to deprecation), this - // is an invalid value. - // - // Deprecated: Marked as deprecated in envoy/config/core/v3/config_source.proto. + // When not specified, we assume v3; it is the only supported version. ApiVersion_AUTO ApiVersion = 0 - // Use xDS v2 API. + // Use xDS v2 API. This is no longer supported. // // Deprecated: Marked as deprecated in envoy/config/core/v3/config_source.proto. ApiVersion_V2 ApiVersion = 1 @@ -189,9 +185,9 @@ type ApiConfigSource struct { // services will be cycled through if any kind of failure occurs. GrpcServices []*GrpcService `protobuf:"bytes,4,rep,name=grpc_services,json=grpcServices,proto3" json:"grpc_services,omitempty"` // For REST APIs, the delay between successive polls. - RefreshDelay *duration.Duration `protobuf:"bytes,3,opt,name=refresh_delay,json=refreshDelay,proto3" json:"refresh_delay,omitempty"` + RefreshDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=refresh_delay,json=refreshDelay,proto3" json:"refresh_delay,omitempty"` // For REST APIs, the request timeout. If not set, a default value of 1s will be used. - RequestTimeout *duration.Duration `protobuf:"bytes,5,opt,name=request_timeout,json=requestTimeout,proto3" json:"request_timeout,omitempty"` + RequestTimeout *durationpb.Duration `protobuf:"bytes,5,opt,name=request_timeout,json=requestTimeout,proto3" json:"request_timeout,omitempty"` // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be // rate limited. RateLimitSettings *RateLimitSettings `protobuf:"bytes,6,opt,name=rate_limit_settings,json=rateLimitSettings,proto3" json:"rate_limit_settings,omitempty"` @@ -268,14 +264,14 @@ func (x *ApiConfigSource) GetGrpcServices() []*GrpcService { return nil } -func (x *ApiConfigSource) GetRefreshDelay() *duration.Duration { +func (x *ApiConfigSource) GetRefreshDelay() *durationpb.Duration { if x != nil { return x.RefreshDelay } return nil } -func (x *ApiConfigSource) GetRequestTimeout() *duration.Duration { +func (x *ApiConfigSource) GetRequestTimeout() *durationpb.Duration { if x != nil { return x.RequestTimeout } @@ -405,11 +401,11 @@ type RateLimitSettings struct { // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a // default value of 100 will be used. 
- MaxTokens *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=max_tokens,json=maxTokens,proto3" json:"max_tokens,omitempty"` + MaxTokens *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=max_tokens,json=maxTokens,proto3" json:"max_tokens,omitempty"` // Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens // per second will be used. The minimal fill rate is once per year. Lower // fill rates will be set to once per year. - FillRate *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=fill_rate,json=fillRate,proto3" json:"fill_rate,omitempty"` + FillRate *wrapperspb.DoubleValue `protobuf:"bytes,2,opt,name=fill_rate,json=fillRate,proto3" json:"fill_rate,omitempty"` } func (x *RateLimitSettings) Reset() { @@ -444,14 +440,14 @@ func (*RateLimitSettings) Descriptor() ([]byte, []int) { return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{3} } -func (x *RateLimitSettings) GetMaxTokens() *wrappers.UInt32Value { +func (x *RateLimitSettings) GetMaxTokens() *wrapperspb.UInt32Value { if x != nil { return x.MaxTokens } return nil } -func (x *RateLimitSettings) GetFillRate() *wrappers.DoubleValue { +func (x *RateLimitSettings) GetFillRate() *wrapperspb.DoubleValue { if x != nil { return x.FillRate } @@ -575,7 +571,7 @@ type ConfigSource struct { // when the xDS API subscription starts, and is disarmed on first config update or on error. 0 // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another // timeout applies). The default is 15s. - InitialFetchTimeout *duration.Duration `protobuf:"bytes,4,opt,name=initial_fetch_timeout,json=initialFetchTimeout,proto3" json:"initial_fetch_timeout,omitempty"` + InitialFetchTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=initial_fetch_timeout,json=initialFetchTimeout,proto3" json:"initial_fetch_timeout,omitempty"` // API version for xDS resources. This implies the type URLs that the client // will request for resources and the resource type that the client will in // turn expect to be delivered. @@ -664,7 +660,7 @@ func (x *ConfigSource) GetSelf() *SelfConfigSource { return nil } -func (x *ConfigSource) GetInitialFetchTimeout() *duration.Duration { +func (x *ConfigSource) GetInitialFetchTimeout() *durationpb.Duration { if x != nil { return x.InitialFetchTimeout } @@ -750,7 +746,7 @@ type ExtensionConfigSource struct { // Optional default configuration to use as the initial configuration if // there is a failure to receive the initial extension configuration or if // “apply_default_config_without_warming“ flag is set. - DefaultConfig *any1.Any `protobuf:"bytes,2,opt,name=default_config,json=defaultConfig,proto3" json:"default_config,omitempty"` + DefaultConfig *anypb.Any `protobuf:"bytes,2,opt,name=default_config,json=defaultConfig,proto3" json:"default_config,omitempty"` // Use the default config as the initial configuration without warming and // waiting for the first discovery response. Requires the default configuration // to be supplied. 
@@ -799,7 +795,7 @@ func (x *ExtensionConfigSource) GetConfigSource() *ConfigSource { return nil } -func (x *ExtensionConfigSource) GetDefaultConfig() *any1.Any { +func (x *ExtensionConfigSource) GetDefaultConfig() *anypb.Any { if x != nil { return x.DefaultConfig } @@ -999,20 +995,19 @@ var file_envoy_config_core_v3_config_source_proto_rawDesc = []byte{ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x57, 0x69, 0x74, 0x68, 0x6f, 0x75, 0x74, 0x57, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x25, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, - 0x01, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x73, 0x2a, 0x40, 0x0a, 0x0a, 0x41, - 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x04, 0x41, 0x55, 0x54, - 0x4f, 0x10, 0x00, 0x1a, 0x0b, 0x8a, 0xf4, 0x9b, 0xb3, 0x05, 0x03, 0x33, 0x2e, 0x30, 0x08, 0x01, - 0x12, 0x13, 0x0a, 0x02, 0x56, 0x32, 0x10, 0x01, 0x1a, 0x0b, 0x8a, 0xf4, 0x9b, 0xb3, 0x05, 0x03, - 0x33, 0x2e, 0x30, 0x08, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x56, 0x33, 0x10, 0x02, 0x42, 0x85, 0x01, - 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x11, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, - 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x01, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x73, 0x2a, 0x33, 0x0a, 0x0a, 0x41, + 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x55, 0x54, + 0x4f, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x02, 0x56, 0x32, 0x10, 0x01, 0x1a, 0x0b, 0x8a, 0xf4, 0x9b, + 0xb3, 0x05, 0x03, 0x33, 0x2e, 0x30, 0x08, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x56, 0x33, 0x10, 0x02, + 0x42, 0x85, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, + 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, + 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1040,13 +1035,13 @@ var file_envoy_config_core_v3_config_source_proto_goTypes = []interface{}{ (*ConfigSource)(nil), // 7: envoy.config.core.v3.ConfigSource (*ExtensionConfigSource)(nil), // 8: envoy.config.core.v3.ExtensionConfigSource (*GrpcService)(nil), // 9: 
envoy.config.core.v3.GrpcService - (*duration.Duration)(nil), // 10: google.protobuf.Duration + (*durationpb.Duration)(nil), // 10: google.protobuf.Duration (*TypedExtensionConfig)(nil), // 11: envoy.config.core.v3.TypedExtensionConfig - (*wrappers.UInt32Value)(nil), // 12: google.protobuf.UInt32Value - (*wrappers.DoubleValue)(nil), // 13: google.protobuf.DoubleValue + (*wrapperspb.UInt32Value)(nil), // 12: google.protobuf.UInt32Value + (*wrapperspb.DoubleValue)(nil), // 13: google.protobuf.DoubleValue (*WatchedDirectory)(nil), // 14: envoy.config.core.v3.WatchedDirectory (*v3.Authority)(nil), // 15: xds.core.v3.Authority - (*any1.Any)(nil), // 16: google.protobuf.Any + (*anypb.Any)(nil), // 16: google.protobuf.Any } var file_envoy_config_core_v3_config_source_proto_depIdxs = []int32{ 1, // 0: envoy.config.core.v3.ApiConfigSource.api_type:type_name -> envoy.config.core.v3.ApiConfigSource.ApiType diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.validate.go index edab1513527..c2d0b15953d 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/core/v3/config_source.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source_vtproto.pb.go new file mode 100644 index 00000000000..ae2fd091deb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source_vtproto.pb.go @@ -0,0 +1,831 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/config_source.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ApiConfigSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ApiConfigSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ApiConfigSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ConfigValidators) > 0 { + for iNdEx := len(m.ConfigValidators) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ConfigValidators[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + } + if m.TransportApiVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TransportApiVersion)) + i-- + dAtA[i] = 0x40 + } + if m.SetNodeOnFirstMessageOnly { + i-- + if m.SetNodeOnFirstMessageOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.RateLimitSettings != nil { + size, err := m.RateLimitSettings.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.RequestTimeout != nil { + size, err := (*durationpb.Duration)(m.RequestTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if len(m.GrpcServices) > 0 { + for iNdEx := len(m.GrpcServices) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.GrpcServices[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if m.RefreshDelay != nil { + size, err := (*durationpb.Duration)(m.RefreshDelay).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.ClusterNames) > 0 { + for iNdEx := len(m.ClusterNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ClusterNames[iNdEx]) + copy(dAtA[i:], m.ClusterNames[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterNames[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.ApiType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ApiType)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *AggregatedConfigSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AggregatedConfigSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AggregatedConfigSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return 
len(dAtA) - i, nil +} + +func (m *SelfConfigSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfConfigSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SelfConfigSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TransportApiVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TransportApiVersion)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RateLimitSettings) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimitSettings) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimitSettings) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.FillRate != nil { + size, err := (*wrapperspb.DoubleValue)(m.FillRate).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MaxTokens != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxTokens).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PathConfigSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PathConfigSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathConfigSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.WatchedDirectory != nil { + size, err := m.WatchedDirectory.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConfigSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigSource) 
MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ConfigSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigSourceSpecifier.(*ConfigSource_PathConfigSource); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Authorities) > 0 { + for iNdEx := len(m.Authorities) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Authorities[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Authorities[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + } + if m.ResourceApiVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ResourceApiVersion)) + i-- + dAtA[i] = 0x30 + } + if msg, ok := m.ConfigSourceSpecifier.(*ConfigSource_Self); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.InitialFetchTimeout != nil { + size, err := (*durationpb.Duration)(m.InitialFetchTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if msg, ok := m.ConfigSourceSpecifier.(*ConfigSource_Ads); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ConfigSourceSpecifier.(*ConfigSource_ApiConfigSource); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ConfigSourceSpecifier.(*ConfigSource_Path); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *ConfigSource_Path) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ConfigSource_Path) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *ConfigSource_ApiConfigSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ConfigSource_ApiConfigSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ApiConfigSource != nil { + size, err := m.ApiConfigSource.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *ConfigSource_Ads) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} 
+ +func (m *ConfigSource_Ads) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Ads != nil { + size, err := m.Ads.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *ConfigSource_Self) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ConfigSource_Self) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Self != nil { + size, err := m.Self.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *ConfigSource_PathConfigSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ConfigSource_PathConfigSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PathConfigSource != nil { + size, err := m.PathConfigSource.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *ExtensionConfigSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExtensionConfigSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ExtensionConfigSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TypeUrls) > 0 { + for iNdEx := len(m.TypeUrls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TypeUrls[iNdEx]) + copy(dAtA[i:], m.TypeUrls[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrls[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if m.ApplyDefaultConfigWithoutWarming { + i-- + if m.ApplyDefaultConfigWithoutWarming { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.DefaultConfig != nil { + size, err := (*anypb.Any)(m.DefaultConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.ConfigSource != nil { + size, err := m.ConfigSource.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ApiConfigSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ApiType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ApiType)) + } + if len(m.ClusterNames) > 0 { + for _, s := range m.ClusterNames { + l = len(s) + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.RefreshDelay != nil { + l = (*durationpb.Duration)(m.RefreshDelay).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.GrpcServices) > 0 { + for _, e := range m.GrpcServices { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.RequestTimeout != nil { + l = (*durationpb.Duration)(m.RequestTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RateLimitSettings != nil { + l = m.RateLimitSettings.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SetNodeOnFirstMessageOnly { + n += 2 + } + if m.TransportApiVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TransportApiVersion)) + } + if len(m.ConfigValidators) > 0 { + for _, e := range m.ConfigValidators { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *AggregatedConfigSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *SelfConfigSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TransportApiVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TransportApiVersion)) + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimitSettings) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxTokens != nil { + l = (*wrapperspb.UInt32Value)(m.MaxTokens).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FillRate != nil { + l = (*wrapperspb.DoubleValue)(m.FillRate).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *PathConfigSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.WatchedDirectory != nil { + l = m.WatchedDirectory.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ConfigSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.ConfigSourceSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.InitialFetchTimeout != nil { + l = (*durationpb.Duration)(m.InitialFetchTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ResourceApiVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ResourceApiVersion)) + } + if len(m.Authorities) > 0 { + for _, e := range m.Authorities { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ConfigSource_Path) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *ConfigSource_ApiConfigSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ApiConfigSource != nil { + l = m.ApiConfigSource.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ConfigSource_Ads) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Ads != nil { + l = m.Ads.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m 
*ConfigSource_Self) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Self != nil { + l = m.Self.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ConfigSource_PathConfigSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PathConfigSource != nil { + l = m.PathConfigSource.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ExtensionConfigSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConfigSource != nil { + l = m.ConfigSource.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DefaultConfig != nil { + l = (*anypb.Any)(m.DefaultConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ApplyDefaultConfigWithoutWarming { + n += 2 + } + if len(m.TypeUrls) > 0 { + for _, s := range m.TypeUrls { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.go index f9deb0c9017..8d995326c19 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/core/v3/event_service_config.proto package corev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.validate.go index a55672c2759..21f29c8b6f7 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/core/v3/event_service_config.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config_vtproto.pb.go new file mode 100644 index 00000000000..c8e65c66fe2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config_vtproto.pb.go @@ -0,0 +1,110 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/event_service_config.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *EventServiceConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventServiceConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EventServiceConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigSourceSpecifier.(*EventServiceConfig_GrpcService); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *EventServiceConfig_GrpcService) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EventServiceConfig_GrpcService) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GrpcService != nil { + size, err := m.GrpcService.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *EventServiceConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.ConfigSourceSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *EventServiceConfig_GrpcService) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GrpcService != nil { + l = m.GrpcService.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.go index 3fa0aab8981..81ec41a6e77 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/core/v3/extension.proto package corev3 @@ -9,9 +9,9 @@ package corev3 import ( _ "github.com/cncf/xds/go/udpa/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any1 "github.com/golang/protobuf/ptypes/any" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" sync "sync" ) @@ -39,7 +39,7 @@ type TypedExtensionConfig struct { // URL of “TypedStruct“ will be utilized. See the // :ref:`extension configuration overview // ` for further details. 
- TypedConfig *any1.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` + TypedConfig *anypb.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` } func (x *TypedExtensionConfig) Reset() { @@ -81,7 +81,7 @@ func (x *TypedExtensionConfig) GetName() string { return "" } -func (x *TypedExtensionConfig) GetTypedConfig() *any1.Any { +func (x *TypedExtensionConfig) GetTypedConfig() *anypb.Any { if x != nil { return x.TypedConfig } @@ -134,7 +134,7 @@ func file_envoy_config_core_v3_extension_proto_rawDescGZIP() []byte { var file_envoy_config_core_v3_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_envoy_config_core_v3_extension_proto_goTypes = []interface{}{ (*TypedExtensionConfig)(nil), // 0: envoy.config.core.v3.TypedExtensionConfig - (*any1.Any)(nil), // 1: google.protobuf.Any + (*anypb.Any)(nil), // 1: google.protobuf.Any } var file_envoy_config_core_v3_extension_proto_depIdxs = []int32{ 1, // 0: envoy.config.core.v3.TypedExtensionConfig.typed_config:type_name -> google.protobuf.Any diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.validate.go index 9c915d3833d..c0df5946a53 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/core/v3/extension.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension_vtproto.pb.go new file mode 100644 index 00000000000..f81b8b38b09 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension_vtproto.pb.go @@ -0,0 +1,88 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/extension.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TypedExtensionConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TypedExtensionConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TypedExtensionConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TypedExtensionConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.go index 30dbf0ecb7b..199ac40f045 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/core/v3/grpc_method_list.proto package corev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.validate.go index 4fc134e94e6..994cc4df733 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. 
// source: envoy/config/core/v3/grpc_method_list.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list_vtproto.pb.go new file mode 100644 index 00000000000..940531d1d48 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list_vtproto.pb.go @@ -0,0 +1,149 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/grpc_method_list.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *GrpcMethodList_Service) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcMethodList_Service) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcMethodList_Service) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.MethodNames) > 0 { + for iNdEx := len(m.MethodNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MethodNames[iNdEx]) + copy(dAtA[i:], m.MethodNames[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.MethodNames[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcMethodList) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcMethodList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcMethodList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Services) > 0 { + for iNdEx := len(m.Services) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Services[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GrpcMethodList_Service) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.MethodNames) > 0 { + for _, s := range 
m.MethodNames { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcMethodList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Services) > 0 { + for _, e := range m.Services { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.go index a8b3b061ae9..3967277f61c 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/core/v3/grpc_service.proto package corev3 @@ -9,13 +9,13 @@ package corev3 import ( _ "github.com/cncf/xds/go/udpa/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any1 "github.com/golang/protobuf/ptypes/any" - duration "github.com/golang/protobuf/ptypes/duration" - _struct "github.com/golang/protobuf/ptypes/struct" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" emptypb "google.golang.org/protobuf/types/known/emptypb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -29,7 +29,7 @@ const ( // gRPC service configuration. This is used by :ref:`ApiConfigSource // ` and filter configurations. -// [#next-free-field: 6] +// [#next-free-field: 7] type GrpcService struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -42,13 +42,16 @@ type GrpcService struct { TargetSpecifier isGrpcService_TargetSpecifier `protobuf_oneof:"target_specifier"` // The timeout for the gRPC request. This is the timeout for a specific // request. - Timeout *duration.Duration `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"` + Timeout *durationpb.Duration `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"` // Additional metadata to include in streams initiated to the GrpcService. This can be used for // scenarios in which additional ad hoc authorization headers (e.g. “x-foo-bar: baz-key“) are to // be injected. For more information, including details on header value syntax, see the // documentation on :ref:`custom request headers // `. InitialMetadata []*HeaderValue `protobuf:"bytes,5,rep,name=initial_metadata,json=initialMetadata,proto3" json:"initial_metadata,omitempty"` + // Optional default retry policy for streams toward the service. + // If an async stream doesn't have retry policy configured in its stream options, this retry policy is used. 
+ RetryPolicy *RetryPolicy `protobuf:"bytes,6,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"` } func (x *GrpcService) Reset() { @@ -104,7 +107,7 @@ func (x *GrpcService) GetGoogleGrpc() *GrpcService_GoogleGrpc { return nil } -func (x *GrpcService) GetTimeout() *duration.Duration { +func (x *GrpcService) GetTimeout() *durationpb.Duration { if x != nil { return x.Timeout } @@ -118,6 +121,13 @@ func (x *GrpcService) GetInitialMetadata() []*HeaderValue { return nil } +func (x *GrpcService) GetRetryPolicy() *RetryPolicy { + if x != nil { + return x.RetryPolicy + } + return nil +} + type isGrpcService_TargetSpecifier interface { isGrpcService_TargetSpecifier() } @@ -140,6 +150,7 @@ func (*GrpcService_EnvoyGrpc_) isGrpcService_TargetSpecifier() {} func (*GrpcService_GoogleGrpc_) isGrpcService_TargetSpecifier() {} +// [#next-free-field: 6] type GrpcService_EnvoyGrpc struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -157,6 +168,16 @@ type GrpcService_EnvoyGrpc struct { // Currently only supported for xDS gRPC streams. // If not set, xDS gRPC streams default base interval:500ms, maximum interval:30s will be applied. RetryPolicy *RetryPolicy `protobuf:"bytes,3,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"` + // Maximum gRPC message size that is allowed to be received. + // If a message over this limit is received, the gRPC stream is terminated with the RESOURCE_EXHAUSTED error. + // This limit is applied to individual messages in the streaming response and not the total size of streaming response. + // Defaults to 0, which means unlimited. + MaxReceiveMessageLength *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=max_receive_message_length,json=maxReceiveMessageLength,proto3" json:"max_receive_message_length,omitempty"` + // This provides gRPC client level control over envoy generated headers. + // If false, the header will be sent but it can be overridden by per stream option. + // If true, the header will be removed and can not be overridden by per stream option. + // Default to false. + SkipEnvoyHeaders bool `protobuf:"varint,5,opt,name=skip_envoy_headers,json=skipEnvoyHeaders,proto3" json:"skip_envoy_headers,omitempty"` } func (x *GrpcService_EnvoyGrpc) Reset() { @@ -212,6 +233,20 @@ func (x *GrpcService_EnvoyGrpc) GetRetryPolicy() *RetryPolicy { return nil } +func (x *GrpcService_EnvoyGrpc) GetMaxReceiveMessageLength() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxReceiveMessageLength + } + return nil +} + +func (x *GrpcService_EnvoyGrpc) GetSkipEnvoyHeaders() bool { + if x != nil { + return x.SkipEnvoyHeaders + } + return false +} + // [#next-free-field: 9] type GrpcService_GoogleGrpc struct { state protoimpl.MessageState @@ -243,10 +278,10 @@ type GrpcService_GoogleGrpc struct { CredentialsFactoryName string `protobuf:"bytes,5,opt,name=credentials_factory_name,json=credentialsFactoryName,proto3" json:"credentials_factory_name,omitempty"` // Additional configuration for site-specific customizations of the Google // gRPC library. - Config *_struct.Struct `protobuf:"bytes,6,opt,name=config,proto3" json:"config,omitempty"` + Config *structpb.Struct `protobuf:"bytes,6,opt,name=config,proto3" json:"config,omitempty"` // How many bytes each stream can buffer internally. // If not set an implementation defined default is applied (1MiB). 
- PerStreamBufferLimitBytes *wrappers.UInt32Value `protobuf:"bytes,7,opt,name=per_stream_buffer_limit_bytes,json=perStreamBufferLimitBytes,proto3" json:"per_stream_buffer_limit_bytes,omitempty"` + PerStreamBufferLimitBytes *wrapperspb.UInt32Value `protobuf:"bytes,7,opt,name=per_stream_buffer_limit_bytes,json=perStreamBufferLimitBytes,proto3" json:"per_stream_buffer_limit_bytes,omitempty"` // Custom channels args. ChannelArgs *GrpcService_GoogleGrpc_ChannelArgs `protobuf:"bytes,8,opt,name=channel_args,json=channelArgs,proto3" json:"channel_args,omitempty"` } @@ -318,14 +353,14 @@ func (x *GrpcService_GoogleGrpc) GetCredentialsFactoryName() string { return "" } -func (x *GrpcService_GoogleGrpc) GetConfig() *_struct.Struct { +func (x *GrpcService_GoogleGrpc) GetConfig() *structpb.Struct { if x != nil { return x.Config } return nil } -func (x *GrpcService_GoogleGrpc) GetPerStreamBufferLimitBytes() *wrappers.UInt32Value { +func (x *GrpcService_GoogleGrpc) GetPerStreamBufferLimitBytes() *wrapperspb.UInt32Value { if x != nil { return x.PerStreamBufferLimitBytes } @@ -941,7 +976,7 @@ func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) G return nil } -func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) GetTypedConfig() *any1.Any { +func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) GetTypedConfig() *anypb.Any { if x, ok := x.GetConfigType().(*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig); ok { return x.TypedConfig } @@ -953,7 +988,7 @@ type isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_Conf } type GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig struct { - TypedConfig *any1.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` } func (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig) isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType() { @@ -1204,7 +1239,7 @@ var file_envoy_config_core_v3_grpc_service_proto_rawDesc = []byte{ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0xd2, 0x21, 0x0a, 0x0b, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, + 0x6f, 0x74, 0x6f, 0x22, 0xa1, 0x23, 0x0a, 0x0b, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x0a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, @@ -1223,266 +1258,279 @@ var file_envoy_config_core_v3_grpc_service_proto_rawDesc = []byte{ 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0xde, 0x01, 0x0a, 0x09, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x47, - 0x72, 0x70, 0x63, 0x12, 0x2a, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 
0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, - 0x10, 0x01, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x2f, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x11, 0xfa, 0x42, 0x0e, 0x72, 0x0c, 0x10, 0x00, 0x28, 0x80, 0x80, 0x01, 0xc8, - 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, - 0x12, 0x44, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, - 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x45, 0x6e, 0x76, - 0x6f, 0x79, 0x47, 0x72, 0x70, 0x63, 0x1a, 0xfa, 0x1c, 0x0a, 0x0a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x47, 0x72, 0x70, 0x63, 0x12, 0x26, 0x0a, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, - 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, - 0x10, 0x01, 0x52, 0x09, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x55, 0x72, 0x69, 0x12, 0x70, 0x0a, - 0x13, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, - 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x12, 0x63, 0x68, 0x61, - 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, - 0x67, 0x0a, 0x10, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x44, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, + 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0xe7, 0x02, 0x0a, 0x09, + 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x47, 0x72, 0x70, 0x63, 0x12, 0x2a, 0x0a, 0x0c, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, + 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x11, 0xfa, 0x42, 0x0e, 0x72, 0x0c, 0x10, + 0x00, 0x28, 0x80, 0x80, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x09, 0x61, 0x75, 0x74, + 0x68, 0x6f, 0x72, 
0x69, 0x74, 0x79, 0x12, 0x44, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, + 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x59, 0x0a, 0x1a, + 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x17, + 0x6d, 0x61, 0x78, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x6b, 0x69, 0x70, 0x5f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x10, 0x73, 0x6b, 0x69, 0x70, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x45, 0x6e, 0x76, 0x6f, + 0x79, 0x47, 0x72, 0x70, 0x63, 0x1a, 0xfa, 0x1c, 0x0a, 0x0a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x47, 0x72, 0x70, 0x63, 0x12, 0x26, 0x0a, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x75, + 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x09, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x55, 0x72, 0x69, 0x12, 0x70, 0x0a, 0x13, + 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x0f, 0x63, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x28, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, - 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, - 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, - 0x69, 0x78, 0x12, 0x38, 0x0a, 0x18, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, - 0x73, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, - 0x73, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5e, 0x0a, - 0x1d, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 
0x65, 0x61, 0x6d, 0x5f, 0x62, 0x75, 0x66, 0x66, - 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x19, 0x70, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x75, 0x66, - 0x66, 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x5b, 0x0a, - 0x0c, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x61, 0x72, 0x67, 0x73, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, - 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x52, 0x0b, 0x63, - 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x9d, 0x02, 0x0a, 0x0e, 0x53, - 0x73, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x3f, 0x0a, - 0x0a, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x52, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x49, - 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x06, 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x52, 0x0a, 0x70, - 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x3f, 0x0a, 0x0a, 0x63, 0x65, 0x72, - 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, - 0x09, 0x63, 0x65, 0x72, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x3a, 0x3e, 0x9a, 0xc5, 0x88, 0x1e, - 0x39, 0x0a, 0x37, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x73, 0x6c, 0x43, - 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0x60, 0x0a, 0x16, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x73, 0x3a, 0x46, 0x9a, 0xc5, 0x88, 0x1e, 0x41, 0x0a, 0x3f, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, - 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, - 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0x92, 0x03, 0x0a, - 0x12, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 
0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x73, 0x12, 0x66, 0x0a, 0x0f, 0x73, 0x73, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, + 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x12, 0x63, 0x68, 0x61, 0x6e, + 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x67, + 0x0a, 0x10, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, + 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x0f, 0x63, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x28, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x5f, + 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x12, 0x38, 0x0a, 0x18, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, + 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x16, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, + 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5e, 0x0a, 0x1d, + 0x70, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, + 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x19, 0x70, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x75, 0x66, 0x66, + 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x5b, 0x0a, 0x0c, + 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x61, 0x72, 0x67, 0x73, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, + 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x52, 0x0b, 0x63, 0x68, + 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x9d, 0x02, 0x0a, 0x0e, 0x53, 0x73, + 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x3f, 0x0a, 0x0a, + 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 
0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x52, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x49, 0x0a, + 0x0b, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x42, 0x06, 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x52, 0x0a, 0x70, 0x72, + 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x3f, 0x0a, 0x0a, 0x63, 0x65, 0x72, 0x74, + 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, + 0x63, 0x65, 0x72, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x3a, 0x3e, 0x9a, 0xc5, 0x88, 0x1e, 0x39, + 0x0a, 0x37, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x73, 0x6c, 0x43, 0x72, - 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x73, 0x73, 0x6c, - 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x3f, 0x0a, 0x0e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x0d, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x72, 0x0a, 0x11, - 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, - 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, - 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x10, - 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, - 0x3a, 0x42, 0x9a, 0xc5, 0x88, 0x1e, 0x3d, 0x0a, 0x3b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, - 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x73, 0x42, 0x1b, 0x0a, 0x14, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, - 0x01, 0x1a, 0x88, 0x0f, 0x0a, 0x0f, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x61, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x4c, 0x0a, 0x15, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x5f, 0x65, 0x6e, 0x67, - 0x69, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x48, 0x00, 0x52, 0x13, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x75, - 0x74, 0x65, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x32, 0x0a, 0x14, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x12, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x9e, 0x01, 0x0a, - 0x1a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x5f, 0x6a, 0x77, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x5f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, - 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x57, - 0x54, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, - 0x6c, 0x73, 0x48, 0x00, 0x52, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x77, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x72, 0x0a, - 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x69, 0x61, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x51, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, - 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x09, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x61, - 0x6d, 0x12, 0x7d, 0x0a, 0x0b, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x5a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0x60, 0x0a, 0x16, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x3a, 0x46, 0x9a, 0xc5, 0x88, 0x1e, 0x41, 0x0a, 0x3f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, + 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x47, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, + 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0x92, 0x03, 0x0a, 0x12, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 
0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, + 0x6c, 0x73, 0x12, 0x66, 0x0a, 0x0f, 0x73, 0x73, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x73, 0x6c, 0x43, 0x72, 0x65, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x73, 0x73, 0x6c, 0x43, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x3f, 0x0a, 0x0e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x0d, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x72, 0x0a, 0x11, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, 0x65, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x48, 0x00, 0x52, 0x0a, 0x66, 0x72, 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x12, 0x6a, 0x0a, 0x0b, 0x73, 0x74, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x47, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, + 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x10, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x3a, + 0x42, 0x9a, 0xc5, 0x88, 0x1e, 0x3d, 0x0a, 0x3b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, + 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x42, 0x1b, 0x0a, 0x14, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, + 0x6c, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, + 0x1a, 0x88, 0x0f, 0x0a, 0x0f, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x4c, 0x0a, 0x15, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x5f, 0x65, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x48, 0x00, 0x52, 0x13, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, + 0x65, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x32, 0x0a, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x12, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, + 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x9e, 0x01, 0x0a, 0x1a, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, + 0x6a, 0x77, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x5f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, + 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x57, 0x54, + 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x73, 0x48, 0x00, 0x52, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x4a, 0x77, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x72, 0x0a, 0x0a, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x69, 0x61, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x51, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, + 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x09, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x61, 0x6d, + 0x12, 0x7d, 0x0a, 0x0b, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x5a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x73, 0x2e, 0x53, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, - 0x52, 0x0a, 0x73, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0xd9, 0x01, 0x0a, - 0x22, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4a, - 0x57, 0x54, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6a, 0x73, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x34, - 0x0a, 0x16, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x6c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, - 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, - 0x74, 0x6f, 0x6b, 
0x65, 0x6e, 0x4c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x63, - 0x6f, 0x6e, 0x64, 0x73, 0x3a, 0x62, 0x9a, 0xc5, 0x88, 0x1e, 0x5d, 0x0a, 0x5b, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, - 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x57, 0x54, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x72, 0x65, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0xcc, 0x01, 0x0a, 0x14, 0x47, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, - 0x73, 0x12, 0x2f, 0x0a, 0x13, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, - 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, - 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, - 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x3a, 0x54, 0x9a, 0xc5, 0x88, 0x1e, 0x4f, 0x0a, 0x4d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x6c, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, 0x65, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x48, 0x00, 0x52, 0x0a, 0x66, 0x72, 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, + 0x6a, 0x0a, 0x0b, 0x73, 0x74, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, + 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, + 0x6c, 0x73, 0x2e, 0x53, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, + 0x0a, 0x73, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0xd9, 0x01, 0x0a, 0x22, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x57, + 0x54, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, + 0x6c, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6a, 0x73, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x34, 0x0a, + 0x16, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x6c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x4c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x3a, 0x62, 0x9a, 0xc5, 0x88, 0x1e, 0x5d, 0x0a, 0x5b, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, + 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 
0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x4a, 0x57, 0x54, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x72, 0x65, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0xcc, 0x01, 0x0a, 0x14, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, + 0x12, 0x2f, 0x0a, 0x13, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x3a, 0x54, 0x9a, 0xc5, 0x88, 0x1e, 0x4f, 0x0a, 0x4d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, + 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x73, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0xea, 0x01, 0x0a, 0x1d, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x72, + 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, + 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x5d, 0x9a, 0xc5, 0x88, 0x1e, 0x58, 0x0a, 0x56, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x72, 0x6f, 0x6d, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x1a, 0xd7, 0x03, 0x0a, 0x0a, 0x53, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x12, 0x3b, 0x0a, 0x1a, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x65, 0x78, 0x63, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x72, 0x69, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x78, 0x63, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x72, 0x69, 0x12, + 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 
0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x61, + 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, + 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x30, 0x0a, + 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x35, 0x0a, 0x12, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x52, 0x10, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12, 0x35, 0x0a, 0x12, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x10, 0x73, 0x75, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, + 0x10, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12, 0x28, 0x0a, 0x10, 0x61, 0x63, 0x74, 0x6f, 0x72, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0e, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, + 0x65, 0x3a, 0x4a, 0x9a, 0xc5, 0x88, 0x1e, 0x45, 0x0a, 0x43, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, - 0x6c, 0x73, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0xea, 0x01, 0x0a, 0x1d, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, - 0x72, 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, - 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, - 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x5d, 0x9a, 0xc5, 0x88, 0x1e, 0x58, 0x0a, - 0x56, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, - 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 
0x74, 0x61, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x72, 0x6f, - 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xd7, 0x03, 0x0a, 0x0a, 0x53, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x12, 0x3b, 0x0a, 0x1a, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x65, 0x78, 0x63, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x72, - 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x78, - 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x72, 0x69, - 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, - 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x30, - 0x0a, 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x35, 0x0a, 0x12, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, - 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x10, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12, 0x35, 0x0a, 0x12, 0x73, 0x75, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x10, 0x73, 0x75, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, - 0x0a, 0x10, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12, 0x28, 0x0a, 0x10, 0x61, 0x63, 0x74, 0x6f, - 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0e, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, - 0x70, 0x65, 0x3a, 0x4a, 0x9a, 0xc5, 0x88, 0x1e, 0x45, 0x0a, 0x43, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, - 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, - 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x73, 0x2e, 0x53, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x3f, - 0x9a, 0xc5, 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, - 0x43, 0x61, 0x6c, 0x6c, 
0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x42, - 0x1b, 0x0a, 0x14, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x70, - 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xc3, 0x02, 0x0a, - 0x0b, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x12, 0x56, 0x0a, 0x04, - 0x61, 0x72, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, - 0x41, 0x72, 0x67, 0x73, 0x2e, 0x41, 0x72, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, - 0x61, 0x72, 0x67, 0x73, 0x1a, 0x63, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, - 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x42, 0x16, 0x0a, 0x0f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, - 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x77, 0x0a, 0x09, 0x41, 0x72, 0x67, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x54, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, - 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, - 0x73, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x6c, 0x73, 0x2e, 0x53, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x3f, 0x9a, + 0xc5, 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, + 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x42, 0x1b, + 0x0a, 0x14, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xc3, 0x02, 0x0a, 0x0b, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x12, 0x56, 0x0a, 0x04, 0x61, + 0x72, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, + 0x72, 0x67, 0x73, 0x2e, 0x41, 0x72, 0x67, 0x73, 
0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x61, + 0x72, 0x67, 0x73, 0x1a, 0x63, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, + 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x16, 0x0a, 0x0f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x77, 0x0a, 0x09, 0x41, 0x72, 0x67, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x54, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, + 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, + 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, + 0x70, 0x63, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, - 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, - 0x72, 0x70, 0x63, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, - 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x17, 0x0a, 0x10, 0x74, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, - 0x42, 0x01, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x42, 0x84, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, - 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, - 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, - 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, - 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x17, 0x0a, 0x10, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, + 0x01, 0x4a, 0x04, 0x08, 0x04, 
	0x10, 0x05, 0x42, 0x84, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02,
+	0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78,
+	0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63,
+	0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76,
+	0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68,
+	0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78,
+	0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61,
+	0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f,
+	0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
 }
 
 var (
@@ -1512,46 +1560,48 @@ var file_envoy_config_core_v3_grpc_service_proto_goTypes = []interface{}{
 	(*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin)(nil), // 10: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin
 	(*GrpcService_GoogleGrpc_CallCredentials_StsService)(nil),                    // 11: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.StsService
 	(*GrpcService_GoogleGrpc_ChannelArgs_Value)(nil),                             // 12: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.Value
-	nil,                          // 13: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.ArgsEntry
-	(*duration.Duration)(nil),    // 14: google.protobuf.Duration
-	(*HeaderValue)(nil),          // 15: envoy.config.core.v3.HeaderValue
-	(*RetryPolicy)(nil),          // 16: envoy.config.core.v3.RetryPolicy
-	(*_struct.Struct)(nil),       // 17: google.protobuf.Struct
-	(*wrappers.UInt32Value)(nil), // 18: google.protobuf.UInt32Value
-	(*DataSource)(nil),           // 19: envoy.config.core.v3.DataSource
-	(*emptypb.Empty)(nil),        // 20: google.protobuf.Empty
-	(*any1.Any)(nil),             // 21: google.protobuf.Any
+	nil,                            // 13: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.ArgsEntry
+	(*durationpb.Duration)(nil),    // 14: google.protobuf.Duration
+	(*HeaderValue)(nil),            // 15: envoy.config.core.v3.HeaderValue
+	(*RetryPolicy)(nil),            // 16: envoy.config.core.v3.RetryPolicy
+	(*wrapperspb.UInt32Value)(nil), // 17: google.protobuf.UInt32Value
+	(*structpb.Struct)(nil),        // 18: google.protobuf.Struct
+	(*DataSource)(nil),             // 19: envoy.config.core.v3.DataSource
+	(*emptypb.Empty)(nil),          // 20: google.protobuf.Empty
+	(*anypb.Any)(nil),              // 21: google.protobuf.Any
 }
 var file_envoy_config_core_v3_grpc_service_proto_depIdxs = []int32{
 	1,  // 0: envoy.config.core.v3.GrpcService.envoy_grpc:type_name -> envoy.config.core.v3.GrpcService.EnvoyGrpc
 	2,  // 1: envoy.config.core.v3.GrpcService.google_grpc:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc
 	14, // 2: envoy.config.core.v3.GrpcService.timeout:type_name -> google.protobuf.Duration
 	15, // 3: envoy.config.core.v3.GrpcService.initial_metadata:type_name -> envoy.config.core.v3.HeaderValue
-	16, // 4: envoy.config.core.v3.GrpcService.EnvoyGrpc.retry_policy:type_name -> envoy.config.core.v3.RetryPolicy
-	5,  // 5: envoy.config.core.v3.GrpcService.GoogleGrpc.channel_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials
-	6,  // 6: envoy.config.core.v3.GrpcService.GoogleGrpc.call_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials
-	17, // 7: envoy.config.core.v3.GrpcService.GoogleGrpc.config:type_name -> google.protobuf.Struct
-	18, // 8: envoy.config.core.v3.GrpcService.GoogleGrpc.per_stream_buffer_limit_bytes:type_name -> google.protobuf.UInt32Value
-	7,  // 9: envoy.config.core.v3.GrpcService.GoogleGrpc.channel_args:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs
-	19, // 10: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials.root_certs:type_name -> envoy.config.core.v3.DataSource
-	19, // 11: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials.private_key:type_name -> envoy.config.core.v3.DataSource
-	19, // 12: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials.cert_chain:type_name -> envoy.config.core.v3.DataSource
-	3,  // 13: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials.ssl_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials
-	20, // 14: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials.google_default:type_name -> google.protobuf.Empty
-	4,  // 15: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials.local_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.GoogleLocalCredentials
-	20, // 16: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.google_compute_engine:type_name -> google.protobuf.Empty
-	8,  // 17: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.service_account_jwt_access:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.ServiceAccountJWTAccessCredentials
-	9,  // 18: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.google_iam:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials
-	10, // 19: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.from_plugin:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin
-	11, // 20: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.sts_service:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.StsService
-	13, // 21: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.args:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.ArgsEntry
-	21, // 22: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin.typed_config:type_name -> google.protobuf.Any
-	12, // 23: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.ArgsEntry.value:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.Value
-	24, // [24:24] is the sub-list for method output_type
-	24, // [24:24] is the sub-list for method input_type
-	24, // [24:24] is the sub-list for extension type_name
-	24, // [24:24] is the sub-list for extension extendee
-	0,  // [0:24] is the sub-list for field type_name
+	16, // 4: envoy.config.core.v3.GrpcService.retry_policy:type_name -> envoy.config.core.v3.RetryPolicy
+	16, // 5: envoy.config.core.v3.GrpcService.EnvoyGrpc.retry_policy:type_name -> envoy.config.core.v3.RetryPolicy
+	17, // 6: envoy.config.core.v3.GrpcService.EnvoyGrpc.max_receive_message_length:type_name -> google.protobuf.UInt32Value
+	5,  // 7: envoy.config.core.v3.GrpcService.GoogleGrpc.channel_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials
+	6,  // 8: envoy.config.core.v3.GrpcService.GoogleGrpc.call_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials
+	18, // 9: envoy.config.core.v3.GrpcService.GoogleGrpc.config:type_name -> google.protobuf.Struct
+	17, // 10: envoy.config.core.v3.GrpcService.GoogleGrpc.per_stream_buffer_limit_bytes:type_name -> google.protobuf.UInt32Value
+	7,  // 11: envoy.config.core.v3.GrpcService.GoogleGrpc.channel_args:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs
+	19, // 12: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials.root_certs:type_name -> envoy.config.core.v3.DataSource
+	19, // 13: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials.private_key:type_name -> envoy.config.core.v3.DataSource
+	19, // 14: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials.cert_chain:type_name -> envoy.config.core.v3.DataSource
+	3,  // 15: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials.ssl_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials
+	20, // 16: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials.google_default:type_name -> google.protobuf.Empty
+	4,  // 17: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials.local_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.GoogleLocalCredentials
+	20, // 18: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.google_compute_engine:type_name -> google.protobuf.Empty
+	8,  // 19: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.service_account_jwt_access:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.ServiceAccountJWTAccessCredentials
+	9,  // 20: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.google_iam:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials
+	10, // 21: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.from_plugin:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin
+	11, // 22: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.sts_service:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.StsService
+	13, // 23: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.args:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.ArgsEntry
+	21, // 24: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin.typed_config:type_name -> google.protobuf.Any
+	12, // 25: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.ArgsEntry.value:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.Value
+	26, // [26:26] is the sub-list for method output_type
+	26, // [26:26] is the sub-list for method input_type
+	26, // [26:26] is the sub-list for extension type_name
+	26, // [26:26] is the sub-list for extension extendee
+	0,  // [0:26] is the sub-list for field type_name
 }
 
 func init() { file_envoy_config_core_v3_grpc_service_proto_init() }
diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.validate.go
index 8fd25237592..9ef41b0774b 100644
--- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.validate.go
+++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.validate.go
@@ -1,3 +1,4 @@
+//go:build !disable_pgv
 // Code generated by protoc-gen-validate. DO NOT EDIT.
 // source: envoy/config/core/v3/grpc_service.proto
 
@@ -120,6 +121,35 @@ func (m *GrpcService) validate(all bool) error {
 
 	}
 
+	if all {
+		switch v := interface{}(m.GetRetryPolicy()).(type) {
+		case interface{ ValidateAll() error }:
+			if err := v.ValidateAll(); err != nil {
+				errors = append(errors, GrpcServiceValidationError{
+					field:  "RetryPolicy",
+					reason: "embedded message failed validation",
+					cause:  err,
+				})
+			}
+		case interface{ Validate() error }:
+			if err := v.Validate(); err != nil {
+				errors = append(errors, GrpcServiceValidationError{
+					field:  "RetryPolicy",
+					reason: "embedded message failed validation",
+					cause:  err,
+				})
+			}
+		}
+	} else if v, ok := interface{}(m.GetRetryPolicy()).(interface{ Validate() error }); ok {
+		if err := v.Validate(); err != nil {
+			return GrpcServiceValidationError{
+				field:  "RetryPolicy",
+				reason: "embedded message failed validation",
+				cause:  err,
+			}
+		}
+	}
+
 	oneofTargetSpecifierPresent := false
 	switch v := m.TargetSpecifier.(type) {
 	case *GrpcService_EnvoyGrpc_:
@@ -392,6 +422,37 @@ func (m *GrpcService_EnvoyGrpc) validate(all bool) error {
 		}
 	}
 
+	if all {
+		switch v := interface{}(m.GetMaxReceiveMessageLength()).(type) {
+		case interface{ ValidateAll() error }:
+			if err := v.ValidateAll(); err != nil {
+				errors = append(errors, GrpcService_EnvoyGrpcValidationError{
+					field:  "MaxReceiveMessageLength",
+					reason: "embedded message failed validation",
+					cause:  err,
+				})
+			}
+		case interface{ Validate() error }:
+			if err := v.Validate(); err != nil {
+				errors = append(errors, GrpcService_EnvoyGrpcValidationError{
+					field:  "MaxReceiveMessageLength",
+					reason: "embedded message failed validation",
+					cause:  err,
+				})
+			}
+		}
+	} else if v, ok := interface{}(m.GetMaxReceiveMessageLength()).(interface{ Validate() error }); ok {
+		if err := v.Validate(); err != nil {
+			return GrpcService_EnvoyGrpcValidationError{
+				field:  "MaxReceiveMessageLength",
+				reason: "embedded message failed validation",
+				cause:  err,
+			}
+		}
+	}
+
+	// no validation rules for SkipEnvoyHeaders
+
 	if len(errors) > 0 {
 		return GrpcService_EnvoyGrpcMultiError(errors)
 	}
diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service_vtproto.pb.go
new file mode 100644
index 00000000000..90d07efa4a4
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service_vtproto.pb.go
@@ -0,0 +1,1648 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/config/core/v3/grpc_service.proto
+
+package corev3
+
+import (
+	protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+	anypb "github.com/planetscale/vtprotobuf/types/known/anypb"
+	durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb"
+	emptypb "github.com/planetscale/vtprotobuf/types/known/emptypb"
+	structpb "github.com/planetscale/vtprotobuf/types/known/structpb"
+	wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *GrpcService_EnvoyGrpc) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_EnvoyGrpc) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_EnvoyGrpc) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SkipEnvoyHeaders { + i-- + if m.SkipEnvoyHeaders { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.MaxReceiveMessageLength != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxReceiveMessageLength).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.RetryPolicy != nil { + size, err := m.RetryPolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0x12 + } + if len(m.ClusterName) > 0 { + i -= len(m.ClusterName) + copy(dAtA[i:], m.ClusterName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_SslCredentials) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_SslCredentials) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_SslCredentials) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.CertChain != nil { + size, err := m.CertChain.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.PrivateKey != nil { + size, err := m.PrivateKey.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.RootCerts != nil { + size, err := m.RootCerts.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + 
} + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SslCredentials != nil { + size, err := m.SslCredentials.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GoogleDefault != nil { + size, err := (*emptypb.Empty)(m.GoogleDefault).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + 
+func (m *GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LocalCredentials != nil { + size, err := m.LocalCredentials.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TokenLifetimeSeconds != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TokenLifetimeSeconds)) + i-- + dAtA[i] = 0x10 + } + if len(m.JsonKey) > 0 { + i -= len(m.JsonKey) + copy(dAtA[i:], m.JsonKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.JsonKey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.AuthoritySelector) > 0 { + i -= len(m.AuthoritySelector) + copy(dAtA[i:], m.AuthoritySelector) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AuthoritySelector))) + i-- + dAtA[i] = 0x12 + } + if len(m.AuthorizationToken) > 0 { + i -= len(m.AuthorizationToken) + copy(dAtA[i:], m.AuthorizationToken) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AuthorizationToken))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ActorTokenType) > 0 { + i -= len(m.ActorTokenType) + copy(dAtA[i:], m.ActorTokenType) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ActorTokenType))) + i-- + dAtA[i] = 0x4a + } + if len(m.ActorTokenPath) > 0 { + i -= len(m.ActorTokenPath) + copy(dAtA[i:], m.ActorTokenPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ActorTokenPath))) + i-- + dAtA[i] = 0x42 + } + if len(m.SubjectTokenType) > 0 { + i -= len(m.SubjectTokenType) + copy(dAtA[i:], m.SubjectTokenType) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SubjectTokenType))) + i-- + dAtA[i] = 0x3a + } + if len(m.SubjectTokenPath) > 0 { + i -= len(m.SubjectTokenPath) + copy(dAtA[i:], m.SubjectTokenPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SubjectTokenPath))) + i-- + dAtA[i] = 0x32 + } + if len(m.RequestedTokenType) > 0 { + i -= len(m.RequestedTokenType) + copy(dAtA[i:], m.RequestedTokenType) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RequestedTokenType))) + i-- + dAtA[i] = 0x2a + } + if len(m.Scope) > 0 { + i -= len(m.Scope) + copy(dAtA[i:], m.Scope) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Scope))) + i-- + dAtA[i] = 0x22 + } + if len(m.Audience) > 0 { + i -= len(m.Audience) + copy(dAtA[i:], m.Audience) + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(len(m.Audience))) + i-- + dAtA[i] = 0x1a + } + if len(m.Resource) > 0 { + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x12 + } + if len(m.TokenExchangeServiceUri) > 0 { + i -= len(m.TokenExchangeServiceUri) + copy(dAtA[i:], m.TokenExchangeServiceUri) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TokenExchangeServiceUri))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_CallCredentials_StsService_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_CallCredentials_FromPlugin); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_CallCredentials_GoogleIam); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_CallCredentials_AccessToken); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_AccessToken) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_AccessToken) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.AccessToken) + copy(dAtA[i:], m.AccessToken) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AccessToken))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine) 
MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GoogleComputeEngine != nil { + size, err := (*emptypb.Empty)(m.GoogleComputeEngine).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.GoogleRefreshToken) + copy(dAtA[i:], m.GoogleRefreshToken) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.GoogleRefreshToken))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ServiceAccountJwtAccess != nil { + size, err := m.ServiceAccountJwtAccess.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIam) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIam) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GoogleIam != nil { + size, err := m.GoogleIam.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_CallCredentials_FromPlugin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_FromPlugin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FromPlugin != nil { + size, err := m.FromPlugin.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StsService != nil { + size, err := m.StsService.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else 
{ + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ValueSpecifier.(*GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ValueSpecifier.(*GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringValue) + copy(dAtA[i:], m.StringValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StringValue))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.IntValue)) + i-- + dAtA[i] = 0x10 + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_ChannelArgs) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Args) > 0 { + for k := range m.Args { + v := m.Args[k] + baseI := i + size, err := v.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc) 
MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ChannelArgs != nil { + size, err := m.ChannelArgs.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.PerStreamBufferLimitBytes != nil { + size, err := (*wrapperspb.UInt32Value)(m.PerStreamBufferLimitBytes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.Config != nil { + size, err := (*structpb.Struct)(m.Config).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.CredentialsFactoryName) > 0 { + i -= len(m.CredentialsFactoryName) + copy(dAtA[i:], m.CredentialsFactoryName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CredentialsFactoryName))) + i-- + dAtA[i] = 0x2a + } + if len(m.StatPrefix) > 0 { + i -= len(m.StatPrefix) + copy(dAtA[i:], m.StatPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StatPrefix))) + i-- + dAtA[i] = 0x22 + } + if len(m.CallCredentials) > 0 { + for iNdEx := len(m.CallCredentials) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.CallCredentials[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.ChannelCredentials != nil { + size, err := m.ChannelCredentials.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.TargetUri) > 0 { + i -= len(m.TargetUri) + copy(dAtA[i:], m.TargetUri) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TargetUri))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcService) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RetryPolicy != nil { + size, err := m.RetryPolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.InitialMetadata) > 0 { + 
for iNdEx := len(m.InitialMetadata) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.InitialMetadata[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.Timeout != nil { + size, err := (*durationpb.Duration)(m.Timeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if msg, ok := m.TargetSpecifier.(*GrpcService_GoogleGrpc_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.TargetSpecifier.(*GrpcService_EnvoyGrpc_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_EnvoyGrpc_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_EnvoyGrpc_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.EnvoyGrpc != nil { + size, err := m.EnvoyGrpc.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GoogleGrpc != nil { + size, err := m.GoogleGrpc.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *GrpcService_EnvoyGrpc) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClusterName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Authority) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RetryPolicy != nil { + l = m.RetryPolicy.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxReceiveMessageLength != nil { + l = (*wrapperspb.UInt32Value)(m.MaxReceiveMessageLength).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SkipEnvoyHeaders { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_SslCredentials) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RootCerts != nil { + l = m.RootCerts.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PrivateKey != nil { + l = m.PrivateKey.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CertChain != nil { + l = m.CertChain.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if 
vtmsg, ok := m.CredentialSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SslCredentials != nil { + l = m.SslCredentials.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GoogleDefault != nil { + l = (*emptypb.Empty)(m.GoogleDefault).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LocalCredentials != nil { + l = m.LocalCredentials.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.JsonKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TokenLifetimeSeconds != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TokenLifetimeSeconds)) + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AuthorizationToken) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.AuthoritySelector) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TokenExchangeServiceUri) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Resource) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Audience) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Scope) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RequestedTokenType) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.SubjectTokenPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.SubjectTokenType) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ActorTokenPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ActorTokenType) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + 
+func (m *GrpcService_GoogleGrpc_CallCredentials) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.CredentialSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_AccessToken) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AccessToken) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GoogleComputeEngine != nil { + l = (*emptypb.Empty)(m.GoogleComputeEngine).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.GoogleRefreshToken) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ServiceAccountJwtAccess != nil { + l = m.ServiceAccountJwtAccess.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIam) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GoogleIam != nil { + l = m.GoogleIam.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_CallCredentials_FromPlugin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FromPlugin != nil { + l = m.FromPlugin.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StsService != nil { + l = m.StsService.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.ValueSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StringValue) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.IntValue)) + return n +} +func (m *GrpcService_GoogleGrpc_ChannelArgs) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Args) > 0 { + for k, v := range m.Args { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TargetUri) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if 
m.ChannelCredentials != nil { + l = m.ChannelCredentials.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.CallCredentials) > 0 { + for _, e := range m.CallCredentials { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.StatPrefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.CredentialsFactoryName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Config != nil { + l = (*structpb.Struct)(m.Config).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PerStreamBufferLimitBytes != nil { + l = (*wrapperspb.UInt32Value)(m.PerStreamBufferLimitBytes).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ChannelArgs != nil { + l = m.ChannelArgs.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.TargetSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Timeout != nil { + l = (*durationpb.Duration)(m.Timeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.InitialMetadata) > 0 { + for _, e := range m.InitialMetadata { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.RetryPolicy != nil { + l = m.RetryPolicy.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_EnvoyGrpc_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EnvoyGrpc != nil { + l = m.EnvoyGrpc.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GoogleGrpc != nil { + l = m.GoogleGrpc.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.go index 34b68b42ddd..96ac5fc632c 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/core/v3/health_check.proto package corev3 @@ -12,12 +12,12 @@ import ( v31 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any1 "github.com/golang/protobuf/ptypes/any" - duration "github.com/golang/protobuf/ptypes/duration" - _struct "github.com/golang/protobuf/ptypes/struct" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -147,7 +147,7 @@ func (x *HealthStatusSet) GetStatuses() []HealthStatus { return nil } -// [#next-free-field: 26] +// [#next-free-field: 27] type HealthCheck struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -155,16 +155,16 @@ type HealthCheck struct { // The time to wait for a health check response. If the timeout is reached the // health check attempt will be considered a failure. - Timeout *duration.Duration `protobuf:"bytes,1,opt,name=timeout,proto3" json:"timeout,omitempty"` + Timeout *durationpb.Duration `protobuf:"bytes,1,opt,name=timeout,proto3" json:"timeout,omitempty"` // The interval between health checks. - Interval *duration.Duration `protobuf:"bytes,2,opt,name=interval,proto3" json:"interval,omitempty"` + Interval *durationpb.Duration `protobuf:"bytes,2,opt,name=interval,proto3" json:"interval,omitempty"` // An optional jitter amount in milliseconds. If specified, Envoy will start health // checking after for a random time in ms between 0 and initial_jitter. This only // applies to the first health check. - InitialJitter *duration.Duration `protobuf:"bytes,20,opt,name=initial_jitter,json=initialJitter,proto3" json:"initial_jitter,omitempty"` + InitialJitter *durationpb.Duration `protobuf:"bytes,20,opt,name=initial_jitter,json=initialJitter,proto3" json:"initial_jitter,omitempty"` // An optional jitter amount in milliseconds. If specified, during every // interval Envoy will add interval_jitter to the wait time. - IntervalJitter *duration.Duration `protobuf:"bytes,3,opt,name=interval_jitter,json=intervalJitter,proto3" json:"interval_jitter,omitempty"` + IntervalJitter *durationpb.Duration `protobuf:"bytes,3,opt,name=interval_jitter,json=intervalJitter,proto3" json:"interval_jitter,omitempty"` // An optional jitter amount as a percentage of interval_ms. If specified, // during every interval Envoy will add “interval_ms“ * // “interval_jitter_percent“ / 100 to the wait time. @@ -177,15 +177,15 @@ type HealthCheck struct { // :ref:`expected_statuses ` // or :ref:`retriable_statuses `, // this threshold is ignored and the host is considered immediately unhealthy. - UnhealthyThreshold *wrappers.UInt32Value `protobuf:"bytes,4,opt,name=unhealthy_threshold,json=unhealthyThreshold,proto3" json:"unhealthy_threshold,omitempty"` + UnhealthyThreshold *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=unhealthy_threshold,json=unhealthyThreshold,proto3" json:"unhealthy_threshold,omitempty"` // The number of healthy health checks required before a host is marked // healthy. 
Note that during startup, only a single successful health check is // required to mark a host healthy. - HealthyThreshold *wrappers.UInt32Value `protobuf:"bytes,5,opt,name=healthy_threshold,json=healthyThreshold,proto3" json:"healthy_threshold,omitempty"` + HealthyThreshold *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=healthy_threshold,json=healthyThreshold,proto3" json:"healthy_threshold,omitempty"` // [#not-implemented-hide:] Non-serving port for health checking. - AltPort *wrappers.UInt32Value `protobuf:"bytes,6,opt,name=alt_port,json=altPort,proto3" json:"alt_port,omitempty"` + AltPort *wrapperspb.UInt32Value `protobuf:"bytes,6,opt,name=alt_port,json=altPort,proto3" json:"alt_port,omitempty"` // Reuse health check connection between health checks. Default is true. - ReuseConnection *wrappers.BoolValue `protobuf:"bytes,7,opt,name=reuse_connection,json=reuseConnection,proto3" json:"reuse_connection,omitempty"` + ReuseConnection *wrapperspb.BoolValue `protobuf:"bytes,7,opt,name=reuse_connection,json=reuseConnection,proto3" json:"reuse_connection,omitempty"` // Types that are assignable to HealthChecker: // // *HealthCheck_HttpHealthCheck_ @@ -201,7 +201,7 @@ type HealthCheck struct { // any other. // // The default value for "no traffic interval" is 60 seconds. - NoTrafficInterval *duration.Duration `protobuf:"bytes,12,opt,name=no_traffic_interval,json=noTrafficInterval,proto3" json:"no_traffic_interval,omitempty"` + NoTrafficInterval *durationpb.Duration `protobuf:"bytes,12,opt,name=no_traffic_interval,json=noTrafficInterval,proto3" json:"no_traffic_interval,omitempty"` // The "no traffic healthy interval" is a special health check interval that // is used for hosts that are currently passing active health checking // (including new hosts) when the cluster has received no traffic. @@ -215,26 +215,26 @@ type HealthCheck struct { // // If no_traffic_healthy_interval is not set, it will default to the // no traffic interval and send that interval regardless of health state. - NoTrafficHealthyInterval *duration.Duration `protobuf:"bytes,24,opt,name=no_traffic_healthy_interval,json=noTrafficHealthyInterval,proto3" json:"no_traffic_healthy_interval,omitempty"` + NoTrafficHealthyInterval *durationpb.Duration `protobuf:"bytes,24,opt,name=no_traffic_healthy_interval,json=noTrafficHealthyInterval,proto3" json:"no_traffic_healthy_interval,omitempty"` // The "unhealthy interval" is a health check interval that is used for hosts that are marked as // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the // standard health check interval that is defined. // // The default value for "unhealthy interval" is the same as "interval". - UnhealthyInterval *duration.Duration `protobuf:"bytes,14,opt,name=unhealthy_interval,json=unhealthyInterval,proto3" json:"unhealthy_interval,omitempty"` + UnhealthyInterval *durationpb.Duration `protobuf:"bytes,14,opt,name=unhealthy_interval,json=unhealthyInterval,proto3" json:"unhealthy_interval,omitempty"` // The "unhealthy edge interval" is a special health check interval that is used for the first // health check right after a host is marked as unhealthy. For subsequent health checks // Envoy will shift back to using either "unhealthy interval" if present or the standard health // check interval that is defined. // // The default value for "unhealthy edge interval" is the same as "unhealthy interval". 
- UnhealthyEdgeInterval *duration.Duration `protobuf:"bytes,15,opt,name=unhealthy_edge_interval,json=unhealthyEdgeInterval,proto3" json:"unhealthy_edge_interval,omitempty"` + UnhealthyEdgeInterval *durationpb.Duration `protobuf:"bytes,15,opt,name=unhealthy_edge_interval,json=unhealthyEdgeInterval,proto3" json:"unhealthy_edge_interval,omitempty"` // The "healthy edge interval" is a special health check interval that is used for the first // health check right after a host is marked as healthy. For subsequent health checks // Envoy will shift back to using the standard health check interval that is defined. // // The default value for "healthy edge interval" is the same as the default interval. - HealthyEdgeInterval *duration.Duration `protobuf:"bytes,16,opt,name=healthy_edge_interval,json=healthyEdgeInterval,proto3" json:"healthy_edge_interval,omitempty"` + HealthyEdgeInterval *durationpb.Duration `protobuf:"bytes,16,opt,name=healthy_edge_interval,json=healthyEdgeInterval,proto3" json:"healthy_edge_interval,omitempty"` // .. attention:: // This field is deprecated in favor of the extension // :ref:`event_logger ` and @@ -256,6 +256,10 @@ type HealthCheck struct { // initial health check failure event will be logged. // The default value is false. AlwaysLogHealthCheckFailures bool `protobuf:"varint,19,opt,name=always_log_health_check_failures,json=alwaysLogHealthCheckFailures,proto3" json:"always_log_health_check_failures,omitempty"` + // If set to true, health check success events will always be logged. If set to false, only host addition event will be logged + // if it is the first successful health check, or if the healthy threshold is reached. + // The default value is false. + AlwaysLogHealthCheckSuccess bool `protobuf:"varint,26,opt,name=always_log_health_check_success,json=alwaysLogHealthCheckSuccess,proto3" json:"always_log_health_check_success,omitempty"` // This allows overriding the cluster TLS settings, just for health check connections. TlsOptions *HealthCheck_TlsOptions `protobuf:"bytes,21,opt,name=tls_options,json=tlsOptions,proto3" json:"tls_options,omitempty"` // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's @@ -288,7 +292,7 @@ type HealthCheck struct { // :ref:`transport socket matches `, // the cluster's :ref:`transport socket ` // will be used for health check socket configuration. 
- TransportSocketMatchCriteria *_struct.Struct `protobuf:"bytes,23,opt,name=transport_socket_match_criteria,json=transportSocketMatchCriteria,proto3" json:"transport_socket_match_criteria,omitempty"` + TransportSocketMatchCriteria *structpb.Struct `protobuf:"bytes,23,opt,name=transport_socket_match_criteria,json=transportSocketMatchCriteria,proto3" json:"transport_socket_match_criteria,omitempty"` } func (x *HealthCheck) Reset() { @@ -323,28 +327,28 @@ func (*HealthCheck) Descriptor() ([]byte, []int) { return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1} } -func (x *HealthCheck) GetTimeout() *duration.Duration { +func (x *HealthCheck) GetTimeout() *durationpb.Duration { if x != nil { return x.Timeout } return nil } -func (x *HealthCheck) GetInterval() *duration.Duration { +func (x *HealthCheck) GetInterval() *durationpb.Duration { if x != nil { return x.Interval } return nil } -func (x *HealthCheck) GetInitialJitter() *duration.Duration { +func (x *HealthCheck) GetInitialJitter() *durationpb.Duration { if x != nil { return x.InitialJitter } return nil } -func (x *HealthCheck) GetIntervalJitter() *duration.Duration { +func (x *HealthCheck) GetIntervalJitter() *durationpb.Duration { if x != nil { return x.IntervalJitter } @@ -358,28 +362,28 @@ func (x *HealthCheck) GetIntervalJitterPercent() uint32 { return 0 } -func (x *HealthCheck) GetUnhealthyThreshold() *wrappers.UInt32Value { +func (x *HealthCheck) GetUnhealthyThreshold() *wrapperspb.UInt32Value { if x != nil { return x.UnhealthyThreshold } return nil } -func (x *HealthCheck) GetHealthyThreshold() *wrappers.UInt32Value { +func (x *HealthCheck) GetHealthyThreshold() *wrapperspb.UInt32Value { if x != nil { return x.HealthyThreshold } return nil } -func (x *HealthCheck) GetAltPort() *wrappers.UInt32Value { +func (x *HealthCheck) GetAltPort() *wrapperspb.UInt32Value { if x != nil { return x.AltPort } return nil } -func (x *HealthCheck) GetReuseConnection() *wrappers.BoolValue { +func (x *HealthCheck) GetReuseConnection() *wrapperspb.BoolValue { if x != nil { return x.ReuseConnection } @@ -421,35 +425,35 @@ func (x *HealthCheck) GetCustomHealthCheck() *HealthCheck_CustomHealthCheck { return nil } -func (x *HealthCheck) GetNoTrafficInterval() *duration.Duration { +func (x *HealthCheck) GetNoTrafficInterval() *durationpb.Duration { if x != nil { return x.NoTrafficInterval } return nil } -func (x *HealthCheck) GetNoTrafficHealthyInterval() *duration.Duration { +func (x *HealthCheck) GetNoTrafficHealthyInterval() *durationpb.Duration { if x != nil { return x.NoTrafficHealthyInterval } return nil } -func (x *HealthCheck) GetUnhealthyInterval() *duration.Duration { +func (x *HealthCheck) GetUnhealthyInterval() *durationpb.Duration { if x != nil { return x.UnhealthyInterval } return nil } -func (x *HealthCheck) GetUnhealthyEdgeInterval() *duration.Duration { +func (x *HealthCheck) GetUnhealthyEdgeInterval() *durationpb.Duration { if x != nil { return x.UnhealthyEdgeInterval } return nil } -func (x *HealthCheck) GetHealthyEdgeInterval() *duration.Duration { +func (x *HealthCheck) GetHealthyEdgeInterval() *durationpb.Duration { if x != nil { return x.HealthyEdgeInterval } @@ -485,6 +489,13 @@ func (x *HealthCheck) GetAlwaysLogHealthCheckFailures() bool { return false } +func (x *HealthCheck) GetAlwaysLogHealthCheckSuccess() bool { + if x != nil { + return x.AlwaysLogHealthCheckSuccess + } + return false +} + func (x *HealthCheck) GetTlsOptions() *HealthCheck_TlsOptions { if x != nil { return x.TlsOptions @@ -492,7 +503,7 @@ 
func (x *HealthCheck) GetTlsOptions() *HealthCheck_TlsOptions { return nil } -func (x *HealthCheck) GetTransportSocketMatchCriteria() *_struct.Struct { +func (x *HealthCheck) GetTransportSocketMatchCriteria() *structpb.Struct { if x != nil { return x.TransportSocketMatchCriteria } @@ -643,7 +654,7 @@ type HealthCheck_HttpHealthCheck struct { Receive []*HealthCheck_Payload `protobuf:"bytes,4,rep,name=receive,proto3" json:"receive,omitempty"` // Specifies the size of response buffer in bytes that is used to Payload match. // The default value is 1024. Setting to 0 implies that the Payload will be matched against the entire response. - ResponseBufferSize *wrappers.UInt64Value `protobuf:"bytes,14,opt,name=response_buffer_size,json=responseBufferSize,proto3" json:"response_buffer_size,omitempty"` + ResponseBufferSize *wrapperspb.UInt64Value `protobuf:"bytes,14,opt,name=response_buffer_size,json=responseBufferSize,proto3" json:"response_buffer_size,omitempty"` // Specifies a list of HTTP headers that should be added to each request that is sent to the // health checked cluster. For more information, including details on header value syntax, see // the documentation on :ref:`custom request headers @@ -742,7 +753,7 @@ func (x *HealthCheck_HttpHealthCheck) GetReceive() []*HealthCheck_Payload { return nil } -func (x *HealthCheck_HttpHealthCheck) GetResponseBufferSize() *wrappers.UInt64Value { +func (x *HealthCheck_HttpHealthCheck) GetResponseBufferSize() *wrapperspb.UInt64Value { if x != nil { return x.ResponseBufferSize } @@ -809,6 +820,12 @@ type HealthCheck_TcpHealthCheck struct { // payload block must be found, and in the order specified, but not // necessarily contiguous. Receive []*HealthCheck_Payload `protobuf:"bytes,2,rep,name=receive,proto3" json:"receive,omitempty"` + // When setting this value, it tries to attempt health check request with ProxyProtocol. + // When “send“ is presented, they are sent after preceding ProxyProtocol header. + // Only ProxyProtocol header is sent when “send“ is not presented. + // It allows to use both ProxyProtocol V1 and V2. In V1, it presents L3/L4. In V2, it includes + // LOCAL command and doesn't include L3/L4. 
+ ProxyProtocolConfig *ProxyProtocolConfig `protobuf:"bytes,3,opt,name=proxy_protocol_config,json=proxyProtocolConfig,proto3" json:"proxy_protocol_config,omitempty"` } func (x *HealthCheck_TcpHealthCheck) Reset() { @@ -857,6 +874,13 @@ func (x *HealthCheck_TcpHealthCheck) GetReceive() []*HealthCheck_Payload { return nil } +func (x *HealthCheck_TcpHealthCheck) GetProxyProtocolConfig() *ProxyProtocolConfig { + if x != nil { + return x.ProxyProtocolConfig + } + return nil +} + type HealthCheck_RedisHealthCheck struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1052,7 +1076,7 @@ func (m *HealthCheck_CustomHealthCheck) GetConfigType() isHealthCheck_CustomHeal return nil } -func (x *HealthCheck_CustomHealthCheck) GetTypedConfig() *any1.Any { +func (x *HealthCheck_CustomHealthCheck) GetTypedConfig() *anypb.Any { if x, ok := x.GetConfigType().(*HealthCheck_CustomHealthCheck_TypedConfig); ok { return x.TypedConfig } @@ -1064,7 +1088,7 @@ type isHealthCheck_CustomHealthCheck_ConfigType interface { } type HealthCheck_CustomHealthCheck_TypedConfig struct { - TypedConfig *any1.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` } func (*HealthCheck_CustomHealthCheck_TypedConfig) isHealthCheck_CustomHealthCheck_ConfigType() {} @@ -1138,296 +1162,309 @@ var file_envoy_config_core_v3_health_check_proto_rawDesc = []byte{ 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, - 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, - 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, - 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, - 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, - 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, - 0x64, 0x70, 0x61, 0x2f, 0x61, 
0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x60, 0x0a, 0x0f, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x53, 0x65, 0x74, 0x12, 0x4d, 0x0a, 0x08, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x22, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x92, 0x01, 0x07, 0x22, 0x05, 0x82, 0x01, 0x02, 0x10, - 0x01, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x22, 0xed, 0x1e, 0x0a, 0x0b, - 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x3f, 0x0a, 0x07, 0x74, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, - 0x01, 0x2a, 0x00, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x41, 0x0a, 0x08, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, - 0x04, 0x08, 0x01, 0x2a, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, - 0x40, 0x0a, 0x0e, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, - 0x72, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 
0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x60, 0x0a, 0x0f, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x53, 0x65, 0x74, 0x12, 0x4d, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x0d, 0xfa, + 0x42, 0x0a, 0x92, 0x01, 0x07, 0x22, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x22, 0x8c, 0x20, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x3f, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x0d, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, - 0x72, 0x12, 0x42, 0x0a, 0x0f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x6a, 0x69, - 0x74, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, 0x01, 0x2a, 0x00, 0x52, 0x07, + 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x41, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, 0x01, 0x2a, 0x00, + 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x40, 0x0a, 0x0e, 0x69, 0x6e, + 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x14, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x69, + 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0f, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, + 0x12, 0x36, 0x0a, 0x17, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 
0x6a, 0x69, 0x74, + 0x74, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x12, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x15, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, + 0x72, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x57, 0x0a, 0x13, 0x75, 0x6e, 0x68, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x12, 0x75, + 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, + 0x64, 0x12, 0x53, 0x0a, 0x11, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x74, 0x68, 0x72, + 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, + 0x01, 0x02, 0x10, 0x01, 0x52, 0x10, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x54, 0x68, 0x72, + 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x61, 0x6c, 0x74, 0x5f, 0x70, 0x6f, + 0x72, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x61, 0x6c, 0x74, 0x50, 0x6f, 0x72, 0x74, 0x12, + 0x45, 0x0a, 0x10, 0x72, 0x65, 0x75, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x75, 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x11, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x68, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x0f, 0x68, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x5c, 0x0a, 0x10, 0x74, 0x63, 0x70, 0x5f, 0x68, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x5f, 0x0a, 0x11, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 
0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x0f, 0x67, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x65, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x11, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x53, 0x0a, + 0x13, 0x6e, 0x6f, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4a, - 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x36, 0x0a, 0x17, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, - 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, - 0x18, 0x12, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x15, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, - 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x57, 0x0a, - 0x13, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, - 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, - 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, - 0x10, 0x01, 0x52, 0x12, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x54, 0x68, 0x72, - 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x53, 0x0a, 0x11, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x79, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, - 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x10, 0x68, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x79, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x61, - 0x6c, 0x74, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x61, 0x6c, 0x74, - 0x50, 0x6f, 0x72, 0x74, 0x12, 0x45, 0x0a, 0x10, 0x72, 0x65, 0x75, 0x73, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x75, 0x73, - 0x65, 0x43, 0x6f, 0x6e, 0x6e, 
0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x11, 0x68, - 0x74, 0x74, 0x70, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x0f, 0x68, 0x74, 0x74, - 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x5c, 0x0a, 0x10, - 0x74, 0x63, 0x70, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x63, 0x70, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x63, 0x70, 0x48, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x5f, 0x0a, 0x11, 0x67, 0x72, - 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x0f, 0x67, 0x72, 0x70, 0x63, - 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x65, 0x0a, 0x13, 0x63, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, - 0x63, 0x6b, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x43, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, - 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x12, 0x53, 0x0a, 0x13, 0x6e, 0x6f, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, - 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, - 0x01, 0x02, 0x2a, 0x00, 0x52, 0x11, 0x6e, 0x6f, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x49, - 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x62, 0x0a, 0x1b, 0x6e, 0x6f, 0x5f, 0x74, 0x72, - 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, - 0x00, 0x52, 0x18, 0x6e, 0x6f, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 
0x61, 0x6c, 0x12, 0x52, 0x0a, 0x12, 0x75, - 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, - 0x6c, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, + 0x11, 0x6e, 0x6f, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x12, 0x62, 0x0a, 0x1b, 0x6e, 0x6f, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, + 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x11, 0x75, 0x6e, - 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, - 0x5b, 0x0a, 0x17, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x65, 0x64, 0x67, - 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, - 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x15, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, - 0x45, 0x64, 0x67, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x57, 0x0a, 0x15, + 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x18, 0x6e, 0x6f, + 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x49, 0x6e, + 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x52, 0x0a, 0x12, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0e, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x11, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x5b, 0x0a, 0x17, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, - 0x52, 0x13, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x45, 0x64, 0x67, 0x65, 0x49, 0x6e, 0x74, - 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x31, 0x0a, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6c, - 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x92, - 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x4d, 0x0a, 0x0c, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x19, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 
0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x4d, 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x20, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, - 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, - 0x6b, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x1c, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x4c, 0x6f, 0x67, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x12, 0x4d, - 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x15, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x0a, 0x74, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5e, 0x0a, - 0x1f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, - 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, - 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, - 0x1c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x1a, 0x80, 0x01, - 0x0a, 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1d, 0x0a, 0x04, 0x74, 0x65, 0x78, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, - 0x48, 0x00, 0x52, 0x04, 0x74, 0x65, 0x78, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x62, 0x69, 0x6e, 0x61, - 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x62, 0x69, 0x6e, 0x61, - 0x72, 0x79, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, - 0x42, 0x0e, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x03, 0xf8, 0x42, 0x01, - 0x1a, 0xcc, 0x07, 0x0a, 0x0f, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1f, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, - 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, - 0x01, 0x02, 
0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x3d, 0x0a, 0x04, 0x73, 0x65, 0x6e, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, + 0x52, 0x15, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x45, 0x64, 0x67, 0x65, 0x49, + 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x57, 0x0a, 0x15, 0x68, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x79, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x13, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x79, 0x45, 0x64, 0x67, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x12, 0x31, 0x0a, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, + 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x50, + 0x61, 0x74, 0x68, 0x12, 0x4d, 0x0a, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x6f, 0x67, + 0x67, 0x65, 0x72, 0x18, 0x19, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x67, + 0x65, 0x72, 0x12, 0x4d, 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x46, 0x0a, 0x20, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, + 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x66, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x61, 0x6c, 0x77, + 0x61, 0x79, 0x73, 0x4c, 0x6f, 0x67, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1f, 0x61, 0x6c, 0x77, + 0x61, 0x79, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x1a, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x1b, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x4c, 0x6f, 0x67, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, + 0x4d, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x15, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x0a, 0x74, 0x6c, 
0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5e, + 0x0a, 0x1f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, + 0x61, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x52, 0x1c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x1a, 0x80, + 0x01, 0x0a, 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1d, 0x0a, 0x04, 0x74, 0x65, + 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x48, 0x00, 0x52, 0x04, 0x74, 0x65, 0x78, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, - 0x64, 0x52, 0x04, 0x73, 0x65, 0x6e, 0x64, 0x12, 0x43, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, - 0x76, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x52, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x12, 0x57, 0x0a, 0x14, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x5f, - 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, - 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x32, 0x02, 0x28, - 0x00, 0x52, 0x12, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x75, 0x66, 0x66, 0x65, - 0x72, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x67, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, - 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4b, - 0x0a, 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x08, 0x20, 0x03, 0x28, - 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, - 0xc0, 0x01, 0x01, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x46, 0x0a, 0x11, 0x65, - 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x74, 
0x61, 0x74, 0x75, 0x73, 0x65, 0x73, - 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, - 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x52, 0x10, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x11, 0x72, 0x65, 0x74, 0x72, - 0x69, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x54, 0x0a, - 0x11, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x63, 0x43, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, - 0x10, 0x01, 0x52, 0x0f, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x56, 0x0a, 0x14, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x47, 0x0a, 0x06, 0x6d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x65, 0x6e, + 0x64, 0x42, 0x0e, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x03, 0xf8, 0x42, + 0x01, 0x1a, 0xc6, 0x07, 0x0a, 0x0f, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1c, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x72, 0x03, 0xc0, 0x01, 0x02, 0x52, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x10, 0x01, 0xc0, 0x01, 0x02, 0x52, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x12, 0x3d, 0x0a, 0x04, 0x73, 0x65, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x04, 0x73, 0x65, + 0x6e, 0x64, 0x12, 0x43, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, + 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x12, 0x57, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x5f, 0x73, 0x69, 0x7a, 
0x65, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x32, 0x02, 0x28, 0x00, 0x52, 0x12, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x67, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, + 0x03, 0x10, 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4b, 0x0a, 0x19, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, + 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, + 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x16, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, + 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x46, 0x0a, 0x11, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, + 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x10, 0x65, 0x78, + 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x48, + 0x0a, 0x12, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x11, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x54, 0x0a, 0x11, 0x63, 0x6f, 0x64, 0x65, + 0x63, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x63, + 0x6f, 0x64, 0x65, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x56, + 0x0a, 0x14, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x47, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x18, 0x0d, 
0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x42, 0x0a, 0xfa, 0x42, 0x07, + 0x82, 0x01, 0x04, 0x10, 0x01, 0x20, 0x06, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x3a, + 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, + 0x08, 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x52, + 0x09, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x32, 0x1a, 0xa8, 0x02, 0x0a, 0x0e, 0x54, + 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x3d, 0x0a, + 0x04, 0x73, 0x65, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x82, 0x01, 0x04, 0x10, 0x01, 0x20, 0x06, 0x52, 0x06, 0x6d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, - 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x09, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x32, 0x1a, - 0xc9, 0x01, 0x0a, 0x0e, 0x54, 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x12, 0x3d, 0x0a, 0x04, 0x73, 0x65, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x04, 0x73, 0x65, 0x6e, 0x64, 0x12, 0x43, 0x0a, 0x07, + 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, + 0x65, 0x12, 0x5d, 0x0a, 0x15, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x04, 0x73, 0x65, 0x6e, - 0x64, 0x12, 0x43, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 
0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x72, - 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x63, 0x70, - 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x1a, 0x5b, 0x0a, 0x10, 0x52, - 0x65, 0x64, 0x69, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x73, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x1a, 0xf4, 0x01, 0x0a, 0x0f, 0x47, 0x72, 0x70, - 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x21, 0x0a, 0x0c, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x29, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, - 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x5d, 0x0a, 0x10, 0x69, 0x6e, - 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, - 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, - 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, - 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, - 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x1a, - 0xc0, 0x01, 0x0a, 0x11, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, - 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x36, 0x9a, - 0xc5, 0x88, 0x1e, 0x31, 0x0a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 
0x65, 0x61, 0x6c, 0x74, 0x68, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x1a, 0x64, 0x0a, 0x0a, 0x54, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x1a, 0x5b, 0x0a, 0x10, 0x52, 0x65, 0x64, 0x69, 0x73, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x35, 0x9a, 0xc5, 0x88, + 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x1a, 0xf4, 0x01, 0x0a, 0x0f, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x09, 0x61, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, + 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x74, 0x79, 0x12, 0x5d, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, + 0xe8, 0x07, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x1a, 0xc0, 0x01, 0x0a, 0x11, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, + 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 
0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, + 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x36, 0x9a, 0xc5, 0x88, 0x1e, 0x31, 0x0a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x6c, - 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, - 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x42, 0x15, - 0x0a, 0x0e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, - 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x2a, 0x60, 0x0a, 0x0c, 0x48, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, - 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c, - 0x54, 0x48, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x48, 0x45, 0x41, 0x4c, 0x54, - 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x52, 0x41, 0x49, 0x4e, 0x49, 0x4e, 0x47, - 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x49, 0x4d, 0x45, 0x4f, 0x55, 0x54, 0x10, 0x04, 0x12, - 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x05, 0x42, 0x84, 0x01, - 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x48, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, - 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x42, + 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, + 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x64, 0x0a, 0x0a, + 0x54, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, + 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x73, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 
0x2e, 0x54, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x42, 0x15, 0x0a, 0x0e, 0x68, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, + 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x2a, 0x60, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x01, 0x12, + 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, + 0x0a, 0x08, 0x44, 0x52, 0x41, 0x49, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, + 0x54, 0x49, 0x4d, 0x45, 0x4f, 0x55, 0x54, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, + 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x05, 0x42, 0x84, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, + 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1455,19 +1492,20 @@ var file_envoy_config_core_v3_health_check_proto_goTypes = []interface{}{ (*HealthCheck_GrpcHealthCheck)(nil), // 7: envoy.config.core.v3.HealthCheck.GrpcHealthCheck (*HealthCheck_CustomHealthCheck)(nil), // 8: envoy.config.core.v3.HealthCheck.CustomHealthCheck (*HealthCheck_TlsOptions)(nil), // 9: envoy.config.core.v3.HealthCheck.TlsOptions - (*duration.Duration)(nil), // 10: google.protobuf.Duration - (*wrappers.UInt32Value)(nil), // 11: google.protobuf.UInt32Value - (*wrappers.BoolValue)(nil), // 12: google.protobuf.BoolValue + (*durationpb.Duration)(nil), // 10: google.protobuf.Duration + (*wrapperspb.UInt32Value)(nil), // 11: google.protobuf.UInt32Value + (*wrapperspb.BoolValue)(nil), // 12: google.protobuf.BoolValue (*TypedExtensionConfig)(nil), // 13: envoy.config.core.v3.TypedExtensionConfig (*EventServiceConfig)(nil), // 14: envoy.config.core.v3.EventServiceConfig - (*_struct.Struct)(nil), // 15: google.protobuf.Struct - (*wrappers.UInt64Value)(nil), // 16: google.protobuf.UInt64Value + (*structpb.Struct)(nil), // 15: google.protobuf.Struct + (*wrapperspb.UInt64Value)(nil), // 16: google.protobuf.UInt64Value (*HeaderValueOption)(nil), // 17: envoy.config.core.v3.HeaderValueOption (*v3.Int64Range)(nil), // 18: envoy.type.v3.Int64Range (v3.CodecClientType)(0), // 19: envoy.type.v3.CodecClientType (*v31.StringMatcher)(nil), // 20: envoy.type.matcher.v3.StringMatcher (RequestMethod)(0), // 21: envoy.config.core.v3.RequestMethod - (*any1.Any)(nil), // 22: google.protobuf.Any + (*ProxyProtocolConfig)(nil), // 22: 
envoy.config.core.v3.ProxyProtocolConfig
+	(*anypb.Any)(nil),                // 23: google.protobuf.Any
 }
 var file_envoy_config_core_v3_health_check_proto_depIdxs = []int32{
 	0, // 0: envoy.config.core.v3.HealthStatusSet.statuses:type_name -> envoy.config.core.v3.HealthStatus
@@ -1503,13 +1541,14 @@ var file_envoy_config_core_v3_health_check_proto_depIdxs = []int32{
 	21, // 30: envoy.config.core.v3.HealthCheck.HttpHealthCheck.method:type_name -> envoy.config.core.v3.RequestMethod
 	3, // 31: envoy.config.core.v3.HealthCheck.TcpHealthCheck.send:type_name -> envoy.config.core.v3.HealthCheck.Payload
 	3, // 32: envoy.config.core.v3.HealthCheck.TcpHealthCheck.receive:type_name -> envoy.config.core.v3.HealthCheck.Payload
-	17, // 33: envoy.config.core.v3.HealthCheck.GrpcHealthCheck.initial_metadata:type_name -> envoy.config.core.v3.HeaderValueOption
-	22, // 34: envoy.config.core.v3.HealthCheck.CustomHealthCheck.typed_config:type_name -> google.protobuf.Any
-	35, // [35:35] is the sub-list for method output_type
-	35, // [35:35] is the sub-list for method input_type
-	35, // [35:35] is the sub-list for extension type_name
-	35, // [35:35] is the sub-list for extension extendee
-	0, // [0:35] is the sub-list for field type_name
+	22, // 33: envoy.config.core.v3.HealthCheck.TcpHealthCheck.proxy_protocol_config:type_name -> envoy.config.core.v3.ProxyProtocolConfig
+	17, // 34: envoy.config.core.v3.HealthCheck.GrpcHealthCheck.initial_metadata:type_name -> envoy.config.core.v3.HeaderValueOption
+	23, // 35: envoy.config.core.v3.HealthCheck.CustomHealthCheck.typed_config:type_name -> google.protobuf.Any
+	36, // [36:36] is the sub-list for method output_type
+	36, // [36:36] is the sub-list for method input_type
+	36, // [36:36] is the sub-list for extension type_name
+	36, // [36:36] is the sub-list for extension extendee
+	0, // [0:36] is the sub-list for field type_name
 }

 func init() { file_envoy_config_core_v3_health_check_proto_init() }
@@ -1520,6 +1559,7 @@ func file_envoy_config_core_v3_health_check_proto_init() {
 	file_envoy_config_core_v3_base_proto_init()
 	file_envoy_config_core_v3_event_service_config_proto_init()
 	file_envoy_config_core_v3_extension_proto_init()
+	file_envoy_config_core_v3_proxy_protocol_proto_init()
 	if !protoimpl.UnsafeEnabled {
 		file_envoy_config_core_v3_health_check_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
 			switch v := v.(*HealthStatusSet); i {
diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.validate.go
index 837a3e1fa19..707776073fa 100644
--- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.validate.go
+++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.validate.go
@@ -1,3 +1,4 @@
+//go:build !disable_pgv
 // Code generated by protoc-gen-validate. DO NOT EDIT.
// source: envoy/config/core/v3/health_check.proto @@ -674,6 +675,8 @@ func (m *HealthCheck) validate(all bool) error { // no validation rules for AlwaysLogHealthCheckFailures + // no validation rules for AlwaysLogHealthCheckSuccess + if all { switch v := interface{}(m.GetTlsOptions()).(type) { case interface{ ValidateAll() error }: @@ -1173,7 +1176,7 @@ func (m *HealthCheck_HttpHealthCheck) validate(all bool) error { if !_HealthCheck_HttpHealthCheck_Host_Pattern.MatchString(m.GetHost()) { err := HealthCheck_HttpHealthCheckValidationError{ field: "Host", - reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + reason: "value does not match regex pattern \"^[^\\x00-\\b\\n-\\x1f\\x7f]*$\"", } if !all { return err @@ -1195,7 +1198,7 @@ func (m *HealthCheck_HttpHealthCheck) validate(all bool) error { if !_HealthCheck_HttpHealthCheck_Path_Pattern.MatchString(m.GetPath()) { err := HealthCheck_HttpHealthCheckValidationError{ field: "Path", - reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + reason: "value does not match regex pattern \"^[^\\x00-\\b\\n-\\x1f\\x7f]*$\"", } if !all { return err @@ -1553,9 +1556,9 @@ var _ interface { ErrorName() string } = HealthCheck_HttpHealthCheckValidationError{} -var _HealthCheck_HttpHealthCheck_Host_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") +var _HealthCheck_HttpHealthCheck_Host_Pattern = regexp.MustCompile("^[^\x00-\b\n-\x1f\x7f]*$") -var _HealthCheck_HttpHealthCheck_Path_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") +var _HealthCheck_HttpHealthCheck_Path_Pattern = regexp.MustCompile("^[^\x00-\b\n-\x1f\x7f]*$") var _HealthCheck_HttpHealthCheck_RequestHeadersToRemove_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") @@ -1648,6 +1651,35 @@ func (m *HealthCheck_TcpHealthCheck) validate(all bool) error { } + if all { + switch v := interface{}(m.GetProxyProtocolConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheck_TcpHealthCheckValidationError{ + field: "ProxyProtocolConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheck_TcpHealthCheckValidationError{ + field: "ProxyProtocolConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetProxyProtocolConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheck_TcpHealthCheckValidationError{ + field: "ProxyProtocolConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + if len(errors) > 0 { return HealthCheck_TcpHealthCheckMultiError(errors) } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check_vtproto.pb.go new file mode 100644 index 00000000000..892ea5e4ee6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check_vtproto.pb.go @@ -0,0 +1,1384 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
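A quick orientation before the generated body below: these *_vtproto.pb.go files compile only when the `vtprotobuf` build tag is set, and they layer strict, SizeVT-driven marshalling on top of the regular reflection-based proto.Marshal path. A minimal caller sketch, assuming the vendored corev3 package is importable under its upstream module path (the sketch is illustrative, not generated code):

    // Build with: go build -tags vtprotobuf
    package main

    import (
        "fmt"
        "log"
        "time"

        corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        hc := &corev3.HealthCheck{
            Timeout:  durationpb.New(2 * time.Second),
            Interval: durationpb.New(10 * time.Second),
        }
        // SizeVT pre-computes the exact wire size; MarshalVTStrict then
        // allocates one buffer of that size and fills it back to front.
        buf, err := hc.MarshalVTStrict()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("marshalled %d bytes (SizeVT reported %d)\n", len(buf), hc.SizeVT())
    }

The back-to-front fill is also why every MarshalToSizedBufferVTStrict below visits fields in descending field-number order: each field's length prefix is only known once its payload has already been written.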
+// source: envoy/config/core/v3/health_check.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HealthStatusSet) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthStatusSet) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthStatusSet) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Statuses) > 0 { + var pksize2 int + for _, num := range m.Statuses { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.Statuses { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck_Payload) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck_Payload) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_Payload) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Payload.(*HealthCheck_Payload_Binary); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Payload.(*HealthCheck_Payload_Text); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck_Payload_Text) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_Payload_Text) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Text) + copy(dAtA[i:], m.Text) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Text))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *HealthCheck_Payload_Binary) MarshalToVTStrict(dAtA []byte) (int, error) { + size := 
m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_Payload_Binary) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Binary) + copy(dAtA[i:], m.Binary) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Binary))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *HealthCheck_HttpHealthCheck) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck_HttpHealthCheck) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_HttpHealthCheck) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ResponseBufferSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.ResponseBufferSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if m.Method != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Method)) + i-- + dAtA[i] = 0x68 + } + if len(m.RetriableStatuses) > 0 { + for iNdEx := len(m.RetriableStatuses) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.RetriableStatuses[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RetriableStatuses[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x62 + } + } + if m.ServiceNameMatcher != nil { + if vtmsg, ok := interface{}(m.ServiceNameMatcher).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ServiceNameMatcher) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x5a + } + if m.CodecClientType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CodecClientType)) + i-- + dAtA[i] = 0x50 + } + if len(m.ExpectedStatuses) > 0 { + for iNdEx := len(m.ExpectedStatuses) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ExpectedStatuses[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ExpectedStatuses[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + } + if len(m.RequestHeadersToRemove) > 0 { + for iNdEx := len(m.RequestHeadersToRemove) - 1; iNdEx 
>= 0; iNdEx-- { + i -= len(m.RequestHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.RequestHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RequestHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if len(m.RequestHeadersToAdd) > 0 { + for iNdEx := len(m.RequestHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RequestHeadersToAdd[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Receive) > 0 { + for iNdEx := len(m.Receive) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Receive[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if m.Send != nil { + size, err := m.Send.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + } + if len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck_TcpHealthCheck) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck_TcpHealthCheck) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_TcpHealthCheck) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ProxyProtocolConfig != nil { + size, err := m.ProxyProtocolConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Receive) > 0 { + for iNdEx := len(m.Receive) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Receive[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.Send != nil { + size, err := m.Send.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck_RedisHealthCheck) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck_RedisHealthCheck) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_RedisHealthCheck) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if 
m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck_GrpcHealthCheck) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck_GrpcHealthCheck) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_GrpcHealthCheck) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.InitialMetadata) > 0 { + for iNdEx := len(m.InitialMetadata) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.InitialMetadata[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0x12 + } + if len(m.ServiceName) > 0 { + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck_CustomHealthCheck) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck_CustomHealthCheck) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_CustomHealthCheck) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*HealthCheck_CustomHealthCheck_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck_CustomHealthCheck_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_CustomHealthCheck_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *HealthCheck_TlsOptions) MarshalVTStrict() (dAtA []byte, 
err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck_TlsOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_TlsOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.AlpnProtocols) > 0 { + for iNdEx := len(m.AlpnProtocols) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AlpnProtocols[iNdEx]) + copy(dAtA[i:], m.AlpnProtocols[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AlpnProtocols[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AlwaysLogHealthCheckSuccess { + i-- + if m.AlwaysLogHealthCheckSuccess { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd0 + } + if len(m.EventLogger) > 0 { + for iNdEx := len(m.EventLogger) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.EventLogger[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } + if m.NoTrafficHealthyInterval != nil { + size, err := (*durationpb.Duration)(m.NoTrafficHealthyInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if m.TransportSocketMatchCriteria != nil { + size, err := (*structpb.Struct)(m.TransportSocketMatchCriteria).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if m.EventService != nil { + size, err := m.EventService.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if m.TlsOptions != nil { + size, err := m.TlsOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if m.InitialJitter != nil { + size, err := (*durationpb.Duration)(m.InitialJitter).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if 
m.AlwaysLogHealthCheckFailures { + i-- + if m.AlwaysLogHealthCheckFailures { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if m.IntervalJitterPercent != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.IntervalJitterPercent)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + } + if len(m.EventLogPath) > 0 { + i -= len(m.EventLogPath) + copy(dAtA[i:], m.EventLogPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.EventLogPath))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.HealthyEdgeInterval != nil { + size, err := (*durationpb.Duration)(m.HealthyEdgeInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.UnhealthyEdgeInterval != nil { + size, err := (*durationpb.Duration)(m.UnhealthyEdgeInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + if m.UnhealthyInterval != nil { + size, err := (*durationpb.Duration)(m.UnhealthyInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if msg, ok := m.HealthChecker.(*HealthCheck_CustomHealthCheck_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.NoTrafficInterval != nil { + size, err := (*durationpb.Duration)(m.NoTrafficInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if msg, ok := m.HealthChecker.(*HealthCheck_GrpcHealthCheck_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HealthChecker.(*HealthCheck_TcpHealthCheck_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HealthChecker.(*HealthCheck_HttpHealthCheck_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.ReuseConnection != nil { + size, err := (*wrapperspb.BoolValue)(m.ReuseConnection).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.AltPort != nil { + size, err := (*wrapperspb.UInt32Value)(m.AltPort).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.HealthyThreshold != nil { + size, err := (*wrapperspb.UInt32Value)(m.HealthyThreshold).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.UnhealthyThreshold != nil { + size, err := (*wrapperspb.UInt32Value)(m.UnhealthyThreshold).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.IntervalJitter != nil { + size, err := (*durationpb.Duration)(m.IntervalJitter).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, 
err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Interval != nil { + size, err := (*durationpb.Duration)(m.Interval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Timeout != nil { + size, err := (*durationpb.Duration)(m.Timeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck_HttpHealthCheck_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_HttpHealthCheck_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpHealthCheck != nil { + size, err := m.HttpHealthCheck.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *HealthCheck_TcpHealthCheck_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_TcpHealthCheck_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TcpHealthCheck != nil { + size, err := m.TcpHealthCheck.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *HealthCheck_GrpcHealthCheck_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_GrpcHealthCheck_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GrpcHealthCheck != nil { + size, err := m.GrpcHealthCheck.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *HealthCheck_CustomHealthCheck_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_CustomHealthCheck_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.CustomHealthCheck != nil { + size, err := m.CustomHealthCheck.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x6a + } + return len(dAtA) - i, nil +} +func (m *HealthStatusSet) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Statuses) > 0 { + l = 0 + for _, e := range m.Statuses { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck_Payload) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok 
:= m.Payload.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck_Payload_Text) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Text) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HealthCheck_Payload_Binary) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Binary) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HealthCheck_HttpHealthCheck) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Send != nil { + l = m.Send.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Receive) > 0 { + for _, e := range m.Receive { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RequestHeadersToAdd) > 0 { + for _, e := range m.RequestHeadersToAdd { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RequestHeadersToRemove) > 0 { + for _, s := range m.RequestHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ExpectedStatuses) > 0 { + for _, e := range m.ExpectedStatuses { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.CodecClientType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.CodecClientType)) + } + if m.ServiceNameMatcher != nil { + if size, ok := interface{}(m.ServiceNameMatcher).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ServiceNameMatcher) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RetriableStatuses) > 0 { + for _, e := range m.RetriableStatuses { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Method != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Method)) + } + if m.ResponseBufferSize != nil { + l = (*wrapperspb.UInt64Value)(m.ResponseBufferSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck_TcpHealthCheck) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Send != nil { + l = m.Send.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Receive) > 0 { + for _, e := range m.Receive { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.ProxyProtocolConfig != nil { + l = m.ProxyProtocolConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck_RedisHealthCheck) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck_GrpcHealthCheck) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ServiceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Authority) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if 
len(m.InitialMetadata) > 0 { + for _, e := range m.InitialMetadata { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck_CustomHealthCheck) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck_CustomHealthCheck_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HealthCheck_TlsOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.AlpnProtocols) > 0 { + for _, s := range m.AlpnProtocols { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Timeout != nil { + l = (*durationpb.Duration)(m.Timeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Interval != nil { + l = (*durationpb.Duration)(m.Interval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IntervalJitter != nil { + l = (*durationpb.Duration)(m.IntervalJitter).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UnhealthyThreshold != nil { + l = (*wrapperspb.UInt32Value)(m.UnhealthyThreshold).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HealthyThreshold != nil { + l = (*wrapperspb.UInt32Value)(m.HealthyThreshold).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AltPort != nil { + l = (*wrapperspb.UInt32Value)(m.AltPort).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ReuseConnection != nil { + l = (*wrapperspb.BoolValue)(m.ReuseConnection).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.HealthChecker.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.NoTrafficInterval != nil { + l = (*durationpb.Duration)(m.NoTrafficInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UnhealthyInterval != nil { + l = (*durationpb.Duration)(m.UnhealthyInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UnhealthyEdgeInterval != nil { + l = (*durationpb.Duration)(m.UnhealthyEdgeInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HealthyEdgeInterval != nil { + l = (*durationpb.Duration)(m.HealthyEdgeInterval).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.EventLogPath) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IntervalJitterPercent != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.IntervalJitterPercent)) + } + if m.AlwaysLogHealthCheckFailures { + n += 3 + } + if m.InitialJitter != nil { + l = (*durationpb.Duration)(m.InitialJitter).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TlsOptions != nil { + l = m.TlsOptions.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EventService != nil { + l = m.EventService.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TransportSocketMatchCriteria != nil { + l = 
(*structpb.Struct)(m.TransportSocketMatchCriteria).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.NoTrafficHealthyInterval != nil { + l = (*durationpb.Duration)(m.NoTrafficHealthyInterval).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.EventLogger) > 0 { + for _, e := range m.EventLogger { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.AlwaysLogHealthCheckSuccess { + n += 3 + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck_HttpHealthCheck_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpHealthCheck != nil { + l = m.HttpHealthCheck.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HealthCheck_TcpHealthCheck_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TcpHealthCheck != nil { + l = m.TcpHealthCheck.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HealthCheck_GrpcHealthCheck_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GrpcHealthCheck != nil { + l = m.GrpcHealthCheck.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HealthCheck_CustomHealthCheck_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CustomHealthCheck != nil { + l = m.CustomHealthCheck.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service.pb.go index 0ae3e527b00..ec8d54bb741 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/core/v3/http_service.proto package corev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service.pb.validate.go index 2d9590507dd..f1ce3fed0c6 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. 
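The `//go:build !disable_pgv` line being stamped onto every *.pb.validate.go file in this drop is the companion switch: builds that never call the generated validators can compile them out with `go build -tags disable_pgv`. Callers that do validate generally probe for the interfaces rather than depend on them, which is the same duck-typed pattern the generated code itself uses for embedded messages such as ProxyProtocolConfig above. A self-contained sketch of that pattern (hypothetical helper, not part of the vendored code):

    package main

    import "fmt"

    // validateIfPossible mirrors the generated convention: prefer
    // ValidateAll (collects every violation), fall back to Validate
    // (returns the first violation). When the validators are compiled
    // out with -tags disable_pgv, both assertions fail and validation
    // quietly becomes a no-op.
    func validateIfPossible(msg interface{}) error {
        switch v := msg.(type) {
        case interface{ ValidateAll() error }:
            return v.ValidateAll()
        case interface{ Validate() error }:
            return v.Validate()
        }
        return nil
    }

    func main() {
        fmt.Println(validateIfPossible(struct{}{})) // <nil>: nothing to validate
    }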
// source: envoy/config/core/v3/http_service.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service_vtproto.pb.go new file mode 100644 index 00000000000..64e0ece6701 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service_vtproto.pb.go @@ -0,0 +1,94 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/http_service.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HttpService) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpService) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpService) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RequestHeadersToAdd) > 0 { + for iNdEx := len(m.RequestHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RequestHeadersToAdd[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.HttpUri != nil { + size, err := m.HttpUri.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpService) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpUri != nil { + l = m.HttpUri.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RequestHeadersToAdd) > 0 { + for _, e := range m.RequestHeadersToAdd { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.go index 39bb22f7ebe..c1ba4357f52 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
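The import churn in the hunks that follow (duration → durationpb, wrappers → wrapperspb, _struct → structpb, any1 → anypb) repeats throughout this vendor bump: it is the mechanical migration off the deprecated github.com/golang/protobuf/ptypes aliases onto the canonical google.golang.org/protobuf/types/known packages. The concrete types are identical; only the import path and package name change. A before/after sketch (illustrative, not taken from the diff):

    package main

    import (
        "fmt"
        "time"

        // Before the migration these came from the deprecated aliases:
        //   duration "github.com/golang/protobuf/ptypes/duration"
        //   wrappers "github.com/golang/protobuf/ptypes/wrappers"
        // After, as in this diff, the canonical packages are used:
        durationpb "google.golang.org/protobuf/types/known/durationpb"
        wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
    )

    func main() {
        d := durationpb.New(1500 * time.Millisecond) // *durationpb.Duration
        w := wrapperspb.UInt32(100)                  // *wrapperspb.UInt32Value
        fmt.Println(d.AsDuration(), w.GetValue())    // 1.5s 100
    }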
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/core/v3/http_uri.proto package corev3 @@ -9,9 +9,9 @@ package corev3 import ( _ "github.com/cncf/xds/go/udpa/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" - duration "github.com/golang/protobuf/ptypes/duration" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" reflect "reflect" sync "sync" ) @@ -47,7 +47,7 @@ type HttpUri struct { // *HttpUri_Cluster HttpUpstreamType isHttpUri_HttpUpstreamType `protobuf_oneof:"http_upstream_type"` // Sets the maximum duration in milliseconds that a response can take to arrive upon request. - Timeout *duration.Duration `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"` + Timeout *durationpb.Duration `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"` } func (x *HttpUri) Reset() { @@ -103,7 +103,7 @@ func (x *HttpUri) GetCluster() string { return "" } -func (x *HttpUri) GetTimeout() *duration.Duration { +func (x *HttpUri) GetTimeout() *durationpb.Duration { if x != nil { return x.Timeout } @@ -181,8 +181,8 @@ func file_envoy_config_core_v3_http_uri_proto_rawDescGZIP() []byte { var file_envoy_config_core_v3_http_uri_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_envoy_config_core_v3_http_uri_proto_goTypes = []interface{}{ - (*HttpUri)(nil), // 0: envoy.config.core.v3.HttpUri - (*duration.Duration)(nil), // 1: google.protobuf.Duration + (*HttpUri)(nil), // 0: envoy.config.core.v3.HttpUri + (*durationpb.Duration)(nil), // 1: google.protobuf.Duration } var file_envoy_config_core_v3_http_uri_proto_depIdxs = []int32{ 1, // 0: envoy.config.core.v3.HttpUri.timeout:type_name -> google.protobuf.Duration diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.validate.go index 2c0a69c4183..bbf40c7ce9a 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/core/v3/http_uri.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri_vtproto.pb.go new file mode 100644 index 00000000000..73cd12f13a1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri_vtproto.pb.go @@ -0,0 +1,123 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/http_uri.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HttpUri) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpUri) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpUri) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Timeout != nil { + size, err := (*durationpb.Duration)(m.Timeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if msg, ok := m.HttpUpstreamType.(*HttpUri_Cluster); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Uri) > 0 { + i -= len(m.Uri) + copy(dAtA[i:], m.Uri) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Uri))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpUri_Cluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpUri_Cluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Cluster) + copy(dAtA[i:], m.Cluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Cluster))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *HttpUri) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Uri) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.HttpUpstreamType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Timeout != nil { + l = (*durationpb.Duration)(m.Timeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpUri_Cluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Cluster) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go index 6ab658340cd..70be28739ab 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
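For readers puzzled by the hard-coded bytes in the marshallers above (dAtA[i] = 0xa, 0x12, 0x1a, ...): each is a protobuf key byte, (field_number << 3) | wire_type, written after the payload because the buffer is filled back to front. So HttpUri.timeout, field 3 with length-delimited wire type 2, gets 3<<3|2 = 0x1a. A tiny sketch of the arithmetic (illustrative only; single-byte keys cover fields 1 through 15):

    package main

    import "fmt"

    // key computes the one-byte protobuf field key for small field numbers.
    func key(fieldNumber, wireType int) byte {
        return byte(fieldNumber<<3 | wireType)
    }

    func main() {
        const lengthDelimited = 2 // messages, strings, bytes
        fmt.Printf("field 1 -> %#x\n", key(1, lengthDelimited)) // 0xa  (uri)
        fmt.Printf("field 2 -> %#x\n", key(2, lengthDelimited)) // 0x12 (cluster)
        fmt.Printf("field 3 -> %#x\n", key(3, lengthDelimited)) // 0x1a (timeout)
    }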
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/core/v3/protocol.proto package corev3 @@ -12,10 +12,10 @@ import ( _ "github.com/envoyproxy/go-control-plane/envoy/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - duration "github.com/golang/protobuf/ptypes/duration" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -141,13 +141,13 @@ type QuicKeepAliveSettings struct { // // If zero, disable keepalive probing. // If absent, use the QUICHE default interval to probe. - MaxInterval *duration.Duration `protobuf:"bytes,1,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"` + MaxInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"` // The interval to send the first few keep-alive probing packets to prevent connection from hitting the idle timeout. Subsequent probes will be sent, each one with an interval exponentially longer than previous one, till it reaches :ref:`max_interval `. And the probes afterwards will always use :ref:`max_interval `. // // The value should be smaller than :ref:`connection idle_timeout ` to prevent idle timeout and smaller than max_interval to take effect. // // If absent or zero, disable keepalive probing for a server connection. For a client connection, if :ref:`max_interval ` is also zero, do not keepalive, otherwise use max_interval or QUICHE default to probe all the time. - InitialInterval *duration.Duration `protobuf:"bytes,2,opt,name=initial_interval,json=initialInterval,proto3" json:"initial_interval,omitempty"` + InitialInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=initial_interval,json=initialInterval,proto3" json:"initial_interval,omitempty"` } func (x *QuicKeepAliveSettings) Reset() { @@ -182,14 +182,14 @@ func (*QuicKeepAliveSettings) Descriptor() ([]byte, []int) { return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{1} } -func (x *QuicKeepAliveSettings) GetMaxInterval() *duration.Duration { +func (x *QuicKeepAliveSettings) GetMaxInterval() *durationpb.Duration { if x != nil { return x.MaxInterval } return nil } -func (x *QuicKeepAliveSettings) GetInitialInterval() *duration.Duration { +func (x *QuicKeepAliveSettings) GetInitialInterval() *durationpb.Duration { if x != nil { return x.InitialInterval } @@ -197,7 +197,7 @@ func (x *QuicKeepAliveSettings) GetInitialInterval() *duration.Duration { } // QUIC protocol options which apply to both downstream and upstream connections. -// [#next-free-field: 8] +// [#next-free-field: 9] type QuicProtocolOptions struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -205,10 +205,10 @@ type QuicProtocolOptions struct { // Maximum number of streams that the client can negotiate per connection. 100 // if not specified. 
- MaxConcurrentStreams *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=max_concurrent_streams,json=maxConcurrentStreams,proto3" json:"max_concurrent_streams,omitempty"` + MaxConcurrentStreams *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=max_concurrent_streams,json=maxConcurrentStreams,proto3" json:"max_concurrent_streams,omitempty"` // `Initial stream-level flow-control receive window // `_ size. Valid values range from - // 1 to 16777216 (2^24, maximum supported by QUICHE) and defaults to 65536 (2^16). + // 1 to 16777216 (2^24, maximum supported by QUICHE) and defaults to 16777216 (16 * 1024 * 1024). // // NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. If configured smaller than it, we will use 16384 instead. // QUICHE IETF Quic implementation supports 1 bytes window. We only support increasing the default window size now, so it's also the minimum. @@ -216,19 +216,19 @@ type QuicProtocolOptions struct { // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the // QUIC stream send and receive buffers. Once the buffer reaches this pointer, watermark callbacks will fire to // stop the flow of data to the stream buffers. - InitialStreamWindowSize *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=initial_stream_window_size,json=initialStreamWindowSize,proto3" json:"initial_stream_window_size,omitempty"` + InitialStreamWindowSize *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=initial_stream_window_size,json=initialStreamWindowSize,proto3" json:"initial_stream_window_size,omitempty"` // Similar to “initial_stream_window_size“, but for connection-level - // flow-control. Valid values rage from 1 to 25165824 (24MB, maximum supported by QUICHE) and defaults to 65536 (2^16). - // window. Currently, this has the same minimum/default as “initial_stream_window_size“. + // flow-control. Valid values rage from 1 to 25165824 (24MB, maximum supported by QUICHE) and defaults + // to 25165824 (24 * 1024 * 1024). // // NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. We only support increasing the default // window size now, so it's also the minimum. - InitialConnectionWindowSize *wrappers.UInt32Value `protobuf:"bytes,3,opt,name=initial_connection_window_size,json=initialConnectionWindowSize,proto3" json:"initial_connection_window_size,omitempty"` + InitialConnectionWindowSize *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=initial_connection_window_size,json=initialConnectionWindowSize,proto3" json:"initial_connection_window_size,omitempty"` // The number of timeouts that can occur before port migration is triggered for QUIC clients. // This defaults to 4. If set to 0, port migration will not occur on path degrading. // Timeout here refers to QUIC internal path degrading timeout mechanism, such as PTO. // This has no effect on server sessions. - NumTimeoutsToTriggerPortMigration *wrappers.UInt32Value `protobuf:"bytes,4,opt,name=num_timeouts_to_trigger_port_migration,json=numTimeoutsToTriggerPortMigration,proto3" json:"num_timeouts_to_trigger_port_migration,omitempty"` + NumTimeoutsToTriggerPortMigration *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=num_timeouts_to_trigger_port_migration,json=numTimeoutsToTriggerPortMigration,proto3" json:"num_timeouts_to_trigger_port_migration,omitempty"` // Probes the peer at the configured interval to solicit traffic, i.e. ACK or PATH_RESPONSE, from the peer to push back connection idle timeout. 
// If absent, use the default keepalive behavior of which a client connection sends PINGs every 15s, and a server connection doesn't do anything. ConnectionKeepalive *QuicKeepAliveSettings `protobuf:"bytes,5,opt,name=connection_keepalive,json=connectionKeepalive,proto3" json:"connection_keepalive,omitempty"` @@ -238,6 +238,11 @@ type QuicProtocolOptions struct { // A comma-separated list of strings representing QUIC client connection options defined in // `QUICHE `_ and to be sent by upstream connections. ClientConnectionOptions string `protobuf:"bytes,7,opt,name=client_connection_options,json=clientConnectionOptions,proto3" json:"client_connection_options,omitempty"` + // The duration that a QUIC connection stays idle before it closes itself. If this field is not present, QUICHE + // default 600s will be applied. + // For internal corporate network, a long timeout is often fine. + // But for client facing network, 30s is usually a good choice. + IdleNetworkTimeout *durationpb.Duration `protobuf:"bytes,8,opt,name=idle_network_timeout,json=idleNetworkTimeout,proto3" json:"idle_network_timeout,omitempty"` } func (x *QuicProtocolOptions) Reset() { @@ -272,28 +277,28 @@ func (*QuicProtocolOptions) Descriptor() ([]byte, []int) { return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{2} } -func (x *QuicProtocolOptions) GetMaxConcurrentStreams() *wrappers.UInt32Value { +func (x *QuicProtocolOptions) GetMaxConcurrentStreams() *wrapperspb.UInt32Value { if x != nil { return x.MaxConcurrentStreams } return nil } -func (x *QuicProtocolOptions) GetInitialStreamWindowSize() *wrappers.UInt32Value { +func (x *QuicProtocolOptions) GetInitialStreamWindowSize() *wrapperspb.UInt32Value { if x != nil { return x.InitialStreamWindowSize } return nil } -func (x *QuicProtocolOptions) GetInitialConnectionWindowSize() *wrappers.UInt32Value { +func (x *QuicProtocolOptions) GetInitialConnectionWindowSize() *wrapperspb.UInt32Value { if x != nil { return x.InitialConnectionWindowSize } return nil } -func (x *QuicProtocolOptions) GetNumTimeoutsToTriggerPortMigration() *wrappers.UInt32Value { +func (x *QuicProtocolOptions) GetNumTimeoutsToTriggerPortMigration() *wrapperspb.UInt32Value { if x != nil { return x.NumTimeoutsToTriggerPortMigration } @@ -321,6 +326,13 @@ func (x *QuicProtocolOptions) GetClientConnectionOptions() string { return "" } +func (x *QuicProtocolOptions) GetIdleNetworkTimeout() *durationpb.Duration { + if x != nil { + return x.IdleNetworkTimeout + } + return nil +} + type UpstreamHttpProtocolOptions struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -425,7 +437,7 @@ type AlternateProtocolsCacheOptions struct { // The implementation is approximate and enforced independently on each worker thread, thus // it is possible for the maximum entries in the cache to go slightly above the configured // value depending on timing. This is similar to how other circuit breakers work. - MaxEntries *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=max_entries,json=maxEntries,proto3" json:"max_entries,omitempty"` + MaxEntries *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=max_entries,json=maxEntries,proto3" json:"max_entries,omitempty"` // Allows configuring a persistent // :ref:`key value store ` to flush // alternate protocols entries to disk. 
@@ -485,7 +497,7 @@ func (x *AlternateProtocolsCacheOptions) GetName() string { return "" } -func (x *AlternateProtocolsCacheOptions) GetMaxEntries() *wrappers.UInt32Value { +func (x *AlternateProtocolsCacheOptions) GetMaxEntries() *wrapperspb.UInt32Value { if x != nil { return x.MaxEntries } @@ -536,21 +548,20 @@ type HttpProtocolOptions struct { // If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" // is configured, this timeout is scaled for downstream connections according to the value for // :ref:`HTTP_DOWNSTREAM_CONNECTION_IDLE `. - IdleTimeout *duration.Duration `protobuf:"bytes,1,opt,name=idle_timeout,json=idleTimeout,proto3" json:"idle_timeout,omitempty"` + IdleTimeout *durationpb.Duration `protobuf:"bytes,1,opt,name=idle_timeout,json=idleTimeout,proto3" json:"idle_timeout,omitempty"` // The maximum duration of a connection. The duration is defined as a period since a connection - // was established. If not set, there is no max duration. When max_connection_duration is reached - // and if there are no active streams, the connection will be closed. If the connection is a - // downstream connection and there are any active streams, the drain sequence will kick-in, - // and the connection will be force-closed after the drain period. See :ref:`drain_timeout + // was established. If not set, there is no max duration. When max_connection_duration is reached, + // the drain sequence will kick-in. The connection will be closed after the drain timeout period + // if there are no active streams. See :ref:`drain_timeout // `. - MaxConnectionDuration *duration.Duration `protobuf:"bytes,3,opt,name=max_connection_duration,json=maxConnectionDuration,proto3" json:"max_connection_duration,omitempty"` + MaxConnectionDuration *durationpb.Duration `protobuf:"bytes,3,opt,name=max_connection_duration,json=maxConnectionDuration,proto3" json:"max_connection_duration,omitempty"` // The maximum number of headers. If unconfigured, the default // maximum number of request headers allowed is 100. Requests that exceed this limit will receive // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. - MaxHeadersCount *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=max_headers_count,json=maxHeadersCount,proto3" json:"max_headers_count,omitempty"` + MaxHeadersCount *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=max_headers_count,json=maxHeadersCount,proto3" json:"max_headers_count,omitempty"` // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be // reset independent of any other timeouts. If not specified, this value is not set. - MaxStreamDuration *duration.Duration `protobuf:"bytes,4,opt,name=max_stream_duration,json=maxStreamDuration,proto3" json:"max_stream_duration,omitempty"` + MaxStreamDuration *durationpb.Duration `protobuf:"bytes,4,opt,name=max_stream_duration,json=maxStreamDuration,proto3" json:"max_stream_duration,omitempty"` // Action to take when a client request with a header name containing underscore characters is received. // If this setting is not specified, the value defaults to ALLOW. // Note: upstream responses are not affected by this setting. @@ -561,7 +572,7 @@ type HttpProtocolOptions struct { // If not specified, there is no limit. // Setting this parameter to 1 will effectively disable keep alive. // For HTTP/2 and HTTP/3, due to concurrent stream processing, the limit is approximate. 
- MaxRequestsPerConnection *wrappers.UInt32Value `protobuf:"bytes,6,opt,name=max_requests_per_connection,json=maxRequestsPerConnection,proto3" json:"max_requests_per_connection,omitempty"` + MaxRequestsPerConnection *wrapperspb.UInt32Value `protobuf:"bytes,6,opt,name=max_requests_per_connection,json=maxRequestsPerConnection,proto3" json:"max_requests_per_connection,omitempty"` } func (x *HttpProtocolOptions) Reset() { @@ -596,28 +607,28 @@ func (*HttpProtocolOptions) Descriptor() ([]byte, []int) { return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{5} } -func (x *HttpProtocolOptions) GetIdleTimeout() *duration.Duration { +func (x *HttpProtocolOptions) GetIdleTimeout() *durationpb.Duration { if x != nil { return x.IdleTimeout } return nil } -func (x *HttpProtocolOptions) GetMaxConnectionDuration() *duration.Duration { +func (x *HttpProtocolOptions) GetMaxConnectionDuration() *durationpb.Duration { if x != nil { return x.MaxConnectionDuration } return nil } -func (x *HttpProtocolOptions) GetMaxHeadersCount() *wrappers.UInt32Value { +func (x *HttpProtocolOptions) GetMaxHeadersCount() *wrapperspb.UInt32Value { if x != nil { return x.MaxHeadersCount } return nil } -func (x *HttpProtocolOptions) GetMaxStreamDuration() *duration.Duration { +func (x *HttpProtocolOptions) GetMaxStreamDuration() *durationpb.Duration { if x != nil { return x.MaxStreamDuration } @@ -631,7 +642,7 @@ func (x *HttpProtocolOptions) GetHeadersWithUnderscoresAction() HttpProtocolOpti return HttpProtocolOptions_ALLOW } -func (x *HttpProtocolOptions) GetMaxRequestsPerConnection() *wrappers.UInt32Value { +func (x *HttpProtocolOptions) GetMaxRequestsPerConnection() *wrapperspb.UInt32Value { if x != nil { return x.MaxRequestsPerConnection } @@ -648,7 +659,7 @@ type Http1ProtocolOptions struct { // are generally sent by clients to forward/explicit proxies. This allows clients to configure // envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the // “http_proxy“ environment variable. - AllowAbsoluteUrl *wrappers.BoolValue `protobuf:"bytes,1,opt,name=allow_absolute_url,json=allowAbsoluteUrl,proto3" json:"allow_absolute_url,omitempty"` + AllowAbsoluteUrl *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=allow_absolute_url,json=allowAbsoluteUrl,proto3" json:"allow_absolute_url,omitempty"` // Handle incoming HTTP/1.0 and HTTP 0.9 requests. // This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1 // style connect logic, dechunking, and handling lack of client host iff @@ -691,7 +702,7 @@ type Http1ProtocolOptions struct { // open where possible. // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging // `. - OverrideStreamErrorOnInvalidHttpMessage *wrappers.BoolValue `protobuf:"bytes,7,opt,name=override_stream_error_on_invalid_http_message,json=overrideStreamErrorOnInvalidHttpMessage,proto3" json:"override_stream_error_on_invalid_http_message,omitempty"` + OverrideStreamErrorOnInvalidHttpMessage *wrapperspb.BoolValue `protobuf:"bytes,7,opt,name=override_stream_error_on_invalid_http_message,json=overrideStreamErrorOnInvalidHttpMessage,proto3" json:"override_stream_error_on_invalid_http_message,omitempty"` // Allows sending fully qualified URLs when proxying the first line of the // response. By default, Envoy will only send the path components in the first line. 
// If this is true, Envoy will create a fully qualified URI composing scheme @@ -703,7 +714,7 @@ type Http1ProtocolOptions struct { // If unset, HTTP/1 parser is selected based on // envoy.reloadable_features.http1_use_balsa_parser. // See issue #21245. - UseBalsaParser *wrappers.BoolValue `protobuf:"bytes,9,opt,name=use_balsa_parser,json=useBalsaParser,proto3" json:"use_balsa_parser,omitempty"` + UseBalsaParser *wrapperspb.BoolValue `protobuf:"bytes,9,opt,name=use_balsa_parser,json=useBalsaParser,proto3" json:"use_balsa_parser,omitempty"` // [#not-implemented-hide:] Hiding so that field can be removed. // If true, and BalsaParser is used (either `use_balsa_parser` above is true, // or `envoy.reloadable_features.http1_use_balsa_parser` is true and @@ -750,7 +761,7 @@ func (*Http1ProtocolOptions) Descriptor() ([]byte, []int) { return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{6} } -func (x *Http1ProtocolOptions) GetAllowAbsoluteUrl() *wrappers.BoolValue { +func (x *Http1ProtocolOptions) GetAllowAbsoluteUrl() *wrapperspb.BoolValue { if x != nil { return x.AllowAbsoluteUrl } @@ -792,7 +803,7 @@ func (x *Http1ProtocolOptions) GetAllowChunkedLength() bool { return false } -func (x *Http1ProtocolOptions) GetOverrideStreamErrorOnInvalidHttpMessage() *wrappers.BoolValue { +func (x *Http1ProtocolOptions) GetOverrideStreamErrorOnInvalidHttpMessage() *wrapperspb.BoolValue { if x != nil { return x.OverrideStreamErrorOnInvalidHttpMessage } @@ -806,7 +817,7 @@ func (x *Http1ProtocolOptions) GetSendFullyQualifiedUrl() bool { return false } -func (x *Http1ProtocolOptions) GetUseBalsaParser() *wrappers.BoolValue { +func (x *Http1ProtocolOptions) GetUseBalsaParser() *wrapperspb.BoolValue { if x != nil { return x.UseBalsaParser } @@ -827,12 +838,12 @@ type KeepaliveSettings struct { // Send HTTP/2 PING frames at this period, in order to test that the connection is still alive. // If this is zero, interval PINGs will not be sent. - Interval *duration.Duration `protobuf:"bytes,1,opt,name=interval,proto3" json:"interval,omitempty"` + Interval *durationpb.Duration `protobuf:"bytes,1,opt,name=interval,proto3" json:"interval,omitempty"` // How long to wait for a response to a keepalive PING. If a response is not received within this // time period, the connection will be aborted. Note that in order to prevent the influence of // Head-of-line (HOL) blocking the timeout period is extended when *any* frame is received on // the connection, under the assumption that if a frame is received the connection is healthy. - Timeout *duration.Duration `protobuf:"bytes,2,opt,name=timeout,proto3" json:"timeout,omitempty"` + Timeout *durationpb.Duration `protobuf:"bytes,2,opt,name=timeout,proto3" json:"timeout,omitempty"` // A random jitter amount as a percentage of interval that will be added to each interval. // A value of zero means there will be no jitter. // The default value is 15%. @@ -844,7 +855,7 @@ type KeepaliveSettings struct { // interval ping will determine if the connection is dead. // // The same feature for HTTP/3 is given by inheritance from QUICHE which uses :ref:`connection idle_timeout ` and the current PTO of the connection to decide whether to probe before sending a new request. 
- ConnectionIdleInterval *duration.Duration `protobuf:"bytes,4,opt,name=connection_idle_interval,json=connectionIdleInterval,proto3" json:"connection_idle_interval,omitempty"` + ConnectionIdleInterval *durationpb.Duration `protobuf:"bytes,4,opt,name=connection_idle_interval,json=connectionIdleInterval,proto3" json:"connection_idle_interval,omitempty"` } func (x *KeepaliveSettings) Reset() { @@ -879,14 +890,14 @@ func (*KeepaliveSettings) Descriptor() ([]byte, []int) { return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{7} } -func (x *KeepaliveSettings) GetInterval() *duration.Duration { +func (x *KeepaliveSettings) GetInterval() *durationpb.Duration { if x != nil { return x.Interval } return nil } -func (x *KeepaliveSettings) GetTimeout() *duration.Duration { +func (x *KeepaliveSettings) GetTimeout() *durationpb.Duration { if x != nil { return x.Timeout } @@ -900,7 +911,7 @@ func (x *KeepaliveSettings) GetIntervalJitter() *v3.Percent { return nil } -func (x *KeepaliveSettings) GetConnectionIdleInterval() *duration.Duration { +func (x *KeepaliveSettings) GetConnectionIdleInterval() *durationpb.Duration { if x != nil { return x.ConnectionIdleInterval } @@ -917,7 +928,7 @@ type Http2ProtocolOptions struct { // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header // compression. - HpackTableSize *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=hpack_table_size,json=hpackTableSize,proto3" json:"hpack_table_size,omitempty"` + HpackTableSize *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=hpack_table_size,json=hpackTableSize,proto3" json:"hpack_table_size,omitempty"` // `Maximum concurrent streams `_ // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) // and defaults to 2147483647. @@ -929,7 +940,7 @@ type Http2ProtocolOptions struct { // This acts as an upper bound: Envoy will lower the max concurrent streams allowed on a given // connection based on upstream settings. Config dumps will reflect the configured upper bound, // not the per-connection negotiated limits. - MaxConcurrentStreams *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=max_concurrent_streams,json=maxConcurrentStreams,proto3" json:"max_concurrent_streams,omitempty"` + MaxConcurrentStreams *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=max_concurrent_streams,json=maxConcurrentStreams,proto3" json:"max_concurrent_streams,omitempty"` // `Initial stream-level flow-control window // `_ size. Valid values range from 65535 // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456 @@ -941,16 +952,16 @@ type Http2ProtocolOptions struct { // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to // stop the flow of data to the codec buffers. - InitialStreamWindowSize *wrappers.UInt32Value `protobuf:"bytes,3,opt,name=initial_stream_window_size,json=initialStreamWindowSize,proto3" json:"initial_stream_window_size,omitempty"` + InitialStreamWindowSize *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=initial_stream_window_size,json=initialStreamWindowSize,proto3" json:"initial_stream_window_size,omitempty"` // Similar to “initial_stream_window_size“, but for connection-level flow-control // window. 
Currently, this has the same minimum/maximum/default as “initial_stream_window_size“. - InitialConnectionWindowSize *wrappers.UInt32Value `protobuf:"bytes,4,opt,name=initial_connection_window_size,json=initialConnectionWindowSize,proto3" json:"initial_connection_window_size,omitempty"` + InitialConnectionWindowSize *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=initial_connection_window_size,json=initialConnectionWindowSize,proto3" json:"initial_connection_window_size,omitempty"` // Allows proxying Websocket and other upgrades over H2 connect. AllowConnect bool `protobuf:"varint,5,opt,name=allow_connect,json=allowConnect,proto3" json:"allow_connect,omitempty"` - // [#not-implemented-hide:] Hiding until envoy has full metadata support. + // [#not-implemented-hide:] Hiding until Envoy has full metadata support. // Still under implementation. DO NOT USE. // - // Allows metadata. See [metadata + // Allows sending and receiving HTTP/2 METADATA frames. See [metadata // docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more // information. AllowMetadata bool `protobuf:"varint,6,opt,name=allow_metadata,json=allowMetadata,proto3" json:"allow_metadata,omitempty"` @@ -958,20 +969,20 @@ type Http2ProtocolOptions struct { // be written into the socket). Exceeding this limit triggers flood mitigation and connection is // terminated. The “http2.outbound_flood“ stat tracks the number of terminated connections due // to flood mitigation. The default limit is 10000. - MaxOutboundFrames *wrappers.UInt32Value `protobuf:"bytes,7,opt,name=max_outbound_frames,json=maxOutboundFrames,proto3" json:"max_outbound_frames,omitempty"` + MaxOutboundFrames *wrapperspb.UInt32Value `protobuf:"bytes,7,opt,name=max_outbound_frames,json=maxOutboundFrames,proto3" json:"max_outbound_frames,omitempty"` // Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM, // preventing high memory utilization when receiving continuous stream of these frames. Exceeding // this limit triggers flood mitigation and connection is terminated. The // “http2.outbound_control_flood“ stat tracks the number of terminated connections due to flood // mitigation. The default limit is 1000. - MaxOutboundControlFrames *wrappers.UInt32Value `protobuf:"bytes,8,opt,name=max_outbound_control_frames,json=maxOutboundControlFrames,proto3" json:"max_outbound_control_frames,omitempty"` + MaxOutboundControlFrames *wrapperspb.UInt32Value `protobuf:"bytes,8,opt,name=max_outbound_control_frames,json=maxOutboundControlFrames,proto3" json:"max_outbound_control_frames,omitempty"` // Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but // might be a result of a broken HTTP/2 implementation. The `http2.inbound_empty_frames_flood“ // stat tracks the number of connections terminated due to flood mitigation. // Setting this to 0 will terminate connection upon receiving first frame with an empty payload // and no end stream flag. The default limit is 1. 
- MaxConsecutiveInboundFramesWithEmptyPayload *wrappers.UInt32Value `protobuf:"bytes,9,opt,name=max_consecutive_inbound_frames_with_empty_payload,json=maxConsecutiveInboundFramesWithEmptyPayload,proto3" json:"max_consecutive_inbound_frames_with_empty_payload,omitempty"` + MaxConsecutiveInboundFramesWithEmptyPayload *wrapperspb.UInt32Value `protobuf:"bytes,9,opt,name=max_consecutive_inbound_frames_with_empty_payload,json=maxConsecutiveInboundFramesWithEmptyPayload,proto3" json:"max_consecutive_inbound_frames_with_empty_payload,omitempty"` // Limit the number of inbound PRIORITY frames allowed per each opened stream. If the number // of PRIORITY frames received over the lifetime of connection exceeds the value calculated // using this formula:: @@ -983,7 +994,7 @@ type Http2ProtocolOptions struct { // “opened_streams“ is incremented when Envoy send the HEADERS frame for a new stream. The // “http2.inbound_priority_frames_flood“ stat tracks // the number of connections terminated due to flood mitigation. The default limit is 100. - MaxInboundPriorityFramesPerStream *wrappers.UInt32Value `protobuf:"bytes,10,opt,name=max_inbound_priority_frames_per_stream,json=maxInboundPriorityFramesPerStream,proto3" json:"max_inbound_priority_frames_per_stream,omitempty"` + MaxInboundPriorityFramesPerStream *wrapperspb.UInt32Value `protobuf:"bytes,10,opt,name=max_inbound_priority_frames_per_stream,json=maxInboundPriorityFramesPerStream,proto3" json:"max_inbound_priority_frames_per_stream,omitempty"` // Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated // using this formula:: @@ -998,7 +1009,7 @@ type Http2ProtocolOptions struct { // flood mitigation. The default max_inbound_window_update_frames_per_data_frame_sent value is 10. // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control, // but more complex implementations that try to estimate available bandwidth require at least 2. - MaxInboundWindowUpdateFramesPerDataFrameSent *wrappers.UInt32Value `protobuf:"bytes,11,opt,name=max_inbound_window_update_frames_per_data_frame_sent,json=maxInboundWindowUpdateFramesPerDataFrameSent,proto3" json:"max_inbound_window_update_frames_per_data_frame_sent,omitempty"` + MaxInboundWindowUpdateFramesPerDataFrameSent *wrapperspb.UInt32Value `protobuf:"bytes,11,opt,name=max_inbound_window_update_frames_per_data_frame_sent,json=maxInboundWindowUpdateFramesPerDataFrameSent,proto3" json:"max_inbound_window_update_frames_per_data_frame_sent,omitempty"` // Allows invalid HTTP messaging and headers. When this option is disabled (default), then // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, // when this option is enabled, only the offending stream is terminated. @@ -1022,7 +1033,7 @@ type Http2ProtocolOptions struct { // ` // // See `RFC7540, sec. 8.1 `_ for details. 
- OverrideStreamErrorOnInvalidHttpMessage *wrappers.BoolValue `protobuf:"bytes,14,opt,name=override_stream_error_on_invalid_http_message,json=overrideStreamErrorOnInvalidHttpMessage,proto3" json:"override_stream_error_on_invalid_http_message,omitempty"` + OverrideStreamErrorOnInvalidHttpMessage *wrapperspb.BoolValue `protobuf:"bytes,14,opt,name=override_stream_error_on_invalid_http_message,json=overrideStreamErrorOnInvalidHttpMessage,proto3" json:"override_stream_error_on_invalid_http_message,omitempty"` // [#not-implemented-hide:] // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions: // @@ -1056,7 +1067,7 @@ type Http2ProtocolOptions struct { // [#not-implemented-hide:] Hiding so that the field can be removed after oghttp2 is rolled out. // If set, force use of a particular HTTP/2 codec: oghttp2 if true, nghttp2 if false. // If unset, HTTP/2 codec is selected based on envoy.reloadable_features.http2_use_oghttp2. - UseOghttp2Codec *wrappers.BoolValue `protobuf:"bytes,16,opt,name=use_oghttp2_codec,json=useOghttp2Codec,proto3" json:"use_oghttp2_codec,omitempty"` + UseOghttp2Codec *wrapperspb.BoolValue `protobuf:"bytes,16,opt,name=use_oghttp2_codec,json=useOghttp2Codec,proto3" json:"use_oghttp2_codec,omitempty"` } func (x *Http2ProtocolOptions) Reset() { @@ -1091,28 +1102,28 @@ func (*Http2ProtocolOptions) Descriptor() ([]byte, []int) { return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{8} } -func (x *Http2ProtocolOptions) GetHpackTableSize() *wrappers.UInt32Value { +func (x *Http2ProtocolOptions) GetHpackTableSize() *wrapperspb.UInt32Value { if x != nil { return x.HpackTableSize } return nil } -func (x *Http2ProtocolOptions) GetMaxConcurrentStreams() *wrappers.UInt32Value { +func (x *Http2ProtocolOptions) GetMaxConcurrentStreams() *wrapperspb.UInt32Value { if x != nil { return x.MaxConcurrentStreams } return nil } -func (x *Http2ProtocolOptions) GetInitialStreamWindowSize() *wrappers.UInt32Value { +func (x *Http2ProtocolOptions) GetInitialStreamWindowSize() *wrapperspb.UInt32Value { if x != nil { return x.InitialStreamWindowSize } return nil } -func (x *Http2ProtocolOptions) GetInitialConnectionWindowSize() *wrappers.UInt32Value { +func (x *Http2ProtocolOptions) GetInitialConnectionWindowSize() *wrapperspb.UInt32Value { if x != nil { return x.InitialConnectionWindowSize } @@ -1133,35 +1144,35 @@ func (x *Http2ProtocolOptions) GetAllowMetadata() bool { return false } -func (x *Http2ProtocolOptions) GetMaxOutboundFrames() *wrappers.UInt32Value { +func (x *Http2ProtocolOptions) GetMaxOutboundFrames() *wrapperspb.UInt32Value { if x != nil { return x.MaxOutboundFrames } return nil } -func (x *Http2ProtocolOptions) GetMaxOutboundControlFrames() *wrappers.UInt32Value { +func (x *Http2ProtocolOptions) GetMaxOutboundControlFrames() *wrapperspb.UInt32Value { if x != nil { return x.MaxOutboundControlFrames } return nil } -func (x *Http2ProtocolOptions) GetMaxConsecutiveInboundFramesWithEmptyPayload() *wrappers.UInt32Value { +func (x *Http2ProtocolOptions) GetMaxConsecutiveInboundFramesWithEmptyPayload() *wrapperspb.UInt32Value { if x != nil { return x.MaxConsecutiveInboundFramesWithEmptyPayload } return nil } -func (x *Http2ProtocolOptions) GetMaxInboundPriorityFramesPerStream() *wrappers.UInt32Value { +func (x *Http2ProtocolOptions) GetMaxInboundPriorityFramesPerStream() *wrapperspb.UInt32Value { if x != nil { return x.MaxInboundPriorityFramesPerStream } return nil } -func (x *Http2ProtocolOptions) GetMaxInboundWindowUpdateFramesPerDataFrameSent() 
*wrappers.UInt32Value { +func (x *Http2ProtocolOptions) GetMaxInboundWindowUpdateFramesPerDataFrameSent() *wrapperspb.UInt32Value { if x != nil { return x.MaxInboundWindowUpdateFramesPerDataFrameSent } @@ -1176,7 +1187,7 @@ func (x *Http2ProtocolOptions) GetStreamErrorOnInvalidHttpMessaging() bool { return false } -func (x *Http2ProtocolOptions) GetOverrideStreamErrorOnInvalidHttpMessage() *wrappers.BoolValue { +func (x *Http2ProtocolOptions) GetOverrideStreamErrorOnInvalidHttpMessage() *wrapperspb.BoolValue { if x != nil { return x.OverrideStreamErrorOnInvalidHttpMessage } @@ -1197,7 +1208,7 @@ func (x *Http2ProtocolOptions) GetConnectionKeepalive() *KeepaliveSettings { return nil } -func (x *Http2ProtocolOptions) GetUseOghttp2Codec() *wrappers.BoolValue { +func (x *Http2ProtocolOptions) GetUseOghttp2Codec() *wrapperspb.BoolValue { if x != nil { return x.UseOghttp2Codec } @@ -1253,7 +1264,7 @@ func (x *GrpcProtocolOptions) GetHttp2ProtocolOptions() *Http2ProtocolOptions { } // A message which allows using HTTP/3. -// [#next-free-field: 6] +// [#next-free-field: 7] type Http3ProtocolOptions struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1266,7 +1277,7 @@ type Http3ProtocolOptions struct { // // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging // `. - OverrideStreamErrorOnInvalidHttpMessage *wrappers.BoolValue `protobuf:"bytes,2,opt,name=override_stream_error_on_invalid_http_message,json=overrideStreamErrorOnInvalidHttpMessage,proto3" json:"override_stream_error_on_invalid_http_message,omitempty"` + OverrideStreamErrorOnInvalidHttpMessage *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=override_stream_error_on_invalid_http_message,json=overrideStreamErrorOnInvalidHttpMessage,proto3" json:"override_stream_error_on_invalid_http_message,omitempty"` // Allows proxying Websocket and other upgrades over HTTP/3 CONNECT using // the header mechanisms from the `HTTP/2 extended connect RFC // `_ @@ -1274,6 +1285,13 @@ type Http3ProtocolOptions struct { // `_ // Note that HTTP/3 CONNECT is not yet an RFC. AllowExtendedConnect bool `protobuf:"varint,5,opt,name=allow_extended_connect,json=allowExtendedConnect,proto3" json:"allow_extended_connect,omitempty"` + // [#not-implemented-hide:] Hiding until Envoy has full metadata support. + // Still under implementation. DO NOT USE. + // + // Allows sending and receiving HTTP/3 METADATA frames. See [metadata + // docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more + // information. 
+ AllowMetadata bool `protobuf:"varint,6,opt,name=allow_metadata,json=allowMetadata,proto3" json:"allow_metadata,omitempty"` } func (x *Http3ProtocolOptions) Reset() { @@ -1315,7 +1333,7 @@ func (x *Http3ProtocolOptions) GetQuicProtocolOptions() *QuicProtocolOptions { return nil } -func (x *Http3ProtocolOptions) GetOverrideStreamErrorOnInvalidHttpMessage() *wrappers.BoolValue { +func (x *Http3ProtocolOptions) GetOverrideStreamErrorOnInvalidHttpMessage() *wrapperspb.BoolValue { if x != nil { return x.OverrideStreamErrorOnInvalidHttpMessage } @@ -1329,6 +1347,13 @@ func (x *Http3ProtocolOptions) GetAllowExtendedConnect() bool { return false } +func (x *Http3ProtocolOptions) GetAllowMetadata() bool { + if x != nil { + return x.AllowMetadata + } + return false +} + // A message to control transformations to the :scheme header type SchemeHeaderTransformation struct { state protoimpl.MessageState @@ -1339,6 +1364,11 @@ type SchemeHeaderTransformation struct { // // *SchemeHeaderTransformation_SchemeToOverwrite Transformation isSchemeHeaderTransformation_Transformation `protobuf_oneof:"transformation"` + // Set the Scheme header to match the upstream transport protocol. For example, should a + // request be sent to the upstream over TLS, the scheme header will be set to "https". Should the + // request be sent over plaintext, the scheme header will be set to "http". + // If scheme_to_overwrite is set, this field is not used. + MatchUpstream bool `protobuf:"varint,2,opt,name=match_upstream,json=matchUpstream,proto3" json:"match_upstream,omitempty"` } func (x *SchemeHeaderTransformation) Reset() { @@ -1387,12 +1417,20 @@ func (x *SchemeHeaderTransformation) GetSchemeToOverwrite() string { return "" } +func (x *SchemeHeaderTransformation) GetMatchUpstream() bool { + if x != nil { + return x.MatchUpstream + } + return false +} + type isSchemeHeaderTransformation_Transformation interface { isSchemeHeaderTransformation_Transformation() } type SchemeHeaderTransformation_SchemeToOverwrite struct { // Overwrite any Scheme header with the contents of this string. + // If set, takes precedence over match_upstream. SchemeToOverwrite string `protobuf:"bytes,1,opt,name=scheme_to_overwrite,json=schemeToOverwrite,proto3,oneof"` } @@ -1602,9 +1640,9 @@ type Http2ProtocolOptions_SettingsParameter struct { unknownFields protoimpl.UnknownFields // The 16 bit parameter identifier. - Identifier *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` + Identifier *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` // The 32 bit parameter value. 
- Value *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Value *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } func (x *Http2ProtocolOptions_SettingsParameter) Reset() { @@ -1639,14 +1677,14 @@ func (*Http2ProtocolOptions_SettingsParameter) Descriptor() ([]byte, []int) { return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{8, 0} } -func (x *Http2ProtocolOptions_SettingsParameter) GetIdentifier() *wrappers.UInt32Value { +func (x *Http2ProtocolOptions_SettingsParameter) GetIdentifier() *wrapperspb.UInt32Value { if x != nil { return x.Identifier } return nil } -func (x *Http2ProtocolOptions_SettingsParameter) GetValue() *wrappers.UInt32Value { +func (x *Http2ProtocolOptions_SettingsParameter) GetValue() *wrapperspb.UInt32Value { if x != nil { return x.Value } @@ -1693,7 +1731,7 @@ var file_envoy_config_core_v3_protocol_proto_rawDesc = []byte{ 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x22, 0x00, 0x32, 0x02, 0x08, 0x01, 0x52, 0x0f, 0x69, 0x6e, 0x69, - 0x74, 0x69, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0x93, 0x05, 0x0a, + 0x74, 0x69, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0xf1, 0x05, 0x0a, 0x13, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5b, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x01, @@ -1735,353 +1773,364 @@ var file_envoy_config_core_v3_protocol_proto_rawDesc = []byte{ 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x22, 0xe4, 0x01, 0x0a, 0x1b, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, - 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x6e, 0x69, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x61, 0x75, 0x74, 0x6f, 0x53, 0x6e, 0x69, 0x12, 0x2e, 0x0a, - 0x13, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x61, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x61, 0x75, 0x74, 0x6f, - 0x53, 0x61, 0x6e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, - 0x18, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x73, - 0x6e, 0x69, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xd0, 0x01, 0x01, 0xc0, 0x01, 0x01, 0x52, 0x15, 0x6f, 0x76, - 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x53, 0x6e, 0x69, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x55, 0x70, - 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x86, 0x04, 0x0a, 
0x1e, 0x41, 0x6c, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, - 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1b, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, - 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x6d, 0x61, 0x78, - 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, - 0x04, 0x2a, 0x02, 0x20, 0x00, 0x52, 0x0a, 0x6d, 0x61, 0x78, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, - 0x73, 0x12, 0x5f, 0x0a, 0x16, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x73, - 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6b, - 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x84, 0x01, 0x0a, 0x14, 0x70, 0x72, 0x65, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, - 0x74, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x51, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, - 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x70, 0x72, 0x65, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, - 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x61, 0x6e, - 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, - 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, - 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x65, 0x73, 0x1a, 0x68, 0x0a, 0x1c, 0x41, 0x6c, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, - 0x63, 0x68, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x27, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, - 0x06, 0xd0, 0x01, 0x01, 0xc0, 0x01, 0x01, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x1f, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x42, - 0x0b, 0xfa, 0x42, 0x08, 0x2a, 0x06, 0x10, 0xff, 0xff, 0x03, 0x20, 0x00, 0x52, 0x04, 0x70, 0x6f, - 0x72, 0x74, 0x22, 0xaf, 0x05, 0x0a, 0x13, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x0c, 0x69, 0x64, - 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x6e, 0x73, 0x12, 0x5c, 0x0a, 0x14, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x6e, 0x65, 0x74, 0x77, 0x6f, + 0x72, 
0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x69, 0x64, 0x6c, - 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x51, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x11, 0x6d, - 0x61, 0x78, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x0f, 0x6d, - 0x61, 0x78, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x49, - 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x8d, 0x01, 0x0a, 0x1f, 0x68, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x75, 0x6e, 0x64, 0x65, 0x72, - 0x73, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x57, 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, - 0x63, 0x6f, 0x72, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1c, 0x68, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x73, 0x57, 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, - 0x72, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x1b, 0x6d, 0x61, 0x78, - 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0xfa, 0x42, 0x0c, + 0xaa, 0x01, 0x09, 0x22, 0x03, 0x08, 0xd8, 0x04, 0x32, 0x02, 0x08, 0x01, 0x52, 0x12, 0x69, 0x64, + 0x6c, 0x65, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x22, 0xe4, 0x01, 0x0a, 0x1b, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x74, 0x74, + 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x19, 0x0a, 0x08, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x6e, 0x69, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x07, 0x61, 
0x75, 0x74, 0x6f, 0x53, 0x6e, 0x69, 0x12, 0x2e, 0x0a, 0x13, 0x61, + 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x61, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x61, 0x75, 0x74, 0x6f, 0x53, 0x61, + 0x6e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x18, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x6e, 0x69, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, + 0x42, 0x08, 0x72, 0x06, 0xd0, 0x01, 0x01, 0xc0, 0x01, 0x01, 0x52, 0x15, 0x6f, 0x76, 0x65, 0x72, + 0x72, 0x69, 0x64, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x53, 0x6e, 0x69, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x55, 0x70, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x86, 0x04, 0x0a, 0x1e, 0x41, 0x6c, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, + 0x63, 0x68, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x65, + 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, + 0x02, 0x20, 0x00, 0x52, 0x0a, 0x6d, 0x61, 0x78, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, + 0x5f, 0x0a, 0x16, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6b, 0x65, 0x79, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x84, 0x01, 0x0a, 0x14, 0x70, 0x72, 0x65, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, + 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x51, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x13, 0x70, 0x72, 0x65, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, + 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x61, 0x6e, 0x6f, 0x6e, + 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x11, 0x63, 0x61, 0x6e, 0x6f, 
0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x75, + 0x66, 0x66, 0x69, 0x78, 0x65, 0x73, 0x1a, 0x68, 0x0a, 0x1c, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x68, + 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x27, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xd0, + 0x01, 0x01, 0xc0, 0x01, 0x01, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x1f, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0b, 0xfa, + 0x42, 0x08, 0x2a, 0x06, 0x10, 0xff, 0xff, 0x03, 0x20, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, + 0x22, 0xaf, 0x05, 0x0a, 0x13, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x0c, 0x69, 0x64, 0x6c, 0x65, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x18, 0x6d, 0x61, - 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4e, 0x0a, 0x1c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x73, 0x57, 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x73, - 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, - 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, - 0x45, 0x53, 0x54, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x48, 0x45, - 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x93, 0x09, 0x0a, 0x14, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x48, 0x0a, - 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x62, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x65, 0x5f, - 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x62, 0x73, 0x6f, - 0x6c, 0x75, 0x74, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70, - 0x74, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x31, 0x30, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0c, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x48, 0x74, 0x74, 0x70, 0x31, 0x30, 0x12, 0x36, 0x0a, - 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x66, 0x6f, - 0x72, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x31, 0x30, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x14, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x46, 0x6f, 0x72, 0x48, - 0x74, 0x74, 0x70, 0x31, 0x30, 0x12, 0x66, 0x0a, 0x11, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, - 0x6b, 0x65, 0x79, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x69, 0x64, 0x6c, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x51, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x11, 0x6d, 0x61, 0x78, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x0f, 0x6d, 0x61, 0x78, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x49, 0x0a, 0x13, + 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x8d, 0x01, 0x0a, 0x1f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x75, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, + 0x6f, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x0f, 0x68, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x27, 0x0a, - 0x0f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x72, - 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, - 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x68, 0x75, 0x6e, 0x6b, - 0x65, 0x64, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x7a, 0x0a, 0x2d, 0x6f, 0x76, 0x65, 0x72, - 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, - 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x27, 0x6f, 0x76, 0x65, - 0x72, 
0x72, 0x69, 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, - 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x65, 0x6e, 0x64, 0x5f, 0x66, 0x75, 0x6c, - 0x6c, 0x79, 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x75, 0x72, 0x6c, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x73, 0x65, 0x6e, 0x64, 0x46, 0x75, 0x6c, 0x6c, - 0x79, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x64, 0x55, 0x72, 0x6c, 0x12, 0x4e, 0x0a, - 0x10, 0x75, 0x73, 0x65, 0x5f, 0x62, 0x61, 0x6c, 0x73, 0x61, 0x5f, 0x70, 0x61, 0x72, 0x73, 0x65, - 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x64, 0x65, 0x72, 0x73, 0x57, 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, + 0x72, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1c, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x57, 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, + 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4e, 0x0a, 0x1c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x57, + 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x00, 0x12, + 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, + 0x54, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x48, 0x45, 0x41, 0x44, + 0x45, 0x52, 0x10, 0x02, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, + 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x22, 0x93, 0x09, 0x0a, 0x14, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x48, 0x0a, 0x12, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x62, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x65, 0x5f, 0x75, 0x72, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x0e, 0x75, - 0x73, 0x65, 0x42, 0x61, 0x6c, 0x73, 0x61, 0x50, 0x61, 0x72, 0x73, 0x65, 0x72, 0x12, 0x3a, 0x0a, - 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x42, 0x08, 0xd2, 0xc6, 0xa4, - 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x1a, 0x9f, 0x03, 0x0a, 0x0f, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x4b, 
0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x78, 0x0a, - 0x11, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x73, 0x65, 0x5f, 0x77, 0x6f, 0x72, - 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x6c, 0x75, 0x65, 0x52, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x62, 0x73, 0x6f, 0x6c, 0x75, + 0x74, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x5f, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x31, 0x30, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, + 0x63, 0x63, 0x65, 0x70, 0x74, 0x48, 0x74, 0x74, 0x70, 0x31, 0x30, 0x12, 0x36, 0x0a, 0x18, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x31, 0x30, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x46, 0x6f, 0x72, 0x48, 0x74, 0x74, + 0x70, 0x31, 0x30, 0x12, 0x66, 0x0a, 0x11, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6b, 0x65, + 0x79, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x0f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x72, 0x61, 0x69, + 0x6c, 0x65, 0x72, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x68, + 0x75, 0x6e, 0x6b, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x65, 0x64, + 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x7a, 0x0a, 0x2d, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, + 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x27, 0x6f, 0x76, 0x65, 0x72, 0x72, + 0x69, 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, + 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x65, 0x6e, 0x64, 0x5f, 0x66, 0x75, 0x6c, 0x6c, 0x79, + 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x73, 0x65, 0x6e, 0x64, 0x46, 0x75, 0x6c, 0x6c, 0x79, 0x51, + 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x64, 0x55, 0x72, 0x6c, 0x12, 0x4e, 0x0a, 0x10, 0x75, + 0x73, 0x65, 0x5f, 0x62, 0x61, 0x6c, 0x73, 0x61, 0x5f, 0x70, 0x61, 0x72, 0x73, 0x65, 0x72, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x0e, 0x75, 0x73, 0x65, + 0x42, 0x61, 0x6c, 0x73, 0x61, 0x50, 0x61, 0x72, 0x73, 0x65, 0x72, 0x12, 0x3a, 0x0a, 0x14, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, + 0x02, 0x08, 0x01, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x1a, 0x9f, 0x03, 0x0a, 0x0f, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x78, 0x0a, 0x11, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x73, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x64, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, + 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, 0x6f, 0x72, + 0x64, 0x73, 0x48, 0x00, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, + 0x57, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x5b, 0x0a, 0x12, 0x73, 0x74, 0x61, 0x74, 0x65, 0x66, 0x75, + 0x6c, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, + 0x11, 0x73, 0x74, 0x61, 0x74, 0x65, 0x66, 0x75, 0x6c, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, + 0x65, 0x72, 0x1a, 0x60, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, + 0x57, 0x6f, 0x72, 0x64, 0x73, 0x3a, 0x4d, 0x9a, 0xc5, 0x88, 0x1e, 0x48, 0x0a, 0x46, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, - 0x6f, 0x72, 0x64, 0x73, 0x48, 0x00, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, - 0x73, 0x65, 0x57, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x5b, 0x0a, 0x12, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x66, 0x75, 0x6c, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, - 0x00, 0x52, 0x11, 0x73, 0x74, 0x61, 0x74, 0x65, 0x66, 0x75, 0x6c, 0x46, 0x6f, 0x72, 0x6d, 0x61, - 0x74, 0x74, 0x65, 0x72, 0x1a, 0x60, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, - 0x73, 0x65, 0x57, 0x6f, 0x72, 0x64, 0x73, 0x3a, 0x4d, 0x9a, 0xc5, 0x88, 0x1e, 0x48, 
0x0a, 0x46, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, - 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, - 0x65, 0x57, 0x6f, 0x72, 0x64, 0x73, 0x3a, 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x14, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, - 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, - 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xc1, 0x02, 0x0a, 0x11, 0x4b, - 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, - 0x12, 0x43, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, - 0x42, 0x09, 0xaa, 0x01, 0x06, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x08, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x43, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0xaa, 0x01, 0x08, 0x08, 0x01, 0x32, 0x04, 0x10, 0xc0, 0x84, - 0x3d, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3f, 0x0a, 0x0f, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x61, 0x0a, 0x18, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, - 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x16, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x49, 0x64, 0x6c, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0xd0, - 0x0e, 0x0a, 0x14, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x10, 0x68, 0x70, 0x61, 0x63, 0x6b, - 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 
0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x0e, 0x68, 0x70, 0x61, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, - 0x61, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, - 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x6f, 0x72, 0x64, 0x73, 0x3a, 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, + 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x42, 0x14, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x66, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, + 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xc1, 0x02, 0x0a, 0x11, 0x4b, 0x65, 0x65, + 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, + 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, + 0xaa, 0x01, 0x06, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x12, 0x43, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x0e, 0xfa, 0x42, 0x0b, 0xaa, 0x01, 0x08, 0x08, 0x01, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, + 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3f, 0x0a, 0x0f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x61, 0x0a, 0x18, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x32, 0x04, + 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x16, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x49, 0x64, 0x6c, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0xd0, 0x0e, 0x0a, + 0x14, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x10, 0x68, 0x70, 0x61, 0x63, 0x6b, 0x5f, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 
0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0d, 0xfa, - 0x42, 0x0a, 0x2a, 0x08, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, 0x01, 0x52, 0x14, 0x6d, 0x61, - 0x78, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x73, 0x12, 0x6a, 0x0a, 0x1a, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x2a, 0x0a, 0x18, 0xff, 0xff, 0xff, 0xff, - 0x07, 0x28, 0xff, 0xff, 0x03, 0x52, 0x17, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x72, - 0x0a, 0x1e, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x2a, 0x0a, 0x18, 0xff, 0xff, 0xff, 0xff, - 0x07, 0x28, 0xff, 0xff, 0x03, 0x52, 0x1b, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x43, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, - 0x7a, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x55, - 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x66, - 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, - 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, - 0x28, 0x01, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x46, - 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x64, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x75, 0x74, - 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x5f, 0x66, 0x72, - 0x61, 0x6d, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, - 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, - 0x01, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, - 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x84, 0x01, 0x0a, 0x31, - 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 
0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, - 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x77, - 0x69, 0x74, 0x68, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, - 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x2b, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x46, 0x72, 0x61, 0x6d, - 0x65, 0x73, 0x57, 0x69, 0x74, 0x68, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x12, 0x6f, 0x0a, 0x26, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, - 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, - 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x21, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x72, 0x69, 0x6f, - 0x72, 0x69, 0x74, 0x79, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x12, 0x91, 0x01, 0x0a, 0x34, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x62, 0x6f, - 0x75, 0x6e, 0x64, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, - 0x61, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x2c, 0x6d, 0x61, 0x78, 0x49, 0x6e, - 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x50, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, - 0x61, 0x6d, 0x65, 0x53, 0x65, 0x6e, 0x74, 0x12, 0x5e, 0x0a, 0x26, 0x73, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, - 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, - 0x2e, 0x30, 0x18, 0x01, 0x52, 0x21, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, - 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x7a, 0x0a, 0x2d, 0x6f, 0x76, 0x65, 0x72, 0x72, - 0x69, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, - 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x27, 0x6f, 0x76, 0x65, 0x72, - 0x72, 0x69, 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 
0x72, 0x4f, + 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x68, + 0x70, 0x61, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x61, 0x0a, + 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0d, 0xfa, 0x42, 0x0a, + 0x2a, 0x08, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, 0x01, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x43, + 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, + 0x12, 0x6a, 0x0a, 0x1a, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x2a, 0x0a, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, + 0xff, 0xff, 0x03, 0x52, 0x17, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x72, 0x0a, 0x1e, + 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x2a, 0x0a, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, + 0xff, 0xff, 0x03, 0x52, 0x1b, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x55, 0x0a, 0x13, + 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x66, 0x72, 0x61, + 0x6d, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, + 0x52, 0x11, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x46, 0x72, 0x61, + 0x6d, 0x65, 0x73, 0x12, 0x64, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x62, 0x6f, + 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x5f, 0x66, 0x72, 0x61, 0x6d, + 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 
0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, + 0x18, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x84, 0x01, 0x0a, 0x31, 0x6d, 0x61, + 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, + 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x77, 0x69, 0x74, + 0x68, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x2b, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x76, 0x65, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, + 0x57, 0x69, 0x74, 0x68, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x12, 0x6f, 0x0a, 0x26, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, + 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, + 0x70, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x21, + 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, + 0x74, 0x79, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x12, 0x91, 0x01, 0x0a, 0x34, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, + 0x64, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, + 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, + 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x2c, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x62, 0x6f, + 0x75, 0x6e, 0x64, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, + 0x72, 0x61, 0x6d, 0x65, 0x73, 0x50, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x61, 0x6d, + 0x65, 0x53, 0x65, 0x6e, 0x74, 0x12, 0x5e, 0x0a, 0x26, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, + 0x18, 0x01, 0x52, 0x21, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x12, 0x7a, 0x0a, 0x1a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x73, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, - 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x67, 0x69, 0x6e, 0x67, 0x12, 
0x7a, 0x0a, 0x2d, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, + 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, + 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, + 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x27, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, + 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x7a, 0x0a, 0x1a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x73, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, + 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, + 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x52, 0x18, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x5a, 0x0a, + 0x14, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x65, 0x70, + 0x61, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x52, 0x13, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x75, 0x73, 0x65, + 0x5f, 0x6f, 0x67, 0x68, 0x74, 0x74, 0x70, 0x32, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x0f, 0x75, 0x73, 0x65, 0x4f, + 0x67, 0x68, 0x74, 0x74, 0x70, 0x32, 0x43, 0x6f, 0x64, 0x65, 0x63, 0x1a, 0xe2, 0x01, 0x0a, 0x11, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x12, 0x4e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x2a, 0x06, 0x18, + 0xff, 0xff, 0x03, 0x28, 0x00, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x12, 0x3c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 
0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x3f, 0x9a, 0xc5, 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, + 0xa5, 0x01, 0x0a, 0x13, 0x47, 0x72, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x60, 0x0a, 0x16, 0x68, 0x74, 0x74, 0x70, 0x32, + 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x18, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, - 0x5a, 0x0a, 0x14, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, - 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x13, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x75, - 0x73, 0x65, 0x5f, 0x6f, 0x67, 0x68, 0x74, 0x74, 0x70, 0x32, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x63, - 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6f, 0x6e, 0x73, 0x52, 0x14, 0x68, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, + 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xd8, 0x02, 0x0a, 0x14, 0x48, 0x74, 0x74, 0x70, + 0x33, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x5d, 0x0a, 0x15, 0x71, 0x75, 0x69, 0x63, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x71, 0x75, 0x69, 0x63, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 
0x12, + 0x7a, 0x0a, 0x2d, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x0f, 0x75, 0x73, - 0x65, 0x4f, 0x67, 0x68, 0x74, 0x74, 0x70, 0x32, 0x43, 0x6f, 0x64, 0x65, 0x63, 0x1a, 0xe2, 0x01, - 0x0a, 0x11, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x2a, - 0x06, 0x18, 0xff, 0xff, 0x03, 0x28, 0x00, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, - 0x69, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x3f, 0x9a, 0xc5, 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, - 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, - 0x65, 0x72, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, - 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x22, 0xa5, 0x01, 0x0a, 0x13, 0x47, 0x72, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x60, 0x0a, 0x16, 0x68, 0x74, 0x74, - 0x70, 0x32, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x14, 0x68, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, - 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xb1, 0x02, 0x0a, 0x14, 0x48, 0x74, - 0x74, 0x70, 0x33, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 
0x5d, 0x0a, 0x15, 0x71, 0x75, 0x69, 0x63, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x71, 0x75, - 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x7a, 0x0a, 0x2d, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, - 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x27, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3e, 0x0a, - 0x16, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x5f, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x42, 0x08, 0xd2, - 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x22, 0x74, 0x0a, - 0x1a, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, - 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x13, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, - 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x72, 0x0d, 0x52, - 0x04, 0x68, 0x74, 0x74, 0x70, 0x52, 0x05, 0x68, 0x74, 0x74, 0x70, 0x73, 0x48, 0x00, 0x52, 0x11, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x54, 0x6f, 0x4f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, - 0x65, 0x42, 0x10, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x42, 0x81, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, - 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x33, 0x42, 0x0d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, - 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, - 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x75, 0x65, 0x52, 0x27, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 
0x61, 0x67, 0x65, 0x12, 0x3e, 0x0a, 0x16, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x42, 0x08, 0xd2, 0xc6, 0xa4, + 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x64, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x22, 0x9b, 0x01, 0x0a, 0x1a, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x44, 0x0a, 0x13, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x6f, + 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x12, + 0xfa, 0x42, 0x0f, 0x72, 0x0d, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, 0x52, 0x05, 0x68, 0x74, 0x74, + 0x70, 0x73, 0x48, 0x00, 0x52, 0x11, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x54, 0x6f, 0x4f, 0x76, + 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x10, + 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x42, 0x81, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, + 0x0d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, + 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2116,10 +2165,10 @@ var file_envoy_config_core_v3_protocol_proto_goTypes = []interface{}{ (*Http1ProtocolOptions_HeaderKeyFormat)(nil), // 14: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords)(nil), // 15: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords (*Http2ProtocolOptions_SettingsParameter)(nil), // 16: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter - (*duration.Duration)(nil), // 17: google.protobuf.Duration - (*wrappers.UInt32Value)(nil), // 18: google.protobuf.UInt32Value + (*durationpb.Duration)(nil), // 17: google.protobuf.Duration + (*wrapperspb.UInt32Value)(nil), // 18: google.protobuf.UInt32Value (*TypedExtensionConfig)(nil), // 19: envoy.config.core.v3.TypedExtensionConfig - (*wrappers.BoolValue)(nil), // 20: google.protobuf.BoolValue + (*wrapperspb.BoolValue)(nil), // 20: google.protobuf.BoolValue (*v3.Percent)(nil), // 21: envoy.type.v3.Percent } var file_envoy_config_core_v3_protocol_proto_depIdxs = []int32{ @@ -2130,48 +2179,49 @@ var 
file_envoy_config_core_v3_protocol_proto_depIdxs = []int32{ 18, // 4: envoy.config.core.v3.QuicProtocolOptions.initial_connection_window_size:type_name -> google.protobuf.UInt32Value 18, // 5: envoy.config.core.v3.QuicProtocolOptions.num_timeouts_to_trigger_port_migration:type_name -> google.protobuf.UInt32Value 2, // 6: envoy.config.core.v3.QuicProtocolOptions.connection_keepalive:type_name -> envoy.config.core.v3.QuicKeepAliveSettings - 18, // 7: envoy.config.core.v3.AlternateProtocolsCacheOptions.max_entries:type_name -> google.protobuf.UInt32Value - 19, // 8: envoy.config.core.v3.AlternateProtocolsCacheOptions.key_value_store_config:type_name -> envoy.config.core.v3.TypedExtensionConfig - 13, // 9: envoy.config.core.v3.AlternateProtocolsCacheOptions.prepopulated_entries:type_name -> envoy.config.core.v3.AlternateProtocolsCacheOptions.AlternateProtocolsCacheEntry - 17, // 10: envoy.config.core.v3.HttpProtocolOptions.idle_timeout:type_name -> google.protobuf.Duration - 17, // 11: envoy.config.core.v3.HttpProtocolOptions.max_connection_duration:type_name -> google.protobuf.Duration - 18, // 12: envoy.config.core.v3.HttpProtocolOptions.max_headers_count:type_name -> google.protobuf.UInt32Value - 17, // 13: envoy.config.core.v3.HttpProtocolOptions.max_stream_duration:type_name -> google.protobuf.Duration - 0, // 14: envoy.config.core.v3.HttpProtocolOptions.headers_with_underscores_action:type_name -> envoy.config.core.v3.HttpProtocolOptions.HeadersWithUnderscoresAction - 18, // 15: envoy.config.core.v3.HttpProtocolOptions.max_requests_per_connection:type_name -> google.protobuf.UInt32Value - 20, // 16: envoy.config.core.v3.Http1ProtocolOptions.allow_absolute_url:type_name -> google.protobuf.BoolValue - 14, // 17: envoy.config.core.v3.Http1ProtocolOptions.header_key_format:type_name -> envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat - 20, // 18: envoy.config.core.v3.Http1ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue - 20, // 19: envoy.config.core.v3.Http1ProtocolOptions.use_balsa_parser:type_name -> google.protobuf.BoolValue - 17, // 20: envoy.config.core.v3.KeepaliveSettings.interval:type_name -> google.protobuf.Duration - 17, // 21: envoy.config.core.v3.KeepaliveSettings.timeout:type_name -> google.protobuf.Duration - 21, // 22: envoy.config.core.v3.KeepaliveSettings.interval_jitter:type_name -> envoy.type.v3.Percent - 17, // 23: envoy.config.core.v3.KeepaliveSettings.connection_idle_interval:type_name -> google.protobuf.Duration - 18, // 24: envoy.config.core.v3.Http2ProtocolOptions.hpack_table_size:type_name -> google.protobuf.UInt32Value - 18, // 25: envoy.config.core.v3.Http2ProtocolOptions.max_concurrent_streams:type_name -> google.protobuf.UInt32Value - 18, // 26: envoy.config.core.v3.Http2ProtocolOptions.initial_stream_window_size:type_name -> google.protobuf.UInt32Value - 18, // 27: envoy.config.core.v3.Http2ProtocolOptions.initial_connection_window_size:type_name -> google.protobuf.UInt32Value - 18, // 28: envoy.config.core.v3.Http2ProtocolOptions.max_outbound_frames:type_name -> google.protobuf.UInt32Value - 18, // 29: envoy.config.core.v3.Http2ProtocolOptions.max_outbound_control_frames:type_name -> google.protobuf.UInt32Value - 18, // 30: envoy.config.core.v3.Http2ProtocolOptions.max_consecutive_inbound_frames_with_empty_payload:type_name -> google.protobuf.UInt32Value - 18, // 31: envoy.config.core.v3.Http2ProtocolOptions.max_inbound_priority_frames_per_stream:type_name -> google.protobuf.UInt32Value - 18, // 32: 
envoy.config.core.v3.Http2ProtocolOptions.max_inbound_window_update_frames_per_data_frame_sent:type_name -> google.protobuf.UInt32Value - 20, // 33: envoy.config.core.v3.Http2ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue - 16, // 34: envoy.config.core.v3.Http2ProtocolOptions.custom_settings_parameters:type_name -> envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter - 8, // 35: envoy.config.core.v3.Http2ProtocolOptions.connection_keepalive:type_name -> envoy.config.core.v3.KeepaliveSettings - 20, // 36: envoy.config.core.v3.Http2ProtocolOptions.use_oghttp2_codec:type_name -> google.protobuf.BoolValue - 9, // 37: envoy.config.core.v3.GrpcProtocolOptions.http2_protocol_options:type_name -> envoy.config.core.v3.Http2ProtocolOptions - 3, // 38: envoy.config.core.v3.Http3ProtocolOptions.quic_protocol_options:type_name -> envoy.config.core.v3.QuicProtocolOptions - 20, // 39: envoy.config.core.v3.Http3ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue - 15, // 40: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.proper_case_words:type_name -> envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords - 19, // 41: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.stateful_formatter:type_name -> envoy.config.core.v3.TypedExtensionConfig - 18, // 42: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter.identifier:type_name -> google.protobuf.UInt32Value - 18, // 43: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter.value:type_name -> google.protobuf.UInt32Value - 44, // [44:44] is the sub-list for method output_type - 44, // [44:44] is the sub-list for method input_type - 44, // [44:44] is the sub-list for extension type_name - 44, // [44:44] is the sub-list for extension extendee - 0, // [0:44] is the sub-list for field type_name + 17, // 7: envoy.config.core.v3.QuicProtocolOptions.idle_network_timeout:type_name -> google.protobuf.Duration + 18, // 8: envoy.config.core.v3.AlternateProtocolsCacheOptions.max_entries:type_name -> google.protobuf.UInt32Value + 19, // 9: envoy.config.core.v3.AlternateProtocolsCacheOptions.key_value_store_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 13, // 10: envoy.config.core.v3.AlternateProtocolsCacheOptions.prepopulated_entries:type_name -> envoy.config.core.v3.AlternateProtocolsCacheOptions.AlternateProtocolsCacheEntry + 17, // 11: envoy.config.core.v3.HttpProtocolOptions.idle_timeout:type_name -> google.protobuf.Duration + 17, // 12: envoy.config.core.v3.HttpProtocolOptions.max_connection_duration:type_name -> google.protobuf.Duration + 18, // 13: envoy.config.core.v3.HttpProtocolOptions.max_headers_count:type_name -> google.protobuf.UInt32Value + 17, // 14: envoy.config.core.v3.HttpProtocolOptions.max_stream_duration:type_name -> google.protobuf.Duration + 0, // 15: envoy.config.core.v3.HttpProtocolOptions.headers_with_underscores_action:type_name -> envoy.config.core.v3.HttpProtocolOptions.HeadersWithUnderscoresAction + 18, // 16: envoy.config.core.v3.HttpProtocolOptions.max_requests_per_connection:type_name -> google.protobuf.UInt32Value + 20, // 17: envoy.config.core.v3.Http1ProtocolOptions.allow_absolute_url:type_name -> google.protobuf.BoolValue + 14, // 18: envoy.config.core.v3.Http1ProtocolOptions.header_key_format:type_name -> envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat + 20, // 19: 
envoy.config.core.v3.Http1ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue + 20, // 20: envoy.config.core.v3.Http1ProtocolOptions.use_balsa_parser:type_name -> google.protobuf.BoolValue + 17, // 21: envoy.config.core.v3.KeepaliveSettings.interval:type_name -> google.protobuf.Duration + 17, // 22: envoy.config.core.v3.KeepaliveSettings.timeout:type_name -> google.protobuf.Duration + 21, // 23: envoy.config.core.v3.KeepaliveSettings.interval_jitter:type_name -> envoy.type.v3.Percent + 17, // 24: envoy.config.core.v3.KeepaliveSettings.connection_idle_interval:type_name -> google.protobuf.Duration + 18, // 25: envoy.config.core.v3.Http2ProtocolOptions.hpack_table_size:type_name -> google.protobuf.UInt32Value + 18, // 26: envoy.config.core.v3.Http2ProtocolOptions.max_concurrent_streams:type_name -> google.protobuf.UInt32Value + 18, // 27: envoy.config.core.v3.Http2ProtocolOptions.initial_stream_window_size:type_name -> google.protobuf.UInt32Value + 18, // 28: envoy.config.core.v3.Http2ProtocolOptions.initial_connection_window_size:type_name -> google.protobuf.UInt32Value + 18, // 29: envoy.config.core.v3.Http2ProtocolOptions.max_outbound_frames:type_name -> google.protobuf.UInt32Value + 18, // 30: envoy.config.core.v3.Http2ProtocolOptions.max_outbound_control_frames:type_name -> google.protobuf.UInt32Value + 18, // 31: envoy.config.core.v3.Http2ProtocolOptions.max_consecutive_inbound_frames_with_empty_payload:type_name -> google.protobuf.UInt32Value + 18, // 32: envoy.config.core.v3.Http2ProtocolOptions.max_inbound_priority_frames_per_stream:type_name -> google.protobuf.UInt32Value + 18, // 33: envoy.config.core.v3.Http2ProtocolOptions.max_inbound_window_update_frames_per_data_frame_sent:type_name -> google.protobuf.UInt32Value + 20, // 34: envoy.config.core.v3.Http2ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue + 16, // 35: envoy.config.core.v3.Http2ProtocolOptions.custom_settings_parameters:type_name -> envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter + 8, // 36: envoy.config.core.v3.Http2ProtocolOptions.connection_keepalive:type_name -> envoy.config.core.v3.KeepaliveSettings + 20, // 37: envoy.config.core.v3.Http2ProtocolOptions.use_oghttp2_codec:type_name -> google.protobuf.BoolValue + 9, // 38: envoy.config.core.v3.GrpcProtocolOptions.http2_protocol_options:type_name -> envoy.config.core.v3.Http2ProtocolOptions + 3, // 39: envoy.config.core.v3.Http3ProtocolOptions.quic_protocol_options:type_name -> envoy.config.core.v3.QuicProtocolOptions + 20, // 40: envoy.config.core.v3.Http3ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue + 15, // 41: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.proper_case_words:type_name -> envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords + 19, // 42: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.stateful_formatter:type_name -> envoy.config.core.v3.TypedExtensionConfig + 18, // 43: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter.identifier:type_name -> google.protobuf.UInt32Value + 18, // 44: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter.value:type_name -> google.protobuf.UInt32Value + 45, // [45:45] is the sub-list for method output_type + 45, // [45:45] is the sub-list for method input_type + 45, // [45:45] is the sub-list for extension type_name + 45, // [45:45] is the sub-list for extension extendee + 0, // [0:45] is the 
sub-list for field type_name } func init() { file_envoy_config_core_v3_protocol_proto_init() } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go index feb1489c21d..d0d0be643e4 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/core/v3/protocol.proto @@ -416,6 +417,37 @@ func (m *QuicProtocolOptions) validate(all bool) error { // no validation rules for ClientConnectionOptions + if d := m.GetIdleNetworkTimeout(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = QuicProtocolOptionsValidationError{ + field: "IdleNetworkTimeout", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + lte := time.Duration(600*time.Second + 0*time.Nanosecond) + gte := time.Duration(1*time.Second + 0*time.Nanosecond) + + if dur < gte || dur > lte { + err := QuicProtocolOptionsValidationError{ + field: "IdleNetworkTimeout", + reason: "value must be inside range [1s, 10m0s]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + if len(errors) > 0 { return QuicProtocolOptionsMultiError(errors) } @@ -2128,6 +2160,8 @@ func (m *Http3ProtocolOptions) validate(all bool) error { // no validation rules for AllowExtendedConnect + // no validation rules for AllowMetadata + if len(errors) > 0 { return Http3ProtocolOptionsMultiError(errors) } @@ -2230,6 +2264,8 @@ func (m *SchemeHeaderTransformation) validate(all bool) error { var errors []error + // no validation rules for MatchUpstream + switch v := m.Transformation.(type) { case *SchemeHeaderTransformation_SchemeToOverwrite: if v == nil { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol_vtproto.pb.go new file mode 100644 index 00000000000..131aa9d32d4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol_vtproto.pb.go @@ -0,0 +1,1718 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/protocol.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TcpProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TcpProtocolOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TcpProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *QuicKeepAliveSettings) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QuicKeepAliveSettings) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *QuicKeepAliveSettings) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.InitialInterval != nil { + size, err := (*durationpb.Duration)(m.InitialInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MaxInterval != nil { + size, err := (*durationpb.Duration)(m.MaxInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QuicProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QuicProtocolOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *QuicProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.IdleNetworkTimeout != nil { + size, err := (*durationpb.Duration)(m.IdleNetworkTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if len(m.ClientConnectionOptions) > 0 { + i -= len(m.ClientConnectionOptions) + copy(dAtA[i:], m.ClientConnectionOptions) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClientConnectionOptions))) + i-- + dAtA[i] = 0x3a + } + if len(m.ConnectionOptions) > 0 { + i -= len(m.ConnectionOptions) + copy(dAtA[i:], m.ConnectionOptions) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ConnectionOptions))) + i-- + dAtA[i] = 0x32 + } + if m.ConnectionKeepalive != nil { + size, err := 
m.ConnectionKeepalive.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.NumTimeoutsToTriggerPortMigration != nil { + size, err := (*wrapperspb.UInt32Value)(m.NumTimeoutsToTriggerPortMigration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.InitialConnectionWindowSize != nil { + size, err := (*wrapperspb.UInt32Value)(m.InitialConnectionWindowSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.InitialStreamWindowSize != nil { + size, err := (*wrapperspb.UInt32Value)(m.InitialStreamWindowSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MaxConcurrentStreams != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxConcurrentStreams).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpstreamHttpProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpstreamHttpProtocolOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UpstreamHttpProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.OverrideAutoSniHeader) > 0 { + i -= len(m.OverrideAutoSniHeader) + copy(dAtA[i:], m.OverrideAutoSniHeader) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.OverrideAutoSniHeader))) + i-- + dAtA[i] = 0x1a + } + if m.AutoSanValidation { + i-- + if m.AutoSanValidation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.AutoSni { + i-- + if m.AutoSni { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Port != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x10 + } + if len(m.Hostname) > 0 { + i 
-= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AlternateProtocolsCacheOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlternateProtocolsCacheOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AlternateProtocolsCacheOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CanonicalSuffixes) > 0 { + for iNdEx := len(m.CanonicalSuffixes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.CanonicalSuffixes[iNdEx]) + copy(dAtA[i:], m.CanonicalSuffixes[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CanonicalSuffixes[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.PrepopulatedEntries) > 0 { + for iNdEx := len(m.PrepopulatedEntries) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.PrepopulatedEntries[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if m.KeyValueStoreConfig != nil { + size, err := m.KeyValueStoreConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.MaxEntries != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxEntries).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpProtocolOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxRequestsPerConnection != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxRequestsPerConnection).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.HeadersWithUnderscoresAction != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HeadersWithUnderscoresAction)) + i-- + dAtA[i] = 0x28 + } + if m.MaxStreamDuration != nil { + size, err := (*durationpb.Duration)(m.MaxStreamDuration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.MaxConnectionDuration != nil { + size, err := (*durationpb.Duration)(m.MaxConnectionDuration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.MaxHeadersCount != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxHeadersCount).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.IdleTimeout != nil { + size, err := (*durationpb.Duration)(m.IdleTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.HeaderFormat.(*Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HeaderFormat.(*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ProperCaseWords != nil { + size, err := m.ProperCaseWords.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) 
+ i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StatefulFormatter != nil { + size, err := m.StatefulFormatter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *Http1ProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Http1ProtocolOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Http1ProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AllowCustomMethods { + i-- + if m.AllowCustomMethods { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.UseBalsaParser != nil { + size, err := (*wrapperspb.BoolValue)(m.UseBalsaParser).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if m.SendFullyQualifiedUrl { + i-- + if m.SendFullyQualifiedUrl { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.OverrideStreamErrorOnInvalidHttpMessage != nil { + size, err := (*wrapperspb.BoolValue)(m.OverrideStreamErrorOnInvalidHttpMessage).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.AllowChunkedLength { + i-- + if m.AllowChunkedLength { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.EnableTrailers { + i-- + if m.EnableTrailers { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.HeaderKeyFormat != nil { + size, err := m.HeaderKeyFormat.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.DefaultHostForHttp_10) > 0 { + i -= len(m.DefaultHostForHttp_10) + copy(dAtA[i:], m.DefaultHostForHttp_10) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DefaultHostForHttp_10))) + i-- + dAtA[i] = 0x1a + } + if m.AcceptHttp_10 { + i-- + if m.AcceptHttp_10 { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.AllowAbsoluteUrl != nil { + size, err := (*wrapperspb.BoolValue)(m.AllowAbsoluteUrl).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *KeepaliveSettings) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + 
dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeepaliveSettings) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *KeepaliveSettings) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ConnectionIdleInterval != nil { + size, err := (*durationpb.Duration)(m.ConnectionIdleInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.IntervalJitter != nil { + if vtmsg, ok := interface{}(m.IntervalJitter).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.IntervalJitter) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.Timeout != nil { + size, err := (*durationpb.Duration)(m.Timeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Interval != nil { + size, err := (*durationpb.Duration)(m.Interval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Http2ProtocolOptions_SettingsParameter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Http2ProtocolOptions_SettingsParameter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Http2ProtocolOptions_SettingsParameter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value != nil { + size, err := (*wrapperspb.UInt32Value)(m.Value).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Identifier != nil { + size, err := (*wrapperspb.UInt32Value)(m.Identifier).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Http2ProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Http2ProtocolOptions) 
MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Http2ProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.UseOghttp2Codec != nil { + size, err := (*wrapperspb.BoolValue)(m.UseOghttp2Codec).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.ConnectionKeepalive != nil { + size, err := m.ConnectionKeepalive.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + if m.OverrideStreamErrorOnInvalidHttpMessage != nil { + size, err := (*wrapperspb.BoolValue)(m.OverrideStreamErrorOnInvalidHttpMessage).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if len(m.CustomSettingsParameters) > 0 { + for iNdEx := len(m.CustomSettingsParameters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.CustomSettingsParameters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + } + if m.StreamErrorOnInvalidHttpMessaging { + i-- + if m.StreamErrorOnInvalidHttpMessaging { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + } + if m.MaxInboundWindowUpdateFramesPerDataFrameSent != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxInboundWindowUpdateFramesPerDataFrameSent).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if m.MaxInboundPriorityFramesPerStream != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxInboundPriorityFramesPerStream).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if m.MaxConsecutiveInboundFramesWithEmptyPayload != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxConsecutiveInboundFramesWithEmptyPayload).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if m.MaxOutboundControlFrames != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxOutboundControlFrames).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.MaxOutboundFrames != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxOutboundFrames).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.AllowMetadata { + i-- + if m.AllowMetadata { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.AllowConnect { + i-- + if m.AllowConnect { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.InitialConnectionWindowSize != nil { + size, err := 
(*wrapperspb.UInt32Value)(m.InitialConnectionWindowSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.InitialStreamWindowSize != nil { + size, err := (*wrapperspb.UInt32Value)(m.InitialStreamWindowSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.MaxConcurrentStreams != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxConcurrentStreams).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.HpackTableSize != nil { + size, err := (*wrapperspb.UInt32Value)(m.HpackTableSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcProtocolOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Http2ProtocolOptions != nil { + size, err := m.Http2ProtocolOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Http3ProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Http3ProtocolOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Http3ProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AllowMetadata { + i-- + if m.AllowMetadata { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.AllowExtendedConnect { + i-- + if m.AllowExtendedConnect { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.OverrideStreamErrorOnInvalidHttpMessage != nil { + size, err := (*wrapperspb.BoolValue)(m.OverrideStreamErrorOnInvalidHttpMessage).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.QuicProtocolOptions != nil { + size, err := m.QuicProtocolOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) 
+ i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SchemeHeaderTransformation) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SchemeHeaderTransformation) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SchemeHeaderTransformation) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MatchUpstream { + i-- + if m.MatchUpstream { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if msg, ok := m.Transformation.(*SchemeHeaderTransformation_SchemeToOverwrite); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *SchemeHeaderTransformation_SchemeToOverwrite) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SchemeHeaderTransformation_SchemeToOverwrite) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.SchemeToOverwrite) + copy(dAtA[i:], m.SchemeToOverwrite) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SchemeToOverwrite))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *TcpProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *QuicKeepAliveSettings) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxInterval != nil { + l = (*durationpb.Duration)(m.MaxInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.InitialInterval != nil { + l = (*durationpb.Duration)(m.InitialInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *QuicProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxConcurrentStreams != nil { + l = (*wrapperspb.UInt32Value)(m.MaxConcurrentStreams).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.InitialStreamWindowSize != nil { + l = (*wrapperspb.UInt32Value)(m.InitialStreamWindowSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.InitialConnectionWindowSize != nil { + l = (*wrapperspb.UInt32Value)(m.InitialConnectionWindowSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.NumTimeoutsToTriggerPortMigration != nil { + l = (*wrapperspb.UInt32Value)(m.NumTimeoutsToTriggerPortMigration).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectionKeepalive != nil { + l = m.ConnectionKeepalive.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ConnectionOptions) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ClientConnectionOptions) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IdleNetworkTimeout != nil { + l = (*durationpb.Duration)(m.IdleNetworkTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m 
*UpstreamHttpProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AutoSni { + n += 2 + } + if m.AutoSanValidation { + n += 2 + } + l = len(m.OverrideAutoSniHeader) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hostname) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Port != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Port)) + } + n += len(m.unknownFields) + return n +} + +func (m *AlternateProtocolsCacheOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxEntries != nil { + l = (*wrapperspb.UInt32Value)(m.MaxEntries).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.KeyValueStoreConfig != nil { + l = m.KeyValueStoreConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.PrepopulatedEntries) > 0 { + for _, e := range m.PrepopulatedEntries { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.CanonicalSuffixes) > 0 { + for _, s := range m.CanonicalSuffixes { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *HttpProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IdleTimeout != nil { + l = (*durationpb.Duration)(m.IdleTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxHeadersCount != nil { + l = (*wrapperspb.UInt32Value)(m.MaxHeadersCount).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxConnectionDuration != nil { + l = (*durationpb.Duration)(m.MaxConnectionDuration).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxStreamDuration != nil { + l = (*durationpb.Duration)(m.MaxStreamDuration).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HeadersWithUnderscoresAction != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HeadersWithUnderscoresAction)) + } + if m.MaxRequestsPerConnection != nil { + l = (*wrapperspb.UInt32Value)(m.MaxRequestsPerConnection).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.HeaderFormat.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ProperCaseWords != nil { + l = m.ProperCaseWords.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StatefulFormatter != nil { + l = m.StatefulFormatter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m 
*Http1ProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AllowAbsoluteUrl != nil { + l = (*wrapperspb.BoolValue)(m.AllowAbsoluteUrl).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AcceptHttp_10 { + n += 2 + } + l = len(m.DefaultHostForHttp_10) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HeaderKeyFormat != nil { + l = m.HeaderKeyFormat.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnableTrailers { + n += 2 + } + if m.AllowChunkedLength { + n += 2 + } + if m.OverrideStreamErrorOnInvalidHttpMessage != nil { + l = (*wrapperspb.BoolValue)(m.OverrideStreamErrorOnInvalidHttpMessage).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SendFullyQualifiedUrl { + n += 2 + } + if m.UseBalsaParser != nil { + l = (*wrapperspb.BoolValue)(m.UseBalsaParser).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AllowCustomMethods { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *KeepaliveSettings) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Interval != nil { + l = (*durationpb.Duration)(m.Interval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Timeout != nil { + l = (*durationpb.Duration)(m.Timeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IntervalJitter != nil { + if size, ok := interface{}(m.IntervalJitter).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.IntervalJitter) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectionIdleInterval != nil { + l = (*durationpb.Duration)(m.ConnectionIdleInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Http2ProtocolOptions_SettingsParameter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Identifier != nil { + l = (*wrapperspb.UInt32Value)(m.Identifier).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Value != nil { + l = (*wrapperspb.UInt32Value)(m.Value).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Http2ProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HpackTableSize != nil { + l = (*wrapperspb.UInt32Value)(m.HpackTableSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxConcurrentStreams != nil { + l = (*wrapperspb.UInt32Value)(m.MaxConcurrentStreams).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.InitialStreamWindowSize != nil { + l = (*wrapperspb.UInt32Value)(m.InitialStreamWindowSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.InitialConnectionWindowSize != nil { + l = (*wrapperspb.UInt32Value)(m.InitialConnectionWindowSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AllowConnect { + n += 2 + } + if m.AllowMetadata { + n += 2 + } + if m.MaxOutboundFrames != nil { + l = (*wrapperspb.UInt32Value)(m.MaxOutboundFrames).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxOutboundControlFrames != nil { + l = (*wrapperspb.UInt32Value)(m.MaxOutboundControlFrames).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxConsecutiveInboundFramesWithEmptyPayload != nil { + l = 
(*wrapperspb.UInt32Value)(m.MaxConsecutiveInboundFramesWithEmptyPayload).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxInboundPriorityFramesPerStream != nil { + l = (*wrapperspb.UInt32Value)(m.MaxInboundPriorityFramesPerStream).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxInboundWindowUpdateFramesPerDataFrameSent != nil { + l = (*wrapperspb.UInt32Value)(m.MaxInboundWindowUpdateFramesPerDataFrameSent).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StreamErrorOnInvalidHttpMessaging { + n += 2 + } + if len(m.CustomSettingsParameters) > 0 { + for _, e := range m.CustomSettingsParameters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.OverrideStreamErrorOnInvalidHttpMessage != nil { + l = (*wrapperspb.BoolValue)(m.OverrideStreamErrorOnInvalidHttpMessage).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectionKeepalive != nil { + l = m.ConnectionKeepalive.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UseOghttp2Codec != nil { + l = (*wrapperspb.BoolValue)(m.UseOghttp2Codec).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Http2ProtocolOptions != nil { + l = m.Http2ProtocolOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Http3ProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.QuicProtocolOptions != nil { + l = m.QuicProtocolOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OverrideStreamErrorOnInvalidHttpMessage != nil { + l = (*wrapperspb.BoolValue)(m.OverrideStreamErrorOnInvalidHttpMessage).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AllowExtendedConnect { + n += 2 + } + if m.AllowMetadata { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SchemeHeaderTransformation) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Transformation.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.MatchUpstream { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SchemeHeaderTransformation_SchemeToOverwrite) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SchemeToOverwrite) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.go index 56e3119f4b8..43e7d770618 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/core/v3/proxy_protocol.proto package corev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.validate.go index 2edd9b1165c..66555a16642 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/core/v3/proxy_protocol.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol_vtproto.pb.go new file mode 100644 index 00000000000..d429640eeef --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol_vtproto.pb.go @@ -0,0 +1,162 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/proxy_protocol.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ProxyProtocolPassThroughTLVs) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProxyProtocolPassThroughTLVs) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ProxyProtocolPassThroughTLVs) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TlvType) > 0 { + var pksize2 int + for _, num := range m.TlvType { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num := range m.TlvType { + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x12 + } + if m.MatchType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MatchType)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ProxyProtocolConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProxyProtocolConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ProxyProtocolConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PassThroughTlvs != nil { + size, err := m.PassThroughTlvs.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Version != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Version)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ProxyProtocolPassThroughTLVs) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MatchType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MatchType)) + } + if len(m.TlvType) > 0 { + l = 0 + for _, e := range m.TlvType { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + n += len(m.unknownFields) + return n +} + +func (m *ProxyProtocolConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Version != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Version)) + } + if m.PassThroughTlvs != nil { + l = m.PassThroughTlvs.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.go index 3c0a2fe07c5..fc4ec52de9c 100644 --- 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/core/v3/resolver.proto package corev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.validate.go index 30de106fe24..e9f661f6793 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/core/v3/resolver.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver_vtproto.pb.go new file mode 100644 index 00000000000..5ae614bb279 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver_vtproto.pb.go @@ -0,0 +1,163 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/resolver.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *DnsResolverOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DnsResolverOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DnsResolverOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.NoDefaultSearchDomain { + i-- + if m.NoDefaultSearchDomain { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.UseTcpForDnsLookups { + i-- + if m.UseTcpForDnsLookups { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *DnsResolutionConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DnsResolutionConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DnsResolutionConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DnsResolverOptions != nil { + size, err := m.DnsResolverOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Resolvers) > 0 { + for iNdEx := len(m.Resolvers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Resolvers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DnsResolverOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UseTcpForDnsLookups { + n += 2 + } + if m.NoDefaultSearchDomain { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *DnsResolutionConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Resolvers) > 0 { + for _, e := range m.Resolvers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.DnsResolverOptions != nil { + l = m.DnsResolverOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.go index 646973476e0..2b684f57b67 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.go @@ -1,7 +1,7 @@ // Code generated by 
protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/core/v3/socket_option.proto package corev3 @@ -96,7 +96,7 @@ func (SocketOption_SocketState) EnumDescriptor() ([]byte, []int) { // :ref:`admin's ` socket_options etc. // // It should be noted that the name or level may have different values on different platforms. -// [#next-free-field: 7] +// [#next-free-field: 8] type SocketOption struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -117,6 +117,9 @@ type SocketOption struct { // The state in which the option will be applied. When used in BindConfig // STATE_PREBIND is currently the only valid value. State SocketOption_SocketState `protobuf:"varint,6,opt,name=state,proto3,enum=envoy.config.core.v3.SocketOption_SocketState" json:"state,omitempty"` + // Apply the socket option to the specified `socket type `_. + // If not specified, the socket option will be applied to all socket types. + Type *SocketOption_SocketType `protobuf:"bytes,7,opt,name=type,proto3" json:"type,omitempty"` } func (x *SocketOption) Reset() { @@ -200,6 +203,13 @@ func (x *SocketOption) GetState() SocketOption_SocketState { return SocketOption_STATE_PREBIND } +func (x *SocketOption) GetType() *SocketOption_SocketType { + if x != nil { + return x.Type + } + return nil +} + type isSocketOption_Value interface { isSocketOption_Value() } @@ -265,6 +275,148 @@ func (x *SocketOptionsOverride) GetSocketOptions() []*SocketOption { return nil } +// The `socket type `_ to apply the socket option to. +// Only one field should be set. If multiple fields are set, the precedence order will determine +// the selected one. If none of the fields is set, the socket option will be applied to all socket types. +// +// For example: +// If :ref:`stream ` is set, +// it takes precedence over :ref:`datagram `. +type SocketOption_SocketType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Apply the socket option to the stream socket type. + Stream *SocketOption_SocketType_Stream `protobuf:"bytes,1,opt,name=stream,proto3" json:"stream,omitempty"` + // Apply the socket option to the datagram socket type. + Datagram *SocketOption_SocketType_Datagram `protobuf:"bytes,2,opt,name=datagram,proto3" json:"datagram,omitempty"` +} + +func (x *SocketOption_SocketType) Reset() { + *x = SocketOption_SocketType{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SocketOption_SocketType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SocketOption_SocketType) ProtoMessage() {} + +func (x *SocketOption_SocketType) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SocketOption_SocketType.ProtoReflect.Descriptor instead. 
+func (*SocketOption_SocketType) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_socket_option_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *SocketOption_SocketType) GetStream() *SocketOption_SocketType_Stream { + if x != nil { + return x.Stream + } + return nil +} + +func (x *SocketOption_SocketType) GetDatagram() *SocketOption_SocketType_Datagram { + if x != nil { + return x.Datagram + } + return nil +} + +// The stream socket type. +type SocketOption_SocketType_Stream struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *SocketOption_SocketType_Stream) Reset() { + *x = SocketOption_SocketType_Stream{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SocketOption_SocketType_Stream) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SocketOption_SocketType_Stream) ProtoMessage() {} + +func (x *SocketOption_SocketType_Stream) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SocketOption_SocketType_Stream.ProtoReflect.Descriptor instead. +func (*SocketOption_SocketType_Stream) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_socket_option_proto_rawDescGZIP(), []int{0, 0, 0} +} + +// The datagram socket type. +type SocketOption_SocketType_Datagram struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *SocketOption_SocketType_Datagram) Reset() { + *x = SocketOption_SocketType_Datagram{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SocketOption_SocketType_Datagram) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SocketOption_SocketType_Datagram) ProtoMessage() {} + +func (x *SocketOption_SocketType_Datagram) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SocketOption_SocketType_Datagram.ProtoReflect.Descriptor instead. 
+func (*SocketOption_SocketType_Datagram) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_socket_option_proto_rawDescGZIP(), []int{0, 0, 1} +} + var File_envoy_config_core_v3_socket_option_proto protoreflect.FileDescriptor var file_envoy_config_core_v3_socket_option_proto_rawDesc = []byte{ @@ -277,7 +429,7 @@ var file_envoy_config_core_v3_socket_option_proto_rawDesc = []byte{ 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe5, 0x02, 0x0a, 0x0c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xef, 0x04, 0x0a, 0x0c, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, @@ -292,30 +444,47 @@ var file_envoy_config_core_v3_socket_option_proto_rawDesc = []byte{ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, - 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x46, 0x0a, 0x0b, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x50, - 0x52, 0x45, 0x42, 0x49, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x41, 0x54, - 0x45, 0x5f, 0x42, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, - 0x54, 0x45, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x45, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x3a, 0x25, - 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x03, - 0xf8, 0x42, 0x01, 0x22, 0x62, 0x0a, 0x15, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x49, 0x0a, 0x0e, - 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x41, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, - 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x85, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, - 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, - 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x11, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 
0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, - 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, - 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0xc4, 0x01, 0x0a, 0x0a, 0x53, 0x6f, + 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4c, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x06, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x52, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x67, 0x72, + 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x67, 0x72, 0x61, 0x6d, + 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x67, 0x72, 0x61, 0x6d, 0x1a, 0x08, 0x0a, 0x06, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x1a, 0x0a, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x67, 0x72, 0x61, 0x6d, + 0x22, 0x46, 0x0a, 0x0b, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x11, 0x0a, 0x0d, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x52, 0x45, 0x42, 0x49, 0x4e, 0x44, + 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x55, 0x4e, + 0x44, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4c, 0x49, 0x53, + 0x54, 0x45, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, + 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x0c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x62, 0x0a, + 0x15, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4f, 0x76, + 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x49, 0x0a, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x42, 0x85, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x42, 0x11, 0x53, 0x6f, 0x63, 
0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, + 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( @@ -331,20 +500,26 @@ func file_envoy_config_core_v3_socket_option_proto_rawDescGZIP() []byte { } var file_envoy_config_core_v3_socket_option_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_envoy_config_core_v3_socket_option_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_config_core_v3_socket_option_proto_msgTypes = make([]protoimpl.MessageInfo, 5) var file_envoy_config_core_v3_socket_option_proto_goTypes = []interface{}{ - (SocketOption_SocketState)(0), // 0: envoy.config.core.v3.SocketOption.SocketState - (*SocketOption)(nil), // 1: envoy.config.core.v3.SocketOption - (*SocketOptionsOverride)(nil), // 2: envoy.config.core.v3.SocketOptionsOverride + (SocketOption_SocketState)(0), // 0: envoy.config.core.v3.SocketOption.SocketState + (*SocketOption)(nil), // 1: envoy.config.core.v3.SocketOption + (*SocketOptionsOverride)(nil), // 2: envoy.config.core.v3.SocketOptionsOverride + (*SocketOption_SocketType)(nil), // 3: envoy.config.core.v3.SocketOption.SocketType + (*SocketOption_SocketType_Stream)(nil), // 4: envoy.config.core.v3.SocketOption.SocketType.Stream + (*SocketOption_SocketType_Datagram)(nil), // 5: envoy.config.core.v3.SocketOption.SocketType.Datagram } var file_envoy_config_core_v3_socket_option_proto_depIdxs = []int32{ 0, // 0: envoy.config.core.v3.SocketOption.state:type_name -> envoy.config.core.v3.SocketOption.SocketState - 1, // 1: envoy.config.core.v3.SocketOptionsOverride.socket_options:type_name -> envoy.config.core.v3.SocketOption - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name + 3, // 1: envoy.config.core.v3.SocketOption.type:type_name -> envoy.config.core.v3.SocketOption.SocketType + 1, // 2: envoy.config.core.v3.SocketOptionsOverride.socket_options:type_name -> envoy.config.core.v3.SocketOption + 4, // 3: envoy.config.core.v3.SocketOption.SocketType.stream:type_name -> envoy.config.core.v3.SocketOption.SocketType.Stream + 5, // 4: envoy.config.core.v3.SocketOption.SocketType.datagram:type_name -> envoy.config.core.v3.SocketOption.SocketType.Datagram + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name } func init() { file_envoy_config_core_v3_socket_option_proto_init() } @@ -377,6 +552,42 @@ func file_envoy_config_core_v3_socket_option_proto_init() { return nil } } + file_envoy_config_core_v3_socket_option_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SocketOption_SocketType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
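// [editor's aside — illustrative Go sketch, not part of the vendored patch]
// The registration changes above grow this file from 2 to 5 messages to
// cover the new SocketOption.SocketType nested types. A minimal,
// hypothetical construction of the new field, assuming the vendored
// corev3 package at the import path used throughout this diff:
package main

import (
	"fmt"

	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
)

func main() {
	opt := &corev3.SocketOption{
		Description: "enable TCP keepalive",
		Level:       1, // e.g. SOL_SOCKET on Linux (illustrative values)
		Name:        9, // e.g. SO_KEEPALIVE on Linux
		Value:       &corev3.SocketOption_IntValue{IntValue: 1},
		// New in this revision: constrain the option to stream sockets.
		Type: &corev3.SocketOption_SocketType{
			Stream: &corev3.SocketOption_SocketType_Stream{},
		},
	}
	fmt.Println(opt.GetType().GetStream() != nil) // true
}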
file_envoy_config_core_v3_socket_option_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SocketOption_SocketType_Stream); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_socket_option_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SocketOption_SocketType_Datagram); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } file_envoy_config_core_v3_socket_option_proto_msgTypes[0].OneofWrappers = []interface{}{ (*SocketOption_IntValue)(nil), @@ -388,7 +599,7 @@ func file_envoy_config_core_v3_socket_option_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_config_core_v3_socket_option_proto_rawDesc, NumEnums: 1, - NumMessages: 2, + NumMessages: 5, NumExtensions: 0, NumServices: 0, }, diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.validate.go index dc0b53f5518..944e08984c7 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/core/v3/socket_option.proto @@ -74,6 +75,35 @@ func (m *SocketOption) validate(all bool) error { errors = append(errors, err) } + if all { + switch v := interface{}(m.GetType()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SocketOptionValidationError{ + field: "Type", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SocketOptionValidationError{ + field: "Type", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetType()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SocketOptionValidationError{ + field: "Type", + reason: "embedded message failed validation", + cause: err, + } + } + } + oneofValuePresent := false switch v := m.Value.(type) { case *SocketOption_IntValue: @@ -328,3 +358,371 @@ var _ interface { Cause() error ErrorName() string } = SocketOptionsOverrideValidationError{} + +// Validate checks the field values on SocketOption_SocketType with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SocketOption_SocketType) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SocketOption_SocketType with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SocketOption_SocketTypeMultiError, or nil if none found. 
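// [editor's aside — illustrative Go sketch, not part of the vendored patch]
// The protoc-gen-validate output that follows implements the usual
// contract: Validate returns the first violation and stops, while
// ValidateAll collects every violation into a MultiError. A sketch of
// consuming that contract:
package main

import (
	"fmt"

	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
)

func main() {
	st := &corev3.SocketOption_SocketType{}
	if err := st.ValidateAll(); err != nil {
		// ValidateAll returns SocketOption_SocketTypeMultiError; AllErrors
		// exposes each individual violation for logging or aggregation.
		if multi, ok := err.(corev3.SocketOption_SocketTypeMultiError); ok {
			for _, e := range multi.AllErrors() {
				fmt.Println(e)
			}
		}
	}
}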
+func (m *SocketOption_SocketType) ValidateAll() error { + return m.validate(true) +} + +func (m *SocketOption_SocketType) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetStream()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SocketOption_SocketTypeValidationError{ + field: "Stream", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SocketOption_SocketTypeValidationError{ + field: "Stream", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStream()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SocketOption_SocketTypeValidationError{ + field: "Stream", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetDatagram()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SocketOption_SocketTypeValidationError{ + field: "Datagram", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SocketOption_SocketTypeValidationError{ + field: "Datagram", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDatagram()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SocketOption_SocketTypeValidationError{ + field: "Datagram", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return SocketOption_SocketTypeMultiError(errors) + } + + return nil +} + +// SocketOption_SocketTypeMultiError is an error wrapping multiple validation +// errors returned by SocketOption_SocketType.ValidateAll() if the designated +// constraints aren't met. +type SocketOption_SocketTypeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SocketOption_SocketTypeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SocketOption_SocketTypeMultiError) AllErrors() []error { return m } + +// SocketOption_SocketTypeValidationError is the validation error returned by +// SocketOption_SocketType.Validate if the designated constraints aren't met. +type SocketOption_SocketTypeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SocketOption_SocketTypeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SocketOption_SocketTypeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SocketOption_SocketTypeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SocketOption_SocketTypeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e SocketOption_SocketTypeValidationError) ErrorName() string { + return "SocketOption_SocketTypeValidationError" +} + +// Error satisfies the builtin error interface +func (e SocketOption_SocketTypeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSocketOption_SocketType.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SocketOption_SocketTypeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SocketOption_SocketTypeValidationError{} + +// Validate checks the field values on SocketOption_SocketType_Stream with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SocketOption_SocketType_Stream) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SocketOption_SocketType_Stream with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// SocketOption_SocketType_StreamMultiError, or nil if none found. +func (m *SocketOption_SocketType_Stream) ValidateAll() error { + return m.validate(true) +} + +func (m *SocketOption_SocketType_Stream) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return SocketOption_SocketType_StreamMultiError(errors) + } + + return nil +} + +// SocketOption_SocketType_StreamMultiError is an error wrapping multiple +// validation errors returned by SocketOption_SocketType_Stream.ValidateAll() +// if the designated constraints aren't met. +type SocketOption_SocketType_StreamMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SocketOption_SocketType_StreamMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SocketOption_SocketType_StreamMultiError) AllErrors() []error { return m } + +// SocketOption_SocketType_StreamValidationError is the validation error +// returned by SocketOption_SocketType_Stream.Validate if the designated +// constraints aren't met. +type SocketOption_SocketType_StreamValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SocketOption_SocketType_StreamValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SocketOption_SocketType_StreamValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SocketOption_SocketType_StreamValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SocketOption_SocketType_StreamValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e SocketOption_SocketType_StreamValidationError) ErrorName() string { + return "SocketOption_SocketType_StreamValidationError" +} + +// Error satisfies the builtin error interface +func (e SocketOption_SocketType_StreamValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSocketOption_SocketType_Stream.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SocketOption_SocketType_StreamValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SocketOption_SocketType_StreamValidationError{} + +// Validate checks the field values on SocketOption_SocketType_Datagram with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *SocketOption_SocketType_Datagram) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SocketOption_SocketType_Datagram with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// SocketOption_SocketType_DatagramMultiError, or nil if none found. +func (m *SocketOption_SocketType_Datagram) ValidateAll() error { + return m.validate(true) +} + +func (m *SocketOption_SocketType_Datagram) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return SocketOption_SocketType_DatagramMultiError(errors) + } + + return nil +} + +// SocketOption_SocketType_DatagramMultiError is an error wrapping multiple +// validation errors returned by +// SocketOption_SocketType_Datagram.ValidateAll() if the designated +// constraints aren't met. +type SocketOption_SocketType_DatagramMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SocketOption_SocketType_DatagramMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SocketOption_SocketType_DatagramMultiError) AllErrors() []error { return m } + +// SocketOption_SocketType_DatagramValidationError is the validation error +// returned by SocketOption_SocketType_Datagram.Validate if the designated +// constraints aren't met. +type SocketOption_SocketType_DatagramValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SocketOption_SocketType_DatagramValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SocketOption_SocketType_DatagramValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SocketOption_SocketType_DatagramValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SocketOption_SocketType_DatagramValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e SocketOption_SocketType_DatagramValidationError) ErrorName() string { + return "SocketOption_SocketType_DatagramValidationError" +} + +// Error satisfies the builtin error interface +func (e SocketOption_SocketType_DatagramValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSocketOption_SocketType_Datagram.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SocketOption_SocketType_DatagramValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SocketOption_SocketType_DatagramValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option_vtproto.pb.go new file mode 100644 index 00000000000..75f5db51229 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option_vtproto.pb.go @@ -0,0 +1,391 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/socket_option.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *SocketOption_SocketType_Stream) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SocketOption_SocketType_Stream) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketOption_SocketType_Stream) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *SocketOption_SocketType_Datagram) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SocketOption_SocketType_Datagram) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketOption_SocketType_Datagram) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *SocketOption_SocketType) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SocketOption_SocketType) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketOption_SocketType) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Datagram != nil { + size, err := m.Datagram.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Stream != nil { + size, err := m.Stream.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SocketOption) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SocketOption) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketOption) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Type != nil { + size, err := m.Type.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.State != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x30 + } + if msg, ok := m.Value.(*SocketOption_BufValue); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Value.(*SocketOption_IntValue); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Name != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Name)) + i-- + dAtA[i] = 0x18 + } + if m.Level != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Level)) + i-- + dAtA[i] = 0x10 + } + if len(m.Description) > 0 { + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SocketOption_IntValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketOption_IntValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.IntValue)) + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *SocketOption_BufValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketOption_BufValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.BufValue) + copy(dAtA[i:], m.BufValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.BufValue))) + i-- + 
dAtA[i] = 0x2a + return len(dAtA) - i, nil +} +func (m *SocketOptionsOverride) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SocketOptionsOverride) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketOptionsOverride) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.SocketOptions) > 0 { + for iNdEx := len(m.SocketOptions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.SocketOptions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SocketOption_SocketType_Stream) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *SocketOption_SocketType_Datagram) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *SocketOption_SocketType) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Stream != nil { + l = m.Stream.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Datagram != nil { + l = m.Datagram.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SocketOption) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Description) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Level != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Level)) + } + if m.Name != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Name)) + } + if vtmsg, ok := m.Value.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.State != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.State)) + } + if m.Type != nil { + l = m.Type.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SocketOption_IntValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.IntValue)) + return n +} +func (m *SocketOption_BufValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.BufValue) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *SocketOptionsOverride) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.SocketOptions) > 0 { + for _, e := range m.SocketOptions { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.go index 6692c1d5d09..8c7fda3a1a4 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.go +++ 
b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/core/v3/substitution_format_string.proto package corev3 @@ -10,9 +10,9 @@ import ( _ "github.com/cncf/xds/go/udpa/annotations" _ "github.com/envoyproxy/go-control-plane/envoy/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" - _struct "github.com/golang/protobuf/ptypes/struct" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" reflect "reflect" sync "sync" ) @@ -158,7 +158,7 @@ func (x *SubstitutionFormatString) GetTextFormat() string { return "" } -func (x *SubstitutionFormatString) GetJsonFormat() *_struct.Struct { +func (x *SubstitutionFormatString) GetJsonFormat() *structpb.Struct { if x, ok := x.GetFormat().(*SubstitutionFormatString_JsonFormat); ok { return x.JsonFormat } @@ -251,7 +251,7 @@ type SubstitutionFormatString_JsonFormat struct { // "status": 500, // "message": "My error message" // } - JsonFormat *_struct.Struct `protobuf:"bytes,2,opt,name=json_format,json=jsonFormat,proto3,oneof"` + JsonFormat *structpb.Struct `protobuf:"bytes,2,opt,name=json_format,json=jsonFormat,proto3,oneof"` } type SubstitutionFormatString_TextFormatSource struct { @@ -364,7 +364,7 @@ var file_envoy_config_core_v3_substitution_format_string_proto_msgTypes = make([ var file_envoy_config_core_v3_substitution_format_string_proto_goTypes = []interface{}{ (*JsonFormatOptions)(nil), // 0: envoy.config.core.v3.JsonFormatOptions (*SubstitutionFormatString)(nil), // 1: envoy.config.core.v3.SubstitutionFormatString - (*_struct.Struct)(nil), // 2: google.protobuf.Struct + (*structpb.Struct)(nil), // 2: google.protobuf.Struct (*DataSource)(nil), // 3: envoy.config.core.v3.DataSource (*TypedExtensionConfig)(nil), // 4: envoy.config.core.v3.TypedExtensionConfig } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.validate.go index c805d82866a..f21251e3200 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/core/v3/substitution_format_string.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string_vtproto.pb.go new file mode 100644 index 00000000000..2887f4d1a07 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string_vtproto.pb.go @@ -0,0 +1,298 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
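// [editor's aside — illustrative Go sketch, not part of the vendored patch]
// Every *_vtproto.pb.go file added in this bump carries the
// `//go:build vtprotobuf` constraint, so these fast-path marshalers are
// compiled only with `go build -tags vtprotobuf`; without the tag the
// standard proto.Marshal path applies. Assuming a build with the tag,
// using the socket_option marshalers generated above:
package main

import (
	"fmt"

	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
)

func main() {
	opt := &corev3.SocketOption{
		Value: &corev3.SocketOption_IntValue{IntValue: 1},
	}
	// MarshalVTStrict sizes the buffer with SizeVT, then encodes fields
	// back-to-front into the tail of the buffer.
	b, err := opt.MarshalVTStrict()
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d bytes\n", len(b))
}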
+// source: envoy/config/core/v3/substitution_format_string.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *JsonFormatOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JsonFormatOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *JsonFormatOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SortProperties { + i-- + if m.SortProperties { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SubstitutionFormatString) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubstitutionFormatString) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubstitutionFormatString) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.JsonFormatOptions != nil { + size, err := m.JsonFormatOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if len(m.Formatters) > 0 { + for iNdEx := len(m.Formatters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Formatters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } + if msg, ok := m.Format.(*SubstitutionFormatString_TextFormatSource); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.ContentType) > 0 { + i -= len(m.ContentType) + copy(dAtA[i:], m.ContentType) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ContentType))) + i-- + dAtA[i] = 0x22 + } + if m.OmitEmptyValues { + i-- + if m.OmitEmptyValues { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if msg, ok := m.Format.(*SubstitutionFormatString_JsonFormat); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Format.(*SubstitutionFormatString_TextFormat); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - 
i, nil +} + +func (m *SubstitutionFormatString_TextFormat) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubstitutionFormatString_TextFormat) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.TextFormat) + copy(dAtA[i:], m.TextFormat) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TextFormat))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *SubstitutionFormatString_JsonFormat) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubstitutionFormatString_JsonFormat) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.JsonFormat != nil { + size, err := (*structpb.Struct)(m.JsonFormat).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *SubstitutionFormatString_TextFormatSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubstitutionFormatString_TextFormatSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TextFormatSource != nil { + size, err := m.TextFormatSource.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *JsonFormatOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SortProperties { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SubstitutionFormatString) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Format.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.OmitEmptyValues { + n += 2 + } + l = len(m.ContentType) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Formatters) > 0 { + for _, e := range m.Formatters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.JsonFormatOptions != nil { + l = m.JsonFormatOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SubstitutionFormatString_TextFormat) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TextFormat) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *SubstitutionFormatString_JsonFormat) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.JsonFormat != nil { + l = (*structpb.Struct)(m.JsonFormat).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *SubstitutionFormatString_TextFormatSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TextFormatSource != nil { + l = m.TextFormatSource.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.go 
b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.go index b7f9f8229b9..ebef4e64254 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/core/v3/udp_socket_config.proto package corev3 @@ -9,9 +9,9 @@ package corev3 import ( _ "github.com/cncf/xds/go/udpa/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -32,13 +32,13 @@ type UdpSocketConfig struct { // The maximum size of received UDP datagrams. Using a larger size will cause Envoy to allocate // more memory per socket. Received datagrams above this size will be dropped. If not set // defaults to 1500 bytes. - MaxRxDatagramSize *wrappers.UInt64Value `protobuf:"bytes,1,opt,name=max_rx_datagram_size,json=maxRxDatagramSize,proto3" json:"max_rx_datagram_size,omitempty"` + MaxRxDatagramSize *wrapperspb.UInt64Value `protobuf:"bytes,1,opt,name=max_rx_datagram_size,json=maxRxDatagramSize,proto3" json:"max_rx_datagram_size,omitempty"` // Configures whether Generic Receive Offload (GRO) // _ is preferred when reading from the // UDP socket. The default is context dependent and is documented where UdpSocketConfig is used. // This option affects performance but not functionality. If GRO is not supported by the operating // system, non-GRO receive will be used. 
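// [editor's aside — illustrative Go sketch, not part of the vendored patch]
// This hunk replaces the deprecated github.com/golang/protobuf ptypes
// aliases with the canonical wrapperspb types. Constructing the message
// with the new imports might look like this:
package main

import (
	"fmt"

	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	cfg := &corev3.UdpSocketConfig{
		// Received datagrams above this size are dropped; the documented
		// default when unset is 1500 bytes.
		MaxRxDatagramSize: wrapperspb.UInt64(9000),
		PreferGro:         wrapperspb.Bool(true),
	}
	fmt.Println(cfg.GetMaxRxDatagramSize().GetValue(), cfg.GetPreferGro().GetValue())
}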
- PreferGro *wrappers.BoolValue `protobuf:"bytes,2,opt,name=prefer_gro,json=preferGro,proto3" json:"prefer_gro,omitempty"` + PreferGro *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=prefer_gro,json=preferGro,proto3" json:"prefer_gro,omitempty"` } func (x *UdpSocketConfig) Reset() { @@ -73,14 +73,14 @@ func (*UdpSocketConfig) Descriptor() ([]byte, []int) { return file_envoy_config_core_v3_udp_socket_config_proto_rawDescGZIP(), []int{0} } -func (x *UdpSocketConfig) GetMaxRxDatagramSize() *wrappers.UInt64Value { +func (x *UdpSocketConfig) GetMaxRxDatagramSize() *wrapperspb.UInt64Value { if x != nil { return x.MaxRxDatagramSize } return nil } -func (x *UdpSocketConfig) GetPreferGro() *wrappers.BoolValue { +func (x *UdpSocketConfig) GetPreferGro() *wrapperspb.BoolValue { if x != nil { return x.PreferGro } @@ -136,9 +136,9 @@ func file_envoy_config_core_v3_udp_socket_config_proto_rawDescGZIP() []byte { var file_envoy_config_core_v3_udp_socket_config_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_envoy_config_core_v3_udp_socket_config_proto_goTypes = []interface{}{ - (*UdpSocketConfig)(nil), // 0: envoy.config.core.v3.UdpSocketConfig - (*wrappers.UInt64Value)(nil), // 1: google.protobuf.UInt64Value - (*wrappers.BoolValue)(nil), // 2: google.protobuf.BoolValue + (*UdpSocketConfig)(nil), // 0: envoy.config.core.v3.UdpSocketConfig + (*wrapperspb.UInt64Value)(nil), // 1: google.protobuf.UInt64Value + (*wrapperspb.BoolValue)(nil), // 2: google.protobuf.BoolValue } var file_envoy_config_core_v3_udp_socket_config_proto_depIdxs = []int32{ 1, // 0: envoy.config.core.v3.UdpSocketConfig.max_rx_datagram_size:type_name -> google.protobuf.UInt64Value diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.validate.go index f0c17960d10..f977eda2f51 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/core/v3/udp_socket_config.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config_vtproto.pb.go new file mode 100644 index 00000000000..2809993aec2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config_vtproto.pb.go @@ -0,0 +1,91 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/udp_socket_config.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *UdpSocketConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UdpSocketConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UdpSocketConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PreferGro != nil { + size, err := (*wrapperspb.BoolValue)(m.PreferGro).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MaxRxDatagramSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.MaxRxDatagramSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UdpSocketConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxRxDatagramSize != nil { + l = (*wrapperspb.UInt64Value)(m.MaxRxDatagramSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PreferGro != nil { + l = (*wrapperspb.BoolValue)(m.PreferGro).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.go index 6472d1120a2..96122a01d05 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/endpoint/v3/endpoint.proto package endpointv3 @@ -10,10 +10,10 @@ import ( _ "github.com/cncf/xds/go/udpa/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - duration "github.com/golang/protobuf/ptypes/duration" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -144,6 +144,12 @@ type ClusterLoadAssignment_Policy struct { // // Envoy supports only one element and will NACK if more than one element is present. // Other xDS-capable data planes will not necessarily have this limitation. + // + // In Envoy, this “drop_overloads“ config can be overridden by a runtime key + // "load_balancing_policy.drop_overload_limit" setting. This runtime key can be set to + // any integer number between 0 and 100. 0 means drop 0%. 100 means drop 100%. 
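// [editor's aside — illustrative Go sketch, not part of the vendored patch]
// Per the drop_overloads comment here: the runtime key
// "load_balancing_policy.drop_overload_limit" takes an integer in
// [0, 100], and when both it and the drop_overloads config are set, the
// minimum of the two applies. effectiveDropPercent is a hypothetical
// helper illustrating that rule, not an Envoy API:
package main

import "fmt"

func effectiveDropPercent(configPercent, runtimeLimit uint32) uint32 {
	if runtimeLimit < configPercent {
		return runtimeLimit
	}
	return configPercent
}

func main() {
	// Config requests a 20% drop, the runtime key caps it at 5%: 5% wins.
	fmt.Println(effectiveDropPercent(20, 5))
}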
+ // When both “drop_overloads“ config and "load_balancing_policy.drop_overload_limit" + // setting are in place, the min of these two wins. DropOverloads []*ClusterLoadAssignment_Policy_DropOverload `protobuf:"bytes,2,rep,name=drop_overloads,json=dropOverloads,proto3" json:"drop_overloads,omitempty"` // Priority levels and localities are considered overprovisioned with this // factor (in percentage). This means that we don't consider a priority @@ -159,12 +165,12 @@ type ClusterLoadAssignment_Policy struct { // // Read more at :ref:`priority levels ` and // :ref:`localities `. - OverprovisioningFactor *wrappers.UInt32Value `protobuf:"bytes,3,opt,name=overprovisioning_factor,json=overprovisioningFactor,proto3" json:"overprovisioning_factor,omitempty"` + OverprovisioningFactor *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=overprovisioning_factor,json=overprovisioningFactor,proto3" json:"overprovisioning_factor,omitempty"` // The max time until which the endpoints from this assignment can be used. // If no new assignments are received before this time expires the endpoints // are considered stale and should be marked unhealthy. // Defaults to 0 which means endpoints never go stale. - EndpointStaleAfter *duration.Duration `protobuf:"bytes,4,opt,name=endpoint_stale_after,json=endpointStaleAfter,proto3" json:"endpoint_stale_after,omitempty"` + EndpointStaleAfter *durationpb.Duration `protobuf:"bytes,4,opt,name=endpoint_stale_after,json=endpointStaleAfter,proto3" json:"endpoint_stale_after,omitempty"` // If true, use the :ref:`load balancing weight // ` of healthy and unhealthy // hosts to determine the health of the priority level. If false, use the number of healthy and unhealthy hosts @@ -215,14 +221,14 @@ func (x *ClusterLoadAssignment_Policy) GetDropOverloads() []*ClusterLoadAssignme return nil } -func (x *ClusterLoadAssignment_Policy) GetOverprovisioningFactor() *wrappers.UInt32Value { +func (x *ClusterLoadAssignment_Policy) GetOverprovisioningFactor() *wrapperspb.UInt32Value { if x != nil { return x.OverprovisioningFactor } return nil } -func (x *ClusterLoadAssignment_Policy) GetEndpointStaleAfter() *duration.Duration { +func (x *ClusterLoadAssignment_Policy) GetEndpointStaleAfter() *durationpb.Duration { if x != nil { return x.EndpointStaleAfter } @@ -416,8 +422,8 @@ var file_envoy_config_endpoint_v3_endpoint_proto_goTypes = []interface{}{ nil, // 2: envoy.config.endpoint.v3.ClusterLoadAssignment.NamedEndpointsEntry (*ClusterLoadAssignment_Policy_DropOverload)(nil), // 3: envoy.config.endpoint.v3.ClusterLoadAssignment.Policy.DropOverload (*LocalityLbEndpoints)(nil), // 4: envoy.config.endpoint.v3.LocalityLbEndpoints - (*wrappers.UInt32Value)(nil), // 5: google.protobuf.UInt32Value - (*duration.Duration)(nil), // 6: google.protobuf.Duration + (*wrapperspb.UInt32Value)(nil), // 5: google.protobuf.UInt32Value + (*durationpb.Duration)(nil), // 6: google.protobuf.Duration (*Endpoint)(nil), // 7: envoy.config.endpoint.v3.Endpoint (*v3.FractionalPercent)(nil), // 8: envoy.type.v3.FractionalPercent } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.validate.go index 1ed6aa20bd0..1b9f2c1fe3e 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.validate.go +++ 
b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/endpoint/v3/endpoint.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.go index 83f56bd7250..4cff3e6df05 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/endpoint/v3/endpoint_components.proto package endpointv3 @@ -10,9 +10,9 @@ import ( _ "github.com/cncf/xds/go/udpa/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -154,7 +154,7 @@ type LbEndpoint struct { // LocalityLbEndpoints. If unspecified, will be treated as 1. The sum // of the weights of all endpoints in the endpoint's locality must not // exceed uint32_t maximal value (4294967295). - LoadBalancingWeight *wrappers.UInt32Value `protobuf:"bytes,4,opt,name=load_balancing_weight,json=loadBalancingWeight,proto3" json:"load_balancing_weight,omitempty"` + LoadBalancingWeight *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=load_balancing_weight,json=loadBalancingWeight,proto3" json:"load_balancing_weight,omitempty"` } func (x *LbEndpoint) Reset() { @@ -224,7 +224,7 @@ func (x *LbEndpoint) GetMetadata() *v3.Metadata { return nil } -func (x *LbEndpoint) GetLoadBalancingWeight() *wrappers.UInt32Value { +func (x *LbEndpoint) GetLoadBalancingWeight() *wrapperspb.UInt32Value { if x != nil { return x.LoadBalancingWeight } @@ -311,7 +311,7 @@ func (x *LedsClusterLocalityConfig) GetLedsCollectionName() string { // A group of endpoints belonging to a Locality. // One can have multiple LocalityLbEndpoints for a locality, but only if // they have different priorities. -// [#next-free-field: 9] +// [#next-free-field: 10] type LocalityLbEndpoints struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -319,6 +319,8 @@ type LocalityLbEndpoints struct { // Identifies location of where the upstream hosts run. Locality *v3.Locality `protobuf:"bytes,1,opt,name=locality,proto3" json:"locality,omitempty"` + // Metadata to provide additional information about the locality endpoints in aggregate. + Metadata *v3.Metadata `protobuf:"bytes,9,opt,name=metadata,proto3" json:"metadata,omitempty"` // The group of endpoints belonging to the locality specified. // [#comment:TODO(adisuissa): Once LEDS is implemented this field needs to be // deprecated and replaced by “load_balancer_endpoints“.] @@ -341,7 +343,7 @@ type LocalityLbEndpoints struct { // configured. These weights are ignored otherwise. 
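// [editor's aside — illustrative Go sketch, not part of the vendored patch]
// This revision adds a Metadata field (tag 9) to LocalityLbEndpoints
// alongside the wrapperspb migration shown in this hunk. A minimal,
// hypothetical construction with the new field, assuming the vendored
// endpointv3 and corev3 packages from this diff:
package main

import (
	"fmt"

	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	endpointv3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	lle := &endpointv3.LocalityLbEndpoints{
		Locality: &corev3.Locality{Region: "us-east1"},
		// New in this revision: aggregate metadata for the locality.
		Metadata:            &corev3.Metadata{},
		LoadBalancingWeight: wrapperspb.UInt32(10),
	}
	fmt.Println(lle.GetMetadata() != nil, lle.GetLoadBalancingWeight().GetValue())
}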
If no weights are // specified when locality weighted load balancing is enabled, the locality is // assigned no load. - LoadBalancingWeight *wrappers.UInt32Value `protobuf:"bytes,3,opt,name=load_balancing_weight,json=loadBalancingWeight,proto3" json:"load_balancing_weight,omitempty"` + LoadBalancingWeight *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=load_balancing_weight,json=loadBalancingWeight,proto3" json:"load_balancing_weight,omitempty"` // Optional: the priority for this LocalityLbEndpoints. If unspecified this will // default to the highest priority (0). // @@ -358,7 +360,7 @@ type LocalityLbEndpoints struct { // This will be consumed by load balancing schemes that need proximity order // to determine where to route the requests. // [#not-implemented-hide:] - Proximity *wrappers.UInt32Value `protobuf:"bytes,6,opt,name=proximity,proto3" json:"proximity,omitempty"` + Proximity *wrapperspb.UInt32Value `protobuf:"bytes,6,opt,name=proximity,proto3" json:"proximity,omitempty"` } func (x *LocalityLbEndpoints) Reset() { @@ -400,6 +402,13 @@ func (x *LocalityLbEndpoints) GetLocality() *v3.Locality { return nil } +func (x *LocalityLbEndpoints) GetMetadata() *v3.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + func (x *LocalityLbEndpoints) GetLbEndpoints() []*LbEndpoint { if x != nil { return x.LbEndpoints @@ -428,7 +437,7 @@ func (x *LocalityLbEndpoints) GetLedsClusterLocalityConfig() *LedsClusterLocalit return nil } -func (x *LocalityLbEndpoints) GetLoadBalancingWeight() *wrappers.UInt32Value { +func (x *LocalityLbEndpoints) GetLoadBalancingWeight() *wrapperspb.UInt32Value { if x != nil { return x.LoadBalancingWeight } @@ -442,7 +451,7 @@ func (x *LocalityLbEndpoints) GetPriority() uint32 { return 0 } -func (x *LocalityLbEndpoints) GetProximity() *wrappers.UInt32Value { +func (x *LocalityLbEndpoints) GetProximity() *wrapperspb.UInt32Value { if x != nil { return x.Proximity } @@ -758,64 +767,67 @@ var file_envoy_config_endpoint_v3_endpoint_components_proto_rawDesc = []byte{ 0x0a, 0x6c, 0x65, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x30, 0x0a, 0x14, 0x6c, 0x65, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6c, 0x65, 0x64, 0x73, 0x43, - 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xe1, 0x05, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x9d, 0x06, 0x0a, 0x13, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, - 0x79, 0x12, 0x47, 0x0a, 0x0c, 0x6c, 0x62, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x79, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 
0x74, 0x61, 0x12, 0x47, 0x0a, + 0x0c, 0x6c, 0x62, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x4c, + 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0b, 0x6c, 0x62, 0x45, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x76, 0x0a, 0x17, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, - 0x76, 0x33, 0x2e, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0b, 0x6c, - 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x76, 0x0a, 0x17, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x64, 0x70, - 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, + 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x45, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x2e, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x65, 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x76, + 0x0a, 0x1c, 0x6c, 0x65, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, + 0x4c, 0x65, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x6c, + 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x19, 0x6c, 0x65, 0x64, + 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x59, 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x13, 0x6c, 0x6f, + 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x57, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x12, 0x24, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0d, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, 0x80, 0x01, 0x52, 0x08, 0x70, + 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x78, 0x69, + 0x6d, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, + 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x78, 0x69, 0x6d, + 0x69, 0x74, 0x79, 0x1a, 0x59, 0x0a, 0x0e, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 
0x6e, + 0x74, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x47, 0x0a, 0x0c, 0x6c, 0x62, 0x5f, 0x65, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, - 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x2e, 0x4c, 0x62, 0x45, 0x6e, 0x64, - 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x15, 0x6c, 0x6f, 0x61, - 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, - 0x74, 0x73, 0x12, 0x76, 0x0a, 0x1c, 0x6c, 0x65, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, - 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x65, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, - 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, - 0x19, 0x6c, 0x65, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x63, 0x61, - 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x59, 0x0a, 0x15, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x77, 0x65, 0x69, - 0x67, 0x68, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, - 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, - 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x57, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x24, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, - 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, 0x80, - 0x01, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x09, 0x70, - 0x72, 0x6f, 0x78, 0x69, 0x6d, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x72, - 0x6f, 0x78, 0x69, 0x6d, 0x69, 0x74, 0x79, 0x1a, 0x59, 0x0a, 0x0e, 0x4c, 0x62, 0x45, 0x6e, 0x64, - 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x47, 0x0a, 0x0c, 0x6c, 0x62, 0x5f, - 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, - 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x62, 0x45, 0x6e, 0x64, - 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0b, 0x6c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, - 0x74, 0x73, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, - 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x73, 0x42, 0x0b, 0x0a, 0x09, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x42, 0x97, 
0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x26, 0x69, 0x6f, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, - 0x74, 0x2e, 0x76, 0x33, 0x42, 0x17, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, - 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2f, 0x76, 0x33, - 0x3b, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x52, 0x0b, 0x6c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x3a, 0x30, + 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, + 0x42, 0x0b, 0x0a, 0x09, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x97, 0x01, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, + 0x42, 0x17, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, + 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2f, 0x76, 0x33, 0x3b, 0x65, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -842,7 +854,7 @@ var file_envoy_config_endpoint_v3_endpoint_components_proto_goTypes = []interfac (*v3.Address)(nil), // 7: envoy.config.core.v3.Address (v3.HealthStatus)(0), // 8: envoy.config.core.v3.HealthStatus (*v3.Metadata)(nil), // 9: envoy.config.core.v3.Metadata - (*wrappers.UInt32Value)(nil), // 10: google.protobuf.UInt32Value + (*wrapperspb.UInt32Value)(nil), // 10: google.protobuf.UInt32Value (*v3.ConfigSource)(nil), // 11: envoy.config.core.v3.ConfigSource (*v3.Locality)(nil), // 12: envoy.config.core.v3.Locality } @@ -856,19 +868,20 @@ var file_envoy_config_endpoint_v3_endpoint_components_proto_depIdxs = []int32{ 10, // 6: envoy.config.endpoint.v3.LbEndpoint.load_balancing_weight:type_name -> google.protobuf.UInt32Value 11, // 7: envoy.config.endpoint.v3.LedsClusterLocalityConfig.leds_config:type_name -> envoy.config.core.v3.ConfigSource 12, // 8: envoy.config.endpoint.v3.LocalityLbEndpoints.locality:type_name -> envoy.config.core.v3.Locality - 1, // 9: 
envoy.config.endpoint.v3.LocalityLbEndpoints.lb_endpoints:type_name -> envoy.config.endpoint.v3.LbEndpoint - 6, // 10: envoy.config.endpoint.v3.LocalityLbEndpoints.load_balancer_endpoints:type_name -> envoy.config.endpoint.v3.LocalityLbEndpoints.LbEndpointList - 2, // 11: envoy.config.endpoint.v3.LocalityLbEndpoints.leds_cluster_locality_config:type_name -> envoy.config.endpoint.v3.LedsClusterLocalityConfig - 10, // 12: envoy.config.endpoint.v3.LocalityLbEndpoints.load_balancing_weight:type_name -> google.protobuf.UInt32Value - 10, // 13: envoy.config.endpoint.v3.LocalityLbEndpoints.proximity:type_name -> google.protobuf.UInt32Value - 7, // 14: envoy.config.endpoint.v3.Endpoint.HealthCheckConfig.address:type_name -> envoy.config.core.v3.Address - 7, // 15: envoy.config.endpoint.v3.Endpoint.AdditionalAddress.address:type_name -> envoy.config.core.v3.Address - 1, // 16: envoy.config.endpoint.v3.LocalityLbEndpoints.LbEndpointList.lb_endpoints:type_name -> envoy.config.endpoint.v3.LbEndpoint - 17, // [17:17] is the sub-list for method output_type - 17, // [17:17] is the sub-list for method input_type - 17, // [17:17] is the sub-list for extension type_name - 17, // [17:17] is the sub-list for extension extendee - 0, // [0:17] is the sub-list for field type_name + 9, // 9: envoy.config.endpoint.v3.LocalityLbEndpoints.metadata:type_name -> envoy.config.core.v3.Metadata + 1, // 10: envoy.config.endpoint.v3.LocalityLbEndpoints.lb_endpoints:type_name -> envoy.config.endpoint.v3.LbEndpoint + 6, // 11: envoy.config.endpoint.v3.LocalityLbEndpoints.load_balancer_endpoints:type_name -> envoy.config.endpoint.v3.LocalityLbEndpoints.LbEndpointList + 2, // 12: envoy.config.endpoint.v3.LocalityLbEndpoints.leds_cluster_locality_config:type_name -> envoy.config.endpoint.v3.LedsClusterLocalityConfig + 10, // 13: envoy.config.endpoint.v3.LocalityLbEndpoints.load_balancing_weight:type_name -> google.protobuf.UInt32Value + 10, // 14: envoy.config.endpoint.v3.LocalityLbEndpoints.proximity:type_name -> google.protobuf.UInt32Value + 7, // 15: envoy.config.endpoint.v3.Endpoint.HealthCheckConfig.address:type_name -> envoy.config.core.v3.Address + 7, // 16: envoy.config.endpoint.v3.Endpoint.AdditionalAddress.address:type_name -> envoy.config.core.v3.Address + 1, // 17: envoy.config.endpoint.v3.LocalityLbEndpoints.LbEndpointList.lb_endpoints:type_name -> envoy.config.endpoint.v3.LbEndpoint + 18, // [18:18] is the sub-list for method output_type + 18, // [18:18] is the sub-list for method input_type + 18, // [18:18] is the sub-list for extension type_name + 18, // [18:18] is the sub-list for extension extendee + 0, // [0:18] is the sub-list for field type_name } func init() { file_envoy_config_endpoint_v3_endpoint_components_proto_init() } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.validate.go index c668c86c7e3..f11d8c5088a 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. 
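The depIdxs shifts above exist because LocalityLbEndpoints gained a metadata field (proto tag 9), which adds one type_name entry and moves the sub-list offsets from 17 to 18. For orientation, a minimal sketch of populating the new field against the updated module; the filter key and values are illustrative, not taken from this patch:

    package main

    import (
        corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
        endpointv3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
        "google.golang.org/protobuf/types/known/structpb"
        "google.golang.org/protobuf/types/known/wrapperspb"
    )

    // localityEndpoints builds a LocalityLbEndpoints that exercises the new
    // locality-level Metadata field (field 9 in the descriptor above).
    func localityEndpoints() (*endpointv3.LocalityLbEndpoints, error) {
        md, err := structpb.NewStruct(map[string]interface{}{"canary": true})
        if err != nil {
            return nil, err
        }
        return &endpointv3.LocalityLbEndpoints{
            Locality: &corev3.Locality{Region: "us-central1", Zone: "us-central1-a"},
            Metadata: &corev3.Metadata{
                FilterMetadata: map[string]*structpb.Struct{"envoy.lb": md},
            },
            LoadBalancingWeight: wrapperspb.UInt32(10), // pgv rule in the descriptor requires >= 1 when set
        }, nil
    }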
// source: envoy/config/endpoint/v3/endpoint_components.proto @@ -619,6 +620,35 @@ func (m *LocalityLbEndpoints) validate(all bool) error { } } + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbEndpointsValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + for idx, item := range m.GetLbEndpoints() { _, _ = idx, item diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components_vtproto.pb.go new file mode 100644 index 00000000000..b86ffd0d1d2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components_vtproto.pb.go @@ -0,0 +1,896 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/endpoint/v3/endpoint_components.proto + +package endpointv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
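The validate.go hunk above wires the new Metadata field into the standard protoc-gen-validate shape: ValidateAll() recursively gathers every embedded-message failure, while Validate() returns on the first one. A usage sketch, assuming the updated module:

    import endpointv3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"

    // checkEndpoints prefers ValidateAll so a single pass reports every
    // problem, including failures nested in the new Metadata message;
    // Validate would stop at the first error instead.
    func checkEndpoints(msg *endpointv3.LocalityLbEndpoints) error {
        return msg.ValidateAll()
    }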
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Endpoint_HealthCheckConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint_HealthCheckConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Endpoint_HealthCheckConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DisableActiveHealthCheck { + i-- + if m.DisableActiveHealthCheck { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x12 + } + if m.PortValue != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.PortValue)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Endpoint_AdditionalAddress) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint_AdditionalAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Endpoint_AdditionalAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Endpoint) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Endpoint) 
MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.AdditionalAddresses) > 0 { + for iNdEx := len(m.AdditionalAddresses) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.AdditionalAddresses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x1a + } + if m.HealthCheckConfig != nil { + size, err := m.HealthCheckConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LbEndpoint) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LbEndpoint) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LbEndpoint) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.HostIdentifier.(*LbEndpoint_EndpointName); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.LoadBalancingWeight != nil { + size, err := (*wrapperspb.UInt32Value)(m.LoadBalancingWeight).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.HealthStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HealthStatus)) + i-- + dAtA[i] = 0x10 + } + if msg, ok := m.HostIdentifier.(*LbEndpoint_Endpoint); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m 
*LbEndpoint_Endpoint) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LbEndpoint_Endpoint) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Endpoint != nil { + size, err := m.Endpoint.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *LbEndpoint_EndpointName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LbEndpoint_EndpointName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.EndpointName) + copy(dAtA[i:], m.EndpointName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.EndpointName))) + i-- + dAtA[i] = 0x2a + return len(dAtA) - i, nil +} +func (m *LedsClusterLocalityConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LedsClusterLocalityConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LedsClusterLocalityConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.LedsCollectionName) > 0 { + i -= len(m.LedsCollectionName) + copy(dAtA[i:], m.LedsCollectionName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LedsCollectionName))) + i-- + dAtA[i] = 0x12 + } + if m.LedsConfig != nil { + if vtmsg, ok := interface{}(m.LedsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LedsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LocalityLbEndpoints_LbEndpointList) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalityLbEndpoints_LbEndpointList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbEndpoints_LbEndpointList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.LbEndpoints) > 0 { + for iNdEx := len(m.LbEndpoints) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.LbEndpoints[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LocalityLbEndpoints) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalityLbEndpoints) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbEndpoints) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + if msg, ok := m.LbConfig.(*LocalityLbEndpoints_LedsClusterLocalityConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.LbConfig.(*LocalityLbEndpoints_LoadBalancerEndpoints); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Proximity != nil { + size, err := (*wrapperspb.UInt32Value)(m.Proximity).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.Priority != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x28 + } + if m.LoadBalancingWeight != nil { + size, err := (*wrapperspb.UInt32Value)(m.LoadBalancingWeight).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.LbEndpoints) > 0 { + for iNdEx := len(m.LbEndpoints) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.LbEndpoints[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.Locality != nil { + if vtmsg, ok := interface{}(m.Locality).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Locality) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LocalityLbEndpoints_LoadBalancerEndpoints) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbEndpoints_LoadBalancerEndpoints) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := 
len(dAtA) + if m.LoadBalancerEndpoints != nil { + size, err := m.LoadBalancerEndpoints.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *LocalityLbEndpoints_LedsClusterLocalityConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbEndpoints_LedsClusterLocalityConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LedsClusterLocalityConfig != nil { + size, err := m.LedsClusterLocalityConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *Endpoint_HealthCheckConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PortValue != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.PortValue)) + } + l = len(m.Hostname) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DisableActiveHealthCheck { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *Endpoint_AdditionalAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Endpoint) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HealthCheckConfig != nil { + l = m.HealthCheckConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Hostname) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.AdditionalAddresses) > 0 { + for _, e := range m.AdditionalAddresses { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *LbEndpoint) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.HostIdentifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.HealthStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HealthStatus)) + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LoadBalancingWeight != nil { + l = (*wrapperspb.UInt32Value)(m.LoadBalancingWeight).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LbEndpoint_Endpoint) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + 
if m.Endpoint != nil { + l = m.Endpoint.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *LbEndpoint_EndpointName) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.EndpointName) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *LedsClusterLocalityConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LedsConfig != nil { + if size, ok := interface{}(m.LedsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LedsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.LedsCollectionName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LocalityLbEndpoints_LbEndpointList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.LbEndpoints) > 0 { + for _, e := range m.LbEndpoints { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *LocalityLbEndpoints) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Locality != nil { + if size, ok := interface{}(m.Locality).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Locality) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.LbEndpoints) > 0 { + for _, e := range m.LbEndpoints { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.LoadBalancingWeight != nil { + l = (*wrapperspb.UInt32Value)(m.LoadBalancingWeight).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Priority != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Priority)) + } + if m.Proximity != nil { + l = (*wrapperspb.UInt32Value)(m.Proximity).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.LbConfig.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LocalityLbEndpoints_LoadBalancerEndpoints) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LoadBalancerEndpoints != nil { + l = m.LoadBalancerEndpoints.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *LocalityLbEndpoints_LedsClusterLocalityConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LedsClusterLocalityConfig != nil { + l = m.LedsClusterLocalityConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_vtproto.pb.go new file mode 100644 index 00000000000..368eefe03e7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_vtproto.pb.go @@ -0,0 +1,331 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
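Both new *_vtproto.pb.go files compile only under the vtprotobuf build tag and marshal back-to-front: MarshalToSizedBufferVTStrict fills a presized buffer from the end, writing each length prefix with protohelpers.EncodeVarint and falling back to proto.Marshal for messages owned by other packages. A usage sketch, assuming a build with -tags vtprotobuf:

    //go:build vtprotobuf

    package main

    import (
        "fmt"

        endpointv3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
    )

    // encode uses the strict vtprotobuf fast path; without the build tag
    // these methods do not exist and proto.Marshal is the only option.
    func encode(cla *endpointv3.ClusterLoadAssignment) ([]byte, error) {
        b, err := cla.MarshalVTStrict() // presizes via SizeVT, then fills backward
        if err != nil {
            return nil, err
        }
        fmt.Printf("encoded %d bytes\n", len(b))
        return b, nil
    }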
+// source: envoy/config/endpoint/v3/endpoint.proto + +package endpointv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ClusterLoadAssignment_Policy_DropOverload) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterLoadAssignment_Policy_DropOverload) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterLoadAssignment_Policy_DropOverload) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DropPercentage != nil { + if vtmsg, ok := interface{}(m.DropPercentage).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DropPercentage) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Category) > 0 { + i -= len(m.Category) + copy(dAtA[i:], m.Category) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Category))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterLoadAssignment_Policy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterLoadAssignment_Policy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterLoadAssignment_Policy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.WeightedPriorityHealth { + i-- + if m.WeightedPriorityHealth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.EndpointStaleAfter != nil { + size, err := (*durationpb.Duration)(m.EndpointStaleAfter).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.OverprovisioningFactor != nil { + size, err := (*wrapperspb.UInt32Value)(m.OverprovisioningFactor).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.DropOverloads) > 0 { + for iNdEx := len(m.DropOverloads) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DropOverloads[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *ClusterLoadAssignment) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterLoadAssignment) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterLoadAssignment) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.NamedEndpoints) > 0 { + for k := range m.NamedEndpoints { + v := m.NamedEndpoints[k] + baseI := i + size, err := v.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if m.Policy != nil { + size, err := m.Policy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.Endpoints) > 0 { + for iNdEx := len(m.Endpoints) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Endpoints[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.ClusterName) > 0 { + i -= len(m.ClusterName) + copy(dAtA[i:], m.ClusterName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterLoadAssignment_Policy_DropOverload) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Category) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DropPercentage != nil { + if size, ok := interface{}(m.DropPercentage).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DropPercentage) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClusterLoadAssignment_Policy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DropOverloads) > 0 { + for _, e := range m.DropOverloads { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.OverprovisioningFactor != nil { + l = (*wrapperspb.UInt32Value)(m.OverprovisioningFactor).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EndpointStaleAfter != nil { + l = (*durationpb.Duration)(m.EndpointStaleAfter).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.WeightedPriorityHealth { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ClusterLoadAssignment) 
SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClusterName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Endpoints) > 0 { + for _, e := range m.Endpoints { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Policy != nil { + l = m.Policy.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.NamedEndpoints) > 0 { + for k, v := range m.NamedEndpoints { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.go index 0d9aa200eb6..07d96da4966 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.go @@ -1,19 +1,20 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/endpoint/v3/load_report.proto package endpointv3 import ( _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/cncf/xds/go/xds/annotations/v3" v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - duration "github.com/golang/protobuf/ptypes/duration" - _struct "github.com/golang/protobuf/ptypes/struct" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" reflect "reflect" sync "sync" ) @@ -28,7 +29,7 @@ const ( // These are stats Envoy reports to the management server at a frequency defined by // :ref:`LoadStatsResponse.load_reporting_interval`. // Stats per upstream region/zone and optionally per subzone. -// [#next-free-field: 9] +// [#next-free-field: 15] type UpstreamLocalityStats struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -49,7 +50,39 @@ type UpstreamLocalityStats struct { // the last report. This information is aggregated over all the // upstream endpoints in the locality. TotalIssuedRequests uint64 `protobuf:"varint,8,opt,name=total_issued_requests,json=totalIssuedRequests,proto3" json:"total_issued_requests,omitempty"` - // Stats for multi-dimensional load balancing. + // The total number of connections in an established state at the time of the + // report. This field is aggregated over all the upstream endpoints in the + // locality. + // In Envoy, this information may be based on “upstream_cx_active metric“. + // [#not-implemented-hide:] + TotalActiveConnections uint64 `protobuf:"varint,9,opt,name=total_active_connections,json=totalActiveConnections,proto3" json:"total_active_connections,omitempty"` + // The total number of connections opened since the last report. + // This field is aggregated over all the upstream endpoints in the locality. 
+ // In Envoy, this information may be based on “upstream_cx_total“ metric + // compared to itself between start and end of an interval, i.e. + // “upstream_cx_total“(now) - “upstream_cx_total“(now - + // load_report_interval). + // [#not-implemented-hide:] + TotalNewConnections uint64 `protobuf:"varint,10,opt,name=total_new_connections,json=totalNewConnections,proto3" json:"total_new_connections,omitempty"` + // The total number of connection failures since the last report. + // This field is aggregated over all the upstream endpoints in the locality. + // In Envoy, this information may be based on “upstream_cx_connect_fail“ + // metric compared to itself between start and end of an interval, i.e. + // “upstream_cx_connect_fail“(now) - “upstream_cx_connect_fail“(now - + // load_report_interval). + // [#not-implemented-hide:] + TotalFailConnections uint64 `protobuf:"varint,11,opt,name=total_fail_connections,json=totalFailConnections,proto3" json:"total_fail_connections,omitempty"` + // CPU utilization stats for multi-dimensional load balancing. + // This typically comes from endpoint metrics reported via ORCA. + CpuUtilization *UnnamedEndpointLoadMetricStats `protobuf:"bytes,12,opt,name=cpu_utilization,json=cpuUtilization,proto3" json:"cpu_utilization,omitempty"` + // Memory utilization for multi-dimensional load balancing. + // This typically comes from endpoint metrics reported via ORCA. + MemUtilization *UnnamedEndpointLoadMetricStats `protobuf:"bytes,13,opt,name=mem_utilization,json=memUtilization,proto3" json:"mem_utilization,omitempty"` + // Blended application-defined utilization for multi-dimensional load balancing. + // This typically comes from endpoint metrics reported via ORCA. + ApplicationUtilization *UnnamedEndpointLoadMetricStats `protobuf:"bytes,14,opt,name=application_utilization,json=applicationUtilization,proto3" json:"application_utilization,omitempty"` + // Named stats for multi-dimensional load balancing. + // These typically come from endpoint metrics reported via ORCA. LoadMetricStats []*EndpointLoadMetricStats `protobuf:"bytes,5,rep,name=load_metric_stats,json=loadMetricStats,proto3" json:"load_metric_stats,omitempty"` // Endpoint granularity stats information for this locality. 
This information // is populated if the Server requests it by setting @@ -127,6 +160,48 @@ func (x *UpstreamLocalityStats) GetTotalIssuedRequests() uint64 { return 0 } +func (x *UpstreamLocalityStats) GetTotalActiveConnections() uint64 { + if x != nil { + return x.TotalActiveConnections + } + return 0 +} + +func (x *UpstreamLocalityStats) GetTotalNewConnections() uint64 { + if x != nil { + return x.TotalNewConnections + } + return 0 +} + +func (x *UpstreamLocalityStats) GetTotalFailConnections() uint64 { + if x != nil { + return x.TotalFailConnections + } + return 0 +} + +func (x *UpstreamLocalityStats) GetCpuUtilization() *UnnamedEndpointLoadMetricStats { + if x != nil { + return x.CpuUtilization + } + return nil +} + +func (x *UpstreamLocalityStats) GetMemUtilization() *UnnamedEndpointLoadMetricStats { + if x != nil { + return x.MemUtilization + } + return nil +} + +func (x *UpstreamLocalityStats) GetApplicationUtilization() *UnnamedEndpointLoadMetricStats { + if x != nil { + return x.ApplicationUtilization + } + return nil +} + func (x *UpstreamLocalityStats) GetLoadMetricStats() []*EndpointLoadMetricStats { if x != nil { return x.LoadMetricStats @@ -158,7 +233,7 @@ type UpstreamEndpointStats struct { Address *v3.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` // Opaque and implementation dependent metadata of the // endpoint. Envoy will pass this directly to the management server. - Metadata *_struct.Struct `protobuf:"bytes,6,opt,name=metadata,proto3" json:"metadata,omitempty"` + Metadata *structpb.Struct `protobuf:"bytes,6,opt,name=metadata,proto3" json:"metadata,omitempty"` // The total number of requests successfully completed by the endpoints in the // locality. These include non-5xx responses for HTTP, where errors // originate at the client and the endpoint responded successfully. For gRPC, @@ -224,7 +299,7 @@ func (x *UpstreamEndpointStats) GetAddress() *v3.Address { return nil } -func (x *UpstreamEndpointStats) GetMetadata() *_struct.Struct { +func (x *UpstreamEndpointStats) GetMetadata() *structpb.Struct { if x != nil { return x.Metadata } @@ -333,6 +408,65 @@ func (x *EndpointLoadMetricStats) GetTotalMetricValue() float64 { return 0 } +// Same as EndpointLoadMetricStats, except without the metric_name field. +type UnnamedEndpointLoadMetricStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Number of calls that finished and included this metric. + NumRequestsFinishedWithMetric uint64 `protobuf:"varint,1,opt,name=num_requests_finished_with_metric,json=numRequestsFinishedWithMetric,proto3" json:"num_requests_finished_with_metric,omitempty"` + // Sum of metric values across all calls that finished with this metric for + // load_reporting_interval. 
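Taken together, the new UpstreamLocalityStats members split into two groups: interval deltas of cumulative Envoy counters (total_new_connections is upstream_cx_total now minus upstream_cx_total one load_report_interval ago, and likewise for failures), and ORCA-style utilization carried in UnnamedEndpointLoadMetricStats. A sketch of deriving both from two counter snapshots; the snapshot variables are illustrative:

    import endpointv3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"

    // buildLocalityStats turns two snapshots of the cumulative connection
    // counters into per-interval deltas and attaches a CPU utilization
    // metric aggregated over the requests finished in the interval.
    func buildLocalityStats(cxTotalPrev, cxTotalNow, cxFailPrev, cxFailNow, finished uint64, cpuSum float64) *endpointv3.UpstreamLocalityStats {
        return &endpointv3.UpstreamLocalityStats{
            TotalNewConnections:  cxTotalNow - cxTotalPrev,
            TotalFailConnections: cxFailNow - cxFailPrev,
            CpuUtilization: &endpointv3.UnnamedEndpointLoadMetricStats{
                NumRequestsFinishedWithMetric: finished,
                TotalMetricValue:              cpuSum,
            },
        }
    }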
+ TotalMetricValue float64 `protobuf:"fixed64,2,opt,name=total_metric_value,json=totalMetricValue,proto3" json:"total_metric_value,omitempty"` +} + +func (x *UnnamedEndpointLoadMetricStats) Reset() { + *x = UnnamedEndpointLoadMetricStats{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UnnamedEndpointLoadMetricStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnnamedEndpointLoadMetricStats) ProtoMessage() {} + +func (x *UnnamedEndpointLoadMetricStats) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnnamedEndpointLoadMetricStats.ProtoReflect.Descriptor instead. +func (*UnnamedEndpointLoadMetricStats) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_load_report_proto_rawDescGZIP(), []int{3} +} + +func (x *UnnamedEndpointLoadMetricStats) GetNumRequestsFinishedWithMetric() uint64 { + if x != nil { + return x.NumRequestsFinishedWithMetric + } + return 0 +} + +func (x *UnnamedEndpointLoadMetricStats) GetTotalMetricValue() float64 { + if x != nil { + return x.TotalMetricValue + } + return 0 +} + // Per cluster load stats. Envoy reports these stats a management server in a // :ref:`LoadStatsRequest` // Next ID: 7 @@ -364,13 +498,13 @@ type ClusterStats struct { // request reported. Due to system load and delays between the “LoadStatsRequest“ sent from Envoy // and the “LoadStatsResponse“ message sent from the management server, this may be longer than // the requested load reporting interval in the “LoadStatsResponse“. - LoadReportInterval *duration.Duration `protobuf:"bytes,4,opt,name=load_report_interval,json=loadReportInterval,proto3" json:"load_report_interval,omitempty"` + LoadReportInterval *durationpb.Duration `protobuf:"bytes,4,opt,name=load_report_interval,json=loadReportInterval,proto3" json:"load_report_interval,omitempty"` } func (x *ClusterStats) Reset() { *x = ClusterStats{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[3] + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -383,7 +517,7 @@ func (x *ClusterStats) String() string { func (*ClusterStats) ProtoMessage() {} func (x *ClusterStats) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[3] + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -396,7 +530,7 @@ func (x *ClusterStats) ProtoReflect() protoreflect.Message { // Deprecated: Use ClusterStats.ProtoReflect.Descriptor instead. 
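ClusterStats itself is unchanged apart from the msgTypes index shift (3 to 4, since UnnamedEndpointLoadMetricStats now occupies slot 3) and the move from the legacy github.com/golang/protobuf/ptypes aliases to durationpb and structpb. A sketch of assembling a report with the migrated duration type; names and numbers are illustrative:

    import (
        "time"

        endpointv3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    // clusterReport shows the durationpb.Duration field that replaced the
    // old *duration.Duration alias on ClusterStats.
    func clusterReport(interval time.Duration) *endpointv3.ClusterStats {
        return &endpointv3.ClusterStats{
            ClusterName:          "example-cluster",
            TotalDroppedRequests: 7,
            DroppedRequests: []*endpointv3.ClusterStats_DroppedRequests{
                {Category: "overload", DroppedCount: 7},
            },
            LoadReportInterval: durationpb.New(interval),
        }
    }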
func (*ClusterStats) Descriptor() ([]byte, []int) { - return file_envoy_config_endpoint_v3_load_report_proto_rawDescGZIP(), []int{3} + return file_envoy_config_endpoint_v3_load_report_proto_rawDescGZIP(), []int{4} } func (x *ClusterStats) GetClusterName() string { @@ -434,7 +568,7 @@ func (x *ClusterStats) GetDroppedRequests() []*ClusterStats_DroppedRequests { return nil } -func (x *ClusterStats) GetLoadReportInterval() *duration.Duration { +func (x *ClusterStats) GetLoadReportInterval() *durationpb.Duration { if x != nil { return x.LoadReportInterval } @@ -455,7 +589,7 @@ type ClusterStats_DroppedRequests struct { func (x *ClusterStats_DroppedRequests) Reset() { *x = ClusterStats_DroppedRequests{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[4] + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -468,7 +602,7 @@ func (x *ClusterStats_DroppedRequests) String() string { func (*ClusterStats_DroppedRequests) ProtoMessage() {} func (x *ClusterStats_DroppedRequests) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[4] + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -481,7 +615,7 @@ func (x *ClusterStats_DroppedRequests) ProtoReflect() protoreflect.Message { // Deprecated: Use ClusterStats_DroppedRequests.ProtoReflect.Descriptor instead. func (*ClusterStats_DroppedRequests) Descriptor() ([]byte, []int) { - return file_envoy_config_endpoint_v3_load_report_proto_rawDescGZIP(), []int{3, 0} + return file_envoy_config_endpoint_v3_load_report_proto_rawDescGZIP(), []int{4, 0} } func (x *ClusterStats_DroppedRequests) GetCategory() string { @@ -513,146 +647,190 @@ var file_envoy_config_endpoint_v3_load_report_proto_rawDesc = []byte{ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, - 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xca, 0x04, 0x0a, 0x15, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x3a, - 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, - 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x19, 0x74, 0x6f, - 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 
0x5f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x74, - 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x67, - 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x74, 0x6f, 0x74, 0x61, - 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, - 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x69, - 0x73, 0x73, 0x75, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x49, 0x73, 0x73, 0x75, 0x65, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x6c, 0x6f, 0x61, - 0x64, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, - 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x0f, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x75, 0x70, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x73, 0x74, - 0x61, 0x74, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, - 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x6e, 0x64, - 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x15, 0x75, 0x70, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, - 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x3a, 0x32, 0x9a, - 0xc5, 0x88, 0x1e, 0x2d, 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x55, 0x70, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x53, 0x74, 0x61, 0x74, - 0x73, 0x22, 0xf7, 0x03, 0x0a, 0x15, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x6e, - 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x07, 0x61, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x06, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, - 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3a, 0x0a, 0x19, 0x74, 0x6f, 0x74, - 0x61, 0x6c, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x5f, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x74, 0x6f, - 0x74, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, - 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x74, 0x6f, 0x74, 0x61, 0x6c, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, - 0x73, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x69, 0x73, - 0x73, 0x75, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x49, 0x73, 0x73, 0x75, 0x65, 0x64, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x6c, 0x6f, 0x61, 0x64, - 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x05, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x45, - 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x0f, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x53, 0x74, 0x61, 0x74, 0x73, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, 0x0a, 0x2b, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, - 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x6e, - 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x22, 0xe8, 0x01, 0x0a, 0x17, - 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x48, 0x0a, 0x21, 0x6e, 0x75, 0x6d, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, - 0x64, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x1d, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, - 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x12, 0x2c, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, - 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x4d, 
0x65, 0x74, 0x72, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x45, - 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x53, 0x74, 0x61, 0x74, 0x73, 0x22, 0x89, 0x05, 0x0a, 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x2a, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, - 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x12, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x71, 0x0a, 0x17, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, - 0x33, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, - 0x74, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, - 0x01, 0x52, 0x15, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, - 0x69, 0x74, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6f, 0x74, 0x61, - 0x6c, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x44, - 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x61, - 0x0a, 0x10, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5, 0x08, 0x0a, 0x15, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, + 0x3a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 
0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x19, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x74, 0x6f, 0x74, 0x61, 0x6c, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, + 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x74, 0x6f, 0x74, + 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, + 0x72, 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, + 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x49, 0x73, 0x73, 0x75, + 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x42, 0x0a, 0x18, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x42, 0x08, 0xd2, 0xc6, + 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x16, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x41, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, + 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x42, 0x08, 0xd2, + 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x4e, 0x65, + 0x77, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3e, 0x0a, 0x16, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x42, 0x08, 0xd2, 0xc6, + 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x14, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x46, 0x61, 0x69, + 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x61, 0x0a, 0x0f, + 0x63, 0x70, 0x75, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, + 0x2e, 0x55, 0x6e, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, + 0x0e, 0x63, 0x70, 0x75, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x61, 0x0a, 0x0f, 0x6d, 0x65, 0x6d, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 
0x6e, 0x74, - 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, - 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, - 0x52, 0x0f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x73, 0x12, 0x4b, 0x0a, 0x14, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, - 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x6c, 0x6f, 0x61, 0x64, - 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x1a, 0x96, - 0x01, 0x0a, 0x0f, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x73, 0x12, 0x23, 0x0a, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x08, 0x63, - 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x72, 0x6f, 0x70, 0x70, - 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, - 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x3a, 0x39, 0x9a, 0xc5, - 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x6e, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x74, 0x61, + 0x74, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x6d, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x71, 0x0a, 0x17, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x55, + 0x6e, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x4c, 0x6f, + 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x16, 0x61, + 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5d, 0x0a, 0x11, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x52, 0x0f, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, + 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, + 0x2e, 0x55, 
0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x15, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x1a, 0x0a, + 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, + 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x65, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x22, 0xf7, 0x03, + 0x0a, 0x15, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3a, 0x0a, 0x19, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x73, 0x5f, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x73, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x30, + 0x0a, 0x14, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x12, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, + 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x49, 0x73, 0x73, 0x75, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x74, 0x61, + 0x74, 0x73, 0x52, 0x0f, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x3a, 0x32, 0x9a, 
0xc5, 0x88, 0x1e, 0x2d, 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x22, 0xe8, 0x01, 0x0a, 0x17, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x48, 0x0a, 0x21, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x77, 0x69, + 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x1d, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x46, 0x69, 0x6e, 0x69, + 0x73, 0x68, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x2c, + 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, + 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x34, 0x9a, 0xc5, + 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x74, 0x61, + 0x74, 0x73, 0x22, 0x98, 0x01, 0x0a, 0x1e, 0x55, 0x6e, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x48, 0x0a, 0x21, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x77, + 0x69, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x1d, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x46, 0x69, 0x6e, + 0x69, 0x73, 0x68, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, + 0x2c, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x74, 0x6f, 0x74, + 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x89, 0x05, + 0x0a, 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x2a, + 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x71, 0x0a, 0x17, + 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x02, 0x20, 
0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x15, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, + 0x34, 0x0a, 0x16, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x14, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x61, 0x0a, 0x10, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, - 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, - 0x74, 0x73, 0x42, 0x8f, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x26, 0x69, - 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, - 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, - 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, - 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, - 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x65, 0x6e, - 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2f, 0x76, 0x33, 0x3b, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, - 0x6e, 0x74, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x52, 0x0f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x4b, 0x0a, 0x14, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x12, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x1a, 0x96, 0x01, 0x0a, 0x0f, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x08, 0x63, 0x61, 0x74, + 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x52, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x23, + 0x0a, 0x0d, 
0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x3a, 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x44, + 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x3a, 0x29, + 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x42, 0x8f, 0x01, 0xba, 0x80, 0xc8, 0xd1, + 0x06, 0x02, 0x10, 0x02, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x4c, 0x6f, + 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2f, 0x76, 0x33, + 0x3b, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -667,33 +845,37 @@ func file_envoy_config_endpoint_v3_load_report_proto_rawDescGZIP() []byte { return file_envoy_config_endpoint_v3_load_report_proto_rawDescData } -var file_envoy_config_endpoint_v3_load_report_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_envoy_config_endpoint_v3_load_report_proto_msgTypes = make([]protoimpl.MessageInfo, 6) var file_envoy_config_endpoint_v3_load_report_proto_goTypes = []interface{}{ - (*UpstreamLocalityStats)(nil), // 0: envoy.config.endpoint.v3.UpstreamLocalityStats - (*UpstreamEndpointStats)(nil), // 1: envoy.config.endpoint.v3.UpstreamEndpointStats - (*EndpointLoadMetricStats)(nil), // 2: envoy.config.endpoint.v3.EndpointLoadMetricStats - (*ClusterStats)(nil), // 3: envoy.config.endpoint.v3.ClusterStats - (*ClusterStats_DroppedRequests)(nil), // 4: envoy.config.endpoint.v3.ClusterStats.DroppedRequests - (*v3.Locality)(nil), // 5: envoy.config.core.v3.Locality - (*v3.Address)(nil), // 6: envoy.config.core.v3.Address - (*_struct.Struct)(nil), // 7: google.protobuf.Struct - (*duration.Duration)(nil), // 8: google.protobuf.Duration + (*UpstreamLocalityStats)(nil), // 0: envoy.config.endpoint.v3.UpstreamLocalityStats + (*UpstreamEndpointStats)(nil), // 1: envoy.config.endpoint.v3.UpstreamEndpointStats + (*EndpointLoadMetricStats)(nil), // 2: envoy.config.endpoint.v3.EndpointLoadMetricStats + (*UnnamedEndpointLoadMetricStats)(nil), // 3: envoy.config.endpoint.v3.UnnamedEndpointLoadMetricStats + (*ClusterStats)(nil), // 4: envoy.config.endpoint.v3.ClusterStats + (*ClusterStats_DroppedRequests)(nil), // 5: envoy.config.endpoint.v3.ClusterStats.DroppedRequests + (*v3.Locality)(nil), // 6: envoy.config.core.v3.Locality + (*v3.Address)(nil), // 7: envoy.config.core.v3.Address + (*structpb.Struct)(nil), 
// 8: google.protobuf.Struct + (*durationpb.Duration)(nil), // 9: google.protobuf.Duration } var file_envoy_config_endpoint_v3_load_report_proto_depIdxs = []int32{ - 5, // 0: envoy.config.endpoint.v3.UpstreamLocalityStats.locality:type_name -> envoy.config.core.v3.Locality - 2, // 1: envoy.config.endpoint.v3.UpstreamLocalityStats.load_metric_stats:type_name -> envoy.config.endpoint.v3.EndpointLoadMetricStats - 1, // 2: envoy.config.endpoint.v3.UpstreamLocalityStats.upstream_endpoint_stats:type_name -> envoy.config.endpoint.v3.UpstreamEndpointStats - 6, // 3: envoy.config.endpoint.v3.UpstreamEndpointStats.address:type_name -> envoy.config.core.v3.Address - 7, // 4: envoy.config.endpoint.v3.UpstreamEndpointStats.metadata:type_name -> google.protobuf.Struct - 2, // 5: envoy.config.endpoint.v3.UpstreamEndpointStats.load_metric_stats:type_name -> envoy.config.endpoint.v3.EndpointLoadMetricStats - 0, // 6: envoy.config.endpoint.v3.ClusterStats.upstream_locality_stats:type_name -> envoy.config.endpoint.v3.UpstreamLocalityStats - 4, // 7: envoy.config.endpoint.v3.ClusterStats.dropped_requests:type_name -> envoy.config.endpoint.v3.ClusterStats.DroppedRequests - 8, // 8: envoy.config.endpoint.v3.ClusterStats.load_report_interval:type_name -> google.protobuf.Duration - 9, // [9:9] is the sub-list for method output_type - 9, // [9:9] is the sub-list for method input_type - 9, // [9:9] is the sub-list for extension type_name - 9, // [9:9] is the sub-list for extension extendee - 0, // [0:9] is the sub-list for field type_name + 6, // 0: envoy.config.endpoint.v3.UpstreamLocalityStats.locality:type_name -> envoy.config.core.v3.Locality + 3, // 1: envoy.config.endpoint.v3.UpstreamLocalityStats.cpu_utilization:type_name -> envoy.config.endpoint.v3.UnnamedEndpointLoadMetricStats + 3, // 2: envoy.config.endpoint.v3.UpstreamLocalityStats.mem_utilization:type_name -> envoy.config.endpoint.v3.UnnamedEndpointLoadMetricStats + 3, // 3: envoy.config.endpoint.v3.UpstreamLocalityStats.application_utilization:type_name -> envoy.config.endpoint.v3.UnnamedEndpointLoadMetricStats + 2, // 4: envoy.config.endpoint.v3.UpstreamLocalityStats.load_metric_stats:type_name -> envoy.config.endpoint.v3.EndpointLoadMetricStats + 1, // 5: envoy.config.endpoint.v3.UpstreamLocalityStats.upstream_endpoint_stats:type_name -> envoy.config.endpoint.v3.UpstreamEndpointStats + 7, // 6: envoy.config.endpoint.v3.UpstreamEndpointStats.address:type_name -> envoy.config.core.v3.Address + 8, // 7: envoy.config.endpoint.v3.UpstreamEndpointStats.metadata:type_name -> google.protobuf.Struct + 2, // 8: envoy.config.endpoint.v3.UpstreamEndpointStats.load_metric_stats:type_name -> envoy.config.endpoint.v3.EndpointLoadMetricStats + 0, // 9: envoy.config.endpoint.v3.ClusterStats.upstream_locality_stats:type_name -> envoy.config.endpoint.v3.UpstreamLocalityStats + 5, // 10: envoy.config.endpoint.v3.ClusterStats.dropped_requests:type_name -> envoy.config.endpoint.v3.ClusterStats.DroppedRequests + 9, // 11: envoy.config.endpoint.v3.ClusterStats.load_report_interval:type_name -> google.protobuf.Duration + 12, // [12:12] is the sub-list for method output_type + 12, // [12:12] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name } func init() { file_envoy_config_endpoint_v3_load_report_proto_init() } @@ -739,7 +921,7 @@ func file_envoy_config_endpoint_v3_load_report_proto_init() { } } 
file_envoy_config_endpoint_v3_load_report_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClusterStats); i { + switch v := v.(*UnnamedEndpointLoadMetricStats); i { case 0: return &v.state case 1: @@ -751,6 +933,18 @@ func file_envoy_config_endpoint_v3_load_report_proto_init() { } } file_envoy_config_endpoint_v3_load_report_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_load_report_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ClusterStats_DroppedRequests); i { case 0: return &v.state @@ -769,7 +963,7 @@ func file_envoy_config_endpoint_v3_load_report_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_config_endpoint_v3_load_report_proto_rawDesc, NumEnums: 0, - NumMessages: 5, + NumMessages: 6, NumExtensions: 0, NumServices: 0, }, diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.validate.go index 00b2ac0ef2a..fd7a0c6c1c5 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/endpoint/v3/load_report.proto @@ -94,6 +95,99 @@ func (m *UpstreamLocalityStats) validate(all bool) error { // no validation rules for TotalIssuedRequests + // no validation rules for TotalActiveConnections + + // no validation rules for TotalNewConnections + + // no validation rules for TotalFailConnections + + if all { + switch v := interface{}(m.GetCpuUtilization()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: "CpuUtilization", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: "CpuUtilization", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCpuUtilization()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamLocalityStatsValidationError{ + field: "CpuUtilization", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMemUtilization()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: "MemUtilization", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: "MemUtilization", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMemUtilization()).(interface{ Validate() error 
}); ok { + if err := v.Validate(); err != nil { + return UpstreamLocalityStatsValidationError{ + field: "MemUtilization", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetApplicationUtilization()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: "ApplicationUtilization", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: "ApplicationUtilization", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetApplicationUtilization()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamLocalityStatsValidationError{ + field: "ApplicationUtilization", + reason: "embedded message failed validation", + cause: err, + } + } + } + for idx, item := range m.GetLoadMetricStats() { _, _ = idx, item @@ -554,6 +648,113 @@ var _ interface { ErrorName() string } = EndpointLoadMetricStatsValidationError{} +// Validate checks the field values on UnnamedEndpointLoadMetricStats with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *UnnamedEndpointLoadMetricStats) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UnnamedEndpointLoadMetricStats with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// UnnamedEndpointLoadMetricStatsMultiError, or nil if none found. +func (m *UnnamedEndpointLoadMetricStats) ValidateAll() error { + return m.validate(true) +} + +func (m *UnnamedEndpointLoadMetricStats) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for NumRequestsFinishedWithMetric + + // no validation rules for TotalMetricValue + + if len(errors) > 0 { + return UnnamedEndpointLoadMetricStatsMultiError(errors) + } + + return nil +} + +// UnnamedEndpointLoadMetricStatsMultiError is an error wrapping multiple +// validation errors returned by UnnamedEndpointLoadMetricStats.ValidateAll() +// if the designated constraints aren't met. +type UnnamedEndpointLoadMetricStatsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UnnamedEndpointLoadMetricStatsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UnnamedEndpointLoadMetricStatsMultiError) AllErrors() []error { return m } + +// UnnamedEndpointLoadMetricStatsValidationError is the validation error +// returned by UnnamedEndpointLoadMetricStats.Validate if the designated +// constraints aren't met. +type UnnamedEndpointLoadMetricStatsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UnnamedEndpointLoadMetricStatsValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e UnnamedEndpointLoadMetricStatsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UnnamedEndpointLoadMetricStatsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UnnamedEndpointLoadMetricStatsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UnnamedEndpointLoadMetricStatsValidationError) ErrorName() string { + return "UnnamedEndpointLoadMetricStatsValidationError" +} + +// Error satisfies the builtin error interface +func (e UnnamedEndpointLoadMetricStatsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUnnamedEndpointLoadMetricStats.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UnnamedEndpointLoadMetricStatsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UnnamedEndpointLoadMetricStatsValidationError{} + // Validate checks the field values on ClusterStats with the rules defined in // the proto definition for this message. If any rules are violated, the first // error encountered is returned, or nil if there are no violations. diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report_vtproto.pb.go new file mode 100644 index 00000000000..31b280a340d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report_vtproto.pb.go @@ -0,0 +1,696 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/endpoint/v3/load_report.proto + +package endpointv3 + +import ( + binary "encoding/binary" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *UpstreamLocalityStats) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpstreamLocalityStats) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UpstreamLocalityStats) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ApplicationUtilization != nil { + size, err := m.ApplicationUtilization.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if m.MemUtilization != nil { + size, err := m.MemUtilization.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + if m.CpuUtilization != nil { + size, err := m.CpuUtilization.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if m.TotalFailConnections != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalFailConnections)) + i-- + dAtA[i] = 0x58 + } + if m.TotalNewConnections != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalNewConnections)) + i-- + dAtA[i] = 0x50 + } + if m.TotalActiveConnections != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalActiveConnections)) + i-- + dAtA[i] = 0x48 + } + if m.TotalIssuedRequests != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalIssuedRequests)) + i-- + dAtA[i] = 0x40 + } + if len(m.UpstreamEndpointStats) > 0 { + for iNdEx := len(m.UpstreamEndpointStats) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.UpstreamEndpointStats[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + } + if m.Priority != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x30 + } + if len(m.LoadMetricStats) > 0 { + for iNdEx := len(m.LoadMetricStats) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.LoadMetricStats[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.TotalErrorRequests != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalErrorRequests)) + i-- + dAtA[i] = 0x20 + } + if m.TotalRequestsInProgress != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalRequestsInProgress)) + i-- + dAtA[i] = 0x18 + } + if m.TotalSuccessfulRequests != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalSuccessfulRequests)) + i-- + dAtA[i] = 0x10 + } + if m.Locality != nil { + if vtmsg, ok := interface{}(m.Locality).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := 
proto.Marshal(m.Locality) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpstreamEndpointStats) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpstreamEndpointStats) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UpstreamEndpointStats) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TotalIssuedRequests != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalIssuedRequests)) + i-- + dAtA[i] = 0x38 + } + if m.Metadata != nil { + size, err := (*structpb.Struct)(m.Metadata).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.LoadMetricStats) > 0 { + for iNdEx := len(m.LoadMetricStats) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.LoadMetricStats[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.TotalErrorRequests != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalErrorRequests)) + i-- + dAtA[i] = 0x20 + } + if m.TotalRequestsInProgress != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalRequestsInProgress)) + i-- + dAtA[i] = 0x18 + } + if m.TotalSuccessfulRequests != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalSuccessfulRequests)) + i-- + dAtA[i] = 0x10 + } + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EndpointLoadMetricStats) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointLoadMetricStats) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EndpointLoadMetricStats) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TotalMetricValue != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.TotalMetricValue)))) + i-- + dAtA[i] = 0x19 + } + if m.NumRequestsFinishedWithMetric != 0 { + i 
= protohelpers.EncodeVarint(dAtA, i, uint64(m.NumRequestsFinishedWithMetric)) + i-- + dAtA[i] = 0x10 + } + if len(m.MetricName) > 0 { + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UnnamedEndpointLoadMetricStats) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UnnamedEndpointLoadMetricStats) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UnnamedEndpointLoadMetricStats) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TotalMetricValue != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.TotalMetricValue)))) + i-- + dAtA[i] = 0x11 + } + if m.NumRequestsFinishedWithMetric != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.NumRequestsFinishedWithMetric)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ClusterStats_DroppedRequests) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterStats_DroppedRequests) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterStats_DroppedRequests) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DroppedCount != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DroppedCount)) + i-- + dAtA[i] = 0x10 + } + if len(m.Category) > 0 { + i -= len(m.Category) + copy(dAtA[i:], m.Category) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Category))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterStats) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterStats) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterStats) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ClusterServiceName) > 0 { + i -= len(m.ClusterServiceName) + copy(dAtA[i:], m.ClusterServiceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterServiceName))) + i-- + dAtA[i] = 0x32 + } + if len(m.DroppedRequests) > 0 { + for iNdEx := len(m.DroppedRequests) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DroppedRequests[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.LoadReportInterval != nil { + size, err := (*durationpb.Duration)(m.LoadReportInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.TotalDroppedRequests != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalDroppedRequests)) + i-- + dAtA[i] = 0x18 + } + if len(m.UpstreamLocalityStats) > 0 { + for iNdEx := len(m.UpstreamLocalityStats) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.UpstreamLocalityStats[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.ClusterName) > 0 { + i -= len(m.ClusterName) + copy(dAtA[i:], m.ClusterName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpstreamLocalityStats) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Locality != nil { + if size, ok := interface{}(m.Locality).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Locality) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TotalSuccessfulRequests != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalSuccessfulRequests)) + } + if m.TotalRequestsInProgress != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalRequestsInProgress)) + } + if m.TotalErrorRequests != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalErrorRequests)) + } + if len(m.LoadMetricStats) > 0 { + for _, e := range m.LoadMetricStats { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Priority != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Priority)) + } + if len(m.UpstreamEndpointStats) > 0 { + for _, e := range m.UpstreamEndpointStats { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.TotalIssuedRequests != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalIssuedRequests)) + } + if m.TotalActiveConnections != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalActiveConnections)) + } + if m.TotalNewConnections != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalNewConnections)) + } + if m.TotalFailConnections != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalFailConnections)) + } + if m.CpuUtilization != nil { + l = m.CpuUtilization.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MemUtilization != nil { + l = m.MemUtilization.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ApplicationUtilization != nil { + l = m.ApplicationUtilization.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UpstreamEndpointStats) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TotalSuccessfulRequests != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalSuccessfulRequests)) + } + if m.TotalRequestsInProgress != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalRequestsInProgress)) + } + if 
m.TotalErrorRequests != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalErrorRequests)) + } + if len(m.LoadMetricStats) > 0 { + for _, e := range m.LoadMetricStats { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Metadata != nil { + l = (*structpb.Struct)(m.Metadata).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TotalIssuedRequests != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalIssuedRequests)) + } + n += len(m.unknownFields) + return n +} + +func (m *EndpointLoadMetricStats) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.MetricName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.NumRequestsFinishedWithMetric != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.NumRequestsFinishedWithMetric)) + } + if m.TotalMetricValue != 0 { + n += 9 + } + n += len(m.unknownFields) + return n +} + +func (m *UnnamedEndpointLoadMetricStats) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NumRequestsFinishedWithMetric != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.NumRequestsFinishedWithMetric)) + } + if m.TotalMetricValue != 0 { + n += 9 + } + n += len(m.unknownFields) + return n +} + +func (m *ClusterStats_DroppedRequests) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Category) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DroppedCount != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.DroppedCount)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClusterStats) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClusterName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.UpstreamLocalityStats) > 0 { + for _, e := range m.UpstreamLocalityStats { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.TotalDroppedRequests != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalDroppedRequests)) + } + if m.LoadReportInterval != nil { + l = (*durationpb.Duration)(m.LoadReportInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.DroppedRequests) > 0 { + for _, e := range m.DroppedRequests { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.ClusterServiceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.go index 88a229220f5..1adc7d96f46 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.go @@ -1,16 +1,16 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
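The marshal helpers above write the wire format by hand, back to front: the index i starts at the end of the buffer, and each field is emitted as its payload followed by its tag, where a tag is the field number shifted left three bits ORed with the wire type (0 varint, 1 fixed64, 2 length-delimited). As a reference point, a standalone sketch of that arithmetic; the helper names here are illustrative, not the vendored protohelpers API:

package main

import "fmt"

// Wire types from the protobuf encoding spec.
const (
	wireVarint  = 0 // bools, ints, enums
	wireFixed64 = 1 // doubles, fixed64
	wireBytes   = 2 // strings, bytes, nested messages
)

// tag computes the value that precedes a field on the wire.
func tag(field, wire int) int { return field<<3 | wire }

// sizeOfVarint mirrors what protohelpers.SizeOfVarint computes:
// the number of 7-bit groups needed to hold v.
func sizeOfVarint(v uint64) int {
	n := 1
	for v >= 0x80 {
		v >>= 7
		n++
	}
	return n
}

func main() {
	fmt.Printf("%#x\n", tag(1, wireBytes))   // 0xa, e.g. metric_name above
	fmt.Printf("%#x\n", tag(2, wireVarint))  // 0x10, e.g. num_requests_finished_with_metric
	fmt.Printf("%#x\n", tag(2, wireFixed64)) // 0x11, e.g. total_metric_value (a double)
	fmt.Println(sizeOfVarint(300))           // 2 bytes
}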
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/listener/v3/api_listener.proto package listenerv3 import ( _ "github.com/cncf/xds/go/udpa/annotations" - any1 "github.com/golang/protobuf/ptypes/any" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" sync "sync" ) @@ -38,7 +38,7 @@ type ApiListener struct { // it would have caused circular dependencies for go protos: lds.proto depends on this file, // and http_connection_manager.proto depends on rds.proto, which is in the same directory as // lds.proto, so lds.proto cannot depend on this file.] - ApiListener *any1.Any `protobuf:"bytes,1,opt,name=api_listener,json=apiListener,proto3" json:"api_listener,omitempty"` + ApiListener *anypb.Any `protobuf:"bytes,1,opt,name=api_listener,json=apiListener,proto3" json:"api_listener,omitempty"` } func (x *ApiListener) Reset() { @@ -73,7 +73,7 @@ func (*ApiListener) Descriptor() ([]byte, []int) { return file_envoy_config_listener_v3_api_listener_proto_rawDescGZIP(), []int{0} } -func (x *ApiListener) GetApiListener() *any1.Any { +func (x *ApiListener) GetApiListener() *anypb.Any { if x != nil { return x.ApiListener } @@ -128,7 +128,7 @@ func file_envoy_config_listener_v3_api_listener_proto_rawDescGZIP() []byte { var file_envoy_config_listener_v3_api_listener_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_envoy_config_listener_v3_api_listener_proto_goTypes = []interface{}{ (*ApiListener)(nil), // 0: envoy.config.listener.v3.ApiListener - (*any1.Any)(nil), // 1: google.protobuf.Any + (*anypb.Any)(nil), // 1: google.protobuf.Any } var file_envoy_config_listener_v3_api_listener_proto_depIdxs = []int32{ 1, // 0: envoy.config.listener.v3.ApiListener.api_listener:type_name -> google.protobuf.Any diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.validate.go index 610b40124f3..56954a35df1 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/listener/v3/api_listener.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener_vtproto.pb.go new file mode 100644 index 00000000000..0d24f32c6d2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener_vtproto.pb.go @@ -0,0 +1,77 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
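Two Go build constraints appear throughout these files: the pb.validate.go files gain //go:build !disable_pgv, so the protoc-gen-validate code can be compiled out, and each new _vtproto.pb.go file is guarded by //go:build vtprotobuf, so the fast marshal path is opt-in. A rough sketch of how those tags interact with the toolchain; the file name is hypothetical, the commands are standard go build invocations:

//go:build vtprotobuf

// fastpath.go: compiled only when -tags vtprotobuf is passed.
package example

go build ./...                   # default: validate code in, vtproto code out
go build -tags vtprotobuf ./...  # also compiles the *_vtproto.pb.go files
go build -tags disable_pgv ./... # drops the *.pb.validate.go files instead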
+// source: envoy/config/listener/v3/api_listener.proto + +package listenerv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ApiListener) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ApiListener) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ApiListener) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ApiListener != nil { + size, err := (*anypb.Any)(m.ApiListener).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ApiListener) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ApiListener != nil { + l = (*anypb.Any)(m.ApiListener).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.go index 686820df248..c639e62b1cd 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/listener/v3/listener.proto package listenerv3 @@ -15,10 +15,10 @@ import ( v33 "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3" v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - duration "github.com/golang/protobuf/ptypes/duration" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -193,7 +193,7 @@ func (x *ListenerCollection) GetEntries() []*v31.CollectionEntry { return nil } -// [#next-free-field: 35] +// [#next-free-field: 36] type Listener struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -246,13 +246,13 @@ type Listener struct { // true, the listener hands off redirected connections to the listener associated with the // original destination address. 
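use_original_dst above is a *wrapperspb.BoolValue rather than a plain bool: the well-known wrapper types let proto3 distinguish a field that was never set from one explicitly set to its zero value, and the old github.com/golang/protobuf/ptypes/wrappers names are type aliases for these wrapperspb types, which is why the rename in this file is source-compatible. A minimal sketch, assuming the vendored package is importable under its upstream path:

package main

import (
	"fmt"

	listenerv3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Explicitly set to false: the wrapper is non-nil, so "set" survives the wire.
	l := &listenerv3.Listener{UseOriginalDst: wrapperspb.Bool(false)}
	fmt.Println(l.GetUseOriginalDst() != nil)     // true: the field was set
	fmt.Println(l.GetUseOriginalDst().GetValue()) // false: the value itself

	// Never set: consumers fall back to the documented default (false).
	var unset listenerv3.Listener
	fmt.Println(unset.GetUseOriginalDst() == nil) // true
}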
If there is no listener associated with the original destination // address, the connection is handled by the listener that receives it. Defaults to false. - UseOriginalDst *wrappers.BoolValue `protobuf:"bytes,4,opt,name=use_original_dst,json=useOriginalDst,proto3" json:"use_original_dst,omitempty"` + UseOriginalDst *wrapperspb.BoolValue `protobuf:"bytes,4,opt,name=use_original_dst,json=useOriginalDst,proto3" json:"use_original_dst,omitempty"` // The default filter chain if none of the filter chain matches. If no default filter chain is supplied, // the connection will be closed. The filter chain match is ignored in this field. DefaultFilterChain *FilterChain `protobuf:"bytes,25,opt,name=default_filter_chain,json=defaultFilterChain,proto3" json:"default_filter_chain,omitempty"` // Soft limit on size of the listener’s new connection read and write buffers. // If unspecified, an implementation defined default is applied (1MiB). - PerConnectionBufferLimitBytes *wrappers.UInt32Value `protobuf:"bytes,5,opt,name=per_connection_buffer_limit_bytes,json=perConnectionBufferLimitBytes,proto3" json:"per_connection_buffer_limit_bytes,omitempty"` + PerConnectionBufferLimitBytes *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=per_connection_buffer_limit_bytes,json=perConnectionBufferLimitBytes,proto3" json:"per_connection_buffer_limit_bytes,omitempty"` // Listener metadata. Metadata *v3.Metadata `protobuf:"bytes,6,opt,name=metadata,proto3" json:"metadata,omitempty"` // [#not-implemented-hide:] @@ -279,7 +279,7 @@ type Listener struct { // the accepted socket is closed without a connection being created unless // “continue_on_listener_filters_timeout“ is set to true. Specify 0 to disable the // timeout. If not specified, a default timeout of 15s is used. - ListenerFiltersTimeout *duration.Duration `protobuf:"bytes,15,opt,name=listener_filters_timeout,json=listenerFiltersTimeout,proto3" json:"listener_filters_timeout,omitempty"` + ListenerFiltersTimeout *durationpb.Duration `protobuf:"bytes,15,opt,name=listener_filters_timeout,json=listenerFiltersTimeout,proto3" json:"listener_filters_timeout,omitempty"` // Whether a connection should be created when listener filters timeout. Default is false. // // .. attention:: @@ -302,14 +302,14 @@ type Listener struct { // Setting this flag requires Envoy to run with the “CAP_NET_ADMIN“ capability. // When this flag is not set (default), the socket is not modified, i.e. the transparent option // is neither set nor reset. - Transparent *wrappers.BoolValue `protobuf:"bytes,10,opt,name=transparent,proto3" json:"transparent,omitempty"` + Transparent *wrapperspb.BoolValue `protobuf:"bytes,10,opt,name=transparent,proto3" json:"transparent,omitempty"` // Whether the listener should set the “IP_FREEBIND“ socket option. When this // flag is set to true, listeners can be bound to an IP address that is not // configured on the system running Envoy. When this flag is set to false, the // option “IP_FREEBIND“ is disabled on the socket. When this flag is not set // (default), the socket is not modified, i.e. the option is neither enabled // nor disabled. - Freebind *wrappers.BoolValue `protobuf:"bytes,11,opt,name=freebind,proto3" json:"freebind,omitempty"` + Freebind *wrapperspb.BoolValue `protobuf:"bytes,11,opt,name=freebind,proto3" json:"freebind,omitempty"` // Additional socket options that may not be present in Envoy source code or // precompiled binaries. 
The socket options can be updated for a listener when // :ref:`enable_reuse_port ` @@ -330,7 +330,7 @@ type Listener struct { // // On macOS, only values of 0, 1, and unset are valid; other values may result in an error. // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. - TcpFastOpenQueueLength *wrappers.UInt32Value `protobuf:"bytes,12,opt,name=tcp_fast_open_queue_length,json=tcpFastOpenQueueLength,proto3" json:"tcp_fast_open_queue_length,omitempty"` + TcpFastOpenQueueLength *wrapperspb.UInt32Value `protobuf:"bytes,12,opt,name=tcp_fast_open_queue_length,json=tcpFastOpenQueueLength,proto3" json:"tcp_fast_open_queue_length,omitempty"` // Specifies the intended direction of the traffic relative to the local Envoy. // This property is required on Windows for listeners using the original destination filter, // see :ref:`Original Destination `. @@ -393,13 +393,13 @@ type Listener struct { // a single worker will currently receive packets. // - On Windows, reuse_port for TCP has undefined behavior. It is force disabled and the user // is warned similar to macOS. It is left enabled for UDP with undefined behavior currently. - EnableReusePort *wrappers.BoolValue `protobuf:"bytes,29,opt,name=enable_reuse_port,json=enableReusePort,proto3" json:"enable_reuse_port,omitempty"` + EnableReusePort *wrapperspb.BoolValue `protobuf:"bytes,29,opt,name=enable_reuse_port,json=enableReusePort,proto3" json:"enable_reuse_port,omitempty"` // Configuration for :ref:`access logs ` // emitted by this listener. AccessLog []*v33.AccessLog `protobuf:"bytes,22,rep,name=access_log,json=accessLog,proto3" json:"access_log,omitempty"` // The maximum length a tcp listener's pending connections queue can grow to. If no value is // provided net.core.somaxconn will be used on Linux and 128 otherwise. - TcpBacklogSize *wrappers.UInt32Value `protobuf:"bytes,24,opt,name=tcp_backlog_size,json=tcpBacklogSize,proto3" json:"tcp_backlog_size,omitempty"` + TcpBacklogSize *wrapperspb.UInt32Value `protobuf:"bytes,24,opt,name=tcp_backlog_size,json=tcpBacklogSize,proto3" json:"tcp_backlog_size,omitempty"` // The maximum number of connections to accept from the kernel per socket // event. Envoy may decide to close these connections after accepting them // from the kernel e.g. due to load shedding, or other policies. @@ -408,12 +408,12 @@ type Listener struct { // accepted in later event loop iterations. // If no value is provided Envoy will accept all connections pending accept // from the kernel. - MaxConnectionsToAcceptPerSocketEvent *wrappers.UInt32Value `protobuf:"bytes,34,opt,name=max_connections_to_accept_per_socket_event,json=maxConnectionsToAcceptPerSocketEvent,proto3" json:"max_connections_to_accept_per_socket_event,omitempty"` + MaxConnectionsToAcceptPerSocketEvent *wrapperspb.UInt32Value `protobuf:"bytes,34,opt,name=max_connections_to_accept_per_socket_event,json=maxConnectionsToAcceptPerSocketEvent,proto3" json:"max_connections_to_accept_per_socket_event,omitempty"` // Whether the listener should bind to the port. A listener that doesn't // bind can only receive connections redirected from other listeners that set // :ref:`use_original_dst ` // to true. Default is true. - BindToPort *wrappers.BoolValue `protobuf:"bytes,26,opt,name=bind_to_port,json=bindToPort,proto3" json:"bind_to_port,omitempty"` + BindToPort *wrapperspb.BoolValue `protobuf:"bytes,26,opt,name=bind_to_port,json=bindToPort,proto3" json:"bind_to_port,omitempty"` // The exclusive listener type and the corresponding config. 
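listener_filters_timeout above (and transport_socket_connect_timeout later in this package) is a *durationpb.Duration, the well-known Duration message, which round-trips cleanly with Go's time.Duration. A short standalone sketch of that conversion, using the documented 15s default as the sample value:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Build the proto representation of the documented 15s default.
	d := durationpb.New(15 * time.Second)
	fmt.Println(d.GetSeconds(), d.GetNanos()) // 15 0

	// Round-trip back to a native time.Duration for use with timers.
	fmt.Println(d.AsDuration() == 15*time.Second) // true

	// CheckValid reports whether the message is in the representable range.
	fmt.Println(d.CheckValid()) // <nil>
}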
// // Types that are assignable to ListenerSpecifier: @@ -426,6 +426,8 @@ type Listener struct { // Whether the listener should limit connections based upon the value of // :ref:`global_downstream_max_connections `. IgnoreGlobalConnLimit bool `protobuf:"varint,31,opt,name=ignore_global_conn_limit,json=ignoreGlobalConnLimit,proto3" json:"ignore_global_conn_limit,omitempty"` + // Whether the listener bypasses configured overload manager actions. + BypassOverloadManager bool `protobuf:"varint,35,opt,name=bypass_overload_manager,json=bypassOverloadManager,proto3" json:"bypass_overload_manager,omitempty"` } func (x *Listener) Reset() { @@ -502,7 +504,7 @@ func (x *Listener) GetFilterChainMatcher() *v32.Matcher { return nil } -func (x *Listener) GetUseOriginalDst() *wrappers.BoolValue { +func (x *Listener) GetUseOriginalDst() *wrapperspb.BoolValue { if x != nil { return x.UseOriginalDst } @@ -516,7 +518,7 @@ func (x *Listener) GetDefaultFilterChain() *FilterChain { return nil } -func (x *Listener) GetPerConnectionBufferLimitBytes() *wrappers.UInt32Value { +func (x *Listener) GetPerConnectionBufferLimitBytes() *wrapperspb.UInt32Value { if x != nil { return x.PerConnectionBufferLimitBytes } @@ -552,7 +554,7 @@ func (x *Listener) GetListenerFilters() []*ListenerFilter { return nil } -func (x *Listener) GetListenerFiltersTimeout() *duration.Duration { +func (x *Listener) GetListenerFiltersTimeout() *durationpb.Duration { if x != nil { return x.ListenerFiltersTimeout } @@ -566,14 +568,14 @@ func (x *Listener) GetContinueOnListenerFiltersTimeout() bool { return false } -func (x *Listener) GetTransparent() *wrappers.BoolValue { +func (x *Listener) GetTransparent() *wrapperspb.BoolValue { if x != nil { return x.Transparent } return nil } -func (x *Listener) GetFreebind() *wrappers.BoolValue { +func (x *Listener) GetFreebind() *wrapperspb.BoolValue { if x != nil { return x.Freebind } @@ -587,7 +589,7 @@ func (x *Listener) GetSocketOptions() []*v3.SocketOption { return nil } -func (x *Listener) GetTcpFastOpenQueueLength() *wrappers.UInt32Value { +func (x *Listener) GetTcpFastOpenQueueLength() *wrapperspb.UInt32Value { if x != nil { return x.TcpFastOpenQueueLength } @@ -630,7 +632,7 @@ func (x *Listener) GetReusePort() bool { return false } -func (x *Listener) GetEnableReusePort() *wrappers.BoolValue { +func (x *Listener) GetEnableReusePort() *wrapperspb.BoolValue { if x != nil { return x.EnableReusePort } @@ -644,21 +646,21 @@ func (x *Listener) GetAccessLog() []*v33.AccessLog { return nil } -func (x *Listener) GetTcpBacklogSize() *wrappers.UInt32Value { +func (x *Listener) GetTcpBacklogSize() *wrapperspb.UInt32Value { if x != nil { return x.TcpBacklogSize } return nil } -func (x *Listener) GetMaxConnectionsToAcceptPerSocketEvent() *wrappers.UInt32Value { +func (x *Listener) GetMaxConnectionsToAcceptPerSocketEvent() *wrapperspb.UInt32Value { if x != nil { return x.MaxConnectionsToAcceptPerSocketEvent } return nil } -func (x *Listener) GetBindToPort() *wrappers.BoolValue { +func (x *Listener) GetBindToPort() *wrapperspb.BoolValue { if x != nil { return x.BindToPort } @@ -693,6 +695,13 @@ func (x *Listener) GetIgnoreGlobalConnLimit() bool { return false } +func (x *Listener) GetBypassOverloadManager() bool { + if x != nil { + return x.BypassOverloadManager + } + return false +} + type isListener_ListenerSpecifier interface { isListener_ListenerSpecifier() } @@ -852,7 +861,7 @@ type Listener_DeprecatedV1 struct { // // This is deprecated. 
Use :ref:`Listener.bind_to_port // ` - BindToPort *wrappers.BoolValue `protobuf:"bytes,1,opt,name=bind_to_port,json=bindToPort,proto3" json:"bind_to_port,omitempty"` + BindToPort *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=bind_to_port,json=bindToPort,proto3" json:"bind_to_port,omitempty"` } func (x *Listener_DeprecatedV1) Reset() { @@ -887,7 +896,7 @@ func (*Listener_DeprecatedV1) Descriptor() ([]byte, []int) { return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{2, 0} } -func (x *Listener_DeprecatedV1) GetBindToPort() *wrappers.BoolValue { +func (x *Listener_DeprecatedV1) GetBindToPort() *wrapperspb.BoolValue { if x != nil { return x.BindToPort } @@ -1129,7 +1138,7 @@ var file_envoy_config_listener_v3_listener_proto_rawDesc = []byte{ 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, - 0x65, 0x73, 0x22, 0x86, 0x18, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, + 0x65, 0x73, 0x22, 0xbe, 0x18, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, @@ -1281,61 +1290,65 @@ var file_envoy_config_listener_v3_listener_proto_rawDesc = []byte{ 0x4d, 0x70, 0x74, 0x63, 0x70, 0x12, 0x37, 0x0a, 0x18, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x47, - 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x1a, 0x77, - 0x0a, 0x0c, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x56, 0x31, 0x12, 0x3c, - 0x0a, 0x0c, 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x0a, 0x62, 0x69, 0x6e, 0x64, 0x54, 0x6f, 0x50, 0x6f, 0x72, 0x74, 0x3a, 0x29, 0x9a, 0xc5, - 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x44, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x56, 0x31, 0x1a, 0xfc, 0x02, 0x0a, 0x17, 0x43, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x6e, 0x0a, 0x0d, 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x62, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x43, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x78, 0x61, 0x63, 0x74, 0x42, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x78, 0x61, 
0x63, 0x74, 0x42, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x65, 0x12, 0x53, 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x5f, 0x62, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x1a, 0x51, 0x0a, 0x0c, 0x45, 0x78, 0x61, 0x63, - 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x3a, 0x41, 0x9a, 0xc5, 0x88, 0x1e, 0x3c, 0x0a, - 0x3a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, + 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x36, + 0x0a, 0x17, 0x62, 0x79, 0x70, 0x61, 0x73, 0x73, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x18, 0x23, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x15, 0x62, 0x79, 0x70, 0x61, 0x73, 0x73, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x4d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x1a, 0x77, 0x0a, 0x0c, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x56, 0x31, 0x12, 0x3c, 0x0a, 0x0c, 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x74, + 0x6f, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, + 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x62, 0x69, 0x6e, 0x64, 0x54, 0x6f, + 0x50, 0x6f, 0x72, 0x74, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x2e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x56, 0x31, 0x1a, + 0xfc, 0x02, 0x0a, 0x17, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6e, 0x0a, 0x0d, 0x65, + 0x78, 0x61, 0x63, 0x74, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, - 0x78, 0x61, 0x63, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x3a, 0x34, 0x9a, 0xc5, 0x88, - 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x42, 0x13, 0x0a, 0x0c, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x18, 0x0a, 0x16, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x22, 0x29, 0x0a, 0x09, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, - 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 
0x4f, - 0x44, 0x49, 0x46, 0x59, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x3a, 0x1c, 0x9a, 0xc5, 0x88, - 0x1e, 0x17, 0x0a, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x42, 0x14, 0x0a, 0x12, 0x6c, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, - 0x04, 0x08, 0x0e, 0x10, 0x0f, 0x4a, 0x04, 0x08, 0x17, 0x10, 0x18, 0x22, 0x11, 0x0a, 0x0f, 0x4c, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x22, 0x1b, - 0x0a, 0x19, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, - 0x65, 0x6e, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x22, 0x14, 0x0a, 0x12, 0x41, - 0x70, 0x69, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x42, 0x8d, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x26, 0x69, 0x6f, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, - 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x76, - 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x78, 0x61, 0x63, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x65, + 0x78, 0x61, 0x63, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x53, 0x0a, 0x0e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, + 0x00, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x1a, 0x51, 0x0a, 0x0c, 0x45, 0x78, 0x61, 0x63, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x3a, 0x41, 0x9a, 0xc5, 0x88, 0x1e, 0x3c, 0x0a, 0x3a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x78, 0x61, 0x63, 0x74, 0x42, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x13, 0x0a, 0x0c, 0x62, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x18, + 0x0a, 0x16, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 
0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x29, 0x0a, 0x09, 0x44, 0x72, 0x61, 0x69, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, + 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x4f, 0x44, 0x49, 0x46, 0x59, 0x5f, 0x4f, 0x4e, 0x4c, + 0x59, 0x10, 0x01, 0x3a, 0x1c, 0x9a, 0xc5, 0x88, 0x1e, 0x17, 0x0a, 0x15, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x42, 0x14, 0x0a, 0x12, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x73, 0x70, + 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x0e, 0x10, 0x0f, 0x4a, 0x04, 0x08, + 0x17, 0x10, 0x18, 0x22, 0x11, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x4d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x22, 0x1b, 0x0a, 0x19, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x22, 0x14, 0x0a, 0x12, 0x41, 0x70, 0x69, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x42, 0x8d, 0x01, 0xba, 0x80, 0xc8, 0xd1, + 0x06, 0x02, 0x10, 0x02, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x4c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( @@ -1369,11 +1382,11 @@ var file_envoy_config_listener_v3_listener_proto_goTypes = []interface{}{ (*v31.CollectionEntry)(nil), // 13: xds.core.v3.CollectionEntry (*FilterChain)(nil), // 14: envoy.config.listener.v3.FilterChain (*v32.Matcher)(nil), // 15: xds.type.matcher.v3.Matcher - (*wrappers.BoolValue)(nil), // 16: google.protobuf.BoolValue - (*wrappers.UInt32Value)(nil), // 17: google.protobuf.UInt32Value + (*wrapperspb.BoolValue)(nil), // 16: google.protobuf.BoolValue + (*wrapperspb.UInt32Value)(nil), // 17: google.protobuf.UInt32Value (*v3.Metadata)(nil), // 18: envoy.config.core.v3.Metadata (*ListenerFilter)(nil), // 19: envoy.config.listener.v3.ListenerFilter - (*duration.Duration)(nil), // 20: google.protobuf.Duration + (*durationpb.Duration)(nil), // 20: google.protobuf.Duration (*v3.SocketOption)(nil), // 21: envoy.config.core.v3.SocketOption (v3.TrafficDirection)(0), // 22: envoy.config.core.v3.TrafficDirection (*UdpListenerConfig)(nil), // 23: envoy.config.listener.v3.UdpListenerConfig diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.validate.go index a7cfd8f6633..9fd429baed4 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.validate.go +++ 
b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/listener/v3/listener.proto @@ -1051,6 +1052,8 @@ func (m *Listener) validate(all bool) error { // no validation rules for IgnoreGlobalConnLimit + // no validation rules for BypassOverloadManager + switch v := m.ListenerSpecifier.(type) { case *Listener_InternalListener: if v == nil { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.go index 9b9efeb8ec0..b6dad7e30b4 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/listener/v3/listener_components.proto package listenerv3 @@ -12,11 +12,11 @@ import ( v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v31 "github.com/envoyproxy/go-control-plane/envoy/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any1 "github.com/golang/protobuf/ptypes/any" - duration "github.com/golang/protobuf/ptypes/duration" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -141,7 +141,7 @@ func (m *Filter) GetConfigType() isFilter_ConfigType { return nil } -func (x *Filter) GetTypedConfig() *any1.Any { +func (x *Filter) GetTypedConfig() *anypb.Any { if x, ok := x.GetConfigType().(*Filter_TypedConfig); ok { return x.TypedConfig } @@ -163,7 +163,7 @@ type Filter_TypedConfig struct { // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. // [#extension-category: envoy.filters.network] - TypedConfig *any1.Any `protobuf:"bytes,4,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *anypb.Any `protobuf:"bytes,4,opt,name=typed_config,json=typedConfig,proto3,oneof"` } type Filter_ConfigDiscovery struct { @@ -228,7 +228,7 @@ type FilterChainMatch struct { // Optional destination port to consider when use_original_dst is set on the // listener in determining a filter chain match. - DestinationPort *wrappers.UInt32Value `protobuf:"bytes,8,opt,name=destination_port,json=destinationPort,proto3" json:"destination_port,omitempty"` + DestinationPort *wrapperspb.UInt32Value `protobuf:"bytes,8,opt,name=destination_port,json=destinationPort,proto3" json:"destination_port,omitempty"` // If non-empty, an IP address and prefix length to match addresses when the // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. 
PrefixRanges []*v3.CidrRange `protobuf:"bytes,3,rep,name=prefix_ranges,json=prefixRanges,proto3" json:"prefix_ranges,omitempty"` @@ -237,7 +237,7 @@ type FilterChainMatch struct { // [#not-implemented-hide:] AddressSuffix string `protobuf:"bytes,4,opt,name=address_suffix,json=addressSuffix,proto3" json:"address_suffix,omitempty"` // [#not-implemented-hide:] - SuffixLen *wrappers.UInt32Value `protobuf:"bytes,5,opt,name=suffix_len,json=suffixLen,proto3" json:"suffix_len,omitempty"` + SuffixLen *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=suffix_len,json=suffixLen,proto3" json:"suffix_len,omitempty"` // The criteria is satisfied if the directly connected source IP address of the downstream // connection is contained in at least one of the specified subnets. If the parameter is not // specified or the list is empty, the directly connected source IP address is ignored. @@ -332,7 +332,7 @@ func (*FilterChainMatch) Descriptor() ([]byte, []int) { return file_envoy_config_listener_v3_listener_components_proto_rawDescGZIP(), []int{1} } -func (x *FilterChainMatch) GetDestinationPort() *wrappers.UInt32Value { +func (x *FilterChainMatch) GetDestinationPort() *wrapperspb.UInt32Value { if x != nil { return x.DestinationPort } @@ -353,7 +353,7 @@ func (x *FilterChainMatch) GetAddressSuffix() string { return "" } -func (x *FilterChainMatch) GetSuffixLen() *wrappers.UInt32Value { +func (x *FilterChainMatch) GetSuffixLen() *wrapperspb.UInt32Value { if x != nil { return x.SuffixLen } @@ -442,7 +442,7 @@ type FilterChain struct { // explicitly instead. // // Deprecated: Marked as deprecated in envoy/config/listener/v3/listener_components.proto. - UseProxyProto *wrappers.BoolValue `protobuf:"bytes,4,opt,name=use_proxy_proto,json=useProxyProto,proto3" json:"use_proxy_proto,omitempty"` + UseProxyProto *wrapperspb.BoolValue `protobuf:"bytes,4,opt,name=use_proxy_proto,json=useProxyProto,proto3" json:"use_proxy_proto,omitempty"` // [#not-implemented-hide:] filter chain metadata. Metadata *v3.Metadata `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata,omitempty"` // Optional custom transport socket implementation to use for downstream connections. @@ -455,7 +455,7 @@ type FilterChain struct { // If present and nonzero, the amount of time to allow incoming connections to complete any // transport socket negotiations. If this expires before the transport reports connection // establishment, the connection is summarily closed. - TransportSocketConnectTimeout *duration.Duration `protobuf:"bytes,9,opt,name=transport_socket_connect_timeout,json=transportSocketConnectTimeout,proto3" json:"transport_socket_connect_timeout,omitempty"` + TransportSocketConnectTimeout *durationpb.Duration `protobuf:"bytes,9,opt,name=transport_socket_connect_timeout,json=transportSocketConnectTimeout,proto3" json:"transport_socket_connect_timeout,omitempty"` // The unique name (or empty) by which this filter chain is known. // Note: :ref:`filter_chain_matcher // ` @@ -514,7 +514,7 @@ func (x *FilterChain) GetFilters() []*Filter { } // Deprecated: Marked as deprecated in envoy/config/listener/v3/listener_components.proto. 
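Filter.typed_config in this file is a google.protobuf.Any, the envelope Envoy uses to carry an arbitrary filter configuration together with its type URL; anypb.New packs a message and UnmarshalTo recovers it after checking that URL. A minimal sketch with a well-known type standing in for a real filter config, since any proto message packs the same way:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Pack: the Any records the payload bytes plus a type URL.
	payload := wrapperspb.String("example-config")
	a, err := anypb.New(payload)
	if err != nil {
		panic(err)
	}
	fmt.Println(a.GetTypeUrl()) // type.googleapis.com/google.protobuf.StringValue

	// Unpack: UnmarshalTo verifies the type URL before decoding.
	var out wrapperspb.StringValue
	if err := a.UnmarshalTo(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetValue()) // example-config
}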
-func (x *FilterChain) GetUseProxyProto() *wrappers.BoolValue { +func (x *FilterChain) GetUseProxyProto() *wrapperspb.BoolValue { if x != nil { return x.UseProxyProto } @@ -535,7 +535,7 @@ func (x *FilterChain) GetTransportSocket() *v3.TransportSocket { return nil } -func (x *FilterChain) GetTransportSocketConnectTimeout() *duration.Duration { +func (x *FilterChain) GetTransportSocketConnectTimeout() *durationpb.Duration { if x != nil { return x.TransportSocketConnectTimeout } @@ -780,7 +780,7 @@ func (m *ListenerFilter) GetConfigType() isListenerFilter_ConfigType { return nil } -func (x *ListenerFilter) GetTypedConfig() *any1.Any { +func (x *ListenerFilter) GetTypedConfig() *anypb.Any { if x, ok := x.GetConfigType().(*ListenerFilter_TypedConfig); ok { return x.TypedConfig } @@ -809,7 +809,7 @@ type ListenerFilter_TypedConfig struct { // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. // [#extension-category: envoy.filters.listener,envoy.filters.udp_listener] - TypedConfig *any1.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` } type ListenerFilter_ConfigDiscovery struct { @@ -839,7 +839,7 @@ type FilterChain_OnDemandConfiguration struct { // Rebuilding will wait until dependencies are ready, have failed, or this timeout is reached. // Upon failure or timeout, all connections related to this filter chain will be closed. // Rebuilding will start again on the next new connection. - RebuildTimeout *duration.Duration `protobuf:"bytes,1,opt,name=rebuild_timeout,json=rebuildTimeout,proto3" json:"rebuild_timeout,omitempty"` + RebuildTimeout *durationpb.Duration `protobuf:"bytes,1,opt,name=rebuild_timeout,json=rebuildTimeout,proto3" json:"rebuild_timeout,omitempty"` } func (x *FilterChain_OnDemandConfiguration) Reset() { @@ -874,7 +874,7 @@ func (*FilterChain_OnDemandConfiguration) Descriptor() ([]byte, []int) { return file_envoy_config_listener_v3_listener_components_proto_rawDescGZIP(), []int{2, 0} } -func (x *FilterChain_OnDemandConfiguration) GetRebuildTimeout() *duration.Duration { +func (x *FilterChain_OnDemandConfiguration) GetRebuildTimeout() *durationpb.Duration { if x != nil { return x.RebuildTimeout } @@ -1183,14 +1183,14 @@ var file_envoy_config_listener_v3_listener_components_proto_goTypes = []interfac (*ListenerFilter)(nil), // 5: envoy.config.listener.v3.ListenerFilter (*FilterChain_OnDemandConfiguration)(nil), // 6: envoy.config.listener.v3.FilterChain.OnDemandConfiguration (*ListenerFilterChainMatchPredicate_MatchSet)(nil), // 7: envoy.config.listener.v3.ListenerFilterChainMatchPredicate.MatchSet - (*any1.Any)(nil), // 8: google.protobuf.Any + (*anypb.Any)(nil), // 8: google.protobuf.Any (*v3.ExtensionConfigSource)(nil), // 9: envoy.config.core.v3.ExtensionConfigSource - (*wrappers.UInt32Value)(nil), // 10: google.protobuf.UInt32Value + (*wrapperspb.UInt32Value)(nil), // 10: google.protobuf.UInt32Value (*v3.CidrRange)(nil), // 11: envoy.config.core.v3.CidrRange - (*wrappers.BoolValue)(nil), // 12: google.protobuf.BoolValue + (*wrapperspb.BoolValue)(nil), // 12: google.protobuf.BoolValue (*v3.Metadata)(nil), // 13: envoy.config.core.v3.Metadata (*v3.TransportSocket)(nil), // 14: envoy.config.core.v3.TransportSocket - (*duration.Duration)(nil), // 15: google.protobuf.Duration + (*durationpb.Duration)(nil), // 15: google.protobuf.Duration (*v31.Int32Range)(nil), // 16: 
envoy.type.v3.Int32Range } var file_envoy_config_listener_v3_listener_components_proto_depIdxs = []int32{ diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.validate.go index 3241ede8c7a..4a18f8b084a 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/listener/v3/listener_components.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components_vtproto.pb.go new file mode 100644 index 00000000000..7896458b8a8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components_vtproto.pb.go @@ -0,0 +1,1213 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/listener/v3/listener_components.proto + +package listenerv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Filter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Filter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Filter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*Filter_ConfigDiscovery); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ConfigType.(*Filter_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Filter_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Filter_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Filter_ConfigDiscovery) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Filter_ConfigDiscovery) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ConfigDiscovery != nil { + if vtmsg, ok := interface{}(m.ConfigDiscovery).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConfigDiscovery) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *FilterChainMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilterChainMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FilterChainMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DirectSourcePrefixRanges) > 0 { + 
for iNdEx := len(m.DirectSourcePrefixRanges) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.DirectSourcePrefixRanges[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DirectSourcePrefixRanges[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + } + if m.SourceType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.SourceType)) + i-- + dAtA[i] = 0x60 + } + if len(m.ServerNames) > 0 { + for iNdEx := len(m.ServerNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ServerNames[iNdEx]) + copy(dAtA[i:], m.ServerNames[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServerNames[iNdEx]))) + i-- + dAtA[i] = 0x5a + } + } + if len(m.ApplicationProtocols) > 0 { + for iNdEx := len(m.ApplicationProtocols) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ApplicationProtocols[iNdEx]) + copy(dAtA[i:], m.ApplicationProtocols[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ApplicationProtocols[iNdEx]))) + i-- + dAtA[i] = 0x52 + } + } + if len(m.TransportProtocol) > 0 { + i -= len(m.TransportProtocol) + copy(dAtA[i:], m.TransportProtocol) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TransportProtocol))) + i-- + dAtA[i] = 0x4a + } + if m.DestinationPort != nil { + size, err := (*wrapperspb.UInt32Value)(m.DestinationPort).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if len(m.SourcePorts) > 0 { + var pksize2 int + for _, num := range m.SourcePorts { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num := range m.SourcePorts { + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x3a + } + if len(m.SourcePrefixRanges) > 0 { + for iNdEx := len(m.SourcePrefixRanges) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.SourcePrefixRanges[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SourcePrefixRanges[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + } + if m.SuffixLen != nil { + size, err := (*wrapperspb.UInt32Value)(m.SuffixLen).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if len(m.AddressSuffix) > 0 { + i -= len(m.AddressSuffix) + copy(dAtA[i:], m.AddressSuffix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AddressSuffix))) + i-- + dAtA[i] = 0x22 + } + if len(m.PrefixRanges) > 0 { + for iNdEx := len(m.PrefixRanges) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.PrefixRanges[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, 
err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.PrefixRanges[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + } + return len(dAtA) - i, nil +} + +func (m *FilterChain_OnDemandConfiguration) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilterChain_OnDemandConfiguration) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FilterChain_OnDemandConfiguration) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RebuildTimeout != nil { + size, err := (*durationpb.Duration)(m.RebuildTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FilterChain) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilterChain) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FilterChain) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TransportSocketConnectTimeout != nil { + size, err := (*durationpb.Duration)(m.TransportSocketConnectTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if m.OnDemandConfiguration != nil { + size, err := m.OnDemandConfiguration.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x3a + } + if m.TransportSocket != nil { + if vtmsg, ok := interface{}(m.TransportSocket).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TransportSocket) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); 
ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + if m.UseProxyProto != nil { + size, err := (*wrapperspb.BoolValue)(m.UseProxyProto).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.Filters) > 0 { + for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Filters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.FilterChainMatch != nil { + size, err := m.FilterChainMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListenerFilterChainMatchPredicate_MatchSet) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenerFilterChainMatchPredicate_MatchSet) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilterChainMatchPredicate_MatchSet) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Rules[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ListenerFilterChainMatchPredicate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenerFilterChainMatchPredicate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilterChainMatchPredicate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Rule.(*ListenerFilterChainMatchPredicate_DestinationPortRange); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*ListenerFilterChainMatchPredicate_AnyMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*ListenerFilterChainMatchPredicate_NotMatch); ok { + size, err := 
msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*ListenerFilterChainMatchPredicate_AndMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*ListenerFilterChainMatchPredicate_OrMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *ListenerFilterChainMatchPredicate_OrMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilterChainMatchPredicate_OrMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrMatch != nil { + size, err := m.OrMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *ListenerFilterChainMatchPredicate_AndMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilterChainMatchPredicate_AndMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AndMatch != nil { + size, err := m.AndMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *ListenerFilterChainMatchPredicate_NotMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilterChainMatchPredicate_NotMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NotMatch != nil { + size, err := m.NotMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *ListenerFilterChainMatchPredicate_AnyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilterChainMatchPredicate_AnyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.AnyMatch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *ListenerFilterChainMatchPredicate_DestinationPortRange) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilterChainMatchPredicate_DestinationPortRange) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DestinationPortRange != nil { + if vtmsg, ok := interface{}(m.DestinationPortRange).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := 
proto.Marshal(m.DestinationPortRange) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *ListenerFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenerFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*ListenerFilter_ConfigDiscovery); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.FilterDisabled != nil { + size, err := m.FilterDisabled.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if msg, ok := m.ConfigType.(*ListenerFilter_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListenerFilter_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilter_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *ListenerFilter_ConfigDiscovery) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilter_ConfigDiscovery) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ConfigDiscovery != nil { + if vtmsg, ok := interface{}(m.ConfigDiscovery).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConfigDiscovery) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Filter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + 
if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Filter_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Filter_ConfigDiscovery) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConfigDiscovery != nil { + if size, ok := interface{}(m.ConfigDiscovery).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ConfigDiscovery) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *FilterChainMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PrefixRanges) > 0 { + for _, e := range m.PrefixRanges { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.AddressSuffix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SuffixLen != nil { + l = (*wrapperspb.UInt32Value)(m.SuffixLen).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.SourcePrefixRanges) > 0 { + for _, e := range m.SourcePrefixRanges { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.SourcePorts) > 0 { + l = 0 + for _, e := range m.SourcePorts { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if m.DestinationPort != nil { + l = (*wrapperspb.UInt32Value)(m.DestinationPort).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.TransportProtocol) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ApplicationProtocols) > 0 { + for _, s := range m.ApplicationProtocols { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ServerNames) > 0 { + for _, s := range m.ServerNames { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.SourceType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.SourceType)) + } + if len(m.DirectSourcePrefixRanges) > 0 { + for _, e := range m.DirectSourcePrefixRanges { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *FilterChain_OnDemandConfiguration) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RebuildTimeout != nil { + l = (*durationpb.Duration)(m.RebuildTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *FilterChain) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FilterChainMatch != nil { + l = m.FilterChainMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Filters) > 0 { + for _, e := range m.Filters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.UseProxyProto != nil { + l = (*wrapperspb.BoolValue)(m.UseProxyProto).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } 
+ if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TransportSocket != nil { + if size, ok := interface{}(m.TransportSocket).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TransportSocket) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OnDemandConfiguration != nil { + l = m.OnDemandConfiguration.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TransportSocketConnectTimeout != nil { + l = (*durationpb.Duration)(m.TransportSocketConnectTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ListenerFilterChainMatchPredicate_MatchSet) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ListenerFilterChainMatchPredicate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Rule.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ListenerFilterChainMatchPredicate_OrMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrMatch != nil { + l = m.OrMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ListenerFilterChainMatchPredicate_AndMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AndMatch != nil { + l = m.AndMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ListenerFilterChainMatchPredicate_NotMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotMatch != nil { + l = m.NotMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ListenerFilterChainMatchPredicate_AnyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *ListenerFilterChainMatchPredicate_DestinationPortRange) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DestinationPortRange != nil { + if size, ok := interface{}(m.DestinationPortRange).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DestinationPortRange) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ListenerFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.FilterDisabled != nil { + l = m.FilterDisabled.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ListenerFilter_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m 
*ListenerFilter_ConfigDiscovery) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ConfigDiscovery != nil {
+		if size, ok := interface{}(m.ConfigDiscovery).(interface {
+			SizeVT() int
+		}); ok {
+			l = size.SizeVT()
+		} else {
+			l = proto.Size(m.ConfigDiscovery)
+		}
+		n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+	} else {
+		n += 2
+	}
+	return n
+}
diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_vtproto.pb.go
new file mode 100644
index 00000000000..cdccfea8e13
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_vtproto.pb.go
@@ -0,0 +1,1297 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/config/listener/v3/listener.proto
+
+package listenerv3
+
+import (
+	protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+	durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb"
+	wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb"
+	proto "google.golang.org/protobuf/proto"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *AdditionalAddress) MarshalVTStrict() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AdditionalAddress) MarshalToVTStrict(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AdditionalAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
+	if m.SocketOptions != nil {
+		if vtmsg, ok := interface{}(m.SocketOptions).(interface {
+			MarshalToSizedBufferVTStrict([]byte) (int, error)
+		}); ok {
+			size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+		} else {
+			encoded, err := proto.Marshal(m.SocketOptions)
+			if err != nil {
+				return 0, err
+			}
+			i -= len(encoded)
+			copy(dAtA[i:], encoded)
+			i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Address != nil {
+		if vtmsg, ok := interface{}(m.Address).(interface {
+			MarshalToSizedBufferVTStrict([]byte) (int, error)
+		}); ok {
+			size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+		} else {
+			encoded, err := proto.Marshal(m.Address)
+			if err != nil {
+				return 0, err
+			}
+			i -= len(encoded)
+			copy(dAtA[i:], encoded)
+			i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ListenerCollection) MarshalVTStrict() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+
} + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenerCollection) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerCollection) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Entries) > 0 { + for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Entries[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Entries[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Listener_DeprecatedV1) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Listener_DeprecatedV1) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listener_DeprecatedV1) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.BindToPort != nil { + size, err := (*wrapperspb.BoolValue)(m.BindToPort).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Listener_ConnectionBalanceConfig_ExactBalance) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Listener_ConnectionBalanceConfig_ExactBalance) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listener_ConnectionBalanceConfig_ExactBalance) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *Listener_ConnectionBalanceConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Listener_ConnectionBalanceConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*Listener_ConnectionBalanceConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.BalanceType.(*Listener_ConnectionBalanceConfig_ExtendBalance); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.BalanceType.(*Listener_ConnectionBalanceConfig_ExactBalance_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Listener_ConnectionBalanceConfig_ExactBalance_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listener_ConnectionBalanceConfig_ExactBalance_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExactBalance != nil { + size, err := m.ExactBalance.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Listener_ConnectionBalanceConfig_ExtendBalance) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listener_ConnectionBalanceConfig_ExtendBalance) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExtendBalance != nil { + if vtmsg, ok := interface{}(m.ExtendBalance).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ExtendBalance) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Listener_InternalListenerConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Listener_InternalListenerConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listener_InternalListenerConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *Listener) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Listener) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listener) 
MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.BypassOverloadManager { + i-- + if m.BypassOverloadManager { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x98 + } + if m.MaxConnectionsToAcceptPerSocketEvent != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxConnectionsToAcceptPerSocketEvent).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + if len(m.AdditionalAddresses) > 0 { + for iNdEx := len(m.AdditionalAddresses) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.AdditionalAddresses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + } + } + if m.FilterChainMatcher != nil { + if vtmsg, ok := interface{}(m.FilterChainMatcher).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.FilterChainMatcher) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + if m.IgnoreGlobalConnLimit { + i-- + if m.IgnoreGlobalConnLimit { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf8 + } + if m.EnableMptcp { + i-- + if m.EnableMptcp { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf0 + } + if m.EnableReusePort != nil { + size, err := (*wrapperspb.BoolValue)(m.EnableReusePort).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + } + if len(m.StatPrefix) > 0 { + i -= len(m.StatPrefix) + copy(dAtA[i:], m.StatPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StatPrefix))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + if msg, ok := m.ListenerSpecifier.(*Listener_InternalListener); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.BindToPort != nil { + size, err := (*wrapperspb.BoolValue)(m.BindToPort).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + if m.DefaultFilterChain != nil { + size, err := m.DefaultFilterChain.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + if m.TcpBacklogSize != nil { + size, err := (*wrapperspb.UInt32Value)(m.TcpBacklogSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.AccessLog) > 0 { + for iNdEx := len(m.AccessLog) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := 
interface{}(m.AccessLog[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AccessLog[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + } + if m.ReusePort { + i-- + if m.ReusePort { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if m.ConnectionBalanceConfig != nil { + size, err := m.ConnectionBalanceConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if m.ApiListener != nil { + size, err := m.ApiListener.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if m.UdpListenerConfig != nil { + size, err := m.UdpListenerConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if m.ContinueOnListenerFiltersTimeout { + i-- + if m.ContinueOnListenerFiltersTimeout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if m.TrafficDirection != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TrafficDirection)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.ListenerFiltersTimeout != nil { + size, err := (*durationpb.Duration)(m.ListenerFiltersTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + if len(m.SocketOptions) > 0 { + for iNdEx := len(m.SocketOptions) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.SocketOptions[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SocketOptions[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + } + if m.TcpFastOpenQueueLength != nil { + size, err := (*wrapperspb.UInt32Value)(m.TcpFastOpenQueueLength).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if m.Freebind != nil { + size, err := (*wrapperspb.BoolValue)(m.Freebind).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if m.Transparent != nil { + size, err := (*wrapperspb.BoolValue)(m.Transparent).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if len(m.ListenerFilters) > 0 { + for iNdEx := len(m.ListenerFilters) 
- 1; iNdEx >= 0; iNdEx-- { + size, err := m.ListenerFilters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + } + if m.DrainType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DrainType)) + i-- + dAtA[i] = 0x40 + } + if m.DeprecatedV1 != nil { + size, err := m.DeprecatedV1.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + if m.PerConnectionBufferLimitBytes != nil { + size, err := (*wrapperspb.UInt32Value)(m.PerConnectionBufferLimitBytes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.UseOriginalDst != nil { + size, err := (*wrapperspb.BoolValue)(m.UseOriginalDst).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.FilterChains) > 0 { + for iNdEx := len(m.FilterChains) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.FilterChains[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Listener_InternalListener) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listener_InternalListener) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.InternalListener != nil { + size, err := m.InternalListener.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + return len(dAtA) - i, nil +} +func (m *ListenerManager) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenerManager) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *ValidationListenerManager) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidationListenerManager) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValidationListenerManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *ApiListenerManager) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ApiListenerManager) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ApiListenerManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *AdditionalAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SocketOptions != nil { + if size, ok := interface{}(m.SocketOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SocketOptions) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ListenerCollection) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Entries) > 0 { + for _, e := range m.Entries { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Listener_DeprecatedV1) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BindToPort != nil { + l = (*wrapperspb.BoolValue)(m.BindToPort).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Listener_ConnectionBalanceConfig_ExactBalance) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *Listener_ConnectionBalanceConfig) SizeVT() 
(n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.BalanceType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Listener_ConnectionBalanceConfig_ExactBalance_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExactBalance != nil { + l = m.ExactBalance.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Listener_ConnectionBalanceConfig_ExtendBalance) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExtendBalance != nil { + if size, ok := interface{}(m.ExtendBalance).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ExtendBalance) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Listener_InternalListenerConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *Listener) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.FilterChains) > 0 { + for _, e := range m.FilterChains { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.UseOriginalDst != nil { + l = (*wrapperspb.BoolValue)(m.UseOriginalDst).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PerConnectionBufferLimitBytes != nil { + l = (*wrapperspb.UInt32Value)(m.PerConnectionBufferLimitBytes).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DeprecatedV1 != nil { + l = m.DeprecatedV1.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DrainType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.DrainType)) + } + if len(m.ListenerFilters) > 0 { + for _, e := range m.ListenerFilters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Transparent != nil { + l = (*wrapperspb.BoolValue)(m.Transparent).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Freebind != nil { + l = (*wrapperspb.BoolValue)(m.Freebind).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TcpFastOpenQueueLength != nil { + l = (*wrapperspb.UInt32Value)(m.TcpFastOpenQueueLength).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.SocketOptions) > 0 { + for _, e := range m.SocketOptions { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.ListenerFiltersTimeout != nil { + l = (*durationpb.Duration)(m.ListenerFiltersTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TrafficDirection != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.TrafficDirection)) + } + if m.ContinueOnListenerFiltersTimeout { + n += 3 + } + if m.UdpListenerConfig != nil { + l = m.UdpListenerConfig.SizeVT() + n += 2 
+ l + protohelpers.SizeOfVarint(uint64(l))
+	}
+	if m.ApiListener != nil {
+		l = m.ApiListener.SizeVT()
+		n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+	}
+	if m.ConnectionBalanceConfig != nil {
+		l = m.ConnectionBalanceConfig.SizeVT()
+		n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+	}
+	if m.ReusePort {
+		n += 3
+	}
+	if len(m.AccessLog) > 0 {
+		for _, e := range m.AccessLog {
+			if size, ok := interface{}(e).(interface {
+				SizeVT() int
+			}); ok {
+				l = size.SizeVT()
+			} else {
+				l = proto.Size(e)
+			}
+			n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+		}
+	}
+	if m.TcpBacklogSize != nil {
+		l = (*wrapperspb.UInt32Value)(m.TcpBacklogSize).SizeVT()
+		n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+	}
+	if m.DefaultFilterChain != nil {
+		l = m.DefaultFilterChain.SizeVT()
+		n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+	}
+	if m.BindToPort != nil {
+		l = (*wrapperspb.BoolValue)(m.BindToPort).SizeVT()
+		n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+	}
+	if vtmsg, ok := m.ListenerSpecifier.(interface{ SizeVT() int }); ok {
+		n += vtmsg.SizeVT()
+	}
+	l = len(m.StatPrefix)
+	if l > 0 {
+		n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+	}
+	if m.EnableReusePort != nil {
+		l = (*wrapperspb.BoolValue)(m.EnableReusePort).SizeVT()
+		n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+	}
+	if m.EnableMptcp {
+		n += 3
+	}
+	if m.IgnoreGlobalConnLimit {
+		n += 3
+	}
+	if m.FilterChainMatcher != nil {
+		if size, ok := interface{}(m.FilterChainMatcher).(interface {
+			SizeVT() int
+		}); ok {
+			l = size.SizeVT()
+		} else {
+			l = proto.Size(m.FilterChainMatcher)
+		}
+		n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+	}
+	if len(m.AdditionalAddresses) > 0 {
+		for _, e := range m.AdditionalAddresses {
+			l = e.SizeVT()
+			n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+		}
+	}
+	if m.MaxConnectionsToAcceptPerSocketEvent != nil {
+		l = (*wrapperspb.UInt32Value)(m.MaxConnectionsToAcceptPerSocketEvent).SizeVT()
+		n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+	}
+	if m.BypassOverloadManager {
+		n += 3
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *Listener_InternalListener) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.InternalListener != nil {
+		l = m.InternalListener.SizeVT()
+		n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+	} else {
+		n += 3
+	}
+	return n
+}
+func (m *ListenerManager) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *ValidationListenerManager) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *ApiListenerManager) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += len(m.unknownFields)
+	return n
+}
diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.go
index 90d5204748c..a2f3a48e231 100644
--- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.go
+++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.go
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // protoc-gen-go v1.30.0
-// protoc v4.23.4
+// protoc v5.26.1
 // source: envoy/config/listener/v3/quic_config.proto
 
 package listenerv3
 
@@ -11,10 +11,10 @@ import (
 	_ "github.com/cncf/xds/go/xds/annotations/v3"
 	v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
 	_ "github.com/envoyproxy/protoc-gen-validate/validate"
-	duration "github.com/golang/protobuf/ptypes/duration"
-	wrappers "github.com/golang/protobuf/ptypes/wrappers"
 	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
 	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	durationpb "google.golang.org/protobuf/types/known/durationpb"
+	wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
 	reflect "reflect"
 	sync "sync"
 )
@@ -27,7 +27,7 @@ const (
 )
 
 // Configuration specific to the UDP QUIC listener.
-// [#next-free-field: 10]
+// [#next-free-field: 12]
 type QuicProtocolOptions struct {
 	state protoimpl.MessageState
 	sizeCache protoimpl.SizeCache
@@ -38,11 +38,11 @@ type QuicProtocolOptions struct {
 	// no network activity.
 	//
 	// If it is less than 1ms, Envoy will use 1ms. 300000ms if not specified.
-	IdleTimeout *duration.Duration `protobuf:"bytes,2,opt,name=idle_timeout,json=idleTimeout,proto3" json:"idle_timeout,omitempty"`
+	IdleTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=idle_timeout,json=idleTimeout,proto3" json:"idle_timeout,omitempty"`
 	// Connection timeout in milliseconds before the crypto handshake is finished.
 	//
 	// If it is less than 5000ms, Envoy will use 5000ms. 20000ms if not specified.
-	CryptoHandshakeTimeout *duration.Duration `protobuf:"bytes,3,opt,name=crypto_handshake_timeout,json=cryptoHandshakeTimeout,proto3" json:"crypto_handshake_timeout,omitempty"`
+	CryptoHandshakeTimeout *durationpb.Duration `protobuf:"bytes,3,opt,name=crypto_handshake_timeout,json=cryptoHandshakeTimeout,proto3" json:"crypto_handshake_timeout,omitempty"`
 	// Runtime flag that controls whether the listener is enabled or not. If not specified, defaults
 	// to enabled.
 	Enabled *v3.RuntimeFeatureFlag `protobuf:"bytes,4,opt,name=enabled,proto3" json:"enabled,omitempty"`
@@ -53,7 +53,7 @@ type QuicProtocolOptions struct {
 	// packets to read in each read event will be 32 * N.
 	// The actual number of packets to read in total by the UDP listener is also
 	// bound by 6000, regardless of this field or how many connections there are.
-	PacketsToReadToConnectionCountRatio *wrappers.UInt32Value `protobuf:"bytes,5,opt,name=packets_to_read_to_connection_count_ratio,json=packetsToReadToConnectionCountRatio,proto3" json:"packets_to_read_to_connection_count_ratio,omitempty"`
+	PacketsToReadToConnectionCountRatio *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=packets_to_read_to_connection_count_ratio,json=packetsToReadToConnectionCountRatio,proto3" json:"packets_to_read_to_connection_count_ratio,omitempty"`
 	// Configure which implementation of “quic::QuicCryptoClientStreamBase“ to be used for this listener.
 	// If not specified the :ref:`QUICHE default one configured by ` will be used.
 	// [#extension-category: envoy.quic.server.crypto_stream]
@@ -67,10 +67,17 @@ type QuicProtocolOptions struct {
 	// [#extension-category: envoy.quic.connection_id_generator]
 	ConnectionIdGeneratorConfig *v3.TypedExtensionConfig `protobuf:"bytes,8,opt,name=connection_id_generator_config,json=connectionIdGeneratorConfig,proto3" json:"connection_id_generator_config,omitempty"`
 	// Configure the server's preferred address to advertise so that client can migrate to it. See :ref:`example ` which configures a pair of v4 and v6 preferred addresses.
-	// The current QUICHE implementation will advertise only one of the preferred IPv4 and IPv6 addresses based on the address family the client initially connects with, and only if the client is also QUICHE-based.
+	// The current QUICHE implementation will advertise only one of the preferred IPv4 and IPv6 addresses based on the address family the client initially connects with.
 	// If not specified, Envoy will not advertise any server's preferred address.
 	// [#extension-category: envoy.quic.server_preferred_address]
 	ServerPreferredAddressConfig *v3.TypedExtensionConfig `protobuf:"bytes,9,opt,name=server_preferred_address_config,json=serverPreferredAddressConfig,proto3" json:"server_preferred_address_config,omitempty"`
+	// Configure the server to send transport parameter `disable_active_migration `_.
+	// Defaults to false (do not send this transport parameter).
+	SendDisableActiveMigration *wrapperspb.BoolValue `protobuf:"bytes,10,opt,name=send_disable_active_migration,json=sendDisableActiveMigration,proto3" json:"send_disable_active_migration,omitempty"`
+	// Configure which implementation of “quic::QuicConnectionDebugVisitor“ to be used for this listener.
+	// If not specified, no debug visitor will be attached to connections.
+	// [#extension-category: envoy.quic.connection_debug_visitor]
+	ConnectionDebugVisitorConfig *v3.TypedExtensionConfig `protobuf:"bytes,11,opt,name=connection_debug_visitor_config,json=connectionDebugVisitorConfig,proto3" json:"connection_debug_visitor_config,omitempty"`
 }
 
 func (x *QuicProtocolOptions) Reset() {
@@ -112,14 +119,14 @@ func (x *QuicProtocolOptions) GetQuicProtocolOptions() *v3.QuicProtocolOptions {
 	return nil
 }
 
-func (x *QuicProtocolOptions) GetIdleTimeout() *duration.Duration {
+func (x *QuicProtocolOptions) GetIdleTimeout() *durationpb.Duration {
 	if x != nil {
 		return x.IdleTimeout
 	}
 	return nil
 }
 
-func (x *QuicProtocolOptions) GetCryptoHandshakeTimeout() *duration.Duration {
+func (x *QuicProtocolOptions) GetCryptoHandshakeTimeout() *durationpb.Duration {
 	if x != nil {
 		return x.CryptoHandshakeTimeout
 	}
@@ -133,7 +140,7 @@ func (x *QuicProtocolOptions) GetEnabled() *v3.RuntimeFeatureFlag {
 	return nil
 }
 
-func (x *QuicProtocolOptions) GetPacketsToReadToConnectionCountRatio() *wrappers.UInt32Value {
+func (x *QuicProtocolOptions) GetPacketsToReadToConnectionCountRatio() *wrapperspb.UInt32Value {
 	if x != nil {
 		return x.PacketsToReadToConnectionCountRatio
 	}
@@ -168,6 +175,20 @@ func (x *QuicProtocolOptions) GetServerPreferredAddressConfig() *v3.TypedExtensi
 	return nil
 }
 
+func (x *QuicProtocolOptions) GetSendDisableActiveMigration() *wrapperspb.BoolValue {
+	if x != nil {
+		return x.SendDisableActiveMigration
+	}
+	return nil
+}
+
+func (x *QuicProtocolOptions) GetConnectionDebugVisitorConfig() *v3.TypedExtensionConfig {
+	if x != nil {
+		return x.ConnectionDebugVisitorConfig
+	}
+	return nil
+}
+
 var File_envoy_config_listener_v3_quic_config_proto protoreflect.FileDescriptor
 
 var file_envoy_config_listener_v3_quic_config_proto_rawDesc = []byte{
@@ -193,8 +214,8 @@ var file_envoy_config_listener_v3_quic_config_proto_rawDesc = []byte{
 	0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
 	0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e,
 	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f,
-	0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa4, - 0x07, 0x0a, 0x13, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf6, + 0x08, 0x0a, 0x13, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x15, 0x71, 0x75, 0x69, 0x63, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, @@ -249,19 +270,33 @@ var file_envoy_config_listener_v3_quic_config_proto_rawDesc = []byte{ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x1c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, - 0x72, 0x2e, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x8f, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, - 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x51, 0x75, 0x69, 0x63, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, - 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, - 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6c, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x66, 0x69, 0x67, 0x12, 0x5d, 0x0a, 0x1d, 0x73, 0x65, 0x6e, 0x64, 0x5f, 0x64, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, + 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x73, 0x65, 0x6e, 0x64, 0x44, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x71, 0x0a, 0x1f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x76, 0x69, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x56, 0x69, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x43, + 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x2e, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x8f, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, + 0x10, 0x02, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x51, 0x75, 0x69, 0x63, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( @@ -280,26 +315,29 @@ var file_envoy_config_listener_v3_quic_config_proto_msgTypes = make([]protoimpl. var file_envoy_config_listener_v3_quic_config_proto_goTypes = []interface{}{ (*QuicProtocolOptions)(nil), // 0: envoy.config.listener.v3.QuicProtocolOptions (*v3.QuicProtocolOptions)(nil), // 1: envoy.config.core.v3.QuicProtocolOptions - (*duration.Duration)(nil), // 2: google.protobuf.Duration + (*durationpb.Duration)(nil), // 2: google.protobuf.Duration (*v3.RuntimeFeatureFlag)(nil), // 3: envoy.config.core.v3.RuntimeFeatureFlag - (*wrappers.UInt32Value)(nil), // 4: google.protobuf.UInt32Value + (*wrapperspb.UInt32Value)(nil), // 4: google.protobuf.UInt32Value (*v3.TypedExtensionConfig)(nil), // 5: envoy.config.core.v3.TypedExtensionConfig + (*wrapperspb.BoolValue)(nil), // 6: google.protobuf.BoolValue } var file_envoy_config_listener_v3_quic_config_proto_depIdxs = []int32{ - 1, // 0: envoy.config.listener.v3.QuicProtocolOptions.quic_protocol_options:type_name -> envoy.config.core.v3.QuicProtocolOptions - 2, // 1: envoy.config.listener.v3.QuicProtocolOptions.idle_timeout:type_name -> google.protobuf.Duration - 2, // 2: envoy.config.listener.v3.QuicProtocolOptions.crypto_handshake_timeout:type_name -> google.protobuf.Duration - 3, // 3: envoy.config.listener.v3.QuicProtocolOptions.enabled:type_name -> envoy.config.core.v3.RuntimeFeatureFlag - 4, // 4: envoy.config.listener.v3.QuicProtocolOptions.packets_to_read_to_connection_count_ratio:type_name -> google.protobuf.UInt32Value - 5, // 5: envoy.config.listener.v3.QuicProtocolOptions.crypto_stream_config:type_name -> envoy.config.core.v3.TypedExtensionConfig - 5, // 6: envoy.config.listener.v3.QuicProtocolOptions.proof_source_config:type_name -> envoy.config.core.v3.TypedExtensionConfig - 5, // 7: envoy.config.listener.v3.QuicProtocolOptions.connection_id_generator_config:type_name -> envoy.config.core.v3.TypedExtensionConfig - 5, // 8: envoy.config.listener.v3.QuicProtocolOptions.server_preferred_address_config:type_name -> envoy.config.core.v3.TypedExtensionConfig - 9, // [9:9] is the sub-list for method output_type - 9, // [9:9] is the sub-list for method input_type - 9, // [9:9] is the sub-list for extension type_name - 9, // [9:9] is the sub-list for extension extendee - 0, // [0:9] 
is the sub-list for field type_name + 1, // 0: envoy.config.listener.v3.QuicProtocolOptions.quic_protocol_options:type_name -> envoy.config.core.v3.QuicProtocolOptions + 2, // 1: envoy.config.listener.v3.QuicProtocolOptions.idle_timeout:type_name -> google.protobuf.Duration + 2, // 2: envoy.config.listener.v3.QuicProtocolOptions.crypto_handshake_timeout:type_name -> google.protobuf.Duration + 3, // 3: envoy.config.listener.v3.QuicProtocolOptions.enabled:type_name -> envoy.config.core.v3.RuntimeFeatureFlag + 4, // 4: envoy.config.listener.v3.QuicProtocolOptions.packets_to_read_to_connection_count_ratio:type_name -> google.protobuf.UInt32Value + 5, // 5: envoy.config.listener.v3.QuicProtocolOptions.crypto_stream_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 5, // 6: envoy.config.listener.v3.QuicProtocolOptions.proof_source_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 5, // 7: envoy.config.listener.v3.QuicProtocolOptions.connection_id_generator_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 5, // 8: envoy.config.listener.v3.QuicProtocolOptions.server_preferred_address_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 6, // 9: envoy.config.listener.v3.QuicProtocolOptions.send_disable_active_migration:type_name -> google.protobuf.BoolValue + 5, // 10: envoy.config.listener.v3.QuicProtocolOptions.connection_debug_visitor_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 11, // [11:11] is the sub-list for method output_type + 11, // [11:11] is the sub-list for method input_type + 11, // [11:11] is the sub-list for extension type_name + 11, // [11:11] is the sub-list for extension extendee + 0, // [0:11] is the sub-list for field type_name } func init() { file_envoy_config_listener_v3_quic_config_proto_init() } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.validate.go index ba21cb6cbd9..efabacb665d 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. 
// source: envoy/config/listener/v3/quic_config.proto @@ -304,6 +305,64 @@ func (m *QuicProtocolOptions) validate(all bool) error { } } + if all { + switch v := interface{}(m.GetSendDisableActiveMigration()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "SendDisableActiveMigration", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "SendDisableActiveMigration", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSendDisableActiveMigration()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "SendDisableActiveMigration", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetConnectionDebugVisitorConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ConnectionDebugVisitorConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ConnectionDebugVisitorConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConnectionDebugVisitorConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "ConnectionDebugVisitorConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + if len(errors) > 0 { return QuicProtocolOptionsMultiError(errors) } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config_vtproto.pb.go new file mode 100644 index 00000000000..4b0826804af --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config_vtproto.pb.go @@ -0,0 +1,345 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/listener/v3/quic_config.proto + +package listenerv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *QuicProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QuicProtocolOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *QuicProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ConnectionDebugVisitorConfig != nil { + if vtmsg, ok := interface{}(m.ConnectionDebugVisitorConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConnectionDebugVisitorConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x5a + } + if m.SendDisableActiveMigration != nil { + size, err := (*wrapperspb.BoolValue)(m.SendDisableActiveMigration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if m.ServerPreferredAddressConfig != nil { + if vtmsg, ok := interface{}(m.ServerPreferredAddressConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ServerPreferredAddressConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + if m.ConnectionIdGeneratorConfig != nil { + if vtmsg, ok := interface{}(m.ConnectionIdGeneratorConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConnectionIdGeneratorConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + if m.ProofSourceConfig != nil { + if vtmsg, ok := interface{}(m.ProofSourceConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ProofSourceConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + if m.CryptoStreamConfig != nil { + if vtmsg, ok := interface{}(m.CryptoStreamConfig).(interface { + 
MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CryptoStreamConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + if m.PacketsToReadToConnectionCountRatio != nil { + size, err := (*wrapperspb.UInt32Value)(m.PacketsToReadToConnectionCountRatio).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.Enabled != nil { + if vtmsg, ok := interface{}(m.Enabled).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Enabled) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.CryptoHandshakeTimeout != nil { + size, err := (*durationpb.Duration)(m.CryptoHandshakeTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.IdleTimeout != nil { + size, err := (*durationpb.Duration)(m.IdleTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.QuicProtocolOptions != nil { + if vtmsg, ok := interface{}(m.QuicProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.QuicProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QuicProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.QuicProtocolOptions != nil { + if size, ok := interface{}(m.QuicProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.QuicProtocolOptions) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IdleTimeout != nil { + l = (*durationpb.Duration)(m.IdleTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CryptoHandshakeTimeout != nil { + l = (*durationpb.Duration)(m.CryptoHandshakeTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Enabled != nil { + if size, ok := interface{}(m.Enabled).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Enabled) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PacketsToReadToConnectionCountRatio != nil { + l = (*wrapperspb.UInt32Value)(m.PacketsToReadToConnectionCountRatio).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CryptoStreamConfig != nil { + if size, 
ok := interface{}(m.CryptoStreamConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CryptoStreamConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ProofSourceConfig != nil { + if size, ok := interface{}(m.ProofSourceConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ProofSourceConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectionIdGeneratorConfig != nil { + if size, ok := interface{}(m.ConnectionIdGeneratorConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ConnectionIdGeneratorConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ServerPreferredAddressConfig != nil { + if size, ok := interface{}(m.ServerPreferredAddressConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ServerPreferredAddressConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SendDisableActiveMigration != nil { + l = (*wrapperspb.BoolValue)(m.SendDisableActiveMigration).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectionDebugVisitorConfig != nil { + if size, ok := interface{}(m.ConnectionDebugVisitorConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ConnectionDebugVisitorConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.go index 9998da13396..a5fb1622238 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/listener/v3/udp_listener_config.proto package listenerv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.validate.go index cee10864a61..e52578e9817 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. 
// source: envoy/config/listener/v3/udp_listener_config.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config_vtproto.pb.go new file mode 100644 index 00000000000..32625933541 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config_vtproto.pb.go @@ -0,0 +1,184 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/listener/v3/udp_listener_config.proto + +package listenerv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *UdpListenerConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UdpListenerConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UdpListenerConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.UdpPacketPacketWriterConfig != nil { + if vtmsg, ok := interface{}(m.UdpPacketPacketWriterConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UdpPacketPacketWriterConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + if m.QuicOptions != nil { + size, err := m.QuicOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.DownstreamSocketConfig != nil { + if vtmsg, ok := interface{}(m.DownstreamSocketConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DownstreamSocketConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} + +func (m *ActiveRawUdpListenerConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActiveRawUdpListenerConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ActiveRawUdpListenerConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *UdpListenerConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DownstreamSocketConfig != nil { + if size, ok := interface{}(m.DownstreamSocketConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DownstreamSocketConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.QuicOptions != nil { + l = m.QuicOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UdpPacketPacketWriterConfig != nil { + if size, ok := interface{}(m.UdpPacketPacketWriterConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UdpPacketPacketWriterConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ActiveRawUdpListenerConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.go index 6e71666d77a..7ff4dff1654 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/metrics/v3/metrics_service.proto package metricsv3 @@ -10,9 +10,9 @@ import ( _ "github.com/cncf/xds/go/udpa/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -89,7 +89,6 @@ func (HistogramEmitMode) EnumDescriptor() ([]byte, []int) { // - name: envoy.stat_sinks.metrics_service // typed_config: // "@type": type.googleapis.com/envoy.config.metrics.v3.MetricsServiceConfig -// transport_api_version: V3 // // [#extension: envoy.stat_sinks.metrics_service] // [#next-free-field: 6] @@ -107,7 +106,7 @@ type MetricsServiceConfig struct { // counter value is reported. Defaults to false. // Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the // sink will take updates from the :ref:`MetricsResponse `. 
- ReportCountersAsDeltas *wrappers.BoolValue `protobuf:"bytes,2,opt,name=report_counters_as_deltas,json=reportCountersAsDeltas,proto3" json:"report_counters_as_deltas,omitempty"` + ReportCountersAsDeltas *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=report_counters_as_deltas,json=reportCountersAsDeltas,proto3" json:"report_counters_as_deltas,omitempty"` // If true, metrics will have their tags emitted as labels on the metrics objects sent to the MetricsService, // and the tag extracted name will be used instead of the full name, which may contain values used by the tag // extractor or additional tags added during stats creation. @@ -162,7 +161,7 @@ func (x *MetricsServiceConfig) GetTransportApiVersion() v3.ApiVersion { return v3.ApiVersion(0) } -func (x *MetricsServiceConfig) GetReportCountersAsDeltas() *wrappers.BoolValue { +func (x *MetricsServiceConfig) GetReportCountersAsDeltas() *wrapperspb.BoolValue { if x != nil { return x.ReportCountersAsDeltas } @@ -270,7 +269,7 @@ var file_envoy_config_metrics_v3_metrics_service_proto_goTypes = []interface{}{ (*MetricsServiceConfig)(nil), // 1: envoy.config.metrics.v3.MetricsServiceConfig (*v3.GrpcService)(nil), // 2: envoy.config.core.v3.GrpcService (v3.ApiVersion)(0), // 3: envoy.config.core.v3.ApiVersion - (*wrappers.BoolValue)(nil), // 4: google.protobuf.BoolValue + (*wrapperspb.BoolValue)(nil), // 4: google.protobuf.BoolValue } var file_envoy_config_metrics_v3_metrics_service_proto_depIdxs = []int32{ 2, // 0: envoy.config.metrics.v3.MetricsServiceConfig.grpc_service:type_name -> envoy.config.core.v3.GrpcService diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.validate.go index b91adeefdb0..803f930ba72 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/metrics/v3/metrics_service.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service_vtproto.pb.go new file mode 100644 index 00000000000..b3f692c45ca --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service_vtproto.pb.go @@ -0,0 +1,139 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/metrics/v3/metrics_service.proto + +package metricsv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *MetricsServiceConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricsServiceConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetricsServiceConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.HistogramEmitMode != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HistogramEmitMode)) + i-- + dAtA[i] = 0x28 + } + if m.EmitTagsAsLabels { + i-- + if m.EmitTagsAsLabels { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.TransportApiVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TransportApiVersion)) + i-- + dAtA[i] = 0x18 + } + if m.ReportCountersAsDeltas != nil { + size, err := (*wrapperspb.BoolValue)(m.ReportCountersAsDeltas).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.GrpcService != nil { + if vtmsg, ok := interface{}(m.GrpcService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.GrpcService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MetricsServiceConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GrpcService != nil { + if size, ok := interface{}(m.GrpcService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.GrpcService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ReportCountersAsDeltas != nil { + l = (*wrapperspb.BoolValue)(m.ReportCountersAsDeltas).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TransportApiVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TransportApiVersion)) + } + if m.EmitTagsAsLabels { + n += 2 + } + if m.HistogramEmitMode != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HistogramEmitMode)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.go index de4a360ce4a..02689900282 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/metrics/v3/stats.proto package metricsv3 @@ -11,10 +11,10 @@ import ( v31 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any1 "github.com/golang/protobuf/ptypes/any" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -93,7 +93,7 @@ func (m *StatsSink) GetConfigType() isStatsSink_ConfigType { return nil } -func (x *StatsSink) GetTypedConfig() *any1.Any { +func (x *StatsSink) GetTypedConfig() *anypb.Any { if x, ok := x.GetConfigType().(*StatsSink_TypedConfig); ok { return x.TypedConfig } @@ -105,7 +105,7 @@ type isStatsSink_ConfigType interface { } type StatsSink_TypedConfig struct { - TypedConfig *any1.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` } func (*StatsSink_TypedConfig) isStatsSink_ConfigType() {} @@ -136,7 +136,7 @@ type StatsConfig struct { // default tags in Envoy. // // If not provided, the value is assumed to be true. - UseAllDefaultTags *wrappers.BoolValue `protobuf:"bytes,2,opt,name=use_all_default_tags,json=useAllDefaultTags,proto3" json:"use_all_default_tags,omitempty"` + UseAllDefaultTags *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=use_all_default_tags,json=useAllDefaultTags,proto3" json:"use_all_default_tags,omitempty"` // Inclusion/exclusion matcher for stat name creation. If not provided, all stats are instantiated // as normal. Preventing the instantiation of certain families of stats can improve memory // performance for Envoys running especially large configs. @@ -216,7 +216,7 @@ func (x *StatsConfig) GetStatsTags() []*TagSpecifier { return nil } -func (x *StatsConfig) GetUseAllDefaultTags() *wrappers.BoolValue { +func (x *StatsConfig) GetUseAllDefaultTags() *wrapperspb.BoolValue { if x != nil { return x.UseAllDefaultTags } @@ -710,7 +710,7 @@ type DogStatsdSink struct { // size should not exceed your network's MTU. // // Note that this value may not be respected if smaller than a single metric. 
- MaxBytesPerDatagram *wrappers.UInt64Value `protobuf:"bytes,4,opt,name=max_bytes_per_datagram,json=maxBytesPerDatagram,proto3" json:"max_bytes_per_datagram,omitempty"` + MaxBytesPerDatagram *wrapperspb.UInt64Value `protobuf:"bytes,4,opt,name=max_bytes_per_datagram,json=maxBytesPerDatagram,proto3" json:"max_bytes_per_datagram,omitempty"` } func (x *DogStatsdSink) Reset() { @@ -766,7 +766,7 @@ func (x *DogStatsdSink) GetPrefix() string { return "" } -func (x *DogStatsdSink) GetMaxBytesPerDatagram() *wrappers.UInt64Value { +func (x *DogStatsdSink) GetMaxBytesPerDatagram() *wrapperspb.UInt64Value { if x != nil { return x.MaxBytesPerDatagram } @@ -1016,12 +1016,12 @@ var file_envoy_config_metrics_v3_stats_proto_goTypes = []interface{}{ (*StatsdSink)(nil), // 5: envoy.config.metrics.v3.StatsdSink (*DogStatsdSink)(nil), // 6: envoy.config.metrics.v3.DogStatsdSink (*HystrixSink)(nil), // 7: envoy.config.metrics.v3.HystrixSink - (*any1.Any)(nil), // 8: google.protobuf.Any - (*wrappers.BoolValue)(nil), // 9: google.protobuf.BoolValue + (*anypb.Any)(nil), // 8: google.protobuf.Any + (*wrapperspb.BoolValue)(nil), // 9: google.protobuf.BoolValue (*v3.ListStringMatcher)(nil), // 10: envoy.type.matcher.v3.ListStringMatcher (*v3.StringMatcher)(nil), // 11: envoy.type.matcher.v3.StringMatcher (*v31.Address)(nil), // 12: envoy.config.core.v3.Address - (*wrappers.UInt64Value)(nil), // 13: google.protobuf.UInt64Value + (*wrapperspb.UInt64Value)(nil), // 13: google.protobuf.UInt64Value } var file_envoy_config_metrics_v3_stats_proto_depIdxs = []int32{ 8, // 0: envoy.config.metrics.v3.StatsSink.typed_config:type_name -> google.protobuf.Any diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.validate.go index a0fdd84c982..60e9fdaaf4f 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/metrics/v3/stats.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats_vtproto.pb.go new file mode 100644 index 00000000000..3f53827ea0d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats_vtproto.pb.go @@ -0,0 +1,976 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/metrics/v3/stats.proto + +package metricsv3 + +import ( + binary "encoding/binary" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *StatsSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatsSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*StatsSink_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StatsSink_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsSink_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *StatsConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatsConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.HistogramBucketSettings) > 0 { + for iNdEx := len(m.HistogramBucketSettings) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.HistogramBucketSettings[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if m.StatsMatcher != nil { + size, err := m.StatsMatcher.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.UseAllDefaultTags != nil { + size, err := (*wrapperspb.BoolValue)(m.UseAllDefaultTags).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.StatsTags) > 0 { + for iNdEx := len(m.StatsTags) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.StatsTags[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, 
nil +} + +func (m *StatsMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatsMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.StatsMatcher.(*StatsMatcher_InclusionList); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.StatsMatcher.(*StatsMatcher_ExclusionList); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.StatsMatcher.(*StatsMatcher_RejectAll); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *StatsMatcher_RejectAll) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsMatcher_RejectAll) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.RejectAll { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *StatsMatcher_ExclusionList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsMatcher_ExclusionList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExclusionList != nil { + if vtmsg, ok := interface{}(m.ExclusionList).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ExclusionList) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *StatsMatcher_InclusionList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsMatcher_InclusionList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.InclusionList != nil { + if vtmsg, ok := interface{}(m.InclusionList).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.InclusionList) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *TagSpecifier) 
MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TagSpecifier) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TagSpecifier) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.TagValue.(*TagSpecifier_FixedValue); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.TagValue.(*TagSpecifier_Regex); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.TagName) > 0 { + i -= len(m.TagName) + copy(dAtA[i:], m.TagName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TagName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TagSpecifier_Regex) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TagSpecifier_Regex) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Regex) + copy(dAtA[i:], m.Regex) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Regex))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *TagSpecifier_FixedValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TagSpecifier_FixedValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.FixedValue) + copy(dAtA[i:], m.FixedValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.FixedValue))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *HistogramBucketSettings) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HistogramBucketSettings) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HistogramBucketSettings) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Buckets) > 0 { + for iNdEx := len(m.Buckets) - 1; iNdEx >= 0; iNdEx-- { + f1 := math.Float64bits(float64(m.Buckets[iNdEx])) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1)) + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Buckets)*8)) + i-- + dAtA[i] = 0x12 + } + if m.Match != nil { + if vtmsg, ok := interface{}(m.Match).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Match) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StatsdSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatsdSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsdSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Prefix) > 0 { + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0x1a + } + if msg, ok := m.StatsdSpecifier.(*StatsdSink_TcpClusterName); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.StatsdSpecifier.(*StatsdSink_Address); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *StatsdSink_Address) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsdSink_Address) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *StatsdSink_TcpClusterName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsdSink_TcpClusterName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.TcpClusterName) + copy(dAtA[i:], m.TcpClusterName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TcpClusterName))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *DogStatsdSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DogStatsdSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DogStatsdSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxBytesPerDatagram != nil { + size, err := (*wrapperspb.UInt64Value)(m.MaxBytesPerDatagram).MarshalToSizedBufferVTStrict(dAtA[:i]) + if 
err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.Prefix) > 0 { + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0x1a + } + if msg, ok := m.DogStatsdSpecifier.(*DogStatsdSink_Address); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *DogStatsdSink_Address) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DogStatsdSink_Address) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *HystrixSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HystrixSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HystrixSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.NumBuckets != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.NumBuckets)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StatsSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *StatsSink_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *StatsConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.StatsTags) > 0 { + for _, e := range m.StatsTags { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.UseAllDefaultTags != nil { + l = (*wrapperspb.BoolValue)(m.UseAllDefaultTags).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StatsMatcher != nil { + l = m.StatsMatcher.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.HistogramBucketSettings) > 0 { + for _, e := range m.HistogramBucketSettings { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *StatsMatcher) 
SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.StatsMatcher.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *StatsMatcher_RejectAll) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *StatsMatcher_ExclusionList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExclusionList != nil { + if size, ok := interface{}(m.ExclusionList).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ExclusionList) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *StatsMatcher_InclusionList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.InclusionList != nil { + if size, ok := interface{}(m.InclusionList).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.InclusionList) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *TagSpecifier) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TagName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.TagValue.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *TagSpecifier_Regex) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Regex) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *TagSpecifier_FixedValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FixedValue) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HistogramBucketSettings) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Match != nil { + if size, ok := interface{}(m.Match).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Match) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Buckets) > 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(len(m.Buckets)*8)) + len(m.Buckets)*8 + } + n += len(m.unknownFields) + return n +} + +func (m *StatsdSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.StatsdSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + l = len(m.Prefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *StatsdSink_Address) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *StatsdSink_TcpClusterName) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TcpClusterName) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *DogStatsdSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.DogStatsdSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + l = len(m.Prefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxBytesPerDatagram != nil { + l = (*wrapperspb.UInt64Value)(m.MaxBytesPerDatagram).SizeVT() + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l))
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *DogStatsdSink_Address) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Address != nil {
+		if size, ok := interface{}(m.Address).(interface {
+			SizeVT() int
+		}); ok {
+			l = size.SizeVT()
+		} else {
+			l = proto.Size(m.Address)
+		}
+		n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+	} else {
+		n += 2
+	}
+	return n
+}
+func (m *HystrixSink) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.NumBuckets != 0 {
+		n += 1 + protohelpers.SizeOfVarint(uint64(m.NumBuckets))
+	}
+	n += len(m.unknownFields)
+	return n
+}
diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.go
index 7f51f07f349..6feac912996 100644
--- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.go
+++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.go
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // protoc-gen-go v1.30.0
-// protoc v4.23.4
+// protoc v5.26.1
 // source: envoy/config/overload/v3/overload.proto
 
 package overloadv3
@@ -10,10 +10,10 @@ import (
 	_ "github.com/cncf/xds/go/udpa/annotations"
 	v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
 	_ "github.com/envoyproxy/protoc-gen-validate/validate"
-	any1 "github.com/golang/protobuf/ptypes/any"
-	duration "github.com/golang/protobuf/ptypes/duration"
 	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
 	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	anypb "google.golang.org/protobuf/types/known/anypb"
+	durationpb "google.golang.org/protobuf/types/known/durationpb"
 	reflect "reflect"
 	sync "sync"
 )
@@ -153,7 +153,7 @@ func (m *ResourceMonitor) GetConfigType() isResourceMonitor_ConfigType {
 	return nil
 }
 
-func (x *ResourceMonitor) GetTypedConfig() *any1.Any {
+func (x *ResourceMonitor) GetTypedConfig() *anypb.Any {
 	if x, ok := x.GetConfigType().(*ResourceMonitor_TypedConfig); ok {
 		return x.TypedConfig
 	}
@@ -165,7 +165,7 @@ type isResourceMonitor_ConfigType interface {
 }
 
 type ResourceMonitor_TypedConfig struct {
-	TypedConfig *any1.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"`
+	TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"`
 }
 
 func (*ResourceMonitor_TypedConfig) isResourceMonitor_ConfigType() {}
@@ -437,7 +437,7 @@ type OverloadAction struct {
 	// in this list.
 	Triggers []*Trigger `protobuf:"bytes,2,rep,name=triggers,proto3" json:"triggers,omitempty"`
 	// Configuration for the action being instantiated.
-	TypedConfig *any1.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"`
+	TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"`
 }
 
 func (x *OverloadAction) Reset() {
@@ -486,7 +486,7 @@ func (x *OverloadAction) GetTriggers() []*Trigger {
 	return nil
 }
 
-func (x *OverloadAction) GetTypedConfig() *any1.Any {
+func (x *OverloadAction) GetTypedConfig() *anypb.Any {
 	if x != nil {
 		return x.TypedConfig
 	}
@@ -631,7 +631,7 @@ type OverloadManager struct {
 	unknownFields protoimpl.UnknownFields
 
 	// The interval for refreshing resource usage.
-	RefreshInterval *duration.Duration `protobuf:"bytes,1,opt,name=refresh_interval,json=refreshInterval,proto3" json:"refresh_interval,omitempty"`
+	RefreshInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=refresh_interval,json=refreshInterval,proto3" json:"refresh_interval,omitempty"`
 	// The set of resources to monitor.
 	ResourceMonitors []*ResourceMonitor `protobuf:"bytes,2,rep,name=resource_monitors,json=resourceMonitors,proto3" json:"resource_monitors,omitempty"`
 	// The set of overload actions.
@@ -674,7 +674,7 @@ func (*OverloadManager) Descriptor() ([]byte, []int) {
 	return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{8}
 }
 
-func (x *OverloadManager) GetRefreshInterval() *duration.Duration {
+func (x *OverloadManager) GetRefreshInterval() *durationpb.Duration {
 	if x != nil {
 		return x.RefreshInterval
 	}
@@ -769,7 +769,7 @@ func (m *ScaleTimersOverloadActionConfig_ScaleTimer) GetOverloadAdjust() isScale
 	return nil
 }
 
-func (x *ScaleTimersOverloadActionConfig_ScaleTimer) GetMinTimeout() *duration.Duration {
+func (x *ScaleTimersOverloadActionConfig_ScaleTimer) GetMinTimeout() *durationpb.Duration {
 	if x, ok := x.GetOverloadAdjust().(*ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout); ok {
 		return x.MinTimeout
 	}
@@ -789,7 +789,7 @@ type isScaleTimersOverloadActionConfig_ScaleTimer_OverloadAdjust interface {
 
 type ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout struct {
 	// Sets the minimum duration as an absolute value.
-	MinTimeout *duration.Duration `protobuf:"bytes,2,opt,name=min_timeout,json=minTimeout,proto3,oneof"`
+	MinTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=min_timeout,json=minTimeout,proto3,oneof"`
 }
 
 type ScaleTimersOverloadActionConfig_ScaleTimer_MinScale struct {
@@ -999,8 +999,8 @@ var file_envoy_config_overload_v3_overload_proto_goTypes = []interface{}{
 	(*BufferFactoryConfig)(nil), // 8: envoy.config.overload.v3.BufferFactoryConfig
 	(*OverloadManager)(nil),     // 9: envoy.config.overload.v3.OverloadManager
 	(*ScaleTimersOverloadActionConfig_ScaleTimer)(nil), // 10: envoy.config.overload.v3.ScaleTimersOverloadActionConfig.ScaleTimer
-	(*any1.Any)(nil),          // 11: google.protobuf.Any
-	(*duration.Duration)(nil), // 12: google.protobuf.Duration
+	(*anypb.Any)(nil),            // 11: google.protobuf.Any
+	(*durationpb.Duration)(nil),  // 12: google.protobuf.Duration
 	(*v3.Percent)(nil),        // 13: envoy.type.v3.Percent
 }
 var file_envoy_config_overload_v3_overload_proto_depIdxs = []int32{
diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.validate.go
index ea01c9c1087..267297ed2c5 100644
--- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.validate.go
+++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.validate.go
@@ -1,3 +1,4 @@
+//go:build !disable_pgv
 // Code generated by protoc-gen-validate. DO NOT EDIT.
 // source: envoy/config/overload/v3/overload.proto
 
diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload_vtproto.pb.go
new file mode 100644
index 00000000000..3a8ba0054f2
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload_vtproto.pb.go
@@ -0,0 +1,938 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/config/overload/v3/overload.proto
+
+package overloadv3
+
+import (
+	binary "encoding/binary"
+	protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+	anypb "github.com/planetscale/vtprotobuf/types/known/anypb"
+	durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb"
+	proto "google.golang.org/protobuf/proto"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	math "math"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *ResourceMonitor) MarshalVTStrict() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceMonitor) MarshalToVTStrict(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ResourceMonitor) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
+	if msg, ok := m.ConfigType.(*ResourceMonitor_TypedConfig); ok {
+		size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceMonitor_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ResourceMonitor_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.TypedConfig != nil {
+		size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+		i--
+		dAtA[i] = 0x1a
+	} else {
+		i = protohelpers.EncodeVarint(dAtA, i, 0)
+		i--
+		dAtA[i] = 0x1a
+	}
+	return len(dAtA) - i, nil
+}
+func (m *ThresholdTrigger) MarshalVTStrict() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ThresholdTrigger) MarshalToVTStrict(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ThresholdTrigger) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0,
nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *ScaledTrigger) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaledTrigger) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScaledTrigger) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SaturationThreshold != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.SaturationThreshold)))) + i-- + dAtA[i] = 0x11 + } + if m.ScalingThreshold != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ScalingThreshold)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Trigger) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Trigger) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Trigger) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.TriggerOneof.(*Trigger_Scaled); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.TriggerOneof.(*Trigger_Threshold); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Trigger_Threshold) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Trigger_Threshold) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Threshold != nil { + size, err := m.Threshold.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Trigger_Scaled) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Trigger_Scaled) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Scaled != nil { + size, err := m.Scaled.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + 
i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *ScaleTimersOverloadActionConfig_ScaleTimer) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleTimersOverloadActionConfig_ScaleTimer) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScaleTimersOverloadActionConfig_ScaleTimer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.OverloadAdjust.(*ScaleTimersOverloadActionConfig_ScaleTimer_MinScale); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.OverloadAdjust.(*ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Timer != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Timer)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MinTimeout != nil { + size, err := (*durationpb.Duration)(m.MinTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *ScaleTimersOverloadActionConfig_ScaleTimer_MinScale) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScaleTimersOverloadActionConfig_ScaleTimer_MinScale) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MinScale != nil { + if vtmsg, ok := interface{}(m.MinScale).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MinScale) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *ScaleTimersOverloadActionConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleTimersOverloadActionConfig) MarshalToVTStrict(dAtA []byte) 
(int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScaleTimersOverloadActionConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TimerScaleFactors) > 0 { + for iNdEx := len(m.TimerScaleFactors) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TimerScaleFactors[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *OverloadAction) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OverloadAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OverloadAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Triggers) > 0 { + for iNdEx := len(m.Triggers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Triggers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LoadShedPoint) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LoadShedPoint) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LoadShedPoint) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Triggers) > 0 { + for iNdEx := len(m.Triggers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Triggers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BufferFactoryConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *BufferFactoryConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *BufferFactoryConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MinimumAccountToTrackPowerOfTwo != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MinimumAccountToTrackPowerOfTwo)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *OverloadManager) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OverloadManager) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OverloadManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.LoadshedPoints) > 0 { + for iNdEx := len(m.LoadshedPoints) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.LoadshedPoints[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.BufferFactoryConfig != nil { + size, err := m.BufferFactoryConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.Actions) > 0 { + for iNdEx := len(m.Actions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Actions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.ResourceMonitors) > 0 { + for iNdEx := len(m.ResourceMonitors) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ResourceMonitors[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.RefreshInterval != nil { + size, err := (*durationpb.Duration)(m.RefreshInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResourceMonitor) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ResourceMonitor_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ThresholdTrigger) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + n += 
len(m.unknownFields) + return n +} + +func (m *ScaledTrigger) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ScalingThreshold != 0 { + n += 9 + } + if m.SaturationThreshold != 0 { + n += 9 + } + n += len(m.unknownFields) + return n +} + +func (m *Trigger) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.TriggerOneof.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Trigger_Threshold) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Threshold != nil { + l = m.Threshold.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Trigger_Scaled) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Scaled != nil { + l = m.Scaled.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ScaleTimersOverloadActionConfig_ScaleTimer) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Timer != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Timer)) + } + if vtmsg, ok := m.OverloadAdjust.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MinTimeout != nil { + l = (*durationpb.Duration)(m.MinTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ScaleTimersOverloadActionConfig_ScaleTimer_MinScale) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MinScale != nil { + if size, ok := interface{}(m.MinScale).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MinScale) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ScaleTimersOverloadActionConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.TimerScaleFactors) > 0 { + for _, e := range m.TimerScaleFactors { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *OverloadAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Triggers) > 0 { + for _, e := range m.Triggers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LoadShedPoint) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Triggers) > 0 { + for _, e := range m.Triggers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *BufferFactoryConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MinimumAccountToTrackPowerOfTwo != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MinimumAccountToTrackPowerOfTwo)) + } + n += len(m.unknownFields) + return n +} + +func (m 
*OverloadManager) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RefreshInterval != nil { + l = (*durationpb.Duration)(m.RefreshInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ResourceMonitors) > 0 { + for _, e := range m.ResourceMonitors { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Actions) > 0 { + for _, e := range m.Actions { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.BufferFactoryConfig != nil { + l = m.BufferFactoryConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.LoadshedPoints) > 0 { + for _, e := range m.LoadshedPoints { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.go index 126084f32bc..c069fae842a 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/rbac/v3/rbac.proto package rbacv3 @@ -371,7 +371,7 @@ func (x *Policy) GetCheckedCondition() *v1alpha1.CheckedExpr { } // Permission defines an action (or actions) that a principal can take. -// [#next-free-field: 13] +// [#next-free-field: 14] type Permission struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -391,6 +391,7 @@ type Permission struct { // *Permission_NotRule // *Permission_RequestedServerName // *Permission_Matcher + // *Permission_UriTemplate Rule isPermission_Rule `protobuf_oneof:"rule"` } @@ -517,6 +518,13 @@ func (x *Permission) GetMatcher() *v32.TypedExtensionConfig { return nil } +func (x *Permission) GetUriTemplate() *v32.TypedExtensionConfig { + if x, ok := x.GetRule().(*Permission_UriTemplate); ok { + return x.UriTemplate + } + return nil +} + type isPermission_Rule interface { isPermission_Rule() } @@ -606,6 +614,12 @@ type Permission_Matcher struct { Matcher *v32.TypedExtensionConfig `protobuf:"bytes,12,opt,name=matcher,proto3,oneof"` } +type Permission_UriTemplate struct { + // URI template path matching. + // [#extension-category: envoy.path.match] + UriTemplate *v32.TypedExtensionConfig `protobuf:"bytes,13,opt,name=uri_template,json=uriTemplate,proto3,oneof"` +} + func (*Permission_AndRules) isPermission_Rule() {} func (*Permission_OrRules) isPermission_Rule() {} @@ -630,6 +644,8 @@ func (*Permission_RequestedServerName) isPermission_Rule() {} func (*Permission_Matcher) isPermission_Rule() {} +func (*Permission_UriTemplate) isPermission_Rule() {} + // Principal defines an identity or a group of identities for a downstream // subject. 
// [#next-free-field: 13] @@ -1355,7 +1371,7 @@ var file_envoy_config_rbac_v3_rbac_proto_rawDesc = []byte{ 0x69, 0x65, 0x72, 0x52, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x22, 0x9a, 0xc5, 0x88, 0x1e, 0x1d, 0x0a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, - 0x76, 0x32, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xda, 0x07, 0x0a, 0x0a, 0x50, 0x65, + 0x76, 0x32, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xab, 0x08, 0x0a, 0x0a, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x43, 0x0a, 0x09, 0x61, 0x6e, 0x64, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, @@ -1406,103 +1422,108 @@ var file_envoy_config_rbac_v3_rbac_proto_rawDesc = []byte{ 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x1a, 0x73, 0x0a, 0x03, - 0x53, 0x65, 0x74, 0x12, 0x40, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x05, - 0x72, 0x75, 0x6c, 0x65, 0x73, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, - 0x76, 0x32, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, - 0x74, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x50, - 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x0a, 0x04, 0x72, 0x75, 0x6c, - 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xeb, 0x08, 0x0a, 0x09, 0x50, 0x72, 0x69, 0x6e, 0x63, - 0x69, 0x70, 0x61, 0x6c, 0x12, 0x3e, 0x0a, 0x07, 0x61, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x67, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4f, 0x0a, 0x0c, + 0x75, 0x72, 0x69, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, + 0x52, 0x0b, 0x75, 0x72, 0x69, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x1a, 0x73, 0x0a, + 0x03, 0x53, 0x65, 0x74, 0x12, 0x40, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x08, 
0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, + 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, + 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x53, + 0x65, 0x74, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, + 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x0a, 0x04, 0x72, 0x75, + 0x6c, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xeb, 0x08, 0x0a, 0x09, 0x50, 0x72, 0x69, 0x6e, + 0x63, 0x69, 0x70, 0x61, 0x6c, 0x12, 0x3e, 0x0a, 0x07, 0x61, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, + 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x06, 0x61, + 0x6e, 0x64, 0x49, 0x64, 0x73, 0x12, 0x3c, 0x0a, 0x06, 0x6f, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x69, - 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x06, 0x61, 0x6e, - 0x64, 0x49, 0x64, 0x73, 0x12, 0x3c, 0x0a, 0x06, 0x6f, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x69, 0x6e, - 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x05, 0x6f, 0x72, 0x49, - 0x64, 0x73, 0x12, 0x1b, 0x0a, 0x03, 0x61, 0x6e, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x42, - 0x07, 0xfa, 0x42, 0x04, 0x6a, 0x02, 0x08, 0x01, 0x48, 0x00, 0x52, 0x03, 0x61, 0x6e, 0x79, 0x12, - 0x55, 0x0a, 0x0d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x05, 0x6f, 0x72, + 0x49, 0x64, 0x73, 0x12, 0x1b, 0x0a, 0x03, 0x61, 0x6e, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x6a, 0x02, 0x08, 0x01, 0x48, 0x00, 0x52, 0x03, 0x61, 0x6e, 0x79, + 0x12, 0x55, 0x0a, 0x0d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, + 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x48, 0x00, 0x52, 0x0d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x4b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x69, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, + 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, 
0x08, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x49, 0x70, 0x12, 0x4b, 0x0a, 0x10, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, + 0x00, 0x52, 0x0e, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x49, + 0x70, 0x12, 0x3e, 0x0a, 0x09, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x49, + 0x70, 0x12, 0x3e, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x12, 0x3f, 0x0a, 0x08, 0x75, 0x72, 0x6c, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, 0x68, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x75, 0x72, 0x6c, 0x50, 0x61, + 0x74, 0x68, 0x12, 0x44, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x08, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0b, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x38, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, + 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x48, 0x00, 0x52, 0x05, 0x6e, 0x6f, 0x74, + 0x49, 0x64, 0x1a, 0x6d, 0x0a, 0x03, 0x53, 0x65, 0x74, 0x12, 0x3b, 0x0a, 0x03, 0x69, 0x64, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, + 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, + 0x01, 0x52, 0x03, 0x69, 0x64, 0x73, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 
0x63, + 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 0x53, 0x65, + 0x74, 0x1a, 0x97, 0x01, 0x0a, 0x0d, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x4b, 0x0a, 0x0e, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x52, 0x0d, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, + 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x48, 0x00, 0x52, 0x0d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x4b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x69, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, - 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x49, 0x70, 0x12, 0x4b, 0x0a, 0x10, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, - 0x52, 0x0e, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x49, 0x70, - 0x12, 0x3e, 0x0a, 0x09, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x49, 0x70, - 0x12, 0x3e, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x12, 0x3f, 0x0a, 0x08, 0x75, 0x72, 0x6c, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x75, 0x72, 0x6c, 0x50, 0x61, 0x74, - 0x68, 0x12, 0x44, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 
0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x08, 0x6d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0b, 0x66, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x38, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, - 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x48, 0x00, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, - 0x64, 0x1a, 0x6d, 0x0a, 0x03, 0x53, 0x65, 0x74, 0x12, 0x3b, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x69, - 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, - 0x52, 0x03, 0x69, 0x64, 0x73, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, - 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 0x53, 0x65, 0x74, - 0x1a, 0x97, 0x01, 0x0a, 0x0d, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x12, 0x4b, 0x0a, 0x0e, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, - 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x52, 0x0d, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x3a, - 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, - 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, - 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, - 0x6c, 0x42, 0x11, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, - 0x03, 0xf8, 0x42, 0x01, 0x22, 0x60, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, - 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, - 0x76, 0x33, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 
0x7d, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, - 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, - 0x63, 0x2e, 0x76, 0x33, 0x42, 0x09, 0x52, 0x62, 0x61, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x62, 0x61, 0x63, 0x2f, 0x76, 0x33, 0x3b, 0x72, - 0x62, 0x61, 0x63, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x3a, 0x25, 0x9a, 0xc5, 0x88, + 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, + 0x61, 0x6c, 0x42, 0x11, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x60, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x06, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x7d, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, + 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, + 0x61, 0x63, 0x2e, 0x76, 0x33, 0x42, 0x09, 0x52, 0x62, 0x61, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x62, 0x61, 0x63, 0x2f, 0x76, 0x33, 0x3b, + 0x72, 0x62, 0x61, 0x63, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1562,30 +1583,31 @@ var file_envoy_config_rbac_v3_rbac_proto_depIdxs = []int32{ 4, // 14: envoy.config.rbac.v3.Permission.not_rule:type_name -> envoy.config.rbac.v3.Permission 20, // 15: envoy.config.rbac.v3.Permission.requested_server_name:type_name -> envoy.type.matcher.v3.StringMatcher 21, // 16: envoy.config.rbac.v3.Permission.matcher:type_name -> envoy.config.core.v3.TypedExtensionConfig - 11, // 17: envoy.config.rbac.v3.Principal.and_ids:type_name -> envoy.config.rbac.v3.Principal.Set - 11, // 18: envoy.config.rbac.v3.Principal.or_ids:type_name -> envoy.config.rbac.v3.Principal.Set - 12, // 19: envoy.config.rbac.v3.Principal.authenticated:type_name -> envoy.config.rbac.v3.Principal.Authenticated - 17, // 20: envoy.config.rbac.v3.Principal.source_ip:type_name -> envoy.config.core.v3.CidrRange - 17, // 21: 
envoy.config.rbac.v3.Principal.direct_remote_ip:type_name -> envoy.config.core.v3.CidrRange - 17, // 22: envoy.config.rbac.v3.Principal.remote_ip:type_name -> envoy.config.core.v3.CidrRange - 15, // 23: envoy.config.rbac.v3.Principal.header:type_name -> envoy.config.route.v3.HeaderMatcher - 16, // 24: envoy.config.rbac.v3.Principal.url_path:type_name -> envoy.type.matcher.v3.PathMatcher - 19, // 25: envoy.config.rbac.v3.Principal.metadata:type_name -> envoy.type.matcher.v3.MetadataMatcher - 22, // 26: envoy.config.rbac.v3.Principal.filter_state:type_name -> envoy.type.matcher.v3.FilterStateMatcher - 5, // 27: envoy.config.rbac.v3.Principal.not_id:type_name -> envoy.config.rbac.v3.Principal - 0, // 28: envoy.config.rbac.v3.Action.action:type_name -> envoy.config.rbac.v3.RBAC.Action - 1, // 29: envoy.config.rbac.v3.RBAC.AuditLoggingOptions.audit_condition:type_name -> envoy.config.rbac.v3.RBAC.AuditLoggingOptions.AuditCondition - 9, // 30: envoy.config.rbac.v3.RBAC.AuditLoggingOptions.logger_configs:type_name -> envoy.config.rbac.v3.RBAC.AuditLoggingOptions.AuditLoggerConfig - 3, // 31: envoy.config.rbac.v3.RBAC.PoliciesEntry.value:type_name -> envoy.config.rbac.v3.Policy - 21, // 32: envoy.config.rbac.v3.RBAC.AuditLoggingOptions.AuditLoggerConfig.audit_logger:type_name -> envoy.config.core.v3.TypedExtensionConfig - 4, // 33: envoy.config.rbac.v3.Permission.Set.rules:type_name -> envoy.config.rbac.v3.Permission - 5, // 34: envoy.config.rbac.v3.Principal.Set.ids:type_name -> envoy.config.rbac.v3.Principal - 20, // 35: envoy.config.rbac.v3.Principal.Authenticated.principal_name:type_name -> envoy.type.matcher.v3.StringMatcher - 36, // [36:36] is the sub-list for method output_type - 36, // [36:36] is the sub-list for method input_type - 36, // [36:36] is the sub-list for extension type_name - 36, // [36:36] is the sub-list for extension extendee - 0, // [0:36] is the sub-list for field type_name + 21, // 17: envoy.config.rbac.v3.Permission.uri_template:type_name -> envoy.config.core.v3.TypedExtensionConfig + 11, // 18: envoy.config.rbac.v3.Principal.and_ids:type_name -> envoy.config.rbac.v3.Principal.Set + 11, // 19: envoy.config.rbac.v3.Principal.or_ids:type_name -> envoy.config.rbac.v3.Principal.Set + 12, // 20: envoy.config.rbac.v3.Principal.authenticated:type_name -> envoy.config.rbac.v3.Principal.Authenticated + 17, // 21: envoy.config.rbac.v3.Principal.source_ip:type_name -> envoy.config.core.v3.CidrRange + 17, // 22: envoy.config.rbac.v3.Principal.direct_remote_ip:type_name -> envoy.config.core.v3.CidrRange + 17, // 23: envoy.config.rbac.v3.Principal.remote_ip:type_name -> envoy.config.core.v3.CidrRange + 15, // 24: envoy.config.rbac.v3.Principal.header:type_name -> envoy.config.route.v3.HeaderMatcher + 16, // 25: envoy.config.rbac.v3.Principal.url_path:type_name -> envoy.type.matcher.v3.PathMatcher + 19, // 26: envoy.config.rbac.v3.Principal.metadata:type_name -> envoy.type.matcher.v3.MetadataMatcher + 22, // 27: envoy.config.rbac.v3.Principal.filter_state:type_name -> envoy.type.matcher.v3.FilterStateMatcher + 5, // 28: envoy.config.rbac.v3.Principal.not_id:type_name -> envoy.config.rbac.v3.Principal + 0, // 29: envoy.config.rbac.v3.Action.action:type_name -> envoy.config.rbac.v3.RBAC.Action + 1, // 30: envoy.config.rbac.v3.RBAC.AuditLoggingOptions.audit_condition:type_name -> envoy.config.rbac.v3.RBAC.AuditLoggingOptions.AuditCondition + 9, // 31: envoy.config.rbac.v3.RBAC.AuditLoggingOptions.logger_configs:type_name -> envoy.config.rbac.v3.RBAC.AuditLoggingOptions.AuditLoggerConfig 
+	3,  // 32: envoy.config.rbac.v3.RBAC.PoliciesEntry.value:type_name -> envoy.config.rbac.v3.Policy
+	21, // 33: envoy.config.rbac.v3.RBAC.AuditLoggingOptions.AuditLoggerConfig.audit_logger:type_name -> envoy.config.core.v3.TypedExtensionConfig
+	4,  // 34: envoy.config.rbac.v3.Permission.Set.rules:type_name -> envoy.config.rbac.v3.Permission
+	5,  // 35: envoy.config.rbac.v3.Principal.Set.ids:type_name -> envoy.config.rbac.v3.Principal
+	20, // 36: envoy.config.rbac.v3.Principal.Authenticated.principal_name:type_name -> envoy.type.matcher.v3.StringMatcher
+	37, // [37:37] is the sub-list for method output_type
+	37, // [37:37] is the sub-list for method input_type
+	37, // [37:37] is the sub-list for extension type_name
+	37, // [37:37] is the sub-list for extension extendee
+	0,  // [0:37] is the sub-list for field type_name
 }
 
 func init() { file_envoy_config_rbac_v3_rbac_proto_init() }
@@ -1728,6 +1750,7 @@ func file_envoy_config_rbac_v3_rbac_proto_init() {
 			(*Permission_NotRule)(nil),
 			(*Permission_RequestedServerName)(nil),
 			(*Permission_Matcher)(nil),
+			(*Permission_UriTemplate)(nil),
 		}
 		file_envoy_config_rbac_v3_rbac_proto_msgTypes[3].OneofWrappers = []interface{}{
 			(*Principal_AndIds)(nil),
diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.validate.go
index f034cc682b2..f80fd60974a 100644
--- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.validate.go
+++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.validate.go
@@ -1,3 +1,4 @@
+//go:build !disable_pgv
 // Code generated by protoc-gen-validate. DO NOT EDIT.
 // source: envoy/config/rbac/v3/rbac.proto
 
@@ -957,6 +958,48 @@ func (m *Permission) validate(all bool) error {
 			}
 		}
 
+	case *Permission_UriTemplate:
+		if v == nil {
+			err := PermissionValidationError{
+				field:  "Rule",
+				reason: "oneof value cannot be a typed-nil",
+			}
+			if !all {
+				return err
+			}
+			errors = append(errors, err)
+		}
+		oneofRulePresent = true
+
+		if all {
+			switch v := interface{}(m.GetUriTemplate()).(type) {
+			case interface{ ValidateAll() error }:
+				if err := v.ValidateAll(); err != nil {
+					errors = append(errors, PermissionValidationError{
+						field:  "UriTemplate",
+						reason: "embedded message failed validation",
+						cause:  err,
+					})
+				}
+			case interface{ Validate() error }:
+				if err := v.Validate(); err != nil {
+					errors = append(errors, PermissionValidationError{
+						field:  "UriTemplate",
+						reason: "embedded message failed validation",
+						cause:  err,
+					})
+				}
+			}
+		} else if v, ok := interface{}(m.GetUriTemplate()).(interface{ Validate() error }); ok {
+			if err := v.Validate(); err != nil {
+				return PermissionValidationError{
+					field:  "UriTemplate",
+					reason: "embedded message failed validation",
+					cause:  err,
+				}
+			}
+		}
+
 	default:
 		_ = v // ensures v is used
 	}
diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac_vtproto.pb.go
new file mode 100644
index 00000000000..940a9b37eb3
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac_vtproto.pb.go
@@ -0,0 +1,2103 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto.
DO NOT EDIT. +// source: envoy/config/rbac/v3/rbac.proto + +package rbacv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *RBAC_AuditLoggingOptions_AuditLoggerConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RBAC_AuditLoggingOptions_AuditLoggerConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RBAC_AuditLoggingOptions_AuditLoggerConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.IsOptional { + i-- + if m.IsOptional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.AuditLogger != nil { + if vtmsg, ok := interface{}(m.AuditLogger).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AuditLogger) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RBAC_AuditLoggingOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RBAC_AuditLoggingOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RBAC_AuditLoggingOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.LoggerConfigs) > 0 { + for iNdEx := len(m.LoggerConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.LoggerConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.AuditCondition != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.AuditCondition)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RBAC) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RBAC) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RBAC) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AuditLoggingOptions != nil { + size, err := m.AuditLoggingOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Policies) > 0 { + for k := range m.Policies { + v := m.Policies[k] + baseI := i + size, err := v.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.Action != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Action)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Policy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Policy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Policy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.CheckedCondition != nil { + if vtmsg, ok := interface{}(m.CheckedCondition).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CheckedCondition) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.Condition != nil { + if vtmsg, ok := interface{}(m.Condition).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Condition) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Principals) > 0 { + for iNdEx := len(m.Principals) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Principals[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Permissions) > 0 { + for iNdEx := len(m.Permissions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Permissions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return 
len(dAtA) - i, nil +} + +func (m *Permission_Set) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Permission_Set) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_Set) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Rules[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Permission) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Permission) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Rule.(*Permission_UriTemplate); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_Matcher); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_DestinationPortRange); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_UrlPath); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_RequestedServerName); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_NotRule); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_Metadata); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_DestinationPort); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_DestinationIp); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_Header); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_Any); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if 
msg, ok := m.Rule.(*Permission_OrRules); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_AndRules); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Permission_AndRules) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_AndRules) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AndRules != nil { + size, err := m.AndRules.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Permission_OrRules) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_OrRules) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrRules != nil { + size, err := m.OrRules.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Permission_Any) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_Any) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.Any { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + return len(dAtA) - i, nil +} +func (m *Permission_Header) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_Header) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Header != nil { + if vtmsg, ok := interface{}(m.Header).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Header) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Permission_DestinationIp) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_DestinationIp) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DestinationIp != nil { + if vtmsg, ok := interface{}(m.DestinationIp).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DestinationIp) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Permission_DestinationPort) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_DestinationPort) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DestinationPort)) + i-- + dAtA[i] = 0x30 + return len(dAtA) - i, nil +} +func (m *Permission_Metadata) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_Metadata) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *Permission_NotRule) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_NotRule) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NotRule != nil { + size, err := m.NotRule.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *Permission_RequestedServerName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_RequestedServerName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RequestedServerName != nil { + if vtmsg, ok := interface{}(m.RequestedServerName).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RequestedServerName) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *Permission_UrlPath) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_UrlPath) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.UrlPath != nil { + if vtmsg, ok := interface{}(m.UrlPath).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + 
} + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UrlPath) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x52 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *Permission_DestinationPortRange) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_DestinationPortRange) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DestinationPortRange != nil { + if vtmsg, ok := interface{}(m.DestinationPortRange).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DestinationPortRange) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x5a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *Permission_Matcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_Matcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Matcher != nil { + if vtmsg, ok := interface{}(m.Matcher).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Matcher) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x62 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x62 + } + return len(dAtA) - i, nil +} +func (m *Permission_UriTemplate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_UriTemplate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.UriTemplate != nil { + if vtmsg, ok := interface{}(m.UriTemplate).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UriTemplate) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x6a + } + return len(dAtA) - i, nil +} +func (m *Principal_Set) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Principal_Set) MarshalToVTStrict(dAtA []byte) 
(int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_Set) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Ids) > 0 { + for iNdEx := len(m.Ids) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Ids[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Principal_Authenticated) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Principal_Authenticated) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_Authenticated) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PrincipalName != nil { + if vtmsg, ok := interface{}(m.PrincipalName).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.PrincipalName) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func (m *Principal) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Principal) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Identifier.(*Principal_FilterState); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_RemoteIp); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_DirectRemoteIp); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_UrlPath); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_NotId); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_Metadata); ok 
{ + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_Header); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_SourceIp); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_Authenticated_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_Any); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_OrIds); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_AndIds); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Principal_AndIds) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_AndIds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AndIds != nil { + size, err := m.AndIds.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Principal_OrIds) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_OrIds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrIds != nil { + size, err := m.OrIds.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Principal_Any) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_Any) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.Any { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + return len(dAtA) - i, nil +} +func (m *Principal_Authenticated_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_Authenticated_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Authenticated != nil { + size, err := m.Authenticated.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Principal_SourceIp) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_SourceIp) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if 
m.SourceIp != nil { + if vtmsg, ok := interface{}(m.SourceIp).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SourceIp) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Principal_Header) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_Header) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Header != nil { + if vtmsg, ok := interface{}(m.Header).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Header) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *Principal_Metadata) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_Metadata) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *Principal_NotId) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_NotId) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NotId != nil { + size, err := m.NotId.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *Principal_UrlPath) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_UrlPath) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.UrlPath != nil { + if vtmsg, ok := interface{}(m.UrlPath).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UrlPath) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *Principal_DirectRemoteIp) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_DirectRemoteIp) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DirectRemoteIp != nil { + if vtmsg, ok := interface{}(m.DirectRemoteIp).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DirectRemoteIp) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x52 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *Principal_RemoteIp) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_RemoteIp) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RemoteIp != nil { + if vtmsg, ok := interface{}(m.RemoteIp).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RemoteIp) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x5a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *Principal_FilterState) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_FilterState) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FilterState != nil { + if vtmsg, ok := interface{}(m.FilterState).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.FilterState) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x62 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x62 + } + return len(dAtA) - i, nil +} +func (m *Action) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Action) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Action) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Action != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Action)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RBAC_AuditLoggingOptions_AuditLoggerConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AuditLogger != nil { + if size, ok := interface{}(m.AuditLogger).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.AuditLogger) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IsOptional { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RBAC_AuditLoggingOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AuditCondition != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.AuditCondition)) + } + if len(m.LoggerConfigs) > 0 { + for _, e := range m.LoggerConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RBAC) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Action != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Action)) + } + if len(m.Policies) > 0 { + for k, v := range m.Policies { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if m.AuditLoggingOptions != nil { + l = m.AuditLoggingOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Policy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Permissions) > 0 { + for _, e := range m.Permissions { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Principals) > 0 { + for _, e := range m.Principals { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Condition != nil { + if size, ok := interface{}(m.Condition).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Condition) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CheckedCondition != nil { + if size, ok := interface{}(m.CheckedCondition).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CheckedCondition) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Permission_Set) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Permission) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Rule.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Permission_AndRules) SizeVT() (n int) { + if m == nil { + return 0 + 
} + var l int + _ = l + if m.AndRules != nil { + l = m.AndRules.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_OrRules) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrRules != nil { + l = m.OrRules.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_Any) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *Permission_Header) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + if size, ok := interface{}(m.Header).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Header) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_DestinationIp) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DestinationIp != nil { + if size, ok := interface{}(m.DestinationIp).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DestinationIp) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_DestinationPort) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.DestinationPort)) + return n +} +func (m *Permission_Metadata) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_NotRule) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotRule != nil { + l = m.NotRule.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_RequestedServerName) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestedServerName != nil { + if size, ok := interface{}(m.RequestedServerName).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RequestedServerName) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_UrlPath) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UrlPath != nil { + if size, ok := interface{}(m.UrlPath).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UrlPath) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_DestinationPortRange) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DestinationPortRange != nil { + if size, ok := interface{}(m.DestinationPortRange).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DestinationPortRange) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_Matcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Matcher != nil { + if size, ok := interface{}(m.Matcher).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Matcher) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + 
return n +} +func (m *Permission_UriTemplate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UriTemplate != nil { + if size, ok := interface{}(m.UriTemplate).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UriTemplate) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_Set) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ids) > 0 { + for _, e := range m.Ids { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Principal_Authenticated) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PrincipalName != nil { + if size, ok := interface{}(m.PrincipalName).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.PrincipalName) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Principal) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Identifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Principal_AndIds) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AndIds != nil { + l = m.AndIds.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_OrIds) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrIds != nil { + l = m.OrIds.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_Any) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *Principal_Authenticated_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Authenticated != nil { + l = m.Authenticated.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_SourceIp) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SourceIp != nil { + if size, ok := interface{}(m.SourceIp).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SourceIp) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_Header) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + if size, ok := interface{}(m.Header).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Header) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_Metadata) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_NotId) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotId != nil { + l = m.NotId.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_UrlPath) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UrlPath != nil { + if size, ok := 
interface{}(m.UrlPath).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UrlPath) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_DirectRemoteIp) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DirectRemoteIp != nil { + if size, ok := interface{}(m.DirectRemoteIp).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DirectRemoteIp) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_RemoteIp) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RemoteIp != nil { + if size, ok := interface{}(m.RemoteIp).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RemoteIp) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_FilterState) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FilterState != nil { + if size, ok := interface{}(m.FilterState).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.FilterState) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Action) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Action != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Action)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.go index 152bc22bc32..a3410659573 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/route/v3/route.proto package routev3 @@ -10,10 +10,10 @@ import ( _ "github.com/cncf/xds/go/udpa/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any1 "github.com/golang/protobuf/ptypes/any" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -70,14 +70,11 @@ type RouteConfiguration struct { // Specifies a list of HTTP headers that should be removed from each request // routed by the HTTP connection manager. RequestHeadersToRemove []string `protobuf:"bytes,8,rep,name=request_headers_to_remove,json=requestHeadersToRemove,proto3" json:"request_headers_to_remove,omitempty"` - // By default, headers that should be added/removed are evaluated from most to least specific: - // - // * route level - // * virtual host level - // * connection manager level - // - // To allow setting overrides at the route or virtual host level, this order can be reversed - // by setting this option to true. 
Defaults to false.
+	// Headers mutations at all levels are evaluated, if specified. By default, the order is from most
+	// specific (i.e. route entry level) to least specific (i.e. route configuration level). Later header
+	// mutations may override earlier mutations.
+	// This order can be reversed by setting this field to true. In other words, most specific level mutation
+	// is evaluated last.
 	MostSpecificHeaderMutationsWins bool `protobuf:"varint,10,opt,name=most_specific_header_mutations_wins,json=mostSpecificHeaderMutationsWins,proto3" json:"most_specific_header_mutations_wins,omitempty"`
 	// An optional boolean that specifies whether the clusters that the route
 	// table refers to will be validated by the cluster manager. If set to true
 	// and a route refers to a non-existent cluster, the route table will not
 	// load. If set to false and a route refers to a non-existent cluster, the
 	// route table will load and the router filter will return a 404 if the route
 	// is selected at runtime. This setting defaults to true if the route table
 	// is statically defined via the :ref:`route_config
 	// `
 	// option. Users may wish to override the default behavior in certain cases (for example when
 	// using CDS with a static route table).
-	ValidateClusters *wrappers.BoolValue `protobuf:"bytes,7,opt,name=validate_clusters,json=validateClusters,proto3" json:"validate_clusters,omitempty"`
+	ValidateClusters *wrapperspb.BoolValue `protobuf:"bytes,7,opt,name=validate_clusters,json=validateClusters,proto3" json:"validate_clusters,omitempty"`
 	// The maximum bytes of the response :ref:`direct response body
 	// ` size. If not specified the default
 	// is 4096.
 	//
 	// .. warning::
 	//
 	//	Envoy currently holds the content of :ref:`direct response body
 	//	` in memory. Be careful setting
 	//	this to be larger than the default 4KB, since the allocated memory for direct response body
 	//	is not subject to data plane buffering controls.
-	MaxDirectResponseBodySizeBytes *wrappers.UInt32Value `protobuf:"bytes,11,opt,name=max_direct_response_body_size_bytes,json=maxDirectResponseBodySizeBytes,proto3" json:"max_direct_response_body_size_bytes,omitempty"`
+	MaxDirectResponseBodySizeBytes *wrapperspb.UInt32Value `protobuf:"bytes,11,opt,name=max_direct_response_body_size_bytes,json=maxDirectResponseBodySizeBytes,proto3" json:"max_direct_response_body_size_bytes,omitempty"`
 	// A list of plugins and their configurations which may be used by a
 	// :ref:`cluster specifier plugin name `
 	// within the route. All “extension.name“ fields in this list must be unique.
@@ -129,7 +126,7 @@ type RouteConfiguration struct {
 	// [#comment: An entry's value may be wrapped in a
 	// :ref:`FilterConfig`
 	// message to specify additional options.]
-	TypedPerFilterConfig map[string]*any1.Any `protobuf:"bytes,16,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	TypedPerFilterConfig map[string]*anypb.Any `protobuf:"bytes,16,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
 	// The metadata field can be used to provide additional information
 	// about the route configuration. It can be used for configuration, stats, and logging.
 	// The metadata should go under the filter namespace that will need it.
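As a side note for readers of this hunk, the sketch below shows how the regenerated RouteConfiguration fields look from calling Go code. It is illustrative only and not part of the vendored patch: the route name and the 4096 limit are arbitrary values, and wrapperspb.Bool / wrapperspb.UInt32 are the google.golang.org/protobuf/types/known/wrapperspb constructors that replace the deprecated github.com/golang/protobuf/ptypes wrapper types removed by this regeneration.

package example

import (
	routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// exampleRouteConfiguration builds a minimal RouteConfiguration against the
// regenerated types; all concrete values are hypothetical.
func exampleRouteConfiguration() *routev3.RouteConfiguration {
	return &routev3.RouteConfiguration{
		Name: "example-routes",
		// true reverses the default evaluation order described above: the
		// most specific (route entry level) header mutations are evaluated
		// last and therefore win.
		MostSpecificHeaderMutationsWins: true,
		// Well-known-type constructors replace the old ptypes wrapper
		// struct literals.
		ValidateClusters:               wrapperspb.Bool(true),
		MaxDirectResponseBodySizeBytes: wrapperspb.UInt32(4096), // 4096 bytes is the documented default
	}
}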
@@ -233,14 +230,14 @@ func (x *RouteConfiguration) GetMostSpecificHeaderMutationsWins() bool {
 	return false
 }
 
-func (x *RouteConfiguration) GetValidateClusters() *wrappers.BoolValue {
+func (x *RouteConfiguration) GetValidateClusters() *wrapperspb.BoolValue {
 	if x != nil {
 		return x.ValidateClusters
 	}
 	return nil
 }
 
-func (x *RouteConfiguration) GetMaxDirectResponseBodySizeBytes() *wrappers.UInt32Value {
+func (x *RouteConfiguration) GetMaxDirectResponseBodySizeBytes() *wrapperspb.UInt32Value {
 	if x != nil {
 		return x.MaxDirectResponseBodySizeBytes
 	}
@@ -275,7 +272,7 @@ func (x *RouteConfiguration) GetIgnorePathParametersInPathMatching() bool {
 	return false
 }
 
-func (x *RouteConfiguration) GetTypedPerFilterConfig() map[string]*any1.Any {
+func (x *RouteConfiguration) GetTypedPerFilterConfig() map[string]*anypb.Any {
 	if x != nil {
 		return x.TypedPerFilterConfig
 	}
@@ -495,13 +492,13 @@ var file_envoy_config_route_v3_route_proto_goTypes = []interface{}{
 	nil,                          // 2: envoy.config.route.v3.RouteConfiguration.TypedPerFilterConfigEntry
 	(*VirtualHost)(nil),          // 3: envoy.config.route.v3.VirtualHost
 	(*v3.HeaderValueOption)(nil), // 4: envoy.config.core.v3.HeaderValueOption
-	(*wrappers.BoolValue)(nil),   // 5: google.protobuf.BoolValue
-	(*wrappers.UInt32Value)(nil), // 6: google.protobuf.UInt32Value
+	(*wrapperspb.BoolValue)(nil),   // 5: google.protobuf.BoolValue
+	(*wrapperspb.UInt32Value)(nil), // 6: google.protobuf.UInt32Value
 	(*ClusterSpecifierPlugin)(nil),          // 7: envoy.config.route.v3.ClusterSpecifierPlugin
 	(*RouteAction_RequestMirrorPolicy)(nil), // 8: envoy.config.route.v3.RouteAction.RequestMirrorPolicy
 	(*v3.Metadata)(nil),     // 9: envoy.config.core.v3.Metadata
 	(*v3.ConfigSource)(nil), // 10: envoy.config.core.v3.ConfigSource
-	(*any1.Any)(nil),        // 11: google.protobuf.Any
+	(*anypb.Any)(nil),       // 11: google.protobuf.Any
 }
 var file_envoy_config_route_v3_route_proto_depIdxs = []int32{
 	3, // 0: envoy.config.route.v3.RouteConfiguration.virtual_hosts:type_name -> envoy.config.route.v3.VirtualHost
diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.validate.go
index 78991464dc9..be062e5bd8b 100644
--- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.validate.go
+++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.validate.go
@@ -1,3 +1,4 @@
+//go:build !disable_pgv
 // Code generated by protoc-gen-validate. DO NOT EDIT.
 // source: envoy/config/route/v3/route.proto
 
diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.go
index 3b0e41c354d..bd5ae667fc1 100644
--- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.go
+++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.go
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // protoc-gen-go v1.30.0
-// protoc        v4.23.4
+// protoc        v5.26.1
 // source: envoy/config/route/v3/route_components.proto
 
 package routev3
@@ -17,11 +17,11 @@ import (
 	v34 "github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3"
 	v33 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
 	_ "github.com/envoyproxy/protoc-gen-validate/validate"
-	any1 "github.com/golang/protobuf/ptypes/any"
-	duration "github.com/golang/protobuf/ptypes/duration"
-	wrappers "github.com/golang/protobuf/ptypes/wrappers"
 	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
 	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	anypb "google.golang.org/protobuf/types/known/anypb"
+	durationpb "google.golang.org/protobuf/types/known/durationpb"
+	wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
 	reflect "reflect"
 	sync "sync"
 )
@@ -433,7 +433,7 @@ type VirtualHost struct {
 	// [#comment: An entry's value may be wrapped in a
 	// :ref:`FilterConfig`
 	// message to specify additional options.]
-	TypedPerFilterConfig map[string]*any1.Any `protobuf:"bytes,15,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	TypedPerFilterConfig map[string]*anypb.Any `protobuf:"bytes,15,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
 	// Decides whether the :ref:`x-envoy-attempt-count
 	// ` header should be included
 	// in the upstream request. Setting this option will cause it to override any existing header
@@ -463,7 +463,7 @@ type VirtualHost struct {
 	// will take precedence over this config and it'll be treated independently (e.g.: values are not
 	// inherited). :ref:`Retry policy ` should not be
 	// set if this field is used.
-	RetryPolicyTypedConfig *any1.Any `protobuf:"bytes,20,opt,name=retry_policy_typed_config,json=retryPolicyTypedConfig,proto3" json:"retry_policy_typed_config,omitempty"`
+	RetryPolicyTypedConfig *anypb.Any `protobuf:"bytes,20,opt,name=retry_policy_typed_config,json=retryPolicyTypedConfig,proto3" json:"retry_policy_typed_config,omitempty"`
 	// Indicates the hedge policy for all routes in this virtual host. Note that setting a
 	// route level entry will take precedence over this config and it'll be treated
 	// independently (e.g.: values are not inherited).
@@ -474,7 +474,7 @@ type VirtualHost struct {
 	// The maximum bytes which will be buffered for retries and shadowing.
 	// If set and a route-specific limit is not set, the bytes actually buffered will be the minimum
 	// value of this and the listener per_connection_buffer_limit_bytes.
-	PerRequestBufferLimitBytes *wrappers.UInt32Value `protobuf:"bytes,18,opt,name=per_request_buffer_limit_bytes,json=perRequestBufferLimitBytes,proto3" json:"per_request_buffer_limit_bytes,omitempty"`
+	PerRequestBufferLimitBytes *wrapperspb.UInt32Value `protobuf:"bytes,18,opt,name=per_request_buffer_limit_bytes,json=perRequestBufferLimitBytes,proto3" json:"per_request_buffer_limit_bytes,omitempty"`
 	// Specify a set of default request mirroring policies for every route under this virtual host.
 	// It takes precedence over the route config mirror policy entirely.
 	// That is, policies are not merged, the most specific non-empty one becomes the mirror policies.
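Since several of these hunks swap any1.Any for anypb.Any in the typed_per_filter_config maps, here is a hedged sketch of populating one such map on a VirtualHost. It is not part of the vendored patch: the virtual-host and filter names are placeholders, the payload is a stand-in for a concrete per-filter config message, and anypb.New is the standard google.golang.org/protobuf helper for wrapping a message in an Any.

package example

import (
	routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// exampleVirtualHost shows the map[string]*anypb.Any shape that replaced
// map[string]*any1.Any in this regeneration; all values are hypothetical.
func exampleVirtualHost() (*routev3.VirtualHost, error) {
	// Placeholder payload; a real configuration would pack the concrete
	// per-filter config message for the named filter instead.
	payload, err := anypb.New(wrapperspb.Bool(true))
	if err != nil {
		return nil, err
	}
	return &routev3.VirtualHost{
		Name:    "example-vhost",
		Domains: []string{"*"},
		TypedPerFilterConfig: map[string]*anypb.Any{
			"envoy.filters.http.example": payload, // hypothetical filter name
		},
	}, nil
}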
@@ -604,7 +604,7 @@ func (x *VirtualHost) GetCors() *CorsPolicy { return nil } -func (x *VirtualHost) GetTypedPerFilterConfig() map[string]*any1.Any { +func (x *VirtualHost) GetTypedPerFilterConfig() map[string]*anypb.Any { if x != nil { return x.TypedPerFilterConfig } @@ -632,7 +632,7 @@ func (x *VirtualHost) GetRetryPolicy() *RetryPolicy { return nil } -func (x *VirtualHost) GetRetryPolicyTypedConfig() *any1.Any { +func (x *VirtualHost) GetRetryPolicyTypedConfig() *anypb.Any { if x != nil { return x.RetryPolicyTypedConfig } @@ -653,7 +653,7 @@ func (x *VirtualHost) GetIncludeIsTimeoutRetryHeader() bool { return false } -func (x *VirtualHost) GetPerRequestBufferLimitBytes() *wrappers.UInt32Value { +func (x *VirtualHost) GetPerRequestBufferLimitBytes() *wrapperspb.UInt32Value { if x != nil { return x.PerRequestBufferLimitBytes } @@ -680,7 +680,7 @@ type FilterAction struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Action *any1.Any `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"` + Action *anypb.Any `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"` } func (x *FilterAction) Reset() { @@ -715,7 +715,7 @@ func (*FilterAction) Descriptor() ([]byte, []int) { return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{1} } -func (x *FilterAction) GetAction() *any1.Any { +func (x *FilterAction) GetAction() *anypb.Any { if x != nil { return x.Action } @@ -814,7 +814,7 @@ type Route struct { // [#comment: An entry's value may be wrapped in a // :ref:`FilterConfig` // message to specify additional options.] - TypedPerFilterConfig map[string]*any1.Any `protobuf:"bytes,13,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TypedPerFilterConfig map[string]*anypb.Any `protobuf:"bytes,13,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Specifies a set of headers that will be added to requests matching this // route. Headers specified at this level are applied before headers from the // enclosing :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` and @@ -841,7 +841,7 @@ type Route struct { // The maximum bytes which will be buffered for retries and shadowing. // If set, the bytes actually buffered will be the minimum value of this and the // listener per_connection_buffer_limit_bytes. - PerRequestBufferLimitBytes *wrappers.UInt32Value `protobuf:"bytes,16,opt,name=per_request_buffer_limit_bytes,json=perRequestBufferLimitBytes,proto3" json:"per_request_buffer_limit_bytes,omitempty"` + PerRequestBufferLimitBytes *wrapperspb.UInt32Value `protobuf:"bytes,16,opt,name=per_request_buffer_limit_bytes,json=perRequestBufferLimitBytes,proto3" json:"per_request_buffer_limit_bytes,omitempty"` // The human readable prefix to use when emitting statistics for this endpoint. // The statistics are rooted at vhost..route.. 
// This should be set for highly critical @@ -960,7 +960,7 @@ func (x *Route) GetDecorator() *Decorator { return nil } -func (x *Route) GetTypedPerFilterConfig() map[string]*any1.Any { +func (x *Route) GetTypedPerFilterConfig() map[string]*anypb.Any { if x != nil { return x.TypedPerFilterConfig } @@ -1002,7 +1002,7 @@ func (x *Route) GetTracing() *Tracing { return nil } -func (x *Route) GetPerRequestBufferLimitBytes() *wrappers.UInt32Value { +func (x *Route) GetPerRequestBufferLimitBytes() *wrapperspb.UInt32Value { if x != nil { return x.PerRequestBufferLimitBytes } @@ -1081,7 +1081,7 @@ type WeightedCluster struct { // cluster weights. It is up to the management server to supply the correct weights. // // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. - TotalWeight *wrappers.UInt32Value `protobuf:"bytes,3,opt,name=total_weight,json=totalWeight,proto3" json:"total_weight,omitempty"` + TotalWeight *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=total_weight,json=totalWeight,proto3" json:"total_weight,omitempty"` // Specifies the runtime key prefix that should be used to construct the // runtime keys associated with each cluster. When the “runtime_key_prefix“ is // specified, the router will look for weights associated with each upstream @@ -1137,7 +1137,7 @@ func (x *WeightedCluster) GetClusters() []*WeightedCluster_ClusterWeight { } // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. -func (x *WeightedCluster) GetTotalWeight() *wrappers.UInt32Value { +func (x *WeightedCluster) GetTotalWeight() *wrapperspb.UInt32Value { if x != nil { return x.TotalWeight } @@ -1259,7 +1259,7 @@ type RouteMatch struct { PathSpecifier isRouteMatch_PathSpecifier `protobuf_oneof:"path_specifier"` // Indicates that prefix/path matching should be case sensitive. The default // is true. Ignored for safe_regex matching. - CaseSensitive *wrappers.BoolValue `protobuf:"bytes,4,opt,name=case_sensitive,json=caseSensitive,proto3" json:"case_sensitive,omitempty"` + CaseSensitive *wrapperspb.BoolValue `protobuf:"bytes,4,opt,name=case_sensitive,json=caseSensitive,proto3" json:"case_sensitive,omitempty"` // Indicates that the route should additionally match on a runtime key. Every time the route // is considered for a match, it must also fall under the percentage of matches indicated by // this field. For some fraction N/D, a random number in the range [0,D) is selected. If the @@ -1395,7 +1395,7 @@ func (x *RouteMatch) GetPathMatchPolicy() *v31.TypedExtensionConfig { return nil } -func (x *RouteMatch) GetCaseSensitive() *wrappers.BoolValue { +func (x *RouteMatch) GetCaseSensitive() *wrapperspb.BoolValue { if x != nil { return x.CaseSensitive } @@ -1526,7 +1526,7 @@ func (*RouteMatch_PathMatchPolicy) isRouteMatch_PathSpecifier() {} // :ref:`CorsPolicy in filter extension ` // as as alternative. // -// [#next-free-field: 13] +// [#next-free-field: 14] type CorsPolicy struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1544,7 +1544,7 @@ type CorsPolicy struct { // Specifies the content for the “access-control-max-age“ header. MaxAge string `protobuf:"bytes,5,opt,name=max_age,json=maxAge,proto3" json:"max_age,omitempty"` // Specifies whether the resource allows credentials. 
- AllowCredentials *wrappers.BoolValue `protobuf:"bytes,6,opt,name=allow_credentials,json=allowCredentials,proto3" json:"allow_credentials,omitempty"` + AllowCredentials *wrapperspb.BoolValue `protobuf:"bytes,6,opt,name=allow_credentials,json=allowCredentials,proto3" json:"allow_credentials,omitempty"` // Types that are assignable to EnabledSpecifier: // // *CorsPolicy_FilterEnabled @@ -1563,7 +1563,10 @@ type CorsPolicy struct { // which the request initiator was fetched. // // More details refer to https://developer.chrome.com/blog/private-network-access-preflight. - AllowPrivateNetworkAccess *wrappers.BoolValue `protobuf:"bytes,12,opt,name=allow_private_network_access,json=allowPrivateNetworkAccess,proto3" json:"allow_private_network_access,omitempty"` + AllowPrivateNetworkAccess *wrapperspb.BoolValue `protobuf:"bytes,12,opt,name=allow_private_network_access,json=allowPrivateNetworkAccess,proto3" json:"allow_private_network_access,omitempty"` + // Specifies if preflight requests not matching the configured allowed origin should be forwarded + // to the upstream. Default is true. + ForwardNotMatchingPreflights *wrapperspb.BoolValue `protobuf:"bytes,13,opt,name=forward_not_matching_preflights,json=forwardNotMatchingPreflights,proto3" json:"forward_not_matching_preflights,omitempty"` } func (x *CorsPolicy) Reset() { @@ -1633,7 +1636,7 @@ func (x *CorsPolicy) GetMaxAge() string { return "" } -func (x *CorsPolicy) GetAllowCredentials() *wrappers.BoolValue { +func (x *CorsPolicy) GetAllowCredentials() *wrapperspb.BoolValue { if x != nil { return x.AllowCredentials } @@ -1661,13 +1664,20 @@ func (x *CorsPolicy) GetShadowEnabled() *v31.RuntimeFractionalPercent { return nil } -func (x *CorsPolicy) GetAllowPrivateNetworkAccess() *wrappers.BoolValue { +func (x *CorsPolicy) GetAllowPrivateNetworkAccess() *wrapperspb.BoolValue { if x != nil { return x.AllowPrivateNetworkAccess } return nil } +func (x *CorsPolicy) GetForwardNotMatchingPreflights() *wrapperspb.BoolValue { + if x != nil { + return x.ForwardNotMatchingPreflights + } + return nil +} + type isCorsPolicy_EnabledSpecifier interface { isCorsPolicy_EnabledSpecifier() } @@ -1787,7 +1797,6 @@ type RouteAction struct { // :ref:`host_rewrite_path_regex `) // causes the original value of the host header, if any, to be appended to the // :ref:`config_http_conn_man_headers_x-forwarded-host` HTTP header if it is different to the last value appended. - // This can be disabled by setting the runtime guard “envoy_reloadable_features_append_xfh_idempotent“ to false. AppendXForwardedHost bool `protobuf:"varint,38,opt,name=append_x_forwarded_host,json=appendXForwardedHost,proto3" json:"append_x_forwarded_host,omitempty"` // Specifies the upstream timeout for the route. If not specified, the default is 15s. This // spans between the point at which the entire downstream request (i.e. end-of-stream) has been @@ -1800,7 +1809,7 @@ type RouteAction struct { // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the // :ref:`retry overview `. - Timeout *duration.Duration `protobuf:"bytes,8,opt,name=timeout,proto3" json:"timeout,omitempty"` + Timeout *durationpb.Duration `protobuf:"bytes,8,opt,name=timeout,proto3" json:"timeout,omitempty"` // Specifies the idle timeout for the route. 
If not specified, there is no per-route idle timeout, // although the connection manager wide :ref:`stream_idle_timeout // ` @@ -1823,7 +1832,7 @@ type RouteAction struct { // If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" // is configured, this timeout is scaled according to the value for // :ref:`HTTP_DOWNSTREAM_STREAM_IDLE `. - IdleTimeout *duration.Duration `protobuf:"bytes,24,opt,name=idle_timeout,json=idleTimeout,proto3" json:"idle_timeout,omitempty"` + IdleTimeout *durationpb.Duration `protobuf:"bytes,24,opt,name=idle_timeout,json=idleTimeout,proto3" json:"idle_timeout,omitempty"` // Specifies how to send request over TLS early data. // If absent, allows `safe HTTP requests `_ to be sent on early data. // [#extension-category: envoy.route.early_data_policy] @@ -1837,7 +1846,7 @@ type RouteAction struct { // precedence over the virtual host level retry policy entirely (e.g.: policies are not merged, // most internal one becomes the enforced policy). :ref:`Retry policy ` // should not be set if this field is used. - RetryPolicyTypedConfig *any1.Any `protobuf:"bytes,33,opt,name=retry_policy_typed_config,json=retryPolicyTypedConfig,proto3" json:"retry_policy_typed_config,omitempty"` + RetryPolicyTypedConfig *anypb.Any `protobuf:"bytes,33,opt,name=retry_policy_typed_config,json=retryPolicyTypedConfig,proto3" json:"retry_policy_typed_config,omitempty"` // Specify a set of route request mirroring policies. // It takes precedence over the virtual host and route config mirror policy entirely. // That is, policies are not merged, the most specific non-empty one becomes the mirror policies. @@ -1855,7 +1864,7 @@ type RouteAction struct { // This field is deprecated. Please use :ref:`vh_rate_limits ` // // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. - IncludeVhRateLimits *wrappers.BoolValue `protobuf:"bytes,14,opt,name=include_vh_rate_limits,json=includeVhRateLimits,proto3" json:"include_vh_rate_limits,omitempty"` + IncludeVhRateLimits *wrapperspb.BoolValue `protobuf:"bytes,14,opt,name=include_vh_rate_limits,json=includeVhRateLimits,proto3" json:"include_vh_rate_limits,omitempty"` // Specifies a list of hash policies to use for ring hash load balancing. Each // hash policy is evaluated individually and the combined result is used to // route the request. The method of combination is deterministic such that @@ -1904,7 +1913,7 @@ type RouteAction struct { // :ref:`retry overview `. // // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. - MaxGrpcTimeout *duration.Duration `protobuf:"bytes,23,opt,name=max_grpc_timeout,json=maxGrpcTimeout,proto3" json:"max_grpc_timeout,omitempty"` + MaxGrpcTimeout *durationpb.Duration `protobuf:"bytes,23,opt,name=max_grpc_timeout,json=maxGrpcTimeout,proto3" json:"max_grpc_timeout,omitempty"` // Deprecated by :ref:`grpc_timeout_header_offset `. // If present, Envoy will adjust the timeout provided by the “grpc-timeout“ header by subtracting // the provided duration from the header. This is useful in allowing Envoy to set its global @@ -1915,7 +1924,7 @@ type RouteAction struct { // infinity). // // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. 
- GrpcTimeoutOffset *duration.Duration `protobuf:"bytes,28,opt,name=grpc_timeout_offset,json=grpcTimeoutOffset,proto3" json:"grpc_timeout_offset,omitempty"` + GrpcTimeoutOffset *durationpb.Duration `protobuf:"bytes,28,opt,name=grpc_timeout_offset,json=grpcTimeoutOffset,proto3" json:"grpc_timeout_offset,omitempty"` UpgradeConfigs []*RouteAction_UpgradeConfig `protobuf:"bytes,25,rep,name=upgrade_configs,json=upgradeConfigs,proto3" json:"upgrade_configs,omitempty"` // If present, Envoy will try to follow an upstream redirect response instead of proxying the // response back to the downstream. An upstream redirect response is defined @@ -1940,7 +1949,7 @@ type RouteAction struct { // If not specified, at most one redirect will be followed. // // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. - MaxInternalRedirects *wrappers.UInt32Value `protobuf:"bytes,31,opt,name=max_internal_redirects,json=maxInternalRedirects,proto3" json:"max_internal_redirects,omitempty"` + MaxInternalRedirects *wrapperspb.UInt32Value `protobuf:"bytes,31,opt,name=max_internal_redirects,json=maxInternalRedirects,proto3" json:"max_internal_redirects,omitempty"` // Indicates that the route has a hedge policy. Note that if this is set, // it'll take precedence over the virtual host level hedge policy entirely // (e.g.: policies are not merged, most internal one becomes the enforced policy). @@ -2072,7 +2081,7 @@ func (x *RouteAction) GetHostRewriteLiteral() string { return "" } -func (x *RouteAction) GetAutoHostRewrite() *wrappers.BoolValue { +func (x *RouteAction) GetAutoHostRewrite() *wrapperspb.BoolValue { if x, ok := x.GetHostRewriteSpecifier().(*RouteAction_AutoHostRewrite); ok { return x.AutoHostRewrite } @@ -2100,14 +2109,14 @@ func (x *RouteAction) GetAppendXForwardedHost() bool { return false } -func (x *RouteAction) GetTimeout() *duration.Duration { +func (x *RouteAction) GetTimeout() *durationpb.Duration { if x != nil { return x.Timeout } return nil } -func (x *RouteAction) GetIdleTimeout() *duration.Duration { +func (x *RouteAction) GetIdleTimeout() *durationpb.Duration { if x != nil { return x.IdleTimeout } @@ -2128,7 +2137,7 @@ func (x *RouteAction) GetRetryPolicy() *RetryPolicy { return nil } -func (x *RouteAction) GetRetryPolicyTypedConfig() *any1.Any { +func (x *RouteAction) GetRetryPolicyTypedConfig() *anypb.Any { if x != nil { return x.RetryPolicyTypedConfig } @@ -2157,7 +2166,7 @@ func (x *RouteAction) GetRateLimits() []*RateLimit { } // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. -func (x *RouteAction) GetIncludeVhRateLimits() *wrappers.BoolValue { +func (x *RouteAction) GetIncludeVhRateLimits() *wrapperspb.BoolValue { if x != nil { return x.IncludeVhRateLimits } @@ -2180,7 +2189,7 @@ func (x *RouteAction) GetCors() *CorsPolicy { } // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. -func (x *RouteAction) GetMaxGrpcTimeout() *duration.Duration { +func (x *RouteAction) GetMaxGrpcTimeout() *durationpb.Duration { if x != nil { return x.MaxGrpcTimeout } @@ -2188,7 +2197,7 @@ func (x *RouteAction) GetMaxGrpcTimeout() *duration.Duration { } // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. 
-func (x *RouteAction) GetGrpcTimeoutOffset() *duration.Duration { +func (x *RouteAction) GetGrpcTimeoutOffset() *durationpb.Duration { if x != nil { return x.GrpcTimeoutOffset } @@ -2218,7 +2227,7 @@ func (x *RouteAction) GetInternalRedirectAction() RouteAction_InternalRedirectAc } // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. -func (x *RouteAction) GetMaxInternalRedirects() *wrappers.UInt32Value { +func (x *RouteAction) GetMaxInternalRedirects() *wrapperspb.UInt32Value { if x != nil { return x.MaxInternalRedirects } @@ -2323,7 +2332,7 @@ type RouteAction_AutoHostRewrite struct { // :ref:`config_http_conn_man_headers_x-forwarded-host` header if // :ref:`append_x_forwarded_host ` // is set. - AutoHostRewrite *wrappers.BoolValue `protobuf:"bytes,7,opt,name=auto_host_rewrite,json=autoHostRewrite,proto3,oneof"` + AutoHostRewrite *wrapperspb.BoolValue `protobuf:"bytes,7,opt,name=auto_host_rewrite,json=autoHostRewrite,proto3,oneof"` } type RouteAction_HostRewriteHeader struct { @@ -2390,7 +2399,7 @@ type RetryPolicy struct { // Specifies the allowed number of retries. This parameter is optional and // defaults to 1. These are the same conditions documented for // :ref:`config_http_filters_router_x-envoy-max-retries`. - NumRetries *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=num_retries,json=numRetries,proto3" json:"num_retries,omitempty"` + NumRetries *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=num_retries,json=numRetries,proto3" json:"num_retries,omitempty"` // Specifies a non-zero upstream timeout per retry attempt (including the initial attempt). This // parameter is optional. The same conditions documented for // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply. @@ -2402,7 +2411,7 @@ type RetryPolicy struct { // Consequently, when using a :ref:`5xx ` based // retry policy, a request that times out will not be retried as the total timeout budget // would have been exhausted. - PerTryTimeout *duration.Duration `protobuf:"bytes,3,opt,name=per_try_timeout,json=perTryTimeout,proto3" json:"per_try_timeout,omitempty"` + PerTryTimeout *durationpb.Duration `protobuf:"bytes,3,opt,name=per_try_timeout,json=perTryTimeout,proto3" json:"per_try_timeout,omitempty"` // Specifies an upstream idle timeout per retry attempt (including the initial attempt). This // parameter is optional and if absent there is no per try idle timeout. The semantics of the per // try idle timeout are similar to the @@ -2422,7 +2431,7 @@ type RetryPolicy struct { // the idle timer continues once the response starts streaming back to the downstream client. // This ensures that response data continues to make progress without using one of the HTTP // connection manager idle timeouts. - PerTryIdleTimeout *duration.Duration `protobuf:"bytes,13,opt,name=per_try_idle_timeout,json=perTryIdleTimeout,proto3" json:"per_try_idle_timeout,omitempty"` + PerTryIdleTimeout *durationpb.Duration `protobuf:"bytes,13,opt,name=per_try_idle_timeout,json=perTryIdleTimeout,proto3" json:"per_try_idle_timeout,omitempty"` // Specifies an implementation of a RetryPriority which is used to determine the // distribution of load across priorities used for retries. Refer to // :ref:`retry plugin configuration ` for more details. 
@@ -2503,21 +2512,21 @@ func (x *RetryPolicy) GetRetryOn() string { return "" } -func (x *RetryPolicy) GetNumRetries() *wrappers.UInt32Value { +func (x *RetryPolicy) GetNumRetries() *wrapperspb.UInt32Value { if x != nil { return x.NumRetries } return nil } -func (x *RetryPolicy) GetPerTryTimeout() *duration.Duration { +func (x *RetryPolicy) GetPerTryTimeout() *durationpb.Duration { if x != nil { return x.PerTryTimeout } return nil } -func (x *RetryPolicy) GetPerTryIdleTimeout() *duration.Duration { +func (x *RetryPolicy) GetPerTryIdleTimeout() *durationpb.Duration { if x != nil { return x.PerTryIdleTimeout } @@ -2597,7 +2606,7 @@ type HedgePolicy struct { // Must be at least 1. // Defaults to 1. // [#not-implemented-hide:] - InitialRequests *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=initial_requests,json=initialRequests,proto3" json:"initial_requests,omitempty"` + InitialRequests *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=initial_requests,json=initialRequests,proto3" json:"initial_requests,omitempty"` // Specifies a probability that an additional upstream request should be sent // on top of what is specified by initial_requests. // Defaults to 0. @@ -2651,7 +2660,7 @@ func (*HedgePolicy) Descriptor() ([]byte, []int) { return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{10} } -func (x *HedgePolicy) GetInitialRequests() *wrappers.UInt32Value { +func (x *HedgePolicy) GetInitialRequests() *wrapperspb.UInt32Value { if x != nil { return x.InitialRequests } @@ -3021,7 +3030,7 @@ type Decorator struct { // ` header. Operation string `protobuf:"bytes,1,opt,name=operation,proto3" json:"operation,omitempty"` // Whether the decorated details should be propagated to the other party. The default is true. - Propagate *wrappers.BoolValue `protobuf:"bytes,2,opt,name=propagate,proto3" json:"propagate,omitempty"` + Propagate *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=propagate,proto3" json:"propagate,omitempty"` } func (x *Decorator) Reset() { @@ -3063,7 +3072,7 @@ func (x *Decorator) GetOperation() string { return "" } -func (x *Decorator) GetPropagate() *wrappers.BoolValue { +func (x *Decorator) GetPropagate() *wrapperspb.BoolValue { if x != nil { return x.Propagate } @@ -3258,7 +3267,7 @@ type RateLimit struct { // .. note:: // // The filter supports a range of 0 - 10 inclusively for stage numbers. - Stage *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=stage,proto3" json:"stage,omitempty"` + Stage *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=stage,proto3" json:"stage,omitempty"` // The key to be set in runtime to disable this rate limit configuration. DisableKey string `protobuf:"bytes,2,opt,name=disable_key,json=disableKey,proto3" json:"disable_key,omitempty"` // A list of actions that are to be applied for this rate limit configuration. @@ -3307,7 +3316,7 @@ func (*RateLimit) Descriptor() ([]byte, []int) { return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17} } -func (x *RateLimit) GetStage() *wrappers.UInt32Value { +func (x *RateLimit) GetStage() *wrapperspb.UInt32Value { if x != nil { return x.Stage } @@ -3756,7 +3765,7 @@ type InternalRedirectPolicy struct { // will pass the redirect back to downstream. // // If not specified, at most one redirect will be followed. 
- MaxInternalRedirects *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=max_internal_redirects,json=maxInternalRedirects,proto3" json:"max_internal_redirects,omitempty"` + MaxInternalRedirects *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=max_internal_redirects,json=maxInternalRedirects,proto3" json:"max_internal_redirects,omitempty"` // Defines what upstream response codes are allowed to trigger internal redirect. If unspecified, // only 302 will be treated as internal redirect. // Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be ignored. @@ -3807,7 +3816,7 @@ func (*InternalRedirectPolicy) Descriptor() ([]byte, []int) { return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{20} } -func (x *InternalRedirectPolicy) GetMaxInternalRedirects() *wrappers.UInt32Value { +func (x *InternalRedirectPolicy) GetMaxInternalRedirects() *wrapperspb.UInt32Value { if x != nil { return x.MaxInternalRedirects } @@ -3854,7 +3863,7 @@ type FilterConfig struct { unknownFields protoimpl.UnknownFields // The filter config. - Config *any1.Any `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + Config *anypb.Any `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` // If true, the filter is optional, meaning that if the client does // not support the specified filter, it may ignore the map entry rather // than rejecting the config. @@ -3908,7 +3917,7 @@ func (*FilterConfig) Descriptor() ([]byte, []int) { return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{21} } -func (x *FilterConfig) GetConfig() *any1.Any { +func (x *FilterConfig) GetConfig() *anypb.Any { if x != nil { return x.Config } @@ -3961,7 +3970,7 @@ type WeightedCluster_ClusterWeight struct { // is determined by its weight. The sum of weights across all // entries in the clusters array must be greater than 0, and must not exceed // uint32_t maximal value (4294967295). - Weight *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=weight,proto3" json:"weight,omitempty"` + Weight *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=weight,proto3" json:"weight,omitempty"` // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in // the upstream cluster with metadata matching what is set in this field will be considered for // load balancing. Note that this will be merged with what's provided in @@ -3998,7 +4007,7 @@ type WeightedCluster_ClusterWeight struct { // [#comment: An entry's value may be wrapped in a // :ref:`FilterConfig` // message to specify additional options.] 
- TypedPerFilterConfig map[string]*any1.Any `protobuf:"bytes,10,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TypedPerFilterConfig map[string]*anypb.Any `protobuf:"bytes,10,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Types that are assignable to HostRewriteSpecifier: // // *WeightedCluster_ClusterWeight_HostRewriteLiteral @@ -4051,7 +4060,7 @@ func (x *WeightedCluster_ClusterWeight) GetClusterHeader() string { return "" } -func (x *WeightedCluster_ClusterWeight) GetWeight() *wrappers.UInt32Value { +func (x *WeightedCluster_ClusterWeight) GetWeight() *wrapperspb.UInt32Value { if x != nil { return x.Weight } @@ -4093,7 +4102,7 @@ func (x *WeightedCluster_ClusterWeight) GetResponseHeadersToRemove() []string { return nil } -func (x *WeightedCluster_ClusterWeight) GetTypedPerFilterConfig() map[string]*any1.Any { +func (x *WeightedCluster_ClusterWeight) GetTypedPerFilterConfig() map[string]*anypb.Any { if x != nil { return x.TypedPerFilterConfig } @@ -4172,7 +4181,7 @@ type RouteMatch_TlsContextMatchOptions struct { // If specified, the route will match against whether or not a certificate is presented. // If not specified, certificate presentation status (true or false) will not be considered when route matching. - Presented *wrappers.BoolValue `protobuf:"bytes,1,opt,name=presented,proto3" json:"presented,omitempty"` + Presented *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=presented,proto3" json:"presented,omitempty"` // If specified, the route will match against whether or not a certificate is validated. // If not specified, certificate validation status (true or false) will not be considered when route matching. // @@ -4185,7 +4194,7 @@ type RouteMatch_TlsContextMatchOptions struct { // The only known workaround for this issue is to disable TLS session resumption entirely, by // setting both :ref:`disable_stateless_session_resumption ` // and :ref:`disable_stateful_session_resumption ` on the DownstreamTlsContext. - Validated *wrappers.BoolValue `protobuf:"bytes,2,opt,name=validated,proto3" json:"validated,omitempty"` + Validated *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=validated,proto3" json:"validated,omitempty"` } func (x *RouteMatch_TlsContextMatchOptions) Reset() { @@ -4220,14 +4229,14 @@ func (*RouteMatch_TlsContextMatchOptions) Descriptor() ([]byte, []int) { return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{6, 1} } -func (x *RouteMatch_TlsContextMatchOptions) GetPresented() *wrappers.BoolValue { +func (x *RouteMatch_TlsContextMatchOptions) GetPresented() *wrapperspb.BoolValue { if x != nil { return x.Presented } return nil } -func (x *RouteMatch_TlsContextMatchOptions) GetValidated() *wrappers.BoolValue { +func (x *RouteMatch_TlsContextMatchOptions) GetValidated() *wrapperspb.BoolValue { if x != nil { return x.Validated } @@ -4279,7 +4288,8 @@ func (*RouteMatch_ConnectMatcher) Descriptor() ([]byte, []int) { // collected for the shadow cluster making this feature useful for testing. // // During shadowing, the host/authority header is altered such that “-shadow“ is appended. This is -// useful for logging. For example, “cluster1“ becomes “cluster1-shadow“. +// useful for logging. For example, “cluster1“ becomes “cluster1-shadow“. 
This behavior can be +// disabled by setting “disable_shadow_host_suffix_append“ to “true“. // // .. note:: // @@ -4289,7 +4299,7 @@ func (*RouteMatch_ConnectMatcher) Descriptor() ([]byte, []int) { // // Shadowing doesn't support Http CONNECT and upgrades. // -// [#next-free-field: 6] +// [#next-free-field: 7] type RouteAction_RequestMirrorPolicy struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -4325,7 +4335,9 @@ type RouteAction_RequestMirrorPolicy struct { // value, the request will be mirrored. RuntimeFraction *v31.RuntimeFractionalPercent `protobuf:"bytes,3,opt,name=runtime_fraction,json=runtimeFraction,proto3" json:"runtime_fraction,omitempty"` // Determines if the trace span should be sampled. Defaults to true. - TraceSampled *wrappers.BoolValue `protobuf:"bytes,4,opt,name=trace_sampled,json=traceSampled,proto3" json:"trace_sampled,omitempty"` + TraceSampled *wrapperspb.BoolValue `protobuf:"bytes,4,opt,name=trace_sampled,json=traceSampled,proto3" json:"trace_sampled,omitempty"` + // Disables appending the “-shadow“ suffix to the shadowed “Host“ header. Defaults to “false“. + DisableShadowHostSuffixAppend bool `protobuf:"varint,6,opt,name=disable_shadow_host_suffix_append,json=disableShadowHostSuffixAppend,proto3" json:"disable_shadow_host_suffix_append,omitempty"` } func (x *RouteAction_RequestMirrorPolicy) Reset() { @@ -4381,13 +4393,20 @@ func (x *RouteAction_RequestMirrorPolicy) GetRuntimeFraction() *v31.RuntimeFract return nil } -func (x *RouteAction_RequestMirrorPolicy) GetTraceSampled() *wrappers.BoolValue { +func (x *RouteAction_RequestMirrorPolicy) GetTraceSampled() *wrapperspb.BoolValue { if x != nil { return x.TraceSampled } return nil } +func (x *RouteAction_RequestMirrorPolicy) GetDisableShadowHostSuffixAppend() bool { + if x != nil { + return x.DisableShadowHostSuffixAppend + } + return false +} + // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer // `. // [#next-free-field: 7] @@ -4562,7 +4581,7 @@ type RouteAction_UpgradeConfig struct { // Upgrade: [upgrade_type] will be proxied upstream. UpgradeType string `protobuf:"bytes,1,opt,name=upgrade_type,json=upgradeType,proto3" json:"upgrade_type,omitempty"` // Determines if upgrades are available on this route. Defaults to true. - Enabled *wrappers.BoolValue `protobuf:"bytes,2,opt,name=enabled,proto3" json:"enabled,omitempty"` + Enabled *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=enabled,proto3" json:"enabled,omitempty"` // Configuration for sending data upstream as a raw data payload. This is used for // CONNECT requests, when forwarding CONNECT payload as raw TCP. // Note that CONNECT support is currently considered alpha in Envoy. @@ -4609,7 +4628,7 @@ func (x *RouteAction_UpgradeConfig) GetUpgradeType() string { return "" } -func (x *RouteAction_UpgradeConfig) GetEnabled() *wrappers.BoolValue { +func (x *RouteAction_UpgradeConfig) GetEnabled() *wrapperspb.BoolValue { if x != nil { return x.Enabled } @@ -4636,19 +4655,19 @@ type RouteAction_MaxStreamDuration struct { // is used. If this field is set explicitly to zero, any // HttpConnectionManager max_stream_duration timeout will be disabled for // this route. 
- MaxStreamDuration *duration.Duration `protobuf:"bytes,1,opt,name=max_stream_duration,json=maxStreamDuration,proto3" json:"max_stream_duration,omitempty"` + MaxStreamDuration *durationpb.Duration `protobuf:"bytes,1,opt,name=max_stream_duration,json=maxStreamDuration,proto3" json:"max_stream_duration,omitempty"` // If present, and the request contains a `grpc-timeout header // `_, use that value as the // “max_stream_duration“, but limit the applied timeout to the maximum value specified here. // If set to 0, the “grpc-timeout“ header is used without modification. - GrpcTimeoutHeaderMax *duration.Duration `protobuf:"bytes,2,opt,name=grpc_timeout_header_max,json=grpcTimeoutHeaderMax,proto3" json:"grpc_timeout_header_max,omitempty"` + GrpcTimeoutHeaderMax *durationpb.Duration `protobuf:"bytes,2,opt,name=grpc_timeout_header_max,json=grpcTimeoutHeaderMax,proto3" json:"grpc_timeout_header_max,omitempty"` // If present, Envoy will adjust the timeout provided by the “grpc-timeout“ header by // subtracting the provided duration from the header. This is useful for allowing Envoy to set // its global timeout to be less than that of the deadline imposed by the calling client, which // makes it more likely that Envoy will handle the timeout instead of having the call canceled // by the client. If, after applying the offset, the resulting timeout is zero or negative, // the stream will timeout immediately. - GrpcTimeoutHeaderOffset *duration.Duration `protobuf:"bytes,3,opt,name=grpc_timeout_header_offset,json=grpcTimeoutHeaderOffset,proto3" json:"grpc_timeout_header_offset,omitempty"` + GrpcTimeoutHeaderOffset *durationpb.Duration `protobuf:"bytes,3,opt,name=grpc_timeout_header_offset,json=grpcTimeoutHeaderOffset,proto3" json:"grpc_timeout_header_offset,omitempty"` } func (x *RouteAction_MaxStreamDuration) Reset() { @@ -4683,21 +4702,21 @@ func (*RouteAction_MaxStreamDuration) Descriptor() ([]byte, []int) { return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 3} } -func (x *RouteAction_MaxStreamDuration) GetMaxStreamDuration() *duration.Duration { +func (x *RouteAction_MaxStreamDuration) GetMaxStreamDuration() *durationpb.Duration { if x != nil { return x.MaxStreamDuration } return nil } -func (x *RouteAction_MaxStreamDuration) GetGrpcTimeoutHeaderMax() *duration.Duration { +func (x *RouteAction_MaxStreamDuration) GetGrpcTimeoutHeaderMax() *durationpb.Duration { if x != nil { return x.GrpcTimeoutHeaderMax } return nil } -func (x *RouteAction_MaxStreamDuration) GetGrpcTimeoutHeaderOffset() *duration.Duration { +func (x *RouteAction_MaxStreamDuration) GetGrpcTimeoutHeaderOffset() *durationpb.Duration { if x != nil { return x.GrpcTimeoutHeaderOffset } @@ -4847,7 +4866,7 @@ type RouteAction_HashPolicy_Cookie struct { // If specified, a cookie with the TTL will be generated if the cookie is // not present. If the TTL is present and zero, the generated cookie will // be a session cookie. - Ttl *duration.Duration `protobuf:"bytes,2,opt,name=ttl,proto3" json:"ttl,omitempty"` + Ttl *durationpb.Duration `protobuf:"bytes,2,opt,name=ttl,proto3" json:"ttl,omitempty"` // The name of the path for the cookie. If no path is specified here, no path // will be set for the cookie. 
Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` @@ -4894,7 +4913,7 @@ func (x *RouteAction_HashPolicy_Cookie) GetName() string { return "" } -func (x *RouteAction_HashPolicy_Cookie) GetTtl() *duration.Duration { +func (x *RouteAction_HashPolicy_Cookie) GetTtl() *durationpb.Duration { if x != nil { return x.Ttl } @@ -5183,7 +5202,7 @@ func (m *RetryPolicy_RetryPriority) GetConfigType() isRetryPolicy_RetryPriority_ return nil } -func (x *RetryPolicy_RetryPriority) GetTypedConfig() *any1.Any { +func (x *RetryPolicy_RetryPriority) GetTypedConfig() *anypb.Any { if x, ok := x.GetConfigType().(*RetryPolicy_RetryPriority_TypedConfig); ok { return x.TypedConfig } @@ -5195,7 +5214,7 @@ type isRetryPolicy_RetryPriority_ConfigType interface { } type RetryPolicy_RetryPriority_TypedConfig struct { - TypedConfig *any1.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` } func (*RetryPolicy_RetryPriority_TypedConfig) isRetryPolicy_RetryPriority_ConfigType() {} @@ -5260,7 +5279,7 @@ func (m *RetryPolicy_RetryHostPredicate) GetConfigType() isRetryPolicy_RetryHost return nil } -func (x *RetryPolicy_RetryHostPredicate) GetTypedConfig() *any1.Any { +func (x *RetryPolicy_RetryHostPredicate) GetTypedConfig() *anypb.Any { if x, ok := x.GetConfigType().(*RetryPolicy_RetryHostPredicate_TypedConfig); ok { return x.TypedConfig } @@ -5272,7 +5291,7 @@ type isRetryPolicy_RetryHostPredicate_ConfigType interface { } type RetryPolicy_RetryHostPredicate_TypedConfig struct { - TypedConfig *any1.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` } func (*RetryPolicy_RetryHostPredicate_TypedConfig) isRetryPolicy_RetryHostPredicate_ConfigType() {} @@ -5286,12 +5305,12 @@ type RetryPolicy_RetryBackOff struct { // than zero. Values less than 1 ms are rounded up to 1 ms. // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's // back-off algorithm. - BaseInterval *duration.Duration `protobuf:"bytes,1,opt,name=base_interval,json=baseInterval,proto3" json:"base_interval,omitempty"` + BaseInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=base_interval,json=baseInterval,proto3" json:"base_interval,omitempty"` // Specifies the maximum interval between retries. This parameter is optional, but must be // greater than or equal to the “base_interval“ if set. The default is 10 times the // “base_interval“. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion // of Envoy's back-off algorithm. 
- MaxInterval *duration.Duration `protobuf:"bytes,2,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"` + MaxInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"` } func (x *RetryPolicy_RetryBackOff) Reset() { @@ -5326,14 +5345,14 @@ func (*RetryPolicy_RetryBackOff) Descriptor() ([]byte, []int) { return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{9, 2} } -func (x *RetryPolicy_RetryBackOff) GetBaseInterval() *duration.Duration { +func (x *RetryPolicy_RetryBackOff) GetBaseInterval() *durationpb.Duration { if x != nil { return x.BaseInterval } return nil } -func (x *RetryPolicy_RetryBackOff) GetMaxInterval() *duration.Duration { +func (x *RetryPolicy_RetryBackOff) GetMaxInterval() *durationpb.Duration { if x != nil { return x.MaxInterval } @@ -5457,7 +5476,7 @@ type RetryPolicy_RateLimitedRetryBackOff struct { // Specifies the maximum back off interval that Envoy will allow. If a reset // header contains an interval longer than this then it will be discarded and // the next header will be tried. Defaults to 300 seconds. - MaxInterval *duration.Duration `protobuf:"bytes,2,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"` + MaxInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"` } func (x *RetryPolicy_RateLimitedRetryBackOff) Reset() { @@ -5499,7 +5518,7 @@ func (x *RetryPolicy_RateLimitedRetryBackOff) GetResetHeaders() []*RetryPolicy_R return nil } -func (x *RetryPolicy_RateLimitedRetryBackOff) GetMaxInterval() *duration.Duration { +func (x *RetryPolicy_RateLimitedRetryBackOff) GetMaxInterval() *durationpb.Duration { if x != nil { return x.MaxInterval } @@ -6042,13 +6061,13 @@ type RateLimit_Action_MaskedRemoteAddress struct { // For example, trusted address from x-forwarded-for is “192.168.1.1“, // the descriptor entry is ("masked_remote_address", "192.168.1.1/32"); // if mask len is 24, the descriptor entry is ("masked_remote_address", "192.168.1.0/24"). - V4PrefixMaskLen *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=v4_prefix_mask_len,json=v4PrefixMaskLen,proto3" json:"v4_prefix_mask_len,omitempty"` + V4PrefixMaskLen *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=v4_prefix_mask_len,json=v4PrefixMaskLen,proto3" json:"v4_prefix_mask_len,omitempty"` // Length of prefix mask len for IPv6 (e.g. 0, 128). // Defaults to 128 when unset. // For example, trusted address from x-forwarded-for is “2001:abcd:ef01:2345:6789:abcd:ef01:234“, // the descriptor entry is ("masked_remote_address", "2001:abcd:ef01:2345:6789:abcd:ef01:234/128"); // if mask len is 64, the descriptor entry is ("masked_remote_address", "2001:abcd:ef01:2345::/64"). 
- V6PrefixMaskLen *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=v6_prefix_mask_len,json=v6PrefixMaskLen,proto3" json:"v6_prefix_mask_len,omitempty"` + V6PrefixMaskLen *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=v6_prefix_mask_len,json=v6PrefixMaskLen,proto3" json:"v6_prefix_mask_len,omitempty"` } func (x *RateLimit_Action_MaskedRemoteAddress) Reset() { @@ -6083,14 +6102,14 @@ func (*RateLimit_Action_MaskedRemoteAddress) Descriptor() ([]byte, []int) { return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 4} } -func (x *RateLimit_Action_MaskedRemoteAddress) GetV4PrefixMaskLen() *wrappers.UInt32Value { +func (x *RateLimit_Action_MaskedRemoteAddress) GetV4PrefixMaskLen() *wrapperspb.UInt32Value { if x != nil { return x.V4PrefixMaskLen } return nil } -func (x *RateLimit_Action_MaskedRemoteAddress) GetV6PrefixMaskLen() *wrappers.UInt32Value { +func (x *RateLimit_Action_MaskedRemoteAddress) GetV6PrefixMaskLen() *wrapperspb.UInt32Value { if x != nil { return x.V6PrefixMaskLen } @@ -6178,7 +6197,7 @@ type RateLimit_Action_HeaderValueMatch struct { // request matches the headers. If set to false, the action will append a // descriptor entry when the request does not match the headers. The // default value is true. - ExpectMatch *wrappers.BoolValue `protobuf:"bytes,2,opt,name=expect_match,json=expectMatch,proto3" json:"expect_match,omitempty"` + ExpectMatch *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=expect_match,json=expectMatch,proto3" json:"expect_match,omitempty"` // Specifies a set of headers that the rate limit action should match // on. The action will check the request’s headers against all the // specified headers in the config. A match will happen if all the @@ -6233,7 +6252,7 @@ func (x *RateLimit_Action_HeaderValueMatch) GetDescriptorValue() string { return "" } -func (x *RateLimit_Action_HeaderValueMatch) GetExpectMatch() *wrappers.BoolValue { +func (x *RateLimit_Action_HeaderValueMatch) GetExpectMatch() *wrapperspb.BoolValue { if x != nil { return x.ExpectMatch } @@ -6439,7 +6458,7 @@ type RateLimit_Action_QueryParameterValueMatch struct { // request matches the headers. If set to false, the action will append a // descriptor entry when the request does not match the headers. The // default value is true. - ExpectMatch *wrappers.BoolValue `protobuf:"bytes,2,opt,name=expect_match,json=expectMatch,proto3" json:"expect_match,omitempty"` + ExpectMatch *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=expect_match,json=expectMatch,proto3" json:"expect_match,omitempty"` // Specifies a set of query parameters that the rate limit action should match // on. The action will check the request’s query parameters against all the // specified query parameters in the config. 
A match will happen if all the @@ -6494,7 +6513,7 @@ func (x *RateLimit_Action_QueryParameterValueMatch) GetDescriptorValue() string return "" } -func (x *RateLimit_Action_QueryParameterValueMatch) GetExpectMatch() *wrappers.BoolValue { +func (x *RateLimit_Action_QueryParameterValueMatch) GetExpectMatch() *wrapperspb.BoolValue { if x != nil { return x.ExpectMatch } @@ -7013,7 +7032,7 @@ var file_envoy_config_route_v3_route_components_proto_rawDesc = []byte{ 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x15, 0x0a, 0x0e, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, - 0x08, 0x03, 0x10, 0x04, 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x22, 0xc5, 0x05, 0x0a, 0x0a, + 0x08, 0x03, 0x10, 0x04, 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x22, 0xa8, 0x06, 0x0a, 0x0a, 0x43, 0x6f, 0x72, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x5f, 0x0a, 0x19, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, @@ -7050,958 +7069,969 @@ var file_envoy_config_route_v3_route_components_proto_rawDesc = []byte{ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4e, 0x65, - 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x3a, 0x24, 0x9a, 0xc5, 0x88, - 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x42, 0x13, 0x0a, 0x11, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, 0x73, 0x70, 0x65, - 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x08, - 0x10, 0x09, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, - 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6f, 0x72, - 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x22, 0xdc, 0x2c, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, - 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x36, 0x0a, 0x0e, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x48, - 0x00, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x12, 0x55, 0x0a, 0x11, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x77, 0x65, 0x69, 0x67, 
0x68, 0x74, 0x65, 0x64, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3a, 0x0a, 0x18, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x16, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x12, 0x76, 0x0a, 0x1f, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x27, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, - 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x48, 0x00, 0x52, 0x1c, 0x69, - 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, - 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x8e, 0x01, 0x0a, 0x1f, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, - 0x64, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, - 0x14, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x43, 0x6f, 0x64, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, - 0x1b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x45, 0x0a, 0x0e, - 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x52, 0x0d, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x12, 0x32, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x72, 0x65, - 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, - 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x53, 0x0a, 0x0d, 0x72, 0x65, 0x67, 0x65, 0x78, - 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x52, 0x0c, - 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x5a, 0x0a, 0x13, - 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x18, 0x29, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 
0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x70, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, - 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x3f, 0x0a, 0x14, 0x68, 0x6f, 0x73, 0x74, - 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, - 0xc0, 0x01, 0x02, 0x48, 0x01, 0x52, 0x12, 0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x77, 0x72, 0x69, - 0x74, 0x65, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x12, 0x48, 0x0a, 0x11, 0x61, 0x75, 0x74, - 0x6f, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x61, 0x0a, 0x1f, 0x66, + 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x48, 0x01, 0x52, 0x0f, 0x61, 0x75, 0x74, 0x6f, 0x48, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x12, 0x3d, 0x0a, 0x13, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x48, 0x01, 0x52, - 0x11, 0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x12, 0x67, 0x0a, 0x17, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, - 0x74, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x18, 0x23, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, - 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, - 0x75, 0x74, 0x65, 0x48, 0x01, 0x52, 0x14, 0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x77, 0x72, 0x69, - 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x35, 0x0a, 0x17, 0x61, - 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x78, 0x5f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, - 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x26, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x61, 0x70, - 0x70, 0x65, 0x6e, 0x64, 0x58, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x48, 0x6f, - 0x73, 0x74, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, - 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3c, 0x0a, 0x0c, 0x69, 0x64, 0x6c, 0x65, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x69, 0x64, 0x6c, 0x65, 0x54, 0x69, - 0x6d, 0x65, 0x6f, 
0x75, 0x74, 0x12, 0x56, 0x0a, 0x11, 0x65, 0x61, 0x72, 0x6c, 0x79, 0x5f, 0x64, - 0x61, 0x74, 0x61, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x28, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x65, 0x61, - 0x72, 0x6c, 0x79, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x45, 0x0a, - 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, - 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4f, 0x0a, 0x19, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x21, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x72, - 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x64, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6e, 0x0a, 0x17, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x5f, 0x6d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, - 0x18, 0x1e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, - 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x15, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, - 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, - 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x42, 0x08, - 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, - 0x74, 0x79, 0x12, 0x41, 0x0a, 0x0b, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, - 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x0a, 0x72, 0x61, 0x74, 0x65, 0x4c, - 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x5c, 0x0a, 0x16, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, - 0x5f, 0x76, 0x68, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x18, - 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x13, - 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 
0x56, 0x68, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, - 0x69, 0x74, 0x73, 0x12, 0x4e, 0x0a, 0x0b, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, - 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x68, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, - 0x01, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x50, 0x0a, 0x10, 0x6d, 0x61, 0x78, 0x5f, 0x67, - 0x72, 0x70, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x17, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x92, 0xc7, - 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x47, 0x72, - 0x70, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x56, 0x0a, 0x13, 0x67, 0x72, 0x70, - 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, - 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x11, - 0x67, 0x72, 0x70, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x4f, 0x66, 0x66, 0x73, 0x65, - 0x74, 0x12, 0x59, 0x0a, 0x0f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x73, 0x18, 0x19, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x55, - 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x75, 0x70, - 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x67, 0x0a, 0x18, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, - 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, + 0x52, 0x1c, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x65, 0x66, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x73, 0x3a, 0x24, + 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x42, 0x13, 0x0a, 0x11, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, + 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, + 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x52, 0x12, 
0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, 0x07, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0xa6, 0x2d, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x48, 0x00, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x36, 0x0a, 0x0e, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, + 0x01, 0x01, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x11, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, - 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, - 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x16, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x80, 0x01, 0x0a, 0x18, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3a, 0x0a, 0x18, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x16, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x76, 0x0a, 0x1f, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, + 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x5f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x27, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x48, 0x00, + 0x52, 0x1c, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x8e, + 0x01, 0x0a, 0x1f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x66, + 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, + 0x64, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x49, 0x6e, 0x74, 
- 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, - 0x52, 0x16, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, - 0x74, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, - 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, - 0x30, 0x18, 0x01, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x45, 0x0a, 0x0c, 0x68, 0x65, 0x64, - 0x67, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, - 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x52, 0x0b, 0x68, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x12, 0x64, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x4d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x81, 0x03, 0x0a, 0x13, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x33, - 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x19, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x12, 0x4b, 0x0a, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, 0x42, 0x08, - 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x13, 0x12, 0x11, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, - 0x72, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x12, 0x59, 0x0a, 0x10, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, 0x72, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0f, 0x72, 0x75, 0x6e, 0x74, - 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x0d, 0x74, - 0x72, 0x61, 0x63, 0x65, 
0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, - 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x64, 0x3a, 0x39, 0x9a, 0xc5, - 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, - 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x0b, 0x72, - 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x1a, 0xd6, 0x0b, 0x0a, 0x0a, 0x48, - 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4e, 0x0a, 0x06, 0x68, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, - 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, - 0x00, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x06, 0x63, 0x6f, 0x6f, - 0x6b, 0x69, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, + 0x10, 0x01, 0x52, 0x1b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x46, 0x6f, + 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x45, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0d, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x32, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, + 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x0d, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x53, 0x0a, 0x0d, 0x72, 0x65, + 0x67, 0x65, 0x78, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, + 0x65, 0x52, 0x0c, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, + 0x5a, 0x0a, 0x13, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x29, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x70, 0x61, 0x74, 0x68, 0x52, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x3f, 0x0a, 0x14, 0x68, + 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x74, 0x65, + 0x72, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, + 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x48, 0x01, 0x52, 0x12, 0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x12, 0x48, 0x0a, 0x11, + 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x48, 0x01, 0x52, 0x0f, 0x61, 0x75, 0x74, 0x6f, 0x48, 0x6f, 0x73, 0x74, 0x52, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x3d, 0x0a, 0x13, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x1d, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, + 0x48, 0x01, 0x52, 0x11, 0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x67, 0x0a, 0x17, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, + 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, + 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x48, 0x01, 0x52, 0x14, 0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x35, + 0x0a, 0x17, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x78, 0x5f, 0x66, 0x6f, 0x72, 0x77, 0x61, + 0x72, 0x64, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x26, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x14, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x58, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, + 0x64, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3c, 0x0a, 0x0c, 0x69, 0x64, + 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x69, 0x64, 0x6c, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x56, 0x0a, 0x11, 0x65, 0x61, 0x72, 0x6c, + 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x28, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 
0x79, 0x70, 0x65, 0x64, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0f, 0x65, 0x61, 0x72, 0x6c, 0x79, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x12, 0x45, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, + 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4f, 0x0a, 0x19, 0x72, 0x65, 0x74, 0x72, 0x79, + 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x21, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, + 0x52, 0x16, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x54, 0x79, 0x70, + 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6e, 0x0a, 0x17, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x18, 0x1e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, - 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x48, - 0x00, 0x52, 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x79, 0x0a, 0x15, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, - 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, - 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x48, 0x00, 0x52, 0x14, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, - 0x74, 0x69, 0x65, 0x73, 0x12, 0x67, 0x0a, 0x0f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, - 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, + 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x52, 0x15, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, + 0x72, 0x69, 0x74, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, + 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x70, 0x72, 0x69, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x41, 0x0a, 0x0b, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, + 
0x6d, 0x69, 0x74, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x0a, 0x72, 0x61, + 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x5c, 0x0a, 0x16, 0x69, 0x6e, 0x63, 0x6c, + 0x75, 0x64, 0x65, 0x5f, 0x76, 0x68, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, + 0x01, 0x52, 0x13, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x68, 0x52, 0x61, 0x74, 0x65, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x4e, 0x0a, 0x0b, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x68, 0x61, 0x73, 0x68, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, 0x11, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x72, + 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, + 0x2e, 0x30, 0x18, 0x01, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x50, 0x0a, 0x10, 0x6d, 0x61, + 0x78, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x17, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0e, 0x6d, 0x61, + 0x78, 0x47, 0x72, 0x70, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x56, 0x0a, 0x13, + 0x67, 0x72, 0x70, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x6f, 0x66, 0x66, + 0x73, 0x65, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, + 0x01, 0x52, 0x11, 0x67, 0x72, 0x70, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x4f, 0x66, + 0x66, 0x73, 0x65, 0x74, 0x12, 0x59, 0x0a, 0x0f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x19, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0e, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x5e, 0x0a, - 0x0c, 0x66, 0x69, 0x6c, 0x74, 
0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, - 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x48, 0x00, - 0x52, 0x0b, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x0a, - 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x1a, 0xc6, 0x01, 0x0a, 0x06, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, - 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x53, 0x0a, 0x0d, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x72, 0x65, - 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, - 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x67, - 0x65, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, - 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, + 0x6e, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0e, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, + 0x67, 0x0a, 0x18, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x22, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x52, 0x16, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x80, 0x01, 0x0a, 0x18, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, + 0x30, 0x18, 0x01, 0x52, 0x16, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x16, 0x6d, + 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x1f, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x45, 0x0a, 0x0c, + 0x68, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x1b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x64, 0x67, 0x65, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x68, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x64, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcb, 0x03, 0x0a, 0x13, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x12, 0x33, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x19, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x07, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x4b, 0x0a, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, + 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, + 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x59, 0x0a, 0x10, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, + 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0f, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, + 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x64, 0x12, + 0x48, 0x0a, 0x21, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x64, 0x6f, + 0x77, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 
0x5f, 0x61, 0x70, + 0x70, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x64, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x53, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x75, 0x66, + 0x66, 0x69, 0x78, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x3a, 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, + 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x1a, 0x5f, 0x0a, 0x0f, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x41, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x12, 0x25, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x11, 0xfa, 0x42, 0x0e, 0x72, 0x0c, 0x10, 0x01, 0x28, 0x80, 0x80, 0x01, - 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0f, 0xfa, 0x42, 0x0c, - 0x72, 0x0a, 0x28, 0x80, 0x80, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x1a, 0xfe, 0x01, 0x0a, 0x06, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x1b, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, - 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x03, 0x74, - 0x74, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x5d, 0x0a, 0x0a, - 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x3d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x0b, 0x72, 0x75, 0x6e, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x1a, 0xd6, 0x0b, 0x0a, 0x0a, 0x48, 0x61, 0x73, 0x68, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4e, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x48, 0x00, 0x52, 0x06, + 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x79, 0x0a, 0x15, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, + 0x03, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x48, 0x00, 0x52, 0x14, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x12, 0x67, 0x0a, 0x0f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, + 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x5e, 0x0a, 0x0c, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, - 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, - 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x3a, 0x37, 0x9a, 0xc5, 0x88, - 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, - 0x6f, 0x6b, 0x69, 0x65, 0x1a, 0x7a, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x3a, 0x45, 0x9a, 0xc5, 0x88, 0x1e, 0x40, - 0x0a, 0x3e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, - 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, - 0x1a, 0x6e, 0x0a, 0x0e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, - 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x3a, - 0x3f, 0x9a, 0xc5, 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 
0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, - 0x1a, 0x66, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, - 0x19, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, - 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, - 0x37, 0x0a, 0x35, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x46, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, - 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x74, 0x65, + 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x74, 0x65, + 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x1a, 0xc6, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x12, 0x2e, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, + 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x53, 0x0a, 0x0d, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, + 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, + 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, + 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, + 0x5f, 0x0a, 0x0f, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x12, 0x25, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x11, 0xfa, 0x42, 0x0e, 0x72, 0x0c, 0x10, 0x01, 0x28, 0x80, 0x80, 0x01, 0xc8, 0x01, 0x00, + 0xc0, 0x01, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x72, 0x0a, 0x28, + 0x80, 0x80, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x1a, 0xfe, 0x01, 0x0a, 0x06, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x70, 
0x61, 0x74, 0x68, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x5d, 0x0a, 0x0a, 0x61, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6f, + 0x6b, 0x69, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0a, 0x61, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, + 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x17, 0x0a, 0x10, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, - 0xf8, 0x42, 0x01, 0x1a, 0xa3, 0x03, 0x0a, 0x0d, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x30, 0x0a, 0x0c, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, - 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x0b, 0x75, 0x70, 0x67, 0x72, - 0x61, 0x64, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x65, 0x0a, - 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, - 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x8d, 0x01, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5d, 0x0a, 0x15, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x6f, - 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x52, 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, - 0x6f, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x50, 0x6f, 0x73, 0x74, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 
0x65, 0x2e, - 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x55, 0x70, 0x67, 0x72, - 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x88, 0x02, 0x0a, 0x11, 0x4d, 0x61, - 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x49, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x64, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x17, 0x67, 0x72, - 0x70, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x5f, 0x6d, 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x67, 0x72, 0x70, 0x63, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x78, 0x12, 0x56, 0x0a, 0x1a, - 0x67, 0x72, 0x70, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x68, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x17, 0x67, 0x72, 0x70, - 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4f, 0x66, - 0x66, 0x73, 0x65, 0x74, 0x22, 0x60, 0x0a, 0x1b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, - 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, - 0x6f, 0x64, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, - 0x4e, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, - 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x49, - 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x5f, 0x45, - 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x22, 0x5e, 0x0a, 0x16, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x22, 0x0a, 0x1e, 0x50, 0x41, 0x53, 0x53, 0x5f, 0x54, 0x48, 0x52, 0x4f, 0x55, 0x47, 0x48, - 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, - 0x43, 0x54, 0x10, 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x48, 0x41, 0x4e, 0x44, 0x4c, 0x45, 0x5f, 0x49, - 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, - 0x10, 0x01, 0x1a, 0x02, 0x18, 0x01, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, + 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6f, 0x6b, 0x69, + 0x65, 0x1a, 0x7a, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x3a, 0x45, 0x9a, 0xc5, 0x88, 0x1e, 0x40, 0x0a, 0x3e, 0x65, 0x6e, 0x76, 
0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x18, 0x0a, - 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, - 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x18, 0x0a, 0x16, 0x68, 0x6f, 0x73, 0x74, 0x5f, - 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, - 0x72, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x4a, 0x04, 0x08, - 0x13, 0x10, 0x14, 0x4a, 0x04, 0x08, 0x10, 0x10, 0x11, 0x4a, 0x04, 0x08, 0x16, 0x10, 0x17, 0x4a, - 0x04, 0x08, 0x15, 0x10, 0x16, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x52, 0x15, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x22, 0xbf, 0x10, 0x0a, 0x0b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6f, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x74, 0x72, 0x79, 0x4f, 0x6e, 0x12, 0x52, 0x0a, - 0x0b, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x42, 0x13, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0d, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, - 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, - 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x54, 0x72, 0x79, 0x54, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x12, 0x4a, 0x0a, 0x14, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x79, 0x5f, - 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0d, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x70, - 0x65, 0x72, 0x54, 0x72, 0x79, 0x49, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x12, 0x57, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, - 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, - 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x72, 0x65, 0x74, 0x72, - 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x67, 0x0a, 0x14, 0x72, 0x65, 0x74, - 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, - 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 
0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x12, - 0x72, 0x65, 0x74, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x12, 0x64, 0x0a, 0x18, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x0c, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, - 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x52, 0x16, 0x72, 0x65, 0x74, 0x72, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, - 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x21, 0x68, 0x6f, 0x73, 0x74, - 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, - 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x1d, 0x68, 0x6f, 0x73, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x78, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, - 0x74, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, - 0x28, 0x0d, 0x52, 0x14, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x72, - 0x79, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x6f, 0x66, 0x66, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, - 0x66, 0x52, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, - 0x78, 0x0a, 0x1b, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x5f, - 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x6f, 0x66, 0x66, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, - 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, - 0x69, 0x74, 0x65, 0x64, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, - 0x52, 0x17, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x52, 0x65, 0x74, - 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x51, 0x0a, 0x11, 0x72, 0x65, 0x74, - 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x09, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x10, 0x72, 0x65, 0x74, 0x72, - 0x69, 0x61, 0x62, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x60, 0x0a, 0x19, - 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 
0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, - 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x17, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x1a, 0xb9, - 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, - 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, - 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, - 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, - 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, - 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, - 0x75, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, - 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x42, 0x0d, 0x0a, - 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, - 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xc3, 0x01, 0x0a, 0x12, 0x52, - 0x65, 0x74, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, - 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, - 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, - 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, - 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x1a, 0xd6, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, - 0x66, 0x12, 0x4a, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, - 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, + 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x6e, 0x0a, + 0x0e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, + 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 
0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x3f, 0x9a, 0xc5, + 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x1a, 0x66, 0x0a, + 0x0b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x19, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, 0x0a, 0x35, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, + 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, + 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x17, 0x0a, 0x10, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, + 0x1a, 0xa3, 0x03, 0x0a, 0x0d, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x30, 0x0a, 0x0c, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, + 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x0b, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x65, 0x0a, 0x0e, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x1a, 0x8d, 0x01, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x5d, 0x0a, 0x15, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x70, + 0x72, 0x6f, 
0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x6f, 0x73, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x6f, 0x73, + 0x74, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x88, 0x02, 0x0a, 0x11, 0x4d, 0x61, 0x78, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x13, + 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x17, 0x67, 0x72, 0x70, 0x63, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, + 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, 0x01, 0x2a, 0x00, 0x52, - 0x0c, 0x62, 0x61, 0x73, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x46, 0x0a, - 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, - 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, - 0x65, 0x72, 0x76, 0x61, 0x6c, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, 0x0a, 0x2b, 0x65, 0x6e, + 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x67, 0x72, 0x70, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x78, 0x12, 0x56, 0x0a, 0x1a, 0x67, 0x72, 0x70, + 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x17, 0x67, 0x72, 0x70, 0x63, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x22, 0x60, 0x0a, 0x1b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x46, + 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, + 0x12, 0x17, 0x0a, 0x13, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x41, 0x56, + 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, + 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x49, 0x4e, 0x54, 0x45, + 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, + 0x52, 0x10, 0x02, 0x22, 0x5e, 0x0a, 
0x16, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, + 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, + 0x1e, 0x50, 0x41, 0x53, 0x53, 0x5f, 0x54, 0x48, 0x52, 0x4f, 0x55, 0x47, 0x48, 0x5f, 0x49, 0x4e, + 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, + 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x48, 0x41, 0x4e, 0x44, 0x4c, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x45, + 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x01, 0x1a, + 0x02, 0x18, 0x01, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x18, 0x0a, 0x11, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, + 0x03, 0xf8, 0x42, 0x01, 0x42, 0x18, 0x0a, 0x16, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, + 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x4a, 0x04, 0x08, 0x13, 0x10, 0x14, + 0x4a, 0x04, 0x08, 0x10, 0x10, 0x11, 0x4a, 0x04, 0x08, 0x16, 0x10, 0x17, 0x4a, 0x04, 0x08, 0x15, + 0x10, 0x16, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x52, 0x15, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x5f, 0x6d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, + 0xbf, 0x10, 0x0a, 0x0b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x19, 0x0a, 0x08, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x72, 0x65, 0x74, 0x72, 0x79, 0x4f, 0x6e, 0x12, 0x52, 0x0a, 0x0b, 0x6e, 0x75, + 0x6d, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x13, 0xf2, + 0x98, 0xfe, 0x8f, 0x05, 0x0d, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x52, 0x0a, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x41, + 0x0a, 0x0f, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x54, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x12, 0x4a, 0x0a, 0x14, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x79, 0x5f, 0x69, 0x64, 0x6c, + 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x70, 0x65, 0x72, 0x54, + 0x72, 0x79, 0x49, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x57, 0x0a, + 0x0e, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 
0x52, 0x65, 0x74, 0x72, 0x79, 0x50, + 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, + 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x67, 0x0a, 0x14, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, + 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x48, 0x6f, + 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x12, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, + 0x64, 0x0a, 0x18, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, 0x72, + 0x65, 0x74, 0x72, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x65, 0x64, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x21, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6d, 0x61, + 0x78, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x1d, 0x68, 0x6f, 0x73, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x78, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x12, + 0x34, 0x0a, 0x16, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0d, 0x52, + 0x14, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x43, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x62, + 0x61, 0x63, 0x6b, 0x5f, 0x6f, 0x66, 0x66, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x52, 0x0c, + 0x72, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x78, 0x0a, 0x1b, + 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x6f, 0x66, 0x66, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, + 0x64, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x52, 0x17, 0x72, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, + 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x51, 0x0a, 0x11, 0x72, 0x65, 0x74, 0x72, 
0x69, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x10, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, + 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x60, 0x0a, 0x19, 0x72, 0x65, 0x74, + 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x52, 0x17, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x1a, 0xb9, 0x01, 0x0a, 0x0d, + 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, + 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, - 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x1a, 0x88, 0x01, 0x0a, 0x0b, 0x52, 0x65, - 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, - 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x56, 0x0a, 0x06, - 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x6d, - 0x61, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x66, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x1a, 0xc0, 0x01, 0x0a, 0x17, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, - 0x69, 0x74, 0x65, 0x64, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, - 0x12, 0x5d, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x73, 0x65, - 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, - 0x01, 0x52, 
0x0c, 0x72, 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, - 0x46, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xc3, 0x01, 0x0a, 0x12, 0x52, 0x65, 0x74, 0x72, + 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1b, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, + 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xd6, 0x01, + 0x0a, 0x0c, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x4a, + 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, - 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0x34, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x65, 0x74, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x0b, 0x0a, 0x07, - 0x53, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x53, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x55, 0x4e, 0x49, - 0x58, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x01, 0x3a, 0x25, 0x9a, - 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x22, 0x9c, 0x02, 0x0a, 0x0b, 0x48, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x12, 0x50, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, - 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x5c, 0x0a, 0x19, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, - 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x17, 0x61, 0x64, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x68, - 0x61, 0x6e, 0x63, 0x65, 0x12, 0x36, 0x0a, 0x18, 0x68, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x6f, 0x6e, - 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x68, 0x65, 0x64, 0x67, 0x65, 0x4f, 0x6e, 0x50, - 0x65, 0x72, 0x54, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x3a, 0x25, 0x9a, 0xc5, - 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x48, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x22, 0xe1, 0x05, 0x0a, 0x0e, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, - 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0e, 0x68, 0x74, 0x74, 0x70, 0x73, 0x5f, - 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, - 0x52, 0x0d, 0x68, 0x74, 0x74, 0x70, 0x73, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, - 0x29, 0x0a, 0x0f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x73, 0x63, 0x68, 0x65, - 0x6d, 0x65, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x30, 0x0a, 0x0d, 0x68, 0x6f, - 0x73, 0x74, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x0c, - 0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0d, - 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, - 0x74, 0x12, 0x32, 0x0a, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, - 0x01, 0x00, 0xc0, 0x01, 0x02, 0x48, 0x01, 0x52, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x52, 0x65, 0x64, - 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x34, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, - 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, - 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x48, 0x01, 0x52, 0x0d, 0x70, 0x72, - 0x65, 0x66, 0x69, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x55, 0x0a, 0x0d, 0x72, - 0x65, 0x67, 0x65, 0x78, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, - 0x74, 0x65, 0x48, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, - 0x74, 0x65, 0x12, 0x69, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, - 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 
0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, 0x01, 0x2a, 0x00, 0x52, 0x0c, 0x62, 0x61, + 0x73, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x46, 0x0a, 0x0c, 0x6d, 0x61, + 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, + 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x1a, 0x88, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x65, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, + 0xc0, 0x01, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x56, 0x0a, 0x06, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x43, 0x6f, 0x64, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, - 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, - 0x0b, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x69, 0x70, 0x51, 0x75, 0x65, 0x72, 0x79, 0x22, 0x77, - 0x0a, 0x14, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x4d, 0x4f, 0x56, 0x45, 0x44, 0x5f, - 0x50, 0x45, 0x52, 0x4d, 0x41, 0x4e, 0x45, 0x4e, 0x54, 0x4c, 0x59, 0x10, 0x00, 0x12, 0x09, 0x0a, - 0x05, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x45, 0x45, 0x5f, - 0x4f, 0x54, 0x48, 0x45, 0x52, 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x45, 0x4d, 0x50, 0x4f, - 0x52, 0x41, 0x52, 0x59, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x03, 0x12, - 0x16, 0x0a, 0x12, 0x50, 0x45, 0x52, 0x4d, 0x41, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x44, - 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x04, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x42, 0x1a, 0x0a, 0x18, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x18, 0x0a, - 0x16, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x70, - 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x22, 0xa1, 0x01, 0x0a, 0x14, 0x44, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x41, 0x63, 0x74, 0x69, 
0x6f, 0x6e, - 0x12, 0x23, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, - 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x2a, 0x06, 0x10, 0xd8, 0x04, 0x28, 0xc8, 0x01, 0x52, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, - 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x15, 0x0a, 0x13, 0x4e, - 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x91, 0x01, 0x0a, 0x09, 0x44, 0x65, 0x63, 0x6f, 0x72, 0x61, 0x74, 0x6f, 0x72, - 0x12, 0x25, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x09, 0x6f, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x70, 0x61, - 0x67, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, - 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x70, 0x61, 0x67, 0x61, 0x74, - 0x65, 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x44, 0x65, 0x63, - 0x6f, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x22, 0xd2, 0x02, 0x0a, 0x07, 0x54, 0x72, 0x61, 0x63, 0x69, - 0x6e, 0x67, 0x12, 0x49, 0x0a, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x61, 0x6d, - 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x49, 0x0a, - 0x0f, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, + 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, + 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x1a, 0xc0, 0x01, 0x0a, 0x17, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, + 0x64, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x5d, 0x0a, + 0x0d, 0x72, 0x65, 0x73, 0x65, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0c, + 0x72, 0x65, 
0x73, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x46, 0x0a, 0x0c, + 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x22, 0x34, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x45, 0x43, + 0x4f, 0x4e, 0x44, 0x53, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x55, 0x4e, 0x49, 0x58, 0x5f, 0x54, + 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x01, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, + 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x22, 0x9c, 0x02, 0x0a, 0x0b, 0x48, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x12, 0x50, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, + 0x28, 0x01, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x73, 0x12, 0x5c, 0x0a, 0x19, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, - 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, - 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x4b, 0x0a, 0x10, 0x6f, 0x76, 0x65, 0x72, - 0x61, 0x6c, 0x6c, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, - 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0f, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x6c, 0x6c, 0x53, 0x61, 0x6d, - 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, - 0x74, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x52, 0x0a, 0x63, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x73, 0x3a, 0x21, 0x9a, 0xc5, 0x88, 0x1e, 0x1c, 0x0a, - 0x1a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, - 0x75, 0x74, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x22, 0xb4, 0x01, 0x0a, 0x0e, - 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3e, - 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 
0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, - 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x1b, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, - 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, - 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x03, 0x10, - 0x04, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x22, 0xc9, 0x1c, 0x0a, 0x09, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, - 0x12, 0x3b, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, - 0x42, 0x04, 0x2a, 0x02, 0x18, 0x0a, 0x52, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x12, 0x1f, 0x0a, - 0x0b, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x4b, - 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, - 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, - 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, - 0x08, 0x01, 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3f, 0x0a, 0x05, 0x6c, - 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x4f, 0x76, 0x65, - 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x1a, 0xb5, 0x18, 0x0a, - 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5e, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, - 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, - 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x6d, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, - 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 
0x75, 0x73, 0x74, 0x65, 0x72, - 0x48, 0x00, 0x52, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x61, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, - 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, - 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x5e, 0x0a, 0x0e, 0x72, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x17, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x63, + 0x65, 0x12, 0x36, 0x0a, 0x18, 0x68, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x6f, 0x6e, 0x5f, 0x70, 0x65, + 0x72, 0x5f, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x14, 0x68, 0x65, 0x64, 0x67, 0x65, 0x4f, 0x6e, 0x50, 0x65, 0x72, 0x54, + 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, + 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x48, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x22, 0xe1, 0x05, 0x0a, 0x0e, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0e, 0x68, 0x74, 0x74, 0x70, 0x73, 0x5f, 0x72, 0x65, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0d, 0x68, + 0x74, 0x74, 0x70, 0x73, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x29, 0x0a, 0x0f, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, + 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x30, 0x0a, 0x0d, 0x68, 0x6f, 0x73, 0x74, 0x5f, + 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, + 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x0c, 0x68, 0x6f, 0x73, + 0x74, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x6f, 0x72, + 0x74, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x0c, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x32, + 0x0a, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, + 0x01, 0x02, 0x48, 0x01, 0x52, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x12, 0x34, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x72, 0x65, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, + 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x48, 0x01, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x66, 
0x69, + 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x55, 0x0a, 0x0d, 0x72, 0x65, 0x67, 0x65, + 0x78, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x48, + 0x01, 0x52, 0x0c, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, + 0x69, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, + 0x64, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, + 0x72, 0x69, 0x70, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0a, 0x73, 0x74, 0x72, 0x69, 0x70, 0x51, 0x75, 0x65, 0x72, 0x79, 0x22, 0x77, 0x0a, 0x14, 0x52, + 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, + 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x4d, 0x4f, 0x56, 0x45, 0x44, 0x5f, 0x50, 0x45, 0x52, + 0x4d, 0x41, 0x4e, 0x45, 0x4e, 0x54, 0x4c, 0x59, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x46, 0x4f, + 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x45, 0x45, 0x5f, 0x4f, 0x54, 0x48, + 0x45, 0x52, 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x45, 0x4d, 0x50, 0x4f, 0x52, 0x41, 0x52, + 0x59, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, + 0x50, 0x45, 0x52, 0x4d, 0x41, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, + 0x43, 0x54, 0x10, 0x04, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x1a, + 0x0a, 0x18, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, 0x70, 0x61, + 0x74, 0x68, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x22, 0xa1, 0x01, 0x0a, 0x14, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0b, 0xfa, + 0x42, 0x08, 0x2a, 0x06, 0x10, 0xd8, 0x04, 0x28, 0xc8, 0x01, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, + 0x27, 0x65, 0x6e, 
0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x15, 0x0a, 0x13, 0x4e, 0x6f, 0x6e, 0x46, + 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x91, 0x01, 0x0a, 0x09, 0x44, 0x65, 0x63, 0x6f, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x25, 0x0a, + 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x70, 0x61, 0x67, 0x61, 0x74, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x70, 0x61, 0x67, 0x61, 0x74, 0x65, 0x3a, 0x23, + 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x44, 0x65, 0x63, 0x6f, 0x72, 0x61, + 0x74, 0x6f, 0x72, 0x22, 0xd2, 0x02, 0x0a, 0x07, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x12, + 0x49, 0x0a, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, + 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x49, 0x0a, 0x0f, 0x72, 0x61, + 0x6e, 0x64, 0x6f, 0x6d, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, + 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x53, 0x61, 0x6d, + 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x4b, 0x0a, 0x10, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x6c, 0x6c, + 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, + 0x74, 0x52, 0x0f, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x6c, 0x6c, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, + 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x61, 0x67, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x54, 0x61, 0x67, 0x73, 0x3a, 0x21, 0x9a, 0xc5, 0x88, 0x1e, 0x1c, 0x0a, 0x1a, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x22, 0xb4, 0x01, 0x0a, 0x0e, 0x56, 0x69, 0x72, + 0x74, 0x75, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x07, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 
0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x1b, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, + 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x52, 0x07, + 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x22, + 0xc9, 0x1c, 0x0a, 0x09, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x3b, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, + 0x02, 0x18, 0x0a, 0x52, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x4b, 0x0a, 0x07, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, + 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3f, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x1a, 0xb5, 0x18, 0x0a, 0x06, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5e, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x12, 0x6d, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, - 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 
0x52, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x55, 0x0a, 0x0b, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, - 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, - 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x4b, - 0x65, 0x79, 0x48, 0x00, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x4b, 0x65, 0x79, - 0x12, 0x68, 0x0a, 0x12, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, + 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x12, 0x61, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x10, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x77, 0x0a, 0x10, 0x64, 0x79, - 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, - 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x79, - 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x42, 0x11, 0x92, - 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, 0x01, - 0x48, 0x00, 0x52, 0x0f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, - 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, - 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x12, 0x4a, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, - 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x48, 0x00, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 
- 0x71, 0x0a, 0x15, 0x6d, 0x61, 0x73, 0x6b, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x5e, 0x0a, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, - 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x61, 0x73, 0x6b, 0x65, 0x64, 0x52, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x13, 0x6d, - 0x61, 0x73, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x12, 0x81, 0x01, 0x0a, 0x1b, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x18, 0x71, 0x75, - 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0x49, 0x0a, 0x0d, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x1a, 0x53, 0x0a, 0x12, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x3a, 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x1a, 0xd1, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x0b, 0x68, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, - 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x0a, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x0e, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 
0x6f, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x6b, 0x69, - 0x70, 0x5f, 0x69, 0x66, 0x5f, 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x49, 0x66, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x3a, - 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, - 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x1a, 0x49, 0x0a, 0x0d, 0x52, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x3a, 0x38, 0x9a, 0xc5, 0x88, - 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x1a, 0xbe, 0x01, 0x0a, 0x13, 0x4d, 0x61, 0x73, 0x6b, 0x65, 0x64, - 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x52, 0x0a, - 0x12, 0x76, 0x34, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x5f, - 0x6c, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, - 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x20, - 0x52, 0x0f, 0x76, 0x34, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x73, 0x6b, 0x4c, 0x65, - 0x6e, 0x12, 0x53, 0x0a, 0x12, 0x76, 0x36, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, - 0x61, 0x73, 0x6b, 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, - 0x2a, 0x03, 0x18, 0x80, 0x01, 0x52, 0x0f, 0x76, 0x36, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, - 0x61, 0x73, 0x6b, 0x4c, 0x65, 0x6e, 0x1a, 0x9e, 0x01, 0x0a, 0x0a, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x32, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, - 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, - 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x1a, 0xb3, 0x02, 0x0a, 0x10, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0e, - 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 
0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x4b, 0x65, 0x79, 0x12, 0x32, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, - 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x65, 0x63, - 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x65, 0x78, 0x70, 0x65, 0x63, - 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x48, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, - 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, - 0x3a, 0x3b, 0x9a, 0xc5, 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, - 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xb8, 0x01, - 0x0a, 0x0f, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, - 0x61, 0x12, 0x2e, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, - 0x10, 0x01, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4b, 0x65, - 0x79, 0x12, 0x50, 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, 0x65, - 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, - 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x42, 0x08, 0xfa, 0x42, - 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x4b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0xda, 0x02, 0x0a, 0x08, 0x4d, 0x65, 0x74, - 0x61, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2e, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, - 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x50, 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, - 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 
0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x59, 0x0a, 0x06, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x37, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x2e, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, - 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x6b, 0x69, 0x70, 0x5f, - 0x69, 0x66, 0x5f, 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x49, 0x66, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x22, 0x26, 0x0a, - 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x59, 0x4e, 0x41, 0x4d, - 0x49, 0x43, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x45, 0x4e, - 0x54, 0x52, 0x59, 0x10, 0x01, 0x1a, 0x97, 0x02, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x32, 0x0a, 0x10, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3d, 0x0a, - 0x0c, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x0b, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x61, 0x0a, 0x10, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0f, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x3a, - 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, - 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x17, 0x0a, 0x10, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, - 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xf2, 0x01, 0x0a, 0x08, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, - 
0x65, 0x12, 0x66, 0x0a, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x55, 0x0a, 0x0b, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x4f, 0x76, - 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x0f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, - 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x63, 0x0a, 0x0f, 0x44, 0x79, 0x6e, - 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x50, 0x0a, 0x0c, - 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, - 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, - 0x01, 0x52, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x42, 0x19, - 0x0a, 0x12, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, - 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, - 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, - 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0xe6, - 0x05, 0x0a, 0x0d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x12, 0x21, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, - 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x0b, 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, - 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x65, 0x78, 0x61, 0x63, 0x74, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x12, 0x5c, 0x0a, 0x10, 0x73, 0x61, 0x66, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x65, - 0x78, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x72, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, - 0x00, 0x52, 0x0e, 0x73, 0x61, 0x66, 0x65, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x12, 0x3c, 0x0a, 0x0b, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, - 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x48, 0x00, 0x52, 0x0a, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, - 0x25, 0x0a, 0x0d, 0x70, 
0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, - 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x0c, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, - 0x04, 0x72, 0x02, 0x10, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, - 0x48, 0x00, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, - 0x37, 0x0a, 0x0c, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x92, 0xc7, - 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x75, 0x66, - 0x66, 0x69, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3b, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x73, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x12, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, - 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x49, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x12, 0x40, 0x0a, 0x1d, 0x74, 0x72, 0x65, 0x61, 0x74, 0x5f, 0x6d, 0x69, 0x73, - 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x73, 0x5f, 0x65, - 0x6d, 0x70, 0x74, 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x74, 0x72, 0x65, 0x61, - 0x74, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x73, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x18, - 0x0a, 0x16, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, - 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, - 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x52, 0x0b, 0x72, 0x65, 0x67, 0x65, - 0x78, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xa1, 0x02, 0x0a, 0x15, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x12, 0x1e, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x10, 0x01, 0x28, 0x80, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x53, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 
0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, - 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, - 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, - 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, - 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, - 0x0c, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x3a, 0x2f, 0x9a, - 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x21, - 0x0a, 0x1f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, - 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, - 0x72, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x22, 0x86, 0x03, 0x0a, 0x16, - 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x52, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x48, + 0x00, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x68, 0x0a, + 0x12, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x10, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x77, 0x0a, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x37, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, + 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, 0x01, 0x48, 0x00, 0x52, + 0x0f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x4e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 
0x4d, 0x65, 0x74, 0x61, + 0x44, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x4a, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, + 0x00, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x71, 0x0a, 0x15, + 0x6d, 0x61, 0x73, 0x6b, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x61, 0x73, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x13, 0x6d, 0x61, 0x73, 0x6b, + 0x65, 0x64, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, + 0x81, 0x01, 0x0a, 0x1b, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, + 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x18, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x1a, 0x49, 0x0a, 0x0d, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x1a, 0x53, + 0x0a, 0x12, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x3a, 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x1a, 0xd1, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, + 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 
0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x69, + 0x66, 0x5f, 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x73, 0x6b, 0x69, 0x70, 0x49, 0x66, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x3a, 0x39, 0x9a, 0xc5, + 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x1a, 0x49, 0x0a, 0x0d, 0x52, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, + 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x1a, 0xbe, 0x01, 0x0a, 0x13, 0x4d, 0x61, 0x73, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x52, 0x0a, 0x12, 0x76, 0x34, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x40, 0x0a, 0x17, 0x72, 0x65, - 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, - 0x63, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0d, 0x42, 0x08, 0xfa, 0x42, 0x05, - 0x92, 0x01, 0x02, 0x10, 0x05, 0x52, 0x15, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x4a, 0x0a, 0x0a, - 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x72, - 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x61, 0x6c, 0x6c, 0x6f, - 0x77, 0x5f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x72, - 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x61, - 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, - 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x18, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x63, - 0x6f, 0x70, 0x79, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, 0x01, - 0x0c, 0x18, 0x01, 0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x15, 0x72, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, - 0x43, 0x6f, 0x70, 0x79, 0x22, 0x79, 0x0a, 0x0c, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2c, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, - 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x42, - 0x8b, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x42, - 0x14, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, - 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x20, 0x52, 0x0f, 0x76, + 0x34, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x73, 0x6b, 0x4c, 0x65, 0x6e, 0x12, 0x53, + 0x0a, 0x12, 0x76, 0x36, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x73, 0x6b, + 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, + 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, + 0x80, 0x01, 0x52, 0x0f, 0x76, 0x36, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x73, 0x6b, + 0x4c, 0x65, 0x6e, 0x1a, 0x9e, 0x01, 0x0a, 0x0a, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x4b, + 0x65, 0x79, 0x12, 0x32, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, 0x3a, 0x35, 0x9a, + 0xc5, 0x88, 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x4b, 0x65, 0x79, 0x1a, 0xb3, 0x02, 0x0a, 0x10, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, + 0x61, 0x6c, 0x75, 0x65, 
0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, + 0x12, 0x32, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, + 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x48, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, + 0x01, 0x02, 0x08, 0x01, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x3a, 0x3b, 0x9a, + 0xc5, 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xb8, 0x01, 0x0a, 0x0f, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2e, + 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, + 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x50, + 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, + 0x02, 0x10, 0x01, 0x52, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, + 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0xda, 0x02, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x2e, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4b, + 0x65, 0x79, 0x12, 0x50, 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 
0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, + 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x4b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x59, 0x0a, 0x06, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x37, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x69, 0x66, 0x5f, + 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x6b, + 0x69, 0x70, 0x49, 0x66, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x22, 0x26, 0x0a, 0x06, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x59, 0x4e, 0x41, 0x4d, 0x49, 0x43, 0x10, + 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, + 0x10, 0x01, 0x1a, 0x97, 0x02, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, + 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x32, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x65, 0x78, + 0x70, 0x65, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x65, 0x78, + 0x70, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x61, 0x0a, 0x10, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0f, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x3a, 0x2a, 0x9a, 0xc5, + 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x17, 0x0a, 0x10, 
0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, + 0x01, 0x1a, 0xf2, 0x01, 0x0a, 0x08, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x66, + 0x0a, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x72, + 0x69, 0x64, 0x65, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x0f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x63, 0x0a, 0x0f, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x50, 0x0a, 0x0c, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x4b, 0x65, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x42, 0x19, 0x0a, 0x12, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0xe6, 0x05, 0x0a, 0x0d, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x21, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, + 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x2e, 0x0a, 0x0b, 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, + 0x18, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x65, 0x78, 0x61, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x12, 0x5c, 0x0a, 0x10, 0x73, 0x61, 0x66, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, + 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, + 0x73, 0x61, 0x66, 0x65, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3c, + 0x0a, 0x0b, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, + 0x52, 0x0a, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0d, + 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, + 
0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x0c, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, + 0x0b, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x0c, + 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3b, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x73, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x42, 0x12, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, + 0x01, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x12, 0x49, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, + 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x21, 0x0a, + 0x0c, 0x69, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x12, 0x40, 0x0a, 0x1d, 0x74, 0x72, 0x65, 0x61, 0x74, 0x5f, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, + 0x67, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x73, 0x5f, 0x65, 0x6d, 0x70, 0x74, + 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x74, 0x72, 0x65, 0x61, 0x74, 0x4d, 0x69, + 0x73, 0x73, 0x69, 0x6e, 0x67, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x73, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, + 0x04, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x52, 0x0b, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x22, 0xa1, 0x02, 0x0a, 0x15, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x1e, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, + 0x07, 0x72, 0x05, 0x10, 0x01, 0x28, 0x80, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x53, + 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x4d, 
0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, + 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x72, + 0x65, 0x73, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, + 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x21, 0x0a, 0x1f, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, + 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x22, 0x86, 0x03, 0x0a, 0x16, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x52, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x40, 0x0a, 0x17, 0x72, 0x65, 0x64, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0d, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, + 0x10, 0x05, 0x52, 0x15, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x4a, 0x0a, 0x0a, 0x70, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x64, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, + 0x72, 0x6f, 0x73, 0x73, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x61, 0x6c, 0x6c, 0x6f, + 0x77, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x18, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x70, 0x79, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, 0x01, 0x0c, 0x18, 0x01, + 0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x15, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x43, 0x6f, 0x70, + 0x79, 0x22, 0x79, 0x0a, 0x0c, 0x46, 0x69, 0x6c, 
0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x2c, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x42, 0x8b, 0x01, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x14, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, + 0x76, 0x33, 0x3b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -8085,17 +8115,17 @@ var file_envoy_config_route_v3_route_components_proto_goTypes = []interface{}{ (*RateLimit_Override_DynamicMetadata)(nil), // 63: envoy.config.route.v3.RateLimit.Override.DynamicMetadata (*v3.Matcher)(nil), // 64: xds.type.matcher.v3.Matcher (*v31.HeaderValueOption)(nil), // 65: envoy.config.core.v3.HeaderValueOption - (*any1.Any)(nil), // 66: google.protobuf.Any - (*wrappers.UInt32Value)(nil), // 67: google.protobuf.UInt32Value + (*anypb.Any)(nil), // 66: google.protobuf.Any + (*wrapperspb.UInt32Value)(nil), // 67: google.protobuf.UInt32Value (*v31.Metadata)(nil), // 68: envoy.config.core.v3.Metadata (*v31.TypedExtensionConfig)(nil), // 69: envoy.config.core.v3.TypedExtensionConfig (*v32.RegexMatcher)(nil), // 70: envoy.type.matcher.v3.RegexMatcher - (*wrappers.BoolValue)(nil), // 71: google.protobuf.BoolValue + (*wrapperspb.BoolValue)(nil), // 71: google.protobuf.BoolValue (*v31.RuntimeFractionalPercent)(nil), // 72: envoy.config.core.v3.RuntimeFractionalPercent (*v32.MetadataMatcher)(nil), // 73: envoy.type.matcher.v3.MetadataMatcher (*v32.StringMatcher)(nil), // 74: envoy.type.matcher.v3.StringMatcher (*v32.RegexMatchAndSubstitute)(nil), // 75: envoy.type.matcher.v3.RegexMatchAndSubstitute - (*duration.Duration)(nil), // 76: google.protobuf.Duration + (*durationpb.Duration)(nil), // 76: google.protobuf.Duration (v31.RoutingPriority)(0), // 77: envoy.config.core.v3.RoutingPriority (*v33.FractionalPercent)(nil), // 78: envoy.type.v3.FractionalPercent (*v31.DataSource)(nil), // 79: envoy.config.core.v3.DataSource @@ -8153,124 +8183,125 @@ var file_envoy_config_route_v3_route_components_proto_depIdxs = []int32{ 72, // 45: envoy.config.route.v3.CorsPolicy.filter_enabled:type_name -> envoy.config.core.v3.RuntimeFractionalPercent 72, // 46: envoy.config.route.v3.CorsPolicy.shadow_enabled:type_name -> envoy.config.core.v3.RuntimeFractionalPercent 71, // 47: 
envoy.config.route.v3.CorsPolicy.allow_private_network_access:type_name -> google.protobuf.BoolValue - 10, // 48: envoy.config.route.v3.RouteAction.weighted_clusters:type_name -> envoy.config.route.v3.WeightedCluster - 11, // 49: envoy.config.route.v3.RouteAction.inline_cluster_specifier_plugin:type_name -> envoy.config.route.v3.ClusterSpecifierPlugin - 1, // 50: envoy.config.route.v3.RouteAction.cluster_not_found_response_code:type_name -> envoy.config.route.v3.RouteAction.ClusterNotFoundResponseCode - 68, // 51: envoy.config.route.v3.RouteAction.metadata_match:type_name -> envoy.config.core.v3.Metadata - 75, // 52: envoy.config.route.v3.RouteAction.regex_rewrite:type_name -> envoy.type.matcher.v3.RegexMatchAndSubstitute - 69, // 53: envoy.config.route.v3.RouteAction.path_rewrite_policy:type_name -> envoy.config.core.v3.TypedExtensionConfig - 71, // 54: envoy.config.route.v3.RouteAction.auto_host_rewrite:type_name -> google.protobuf.BoolValue - 75, // 55: envoy.config.route.v3.RouteAction.host_rewrite_path_regex:type_name -> envoy.type.matcher.v3.RegexMatchAndSubstitute - 76, // 56: envoy.config.route.v3.RouteAction.timeout:type_name -> google.protobuf.Duration - 76, // 57: envoy.config.route.v3.RouteAction.idle_timeout:type_name -> google.protobuf.Duration - 69, // 58: envoy.config.route.v3.RouteAction.early_data_policy:type_name -> envoy.config.core.v3.TypedExtensionConfig - 15, // 59: envoy.config.route.v3.RouteAction.retry_policy:type_name -> envoy.config.route.v3.RetryPolicy - 66, // 60: envoy.config.route.v3.RouteAction.retry_policy_typed_config:type_name -> google.protobuf.Any - 35, // 61: envoy.config.route.v3.RouteAction.request_mirror_policies:type_name -> envoy.config.route.v3.RouteAction.RequestMirrorPolicy - 77, // 62: envoy.config.route.v3.RouteAction.priority:type_name -> envoy.config.core.v3.RoutingPriority - 23, // 63: envoy.config.route.v3.RouteAction.rate_limits:type_name -> envoy.config.route.v3.RateLimit - 71, // 64: envoy.config.route.v3.RouteAction.include_vh_rate_limits:type_name -> google.protobuf.BoolValue - 36, // 65: envoy.config.route.v3.RouteAction.hash_policy:type_name -> envoy.config.route.v3.RouteAction.HashPolicy - 13, // 66: envoy.config.route.v3.RouteAction.cors:type_name -> envoy.config.route.v3.CorsPolicy - 76, // 67: envoy.config.route.v3.RouteAction.max_grpc_timeout:type_name -> google.protobuf.Duration - 76, // 68: envoy.config.route.v3.RouteAction.grpc_timeout_offset:type_name -> google.protobuf.Duration - 37, // 69: envoy.config.route.v3.RouteAction.upgrade_configs:type_name -> envoy.config.route.v3.RouteAction.UpgradeConfig - 26, // 70: envoy.config.route.v3.RouteAction.internal_redirect_policy:type_name -> envoy.config.route.v3.InternalRedirectPolicy - 2, // 71: envoy.config.route.v3.RouteAction.internal_redirect_action:type_name -> envoy.config.route.v3.RouteAction.InternalRedirectAction - 67, // 72: envoy.config.route.v3.RouteAction.max_internal_redirects:type_name -> google.protobuf.UInt32Value - 16, // 73: envoy.config.route.v3.RouteAction.hedge_policy:type_name -> envoy.config.route.v3.HedgePolicy - 38, // 74: envoy.config.route.v3.RouteAction.max_stream_duration:type_name -> envoy.config.route.v3.RouteAction.MaxStreamDuration - 67, // 75: envoy.config.route.v3.RetryPolicy.num_retries:type_name -> google.protobuf.UInt32Value - 76, // 76: envoy.config.route.v3.RetryPolicy.per_try_timeout:type_name -> google.protobuf.Duration - 76, // 77: envoy.config.route.v3.RetryPolicy.per_try_idle_timeout:type_name -> google.protobuf.Duration - 46, // 
78: envoy.config.route.v3.RetryPolicy.retry_priority:type_name -> envoy.config.route.v3.RetryPolicy.RetryPriority - 47, // 79: envoy.config.route.v3.RetryPolicy.retry_host_predicate:type_name -> envoy.config.route.v3.RetryPolicy.RetryHostPredicate - 69, // 80: envoy.config.route.v3.RetryPolicy.retry_options_predicates:type_name -> envoy.config.core.v3.TypedExtensionConfig - 48, // 81: envoy.config.route.v3.RetryPolicy.retry_back_off:type_name -> envoy.config.route.v3.RetryPolicy.RetryBackOff - 50, // 82: envoy.config.route.v3.RetryPolicy.rate_limited_retry_back_off:type_name -> envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff - 24, // 83: envoy.config.route.v3.RetryPolicy.retriable_headers:type_name -> envoy.config.route.v3.HeaderMatcher - 24, // 84: envoy.config.route.v3.RetryPolicy.retriable_request_headers:type_name -> envoy.config.route.v3.HeaderMatcher - 67, // 85: envoy.config.route.v3.HedgePolicy.initial_requests:type_name -> google.protobuf.UInt32Value - 78, // 86: envoy.config.route.v3.HedgePolicy.additional_request_chance:type_name -> envoy.type.v3.FractionalPercent - 75, // 87: envoy.config.route.v3.RedirectAction.regex_rewrite:type_name -> envoy.type.matcher.v3.RegexMatchAndSubstitute - 4, // 88: envoy.config.route.v3.RedirectAction.response_code:type_name -> envoy.config.route.v3.RedirectAction.RedirectResponseCode - 79, // 89: envoy.config.route.v3.DirectResponseAction.body:type_name -> envoy.config.core.v3.DataSource - 71, // 90: envoy.config.route.v3.Decorator.propagate:type_name -> google.protobuf.BoolValue - 78, // 91: envoy.config.route.v3.Tracing.client_sampling:type_name -> envoy.type.v3.FractionalPercent - 78, // 92: envoy.config.route.v3.Tracing.random_sampling:type_name -> envoy.type.v3.FractionalPercent - 78, // 93: envoy.config.route.v3.Tracing.overall_sampling:type_name -> envoy.type.v3.FractionalPercent - 80, // 94: envoy.config.route.v3.Tracing.custom_tags:type_name -> envoy.type.tracing.v3.CustomTag - 24, // 95: envoy.config.route.v3.VirtualCluster.headers:type_name -> envoy.config.route.v3.HeaderMatcher - 67, // 96: envoy.config.route.v3.RateLimit.stage:type_name -> google.protobuf.UInt32Value - 51, // 97: envoy.config.route.v3.RateLimit.actions:type_name -> envoy.config.route.v3.RateLimit.Action - 52, // 98: envoy.config.route.v3.RateLimit.limit:type_name -> envoy.config.route.v3.RateLimit.Override - 70, // 99: envoy.config.route.v3.HeaderMatcher.safe_regex_match:type_name -> envoy.type.matcher.v3.RegexMatcher - 81, // 100: envoy.config.route.v3.HeaderMatcher.range_match:type_name -> envoy.type.v3.Int64Range - 74, // 101: envoy.config.route.v3.HeaderMatcher.string_match:type_name -> envoy.type.matcher.v3.StringMatcher - 74, // 102: envoy.config.route.v3.QueryParameterMatcher.string_match:type_name -> envoy.type.matcher.v3.StringMatcher - 67, // 103: envoy.config.route.v3.InternalRedirectPolicy.max_internal_redirects:type_name -> google.protobuf.UInt32Value - 69, // 104: envoy.config.route.v3.InternalRedirectPolicy.predicates:type_name -> envoy.config.core.v3.TypedExtensionConfig - 66, // 105: envoy.config.route.v3.FilterConfig.config:type_name -> google.protobuf.Any - 66, // 106: envoy.config.route.v3.VirtualHost.TypedPerFilterConfigEntry.value:type_name -> google.protobuf.Any - 66, // 107: envoy.config.route.v3.Route.TypedPerFilterConfigEntry.value:type_name -> google.protobuf.Any - 67, // 108: envoy.config.route.v3.WeightedCluster.ClusterWeight.weight:type_name -> google.protobuf.UInt32Value - 68, // 109: 
envoy.config.route.v3.WeightedCluster.ClusterWeight.metadata_match:type_name -> envoy.config.core.v3.Metadata - 65, // 110: envoy.config.route.v3.WeightedCluster.ClusterWeight.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption - 65, // 111: envoy.config.route.v3.WeightedCluster.ClusterWeight.response_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption - 31, // 112: envoy.config.route.v3.WeightedCluster.ClusterWeight.typed_per_filter_config:type_name -> envoy.config.route.v3.WeightedCluster.ClusterWeight.TypedPerFilterConfigEntry - 66, // 113: envoy.config.route.v3.WeightedCluster.ClusterWeight.TypedPerFilterConfigEntry.value:type_name -> google.protobuf.Any - 71, // 114: envoy.config.route.v3.RouteMatch.TlsContextMatchOptions.presented:type_name -> google.protobuf.BoolValue - 71, // 115: envoy.config.route.v3.RouteMatch.TlsContextMatchOptions.validated:type_name -> google.protobuf.BoolValue - 72, // 116: envoy.config.route.v3.RouteAction.RequestMirrorPolicy.runtime_fraction:type_name -> envoy.config.core.v3.RuntimeFractionalPercent - 71, // 117: envoy.config.route.v3.RouteAction.RequestMirrorPolicy.trace_sampled:type_name -> google.protobuf.BoolValue - 39, // 118: envoy.config.route.v3.RouteAction.HashPolicy.header:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.Header - 41, // 119: envoy.config.route.v3.RouteAction.HashPolicy.cookie:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.Cookie - 42, // 120: envoy.config.route.v3.RouteAction.HashPolicy.connection_properties:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.ConnectionProperties - 43, // 121: envoy.config.route.v3.RouteAction.HashPolicy.query_parameter:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.QueryParameter - 44, // 122: envoy.config.route.v3.RouteAction.HashPolicy.filter_state:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.FilterState - 71, // 123: envoy.config.route.v3.RouteAction.UpgradeConfig.enabled:type_name -> google.protobuf.BoolValue - 45, // 124: envoy.config.route.v3.RouteAction.UpgradeConfig.connect_config:type_name -> envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig - 76, // 125: envoy.config.route.v3.RouteAction.MaxStreamDuration.max_stream_duration:type_name -> google.protobuf.Duration - 76, // 126: envoy.config.route.v3.RouteAction.MaxStreamDuration.grpc_timeout_header_max:type_name -> google.protobuf.Duration - 76, // 127: envoy.config.route.v3.RouteAction.MaxStreamDuration.grpc_timeout_header_offset:type_name -> google.protobuf.Duration - 75, // 128: envoy.config.route.v3.RouteAction.HashPolicy.Header.regex_rewrite:type_name -> envoy.type.matcher.v3.RegexMatchAndSubstitute - 76, // 129: envoy.config.route.v3.RouteAction.HashPolicy.Cookie.ttl:type_name -> google.protobuf.Duration - 40, // 130: envoy.config.route.v3.RouteAction.HashPolicy.Cookie.attributes:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.CookieAttribute - 82, // 131: envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig.proxy_protocol_config:type_name -> envoy.config.core.v3.ProxyProtocolConfig - 66, // 132: envoy.config.route.v3.RetryPolicy.RetryPriority.typed_config:type_name -> google.protobuf.Any - 66, // 133: envoy.config.route.v3.RetryPolicy.RetryHostPredicate.typed_config:type_name -> google.protobuf.Any - 76, // 134: envoy.config.route.v3.RetryPolicy.RetryBackOff.base_interval:type_name -> google.protobuf.Duration - 76, // 135: envoy.config.route.v3.RetryPolicy.RetryBackOff.max_interval:type_name -> 
google.protobuf.Duration - 3, // 136: envoy.config.route.v3.RetryPolicy.ResetHeader.format:type_name -> envoy.config.route.v3.RetryPolicy.ResetHeaderFormat - 49, // 137: envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff.reset_headers:type_name -> envoy.config.route.v3.RetryPolicy.ResetHeader - 76, // 138: envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff.max_interval:type_name -> google.protobuf.Duration - 53, // 139: envoy.config.route.v3.RateLimit.Action.source_cluster:type_name -> envoy.config.route.v3.RateLimit.Action.SourceCluster - 54, // 140: envoy.config.route.v3.RateLimit.Action.destination_cluster:type_name -> envoy.config.route.v3.RateLimit.Action.DestinationCluster - 55, // 141: envoy.config.route.v3.RateLimit.Action.request_headers:type_name -> envoy.config.route.v3.RateLimit.Action.RequestHeaders - 56, // 142: envoy.config.route.v3.RateLimit.Action.remote_address:type_name -> envoy.config.route.v3.RateLimit.Action.RemoteAddress - 58, // 143: envoy.config.route.v3.RateLimit.Action.generic_key:type_name -> envoy.config.route.v3.RateLimit.Action.GenericKey - 59, // 144: envoy.config.route.v3.RateLimit.Action.header_value_match:type_name -> envoy.config.route.v3.RateLimit.Action.HeaderValueMatch - 60, // 145: envoy.config.route.v3.RateLimit.Action.dynamic_metadata:type_name -> envoy.config.route.v3.RateLimit.Action.DynamicMetaData - 61, // 146: envoy.config.route.v3.RateLimit.Action.metadata:type_name -> envoy.config.route.v3.RateLimit.Action.MetaData - 69, // 147: envoy.config.route.v3.RateLimit.Action.extension:type_name -> envoy.config.core.v3.TypedExtensionConfig - 57, // 148: envoy.config.route.v3.RateLimit.Action.masked_remote_address:type_name -> envoy.config.route.v3.RateLimit.Action.MaskedRemoteAddress - 62, // 149: envoy.config.route.v3.RateLimit.Action.query_parameter_value_match:type_name -> envoy.config.route.v3.RateLimit.Action.QueryParameterValueMatch - 63, // 150: envoy.config.route.v3.RateLimit.Override.dynamic_metadata:type_name -> envoy.config.route.v3.RateLimit.Override.DynamicMetadata - 67, // 151: envoy.config.route.v3.RateLimit.Action.MaskedRemoteAddress.v4_prefix_mask_len:type_name -> google.protobuf.UInt32Value - 67, // 152: envoy.config.route.v3.RateLimit.Action.MaskedRemoteAddress.v6_prefix_mask_len:type_name -> google.protobuf.UInt32Value - 71, // 153: envoy.config.route.v3.RateLimit.Action.HeaderValueMatch.expect_match:type_name -> google.protobuf.BoolValue - 24, // 154: envoy.config.route.v3.RateLimit.Action.HeaderValueMatch.headers:type_name -> envoy.config.route.v3.HeaderMatcher - 83, // 155: envoy.config.route.v3.RateLimit.Action.DynamicMetaData.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey - 83, // 156: envoy.config.route.v3.RateLimit.Action.MetaData.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey - 5, // 157: envoy.config.route.v3.RateLimit.Action.MetaData.source:type_name -> envoy.config.route.v3.RateLimit.Action.MetaData.Source - 71, // 158: envoy.config.route.v3.RateLimit.Action.QueryParameterValueMatch.expect_match:type_name -> google.protobuf.BoolValue - 25, // 159: envoy.config.route.v3.RateLimit.Action.QueryParameterValueMatch.query_parameters:type_name -> envoy.config.route.v3.QueryParameterMatcher - 83, // 160: envoy.config.route.v3.RateLimit.Override.DynamicMetadata.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey - 161, // [161:161] is the sub-list for method output_type - 161, // [161:161] is the sub-list for method input_type - 161, // [161:161] is the sub-list for extension 
type_name - 161, // [161:161] is the sub-list for extension extendee - 0, // [0:161] is the sub-list for field type_name + 71, // 48: envoy.config.route.v3.CorsPolicy.forward_not_matching_preflights:type_name -> google.protobuf.BoolValue + 10, // 49: envoy.config.route.v3.RouteAction.weighted_clusters:type_name -> envoy.config.route.v3.WeightedCluster + 11, // 50: envoy.config.route.v3.RouteAction.inline_cluster_specifier_plugin:type_name -> envoy.config.route.v3.ClusterSpecifierPlugin + 1, // 51: envoy.config.route.v3.RouteAction.cluster_not_found_response_code:type_name -> envoy.config.route.v3.RouteAction.ClusterNotFoundResponseCode + 68, // 52: envoy.config.route.v3.RouteAction.metadata_match:type_name -> envoy.config.core.v3.Metadata + 75, // 53: envoy.config.route.v3.RouteAction.regex_rewrite:type_name -> envoy.type.matcher.v3.RegexMatchAndSubstitute + 69, // 54: envoy.config.route.v3.RouteAction.path_rewrite_policy:type_name -> envoy.config.core.v3.TypedExtensionConfig + 71, // 55: envoy.config.route.v3.RouteAction.auto_host_rewrite:type_name -> google.protobuf.BoolValue + 75, // 56: envoy.config.route.v3.RouteAction.host_rewrite_path_regex:type_name -> envoy.type.matcher.v3.RegexMatchAndSubstitute + 76, // 57: envoy.config.route.v3.RouteAction.timeout:type_name -> google.protobuf.Duration + 76, // 58: envoy.config.route.v3.RouteAction.idle_timeout:type_name -> google.protobuf.Duration + 69, // 59: envoy.config.route.v3.RouteAction.early_data_policy:type_name -> envoy.config.core.v3.TypedExtensionConfig + 15, // 60: envoy.config.route.v3.RouteAction.retry_policy:type_name -> envoy.config.route.v3.RetryPolicy + 66, // 61: envoy.config.route.v3.RouteAction.retry_policy_typed_config:type_name -> google.protobuf.Any + 35, // 62: envoy.config.route.v3.RouteAction.request_mirror_policies:type_name -> envoy.config.route.v3.RouteAction.RequestMirrorPolicy + 77, // 63: envoy.config.route.v3.RouteAction.priority:type_name -> envoy.config.core.v3.RoutingPriority + 23, // 64: envoy.config.route.v3.RouteAction.rate_limits:type_name -> envoy.config.route.v3.RateLimit + 71, // 65: envoy.config.route.v3.RouteAction.include_vh_rate_limits:type_name -> google.protobuf.BoolValue + 36, // 66: envoy.config.route.v3.RouteAction.hash_policy:type_name -> envoy.config.route.v3.RouteAction.HashPolicy + 13, // 67: envoy.config.route.v3.RouteAction.cors:type_name -> envoy.config.route.v3.CorsPolicy + 76, // 68: envoy.config.route.v3.RouteAction.max_grpc_timeout:type_name -> google.protobuf.Duration + 76, // 69: envoy.config.route.v3.RouteAction.grpc_timeout_offset:type_name -> google.protobuf.Duration + 37, // 70: envoy.config.route.v3.RouteAction.upgrade_configs:type_name -> envoy.config.route.v3.RouteAction.UpgradeConfig + 26, // 71: envoy.config.route.v3.RouteAction.internal_redirect_policy:type_name -> envoy.config.route.v3.InternalRedirectPolicy + 2, // 72: envoy.config.route.v3.RouteAction.internal_redirect_action:type_name -> envoy.config.route.v3.RouteAction.InternalRedirectAction + 67, // 73: envoy.config.route.v3.RouteAction.max_internal_redirects:type_name -> google.protobuf.UInt32Value + 16, // 74: envoy.config.route.v3.RouteAction.hedge_policy:type_name -> envoy.config.route.v3.HedgePolicy + 38, // 75: envoy.config.route.v3.RouteAction.max_stream_duration:type_name -> envoy.config.route.v3.RouteAction.MaxStreamDuration + 67, // 76: envoy.config.route.v3.RetryPolicy.num_retries:type_name -> google.protobuf.UInt32Value + 76, // 77: envoy.config.route.v3.RetryPolicy.per_try_timeout:type_name -> 
google.protobuf.Duration + 76, // 78: envoy.config.route.v3.RetryPolicy.per_try_idle_timeout:type_name -> google.protobuf.Duration + 46, // 79: envoy.config.route.v3.RetryPolicy.retry_priority:type_name -> envoy.config.route.v3.RetryPolicy.RetryPriority + 47, // 80: envoy.config.route.v3.RetryPolicy.retry_host_predicate:type_name -> envoy.config.route.v3.RetryPolicy.RetryHostPredicate + 69, // 81: envoy.config.route.v3.RetryPolicy.retry_options_predicates:type_name -> envoy.config.core.v3.TypedExtensionConfig + 48, // 82: envoy.config.route.v3.RetryPolicy.retry_back_off:type_name -> envoy.config.route.v3.RetryPolicy.RetryBackOff + 50, // 83: envoy.config.route.v3.RetryPolicy.rate_limited_retry_back_off:type_name -> envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff + 24, // 84: envoy.config.route.v3.RetryPolicy.retriable_headers:type_name -> envoy.config.route.v3.HeaderMatcher + 24, // 85: envoy.config.route.v3.RetryPolicy.retriable_request_headers:type_name -> envoy.config.route.v3.HeaderMatcher + 67, // 86: envoy.config.route.v3.HedgePolicy.initial_requests:type_name -> google.protobuf.UInt32Value + 78, // 87: envoy.config.route.v3.HedgePolicy.additional_request_chance:type_name -> envoy.type.v3.FractionalPercent + 75, // 88: envoy.config.route.v3.RedirectAction.regex_rewrite:type_name -> envoy.type.matcher.v3.RegexMatchAndSubstitute + 4, // 89: envoy.config.route.v3.RedirectAction.response_code:type_name -> envoy.config.route.v3.RedirectAction.RedirectResponseCode + 79, // 90: envoy.config.route.v3.DirectResponseAction.body:type_name -> envoy.config.core.v3.DataSource + 71, // 91: envoy.config.route.v3.Decorator.propagate:type_name -> google.protobuf.BoolValue + 78, // 92: envoy.config.route.v3.Tracing.client_sampling:type_name -> envoy.type.v3.FractionalPercent + 78, // 93: envoy.config.route.v3.Tracing.random_sampling:type_name -> envoy.type.v3.FractionalPercent + 78, // 94: envoy.config.route.v3.Tracing.overall_sampling:type_name -> envoy.type.v3.FractionalPercent + 80, // 95: envoy.config.route.v3.Tracing.custom_tags:type_name -> envoy.type.tracing.v3.CustomTag + 24, // 96: envoy.config.route.v3.VirtualCluster.headers:type_name -> envoy.config.route.v3.HeaderMatcher + 67, // 97: envoy.config.route.v3.RateLimit.stage:type_name -> google.protobuf.UInt32Value + 51, // 98: envoy.config.route.v3.RateLimit.actions:type_name -> envoy.config.route.v3.RateLimit.Action + 52, // 99: envoy.config.route.v3.RateLimit.limit:type_name -> envoy.config.route.v3.RateLimit.Override + 70, // 100: envoy.config.route.v3.HeaderMatcher.safe_regex_match:type_name -> envoy.type.matcher.v3.RegexMatcher + 81, // 101: envoy.config.route.v3.HeaderMatcher.range_match:type_name -> envoy.type.v3.Int64Range + 74, // 102: envoy.config.route.v3.HeaderMatcher.string_match:type_name -> envoy.type.matcher.v3.StringMatcher + 74, // 103: envoy.config.route.v3.QueryParameterMatcher.string_match:type_name -> envoy.type.matcher.v3.StringMatcher + 67, // 104: envoy.config.route.v3.InternalRedirectPolicy.max_internal_redirects:type_name -> google.protobuf.UInt32Value + 69, // 105: envoy.config.route.v3.InternalRedirectPolicy.predicates:type_name -> envoy.config.core.v3.TypedExtensionConfig + 66, // 106: envoy.config.route.v3.FilterConfig.config:type_name -> google.protobuf.Any + 66, // 107: envoy.config.route.v3.VirtualHost.TypedPerFilterConfigEntry.value:type_name -> google.protobuf.Any + 66, // 108: envoy.config.route.v3.Route.TypedPerFilterConfigEntry.value:type_name -> google.protobuf.Any + 67, // 109: 
envoy.config.route.v3.WeightedCluster.ClusterWeight.weight:type_name -> google.protobuf.UInt32Value + 68, // 110: envoy.config.route.v3.WeightedCluster.ClusterWeight.metadata_match:type_name -> envoy.config.core.v3.Metadata + 65, // 111: envoy.config.route.v3.WeightedCluster.ClusterWeight.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 65, // 112: envoy.config.route.v3.WeightedCluster.ClusterWeight.response_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 31, // 113: envoy.config.route.v3.WeightedCluster.ClusterWeight.typed_per_filter_config:type_name -> envoy.config.route.v3.WeightedCluster.ClusterWeight.TypedPerFilterConfigEntry + 66, // 114: envoy.config.route.v3.WeightedCluster.ClusterWeight.TypedPerFilterConfigEntry.value:type_name -> google.protobuf.Any + 71, // 115: envoy.config.route.v3.RouteMatch.TlsContextMatchOptions.presented:type_name -> google.protobuf.BoolValue + 71, // 116: envoy.config.route.v3.RouteMatch.TlsContextMatchOptions.validated:type_name -> google.protobuf.BoolValue + 72, // 117: envoy.config.route.v3.RouteAction.RequestMirrorPolicy.runtime_fraction:type_name -> envoy.config.core.v3.RuntimeFractionalPercent + 71, // 118: envoy.config.route.v3.RouteAction.RequestMirrorPolicy.trace_sampled:type_name -> google.protobuf.BoolValue + 39, // 119: envoy.config.route.v3.RouteAction.HashPolicy.header:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.Header + 41, // 120: envoy.config.route.v3.RouteAction.HashPolicy.cookie:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.Cookie + 42, // 121: envoy.config.route.v3.RouteAction.HashPolicy.connection_properties:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.ConnectionProperties + 43, // 122: envoy.config.route.v3.RouteAction.HashPolicy.query_parameter:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.QueryParameter + 44, // 123: envoy.config.route.v3.RouteAction.HashPolicy.filter_state:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.FilterState + 71, // 124: envoy.config.route.v3.RouteAction.UpgradeConfig.enabled:type_name -> google.protobuf.BoolValue + 45, // 125: envoy.config.route.v3.RouteAction.UpgradeConfig.connect_config:type_name -> envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig + 76, // 126: envoy.config.route.v3.RouteAction.MaxStreamDuration.max_stream_duration:type_name -> google.protobuf.Duration + 76, // 127: envoy.config.route.v3.RouteAction.MaxStreamDuration.grpc_timeout_header_max:type_name -> google.protobuf.Duration + 76, // 128: envoy.config.route.v3.RouteAction.MaxStreamDuration.grpc_timeout_header_offset:type_name -> google.protobuf.Duration + 75, // 129: envoy.config.route.v3.RouteAction.HashPolicy.Header.regex_rewrite:type_name -> envoy.type.matcher.v3.RegexMatchAndSubstitute + 76, // 130: envoy.config.route.v3.RouteAction.HashPolicy.Cookie.ttl:type_name -> google.protobuf.Duration + 40, // 131: envoy.config.route.v3.RouteAction.HashPolicy.Cookie.attributes:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.CookieAttribute + 82, // 132: envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig.proxy_protocol_config:type_name -> envoy.config.core.v3.ProxyProtocolConfig + 66, // 133: envoy.config.route.v3.RetryPolicy.RetryPriority.typed_config:type_name -> google.protobuf.Any + 66, // 134: envoy.config.route.v3.RetryPolicy.RetryHostPredicate.typed_config:type_name -> google.protobuf.Any + 76, // 135: envoy.config.route.v3.RetryPolicy.RetryBackOff.base_interval:type_name -> 
google.protobuf.Duration + 76, // 136: envoy.config.route.v3.RetryPolicy.RetryBackOff.max_interval:type_name -> google.protobuf.Duration + 3, // 137: envoy.config.route.v3.RetryPolicy.ResetHeader.format:type_name -> envoy.config.route.v3.RetryPolicy.ResetHeaderFormat + 49, // 138: envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff.reset_headers:type_name -> envoy.config.route.v3.RetryPolicy.ResetHeader + 76, // 139: envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff.max_interval:type_name -> google.protobuf.Duration + 53, // 140: envoy.config.route.v3.RateLimit.Action.source_cluster:type_name -> envoy.config.route.v3.RateLimit.Action.SourceCluster + 54, // 141: envoy.config.route.v3.RateLimit.Action.destination_cluster:type_name -> envoy.config.route.v3.RateLimit.Action.DestinationCluster + 55, // 142: envoy.config.route.v3.RateLimit.Action.request_headers:type_name -> envoy.config.route.v3.RateLimit.Action.RequestHeaders + 56, // 143: envoy.config.route.v3.RateLimit.Action.remote_address:type_name -> envoy.config.route.v3.RateLimit.Action.RemoteAddress + 58, // 144: envoy.config.route.v3.RateLimit.Action.generic_key:type_name -> envoy.config.route.v3.RateLimit.Action.GenericKey + 59, // 145: envoy.config.route.v3.RateLimit.Action.header_value_match:type_name -> envoy.config.route.v3.RateLimit.Action.HeaderValueMatch + 60, // 146: envoy.config.route.v3.RateLimit.Action.dynamic_metadata:type_name -> envoy.config.route.v3.RateLimit.Action.DynamicMetaData + 61, // 147: envoy.config.route.v3.RateLimit.Action.metadata:type_name -> envoy.config.route.v3.RateLimit.Action.MetaData + 69, // 148: envoy.config.route.v3.RateLimit.Action.extension:type_name -> envoy.config.core.v3.TypedExtensionConfig + 57, // 149: envoy.config.route.v3.RateLimit.Action.masked_remote_address:type_name -> envoy.config.route.v3.RateLimit.Action.MaskedRemoteAddress + 62, // 150: envoy.config.route.v3.RateLimit.Action.query_parameter_value_match:type_name -> envoy.config.route.v3.RateLimit.Action.QueryParameterValueMatch + 63, // 151: envoy.config.route.v3.RateLimit.Override.dynamic_metadata:type_name -> envoy.config.route.v3.RateLimit.Override.DynamicMetadata + 67, // 152: envoy.config.route.v3.RateLimit.Action.MaskedRemoteAddress.v4_prefix_mask_len:type_name -> google.protobuf.UInt32Value + 67, // 153: envoy.config.route.v3.RateLimit.Action.MaskedRemoteAddress.v6_prefix_mask_len:type_name -> google.protobuf.UInt32Value + 71, // 154: envoy.config.route.v3.RateLimit.Action.HeaderValueMatch.expect_match:type_name -> google.protobuf.BoolValue + 24, // 155: envoy.config.route.v3.RateLimit.Action.HeaderValueMatch.headers:type_name -> envoy.config.route.v3.HeaderMatcher + 83, // 156: envoy.config.route.v3.RateLimit.Action.DynamicMetaData.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey + 83, // 157: envoy.config.route.v3.RateLimit.Action.MetaData.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey + 5, // 158: envoy.config.route.v3.RateLimit.Action.MetaData.source:type_name -> envoy.config.route.v3.RateLimit.Action.MetaData.Source + 71, // 159: envoy.config.route.v3.RateLimit.Action.QueryParameterValueMatch.expect_match:type_name -> google.protobuf.BoolValue + 25, // 160: envoy.config.route.v3.RateLimit.Action.QueryParameterValueMatch.query_parameters:type_name -> envoy.config.route.v3.QueryParameterMatcher + 83, // 161: envoy.config.route.v3.RateLimit.Override.DynamicMetadata.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey + 162, // [162:162] is the sub-list for method 
output_type + 162, // [162:162] is the sub-list for method input_type + 162, // [162:162] is the sub-list for extension type_name + 162, // [162:162] is the sub-list for extension extendee + 0, // [0:162] is the sub-list for field type_name } func init() { file_envoy_config_route_v3_route_components_proto_init() } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.validate.go index 8e17d094d01..ffa6daf7b8c 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/route/v3/route_components.proto @@ -2690,6 +2691,35 @@ func (m *CorsPolicy) validate(all bool) error { } } + if all { + switch v := interface{}(m.GetForwardNotMatchingPreflights()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: "ForwardNotMatchingPreflights", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: "ForwardNotMatchingPreflights", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetForwardNotMatchingPreflights()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CorsPolicyValidationError{ + field: "ForwardNotMatchingPreflights", + reason: "embedded message failed validation", + cause: err, + } + } + } + switch v := m.EnabledSpecifier.(type) { case *CorsPolicy_FilterEnabled: if v == nil { @@ -7334,6 +7364,8 @@ func (m *RouteAction_RequestMirrorPolicy) validate(all bool) error { } } + // no validation rules for DisableShadowHostSuffixAppend + if len(errors) > 0 { return RouteAction_RequestMirrorPolicyMultiError(errors) } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components_vtproto.pb.go new file mode 100644 index 00000000000..79709bb9720 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components_vtproto.pb.go @@ -0,0 +1,8302 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/route/v3/route_components.proto + +package routev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *VirtualHost) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VirtualHost) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *VirtualHost) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if m.IncludeIsTimeoutRetryHeader { + i-- + if m.IncludeIsTimeoutRetryHeader { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb8 + } + if len(m.RequestMirrorPolicies) > 0 { + for iNdEx := len(m.RequestMirrorPolicies) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RequestMirrorPolicies[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + } + if m.Matcher != nil { + if vtmsg, ok := interface{}(m.Matcher).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Matcher) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if m.RetryPolicyTypedConfig != nil { + size, err := (*anypb.Any)(m.RetryPolicyTypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if m.IncludeAttemptCountInResponse { + i-- + if m.IncludeAttemptCountInResponse { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if m.PerRequestBufferLimitBytes != nil { + size, err := (*wrapperspb.UInt32Value)(m.PerRequestBufferLimitBytes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if m.HedgePolicy != nil { + size, err := m.HedgePolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.RetryPolicy 
!= nil { + size, err := m.RetryPolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if len(m.TypedPerFilterConfig) > 0 { + for k := range m.TypedPerFilterConfig { + v := m.TypedPerFilterConfig[k] + baseI := i + size, err := (*anypb.Any)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x7a + } + } + if m.IncludeRequestAttemptCount { + i-- + if m.IncludeRequestAttemptCount { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + } + if len(m.RequestHeadersToRemove) > 0 { + for iNdEx := len(m.RequestHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequestHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.RequestHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RequestHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x6a + } + } + if len(m.ResponseHeadersToRemove) > 0 { + for iNdEx := len(m.ResponseHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResponseHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.ResponseHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x5a + } + } + if len(m.ResponseHeadersToAdd) > 0 { + for iNdEx := len(m.ResponseHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ResponseHeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ResponseHeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x52 + } + } + if m.Cors != nil { + size, err := m.Cors.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if len(m.RequestHeadersToAdd) > 0 { + for iNdEx := len(m.RequestHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.RequestHeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RequestHeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + } + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RateLimits[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.VirtualClusters) > 0 { + for iNdEx := len(m.VirtualClusters) - 1; iNdEx >= 0; iNdEx-- { + size, err := 
m.VirtualClusters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.RequireTls != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RequireTls)) + i-- + dAtA[i] = 0x20 + } + if len(m.Routes) > 0 { + for iNdEx := len(m.Routes) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Routes[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Domains) > 0 { + for iNdEx := len(m.Domains) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Domains[iNdEx]) + copy(dAtA[i:], m.Domains[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Domains[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FilterAction) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilterAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FilterAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Action != nil { + size, err := (*anypb.Any)(m.Action).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteList) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Routes) > 0 { + for iNdEx := len(m.Routes) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Routes[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Route) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Route) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Route) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ 
= l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.StatPrefix) > 0 { + i -= len(m.StatPrefix) + copy(dAtA[i:], m.StatPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StatPrefix))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if msg, ok := m.Action.(*Route_NonForwardingAction); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Action.(*Route_FilterAction); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.PerRequestBufferLimitBytes != nil { + size, err := (*wrapperspb.UInt32Value)(m.PerRequestBufferLimitBytes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.Tracing != nil { + size, err := m.Tracing.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x72 + } + if len(m.TypedPerFilterConfig) > 0 { + for k := range m.TypedPerFilterConfig { + v := m.TypedPerFilterConfig[k] + baseI := i + size, err := (*anypb.Any)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x6a + } + } + if len(m.RequestHeadersToRemove) > 0 { + for iNdEx := len(m.RequestHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequestHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.RequestHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RequestHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x62 + } + } + if len(m.ResponseHeadersToRemove) > 0 { + for iNdEx := len(m.ResponseHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResponseHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.ResponseHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x5a + } + } + if len(m.ResponseHeadersToAdd) > 0 { + for iNdEx := len(m.ResponseHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ResponseHeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ResponseHeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x52 + } + } + if len(m.RequestHeadersToAdd) > 0 { + for iNdEx := len(m.RequestHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.RequestHeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RequestHeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + } + if msg, ok := m.Action.(*Route_DirectResponse); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Decorator != nil { + size, err := m.Decorator.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if msg, ok := m.Action.(*Route_Redirect); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Action.(*Route_Route); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Match != nil { + size, err := m.Match.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Route_Route) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Route_Route) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Route != nil { + size, err := m.Route.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Route_Redirect) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Route_Redirect) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Redirect != nil { + size, err := m.Redirect.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Route_DirectResponse) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Route_DirectResponse) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DirectResponse != nil { + size, err := m.DirectResponse.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, 
nil +} +func (m *Route_FilterAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Route_FilterAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FilterAction != nil { + size, err := m.FilterAction.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + return len(dAtA) - i, nil +} +func (m *Route_NonForwardingAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Route_NonForwardingAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NonForwardingAction != nil { + size, err := m.NonForwardingAction.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + return len(dAtA) - i, nil +} +func (m *WeightedCluster_ClusterWeight) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WeightedCluster_ClusterWeight) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *WeightedCluster_ClusterWeight) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ClusterHeader) > 0 { + i -= len(m.ClusterHeader) + copy(dAtA[i:], m.ClusterHeader) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterHeader))) + i-- + dAtA[i] = 0x62 + } + if msg, ok := m.HostRewriteSpecifier.(*WeightedCluster_ClusterWeight_HostRewriteLiteral); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.TypedPerFilterConfig) > 0 { + for k := range m.TypedPerFilterConfig { + v := m.TypedPerFilterConfig[k] + baseI := i + size, err := (*anypb.Any)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x52 + } + } + if len(m.RequestHeadersToRemove) > 0 { + for iNdEx := len(m.RequestHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequestHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.RequestHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RequestHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x4a + } + } + if len(m.ResponseHeadersToRemove) > 0 { + for iNdEx := len(m.ResponseHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResponseHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.ResponseHeadersToRemove[iNdEx]) + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.ResponseHeadersToAdd) > 0 { + for iNdEx := len(m.ResponseHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ResponseHeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ResponseHeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.RequestHeadersToAdd) > 0 { + for iNdEx := len(m.RequestHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.RequestHeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RequestHeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + } + if m.MetadataMatch != nil { + if vtmsg, ok := interface{}(m.MetadataMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MetadataMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.Weight != nil { + size, err := (*wrapperspb.UInt32Value)(m.Weight).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WeightedCluster_ClusterWeight_HostRewriteLiteral) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *WeightedCluster_ClusterWeight_HostRewriteLiteral) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.HostRewriteLiteral) + copy(dAtA[i:], m.HostRewriteLiteral) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HostRewriteLiteral))) + i-- + dAtA[i] = 0x5a + return len(dAtA) - i, nil +} +func (m *WeightedCluster) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WeightedCluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *WeightedCluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + 
if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.RandomValueSpecifier.(*WeightedCluster_HeaderName); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.TotalWeight != nil { + size, err := (*wrapperspb.UInt32Value)(m.TotalWeight).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.RuntimeKeyPrefix) > 0 { + i -= len(m.RuntimeKeyPrefix) + copy(dAtA[i:], m.RuntimeKeyPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeKeyPrefix))) + i-- + dAtA[i] = 0x12 + } + if len(m.Clusters) > 0 { + for iNdEx := len(m.Clusters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Clusters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *WeightedCluster_HeaderName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *WeightedCluster_HeaderName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.HeaderName) + copy(dAtA[i:], m.HeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderName))) + i-- + dAtA[i] = 0x22 + return len(dAtA) - i, nil +} +func (m *ClusterSpecifierPlugin) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterSpecifierPlugin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterSpecifierPlugin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.IsOptional { + i-- + if m.IsOptional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Extension != nil { + if vtmsg, ok := interface{}(m.Extension).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Extension) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteMatch_GrpcRouteMatchOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteMatch_GrpcRouteMatchOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_GrpcRouteMatchOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ 
= i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *RouteMatch_TlsContextMatchOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteMatch_TlsContextMatchOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_TlsContextMatchOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Validated != nil { + size, err := (*wrapperspb.BoolValue)(m.Validated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Presented != nil { + size, err := (*wrapperspb.BoolValue)(m.Presented).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteMatch_ConnectMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteMatch_ConnectMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_ConnectMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *RouteMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.PathSpecifier.(*RouteMatch_PathMatchPolicy); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PathSpecifier.(*RouteMatch_PathSeparatedPrefix); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.DynamicMetadata) > 0 { + for iNdEx := len(m.DynamicMetadata) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.DynamicMetadata[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 
0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DynamicMetadata[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + } + if msg, ok := m.PathSpecifier.(*RouteMatch_ConnectMatcher_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.TlsContext != nil { + size, err := m.TlsContext.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if msg, ok := m.PathSpecifier.(*RouteMatch_SafeRegex); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.RuntimeFraction != nil { + if vtmsg, ok := interface{}(m.RuntimeFraction).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RuntimeFraction) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + if m.Grpc != nil { + size, err := m.Grpc.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if len(m.QueryParameters) > 0 { + for iNdEx := len(m.QueryParameters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.QueryParameters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Headers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } + if m.CaseSensitive != nil { + size, err := (*wrapperspb.BoolValue)(m.CaseSensitive).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if msg, ok := m.PathSpecifier.(*RouteMatch_Path); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PathSpecifier.(*RouteMatch_Prefix); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *RouteMatch_Prefix) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_Prefix) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *RouteMatch_Path) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_Path) MarshalToSizedBufferVTStrict(dAtA 
[]byte) (int, error) { + i := len(dAtA) + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *RouteMatch_SafeRegex) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_SafeRegex) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SafeRegex != nil { + if vtmsg, ok := interface{}(m.SafeRegex).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SafeRegex) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x52 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *RouteMatch_ConnectMatcher_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_ConnectMatcher_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ConnectMatcher != nil { + size, err := m.ConnectMatcher.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x62 + } + return len(dAtA) - i, nil +} +func (m *RouteMatch_PathSeparatedPrefix) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_PathSeparatedPrefix) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.PathSeparatedPrefix) + copy(dAtA[i:], m.PathSeparatedPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PathSeparatedPrefix))) + i-- + dAtA[i] = 0x72 + return len(dAtA) - i, nil +} +func (m *RouteMatch_PathMatchPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_PathMatchPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PathMatchPolicy != nil { + if vtmsg, ok := interface{}(m.PathMatchPolicy).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.PathMatchPolicy) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x7a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x7a + } + return len(dAtA) - i, nil +} +func (m *CorsPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CorsPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CorsPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ForwardNotMatchingPreflights != nil { + size, err := (*wrapperspb.BoolValue)(m.ForwardNotMatchingPreflights).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + if m.AllowPrivateNetworkAccess != nil { + size, err := (*wrapperspb.BoolValue)(m.AllowPrivateNetworkAccess).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if len(m.AllowOriginStringMatch) > 0 { + for iNdEx := len(m.AllowOriginStringMatch) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.AllowOriginStringMatch[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AllowOriginStringMatch[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x5a + } + } + if m.ShadowEnabled != nil { + if vtmsg, ok := interface{}(m.ShadowEnabled).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ShadowEnabled) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x52 + } + if msg, ok := m.EnabledSpecifier.(*CorsPolicy_FilterEnabled); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.AllowCredentials != nil { + size, err := (*wrapperspb.BoolValue)(m.AllowCredentials).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.MaxAge) > 0 { + i -= len(m.MaxAge) + copy(dAtA[i:], m.MaxAge) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.MaxAge))) + i-- + dAtA[i] = 0x2a + } + if len(m.ExposeHeaders) > 0 { + i -= len(m.ExposeHeaders) + copy(dAtA[i:], m.ExposeHeaders) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ExposeHeaders))) + i-- + dAtA[i] = 0x22 + } + if len(m.AllowHeaders) > 0 { + i -= len(m.AllowHeaders) + copy(dAtA[i:], m.AllowHeaders) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AllowHeaders))) + i-- + dAtA[i] = 0x1a + } + if len(m.AllowMethods) > 0 { + i -= len(m.AllowMethods) + copy(dAtA[i:], m.AllowMethods) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AllowMethods))) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func (m *CorsPolicy_FilterEnabled) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CorsPolicy_FilterEnabled) MarshalToSizedBufferVTStrict(dAtA []byte) 
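For orientation, every MarshalToSizedBufferVTStrict in this file follows the same backward-writing scheme: payload bytes are written first, ending at the current offset i, then the length prefix, then the tag byte, all moving toward the front of a presized buffer. Below is a minimal standalone sketch assuming only the standard library; encodeVarint imitates the contract of the protohelpers.EncodeVarint calls above (the real helper ships with the vtprotobuf runtime), and the field layout copies CorsPolicy.AllowMethods (tag byte 0x12 = field 2, wire type 2).

package main

import "fmt"

// varintSize returns the number of bytes needed to encode v as a protobuf varint.
func varintSize(v uint64) (n int) {
	for {
		n++
		v >>= 7
		if v == 0 {
			return n
		}
	}
}

// encodeVarint writes v immediately before offset and returns the new offset,
// matching the contract of the protohelpers.EncodeVarint calls above.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= varintSize(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

func main() {
	// Encode one length-delimited string field, field number 2 (tag 0x12),
	// the way the generated code writes CorsPolicy.AllowMethods:
	// payload first, then its length, then the tag, back to front.
	s := "GET,POST"
	buf := make([]byte, 1+varintSize(uint64(len(s)))+len(s))
	i := len(buf)
	i -= len(s)
	copy(buf[i:], s)
	i = encodeVarint(buf, i, uint64(len(s)))
	i--
	buf[i] = 0x12
	fmt.Printf("%% x\n", buf[i:]) // 12 08 47 45 54 2c 50 4f 53 54
}

Writing back to front is what lets a nested message's length prefix be emitted after its body is already in place, with one sizing pass and no copying.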
(int, error) { + i := len(dAtA) + if m.FilterEnabled != nil { + if vtmsg, ok := interface{}(m.FilterEnabled).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.FilterEnabled) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *RouteAction_RequestMirrorPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_RequestMirrorPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_RequestMirrorPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DisableShadowHostSuffixAppend { + i-- + if m.DisableShadowHostSuffixAppend { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.ClusterHeader) > 0 { + i -= len(m.ClusterHeader) + copy(dAtA[i:], m.ClusterHeader) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterHeader))) + i-- + dAtA[i] = 0x2a + } + if m.TraceSampled != nil { + size, err := (*wrapperspb.BoolValue)(m.TraceSampled).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.RuntimeFraction != nil { + if vtmsg, ok := interface{}(m.RuntimeFraction).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RuntimeFraction) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Cluster) > 0 { + i -= len(m.Cluster) + copy(dAtA[i:], m.Cluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Cluster))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_HashPolicy_Header) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_HashPolicy_Header) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_Header) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if 
m.RegexRewrite != nil { + if vtmsg, ok := interface{}(m.RegexRewrite).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RegexRewrite) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.HeaderName) > 0 { + i -= len(m.HeaderName) + copy(dAtA[i:], m.HeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_HashPolicy_CookieAttribute) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_HashPolicy_CookieAttribute) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_CookieAttribute) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_HashPolicy_Cookie) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_HashPolicy_Cookie) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_Cookie) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Attributes[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x1a + } + if m.Ttl != nil { + size, err := (*durationpb.Duration)(m.Ttl).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_HashPolicy_ConnectionProperties) 
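The interface{}(...) assertions that keep recurring above (RuntimeFraction, RegexRewrite, MetadataMatch, and so on) are a runtime feature test: those fields have types from other packages that may not have been generated with vtprotobuf, so the code probes for the fast-path method and otherwise falls back to the reflection-based proto.Marshal. A sketch of that branch in isolation; marshalNestedTail is a hypothetical helper name, and this sketch deliberately imports the upstream google.golang.org/protobuf wrapperspb (which lacks the VT methods, unlike the vtprotobuf well-known-type shims the generated code appears to use) so that the slow path fires.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// sizedMarshaler is the anonymous interface the generated code asserts against.
type sizedMarshaler interface {
	MarshalToSizedBufferVTStrict([]byte) (int, error)
}

// marshalNestedTail writes msg's bytes ending at dAtA[i] and returns the new
// offset, preferring the vtprotobuf fast path when the concrete type has one.
func marshalNestedTail(dAtA []byte, i int, msg proto.Message) (int, error) {
	if vtmsg, ok := msg.(sizedMarshaler); ok {
		size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
		if err != nil {
			return 0, err
		}
		return i - size, nil
	}
	// Reflection-based slow path, as in the else branches above.
	encoded, err := proto.Marshal(msg)
	if err != nil {
		return 0, err
	}
	i -= len(encoded)
	copy(dAtA[i:], encoded)
	return i, nil
}

func main() {
	buf := make([]byte, 64)
	i, err := marshalNestedTail(buf, len(buf), wrapperspb.Bool(true))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%% x\n", buf[i:]) // 08 01
}

As in the generated branches, the caller is still responsible for emitting the length prefix and tag after this returns.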
MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_HashPolicy_ConnectionProperties) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_ConnectionProperties) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SourceIp { + i-- + if m.SourceIp { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_HashPolicy_QueryParameter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_HashPolicy_QueryParameter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_QueryParameter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_HashPolicy_FilterState) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_HashPolicy_FilterState) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_FilterState) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_HashPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_HashPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.PolicySpecifier.(*RouteAction_HashPolicy_FilterState_); ok { + 
size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PolicySpecifier.(*RouteAction_HashPolicy_QueryParameter_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Terminal { + i-- + if m.Terminal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if msg, ok := m.PolicySpecifier.(*RouteAction_HashPolicy_ConnectionProperties_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PolicySpecifier.(*RouteAction_HashPolicy_Cookie_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PolicySpecifier.(*RouteAction_HashPolicy_Header_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_HashPolicy_Header_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_Header_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Header != nil { + size, err := m.Header.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *RouteAction_HashPolicy_Cookie_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_Cookie_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Cookie != nil { + size, err := m.Cookie.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *RouteAction_HashPolicy_ConnectionProperties_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_ConnectionProperties_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ConnectionProperties != nil { + size, err := m.ConnectionProperties.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RouteAction_HashPolicy_QueryParameter_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_QueryParameter_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.QueryParameter != nil { + size, err := m.QueryParameter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func 
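One subtlety in the oneof wrapper marshalers just above: when the wrapper itself is set but its message pointer is nil, the else branches still emit the field with a zero-length payload rather than dropping it, so the wire data records which branch of the oneof was chosen. A small illustration of the two bytes produced for RouteAction_HashPolicy_Cookie_ with a nil Cookie:

package main

import "fmt"

func main() {
	// A nil Cookie inside the oneof wrapper still produces the field:
	// tag 0x12 (field 2, wire type 2) followed by length 0.
	b := []byte{0x12, 0x00}
	tag := b[0]
	fmt.Println("field:", tag>>3, "wire:", tag&7, "payload len:", b[1])
	// A decoder therefore sees PolicySpecifier set to an empty Cookie,
	// which is distinct from the oneof being absent entirely.
}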
(m *RouteAction_HashPolicy_FilterState_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_FilterState_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FilterState != nil { + size, err := m.FilterState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *RouteAction_UpgradeConfig_ConnectConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_UpgradeConfig_ConnectConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_UpgradeConfig_ConnectConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AllowPost { + i-- + if m.AllowPost { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.ProxyProtocolConfig != nil { + if vtmsg, ok := interface{}(m.ProxyProtocolConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ProxyProtocolConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_UpgradeConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_UpgradeConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_UpgradeConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ConnectConfig != nil { + size, err := m.ConnectConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Enabled != nil { + size, err := (*wrapperspb.BoolValue)(m.Enabled).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.UpgradeType) > 0 { + i -= len(m.UpgradeType) + copy(dAtA[i:], m.UpgradeType) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.UpgradeType))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m 
*RouteAction_MaxStreamDuration) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_MaxStreamDuration) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_MaxStreamDuration) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.GrpcTimeoutHeaderOffset != nil { + size, err := (*durationpb.Duration)(m.GrpcTimeoutHeaderOffset).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.GrpcTimeoutHeaderMax != nil { + size, err := (*durationpb.Duration)(m.GrpcTimeoutHeaderMax).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MaxStreamDuration != nil { + size, err := (*durationpb.Duration)(m.MaxStreamDuration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PathRewritePolicy != nil { + if vtmsg, ok := interface{}(m.PathRewritePolicy).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.PathRewritePolicy) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xca + } + if m.EarlyDataPolicy != nil { + if vtmsg, ok := interface{}(m.EarlyDataPolicy).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.EarlyDataPolicy) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc2 + } + if msg, ok := m.ClusterSpecifier.(*RouteAction_InlineClusterSpecifierPlugin); ok { + size, err := 
msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.AppendXForwardedHost { + i-- + if m.AppendXForwardedHost { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb0 + } + if msg, ok := m.ClusterSpecifier.(*RouteAction_ClusterSpecifierPlugin); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.MaxStreamDuration != nil { + size, err := m.MaxStreamDuration.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + if msg, ok := m.HostRewriteSpecifier.(*RouteAction_HostRewritePathRegex); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.InternalRedirectPolicy != nil { + size, err := m.InternalRedirectPolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + if m.RetryPolicyTypedConfig != nil { + size, err := (*anypb.Any)(m.RetryPolicyTypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + } + if m.RegexRewrite != nil { + if vtmsg, ok := interface{}(m.RegexRewrite).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RegexRewrite) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + if m.MaxInternalRedirects != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxInternalRedirects).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa + } + if len(m.RequestMirrorPolicies) > 0 { + for iNdEx := len(m.RequestMirrorPolicies) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RequestMirrorPolicies[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 + } + } + if msg, ok := m.HostRewriteSpecifier.(*RouteAction_HostRewriteHeader); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.GrpcTimeoutOffset != nil { + size, err := (*durationpb.Duration)(m.GrpcTimeoutOffset).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + if m.HedgePolicy != nil { + size, err := m.HedgePolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + if m.InternalRedirectAction != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.InternalRedirectAction)) + i-- + dAtA[i] = 0x1 + i-- + 
dAtA[i] = 0xd0 + } + if len(m.UpgradeConfigs) > 0 { + for iNdEx := len(m.UpgradeConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.UpgradeConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } + if m.IdleTimeout != nil { + size, err := (*durationpb.Duration)(m.IdleTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if m.MaxGrpcTimeout != nil { + size, err := (*durationpb.Duration)(m.MaxGrpcTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if m.ClusterNotFoundResponseCode != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClusterNotFoundResponseCode)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if m.Cors != nil { + size, err := m.Cors.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if len(m.HashPolicy) > 0 { + for iNdEx := len(m.HashPolicy) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.HashPolicy[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + } + if m.IncludeVhRateLimits != nil { + size, err := (*wrapperspb.BoolValue)(m.IncludeVhRateLimits).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RateLimits[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + } + if m.Priority != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x58 + } + if m.RetryPolicy != nil { + size, err := m.RetryPolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if m.Timeout != nil { + size, err := (*durationpb.Duration)(m.Timeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if msg, ok := m.HostRewriteSpecifier.(*RouteAction_AutoHostRewrite); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HostRewriteSpecifier.(*RouteAction_HostRewriteLiteral); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.PrefixRewrite) > 0 { + i -= len(m.PrefixRewrite) + copy(dAtA[i:], m.PrefixRewrite) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PrefixRewrite))) + i-- + dAtA[i] = 0x2a + } + if m.MetadataMatch != nil { + if vtmsg, ok := interface{}(m.MetadataMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if 
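The paired byte stores throughout RouteAction (for example dAtA[i] = 0x2 followed by dAtA[i] = 0xca) are simply a two-byte tag written back to front: field numbers above 15 no longer fit in a single tag byte. A sketch of the tag math, assuming nothing outside the standard library:

package main

import "fmt"

// tagBytes returns the varint encoding of a protobuf field tag,
// whose value is (fieldNumber << 3) | wireType.
func tagBytes(fieldNumber, wireType uint64) []byte {
	v := fieldNumber<<3 | wireType
	var out []byte
	for v >= 1<<7 {
		out = append(out, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(out, byte(v))
}

func main() {
	fmt.Printf("%% x\n", tagBytes(41, 2)) // ca 02: the 0x2/0xca pair for PathRewritePolicy above
	fmt.Printf("%% x\n", tagBytes(31, 2)) // fa 01: the 0x1/0xfa pair for MaxInternalRedirects above
}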
err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MetadataMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if msg, ok := m.ClusterSpecifier.(*RouteAction_WeightedClusters); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ClusterSpecifier.(*RouteAction_ClusterHeader); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ClusterSpecifier.(*RouteAction_Cluster); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_Cluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_Cluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Cluster) + copy(dAtA[i:], m.Cluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Cluster))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *RouteAction_ClusterHeader) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_ClusterHeader) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.ClusterHeader) + copy(dAtA[i:], m.ClusterHeader) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterHeader))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *RouteAction_WeightedClusters) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_WeightedClusters) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.WeightedClusters != nil { + size, err := m.WeightedClusters.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RouteAction_HostRewriteLiteral) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HostRewriteLiteral) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.HostRewriteLiteral) + copy(dAtA[i:], m.HostRewriteLiteral) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HostRewriteLiteral))) + i-- + dAtA[i] = 0x32 + return len(dAtA) - i, nil +} +func (m *RouteAction_AutoHostRewrite) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_AutoHostRewrite) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AutoHostRewrite != nil { + size, err := (*wrapperspb.BoolValue)(m.AutoHostRewrite).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, 
nil +} +func (m *RouteAction_HostRewriteHeader) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HostRewriteHeader) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.HostRewriteHeader) + copy(dAtA[i:], m.HostRewriteHeader) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HostRewriteHeader))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + return len(dAtA) - i, nil +} +func (m *RouteAction_HostRewritePathRegex) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HostRewritePathRegex) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HostRewritePathRegex != nil { + if vtmsg, ok := interface{}(m.HostRewritePathRegex).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HostRewritePathRegex) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + return len(dAtA) - i, nil +} +func (m *RouteAction_ClusterSpecifierPlugin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_ClusterSpecifierPlugin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.ClusterSpecifierPlugin) + copy(dAtA[i:], m.ClusterSpecifierPlugin) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterSpecifierPlugin))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xaa + return len(dAtA) - i, nil +} +func (m *RouteAction_InlineClusterSpecifierPlugin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_InlineClusterSpecifierPlugin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.InlineClusterSpecifierPlugin != nil { + size, err := m.InlineClusterSpecifierPlugin.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xba + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xba + } + return len(dAtA) - i, nil +} +func (m *RetryPolicy_RetryPriority) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryPolicy_RetryPriority) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryPriority) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := 
m.ConfigType.(*RetryPolicy_RetryPriority_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RetryPolicy_RetryPriority_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryPriority_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RetryPolicy_RetryHostPredicate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryPolicy_RetryHostPredicate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryHostPredicate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*RetryPolicy_RetryHostPredicate_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RetryPolicy_RetryHostPredicate_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryHostPredicate_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RetryPolicy_RetryBackOff) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryPolicy_RetryBackOff) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryBackOff) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxInterval != nil { + size, err := 
(*durationpb.Duration)(m.MaxInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.BaseInterval != nil { + size, err := (*durationpb.Duration)(m.BaseInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RetryPolicy_ResetHeader) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryPolicy_ResetHeader) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_ResetHeader) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Format != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Format)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RetryPolicy_RateLimitedRetryBackOff) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryPolicy_RateLimitedRetryBackOff) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RateLimitedRetryBackOff) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxInterval != nil { + size, err := (*durationpb.Duration)(m.MaxInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.ResetHeaders) > 0 { + for iNdEx := len(m.ResetHeaders) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ResetHeaders[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RetryPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if 
m.PerTryIdleTimeout != nil { + size, err := (*durationpb.Duration)(m.PerTryIdleTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + if len(m.RetryOptionsPredicates) > 0 { + for iNdEx := len(m.RetryOptionsPredicates) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.RetryOptionsPredicates[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RetryOptionsPredicates[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x62 + } + } + if m.RateLimitedRetryBackOff != nil { + size, err := m.RateLimitedRetryBackOff.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if len(m.RetriableRequestHeaders) > 0 { + for iNdEx := len(m.RetriableRequestHeaders) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RetriableRequestHeaders[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + } + if len(m.RetriableHeaders) > 0 { + for iNdEx := len(m.RetriableHeaders) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RetriableHeaders[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + } + if m.RetryBackOff != nil { + size, err := m.RetryBackOff.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if len(m.RetriableStatusCodes) > 0 { + var pksize2 int + for _, num := range m.RetriableStatusCodes { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num := range m.RetriableStatusCodes { + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x3a + } + if m.HostSelectionRetryMaxAttempts != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HostSelectionRetryMaxAttempts)) + i-- + dAtA[i] = 0x30 + } + if len(m.RetryHostPredicate) > 0 { + for iNdEx := len(m.RetryHostPredicate) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RetryHostPredicate[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.RetryPriority != nil { + size, err := m.RetryPriority.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.PerTryTimeout != nil { + size, err := (*durationpb.Duration)(m.PerTryTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.NumRetries != nil { + size, err := 
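The pksize2/j1 block above handles RetriableStatusCodes as a packed repeated field: every status code is varint-encoded into one contiguous payload, prefixed by a single length and a single tag (0x3a = field 7, wire type 2). The same math, forward-written as a standalone sketch:

package main

import "fmt"

// varintSize returns the encoded length of v, as in the pksize2 pre-pass above.
func varintSize(v uint64) (n int) {
	for {
		n++
		v >>= 7
		if v == 0 {
			return n
		}
	}
}

func main() {
	codes := []uint32{503, 504}
	pksize := 0
	for _, num := range codes {
		pksize += varintSize(uint64(num))
	}
	// Tag, then length (one byte suffices for this example), then the packed
	// payload; the generated j1 loop writes the same bytes into a
	// pre-positioned window of the larger buffer.
	buf := []byte{0x3a, byte(pksize)}
	for _, num := range codes {
		v := uint64(num)
		for v >= 1<<7 {
			buf = append(buf, byte(v&0x7f|0x80))
			v >>= 7
		}
		buf = append(buf, byte(v))
	}
	fmt.Printf("%% x\n", buf) // 3a 04 f7 03 f8 03
}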
(*wrapperspb.UInt32Value)(m.NumRetries).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.RetryOn) > 0 { + i -= len(m.RetryOn) + copy(dAtA[i:], m.RetryOn) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RetryOn))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HedgePolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HedgePolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HedgePolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.HedgeOnPerTryTimeout { + i-- + if m.HedgeOnPerTryTimeout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.AdditionalRequestChance != nil { + if vtmsg, ok := interface{}(m.AdditionalRequestChance).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AdditionalRequestChance) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.InitialRequests != nil { + size, err := (*wrapperspb.UInt32Value)(m.InitialRequests).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RedirectAction) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RedirectAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RedirectAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.PathRewriteSpecifier.(*RedirectAction_RegexRewrite); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.PortRedirect != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.PortRedirect)) + i-- + dAtA[i] = 0x40 + } + if msg, ok := m.SchemeRewriteSpecifier.(*RedirectAction_SchemeRedirect); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.StripQuery { + i-- + if m.StripQuery { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if msg, ok := m.PathRewriteSpecifier.(*RedirectAction_PrefixRewrite); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) 
+ if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.SchemeRewriteSpecifier.(*RedirectAction_HttpsRedirect); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.ResponseCode != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ResponseCode)) + i-- + dAtA[i] = 0x18 + } + if msg, ok := m.PathRewriteSpecifier.(*RedirectAction_PathRedirect); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.HostRedirect) > 0 { + i -= len(m.HostRedirect) + copy(dAtA[i:], m.HostRedirect) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HostRedirect))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RedirectAction_PathRedirect) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RedirectAction_PathRedirect) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.PathRedirect) + copy(dAtA[i:], m.PathRedirect) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PathRedirect))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *RedirectAction_HttpsRedirect) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RedirectAction_HttpsRedirect) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.HttpsRedirect { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *RedirectAction_PrefixRewrite) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RedirectAction_PrefixRewrite) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.PrefixRewrite) + copy(dAtA[i:], m.PrefixRewrite) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PrefixRewrite))) + i-- + dAtA[i] = 0x2a + return len(dAtA) - i, nil +} +func (m *RedirectAction_SchemeRedirect) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RedirectAction_SchemeRedirect) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.SchemeRedirect) + copy(dAtA[i:], m.SchemeRedirect) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SchemeRedirect))) + i-- + dAtA[i] = 0x3a + return len(dAtA) - i, nil +} +func (m *RedirectAction_RegexRewrite) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RedirectAction_RegexRewrite) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RegexRewrite != nil { + if vtmsg, ok := interface{}(m.RegexRewrite).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RegexRewrite) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *DirectResponseAction) 
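A final presence note on RedirectAction: plain proto3 scalars such as StripQuery and ResponseCode are guarded by if-statements and vanish from the wire when they hold the zero value, while oneof variants such as RedirectAction_HttpsRedirect marshal unconditionally, so even a false value records which branch of the oneof was set. In bytes:

package main

import "fmt"

func main() {
	stripQueryFalse := []byte(nil)        // plain proto3 bool, zero value: field omitted entirely
	httpsRedirectFalse := []byte{0x20, 0} // oneof bool variant: tag 0x20 (field 4, wire 0) + value 0
	fmt.Println(len(stripQueryFalse), len(httpsRedirectFalse)) // 0 2
}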
MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DirectResponseAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DirectResponseAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Body != nil { + if vtmsg, ok := interface{}(m.Body).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Body) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.Status != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *NonForwardingAction) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NonForwardingAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *NonForwardingAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *Decorator) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Decorator) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Decorator) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Propagate != nil { + size, err := (*wrapperspb.BoolValue)(m.Propagate).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Operation) > 0 { + i -= len(m.Operation) + copy(dAtA[i:], m.Operation) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Operation))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Tracing) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Tracing) MarshalToVTStrict(dAtA []byte) (int, 
error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Tracing) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CustomTags) > 0 { + for iNdEx := len(m.CustomTags) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.CustomTags[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CustomTags[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + } + if m.OverallSampling != nil { + if vtmsg, ok := interface{}(m.OverallSampling).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.OverallSampling) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.RandomSampling != nil { + if vtmsg, ok := interface{}(m.RandomSampling).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RandomSampling) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.ClientSampling != nil { + if vtmsg, ok := interface{}(m.ClientSampling).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ClientSampling) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VirtualCluster) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VirtualCluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *VirtualCluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Headers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 
0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_SourceCluster) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_SourceCluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_SourceCluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_DestinationCluster) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_DestinationCluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_DestinationCluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_RequestHeaders) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_RequestHeaders) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_RequestHeaders) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SkipIfAbsent { + i-- + if m.SkipIfAbsent { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.DescriptorKey) > 0 { + i -= len(m.DescriptorKey) + copy(dAtA[i:], m.DescriptorKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorKey))) + i-- + dAtA[i] = 0x12 + } + if len(m.HeaderName) > 0 { + i -= len(m.HeaderName) + copy(dAtA[i:], m.HeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_RemoteAddress) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_RemoteAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_RemoteAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_MaskedRemoteAddress) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_MaskedRemoteAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_MaskedRemoteAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.V6PrefixMaskLen != nil { + size, err := (*wrapperspb.UInt32Value)(m.V6PrefixMaskLen).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.V4PrefixMaskLen != nil { + size, err := (*wrapperspb.UInt32Value)(m.V4PrefixMaskLen).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_GenericKey) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_GenericKey) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_GenericKey) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DescriptorKey) > 0 { + i -= len(m.DescriptorKey) + copy(dAtA[i:], m.DescriptorKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorKey))) + i-- + dAtA[i] = 0x12 + } + if len(m.DescriptorValue) > 0 { + i -= len(m.DescriptorValue) + copy(dAtA[i:], m.DescriptorValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorValue))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_HeaderValueMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_HeaderValueMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_HeaderValueMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + 
copy(dAtA[i:], m.unknownFields) + } + if len(m.DescriptorKey) > 0 { + i -= len(m.DescriptorKey) + copy(dAtA[i:], m.DescriptorKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorKey))) + i-- + dAtA[i] = 0x22 + } + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Headers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.ExpectMatch != nil { + size, err := (*wrapperspb.BoolValue)(m.ExpectMatch).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.DescriptorValue) > 0 { + i -= len(m.DescriptorValue) + copy(dAtA[i:], m.DescriptorValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorValue))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_DynamicMetaData) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_DynamicMetaData) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_DynamicMetaData) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DefaultValue) > 0 { + i -= len(m.DefaultValue) + copy(dAtA[i:], m.DefaultValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DefaultValue))) + i-- + dAtA[i] = 0x1a + } + if m.MetadataKey != nil { + if vtmsg, ok := interface{}(m.MetadataKey).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MetadataKey) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.DescriptorKey) > 0 { + i -= len(m.DescriptorKey) + copy(dAtA[i:], m.DescriptorKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorKey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_MetaData) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_MetaData) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_MetaData) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SkipIfAbsent { + i-- + if m.SkipIfAbsent { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 
+ } + if m.Source != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Source)) + i-- + dAtA[i] = 0x20 + } + if len(m.DefaultValue) > 0 { + i -= len(m.DefaultValue) + copy(dAtA[i:], m.DefaultValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DefaultValue))) + i-- + dAtA[i] = 0x1a + } + if m.MetadataKey != nil { + if vtmsg, ok := interface{}(m.MetadataKey).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MetadataKey) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.DescriptorKey) > 0 { + i -= len(m.DescriptorKey) + copy(dAtA[i:], m.DescriptorKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorKey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_QueryParameterValueMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_QueryParameterValueMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_QueryParameterValueMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DescriptorKey) > 0 { + i -= len(m.DescriptorKey) + copy(dAtA[i:], m.DescriptorKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorKey))) + i-- + dAtA[i] = 0x22 + } + if len(m.QueryParameters) > 0 { + for iNdEx := len(m.QueryParameters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.QueryParameters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.ExpectMatch != nil { + size, err := (*wrapperspb.BoolValue)(m.ExpectMatch).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.DescriptorValue) > 0 { + i -= len(m.DescriptorValue) + copy(dAtA[i:], m.DescriptorValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorValue))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := 
m.ActionSpecifier.(*RateLimit_Action_QueryParameterValueMatch_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_MaskedRemoteAddress_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_Extension); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_Metadata); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_DynamicMetadata); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_HeaderValueMatch_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_GenericKey_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_RemoteAddress_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_RequestHeaders_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_DestinationCluster_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_SourceCluster_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_SourceCluster_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_SourceCluster_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SourceCluster != nil { + size, err := m.SourceCluster.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_DestinationCluster_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_DestinationCluster_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DestinationCluster != nil { + size, err := m.DestinationCluster.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_RequestHeaders_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*RateLimit_Action_RequestHeaders_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RequestHeaders != nil { + size, err := m.RequestHeaders.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_RemoteAddress_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_RemoteAddress_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RemoteAddress != nil { + size, err := m.RemoteAddress.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_GenericKey_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_GenericKey_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GenericKey != nil { + size, err := m.GenericKey.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_HeaderValueMatch_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_HeaderValueMatch_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HeaderValueMatch != nil { + size, err := m.HeaderValueMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_DynamicMetadata) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_DynamicMetadata) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DynamicMetadata != nil { + size, err := m.DynamicMetadata.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_Metadata) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_Metadata) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Metadata != nil { + size, err := m.Metadata.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) 
- i, nil +} +func (m *RateLimit_Action_Extension) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_Extension) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Extension != nil { + if vtmsg, ok := interface{}(m.Extension).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Extension) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_MaskedRemoteAddress_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_MaskedRemoteAddress_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MaskedRemoteAddress != nil { + size, err := m.MaskedRemoteAddress.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_QueryParameterValueMatch_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_QueryParameterValueMatch_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.QueryParameterValueMatch != nil { + size, err := m.QueryParameterValueMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Override_DynamicMetadata) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Override_DynamicMetadata) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Override_DynamicMetadata) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MetadataKey != nil { + if vtmsg, ok := interface{}(m.MetadataKey).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MetadataKey) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 
0xa + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Override) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Override) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Override) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.OverrideSpecifier.(*RateLimit_Override_DynamicMetadata_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Override_DynamicMetadata_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Override_DynamicMetadata_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DynamicMetadata != nil { + size, err := m.DynamicMetadata.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *RateLimit) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Limit != nil { + size, err := m.Limit.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.Actions) > 0 { + for iNdEx := len(m.Actions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Actions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.DisableKey) > 0 { + i -= len(m.DisableKey) + copy(dAtA[i:], m.DisableKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DisableKey))) + i-- + dAtA[i] = 0x12 + } + if m.Stage != nil { + size, err := (*wrapperspb.UInt32Value)(m.Stage).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HeaderMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeaderMatcher) 
MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TreatMissingHeaderAsEmpty { + i-- + if m.TreatMissingHeaderAsEmpty { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + } + if msg, ok := m.HeaderMatchSpecifier.(*HeaderMatcher_StringMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HeaderMatchSpecifier.(*HeaderMatcher_ContainsMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HeaderMatchSpecifier.(*HeaderMatcher_SafeRegexMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HeaderMatchSpecifier.(*HeaderMatcher_SuffixMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HeaderMatchSpecifier.(*HeaderMatcher_PrefixMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.InvertMatch { + i-- + if m.InvertMatch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if msg, ok := m.HeaderMatchSpecifier.(*HeaderMatcher_PresentMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HeaderMatchSpecifier.(*HeaderMatcher_RangeMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HeaderMatchSpecifier.(*HeaderMatcher_ExactMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HeaderMatcher_ExactMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher_ExactMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.ExactMatch) + copy(dAtA[i:], m.ExactMatch) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ExactMatch))) + i-- + dAtA[i] = 0x22 + return len(dAtA) - i, nil +} +func (m *HeaderMatcher_RangeMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher_RangeMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RangeMatch != nil { + if vtmsg, ok := interface{}(m.RangeMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RangeMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *HeaderMatcher_PresentMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher_PresentMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.PresentMatch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + return len(dAtA) - i, nil +} +func (m *HeaderMatcher_PrefixMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher_PrefixMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.PrefixMatch) + copy(dAtA[i:], m.PrefixMatch) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PrefixMatch))) + i-- + dAtA[i] = 0x4a + return len(dAtA) - i, nil +} +func (m *HeaderMatcher_SuffixMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher_SuffixMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.SuffixMatch) + copy(dAtA[i:], m.SuffixMatch) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SuffixMatch))) + i-- + dAtA[i] = 0x52 + return len(dAtA) - i, nil +} +func (m *HeaderMatcher_SafeRegexMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher_SafeRegexMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SafeRegexMatch != nil { + if vtmsg, ok := interface{}(m.SafeRegexMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SafeRegexMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x5a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *HeaderMatcher_ContainsMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher_ContainsMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.ContainsMatch) + copy(dAtA[i:], m.ContainsMatch) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ContainsMatch))) + i-- + dAtA[i] = 0x62 + return len(dAtA) - i, nil +} +func (m *HeaderMatcher_StringMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher_StringMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StringMatch != nil { + if vtmsg, ok := interface{}(m.StringMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.StringMatch) + if err != nil { + return 
0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x6a + } + return len(dAtA) - i, nil +} +func (m *QueryParameterMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParameterMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *QueryParameterMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.QueryParameterMatchSpecifier.(*QueryParameterMatcher_PresentMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.QueryParameterMatchSpecifier.(*QueryParameterMatcher_StringMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryParameterMatcher_StringMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *QueryParameterMatcher_StringMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StringMatch != nil { + if vtmsg, ok := interface{}(m.StringMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.StringMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *QueryParameterMatcher_PresentMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *QueryParameterMatcher_PresentMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.PresentMatch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + return len(dAtA) - i, nil +} +func (m *InternalRedirectPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InternalRedirectPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *InternalRedirectPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + 
if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ResponseHeadersToCopy) > 0 { + for iNdEx := len(m.ResponseHeadersToCopy) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResponseHeadersToCopy[iNdEx]) + copy(dAtA[i:], m.ResponseHeadersToCopy[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseHeadersToCopy[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.AllowCrossSchemeRedirect { + i-- + if m.AllowCrossSchemeRedirect { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.Predicates) > 0 { + for iNdEx := len(m.Predicates) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Predicates[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Predicates[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.RedirectResponseCodes) > 0 { + var pksize2 int + for _, num := range m.RedirectResponseCodes { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num := range m.RedirectResponseCodes { + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x12 + } + if m.MaxInternalRedirects != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxInternalRedirects).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FilterConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilterConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FilterConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Disabled { + i-- + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.IsOptional { + i-- + if m.IsOptional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Config != nil { + size, err := (*anypb.Any)(m.Config).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VirtualHost) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Domains) > 0 { + for _, s := range m.Domains { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Routes) > 0 { + for _, e := range m.Routes { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } 
+ if m.RequireTls != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.RequireTls)) + } + if len(m.VirtualClusters) > 0 { + for _, e := range m.VirtualClusters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RateLimits) > 0 { + for _, e := range m.RateLimits { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RequestHeadersToAdd) > 0 { + for _, e := range m.RequestHeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Cors != nil { + l = m.Cors.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ResponseHeadersToAdd) > 0 { + for _, e := range m.ResponseHeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResponseHeadersToRemove) > 0 { + for _, s := range m.ResponseHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RequestHeadersToRemove) > 0 { + for _, s := range m.RequestHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.IncludeRequestAttemptCount { + n += 2 + } + if len(m.TypedPerFilterConfig) > 0 { + for k, v := range m.TypedPerFilterConfig { + _ = k + _ = v + l = 0 + if v != nil { + l = (*anypb.Any)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if m.RetryPolicy != nil { + l = m.RetryPolicy.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HedgePolicy != nil { + l = m.HedgePolicy.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PerRequestBufferLimitBytes != nil { + l = (*wrapperspb.UInt32Value)(m.PerRequestBufferLimitBytes).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IncludeAttemptCountInResponse { + n += 3 + } + if m.RetryPolicyTypedConfig != nil { + l = (*anypb.Any)(m.RetryPolicyTypedConfig).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Matcher != nil { + if size, ok := interface{}(m.Matcher).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Matcher) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RequestMirrorPolicies) > 0 { + for _, e := range m.RequestMirrorPolicies { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.IncludeIsTimeoutRetryHeader { + n += 3 + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *FilterAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Action != nil { + l = (*anypb.Any)(m.Action).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Routes) > 0 { + for _, e := range m.Routes { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += 
len(m.unknownFields) + return n +} + +func (m *Route) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Match != nil { + l = m.Match.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.Action.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Decorator != nil { + l = m.Decorator.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RequestHeadersToAdd) > 0 { + for _, e := range m.RequestHeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResponseHeadersToAdd) > 0 { + for _, e := range m.ResponseHeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResponseHeadersToRemove) > 0 { + for _, s := range m.ResponseHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RequestHeadersToRemove) > 0 { + for _, s := range m.RequestHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.TypedPerFilterConfig) > 0 { + for k, v := range m.TypedPerFilterConfig { + _ = k + _ = v + l = 0 + if v != nil { + l = (*anypb.Any)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Tracing != nil { + l = m.Tracing.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PerRequestBufferLimitBytes != nil { + l = (*wrapperspb.UInt32Value)(m.PerRequestBufferLimitBytes).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.StatPrefix) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Route_Route) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Route != nil { + l = m.Route.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Route_Redirect) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Redirect != nil { + l = m.Redirect.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Route_DirectResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DirectResponse != nil { + l = m.DirectResponse.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Route_FilterAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FilterAction != nil { + l = m.FilterAction.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *Route_NonForwardingAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NonForwardingAction != nil { + l = m.NonForwardingAction.SizeVT() + n += 2 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *WeightedCluster_ClusterWeight) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Weight != nil { + l = (*wrapperspb.UInt32Value)(m.Weight).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MetadataMatch != nil { + if size, ok := interface{}(m.MetadataMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MetadataMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RequestHeadersToAdd) > 0 { + for _, e := range m.RequestHeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResponseHeadersToAdd) > 0 { + for _, e := range m.ResponseHeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResponseHeadersToRemove) > 0 { + for _, s := range m.ResponseHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RequestHeadersToRemove) > 0 { + for _, s := range m.RequestHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.TypedPerFilterConfig) > 0 { + for k, v := range m.TypedPerFilterConfig { + _ = k + _ = v + l = 0 + if v != nil { + l = (*anypb.Any)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if vtmsg, ok := m.HostRewriteSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + l = len(m.ClusterHeader) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *WeightedCluster_ClusterWeight_HostRewriteLiteral) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HostRewriteLiteral) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *WeightedCluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Clusters) > 0 { + for _, e := range m.Clusters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.RuntimeKeyPrefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TotalWeight != nil { + l = (*wrapperspb.UInt32Value)(m.TotalWeight).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.RandomValueSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *WeightedCluster_HeaderName) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HeaderName) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *ClusterSpecifierPlugin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Extension != nil { + if size, ok := interface{}(m.Extension).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Extension) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IsOptional { + n += 2 + } + n += len(m.unknownFields) + 
return n +} + +func (m *RouteMatch_GrpcRouteMatchOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RouteMatch_TlsContextMatchOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Presented != nil { + l = (*wrapperspb.BoolValue)(m.Presented).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Validated != nil { + l = (*wrapperspb.BoolValue)(m.Validated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteMatch_ConnectMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RouteMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.PathSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.CaseSensitive != nil { + l = (*wrapperspb.BoolValue)(m.CaseSensitive).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Headers) > 0 { + for _, e := range m.Headers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.QueryParameters) > 0 { + for _, e := range m.QueryParameters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Grpc != nil { + l = m.Grpc.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RuntimeFraction != nil { + if size, ok := interface{}(m.RuntimeFraction).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RuntimeFraction) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TlsContext != nil { + l = m.TlsContext.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.DynamicMetadata) > 0 { + for _, e := range m.DynamicMetadata { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RouteMatch_Prefix) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Prefix) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RouteMatch_Path) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RouteMatch_SafeRegex) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SafeRegex != nil { + if size, ok := interface{}(m.SafeRegex).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SafeRegex) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteMatch_ConnectMatcher_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConnectMatcher != nil { + l = m.ConnectMatcher.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteMatch_PathSeparatedPrefix) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PathSeparatedPrefix) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RouteMatch_PathMatchPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PathMatchPolicy != nil { + if size, ok := interface{}(m.PathMatchPolicy).(interface { + SizeVT() int + }); ok { + 
l = size.SizeVT() + } else { + l = proto.Size(m.PathMatchPolicy) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CorsPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AllowMethods) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.AllowHeaders) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ExposeHeaders) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.MaxAge) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AllowCredentials != nil { + l = (*wrapperspb.BoolValue)(m.AllowCredentials).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.EnabledSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.ShadowEnabled != nil { + if size, ok := interface{}(m.ShadowEnabled).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ShadowEnabled) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.AllowOriginStringMatch) > 0 { + for _, e := range m.AllowOriginStringMatch { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.AllowPrivateNetworkAccess != nil { + l = (*wrapperspb.BoolValue)(m.AllowPrivateNetworkAccess).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ForwardNotMatchingPreflights != nil { + l = (*wrapperspb.BoolValue)(m.ForwardNotMatchingPreflights).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CorsPolicy_FilterEnabled) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FilterEnabled != nil { + if size, ok := interface{}(m.FilterEnabled).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.FilterEnabled) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteAction_RequestMirrorPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Cluster) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RuntimeFraction != nil { + if size, ok := interface{}(m.RuntimeFraction).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RuntimeFraction) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TraceSampled != nil { + l = (*wrapperspb.BoolValue)(m.TraceSampled).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ClusterHeader) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DisableShadowHostSuffixAppend { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_HashPolicy_Header) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HeaderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RegexRewrite != nil { + if size, ok := interface{}(m.RegexRewrite).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RegexRewrite) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_HashPolicy_CookieAttribute) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + 
if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_HashPolicy_Cookie) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Ttl != nil { + l = (*durationpb.Duration)(m.Ttl).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_HashPolicy_ConnectionProperties) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SourceIp { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_HashPolicy_QueryParameter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_HashPolicy_FilterState) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_HashPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.PolicySpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Terminal { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_HashPolicy_Header_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteAction_HashPolicy_Cookie_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cookie != nil { + l = m.Cookie.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteAction_HashPolicy_ConnectionProperties_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConnectionProperties != nil { + l = m.ConnectionProperties.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteAction_HashPolicy_QueryParameter_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.QueryParameter != nil { + l = m.QueryParameter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteAction_HashPolicy_FilterState_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FilterState != nil { + l = m.FilterState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteAction_UpgradeConfig_ConnectConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ProxyProtocolConfig != nil { + if size, ok := interface{}(m.ProxyProtocolConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ProxyProtocolConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AllowPost { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m 
*RouteAction_UpgradeConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UpgradeType) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Enabled != nil { + l = (*wrapperspb.BoolValue)(m.Enabled).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectConfig != nil { + l = m.ConnectConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_MaxStreamDuration) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxStreamDuration != nil { + l = (*durationpb.Duration)(m.MaxStreamDuration).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.GrpcTimeoutHeaderMax != nil { + l = (*durationpb.Duration)(m.GrpcTimeoutHeaderMax).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.GrpcTimeoutHeaderOffset != nil { + l = (*durationpb.Duration)(m.GrpcTimeoutHeaderOffset).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.ClusterSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.MetadataMatch != nil { + if size, ok := interface{}(m.MetadataMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MetadataMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.PrefixRewrite) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.HostRewriteSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Timeout != nil { + l = (*durationpb.Duration)(m.Timeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RetryPolicy != nil { + l = m.RetryPolicy.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Priority != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Priority)) + } + if len(m.RateLimits) > 0 { + for _, e := range m.RateLimits { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.IncludeVhRateLimits != nil { + l = (*wrapperspb.BoolValue)(m.IncludeVhRateLimits).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.HashPolicy) > 0 { + for _, e := range m.HashPolicy { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Cors != nil { + l = m.Cors.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClusterNotFoundResponseCode != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.ClusterNotFoundResponseCode)) + } + if m.MaxGrpcTimeout != nil { + l = (*durationpb.Duration)(m.MaxGrpcTimeout).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IdleTimeout != nil { + l = (*durationpb.Duration)(m.IdleTimeout).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.UpgradeConfigs) > 0 { + for _, e := range m.UpgradeConfigs { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.InternalRedirectAction != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.InternalRedirectAction)) + } + if m.HedgePolicy != nil { + l = m.HedgePolicy.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.GrpcTimeoutOffset != nil { + l = (*durationpb.Duration)(m.GrpcTimeoutOffset).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RequestMirrorPolicies) > 0 
{ + for _, e := range m.RequestMirrorPolicies { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.MaxInternalRedirects != nil { + l = (*wrapperspb.UInt32Value)(m.MaxInternalRedirects).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RegexRewrite != nil { + if size, ok := interface{}(m.RegexRewrite).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RegexRewrite) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RetryPolicyTypedConfig != nil { + l = (*anypb.Any)(m.RetryPolicyTypedConfig).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.InternalRedirectPolicy != nil { + l = m.InternalRedirectPolicy.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxStreamDuration != nil { + l = m.MaxStreamDuration.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AppendXForwardedHost { + n += 3 + } + if m.EarlyDataPolicy != nil { + if size, ok := interface{}(m.EarlyDataPolicy).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.EarlyDataPolicy) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PathRewritePolicy != nil { + if size, ok := interface{}(m.PathRewritePolicy).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.PathRewritePolicy) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_Cluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Cluster) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RouteAction_ClusterHeader) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClusterHeader) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RouteAction_WeightedClusters) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.WeightedClusters != nil { + l = m.WeightedClusters.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteAction_HostRewriteLiteral) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HostRewriteLiteral) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RouteAction_AutoHostRewrite) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AutoHostRewrite != nil { + l = (*wrapperspb.BoolValue)(m.AutoHostRewrite).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteAction_HostRewriteHeader) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HostRewriteHeader) + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RouteAction_HostRewritePathRegex) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HostRewritePathRegex != nil { + if size, ok := interface{}(m.HostRewritePathRegex).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.HostRewritePathRegex) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *RouteAction_ClusterSpecifierPlugin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClusterSpecifierPlugin) + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RouteAction_InlineClusterSpecifierPlugin) 
SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.InlineClusterSpecifierPlugin != nil { + l = m.InlineClusterSpecifierPlugin.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *RetryPolicy_RetryPriority) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *RetryPolicy_RetryPriority_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RetryPolicy_RetryHostPredicate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *RetryPolicy_RetryHostPredicate_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RetryPolicy_RetryBackOff) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BaseInterval != nil { + l = (*durationpb.Duration)(m.BaseInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxInterval != nil { + l = (*durationpb.Duration)(m.MaxInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RetryPolicy_ResetHeader) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Format != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Format)) + } + n += len(m.unknownFields) + return n +} + +func (m *RetryPolicy_RateLimitedRetryBackOff) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResetHeaders) > 0 { + for _, e := range m.ResetHeaders { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.MaxInterval != nil { + l = (*durationpb.Duration)(m.MaxInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RetryPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RetryOn) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.NumRetries != nil { + l = (*wrapperspb.UInt32Value)(m.NumRetries).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PerTryTimeout != nil { + l = (*durationpb.Duration)(m.PerTryTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RetryPriority != nil { + l = m.RetryPriority.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RetryHostPredicate) > 0 { + for _, e := range m.RetryHostPredicate { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.HostSelectionRetryMaxAttempts != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HostSelectionRetryMaxAttempts)) + } + if 
len(m.RetriableStatusCodes) > 0 { + l = 0 + for _, e := range m.RetriableStatusCodes { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if m.RetryBackOff != nil { + l = m.RetryBackOff.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RetriableHeaders) > 0 { + for _, e := range m.RetriableHeaders { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RetriableRequestHeaders) > 0 { + for _, e := range m.RetriableRequestHeaders { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.RateLimitedRetryBackOff != nil { + l = m.RateLimitedRetryBackOff.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RetryOptionsPredicates) > 0 { + for _, e := range m.RetryOptionsPredicates { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.PerTryIdleTimeout != nil { + l = (*durationpb.Duration)(m.PerTryIdleTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HedgePolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.InitialRequests != nil { + l = (*wrapperspb.UInt32Value)(m.InitialRequests).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AdditionalRequestChance != nil { + if size, ok := interface{}(m.AdditionalRequestChance).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.AdditionalRequestChance) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HedgeOnPerTryTimeout { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RedirectAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HostRedirect) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.PathRewriteSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.ResponseCode != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ResponseCode)) + } + if vtmsg, ok := m.SchemeRewriteSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.StripQuery { + n += 2 + } + if m.PortRedirect != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.PortRedirect)) + } + n += len(m.unknownFields) + return n +} + +func (m *RedirectAction_PathRedirect) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PathRedirect) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RedirectAction_HttpsRedirect) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *RedirectAction_PrefixRewrite) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PrefixRewrite) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RedirectAction_SchemeRedirect) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SchemeRedirect) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RedirectAction_RegexRewrite) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RegexRewrite != nil { + if size, ok := interface{}(m.RegexRewrite).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RegexRewrite) + } + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *DirectResponseAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Status)) + } + if m.Body != nil { + if size, ok := interface{}(m.Body).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Body) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *NonForwardingAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *Decorator) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Operation) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Propagate != nil { + l = (*wrapperspb.BoolValue)(m.Propagate).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Tracing) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClientSampling != nil { + if size, ok := interface{}(m.ClientSampling).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ClientSampling) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RandomSampling != nil { + if size, ok := interface{}(m.RandomSampling).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RandomSampling) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OverallSampling != nil { + if size, ok := interface{}(m.OverallSampling).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.OverallSampling) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.CustomTags) > 0 { + for _, e := range m.CustomTags { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *VirtualCluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Headers) > 0 { + for _, e := range m.Headers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_SourceCluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_DestinationCluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_RequestHeaders) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HeaderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DescriptorKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SkipIfAbsent { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_RemoteAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_MaskedRemoteAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.V4PrefixMaskLen != nil { + l = 
(*wrapperspb.UInt32Value)(m.V4PrefixMaskLen).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.V6PrefixMaskLen != nil { + l = (*wrapperspb.UInt32Value)(m.V6PrefixMaskLen).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_GenericKey) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DescriptorValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DescriptorKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_HeaderValueMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DescriptorValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ExpectMatch != nil { + l = (*wrapperspb.BoolValue)(m.ExpectMatch).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Headers) > 0 { + for _, e := range m.Headers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.DescriptorKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_DynamicMetaData) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DescriptorKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MetadataKey != nil { + if size, ok := interface{}(m.MetadataKey).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MetadataKey) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DefaultValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_MetaData) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DescriptorKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MetadataKey != nil { + if size, ok := interface{}(m.MetadataKey).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MetadataKey) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DefaultValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Source != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Source)) + } + if m.SkipIfAbsent { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_QueryParameterValueMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DescriptorValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ExpectMatch != nil { + l = (*wrapperspb.BoolValue)(m.ExpectMatch).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.QueryParameters) > 0 { + for _, e := range m.QueryParameters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.DescriptorKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.ActionSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_SourceCluster_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if 
m.SourceCluster != nil { + l = m.SourceCluster.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_DestinationCluster_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DestinationCluster != nil { + l = m.DestinationCluster.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_RequestHeaders_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestHeaders != nil { + l = m.RequestHeaders.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_RemoteAddress_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RemoteAddress != nil { + l = m.RemoteAddress.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_GenericKey_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GenericKey != nil { + l = m.GenericKey.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_HeaderValueMatch_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HeaderValueMatch != nil { + l = m.HeaderValueMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_DynamicMetadata) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DynamicMetadata != nil { + l = m.DynamicMetadata.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_Metadata) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Metadata != nil { + l = m.Metadata.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_Extension) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Extension != nil { + if size, ok := interface{}(m.Extension).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Extension) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_MaskedRemoteAddress_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaskedRemoteAddress != nil { + l = m.MaskedRemoteAddress.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_QueryParameterValueMatch_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.QueryParameterValueMatch != nil { + l = m.QueryParameterValueMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Override_DynamicMetadata) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MetadataKey != nil { + if size, ok := interface{}(m.MetadataKey).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MetadataKey) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Override) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.OverrideSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += 
len(m.unknownFields) + return n +} + +func (m *RateLimit_Override_DynamicMetadata_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DynamicMetadata != nil { + l = m.DynamicMetadata.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Stage != nil { + l = (*wrapperspb.UInt32Value)(m.Stage).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DisableKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Actions) > 0 { + for _, e := range m.Actions { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Limit != nil { + l = m.Limit.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HeaderMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.HeaderMatchSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.InvertMatch { + n += 2 + } + if m.TreatMissingHeaderAsEmpty { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *HeaderMatcher_ExactMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ExactMatch) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HeaderMatcher_RangeMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RangeMatch != nil { + if size, ok := interface{}(m.RangeMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RangeMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HeaderMatcher_PresentMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *HeaderMatcher_PrefixMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PrefixMatch) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HeaderMatcher_SuffixMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SuffixMatch) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HeaderMatcher_SafeRegexMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SafeRegexMatch != nil { + if size, ok := interface{}(m.SafeRegexMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SafeRegexMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HeaderMatcher_ContainsMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainsMatch) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HeaderMatcher_StringMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StringMatch != nil { + if size, ok := interface{}(m.StringMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.StringMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *QueryParameterMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) 
+	}
+	if vtmsg, ok := m.QueryParameterMatchSpecifier.(interface{ SizeVT() int }); ok {
+		n += vtmsg.SizeVT()
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *QueryParameterMatcher_StringMatch) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.StringMatch != nil {
+		if size, ok := interface{}(m.StringMatch).(interface {
+			SizeVT() int
+		}); ok {
+			l = size.SizeVT()
+		} else {
+			l = proto.Size(m.StringMatch)
+		}
+		n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+	} else {
+		n += 2
+	}
+	return n
+}
+func (m *QueryParameterMatcher_PresentMatch) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 2
+	return n
+}
+func (m *InternalRedirectPolicy) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.MaxInternalRedirects != nil {
+		l = (*wrapperspb.UInt32Value)(m.MaxInternalRedirects).SizeVT()
+		n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+	}
+	if len(m.RedirectResponseCodes) > 0 {
+		l = 0
+		for _, e := range m.RedirectResponseCodes {
+			l += protohelpers.SizeOfVarint(uint64(e))
+		}
+		n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l
+	}
+	if len(m.Predicates) > 0 {
+		for _, e := range m.Predicates {
+			if size, ok := interface{}(e).(interface {
+				SizeVT() int
+			}); ok {
+				l = size.SizeVT()
+			} else {
+				l = proto.Size(e)
+			}
+			n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+		}
+	}
+	if m.AllowCrossSchemeRedirect {
+		n += 2
+	}
+	if len(m.ResponseHeadersToCopy) > 0 {
+		for _, s := range m.ResponseHeadersToCopy {
+			l = len(s)
+			n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+		}
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *FilterConfig) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Config != nil {
+		l = (*anypb.Any)(m.Config).SizeVT()
+		n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+	}
+	if m.IsOptional {
+		n += 2
+	}
+	if m.Disabled {
+		n += 2
+	}
+	n += len(m.unknownFields)
+	return n
+}
diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_vtproto.pb.go
new file mode 100644
index 00000000000..4f536bb8ded
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_vtproto.pb.go
@@ -0,0 +1,474 @@
+//go:build vtprotobuf
+// +build vtprotobuf

+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/config/route/v3/route.proto

+package routev3

+import (
+	protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+	anypb "github.com/planetscale/vtprotobuf/types/known/anypb"
+	wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb"
+	proto "google.golang.org/protobuf/proto"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)

+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *RouteConfiguration) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteConfiguration) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteConfiguration) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if len(m.TypedPerFilterConfig) > 0 { + for k := range m.TypedPerFilterConfig { + v := m.TypedPerFilterConfig[k] + baseI := i + size, err := (*anypb.Any)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + } + if m.IgnorePathParametersInPathMatching { + i-- + if m.IgnorePathParametersInPathMatching { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x78 + } + if m.IgnorePortInHostMatching { + i-- + if m.IgnorePortInHostMatching { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + } + if len(m.RequestMirrorPolicies) > 0 { + for iNdEx := len(m.RequestMirrorPolicies) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RequestMirrorPolicies[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + } + if len(m.ClusterSpecifierPlugins) > 0 { + for iNdEx := len(m.ClusterSpecifierPlugins) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ClusterSpecifierPlugins[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + } + if m.MaxDirectResponseBodySizeBytes != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxDirectResponseBodySizeBytes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if m.MostSpecificHeaderMutationsWins { + i-- + if m.MostSpecificHeaderMutationsWins { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.Vhds != nil { + size, err := m.Vhds.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if 
len(m.RequestHeadersToRemove) > 0 { + for iNdEx := len(m.RequestHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequestHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.RequestHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RequestHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.ValidateClusters != nil { + size, err := (*wrapperspb.BoolValue)(m.ValidateClusters).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if len(m.RequestHeadersToAdd) > 0 { + for iNdEx := len(m.RequestHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.RequestHeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RequestHeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.ResponseHeadersToRemove) > 0 { + for iNdEx := len(m.ResponseHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResponseHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.ResponseHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.ResponseHeadersToAdd) > 0 { + for iNdEx := len(m.ResponseHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ResponseHeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ResponseHeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.InternalOnlyHeaders) > 0 { + for iNdEx := len(m.InternalOnlyHeaders) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.InternalOnlyHeaders[iNdEx]) + copy(dAtA[i:], m.InternalOnlyHeaders[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.InternalOnlyHeaders[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.VirtualHosts) > 0 { + for iNdEx := len(m.VirtualHosts) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.VirtualHosts[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Vhds) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Vhds) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Vhds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + 
return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ConfigSource != nil { + if vtmsg, ok := interface{}(m.ConfigSource).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConfigSource) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteConfiguration) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.VirtualHosts) > 0 { + for _, e := range m.VirtualHosts { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.InternalOnlyHeaders) > 0 { + for _, s := range m.InternalOnlyHeaders { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResponseHeadersToAdd) > 0 { + for _, e := range m.ResponseHeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResponseHeadersToRemove) > 0 { + for _, s := range m.ResponseHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RequestHeadersToAdd) > 0 { + for _, e := range m.RequestHeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.ValidateClusters != nil { + l = (*wrapperspb.BoolValue)(m.ValidateClusters).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RequestHeadersToRemove) > 0 { + for _, s := range m.RequestHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Vhds != nil { + l = m.Vhds.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MostSpecificHeaderMutationsWins { + n += 2 + } + if m.MaxDirectResponseBodySizeBytes != nil { + l = (*wrapperspb.UInt32Value)(m.MaxDirectResponseBodySizeBytes).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ClusterSpecifierPlugins) > 0 { + for _, e := range m.ClusterSpecifierPlugins { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RequestMirrorPolicies) > 0 { + for _, e := range m.RequestMirrorPolicies { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.IgnorePortInHostMatching { + n += 2 + } + if m.IgnorePathParametersInPathMatching { + n += 2 + } + if len(m.TypedPerFilterConfig) > 0 { + for k, v := range m.TypedPerFilterConfig { + _ = k + _ = v + l = 0 + if v != nil { + l = (*anypb.Any)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 2 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 2 + l 
+ protohelpers.SizeOfVarint(uint64(l))
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *Vhds) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ConfigSource != nil {
+		if size, ok := interface{}(m.ConfigSource).(interface {
+			SizeVT() int
+		}); ok {
+			l = size.SizeVT()
+		} else {
+			l = proto.Size(m.ConfigSource)
+		}
+		n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+	}
+	n += len(m.unknownFields)
+	return n
+}
diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.go
index bf823b0422e..1c02988b691 100644
--- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.go
+++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.go
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // 	protoc-gen-go v1.30.0
-// 	protoc        v4.23.4
+// 	protoc        v5.26.1
 // source: envoy/config/route/v3/scoped_route.proto
 
 package routev3
diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.validate.go
index 5e52729bb82..54c187ce19f 100644
--- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.validate.go
+++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.validate.go
@@ -1,3 +1,4 @@
+//go:build !disable_pgv
 // Code generated by protoc-gen-validate. DO NOT EDIT.
 // source: envoy/config/route/v3/scoped_route.proto
 
diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route_vtproto.pb.go
new file mode 100644
index 00000000000..1e6a7e81cf2
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route_vtproto.pb.go
@@ -0,0 +1,263 @@
+//go:build vtprotobuf
+// +build vtprotobuf

+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/config/route/v3/scoped_route.proto

+package routev3

+import (
+	protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)

+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ScopedRouteConfiguration_Key_Fragment) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRouteConfiguration_Key_Fragment) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRouteConfiguration_Key_Fragment) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Type.(*ScopedRouteConfiguration_Key_Fragment_StringKey); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *ScopedRouteConfiguration_Key_Fragment_StringKey) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRouteConfiguration_Key_Fragment_StringKey) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringKey) + copy(dAtA[i:], m.StringKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StringKey))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *ScopedRouteConfiguration_Key) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRouteConfiguration_Key) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRouteConfiguration_Key) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Fragments) > 0 { + for iNdEx := len(m.Fragments) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Fragments[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ScopedRouteConfiguration) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRouteConfiguration) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRouteConfiguration) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RouteConfiguration != nil { + size, err := m.RouteConfiguration.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + 
dAtA[i] = 0x2a + } + if m.OnDemand { + i-- + if m.OnDemand { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.Key != nil { + size, err := m.Key.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.RouteConfigurationName) > 0 { + i -= len(m.RouteConfigurationName) + copy(dAtA[i:], m.RouteConfigurationName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RouteConfigurationName))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScopedRouteConfiguration_Key_Fragment) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Type.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRouteConfiguration_Key_Fragment_StringKey) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StringKey) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *ScopedRouteConfiguration_Key) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Fragments) > 0 { + for _, e := range m.Fragments { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRouteConfiguration) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RouteConfigurationName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Key != nil { + l = m.Key.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OnDemand { + n += 2 + } + if m.RouteConfiguration != nil { + l = m.RouteConfiguration.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.go index 8a8e6d13bc1..284516c99e1 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/tap/v3/common.proto package tapv3 @@ -13,10 +13,10 @@ import ( v31 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v32 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - duration "github.com/golang/protobuf/ptypes/duration" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -541,12 +541,12 @@ type OutputConfig struct { // truncation. If truncation occurs, the :ref:`truncated // ` field will be set. 
If not specified, the // default is 1KiB. - MaxBufferedRxBytes *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=max_buffered_rx_bytes,json=maxBufferedRxBytes,proto3" json:"max_buffered_rx_bytes,omitempty"` + MaxBufferedRxBytes *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=max_buffered_rx_bytes,json=maxBufferedRxBytes,proto3" json:"max_buffered_rx_bytes,omitempty"` // For buffered tapping, the maximum amount of transmitted body that will be buffered prior to // truncation. If truncation occurs, the :ref:`truncated // ` field will be set. If not specified, the // default is 1KiB. - MaxBufferedTxBytes *wrappers.UInt32Value `protobuf:"bytes,3,opt,name=max_buffered_tx_bytes,json=maxBufferedTxBytes,proto3" json:"max_buffered_tx_bytes,omitempty"` + MaxBufferedTxBytes *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=max_buffered_tx_bytes,json=maxBufferedTxBytes,proto3" json:"max_buffered_tx_bytes,omitempty"` // Indicates whether taps produce a single buffered message per tap, or multiple streamed // messages per tap in the emitted :ref:`TraceWrapper // ` messages. Note that streamed tapping does not @@ -595,14 +595,14 @@ func (x *OutputConfig) GetSinks() []*OutputSink { return nil } -func (x *OutputConfig) GetMaxBufferedRxBytes() *wrappers.UInt32Value { +func (x *OutputConfig) GetMaxBufferedRxBytes() *wrapperspb.UInt32Value { if x != nil { return x.MaxBufferedRxBytes } return nil } -func (x *OutputConfig) GetMaxBufferedTxBytes() *wrappers.UInt32Value { +func (x *OutputConfig) GetMaxBufferedTxBytes() *wrapperspb.UInt32Value { if x != nil { return x.MaxBufferedTxBytes } @@ -827,7 +827,7 @@ type BufferedAdminSink struct { // This may result in returning fewer traces than were requested, and in the case that no traces are // buffered during this time, no traces will be returned. // Specifying 0 for the timeout value (or not specifying a value at all) indicates an infinite timeout. 
- Timeout *duration.Duration `protobuf:"bytes,2,opt,name=timeout,proto3" json:"timeout,omitempty"` + Timeout *durationpb.Duration `protobuf:"bytes,2,opt,name=timeout,proto3" json:"timeout,omitempty"` } func (x *BufferedAdminSink) Reset() { @@ -869,7 +869,7 @@ func (x *BufferedAdminSink) GetMaxTraces() uint64 { return 0 } -func (x *BufferedAdminSink) GetTimeout() *duration.Duration { +func (x *BufferedAdminSink) GetTimeout() *durationpb.Duration { if x != nil { return x.Timeout } @@ -1406,9 +1406,9 @@ var file_envoy_config_tap_v3_common_proto_goTypes = []interface{}{ (*v3.MatchPredicate)(nil), // 13: envoy.config.common.matcher.v3.MatchPredicate (*v31.RuntimeFractionalPercent)(nil), // 14: envoy.config.core.v3.RuntimeFractionalPercent (*v32.HeaderMatcher)(nil), // 15: envoy.config.route.v3.HeaderMatcher - (*wrappers.UInt32Value)(nil), // 16: google.protobuf.UInt32Value + (*wrapperspb.UInt32Value)(nil), // 16: google.protobuf.UInt32Value (*v31.TypedExtensionConfig)(nil), // 17: envoy.config.core.v3.TypedExtensionConfig - (*duration.Duration)(nil), // 18: google.protobuf.Duration + (*durationpb.Duration)(nil), // 18: google.protobuf.Duration (*v31.GrpcService)(nil), // 19: envoy.config.core.v3.GrpcService } var file_envoy_config_tap_v3_common_proto_depIdxs = []int32{ diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.validate.go index dc7a31524c0..04df840e125 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/tap/v3/common.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common_vtproto.pb.go new file mode 100644 index 00000000000..9cedeecd0a6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common_vtproto.pb.go @@ -0,0 +1,1591 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/tap/v3/common.proto + +package tapv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TapConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TapConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TapConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Match != nil { + if vtmsg, ok := interface{}(m.Match).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Match) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.TapEnabled != nil { + if vtmsg, ok := interface{}(m.TapEnabled).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TapEnabled) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.OutputConfig != nil { + size, err := m.OutputConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MatchConfig != nil { + size, err := m.MatchConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MatchPredicate_MatchSet) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchPredicate_MatchSet) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_MatchSet) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Rules[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *MatchPredicate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchPredicate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Rule.(*MatchPredicate_HttpResponseGenericBodyMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpRequestGenericBodyMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpResponseTrailersMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpResponseHeadersMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpRequestTrailersMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpRequestHeadersMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_AnyMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_NotMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_AndMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_OrMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *MatchPredicate_OrMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_OrMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrMatch != nil { + size, err := m.OrMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_AndMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_AndMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AndMatch != nil { + size, err := m.AndMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m 
*MatchPredicate_NotMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_NotMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NotMatch != nil { + size, err := m.NotMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_AnyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_AnyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.AnyMatch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpRequestHeadersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpRequestHeadersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpRequestHeadersMatch != nil { + size, err := m.HttpRequestHeadersMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpRequestTrailersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpRequestTrailersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpRequestTrailersMatch != nil { + size, err := m.HttpRequestTrailersMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpResponseHeadersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpResponseHeadersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpResponseHeadersMatch != nil { + size, err := m.HttpResponseHeadersMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpResponseTrailersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpResponseTrailersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpResponseTrailersMatch != nil { + size, err := m.HttpResponseTrailersMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = 
protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpRequestGenericBodyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpRequestGenericBodyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpRequestGenericBodyMatch != nil { + size, err := m.HttpRequestGenericBodyMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpResponseGenericBodyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpResponseGenericBodyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpResponseGenericBodyMatch != nil { + size, err := m.HttpResponseGenericBodyMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *HttpHeadersMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpHeadersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpHeadersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Headers[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Headers[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], 
m.unknownFields) + } + if msg, ok := m.Rule.(*HttpGenericBodyMatch_GenericTextMatch_BinaryMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*HttpGenericBodyMatch_GenericTextMatch_StringMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *HttpGenericBodyMatch_GenericTextMatch_StringMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpGenericBodyMatch_GenericTextMatch_StringMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringMatch) + copy(dAtA[i:], m.StringMatch) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StringMatch))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.BinaryMatch) + copy(dAtA[i:], m.BinaryMatch) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.BinaryMatch))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *HttpGenericBodyMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpGenericBodyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpGenericBodyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Patterns) > 0 { + for iNdEx := len(m.Patterns) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Patterns[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.BytesLimit != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.BytesLimit)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *OutputConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OutputConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OutputConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Streaming { + i-- + if m.Streaming { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.MaxBufferedTxBytes != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxBufferedTxBytes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.MaxBufferedRxBytes != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxBufferedRxBytes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Sinks) > 0 { + for iNdEx := len(m.Sinks) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Sinks[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *OutputSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OutputSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OutputSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.OutputSinkType.(*OutputSink_CustomSink); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.OutputSinkType.(*OutputSink_BufferedAdmin); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.OutputSinkType.(*OutputSink_StreamingGrpc); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.OutputSinkType.(*OutputSink_FilePerTap); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.OutputSinkType.(*OutputSink_StreamingAdmin); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Format != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Format)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *OutputSink_StreamingAdmin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OutputSink_StreamingAdmin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StreamingAdmin != nil { + size, err := m.StreamingAdmin.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *OutputSink_FilePerTap) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OutputSink_FilePerTap) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FilePerTap != nil { + size, err := m.FilePerTap.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = 
protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *OutputSink_StreamingGrpc) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OutputSink_StreamingGrpc) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StreamingGrpc != nil { + size, err := m.StreamingGrpc.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *OutputSink_BufferedAdmin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OutputSink_BufferedAdmin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.BufferedAdmin != nil { + size, err := m.BufferedAdmin.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *OutputSink_CustomSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OutputSink_CustomSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.CustomSink != nil { + if vtmsg, ok := interface{}(m.CustomSink).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CustomSink) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *StreamingAdminSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamingAdminSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StreamingAdminSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *BufferedAdminSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BufferedAdminSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *BufferedAdminSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Timeout != nil { + size, err := (*durationpb.Duration)(m.Timeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MaxTraces != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MaxTraces)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *FilePerTapSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilePerTapSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FilePerTapSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.PathPrefix) > 0 { + i -= len(m.PathPrefix) + copy(dAtA[i:], m.PathPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PathPrefix))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StreamingGrpcSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamingGrpcSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StreamingGrpcSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.GrpcService != nil { + if vtmsg, ok := interface{}(m.GrpcService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.GrpcService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.TapId) > 0 { + i -= len(m.TapId) + copy(dAtA[i:], m.TapId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TapId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TapConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MatchConfig != nil { + l = m.MatchConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OutputConfig != nil { + l = m.OutputConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TapEnabled != nil { + if size, ok := interface{}(m.TapEnabled).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TapEnabled) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Match != nil { + if size, ok := interface{}(m.Match).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + 
l = proto.Size(m.Match) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *MatchPredicate_MatchSet) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *MatchPredicate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Rule.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *MatchPredicate_OrMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrMatch != nil { + l = m.OrMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_AndMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AndMatch != nil { + l = m.AndMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_NotMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotMatch != nil { + l = m.NotMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_AnyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *MatchPredicate_HttpRequestHeadersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpRequestHeadersMatch != nil { + l = m.HttpRequestHeadersMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpRequestTrailersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpRequestTrailersMatch != nil { + l = m.HttpRequestTrailersMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpResponseHeadersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpResponseHeadersMatch != nil { + l = m.HttpResponseHeadersMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpResponseTrailersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpResponseTrailersMatch != nil { + l = m.HttpResponseTrailersMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpRequestGenericBodyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpRequestGenericBodyMatch != nil { + l = m.HttpRequestGenericBodyMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpResponseGenericBodyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpResponseGenericBodyMatch != nil { + l = m.HttpResponseGenericBodyMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HttpHeadersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Headers) > 0 { + for _, e := range m.Headers { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = 
proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Rule.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *HttpGenericBodyMatch_GenericTextMatch_StringMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StringMatch) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.BinaryMatch) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HttpGenericBodyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BytesLimit != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.BytesLimit)) + } + if len(m.Patterns) > 0 { + for _, e := range m.Patterns { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *OutputConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Sinks) > 0 { + for _, e := range m.Sinks { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.MaxBufferedRxBytes != nil { + l = (*wrapperspb.UInt32Value)(m.MaxBufferedRxBytes).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxBufferedTxBytes != nil { + l = (*wrapperspb.UInt32Value)(m.MaxBufferedTxBytes).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Streaming { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *OutputSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Format != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Format)) + } + if vtmsg, ok := m.OutputSinkType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *OutputSink_StreamingAdmin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StreamingAdmin != nil { + l = m.StreamingAdmin.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *OutputSink_FilePerTap) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FilePerTap != nil { + l = m.FilePerTap.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *OutputSink_StreamingGrpc) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StreamingGrpc != nil { + l = m.StreamingGrpc.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *OutputSink_BufferedAdmin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BufferedAdmin != nil { + l = m.BufferedAdmin.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *OutputSink_CustomSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CustomSink != nil { + if size, ok := interface{}(m.CustomSink).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CustomSink) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *StreamingAdminSink) SizeVT() (n int) { + if m == 
nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *BufferedAdminSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxTraces != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MaxTraces)) + } + if m.Timeout != nil { + l = (*durationpb.Duration)(m.Timeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *FilePerTapSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PathPrefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *StreamingGrpcSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TapId) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.GrpcService != nil { + if size, ok := interface{}(m.GrpcService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.GrpcService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.go index 368dbb17f16..1975c6f020d 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/trace/v3/datadog.proto package tracev3 @@ -11,6 +11,7 @@ import ( _ "github.com/envoyproxy/protoc-gen-validate/validate" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" reflect "reflect" sync "sync" ) @@ -22,6 +23,56 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// Configuration for the Remote Configuration feature. +type DatadogRemoteConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Frequency at which new configuration updates are queried. + // If no value is provided, the default value is delegated to the Datadog tracing library. + PollingInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=polling_interval,json=pollingInterval,proto3" json:"polling_interval,omitempty"` +} + +func (x *DatadogRemoteConfig) Reset() { + *x = DatadogRemoteConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_datadog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DatadogRemoteConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DatadogRemoteConfig) ProtoMessage() {} + +func (x *DatadogRemoteConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_datadog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DatadogRemoteConfig.ProtoReflect.Descriptor instead. 
+func (*DatadogRemoteConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_datadog_proto_rawDescGZIP(), []int{0} +} + +func (x *DatadogRemoteConfig) GetPollingInterval() *durationpb.Duration { + if x != nil { + return x.PollingInterval + } + return nil +} + // Configuration for the Datadog tracer. // [#extension: envoy.tracers.datadog] type DatadogConfig struct { @@ -36,12 +87,18 @@ type DatadogConfig struct { // Optional hostname to use when sending spans to the collector_cluster. Useful for collectors // that require a specific hostname. Defaults to :ref:`collector_cluster ` above. CollectorHostname string `protobuf:"bytes,3,opt,name=collector_hostname,json=collectorHostname,proto3" json:"collector_hostname,omitempty"` + // Enables and configures remote configuration. + // Remote Configuration allows to configure the tracer from Datadog's user interface. + // This feature can drastically increase the number of connections to the Datadog Agent. + // Each tracer regularly polls for configuration updates, and the number of tracers is the product + // of the number of listeners and worker threads. + RemoteConfig *DatadogRemoteConfig `protobuf:"bytes,4,opt,name=remote_config,json=remoteConfig,proto3" json:"remote_config,omitempty"` } func (x *DatadogConfig) Reset() { *x = DatadogConfig{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_trace_v3_datadog_proto_msgTypes[0] + mi := &file_envoy_config_trace_v3_datadog_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -54,7 +111,7 @@ func (x *DatadogConfig) String() string { func (*DatadogConfig) ProtoMessage() {} func (x *DatadogConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_trace_v3_datadog_proto_msgTypes[0] + mi := &file_envoy_config_trace_v3_datadog_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -67,7 +124,7 @@ func (x *DatadogConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use DatadogConfig.ProtoReflect.Descriptor instead. 
func (*DatadogConfig) Descriptor() ([]byte, []int) { - return file_envoy_config_trace_v3_datadog_proto_rawDescGZIP(), []int{0} + return file_envoy_config_trace_v3_datadog_proto_rawDescGZIP(), []int{1} } func (x *DatadogConfig) GetCollectorCluster() string { @@ -91,13 +148,22 @@ func (x *DatadogConfig) GetCollectorHostname() string { return "" } +func (x *DatadogConfig) GetRemoteConfig() *DatadogRemoteConfig { + if x != nil { + return x.RemoteConfig + } + return nil +} + var File_envoy_config_trace_v3_datadog_proto protoreflect.FileDescriptor var file_envoy_config_trace_v3_datadog_proto_rawDesc = []byte{ 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x75, 0x64, + 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, @@ -105,32 +171,43 @@ var file_envoy_config_trace_v3_datadog_proto_rawDesc = []byte{ 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcc, 0x01, 0x0a, 0x0d, 0x44, 0x61, 0x74, 0x61, - 0x64, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34, 0x0a, 0x11, 0x63, 0x6f, 0x6c, - 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x10, 0x63, - 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, - 0x2a, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0b, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x12, 0x63, - 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, - 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0xb3, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x2a, 0x12, - 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, - 0x67, 0x2e, 0x76, 0x34, 0x61, 
0x6c, 0x70, 0x68, 0x61, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, - 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x5b, 0x0a, 0x13, 0x44, 0x61, 0x74, 0x61, 0x64, + 0x6f, 0x67, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x44, + 0x0a, 0x10, 0x70, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x70, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x22, 0x9d, 0x02, 0x0a, 0x0d, 0x44, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x2a, 0x0a, 0x0c, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x48, + 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4f, 0x0a, 0x0d, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, - 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x44, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, - 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, - 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x52, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, + 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x42, 0xb3, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x2a, 0x12, 0x28, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, + 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, + 0x23, 0x69, 0x6f, 0x2e, 0x65, 
0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x44, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, + 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -145,16 +222,20 @@ func file_envoy_config_trace_v3_datadog_proto_rawDescGZIP() []byte { return file_envoy_config_trace_v3_datadog_proto_rawDescData } -var file_envoy_config_trace_v3_datadog_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_trace_v3_datadog_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_envoy_config_trace_v3_datadog_proto_goTypes = []interface{}{ - (*DatadogConfig)(nil), // 0: envoy.config.trace.v3.DatadogConfig + (*DatadogRemoteConfig)(nil), // 0: envoy.config.trace.v3.DatadogRemoteConfig + (*DatadogConfig)(nil), // 1: envoy.config.trace.v3.DatadogConfig + (*durationpb.Duration)(nil), // 2: google.protobuf.Duration } var file_envoy_config_trace_v3_datadog_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 2, // 0: envoy.config.trace.v3.DatadogRemoteConfig.polling_interval:type_name -> google.protobuf.Duration + 0, // 1: envoy.config.trace.v3.DatadogConfig.remote_config:type_name -> envoy.config.trace.v3.DatadogRemoteConfig + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_envoy_config_trace_v3_datadog_proto_init() } @@ -164,6 +245,18 @@ func file_envoy_config_trace_v3_datadog_proto_init() { } if !protoimpl.UnsafeEnabled { file_envoy_config_trace_v3_datadog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DatadogRemoteConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_trace_v3_datadog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DatadogConfig); i { case 0: return &v.state @@ -182,7 +275,7 @@ func file_envoy_config_trace_v3_datadog_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_config_trace_v3_datadog_proto_rawDesc, NumEnums: 0, - NumMessages: 1, + NumMessages: 2, NumExtensions: 0, NumServices: 0, }, diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.validate.go index 9aacd2f9903..43ac4c898b4 100644 --- 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/trace/v3/datadog.proto @@ -35,6 +36,137 @@ var ( _ = sort.Sort ) +// Validate checks the field values on DatadogRemoteConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DatadogRemoteConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DatadogRemoteConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DatadogRemoteConfigMultiError, or nil if none found. +func (m *DatadogRemoteConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *DatadogRemoteConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetPollingInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DatadogRemoteConfigValidationError{ + field: "PollingInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DatadogRemoteConfigValidationError{ + field: "PollingInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPollingInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DatadogRemoteConfigValidationError{ + field: "PollingInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DatadogRemoteConfigMultiError(errors) + } + + return nil +} + +// DatadogRemoteConfigMultiError is an error wrapping multiple validation +// errors returned by DatadogRemoteConfig.ValidateAll() if the designated +// constraints aren't met. +type DatadogRemoteConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DatadogRemoteConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DatadogRemoteConfigMultiError) AllErrors() []error { return m } + +// DatadogRemoteConfigValidationError is the validation error returned by +// DatadogRemoteConfig.Validate if the designated constraints aren't met. +type DatadogRemoteConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DatadogRemoteConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DatadogRemoteConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DatadogRemoteConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DatadogRemoteConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DatadogRemoteConfigValidationError) ErrorName() string { + return "DatadogRemoteConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e DatadogRemoteConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDatadogRemoteConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DatadogRemoteConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DatadogRemoteConfigValidationError{} + // Validate checks the field values on DatadogConfig with the rules defined in // the proto definition for this message. If any rules are violated, the first // error encountered is returned, or nil if there are no violations. @@ -81,6 +213,35 @@ func (m *DatadogConfig) validate(all bool) error { // no validation rules for CollectorHostname + if all { + switch v := interface{}(m.GetRemoteConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DatadogConfigValidationError{ + field: "RemoteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DatadogConfigValidationError{ + field: "RemoteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRemoteConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DatadogConfigValidationError{ + field: "RemoteConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + if len(errors) > 0 { return DatadogConfigMultiError(errors) } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog_vtproto.pb.go new file mode 100644 index 00000000000..b4cede7f003 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog_vtproto.pb.go @@ -0,0 +1,167 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/datadog.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *DatadogRemoteConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DatadogRemoteConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DatadogRemoteConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PollingInterval != nil { + size, err := (*durationpb.Duration)(m.PollingInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DatadogConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DatadogConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DatadogConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RemoteConfig != nil { + size, err := m.RemoteConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.CollectorHostname) > 0 { + i -= len(m.CollectorHostname) + copy(dAtA[i:], m.CollectorHostname) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CollectorHostname))) + i-- + dAtA[i] = 0x1a + } + if len(m.ServiceName) > 0 { + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x12 + } + if len(m.CollectorCluster) > 0 { + i -= len(m.CollectorCluster) + copy(dAtA[i:], m.CollectorCluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CollectorCluster))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DatadogRemoteConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PollingInterval != nil { + l = (*durationpb.Duration)(m.PollingInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DatadogConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CollectorCluster) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ServiceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.CollectorHostname) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RemoteConfig != nil { + l = m.RemoteConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.go index 6f0fa9dd631..a3dd5bb9598 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/trace/v3/dynamic_ot.proto package tracev3 @@ -10,9 +10,9 @@ import ( _ "github.com/cncf/xds/go/udpa/annotations" _ "github.com/envoyproxy/go-control-plane/envoy/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" - _struct "github.com/golang/protobuf/ptypes/struct" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" reflect "reflect" sync "sync" ) @@ -24,10 +24,10 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// DynamicOtConfig is used to dynamically load a tracer from a shared library +// DynamicOtConfig was used to dynamically load a tracer from a shared library // that implements the `OpenTracing dynamic loading API // `_. -// [#extension: envoy.tracers.dynamic_ot] +// [#not-implemented-hide:] type DynamicOtConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -42,7 +42,7 @@ type DynamicOtConfig struct { // library. // // Deprecated: Marked as deprecated in envoy/config/trace/v3/dynamic_ot.proto. - Config *_struct.Struct `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + Config *structpb.Struct `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` } func (x *DynamicOtConfig) Reset() { @@ -86,7 +86,7 @@ func (x *DynamicOtConfig) GetLibrary() string { } // Deprecated: Marked as deprecated in envoy/config/trace/v3/dynamic_ot.proto. 
-func (x *DynamicOtConfig) GetConfig() *_struct.Struct { +func (x *DynamicOtConfig) GetConfig() *structpb.Struct { if x != nil { return x.Config } @@ -111,30 +111,31 @@ var file_envoy_config_trace_v3_dynamic_ot_proto_rawDesc = []byte{ 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x01, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb7, 0x01, 0x0a, 0x0f, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4f, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x2c, 0x0a, 0x07, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, - 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x07, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x12, - 0x3c, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, - 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2c, 0x9a, - 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x79, 0x6e, 0x61, - 0x6d, 0x69, 0x63, 0x4f, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0xb8, 0x01, 0xf2, 0x98, - 0xfe, 0x8f, 0x05, 0x2d, 0x12, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x73, 0x2e, 0x64, - 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x6f, 0x74, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0e, - 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4f, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x67, 0x12, 0x32, 0x0a, 0x07, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x18, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, 0x01, 0x52, 0x07, 0x6c, 0x69, + 0x62, 0x72, 0x61, 0x72, 0x79, 0x12, 0x42, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x11, + 0x92, 
0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, + 0x01, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, + 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4f, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0xb8, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x2d, + 0x12, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x73, 0x2e, 0x64, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x5f, 0x6f, 0x74, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xba, 0x80, 0xc8, + 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x4f, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -152,7 +153,7 @@ func file_envoy_config_trace_v3_dynamic_ot_proto_rawDescGZIP() []byte { var file_envoy_config_trace_v3_dynamic_ot_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_envoy_config_trace_v3_dynamic_ot_proto_goTypes = []interface{}{ (*DynamicOtConfig)(nil), // 0: envoy.config.trace.v3.DynamicOtConfig - (*_struct.Struct)(nil), // 1: google.protobuf.Struct + (*structpb.Struct)(nil), // 1: google.protobuf.Struct } var file_envoy_config_trace_v3_dynamic_ot_proto_depIdxs = []int32{ 1, // 0: envoy.config.trace.v3.DynamicOtConfig.config:type_name -> google.protobuf.Struct diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.validate.go index fdd7dbf9b32..2e936002750 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/trace/v3/dynamic_ot.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot_vtproto.pb.go new file mode 100644 index 00000000000..594ee84613a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot_vtproto.pb.go @@ -0,0 +1,88 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
+// source: envoy/config/trace/v3/dynamic_ot.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *DynamicOtConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DynamicOtConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicOtConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Config != nil { + size, err := (*structpb.Struct)(m.Config).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Library) > 0 { + i -= len(m.Library) + copy(dAtA[i:], m.Library) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Library))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DynamicOtConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Library) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Config != nil { + l = (*structpb.Struct)(m.Config).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.go index 9eb67464473..98a8a86f15f 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/trace/v3/http_tracer.proto package tracev3 @@ -9,9 +9,9 @@ package tracev3 import ( _ "github.com/cncf/xds/go/udpa/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any1 "github.com/golang/protobuf/ptypes/any" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" sync "sync" ) @@ -150,7 +150,7 @@ func (m *Tracing_Http) GetConfigType() isTracing_Http_ConfigType { return nil } -func (x *Tracing_Http) GetTypedConfig() *any1.Any { +func (x *Tracing_Http) GetTypedConfig() *anypb.Any { if x, ok := x.GetConfigType().(*Tracing_Http_TypedConfig); ok { return x.TypedConfig } @@ -162,7 +162,7 @@ type isTracing_Http_ConfigType interface { } type Tracing_Http_TypedConfig struct { - TypedConfig *any1.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` } func (*Tracing_Http_TypedConfig) isTracing_Http_ConfigType() {} @@ -226,7 +226,7 @@ var file_envoy_config_trace_v3_http_tracer_proto_msgTypes = make([]protoimpl.Mes var file_envoy_config_trace_v3_http_tracer_proto_goTypes = []interface{}{ (*Tracing)(nil), // 0: envoy.config.trace.v3.Tracing (*Tracing_Http)(nil), // 1: envoy.config.trace.v3.Tracing.Http - (*any1.Any)(nil), // 2: google.protobuf.Any + (*anypb.Any)(nil), // 2: google.protobuf.Any } var file_envoy_config_trace_v3_http_tracer_proto_depIdxs = []int32{ 1, // 0: envoy.config.trace.v3.Tracing.http:type_name -> envoy.config.trace.v3.Tracing.Http diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.validate.go index 64a37da88a2..60cec43ae25 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/trace/v3/http_tracer.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer_vtproto.pb.go new file mode 100644 index 00000000000..756a6de2caf --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer_vtproto.pb.go @@ -0,0 +1,178 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/http_tracer.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Tracing_Http) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Tracing_Http) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Tracing_Http) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*Tracing_Http_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Tracing_Http_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Tracing_Http_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Tracing) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Tracing) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Tracing) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Http != nil { + size, err := m.Http.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Tracing_Http) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Tracing_Http_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Tracing) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Http != nil { + l = m.Http.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.go index 2b62d92a9b1..e7eeb33ef67 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/trace/v3/lightstep.proto package tracev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.validate.go index e4602a3b878..d9ebdddca93 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/trace/v3/lightstep.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep_vtproto.pb.go new file mode 100644 index 00000000000..c1f9240a66e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep_vtproto.pb.go @@ -0,0 +1,145 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/lightstep.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *LightstepConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LightstepConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LightstepConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AccessToken != nil { + if vtmsg, ok := interface{}(m.AccessToken).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AccessToken) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if len(m.PropagationModes) > 0 { + var pksize2 int + for _, num := range m.PropagationModes { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.PropagationModes { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x1a + } + if len(m.AccessTokenFile) > 0 { + i -= len(m.AccessTokenFile) + copy(dAtA[i:], m.AccessTokenFile) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AccessTokenFile))) + i-- + dAtA[i] = 0x12 + } + if len(m.CollectorCluster) > 0 { + i -= len(m.CollectorCluster) + copy(dAtA[i:], m.CollectorCluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CollectorCluster))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LightstepConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CollectorCluster) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.AccessTokenFile) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.PropagationModes) > 0 { + l = 0 + for _, e := range m.PropagationModes { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if m.AccessToken != nil { + if size, ok := interface{}(m.AccessToken).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.AccessToken) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.go index f76a34bd1d2..b7e7c69f615 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/trace/v3/opencensus.proto package tracev3 @@ -318,96 +318,101 @@ var file_envoy_config_trace_v3_opencensus_proto_rawDesc = []byte{ 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc2, 0x09, + 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x90, 0x0a, 0x0a, 0x10, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x56, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x5c, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0b, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x43, 0x0a, 0x17, 0x73, 0x74, - 0x64, 0x6f, 0x75, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x5f, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, 0x86, - 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x15, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, - 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, - 0x4d, 0x0a, 0x1c, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x5f, 0x65, - 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, - 0x18, 0x01, 0x52, 0x1a, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x45, - 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x41, - 0x0a, 0x16, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, - 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x14, 0x73, 0x74, 0x61, - 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, - 0x64, 0x12, 0x3c, 0x0a, 0x13, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, - 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, - 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x12, 0x73, 0x74, 0x61, - 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, - 0x68, 0x0a, 0x18, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x5f, 0x67, - 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, + 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, + 0x01, 0x18, 0x01, 0x52, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 
0x12, 0x49, 0x0a, 0x17, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x6f, 0x72, + 0x74, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, + 0x05, 0x01, 0x18, 0x01, 0x52, 0x15, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x45, 0x78, 0x70, 0x6f, + 0x72, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x53, 0x0a, 0x1c, 0x73, + 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, 0x6f, 0x72, + 0x74, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, + 0x05, 0x01, 0x18, 0x01, 0x52, 0x1a, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, + 0x72, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x12, 0x47, 0x0a, 0x16, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, + 0x01, 0x18, 0x01, 0x52, 0x14, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, + 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x42, 0x0a, 0x13, 0x73, 0x74, 0x61, + 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, + 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, 0x01, 0x52, 0x12, 0x73, 0x74, 0x61, 0x63, 0x6b, + 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x6e, 0x0a, + 0x18, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x5f, 0x67, 0x72, 0x70, + 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, + 0xd2, 0x05, 0x01, 0x18, 0x01, 0x52, 0x16, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, + 0x65, 0x72, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, + 0x17, 0x7a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, + 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x42, 0x11, + 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, + 0x01, 0x52, 0x15, 0x7a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, + 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x0a, 0x7a, 0x69, 0x70, 0x6b, + 0x69, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x11, 0x92, 0xc7, + 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, 0x01, 0x52, + 0x09, 0x7a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x4b, 0x0a, 0x18, 0x6f, 0x63, + 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x5f, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x11, 0x92, 0xc7, + 0x86, 0xd8, 0x04, 0x03, 
0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, 0x01, 0x52, + 0x16, 0x6f, 0x63, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, + 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x3a, 0x0a, 0x0f, 0x6f, 0x63, 0x61, 0x67, 0x65, + 0x6e, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, + 0x01, 0x18, 0x01, 0x52, 0x0e, 0x6f, 0x63, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x12, 0x66, 0x0a, 0x14, 0x6f, 0x63, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x67, + 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, - 0x01, 0x52, 0x16, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x47, 0x72, - 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x17, 0x7a, 0x69, 0x70, - 0x6b, 0x69, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, - 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x15, 0x7a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x45, - 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x2a, - 0x0a, 0x0a, 0x7a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, - 0x09, 0x7a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x45, 0x0a, 0x18, 0x6f, 0x63, - 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x5f, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, - 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x16, 0x6f, 0x63, 0x61, 0x67, 0x65, - 0x6e, 0x74, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x12, 0x34, 0x0a, 0x0f, 0x6f, 0x63, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x61, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, - 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0e, 0x6f, 0x63, 0x61, 0x67, 0x65, 0x6e, 0x74, - 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x60, 0x0a, 0x14, 0x6f, 0x63, 0x61, 0x67, 0x65, - 0x6e, 0x74, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, - 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, - 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, - 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x12, 0x6f, 0x63, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x47, 0x72, - 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x77, 0x0a, 0x16, 0x69, 0x6e, 0x63, - 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x78, 0x74, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 
0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x42, - 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x14, 0x69, 0x6e, - 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x12, 0x77, 0x0a, 0x16, 0x6f, 0x75, 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x5f, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x09, 0x20, 0x03, - 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x6e, 0x43, - 0x65, 0x6e, 0x73, 0x75, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x72, 0x61, 0x63, - 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, - 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x14, 0x6f, 0x75, 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x54, - 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x60, 0x0a, 0x0c, 0x54, - 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x08, 0x0a, 0x04, 0x4e, - 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x43, - 0x4f, 0x4e, 0x54, 0x45, 0x58, 0x54, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x47, 0x52, 0x50, 0x43, - 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x42, 0x49, 0x4e, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, - 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x54, - 0x45, 0x58, 0x54, 0x10, 0x03, 0x12, 0x06, 0x0a, 0x02, 0x42, 0x33, 0x10, 0x04, 0x3a, 0x2d, 0x9a, - 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x6e, - 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x04, 0x08, 0x07, - 0x10, 0x08, 0x42, 0xb9, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x2d, 0x12, 0x2b, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, - 0x61, 0x63, 0x65, 0x72, 0x73, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, - 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, - 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, - 0x63, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x4f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, - 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, - 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, - 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, - 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x76, 0x33, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x76, 0x69, 0x63, 0x65, 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, + 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, 0x01, 0x52, 0x12, 0x6f, 0x63, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 
0x76, 0x69, 0x63, 0x65, 0x12, 0x7d, 0x0a, 0x16, 0x69, + 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, + 0x05, 0x01, 0x18, 0x01, 0x52, 0x14, 0x69, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x54, 0x72, + 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x7d, 0x0a, 0x16, 0x6f, 0x75, + 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, + 0x01, 0x18, 0x01, 0x52, 0x14, 0x6f, 0x75, 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x61, + 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x60, 0x0a, 0x0c, 0x54, 0x72, 0x61, + 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, + 0x45, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x4e, + 0x54, 0x45, 0x58, 0x54, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x47, 0x52, 0x50, 0x43, 0x5f, 0x54, + 0x52, 0x41, 0x43, 0x45, 0x5f, 0x42, 0x49, 0x4e, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x43, 0x4c, + 0x4f, 0x55, 0x44, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x58, + 0x54, 0x10, 0x03, 0x12, 0x06, 0x0a, 0x02, 0x42, 0x33, 0x10, 0x04, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, + 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, + 0x6e, 0x73, 0x75, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, + 0x42, 0xb9, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x2d, 0x12, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x72, 0x73, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x76, + 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, + 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x4f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, + 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 
0x2f, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.validate.go index 7b7a9911336..4e8286181c6 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/trace/v3/opencensus.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus_vtproto.pb.go new file mode 100644 index 00000000000..66b08bf86ed --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus_vtproto.pb.go @@ -0,0 +1,311 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/opencensus.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *OpenCensusConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OpenCensusConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OpenCensusConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.OcagentGrpcService != nil { + if vtmsg, ok := interface{}(m.OcagentGrpcService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.OcagentGrpcService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x72 + } + if m.StackdriverGrpcService != nil { + if vtmsg, ok := interface{}(m.StackdriverGrpcService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.StackdriverGrpcService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + if len(m.OcagentAddress) > 0 { + i -= len(m.OcagentAddress) + copy(dAtA[i:], m.OcagentAddress) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.OcagentAddress))) + i-- + dAtA[i] = 0x62 + } + if m.OcagentExporterEnabled { + i-- + if m.OcagentExporterEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } + if len(m.StackdriverAddress) > 0 { + i -= len(m.StackdriverAddress) + copy(dAtA[i:], m.StackdriverAddress) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StackdriverAddress))) + i-- + dAtA[i] = 0x52 + } + if len(m.OutgoingTraceContext) > 0 { + var pksize2 int + for _, num := range m.OutgoingTraceContext { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.OutgoingTraceContext { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x4a + } + if len(m.IncomingTraceContext) > 0 { + var pksize4 int + for _, num := range m.IncomingTraceContext { + pksize4 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize4 + j3 := i + for _, num1 := range m.IncomingTraceContext { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j3] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j3++ + } + dAtA[j3] = uint8(num) + j3++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize4)) + i-- + dAtA[i] = 0x42 + } + if len(m.ZipkinUrl) > 0 { + i -= len(m.ZipkinUrl) + copy(dAtA[i:], m.ZipkinUrl) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ZipkinUrl))) + i-- + dAtA[i] = 
0x32 + } + if m.ZipkinExporterEnabled { + i-- + if m.ZipkinExporterEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.StackdriverProjectId) > 0 { + i -= len(m.StackdriverProjectId) + copy(dAtA[i:], m.StackdriverProjectId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StackdriverProjectId))) + i-- + dAtA[i] = 0x22 + } + if m.StackdriverExporterEnabled { + i-- + if m.StackdriverExporterEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.StdoutExporterEnabled { + i-- + if m.StdoutExporterEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.TraceConfig != nil { + if vtmsg, ok := interface{}(m.TraceConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TraceConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OpenCensusConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TraceConfig != nil { + if size, ok := interface{}(m.TraceConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TraceConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StdoutExporterEnabled { + n += 2 + } + if m.StackdriverExporterEnabled { + n += 2 + } + l = len(m.StackdriverProjectId) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ZipkinExporterEnabled { + n += 2 + } + l = len(m.ZipkinUrl) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.IncomingTraceContext) > 0 { + l = 0 + for _, e := range m.IncomingTraceContext { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if len(m.OutgoingTraceContext) > 0 { + l = 0 + for _, e := range m.OutgoingTraceContext { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + l = len(m.StackdriverAddress) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OcagentExporterEnabled { + n += 2 + } + l = len(m.OcagentAddress) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StackdriverGrpcService != nil { + if size, ok := interface{}(m.StackdriverGrpcService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.StackdriverGrpcService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OcagentGrpcService != nil { + if size, ok := interface{}(m.OcagentGrpcService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.OcagentGrpcService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.go index f652e987ea9..a0087e25807 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.go +++ 
b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/trace/v3/opentelemetry.proto package tracev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.validate.go index d1043ccf4c6..101f73bbe84 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/trace/v3/opentelemetry.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry_vtproto.pb.go new file mode 100644 index 00000000000..d6c6280515f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry_vtproto.pb.go @@ -0,0 +1,206 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/opentelemetry.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *OpenTelemetryConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OpenTelemetryConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OpenTelemetryConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Sampler != nil { + if vtmsg, ok := interface{}(m.Sampler).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Sampler) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + if len(m.ResourceDetectors) > 0 { + for iNdEx := len(m.ResourceDetectors) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ResourceDetectors[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ResourceDetectors[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + } + if m.HttpService != nil { + if vtmsg, ok := interface{}(m.HttpService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HttpService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if len(m.ServiceName) > 0 { + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x12 + } + if m.GrpcService != nil { + if vtmsg, ok := interface{}(m.GrpcService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.GrpcService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OpenTelemetryConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GrpcService != nil { + if size, ok := interface{}(m.GrpcService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.GrpcService) 
+ } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ServiceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HttpService != nil { + if size, ok := interface{}(m.HttpService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.HttpService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ResourceDetectors) > 0 { + for _, e := range m.ResourceDetectors { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Sampler != nil { + if size, ok := interface{}(m.Sampler).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Sampler) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.go index c032d11b559..662b1bea5d5 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/trace/v3/service.proto package tracev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.validate.go index 628a189dcd0..87b74b55435 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/trace/v3/service.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service_vtproto.pb.go new file mode 100644 index 00000000000..71fddd38910 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service_vtproto.pb.go @@ -0,0 +1,95 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/service.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TraceServiceConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TraceServiceConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TraceServiceConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.GrpcService != nil { + if vtmsg, ok := interface{}(m.GrpcService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.GrpcService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TraceServiceConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GrpcService != nil { + if size, ok := interface{}(m.GrpcService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.GrpcService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.go index ad4f4eb5168..948ea5f1381 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/trace/v3/skywalking.proto package tracev3 @@ -10,9 +10,9 @@ import ( _ "github.com/cncf/xds/go/udpa/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -116,7 +116,7 @@ type ClientConfig struct { // Envoy caches the segment in memory when the SkyWalking backend service is temporarily unavailable. // This field specifies the maximum number of segments that can be cached. If not specified, the // default is 1024. 
- MaxCacheSize *wrappers.UInt32Value `protobuf:"bytes,4,opt,name=max_cache_size,json=maxCacheSize,proto3" json:"max_cache_size,omitempty"` + MaxCacheSize *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=max_cache_size,json=maxCacheSize,proto3" json:"max_cache_size,omitempty"` } func (x *ClientConfig) Reset() { @@ -179,7 +179,7 @@ func (x *ClientConfig) GetBackendToken() string { return "" } -func (x *ClientConfig) GetMaxCacheSize() *wrappers.UInt32Value { +func (x *ClientConfig) GetMaxCacheSize() *wrapperspb.UInt32Value { if x != nil { return x.MaxCacheSize } @@ -270,10 +270,10 @@ func file_envoy_config_trace_v3_skywalking_proto_rawDescGZIP() []byte { var file_envoy_config_trace_v3_skywalking_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_envoy_config_trace_v3_skywalking_proto_goTypes = []interface{}{ - (*SkyWalkingConfig)(nil), // 0: envoy.config.trace.v3.SkyWalkingConfig - (*ClientConfig)(nil), // 1: envoy.config.trace.v3.ClientConfig - (*v3.GrpcService)(nil), // 2: envoy.config.core.v3.GrpcService - (*wrappers.UInt32Value)(nil), // 3: google.protobuf.UInt32Value + (*SkyWalkingConfig)(nil), // 0: envoy.config.trace.v3.SkyWalkingConfig + (*ClientConfig)(nil), // 1: envoy.config.trace.v3.ClientConfig + (*v3.GrpcService)(nil), // 2: envoy.config.core.v3.GrpcService + (*wrapperspb.UInt32Value)(nil), // 3: google.protobuf.UInt32Value } var file_envoy_config_trace_v3_skywalking_proto_depIdxs = []int32{ 2, // 0: envoy.config.trace.v3.SkyWalkingConfig.grpc_service:type_name -> envoy.config.core.v3.GrpcService diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.validate.go index 34d38cf15e8..559bdb49392 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/trace/v3/skywalking.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking_vtproto.pb.go new file mode 100644 index 00000000000..8af59a37d51 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking_vtproto.pb.go @@ -0,0 +1,224 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/skywalking.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *SkyWalkingConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SkyWalkingConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SkyWalkingConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientConfig != nil { + size, err := m.ClientConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.GrpcService != nil { + if vtmsg, ok := interface{}(m.GrpcService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.GrpcService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClientConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxCacheSize != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxCacheSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if msg, ok := m.BackendTokenSpecifier.(*ClientConfig_BackendToken); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.InstanceName) > 0 { + i -= len(m.InstanceName) + copy(dAtA[i:], m.InstanceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.InstanceName))) + i-- + dAtA[i] = 0x12 + } + if len(m.ServiceName) > 0 { + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientConfig_BackendToken) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClientConfig_BackendToken) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.BackendToken) + copy(dAtA[i:], m.BackendToken) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.BackendToken))) + i-- + dAtA[i] = 0x1a + return len(dAtA) 
- i, nil +} +func (m *SkyWalkingConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GrpcService != nil { + if size, ok := interface{}(m.GrpcService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.GrpcService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientConfig != nil { + l = m.ClientConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClientConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ServiceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.InstanceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.BackendTokenSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.MaxCacheSize != nil { + l = (*wrapperspb.UInt32Value)(m.MaxCacheSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClientConfig_BackendToken) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.BackendToken) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.go index b69ff01e7dc..94ded5e4a21 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/trace/v3/trace.proto package tracev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.validate.go index 806c2c1ff99..1797e4924f9 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/trace/v3/trace.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.go index 7b3bc5cc87b..c040e1e0ee5 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/trace/v3/xray.proto package tracev3 @@ -10,9 +10,9 @@ import ( _ "github.com/cncf/xds/go/udpa/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - _struct "github.com/golang/protobuf/ptypes/struct" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" reflect "reflect" sync "sync" ) @@ -115,7 +115,7 @@ type XRayConfig_SegmentFields struct { Origin string `protobuf:"bytes,1,opt,name=origin,proto3" json:"origin,omitempty"` // AWS resource metadata dictionary. // See: `X-Ray Segment Document documentation `__ - Aws *_struct.Struct `protobuf:"bytes,2,opt,name=aws,proto3" json:"aws,omitempty"` + Aws *structpb.Struct `protobuf:"bytes,2,opt,name=aws,proto3" json:"aws,omitempty"` } func (x *XRayConfig_SegmentFields) Reset() { @@ -157,7 +157,7 @@ func (x *XRayConfig_SegmentFields) GetOrigin() string { return "" } -func (x *XRayConfig_SegmentFields) GetAws() *_struct.Struct { +func (x *XRayConfig_SegmentFields) GetAws() *structpb.Struct { if x != nil { return x.Aws } @@ -244,7 +244,7 @@ var file_envoy_config_trace_v3_xray_proto_goTypes = []interface{}{ (*XRayConfig_SegmentFields)(nil), // 1: envoy.config.trace.v3.XRayConfig.SegmentFields (*v3.SocketAddress)(nil), // 2: envoy.config.core.v3.SocketAddress (*v3.DataSource)(nil), // 3: envoy.config.core.v3.DataSource - (*_struct.Struct)(nil), // 4: google.protobuf.Struct + (*structpb.Struct)(nil), // 4: google.protobuf.Struct } var file_envoy_config_trace_v3_xray_proto_depIdxs = []int32{ 2, // 0: envoy.config.trace.v3.XRayConfig.daemon_endpoint:type_name -> envoy.config.core.v3.SocketAddress diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.validate.go index bf9abc48e20..a48a838edd2 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/trace/v3/xray.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray_vtproto.pb.go new file mode 100644 index 00000000000..b5bfdff5b97 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray_vtproto.pb.go @@ -0,0 +1,221 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/xray.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *XRayConfig_SegmentFields) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *XRayConfig_SegmentFields) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *XRayConfig_SegmentFields) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Aws != nil { + size, err := (*structpb.Struct)(m.Aws).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Origin) > 0 { + i -= len(m.Origin) + copy(dAtA[i:], m.Origin) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Origin))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *XRayConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *XRayConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *XRayConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SegmentFields != nil { + size, err := m.SegmentFields.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.SamplingRuleManifest != nil { + if vtmsg, ok := interface{}(m.SamplingRuleManifest).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SamplingRuleManifest) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if len(m.SegmentName) > 0 { + i -= len(m.SegmentName) + copy(dAtA[i:], m.SegmentName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SegmentName))) + i-- + dAtA[i] = 0x12 + } + if m.DaemonEndpoint != nil { + if vtmsg, ok := interface{}(m.DaemonEndpoint).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DaemonEndpoint) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + 
dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *XRayConfig_SegmentFields) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Origin) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Aws != nil { + l = (*structpb.Struct)(m.Aws).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *XRayConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DaemonEndpoint != nil { + if size, ok := interface{}(m.DaemonEndpoint).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DaemonEndpoint) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.SegmentName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SamplingRuleManifest != nil { + if size, ok := interface{}(m.SamplingRuleManifest).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SamplingRuleManifest) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SegmentFields != nil { + l = m.SegmentFields.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.go index e9ca164f869..bf96a43d7aa 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/config/trace/v3/zipkin.proto package tracev3 @@ -10,9 +10,9 @@ import ( _ "github.com/cncf/xds/go/udpa/annotations" _ "github.com/envoyproxy/go-control-plane/envoy/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -107,7 +107,7 @@ type ZipkinConfig struct { TraceId_128Bit bool `protobuf:"varint,3,opt,name=trace_id_128bit,json=traceId128bit,proto3" json:"trace_id_128bit,omitempty"` // Determines whether client and server spans will share the same span context. // The default value is true. - SharedSpanContext *wrappers.BoolValue `protobuf:"bytes,4,opt,name=shared_span_context,json=sharedSpanContext,proto3" json:"shared_span_context,omitempty"` + SharedSpanContext *wrapperspb.BoolValue `protobuf:"bytes,4,opt,name=shared_span_context,json=sharedSpanContext,proto3" json:"shared_span_context,omitempty"` // Determines the selected collector endpoint version. CollectorEndpointVersion ZipkinConfig_CollectorEndpointVersion `protobuf:"varint,5,opt,name=collector_endpoint_version,json=collectorEndpointVersion,proto3,enum=envoy.config.trace.v3.ZipkinConfig_CollectorEndpointVersion" json:"collector_endpoint_version,omitempty"` // Optional hostname to use when sending spans to the collector_cluster. 
Useful for collectors @@ -126,6 +126,12 @@ type ZipkinConfig struct { // If this is set to true, then the // :ref:`start_child_span of router ` // SHOULD be set to true also to ensure the correctness of trace chain. + // + // Both this field and ``start_child_span`` are deprecated by the + // :ref:`spawn_upstream_span `. + // Please use that ``spawn_upstream_span`` field to control the span creation. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/zipkin.proto. SplitSpansForRequest bool `protobuf:"varint,7,opt,name=split_spans_for_request,json=splitSpansForRequest,proto3" json:"split_spans_for_request,omitempty"` } @@ -182,7 +188,7 @@ func (x *ZipkinConfig) GetTraceId_128Bit() bool { return false } -func (x *ZipkinConfig) GetSharedSpanContext() *wrappers.BoolValue { +func (x *ZipkinConfig) GetSharedSpanContext() *wrapperspb.BoolValue { if x != nil { return x.SharedSpanContext } @@ -203,6 +209,7 @@ func (x *ZipkinConfig) GetCollectorHostname() string { return "" } +// Deprecated: Marked as deprecated in envoy/config/trace/v3/zipkin.proto. func (x *ZipkinConfig) GetSplitSpansForRequest() bool { if x != nil { return x.SplitSpansForRequest @@ -228,7 +235,7 @@ var file_envoy_config_trace_v3_zipkin_proto_rawDesc = []byte{ 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf7, 0x04, 0x0a, 0x0c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x84, 0x05, 0x0a, 0x0c, 0x5a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, @@ -254,33 +261,33 @@ var file_envoy_config_trace_v3_zipkin_proto_rawDesc = []byte{ 0x69, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x70, 0x6c, + 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x17, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x73, 0x70, 0x6c, 0x69, - 0x74, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x46, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x22, 0x78, 0x0a, 0x18, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x45, 0x6e, 0x64, - 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x25, - 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x55, - 0x4e, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x44, 0x4f, 0x5f, 0x4e, 0x4f, - 0x54, 0x5f, 0x55, 0x53, 0x45, 0x10, 0x00, 0x1a, 0x08, 0xa8, 0xf7, 0xb4, 0x8b, 0x02, 0x01, 0x08, - 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x54, 0x54, 0x50, 0x5f, 0x4a, 0x53, 0x4f, 0x4e, 0x10, 0x01, - 0x12, 0x0e, 0x0a, 0x0a, 0x48, 0x54, 0x54, 0x50, 0x5f, 0x50, 0x52, 0x4f, 
0x54, 0x4f, 0x10, 0x02, - 0x12, 0x08, 0x0a, 0x04, 0x47, 0x52, 0x50, 0x43, 0x10, 0x03, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, - 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x5a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0xb1, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x29, 0x12, 0x27, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x73, 0x2e, 0x7a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x2e, - 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, - 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, - 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x5a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, - 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, - 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x75, 0x65, 0x73, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, + 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x14, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x53, 0x70, + 0x61, 0x6e, 0x73, 0x46, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x78, 0x0a, + 0x18, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x25, 0x44, 0x45, 0x50, + 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x41, 0x56, + 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x44, 0x4f, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x55, + 0x53, 0x45, 0x10, 0x00, 0x1a, 0x08, 0xa8, 0xf7, 0xb4, 0x8b, 0x02, 0x01, 0x08, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x48, 0x54, 0x54, 0x50, 0x5f, 0x4a, 0x53, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x0e, 0x0a, + 0x0a, 0x48, 0x54, 0x54, 0x50, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x10, 0x02, 0x12, 0x08, 0x0a, + 0x04, 0x47, 0x52, 0x50, 0x43, 0x10, 0x03, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x5a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x42, 0xb1, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x29, 0x12, 0x27, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x72, 0x73, 0x2e, 0x7a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x2e, 0x76, 0x34, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, + 0x33, 0x42, 0x0b, 0x5a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 
0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -300,7 +307,7 @@ var file_envoy_config_trace_v3_zipkin_proto_msgTypes = make([]protoimpl.MessageI var file_envoy_config_trace_v3_zipkin_proto_goTypes = []interface{}{ (ZipkinConfig_CollectorEndpointVersion)(0), // 0: envoy.config.trace.v3.ZipkinConfig.CollectorEndpointVersion (*ZipkinConfig)(nil), // 1: envoy.config.trace.v3.ZipkinConfig - (*wrappers.BoolValue)(nil), // 2: google.protobuf.BoolValue + (*wrapperspb.BoolValue)(nil), // 2: google.protobuf.BoolValue } var file_envoy_config_trace_v3_zipkin_proto_depIdxs = []int32{ 2, // 0: envoy.config.trace.v3.ZipkinConfig.shared_span_context:type_name -> google.protobuf.BoolValue diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.validate.go index 52834f4ed1e..d2db7385ac8 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/config/trace/v3/zipkin.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin_vtproto.pb.go new file mode 100644 index 00000000000..2dc450502f0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin_vtproto.pb.go @@ -0,0 +1,144 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/zipkin.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ZipkinConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ZipkinConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ZipkinConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SplitSpansForRequest { + i-- + if m.SplitSpansForRequest { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if len(m.CollectorHostname) > 0 { + i -= len(m.CollectorHostname) + copy(dAtA[i:], m.CollectorHostname) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CollectorHostname))) + i-- + dAtA[i] = 0x32 + } + if m.CollectorEndpointVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CollectorEndpointVersion)) + i-- + dAtA[i] = 0x28 + } + if m.SharedSpanContext != nil { + size, err := (*wrapperspb.BoolValue)(m.SharedSpanContext).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.TraceId_128Bit { + i-- + if m.TraceId_128Bit { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.CollectorEndpoint) > 0 { + i -= len(m.CollectorEndpoint) + copy(dAtA[i:], m.CollectorEndpoint) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CollectorEndpoint))) + i-- + dAtA[i] = 0x12 + } + if len(m.CollectorCluster) > 0 { + i -= len(m.CollectorCluster) + copy(dAtA[i:], m.CollectorCluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CollectorCluster))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ZipkinConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CollectorCluster) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.CollectorEndpoint) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TraceId_128Bit { + n += 2 + } + if m.SharedSpanContext != nil { + l = (*wrapperspb.BoolValue)(m.SharedSpanContext).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CollectorEndpointVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.CollectorEndpointVersion)) + } + l = len(m.CollectorHostname) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SplitSpansForRequest { + n += 2 + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.go index 8cbac9f9e51..eb9a42f3d7d 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/data/accesslog/v3/accesslog.proto package accesslogv3 @@ -11,12 +11,12 @@ import ( _ "github.com/envoyproxy/go-control-plane/envoy/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any1 "github.com/golang/protobuf/ptypes/any" - duration "github.com/golang/protobuf/ptypes/duration" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -479,38 +479,38 @@ type AccessLogCommon struct { TlsProperties *TLSProperties `protobuf:"bytes,4,opt,name=tls_properties,json=tlsProperties,proto3" json:"tls_properties,omitempty"` // The time that Envoy started servicing this request. This is effectively the time that the first // downstream byte is received. - StartTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + StartTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` // Interval between the first downstream byte received and the last // downstream byte received (i.e. time it takes to receive a request). - TimeToLastRxByte *duration.Duration `protobuf:"bytes,6,opt,name=time_to_last_rx_byte,json=timeToLastRxByte,proto3" json:"time_to_last_rx_byte,omitempty"` + TimeToLastRxByte *durationpb.Duration `protobuf:"bytes,6,opt,name=time_to_last_rx_byte,json=timeToLastRxByte,proto3" json:"time_to_last_rx_byte,omitempty"` // Interval between the first downstream byte received and the first upstream byte sent. There may // by considerable delta between “time_to_last_rx_byte“ and this value due to filters. // Additionally, the same caveats apply as documented in “time_to_last_downstream_tx_byte“ about // not accounting for kernel socket buffer time, etc. - TimeToFirstUpstreamTxByte *duration.Duration `protobuf:"bytes,7,opt,name=time_to_first_upstream_tx_byte,json=timeToFirstUpstreamTxByte,proto3" json:"time_to_first_upstream_tx_byte,omitempty"` + TimeToFirstUpstreamTxByte *durationpb.Duration `protobuf:"bytes,7,opt,name=time_to_first_upstream_tx_byte,json=timeToFirstUpstreamTxByte,proto3" json:"time_to_first_upstream_tx_byte,omitempty"` // Interval between the first downstream byte received and the last upstream byte sent. There may // by considerable delta between “time_to_last_rx_byte“ and this value due to filters. // Additionally, the same caveats apply as documented in “time_to_last_downstream_tx_byte“ about // not accounting for kernel socket buffer time, etc. - TimeToLastUpstreamTxByte *duration.Duration `protobuf:"bytes,8,opt,name=time_to_last_upstream_tx_byte,json=timeToLastUpstreamTxByte,proto3" json:"time_to_last_upstream_tx_byte,omitempty"` + TimeToLastUpstreamTxByte *durationpb.Duration `protobuf:"bytes,8,opt,name=time_to_last_upstream_tx_byte,json=timeToLastUpstreamTxByte,proto3" json:"time_to_last_upstream_tx_byte,omitempty"` // Interval between the first downstream byte received and the first upstream // byte received (i.e. 
time it takes to start receiving a response). - TimeToFirstUpstreamRxByte *duration.Duration `protobuf:"bytes,9,opt,name=time_to_first_upstream_rx_byte,json=timeToFirstUpstreamRxByte,proto3" json:"time_to_first_upstream_rx_byte,omitempty"` + TimeToFirstUpstreamRxByte *durationpb.Duration `protobuf:"bytes,9,opt,name=time_to_first_upstream_rx_byte,json=timeToFirstUpstreamRxByte,proto3" json:"time_to_first_upstream_rx_byte,omitempty"` // Interval between the first downstream byte received and the last upstream // byte received (i.e. time it takes to receive a complete response). - TimeToLastUpstreamRxByte *duration.Duration `protobuf:"bytes,10,opt,name=time_to_last_upstream_rx_byte,json=timeToLastUpstreamRxByte,proto3" json:"time_to_last_upstream_rx_byte,omitempty"` + TimeToLastUpstreamRxByte *durationpb.Duration `protobuf:"bytes,10,opt,name=time_to_last_upstream_rx_byte,json=timeToLastUpstreamRxByte,proto3" json:"time_to_last_upstream_rx_byte,omitempty"` // Interval between the first downstream byte received and the first downstream byte sent. // There may be a considerable delta between the “time_to_first_upstream_rx_byte“ and this field // due to filters. Additionally, the same caveats apply as documented in // “time_to_last_downstream_tx_byte“ about not accounting for kernel socket buffer time, etc. - TimeToFirstDownstreamTxByte *duration.Duration `protobuf:"bytes,11,opt,name=time_to_first_downstream_tx_byte,json=timeToFirstDownstreamTxByte,proto3" json:"time_to_first_downstream_tx_byte,omitempty"` + TimeToFirstDownstreamTxByte *durationpb.Duration `protobuf:"bytes,11,opt,name=time_to_first_downstream_tx_byte,json=timeToFirstDownstreamTxByte,proto3" json:"time_to_first_downstream_tx_byte,omitempty"` // Interval between the first downstream byte received and the last downstream byte sent. // Depending on protocol, buffering, windowing, filters, etc. there may be a considerable delta // between “time_to_last_upstream_rx_byte“ and this field. Note also that this is an approximate // time. In the current implementation it does not include kernel socket buffer time. In the // current implementation it also does not include send window buffering inside the HTTP/2 codec. // In the future it is likely that work will be done to make this duration more accurate. - TimeToLastDownstreamTxByte *duration.Duration `protobuf:"bytes,12,opt,name=time_to_last_downstream_tx_byte,json=timeToLastDownstreamTxByte,proto3" json:"time_to_last_downstream_tx_byte,omitempty"` + TimeToLastDownstreamTxByte *durationpb.Duration `protobuf:"bytes,12,opt,name=time_to_last_downstream_tx_byte,json=timeToLastDownstreamTxByte,proto3" json:"time_to_last_downstream_tx_byte,omitempty"` // The upstream remote/destination address that handles this exchange. This does not include // retries. UpstreamRemoteAddress *v3.Address `protobuf:"bytes,13,opt,name=upstream_remote_address,json=upstreamRemoteAddress,proto3" json:"upstream_remote_address,omitempty"` @@ -543,7 +543,7 @@ type AccessLogCommon struct { // Map of filter state in stream info that have been configured to be logged. If the filter // state serialized to any message other than “google.protobuf.Any“ it will be packed into // “google.protobuf.Any“. 
- FilterStateObjects map[string]*any1.Any `protobuf:"bytes,21,rep,name=filter_state_objects,json=filterStateObjects,proto3" json:"filter_state_objects,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + FilterStateObjects map[string]*anypb.Any `protobuf:"bytes,21,rep,name=filter_state_objects,json=filterStateObjects,proto3" json:"filter_state_objects,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // A list of custom tags, which annotate logs with additional information. // To configure this value, users should configure // :ref:`custom_tags `. @@ -552,7 +552,7 @@ type AccessLogCommon struct { // For TCP: Total duration in milliseconds of the downstream connection. // This is the total duration of the request (i.e., when the request's ActiveStream is destroyed) // and may be longer than “time_to_last_downstream_tx_byte“. - Duration *duration.Duration `protobuf:"bytes,23,opt,name=duration,proto3" json:"duration,omitempty"` + Duration *durationpb.Duration `protobuf:"bytes,23,opt,name=duration,proto3" json:"duration,omitempty"` // For HTTP: Number of times the request is attempted upstream. Note that the field is omitted when the request was never attempted upstream. // For TCP: Number of times the connection request is attempted upstream. Note that the field is omitted when the connect request was never attempted upstream. UpstreamRequestAttemptCount uint32 `protobuf:"varint,24,opt,name=upstream_request_attempt_count,json=upstreamRequestAttemptCount,proto3" json:"upstream_request_attempt_count,omitempty"` @@ -661,56 +661,56 @@ func (x *AccessLogCommon) GetTlsProperties() *TLSProperties { return nil } -func (x *AccessLogCommon) GetStartTime() *timestamp.Timestamp { +func (x *AccessLogCommon) GetStartTime() *timestamppb.Timestamp { if x != nil { return x.StartTime } return nil } -func (x *AccessLogCommon) GetTimeToLastRxByte() *duration.Duration { +func (x *AccessLogCommon) GetTimeToLastRxByte() *durationpb.Duration { if x != nil { return x.TimeToLastRxByte } return nil } -func (x *AccessLogCommon) GetTimeToFirstUpstreamTxByte() *duration.Duration { +func (x *AccessLogCommon) GetTimeToFirstUpstreamTxByte() *durationpb.Duration { if x != nil { return x.TimeToFirstUpstreamTxByte } return nil } -func (x *AccessLogCommon) GetTimeToLastUpstreamTxByte() *duration.Duration { +func (x *AccessLogCommon) GetTimeToLastUpstreamTxByte() *durationpb.Duration { if x != nil { return x.TimeToLastUpstreamTxByte } return nil } -func (x *AccessLogCommon) GetTimeToFirstUpstreamRxByte() *duration.Duration { +func (x *AccessLogCommon) GetTimeToFirstUpstreamRxByte() *durationpb.Duration { if x != nil { return x.TimeToFirstUpstreamRxByte } return nil } -func (x *AccessLogCommon) GetTimeToLastUpstreamRxByte() *duration.Duration { +func (x *AccessLogCommon) GetTimeToLastUpstreamRxByte() *durationpb.Duration { if x != nil { return x.TimeToLastUpstreamRxByte } return nil } -func (x *AccessLogCommon) GetTimeToFirstDownstreamTxByte() *duration.Duration { +func (x *AccessLogCommon) GetTimeToFirstDownstreamTxByte() *durationpb.Duration { if x != nil { return x.TimeToFirstDownstreamTxByte } return nil } -func (x *AccessLogCommon) GetTimeToLastDownstreamTxByte() *duration.Duration { +func (x *AccessLogCommon) GetTimeToLastDownstreamTxByte() *durationpb.Duration { if x != nil { return x.TimeToLastDownstreamTxByte } @@ -773,7 +773,7 @@ func (x *AccessLogCommon) GetDownstreamDirectRemoteAddress() *v3.Address { return nil } -func 
(x *AccessLogCommon) GetFilterStateObjects() map[string]*any1.Any { +func (x *AccessLogCommon) GetFilterStateObjects() map[string]*anypb.Any { if x != nil { return x.FilterStateObjects } @@ -787,7 +787,7 @@ func (x *AccessLogCommon) GetCustomTags() map[string]string { return nil } -func (x *AccessLogCommon) GetDuration() *duration.Duration { +func (x *AccessLogCommon) GetDuration() *durationpb.Duration { if x != nil { return x.Duration } @@ -866,7 +866,7 @@ func (x *AccessLogCommon) GetAccessLogType() AccessLogType { } // Flags indicating occurrences during request/response processing. -// [#next-free-field: 28] +// [#next-free-field: 29] type ResponseFlags struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -927,6 +927,8 @@ type ResponseFlags struct { OverloadManager bool `protobuf:"varint,26,opt,name=overload_manager,json=overloadManager,proto3" json:"overload_manager,omitempty"` // Indicates a DNS resolution failed. DnsResolutionFailure bool `protobuf:"varint,27,opt,name=dns_resolution_failure,json=dnsResolutionFailure,proto3" json:"dns_resolution_failure,omitempty"` + // Indicates a downstream remote codec level reset was received on the stream + DownstreamRemoteReset bool `protobuf:"varint,28,opt,name=downstream_remote_reset,json=downstreamRemoteReset,proto3" json:"downstream_remote_reset,omitempty"` } func (x *ResponseFlags) Reset() { @@ -1150,6 +1152,13 @@ func (x *ResponseFlags) GetDnsResolutionFailure() bool { return false } +func (x *ResponseFlags) GetDownstreamRemoteReset() bool { + if x != nil { + return x.DownstreamRemoteReset + } + return false +} + // Properties of a negotiated TLS connection. // [#next-free-field: 8] type TLSProperties struct { @@ -1164,7 +1173,7 @@ type TLSProperties struct { // (e.g. “009C“ for “TLS_RSA_WITH_AES_128_GCM_SHA256“). // // Here it is expressed as an integer. - TlsCipherSuite *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=tls_cipher_suite,json=tlsCipherSuite,proto3" json:"tls_cipher_suite,omitempty"` + TlsCipherSuite *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=tls_cipher_suite,json=tlsCipherSuite,proto3" json:"tls_cipher_suite,omitempty"` // SNI hostname from handshake. TlsSniHostname string `protobuf:"bytes,3,opt,name=tls_sni_hostname,json=tlsSniHostname,proto3" json:"tls_sni_hostname,omitempty"` // Properties of the local certificate used to negotiate TLS. @@ -1216,7 +1225,7 @@ func (x *TLSProperties) GetTlsVersion() TLSProperties_TLSVersion { return TLSProperties_VERSION_UNSPECIFIED } -func (x *TLSProperties) GetTlsCipherSuite() *wrappers.UInt32Value { +func (x *TLSProperties) GetTlsCipherSuite() *wrapperspb.UInt32Value { if x != nil { return x.TlsCipherSuite } @@ -1272,7 +1281,7 @@ type HTTPRequestProperties struct { Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` // The port of the incoming request URI // (unused currently, as port is composed onto authority). - Port *wrappers.UInt32Value `protobuf:"bytes,4,opt,name=port,proto3" json:"port,omitempty"` + Port *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=port,proto3" json:"port,omitempty"` // The path portion from the incoming request URI. Path string `protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"` // Value of the “User-Agent“ request header. 
@@ -1362,7 +1371,7 @@ func (x *HTTPRequestProperties) GetAuthority() string { return "" } -func (x *HTTPRequestProperties) GetPort() *wrappers.UInt32Value { +func (x *HTTPRequestProperties) GetPort() *wrapperspb.UInt32Value { if x != nil { return x.Port } @@ -1453,7 +1462,7 @@ type HTTPResponseProperties struct { unknownFields protoimpl.UnknownFields // The HTTP response code returned by Envoy. - ResponseCode *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=response_code,json=responseCode,proto3" json:"response_code,omitempty"` + ResponseCode *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=response_code,json=responseCode,proto3" json:"response_code,omitempty"` // Size of the HTTP response headers in bytes. // // This value is captured from the OSI layer 7 perspective, i.e. it does not @@ -1508,7 +1517,7 @@ func (*HTTPResponseProperties) Descriptor() ([]byte, []int) { return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{7} } -func (x *HTTPResponseProperties) GetResponseCode() *wrappers.UInt32Value { +func (x *HTTPResponseProperties) GetResponseCode() *wrapperspb.UInt32Value { if x != nil { return x.ResponseCode } @@ -1620,6 +1629,8 @@ type TLSProperties_CertificateProperties struct { SubjectAltName []*TLSProperties_CertificateProperties_SubjectAltName `protobuf:"bytes,1,rep,name=subject_alt_name,json=subjectAltName,proto3" json:"subject_alt_name,omitempty"` // The subject field of the certificate. Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` + // The issuer field of the certificate. + Issuer string `protobuf:"bytes,3,opt,name=issuer,proto3" json:"issuer,omitempty"` } func (x *TLSProperties_CertificateProperties) Reset() { @@ -1668,6 +1679,13 @@ func (x *TLSProperties_CertificateProperties) GetSubject() string { return "" } +func (x *TLSProperties_CertificateProperties) GetIssuer() string { + if x != nil { + return x.Issuer + } + return "" +} + type TLSProperties_CertificateProperties_SubjectAltName struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2005,7 +2023,7 @@ var file_envoy_data_accesslog_v3_accesslog_proto_rawDesc = []byte{ 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xe9, 0x0d, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xa1, 0x0e, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, 0x66, 0x61, 0x69, 0x6c, @@ -2099,69 +2117,74 @@ var file_envoy_data_accesslog_v3_accesslog_proto_rawDesc = []byte{ 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x16, 0x64, 0x6e, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x64, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x1a, 0xd5, 0x01, 0x0a, - 0x0c, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x12, 0x52, 0x0a, - 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3a, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x46, 0x6c, 0x61, 0x67, 0x73, 0x2e, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, - 0x65, 0x64, 0x2e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, - 0x6e, 0x22, 0x36, 0x0a, 0x06, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x52, - 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, - 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x01, 0x3a, 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, - 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x2e, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x7a, 0x65, 0x64, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, - 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, - 0x67, 0x73, 0x22, 0xad, 0x08, 0x0a, 0x0d, 0x54, 0x4c, 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, - 0x74, 0x69, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, - 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x4c, 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, - 0x73, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x74, 0x6c, - 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, - 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x69, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x0e, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x53, 0x75, 0x69, 0x74, 0x65, - 0x12, 0x28, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x73, 0x6e, 0x69, 0x5f, 0x68, 0x6f, 0x73, 0x74, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x6c, 0x73, 0x53, - 0x6e, 0x69, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x7e, 0x0a, 0x1c, 0x6c, 0x6f, - 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, - 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x4c, 0x53, 0x50, 0x72, - 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x1a, - 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x7c, 
0x0a, 0x1b, 0x70, 0x65, - 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, - 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x4c, 0x53, 0x50, 0x72, 0x6f, - 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x19, 0x70, - 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, - 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x6c, 0x73, 0x5f, - 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x74, 0x6c, 0x73, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x27, - 0x0a, 0x0f, 0x6a, 0x61, 0x33, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, - 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6a, 0x61, 0x33, 0x46, 0x69, 0x6e, 0x67, - 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x1a, 0x81, 0x03, 0x0a, 0x15, 0x43, 0x65, 0x72, 0x74, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x36, 0x0a, 0x17, + 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, + 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x65, 0x74, 0x1a, 0xd5, 0x01, 0x0a, 0x0c, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x7a, 0x65, 0x64, 0x12, 0x52, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x2e, 0x55, 0x6e, + 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x2e, 0x52, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x36, 0x0a, 0x06, 0x52, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x45, + 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, + 0x01, 0x3a, 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x32, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x2e, + 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x3a, 0x2c, 0x9a, 0xc5, + 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x22, 0xc5, 0x08, 0x0a, 0x0d, 0x54, + 0x4c, 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x0b, + 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 
0x0e, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x4c, 0x53, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x74, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x46, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x5f, 0x73, + 0x75, 0x69, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, + 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, + 0x68, 0x65, 0x72, 0x53, 0x75, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, + 0x73, 0x6e, 0x69, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0e, 0x74, 0x6c, 0x73, 0x53, 0x6e, 0x69, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x7e, 0x0a, 0x1c, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, + 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x54, 0x4c, 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x1a, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, + 0x65, 0x73, 0x12, 0x7c, 0x0a, 0x1b, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x4c, 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x19, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x6c, 0x73, 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x6c, 0x73, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x6a, 0x61, 0x33, 0x5f, 0x66, 0x69, + 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0e, 0x6a, 0x61, 0x33, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x1a, + 0x99, 0x03, 0x0a, 0x15, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x75, 0x0a, 0x10, 0x73, 0x75, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x61, 0x63, 0x63, 
0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x4c, + 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, - 0x73, 0x12, 0x75, 0x0a, 0x10, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, - 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x4c, 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, - 0x69, 0x65, 0x73, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, - 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x0e, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x1a, 0x92, 0x01, 0x0a, 0x0e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, + 0x73, 0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, + 0x52, 0x0e, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, + 0x73, 0x75, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, + 0x65, 0x72, 0x1a, 0x92, 0x01, 0x0a, 0x0e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x12, 0x0a, 0x03, 0x64, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x64, 0x6e, 0x73, 0x3a, 0x51, 0x9a, @@ -2352,16 +2375,16 @@ var file_envoy_data_accesslog_v3_accesslog_proto_goTypes = []interface{}{ (*ResponseFlags_Unauthorized)(nil), // 14: envoy.data.accesslog.v3.ResponseFlags.Unauthorized (*TLSProperties_CertificateProperties)(nil), // 15: envoy.data.accesslog.v3.TLSProperties.CertificateProperties (*TLSProperties_CertificateProperties_SubjectAltName)(nil), // 16: envoy.data.accesslog.v3.TLSProperties.CertificateProperties.SubjectAltName - nil, // 17: envoy.data.accesslog.v3.HTTPRequestProperties.RequestHeadersEntry - nil, // 18: envoy.data.accesslog.v3.HTTPResponseProperties.ResponseHeadersEntry - nil, // 19: envoy.data.accesslog.v3.HTTPResponseProperties.ResponseTrailersEntry - (*v3.Address)(nil), // 20: envoy.config.core.v3.Address - (*timestamp.Timestamp)(nil), // 21: google.protobuf.Timestamp - (*duration.Duration)(nil), // 22: google.protobuf.Duration - (*v3.Metadata)(nil), // 23: envoy.config.core.v3.Metadata - (*wrappers.UInt32Value)(nil), // 24: google.protobuf.UInt32Value - (v3.RequestMethod)(0), // 25: envoy.config.core.v3.RequestMethod - (*any1.Any)(nil), // 26: google.protobuf.Any + nil, // 17: envoy.data.accesslog.v3.HTTPRequestProperties.RequestHeadersEntry + nil, // 18: envoy.data.accesslog.v3.HTTPResponseProperties.ResponseHeadersEntry + nil, // 19: envoy.data.accesslog.v3.HTTPResponseProperties.ResponseTrailersEntry + (*v3.Address)(nil), // 20: envoy.config.core.v3.Address + 
(*timestamppb.Timestamp)(nil), // 21: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 22: google.protobuf.Duration + (*v3.Metadata)(nil), // 23: envoy.config.core.v3.Metadata + (*wrapperspb.UInt32Value)(nil), // 24: google.protobuf.UInt32Value + (v3.RequestMethod)(0), // 25: envoy.config.core.v3.RequestMethod + (*anypb.Any)(nil), // 26: google.protobuf.Any } var file_envoy_data_accesslog_v3_accesslog_proto_depIdxs = []int32{ 7, // 0: envoy.data.accesslog.v3.TCPAccessLogEntry.common_properties:type_name -> envoy.data.accesslog.v3.AccessLogCommon diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.validate.go index 42ca35d399e..7a0fec615d2 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/data/accesslog/v3/accesslog.proto @@ -1277,6 +1278,8 @@ func (m *ResponseFlags) validate(all bool) error { // no validation rules for DnsResolutionFailure + // no validation rules for DownstreamRemoteReset + if len(errors) > 0 { return ResponseFlagsMultiError(errors) } @@ -2026,6 +2029,8 @@ func (m *TLSProperties_CertificateProperties) validate(all bool) error { // no validation rules for Subject + // no validation rules for Issuer + if len(errors) > 0 { return TLSProperties_CertificatePropertiesMultiError(errors) } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog_vtproto.pb.go new file mode 100644 index 00000000000..c37ec091596 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog_vtproto.pb.go @@ -0,0 +1,2040 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/data/accesslog/v3/accesslog.proto + +package accesslogv3 + +import ( + binary "encoding/binary" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + timestamppb "github.com/planetscale/vtprotobuf/types/known/timestamppb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TCPAccessLogEntry) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TCPAccessLogEntry) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TCPAccessLogEntry) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ConnectionProperties != nil { + size, err := m.ConnectionProperties.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.CommonProperties != nil { + size, err := m.CommonProperties.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HTTPAccessLogEntry) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPAccessLogEntry) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HTTPAccessLogEntry) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Response != nil { + size, err := m.Response.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.Request != nil { + size, err := m.Request.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.ProtocolVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ProtocolVersion)) + i-- + dAtA[i] = 0x10 + } + if m.CommonProperties != nil { + size, err := m.CommonProperties.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConnectionProperties) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConnectionProperties) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ConnectionProperties) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SentBytes != 
0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.SentBytes)) + i-- + dAtA[i] = 0x10 + } + if m.ReceivedBytes != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ReceivedBytes)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *AccessLogCommon) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AccessLogCommon) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogCommon) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AccessLogType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.AccessLogType)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x88 + } + if m.UpstreamWireBytesReceived != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.UpstreamWireBytesReceived)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x80 + } + if m.UpstreamWireBytesSent != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.UpstreamWireBytesSent)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf8 + } + if m.DownstreamWireBytesReceived != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DownstreamWireBytesReceived)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf0 + } + if m.DownstreamWireBytesSent != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DownstreamWireBytesSent)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe8 + } + if len(m.DownstreamTransportFailureReason) > 0 { + i -= len(m.DownstreamTransportFailureReason) + copy(dAtA[i:], m.DownstreamTransportFailureReason) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DownstreamTransportFailureReason))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + if m.IntermediateLogEntry { + i-- + if m.IntermediateLogEntry { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd8 + } + if len(m.StreamId) > 0 { + i -= len(m.StreamId) + copy(dAtA[i:], m.StreamId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StreamId))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + if len(m.ConnectionTerminationDetails) > 0 { + i -= len(m.ConnectionTerminationDetails) + copy(dAtA[i:], m.ConnectionTerminationDetails) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ConnectionTerminationDetails))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + if m.UpstreamRequestAttemptCount != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.UpstreamRequestAttemptCount)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc0 + } + if m.Duration != nil { + size, err := (*durationpb.Duration)(m.Duration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if len(m.CustomTags) > 0 { + for k := range m.CustomTags { + v := m.CustomTags[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + } + if 
len(m.FilterStateObjects) > 0 { + for k := range m.FilterStateObjects { + v := m.FilterStateObjects[k] + baseI := i + size, err := (*anypb.Any)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + } + if m.DownstreamDirectRemoteAddress != nil { + if vtmsg, ok := interface{}(m.DownstreamDirectRemoteAddress).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DownstreamDirectRemoteAddress) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if len(m.RouteName) > 0 { + i -= len(m.RouteName) + copy(dAtA[i:], m.RouteName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RouteName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if len(m.UpstreamTransportFailureReason) > 0 { + i -= len(m.UpstreamTransportFailureReason) + copy(dAtA[i:], m.UpstreamTransportFailureReason) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.UpstreamTransportFailureReason))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.ResponseFlags != nil { + size, err := m.ResponseFlags.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if len(m.UpstreamCluster) > 0 { + i -= len(m.UpstreamCluster) + copy(dAtA[i:], m.UpstreamCluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.UpstreamCluster))) + i-- + dAtA[i] = 0x7a + } + if m.UpstreamLocalAddress != nil { + if vtmsg, ok := interface{}(m.UpstreamLocalAddress).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UpstreamLocalAddress) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x72 + } + if m.UpstreamRemoteAddress != nil { + if vtmsg, ok := interface{}(m.UpstreamRemoteAddress).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, 
i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UpstreamRemoteAddress) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + if m.TimeToLastDownstreamTxByte != nil { + size, err := (*durationpb.Duration)(m.TimeToLastDownstreamTxByte).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if m.TimeToFirstDownstreamTxByte != nil { + size, err := (*durationpb.Duration)(m.TimeToFirstDownstreamTxByte).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if m.TimeToLastUpstreamRxByte != nil { + size, err := (*durationpb.Duration)(m.TimeToLastUpstreamRxByte).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if m.TimeToFirstUpstreamRxByte != nil { + size, err := (*durationpb.Duration)(m.TimeToFirstUpstreamRxByte).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if m.TimeToLastUpstreamTxByte != nil { + size, err := (*durationpb.Duration)(m.TimeToLastUpstreamTxByte).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.TimeToFirstUpstreamTxByte != nil { + size, err := (*durationpb.Duration)(m.TimeToFirstUpstreamTxByte).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.TimeToLastRxByte != nil { + size, err := (*durationpb.Duration)(m.TimeToLastRxByte).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.StartTime != nil { + size, err := (*timestamppb.Timestamp)(m.StartTime).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.TlsProperties != nil { + size, err := m.TlsProperties.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.DownstreamLocalAddress != nil { + if vtmsg, ok := interface{}(m.DownstreamLocalAddress).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DownstreamLocalAddress) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.DownstreamRemoteAddress != nil { + if vtmsg, ok := interface{}(m.DownstreamRemoteAddress).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size 
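+			// Sized-buffer marshaling fills dAtA back to front: the Address submessage body was
+			// just written and i moved past it, so its length varint is prepended next; the
+			// 0x12 written below is the preceding field tag (field 2, wire type LEN).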
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DownstreamRemoteAddress) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.SampleRate != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.SampleRate)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *ResponseFlags_Unauthorized) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseFlags_Unauthorized) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResponseFlags_Unauthorized) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Reason != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Reason)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ResponseFlags) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseFlags) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResponseFlags) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DownstreamRemoteReset { + i-- + if m.DownstreamRemoteReset { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe0 + } + if m.DnsResolutionFailure { + i-- + if m.DnsResolutionFailure { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd8 + } + if m.OverloadManager { + i-- + if m.OverloadManager { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd0 + } + if m.NoClusterFound { + i-- + if m.NoClusterFound { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc8 + } + if m.UpstreamProtocolError { + i-- + if m.UpstreamProtocolError { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc0 + } + if m.DurationTimeout { + i-- + if m.DurationTimeout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb8 + } + if m.NoFilterConfigFound { + i-- + if m.NoFilterConfigFound { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb0 + } + if m.ResponseFromCacheFilter { + i-- + if m.ResponseFromCacheFilter { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if m.UpstreamMaxStreamDurationReached { + i-- + if m.UpstreamMaxStreamDurationReached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if m.DownstreamProtocolError { + i-- + if m.DownstreamProtocolError { + dAtA[i] = 1 + 
} else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if m.InvalidEnvoyRequestHeaders { + i-- + if m.InvalidEnvoyRequestHeaders { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + } + if m.StreamIdleTimeout { + i-- + if m.StreamIdleTimeout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if m.UpstreamRetryLimitExceeded { + i-- + if m.UpstreamRetryLimitExceeded { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.DownstreamConnectionTermination { + i-- + if m.DownstreamConnectionTermination { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x78 + } + if m.RateLimitServiceError { + i-- + if m.RateLimitServiceError { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + } + if m.UnauthorizedDetails != nil { + size, err := m.UnauthorizedDetails.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + if m.RateLimited { + i-- + if m.RateLimited { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + } + if m.FaultInjected { + i-- + if m.FaultInjected { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } + if m.DelayInjected { + i-- + if m.DelayInjected { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.NoRouteFound { + i-- + if m.NoRouteFound { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } + if m.UpstreamOverflow { + i-- + if m.UpstreamOverflow { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.UpstreamConnectionTermination { + i-- + if m.UpstreamConnectionTermination { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.UpstreamConnectionFailure { + i-- + if m.UpstreamConnectionFailure { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.UpstreamRemoteReset { + i-- + if m.UpstreamRemoteReset { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.LocalReset { + i-- + if m.LocalReset { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.UpstreamRequestTimeout { + i-- + if m.UpstreamRequestTimeout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.NoHealthyUpstream { + i-- + if m.NoHealthyUpstream { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.FailedLocalHealthcheck { + i-- + if m.FailedLocalHealthcheck { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := 
m.San.(*TLSProperties_CertificateProperties_SubjectAltName_Dns); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.San.(*TLSProperties_CertificateProperties_SubjectAltName_Uri); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName_Uri) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName_Uri) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Uri) + copy(dAtA[i:], m.Uri) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Uri))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *TLSProperties_CertificateProperties_SubjectAltName_Dns) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName_Dns) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Dns) + copy(dAtA[i:], m.Dns) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Dns))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *TLSProperties_CertificateProperties) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TLSProperties_CertificateProperties) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TLSProperties_CertificateProperties) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Issuer) > 0 { + i -= len(m.Issuer) + copy(dAtA[i:], m.Issuer) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Issuer))) + i-- + dAtA[i] = 0x1a + } + if len(m.Subject) > 0 { + i -= len(m.Subject) + copy(dAtA[i:], m.Subject) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Subject))) + i-- + dAtA[i] = 0x12 + } + if len(m.SubjectAltName) > 0 { + for iNdEx := len(m.SubjectAltName) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.SubjectAltName[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *TLSProperties) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TLSProperties) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TLSProperties) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Ja3Fingerprint) > 0 { + i -= 
len(m.Ja3Fingerprint) + copy(dAtA[i:], m.Ja3Fingerprint) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Ja3Fingerprint))) + i-- + dAtA[i] = 0x3a + } + if len(m.TlsSessionId) > 0 { + i -= len(m.TlsSessionId) + copy(dAtA[i:], m.TlsSessionId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TlsSessionId))) + i-- + dAtA[i] = 0x32 + } + if m.PeerCertificateProperties != nil { + size, err := m.PeerCertificateProperties.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.LocalCertificateProperties != nil { + size, err := m.LocalCertificateProperties.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.TlsSniHostname) > 0 { + i -= len(m.TlsSniHostname) + copy(dAtA[i:], m.TlsSniHostname) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TlsSniHostname))) + i-- + dAtA[i] = 0x1a + } + if m.TlsCipherSuite != nil { + size, err := (*wrapperspb.UInt32Value)(m.TlsCipherSuite).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.TlsVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TlsVersion)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HTTPRequestProperties) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPRequestProperties) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HTTPRequestProperties) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DownstreamHeaderBytesReceived != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DownstreamHeaderBytesReceived)) + i-- + dAtA[i] = 0x78 + } + if m.UpstreamHeaderBytesSent != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.UpstreamHeaderBytesSent)) + i-- + dAtA[i] = 0x70 + } + if len(m.RequestHeaders) > 0 { + for k := range m.RequestHeaders { + v := m.RequestHeaders[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x6a + } + } + if m.RequestBodyBytes != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RequestBodyBytes)) + i-- + dAtA[i] = 0x60 + } + if m.RequestHeadersBytes != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RequestHeadersBytes)) + i-- + dAtA[i] = 0x58 + } + if len(m.OriginalPath) > 0 { + i -= len(m.OriginalPath) + copy(dAtA[i:], m.OriginalPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.OriginalPath))) + i-- + dAtA[i] = 0x52 + } + if len(m.RequestId) > 0 { + i -= len(m.RequestId) + copy(dAtA[i:], m.RequestId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RequestId))) + i-- + dAtA[i] = 0x4a + } + if len(m.ForwardedFor) > 0 { + i 
-= len(m.ForwardedFor) + copy(dAtA[i:], m.ForwardedFor) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ForwardedFor))) + i-- + dAtA[i] = 0x42 + } + if len(m.Referer) > 0 { + i -= len(m.Referer) + copy(dAtA[i:], m.Referer) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Referer))) + i-- + dAtA[i] = 0x3a + } + if len(m.UserAgent) > 0 { + i -= len(m.UserAgent) + copy(dAtA[i:], m.UserAgent) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.UserAgent))) + i-- + dAtA[i] = 0x32 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x2a + } + if m.Port != nil { + size, err := (*wrapperspb.UInt32Value)(m.Port).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0x1a + } + if len(m.Scheme) > 0 { + i -= len(m.Scheme) + copy(dAtA[i:], m.Scheme) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Scheme))) + i-- + dAtA[i] = 0x12 + } + if m.RequestMethod != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RequestMethod)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HTTPResponseProperties) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPResponseProperties) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HTTPResponseProperties) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DownstreamHeaderBytesSent != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DownstreamHeaderBytesSent)) + i-- + dAtA[i] = 0x40 + } + if m.UpstreamHeaderBytesReceived != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.UpstreamHeaderBytesReceived)) + i-- + dAtA[i] = 0x38 + } + if len(m.ResponseCodeDetails) > 0 { + i -= len(m.ResponseCodeDetails) + copy(dAtA[i:], m.ResponseCodeDetails) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseCodeDetails))) + i-- + dAtA[i] = 0x32 + } + if len(m.ResponseTrailers) > 0 { + for k := range m.ResponseTrailers { + v := m.ResponseTrailers[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.ResponseHeaders) > 0 { + for k := range m.ResponseHeaders { + v := m.ResponseHeaders[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if m.ResponseBodyBytes != 0 { + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(m.ResponseBodyBytes)) + i-- + dAtA[i] = 0x18 + } + if m.ResponseHeadersBytes != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ResponseHeadersBytes)) + i-- + dAtA[i] = 0x10 + } + if m.ResponseCode != nil { + size, err := (*wrapperspb.UInt32Value)(m.ResponseCode).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TCPAccessLogEntry) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CommonProperties != nil { + l = m.CommonProperties.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectionProperties != nil { + l = m.ConnectionProperties.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HTTPAccessLogEntry) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CommonProperties != nil { + l = m.CommonProperties.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ProtocolVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ProtocolVersion)) + } + if m.Request != nil { + l = m.Request.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Response != nil { + l = m.Response.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ConnectionProperties) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ReceivedBytes != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ReceivedBytes)) + } + if m.SentBytes != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.SentBytes)) + } + n += len(m.unknownFields) + return n +} + +func (m *AccessLogCommon) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SampleRate != 0 { + n += 9 + } + if m.DownstreamRemoteAddress != nil { + if size, ok := interface{}(m.DownstreamRemoteAddress).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DownstreamRemoteAddress) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DownstreamLocalAddress != nil { + if size, ok := interface{}(m.DownstreamLocalAddress).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DownstreamLocalAddress) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TlsProperties != nil { + l = m.TlsProperties.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StartTime != nil { + l = (*timestamppb.Timestamp)(m.StartTime).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TimeToLastRxByte != nil { + l = (*durationpb.Duration)(m.TimeToLastRxByte).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TimeToFirstUpstreamTxByte != nil { + l = (*durationpb.Duration)(m.TimeToFirstUpstreamTxByte).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TimeToLastUpstreamTxByte != nil { + l = (*durationpb.Duration)(m.TimeToLastUpstreamTxByte).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TimeToFirstUpstreamRxByte != nil { + l = (*durationpb.Duration)(m.TimeToFirstUpstreamRxByte).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TimeToLastUpstreamRxByte != nil { + l = (*durationpb.Duration)(m.TimeToLastUpstreamRxByte).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TimeToFirstDownstreamTxByte != nil { + l 
= (*durationpb.Duration)(m.TimeToFirstDownstreamTxByte).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TimeToLastDownstreamTxByte != nil { + l = (*durationpb.Duration)(m.TimeToLastDownstreamTxByte).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UpstreamRemoteAddress != nil { + if size, ok := interface{}(m.UpstreamRemoteAddress).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UpstreamRemoteAddress) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UpstreamLocalAddress != nil { + if size, ok := interface{}(m.UpstreamLocalAddress).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UpstreamLocalAddress) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.UpstreamCluster) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ResponseFlags != nil { + l = m.ResponseFlags.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.UpstreamTransportFailureReason) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RouteName) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DownstreamDirectRemoteAddress != nil { + if size, ok := interface{}(m.DownstreamDirectRemoteAddress).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DownstreamDirectRemoteAddress) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.FilterStateObjects) > 0 { + for k, v := range m.FilterStateObjects { + _ = k + _ = v + l = 0 + if v != nil { + l = (*anypb.Any)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 2 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if len(m.CustomTags) > 0 { + for k, v := range m.CustomTags { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += mapEntrySize + 2 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if m.Duration != nil { + l = (*durationpb.Duration)(m.Duration).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UpstreamRequestAttemptCount != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.UpstreamRequestAttemptCount)) + } + l = len(m.ConnectionTerminationDetails) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.StreamId) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IntermediateLogEntry { + n += 3 + } + l = len(m.DownstreamTransportFailureReason) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DownstreamWireBytesSent != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.DownstreamWireBytesSent)) + } + if m.DownstreamWireBytesReceived != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.DownstreamWireBytesReceived)) + } + if m.UpstreamWireBytesSent != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.UpstreamWireBytesSent)) + } + if m.UpstreamWireBytesReceived != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.UpstreamWireBytesReceived)) + } + if m.AccessLogType != 0 { + n += 2 + 
protohelpers.SizeOfVarint(uint64(m.AccessLogType)) + } + n += len(m.unknownFields) + return n +} + +func (m *ResponseFlags_Unauthorized) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Reason != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Reason)) + } + n += len(m.unknownFields) + return n +} + +func (m *ResponseFlags) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FailedLocalHealthcheck { + n += 2 + } + if m.NoHealthyUpstream { + n += 2 + } + if m.UpstreamRequestTimeout { + n += 2 + } + if m.LocalReset { + n += 2 + } + if m.UpstreamRemoteReset { + n += 2 + } + if m.UpstreamConnectionFailure { + n += 2 + } + if m.UpstreamConnectionTermination { + n += 2 + } + if m.UpstreamOverflow { + n += 2 + } + if m.NoRouteFound { + n += 2 + } + if m.DelayInjected { + n += 2 + } + if m.FaultInjected { + n += 2 + } + if m.RateLimited { + n += 2 + } + if m.UnauthorizedDetails != nil { + l = m.UnauthorizedDetails.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RateLimitServiceError { + n += 2 + } + if m.DownstreamConnectionTermination { + n += 2 + } + if m.UpstreamRetryLimitExceeded { + n += 3 + } + if m.StreamIdleTimeout { + n += 3 + } + if m.InvalidEnvoyRequestHeaders { + n += 3 + } + if m.DownstreamProtocolError { + n += 3 + } + if m.UpstreamMaxStreamDurationReached { + n += 3 + } + if m.ResponseFromCacheFilter { + n += 3 + } + if m.NoFilterConfigFound { + n += 3 + } + if m.DurationTimeout { + n += 3 + } + if m.UpstreamProtocolError { + n += 3 + } + if m.NoClusterFound { + n += 3 + } + if m.OverloadManager { + n += 3 + } + if m.DnsResolutionFailure { + n += 3 + } + if m.DownstreamRemoteReset { + n += 3 + } + n += len(m.unknownFields) + return n +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.San.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName_Uri) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Uri) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *TLSProperties_CertificateProperties_SubjectAltName_Dns) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Dns) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *TLSProperties_CertificateProperties) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.SubjectAltName) > 0 { + for _, e := range m.SubjectAltName { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.Subject) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Issuer) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *TLSProperties) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TlsVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TlsVersion)) + } + if m.TlsCipherSuite != nil { + l = (*wrapperspb.UInt32Value)(m.TlsCipherSuite).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.TlsSniHostname) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LocalCertificateProperties != nil { + l = m.LocalCertificateProperties.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PeerCertificateProperties != nil { + l 
= m.PeerCertificateProperties.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.TlsSessionId) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Ja3Fingerprint) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HTTPRequestProperties) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestMethod != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.RequestMethod)) + } + l = len(m.Scheme) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Authority) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Port != nil { + l = (*wrapperspb.UInt32Value)(m.Port).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.UserAgent) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Referer) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ForwardedFor) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RequestId) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.OriginalPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RequestHeadersBytes != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.RequestHeadersBytes)) + } + if m.RequestBodyBytes != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.RequestBodyBytes)) + } + if len(m.RequestHeaders) > 0 { + for k, v := range m.RequestHeaders { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if m.UpstreamHeaderBytesSent != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.UpstreamHeaderBytesSent)) + } + if m.DownstreamHeaderBytesReceived != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.DownstreamHeaderBytesReceived)) + } + n += len(m.unknownFields) + return n +} + +func (m *HTTPResponseProperties) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ResponseCode != nil { + l = (*wrapperspb.UInt32Value)(m.ResponseCode).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ResponseHeadersBytes != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ResponseHeadersBytes)) + } + if m.ResponseBodyBytes != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ResponseBodyBytes)) + } + if len(m.ResponseHeaders) > 0 { + for k, v := range m.ResponseHeaders { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if len(m.ResponseTrailers) > 0 { + for k, v := range m.ResponseTrailers { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + l = len(m.ResponseCodeDetails) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UpstreamHeaderBytesReceived != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.UpstreamHeaderBytesReceived)) + } + if m.DownstreamHeaderBytesSent != 0 { + n += 1 + 
protohelpers.SizeOfVarint(uint64(m.DownstreamHeaderBytesSent)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.go index 3a346bf355b..ed75102d4ea 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/extensions/clusters/aggregate/v3/cluster.proto package aggregatev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.validate.go index ce250b6c070..44fb2c71f17 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/extensions/clusters/aggregate/v3/cluster.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster_vtproto.pb.go new file mode 100644 index 00000000000..a3f22bf1308 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster_vtproto.pb.go @@ -0,0 +1,77 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/clusters/aggregate/v3/cluster.proto + +package aggregatev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
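The `_vtproto.pb.go` files introduced throughout this vendor bump carry the `//go:build vtprotobuf` constraint shown above, so they compile only when the binary is built with `-tags vtprotobuf`; the paired `protoimpl.EnforceVersion` constants guard against a protobuf runtime that is too old or too new for the generated code. Because the fast path may be absent, callers probe for it with an interface assertion and fall back to reflection-based marshalling, the same pattern this generated code itself uses for cross-package fields. A minimal sketch of that probe follows; `marshalFast` is an illustrative helper, not part of this patch:

package fastmarshal

import "google.golang.org/protobuf/proto"

// marshalFast uses the vtprotobuf strict marshaller when the message's
// generated code provides one (only true under -tags vtprotobuf) and
// falls back to the standard reflection-based marshaller otherwise.
func marshalFast(msg proto.Message) ([]byte, error) {
	if vtmsg, ok := msg.(interface{ MarshalVTStrict() ([]byte, error) }); ok {
		return vtmsg.MarshalVTStrict()
	}
	return proto.Marshal(msg)
}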
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ClusterConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Clusters) > 0 { + for iNdEx := len(m.Clusters) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Clusters[iNdEx]) + copy(dAtA[i:], m.Clusters[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Clusters[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ClusterConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Clusters) > 0 { + for _, s := range m.Clusters { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.go index 9fd8f66f4a4..13e47ea8325 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/extensions/filters/common/fault/v3/fault.proto package faultv3 @@ -10,9 +10,9 @@ import ( _ "github.com/cncf/xds/go/udpa/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - duration "github.com/golang/protobuf/ptypes/duration" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" reflect "reflect" sync "sync" ) @@ -124,7 +124,7 @@ func (m *FaultDelay) GetFaultDelaySecifier() isFaultDelay_FaultDelaySecifier { return nil } -func (x *FaultDelay) GetFixedDelay() *duration.Duration { +func (x *FaultDelay) GetFixedDelay() *durationpb.Duration { if x, ok := x.GetFaultDelaySecifier().(*FaultDelay_FixedDelay); ok { return x.FixedDelay } @@ -155,7 +155,7 @@ type FaultDelay_FixedDelay struct { // the JSON/YAML Duration mapping. For HTTP/Mongo, the specified // delay will be injected before a new request/operation. // This is required if type is FIXED. 
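The import swap in this hunk, `github.com/golang/protobuf/ptypes/duration` for `google.golang.org/protobuf/types/known/durationpb`, is source-compatible: since golang/protobuf v1.4 the old package's `Duration` is a type alias for `durationpb.Duration`, so only the import path and package name change. New code constructs the well-known type directly. A small sketch, where `fixedDelay` is an invented helper rather than anything in this patch:

package example

import (
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
)

// fixedDelay converts a native time.Duration into the
// google.protobuf.Duration message that FaultDelay's fixed_delay
// field now carries as *durationpb.Duration.
func fixedDelay(d time.Duration) *durationpb.Duration {
	return durationpb.New(d)
}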
- FixedDelay *duration.Duration `protobuf:"bytes,3,opt,name=fixed_delay,json=fixedDelay,proto3,oneof"` + FixedDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=fixed_delay,json=fixedDelay,proto3,oneof"` } type FaultDelay_HeaderDelay_ struct { @@ -508,7 +508,7 @@ var file_envoy_extensions_filters_common_fault_v3_fault_proto_goTypes = []interf (*FaultDelay_HeaderDelay)(nil), // 3: envoy.extensions.filters.common.fault.v3.FaultDelay.HeaderDelay (*FaultRateLimit_FixedLimit)(nil), // 4: envoy.extensions.filters.common.fault.v3.FaultRateLimit.FixedLimit (*FaultRateLimit_HeaderLimit)(nil), // 5: envoy.extensions.filters.common.fault.v3.FaultRateLimit.HeaderLimit - (*duration.Duration)(nil), // 6: google.protobuf.Duration + (*durationpb.Duration)(nil), // 6: google.protobuf.Duration (*v3.FractionalPercent)(nil), // 7: envoy.type.v3.FractionalPercent } var file_envoy_extensions_filters_common_fault_v3_fault_proto_depIdxs = []int32{ diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.validate.go index 06015e74d7e..7387e6d026e 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/extensions/filters/common/fault/v3/fault.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault_vtproto.pb.go new file mode 100644 index 00000000000..4a462b40ad0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault_vtproto.pb.go @@ -0,0 +1,491 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/filters/common/fault/v3/fault.proto + +package faultv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *FaultDelay_HeaderDelay) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FaultDelay_HeaderDelay) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultDelay_HeaderDelay) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *FaultDelay) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FaultDelay) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultDelay) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.FaultDelaySecifier.(*FaultDelay_HeaderDelay_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Percentage != nil { + if vtmsg, ok := interface{}(m.Percentage).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Percentage) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if msg, ok := m.FaultDelaySecifier.(*FaultDelay_FixedDelay); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *FaultDelay_FixedDelay) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultDelay_FixedDelay) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FixedDelay != nil { + size, err := (*durationpb.Duration)(m.FixedDelay).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *FaultDelay_HeaderDelay_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultDelay_HeaderDelay_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HeaderDelay != nil { + size, err := m.HeaderDelay.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + 
} else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *FaultRateLimit_FixedLimit) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FaultRateLimit_FixedLimit) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultRateLimit_FixedLimit) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LimitKbps != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.LimitKbps)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *FaultRateLimit_HeaderLimit) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FaultRateLimit_HeaderLimit) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultRateLimit_HeaderLimit) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *FaultRateLimit) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FaultRateLimit) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultRateLimit) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.LimitType.(*FaultRateLimit_HeaderLimit_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Percentage != nil { + if vtmsg, ok := interface{}(m.Percentage).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Percentage) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if msg, ok := m.LimitType.(*FaultRateLimit_FixedLimit_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *FaultRateLimit_FixedLimit_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*FaultRateLimit_FixedLimit_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FixedLimit != nil { + size, err := m.FixedLimit.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *FaultRateLimit_HeaderLimit_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultRateLimit_HeaderLimit_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HeaderLimit != nil { + size, err := m.HeaderLimit.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *FaultDelay_HeaderDelay) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *FaultDelay) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.FaultDelaySecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Percentage != nil { + if size, ok := interface{}(m.Percentage).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Percentage) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *FaultDelay_FixedDelay) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FixedDelay != nil { + l = (*durationpb.Duration)(m.FixedDelay).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *FaultDelay_HeaderDelay_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HeaderDelay != nil { + l = m.HeaderDelay.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *FaultRateLimit_FixedLimit) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LimitKbps != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.LimitKbps)) + } + n += len(m.unknownFields) + return n +} + +func (m *FaultRateLimit_HeaderLimit) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *FaultRateLimit) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.LimitType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Percentage != nil { + if size, ok := interface{}(m.Percentage).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Percentage) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *FaultRateLimit_FixedLimit_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FixedLimit != nil { + l = m.FixedLimit.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *FaultRateLimit_HeaderLimit_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HeaderLimit != nil { + l = m.HeaderLimit.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff 
--git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.go index 29569d824dd..cea58cea9d5 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/extensions/filters/http/fault/v3/fault.proto package faultv3 @@ -12,10 +12,10 @@ import ( v31 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3" v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - _struct "github.com/golang/protobuf/ptypes/struct" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -182,7 +182,7 @@ type HTTPFault struct { // Like other :ref:`circuit breakers ` in Envoy, this is a fuzzy // limit. It's possible for the number of active faults to rise slightly above the configured // amount due to the implementation details. - MaxActiveFaults *wrappers.UInt32Value `protobuf:"bytes,6,opt,name=max_active_faults,json=maxActiveFaults,proto3" json:"max_active_faults,omitempty"` + MaxActiveFaults *wrapperspb.UInt32Value `protobuf:"bytes,6,opt,name=max_active_faults,json=maxActiveFaults,proto3" json:"max_active_faults,omitempty"` // The response rate limit to be applied to the response body of the stream. When configured, // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent // ` runtime key. @@ -223,7 +223,7 @@ type HTTPFault struct { // This data can be logged as part of Access Logs using the :ref:`command operator // ` %DYNAMIC_METADATA(NAMESPACE)%, where NAMESPACE is the name of // the fault filter. 
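The same well-known-type migration applies in this hunk: `wrappers.UInt32Value` becomes `wrapperspb.UInt32Value` and `_struct.Struct` becomes `structpb.Struct`, again via alias-compatible packages. The modern packages also ship convenient constructors. A sketch of building the two migrated HTTPFault field values; the helper name and the metadata contents are invented for illustration:

package example

import (
	"google.golang.org/protobuf/types/known/structpb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// exampleFaultFields builds illustrative values for the two HTTPFault
// fields whose Go types this hunk migrates: max_active_faults
// (*wrapperspb.UInt32Value) and filter_metadata (*structpb.Struct).
func exampleFaultFields() (*wrapperspb.UInt32Value, *structpb.Struct, error) {
	maxActive := wrapperspb.UInt32(10)
	meta, err := structpb.NewStruct(map[string]interface{}{
		"owner": "chaos-testing", // arbitrary example metadata
	})
	return maxActive, meta, err
}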
- FilterMetadata *_struct.Struct `protobuf:"bytes,16,opt,name=filter_metadata,json=filterMetadata,proto3" json:"filter_metadata,omitempty"` + FilterMetadata *structpb.Struct `protobuf:"bytes,16,opt,name=filter_metadata,json=filterMetadata,proto3" json:"filter_metadata,omitempty"` } func (x *HTTPFault) Reset() { @@ -293,7 +293,7 @@ func (x *HTTPFault) GetDownstreamNodes() []string { return nil } -func (x *HTTPFault) GetMaxActiveFaults() *wrappers.UInt32Value { +func (x *HTTPFault) GetMaxActiveFaults() *wrapperspb.UInt32Value { if x != nil { return x.MaxActiveFaults } @@ -363,7 +363,7 @@ func (x *HTTPFault) GetDisableDownstreamClusterStats() bool { return false } -func (x *HTTPFault) GetFilterMetadata() *_struct.Struct { +func (x *HTTPFault) GetFilterMetadata() *structpb.Struct { if x != nil { return x.FilterMetadata } @@ -566,9 +566,9 @@ var file_envoy_extensions_filters_http_fault_v3_fault_proto_goTypes = []interfac (*v3.FractionalPercent)(nil), // 3: envoy.type.v3.FractionalPercent (*v31.FaultDelay)(nil), // 4: envoy.extensions.filters.common.fault.v3.FaultDelay (*v32.HeaderMatcher)(nil), // 5: envoy.config.route.v3.HeaderMatcher - (*wrappers.UInt32Value)(nil), // 6: google.protobuf.UInt32Value + (*wrapperspb.UInt32Value)(nil), // 6: google.protobuf.UInt32Value (*v31.FaultRateLimit)(nil), // 7: envoy.extensions.filters.common.fault.v3.FaultRateLimit - (*_struct.Struct)(nil), // 8: google.protobuf.Struct + (*structpb.Struct)(nil), // 8: google.protobuf.Struct } var file_envoy_extensions_filters_http_fault_v3_fault_proto_depIdxs = []int32{ 2, // 0: envoy.extensions.filters.http.fault.v3.FaultAbort.header_abort:type_name -> envoy.extensions.filters.http.fault.v3.FaultAbort.HeaderAbort diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.validate.go index 2521f6fa4cf..f3cc33072a5 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/extensions/filters/http/fault/v3/fault.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault_vtproto.pb.go new file mode 100644 index 00000000000..8baeafe14e9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault_vtproto.pb.go @@ -0,0 +1,546 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/filters/http/fault/v3/fault.proto + +package faultv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *FaultAbort_HeaderAbort) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FaultAbort_HeaderAbort) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultAbort_HeaderAbort) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *FaultAbort) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FaultAbort) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultAbort) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ErrorType.(*FaultAbort_GrpcStatus); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ErrorType.(*FaultAbort_HeaderAbort_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Percentage != nil { + if vtmsg, ok := interface{}(m.Percentage).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Percentage) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if msg, ok := m.ErrorType.(*FaultAbort_HttpStatus); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *FaultAbort_HttpStatus) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultAbort_HttpStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HttpStatus)) + i-- + dAtA[i] = 0x10 + return len(dAtA) - i, nil +} +func (m *FaultAbort_HeaderAbort_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultAbort_HeaderAbort_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HeaderAbort != nil { + size, err := m.HeaderAbort.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *FaultAbort_GrpcStatus) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultAbort_GrpcStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.GrpcStatus)) + i-- + dAtA[i] = 0x28 + return len(dAtA) - i, nil +} +func (m *HTTPFault) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPFault) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HTTPFault) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.FilterMetadata != nil { + size, err := (*structpb.Struct)(m.FilterMetadata).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.DisableDownstreamClusterStats { + i-- + if m.DisableDownstreamClusterStats { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x78 + } + if len(m.AbortGrpcStatusRuntime) > 0 { + i -= len(m.AbortGrpcStatusRuntime) + copy(dAtA[i:], m.AbortGrpcStatusRuntime) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AbortGrpcStatusRuntime))) + i-- + dAtA[i] = 0x72 + } + if len(m.ResponseRateLimitPercentRuntime) > 0 { + i -= len(m.ResponseRateLimitPercentRuntime) + copy(dAtA[i:], m.ResponseRateLimitPercentRuntime) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseRateLimitPercentRuntime))) + i-- + dAtA[i] = 0x6a + } + if len(m.MaxActiveFaultsRuntime) > 0 { + i -= len(m.MaxActiveFaultsRuntime) + copy(dAtA[i:], m.MaxActiveFaultsRuntime) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.MaxActiveFaultsRuntime))) + i-- + dAtA[i] = 0x62 + } + if len(m.AbortHttpStatusRuntime) > 0 { + i -= len(m.AbortHttpStatusRuntime) + copy(dAtA[i:], m.AbortHttpStatusRuntime) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AbortHttpStatusRuntime))) + i-- + dAtA[i] = 0x5a + } + if len(m.DelayDurationRuntime) > 0 { + i -= len(m.DelayDurationRuntime) + copy(dAtA[i:], m.DelayDurationRuntime) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DelayDurationRuntime))) + i-- + dAtA[i] = 0x52 + } + if len(m.AbortPercentRuntime) > 0 { + i -= len(m.AbortPercentRuntime) + copy(dAtA[i:], m.AbortPercentRuntime) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AbortPercentRuntime))) + i-- + dAtA[i] = 0x4a + } + if len(m.DelayPercentRuntime) > 0 { + i -= len(m.DelayPercentRuntime) + copy(dAtA[i:], m.DelayPercentRuntime) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DelayPercentRuntime))) + i-- + dAtA[i] = 0x42 + } + if m.ResponseRateLimit != nil { + if vtmsg, ok := interface{}(m.ResponseRateLimit).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(size)) + } else { + encoded, err := proto.Marshal(m.ResponseRateLimit) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + if m.MaxActiveFaults != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxActiveFaults).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.DownstreamNodes) > 0 { + for iNdEx := len(m.DownstreamNodes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DownstreamNodes[iNdEx]) + copy(dAtA[i:], m.DownstreamNodes[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DownstreamNodes[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Headers[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Headers[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.UpstreamCluster) > 0 { + i -= len(m.UpstreamCluster) + copy(dAtA[i:], m.UpstreamCluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.UpstreamCluster))) + i-- + dAtA[i] = 0x1a + } + if m.Abort != nil { + size, err := m.Abort.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Delay != nil { + if vtmsg, ok := interface{}(m.Delay).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Delay) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FaultAbort_HeaderAbort) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *FaultAbort) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.ErrorType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Percentage != nil { + if size, ok := interface{}(m.Percentage).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Percentage) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *FaultAbort_HttpStatus) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.HttpStatus)) + return n +} +func (m *FaultAbort_HeaderAbort_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HeaderAbort != nil { + l = m.HeaderAbort.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *FaultAbort_GrpcStatus) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + 
protohelpers.SizeOfVarint(uint64(m.GrpcStatus)) + return n +} +func (m *HTTPFault) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Delay != nil { + if size, ok := interface{}(m.Delay).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Delay) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Abort != nil { + l = m.Abort.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.UpstreamCluster) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Headers) > 0 { + for _, e := range m.Headers { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DownstreamNodes) > 0 { + for _, s := range m.DownstreamNodes { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.MaxActiveFaults != nil { + l = (*wrapperspb.UInt32Value)(m.MaxActiveFaults).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ResponseRateLimit != nil { + if size, ok := interface{}(m.ResponseRateLimit).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ResponseRateLimit) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DelayPercentRuntime) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.AbortPercentRuntime) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DelayDurationRuntime) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.AbortHttpStatusRuntime) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.MaxActiveFaultsRuntime) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ResponseRateLimitPercentRuntime) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.AbortGrpcStatusRuntime) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DisableDownstreamClusterStats { + n += 2 + } + if m.FilterMetadata != nil { + l = (*structpb.Struct)(m.FilterMetadata).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.go index 0dc8c2583f9..bcf5c7d4072 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/extensions/filters/http/rbac/v3/rbac.proto package rbacv3 @@ -25,7 +25,7 @@ const ( ) // RBAC filter config. -// [#next-free-field: 6] +// [#next-free-field: 8] type RBAC struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -36,6 +36,10 @@ type RBAC struct { // If present and empty, DENY. // If both rules and matcher are configured, rules will be ignored. 
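Throughout the SizeVT bodies above, a length-delimited field costs its tag plus a varint of its length plus the payload: field numbers 1 through 15 have one-byte tags, so the generated code adds `1 + l + protohelpers.SizeOfVarint(uint64(l))`, while HTTPFault's filter_metadata, field 16, needs a two-byte tag and adds `2 + l + ...` (matching the `0x82, 0x1` byte pair its marshaller writes). A self-contained sketch of that arithmetic; `sizeOfVarint` and `tagSize` are stand-ins for the vtprotobuf helpers, not exports of this patch:

package example

// sizeOfVarint mirrors what protohelpers.SizeOfVarint computes: the number
// of bytes v occupies in base-128 varint encoding.
func sizeOfVarint(v uint64) int {
	n := 1
	for v >= 0x80 {
		v >>= 7
		n++
	}
	return n
}

// tagSize is the encoded size of a field key, (fieldNumber << 3) | wireType.
// Field numbers 1-15 fit in a single byte; field 16 is the first to need
// two, which is why filter_metadata contributes "2 + l + ..." above.
func tagSize(fieldNumber int) int {
	return sizeOfVarint(uint64(fieldNumber)<<3 | 2) // wire type 2: length-delimited
}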
Rules *v3.RBAC `protobuf:"bytes,1,opt,name=rules,proto3" json:"rules,omitempty"` + // If specified, rules will emit stats with the given prefix. + // This is useful to distinguish the stat when there are more than 1 RBAC filter configured with + // rules. + RulesStatPrefix string `protobuf:"bytes,6,opt,name=rules_stat_prefix,json=rulesStatPrefix,proto3" json:"rules_stat_prefix,omitempty"` // The match tree to use when resolving RBAC action for incoming requests. Requests do not // match any matcher will be denied. // If absent, no enforcing RBAC matcher will be applied. @@ -54,6 +58,8 @@ type RBAC struct { // This is useful to distinguish the stat when there are more than 1 RBAC filter configured with // shadow rules. ShadowRulesStatPrefix string `protobuf:"bytes,3,opt,name=shadow_rules_stat_prefix,json=shadowRulesStatPrefix,proto3" json:"shadow_rules_stat_prefix,omitempty"` + // If track_per_rule_stats is true, counters will be published for each rule and shadow rule. + TrackPerRuleStats bool `protobuf:"varint,7,opt,name=track_per_rule_stats,json=trackPerRuleStats,proto3" json:"track_per_rule_stats,omitempty"` } func (x *RBAC) Reset() { @@ -95,6 +101,13 @@ func (x *RBAC) GetRules() *v3.RBAC { return nil } +func (x *RBAC) GetRulesStatPrefix() string { + if x != nil { + return x.RulesStatPrefix + } + return "" +} + func (x *RBAC) GetMatcher() *v31.Matcher { if x != nil { return x.Matcher @@ -123,6 +136,13 @@ func (x *RBAC) GetShadowRulesStatPrefix() string { return "" } +func (x *RBAC) GetTrackPerRuleStats() bool { + if x != nil { + return x.TrackPerRuleStats + } + return false +} + type RBACPerRoute struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -193,56 +213,62 @@ var file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDesc = []byte{ 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x22, 0xdd, 0x03, 0x0a, 0x04, 0x52, 0x42, 0x41, 0x43, 0x12, 0x49, 0x0a, 0x05, 0x72, 0x75, 0x6c, + 0x22, 0xba, 0x04, 0x0a, 0x04, 0x52, 0x42, 0x41, 0x43, 0x12, 0x49, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x42, 0x17, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x11, 0x12, 0x0f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x05, 0x72, - 0x75, 0x6c, 0x65, 0x73, 0x12, 0x57, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x42, 0x1f, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x11, 0x12, 0x0f, 0x72, 0x75, 0x6c, - 0x65, 0x73, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0xd2, 0xc6, 0xa4, 0xe1, - 0x06, 0x02, 0x08, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x5d, 0x0a, - 0x0c, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x42, - 0x1e, 
0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x18, 0x12, 0x16, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x5f, - 0x72, 0x75, 0x6c, 0x65, 0x73, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, - 0x0b, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x6b, 0x0a, 0x0e, - 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x72, 0x42, 0x26, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x18, 0x12, 0x16, 0x73, 0x68, 0x61, 0x64, - 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, - 0x65, 0x72, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x0d, 0x73, 0x68, 0x61, 0x64, - 0x6f, 0x77, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x68, 0x61, - 0x64, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x70, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x73, 0x68, 0x61, - 0x64, 0x6f, 0x77, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x53, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, - 0x69, 0x78, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x68, - 0x74, 0x74, 0x70, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x42, 0x41, 0x43, - 0x22, 0x8b, 0x01, 0x0a, 0x0c, 0x52, 0x42, 0x41, 0x43, 0x50, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, - 0x65, 0x12, 0x3f, 0x0a, 0x04, 0x72, 0x62, 0x61, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, - 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x52, 0x04, 0x72, 0x62, - 0x61, 0x63, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x68, - 0x74, 0x74, 0x70, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x42, 0x41, 0x43, - 0x50, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x42, 0x9f, - 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x33, 0x69, 0x6f, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x73, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x42, 0x09, - 0x52, 0x62, 0x61, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x53, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, - 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, - 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x68, 0x74, 0x74, - 0x70, 0x2f, 0x72, 0x62, 0x61, 0x63, 0x2f, 0x76, 0x33, 0x3b, 0x72, 0x62, 0x61, 0x63, 0x76, 0x33, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x75, 0x6c, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x72, 0x75, 0x6c, 0x65, 0x73, 
0x5f, 0x73, 0x74, + 0x61, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x53, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x12, 0x57, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, + 0x1f, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x11, 0x12, 0x0f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x5f, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, + 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x5d, 0x0a, 0x0c, 0x73, 0x68, 0x61, + 0x64, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, + 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x42, 0x1e, 0xf2, 0x98, 0xfe, + 0x8f, 0x05, 0x18, 0x12, 0x16, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6c, 0x65, + 0x73, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0b, 0x73, 0x68, 0x61, + 0x64, 0x6f, 0x77, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x6b, 0x0a, 0x0e, 0x73, 0x68, 0x61, 0x64, + 0x6f, 0x77, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x26, + 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x18, 0x12, 0x16, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x5f, 0x72, + 0x75, 0x6c, 0x65, 0x73, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0xd2, 0xc6, + 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x0d, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x5f, + 0x72, 0x75, 0x6c, 0x65, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x53, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2f, + 0x0a, 0x14, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x72, 0x75, 0x6c, 0x65, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x74, 0x72, + 0x61, 0x63, 0x6b, 0x50, 0x65, 0x72, 0x52, 0x75, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x3a, + 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x22, 0x8b, 0x01, + 0x0a, 0x0c, 0x52, 0x42, 0x41, 0x43, 0x50, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x3f, + 0x0a, 0x04, 0x72, 0x62, 0x61, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x72, 0x62, 0x61, + 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x52, 0x04, 0x72, 0x62, 0x61, 0x63, 0x3a, + 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 
0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x50, 0x65, 0x72, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x42, 0x9f, 0x01, 0xba, 0x80, + 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x33, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x68, + 0x74, 0x74, 0x70, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x42, 0x09, 0x52, 0x62, 0x61, + 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x53, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, + 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2f, 0x72, + 0x62, 0x61, 0x63, 0x2f, 0x76, 0x33, 0x3b, 0x72, 0x62, 0x61, 0x63, 0x76, 0x33, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.validate.go index bfabd45b8a3..1d820564bac 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/extensions/filters/http/rbac/v3/rbac.proto @@ -85,6 +86,8 @@ func (m *RBAC) validate(all bool) error { } } + // no validation rules for RulesStatPrefix + if all { switch v := interface{}(m.GetMatcher()).(type) { case interface{ ValidateAll() error }: @@ -174,6 +177,8 @@ func (m *RBAC) validate(all bool) error { // no validation rules for ShadowRulesStatPrefix + // no validation rules for TrackPerRuleStats + if len(errors) > 0 { return RBACMultiError(errors) } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac_vtproto.pb.go new file mode 100644 index 00000000000..1e9cd9a0647 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac_vtproto.pb.go @@ -0,0 +1,283 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/filters/http/rbac/v3/rbac.proto + +package rbacv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *RBAC) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RBAC) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RBAC) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TrackPerRuleStats { + i-- + if m.TrackPerRuleStats { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if len(m.RulesStatPrefix) > 0 { + i -= len(m.RulesStatPrefix) + copy(dAtA[i:], m.RulesStatPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RulesStatPrefix))) + i-- + dAtA[i] = 0x32 + } + if m.ShadowMatcher != nil { + if vtmsg, ok := interface{}(m.ShadowMatcher).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ShadowMatcher) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + if m.Matcher != nil { + if vtmsg, ok := interface{}(m.Matcher).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Matcher) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if len(m.ShadowRulesStatPrefix) > 0 { + i -= len(m.ShadowRulesStatPrefix) + copy(dAtA[i:], m.ShadowRulesStatPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ShadowRulesStatPrefix))) + i-- + dAtA[i] = 0x1a + } + if m.ShadowRules != nil { + if vtmsg, ok := interface{}(m.ShadowRules).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ShadowRules) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.Rules != nil { + if vtmsg, ok := interface{}(m.Rules).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Rules) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RBACPerRoute) MarshalVTStrict() (dAtA []byte, err error) 
{ + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RBACPerRoute) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RBACPerRoute) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Rbac != nil { + size, err := m.Rbac.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func (m *RBAC) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Rules != nil { + if size, ok := interface{}(m.Rules).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Rules) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ShadowRules != nil { + if size, ok := interface{}(m.ShadowRules).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ShadowRules) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ShadowRulesStatPrefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Matcher != nil { + if size, ok := interface{}(m.Matcher).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Matcher) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ShadowMatcher != nil { + if size, ok := interface{}(m.ShadowMatcher).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ShadowMatcher) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RulesStatPrefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TrackPerRuleStats { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RBACPerRoute) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Rbac != nil { + l = m.Rbac.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.go index a53a6f18c9f..01a368894bc 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/extensions/filters/http/router/v3/router.proto package routerv3 @@ -12,10 +12,10 @@ import ( v3 "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3" v31 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - duration "github.com/golang/protobuf/ptypes/duration" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -35,7 +35,7 @@ type Router struct { // Whether the router generates dynamic cluster statistics. Defaults to // true. Can be disabled in high performance scenarios. - DynamicStats *wrappers.BoolValue `protobuf:"bytes,1,opt,name=dynamic_stats,json=dynamicStats,proto3" json:"dynamic_stats,omitempty"` + DynamicStats *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=dynamic_stats,json=dynamicStats,proto3" json:"dynamic_stats,omitempty"` // Whether to start a child span for egress routed calls. This can be // useful in scenarios where other filters (auth, ratelimit, etc.) make // outbound calls and have child spans rooted at the same ingress @@ -141,7 +141,7 @@ func (*Router) Descriptor() ([]byte, []int) { return file_envoy_extensions_filters_http_router_v3_router_proto_rawDescGZIP(), []int{0} } -func (x *Router) GetDynamicStats() *wrappers.BoolValue { +func (x *Router) GetDynamicStats() *wrapperspb.BoolValue { if x != nil { return x.DynamicStats } @@ -221,7 +221,7 @@ type Router_UpstreamAccessLogOptions struct { // will flush access logs periodically at the specified interval. This is especially useful in the // case of long-lived requests, such as CONNECT and Websockets. // The interval must be at least 1 millisecond. 
- UpstreamLogFlushInterval *duration.Duration `protobuf:"bytes,2,opt,name=upstream_log_flush_interval,json=upstreamLogFlushInterval,proto3" json:"upstream_log_flush_interval,omitempty"` + UpstreamLogFlushInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=upstream_log_flush_interval,json=upstreamLogFlushInterval,proto3" json:"upstream_log_flush_interval,omitempty"` } func (x *Router_UpstreamAccessLogOptions) Reset() { @@ -263,7 +263,7 @@ func (x *Router_UpstreamAccessLogOptions) GetFlushUpstreamLogOnUpstreamStream() return false } -func (x *Router_UpstreamAccessLogOptions) GetUpstreamLogFlushInterval() *duration.Duration { +func (x *Router_UpstreamAccessLogOptions) GetUpstreamLogFlushInterval() *durationpb.Duration { if x != nil { return x.UpstreamLogFlushInterval } @@ -399,10 +399,10 @@ var file_envoy_extensions_filters_http_router_v3_router_proto_msgTypes = make([] var file_envoy_extensions_filters_http_router_v3_router_proto_goTypes = []interface{}{ (*Router)(nil), // 0: envoy.extensions.filters.http.router.v3.Router (*Router_UpstreamAccessLogOptions)(nil), // 1: envoy.extensions.filters.http.router.v3.Router.UpstreamAccessLogOptions - (*wrappers.BoolValue)(nil), // 2: google.protobuf.BoolValue + (*wrapperspb.BoolValue)(nil), // 2: google.protobuf.BoolValue (*v3.AccessLog)(nil), // 3: envoy.config.accesslog.v3.AccessLog (*v31.HttpFilter)(nil), // 4: envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter - (*duration.Duration)(nil), // 5: google.protobuf.Duration + (*durationpb.Duration)(nil), // 5: google.protobuf.Duration } var file_envoy_extensions_filters_http_router_v3_router_proto_depIdxs = []int32{ 2, // 0: envoy.extensions.filters.http.router.v3.Router.dynamic_stats:type_name -> google.protobuf.BoolValue diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.validate.go index bb99e4b66b9..151afcaa33b 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/extensions/filters/http/router/v3/router.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router_vtproto.pb.go new file mode 100644 index 00000000000..88105c3e186 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router_vtproto.pb.go @@ -0,0 +1,302 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
+// source: envoy/extensions/filters/http/router/v3/router.proto + +package routerv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Router_UpstreamAccessLogOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Router_UpstreamAccessLogOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Router_UpstreamAccessLogOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.UpstreamLogFlushInterval != nil { + size, err := (*durationpb.Duration)(m.UpstreamLogFlushInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.FlushUpstreamLogOnUpstreamStream { + i-- + if m.FlushUpstreamLogOnUpstreamStream { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Router) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Router) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Router) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.UpstreamLogOptions != nil { + size, err := m.UpstreamLogOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if len(m.UpstreamHttpFilters) > 0 { + for iNdEx := len(m.UpstreamHttpFilters) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.UpstreamHttpFilters[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UpstreamHttpFilters[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + } + if m.SuppressGrpcRequestFailureCodeStats { + i-- + if m.SuppressGrpcRequestFailureCodeStats { 
+ dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.RespectExpectedRqTimeout { + i-- + if m.RespectExpectedRqTimeout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.StrictCheckHeaders) > 0 { + for iNdEx := len(m.StrictCheckHeaders) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.StrictCheckHeaders[iNdEx]) + copy(dAtA[i:], m.StrictCheckHeaders[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StrictCheckHeaders[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.SuppressEnvoyHeaders { + i-- + if m.SuppressEnvoyHeaders { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.UpstreamLog) > 0 { + for iNdEx := len(m.UpstreamLog) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.UpstreamLog[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UpstreamLog[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + } + if m.StartChildSpan { + i-- + if m.StartChildSpan { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.DynamicStats != nil { + size, err := (*wrapperspb.BoolValue)(m.DynamicStats).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Router_UpstreamAccessLogOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FlushUpstreamLogOnUpstreamStream { + n += 2 + } + if m.UpstreamLogFlushInterval != nil { + l = (*durationpb.Duration)(m.UpstreamLogFlushInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Router) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DynamicStats != nil { + l = (*wrapperspb.BoolValue)(m.DynamicStats).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StartChildSpan { + n += 2 + } + if len(m.UpstreamLog) > 0 { + for _, e := range m.UpstreamLog { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.SuppressEnvoyHeaders { + n += 2 + } + if len(m.StrictCheckHeaders) > 0 { + for _, s := range m.StrictCheckHeaders { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.RespectExpectedRqTimeout { + n += 2 + } + if m.SuppressGrpcRequestFailureCodeStats { + n += 2 + } + if len(m.UpstreamHttpFilters) > 0 { + for _, e := range m.UpstreamHttpFilters { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.UpstreamLogOptions != nil { + l = m.UpstreamLogOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.go 
b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.go index 91e76c5f08c..03cc091234a 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto package http_connection_managerv3 @@ -17,11 +17,11 @@ import ( v34 "github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3" v33 "github.com/envoyproxy/go-control-plane/envoy/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any1 "github.com/golang/protobuf/ptypes/any" - duration "github.com/golang/protobuf/ptypes/duration" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -338,7 +338,7 @@ func (HttpConnectionManager_Tracing_OperationName) EnumDescriptor() ([]byte, []i return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 0, 0} } -// [#next-free-field: 57] +// [#next-free-field: 59] type HttpConnectionManager struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -363,7 +363,7 @@ type HttpConnectionManager struct { // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked // documentation for more information. Defaults to false. - AddUserAgent *wrappers.BoolValue `protobuf:"bytes,6,opt,name=add_user_agent,json=addUserAgent,proto3" json:"add_user_agent,omitempty"` + AddUserAgent *wrapperspb.BoolValue `protobuf:"bytes,6,opt,name=add_user_agent,json=addUserAgent,proto3" json:"add_user_agent,omitempty"` // Presence of the object defines whether the connection manager // emits :ref:`tracing ` data to the :ref:`configured tracing provider // `. @@ -371,6 +371,20 @@ type HttpConnectionManager struct { // Additional settings for HTTP requests handled by the connection manager. These will be // applicable to both HTTP1 and HTTP2 requests. CommonHttpProtocolOptions *v3.HttpProtocolOptions `protobuf:"bytes,35,opt,name=common_http_protocol_options,json=commonHttpProtocolOptions,proto3" json:"common_http_protocol_options,omitempty"` + // If set to true, Envoy will not start a drain timer for downstream HTTP1 connections after + // :ref:`common_http_protocol_options.max_connection_duration + // ` passes. + // Instead, Envoy will wait for the next downstream request, add connection:close to the response + // headers, then close the connection after the stream ends. 
+ // + // This behavior is compliant with `RFC 9112 section 9.6 `_ + // + // If set to false, “max_connection_duration“ will cause Envoy to enter the normal drain + // sequence for HTTP1 with Envoy eventually closing the connection (once there are no active + // streams). + // + // Has no effect if “max_connection_duration“ is unset. Defaults to false. + Http1SafeMaxConnectionDuration bool `protobuf:"varint,58,opt,name=http1_safe_max_connection_duration,json=http1SafeMaxConnectionDuration,proto3" json:"http1_safe_max_connection_duration,omitempty"` // Additional HTTP/1 settings that are passed to the HTTP/1 codec. // [#comment:TODO: The following fields are ignored when the // :ref:`header validation configuration ` @@ -396,7 +410,7 @@ type HttpConnectionManager struct { // The maximum request headers size for incoming connections. // If unconfigured, the default max request headers allowed is 60 KiB. // Requests that exceed this limit will receive a 431 response. - MaxRequestHeadersKb *wrappers.UInt32Value `protobuf:"bytes,29,opt,name=max_request_headers_kb,json=maxRequestHeadersKb,proto3" json:"max_request_headers_kb,omitempty"` + MaxRequestHeadersKb *wrapperspb.UInt32Value `protobuf:"bytes,29,opt,name=max_request_headers_kb,json=maxRequestHeadersKb,proto3" json:"max_request_headers_kb,omitempty"` // The stream idle timeout for connections managed by the connection manager. // If not specified, this defaults to 5 minutes. The default value was selected // so as not to interfere with any smaller configured timeouts that may have @@ -436,26 +450,27 @@ type HttpConnectionManager struct { // // A value of 0 will completely disable the connection manager stream idle // timeout, although per-route idle timeout overrides will continue to apply. - StreamIdleTimeout *duration.Duration `protobuf:"bytes,24,opt,name=stream_idle_timeout,json=streamIdleTimeout,proto3" json:"stream_idle_timeout,omitempty"` + StreamIdleTimeout *durationpb.Duration `protobuf:"bytes,24,opt,name=stream_idle_timeout,json=streamIdleTimeout,proto3" json:"stream_idle_timeout,omitempty"` // The amount of time that Envoy will wait for the entire request to be received. // The timer is activated when the request is initiated, and is disarmed when the last byte of the // request is sent upstream (i.e. all decoding filters have processed the request), OR when the // response is initiated. If not specified or set to 0, this timeout is disabled. - RequestTimeout *duration.Duration `protobuf:"bytes,28,opt,name=request_timeout,json=requestTimeout,proto3" json:"request_timeout,omitempty"` + RequestTimeout *durationpb.Duration `protobuf:"bytes,28,opt,name=request_timeout,json=requestTimeout,proto3" json:"request_timeout,omitempty"` // The amount of time that Envoy will wait for the request headers to be received. The timer is // activated when the first byte of the headers is received, and is disarmed when the last byte of // the headers has been received. If not specified or set to 0, this timeout is disabled. - RequestHeadersTimeout *duration.Duration `protobuf:"bytes,41,opt,name=request_headers_timeout,json=requestHeadersTimeout,proto3" json:"request_headers_timeout,omitempty"` + RequestHeadersTimeout *durationpb.Duration `protobuf:"bytes,41,opt,name=request_headers_timeout,json=requestHeadersTimeout,proto3" json:"request_headers_timeout,omitempty"` // The time that Envoy will wait between sending an HTTP/2 “shutdown // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. 
// This is used so that Envoy provides a grace period for new streams that // race with the final GOAWAY frame. During this grace period, Envoy will // continue to accept new streams. After the grace period, a final GOAWAY // frame is sent and Envoy will start refusing new streams. Draining occurs - // both when a connection hits the idle timeout or during general server - // draining. The default grace period is 5000 milliseconds (5 seconds) if this - // option is not specified. - DrainTimeout *duration.Duration `protobuf:"bytes,12,opt,name=drain_timeout,json=drainTimeout,proto3" json:"drain_timeout,omitempty"` + // either when a connection hits the idle timeout, when :ref:`max_connection_duration + // ` + // is reached, or during general server draining. The default grace period is + // 5000 milliseconds (5 seconds) if this option is not specified. + DrainTimeout *durationpb.Duration `protobuf:"bytes,12,opt,name=drain_timeout,json=drainTimeout,proto3" json:"drain_timeout,omitempty"` // The delayed close timeout is for downstream connections managed by the HTTP connection manager. // It is defined as a grace period after connection close processing has been locally initiated // during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy @@ -488,27 +503,34 @@ type HttpConnectionManager struct { // A value of 0 will completely disable delayed close processing. When disabled, the downstream // connection's socket will be closed immediately after the write flush is completed or will // never close if the write flush does not complete. - DelayedCloseTimeout *duration.Duration `protobuf:"bytes,26,opt,name=delayed_close_timeout,json=delayedCloseTimeout,proto3" json:"delayed_close_timeout,omitempty"` + DelayedCloseTimeout *durationpb.Duration `protobuf:"bytes,26,opt,name=delayed_close_timeout,json=delayedCloseTimeout,proto3" json:"delayed_close_timeout,omitempty"` // Configuration for :ref:`HTTP access logs ` // emitted by the connection manager. AccessLog []*v31.AccessLog `protobuf:"bytes,13,rep,name=access_log,json=accessLog,proto3" json:"access_log,omitempty"` + // The interval to flush the above access logs. + // // .. attention:: - // This field is deprecated in favor of - // :ref:`access_log_flush_interval - // `. - // Note that if both this field and :ref:`access_log_flush_interval - // ` - // are specified, the former (deprecated field) is ignored. + // + // This field is deprecated in favor of + // :ref:`access_log_flush_interval + // `. + // Note that if both this field and :ref:`access_log_flush_interval + // ` + // are specified, the former (deprecated field) is ignored. // // Deprecated: Marked as deprecated in envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto. - AccessLogFlushInterval *duration.Duration `protobuf:"bytes,54,opt,name=access_log_flush_interval,json=accessLogFlushInterval,proto3" json:"access_log_flush_interval,omitempty"` + AccessLogFlushInterval *durationpb.Duration `protobuf:"bytes,54,opt,name=access_log_flush_interval,json=accessLogFlushInterval,proto3" json:"access_log_flush_interval,omitempty"` + // If set to true, HCM will flush an access log once when a new HTTP request is received, after the request + // headers have been evaluated, and before iterating through the HTTP filter chain. + // // .. attention:: - // This field is deprecated in favor of - // :ref:`flush_access_log_on_new_request - // `. 
- // Note that if both this field and :ref:`flush_access_log_on_new_request - // ` - // are specified, the former (deprecated field) is ignored. + // + // This field is deprecated in favor of + // :ref:`flush_access_log_on_new_request + // `. + // Note that if both this field and :ref:`flush_access_log_on_new_request + // ` + // are specified, the former (deprecated field) is ignored. // // Deprecated: Marked as deprecated in envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto. FlushAccessLogOnNewRequest bool `protobuf:"varint,55,opt,name=flush_access_log_on_new_request,json=flushAccessLogOnNewRequest,proto3" json:"flush_access_log_on_new_request,omitempty"` @@ -521,7 +543,7 @@ type HttpConnectionManager struct { // :ref:`config_http_conn_man_headers_x-forwarded-for`, // :ref:`config_http_conn_man_headers_x-envoy-internal`, and // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. - UseRemoteAddress *wrappers.BoolValue `protobuf:"bytes,14,opt,name=use_remote_address,json=useRemoteAddress,proto3" json:"use_remote_address,omitempty"` + UseRemoteAddress *wrapperspb.BoolValue `protobuf:"bytes,14,opt,name=use_remote_address,json=useRemoteAddress,proto3" json:"use_remote_address,omitempty"` // The number of additional ingress proxy hops from the right side of the // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when // determining the origin client's IP address. The default is zero if this option @@ -576,7 +598,7 @@ type HttpConnectionManager struct { // ` header if it does not exist. This defaults to // true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature // is not desired it can be disabled. - GenerateRequestId *wrappers.BoolValue `protobuf:"bytes,15,opt,name=generate_request_id,json=generateRequestId,proto3" json:"generate_request_id,omitempty"` + GenerateRequestId *wrapperspb.BoolValue `protobuf:"bytes,15,opt,name=generate_request_id,json=generateRequestId,proto3" json:"generate_request_id,omitempty"` // Whether the connection manager will keep the :ref:`x-request-id // ` header if passed for a request that is edge // (Edge request is the request from external clients to front Envoy) and not reset it, which @@ -631,7 +653,7 @@ type HttpConnectionManager struct { // [#comment:TODO: This field is ignored when the // :ref:`header validation configuration ` // is present.] - NormalizePath *wrappers.BoolValue `protobuf:"bytes,30,opt,name=normalize_path,json=normalizePath,proto3" json:"normalize_path,omitempty"` + NormalizePath *wrapperspb.BoolValue `protobuf:"bytes,30,opt,name=normalize_path,json=normalizePath,proto3" json:"normalize_path,omitempty"` // Determines if adjacent slashes in the path are merged into one before any processing of // requests by HTTP filters or routing. This affects the upstream “:path“ header as well. 
Without // setting this option, incoming requests with path “//dir///file“ will not match against route @@ -702,7 +724,7 @@ type HttpConnectionManager struct { // ` // “not“ the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging // ` - StreamErrorOnInvalidHttpMessage *wrappers.BoolValue `protobuf:"bytes,40,opt,name=stream_error_on_invalid_http_message,json=streamErrorOnInvalidHttpMessage,proto3" json:"stream_error_on_invalid_http_message,omitempty"` + StreamErrorOnInvalidHttpMessage *wrapperspb.BoolValue `protobuf:"bytes,40,opt,name=stream_error_on_invalid_http_message,json=streamErrorOnInvalidHttpMessage,proto3" json:"stream_error_on_invalid_http_message,omitempty"` // [#not-implemented-hide:] Path normalization configuration. This includes // configurations for transformations (e.g. RFC 3986 normalization or merge // adjacent slashes) and the policy to apply them. The policy determines @@ -752,10 +774,13 @@ type HttpConnectionManager struct { // Append the “x-forwarded-port“ header with the port value client used to connect to Envoy. It // will be ignored if the “x-forwarded-port“ header has been set by any trusted proxy in front of Envoy. AppendXForwardedPort bool `protobuf:"varint,51,opt,name=append_x_forwarded_port,json=appendXForwardedPort,proto3" json:"append_x_forwarded_port,omitempty"` + // Append the :ref:`config_http_conn_man_headers_x-envoy-local-overloaded` HTTP header in the scenario where + // the Overload Manager has been triggered. + AppendLocalOverload bool `protobuf:"varint,57,opt,name=append_local_overload,json=appendLocalOverload,proto3" json:"append_local_overload,omitempty"` // Whether the HCM will add ProxyProtocolFilterState to the Connection lifetime filter state. Defaults to “true“. // This should be set to “false“ in cases where Envoy's view of the downstream address may not correspond to the // actual client address, for example, if there's another proxy in front of the Envoy. 
- AddProxyProtocolConnectionState *wrappers.BoolValue `protobuf:"bytes,53,opt,name=add_proxy_protocol_connection_state,json=addProxyProtocolConnectionState,proto3" json:"add_proxy_protocol_connection_state,omitempty"` + AddProxyProtocolConnectionState *wrapperspb.BoolValue `protobuf:"bytes,53,opt,name=add_proxy_protocol_connection_state,json=addProxyProtocolConnectionState,proto3" json:"add_proxy_protocol_connection_state,omitempty"` } func (x *HttpConnectionManager) Reset() { @@ -839,7 +864,7 @@ func (x *HttpConnectionManager) GetHttpFilters() []*HttpFilter { return nil } -func (x *HttpConnectionManager) GetAddUserAgent() *wrappers.BoolValue { +func (x *HttpConnectionManager) GetAddUserAgent() *wrapperspb.BoolValue { if x != nil { return x.AddUserAgent } @@ -860,6 +885,13 @@ func (x *HttpConnectionManager) GetCommonHttpProtocolOptions() *v3.HttpProtocolO return nil } +func (x *HttpConnectionManager) GetHttp1SafeMaxConnectionDuration() bool { + if x != nil { + return x.Http1SafeMaxConnectionDuration + } + return false +} + func (x *HttpConnectionManager) GetHttpProtocolOptions() *v3.Http1ProtocolOptions { if x != nil { return x.HttpProtocolOptions @@ -902,42 +934,42 @@ func (x *HttpConnectionManager) GetSchemeHeaderTransformation() *v3.SchemeHeader return nil } -func (x *HttpConnectionManager) GetMaxRequestHeadersKb() *wrappers.UInt32Value { +func (x *HttpConnectionManager) GetMaxRequestHeadersKb() *wrapperspb.UInt32Value { if x != nil { return x.MaxRequestHeadersKb } return nil } -func (x *HttpConnectionManager) GetStreamIdleTimeout() *duration.Duration { +func (x *HttpConnectionManager) GetStreamIdleTimeout() *durationpb.Duration { if x != nil { return x.StreamIdleTimeout } return nil } -func (x *HttpConnectionManager) GetRequestTimeout() *duration.Duration { +func (x *HttpConnectionManager) GetRequestTimeout() *durationpb.Duration { if x != nil { return x.RequestTimeout } return nil } -func (x *HttpConnectionManager) GetRequestHeadersTimeout() *duration.Duration { +func (x *HttpConnectionManager) GetRequestHeadersTimeout() *durationpb.Duration { if x != nil { return x.RequestHeadersTimeout } return nil } -func (x *HttpConnectionManager) GetDrainTimeout() *duration.Duration { +func (x *HttpConnectionManager) GetDrainTimeout() *durationpb.Duration { if x != nil { return x.DrainTimeout } return nil } -func (x *HttpConnectionManager) GetDelayedCloseTimeout() *duration.Duration { +func (x *HttpConnectionManager) GetDelayedCloseTimeout() *durationpb.Duration { if x != nil { return x.DelayedCloseTimeout } @@ -952,7 +984,7 @@ func (x *HttpConnectionManager) GetAccessLog() []*v31.AccessLog { } // Deprecated: Marked as deprecated in envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto. 
-func (x *HttpConnectionManager) GetAccessLogFlushInterval() *duration.Duration { +func (x *HttpConnectionManager) GetAccessLogFlushInterval() *durationpb.Duration { if x != nil { return x.AccessLogFlushInterval } @@ -974,7 +1006,7 @@ func (x *HttpConnectionManager) GetAccessLogOptions() *HttpConnectionManager_Hcm return nil } -func (x *HttpConnectionManager) GetUseRemoteAddress() *wrappers.BoolValue { +func (x *HttpConnectionManager) GetUseRemoteAddress() *wrapperspb.BoolValue { if x != nil { return x.UseRemoteAddress } @@ -1023,7 +1055,7 @@ func (x *HttpConnectionManager) GetVia() string { return "" } -func (x *HttpConnectionManager) GetGenerateRequestId() *wrappers.BoolValue { +func (x *HttpConnectionManager) GetGenerateRequestId() *wrapperspb.BoolValue { if x != nil { return x.GenerateRequestId } @@ -1079,7 +1111,7 @@ func (x *HttpConnectionManager) GetUpgradeConfigs() []*HttpConnectionManager_Upg return nil } -func (x *HttpConnectionManager) GetNormalizePath() *wrappers.BoolValue { +func (x *HttpConnectionManager) GetNormalizePath() *wrapperspb.BoolValue { if x != nil { return x.NormalizePath } @@ -1135,7 +1167,7 @@ func (x *HttpConnectionManager) GetStripAnyHostPort() bool { return false } -func (x *HttpConnectionManager) GetStreamErrorOnInvalidHttpMessage() *wrappers.BoolValue { +func (x *HttpConnectionManager) GetStreamErrorOnInvalidHttpMessage() *wrapperspb.BoolValue { if x != nil { return x.StreamErrorOnInvalidHttpMessage } @@ -1177,7 +1209,14 @@ func (x *HttpConnectionManager) GetAppendXForwardedPort() bool { return false } -func (x *HttpConnectionManager) GetAddProxyProtocolConnectionState() *wrappers.BoolValue { +func (x *HttpConnectionManager) GetAppendLocalOverload() bool { + if x != nil { + return x.AppendLocalOverload + } + return false +} + +func (x *HttpConnectionManager) GetAddProxyProtocolConnectionState() *wrapperspb.BoolValue { if x != nil { return x.AddProxyProtocolConnectionState } @@ -1336,7 +1375,7 @@ type ResponseMapper struct { // Filter to determine if this mapper should apply. Filter *v31.AccessLogFilter `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` // The new response status code if specified. - StatusCode *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` + StatusCode *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` // The new local reply body text if specified. It will be used in the “%LOCAL_REPLY_BODY%“ // command operator in the “body_format“. Body *v3.DataSource `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"` @@ -1387,7 +1426,7 @@ func (x *ResponseMapper) GetFilter() *v31.AccessLogFilter { return nil } -func (x *ResponseMapper) GetStatusCode() *wrappers.UInt32Value { +func (x *ResponseMapper) GetStatusCode() *wrapperspb.UInt32Value { if x != nil { return x.StatusCode } @@ -1774,7 +1813,7 @@ func (m *HttpFilter) GetConfigType() isHttpFilter_ConfigType { return nil } -func (x *HttpFilter) GetTypedConfig() *any1.Any { +func (x *HttpFilter) GetTypedConfig() *anypb.Any { if x, ok := x.GetConfigType().(*HttpFilter_TypedConfig); ok { return x.TypedConfig } @@ -1814,7 +1853,7 @@ type HttpFilter_TypedConfig struct { // :ref:`ExtensionWithMatcher ` // with the desired HTTP filter. 
// [#extension-category: envoy.filters.http] - TypedConfig *any1.Any `protobuf:"bytes,4,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *anypb.Any `protobuf:"bytes,4,opt,name=typed_config,json=typedConfig,proto3,oneof"` } type HttpFilter_ConfigDiscovery struct { @@ -1839,7 +1878,7 @@ type RequestIDExtension struct { unknownFields protoimpl.UnknownFields // Request ID extension specific configuration. - TypedConfig *any1.Any `protobuf:"bytes,1,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` + TypedConfig *anypb.Any `protobuf:"bytes,1,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` } func (x *RequestIDExtension) Reset() { @@ -1874,7 +1913,7 @@ func (*RequestIDExtension) Descriptor() ([]byte, []int) { return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{8} } -func (x *RequestIDExtension) GetTypedConfig() *any1.Any { +func (x *RequestIDExtension) GetTypedConfig() *anypb.Any { if x != nil { return x.TypedConfig } @@ -1967,7 +2006,7 @@ type HttpConnectionManager_Tracing struct { // Maximum length of the request path to extract and include in the HttpUrl tag. Used to // truncate lengthy request paths to meet the needs of a tracing backend. // Default: 256 - MaxPathTagLength *wrappers.UInt32Value `protobuf:"bytes,7,opt,name=max_path_tag_length,json=maxPathTagLength,proto3" json:"max_path_tag_length,omitempty"` + MaxPathTagLength *wrapperspb.UInt32Value `protobuf:"bytes,7,opt,name=max_path_tag_length,json=maxPathTagLength,proto3" json:"max_path_tag_length,omitempty"` // A list of custom tags with unique tag name to create tags for the active span. CustomTags []*v34.CustomTag `protobuf:"bytes,8,rep,name=custom_tags,json=customTags,proto3" json:"custom_tags,omitempty"` // Configuration for an external tracing provider. @@ -2001,7 +2040,7 @@ type HttpConnectionManager_Tracing struct { // this flag should be set to true. // // The default value is false for now for backward compatibility. - SpawnUpstreamSpan *wrappers.BoolValue `protobuf:"bytes,10,opt,name=spawn_upstream_span,json=spawnUpstreamSpan,proto3" json:"spawn_upstream_span,omitempty"` + SpawnUpstreamSpan *wrapperspb.BoolValue `protobuf:"bytes,10,opt,name=spawn_upstream_span,json=spawnUpstreamSpan,proto3" json:"spawn_upstream_span,omitempty"` } func (x *HttpConnectionManager_Tracing) Reset() { @@ -2064,7 +2103,7 @@ func (x *HttpConnectionManager_Tracing) GetVerbose() bool { return false } -func (x *HttpConnectionManager_Tracing) GetMaxPathTagLength() *wrappers.UInt32Value { +func (x *HttpConnectionManager_Tracing) GetMaxPathTagLength() *wrapperspb.UInt32Value { if x != nil { return x.MaxPathTagLength } @@ -2085,7 +2124,7 @@ func (x *HttpConnectionManager_Tracing) GetProvider() *v35.Tracing_Http { return nil } -func (x *HttpConnectionManager_Tracing) GetSpawnUpstreamSpan() *wrappers.BoolValue { +func (x *HttpConnectionManager_Tracing) GetSpawnUpstreamSpan() *wrapperspb.BoolValue { if x != nil { return x.SpawnUpstreamSpan } @@ -2157,7 +2196,7 @@ type HttpConnectionManager_SetCurrentClientCertDetails struct { unknownFields protoimpl.UnknownFields // Whether to forward the subject of the client cert. Defaults to false. - Subject *wrappers.BoolValue `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"` + Subject *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"` // Whether to forward the entire client cert in URL encoded PEM format. 
This will appear in the // XFCC header comma separated from other values with the value Cert="PEM". // Defaults to false. @@ -2207,7 +2246,7 @@ func (*HttpConnectionManager_SetCurrentClientCertDetails) Descriptor() ([]byte, return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 2} } -func (x *HttpConnectionManager_SetCurrentClientCertDetails) GetSubject() *wrappers.BoolValue { +func (x *HttpConnectionManager_SetCurrentClientCertDetails) GetSubject() *wrapperspb.BoolValue { if x != nil { return x.Subject } @@ -2273,7 +2312,7 @@ type HttpConnectionManager_UpgradeConfig struct { // This can be overridden on a per-route basis with :ref:`cluster // ` as documented in the // :ref:`upgrade documentation `. - Enabled *wrappers.BoolValue `protobuf:"bytes,3,opt,name=enabled,proto3" json:"enabled,omitempty"` + Enabled *wrapperspb.BoolValue `protobuf:"bytes,3,opt,name=enabled,proto3" json:"enabled,omitempty"` } func (x *HttpConnectionManager_UpgradeConfig) Reset() { @@ -2322,7 +2361,7 @@ func (x *HttpConnectionManager_UpgradeConfig) GetFilters() []*HttpFilter { return nil } -func (x *HttpConnectionManager_UpgradeConfig) GetEnabled() *wrappers.BoolValue { +func (x *HttpConnectionManager_UpgradeConfig) GetEnabled() *wrapperspb.BoolValue { if x != nil { return x.Enabled } @@ -2571,7 +2610,7 @@ type HttpConnectionManager_HcmAccessLogOptions struct { // “requestComplete()“ method of “StreamInfo“ in access log filters, or through the “%DURATION%“ substitution // string. // The interval must be at least 1 millisecond. - AccessLogFlushInterval *duration.Duration `protobuf:"bytes,1,opt,name=access_log_flush_interval,json=accessLogFlushInterval,proto3" json:"access_log_flush_interval,omitempty"` + AccessLogFlushInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=access_log_flush_interval,json=accessLogFlushInterval,proto3" json:"access_log_flush_interval,omitempty"` // If set to true, HCM will flush an access log when a new HTTP request is received, after request // headers have been evaluated, before iterating through the HTTP filter chain. // This log record, if enabled, does not depend on periodic log records or request completion log. 
@@ -2615,7 +2654,7 @@ func (*HttpConnectionManager_HcmAccessLogOptions) Descriptor() ([]byte, []int) { return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 6} } -func (x *HttpConnectionManager_HcmAccessLogOptions) GetAccessLogFlushInterval() *duration.Duration { +func (x *HttpConnectionManager_HcmAccessLogOptions) GetAccessLogFlushInterval() *durationpb.Duration { if x != nil { return x.AccessLogFlushInterval } @@ -3018,7 +3057,7 @@ var file_envoy_extensions_filters_network_http_connection_manager_v3_http_connec 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9e, 0x41, 0x0a, 0x15, 0x48, 0x74, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9e, 0x42, 0x0a, 0x15, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x85, 0x01, 0x0a, 0x0a, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x5c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, @@ -3074,280 +3113,288 @@ var file_envoy_extensions_filters_network_http_connection_manager_v3_http_connec 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5e, 0x0a, 0x15, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, - 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x13, 0x68, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x69, 0x0a, 0x16, 0x68, 0x74, 0x74, 0x70, 0x32, 0x5f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, - 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x14, 0x68, 0x74, 0x74, - 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x60, 0x0a, 0x16, 0x68, 0x74, 0x74, 0x70, 0x33, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x2c, 0x20, 0x01, 0x28, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4a, 0x0a, 0x22, 0x68, 0x74, 0x74, 0x70, 0x31, 0x5f, + 0x73, 0x61, 0x66, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x3a, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x1e, 0x68, 
0x74, 0x74, 0x70, 0x31, 0x53, 0x61, 0x66, 0x65, 0x4d, 0x61, 0x78, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x5e, 0x0a, 0x15, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x33, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x14, 0x68, - 0x74, 0x74, 0x70, 0x33, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x2c, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, - 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0xb9, 0x01, 0x0a, 0x1c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x68, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x6d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, - 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, - 0x01, 0x52, 0x1a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, - 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x72, 0x0a, - 0x1c, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x74, - 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x30, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x48, 0x65, 0x61, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x68, + 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x69, 0x0a, 0x16, 0x68, 0x74, 0x74, 0x70, 0x32, 0x5f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x07, + 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x14, 0x68, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x60, 0x0a, + 0x16, 0x68, 0x74, 0x74, 0x70, 0x33, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x33, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x14, 0x68, 0x74, 0x74, 0x70, 0x33, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x2c, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, + 0x02, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0xb9, 0x01, + 0x0a, 0x1c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x22, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x6d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x1a, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, + 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x72, 0x0a, 0x1c, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x30, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x5d, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x6b, 0x62, 0x18, 0x1d, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, - 0x0a, 0xfa, 0x42, 0x07, 0x2a, 0x05, 0x18, 0x80, 0x40, 0x20, 0x00, 0x52, 0x13, 0x6d, 0x61, 0x78, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4b, 0x62, - 0x12, 0x52, 0x0a, 0x13, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, - 0x01, 0x52, 0x11, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x12, 0x4b, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x6e, 0x52, 0x1a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, + 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5d, 0x0a, + 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x5f, 0x6b, 0x62, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, - 0x01, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x12, 0x62, 0x0a, 0x17, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x29, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0xfa, - 0x42, 0x05, 0xaa, 0x01, 0x02, 0x32, 0x00, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x15, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3e, 0x0a, 0x0d, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x54, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x4d, 0x0a, 0x15, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x65, 0x64, - 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x1a, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x13, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x65, 0x64, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x54, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, - 0x6f, 0x67, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, - 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x09, - 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x6d, 0x0a, 0x19, 0x61, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x36, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x17, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x32, - 0x04, 
0x10, 0xc0, 0x84, 0x3d, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, - 0x52, 0x16, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x6c, 0x75, 0x73, 0x68, - 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x50, 0x0a, 0x1f, 0x66, 0x6c, 0x75, 0x73, - 0x68, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x6e, 0x5f, - 0x6e, 0x65, 0x77, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x37, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x1a, - 0x66, 0x6c, 0x75, 0x73, 0x68, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x4f, 0x6e, - 0x4e, 0x65, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x94, 0x01, 0x0a, 0x12, 0x61, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x38, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x66, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x48, 0x63, 0x6d, 0x41, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x10, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x51, 0x0a, 0x12, 0x75, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, - 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, - 0x08, 0x01, 0x52, 0x10, 0x75, 0x73, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x12, 0x2f, 0x0a, 0x14, 0x78, 0x66, 0x66, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, - 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x70, 0x73, 0x18, 0x13, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x11, 0x78, 0x66, 0x66, 0x4e, 0x75, 0x6d, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, - 0x64, 0x48, 0x6f, 0x70, 0x73, 0x12, 0x73, 0x0a, 0x20, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, - 0x6c, 0x5f, 0x69, 0x70, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x2e, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1d, 0x6f, 0x72, 0x69, - 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x49, 0x70, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x73, 0x0a, 0x20, 0x65, 0x61, - 0x72, 0x6c, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x34, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 
0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, - 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x52, 0x1d, 0x65, 0x61, 0x72, 0x6c, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x75, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0xa0, 0x01, 0x0a, 0x17, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x19, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x68, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, + 0x2a, 0x05, 0x18, 0x80, 0x40, 0x20, 0x00, 0x52, 0x13, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4b, 0x62, 0x12, 0x52, 0x0a, 0x13, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x11, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x12, 0x4b, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x0e, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x62, 0x0a, + 0x17, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x29, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0xfa, 0x42, 0x05, 0xaa, 0x01, + 0x02, 0x32, 0x00, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x15, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x12, 0x3e, 0x0a, 0x0d, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x12, 0x4d, 0x0a, 0x15, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x6f, + 0x73, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x64, 0x65, 0x6c, + 0x61, 0x79, 0x65, 0x64, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x0d, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 
0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x09, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x6d, 0x0a, 0x19, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, + 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x18, 0x36, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x17, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x32, 0x04, 0x10, 0xc0, 0x84, + 0x3d, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x16, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x12, 0x50, 0x0a, 0x1f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x6e, 0x5f, 0x6e, 0x65, 0x77, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x37, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, + 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x1a, 0x66, 0x6c, 0x75, 0x73, + 0x68, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x4f, 0x6e, 0x4e, 0x65, 0x77, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x94, 0x01, 0x0a, 0x12, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x38, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x66, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x48, 0x63, 0x6d, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x4c, 0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x10, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x51, 0x0a, + 0x12, 0x75, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x10, + 0x75, 0x73, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x12, 0x2f, 0x0a, 0x14, 0x78, 0x66, 0x66, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x72, 0x75, 0x73, + 0x74, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x70, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, + 0x78, 0x66, 0x66, 0x4e, 0x75, 0x6d, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x48, 0x6f, 0x70, + 0x73, 0x12, 0x73, 0x0a, 0x20, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x70, + 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x2e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 
0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1d, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, + 0x6c, 0x49, 0x70, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x73, 0x0a, 0x20, 0x65, 0x61, 0x72, 0x6c, 0x79, 0x5f, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x34, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1d, 0x65, 0x61, + 0x72, 0x6c, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xa0, 0x01, 0x0a, 0x17, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x68, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x26, + 0x0a, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x78, 0x66, 0x66, 0x5f, 0x61, 0x70, 0x70, 0x65, 0x6e, + 0x64, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x58, 0x66, 0x66, + 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x12, 0x1d, 0x0a, 0x03, 0x76, 0x69, 0x61, 0x18, 0x16, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, + 0x52, 0x03, 0x76, 0x69, 0x61, 0x12, 0x4a, 0x0a, 0x13, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x0f, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x11, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, + 0x64, 0x12, 0x3f, 0x0a, 0x1c, 0x70, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x20, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x70, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x49, 0x64, 0x12, 0x47, 0x0a, 0x21, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x65, 0x74, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x5f, 0x69, 0x6e, 0x5f, 0x72, + 0x65, 
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x61, + 0x6c, 0x77, 0x61, 0x79, 0x73, 0x53, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, + 0x64, 0x49, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0xb4, 0x01, 0x0a, 0x1b, + 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, + 0x65, 0x72, 0x74, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x6b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x78, 0x66, 0x66, 0x5f, 0x61, - 0x70, 0x70, 0x65, 0x6e, 0x64, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, - 0x70, 0x58, 0x66, 0x66, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x12, 0x1d, 0x0a, 0x03, 0x76, 0x69, - 0x61, 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, - 0x00, 0xc0, 0x01, 0x02, 0x52, 0x03, 0x76, 0x69, 0x61, 0x12, 0x4a, 0x0a, 0x13, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x11, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x3f, 0x0a, 0x1c, 0x70, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x20, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x70, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x47, 0x0a, 0x21, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, - 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x5f, - 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x1c, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x53, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x49, 0x64, 0x49, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0xb4, 0x01, 0x0a, 0x1b, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, - 0x10, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x6b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, - 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 
0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x18, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, - 0x6c, 0x73, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x18, 0x66, 0x6f, - 0x72, 0x77, 0x61, 0x72, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0xb4, 0x01, 0x0a, 0x1f, 0x73, 0x65, 0x74, 0x5f, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x65, - 0x72, 0x74, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x6e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, - 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, - 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, - 0x52, 0x1b, 0x73, 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x2c, 0x0a, - 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x31, 0x30, 0x30, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x69, - 0x6e, 0x75, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x78, 0x79, - 0x31, 0x30, 0x30, 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x12, 0x65, 0x0a, 0x31, 0x72, - 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x70, 0x76, 0x34, 0x5f, 0x72, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x61, 0x73, 0x5f, - 0x69, 0x70, 0x76, 0x34, 0x5f, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x69, 0x70, 0x76, 0x36, - 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x2a, 0x72, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, - 0x74, 0x49, 0x70, 0x76, 0x34, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x41, 0x73, 0x49, 0x70, 0x76, 0x34, 0x4d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x49, 0x70, - 0x76, 0x36, 0x12, 0x89, 0x01, 0x0a, 0x0f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x17, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x60, 0x2e, 0x65, + 0x6c, 0x73, 0x12, 0xb4, 0x01, 0x0a, 0x1f, 0x73, 0x65, 0x74, 0x5f, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x6e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 
0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, - 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x41, - 0x0a, 0x0e, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, - 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x0d, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x50, 0x61, 0x74, - 0x68, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x5f, 0x73, 0x6c, 0x61, 0x73, 0x68, - 0x65, 0x73, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x53, - 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x12, 0xb7, 0x01, 0x0a, 0x20, 0x70, 0x61, 0x74, 0x68, 0x5f, - 0x77, 0x69, 0x74, 0x68, 0x5f, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, 0x64, 0x5f, 0x73, 0x6c, 0x61, - 0x73, 0x68, 0x65, 0x73, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x2d, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, - 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x57, 0x69, 0x74, 0x68, 0x45, 0x73, - 0x63, 0x61, 0x70, 0x65, 0x64, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x1c, 0x70, 0x61, 0x74, 0x68, 0x57, 0x69, 0x74, 0x68, 0x45, 0x73, 0x63, 0x61, - 0x70, 0x65, 0x64, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x81, 0x01, 0x0a, 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x5f, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x4f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, - 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x44, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x12, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x12, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x72, 0x65, - 0x70, 0x6c, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x26, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x4d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, - 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 
0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, - 0x6f, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x50, 0x0a, 0x18, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x27, 0x20, - 0x01, 0x28, 0x08, 0x42, 0x17, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x11, 0x12, 0x0f, 0x73, 0x74, 0x72, - 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x52, 0x15, 0x73, 0x74, - 0x72, 0x69, 0x70, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x48, 0x6f, 0x73, 0x74, 0x50, - 0x6f, 0x72, 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x61, 0x6e, 0x79, - 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, - 0x48, 0x01, 0x52, 0x10, 0x73, 0x74, 0x72, 0x69, 0x70, 0x41, 0x6e, 0x79, 0x48, 0x6f, 0x73, 0x74, - 0x50, 0x6f, 0x72, 0x74, 0x12, 0x69, 0x0a, 0x24, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, - 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x28, 0x20, 0x01, + 0x2e, 0x53, 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x1b, 0x73, 0x65, + 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, + 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x5f, 0x31, 0x30, 0x30, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x18, + 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x31, 0x30, 0x30, 0x43, + 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x12, 0x65, 0x0a, 0x31, 0x72, 0x65, 0x70, 0x72, 0x65, + 0x73, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x70, 0x76, 0x34, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x61, 0x73, 0x5f, 0x69, 0x70, 0x76, 0x34, + 0x5f, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x69, 0x70, 0x76, 0x36, 0x18, 0x14, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x2a, 0x72, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x49, 0x70, 0x76, + 0x34, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x41, 0x73, + 0x49, 0x70, 0x76, 0x34, 0x4d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x49, 0x70, 0x76, 0x36, 0x12, 0x89, + 0x01, 0x0a, 0x0f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x73, 0x18, 0x17, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x60, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x55, 0x70, 0x67, + 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x75, 0x70, 0x67, 0x72, + 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x41, 0x0a, 0x0e, 0x6e, 0x6f, + 0x72, 0x6d, 0x61, 0x6c, 
0x69, 0x7a, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1f, - 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, - 0xa9, 0x01, 0x0a, 0x1a, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, - 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x2b, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x6b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, - 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, - 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x4e, 0x6f, 0x72, - 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x18, 0x70, 0x61, 0x74, 0x68, 0x4e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x17, 0x73, - 0x74, 0x72, 0x69, 0x70, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x6f, - 0x73, 0x74, 0x5f, 0x64, 0x6f, 0x74, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x73, 0x74, - 0x72, 0x69, 0x70, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x48, 0x6f, 0x73, 0x74, 0x44, - 0x6f, 0x74, 0x12, 0x94, 0x01, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x31, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x64, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, + 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x23, 0x0a, + 0x0d, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x5f, 0x73, 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x21, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x53, 0x6c, 0x61, 0x73, 0x68, + 0x65, 0x73, 0x12, 0xb7, 0x01, 0x0a, 0x20, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x77, 0x69, 0x74, 0x68, + 0x5f, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, 0x64, 0x5f, 0x73, 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, + 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x6f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x57, 0x69, 0x74, 0x68, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, + 0x64, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1c, + 0x70, 0x61, 0x74, 0x68, 0x57, 0x69, 0x74, 0x68, 
0x45, 0x73, 0x63, 0x61, 0x70, 0x65, 0x64, 0x53, + 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x81, 0x01, 0x0a, + 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, + 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x49, 0x44, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x7b, 0x0a, 0x12, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x79, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x26, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4d, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x50, 0x0a, + 0x18, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x5f, + 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x27, 0x20, 0x01, 0x28, 0x08, 0x42, + 0x17, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x11, 0x12, 0x0f, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x52, 0x15, 0x73, 0x74, 0x72, 0x69, 0x70, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x6f, 0x72, 0x74, 0x12, + 0x2f, 0x0a, 0x13, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x61, 0x6e, 0x79, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x10, + 0x73, 0x74, 0x72, 0x69, 0x70, 0x41, 0x6e, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x6f, 0x72, 0x74, + 0x12, 0x69, 0x0a, 0x24, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x28, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1f, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0xa9, 0x01, 0x0a, 0x1a, + 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x2b, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x6b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 
0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6f, 0x0a, 0x1e, 0x74, 0x79, 0x70, - 0x65, 0x64, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x32, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1b, 0x74, - 0x79, 0x70, 0x65, 0x64, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x35, 0x0a, 0x17, 0x61, 0x70, - 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x78, 0x5f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, - 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x33, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x61, 0x70, 0x70, - 0x65, 0x6e, 0x64, 0x58, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x50, 0x6f, 0x72, - 0x74, 0x12, 0x68, 0x0a, 0x23, 0x61, 0x64, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, + 0x61, 0x67, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x4e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x18, 0x70, + 0x61, 0x74, 0x68, 0x4e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x74, 0x72, 0x69, 0x70, + 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x64, + 0x6f, 0x74, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x73, 0x74, 0x72, 0x69, 0x70, 0x54, + 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x48, 0x6f, 0x73, 0x74, 0x44, 0x6f, 0x74, 0x12, 0x94, + 0x01, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x31, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x64, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x11, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6f, 0x0a, 0x1e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x65, 
0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1b, 0x74, 0x79, 0x70, 0x65, 0x64, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x35, 0x0a, 0x17, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, + 0x5f, 0x78, 0x5f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x5f, 0x70, 0x6f, 0x72, + 0x74, 0x18, 0x33, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x58, + 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x32, 0x0a, + 0x15, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x76, + 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x39, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x61, 0x70, + 0x70, 0x65, 0x6e, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, + 0x64, 0x12, 0x68, 0x0a, 0x23, 0x61, 0x64, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, @@ -3843,14 +3890,14 @@ var file_envoy_extensions_filters_network_http_connection_manager_v3_http_connec (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor)(nil), // 24: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement)(nil), // 25: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor.KvElement (*v32.RouteConfiguration)(nil), // 26: envoy.config.route.v3.RouteConfiguration - (*wrappers.BoolValue)(nil), // 27: google.protobuf.BoolValue + (*wrapperspb.BoolValue)(nil), // 27: google.protobuf.BoolValue (*v3.HttpProtocolOptions)(nil), // 28: envoy.config.core.v3.HttpProtocolOptions (*v3.Http1ProtocolOptions)(nil), // 29: envoy.config.core.v3.Http1ProtocolOptions (*v3.Http2ProtocolOptions)(nil), // 30: envoy.config.core.v3.Http2ProtocolOptions (*v3.Http3ProtocolOptions)(nil), // 31: envoy.config.core.v3.Http3ProtocolOptions (*v3.SchemeHeaderTransformation)(nil), // 32: envoy.config.core.v3.SchemeHeaderTransformation - (*wrappers.UInt32Value)(nil), // 33: google.protobuf.UInt32Value - (*duration.Duration)(nil), // 34: google.protobuf.Duration + (*wrapperspb.UInt32Value)(nil), // 33: google.protobuf.UInt32Value + (*durationpb.Duration)(nil), // 34: google.protobuf.Duration (*v31.AccessLog)(nil), // 35: envoy.config.accesslog.v3.AccessLog (*v3.TypedExtensionConfig)(nil), // 36: envoy.config.core.v3.TypedExtensionConfig (*v3.SubstitutionFormatString)(nil), // 37: envoy.config.core.v3.SubstitutionFormatString @@ -3859,7 +3906,7 @@ var file_envoy_extensions_filters_network_http_connection_manager_v3_http_connec (*v3.HeaderValueOption)(nil), // 40: envoy.config.core.v3.HeaderValueOption (*v3.ConfigSource)(nil), // 41: envoy.config.core.v3.ConfigSource (*v32.ScopedRouteConfiguration)(nil), // 42: envoy.config.route.v3.ScopedRouteConfiguration - (*any1.Any)(nil), // 43: google.protobuf.Any + (*anypb.Any)(nil), // 43: google.protobuf.Any 
(*v3.ExtensionConfigSource)(nil), // 44: envoy.config.core.v3.ExtensionConfigSource (*v33.Percent)(nil), // 45: envoy.type.v3.Percent (*v34.CustomTag)(nil), // 46: envoy.type.tracing.v3.CustomTag diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.go index 9b5309b0000..094e7faaf01 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -200,6 +201,8 @@ func (m *HttpConnectionManager) validate(all bool) error { } } + // no validation rules for Http1SafeMaxConnectionDuration + if all { switch v := interface{}(m.GetHttpProtocolOptions()).(type) { case interface{ ValidateAll() error }: @@ -1059,6 +1062,8 @@ func (m *HttpConnectionManager) validate(all bool) error { // no validation rules for AppendXForwardedPort + // no validation rules for AppendLocalOverload + if all { switch v := interface{}(m.GetAddProxyProtocolConnectionState()).(type) { case interface{ ValidateAll() error }: diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager_vtproto.pb.go new file mode 100644 index 00000000000..3ecbe1295a5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager_vtproto.pb.go @@ -0,0 +1,3470 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto + +package http_connection_managerv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HttpConnectionManager_Tracing) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpConnectionManager_Tracing) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_Tracing) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SpawnUpstreamSpan != nil { + size, err := (*wrapperspb.BoolValue)(m.SpawnUpstreamSpan).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if m.Provider != nil { + if vtmsg, ok := interface{}(m.Provider).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Provider) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + if len(m.CustomTags) > 0 { + for iNdEx := len(m.CustomTags) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.CustomTags[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CustomTags[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + } + if m.MaxPathTagLength != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxPathTagLength).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.Verbose { + i-- + if m.Verbose { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.OverallSampling != nil { + if vtmsg, ok := interface{}(m.OverallSampling).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.OverallSampling) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + if m.RandomSampling != nil { + if vtmsg, ok := interface{}(m.RandomSampling).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RandomSampling) + 
if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.ClientSampling != nil { + if vtmsg, ok := interface{}(m.ClientSampling).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ClientSampling) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager_InternalAddressConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpConnectionManager_InternalAddressConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_InternalAddressConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CidrRanges) > 0 { + for iNdEx := len(m.CidrRanges) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.CidrRanges[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CidrRanges[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + } + if m.UnixSockets { + i-- + if m.UnixSockets { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager_SetCurrentClientCertDetails) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpConnectionManager_SetCurrentClientCertDetails) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_SetCurrentClientCertDetails) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Chain { + i-- + if m.Chain { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.Uri { + i-- + if m.Uri { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.Dns { + i-- + if m.Dns { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.Cert { + i-- + if m.Cert { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Subject != 
nil { + size, err := (*wrapperspb.BoolValue)(m.Subject).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager_UpgradeConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpConnectionManager_UpgradeConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_UpgradeConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Enabled != nil { + size, err := (*wrapperspb.BoolValue)(m.Enabled).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Filters) > 0 { + for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Filters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.UpgradeType) > 0 { + i -= len(m.UpgradeType) + copy(dAtA[i:], m.UpgradeType) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.UpgradeType))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager_PathNormalizationOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpConnectionManager_PathNormalizationOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_PathNormalizationOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.HttpFilterTransformation != nil { + if vtmsg, ok := interface{}(m.HttpFilterTransformation).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HttpFilterTransformation) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.ForwardingTransformation != nil { + if vtmsg, ok := interface{}(m.ForwardingTransformation).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ForwardingTransformation) 
+ if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager_ProxyStatusConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpConnectionManager_ProxyStatusConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_ProxyStatusConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ProxyName.(*HttpConnectionManager_ProxyStatusConfig_LiteralProxyName); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ProxyName.(*HttpConnectionManager_ProxyStatusConfig_UseNodeId); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.SetRecommendedResponseCode { + i-- + if m.SetRecommendedResponseCode { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.RemoveResponseFlags { + i-- + if m.RemoveResponseFlags { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.RemoveConnectionTerminationDetails { + i-- + if m.RemoveConnectionTerminationDetails { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.RemoveDetails { + i-- + if m.RemoveDetails { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager_ProxyStatusConfig_UseNodeId) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_ProxyStatusConfig_UseNodeId) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.UseNodeId { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + return len(dAtA) - i, nil +} +func (m *HttpConnectionManager_ProxyStatusConfig_LiteralProxyName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_ProxyStatusConfig_LiteralProxyName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.LiteralProxyName) + copy(dAtA[i:], m.LiteralProxyName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LiteralProxyName))) + i-- + dAtA[i] = 0x32 + return len(dAtA) - i, nil +} +func (m *HttpConnectionManager_HcmAccessLogOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpConnectionManager_HcmAccessLogOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_HcmAccessLogOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { 
+ if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.FlushLogOnTunnelSuccessfullyEstablished { + i-- + if m.FlushLogOnTunnelSuccessfullyEstablished { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.FlushAccessLogOnNewRequest { + i-- + if m.FlushAccessLogOnNewRequest { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.AccessLogFlushInterval != nil { + size, err := (*durationpb.Duration)(m.AccessLogFlushInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpConnectionManager) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Http1SafeMaxConnectionDuration { + i-- + if m.Http1SafeMaxConnectionDuration { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xd0 + } + if m.AppendLocalOverload { + i-- + if m.AppendLocalOverload { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xc8 + } + if m.AccessLogOptions != nil { + size, err := m.AccessLogOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xc2 + } + if m.FlushAccessLogOnNewRequest { + i-- + if m.FlushAccessLogOnNewRequest { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xb8 + } + if m.AccessLogFlushInterval != nil { + size, err := (*durationpb.Duration)(m.AccessLogFlushInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xb2 + } + if m.AddProxyProtocolConnectionState != nil { + size, err := (*wrapperspb.BoolValue)(m.AddProxyProtocolConnectionState).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xaa + } + if len(m.EarlyHeaderMutationExtensions) > 0 { + for iNdEx := len(m.EarlyHeaderMutationExtensions) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.EarlyHeaderMutationExtensions[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.EarlyHeaderMutationExtensions[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xa2 + } + } + if m.AppendXForwardedPort { + i-- + if m.AppendXForwardedPort { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x98 + } + if m.TypedHeaderValidationConfig != nil { + if vtmsg, ok := interface{}(m.TypedHeaderValidationConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TypedHeaderValidationConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x92 + } + if m.ProxyStatusConfig != nil { + size, err := m.ProxyStatusConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x8a + } + if m.SchemeHeaderTransformation != nil { + if vtmsg, ok := interface{}(m.SchemeHeaderTransformation).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SchemeHeaderTransformation) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x82 + } + if m.StripTrailingHostDot { + i-- + if m.StripTrailingHostDot { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xf8 + } + if len(m.OriginalIpDetectionExtensions) > 0 { + for iNdEx := len(m.OriginalIpDetectionExtensions) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.OriginalIpDetectionExtensions[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.OriginalIpDetectionExtensions[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xf2 + } + } + if m.PathWithEscapedSlashesAction != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.PathWithEscapedSlashesAction)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xe8 + } + if m.Http3ProtocolOptions != nil { + if vtmsg, ok := interface{}(m.Http3ProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Http3ProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xe2 + } + if m.PathNormalizationOptions != nil { + size, err := m.PathNormalizationOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i 
-= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xda + } + if msg, ok := m.StripPortMode.(*HttpConnectionManager_StripAnyHostPort); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.RequestHeadersTimeout != nil { + size, err := (*durationpb.Duration)(m.RequestHeadersTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xca + } + if m.StreamErrorOnInvalidHttpMessage != nil { + size, err := (*wrapperspb.BoolValue)(m.StreamErrorOnInvalidHttpMessage).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc2 + } + if m.StripMatchingHostPort { + i-- + if m.StripMatchingHostPort { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb8 + } + if m.LocalReplyConfig != nil { + size, err := m.LocalReplyConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb2 + } + if m.AlwaysSetRequestIdInResponse { + i-- + if m.AlwaysSetRequestIdInResponse { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa8 + } + if m.RequestIdExtension != nil { + size, err := m.RequestIdExtension.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + if m.CommonHttpProtocolOptions != nil { + if vtmsg, ok := interface{}(m.CommonHttpProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CommonHttpProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + if m.ServerHeaderTransformation != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ServerHeaderTransformation)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x90 + } + if m.MergeSlashes { + i-- + if m.MergeSlashes { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x88 + } + if m.PreserveExternalRequestId { + i-- + if m.PreserveExternalRequestId { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x80 + } + if msg, ok := m.RouteSpecifier.(*HttpConnectionManager_ScopedRoutes); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.NormalizePath != nil { + size, err := (*wrapperspb.BoolValue)(m.NormalizePath).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 + } + if m.MaxRequestHeadersKb != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxRequestHeadersKb).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + } + if m.RequestTimeout != nil { + size, err := (*durationpb.Duration)(m.RequestTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + if m.DelayedCloseTimeout != nil { + size, err := (*durationpb.Duration)(m.DelayedCloseTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + if m.InternalAddressConfig != nil { + size, err := m.InternalAddressConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + if m.StreamIdleTimeout != nil { + size, err := (*durationpb.Duration)(m.StreamIdleTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.UpgradeConfigs) > 0 { + for iNdEx := len(m.UpgradeConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.UpgradeConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + } + if len(m.Via) > 0 { + i -= len(m.Via) + copy(dAtA[i:], m.Via) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Via))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if m.SkipXffAppend { + i-- + if m.SkipXffAppend { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if m.RepresentIpv4RemoteAddressAsIpv4MappedIpv6 { + i-- + if m.RepresentIpv4RemoteAddressAsIpv4MappedIpv6 { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if m.XffNumTrustedHops != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.XffNumTrustedHops)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if m.Proxy_100Continue { + i-- + if m.Proxy_100Continue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + } + if m.SetCurrentClientCertDetails != nil { + size, err := m.SetCurrentClientCertDetails.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.ForwardClientCertDetails != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ForwardClientCertDetails)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.GenerateRequestId != nil { + size, err := (*wrapperspb.BoolValue)(m.GenerateRequestId).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + if m.UseRemoteAddress != nil { + size, err := (*wrapperspb.BoolValue)(m.UseRemoteAddress).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if len(m.AccessLog) > 0 { + for iNdEx := len(m.AccessLog) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.AccessLog[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, 
error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AccessLog[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + } + if m.DrainTimeout != nil { + size, err := (*durationpb.Duration)(m.DrainTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if len(m.ServerName) > 0 { + i -= len(m.ServerName) + copy(dAtA[i:], m.ServerName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServerName))) + i-- + dAtA[i] = 0x52 + } + if m.Http2ProtocolOptions != nil { + if vtmsg, ok := interface{}(m.Http2ProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Http2ProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + if m.HttpProtocolOptions != nil { + if vtmsg, ok := interface{}(m.HttpProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HttpProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + if m.Tracing != nil { + size, err := m.Tracing.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.AddUserAgent != nil { + size, err := (*wrapperspb.BoolValue)(m.AddUserAgent).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.HttpFilters) > 0 { + for iNdEx := len(m.HttpFilters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.HttpFilters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if msg, ok := m.RouteSpecifier.(*HttpConnectionManager_RouteConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.RouteSpecifier.(*HttpConnectionManager_Rds); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.StatPrefix) > 0 { + i -= len(m.StatPrefix) + copy(dAtA[i:], m.StatPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StatPrefix))) + i-- + dAtA[i] = 0x12 + } + if m.CodecType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CodecType)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager_Rds) MarshalToVTStrict(dAtA []byte) 
(int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_Rds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Rds != nil { + size, err := m.Rds.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *HttpConnectionManager_RouteConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_RouteConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RouteConfig != nil { + if vtmsg, ok := interface{}(m.RouteConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RouteConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *HttpConnectionManager_ScopedRoutes) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_ScopedRoutes) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ScopedRoutes != nil { + size, err := m.ScopedRoutes.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa + } + return len(dAtA) - i, nil +} +func (m *HttpConnectionManager_StripAnyHostPort) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_StripAnyHostPort) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.StripAnyHostPort { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xd0 + return len(dAtA) - i, nil +} +func (m *LocalReplyConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalReplyConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalReplyConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.BodyFormat != nil { + if vtmsg, ok := interface{}(m.BodyFormat).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size 
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.BodyFormat) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Mappers) > 0 { + for iNdEx := len(m.Mappers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Mappers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResponseMapper) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseMapper) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResponseMapper) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.HeadersToAdd) > 0 { + for iNdEx := len(m.HeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.HeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + } + if m.BodyFormatOverride != nil { + if vtmsg, ok := interface{}(m.BodyFormatOverride).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.BodyFormatOverride) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.Body != nil { + if vtmsg, ok := interface{}(m.Body).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Body) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.StatusCode != nil { + size, err := (*wrapperspb.UInt32Value)(m.StatusCode).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Filter != nil { + if vtmsg, ok := interface{}(m.Filter).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Filter) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Rds) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Rds) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Rds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RouteConfigName) > 0 { + i -= len(m.RouteConfigName) + copy(dAtA[i:], m.RouteConfigName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RouteConfigName))) + i-- + dAtA[i] = 0x12 + } + if m.ConfigSource != nil { + if vtmsg, ok := interface{}(m.ConfigSource).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConfigSource) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScopedRouteConfigurationsList) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRouteConfigurationsList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRouteConfigurationsList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ScopedRouteConfigurations) > 0 { + for iNdEx := len(m.ScopedRouteConfigurations) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ScopedRouteConfigurations[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ScopedRouteConfigurations[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} 
+ +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x12 + } + if len(m.Separator) > 0 { + i -= len(m.Separator) + copy(dAtA[i:], m.Separator) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Separator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ExtractType.(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ExtractType.(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.ElementSeparator) > 0 { + i -= len(m.ElementSeparator) + copy(dAtA[i:], m.ElementSeparator) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ElementSeparator))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x18 + return len(dAtA) - i, nil +} +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Element != nil { + size, err := m.Element.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + 
i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Type.(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HeaderValueExtractor != nil { + size, err := m.HeaderValueExtractor.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *ScopedRoutes_ScopeKeyBuilder) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRoutes_ScopeKeyBuilder) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopeKeyBuilder) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Fragments) > 0 { + for iNdEx := len(m.Fragments) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Fragments[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutes) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRoutes) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigSpecifier.(*ScopedRoutes_ScopedRds); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ConfigSpecifier.(*ScopedRoutes_ScopedRouteConfigurationsList); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.RdsConfigSource != nil { + if vtmsg, ok := interface{}(m.RdsConfigSource).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RdsConfigSource) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.ScopeKeyBuilder != nil { + size, err := m.ScopeKeyBuilder.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutes_ScopedRouteConfigurationsList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopedRouteConfigurationsList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ScopedRouteConfigurationsList != nil { + size, err := m.ScopedRouteConfigurationsList.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *ScopedRoutes_ScopedRds) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopedRds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ScopedRds != nil { + size, err := m.ScopedRds.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *ScopedRds) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRds) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.SrdsResourcesLocator) > 0 { + i -= len(m.SrdsResourcesLocator) + copy(dAtA[i:], 
m.SrdsResourcesLocator) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SrdsResourcesLocator))) + i-- + dAtA[i] = 0x12 + } + if m.ScopedRdsConfigSource != nil { + if vtmsg, ok := interface{}(m.ScopedRdsConfigSource).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ScopedRdsConfigSource) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Disabled { + i-- + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.IsOptional { + i-- + if m.IsOptional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if msg, ok := m.ConfigType.(*HttpFilter_ConfigDiscovery); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ConfigType.(*HttpFilter_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpFilter_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpFilter_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *HttpFilter_ConfigDiscovery) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpFilter_ConfigDiscovery) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ConfigDiscovery != nil { + if vtmsg, ok := interface{}(m.ConfigDiscovery).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConfigDiscovery) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *RequestIDExtension) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestIDExtension) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RequestIDExtension) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EnvoyMobileHttpConnectionManager) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvoyMobileHttpConnectionManager) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EnvoyMobileHttpConnectionManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Config != nil { + size, err := m.Config.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager_Tracing) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClientSampling != nil { + if size, ok := interface{}(m.ClientSampling).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ClientSampling) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RandomSampling != nil { + if size, ok := interface{}(m.RandomSampling).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RandomSampling) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OverallSampling != nil { + if size, ok := interface{}(m.OverallSampling).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.OverallSampling) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Verbose { + n += 2 + } + if m.MaxPathTagLength != nil { + l = (*wrapperspb.UInt32Value)(m.MaxPathTagLength).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.CustomTags) > 0 { + for _, e := range m.CustomTags { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Provider != nil { + if size, ok := interface{}(m.Provider).(interface { + SizeVT() int + }); ok { + l 
= size.SizeVT() + } else { + l = proto.Size(m.Provider) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SpawnUpstreamSpan != nil { + l = (*wrapperspb.BoolValue)(m.SpawnUpstreamSpan).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpConnectionManager_InternalAddressConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UnixSockets { + n += 2 + } + if len(m.CidrRanges) > 0 { + for _, e := range m.CidrRanges { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *HttpConnectionManager_SetCurrentClientCertDetails) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Subject != nil { + l = (*wrapperspb.BoolValue)(m.Subject).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Cert { + n += 2 + } + if m.Dns { + n += 2 + } + if m.Uri { + n += 2 + } + if m.Chain { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *HttpConnectionManager_UpgradeConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UpgradeType) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Filters) > 0 { + for _, e := range m.Filters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Enabled != nil { + l = (*wrapperspb.BoolValue)(m.Enabled).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpConnectionManager_PathNormalizationOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ForwardingTransformation != nil { + if size, ok := interface{}(m.ForwardingTransformation).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ForwardingTransformation) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HttpFilterTransformation != nil { + if size, ok := interface{}(m.HttpFilterTransformation).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.HttpFilterTransformation) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpConnectionManager_ProxyStatusConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RemoveDetails { + n += 2 + } + if m.RemoveConnectionTerminationDetails { + n += 2 + } + if m.RemoveResponseFlags { + n += 2 + } + if m.SetRecommendedResponseCode { + n += 2 + } + if vtmsg, ok := m.ProxyName.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *HttpConnectionManager_ProxyStatusConfig_UseNodeId) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *HttpConnectionManager_ProxyStatusConfig_LiteralProxyName) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LiteralProxyName) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HttpConnectionManager_HcmAccessLogOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AccessLogFlushInterval != nil { + l = (*durationpb.Duration)(m.AccessLogFlushInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if 
m.FlushAccessLogOnNewRequest { + n += 2 + } + if m.FlushLogOnTunnelSuccessfullyEstablished { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *HttpConnectionManager) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CodecType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.CodecType)) + } + l = len(m.StatPrefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.RouteSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if len(m.HttpFilters) > 0 { + for _, e := range m.HttpFilters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.AddUserAgent != nil { + l = (*wrapperspb.BoolValue)(m.AddUserAgent).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Tracing != nil { + l = m.Tracing.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HttpProtocolOptions != nil { + if size, ok := interface{}(m.HttpProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.HttpProtocolOptions) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Http2ProtocolOptions != nil { + if size, ok := interface{}(m.Http2ProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Http2ProtocolOptions) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ServerName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DrainTimeout != nil { + l = (*durationpb.Duration)(m.DrainTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.AccessLog) > 0 { + for _, e := range m.AccessLog { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.UseRemoteAddress != nil { + l = (*wrapperspb.BoolValue)(m.UseRemoteAddress).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.GenerateRequestId != nil { + l = (*wrapperspb.BoolValue)(m.GenerateRequestId).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ForwardClientCertDetails != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.ForwardClientCertDetails)) + } + if m.SetCurrentClientCertDetails != nil { + l = m.SetCurrentClientCertDetails.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Proxy_100Continue { + n += 3 + } + if m.XffNumTrustedHops != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.XffNumTrustedHops)) + } + if m.RepresentIpv4RemoteAddressAsIpv4MappedIpv6 { + n += 3 + } + if m.SkipXffAppend { + n += 3 + } + l = len(m.Via) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.UpgradeConfigs) > 0 { + for _, e := range m.UpgradeConfigs { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.StreamIdleTimeout != nil { + l = (*durationpb.Duration)(m.StreamIdleTimeout).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.InternalAddressConfig != nil { + l = m.InternalAddressConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DelayedCloseTimeout != nil { + l = (*durationpb.Duration)(m.DelayedCloseTimeout).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RequestTimeout != nil { + l = (*durationpb.Duration)(m.RequestTimeout).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if 
m.MaxRequestHeadersKb != nil { + l = (*wrapperspb.UInt32Value)(m.MaxRequestHeadersKb).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.NormalizePath != nil { + l = (*wrapperspb.BoolValue)(m.NormalizePath).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PreserveExternalRequestId { + n += 3 + } + if m.MergeSlashes { + n += 3 + } + if m.ServerHeaderTransformation != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.ServerHeaderTransformation)) + } + if m.CommonHttpProtocolOptions != nil { + if size, ok := interface{}(m.CommonHttpProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CommonHttpProtocolOptions) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RequestIdExtension != nil { + l = m.RequestIdExtension.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AlwaysSetRequestIdInResponse { + n += 3 + } + if m.LocalReplyConfig != nil { + l = m.LocalReplyConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StripMatchingHostPort { + n += 3 + } + if m.StreamErrorOnInvalidHttpMessage != nil { + l = (*wrapperspb.BoolValue)(m.StreamErrorOnInvalidHttpMessage).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RequestHeadersTimeout != nil { + l = (*durationpb.Duration)(m.RequestHeadersTimeout).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.StripPortMode.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.PathNormalizationOptions != nil { + l = m.PathNormalizationOptions.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Http3ProtocolOptions != nil { + if size, ok := interface{}(m.Http3ProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Http3ProtocolOptions) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PathWithEscapedSlashesAction != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.PathWithEscapedSlashesAction)) + } + if len(m.OriginalIpDetectionExtensions) > 0 { + for _, e := range m.OriginalIpDetectionExtensions { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.StripTrailingHostDot { + n += 3 + } + if m.SchemeHeaderTransformation != nil { + if size, ok := interface{}(m.SchemeHeaderTransformation).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SchemeHeaderTransformation) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ProxyStatusConfig != nil { + l = m.ProxyStatusConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TypedHeaderValidationConfig != nil { + if size, ok := interface{}(m.TypedHeaderValidationConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TypedHeaderValidationConfig) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AppendXForwardedPort { + n += 3 + } + if len(m.EarlyHeaderMutationExtensions) > 0 { + for _, e := range m.EarlyHeaderMutationExtensions { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.AddProxyProtocolConnectionState != nil { + l = (*wrapperspb.BoolValue)(m.AddProxyProtocolConnectionState).SizeVT() + n += 2 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + if m.AccessLogFlushInterval != nil { + l = (*durationpb.Duration)(m.AccessLogFlushInterval).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FlushAccessLogOnNewRequest { + n += 3 + } + if m.AccessLogOptions != nil { + l = m.AccessLogOptions.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AppendLocalOverload { + n += 3 + } + if m.Http1SafeMaxConnectionDuration { + n += 3 + } + n += len(m.unknownFields) + return n +} + +func (m *HttpConnectionManager_Rds) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Rds != nil { + l = m.Rds.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HttpConnectionManager_RouteConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RouteConfig != nil { + if size, ok := interface{}(m.RouteConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RouteConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HttpConnectionManager_ScopedRoutes) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ScopedRoutes != nil { + l = m.ScopedRoutes.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *HttpConnectionManager_StripAnyHostPort) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 3 + return n +} +func (m *LocalReplyConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Mappers) > 0 { + for _, e := range m.Mappers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.BodyFormat != nil { + if size, ok := interface{}(m.BodyFormat).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.BodyFormat) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ResponseMapper) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Filter != nil { + if size, ok := interface{}(m.Filter).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Filter) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StatusCode != nil { + l = (*wrapperspb.UInt32Value)(m.StatusCode).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Body != nil { + if size, ok := interface{}(m.Body).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Body) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.BodyFormatOverride != nil { + if size, ok := interface{}(m.BodyFormatOverride).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.BodyFormatOverride) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.HeadersToAdd) > 0 { + for _, e := range m.HeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Rds) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConfigSource != nil { + if size, ok := interface{}(m.ConfigSource).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ConfigSource) + } + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RouteConfigName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRouteConfigurationsList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ScopedRouteConfigurations) > 0 { + for _, e := range m.ScopedRouteConfigurations { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Separator) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ElementSeparator) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ExtractType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.Index)) + return n +} +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Element != nil { + l = m.Element.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Type.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HeaderValueExtractor != nil { + l = m.HeaderValueExtractor.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ScopedRoutes_ScopeKeyBuilder) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Fragments) > 0 { + for _, e := range m.Fragments { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRoutes) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ScopeKeyBuilder != nil { + l = m.ScopeKeyBuilder.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RdsConfigSource != nil { + if size, ok := interface{}(m.RdsConfigSource).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RdsConfigSource) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m 
*ScopedRoutes_ScopedRouteConfigurationsList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ScopedRouteConfigurationsList != nil { + l = m.ScopedRouteConfigurationsList.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ScopedRoutes_ScopedRds) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ScopedRds != nil { + l = m.ScopedRds.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ScopedRds) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ScopedRdsConfigSource != nil { + if size, ok := interface{}(m.ScopedRdsConfigSource).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ScopedRdsConfigSource) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.SrdsResourcesLocator) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.IsOptional { + n += 2 + } + if m.Disabled { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *HttpFilter_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HttpFilter_ConfigDiscovery) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConfigDiscovery != nil { + if size, ok := interface{}(m.ConfigDiscovery).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ConfigDiscovery) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RequestIDExtension) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *EnvoyMobileHttpConnectionManager) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Config != nil { + l = m.Config.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.go index 6067ababf67..7d69f6349dd 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.proto package client_side_weighted_round_robinv3 @@ -9,10 +9,10 @@ package client_side_weighted_round_robinv3 import ( _ "github.com/cncf/xds/go/udpa/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" - duration "github.com/golang/protobuf/ptypes/duration" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -50,31 +50,31 @@ type ClientSideWeightedRoundRobin struct { // Whether to enable out-of-band utilization reporting collection from // the endpoints. By default, per-request utilization reporting is used. - EnableOobLoadReport *wrappers.BoolValue `protobuf:"bytes,1,opt,name=enable_oob_load_report,json=enableOobLoadReport,proto3" json:"enable_oob_load_report,omitempty"` + EnableOobLoadReport *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=enable_oob_load_report,json=enableOobLoadReport,proto3" json:"enable_oob_load_report,omitempty"` // Load reporting interval to request from the server. Note that the // server may not provide reports as frequently as the client requests. // Used only when enable_oob_load_report is true. Default is 10 seconds. - OobReportingPeriod *duration.Duration `protobuf:"bytes,2,opt,name=oob_reporting_period,json=oobReportingPeriod,proto3" json:"oob_reporting_period,omitempty"` + OobReportingPeriod *durationpb.Duration `protobuf:"bytes,2,opt,name=oob_reporting_period,json=oobReportingPeriod,proto3" json:"oob_reporting_period,omitempty"` // A given endpoint must report load metrics continuously for at least // this long before the endpoint weight will be used. This avoids // churn when the set of endpoint addresses changes. Takes effect // both immediately after we establish a connection to an endpoint and // after weight_expiration_period has caused us to stop using the most // recent load metrics. Default is 10 seconds. - BlackoutPeriod *duration.Duration `protobuf:"bytes,3,opt,name=blackout_period,json=blackoutPeriod,proto3" json:"blackout_period,omitempty"` + BlackoutPeriod *durationpb.Duration `protobuf:"bytes,3,opt,name=blackout_period,json=blackoutPeriod,proto3" json:"blackout_period,omitempty"` // If a given endpoint has not reported load metrics in this long, // then we stop using the reported weight. This ensures that we do // not continue to use very stale weights. Once we stop using a stale // value, if we later start seeing fresh reports again, the // blackout_period applies. Defaults to 3 minutes. - WeightExpirationPeriod *duration.Duration `protobuf:"bytes,4,opt,name=weight_expiration_period,json=weightExpirationPeriod,proto3" json:"weight_expiration_period,omitempty"` + WeightExpirationPeriod *durationpb.Duration `protobuf:"bytes,4,opt,name=weight_expiration_period,json=weightExpirationPeriod,proto3" json:"weight_expiration_period,omitempty"` // How often endpoint weights are recalculated. Values less than 100ms are // capped at 100ms. Default is 1 second. 
- WeightUpdatePeriod *duration.Duration `protobuf:"bytes,5,opt,name=weight_update_period,json=weightUpdatePeriod,proto3" json:"weight_update_period,omitempty"` + WeightUpdatePeriod *durationpb.Duration `protobuf:"bytes,5,opt,name=weight_update_period,json=weightUpdatePeriod,proto3" json:"weight_update_period,omitempty"` // The multiplier used to adjust endpoint weights with the error rate // calculated as eps/qps. Configuration is rejected if this value is negative. // Default is 1.0. - ErrorUtilizationPenalty *wrappers.FloatValue `protobuf:"bytes,6,opt,name=error_utilization_penalty,json=errorUtilizationPenalty,proto3" json:"error_utilization_penalty,omitempty"` + ErrorUtilizationPenalty *wrapperspb.FloatValue `protobuf:"bytes,6,opt,name=error_utilization_penalty,json=errorUtilizationPenalty,proto3" json:"error_utilization_penalty,omitempty"` } func (x *ClientSideWeightedRoundRobin) Reset() { @@ -109,42 +109,42 @@ func (*ClientSideWeightedRoundRobin) Descriptor() ([]byte, []int) { return file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescGZIP(), []int{0} } -func (x *ClientSideWeightedRoundRobin) GetEnableOobLoadReport() *wrappers.BoolValue { +func (x *ClientSideWeightedRoundRobin) GetEnableOobLoadReport() *wrapperspb.BoolValue { if x != nil { return x.EnableOobLoadReport } return nil } -func (x *ClientSideWeightedRoundRobin) GetOobReportingPeriod() *duration.Duration { +func (x *ClientSideWeightedRoundRobin) GetOobReportingPeriod() *durationpb.Duration { if x != nil { return x.OobReportingPeriod } return nil } -func (x *ClientSideWeightedRoundRobin) GetBlackoutPeriod() *duration.Duration { +func (x *ClientSideWeightedRoundRobin) GetBlackoutPeriod() *durationpb.Duration { if x != nil { return x.BlackoutPeriod } return nil } -func (x *ClientSideWeightedRoundRobin) GetWeightExpirationPeriod() *duration.Duration { +func (x *ClientSideWeightedRoundRobin) GetWeightExpirationPeriod() *durationpb.Duration { if x != nil { return x.WeightExpirationPeriod } return nil } -func (x *ClientSideWeightedRoundRobin) GetWeightUpdatePeriod() *duration.Duration { +func (x *ClientSideWeightedRoundRobin) GetWeightUpdatePeriod() *durationpb.Duration { if x != nil { return x.WeightUpdatePeriod } return nil } -func (x *ClientSideWeightedRoundRobin) GetErrorUtilizationPenalty() *wrappers.FloatValue { +func (x *ClientSideWeightedRoundRobin) GetErrorUtilizationPenalty() *wrapperspb.FloatValue { if x != nil { return x.ErrorUtilizationPenalty } @@ -243,9 +243,9 @@ func file_envoy_extensions_load_balancing_policies_client_side_weighted_round_ro var file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_goTypes = []interface{}{ (*ClientSideWeightedRoundRobin)(nil), // 0: envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin - (*wrappers.BoolValue)(nil), // 1: google.protobuf.BoolValue - (*duration.Duration)(nil), // 2: google.protobuf.Duration - (*wrappers.FloatValue)(nil), // 3: google.protobuf.FloatValue + (*wrapperspb.BoolValue)(nil), // 1: google.protobuf.BoolValue + (*durationpb.Duration)(nil), // 2: google.protobuf.Duration + (*wrapperspb.FloatValue)(nil), // 3: google.protobuf.FloatValue } var 
file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_depIdxs = []int32{ 1, // 0: envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin.enable_oob_load_report:type_name -> google.protobuf.BoolValue diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.validate.go index 6a1f64e5ab4..0d6a6ca7ebd 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin_vtproto.pb.go new file mode 100644 index 00000000000..250bbcfd337 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin_vtproto.pb.go @@ -0,0 +1,148 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.proto + +package client_side_weighted_round_robinv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ClientSideWeightedRoundRobin) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientSideWeightedRoundRobin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClientSideWeightedRoundRobin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ErrorUtilizationPenalty != nil { + size, err := (*wrapperspb.FloatValue)(m.ErrorUtilizationPenalty).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.WeightUpdatePeriod != nil { + size, err := (*durationpb.Duration)(m.WeightUpdatePeriod).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.WeightExpirationPeriod != nil { + size, err := (*durationpb.Duration)(m.WeightExpirationPeriod).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.BlackoutPeriod != nil { + size, err := (*durationpb.Duration)(m.BlackoutPeriod).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.OobReportingPeriod != nil { + size, err := (*durationpb.Duration)(m.OobReportingPeriod).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.EnableOobLoadReport != nil { + size, err := (*wrapperspb.BoolValue)(m.EnableOobLoadReport).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientSideWeightedRoundRobin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EnableOobLoadReport != nil { + l = (*wrapperspb.BoolValue)(m.EnableOobLoadReport).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OobReportingPeriod != nil { + l = (*durationpb.Duration)(m.OobReportingPeriod).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.BlackoutPeriod != nil { + l = (*durationpb.Duration)(m.BlackoutPeriod).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.WeightExpirationPeriod != nil { + l = (*durationpb.Duration)(m.WeightExpirationPeriod).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.WeightUpdatePeriod != nil { + l = (*durationpb.Duration)(m.WeightUpdatePeriod).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorUtilizationPenalty != nil { + l = (*wrapperspb.FloatValue)(m.ErrorUtilizationPenalty).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.go index a5e23ba8d57..726732605b4 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/extensions/load_balancing_policies/common/v3/common.proto package commonv3 @@ -11,10 +11,10 @@ import ( v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v31 "github.com/envoyproxy/go-control-plane/envoy/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - duration "github.com/golang/protobuf/ptypes/duration" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -118,7 +118,7 @@ type SlowStartConfig struct { // Represents the size of slow start window. // If set, the newly created host remains in slow start mode starting from its creation time // for the duration of slow start window. - SlowStartWindow *duration.Duration `protobuf:"bytes,1,opt,name=slow_start_window,json=slowStartWindow,proto3" json:"slow_start_window,omitempty"` + SlowStartWindow *durationpb.Duration `protobuf:"bytes,1,opt,name=slow_start_window,json=slowStartWindow,proto3" json:"slow_start_window,omitempty"` // This parameter controls the speed of traffic increase over the slow start window. Defaults to 1.0, // so that endpoint would get linearly increasing amount of traffic. // When increasing the value for this parameter, the speed of traffic ramp-up increases non-linearly. @@ -170,7 +170,7 @@ func (*SlowStartConfig) Descriptor() ([]byte, []int) { return file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescGZIP(), []int{1} } -func (x *SlowStartConfig) GetSlowStartWindow() *duration.Duration { +func (x *SlowStartConfig) GetSlowStartWindow() *durationpb.Duration { if x != nil { return x.SlowStartWindow } @@ -218,7 +218,7 @@ type ConsistentHashingLbConfig struct { // // This is an O(N) algorithm, unlike other load balancers. Using a lower “hash_balance_factor“ results in more hosts // being probed, so use a higher value if you require better performance. 
- HashBalanceFactor *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=hash_balance_factor,json=hashBalanceFactor,proto3" json:"hash_balance_factor,omitempty"` + HashBalanceFactor *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=hash_balance_factor,json=hashBalanceFactor,proto3" json:"hash_balance_factor,omitempty"` } func (x *ConsistentHashingLbConfig) Reset() { @@ -260,7 +260,7 @@ func (x *ConsistentHashingLbConfig) GetUseHostnameForHashing() bool { return false } -func (x *ConsistentHashingLbConfig) GetHashBalanceFactor() *wrappers.UInt32Value { +func (x *ConsistentHashingLbConfig) GetHashBalanceFactor() *wrapperspb.UInt32Value { if x != nil { return x.HashBalanceFactor } @@ -284,7 +284,7 @@ type LocalityLbConfig_ZoneAwareLbConfig struct { // even if zone aware routing is configured. If not specified, the default is 6. // * :ref:`runtime values `. // * :ref:`Zone aware routing support `. - MinClusterSize *wrappers.UInt64Value `protobuf:"bytes,2,opt,name=min_cluster_size,json=minClusterSize,proto3" json:"min_cluster_size,omitempty"` + MinClusterSize *wrapperspb.UInt64Value `protobuf:"bytes,2,opt,name=min_cluster_size,json=minClusterSize,proto3" json:"min_cluster_size,omitempty"` // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic // mode`. Instead, the cluster will fail all // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a @@ -331,7 +331,7 @@ func (x *LocalityLbConfig_ZoneAwareLbConfig) GetRoutingEnabled() *v31.Percent { return nil } -func (x *LocalityLbConfig_ZoneAwareLbConfig) GetMinClusterSize() *wrappers.UInt64Value { +func (x *LocalityLbConfig_ZoneAwareLbConfig) GetMinClusterSize() *wrapperspb.UInt64Value { if x != nil { return x.MinClusterSize } @@ -503,11 +503,11 @@ var file_envoy_extensions_load_balancing_policies_common_v3_common_proto_goTypes (*ConsistentHashingLbConfig)(nil), // 2: envoy.extensions.load_balancing_policies.common.v3.ConsistentHashingLbConfig (*LocalityLbConfig_ZoneAwareLbConfig)(nil), // 3: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.ZoneAwareLbConfig (*LocalityLbConfig_LocalityWeightedLbConfig)(nil), // 4: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.LocalityWeightedLbConfig - (*duration.Duration)(nil), // 5: google.protobuf.Duration + (*durationpb.Duration)(nil), // 5: google.protobuf.Duration (*v3.RuntimeDouble)(nil), // 6: envoy.config.core.v3.RuntimeDouble (*v31.Percent)(nil), // 7: envoy.type.v3.Percent - (*wrappers.UInt32Value)(nil), // 8: google.protobuf.UInt32Value - (*wrappers.UInt64Value)(nil), // 9: google.protobuf.UInt64Value + (*wrapperspb.UInt32Value)(nil), // 8: google.protobuf.UInt32Value + (*wrapperspb.UInt64Value)(nil), // 9: google.protobuf.UInt64Value } var file_envoy_extensions_load_balancing_policies_common_v3_common_proto_depIdxs = []int32{ 3, // 0: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.zone_aware_lb_config:type_name -> envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.ZoneAwareLbConfig diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.validate.go index f291a355ca8..2aa2f26dae0 100644 --- 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/extensions/load_balancing_policies/common/v3/common.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common_vtproto.pb.go new file mode 100644 index 00000000000..ad3021a243a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common_vtproto.pb.go @@ -0,0 +1,492 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/common/v3/common.proto + +package commonv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *LocalityLbConfig_ZoneAwareLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalityLbConfig_ZoneAwareLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbConfig_ZoneAwareLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.FailTrafficOnPanic { + i-- + if m.FailTrafficOnPanic { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.MinClusterSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.MinClusterSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.RoutingEnabled != nil { + if vtmsg, ok := interface{}(m.RoutingEnabled).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RoutingEnabled) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m 
*LocalityLbConfig_LocalityWeightedLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalityLbConfig_LocalityWeightedLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbConfig_LocalityWeightedLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *LocalityLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalityLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.LocalityConfigSpecifier.(*LocalityLbConfig_LocalityWeightedLbConfig_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.LocalityConfigSpecifier.(*LocalityLbConfig_ZoneAwareLbConfig_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *LocalityLbConfig_ZoneAwareLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbConfig_ZoneAwareLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ZoneAwareLbConfig != nil { + size, err := m.ZoneAwareLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *LocalityLbConfig_LocalityWeightedLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbConfig_LocalityWeightedLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LocalityWeightedLbConfig != nil { + size, err := m.LocalityWeightedLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *SlowStartConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SlowStartConfig) 
MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SlowStartConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MinWeightPercent != nil { + if vtmsg, ok := interface{}(m.MinWeightPercent).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MinWeightPercent) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.Aggression != nil { + if vtmsg, ok := interface{}(m.Aggression).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Aggression) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.SlowStartWindow != nil { + size, err := (*durationpb.Duration)(m.SlowStartWindow).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConsistentHashingLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConsistentHashingLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ConsistentHashingLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.HashBalanceFactor != nil { + size, err := (*wrapperspb.UInt32Value)(m.HashBalanceFactor).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.UseHostnameForHashing { + i-- + if m.UseHostnameForHashing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *LocalityLbConfig_ZoneAwareLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RoutingEnabled != nil { + if size, ok := interface{}(m.RoutingEnabled).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RoutingEnabled) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MinClusterSize != nil { + l = (*wrapperspb.UInt64Value)(m.MinClusterSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FailTrafficOnPanic { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m 
*LocalityLbConfig_LocalityWeightedLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *LocalityLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.LocalityConfigSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *LocalityLbConfig_ZoneAwareLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ZoneAwareLbConfig != nil { + l = m.ZoneAwareLbConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *LocalityLbConfig_LocalityWeightedLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LocalityWeightedLbConfig != nil { + l = m.LocalityWeightedLbConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *SlowStartConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SlowStartWindow != nil { + l = (*durationpb.Duration)(m.SlowStartWindow).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Aggression != nil { + if size, ok := interface{}(m.Aggression).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Aggression) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MinWeightPercent != nil { + if size, ok := interface{}(m.MinWeightPercent).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MinWeightPercent) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ConsistentHashingLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UseHostnameForHashing { + n += 2 + } + if m.HashBalanceFactor != nil { + l = (*wrapperspb.UInt32Value)(m.HashBalanceFactor).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.go index 51aa1f4be4a..819df3d7b7c 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.go @@ -1,19 +1,20 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto package least_requestv3 import ( _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v31 "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -25,10 +26,73 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// Available methods for selecting the host set from which to return the host with the +// fewest active requests. +type LeastRequest_SelectionMethod int32 + +const ( + // Return host with fewest requests from a set of “choice_count“ randomly selected hosts. + // Best selection method for most scenarios. + LeastRequest_N_CHOICES LeastRequest_SelectionMethod = 0 + // Return host with fewest requests from all hosts. + // Useful in some niche use cases involving low request rates and one of: + // (example 1) low request limits on workloads, or (example 2) few hosts. + // + // Example 1: Consider a workload type that can only accept one connection at a time. + // If such workloads are deployed across many hosts, only a small percentage of those + // workloads have zero connections at any given time, and the rate of new connections is low, + // the “FULL_SCAN“ method is more likely to select a suitable host than “N_CHOICES“. + // + // Example 2: Consider a workload type that is only deployed on 2 hosts. With default settings, + // the “N_CHOICES“ method will return the host with more active requests 25% of the time. + // If the request rate is sufficiently low, the behavior of always selecting the host with least + // requests as of the last metrics refresh may be preferable. + LeastRequest_FULL_SCAN LeastRequest_SelectionMethod = 1 +) + +// Enum value maps for LeastRequest_SelectionMethod. +var ( + LeastRequest_SelectionMethod_name = map[int32]string{ + 0: "N_CHOICES", + 1: "FULL_SCAN", + } + LeastRequest_SelectionMethod_value = map[string]int32{ + "N_CHOICES": 0, + "FULL_SCAN": 1, + } +) + +func (x LeastRequest_SelectionMethod) Enum() *LeastRequest_SelectionMethod { + p := new(LeastRequest_SelectionMethod) + *p = x + return p +} + +func (x LeastRequest_SelectionMethod) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (LeastRequest_SelectionMethod) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_enumTypes[0].Descriptor() +} + +func (LeastRequest_SelectionMethod) Type() protoreflect.EnumType { + return &file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_enumTypes[0] +} + +func (x LeastRequest_SelectionMethod) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use LeastRequest_SelectionMethod.Descriptor instead. 
+func (LeastRequest_SelectionMethod) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescGZIP(), []int{0, 0} +} + // This configuration allows the built-in LEAST_REQUEST LB policy to be configured via the LB policy // extension point. See the :ref:`load balancing architecture overview // ` for more information. -// [#next-free-field: 6] +// [#next-free-field: 7] type LeastRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -36,7 +100,8 @@ type LeastRequest struct { // The number of random healthy hosts from which the host with the fewest active requests will // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. - ChoiceCount *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=choice_count,json=choiceCount,proto3" json:"choice_count,omitempty"` + // Only applies to the “N_CHOICES“ selection method. + ChoiceCount *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=choice_count,json=choiceCount,proto3" json:"choice_count,omitempty"` // The following formula is used to calculate the dynamic weights when hosts have different load // balancing weights: // @@ -68,10 +133,14 @@ type LeastRequest struct { // Configuration for local zone aware load balancing or locality weighted load balancing. LocalityLbConfig *v31.LocalityLbConfig `protobuf:"bytes,4,opt,name=locality_lb_config,json=localityLbConfig,proto3" json:"locality_lb_config,omitempty"` // [#not-implemented-hide:] - // Configuration for performing full scan on the list of hosts. - // If this configuration is set, when selecting the host a full scan on the list hosts will be - // used to select the one with least requests instead of using random choices. - EnableFullScan *wrappers.BoolValue `protobuf:"bytes,5,opt,name=enable_full_scan,json=enableFullScan,proto3" json:"enable_full_scan,omitempty"` + // Unused. Replaced by the `selection_method` enum for better extensibility. + // + // Deprecated: Marked as deprecated in envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto. + EnableFullScan *wrapperspb.BoolValue `protobuf:"bytes,5,opt,name=enable_full_scan,json=enableFullScan,proto3" json:"enable_full_scan,omitempty"` + // Method for selecting the host set from which to return the host with the fewest active requests. + // + // Defaults to “N_CHOICES“. + SelectionMethod LeastRequest_SelectionMethod `protobuf:"varint,6,opt,name=selection_method,json=selectionMethod,proto3,enum=envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest_SelectionMethod" json:"selection_method,omitempty"` } func (x *LeastRequest) Reset() { @@ -106,7 +175,7 @@ func (*LeastRequest) Descriptor() ([]byte, []int) { return file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescGZIP(), []int{0} } -func (x *LeastRequest) GetChoiceCount() *wrappers.UInt32Value { +func (x *LeastRequest) GetChoiceCount() *wrapperspb.UInt32Value { if x != nil { return x.ChoiceCount } @@ -134,13 +203,21 @@ func (x *LeastRequest) GetLocalityLbConfig() *v31.LocalityLbConfig { return nil } -func (x *LeastRequest) GetEnableFullScan() *wrappers.BoolValue { +// Deprecated: Marked as deprecated in envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto. 
+func (x *LeastRequest) GetEnableFullScan() *wrapperspb.BoolValue { if x != nil { return x.EnableFullScan } return nil } +func (x *LeastRequest) GetSelectionMethod() LeastRequest_SelectionMethod { + if x != nil { + return x.SelectionMethod + } + return LeastRequest_N_CHOICES +} + var File_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto protoreflect.FileDescriptor var file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDesc = []byte{ @@ -160,55 +237,70 @@ var file_envoy_extensions_load_balancing_policies_least_request_v3_least_request 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, - 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, - 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd8, 0x03, 0x0a, 0x0c, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x48, 0x0a, 0x0c, 0x63, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x5f, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, - 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, - 0x28, 0x02, 0x52, 0x0b, 0x63, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, - 0x53, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x5f, 0x62, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, - 0x65, 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x42, 0x69, 0x61, 0x73, 0x12, 0x6f, 0x0a, 0x11, 0x73, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, - 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x72, 0x0a, 0x12, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, - 0x79, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x44, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 
0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, - 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, - 0x79, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x44, 0x0a, 0x10, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x73, 0x63, 0x61, 0x6e, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x63, 0x61, 0x6e, 0x42, - 0xd8, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x47, 0x69, 0x6f, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, - 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, - 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x2e, 0x76, 0x33, 0x42, 0x11, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x70, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, - 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, - 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, - 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x6c, 0x65, 0x61, 0x73, 0x74, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2f, 0x76, 0x33, 0x3b, 0x6c, 0x65, 0x61, 0x73, 0x74, - 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa5, 0x05, 0x0a, 0x0c, 0x4c, 0x65, + 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x48, 0x0a, 0x0c, 0x63, 0x68, + 0x6f, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x02, 0x52, 0x0b, 0x63, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x53, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 
0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, + 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x69, 0x61, 0x73, 0x12, 0x6f, 0x0a, 0x11, 0x73, 0x6c, 0x6f, + 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6c, 0x6f, 0x77, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x72, 0x0a, 0x12, 0x6c, 0x6f, + 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, + 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x6c, 0x6f, + 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x51, + 0x0a, 0x10, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x73, 0x63, + 0x61, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, + 0x01, 0x52, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x63, 0x61, + 0x6e, 0x12, 0x8c, 0x01, 0x0a, 0x10, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x57, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x0f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x22, 0x2f, 0x0a, 0x0f, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x5f, 0x43, 0x48, 0x4f, 0x49, 0x43, 0x45, 0x53, + 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x55, 0x4c, 0x4c, 0x5f, 0x53, 0x43, 0x41, 0x4e, 0x10, + 0x01, 0x42, 0xd8, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x47, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 
0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x69, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x2e, 0x76, 0x33, 0x42, 0x11, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x70, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x6c, 0x65, 0x61, 0x73, + 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2f, 0x76, 0x33, 0x3b, 0x6c, 0x65, 0x61, + 0x73, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -223,26 +315,29 @@ func file_envoy_extensions_load_balancing_policies_least_request_v3_least_reques return file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescData } +var file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_goTypes = []interface{}{ - (*LeastRequest)(nil), // 0: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest - (*wrappers.UInt32Value)(nil), // 1: google.protobuf.UInt32Value - (*v3.RuntimeDouble)(nil), // 2: envoy.config.core.v3.RuntimeDouble - (*v31.SlowStartConfig)(nil), // 3: envoy.extensions.load_balancing_policies.common.v3.SlowStartConfig - (*v31.LocalityLbConfig)(nil), // 4: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig - (*wrappers.BoolValue)(nil), // 5: google.protobuf.BoolValue + (LeastRequest_SelectionMethod)(0), // 0: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.SelectionMethod + (*LeastRequest)(nil), // 1: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest + (*wrapperspb.UInt32Value)(nil), // 2: google.protobuf.UInt32Value + (*v3.RuntimeDouble)(nil), // 3: envoy.config.core.v3.RuntimeDouble + (*v31.SlowStartConfig)(nil), // 4: envoy.extensions.load_balancing_policies.common.v3.SlowStartConfig + (*v31.LocalityLbConfig)(nil), // 5: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig + (*wrapperspb.BoolValue)(nil), // 6: google.protobuf.BoolValue } var file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_depIdxs = []int32{ - 1, // 0: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.choice_count:type_name -> google.protobuf.UInt32Value - 2, // 1: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.active_request_bias:type_name -> envoy.config.core.v3.RuntimeDouble - 3, // 2: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.slow_start_config:type_name -> envoy.extensions.load_balancing_policies.common.v3.SlowStartConfig - 4, // 3: 
envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.locality_lb_config:type_name -> envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig - 5, // 4: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.enable_full_scan:type_name -> google.protobuf.BoolValue - 5, // [5:5] is the sub-list for method output_type - 5, // [5:5] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name + 2, // 0: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.choice_count:type_name -> google.protobuf.UInt32Value + 3, // 1: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.active_request_bias:type_name -> envoy.config.core.v3.RuntimeDouble + 4, // 2: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.slow_start_config:type_name -> envoy.extensions.load_balancing_policies.common.v3.SlowStartConfig + 5, // 3: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.locality_lb_config:type_name -> envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig + 6, // 4: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.enable_full_scan:type_name -> google.protobuf.BoolValue + 0, // 5: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.selection_method:type_name -> envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.SelectionMethod + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name } func init() { @@ -271,13 +366,14 @@ func file_envoy_extensions_load_balancing_policies_least_request_v3_least_reques File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDesc, - NumEnums: 0, + NumEnums: 1, NumMessages: 1, NumExtensions: 0, NumServices: 0, }, GoTypes: file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_goTypes, DependencyIndexes: file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_depIdxs, + EnumInfos: file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_enumTypes, MessageInfos: file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_msgTypes, }.Build() File_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto = out.File diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.validate.go index fcec6a429fd..75a3a2c2e81 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. 
// source: envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto @@ -188,6 +189,17 @@ func (m *LeastRequest) validate(all bool) error { } } + if _, ok := LeastRequest_SelectionMethod_name[int32(m.GetSelectionMethod())]; !ok { + err := LeastRequestValidationError{ + field: "SelectionMethod", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + if len(errors) > 0 { return LeastRequestMultiError(errors) } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request_vtproto.pb.go new file mode 100644 index 00000000000..a7abe001b47 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request_vtproto.pb.go @@ -0,0 +1,196 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto + +package least_requestv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *LeastRequest) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeastRequest) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LeastRequest) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SelectionMethod != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.SelectionMethod)) + i-- + dAtA[i] = 0x30 + } + if m.EnableFullScan != nil { + size, err := (*wrapperspb.BoolValue)(m.EnableFullScan).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.LocalityLbConfig != nil { + if vtmsg, ok := interface{}(m.LocalityLbConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LocalityLbConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.SlowStartConfig != nil { + if vtmsg, ok := interface{}(m.SlowStartConfig).(interface { + 
MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SlowStartConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.ActiveRequestBias != nil { + if vtmsg, ok := interface{}(m.ActiveRequestBias).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ActiveRequestBias) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.ChoiceCount != nil { + size, err := (*wrapperspb.UInt32Value)(m.ChoiceCount).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LeastRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ChoiceCount != nil { + l = (*wrapperspb.UInt32Value)(m.ChoiceCount).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ActiveRequestBias != nil { + if size, ok := interface{}(m.ActiveRequestBias).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ActiveRequestBias) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SlowStartConfig != nil { + if size, ok := interface{}(m.SlowStartConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SlowStartConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LocalityLbConfig != nil { + if size, ok := interface{}(m.LocalityLbConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LocalityLbConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnableFullScan != nil { + l = (*wrapperspb.BoolValue)(m.EnableFullScan).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SelectionMethod != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.SelectionMethod)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.go index 58cfa7c7a03..7468da4ac83 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.proto package pick_firstv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.validate.go index a3ea2dd5886..d142fed995d 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first_vtproto.pb.go new file mode 100644 index 00000000000..828e7062146 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first_vtproto.pb.go @@ -0,0 +1,74 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.proto + +package pick_firstv3 + +import ( + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *PickFirst) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PickFirst) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PickFirst) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ShuffleAddressList { + i-- + if m.ShuffleAddressList { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *PickFirst) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ShuffleAddressList { + n += 2 + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.go index 501283fa7f6..6c544cc726a 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto package ring_hashv3 @@ -11,9 +11,9 @@ import ( _ "github.com/envoyproxy/go-control-plane/envoy/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -96,11 +96,11 @@ type RingHash struct { // provided host) the better the request distribution will reflect the desired weights. Defaults // to 1024 entries, and limited to 8M entries. See also // :ref:`maximum_ring_size`. - MinimumRingSize *wrappers.UInt64Value `protobuf:"bytes,2,opt,name=minimum_ring_size,json=minimumRingSize,proto3" json:"minimum_ring_size,omitempty"` + MinimumRingSize *wrapperspb.UInt64Value `protobuf:"bytes,2,opt,name=minimum_ring_size,json=minimumRingSize,proto3" json:"minimum_ring_size,omitempty"` // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered // to further constrain resource use. See also // :ref:`minimum_ring_size`. 
- MaximumRingSize *wrappers.UInt64Value `protobuf:"bytes,3,opt,name=maximum_ring_size,json=maximumRingSize,proto3" json:"maximum_ring_size,omitempty"` + MaximumRingSize *wrapperspb.UInt64Value `protobuf:"bytes,3,opt,name=maximum_ring_size,json=maximumRingSize,proto3" json:"maximum_ring_size,omitempty"` // If set to “true“, the cluster will use hostname instead of the resolved // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. // @@ -134,7 +134,7 @@ type RingHash struct { // ` instead. // // Deprecated: Marked as deprecated in envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto. - HashBalanceFactor *wrappers.UInt32Value `protobuf:"bytes,5,opt,name=hash_balance_factor,json=hashBalanceFactor,proto3" json:"hash_balance_factor,omitempty"` + HashBalanceFactor *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=hash_balance_factor,json=hashBalanceFactor,proto3" json:"hash_balance_factor,omitempty"` // Common configuration for hashing-based load balancing policies. ConsistentHashingLbConfig *v3.ConsistentHashingLbConfig `protobuf:"bytes,6,opt,name=consistent_hashing_lb_config,json=consistentHashingLbConfig,proto3" json:"consistent_hashing_lb_config,omitempty"` // Enable locality weighted load balancing for ring hash lb explicitly. @@ -180,14 +180,14 @@ func (x *RingHash) GetHashFunction() RingHash_HashFunction { return RingHash_DEFAULT_HASH } -func (x *RingHash) GetMinimumRingSize() *wrappers.UInt64Value { +func (x *RingHash) GetMinimumRingSize() *wrapperspb.UInt64Value { if x != nil { return x.MinimumRingSize } return nil } -func (x *RingHash) GetMaximumRingSize() *wrappers.UInt64Value { +func (x *RingHash) GetMaximumRingSize() *wrapperspb.UInt64Value { if x != nil { return x.MaximumRingSize } @@ -203,7 +203,7 @@ func (x *RingHash) GetUseHostnameForHashing() bool { } // Deprecated: Marked as deprecated in envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto. 
-func (x *RingHash) GetHashBalanceFactor() *wrappers.UInt32Value { +func (x *RingHash) GetHashBalanceFactor() *wrapperspb.UInt32Value { if x != nil { return x.HashBalanceFactor } @@ -246,7 +246,7 @@ var file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_r 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x06, 0x0a, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcf, 0x06, 0x0a, 0x08, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x12, 0x7b, 0x0a, 0x0d, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x4c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, @@ -255,64 +255,65 @@ var file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_r 0x5f, 0x68, 0x61, 0x73, 0x68, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x68, 0x61, 0x73, 0x68, 0x46, 0x75, - 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x54, 0x0a, 0x11, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x56, 0x0a, 0x11, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, - 0x0a, 0xfa, 0x42, 0x07, 0x32, 0x05, 0x18, 0x80, 0x80, 0x80, 0x04, 0x52, 0x0f, 0x6d, 0x69, 0x6e, - 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x54, 0x0a, 0x11, - 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x69, 0x7a, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x32, 0x05, 0x18, 0x80, 0x80, 0x80, - 0x04, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, - 0x7a, 0x65, 0x12, 0x44, 0x0a, 0x18, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, - 0x6d, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, - 0x01, 0x52, 0x15, 0x75, 0x73, 0x65, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x46, 0x6f, - 0x72, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x60, 0x0a, 0x13, 0x68, 0x61, 0x73, 0x68, - 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x42, 0x12, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x64, 0x92, 0xc7, 0x86, 0xd8, - 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x11, 
0x68, 0x61, 0x73, 0x68, 0x42, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x65, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x8e, 0x01, 0x0a, 0x1c, 0x63, - 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x6e, - 0x67, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x4d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x52, 0x19, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, - 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x9c, 0x01, 0x0a, 0x1b, - 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, - 0x64, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x5d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, - 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, - 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, - 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x40, 0x0a, 0x0c, 0x48, 0x61, - 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x45, - 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, - 0x58, 0x58, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4d, 0x55, 0x52, - 0x4d, 0x55, 0x52, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x5f, 0x32, 0x10, 0x02, 0x42, 0xc8, 0x01, 0xba, - 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x43, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, - 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x52, 0x69, - 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x68, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, - 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, - 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x72, - 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x2f, 0x76, 0x33, 0x3b, 0x72, 
0x69, 0x6e, 0x67, - 0x5f, 0x68, 0x61, 0x73, 0x68, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x0c, 0xfa, 0x42, 0x09, 0x32, 0x07, 0x18, 0x80, 0x80, 0x80, 0x04, 0x28, 0x01, 0x52, 0x0f, 0x6d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x54, + 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x32, 0x05, 0x18, 0x80, + 0x80, 0x80, 0x04, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x44, 0x0a, 0x18, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, + 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, + 0x30, 0x18, 0x01, 0x52, 0x15, 0x75, 0x73, 0x65, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, + 0x46, 0x6f, 0x72, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x60, 0x0a, 0x13, 0x68, 0x61, + 0x73, 0x68, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, + 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x12, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x64, 0x92, 0xc7, + 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x11, 0x68, 0x61, 0x73, 0x68, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x8e, 0x01, 0x0a, + 0x1c, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x69, 0x6e, 0x67, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x4d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x19, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, + 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x9c, 0x01, + 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x5d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, + 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 
0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x40, 0x0a, 0x0c, + 0x48, 0x61, 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x0c, + 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x00, 0x12, 0x0b, + 0x0a, 0x07, 0x58, 0x58, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4d, + 0x55, 0x52, 0x4d, 0x55, 0x52, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x5f, 0x32, 0x10, 0x02, 0x42, 0xc8, + 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x43, 0x69, 0x6f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x2e, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x2e, 0x76, 0x33, 0x42, 0x0d, + 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x68, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x2f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x2f, 0x76, 0x33, 0x3b, 0x72, 0x69, + 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( @@ -332,8 +333,8 @@ var file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_m var file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_goTypes = []interface{}{ (RingHash_HashFunction)(0), // 0: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.HashFunction (*RingHash)(nil), // 1: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash - (*wrappers.UInt64Value)(nil), // 2: google.protobuf.UInt64Value - (*wrappers.UInt32Value)(nil), // 3: google.protobuf.UInt32Value + (*wrapperspb.UInt64Value)(nil), // 2: google.protobuf.UInt64Value + (*wrapperspb.UInt32Value)(nil), // 3: google.protobuf.UInt32Value (*v3.ConsistentHashingLbConfig)(nil), // 4: envoy.extensions.load_balancing_policies.common.v3.ConsistentHashingLbConfig (*v3.LocalityLbConfig_LocalityWeightedLbConfig)(nil), // 5: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.LocalityWeightedLbConfig } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.validate.go index 4f431eeade6..c5ec6e39ce0 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by 
protoc-gen-validate. DO NOT EDIT. // source: envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto @@ -70,10 +71,10 @@ func (m *RingHash) validate(all bool) error { if wrapper := m.GetMinimumRingSize(); wrapper != nil { - if wrapper.GetValue() > 8388608 { + if val := wrapper.GetValue(); val < 1 || val > 8388608 { err := RingHashValidationError{ field: "MinimumRingSize", - reason: "value must be less than or equal to 8388608", + reason: "value must be inside range [1, 8388608]", } if !all { return err diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash_vtproto.pb.go new file mode 100644 index 00000000000..f762ec4c6d3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash_vtproto.pb.go @@ -0,0 +1,191 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto + +package ring_hashv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *RingHash) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RingHash) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RingHash) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LocalityWeightedLbConfig != nil { + if vtmsg, ok := interface{}(m.LocalityWeightedLbConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LocalityWeightedLbConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + if m.ConsistentHashingLbConfig != nil { + if vtmsg, ok := interface{}(m.ConsistentHashingLbConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConsistentHashingLbConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + 
copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + if m.HashBalanceFactor != nil { + size, err := (*wrapperspb.UInt32Value)(m.HashBalanceFactor).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.UseHostnameForHashing { + i-- + if m.UseHostnameForHashing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.MaximumRingSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.MaximumRingSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.MinimumRingSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.MinimumRingSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.HashFunction != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HashFunction)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RingHash) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HashFunction != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HashFunction)) + } + if m.MinimumRingSize != nil { + l = (*wrapperspb.UInt64Value)(m.MinimumRingSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaximumRingSize != nil { + l = (*wrapperspb.UInt64Value)(m.MaximumRingSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UseHostnameForHashing { + n += 2 + } + if m.HashBalanceFactor != nil { + l = (*wrapperspb.UInt32Value)(m.HashBalanceFactor).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConsistentHashingLbConfig != nil { + if size, ok := interface{}(m.ConsistentHashingLbConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ConsistentHashingLbConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LocalityWeightedLbConfig != nil { + if size, ok := interface{}(m.LocalityWeightedLbConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LocalityWeightedLbConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.go index 7467d942010..7c3741a23dd 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.proto package wrr_localityv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.validate.go index ec7470e88b1..c4c33b4f001 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality_vtproto.pb.go new file mode 100644 index 00000000000..1a014866147 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality_vtproto.pb.go @@ -0,0 +1,95 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.proto + +package wrr_localityv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *WrrLocality) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WrrLocality) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *WrrLocality) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.EndpointPickingPolicy != nil { + if vtmsg, ok := interface{}(m.EndpointPickingPolicy).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.EndpointPickingPolicy) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WrrLocality) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EndpointPickingPolicy != nil { + if size, ok := interface{}(m.EndpointPickingPolicy).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.EndpointPickingPolicy) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.go index 8a63cb980f0..4199deae587 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/extensions/rbac/audit_loggers/stream/v3/stream.proto package streamv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.validate.go index 015fdd8e630..5fe37d901ff 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. 
// source: envoy/extensions/rbac/audit_loggers/stream/v3/stream.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream_vtproto.pb.go new file mode 100644 index 00000000000..8e85a56806a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream_vtproto.pb.go @@ -0,0 +1,61 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/rbac/audit_loggers/stream/v3/stream.proto + +package streamv3 + +import ( + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *StdoutAuditLog) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StdoutAuditLog) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StdoutAuditLog) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *StdoutAuditLog) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.go index 84edfea54a9..37df4d7a638 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/extensions/transport_sockets/tls/v3/cert.proto package tlsv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.validate.go index 81f1b85e586..aa4f445f557 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. 
// source: envoy/extensions/transport_sockets/tls/v3/cert.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.go index 3890402c720..0f7321c2244 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/extensions/transport_sockets/tls/v3/common.proto package tlsv3 @@ -12,10 +12,10 @@ import ( v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v31 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any1 "github.com/golang/protobuf/ptypes/any" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -97,6 +97,7 @@ const ( SubjectAltNameMatcher_DNS SubjectAltNameMatcher_SanType = 2 SubjectAltNameMatcher_URI SubjectAltNameMatcher_SanType = 3 SubjectAltNameMatcher_IP_ADDRESS SubjectAltNameMatcher_SanType = 4 + SubjectAltNameMatcher_OTHER_NAME SubjectAltNameMatcher_SanType = 5 ) // Enum value maps for SubjectAltNameMatcher_SanType. @@ -107,6 +108,7 @@ var ( 2: "DNS", 3: "URI", 4: "IP_ADDRESS", + 5: "OTHER_NAME", } SubjectAltNameMatcher_SanType_value = map[string]int32{ "SAN_TYPE_UNSPECIFIED": 0, @@ -114,6 +116,7 @@ var ( "DNS": 2, "URI": 3, "IP_ADDRESS": 4, + "OTHER_NAME": 5, } ) @@ -447,7 +450,7 @@ func (m *PrivateKeyProvider) GetConfigType() isPrivateKeyProvider_ConfigType { return nil } -func (x *PrivateKeyProvider) GetTypedConfig() *any1.Any { +func (x *PrivateKeyProvider) GetTypedConfig() *anypb.Any { if x, ok := x.GetConfigType().(*PrivateKeyProvider_TypedConfig); ok { return x.TypedConfig } @@ -466,7 +469,7 @@ type isPrivateKeyProvider_ConfigType interface { } type PrivateKeyProvider_TypedConfig struct { - TypedConfig *any1.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` } func (*PrivateKeyProvider_TypedConfig) isPrivateKeyProvider_ConfigType() {} @@ -770,7 +773,21 @@ type SubjectAltNameMatcher struct { // Specification of type of SAN. Note that the default enum value is an invalid choice. SanType SubjectAltNameMatcher_SanType `protobuf:"varint,1,opt,name=san_type,json=sanType,proto3,enum=envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher_SanType" json:"san_type,omitempty"` // Matcher for SAN value. 
+ // + // The string matching for OTHER_NAME SAN values depends on their ASN.1 type: + // + // - OBJECT: Validated against its dotted numeric notation (e.g., "1.2.3.4") + // - BOOLEAN: Validated against strings "true" or "false" + // - INTEGER/ENUMERATED: Validated against a string containing the integer value + // - NULL: Validated against an empty string + // - Other types: Validated directly against the string value Matcher *v31.StringMatcher `protobuf:"bytes,2,opt,name=matcher,proto3" json:"matcher,omitempty"` + // OID Value which is required if OTHER_NAME SAN type is used. + // For example, UPN OID is 1.3.6.1.4.1.311.20.2.3 + // (Reference: http://oid-info.com/get/1.3.6.1.4.1.311.20.2.3). + // + // If set for SAN types other than OTHER_NAME, it will be ignored. + Oid string `protobuf:"bytes,3,opt,name=oid,proto3" json:"oid,omitempty"` } func (x *SubjectAltNameMatcher) Reset() { @@ -819,7 +836,14 @@ func (x *SubjectAltNameMatcher) GetMatcher() *v31.StringMatcher { return nil } -// [#next-free-field: 17] +func (x *SubjectAltNameMatcher) GetOid() string { + if x != nil { + return x.Oid + } + return "" +} + +// [#next-free-field: 18] type CertificateValidationContext struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -860,16 +884,18 @@ type CertificateValidationContext struct { // can be treated as trust anchor as well. It allows verification with building valid partial chain instead // of a full chain. // - // Only one of “trusted_ca“ and “ca_certificate_provider_instance“ may be specified. - // - // [#next-major-version: This field and watched_directory below should ideally be moved into a - // separate sub-message, since there's no point in specifying the latter field without this one.] + // If “ca_certificate_provider_instance“ is set, it takes precedence over “trusted_ca“. TrustedCa *v3.DataSource `protobuf:"bytes,1,opt,name=trusted_ca,json=trustedCa,proto3" json:"trusted_ca,omitempty"` // Certificate provider instance for fetching TLS certificates. // - // Only one of “trusted_ca“ and “ca_certificate_provider_instance“ may be specified. + // If set, takes precedence over “trusted_ca“. // [#not-implemented-hide:] CaCertificateProviderInstance *CertificateProviderPluginInstance `protobuf:"bytes,13,opt,name=ca_certificate_provider_instance,json=caCertificateProviderInstance,proto3" json:"ca_certificate_provider_instance,omitempty"` + // Use system root certs for validation. + // If present, system root certs are used only if neither of the “trusted_ca“ + // or “ca_certificate_provider_instance“ fields are set. + // [#not-implemented-hide:] + SystemRootCerts *CertificateValidationContext_SystemRootCerts `protobuf:"bytes,17,opt,name=system_root_certs,json=systemRootCerts,proto3" json:"system_root_certs,omitempty"` // If specified, updates of a file-based “trusted_ca“ source will be triggered // by this watch. This allows explicit control over the path watched, by // default the parent directory of the filesystem path in “trusted_ca“ is @@ -969,7 +995,7 @@ type CertificateValidationContext struct { // Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/common.proto. MatchSubjectAltNames []*v31.StringMatcher `protobuf:"bytes,9,rep,name=match_subject_alt_names,json=matchSubjectAltNames,proto3" json:"match_subject_alt_names,omitempty"` // [#not-implemented-hide:] Must present signed certificate time-stamp. 
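
[Editorial note] The rewritten comments above replace the old "only one of trusted_ca and ca_certificate_provider_instance may be specified" rule with an explicit precedence order: the provider instance wins over trusted_ca, and the new system_root_certs sub-message is consulted only when neither of the other two is set. A minimal sketch of that selection logic follows; ValidationConfig and pickCASource are hypothetical (Envoy performs the real selection inside its TLS transport socket), and the string fields stand in for the DataSource and plugin-instance messages.

package main

import "fmt"

type ValidationConfig struct {
	TrustedCA                     string // stands in for the trusted_ca DataSource
	CACertificateProviderInstance string // provider plugin instance name
	SystemRootCerts               bool   // stands in for the new empty sub-message
}

// pickCASource applies the documented precedence: provider instance first,
// then trusted_ca, and system roots only when neither of the others is set.
func pickCASource(c ValidationConfig) string {
	switch {
	case c.CACertificateProviderInstance != "":
		return "provider:" + c.CACertificateProviderInstance
	case c.TrustedCA != "":
		return "file:" + c.TrustedCA
	case c.SystemRootCerts:
		return "system roots"
	default:
		return "no CA configured"
	}
}

func main() {
	// System roots are only a fallback when nothing else is configured.
	fmt.Println(pickCASource(ValidationConfig{TrustedCA: "/etc/ca.pem", SystemRootCerts: true}))
}
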
- RequireSignedCertificateTimestamp *wrappers.BoolValue `protobuf:"bytes,6,opt,name=require_signed_certificate_timestamp,json=requireSignedCertificateTimestamp,proto3" json:"require_signed_certificate_timestamp,omitempty"` + RequireSignedCertificateTimestamp *wrapperspb.BoolValue `protobuf:"bytes,6,opt,name=require_signed_certificate_timestamp,json=requireSignedCertificateTimestamp,proto3" json:"require_signed_certificate_timestamp,omitempty"` // An optional `certificate revocation list // `_ // (in PEM format). If specified, Envoy will verify that the presented peer @@ -1006,7 +1032,7 @@ type CertificateValidationContext struct { // This matches the semantics of “SSL_CTX_set_verify_depth“ in OpenSSL 1.0.x and older versions of BoringSSL. It differs from “SSL_CTX_set_verify_depth“ // in OpenSSL 1.1.x and newer versions of BoringSSL in that the trust anchor is included. // Trusted issues are specified by setting :ref:`trusted_ca ` - MaxVerifyDepth *wrappers.UInt32Value `protobuf:"bytes,16,opt,name=max_verify_depth,json=maxVerifyDepth,proto3" json:"max_verify_depth,omitempty"` + MaxVerifyDepth *wrapperspb.UInt32Value `protobuf:"bytes,16,opt,name=max_verify_depth,json=maxVerifyDepth,proto3" json:"max_verify_depth,omitempty"` } func (x *CertificateValidationContext) Reset() { @@ -1055,6 +1081,13 @@ func (x *CertificateValidationContext) GetCaCertificateProviderInstance() *Certi return nil } +func (x *CertificateValidationContext) GetSystemRootCerts() *CertificateValidationContext_SystemRootCerts { + if x != nil { + return x.SystemRootCerts + } + return nil +} + func (x *CertificateValidationContext) GetWatchedDirectory() *v3.WatchedDirectory { if x != nil { return x.WatchedDirectory @@ -1091,7 +1124,7 @@ func (x *CertificateValidationContext) GetMatchSubjectAltNames() []*v31.StringMa return nil } -func (x *CertificateValidationContext) GetRequireSignedCertificateTimestamp() *wrappers.BoolValue { +func (x *CertificateValidationContext) GetRequireSignedCertificateTimestamp() *wrapperspb.BoolValue { if x != nil { return x.RequireSignedCertificateTimestamp } @@ -1133,13 +1166,51 @@ func (x *CertificateValidationContext) GetOnlyVerifyLeafCertCrl() bool { return false } -func (x *CertificateValidationContext) GetMaxVerifyDepth() *wrappers.UInt32Value { +func (x *CertificateValidationContext) GetMaxVerifyDepth() *wrapperspb.UInt32Value { if x != nil { return x.MaxVerifyDepth } return nil } +type CertificateValidationContext_SystemRootCerts struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CertificateValidationContext_SystemRootCerts) Reset() { + *x = CertificateValidationContext_SystemRootCerts{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CertificateValidationContext_SystemRootCerts) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CertificateValidationContext_SystemRootCerts) ProtoMessage() {} + +func (x *CertificateValidationContext_SystemRootCerts) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
CertificateValidationContext_SystemRootCerts.ProtoReflect.Descriptor instead. +func (*CertificateValidationContext_SystemRootCerts) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP(), []int{6, 0} +} + var File_envoy_extensions_transport_sockets_tls_v3_common_proto protoreflect.FileDescriptor var file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDesc = []byte{ @@ -1282,7 +1353,7 @@ var file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDesc = []byte 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, - 0xa4, 0x02, 0x0a, 0x15, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, + 0xc6, 0x02, 0x0a, 0x15, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x6f, 0x0a, 0x08, 0x73, 0x61, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x48, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, @@ -1295,120 +1366,132 @@ var file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDesc = []byte 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x22, 0x50, 0x0a, 0x07, 0x53, 0x61, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x18, 0x0a, 0x14, 0x53, 0x41, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x4d, 0x41, - 0x49, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x4e, 0x53, 0x10, 0x02, 0x12, 0x07, 0x0a, - 0x03, 0x55, 0x52, 0x49, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x50, 0x5f, 0x41, 0x44, 0x44, - 0x52, 0x45, 0x53, 0x53, 0x10, 0x04, 0x22, 0x90, 0x0c, 0x0a, 0x1c, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x63, 0x68, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6f, 0x69, 0x64, 0x22, 0x60, 0x0a, 0x07, 0x53, 0x61, 0x6e, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x41, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, + 0x4d, 0x41, 0x49, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x4e, 0x53, 0x10, 0x02, 0x12, + 0x07, 0x0a, 0x03, 0x55, 0x52, 0x49, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x50, 0x5f, 0x41, + 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x4f, 0x54, 0x48, 0x45, + 0x52, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x10, 0x05, 0x22, 0xa9, 0x0d, 0x0a, 0x1c, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x57, 0x0a, 0x0a, 0x74, 0x72, 0x75, + 0x73, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 
0x63, 0x65, 0x42, + 0x16, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x10, 0x12, 0x0e, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, + 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, + 0x43, 0x61, 0x12, 0xad, 0x01, 0x0a, 0x20, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x50, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x16, 0xf2, 0x98, 0xfe, + 0x8f, 0x05, 0x10, 0x12, 0x0e, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x1d, 0x63, 0x61, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x12, 0x83, 0x01, 0x0a, 0x11, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x72, 0x6f, + 0x6f, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x57, 0x0a, 0x0a, 0x74, 0x72, 0x75, 0x73, 0x74, - 0x65, 0x64, 0x5f, 0x63, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x16, 0xf2, - 0x98, 0xfe, 0x8f, 0x05, 0x10, 0x12, 0x0e, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x43, 0x61, - 0x12, 0xad, 0x01, 0x0a, 0x20, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, - 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, - 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x16, 0xf2, 0x98, 0xfe, 0x8f, 0x05, - 0x10, 0x12, 0x0e, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x52, 0x1d, 0x63, 0x61, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, - 0x12, 
0x53, 0x0a, 0x11, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x79, 0x52, 0x10, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x46, 0x0a, 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, - 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x6b, 0x69, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x92, 0x01, 0x08, 0x22, 0x06, - 0x72, 0x04, 0x10, 0x2c, 0x28, 0x2c, 0x52, 0x15, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x70, 0x6b, 0x69, 0x12, 0x46, 0x0a, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x52, 0x6f, + 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x73, 0x52, 0x0f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x52, + 0x6f, 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x10, 0x77, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x46, 0x0a, 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x0e, - 0xfa, 0x42, 0x0b, 0x92, 0x01, 0x08, 0x22, 0x06, 0x72, 0x04, 0x10, 0x40, 0x28, 0x5f, 0x52, 0x15, + 0x61, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x6b, 0x69, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x0e, + 0xfa, 0x42, 0x0b, 0x92, 0x01, 0x08, 0x22, 0x06, 0x72, 0x04, 0x10, 0x2c, 0x28, 0x2c, 0x52, 0x15, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x48, 0x61, 0x73, 0x68, 0x12, 0x82, 0x01, 0x0a, 0x1d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, - 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, - 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, - 0x19, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, 0x64, 0x53, 0x75, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x68, 0x0a, 0x17, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x5f, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 
0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x14, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x12, 0x6b, 0x0a, 0x24, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, - 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x21, - 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x43, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x12, 0x32, 0x0a, 0x03, 0x63, 0x72, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x52, 0x03, 0x63, 0x72, 0x6c, 0x12, 0x3a, 0x0a, 0x19, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, - 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, - 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x12, 0xa2, 0x01, 0x0a, 0x18, 0x74, 0x72, 0x75, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, - 0x6e, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0a, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x5e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, - 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, - 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x54, 0x72, - 0x75, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x16, - 0x74, 0x72, 0x75, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x62, 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, - 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x38, 0x0a, 0x19, 0x6f, 0x6e, - 0x6c, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, - 0x65, 0x72, 0x74, 0x5f, 0x63, 0x72, 0x6c, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x6f, - 0x6e, 0x6c, 0x79, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x4c, 
0x65, 0x61, 0x66, 0x43, 0x65, 0x72, - 0x74, 0x43, 0x72, 0x6c, 0x12, 0x4f, 0x0a, 0x10, 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x69, - 0x66, 0x79, 0x5f, 0x64, 0x65, 0x70, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, - 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, - 0x44, 0x65, 0x70, 0x74, 0x68, 0x22, 0x46, 0x0a, 0x16, 0x54, 0x72, 0x75, 0x73, 0x74, 0x43, 0x68, - 0x61, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x16, 0x0a, 0x12, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x5f, 0x54, 0x52, 0x55, 0x53, 0x54, 0x5f, - 0x43, 0x48, 0x41, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x43, 0x43, 0x45, 0x50, - 0x54, 0x5f, 0x55, 0x4e, 0x54, 0x52, 0x55, 0x53, 0x54, 0x45, 0x44, 0x10, 0x01, 0x3a, 0x35, 0x9a, - 0xc5, 0x88, 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, - 0x74, 0x65, 0x78, 0x74, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, - 0x52, 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x5f, 0x61, 0x6c, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x42, 0xa8, 0x01, 0xba, 0x80, 0xc8, 0xd1, - 0x06, 0x02, 0x10, 0x02, 0x0a, 0x37, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, - 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, - 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x43, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x56, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, - 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, - 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, - 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x74, - 0x6c, 0x73, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x53, 0x70, 0x6b, 0x69, 0x12, 0x46, 0x0a, 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, + 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x92, 0x01, 0x08, 0x22, 0x06, + 0x72, 0x04, 0x10, 0x40, 0x28, 0x5f, 0x52, 0x15, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x61, 0x73, 0x68, 0x12, 0x82, 0x01, + 0x0a, 0x1d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x73, 0x75, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, + 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, + 0x72, 0x74, 
0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x19, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, + 0x70, 0x65, 0x64, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, + 0x65, 0x73, 0x12, 0x68, 0x0a, 0x17, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x75, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x09, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x14, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x75, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x6b, 0x0a, 0x24, + 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, + 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x21, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x53, + 0x69, 0x67, 0x6e, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x32, 0x0a, 0x03, 0x63, 0x72, 0x6c, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x03, 0x63, 0x72, 0x6c, 0x12, 0x3a, 0x0a, + 0x19, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x17, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0xa2, 0x01, 0x0a, 0x18, 0x74, 0x72, + 0x75, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x5e, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x16, 0x74, 0x72, 0x75, 0x73, 0x74, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x62, + 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x38, 0x0a, 0x19, 0x6f, 0x6e, 0x6c, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x63, 0x72, 0x6c, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x6f, 0x6e, 0x6c, 0x79, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x4c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x43, 0x72, 0x6c, 0x12, 0x4f, 0x0a, 0x10, + 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x64, 0x65, 0x70, 0x74, 0x68, + 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x0e, 0x6d, + 0x61, 0x78, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x44, 0x65, 0x70, 0x74, 0x68, 0x1a, 0x11, 0x0a, + 0x0f, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x52, 0x6f, 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x73, + 0x22, 0x46, 0x0a, 0x16, 0x54, 0x72, 0x75, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x45, + 0x52, 0x49, 0x46, 0x59, 0x5f, 0x54, 0x52, 0x55, 0x53, 0x54, 0x5f, 0x43, 0x48, 0x41, 0x49, 0x4e, + 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, 0x5f, 0x55, 0x4e, 0x54, + 0x52, 0x55, 0x53, 0x54, 0x45, 0x44, 0x10, 0x01, 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, + 0x74, 0x68, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4a, + 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x52, 0x17, 0x76, 0x65, 0x72, + 0x69, 0x66, 0x79, 0x5f, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x42, 0xa8, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, + 0x37, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x56, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, + 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x6c, 0x73, 0x76, 0x33, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1424,7 
+1507,7 @@ func file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP() [ } var file_envoy_extensions_transport_sockets_tls_v3_common_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes = make([]protoimpl.MessageInfo, 8) var file_envoy_extensions_transport_sockets_tls_v3_common_proto_goTypes = []interface{}{ (TlsParameters_TlsProtocol)(0), // 0: envoy.extensions.transport_sockets.tls.v3.TlsParameters.TlsProtocol (SubjectAltNameMatcher_SanType)(0), // 1: envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher.SanType @@ -1436,44 +1519,46 @@ var file_envoy_extensions_transport_sockets_tls_v3_common_proto_goTypes = []inte (*CertificateProviderPluginInstance)(nil), // 7: envoy.extensions.transport_sockets.tls.v3.CertificateProviderPluginInstance (*SubjectAltNameMatcher)(nil), // 8: envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher (*CertificateValidationContext)(nil), // 9: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext - (*any1.Any)(nil), // 10: google.protobuf.Any - (*v3.DataSource)(nil), // 11: envoy.config.core.v3.DataSource - (*v3.WatchedDirectory)(nil), // 12: envoy.config.core.v3.WatchedDirectory - (*v31.StringMatcher)(nil), // 13: envoy.type.matcher.v3.StringMatcher - (*wrappers.BoolValue)(nil), // 14: google.protobuf.BoolValue - (*v3.TypedExtensionConfig)(nil), // 15: envoy.config.core.v3.TypedExtensionConfig - (*wrappers.UInt32Value)(nil), // 16: google.protobuf.UInt32Value + (*CertificateValidationContext_SystemRootCerts)(nil), // 10: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.SystemRootCerts + (*anypb.Any)(nil), // 11: google.protobuf.Any + (*v3.DataSource)(nil), // 12: envoy.config.core.v3.DataSource + (*v3.WatchedDirectory)(nil), // 13: envoy.config.core.v3.WatchedDirectory + (*v31.StringMatcher)(nil), // 14: envoy.type.matcher.v3.StringMatcher + (*wrapperspb.BoolValue)(nil), // 15: google.protobuf.BoolValue + (*v3.TypedExtensionConfig)(nil), // 16: envoy.config.core.v3.TypedExtensionConfig + (*wrapperspb.UInt32Value)(nil), // 17: google.protobuf.UInt32Value } var file_envoy_extensions_transport_sockets_tls_v3_common_proto_depIdxs = []int32{ 0, // 0: envoy.extensions.transport_sockets.tls.v3.TlsParameters.tls_minimum_protocol_version:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsParameters.TlsProtocol 0, // 1: envoy.extensions.transport_sockets.tls.v3.TlsParameters.tls_maximum_protocol_version:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsParameters.TlsProtocol - 10, // 2: envoy.extensions.transport_sockets.tls.v3.PrivateKeyProvider.typed_config:type_name -> google.protobuf.Any - 11, // 3: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.certificate_chain:type_name -> envoy.config.core.v3.DataSource - 11, // 4: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.private_key:type_name -> envoy.config.core.v3.DataSource - 11, // 5: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.pkcs12:type_name -> envoy.config.core.v3.DataSource - 12, // 6: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.watched_directory:type_name -> envoy.config.core.v3.WatchedDirectory + 11, // 2: envoy.extensions.transport_sockets.tls.v3.PrivateKeyProvider.typed_config:type_name -> google.protobuf.Any + 12, // 3: 
envoy.extensions.transport_sockets.tls.v3.TlsCertificate.certificate_chain:type_name -> envoy.config.core.v3.DataSource + 12, // 4: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.private_key:type_name -> envoy.config.core.v3.DataSource + 12, // 5: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.pkcs12:type_name -> envoy.config.core.v3.DataSource + 13, // 6: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.watched_directory:type_name -> envoy.config.core.v3.WatchedDirectory 4, // 7: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.private_key_provider:type_name -> envoy.extensions.transport_sockets.tls.v3.PrivateKeyProvider - 11, // 8: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.password:type_name -> envoy.config.core.v3.DataSource - 11, // 9: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.ocsp_staple:type_name -> envoy.config.core.v3.DataSource - 11, // 10: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.signed_certificate_timestamp:type_name -> envoy.config.core.v3.DataSource - 11, // 11: envoy.extensions.transport_sockets.tls.v3.TlsSessionTicketKeys.keys:type_name -> envoy.config.core.v3.DataSource + 12, // 8: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.password:type_name -> envoy.config.core.v3.DataSource + 12, // 9: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.ocsp_staple:type_name -> envoy.config.core.v3.DataSource + 12, // 10: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.signed_certificate_timestamp:type_name -> envoy.config.core.v3.DataSource + 12, // 11: envoy.extensions.transport_sockets.tls.v3.TlsSessionTicketKeys.keys:type_name -> envoy.config.core.v3.DataSource 1, // 12: envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher.san_type:type_name -> envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher.SanType - 13, // 13: envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher.matcher:type_name -> envoy.type.matcher.v3.StringMatcher - 11, // 14: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.trusted_ca:type_name -> envoy.config.core.v3.DataSource + 14, // 13: envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher.matcher:type_name -> envoy.type.matcher.v3.StringMatcher + 12, // 14: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.trusted_ca:type_name -> envoy.config.core.v3.DataSource 7, // 15: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.ca_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateProviderPluginInstance - 12, // 16: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.watched_directory:type_name -> envoy.config.core.v3.WatchedDirectory - 8, // 17: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.match_typed_subject_alt_names:type_name -> envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher - 13, // 18: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.match_subject_alt_names:type_name -> envoy.type.matcher.v3.StringMatcher - 14, // 19: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.require_signed_certificate_timestamp:type_name -> google.protobuf.BoolValue - 11, // 20: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.crl:type_name -> envoy.config.core.v3.DataSource - 2, // 21: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.trust_chain_verification:type_name -> 
envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.TrustChainVerification - 15, // 22: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.custom_validator_config:type_name -> envoy.config.core.v3.TypedExtensionConfig - 16, // 23: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.max_verify_depth:type_name -> google.protobuf.UInt32Value - 24, // [24:24] is the sub-list for method output_type - 24, // [24:24] is the sub-list for method input_type - 24, // [24:24] is the sub-list for extension type_name - 24, // [24:24] is the sub-list for extension extendee - 0, // [0:24] is the sub-list for field type_name + 10, // 16: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.system_root_certs:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.SystemRootCerts + 13, // 17: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.watched_directory:type_name -> envoy.config.core.v3.WatchedDirectory + 8, // 18: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.match_typed_subject_alt_names:type_name -> envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher + 14, // 19: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.match_subject_alt_names:type_name -> envoy.type.matcher.v3.StringMatcher + 15, // 20: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.require_signed_certificate_timestamp:type_name -> google.protobuf.BoolValue + 12, // 21: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.crl:type_name -> envoy.config.core.v3.DataSource + 2, // 22: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.trust_chain_verification:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.TrustChainVerification + 16, // 23: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.custom_validator_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 17, // 24: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.max_verify_depth:type_name -> google.protobuf.UInt32Value + 25, // [25:25] is the sub-list for method output_type + 25, // [25:25] is the sub-list for method input_type + 25, // [25:25] is the sub-list for extension type_name + 25, // [25:25] is the sub-list for extension extendee + 0, // [0:25] is the sub-list for field type_name } func init() { file_envoy_extensions_transport_sockets_tls_v3_common_proto_init() } @@ -1566,6 +1651,18 @@ func file_envoy_extensions_transport_sockets_tls_v3_common_proto_init() { return nil } } + file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CertificateValidationContext_SystemRootCerts); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[1].OneofWrappers = []interface{}{ (*PrivateKeyProvider_TypedConfig)(nil), @@ -1576,7 +1673,7 @@ func file_envoy_extensions_transport_sockets_tls_v3_common_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDesc, NumEnums: 3, - NumMessages: 7, + NumMessages: 8, NumExtensions: 0, NumServices: 0, }, diff --git 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.validate.go index 83c576c1d77..c020641fbc0 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/extensions/transport_sockets/tls/v3/common.proto @@ -995,6 +996,8 @@ func (m *SubjectAltNameMatcher) validate(all bool) error { } } + // no validation rules for Oid + if len(errors) > 0 { return SubjectAltNameMatcherMultiError(errors) } @@ -1159,6 +1162,35 @@ func (m *CertificateValidationContext) validate(all bool) error { } } + if all { + switch v := interface{}(m.GetSystemRootCerts()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "SystemRootCerts", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "SystemRootCerts", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSystemRootCerts()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateValidationContextValidationError{ + field: "SystemRootCerts", + reason: "embedded message failed validation", + cause: err, + } + } + } + if all { switch v := interface{}(m.GetWatchedDirectory()).(type) { case interface{ ValidateAll() error }: @@ -1507,3 +1539,110 @@ var _ interface { Cause() error ErrorName() string } = CertificateValidationContextValidationError{} + +// Validate checks the field values on +// CertificateValidationContext_SystemRootCerts with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *CertificateValidationContext_SystemRootCerts) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// CertificateValidationContext_SystemRootCerts with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// CertificateValidationContext_SystemRootCertsMultiError, or nil if none found. +func (m *CertificateValidationContext_SystemRootCerts) ValidateAll() error { + return m.validate(true) +} + +func (m *CertificateValidationContext_SystemRootCerts) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return CertificateValidationContext_SystemRootCertsMultiError(errors) + } + + return nil +} + +// CertificateValidationContext_SystemRootCertsMultiError is an error wrapping +// multiple validation errors returned by +// CertificateValidationContext_SystemRootCerts.ValidateAll() if the +// designated constraints aren't met. 
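
[Editorial note] The SystemRootCerts validation code added above follows the standard protoc-gen-validate shape: Validate and ValidateAll are thin wrappers over one validate(all bool) core, where all=false returns the first violation and all=true accumulates every violation into a MultiError. A minimal runnable sketch of that split, assuming a hypothetical Config message with two made-up rules:

package main

import (
	"errors"
	"fmt"
	"strings"
)

type MultiError []error

// Error joins the wrapped messages, as the generated MultiError types do.
func (m MultiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

type Config struct {
	Name string
	Port int
}

func (c Config) Validate() error    { return c.validate(false) }
func (c Config) ValidateAll() error { return c.validate(true) }

func (c Config) validate(all bool) error {
	var errs []error
	if c.Name == "" {
		err := errors.New("invalid Config.Name: value is required")
		if !all {
			return err // first violation wins
		}
		errs = append(errs, err)
	}
	if c.Port <= 0 {
		err := errors.New("invalid Config.Port: value must be positive")
		if !all {
			return err
		}
		errs = append(errs, err)
	}
	if len(errs) > 0 {
		return MultiError(errs)
	}
	return nil
}

func main() {
	c := Config{}
	fmt.Println(c.Validate())    // first violation only
	fmt.Println(c.ValidateAll()) // both violations, joined
}
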
+type CertificateValidationContext_SystemRootCertsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CertificateValidationContext_SystemRootCertsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CertificateValidationContext_SystemRootCertsMultiError) AllErrors() []error { return m } + +// CertificateValidationContext_SystemRootCertsValidationError is the +// validation error returned by +// CertificateValidationContext_SystemRootCerts.Validate if the designated +// constraints aren't met. +type CertificateValidationContext_SystemRootCertsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CertificateValidationContext_SystemRootCertsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CertificateValidationContext_SystemRootCertsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CertificateValidationContext_SystemRootCertsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CertificateValidationContext_SystemRootCertsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CertificateValidationContext_SystemRootCertsValidationError) ErrorName() string { + return "CertificateValidationContext_SystemRootCertsValidationError" +} + +// Error satisfies the builtin error interface +func (e CertificateValidationContext_SystemRootCertsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCertificateValidationContext_SystemRootCerts.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CertificateValidationContext_SystemRootCertsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CertificateValidationContext_SystemRootCertsValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common_vtproto.pb.go new file mode 100644 index 00000000000..00d5573d938 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common_vtproto.pb.go @@ -0,0 +1,1155 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/transport_sockets/tls/v3/common.proto + +package tlsv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
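
[Editorial note] The vtproto marshalers in this new file all encode back-to-front: SizeVT fixes the buffer size up front, then MarshalToSizedBufferVTStrict writes each field tail-first, emitting the field bytes, then (moving toward the front) the varint length, then the tag byte (0xa is field 1, wire type 2). The sketch below demonstrates that layout with a simplified encodeVarint standing in for protohelpers.EncodeVarint; it is illustrative, not vendored code.

package main

import "fmt"

// encodeVarint writes v backwards so that it ends just before offset and
// returns the new offset, matching protohelpers.EncodeVarint's contract.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sovLen(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = byte(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = byte(v)
	return base
}

// sovLen reports how many bytes the varint encoding of v occupies.
func sovLen(v uint64) (n int) {
	for {
		n++
		v >>= 7
		if v == 0 {
			return n
		}
	}
}

func main() {
	payload := "hi"
	size := 1 + sovLen(uint64(len(payload))) + len(payload) // tag + len + bytes
	dAtA := make([]byte, size)
	i := len(dAtA)

	i -= len(payload)
	copy(dAtA[i:], payload) // field bytes first (we move right to left)
	i = encodeVarint(dAtA, i, uint64(len(payload)))
	i--
	dAtA[i] = 0xa // field 1, wire type 2 (length-delimited)

	fmt.Printf("%% x -> % x\n", dAtA) // 0a 02 68 69
}
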
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TlsParameters) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TlsParameters) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TlsParameters) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.SignatureAlgorithms) > 0 { + for iNdEx := len(m.SignatureAlgorithms) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SignatureAlgorithms[iNdEx]) + copy(dAtA[i:], m.SignatureAlgorithms[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SignatureAlgorithms[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.EcdhCurves) > 0 { + for iNdEx := len(m.EcdhCurves) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.EcdhCurves[iNdEx]) + copy(dAtA[i:], m.EcdhCurves[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.EcdhCurves[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.CipherSuites) > 0 { + for iNdEx := len(m.CipherSuites) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.CipherSuites[iNdEx]) + copy(dAtA[i:], m.CipherSuites[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CipherSuites[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.TlsMaximumProtocolVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TlsMaximumProtocolVersion)) + i-- + dAtA[i] = 0x10 + } + if m.TlsMinimumProtocolVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TlsMinimumProtocolVersion)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *PrivateKeyProvider) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PrivateKeyProvider) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PrivateKeyProvider) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Fallback { + i-- + if m.Fallback { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if msg, ok := m.ConfigType.(*PrivateKeyProvider_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.ProviderName) > 0 { + i -= len(m.ProviderName) + copy(dAtA[i:], m.ProviderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ProviderName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PrivateKeyProvider_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PrivateKeyProvider_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { 
+ return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *TlsCertificate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TlsCertificate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TlsCertificate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Pkcs12 != nil { + if vtmsg, ok := interface{}(m.Pkcs12).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Pkcs12) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + if m.WatchedDirectory != nil { + if vtmsg, ok := interface{}(m.WatchedDirectory).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.WatchedDirectory) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + if m.PrivateKeyProvider != nil { + size, err := m.PrivateKeyProvider.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.SignedCertificateTimestamp) > 0 { + for iNdEx := len(m.SignedCertificateTimestamp) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.SignedCertificateTimestamp[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SignedCertificateTimestamp[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + } + if m.OcspStaple != nil { + if vtmsg, ok := interface{}(m.OcspStaple).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.OcspStaple) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.Password != nil { + if vtmsg, ok := 
interface{}(m.Password).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Password) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.PrivateKey != nil { + if vtmsg, ok := interface{}(m.PrivateKey).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.PrivateKey) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.CertificateChain != nil { + if vtmsg, ok := interface{}(m.CertificateChain).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CertificateChain) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TlsSessionTicketKeys) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TlsSessionTicketKeys) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TlsSessionTicketKeys) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Keys) > 0 { + for iNdEx := len(m.Keys) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Keys[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Keys[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CertificateProviderPluginInstance) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CertificateProviderPluginInstance) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CertificateProviderPluginInstance) MarshalToSizedBufferVTStrict(dAtA []byte) (int, 
error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CertificateName) > 0 { + i -= len(m.CertificateName) + copy(dAtA[i:], m.CertificateName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CertificateName))) + i-- + dAtA[i] = 0x12 + } + if len(m.InstanceName) > 0 { + i -= len(m.InstanceName) + copy(dAtA[i:], m.InstanceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.InstanceName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SubjectAltNameMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectAltNameMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubjectAltNameMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Oid) > 0 { + i -= len(m.Oid) + copy(dAtA[i:], m.Oid) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Oid))) + i-- + dAtA[i] = 0x1a + } + if m.Matcher != nil { + if vtmsg, ok := interface{}(m.Matcher).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Matcher) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.SanType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.SanType)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CertificateValidationContext_SystemRootCerts) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CertificateValidationContext_SystemRootCerts) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CertificateValidationContext_SystemRootCerts) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *CertificateValidationContext) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CertificateValidationContext) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CertificateValidationContext) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, 
nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SystemRootCerts != nil { + size, err := m.SystemRootCerts.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.MaxVerifyDepth != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxVerifyDepth).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if len(m.MatchTypedSubjectAltNames) > 0 { + for iNdEx := len(m.MatchTypedSubjectAltNames) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.MatchTypedSubjectAltNames[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + } + if m.OnlyVerifyLeafCertCrl { + i-- + if m.OnlyVerifyLeafCertCrl { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + } + if m.CaCertificateProviderInstance != nil { + size, err := m.CaCertificateProviderInstance.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + if m.CustomValidatorConfig != nil { + if vtmsg, ok := interface{}(m.CustomValidatorConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CustomValidatorConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x62 + } + if m.WatchedDirectory != nil { + if vtmsg, ok := interface{}(m.WatchedDirectory).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.WatchedDirectory) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x5a + } + if m.TrustChainVerification != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TrustChainVerification)) + i-- + dAtA[i] = 0x50 + } + if len(m.MatchSubjectAltNames) > 0 { + for iNdEx := len(m.MatchSubjectAltNames) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.MatchSubjectAltNames[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MatchSubjectAltNames[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + } + if m.AllowExpiredCertificate { + i-- + if m.AllowExpiredCertificate { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.Crl != nil { + if 
vtmsg, ok := interface{}(m.Crl).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Crl) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + if m.RequireSignedCertificateTimestamp != nil { + size, err := (*wrapperspb.BoolValue)(m.RequireSignedCertificateTimestamp).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.VerifyCertificateSpki) > 0 { + for iNdEx := len(m.VerifyCertificateSpki) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.VerifyCertificateSpki[iNdEx]) + copy(dAtA[i:], m.VerifyCertificateSpki[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VerifyCertificateSpki[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.VerifyCertificateHash) > 0 { + for iNdEx := len(m.VerifyCertificateHash) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.VerifyCertificateHash[iNdEx]) + copy(dAtA[i:], m.VerifyCertificateHash[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VerifyCertificateHash[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.TrustedCa != nil { + if vtmsg, ok := interface{}(m.TrustedCa).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TrustedCa) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TlsParameters) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TlsMinimumProtocolVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TlsMinimumProtocolVersion)) + } + if m.TlsMaximumProtocolVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TlsMaximumProtocolVersion)) + } + if len(m.CipherSuites) > 0 { + for _, s := range m.CipherSuites { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.EcdhCurves) > 0 { + for _, s := range m.EcdhCurves { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.SignatureAlgorithms) > 0 { + for _, s := range m.SignatureAlgorithms { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *PrivateKeyProvider) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProviderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Fallback { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *PrivateKeyProvider_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *TlsCertificate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + 
if m.CertificateChain != nil { + if size, ok := interface{}(m.CertificateChain).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CertificateChain) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PrivateKey != nil { + if size, ok := interface{}(m.PrivateKey).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.PrivateKey) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Password != nil { + if size, ok := interface{}(m.Password).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Password) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OcspStaple != nil { + if size, ok := interface{}(m.OcspStaple).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.OcspStaple) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.SignedCertificateTimestamp) > 0 { + for _, e := range m.SignedCertificateTimestamp { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.PrivateKeyProvider != nil { + l = m.PrivateKeyProvider.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.WatchedDirectory != nil { + if size, ok := interface{}(m.WatchedDirectory).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.WatchedDirectory) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Pkcs12 != nil { + if size, ok := interface{}(m.Pkcs12).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Pkcs12) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *TlsSessionTicketKeys) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Keys) > 0 { + for _, e := range m.Keys { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *CertificateProviderPluginInstance) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.InstanceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.CertificateName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SubjectAltNameMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SanType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.SanType)) + } + if m.Matcher != nil { + if size, ok := interface{}(m.Matcher).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Matcher) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Oid) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CertificateValidationContext_SystemRootCerts) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *CertificateValidationContext) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TrustedCa != nil { + if size, ok := interface{}(m.TrustedCa).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = 
proto.Size(m.TrustedCa) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.VerifyCertificateHash) > 0 { + for _, s := range m.VerifyCertificateHash { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.VerifyCertificateSpki) > 0 { + for _, s := range m.VerifyCertificateSpki { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.RequireSignedCertificateTimestamp != nil { + l = (*wrapperspb.BoolValue)(m.RequireSignedCertificateTimestamp).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Crl != nil { + if size, ok := interface{}(m.Crl).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Crl) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AllowExpiredCertificate { + n += 2 + } + if len(m.MatchSubjectAltNames) > 0 { + for _, e := range m.MatchSubjectAltNames { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.TrustChainVerification != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TrustChainVerification)) + } + if m.WatchedDirectory != nil { + if size, ok := interface{}(m.WatchedDirectory).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.WatchedDirectory) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CustomValidatorConfig != nil { + if size, ok := interface{}(m.CustomValidatorConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CustomValidatorConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CaCertificateProviderInstance != nil { + l = m.CaCertificateProviderInstance.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OnlyVerifyLeafCertCrl { + n += 2 + } + if len(m.MatchTypedSubjectAltNames) > 0 { + for _, e := range m.MatchTypedSubjectAltNames { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.MaxVerifyDepth != nil { + l = (*wrapperspb.UInt32Value)(m.MaxVerifyDepth).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SystemRootCerts != nil { + l = m.SystemRootCerts.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.go index c126f0de7ad..cf919ed971b 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/extensions/transport_sockets/tls/v3/secret.proto package tlsv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.validate.go index c34909177af..913c549220c 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/extensions/transport_sockets/tls/v3/secret.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret_vtproto.pb.go new file mode 100644 index 00000000000..35e8a3ce283 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret_vtproto.pb.go @@ -0,0 +1,415 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/transport_sockets/tls/v3/secret.proto + +package tlsv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *GenericSecret) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenericSecret) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GenericSecret) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Secret != nil { + if vtmsg, ok := interface{}(m.Secret).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Secret) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SdsSecretConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SdsSecretConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SdsSecretConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SdsConfig != nil { + if vtmsg, ok := interface{}(m.SdsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SdsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Secret) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Secret) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Secret) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Type.(*Secret_GenericSecret); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 
0, err + } + i -= size + } + if msg, ok := m.Type.(*Secret_ValidationContext); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*Secret_SessionTicketKeys); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*Secret_TlsCertificate); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Secret_TlsCertificate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Secret_TlsCertificate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TlsCertificate != nil { + size, err := m.TlsCertificate.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Secret_SessionTicketKeys) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Secret_SessionTicketKeys) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SessionTicketKeys != nil { + size, err := m.SessionTicketKeys.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Secret_ValidationContext) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Secret_ValidationContext) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ValidationContext != nil { + size, err := m.ValidationContext.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Secret_GenericSecret) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Secret_GenericSecret) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GenericSecret != nil { + size, err := m.GenericSecret.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *GenericSecret) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Secret != nil { + if size, ok := interface{}(m.Secret).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Secret) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m 
*SdsSecretConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SdsConfig != nil { + if size, ok := interface{}(m.SdsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SdsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Secret) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.Type.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Secret_TlsCertificate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TlsCertificate != nil { + l = m.TlsCertificate.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Secret_SessionTicketKeys) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SessionTicketKeys != nil { + l = m.SessionTicketKeys.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Secret_ValidationContext) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValidationContext != nil { + l = m.ValidationContext.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Secret_GenericSecret) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GenericSecret != nil { + l = m.GenericSecret.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.go index 8602c2670d4..db43da6745a 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/extensions/transport_sockets/tls/v3/tls.proto package tlsv3 @@ -11,10 +11,10 @@ import ( _ "github.com/envoyproxy/go-control-plane/envoy/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - duration "github.com/golang/protobuf/ptypes/duration" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -115,13 +115,13 @@ type UpstreamTlsContext struct { // for TLSv1.2 and older) to store for the purpose of session resumption. // // Defaults to 1, setting this to 0 disables session resumption. 
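Editor's note: the import hunk above is the mechanical half of this regeneration — the long-deprecated github.com/golang/protobuf/ptypes aliases (duration, wrappers) are swapped for the canonical google.golang.org/protobuf/types/known packages (durationpb, wrapperspb), which share the same wire format. A minimal sketch of consuming code after the migration; the concrete values are illustrative only, not taken from the provider:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Old code reached these types via github.com/golang/protobuf/ptypes/*;
	// the canonical packages add typed constructors with identical encoding.
	timeout := durationpb.New(300 * time.Second) // *durationpb.Duration
	maxKeys := wrapperspb.UInt32(1)              // *wrapperspb.UInt32Value

	fmt.Println(timeout.AsDuration(), maxKeys.GetValue())
}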
- MaxSessionKeys *wrappers.UInt32Value `protobuf:"bytes,4,opt,name=max_session_keys,json=maxSessionKeys,proto3" json:"max_session_keys,omitempty"` + MaxSessionKeys *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=max_session_keys,json=maxSessionKeys,proto3" json:"max_session_keys,omitempty"` // This field is used to control the enforcement, whereby the handshake will fail if the keyUsage extension // is present and incompatible with the TLS usage. Currently, the default value is false (i.e., enforcement off) // but it is expected to be changed to true by default in a future release. // ``ssl.was_key_usage_invalid`` in :ref:`listener metrics ` will be set for certificate // configurations that would fail if this option were set to true. - EnforceRsaKeyUsage *wrappers.BoolValue `protobuf:"bytes,5,opt,name=enforce_rsa_key_usage,json=enforceRsaKeyUsage,proto3" json:"enforce_rsa_key_usage,omitempty"` + EnforceRsaKeyUsage *wrapperspb.BoolValue `protobuf:"bytes,5,opt,name=enforce_rsa_key_usage,json=enforceRsaKeyUsage,proto3" json:"enforce_rsa_key_usage,omitempty"` } func (x *UpstreamTlsContext) Reset() { @@ -177,21 +177,21 @@ func (x *UpstreamTlsContext) GetAllowRenegotiation() bool { return false } -func (x *UpstreamTlsContext) GetMaxSessionKeys() *wrappers.UInt32Value { +func (x *UpstreamTlsContext) GetMaxSessionKeys() *wrapperspb.UInt32Value { if x != nil { return x.MaxSessionKeys } return nil } -func (x *UpstreamTlsContext) GetEnforceRsaKeyUsage() *wrappers.BoolValue { +func (x *UpstreamTlsContext) GetEnforceRsaKeyUsage() *wrapperspb.BoolValue { if x != nil { return x.EnforceRsaKeyUsage } return nil } -// [#next-free-field: 11] +// [#next-free-field: 12] type DownstreamTlsContext struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -201,10 +201,10 @@ type DownstreamTlsContext struct { CommonTlsContext *CommonTlsContext `protobuf:"bytes,1,opt,name=common_tls_context,json=commonTlsContext,proto3" json:"common_tls_context,omitempty"` // If specified, Envoy will reject connections without a valid client // certificate. - RequireClientCertificate *wrappers.BoolValue `protobuf:"bytes,2,opt,name=require_client_certificate,json=requireClientCertificate,proto3" json:"require_client_certificate,omitempty"` + RequireClientCertificate *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=require_client_certificate,json=requireClientCertificate,proto3" json:"require_client_certificate,omitempty"` // If specified, Envoy will reject connections without a valid and matching SNI. // [#not-implemented-hide:] - RequireSni *wrappers.BoolValue `protobuf:"bytes,3,opt,name=require_sni,json=requireSni,proto3" json:"require_sni,omitempty"` + RequireSni *wrapperspb.BoolValue `protobuf:"bytes,3,opt,name=require_sni,json=requireSni,proto3" json:"require_sni,omitempty"` // Types that are assignable to SessionTicketKeysType: // // *DownstreamTlsContext_SessionTicketKeys @@ -217,7 +217,7 @@ type DownstreamTlsContext struct { // If specified, ``session_timeout`` will change the maximum lifetime (in seconds) of the TLS session. // Currently this value is used as a hint for the `TLS session ticket lifetime (for TLSv1.2) `_. // Only seconds can be specified (fractional seconds are ignored).
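Editor's note: the hunks above only swap type names, so the field semantics are unchanged. A minimal sketch of populating these DownstreamTlsContext fields with the new well-known types; the timeout value is illustrative, and the package path is the one this patch vendors:

package main

import (
	"fmt"
	"time"

	tlsv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	dtc := &tlsv3.DownstreamTlsContext{
		RequireClientCertificate: wrapperspb.Bool(true),
		// Only whole seconds are honored; fractional seconds are ignored.
		SessionTimeout: durationpb.New(4 * time.Hour),
	}
	fmt.Println(dtc.GetRequireClientCertificate().GetValue(), dtc.GetSessionTimeout().AsDuration())
}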
- SessionTimeout *duration.Duration `protobuf:"bytes,6,opt,name=session_timeout,json=sessionTimeout,proto3" json:"session_timeout,omitempty"` + SessionTimeout *durationpb.Duration `protobuf:"bytes,6,opt,name=session_timeout,json=sessionTimeout,proto3" json:"session_timeout,omitempty"` // Config for whether to use certificates if they do not have // an accompanying OCSP response or if the response expires at runtime. // Defaults to LENIENT_STAPLING @@ -225,7 +225,11 @@ type DownstreamTlsContext struct { // Multiple certificates are allowed in Downstream transport socket to serve different SNI. // If the client provides SNI but no such cert matched, it will decide to full scan certificates or not based on this config. // Defaults to false. See more details in :ref:`Multiple TLS certificates `. - FullScanCertsOnSniMismatch *wrappers.BoolValue `protobuf:"bytes,9,opt,name=full_scan_certs_on_sni_mismatch,json=fullScanCertsOnSniMismatch,proto3" json:"full_scan_certs_on_sni_mismatch,omitempty"` + FullScanCertsOnSniMismatch *wrapperspb.BoolValue `protobuf:"bytes,9,opt,name=full_scan_certs_on_sni_mismatch,json=fullScanCertsOnSniMismatch,proto3" json:"full_scan_certs_on_sni_mismatch,omitempty"` + // By default, Envoy as a server uses its preferred cipher during the handshake. + // Setting this to true would allow the downstream client's preferred cipher to be used instead. + // Has no effect when using TLSv1_3. + PreferClientCiphers bool `protobuf:"varint,11,opt,name=prefer_client_ciphers,json=preferClientCiphers,proto3" json:"prefer_client_ciphers,omitempty"` } func (x *DownstreamTlsContext) Reset() { @@ -267,14 +271,14 @@ func (x *DownstreamTlsContext) GetCommonTlsContext() *CommonTlsContext { return nil } -func (x *DownstreamTlsContext) GetRequireClientCertificate() *wrappers.BoolValue { +func (x *DownstreamTlsContext) GetRequireClientCertificate() *wrapperspb.BoolValue { if x != nil { return x.RequireClientCertificate } return nil } -func (x *DownstreamTlsContext) GetRequireSni() *wrappers.BoolValue { +func (x *DownstreamTlsContext) GetRequireSni() *wrapperspb.BoolValue { if x != nil { return x.RequireSni } @@ -316,7 +320,7 @@ func (x *DownstreamTlsContext) GetDisableStatefulSessionResumption() bool { return false } -func (x *DownstreamTlsContext) GetSessionTimeout() *duration.Duration { +func (x *DownstreamTlsContext) GetSessionTimeout() *durationpb.Duration { if x != nil { return x.SessionTimeout } @@ -330,13 +334,20 @@ func (x *DownstreamTlsContext) GetOcspStaplePolicy() DownstreamTlsContext_OcspSt return DownstreamTlsContext_LENIENT_STAPLING } -func (x *DownstreamTlsContext) GetFullScanCertsOnSniMismatch() *wrappers.BoolValue { +func (x *DownstreamTlsContext) GetFullScanCertsOnSniMismatch() *wrapperspb.BoolValue { if x != nil { return x.FullScanCertsOnSniMismatch } return nil } +func (x *DownstreamTlsContext) GetPreferClientCiphers() bool { + if x != nil { + return x.PreferClientCiphers + } + return false +} + type isDownstreamTlsContext_SessionTicketKeysType interface { isDownstreamTlsContext_SessionTicketKeysType() } @@ -442,7 +453,7 @@ func (x *TlsKeyLog) GetRemoteAddressRange() []*v3.CidrRange { } // TLS context shared by both client and server TLS contexts. 
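Editor's note: prefer_client_ciphers above is one of the two field additions this regeneration picks up; most of the rest of the patch is the vtprotobuf marshalling shown earlier. The generated oneof plumbing (Secret_TlsCertificate and friends in secret_vtproto.pb.go) follows the usual wrapper-struct pattern. A minimal sketch, assuming the vendored package path; the secret name is illustrative:

package main

import (
	"fmt"

	tlsv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Each oneof branch is a wrapper struct assigned to the single Type field.
	s := &tlsv3.Secret{
		Name: "server-cert",
		Type: &tlsv3.Secret_TlsCertificate{
			TlsCertificate: &tlsv3.TlsCertificate{},
		},
	}
	// proto.Marshal always works; building with -tags vtprotobuf additionally
	// exposes the MarshalVTStrict path generated in this patch.
	b, err := proto.Marshal(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(b), s.GetTlsCertificate() != nil)
}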
-// [#next-free-field: 16] +// [#next-free-field: 17] type CommonTlsContext struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -454,11 +465,8 @@ type CommonTlsContext struct { // :ref:`Multiple TLS certificates ` can be associated with the // same context to allow both RSA and ECDSA certificates and support SNI-based selection. // - // Only one of ``tls_certificates``, ``tls_certificate_sds_secret_configs``, - // and ``tls_certificate_provider_instance`` may be used. - // [#next-major-version: These mutually exclusive fields should ideally be in a oneof, but it's - // not legal to put a repeated field in a oneof. In the next major version, we should rework - // this to avoid this problem.] + // If ``tls_certificate_provider_instance`` is set, this field is ignored. + // If this field is set, ``tls_certificate_sds_secret_configs`` is ignored. TlsCertificates []*TlsCertificate `protobuf:"bytes,2,rep,name=tls_certificates,json=tlsCertificates,proto3" json:"tls_certificates,omitempty"` // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be // fetched/refreshed over the network asynchronously with respect to the TLS handshake. // @@ -466,18 +474,21 @@ type CommonTlsContext struct { // The same number and types of certificates as :ref:`tls_certificates ` // are valid in the certificates fetched through this setting. // - // Only one of ``tls_certificates``, ``tls_certificate_sds_secret_configs``, - // and ``tls_certificate_provider_instance`` may be used. - // [#next-major-version: These mutually exclusive fields should ideally be in a oneof, but it's - // not legal to put a repeated field in a oneof. In the next major version, we should rework - // this to avoid this problem.] + // If ``tls_certificates`` or ``tls_certificate_provider_instance`` are set, this field + // is ignored. TlsCertificateSdsSecretConfigs []*SdsSecretConfig `protobuf:"bytes,6,rep,name=tls_certificate_sds_secret_configs,json=tlsCertificateSdsSecretConfigs,proto3" json:"tls_certificate_sds_secret_configs,omitempty"` // Certificate provider instance for fetching TLS certs. // - // Only one of ``tls_certificates``, ``tls_certificate_sds_secret_configs``, - // and ``tls_certificate_provider_instance`` may be used. + // If this field is set, ``tls_certificates`` and ``tls_certificate_sds_secret_configs`` + // are ignored. // [#not-implemented-hide:] TlsCertificateProviderInstance *CertificateProviderPluginInstance `protobuf:"bytes,14,opt,name=tls_certificate_provider_instance,json=tlsCertificateProviderInstance,proto3" json:"tls_certificate_provider_instance,omitempty"` + // Custom TLS certificate selector. + // + // Select TLS certificate based on TLS client hello. + // If empty, defaults to native TLS certificate selection behavior: + // DNS SANs or Subject Common Name in TLS certificates are extracted as server name patterns to match SNI. + CustomTlsCertificateSelector *v3.TypedExtensionConfig `protobuf:"bytes,16,opt,name=custom_tls_certificate_selector,json=customTlsCertificateSelector,proto3" json:"custom_tls_certificate_selector,omitempty"` // Certificate provider for fetching TLS certificates. // [#not-implemented-hide:] // @@ -574,6 +585,13 @@ func (x *CommonTlsContext) GetTlsCertificateProviderInstance() *CertificateProvi return nil } +func (x *CommonTlsContext) GetCustomTlsCertificateSelector() *v3.TypedExtensionConfig { + if x != nil { + return x.CustomTlsCertificateSelector + } + return nil +} + // Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto.
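Editor's note: the reworded comments above replace the old "only one of these may be used" rule with an explicit precedence: the certificate provider instance wins, then tls_certificates, then the SDS secret configs. A hypothetical sketch of the lowest-precedence, SDS-based form; the config name is illustrative, and the Get accessor is the standard protoc-gen-go one (not shown in this hunk):

package main

import (
	"fmt"

	tlsv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
)

func main() {
	ctx := &tlsv3.CommonTlsContext{
		// Lowest precedence: ignored when TlsCertificates or
		// TlsCertificateProviderInstance is also set, per the docs above.
		TlsCertificateSdsSecretConfigs: []*tlsv3.SdsSecretConfig{
			{Name: "server-cert"},
		},
	}
	fmt.Println(len(ctx.GetTlsCertificateSdsSecretConfigs()))
}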
func (x *CommonTlsContext) GetTlsCertificateCertificateProvider() *CommonTlsContext_CertificateProvider { if x != nil { @@ -1020,7 +1038,7 @@ var file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDesc = []byte{ 0x66, 0x6f, 0x72, 0x63, 0x65, 0x52, 0x73, 0x61, 0x4b, 0x65, 0x79, 0x55, 0x73, 0x61, 0x67, 0x65, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x9a, 0x09, + 0x65, 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0xce, 0x09, 0x0a, 0x14, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x69, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, @@ -1084,232 +1102,242 @@ var file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDesc = []byte{ 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x66, 0x75, 0x6c, 0x6c, 0x53, 0x63, 0x61, 0x6e, 0x43, 0x65, 0x72, - 0x74, 0x73, 0x4f, 0x6e, 0x53, 0x6e, 0x69, 0x4d, 0x69, 0x73, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, - 0x4e, 0x0a, 0x10, 0x4f, 0x63, 0x73, 0x70, 0x53, 0x74, 0x61, 0x70, 0x6c, 0x65, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x14, 0x0a, 0x10, 0x4c, 0x45, 0x4e, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x53, - 0x54, 0x41, 0x50, 0x4c, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x52, - 0x49, 0x43, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x50, 0x4c, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0f, - 0x0a, 0x0b, 0x4d, 0x55, 0x53, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x50, 0x4c, 0x45, 0x10, 0x02, 0x3a, - 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x42, 0x1a, - 0x0a, 0x18, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, - 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0xcc, 0x01, 0x0a, 0x09, 0x54, - 0x6c, 0x73, 0x4b, 0x65, 0x79, 0x4c, 0x6f, 0x67, 0x12, 0x1b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, - 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x4f, 0x0a, 0x13, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x61, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x52, 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x51, 0x0a, 0x14, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, - 0x52, 
0x61, 0x6e, 0x67, 0x65, 0x52, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x22, 0xea, 0x17, 0x0a, 0x10, 0x43, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x57, - 0x0a, 0x0a, 0x74, 0x6c, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, - 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x54, - 0x6c, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x09, 0x74, 0x6c, - 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x64, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x63, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, - 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x6c, - 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x0f, 0x74, 0x6c, - 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, 0x86, 0x01, - 0x0a, 0x22, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x5f, 0x73, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, - 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, - 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1e, 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x97, 0x01, 0x0a, 0x21, 0x74, 0x6c, 0x73, 0x5f, 0x63, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, - 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0e, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, - 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, - 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, - 0x52, 0x1e, 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, - 0x12, 0xad, 0x01, 0x0a, 0x24, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x4f, 0x2e, 0x65, 0x6e, 0x76, 
0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x74, 0x73, 0x4f, 0x6e, 0x53, 0x6e, 0x69, 0x4d, 0x69, 0x73, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x12, + 0x32, 0x0a, 0x15, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, + 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x69, 0x70, 0x68, + 0x65, 0x72, 0x73, 0x22, 0x4e, 0x0a, 0x10, 0x4f, 0x63, 0x73, 0x70, 0x53, 0x74, 0x61, 0x70, 0x6c, + 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x14, 0x0a, 0x10, 0x4c, 0x45, 0x4e, 0x49, 0x45, + 0x4e, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x50, 0x4c, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x13, 0x0a, + 0x0f, 0x53, 0x54, 0x52, 0x49, 0x43, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x50, 0x4c, 0x49, 0x4e, 0x47, + 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x55, 0x53, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x50, 0x4c, + 0x45, 0x10, 0x02, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x44, 0x6f, + 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x42, 0x1a, 0x0a, 0x18, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, + 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0xcc, + 0x01, 0x0a, 0x09, 0x54, 0x6c, 0x73, 0x4b, 0x65, 0x79, 0x4c, 0x6f, 0x67, 0x12, 0x1b, 0x0a, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x4f, 0x0a, 0x13, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, + 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x51, 0x0a, 0x14, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x6e, + 0x67, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x22, 0xdd, 0x18, + 0x0a, 0x10, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x12, 0x57, 0x0a, 0x0a, 0x74, 0x6c, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, + 0x76, 0x33, 0x2e, 0x54, 0x6c, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x52, 0x09, 0x74, 0x6c, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x64, 0x0a, 0x10, 0x74, + 0x6c, 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 
0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x52, 0x0f, 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x73, 0x12, 0x86, 0x01, 0x0a, 0x22, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x64, 0x73, 0x53, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1e, 0x74, 0x6c, 0x73, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x97, 0x01, 0x0a, 0x21, 0x74, + 0x6c, 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x52, 0x1e, 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x12, 0x71, 0x0a, 0x1f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, + 0x6c, 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1c, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x54, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0xad, 0x01, 0x0a, 0x24, 0x74, 0x6c, 0x73, 0x5f, + 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 
0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, + 0x2e, 0x30, 0x18, 0x01, 0x52, 0x21, 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0xc6, 0x01, 0x0a, 0x2d, 0x74, 0x6c, 0x73, 0x5f, + 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x57, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, - 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x21, 0x74, - 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, - 0x12, 0xc6, 0x01, 0x0a, 0x2d, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x63, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, + 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x29, 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x12, 0x78, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x48, 0x00, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x8c, 0x01, 0x0a, 0x24, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x5f, 0x73, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, + 0x6e, 0x73, 
0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, + 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0xa2, 0x01, 0x0a, 0x1b, 0x63, 0x6f, + 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x60, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x6f, 0x6d, + 0x62, 0x69, 0x6e, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x48, 0x00, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x64, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0xb5, + 0x01, 0x0a, 0x27, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x4f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, + 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, + 0x52, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0xce, 0x01, 0x0a, 0x30, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x57, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, + 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 
0x01, 0x48, 0x00, 0x52, 0x2c, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x57, + 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x61, 0x6e, + 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x12, 0x4d, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x5f, 0x6c, + 0x6f, 0x67, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, - 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x29, - 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, - 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x78, 0x0a, 0x12, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, - 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, - 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x48, 0x00, - 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, - 0x65, 0x78, 0x74, 0x12, 0x8c, 0x01, 0x0a, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x73, 0x64, 0x73, 0x5f, 0x73, - 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, - 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, - 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, - 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x43, 0x6f, 0x6e, 0x74, - 0x65, 0x78, 0x74, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0xa2, 0x01, 0x0a, 0x1b, 0x63, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x64, 0x5f, + 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x6c, 0x73, 0x4b, 0x65, 0x79, 0x4c, 0x6f, 0x67, 0x52, 0x06, + 0x6b, 0x65, 0x79, 0x4c, 0x6f, 0x67, 0x1a, 0x92, 0x01, 0x0a, 0x13, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x1b, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4f, 0x0a, 0x0c, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, + 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0d, 0x0a, 0x06, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x6d, 0x0a, 0x1b, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x29, 0x0a, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0xa4, 0x06, 0x0a, 0x24, 0x43, + 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x12, 0x8f, 0x01, 0x0a, 0x1a, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x60, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, - 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x64, 0x43, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x48, 0x00, 0x52, 0x19, 0x63, 0x6f, - 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0xb5, 0x01, 0x0a, 0x27, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, - 
0x64, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, - 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, - 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, - 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, - 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x65, 0x72, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, - 0xce, 0x01, 0x0a, 0x30, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, - 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, - 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, - 0x48, 0x00, 0x52, 0x2c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, - 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x57, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x18, 0x0d, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, - 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, - 0x12, 0x4d, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x0f, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, - 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x6c, - 0x73, 0x4b, 0x65, 0x79, 0x4c, 0x6f, 0x67, 0x52, 0x06, 0x6b, 0x65, 0x79, 0x4c, 0x6f, 0x67, 0x1a, - 0x92, 0x01, 0x0a, 0x13, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, - 0x72, 0x6f, 0x76, 0x69, 
0x64, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4f, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0d, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x6d, 0x0a, 0x1b, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x65, 0x72, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4e, - 0x61, 0x6d, 0x65, 0x1a, 0xa4, 0x06, 0x0a, 0x24, 0x43, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x64, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x8f, 0x01, 0x0a, - 0x1a, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, - 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, - 0x01, 0x02, 0x10, 0x01, 0x52, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x94, - 0x01, 0x0a, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x73, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, + 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x18, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x94, 0x01, 0x0a, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x73, 0x64, 0x73, + 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x53, 0x64, 0x73, + 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0xb3, 0x01, 0x0a, + 0x27, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, + 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x42, + 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x24, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x12, 0xcc, 0x01, 0x0a, 0x30, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, - 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, - 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, - 0x02, 0x10, 0x01, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, - 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0xb3, 0x01, 0x0a, 0x27, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, - 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, - 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, - 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, - 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 
0x61, 0x74, 0x65, - 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, - 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0xcc, 0x01, 0x0a, 0x30, + 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, + 0x30, 0x18, 0x01, 0x52, 0x2c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x3a, 0x4e, 0x9a, 0xc5, 0x88, 0x1e, 0x49, 0x0a, 0x47, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x6f, 0x6d, + 0x62, 0x69, 0x6e, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x42, 0x19, 0x0a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, - 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, - 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, - 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, - 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x2c, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, - 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x3a, 0x4e, 0x9a, 0xc5, 0x88, 0x1e, - 0x49, 0x0a, 0x47, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x61, 0x75, 0x74, 0x68, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x64, 0x43, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x69, 
0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, - 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x61, 0x75, 0x74, 0x68, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x42, 0x19, 0x0a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0xa5, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, - 0x02, 0x0a, 0x37, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, - 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x54, 0x6c, 0x73, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x56, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, - 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, - 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, - 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x6c, 0x73, 0x76, 0x33, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x78, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0xa5, 0x01, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x37, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, + 0x33, 0x42, 0x08, 0x54, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x56, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, + 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x3b, + 0x74, 0x6c, 0x73, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1335,17 +1363,17 @@ var file_envoy_extensions_transport_sockets_tls_v3_tls_proto_goTypes = []interfa (*CommonTlsContext_CertificateProvider)(nil), // 5: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider (*CommonTlsContext_CertificateProviderInstance)(nil), // 6: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance (*CommonTlsContext_CombinedCertificateValidationContext)(nil), // 7: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext - (*wrappers.UInt32Value)(nil), // 8: google.protobuf.UInt32Value - (*wrappers.BoolValue)(nil), // 9: google.protobuf.BoolValue + (*wrapperspb.UInt32Value)(nil), // 8: google.protobuf.UInt32Value + (*wrapperspb.BoolValue)(nil), // 9: 
google.protobuf.BoolValue (*TlsSessionTicketKeys)(nil), // 10: envoy.extensions.transport_sockets.tls.v3.TlsSessionTicketKeys (*SdsSecretConfig)(nil), // 11: envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig - (*duration.Duration)(nil), // 12: google.protobuf.Duration + (*durationpb.Duration)(nil), // 12: google.protobuf.Duration (*v3.CidrRange)(nil), // 13: envoy.config.core.v3.CidrRange (*TlsParameters)(nil), // 14: envoy.extensions.transport_sockets.tls.v3.TlsParameters (*TlsCertificate)(nil), // 15: envoy.extensions.transport_sockets.tls.v3.TlsCertificate (*CertificateProviderPluginInstance)(nil), // 16: envoy.extensions.transport_sockets.tls.v3.CertificateProviderPluginInstance - (*CertificateValidationContext)(nil), // 17: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext - (*v3.TypedExtensionConfig)(nil), // 18: envoy.config.core.v3.TypedExtensionConfig + (*v3.TypedExtensionConfig)(nil), // 17: envoy.config.core.v3.TypedExtensionConfig + (*CertificateValidationContext)(nil), // 18: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext } var file_envoy_extensions_transport_sockets_tls_v3_tls_proto_depIdxs = []int32{ 4, // 0: envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext.common_tls_context:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext @@ -1365,25 +1393,26 @@ var file_envoy_extensions_transport_sockets_tls_v3_tls_proto_depIdxs = []int32{ 15, // 14: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificates:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsCertificate 11, // 15: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificate_sds_secret_configs:type_name -> envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig 16, // 16: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateProviderPluginInstance - 5, // 17: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificate_certificate_provider:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider - 6, // 18: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificate_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance - 17, // 19: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.validation_context:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext - 11, // 20: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.validation_context_sds_secret_config:type_name -> envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig - 7, // 21: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.combined_validation_context:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext - 5, // 22: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.validation_context_certificate_provider:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider - 6, // 23: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.validation_context_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance - 18, // 24: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.custom_handshaker:type_name -> envoy.config.core.v3.TypedExtensionConfig - 3, // 25: 
envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.key_log:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsKeyLog - 18, // 26: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider.typed_config:type_name -> envoy.config.core.v3.TypedExtensionConfig - 17, // 27: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext.default_validation_context:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext - 11, // 28: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext.validation_context_sds_secret_config:type_name -> envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig - 5, // 29: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext.validation_context_certificate_provider:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider - 6, // 30: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext.validation_context_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance - 31, // [31:31] is the sub-list for method output_type - 31, // [31:31] is the sub-list for method input_type - 31, // [31:31] is the sub-list for extension type_name - 31, // [31:31] is the sub-list for extension extendee - 0, // [0:31] is the sub-list for field type_name + 17, // 17: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.custom_tls_certificate_selector:type_name -> envoy.config.core.v3.TypedExtensionConfig + 5, // 18: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificate_certificate_provider:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider + 6, // 19: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificate_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance + 18, // 20: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.validation_context:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext + 11, // 21: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.validation_context_sds_secret_config:type_name -> envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig + 7, // 22: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.combined_validation_context:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext + 5, // 23: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.validation_context_certificate_provider:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider + 6, // 24: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.validation_context_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance + 17, // 25: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.custom_handshaker:type_name -> envoy.config.core.v3.TypedExtensionConfig + 3, // 26: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.key_log:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsKeyLog + 17, // 27: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider.typed_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 18, // 28: 
envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext.default_validation_context:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext + 11, // 29: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext.validation_context_sds_secret_config:type_name -> envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig + 5, // 30: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext.validation_context_certificate_provider:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider + 6, // 31: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext.validation_context_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance + 32, // [32:32] is the sub-list for method output_type + 32, // [32:32] is the sub-list for method input_type + 32, // [32:32] is the sub-list for extension type_name + 32, // [32:32] is the sub-list for extension extendee + 0, // [0:32] is the sub-list for field type_name } func init() { file_envoy_extensions_transport_sockets_tls_v3_tls_proto_init() } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.go index 89b8fd11e8c..6468ff227c3 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. 
// source: envoy/extensions/transport_sockets/tls/v3/tls.proto @@ -419,6 +420,8 @@ func (m *DownstreamTlsContext) validate(all bool) error { } } + // no validation rules for PreferClientCiphers + switch v := m.SessionTicketKeysType.(type) { case *DownstreamTlsContext_SessionTicketKeys: if v == nil { @@ -924,6 +927,35 @@ func (m *CommonTlsContext) validate(all bool) error { } } + if all { + switch v := interface{}(m.GetCustomTlsCertificateSelector()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "CustomTlsCertificateSelector", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "CustomTlsCertificateSelector", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCustomTlsCertificateSelector()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "CustomTlsCertificateSelector", + reason: "embedded message failed validation", + cause: err, + } + } + } + if all { switch v := interface{}(m.GetTlsCertificateCertificateProvider()).(type) { case interface{ ValidateAll() error }: diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.go index 4af07522895..7e9ee89672e 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto package tlsv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.validate.go index c15476be592..4992dec753d 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. 
// source: envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config_vtproto.pb.go new file mode 100644 index 00000000000..6cad4f635fe --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config_vtproto.pb.go @@ -0,0 +1,167 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto + +package tlsv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *SPIFFECertValidatorConfig_TrustDomain) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SPIFFECertValidatorConfig_TrustDomain) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SPIFFECertValidatorConfig_TrustDomain) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TrustBundle != nil { + if vtmsg, ok := interface{}(m.TrustBundle).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TrustBundle) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SPIFFECertValidatorConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SPIFFECertValidatorConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SPIFFECertValidatorConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if 
len(m.TrustDomains) > 0 { + for iNdEx := len(m.TrustDomains) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TrustDomains[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SPIFFECertValidatorConfig_TrustDomain) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TrustBundle != nil { + if size, ok := interface{}(m.TrustBundle).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TrustBundle) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SPIFFECertValidatorConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.TrustDomains) > 0 { + for _, e := range m.TrustDomains { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_vtproto.pb.go new file mode 100644 index 00000000000..287129049b4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_vtproto.pb.go @@ -0,0 +1,1265 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/transport_sockets/tls/v3/tls.proto + +package tlsv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *UpstreamTlsContext) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpstreamTlsContext) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UpstreamTlsContext) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.EnforceRsaKeyUsage != nil { + size, err := (*wrapperspb.BoolValue)(m.EnforceRsaKeyUsage).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.MaxSessionKeys != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxSessionKeys).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.AllowRenegotiation { + i-- + if m.AllowRenegotiation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Sni) > 0 { + i -= len(m.Sni) + copy(dAtA[i:], m.Sni) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Sni))) + i-- + dAtA[i] = 0x12 + } + if m.CommonTlsContext != nil { + size, err := m.CommonTlsContext.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DownstreamTlsContext) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DownstreamTlsContext) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DownstreamTlsContext) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PreferClientCiphers { + i-- + if m.PreferClientCiphers { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } + if m.DisableStatefulSessionResumption { + i-- + if m.DisableStatefulSessionResumption { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.FullScanCertsOnSniMismatch != nil { + size, err := (*wrapperspb.BoolValue)(m.FullScanCertsOnSniMismatch).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if m.OcspStaplePolicy != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.OcspStaplePolicy)) + i-- + dAtA[i] = 0x40 + } + if msg, ok := m.SessionTicketKeysType.(*DownstreamTlsContext_DisableStatelessSessionResumption); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.SessionTimeout != nil { + size, err := 
(*durationpb.Duration)(m.SessionTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if msg, ok := m.SessionTicketKeysType.(*DownstreamTlsContext_SessionTicketKeysSdsSecretConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.SessionTicketKeysType.(*DownstreamTlsContext_SessionTicketKeys); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.RequireSni != nil { + size, err := (*wrapperspb.BoolValue)(m.RequireSni).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.RequireClientCertificate != nil { + size, err := (*wrapperspb.BoolValue)(m.RequireClientCertificate).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.CommonTlsContext != nil { + size, err := m.CommonTlsContext.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DownstreamTlsContext_SessionTicketKeys) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DownstreamTlsContext_SessionTicketKeys) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SessionTicketKeys != nil { + size, err := m.SessionTicketKeys.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *DownstreamTlsContext_SessionTicketKeysSdsSecretConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DownstreamTlsContext_SessionTicketKeysSdsSecretConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SessionTicketKeysSdsSecretConfig != nil { + size, err := m.SessionTicketKeysSdsSecretConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *DownstreamTlsContext_DisableStatelessSessionResumption) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DownstreamTlsContext_DisableStatelessSessionResumption) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.DisableStatelessSessionResumption { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + return len(dAtA) - i, nil +} +func (m *TlsKeyLog) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*TlsKeyLog) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TlsKeyLog) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RemoteAddressRange) > 0 { + for iNdEx := len(m.RemoteAddressRange) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.RemoteAddressRange[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RemoteAddressRange[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.LocalAddressRange) > 0 { + for iNdEx := len(m.LocalAddressRange) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.LocalAddressRange[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LocalAddressRange[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommonTlsContext_CertificateProvider) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommonTlsContext_CertificateProvider) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_CertificateProvider) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Config.(*CommonTlsContext_CertificateProvider_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommonTlsContext_CertificateProvider_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_CertificateProvider_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + if vtmsg, ok := interface{}(m.TypedConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := 
vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TypedConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *CommonTlsContext_CertificateProviderInstance) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommonTlsContext_CertificateProviderInstance) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_CertificateProviderInstance) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CertificateName) > 0 { + i -= len(m.CertificateName) + copy(dAtA[i:], m.CertificateName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CertificateName))) + i-- + dAtA[i] = 0x12 + } + if len(m.InstanceName) > 0 { + i -= len(m.InstanceName) + copy(dAtA[i:], m.InstanceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.InstanceName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommonTlsContext_CombinedCertificateValidationContext) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommonTlsContext_CombinedCertificateValidationContext) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_CombinedCertificateValidationContext) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ValidationContextCertificateProviderInstance != nil { + size, err := m.ValidationContextCertificateProviderInstance.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.ValidationContextCertificateProvider != nil { + size, err := m.ValidationContextCertificateProvider.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.ValidationContextSdsSecretConfig != nil { + size, err := m.ValidationContextSdsSecretConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.DefaultValidationContext != nil { + size, err := m.DefaultValidationContext.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommonTlsContext) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommonTlsContext) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.CustomTlsCertificateSelector != nil { + if vtmsg, ok := interface{}(m.CustomTlsCertificateSelector).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CustomTlsCertificateSelector) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.KeyLog != nil { + size, err := m.KeyLog.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + if m.TlsCertificateProviderInstance != nil { + size, err := m.TlsCertificateProviderInstance.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if m.CustomHandshaker != nil { + if vtmsg, ok := interface{}(m.CustomHandshaker).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CustomHandshaker) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + if msg, ok := m.ValidationContextType.(*CommonTlsContext_ValidationContextCertificateProviderInstance); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.TlsCertificateCertificateProviderInstance != nil { + size, err := m.TlsCertificateCertificateProviderInstance.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if msg, ok := m.ValidationContextType.(*CommonTlsContext_ValidationContextCertificateProvider); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.TlsCertificateCertificateProvider != nil { + size, err := m.TlsCertificateCertificateProvider.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if msg, ok := 
m.ValidationContextType.(*CommonTlsContext_CombinedValidationContext); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ValidationContextType.(*CommonTlsContext_ValidationContextSdsSecretConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.TlsCertificateSdsSecretConfigs) > 0 { + for iNdEx := len(m.TlsCertificateSdsSecretConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TlsCertificateSdsSecretConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.AlpnProtocols) > 0 { + for iNdEx := len(m.AlpnProtocols) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AlpnProtocols[iNdEx]) + copy(dAtA[i:], m.AlpnProtocols[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AlpnProtocols[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if msg, ok := m.ValidationContextType.(*CommonTlsContext_ValidationContext); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.TlsCertificates) > 0 { + for iNdEx := len(m.TlsCertificates) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TlsCertificates[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.TlsParams != nil { + size, err := m.TlsParams.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommonTlsContext_ValidationContext) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_ValidationContext) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ValidationContext != nil { + size, err := m.ValidationContext.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *CommonTlsContext_ValidationContextSdsSecretConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_ValidationContextSdsSecretConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ValidationContextSdsSecretConfig != nil { + size, err := m.ValidationContextSdsSecretConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *CommonTlsContext_CombinedValidationContext) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_CombinedValidationContext) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.CombinedValidationContext != nil { + size, err := 
m.CombinedValidationContext.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *CommonTlsContext_ValidationContextCertificateProvider) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_ValidationContextCertificateProvider) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ValidationContextCertificateProvider != nil { + size, err := m.ValidationContextCertificateProvider.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *CommonTlsContext_ValidationContextCertificateProviderInstance) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_ValidationContextCertificateProviderInstance) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ValidationContextCertificateProviderInstance != nil { + size, err := m.ValidationContextCertificateProviderInstance.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x62 + } + return len(dAtA) - i, nil +} +func (m *UpstreamTlsContext) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CommonTlsContext != nil { + l = m.CommonTlsContext.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Sni) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AllowRenegotiation { + n += 2 + } + if m.MaxSessionKeys != nil { + l = (*wrapperspb.UInt32Value)(m.MaxSessionKeys).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnforceRsaKeyUsage != nil { + l = (*wrapperspb.BoolValue)(m.EnforceRsaKeyUsage).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DownstreamTlsContext) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CommonTlsContext != nil { + l = m.CommonTlsContext.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RequireClientCertificate != nil { + l = (*wrapperspb.BoolValue)(m.RequireClientCertificate).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RequireSni != nil { + l = (*wrapperspb.BoolValue)(m.RequireSni).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.SessionTicketKeysType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.SessionTimeout != nil { + l = (*durationpb.Duration)(m.SessionTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OcspStaplePolicy != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.OcspStaplePolicy)) + } + if m.FullScanCertsOnSniMismatch != nil { + l = (*wrapperspb.BoolValue)(m.FullScanCertsOnSniMismatch).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DisableStatefulSessionResumption { + n += 2 + 
} + if m.PreferClientCiphers { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *DownstreamTlsContext_SessionTicketKeys) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SessionTicketKeys != nil { + l = m.SessionTicketKeys.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *DownstreamTlsContext_SessionTicketKeysSdsSecretConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SessionTicketKeysSdsSecretConfig != nil { + l = m.SessionTicketKeysSdsSecretConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *DownstreamTlsContext_DisableStatelessSessionResumption) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *TlsKeyLog) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.LocalAddressRange) > 0 { + for _, e := range m.LocalAddressRange { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RemoteAddressRange) > 0 { + for _, e := range m.RemoteAddressRange { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *CommonTlsContext_CertificateProvider) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.Config.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *CommonTlsContext_CertificateProvider_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + if size, ok := interface{}(m.TypedConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TypedConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CommonTlsContext_CertificateProviderInstance) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.InstanceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.CertificateName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CommonTlsContext_CombinedCertificateValidationContext) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DefaultValidationContext != nil { + l = m.DefaultValidationContext.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ValidationContextSdsSecretConfig != nil { + l = m.ValidationContextSdsSecretConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ValidationContextCertificateProvider != nil { + l = m.ValidationContextCertificateProvider.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ValidationContextCertificateProviderInstance != nil { + l = m.ValidationContextCertificateProviderInstance.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CommonTlsContext) SizeVT() (n 
int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TlsParams != nil { + l = m.TlsParams.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.TlsCertificates) > 0 { + for _, e := range m.TlsCertificates { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if vtmsg, ok := m.ValidationContextType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if len(m.AlpnProtocols) > 0 { + for _, s := range m.AlpnProtocols { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.TlsCertificateSdsSecretConfigs) > 0 { + for _, e := range m.TlsCertificateSdsSecretConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.TlsCertificateCertificateProvider != nil { + l = m.TlsCertificateCertificateProvider.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TlsCertificateCertificateProviderInstance != nil { + l = m.TlsCertificateCertificateProviderInstance.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CustomHandshaker != nil { + if size, ok := interface{}(m.CustomHandshaker).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CustomHandshaker) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TlsCertificateProviderInstance != nil { + l = m.TlsCertificateProviderInstance.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.KeyLog != nil { + l = m.KeyLog.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CustomTlsCertificateSelector != nil { + if size, ok := interface{}(m.CustomTlsCertificateSelector).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CustomTlsCertificateSelector) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CommonTlsContext_ValidationContext) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValidationContext != nil { + l = m.ValidationContext.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CommonTlsContext_ValidationContextSdsSecretConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValidationContextSdsSecretConfig != nil { + l = m.ValidationContextSdsSecretConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CommonTlsContext_CombinedValidationContext) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CombinedValidationContext != nil { + l = m.CombinedValidationContext.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CommonTlsContext_ValidationContextCertificateProvider) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValidationContextCertificateProvider != nil { + l = m.ValidationContextCertificateProvider.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CommonTlsContext_ValidationContextCertificateProviderInstance) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValidationContextCertificateProviderInstance != nil { + l = m.ValidationContextCertificateProviderInstance.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.go index 65abb236d33..6f09930e968 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.go @@ -1,17 +1,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/service/discovery/v3/ads.proto package discoveryv3 import ( - context "context" _ "github.com/cncf/xds/go/udpa/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -184,185 +180,3 @@ func file_envoy_service_discovery_v3_ads_proto_init() { file_envoy_service_discovery_v3_ads_proto_goTypes = nil file_envoy_service_discovery_v3_ads_proto_depIdxs = nil } - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// AggregatedDiscoveryServiceClient is the client API for AggregatedDiscoveryService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type AggregatedDiscoveryServiceClient interface { - // This is a gRPC-only API. - StreamAggregatedResources(ctx context.Context, opts ...grpc.CallOption) (AggregatedDiscoveryService_StreamAggregatedResourcesClient, error) - DeltaAggregatedResources(ctx context.Context, opts ...grpc.CallOption) (AggregatedDiscoveryService_DeltaAggregatedResourcesClient, error) -} - -type aggregatedDiscoveryServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewAggregatedDiscoveryServiceClient(cc grpc.ClientConnInterface) AggregatedDiscoveryServiceClient { - return &aggregatedDiscoveryServiceClient{cc} -} - -func (c *aggregatedDiscoveryServiceClient) StreamAggregatedResources(ctx context.Context, opts ...grpc.CallOption) (AggregatedDiscoveryService_StreamAggregatedResourcesClient, error) { - stream, err := c.cc.NewStream(ctx, &_AggregatedDiscoveryService_serviceDesc.Streams[0], "/envoy.service.discovery.v3.AggregatedDiscoveryService/StreamAggregatedResources", opts...) 
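The stubs deleted in this hunk are the legacy client and server plumbing for the bidirectional ADS stream; the same API reappears in the new ads_grpc.pb.go added further down, so call sites are unaffected by the move. For orientation, a minimal sketch of driving the stream through this client, assuming a management server at xds-server:18000 and node ID example-node (both hypothetical), with the ACK echoing the response's VersionInfo and Nonce through the DiscoveryRequest fields this package defines:

    package main

    import (
        "context"
        "log"

        corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
        discoveryv3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
    )

    func main() {
        conn, err := grpc.Dial("xds-server:18000", grpc.WithTransportCredentials(insecure.NewCredentials()))
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        client := discoveryv3.NewAggregatedDiscoveryServiceClient(conn)
        stream, err := client.StreamAggregatedResources(context.Background())
        if err != nil {
            log.Fatal(err)
        }
        // Subscribe to clusters; TypeUrl multiplexes resource types on the one ADS stream.
        err = stream.Send(&discoveryv3.DiscoveryRequest{
            Node:    &corev3.Node{Id: "example-node"},
            TypeUrl: "type.googleapis.com/envoy.config.cluster.v3.Cluster",
        })
        if err != nil {
            log.Fatal(err)
        }
        resp, err := stream.Recv()
        if err != nil {
            log.Fatal(err)
        }
        // ACK: echo the version and nonce so the server marks this config as accepted.
        err = stream.Send(&discoveryv3.DiscoveryRequest{
            Node:          &corev3.Node{Id: "example-node"},
            TypeUrl:       resp.TypeUrl,
            VersionInfo:   resp.VersionInfo,
            ResponseNonce: resp.Nonce,
        })
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("ACKed %d resources for %s", len(resp.Resources), resp.TypeUrl)
    }
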
- if err != nil { - return nil, err - } - x := &aggregatedDiscoveryServiceStreamAggregatedResourcesClient{stream} - return x, nil -} - -type AggregatedDiscoveryService_StreamAggregatedResourcesClient interface { - Send(*DiscoveryRequest) error - Recv() (*DiscoveryResponse, error) - grpc.ClientStream -} - -type aggregatedDiscoveryServiceStreamAggregatedResourcesClient struct { - grpc.ClientStream -} - -func (x *aggregatedDiscoveryServiceStreamAggregatedResourcesClient) Send(m *DiscoveryRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *aggregatedDiscoveryServiceStreamAggregatedResourcesClient) Recv() (*DiscoveryResponse, error) { - m := new(DiscoveryResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *aggregatedDiscoveryServiceClient) DeltaAggregatedResources(ctx context.Context, opts ...grpc.CallOption) (AggregatedDiscoveryService_DeltaAggregatedResourcesClient, error) { - stream, err := c.cc.NewStream(ctx, &_AggregatedDiscoveryService_serviceDesc.Streams[1], "/envoy.service.discovery.v3.AggregatedDiscoveryService/DeltaAggregatedResources", opts...) - if err != nil { - return nil, err - } - x := &aggregatedDiscoveryServiceDeltaAggregatedResourcesClient{stream} - return x, nil -} - -type AggregatedDiscoveryService_DeltaAggregatedResourcesClient interface { - Send(*DeltaDiscoveryRequest) error - Recv() (*DeltaDiscoveryResponse, error) - grpc.ClientStream -} - -type aggregatedDiscoveryServiceDeltaAggregatedResourcesClient struct { - grpc.ClientStream -} - -func (x *aggregatedDiscoveryServiceDeltaAggregatedResourcesClient) Send(m *DeltaDiscoveryRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *aggregatedDiscoveryServiceDeltaAggregatedResourcesClient) Recv() (*DeltaDiscoveryResponse, error) { - m := new(DeltaDiscoveryResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// AggregatedDiscoveryServiceServer is the server API for AggregatedDiscoveryService service. -type AggregatedDiscoveryServiceServer interface { - // This is a gRPC-only API. - StreamAggregatedResources(AggregatedDiscoveryService_StreamAggregatedResourcesServer) error - DeltaAggregatedResources(AggregatedDiscoveryService_DeltaAggregatedResourcesServer) error -} - -// UnimplementedAggregatedDiscoveryServiceServer can be embedded to have forward compatible implementations. 
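Context for this removal: from protoc-gen-go v1.20 onward the protobuf plugin no longer emits gRPC stubs, so the regenerated ads.pb.go drops them and the new ads_grpc.pb.go below (generated by protoc-gen-go-grpc v1.3.0) carries them instead. The one signature change is RegisterAggregatedDiscoveryServiceServer, which now takes the grpc.ServiceRegistrar interface rather than a concrete *grpc.Server. A sketch of the unchanged call site (adsServer is the hypothetical implementation sketched after the Unimplemented type further down):

    // Old generated signature: RegisterAggregatedDiscoveryServiceServer(s *grpc.Server, srv ...)
    // New generated signature: RegisterAggregatedDiscoveryServiceServer(s grpc.ServiceRegistrar, srv ...)
    s := grpc.NewServer()
    // *grpc.Server satisfies grpc.ServiceRegistrar (gRPC-Go v1.32.0+), so code
    // written against the old *grpc.Server-typed signature compiles unchanged.
    discoveryv3.RegisterAggregatedDiscoveryServiceServer(s, &adsServer{})
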
-type UnimplementedAggregatedDiscoveryServiceServer struct { -} - -func (*UnimplementedAggregatedDiscoveryServiceServer) StreamAggregatedResources(AggregatedDiscoveryService_StreamAggregatedResourcesServer) error { - return status.Errorf(codes.Unimplemented, "method StreamAggregatedResources not implemented") -} -func (*UnimplementedAggregatedDiscoveryServiceServer) DeltaAggregatedResources(AggregatedDiscoveryService_DeltaAggregatedResourcesServer) error { - return status.Errorf(codes.Unimplemented, "method DeltaAggregatedResources not implemented") -} - -func RegisterAggregatedDiscoveryServiceServer(s *grpc.Server, srv AggregatedDiscoveryServiceServer) { - s.RegisterService(&_AggregatedDiscoveryService_serviceDesc, srv) -} - -func _AggregatedDiscoveryService_StreamAggregatedResources_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(AggregatedDiscoveryServiceServer).StreamAggregatedResources(&aggregatedDiscoveryServiceStreamAggregatedResourcesServer{stream}) -} - -type AggregatedDiscoveryService_StreamAggregatedResourcesServer interface { - Send(*DiscoveryResponse) error - Recv() (*DiscoveryRequest, error) - grpc.ServerStream -} - -type aggregatedDiscoveryServiceStreamAggregatedResourcesServer struct { - grpc.ServerStream -} - -func (x *aggregatedDiscoveryServiceStreamAggregatedResourcesServer) Send(m *DiscoveryResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *aggregatedDiscoveryServiceStreamAggregatedResourcesServer) Recv() (*DiscoveryRequest, error) { - m := new(DiscoveryRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _AggregatedDiscoveryService_DeltaAggregatedResources_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(AggregatedDiscoveryServiceServer).DeltaAggregatedResources(&aggregatedDiscoveryServiceDeltaAggregatedResourcesServer{stream}) -} - -type AggregatedDiscoveryService_DeltaAggregatedResourcesServer interface { - Send(*DeltaDiscoveryResponse) error - Recv() (*DeltaDiscoveryRequest, error) - grpc.ServerStream -} - -type aggregatedDiscoveryServiceDeltaAggregatedResourcesServer struct { - grpc.ServerStream -} - -func (x *aggregatedDiscoveryServiceDeltaAggregatedResourcesServer) Send(m *DeltaDiscoveryResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *aggregatedDiscoveryServiceDeltaAggregatedResourcesServer) Recv() (*DeltaDiscoveryRequest, error) { - m := new(DeltaDiscoveryRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _AggregatedDiscoveryService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "envoy.service.discovery.v3.AggregatedDiscoveryService", - HandlerType: (*AggregatedDiscoveryServiceServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "StreamAggregatedResources", - Handler: _AggregatedDiscoveryService_StreamAggregatedResources_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "DeltaAggregatedResources", - Handler: _AggregatedDiscoveryService_DeltaAggregatedResources_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "envoy/service/discovery/v3/ads.proto", -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.validate.go index 966eb457e13..9080fedd859 100644 --- 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/service/discovery/v3/ads.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads_grpc.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads_grpc.pb.go new file mode 100644 index 00000000000..7a7f1af970a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads_grpc.pb.go @@ -0,0 +1,210 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v5.26.1 +// source: envoy/service/discovery/v3/ads.proto + +package discoveryv3 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + AggregatedDiscoveryService_StreamAggregatedResources_FullMethodName = "/envoy.service.discovery.v3.AggregatedDiscoveryService/StreamAggregatedResources" + AggregatedDiscoveryService_DeltaAggregatedResources_FullMethodName = "/envoy.service.discovery.v3.AggregatedDiscoveryService/DeltaAggregatedResources" +) + +// AggregatedDiscoveryServiceClient is the client API for AggregatedDiscoveryService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type AggregatedDiscoveryServiceClient interface { + // This is a gRPC-only API. + StreamAggregatedResources(ctx context.Context, opts ...grpc.CallOption) (AggregatedDiscoveryService_StreamAggregatedResourcesClient, error) + DeltaAggregatedResources(ctx context.Context, opts ...grpc.CallOption) (AggregatedDiscoveryService_DeltaAggregatedResourcesClient, error) +} + +type aggregatedDiscoveryServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewAggregatedDiscoveryServiceClient(cc grpc.ClientConnInterface) AggregatedDiscoveryServiceClient { + return &aggregatedDiscoveryServiceClient{cc} +} + +func (c *aggregatedDiscoveryServiceClient) StreamAggregatedResources(ctx context.Context, opts ...grpc.CallOption) (AggregatedDiscoveryService_StreamAggregatedResourcesClient, error) { + stream, err := c.cc.NewStream(ctx, &AggregatedDiscoveryService_ServiceDesc.Streams[0], AggregatedDiscoveryService_StreamAggregatedResources_FullMethodName, opts...) 
+ if err != nil { + return nil, err + } + x := &aggregatedDiscoveryServiceStreamAggregatedResourcesClient{stream} + return x, nil +} + +type AggregatedDiscoveryService_StreamAggregatedResourcesClient interface { + Send(*DiscoveryRequest) error + Recv() (*DiscoveryResponse, error) + grpc.ClientStream +} + +type aggregatedDiscoveryServiceStreamAggregatedResourcesClient struct { + grpc.ClientStream +} + +func (x *aggregatedDiscoveryServiceStreamAggregatedResourcesClient) Send(m *DiscoveryRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *aggregatedDiscoveryServiceStreamAggregatedResourcesClient) Recv() (*DiscoveryResponse, error) { + m := new(DiscoveryResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *aggregatedDiscoveryServiceClient) DeltaAggregatedResources(ctx context.Context, opts ...grpc.CallOption) (AggregatedDiscoveryService_DeltaAggregatedResourcesClient, error) { + stream, err := c.cc.NewStream(ctx, &AggregatedDiscoveryService_ServiceDesc.Streams[1], AggregatedDiscoveryService_DeltaAggregatedResources_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &aggregatedDiscoveryServiceDeltaAggregatedResourcesClient{stream} + return x, nil +} + +type AggregatedDiscoveryService_DeltaAggregatedResourcesClient interface { + Send(*DeltaDiscoveryRequest) error + Recv() (*DeltaDiscoveryResponse, error) + grpc.ClientStream +} + +type aggregatedDiscoveryServiceDeltaAggregatedResourcesClient struct { + grpc.ClientStream +} + +func (x *aggregatedDiscoveryServiceDeltaAggregatedResourcesClient) Send(m *DeltaDiscoveryRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *aggregatedDiscoveryServiceDeltaAggregatedResourcesClient) Recv() (*DeltaDiscoveryResponse, error) { + m := new(DeltaDiscoveryResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// AggregatedDiscoveryServiceServer is the server API for AggregatedDiscoveryService service. +// All implementations should embed UnimplementedAggregatedDiscoveryServiceServer +// for forward compatibility +type AggregatedDiscoveryServiceServer interface { + // This is a gRPC-only API. + StreamAggregatedResources(AggregatedDiscoveryService_StreamAggregatedResourcesServer) error + DeltaAggregatedResources(AggregatedDiscoveryService_DeltaAggregatedResourcesServer) error +} + +// UnimplementedAggregatedDiscoveryServiceServer should be embedded to have forward compatible implementations. +type UnimplementedAggregatedDiscoveryServiceServer struct { +} + +func (UnimplementedAggregatedDiscoveryServiceServer) StreamAggregatedResources(AggregatedDiscoveryService_StreamAggregatedResourcesServer) error { + return status.Errorf(codes.Unimplemented, "method StreamAggregatedResources not implemented") +} +func (UnimplementedAggregatedDiscoveryServiceServer) DeltaAggregatedResources(AggregatedDiscoveryService_DeltaAggregatedResourcesServer) error { + return status.Errorf(codes.Unimplemented, "method DeltaAggregatedResources not implemented") +} + +// UnsafeAggregatedDiscoveryServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to AggregatedDiscoveryServiceServer will +// result in compilation errors. 
+type UnsafeAggregatedDiscoveryServiceServer interface { + mustEmbedUnimplementedAggregatedDiscoveryServiceServer() +} + +func RegisterAggregatedDiscoveryServiceServer(s grpc.ServiceRegistrar, srv AggregatedDiscoveryServiceServer) { + s.RegisterService(&AggregatedDiscoveryService_ServiceDesc, srv) +} + +func _AggregatedDiscoveryService_StreamAggregatedResources_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(AggregatedDiscoveryServiceServer).StreamAggregatedResources(&aggregatedDiscoveryServiceStreamAggregatedResourcesServer{stream}) +} + +type AggregatedDiscoveryService_StreamAggregatedResourcesServer interface { + Send(*DiscoveryResponse) error + Recv() (*DiscoveryRequest, error) + grpc.ServerStream +} + +type aggregatedDiscoveryServiceStreamAggregatedResourcesServer struct { + grpc.ServerStream +} + +func (x *aggregatedDiscoveryServiceStreamAggregatedResourcesServer) Send(m *DiscoveryResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *aggregatedDiscoveryServiceStreamAggregatedResourcesServer) Recv() (*DiscoveryRequest, error) { + m := new(DiscoveryRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _AggregatedDiscoveryService_DeltaAggregatedResources_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(AggregatedDiscoveryServiceServer).DeltaAggregatedResources(&aggregatedDiscoveryServiceDeltaAggregatedResourcesServer{stream}) +} + +type AggregatedDiscoveryService_DeltaAggregatedResourcesServer interface { + Send(*DeltaDiscoveryResponse) error + Recv() (*DeltaDiscoveryRequest, error) + grpc.ServerStream +} + +type aggregatedDiscoveryServiceDeltaAggregatedResourcesServer struct { + grpc.ServerStream +} + +func (x *aggregatedDiscoveryServiceDeltaAggregatedResourcesServer) Send(m *DeltaDiscoveryResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *aggregatedDiscoveryServiceDeltaAggregatedResourcesServer) Recv() (*DeltaDiscoveryRequest, error) { + m := new(DeltaDiscoveryRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// AggregatedDiscoveryService_ServiceDesc is the grpc.ServiceDesc for AggregatedDiscoveryService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var AggregatedDiscoveryService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "envoy.service.discovery.v3.AggregatedDiscoveryService", + HandlerType: (*AggregatedDiscoveryServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamAggregatedResources", + Handler: _AggregatedDiscoveryService_StreamAggregatedResources_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "DeltaAggregatedResources", + Handler: _AggregatedDiscoveryService_DeltaAggregatedResources_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "envoy/service/discovery/v3/ads.proto", +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads_vtproto.pb.go new file mode 100644 index 00000000000..0e604235d1e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads_vtproto.pb.go @@ -0,0 +1,61 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/service/discovery/v3/ads.proto + +package discoveryv3 + +import ( + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *AdsDummy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdsDummy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AdsDummy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *AdsDummy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.go index a39aac288ad..a9b5f693589 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/service/discovery/v3/discovery.proto package discoveryv3 @@ -10,11 +10,11 @@ import ( _ "github.com/cncf/xds/go/udpa/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any1 "github.com/golang/protobuf/ptypes/any" - duration "github.com/golang/protobuf/ptypes/duration" status "google.golang.org/genproto/googleapis/rpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" reflect "reflect" sync "sync" ) @@ -286,7 +286,7 @@ type DiscoveryResponse struct { // The version of the response data. VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` // The response resources. These resources are typed and depend on the API being called. - Resources []*any1.Any `protobuf:"bytes,2,rep,name=resources,proto3" json:"resources,omitempty"` + Resources []*anypb.Any `protobuf:"bytes,2,rep,name=resources,proto3" json:"resources,omitempty"` // [#not-implemented-hide:] // Canary is used to support two Envoy command line flags: // @@ -357,7 +357,7 @@ func (x *DiscoveryResponse) GetVersionInfo() string { return "" } -func (x *DiscoveryResponse) GetResources() []*any1.Any { +func (x *DiscoveryResponse) GetResources() []*anypb.Any { if x != nil { return x.Resources } @@ -838,7 +838,7 @@ type Resource struct { // resources. Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` // The resource being tracked. - Resource *any1.Any `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"` + Resource *anypb.Any `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"` // Time-to-live value for the resource. For each resource, a timer is started. The timer is // reset each time the resource is received with a new TTL. If the resource is received with // no TTL set, the timer is removed for the resource. Upon expiration of the timer, the @@ -852,7 +852,7 @@ type Resource struct { // a management server failure. For example, the feature may be used for fault injection // testing where the fault injection should be terminated in the event that Envoy loses contact // with the management server. - Ttl *duration.Duration `protobuf:"bytes,6,opt,name=ttl,proto3" json:"ttl,omitempty"` + Ttl *durationpb.Duration `protobuf:"bytes,6,opt,name=ttl,proto3" json:"ttl,omitempty"` // Cache control properties for the resource. 
// [#not-implemented-hide:] CacheControl *Resource_CacheControl `protobuf:"bytes,7,opt,name=cache_control,json=cacheControl,proto3" json:"cache_control,omitempty"` @@ -921,14 +921,14 @@ func (x *Resource) GetVersion() string { return "" } -func (x *Resource) GetResource() *any1.Any { +func (x *Resource) GetResource() *anypb.Any { if x != nil { return x.Resource } return nil } -func (x *Resource) GetTtl() *duration.Duration { +func (x *Resource) GetTtl() *durationpb.Duration { if x != nil { return x.Ttl } @@ -1472,9 +1472,9 @@ var file_envoy_service_discovery_v3_discovery_proto_goTypes = []interface{}{ (*Resource_CacheControl)(nil), // 13: envoy.service.discovery.v3.Resource.CacheControl (*v3.Node)(nil), // 14: envoy.config.core.v3.Node (*status.Status)(nil), // 15: google.rpc.Status - (*any1.Any)(nil), // 16: google.protobuf.Any + (*anypb.Any)(nil), // 16: google.protobuf.Any (*v3.ControlPlane)(nil), // 17: envoy.config.core.v3.ControlPlane - (*duration.Duration)(nil), // 18: google.protobuf.Duration + (*durationpb.Duration)(nil), // 18: google.protobuf.Duration (*v3.Metadata)(nil), // 19: envoy.config.core.v3.Metadata } var file_envoy_service_discovery_v3_discovery_proto_depIdxs = []int32{ diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.validate.go index 913cdb6094b..e30bb1e439f 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/service/discovery/v3/discovery.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery_vtproto.pb.go new file mode 100644 index 00000000000..56a3ef579fd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery_vtproto.pb.go @@ -0,0 +1,1546 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/service/discovery/v3/discovery.proto + +package discoveryv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ResourceLocator) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceLocator) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResourceLocator) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DynamicParameters) > 0 { + for k := range m.DynamicParameters { + v := m.DynamicParameters[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResourceName) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResourceName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DynamicParameterConstraints != nil { + size, err := m.DynamicParameterConstraints.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DiscoveryRequest) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DiscoveryRequest) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DiscoveryRequest) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ResourceLocators) > 0 { + for iNdEx := len(m.ResourceLocators) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ResourceLocators[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + } + if m.ErrorDetail != 
nil { + if vtmsg, ok := interface{}(m.ErrorDetail).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ErrorDetail) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + if len(m.ResponseNonce) > 0 { + i -= len(m.ResponseNonce) + copy(dAtA[i:], m.ResponseNonce) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseNonce))) + i-- + dAtA[i] = 0x2a + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0x22 + } + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.Node != nil { + if vtmsg, ok := interface{}(m.Node).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Node) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DiscoveryResponse) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DiscoveryResponse) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DiscoveryResponse) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ControlPlane != nil { + if vtmsg, ok := interface{}(m.ControlPlane).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ControlPlane) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + if len(m.Nonce) > 0 { + i -= len(m.Nonce) + copy(dAtA[i:], m.Nonce) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Nonce))) + i-- + dAtA[i] = 0x2a + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0x22 + } + if m.Canary { + i-- + if m.Canary { + dAtA[i] = 1 + } 
else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + size, err := (*anypb.Any)(m.Resources[iNdEx]).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeltaDiscoveryRequest) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeltaDiscoveryRequest) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DeltaDiscoveryRequest) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ResourceLocatorsUnsubscribe) > 0 { + for iNdEx := len(m.ResourceLocatorsUnsubscribe) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ResourceLocatorsUnsubscribe[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + } + if len(m.ResourceLocatorsSubscribe) > 0 { + for iNdEx := len(m.ResourceLocatorsSubscribe) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ResourceLocatorsSubscribe[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + } + if m.ErrorDetail != nil { + if vtmsg, ok := interface{}(m.ErrorDetail).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ErrorDetail) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + if len(m.ResponseNonce) > 0 { + i -= len(m.ResponseNonce) + copy(dAtA[i:], m.ResponseNonce) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseNonce))) + i-- + dAtA[i] = 0x32 + } + if len(m.InitialResourceVersions) > 0 { + for k := range m.InitialResourceVersions { + v := m.InitialResourceVersions[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.ResourceNamesUnsubscribe) > 0 { + for iNdEx := len(m.ResourceNamesUnsubscribe) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNamesUnsubscribe[iNdEx]) + copy(dAtA[i:], m.ResourceNamesUnsubscribe[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResourceNamesUnsubscribe[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.ResourceNamesSubscribe) > 0 
{ + for iNdEx := len(m.ResourceNamesSubscribe) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNamesSubscribe[iNdEx]) + copy(dAtA[i:], m.ResourceNamesSubscribe[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResourceNamesSubscribe[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0x12 + } + if m.Node != nil { + if vtmsg, ok := interface{}(m.Node).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Node) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeltaDiscoveryResponse) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeltaDiscoveryResponse) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DeltaDiscoveryResponse) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RemovedResourceNames) > 0 { + for iNdEx := len(m.RemovedResourceNames) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RemovedResourceNames[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + } + if m.ControlPlane != nil { + if vtmsg, ok := interface{}(m.ControlPlane).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ControlPlane) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + if len(m.RemovedResources) > 0 { + for iNdEx := len(m.RemovedResources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RemovedResources[iNdEx]) + copy(dAtA[i:], m.RemovedResources[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RemovedResources[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Nonce) > 0 { + i -= len(m.Nonce) + copy(dAtA[i:], m.Nonce) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Nonce))) + i-- + dAtA[i] = 0x2a + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0x22 + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Resources[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if 
len(m.SystemVersionInfo) > 0 { + i -= len(m.SystemVersionInfo) + copy(dAtA[i:], m.SystemVersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SystemVersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DynamicParameterConstraints_SingleConstraint_Exists) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DynamicParameterConstraints_SingleConstraint_Exists) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_SingleConstraint_Exists) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *DynamicParameterConstraints_SingleConstraint) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DynamicParameterConstraints_SingleConstraint) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_SingleConstraint) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConstraintType.(*DynamicParameterConstraints_SingleConstraint_Exists_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ConstraintType.(*DynamicParameterConstraints_SingleConstraint_Value); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DynamicParameterConstraints_SingleConstraint_Value) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_SingleConstraint_Value) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *DynamicParameterConstraints_SingleConstraint_Exists_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_SingleConstraint_Exists_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Exists != nil { + size, err := m.Exists.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 
0x1a + } + return len(dAtA) - i, nil +} +func (m *DynamicParameterConstraints_ConstraintList) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DynamicParameterConstraints_ConstraintList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_ConstraintList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Constraints) > 0 { + for iNdEx := len(m.Constraints) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Constraints[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DynamicParameterConstraints) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DynamicParameterConstraints) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Type.(*DynamicParameterConstraints_NotConstraints); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*DynamicParameterConstraints_AndConstraints); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*DynamicParameterConstraints_OrConstraints); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*DynamicParameterConstraints_Constraint); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *DynamicParameterConstraints_Constraint) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_Constraint) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Constraint != nil { + size, err := m.Constraint.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *DynamicParameterConstraints_OrConstraints) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_OrConstraints) 
MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrConstraints != nil { + size, err := m.OrConstraints.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *DynamicParameterConstraints_AndConstraints) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_AndConstraints) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AndConstraints != nil { + size, err := m.AndConstraints.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *DynamicParameterConstraints_NotConstraints) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_NotConstraints) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NotConstraints != nil { + size, err := m.NotConstraints.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Resource_CacheControl) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resource_CacheControl) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Resource_CacheControl) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DoNotCache { + i-- + if m.DoNotCache { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Resource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Resource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + if m.ResourceName != nil { + size, err := m.ResourceName.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.CacheControl != nil { + size, err := m.CacheControl.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.Ttl != nil { + size, err := (*durationpb.Duration)(m.Ttl).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.Aliases) > 0 { + for iNdEx := len(m.Aliases) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Aliases[iNdEx]) + copy(dAtA[i:], m.Aliases[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Aliases[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if m.Resource != nil { + size, err := (*anypb.Any)(m.Resource).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResourceLocator) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.DynamicParameters) > 0 { + for k, v := range m.DynamicParameters { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ResourceName) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DynamicParameterConstraints != nil { + l = m.DynamicParameterConstraints.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DiscoveryRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Node != nil { + if size, ok := interface{}(m.Node).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Node) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ResponseNonce) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorDetail != nil { + if size, ok := interface{}(m.ErrorDetail).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() 
+ } else { + l = proto.Size(m.ErrorDetail) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ResourceLocators) > 0 { + for _, e := range m.ResourceLocators { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *DiscoveryResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = (*anypb.Any)(e).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Canary { + n += 2 + } + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Nonce) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ControlPlane != nil { + if size, ok := interface{}(m.ControlPlane).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ControlPlane) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DeltaDiscoveryRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Node != nil { + if size, ok := interface{}(m.Node).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Node) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ResourceNamesSubscribe) > 0 { + for _, s := range m.ResourceNamesSubscribe { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResourceNamesUnsubscribe) > 0 { + for _, s := range m.ResourceNamesUnsubscribe { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.InitialResourceVersions) > 0 { + for k, v := range m.InitialResourceVersions { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + l = len(m.ResponseNonce) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorDetail != nil { + if size, ok := interface{}(m.ErrorDetail).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ErrorDetail) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ResourceLocatorsSubscribe) > 0 { + for _, e := range m.ResourceLocatorsSubscribe { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResourceLocatorsUnsubscribe) > 0 { + for _, e := range m.ResourceLocatorsUnsubscribe { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *DeltaDiscoveryResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SystemVersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Nonce) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RemovedResources) > 0 { + for _, s := range m.RemovedResources { + l = len(s) + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.ControlPlane != nil { + if size, ok := interface{}(m.ControlPlane).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ControlPlane) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RemovedResourceNames) > 0 { + for _, e := range m.RemovedResourceNames { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *DynamicParameterConstraints_SingleConstraint_Exists) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *DynamicParameterConstraints_SingleConstraint) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConstraintType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *DynamicParameterConstraints_SingleConstraint_Value) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *DynamicParameterConstraints_SingleConstraint_Exists_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exists != nil { + l = m.Exists.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *DynamicParameterConstraints_ConstraintList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Constraints) > 0 { + for _, e := range m.Constraints { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *DynamicParameterConstraints) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Type.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *DynamicParameterConstraints_Constraint) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Constraint != nil { + l = m.Constraint.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *DynamicParameterConstraints_OrConstraints) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrConstraints != nil { + l = m.OrConstraints.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *DynamicParameterConstraints_AndConstraints) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AndConstraints != nil { + l = m.AndConstraints.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *DynamicParameterConstraints_NotConstraints) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotConstraints != nil { + l = m.NotConstraints.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Resource_CacheControl) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DoNotCache { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *Resource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Resource != nil { + l = 
(*anypb.Any)(m.Resource).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Aliases) > 0 { + for _, s := range m.Aliases { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Ttl != nil { + l = (*durationpb.Duration)(m.Ttl).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CacheControl != nil { + l = m.CacheControl.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ResourceName != nil { + l = m.ResourceName.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.go index d2465ee754d..6b47a93c6b0 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.go @@ -1,22 +1,18 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/service/load_stats/v3/lrs.proto package load_statsv3 import ( - context "context" _ "github.com/cncf/xds/go/udpa/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v31 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" - duration "github.com/golang/protobuf/ptypes/duration" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" reflect "reflect" sync "sync" ) @@ -108,7 +104,7 @@ type LoadStatsResponse struct { // “LoadStatsResponse“ will also be accumulated and billed to the cluster. This avoids a period // of inobservability that might otherwise exists between the messages. New clusters are not // subject to this consideration. - LoadReportingInterval *duration.Duration `protobuf:"bytes,2,opt,name=load_reporting_interval,json=loadReportingInterval,proto3" json:"load_reporting_interval,omitempty"` + LoadReportingInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=load_reporting_interval,json=loadReportingInterval,proto3" json:"load_reporting_interval,omitempty"` // Set to “true“ if the management server supports endpoint granularity // report. 
ReportEndpointGranularity bool `protobuf:"varint,3,opt,name=report_endpoint_granularity,json=reportEndpointGranularity,proto3" json:"report_endpoint_granularity,omitempty"` @@ -160,7 +156,7 @@ func (x *LoadStatsResponse) GetSendAllClusters() bool { return false } -func (x *LoadStatsResponse) GetLoadReportingInterval() *duration.Duration { +func (x *LoadStatsResponse) GetLoadReportingInterval() *durationpb.Duration { if x != nil { return x.LoadReportingInterval } @@ -258,11 +254,11 @@ func file_envoy_service_load_stats_v3_lrs_proto_rawDescGZIP() []byte { var file_envoy_service_load_stats_v3_lrs_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_envoy_service_load_stats_v3_lrs_proto_goTypes = []interface{}{ - (*LoadStatsRequest)(nil), // 0: envoy.service.load_stats.v3.LoadStatsRequest - (*LoadStatsResponse)(nil), // 1: envoy.service.load_stats.v3.LoadStatsResponse - (*v3.Node)(nil), // 2: envoy.config.core.v3.Node - (*v31.ClusterStats)(nil), // 3: envoy.config.endpoint.v3.ClusterStats - (*duration.Duration)(nil), // 4: google.protobuf.Duration + (*LoadStatsRequest)(nil), // 0: envoy.service.load_stats.v3.LoadStatsRequest + (*LoadStatsResponse)(nil), // 1: envoy.service.load_stats.v3.LoadStatsResponse + (*v3.Node)(nil), // 2: envoy.config.core.v3.Node + (*v31.ClusterStats)(nil), // 3: envoy.config.endpoint.v3.ClusterStats + (*durationpb.Duration)(nil), // 4: google.protobuf.Duration } var file_envoy_service_load_stats_v3_lrs_proto_depIdxs = []int32{ 2, // 0: envoy.service.load_stats.v3.LoadStatsRequest.node:type_name -> envoy.config.core.v3.Node @@ -327,173 +323,3 @@ func file_envoy_service_load_stats_v3_lrs_proto_init() { file_envoy_service_load_stats_v3_lrs_proto_goTypes = nil file_envoy_service_load_stats_v3_lrs_proto_depIdxs = nil } - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// LoadReportingServiceClient is the client API for LoadReportingService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type LoadReportingServiceClient interface { - // Advanced API to allow for multi-dimensional load balancing by remote - // server. For receiving LB assignments, the steps are: - // 1, The management server is configured with per cluster/zone/load metric - // - // capacity configuration. The capacity configuration definition is - // outside of the scope of this document. - // 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters - // to balance. - // - // Independently, Envoy will initiate a StreamLoadStats bidi stream with a - // management server: - // 1. Once a connection establishes, the management server publishes a - // LoadStatsResponse for all clusters it is interested in learning load - // stats about. - // 2. For each cluster, Envoy load balances incoming traffic to upstream hosts - // based on per-zone weights and/or per-instance weights (if specified) - // based on intra-zone LbPolicy. This information comes from the above - // {Stream,Fetch}Endpoints. - // 3. When upstream hosts reply, they optionally add header with ASCII representation of EndpointLoadMetricStats. - // 4. 
Envoy aggregates load reports over the period of time given to it in - // LoadStatsResponse.load_reporting_interval. This includes aggregation - // stats Envoy maintains by itself (total_requests, rpc_errors etc.) as - // well as load metrics from upstream hosts. - // 5. When the timer of load_reporting_interval expires, Envoy sends new - // LoadStatsRequest filled with load reports for each cluster. - // 6. The management server uses the load reports from all reported Envoys - // from around the world, computes global assignment and prepares traffic - // assignment destined for each zone Envoys are located in. Goto 2. - StreamLoadStats(ctx context.Context, opts ...grpc.CallOption) (LoadReportingService_StreamLoadStatsClient, error) -} - -type loadReportingServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewLoadReportingServiceClient(cc grpc.ClientConnInterface) LoadReportingServiceClient { - return &loadReportingServiceClient{cc} -} - -func (c *loadReportingServiceClient) StreamLoadStats(ctx context.Context, opts ...grpc.CallOption) (LoadReportingService_StreamLoadStatsClient, error) { - stream, err := c.cc.NewStream(ctx, &_LoadReportingService_serviceDesc.Streams[0], "/envoy.service.load_stats.v3.LoadReportingService/StreamLoadStats", opts...) - if err != nil { - return nil, err - } - x := &loadReportingServiceStreamLoadStatsClient{stream} - return x, nil -} - -type LoadReportingService_StreamLoadStatsClient interface { - Send(*LoadStatsRequest) error - Recv() (*LoadStatsResponse, error) - grpc.ClientStream -} - -type loadReportingServiceStreamLoadStatsClient struct { - grpc.ClientStream -} - -func (x *loadReportingServiceStreamLoadStatsClient) Send(m *LoadStatsRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *loadReportingServiceStreamLoadStatsClient) Recv() (*LoadStatsResponse, error) { - m := new(LoadStatsResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// LoadReportingServiceServer is the server API for LoadReportingService service. -type LoadReportingServiceServer interface { - // Advanced API to allow for multi-dimensional load balancing by remote - // server. For receiving LB assignments, the steps are: - // 1, The management server is configured with per cluster/zone/load metric - // - // capacity configuration. The capacity configuration definition is - // outside of the scope of this document. - // 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters - // to balance. - // - // Independently, Envoy will initiate a StreamLoadStats bidi stream with a - // management server: - // 1. Once a connection establishes, the management server publishes a - // LoadStatsResponse for all clusters it is interested in learning load - // stats about. - // 2. For each cluster, Envoy load balances incoming traffic to upstream hosts - // based on per-zone weights and/or per-instance weights (if specified) - // based on intra-zone LbPolicy. This information comes from the above - // {Stream,Fetch}Endpoints. - // 3. When upstream hosts reply, they optionally add header with ASCII representation of EndpointLoadMetricStats. - // 4. Envoy aggregates load reports over the period of time given to it in - // LoadStatsResponse.load_reporting_interval. This includes aggregation - // stats Envoy maintains by itself (total_requests, rpc_errors etc.) as - // well as load metrics from upstream hosts. - // 5. 
When the timer of load_reporting_interval expires, Envoy sends new - // LoadStatsRequest filled with load reports for each cluster. - // 6. The management server uses the load reports from all reported Envoys - // from around the world, computes global assignment and prepares traffic - // assignment destined for each zone Envoys are located in. Goto 2. - StreamLoadStats(LoadReportingService_StreamLoadStatsServer) error -} - -// UnimplementedLoadReportingServiceServer can be embedded to have forward compatible implementations. -type UnimplementedLoadReportingServiceServer struct { -} - -func (*UnimplementedLoadReportingServiceServer) StreamLoadStats(LoadReportingService_StreamLoadStatsServer) error { - return status.Errorf(codes.Unimplemented, "method StreamLoadStats not implemented") -} - -func RegisterLoadReportingServiceServer(s *grpc.Server, srv LoadReportingServiceServer) { - s.RegisterService(&_LoadReportingService_serviceDesc, srv) -} - -func _LoadReportingService_StreamLoadStats_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(LoadReportingServiceServer).StreamLoadStats(&loadReportingServiceStreamLoadStatsServer{stream}) -} - -type LoadReportingService_StreamLoadStatsServer interface { - Send(*LoadStatsResponse) error - Recv() (*LoadStatsRequest, error) - grpc.ServerStream -} - -type loadReportingServiceStreamLoadStatsServer struct { - grpc.ServerStream -} - -func (x *loadReportingServiceStreamLoadStatsServer) Send(m *LoadStatsResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *loadReportingServiceStreamLoadStatsServer) Recv() (*LoadStatsRequest, error) { - m := new(LoadStatsRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _LoadReportingService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "envoy.service.load_stats.v3.LoadReportingService", - HandlerType: (*LoadReportingServiceServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "StreamLoadStats", - Handler: _LoadReportingService_StreamLoadStats_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "envoy/service/load_stats/v3/lrs.proto", -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.validate.go index e6806c11c8a..cf4e395c2a3 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/service/load_stats/v3/lrs.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs_grpc.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs_grpc.pb.go new file mode 100644 index 00000000000..4eb34c17332 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs_grpc.pb.go @@ -0,0 +1,197 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v5.26.1 +// source: envoy/service/load_stats/v3/lrs.proto + +package load_statsv3 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + LoadReportingService_StreamLoadStats_FullMethodName = "/envoy.service.load_stats.v3.LoadReportingService/StreamLoadStats" +) + +// LoadReportingServiceClient is the client API for LoadReportingService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type LoadReportingServiceClient interface { + // Advanced API to allow for multi-dimensional load balancing by remote + // server. For receiving LB assignments, the steps are: + // 1, The management server is configured with per cluster/zone/load metric + // + // capacity configuration. The capacity configuration definition is + // outside of the scope of this document. + // 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters + // to balance. + // + // Independently, Envoy will initiate a StreamLoadStats bidi stream with a + // management server: + // 1. Once a connection establishes, the management server publishes a + // LoadStatsResponse for all clusters it is interested in learning load + // stats about. + // 2. For each cluster, Envoy load balances incoming traffic to upstream hosts + // based on per-zone weights and/or per-instance weights (if specified) + // based on intra-zone LbPolicy. This information comes from the above + // {Stream,Fetch}Endpoints. + // 3. When upstream hosts reply, they optionally add header with ASCII representation of EndpointLoadMetricStats. + // 4. Envoy aggregates load reports over the period of time given to it in + // LoadStatsResponse.load_reporting_interval. This includes aggregation + // stats Envoy maintains by itself (total_requests, rpc_errors etc.) as + // well as load metrics from upstream hosts. + // 5. When the timer of load_reporting_interval expires, Envoy sends new + // LoadStatsRequest filled with load reports for each cluster. + // 6. The management server uses the load reports from all reported Envoys + // from around the world, computes global assignment and prepares traffic + // assignment destined for each zone Envoys are located in. Goto 2. + StreamLoadStats(ctx context.Context, opts ...grpc.CallOption) (LoadReportingService_StreamLoadStatsClient, error) +} + +type loadReportingServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewLoadReportingServiceClient(cc grpc.ClientConnInterface) LoadReportingServiceClient { + return &loadReportingServiceClient{cc} +} + +func (c *loadReportingServiceClient) StreamLoadStats(ctx context.Context, opts ...grpc.CallOption) (LoadReportingService_StreamLoadStatsClient, error) { + stream, err := c.cc.NewStream(ctx, &LoadReportingService_ServiceDesc.Streams[0], LoadReportingService_StreamLoadStats_FullMethodName, opts...) 
+ if err != nil { + return nil, err + } + x := &loadReportingServiceStreamLoadStatsClient{stream} + return x, nil +} + +type LoadReportingService_StreamLoadStatsClient interface { + Send(*LoadStatsRequest) error + Recv() (*LoadStatsResponse, error) + grpc.ClientStream +} + +type loadReportingServiceStreamLoadStatsClient struct { + grpc.ClientStream +} + +func (x *loadReportingServiceStreamLoadStatsClient) Send(m *LoadStatsRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *loadReportingServiceStreamLoadStatsClient) Recv() (*LoadStatsResponse, error) { + m := new(LoadStatsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// LoadReportingServiceServer is the server API for LoadReportingService service. +// All implementations should embed UnimplementedLoadReportingServiceServer +// for forward compatibility +type LoadReportingServiceServer interface { + // Advanced API to allow for multi-dimensional load balancing by remote + // server. For receiving LB assignments, the steps are: + // 1, The management server is configured with per cluster/zone/load metric + // + // capacity configuration. The capacity configuration definition is + // outside of the scope of this document. + // 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters + // to balance. + // + // Independently, Envoy will initiate a StreamLoadStats bidi stream with a + // management server: + // 1. Once a connection establishes, the management server publishes a + // LoadStatsResponse for all clusters it is interested in learning load + // stats about. + // 2. For each cluster, Envoy load balances incoming traffic to upstream hosts + // based on per-zone weights and/or per-instance weights (if specified) + // based on intra-zone LbPolicy. This information comes from the above + // {Stream,Fetch}Endpoints. + // 3. When upstream hosts reply, they optionally add header with ASCII representation of EndpointLoadMetricStats. + // 4. Envoy aggregates load reports over the period of time given to it in + // LoadStatsResponse.load_reporting_interval. This includes aggregation + // stats Envoy maintains by itself (total_requests, rpc_errors etc.) as + // well as load metrics from upstream hosts. + // 5. When the timer of load_reporting_interval expires, Envoy sends new + // LoadStatsRequest filled with load reports for each cluster. + // 6. The management server uses the load reports from all reported Envoys + // from around the world, computes global assignment and prepares traffic + // assignment destined for each zone Envoys are located in. Goto 2. + StreamLoadStats(LoadReportingService_StreamLoadStatsServer) error +} + +// UnimplementedLoadReportingServiceServer should be embedded to have forward compatible implementations. +type UnimplementedLoadReportingServiceServer struct { +} + +func (UnimplementedLoadReportingServiceServer) StreamLoadStats(LoadReportingService_StreamLoadStatsServer) error { + return status.Errorf(codes.Unimplemented, "method StreamLoadStats not implemented") +} + +// UnsafeLoadReportingServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to LoadReportingServiceServer will +// result in compilation errors. 
+type UnsafeLoadReportingServiceServer interface { + mustEmbedUnimplementedLoadReportingServiceServer() +} + +func RegisterLoadReportingServiceServer(s grpc.ServiceRegistrar, srv LoadReportingServiceServer) { + s.RegisterService(&LoadReportingService_ServiceDesc, srv) +} + +func _LoadReportingService_StreamLoadStats_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(LoadReportingServiceServer).StreamLoadStats(&loadReportingServiceStreamLoadStatsServer{stream}) +} + +type LoadReportingService_StreamLoadStatsServer interface { + Send(*LoadStatsResponse) error + Recv() (*LoadStatsRequest, error) + grpc.ServerStream +} + +type loadReportingServiceStreamLoadStatsServer struct { + grpc.ServerStream +} + +func (x *loadReportingServiceStreamLoadStatsServer) Send(m *LoadStatsResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *loadReportingServiceStreamLoadStatsServer) Recv() (*LoadStatsRequest, error) { + m := new(LoadStatsRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// LoadReportingService_ServiceDesc is the grpc.ServiceDesc for LoadReportingService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var LoadReportingService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "envoy.service.load_stats.v3.LoadReportingService", + HandlerType: (*LoadReportingServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamLoadStats", + Handler: _LoadReportingService_StreamLoadStats_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "envoy/service/load_stats/v3/lrs.proto", +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs_vtproto.pb.go new file mode 100644 index 00000000000..ca86ae7740d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs_vtproto.pb.go @@ -0,0 +1,230 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/service/load_stats/v3/lrs.proto + +package load_statsv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *LoadStatsRequest) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LoadStatsRequest) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LoadStatsRequest) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ClusterStats) > 0 { + for iNdEx := len(m.ClusterStats) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ClusterStats[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ClusterStats[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Node != nil { + if vtmsg, ok := interface{}(m.Node).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Node) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LoadStatsResponse) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LoadStatsResponse) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LoadStatsResponse) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SendAllClusters { + i-- + if m.SendAllClusters { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.ReportEndpointGranularity { + i-- + if m.ReportEndpointGranularity { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.LoadReportingInterval != nil { + size, err := (*durationpb.Duration)(m.LoadReportingInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Clusters) > 0 { + for iNdEx := len(m.Clusters) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Clusters[iNdEx]) + copy(dAtA[i:], m.Clusters[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Clusters[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LoadStatsRequest) SizeVT() (n int) { + if m == nil { 
+ return 0 + } + var l int + _ = l + if m.Node != nil { + if size, ok := interface{}(m.Node).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Node) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ClusterStats) > 0 { + for _, e := range m.ClusterStats { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *LoadStatsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Clusters) > 0 { + for _, s := range m.Clusters { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.LoadReportingInterval != nil { + l = (*durationpb.Duration)(m.LoadReportingInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ReportEndpointGranularity { + n += 2 + } + if m.SendAllClusters { + n += 2 + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.go index 849aef09472..4635ca02842 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.go @@ -1,26 +1,22 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/service/status/v3/csds.proto package statusv3 import ( - context "context" _ "github.com/cncf/xds/go/udpa/annotations" v32 "github.com/envoyproxy/go-control-plane/envoy/admin/v3" _ "github.com/envoyproxy/go-control-plane/envoy/annotations" v31 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" - any1 "github.com/golang/protobuf/ptypes/any" - timestamp "github.com/golang/protobuf/ptypes/timestamp" _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" ) @@ -397,6 +393,10 @@ type ClientConfig struct { // Represents generic xDS config and the exact config structure depends on // the type URL (like Cluster if it is CDS) GenericXdsConfigs []*ClientConfig_GenericXdsConfig `protobuf:"bytes,3,rep,name=generic_xds_configs,json=genericXdsConfigs,proto3" json:"generic_xds_configs,omitempty"` + // For xDS clients, the scope in which the data is used. + // For example, gRPC indicates the data plane target or that the data is + // associated with gRPC server(s). 
+ ClientScope string `protobuf:"bytes,4,opt,name=client_scope,json=clientScope,proto3" json:"client_scope,omitempty"` } func (x *ClientConfig) Reset() { @@ -453,6 +453,13 @@ func (x *ClientConfig) GetGenericXdsConfigs() []*ClientConfig_GenericXdsConfig { return nil } +func (x *ClientConfig) GetClientScope() string { + if x != nil { + return x.ClientScope + } + return "" +} + type ClientStatusResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -520,9 +527,9 @@ type ClientConfig_GenericXdsConfig struct { // static bootstrap listeners, this field will be "" VersionInfo string `protobuf:"bytes,3,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` // The xDS resource config. Actual content depends on the type - XdsConfig *any1.Any `protobuf:"bytes,4,opt,name=xds_config,json=xdsConfig,proto3" json:"xds_config,omitempty"` + XdsConfig *anypb.Any `protobuf:"bytes,4,opt,name=xds_config,json=xdsConfig,proto3" json:"xds_config,omitempty"` // Timestamp when the xDS resource was last updated - LastUpdated *timestamp.Timestamp `protobuf:"bytes,5,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` // Per xDS resource config status. It is generated by management servers. // It will not be present if the CSDS server is an xDS client. ConfigStatus ConfigStatus `protobuf:"varint,6,opt,name=config_status,json=configStatus,proto3,enum=envoy.service.status.v3.ConfigStatus" json:"config_status,omitempty"` @@ -593,14 +600,14 @@ func (x *ClientConfig_GenericXdsConfig) GetVersionInfo() string { return "" } -func (x *ClientConfig_GenericXdsConfig) GetXdsConfig() *any1.Any { +func (x *ClientConfig_GenericXdsConfig) GetXdsConfig() *anypb.Any { if x != nil { return x.XdsConfig } return nil } -func (x *ClientConfig_GenericXdsConfig) GetLastUpdated() *timestamp.Timestamp { +func (x *ClientConfig_GenericXdsConfig) GetLastUpdated() *timestamppb.Timestamp { if x != nil { return x.LastUpdated } @@ -716,7 +723,7 @@ var file_envoy_service_status_v3_csds_proto_rawDesc = []byte{ 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x65, 0x72, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x10, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x5f, 0x78, 0x64, 0x73, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x8b, 0x06, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xae, 0x06, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2e, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, @@ -732,88 +739,90 @@ var file_envoy_service_status_v3_csds_proto_rawDesc = []byte{ 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x73, 0x1a, 0xe2, 0x03, 0x0a, 0x10, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x58, - 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, - 0x5f, 0x75, 0x72, 0x6c, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, - 0x55, 0x72, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x33, 0x0a, 0x0a, 0x78, 0x64, - 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x09, 0x78, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x4a, - 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x73, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, - 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x8a, 0x01, 0x0a, 0x14, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, - 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x33, 0x9a, - 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x2a, 0x4b, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x0a, 0x0a, 0x06, 0x53, 0x59, 0x4e, 0x43, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x4e, - 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x54, 0x41, - 0x4c, 0x45, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x04, 0x2a, - 0x63, 0x0a, 0x12, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, - 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4c, 0x49, - 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, - 0x10, 0x0a, 0x0c, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, - 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4e, 0x41, 0x43, 0x4b, - 0x45, 0x44, 0x10, 0x03, 0x32, 0xb2, 0x02, 0x0a, 0x1c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x77, 0x0a, 0x12, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2c, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x98, - 0x01, 0x0a, 0x11, 0x46, 0x65, 0x74, 0x63, 0x68, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, + 0x69, 0x67, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x63, + 0x6f, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0xe2, 0x03, 0x0a, 0x10, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x69, 0x63, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, + 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 
0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x33, 0x0a, + 0x0a, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x09, 0x78, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x4a, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x49, 0x0a, + 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2c, 0x0a, + 0x12, 0x69, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x53, 0x74, 0x61, + 0x74, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, + 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x8a, 0x01, 0x0a, 0x14, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x3a, 0x01, 0x2a, 0x22, 0x1b, 0x2f, 0x76, - 0x33, 0x2f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x3a, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x85, 0x01, 0xba, 0x80, 0xc8, 0xd1, - 0x06, 0x02, 0x10, 0x02, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, - 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 
0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x09, 0x43, 0x73, 0x64, - 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x47, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, - 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, - 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x76, - 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, 0x4b, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x59, 0x4e, 0x43, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, + 0x0a, 0x08, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, + 0x53, 0x54, 0x41, 0x4c, 0x45, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, + 0x10, 0x04, 0x2a, 0x63, 0x0a, 0x12, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, + 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x45, 0x44, + 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x41, 0x43, 0x4b, + 0x45, 0x44, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4e, + 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x03, 0x32, 0xb2, 0x02, 0x0a, 0x1c, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x77, 0x0a, 0x12, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2c, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, + 0x01, 0x12, 0x98, 0x01, 0x0a, 0x11, 0x46, 0x65, 0x74, 0x63, 0x68, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2c, 0x2e, 0x65, 
0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x3a, 0x01, 0x2a, 0x22, + 0x1b, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x3a, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x85, 0x01, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x09, + 0x43, 0x73, 0x64, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x47, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, + 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -845,8 +854,8 @@ var file_envoy_service_status_v3_csds_proto_goTypes = []interface{}{ (*v32.RoutesConfigDump)(nil), // 11: envoy.admin.v3.RoutesConfigDump (*v32.ScopedRoutesConfigDump)(nil), // 12: envoy.admin.v3.ScopedRoutesConfigDump (*v32.EndpointsConfigDump)(nil), // 13: envoy.admin.v3.EndpointsConfigDump - (*any1.Any)(nil), // 14: google.protobuf.Any - (*timestamp.Timestamp)(nil), // 15: google.protobuf.Timestamp + (*anypb.Any)(nil), // 14: google.protobuf.Any + (*timestamppb.Timestamp)(nil), // 15: google.protobuf.Timestamp (v32.ClientResourceStatus)(0), // 16: envoy.admin.v3.ClientResourceStatus (*v32.UpdateFailureState)(nil), // 17: envoy.admin.v3.UpdateFailureState } @@ -974,152 +983,3 @@ func file_envoy_service_status_v3_csds_proto_init() { file_envoy_service_status_v3_csds_proto_goTypes = nil file_envoy_service_status_v3_csds_proto_depIdxs = nil } - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// ClientStatusDiscoveryServiceClient is the client API for ClientStatusDiscoveryService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type ClientStatusDiscoveryServiceClient interface { - StreamClientStatus(ctx context.Context, opts ...grpc.CallOption) (ClientStatusDiscoveryService_StreamClientStatusClient, error) - FetchClientStatus(ctx context.Context, in *ClientStatusRequest, opts ...grpc.CallOption) (*ClientStatusResponse, error) -} - -type clientStatusDiscoveryServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewClientStatusDiscoveryServiceClient(cc grpc.ClientConnInterface) ClientStatusDiscoveryServiceClient { - return &clientStatusDiscoveryServiceClient{cc} -} - -func (c *clientStatusDiscoveryServiceClient) StreamClientStatus(ctx context.Context, opts ...grpc.CallOption) (ClientStatusDiscoveryService_StreamClientStatusClient, error) { - stream, err := c.cc.NewStream(ctx, &_ClientStatusDiscoveryService_serviceDesc.Streams[0], "/envoy.service.status.v3.ClientStatusDiscoveryService/StreamClientStatus", opts...) - if err != nil { - return nil, err - } - x := &clientStatusDiscoveryServiceStreamClientStatusClient{stream} - return x, nil -} - -type ClientStatusDiscoveryService_StreamClientStatusClient interface { - Send(*ClientStatusRequest) error - Recv() (*ClientStatusResponse, error) - grpc.ClientStream -} - -type clientStatusDiscoveryServiceStreamClientStatusClient struct { - grpc.ClientStream -} - -func (x *clientStatusDiscoveryServiceStreamClientStatusClient) Send(m *ClientStatusRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *clientStatusDiscoveryServiceStreamClientStatusClient) Recv() (*ClientStatusResponse, error) { - m := new(ClientStatusResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *clientStatusDiscoveryServiceClient) FetchClientStatus(ctx context.Context, in *ClientStatusRequest, opts ...grpc.CallOption) (*ClientStatusResponse, error) { - out := new(ClientStatusResponse) - err := c.cc.Invoke(ctx, "/envoy.service.status.v3.ClientStatusDiscoveryService/FetchClientStatus", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ClientStatusDiscoveryServiceServer is the server API for ClientStatusDiscoveryService service. -type ClientStatusDiscoveryServiceServer interface { - StreamClientStatus(ClientStatusDiscoveryService_StreamClientStatusServer) error - FetchClientStatus(context.Context, *ClientStatusRequest) (*ClientStatusResponse, error) -} - -// UnimplementedClientStatusDiscoveryServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedClientStatusDiscoveryServiceServer struct { -} - -func (*UnimplementedClientStatusDiscoveryServiceServer) StreamClientStatus(ClientStatusDiscoveryService_StreamClientStatusServer) error { - return status.Errorf(codes.Unimplemented, "method StreamClientStatus not implemented") -} -func (*UnimplementedClientStatusDiscoveryServiceServer) FetchClientStatus(context.Context, *ClientStatusRequest) (*ClientStatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method FetchClientStatus not implemented") -} - -func RegisterClientStatusDiscoveryServiceServer(s *grpc.Server, srv ClientStatusDiscoveryServiceServer) { - s.RegisterService(&_ClientStatusDiscoveryService_serviceDesc, srv) -} - -func _ClientStatusDiscoveryService_StreamClientStatus_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(ClientStatusDiscoveryServiceServer).StreamClientStatus(&clientStatusDiscoveryServiceStreamClientStatusServer{stream}) -} - -type ClientStatusDiscoveryService_StreamClientStatusServer interface { - Send(*ClientStatusResponse) error - Recv() (*ClientStatusRequest, error) - grpc.ServerStream -} - -type clientStatusDiscoveryServiceStreamClientStatusServer struct { - grpc.ServerStream -} - -func (x *clientStatusDiscoveryServiceStreamClientStatusServer) Send(m *ClientStatusResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *clientStatusDiscoveryServiceStreamClientStatusServer) Recv() (*ClientStatusRequest, error) { - m := new(ClientStatusRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _ClientStatusDiscoveryService_FetchClientStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ClientStatusRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClientStatusDiscoveryServiceServer).FetchClientStatus(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/envoy.service.status.v3.ClientStatusDiscoveryService/FetchClientStatus", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClientStatusDiscoveryServiceServer).FetchClientStatus(ctx, req.(*ClientStatusRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _ClientStatusDiscoveryService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "envoy.service.status.v3.ClientStatusDiscoveryService", - HandlerType: (*ClientStatusDiscoveryServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "FetchClientStatus", - Handler: _ClientStatusDiscoveryService_FetchClientStatus_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "StreamClientStatus", - Handler: _ClientStatusDiscoveryService_StreamClientStatus_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "envoy/service/status/v3/csds.proto", -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.validate.go index e0c078d4319..d27eee6463c 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code 
generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/service/status/v3/csds.proto @@ -638,6 +639,8 @@ func (m *ClientConfig) validate(all bool) error { } + // no validation rules for ClientScope + if len(errors) > 0 { return ClientConfigMultiError(errors) } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds_grpc.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds_grpc.pb.go new file mode 100644 index 00000000000..abe9abebdfa --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds_grpc.pb.go @@ -0,0 +1,177 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v5.26.1 +// source: envoy/service/status/v3/csds.proto + +package statusv3 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + ClientStatusDiscoveryService_StreamClientStatus_FullMethodName = "/envoy.service.status.v3.ClientStatusDiscoveryService/StreamClientStatus" + ClientStatusDiscoveryService_FetchClientStatus_FullMethodName = "/envoy.service.status.v3.ClientStatusDiscoveryService/FetchClientStatus" +) + +// ClientStatusDiscoveryServiceClient is the client API for ClientStatusDiscoveryService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ClientStatusDiscoveryServiceClient interface { + StreamClientStatus(ctx context.Context, opts ...grpc.CallOption) (ClientStatusDiscoveryService_StreamClientStatusClient, error) + FetchClientStatus(ctx context.Context, in *ClientStatusRequest, opts ...grpc.CallOption) (*ClientStatusResponse, error) +} + +type clientStatusDiscoveryServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewClientStatusDiscoveryServiceClient(cc grpc.ClientConnInterface) ClientStatusDiscoveryServiceClient { + return &clientStatusDiscoveryServiceClient{cc} +} + +func (c *clientStatusDiscoveryServiceClient) StreamClientStatus(ctx context.Context, opts ...grpc.CallOption) (ClientStatusDiscoveryService_StreamClientStatusClient, error) { + stream, err := c.cc.NewStream(ctx, &ClientStatusDiscoveryService_ServiceDesc.Streams[0], ClientStatusDiscoveryService_StreamClientStatus_FullMethodName, opts...) 
+ if err != nil { + return nil, err + } + x := &clientStatusDiscoveryServiceStreamClientStatusClient{stream} + return x, nil +} + +type ClientStatusDiscoveryService_StreamClientStatusClient interface { + Send(*ClientStatusRequest) error + Recv() (*ClientStatusResponse, error) + grpc.ClientStream +} + +type clientStatusDiscoveryServiceStreamClientStatusClient struct { + grpc.ClientStream +} + +func (x *clientStatusDiscoveryServiceStreamClientStatusClient) Send(m *ClientStatusRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *clientStatusDiscoveryServiceStreamClientStatusClient) Recv() (*ClientStatusResponse, error) { + m := new(ClientStatusResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *clientStatusDiscoveryServiceClient) FetchClientStatus(ctx context.Context, in *ClientStatusRequest, opts ...grpc.CallOption) (*ClientStatusResponse, error) { + out := new(ClientStatusResponse) + err := c.cc.Invoke(ctx, ClientStatusDiscoveryService_FetchClientStatus_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ClientStatusDiscoveryServiceServer is the server API for ClientStatusDiscoveryService service. +// All implementations should embed UnimplementedClientStatusDiscoveryServiceServer +// for forward compatibility +type ClientStatusDiscoveryServiceServer interface { + StreamClientStatus(ClientStatusDiscoveryService_StreamClientStatusServer) error + FetchClientStatus(context.Context, *ClientStatusRequest) (*ClientStatusResponse, error) +} + +// UnimplementedClientStatusDiscoveryServiceServer should be embedded to have forward compatible implementations. +type UnimplementedClientStatusDiscoveryServiceServer struct { +} + +func (UnimplementedClientStatusDiscoveryServiceServer) StreamClientStatus(ClientStatusDiscoveryService_StreamClientStatusServer) error { + return status.Errorf(codes.Unimplemented, "method StreamClientStatus not implemented") +} +func (UnimplementedClientStatusDiscoveryServiceServer) FetchClientStatus(context.Context, *ClientStatusRequest) (*ClientStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FetchClientStatus not implemented") +} + +// UnsafeClientStatusDiscoveryServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ClientStatusDiscoveryServiceServer will +// result in compilation errors. 
+type UnsafeClientStatusDiscoveryServiceServer interface { + mustEmbedUnimplementedClientStatusDiscoveryServiceServer() +} + +func RegisterClientStatusDiscoveryServiceServer(s grpc.ServiceRegistrar, srv ClientStatusDiscoveryServiceServer) { + s.RegisterService(&ClientStatusDiscoveryService_ServiceDesc, srv) +} + +func _ClientStatusDiscoveryService_StreamClientStatus_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ClientStatusDiscoveryServiceServer).StreamClientStatus(&clientStatusDiscoveryServiceStreamClientStatusServer{stream}) +} + +type ClientStatusDiscoveryService_StreamClientStatusServer interface { + Send(*ClientStatusResponse) error + Recv() (*ClientStatusRequest, error) + grpc.ServerStream +} + +type clientStatusDiscoveryServiceStreamClientStatusServer struct { + grpc.ServerStream +} + +func (x *clientStatusDiscoveryServiceStreamClientStatusServer) Send(m *ClientStatusResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *clientStatusDiscoveryServiceStreamClientStatusServer) Recv() (*ClientStatusRequest, error) { + m := new(ClientStatusRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _ClientStatusDiscoveryService_FetchClientStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ClientStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClientStatusDiscoveryServiceServer).FetchClientStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ClientStatusDiscoveryService_FetchClientStatus_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClientStatusDiscoveryServiceServer).FetchClientStatus(ctx, req.(*ClientStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// ClientStatusDiscoveryService_ServiceDesc is the grpc.ServiceDesc for ClientStatusDiscoveryService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ClientStatusDiscoveryService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "envoy.service.status.v3.ClientStatusDiscoveryService", + HandlerType: (*ClientStatusDiscoveryServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "FetchClientStatus", + Handler: _ClientStatusDiscoveryService_FetchClientStatus_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamClientStatus", + Handler: _ClientStatusDiscoveryService_StreamClientStatus_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "envoy/service/status/v3/csds.proto", +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds_vtproto.pb.go new file mode 100644 index 00000000000..a55983e810a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds_vtproto.pb.go @@ -0,0 +1,866 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
+// source: envoy/service/status/v3/csds.proto + +package statusv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + timestamppb "github.com/planetscale/vtprotobuf/types/known/timestamppb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ClientStatusRequest) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientStatusRequest) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClientStatusRequest) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ExcludeResourceContents { + i-- + if m.ExcludeResourceContents { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Node != nil { + if vtmsg, ok := interface{}(m.Node).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Node) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.NodeMatchers) > 0 { + for iNdEx := len(m.NodeMatchers) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.NodeMatchers[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.NodeMatchers[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PerXdsConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PerXdsConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PerXdsConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x38 + } + if msg, ok := 
m.PerXdsConfig.(*PerXdsConfig_EndpointConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PerXdsConfig.(*PerXdsConfig_ScopedRouteConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PerXdsConfig.(*PerXdsConfig_RouteConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PerXdsConfig.(*PerXdsConfig_ClusterConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PerXdsConfig.(*PerXdsConfig_ListenerConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Status != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *PerXdsConfig_ListenerConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PerXdsConfig_ListenerConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListenerConfig != nil { + if vtmsg, ok := interface{}(m.ListenerConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ListenerConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *PerXdsConfig_ClusterConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PerXdsConfig_ClusterConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ClusterConfig != nil { + if vtmsg, ok := interface{}(m.ClusterConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ClusterConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *PerXdsConfig_RouteConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PerXdsConfig_RouteConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RouteConfig != nil { + if vtmsg, ok := interface{}(m.RouteConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RouteConfig) + if 
err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *PerXdsConfig_ScopedRouteConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PerXdsConfig_ScopedRouteConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ScopedRouteConfig != nil { + if vtmsg, ok := interface{}(m.ScopedRouteConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ScopedRouteConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *PerXdsConfig_EndpointConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PerXdsConfig_EndpointConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.EndpointConfig != nil { + if vtmsg, ok := interface{}(m.EndpointConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.EndpointConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *ClientConfig_GenericXdsConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientConfig_GenericXdsConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClientConfig_GenericXdsConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.IsStaticResource { + i-- + if m.IsStaticResource { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } + if m.ErrorState != nil { + if vtmsg, ok := interface{}(m.ErrorState).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ErrorState) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x38 + } + if m.ConfigStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ConfigStatus)) + i-- + dAtA[i] = 0x30 + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.XdsConfig != nil { + size, err := (*anypb.Any)(m.XdsConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClientConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ClientScope) > 0 { + i -= len(m.ClientScope) + copy(dAtA[i:], m.ClientScope) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClientScope))) + i-- + dAtA[i] = 0x22 + } + if len(m.GenericXdsConfigs) > 0 { + for iNdEx := len(m.GenericXdsConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.GenericXdsConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.XdsConfig) > 0 { + for iNdEx := len(m.XdsConfig) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.XdsConfig[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.Node != nil { + if vtmsg, ok := interface{}(m.Node).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Node) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientStatusResponse) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientStatusResponse) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClientStatusResponse) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Config[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ClientStatusRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.NodeMatchers) > 0 { + for _, e := range m.NodeMatchers { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Node != nil { + if size, ok := interface{}(m.Node).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Node) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ExcludeResourceContents { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *PerXdsConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Status)) + } + if vtmsg, ok := m.PerXdsConfig.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + n += len(m.unknownFields) + return n +} + +func (m *PerXdsConfig_ListenerConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListenerConfig != nil { + if size, ok := interface{}(m.ListenerConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ListenerConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *PerXdsConfig_ClusterConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClusterConfig != nil { + if size, ok := interface{}(m.ClusterConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ClusterConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *PerXdsConfig_RouteConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RouteConfig != nil { + if size, ok := interface{}(m.RouteConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RouteConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *PerXdsConfig_ScopedRouteConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ScopedRouteConfig != nil { + if size, ok := interface{}(m.ScopedRouteConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ScopedRouteConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *PerXdsConfig_EndpointConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l 
int + _ = l + if m.EndpointConfig != nil { + if size, ok := interface{}(m.EndpointConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.EndpointConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ClientConfig_GenericXdsConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.XdsConfig != nil { + l = (*anypb.Any)(m.XdsConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConfigStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ConfigStatus)) + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + if m.ErrorState != nil { + if size, ok := interface{}(m.ErrorState).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ErrorState) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IsStaticResource { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ClientConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Node != nil { + if size, ok := interface{}(m.Node).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Node) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.XdsConfig) > 0 { + for _, e := range m.XdsConfig { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.GenericXdsConfigs) > 0 { + for _, e := range m.GenericXdsConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.ClientScope) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClientStatusResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.go index a76427d6c06..8afb4e8d127 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/type/http/v3/cookie.proto package httpv3 @@ -9,9 +9,9 @@ package httpv3 import ( _ "github.com/cncf/xds/go/udpa/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" - duration "github.com/golang/protobuf/ptypes/duration" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" reflect "reflect" sync "sync" ) @@ -33,8 +33,8 @@ type Cookie struct { // new cookie for downstream. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Duration of cookie. This will be used to set the expiry time of a new cookie when it is - // generated. Set this to 0 to use a session cookie. - Ttl *duration.Duration `protobuf:"bytes,2,opt,name=ttl,proto3" json:"ttl,omitempty"` + // generated. Set this to 0s to use a session cookie and disable cookie expiration. + Ttl *durationpb.Duration `protobuf:"bytes,2,opt,name=ttl,proto3" json:"ttl,omitempty"` // Path of cookie. This will be used to set the path of a new cookie when it is generated. // If no path is specified here, no path will be set for the cookie. Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` @@ -79,7 +79,7 @@ func (x *Cookie) GetName() string { return "" } -func (x *Cookie) GetTtl() *duration.Duration { +func (x *Cookie) GetTtl() *durationpb.Duration { if x != nil { return x.Ttl } @@ -137,8 +137,8 @@ func file_envoy_type_http_v3_cookie_proto_rawDescGZIP() []byte { var file_envoy_type_http_v3_cookie_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_envoy_type_http_v3_cookie_proto_goTypes = []interface{}{ - (*Cookie)(nil), // 0: envoy.type.http.v3.Cookie - (*duration.Duration)(nil), // 1: google.protobuf.Duration + (*Cookie)(nil), // 0: envoy.type.http.v3.Cookie + (*durationpb.Duration)(nil), // 1: google.protobuf.Duration } var file_envoy_type_http_v3_cookie_proto_depIdxs = []int32{ 1, // 0: envoy.type.http.v3.Cookie.ttl:type_name -> google.protobuf.Duration diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.validate.go index bb23d478192..3daecd3dea3 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/type/http/v3/cookie.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie_vtproto.pb.go new file mode 100644 index 00000000000..66ab8b784a8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie_vtproto.pb.go @@ -0,0 +1,99 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
+// source: envoy/type/http/v3/cookie.proto + +package httpv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Cookie) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cookie) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cookie) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x1a + } + if m.Ttl != nil { + size, err := (*durationpb.Duration)(m.Ttl).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cookie) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Ttl != nil { + l = (*durationpb.Duration)(m.Ttl).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.go index 888f00dd1f7..dda21b56bd0 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/type/http/v3/path_transformation.proto package httpv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.validate.go index ce91984f20d..0bb35065bc6 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/type/http/v3/path_transformation.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation_vtproto.pb.go new file mode 100644 index 00000000000..64d8960cf26 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation_vtproto.pb.go @@ -0,0 +1,300 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/http/v3/path_transformation.proto + +package httpv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *PathTransformation_Operation_NormalizePathRFC3986) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PathTransformation_Operation_NormalizePathRFC3986) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathTransformation_Operation_NormalizePathRFC3986) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *PathTransformation_Operation_MergeSlashes) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PathTransformation_Operation_MergeSlashes) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathTransformation_Operation_MergeSlashes) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *PathTransformation_Operation) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PathTransformation_Operation) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathTransformation_Operation) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.OperationSpecifier.(*PathTransformation_Operation_MergeSlashes_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.OperationSpecifier.(*PathTransformation_Operation_NormalizePathRfc_3986); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *PathTransformation_Operation_NormalizePathRfc_3986) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathTransformation_Operation_NormalizePathRfc_3986) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NormalizePathRfc_3986 != nil { + size, err := m.NormalizePathRfc_3986.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, 
nil +} +func (m *PathTransformation_Operation_MergeSlashes_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathTransformation_Operation_MergeSlashes_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MergeSlashes != nil { + size, err := m.MergeSlashes.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *PathTransformation) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PathTransformation) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathTransformation) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Operations) > 0 { + for iNdEx := len(m.Operations) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Operations[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PathTransformation_Operation_NormalizePathRFC3986) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PathTransformation_Operation_MergeSlashes) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PathTransformation_Operation) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.OperationSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *PathTransformation_Operation_NormalizePathRfc_3986) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NormalizePathRfc_3986 != nil { + l = m.NormalizePathRfc_3986.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *PathTransformation_Operation_MergeSlashes_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MergeSlashes != nil { + l = m.MergeSlashes.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *PathTransformation) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Operations) > 0 { + for _, e := range m.Operations { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.go index 65948905376..db3bd5994f7 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.go +++ 
b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/type/matcher/v3/filter_state.proto package matcherv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.validate.go index 0152061d5be..41a5f68db02 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/type/matcher/v3/filter_state.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state_vtproto.pb.go new file mode 100644 index 00000000000..873f63eef73 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state_vtproto.pb.go @@ -0,0 +1,121 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/filter_state.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *FilterStateMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilterStateMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FilterStateMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Matcher.(*FilterStateMatcher_StringMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FilterStateMatcher_StringMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FilterStateMatcher_StringMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StringMatch != nil { + size, err := m.StringMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *FilterStateMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.Matcher.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *FilterStateMatcher_StringMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StringMatch != nil { + l = m.StringMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.go index 01bf97506a3..a2f9c73adc4 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/type/matcher/v3/http_inputs.proto package matcherv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.validate.go index 24950b97ab5..78de165bdcd 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/type/matcher/v3/http_inputs.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs_vtproto.pb.go new file mode 100644 index 00000000000..ecf552dc0e2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs_vtproto.pb.go @@ -0,0 +1,289 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/http_inputs.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HttpRequestHeaderMatchInput) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpRequestHeaderMatchInput) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpRequestHeaderMatchInput) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.HeaderName) > 0 { + i -= len(m.HeaderName) + copy(dAtA[i:], m.HeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpRequestTrailerMatchInput) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpRequestTrailerMatchInput) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpRequestTrailerMatchInput) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.HeaderName) > 0 { + i -= len(m.HeaderName) + copy(dAtA[i:], m.HeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpResponseHeaderMatchInput) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpResponseHeaderMatchInput) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpResponseHeaderMatchInput) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.HeaderName) > 0 { + i -= len(m.HeaderName) + copy(dAtA[i:], m.HeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpResponseTrailerMatchInput) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpResponseTrailerMatchInput) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpResponseTrailerMatchInput) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if 
m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.HeaderName) > 0 { + i -= len(m.HeaderName) + copy(dAtA[i:], m.HeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpRequestQueryParamMatchInput) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpRequestQueryParamMatchInput) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpRequestQueryParamMatchInput) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.QueryParam) > 0 { + i -= len(m.QueryParam) + copy(dAtA[i:], m.QueryParam) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.QueryParam))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpRequestHeaderMatchInput) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HeaderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpRequestTrailerMatchInput) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HeaderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpResponseHeaderMatchInput) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HeaderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpResponseTrailerMatchInput) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HeaderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpRequestQueryParamMatchInput) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.QueryParam) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.go index 91534f4b126..14a093334b6 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/type/matcher/v3/metadata.proto package matcherv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.validate.go index 0a00e4faa31..27c898ee041 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/type/matcher/v3/metadata.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata_vtproto.pb.go new file mode 100644 index 00000000000..4050e14c269 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata_vtproto.pb.go @@ -0,0 +1,195 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/metadata.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *MetadataMatcher_PathSegment) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataMatcher_PathSegment) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataMatcher_PathSegment) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Segment.(*MetadataMatcher_PathSegment_Key); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *MetadataMatcher_PathSegment_Key) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataMatcher_PathSegment_Key) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *MetadataMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Invert { + i-- + if m.Invert { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.Value != nil { + size, err := m.Value.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Path) > 0 { + for iNdEx := len(m.Path) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Path[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Filter) > 0 { + i -= len(m.Filter) + copy(dAtA[i:], m.Filter) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Filter))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MetadataMatcher_PathSegment) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Segment.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *MetadataMatcher_PathSegment_Key) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *MetadataMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Filter) + if l > 0 { + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Path) > 0 { + for _, e := range m.Path { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Value != nil { + l = m.Value.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Invert { + n += 2 + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.go index 1274f799c8e..d6083cb2773 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/type/matcher/v3/node.proto package matcherv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.validate.go index fb9b77f213c..62aa27f7ad2 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/type/matcher/v3/node.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node_vtproto.pb.go new file mode 100644 index 00000000000..3bea65da7c6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node_vtproto.pb.go @@ -0,0 +1,94 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/node.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *NodeMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *NodeMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.NodeMetadatas) > 0 { + for iNdEx := len(m.NodeMetadatas) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.NodeMetadatas[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.NodeId != nil { + size, err := m.NodeId.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NodeMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NodeId != nil { + l = m.NodeId.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.NodeMetadatas) > 0 { + for _, e := range m.NodeMetadatas { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.go index 99733ecf4b7..2ad4bccfad0 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/type/matcher/v3/number.proto package matcherv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.validate.go index d656d7f445d..b019d7d010c 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. 
// source: envoy/type/matcher/v3/number.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number_vtproto.pb.go new file mode 100644 index 00000000000..7315258ab06 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number_vtproto.pb.go @@ -0,0 +1,160 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/number.proto + +package matcherv3 + +import ( + binary "encoding/binary" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *DoubleMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DoubleMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.MatchPattern.(*DoubleMatcher_Exact); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*DoubleMatcher_Range); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *DoubleMatcher_Range) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DoubleMatcher_Range) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Range != nil { + if vtmsg, ok := interface{}(m.Range).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Range) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *DoubleMatcher_Exact) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DoubleMatcher_Exact) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Exact)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m 
*DoubleMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.MatchPattern.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *DoubleMatcher_Range) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Range != nil { + if size, ok := interface{}(m.Range).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Range) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *DoubleMatcher_Exact) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.go index 8854b2c485b..aac680dbe13 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/type/matcher/v3/path.proto package matcherv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.validate.go index 524fae95af6..a978c99ab30 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/type/matcher/v3/path.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path_vtproto.pb.go new file mode 100644 index 00000000000..044fe9db219 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path_vtproto.pb.go @@ -0,0 +1,110 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/path.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *PathMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PathMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Rule.(*PathMatcher_Path); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *PathMatcher_Path) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathMatcher_Path) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Path != nil { + size, err := m.Path.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *PathMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Rule.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *PathMatcher_Path) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Path != nil { + l = m.Path.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.go index c95338e39e8..383bb267c39 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/type/matcher/v3/regex.proto package matcherv3 @@ -10,9 +10,9 @@ import ( _ "github.com/cncf/xds/go/udpa/annotations" _ "github.com/envoyproxy/go-control-plane/envoy/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -213,7 +213,7 @@ type RegexMatcher_GoogleRE2 struct { // global ``re2.max_program_size.error_level`` runtime value. // // Deprecated: Marked as deprecated in envoy/type/matcher/v3/regex.proto. 
- MaxProgramSize *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=max_program_size,json=maxProgramSize,proto3" json:"max_program_size,omitempty"` + MaxProgramSize *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=max_program_size,json=maxProgramSize,proto3" json:"max_program_size,omitempty"` } func (x *RegexMatcher_GoogleRE2) Reset() { @@ -249,7 +249,7 @@ func (*RegexMatcher_GoogleRE2) Descriptor() ([]byte, []int) { } // Deprecated: Marked as deprecated in envoy/type/matcher/v3/regex.proto. -func (x *RegexMatcher_GoogleRE2) GetMaxProgramSize() *wrappers.UInt32Value { +func (x *RegexMatcher_GoogleRE2) GetMaxProgramSize() *wrapperspb.UInt32Value { if x != nil { return x.MaxProgramSize } @@ -335,7 +335,7 @@ var file_envoy_type_matcher_v3_regex_proto_goTypes = []interface{}{ (*RegexMatcher)(nil), // 0: envoy.type.matcher.v3.RegexMatcher (*RegexMatchAndSubstitute)(nil), // 1: envoy.type.matcher.v3.RegexMatchAndSubstitute (*RegexMatcher_GoogleRE2)(nil), // 2: envoy.type.matcher.v3.RegexMatcher.GoogleRE2 - (*wrappers.UInt32Value)(nil), // 3: google.protobuf.UInt32Value + (*wrapperspb.UInt32Value)(nil), // 3: google.protobuf.UInt32Value } var file_envoy_type_matcher_v3_regex_proto_depIdxs = []int32{ 2, // 0: envoy.type.matcher.v3.RegexMatcher.google_re2:type_name -> envoy.type.matcher.v3.RegexMatcher.GoogleRE2 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.validate.go index efd4f414465..bb00d0cd7ae 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/type/matcher/v3/regex.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex_vtproto.pb.go new file mode 100644 index 00000000000..234f071937d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex_vtproto.pb.go @@ -0,0 +1,246 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/regex.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *RegexMatcher_GoogleRE2) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RegexMatcher_GoogleRE2) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RegexMatcher_GoogleRE2) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxProgramSize != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxProgramSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RegexMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RegexMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RegexMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Regex) > 0 { + i -= len(m.Regex) + copy(dAtA[i:], m.Regex) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Regex))) + i-- + dAtA[i] = 0x12 + } + if msg, ok := m.EngineType.(*RegexMatcher_GoogleRe2); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *RegexMatcher_GoogleRe2) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RegexMatcher_GoogleRe2) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GoogleRe2 != nil { + size, err := m.GoogleRe2.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *RegexMatchAndSubstitute) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RegexMatchAndSubstitute) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RegexMatchAndSubstitute) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Substitution) > 0 { + i -= len(m.Substitution) + copy(dAtA[i:], m.Substitution) + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Substitution))) + i-- + dAtA[i] = 0x12 + } + if m.Pattern != nil { + size, err := m.Pattern.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RegexMatcher_GoogleRE2) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxProgramSize != nil { + l = (*wrapperspb.UInt32Value)(m.MaxProgramSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RegexMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.EngineType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + l = len(m.Regex) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RegexMatcher_GoogleRe2) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GoogleRe2 != nil { + l = m.GoogleRe2.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RegexMatchAndSubstitute) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pattern != nil { + l = m.Pattern.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Substitution) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.go index 618f84d5961..3da1aae4ebc 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/type/matcher/v3/status_code_input.proto package matcherv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.validate.go index 763fa9f334d..b09b90c13d9 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. 
// source: envoy/type/matcher/v3/status_code_input.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input_vtproto.pb.go new file mode 100644 index 00000000000..156377f5010 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input_vtproto.pb.go @@ -0,0 +1,104 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/status_code_input.proto + +package matcherv3 + +import ( + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HttpResponseStatusCodeMatchInput) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpResponseStatusCodeMatchInput) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpResponseStatusCodeMatchInput) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *HttpResponseStatusCodeClassMatchInput) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpResponseStatusCodeClassMatchInput) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpResponseStatusCodeClassMatchInput) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *HttpResponseStatusCodeMatchInput) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *HttpResponseStatusCodeClassMatchInput) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.go index 3fec38decc7..2ebed90845d 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.go @@ -1,13 +1,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/type/matcher/v3/string.proto package matcherv3 import ( _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/cncf/xds/go/xds/core/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -23,7 +24,7 @@ const ( ) // Specifies the way to match a string. -// [#next-free-field: 8] +// [#next-free-field: 9] type StringMatcher struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -36,6 +37,7 @@ type StringMatcher struct { // *StringMatcher_Suffix // *StringMatcher_SafeRegex // *StringMatcher_Contains + // *StringMatcher_Custom MatchPattern isStringMatcher_MatchPattern `protobuf_oneof:"match_pattern"` // If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. This // has no effect for the safe_regex match. @@ -117,6 +119,13 @@ func (x *StringMatcher) GetContains() string { return "" } +func (x *StringMatcher) GetCustom() *v3.TypedExtensionConfig { + if x, ok := x.GetMatchPattern().(*StringMatcher_Custom); ok { + return x.Custom + } + return nil +} + func (x *StringMatcher) GetIgnoreCase() bool { if x != nil { return x.IgnoreCase @@ -172,6 +181,12 @@ type StringMatcher_Contains struct { Contains string `protobuf:"bytes,7,opt,name=contains,proto3,oneof"` } +type StringMatcher_Custom struct { + // Use an extension as the matcher type. + // [#extension-category: envoy.string_matcher] + Custom *v3.TypedExtensionConfig `protobuf:"bytes,8,opt,name=custom,proto3,oneof"` +} + func (*StringMatcher_Exact) isStringMatcher_MatchPattern() {} func (*StringMatcher_Prefix) isStringMatcher_MatchPattern() {} @@ -182,6 +197,8 @@ func (*StringMatcher_SafeRegex) isStringMatcher_MatchPattern() {} func (*StringMatcher_Contains) isStringMatcher_MatchPattern() {} +func (*StringMatcher_Custom) isStringMatcher_MatchPattern() {} + // Specifies a list of ways to match a string. 
type ListStringMatcher struct { state protoimpl.MessageState @@ -238,52 +255,58 @@ var file_envoy_type_matcher_v3_string_proto_rawDesc = []byte{ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, - 0x76, 0x33, 0x2f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, - 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, - 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd1, 0x02, 0x0a, 0x0d, 0x53, 0x74, - 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x05, 0x65, - 0x78, 0x61, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x65, 0x78, - 0x61, 0x63, 0x74, 0x12, 0x21, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, + 0x76, 0x33, 0x2f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, + 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8e, 0x03, 0x0a, 0x0d, 0x53, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x05, 0x65, 0x78, 0x61, 0x63, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, + 0x12, 0x21, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x06, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x06, - 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, - 0x00, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x4e, 0x0a, 0x0a, 0x73, 0x61, 0x66, - 0x65, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 
0x52, 0x09, - 0x73, 0x61, 0x66, 0x65, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x25, 0x0a, 0x08, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, - 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, - 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x61, 0x73, 0x65, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x61, 0x73, - 0x65, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x14, 0x0a, 0x0d, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, - 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x22, 0x8c, 0x01, - 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x12, 0x4a, 0x0a, 0x08, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, - 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, - 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x3a, - 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x84, 0x01, 0xba, - 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x53, 0x74, - 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, - 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, - 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x4e, 0x0a, 0x0a, 0x73, 0x61, 0x66, 0x65, 0x5f, 0x72, + 0x65, 0x67, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x09, 0x73, 0x61, 0x66, + 0x65, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x25, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x48, 0x00, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x3b, 0x0a, + 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 
0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, + 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x67, + 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x61, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x61, 0x73, 0x65, 0x3a, 0x27, 0x9a, 0xc5, 0x88, + 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x42, 0x14, 0x0a, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x70, 0x61, + 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, + 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x22, 0x8c, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4a, 0x0a, + 0x08, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, + 0x08, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, + 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x84, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, + 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -300,18 +323,20 @@ func file_envoy_type_matcher_v3_string_proto_rawDescGZIP() []byte { var file_envoy_type_matcher_v3_string_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_envoy_type_matcher_v3_string_proto_goTypes = []interface{}{ - (*StringMatcher)(nil), // 0: envoy.type.matcher.v3.StringMatcher - (*ListStringMatcher)(nil), // 1: envoy.type.matcher.v3.ListStringMatcher - (*RegexMatcher)(nil), // 2: envoy.type.matcher.v3.RegexMatcher + (*StringMatcher)(nil), // 0: envoy.type.matcher.v3.StringMatcher + (*ListStringMatcher)(nil), // 1: envoy.type.matcher.v3.ListStringMatcher + (*RegexMatcher)(nil), // 2: envoy.type.matcher.v3.RegexMatcher + (*v3.TypedExtensionConfig)(nil), // 3: xds.core.v3.TypedExtensionConfig } var file_envoy_type_matcher_v3_string_proto_depIdxs = []int32{ 2, // 0: 
envoy.type.matcher.v3.StringMatcher.safe_regex:type_name -> envoy.type.matcher.v3.RegexMatcher - 0, // 1: envoy.type.matcher.v3.ListStringMatcher.patterns:type_name -> envoy.type.matcher.v3.StringMatcher - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name + 3, // 1: envoy.type.matcher.v3.StringMatcher.custom:type_name -> xds.core.v3.TypedExtensionConfig + 0, // 2: envoy.type.matcher.v3.ListStringMatcher.patterns:type_name -> envoy.type.matcher.v3.StringMatcher + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name } func init() { file_envoy_type_matcher_v3_string_proto_init() } @@ -352,6 +377,7 @@ func file_envoy_type_matcher_v3_string_proto_init() { (*StringMatcher_Suffix)(nil), (*StringMatcher_SafeRegex)(nil), (*StringMatcher_Contains)(nil), + (*StringMatcher_Custom)(nil), } type x struct{} out := protoimpl.TypeBuilder{ diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.validate.go index 9a67d92a628..98e3925f645 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. 
// source: envoy/type/matcher/v3/string.proto @@ -199,6 +200,48 @@ func (m *StringMatcher) validate(all bool) error { errors = append(errors, err) } + case *StringMatcher_Custom: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if all { + switch v := interface{}(m.GetCustom()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StringMatcherValidationError{ + field: "Custom", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StringMatcherValidationError{ + field: "Custom", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCustom()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StringMatcherValidationError{ + field: "Custom", + reason: "embedded message failed validation", + cause: err, + } + } + } + default: _ = v // ensures v is used } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string_vtproto.pb.go new file mode 100644 index 00000000000..9c016e2e736 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string_vtproto.pb.go @@ -0,0 +1,370 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/string.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *StringMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StringMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StringMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.MatchPattern.(*StringMatcher_Custom); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*StringMatcher_Contains); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.IgnoreCase { + i-- + if m.IgnoreCase { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if msg, ok := m.MatchPattern.(*StringMatcher_SafeRegex); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*StringMatcher_Suffix); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*StringMatcher_Prefix); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*StringMatcher_Exact); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *StringMatcher_Exact) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StringMatcher_Exact) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Exact) + copy(dAtA[i:], m.Exact) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Exact))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *StringMatcher_Prefix) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StringMatcher_Prefix) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *StringMatcher_Suffix) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StringMatcher_Suffix) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Suffix) + copy(dAtA[i:], m.Suffix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Suffix))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *StringMatcher_SafeRegex) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StringMatcher_SafeRegex) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SafeRegex != nil { + size, err := 
m.SafeRegex.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *StringMatcher_Contains) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StringMatcher_Contains) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Contains) + copy(dAtA[i:], m.Contains) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Contains))) + i-- + dAtA[i] = 0x3a + return len(dAtA) - i, nil +} +func (m *StringMatcher_Custom) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StringMatcher_Custom) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Custom != nil { + if vtmsg, ok := interface{}(m.Custom).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Custom) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *ListStringMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListStringMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListStringMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Patterns) > 0 { + for iNdEx := len(m.Patterns) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Patterns[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *StringMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.MatchPattern.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.IgnoreCase { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *StringMatcher_Exact) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Exact) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *StringMatcher_Prefix) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Prefix) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *StringMatcher_Suffix) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Suffix) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *StringMatcher_SafeRegex) SizeVT() (n int) { + if m 
== nil { + return 0 + } + var l int + _ = l + if m.SafeRegex != nil { + l = m.SafeRegex.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *StringMatcher_Contains) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Contains) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *StringMatcher_Custom) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Custom != nil { + if size, ok := interface{}(m.Custom).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Custom) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ListStringMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Patterns) > 0 { + for _, e := range m.Patterns { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.go index 1ecb4ce095d..ef844bc7f84 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/type/matcher/v3/struct.proto package matcherv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.validate.go index 47d7eeb50a4..d69c1547f6a 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/type/matcher/v3/struct.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct_vtproto.pb.go new file mode 100644 index 00000000000..d36052b8363 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct_vtproto.pb.go @@ -0,0 +1,171 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/struct.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *StructMatcher_PathSegment) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StructMatcher_PathSegment) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StructMatcher_PathSegment) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Segment.(*StructMatcher_PathSegment_Key); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *StructMatcher_PathSegment_Key) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StructMatcher_PathSegment_Key) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *StructMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StructMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StructMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value != nil { + size, err := m.Value.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Path) > 0 { + for iNdEx := len(m.Path) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Path[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *StructMatcher_PathSegment) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Segment.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *StructMatcher_PathSegment_Key) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *StructMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Path) > 0 { + for _, e := range m.Path { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Value != nil { + l = m.Value.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.go index 851261eb6c7..7ba125cf308 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/type/matcher/v3/value.proto package matcherv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.validate.go index 5bbf95eba26..9814aa0dbaf 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/type/matcher/v3/value.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value_vtproto.pb.go new file mode 100644 index 00000000000..852f5cead13 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value_vtproto.pb.go @@ -0,0 +1,545 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/value.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ValueMatcher_NullMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValueMatcher_NullMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValueMatcher_NullMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *ValueMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValueMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValueMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.MatchPattern.(*ValueMatcher_OrMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*ValueMatcher_ListMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*ValueMatcher_PresentMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*ValueMatcher_BoolMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*ValueMatcher_StringMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*ValueMatcher_DoubleMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*ValueMatcher_NullMatch_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *ValueMatcher_NullMatch_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValueMatcher_NullMatch_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NullMatch != nil { + size, err := m.NullMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *ValueMatcher_DoubleMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*ValueMatcher_DoubleMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DoubleMatch != nil { + size, err := m.DoubleMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *ValueMatcher_StringMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValueMatcher_StringMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StringMatch != nil { + size, err := m.StringMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *ValueMatcher_BoolMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValueMatcher_BoolMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.BoolMatch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *ValueMatcher_PresentMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValueMatcher_PresentMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.PresentMatch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + return len(dAtA) - i, nil +} +func (m *ValueMatcher_ListMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValueMatcher_ListMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListMatch != nil { + size, err := m.ListMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *ValueMatcher_OrMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValueMatcher_OrMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrMatch != nil { + size, err := m.OrMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *ListMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + 
return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.MatchPattern.(*ListMatcher_OneOf); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *ListMatcher_OneOf) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListMatcher_OneOf) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OneOf != nil { + size, err := m.OneOf.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *OrMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OrMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OrMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ValueMatchers) > 0 { + for iNdEx := len(m.ValueMatchers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ValueMatchers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ValueMatcher_NullMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ValueMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.MatchPattern.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ValueMatcher_NullMatch_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NullMatch != nil { + l = m.NullMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ValueMatcher_DoubleMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DoubleMatch != nil { + l = m.DoubleMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ValueMatcher_StringMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StringMatch != nil { + l = m.StringMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ValueMatcher_BoolMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *ValueMatcher_PresentMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *ValueMatcher_ListMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListMatch != nil { + l = m.ListMatch.SizeVT() + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ValueMatcher_OrMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrMatch != nil { + l = m.OrMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ListMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.MatchPattern.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ListMatcher_OneOf) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OneOf != nil { + l = m.OneOf.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *OrMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ValueMatchers) > 0 { + for _, e := range m.ValueMatchers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.go index 84f7cfe7ccf..7a6ac07a533 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/type/metadata/v3/metadata.proto package metadatav3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.validate.go index 37e45c4f0e3..adc8c8ed51f 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/type/metadata/v3/metadata.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata_vtproto.pb.go new file mode 100644 index 00000000000..efbf1efc3de --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata_vtproto.pb.go @@ -0,0 +1,563 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/metadata/v3/metadata.proto + +package metadatav3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *MetadataKey_PathSegment) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataKey_PathSegment) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKey_PathSegment) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Segment.(*MetadataKey_PathSegment_Key); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *MetadataKey_PathSegment_Key) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKey_PathSegment_Key) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *MetadataKey) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataKey) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKey) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Path) > 0 { + for iNdEx := len(m.Path) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Path[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MetadataKind_Request) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataKind_Request) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind_Request) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *MetadataKind_Route) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataKind_Route) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind_Route) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *MetadataKind_Cluster) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataKind_Cluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind_Cluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *MetadataKind_Host) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataKind_Host) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind_Host) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *MetadataKind) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataKind) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Kind.(*MetadataKind_Host_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Kind.(*MetadataKind_Cluster_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Kind.(*MetadataKind_Route_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Kind.(*MetadataKind_Request_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *MetadataKind_Request_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind_Request_) 
MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Request != nil { + size, err := m.Request.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *MetadataKind_Route_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind_Route_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Route != nil { + size, err := m.Route.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *MetadataKind_Cluster_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind_Cluster_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Cluster != nil { + size, err := m.Cluster.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *MetadataKind_Host_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind_Host_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Host != nil { + size, err := m.Host.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *MetadataKey_PathSegment) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Segment.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *MetadataKey_PathSegment_Key) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *MetadataKey) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Path) > 0 { + for _, e := range m.Path { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *MetadataKind_Request) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *MetadataKind_Route) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *MetadataKind_Cluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *MetadataKind_Host) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *MetadataKind) 
SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Kind.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *MetadataKind_Request_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Request != nil { + l = m.Request.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MetadataKind_Route_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Route != nil { + l = m.Route.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MetadataKind_Cluster_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cluster != nil { + l = m.Cluster.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MetadataKind_Host_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Host != nil { + l = m.Host.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.go index ea64f091fd8..388e4749e21 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/type/tracing/v3/custom_tag.proto package tracingv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.validate.go index eeebc9efdeb..d15de9b6885 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/type/tracing/v3/custom_tag.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag_vtproto.pb.go new file mode 100644 index 00000000000..e558c5d079d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag_vtproto.pb.go @@ -0,0 +1,556 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/tracing/v3/custom_tag.proto + +package tracingv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *CustomTag_Literal) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomTag_Literal) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag_Literal) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomTag_Environment) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomTag_Environment) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag_Environment) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DefaultValue) > 0 { + i -= len(m.DefaultValue) + copy(dAtA[i:], m.DefaultValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DefaultValue))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomTag_Header) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomTag_Header) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag_Header) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DefaultValue) > 0 { + i -= len(m.DefaultValue) + copy(dAtA[i:], m.DefaultValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DefaultValue))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomTag_Metadata) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*CustomTag_Metadata) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag_Metadata) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DefaultValue) > 0 { + i -= len(m.DefaultValue) + copy(dAtA[i:], m.DefaultValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DefaultValue))) + i-- + dAtA[i] = 0x1a + } + if m.MetadataKey != nil { + if vtmsg, ok := interface{}(m.MetadataKey).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MetadataKey) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.Kind != nil { + if vtmsg, ok := interface{}(m.Kind).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Kind) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomTag) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomTag) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Type.(*CustomTag_Metadata_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*CustomTag_RequestHeader); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*CustomTag_Environment_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*CustomTag_Literal_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Tag) > 0 { + i -= len(m.Tag) + copy(dAtA[i:], m.Tag) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Tag))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomTag_Literal_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag_Literal_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Literal != nil { + size, err := 
m.Literal.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *CustomTag_Environment_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag_Environment_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Environment != nil { + size, err := m.Environment.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *CustomTag_RequestHeader) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag_RequestHeader) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RequestHeader != nil { + size, err := m.RequestHeader.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *CustomTag_Metadata_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag_Metadata_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Metadata != nil { + size, err := m.Metadata.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *CustomTag_Literal) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CustomTag_Environment) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DefaultValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CustomTag_Header) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DefaultValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CustomTag_Metadata) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Kind != nil { + if size, ok := interface{}(m.Kind).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Kind) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MetadataKey != nil { + if size, ok := interface{}(m.MetadataKey).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MetadataKey) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DefaultValue) + 
if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CustomTag) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Tag) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.Type.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *CustomTag_Literal_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Literal != nil { + l = m.Literal.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CustomTag_Environment_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Environment != nil { + l = m.Environment.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CustomTag_RequestHeader) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestHeader != nil { + l = m.RequestHeader.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CustomTag_Metadata_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Metadata != nil { + l = m.Metadata.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.go index 2cda6f3dc1d..af620911fde 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/type/v3/hash_policy.proto package typev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.validate.go index 5cb102e8062..5ec37f54022 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/type/v3/hash_policy.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy_vtproto.pb.go new file mode 100644 index 00000000000..bcc19959686 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy_vtproto.pb.go @@ -0,0 +1,251 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
+// source: envoy/type/v3/hash_policy.proto + +package typev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HashPolicy_SourceIp) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashPolicy_SourceIp) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HashPolicy_SourceIp) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *HashPolicy_FilterState) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashPolicy_FilterState) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HashPolicy_FilterState) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HashPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HashPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.PolicySpecifier.(*HashPolicy_FilterState_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PolicySpecifier.(*HashPolicy_SourceIp_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *HashPolicy_SourceIp_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HashPolicy_SourceIp_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SourceIp != nil { + size, err := m.SourceIp.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, 
err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *HashPolicy_FilterState_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HashPolicy_FilterState_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FilterState != nil { + size, err := m.FilterState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *HashPolicy_SourceIp) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *HashPolicy_FilterState) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HashPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.PolicySpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *HashPolicy_SourceIp_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SourceIp != nil { + l = m.SourceIp.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HashPolicy_FilterState_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FilterState != nil { + l = m.FilterState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.go index c92aa2d6deb..74f4e24dfe0 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/type/v3/http.proto package typev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.validate.go index fff363617da..e2c41e26f25 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. 
// source: envoy/type/v3/http.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.go index 6ca1677a18c..f7e952b3a13 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/type/v3/http_status.proto package typev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.validate.go index 985e8d94cee..d3f76e93724 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/type/v3/http_status.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status_vtproto.pb.go new file mode 100644 index 00000000000..f25340d846e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status_vtproto.pb.go @@ -0,0 +1,70 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/v3/http_status.proto + +package typev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HttpStatus) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpStatus) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Code != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HttpStatus) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Code != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Code)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.go index 727374bb357..45eb66186d0 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/type/v3/percent.proto package typev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.validate.go index 25a5815f098..2929f39f819 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/type/v3/percent.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent_vtproto.pb.go new file mode 100644 index 00000000000..82c60c5d958 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent_vtproto.pb.go @@ -0,0 +1,132 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/v3/percent.proto + +package typev3 + +import ( + binary "encoding/binary" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Percent) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Percent) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Percent) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *FractionalPercent) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FractionalPercent) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FractionalPercent) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Denominator != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Denominator)) + i-- + dAtA[i] = 0x10 + } + if m.Numerator != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Numerator)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Percent) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + n += len(m.unknownFields) + return n +} + +func (m *FractionalPercent) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Numerator != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Numerator)) + } + if m.Denominator != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Denominator)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.go index fa10eee0f10..63be48f3c76 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/type/v3/range.proto package typev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.validate.go index b6a0f4ef58f..6bf697e9bfc 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/type/v3/range.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range_vtproto.pb.go new file mode 100644 index 00000000000..7309b8c14ba --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range_vtproto.pb.go @@ -0,0 +1,200 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/v3/range.proto + +package typev3 + +import ( + binary "encoding/binary" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Int64Range) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int64Range) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Int64Range) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.End != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.End)) + i-- + dAtA[i] = 0x10 + } + if m.Start != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Int32Range) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int32Range) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Int32Range) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.End != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.End)) + i-- + dAtA[i] = 0x10 + } + if m.Start != 0 { + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(m.Start)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *DoubleRange) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleRange) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DoubleRange) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.End != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.End)))) + i-- + dAtA[i] = 0x11 + } + if m.Start != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Start)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Int64Range) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Start != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Start)) + } + if m.End != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.End)) + } + n += len(m.unknownFields) + return n +} + +func (m *Int32Range) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Start != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Start)) + } + if m.End != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.End)) + } + n += len(m.unknownFields) + return n +} + +func (m *DoubleRange) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Start != 0 { + n += 9 + } + if m.End != 0 { + n += 9 + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.go index b342e22173a..e7663f294fc 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/type/v3/ratelimit_strategy.proto package typev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.validate.go index 1d22adb0982..eebce17eac4 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. 
// source: envoy/type/v3/ratelimit_strategy.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy_vtproto.pb.go new file mode 100644 index 00000000000..c35990b7673 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy_vtproto.pb.go @@ -0,0 +1,241 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/v3/ratelimit_strategy.proto + +package typev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *RateLimitStrategy_RequestsPerTimeUnit) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimitStrategy_RequestsPerTimeUnit) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimitStrategy_RequestsPerTimeUnit) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TimeUnit != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TimeUnit)) + i-- + dAtA[i] = 0x10 + } + if m.RequestsPerTimeUnit != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RequestsPerTimeUnit)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RateLimitStrategy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimitStrategy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimitStrategy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Strategy.(*RateLimitStrategy_TokenBucket); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Strategy.(*RateLimitStrategy_RequestsPerTimeUnit_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Strategy.(*RateLimitStrategy_BlanketRule_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *RateLimitStrategy_BlanketRule_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimitStrategy_BlanketRule_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.BlanketRule)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *RateLimitStrategy_RequestsPerTimeUnit_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimitStrategy_RequestsPerTimeUnit_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RequestsPerTimeUnit != nil { + size, err := m.RequestsPerTimeUnit.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *RateLimitStrategy_TokenBucket) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimitStrategy_TokenBucket) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TokenBucket != nil { + size, err := m.TokenBucket.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RateLimitStrategy_RequestsPerTimeUnit) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestsPerTimeUnit != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.RequestsPerTimeUnit)) + } + if m.TimeUnit != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TimeUnit)) + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimitStrategy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Strategy.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimitStrategy_BlanketRule_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.BlanketRule)) + return n +} +func (m *RateLimitStrategy_RequestsPerTimeUnit_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestsPerTimeUnit != nil { + l = m.RequestsPerTimeUnit.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimitStrategy_TokenBucket) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TokenBucket != nil { + l = m.TokenBucket.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.go index 6677a074aee..3686888888a 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/type/v3/ratelimit_unit.proto package typev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.validate.go index 472396797be..17658400ee9 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/type/v3/ratelimit_unit.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.go index 10416e602cf..630e6567c41 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/type/v3/semantic_version.proto package typev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.validate.go index e0db1e39d25..af3b6ee415b 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/type/v3/semantic_version.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version_vtproto.pb.go new file mode 100644 index 00000000000..13810fe82a3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version_vtproto.pb.go @@ -0,0 +1,86 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/v3/semantic_version.proto + +package typev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *SemanticVersion) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SemanticVersion) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SemanticVersion) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Patch != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Patch)) + i-- + dAtA[i] = 0x18 + } + if m.MinorNumber != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MinorNumber)) + i-- + dAtA[i] = 0x10 + } + if m.MajorNumber != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MajorNumber)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SemanticVersion) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MajorNumber != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MajorNumber)) + } + if m.MinorNumber != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MinorNumber)) + } + if m.Patch != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Patch)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.go index 5363e5abe08..9c21f245410 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc v5.26.1 // source: envoy/type/v3/token_bucket.proto package typev3 @@ -9,10 +9,10 @@ package typev3 import ( _ "github.com/cncf/xds/go/udpa/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" - duration "github.com/golang/protobuf/ptypes/duration" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -35,11 +35,11 @@ type TokenBucket struct { MaxTokens uint32 `protobuf:"varint,1,opt,name=max_tokens,json=maxTokens,proto3" json:"max_tokens,omitempty"` // The number of tokens added to the bucket during each fill interval. If not specified, defaults // to a single token. - TokensPerFill *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=tokens_per_fill,json=tokensPerFill,proto3" json:"tokens_per_fill,omitempty"` + TokensPerFill *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=tokens_per_fill,json=tokensPerFill,proto3" json:"tokens_per_fill,omitempty"` // The fill interval that tokens are added to the bucket. During each fill interval // “tokens_per_fill“ are added to the bucket. The bucket will never contain more than // “max_tokens“ tokens. 
- FillInterval *duration.Duration `protobuf:"bytes,3,opt,name=fill_interval,json=fillInterval,proto3" json:"fill_interval,omitempty"` + FillInterval *durationpb.Duration `protobuf:"bytes,3,opt,name=fill_interval,json=fillInterval,proto3" json:"fill_interval,omitempty"` } func (x *TokenBucket) Reset() { @@ -81,14 +81,14 @@ func (x *TokenBucket) GetMaxTokens() uint32 { return 0 } -func (x *TokenBucket) GetTokensPerFill() *wrappers.UInt32Value { +func (x *TokenBucket) GetTokensPerFill() *wrapperspb.UInt32Value { if x != nil { return x.TokensPerFill } return nil } -func (x *TokenBucket) GetFillInterval() *duration.Duration { +func (x *TokenBucket) GetFillInterval() *durationpb.Duration { if x != nil { return x.FillInterval } @@ -150,9 +150,9 @@ func file_envoy_type_v3_token_bucket_proto_rawDescGZIP() []byte { var file_envoy_type_v3_token_bucket_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_envoy_type_v3_token_bucket_proto_goTypes = []interface{}{ - (*TokenBucket)(nil), // 0: envoy.type.v3.TokenBucket - (*wrappers.UInt32Value)(nil), // 1: google.protobuf.UInt32Value - (*duration.Duration)(nil), // 2: google.protobuf.Duration + (*TokenBucket)(nil), // 0: envoy.type.v3.TokenBucket + (*wrapperspb.UInt32Value)(nil), // 1: google.protobuf.UInt32Value + (*durationpb.Duration)(nil), // 2: google.protobuf.Duration } var file_envoy_type_v3_token_bucket_proto_depIdxs = []int32{ 1, // 0: envoy.type.v3.TokenBucket.tokens_per_fill:type_name -> google.protobuf.UInt32Value diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.validate.go index dbc6448ac77..4f2607621d3 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.validate.go @@ -1,3 +1,4 @@ +//go:build !disable_pgv // Code generated by protoc-gen-validate. DO NOT EDIT. // source: envoy/type/v3/token_bucket.proto diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket_vtproto.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket_vtproto.pb.go new file mode 100644 index 00000000000..8ab53eaf284 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket_vtproto.pb.go @@ -0,0 +1,100 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/v3/token_bucket.proto + +package typev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TokenBucket) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenBucket) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TokenBucket) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.FillInterval != nil { + size, err := (*durationpb.Duration)(m.FillInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.TokensPerFill != nil { + size, err := (*wrapperspb.UInt32Value)(m.TokensPerFill).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MaxTokens != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MaxTokens)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TokenBucket) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxTokens != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MaxTokens)) + } + if m.TokensPerFill != nil { + l = (*wrapperspb.UInt32Value)(m.TokensPerFill).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FillInterval != nil { + l = (*durationpb.Duration)(m.FillInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.pb.go index a31b2e1a3f7..6df95e89ecb 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.30.0 -// protoc v4.22.2 +// protoc v3.21.12 // source: validate/validate.proto package validate @@ -1974,7 +1974,7 @@ type StringRules struct { // MaxBytes specifies that this field must be the specified number of bytes // at a maximum MaxBytes *uint64 `protobuf:"varint,5,opt,name=max_bytes,json=maxBytes" json:"max_bytes,omitempty"` - // Pattern specifes that this field must match against the specified + // Pattern specifies that this field must match against the specified // regular expression (RE2 syntax). The included expression should elide // any delimiters. Pattern *string `protobuf:"bytes,6,opt,name=pattern" json:"pattern,omitempty"` @@ -2349,7 +2349,7 @@ type BytesRules struct { // MaxLen specifies that this field must be the specified number of bytes // at a maximum MaxLen *uint64 `protobuf:"varint,3,opt,name=max_len,json=maxLen" json:"max_len,omitempty"` - // Pattern specifes that this field must match against the specified + // Pattern specifies that this field must match against the specified // regular expression (RE2 syntax). 
The included expression should elide // any delimiters. Pattern *string `protobuf:"bytes,4,opt,name=pattern" json:"pattern,omitempty"` @@ -2699,10 +2699,10 @@ type RepeatedRules struct { // items at a maximum MaxItems *uint64 `protobuf:"varint,2,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` // Unique specifies that all elements in this field must be unique. This - // contraint is only applicable to scalar and enum types (messages are not + // constraint is only applicable to scalar and enum types (messages are not // supported). Unique *bool `protobuf:"varint,3,opt,name=unique" json:"unique,omitempty"` - // Items specifies the contraints to be applied to each item in the field. + // Items specifies the constraints to be applied to each item in the field. // Repeated message fields will still execute validation against each item // unless skip is specified here. Items *FieldRules `protobuf:"bytes,4,opt,name=items" json:"items,omitempty"` diff --git a/terraform/providers/google/vendor/github.com/go-logr/logr/README.md b/terraform/providers/google/vendor/github.com/go-logr/logr/README.md index 8969526a6e5..7c7f0c69cd9 100644 --- a/terraform/providers/google/vendor/github.com/go-logr/logr/README.md +++ b/terraform/providers/google/vendor/github.com/go-logr/logr/README.md @@ -1,6 +1,7 @@ # A minimal logging API for Go [![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-logr/logr)](https://goreportcard.com/report/github.com/go-logr/logr) [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr) logr offers an(other) opinion on how Go programs and libraries can do logging diff --git a/terraform/providers/google/vendor/github.com/go-logr/logr/funcr/funcr.go b/terraform/providers/google/vendor/github.com/go-logr/logr/funcr/funcr.go index fb2f866f4b7..30568e768dc 100644 --- a/terraform/providers/google/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/terraform/providers/google/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -236,15 +236,14 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { // implementation. It should be constructed with NewFormatter. Some of // its methods directly implement logr.LogSink. type Formatter struct { - outputFormat outputFormat - prefix string - values []any - valuesStr string - parentValuesStr string - depth int - opts *Options - group string // for slog groups - groupDepth int + outputFormat outputFormat + prefix string + values []any + valuesStr string + depth int + opts *Options + groupName string // for slog groups + groups []groupDef } // outputFormat indicates which outputFormat to use. @@ -257,6 +256,13 @@ const ( outputJSON ) +// groupDef represents a saved group. The values may be empty, but we don't +// know if we need to render the group until the final record is rendered. +type groupDef struct { + name string + values string +} + // PseudoStruct is a list of key-value pairs that gets logged as a struct. type PseudoStruct []any @@ -264,76 +270,102 @@ type PseudoStruct []any func (f Formatter) render(builtins, args []any) string { // Empirically bytes.Buffer is faster than strings.Builder for this. 
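//
// The groupDef stack above replaces the old single parentValuesStr plus
// groupDepth bookkeeping: each slog WithGroup pushes the group name and the
// values saved so far, and render() later folds the stack back up from the
// innermost group outward, eliding any group whose body turns out empty.
// A hedged sketch of the observable effect when going through the slog
// bridge (exact builtin keys depend on Options):
//
//	h := logr.ToSlogHandler(logger)
//	slog.New(h).WithGroup("req").With("id", 7).Info("done")
//	// JSON output is roughly: {"msg":"done","req":{"id":7}}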
buf := bytes.NewBuffer(make([]byte, 0, 1024)) + if f.outputFormat == outputJSON { - buf.WriteByte('{') // for the whole line + buf.WriteByte('{') // for the whole record } + // Render builtins vals := builtins if hook := f.opts.RenderBuiltinsHook; hook != nil { vals = hook(f.sanitize(vals)) } - f.flatten(buf, vals, false, false) // keys are ours, no need to escape + f.flatten(buf, vals, false) // keys are ours, no need to escape continuing := len(builtins) > 0 - if f.parentValuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) + // Turn the inner-most group into a string + argsStr := func() string { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + vals = args + if hook := f.opts.RenderArgsHook; hook != nil { + vals = hook(f.sanitize(vals)) } - buf.WriteString(f.parentValuesStr) - continuing = true - } + f.flatten(buf, vals, true) // escape user-provided keys - groupDepth := f.groupDepth - if f.group != "" { - if f.valuesStr != "" || len(args) != 0 { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys - buf.WriteByte(f.colon()) - buf.WriteByte('{') // for the group - continuing = false - } else { - // The group was empty - groupDepth-- + return buf.String() + }() + + // Render the stack of groups from the inside out. + bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr) + for i := len(f.groups) - 1; i >= 0; i-- { + grp := &f.groups[i] + if grp.values == "" && bodyStr == "" { + // no contents, so we must elide the whole group + continue } + bodyStr = f.renderGroup(grp.name, grp.values, bodyStr) } - if f.valuesStr != "" { + if bodyStr != "" { if continuing { buf.WriteByte(f.comma()) } - buf.WriteString(f.valuesStr) - continuing = true + buf.WriteString(bodyStr) } - vals = args - if hook := f.opts.RenderArgsHook; hook != nil { - vals = hook(f.sanitize(vals)) + if f.outputFormat == outputJSON { + buf.WriteByte('}') // for the whole record } - f.flatten(buf, vals, continuing, true) // escape user-provided keys - for i := 0; i < groupDepth; i++ { - buf.WriteByte('}') // for the groups + return buf.String() +} + +// renderGroup returns a string representation of the named group with rendered +// values and args. If the name is empty, this will return the values and args, +// joined. If the name is not empty, this will return a single key-value pair, +// where the value is a grouping of the values and args. If the values and +// args are both empty, this will return an empty string, even if the name was +// specified. +func (f Formatter) renderGroup(name string, values string, args string) string { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + needClosingBrace := false + if name != "" && (values != "" || args != "") { + buf.WriteString(f.quoted(name, true)) // escape user-provided keys + buf.WriteByte(f.colon()) + buf.WriteByte('{') + needClosingBrace = true } - if f.outputFormat == outputJSON { - buf.WriteByte('}') // for the whole line + continuing := false + if values != "" { + buf.WriteString(values) + continuing = true + } + + if args != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(args) + } + + if needClosingBrace { + buf.WriteByte('}') } return buf.String() } -// flatten renders a list of key-value pairs into a buffer. If continuing is -// true, it assumes that the buffer has previous values and will emit a -// separator (which depends on the output format) before the first pair it -// writes. 
If escapeKeys is true, the keys are assumed to have -// non-JSON-compatible characters in them and must be evaluated for escapes. +// flatten renders a list of key-value pairs into a buffer. If escapeKeys is +// true, the keys are assumed to have non-JSON-compatible characters in them +// and must be evaluated for escapes. // // This function returns a potentially modified version of kvList, which // ensures that there is a value for every key (adding a value if needed) and // that each key is a string (substituting a key if needed). -func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any { +func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any { // This logic overlaps with sanitize() but saves one type-cast per key, // which can be measurable. if len(kvList)%2 != 0 { @@ -354,7 +386,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc } v := kvList[i+1] - if i > 0 || continuing { + if i > 0 { if f.outputFormat == outputJSON { buf.WriteByte(f.comma()) } else { @@ -766,46 +798,17 @@ func (f Formatter) sanitize(kvList []any) []any { // startGroup opens a new group scope (basically a sub-struct), which locks all // the current saved values and starts them anew. This is needed to satisfy // slog. -func (f *Formatter) startGroup(group string) { +func (f *Formatter) startGroup(name string) { // Unnamed groups are just inlined. - if group == "" { + if name == "" { return } - // Any saved values can no longer be changed. - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - continuing := false - - if f.parentValuesStr != "" { - buf.WriteString(f.parentValuesStr) - continuing = true - } - - if f.group != "" && f.valuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys - buf.WriteByte(f.colon()) - buf.WriteByte('{') // for the group - continuing = false - } - - if f.valuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.valuesStr) - } - - // NOTE: We don't close the scope here - that's done later, when a log line - // is actually rendered (because we have N scopes to close). - - f.parentValuesStr = buf.String() + n := len(f.groups) + f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr}) // Start collecting new values. - f.group = group - f.groupDepth++ + f.groupName = name f.valuesStr = "" f.values = nil } @@ -900,7 +903,7 @@ func (f *Formatter) AddValues(kvList []any) { // Pre-render values, so we don't have to do it on each Info/Error call. buf := bytes.NewBuffer(make([]byte, 0, 1024)) - f.flatten(buf, vals, false, true) // escape user-provided keys + f.flatten(buf, vals, true) // escape user-provided keys f.valuesStr = buf.String() } diff --git a/terraform/providers/google/vendor/github.com/golang/glog/glog_file.go b/terraform/providers/google/vendor/github.com/golang/glog/glog_file.go index e7d125c5ae4..8eb8b08c600 100644 --- a/terraform/providers/google/vendor/github.com/golang/glog/glog_file.go +++ b/terraform/providers/google/vendor/github.com/golang/glog/glog_file.go @@ -26,7 +26,6 @@ import ( "fmt" "io" "os" - "os/user" "path/filepath" "runtime" "strings" @@ -68,9 +67,8 @@ func init() { host = shortHostname(h) } - current, err := user.Current() - if err == nil { - userName = current.Username + if u := lookupUser(); u != "" { + userName = u } // Sanitize userName since it is used to construct file paths. 
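// (lookupUser is supplied per-platform below: the non-Windows variant simply
// wraps os/user.Current, while the Windows variant reads the process token
// directly, skipping the potentially expensive directory lookup of the
// display name. On any failure it returns "", which leaves the previous
// default in place, and the result is still sanitized before being used in
// file names.)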
userName = strings.Map(func(r rune) rune { @@ -369,9 +367,6 @@ func (s *fileSink) Flush() error { // flush flushes all logs of severity threshold or greater. func (s *fileSink) flush(threshold logsink.Severity) error { - s.mu.Lock() - defer s.mu.Unlock() - var firstErr error updateErr := func(err error) { if err != nil && firstErr == nil { @@ -379,13 +374,23 @@ func (s *fileSink) flush(threshold logsink.Severity) error { } } - // Flush from fatal down, in case there's trouble flushing. - for sev := logsink.Fatal; sev >= threshold; sev-- { - file := s.file[sev] - if file != nil { - updateErr(file.Flush()) - updateErr(file.Sync()) + // Remember where we flushed, so we can call sync without holding + // the lock. + var files []flushSyncWriter + func() { + s.mu.Lock() + defer s.mu.Unlock() + // Flush from fatal down, in case there's trouble flushing. + for sev := logsink.Fatal; sev >= threshold; sev-- { + if file := s.file[sev]; file != nil { + updateErr(file.Flush()) + files = append(files, file) + } } + }() + + for _, file := range files { + updateErr(file.Sync()) } return firstErr diff --git a/terraform/providers/google/vendor/github.com/golang/glog/glog_file_nonwindows.go b/terraform/providers/google/vendor/github.com/golang/glog/glog_file_nonwindows.go new file mode 100644 index 00000000000..d5cdb793c54 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/golang/glog/glog_file_nonwindows.go @@ -0,0 +1,12 @@ +//go:build !windows + +package glog + +import "os/user" + +func lookupUser() string { + if current, err := user.Current(); err == nil { + return current.Username + } + return "" +} diff --git a/terraform/providers/google/vendor/github.com/golang/glog/glog_file_windows.go b/terraform/providers/google/vendor/github.com/golang/glog/glog_file_windows.go new file mode 100644 index 00000000000..a9e4f609dfb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/golang/glog/glog_file_windows.go @@ -0,0 +1,30 @@ +//go:build windows + +package glog + +import ( + "syscall" +) + +// This follows the logic in the standard library's user.Current() function, except +// that it leaves out the potentially expensive calls required to look up the user's +// display name in Active Directory. +func lookupUser() string { + token, err := syscall.OpenCurrentProcessToken() + if err != nil { + return "" + } + defer token.Close() + tokenUser, err := token.GetTokenUser() + if err != nil { + return "" + } + username, _, accountType, err := tokenUser.User.Sid.LookupAccount("") + if err != nil { + return "" + } + if accountType != syscall.SidTypeUser { + return "" + } + return username +} diff --git a/terraform/providers/google/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/terraform/providers/google/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go deleted file mode 100644 index 0ef27d33deb..00000000000 --- a/terraform/providers/google/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ /dev/null @@ -1,62 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/any/any.proto - -package any - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/any.proto. 
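//
// This file, like the other ptypes packages removed below, contained only
// forwarding aliases for the canonical well-known types, so dropping it from
// the vendor tree means no remaining import references it. Code that still
// needs these types imports the canonical package directly, e.g. (a minimal
// sketch):
//
//	import anypb "google.golang.org/protobuf/types/known/anypb"
//
//	func example() *anypb.Any {
//		return &anypb.Any{TypeUrl: "type.googleapis.com/google.protobuf.Empty"}
//	}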
- -type Any = anypb.Any - -var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{ - 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, - 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() } -func file_github_com_golang_protobuf_ptypes_any_any_proto_init() { - if File_github_com_golang_protobuf_ptypes_any_any_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_any_any_proto = out.File - file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/terraform/providers/google/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go deleted file mode 100644 index d0079ee3ef3..00000000000 --- a/terraform/providers/google/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ /dev/null @@ -1,63 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/duration/duration.proto - -package duration - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/duration.proto. 
- -type Duration = durationpb.Duration - -var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{ - 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() } -func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() { - if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File - file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/terraform/providers/google/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go deleted file mode 100644 index 8d82abe2133..00000000000 --- a/terraform/providers/google/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go +++ /dev/null @@ -1,78 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/struct/struct.proto - -package structpb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - structpb "google.golang.org/protobuf/types/known/structpb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/struct.proto. 
- -type NullValue = structpb.NullValue - -const NullValue_NULL_VALUE = structpb.NullValue_NULL_VALUE - -var NullValue_name = structpb.NullValue_name -var NullValue_value = structpb.NullValue_value - -type Struct = structpb.Struct -type Value = structpb.Value -type Value_NullValue = structpb.Value_NullValue -type Value_NumberValue = structpb.Value_NumberValue -type Value_StringValue = structpb.Value_StringValue -type Value_BoolValue = structpb.Value_BoolValue -type Value_StructValue = structpb.Value_StructValue -type Value_ListValue = structpb.Value_ListValue -type ListValue = structpb.ListValue - -var File_github_com_golang_protobuf_ptypes_struct_struct_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_struct_struct_proto_rawDesc = []byte{ - 0x0a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, - 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, - 0x74, 0x3b, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_struct_struct_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_struct_struct_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_struct_struct_proto_init() } -func file_github_com_golang_protobuf_ptypes_struct_struct_proto_init() { - if File_github_com_golang_protobuf_ptypes_struct_struct_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_struct_struct_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_struct_struct_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_struct_struct_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_struct_struct_proto = out.File - file_github_com_golang_protobuf_ptypes_struct_struct_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_struct_struct_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_struct_struct_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/terraform/providers/google/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go deleted file mode 100644 index a76f8076009..00000000000 --- a/terraform/providers/google/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ /dev/null @@ -1,64 +0,0 @@ -// Code generated 
by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto - -package timestamp - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/timestamp.proto. - -type Timestamp = timestamppb.Timestamp - -var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ - 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, - 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } -func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { - if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/terraform/providers/google/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go deleted file mode 100644 index cc40f27ad30..00000000000 --- a/terraform/providers/google/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go +++ /dev/null @@ -1,71 
+0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/wrappers/wrappers.proto - -package wrappers - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/wrappers.proto. - -type DoubleValue = wrapperspb.DoubleValue -type FloatValue = wrapperspb.FloatValue -type Int64Value = wrapperspb.Int64Value -type UInt64Value = wrapperspb.UInt64Value -type Int32Value = wrapperspb.Int32Value -type UInt32Value = wrapperspb.UInt32Value -type BoolValue = wrapperspb.BoolValue -type StringValue = wrapperspb.StringValue -type BytesValue = wrapperspb.BytesValue - -var File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = []byte{ - 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2f, 0x77, 0x72, 0x61, - 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, - 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x3b, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, - 0x72, 0x73, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() } -func file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() { - if File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto = out.File - file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = nil -} diff --git 
a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go index 16278a1d995..fcd049de922 100644 --- a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/common/common.proto @@ -145,8 +145,8 @@ type Identity struct { // *Identity_SpiffeId // *Identity_Hostname // *Identity_Uid - // *Identity_MdbUsername - // *Identity_GaiaId + // *Identity_Username + // *Identity_GcpId IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` // Additional identity-specific attributes. Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` @@ -212,16 +212,16 @@ func (x *Identity) GetUid() string { return "" } -func (x *Identity) GetMdbUsername() string { - if x, ok := x.GetIdentityOneof().(*Identity_MdbUsername); ok { - return x.MdbUsername +func (x *Identity) GetUsername() string { + if x, ok := x.GetIdentityOneof().(*Identity_Username); ok { + return x.Username } return "" } -func (x *Identity) GetGaiaId() string { - if x, ok := x.GetIdentityOneof().(*Identity_GaiaId); ok { - return x.GaiaId +func (x *Identity) GetGcpId() string { + if x, ok := x.GetIdentityOneof().(*Identity_GcpId); ok { + return x.GcpId } return "" } @@ -252,14 +252,14 @@ type Identity_Uid struct { Uid string `protobuf:"bytes,4,opt,name=uid,proto3,oneof"` } -type Identity_MdbUsername struct { - // The MDB username of a connection endpoint. - MdbUsername string `protobuf:"bytes,5,opt,name=mdb_username,json=mdbUsername,proto3,oneof"` +type Identity_Username struct { + // The username of a connection endpoint. + Username string `protobuf:"bytes,5,opt,name=username,proto3,oneof"` } -type Identity_GaiaId struct { - // The Gaia ID of a connection endpoint. - GaiaId string `protobuf:"bytes,6,opt,name=gaia_id,json=gaiaId,proto3,oneof"` +type Identity_GcpId struct { + // The GCP ID of a connection endpoint. 
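// Because only the field name and JSON name change while the field number (6)
// and wire type stay the same, the binary wire format is identical to the old
// gaia_id field; only the generated Go identifiers and the JSON form differ.
// A hedged construction sketch (the commonpb import alias is illustrative):
//
//	id := &commonpb.Identity{
//		IdentityOneof: &commonpb.Identity_GcpId{GcpId: "1234"},
//	}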
+ GcpId string `protobuf:"bytes,6,opt,name=gcp_id,json=gcpId,proto3,oneof"` } func (*Identity_SpiffeId) isIdentity_IdentityOneof() {} @@ -268,9 +268,9 @@ func (*Identity_Hostname) isIdentity_IdentityOneof() {} func (*Identity_Uid) isIdentity_IdentityOneof() {} -func (*Identity_MdbUsername) isIdentity_IdentityOneof() {} +func (*Identity_Username) isIdentity_IdentityOneof() {} -func (*Identity_GaiaId) isIdentity_IdentityOneof() {} +func (*Identity_GcpId) isIdentity_IdentityOneof() {} var File_internal_proto_common_common_proto protoreflect.FileDescriptor @@ -278,38 +278,37 @@ var file_internal_proto_common_common_proto_rawDesc = []byte{ 0x0a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0xb1, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, + 0xa8, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x03, 0x75, 0x69, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x23, 0x0a, - 0x0c, 0x6d, 0x64, 0x62, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x64, 0x62, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x07, 0x67, 0x61, 0x69, 0x61, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x67, 0x61, 0x69, 0x61, 0x49, 0x64, 0x12, 0x43, 0x0a, - 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, - 0x65, 0x6f, 0x66, 0x2a, 0x5b, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, - 0x74, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, - 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, - 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, - 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x41, 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, - 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, - 0x2a, 0x24, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 
0x6e, 0x12, 0x0a, - 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x32, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, - 0x53, 0x31, 0x5f, 0x33, 0x10, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x1c, 0x0a, + 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x06, 0x67, + 0x63, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x67, + 0x63, 0x70, 0x49, 0x64, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0x5b, 0x0a, 0x0b, 0x43, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, + 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, + 0x00, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, 0x4d, + 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x41, + 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, 0x2a, 0x24, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x32, 0x10, + 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x33, 0x10, 0x01, 0x42, 0x36, 0x5a, + 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -326,7 +325,7 @@ func file_internal_proto_common_common_proto_rawDescGZIP() []byte { var file_internal_proto_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 2) var file_internal_proto_common_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_internal_proto_common_common_proto_goTypes = []interface{}{ +var file_internal_proto_common_common_proto_goTypes = []any{ (Ciphersuite)(0), // 0: s2a.proto.Ciphersuite (TLSVersion)(0), // 1: 
s2a.proto.TLSVersion (*Identity)(nil), // 2: s2a.proto.Identity @@ -347,7 +346,7 @@ func file_internal_proto_common_common_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_common_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_common_common_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Identity); i { case 0: return &v.state @@ -360,12 +359,12 @@ func file_internal_proto_common_common_proto_init() { } } } - file_internal_proto_common_common_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_internal_proto_common_common_proto_msgTypes[0].OneofWrappers = []any{ (*Identity_SpiffeId)(nil), (*Identity_Hostname)(nil), (*Identity_Uid)(nil), - (*Identity_MdbUsername)(nil), - (*Identity_GaiaId)(nil), + (*Identity_Username)(nil), + (*Identity_GcpId)(nil), } type x struct{} out := protoimpl.TypeBuilder{ diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go index f4f763ae102..2af3ee3dc1c 100644 --- a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/s2a_context/s2a_context.proto @@ -209,7 +209,7 @@ func file_internal_proto_s2a_context_s2a_context_proto_rawDescGZIP() []byte { } var file_internal_proto_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_internal_proto_s2a_context_s2a_context_proto_goTypes = []interface{}{ +var file_internal_proto_s2a_context_s2a_context_proto_goTypes = []any{ (*S2AContext)(nil), // 0: s2a.proto.S2AContext (common_go_proto.TLSVersion)(0), // 1: s2a.proto.TLSVersion (common_go_proto.Ciphersuite)(0), // 2: s2a.proto.Ciphersuite @@ -233,7 +233,7 @@ func file_internal_proto_s2a_context_s2a_context_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*S2AContext); i { case 0: return &v.state diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go index 0a86ebee592..8919232fd88 100644 --- a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/s2a/s2a.proto @@ -1171,7 +1171,7 @@ func file_internal_proto_s2a_s2a_proto_rawDescGZIP() []byte { } var file_internal_proto_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 10) -var file_internal_proto_s2a_s2a_proto_goTypes = []interface{}{ +var file_internal_proto_s2a_s2a_proto_goTypes = []any{ (*AuthenticationMechanism)(nil), // 0: s2a.proto.AuthenticationMechanism (*ClientSessionStartReq)(nil), // 1: s2a.proto.ClientSessionStartReq (*ServerSessionStartReq)(nil), // 2: s2a.proto.ServerSessionStartReq @@ -1226,7 +1226,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*AuthenticationMechanism); i { case 0: return &v.state @@ -1238,7 +1238,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ClientSessionStartReq); i { case 0: return &v.state @@ -1250,7 +1250,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ServerSessionStartReq); i { case 0: return &v.state @@ -1262,7 +1262,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*SessionNextReq); i { case 0: return &v.state @@ -1274,7 +1274,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*ResumptionTicketReq); i { case 0: return &v.state @@ -1286,7 +1286,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*SessionReq); i { case 0: return &v.state @@ -1298,7 +1298,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*SessionState); i { case 0: return &v.state @@ -1310,7 +1310,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*SessionResult); i { case 0: return &v.state @@ -1322,7 +1322,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[8].Exporter = 
func(v any, i int) any { switch v := v.(*SessionStatus); i { case 0: return &v.state @@ -1334,7 +1334,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*SessionResp); i { case 0: return &v.state @@ -1347,10 +1347,10 @@ func file_internal_proto_s2a_s2a_proto_init() { } } } - file_internal_proto_s2a_s2a_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_internal_proto_s2a_s2a_proto_msgTypes[0].OneofWrappers = []any{ (*AuthenticationMechanism_Token)(nil), } - file_internal_proto_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ + file_internal_proto_s2a_s2a_proto_msgTypes[5].OneofWrappers = []any{ (*SessionReq_ClientStart)(nil), (*SessionReq_ServerStart)(nil), (*SessionReq_Next)(nil), diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go index 0fa582fc874..8fac3841be5 100644 --- a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v3.21.12 // source: internal/proto/s2a/s2a.proto @@ -29,8 +29,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 const ( S2AService_SetUpSession_FullMethodName = "/s2a.proto.S2AService/SetUpSession" @@ -61,11 +61,12 @@ func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { } func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { - stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, cOpts...) 
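	// grpc.StaticMethod (part of the gRPC-Go v1.62-era API required by
	// protoc-gen-go-grpc v1.4.0) tags the call as using a method name that
	// is statically known from generated code, so instrumentation such as
	// stats handlers can treat it as a safe, low-cardinality metrics label.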
if err != nil { return nil, err } - x := &s2AServiceSetUpSessionClient{stream} + x := &s2AServiceSetUpSessionClient{ClientStream: stream} return x, nil } @@ -129,7 +130,7 @@ func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { } func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) + return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{ServerStream: stream}) } type S2AService_SetUpSessionServer interface { diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go index c84bed97748..e9aa5d14c0d 100644 --- a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/v2/common/common.proto @@ -256,62 +256,218 @@ func (AlpnProtocol) EnumDescriptor() ([]byte, []int) { return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{3} } +type Identity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to IdentityOneof: + // + // *Identity_SpiffeId + // *Identity_Hostname + // *Identity_Uid + // *Identity_Username + // *Identity_GcpId + IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` + // Additional identity-specific attributes. + Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Identity) Reset() { + *x = Identity{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_common_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Identity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Identity) ProtoMessage() {} + +func (x *Identity) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_common_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Identity.ProtoReflect.Descriptor instead. 
+func (*Identity) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{0} +} + +func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof { + if m != nil { + return m.IdentityOneof + } + return nil +} + +func (x *Identity) GetSpiffeId() string { + if x, ok := x.GetIdentityOneof().(*Identity_SpiffeId); ok { + return x.SpiffeId + } + return "" +} + +func (x *Identity) GetHostname() string { + if x, ok := x.GetIdentityOneof().(*Identity_Hostname); ok { + return x.Hostname + } + return "" +} + +func (x *Identity) GetUid() string { + if x, ok := x.GetIdentityOneof().(*Identity_Uid); ok { + return x.Uid + } + return "" +} + +func (x *Identity) GetUsername() string { + if x, ok := x.GetIdentityOneof().(*Identity_Username); ok { + return x.Username + } + return "" +} + +func (x *Identity) GetGcpId() string { + if x, ok := x.GetIdentityOneof().(*Identity_GcpId); ok { + return x.GcpId + } + return "" +} + +func (x *Identity) GetAttributes() map[string]string { + if x != nil { + return x.Attributes + } + return nil +} + +type isIdentity_IdentityOneof interface { + isIdentity_IdentityOneof() +} + +type Identity_SpiffeId struct { + // The SPIFFE ID of a connection endpoint. + SpiffeId string `protobuf:"bytes,1,opt,name=spiffe_id,json=spiffeId,proto3,oneof"` +} + +type Identity_Hostname struct { + // The hostname of a connection endpoint. + Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3,oneof"` +} + +type Identity_Uid struct { + // The UID of a connection endpoint. + Uid string `protobuf:"bytes,4,opt,name=uid,proto3,oneof"` +} + +type Identity_Username struct { + // The username of a connection endpoint. + Username string `protobuf:"bytes,5,opt,name=username,proto3,oneof"` +} + +type Identity_GcpId struct { + // The GCP ID of a connection endpoint. 
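// (Identical field number and wire type to the v1 message's gcp_id, so the
// two packages' Identity messages stay binary-compatible on the wire; the v2
// common proto now carries this message itself, mirroring the v1 shape with
// the renamed username and gcp_id fields.)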
+ GcpId string `protobuf:"bytes,6,opt,name=gcp_id,json=gcpId,proto3,oneof"` +} + +func (*Identity_SpiffeId) isIdentity_IdentityOneof() {} + +func (*Identity_Hostname) isIdentity_IdentityOneof() {} + +func (*Identity_Uid) isIdentity_IdentityOneof() {} + +func (*Identity_Username) isIdentity_IdentityOneof() {} + +func (*Identity_GcpId) isIdentity_IdentityOneof() {} + var File_internal_proto_v2_common_common_proto protoreflect.FileDescriptor var file_internal_proto_v2_common_common_proto_rawDesc = []byte{ 0x0a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2a, 0xee, 0x02, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, - 0x73, 0x75, 0x69, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, - 0x55, 0x49, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, - 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, - 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, - 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, - 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, - 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, - 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x39, 0x0a, 0x35, - 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, - 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, - 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, - 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, 0x45, - 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, - 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, - 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x22, 0xab, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, + 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x12, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, + 0x75, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x17, 0x0a, 0x06, 0x67, 0x63, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x48, 0x00, 0x52, 0x05, 0x67, 0x63, 0x70, 0x49, 0x64, 0x12, 0x46, 0x0a, 0x0a, 0x61, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x73, 
0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, + 0x65, 0x6f, 0x66, 0x2a, 0xee, 0x02, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, + 0x69, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, + 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, + 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, + 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, + 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, + 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, + 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x39, 0x0a, 0x35, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, - 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x37, 0x0a, - 0x33, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, - 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, - 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, - 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x2a, 0x7d, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, - 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, - 0x5f, 0x31, 0x5f, 0x30, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, - 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x54, - 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x32, 0x10, 0x03, - 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, - 0x31, 0x5f, 0x33, 0x10, 0x04, 0x2a, 0x69, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 0x4e, 0x45, - 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, - 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 
0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, - 0x2a, 0x79, 0x0a, 0x0c, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x12, 0x1d, 0x0a, 0x19, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, - 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x16, 0x0a, 0x12, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, - 0x5f, 0x47, 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x50, 0x4e, 0x5f, - 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x02, - 0x12, 0x19, 0x0a, 0x15, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, - 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x31, 0x5f, 0x31, 0x10, 0x03, 0x42, 0x39, 0x5a, 0x37, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, - 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, 0x48, + 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, + 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, + 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, + 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, + 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, + 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, + 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x37, 0x0a, 0x33, 0x43, + 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, + 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, 0x48, 0x41, + 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, 0x32, + 0x35, 0x36, 0x10, 0x06, 0x2a, 0x7d, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, + 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, + 0x5f, 0x30, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, + 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, + 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x32, 0x10, 0x03, 0x12, 0x13, + 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, + 0x33, 0x10, 0x04, 0x2a, 0x69, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 
0x4e, 0x45, 0x43, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, + 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x2a, 0x79, + 0x0a, 0x0c, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x1d, + 0x0a, 0x19, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, + 0x12, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x47, + 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, + 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x02, 0x12, 0x19, + 0x0a, 0x15, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, + 0x48, 0x54, 0x54, 0x50, 0x31, 0x5f, 0x31, 0x10, 0x03, 0x42, 0x39, 0x5a, 0x37, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, + 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -327,18 +483,22 @@ func file_internal_proto_v2_common_common_proto_rawDescGZIP() []byte { } var file_internal_proto_v2_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_internal_proto_v2_common_common_proto_goTypes = []interface{}{ +var file_internal_proto_v2_common_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_internal_proto_v2_common_common_proto_goTypes = []any{ (Ciphersuite)(0), // 0: s2a.proto.v2.Ciphersuite (TLSVersion)(0), // 1: s2a.proto.v2.TLSVersion (ConnectionSide)(0), // 2: s2a.proto.v2.ConnectionSide (AlpnProtocol)(0), // 3: s2a.proto.v2.AlpnProtocol + (*Identity)(nil), // 4: s2a.proto.v2.Identity + nil, // 5: s2a.proto.v2.Identity.AttributesEntry } var file_internal_proto_v2_common_common_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 5, // 0: s2a.proto.v2.Identity.attributes:type_name -> s2a.proto.v2.Identity.AttributesEntry + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name } func init() { file_internal_proto_v2_common_common_proto_init() } @@ -346,19 +506,41 @@ func file_internal_proto_v2_common_common_proto_init() { if File_internal_proto_v2_common_common_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_internal_proto_v2_common_common_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Identity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + } + file_internal_proto_v2_common_common_proto_msgTypes[0].OneofWrappers = []any{ + (*Identity_SpiffeId)(nil), + (*Identity_Hostname)(nil), + (*Identity_Uid)(nil), + (*Identity_Username)(nil), + (*Identity_GcpId)(nil), + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_internal_proto_v2_common_common_proto_rawDesc, NumEnums: 4, - NumMessages: 0, + NumMessages: 2, NumExtensions: 0, NumServices: 0, }, GoTypes: file_internal_proto_v2_common_common_proto_goTypes, DependencyIndexes: file_internal_proto_v2_common_common_proto_depIdxs, EnumInfos: file_internal_proto_v2_common_common_proto_enumTypes, + MessageInfos: file_internal_proto_v2_common_common_proto_msgTypes, }.Build() File_internal_proto_v2_common_common_proto = out.File file_internal_proto_v2_common_common_proto_rawDesc = nil diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go index b7fd871c7a7..418331a4bde 100644 --- a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go @@ -14,14 +14,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/v2/s2a_context/s2a_context.proto package s2a_context_go_proto import ( - common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" + common_go_proto "github.com/google/s2a-go/internal/proto/v2/common_go_proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -64,7 +64,7 @@ type S2AContext struct { // certificate chain was NOT validated successfully. PeerCertificateChainFingerprints []string `protobuf:"bytes,4,rep,name=peer_certificate_chain_fingerprints,json=peerCertificateChainFingerprints,proto3" json:"peer_certificate_chain_fingerprints,omitempty"` // The local identity used during session setup. - LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,9,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` // The SHA256 hash of the DER-encoding of the local leaf certificate used in // the handshake. 
LocalLeafCertFingerprint []byte `protobuf:"bytes,6,opt,name=local_leaf_cert_fingerprint,json=localLeafCertFingerprint,proto3" json:"local_leaf_cert_fingerprint,omitempty"` @@ -151,35 +151,36 @@ var file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc = []byte{ 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, - 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0xd9, 0x02, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, - 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, - 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x75, - 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x66, 0x43, - 0x65, 0x72, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x61, 0x66, 0x5f, - 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x44, 0x6e, 0x73, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x23, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, - 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x20, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, - 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, - 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, - 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4c, 0x65, 0x61, 0x66, - 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42, - 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x22, 0xee, 0x02, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x5f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x53, 0x70, 0x69, 0x66, + 0x66, 0x65, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, + 0x74, 0x5f, 0x75, 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x65, + 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, + 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, + 0x44, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x23, 0x70, 0x65, 0x65, 0x72, + 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, + 0x69, 0x6e, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x20, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, + 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3d, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, + 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, + 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x4c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, + 0x70, 0x72, 0x69, 0x6e, 0x74, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, + 0x08, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, + 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, + 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -195,12 +196,12 @@ func file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescGZIP() []byte { } var file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = []interface{}{ +var file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = []any{ (*S2AContext)(nil), // 0: s2a.proto.v2.S2AContext - (*common_go_proto.Identity)(nil), // 1: s2a.proto.Identity + (*common_go_proto.Identity)(nil), // 1: s2a.proto.v2.Identity } var file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs = []int32{ - 1, // 0: s2a.proto.v2.S2AContext.local_identity:type_name -> s2a.proto.Identity + 1, // 0: s2a.proto.v2.S2AContext.local_identity:type_name -> s2a.proto.v2.Identity 1, // [1:1] is the sub-list for method 
output_type 1, // [1:1] is the sub-list for method input_type 1, // [1:1] is the sub-list for extension type_name @@ -214,7 +215,7 @@ func file_internal_proto_v2_s2a_context_s2a_context_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*S2AContext); i { case 0: return &v.state diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go index e843450c7ed..548f31da2d5 100644 --- a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/v2/s2a/s2a.proto package s2a_go_proto import ( - common_go_proto1 "github.com/google/s2a-go/internal/proto/common_go_proto" common_go_proto "github.com/google/s2a-go/internal/proto/v2/common_go_proto" s2a_context_go_proto "github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -292,6 +291,12 @@ const ( // The connect-to-Google verification mode uses the trust bundle for // connecting to Google, e.g. *.mtls.googleapis.com endpoints. ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE ValidatePeerCertificateChainReq_VerificationMode = 2 + // Internal use only. + ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_3 ValidatePeerCertificateChainReq_VerificationMode = 3 + // Internal use only. + ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_4 ValidatePeerCertificateChainReq_VerificationMode = 4 + // Internal use only. + ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_5 ValidatePeerCertificateChainReq_VerificationMode = 5 ) // Enum value maps for ValidatePeerCertificateChainReq_VerificationMode. @@ -300,11 +305,17 @@ var ( 0: "UNSPECIFIED", 1: "SPIFFE", 2: "CONNECT_TO_GOOGLE", + 3: "RESERVED_CUSTOM_VERIFICATION_MODE_3", + 4: "RESERVED_CUSTOM_VERIFICATION_MODE_4", + 5: "RESERVED_CUSTOM_VERIFICATION_MODE_5", } ValidatePeerCertificateChainReq_VerificationMode_value = map[string]int32{ - "UNSPECIFIED": 0, - "SPIFFE": 1, - "CONNECT_TO_GOOGLE": 2, + "UNSPECIFIED": 0, + "SPIFFE": 1, + "CONNECT_TO_GOOGLE": 2, + "RESERVED_CUSTOM_VERIFICATION_MODE_3": 3, + "RESERVED_CUSTOM_VERIFICATION_MODE_4": 4, + "RESERVED_CUSTOM_VERIFICATION_MODE_5": 5, } ) @@ -454,7 +465,7 @@ type AuthenticationMechanism struct { // mechanism. Otherwise, S2A assumes that the authentication mechanism is // associated with the default identity. If the default identity cannot be // determined, the request is rejected. 
- Identity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + Identity *common_go_proto.Identity `protobuf:"bytes,3,opt,name=identity,proto3" json:"identity,omitempty"` // Types that are assignable to MechanismOneof: // // *AuthenticationMechanism_Token @@ -493,7 +504,7 @@ func (*AuthenticationMechanism) Descriptor() ([]byte, []int) { return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{1} } -func (x *AuthenticationMechanism) GetIdentity() *common_go_proto1.Identity { +func (x *AuthenticationMechanism) GetIdentity() *common_go_proto.Identity { if x != nil { return x.Identity } @@ -1185,7 +1196,7 @@ type SessionReq struct { // identity is not populated, S2A will try to deduce the managed identity to // use from the SNI extension. If that also fails, S2A uses the default // identity (if one exists). - LocalIdentity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,7,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` // The authentication mechanisms that the application wishes to use to // authenticate to S2A, ordered by preference. S2A will always use the first // authentication mechanism that matches the managed identity. @@ -1231,7 +1242,7 @@ func (*SessionReq) Descriptor() ([]byte, []int) { return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{11} } -func (x *SessionReq) GetLocalIdentity() *common_go_proto1.Identity { +func (x *SessionReq) GetLocalIdentity() *common_go_proto.Identity { if x != nil { return x.LocalIdentity } @@ -1790,358 +1801,365 @@ var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ 0x0a, 0x1f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, - 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, - 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0a, - 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x41, 0x6c, 0x70, 0x6e, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 
0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0x75, 0x0a, 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, - 0x12, 0x2f, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, - 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x36, 0x0a, 0x06, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x73, 0x22, 0x71, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x45, - 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x69, 0x64, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x69, 0x64, 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x6e, 0x69, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x73, 0x6e, 0x69, 0x22, 0xf1, 0x0b, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, - 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x78, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6c, - 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78, 0x0a, - 0x18, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, - 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x1a, 0xcf, 0x02, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, + 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0a, 0x41, 0x6c, 0x70, 0x6e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, + 0x6c, 0x70, 0x6e, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, + 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x73, 0x22, 0x7e, 0x0a, 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x32, 0x0a, 0x08, + 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, 0x68, + 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x4a, 0x04, 0x08, 0x01, 0x10, + 0x02, 0x22, 0x36, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x71, 0x0a, 0x16, 0x47, 0x65, 0x74, + 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x12, 0x45, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x6e, + 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x6e, 0x69, 0x22, 0xf1, 0x0b, 0x0a, + 0x17, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x78, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x63, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, - 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, - 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, - 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, - 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, - 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x1a, 0xfa, 0x06, 0x0a, 0x16, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, - 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 
0x2e, 0x73, - 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, - 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, - 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, - 0x69, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6c, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x75, - 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x74, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x93, 0x01, 0x0a, 0x1a, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x55, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x68, 0x65, 0x61, 0x64, - 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x61, 0x65, 0x61, 0x64, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x4f, 0x76, 0x65, 0x72, 0x68, 0x65, - 0x61, 0x64, 0x4f, 0x66, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x65, 0x61, 0x64, 0x12, 0x39, - 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x61, - 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x9e, 0x02, 0x0a, 0x18, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, - 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, - 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x2e, 0x0a, 0x2a, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, - 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, - 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x12, 0x29, 0x0a, 0x25, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, - 0x45, 0x52, 
0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x56, - 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x03, 0x12, 0x3a, 0x0a, 0x36, 0x52, 0x45, 0x51, 0x55, 0x45, - 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, - 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, - 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, - 0x59, 0x10, 0x04, 0x12, 0x35, 0x0a, 0x31, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x41, - 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, - 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, - 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, - 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0x13, 0x0a, 0x11, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb0, 0x03, 0x0a, 0x1d, - 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x5d, 0x0a, - 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, - 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x50, 0x72, + 0x6f, 0x6e, 0x12, 0x78, 0x0a, 0x18, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x6c, 0x73, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcf, 0x02, 0x0a, + 0x16, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, + 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, + 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 
0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, + 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, + 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, + 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x1a, 0xfa, + 0x06, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, + 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, + 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, + 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, + 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, + 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, + 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, + 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, + 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6c, 0x73, + 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x74, 0x6c, 0x73, 0x52, 0x65, + 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, + 0x93, 0x01, 0x0a, 0x1a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x55, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x76, 0x65, + 0x72, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, + 0x61, 0x65, 0x61, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x4f, + 0x76, 0x65, 0x72, 0x68, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x41, + 0x65, 0x61, 0x64, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x9e, + 0x02, 0x0a, 0x18, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, + 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x10, + 0x01, 0x12, 0x2e, 0x0a, 0x2a, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, + 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, + 0x02, 0x12, 0x29, 0x0a, 0x25, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, + 0x41, 0x4e, 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x03, 0x12, 0x3a, 0x0a, 0x36, + 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, + 0x49, 0x52, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, + 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, + 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x04, 0x12, 0x35, 0x0a, 0x31, 0x52, 0x45, 0x51, 0x55, + 0x45, 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, + 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, + 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x05, 0x4a, + 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0x13, 0x0a, 0x11, 0x74, + 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0xb0, 0x03, 0x0a, 0x1d, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x12, 0x5d, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x52, + 0x65, 0x71, 0x2e, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x51, 0x0a, 0x13, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, + 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, + 0x52, 0x12, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1d, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x64, 0x69, + 0x67, 0x65, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, + 0x61, 0x32, 0x35, 0x36, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, + 0x61, 0x33, 0x38, 0x34, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x44, 0x69, 0x67, 0x65, 0x73, + 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x5f, 0x64, 0x69, 0x67, 0x65, + 0x73, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x35, + 0x31, 0x32, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x3d, 0x0a, 0x13, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x08, 0x0a, 0x04, 0x53, 0x49, 0x47, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, + 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x22, 0x3d, 0x0a, 0x1e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x13, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, - 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x73, 0x32, 0x61, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x12, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, - 0x1d, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x25, - 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x44, - 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x5f, - 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, - 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, - 0x73, 0x68, 
0x61, 0x35, 0x31, 0x32, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x44, 0x69, 0x67, - 0x65, 0x73, 0x74, 0x22, 0x3d, 0x0a, 0x13, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, - 0x49, 0x47, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, - 0x10, 0x02, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x3d, - 0x0a, 0x1e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe7, 0x01, - 0x0a, 0x20, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x12, 0x63, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x45, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, - 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, - 0x65, 0x73, 0x22, 0x43, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, - 0x07, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, - 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x22, 0x40, 0x0a, 0x21, 0x4f, 0x66, 0x66, 0x6c, 0x6f, - 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, - 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf8, 0x04, 0x0a, 0x1f, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x52, 0x0a, - 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x73, 0x32, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x22, 0xe7, 0x01, 0x0a, 0x20, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, + 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 
0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x63, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x45, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, + 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, + 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, + 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x43, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6d, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x01, 0x12, + 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x22, 0x40, 0x0a, 0x21, + 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf4, + 0x05, 0x0a, 0x1f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, + 0x65, 0x71, 0x12, 0x52, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x3e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, + 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x5b, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, - 0x65, 0x12, 0x5b, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, - 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, - 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, - 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 
0x65, 0x65, 0x72, 0x12, 0x5b, - 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, - 0x65, 0x71, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, 0x52, - 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x39, 0x0a, 0x0a, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x1a, 0xb5, 0x01, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, - 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, - 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x68, 0x6f, 0x73, - 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x25, 0x73, - 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x72, 0x65, 0x73, 0x74, - 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x73, 0x65, 0x72, 0x69, - 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, - 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x46, - 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, - 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x10, 0x01, 0x12, - 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x5f, 0x54, 0x4f, 0x5f, 0x47, 0x4f, - 0x4f, 0x47, 0x4c, 0x45, 0x10, 0x02, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6f, - 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 
0x64, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, - 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x0f, - 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, - 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0x97, 0x05, 0x0a, 0x0a, 0x53, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, - 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x18, - 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, - 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, - 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, - 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x77, 0x0a, 0x21, 0x6f, - 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, - 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, - 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, - 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, - 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, - 0x6f, 0x70, 0x65, 
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, + 0x65, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x65, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, + 0x65, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, + 0x1a, 0x39, 0x0a, 0x0a, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, + 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x1a, 0xb5, 0x01, 0x0a, 0x0a, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x51, 0x0a, 0x25, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x75, + 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x22, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, + 0x74, 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x22, 0xc1, 0x01, 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, + 0x46, 0x46, 0x45, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, + 0x5f, 0x54, 0x4f, 0x5f, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x10, 0x02, 0x12, 0x27, 0x0a, 0x23, + 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, + 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, + 0x45, 0x5f, 0x33, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, + 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 
0x4d, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x34, 0x10, 0x04, 0x12, 0x27, + 0x0a, 0x23, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, + 0x4d, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, + 0x4f, 0x44, 0x45, 0x5f, 0x35, 0x10, 0x05, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, + 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, + 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, + 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0xa0, 0x05, 0x0a, 0x0a, 0x53, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3d, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, + 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, + 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, + 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, + 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, + 0x73, 0x6d, 0x52, 0x18, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, + 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, + 0x77, 0x0a, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x72, 0x65, 0x71, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, + 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, + 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x71, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, - 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, - 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, - 0x65, 0x6f, 0x66, 0x22, 0xb4, 0x04, 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 
0x17, - 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, - 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, - 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, - 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, - 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, - 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, + 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, + 0x65, 0x71, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, - 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a, - 0x72, 0x65, 0x73, 0x70, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, - 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, - 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x68, 0x61, 0x69, 
0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, + 0x71, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xb4, 0x04, + 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, + 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, + 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, + 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, + 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, + 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, + 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, + 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, + 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, + 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 
0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, + 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a, 0x72, 0x65, 0x73, 0x70, 0x5f, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, + 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, + 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, + 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, + 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, + 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, - 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, - 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, - 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, - 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, - 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, - 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, - 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, - 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, - 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52, - 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, - 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, - 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, - 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, + 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, + 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, + 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, + 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, + 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, + 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, + 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, + 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, + 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, + 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, + 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 
0x53, 0x48, 0x41, 0x32, 0x35, + 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, - 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, + 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, - 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12, - 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, - 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, - 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32, - 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, - 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, + 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, + 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32, 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, + 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, - 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, + 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, + 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, + 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -2158,7 +2176,7 @@ func file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP() []byte { var file_internal_proto_v2_s2a_s2a_proto_enumTypes = make([]protoimpl.EnumInfo, 6) var file_internal_proto_v2_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 17) -var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{ +var file_internal_proto_v2_s2a_s2a_proto_goTypes = []any{ (SignatureAlgorithm)(0), // 0: s2a.proto.v2.SignatureAlgorithm 
(GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate)(0), // 1: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.RequestClientCertificate (OffloadPrivateKeyOperationReq_PrivateKeyOperation)(0), // 2: s2a.proto.v2.OffloadPrivateKeyOperationReq.PrivateKeyOperation @@ -2183,7 +2201,7 @@ var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{ (*ValidatePeerCertificateChainReq_ClientPeer)(nil), // 21: s2a.proto.v2.ValidatePeerCertificateChainReq.ClientPeer (*ValidatePeerCertificateChainReq_ServerPeer)(nil), // 22: s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer (common_go_proto.AlpnProtocol)(0), // 23: s2a.proto.v2.AlpnProtocol - (*common_go_proto1.Identity)(nil), // 24: s2a.proto.Identity + (*common_go_proto.Identity)(nil), // 24: s2a.proto.v2.Identity (common_go_proto.ConnectionSide)(0), // 25: s2a.proto.v2.ConnectionSide (*s2a_context_go_proto.S2AContext)(nil), // 26: s2a.proto.v2.S2AContext (common_go_proto.TLSVersion)(0), // 27: s2a.proto.v2.TLSVersion @@ -2191,7 +2209,7 @@ var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{ } var file_internal_proto_v2_s2a_s2a_proto_depIdxs = []int32{ 23, // 0: s2a.proto.v2.AlpnPolicy.alpn_protocols:type_name -> s2a.proto.v2.AlpnProtocol - 24, // 1: s2a.proto.v2.AuthenticationMechanism.identity:type_name -> s2a.proto.Identity + 24, // 1: s2a.proto.v2.AuthenticationMechanism.identity:type_name -> s2a.proto.v2.Identity 25, // 2: s2a.proto.v2.GetTlsConfigurationReq.connection_side:type_name -> s2a.proto.v2.ConnectionSide 19, // 3: s2a.proto.v2.GetTlsConfigurationResp.client_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration 20, // 4: s2a.proto.v2.GetTlsConfigurationResp.server_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration @@ -2203,7 +2221,7 @@ var file_internal_proto_v2_s2a_s2a_proto_depIdxs = []int32{ 22, // 10: s2a.proto.v2.ValidatePeerCertificateChainReq.server_peer:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer 5, // 11: s2a.proto.v2.ValidatePeerCertificateChainResp.validation_result:type_name -> s2a.proto.v2.ValidatePeerCertificateChainResp.ValidationResult 26, // 12: s2a.proto.v2.ValidatePeerCertificateChainResp.context:type_name -> s2a.proto.v2.S2AContext - 24, // 13: s2a.proto.v2.SessionReq.local_identity:type_name -> s2a.proto.Identity + 24, // 13: s2a.proto.v2.SessionReq.local_identity:type_name -> s2a.proto.v2.Identity 7, // 14: s2a.proto.v2.SessionReq.authentication_mechanisms:type_name -> s2a.proto.v2.AuthenticationMechanism 9, // 15: s2a.proto.v2.SessionReq.get_tls_configuration_req:type_name -> s2a.proto.v2.GetTlsConfigurationReq 11, // 16: s2a.proto.v2.SessionReq.offload_private_key_operation_req:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationReq @@ -2238,7 +2256,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_v2_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*AlpnPolicy); i { case 0: return &v.state @@ -2250,7 +2268,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*AuthenticationMechanism); i { case 0: return &v.state @@ -2262,7 +2280,7 @@ func 
file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Status); i { case 0: return &v.state @@ -2274,7 +2292,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*GetTlsConfigurationReq); i { case 0: return &v.state @@ -2286,7 +2304,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*GetTlsConfigurationResp); i { case 0: return &v.state @@ -2298,7 +2316,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*OffloadPrivateKeyOperationReq); i { case 0: return &v.state @@ -2310,7 +2328,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*OffloadPrivateKeyOperationResp); i { case 0: return &v.state @@ -2322,7 +2340,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*OffloadResumptionKeyOperationReq); i { case 0: return &v.state @@ -2334,7 +2352,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*OffloadResumptionKeyOperationResp); i { case 0: return &v.state @@ -2346,7 +2364,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*ValidatePeerCertificateChainReq); i { case 0: return &v.state @@ -2358,7 +2376,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*ValidatePeerCertificateChainResp); i { case 0: return &v.state @@ -2370,7 +2388,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*SessionReq); i { case 0: return &v.state @@ -2382,7 +2400,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return 
nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*SessionResp); i { case 0: return &v.state @@ -2394,7 +2412,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*GetTlsConfigurationResp_ClientTlsConfiguration); i { case 0: return &v.state @@ -2406,7 +2424,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*GetTlsConfigurationResp_ServerTlsConfiguration); i { case 0: return &v.state @@ -2418,7 +2436,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*ValidatePeerCertificateChainReq_ClientPeer); i { case 0: return &v.state @@ -2430,7 +2448,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*ValidatePeerCertificateChainReq_ServerPeer); i { case 0: return &v.state @@ -2443,30 +2461,30 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { } } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].OneofWrappers = []any{ (*AuthenticationMechanism_Token)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].OneofWrappers = []any{ (*GetTlsConfigurationResp_ClientTlsConfiguration_)(nil), (*GetTlsConfigurationResp_ServerTlsConfiguration_)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].OneofWrappers = []any{ (*OffloadPrivateKeyOperationReq_RawBytes)(nil), (*OffloadPrivateKeyOperationReq_Sha256Digest)(nil), (*OffloadPrivateKeyOperationReq_Sha384Digest)(nil), (*OffloadPrivateKeyOperationReq_Sha512Digest)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].OneofWrappers = []any{ (*ValidatePeerCertificateChainReq_ClientPeer_)(nil), (*ValidatePeerCertificateChainReq_ServerPeer_)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].OneofWrappers = []any{ (*SessionReq_GetTlsConfigurationReq)(nil), (*SessionReq_OffloadPrivateKeyOperationReq)(nil), (*SessionReq_OffloadResumptionKeyOperationReq)(nil), (*SessionReq_ValidatePeerCertificateChainReq)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].OneofWrappers = []any{ (*SessionResp_GetTlsConfigurationResp)(nil), (*SessionResp_OffloadPrivateKeyOperationResp)(nil), 
(*SessionResp_OffloadResumptionKeyOperationResp)(nil), diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go index 2566df6c304..c93f75a78b0 100644 --- a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v3.21.12 // source: internal/proto/v2/s2a/s2a.proto @@ -29,8 +29,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 const ( S2AService_SetUpSession_FullMethodName = "/s2a.proto.v2.S2AService/SetUpSession" @@ -54,11 +54,12 @@ func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { } func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { - stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &s2AServiceSetUpSessionClient{stream} + x := &s2AServiceSetUpSessionClient{ClientStream: stream} return x, nil } @@ -115,7 +116,7 @@ func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { } func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) + return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{ServerStream: stream}) } type S2AService_SetUpSessionServer interface { diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/record.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/record.go index c60515510a7..e76509ef01a 100644 --- a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/record.go +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/record.go @@ -378,11 +378,6 @@ func (p *conn) Read(b []byte) (n int, err error) { if len(p.handshakeBuf) > 0 { return 0, errors.New("application data received while processing fragmented handshake messages") } - if p.ticketState == receivingTickets { - p.ticketState = notReceivingTickets - grpclog.Infof("Sending session tickets to S2A.") - p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete) - } case alert: return 0, p.handleAlertMessage() case handshake: @@ -500,17 +495,7 @@ func (p *conn) buildRecord(plaintext []byte, recordType byte, recordStartIndex i } func (p *conn) Close() error { - p.readMutex.Lock() - defer p.readMutex.Unlock() - p.writeMutex.Lock() - defer p.writeMutex.Unlock() - // If p.ticketState is equal to notReceivingTickets, then S2A has - // been sent a flight of session tickets, and we must wait for the - // call to S2A to complete 
before closing the record protocol. - if p.ticketState == notReceivingTickets { - <-p.callComplete - grpclog.Infof("Safe to close the connection because sending tickets to S2A is (already) complete.") - } + // Close the connection immediately. return p.Conn.Close() } @@ -663,7 +648,7 @@ func (p *conn) handleHandshakeMessage() error { // Several handshake messages may be coalesced into a single record. // Continue reading them until the handshake buffer is empty. for len(p.handshakeBuf) > 0 { - handshakeMsgType, msgLen, msg, rawMsg, ok := p.parseHandshakeMsg() + handshakeMsgType, msgLen, msg, _, ok := p.parseHandshakeMsg() if !ok { // The handshake could not be fully parsed, so read in another // record and try again later. @@ -681,20 +666,7 @@ func (p *conn) handleHandshakeMessage() error { return err } case tlsHandshakeNewSessionTicketType: - // Ignore tickets that are received after a batch of tickets has - // been sent to S2A. - if p.ticketState == notReceivingTickets { - continue - } - if p.ticketState == ticketsNotYetReceived { - p.ticketState = receivingTickets - } - p.sessionTickets = append(p.sessionTickets, rawMsg) - if len(p.sessionTickets) == maxAllowedTickets { - p.ticketState = notReceivingTickets - grpclog.Infof("Sending session tickets to S2A.") - p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete) - } + // Do nothing for session ticket. default: return errors.New("unknown handshake message type") } diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go index ec96ba3b6a6..4057e70c8ad 100644 --- a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go @@ -23,7 +23,8 @@ import ( "fmt" "os" - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" ) const ( @@ -37,7 +38,7 @@ type AccessTokenManager interface { DefaultToken() (token string, err error) // Token returns a token that an application with local identity equal to // identity must use to authenticate to S2A. - Token(identity *commonpb.Identity) (token string, err error) + Token(identity interface{}) (token string, err error) } type singleTokenAccessTokenManager struct { @@ -65,6 +66,14 @@ func (m *singleTokenAccessTokenManager) DefaultToken() (string, error) { } // Token always returns the token managed by the singleTokenAccessTokenManager. -func (m *singleTokenAccessTokenManager) Token(*commonpb.Identity) (string, error) { +func (m *singleTokenAccessTokenManager) Token(identity interface{}) (string, error) { + switch v := identity.(type) { + case *commonpbv1.Identity: + // valid type. + case *commonpb.Identity: + // valid type. 
+ default: + return "", fmt.Errorf("Incorrect identity type: %v", v) + } return m.token, nil } diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/s2av2.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/s2av2.go index 85a8379d833..a6402ee48cc 100644 --- a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/s2av2.go +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/s2av2.go @@ -28,7 +28,6 @@ import ( "os" "time" - "github.com/golang/protobuf/proto" "github.com/google/s2a-go/fallback" "github.com/google/s2a-go/internal/handshaker/service" "github.com/google/s2a-go/internal/tokenmanager" @@ -38,8 +37,9 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/protobuf/proto" - commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ) @@ -59,9 +59,9 @@ type s2av2TransportCreds struct { transportCreds credentials.TransportCredentials tokenManager *tokenmanager.AccessTokenManager // localIdentity should only be used by the client. - localIdentity *commonpbv1.Identity + localIdentity *commonpb.Identity // localIdentities should only be used by the server. - localIdentities []*commonpbv1.Identity + localIdentities []*commonpb.Identity verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode fallbackClientHandshake fallback.ClientHandshake getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) @@ -70,7 +70,7 @@ type s2av2TransportCreds struct { // NewClientCreds returns a client-side transport credentials object that uses // the S2Av2 to establish a secure connection with a server. -func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { +func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { // Create an AccessTokenManager instance to use to authenticate to S2Av2. accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() @@ -101,7 +101,7 @@ func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCre // NewServerCreds returns a server-side transport credentials object that uses // the S2Av2 to establish a secure connection with a client. 
-func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) { +func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) { // Create an AccessTokenManager instance to use to authenticate to S2Av2. accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() creds := &s2av2TransportCreds{ @@ -183,13 +183,7 @@ func (c *s2av2TransportCreds) ClientHandshake(ctx context.Context, serverAuthori } creds := credentials.NewTLS(config) - var conn net.Conn - var authInfo credentials.AuthInfo - retry.Run(timeoutCtx, - func() error { - conn, authInfo, err = creds.ClientHandshake(timeoutCtx, serverName, rawConn) - return err - }) + conn, authInfo, err := creds.ClientHandshake(timeoutCtx, serverName, rawConn) if err != nil { grpclog.Infof("Failed to do client handshake using S2Av2: %v", err) if c.fallbackClientHandshake != nil { @@ -197,7 +191,7 @@ func (c *s2av2TransportCreds) ClientHandshake(ctx context.Context, serverAuthori } return nil, nil, err } - grpclog.Infof("Successfully done client handshake using S2Av2 to: %s", serverName) + grpclog.Infof("client-side handshake is done using S2Av2 to: %s", serverName) return conn, authInfo, err } @@ -247,13 +241,7 @@ func (c *s2av2TransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, crede } creds := credentials.NewTLS(config) - var conn net.Conn - var authInfo credentials.AuthInfo - retry.Run(ctx, - func() error { - conn, authInfo, err = creds.ServerHandshake(rawConn) - return err - }) + conn, authInfo, err := creds.ServerHandshake(rawConn) if err != nil { grpclog.Infof("Failed to do server handshake using S2Av2: %v", err) return nil, nil, err @@ -280,15 +268,15 @@ func (c *s2av2TransportCreds) Clone() credentials.TransportCredentials { tokenManager = *c.tokenManager } verificationMode := c.verificationMode - var localIdentity *commonpbv1.Identity + var localIdentity *commonpb.Identity if c.localIdentity != nil { - localIdentity = proto.Clone(c.localIdentity).(*commonpbv1.Identity) + localIdentity = proto.Clone(c.localIdentity).(*commonpb.Identity) } - var localIdentities []*commonpbv1.Identity + var localIdentities []*commonpb.Identity if c.localIdentities != nil { - localIdentities = make([]*commonpbv1.Identity, len(c.localIdentities)) + localIdentities = make([]*commonpb.Identity, len(c.localIdentities)) for i, localIdentity := range c.localIdentities { - localIdentities[i] = proto.Clone(localIdentity).(*commonpbv1.Identity) + localIdentities[i] = proto.Clone(localIdentity).(*commonpb.Identity) } } creds := &s2av2TransportCreds{ diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go index 4d919132295..fa0002e36b7 100644 --- a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go +++ 
b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go @@ -33,7 +33,6 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" - commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ) @@ -44,8 +43,8 @@ const ( ) // GetTLSConfigurationForClient returns a tls.Config instance for use by a client application. -func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, serverAuthorizationPolicy []byte) (*tls.Config, error) { - authMechanisms := getAuthMechanisms(tokenManager, []*commonpbv1.Identity{localIdentity}) +func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentity *commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, serverAuthorizationPolicy []byte) (*tls.Config, error) { + authMechanisms := getAuthMechanisms(tokenManager, []*commonpb.Identity{localIdentity}) if grpclog.V(1) { grpclog.Infof("Sending request to S2Av2 for client TLS config.") @@ -126,7 +125,7 @@ func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStr } // GetTLSConfigurationForServer returns a tls.Config instance for use by a server application. -func GetTLSConfigurationForServer(s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode) (*tls.Config, error) { +func GetTLSConfigurationForServer(s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode) (*tls.Config, error) { return &tls.Config{ GetConfigForClient: ClientConfig(tokenManager, localIdentities, verificationMode, s2AStream), }, nil @@ -136,7 +135,7 @@ func GetTLSConfigurationForServer(s2AStream stream.S2AStream, tokenManager token // connection with a client, based on SNI communicated during ClientHello. // Ensures that server presents the correct certificate to establish a TLS // connection. 
-func ClientConfig(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(chi *tls.ClientHelloInfo) (*tls.Config, error) { +func ClientConfig(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(chi *tls.ClientHelloInfo) (*tls.Config, error) { return func(chi *tls.ClientHelloInfo) (*tls.Config, error) { tlsConfig, err := getServerConfigFromS2Av2(tokenManager, localIdentities, chi.ServerName, s2AStream) if err != nil { @@ -219,9 +218,9 @@ func getTLSCipherSuite(tlsCipherSuite commonpb.Ciphersuite) uint16 { } } -func getServerConfigFromS2Av2(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, sni string, s2AStream stream.S2AStream) (*s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration, error) { +func getServerConfigFromS2Av2(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpb.Identity, sni string, s2AStream stream.S2AStream) (*s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration, error) { authMechanisms := getAuthMechanisms(tokenManager, localIdentities) - var locID *commonpbv1.Identity + var locID *commonpb.Identity if localIdentities != nil { locID = localIdentities[0] } @@ -283,7 +282,7 @@ func getTLSClientAuthType(tlsConfig *s2av2pb.GetTlsConfigurationResp_ServerTlsCo return clientAuth } -func getAuthMechanisms(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity) []*s2av2pb.AuthenticationMechanism { +func getAuthMechanisms(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpb.Identity) []*s2av2pb.AuthenticationMechanism { if tokenManager == nil { return nil } diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/s2a.go b/terraform/providers/google/vendor/github.com/google/s2a-go/s2a.go index 5ecb06f930e..cc79bd09a67 100644 --- a/terraform/providers/google/vendor/github.com/google/s2a-go/s2a.go +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/s2a.go @@ -29,7 +29,6 @@ import ( "sync" "time" - "github.com/golang/protobuf/proto" "github.com/google/s2a-go/fallback" "github.com/google/s2a-go/internal/handshaker" "github.com/google/s2a-go/internal/handshaker/service" @@ -38,8 +37,10 @@ import ( "github.com/google/s2a-go/retry" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/protobuf/proto" - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ) @@ -54,17 +55,17 @@ const ( // credentials.TransportCredentials interface. type s2aTransportCreds struct { info *credentials.ProtocolInfo - minTLSVersion commonpb.TLSVersion - maxTLSVersion commonpb.TLSVersion + minTLSVersion commonpbv1.TLSVersion + maxTLSVersion commonpbv1.TLSVersion // tlsCiphersuites contains the ciphersuites used in the S2A connection. // Note that these are currently unconfigurable. - tlsCiphersuites []commonpb.Ciphersuite + tlsCiphersuites []commonpbv1.Ciphersuite // localIdentity should only be used by the client. - localIdentity *commonpb.Identity + localIdentity *commonpbv1.Identity // localIdentities should only be used by the server. 
- localIdentities []*commonpb.Identity + localIdentities []*commonpbv1.Identity // targetIdentities should only be used by the client. - targetIdentities []*commonpb.Identity + targetIdentities []*commonpbv1.Identity isClient bool s2aAddr string ensureProcessSessionTickets *sync.WaitGroup @@ -76,7 +77,7 @@ func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, erro if opts == nil { return nil, errors.New("nil client options") } - var targetIdentities []*commonpb.Identity + var targetIdentities []*commonpbv1.Identity for _, targetIdentity := range opts.TargetIdentities { protoTargetIdentity, err := toProtoIdentity(targetIdentity) if err != nil { @@ -93,12 +94,12 @@ func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, erro info: &credentials.ProtocolInfo{ SecurityProtocol: s2aSecurityProtocol, }, - minTLSVersion: commonpb.TLSVersion_TLS1_3, - maxTLSVersion: commonpb.TLSVersion_TLS1_3, - tlsCiphersuites: []commonpb.Ciphersuite{ - commonpb.Ciphersuite_AES_128_GCM_SHA256, - commonpb.Ciphersuite_AES_256_GCM_SHA384, - commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, + minTLSVersion: commonpbv1.TLSVersion_TLS1_3, + maxTLSVersion: commonpbv1.TLSVersion_TLS1_3, + tlsCiphersuites: []commonpbv1.Ciphersuite{ + commonpbv1.Ciphersuite_AES_128_GCM_SHA256, + commonpbv1.Ciphersuite_AES_256_GCM_SHA384, + commonpbv1.Ciphersuite_CHACHA20_POLY1305_SHA256, }, localIdentity: localIdentity, targetIdentities: targetIdentities, @@ -112,7 +113,11 @@ func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, erro if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackClientHandshakeFunc != nil { fallbackFunc = opts.FallbackOpts.FallbackClientHandshakeFunc } - return v2.NewClientCreds(opts.S2AAddress, opts.TransportCreds, localIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy) + v2LocalIdentity, err := toV2ProtoIdentity(opts.LocalIdentity) + if err != nil { + return nil, err + } + return v2.NewClientCreds(opts.S2AAddress, opts.TransportCreds, v2LocalIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy) } // NewServerCreds returns a server-side transport credentials object that uses @@ -121,7 +126,7 @@ func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, erro if opts == nil { return nil, errors.New("nil server options") } - var localIdentities []*commonpb.Identity + var localIdentities []*commonpbv1.Identity for _, localIdentity := range opts.LocalIdentities { protoLocalIdentity, err := toProtoIdentity(localIdentity) if err != nil { @@ -134,12 +139,12 @@ func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, erro info: &credentials.ProtocolInfo{ SecurityProtocol: s2aSecurityProtocol, }, - minTLSVersion: commonpb.TLSVersion_TLS1_3, - maxTLSVersion: commonpb.TLSVersion_TLS1_3, - tlsCiphersuites: []commonpb.Ciphersuite{ - commonpb.Ciphersuite_AES_128_GCM_SHA256, - commonpb.Ciphersuite_AES_256_GCM_SHA384, - commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, + minTLSVersion: commonpbv1.TLSVersion_TLS1_3, + maxTLSVersion: commonpbv1.TLSVersion_TLS1_3, + tlsCiphersuites: []commonpbv1.Ciphersuite{ + commonpbv1.Ciphersuite_AES_128_GCM_SHA256, + commonpbv1.Ciphersuite_AES_256_GCM_SHA384, + commonpbv1.Ciphersuite_CHACHA20_POLY1305_SHA256, }, localIdentities: localIdentities, isClient: false, @@ -147,7 +152,15 @@ func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, erro }, nil } verificationMode := 
getVerificationMode(opts.VerificationMode) - return v2.NewServerCreds(opts.S2AAddress, opts.TransportCreds, localIdentities, verificationMode, opts.getS2AStream) + var v2LocalIdentities []*commonpb.Identity + for _, localIdentity := range opts.LocalIdentities { + protoLocalIdentity, err := toV2ProtoIdentity(localIdentity) + if err != nil { + return nil, err + } + v2LocalIdentities = append(v2LocalIdentities, protoLocalIdentity) + } + return v2.NewServerCreds(opts.S2AAddress, opts.TransportCreds, v2LocalIdentities, verificationMode, opts.getS2AStream) } // ClientHandshake initiates a client-side TLS handshake using the S2A. @@ -248,22 +261,22 @@ func (c *s2aTransportCreds) Info() credentials.ProtocolInfo { func (c *s2aTransportCreds) Clone() credentials.TransportCredentials { info := *c.info - var localIdentity *commonpb.Identity + var localIdentity *commonpbv1.Identity if c.localIdentity != nil { - localIdentity = proto.Clone(c.localIdentity).(*commonpb.Identity) + localIdentity = proto.Clone(c.localIdentity).(*commonpbv1.Identity) } - var localIdentities []*commonpb.Identity + var localIdentities []*commonpbv1.Identity if c.localIdentities != nil { - localIdentities = make([]*commonpb.Identity, len(c.localIdentities)) + localIdentities = make([]*commonpbv1.Identity, len(c.localIdentities)) for i, localIdentity := range c.localIdentities { - localIdentities[i] = proto.Clone(localIdentity).(*commonpb.Identity) + localIdentities[i] = proto.Clone(localIdentity).(*commonpbv1.Identity) } } - var targetIdentities []*commonpb.Identity + var targetIdentities []*commonpbv1.Identity if c.targetIdentities != nil { - targetIdentities = make([]*commonpb.Identity, len(c.targetIdentities)) + targetIdentities = make([]*commonpbv1.Identity, len(c.targetIdentities)) for i, targetIdentity := range c.targetIdentities { - targetIdentities[i] = proto.Clone(targetIdentity).(*commonpb.Identity) + targetIdentities[i] = proto.Clone(targetIdentity).(*commonpbv1.Identity) } } return &s2aTransportCreds{ @@ -351,6 +364,12 @@ func getVerificationMode(verificationMode VerificationModeType) s2av2pb.Validate return s2av2pb.ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE case Spiffe: return s2av2pb.ValidatePeerCertificateChainReq_SPIFFE + case ReservedCustomVerificationMode3: + return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_3 + case ReservedCustomVerificationMode4: + return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_4 + case ReservedCustomVerificationMode5: + return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_5 default: return s2av2pb.ValidatePeerCertificateChainReq_UNSPECIFIED } @@ -396,24 +415,20 @@ func NewS2ADialTLSContextFunc(opts *ClientOptions) func(ctx context.Context, net defer cancel() var s2aTLSConfig *tls.Config + var c net.Conn retry.Run(timeoutCtx, func() error { s2aTLSConfig, err = factory.Build(timeoutCtx, &TLSClientConfigOptions{ ServerName: serverName, }) - return err - }) - if err != nil { - grpclog.Infof("error building S2A TLS config: %v", err) - return fallback(err) - } + if err != nil { + grpclog.Infof("error building S2A TLS config: %v", err) + return err + } - s2aDialer := &tls.Dialer{ - Config: s2aTLSConfig, - } - var c net.Conn - retry.Run(timeoutCtx, - func() error { + s2aDialer := &tls.Dialer{ + Config: s2aTLSConfig, + } c, err = s2aDialer.DialContext(timeoutCtx, network, addr) return err }) diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/s2a_options.go 
b/terraform/providers/google/vendor/github.com/google/s2a-go/s2a_options.go index fcdbc1621bd..5bbf31bf412 100644 --- a/terraform/providers/google/vendor/github.com/google/s2a-go/s2a_options.go +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/s2a_options.go @@ -28,7 +28,8 @@ import ( "github.com/google/s2a-go/stream" "google.golang.org/grpc/credentials" - s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" + s2apbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + s2apb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" ) // Identity is the interface for S2A identities. @@ -76,9 +77,12 @@ type VerificationModeType int // Three types of verification modes. const ( - Unspecified = iota - ConnectToGoogle + Unspecified VerificationModeType = iota Spiffe + ConnectToGoogle + ReservedCustomVerificationMode3 + ReservedCustomVerificationMode4 + ReservedCustomVerificationMode5 ) // ClientOptions contains the client-side options used to establish a secure @@ -198,7 +202,23 @@ func DefaultServerOptions(s2aAddress string) *ServerOptions { } } -func toProtoIdentity(identity Identity) (*s2apb.Identity, error) { +func toProtoIdentity(identity Identity) (*s2apbv1.Identity, error) { + if identity == nil { + return nil, nil + } + switch id := identity.(type) { + case *spiffeID: + return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_SpiffeId{SpiffeId: id.Name()}}, nil + case *hostname: + return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_Hostname{Hostname: id.Name()}}, nil + case *uid: + return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_Uid{Uid: id.Name()}}, nil + default: + return nil, errors.New("unrecognized identity type") + } +} + +func toV2ProtoIdentity(identity Identity) (*s2apb.Identity, error) { if identity == nil { return nil, nil } diff --git a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index d51736e7e36..44d4d00202f 100644 --- a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.12.4" + "v2": "2.13.0" } diff --git a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index 7e36eb48ff8..d63421b71ca 100644 --- a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,19 @@ # Changelog +## [2.13.0](https://github.com/googleapis/gax-go/compare/v2.12.5...v2.13.0) (2024-07-22) + + +### Features + +* **iterator:** add package to help work with new iter.Seq types ([#358](https://github.com/googleapis/gax-go/issues/358)) ([6bccdaa](https://github.com/googleapis/gax-go/commit/6bccdaac011fe6fd147e4eb533a8e6520b7d4acc)) + +## [2.12.5](https://github.com/googleapis/gax-go/compare/v2.12.4...v2.12.5) (2024-06-18) + + +### Bug Fixes + +* **v2/apierror:** fix (*APIError).Error() for unwrapped Status ([#351](https://github.com/googleapis/gax-go/issues/351)) ([22c16e7](https://github.com/googleapis/gax-go/commit/22c16e7bff5402bdc4c25063771cdd01c650b500)), refs [#350](https://github.com/googleapis/gax-go/issues/350) + ## [2.12.4](https://github.com/googleapis/gax-go/compare/v2.12.3...v2.12.4) (2024-05-03) diff 
--git a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go index d785a065cab..7de60773d63 100644 --- a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +++ b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go @@ -206,8 +206,10 @@ func (a *APIError) Error() string { // Truncate the googleapi.Error message because it dumps the Details in // an ugly way. msg = fmt.Sprintf("googleapi: Error %d: %s", a.httpErr.Code, a.httpErr.Message) - } else if a.status != nil { + } else if a.status != nil && a.err != nil { msg = a.err.Error() + } else if a.status != nil { + msg = a.status.Message() } return strings.TrimSpace(fmt.Sprintf("%s\n%s", msg, a.details)) } diff --git a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/header.go b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/header.go index 3e53729e5fc..f5273985afc 100644 --- a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/header.go +++ b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/header.go @@ -163,11 +163,38 @@ func insertMetadata(ctx context.Context, keyvals ...string) metadata.MD { out = metadata.MD(make(map[string][]string)) } headers := callctx.HeadersFromContext(ctx) - for k, v := range headers { - out[k] = append(out[k], v...) + + // x-goog-api-client is a special case that we want to make sure gets merged + // into a single header. + const xGoogHeader = "x-goog-api-client" + var mergedXgoogHeader strings.Builder + + for k, vals := range headers { + if k == xGoogHeader { + // Merge all values for the x-goog-api-client header set on the ctx. + for _, v := range vals { + mergedXgoogHeader.WriteString(v) + mergedXgoogHeader.WriteRune(' ') + } + continue + } + out[k] = append(out[k], vals...) } for i := 0; i < len(keyvals); i = i + 2 { out[keyvals[i]] = append(out[keyvals[i]], keyvals[i+1]) + + if keyvals[i] == xGoogHeader { + // Merge the x-goog-api-client header values set on the ctx with any + // values passed in for it from the client. + mergedXgoogHeader.WriteString(keyvals[i+1]) + mergedXgoogHeader.WriteRune(' ') + } + } + + // Add the x goog header back in, replacing the separate values that were set. + if mergedXgoogHeader.Len() > 0 { + out[xGoogHeader] = []string{mergedXgoogHeader.String()[:mergedXgoogHeader.Len()-1]} } + return out } diff --git a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/internal/version.go index 3006ad7bd91..e12421cf599 100644 --- a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. -const Version = "2.12.4" +const Version = "2.13.0" diff --git a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/iterator/iterator.go b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/iterator/iterator.go new file mode 100644 index 00000000000..d4d6019ff64 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/iterator/iterator.go @@ -0,0 +1,63 @@ +// Copyright 2024, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//go:build go1.23 + +// Package iterator contains helper for working with iterators. It is meant for +// internal use only by the Go Client Libraries. +package iterator + +import ( + "iter" + + otherit "google.golang.org/api/iterator" +) + +// RangeAdapter transforms client iterator type into a [iter.Seq2] that can +// be used with Go's range expressions. +// +// This is for internal use only. +func RangeAdapter[T any](next func() (T, error)) iter.Seq2[T, error] { + var err error + return func(yield func(T, error) bool) { + for { + if err != nil { + return + } + var resp T + resp, err = next() + if err == otherit.Done { + return + } + if !yield(resp, err) { + return + } + } + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/go-hclog/README.md b/terraform/providers/google/vendor/github.com/hashicorp/go-hclog/README.md index 21a17c5af39..983d44c7db4 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/go-hclog/README.md +++ b/terraform/providers/google/vendor/github.com/hashicorp/go-hclog/README.md @@ -140,9 +140,10 @@ log.Printf("[DEBUG] %d", 42) ... [DEBUG] my-app: 42 ``` -Notice that if `appLogger` is initialized with the `INFO` log level _and_ you +Notice that if `appLogger` is initialized with the `INFO` log level, _and_ you specify `InferLevels: true`, you will not see any output here. You must change `appLogger` to `DEBUG` to see output. See the docs for more information. If the log lines start with a timestamp you can use the -`InferLevelsWithTimestamp` option to try and ignore them. +`InferLevelsWithTimestamp` option to try and ignore them. Please note that in order +for `InferLevelsWithTimestamp` to be relevant, `InferLevels` must be set to `true`. 
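The go-hclog README hunk above ties the two inference options together: `InferLevelsWithTimestamp` is only consulted when `InferLevels` is also enabled, and the logger's configured level still gates what is emitted. A minimal sketch of that interaction against the `StandardLogger`/`StandardLoggerOptions` API vendored here; the logger name and log line are illustrative, not taken from this patch:

```go
package main

import (
	hclog "github.com/hashicorp/go-hclog"
)

func main() {
	// The logger must be at DEBUG (not INFO) for the inferred DEBUG
	// line below to be emitted, as the README hunk above notes.
	appLogger := hclog.New(&hclog.LoggerOptions{
		Name:  "my-app", // illustrative name
		Level: hclog.Debug,
	})

	// InferLevelsWithTimestamp has no effect unless InferLevels is true.
	stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{
		InferLevels:              true,
		InferLevelsWithTimestamp: true,
	})

	// The leading timestamp is skipped, then "[DEBUG]" is stripped and
	// used as the line's level.
	stdLogger.Println("2024/05/03 10:00:00 [DEBUG] 42")
}
```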
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/go-hclog/intlogger.go b/terraform/providers/google/vendor/github.com/hashicorp/go-hclog/intlogger.go index b45064acf1a..272a710c04c 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/go-hclog/intlogger.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -55,23 +55,38 @@ var ( faintBoldColor = color.New(color.Faint, color.Bold) faintColor = color.New(color.Faint) - faintMultiLinePrefix = faintColor.Sprint(" | ") - faintFieldSeparator = faintColor.Sprint("=") - faintFieldSeparatorWithNewLine = faintColor.Sprint("=\n") + faintMultiLinePrefix string + faintFieldSeparator string + faintFieldSeparatorWithNewLine string ) +func init() { + // Force all the colors to enabled because we do our own detection of color usage. + for _, c := range _levelToColor { + c.EnableColor() + } + + faintBoldColor.EnableColor() + faintColor.EnableColor() + + faintMultiLinePrefix = faintColor.Sprint(" | ") + faintFieldSeparator = faintColor.Sprint("=") + faintFieldSeparatorWithNewLine = faintColor.Sprint("=\n") +} + // Make sure that intLogger is a Logger var _ Logger = &intLogger{} // intLogger is an internal logger implementation. Internal in that it is // defined entirely by this package. type intLogger struct { - json bool - callerOffset int - name string - timeFormat string - timeFn TimeFunction - disableTime bool + json bool + jsonEscapeEnabled bool + callerOffset int + name string + timeFormat string + timeFn TimeFunction + disableTime bool // This is an interface so that it's shared by any derived loggers, since // those derived loggers share the bufio.Writer as well. @@ -79,6 +94,19 @@ type intLogger struct { writer *writer level *int32 + // The value of curEpoch when our level was set + setEpoch uint64 + + // The value of curEpoch the last time we performed the level sync process + ownEpoch uint64 + + // Shared amongst all the loggers created in this hierarchy, used to determine + // if the level sync process should be run by comparing it with ownEpoch + curEpoch *uint64 + + // The logger this one was created from. Only set when syncParentLevel is set + parent *intLogger + headerColor ColorOption fieldColor ColorOption @@ -88,6 +116,7 @@ type intLogger struct { // create subloggers with their own level setting independentLevels bool + syncParentLevel bool subloggerHook func(sub Logger) Logger } @@ -129,9 +158,9 @@ func newLogger(opts *LoggerOptions) *intLogger { } var ( - primaryColor ColorOption = ColorOff - headerColor ColorOption = ColorOff - fieldColor ColorOption = ColorOff + primaryColor = ColorOff + headerColor = ColorOff + fieldColor = ColorOff ) switch { case opts.ColorHeaderOnly: @@ -145,6 +174,7 @@ func newLogger(opts *LoggerOptions) *intLogger { l := &intLogger{ json: opts.JSONFormat, + jsonEscapeEnabled: !opts.JSONEscapeDisabled, name: opts.Name, timeFormat: TimeFormat, timeFn: time.Now, @@ -152,8 +182,10 @@ func newLogger(opts *LoggerOptions) *intLogger { mutex: mutex, writer: newWriter(output, primaryColor), level: new(int32), + curEpoch: new(uint64), exclude: opts.Exclude, independentLevels: opts.IndependentLevels, + syncParentLevel: opts.SyncParentLevel, headerColor: headerColor, fieldColor: fieldColor, subloggerHook: opts.SubloggerHook, @@ -194,7 +226,7 @@ const offsetIntLogger = 3 // Log a message and a set of key/value pairs if the given level is at // or more severe that the threshold configured in the Logger.
func (l *intLogger) log(name string, level Level, msg string, args ...interface{}) { - if level < Level(atomic.LoadInt32(l.level)) { + if level < l.GetLevel() { return } @@ -597,7 +629,7 @@ func (l *intLogger) logJSON(t time.Time, name string, level Level, msg string, a vals := l.jsonMapEntry(t, name, level, msg) args = append(l.implied, args...) - if args != nil && len(args) > 0 { + if len(args) > 0 { if len(args)%2 != 0 { cs, ok := args[len(args)-1].(CapturedStacktrace) if ok { @@ -637,13 +669,17 @@ func (l *intLogger) logJSON(t time.Time, name string, level Level, msg string, a } } - err := json.NewEncoder(l.writer).Encode(vals) + encoder := json.NewEncoder(l.writer) + encoder.SetEscapeHTML(l.jsonEscapeEnabled) + err := encoder.Encode(vals) if err != nil { if _, ok := err.(*json.UnsupportedTypeError); ok { plainVal := l.jsonMapEntry(t, name, level, msg) plainVal["@warn"] = errJsonUnsupportedTypeMsg - json.NewEncoder(l.writer).Encode(plainVal) + errEncoder := json.NewEncoder(l.writer) + errEncoder.SetEscapeHTML(l.jsonEscapeEnabled) + errEncoder.Encode(plainVal) } } } @@ -718,27 +754,27 @@ func (l *intLogger) Error(msg string, args ...interface{}) { // Indicate that the logger would emit TRACE level logs func (l *intLogger) IsTrace() bool { - return Level(atomic.LoadInt32(l.level)) == Trace + return l.GetLevel() == Trace } // Indicate that the logger would emit DEBUG level logs func (l *intLogger) IsDebug() bool { - return Level(atomic.LoadInt32(l.level)) <= Debug + return l.GetLevel() <= Debug } // Indicate that the logger would emit INFO level logs func (l *intLogger) IsInfo() bool { - return Level(atomic.LoadInt32(l.level)) <= Info + return l.GetLevel() <= Info } // Indicate that the logger would emit WARN level logs func (l *intLogger) IsWarn() bool { - return Level(atomic.LoadInt32(l.level)) <= Warn + return l.GetLevel() <= Warn } // Indicate that the logger would emit ERROR level logs func (l *intLogger) IsError() bool { - return Level(atomic.LoadInt32(l.level)) <= Error + return l.GetLevel() <= Error } const MissingKey = "EXTRA_VALUE_AT_END" @@ -854,12 +890,63 @@ func (l *intLogger) resetOutput(opts *LoggerOptions) error { // Update the logging level on-the-fly. This will affect all subloggers as // well. func (l *intLogger) SetLevel(level Level) { - atomic.StoreInt32(l.level, int32(level)) + if !l.syncParentLevel { + atomic.StoreInt32(l.level, int32(level)) + return + } + + nsl := new(int32) + *nsl = int32(level) + + l.level = nsl + + l.ownEpoch = atomic.AddUint64(l.curEpoch, 1) + l.setEpoch = l.ownEpoch +} + +func (l *intLogger) searchLevelPtr() *int32 { + p := l.parent + + ptr := l.level + + max := l.setEpoch + + for p != nil { + if p.setEpoch > max { + max = p.setEpoch + ptr = p.level + } + + p = p.parent + } + + return ptr } // Returns the current level func (l *intLogger) GetLevel() Level { - return Level(atomic.LoadInt32(l.level)) + // We perform the loads immediately to keep the CPU pipeline busy, which + // effectively makes the second load cost nothing. Once loaded into registers + // the comparison returns the already loaded value. The comparison is almost + // always true, so the branch predictor should hit consistently with it. + var ( + curEpoch = atomic.LoadUint64(l.curEpoch) + level = Level(atomic.LoadInt32(l.level)) + own = l.ownEpoch + ) + + if curEpoch == own { + return level + } + + // Perform the level sync process. We'll avoid doing this next time by seeing the + // epoch as current. 
+ + ptr := l.searchLevelPtr() + l.level = ptr + l.ownEpoch = curEpoch + + return Level(atomic.LoadInt32(ptr)) } // Create a *log.Logger that will send it's data through this Logger. This @@ -912,6 +999,8 @@ func (l *intLogger) copy() *intLogger { if l.independentLevels { sl.level = new(int32) *sl.level = *l.level + } else if l.syncParentLevel { + sl.parent = l } return &sl diff --git a/terraform/providers/google/vendor/github.com/hashicorp/go-hclog/logger.go b/terraform/providers/google/vendor/github.com/hashicorp/go-hclog/logger.go index 947ac0c9afc..ad17544f550 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/go-hclog/logger.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/go-hclog/logger.go @@ -233,6 +233,7 @@ type StandardLoggerOptions struct { // [DEBUG] and strip it off before reapplying it. // The timestamp detection may result in false positives and incomplete // string outputs. + // InferLevelsWithTimestamp is only relevant if InferLevels is true. InferLevelsWithTimestamp bool // ForceLevel is used to force all output from the standard logger to be at @@ -263,6 +264,9 @@ type LoggerOptions struct { // Control if the output should be in JSON. JSONFormat bool + // Control the escape switch of json.Encoder + JSONEscapeDisabled bool + // Include file and line information in each log line IncludeLocation bool @@ -303,6 +307,24 @@ type LoggerOptions struct { // will not affect the parent or sibling loggers. IndependentLevels bool + // When set, changing the level of a logger affects only its direct sub-loggers + // rather than all sub-loggers. For example: + // a := logger.Named("a") + // a.SetLevel(Error) + // b := a.Named("b") + // c := a.Named("c") + // b.GetLevel() => Error + // c.GetLevel() => Error + // b.SetLevel(Info) + // a.GetLevel() => Error + // b.GetLevel() => Info + // c.GetLevel() => Error + // a.SetLevel(Warn) + // a.GetLevel() => Warn + // b.GetLevel() => Warn + // c.GetLevel() => Warn + SyncParentLevel bool + // SubloggerHook registers a function that is called when a sublogger via // Named, With, or ResetNamed is created.
If defined, the function is passed // the newly created Logger and the returned Logger is returned from the diff --git a/terraform/providers/google/vendor/github.com/hashicorp/hc-install/version/VERSION b/terraform/providers/google/vendor/github.com/hashicorp/hc-install/version/VERSION index 844f6a91acb..d2b13eb644d 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/hc-install/version/VERSION +++ b/terraform/providers/google/vendor/github.com/hashicorp/hc-install/version/VERSION @@ -1 +1 @@ -0.6.3 +0.6.4 diff --git a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md index f3fe93d45f0..2eebedbc76f 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md +++ b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md @@ -1,5 +1,25 @@ # HCL Changelog +## v2.20.1 (March 26, 2024) + +### Bugs Fixed + +* Return `ExprSyntaxError` when an invalid namespaced function is encountered during parsing ([#668](https://github.com/hashicorp/hcl/pull/668)) + +### Internal + +* Standardize on only two value dumping/diffing libraries ([#669](https://github.com/hashicorp/hcl/pull/669)) + +## v2.20.0 (February 29, 2024) + +### Enhancements + +* Support for namespaced functions ([#639](https://github.com/hashicorp/hcl/pull/639)) + +### Bugs Fixed + +* ext/dynblock: if `iterator` is invalid return this error instead of consequential errors ([#656](https://github.com/hashicorp/hcl/pull/656)) + ## v2.19.0 (October 16, 2023) ### Enhancements @@ -43,7 +63,7 @@ * HCL now uses a newer version of the upstream `cty` library which has improved treatment of unknown values: it can now track additional optional information that reduces the range of an unknown value, which allows some operations against unknown values to return known or partially-known results. ([#590](https://github.com/hashicorp/hcl/pull/590)) **Note:** This change effectively passes on [`cty`'s notion of backward compatibility](https://github.com/zclconf/go-cty/blob/main/COMPATIBILITY.md) whereby unknown values can become "more known" in later releases. In particular, if your caller is using `cty.Value.RawEquals` in its tests against the results of operations with unknown values then you may see those tests begin failing after upgrading, due to the values now being more "refined". - + If so, you should review the refinements with consideration to [the `cty` refinements docs](https://github.com/zclconf/go-cty/blob/7dcbae46a6f247e983efb1fa774d2bb68781a333/docs/refinements.md) and update your expected results to match only if the reported refinements seem correct for the given situation. The `RawEquals` method is intended only for making exact value comparisons in test cases, so main application code should not use it; use `Equals` instead for real logic, which will take refinements into account automatically. ## v2.16.2 (March 9, 2023) @@ -173,7 +193,7 @@ * hclsyntax: Mark objects with keys that are sensitive. ([#440](https://github.com/hashicorp/hcl/pull/440)) ## v2.8.1 (December 17, 2020) - + ### Bugs Fixed * hclsyntax: Fix panic when expanding marked function arguments. 
([#429](https://github.com/hashicorp/hcl/pull/429)) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/Makefile b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/Makefile new file mode 100644 index 00000000000..675178e7475 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/Makefile @@ -0,0 +1,18 @@ +fmtcheck: + "$(CURDIR)/scripts/gofmtcheck.sh" + +fmtfix: + gofmt -w ./ + +vetcheck: + go vet ./... + +copyrightcheck: + go run github.com/hashicorp/copywrite@latest headers --plan + +copyrightfix: + go run github.com/hashicorp/copywrite@latest headers + +check: copyrightcheck vetcheck fmtcheck + +fix: copyrightfix fmtfix diff --git a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go index e0de1c3dd4e..815973996bb 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go @@ -6,6 +6,7 @@ package hclsyntax import ( "fmt" "sort" + "strings" "sync" "github.com/hashicorp/hcl/v2" @@ -251,6 +252,76 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti } } + extraUnknown := &functionCallUnknown{ + name: e.Name, + } + + // For historical reasons, we represent namespaced function names + // as strings with :: separating the names. If this was an attempt + // to call a namespaced function then we'll try to distinguish + // between an invalid namespace or an invalid name within a valid + // namespace in order to give the user better feedback about what + // is wrong. + // + // The parser guarantees that a function name will always + // be a series of valid identifiers separated by "::" with no + // other content, so we can be relatively unforgiving in our processing + // here. + if sepIdx := strings.LastIndex(e.Name, "::"); sepIdx != -1 { + namespace := e.Name[:sepIdx+2] + name := e.Name[sepIdx+2:] + + avail := make([]string, 0, len(ctx.Functions)) + for availName := range ctx.Functions { + if strings.HasPrefix(availName, namespace) { + avail = append(avail, availName) + } + } + + extraUnknown.name = name + extraUnknown.namespace = namespace + + if len(avail) == 0 { + // TODO: Maybe use nameSuggestion for the other available + // namespaces? But that'd require us to go scan the function + // table again, so we'll wait to see if it's really warranted. + // For now, we're assuming people are more likely to misremember + // the function names than the namespaces, because in many + // applications there will be relatively few namespaces compared + // to the number of distinct functions. 
+ return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Call to unknown function", + Detail: fmt.Sprintf("There are no functions in namespace %q.", namespace), + Subject: &e.NameRange, + Context: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + Extra: extraUnknown, + }, + } + } else { + suggestion := nameSuggestion(name, avail) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %s%s?", namespace, suggestion) + } + + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Call to unknown function", + Detail: fmt.Sprintf("There is no function named %q in namespace %s.%s", name, namespace, suggestion), + Subject: &e.NameRange, + Context: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + Extra: extraUnknown, + }, + } + } + } + avail := make([]string, 0, len(ctx.Functions)) for name := range ctx.Functions { avail = append(avail, name) @@ -269,6 +340,7 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Context: e.Range().Ptr(), Expression: e, EvalContext: ctx, + Extra: extraUnknown, }, } } @@ -616,6 +688,27 @@ func (e *functionCallDiagExtra) FunctionCallError() error { return e.functionCallError } +// FunctionCallUnknownDiagExtra is an interface implemented by a value in the Extra +// field of some diagnostics to indicate when the error was caused by a call to +// an unknown function. +type FunctionCallUnknownDiagExtra interface { + CalledFunctionName() string + CalledFunctionNamespace() string +} + +type functionCallUnknown struct { + name string + namespace string +} + +func (e *functionCallUnknown) CalledFunctionName() string { + return e.name +} + +func (e *functionCallUnknown) CalledFunctionNamespace() string { + return e.namespace +} + type ConditionalExpr struct { Condition Expression TrueResult Expression @@ -1920,3 +2013,27 @@ func (e *AnonSymbolExpr) Range() hcl.Range { func (e *AnonSymbolExpr) StartRange() hcl.Range { return e.SrcRange } + +// ExprSyntaxError is a placeholder for an invalid expression that could not +// be parsed due to syntax errors. +type ExprSyntaxError struct { + Placeholder cty.Value + ParseDiags hcl.Diagnostics + SrcRange hcl.Range +} + +func (e *ExprSyntaxError) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return e.Placeholder, e.ParseDiags +} + +func (e *ExprSyntaxError) walkChildNodes(w internalWalkFunc) { + // ExprSyntaxError is a leaf node in the tree +} + +func (e *ExprSyntaxError) Range() hcl.Range { + return e.SrcRange +} + +func (e *ExprSyntaxError) StartRange() hcl.Range { + return e.SrcRange +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_vars.go b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_vars.go index ce5a5cb755d..6c3e472caba 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_vars.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_vars.go @@ -3,7 +3,7 @@ package hclsyntax -// Generated by expression_vars_get.go. DO NOT EDIT. +// Generated by expression_vars_gen.go. DO NOT EDIT. // Run 'go generate' on this package to update the set of functions here. 
import ( @@ -22,6 +22,10 @@ func (e *ConditionalExpr) Variables() []hcl.Traversal { return Variables(e) } +func (e *ExprSyntaxError) Variables() []hcl.Traversal { + return Variables(e) +} + func (e *ForExpr) Variables() []hcl.Traversal { return Variables(e) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/generate.go b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/generate.go index 383ec6b85d6..66486074c7c 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/generate.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/generate.go @@ -9,4 +9,4 @@ package hclsyntax //go:generate gofmt -w scan_tokens.go //go:generate ragel -Z scan_string_lit.rl //go:generate gofmt -w scan_string_lit.go -//go:generate stringer -type TokenType -output token_type_string.go +//go:generate go run golang.org/x/tools/cmd/stringer -type TokenType -output token_type_string.go diff --git a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go index aa147afeb49..ce96ae35b4c 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go @@ -999,7 +999,7 @@ func (p *parser) parseExpressionTerm() (Expression, hcl.Diagnostics) { case TokenIdent: tok := p.Read() // eat identifier token - if p.Peek().Type == TokenOParen { + if p.Peek().Type == TokenOParen || p.Peek().Type == TokenDoubleColon { return p.finishParsingFunctionCall(tok) } @@ -1145,16 +1145,76 @@ func (p *parser) numberLitValue(tok Token) (cty.Value, hcl.Diagnostics) { // finishParsingFunctionCall parses a function call assuming that the function // name was already read, and so the peeker should be pointing at the opening -// parenthesis after the name. +// parenthesis after the name, or at the double-colon after the initial +// function scope name. func (p *parser) finishParsingFunctionCall(name Token) (Expression, hcl.Diagnostics) { + var diags hcl.Diagnostics + openTok := p.Read() - if openTok.Type != TokenOParen { + if openTok.Type != TokenOParen && openTok.Type != TokenDoubleColon { // should never happen if callers behave - panic("finishParsingFunctionCall called with non-parenthesis as next token") + panic("finishParsingFunctionCall called with unsupported next token") + } + + nameStr := string(name.Bytes) + nameEndPos := name.Range.End + for openTok.Type == TokenDoubleColon { + nextName := p.Read() + if nextName.Type != TokenIdent { + diag := hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing function name", + Detail: "Function scope resolution symbol :: must be followed by a function name in this scope.", + Subject: &nextName.Range, + Context: hcl.RangeBetween(name.Range, nextName.Range).Ptr(), + } + diags = append(diags, &diag) + p.recoverOver(TokenOParen) + return &ExprSyntaxError{ + ParseDiags: hcl.Diagnostics{&diag}, + Placeholder: cty.DynamicVal, + SrcRange: hcl.RangeBetween(name.Range, nextName.Range), + }, diags + } + + // Initial versions of HCLv2 didn't support function namespaces, and + // so for backward compatibility we just treat namespaced functions + // as weird names with "::" separators in them, saved as a string + // to keep the API unchanged. 
FunctionCallExpr also has some special + // handling of names containing :: when referring to a function that + // doesn't exist in EvalContext, to return better error messages + // when namespaces are used incorrectly. + nameStr = nameStr + "::" + string(nextName.Bytes) + nameEndPos = nextName.Range.End + + openTok = p.Read() + } + + nameRange := hcl.Range{ + Filename: name.Range.Filename, + Start: name.Range.Start, + End: nameEndPos, + } + + if openTok.Type != TokenOParen { + diag := hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing open parenthesis", + Detail: "Function selector must be followed by an open parenthesis to begin the function call.", + Subject: &openTok.Range, + Context: hcl.RangeBetween(name.Range, openTok.Range).Ptr(), + } + + diags = append(diags, &diag) + p.recoverOver(TokenOParen) + return &ExprSyntaxError{ + ParseDiags: hcl.Diagnostics{&diag}, + Placeholder: cty.DynamicVal, + SrcRange: hcl.RangeBetween(name.Range, openTok.Range), + }, diags } var args []Expression - var diags hcl.Diagnostics var expandFinal bool var closeTok Token @@ -1218,7 +1278,7 @@ Token: diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Unterminated function call", - Detail: "There is no closing parenthesis for this function call before the end of the file. This may be caused by incorrect parethesis nesting elsewhere in this file.", + Detail: "There is no closing parenthesis for this function call before the end of the file. This may be caused by incorrect parenthesis nesting elsewhere in this file.", Subject: hcl.RangeBetween(name.Range, openTok.Range).Ptr(), }) default: @@ -1245,12 +1305,12 @@ Token: p.PopIncludeNewlines() return &FunctionCallExpr{ - Name: string(name.Bytes), + Name: nameStr, Args: args, ExpandFinal: expandFinal, - NameRange: name.Range, + NameRange: nameRange, OpenParenRange: openTok.Range, CloseParenRange: closeTok.Range, }, diags diff --git a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.go b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.go index 5d60ff5a5ea..6b44d9923ba 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.go @@ -1,13 +1,12 @@ +//line scan_string_lit.rl:1 // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//line scan_string_lit.rl:1 - package hclsyntax // This file is generated from scan_string_lit.rl. DO NOT EDIT. 
-//line scan_string_lit.go:9 +//line scan_string_lit.go:11 var _hclstrtok_actions []byte = []byte{ 0, 1, 0, 1, 1, 2, 1, 0, } @@ -117,12 +116,12 @@ const hclstrtok_error int = 0 const hclstrtok_en_quoted int = 10 const hclstrtok_en_unquoted int = 4 -//line scan_string_lit.rl:10 +//line scan_string_lit.rl:12 func scanStringLit(data []byte, quoted bool) [][]byte { var ret [][]byte -//line scan_string_lit.rl:61 +//line scan_string_lit.rl:63 // Ragel state p := 0 // "Pointer" into data @@ -147,11 +146,11 @@ func scanStringLit(data []byte, quoted bool) [][]byte { ret = append(ret, data[ts:te]) }*/ -//line scan_string_lit.go:154 +//line scan_string_lit.go:156 { } -//line scan_string_lit.go:158 +//line scan_string_lit.go:160 { var _klen int var _trans int @@ -232,7 +231,7 @@ func scanStringLit(data []byte, quoted bool) [][]byte { _acts++ switch _hclstrtok_actions[_acts-1] { case 0: -//line scan_string_lit.rl:40 +//line scan_string_lit.rl:42 // If te is behind p then we've skipped over some literal // characters which we must now return. @@ -242,12 +241,12 @@ func scanStringLit(data []byte, quoted bool) [][]byte { ts = p case 1: -//line scan_string_lit.rl:48 +//line scan_string_lit.rl:50 te = p ret = append(ret, data[ts:te]) -//line scan_string_lit.go:253 +//line scan_string_lit.go:255 } } @@ -270,12 +269,12 @@ func scanStringLit(data []byte, quoted bool) [][]byte { __acts++ switch _hclstrtok_actions[__acts-1] { case 1: -//line scan_string_lit.rl:48 +//line scan_string_lit.rl:50 te = p ret = append(ret, data[ts:te]) -//line scan_string_lit.go:278 +//line scan_string_lit.go:280 } } } @@ -285,7 +284,7 @@ func scanStringLit(data []byte, quoted bool) [][]byte { } } -//line scan_string_lit.rl:89 +//line scan_string_lit.rl:91 if te < p { // Collect any leftover literal characters at the end of the input diff --git a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.rl b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.rl index f8ac1175167..21d2c8bca38 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.rl +++ b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.rl @@ -1,3 +1,5 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 package hclsyntax diff --git a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.go b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.go index 1bbbb92781d..3ed8455ff87 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.go @@ -1,8 +1,7 @@ +//line scan_tokens.rl:1 // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//line scan_tokens.rl:1 - package hclsyntax import ( @@ -13,7 +12,7 @@ import ( // This file is generated from scan_tokens.rl. DO NOT EDIT. 
-//line scan_tokens.go:15 +//line scan_tokens.go:17 var _hcltok_actions []byte = []byte{ 0, 1, 0, 1, 1, 1, 3, 1, 4, 1, 7, 1, 8, 1, 9, 1, 10, @@ -33,13 +32,13 @@ var _hcltok_actions []byte = []byte{ 1, 71, 1, 72, 1, 73, 1, 74, 1, 75, 1, 76, 1, 77, 1, 78, 1, 79, 1, 80, 1, 81, 1, 82, - 1, 83, 1, 84, 1, 85, 2, 0, - 14, 2, 0, 25, 2, 0, 29, 2, - 0, 37, 2, 0, 41, 2, 1, 2, - 2, 4, 5, 2, 4, 6, 2, 4, - 21, 2, 4, 22, 2, 4, 33, 2, - 4, 34, 2, 4, 45, 2, 4, 46, - 2, 4, 54, 2, 4, 55, + 1, 83, 1, 84, 1, 85, 1, 86, + 2, 0, 14, 2, 0, 25, 2, 0, + 29, 2, 0, 37, 2, 0, 41, 2, + 1, 2, 2, 4, 5, 2, 4, 6, + 2, 4, 21, 2, 4, 22, 2, 4, + 33, 2, 4, 34, 2, 4, 45, 2, + 4, 46, 2, 4, 54, 2, 4, 55, } var _hcltok_key_offsets []int16 = []int16{ @@ -225,22 +224,22 @@ var _hcltok_key_offsets []int16 = []int16{ 9153, 9171, 9172, 9182, 9183, 9192, 9200, 9202, 9205, 9207, 9209, 9211, 9216, 9229, 9233, 9248, 9277, 9288, 9290, 9294, 9298, 9303, 9307, 9309, - 9316, 9320, 9328, 9332, 9407, 9409, 9410, 9411, - 9412, 9413, 9414, 9416, 9421, 9423, 9425, 9426, - 9470, 9471, 9472, 9474, 9479, 9483, 9483, 9485, - 9487, 9498, 9508, 9516, 9517, 9519, 9520, 9524, - 9528, 9538, 9542, 9549, 9560, 9567, 9571, 9577, - 9588, 9620, 9669, 9684, 9699, 9704, 9706, 9711, - 9743, 9751, 9753, 9775, 9797, 9799, 9815, 9831, - 9833, 9835, 9835, 9836, 9837, 9838, 9840, 9841, - 9853, 9855, 9857, 9859, 9873, 9887, 9889, 9892, - 9895, 9897, 9898, 9899, 9901, 9903, 9905, 9919, - 9933, 9935, 9938, 9941, 9943, 9944, 9945, 9947, - 9949, 9951, 10000, 10044, 10046, 10051, 10055, 10055, - 10057, 10059, 10070, 10080, 10088, 10089, 10091, 10092, - 10096, 10100, 10110, 10114, 10121, 10132, 10139, 10143, - 10149, 10160, 10192, 10241, 10256, 10271, 10276, 10278, - 10283, 10315, 10323, 10325, 10347, 10369, + 9316, 9320, 9328, 9332, 9408, 9410, 9411, 9412, + 9413, 9414, 9415, 9417, 9422, 9423, 9425, 9427, + 9428, 9472, 9473, 9474, 9476, 9481, 9485, 9485, + 9487, 9489, 9500, 9510, 9518, 9519, 9521, 9522, + 9526, 9530, 9540, 9544, 9551, 9562, 9569, 9573, + 9579, 9590, 9622, 9671, 9686, 9701, 9706, 9708, + 9713, 9745, 9753, 9755, 9777, 9799, 9801, 9817, + 9833, 9835, 9837, 9837, 9838, 9839, 9840, 9842, + 9843, 9855, 9857, 9859, 9861, 9875, 9889, 9891, + 9894, 9897, 9899, 9900, 9901, 9903, 9905, 9907, + 9921, 9935, 9937, 9940, 9943, 9945, 9946, 9947, + 9949, 9951, 9953, 10002, 10046, 10048, 10053, 10057, + 10057, 10059, 10061, 10072, 10082, 10090, 10091, 10093, + 10094, 10098, 10102, 10112, 10116, 10123, 10134, 10141, + 10145, 10151, 10162, 10194, 10243, 10258, 10273, 10278, + 10280, 10285, 10317, 10325, 10327, 10349, 10371, } var _hcltok_trans_keys []byte = []byte{ @@ -263,7 +262,7 @@ var _hcltok_trans_keys []byte = []byte{ 233, 234, 237, 239, 240, 243, 48, 57, 65, 90, 97, 122, 196, 218, 229, 236, 10, 170, 181, 183, 186, 128, 150, 152, - 182, 184, 255, 192, 255, 128, 255, 173, + 182, 184, 255, 192, 255, 0, 127, 173, 130, 133, 146, 159, 165, 171, 175, 255, 181, 190, 184, 185, 192, 255, 140, 134, 138, 142, 161, 163, 255, 182, 130, 136, @@ -572,7 +571,7 @@ var _hcltok_trans_keys []byte = []byte{ 150, 153, 131, 140, 255, 160, 163, 164, 165, 184, 185, 186, 161, 162, 133, 255, 170, 181, 183, 186, 128, 150, 152, 182, - 184, 255, 192, 255, 128, 255, 173, 130, + 184, 255, 192, 255, 0, 127, 173, 130, 133, 146, 159, 165, 171, 175, 255, 181, 190, 184, 185, 192, 255, 140, 134, 138, 142, 161, 163, 255, 182, 130, 136, 137, @@ -1411,136 +1410,136 @@ var _hcltok_trans_keys []byte = []byte{ 187, 191, 192, 255, 162, 191, 192, 255, 160, 168, 128, 159, 161, 167, 169, 191, 158, 191, 192, 255, 9, 10, 
13, 32, - 33, 34, 35, 38, 46, 47, 60, 61, - 62, 64, 92, 95, 123, 124, 125, 126, - 127, 194, 195, 198, 199, 203, 204, 205, - 206, 207, 210, 212, 213, 214, 215, 216, - 217, 219, 220, 221, 222, 223, 224, 225, - 226, 227, 228, 233, 234, 237, 238, 239, - 240, 0, 36, 37, 45, 48, 57, 58, - 63, 65, 90, 91, 96, 97, 122, 192, - 193, 196, 218, 229, 236, 241, 247, 9, - 32, 10, 61, 10, 38, 46, 42, 47, - 46, 69, 101, 48, 57, 60, 61, 61, - 62, 61, 45, 95, 194, 195, 198, 199, - 203, 204, 205, 206, 207, 210, 212, 213, - 214, 215, 216, 217, 219, 220, 221, 222, - 223, 224, 225, 226, 227, 228, 233, 234, - 237, 239, 240, 243, 48, 57, 65, 90, - 97, 122, 196, 218, 229, 236, 124, 125, - 128, 191, 170, 181, 186, 128, 191, 151, - 183, 128, 255, 192, 255, 0, 127, 173, - 130, 133, 146, 159, 165, 171, 175, 191, - 192, 255, 181, 190, 128, 175, 176, 183, - 184, 185, 186, 191, 134, 139, 141, 162, - 128, 135, 136, 255, 182, 130, 137, 176, - 151, 152, 154, 160, 136, 191, 192, 255, - 128, 143, 144, 170, 171, 175, 176, 178, - 179, 191, 128, 159, 160, 191, 176, 128, - 138, 139, 173, 174, 255, 148, 150, 164, - 167, 173, 176, 185, 189, 190, 192, 255, - 144, 128, 145, 146, 175, 176, 191, 128, - 140, 141, 255, 166, 176, 178, 191, 192, - 255, 186, 128, 137, 138, 170, 171, 179, - 180, 181, 182, 191, 160, 161, 162, 164, - 165, 166, 167, 168, 169, 170, 171, 172, - 173, 174, 175, 176, 177, 178, 179, 180, - 181, 182, 183, 184, 185, 186, 187, 188, - 189, 190, 128, 191, 128, 129, 130, 131, - 137, 138, 139, 140, 141, 142, 143, 144, - 153, 154, 155, 156, 157, 158, 159, 160, - 161, 162, 163, 164, 165, 166, 167, 168, - 169, 170, 171, 172, 173, 174, 175, 176, - 177, 178, 179, 180, 182, 183, 184, 188, - 189, 190, 191, 132, 187, 129, 130, 132, - 133, 134, 176, 177, 178, 179, 180, 181, - 182, 183, 128, 191, 128, 129, 130, 131, - 132, 133, 134, 135, 144, 136, 143, 145, - 191, 192, 255, 182, 183, 184, 128, 191, - 128, 191, 191, 128, 190, 192, 255, 128, - 146, 147, 148, 152, 153, 154, 155, 156, - 158, 159, 160, 161, 162, 163, 164, 165, - 166, 167, 168, 169, 170, 171, 172, 173, - 174, 175, 176, 129, 191, 192, 255, 158, - 159, 128, 157, 160, 191, 192, 255, 128, - 191, 164, 169, 171, 172, 173, 174, 175, - 180, 181, 182, 183, 184, 185, 187, 188, - 189, 190, 191, 128, 163, 165, 186, 144, - 145, 146, 147, 148, 150, 151, 152, 155, - 157, 158, 160, 170, 171, 172, 175, 128, - 159, 161, 169, 173, 191, 128, 191, 10, - 13, 34, 36, 37, 92, 128, 191, 192, - 223, 224, 239, 240, 247, 248, 255, 10, - 13, 34, 92, 36, 37, 128, 191, 192, - 223, 224, 239, 240, 247, 248, 255, 10, - 13, 36, 123, 123, 126, 126, 37, 123, - 126, 10, 13, 128, 191, 192, 223, 224, - 239, 240, 247, 248, 255, 128, 191, 128, + 33, 34, 35, 38, 46, 47, 58, 60, + 61, 62, 64, 92, 95, 123, 124, 125, + 126, 127, 194, 195, 198, 199, 203, 204, + 205, 206, 207, 210, 212, 213, 214, 215, + 216, 217, 219, 220, 221, 222, 223, 224, + 225, 226, 227, 228, 233, 234, 237, 238, + 239, 240, 0, 36, 37, 45, 48, 57, + 59, 63, 65, 90, 91, 96, 97, 122, + 192, 193, 196, 218, 229, 236, 241, 247, + 9, 32, 10, 61, 10, 38, 46, 42, + 47, 46, 69, 101, 48, 57, 58, 60, + 61, 61, 62, 61, 45, 95, 194, 195, + 198, 199, 203, 204, 205, 206, 207, 210, + 212, 213, 214, 215, 216, 217, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 228, + 233, 234, 237, 239, 240, 243, 48, 57, + 65, 90, 97, 122, 196, 218, 229, 236, + 124, 125, 128, 191, 170, 181, 186, 128, + 191, 151, 183, 128, 255, 192, 255, 0, + 127, 173, 130, 133, 146, 159, 165, 171, + 175, 191, 192, 255, 181, 190, 128, 175, + 176, 183, 184, 185, 186, 191, 134, 139, + 141, 162, 128, 
135, 136, 255, 182, 130, + 137, 176, 151, 152, 154, 160, 136, 191, + 192, 255, 128, 143, 144, 170, 171, 175, + 176, 178, 179, 191, 128, 159, 160, 191, + 176, 128, 138, 139, 173, 174, 255, 148, + 150, 164, 167, 173, 176, 185, 189, 190, + 192, 255, 144, 128, 145, 146, 175, 176, + 191, 128, 140, 141, 255, 166, 176, 178, + 191, 192, 255, 186, 128, 137, 138, 170, + 171, 179, 180, 181, 182, 191, 160, 161, + 162, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 128, 191, 128, 129, + 130, 131, 137, 138, 139, 140, 141, 142, + 143, 144, 153, 154, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 172, 173, 174, + 175, 176, 177, 178, 179, 180, 182, 183, + 184, 188, 189, 190, 191, 132, 187, 129, + 130, 132, 133, 134, 176, 177, 178, 179, + 180, 181, 182, 183, 128, 191, 128, 129, + 130, 131, 132, 133, 134, 135, 144, 136, + 143, 145, 191, 192, 255, 182, 183, 184, + 128, 191, 128, 191, 191, 128, 190, 192, + 255, 128, 146, 147, 148, 152, 153, 154, + 155, 156, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, + 172, 173, 174, 175, 176, 129, 191, 192, + 255, 158, 159, 128, 157, 160, 191, 192, + 255, 128, 191, 164, 169, 171, 172, 173, + 174, 175, 180, 181, 182, 183, 184, 185, + 187, 188, 189, 190, 191, 128, 163, 165, + 186, 144, 145, 146, 147, 148, 150, 151, + 152, 155, 157, 158, 160, 170, 171, 172, + 175, 128, 159, 161, 169, 173, 191, 128, + 191, 10, 13, 34, 36, 37, 92, 128, + 191, 192, 223, 224, 239, 240, 247, 248, + 255, 10, 13, 34, 92, 36, 37, 128, + 191, 192, 223, 224, 239, 240, 247, 248, + 255, 10, 13, 36, 123, 123, 126, 126, + 37, 123, 126, 10, 13, 128, 191, 192, + 223, 224, 239, 240, 247, 248, 255, 128, + 191, 128, 191, 128, 191, 10, 13, 36, + 37, 128, 191, 192, 223, 224, 239, 240, + 247, 248, 255, 10, 13, 36, 37, 128, + 191, 192, 223, 224, 239, 240, 247, 248, + 255, 10, 13, 10, 13, 123, 10, 13, + 126, 10, 13, 126, 126, 128, 191, 128, 191, 128, 191, 10, 13, 36, 37, 128, 191, 192, 223, 224, 239, 240, 247, 248, 255, 10, 13, 36, 37, 128, 191, 192, 223, 224, 239, 240, 247, 248, 255, 10, 13, 10, 13, 123, 10, 13, 126, 10, 13, 126, 126, 128, 191, 128, 191, 128, - 191, 10, 13, 36, 37, 128, 191, 192, - 223, 224, 239, 240, 247, 248, 255, 10, - 13, 36, 37, 128, 191, 192, 223, 224, - 239, 240, 247, 248, 255, 10, 13, 10, - 13, 123, 10, 13, 126, 10, 13, 126, - 126, 128, 191, 128, 191, 128, 191, 95, - 194, 195, 198, 199, 203, 204, 205, 206, - 207, 210, 212, 213, 214, 215, 216, 217, - 219, 220, 221, 222, 223, 224, 225, 226, - 227, 228, 233, 234, 237, 238, 239, 240, - 65, 90, 97, 122, 128, 191, 192, 193, - 196, 218, 229, 236, 241, 247, 248, 255, - 45, 95, 194, 195, 198, 199, 203, 204, + 191, 95, 194, 195, 198, 199, 203, 204, 205, 206, 207, 210, 212, 213, 214, 215, 216, 217, 219, 220, 221, 222, 223, 224, - 225, 226, 227, 228, 233, 234, 237, 239, - 240, 243, 48, 57, 65, 90, 97, 122, - 196, 218, 229, 236, 128, 191, 170, 181, - 186, 128, 191, 151, 183, 128, 255, 192, - 255, 0, 127, 173, 130, 133, 146, 159, - 165, 171, 175, 191, 192, 255, 181, 190, - 128, 175, 176, 183, 184, 185, 186, 191, - 134, 139, 141, 162, 128, 135, 136, 255, - 182, 130, 137, 176, 151, 152, 154, 160, - 136, 191, 192, 255, 128, 143, 144, 170, - 171, 175, 176, 178, 179, 191, 128, 159, - 160, 191, 176, 128, 138, 139, 173, 174, - 255, 148, 150, 164, 167, 173, 176, 185, - 189, 190, 192, 255, 144, 128, 145, 146, - 175, 176, 191, 128, 140, 141, 255, 166, - 176, 178, 191, 192, 255, 186, 128, 137, - 138, 170, 
171, 179, 180, 181, 182, 191, - 160, 161, 162, 164, 165, 166, 167, 168, - 169, 170, 171, 172, 173, 174, 175, 176, - 177, 178, 179, 180, 181, 182, 183, 184, - 185, 186, 187, 188, 189, 190, 128, 191, - 128, 129, 130, 131, 137, 138, 139, 140, - 141, 142, 143, 144, 153, 154, 155, 156, - 157, 158, 159, 160, 161, 162, 163, 164, - 165, 166, 167, 168, 169, 170, 171, 172, - 173, 174, 175, 176, 177, 178, 179, 180, - 182, 183, 184, 188, 189, 190, 191, 132, - 187, 129, 130, 132, 133, 134, 176, 177, - 178, 179, 180, 181, 182, 183, 128, 191, - 128, 129, 130, 131, 132, 133, 134, 135, - 144, 136, 143, 145, 191, 192, 255, 182, - 183, 184, 128, 191, 128, 191, 191, 128, - 190, 192, 255, 128, 146, 147, 148, 152, - 153, 154, 155, 156, 158, 159, 160, 161, - 162, 163, 164, 165, 166, 167, 168, 169, - 170, 171, 172, 173, 174, 175, 176, 129, - 191, 192, 255, 158, 159, 128, 157, 160, - 191, 192, 255, 128, 191, 164, 169, 171, - 172, 173, 174, 175, 180, 181, 182, 183, - 184, 185, 187, 188, 189, 190, 191, 128, - 163, 165, 186, 144, 145, 146, 147, 148, - 150, 151, 152, 155, 157, 158, 160, 170, - 171, 172, 175, 128, 159, 161, 169, 173, - 191, 128, 191, + 225, 226, 227, 228, 233, 234, 237, 238, + 239, 240, 65, 90, 97, 122, 128, 191, + 192, 193, 196, 218, 229, 236, 241, 247, + 248, 255, 45, 95, 194, 195, 198, 199, + 203, 204, 205, 206, 207, 210, 212, 213, + 214, 215, 216, 217, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 228, 233, 234, + 237, 239, 240, 243, 48, 57, 65, 90, + 97, 122, 196, 218, 229, 236, 128, 191, + 170, 181, 186, 128, 191, 151, 183, 128, + 255, 192, 255, 0, 127, 173, 130, 133, + 146, 159, 165, 171, 175, 191, 192, 255, + 181, 190, 128, 175, 176, 183, 184, 185, + 186, 191, 134, 139, 141, 162, 128, 135, + 136, 255, 182, 130, 137, 176, 151, 152, + 154, 160, 136, 191, 192, 255, 128, 143, + 144, 170, 171, 175, 176, 178, 179, 191, + 128, 159, 160, 191, 176, 128, 138, 139, + 173, 174, 255, 148, 150, 164, 167, 173, + 176, 185, 189, 190, 192, 255, 144, 128, + 145, 146, 175, 176, 191, 128, 140, 141, + 255, 166, 176, 178, 191, 192, 255, 186, + 128, 137, 138, 170, 171, 179, 180, 181, + 182, 191, 160, 161, 162, 164, 165, 166, + 167, 168, 169, 170, 171, 172, 173, 174, + 175, 176, 177, 178, 179, 180, 181, 182, + 183, 184, 185, 186, 187, 188, 189, 190, + 128, 191, 128, 129, 130, 131, 137, 138, + 139, 140, 141, 142, 143, 144, 153, 154, + 155, 156, 157, 158, 159, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 182, 183, 184, 188, 189, 190, + 191, 132, 187, 129, 130, 132, 133, 134, + 176, 177, 178, 179, 180, 181, 182, 183, + 128, 191, 128, 129, 130, 131, 132, 133, + 134, 135, 144, 136, 143, 145, 191, 192, + 255, 182, 183, 184, 128, 191, 128, 191, + 191, 128, 190, 192, 255, 128, 146, 147, + 148, 152, 153, 154, 155, 156, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, + 176, 129, 191, 192, 255, 158, 159, 128, + 157, 160, 191, 192, 255, 128, 191, 164, + 169, 171, 172, 173, 174, 175, 180, 181, + 182, 183, 184, 185, 187, 188, 189, 190, + 191, 128, 163, 165, 186, 144, 145, 146, + 147, 148, 150, 151, 152, 155, 157, 158, + 160, 170, 171, 172, 175, 128, 159, 161, + 169, 173, 191, 128, 191, } var _hcltok_single_lengths []byte = []byte{ @@ -1726,22 +1725,22 @@ var _hcltok_single_lengths []byte = []byte{ 12, 1, 4, 1, 5, 2, 0, 3, 2, 2, 2, 1, 7, 0, 7, 17, 3, 0, 2, 0, 3, 0, 0, 1, - 0, 2, 0, 53, 2, 1, 1, 1, - 1, 1, 2, 3, 2, 2, 1, 34, - 1, 1, 0, 3, 2, 0, 0, 0, - 1, 2, 4, 1, 0, 1, 0, 0, - 0, 0, 1, 1, 1, 0, 0, 1, - 30, 47, 13, 9, 3, 0, 
1, 28, - 2, 0, 18, 16, 0, 6, 4, 2, - 2, 0, 1, 1, 1, 2, 1, 2, - 0, 0, 0, 4, 2, 2, 3, 3, - 2, 1, 1, 0, 0, 0, 4, 2, - 2, 3, 3, 2, 1, 1, 0, 0, - 0, 33, 34, 0, 3, 2, 0, 0, + 0, 2, 0, 54, 2, 1, 1, 1, + 1, 1, 2, 3, 1, 2, 2, 1, + 34, 1, 1, 0, 3, 2, 0, 0, 0, 1, 2, 4, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 30, 47, 13, 9, 3, 0, 1, - 28, 2, 0, 18, 16, 0, + 28, 2, 0, 18, 16, 0, 6, 4, + 2, 2, 0, 1, 1, 1, 2, 1, + 2, 0, 0, 0, 4, 2, 2, 3, + 3, 2, 1, 1, 0, 0, 0, 4, + 2, 2, 3, 3, 2, 1, 1, 0, + 0, 0, 33, 34, 0, 3, 2, 0, + 0, 0, 1, 2, 4, 1, 0, 1, + 0, 0, 0, 0, 1, 1, 1, 0, + 0, 1, 30, 47, 13, 9, 3, 0, + 1, 28, 2, 0, 18, 16, 0, } var _hcltok_range_lengths []byte = []byte{ @@ -1928,21 +1927,21 @@ var _hcltok_range_lengths []byte = []byte{ 0, 0, 0, 2, 3, 2, 4, 6, 4, 1, 1, 2, 1, 2, 1, 3, 2, 3, 2, 11, 0, 0, 0, 0, - 0, 0, 0, 1, 0, 0, 0, 5, - 0, 0, 1, 1, 1, 0, 1, 1, - 5, 4, 2, 0, 1, 0, 2, 2, - 5, 2, 3, 5, 3, 2, 3, 5, - 1, 1, 1, 3, 1, 1, 2, 2, - 3, 1, 2, 3, 1, 5, 6, 0, - 0, 0, 0, 0, 0, 0, 0, 5, - 1, 1, 1, 5, 6, 0, 0, 0, - 0, 0, 0, 1, 1, 1, 5, 6, - 0, 0, 0, 0, 0, 0, 1, 1, - 1, 8, 5, 1, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 0, 0, 0, + 5, 0, 0, 1, 1, 1, 0, 1, 1, 5, 4, 2, 0, 1, 0, 2, 2, 5, 2, 3, 5, 3, 2, 3, 5, 1, 1, 1, 3, 1, 1, 2, - 2, 3, 1, 2, 3, 1, + 2, 3, 1, 2, 3, 1, 5, 6, + 0, 0, 0, 0, 0, 0, 0, 0, + 5, 1, 1, 1, 5, 6, 0, 0, + 0, 0, 0, 0, 1, 1, 1, 5, + 6, 0, 0, 0, 0, 0, 0, 1, + 1, 1, 8, 5, 1, 1, 1, 0, + 1, 1, 5, 4, 2, 0, 1, 0, + 2, 2, 5, 2, 3, 5, 3, 2, + 3, 5, 1, 1, 1, 3, 1, 1, + 2, 2, 3, 1, 2, 3, 1, } var _hcltok_index_offsets []int16 = []int16{ @@ -2128,22 +2127,22 @@ var _hcltok_index_offsets []int16 = []int16{ 7187, 7203, 7205, 7213, 7215, 7223, 7229, 7231, 7235, 7238, 7241, 7244, 7248, 7259, 7262, 7274, 7298, 7306, 7308, 7312, 7315, 7320, 7323, 7325, - 7330, 7333, 7339, 7342, 7407, 7410, 7412, 7414, - 7416, 7418, 7420, 7423, 7428, 7431, 7434, 7436, - 7476, 7478, 7480, 7482, 7487, 7491, 7492, 7494, - 7496, 7503, 7510, 7517, 7519, 7521, 7523, 7526, - 7529, 7535, 7538, 7543, 7550, 7555, 7558, 7562, - 7569, 7601, 7650, 7665, 7678, 7683, 7685, 7689, - 7720, 7726, 7728, 7749, 7769, 7771, 7783, 7794, - 7797, 7800, 7801, 7803, 7805, 7807, 7810, 7812, - 7820, 7822, 7824, 7826, 7836, 7845, 7848, 7852, - 7856, 7859, 7861, 7863, 7865, 7867, 7869, 7879, - 7888, 7891, 7895, 7899, 7902, 7904, 7906, 7908, - 7910, 7912, 7954, 7994, 7996, 8001, 8005, 8006, - 8008, 8010, 8017, 8024, 8031, 8033, 8035, 8037, - 8040, 8043, 8049, 8052, 8057, 8064, 8069, 8072, - 8076, 8083, 8115, 8164, 8179, 8192, 8197, 8199, - 8203, 8234, 8240, 8242, 8263, 8283, + 7330, 7333, 7339, 7342, 7408, 7411, 7413, 7415, + 7417, 7419, 7421, 7424, 7429, 7431, 7434, 7437, + 7439, 7479, 7481, 7483, 7485, 7490, 7494, 7495, + 7497, 7499, 7506, 7513, 7520, 7522, 7524, 7526, + 7529, 7532, 7538, 7541, 7546, 7553, 7558, 7561, + 7565, 7572, 7604, 7653, 7668, 7681, 7686, 7688, + 7692, 7723, 7729, 7731, 7752, 7772, 7774, 7786, + 7797, 7800, 7803, 7804, 7806, 7808, 7810, 7813, + 7815, 7823, 7825, 7827, 7829, 7839, 7848, 7851, + 7855, 7859, 7862, 7864, 7866, 7868, 7870, 7872, + 7882, 7891, 7894, 7898, 7902, 7905, 7907, 7909, + 7911, 7913, 7915, 7957, 7997, 7999, 8004, 8008, + 8009, 8011, 8013, 8020, 8027, 8034, 8036, 8038, + 8040, 8043, 8046, 8052, 8055, 8060, 8067, 8072, + 8075, 8079, 8086, 8118, 8167, 8182, 8195, 8200, + 8202, 8206, 8237, 8243, 8245, 8266, 8286, } var _hcltok_indicies []int16 = []int16{ @@ -2165,7 +2164,7 @@ var _hcltok_indicies []int16 = []int16{ 61, 62, 37, 39, 63, 41, 64, 65, 66, 11, 11, 11, 14, 38, 0, 44, 0, 11, 11, 11, 11, 0, 11, 11, - 11, 0, 11, 0, 11, 11, 0, 0, 
+ 11, 0, 11, 0, 11, 0, 11, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 11, 11, 11, 11, 11, 0, 0, 11, 0, 0, 11, 0, 11, 0, 0, @@ -2418,7 +2417,7 @@ var _hcltok_indicies []int16 = []int16{ 11, 16, 417, 16, 265, 300, 301, 302, 14, 0, 0, 11, 419, 419, 419, 419, 418, 419, 419, 419, 418, 419, 418, 419, - 419, 418, 418, 418, 418, 418, 418, 419, + 418, 419, 418, 418, 418, 418, 418, 419, 418, 418, 418, 418, 419, 419, 419, 419, 419, 418, 418, 419, 418, 418, 419, 418, 419, 418, 418, 419, 418, 418, 418, 419, @@ -3066,123 +3065,123 @@ var _hcltok_indicies []int16 = []int16{ 1045, 801, 1046, 1045, 795, 1050, 1141, 1047, 1059, 1047, 1045, 1046, 1045, 795, 1142, 1143, 1144, 1142, 1145, 1146, 1147, 1149, 1150, 1151, - 1152, 1153, 1154, 670, 670, 419, 1155, 1156, - 1157, 1158, 670, 1161, 1162, 1164, 1165, 1166, - 1160, 1167, 1168, 1169, 1170, 1171, 1172, 1173, + 1152, 1153, 1154, 1155, 670, 670, 419, 1156, + 1157, 1158, 1159, 670, 1162, 1163, 1165, 1166, + 1167, 1161, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, - 1182, 1183, 1184, 1185, 1186, 1188, 1189, 1190, - 1191, 1192, 1193, 670, 1148, 7, 1148, 419, - 1148, 419, 1160, 1163, 1187, 1194, 1159, 1142, - 1142, 1195, 1143, 1196, 1198, 1197, 4, 1147, - 1200, 1197, 1201, 1197, 2, 1147, 1197, 6, - 8, 8, 7, 1202, 1203, 1204, 1197, 1205, - 1206, 1197, 1207, 1197, 419, 419, 1209, 1210, - 489, 470, 1211, 470, 1212, 1213, 1214, 1215, - 1216, 1217, 1218, 1219, 1220, 1221, 1222, 544, - 1223, 520, 1224, 1225, 1226, 1227, 1228, 1229, - 1230, 1231, 1232, 1233, 1234, 1235, 419, 419, - 419, 425, 565, 1208, 1236, 1197, 1237, 1197, - 670, 1238, 419, 419, 419, 670, 1238, 670, - 670, 419, 1238, 419, 1238, 419, 1238, 419, - 670, 670, 670, 670, 670, 1238, 419, 670, - 670, 670, 419, 670, 419, 1238, 419, 670, - 670, 670, 670, 419, 1238, 670, 419, 670, - 419, 670, 419, 670, 670, 419, 670, 1238, - 419, 670, 419, 670, 419, 670, 1238, 670, - 419, 1238, 670, 419, 670, 419, 1238, 670, - 670, 670, 670, 670, 1238, 419, 419, 670, - 419, 670, 1238, 670, 419, 1238, 670, 670, - 1238, 419, 419, 670, 419, 670, 419, 670, - 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1245, - 1246, 1247, 1248, 1249, 715, 1250, 1251, 1252, - 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, - 1261, 1260, 1262, 1263, 1264, 1265, 1266, 671, - 1238, 1267, 1268, 1269, 1270, 1271, 1272, 1273, - 1274, 1275, 1276, 1277, 1278, 1279, 1280, 1281, - 1282, 1283, 1284, 1285, 725, 1286, 1287, 1288, - 692, 1289, 1290, 1291, 1292, 1293, 1294, 671, - 1295, 1296, 1297, 1298, 1299, 1300, 1301, 1302, - 674, 1303, 671, 674, 1304, 1305, 1306, 1307, - 683, 1238, 1308, 1309, 1310, 1311, 703, 1312, - 1313, 683, 1314, 1315, 1316, 1317, 1318, 671, - 1238, 1319, 1278, 1320, 1321, 1322, 683, 1323, - 1324, 674, 671, 683, 425, 1238, 1288, 671, - 674, 683, 425, 683, 425, 1325, 683, 1238, - 425, 674, 1326, 1327, 674, 1328, 1329, 681, - 1330, 1331, 1332, 1333, 1334, 1284, 1335, 1336, - 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344, - 1345, 1346, 1303, 1347, 674, 683, 425, 1238, - 1348, 1349, 683, 671, 1238, 425, 671, 1238, - 674, 1350, 731, 1351, 1352, 1353, 1354, 1355, - 1356, 1357, 1358, 671, 1359, 1360, 1361, 1362, - 1363, 1364, 671, 683, 1238, 1366, 1367, 1368, - 1369, 1370, 1371, 1372, 1373, 1374, 1375, 1376, - 1372, 1378, 1379, 1380, 1381, 1365, 1377, 1365, - 1238, 1365, 1238, 1382, 1382, 1383, 1384, 1385, - 1386, 1387, 1388, 1389, 1390, 1387, 767, 1391, - 1391, 1391, 1392, 1391, 1391, 768, 769, 770, - 1391, 767, 1382, 1382, 1393, 1396, 1397, 1395, - 1398, 1399, 1398, 1400, 1391, 1402, 1401, 1396, - 1403, 1395, 1405, 
1404, 1394, 1394, 1394, 768, - 769, 770, 1394, 767, 767, 1406, 773, 1406, - 1407, 1406, 775, 1408, 1409, 1410, 1411, 1412, - 1413, 1414, 1411, 776, 775, 1408, 1415, 1415, - 777, 779, 1416, 1415, 776, 1418, 1419, 1417, - 1418, 1419, 1420, 1417, 775, 1408, 1421, 1415, - 775, 1408, 1415, 1423, 1422, 1425, 1424, 776, - 1426, 777, 1426, 779, 1426, 785, 1427, 1428, - 1429, 1430, 1431, 1432, 1433, 1430, 786, 785, - 1427, 1434, 1434, 787, 789, 1435, 1434, 786, - 1437, 1438, 1436, 1437, 1438, 1439, 1436, 785, - 1427, 1440, 1434, 785, 1427, 1434, 1442, 1441, - 1444, 1443, 786, 1445, 787, 1445, 789, 1445, - 795, 1448, 1449, 1451, 1452, 1453, 1447, 1454, - 1455, 1456, 1457, 1458, 1459, 1460, 1461, 1462, - 1463, 1464, 1465, 1466, 1467, 1468, 1469, 1470, - 1471, 1472, 1473, 1475, 1476, 1477, 1478, 1479, - 1480, 795, 795, 1446, 1447, 1450, 1474, 1481, - 1446, 1046, 795, 795, 1483, 1484, 865, 846, - 1485, 846, 1486, 1487, 1488, 1489, 1490, 1491, - 1492, 1493, 1494, 1495, 1496, 920, 1497, 896, - 1498, 1499, 1500, 1501, 1502, 1503, 1504, 1505, - 1506, 1507, 1508, 1509, 795, 795, 795, 801, - 941, 1482, 1046, 1510, 795, 795, 795, 1046, - 1510, 1046, 1046, 795, 1510, 795, 1510, 795, - 1510, 795, 1046, 1046, 1046, 1046, 1046, 1510, - 795, 1046, 1046, 1046, 795, 1046, 795, 1510, - 795, 1046, 1046, 1046, 1046, 795, 1510, 1046, - 795, 1046, 795, 1046, 795, 1046, 1046, 795, - 1046, 1510, 795, 1046, 795, 1046, 795, 1046, - 1510, 1046, 795, 1510, 1046, 795, 1046, 795, - 1510, 1046, 1046, 1046, 1046, 1046, 1510, 795, - 795, 1046, 795, 1046, 1510, 1046, 795, 1510, - 1046, 1046, 1510, 795, 795, 1046, 795, 1046, - 795, 1046, 1510, 1511, 1512, 1513, 1514, 1515, - 1516, 1517, 1518, 1519, 1520, 1521, 1091, 1522, - 1523, 1524, 1525, 1526, 1527, 1528, 1529, 1530, - 1531, 1532, 1533, 1532, 1534, 1535, 1536, 1537, - 1538, 1047, 1510, 1539, 1540, 1541, 1542, 1543, - 1544, 1545, 1546, 1547, 1548, 1549, 1550, 1551, - 1552, 1553, 1554, 1555, 1556, 1557, 1101, 1558, - 1559, 1560, 1068, 1561, 1562, 1563, 1564, 1565, - 1566, 1047, 1567, 1568, 1569, 1570, 1571, 1572, - 1573, 1574, 1050, 1575, 1047, 1050, 1576, 1577, - 1578, 1579, 1059, 1510, 1580, 1581, 1582, 1583, - 1079, 1584, 1585, 1059, 1586, 1587, 1588, 1589, - 1590, 1047, 1510, 1591, 1550, 1592, 1593, 1594, - 1059, 1595, 1596, 1050, 1047, 1059, 801, 1510, - 1560, 1047, 1050, 1059, 801, 1059, 801, 1597, - 1059, 1510, 801, 1050, 1598, 1599, 1050, 1600, - 1601, 1057, 1602, 1603, 1604, 1605, 1606, 1556, - 1607, 1608, 1609, 1610, 1611, 1612, 1613, 1614, - 1615, 1616, 1617, 1618, 1575, 1619, 1050, 1059, - 801, 1510, 1620, 1621, 1059, 1047, 1510, 801, - 1047, 1510, 1050, 1622, 1107, 1623, 1624, 1625, - 1626, 1627, 1628, 1629, 1630, 1047, 1631, 1632, - 1633, 1634, 1635, 1636, 1047, 1059, 1510, 1638, - 1639, 1640, 1641, 1642, 1643, 1644, 1645, 1646, - 1647, 1648, 1644, 1650, 1651, 1652, 1653, 1637, - 1649, 1637, 1510, 1637, 1510, + 1182, 1183, 1184, 1185, 1186, 1187, 1189, 1190, + 1191, 1192, 1193, 1194, 670, 1148, 7, 1148, + 419, 1148, 419, 1161, 1164, 1188, 1195, 1160, + 1142, 1142, 1196, 1143, 1197, 1199, 1198, 4, + 1147, 1201, 1198, 1202, 1198, 2, 1147, 1198, + 6, 8, 8, 7, 1203, 1204, 1198, 1205, + 1206, 1198, 1207, 1208, 1198, 1209, 1198, 419, + 419, 1211, 1212, 489, 470, 1213, 470, 1214, + 1215, 1216, 1217, 1218, 1219, 1220, 1221, 1222, + 1223, 1224, 544, 1225, 520, 1226, 1227, 1228, + 1229, 1230, 1231, 1232, 1233, 1234, 1235, 1236, + 1237, 419, 419, 419, 425, 565, 1210, 1238, + 1198, 1239, 1198, 670, 1240, 419, 419, 419, + 670, 1240, 670, 670, 419, 1240, 419, 1240, + 419, 1240, 
419, 670, 670, 670, 670, 670, + 1240, 419, 670, 670, 670, 419, 670, 419, + 1240, 419, 670, 670, 670, 670, 419, 1240, + 670, 419, 670, 419, 670, 419, 670, 670, + 419, 670, 1240, 419, 670, 419, 670, 419, + 670, 1240, 670, 419, 1240, 670, 419, 670, + 419, 1240, 670, 670, 670, 670, 670, 1240, + 419, 419, 670, 419, 670, 1240, 670, 419, + 1240, 670, 670, 1240, 419, 419, 670, 419, + 670, 419, 670, 1240, 1241, 1242, 1243, 1244, + 1245, 1246, 1247, 1248, 1249, 1250, 1251, 715, + 1252, 1253, 1254, 1255, 1256, 1257, 1258, 1259, + 1260, 1261, 1262, 1263, 1262, 1264, 1265, 1266, + 1267, 1268, 671, 1240, 1269, 1270, 1271, 1272, + 1273, 1274, 1275, 1276, 1277, 1278, 1279, 1280, + 1281, 1282, 1283, 1284, 1285, 1286, 1287, 725, + 1288, 1289, 1290, 692, 1291, 1292, 1293, 1294, + 1295, 1296, 671, 1297, 1298, 1299, 1300, 1301, + 1302, 1303, 1304, 674, 1305, 671, 674, 1306, + 1307, 1308, 1309, 683, 1240, 1310, 1311, 1312, + 1313, 703, 1314, 1315, 683, 1316, 1317, 1318, + 1319, 1320, 671, 1240, 1321, 1280, 1322, 1323, + 1324, 683, 1325, 1326, 674, 671, 683, 425, + 1240, 1290, 671, 674, 683, 425, 683, 425, + 1327, 683, 1240, 425, 674, 1328, 1329, 674, + 1330, 1331, 681, 1332, 1333, 1334, 1335, 1336, + 1286, 1337, 1338, 1339, 1340, 1341, 1342, 1343, + 1344, 1345, 1346, 1347, 1348, 1305, 1349, 674, + 683, 425, 1240, 1350, 1351, 683, 671, 1240, + 425, 671, 1240, 674, 1352, 731, 1353, 1354, + 1355, 1356, 1357, 1358, 1359, 1360, 671, 1361, + 1362, 1363, 1364, 1365, 1366, 671, 683, 1240, + 1368, 1369, 1370, 1371, 1372, 1373, 1374, 1375, + 1376, 1377, 1378, 1374, 1380, 1381, 1382, 1383, + 1367, 1379, 1367, 1240, 1367, 1240, 1384, 1384, + 1385, 1386, 1387, 1388, 1389, 1390, 1391, 1392, + 1389, 767, 1393, 1393, 1393, 1394, 1393, 1393, + 768, 769, 770, 1393, 767, 1384, 1384, 1395, + 1398, 1399, 1397, 1400, 1401, 1400, 1402, 1393, + 1404, 1403, 1398, 1405, 1397, 1407, 1406, 1396, + 1396, 1396, 768, 769, 770, 1396, 767, 767, + 1408, 773, 1408, 1409, 1408, 775, 1410, 1411, + 1412, 1413, 1414, 1415, 1416, 1413, 776, 775, + 1410, 1417, 1417, 777, 779, 1418, 1417, 776, + 1420, 1421, 1419, 1420, 1421, 1422, 1419, 775, + 1410, 1423, 1417, 775, 1410, 1417, 1425, 1424, + 1427, 1426, 776, 1428, 777, 1428, 779, 1428, + 785, 1429, 1430, 1431, 1432, 1433, 1434, 1435, + 1432, 786, 785, 1429, 1436, 1436, 787, 789, + 1437, 1436, 786, 1439, 1440, 1438, 1439, 1440, + 1441, 1438, 785, 1429, 1442, 1436, 785, 1429, + 1436, 1444, 1443, 1446, 1445, 786, 1447, 787, + 1447, 789, 1447, 795, 1450, 1451, 1453, 1454, + 1455, 1449, 1456, 1457, 1458, 1459, 1460, 1461, + 1462, 1463, 1464, 1465, 1466, 1467, 1468, 1469, + 1470, 1471, 1472, 1473, 1474, 1475, 1477, 1478, + 1479, 1480, 1481, 1482, 795, 795, 1448, 1449, + 1452, 1476, 1483, 1448, 1046, 795, 795, 1485, + 1486, 865, 846, 1487, 846, 1488, 1489, 1490, + 1491, 1492, 1493, 1494, 1495, 1496, 1497, 1498, + 920, 1499, 896, 1500, 1501, 1502, 1503, 1504, + 1505, 1506, 1507, 1508, 1509, 1510, 1511, 795, + 795, 795, 801, 941, 1484, 1046, 1512, 795, + 795, 795, 1046, 1512, 1046, 1046, 795, 1512, + 795, 1512, 795, 1512, 795, 1046, 1046, 1046, + 1046, 1046, 1512, 795, 1046, 1046, 1046, 795, + 1046, 795, 1512, 795, 1046, 1046, 1046, 1046, + 795, 1512, 1046, 795, 1046, 795, 1046, 795, + 1046, 1046, 795, 1046, 1512, 795, 1046, 795, + 1046, 795, 1046, 1512, 1046, 795, 1512, 1046, + 795, 1046, 795, 1512, 1046, 1046, 1046, 1046, + 1046, 1512, 795, 795, 1046, 795, 1046, 1512, + 1046, 795, 1512, 1046, 1046, 1512, 795, 795, + 1046, 795, 1046, 795, 1046, 1512, 1513, 1514, + 1515, 1516, 1517, 1518, 1519, 1520, 1521, 
1522, + 1523, 1091, 1524, 1525, 1526, 1527, 1528, 1529, + 1530, 1531, 1532, 1533, 1534, 1535, 1534, 1536, + 1537, 1538, 1539, 1540, 1047, 1512, 1541, 1542, + 1543, 1544, 1545, 1546, 1547, 1548, 1549, 1550, + 1551, 1552, 1553, 1554, 1555, 1556, 1557, 1558, + 1559, 1101, 1560, 1561, 1562, 1068, 1563, 1564, + 1565, 1566, 1567, 1568, 1047, 1569, 1570, 1571, + 1572, 1573, 1574, 1575, 1576, 1050, 1577, 1047, + 1050, 1578, 1579, 1580, 1581, 1059, 1512, 1582, + 1583, 1584, 1585, 1079, 1586, 1587, 1059, 1588, + 1589, 1590, 1591, 1592, 1047, 1512, 1593, 1552, + 1594, 1595, 1596, 1059, 1597, 1598, 1050, 1047, + 1059, 801, 1512, 1562, 1047, 1050, 1059, 801, + 1059, 801, 1599, 1059, 1512, 801, 1050, 1600, + 1601, 1050, 1602, 1603, 1057, 1604, 1605, 1606, + 1607, 1608, 1558, 1609, 1610, 1611, 1612, 1613, + 1614, 1615, 1616, 1617, 1618, 1619, 1620, 1577, + 1621, 1050, 1059, 801, 1512, 1622, 1623, 1059, + 1047, 1512, 801, 1047, 1512, 1050, 1624, 1107, + 1625, 1626, 1627, 1628, 1629, 1630, 1631, 1632, + 1047, 1633, 1634, 1635, 1636, 1637, 1638, 1047, + 1059, 1512, 1640, 1641, 1642, 1643, 1644, 1645, + 1646, 1647, 1648, 1649, 1650, 1646, 1652, 1653, + 1654, 1655, 1639, 1651, 1639, 1512, 1639, 1512, } var _hcltok_trans_targs []int16 = []int16{ @@ -3238,7 +3237,7 @@ var _hcltok_trans_targs []int16 = []int16{ 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 405, 406, 407, 408, 410, - 412, 414, 1459, 1471, 1459, 437, 438, 439, + 412, 414, 1459, 1472, 1459, 437, 438, 439, 440, 417, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, @@ -3281,11 +3280,11 @@ var _hcltok_trans_targs []int16 = []int16{ 888, 889, 890, 891, 892, 895, 896, 898, 899, 900, 902, 903, 904, 905, 906, 907, 908, 909, 910, 911, 912, 914, 915, 916, - 917, 920, 922, 923, 925, 927, 1509, 1510, - 929, 930, 931, 1509, 1509, 932, 1523, 1523, - 1524, 935, 1523, 936, 1525, 1526, 1529, 1530, - 1534, 1534, 1535, 941, 1534, 942, 1536, 1537, - 1540, 1541, 1545, 1546, 1545, 968, 969, 970, + 917, 920, 922, 923, 925, 927, 1510, 1511, + 929, 930, 931, 1510, 1510, 932, 1524, 1524, + 1525, 935, 1524, 936, 1526, 1527, 1530, 1531, + 1535, 1535, 1536, 941, 1535, 942, 1537, 1538, + 1541, 1542, 1546, 1547, 1546, 968, 969, 970, 971, 948, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, @@ -3316,7 +3315,7 @@ var _hcltok_trans_targs []int16 = []int16{ 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201, 1202, 1204, 1205, 1206, 1207, 1208, 1209, 1211, - 1213, 1215, 1217, 1219, 1220, 1545, 1545, 1221, + 1213, 1215, 1217, 1219, 1220, 1546, 1546, 1221, 1358, 1359, 1290, 1360, 1361, 1362, 1363, 1364, 1365, 1319, 1366, 1255, 1367, 1368, 1369, 1370, 1371, 1372, 1373, 1374, 1275, 1375, 1376, 1377, @@ -3330,78 +3329,78 @@ var _hcltok_trans_targs []int16 = []int16{ 1439, 1440, 1441, 1442, 1443, 1445, 1446, 1447, 1448, 1451, 1453, 1454, 1456, 1458, 1460, 1459, 1461, 1462, 1459, 1463, 1459, 1464, 1465, 1466, - 1468, 1469, 1470, 1459, 1472, 1459, 1473, 1459, - 1474, 1475, 1476, 1477, 1478, 1479, 1480, 1481, + 1468, 1469, 1470, 1471, 1459, 1473, 1459, 1474, + 1459, 1475, 1476, 1477, 1478, 1479, 1480, 1481, 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, 1490, 1491, 1492, 1493, 1494, 1495, 1496, 1497, 1498, 1499, 1500, 1501, 1502, 1503, 1504, 1505, - 1506, 1507, 1508, 1459, 1459, 1459, 1459, 1459, - 1459, 1, 1459, 7, 1459, 1459, 1459, 1459, - 1459, 415, 416, 420, 421, 422, 423, 
424, - 425, 426, 427, 428, 429, 430, 431, 433, - 435, 436, 468, 509, 524, 531, 533, 535, - 555, 558, 574, 687, 1459, 1459, 1459, 691, - 692, 693, 694, 695, 696, 697, 698, 699, - 700, 701, 703, 704, 705, 706, 707, 708, - 709, 710, 711, 712, 713, 714, 715, 716, - 717, 718, 719, 720, 721, 722, 723, 725, - 726, 727, 728, 729, 730, 731, 732, 733, - 734, 735, 736, 737, 738, 739, 741, 742, - 743, 745, 746, 747, 748, 749, 750, 751, - 752, 753, 754, 755, 756, 757, 758, 760, - 761, 762, 763, 764, 765, 766, 767, 768, - 770, 771, 772, 773, 774, 775, 776, 777, - 778, 779, 780, 781, 782, 783, 784, 785, - 786, 787, 789, 790, 791, 792, 793, 794, - 795, 796, 797, 798, 799, 800, 801, 802, - 803, 804, 805, 806, 807, 808, 809, 811, - 812, 813, 814, 815, 816, 817, 818, 819, - 820, 821, 822, 823, 824, 825, 826, 855, - 880, 883, 884, 886, 893, 894, 897, 901, - 913, 918, 919, 921, 924, 926, 1511, 1509, - 1512, 1517, 1519, 1509, 1520, 1521, 1522, 1509, - 928, 1509, 1509, 1513, 1514, 1516, 1509, 1515, - 1509, 1509, 1509, 1518, 1509, 1509, 1509, 933, - 934, 938, 939, 1523, 1531, 1532, 1533, 1523, - 937, 1523, 1523, 934, 1527, 1528, 1523, 1523, - 1523, 1523, 1523, 940, 944, 945, 1534, 1542, - 1543, 1544, 1534, 943, 1534, 1534, 940, 1538, - 1539, 1534, 1534, 1534, 1534, 1534, 1545, 1547, - 1548, 1549, 1550, 1551, 1552, 1553, 1554, 1555, - 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, - 1564, 1565, 1566, 1567, 1568, 1569, 1570, 1571, - 1572, 1573, 1574, 1575, 1576, 1577, 1578, 1579, - 1580, 1581, 1545, 946, 947, 951, 952, 953, - 954, 955, 956, 957, 958, 959, 960, 961, - 962, 964, 966, 967, 999, 1040, 1055, 1062, - 1064, 1066, 1086, 1089, 1105, 1218, 1545, 1222, - 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, - 1231, 1232, 1234, 1235, 1236, 1237, 1238, 1239, - 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247, - 1248, 1249, 1250, 1251, 1252, 1253, 1254, 1256, - 1257, 1258, 1259, 1260, 1261, 1262, 1263, 1264, - 1265, 1266, 1267, 1268, 1269, 1270, 1272, 1273, - 1274, 1276, 1277, 1278, 1279, 1280, 1281, 1282, - 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1291, - 1292, 1293, 1294, 1295, 1296, 1297, 1298, 1299, - 1301, 1302, 1303, 1304, 1305, 1306, 1307, 1308, - 1309, 1310, 1311, 1312, 1313, 1314, 1315, 1316, - 1317, 1318, 1320, 1321, 1322, 1323, 1324, 1325, - 1326, 1327, 1328, 1329, 1330, 1331, 1332, 1333, - 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1342, - 1343, 1344, 1345, 1346, 1347, 1348, 1349, 1350, - 1351, 1352, 1353, 1354, 1355, 1356, 1357, 1386, - 1411, 1414, 1415, 1417, 1424, 1425, 1428, 1432, - 1444, 1449, 1450, 1452, 1455, 1457, + 1506, 1507, 1508, 1509, 1459, 1459, 1459, 1459, + 1459, 1459, 1, 1459, 1459, 7, 1459, 1459, + 1459, 1459, 1459, 415, 416, 420, 421, 422, + 423, 424, 425, 426, 427, 428, 429, 430, + 431, 433, 435, 436, 468, 509, 524, 531, + 533, 535, 555, 558, 574, 687, 1459, 1459, + 1459, 691, 692, 693, 694, 695, 696, 697, + 698, 699, 700, 701, 703, 704, 705, 706, + 707, 708, 709, 710, 711, 712, 713, 714, + 715, 716, 717, 718, 719, 720, 721, 722, + 723, 725, 726, 727, 728, 729, 730, 731, + 732, 733, 734, 735, 736, 737, 738, 739, + 741, 742, 743, 745, 746, 747, 748, 749, + 750, 751, 752, 753, 754, 755, 756, 757, + 758, 760, 761, 762, 763, 764, 765, 766, + 767, 768, 770, 771, 772, 773, 774, 775, + 776, 777, 778, 779, 780, 781, 782, 783, + 784, 785, 786, 787, 789, 790, 791, 792, + 793, 794, 795, 796, 797, 798, 799, 800, + 801, 802, 803, 804, 805, 806, 807, 808, + 809, 811, 812, 813, 814, 815, 816, 817, + 818, 819, 820, 821, 822, 823, 824, 825, + 826, 855, 880, 883, 884, 886, 893, 894, + 897, 901, 913, 918, 
919, 921, 924, 926, + 1512, 1510, 1513, 1518, 1520, 1510, 1521, 1522, + 1523, 1510, 928, 1510, 1510, 1514, 1515, 1517, + 1510, 1516, 1510, 1510, 1510, 1519, 1510, 1510, + 1510, 933, 934, 938, 939, 1524, 1532, 1533, + 1534, 1524, 937, 1524, 1524, 934, 1528, 1529, + 1524, 1524, 1524, 1524, 1524, 940, 944, 945, + 1535, 1543, 1544, 1545, 1535, 943, 1535, 1535, + 940, 1539, 1540, 1535, 1535, 1535, 1535, 1535, + 1546, 1548, 1549, 1550, 1551, 1552, 1553, 1554, + 1555, 1556, 1557, 1558, 1559, 1560, 1561, 1562, + 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570, + 1571, 1572, 1573, 1574, 1575, 1576, 1577, 1578, + 1579, 1580, 1581, 1582, 1546, 946, 947, 951, + 952, 953, 954, 955, 956, 957, 958, 959, + 960, 961, 962, 964, 966, 967, 999, 1040, + 1055, 1062, 1064, 1066, 1086, 1089, 1105, 1218, + 1546, 1222, 1223, 1224, 1225, 1226, 1227, 1228, + 1229, 1230, 1231, 1232, 1234, 1235, 1236, 1237, + 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1245, + 1246, 1247, 1248, 1249, 1250, 1251, 1252, 1253, + 1254, 1256, 1257, 1258, 1259, 1260, 1261, 1262, + 1263, 1264, 1265, 1266, 1267, 1268, 1269, 1270, + 1272, 1273, 1274, 1276, 1277, 1278, 1279, 1280, + 1281, 1282, 1283, 1284, 1285, 1286, 1287, 1288, + 1289, 1291, 1292, 1293, 1294, 1295, 1296, 1297, + 1298, 1299, 1301, 1302, 1303, 1304, 1305, 1306, + 1307, 1308, 1309, 1310, 1311, 1312, 1313, 1314, + 1315, 1316, 1317, 1318, 1320, 1321, 1322, 1323, + 1324, 1325, 1326, 1327, 1328, 1329, 1330, 1331, + 1332, 1333, 1334, 1335, 1336, 1337, 1338, 1339, + 1340, 1342, 1343, 1344, 1345, 1346, 1347, 1348, + 1349, 1350, 1351, 1352, 1353, 1354, 1355, 1356, + 1357, 1386, 1411, 1414, 1415, 1417, 1424, 1425, + 1428, 1432, 1444, 1449, 1450, 1452, 1455, 1457, } var _hcltok_trans_actions []byte = []byte{ - 145, 107, 0, 0, 91, 141, 0, 7, + 147, 109, 0, 0, 91, 143, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 121, 0, 0, 0, + 0, 0, 0, 0, 123, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -3448,7 +3447,7 @@ var _hcltok_trans_actions []byte = []byte{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 143, 193, 149, 0, 0, 0, + 0, 0, 145, 195, 151, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -3479,7 +3478,7 @@ var _hcltok_trans_actions []byte = []byte{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 147, 125, 0, + 0, 0, 0, 0, 0, 149, 127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -3491,11 +3490,11 @@ var _hcltok_trans_actions []byte = []byte{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 31, 169, + 0, 0, 0, 0, 0, 0, 31, 171, 0, 0, 0, 35, 33, 0, 55, 41, - 175, 0, 53, 0, 175, 175, 0, 0, - 75, 61, 181, 0, 73, 0, 181, 181, - 0, 0, 85, 187, 89, 0, 0, 0, + 177, 0, 53, 0, 177, 177, 0, 0, + 75, 61, 183, 0, 73, 0, 183, 183, + 0, 0, 85, 189, 89, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -3539,18 +3538,19 @@ var _hcltok_trans_actions []byte = []byte{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 93, - 0, 0, 119, 0, 111, 0, 7, 7, - 7, 0, 0, 113, 0, 115, 0, 123, - 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 121, 0, 113, 0, 7, 7, + 0, 7, 0, 0, 115, 0, 117, 0, + 125, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 7, 7, - 7, 196, 196, 196, 196, 196, 196, 7, - 7, 196, 7, 127, 139, 135, 97, 133, - 103, 0, 129, 0, 101, 95, 109, 99, - 131, 0, 0, 0, 
0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 7, + 7, 7, 198, 198, 198, 198, 198, 198, + 7, 7, 198, 7, 129, 141, 137, 97, + 135, 103, 0, 131, 107, 0, 101, 95, + 111, 99, 133, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 105, 117, 137, 0, + 0, 0, 0, 0, 0, 0, 105, 119, + 139, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -3568,23 +3568,23 @@ var _hcltok_trans_actions []byte = []byte{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 13, - 0, 0, 172, 17, 0, 7, 7, 23, - 0, 25, 27, 0, 0, 0, 151, 0, - 15, 19, 9, 0, 21, 11, 29, 0, - 0, 0, 0, 43, 0, 178, 178, 49, - 0, 157, 154, 1, 175, 175, 45, 37, - 47, 39, 51, 0, 0, 0, 63, 0, - 184, 184, 69, 0, 163, 160, 1, 181, - 181, 65, 57, 67, 59, 71, 77, 0, + 0, 13, 0, 0, 174, 17, 0, 7, + 7, 23, 0, 25, 27, 0, 0, 0, + 153, 0, 15, 19, 9, 0, 21, 11, + 29, 0, 0, 0, 0, 43, 0, 180, + 180, 49, 0, 159, 156, 1, 177, 177, + 45, 37, 47, 39, 51, 0, 0, 0, + 63, 0, 186, 186, 69, 0, 165, 162, + 1, 183, 183, 65, 57, 67, 59, 71, + 77, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 7, + 7, 7, 192, 192, 192, 192, 192, 192, + 7, 7, 192, 7, 81, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 7, 7, 7, - 190, 190, 190, 190, 190, 190, 7, 7, - 190, 7, 81, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 83, 0, + 83, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -3602,7 +3602,6 @@ var _hcltok_trans_actions []byte = []byte{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, } var _hcltok_to_state_actions []byte = []byte{ @@ -3794,16 +3793,16 @@ var _hcltok_to_state_actions []byte = []byte{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 3, 0, 0, + 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 166, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 166, 0, + 0, 0, 0, 0, 168, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 168, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 3, 0, 0, 0, 0, 0, 0, + 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, } var _hcltok_from_state_actions []byte = []byte{ @@ -3995,16 +3994,16 @@ var _hcltok_from_state_actions []byte = []byte{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 5, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 5, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 5, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 5, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, } var _hcltok_eof_trans []int16 = []int16{ @@ -4190,35 +4189,35 @@ var _hcltok_eof_trans []int16 = []int16{ 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, - 1046, 1046, 1046, 0, 1196, 1197, 1198, 1200, - 1198, 1198, 1198, 1203, 1198, 1198, 1198, 1209, - 1198, 1198, 1239, 1239, 1239, 1239, 1239, 1239, - 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239, - 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239, - 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239, - 1239, 1239, 1239, 1239, 1239, 0, 1392, 1394, - 1395, 1399, 1399, 1392, 1402, 1395, 1405, 1395, - 1407, 1407, 1407, 0, 1416, 1418, 1418, 1416, - 1416, 1423, 1425, 1427, 1427, 1427, 0, 1435, - 1437, 1437, 
1435, 1435, 1442, 1444, 1446, 1446, - 1446, 0, 1483, 1511, 1511, 1511, 1511, 1511, - 1511, 1511, 1511, 1511, 1511, 1511, 1511, 1511, - 1511, 1511, 1511, 1511, 1511, 1511, 1511, 1511, - 1511, 1511, 1511, 1511, 1511, 1511, 1511, 1511, - 1511, 1511, 1511, 1511, 1511, 1511, + 1046, 1046, 1046, 0, 1197, 1198, 1199, 1201, + 1199, 1199, 1199, 1204, 1199, 1199, 1199, 1199, + 1211, 1199, 1199, 1241, 1241, 1241, 1241, 1241, + 1241, 1241, 1241, 1241, 1241, 1241, 1241, 1241, + 1241, 1241, 1241, 1241, 1241, 1241, 1241, 1241, + 1241, 1241, 1241, 1241, 1241, 1241, 1241, 1241, + 1241, 1241, 1241, 1241, 1241, 1241, 0, 1394, + 1396, 1397, 1401, 1401, 1394, 1404, 1397, 1407, + 1397, 1409, 1409, 1409, 0, 1418, 1420, 1420, + 1418, 1418, 1425, 1427, 1429, 1429, 1429, 0, + 1437, 1439, 1439, 1437, 1437, 1444, 1446, 1448, + 1448, 1448, 0, 1485, 1513, 1513, 1513, 1513, + 1513, 1513, 1513, 1513, 1513, 1513, 1513, 1513, + 1513, 1513, 1513, 1513, 1513, 1513, 1513, 1513, + 1513, 1513, 1513, 1513, 1513, 1513, 1513, 1513, + 1513, 1513, 1513, 1513, 1513, 1513, 1513, } const hcltok_start int = 1459 const hcltok_first_final int = 1459 const hcltok_error int = 0 -const hcltok_en_stringTemplate int = 1509 -const hcltok_en_heredocTemplate int = 1523 -const hcltok_en_bareTemplate int = 1534 -const hcltok_en_identOnly int = 1545 +const hcltok_en_stringTemplate int = 1510 +const hcltok_en_heredocTemplate int = 1524 +const hcltok_en_bareTemplate int = 1535 +const hcltok_en_identOnly int = 1546 const hcltok_en_main int = 1459 -//line scan_tokens.rl:16 +//line scan_tokens.rl:18 func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token { stripData := stripUTF8BOM(data) @@ -4232,7 +4231,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To StartByte: start.Byte, } -//line scan_tokens.rl:305 +//line scan_tokens.rl:317 // Ragel state p := 0 // "Pointer" into data @@ -4260,7 +4259,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To var retBraces []int // stack of brace levels that cause us to use fret var heredocs []heredocInProgress // stack of heredocs we're currently processing -//line scan_tokens.rl:340 +//line scan_tokens.rl:352 // Make Go compiler happy _ = ts @@ -4280,7 +4279,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To f.emitToken(TokenType(b[0]), ts, te) } -//line scan_tokens.go:4289 +//line scan_tokens.go:4292 { top = 0 ts = 0 @@ -4288,7 +4287,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To act = 0 } -//line scan_tokens.go:4297 +//line scan_tokens.go:4300 { var _klen int var _trans int @@ -4312,7 +4311,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To //line NONE:1 ts = p -//line scan_tokens.go:4320 +//line scan_tokens.go:4323 } } @@ -4384,7 +4383,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To _acts++ switch _hcltok_actions[_acts-1] { case 0: -//line scan_tokens.rl:224 +//line scan_tokens.rl:235 p-- case 4: @@ -4392,13 +4391,13 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To te = p + 1 case 5: -//line scan_tokens.rl:248 +//line scan_tokens.rl:259 act = 4 case 6: -//line scan_tokens.rl:250 +//line scan_tokens.rl:261 act = 6 case 7: -//line scan_tokens.rl:160 +//line scan_tokens.rl:171 te = p + 1 { token(TokenTemplateInterp) @@ -4416,7 +4415,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To } } case 8: -//line 
scan_tokens.rl:170 +//line scan_tokens.rl:181 te = p + 1 { token(TokenTemplateControl) @@ -4434,7 +4433,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To } } case 9: -//line scan_tokens.rl:84 +//line scan_tokens.rl:95 te = p + 1 { token(TokenCQuote) @@ -4447,19 +4446,19 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To } case 10: -//line scan_tokens.rl:248 +//line scan_tokens.rl:259 te = p + 1 { token(TokenQuotedLit) } case 11: -//line scan_tokens.rl:251 +//line scan_tokens.rl:262 te = p + 1 { token(TokenBadUTF8) } case 12: -//line scan_tokens.rl:160 +//line scan_tokens.rl:171 te = p p-- { @@ -4478,7 +4477,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To } } case 13: -//line scan_tokens.rl:170 +//line scan_tokens.rl:181 te = p p-- { @@ -4497,41 +4496,41 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To } } case 14: -//line scan_tokens.rl:248 +//line scan_tokens.rl:259 te = p p-- { token(TokenQuotedLit) } case 15: -//line scan_tokens.rl:249 +//line scan_tokens.rl:260 te = p p-- { token(TokenQuotedNewline) } case 16: -//line scan_tokens.rl:250 +//line scan_tokens.rl:261 te = p p-- { token(TokenInvalid) } case 17: -//line scan_tokens.rl:251 +//line scan_tokens.rl:262 te = p p-- { token(TokenBadUTF8) } case 18: -//line scan_tokens.rl:248 +//line scan_tokens.rl:259 p = (te) - 1 { token(TokenQuotedLit) } case 19: -//line scan_tokens.rl:251 +//line scan_tokens.rl:262 p = (te) - 1 { token(TokenBadUTF8) @@ -4552,13 +4551,13 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To } case 21: -//line scan_tokens.rl:148 +//line scan_tokens.rl:159 act = 11 case 22: -//line scan_tokens.rl:259 +//line scan_tokens.rl:270 act = 12 case 23: -//line scan_tokens.rl:160 +//line scan_tokens.rl:171 te = p + 1 { token(TokenTemplateInterp) @@ -4576,7 +4575,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To } } case 24: -//line scan_tokens.rl:170 +//line scan_tokens.rl:181 te = p + 1 { token(TokenTemplateControl) @@ -4594,7 +4593,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To } } case 25: -//line scan_tokens.rl:111 +//line scan_tokens.rl:122 te = p + 1 { // This action is called specificially when a heredoc literal @@ -4639,13 +4638,13 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To token(TokenStringLit) } case 26: -//line scan_tokens.rl:259 +//line scan_tokens.rl:270 te = p + 1 { token(TokenBadUTF8) } case 27: -//line scan_tokens.rl:160 +//line scan_tokens.rl:171 te = p p-- { @@ -4664,7 +4663,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To } } case 28: -//line scan_tokens.rl:170 +//line scan_tokens.rl:181 te = p p-- { @@ -4683,7 +4682,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To } } case 29: -//line scan_tokens.rl:148 +//line scan_tokens.rl:159 te = p p-- { @@ -4694,14 +4693,14 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To token(TokenStringLit) } case 30: -//line scan_tokens.rl:259 +//line scan_tokens.rl:270 te = p p-- { token(TokenBadUTF8) } case 31: -//line scan_tokens.rl:148 +//line scan_tokens.rl:159 p = (te) - 1 { // This action is called when a heredoc literal _doesn't_ end @@ -4736,13 +4735,13 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To } case 33: -//line scan_tokens.rl:156 
+//line scan_tokens.rl:167 act = 15 case 34: -//line scan_tokens.rl:266 +//line scan_tokens.rl:277 act = 16 case 35: -//line scan_tokens.rl:160 +//line scan_tokens.rl:171 te = p + 1 { token(TokenTemplateInterp) @@ -4760,7 +4759,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To } } case 36: -//line scan_tokens.rl:170 +//line scan_tokens.rl:181 te = p + 1 { token(TokenTemplateControl) @@ -4778,19 +4777,19 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To } } case 37: -//line scan_tokens.rl:156 +//line scan_tokens.rl:167 te = p + 1 { token(TokenStringLit) } case 38: -//line scan_tokens.rl:266 +//line scan_tokens.rl:277 te = p + 1 { token(TokenBadUTF8) } case 39: -//line scan_tokens.rl:160 +//line scan_tokens.rl:171 te = p p-- { @@ -4809,7 +4808,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To } } case 40: -//line scan_tokens.rl:170 +//line scan_tokens.rl:181 te = p p-- { @@ -4828,21 +4827,21 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To } } case 41: -//line scan_tokens.rl:156 +//line scan_tokens.rl:167 te = p p-- { token(TokenStringLit) } case 42: -//line scan_tokens.rl:266 +//line scan_tokens.rl:277 te = p p-- { token(TokenBadUTF8) } case 43: -//line scan_tokens.rl:156 +//line scan_tokens.rl:167 p = (te) - 1 { token(TokenStringLit) @@ -4869,45 +4868,45 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To } case 45: -//line scan_tokens.rl:270 +//line scan_tokens.rl:281 act = 17 case 46: -//line scan_tokens.rl:271 +//line scan_tokens.rl:282 act = 18 case 47: -//line scan_tokens.rl:271 +//line scan_tokens.rl:282 te = p + 1 { token(TokenBadUTF8) } case 48: -//line scan_tokens.rl:272 +//line scan_tokens.rl:283 te = p + 1 { token(TokenInvalid) } case 49: -//line scan_tokens.rl:270 +//line scan_tokens.rl:281 te = p p-- { token(TokenIdent) } case 50: -//line scan_tokens.rl:271 +//line scan_tokens.rl:282 te = p p-- { token(TokenBadUTF8) } case 51: -//line scan_tokens.rl:270 +//line scan_tokens.rl:281 p = (te) - 1 { token(TokenIdent) } case 52: -//line scan_tokens.rl:271 +//line scan_tokens.rl:282 p = (te) - 1 { token(TokenBadUTF8) @@ -4928,86 +4927,92 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To } case 54: -//line scan_tokens.rl:278 +//line scan_tokens.rl:289 act = 22 case 55: -//line scan_tokens.rl:301 - act = 39 +//line scan_tokens.rl:313 + act = 40 case 56: -//line scan_tokens.rl:280 +//line scan_tokens.rl:291 te = p + 1 { token(TokenComment) } case 57: -//line scan_tokens.rl:281 +//line scan_tokens.rl:292 te = p + 1 { token(TokenNewline) } case 58: -//line scan_tokens.rl:283 +//line scan_tokens.rl:294 te = p + 1 { token(TokenEqualOp) } case 59: -//line scan_tokens.rl:284 +//line scan_tokens.rl:295 te = p + 1 { token(TokenNotEqual) } case 60: -//line scan_tokens.rl:285 +//line scan_tokens.rl:296 te = p + 1 { token(TokenGreaterThanEq) } case 61: -//line scan_tokens.rl:286 +//line scan_tokens.rl:297 te = p + 1 { token(TokenLessThanEq) } case 62: -//line scan_tokens.rl:287 +//line scan_tokens.rl:298 te = p + 1 { token(TokenAnd) } case 63: -//line scan_tokens.rl:288 +//line scan_tokens.rl:299 te = p + 1 { token(TokenOr) } case 64: -//line scan_tokens.rl:289 +//line scan_tokens.rl:300 te = p + 1 { - token(TokenEllipsis) + token(TokenDoubleColon) } case 65: -//line scan_tokens.rl:290 +//line scan_tokens.rl:301 te = p + 1 { - token(TokenFatArrow) + token(TokenEllipsis) } case 66: -//line 
scan_tokens.rl:291 +//line scan_tokens.rl:302 te = p + 1 { - selfToken() + token(TokenFatArrow) } case 67: -//line scan_tokens.rl:180 +//line scan_tokens.rl:303 + te = p + 1 + { + selfToken() + } + case 68: +//line scan_tokens.rl:191 te = p + 1 { token(TokenOBrace) braces++ } - case 68: -//line scan_tokens.rl:185 + case 69: +//line scan_tokens.rl:196 te = p + 1 { if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces { @@ -5026,8 +5031,8 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To braces-- } } - case 69: -//line scan_tokens.rl:197 + case 70: +//line scan_tokens.rl:208 te = p + 1 { // Only consume from the retBraces stack and return if we are at @@ -5055,8 +5060,8 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To braces-- } } - case 70: -//line scan_tokens.rl:79 + case 71: +//line scan_tokens.rl:90 te = p + 1 { token(TokenOQuote) @@ -5064,12 +5069,12 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To stack = append(stack, 0) stack[top] = cs top++ - cs = 1509 + cs = 1510 goto _again } } - case 71: -//line scan_tokens.rl:89 + case 72: +//line scan_tokens.rl:100 te = p + 1 { token(TokenOHeredoc) @@ -5094,94 +5099,94 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To stack = append(stack, 0) stack[top] = cs top++ - cs = 1523 + cs = 1524 goto _again } } - case 72: -//line scan_tokens.rl:301 + case 73: +//line scan_tokens.rl:313 te = p + 1 { token(TokenBadUTF8) } - case 73: -//line scan_tokens.rl:302 + case 74: +//line scan_tokens.rl:314 te = p + 1 { token(TokenInvalid) } - case 74: -//line scan_tokens.rl:276 + case 75: +//line scan_tokens.rl:287 te = p p-- - case 75: -//line scan_tokens.rl:277 + case 76: +//line scan_tokens.rl:288 te = p p-- { token(TokenNumberLit) } - case 76: -//line scan_tokens.rl:278 + case 77: +//line scan_tokens.rl:289 te = p p-- { token(TokenIdent) } - case 77: -//line scan_tokens.rl:280 + case 78: +//line scan_tokens.rl:291 te = p p-- { token(TokenComment) } - case 78: -//line scan_tokens.rl:291 + case 79: +//line scan_tokens.rl:303 te = p p-- { selfToken() } - case 79: -//line scan_tokens.rl:301 + case 80: +//line scan_tokens.rl:313 te = p p-- { token(TokenBadUTF8) } - case 80: -//line scan_tokens.rl:302 + case 81: +//line scan_tokens.rl:314 te = p p-- { token(TokenInvalid) } - case 81: -//line scan_tokens.rl:277 + case 82: +//line scan_tokens.rl:288 p = (te) - 1 { token(TokenNumberLit) } - case 82: -//line scan_tokens.rl:278 + case 83: +//line scan_tokens.rl:289 p = (te) - 1 { token(TokenIdent) } - case 83: -//line scan_tokens.rl:291 + case 84: +//line scan_tokens.rl:303 p = (te) - 1 { selfToken() } - case 84: -//line scan_tokens.rl:301 + case 85: +//line scan_tokens.rl:313 p = (te) - 1 { token(TokenBadUTF8) } - case 85: + case 86: //line NONE:1 switch act { case 22: @@ -5189,14 +5194,14 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To p = (te) - 1 token(TokenIdent) } - case 39: + case 40: { p = (te) - 1 token(TokenBadUTF8) } } -//line scan_tokens.go:5055 +//line scan_tokens.go:5062 } } @@ -5215,7 +5220,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To //line NONE:1 act = 0 -//line scan_tokens.go:5073 +//line scan_tokens.go:5080 } } @@ -5241,7 +5246,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To } } -//line scan_tokens.rl:363 +//line scan_tokens.rl:375 // If we fall out here without being in a final state then we've 
// encountered something that the scanner can't match, which we'll diff --git a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.rl b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.rl index 942ad92ba1e..66bb47146c9 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.rl +++ b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.rl @@ -1,3 +1,5 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 package hclsyntax @@ -53,6 +55,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To LogicalAnd = "&&"; LogicalOr = "||"; + DoubleColon = "::"; Ellipsis = "..."; FatArrow = "=>"; @@ -294,6 +297,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To LessThanEqual => { token(TokenLessThanEq); }; LogicalAnd => { token(TokenAnd); }; LogicalOr => { token(TokenOr); }; + DoubleColon => { token(TokenDoubleColon); }; Ellipsis => { token(TokenEllipsis); }; FatArrow => { token(TokenFatArrow); }; SelfToken => { selfToken() }; diff --git a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md index 6d31e352550..88925410ab7 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md +++ b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md @@ -668,7 +668,7 @@ a == b equal a != b not equal ``` -Two values are equal if the are of identical types and their values are +Two values are equal if they are of identical types and their values are equal as defined in the HCL syntax-agnostic information model. The equality operators are commutative and opposite, such that `(a == b) == !(a != b)` and `(a == b) == (b == a)` for all values `a` and `b`. diff --git a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go index afde5f33a0b..47648b8f006 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go @@ -63,8 +63,9 @@ const ( TokenDot TokenType = '.' TokenComma TokenType = ',' - TokenEllipsis TokenType = '…' - TokenFatArrow TokenType = '⇒' + TokenDoubleColon TokenType = '⸬' + TokenEllipsis TokenType = '…' + TokenFatArrow TokenType = '⇒' TokenQuestion TokenType = '?' TokenColon TokenType = ':' diff --git a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token_type_string.go b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token_type_string.go index c23c4f0b7a4..1453389cff5 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token_type_string.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token_type_string.go @@ -1,11 +1,11 @@ -// Code generated by "stringer -type TokenType -output token_type_string.go"; DO NOT EDIT. +// Code generated by "stringer -type TokenType -output token_type_string.go token_type.go"; DO NOT EDIT. package hclsyntax import "strconv" func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. + // An "invalid array index" compiler error signifies that the constant values (55) have changed. 
// Re-run the stringer command to generate them again. var x [1]struct{} _ = x[TokenOBrace-123] @@ -35,6 +35,7 @@ func _() { _ = x[TokenBang-33] _ = x[TokenDot-46] _ = x[TokenComma-44] + _ = x[TokenDoubleColon-11820] _ = x[TokenEllipsis-8230] _ = x[TokenFatArrow-8658] _ = x[TokenQuestion-63] @@ -64,7 +65,7 @@ func _() { _ = x[TokenNil-0] } -const _TokenType_name = "TokenNilTokenNewlineTokenBangTokenPercentTokenBitwiseAndTokenApostropheTokenOParenTokenCParenTokenStarTokenPlusTokenCommaTokenMinusTokenDotTokenSlashTokenColonTokenSemicolonTokenLessThanTokenEqualTokenGreaterThanTokenQuestionTokenCommentTokenOHeredocTokenIdentTokenNumberLitTokenQuotedLitTokenStringLitTokenOBrackTokenCBrackTokenBitwiseXorTokenBacktickTokenCHeredocTokenOBraceTokenBitwiseOrTokenCBraceTokenBitwiseNotTokenOQuoteTokenCQuoteTokenTemplateControlTokenEllipsisTokenFatArrowTokenTemplateSeqEndTokenAndTokenOrTokenTemplateInterpTokenEqualOpTokenNotEqualTokenLessThanEqTokenGreaterThanEqTokenEOFTokenTabsTokenQuotedNewlineTokenStarStarTokenInvalidTokenBadUTF8" +const _TokenType_name = "TokenNilTokenNewlineTokenBangTokenPercentTokenBitwiseAndTokenApostropheTokenOParenTokenCParenTokenStarTokenPlusTokenCommaTokenMinusTokenDotTokenSlashTokenColonTokenSemicolonTokenLessThanTokenEqualTokenGreaterThanTokenQuestionTokenCommentTokenOHeredocTokenIdentTokenNumberLitTokenQuotedLitTokenStringLitTokenOBrackTokenCBrackTokenBitwiseXorTokenBacktickTokenCHeredocTokenOBraceTokenBitwiseOrTokenCBraceTokenBitwiseNotTokenOQuoteTokenCQuoteTokenTemplateControlTokenEllipsisTokenFatArrowTokenTemplateSeqEndTokenAndTokenOrTokenTemplateInterpTokenEqualOpTokenNotEqualTokenLessThanEqTokenGreaterThanEqTokenEOFTokenTabsTokenQuotedNewlineTokenStarStarTokenDoubleColonTokenInvalidTokenBadUTF8" var _TokenType_map = map[TokenType]string{ 0: _TokenType_name[0:8], @@ -119,8 +120,9 @@ var _TokenType_map = map[TokenType]string{ 9225: _TokenType_name[603:612], 9252: _TokenType_name[612:630], 10138: _TokenType_name[630:643], - 65533: _TokenType_name[643:655], - 128169: _TokenType_name[655:667], + 11820: _TokenType_name[643:659], + 65533: _TokenType_name[659:671], + 128169: _TokenType_name[671:683], } func (i TokenType) String() string { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/tools.go b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/tools.go new file mode 100644 index 00000000000..e8c42ad1f65 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/hcl/v2/tools.go @@ -0,0 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build tools +// +build tools + +package hcl + +import ( + _ "golang.org/x/tools/cmd/stringer" +) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go index 90b66889126..235d5612655 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go @@ -3,7 +3,7 @@ package version -const version = "0.20.0" +const version = "0.21.0" // ModuleVersion returns the current version of the github.com/hashicorp/terraform-exec Go module. // This is a function to allow for future possible enhancement using debug.BuildInfo. 
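The hclsyntax changes above register the new `::` operator end to end: the Ragel grammar gains a `DoubleColon` pattern, the scanner emits `TokenDoubleColon` (backed by the rune '⸬', code point 11820), and the regenerated stringer map can render its name. A minimal sketch of exercising the new token through the public lexer API, assuming the vendored hclsyntax package is importable and using an illustrative input expression:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
	// Illustrative input; "::" is the operator the updated scanner now
	// recognizes as a single token instead of two invalid colons.
	src := []byte("a = b::c\n")

	tokens, diags := hclsyntax.LexConfig(src, "example.tf", hcl.Pos{Line: 1, Column: 1, Byte: 0})
	if diags.HasErrors() {
		panic(diags)
	}
	for _, tok := range tokens {
		if tok.Type == hclsyntax.TokenDoubleColon {
			// tok.Type's String() resolves through the regenerated
			// _TokenType_map shown in the token_type_string.go hunk.
			fmt.Println(tok.Type, tok.Range) // TokenDoubleColon example.tf:1,6-8
		}
	}
}
```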
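The terraform-exec diffs that follow thread a new experimental `-allow-deferral` flag through both `plan` and `apply`, gated twice: the CLI must be at least 1.9.0 (`tf.compatible` against `tf1_9_0`) and must be a build with experiments enabled (`experimentsEnabled`, i.e. an alpha or dev prerelease). A minimal usage sketch; `workdir` and `execPath` are placeholders you would point at a real configuration directory and an experimental terraform binary:

```go
package main

import (
	"context"
	"log"

	"github.com/hashicorp/terraform-exec/tfexec"
)

func main() {
	// Placeholders: a directory holding Terraform configuration and the
	// path to an experimental (alpha or dev prerelease) terraform binary.
	workdir, execPath := "./infra", "/usr/local/bin/terraform"

	tf, err := tfexec.NewTerraform(workdir, execPath)
	if err != nil {
		log.Fatal(err)
	}

	// AllowDeferral(true) appends -allow-deferral to the plan arguments;
	// buildPlanArgs returns an error instead unless the binary is >= 1.9.0
	// and experimentsEnabled reports an alpha/dev build.
	hasChanges, err := tf.Plan(context.Background(), tfexec.AllowDeferral(true))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("plan has changes: %t", hasChanges)
}
```

When deferral is allowed, a resulting plan can be incomplete: the terraform-json changes at the end of this patch surface that through `Plan.Complete` and the per-resource `DeferredChanges` list.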
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go index 2c5a6d07a9d..7a6ea92323e 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go @@ -12,10 +12,11 @@ import ( ) type applyConfig struct { - backup string - destroy bool - dirOrPlan string - lock bool + allowDeferral bool + backup string + destroy bool + dirOrPlan string + lock bool // LockTimeout must be a string with time unit, e.g. '10s' lockTimeout string @@ -105,6 +106,10 @@ func (opt *DestroyFlagOption) configureApply(conf *applyConfig) { conf.destroy = opt.destroy } +func (opt *AllowDeferralOption) configureApply(conf *applyConfig) { + conf.allowDeferral = opt.allowDeferral +} + // Apply represents the terraform apply subcommand. func (tf *Terraform) Apply(ctx context.Context, opts ...ApplyOption) error { cmd, err := tf.applyCmd(ctx, opts...) @@ -232,6 +237,22 @@ func (tf *Terraform) buildApplyArgs(ctx context.Context, c applyConfig) ([]strin } } + if c.allowDeferral { + // Ensure the version is later than 1.9.0 + err := tf.compatible(ctx, tf1_9_0, nil) + if err != nil { + return nil, fmt.Errorf("-allow-deferral is an experimental option introduced in Terraform 1.9.0: %w", err) + } + + // Ensure the version has experiments enabled (alpha or dev builds) + err = tf.experimentsEnabled(ctx) + if err != nil { + return nil, fmt.Errorf("-allow-deferral is only available in experimental Terraform builds: %w", err) + } + + args = append(args, "-allow-deferral") + } + return args, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go index d783027a4ff..339bf39ec99 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go @@ -7,6 +7,18 @@ import ( "encoding/json" ) +// AllowDeferralOption represents the -allow-deferral flag. This flag is only enabled in +// experimental builds of Terraform. (alpha or built via source with experiments enabled) +type AllowDeferralOption struct { + allowDeferral bool +} + +// AllowDeferral represents the -allow-deferral flag. This flag is only enabled in +// experimental builds of Terraform. (alpha or built via source with experiments enabled) +func AllowDeferral(allowDeferral bool) *AllowDeferralOption { + return &AllowDeferralOption{allowDeferral} +} + // AllowMissingConfigOption represents the -allow-missing-config flag. 
type AllowMissingConfigOption struct { allowMissingConfig bool diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go index 946ce8d0ad3..c2ec1f9ec37 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go @@ -12,20 +12,21 @@ import ( ) type planConfig struct { - destroy bool - dir string - lock bool - lockTimeout string - out string - parallelism int - reattachInfo ReattachInfo - refresh bool - refreshOnly bool - replaceAddrs []string - state string - targets []string - vars []string - varFiles []string + allowDeferral bool + destroy bool + dir string + lock bool + lockTimeout string + out string + parallelism int + reattachInfo ReattachInfo + refresh bool + refreshOnly bool + replaceAddrs []string + state string + targets []string + vars []string + varFiles []string } var defaultPlanOptions = planConfig{ @@ -97,6 +98,10 @@ func (opt *DestroyFlagOption) configurePlan(conf *planConfig) { conf.destroy = opt.destroy } +func (opt *AllowDeferralOption) configurePlan(conf *planConfig) { + conf.allowDeferral = opt.allowDeferral +} + // Plan executes `terraform plan` with the specified options and waits for it // to complete. // @@ -243,6 +248,21 @@ func (tf *Terraform) buildPlanArgs(ctx context.Context, c planConfig) ([]string, args = append(args, "-var", v) } } + if c.allowDeferral { + // Ensure the version is later than 1.9.0 + err := tf.compatible(ctx, tf1_9_0, nil) + if err != nil { + return nil, fmt.Errorf("-allow-deferral is an experimental option introduced in Terraform 1.9.0: %w", err) + } + + // Ensure the version has experiments enabled (alpha or dev builds) + err = tf.experimentsEnabled(ctx) + if err != nil { + return nil, fmt.Errorf("-allow-deferral is only available in experimental Terraform builds: %w", err) + } + + args = append(args, "-allow-deferral") + } return args, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go index 4ba4f6eafcd..87addd1ec59 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go @@ -33,6 +33,7 @@ var ( tf1_1_0 = version.Must(version.NewVersion("1.1.0")) tf1_4_0 = version.Must(version.NewVersion("1.4.0")) tf1_6_0 = version.Must(version.NewVersion("1.6.0")) + tf1_9_0 = version.Must(version.NewVersion("1.9.0")) ) // Version returns structured output from the terraform version command including both the Terraform CLI version @@ -180,6 +181,22 @@ func (tf *Terraform) compatible(ctx context.Context, minInclusive *version.Versi return nil } +// experimentsEnabled asserts the cached terraform version has experiments enabled in the executable, +// and returns a well known error if not. Experiments are enabled in alpha and (potentially) dev builds of Terraform. 
+func (tf *Terraform) experimentsEnabled(ctx context.Context) error { + tfv, _, err := tf.Version(ctx, false) + if err != nil { + return err + } + + preRelease := tfv.Prerelease() + if preRelease == "dev" || strings.Contains(preRelease, "alpha") { + return nil + } + + return fmt.Errorf("experiments are not enabled in version %s, as it's not an alpha or dev build", errorVersionString(tfv)) +} + func stripPrereleaseAndMeta(v *version.Version) *version.Version { if v == nil { return nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-json/plan.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-json/plan.go index 38ea778e1cb..d8618985678 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-json/plan.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-json/plan.go @@ -60,6 +60,17 @@ type Plan struct { // plan. ResourceChanges []*ResourceChange `json:"resource_changes,omitempty"` + // DeferredChanges contains the change operations for resources that are deferred + // for this plan. + DeferredChanges []*DeferredResourceChange `json:"deferred_changes,omitempty"` + + // Complete indicates that all resources have successfully planned changes. + // This will be false if there are DeferredChanges or if the -target flag is used. + // + // Complete was introduced in Terraform 1.8 and will be nil for all previous + // Terraform versions. + Complete *bool `json:"complete,omitempty"` + // The change operations for outputs within this plan. OutputChanges map[string]*Change `json:"output_changes,omitempty"` @@ -269,3 +280,13 @@ type PlanVariable struct { // The value for this variable at plan time. Value interface{} `json:"value,omitempty"` } + +// DeferredResourceChange is a description of a resource change that has been +// deferred for some reason. +type DeferredResourceChange struct { + // Reason is the reason why this resource change was deferred. + Reason string `json:"reason,omitempty"` + + // Change contains any information we have about the deferred change. 
+ ResourceChange *ResourceChange `json:"resource_change,omitempty"` +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/keys.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/keys.go index 7ad91271482..fb821442947 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/keys.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/keys.go @@ -63,9 +63,15 @@ const ( // The protocol version being used, as a string, such as "6" KeyProtocolVersion = "tf_proto_version" + // The Deferred reason for an RPC response + KeyDeferredReason = "tf_deferred_reason" + // Whether the GetProviderSchemaOptional server capability is enabled KeyServerCapabilityGetProviderSchemaOptional = "tf_server_capability_get_provider_schema_optional" // Whether the PlanDestroy server capability is enabled KeyServerCapabilityPlanDestroy = "tf_server_capability_plan_destroy" + + // Whether the DeferralAllowed client capability is enabled + KeyClientCapabilityDeferralAllowed = "tf_client_capability_deferral_allowed" ) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/client_capabilities.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/client_capabilities.go new file mode 100644 index 00000000000..ba01cd8b8f1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/client_capabilities.go @@ -0,0 +1,49 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov5 + +// ConfigureProviderClientCapabilities allows Terraform to publish information +// regarding optionally supported protocol features for the ConfigureProvider RPC, +// such as forward-compatible Terraform behavior changes. +type ConfigureProviderClientCapabilities struct { + // DeferralAllowed signals that the request from Terraform is able to + // handle deferred responses from the provider. + DeferralAllowed bool +} + +// ReadDataSourceClientCapabilities allows Terraform to publish information +// regarding optionally supported protocol features for the ReadDataSource RPC, +// such as forward-compatible Terraform behavior changes. +type ReadDataSourceClientCapabilities struct { + // DeferralAllowed signals that the request from Terraform is able to + // handle deferred responses from the provider. + DeferralAllowed bool +} + +// ReadResourceClientCapabilities allows Terraform to publish information +// regarding optionally supported protocol features for the ReadResource RPC, +// such as forward-compatible Terraform behavior changes. +type ReadResourceClientCapabilities struct { + // DeferralAllowed signals that the request from Terraform is able to + // handle deferred responses from the provider. + DeferralAllowed bool +} + +// PlanResourceChangeClientCapabilities allows Terraform to publish information +// regarding optionally supported protocol features for the PlanResourceChange RPC, +// such as forward-compatible Terraform behavior changes. +type PlanResourceChangeClientCapabilities struct { + // DeferralAllowed signals that the request from Terraform is able to + // handle deferred responses from the provider. 
+ DeferralAllowed bool +} + +// ImportResourceStateClientCapabilities allows Terraform to publish information +// regarding optionally supported protocol features for the ImportResourceState RPC, +// such as forward-compatible Terraform behavior changes. +type ImportResourceStateClientCapabilities struct { + // DeferralAllowed signals that the request from Terraform is able to + // handle deferred responses from the provider. + DeferralAllowed bool +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/data_source.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/data_source.go index f76df34175a..df1a2814d87 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/data_source.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/data_source.go @@ -87,6 +87,10 @@ type ReadDataSourceRequest struct { // // This configuration will have known values for all fields. ProviderMeta *DynamicValue + + // ClientCapabilities defines optionally supported protocol features for the + // ReadDataSource RPC, such as forward-compatible Terraform behavior changes. + ClientCapabilities *ReadDataSourceClientCapabilities } // ReadDataSourceResponse is the response from the provider about the current @@ -105,4 +109,8 @@ type ReadDataSourceResponse struct { // indicates a successful validation with no warnings or errors // generated. Diagnostics []*Diagnostic + + // Deferred is used to indicate to Terraform that the ReadDataSource operation + // needs to be deferred for a reason. + Deferred *Deferred } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/deferred.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/deferred.go new file mode 100644 index 00000000000..967cb861a8b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/deferred.go @@ -0,0 +1,44 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov5 + +const ( + // DeferredReasonUnknown is used to indicate an invalid `DeferredReason`. + // Provider developers should not use it. + DeferredReasonUnknown DeferredReason = 0 + + // DeferredReasonResourceConfigUnknown is used to indicate that the resource configuration + // is partially unknown and the real values need to be known before the change can be planned. + DeferredReasonResourceConfigUnknown DeferredReason = 1 + + // DeferredReasonProviderConfigUnknown is used to indicate that the provider configuration + // is partially unknown and the real values need to be known before the change can be planned. + DeferredReasonProviderConfigUnknown DeferredReason = 2 + + // DeferredReasonAbsentPrereq is used to indicate that a hard dependency has not been satisfied. + DeferredReasonAbsentPrereq DeferredReason = 3 +) + +// Deferred is used to indicate to Terraform that a change needs to be deferred for a reason. +type Deferred struct { + // Reason is the reason for deferring the change. + Reason DeferredReason +} + +// DeferredReason represents different reasons for deferring a change. 
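// Illustration only, outside the vendored diff: a provider populates the
// new Deferred response field with one of the reasons above, and should do
// so only when the request announced DeferralAllowed in its
// ClientCapabilities. For example, a DataSourceServer whose configuration
// is still partially unknown might end its ReadDataSource method with:
//
//	return &tfprotov5.ReadDataSourceResponse{
//		Deferred: &tfprotov5.Deferred{
//			Reason: tfprotov5.DeferredReasonResourceConfigUnknown,
//		},
//	}, nil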
+type DeferredReason int32 + +func (d DeferredReason) String() string { + switch d { + case 0: + return "UNKNOWN" + case 1: + return "RESOURCE_CONFIG_UNKNOWN" + case 2: + return "PROVIDER_CONFIG_UNKNOWN" + case 3: + return "ABSENT_PREREQ" + } + return "UNKNOWN" +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/client_capabilities.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/client_capabilities.go new file mode 100644 index 00000000000..94ddc3d4352 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/client_capabilities.go @@ -0,0 +1,69 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package fromproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func ConfigureProviderClientCapabilities(in *tfplugin5.ClientCapabilities) *tfprotov5.ConfigureProviderClientCapabilities { + if in == nil { + return nil + } + + resp := &tfprotov5.ConfigureProviderClientCapabilities{ + DeferralAllowed: in.DeferralAllowed, + } + + return resp +} + +func ReadDataSourceClientCapabilities(in *tfplugin5.ClientCapabilities) *tfprotov5.ReadDataSourceClientCapabilities { + if in == nil { + return nil + } + + resp := &tfprotov5.ReadDataSourceClientCapabilities{ + DeferralAllowed: in.DeferralAllowed, + } + + return resp +} + +func ReadResourceClientCapabilities(in *tfplugin5.ClientCapabilities) *tfprotov5.ReadResourceClientCapabilities { + if in == nil { + return nil + } + + resp := &tfprotov5.ReadResourceClientCapabilities{ + DeferralAllowed: in.DeferralAllowed, + } + + return resp +} + +func PlanResourceChangeClientCapabilities(in *tfplugin5.ClientCapabilities) *tfprotov5.PlanResourceChangeClientCapabilities { + if in == nil { + return nil + } + + resp := &tfprotov5.PlanResourceChangeClientCapabilities{ + DeferralAllowed: in.DeferralAllowed, + } + + return resp +} + +func ImportResourceStateClientCapabilities(in *tfplugin5.ClientCapabilities) *tfprotov5.ImportResourceStateClientCapabilities { + if in == nil { + return nil + } + + resp := &tfprotov5.ImportResourceStateClientCapabilities{ + DeferralAllowed: in.DeferralAllowed, + } + + return resp +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/data_source.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/data_source.go index 3b831e7dcfb..385f484534f 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/data_source.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/data_source.go @@ -27,9 +27,10 @@ func ReadDataSourceRequest(in *tfplugin5.ReadDataSource_Request) *tfprotov5.Read } resp := &tfprotov5.ReadDataSourceRequest{ - Config: DynamicValue(in.Config), - ProviderMeta: DynamicValue(in.ProviderMeta), - TypeName: in.TypeName, + Config: DynamicValue(in.Config), + ProviderMeta: DynamicValue(in.ProviderMeta), + TypeName: in.TypeName, + ClientCapabilities: ReadDataSourceClientCapabilities(in.ClientCapabilities), } return resp diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/provider.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/provider.go index 6f8cd7d9350..ac487800e5b 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/provider.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/provider.go @@ -46,8 +46,9 @@ func ConfigureProviderRequest(in *tfplugin5.Configure_Request) *tfprotov5.Config } resp := &tfprotov5.ConfigureProviderRequest{ - Config: DynamicValue(in.Config), - TerraformVersion: in.TerraformVersion, + Config: DynamicValue(in.Config), + TerraformVersion: in.TerraformVersion, + ClientCapabilities: ConfigureProviderClientCapabilities(in.ClientCapabilities), } return resp diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/resource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/resource.go index c7e8d72ef0f..f531b487073 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/resource.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/resource.go @@ -41,10 +41,11 @@ func ReadResourceRequest(in *tfplugin5.ReadResource_Request) *tfprotov5.ReadReso } resp := &tfprotov5.ReadResourceRequest{ - CurrentState: DynamicValue(in.CurrentState), - Private: in.Private, - ProviderMeta: DynamicValue(in.ProviderMeta), - TypeName: in.TypeName, + CurrentState: DynamicValue(in.CurrentState), + Private: in.Private, + ProviderMeta: DynamicValue(in.ProviderMeta), + TypeName: in.TypeName, + ClientCapabilities: ReadResourceClientCapabilities(in.ClientCapabilities), } return resp @@ -56,12 +57,13 @@ func PlanResourceChangeRequest(in *tfplugin5.PlanResourceChange_Request) *tfprot } resp := &tfprotov5.PlanResourceChangeRequest{ - Config: DynamicValue(in.Config), - PriorPrivate: in.PriorPrivate, - PriorState: DynamicValue(in.PriorState), - ProposedNewState: DynamicValue(in.ProposedNewState), - ProviderMeta: DynamicValue(in.ProviderMeta), - TypeName: in.TypeName, + Config: DynamicValue(in.Config), + PriorPrivate: in.PriorPrivate, + PriorState: DynamicValue(in.PriorState), + ProposedNewState: DynamicValue(in.ProposedNewState), + ProviderMeta: DynamicValue(in.ProviderMeta), + TypeName: in.TypeName, + ClientCapabilities: PlanResourceChangeClientCapabilities(in.ClientCapabilities), } return resp @@ -90,8 +92,9 @@ func ImportResourceStateRequest(in *tfplugin5.ImportResourceState_Request) *tfpr } resp := &tfprotov5.ImportResourceStateRequest{ - TypeName: in.TypeName, - ID: in.Id, + TypeName: in.TypeName, + ID: in.Id, + ClientCapabilities: ImportResourceStateClientCapabilities(in.ClientCapabilities), } return resp diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/client_capabilities.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/client_capabilities.go new file mode 100644 index 00000000000..d64557b83ad --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/client_capabilities.go @@ -0,0 +1,81 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tf5serverlogging + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/internal/logging" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// ConfigureProviderClientCapabilities generates a TRACE "Announced client capabilities" log. +func ConfigureProviderClientCapabilities(ctx context.Context, capabilities *tfprotov5.ConfigureProviderClientCapabilities) { + if capabilities == nil { + logging.ProtocolTrace(ctx, "No announced client capabilities", map[string]interface{}{}) + return + } + + responseFields := map[string]interface{}{ + logging.KeyClientCapabilityDeferralAllowed: capabilities.DeferralAllowed, + } + + logging.ProtocolTrace(ctx, "Announced client capabilities", responseFields) +} + +// ReadDataSourceClientCapabilities generates a TRACE "Announced client capabilities" log. +func ReadDataSourceClientCapabilities(ctx context.Context, capabilities *tfprotov5.ReadDataSourceClientCapabilities) { + if capabilities == nil { + logging.ProtocolTrace(ctx, "No announced client capabilities", map[string]interface{}{}) + return + } + + responseFields := map[string]interface{}{ + logging.KeyClientCapabilityDeferralAllowed: capabilities.DeferralAllowed, + } + + logging.ProtocolTrace(ctx, "Announced client capabilities", responseFields) +} + +// ReadResourceClientCapabilities generates a TRACE "Announced client capabilities" log. +func ReadResourceClientCapabilities(ctx context.Context, capabilities *tfprotov5.ReadResourceClientCapabilities) { + if capabilities == nil { + logging.ProtocolTrace(ctx, "No announced client capabilities", map[string]interface{}{}) + return + } + + responseFields := map[string]interface{}{ + logging.KeyClientCapabilityDeferralAllowed: capabilities.DeferralAllowed, + } + + logging.ProtocolTrace(ctx, "Announced client capabilities", responseFields) +} + +// PlanResourceChangeClientCapabilities generates a TRACE "Announced client capabilities" log. +func PlanResourceChangeClientCapabilities(ctx context.Context, capabilities *tfprotov5.PlanResourceChangeClientCapabilities) { + if capabilities == nil { + logging.ProtocolTrace(ctx, "No announced client capabilities", map[string]interface{}{}) + return + } + + responseFields := map[string]interface{}{ + logging.KeyClientCapabilityDeferralAllowed: capabilities.DeferralAllowed, + } + + logging.ProtocolTrace(ctx, "Announced client capabilities", responseFields) +} + +// ImportResourceStateClientCapabilities generates a TRACE "Announced client capabilities" log. +func ImportResourceStateClientCapabilities(ctx context.Context, capabilities *tfprotov5.ImportResourceStateClientCapabilities) { + if capabilities == nil { + logging.ProtocolTrace(ctx, "No announced client capabilities", map[string]interface{}{}) + return + } + + responseFields := map[string]interface{}{ + logging.KeyClientCapabilityDeferralAllowed: capabilities.DeferralAllowed, + } + + logging.ProtocolTrace(ctx, "Announced client capabilities", responseFields) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/deferred.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/deferred.go new file mode 100644 index 00000000000..fa9449ccc36 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/deferred.go @@ -0,0 +1,24 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tf5serverlogging + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/internal/logging" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// Deferred generates a TRACE "Received downstream deferred response" log if populated. +func Deferred(ctx context.Context, deferred *tfprotov5.Deferred) { + if deferred == nil { + return + } + + responseFields := map[string]interface{}{ + logging.KeyDeferredReason: deferred.Reason.String(), + } + + logging.ProtocolTrace(ctx, "Received downstream deferred response", responseFields) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go index e10c839bf5c..46ce948a15e 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go @@ -1,9 +1,9 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -// Terraform Plugin RPC protocol version 5.5 +// Terraform Plugin RPC protocol version 5.6 // -// This file defines version 5.5 of the RPC protocol. To implement a plugin +// This file defines version 5.6 of the RPC protocol. To implement a plugin // against this protocol, copy this definition into your own codebase and // use protoc to generate stubs for your target language. // @@ -22,8 +22,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 -// protoc v4.25.1 +// protoc-gen-go v1.34.0 +// protoc v5.26.1 // source: tfplugin5.proto package tfplugin5 @@ -195,6 +195,65 @@ func (Schema_NestedBlock_NestingMode) EnumDescriptor() ([]byte, []int) { return file_tfplugin5_proto_rawDescGZIP(), []int{6, 2, 0} } +// Reason is the reason for deferring the change. +type Deferred_Reason int32 + +const ( + // UNKNOWN is the default value, and should not be used. + Deferred_UNKNOWN Deferred_Reason = 0 + // RESOURCE_CONFIG_UNKNOWN is used when the config is partially unknown and the real + // values need to be known before the change can be planned. + Deferred_RESOURCE_CONFIG_UNKNOWN Deferred_Reason = 1 + // PROVIDER_CONFIG_UNKNOWN is used when parts of the provider configuration + // are unknown, e.g. the provider configuration is only known after the apply is done. + Deferred_PROVIDER_CONFIG_UNKNOWN Deferred_Reason = 2 + // ABSENT_PREREQ is used when a hard dependency has not been satisfied. + Deferred_ABSENT_PREREQ Deferred_Reason = 3 +) + +// Enum value maps for Deferred_Reason. 
+var ( + Deferred_Reason_name = map[int32]string{ + 0: "UNKNOWN", + 1: "RESOURCE_CONFIG_UNKNOWN", + 2: "PROVIDER_CONFIG_UNKNOWN", + 3: "ABSENT_PREREQ", + } + Deferred_Reason_value = map[string]int32{ + "UNKNOWN": 0, + "RESOURCE_CONFIG_UNKNOWN": 1, + "PROVIDER_CONFIG_UNKNOWN": 2, + "ABSENT_PREREQ": 3, + } +) + +func (x Deferred_Reason) Enum() *Deferred_Reason { + p := new(Deferred_Reason) + *p = x + return p +} + +func (x Deferred_Reason) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Deferred_Reason) Descriptor() protoreflect.EnumDescriptor { + return file_tfplugin5_proto_enumTypes[3].Descriptor() +} + +func (Deferred_Reason) Type() protoreflect.EnumType { + return &file_tfplugin5_proto_enumTypes[3] +} + +func (x Deferred_Reason) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Deferred_Reason.Descriptor instead. +func (Deferred_Reason) EnumDescriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{10, 0} +} + // DynamicValue is an opaque encoding of terraform data, with the field name // indicating the encoding scheme used. type DynamicValue struct { @@ -658,6 +717,59 @@ func (x *ServerCapabilities) GetMoveResourceState() bool { return false } +// ClientCapabilities allows Terraform to publish information regarding +// supported protocol features. This is used to indicate availability of +// certain forward-compatible changes which may be optional in a major +// protocol version, but cannot be tested for directly. +type ClientCapabilities struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The deferral_allowed capability signals that the client is able to + // handle deferred responses from the provider. + DeferralAllowed bool `protobuf:"varint,1,opt,name=deferral_allowed,json=deferralAllowed,proto3" json:"deferral_allowed,omitempty"` +} + +func (x *ClientCapabilities) Reset() { + *x = ClientCapabilities{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientCapabilities) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientCapabilities) ProtoMessage() {} + +func (x *ClientCapabilities) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientCapabilities.ProtoReflect.Descriptor instead. 
+func (*ClientCapabilities) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{8} +} + +func (x *ClientCapabilities) GetDeferralAllowed() bool { + if x != nil { + return x.DeferralAllowed + } + return false +} + type Function struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -685,7 +797,7 @@ type Function struct { func (x *Function) Reset() { *x = Function{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[8] + mi := &file_tfplugin5_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -698,7 +810,7 @@ func (x *Function) String() string { func (*Function) ProtoMessage() {} func (x *Function) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[8] + mi := &file_tfplugin5_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -711,7 +823,7 @@ func (x *Function) ProtoReflect() protoreflect.Message { // Deprecated: Use Function.ProtoReflect.Descriptor instead. func (*Function) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{8} + return file_tfplugin5_proto_rawDescGZIP(), []int{9} } func (x *Function) GetParameters() []*Function_Parameter { @@ -763,6 +875,55 @@ func (x *Function) GetDeprecationMessage() string { return "" } +// Deferred is a message that indicates that change is deferred for a reason. +type Deferred struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // reason is the reason for deferring the change. + Reason Deferred_Reason `protobuf:"varint,1,opt,name=reason,proto3,enum=tfplugin5.Deferred_Reason" json:"reason,omitempty"` +} + +func (x *Deferred) Reset() { + *x = Deferred{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Deferred) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Deferred) ProtoMessage() {} + +func (x *Deferred) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Deferred.ProtoReflect.Descriptor instead. 
+func (*Deferred) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{10} +} + +func (x *Deferred) GetReason() Deferred_Reason { + if x != nil { + return x.Reason + } + return Deferred_UNKNOWN +} + type GetMetadata struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -772,7 +933,7 @@ type GetMetadata struct { func (x *GetMetadata) Reset() { *x = GetMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[9] + mi := &file_tfplugin5_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -785,7 +946,7 @@ func (x *GetMetadata) String() string { func (*GetMetadata) ProtoMessage() {} func (x *GetMetadata) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[9] + mi := &file_tfplugin5_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -798,7 +959,7 @@ func (x *GetMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMetadata.ProtoReflect.Descriptor instead. func (*GetMetadata) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{9} + return file_tfplugin5_proto_rawDescGZIP(), []int{11} } type GetProviderSchema struct { @@ -810,7 +971,7 @@ type GetProviderSchema struct { func (x *GetProviderSchema) Reset() { *x = GetProviderSchema{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[10] + mi := &file_tfplugin5_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -823,7 +984,7 @@ func (x *GetProviderSchema) String() string { func (*GetProviderSchema) ProtoMessage() {} func (x *GetProviderSchema) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[10] + mi := &file_tfplugin5_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -836,7 +997,7 @@ func (x *GetProviderSchema) ProtoReflect() protoreflect.Message { // Deprecated: Use GetProviderSchema.ProtoReflect.Descriptor instead. func (*GetProviderSchema) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{10} + return file_tfplugin5_proto_rawDescGZIP(), []int{12} } type PrepareProviderConfig struct { @@ -848,7 +1009,7 @@ type PrepareProviderConfig struct { func (x *PrepareProviderConfig) Reset() { *x = PrepareProviderConfig{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[11] + mi := &file_tfplugin5_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -861,7 +1022,7 @@ func (x *PrepareProviderConfig) String() string { func (*PrepareProviderConfig) ProtoMessage() {} func (x *PrepareProviderConfig) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[11] + mi := &file_tfplugin5_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -874,7 +1035,7 @@ func (x *PrepareProviderConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use PrepareProviderConfig.ProtoReflect.Descriptor instead. 
func (*PrepareProviderConfig) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{11} + return file_tfplugin5_proto_rawDescGZIP(), []int{13} } type UpgradeResourceState struct { @@ -886,7 +1047,7 @@ type UpgradeResourceState struct { func (x *UpgradeResourceState) Reset() { *x = UpgradeResourceState{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[12] + mi := &file_tfplugin5_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -899,7 +1060,7 @@ func (x *UpgradeResourceState) String() string { func (*UpgradeResourceState) ProtoMessage() {} func (x *UpgradeResourceState) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[12] + mi := &file_tfplugin5_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -912,7 +1073,7 @@ func (x *UpgradeResourceState) ProtoReflect() protoreflect.Message { // Deprecated: Use UpgradeResourceState.ProtoReflect.Descriptor instead. func (*UpgradeResourceState) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{12} + return file_tfplugin5_proto_rawDescGZIP(), []int{14} } type ValidateResourceTypeConfig struct { @@ -924,7 +1085,7 @@ type ValidateResourceTypeConfig struct { func (x *ValidateResourceTypeConfig) Reset() { *x = ValidateResourceTypeConfig{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[13] + mi := &file_tfplugin5_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -937,7 +1098,7 @@ func (x *ValidateResourceTypeConfig) String() string { func (*ValidateResourceTypeConfig) ProtoMessage() {} func (x *ValidateResourceTypeConfig) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[13] + mi := &file_tfplugin5_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -950,7 +1111,7 @@ func (x *ValidateResourceTypeConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateResourceTypeConfig.ProtoReflect.Descriptor instead. func (*ValidateResourceTypeConfig) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{13} + return file_tfplugin5_proto_rawDescGZIP(), []int{15} } type ValidateDataSourceConfig struct { @@ -962,7 +1123,7 @@ type ValidateDataSourceConfig struct { func (x *ValidateDataSourceConfig) Reset() { *x = ValidateDataSourceConfig{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[14] + mi := &file_tfplugin5_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -975,7 +1136,7 @@ func (x *ValidateDataSourceConfig) String() string { func (*ValidateDataSourceConfig) ProtoMessage() {} func (x *ValidateDataSourceConfig) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[14] + mi := &file_tfplugin5_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -988,7 +1149,7 @@ func (x *ValidateDataSourceConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateDataSourceConfig.ProtoReflect.Descriptor instead. 
func (*ValidateDataSourceConfig) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{14} + return file_tfplugin5_proto_rawDescGZIP(), []int{16} } type Configure struct { @@ -1000,7 +1161,7 @@ type Configure struct { func (x *Configure) Reset() { *x = Configure{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[15] + mi := &file_tfplugin5_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1013,7 +1174,7 @@ func (x *Configure) String() string { func (*Configure) ProtoMessage() {} func (x *Configure) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[15] + mi := &file_tfplugin5_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1026,7 +1187,7 @@ func (x *Configure) ProtoReflect() protoreflect.Message { // Deprecated: Use Configure.ProtoReflect.Descriptor instead. func (*Configure) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{15} + return file_tfplugin5_proto_rawDescGZIP(), []int{17} } type ReadResource struct { @@ -1038,7 +1199,7 @@ type ReadResource struct { func (x *ReadResource) Reset() { *x = ReadResource{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[16] + mi := &file_tfplugin5_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1051,7 +1212,7 @@ func (x *ReadResource) String() string { func (*ReadResource) ProtoMessage() {} func (x *ReadResource) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[16] + mi := &file_tfplugin5_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1064,7 +1225,7 @@ func (x *ReadResource) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadResource.ProtoReflect.Descriptor instead. func (*ReadResource) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{16} + return file_tfplugin5_proto_rawDescGZIP(), []int{18} } type PlanResourceChange struct { @@ -1076,7 +1237,7 @@ type PlanResourceChange struct { func (x *PlanResourceChange) Reset() { *x = PlanResourceChange{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[17] + mi := &file_tfplugin5_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1089,7 +1250,7 @@ func (x *PlanResourceChange) String() string { func (*PlanResourceChange) ProtoMessage() {} func (x *PlanResourceChange) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[17] + mi := &file_tfplugin5_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1102,7 +1263,7 @@ func (x *PlanResourceChange) ProtoReflect() protoreflect.Message { // Deprecated: Use PlanResourceChange.ProtoReflect.Descriptor instead. 
func (*PlanResourceChange) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{17} + return file_tfplugin5_proto_rawDescGZIP(), []int{19} } type ApplyResourceChange struct { @@ -1114,7 +1275,7 @@ type ApplyResourceChange struct { func (x *ApplyResourceChange) Reset() { *x = ApplyResourceChange{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[18] + mi := &file_tfplugin5_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1127,7 +1288,7 @@ func (x *ApplyResourceChange) String() string { func (*ApplyResourceChange) ProtoMessage() {} func (x *ApplyResourceChange) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[18] + mi := &file_tfplugin5_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1140,7 +1301,7 @@ func (x *ApplyResourceChange) ProtoReflect() protoreflect.Message { // Deprecated: Use ApplyResourceChange.ProtoReflect.Descriptor instead. func (*ApplyResourceChange) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{18} + return file_tfplugin5_proto_rawDescGZIP(), []int{20} } type ImportResourceState struct { @@ -1152,7 +1313,7 @@ type ImportResourceState struct { func (x *ImportResourceState) Reset() { *x = ImportResourceState{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[19] + mi := &file_tfplugin5_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1165,7 +1326,7 @@ func (x *ImportResourceState) String() string { func (*ImportResourceState) ProtoMessage() {} func (x *ImportResourceState) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[19] + mi := &file_tfplugin5_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1178,7 +1339,7 @@ func (x *ImportResourceState) ProtoReflect() protoreflect.Message { // Deprecated: Use ImportResourceState.ProtoReflect.Descriptor instead. func (*ImportResourceState) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{19} + return file_tfplugin5_proto_rawDescGZIP(), []int{21} } type MoveResourceState struct { @@ -1190,7 +1351,7 @@ type MoveResourceState struct { func (x *MoveResourceState) Reset() { *x = MoveResourceState{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[20] + mi := &file_tfplugin5_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1203,7 +1364,7 @@ func (x *MoveResourceState) String() string { func (*MoveResourceState) ProtoMessage() {} func (x *MoveResourceState) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[20] + mi := &file_tfplugin5_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1216,7 +1377,7 @@ func (x *MoveResourceState) ProtoReflect() protoreflect.Message { // Deprecated: Use MoveResourceState.ProtoReflect.Descriptor instead. 
func (*MoveResourceState) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{20} + return file_tfplugin5_proto_rawDescGZIP(), []int{22} } type ReadDataSource struct { @@ -1228,7 +1389,7 @@ type ReadDataSource struct { func (x *ReadDataSource) Reset() { *x = ReadDataSource{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[21] + mi := &file_tfplugin5_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1241,7 +1402,7 @@ func (x *ReadDataSource) String() string { func (*ReadDataSource) ProtoMessage() {} func (x *ReadDataSource) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[21] + mi := &file_tfplugin5_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1254,7 +1415,7 @@ func (x *ReadDataSource) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadDataSource.ProtoReflect.Descriptor instead. func (*ReadDataSource) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{21} + return file_tfplugin5_proto_rawDescGZIP(), []int{23} } type GetProvisionerSchema struct { @@ -1266,7 +1427,7 @@ type GetProvisionerSchema struct { func (x *GetProvisionerSchema) Reset() { *x = GetProvisionerSchema{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[22] + mi := &file_tfplugin5_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1279,7 +1440,7 @@ func (x *GetProvisionerSchema) String() string { func (*GetProvisionerSchema) ProtoMessage() {} func (x *GetProvisionerSchema) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[22] + mi := &file_tfplugin5_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1292,7 +1453,7 @@ func (x *GetProvisionerSchema) ProtoReflect() protoreflect.Message { // Deprecated: Use GetProvisionerSchema.ProtoReflect.Descriptor instead. func (*GetProvisionerSchema) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{22} + return file_tfplugin5_proto_rawDescGZIP(), []int{24} } type ValidateProvisionerConfig struct { @@ -1304,7 +1465,7 @@ type ValidateProvisionerConfig struct { func (x *ValidateProvisionerConfig) Reset() { *x = ValidateProvisionerConfig{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[23] + mi := &file_tfplugin5_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1317,7 +1478,7 @@ func (x *ValidateProvisionerConfig) String() string { func (*ValidateProvisionerConfig) ProtoMessage() {} func (x *ValidateProvisionerConfig) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[23] + mi := &file_tfplugin5_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1330,7 +1491,7 @@ func (x *ValidateProvisionerConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateProvisionerConfig.ProtoReflect.Descriptor instead. 
func (*ValidateProvisionerConfig) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{23} + return file_tfplugin5_proto_rawDescGZIP(), []int{25} } type ProvisionResource struct { @@ -1342,7 +1503,7 @@ type ProvisionResource struct { func (x *ProvisionResource) Reset() { *x = ProvisionResource{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[24] + mi := &file_tfplugin5_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1355,7 +1516,7 @@ func (x *ProvisionResource) String() string { func (*ProvisionResource) ProtoMessage() {} func (x *ProvisionResource) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[24] + mi := &file_tfplugin5_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1368,7 +1529,7 @@ func (x *ProvisionResource) ProtoReflect() protoreflect.Message { // Deprecated: Use ProvisionResource.ProtoReflect.Descriptor instead. func (*ProvisionResource) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{24} + return file_tfplugin5_proto_rawDescGZIP(), []int{26} } type GetFunctions struct { @@ -1380,7 +1541,7 @@ type GetFunctions struct { func (x *GetFunctions) Reset() { *x = GetFunctions{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[25] + mi := &file_tfplugin5_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1393,7 +1554,7 @@ func (x *GetFunctions) String() string { func (*GetFunctions) ProtoMessage() {} func (x *GetFunctions) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[25] + mi := &file_tfplugin5_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1406,7 +1567,7 @@ func (x *GetFunctions) ProtoReflect() protoreflect.Message { // Deprecated: Use GetFunctions.ProtoReflect.Descriptor instead. func (*GetFunctions) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{25} + return file_tfplugin5_proto_rawDescGZIP(), []int{27} } type CallFunction struct { @@ -1418,7 +1579,7 @@ type CallFunction struct { func (x *CallFunction) Reset() { *x = CallFunction{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[26] + mi := &file_tfplugin5_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1431,7 +1592,7 @@ func (x *CallFunction) String() string { func (*CallFunction) ProtoMessage() {} func (x *CallFunction) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[26] + mi := &file_tfplugin5_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1444,7 +1605,7 @@ func (x *CallFunction) ProtoReflect() protoreflect.Message { // Deprecated: Use CallFunction.ProtoReflect.Descriptor instead. 
func (*CallFunction) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{26} + return file_tfplugin5_proto_rawDescGZIP(), []int{28} } type AttributePath_Step struct { @@ -1463,7 +1624,7 @@ type AttributePath_Step struct { func (x *AttributePath_Step) Reset() { *x = AttributePath_Step{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[27] + mi := &file_tfplugin5_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1476,7 +1637,7 @@ func (x *AttributePath_Step) String() string { func (*AttributePath_Step) ProtoMessage() {} func (x *AttributePath_Step) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[27] + mi := &file_tfplugin5_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1555,7 +1716,7 @@ type Stop_Request struct { func (x *Stop_Request) Reset() { *x = Stop_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[28] + mi := &file_tfplugin5_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1568,7 +1729,7 @@ func (x *Stop_Request) String() string { func (*Stop_Request) ProtoMessage() {} func (x *Stop_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[28] + mi := &file_tfplugin5_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1595,7 +1756,7 @@ type Stop_Response struct { func (x *Stop_Response) Reset() { *x = Stop_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[29] + mi := &file_tfplugin5_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1608,7 +1769,7 @@ func (x *Stop_Response) String() string { func (*Stop_Response) ProtoMessage() {} func (x *Stop_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[29] + mi := &file_tfplugin5_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1647,7 +1808,7 @@ type Schema_Block struct { func (x *Schema_Block) Reset() { *x = Schema_Block{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[31] + mi := &file_tfplugin5_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1660,7 +1821,7 @@ func (x *Schema_Block) String() string { func (*Schema_Block) ProtoMessage() {} func (x *Schema_Block) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[31] + mi := &file_tfplugin5_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1737,7 +1898,7 @@ type Schema_Attribute struct { func (x *Schema_Attribute) Reset() { *x = Schema_Attribute{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[32] + mi := &file_tfplugin5_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1750,7 +1911,7 @@ func (x *Schema_Attribute) String() string { func (*Schema_Attribute) ProtoMessage() {} func (x *Schema_Attribute) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[32] + mi := &file_tfplugin5_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { 
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1844,7 +2005,7 @@ type Schema_NestedBlock struct { func (x *Schema_NestedBlock) Reset() { *x = Schema_NestedBlock{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[33] + mi := &file_tfplugin5_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1857,7 +2018,7 @@ func (x *Schema_NestedBlock) String() string { func (*Schema_NestedBlock) ProtoMessage() {} func (x *Schema_NestedBlock) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[33] + mi := &file_tfplugin5_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1935,7 +2096,7 @@ type Function_Parameter struct { func (x *Function_Parameter) Reset() { *x = Function_Parameter{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[34] + mi := &file_tfplugin5_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1948,7 +2109,7 @@ func (x *Function_Parameter) String() string { func (*Function_Parameter) ProtoMessage() {} func (x *Function_Parameter) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[34] + mi := &file_tfplugin5_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1961,7 +2122,7 @@ func (x *Function_Parameter) ProtoReflect() protoreflect.Message { // Deprecated: Use Function_Parameter.ProtoReflect.Descriptor instead. func (*Function_Parameter) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{8, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{9, 0} } func (x *Function_Parameter) GetName() string { @@ -2018,7 +2179,7 @@ type Function_Return struct { func (x *Function_Return) Reset() { *x = Function_Return{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[35] + mi := &file_tfplugin5_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2031,7 +2192,7 @@ func (x *Function_Return) String() string { func (*Function_Return) ProtoMessage() {} func (x *Function_Return) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[35] + mi := &file_tfplugin5_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2044,7 +2205,7 @@ func (x *Function_Return) ProtoReflect() protoreflect.Message { // Deprecated: Use Function_Return.ProtoReflect.Descriptor instead. 
func (*Function_Return) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{8, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{9, 1} } func (x *Function_Return) GetType() []byte { @@ -2063,7 +2224,7 @@ type GetMetadata_Request struct { func (x *GetMetadata_Request) Reset() { *x = GetMetadata_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[36] + mi := &file_tfplugin5_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2076,7 +2237,7 @@ func (x *GetMetadata_Request) String() string { func (*GetMetadata_Request) ProtoMessage() {} func (x *GetMetadata_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[36] + mi := &file_tfplugin5_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2089,7 +2250,7 @@ func (x *GetMetadata_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMetadata_Request.ProtoReflect.Descriptor instead. func (*GetMetadata_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{9, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{11, 0} } type GetMetadata_Response struct { @@ -2108,7 +2269,7 @@ type GetMetadata_Response struct { func (x *GetMetadata_Response) Reset() { *x = GetMetadata_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[37] + mi := &file_tfplugin5_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2121,7 +2282,7 @@ func (x *GetMetadata_Response) String() string { func (*GetMetadata_Response) ProtoMessage() {} func (x *GetMetadata_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[37] + mi := &file_tfplugin5_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2134,7 +2295,7 @@ func (x *GetMetadata_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMetadata_Response.ProtoReflect.Descriptor instead. func (*GetMetadata_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{9, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{11, 1} } func (x *GetMetadata_Response) GetServerCapabilities() *ServerCapabilities { @@ -2184,7 +2345,7 @@ type GetMetadata_FunctionMetadata struct { func (x *GetMetadata_FunctionMetadata) Reset() { *x = GetMetadata_FunctionMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[38] + mi := &file_tfplugin5_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2197,7 +2358,7 @@ func (x *GetMetadata_FunctionMetadata) String() string { func (*GetMetadata_FunctionMetadata) ProtoMessage() {} func (x *GetMetadata_FunctionMetadata) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[38] + mi := &file_tfplugin5_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2210,7 +2371,7 @@ func (x *GetMetadata_FunctionMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMetadata_FunctionMetadata.ProtoReflect.Descriptor instead. 
func (*GetMetadata_FunctionMetadata) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{9, 2} + return file_tfplugin5_proto_rawDescGZIP(), []int{11, 2} } func (x *GetMetadata_FunctionMetadata) GetName() string { @@ -2231,7 +2392,7 @@ type GetMetadata_DataSourceMetadata struct { func (x *GetMetadata_DataSourceMetadata) Reset() { *x = GetMetadata_DataSourceMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[39] + mi := &file_tfplugin5_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2244,7 +2405,7 @@ func (x *GetMetadata_DataSourceMetadata) String() string { func (*GetMetadata_DataSourceMetadata) ProtoMessage() {} func (x *GetMetadata_DataSourceMetadata) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[39] + mi := &file_tfplugin5_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2257,7 +2418,7 @@ func (x *GetMetadata_DataSourceMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMetadata_DataSourceMetadata.ProtoReflect.Descriptor instead. func (*GetMetadata_DataSourceMetadata) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{9, 3} + return file_tfplugin5_proto_rawDescGZIP(), []int{11, 3} } func (x *GetMetadata_DataSourceMetadata) GetTypeName() string { @@ -2278,7 +2439,7 @@ type GetMetadata_ResourceMetadata struct { func (x *GetMetadata_ResourceMetadata) Reset() { *x = GetMetadata_ResourceMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[40] + mi := &file_tfplugin5_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2291,7 +2452,7 @@ func (x *GetMetadata_ResourceMetadata) String() string { func (*GetMetadata_ResourceMetadata) ProtoMessage() {} func (x *GetMetadata_ResourceMetadata) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[40] + mi := &file_tfplugin5_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2304,7 +2465,7 @@ func (x *GetMetadata_ResourceMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMetadata_ResourceMetadata.ProtoReflect.Descriptor instead. 
func (*GetMetadata_ResourceMetadata) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{9, 4} + return file_tfplugin5_proto_rawDescGZIP(), []int{11, 4} } func (x *GetMetadata_ResourceMetadata) GetTypeName() string { @@ -2323,7 +2484,7 @@ type GetProviderSchema_Request struct { func (x *GetProviderSchema_Request) Reset() { *x = GetProviderSchema_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[41] + mi := &file_tfplugin5_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2336,7 +2497,7 @@ func (x *GetProviderSchema_Request) String() string { func (*GetProviderSchema_Request) ProtoMessage() {} func (x *GetProviderSchema_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[41] + mi := &file_tfplugin5_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2349,7 +2510,7 @@ func (x *GetProviderSchema_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use GetProviderSchema_Request.ProtoReflect.Descriptor instead. func (*GetProviderSchema_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{10, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{12, 0} } type GetProviderSchema_Response struct { @@ -2370,7 +2531,7 @@ type GetProviderSchema_Response struct { func (x *GetProviderSchema_Response) Reset() { *x = GetProviderSchema_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[42] + mi := &file_tfplugin5_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2383,7 +2544,7 @@ func (x *GetProviderSchema_Response) String() string { func (*GetProviderSchema_Response) ProtoMessage() {} func (x *GetProviderSchema_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[42] + mi := &file_tfplugin5_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2396,7 +2557,7 @@ func (x *GetProviderSchema_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use GetProviderSchema_Response.ProtoReflect.Descriptor instead. func (*GetProviderSchema_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{10, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{12, 1} } func (x *GetProviderSchema_Response) GetProvider() *Schema { @@ -2459,7 +2620,7 @@ type PrepareProviderConfig_Request struct { func (x *PrepareProviderConfig_Request) Reset() { *x = PrepareProviderConfig_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[46] + mi := &file_tfplugin5_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2472,7 +2633,7 @@ func (x *PrepareProviderConfig_Request) String() string { func (*PrepareProviderConfig_Request) ProtoMessage() {} func (x *PrepareProviderConfig_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[46] + mi := &file_tfplugin5_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2485,7 +2646,7 @@ func (x *PrepareProviderConfig_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use PrepareProviderConfig_Request.ProtoReflect.Descriptor instead. 
func (*PrepareProviderConfig_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{11, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{13, 0} } func (x *PrepareProviderConfig_Request) GetConfig() *DynamicValue { @@ -2507,7 +2668,7 @@ type PrepareProviderConfig_Response struct { func (x *PrepareProviderConfig_Response) Reset() { *x = PrepareProviderConfig_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[47] + mi := &file_tfplugin5_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2520,7 +2681,7 @@ func (x *PrepareProviderConfig_Response) String() string { func (*PrepareProviderConfig_Response) ProtoMessage() {} func (x *PrepareProviderConfig_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[47] + mi := &file_tfplugin5_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2533,7 +2694,7 @@ func (x *PrepareProviderConfig_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use PrepareProviderConfig_Response.ProtoReflect.Descriptor instead. func (*PrepareProviderConfig_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{11, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{13, 1} } func (x *PrepareProviderConfig_Response) GetPreparedConfig() *DynamicValue { @@ -2578,7 +2739,7 @@ type UpgradeResourceState_Request struct { func (x *UpgradeResourceState_Request) Reset() { *x = UpgradeResourceState_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[48] + mi := &file_tfplugin5_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2591,7 +2752,7 @@ func (x *UpgradeResourceState_Request) String() string { func (*UpgradeResourceState_Request) ProtoMessage() {} func (x *UpgradeResourceState_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[48] + mi := &file_tfplugin5_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2604,7 +2765,7 @@ func (x *UpgradeResourceState_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use UpgradeResourceState_Request.ProtoReflect.Descriptor instead. 
func (*UpgradeResourceState_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{12, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{14, 0} } func (x *UpgradeResourceState_Request) GetTypeName() string { @@ -2646,7 +2807,7 @@ type UpgradeResourceState_Response struct { func (x *UpgradeResourceState_Response) Reset() { *x = UpgradeResourceState_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[49] + mi := &file_tfplugin5_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2659,7 +2820,7 @@ func (x *UpgradeResourceState_Response) String() string { func (*UpgradeResourceState_Response) ProtoMessage() {} func (x *UpgradeResourceState_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[49] + mi := &file_tfplugin5_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2672,7 +2833,7 @@ func (x *UpgradeResourceState_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use UpgradeResourceState_Response.ProtoReflect.Descriptor instead. func (*UpgradeResourceState_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{12, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{14, 1} } func (x *UpgradeResourceState_Response) GetUpgradedState() *DynamicValue { @@ -2701,7 +2862,7 @@ type ValidateResourceTypeConfig_Request struct { func (x *ValidateResourceTypeConfig_Request) Reset() { *x = ValidateResourceTypeConfig_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[50] + mi := &file_tfplugin5_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2714,7 +2875,7 @@ func (x *ValidateResourceTypeConfig_Request) String() string { func (*ValidateResourceTypeConfig_Request) ProtoMessage() {} func (x *ValidateResourceTypeConfig_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[50] + mi := &file_tfplugin5_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2727,7 +2888,7 @@ func (x *ValidateResourceTypeConfig_Request) ProtoReflect() protoreflect.Message // Deprecated: Use ValidateResourceTypeConfig_Request.ProtoReflect.Descriptor instead. 
func (*ValidateResourceTypeConfig_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{13, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{15, 0} } func (x *ValidateResourceTypeConfig_Request) GetTypeName() string { @@ -2755,7 +2916,7 @@ type ValidateResourceTypeConfig_Response struct { func (x *ValidateResourceTypeConfig_Response) Reset() { *x = ValidateResourceTypeConfig_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[51] + mi := &file_tfplugin5_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2768,7 +2929,7 @@ func (x *ValidateResourceTypeConfig_Response) String() string { func (*ValidateResourceTypeConfig_Response) ProtoMessage() {} func (x *ValidateResourceTypeConfig_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[51] + mi := &file_tfplugin5_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2781,7 +2942,7 @@ func (x *ValidateResourceTypeConfig_Response) ProtoReflect() protoreflect.Messag // Deprecated: Use ValidateResourceTypeConfig_Response.ProtoReflect.Descriptor instead. func (*ValidateResourceTypeConfig_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{13, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{15, 1} } func (x *ValidateResourceTypeConfig_Response) GetDiagnostics() []*Diagnostic { @@ -2803,7 +2964,7 @@ type ValidateDataSourceConfig_Request struct { func (x *ValidateDataSourceConfig_Request) Reset() { *x = ValidateDataSourceConfig_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[52] + mi := &file_tfplugin5_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2816,7 +2977,7 @@ func (x *ValidateDataSourceConfig_Request) String() string { func (*ValidateDataSourceConfig_Request) ProtoMessage() {} func (x *ValidateDataSourceConfig_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[52] + mi := &file_tfplugin5_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2829,7 +2990,7 @@ func (x *ValidateDataSourceConfig_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateDataSourceConfig_Request.ProtoReflect.Descriptor instead. 
func (*ValidateDataSourceConfig_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{14, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{16, 0} } func (x *ValidateDataSourceConfig_Request) GetTypeName() string { @@ -2857,7 +3018,7 @@ type ValidateDataSourceConfig_Response struct { func (x *ValidateDataSourceConfig_Response) Reset() { *x = ValidateDataSourceConfig_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[53] + mi := &file_tfplugin5_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2870,7 +3031,7 @@ func (x *ValidateDataSourceConfig_Response) String() string { func (*ValidateDataSourceConfig_Response) ProtoMessage() {} func (x *ValidateDataSourceConfig_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[53] + mi := &file_tfplugin5_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2883,7 +3044,7 @@ func (x *ValidateDataSourceConfig_Response) ProtoReflect() protoreflect.Message // Deprecated: Use ValidateDataSourceConfig_Response.ProtoReflect.Descriptor instead. func (*ValidateDataSourceConfig_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{14, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{16, 1} } func (x *ValidateDataSourceConfig_Response) GetDiagnostics() []*Diagnostic { @@ -2898,14 +3059,15 @@ type Configure_Request struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TerraformVersion string `protobuf:"bytes,1,opt,name=terraform_version,json=terraformVersion,proto3" json:"terraform_version,omitempty"` - Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + TerraformVersion string `protobuf:"bytes,1,opt,name=terraform_version,json=terraformVersion,proto3" json:"terraform_version,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + ClientCapabilities *ClientCapabilities `protobuf:"bytes,3,opt,name=client_capabilities,json=clientCapabilities,proto3" json:"client_capabilities,omitempty"` } func (x *Configure_Request) Reset() { *x = Configure_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[54] + mi := &file_tfplugin5_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2918,7 +3080,7 @@ func (x *Configure_Request) String() string { func (*Configure_Request) ProtoMessage() {} func (x *Configure_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[54] + mi := &file_tfplugin5_proto_msgTypes[56] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2931,7 +3093,7 @@ func (x *Configure_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use Configure_Request.ProtoReflect.Descriptor instead. 
func (*Configure_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{15, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{17, 0} } func (x *Configure_Request) GetTerraformVersion() string { @@ -2948,6 +3110,13 @@ func (x *Configure_Request) GetConfig() *DynamicValue { return nil } +func (x *Configure_Request) GetClientCapabilities() *ClientCapabilities { + if x != nil { + return x.ClientCapabilities + } + return nil +} + type Configure_Response struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2959,7 +3128,7 @@ type Configure_Response struct { func (x *Configure_Response) Reset() { *x = Configure_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[55] + mi := &file_tfplugin5_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2972,7 +3141,7 @@ func (x *Configure_Response) String() string { func (*Configure_Response) ProtoMessage() {} func (x *Configure_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[55] + mi := &file_tfplugin5_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2985,7 +3154,7 @@ func (x *Configure_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use Configure_Response.ProtoReflect.Descriptor instead. func (*Configure_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{15, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{17, 1} } func (x *Configure_Response) GetDiagnostics() []*Diagnostic { @@ -3008,16 +3177,17 @@ type ReadResource_Request struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` - CurrentState *DynamicValue `protobuf:"bytes,2,opt,name=current_state,json=currentState,proto3" json:"current_state,omitempty"` - Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` - ProviderMeta *DynamicValue `protobuf:"bytes,4,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + CurrentState *DynamicValue `protobuf:"bytes,2,opt,name=current_state,json=currentState,proto3" json:"current_state,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,4,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + ClientCapabilities *ClientCapabilities `protobuf:"bytes,5,opt,name=client_capabilities,json=clientCapabilities,proto3" json:"client_capabilities,omitempty"` } func (x *ReadResource_Request) Reset() { *x = ReadResource_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[56] + mi := &file_tfplugin5_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3030,7 +3200,7 @@ func (x *ReadResource_Request) String() string { func (*ReadResource_Request) ProtoMessage() {} func (x *ReadResource_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[56] + mi := &file_tfplugin5_proto_msgTypes[58] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3043,7 +3213,7 @@ func 
(x *ReadResource_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadResource_Request.ProtoReflect.Descriptor instead. func (*ReadResource_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{16, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{18, 0} } func (x *ReadResource_Request) GetTypeName() string { @@ -3074,6 +3244,13 @@ func (x *ReadResource_Request) GetProviderMeta() *DynamicValue { return nil } +func (x *ReadResource_Request) GetClientCapabilities() *ClientCapabilities { + if x != nil { + return x.ClientCapabilities + } + return nil +} + type ReadResource_Response struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3082,12 +3259,15 @@ type ReadResource_Response struct { NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` + // deferred is set if the provider is deferring the change. If set the caller + // needs to handle the deferral. + Deferred *Deferred `protobuf:"bytes,4,opt,name=deferred,proto3" json:"deferred,omitempty"` } func (x *ReadResource_Response) Reset() { *x = ReadResource_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[57] + mi := &file_tfplugin5_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3100,7 +3280,7 @@ func (x *ReadResource_Response) String() string { func (*ReadResource_Response) ProtoMessage() {} func (x *ReadResource_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[57] + mi := &file_tfplugin5_proto_msgTypes[59] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3113,7 +3293,7 @@ func (x *ReadResource_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadResource_Response.ProtoReflect.Descriptor instead. 
func (*ReadResource_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{16, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{18, 1} } func (x *ReadResource_Response) GetNewState() *DynamicValue { @@ -3137,23 +3317,31 @@ func (x *ReadResource_Response) GetPrivate() []byte { return nil } +func (x *ReadResource_Response) GetDeferred() *Deferred { + if x != nil { + return x.Deferred + } + return nil +} + type PlanResourceChange_Request struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` - PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` - ProposedNewState *DynamicValue `protobuf:"bytes,3,opt,name=proposed_new_state,json=proposedNewState,proto3" json:"proposed_new_state,omitempty"` - Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` - PriorPrivate []byte `protobuf:"bytes,5,opt,name=prior_private,json=priorPrivate,proto3" json:"prior_private,omitempty"` - ProviderMeta *DynamicValue `protobuf:"bytes,6,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` + ProposedNewState *DynamicValue `protobuf:"bytes,3,opt,name=proposed_new_state,json=proposedNewState,proto3" json:"proposed_new_state,omitempty"` + Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + PriorPrivate []byte `protobuf:"bytes,5,opt,name=prior_private,json=priorPrivate,proto3" json:"prior_private,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,6,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + ClientCapabilities *ClientCapabilities `protobuf:"bytes,7,opt,name=client_capabilities,json=clientCapabilities,proto3" json:"client_capabilities,omitempty"` } func (x *PlanResourceChange_Request) Reset() { *x = PlanResourceChange_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[58] + mi := &file_tfplugin5_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3166,7 +3354,7 @@ func (x *PlanResourceChange_Request) String() string { func (*PlanResourceChange_Request) ProtoMessage() {} func (x *PlanResourceChange_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[58] + mi := &file_tfplugin5_proto_msgTypes[60] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3179,7 +3367,7 @@ func (x *PlanResourceChange_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use PlanResourceChange_Request.ProtoReflect.Descriptor instead. 
func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{17, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{19, 0} } func (x *PlanResourceChange_Request) GetTypeName() string { @@ -3224,6 +3412,13 @@ func (x *PlanResourceChange_Request) GetProviderMeta() *DynamicValue { return nil } +func (x *PlanResourceChange_Request) GetClientCapabilities() *ClientCapabilities { + if x != nil { + return x.ClientCapabilities + } + return nil +} + type PlanResourceChange_Response struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3245,12 +3440,15 @@ type PlanResourceChange_Response struct { // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== // ==== DO NOT USE THIS ==== LegacyTypeSystem bool `protobuf:"varint,5,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"` + // deferred is set if the provider is deferring the change. If set the caller + // needs to handle the deferral. + Deferred *Deferred `protobuf:"bytes,6,opt,name=deferred,proto3" json:"deferred,omitempty"` } func (x *PlanResourceChange_Response) Reset() { *x = PlanResourceChange_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[59] + mi := &file_tfplugin5_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3263,7 +3461,7 @@ func (x *PlanResourceChange_Response) String() string { func (*PlanResourceChange_Response) ProtoMessage() {} func (x *PlanResourceChange_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[59] + mi := &file_tfplugin5_proto_msgTypes[61] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3276,7 +3474,7 @@ func (x *PlanResourceChange_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use PlanResourceChange_Response.ProtoReflect.Descriptor instead. func (*PlanResourceChange_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{17, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{19, 1} } func (x *PlanResourceChange_Response) GetPlannedState() *DynamicValue { @@ -3314,6 +3512,13 @@ func (x *PlanResourceChange_Response) GetLegacyTypeSystem() bool { return false } +func (x *PlanResourceChange_Response) GetDeferred() *Deferred { + if x != nil { + return x.Deferred + } + return nil +} + type ApplyResourceChange_Request struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3330,7 +3535,7 @@ type ApplyResourceChange_Request struct { func (x *ApplyResourceChange_Request) Reset() { *x = ApplyResourceChange_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[60] + mi := &file_tfplugin5_proto_msgTypes[62] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3343,7 +3548,7 @@ func (x *ApplyResourceChange_Request) String() string { func (*ApplyResourceChange_Request) ProtoMessage() {} func (x *ApplyResourceChange_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[60] + mi := &file_tfplugin5_proto_msgTypes[62] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3356,7 +3561,7 @@ func (x *ApplyResourceChange_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ApplyResourceChange_Request.ProtoReflect.Descriptor instead. 
func (*ApplyResourceChange_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{18, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{20, 0} } func (x *ApplyResourceChange_Request) GetTypeName() string { @@ -3426,7 +3631,7 @@ type ApplyResourceChange_Response struct { func (x *ApplyResourceChange_Response) Reset() { *x = ApplyResourceChange_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[61] + mi := &file_tfplugin5_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3439,7 +3644,7 @@ func (x *ApplyResourceChange_Response) String() string { func (*ApplyResourceChange_Response) ProtoMessage() {} func (x *ApplyResourceChange_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[61] + mi := &file_tfplugin5_proto_msgTypes[63] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3452,7 +3657,7 @@ func (x *ApplyResourceChange_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use ApplyResourceChange_Response.ProtoReflect.Descriptor instead. func (*ApplyResourceChange_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{18, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{20, 1} } func (x *ApplyResourceChange_Response) GetNewState() *DynamicValue { @@ -3488,14 +3693,15 @@ type ImportResourceState_Request struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` - Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + ClientCapabilities *ClientCapabilities `protobuf:"bytes,3,opt,name=client_capabilities,json=clientCapabilities,proto3" json:"client_capabilities,omitempty"` } func (x *ImportResourceState_Request) Reset() { *x = ImportResourceState_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[62] + mi := &file_tfplugin5_proto_msgTypes[64] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3508,7 +3714,7 @@ func (x *ImportResourceState_Request) String() string { func (*ImportResourceState_Request) ProtoMessage() {} func (x *ImportResourceState_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[62] + mi := &file_tfplugin5_proto_msgTypes[64] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3521,7 +3727,7 @@ func (x *ImportResourceState_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ImportResourceState_Request.ProtoReflect.Descriptor instead. 
func (*ImportResourceState_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{19, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{21, 0} } func (x *ImportResourceState_Request) GetTypeName() string { @@ -3538,6 +3744,13 @@ func (x *ImportResourceState_Request) GetId() string { return "" } +func (x *ImportResourceState_Request) GetClientCapabilities() *ClientCapabilities { + if x != nil { + return x.ClientCapabilities + } + return nil +} + type ImportResourceState_ImportedResource struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3551,7 +3764,7 @@ type ImportResourceState_ImportedResource struct { func (x *ImportResourceState_ImportedResource) Reset() { *x = ImportResourceState_ImportedResource{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[63] + mi := &file_tfplugin5_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3564,7 +3777,7 @@ func (x *ImportResourceState_ImportedResource) String() string { func (*ImportResourceState_ImportedResource) ProtoMessage() {} func (x *ImportResourceState_ImportedResource) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[63] + mi := &file_tfplugin5_proto_msgTypes[65] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3577,7 +3790,7 @@ func (x *ImportResourceState_ImportedResource) ProtoReflect() protoreflect.Messa // Deprecated: Use ImportResourceState_ImportedResource.ProtoReflect.Descriptor instead. func (*ImportResourceState_ImportedResource) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{19, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{21, 1} } func (x *ImportResourceState_ImportedResource) GetTypeName() string { @@ -3608,12 +3821,15 @@ type ImportResourceState_Response struct { ImportedResources []*ImportResourceState_ImportedResource `protobuf:"bytes,1,rep,name=imported_resources,json=importedResources,proto3" json:"imported_resources,omitempty"` Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // deferred is set if the provider is deferring the change. If set the caller + // needs to handle the deferral. + Deferred *Deferred `protobuf:"bytes,3,opt,name=deferred,proto3" json:"deferred,omitempty"` } func (x *ImportResourceState_Response) Reset() { *x = ImportResourceState_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[64] + mi := &file_tfplugin5_proto_msgTypes[66] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3626,7 +3842,7 @@ func (x *ImportResourceState_Response) String() string { func (*ImportResourceState_Response) ProtoMessage() {} func (x *ImportResourceState_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[64] + mi := &file_tfplugin5_proto_msgTypes[66] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3639,7 +3855,7 @@ func (x *ImportResourceState_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use ImportResourceState_Response.ProtoReflect.Descriptor instead. 
func (*ImportResourceState_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{19, 2} + return file_tfplugin5_proto_rawDescGZIP(), []int{21, 2} } func (x *ImportResourceState_Response) GetImportedResources() []*ImportResourceState_ImportedResource { @@ -3656,6 +3872,13 @@ func (x *ImportResourceState_Response) GetDiagnostics() []*Diagnostic { return nil } +func (x *ImportResourceState_Response) GetDeferred() *Deferred { + if x != nil { + return x.Deferred + } + return nil +} + type MoveResourceState_Request struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3681,7 +3904,7 @@ type MoveResourceState_Request struct { func (x *MoveResourceState_Request) Reset() { *x = MoveResourceState_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[65] + mi := &file_tfplugin5_proto_msgTypes[67] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3694,7 +3917,7 @@ func (x *MoveResourceState_Request) String() string { func (*MoveResourceState_Request) ProtoMessage() {} func (x *MoveResourceState_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[65] + mi := &file_tfplugin5_proto_msgTypes[67] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3707,7 +3930,7 @@ func (x *MoveResourceState_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use MoveResourceState_Request.ProtoReflect.Descriptor instead. func (*MoveResourceState_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{20, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{22, 0} } func (x *MoveResourceState_Request) GetSourceProviderAddress() string { @@ -3768,7 +3991,7 @@ type MoveResourceState_Response struct { func (x *MoveResourceState_Response) Reset() { *x = MoveResourceState_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[66] + mi := &file_tfplugin5_proto_msgTypes[68] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3781,7 +4004,7 @@ func (x *MoveResourceState_Response) String() string { func (*MoveResourceState_Response) ProtoMessage() {} func (x *MoveResourceState_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[66] + mi := &file_tfplugin5_proto_msgTypes[68] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3794,7 +4017,7 @@ func (x *MoveResourceState_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use MoveResourceState_Response.ProtoReflect.Descriptor instead. 
func (*MoveResourceState_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{20, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{22, 1} } func (x *MoveResourceState_Response) GetTargetState() *DynamicValue { @@ -3823,15 +4046,16 @@ type ReadDataSource_Request struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` - Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` - ProviderMeta *DynamicValue `protobuf:"bytes,3,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,3,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + ClientCapabilities *ClientCapabilities `protobuf:"bytes,4,opt,name=client_capabilities,json=clientCapabilities,proto3" json:"client_capabilities,omitempty"` } func (x *ReadDataSource_Request) Reset() { *x = ReadDataSource_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[67] + mi := &file_tfplugin5_proto_msgTypes[69] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3844,7 +4068,7 @@ func (x *ReadDataSource_Request) String() string { func (*ReadDataSource_Request) ProtoMessage() {} func (x *ReadDataSource_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[67] + mi := &file_tfplugin5_proto_msgTypes[69] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3857,7 +4081,7 @@ func (x *ReadDataSource_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadDataSource_Request.ProtoReflect.Descriptor instead. func (*ReadDataSource_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{21, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{23, 0} } func (x *ReadDataSource_Request) GetTypeName() string { @@ -3881,6 +4105,13 @@ func (x *ReadDataSource_Request) GetProviderMeta() *DynamicValue { return nil } +func (x *ReadDataSource_Request) GetClientCapabilities() *ClientCapabilities { + if x != nil { + return x.ClientCapabilities + } + return nil +} + type ReadDataSource_Response struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3888,12 +4119,15 @@ type ReadDataSource_Response struct { State *DynamicValue `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // deferred is set if the provider is deferring the change. If set the caller + // needs to handle the deferral. 
+ Deferred *Deferred `protobuf:"bytes,3,opt,name=deferred,proto3" json:"deferred,omitempty"` } func (x *ReadDataSource_Response) Reset() { *x = ReadDataSource_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[68] + mi := &file_tfplugin5_proto_msgTypes[70] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3906,7 +4140,7 @@ func (x *ReadDataSource_Response) String() string { func (*ReadDataSource_Response) ProtoMessage() {} func (x *ReadDataSource_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[68] + mi := &file_tfplugin5_proto_msgTypes[70] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3919,7 +4153,7 @@ func (x *ReadDataSource_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadDataSource_Response.ProtoReflect.Descriptor instead. func (*ReadDataSource_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{21, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{23, 1} } func (x *ReadDataSource_Response) GetState() *DynamicValue { @@ -3936,6 +4170,13 @@ func (x *ReadDataSource_Response) GetDiagnostics() []*Diagnostic { return nil } +func (x *ReadDataSource_Response) GetDeferred() *Deferred { + if x != nil { + return x.Deferred + } + return nil +} + type GetProvisionerSchema_Request struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3945,7 +4186,7 @@ type GetProvisionerSchema_Request struct { func (x *GetProvisionerSchema_Request) Reset() { *x = GetProvisionerSchema_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[69] + mi := &file_tfplugin5_proto_msgTypes[71] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3958,7 +4199,7 @@ func (x *GetProvisionerSchema_Request) String() string { func (*GetProvisionerSchema_Request) ProtoMessage() {} func (x *GetProvisionerSchema_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[69] + mi := &file_tfplugin5_proto_msgTypes[71] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3971,7 +4212,7 @@ func (x *GetProvisionerSchema_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use GetProvisionerSchema_Request.ProtoReflect.Descriptor instead. 
func (*GetProvisionerSchema_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{22, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{24, 0} } type GetProvisionerSchema_Response struct { @@ -3986,7 +4227,7 @@ type GetProvisionerSchema_Response struct { func (x *GetProvisionerSchema_Response) Reset() { *x = GetProvisionerSchema_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[70] + mi := &file_tfplugin5_proto_msgTypes[72] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3999,7 +4240,7 @@ func (x *GetProvisionerSchema_Response) String() string { func (*GetProvisionerSchema_Response) ProtoMessage() {} func (x *GetProvisionerSchema_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[70] + mi := &file_tfplugin5_proto_msgTypes[72] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4012,7 +4253,7 @@ func (x *GetProvisionerSchema_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use GetProvisionerSchema_Response.ProtoReflect.Descriptor instead. func (*GetProvisionerSchema_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{22, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{24, 1} } func (x *GetProvisionerSchema_Response) GetProvisioner() *Schema { @@ -4040,7 +4281,7 @@ type ValidateProvisionerConfig_Request struct { func (x *ValidateProvisionerConfig_Request) Reset() { *x = ValidateProvisionerConfig_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[71] + mi := &file_tfplugin5_proto_msgTypes[73] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4053,7 +4294,7 @@ func (x *ValidateProvisionerConfig_Request) String() string { func (*ValidateProvisionerConfig_Request) ProtoMessage() {} func (x *ValidateProvisionerConfig_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[71] + mi := &file_tfplugin5_proto_msgTypes[73] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4066,7 +4307,7 @@ func (x *ValidateProvisionerConfig_Request) ProtoReflect() protoreflect.Message // Deprecated: Use ValidateProvisionerConfig_Request.ProtoReflect.Descriptor instead. 
func (*ValidateProvisionerConfig_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{23, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{25, 0} } func (x *ValidateProvisionerConfig_Request) GetConfig() *DynamicValue { @@ -4087,7 +4328,7 @@ type ValidateProvisionerConfig_Response struct { func (x *ValidateProvisionerConfig_Response) Reset() { *x = ValidateProvisionerConfig_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[72] + mi := &file_tfplugin5_proto_msgTypes[74] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4100,7 +4341,7 @@ func (x *ValidateProvisionerConfig_Response) String() string { func (*ValidateProvisionerConfig_Response) ProtoMessage() {} func (x *ValidateProvisionerConfig_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[72] + mi := &file_tfplugin5_proto_msgTypes[74] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4113,7 +4354,7 @@ func (x *ValidateProvisionerConfig_Response) ProtoReflect() protoreflect.Message // Deprecated: Use ValidateProvisionerConfig_Response.ProtoReflect.Descriptor instead. func (*ValidateProvisionerConfig_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{23, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{25, 1} } func (x *ValidateProvisionerConfig_Response) GetDiagnostics() []*Diagnostic { @@ -4135,7 +4376,7 @@ type ProvisionResource_Request struct { func (x *ProvisionResource_Request) Reset() { *x = ProvisionResource_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[73] + mi := &file_tfplugin5_proto_msgTypes[75] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4148,7 +4389,7 @@ func (x *ProvisionResource_Request) String() string { func (*ProvisionResource_Request) ProtoMessage() {} func (x *ProvisionResource_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[73] + mi := &file_tfplugin5_proto_msgTypes[75] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4161,7 +4402,7 @@ func (x *ProvisionResource_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ProvisionResource_Request.ProtoReflect.Descriptor instead. 
func (*ProvisionResource_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{24, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{26, 0} } func (x *ProvisionResource_Request) GetConfig() *DynamicValue { @@ -4190,7 +4431,7 @@ type ProvisionResource_Response struct { func (x *ProvisionResource_Response) Reset() { *x = ProvisionResource_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[74] + mi := &file_tfplugin5_proto_msgTypes[76] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4203,7 +4444,7 @@ func (x *ProvisionResource_Response) String() string { func (*ProvisionResource_Response) ProtoMessage() {} func (x *ProvisionResource_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[74] + mi := &file_tfplugin5_proto_msgTypes[76] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4216,7 +4457,7 @@ func (x *ProvisionResource_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use ProvisionResource_Response.ProtoReflect.Descriptor instead. func (*ProvisionResource_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{24, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{26, 1} } func (x *ProvisionResource_Response) GetOutput() string { @@ -4242,7 +4483,7 @@ type GetFunctions_Request struct { func (x *GetFunctions_Request) Reset() { *x = GetFunctions_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[75] + mi := &file_tfplugin5_proto_msgTypes[77] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4255,7 +4496,7 @@ func (x *GetFunctions_Request) String() string { func (*GetFunctions_Request) ProtoMessage() {} func (x *GetFunctions_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[75] + mi := &file_tfplugin5_proto_msgTypes[77] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4268,7 +4509,7 @@ func (x *GetFunctions_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use GetFunctions_Request.ProtoReflect.Descriptor instead. func (*GetFunctions_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{25, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{27, 0} } type GetFunctions_Response struct { @@ -4285,7 +4526,7 @@ type GetFunctions_Response struct { func (x *GetFunctions_Response) Reset() { *x = GetFunctions_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[76] + mi := &file_tfplugin5_proto_msgTypes[78] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4298,7 +4539,7 @@ func (x *GetFunctions_Response) String() string { func (*GetFunctions_Response) ProtoMessage() {} func (x *GetFunctions_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[76] + mi := &file_tfplugin5_proto_msgTypes[78] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4311,7 +4552,7 @@ func (x *GetFunctions_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use GetFunctions_Response.ProtoReflect.Descriptor instead. 
func (*GetFunctions_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{25, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{27, 1} } func (x *GetFunctions_Response) GetFunctions() map[string]*Function { @@ -4342,7 +4583,7 @@ type CallFunction_Request struct { func (x *CallFunction_Request) Reset() { *x = CallFunction_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[78] + mi := &file_tfplugin5_proto_msgTypes[80] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4355,7 +4596,7 @@ func (x *CallFunction_Request) String() string { func (*CallFunction_Request) ProtoMessage() {} func (x *CallFunction_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[78] + mi := &file_tfplugin5_proto_msgTypes[80] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4368,7 +4609,7 @@ func (x *CallFunction_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use CallFunction_Request.ProtoReflect.Descriptor instead. func (*CallFunction_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{26, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{28, 0} } func (x *CallFunction_Request) GetName() string { @@ -4399,7 +4640,7 @@ type CallFunction_Response struct { func (x *CallFunction_Response) Reset() { *x = CallFunction_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[79] + mi := &file_tfplugin5_proto_msgTypes[81] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4412,7 +4653,7 @@ func (x *CallFunction_Response) String() string { func (*CallFunction_Response) ProtoMessage() {} func (x *CallFunction_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[79] + mi := &file_tfplugin5_proto_msgTypes[81] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4425,7 +4666,7 @@ func (x *CallFunction_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use CallFunction_Response.ProtoReflect.Descriptor instead. 
func (*CallFunction_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{26, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{28, 1} } func (x *CallFunction_Response) GetResult() *DynamicValue { @@ -4570,558 +4811,610 @@ var file_tfplugin5_proto_rawDesc = []byte{ 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x2e, 0x0a, 0x13, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x22, 0x8e, 0x05, 0x0a, 0x08, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x3d, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x65, 0x74, 0x65, 0x72, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, - 0x12, 0x4c, 0x0a, 0x12, 0x76, 0x61, 0x72, 0x69, 0x61, 0x64, 0x69, 0x63, 0x5f, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x11, 0x76, 0x61, 0x72, - 0x69, 0x61, 0x64, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x32, - 0x0a, 0x06, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x74, 0x75, - 0x72, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, - 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, - 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, - 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, - 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, - 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x1a, 0xf3, 0x01, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x5f, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 
0x30, 0x0a, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, - 0x77, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x1a, 0x1c, 0x0a, 0x06, 0x52, 0x65, 0x74, 0x75, 0x72, - 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x96, 0x04, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0xef, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, - 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, - 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, - 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x37, 0x0a, - 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, - 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x4c, 0x0a, 0x0c, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x09, 0x66, - 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x1a, 0x26, 0x0a, 0x10, 
0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x1a, 0x31, 0x0a, 0x12, 0x44, 0x61, - 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x2f, 0x0a, - 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xc7, - 0x06, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0xa6, 0x06, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x08, - 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x65, 0x0a, 0x10, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x74, 0x61, 0x74, 0x65, 0x22, 0x3f, 0x0a, 0x12, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x61, + 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, + 0x66, 0x65, 0x72, 0x72, 0x61, 0x6c, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x61, 0x6c, 0x41, 0x6c, + 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x22, 0x8e, 0x05, 0x0a, 0x08, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x73, 0x12, 0x4c, 0x0a, 0x12, 0x76, 0x61, 0x72, 0x69, 0x61, 0x64, 0x69, 0x63, 0x5f, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x11, 0x76, 0x61, + 0x72, 0x69, 0x61, 0x64, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, + 0x32, 0x0a, 0x06, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x74, + 0x75, 0x72, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 
0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, + 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, + 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, + 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x1a, 0xf3, 0x01, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x61, 0x6c, 0x6c, 0x6f, + 0x77, 0x5f, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x1a, 0x1c, 0x0a, 0x06, 0x52, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xa2, 0x01, 0x0a, 0x08, 0x44, 0x65, 0x66, 0x65, 0x72, + 0x72, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, + 0x44, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x2e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, + 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x62, 0x0a, 0x06, 0x52, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1b, + 0x0a, 0x17, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, + 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x12, 0x1b, 0x0a, 0x17, 0x50, + 0x52, 0x4f, 0x56, 0x49, 0x44, 0x45, 0x52, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x42, 0x53, 0x45, + 0x4e, 0x54, 0x5f, 0x50, 0x52, 0x45, 0x52, 0x45, 0x51, 0x10, 0x03, 0x22, 0x96, 0x04, 
0x0a, 0x0b, + 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0xef, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x61, + 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, + 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x69, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x4c, 0x0a, 0x0c, + 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, + 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0b, 0x64, + 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x09, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x73, 0x12, 0x45, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x09, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x26, 0x0a, 0x10, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x1a, 0x31, 0x0a, 0x12, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x1a, 0x2f, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xc7, 0x06, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0xa6, 0x06, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 
0x12, 0x2d, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x12, 0x65, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x6c, 0x0a, 0x13, 0x64, 0x61, 0x74, 0x61, + 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x73, 0x12, 0x6c, 0x0a, 0x13, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x3c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x64, - 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, - 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, - 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x36, 0x0a, 0x0d, 0x70, 0x72, 0x6f, - 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, - 0x61, 0x12, 0x4e, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x70, 0x61, - 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, - 0x73, 0x12, 0x52, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 
0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, - 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x75, 0x6e, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x61, 0x74, + 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x11, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, + 0x36, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x69, 0x65, 0x73, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, + 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x14, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x1a, 0x57, 0x0a, 0x16, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 
0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x51, 0x0a, 0x0e, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x57, 0x0a, 0x16, - 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x51, 0x0a, 0x0e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xdb, 0x01, 0x0a, 0x15, 0x50, 0x72, 0x65, - 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x1a, 0x3a, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, - 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, - 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x85, - 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x0f, 0x70, - 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, - 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x70, - 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, - 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, - 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x90, 0x02, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, - 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, - 0x72, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, - 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, - 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 
0x69, 0x6f, - 0x6e, 0x12, 0x30, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, - 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x08, 0x72, 0x61, 0x77, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x1a, 0x83, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x3e, 0x0a, 0x0e, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x0d, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, - 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xba, 0x01, 0x0a, 0x1a, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, - 0x70, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, - 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, - 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, - 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xb8, 0x01, 0x0a, 0x18, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, - 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, - 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x73, 0x22, 0xb9, 0x01, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x1a, - 0x67, 0x0a, 
0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x65, - 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, - 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xe3, 0x02, - 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0xbc, - 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, - 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, - 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, - 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xdb, + 0x01, 0x0a, 0x15, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x3a, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x85, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x40, 0x0a, 0x0f, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x90, 0x02, 0x0a, + 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x72, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 
0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x08, 0x72, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x83, 0x01, 0x0a, 0x08, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0e, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, + 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, - 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, - 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, + 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, + 0xba, 0x01, 0x0a, 0x1a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, + 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, + 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x93, 0x01, - 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, 0x65, - 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, - 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, - 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, - 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, - 0x61, 0x74, 0x65, 0x22, 0xf2, 0x04, 0x0a, 0x12, 0x50, 0x6c, 0x61, 
0x6e, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xbb, 0x02, 0x0a, 0x07, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x45, 0x0a, - 0x12, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x4e, 0x65, 0x77, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, - 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x70, - 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x72, - 0x69, 0x6f, 0x72, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, - 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, - 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x9d, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x5f, - 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, - 0x73, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, - 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, - 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, - 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, - 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 
0x65, - 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, 0x79, - 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, 0x92, 0x04, 0x0a, 0x13, 0x41, 0x70, 0x70, - 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x1a, 0xb6, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, - 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, - 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, - 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xb8, 0x01, 0x0a, + 0x18, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, - 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, - 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, - 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, - 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0xc1, 0x01, 0x0a, 0x08, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 
0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, - 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, - 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, - 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, - 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, - 0x61, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, 0xed, 0x02, - 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x36, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, - 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x1a, 0x78, 0x0a, - 0x10, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, - 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, + 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, + 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x8a, 0x02, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x65, 0x1a, 0xb7, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x65, + 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, + 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x4e, 0x0a, 0x13, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, + 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, + 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x1a, + 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x73, 0x22, 0xe4, 0x03, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x8c, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3c, + 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, + 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, + 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, - 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, - 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, - 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x1a, 0xa3, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x12, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, - 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x49, 0x6d, 0x70, - 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x52, 0x11, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, - 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xe7, 0x03, - 0x0a, 0x11, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x1a, 0xa8, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x36, 0x0a, 0x17, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, - 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x15, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, - 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 
0x70, 0x65, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x13, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x28, 0x0a, - 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x1a, 0xa6, - 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0c, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, - 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x4d, 0x65, 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x13, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, + 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, + 0x52, 0x12, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, + 0x74, 0x69, 0x65, 0x73, 0x1a, 0xc4, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, + 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, - 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, - 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x22, 0x9c, 0x02, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, - 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x95, 0x01, 0x0a, 0x07, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, - 
0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x64, 0x65, + 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, + 0x64, 0x52, 0x08, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x22, 0xf3, 0x05, 0x0a, 0x12, + 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x1a, 0x8b, 0x03, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, + 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x45, 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, + 0x64, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x70, + 0x6f, 0x73, 0x65, 0x64, 0x4e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, + 0x0d, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, + 0x12, 0x4e, 0x0a, 0x13, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, + 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, + 0x1a, 0xce, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, + 0x0d, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x44, 0x79, 0x6e, 
0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, + 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x10, 0x72, + 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, + 0x0f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, + 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, + 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, + 0x12, 0x2f, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, + 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x52, 0x08, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, + 0x64, 0x22, 0x92, 0x04, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xb6, 0x02, 0x0a, 0x07, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, + 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, - 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, + 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x70, + 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 
0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, - 0x74, 0x61, 0x1a, 0x72, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, - 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, - 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, - 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, - 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x9b, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, 0x72, - 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x1a, - 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x78, 0x0a, 0x08, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x0b, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, - 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0b, - 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x0b, 0x64, - 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, - 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x19, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x1a, 0x3a, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, - 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, - 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, - 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, - 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, - 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, - 0x69, 0x63, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x73, 0x0a, 0x07, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 
0x69, 0x6e, 0x35, - 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5b, - 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, - 0x74, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, - 0x75, 0x74, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x74, 0x61, 0x1a, 0xc1, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, + 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61, + 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, + 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, 0xef, 0x03, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, + 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x86, + 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, + 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x4e, 0x0a, 0x13, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x69, 0x65, 0x73, 0x52, 0x12, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, + 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x78, 0x0a, 0x10, 0x49, 0x6d, 0x70, 0x6f, 0x72, + 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 
0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, + 0x65, 0x1a, 0xd4, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, + 0x0a, 0x12, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, + 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x11, 0x69, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x37, + 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, + 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x2f, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x65, 0x72, + 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x52, 0x08, + 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x22, 0xe7, 0x03, 0x0a, 0x11, 0x4d, 0x6f, 0x76, + 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0xa8, + 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x17, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x15, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x36, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0b, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x1a, 0xa6, 0x01, 0x0a, 0x08, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, 0x12, 0x3a, 0x0a, 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, - 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x81, 0x02, 0x0a, 0x0c, - 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x09, 0x0a, 0x07, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0xe5, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, - 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, - 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x1a, 0x51, 0x0a, 0x0e, - 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, - 0xd1, 0x01, 0x0a, 0x0c, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x1a, 0x54, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x35, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, - 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x61, 0x72, 0x67, - 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x6b, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, - 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, - 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 
0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x2a, 0x25, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, - 0x64, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, - 0x4d, 0x41, 0x52, 0x4b, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x32, 0xef, 0x0b, 0x0a, 0x08, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, - 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, - 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x6c, 0x0a, 0x15, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x28, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, - 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, - 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x7b, 0x0a, 0x1a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2d, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x18, - 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2b, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, - 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 
0x03, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x50, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x22, 0x9e, 0x03, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0xe5, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, + 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x4e, 0x0a, + 0x13, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, + 0x74, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x70, + 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x1a, 0xa3, 0x01, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x73, 0x12, 0x2f, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x44, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x52, 0x08, 0x64, 0x65, 0x66, 0x65, 0x72, + 0x72, 0x65, 0x64, 0x22, 0x9b, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, + 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x1a, 0x09, 0x0a, 0x07, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x78, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x0b, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0b, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 
0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x19, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, + 0x3a, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, + 0x22, 0xe5, 0x01, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x73, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x37, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5b, 0x0a, 0x08, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, + 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x81, 0x02, 0x0a, 0x0c, 0x47, 0x65, 0x74, + 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0xe5, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x4d, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 
0x69, 0x63, 0x73, 0x1a, 0x51, 0x0a, 0x0e, 0x46, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd1, 0x01, 0x0a, + 0x0c, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x54, 0x0a, + 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x09, + 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x1a, 0x6b, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2f, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x2a, 0x25, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x09, + 0x0a, 0x05, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4d, 0x41, 0x52, + 0x4b, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x32, 0xef, 0x0b, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x1e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, + 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, + 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, + 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, + 0x0a, 0x15, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, + 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x1a, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2d, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x18, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2b, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x27, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, + 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x69, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x55, 0x70, + 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x09, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x1c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x2e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 
0x2e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x12, 0x50, 0x6c, 0x61, 0x6e, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x25, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, + 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x12, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x26, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, - 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, - 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x1c, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, - 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x2e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x12, 0x50, - 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, - 0x65, 0x12, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x6c, - 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 
0x6e, 0x35, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x66, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x70, 0x70, 0x6c, - 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, - 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, - 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x49, 0x6d, 0x70, 0x6f, - 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x35, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x60, 0x0a, 0x11, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x35, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x57, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x12, 0x21, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, + 0x11, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x4d, + 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x57, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x12, 0x21, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x65, + 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, 
0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x47, - 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, - 0x0a, 0x0c, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x46, - 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x61, 0x6c, 0x6c, - 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x39, 0x0a, 0x04, 0x53, 0x74, 0x6f, 0x70, 0x12, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, - 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x86, 0x03, 0x0a, - 0x0b, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x5e, 0x0a, 0x09, - 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, - 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, - 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x19, - 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, - 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2c, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, - 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, - 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x62, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x24, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 
0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, - 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x39, 0x0a, 0x04, 0x53, 0x74, - 0x6f, 0x70, 0x12, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, - 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x47, 0x5a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x74, 0x65, - 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x2d, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2d, 0x67, - 0x6f, 0x2f, 0x74, 0x66, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x76, 0x35, 0x2f, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x43, + 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, + 0x0a, 0x04, 0x53, 0x74, 0x6f, 0x70, 0x12, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x86, 0x03, 0x0a, 0x0b, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x5e, 0x0a, 0x09, 0x47, 0x65, 0x74, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, + 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x19, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, + 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 
0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, + 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x62, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x39, 0x0a, 0x04, 0x53, 0x74, 0x6f, 0x70, 0x12, + 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x42, 0x47, 0x5a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x74, 0x65, 0x72, 0x72, 0x61, + 0x66, 0x6f, 0x72, 0x6d, 0x2d, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x74, + 0x66, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x76, 0x35, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x2f, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -5136,222 +5429,235 @@ func file_tfplugin5_proto_rawDescGZIP() []byte { return file_tfplugin5_proto_rawDescData } -var file_tfplugin5_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_tfplugin5_proto_msgTypes = make([]protoimpl.MessageInfo, 80) +var file_tfplugin5_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_tfplugin5_proto_msgTypes = make([]protoimpl.MessageInfo, 82) var file_tfplugin5_proto_goTypes = []interface{}{ (StringKind)(0), // 0: tfplugin5.StringKind (Diagnostic_Severity)(0), // 1: tfplugin5.Diagnostic.Severity (Schema_NestedBlock_NestingMode)(0), // 2: tfplugin5.Schema.NestedBlock.NestingMode - (*DynamicValue)(nil), // 3: tfplugin5.DynamicValue - (*Diagnostic)(nil), // 4: tfplugin5.Diagnostic - (*FunctionError)(nil), // 5: tfplugin5.FunctionError - (*AttributePath)(nil), // 6: tfplugin5.AttributePath - (*Stop)(nil), // 7: tfplugin5.Stop - (*RawState)(nil), // 8: tfplugin5.RawState - (*Schema)(nil), // 9: tfplugin5.Schema - (*ServerCapabilities)(nil), // 10: tfplugin5.ServerCapabilities - (*Function)(nil), // 11: tfplugin5.Function - (*GetMetadata)(nil), // 12: tfplugin5.GetMetadata - (*GetProviderSchema)(nil), // 13: tfplugin5.GetProviderSchema - (*PrepareProviderConfig)(nil), // 14: tfplugin5.PrepareProviderConfig - (*UpgradeResourceState)(nil), // 15: tfplugin5.UpgradeResourceState - (*ValidateResourceTypeConfig)(nil), // 16: tfplugin5.ValidateResourceTypeConfig - (*ValidateDataSourceConfig)(nil), // 17: tfplugin5.ValidateDataSourceConfig - (*Configure)(nil), // 18: tfplugin5.Configure - (*ReadResource)(nil), // 19: tfplugin5.ReadResource - (*PlanResourceChange)(nil), // 20: tfplugin5.PlanResourceChange - (*ApplyResourceChange)(nil), // 21: 
tfplugin5.ApplyResourceChange - (*ImportResourceState)(nil), // 22: tfplugin5.ImportResourceState - (*MoveResourceState)(nil), // 23: tfplugin5.MoveResourceState - (*ReadDataSource)(nil), // 24: tfplugin5.ReadDataSource - (*GetProvisionerSchema)(nil), // 25: tfplugin5.GetProvisionerSchema - (*ValidateProvisionerConfig)(nil), // 26: tfplugin5.ValidateProvisionerConfig - (*ProvisionResource)(nil), // 27: tfplugin5.ProvisionResource - (*GetFunctions)(nil), // 28: tfplugin5.GetFunctions - (*CallFunction)(nil), // 29: tfplugin5.CallFunction - (*AttributePath_Step)(nil), // 30: tfplugin5.AttributePath.Step - (*Stop_Request)(nil), // 31: tfplugin5.Stop.Request - (*Stop_Response)(nil), // 32: tfplugin5.Stop.Response - nil, // 33: tfplugin5.RawState.FlatmapEntry - (*Schema_Block)(nil), // 34: tfplugin5.Schema.Block - (*Schema_Attribute)(nil), // 35: tfplugin5.Schema.Attribute - (*Schema_NestedBlock)(nil), // 36: tfplugin5.Schema.NestedBlock - (*Function_Parameter)(nil), // 37: tfplugin5.Function.Parameter - (*Function_Return)(nil), // 38: tfplugin5.Function.Return - (*GetMetadata_Request)(nil), // 39: tfplugin5.GetMetadata.Request - (*GetMetadata_Response)(nil), // 40: tfplugin5.GetMetadata.Response - (*GetMetadata_FunctionMetadata)(nil), // 41: tfplugin5.GetMetadata.FunctionMetadata - (*GetMetadata_DataSourceMetadata)(nil), // 42: tfplugin5.GetMetadata.DataSourceMetadata - (*GetMetadata_ResourceMetadata)(nil), // 43: tfplugin5.GetMetadata.ResourceMetadata - (*GetProviderSchema_Request)(nil), // 44: tfplugin5.GetProviderSchema.Request - (*GetProviderSchema_Response)(nil), // 45: tfplugin5.GetProviderSchema.Response - nil, // 46: tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry - nil, // 47: tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry - nil, // 48: tfplugin5.GetProviderSchema.Response.FunctionsEntry - (*PrepareProviderConfig_Request)(nil), // 49: tfplugin5.PrepareProviderConfig.Request - (*PrepareProviderConfig_Response)(nil), // 50: tfplugin5.PrepareProviderConfig.Response - (*UpgradeResourceState_Request)(nil), // 51: tfplugin5.UpgradeResourceState.Request - (*UpgradeResourceState_Response)(nil), // 52: tfplugin5.UpgradeResourceState.Response - (*ValidateResourceTypeConfig_Request)(nil), // 53: tfplugin5.ValidateResourceTypeConfig.Request - (*ValidateResourceTypeConfig_Response)(nil), // 54: tfplugin5.ValidateResourceTypeConfig.Response - (*ValidateDataSourceConfig_Request)(nil), // 55: tfplugin5.ValidateDataSourceConfig.Request - (*ValidateDataSourceConfig_Response)(nil), // 56: tfplugin5.ValidateDataSourceConfig.Response - (*Configure_Request)(nil), // 57: tfplugin5.Configure.Request - (*Configure_Response)(nil), // 58: tfplugin5.Configure.Response - (*ReadResource_Request)(nil), // 59: tfplugin5.ReadResource.Request - (*ReadResource_Response)(nil), // 60: tfplugin5.ReadResource.Response - (*PlanResourceChange_Request)(nil), // 61: tfplugin5.PlanResourceChange.Request - (*PlanResourceChange_Response)(nil), // 62: tfplugin5.PlanResourceChange.Response - (*ApplyResourceChange_Request)(nil), // 63: tfplugin5.ApplyResourceChange.Request - (*ApplyResourceChange_Response)(nil), // 64: tfplugin5.ApplyResourceChange.Response - (*ImportResourceState_Request)(nil), // 65: tfplugin5.ImportResourceState.Request - (*ImportResourceState_ImportedResource)(nil), // 66: tfplugin5.ImportResourceState.ImportedResource - (*ImportResourceState_Response)(nil), // 67: tfplugin5.ImportResourceState.Response - (*MoveResourceState_Request)(nil), // 68: tfplugin5.MoveResourceState.Request - 
(*MoveResourceState_Response)(nil), // 69: tfplugin5.MoveResourceState.Response - (*ReadDataSource_Request)(nil), // 70: tfplugin5.ReadDataSource.Request - (*ReadDataSource_Response)(nil), // 71: tfplugin5.ReadDataSource.Response - (*GetProvisionerSchema_Request)(nil), // 72: tfplugin5.GetProvisionerSchema.Request - (*GetProvisionerSchema_Response)(nil), // 73: tfplugin5.GetProvisionerSchema.Response - (*ValidateProvisionerConfig_Request)(nil), // 74: tfplugin5.ValidateProvisionerConfig.Request - (*ValidateProvisionerConfig_Response)(nil), // 75: tfplugin5.ValidateProvisionerConfig.Response - (*ProvisionResource_Request)(nil), // 76: tfplugin5.ProvisionResource.Request - (*ProvisionResource_Response)(nil), // 77: tfplugin5.ProvisionResource.Response - (*GetFunctions_Request)(nil), // 78: tfplugin5.GetFunctions.Request - (*GetFunctions_Response)(nil), // 79: tfplugin5.GetFunctions.Response - nil, // 80: tfplugin5.GetFunctions.Response.FunctionsEntry - (*CallFunction_Request)(nil), // 81: tfplugin5.CallFunction.Request - (*CallFunction_Response)(nil), // 82: tfplugin5.CallFunction.Response + (Deferred_Reason)(0), // 3: tfplugin5.Deferred.Reason + (*DynamicValue)(nil), // 4: tfplugin5.DynamicValue + (*Diagnostic)(nil), // 5: tfplugin5.Diagnostic + (*FunctionError)(nil), // 6: tfplugin5.FunctionError + (*AttributePath)(nil), // 7: tfplugin5.AttributePath + (*Stop)(nil), // 8: tfplugin5.Stop + (*RawState)(nil), // 9: tfplugin5.RawState + (*Schema)(nil), // 10: tfplugin5.Schema + (*ServerCapabilities)(nil), // 11: tfplugin5.ServerCapabilities + (*ClientCapabilities)(nil), // 12: tfplugin5.ClientCapabilities + (*Function)(nil), // 13: tfplugin5.Function + (*Deferred)(nil), // 14: tfplugin5.Deferred + (*GetMetadata)(nil), // 15: tfplugin5.GetMetadata + (*GetProviderSchema)(nil), // 16: tfplugin5.GetProviderSchema + (*PrepareProviderConfig)(nil), // 17: tfplugin5.PrepareProviderConfig + (*UpgradeResourceState)(nil), // 18: tfplugin5.UpgradeResourceState + (*ValidateResourceTypeConfig)(nil), // 19: tfplugin5.ValidateResourceTypeConfig + (*ValidateDataSourceConfig)(nil), // 20: tfplugin5.ValidateDataSourceConfig + (*Configure)(nil), // 21: tfplugin5.Configure + (*ReadResource)(nil), // 22: tfplugin5.ReadResource + (*PlanResourceChange)(nil), // 23: tfplugin5.PlanResourceChange + (*ApplyResourceChange)(nil), // 24: tfplugin5.ApplyResourceChange + (*ImportResourceState)(nil), // 25: tfplugin5.ImportResourceState + (*MoveResourceState)(nil), // 26: tfplugin5.MoveResourceState + (*ReadDataSource)(nil), // 27: tfplugin5.ReadDataSource + (*GetProvisionerSchema)(nil), // 28: tfplugin5.GetProvisionerSchema + (*ValidateProvisionerConfig)(nil), // 29: tfplugin5.ValidateProvisionerConfig + (*ProvisionResource)(nil), // 30: tfplugin5.ProvisionResource + (*GetFunctions)(nil), // 31: tfplugin5.GetFunctions + (*CallFunction)(nil), // 32: tfplugin5.CallFunction + (*AttributePath_Step)(nil), // 33: tfplugin5.AttributePath.Step + (*Stop_Request)(nil), // 34: tfplugin5.Stop.Request + (*Stop_Response)(nil), // 35: tfplugin5.Stop.Response + nil, // 36: tfplugin5.RawState.FlatmapEntry + (*Schema_Block)(nil), // 37: tfplugin5.Schema.Block + (*Schema_Attribute)(nil), // 38: tfplugin5.Schema.Attribute + (*Schema_NestedBlock)(nil), // 39: tfplugin5.Schema.NestedBlock + (*Function_Parameter)(nil), // 40: tfplugin5.Function.Parameter + (*Function_Return)(nil), // 41: tfplugin5.Function.Return + (*GetMetadata_Request)(nil), // 42: tfplugin5.GetMetadata.Request + (*GetMetadata_Response)(nil), // 43: tfplugin5.GetMetadata.Response 
+ (*GetMetadata_FunctionMetadata)(nil), // 44: tfplugin5.GetMetadata.FunctionMetadata + (*GetMetadata_DataSourceMetadata)(nil), // 45: tfplugin5.GetMetadata.DataSourceMetadata + (*GetMetadata_ResourceMetadata)(nil), // 46: tfplugin5.GetMetadata.ResourceMetadata + (*GetProviderSchema_Request)(nil), // 47: tfplugin5.GetProviderSchema.Request + (*GetProviderSchema_Response)(nil), // 48: tfplugin5.GetProviderSchema.Response + nil, // 49: tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry + nil, // 50: tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry + nil, // 51: tfplugin5.GetProviderSchema.Response.FunctionsEntry + (*PrepareProviderConfig_Request)(nil), // 52: tfplugin5.PrepareProviderConfig.Request + (*PrepareProviderConfig_Response)(nil), // 53: tfplugin5.PrepareProviderConfig.Response + (*UpgradeResourceState_Request)(nil), // 54: tfplugin5.UpgradeResourceState.Request + (*UpgradeResourceState_Response)(nil), // 55: tfplugin5.UpgradeResourceState.Response + (*ValidateResourceTypeConfig_Request)(nil), // 56: tfplugin5.ValidateResourceTypeConfig.Request + (*ValidateResourceTypeConfig_Response)(nil), // 57: tfplugin5.ValidateResourceTypeConfig.Response + (*ValidateDataSourceConfig_Request)(nil), // 58: tfplugin5.ValidateDataSourceConfig.Request + (*ValidateDataSourceConfig_Response)(nil), // 59: tfplugin5.ValidateDataSourceConfig.Response + (*Configure_Request)(nil), // 60: tfplugin5.Configure.Request + (*Configure_Response)(nil), // 61: tfplugin5.Configure.Response + (*ReadResource_Request)(nil), // 62: tfplugin5.ReadResource.Request + (*ReadResource_Response)(nil), // 63: tfplugin5.ReadResource.Response + (*PlanResourceChange_Request)(nil), // 64: tfplugin5.PlanResourceChange.Request + (*PlanResourceChange_Response)(nil), // 65: tfplugin5.PlanResourceChange.Response + (*ApplyResourceChange_Request)(nil), // 66: tfplugin5.ApplyResourceChange.Request + (*ApplyResourceChange_Response)(nil), // 67: tfplugin5.ApplyResourceChange.Response + (*ImportResourceState_Request)(nil), // 68: tfplugin5.ImportResourceState.Request + (*ImportResourceState_ImportedResource)(nil), // 69: tfplugin5.ImportResourceState.ImportedResource + (*ImportResourceState_Response)(nil), // 70: tfplugin5.ImportResourceState.Response + (*MoveResourceState_Request)(nil), // 71: tfplugin5.MoveResourceState.Request + (*MoveResourceState_Response)(nil), // 72: tfplugin5.MoveResourceState.Response + (*ReadDataSource_Request)(nil), // 73: tfplugin5.ReadDataSource.Request + (*ReadDataSource_Response)(nil), // 74: tfplugin5.ReadDataSource.Response + (*GetProvisionerSchema_Request)(nil), // 75: tfplugin5.GetProvisionerSchema.Request + (*GetProvisionerSchema_Response)(nil), // 76: tfplugin5.GetProvisionerSchema.Response + (*ValidateProvisionerConfig_Request)(nil), // 77: tfplugin5.ValidateProvisionerConfig.Request + (*ValidateProvisionerConfig_Response)(nil), // 78: tfplugin5.ValidateProvisionerConfig.Response + (*ProvisionResource_Request)(nil), // 79: tfplugin5.ProvisionResource.Request + (*ProvisionResource_Response)(nil), // 80: tfplugin5.ProvisionResource.Response + (*GetFunctions_Request)(nil), // 81: tfplugin5.GetFunctions.Request + (*GetFunctions_Response)(nil), // 82: tfplugin5.GetFunctions.Response + nil, // 83: tfplugin5.GetFunctions.Response.FunctionsEntry + (*CallFunction_Request)(nil), // 84: tfplugin5.CallFunction.Request + (*CallFunction_Response)(nil), // 85: tfplugin5.CallFunction.Response } var file_tfplugin5_proto_depIdxs = []int32{ 1, // 0: tfplugin5.Diagnostic.severity:type_name -> 
tfplugin5.Diagnostic.Severity - 6, // 1: tfplugin5.Diagnostic.attribute:type_name -> tfplugin5.AttributePath - 30, // 2: tfplugin5.AttributePath.steps:type_name -> tfplugin5.AttributePath.Step - 33, // 3: tfplugin5.RawState.flatmap:type_name -> tfplugin5.RawState.FlatmapEntry - 34, // 4: tfplugin5.Schema.block:type_name -> tfplugin5.Schema.Block - 37, // 5: tfplugin5.Function.parameters:type_name -> tfplugin5.Function.Parameter - 37, // 6: tfplugin5.Function.variadic_parameter:type_name -> tfplugin5.Function.Parameter - 38, // 7: tfplugin5.Function.return:type_name -> tfplugin5.Function.Return + 7, // 1: tfplugin5.Diagnostic.attribute:type_name -> tfplugin5.AttributePath + 33, // 2: tfplugin5.AttributePath.steps:type_name -> tfplugin5.AttributePath.Step + 36, // 3: tfplugin5.RawState.flatmap:type_name -> tfplugin5.RawState.FlatmapEntry + 37, // 4: tfplugin5.Schema.block:type_name -> tfplugin5.Schema.Block + 40, // 5: tfplugin5.Function.parameters:type_name -> tfplugin5.Function.Parameter + 40, // 6: tfplugin5.Function.variadic_parameter:type_name -> tfplugin5.Function.Parameter + 41, // 7: tfplugin5.Function.return:type_name -> tfplugin5.Function.Return 0, // 8: tfplugin5.Function.description_kind:type_name -> tfplugin5.StringKind - 35, // 9: tfplugin5.Schema.Block.attributes:type_name -> tfplugin5.Schema.Attribute - 36, // 10: tfplugin5.Schema.Block.block_types:type_name -> tfplugin5.Schema.NestedBlock - 0, // 11: tfplugin5.Schema.Block.description_kind:type_name -> tfplugin5.StringKind - 0, // 12: tfplugin5.Schema.Attribute.description_kind:type_name -> tfplugin5.StringKind - 34, // 13: tfplugin5.Schema.NestedBlock.block:type_name -> tfplugin5.Schema.Block - 2, // 14: tfplugin5.Schema.NestedBlock.nesting:type_name -> tfplugin5.Schema.NestedBlock.NestingMode - 0, // 15: tfplugin5.Function.Parameter.description_kind:type_name -> tfplugin5.StringKind - 10, // 16: tfplugin5.GetMetadata.Response.server_capabilities:type_name -> tfplugin5.ServerCapabilities - 4, // 17: tfplugin5.GetMetadata.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 42, // 18: tfplugin5.GetMetadata.Response.data_sources:type_name -> tfplugin5.GetMetadata.DataSourceMetadata - 43, // 19: tfplugin5.GetMetadata.Response.resources:type_name -> tfplugin5.GetMetadata.ResourceMetadata - 41, // 20: tfplugin5.GetMetadata.Response.functions:type_name -> tfplugin5.GetMetadata.FunctionMetadata - 9, // 21: tfplugin5.GetProviderSchema.Response.provider:type_name -> tfplugin5.Schema - 46, // 22: tfplugin5.GetProviderSchema.Response.resource_schemas:type_name -> tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry - 47, // 23: tfplugin5.GetProviderSchema.Response.data_source_schemas:type_name -> tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry - 4, // 24: tfplugin5.GetProviderSchema.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 9, // 25: tfplugin5.GetProviderSchema.Response.provider_meta:type_name -> tfplugin5.Schema - 10, // 26: tfplugin5.GetProviderSchema.Response.server_capabilities:type_name -> tfplugin5.ServerCapabilities - 48, // 27: tfplugin5.GetProviderSchema.Response.functions:type_name -> tfplugin5.GetProviderSchema.Response.FunctionsEntry - 9, // 28: tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry.value:type_name -> tfplugin5.Schema - 9, // 29: tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry.value:type_name -> tfplugin5.Schema - 11, // 30: tfplugin5.GetProviderSchema.Response.FunctionsEntry.value:type_name -> tfplugin5.Function - 3, // 31: 
tfplugin5.PrepareProviderConfig.Request.config:type_name -> tfplugin5.DynamicValue - 3, // 32: tfplugin5.PrepareProviderConfig.Response.prepared_config:type_name -> tfplugin5.DynamicValue - 4, // 33: tfplugin5.PrepareProviderConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 8, // 34: tfplugin5.UpgradeResourceState.Request.raw_state:type_name -> tfplugin5.RawState - 3, // 35: tfplugin5.UpgradeResourceState.Response.upgraded_state:type_name -> tfplugin5.DynamicValue - 4, // 36: tfplugin5.UpgradeResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 37: tfplugin5.ValidateResourceTypeConfig.Request.config:type_name -> tfplugin5.DynamicValue - 4, // 38: tfplugin5.ValidateResourceTypeConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 39: tfplugin5.ValidateDataSourceConfig.Request.config:type_name -> tfplugin5.DynamicValue - 4, // 40: tfplugin5.ValidateDataSourceConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 41: tfplugin5.Configure.Request.config:type_name -> tfplugin5.DynamicValue - 4, // 42: tfplugin5.Configure.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 43: tfplugin5.ReadResource.Request.current_state:type_name -> tfplugin5.DynamicValue - 3, // 44: tfplugin5.ReadResource.Request.provider_meta:type_name -> tfplugin5.DynamicValue - 3, // 45: tfplugin5.ReadResource.Response.new_state:type_name -> tfplugin5.DynamicValue - 4, // 46: tfplugin5.ReadResource.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 47: tfplugin5.PlanResourceChange.Request.prior_state:type_name -> tfplugin5.DynamicValue - 3, // 48: tfplugin5.PlanResourceChange.Request.proposed_new_state:type_name -> tfplugin5.DynamicValue - 3, // 49: tfplugin5.PlanResourceChange.Request.config:type_name -> tfplugin5.DynamicValue - 3, // 50: tfplugin5.PlanResourceChange.Request.provider_meta:type_name -> tfplugin5.DynamicValue - 3, // 51: tfplugin5.PlanResourceChange.Response.planned_state:type_name -> tfplugin5.DynamicValue - 6, // 52: tfplugin5.PlanResourceChange.Response.requires_replace:type_name -> tfplugin5.AttributePath - 4, // 53: tfplugin5.PlanResourceChange.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 54: tfplugin5.ApplyResourceChange.Request.prior_state:type_name -> tfplugin5.DynamicValue - 3, // 55: tfplugin5.ApplyResourceChange.Request.planned_state:type_name -> tfplugin5.DynamicValue - 3, // 56: tfplugin5.ApplyResourceChange.Request.config:type_name -> tfplugin5.DynamicValue - 3, // 57: tfplugin5.ApplyResourceChange.Request.provider_meta:type_name -> tfplugin5.DynamicValue - 3, // 58: tfplugin5.ApplyResourceChange.Response.new_state:type_name -> tfplugin5.DynamicValue - 4, // 59: tfplugin5.ApplyResourceChange.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 60: tfplugin5.ImportResourceState.ImportedResource.state:type_name -> tfplugin5.DynamicValue - 66, // 61: tfplugin5.ImportResourceState.Response.imported_resources:type_name -> tfplugin5.ImportResourceState.ImportedResource - 4, // 62: tfplugin5.ImportResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 8, // 63: tfplugin5.MoveResourceState.Request.source_state:type_name -> tfplugin5.RawState - 3, // 64: tfplugin5.MoveResourceState.Response.target_state:type_name -> tfplugin5.DynamicValue - 4, // 65: tfplugin5.MoveResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 66: tfplugin5.ReadDataSource.Request.config:type_name -> tfplugin5.DynamicValue - 3, // 67: 
tfplugin5.ReadDataSource.Request.provider_meta:type_name -> tfplugin5.DynamicValue - 3, // 68: tfplugin5.ReadDataSource.Response.state:type_name -> tfplugin5.DynamicValue - 4, // 69: tfplugin5.ReadDataSource.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 9, // 70: tfplugin5.GetProvisionerSchema.Response.provisioner:type_name -> tfplugin5.Schema - 4, // 71: tfplugin5.GetProvisionerSchema.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 72: tfplugin5.ValidateProvisionerConfig.Request.config:type_name -> tfplugin5.DynamicValue - 4, // 73: tfplugin5.ValidateProvisionerConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 74: tfplugin5.ProvisionResource.Request.config:type_name -> tfplugin5.DynamicValue - 3, // 75: tfplugin5.ProvisionResource.Request.connection:type_name -> tfplugin5.DynamicValue - 4, // 76: tfplugin5.ProvisionResource.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 80, // 77: tfplugin5.GetFunctions.Response.functions:type_name -> tfplugin5.GetFunctions.Response.FunctionsEntry - 4, // 78: tfplugin5.GetFunctions.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 11, // 79: tfplugin5.GetFunctions.Response.FunctionsEntry.value:type_name -> tfplugin5.Function - 3, // 80: tfplugin5.CallFunction.Request.arguments:type_name -> tfplugin5.DynamicValue - 3, // 81: tfplugin5.CallFunction.Response.result:type_name -> tfplugin5.DynamicValue - 5, // 82: tfplugin5.CallFunction.Response.error:type_name -> tfplugin5.FunctionError - 39, // 83: tfplugin5.Provider.GetMetadata:input_type -> tfplugin5.GetMetadata.Request - 44, // 84: tfplugin5.Provider.GetSchema:input_type -> tfplugin5.GetProviderSchema.Request - 49, // 85: tfplugin5.Provider.PrepareProviderConfig:input_type -> tfplugin5.PrepareProviderConfig.Request - 53, // 86: tfplugin5.Provider.ValidateResourceTypeConfig:input_type -> tfplugin5.ValidateResourceTypeConfig.Request - 55, // 87: tfplugin5.Provider.ValidateDataSourceConfig:input_type -> tfplugin5.ValidateDataSourceConfig.Request - 51, // 88: tfplugin5.Provider.UpgradeResourceState:input_type -> tfplugin5.UpgradeResourceState.Request - 57, // 89: tfplugin5.Provider.Configure:input_type -> tfplugin5.Configure.Request - 59, // 90: tfplugin5.Provider.ReadResource:input_type -> tfplugin5.ReadResource.Request - 61, // 91: tfplugin5.Provider.PlanResourceChange:input_type -> tfplugin5.PlanResourceChange.Request - 63, // 92: tfplugin5.Provider.ApplyResourceChange:input_type -> tfplugin5.ApplyResourceChange.Request - 65, // 93: tfplugin5.Provider.ImportResourceState:input_type -> tfplugin5.ImportResourceState.Request - 68, // 94: tfplugin5.Provider.MoveResourceState:input_type -> tfplugin5.MoveResourceState.Request - 70, // 95: tfplugin5.Provider.ReadDataSource:input_type -> tfplugin5.ReadDataSource.Request - 78, // 96: tfplugin5.Provider.GetFunctions:input_type -> tfplugin5.GetFunctions.Request - 81, // 97: tfplugin5.Provider.CallFunction:input_type -> tfplugin5.CallFunction.Request - 31, // 98: tfplugin5.Provider.Stop:input_type -> tfplugin5.Stop.Request - 72, // 99: tfplugin5.Provisioner.GetSchema:input_type -> tfplugin5.GetProvisionerSchema.Request - 74, // 100: tfplugin5.Provisioner.ValidateProvisionerConfig:input_type -> tfplugin5.ValidateProvisionerConfig.Request - 76, // 101: tfplugin5.Provisioner.ProvisionResource:input_type -> tfplugin5.ProvisionResource.Request - 31, // 102: tfplugin5.Provisioner.Stop:input_type -> tfplugin5.Stop.Request - 40, // 103: tfplugin5.Provider.GetMetadata:output_type -> tfplugin5.GetMetadata.Response - 
45, // 104: tfplugin5.Provider.GetSchema:output_type -> tfplugin5.GetProviderSchema.Response - 50, // 105: tfplugin5.Provider.PrepareProviderConfig:output_type -> tfplugin5.PrepareProviderConfig.Response - 54, // 106: tfplugin5.Provider.ValidateResourceTypeConfig:output_type -> tfplugin5.ValidateResourceTypeConfig.Response - 56, // 107: tfplugin5.Provider.ValidateDataSourceConfig:output_type -> tfplugin5.ValidateDataSourceConfig.Response - 52, // 108: tfplugin5.Provider.UpgradeResourceState:output_type -> tfplugin5.UpgradeResourceState.Response - 58, // 109: tfplugin5.Provider.Configure:output_type -> tfplugin5.Configure.Response - 60, // 110: tfplugin5.Provider.ReadResource:output_type -> tfplugin5.ReadResource.Response - 62, // 111: tfplugin5.Provider.PlanResourceChange:output_type -> tfplugin5.PlanResourceChange.Response - 64, // 112: tfplugin5.Provider.ApplyResourceChange:output_type -> tfplugin5.ApplyResourceChange.Response - 67, // 113: tfplugin5.Provider.ImportResourceState:output_type -> tfplugin5.ImportResourceState.Response - 69, // 114: tfplugin5.Provider.MoveResourceState:output_type -> tfplugin5.MoveResourceState.Response - 71, // 115: tfplugin5.Provider.ReadDataSource:output_type -> tfplugin5.ReadDataSource.Response - 79, // 116: tfplugin5.Provider.GetFunctions:output_type -> tfplugin5.GetFunctions.Response - 82, // 117: tfplugin5.Provider.CallFunction:output_type -> tfplugin5.CallFunction.Response - 32, // 118: tfplugin5.Provider.Stop:output_type -> tfplugin5.Stop.Response - 73, // 119: tfplugin5.Provisioner.GetSchema:output_type -> tfplugin5.GetProvisionerSchema.Response - 75, // 120: tfplugin5.Provisioner.ValidateProvisionerConfig:output_type -> tfplugin5.ValidateProvisionerConfig.Response - 77, // 121: tfplugin5.Provisioner.ProvisionResource:output_type -> tfplugin5.ProvisionResource.Response - 32, // 122: tfplugin5.Provisioner.Stop:output_type -> tfplugin5.Stop.Response - 103, // [103:123] is the sub-list for method output_type - 83, // [83:103] is the sub-list for method input_type - 83, // [83:83] is the sub-list for extension type_name - 83, // [83:83] is the sub-list for extension extendee - 0, // [0:83] is the sub-list for field type_name + 3, // 9: tfplugin5.Deferred.reason:type_name -> tfplugin5.Deferred.Reason + 38, // 10: tfplugin5.Schema.Block.attributes:type_name -> tfplugin5.Schema.Attribute + 39, // 11: tfplugin5.Schema.Block.block_types:type_name -> tfplugin5.Schema.NestedBlock + 0, // 12: tfplugin5.Schema.Block.description_kind:type_name -> tfplugin5.StringKind + 0, // 13: tfplugin5.Schema.Attribute.description_kind:type_name -> tfplugin5.StringKind + 37, // 14: tfplugin5.Schema.NestedBlock.block:type_name -> tfplugin5.Schema.Block + 2, // 15: tfplugin5.Schema.NestedBlock.nesting:type_name -> tfplugin5.Schema.NestedBlock.NestingMode + 0, // 16: tfplugin5.Function.Parameter.description_kind:type_name -> tfplugin5.StringKind + 11, // 17: tfplugin5.GetMetadata.Response.server_capabilities:type_name -> tfplugin5.ServerCapabilities + 5, // 18: tfplugin5.GetMetadata.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 45, // 19: tfplugin5.GetMetadata.Response.data_sources:type_name -> tfplugin5.GetMetadata.DataSourceMetadata + 46, // 20: tfplugin5.GetMetadata.Response.resources:type_name -> tfplugin5.GetMetadata.ResourceMetadata + 44, // 21: tfplugin5.GetMetadata.Response.functions:type_name -> tfplugin5.GetMetadata.FunctionMetadata + 10, // 22: tfplugin5.GetProviderSchema.Response.provider:type_name -> tfplugin5.Schema + 49, // 23: 
tfplugin5.GetProviderSchema.Response.resource_schemas:type_name -> tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry + 50, // 24: tfplugin5.GetProviderSchema.Response.data_source_schemas:type_name -> tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry + 5, // 25: tfplugin5.GetProviderSchema.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 10, // 26: tfplugin5.GetProviderSchema.Response.provider_meta:type_name -> tfplugin5.Schema + 11, // 27: tfplugin5.GetProviderSchema.Response.server_capabilities:type_name -> tfplugin5.ServerCapabilities + 51, // 28: tfplugin5.GetProviderSchema.Response.functions:type_name -> tfplugin5.GetProviderSchema.Response.FunctionsEntry + 10, // 29: tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry.value:type_name -> tfplugin5.Schema + 10, // 30: tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry.value:type_name -> tfplugin5.Schema + 13, // 31: tfplugin5.GetProviderSchema.Response.FunctionsEntry.value:type_name -> tfplugin5.Function + 4, // 32: tfplugin5.PrepareProviderConfig.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 33: tfplugin5.PrepareProviderConfig.Response.prepared_config:type_name -> tfplugin5.DynamicValue + 5, // 34: tfplugin5.PrepareProviderConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 9, // 35: tfplugin5.UpgradeResourceState.Request.raw_state:type_name -> tfplugin5.RawState + 4, // 36: tfplugin5.UpgradeResourceState.Response.upgraded_state:type_name -> tfplugin5.DynamicValue + 5, // 37: tfplugin5.UpgradeResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 4, // 38: tfplugin5.ValidateResourceTypeConfig.Request.config:type_name -> tfplugin5.DynamicValue + 5, // 39: tfplugin5.ValidateResourceTypeConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 4, // 40: tfplugin5.ValidateDataSourceConfig.Request.config:type_name -> tfplugin5.DynamicValue + 5, // 41: tfplugin5.ValidateDataSourceConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 4, // 42: tfplugin5.Configure.Request.config:type_name -> tfplugin5.DynamicValue + 12, // 43: tfplugin5.Configure.Request.client_capabilities:type_name -> tfplugin5.ClientCapabilities + 5, // 44: tfplugin5.Configure.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 4, // 45: tfplugin5.ReadResource.Request.current_state:type_name -> tfplugin5.DynamicValue + 4, // 46: tfplugin5.ReadResource.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 12, // 47: tfplugin5.ReadResource.Request.client_capabilities:type_name -> tfplugin5.ClientCapabilities + 4, // 48: tfplugin5.ReadResource.Response.new_state:type_name -> tfplugin5.DynamicValue + 5, // 49: tfplugin5.ReadResource.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 14, // 50: tfplugin5.ReadResource.Response.deferred:type_name -> tfplugin5.Deferred + 4, // 51: tfplugin5.PlanResourceChange.Request.prior_state:type_name -> tfplugin5.DynamicValue + 4, // 52: tfplugin5.PlanResourceChange.Request.proposed_new_state:type_name -> tfplugin5.DynamicValue + 4, // 53: tfplugin5.PlanResourceChange.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 54: tfplugin5.PlanResourceChange.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 12, // 55: tfplugin5.PlanResourceChange.Request.client_capabilities:type_name -> tfplugin5.ClientCapabilities + 4, // 56: tfplugin5.PlanResourceChange.Response.planned_state:type_name -> tfplugin5.DynamicValue + 7, // 57: tfplugin5.PlanResourceChange.Response.requires_replace:type_name -> tfplugin5.AttributePath + 5, 
// 58: tfplugin5.PlanResourceChange.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 14, // 59: tfplugin5.PlanResourceChange.Response.deferred:type_name -> tfplugin5.Deferred + 4, // 60: tfplugin5.ApplyResourceChange.Request.prior_state:type_name -> tfplugin5.DynamicValue + 4, // 61: tfplugin5.ApplyResourceChange.Request.planned_state:type_name -> tfplugin5.DynamicValue + 4, // 62: tfplugin5.ApplyResourceChange.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 63: tfplugin5.ApplyResourceChange.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 4, // 64: tfplugin5.ApplyResourceChange.Response.new_state:type_name -> tfplugin5.DynamicValue + 5, // 65: tfplugin5.ApplyResourceChange.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 12, // 66: tfplugin5.ImportResourceState.Request.client_capabilities:type_name -> tfplugin5.ClientCapabilities + 4, // 67: tfplugin5.ImportResourceState.ImportedResource.state:type_name -> tfplugin5.DynamicValue + 69, // 68: tfplugin5.ImportResourceState.Response.imported_resources:type_name -> tfplugin5.ImportResourceState.ImportedResource + 5, // 69: tfplugin5.ImportResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 14, // 70: tfplugin5.ImportResourceState.Response.deferred:type_name -> tfplugin5.Deferred + 9, // 71: tfplugin5.MoveResourceState.Request.source_state:type_name -> tfplugin5.RawState + 4, // 72: tfplugin5.MoveResourceState.Response.target_state:type_name -> tfplugin5.DynamicValue + 5, // 73: tfplugin5.MoveResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 4, // 74: tfplugin5.ReadDataSource.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 75: tfplugin5.ReadDataSource.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 12, // 76: tfplugin5.ReadDataSource.Request.client_capabilities:type_name -> tfplugin5.ClientCapabilities + 4, // 77: tfplugin5.ReadDataSource.Response.state:type_name -> tfplugin5.DynamicValue + 5, // 78: tfplugin5.ReadDataSource.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 14, // 79: tfplugin5.ReadDataSource.Response.deferred:type_name -> tfplugin5.Deferred + 10, // 80: tfplugin5.GetProvisionerSchema.Response.provisioner:type_name -> tfplugin5.Schema + 5, // 81: tfplugin5.GetProvisionerSchema.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 4, // 82: tfplugin5.ValidateProvisionerConfig.Request.config:type_name -> tfplugin5.DynamicValue + 5, // 83: tfplugin5.ValidateProvisionerConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 4, // 84: tfplugin5.ProvisionResource.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 85: tfplugin5.ProvisionResource.Request.connection:type_name -> tfplugin5.DynamicValue + 5, // 86: tfplugin5.ProvisionResource.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 83, // 87: tfplugin5.GetFunctions.Response.functions:type_name -> tfplugin5.GetFunctions.Response.FunctionsEntry + 5, // 88: tfplugin5.GetFunctions.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 13, // 89: tfplugin5.GetFunctions.Response.FunctionsEntry.value:type_name -> tfplugin5.Function + 4, // 90: tfplugin5.CallFunction.Request.arguments:type_name -> tfplugin5.DynamicValue + 4, // 91: tfplugin5.CallFunction.Response.result:type_name -> tfplugin5.DynamicValue + 6, // 92: tfplugin5.CallFunction.Response.error:type_name -> tfplugin5.FunctionError + 42, // 93: tfplugin5.Provider.GetMetadata:input_type -> tfplugin5.GetMetadata.Request + 47, // 94: tfplugin5.Provider.GetSchema:input_type -> 
tfplugin5.GetProviderSchema.Request + 52, // 95: tfplugin5.Provider.PrepareProviderConfig:input_type -> tfplugin5.PrepareProviderConfig.Request + 56, // 96: tfplugin5.Provider.ValidateResourceTypeConfig:input_type -> tfplugin5.ValidateResourceTypeConfig.Request + 58, // 97: tfplugin5.Provider.ValidateDataSourceConfig:input_type -> tfplugin5.ValidateDataSourceConfig.Request + 54, // 98: tfplugin5.Provider.UpgradeResourceState:input_type -> tfplugin5.UpgradeResourceState.Request + 60, // 99: tfplugin5.Provider.Configure:input_type -> tfplugin5.Configure.Request + 62, // 100: tfplugin5.Provider.ReadResource:input_type -> tfplugin5.ReadResource.Request + 64, // 101: tfplugin5.Provider.PlanResourceChange:input_type -> tfplugin5.PlanResourceChange.Request + 66, // 102: tfplugin5.Provider.ApplyResourceChange:input_type -> tfplugin5.ApplyResourceChange.Request + 68, // 103: tfplugin5.Provider.ImportResourceState:input_type -> tfplugin5.ImportResourceState.Request + 71, // 104: tfplugin5.Provider.MoveResourceState:input_type -> tfplugin5.MoveResourceState.Request + 73, // 105: tfplugin5.Provider.ReadDataSource:input_type -> tfplugin5.ReadDataSource.Request + 81, // 106: tfplugin5.Provider.GetFunctions:input_type -> tfplugin5.GetFunctions.Request + 84, // 107: tfplugin5.Provider.CallFunction:input_type -> tfplugin5.CallFunction.Request + 34, // 108: tfplugin5.Provider.Stop:input_type -> tfplugin5.Stop.Request + 75, // 109: tfplugin5.Provisioner.GetSchema:input_type -> tfplugin5.GetProvisionerSchema.Request + 77, // 110: tfplugin5.Provisioner.ValidateProvisionerConfig:input_type -> tfplugin5.ValidateProvisionerConfig.Request + 79, // 111: tfplugin5.Provisioner.ProvisionResource:input_type -> tfplugin5.ProvisionResource.Request + 34, // 112: tfplugin5.Provisioner.Stop:input_type -> tfplugin5.Stop.Request + 43, // 113: tfplugin5.Provider.GetMetadata:output_type -> tfplugin5.GetMetadata.Response + 48, // 114: tfplugin5.Provider.GetSchema:output_type -> tfplugin5.GetProviderSchema.Response + 53, // 115: tfplugin5.Provider.PrepareProviderConfig:output_type -> tfplugin5.PrepareProviderConfig.Response + 57, // 116: tfplugin5.Provider.ValidateResourceTypeConfig:output_type -> tfplugin5.ValidateResourceTypeConfig.Response + 59, // 117: tfplugin5.Provider.ValidateDataSourceConfig:output_type -> tfplugin5.ValidateDataSourceConfig.Response + 55, // 118: tfplugin5.Provider.UpgradeResourceState:output_type -> tfplugin5.UpgradeResourceState.Response + 61, // 119: tfplugin5.Provider.Configure:output_type -> tfplugin5.Configure.Response + 63, // 120: tfplugin5.Provider.ReadResource:output_type -> tfplugin5.ReadResource.Response + 65, // 121: tfplugin5.Provider.PlanResourceChange:output_type -> tfplugin5.PlanResourceChange.Response + 67, // 122: tfplugin5.Provider.ApplyResourceChange:output_type -> tfplugin5.ApplyResourceChange.Response + 70, // 123: tfplugin5.Provider.ImportResourceState:output_type -> tfplugin5.ImportResourceState.Response + 72, // 124: tfplugin5.Provider.MoveResourceState:output_type -> tfplugin5.MoveResourceState.Response + 74, // 125: tfplugin5.Provider.ReadDataSource:output_type -> tfplugin5.ReadDataSource.Response + 82, // 126: tfplugin5.Provider.GetFunctions:output_type -> tfplugin5.GetFunctions.Response + 85, // 127: tfplugin5.Provider.CallFunction:output_type -> tfplugin5.CallFunction.Response + 35, // 128: tfplugin5.Provider.Stop:output_type -> tfplugin5.Stop.Response + 76, // 129: tfplugin5.Provisioner.GetSchema:output_type -> tfplugin5.GetProvisionerSchema.Response + 78, // 130: 
tfplugin5.Provisioner.ValidateProvisionerConfig:output_type -> tfplugin5.ValidateProvisionerConfig.Response + 80, // 131: tfplugin5.Provisioner.ProvisionResource:output_type -> tfplugin5.ProvisionResource.Response + 35, // 132: tfplugin5.Provisioner.Stop:output_type -> tfplugin5.Stop.Response + 113, // [113:133] is the sub-list for method output_type + 93, // [93:113] is the sub-list for method input_type + 93, // [93:93] is the sub-list for extension type_name + 93, // [93:93] is the sub-list for extension extendee + 0, // [0:93] is the sub-list for field type_name } func init() { file_tfplugin5_proto_init() } @@ -5457,7 +5763,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Function); i { + switch v := v.(*ClientCapabilities); i { case 0: return &v.state case 1: @@ -5469,7 +5775,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetMetadata); i { + switch v := v.(*Function); i { case 0: return &v.state case 1: @@ -5481,7 +5787,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetProviderSchema); i { + switch v := v.(*Deferred); i { case 0: return &v.state case 1: @@ -5493,7 +5799,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PrepareProviderConfig); i { + switch v := v.(*GetMetadata); i { case 0: return &v.state case 1: @@ -5505,7 +5811,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpgradeResourceState); i { + switch v := v.(*GetProviderSchema); i { case 0: return &v.state case 1: @@ -5517,7 +5823,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateResourceTypeConfig); i { + switch v := v.(*PrepareProviderConfig); i { case 0: return &v.state case 1: @@ -5529,7 +5835,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateDataSourceConfig); i { + switch v := v.(*UpgradeResourceState); i { case 0: return &v.state case 1: @@ -5541,7 +5847,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Configure); i { + switch v := v.(*ValidateResourceTypeConfig); i { case 0: return &v.state case 1: @@ -5553,7 +5859,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadResource); i { + switch v := v.(*ValidateDataSourceConfig); i { case 0: return &v.state case 1: @@ -5565,7 +5871,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PlanResourceChange); i { + switch v := v.(*Configure); i { case 0: return &v.state case 1: @@ -5577,7 +5883,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyResourceChange); i { + switch v := v.(*ReadResource); i { case 0: return &v.state case 1: @@ -5589,7 +5895,7 @@ func file_tfplugin5_proto_init() { } 
} file_tfplugin5_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ImportResourceState); i { + switch v := v.(*PlanResourceChange); i { case 0: return &v.state case 1: @@ -5601,7 +5907,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MoveResourceState); i { + switch v := v.(*ApplyResourceChange); i { case 0: return &v.state case 1: @@ -5613,7 +5919,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadDataSource); i { + switch v := v.(*ImportResourceState); i { case 0: return &v.state case 1: @@ -5625,7 +5931,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetProvisionerSchema); i { + switch v := v.(*MoveResourceState); i { case 0: return &v.state case 1: @@ -5637,7 +5943,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateProvisionerConfig); i { + switch v := v.(*ReadDataSource); i { case 0: return &v.state case 1: @@ -5649,7 +5955,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProvisionResource); i { + switch v := v.(*GetProvisionerSchema); i { case 0: return &v.state case 1: @@ -5661,7 +5967,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetFunctions); i { + switch v := v.(*ValidateProvisionerConfig); i { case 0: return &v.state case 1: @@ -5673,7 +5979,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CallFunction); i { + switch v := v.(*ProvisionResource); i { case 0: return &v.state case 1: @@ -5685,7 +5991,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AttributePath_Step); i { + switch v := v.(*GetFunctions); i { case 0: return &v.state case 1: @@ -5697,7 +6003,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Stop_Request); i { + switch v := v.(*CallFunction); i { case 0: return &v.state case 1: @@ -5709,7 +6015,19 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Stop_Response); i { + switch v := v.(*AttributePath_Step); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Stop_Request); i { case 0: return &v.state case 1: @@ -5721,6 +6039,18 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Stop_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Schema_Block); i { case 0: return &v.state @@ 
-5732,7 +6062,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Schema_Attribute); i { case 0: return &v.state @@ -5744,7 +6074,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Schema_NestedBlock); i { case 0: return &v.state @@ -5756,7 +6086,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Function_Parameter); i { case 0: return &v.state @@ -5768,7 +6098,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Function_Return); i { case 0: return &v.state @@ -5780,7 +6110,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetMetadata_Request); i { case 0: return &v.state @@ -5792,7 +6122,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetMetadata_Response); i { case 0: return &v.state @@ -5804,7 +6134,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetMetadata_FunctionMetadata); i { case 0: return &v.state @@ -5816,7 +6146,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetMetadata_DataSourceMetadata); i { case 0: return &v.state @@ -5828,7 +6158,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetMetadata_ResourceMetadata); i { case 0: return &v.state @@ -5840,7 +6170,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetProviderSchema_Request); i { case 0: return &v.state @@ -5852,7 +6182,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetProviderSchema_Response); i { case 0: return &v.state @@ -5864,7 +6194,7 @@ func file_tfplugin5_proto_init() { return nil } 
} - file_tfplugin5_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PrepareProviderConfig_Request); i { case 0: return &v.state @@ -5876,7 +6206,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PrepareProviderConfig_Response); i { case 0: return &v.state @@ -5888,7 +6218,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UpgradeResourceState_Request); i { case 0: return &v.state @@ -5900,7 +6230,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UpgradeResourceState_Response); i { case 0: return &v.state @@ -5912,7 +6242,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateResourceTypeConfig_Request); i { case 0: return &v.state @@ -5924,7 +6254,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateResourceTypeConfig_Response); i { case 0: return &v.state @@ -5936,7 +6266,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateDataSourceConfig_Request); i { case 0: return &v.state @@ -5948,7 +6278,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateDataSourceConfig_Response); i { case 0: return &v.state @@ -5960,7 +6290,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Configure_Request); i { case 0: return &v.state @@ -5972,7 +6302,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Configure_Response); i { case 0: return &v.state @@ -5984,7 +6314,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadResource_Request); i { case 0: return &v.state @@ -5996,7 +6326,7 @@ func file_tfplugin5_proto_init() { return nil } } - 
file_tfplugin5_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadResource_Response); i { case 0: return &v.state @@ -6008,7 +6338,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PlanResourceChange_Request); i { case 0: return &v.state @@ -6020,7 +6350,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PlanResourceChange_Response); i { case 0: return &v.state @@ -6032,7 +6362,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ApplyResourceChange_Request); i { case 0: return &v.state @@ -6044,7 +6374,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ApplyResourceChange_Response); i { case 0: return &v.state @@ -6056,7 +6386,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ImportResourceState_Request); i { case 0: return &v.state @@ -6068,7 +6398,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ImportResourceState_ImportedResource); i { case 0: return &v.state @@ -6080,7 +6410,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ImportResourceState_Response); i { case 0: return &v.state @@ -6092,7 +6422,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MoveResourceState_Request); i { case 0: return &v.state @@ -6104,7 +6434,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MoveResourceState_Response); i { case 0: return &v.state @@ -6116,7 +6446,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadDataSource_Request); i { case 0: return &v.state @@ -6128,7 +6458,7 @@ func file_tfplugin5_proto_init() { return nil } } - 
file_tfplugin5_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadDataSource_Response); i { case 0: return &v.state @@ -6140,7 +6470,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetProvisionerSchema_Request); i { case 0: return &v.state @@ -6152,7 +6482,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetProvisionerSchema_Response); i { case 0: return &v.state @@ -6164,7 +6494,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateProvisionerConfig_Request); i { case 0: return &v.state @@ -6176,7 +6506,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateProvisionerConfig_Response); i { case 0: return &v.state @@ -6188,7 +6518,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ProvisionResource_Request); i { case 0: return &v.state @@ -6200,7 +6530,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ProvisionResource_Response); i { case 0: return &v.state @@ -6212,7 +6542,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetFunctions_Request); i { case 0: return &v.state @@ -6224,7 +6554,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetFunctions_Response); i { case 0: return &v.state @@ -6236,7 +6566,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CallFunction_Request); i { case 0: return &v.state @@ -6248,7 +6578,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CallFunction_Response); i { case 0: return &v.state @@ -6262,7 +6592,7 @@ func file_tfplugin5_proto_init() { } } 
file_tfplugin5_proto_msgTypes[2].OneofWrappers = []interface{}{} - file_tfplugin5_proto_msgTypes[27].OneofWrappers = []interface{}{ + file_tfplugin5_proto_msgTypes[29].OneofWrappers = []interface{}{ (*AttributePath_Step_AttributeName)(nil), (*AttributePath_Step_ElementKeyString)(nil), (*AttributePath_Step_ElementKeyInt)(nil), @@ -6272,8 +6602,8 @@ func file_tfplugin5_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_tfplugin5_proto_rawDesc, - NumEnums: 3, - NumMessages: 80, + NumEnums: 4, + NumMessages: 82, NumExtensions: 0, NumServices: 2, }, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.proto b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.proto index 1266a510955..3c2fa84aca6 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.proto +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.proto @@ -1,9 +1,9 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -// Terraform Plugin RPC protocol version 5.5 +// Terraform Plugin RPC protocol version 5.6 // -// This file defines version 5.5 of the RPC protocol. To implement a plugin +// This file defines version 5.6 of the RPC protocol. To implement a plugin // against this protocol, copy this definition into your own codebase and // use protoc to generate stubs for your target language. // @@ -155,6 +155,16 @@ message ServerCapabilities { bool move_resource_state = 3; } +// ClientCapabilities allows Terraform to publish information regarding +// supported protocol features. This is used to indicate availability of +// certain forward-compatible changes which may be optional in a major +// protocol version, but cannot be tested for directly. +message ClientCapabilities { + // The deferral_allowed capability signals that the client is able to + // handle deferred responses from the provider. + bool deferral_allowed = 1; +} + message Function { // parameters is the ordered list of positional function parameters. repeated Parameter parameters = 1; @@ -211,6 +221,25 @@ message Function { } } +// Deferred is a message that indicates that change is deferred for a reason. +message Deferred { + // Reason is the reason for deferring the change. + enum Reason { + // UNKNOWN is the default value, and should not be used. + UNKNOWN = 0; + // RESOURCE_CONFIG_UNKNOWN is used when the config is partially unknown and the real + // values need to be known before the change can be planned. + RESOURCE_CONFIG_UNKNOWN = 1; + // PROVIDER_CONFIG_UNKNOWN is used when parts of the provider configuration + // are unknown, e.g. the provider configuration is only known after the apply is done. + PROVIDER_CONFIG_UNKNOWN = 2; + // ABSENT_PREREQ is used when a hard dependency has not been satisfied. + ABSENT_PREREQ = 3; + } + // reason is the reason for deferring the change. 
+ Reason reason = 1; +} + service Provider { //////// Information about what a provider supports/expects @@ -367,6 +396,7 @@ message Configure { message Request { string terraform_version = 1; DynamicValue config = 2; + ClientCapabilities client_capabilities = 3; } message Response { repeated Diagnostic diagnostics = 1; @@ -387,11 +417,15 @@ message ReadResource { DynamicValue current_state = 2; bytes private = 3; DynamicValue provider_meta = 4; + ClientCapabilities client_capabilities = 5; } message Response { DynamicValue new_state = 1; repeated Diagnostic diagnostics = 2; bytes private = 3; + // deferred is set if the provider is deferring the change. If set the caller + // needs to handle the deferral. + Deferred deferred = 4; } } @@ -403,6 +437,7 @@ message PlanResourceChange { DynamicValue config = 4; bytes prior_private = 5; DynamicValue provider_meta = 6; + ClientCapabilities client_capabilities = 7; } message Response { @@ -424,6 +459,9 @@ message PlanResourceChange { // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== // ==== DO NOT USE THIS ==== bool legacy_type_system = 5; + // deferred is set if the provider is deferring the change. If set the caller + // needs to handle the deferral. + Deferred deferred = 6; } } @@ -460,6 +498,7 @@ message ImportResourceState { message Request { string type_name = 1; string id = 2; + ClientCapabilities client_capabilities = 3; } message ImportedResource { @@ -471,6 +510,9 @@ message ImportResourceState { message Response { repeated ImportedResource imported_resources = 1; repeated Diagnostic diagnostics = 2; + // deferred is set if the provider is deferring the change. If set the caller + // needs to handle the deferral. + Deferred deferred = 3; } } @@ -515,10 +557,14 @@ message ReadDataSource { string type_name = 1; DynamicValue config = 2; DynamicValue provider_meta = 3; + ClientCapabilities client_capabilities = 4; } message Response { DynamicValue state = 1; repeated Diagnostic diagnostics = 2; + // deferred is set if the provider is deferring the change. If set the caller + // needs to handle the deferral. + Deferred deferred = 3; } } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5_grpc.pb.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5_grpc.pb.go index fc01684637e..8a8c8a5a013 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5_grpc.pb.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5_grpc.pb.go @@ -1,9 +1,9 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -// Terraform Plugin RPC protocol version 5.5 +// Terraform Plugin RPC protocol version 5.6 // -// This file defines version 5.5 of the RPC protocol. To implement a plugin +// This file defines version 5.6 of the RPC protocol. To implement a plugin // against this protocol, copy this definition into your own codebase and // use protoc to generate stubs for your target language. // @@ -23,7 +23,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v4.25.1 +// - protoc v5.26.1 // source: tfplugin5.proto package tfplugin5 diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/data_source.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/data_source.go index a62f3cde223..592ca364aed 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/data_source.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/data_source.go @@ -40,6 +40,7 @@ func ReadDataSource_Response(in *tfprotov5.ReadDataSourceResponse) *tfplugin5.Re resp := &tfplugin5.ReadDataSource_Response{ Diagnostics: Diagnostics(in.Diagnostics), State: DynamicValue(in.State), + Deferred: Deferred(in.Deferred), } return resp diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/deferred.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/deferred.go new file mode 100644 index 00000000000..376d3d83bf6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/deferred.go @@ -0,0 +1,21 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func Deferred(in *tfprotov5.Deferred) *tfplugin5.Deferred { + if in == nil { + return nil + } + + resp := &tfplugin5.Deferred{ + Reason: tfplugin5.Deferred_Reason(in.Reason), + } + + return resp +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/resource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/resource.go index 0ba9ab465ff..8e65712e975 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/resource.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/resource.go @@ -54,6 +54,7 @@ func ReadResource_Response(in *tfprotov5.ReadResourceResponse) *tfplugin5.ReadRe Diagnostics: Diagnostics(in.Diagnostics), NewState: DynamicValue(in.NewState), Private: in.Private, + Deferred: Deferred(in.Deferred), } return resp @@ -70,6 +71,7 @@ func PlanResourceChange_Response(in *tfprotov5.PlanResourceChangeResponse) *tfpl PlannedPrivate: in.PlannedPrivate, PlannedState: DynamicValue(in.PlannedState), RequiresReplace: AttributePaths(in.RequiresReplace), + Deferred: Deferred(in.Deferred), } return resp @@ -98,6 +100,7 @@ func ImportResourceState_Response(in *tfprotov5.ImportResourceStateResponse) *tf resp := &tfplugin5.ImportResourceState_Response{ Diagnostics: Diagnostics(in.Diagnostics), ImportedResources: ImportResourceState_ImportedResources(in.ImportedResources), + Deferred: Deferred(in.Deferred), } return resp diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/provider.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/provider.go index fa85a8e04fc..799f90238a2 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/provider.go +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/provider.go @@ -53,10 +53,7 @@ type ProviderServer interface { // are a handy interface for defining what a function is to // terraform-plugin-go, so they are their own interface that is composed // into ProviderServer. - // - // This will be required in an upcoming release. - // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 - // FunctionServer + FunctionServer } // GetMetadataRequest represents a GetMetadata RPC request. @@ -211,6 +208,10 @@ type ConfigureProviderRequest struct { // known values. Values that are not set in the configuration will be // null. Config *DynamicValue + + // ClientCapabilities defines optionally supported protocol features for the + // ConfigureProvider RPC, such as forward-compatible Terraform behavior changes. + ClientCapabilities *ConfigureProviderClientCapabilities } // ConfigureProviderResponse represents a Terraform RPC response to the diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/resource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/resource.go index 3090d298af0..9e50a0ce6c6 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/resource.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/resource.go @@ -52,14 +52,24 @@ type ResourceServer interface { // specified by the passed ID and return it as one or more resource // states for Terraform to assume control of. ImportResourceState(context.Context, *ImportResourceStateRequest) (*ImportResourceStateResponse, error) + + // MoveResourceState is called when Terraform is asked to change a resource + // type for an existing resource. The provider must accept the change as + // valid by ensuring the source resource type, schema version, and provider + // address are compatible to convert the source state into the target + // resource type and latest state version. + // + // This functionality is only supported in Terraform 1.8 and later. The + // provider must have enabled the MoveResourceState server capability to + // enable these requests. + MoveResourceState(context.Context, *MoveResourceStateRequest) (*MoveResourceStateResponse, error) } // ResourceServerWithMoveResourceState is a temporary interface for servers // to implement MoveResourceState RPC handling. // -// Deprecated: The MoveResourceState method will be moved into the -// ResourceServer interface and this interface will be removed in a future -// version. +// Deprecated: This interface will be removed in a future version. Use +// ResourceServer instead. type ResourceServerWithMoveResourceState interface { ResourceServer @@ -177,6 +187,10 @@ type ReadResourceRequest struct { // // This configuration will have known values for all fields. ProviderMeta *DynamicValue + + // ClientCapabilities defines optionally supported protocol features for the + // ReadResource RPC, such as forward-compatible Terraform behavior changes. + ClientCapabilities *ReadResourceClientCapabilities } // ReadResourceResponse is the response from the provider about the current @@ -201,6 +215,10 @@ type ReadResourceResponse struct { // with requests for this resource. This state will be associated with // the resource, but will not be considered when calculating diffs. 
Private []byte + + // Deferred is used to indicate to Terraform that the ReadResource operation + // needs to be deferred for a reason. + Deferred *Deferred } // PlanResourceChangeRequest is the request Terraform sends when it is @@ -267,6 +285,10 @@ type PlanResourceChangeRequest struct { // // This configuration will have known values for all fields. ProviderMeta *DynamicValue + + // ClientCapabilities defines optionally supported protocol features for the + // PlanResourceChange RPC, such as forward-compatible Terraform behavior changes. + ClientCapabilities *PlanResourceChangeClientCapabilities } // PlanResourceChangeResponse is the response from the provider about what the @@ -345,6 +367,10 @@ type PlanResourceChangeResponse struct { // // Deprecated: Really, just don't use this, you don't need it. UnsafeToUseLegacyTypeSystem bool + + // Deferred is used to indicate to Terraform that the PlanResourceChange operation + // needs to be deferred for a reason. + Deferred *Deferred } // ApplyResourceChangeRequest is the request Terraform sends when it needs to @@ -465,6 +491,10 @@ type ImportResourceStateRequest struct { // for the ID, and use it to determine what resource or resources to // import. ID string + + // ClientCapabilities defines optionally supported protocol features for the + // ImportResourceState RPC, such as forward-compatible Terraform behavior changes. + ClientCapabilities *ImportResourceStateClientCapabilities } // ImportResourceStateResponse is the response from the provider about the @@ -478,6 +508,10 @@ type ImportResourceStateResponse struct { // requested resource or resources. Returning an empty slice indicates // a successful validation with no warnings or errors generated. Diagnostics []*Diagnostic + + // Deferred is used to indicate to Terraform that the ImportResourceState operation + // needs to be deferred for a reason. + Deferred *Deferred } // ImportedResource represents a single resource that a provider has diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/server.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/server.go index feb359620ae..17c5c147ae8 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/server.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/server.go @@ -49,7 +49,7 @@ const ( // // In the future, it may be possible to include this information directly // in the protocol buffers rather than recreating a constant here. 
- protocolVersionMinor uint = 4 + protocolVersionMinor uint = 6 ) // protocolVersion represents the combined major and minor version numbers of @@ -579,6 +579,7 @@ func (s *server) Configure(ctx context.Context, protoReq *tfplugin5.Configure_Re defer logging.ProtocolTrace(ctx, "Served request") req := fromproto.ConfigureProviderRequest(protoReq) + tf5serverlogging.ConfigureProviderClientCapabilities(ctx, req.ClientCapabilities) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", req.Config) ctx = tf5serverlogging.DownstreamRequest(ctx) @@ -679,6 +680,7 @@ func (s *server) ReadDataSource(ctx context.Context, protoReq *tfplugin5.ReadDat req := fromproto.ReadDataSourceRequest(protoReq) + tf5serverlogging.ReadDataSourceClientCapabilities(ctx, req.ClientCapabilities) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", req.Config) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", req.ProviderMeta) ctx = tf5serverlogging.DownstreamRequest(ctx) @@ -692,6 +694,11 @@ func (s *server) ReadDataSource(ctx context.Context, protoReq *tfplugin5.ReadDat tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "State", resp.State) + tf5serverlogging.Deferred(ctx, resp.Deferred) + + if resp.Deferred != nil && (req.ClientCapabilities == nil || !req.ClientCapabilities.DeferralAllowed) { + resp.Diagnostics = append(resp.Diagnostics, invalidDeferredResponseDiag(resp.Deferred.Reason)) + } protoResp := toproto.ReadDataSource_Response(resp) @@ -766,6 +773,7 @@ func (s *server) ReadResource(ctx context.Context, protoReq *tfplugin5.ReadResou req := fromproto.ReadResourceRequest(protoReq) + tf5serverlogging.ReadResourceClientCapabilities(ctx, req.ClientCapabilities) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "CurrentState", req.CurrentState) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", req.ProviderMeta) logging.ProtocolPrivateData(ctx, s.protocolDataDir, rpc, "Request", "Private", req.Private) @@ -783,6 +791,11 @@ func (s *server) ReadResource(ctx context.Context, protoReq *tfplugin5.ReadResou logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "NewState", resp.NewState) logging.ProtocolPrivateData(ctx, s.protocolDataDir, rpc, "Response", "Private", resp.Private) + tf5serverlogging.Deferred(ctx, resp.Deferred) + + if resp.Deferred != nil && (req.ClientCapabilities == nil || !req.ClientCapabilities.DeferralAllowed) { + resp.Diagnostics = append(resp.Diagnostics, invalidDeferredResponseDiag(resp.Deferred.Reason)) + } protoResp := toproto.ReadResource_Response(resp) @@ -800,6 +813,7 @@ func (s *server) PlanResourceChange(ctx context.Context, protoReq *tfplugin5.Pla req := fromproto.PlanResourceChangeRequest(protoReq) + tf5serverlogging.PlanResourceChangeClientCapabilities(ctx, req.ClientCapabilities) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", req.Config) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "PriorState", req.PriorState) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProposedNewState", req.ProposedNewState) @@ -818,6 +832,11 @@ func (s *server) PlanResourceChange(ctx context.Context, protoReq *tfplugin5.Pla tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "PlannedState", resp.PlannedState) logging.ProtocolPrivateData(ctx, s.protocolDataDir, rpc, "Response", "PlannedPrivate", resp.PlannedPrivate) + 
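+ // Deferral gate, repeated for each RPC that can defer (ReadDataSource and ReadResource above, ImportResourceState below): trace-log the downstream Deferred response, then reject it with invalidDeferredResponseDiag unless the request announced the DeferralAllowed client capability. + // A nil ClientCapabilities, as sent by a Terraform CLI that predates protocol 5.6, is treated the same as DeferralAllowed being false.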
tf5serverlogging.Deferred(ctx, resp.Deferred) + + if resp.Deferred != nil && (req.ClientCapabilities == nil || !req.ClientCapabilities.DeferralAllowed) { + resp.Diagnostics = append(resp.Diagnostics, invalidDeferredResponseDiag(resp.Deferred.Reason)) + } protoResp := toproto.PlanResourceChange_Response(resp) @@ -870,6 +889,8 @@ func (s *server) ImportResourceState(ctx context.Context, protoReq *tfplugin5.Im req := fromproto.ImportResourceStateRequest(protoReq) + tf5serverlogging.ImportResourceStateClientCapabilities(ctx, req.ClientCapabilities) + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ImportResourceState(ctx, req) @@ -885,6 +906,11 @@ func (s *server) ImportResourceState(ctx context.Context, protoReq *tfplugin5.Im logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response_ImportedResource", "State", importedResource.State) logging.ProtocolPrivateData(ctx, s.protocolDataDir, rpc, "Response_ImportedResource", "Private", importedResource.Private) } + tf5serverlogging.Deferred(ctx, resp.Deferred) + + if resp.Deferred != nil && (req.ClientCapabilities == nil || !req.ClientCapabilities.DeferralAllowed) { + resp.Diagnostics = append(resp.Diagnostics, invalidDeferredResponseDiag(resp.Deferred.Reason)) + } protoResp := toproto.ImportResourceState_Response(resp) @@ -900,37 +926,11 @@ func (s *server) MoveResourceState(ctx context.Context, protoReq *tfplugin5.Move logging.ProtocolTrace(ctx, "Received request") defer logging.ProtocolTrace(ctx, "Served request") - // Remove this check and error in preference of - // s.downstream.MoveResourceState below once ResourceServer interface - // implements the MoveResourceState method. - // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/363 - // nolint:staticcheck - resourceServerWMRS, ok := s.downstream.(tfprotov5.ResourceServerWithMoveResourceState) - - if !ok { - logging.ProtocolError(ctx, "ProviderServer does not implement ResourceServerWithMoveResourceState") - - protoResp := &tfplugin5.MoveResourceState_Response{ - Diagnostics: []*tfplugin5.Diagnostic{ - { - Severity: tfplugin5.Diagnostic_ERROR, - Summary: "Provider Move Resource State Not Implemented", - Detail: "A MoveResourceState call was received by the provider, however the provider does not implement the call. " + - "Either upgrade the provider to a version that implements move resource state support or this is a bug in Terraform that should be reported to the Terraform maintainers.", - }, - }, - } - - return protoResp, nil - } - req := fromproto.MoveResourceStateRequest(protoReq) ctx = tf5serverlogging.DownstreamRequest(ctx) - // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/363 - // resp, err := s.downstream.MoveResourceState(ctx, req) - resp, err := resourceServerWMRS.MoveResourceState(ctx, req) + resp, err := s.downstream.MoveResourceState(ctx, req) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) @@ -954,26 +954,6 @@ func (s *server) CallFunction(ctx context.Context, protoReq *tfplugin5.CallFunct logging.ProtocolTrace(ctx, "Received request") defer logging.ProtocolTrace(ctx, "Served request") - // Remove this check and error in preference of s.downstream.CallFunction - // below once ProviderServer interface requires FunctionServer. 
- // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 - functionServer, ok := s.downstream.(tfprotov5.FunctionServer) - - if !ok { - logging.ProtocolError(ctx, "ProviderServer does not implement FunctionServer") - - text := "Provider Functions Not Implemented: A provider-defined function call was received by the provider, however the provider does not implement functions. " + - "Either upgrade the provider to a version that implements provider-defined functions or this is a bug in Terraform that should be reported to the Terraform maintainers." - - protoResp := &tfplugin5.CallFunction_Response{ - Error: &tfplugin5.FunctionError{ - Text: text, - }, - } - - return protoResp, nil - } - req := fromproto.CallFunctionRequest(protoReq) for position, argument := range req.Arguments { @@ -982,9 +962,7 @@ func (s *server) CallFunction(ctx context.Context, protoReq *tfplugin5.CallFunct ctx = tf5serverlogging.DownstreamRequest(ctx) - // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 - // resp, err := s.downstream.CallFunction(ctx, req) - resp, err := functionServer.CallFunction(ctx, req) + resp, err := s.downstream.CallFunction(ctx, req) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]any{logging.KeyError: err}) @@ -1007,28 +985,11 @@ func (s *server) GetFunctions(ctx context.Context, protoReq *tfplugin5.GetFuncti logging.ProtocolTrace(ctx, "Received request") defer logging.ProtocolTrace(ctx, "Served request") - // Remove this check and response in preference of s.downstream.GetFunctions - // below once ProviderServer interface requires FunctionServer. - // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 - functionServer, ok := s.downstream.(tfprotov5.FunctionServer) - - if !ok { - logging.ProtocolWarn(ctx, "ProviderServer does not implement FunctionServer") - - protoResp := &tfplugin5.GetFunctions_Response{ - Functions: map[string]*tfplugin5.Function{}, - } - - return protoResp, nil - } - req := fromproto.GetFunctionsRequest(protoReq) ctx = tf5serverlogging.DownstreamRequest(ctx) - // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 - // resp, err := s.downstream.GetFunctions(ctx, req) - resp, err := functionServer.GetFunctions(ctx, req) + resp, err := s.downstream.GetFunctions(ctx, req) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]any{logging.KeyError: err}) @@ -1041,3 +1002,13 @@ func (s *server) GetFunctions(ctx context.Context, protoReq *tfplugin5.GetFuncti return protoResp, nil } + +func invalidDeferredResponseDiag(reason tfprotov5.DeferredReason) *tfprotov5.Diagnostic { + return &tfprotov5.Diagnostic{ + Severity: tfprotov5.DiagnosticSeverityError, + Summary: "Invalid Deferred Response", + Detail: "Provider returned a deferred response but the Terraform request did not indicate support for deferred actions. " + + "This is an issue with the provider and should be reported to the provider developers.\n\n" + + fmt.Sprintf("Deferred reason - %q", reason.String()), + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/client_capabilities.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/client_capabilities.go new file mode 100644 index 00000000000..b528c123abf --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/client_capabilities.go @@ -0,0 +1,49 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package tfprotov6 + +// ConfigureProviderClientCapabilities allows Terraform to publish information +// regarding optionally supported protocol features for the ConfigureProvider RPC, +// such as forward-compatible Terraform behavior changes. +type ConfigureProviderClientCapabilities struct { + // DeferralAllowed signals that the request from Terraform is able to + // handle deferred responses from the provider. + DeferralAllowed bool +} + +// ReadDataSourceClientCapabilities allows Terraform to publish information +// regarding optionally supported protocol features for the ReadDataSource RPC, +// such as forward-compatible Terraform behavior changes. +type ReadDataSourceClientCapabilities struct { + // DeferralAllowed signals that the request from Terraform is able to + // handle deferred responses from the provider. + DeferralAllowed bool +} + +// ReadResourceClientCapabilities allows Terraform to publish information +// regarding optionally supported protocol features for the ReadResource RPC, +// such as forward-compatible Terraform behavior changes. +type ReadResourceClientCapabilities struct { + // DeferralAllowed signals that the request from Terraform is able to + // handle deferred responses from the provider. + DeferralAllowed bool +} + +// PlanResourceChangeClientCapabilities allows Terraform to publish information +// regarding optionally supported protocol features for the PlanResourceChange RPC, +// such as forward-compatible Terraform behavior changes. +type PlanResourceChangeClientCapabilities struct { + // DeferralAllowed signals that the request from Terraform is able to + // handle deferred responses from the provider. + DeferralAllowed bool +} + +// ImportResourceStateClientCapabilities allows Terraform to publish information +// regarding optionally supported protocol features for the ImportResourceState RPC, +// such as forward-compatible Terraform behavior changes. +type ImportResourceStateClientCapabilities struct { + // DeferralAllowed signals that the request from Terraform is able to + // handle deferred responses from the provider. + DeferralAllowed bool +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/data_source.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/data_source.go index ebb2cbd3dc2..bed6e3d52f5 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/data_source.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/data_source.go @@ -87,6 +87,10 @@ type ReadDataSourceRequest struct { // // This configuration will have known values for all fields. ProviderMeta *DynamicValue + + // ClientCapabilities defines optionally supported protocol features for the + // ReadDataSource RPC, such as forward-compatible Terraform behavior changes. + ClientCapabilities *ReadDataSourceClientCapabilities } // ReadDataSourceResponse is the response from the provider about the current @@ -105,4 +109,8 @@ type ReadDataSourceResponse struct { // indicates a successful validation with no warnings or errors // generated. Diagnostics []*Diagnostic + + // Deferred is used to indicate to Terraform that the ReadDataSource operation + // needs to be deferred for a reason. 
+ Deferred *Deferred } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/deferred.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/deferred.go new file mode 100644 index 00000000000..110e3bd75b5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/deferred.go @@ -0,0 +1,44 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov6 + +const ( + // DeferredReasonUnknown is used to indicate an invalid `DeferredReason`. + // Provider developers should not use it. + DeferredReasonUnknown DeferredReason = 0 + + // DeferredReasonResourceConfigUnknown is used to indicate that the resource configuration + // is partially unknown and the real values need to be known before the change can be planned. + DeferredReasonResourceConfigUnknown DeferredReason = 1 + + // DeferredReasonProviderConfigUnknown is used to indicate that the provider configuration + // is partially unknown and the real values need to be known before the change can be planned. + DeferredReasonProviderConfigUnknown DeferredReason = 2 + + // DeferredReasonAbsentPrereq is used to indicate that a hard dependency has not been satisfied. + DeferredReasonAbsentPrereq DeferredReason = 3 +) + +// Deferred is used to indicate to Terraform that a change needs to be deferred for a reason. +type Deferred struct { + // Reason is the reason for deferring the change. + Reason DeferredReason +} + +// DeferredReason represents different reasons for deferring a change. +type DeferredReason int32 + +func (d DeferredReason) String() string { + switch d { + case 0: + return "UNKNOWN" + case 1: + return "RESOURCE_CONFIG_UNKNOWN" + case 2: + return "PROVIDER_CONFIG_UNKNOWN" + case 3: + return "ABSENT_PREREQ" + } + return "UNKNOWN" +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/client_capabilities.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/client_capabilities.go new file mode 100644 index 00000000000..06238eac048 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/client_capabilities.go @@ -0,0 +1,69 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package fromproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" +) + +func ConfigureProviderClientCapabilities(in *tfplugin6.ClientCapabilities) *tfprotov6.ConfigureProviderClientCapabilities { + if in == nil { + return nil + } + + resp := &tfprotov6.ConfigureProviderClientCapabilities{ + DeferralAllowed: in.DeferralAllowed, + } + + return resp +} + +func ReadDataSourceClientCapabilities(in *tfplugin6.ClientCapabilities) *tfprotov6.ReadDataSourceClientCapabilities { + if in == nil { + return nil + } + + resp := &tfprotov6.ReadDataSourceClientCapabilities{ + DeferralAllowed: in.DeferralAllowed, + } + + return resp +} + +func ReadResourceClientCapabilities(in *tfplugin6.ClientCapabilities) *tfprotov6.ReadResourceClientCapabilities { + if in == nil { + return nil + } + + resp := &tfprotov6.ReadResourceClientCapabilities{ + DeferralAllowed: in.DeferralAllowed, + } + + return resp +} + +func PlanResourceChangeClientCapabilities(in *tfplugin6.ClientCapabilities) *tfprotov6.PlanResourceChangeClientCapabilities { + if in == nil { + return nil + } + + resp := &tfprotov6.PlanResourceChangeClientCapabilities{ + DeferralAllowed: in.DeferralAllowed, + } + + return resp +} + +func ImportResourceStateClientCapabilities(in *tfplugin6.ClientCapabilities) *tfprotov6.ImportResourceStateClientCapabilities { + if in == nil { + return nil + } + + resp := &tfprotov6.ImportResourceStateClientCapabilities{ + DeferralAllowed: in.DeferralAllowed, + } + + return resp +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/data_source.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/data_source.go index 2544e12f2e9..85059f92a69 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/data_source.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/data_source.go @@ -27,9 +27,10 @@ func ReadDataSourceRequest(in *tfplugin6.ReadDataSource_Request) *tfprotov6.Read } resp := &tfprotov6.ReadDataSourceRequest{ - Config: DynamicValue(in.Config), - ProviderMeta: DynamicValue(in.ProviderMeta), - TypeName: in.TypeName, + Config: DynamicValue(in.Config), + ProviderMeta: DynamicValue(in.ProviderMeta), + TypeName: in.TypeName, + ClientCapabilities: ReadDataSourceClientCapabilities(in.ClientCapabilities), } return resp diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/provider.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/provider.go index 912288684a7..99a6cc5563a 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/provider.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/provider.go @@ -46,8 +46,9 @@ func ConfigureProviderRequest(in *tfplugin6.ConfigureProvider_Request) *tfprotov } resp := &tfprotov6.ConfigureProviderRequest{ - Config: DynamicValue(in.Config), - TerraformVersion: in.TerraformVersion, + Config: DynamicValue(in.Config), + TerraformVersion: in.TerraformVersion, + ClientCapabilities: ConfigureProviderClientCapabilities(in.ClientCapabilities), } return resp diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/resource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/resource.go index 1b5997c70ae..24e336953fa 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/resource.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/resource.go @@ -41,10 +41,11 @@ func ReadResourceRequest(in *tfplugin6.ReadResource_Request) *tfprotov6.ReadReso } resp := &tfprotov6.ReadResourceRequest{ - CurrentState: DynamicValue(in.CurrentState), - Private: in.Private, - ProviderMeta: DynamicValue(in.ProviderMeta), - TypeName: in.TypeName, + CurrentState: DynamicValue(in.CurrentState), + Private: in.Private, + ProviderMeta: DynamicValue(in.ProviderMeta), + TypeName: in.TypeName, + ClientCapabilities: ReadResourceClientCapabilities(in.ClientCapabilities), } return resp @@ -56,12 +57,13 @@ func PlanResourceChangeRequest(in *tfplugin6.PlanResourceChange_Request) *tfprot } resp := &tfprotov6.PlanResourceChangeRequest{ - Config: DynamicValue(in.Config), - PriorPrivate: in.PriorPrivate, - PriorState: DynamicValue(in.PriorState), - ProposedNewState: DynamicValue(in.ProposedNewState), - ProviderMeta: DynamicValue(in.ProviderMeta), - TypeName: in.TypeName, + Config: DynamicValue(in.Config), + PriorPrivate: in.PriorPrivate, + PriorState: DynamicValue(in.PriorState), + ProposedNewState: DynamicValue(in.ProposedNewState), + ProviderMeta: DynamicValue(in.ProviderMeta), + TypeName: in.TypeName, + ClientCapabilities: PlanResourceChangeClientCapabilities(in.ClientCapabilities), } return resp @@ -90,8 +92,9 @@ func ImportResourceStateRequest(in *tfplugin6.ImportResourceState_Request) *tfpr } resp := &tfprotov6.ImportResourceStateRequest{ - TypeName: in.TypeName, - ID: in.Id, + TypeName: in.TypeName, + ID: in.Id, + ClientCapabilities: ImportResourceStateClientCapabilities(in.ClientCapabilities), } return resp diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/client_capabilities.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/client_capabilities.go new file mode 100644 index 00000000000..d8d5859f43c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/client_capabilities.go @@ -0,0 +1,81 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tf6serverlogging + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/internal/logging" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// ConfigureProviderClientCapabilities generates a TRACE "Announced client capabilities" log. +func ConfigureProviderClientCapabilities(ctx context.Context, capabilities *tfprotov6.ConfigureProviderClientCapabilities) { + if capabilities == nil { + logging.ProtocolTrace(ctx, "No announced client capabilities", map[string]interface{}{}) + return + } + + responseFields := map[string]interface{}{ + logging.KeyClientCapabilityDeferralAllowed: capabilities.DeferralAllowed, + } + + logging.ProtocolTrace(ctx, "Announced client capabilities", responseFields) +} + +// ReadDataSourceClientCapabilities generates a TRACE "Announced client capabilities" log. 
+func ReadDataSourceClientCapabilities(ctx context.Context, capabilities *tfprotov6.ReadDataSourceClientCapabilities) { + if capabilities == nil { + logging.ProtocolTrace(ctx, "No announced client capabilities", map[string]interface{}{}) + return + } + + responseFields := map[string]interface{}{ + logging.KeyClientCapabilityDeferralAllowed: capabilities.DeferralAllowed, + } + + logging.ProtocolTrace(ctx, "Announced client capabilities", responseFields) +} + +// ReadResourceClientCapabilities generates a TRACE "Announced client capabilities" log. +func ReadResourceClientCapabilities(ctx context.Context, capabilities *tfprotov6.ReadResourceClientCapabilities) { + if capabilities == nil { + logging.ProtocolTrace(ctx, "No announced client capabilities", map[string]interface{}{}) + return + } + + responseFields := map[string]interface{}{ + logging.KeyClientCapabilityDeferralAllowed: capabilities.DeferralAllowed, + } + + logging.ProtocolTrace(ctx, "Announced client capabilities", responseFields) +} + +// PlanResourceChangeClientCapabilities generates a TRACE "Announced client capabilities" log. +func PlanResourceChangeClientCapabilities(ctx context.Context, capabilities *tfprotov6.PlanResourceChangeClientCapabilities) { + if capabilities == nil { + logging.ProtocolTrace(ctx, "No announced client capabilities", map[string]interface{}{}) + return + } + + responseFields := map[string]interface{}{ + logging.KeyClientCapabilityDeferralAllowed: capabilities.DeferralAllowed, + } + + logging.ProtocolTrace(ctx, "Announced client capabilities", responseFields) +} + +// ImportResourceStateClientCapabilities generates a TRACE "Announced client capabilities" log. +func ImportResourceStateClientCapabilities(ctx context.Context, capabilities *tfprotov6.ImportResourceStateClientCapabilities) { + if capabilities == nil { + logging.ProtocolTrace(ctx, "No announced client capabilities", map[string]interface{}{}) + return + } + + responseFields := map[string]interface{}{ + logging.KeyClientCapabilityDeferralAllowed: capabilities.DeferralAllowed, + } + + logging.ProtocolTrace(ctx, "Announced client capabilities", responseFields) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/deferred.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/deferred.go new file mode 100644 index 00000000000..5822b6094a2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/deferred.go @@ -0,0 +1,24 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tf6serverlogging + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/internal/logging" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// Deferred generates a TRACE "Received downstream deferred response" log if populated. 
+func Deferred(ctx context.Context, deferred *tfprotov6.Deferred) { + if deferred == nil { + return + } + + responseFields := map[string]interface{}{ + logging.KeyDeferredReason: deferred.Reason.String(), + } + + logging.ProtocolTrace(ctx, "Received downstream deferred response", responseFields) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.pb.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.pb.go index 9016dfe4cd2..e0f55a1f542 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.pb.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.pb.go @@ -1,9 +1,9 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -// Terraform Plugin RPC protocol version 6.5 +// Terraform Plugin RPC protocol version 6.6 // -// This file defines version 6.5 of the RPC protocol. To implement a plugin +// This file defines version 6.6 of the RPC protocol. To implement a plugin // against this protocol, copy this definition into your own codebase and // use protoc to generate stubs for your target language. // @@ -22,8 +22,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 -// protoc v4.25.1 +// protoc-gen-go v1.34.0 +// protoc v5.26.1 // source: tfplugin6.proto package tfplugin6 @@ -250,6 +250,65 @@ func (Schema_Object_NestingMode) EnumDescriptor() ([]byte, []int) { return file_tfplugin6_proto_rawDescGZIP(), []int{6, 3, 0} } +// Reason is the reason for deferring the change. +type Deferred_Reason int32 + +const ( + // UNKNOWN is the default value, and should not be used. + Deferred_UNKNOWN Deferred_Reason = 0 + // RESOURCE_CONFIG_UNKNOWN is used when the config is partially unknown and the real + // values need to be known before the change can be planned. + Deferred_RESOURCE_CONFIG_UNKNOWN Deferred_Reason = 1 + // PROVIDER_CONFIG_UNKNOWN is used when parts of the provider configuration + // are unknown, e.g. the provider configuration is only known after the apply is done. + Deferred_PROVIDER_CONFIG_UNKNOWN Deferred_Reason = 2 + // ABSENT_PREREQ is used when a hard dependency has not been satisfied. + Deferred_ABSENT_PREREQ Deferred_Reason = 3 +) + +// Enum value maps for Deferred_Reason. +var ( + Deferred_Reason_name = map[int32]string{ + 0: "UNKNOWN", + 1: "RESOURCE_CONFIG_UNKNOWN", + 2: "PROVIDER_CONFIG_UNKNOWN", + 3: "ABSENT_PREREQ", + } + Deferred_Reason_value = map[string]int32{ + "UNKNOWN": 0, + "RESOURCE_CONFIG_UNKNOWN": 1, + "PROVIDER_CONFIG_UNKNOWN": 2, + "ABSENT_PREREQ": 3, + } +) + +func (x Deferred_Reason) Enum() *Deferred_Reason { + p := new(Deferred_Reason) + *p = x + return p +} + +func (x Deferred_Reason) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Deferred_Reason) Descriptor() protoreflect.EnumDescriptor { + return file_tfplugin6_proto_enumTypes[4].Descriptor() +} + +func (Deferred_Reason) Type() protoreflect.EnumType { + return &file_tfplugin6_proto_enumTypes[4] +} + +func (x Deferred_Reason) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Deferred_Reason.Descriptor instead. 
+func (Deferred_Reason) EnumDescriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{10, 0} +} + // DynamicValue is an opaque encoding of terraform data, with the field name // indicating the encoding scheme used. type DynamicValue struct { @@ -818,6 +877,108 @@ func (x *ServerCapabilities) GetMoveResourceState() bool { return false } +// ClientCapabilities allows Terraform to publish information regarding +// supported protocol features. This is used to indicate availability of +// certain forward-compatible changes which may be optional in a major +// protocol version, but cannot be tested for directly. +type ClientCapabilities struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The deferral_allowed capability signals that the client is able to + // handle deferred responses from the provider. + DeferralAllowed bool `protobuf:"varint,1,opt,name=deferral_allowed,json=deferralAllowed,proto3" json:"deferral_allowed,omitempty"` +} + +func (x *ClientCapabilities) Reset() { + *x = ClientCapabilities{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientCapabilities) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientCapabilities) ProtoMessage() {} + +func (x *ClientCapabilities) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientCapabilities.ProtoReflect.Descriptor instead. +func (*ClientCapabilities) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{9} +} + +func (x *ClientCapabilities) GetDeferralAllowed() bool { + if x != nil { + return x.DeferralAllowed + } + return false +} + +// Deferred is a message that indicates that change is deferred for a reason. +type Deferred struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // reason is the reason for deferring the change. + Reason Deferred_Reason `protobuf:"varint,1,opt,name=reason,proto3,enum=tfplugin6.Deferred_Reason" json:"reason,omitempty"` +} + +func (x *Deferred) Reset() { + *x = Deferred{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Deferred) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Deferred) ProtoMessage() {} + +func (x *Deferred) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Deferred.ProtoReflect.Descriptor instead. 
+func (*Deferred) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{10} +} + +func (x *Deferred) GetReason() Deferred_Reason { + if x != nil { + return x.Reason + } + return Deferred_UNKNOWN +} + type GetMetadata struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -827,7 +988,7 @@ type GetMetadata struct { func (x *GetMetadata) Reset() { *x = GetMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[9] + mi := &file_tfplugin6_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -840,7 +1001,7 @@ func (x *GetMetadata) String() string { func (*GetMetadata) ProtoMessage() {} func (x *GetMetadata) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[9] + mi := &file_tfplugin6_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -853,7 +1014,7 @@ func (x *GetMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMetadata.ProtoReflect.Descriptor instead. func (*GetMetadata) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{9} + return file_tfplugin6_proto_rawDescGZIP(), []int{11} } type GetProviderSchema struct { @@ -865,7 +1026,7 @@ type GetProviderSchema struct { func (x *GetProviderSchema) Reset() { *x = GetProviderSchema{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[10] + mi := &file_tfplugin6_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -878,7 +1039,7 @@ func (x *GetProviderSchema) String() string { func (*GetProviderSchema) ProtoMessage() {} func (x *GetProviderSchema) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[10] + mi := &file_tfplugin6_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -891,7 +1052,7 @@ func (x *GetProviderSchema) ProtoReflect() protoreflect.Message { // Deprecated: Use GetProviderSchema.ProtoReflect.Descriptor instead. func (*GetProviderSchema) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{10} + return file_tfplugin6_proto_rawDescGZIP(), []int{12} } type ValidateProviderConfig struct { @@ -903,7 +1064,7 @@ type ValidateProviderConfig struct { func (x *ValidateProviderConfig) Reset() { *x = ValidateProviderConfig{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[11] + mi := &file_tfplugin6_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -916,7 +1077,7 @@ func (x *ValidateProviderConfig) String() string { func (*ValidateProviderConfig) ProtoMessage() {} func (x *ValidateProviderConfig) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[11] + mi := &file_tfplugin6_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -929,7 +1090,7 @@ func (x *ValidateProviderConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateProviderConfig.ProtoReflect.Descriptor instead. 
func (*ValidateProviderConfig) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{11} + return file_tfplugin6_proto_rawDescGZIP(), []int{13} } type UpgradeResourceState struct { @@ -941,7 +1102,7 @@ type UpgradeResourceState struct { func (x *UpgradeResourceState) Reset() { *x = UpgradeResourceState{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[12] + mi := &file_tfplugin6_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -954,7 +1115,7 @@ func (x *UpgradeResourceState) String() string { func (*UpgradeResourceState) ProtoMessage() {} func (x *UpgradeResourceState) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[12] + mi := &file_tfplugin6_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -967,7 +1128,7 @@ func (x *UpgradeResourceState) ProtoReflect() protoreflect.Message { // Deprecated: Use UpgradeResourceState.ProtoReflect.Descriptor instead. func (*UpgradeResourceState) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{12} + return file_tfplugin6_proto_rawDescGZIP(), []int{14} } type ValidateResourceConfig struct { @@ -979,7 +1140,7 @@ type ValidateResourceConfig struct { func (x *ValidateResourceConfig) Reset() { *x = ValidateResourceConfig{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[13] + mi := &file_tfplugin6_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -992,7 +1153,7 @@ func (x *ValidateResourceConfig) String() string { func (*ValidateResourceConfig) ProtoMessage() {} func (x *ValidateResourceConfig) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[13] + mi := &file_tfplugin6_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1005,7 +1166,7 @@ func (x *ValidateResourceConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateResourceConfig.ProtoReflect.Descriptor instead. func (*ValidateResourceConfig) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{13} + return file_tfplugin6_proto_rawDescGZIP(), []int{15} } type ValidateDataResourceConfig struct { @@ -1017,7 +1178,7 @@ type ValidateDataResourceConfig struct { func (x *ValidateDataResourceConfig) Reset() { *x = ValidateDataResourceConfig{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[14] + mi := &file_tfplugin6_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1030,7 +1191,7 @@ func (x *ValidateDataResourceConfig) String() string { func (*ValidateDataResourceConfig) ProtoMessage() {} func (x *ValidateDataResourceConfig) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[14] + mi := &file_tfplugin6_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1043,7 +1204,7 @@ func (x *ValidateDataResourceConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateDataResourceConfig.ProtoReflect.Descriptor instead. 
func (*ValidateDataResourceConfig) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{14} + return file_tfplugin6_proto_rawDescGZIP(), []int{16} } type ConfigureProvider struct { @@ -1055,7 +1216,7 @@ type ConfigureProvider struct { func (x *ConfigureProvider) Reset() { *x = ConfigureProvider{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[15] + mi := &file_tfplugin6_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1068,7 +1229,7 @@ func (x *ConfigureProvider) String() string { func (*ConfigureProvider) ProtoMessage() {} func (x *ConfigureProvider) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[15] + mi := &file_tfplugin6_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1081,7 +1242,7 @@ func (x *ConfigureProvider) ProtoReflect() protoreflect.Message { // Deprecated: Use ConfigureProvider.ProtoReflect.Descriptor instead. func (*ConfigureProvider) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{15} + return file_tfplugin6_proto_rawDescGZIP(), []int{17} } type ReadResource struct { @@ -1093,7 +1254,7 @@ type ReadResource struct { func (x *ReadResource) Reset() { *x = ReadResource{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[16] + mi := &file_tfplugin6_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1106,7 +1267,7 @@ func (x *ReadResource) String() string { func (*ReadResource) ProtoMessage() {} func (x *ReadResource) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[16] + mi := &file_tfplugin6_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1119,7 +1280,7 @@ func (x *ReadResource) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadResource.ProtoReflect.Descriptor instead. func (*ReadResource) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{16} + return file_tfplugin6_proto_rawDescGZIP(), []int{18} } type PlanResourceChange struct { @@ -1131,7 +1292,7 @@ type PlanResourceChange struct { func (x *PlanResourceChange) Reset() { *x = PlanResourceChange{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[17] + mi := &file_tfplugin6_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1144,7 +1305,7 @@ func (x *PlanResourceChange) String() string { func (*PlanResourceChange) ProtoMessage() {} func (x *PlanResourceChange) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[17] + mi := &file_tfplugin6_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1157,7 +1318,7 @@ func (x *PlanResourceChange) ProtoReflect() protoreflect.Message { // Deprecated: Use PlanResourceChange.ProtoReflect.Descriptor instead. 
func (*PlanResourceChange) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{17} + return file_tfplugin6_proto_rawDescGZIP(), []int{19} } type ApplyResourceChange struct { @@ -1169,7 +1330,7 @@ type ApplyResourceChange struct { func (x *ApplyResourceChange) Reset() { *x = ApplyResourceChange{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[18] + mi := &file_tfplugin6_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1182,7 +1343,7 @@ func (x *ApplyResourceChange) String() string { func (*ApplyResourceChange) ProtoMessage() {} func (x *ApplyResourceChange) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[18] + mi := &file_tfplugin6_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1195,7 +1356,7 @@ func (x *ApplyResourceChange) ProtoReflect() protoreflect.Message { // Deprecated: Use ApplyResourceChange.ProtoReflect.Descriptor instead. func (*ApplyResourceChange) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{18} + return file_tfplugin6_proto_rawDescGZIP(), []int{20} } type ImportResourceState struct { @@ -1207,7 +1368,7 @@ type ImportResourceState struct { func (x *ImportResourceState) Reset() { *x = ImportResourceState{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[19] + mi := &file_tfplugin6_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1220,7 +1381,7 @@ func (x *ImportResourceState) String() string { func (*ImportResourceState) ProtoMessage() {} func (x *ImportResourceState) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[19] + mi := &file_tfplugin6_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1233,7 +1394,7 @@ func (x *ImportResourceState) ProtoReflect() protoreflect.Message { // Deprecated: Use ImportResourceState.ProtoReflect.Descriptor instead. func (*ImportResourceState) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{19} + return file_tfplugin6_proto_rawDescGZIP(), []int{21} } type MoveResourceState struct { @@ -1245,7 +1406,7 @@ type MoveResourceState struct { func (x *MoveResourceState) Reset() { *x = MoveResourceState{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[20] + mi := &file_tfplugin6_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1258,7 +1419,7 @@ func (x *MoveResourceState) String() string { func (*MoveResourceState) ProtoMessage() {} func (x *MoveResourceState) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[20] + mi := &file_tfplugin6_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1271,7 +1432,7 @@ func (x *MoveResourceState) ProtoReflect() protoreflect.Message { // Deprecated: Use MoveResourceState.ProtoReflect.Descriptor instead. 
func (*MoveResourceState) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{20} + return file_tfplugin6_proto_rawDescGZIP(), []int{22} } type ReadDataSource struct { @@ -1283,7 +1444,7 @@ type ReadDataSource struct { func (x *ReadDataSource) Reset() { *x = ReadDataSource{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[21] + mi := &file_tfplugin6_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1296,7 +1457,7 @@ func (x *ReadDataSource) String() string { func (*ReadDataSource) ProtoMessage() {} func (x *ReadDataSource) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[21] + mi := &file_tfplugin6_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1309,7 +1470,7 @@ func (x *ReadDataSource) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadDataSource.ProtoReflect.Descriptor instead. func (*ReadDataSource) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{21} + return file_tfplugin6_proto_rawDescGZIP(), []int{23} } type GetFunctions struct { @@ -1321,7 +1482,7 @@ type GetFunctions struct { func (x *GetFunctions) Reset() { *x = GetFunctions{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[22] + mi := &file_tfplugin6_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1334,7 +1495,7 @@ func (x *GetFunctions) String() string { func (*GetFunctions) ProtoMessage() {} func (x *GetFunctions) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[22] + mi := &file_tfplugin6_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1347,7 +1508,7 @@ func (x *GetFunctions) ProtoReflect() protoreflect.Message { // Deprecated: Use GetFunctions.ProtoReflect.Descriptor instead. func (*GetFunctions) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{22} + return file_tfplugin6_proto_rawDescGZIP(), []int{24} } type CallFunction struct { @@ -1359,7 +1520,7 @@ type CallFunction struct { func (x *CallFunction) Reset() { *x = CallFunction{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[23] + mi := &file_tfplugin6_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1372,7 +1533,7 @@ func (x *CallFunction) String() string { func (*CallFunction) ProtoMessage() {} func (x *CallFunction) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[23] + mi := &file_tfplugin6_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1385,7 +1546,7 @@ func (x *CallFunction) ProtoReflect() protoreflect.Message { // Deprecated: Use CallFunction.ProtoReflect.Descriptor instead. 
func (*CallFunction) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{23} + return file_tfplugin6_proto_rawDescGZIP(), []int{25} } type AttributePath_Step struct { @@ -1404,7 +1565,7 @@ type AttributePath_Step struct { func (x *AttributePath_Step) Reset() { *x = AttributePath_Step{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[24] + mi := &file_tfplugin6_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1417,7 +1578,7 @@ func (x *AttributePath_Step) String() string { func (*AttributePath_Step) ProtoMessage() {} func (x *AttributePath_Step) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[24] + mi := &file_tfplugin6_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1496,7 +1657,7 @@ type StopProvider_Request struct { func (x *StopProvider_Request) Reset() { *x = StopProvider_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[25] + mi := &file_tfplugin6_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1509,7 +1670,7 @@ func (x *StopProvider_Request) String() string { func (*StopProvider_Request) ProtoMessage() {} func (x *StopProvider_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[25] + mi := &file_tfplugin6_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1536,7 +1697,7 @@ type StopProvider_Response struct { func (x *StopProvider_Response) Reset() { *x = StopProvider_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[26] + mi := &file_tfplugin6_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1549,7 +1710,7 @@ func (x *StopProvider_Response) String() string { func (*StopProvider_Response) ProtoMessage() {} func (x *StopProvider_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[26] + mi := &file_tfplugin6_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1588,7 +1749,7 @@ type Schema_Block struct { func (x *Schema_Block) Reset() { *x = Schema_Block{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[28] + mi := &file_tfplugin6_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1601,7 +1762,7 @@ func (x *Schema_Block) String() string { func (*Schema_Block) ProtoMessage() {} func (x *Schema_Block) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[28] + mi := &file_tfplugin6_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1679,7 +1840,7 @@ type Schema_Attribute struct { func (x *Schema_Attribute) Reset() { *x = Schema_Attribute{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[29] + mi := &file_tfplugin6_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1692,7 +1853,7 @@ func (x *Schema_Attribute) String() string { func (*Schema_Attribute) ProtoMessage() {} func (x *Schema_Attribute) ProtoReflect() protoreflect.Message { - mi := 
&file_tfplugin6_proto_msgTypes[29] + mi := &file_tfplugin6_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1793,7 +1954,7 @@ type Schema_NestedBlock struct { func (x *Schema_NestedBlock) Reset() { *x = Schema_NestedBlock{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[30] + mi := &file_tfplugin6_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1806,7 +1967,7 @@ func (x *Schema_NestedBlock) String() string { func (*Schema_NestedBlock) ProtoMessage() {} func (x *Schema_NestedBlock) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[30] + mi := &file_tfplugin6_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1876,7 +2037,7 @@ type Schema_Object struct { func (x *Schema_Object) Reset() { *x = Schema_Object{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[31] + mi := &file_tfplugin6_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1889,7 +2050,7 @@ func (x *Schema_Object) String() string { func (*Schema_Object) ProtoMessage() {} func (x *Schema_Object) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[31] + mi := &file_tfplugin6_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1962,7 +2123,7 @@ type Function_Parameter struct { func (x *Function_Parameter) Reset() { *x = Function_Parameter{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[32] + mi := &file_tfplugin6_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1975,7 +2136,7 @@ func (x *Function_Parameter) String() string { func (*Function_Parameter) ProtoMessage() {} func (x *Function_Parameter) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[32] + mi := &file_tfplugin6_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2045,7 +2206,7 @@ type Function_Return struct { func (x *Function_Return) Reset() { *x = Function_Return{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[33] + mi := &file_tfplugin6_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2058,7 +2219,7 @@ func (x *Function_Return) String() string { func (*Function_Return) ProtoMessage() {} func (x *Function_Return) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[33] + mi := &file_tfplugin6_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2090,7 +2251,7 @@ type GetMetadata_Request struct { func (x *GetMetadata_Request) Reset() { *x = GetMetadata_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[34] + mi := &file_tfplugin6_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2103,7 +2264,7 @@ func (x *GetMetadata_Request) String() string { func (*GetMetadata_Request) ProtoMessage() {} func (x *GetMetadata_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[34] + 
mi := &file_tfplugin6_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2116,7 +2277,7 @@ func (x *GetMetadata_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMetadata_Request.ProtoReflect.Descriptor instead. func (*GetMetadata_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{9, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{11, 0} } type GetMetadata_Response struct { @@ -2135,7 +2296,7 @@ type GetMetadata_Response struct { func (x *GetMetadata_Response) Reset() { *x = GetMetadata_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[35] + mi := &file_tfplugin6_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2148,7 +2309,7 @@ func (x *GetMetadata_Response) String() string { func (*GetMetadata_Response) ProtoMessage() {} func (x *GetMetadata_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[35] + mi := &file_tfplugin6_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2161,7 +2322,7 @@ func (x *GetMetadata_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMetadata_Response.ProtoReflect.Descriptor instead. func (*GetMetadata_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{9, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{11, 1} } func (x *GetMetadata_Response) GetServerCapabilities() *ServerCapabilities { @@ -2211,7 +2372,7 @@ type GetMetadata_FunctionMetadata struct { func (x *GetMetadata_FunctionMetadata) Reset() { *x = GetMetadata_FunctionMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[36] + mi := &file_tfplugin6_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2224,7 +2385,7 @@ func (x *GetMetadata_FunctionMetadata) String() string { func (*GetMetadata_FunctionMetadata) ProtoMessage() {} func (x *GetMetadata_FunctionMetadata) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[36] + mi := &file_tfplugin6_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2237,7 +2398,7 @@ func (x *GetMetadata_FunctionMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMetadata_FunctionMetadata.ProtoReflect.Descriptor instead. 
func (*GetMetadata_FunctionMetadata) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{9, 2} + return file_tfplugin6_proto_rawDescGZIP(), []int{11, 2} } func (x *GetMetadata_FunctionMetadata) GetName() string { @@ -2258,7 +2419,7 @@ type GetMetadata_DataSourceMetadata struct { func (x *GetMetadata_DataSourceMetadata) Reset() { *x = GetMetadata_DataSourceMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[37] + mi := &file_tfplugin6_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2271,7 +2432,7 @@ func (x *GetMetadata_DataSourceMetadata) String() string { func (*GetMetadata_DataSourceMetadata) ProtoMessage() {} func (x *GetMetadata_DataSourceMetadata) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[37] + mi := &file_tfplugin6_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2284,7 +2445,7 @@ func (x *GetMetadata_DataSourceMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMetadata_DataSourceMetadata.ProtoReflect.Descriptor instead. func (*GetMetadata_DataSourceMetadata) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{9, 3} + return file_tfplugin6_proto_rawDescGZIP(), []int{11, 3} } func (x *GetMetadata_DataSourceMetadata) GetTypeName() string { @@ -2305,7 +2466,7 @@ type GetMetadata_ResourceMetadata struct { func (x *GetMetadata_ResourceMetadata) Reset() { *x = GetMetadata_ResourceMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[38] + mi := &file_tfplugin6_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2318,7 +2479,7 @@ func (x *GetMetadata_ResourceMetadata) String() string { func (*GetMetadata_ResourceMetadata) ProtoMessage() {} func (x *GetMetadata_ResourceMetadata) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[38] + mi := &file_tfplugin6_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2331,7 +2492,7 @@ func (x *GetMetadata_ResourceMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMetadata_ResourceMetadata.ProtoReflect.Descriptor instead. 
func (*GetMetadata_ResourceMetadata) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{9, 4} + return file_tfplugin6_proto_rawDescGZIP(), []int{11, 4} } func (x *GetMetadata_ResourceMetadata) GetTypeName() string { @@ -2350,7 +2511,7 @@ type GetProviderSchema_Request struct { func (x *GetProviderSchema_Request) Reset() { *x = GetProviderSchema_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[39] + mi := &file_tfplugin6_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2363,7 +2524,7 @@ func (x *GetProviderSchema_Request) String() string { func (*GetProviderSchema_Request) ProtoMessage() {} func (x *GetProviderSchema_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[39] + mi := &file_tfplugin6_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2376,7 +2537,7 @@ func (x *GetProviderSchema_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use GetProviderSchema_Request.ProtoReflect.Descriptor instead. func (*GetProviderSchema_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{10, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{12, 0} } type GetProviderSchema_Response struct { @@ -2397,7 +2558,7 @@ type GetProviderSchema_Response struct { func (x *GetProviderSchema_Response) Reset() { *x = GetProviderSchema_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[40] + mi := &file_tfplugin6_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2410,7 +2571,7 @@ func (x *GetProviderSchema_Response) String() string { func (*GetProviderSchema_Response) ProtoMessage() {} func (x *GetProviderSchema_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[40] + mi := &file_tfplugin6_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2423,7 +2584,7 @@ func (x *GetProviderSchema_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use GetProviderSchema_Response.ProtoReflect.Descriptor instead. func (*GetProviderSchema_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{10, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{12, 1} } func (x *GetProviderSchema_Response) GetProvider() *Schema { @@ -2486,7 +2647,7 @@ type ValidateProviderConfig_Request struct { func (x *ValidateProviderConfig_Request) Reset() { *x = ValidateProviderConfig_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[44] + mi := &file_tfplugin6_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2499,7 +2660,7 @@ func (x *ValidateProviderConfig_Request) String() string { func (*ValidateProviderConfig_Request) ProtoMessage() {} func (x *ValidateProviderConfig_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[44] + mi := &file_tfplugin6_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2512,7 +2673,7 @@ func (x *ValidateProviderConfig_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateProviderConfig_Request.ProtoReflect.Descriptor instead. 
func (*ValidateProviderConfig_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{11, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{13, 0} } func (x *ValidateProviderConfig_Request) GetConfig() *DynamicValue { @@ -2533,7 +2694,7 @@ type ValidateProviderConfig_Response struct { func (x *ValidateProviderConfig_Response) Reset() { *x = ValidateProviderConfig_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[45] + mi := &file_tfplugin6_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2546,7 +2707,7 @@ func (x *ValidateProviderConfig_Response) String() string { func (*ValidateProviderConfig_Response) ProtoMessage() {} func (x *ValidateProviderConfig_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[45] + mi := &file_tfplugin6_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2559,7 +2720,7 @@ func (x *ValidateProviderConfig_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateProviderConfig_Response.ProtoReflect.Descriptor instead. func (*ValidateProviderConfig_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{11, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{13, 1} } func (x *ValidateProviderConfig_Response) GetDiagnostics() []*Diagnostic { @@ -2597,7 +2758,7 @@ type UpgradeResourceState_Request struct { func (x *UpgradeResourceState_Request) Reset() { *x = UpgradeResourceState_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[46] + mi := &file_tfplugin6_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2610,7 +2771,7 @@ func (x *UpgradeResourceState_Request) String() string { func (*UpgradeResourceState_Request) ProtoMessage() {} func (x *UpgradeResourceState_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[46] + mi := &file_tfplugin6_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2623,7 +2784,7 @@ func (x *UpgradeResourceState_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use UpgradeResourceState_Request.ProtoReflect.Descriptor instead. 
func (*UpgradeResourceState_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{12, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{14, 0} } func (x *UpgradeResourceState_Request) GetTypeName() string { @@ -2665,7 +2826,7 @@ type UpgradeResourceState_Response struct { func (x *UpgradeResourceState_Response) Reset() { *x = UpgradeResourceState_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[47] + mi := &file_tfplugin6_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2678,7 +2839,7 @@ func (x *UpgradeResourceState_Response) String() string { func (*UpgradeResourceState_Response) ProtoMessage() {} func (x *UpgradeResourceState_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[47] + mi := &file_tfplugin6_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2691,7 +2852,7 @@ func (x *UpgradeResourceState_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use UpgradeResourceState_Response.ProtoReflect.Descriptor instead. func (*UpgradeResourceState_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{12, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{14, 1} } func (x *UpgradeResourceState_Response) GetUpgradedState() *DynamicValue { @@ -2720,7 +2881,7 @@ type ValidateResourceConfig_Request struct { func (x *ValidateResourceConfig_Request) Reset() { *x = ValidateResourceConfig_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[48] + mi := &file_tfplugin6_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2733,7 +2894,7 @@ func (x *ValidateResourceConfig_Request) String() string { func (*ValidateResourceConfig_Request) ProtoMessage() {} func (x *ValidateResourceConfig_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[48] + mi := &file_tfplugin6_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2746,7 +2907,7 @@ func (x *ValidateResourceConfig_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateResourceConfig_Request.ProtoReflect.Descriptor instead. 
func (*ValidateResourceConfig_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{13, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{15, 0} } func (x *ValidateResourceConfig_Request) GetTypeName() string { @@ -2774,7 +2935,7 @@ type ValidateResourceConfig_Response struct { func (x *ValidateResourceConfig_Response) Reset() { *x = ValidateResourceConfig_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[49] + mi := &file_tfplugin6_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2787,7 +2948,7 @@ func (x *ValidateResourceConfig_Response) String() string { func (*ValidateResourceConfig_Response) ProtoMessage() {} func (x *ValidateResourceConfig_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[49] + mi := &file_tfplugin6_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2800,7 +2961,7 @@ func (x *ValidateResourceConfig_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateResourceConfig_Response.ProtoReflect.Descriptor instead. func (*ValidateResourceConfig_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{13, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{15, 1} } func (x *ValidateResourceConfig_Response) GetDiagnostics() []*Diagnostic { @@ -2822,7 +2983,7 @@ type ValidateDataResourceConfig_Request struct { func (x *ValidateDataResourceConfig_Request) Reset() { *x = ValidateDataResourceConfig_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[50] + mi := &file_tfplugin6_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2835,7 +2996,7 @@ func (x *ValidateDataResourceConfig_Request) String() string { func (*ValidateDataResourceConfig_Request) ProtoMessage() {} func (x *ValidateDataResourceConfig_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[50] + mi := &file_tfplugin6_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2848,7 +3009,7 @@ func (x *ValidateDataResourceConfig_Request) ProtoReflect() protoreflect.Message // Deprecated: Use ValidateDataResourceConfig_Request.ProtoReflect.Descriptor instead. 
func (*ValidateDataResourceConfig_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{14, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{16, 0} } func (x *ValidateDataResourceConfig_Request) GetTypeName() string { @@ -2876,7 +3037,7 @@ type ValidateDataResourceConfig_Response struct { func (x *ValidateDataResourceConfig_Response) Reset() { *x = ValidateDataResourceConfig_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[51] + mi := &file_tfplugin6_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2889,7 +3050,7 @@ func (x *ValidateDataResourceConfig_Response) String() string { func (*ValidateDataResourceConfig_Response) ProtoMessage() {} func (x *ValidateDataResourceConfig_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[51] + mi := &file_tfplugin6_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2902,7 +3063,7 @@ func (x *ValidateDataResourceConfig_Response) ProtoReflect() protoreflect.Messag // Deprecated: Use ValidateDataResourceConfig_Response.ProtoReflect.Descriptor instead. func (*ValidateDataResourceConfig_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{14, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{16, 1} } func (x *ValidateDataResourceConfig_Response) GetDiagnostics() []*Diagnostic { @@ -2917,14 +3078,15 @@ type ConfigureProvider_Request struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TerraformVersion string `protobuf:"bytes,1,opt,name=terraform_version,json=terraformVersion,proto3" json:"terraform_version,omitempty"` - Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + TerraformVersion string `protobuf:"bytes,1,opt,name=terraform_version,json=terraformVersion,proto3" json:"terraform_version,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + ClientCapabilities *ClientCapabilities `protobuf:"bytes,3,opt,name=client_capabilities,json=clientCapabilities,proto3" json:"client_capabilities,omitempty"` } func (x *ConfigureProvider_Request) Reset() { *x = ConfigureProvider_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[52] + mi := &file_tfplugin6_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2937,7 +3099,7 @@ func (x *ConfigureProvider_Request) String() string { func (*ConfigureProvider_Request) ProtoMessage() {} func (x *ConfigureProvider_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[52] + mi := &file_tfplugin6_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2950,7 +3112,7 @@ func (x *ConfigureProvider_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ConfigureProvider_Request.ProtoReflect.Descriptor instead. 
func (*ConfigureProvider_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{15, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{17, 0} } func (x *ConfigureProvider_Request) GetTerraformVersion() string { @@ -2967,6 +3129,13 @@ func (x *ConfigureProvider_Request) GetConfig() *DynamicValue { return nil } +func (x *ConfigureProvider_Request) GetClientCapabilities() *ClientCapabilities { + if x != nil { + return x.ClientCapabilities + } + return nil +} + type ConfigureProvider_Response struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2978,7 +3147,7 @@ type ConfigureProvider_Response struct { func (x *ConfigureProvider_Response) Reset() { *x = ConfigureProvider_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[53] + mi := &file_tfplugin6_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2991,7 +3160,7 @@ func (x *ConfigureProvider_Response) String() string { func (*ConfigureProvider_Response) ProtoMessage() {} func (x *ConfigureProvider_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[53] + mi := &file_tfplugin6_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3004,7 +3173,7 @@ func (x *ConfigureProvider_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use ConfigureProvider_Response.ProtoReflect.Descriptor instead. func (*ConfigureProvider_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{15, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{17, 1} } func (x *ConfigureProvider_Response) GetDiagnostics() []*Diagnostic { @@ -3027,16 +3196,17 @@ type ReadResource_Request struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` - CurrentState *DynamicValue `protobuf:"bytes,2,opt,name=current_state,json=currentState,proto3" json:"current_state,omitempty"` - Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` - ProviderMeta *DynamicValue `protobuf:"bytes,4,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + CurrentState *DynamicValue `protobuf:"bytes,2,opt,name=current_state,json=currentState,proto3" json:"current_state,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,4,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + ClientCapabilities *ClientCapabilities `protobuf:"bytes,5,opt,name=client_capabilities,json=clientCapabilities,proto3" json:"client_capabilities,omitempty"` } func (x *ReadResource_Request) Reset() { *x = ReadResource_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[54] + mi := &file_tfplugin6_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3049,7 +3219,7 @@ func (x *ReadResource_Request) String() string { func (*ReadResource_Request) ProtoMessage() {} func (x *ReadResource_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[54] + mi := &file_tfplugin6_proto_msgTypes[56] if protoimpl.UnsafeEnabled && x != 
nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3062,7 +3232,7 @@ func (x *ReadResource_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadResource_Request.ProtoReflect.Descriptor instead. func (*ReadResource_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{16, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{18, 0} } func (x *ReadResource_Request) GetTypeName() string { @@ -3093,6 +3263,13 @@ func (x *ReadResource_Request) GetProviderMeta() *DynamicValue { return nil } +func (x *ReadResource_Request) GetClientCapabilities() *ClientCapabilities { + if x != nil { + return x.ClientCapabilities + } + return nil +} + type ReadResource_Response struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3101,12 +3278,15 @@ type ReadResource_Response struct { NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` + // deferred is set if the provider is deferring the change. If set the caller + // needs to handle the deferral. + Deferred *Deferred `protobuf:"bytes,4,opt,name=deferred,proto3" json:"deferred,omitempty"` } func (x *ReadResource_Response) Reset() { *x = ReadResource_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[55] + mi := &file_tfplugin6_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3119,7 +3299,7 @@ func (x *ReadResource_Response) String() string { func (*ReadResource_Response) ProtoMessage() {} func (x *ReadResource_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[55] + mi := &file_tfplugin6_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3132,7 +3312,7 @@ func (x *ReadResource_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadResource_Response.ProtoReflect.Descriptor instead. 
func (*ReadResource_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{16, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{18, 1} } func (x *ReadResource_Response) GetNewState() *DynamicValue { @@ -3156,23 +3336,31 @@ func (x *ReadResource_Response) GetPrivate() []byte { return nil } +func (x *ReadResource_Response) GetDeferred() *Deferred { + if x != nil { + return x.Deferred + } + return nil +} + type PlanResourceChange_Request struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` - PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` - ProposedNewState *DynamicValue `protobuf:"bytes,3,opt,name=proposed_new_state,json=proposedNewState,proto3" json:"proposed_new_state,omitempty"` - Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` - PriorPrivate []byte `protobuf:"bytes,5,opt,name=prior_private,json=priorPrivate,proto3" json:"prior_private,omitempty"` - ProviderMeta *DynamicValue `protobuf:"bytes,6,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` + ProposedNewState *DynamicValue `protobuf:"bytes,3,opt,name=proposed_new_state,json=proposedNewState,proto3" json:"proposed_new_state,omitempty"` + Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + PriorPrivate []byte `protobuf:"bytes,5,opt,name=prior_private,json=priorPrivate,proto3" json:"prior_private,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,6,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + ClientCapabilities *ClientCapabilities `protobuf:"bytes,7,opt,name=client_capabilities,json=clientCapabilities,proto3" json:"client_capabilities,omitempty"` } func (x *PlanResourceChange_Request) Reset() { *x = PlanResourceChange_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[56] + mi := &file_tfplugin6_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3185,7 +3373,7 @@ func (x *PlanResourceChange_Request) String() string { func (*PlanResourceChange_Request) ProtoMessage() {} func (x *PlanResourceChange_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[56] + mi := &file_tfplugin6_proto_msgTypes[58] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3198,7 +3386,7 @@ func (x *PlanResourceChange_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use PlanResourceChange_Request.ProtoReflect.Descriptor instead. 
func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{17, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{19, 0} } func (x *PlanResourceChange_Request) GetTypeName() string { @@ -3243,6 +3431,13 @@ func (x *PlanResourceChange_Request) GetProviderMeta() *DynamicValue { return nil } +func (x *PlanResourceChange_Request) GetClientCapabilities() *ClientCapabilities { + if x != nil { + return x.ClientCapabilities + } + return nil +} + type PlanResourceChange_Response struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3264,12 +3459,15 @@ type PlanResourceChange_Response struct { // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== // ==== DO NOT USE THIS ==== LegacyTypeSystem bool `protobuf:"varint,5,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"` + // deferred is set if the provider is deferring the change. If set the caller + // needs to handle the deferral. + Deferred *Deferred `protobuf:"bytes,6,opt,name=deferred,proto3" json:"deferred,omitempty"` } func (x *PlanResourceChange_Response) Reset() { *x = PlanResourceChange_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[57] + mi := &file_tfplugin6_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3282,7 +3480,7 @@ func (x *PlanResourceChange_Response) String() string { func (*PlanResourceChange_Response) ProtoMessage() {} func (x *PlanResourceChange_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[57] + mi := &file_tfplugin6_proto_msgTypes[59] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3295,7 +3493,7 @@ func (x *PlanResourceChange_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use PlanResourceChange_Response.ProtoReflect.Descriptor instead. func (*PlanResourceChange_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{17, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{19, 1} } func (x *PlanResourceChange_Response) GetPlannedState() *DynamicValue { @@ -3333,6 +3531,13 @@ func (x *PlanResourceChange_Response) GetLegacyTypeSystem() bool { return false } +func (x *PlanResourceChange_Response) GetDeferred() *Deferred { + if x != nil { + return x.Deferred + } + return nil +} + type ApplyResourceChange_Request struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3349,7 +3554,7 @@ type ApplyResourceChange_Request struct { func (x *ApplyResourceChange_Request) Reset() { *x = ApplyResourceChange_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[58] + mi := &file_tfplugin6_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3362,7 +3567,7 @@ func (x *ApplyResourceChange_Request) String() string { func (*ApplyResourceChange_Request) ProtoMessage() {} func (x *ApplyResourceChange_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[58] + mi := &file_tfplugin6_proto_msgTypes[60] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3375,7 +3580,7 @@ func (x *ApplyResourceChange_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ApplyResourceChange_Request.ProtoReflect.Descriptor instead. 
func (*ApplyResourceChange_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{18, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{20, 0} } func (x *ApplyResourceChange_Request) GetTypeName() string { @@ -3445,7 +3650,7 @@ type ApplyResourceChange_Response struct { func (x *ApplyResourceChange_Response) Reset() { *x = ApplyResourceChange_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[59] + mi := &file_tfplugin6_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3458,7 +3663,7 @@ func (x *ApplyResourceChange_Response) String() string { func (*ApplyResourceChange_Response) ProtoMessage() {} func (x *ApplyResourceChange_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[59] + mi := &file_tfplugin6_proto_msgTypes[61] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3471,7 +3676,7 @@ func (x *ApplyResourceChange_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use ApplyResourceChange_Response.ProtoReflect.Descriptor instead. func (*ApplyResourceChange_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{18, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{20, 1} } func (x *ApplyResourceChange_Response) GetNewState() *DynamicValue { @@ -3507,14 +3712,15 @@ type ImportResourceState_Request struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` - Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + ClientCapabilities *ClientCapabilities `protobuf:"bytes,3,opt,name=client_capabilities,json=clientCapabilities,proto3" json:"client_capabilities,omitempty"` } func (x *ImportResourceState_Request) Reset() { *x = ImportResourceState_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[60] + mi := &file_tfplugin6_proto_msgTypes[62] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3527,7 +3733,7 @@ func (x *ImportResourceState_Request) String() string { func (*ImportResourceState_Request) ProtoMessage() {} func (x *ImportResourceState_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[60] + mi := &file_tfplugin6_proto_msgTypes[62] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3540,7 +3746,7 @@ func (x *ImportResourceState_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ImportResourceState_Request.ProtoReflect.Descriptor instead. 
func (*ImportResourceState_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{19, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{21, 0} } func (x *ImportResourceState_Request) GetTypeName() string { @@ -3557,6 +3763,13 @@ func (x *ImportResourceState_Request) GetId() string { return "" } +func (x *ImportResourceState_Request) GetClientCapabilities() *ClientCapabilities { + if x != nil { + return x.ClientCapabilities + } + return nil +} + type ImportResourceState_ImportedResource struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3570,7 +3783,7 @@ type ImportResourceState_ImportedResource struct { func (x *ImportResourceState_ImportedResource) Reset() { *x = ImportResourceState_ImportedResource{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[61] + mi := &file_tfplugin6_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3583,7 +3796,7 @@ func (x *ImportResourceState_ImportedResource) String() string { func (*ImportResourceState_ImportedResource) ProtoMessage() {} func (x *ImportResourceState_ImportedResource) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[61] + mi := &file_tfplugin6_proto_msgTypes[63] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3596,7 +3809,7 @@ func (x *ImportResourceState_ImportedResource) ProtoReflect() protoreflect.Messa // Deprecated: Use ImportResourceState_ImportedResource.ProtoReflect.Descriptor instead. func (*ImportResourceState_ImportedResource) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{19, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{21, 1} } func (x *ImportResourceState_ImportedResource) GetTypeName() string { @@ -3627,12 +3840,15 @@ type ImportResourceState_Response struct { ImportedResources []*ImportResourceState_ImportedResource `protobuf:"bytes,1,rep,name=imported_resources,json=importedResources,proto3" json:"imported_resources,omitempty"` Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // deferred is set if the provider is deferring the change. If set the caller + // needs to handle the deferral. + Deferred *Deferred `protobuf:"bytes,3,opt,name=deferred,proto3" json:"deferred,omitempty"` } func (x *ImportResourceState_Response) Reset() { *x = ImportResourceState_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[62] + mi := &file_tfplugin6_proto_msgTypes[64] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3645,7 +3861,7 @@ func (x *ImportResourceState_Response) String() string { func (*ImportResourceState_Response) ProtoMessage() {} func (x *ImportResourceState_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[62] + mi := &file_tfplugin6_proto_msgTypes[64] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3658,7 +3874,7 @@ func (x *ImportResourceState_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use ImportResourceState_Response.ProtoReflect.Descriptor instead. 
func (*ImportResourceState_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{19, 2} + return file_tfplugin6_proto_rawDescGZIP(), []int{21, 2} } func (x *ImportResourceState_Response) GetImportedResources() []*ImportResourceState_ImportedResource { @@ -3675,6 +3891,13 @@ func (x *ImportResourceState_Response) GetDiagnostics() []*Diagnostic { return nil } +func (x *ImportResourceState_Response) GetDeferred() *Deferred { + if x != nil { + return x.Deferred + } + return nil +} + type MoveResourceState_Request struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3700,7 +3923,7 @@ type MoveResourceState_Request struct { func (x *MoveResourceState_Request) Reset() { *x = MoveResourceState_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[63] + mi := &file_tfplugin6_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3713,7 +3936,7 @@ func (x *MoveResourceState_Request) String() string { func (*MoveResourceState_Request) ProtoMessage() {} func (x *MoveResourceState_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[63] + mi := &file_tfplugin6_proto_msgTypes[65] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3726,7 +3949,7 @@ func (x *MoveResourceState_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use MoveResourceState_Request.ProtoReflect.Descriptor instead. func (*MoveResourceState_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{20, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{22, 0} } func (x *MoveResourceState_Request) GetSourceProviderAddress() string { @@ -3787,7 +4010,7 @@ type MoveResourceState_Response struct { func (x *MoveResourceState_Response) Reset() { *x = MoveResourceState_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[64] + mi := &file_tfplugin6_proto_msgTypes[66] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3800,7 +4023,7 @@ func (x *MoveResourceState_Response) String() string { func (*MoveResourceState_Response) ProtoMessage() {} func (x *MoveResourceState_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[64] + mi := &file_tfplugin6_proto_msgTypes[66] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3813,7 +4036,7 @@ func (x *MoveResourceState_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use MoveResourceState_Response.ProtoReflect.Descriptor instead. 
func (*MoveResourceState_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{20, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{22, 1} } func (x *MoveResourceState_Response) GetTargetState() *DynamicValue { @@ -3842,15 +4065,16 @@ type ReadDataSource_Request struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` - Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` - ProviderMeta *DynamicValue `protobuf:"bytes,3,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,3,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + ClientCapabilities *ClientCapabilities `protobuf:"bytes,4,opt,name=client_capabilities,json=clientCapabilities,proto3" json:"client_capabilities,omitempty"` } func (x *ReadDataSource_Request) Reset() { *x = ReadDataSource_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[65] + mi := &file_tfplugin6_proto_msgTypes[67] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3863,7 +4087,7 @@ func (x *ReadDataSource_Request) String() string { func (*ReadDataSource_Request) ProtoMessage() {} func (x *ReadDataSource_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[65] + mi := &file_tfplugin6_proto_msgTypes[67] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3876,7 +4100,7 @@ func (x *ReadDataSource_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadDataSource_Request.ProtoReflect.Descriptor instead. func (*ReadDataSource_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{21, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{23, 0} } func (x *ReadDataSource_Request) GetTypeName() string { @@ -3900,6 +4124,13 @@ func (x *ReadDataSource_Request) GetProviderMeta() *DynamicValue { return nil } +func (x *ReadDataSource_Request) GetClientCapabilities() *ClientCapabilities { + if x != nil { + return x.ClientCapabilities + } + return nil +} + type ReadDataSource_Response struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3907,12 +4138,15 @@ type ReadDataSource_Response struct { State *DynamicValue `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // deferred is set if the provider is deferring the change. If set the caller + // needs to handle the deferral. 
+ Deferred *Deferred `protobuf:"bytes,3,opt,name=deferred,proto3" json:"deferred,omitempty"` } func (x *ReadDataSource_Response) Reset() { *x = ReadDataSource_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[66] + mi := &file_tfplugin6_proto_msgTypes[68] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3925,7 +4159,7 @@ func (x *ReadDataSource_Response) String() string { func (*ReadDataSource_Response) ProtoMessage() {} func (x *ReadDataSource_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[66] + mi := &file_tfplugin6_proto_msgTypes[68] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3938,7 +4172,7 @@ func (x *ReadDataSource_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadDataSource_Response.ProtoReflect.Descriptor instead. func (*ReadDataSource_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{21, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{23, 1} } func (x *ReadDataSource_Response) GetState() *DynamicValue { @@ -3955,6 +4189,13 @@ func (x *ReadDataSource_Response) GetDiagnostics() []*Diagnostic { return nil } +func (x *ReadDataSource_Response) GetDeferred() *Deferred { + if x != nil { + return x.Deferred + } + return nil +} + type GetFunctions_Request struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3964,7 +4205,7 @@ type GetFunctions_Request struct { func (x *GetFunctions_Request) Reset() { *x = GetFunctions_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[67] + mi := &file_tfplugin6_proto_msgTypes[69] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3977,7 +4218,7 @@ func (x *GetFunctions_Request) String() string { func (*GetFunctions_Request) ProtoMessage() {} func (x *GetFunctions_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[67] + mi := &file_tfplugin6_proto_msgTypes[69] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3990,7 +4231,7 @@ func (x *GetFunctions_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use GetFunctions_Request.ProtoReflect.Descriptor instead. func (*GetFunctions_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{22, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{24, 0} } type GetFunctions_Response struct { @@ -4007,7 +4248,7 @@ type GetFunctions_Response struct { func (x *GetFunctions_Response) Reset() { *x = GetFunctions_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[68] + mi := &file_tfplugin6_proto_msgTypes[70] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4020,7 +4261,7 @@ func (x *GetFunctions_Response) String() string { func (*GetFunctions_Response) ProtoMessage() {} func (x *GetFunctions_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[68] + mi := &file_tfplugin6_proto_msgTypes[70] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4033,7 +4274,7 @@ func (x *GetFunctions_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use GetFunctions_Response.ProtoReflect.Descriptor instead. 
func (*GetFunctions_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{22, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{24, 1} } func (x *GetFunctions_Response) GetFunctions() map[string]*Function { @@ -4064,7 +4305,7 @@ type CallFunction_Request struct { func (x *CallFunction_Request) Reset() { *x = CallFunction_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[70] + mi := &file_tfplugin6_proto_msgTypes[72] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4077,7 +4318,7 @@ func (x *CallFunction_Request) String() string { func (*CallFunction_Request) ProtoMessage() {} func (x *CallFunction_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[70] + mi := &file_tfplugin6_proto_msgTypes[72] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4090,7 +4331,7 @@ func (x *CallFunction_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use CallFunction_Request.ProtoReflect.Descriptor instead. func (*CallFunction_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{23, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{25, 0} } func (x *CallFunction_Request) GetName() string { @@ -4121,7 +4362,7 @@ type CallFunction_Response struct { func (x *CallFunction_Response) Reset() { *x = CallFunction_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[71] + mi := &file_tfplugin6_proto_msgTypes[73] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4134,7 +4375,7 @@ func (x *CallFunction_Response) String() string { func (*CallFunction_Response) ProtoMessage() {} func (x *CallFunction_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[71] + mi := &file_tfplugin6_proto_msgTypes[73] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4147,7 +4388,7 @@ func (x *CallFunction_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use CallFunction_Response.ProtoReflect.Descriptor instead. 
func (*CallFunction_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{23, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{25, 1} } func (x *CallFunction_Response) GetResult() *DynamicValue { @@ -4354,458 +4595,509 @@ var file_tfplugin6_proto_rawDesc = []byte{ 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x2e, 0x0a, 0x13, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, 0x96, 0x04, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0xef, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, - 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, - 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, - 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x37, - 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, - 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, - 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x4c, 0x0a, 0x0c, 0x64, 0x61, 0x74, 0x61, 0x5f, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x09, - 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x1a, 0x26, 0x0a, 0x10, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x1a, 0x31, 0x0a, 0x12, 0x44, - 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 
0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x2f, - 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, - 0xc7, 0x06, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0xa6, 0x06, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, - 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x65, 0x0a, 0x10, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, 0x3f, 0x0a, 0x12, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, + 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x61, 0x6c, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x61, 0x6c, + 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x22, 0xa2, 0x01, 0x0a, 0x08, 0x44, 0x65, 0x66, 0x65, + 0x72, 0x72, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x44, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x2e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x62, 0x0a, 0x06, 0x52, 0x65, 0x61, 0x73, + 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x1b, 0x0a, 0x17, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x46, + 0x49, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x12, 0x1b, 0x0a, 0x17, + 0x50, 0x52, 0x4f, 0x56, 0x49, 0x44, 0x45, 0x52, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x5f, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x42, 0x53, + 0x45, 0x4e, 0x54, 0x5f, 0x50, 0x52, 0x45, 0x52, 0x45, 0x51, 0x10, 0x03, 0x22, 0x96, 0x04, 0x0a, + 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x09, 0x0a, 0x07, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0xef, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, + 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, + 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, + 0x74, 0x69, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 
0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x4c, 0x0a, + 0x0c, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x61, 0x74, 0x61, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0b, + 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x09, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x73, 0x12, 0x45, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x09, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x26, 0x0a, 0x10, 0x46, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x1a, 0x31, 0x0a, 0x12, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x2f, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xc7, 0x06, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0xa6, 0x06, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x12, 0x65, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 
0x6c, 0x0a, 0x13, 0x64, 0x61, 0x74, + 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x73, 0x12, 0x6c, 0x0a, 0x13, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x3c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, - 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, - 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, - 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, - 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x36, 0x0a, 0x0d, 0x70, 0x72, - 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, - 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x70, - 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, - 0x65, 0x73, 0x12, 0x52, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x75, 0x6e, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x66, 0x75, 0x6e, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 
0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, + 0x12, 0x36, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, + 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, + 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x14, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x1a, 0x57, 0x0a, 0x16, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x51, 0x0a, 0x0e, + 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x57, 0x0a, - 0x16, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 
0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x51, 0x0a, 0x0e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x99, 0x01, 0x0a, 0x16, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x3a, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, - 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, - 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, - 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x90, 0x02, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, - 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x72, - 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, - 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, - 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x30, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, - 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x08, 0x72, 0x61, 0x77, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x1a, 0x83, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x3e, 0x0a, 0x0e, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x0d, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, - 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, - 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, - 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x16, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 
0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, - 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, - 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x73, 0x22, 0xba, 0x01, 0x0a, 0x1a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, - 0x74, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, - 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, - 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xc1, - 0x01, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x1a, 0x67, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x2b, 0x0a, 0x11, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x65, 0x72, 0x72, - 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x06, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, - 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, - 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, - 0x63, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x1a, 0xbc, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0x99, 0x01, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x3a, 0x0a, 0x07, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, + 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x90, 0x02, 0x0a, 0x14, + 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x1a, 0x72, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x0d, - 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, - 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x63, 0x75, - 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, - 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, - 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, - 0x74, 0x61, 0x1a, 0x93, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, - 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, + 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x08, + 0x72, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x83, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0e, 0x75, 0x70, 0x67, 0x72, 0x61, 
0x64, 0x65, + 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, - 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x18, - 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x22, 0xf2, 0x04, 0x0a, 0x12, 0x50, 0x6c, 0x61, - 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, - 0xbb, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, - 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, - 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, - 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x45, 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x5f, 0x6e, - 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, - 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, - 0x64, 0x4e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, - 0x69, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0c, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, - 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x9d, 0x02, - 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, - 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xb6, + 0x01, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, - 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, - 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, - 0x69, 0x72, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x0f, 0x72, 0x65, - 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x12, 0x27, 0x0a, - 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, - 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, - 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, - 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, - 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, - 0x61, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, 0x92, 0x04, - 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xb6, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, + 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xba, 0x01, 0x0a, 0x1a, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, - 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, - 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, - 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, 0x6e, - 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, - 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 
0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, - 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, - 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, - 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, - 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0xc1, - 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, - 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, + 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, - 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, - 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, + 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x10, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, - 0x65, 0x6d, 0x22, 0xed, 0x02, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x36, 0x0a, 0x07, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, - 0x69, 0x64, 0x1a, 0x78, 0x0a, 0x10, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, + 0x74, 0x69, 0x63, 0x73, 0x22, 0x92, 0x02, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x1a, 0xb7, 0x01, 
0x0a, 0x07, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, + 0x6f, 0x72, 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x10, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x13, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, + 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, + 0x52, 0x12, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, + 0x74, 0x69, 0x65, 0x73, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xe4, 0x03, 0x0a, 0x0c, 0x52, 0x65, + 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x8c, 0x02, 0x0a, 0x07, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x61, 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, - 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x1a, 0xa3, 0x01, 0x0a, - 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x12, 0x69, 0x6d, 0x70, - 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x36, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, - 0x73, 
0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x11, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, - 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, - 0x63, 0x73, 0x22, 0xe7, 0x03, 0x0a, 0x11, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0xa8, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x17, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, - 0x76, 0x69, 0x64, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x28, 0x0a, 0x10, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, - 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x0c, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x61, 0x77, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x69, 0x76, - 0x61, 0x74, 0x65, 0x1a, 0xa6, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x3a, 0x0a, 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x13, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x70, + 0x61, 0x62, 0x69, 0x6c, 0x69, 
0x74, 0x69, 0x65, 0x73, 0x1a, 0xc4, 0x01, 0x0a, 0x08, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, - 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x22, 0x9c, 0x02, 0x0a, - 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, - 0x95, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, - 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, + 0x2f, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x65, + 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x52, 0x08, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, + 0x22, 0xf3, 0x05, 0x0a, 0x12, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0x8b, 0x03, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, + 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x45, 0x0a, 0x12, 0x70, 0x72, + 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x10, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x4e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, 0x69, 0x6f, 
0x72, 0x5f, 0x70, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x72, 0x69, 0x6f, 0x72, + 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x13, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, + 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, + 0x73, 0x52, 0x12, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x69, 0x65, 0x73, 0x1a, 0xce, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x43, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x70, + 0x6c, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x50, 0x61, 0x74, 0x68, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x52, 0x65, + 0x70, 0x6c, 0x61, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, + 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, + 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, + 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61, 0x63, + 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x53, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x12, 0x2f, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, + 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x52, 0x08, 0x64, 0x65, + 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x22, 0x92, 0x04, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xb6, + 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, + 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, 
0x6f, 0x72, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, - 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, + 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, - 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x72, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, - 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, - 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x81, 0x02, 0x0a, 0x0c, - 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x09, 0x0a, 0x07, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0xe5, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0xc1, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, + 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x0a, + 0x12, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x79, 0x73, + 0x74, 0x65, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, 0x61, 0x63, + 0x79, 0x54, 0x79, 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, 0xef, 0x03, 0x0a, 0x13, + 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x1a, 0x86, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x4e, 0x0a, 0x13, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x70, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x78, 0x0a, 0x10, + 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, + 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x1a, 0xd4, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x12, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x49, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, + 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x52, 0x11, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 
0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, - 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x1a, 0x51, 0x0a, 0x0e, - 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x46, 0x75, 0x6e, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, - 0xd1, 0x01, 0x0a, 0x0c, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x1a, 0x54, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x35, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, - 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x61, 0x72, 0x67, - 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x6b, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, - 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x46, - 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x2a, 0x25, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, - 0x64, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, - 0x4d, 0x41, 0x52, 0x4b, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x32, 0xa4, 0x0c, 0x0a, 0x08, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, - 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, - 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x16, 0x56, 0x61, 0x6c, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 
0x73, 0x12, 0x2f, 0x0a, 0x08, + 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x65, 0x66, 0x65, 0x72, + 0x72, 0x65, 0x64, 0x52, 0x08, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x22, 0xe7, 0x03, + 0x0a, 0x11, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x1a, 0xa8, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x36, 0x0a, 0x17, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x15, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x13, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x28, 0x0a, + 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x1a, 0xa6, + 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0c, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, + 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x22, 0x9e, 0x03, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, + 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0xe5, 0x01, 0x0a, 0x07, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 
0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, + 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x13, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x61, 0x70, + 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, + 0x65, 0x73, 0x1a, 0xa3, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, + 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x2f, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x65, 0x72, + 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x52, 0x08, + 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x22, 0x81, 0x02, 0x0a, 0x0c, 0x47, 0x65, 0x74, + 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0xe5, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x4d, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x1a, 0x51, 0x0a, 0x0e, 0x46, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd1, 0x01, 0x0a, + 0x0c, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x54, 0x0a, + 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x09, + 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x1a, 0x6b, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2f, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x2a, 0x25, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x09, + 0x0a, 0x05, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4d, 0x41, 0x52, + 0x4b, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x32, 0xa4, 0x0c, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x1e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, - 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 
0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x16, 0x56, 0x61, + 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, - 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x2a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x1a, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2d, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, - 0x74, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, - 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, - 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x12, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x55, 0x70, 0x67, - 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, - 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x50, 0x72, - 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 
0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x36, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x1a, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x27, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, + 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x60, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, + 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x12, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x25, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 
0x6f, + 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x50, + 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x13, 0x41, 0x70, + 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, 0x70, + 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x66, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x49, 0x6d, + 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x11, 0x4d, 0x6f, + 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x4d, 0x6f, 0x76, 0x65, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x57, 0x0a, 0x0e, + 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x21, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, + 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x65, + 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x36, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x12, 0x50, 0x6c, 0x61, 0x6e, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x25, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, - 0x65, 0x73, 0x6f, 
0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x36, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, - 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, - 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, - 0x61, 0x6e, 0x67, 0x65, 0x12, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, - 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, - 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x26, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, - 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, - 0x11, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x4d, - 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x57, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x12, 0x21, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x65, - 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, - 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x46, - 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x43, - 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, - 0x0a, 0x0c, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x1f, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x6f, 0x70, - 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x42, 0x47, 0x5a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, - 0x6f, 0x72, 0x6d, 0x2d, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x74, 0x66, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x76, 0x36, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x2f, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x43, 0x61, 0x6c, 0x6c, + 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x53, + 0x74, 0x6f, 0x70, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x47, + 0x5a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, + 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, + 0x2d, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x74, 0x66, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x76, 0x36, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -4820,202 +5112,215 @@ func file_tfplugin6_proto_rawDescGZIP() []byte { return file_tfplugin6_proto_rawDescData } -var file_tfplugin6_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_tfplugin6_proto_msgTypes = make([]protoimpl.MessageInfo, 72) +var file_tfplugin6_proto_enumTypes = make([]protoimpl.EnumInfo, 5) +var file_tfplugin6_proto_msgTypes = make([]protoimpl.MessageInfo, 74) var file_tfplugin6_proto_goTypes = []interface{}{ (StringKind)(0), // 0: tfplugin6.StringKind (Diagnostic_Severity)(0), // 1: tfplugin6.Diagnostic.Severity (Schema_NestedBlock_NestingMode)(0), // 2: tfplugin6.Schema.NestedBlock.NestingMode 
(Schema_Object_NestingMode)(0), // 3: tfplugin6.Schema.Object.NestingMode - (*DynamicValue)(nil), // 4: tfplugin6.DynamicValue - (*Diagnostic)(nil), // 5: tfplugin6.Diagnostic - (*FunctionError)(nil), // 6: tfplugin6.FunctionError - (*AttributePath)(nil), // 7: tfplugin6.AttributePath - (*StopProvider)(nil), // 8: tfplugin6.StopProvider - (*RawState)(nil), // 9: tfplugin6.RawState - (*Schema)(nil), // 10: tfplugin6.Schema - (*Function)(nil), // 11: tfplugin6.Function - (*ServerCapabilities)(nil), // 12: tfplugin6.ServerCapabilities - (*GetMetadata)(nil), // 13: tfplugin6.GetMetadata - (*GetProviderSchema)(nil), // 14: tfplugin6.GetProviderSchema - (*ValidateProviderConfig)(nil), // 15: tfplugin6.ValidateProviderConfig - (*UpgradeResourceState)(nil), // 16: tfplugin6.UpgradeResourceState - (*ValidateResourceConfig)(nil), // 17: tfplugin6.ValidateResourceConfig - (*ValidateDataResourceConfig)(nil), // 18: tfplugin6.ValidateDataResourceConfig - (*ConfigureProvider)(nil), // 19: tfplugin6.ConfigureProvider - (*ReadResource)(nil), // 20: tfplugin6.ReadResource - (*PlanResourceChange)(nil), // 21: tfplugin6.PlanResourceChange - (*ApplyResourceChange)(nil), // 22: tfplugin6.ApplyResourceChange - (*ImportResourceState)(nil), // 23: tfplugin6.ImportResourceState - (*MoveResourceState)(nil), // 24: tfplugin6.MoveResourceState - (*ReadDataSource)(nil), // 25: tfplugin6.ReadDataSource - (*GetFunctions)(nil), // 26: tfplugin6.GetFunctions - (*CallFunction)(nil), // 27: tfplugin6.CallFunction - (*AttributePath_Step)(nil), // 28: tfplugin6.AttributePath.Step - (*StopProvider_Request)(nil), // 29: tfplugin6.StopProvider.Request - (*StopProvider_Response)(nil), // 30: tfplugin6.StopProvider.Response - nil, // 31: tfplugin6.RawState.FlatmapEntry - (*Schema_Block)(nil), // 32: tfplugin6.Schema.Block - (*Schema_Attribute)(nil), // 33: tfplugin6.Schema.Attribute - (*Schema_NestedBlock)(nil), // 34: tfplugin6.Schema.NestedBlock - (*Schema_Object)(nil), // 35: tfplugin6.Schema.Object - (*Function_Parameter)(nil), // 36: tfplugin6.Function.Parameter - (*Function_Return)(nil), // 37: tfplugin6.Function.Return - (*GetMetadata_Request)(nil), // 38: tfplugin6.GetMetadata.Request - (*GetMetadata_Response)(nil), // 39: tfplugin6.GetMetadata.Response - (*GetMetadata_FunctionMetadata)(nil), // 40: tfplugin6.GetMetadata.FunctionMetadata - (*GetMetadata_DataSourceMetadata)(nil), // 41: tfplugin6.GetMetadata.DataSourceMetadata - (*GetMetadata_ResourceMetadata)(nil), // 42: tfplugin6.GetMetadata.ResourceMetadata - (*GetProviderSchema_Request)(nil), // 43: tfplugin6.GetProviderSchema.Request - (*GetProviderSchema_Response)(nil), // 44: tfplugin6.GetProviderSchema.Response - nil, // 45: tfplugin6.GetProviderSchema.Response.ResourceSchemasEntry - nil, // 46: tfplugin6.GetProviderSchema.Response.DataSourceSchemasEntry - nil, // 47: tfplugin6.GetProviderSchema.Response.FunctionsEntry - (*ValidateProviderConfig_Request)(nil), // 48: tfplugin6.ValidateProviderConfig.Request - (*ValidateProviderConfig_Response)(nil), // 49: tfplugin6.ValidateProviderConfig.Response - (*UpgradeResourceState_Request)(nil), // 50: tfplugin6.UpgradeResourceState.Request - (*UpgradeResourceState_Response)(nil), // 51: tfplugin6.UpgradeResourceState.Response - (*ValidateResourceConfig_Request)(nil), // 52: tfplugin6.ValidateResourceConfig.Request - (*ValidateResourceConfig_Response)(nil), // 53: tfplugin6.ValidateResourceConfig.Response - (*ValidateDataResourceConfig_Request)(nil), // 54: tfplugin6.ValidateDataResourceConfig.Request - 
(*ValidateDataResourceConfig_Response)(nil), // 55: tfplugin6.ValidateDataResourceConfig.Response - (*ConfigureProvider_Request)(nil), // 56: tfplugin6.ConfigureProvider.Request - (*ConfigureProvider_Response)(nil), // 57: tfplugin6.ConfigureProvider.Response - (*ReadResource_Request)(nil), // 58: tfplugin6.ReadResource.Request - (*ReadResource_Response)(nil), // 59: tfplugin6.ReadResource.Response - (*PlanResourceChange_Request)(nil), // 60: tfplugin6.PlanResourceChange.Request - (*PlanResourceChange_Response)(nil), // 61: tfplugin6.PlanResourceChange.Response - (*ApplyResourceChange_Request)(nil), // 62: tfplugin6.ApplyResourceChange.Request - (*ApplyResourceChange_Response)(nil), // 63: tfplugin6.ApplyResourceChange.Response - (*ImportResourceState_Request)(nil), // 64: tfplugin6.ImportResourceState.Request - (*ImportResourceState_ImportedResource)(nil), // 65: tfplugin6.ImportResourceState.ImportedResource - (*ImportResourceState_Response)(nil), // 66: tfplugin6.ImportResourceState.Response - (*MoveResourceState_Request)(nil), // 67: tfplugin6.MoveResourceState.Request - (*MoveResourceState_Response)(nil), // 68: tfplugin6.MoveResourceState.Response - (*ReadDataSource_Request)(nil), // 69: tfplugin6.ReadDataSource.Request - (*ReadDataSource_Response)(nil), // 70: tfplugin6.ReadDataSource.Response - (*GetFunctions_Request)(nil), // 71: tfplugin6.GetFunctions.Request - (*GetFunctions_Response)(nil), // 72: tfplugin6.GetFunctions.Response - nil, // 73: tfplugin6.GetFunctions.Response.FunctionsEntry - (*CallFunction_Request)(nil), // 74: tfplugin6.CallFunction.Request - (*CallFunction_Response)(nil), // 75: tfplugin6.CallFunction.Response + (Deferred_Reason)(0), // 4: tfplugin6.Deferred.Reason + (*DynamicValue)(nil), // 5: tfplugin6.DynamicValue + (*Diagnostic)(nil), // 6: tfplugin6.Diagnostic + (*FunctionError)(nil), // 7: tfplugin6.FunctionError + (*AttributePath)(nil), // 8: tfplugin6.AttributePath + (*StopProvider)(nil), // 9: tfplugin6.StopProvider + (*RawState)(nil), // 10: tfplugin6.RawState + (*Schema)(nil), // 11: tfplugin6.Schema + (*Function)(nil), // 12: tfplugin6.Function + (*ServerCapabilities)(nil), // 13: tfplugin6.ServerCapabilities + (*ClientCapabilities)(nil), // 14: tfplugin6.ClientCapabilities + (*Deferred)(nil), // 15: tfplugin6.Deferred + (*GetMetadata)(nil), // 16: tfplugin6.GetMetadata + (*GetProviderSchema)(nil), // 17: tfplugin6.GetProviderSchema + (*ValidateProviderConfig)(nil), // 18: tfplugin6.ValidateProviderConfig + (*UpgradeResourceState)(nil), // 19: tfplugin6.UpgradeResourceState + (*ValidateResourceConfig)(nil), // 20: tfplugin6.ValidateResourceConfig + (*ValidateDataResourceConfig)(nil), // 21: tfplugin6.ValidateDataResourceConfig + (*ConfigureProvider)(nil), // 22: tfplugin6.ConfigureProvider + (*ReadResource)(nil), // 23: tfplugin6.ReadResource + (*PlanResourceChange)(nil), // 24: tfplugin6.PlanResourceChange + (*ApplyResourceChange)(nil), // 25: tfplugin6.ApplyResourceChange + (*ImportResourceState)(nil), // 26: tfplugin6.ImportResourceState + (*MoveResourceState)(nil), // 27: tfplugin6.MoveResourceState + (*ReadDataSource)(nil), // 28: tfplugin6.ReadDataSource + (*GetFunctions)(nil), // 29: tfplugin6.GetFunctions + (*CallFunction)(nil), // 30: tfplugin6.CallFunction + (*AttributePath_Step)(nil), // 31: tfplugin6.AttributePath.Step + (*StopProvider_Request)(nil), // 32: tfplugin6.StopProvider.Request + (*StopProvider_Response)(nil), // 33: tfplugin6.StopProvider.Response + nil, // 34: tfplugin6.RawState.FlatmapEntry + (*Schema_Block)(nil), // 35: 
tfplugin6.Schema.Block + (*Schema_Attribute)(nil), // 36: tfplugin6.Schema.Attribute + (*Schema_NestedBlock)(nil), // 37: tfplugin6.Schema.NestedBlock + (*Schema_Object)(nil), // 38: tfplugin6.Schema.Object + (*Function_Parameter)(nil), // 39: tfplugin6.Function.Parameter + (*Function_Return)(nil), // 40: tfplugin6.Function.Return + (*GetMetadata_Request)(nil), // 41: tfplugin6.GetMetadata.Request + (*GetMetadata_Response)(nil), // 42: tfplugin6.GetMetadata.Response + (*GetMetadata_FunctionMetadata)(nil), // 43: tfplugin6.GetMetadata.FunctionMetadata + (*GetMetadata_DataSourceMetadata)(nil), // 44: tfplugin6.GetMetadata.DataSourceMetadata + (*GetMetadata_ResourceMetadata)(nil), // 45: tfplugin6.GetMetadata.ResourceMetadata + (*GetProviderSchema_Request)(nil), // 46: tfplugin6.GetProviderSchema.Request + (*GetProviderSchema_Response)(nil), // 47: tfplugin6.GetProviderSchema.Response + nil, // 48: tfplugin6.GetProviderSchema.Response.ResourceSchemasEntry + nil, // 49: tfplugin6.GetProviderSchema.Response.DataSourceSchemasEntry + nil, // 50: tfplugin6.GetProviderSchema.Response.FunctionsEntry + (*ValidateProviderConfig_Request)(nil), // 51: tfplugin6.ValidateProviderConfig.Request + (*ValidateProviderConfig_Response)(nil), // 52: tfplugin6.ValidateProviderConfig.Response + (*UpgradeResourceState_Request)(nil), // 53: tfplugin6.UpgradeResourceState.Request + (*UpgradeResourceState_Response)(nil), // 54: tfplugin6.UpgradeResourceState.Response + (*ValidateResourceConfig_Request)(nil), // 55: tfplugin6.ValidateResourceConfig.Request + (*ValidateResourceConfig_Response)(nil), // 56: tfplugin6.ValidateResourceConfig.Response + (*ValidateDataResourceConfig_Request)(nil), // 57: tfplugin6.ValidateDataResourceConfig.Request + (*ValidateDataResourceConfig_Response)(nil), // 58: tfplugin6.ValidateDataResourceConfig.Response + (*ConfigureProvider_Request)(nil), // 59: tfplugin6.ConfigureProvider.Request + (*ConfigureProvider_Response)(nil), // 60: tfplugin6.ConfigureProvider.Response + (*ReadResource_Request)(nil), // 61: tfplugin6.ReadResource.Request + (*ReadResource_Response)(nil), // 62: tfplugin6.ReadResource.Response + (*PlanResourceChange_Request)(nil), // 63: tfplugin6.PlanResourceChange.Request + (*PlanResourceChange_Response)(nil), // 64: tfplugin6.PlanResourceChange.Response + (*ApplyResourceChange_Request)(nil), // 65: tfplugin6.ApplyResourceChange.Request + (*ApplyResourceChange_Response)(nil), // 66: tfplugin6.ApplyResourceChange.Response + (*ImportResourceState_Request)(nil), // 67: tfplugin6.ImportResourceState.Request + (*ImportResourceState_ImportedResource)(nil), // 68: tfplugin6.ImportResourceState.ImportedResource + (*ImportResourceState_Response)(nil), // 69: tfplugin6.ImportResourceState.Response + (*MoveResourceState_Request)(nil), // 70: tfplugin6.MoveResourceState.Request + (*MoveResourceState_Response)(nil), // 71: tfplugin6.MoveResourceState.Response + (*ReadDataSource_Request)(nil), // 72: tfplugin6.ReadDataSource.Request + (*ReadDataSource_Response)(nil), // 73: tfplugin6.ReadDataSource.Response + (*GetFunctions_Request)(nil), // 74: tfplugin6.GetFunctions.Request + (*GetFunctions_Response)(nil), // 75: tfplugin6.GetFunctions.Response + nil, // 76: tfplugin6.GetFunctions.Response.FunctionsEntry + (*CallFunction_Request)(nil), // 77: tfplugin6.CallFunction.Request + (*CallFunction_Response)(nil), // 78: tfplugin6.CallFunction.Response } var file_tfplugin6_proto_depIdxs = []int32{ - 1, // 0: tfplugin6.Diagnostic.severity:type_name -> tfplugin6.Diagnostic.Severity - 7, // 1: 
tfplugin6.Diagnostic.attribute:type_name -> tfplugin6.AttributePath - 28, // 2: tfplugin6.AttributePath.steps:type_name -> tfplugin6.AttributePath.Step - 31, // 3: tfplugin6.RawState.flatmap:type_name -> tfplugin6.RawState.FlatmapEntry - 32, // 4: tfplugin6.Schema.block:type_name -> tfplugin6.Schema.Block - 36, // 5: tfplugin6.Function.parameters:type_name -> tfplugin6.Function.Parameter - 36, // 6: tfplugin6.Function.variadic_parameter:type_name -> tfplugin6.Function.Parameter - 37, // 7: tfplugin6.Function.return:type_name -> tfplugin6.Function.Return - 0, // 8: tfplugin6.Function.description_kind:type_name -> tfplugin6.StringKind - 33, // 9: tfplugin6.Schema.Block.attributes:type_name -> tfplugin6.Schema.Attribute - 34, // 10: tfplugin6.Schema.Block.block_types:type_name -> tfplugin6.Schema.NestedBlock - 0, // 11: tfplugin6.Schema.Block.description_kind:type_name -> tfplugin6.StringKind - 35, // 12: tfplugin6.Schema.Attribute.nested_type:type_name -> tfplugin6.Schema.Object - 0, // 13: tfplugin6.Schema.Attribute.description_kind:type_name -> tfplugin6.StringKind - 32, // 14: tfplugin6.Schema.NestedBlock.block:type_name -> tfplugin6.Schema.Block - 2, // 15: tfplugin6.Schema.NestedBlock.nesting:type_name -> tfplugin6.Schema.NestedBlock.NestingMode - 33, // 16: tfplugin6.Schema.Object.attributes:type_name -> tfplugin6.Schema.Attribute - 3, // 17: tfplugin6.Schema.Object.nesting:type_name -> tfplugin6.Schema.Object.NestingMode - 0, // 18: tfplugin6.Function.Parameter.description_kind:type_name -> tfplugin6.StringKind - 12, // 19: tfplugin6.GetMetadata.Response.server_capabilities:type_name -> tfplugin6.ServerCapabilities - 5, // 20: tfplugin6.GetMetadata.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 41, // 21: tfplugin6.GetMetadata.Response.data_sources:type_name -> tfplugin6.GetMetadata.DataSourceMetadata - 42, // 22: tfplugin6.GetMetadata.Response.resources:type_name -> tfplugin6.GetMetadata.ResourceMetadata - 40, // 23: tfplugin6.GetMetadata.Response.functions:type_name -> tfplugin6.GetMetadata.FunctionMetadata - 10, // 24: tfplugin6.GetProviderSchema.Response.provider:type_name -> tfplugin6.Schema - 45, // 25: tfplugin6.GetProviderSchema.Response.resource_schemas:type_name -> tfplugin6.GetProviderSchema.Response.ResourceSchemasEntry - 46, // 26: tfplugin6.GetProviderSchema.Response.data_source_schemas:type_name -> tfplugin6.GetProviderSchema.Response.DataSourceSchemasEntry - 5, // 27: tfplugin6.GetProviderSchema.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 10, // 28: tfplugin6.GetProviderSchema.Response.provider_meta:type_name -> tfplugin6.Schema - 12, // 29: tfplugin6.GetProviderSchema.Response.server_capabilities:type_name -> tfplugin6.ServerCapabilities - 47, // 30: tfplugin6.GetProviderSchema.Response.functions:type_name -> tfplugin6.GetProviderSchema.Response.FunctionsEntry - 10, // 31: tfplugin6.GetProviderSchema.Response.ResourceSchemasEntry.value:type_name -> tfplugin6.Schema - 10, // 32: tfplugin6.GetProviderSchema.Response.DataSourceSchemasEntry.value:type_name -> tfplugin6.Schema - 11, // 33: tfplugin6.GetProviderSchema.Response.FunctionsEntry.value:type_name -> tfplugin6.Function - 4, // 34: tfplugin6.ValidateProviderConfig.Request.config:type_name -> tfplugin6.DynamicValue - 5, // 35: tfplugin6.ValidateProviderConfig.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 9, // 36: tfplugin6.UpgradeResourceState.Request.raw_state:type_name -> tfplugin6.RawState - 4, // 37: tfplugin6.UpgradeResourceState.Response.upgraded_state:type_name -> 
tfplugin6.DynamicValue - 5, // 38: tfplugin6.UpgradeResourceState.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 4, // 39: tfplugin6.ValidateResourceConfig.Request.config:type_name -> tfplugin6.DynamicValue - 5, // 40: tfplugin6.ValidateResourceConfig.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 4, // 41: tfplugin6.ValidateDataResourceConfig.Request.config:type_name -> tfplugin6.DynamicValue - 5, // 42: tfplugin6.ValidateDataResourceConfig.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 4, // 43: tfplugin6.ConfigureProvider.Request.config:type_name -> tfplugin6.DynamicValue - 5, // 44: tfplugin6.ConfigureProvider.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 4, // 45: tfplugin6.ReadResource.Request.current_state:type_name -> tfplugin6.DynamicValue - 4, // 46: tfplugin6.ReadResource.Request.provider_meta:type_name -> tfplugin6.DynamicValue - 4, // 47: tfplugin6.ReadResource.Response.new_state:type_name -> tfplugin6.DynamicValue - 5, // 48: tfplugin6.ReadResource.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 4, // 49: tfplugin6.PlanResourceChange.Request.prior_state:type_name -> tfplugin6.DynamicValue - 4, // 50: tfplugin6.PlanResourceChange.Request.proposed_new_state:type_name -> tfplugin6.DynamicValue - 4, // 51: tfplugin6.PlanResourceChange.Request.config:type_name -> tfplugin6.DynamicValue - 4, // 52: tfplugin6.PlanResourceChange.Request.provider_meta:type_name -> tfplugin6.DynamicValue - 4, // 53: tfplugin6.PlanResourceChange.Response.planned_state:type_name -> tfplugin6.DynamicValue - 7, // 54: tfplugin6.PlanResourceChange.Response.requires_replace:type_name -> tfplugin6.AttributePath - 5, // 55: tfplugin6.PlanResourceChange.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 4, // 56: tfplugin6.ApplyResourceChange.Request.prior_state:type_name -> tfplugin6.DynamicValue - 4, // 57: tfplugin6.ApplyResourceChange.Request.planned_state:type_name -> tfplugin6.DynamicValue - 4, // 58: tfplugin6.ApplyResourceChange.Request.config:type_name -> tfplugin6.DynamicValue - 4, // 59: tfplugin6.ApplyResourceChange.Request.provider_meta:type_name -> tfplugin6.DynamicValue - 4, // 60: tfplugin6.ApplyResourceChange.Response.new_state:type_name -> tfplugin6.DynamicValue - 5, // 61: tfplugin6.ApplyResourceChange.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 4, // 62: tfplugin6.ImportResourceState.ImportedResource.state:type_name -> tfplugin6.DynamicValue - 65, // 63: tfplugin6.ImportResourceState.Response.imported_resources:type_name -> tfplugin6.ImportResourceState.ImportedResource - 5, // 64: tfplugin6.ImportResourceState.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 9, // 65: tfplugin6.MoveResourceState.Request.source_state:type_name -> tfplugin6.RawState - 4, // 66: tfplugin6.MoveResourceState.Response.target_state:type_name -> tfplugin6.DynamicValue - 5, // 67: tfplugin6.MoveResourceState.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 4, // 68: tfplugin6.ReadDataSource.Request.config:type_name -> tfplugin6.DynamicValue - 4, // 69: tfplugin6.ReadDataSource.Request.provider_meta:type_name -> tfplugin6.DynamicValue - 4, // 70: tfplugin6.ReadDataSource.Response.state:type_name -> tfplugin6.DynamicValue - 5, // 71: tfplugin6.ReadDataSource.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 73, // 72: tfplugin6.GetFunctions.Response.functions:type_name -> tfplugin6.GetFunctions.Response.FunctionsEntry - 5, // 73: tfplugin6.GetFunctions.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 11, // 74: 
tfplugin6.GetFunctions.Response.FunctionsEntry.value:type_name -> tfplugin6.Function - 4, // 75: tfplugin6.CallFunction.Request.arguments:type_name -> tfplugin6.DynamicValue - 4, // 76: tfplugin6.CallFunction.Response.result:type_name -> tfplugin6.DynamicValue - 6, // 77: tfplugin6.CallFunction.Response.error:type_name -> tfplugin6.FunctionError - 38, // 78: tfplugin6.Provider.GetMetadata:input_type -> tfplugin6.GetMetadata.Request - 43, // 79: tfplugin6.Provider.GetProviderSchema:input_type -> tfplugin6.GetProviderSchema.Request - 48, // 80: tfplugin6.Provider.ValidateProviderConfig:input_type -> tfplugin6.ValidateProviderConfig.Request - 52, // 81: tfplugin6.Provider.ValidateResourceConfig:input_type -> tfplugin6.ValidateResourceConfig.Request - 54, // 82: tfplugin6.Provider.ValidateDataResourceConfig:input_type -> tfplugin6.ValidateDataResourceConfig.Request - 50, // 83: tfplugin6.Provider.UpgradeResourceState:input_type -> tfplugin6.UpgradeResourceState.Request - 56, // 84: tfplugin6.Provider.ConfigureProvider:input_type -> tfplugin6.ConfigureProvider.Request - 58, // 85: tfplugin6.Provider.ReadResource:input_type -> tfplugin6.ReadResource.Request - 60, // 86: tfplugin6.Provider.PlanResourceChange:input_type -> tfplugin6.PlanResourceChange.Request - 62, // 87: tfplugin6.Provider.ApplyResourceChange:input_type -> tfplugin6.ApplyResourceChange.Request - 64, // 88: tfplugin6.Provider.ImportResourceState:input_type -> tfplugin6.ImportResourceState.Request - 67, // 89: tfplugin6.Provider.MoveResourceState:input_type -> tfplugin6.MoveResourceState.Request - 69, // 90: tfplugin6.Provider.ReadDataSource:input_type -> tfplugin6.ReadDataSource.Request - 71, // 91: tfplugin6.Provider.GetFunctions:input_type -> tfplugin6.GetFunctions.Request - 74, // 92: tfplugin6.Provider.CallFunction:input_type -> tfplugin6.CallFunction.Request - 29, // 93: tfplugin6.Provider.StopProvider:input_type -> tfplugin6.StopProvider.Request - 39, // 94: tfplugin6.Provider.GetMetadata:output_type -> tfplugin6.GetMetadata.Response - 44, // 95: tfplugin6.Provider.GetProviderSchema:output_type -> tfplugin6.GetProviderSchema.Response - 49, // 96: tfplugin6.Provider.ValidateProviderConfig:output_type -> tfplugin6.ValidateProviderConfig.Response - 53, // 97: tfplugin6.Provider.ValidateResourceConfig:output_type -> tfplugin6.ValidateResourceConfig.Response - 55, // 98: tfplugin6.Provider.ValidateDataResourceConfig:output_type -> tfplugin6.ValidateDataResourceConfig.Response - 51, // 99: tfplugin6.Provider.UpgradeResourceState:output_type -> tfplugin6.UpgradeResourceState.Response - 57, // 100: tfplugin6.Provider.ConfigureProvider:output_type -> tfplugin6.ConfigureProvider.Response - 59, // 101: tfplugin6.Provider.ReadResource:output_type -> tfplugin6.ReadResource.Response - 61, // 102: tfplugin6.Provider.PlanResourceChange:output_type -> tfplugin6.PlanResourceChange.Response - 63, // 103: tfplugin6.Provider.ApplyResourceChange:output_type -> tfplugin6.ApplyResourceChange.Response - 66, // 104: tfplugin6.Provider.ImportResourceState:output_type -> tfplugin6.ImportResourceState.Response - 68, // 105: tfplugin6.Provider.MoveResourceState:output_type -> tfplugin6.MoveResourceState.Response - 70, // 106: tfplugin6.Provider.ReadDataSource:output_type -> tfplugin6.ReadDataSource.Response - 72, // 107: tfplugin6.Provider.GetFunctions:output_type -> tfplugin6.GetFunctions.Response - 75, // 108: tfplugin6.Provider.CallFunction:output_type -> tfplugin6.CallFunction.Response - 30, // 109: tfplugin6.Provider.StopProvider:output_type -> 
tfplugin6.StopProvider.Response - 94, // [94:110] is the sub-list for method output_type - 78, // [78:94] is the sub-list for method input_type - 78, // [78:78] is the sub-list for extension type_name - 78, // [78:78] is the sub-list for extension extendee - 0, // [0:78] is the sub-list for field type_name + 1, // 0: tfplugin6.Diagnostic.severity:type_name -> tfplugin6.Diagnostic.Severity + 8, // 1: tfplugin6.Diagnostic.attribute:type_name -> tfplugin6.AttributePath + 31, // 2: tfplugin6.AttributePath.steps:type_name -> tfplugin6.AttributePath.Step + 34, // 3: tfplugin6.RawState.flatmap:type_name -> tfplugin6.RawState.FlatmapEntry + 35, // 4: tfplugin6.Schema.block:type_name -> tfplugin6.Schema.Block + 39, // 5: tfplugin6.Function.parameters:type_name -> tfplugin6.Function.Parameter + 39, // 6: tfplugin6.Function.variadic_parameter:type_name -> tfplugin6.Function.Parameter + 40, // 7: tfplugin6.Function.return:type_name -> tfplugin6.Function.Return + 0, // 8: tfplugin6.Function.description_kind:type_name -> tfplugin6.StringKind + 4, // 9: tfplugin6.Deferred.reason:type_name -> tfplugin6.Deferred.Reason + 36, // 10: tfplugin6.Schema.Block.attributes:type_name -> tfplugin6.Schema.Attribute + 37, // 11: tfplugin6.Schema.Block.block_types:type_name -> tfplugin6.Schema.NestedBlock + 0, // 12: tfplugin6.Schema.Block.description_kind:type_name -> tfplugin6.StringKind + 38, // 13: tfplugin6.Schema.Attribute.nested_type:type_name -> tfplugin6.Schema.Object + 0, // 14: tfplugin6.Schema.Attribute.description_kind:type_name -> tfplugin6.StringKind + 35, // 15: tfplugin6.Schema.NestedBlock.block:type_name -> tfplugin6.Schema.Block + 2, // 16: tfplugin6.Schema.NestedBlock.nesting:type_name -> tfplugin6.Schema.NestedBlock.NestingMode + 36, // 17: tfplugin6.Schema.Object.attributes:type_name -> tfplugin6.Schema.Attribute + 3, // 18: tfplugin6.Schema.Object.nesting:type_name -> tfplugin6.Schema.Object.NestingMode + 0, // 19: tfplugin6.Function.Parameter.description_kind:type_name -> tfplugin6.StringKind + 13, // 20: tfplugin6.GetMetadata.Response.server_capabilities:type_name -> tfplugin6.ServerCapabilities + 6, // 21: tfplugin6.GetMetadata.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 44, // 22: tfplugin6.GetMetadata.Response.data_sources:type_name -> tfplugin6.GetMetadata.DataSourceMetadata + 45, // 23: tfplugin6.GetMetadata.Response.resources:type_name -> tfplugin6.GetMetadata.ResourceMetadata + 43, // 24: tfplugin6.GetMetadata.Response.functions:type_name -> tfplugin6.GetMetadata.FunctionMetadata + 11, // 25: tfplugin6.GetProviderSchema.Response.provider:type_name -> tfplugin6.Schema + 48, // 26: tfplugin6.GetProviderSchema.Response.resource_schemas:type_name -> tfplugin6.GetProviderSchema.Response.ResourceSchemasEntry + 49, // 27: tfplugin6.GetProviderSchema.Response.data_source_schemas:type_name -> tfplugin6.GetProviderSchema.Response.DataSourceSchemasEntry + 6, // 28: tfplugin6.GetProviderSchema.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 11, // 29: tfplugin6.GetProviderSchema.Response.provider_meta:type_name -> tfplugin6.Schema + 13, // 30: tfplugin6.GetProviderSchema.Response.server_capabilities:type_name -> tfplugin6.ServerCapabilities + 50, // 31: tfplugin6.GetProviderSchema.Response.functions:type_name -> tfplugin6.GetProviderSchema.Response.FunctionsEntry + 11, // 32: tfplugin6.GetProviderSchema.Response.ResourceSchemasEntry.value:type_name -> tfplugin6.Schema + 11, // 33: tfplugin6.GetProviderSchema.Response.DataSourceSchemasEntry.value:type_name -> tfplugin6.Schema + 
12, // 34: tfplugin6.GetProviderSchema.Response.FunctionsEntry.value:type_name -> tfplugin6.Function + 5, // 35: tfplugin6.ValidateProviderConfig.Request.config:type_name -> tfplugin6.DynamicValue + 6, // 36: tfplugin6.ValidateProviderConfig.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 10, // 37: tfplugin6.UpgradeResourceState.Request.raw_state:type_name -> tfplugin6.RawState + 5, // 38: tfplugin6.UpgradeResourceState.Response.upgraded_state:type_name -> tfplugin6.DynamicValue + 6, // 39: tfplugin6.UpgradeResourceState.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 5, // 40: tfplugin6.ValidateResourceConfig.Request.config:type_name -> tfplugin6.DynamicValue + 6, // 41: tfplugin6.ValidateResourceConfig.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 5, // 42: tfplugin6.ValidateDataResourceConfig.Request.config:type_name -> tfplugin6.DynamicValue + 6, // 43: tfplugin6.ValidateDataResourceConfig.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 5, // 44: tfplugin6.ConfigureProvider.Request.config:type_name -> tfplugin6.DynamicValue + 14, // 45: tfplugin6.ConfigureProvider.Request.client_capabilities:type_name -> tfplugin6.ClientCapabilities + 6, // 46: tfplugin6.ConfigureProvider.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 5, // 47: tfplugin6.ReadResource.Request.current_state:type_name -> tfplugin6.DynamicValue + 5, // 48: tfplugin6.ReadResource.Request.provider_meta:type_name -> tfplugin6.DynamicValue + 14, // 49: tfplugin6.ReadResource.Request.client_capabilities:type_name -> tfplugin6.ClientCapabilities + 5, // 50: tfplugin6.ReadResource.Response.new_state:type_name -> tfplugin6.DynamicValue + 6, // 51: tfplugin6.ReadResource.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 15, // 52: tfplugin6.ReadResource.Response.deferred:type_name -> tfplugin6.Deferred + 5, // 53: tfplugin6.PlanResourceChange.Request.prior_state:type_name -> tfplugin6.DynamicValue + 5, // 54: tfplugin6.PlanResourceChange.Request.proposed_new_state:type_name -> tfplugin6.DynamicValue + 5, // 55: tfplugin6.PlanResourceChange.Request.config:type_name -> tfplugin6.DynamicValue + 5, // 56: tfplugin6.PlanResourceChange.Request.provider_meta:type_name -> tfplugin6.DynamicValue + 14, // 57: tfplugin6.PlanResourceChange.Request.client_capabilities:type_name -> tfplugin6.ClientCapabilities + 5, // 58: tfplugin6.PlanResourceChange.Response.planned_state:type_name -> tfplugin6.DynamicValue + 8, // 59: tfplugin6.PlanResourceChange.Response.requires_replace:type_name -> tfplugin6.AttributePath + 6, // 60: tfplugin6.PlanResourceChange.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 15, // 61: tfplugin6.PlanResourceChange.Response.deferred:type_name -> tfplugin6.Deferred + 5, // 62: tfplugin6.ApplyResourceChange.Request.prior_state:type_name -> tfplugin6.DynamicValue + 5, // 63: tfplugin6.ApplyResourceChange.Request.planned_state:type_name -> tfplugin6.DynamicValue + 5, // 64: tfplugin6.ApplyResourceChange.Request.config:type_name -> tfplugin6.DynamicValue + 5, // 65: tfplugin6.ApplyResourceChange.Request.provider_meta:type_name -> tfplugin6.DynamicValue + 5, // 66: tfplugin6.ApplyResourceChange.Response.new_state:type_name -> tfplugin6.DynamicValue + 6, // 67: tfplugin6.ApplyResourceChange.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 14, // 68: tfplugin6.ImportResourceState.Request.client_capabilities:type_name -> tfplugin6.ClientCapabilities + 5, // 69: tfplugin6.ImportResourceState.ImportedResource.state:type_name -> tfplugin6.DynamicValue + 68, // 70: 
tfplugin6.ImportResourceState.Response.imported_resources:type_name -> tfplugin6.ImportResourceState.ImportedResource + 6, // 71: tfplugin6.ImportResourceState.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 15, // 72: tfplugin6.ImportResourceState.Response.deferred:type_name -> tfplugin6.Deferred + 10, // 73: tfplugin6.MoveResourceState.Request.source_state:type_name -> tfplugin6.RawState + 5, // 74: tfplugin6.MoveResourceState.Response.target_state:type_name -> tfplugin6.DynamicValue + 6, // 75: tfplugin6.MoveResourceState.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 5, // 76: tfplugin6.ReadDataSource.Request.config:type_name -> tfplugin6.DynamicValue + 5, // 77: tfplugin6.ReadDataSource.Request.provider_meta:type_name -> tfplugin6.DynamicValue + 14, // 78: tfplugin6.ReadDataSource.Request.client_capabilities:type_name -> tfplugin6.ClientCapabilities + 5, // 79: tfplugin6.ReadDataSource.Response.state:type_name -> tfplugin6.DynamicValue + 6, // 80: tfplugin6.ReadDataSource.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 15, // 81: tfplugin6.ReadDataSource.Response.deferred:type_name -> tfplugin6.Deferred + 76, // 82: tfplugin6.GetFunctions.Response.functions:type_name -> tfplugin6.GetFunctions.Response.FunctionsEntry + 6, // 83: tfplugin6.GetFunctions.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 12, // 84: tfplugin6.GetFunctions.Response.FunctionsEntry.value:type_name -> tfplugin6.Function + 5, // 85: tfplugin6.CallFunction.Request.arguments:type_name -> tfplugin6.DynamicValue + 5, // 86: tfplugin6.CallFunction.Response.result:type_name -> tfplugin6.DynamicValue + 7, // 87: tfplugin6.CallFunction.Response.error:type_name -> tfplugin6.FunctionError + 41, // 88: tfplugin6.Provider.GetMetadata:input_type -> tfplugin6.GetMetadata.Request + 46, // 89: tfplugin6.Provider.GetProviderSchema:input_type -> tfplugin6.GetProviderSchema.Request + 51, // 90: tfplugin6.Provider.ValidateProviderConfig:input_type -> tfplugin6.ValidateProviderConfig.Request + 55, // 91: tfplugin6.Provider.ValidateResourceConfig:input_type -> tfplugin6.ValidateResourceConfig.Request + 57, // 92: tfplugin6.Provider.ValidateDataResourceConfig:input_type -> tfplugin6.ValidateDataResourceConfig.Request + 53, // 93: tfplugin6.Provider.UpgradeResourceState:input_type -> tfplugin6.UpgradeResourceState.Request + 59, // 94: tfplugin6.Provider.ConfigureProvider:input_type -> tfplugin6.ConfigureProvider.Request + 61, // 95: tfplugin6.Provider.ReadResource:input_type -> tfplugin6.ReadResource.Request + 63, // 96: tfplugin6.Provider.PlanResourceChange:input_type -> tfplugin6.PlanResourceChange.Request + 65, // 97: tfplugin6.Provider.ApplyResourceChange:input_type -> tfplugin6.ApplyResourceChange.Request + 67, // 98: tfplugin6.Provider.ImportResourceState:input_type -> tfplugin6.ImportResourceState.Request + 70, // 99: tfplugin6.Provider.MoveResourceState:input_type -> tfplugin6.MoveResourceState.Request + 72, // 100: tfplugin6.Provider.ReadDataSource:input_type -> tfplugin6.ReadDataSource.Request + 74, // 101: tfplugin6.Provider.GetFunctions:input_type -> tfplugin6.GetFunctions.Request + 77, // 102: tfplugin6.Provider.CallFunction:input_type -> tfplugin6.CallFunction.Request + 32, // 103: tfplugin6.Provider.StopProvider:input_type -> tfplugin6.StopProvider.Request + 42, // 104: tfplugin6.Provider.GetMetadata:output_type -> tfplugin6.GetMetadata.Response + 47, // 105: tfplugin6.Provider.GetProviderSchema:output_type -> tfplugin6.GetProviderSchema.Response + 52, // 106: 
tfplugin6.Provider.ValidateProviderConfig:output_type -> tfplugin6.ValidateProviderConfig.Response + 56, // 107: tfplugin6.Provider.ValidateResourceConfig:output_type -> tfplugin6.ValidateResourceConfig.Response + 58, // 108: tfplugin6.Provider.ValidateDataResourceConfig:output_type -> tfplugin6.ValidateDataResourceConfig.Response + 54, // 109: tfplugin6.Provider.UpgradeResourceState:output_type -> tfplugin6.UpgradeResourceState.Response + 60, // 110: tfplugin6.Provider.ConfigureProvider:output_type -> tfplugin6.ConfigureProvider.Response + 62, // 111: tfplugin6.Provider.ReadResource:output_type -> tfplugin6.ReadResource.Response + 64, // 112: tfplugin6.Provider.PlanResourceChange:output_type -> tfplugin6.PlanResourceChange.Response + 66, // 113: tfplugin6.Provider.ApplyResourceChange:output_type -> tfplugin6.ApplyResourceChange.Response + 69, // 114: tfplugin6.Provider.ImportResourceState:output_type -> tfplugin6.ImportResourceState.Response + 71, // 115: tfplugin6.Provider.MoveResourceState:output_type -> tfplugin6.MoveResourceState.Response + 73, // 116: tfplugin6.Provider.ReadDataSource:output_type -> tfplugin6.ReadDataSource.Response + 75, // 117: tfplugin6.Provider.GetFunctions:output_type -> tfplugin6.GetFunctions.Response + 78, // 118: tfplugin6.Provider.CallFunction:output_type -> tfplugin6.CallFunction.Response + 33, // 119: tfplugin6.Provider.StopProvider:output_type -> tfplugin6.StopProvider.Response + 104, // [104:120] is the sub-list for method output_type + 88, // [88:104] is the sub-list for method input_type + 88, // [88:88] is the sub-list for extension type_name + 88, // [88:88] is the sub-list for extension extendee + 0, // [0:88] is the sub-list for field type_name } func init() { file_tfplugin6_proto_init() } @@ -5133,7 +5438,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetMetadata); i { + switch v := v.(*ClientCapabilities); i { case 0: return &v.state case 1: @@ -5145,7 +5450,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetProviderSchema); i { + switch v := v.(*Deferred); i { case 0: return &v.state case 1: @@ -5157,7 +5462,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateProviderConfig); i { + switch v := v.(*GetMetadata); i { case 0: return &v.state case 1: @@ -5169,7 +5474,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpgradeResourceState); i { + switch v := v.(*GetProviderSchema); i { case 0: return &v.state case 1: @@ -5181,7 +5486,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateResourceConfig); i { + switch v := v.(*ValidateProviderConfig); i { case 0: return &v.state case 1: @@ -5193,7 +5498,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateDataResourceConfig); i { + switch v := v.(*UpgradeResourceState); i { case 0: return &v.state case 1: @@ -5205,7 +5510,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ConfigureProvider); i { + switch v := 
v.(*ValidateResourceConfig); i { case 0: return &v.state case 1: @@ -5217,7 +5522,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadResource); i { + switch v := v.(*ValidateDataResourceConfig); i { case 0: return &v.state case 1: @@ -5229,7 +5534,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PlanResourceChange); i { + switch v := v.(*ConfigureProvider); i { case 0: return &v.state case 1: @@ -5241,7 +5546,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyResourceChange); i { + switch v := v.(*ReadResource); i { case 0: return &v.state case 1: @@ -5253,7 +5558,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ImportResourceState); i { + switch v := v.(*PlanResourceChange); i { case 0: return &v.state case 1: @@ -5265,7 +5570,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MoveResourceState); i { + switch v := v.(*ApplyResourceChange); i { case 0: return &v.state case 1: @@ -5277,7 +5582,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadDataSource); i { + switch v := v.(*ImportResourceState); i { case 0: return &v.state case 1: @@ -5289,7 +5594,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetFunctions); i { + switch v := v.(*MoveResourceState); i { case 0: return &v.state case 1: @@ -5301,7 +5606,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CallFunction); i { + switch v := v.(*ReadDataSource); i { case 0: return &v.state case 1: @@ -5313,7 +5618,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AttributePath_Step); i { + switch v := v.(*GetFunctions); i { case 0: return &v.state case 1: @@ -5325,7 +5630,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StopProvider_Request); i { + switch v := v.(*CallFunction); i { case 0: return &v.state case 1: @@ -5337,7 +5642,19 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StopProvider_Response); i { + switch v := v.(*AttributePath_Step); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopProvider_Request); i { case 0: return &v.state case 1: @@ -5349,6 +5666,18 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopProvider_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_tfplugin6_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Schema_Block); i { case 0: return &v.state @@ -5360,7 +5689,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Schema_Attribute); i { case 0: return &v.state @@ -5372,7 +5701,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Schema_NestedBlock); i { case 0: return &v.state @@ -5384,7 +5713,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Schema_Object); i { case 0: return &v.state @@ -5396,7 +5725,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Function_Parameter); i { case 0: return &v.state @@ -5408,7 +5737,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Function_Return); i { case 0: return &v.state @@ -5420,7 +5749,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetMetadata_Request); i { case 0: return &v.state @@ -5432,7 +5761,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetMetadata_Response); i { case 0: return &v.state @@ -5444,7 +5773,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetMetadata_FunctionMetadata); i { case 0: return &v.state @@ -5456,7 +5785,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetMetadata_DataSourceMetadata); i { case 0: return &v.state @@ -5468,7 +5797,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetMetadata_ResourceMetadata); i { case 0: return &v.state @@ -5480,7 +5809,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { switch 
v := v.(*GetProviderSchema_Request); i { case 0: return &v.state @@ -5492,7 +5821,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetProviderSchema_Response); i { case 0: return &v.state @@ -5504,7 +5833,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateProviderConfig_Request); i { case 0: return &v.state @@ -5516,7 +5845,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateProviderConfig_Response); i { case 0: return &v.state @@ -5528,7 +5857,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UpgradeResourceState_Request); i { case 0: return &v.state @@ -5540,7 +5869,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UpgradeResourceState_Response); i { case 0: return &v.state @@ -5552,7 +5881,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateResourceConfig_Request); i { case 0: return &v.state @@ -5564,7 +5893,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateResourceConfig_Response); i { case 0: return &v.state @@ -5576,7 +5905,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateDataResourceConfig_Request); i { case 0: return &v.state @@ -5588,7 +5917,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateDataResourceConfig_Response); i { case 0: return &v.state @@ -5600,7 +5929,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ConfigureProvider_Request); i { case 0: return &v.state @@ -5612,7 +5941,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[55].Exporter = func(v interface{}, i int) 
interface{} { switch v := v.(*ConfigureProvider_Response); i { case 0: return &v.state @@ -5624,7 +5953,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadResource_Request); i { case 0: return &v.state @@ -5636,7 +5965,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadResource_Response); i { case 0: return &v.state @@ -5648,7 +5977,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PlanResourceChange_Request); i { case 0: return &v.state @@ -5660,7 +5989,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PlanResourceChange_Response); i { case 0: return &v.state @@ -5672,7 +6001,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ApplyResourceChange_Request); i { case 0: return &v.state @@ -5684,7 +6013,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ApplyResourceChange_Response); i { case 0: return &v.state @@ -5696,7 +6025,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ImportResourceState_Request); i { case 0: return &v.state @@ -5708,7 +6037,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ImportResourceState_ImportedResource); i { case 0: return &v.state @@ -5720,7 +6049,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ImportResourceState_Response); i { case 0: return &v.state @@ -5732,7 +6061,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MoveResourceState_Request); i { case 0: return &v.state @@ -5744,7 +6073,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { 
switch v := v.(*MoveResourceState_Response); i { case 0: return &v.state @@ -5756,7 +6085,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadDataSource_Request); i { case 0: return &v.state @@ -5768,7 +6097,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadDataSource_Response); i { case 0: return &v.state @@ -5780,7 +6109,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetFunctions_Request); i { case 0: return &v.state @@ -5792,7 +6121,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetFunctions_Response); i { case 0: return &v.state @@ -5804,7 +6133,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CallFunction_Request); i { case 0: return &v.state @@ -5816,7 +6145,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CallFunction_Response); i { case 0: return &v.state @@ -5830,7 +6159,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[2].OneofWrappers = []interface{}{} - file_tfplugin6_proto_msgTypes[24].OneofWrappers = []interface{}{ + file_tfplugin6_proto_msgTypes[26].OneofWrappers = []interface{}{ (*AttributePath_Step_AttributeName)(nil), (*AttributePath_Step_ElementKeyString)(nil), (*AttributePath_Step_ElementKeyInt)(nil), @@ -5840,8 +6169,8 @@ func file_tfplugin6_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_tfplugin6_proto_rawDesc, - NumEnums: 4, - NumMessages: 72, + NumEnums: 5, + NumMessages: 74, NumExtensions: 0, NumServices: 1, }, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.proto b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.proto index 097abf0cca0..8504e12dccc 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.proto +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.proto @@ -1,9 +1,9 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -// Terraform Plugin RPC protocol version 6.5 +// Terraform Plugin RPC protocol version 6.6 // -// This file defines version 6.5 of the RPC protocol. To implement a plugin +// This file defines version 6.6 of the RPC protocol. 
To implement a plugin // against this protocol, copy this definition into your own codebase and // use protoc to generate stubs for your target language. // @@ -230,6 +230,35 @@ message ServerCapabilities { bool move_resource_state = 3; } +// ClientCapabilities allows Terraform to publish information regarding +// supported protocol features. This is used to indicate availability of +// certain forward-compatible changes which may be optional in a major +// protocol version, but cannot be tested for directly. +message ClientCapabilities { + // The deferral_allowed capability signals that the client is able to + // handle deferred responses from the provider. + bool deferral_allowed = 1; +} + +// Deferred is a message that indicates that change is deferred for a reason. +message Deferred { + // Reason is the reason for deferring the change. + enum Reason { + // UNKNOWN is the default value, and should not be used. + UNKNOWN = 0; + // RESOURCE_CONFIG_UNKNOWN is used when the config is partially unknown and the real + // values need to be known before the change can be planned. + RESOURCE_CONFIG_UNKNOWN = 1; + // PROVIDER_CONFIG_UNKNOWN is used when parts of the provider configuration + // are unknown, e.g. the provider configuration is only known after the apply is done. + PROVIDER_CONFIG_UNKNOWN = 2; + // ABSENT_PREREQ is used when a hard dependency has not been satisfied. + ABSENT_PREREQ = 3; + } + // reason is the reason for deferring the change. + Reason reason = 1; +} + service Provider { //////// Information about what a provider supports/expects @@ -385,6 +414,7 @@ message ConfigureProvider { message Request { string terraform_version = 1; DynamicValue config = 2; + ClientCapabilities client_capabilities = 3; } message Response { repeated Diagnostic diagnostics = 1; @@ -405,11 +435,15 @@ message ReadResource { DynamicValue current_state = 2; bytes private = 3; DynamicValue provider_meta = 4; + ClientCapabilities client_capabilities = 5; } message Response { DynamicValue new_state = 1; repeated Diagnostic diagnostics = 2; bytes private = 3; + // deferred is set if the provider is deferring the change. If set the caller + // needs to handle the deferral. + Deferred deferred = 4; } } @@ -421,6 +455,7 @@ message PlanResourceChange { DynamicValue config = 4; bytes prior_private = 5; DynamicValue provider_meta = 6; + ClientCapabilities client_capabilities = 7; } message Response { @@ -441,6 +476,9 @@ message PlanResourceChange { // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== // ==== DO NOT USE THIS ==== bool legacy_type_system = 5; + // deferred is set if the provider is deferring the change. If set the caller + // needs to handle the deferral. + Deferred deferred = 6; } } @@ -477,6 +515,7 @@ message ImportResourceState { message Request { string type_name = 1; string id = 2; + ClientCapabilities client_capabilities = 3; } message ImportedResource { @@ -488,6 +527,9 @@ message ImportResourceState { message Response { repeated ImportedResource imported_resources = 1; repeated Diagnostic diagnostics = 2; + // deferred is set if the provider is deferring the change. If set the caller + // needs to handle the deferral. + Deferred deferred = 3; } } @@ -532,10 +574,14 @@ message ReadDataSource { string type_name = 1; DynamicValue config = 2; DynamicValue provider_meta = 3; + ClientCapabilities client_capabilities = 4; } message Response { DynamicValue state = 1; repeated Diagnostic diagnostics = 2; + // deferred is set if the provider is deferring the change. 
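The two messages above give protocol 6.6 a deferral handshake: Terraform advertises deferral_allowed through ClientCapabilities on each request, and a provider that cannot produce a result yet answers with a Deferred reason instead of state. A minimal provider-side sketch of that flow using the tfprotov6 Go types this patch vendors; the dataSourceServer type and its providerConfigured field are hypothetical illustration, and the constant name DeferredReasonProviderConfigUnknown is assumed here to mirror the PROVIDER_CONFIG_UNKNOWN enum value above:

    package example

    import (
        "context"

        "github.com/hashicorp/terraform-plugin-go/tfprotov6"
    )

    // dataSourceServer is a hypothetical data source implementation.
    type dataSourceServer struct {
        providerConfigured bool // false until ConfigureProvider ran with known values
    }

    func (s *dataSourceServer) ReadDataSource(ctx context.Context, req *tfprotov6.ReadDataSourceRequest) (*tfprotov6.ReadDataSourceResponse, error) {
        // Defer only when the caller advertised support for deferrals;
        // otherwise tf6server turns the deferral into an error diagnostic.
        if !s.providerConfigured && req.ClientCapabilities != nil && req.ClientCapabilities.DeferralAllowed {
            return &tfprotov6.ReadDataSourceResponse{
                Deferred: &tfprotov6.Deferred{
                    Reason: tfprotov6.DeferredReasonProviderConfigUnknown,
                },
            }, nil
        }
        // ... the normal read path would populate State here ...
        return &tfprotov6.ReadDataSourceResponse{}, nil
    }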
If set the caller + // needs to handle the deferral. + Deferred deferred = 3; } } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6_grpc.pb.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6_grpc.pb.go index 3ae64b469f1..d1d31e196cf 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6_grpc.pb.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6_grpc.pb.go @@ -1,9 +1,9 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -// Terraform Plugin RPC protocol version 6.5 +// Terraform Plugin RPC protocol version 6.6 // -// This file defines version 6.5 of the RPC protocol. To implement a plugin +// This file defines version 6.6 of the RPC protocol. To implement a plugin // against this protocol, copy this definition into your own codebase and // use protoc to generate stubs for your target language. // @@ -23,7 +23,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v4.25.1 +// - protoc v5.26.1 // source: tfplugin6.proto package tfplugin6 diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/data_source.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/data_source.go index 954272ab16f..33d0415ba7a 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/data_source.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/data_source.go @@ -38,6 +38,7 @@ func ReadDataSource_Response(in *tfprotov6.ReadDataSourceResponse) *tfplugin6.Re resp := &tfplugin6.ReadDataSource_Response{ Diagnostics: Diagnostics(in.Diagnostics), State: DynamicValue(in.State), + Deferred: Deferred(in.Deferred), } return resp diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/deferred.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/deferred.go new file mode 100644 index 00000000000..32357aea133 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/deferred.go @@ -0,0 +1,21 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" +) + +func Deferred(in *tfprotov6.Deferred) *tfplugin6.Deferred { + if in == nil { + return nil + } + + resp := &tfplugin6.Deferred{ + Reason: tfplugin6.Deferred_Reason(in.Reason), + } + + return resp +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/resource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/resource.go index 638504d7e64..876ba5d2660 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/resource.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/resource.go @@ -54,6 +54,7 @@ func ReadResource_Response(in *tfprotov6.ReadResourceResponse) *tfplugin6.ReadRe Diagnostics: Diagnostics(in.Diagnostics), NewState: DynamicValue(in.NewState), Private: in.Private, + Deferred: Deferred(in.Deferred), } return resp @@ -70,6 +71,7 @@ func PlanResourceChange_Response(in *tfprotov6.PlanResourceChangeResponse) *tfpl PlannedPrivate: in.PlannedPrivate, PlannedState: DynamicValue(in.PlannedState), RequiresReplace: AttributePaths(in.RequiresReplace), + Deferred: Deferred(in.Deferred), } return resp @@ -98,6 +100,7 @@ func ImportResourceState_Response(in *tfprotov6.ImportResourceStateResponse) *tf resp := &tfplugin6.ImportResourceState_Response{ Diagnostics: Diagnostics(in.Diagnostics), ImportedResources: ImportResourceState_ImportedResources(in.ImportedResources), + Deferred: Deferred(in.Deferred), } return resp diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/provider.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/provider.go index e1ea384de3f..a5185138f09 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/provider.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/provider.go @@ -53,10 +53,7 @@ type ProviderServer interface { // are a handy interface for defining what a function is to // terraform-plugin-go, so they are their own interface that is composed // into ProviderServer. - // - // This will be required in an upcoming release. - // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 - // FunctionServer + FunctionServer } // GetMetadataRequest represents a GetMetadata RPC request. @@ -211,6 +208,10 @@ type ConfigureProviderRequest struct { // known values. Values that are not set in the configuration will be // null. Config *DynamicValue + + // ClientCapabilities defines optionally supported protocol features for the + // ConfigureProvider RPC, such as forward-compatible Terraform behavior changes. 
+ ClientCapabilities *ConfigureProviderClientCapabilities } // ConfigureProviderResponse represents a Terraform RPC response to the diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/resource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/resource.go index 9344f8db820..bf1a6e387be 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/resource.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/resource.go @@ -52,14 +52,24 @@ type ResourceServer interface { // specified by the passed ID and return it as one or more resource // states for Terraform to assume control of. ImportResourceState(context.Context, *ImportResourceStateRequest) (*ImportResourceStateResponse, error) + + // MoveResourceState is called when Terraform is asked to change a resource + // type for an existing resource. The provider must accept the change as + // valid by ensuring the source resource type, schema version, and provider + // address are compatible to convert the source state into the target + // resource type and latest state version. + // + // This functionality is only supported in Terraform 1.8 and later. The + // provider must have enabled the MoveResourceState server capability to + // enable these requests. + MoveResourceState(context.Context, *MoveResourceStateRequest) (*MoveResourceStateResponse, error) } // ResourceServerWithMoveResourceState is a temporary interface for servers // to implement MoveResourceState RPC handling. // -// Deprecated: The MoveResourceState method will be moved into the -// ResourceServer interface and this interface will be removed in a future -// version. +// Deprecated: This interface will be removed in a future version. Use +// ResourceServer instead. type ResourceServerWithMoveResourceState interface { ResourceServer @@ -174,6 +184,10 @@ type ReadResourceRequest struct { // // This configuration will have known values for all fields. ProviderMeta *DynamicValue + + // ClientCapabilities defines optionally supported protocol features for the + // ReadResource RPC, such as forward-compatible Terraform behavior changes. + ClientCapabilities *ReadResourceClientCapabilities } // ReadResourceResponse is the response from the provider about the current @@ -198,6 +212,10 @@ type ReadResourceResponse struct { // with requests for this resource. This state will be associated with // the resource, but will not be considered when calculating diffs. Private []byte + + // Deferred is used to indicate to Terraform that the ReadResource operation + // needs to be deferred for a reason. + Deferred *Deferred } // PlanResourceChangeRequest is the request Terraform sends when it is @@ -264,6 +282,10 @@ type PlanResourceChangeRequest struct { // // This configuration will have known values for all fields. ProviderMeta *DynamicValue + + // ClientCapabilities defines optionally supported protocol features for the + // PlanResourceChange RPC, such as forward-compatible Terraform behavior changes. + ClientCapabilities *PlanResourceChangeClientCapabilities } // PlanResourceChangeResponse is the response from the provider about what the @@ -342,6 +364,10 @@ type PlanResourceChangeResponse struct { // // Deprecated: Really, just don't use this, you don't need it. UnsafeToUseLegacyTypeSystem bool + + // Deferred is used to indicate to Terraform that the PlanResourceChange operation + // needs to be deferred for a reason. 
+ Deferred *Deferred } // ApplyResourceChangeRequest is the request Terraform sends when it needs to @@ -462,6 +488,10 @@ type ImportResourceStateRequest struct { // for the ID, and use it to determine what resource or resources to // import. ID string + + // ClientCapabilities defines optionally supported protocol features for the + // ImportResourceState RPC, such as forward-compatible Terraform behavior changes. + ClientCapabilities *ImportResourceStateClientCapabilities } // ImportResourceStateResponse is the response from the provider about the @@ -475,6 +505,10 @@ type ImportResourceStateResponse struct { // requested resource or resources. Returning an empty slice indicates // a successful validation with no warnings or errors generated. Diagnostics []*Diagnostic + + // Deferred is used to indicate to Terraform that the ImportResourceState operation + // needs to be deferred for a reason. + Deferred *Deferred } // ImportedResource represents a single resource that a provider has diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/server.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/server.go index e8b5eb4dae5..cb79928c175 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/server.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/server.go @@ -49,7 +49,7 @@ const ( // // In the future, it may be possible to include this information directly // in the protocol buffers rather than recreating a constant here. - protocolVersionMinor uint = 4 + protocolVersionMinor uint = 6 ) // protocolVersion represents the combined major and minor version numbers of @@ -551,6 +551,7 @@ func (s *server) ConfigureProvider(ctx context.Context, protoReq *tfplugin6.Conf req := fromproto.ConfigureProviderRequest(protoReq) + tf6serverlogging.ConfigureProviderClientCapabilities(ctx, req.ClientCapabilities) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", req.Config) ctx = tf6serverlogging.DownstreamRequest(ctx) @@ -678,6 +679,7 @@ func (s *server) ReadDataSource(ctx context.Context, protoReq *tfplugin6.ReadDat req := fromproto.ReadDataSourceRequest(protoReq) + tf6serverlogging.ReadDataSourceClientCapabilities(ctx, req.ClientCapabilities) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", req.Config) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", req.ProviderMeta) @@ -693,6 +695,11 @@ func (s *server) ReadDataSource(ctx context.Context, protoReq *tfplugin6.ReadDat tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "State", resp.State) + tf6serverlogging.Deferred(ctx, resp.Deferred) + + if resp.Deferred != nil && (req.ClientCapabilities == nil || !req.ClientCapabilities.DeferralAllowed) { + resp.Diagnostics = append(resp.Diagnostics, invalidDeferredResponseDiag(resp.Deferred.Reason)) + } protoResp := toproto.ReadDataSource_Response(resp) @@ -767,6 +774,7 @@ func (s *server) ReadResource(ctx context.Context, protoReq *tfplugin6.ReadResou req := fromproto.ReadResourceRequest(protoReq) + tf6serverlogging.ReadResourceClientCapabilities(ctx, req.ClientCapabilities) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "CurrentState", req.CurrentState) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", req.ProviderMeta) 
logging.ProtocolPrivateData(ctx, s.protocolDataDir, rpc, "Request", "Private", req.Private) @@ -783,6 +791,11 @@ func (s *server) ReadResource(ctx context.Context, protoReq *tfplugin6.ReadResou tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "NewState", resp.NewState) logging.ProtocolPrivateData(ctx, s.protocolDataDir, rpc, "Response", "Private", resp.Private) + tf6serverlogging.Deferred(ctx, resp.Deferred) + + if resp.Deferred != nil && (req.ClientCapabilities == nil || !req.ClientCapabilities.DeferralAllowed) { + resp.Diagnostics = append(resp.Diagnostics, invalidDeferredResponseDiag(resp.Deferred.Reason)) + } protoResp := toproto.ReadResource_Response(resp) @@ -800,6 +813,7 @@ func (s *server) PlanResourceChange(ctx context.Context, protoReq *tfplugin6.Pla req := fromproto.PlanResourceChangeRequest(protoReq) + tf6serverlogging.PlanResourceChangeClientCapabilities(ctx, req.ClientCapabilities) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", req.Config) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "PriorState", req.PriorState) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProposedNewState", req.ProposedNewState) @@ -818,6 +832,11 @@ func (s *server) PlanResourceChange(ctx context.Context, protoReq *tfplugin6.Pla tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "PlannedState", resp.PlannedState) logging.ProtocolPrivateData(ctx, s.protocolDataDir, rpc, "Response", "PlannedPrivate", resp.PlannedPrivate) + tf6serverlogging.Deferred(ctx, resp.Deferred) + + if resp.Deferred != nil && (req.ClientCapabilities == nil || !req.ClientCapabilities.DeferralAllowed) { + resp.Diagnostics = append(resp.Diagnostics, invalidDeferredResponseDiag(resp.Deferred.Reason)) + } protoResp := toproto.PlanResourceChange_Response(resp) @@ -870,6 +889,8 @@ func (s *server) ImportResourceState(ctx context.Context, protoReq *tfplugin6.Im req := fromproto.ImportResourceStateRequest(protoReq) + tf6serverlogging.ImportResourceStateClientCapabilities(ctx, req.ClientCapabilities) + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ImportResourceState(ctx, req) @@ -885,6 +906,11 @@ func (s *server) ImportResourceState(ctx context.Context, protoReq *tfplugin6.Im logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response_ImportedResource", "State", importedResource.State) logging.ProtocolPrivateData(ctx, s.protocolDataDir, rpc, "Response_ImportedResource", "Private", importedResource.Private) } + tf6serverlogging.Deferred(ctx, resp.Deferred) + + if resp.Deferred != nil && (req.ClientCapabilities == nil || !req.ClientCapabilities.DeferralAllowed) { + resp.Diagnostics = append(resp.Diagnostics, invalidDeferredResponseDiag(resp.Deferred.Reason)) + } protoResp := toproto.ImportResourceState_Response(resp) @@ -900,37 +926,11 @@ func (s *server) MoveResourceState(ctx context.Context, protoReq *tfplugin6.Move logging.ProtocolTrace(ctx, "Received request") defer logging.ProtocolTrace(ctx, "Served request") - // Remove this check and error in preference of - // s.downstream.MoveResourceState below once ResourceServer interface - // implements the MoveResourceState method. 
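The RPC hunks above all repeat the same post-call guard. Written once as a standalone helper for clarity (hypothetical: the patch inlines the check per RPC, and invalidDeferredResponseDiag is the function this patch defines at the end of server.go below):

    package tf6server

    import "github.com/hashicorp/terraform-plugin-go/tfprotov6"

    // guardDeferral mirrors the inlined check: a deferred response may only
    // be forwarded when the request advertised deferral support; otherwise
    // it is surfaced as an "Invalid Deferred Response" error diagnostic.
    func guardDeferral(deferralAllowed bool, deferred *tfprotov6.Deferred, diags []*tfprotov6.Diagnostic) []*tfprotov6.Diagnostic {
        if deferred != nil && !deferralAllowed {
            return append(diags, invalidDeferredResponseDiag(deferred.Reason))
        }
        return diags
    }

With such a helper, each handler body would reduce to resp.Diagnostics = guardDeferral(req.ClientCapabilities != nil && req.ClientCapabilities.DeferralAllowed, resp.Deferred, resp.Diagnostics).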
- // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/363 - // nolint:staticcheck - resourceServerWMRS, ok := s.downstream.(tfprotov6.ResourceServerWithMoveResourceState) - - if !ok { - logging.ProtocolError(ctx, "ProviderServer does not implement ResourceServerWithMoveResourceState") - - protoResp := &tfplugin6.MoveResourceState_Response{ - Diagnostics: []*tfplugin6.Diagnostic{ - { - Severity: tfplugin6.Diagnostic_ERROR, - Summary: "Provider Move Resource State Not Implemented", - Detail: "A MoveResourceState call was received by the provider, however the provider does not implement the call. " + - "Either upgrade the provider to a version that implements move resource state support or this is a bug in Terraform that should be reported to the Terraform maintainers.", - }, - }, - } - - return protoResp, nil - } - req := fromproto.MoveResourceStateRequest(protoReq) ctx = tf6serverlogging.DownstreamRequest(ctx) - // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/363 - // resp, err := s.downstream.MoveResourceState(ctx, req) - resp, err := resourceServerWMRS.MoveResourceState(ctx, req) + resp, err := s.downstream.MoveResourceState(ctx, req) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) @@ -954,26 +954,6 @@ func (s *server) CallFunction(ctx context.Context, protoReq *tfplugin6.CallFunct logging.ProtocolTrace(ctx, "Received request") defer logging.ProtocolTrace(ctx, "Served request") - // Remove this check and error in preference of s.downstream.CallFunction - // below once ProviderServer interface requires FunctionServer. - // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 - functionServer, ok := s.downstream.(tfprotov6.FunctionServer) - - if !ok { - logging.ProtocolError(ctx, "ProviderServer does not implement FunctionServer") - - text := "Provider Functions Not Implemented: A provider-defined function call was received by the provider, however the provider does not implement functions. " + - "Either upgrade the provider to a version that implements provider-defined functions or this is a bug in Terraform that should be reported to the Terraform maintainers." - - protoResp := &tfplugin6.CallFunction_Response{ - Error: &tfplugin6.FunctionError{ - Text: text, - }, - } - - return protoResp, nil - } - req := fromproto.CallFunctionRequest(protoReq) for position, argument := range req.Arguments { @@ -982,9 +962,7 @@ func (s *server) CallFunction(ctx context.Context, protoReq *tfplugin6.CallFunct ctx = tf6serverlogging.DownstreamRequest(ctx) - // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 - // resp, err := s.downstream.CallFunction(ctx, req) - resp, err := functionServer.CallFunction(ctx, req) + resp, err := s.downstream.CallFunction(ctx, req) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]any{logging.KeyError: err}) @@ -1007,28 +985,11 @@ func (s *server) GetFunctions(ctx context.Context, protoReq *tfplugin6.GetFuncti logging.ProtocolTrace(ctx, "Received request") defer logging.ProtocolTrace(ctx, "Served request") - // Remove this check and response in preference of s.downstream.GetFunctions - // below once ProviderServer interface requires FunctionServer. 
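Removing these type-assertion fallbacks means ProviderServer now requires FunctionServer (and, per the resource.go hunk above, MoveResourceState) outright, so even a provider with no functions must supply the RPCs itself. A minimal sketch of such stubs, assuming the tfprotov6 response shapes used in the removed fallback code above; the functionlessProvider name is invented:

    package example

    import (
        "context"

        "github.com/hashicorp/terraform-plugin-go/tfprotov6"
    )

    // functionlessProvider is a hypothetical provider that defines no
    // provider functions but must still satisfy FunctionServer.
    type functionlessProvider struct{}

    // GetFunctions reports an empty function map.
    func (p *functionlessProvider) GetFunctions(ctx context.Context, req *tfprotov6.GetFunctionsRequest) (*tfprotov6.GetFunctionsResponse, error) {
        return &tfprotov6.GetFunctionsResponse{
            Functions: map[string]*tfprotov6.Function{},
        }, nil
    }

    // CallFunction should be unreachable for such a provider, but the
    // method must still exist to satisfy the interface.
    func (p *functionlessProvider) CallFunction(ctx context.Context, req *tfprotov6.CallFunctionRequest) (*tfprotov6.CallFunctionResponse, error) {
        return &tfprotov6.CallFunctionResponse{
            Error: &tfprotov6.FunctionError{Text: "this provider defines no functions"},
        }, nil
    }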
- // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 - functionServer, ok := s.downstream.(tfprotov6.FunctionServer) - - if !ok { - logging.ProtocolWarn(ctx, "ProviderServer does not implement FunctionServer") - - protoResp := &tfplugin6.GetFunctions_Response{ - Functions: map[string]*tfplugin6.Function{}, - } - - return protoResp, nil - } - req := fromproto.GetFunctionsRequest(protoReq) ctx = tf6serverlogging.DownstreamRequest(ctx) - // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 - // resp, err := s.downstream.GetFunctions(ctx, req) - resp, err := functionServer.GetFunctions(ctx, req) + resp, err := s.downstream.GetFunctions(ctx, req) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]any{logging.KeyError: err}) @@ -1041,3 +1002,13 @@ func (s *server) GetFunctions(ctx context.Context, protoReq *tfplugin6.GetFuncti return protoResp, nil } + +func invalidDeferredResponseDiag(reason tfprotov6.DeferredReason) *tfprotov6.Diagnostic { + return &tfprotov6.Diagnostic{ + Severity: tfprotov6.DiagnosticSeverityError, + Summary: "Invalid Deferred Response", + Detail: "Provider returned a deferred response but the Terraform request did not indicate support for deferred actions. " + + "This is an issue with the provider and should be reported to the provider developers.\n\n" + + fmt.Sprintf("Deferred reason - %q", reason.String()), + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_msgpack.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_msgpack.go index ed03ef9833d..08fb152078e 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_msgpack.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_msgpack.go @@ -446,7 +446,7 @@ func marshalMsgPackNumber(val Value, typ Type, p *AttributePath, enc *msgpack.En if err != nil { return p.NewErrorf("error encoding int value: %w", err) } - } else if fv, acc := n.Float64(); acc == big.Exact { + } else if fv, acc := n.Float64(); acc == big.Exact && !n.IsInt() { err := enc.EncodeFloat64(fv) if err != nil { return p.NewErrorf("error encoding float value: %w", err) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/aliases.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/aliases.go deleted file mode 100644 index 275137d8129..00000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/aliases.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package resource - -import ( - "context" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" -) - -// Deprecated: Use helper/id package instead. This is required for migrating acceptance -// testing to terraform-plugin-testing. -const UniqueIdPrefix = id.UniqueIdPrefix - -// Helper for a resource to generate a unique identifier w/ default prefix - -// -// Deprecated: Use helper/id package instead. This is required for migrating acceptance -// testing to terraform-plugin-testing. -func UniqueId() string { - return id.UniqueId() -} - -// Deprecated: Use helper/id package instead. This is required for migrating acceptance -// testing to terraform-plugin-testing.
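The one-line value_msgpack.go change above is easy to miss but consequential: big.Float reports whole numbers as IsInt() even when they overflow int64, and many of those (powers of two, for instance) are still big.Exact as float64, so the old code emitted them as msgpack floats. The added !n.IsInt() makes such values fall through to the exact textual fallback later in marshalMsgPackNumber instead. A self-contained demonstration of the boundary case, using only the standard library:

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        // 2^64: a whole number that overflows int64 but is exactly
        // representable as a float64.
        n, _ := new(big.Float).SetString("18446744073709551616")

        _, intAcc := n.Int64() // (math.MaxInt64, big.Below): not exact
        fv, floatAcc := n.Float64()

        fmt.Println(n.IsInt())                 // true
        fmt.Println(intAcc == big.Exact)       // false: EncodeInt is unusable
        fmt.Println(floatAcc == big.Exact, fv) // true: old code chose EncodeFloat64
        fmt.Println(n.Text('f', -1))           // "18446744073709551616": exact fallback
    }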
-const UniqueIDSuffixLength = id.UniqueIDSuffixLength - -// Helper for a resource to generate a unique identifier w/ given prefix -// -// After the prefix, the ID consists of an incrementing 26 digit value (to match -// previous timestamp output). After the prefix, the ID consists of a timestamp -// and an incrementing 8 hex digit value The timestamp means that multiple IDs -// created with the same prefix will sort in the order of their creation, even -// across multiple terraform executions, as long as the clock is not turned back -// between calls, and as long as any given terraform execution generates fewer -// than 4 billion IDs. -// -// Deprecated: Use helper/id package instead. This is required for migrating acceptance -// testing to terraform-plugin-testing. -func PrefixedUniqueId(prefix string) string { - return id.PrefixedUniqueId(prefix) -} - -// Deprecated: Use helper/retry package instead. This is required for migrating acceptance -// testing to terraform-plugin-testing. -type NotFoundError = retry.NotFoundError - -// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending -// -// Deprecated: Use helper/retry package instead. This is required for migrating acceptance -// testing to terraform-plugin-testing. -type UnexpectedStateError = retry.UnexpectedStateError - -// TimeoutError is returned when WaitForState times out -// -// Deprecated: Use helper/retry package instead. This is required for migrating acceptance -// testing to terraform-plugin-testing. -type TimeoutError = retry.TimeoutError - -// StateRefreshFunc is a function type used for StateChangeConf that is -// responsible for refreshing the item being watched for a state change. -// -// It returns three results. `result` is any object that will be returned -// as the final object after waiting for state change. This allows you to -// return the final updated object, for example an EC2 instance after refreshing -// it. A nil result represents not found. -// -// `state` is the latest state of that object. And `err` is any error that -// may have happened while refreshing the state. -// -// Deprecated: Use helper/retry package instead. This is required for migrating acceptance -// testing to terraform-plugin-testing. -type StateRefreshFunc = retry.StateRefreshFunc - -// StateChangeConf is the configuration struct used for `WaitForState`. -// -// Deprecated: Use helper/retry package instead. This is required for migrating acceptance -// testing to terraform-plugin-testing. -type StateChangeConf = retry.StateChangeConf - -// RetryFunc is the function retried until it succeeds. -// -// Deprecated: Use helper/retry package instead. This is required for migrating acceptance -// testing to terraform-plugin-testing. -type RetryFunc = retry.RetryFunc - -// RetryContext is a basic wrapper around StateChangeConf that will just retry -// a function until it no longer returns an error. -// -// Cancellation from the passed in context will propagate through to the -// underlying StateChangeConf -// -// Deprecated: Use helper/retry package instead. This is required for migrating acceptance -// testing to terraform-plugin-testing. -func RetryContext(ctx context.Context, timeout time.Duration, f RetryFunc) error { - return retry.RetryContext(ctx, timeout, f) -} - -// Retry is a basic wrapper around StateChangeConf that will just retry -// a function until it no longer returns an error. -// -// Deprecated: Use helper/retry package instead. 
This is required for migrating acceptance -// testing to terraform-plugin-testing. -func Retry(timeout time.Duration, f RetryFunc) error { - return retry.Retry(timeout, f) -} - -// RetryError is the required return type of RetryFunc. It forces client code -// to choose whether or not a given error is retryable. -// -// Deprecated: Use helper/retry package instead. This is required for migrating acceptance -// testing to terraform-plugin-testing. -type RetryError = retry.RetryError - -// RetryableError is a helper to create a RetryError that's retryable from a -// given error. To prevent logic errors, will return an error when passed a -// nil error. -// -// Deprecated: Use helper/retry package instead. This is required for migrating acceptance -// testing to terraform-plugin-testing. -func RetryableError(err error) *RetryError { - r := retry.RetryableError(err) - - return &RetryError{ - Err: r.Err, - Retryable: r.Retryable, - } -} - -// NonRetryableError is a helper to create a RetryError that's _not_ retryable -// from a given error. To prevent logic errors, will return an error when -// passed a nil error. -// -// Deprecated: Use helper/retry package instead. This is required for migrating acceptance -// testing to terraform-plugin-testing. -func NonRetryableError(err error) *RetryError { - r := retry.NonRetryableError(err) - - return &RetryError{ - Err: r.Err, - Retryable: r.Retryable, - } -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_validate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_validate.go deleted file mode 100644 index 7dbf883b504..00000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_validate.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package resource - -import ( - "context" - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" -) - -// testStepValidateRequest contains data for the (TestStep).validate() method. -type testStepValidateRequest struct { - // StepNumber is the index of the TestStep in the TestCase.Steps. - StepNumber int - - // TestCaseHasProviders is enabled if the TestCase has set any of - // ExternalProviders, ProtoV5ProviderFactories, ProtoV6ProviderFactories, - // or ProviderFactories. - TestCaseHasProviders bool -} - -// hasProviders returns true if the TestStep has set any of the -// ExternalProviders, ProtoV5ProviderFactories, ProtoV6ProviderFactories, or -// ProviderFactories fields. -func (s TestStep) hasProviders(_ context.Context) bool { - if len(s.ExternalProviders) > 0 { - return true - } - - if len(s.ProtoV5ProviderFactories) > 0 { - return true - } - - if len(s.ProtoV6ProviderFactories) > 0 { - return true - } - - if len(s.ProviderFactories) > 0 { - return true - } - - return false -} - -// validate ensures the TestStep is valid based on the following criteria: -// -// - Config or ImportState or RefreshState is set. -// - Config and RefreshState are not both set. -// - RefreshState and Destroy are not both set. -// - RefreshState is not the first TestStep. -// - Providers are not specified (ExternalProviders, -// ProtoV5ProviderFactories, ProtoV6ProviderFactories, ProviderFactories) -// if specified at the TestCase level. 
-// - Providers are specified (ExternalProviders, ProtoV5ProviderFactories, -// ProtoV6ProviderFactories, ProviderFactories) if not specified at the -// TestCase level. -// - No overlapping ExternalProviders and ProviderFactories entries -// - ResourceName is not empty when ImportState is true, ImportStateIdFunc -// is not set, and ImportStateId is not set. -func (s TestStep) validate(ctx context.Context, req testStepValidateRequest) error { - ctx = logging.TestStepNumberContext(ctx, req.StepNumber) - - logging.HelperResourceTrace(ctx, "Validating TestStep") - - if s.Config == "" && !s.ImportState && !s.RefreshState { - err := fmt.Errorf("TestStep missing Config or ImportState or RefreshState") - logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) - return err - } - - if s.Config != "" && s.RefreshState { - err := fmt.Errorf("TestStep cannot have Config and RefreshState") - logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) - return err - } - - if s.RefreshState && s.Destroy { - err := fmt.Errorf("TestStep cannot have RefreshState and Destroy") - logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) - return err - } - - if s.RefreshState && req.StepNumber == 1 { - err := fmt.Errorf("TestStep cannot have RefreshState as first step") - logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) - return err - } - - if s.ImportState && s.RefreshState { - err := fmt.Errorf("TestStep cannot have ImportState and RefreshState in same step") - logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) - return err - } - - for name := range s.ExternalProviders { - if _, ok := s.ProviderFactories[name]; ok { - err := fmt.Errorf("TestStep provider %q set in both ExternalProviders and ProviderFactories", name) - logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) - return err - } - } - - hasProviders := s.hasProviders(ctx) - - if req.TestCaseHasProviders && hasProviders { - err := fmt.Errorf("Providers must only be specified either at the TestCase or TestStep level") - logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) - return err - } - - if !req.TestCaseHasProviders && !hasProviders { - err := fmt.Errorf("Providers must be specified at the TestCase level or in all TestStep") - logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) - return err - } - - if s.ImportState { - if s.ImportStateId == "" && s.ImportStateIdFunc == nil && s.ResourceName == "" { - err := fmt.Errorf("TestStep ImportState must be specified with ImportStateId, ImportStateIdFunc, or ResourceName") - logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) - return err - } - } - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/util.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/util.go deleted file mode 100644 index 0d4bbe52667..00000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/util.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package plugintest - -import ( - "fmt" - "os" - "path/filepath" -) - -func symlinkFile(src string, dest string) error { - err := os.Symlink(src, dest) - - if err != nil { - return fmt.Errorf("unable to symlink %q to %q: %w", src, dest, err) - } - - srcInfo, err := os.Stat(src) - - if err != nil { - return fmt.Errorf("unable to stat %q: %w", src, err) - } - - err = os.Chmod(dest, srcInfo.Mode()) - - if err != nil { - return fmt.Errorf("unable to set %q permissions: %w", dest, err) - } - - return nil -} - -// symlinkDirectoriesOnly finds only the first-level child directories in srcDir -// and symlinks them into destDir. -// Unlike symlinkDir, this is done non-recursively in order to limit the number -// of file descriptors used. -func symlinkDirectoriesOnly(srcDir string, destDir string) error { - srcInfo, err := os.Stat(srcDir) - if err != nil { - return fmt.Errorf("unable to stat source directory %q: %w", srcDir, err) - } - - err = os.MkdirAll(destDir, srcInfo.Mode()) - if err != nil { - return fmt.Errorf("unable to make destination directory %q: %w", destDir, err) - } - - dirEntries, err := os.ReadDir(srcDir) - - if err != nil { - return fmt.Errorf("unable to read source directory %q: %w", srcDir, err) - } - - for _, dirEntry := range dirEntries { - if !dirEntry.IsDir() { - continue - } - - srcPath := filepath.Join(srcDir, dirEntry.Name()) - destPath := filepath.Join(destDir, dirEntry.Name()) - err := symlinkFile(srcPath, destPath) - - if err != nil { - return fmt.Errorf("unable to symlink directory %q to %q: %w", srcPath, destPath, err) - } - } - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/LICENSE b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/LICENSE new file mode 100644 index 00000000000..07c599410bf --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/LICENSE @@ -0,0 +1,375 @@ +Copyright (c) 2014 HashiCorp, Inc. + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. 
"Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. 
You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. 
No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. 
+ +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/config/config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/config/config.go new file mode 100644 index 00000000000..4a663610fd9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/config/config.go @@ -0,0 +1,38 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package config + +// TestStepConfigFunc is the callback type used with acceptance tests to +// specify a string which either identifies a directory containing +// Terraform configuration files, or a file that contains Terraform +// configuration. +type TestStepConfigFunc func(TestStepConfigRequest) string + +// TestStepConfigRequest defines the request supplied to types +// implementing TestStepConfigFunc. StepNumber is one-based +// and is used in the predefined helper functions: +// +// - [config.TestStepDirectory] +// - [config.TestStepFile]. +// +// TestName is used in the predefined helper functions: +// +// - [config.TestNameDirectory] +// - [config.TestStepDirectory] +// - [config.TestNameFile] +// - [config.TestStepFile] +type TestStepConfigRequest struct { + StepNumber int + TestName string +} + +// Exec executes TestStepConfigFunc if it is not nil, otherwise an +// empty string is returned. +func (f TestStepConfigFunc) Exec(req TestStepConfigRequest) string { + if f != nil { + return f(req) + } + + return "" +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/config/directory.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/config/directory.go new file mode 100644 index 00000000000..c3c9ab0c0f4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/config/directory.go @@ -0,0 +1,63 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package config + +import ( + "path/filepath" + "strconv" +) + +// StaticDirectory returns the supplied directory. +func StaticDirectory(directory string) func(TestStepConfigRequest) string { + return func(_ TestStepConfigRequest) string { + return directory + } +} + +// TestNameDirectory returns the name of the test prefixed with +// "testdata". +// +// For example, given test code: +// +// func TestExampleCloudThing_basic(t *testing.T) { +// resource.Test(t, resource.TestCase{ +// Steps: []resource.TestStep{ +// { +// ConfigDirectory: config.TestNameDirectory(), +// }, +// }, +// }) +// } +// +// The testing configurations will be expected in the +// testdata/TestExampleCloudThing_basic/ directory. 
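+//
+// A custom callback with the same signature can also be supplied directly
+// when the predefined helpers do not fit; a minimal sketch (the "fixtures"
+// root directory is illustrative, not part of this package):
+//
+//	ConfigDirectory: func(req config.TestStepConfigRequest) string {
+//		return filepath.Join("fixtures", req.TestName)
+//	},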
+func TestNameDirectory() func(TestStepConfigRequest) string { + return func(req TestStepConfigRequest) string { + return filepath.Join("testdata", req.TestName) + } +} + +// TestStepDirectory returns the name of the test suffixed with the +// test step number and prefixed with "testdata". +// +// For example, given test code: +// +// func TestExampleCloudThing_basic(t *testing.T) { +// resource.Test(t, resource.TestCase{ +// Steps: []resource.TestStep{ +// { +// ConfigDirectory: config.TestStepDirectory(), +// }, +// }, +// }) +// } +// +// The testing configurations will be expected in the +// testdata/TestExampleCloudThing_basic/1 directory as +// TestStepConfigRequest.StepNumber is one-based. +func TestStepDirectory() func(TestStepConfigRequest) string { + return func(req TestStepConfigRequest) string { + return filepath.Join("testdata", req.TestName, strconv.Itoa(req.StepNumber)) + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/config/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/config/doc.go new file mode 100644 index 00000000000..e85d5f81c90 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/config/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package config implements functionality for supporting native +// Terraform configuration and variables for testing purposes. +package config diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/config/file.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/config/file.go new file mode 100644 index 00000000000..1974c40651d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/config/file.go @@ -0,0 +1,63 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package config + +import ( + "path/filepath" + "strconv" +) + +// StaticFile returns the supplied file. +func StaticFile(file string) func(TestStepConfigRequest) string { + return func(_ TestStepConfigRequest) string { + return file + } +} + +// TestNameFile returns the name of the test suffixed with the supplied +// file and prefixed with "testdata". +// +// For example, given test code: +// +// func TestExampleCloudThing_basic(t *testing.T) { +// resource.Test(t, resource.TestCase{ +// Steps: []resource.TestStep{ +// { +// ConfigFile: config.TestNameFile("test.tf"), +// }, +// }, +// }) +// } +// +// The testing configuration will be expected in the +// testdata/TestExampleCloudThing_basic/test.tf file. +func TestNameFile(file string) func(TestStepConfigRequest) string { + return func(req TestStepConfigRequest) string { + return filepath.Join("testdata", req.TestName, file) + } +} + +// TestStepFile returns the name of the test suffixed with the test +// step number and the supplied file, and prefixed with "testdata". +// +// For example, given test code: +// +// func TestExampleCloudThing_basic(t *testing.T) { +// resource.Test(t, resource.TestCase{ +// Steps: []resource.TestStep{ +// { +// ConfigFile: config.TestStepFile("test.tf"), +// }, +// }, +// }) +// } +// +// The testing configuration will be expected in the +// testdata/TestExampleCloudThing_basic/1/test.tf file +// as TestStepConfigRequest.StepNumber is one-based. 
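+//
+// Taken together, the helpers in this package assume an on-disk layout
+// along these lines (the test name and file name are illustrative):
+//
+//	testdata/
+//	  TestExampleCloudThing_basic/       config.TestNameDirectory()
+//	    test.tf                          config.TestNameFile("test.tf")
+//	    1/                               config.TestStepDirectory(), step 1
+//	      test.tf                        config.TestStepFile("test.tf"), step 1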
+func TestStepFile(file string) func(TestStepConfigRequest) string { + return func(req TestStepConfigRequest) string { + return filepath.Join("testdata", req.TestName, strconv.Itoa(req.StepNumber), file) + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/config/variable.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/config/variable.go new file mode 100644 index 00000000000..76c2a511072 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/config/variable.go @@ -0,0 +1,326 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package config + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "reflect" + + "golang.org/x/exp/constraints" +) + +const autoTFVarsJson = "terraform-plugin-testing.auto.tfvars.json" + +// Variable interface is an alias to json.Marshaler. +type Variable interface { + json.Marshaler +} + +// Variables is a type holding a key-value map of variable names +// to types implementing the Variable interface. +type Variables map[string]Variable + +// Write creates a file in the destination supplied +// containing JSON encoded Variables. +func (v Variables) Write(dest string) error { + if len(v) == 0 { + return nil + } + + b, err := json.Marshal(v) + + if err != nil { + return fmt.Errorf("cannot marshal variables: %s", err) + } + + outFilename := filepath.Join(dest, autoTFVarsJson) + + err = os.WriteFile(outFilename, b, 0600) + + if err != nil { + return fmt.Errorf("cannot write variables file: %s", err) + } + + return nil +} + +var _ Variable = boolVariable{} + +// boolVariable supports JSON encoding of a bool. +type boolVariable struct { + value bool +} + +// MarshalJSON returns the JSON encoding of boolVariable. +func (v boolVariable) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +// BoolVariable returns boolVariable which implements Variable. +func BoolVariable(value bool) boolVariable { + return boolVariable{ + value: value, + } +} + +var _ Variable = floatVariable{} + +// floatVariable supports JSON encoding of any floating-point type. +type floatVariable struct { + value any +} + +// MarshalJSON returns the JSON encoding of floatVariable. +func (v floatVariable) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +// FloatVariable returns floatVariable which implements Variable. +func FloatVariable[T constraints.Float](value T) floatVariable { + return floatVariable{ + value: value, + } +} + +var _ Variable = integerVariable{} + +// integerVariable supports JSON encoding of any integer type. +type integerVariable struct { + value any +} + +// MarshalJSON returns the JSON encoding of integerVariable. +func (v integerVariable) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +// IntegerVariable returns integerVariable which implements Variable. +func IntegerVariable[T constraints.Integer](value T) integerVariable { + return integerVariable{ + value: value, + } +} + +var _ Variable = listVariable{} + +// listVariable supports JSON encoding of slice of Variable. +type listVariable struct { + value []Variable +} + +// MarshalJSON returns the JSON encoding of listVariable. +// Every Variable within a listVariable must be the same +// underlying type. 
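+//
+// For example (values are illustrative), this marshals successfully:
+//
+//	config.ListVariable(config.StringVariable("a"), config.StringVariable("b"))
+//
+// while mixing element types returns an error; use TupleVariable, defined
+// later in this file, for heterogeneous elements:
+//
+//	config.ListVariable(config.StringVariable("a"), config.IntegerVariable(1))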
+func (v listVariable) MarshalJSON() ([]byte, error) {
+	if !typesEq(v.value) {
+		return nil, errors.New("lists must contain the same type")
+	}
+
+	return json.Marshal(v.value)
+}
+
+// ListVariable returns listVariable which implements Variable.
+func ListVariable(value ...Variable) listVariable {
+	return listVariable{
+		value: value,
+	}
+}
+
+var _ Variable = mapVariable{}
+
+// mapVariable supports JSON encoding of a key-value map of
+// string to Variable.
+type mapVariable struct {
+	value map[string]Variable
+}
+
+// MarshalJSON returns the JSON encoding of mapVariable.
+// Every Variable in a mapVariable must be the same
+// underlying type.
+func (v mapVariable) MarshalJSON() ([]byte, error) {
+	var variables []Variable
+
+	for _, variable := range v.value {
+		variables = append(variables, variable)
+	}
+
+	if !typesEq(variables) {
+		return nil, errors.New("maps must contain the same type")
+	}
+
+	return json.Marshal(v.value)
+}
+
+// MapVariable returns mapVariable which implements Variable.
+func MapVariable(value map[string]Variable) mapVariable {
+	return mapVariable{
+		value: value,
+	}
+}
+
+var _ Variable = objectVariable{}
+
+// objectVariable supports JSON encoding of a key-value
+// map of string to Variable in which each Variable
+// can be a different underlying type.
+type objectVariable struct {
+	value map[string]Variable
+}
+
+// MarshalJSON returns the JSON encoding of objectVariable.
+func (v objectVariable) MarshalJSON() ([]byte, error) {
+	b, err := json.Marshal(v.value)
+
+	if err != nil {
+		innerErr := err
+
+		// Unwrap is used here to expose the initial error, for example
+		// "maps must contain the same type", whilst removing any errors
+		// related to the implementation (i.e., the usage of
+		// encoding/json in this instance).
+		for errors.Unwrap(innerErr) != nil {
+			innerErr = errors.Unwrap(err)
+		}
+
+		return nil, innerErr
+	}
+
+	return b, nil
+}
+
+// ObjectVariable returns objectVariable which implements Variable.
+func ObjectVariable(value map[string]Variable) objectVariable {
+	return objectVariable{
+		value: value,
+	}
+}
+
+var _ Variable = setVariable{}
+
+// setVariable supports JSON encoding of a slice of Variable.
+type setVariable struct {
+	value []Variable
+}
+
+// MarshalJSON returns the JSON encoding of setVariable.
+// Every Variable in a setVariable must be the same
+// underlying type.
+func (v setVariable) MarshalJSON() ([]byte, error) {
+	for kx, x := range v.value {
+		for ky := kx + 1; ky < len(v.value); ky++ {
+			y := v.value[ky]
+
+			if _, ok := x.(setVariable); !ok {
+				continue
+			}
+
+			if _, ok := y.(setVariable); !ok {
+				continue
+			}
+
+			if reflect.DeepEqual(x, y) {
+				return nil, errors.New("sets must contain unique elements")
+			}
+		}
+	}
+
+	if !typesEq(v.value) {
+		return nil, errors.New("sets must contain the same type")
+	}
+
+	return json.Marshal(v.value)
+}
+
+// SetVariable returns setVariable which implements Variable.
+func SetVariable(value ...Variable) setVariable {
+	return setVariable{
+		value: value,
+	}
+}
+
+var _ Variable = stringVariable{}
+
+// stringVariable supports JSON encoding of a string.
+type stringVariable struct {
+	value string
+}
+
+// MarshalJSON returns the JSON encoding of stringVariable.
+func (v stringVariable) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.value)
+}
+
+// StringVariable returns stringVariable which implements Variable.
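+//
+// Variables are typically composed into a Variables map on a TestStep, for
+// example (variable names and values are illustrative):
+//
+//	ConfigVariables: config.Variables{
+//		"name":  config.StringVariable("example"),
+//		"count": config.IntegerVariable(2),
+//	},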
+func StringVariable(value string) stringVariable { + return stringVariable{ + value: value, + } +} + +var _ Variable = tupleVariable{} + +// tupleVariable supports JSON encoding of a slice of Variable +// in which each element in the slice can be a different +// underlying type. +type tupleVariable struct { + value []Variable +} + +// MarshalJSON returns the JSON encoding of tupleVariable. +func (v tupleVariable) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +// TupleVariable returns tupleVariable which implements Variable. +func TupleVariable(value ...Variable) tupleVariable { + return tupleVariable{ + value: value, + } +} + +// typesEq verifies that every element in the supplied slice of Variable +// is the same underlying type. +func typesEq(variables []Variable) bool { + var t reflect.Type + + for _, variable := range variables { + switch x := variable.(type) { + case listVariable: + if !typesEq(x.value) { + return false + } + case mapVariable: + var vars []Variable + + for _, v := range x.value { + vars = append(vars, v) + } + + if !typesEq(vars) { + return false + } + case setVariable: + if !typesEq(x.value) { + return false + } + } + + typeOfVariable := reflect.TypeOf(variable) + + if t == nil { + t = typeOfVariable + continue + } + + if t != typeOfVariable { + return false + } + } + + return true +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/environment_variables.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/environment_variables.go similarity index 100% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/environment_variables.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/environment_variables.go diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/error.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/error.go new file mode 100644 index 00000000000..3c990e35fc8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/error.go @@ -0,0 +1,132 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package resource + +import ( + "fmt" + "strings" + "time" +) + +// NotFoundError represents when a StateRefreshFunc returns a nil result +// during a StateChangeConf waiter method and that StateChangeConf is +// configured for specific targets. +// +// Deprecated: Copy this type to the provider codebase or use +// github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry.NotFoundError. +type NotFoundError struct { + LastError error + LastRequest interface{} + LastResponse interface{} + Message string + Retries int +} + +// Error returns the Message string, if non-empty, or a string indicating +// the resource could not be found. +// +// Deprecated: Copy this method to the provider codebase or use +// github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry.NotFoundError. +func (e *NotFoundError) Error() string { + if e.Message != "" { + return e.Message + } + + if e.Retries > 0 { + return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries) + } + + return "couldn't find resource" +} + +// Unwrap returns the LastError, compatible with errors.Unwrap. 
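+//
+// For example, a caller can detect this error type through the wrapping
+// chain (a sketch; err is assumed to be a value returned from a waiter):
+//
+//	var nfErr *resource.NotFoundError
+//	if errors.As(err, &nfErr) {
+//		// handle the not-found case
+//	}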
+// +// Deprecated: Copy this method to the provider codebase or use +// github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry.NotFoundError. +func (e *NotFoundError) Unwrap() error { + return e.LastError +} + +// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending +// +// Deprecated: Copy this type to the provider codebase or use +// github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry.UnexpectedStateError. +type UnexpectedStateError struct { + LastError error + State string + ExpectedState []string +} + +// Error returns a string with the unexpected state value, the desired target, +// and any last error. +// +// Deprecated: Copy this method to the provider codebase or use +// github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry.UnexpectedStateError. +func (e *UnexpectedStateError) Error() string { + return fmt.Sprintf( + "unexpected state '%s', wanted target '%s'. last error: %s", + e.State, + strings.Join(e.ExpectedState, ", "), + e.LastError, + ) +} + +// Unwrap returns the LastError, compatible with errors.Unwrap. +// +// Deprecated: Copy this method to the provider codebase or use +// github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry.UnexpectedStateError. +func (e *UnexpectedStateError) Unwrap() error { + return e.LastError +} + +// TimeoutError is returned when WaitForState times out +// +// Deprecated: Copy this type to the provider codebase or use +// github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry.TimeoutError. +type TimeoutError struct { + LastError error + LastState string + Timeout time.Duration + ExpectedState []string +} + +// Error returns a string with any information available. +// +// Deprecated: Copy this method to the provider codebase or use +// github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry.TimeoutError. +func (e *TimeoutError) Error() string { + expectedState := "resource to be gone" + if len(e.ExpectedState) > 0 { + expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", ")) + } + + extraInfo := make([]string, 0) + if e.LastState != "" { + extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState)) + } + if e.Timeout > 0 { + extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String())) + } + + suffix := "" + if len(extraInfo) > 0 { + suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", ")) + } + + if e.LastError != nil { + return fmt.Sprintf("timeout while waiting for %s%s: %s", + expectedState, suffix, e.LastError) + } + + return fmt.Sprintf("timeout while waiting for %s%s", + expectedState, suffix) +} + +// Unwrap returns the LastError, compatible with errors.Unwrap. +// +// Deprecated: Copy this method to the provider codebase or use +// github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry.TimeoutError. +func (e *TimeoutError) Unwrap() error { + return e.LastError +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/id.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/id.go new file mode 100644 index 00000000000..81c1f2e8e8d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/id.go @@ -0,0 +1,62 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package resource
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+)
+
+// UniqueIdPrefix is a string prefix automatically added to return values of
+// the UniqueId function.
+//
+// Deprecated: Copy this value to the provider codebase or use
+// github.com/hashicorp/terraform-plugin-sdk/v2/helper/id.UniquePrefix.
+const UniqueIdPrefix = `terraform-`
+
+// idCounter is a monotonic counter for generating ordered unique ids.
+var idMutex sync.Mutex
+var idCounter uint32
+
+// Helper for a resource to generate a unique identifier w/ default prefix
+//
+// Deprecated: Copy this function to the provider codebase or use
+// github.com/hashicorp/terraform-plugin-sdk/v2/helper/id.Unique.
+func UniqueId() string {
+	return PrefixedUniqueId(UniqueIdPrefix)
+}
+
+// UniqueIDSuffixLength is the string length of the suffix generated by
+// PrefixedUniqueId. This can be used by length validation functions to
+// ensure prefixes are the correct length for the target field.
+//
+// Deprecated: Copy this value to the provider codebase or use
+// github.com/hashicorp/terraform-plugin-sdk/v2/helper/id.UniqueSuffixLength.
+const UniqueIDSuffixLength = 26
+
+// Helper for a resource to generate a unique identifier w/ given prefix
+//
+// After the prefix, the ID consists of a timestamp and an incrementing 8 hex
+// digit value, for a 26 character suffix overall (matching the length of the
+// previous timestamp-only output). The timestamp means that multiple IDs
+// created with the same prefix will sort in the order of their creation, even
+// across multiple terraform executions, as long as the clock is not turned
+// back between calls, and as long as any given terraform execution generates
+// fewer than 4 billion IDs.
+//
+// Deprecated: Copy this function to the provider codebase or use
+// github.com/hashicorp/terraform-plugin-sdk/v2/helper/id.PrefixedUnique.
+func PrefixedUniqueId(prefix string) string {
+	// Be precise to 4 digits of fractional seconds, but remove the dot before the
+	// fractional seconds.
+	timestamp := strings.Replace(
+		time.Now().UTC().Format("20060102150405.0000"), ".", "", 1)
+
+	idMutex.Lock()
+	defer idMutex.Unlock()
+	idCounter++
+	return fmt.Sprintf("%s%s%08x", prefix, timestamp, idCounter)
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/json.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/json.go
similarity index 100%
rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/json.go
rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/json.go
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/plan_checks.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/plan_checks.go
new file mode 100644
index 00000000000..712e3dbdfe7
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/plan_checks.go
@@ -0,0 +1,32 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package resource + +import ( + "context" + + tfjson "github.com/hashicorp/terraform-json" + "github.com/hashicorp/terraform-plugin-testing/internal/errorshim" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/mitchellh/go-testing-interface" +) + +func runPlanChecks(ctx context.Context, t testing.T, plan *tfjson.Plan, planChecks []plancheck.PlanCheck) error { + t.Helper() + + var result error + + for _, planCheck := range planChecks { + resp := plancheck.CheckPlanResponse{} + planCheck.CheckPlan(ctx, plancheck.CheckPlanRequest{Plan: plan}, &resp) + + if resp.Error != nil { + // TODO: Once Go 1.20 is the minimum supported version for this module, replace with `errors.Join` function + // - https://github.com/hashicorp/terraform-plugin-testing/issues/99 + result = errorshim.Join(result, resp.Error) + } + } + + return result +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/plugin.go similarity index 98% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/plugin.go index 14d869466b5..5c92f3ab999 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/plugin.go @@ -16,10 +16,11 @@ import ( "github.com/hashicorp/terraform-plugin-go/tfprotov5" "github.com/hashicorp/terraform-plugin-go/tfprotov6" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" - "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" "github.com/hashicorp/terraform-plugin-sdk/v2/plugin" - testing "github.com/mitchellh/go-testing-interface" + "github.com/mitchellh/go-testing-interface" + + "github.com/hashicorp/terraform-plugin-testing/internal/logging" + "github.com/hashicorp/terraform-plugin-testing/internal/plugintest" ) // protov5ProviderFactory is a function which is called to start a protocol diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/state.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/state.go new file mode 100644 index 00000000000..9ed1132703c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/state.go @@ -0,0 +1,292 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package resource + +import ( + "context" + "log" + "time" +) + +var refreshGracePeriod = 30 * time.Second + +// StateRefreshFunc is a function type used for StateChangeConf that is +// responsible for refreshing the item being watched for a state change. +// +// It returns three results. `result` is any object that will be returned +// as the final object after waiting for state change. This allows you to +// return the final updated object, for example an EC2 instance after refreshing +// it. A nil result represents not found. +// +// `state` is the latest state of that object. And `err` is any error that +// may have happened while refreshing the state. 
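+//
+// A minimal sketch of an implementation (client, GetThing, id, and Status
+// are hypothetical names, not part of this module):
+//
+//	refresh := resource.StateRefreshFunc(func() (interface{}, string, error) {
+//		thing, err := client.GetThing(id)
+//		if err != nil {
+//			return nil, "", err
+//		}
+//		if thing == nil {
+//			return nil, "", nil // nil result represents not found
+//		}
+//		return thing, thing.Status, nil
+//	})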
+// +// Deprecated: Copy this type to the provider codebase or use +// github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry.StateRefreshFunc. +type StateRefreshFunc func() (result interface{}, state string, err error) + +// StateChangeConf is the configuration struct used for `WaitForState`. +// +// Deprecated: Copy this type to the provider codebase or use +// github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry.StateChangeConf. +type StateChangeConf struct { + Delay time.Duration // Wait this time before starting checks + Pending []string // States that are "allowed" and will continue trying + Refresh StateRefreshFunc // Refreshes the current state + Target []string // Target state + Timeout time.Duration // The amount of time to wait before timeout + MinTimeout time.Duration // Smallest time to wait before refreshes + PollInterval time.Duration // Override MinTimeout/backoff and only poll this often + NotFoundChecks int // Number of times to allow not found (nil result from Refresh) + + // This is to work around inconsistent APIs + ContinuousTargetOccurence int // Number of times the Target state has to occur continuously +} + +// WaitForStateContext watches an object and waits for it to achieve the state +// specified in the configuration using the specified Refresh() func, +// waiting the number of seconds specified in the timeout configuration. +// +// If the Refresh function returns an error, exit immediately with that error. +// +// If the Refresh function returns a state other than the Target state or one +// listed in Pending, return immediately with an error. +// +// If the Timeout is exceeded before reaching the Target state, return an +// error. +// +// Otherwise, the result is the result of the first call to the Refresh function to +// reach the target state. +// +// # Cancellation from the passed in context will cancel the refresh loop +// +// Deprecated: Copy this method to the provider codebase or use +// github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry.StateChangeConf. +func (conf *StateChangeConf) WaitForStateContext(ctx context.Context) (interface{}, error) { + log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target) + + notfoundTick := 0 + targetOccurence := 0 + + // Set a default for times to check for not found + if conf.NotFoundChecks == 0 { + conf.NotFoundChecks = 20 + } + + if conf.ContinuousTargetOccurence == 0 { + conf.ContinuousTargetOccurence = 1 + } + + type Result struct { + Result interface{} + State string + Error error + Done bool + } + + // Read every result from the refresh loop, waiting for a positive result.Done. 
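+	// The buffer of one lets the refresh goroutine publish its most recent
+	// result without immediately blocking on a reader.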
+ resCh := make(chan Result, 1) + // cancellation channel for the refresh loop + cancelCh := make(chan struct{}) + + result := Result{} + + go func() { + defer close(resCh) + + select { + case <-time.After(conf.Delay): + case <-cancelCh: + return + } + + // start with 0 delay for the first loop + var wait time.Duration + + for { + // store the last result + resCh <- result + + // wait and watch for cancellation + select { + case <-cancelCh: + return + case <-time.After(wait): + // first round had no wait + if wait == 0 { + wait = 100 * time.Millisecond + } + } + + res, currentState, err := conf.Refresh() + result = Result{ + Result: res, + State: currentState, + Error: err, + } + + if err != nil { + resCh <- result + return + } + + // If we're waiting for the absence of a thing, then return + if res == nil && len(conf.Target) == 0 { + targetOccurence++ + if conf.ContinuousTargetOccurence == targetOccurence { + result.Done = true + resCh <- result + return + } + continue + } + + if res == nil { + // If we didn't find the resource, check if we have been + // not finding it for awhile, and if so, report an error. + notfoundTick++ + if notfoundTick > conf.NotFoundChecks { + result.Error = &NotFoundError{ + LastError: err, + Retries: notfoundTick, + } + resCh <- result + return + } + } else { + // Reset the counter for when a resource isn't found + notfoundTick = 0 + found := false + + for _, allowed := range conf.Target { + if currentState == allowed { + found = true + targetOccurence++ + if conf.ContinuousTargetOccurence == targetOccurence { + result.Done = true + resCh <- result + return + } + continue + } + } + + for _, allowed := range conf.Pending { + if currentState == allowed { + found = true + targetOccurence = 0 + break + } + } + + if !found && len(conf.Pending) > 0 { + result.Error = &UnexpectedStateError{ + LastError: err, + State: result.State, + ExpectedState: conf.Target, + } + resCh <- result + return + } + } + + // Wait between refreshes using exponential backoff, except when + // waiting for the target state to reoccur. + if targetOccurence == 0 { + wait *= 2 + } + + // If a poll interval has been specified, choose that interval. + // Otherwise bound the default value. + if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second { + wait = conf.PollInterval + } else { + if wait < conf.MinTimeout { + wait = conf.MinTimeout + } else if wait > 10*time.Second { + wait = 10 * time.Second + } + } + + log.Printf("[TRACE] Waiting %s before next try", wait) + } + }() + + // store the last value result from the refresh loop + lastResult := Result{} + + timeout := time.After(conf.Timeout) + for { + select { + case r, ok := <-resCh: + // channel closed, so return the last result + if !ok { + return lastResult.Result, lastResult.Error + } + + // we reached the intended state + if r.Done { + return r.Result, r.Error + } + + // still waiting, store the last result + lastResult = r + case <-ctx.Done(): + close(cancelCh) + return nil, ctx.Err() + case <-timeout: + log.Printf("[WARN] WaitForState timeout after %s", conf.Timeout) + log.Printf("[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod) + + // cancel the goroutine and start our grace period timer + close(cancelCh) + timeout := time.After(refreshGracePeriod) + + // we need a for loop and a label to break on, because we may have + // an extra response value to read, but still want to wait for the + // channel to close. 
+ forSelect: + for { + select { + case r, ok := <-resCh: + if r.Done { + // the last refresh loop reached the desired state + return r.Result, r.Error + } + + if !ok { + // the goroutine returned + break forSelect + } + + // target state not reached, save the result for the + // TimeoutError and wait for the channel to close + lastResult = r + case <-ctx.Done(): + log.Println("[ERROR] Context cancelation detected, abandoning grace period") + break forSelect + case <-timeout: + log.Println("[ERROR] WaitForState exceeded refresh grace period") + break forSelect + } + } + + return nil, &TimeoutError{ + LastError: lastResult.Error, + LastState: lastResult.State, + Timeout: conf.Timeout, + ExpectedState: conf.Target, + } + } + } +} + +// WaitForState watches an object and waits for it to achieve the state +// specified in the configuration using the specified Refresh() func, +// waiting the number of seconds specified in the timeout configuration. +// +// Deprecated: Please use WaitForStateContext to ensure proper plugin shutdown +func (conf *StateChangeConf) WaitForState() (interface{}, error) { + return conf.WaitForStateContext(context.Background()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/state_shim.go similarity index 77% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/state_shim.go index 43c809f2ba6..c9659dc95c3 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/state_shim.go @@ -10,9 +10,10 @@ import ( tfjson "github.com/hashicorp/terraform-json" - "github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-plugin-testing/terraform" + + "github.com/hashicorp/terraform-plugin-testing/internal/addrs" + "github.com/hashicorp/terraform-plugin-testing/internal/tfdiags" ) type shimmedState struct { @@ -20,7 +21,7 @@ type shimmedState struct { } func shimStateFromJson(jsonState *tfjson.State) (*terraform.State, error) { - state := terraform.NewState() + state := terraform.NewState() //nolint:staticcheck // legacy usage state.TFVersion = jsonState.TerraformVersion if jsonState.Values == nil { @@ -65,12 +66,14 @@ func shimOutputState(so *tfjson.StateOutput) (*terraform.OutputState, error) { case string: elements := make([]interface{}, len(v)) for i, el := range v { + //nolint:forcetypeassert // Guaranteed by type switch elements[i] = el.(string) } os.Value = elements case bool: elements := make([]interface{}, len(v)) for i, el := range v { + //nolint:forcetypeassert // Guaranteed by type switch elements[i] = el.(bool) } os.Value = elements @@ -78,6 +81,7 @@ func shimOutputState(so *tfjson.StateOutput) (*terraform.OutputState, error) { case json.Number: elements := make([]interface{}, len(v)) for i, el := range v { + //nolint:forcetypeassert // Guaranteed by type switch elements[i] = el.(json.Number) } os.Value = elements @@ -120,7 +124,7 @@ func (ss *shimmedState) shimStateModule(sm *tfjson.StateModule) error { } } 
-	mod := ss.state.AddModule(path)
+	mod := ss.state.AddModule(path) //nolint:staticcheck // legacy usage
 	for _, res := range sm.Resources {
 		resourceState, err := shimResourceState(res)
 		if err != nil {
@@ -189,15 +193,36 @@ func shimResourceState(res *tfjson.StateResource) (*terraform.ResourceState, err
 	}
 
 	attributes := sf.Flatmap()
-	if _, ok := attributes["id"]; !ok {
-		return nil, fmt.Errorf("no %q found in attributes", "id")
+	// The instance state identifier is a concept from Terraform versions 0.11
+	// and earlier, which helped core and the SDK of that era determine whether
+	// the resource should be removed, and which served as an identifier value
+	// in the human-readable output. This concept unfortunately carried over to
+	// the testing logic when that logic was mostly changed to use the public,
+	// machine-readable JSON interface with Terraform, rather than reusing prior
+	// internal logic from Terraform. Using the "id" attribute value for this
+	// identifier was the default implementation and therefore those older
+	// versions of Terraform required the attribute. This is no longer necessary
+	// in Terraform versions 0.12 and later.
+	//
+	// If the "id" attribute is not found, set the instance state identifier to
+	// a synthetic value that can hopefully lead someone encountering the value
+	// to these comments. The prior logic used to raise an error if the
+	// attribute was not present, but this value should now only be present in
+	// legacy logic of this Go module, such as unintentionally exported logic in
+	// the terraform package, and not encountered during normal testing usage.
+	//
+	// Reference: https://github.com/hashicorp/terraform-plugin-testing/issues/84
+	instanceStateID, ok := attributes["id"]
+
+	if !ok {
+		instanceStateID = "id-attribute-not-set"
 	}
 
 	return &terraform.ResourceState{
 		Provider: res.ProviderName,
 		Type:     res.Type,
 		Primary: &terraform.InstanceState{
-			ID:         attributes["id"],
+			ID:         instanceStateID,
 			Attributes: attributes,
 			Meta: map[string]interface{}{
 				"schema_version": int(res.SchemaVersion),
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_providers.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testcase_providers.go
similarity index 100%
rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_providers.go
rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testcase_providers.go
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_validate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testcase_validate.go
similarity index 66%
rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_validate.go
rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testcase_validate.go
index 8eb85a14abf..6640f8c84cc 100644
--- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_validate.go
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testcase_validate.go
@@ -7,9 +7,18 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging"
+	"github.com/mitchellh/go-testing-interface"
+
+	
"github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/internal/logging" + "github.com/hashicorp/terraform-plugin-testing/internal/teststep" ) +// hasProviders returns true if the TestCase has ExternalProviders set. +func (c TestCase) hasExternalProviders(_ context.Context) bool { + return len(c.ExternalProviders) > 0 +} + // hasProviders returns true if the TestCase has set any of the // ExternalProviders, ProtoV5ProviderFactories, ProtoV6ProviderFactories, // ProviderFactories, or Providers fields. @@ -42,7 +51,7 @@ func (c TestCase) hasProviders(_ context.Context) bool { // - No overlapping ExternalProviders and Providers entries // - No overlapping ExternalProviders and ProviderFactories entries // - TestStep validations performed by the (TestStep).validate() method. -func (c TestCase) validate(ctx context.Context) error { +func (c TestCase) validate(ctx context.Context, t testing.T) error { logging.HelperResourceTrace(ctx, "Validating TestCase") if len(c.Steps) == 0 { @@ -65,13 +74,30 @@ func (c TestCase) validate(ctx context.Context) error { } } + testCaseHasExternalProviders := c.hasExternalProviders(ctx) testCaseHasProviders := c.hasProviders(ctx) for stepIndex, step := range c.Steps { stepNumber := stepIndex + 1 // Use 1-based index for humans + + configRequest := teststep.PrepareConfigurationRequest{ + Directory: step.ConfigDirectory, + File: step.ConfigFile, + Raw: step.Config, + TestStepConfigRequest: config.TestStepConfigRequest{ + StepNumber: stepNumber, + TestName: t.Name(), + }, + }.Exec() + + stepConfiguration := teststep.Configuration(configRequest) + stepValidateReq := testStepValidateRequest{ - StepNumber: stepNumber, - TestCaseHasProviders: testCaseHasProviders, + StepConfiguration: stepConfiguration, + StepNumber: stepNumber, + TestCaseHasExternalProviders: testCaseHasExternalProviders, + TestCaseHasProviders: testCaseHasProviders, + TestName: t.Name(), } err := step.validate(ctx, stepValidateReq) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testing.go similarity index 83% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testing.go index 9bde8e22aa8..2c40fc39657 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testing.go @@ -15,20 +15,26 @@ import ( "strings" "time" + "github.com/hashicorp/go-multierror" "github.com/mitchellh/go-testing-interface" "github.com/hashicorp/terraform-plugin-go/tfprotov5" "github.com/hashicorp/terraform-plugin-go/tfprotov6" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" - "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + + 
"github.com/hashicorp/terraform-plugin-testing/internal/addrs" + "github.com/hashicorp/terraform-plugin-testing/internal/logging" + "github.com/hashicorp/terraform-plugin-testing/internal/plugintest" ) // flagSweep is a flag available when running tests on the command line. It -// contains a comma seperated list of regions to for the sweeper functions to +// contains a comma separated list of regions to for the sweeper functions to // run in. This flag bypasses the normal Test path and instead runs functions designed to // clean up any leaked resources a testing environment could have created. It is // a best effort attempt, and relies on Provider authors to implement "Sweeper" @@ -49,7 +55,7 @@ import ( var flagSweep = flag.String("sweep", "", "List of Regions to run available Sweepers") var flagSweepAllowFailures = flag.Bool("sweep-allow-failures", false, "Enable to allow Sweeper Tests to continue after failures") -var flagSweepRun = flag.String("sweep-run", "", "Comma seperated list of Sweeper Tests to run") +var flagSweepRun = flag.String("sweep-run", "", "Comma separated list of Sweeper Tests to run") var sweeperFuncs map[string]*Sweeper // SweeperFunc is a signature for a function that acts as a sweeper. It @@ -157,7 +163,7 @@ func runSweepers(regions []string, sweepers map[string]*Sweeper, allowFailures b log.Printf("Sweeper Tests for region (%s) ran successfully:\n", region) for sweeper, sweeperErr := range regionSweeperRunList { if sweeperErr == nil { - fmt.Printf("\t- %s\n", sweeper) + log.Printf("\t- %s\n", sweeper) } else { regionSweeperErrorFound = true } @@ -168,7 +174,7 @@ func runSweepers(regions []string, sweepers map[string]*Sweeper, allowFailures b log.Printf("Sweeper Tests for region (%s) ran unsuccessfully:\n", region) for sweeper, sweeperErr := range regionSweeperRunList { if sweeperErr != nil { - fmt.Printf("\t- %s: %s\n", sweeper, sweeperErr) + log.Printf("\t- %s: %s\n", sweeper, sweeperErr) } } } @@ -183,7 +189,7 @@ func runSweepers(regions []string, sweepers map[string]*Sweeper, allowFailures b return sweeperRunList, nil } -// filterSweepers takes a comma seperated string listing the names of sweepers +// filterSweepers takes a comma separated string listing the names of sweepers // to be ran, and returns a filtered set from the list of all of sweepers to // run based on the names given. func filterSweepers(f string, source map[string]*Sweeper) map[string]*Sweeper { @@ -230,7 +236,7 @@ func filterSweeperWithDependencies(name string, source map[string]*Sweeper) map[ return result } -// runSweeperWithRegion recieves a sweeper and a region, and recursively calls +// runSweeperWithRegion receives a sweeper and a region, and recursively calls // itself with that region for every dependency found for that sweeper. If there // are no dependencies, invoke the contained sweeper fun with the region, and // add the success/fail status to the sweeperRunList. @@ -319,6 +325,12 @@ type TestCase struct { // acceptance tests, such as verifying that keys are setup. PreCheck func() + // TerraformVersionChecks is a list of checks to run against + // the Terraform CLI version which is running the testing. + // Each check is executed in order, respecting the first skip + // or fail response, unless the Any() meta check is also used. + TerraformVersionChecks []tfversion.TerraformVersionCheck + // ProviderFactories can be specified for the providers that are valid. 
// // This can also be specified at the TestStep level to enable per-step @@ -415,6 +427,15 @@ type TestCase struct { // IDRefreshIgnore is a list of configuration keys that will be ignored // during ID-only refresh testing. IDRefreshIgnore []string + + // WorkingDir sets the base directory where testing files used by the testing + // module are generated. If WorkingDir is unset, a randomized, temporary + // directory is used. + // + // Use the TF_ACC_PERSIST_WORKING_DIR environment variable, conventionally + // set to "1", to persist any working directory files. Otherwise, this directory is + // automatically cleaned up at the end of the TestCase. + WorkingDir string } // ExternalProvider holds information about third-party providers that should @@ -470,12 +491,60 @@ type TestStep struct { // Config a string of the configuration to give to Terraform. If this // is set, then the TestCase will execute this step with the same logic - // as a `terraform apply`. + // as a `terraform apply`. If both Config and ConfigDirectory are set + // an error will be returned. // // JSON Configuration Syntax can be used and is assumed whenever Config // contains valid JSON. + // + // Only one of Config, ConfigDirectory or ConfigFile can be set + // otherwise an error will be returned. Config string + // ConfigDirectory is a function which returns a function that + // accepts config.TestStepProviderConfig and returns a string + // representing a directory that contains Terraform + // configuration files. + // + // There are helper functions in the [config] package that can be used, + // such as: + // + // - [config.StaticDirectory] + // - [config.TestNameDirectory] + // - [config.TestStepDirectory] + // + // When running Terraform operations for the test, Terraform will + // be executed with copies of the files of this directory as its + // working directory. Only one of Config, ConfigDirectory or + // ConfigFile can be set otherwise an error will be returned. + ConfigDirectory config.TestStepConfigFunc + + // ConfigFile is a function which returns a function that + // accepts config.TestStepProviderConfig and returns a string + // representing a file that contains Terraform configuration. + // + // There are helper functions in the [config] package that can be used, + // such as: + // + // - [config.StaticFile] + // - [config.TestNameFile] + // - [config.TestStepFile] + // + // When running Terraform operations for the test, Terraform will + // be executed with a copy of the file as its working directory. + // Only one of Config, ConfigDirectory or ConfigFile can be set + // otherwise an error will be returned. + ConfigFile config.TestStepConfigFunc + + // ConfigVariables is a map defining variables for use in conjunction + // with Terraform configuration. If this map is populated then it + // will be used to assemble an *.auto.tfvars.json which will be + // written into the working directory. Any variables that are + // defined within the Terraform configuration that have a matching + // variable definition in *.auto.tfvars.json will have their value + // substituted when the acceptance test is executed. + ConfigVariables config.Variables + // Check is called after the Config is applied. Use this step to // make your own API calls to check the status of things, and to // inspect the format of the ResourceState itself. @@ -498,6 +567,20 @@ type TestStep struct { // test to pass. 
ExpectError *regexp.Regexp + // ConfigPlanChecks allows assertions to be made against the plan file at different points of a Config (apply) test using a plan check. + // Custom plan checks can be created by implementing the [PlanCheck] interface, or by using a PlanCheck implementation from the provided [plancheck] package + // + // [PlanCheck]: https://pkg.go.dev/github.com/hashicorp/terraform-plugin-testing/plancheck#PlanCheck + // [plancheck]: https://pkg.go.dev/github.com/hashicorp/terraform-plugin-testing/plancheck + ConfigPlanChecks ConfigPlanChecks + + // RefreshPlanChecks allows assertions to be made against the plan file at different points of a Refresh test using a plan check. + // Custom plan checks can be created by implementing the [PlanCheck] interface, or by using a PlanCheck implementation from the provided [plancheck] package + // + // [PlanCheck]: https://pkg.go.dev/github.com/hashicorp/terraform-plugin-testing/plancheck#PlanCheck + // [plancheck]: https://pkg.go.dev/github.com/hashicorp/terraform-plugin-testing/plancheck + RefreshPlanChecks RefreshPlanChecks + // PlanOnly can be set to only run `plan` with this configuration, and not // actually apply it. This is useful for ensuring config changes result in // no-op plans @@ -570,10 +653,24 @@ type TestStep struct { // IDs returned by the Import. Note that this checks for strict equality // and does not respect DiffSuppressFunc or CustomizeDiff. // + // By default, the prior resource state and import resource state are + // matched by the "id" attribute. If the "id" attribute is not implemented + // or another attribute more uniquely identifies the resource, set the + // ImportStateVerifyIdentifierAttribute field to adjust the attribute for + // matching. + // + // If certain attributes cannot be correctly imported, set the + // ImportStateVerifyIgnore field. + ImportStateVerify bool + + // ImportStateVerifyIdentifierAttribute is the resource attribute for + // matching the prior resource state and import resource state during import + // verification. By default, the "id" attribute is used. + ImportStateVerifyIdentifierAttribute string + // ImportStateVerifyIgnore is a list of prefixes of fields that should // not be verified to be equal. These can be set to ephemeral fields or // fields that can't be refreshed and don't matter. - ImportStateVerify bool ImportStateVerifyIgnore []string // ImportStatePersist, if true, will update the persisted state with the @@ -667,6 +764,28 @@ type TestStep struct { ExternalProviders map[string]ExternalProvider } +// ConfigPlanChecks defines the different points in a Config TestStep when plan checks can be run. +type ConfigPlanChecks struct { + // PreApply runs all plan checks in the slice. This occurs before the apply of a Config test is run. This slice cannot be populated + // with TestStep.PlanOnly, as there is no PreApply plan run with that flag set. All errors by plan checks in this slice are aggregated, reported, and will result in a test failure. + PreApply []plancheck.PlanCheck + + // PostApplyPreRefresh runs all plan checks in the slice. This occurs after the apply and before the refresh of a Config test is run. + // All errors by plan checks in this slice are aggregated, reported, and will result in a test failure. + PostApplyPreRefresh []plancheck.PlanCheck + + // PostApplyPostRefresh runs all plan checks in the slice. This occurs after the apply and refresh of a Config test are run. 
+ // All errors by plan checks in this slice are aggregated, reported, and will result in a test failure. + PostApplyPostRefresh []plancheck.PlanCheck +} + +// RefreshPlanChecks defines the different points in a Refresh TestStep when plan checks can be run. +type RefreshPlanChecks struct { + // PostRefresh runs all plan checks in the slice. This occurs after the refresh of the Refresh test is run. + // All errors by plan checks in this slice are aggregated, reported, and will result in a test failure. + PostRefresh []plancheck.PlanCheck +} + // ParallelTest performs an acceptance test on a resource, allowing concurrency // with other ParallelTest. The number of concurrent tests is controlled by the // "go test" command -parallel flag. @@ -718,7 +837,7 @@ func Test(t testing.T, c TestCase) { ctx := context.Background() ctx = logging.InitTestContext(ctx, t) - err := c.validate(ctx) + err := c.validate(ctx, t) if err != nil { logging.HelperResourceError(ctx, @@ -775,6 +894,17 @@ func Test(t testing.T, c TestCase) { } }(helper) + // Run the TerraformVersionChecks if we have it. + // This is done after creating the helper because a working directory is required + // to retrieve the Terraform version. + if c.TerraformVersionChecks != nil { + logging.HelperResourceDebug(ctx, "Calling TestCase Terraform version checks") + + runTFVersionChecks(ctx, t, helper.TerraformVersion(), c.TerraformVersionChecks) + + logging.HelperResourceDebug(ctx, "Called TestCase Terraform version checks") + } + runNewTest(ctx, t, c, helper) logging.HelperResourceDebug(ctx, "Finished TestCase") @@ -835,15 +965,15 @@ func ComposeTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { // TestCheckFuncs and aggregates failures. func ComposeAggregateTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { return func(s *terraform.State) error { - var result []error + var result *multierror.Error for i, f := range fs { if err := f(s); err != nil { - result = append(result, fmt.Errorf("Check %d/%d error: %w", i+1, len(fs), err)) + result = multierror.Append(result, fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err)) } } - return errors.Join(result...) + return result.ErrorOrNil() } } @@ -902,6 +1032,14 @@ func TestCheckResourceAttrSet(name, key string) TestCheckFunc { // TestCheckModuleResourceAttrSet - as per TestCheckResourceAttrSet but with // support for non-root modules +// +// Deprecated: This functionality is deprecated without replacement. The +// terraform-plugin-testing Go module is intended for provider testing, which +// should always be possible within the root module of a configuration. This +// functionality is a carryover of when this code was used within Terraform +// core to test both providers and modules. Modern testing implementations to +// verify interactions between modules should be tested in Terraform core or +// using tooling outside this Go module. func TestCheckModuleResourceAttrSet(mp []string, name string, key string) TestCheckFunc { mpt := addrs.Module(mp).UnkeyedInstanceShim() return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { @@ -993,6 +1131,14 @@ func TestCheckResourceAttr(name, key, value string) TestCheckFunc { // TestCheckModuleResourceAttr - as per TestCheckResourceAttr but with // support for non-root modules +// +// Deprecated: This functionality is deprecated without replacement. The +// terraform-plugin-testing Go module is intended for provider testing, which +// should always be possible within the root module of a configuration. 
This +// functionality is a carryover of when this code was used within Terraform +// core to test both providers and modules. Modern testing implementations to +// verify interactions between modules should be tested in Terraform core or +// using tooling outside this Go module. func TestCheckModuleResourceAttr(mp []string, name string, key string, value string) TestCheckFunc { mpt := addrs.Module(mp).UnkeyedInstanceShim() return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { @@ -1157,6 +1303,14 @@ func TestCheckNoResourceAttr(name, key string) TestCheckFunc { // TestCheckModuleNoResourceAttr - as per TestCheckNoResourceAttr but with // support for non-root modules +// +// Deprecated: This functionality is deprecated without replacement. The +// terraform-plugin-testing Go module is intended for provider testing, which +// should always be possible within the root module of a configuration. This +// functionality is a carryover of when this code was used within Terraform +// core to test both providers and modules. Modern testing implementations to +// verify interactions between modules should be tested in Terraform core or +// using tooling outside this Go module. func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestCheckFunc { mpt := addrs.Module(mp).UnkeyedInstanceShim() return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { @@ -1253,6 +1407,14 @@ func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc { // TestModuleMatchResourceAttr - as per TestMatchResourceAttr but with // support for non-root modules +// +// Deprecated: This functionality is deprecated without replacement. The +// terraform-plugin-testing Go module is intended for provider testing, which +// should always be possible within the root module of a configuration. This +// functionality is a carryover of when this code was used within Terraform +// core to test both providers and modules. Modern testing implementations to +// verify interactions between modules should be tested in Terraform core or +// using tooling outside this Go module. func TestModuleMatchResourceAttr(mp []string, name string, key string, r *regexp.Regexp) TestCheckFunc { mpt := addrs.Module(mp).UnkeyedInstanceShim() return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { @@ -1292,6 +1454,14 @@ func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckF // TestCheckModuleResourceAttrPtr - as per TestCheckResourceAttrPtr but with // support for non-root modules +// +// Deprecated: This functionality is deprecated without replacement. The +// terraform-plugin-testing Go module is intended for provider testing, which +// should always be possible within the root module of a configuration. This +// functionality is a carryover of when this code was used within Terraform +// core to test both providers and modules. Modern testing implementations to +// verify interactions between modules should be tested in Terraform core or +// using tooling outside this Go module. 
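// The deprecation notes above all point provider tests at the root module. As
// an illustrative sketch only, the supported root-module equivalents of these
// module-scoped helpers look as follows; the resource address, attribute
// names, and expected value are assumptions for the example, not part of the
// vendored code:
//
//	expected := "example"
//	check := resource.ComposeAggregateTestCheckFunc(
//		// Compare the attribute against a value resolved at check time.
//		resource.TestCheckResourceAttrPtr("examplecloud_thing.test", "name", &expected),
//		// Assert the attribute is present without fixing its value.
//		resource.TestCheckResourceAttrSet("examplecloud_thing.test", "id"),
//	)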
func TestCheckModuleResourceAttrPtr(mp []string, name string, key string, value *string) TestCheckFunc { return func(s *terraform.State) error { return TestCheckModuleResourceAttr(mp, name, key, *value)(s) @@ -1349,6 +1519,14 @@ func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string // TestCheckModuleResourceAttrPair - as per TestCheckResourceAttrPair but with // support for non-root modules +// +// Deprecated: This functionality is deprecated without replacement. The +// terraform-plugin-testing Go module is intended for provider testing, which +// should always be possible within the root module of a configuration. This +// functionality is a carryover of when this code was used within Terraform +// core to test both providers and modules. Modern testing implementations to +// verify interactions between modules should be tested in Terraform core or +// using tooling outside this Go module. func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirst string, mpSecond []string, nameSecond string, keySecond string) TestCheckFunc { mptFirst := addrs.Module(mpFirst).UnkeyedInstanceShim() mptSecond := addrs.Module(mpSecond).UnkeyedInstanceShim() @@ -1445,7 +1623,12 @@ func TestMatchOutput(name string, r *regexp.Regexp) TestCheckFunc { return fmt.Errorf("Not found: %s", name) } - if !r.MatchString(rs.Value.(string)) { + valStr, ok := rs.Value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for resource value", rs.Value) + } + + if !r.MatchString(valStr) { return fmt.Errorf( "Output '%s': %#v didn't match %q", name, @@ -1476,7 +1659,7 @@ func modulePrimaryInstanceState(ms *terraform.ModuleState, name string) (*terraf // modulePathPrimaryInstanceState returns the primary instance state for the // given resource name in a given module path. func modulePathPrimaryInstanceState(s *terraform.State, mp addrs.ModuleInstance, name string) (*terraform.InstanceState, error) { - ms := s.ModuleByPath(mp) + ms := s.ModuleByPath(mp) //nolint:staticcheck // legacy usage if ms == nil { return nil, fmt.Errorf("No module found at: %s", mp) } @@ -1487,7 +1670,7 @@ func modulePathPrimaryInstanceState(s *terraform.State, mp addrs.ModuleInstance, // primaryInstanceState returns the primary instance state for the given // resource name in the root module. 
func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) { - ms := s.RootModule() + ms := s.RootModule() //nolint:staticcheck // legacy usage return modulePrimaryInstanceState(ms, name) } @@ -1506,7 +1689,7 @@ func indexesIntoTypeSet(key string) bool { func checkIfIndexesIntoTypeSet(key string, f TestCheckFunc) TestCheckFunc { return func(s *terraform.State) error { err := f(s) - if err != nil && s.IsBinaryDrivenTest && indexesIntoTypeSet(key) { + if err != nil && indexesIntoTypeSet(key) { return fmt.Errorf("Error in test check: %s\nTest check address %q likely indexes into TypeSet\nThis is currently not possible in the SDK", err, key) } return err @@ -1516,7 +1699,7 @@ func checkIfIndexesIntoTypeSet(key string, f TestCheckFunc) TestCheckFunc { func checkIfIndexesIntoTypeSetPair(keyFirst, keySecond string, f TestCheckFunc) TestCheckFunc { return func(s *terraform.State) error { err := f(s) - if err != nil && s.IsBinaryDrivenTest && (indexesIntoTypeSet(keyFirst) || indexesIntoTypeSet(keySecond)) { + if err != nil && (indexesIntoTypeSet(keyFirst) || indexesIntoTypeSet(keySecond)) { return fmt.Errorf("Error in test check: %s\nTest check address %q or %q likely indexes into TypeSet\nThis is currently not possible in the SDK", err, keyFirst, keySecond) } return err diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testing_config.go similarity index 78% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testing_config.go index f56c885be32..57bc0d8ce32 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testing_config.go @@ -7,8 +7,9 @@ import ( "context" "fmt" - "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" - "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" + "github.com/hashicorp/terraform-plugin-testing/internal/logging" + + "github.com/hashicorp/terraform-plugin-testing/internal/plugintest" ) func testStepTaint(ctx context.Context, step TestStep, wd *plugintest.WorkingDir) error { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testing_new.go similarity index 64% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testing_new.go index 14b247306af..96ad3eec028 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testing_new.go @@ -6,16 +6,21 @@ package resource import ( "context" "fmt" + "os" + "path/filepath" "reflect" + "strconv" "strings" "github.com/google/go-cmp/cmp" tfjson "github.com/hashicorp/terraform-json" "github.com/mitchellh/go-testing-interface" - 
"github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" - "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/internal/logging" + "github.com/hashicorp/terraform-plugin-testing/internal/plugintest" + "github.com/hashicorp/terraform-plugin-testing/internal/teststep" + "github.com/hashicorp/terraform-plugin-testing/terraform" ) func runPostTestDestroy(ctx context.Context, t testing.T, c TestCase, wd *plugintest.WorkingDir, providers *providerFactories, statePreDestroy *terraform.State) error { @@ -45,7 +50,7 @@ func runPostTestDestroy(ctx context.Context, t testing.T, c TestCase, wd *plugin func runNewTest(ctx context.Context, t testing.T, c TestCase, helper *plugintest.Helper) { t.Helper() - wd := helper.RequireNewWorkingDir(ctx, t) + wd := helper.RequireNewWorkingDir(ctx, t, c.WorkingDir) ctx = logging.TestTerraformPathContext(ctx, wd.GetHelper().TerraformExecPath()) ctx = logging.TestWorkingDirectoryContext(ctx, wd.GetHelper().WorkingDirectory()) @@ -89,8 +94,16 @@ func runNewTest(ctx context.Context, t testing.T, c TestCase, helper *plugintest wd.Close() }() + // Return value from c.ProviderConfig() is assigned to Raw as this was previously being + // passed to wd.SetConfig() when the second argument accept a configuration string. if c.hasProviders(ctx) { - err := wd.SetConfig(ctx, c.providerConfig(ctx, false)) + config := teststep.Configuration( + teststep.ConfigurationRequest{ + Raw: teststep.Pointer(c.providerConfig(ctx, false)), + }, + ) + + err := wd.SetConfig(ctx, config, nil) if err != nil { logging.HelperResourceError(ctx, @@ -117,10 +130,28 @@ func runNewTest(ctx context.Context, t testing.T, c TestCase, helper *plugintest // use this to track last step successfully applied // acts as default for import tests - var appliedCfg string + var appliedCfg teststep.Config + var stepNumber int for stepIndex, step := range c.Steps { - stepNumber := stepIndex + 1 // 1-based indexing for humans + if stepNumber > 0 { + copyWorkingDir(ctx, t, stepNumber, wd) + } + + stepNumber = stepIndex + 1 // 1-based indexing for humans + + configRequest := teststep.PrepareConfigurationRequest{ + Directory: step.ConfigDirectory, + File: step.ConfigFile, + Raw: step.Config, + TestStepConfigRequest: config.TestStepConfigRequest{ + StepNumber: stepNumber, + TestName: t.Name(), + }, + }.Exec() + + cfg := teststep.Configuration(configRequest) + ctx = logging.TestStepNumberContext(ctx, stepNumber) logging.HelperResourceDebug(ctx, "Starting TestStep") @@ -152,7 +183,7 @@ func runNewTest(ctx context.Context, t testing.T, c TestCase, helper *plugintest } } - if step.Config != "" && !step.Destroy && len(step.Taint) > 0 { + if cfg != nil && !step.Destroy && len(step.Taint) > 0 { err := testStepTaint(ctx, step, wd) if err != nil { @@ -164,16 +195,55 @@ func runNewTest(ctx context.Context, t testing.T, c TestCase, helper *plugintest } } - if step.hasProviders(ctx) { + hasProviders, err := step.hasProviders(ctx, stepIndex, t.Name()) + + if err != nil { + logging.HelperResourceError(ctx, + "TestStep error checking for providers", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestStep %d/%d error checking for providers: %s", stepNumber, len(c.Steps), err) + } + + if hasProviders { providers = &providerFactories{ legacy: sdkProviderFactories(c.ProviderFactories).merge(step.ProviderFactories), protov5: 
protov5ProviderFactories(c.ProtoV5ProviderFactories).merge(step.ProtoV5ProviderFactories), protov6: protov6ProviderFactories(c.ProtoV6ProviderFactories).merge(step.ProtoV6ProviderFactories), } - providerCfg := step.providerConfig(ctx, step.configHasProviderBlock(ctx)) + var hasProviderBlock bool - err := wd.SetConfig(ctx, providerCfg) + if cfg != nil { + hasProviderBlock, err = cfg.HasProviderBlock(ctx) + + if err != nil { + logging.HelperResourceError(ctx, + "TestStep error determining whether configuration contains provider block", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestStep %d/%d error determining whether configuration contains provider block: %s", stepNumber, len(c.Steps), err) + } + } + + var testStepConfig teststep.Config + + // Return value from step.providerConfig() is assigned to Raw as this was previously being + // passed to wd.SetConfig() directly when the second argument to wd.SetConfig() accepted a + // configuration string. + confRequest := teststep.PrepareConfigurationRequest{ + Directory: step.ConfigDirectory, + File: step.ConfigFile, + Raw: step.providerConfig(ctx, hasProviderBlock), + TestStepConfigRequest: config.TestStepConfigRequest{ + StepNumber: stepIndex + 1, + TestName: t.Name(), + }, + }.Exec() + + testStepConfig = teststep.Configuration(confRequest) + + err = wd.SetConfig(ctx, testStepConfig, step.ConfigVariables) if err != nil { logging.HelperResourceError(ctx, @@ -206,7 +276,7 @@ func runNewTest(ctx context.Context, t testing.T, c TestCase, helper *plugintest if step.ImportState { logging.HelperResourceTrace(ctx, "TestStep is ImportState mode") - err := testStepNewImportState(ctx, t, helper, wd, step, appliedCfg, providers) + err := testStepNewImportState(ctx, t, helper, wd, step, appliedCfg, providers, stepIndex) if step.ExpectError != nil { logging.HelperResourceDebug(ctx, "Checking TestStep ExpectError") if err == nil { @@ -281,10 +351,10 @@ func runNewTest(ctx context.Context, t testing.T, c TestCase, helper *plugintest continue } - if step.Config != "" { + if cfg != nil { logging.HelperResourceTrace(ctx, "TestStep is Config mode") - err := testStepNewConfig(ctx, t, c, wd, step, providers) + err := testStepNewConfig(ctx, t, c, wd, step, providers, stepIndex) if step.ExpectError != nil { logging.HelperResourceDebug(ctx, "Checking TestStep ExpectError") @@ -318,7 +388,44 @@ func runNewTest(ctx context.Context, t testing.T, c TestCase, helper *plugintest } } - appliedCfg = step.mergedConfig(ctx, c) + var hasTerraformBlock bool + var hasProviderBlock bool + + if cfg != nil { + hasTerraformBlock, err = cfg.HasTerraformBlock(ctx) + + if err != nil { + logging.HelperResourceError(ctx, + "Error determining whether configuration contains terraform block", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("Error determining whether configuration contains terraform block: %s", err) + } + + hasProviderBlock, err = cfg.HasProviderBlock(ctx) + + if err != nil { + logging.HelperResourceError(ctx, + "Error determining whether configuration contains provider block", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("Error determining whether configuration contains provider block: %s", err) + } + } + + mergedConfig := step.mergedConfig(ctx, c, hasTerraformBlock, hasProviderBlock) + + confRequest := teststep.PrepareConfigurationRequest{ + Directory: step.ConfigDirectory, + File: step.ConfigFile, + Raw: mergedConfig, + TestStepConfigRequest: config.TestStepConfigRequest{ + StepNumber: stepIndex + 1, + TestName: 
t.Name(), + }, + }.Exec() + + appliedCfg = teststep.Configuration(confRequest) logging.HelperResourceDebug(ctx, "Finished TestStep") @@ -327,6 +434,10 @@ func runNewTest(ctx context.Context, t testing.T, c TestCase, helper *plugintest t.Fatalf("Step %d/%d, unsupported test mode", stepNumber, len(c.Steps)) } + + if stepNumber > 0 { + copyWorkingDir(ctx, t, stepNumber, wd) + } } func getState(ctx context.Context, t testing.T, wd *plugintest.WorkingDir) (*terraform.State, error) { @@ -344,7 +455,7 @@ func getState(ctx context.Context, t testing.T, wd *plugintest.WorkingDir) (*ter } func stateIsEmpty(state *terraform.State) bool { - return state.Empty() || !state.HasResources() + return state.Empty() || !state.HasResources() //nolint:staticcheck // legacy usage } func planIsEmpty(plan *tfjson.Plan) bool { @@ -358,23 +469,73 @@ func planIsEmpty(plan *tfjson.Plan) bool { return true } -func testIDRefresh(ctx context.Context, t testing.T, c TestCase, wd *plugintest.WorkingDir, step TestStep, r *terraform.ResourceState, providers *providerFactories) error { +func testIDRefresh(ctx context.Context, t testing.T, c TestCase, wd *plugintest.WorkingDir, step TestStep, r *terraform.ResourceState, providers *providerFactories, stepIndex int) error { t.Helper() // Build the state. The state is just the resource with an ID. There // are no attributes. We only set what is needed to perform a refresh. - state := terraform.NewState() + state := terraform.NewState() //nolint:staticcheck // legacy usage state.RootModule().Resources = make(map[string]*terraform.ResourceState) state.RootModule().Resources[c.IDRefreshName] = &terraform.ResourceState{} + configRequest := teststep.PrepareConfigurationRequest{ + Directory: step.ConfigDirectory, + File: step.ConfigFile, + Raw: step.Config, + TestStepConfigRequest: config.TestStepConfigRequest{ + StepNumber: stepIndex + 1, + TestName: t.Name(), + }, + }.Exec() + + cfg := teststep.Configuration(configRequest) + + var hasProviderBlock bool + + if cfg != nil { + var err error + + hasProviderBlock, err = cfg.HasProviderBlock(ctx) + + if err != nil { + logging.HelperResourceError(ctx, + "Error determining whether configuration contains provider block for import test config", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("Error determining whether configuration contains provider block for import test config: %s", err) + } + } + + // Return value from c.ProviderConfig() is assigned to Raw as this was previously being + // passed to wd.SetConfig() when the second argument accepted a configuration string. + testStepConfig := teststep.Configuration( + teststep.ConfigurationRequest{ + Raw: teststep.Pointer(c.providerConfig(ctx, hasProviderBlock)), + }, + ) + // Temporarily set the config to a minimal provider config for the refresh // test. After the refresh we can reset it. 
- err := wd.SetConfig(ctx, c.providerConfig(ctx, step.configHasProviderBlock(ctx))) + err := wd.SetConfig(ctx, testStepConfig, step.ConfigVariables) if err != nil { t.Fatalf("Error setting import test config: %s", err) } + defer func() { - err = wd.SetConfig(ctx, step.Config) + confRequest := teststep.PrepareConfigurationRequest{ + Directory: step.ConfigDirectory, + File: step.ConfigFile, + Raw: step.providerConfig(ctx, hasProviderBlock), + TestStepConfigRequest: config.TestStepConfigRequest{ + StepNumber: stepIndex + 1, + TestName: t.Name(), + }, + }.Exec() + + testStepConfigDefer := teststep.Configuration(confRequest) + + err = wd.SetConfig(ctx, testStepConfigDefer, step.ConfigVariables) + if err != nil { t.Fatalf("Error resetting test config: %s", err) } @@ -441,3 +602,27 @@ func testIDRefresh(ctx context.Context, t testing.T, c TestCase, wd *plugintest. return nil } + +func copyWorkingDir(ctx context.Context, t testing.T, stepNumber int, wd *plugintest.WorkingDir) { + if os.Getenv(plugintest.EnvTfAccPersistWorkingDir) == "" { + return + } + + workingDir := wd.GetHelper().WorkingDirectory() + + dest := filepath.Join(workingDir, fmt.Sprintf("%s%s", "step_", strconv.Itoa(stepNumber))) + + baseDir := wd.BaseDir() + rootBaseDir := strings.TrimPrefix(baseDir, workingDir) + + err := plugintest.CopyDir(workingDir, dest, rootBaseDir) + if err != nil { + logging.HelperResourceError(ctx, + "Unexpected error copying working directory files", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestStep %d error copying working directory files: %s", stepNumber, err) + } + + t.Logf("Working directory and files have been copied to: %s", dest) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testing_new_config.go similarity index 67% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testing_new_config.go index a52008768fb..5df394d94cf 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testing_new_config.go @@ -9,17 +9,73 @@ import ( "fmt" tfjson "github.com/hashicorp/terraform-json" - testing "github.com/mitchellh/go-testing-interface" + "github.com/mitchellh/go-testing-interface" - "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" - "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/internal/teststep" + "github.com/hashicorp/terraform-plugin-testing/terraform" + + "github.com/hashicorp/terraform-plugin-testing/internal/logging" + "github.com/hashicorp/terraform-plugin-testing/internal/plugintest" ) -func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugintest.WorkingDir, step TestStep, providers *providerFactories) error { +func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugintest.WorkingDir, step TestStep, providers *providerFactories, stepIndex int) error { t.Helper() - err := wd.SetConfig(ctx, 
step.mergedConfig(ctx, c)) + configRequest := teststep.PrepareConfigurationRequest{ + Directory: step.ConfigDirectory, + File: step.ConfigFile, + Raw: step.Config, + TestStepConfigRequest: config.TestStepConfigRequest{ + StepNumber: stepIndex + 1, + TestName: t.Name(), + }, + }.Exec() + + cfg := teststep.Configuration(configRequest) + + var hasTerraformBlock bool + var hasProviderBlock bool + + if cfg != nil { + var err error + + hasTerraformBlock, err = cfg.HasTerraformBlock(ctx) + + if err != nil { + logging.HelperResourceError(ctx, + "Error determining whether configuration contains terraform block", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("Error determining whether configuration contains terraform block: %s", err) + } + + hasProviderBlock, err = cfg.HasProviderBlock(ctx) + + if err != nil { + logging.HelperResourceError(ctx, + "Error determining whether configuration contains provider block", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("Error determining whether configuration contains provider block: %s", err) + } + } + + mergedConfig := step.mergedConfig(ctx, c, hasTerraformBlock, hasProviderBlock) + + confRequest := teststep.PrepareConfigurationRequest{ + Directory: step.ConfigDirectory, + File: step.ConfigFile, + Raw: mergedConfig, + TestStepConfigRequest: config.TestStepConfigRequest{ + StepNumber: stepIndex + 1, + TestName: t.Name(), + }, + }.Exec() + + testStepConfig := teststep.Configuration(confRequest) + + err := wd.SetConfig(ctx, testStepConfig, step.ConfigVariables) if err != nil { return fmt.Errorf("Error setting config: %w", err) } @@ -50,6 +106,24 @@ func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugint return fmt.Errorf("Error running pre-apply plan: %w", err) } + // Run pre-apply plan checks + if len(step.ConfigPlanChecks.PreApply) > 0 { + var plan *tfjson.Plan + err = runProviderCommand(ctx, t, func() error { + var err error + plan, err = wd.SavedPlan(ctx) + return err + }, wd, providers) + if err != nil { + return fmt.Errorf("Error retrieving pre-apply plan: %w", err) + } + + err = runPlanChecks(ctx, t, plan, step.ConfigPlanChecks.PreApply) + if err != nil { + return fmt.Errorf("Pre-apply plan check(s) failed:\n%w", err) + } + } + // We need to keep a copy of the state prior to destroying such // that the destroy steps can verify their behavior in the // check function @@ -93,7 +167,6 @@ func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugint if step.Check != nil { logging.HelperResourceTrace(ctx, "Using TestStep Check") - state.IsBinaryDrivenTest = true if step.Destroy { if err := step.Check(stateBeforeApplication); err != nil { return fmt.Errorf("Check failed: %w", err) @@ -130,6 +203,14 @@ func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugint return fmt.Errorf("Error retrieving post-apply plan: %w", err) } + // Run post-apply, pre-refresh plan checks + if len(step.ConfigPlanChecks.PostApplyPreRefresh) > 0 { + err = runPlanChecks(ctx, t, plan, step.ConfigPlanChecks.PostApplyPreRefresh) + if err != nil { + return fmt.Errorf("Post-apply, pre-refresh plan check(s) failed:\n%w", err) + } + } + if !planIsEmpty(plan) && !step.ExpectNonEmptyPlan { var stdout string err = runProviderCommand(ctx, t, func() error { @@ -173,6 +254,14 @@ func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugint return fmt.Errorf("Error retrieving second post-apply plan: %w", err) } + // Run post-apply, post-refresh plan checks + if 
len(step.ConfigPlanChecks.PostApplyPostRefresh) > 0 { + err = runPlanChecks(ctx, t, plan, step.ConfigPlanChecks.PostApplyPostRefresh) + if err != nil { + return fmt.Errorf("Post-apply, post-refresh plan check(s) failed:\n%w", err) + } + } + // check if plan is empty if !planIsEmpty(plan) && !step.ExpectNonEmptyPlan { var stdout string @@ -209,6 +298,7 @@ func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugint return err } + //nolint:staticcheck // legacy usage if state.Empty() { return nil } @@ -233,7 +323,7 @@ func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugint // this fails. If refresh isn't read-only, then this will have // caught a different bug. if idRefreshCheck != nil { - if err := testIDRefresh(ctx, t, c, wd, step, idRefreshCheck, providers); err != nil { + if err := testIDRefresh(ctx, t, c, wd, step, idRefreshCheck, providers, stepIndex); err != nil { return fmt.Errorf( "[ERROR] Test: ID-only test failed: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testing_new_import_state.go similarity index 78% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testing_new_import_state.go index 4ddf56c5b28..7dbc0b800b4 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testing_new_import_state.go @@ -12,14 +12,29 @@ import ( "github.com/google/go-cmp/cmp" "github.com/mitchellh/go-testing-interface" - "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" - "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/internal/teststep" + "github.com/hashicorp/terraform-plugin-testing/terraform" + + "github.com/hashicorp/terraform-plugin-testing/internal/logging" + "github.com/hashicorp/terraform-plugin-testing/internal/plugintest" ) -func testStepNewImportState(ctx context.Context, t testing.T, helper *plugintest.Helper, wd *plugintest.WorkingDir, step TestStep, cfg string, providers *providerFactories) error { +func testStepNewImportState(ctx context.Context, t testing.T, helper *plugintest.Helper, wd *plugintest.WorkingDir, step TestStep, cfg teststep.Config, providers *providerFactories, stepIndex int) error { t.Helper() + configRequest := teststep.PrepareConfigurationRequest{ + Directory: step.ConfigDirectory, + File: step.ConfigFile, + Raw: step.Config, + TestStepConfigRequest: config.TestStepConfigRequest{ + StepNumber: stepIndex + 1, + TestName: t.Name(), + }, + }.Exec() + + testStepConfig := teststep.Configuration(configRequest) + if step.ResourceName == "" { t.Fatal("ResourceName is required for an import state test") } @@ -27,6 +42,7 @@ func testStepNewImportState(ctx context.Context, t testing.T, helper *plugintest // get state from check sequence var state *terraform.State var err error + err = runProviderCommand(ctx, t, func() error { state, err = getState(ctx, t, wd) if err != nil { @@ 
-78,11 +94,11 @@ func testStepNewImportState(ctx context.Context, t testing.T, helper *plugintest logging.HelperResourceTrace(ctx, fmt.Sprintf("Using import identifier: %s", importId)) // Create working directory for import tests - if step.Config == "" { + if testStepConfig == nil { logging.HelperResourceTrace(ctx, "Using prior TestStep Config for import") - step.Config = cfg - if step.Config == "" { + testStepConfig = cfg + if testStepConfig == nil { t.Fatal("Cannot import state with no specified config") } } @@ -93,11 +109,11 @@ func testStepNewImportState(ctx context.Context, t testing.T, helper *plugintest if step.ImportStatePersist { importWd = wd } else { - importWd = helper.RequireNewWorkingDir(ctx, t) + importWd = helper.RequireNewWorkingDir(ctx, t, "") defer importWd.Close() } - err = importWd.SetConfig(ctx, step.Config) + err = importWd.SetConfig(ctx, testStepConfig, step.ConfigVariables) if err != nil { t.Fatalf("Error setting test config: %s", err) } @@ -146,7 +162,7 @@ func testStepNewImportState(ctx context.Context, t testing.T, helper *plugintest continue } - is := r.Primary.DeepCopy() + is := r.Primary.DeepCopy() //nolint:staticcheck // legacy usage is.Ephemeral.Type = r.Type // otherwise the check function cannot see the type states = append(states, is) } @@ -181,12 +197,33 @@ func testStepNewImportState(ctx context.Context, t testing.T, helper *plugintest } } + identifierAttribute := step.ImportStateVerifyIdentifierAttribute + + if identifierAttribute == "" { + identifierAttribute = "id" + } + for _, r := range newResources { + rIdentifier, ok := r.Primary.Attributes[identifierAttribute] + + if !ok { + t.Fatalf("ImportStateVerify: New resource missing identifier attribute %q, ensure attribute value is properly set or use ImportStateVerifyIdentifierAttribute to choose different attribute", identifierAttribute) + } + // Find the existing resource var oldR *terraform.ResourceState for _, r2 := range oldResources { + if r2.Primary == nil || r2.Type != r.Type || r2.Provider != r.Provider { + continue + } + + r2Identifier, ok := r2.Primary.Attributes[identifierAttribute] + + if !ok { + t.Fatalf("ImportStateVerify: Old resource missing identifier attribute %q, ensure attribute value is properly set or use ImportStateVerifyIdentifierAttribute to choose different attribute", identifierAttribute) + } - if r2.Primary != nil && r2.Primary.ID == r.Primary.ID && r2.Type == r.Type && r2.Provider == r.Provider { + if r2Identifier == rIdentifier { oldR = r2 break } @@ -194,7 +231,7 @@ func testStepNewImportState(ctx context.Context, t testing.T, helper *plugintest if oldR == nil || oldR.Primary == nil { t.Fatalf( "Failed state verification, resource with ID %s not found", - r.Primary.ID) + rIdentifier) } // don't add empty flatmapped containers, so we can more easily diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_refresh_state.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testing_new_refresh_state.go similarity index 78% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_refresh_state.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testing_new_refresh_state.go index 627190a9d19..86073b165d1 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_refresh_state.go +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testing_new_refresh_state.go @@ -10,9 +10,10 @@ import ( tfjson "github.com/hashicorp/terraform-json" "github.com/mitchellh/go-testing-interface" - "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" - "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-plugin-testing/terraform" + + "github.com/hashicorp/terraform-plugin-testing/internal/logging" + "github.com/hashicorp/terraform-plugin-testing/internal/plugintest" ) func testStepNewRefreshState(ctx context.Context, t testing.T, wd *plugintest.WorkingDir, step TestStep, providers *providerFactories) error { @@ -66,7 +67,7 @@ func testStepNewRefreshState(ctx context.Context, t testing.T, wd *plugintest.Wo return wd.CreatePlan(ctx) }, wd, providers) if err != nil { - return fmt.Errorf("Error running post-apply plan: %w", err) + return fmt.Errorf("Error running post-refresh plan: %w", err) } var plan *tfjson.Plan @@ -76,7 +77,15 @@ func testStepNewRefreshState(ctx context.Context, t testing.T, wd *plugintest.Wo return err }, wd, providers) if err != nil { - return fmt.Errorf("Error retrieving post-apply plan: %w", err) + return fmt.Errorf("Error retrieving post-refresh plan: %w", err) + } + + // Run post-refresh plan checks + if len(step.RefreshPlanChecks.PostRefresh) > 0 { + err = runPlanChecks(ctx, t, plan, step.RefreshPlanChecks.PostRefresh) + if err != nil { + return fmt.Errorf("Post-refresh plan check(s) failed:\n%w", err) + } } if !planIsEmpty(plan) && !step.ExpectNonEmptyPlan { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_sets.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testing_sets.go similarity index 99% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_sets.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testing_sets.go index 8f5a731c32e..a304bfc4e3e 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_sets.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/testing_sets.go @@ -10,7 +10,7 @@ import ( "regexp" "strings" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-plugin-testing/terraform" ) const ( diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_providers.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/teststep_providers.go similarity index 72% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_providers.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/teststep_providers.go index 9b759bde03a..1e2aa843eff 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_providers.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/teststep_providers.go @@ -6,24 +6,9 @@ package resource import ( "context" "fmt" - "regexp" "strings" ) -var configProviderBlockRegex = 
regexp.MustCompile(`provider "?[a-zA-Z0-9_-]+"? {`) - -// configHasProviderBlock returns true if the Config has declared a provider -// configuration block, e.g. provider "examplecloud" {...} -func (s TestStep) configHasProviderBlock(_ context.Context) bool { - return configProviderBlockRegex.MatchString(s.Config) -} - -// configHasTerraformBlock returns true if the Config has declared a terraform -// configuration block, e.g. terraform {...} -func (s TestStep) configHasTerraformBlock(_ context.Context) bool { - return strings.Contains(s.Config, "terraform {") -} - // mergedConfig prepends any necessary terraform configuration blocks to the // TestStep Config. // @@ -31,21 +16,26 @@ func (s TestStep) configHasTerraformBlock(_ context.Context) bool { // TestStep, the terraform configuration block should be included with the // step configuration to prevent errors with providers outside the // registry.terraform.io hostname or outside the hashicorp namespace. -func (s TestStep) mergedConfig(ctx context.Context, testCase TestCase) string { +// This is only necessary when using TestStep.Config. +// +// When TestStep.ConfigDirectory is used, the expectation is that the +// Terraform configuration files will specify a terraform configuration +// block and/or provider blocks as necessary. +func (s TestStep) mergedConfig(ctx context.Context, testCase TestCase, configHasTerraformBlock, configHasProviderBlock bool) string { var config strings.Builder // Prevent issues with existing configurations containing the terraform // configuration block. - if s.configHasTerraformBlock(ctx) { + if configHasTerraformBlock { config.WriteString(s.Config) return config.String() } if testCase.hasProviders(ctx) { - config.WriteString(testCase.providerConfig(ctx, s.configHasProviderBlock(ctx))) + config.WriteString(testCase.providerConfig(ctx, configHasProviderBlock)) } else { - config.WriteString(s.providerConfig(ctx, s.configHasProviderBlock(ctx))) + config.WriteString(s.providerConfig(ctx, configHasProviderBlock)) } config.WriteString(s.Config) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/teststep_validate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/teststep_validate.go new file mode 100644 index 00000000000..d63db4dc89e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/teststep_validate.go @@ -0,0 +1,239 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package resource + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/internal/logging" + "github.com/hashicorp/terraform-plugin-testing/internal/teststep" +) + +// testStepValidateRequest contains data for the (TestStep).validate() method. +type testStepValidateRequest struct { + // StepConfiguration contains the TestStep configuration derived from + // TestStep.Config or TestStep.ConfigDirectory. + StepConfiguration teststep.Config + + // StepNumber is the index of the TestStep in the TestCase.Steps. + StepNumber int + + // TestCaseHasExternalProviders is enabled if the TestCase has + // ExternalProviders. + TestCaseHasExternalProviders bool + + // TestCaseHasProviders is enabled if the TestCase has set any of + // ExternalProviders, ProtoV5ProviderFactories, ProtoV6ProviderFactories, + // or ProviderFactories. 
+ TestCaseHasProviders bool + + // TestName is the name of the test. + TestName string +} + +// hasExternalProviders returns true if the TestStep has +// ExternalProviders set. +func (s TestStep) hasExternalProviders() bool { + return len(s.ExternalProviders) > 0 +} + +// hasProviders returns true if the TestStep has set any of the +// ExternalProviders, ProtoV5ProviderFactories, ProtoV6ProviderFactories, or +// ProviderFactories fields. It will also return true if ConfigDirectory or +// Config contain terraform configuration which specify a provider block. +func (s TestStep) hasProviders(ctx context.Context, stepIndex int, testName string) (bool, error) { + if len(s.ExternalProviders) > 0 { + return true, nil + } + + if len(s.ProtoV5ProviderFactories) > 0 { + return true, nil + } + + if len(s.ProtoV6ProviderFactories) > 0 { + return true, nil + } + + if len(s.ProviderFactories) > 0 { + return true, nil + } + + configRequest := teststep.PrepareConfigurationRequest{ + Directory: s.ConfigDirectory, + File: s.ConfigFile, + TestStepConfigRequest: config.TestStepConfigRequest{ + StepNumber: stepIndex + 1, + TestName: testName, + }, + }.Exec() + + cfg := teststep.Configuration(configRequest) + + var cfgHasProviders bool + + if cfg != nil { + var err error + + cfgHasProviders, err = cfg.HasProviderBlock(ctx) + + if err != nil { + return false, err + } + } + + if cfgHasProviders { + return true, nil + } + + return false, nil +} + +// validate ensures the TestStep is valid based on the following criteria: +// +// - Config or ImportState or RefreshState is set. +// - Config and RefreshState are not both set. +// - RefreshState and Destroy are not both set. +// - RefreshState is not the first TestStep. +// - Providers are not specified (ExternalProviders, +// ProtoV5ProviderFactories, ProtoV6ProviderFactories, ProviderFactories) +// if specified at the TestCase level. +// - Providers are specified (ExternalProviders, ProtoV5ProviderFactories, +// ProtoV6ProviderFactories, ProviderFactories) if not specified at the +// TestCase level. +// - No overlapping ExternalProviders and ProviderFactories entries +// - ResourceName is not empty when ImportState is true, ImportStateIdFunc +// is not set, and ImportStateId is not set. +// - ConfigPlanChecks (PreApply, PostApplyPreRefresh, PostApplyPostRefresh) are only set when Config is set. +// - ConfigPlanChecks.PreApply are only set when PlanOnly is false. +// - RefreshPlanChecks (PostRefresh) are only set when RefreshState is set. 
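// Read together, the criteria above admit a step like the following sketch:
// configuration sourced from a directory (whose files are assumed to declare
// their own provider requirements), variables supplied alongside it, and a
// pre-apply plan check with PlanOnly left unset. The directory path, variable
// name, resource address, and version bound are assumptions for illustration:
//
//	resource.Test(t, resource.TestCase{
//		TerraformVersionChecks: []tfversion.TerraformVersionCheck{
//			// Skip the test on Terraform CLI versions below 1.1.0.
//			tfversion.SkipBelow(version.Must(version.NewVersion("1.1.0"))),
//		},
//		Steps: []resource.TestStep{
//			{
//				ConfigDirectory: config.StaticDirectory("testdata/thing_basic"),
//				ConfigVariables: config.Variables{
//					"name": config.StringVariable("example"),
//				},
//				ConfigPlanChecks: resource.ConfigPlanChecks{
//					PreApply: []plancheck.PlanCheck{
//						// Expect the plan to create the resource.
//						plancheck.ExpectResourceAction("examplecloud_thing.test", plancheck.ResourceActionCreate),
//					},
//				},
//			},
//		},
//	})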
+func (s TestStep) validate(ctx context.Context, req testStepValidateRequest) error { + ctx = logging.TestStepNumberContext(ctx, req.StepNumber) + + logging.HelperResourceTrace(ctx, "Validating TestStep") + + if req.StepConfiguration == nil && !s.ImportState && !s.RefreshState { + err := fmt.Errorf("TestStep missing Config or ConfigDirectory or ConfigFile or ImportState or RefreshState") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if req.StepConfiguration != nil && s.RefreshState { + err := fmt.Errorf("TestStep cannot have Config or ConfigDirectory or ConfigFile and RefreshState") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if s.RefreshState && s.Destroy { + err := fmt.Errorf("TestStep cannot have RefreshState and Destroy") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if s.RefreshState && req.StepNumber == 1 { + err := fmt.Errorf("TestStep cannot have RefreshState as first step") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if s.ImportState && s.RefreshState { + err := fmt.Errorf("TestStep cannot have ImportState and RefreshState in same step") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + for name := range s.ExternalProviders { + if _, ok := s.ProviderFactories[name]; ok { + err := fmt.Errorf("TestStep provider %q set in both ExternalProviders and ProviderFactories", name) + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + } + + if req.TestCaseHasExternalProviders && req.StepConfiguration != nil && req.StepConfiguration.HasConfigurationFiles() { + err := fmt.Errorf("Providers must only be specified within the terraform configuration files when using TestStep.Config") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if s.hasExternalProviders() && req.StepConfiguration != nil && req.StepConfiguration.HasConfigurationFiles() { + err := fmt.Errorf("Providers must only be specified within the terraform configuration files when using TestStep.Config") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + // We need a 0-based step index for consistency + hasProviders, err := s.hasProviders(ctx, req.StepNumber-1, req.TestName) + + if err != nil { + logging.HelperResourceError(ctx, "TestStep error checking for providers", map[string]interface{}{logging.KeyError: err}) + return err + } + + if req.TestCaseHasProviders && hasProviders { + err := fmt.Errorf("Providers must only be specified either at the TestCase or TestStep level") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + var cfgHasProviderBlock bool + + if req.StepConfiguration != nil { + cfgHasProviderBlock, err = req.StepConfiguration.HasProviderBlock(ctx) + + if err != nil { + logging.HelperResourceError(ctx, "TestStep error checking for if configuration has provider block", map[string]interface{}{logging.KeyError: err}) + return err + } + } + + if !req.TestCaseHasProviders && !hasProviders && 
!cfgHasProviderBlock { + err := fmt.Errorf("Providers must be specified at the TestCase level, or in all TestStep, or in TestStep.ConfigDirectory or TestStep.ConfigFile") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if s.ImportState { + if s.ImportStateId == "" && s.ImportStateIdFunc == nil && s.ResourceName == "" { + err := fmt.Errorf("TestStep ImportState must be specified with ImportStateId, ImportStateIdFunc, or ResourceName") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + } + + if len(s.ConfigPlanChecks.PreApply) > 0 { + if req.StepConfiguration == nil { + err := fmt.Errorf("TestStep ConfigPlanChecks.PreApply must only be specified with Config, ConfigDirectory or ConfigFile") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if s.PlanOnly { + err := fmt.Errorf("TestStep ConfigPlanChecks.PreApply cannot be run with PlanOnly") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + } + + if len(s.ConfigPlanChecks.PostApplyPreRefresh) > 0 && req.StepConfiguration == nil { + err := fmt.Errorf("TestStep ConfigPlanChecks.PostApplyPreRefresh must only be specified with Config, ConfigDirectory or ConfigFile") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if len(s.ConfigPlanChecks.PostApplyPostRefresh) > 0 && req.StepConfiguration == nil { + err := fmt.Errorf("TestStep ConfigPlanChecks.PostApplyPostRefresh must only be specified with Config, ConfigDirectory or ConfigFile") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if len(s.RefreshPlanChecks.PostRefresh) > 0 && !s.RefreshState { + err := fmt.Errorf("TestStep RefreshPlanChecks.PostRefresh must only be specified with RefreshState") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/tfversion_checks.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/tfversion_checks.go new file mode 100644 index 00000000000..1bec0abd620 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/tfversion_checks.go @@ -0,0 +1,31 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package resource
+
+import (
+	"context"
+
+	"github.com/hashicorp/go-version"
+	"github.com/mitchellh/go-testing-interface"
+
+	"github.com/hashicorp/terraform-plugin-testing/tfversion"
+)
+
+func runTFVersionChecks(ctx context.Context, t testing.T, terraformVersion *version.Version, terraformVersionChecks []tfversion.TerraformVersionCheck) {
+	t.Helper()
+
+	for _, tfVersionCheck := range terraformVersionChecks {
+		resp := tfversion.CheckTerraformVersionResponse{}
+		tfVersionCheck.CheckTerraformVersion(ctx, tfversion.CheckTerraformVersionRequest{TerraformVersion: terraformVersion}, &resp)
+
+		if resp.Error != nil {
+			t.Fatalf("%s", resp.Error)
+		}
+
+		if resp.Skip != "" {
+			t.Skip(resp.Skip)
+		}
+	}
+
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/wait.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/wait.go
new file mode 100644
index 00000000000..332791bc911
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/wait.go
@@ -0,0 +1,135 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package resource
+
+import (
+	"context"
+	"errors"
+	"sync"
+	"time"
+)
+
+// RetryContext is a basic wrapper around StateChangeConf that will just retry
+// a function until it no longer returns an error.
+//
+// Cancellation from the passed-in context will propagate through to the
+// underlying StateChangeConf.
+//
+// Deprecated: Copy this function to the provider codebase or use
+// github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry.RetryContext.
+func RetryContext(ctx context.Context, timeout time.Duration, f RetryFunc) error {
+	// These are used to pull the error out of the function; need a mutex to
+	// avoid a data race.
+	var resultErr error
+	var resultErrMu sync.Mutex
+
+	c := &StateChangeConf{
+		Pending:    []string{"retryableerror"},
+		Target:     []string{"success"},
+		Timeout:    timeout,
+		MinTimeout: 500 * time.Millisecond,
+		Refresh: func() (interface{}, string, error) {
+			rerr := f()
+
+			resultErrMu.Lock()
+			defer resultErrMu.Unlock()
+
+			if rerr == nil {
+				resultErr = nil
+				return 42, "success", nil
+			}
+
+			resultErr = rerr.Err
+
+			if rerr.Retryable {
+				return 42, "retryableerror", nil
+			}
+			return nil, "quit", rerr.Err
+		},
+	}
+
+	_, waitErr := c.WaitForStateContext(ctx)
+
+	// Need to acquire the lock here to be able to avoid race using resultErr as
+	// the return value
+	resultErrMu.Lock()
+	defer resultErrMu.Unlock()
+
+	// resultErr may be nil because the wait timed out and resultErr was never
+	// set; this is still an error
+	if resultErr == nil {
+		return waitErr
+	}
+	// resultErr takes precedence over waitErr if both are set because it is
+	// more likely to be useful
+	return resultErr
+}
+
+// Retry is a basic wrapper around StateChangeConf that will just retry
+// a function until it no longer returns an error.
+//
+// Deprecated: Please use RetryContext to ensure proper plugin shutdown.
+func Retry(timeout time.Duration, f RetryFunc) error {
+	return RetryContext(context.Background(), timeout, f)
+}
+
+// RetryFunc is the function retried until it succeeds.
+//
+// Deprecated: Copy this type to the provider codebase or use
+// github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry.RetryFunc.
+type RetryFunc func() *RetryError
+
+// RetryError is the required return type of RetryFunc.
It forces client code +// to choose whether or not a given error is retryable. +// +// Deprecated: Copy this type to the provider codebase or use +// github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry.RetryError. +type RetryError struct { + Err error + Retryable bool +} + +// Unwrap returns the Err, compatible with errors.Unwrap. +// +// Deprecated: Copy this method to the provider codebase or use +// github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry.RetryError. +func (e *RetryError) Unwrap() error { + return e.Err +} + +// RetryableError is a helper to create a RetryError that's retryable from a +// given error. To prevent logic errors, will return an error when passed a +// nil error. +// +// Deprecated: Copy this function to the provider codebase or use +// github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry.RetryableError. +func RetryableError(err error) *RetryError { + if err == nil { + return &RetryError{ + Err: errors.New("empty retryable error received. " + + "This is a bug with the Terraform provider and should be " + + "reported as a GitHub issue in the provider repository."), + Retryable: false, + } + } + return &RetryError{Err: err, Retryable: true} +} + +// NonRetryableError is a helper to create a RetryError that's _not_ retryable +// from a given error. To prevent logic errors, will return an error when +// passed a nil error. +// +// Deprecated: Copy this function to the provider codebase or use +// github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry.NonRetryableError. +func NonRetryableError(err error) *RetryError { + if err == nil { + return &RetryError{ + Err: errors.New("empty non-retryable error received. " + + "This is a bug with the Terraform provider and should be " + + "reported as a GitHub issue in the provider repository."), + Retryable: false, + } + } + return &RetryError{Err: err, Retryable: false} +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/addrs/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/addrs/doc.go new file mode 100644 index 00000000000..0d29d9f4563 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/addrs/doc.go @@ -0,0 +1,20 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package addrs contains types that represent "addresses", which are +// references to specific objects within a Terraform configuration or +// state. +// +// All addresses have string representations based on HCL traversal syntax +// which should be used in the user-interface, and also in-memory +// representations that can be used internally. +// +// For object types that exist within Terraform modules a pair of types is +// used. The "local" part of the address is represented by a type, and then +// an absolute path to that object in the context of its module is represented +// by a type of the same name with an "Abs" prefix added, for "absolute". +// +// All types within this package should be treated as immutable, even if this +// is not enforced by the Go compiler. It is always an implementation error +// to modify an address object in-place after it is initially constructed. 
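+//
+// For example, a module call "network" keyed by the string "a" that contains
+// a module call "subnet" keyed by the integer 0 has the instance address
+// module.network["a"].module.subnet[0].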
+package addrs
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/addrs/instance_key.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/addrs/instance_key.go
new file mode 100644
index 00000000000..56700fc0572
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/addrs/instance_key.go
@@ -0,0 +1,50 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package addrs
+
+import (
+	"fmt"
+)
+
+// instanceKey represents the key of an instance within an object that
+// contains multiple instances due to using "count" or "for_each" arguments
+// in configuration.
+//
+// intKey and stringKey are the two implementations of this type. No other
+// implementations are allowed. The single instance of an object that _isn't_
+// using "count" or "for_each" is represented by NoKey, which is a nil
+// instanceKey.
+type instanceKey interface {
+	instanceKeySigil()
+	String() string
+}
+
+// NoKey represents the absence of an instanceKey, for the single instance
+// of a configuration object that does not use "count" or "for_each" at all.
+var NoKey instanceKey
+
+// intKey is the instanceKey representation for integer indices, as
+// used when the "count" argument is specified or if for_each is used with
+// a sequence type.
+type intKey int
+
+func (k intKey) instanceKeySigil() {
+}
+
+func (k intKey) String() string {
+	return fmt.Sprintf("[%d]", int(k))
+}
+
+// stringKey is the instanceKey representation for string indices, as
+// used when the "for_each" argument is specified with a map or object type.
+type stringKey string
+
+func (k stringKey) instanceKeySigil() {
+}
+
+func (k stringKey) String() string {
+	// FIXME: This isn't _quite_ right because Go's quoted string syntax is
+	// slightly different than HCL's, but we'll accept it for now.
+	return fmt.Sprintf("[%q]", string(k))
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/addrs/module.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/addrs/module.go
new file mode 100644
index 00000000000..8dbbb469d48
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/addrs/module.go
@@ -0,0 +1,16 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package addrs
+
+// Module is an address for a module call within configuration. This is
+// the static counterpart of ModuleInstance, representing a traversal through
+// the static module call tree in configuration; it does not take into account
+// the potentially-multiple instances of a module that might be created by
+// "count" and "for_each" arguments within those calls.
+//
+// This type should be used only in very specialized cases when working with
+// the static module call tree. Type ModuleInstance is appropriate in more cases.
+//
+// Although Module is a slice, it should be treated as immutable after creation.
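+//
+// For example, the call address module.network.module.subnets is represented
+// as Module{"network", "subnets"}.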
+type Module []string diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/addrs/module_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/addrs/module_instance.go new file mode 100644 index 00000000000..e43fd3e3622 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/addrs/module_instance.go @@ -0,0 +1,242 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" + + "github.com/hashicorp/terraform-plugin-testing/internal/tfdiags" +) + +// ModuleInstance is an address for a particular module instance within the +// dynamic module tree. This is an extension of the static traversals +// represented by type Module that deals with the possibility of a single +// module call producing multiple instances via the "count" and "for_each" +// arguments. +// +// Although ModuleInstance is a slice, it should be treated as immutable after +// creation. +type ModuleInstance []ModuleInstanceStep + +func parseModuleInstance(traversal hcl.Traversal) (ModuleInstance, tfdiags.Diagnostics) { + mi, remain, diags := parseModuleInstancePrefix(traversal) + if len(remain) != 0 { + if len(remain) == len(traversal) { + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid module instance address", + "A module instance address must begin with \"module.\".", + )) + } else { + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid module instance address", + "The module instance address is followed by additional invalid content.", + )) + } + } + return mi, diags +} + +// ParseModuleInstanceStr is a helper wrapper around ParseModuleInstance +// that takes a string and parses it with the HCL native syntax traversal parser +// before interpreting it. +// +// This should be used only in specialized situations since it will cause the +// created references to not have any meaningful source location information. +// If a reference string is coming from a source that should be identified in +// error messages then the caller should instead parse it directly using a +// suitable function from the HCL API and pass the traversal itself to +// ParseProviderConfigCompact. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// then the returned address is invalid. +func ParseModuleInstanceStr(str string) (ModuleInstance, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + for _, err := range parseDiags.Errs() { + // ignore warnings, they don't matter in this case + diags = append(diags, tfdiags.FromError(err)) + } + if parseDiags.HasErrors() { + return nil, diags + } + + addr, addrDiags := parseModuleInstance(traversal) + diags = append(diags, addrDiags...) 
+	return addr, diags
+}
+
+func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Traversal, tfdiags.Diagnostics) {
+	remain := traversal
+	var mi ModuleInstance
+	var diags tfdiags.Diagnostics
+
+	for len(remain) > 0 {
+		var next string
+		switch tt := remain[0].(type) {
+		case hcl.TraverseRoot:
+			next = tt.Name
+		case hcl.TraverseAttr:
+			next = tt.Name
+		default:
+			diags = append(diags, tfdiags.Diag(
+				tfdiags.Error,
+				"Invalid address operator",
+				"Module address prefix must be followed by dot and then a name.",
+			))
+		}
+
+		if next != "module" {
+			break
+		}
+
+		remain = remain[1:]
+		// If we have the prefix "module" then we should be followed by a
+		// module call name, as an attribute, and then optionally an index step
+		// giving the instance key.
+		if len(remain) == 0 {
+			diags = append(diags, tfdiags.Diag(
+				tfdiags.Error,
+				"Invalid address operator",
+				"Prefix \"module.\" must be followed by a module name.",
+			))
+			break
+		}
+
+		var moduleName string
+		switch tt := remain[0].(type) {
+		case hcl.TraverseAttr:
+			moduleName = tt.Name
+		default:
+			diags = append(diags, tfdiags.Diag(
+				tfdiags.Error,
+				"Invalid address operator",
+				"Prefix \"module.\" must be followed by a module name.",
+			))
+		}
+		remain = remain[1:]
+		step := ModuleInstanceStep{
+			Name: moduleName,
+		}
+
+		if len(remain) > 0 {
+			if idx, ok := remain[0].(hcl.TraverseIndex); ok {
+				remain = remain[1:]
+
+				switch idx.Key.Type() {
+				case cty.String:
+					step.InstanceKey = stringKey(idx.Key.AsString())
+				case cty.Number:
+					var idxInt int
+					err := gocty.FromCtyValue(idx.Key, &idxInt)
+					if err == nil {
+						step.InstanceKey = intKey(idxInt)
+					} else {
+						diags = append(diags, tfdiags.Diag(
+							tfdiags.Error,
+							"Invalid address operator",
+							fmt.Sprintf("Invalid module index: %s.", err),
+						))
+					}
+				default:
+					// Should never happen, because no other types are allowed in traversal indices.
+					diags = append(diags, tfdiags.Diag(
+						tfdiags.Error,
+						"Invalid address operator",
+						"Invalid module key: must be either a string or an integer.",
+					))
+				}
+			}
+		}
+
+		mi = append(mi, step)
+	}
+
+	var retRemain hcl.Traversal
+	if len(remain) > 0 {
+		retRemain = make(hcl.Traversal, len(remain))
+		copy(retRemain, remain)
+		// The first element here might be either a TraverseRoot or a
+		// TraverseAttr, depending on whether we had a module address on the
+		// front. To make life easier for callers, we'll normalize to always
+		// start with a TraverseRoot.
+		if tt, ok := retRemain[0].(hcl.TraverseAttr); ok {
+			retRemain[0] = hcl.TraverseRoot{
+				Name:     tt.Name,
+				SrcRange: tt.SrcRange,
+			}
+		}
+	}
+
+	return mi, retRemain, diags
+}
+
+// UnkeyedInstanceShim is a shim method for converting a Module address to the
+// equivalent ModuleInstance address that assumes that no modules have
+// keyed instances.
+//
+// This is a temporary allowance for the fact that Terraform does not presently
+// support "count" and "for_each" on modules, and thus graph building code that
+// derives graph nodes from configuration must just assume unkeyed modules
+// in order to construct the graph. At a later time when "count" and "for_each"
+// support is added for modules, all callers of this method will need to be
+// reworked to allow for keyed module instances.
+func (m Module) UnkeyedInstanceShim() ModuleInstance {
+	path := make(ModuleInstance, len(m))
+	for i, name := range m {
+		path[i] = ModuleInstanceStep{Name: name}
+	}
+	return path
+}
+
+// ModuleInstanceStep is a single traversal step through the dynamic module
+// tree.
It is used only as part of ModuleInstance.
+type ModuleInstanceStep struct {
+	Name        string
+	InstanceKey instanceKey
+}
+
+// RootModuleInstance is the module instance address representing the root
+// module, which is also the zero value of ModuleInstance.
+var RootModuleInstance ModuleInstance
+
+// Child returns the address of a child module instance of the receiver,
+// identified by the given name and key.
+func (m ModuleInstance) Child(name string, key instanceKey) ModuleInstance {
+	ret := make(ModuleInstance, 0, len(m)+1)
+	ret = append(ret, m...)
+	return append(ret, ModuleInstanceStep{
+		Name:        name,
+		InstanceKey: key,
+	})
+}
+
+// String returns a string representation of the receiver, in the format used
+// within e.g. user-provided resource addresses.
+//
+// The address of the root module has the empty string as its representation.
+func (m ModuleInstance) String() string {
+	var buf bytes.Buffer
+	sep := ""
+	for _, step := range m {
+		buf.WriteString(sep)
+		buf.WriteString("module.")
+		buf.WriteString(step.Name)
+		if step.InstanceKey != NoKey {
+			buf.WriteString(step.InstanceKey.String())
+		}
+		sep = "."
+	}
+	return buf.String()
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema/coerce_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema/coerce_value.go
new file mode 100644
index 00000000000..d12ff8cced9
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema/coerce_value.go
@@ -0,0 +1,253 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package configschema
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/go-cty/cty"
+	"github.com/hashicorp/go-cty/cty/convert"
+)
+
+// CoerceValue attempts to force the given value to conform to the type
+// implied by the receiver.
+//
+// This is useful in situations where a configuration must be derived from
+// an already-decoded value. It is always better to decode directly from
+// configuration where possible since then source location information is
+// still available to produce diagnostics, but in special situations this
+// function allows a compatible result to be obtained even if the
+// configuration objects are not available.
+//
+// If the given value cannot be converted to conform to the receiving schema
+// then an error is returned describing one of possibly many problems. This
+// error may be a cty.PathError indicating a position within the nested
+// data structure where the problem applies.
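+//
+// A minimal usage sketch (hypothetical schema, for illustration only):
+//
+//	b := &Block{Attributes: map[string]*Attribute{
+//		"name": {Type: cty.String, Optional: true},
+//	}}
+//	v, err := b.CoerceValue(cty.ObjectVal(map[string]cty.Value{
+//		"name": cty.StringVal("example"),
+//	}))
+//
+// On success v conforms to b.ImpliedType(); on failure err may be a
+// cty.PathError identifying where in the value the problem lies.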
+func (b *Block) CoerceValue(in cty.Value) (cty.Value, error) { + var path cty.Path + return b.coerceValue(in, path) +} + +func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { + switch { + case in.IsNull(): + return cty.NullVal(b.ImpliedType()), nil + case !in.IsKnown(): + return cty.UnknownVal(b.ImpliedType()), nil + } + + ty := in.Type() + if !ty.IsObjectType() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("an object is required") + } + + for name := range ty.AttributeTypes() { + if _, defined := b.Attributes[name]; defined { + continue + } + if _, defined := b.BlockTypes[name]; defined { + continue + } + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("unexpected attribute %q", name) + } + + attrs := make(map[string]cty.Value) + + for name, attrS := range b.Attributes { + var val cty.Value + switch { + case ty.HasAttribute(name): + val = in.GetAttr(name) + case attrS.Computed || attrS.Optional: + val = cty.NullVal(attrS.Type) + default: + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", name) + } + + val, err := attrS.coerceValue(val, append(path, cty.GetAttrStep{Name: name})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + + attrs[name] = val + } + for typeName, blockS := range b.BlockTypes { + switch blockS.Nesting { + + case NestingSingle, NestingGroup: + switch { + case ty.HasAttribute(typeName): + var err error + val := in.GetAttr(typeName) + attrs[typeName], err = blockS.coerceValue(val, append(path, cty.GetAttrStep{Name: typeName})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + default: + attrs[typeName] = blockS.EmptyValue() + } + + case NestingList: + switch { + case ty.HasAttribute(typeName): + coll := in.GetAttr(typeName) + + switch { + case coll.IsNull(): + attrs[typeName] = cty.NullVal(cty.List(blockS.ImpliedType())) + continue + case !coll.IsKnown(): + attrs[typeName] = cty.UnknownVal(cty.List(blockS.ImpliedType())) + continue + } + + if !coll.CanIterateElements() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a list") + } + l := coll.LengthInt() + + if l == 0 { + attrs[typeName] = cty.ListValEmpty(blockS.ImpliedType()) + continue + } + elems := make([]cty.Value, 0, l) + { + path = append(path, cty.GetAttrStep{Name: typeName}) + for it := coll.ElementIterator(); it.Next(); { + var err error + idx, val := it.Element() + val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: idx})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + elems = append(elems, val) + } + } + attrs[typeName] = cty.ListVal(elems) + default: + attrs[typeName] = cty.ListValEmpty(blockS.ImpliedType()) + } + + case NestingSet: + switch { + case ty.HasAttribute(typeName): + coll := in.GetAttr(typeName) + + switch { + case coll.IsNull(): + attrs[typeName] = cty.NullVal(cty.Set(blockS.ImpliedType())) + continue + case !coll.IsKnown(): + attrs[typeName] = cty.UnknownVal(cty.Set(blockS.ImpliedType())) + continue + } + + if !coll.CanIterateElements() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a set") + } + l := coll.LengthInt() + + if l == 0 { + attrs[typeName] = cty.SetValEmpty(blockS.ImpliedType()) + continue + } + elems := make([]cty.Value, 0, l) + { + path = append(path, cty.GetAttrStep{Name: typeName}) + for it := coll.ElementIterator(); it.Next(); { + var err error + idx, val := it.Element() + val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: idx})) + if err != nil { + return 
cty.UnknownVal(b.ImpliedType()), err + } + elems = append(elems, val) + } + } + attrs[typeName] = cty.SetVal(elems) + default: + attrs[typeName] = cty.SetValEmpty(blockS.ImpliedType()) + } + + case NestingMap: + switch { + case ty.HasAttribute(typeName): + coll := in.GetAttr(typeName) + + switch { + case coll.IsNull(): + attrs[typeName] = cty.NullVal(cty.Map(blockS.ImpliedType())) + continue + case !coll.IsKnown(): + attrs[typeName] = cty.UnknownVal(cty.Map(blockS.ImpliedType())) + continue + } + + if !coll.CanIterateElements() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a map") + } + l := coll.LengthInt() + if l == 0 { + attrs[typeName] = cty.MapValEmpty(blockS.ImpliedType()) + continue + } + elems := make(map[string]cty.Value) + { + path = append(path, cty.GetAttrStep{Name: typeName}) + for it := coll.ElementIterator(); it.Next(); { + var err error + key, val := it.Element() + if key.Type() != cty.String || key.IsNull() || !key.IsKnown() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a map") + } + val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: key})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + elems[key.AsString()] = val + } + } + + // If the attribute values here contain any DynamicPseudoTypes, + // the concrete type must be an object. + useObject := false + switch { + case coll.Type().IsObjectType(): + useObject = true + default: + // It's possible that we were given a map, and need to coerce it to an object + ety := coll.Type().ElementType() + for _, v := range elems { + if !v.Type().Equals(ety) { + useObject = true + break + } + } + } + + if useObject { + attrs[typeName] = cty.ObjectVal(elems) + } else { + attrs[typeName] = cty.MapVal(elems) + } + default: + attrs[typeName] = cty.MapValEmpty(blockS.ImpliedType()) + } + + default: + // should never happen because above is exhaustive + panic(fmt.Errorf("unsupported nesting mode %#v", blockS.Nesting)) + } + } + + return cty.ObjectVal(attrs), nil +} + +func (a *Attribute) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { + val, err := convert.Convert(in, a.Type) + if err != nil { + return cty.UnknownVal(a.Type), path.NewError(err) + } + return val, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema/doc.go new file mode 100644 index 00000000000..d96be9c7f0f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema/doc.go @@ -0,0 +1,17 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package configschema contains types for describing the expected structure +// of a configuration block whose shape is not known until runtime. +// +// For example, this is used to describe the expected contents of a resource +// configuration block, which is defined by the corresponding provider plugin +// and thus not compiled into Terraform core. +// +// A configschema primarily describes the shape of configuration, but it is +// also suitable for use with other structures derived from the configuration, +// such as the cached state of a resource or a resource diff. +// +// This package should not be confused with the package helper/schema, which +// is the higher-level helper library used to implement providers themselves. 
+package configschema diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema/empty_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema/empty_value.go new file mode 100644 index 00000000000..cc1107fa0bd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema/empty_value.go @@ -0,0 +1,62 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configschema + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// EmptyValue returns the "empty value" for the receiving block, which for +// a block type is a non-null object where all of the attribute values are +// the empty values of the block's attributes and nested block types. +// +// In other words, it returns the value that would be returned if an empty +// block were decoded against the receiving schema, assuming that no required +// attribute or block constraints were honored. +func (b *Block) EmptyValue() cty.Value { + vals := make(map[string]cty.Value) + for name, attrS := range b.Attributes { + vals[name] = attrS.EmptyValue() + } + for name, blockS := range b.BlockTypes { + vals[name] = blockS.EmptyValue() + } + return cty.ObjectVal(vals) +} + +// EmptyValue returns the "empty value" for the receiving attribute, which is +// the value that would be returned if there were no definition of the attribute +// at all, ignoring any required constraint. +func (a *Attribute) EmptyValue() cty.Value { + return cty.NullVal(a.Type) +} + +// EmptyValue returns the "empty value" for when there are zero nested blocks +// present of the receiving type. +func (b *NestedBlock) EmptyValue() cty.Value { + switch b.Nesting { + case NestingSingle: + return cty.NullVal(b.Block.ImpliedType()) + case NestingGroup: + return b.Block.EmptyValue() + case NestingList: + if ty := b.Block.ImpliedType(); ty.HasDynamicTypes() { + return cty.EmptyTupleVal + } else { + return cty.ListValEmpty(ty) + } + case NestingMap: + if ty := b.Block.ImpliedType(); ty.HasDynamicTypes() { + return cty.EmptyObjectVal + } else { + return cty.MapValEmpty(ty) + } + case NestingSet: + return cty.SetValEmpty(b.Block.ImpliedType()) + default: + // Should never get here because the above is intended to be exhaustive, + // but we'll be robust and return a result nonetheless. + return cty.NullVal(cty.DynamicPseudoType) + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema/implied_type.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema/implied_type.go new file mode 100644 index 00000000000..4de413519f6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema/implied_type.go @@ -0,0 +1,71 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configschema + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// ImpliedType returns the cty.Type that would result from decoding a +// configuration block using the receiving block schema. +// +// ImpliedType always returns a result, even if the given schema is +// inconsistent. 
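+//
+// As a sketch (hypothetical schema, for illustration only), a block with a
+// string attribute "name" and a NestingList block type "rule" containing no
+// dynamically-typed attributes implies:
+//
+//	cty.Object(map[string]cty.Type{
+//		"name": cty.String,
+//		"rule": cty.List( /* the nested block's implied object type */ ),
+//	})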
+func (b *Block) ImpliedType() cty.Type {
+	if b == nil {
+		return cty.EmptyObject
+	}
+
+	atys := make(map[string]cty.Type)
+
+	for name, attrS := range b.Attributes {
+		atys[name] = attrS.Type
+	}
+
+	for name, blockS := range b.BlockTypes {
+		if _, exists := atys[name]; exists {
+			panic("invalid schema, blocks and attributes cannot have the same name")
+		}
+
+		childType := blockS.Block.ImpliedType()
+
+		switch blockS.Nesting {
+		case NestingSingle, NestingGroup:
+			atys[name] = childType
+		case NestingList:
+			// We prefer to use a list where possible, since it makes our
+			// implied type more complete, but if there are any
+			// dynamically-typed attributes inside we must use a tuple
+			// instead, which means our type _constraint_ must be
+			// cty.DynamicPseudoType to allow the tuple type to be decided
+			// separately for each value.
+			if childType.HasDynamicTypes() {
+				atys[name] = cty.DynamicPseudoType
+			} else {
+				atys[name] = cty.List(childType)
+			}
+		case NestingSet:
+			if childType.HasDynamicTypes() {
+				panic("can't use cty.DynamicPseudoType inside a block type with NestingSet")
+			}
+			atys[name] = cty.Set(childType)
+		case NestingMap:
+			// We prefer to use a map where possible, since it makes our
+			// implied type more complete, but if there are any
+			// dynamically-typed attributes inside we must use an object
+			// instead, which means our type _constraint_ must be
+			// cty.DynamicPseudoType to allow the object type to be decided
+			// separately for each value.
+			if childType.HasDynamicTypes() {
+				atys[name] = cty.DynamicPseudoType
+			} else {
+				atys[name] = cty.Map(childType)
+			}
+		default:
+			panic("invalid nesting type")
+		}
+	}
+
+	return cty.Object(atys)
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema/nestingmode_string.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema/nestingmode_string.go
new file mode 100644
index 00000000000..febe743e11a
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema/nestingmode_string.go
@@ -0,0 +1,28 @@
+// Code generated by "stringer -type=NestingMode"; DO NOT EDIT.
+
+package configschema
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[nestingModeInvalid-0]
+	_ = x[NestingSingle-1]
+	_ = x[NestingGroup-2]
+	_ = x[NestingList-3]
+	_ = x[NestingSet-4]
+	_ = x[NestingMap-5]
+}
+
+const _NestingMode_name = "nestingModeInvalidNestingSingleNestingGroupNestingListNestingSetNestingMap"
+
+var _NestingMode_index = [...]uint8{0, 18, 31, 43, 54, 64, 74}
+
+func (i NestingMode) String() string {
+	if i < 0 || i >= NestingMode(len(_NestingMode_index)-1) {
+		return "NestingMode(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _NestingMode_name[_NestingMode_index[i]:_NestingMode_index[i+1]]
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema/schema.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema/schema.go
new file mode 100644
index 00000000000..fafe3fa91ce
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema/schema.go
@@ -0,0 +1,161 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package configschema
+
+import (
+	"github.com/hashicorp/go-cty/cty"
+)
+
+// StringKind represents the format a string is in.
+type StringKind int
+
+const (
+	// StringPlain indicates a string is plain-text and requires no processing for display.
+	StringPlain StringKind = iota
+	// StringMarkdown indicates a string is in markdown format and may
+	// require additional processing to display.
+	StringMarkdown
+)
+
+// Block represents a configuration block.
+//
+// "Block" here is a logical grouping construct, though it happens to map
+// directly onto the physical block syntax of Terraform's native configuration
+// syntax. It may be more a matter of convention in other syntaxes, such as
+// JSON.
+//
+// When converted to a value, a Block always becomes an instance of an object
+// type derived from its defined attributes and nested blocks.
+type Block struct {
+	// Attributes describes any attributes that may appear directly inside
+	// the block.
+	Attributes map[string]*Attribute
+
+	// BlockTypes describes any nested block types that may appear directly
+	// inside the block.
+	BlockTypes map[string]*NestedBlock
+
+	// Description and DescriptionKind contain a user-facing description of the block
+	// and the format of that string.
+	Description     string
+	DescriptionKind StringKind
+
+	// Deprecated indicates whether the block has been marked as deprecated in the
+	// provider and usage should be discouraged.
+	Deprecated bool
+}
+
+// Attribute represents a configuration attribute, within a block.
+type Attribute struct {
+	// Type is a type specification that the attribute's value must conform to.
+	Type cty.Type
+
+	// Description is an English-language description of the purpose and
+	// usage of the attribute. A description should be concise and use only
+	// one or two sentences, leaving full definition to longer-form
+	// documentation defined elsewhere.
+	Description     string
+	DescriptionKind StringKind
+
+	// Required, if set to true, specifies that an omitted or null value is
+	// not permitted.
+	Required bool
+
+	// Optional, if set to true, specifies that an omitted or null value is
+	// permitted. This field conflicts with Required.
+	Optional bool
+
+	// Computed, if set to true, specifies that the value comes from the
+	// provider rather than from configuration. If combined with Optional,
+	// then the config may optionally provide an overridden value.
+ Computed bool + + // Sensitive, if set to true, indicates that an attribute may contain + // sensitive information. + // + // At present nothing is done with this information, but callers are + // encouraged to set it where appropriate so that it may be used in the + // future to help Terraform mask sensitive information. (Terraform + // currently achieves this in a limited sense via other mechanisms.) + Sensitive bool + + // Deprecated indicates whether the attribute has been marked as deprecated in the + // provider and usage should be discouraged. + Deprecated bool +} + +// NestedBlock represents the embedding of one block within another. +type NestedBlock struct { + // Block is the description of the block that's nested. + Block + + // Nesting provides the nesting mode for the child block, which determines + // how many instances of the block are allowed, how many labels it expects, + // and how the resulting data will be converted into a data structure. + Nesting NestingMode + + // MinItems and MaxItems set, for the NestingList and NestingSet nesting + // modes, lower and upper limits on the number of child blocks allowed + // of the given type. If both are left at zero, no limit is applied. + // + // As a special case, both values can be set to 1 for NestingSingle in + // order to indicate that a particular single block is required. + // + // These fields are ignored for other nesting modes and must both be left + // at zero. + MinItems, MaxItems int +} + +// NestingMode is an enumeration of modes for nesting blocks inside other +// blocks. +type NestingMode int + +// This code was previously generated with a go:generate directive calling: +// go run golang.org/x/tools/cmd/stringer -type=NestingMode +// However, it is now considered frozen and the tooling dependency has been +// removed. The String method can be manually updated if necessary. + +const ( + nestingModeInvalid NestingMode = iota + + // NestingSingle indicates that only a single instance of a given + // block type is permitted, with no labels, and its content should be + // provided directly as an object value. + NestingSingle + + // NestingGroup is similar to NestingSingle in that it calls for only a + // single instance of a given block type with no labels, but it additionally + // guarantees that its result will never be null, even if the block is + // absent, and instead the nested attributes and blocks will be treated + // as absent in that case. (Any required attributes or blocks within the + // nested block are not enforced unless the block is explicitly present + // in the configuration, so they are all effectively optional when the + // block is not present.) + // + // This is useful for the situation where a remote API has a feature that + // is always enabled but has a group of settings related to that feature + // that themselves have default values. By using NestingGroup instead of + // NestingSingle in that case, generated plans will show the block as + // present even when not present in configuration, thus allowing any + // default values within to be displayed to the user. + NestingGroup + + // NestingList indicates that multiple blocks of the given type are + // permitted, with no labels, and that their corresponding objects should + // be provided in a list. + NestingList + + // NestingSet indicates that multiple blocks of the given type are + // permitted, with no labels, and that their corresponding objects should + // be provided in a set. 
+	NestingSet
+
+	// NestingMap indicates that multiple blocks of the given type are
+	// permitted, each with a single label, and that their corresponding
+	// objects should be provided in a map whose keys are the labels.
+	//
+	// It's an error, therefore, to use the same label value on multiple
+	// blocks.
+	NestingMap
+)
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/hcl2shim/flatmap.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/hcl2shim/flatmap.go
new file mode 100644
index 00000000000..2bad034de94
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/hcl2shim/flatmap.go
@@ -0,0 +1,426 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package hcl2shim
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/hashicorp/go-cty/cty"
+	"github.com/hashicorp/go-cty/cty/convert"
+)
+
+// FlatmapValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic
+// types library that HCL2 uses) to a map compatible with what would be
+// produced by the "flatmap" package.
+//
+// The type of the given value informs the structure of the resulting map.
+// The value must be of an object type or this function will panic.
+//
+// Flatmap values can only represent maps when they are of primitive types,
+// so the given value must not have any maps of complex types or the result
+// is undefined.
+func FlatmapValueFromHCL2(v cty.Value) map[string]string {
+	if v.IsNull() {
+		return nil
+	}
+
+	if !v.Type().IsObjectType() {
+		panic(fmt.Sprintf("FlatmapValueFromHCL2 called on %#v", v.Type()))
+	}
+
+	m := make(map[string]string)
+	flatmapValueFromHCL2Map(m, "", v)
+	return m
+}
+
+func flatmapValueFromHCL2Value(m map[string]string, key string, val cty.Value) {
+	ty := val.Type()
+	switch {
+	case ty.IsPrimitiveType() || ty == cty.DynamicPseudoType:
+		flatmapValueFromHCL2Primitive(m, key, val)
+	case ty.IsObjectType() || ty.IsMapType():
+		flatmapValueFromHCL2Map(m, key+".", val)
+	case ty.IsTupleType() || ty.IsListType() || ty.IsSetType():
+		flatmapValueFromHCL2Seq(m, key+".", val)
+	default:
+		panic(fmt.Sprintf("cannot encode %s to flatmap", ty.FriendlyName()))
+	}
+}
+
+func flatmapValueFromHCL2Primitive(m map[string]string, key string, val cty.Value) {
+	if !val.IsKnown() {
+		m[key] = UnknownVariableValue
+		return
+	}
+	if val.IsNull() {
+		// Omit entirely
+		return
+	}
+
+	var err error
+	val, err = convert.Convert(val, cty.String)
+	if err != nil {
+		// Should not be possible, since all primitive types can convert to string.
+		panic(fmt.Sprintf("invalid primitive encoding to flatmap: %s", err))
+	}
+	m[key] = val.AsString()
+}
+
+func flatmapValueFromHCL2Map(m map[string]string, prefix string, val cty.Value) {
+	if val.IsNull() {
+		// Omit entirely
+		return
+	}
+	if !val.IsKnown() {
+		switch {
+		case val.Type().IsObjectType():
+			// Whole objects can't be unknown in flatmap, so instead we'll
+			// just write all of the attribute values out as unknown.
+ for name, aty := range val.Type().AttributeTypes() { + flatmapValueFromHCL2Value(m, prefix+name, cty.UnknownVal(aty)) + } + default: + m[prefix+"%"] = UnknownVariableValue + } + return + } + + valLen := 0 + for it := val.ElementIterator(); it.Next(); { + ak, av := it.Element() + name := ak.AsString() + flatmapValueFromHCL2Value(m, prefix+name, av) + valLen++ + } + if !val.Type().IsObjectType() { // objects don't have an explicit count included, since their attribute count is fixed + m[prefix+"%"] = strconv.Itoa(valLen) + } +} + +func flatmapValueFromHCL2Seq(m map[string]string, prefix string, val cty.Value) { + if val.IsNull() { + // Omit entirely + return + } + if !val.IsKnown() { + m[prefix+"#"] = UnknownVariableValue + return + } + + // For sets this won't actually generate exactly what helper/schema would've + // generated, because we don't have access to the set key function it + // would've used. However, in practice it doesn't actually matter what the + // keys are as long as they are unique, so we'll just generate sequential + // indexes for them as if it were a list. + // + // An important implication of this, however, is that the set ordering will + // not be consistent across mutations and so different keys may be assigned + // to the same value when round-tripping. Since this shim is intended to + // be short-lived and not used for round-tripping, we accept this. + i := 0 + for it := val.ElementIterator(); it.Next(); { + _, av := it.Element() + key := prefix + strconv.Itoa(i) + flatmapValueFromHCL2Value(m, key, av) + i++ + } + m[prefix+"#"] = strconv.Itoa(i) +} + +// HCL2ValueFromFlatmap converts a map compatible with what would be produced +// by the "flatmap" package to a HCL2 (really, the cty dynamic types library +// that HCL2 uses) object type. +// +// The intended result type must be provided in order to guide how the +// map contents are decoded. This must be an object type or this function +// will panic. +// +// Flatmap values can only represent maps when they are of primitive types, +// so the given type must not have any maps of complex types or the result +// is undefined. +// +// The result may contain null values if the given map does not contain keys +// for all of the different key paths implied by the given type. 
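+//
+// For illustration (hypothetical data), the flatmap
+//
+//	map[string]string{"tags.%": "1", "tags.env": "dev"}
+//
+// decoded against cty.Object(map[string]cty.Type{"tags": cty.Map(cty.String)})
+// yields an object whose "tags" attribute is a map with "env" set to "dev".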
+func HCL2ValueFromFlatmap(m map[string]string, ty cty.Type) (cty.Value, error) { + if m == nil { + return cty.NullVal(ty), nil + } + if !ty.IsObjectType() { + panic(fmt.Sprintf("HCL2ValueFromFlatmap called on %#v", ty)) + } + + return hcl2ValueFromFlatmapObject(m, "", ty.AttributeTypes()) +} + +func hcl2ValueFromFlatmapValue(m map[string]string, key string, ty cty.Type) (cty.Value, error) { + var val cty.Value + var err error + switch { + case ty.IsPrimitiveType(): + val, err = hcl2ValueFromFlatmapPrimitive(m, key, ty) + case ty.IsObjectType(): + val, err = hcl2ValueFromFlatmapObject(m, key+".", ty.AttributeTypes()) + case ty.IsTupleType(): + val, err = hcl2ValueFromFlatmapTuple(m, key+".", ty.TupleElementTypes()) + case ty.IsMapType(): + val, err = hcl2ValueFromFlatmapMap(m, key+".", ty) + case ty.IsListType(): + val, err = hcl2ValueFromFlatmapList(m, key+".", ty) + case ty.IsSetType(): + val, err = hcl2ValueFromFlatmapSet(m, key+".", ty) + default: + err = fmt.Errorf("cannot decode %s from flatmap", ty.FriendlyName()) + } + + if err != nil { + return cty.DynamicVal, err + } + return val, nil +} + +func hcl2ValueFromFlatmapPrimitive(m map[string]string, key string, ty cty.Type) (cty.Value, error) { + rawVal, exists := m[key] + if !exists { + return cty.NullVal(ty), nil + } + if rawVal == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + var err error + val := cty.StringVal(rawVal) + val, err = convert.Convert(val, ty) + if err != nil { + // This should never happen for _valid_ input, but flatmap data might + // be tampered with by the user and become invalid. + return cty.DynamicVal, fmt.Errorf("invalid value for %q in state: %s", key, err) + } + + return val, nil +} + +func hcl2ValueFromFlatmapObject(m map[string]string, prefix string, atys map[string]cty.Type) (cty.Value, error) { + vals := make(map[string]cty.Value) + for name, aty := range atys { + val, err := hcl2ValueFromFlatmapValue(m, prefix+name, aty) + if err != nil { + return cty.DynamicVal, err + } + vals[name] = val + } + return cty.ObjectVal(vals), nil +} + +func hcl2ValueFromFlatmapTuple(m map[string]string, prefix string, etys []cty.Type) (cty.Value, error) { + var vals []cty.Value + + // if the container is unknown, there is no count string + listName := strings.TrimRight(prefix, ".") + if m[listName] == UnknownVariableValue { + return cty.UnknownVal(cty.Tuple(etys)), nil + } + + countStr, exists := m[prefix+"#"] + if !exists { + return cty.NullVal(cty.Tuple(etys)), nil + } + if countStr == UnknownVariableValue { + return cty.UnknownVal(cty.Tuple(etys)), nil + } + + count, err := strconv.Atoi(countStr) + if err != nil { + return cty.DynamicVal, fmt.Errorf("invalid count value for %q in state: %s", prefix, err) + } + if count != len(etys) { + return cty.DynamicVal, fmt.Errorf("wrong number of values for %q in state: got %d, but need %d", prefix, count, len(etys)) + } + + vals = make([]cty.Value, len(etys)) + for i, ety := range etys { + key := prefix + strconv.Itoa(i) + val, err := hcl2ValueFromFlatmapValue(m, key, ety) + if err != nil { + return cty.DynamicVal, err + } + vals[i] = val + } + return cty.TupleVal(vals), nil +} + +func hcl2ValueFromFlatmapMap(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) { + vals := make(map[string]cty.Value) + ety := ty.ElementType() + + // if the container is unknown, there is no count string + listName := strings.TrimRight(prefix, ".") + if m[listName] == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + // We actually don't really care 
about the "count" of a map for our
+	// purposes here, but we do need to check if it _exists_ in order to
+	// recognize the difference between null (not set at all) and empty.
+	if strCount, exists := m[prefix+"%"]; !exists {
+		return cty.NullVal(ty), nil
+	} else if strCount == UnknownVariableValue {
+		return cty.UnknownVal(ty), nil
+	}
+
+	for fullKey := range m {
+		if !strings.HasPrefix(fullKey, prefix) {
+			continue
+		}
+
+		// The flatmap format doesn't allow us to distinguish between keys
+		// that contain periods and nested objects, so by convention a
+		// map is only ever of primitive type in flatmap, and we just assume
+		// that the remainder of the raw key (dots and all) is the key we
+		// want in the result value.
+		key := fullKey[len(prefix):]
+		if key == "%" {
+			// Ignore the "count" key
+			continue
+		}
+
+		val, err := hcl2ValueFromFlatmapValue(m, fullKey, ety)
+		if err != nil {
+			return cty.DynamicVal, err
+		}
+		vals[key] = val
+	}
+
+	if len(vals) == 0 {
+		return cty.MapValEmpty(ety), nil
+	}
+	return cty.MapVal(vals), nil
+}
+
+func hcl2ValueFromFlatmapList(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) {
+	var vals []cty.Value
+
+	// if the container is unknown, there is no count string
+	listName := strings.TrimRight(prefix, ".")
+	if m[listName] == UnknownVariableValue {
+		return cty.UnknownVal(ty), nil
+	}
+
+	countStr, exists := m[prefix+"#"]
+	if !exists {
+		return cty.NullVal(ty), nil
+	}
+	if countStr == UnknownVariableValue {
+		return cty.UnknownVal(ty), nil
+	}
+
+	count, err := strconv.Atoi(countStr)
+	if err != nil {
+		return cty.DynamicVal, fmt.Errorf("invalid count value for %q in state: %s", prefix, err)
+	}
+
+	ety := ty.ElementType()
+	if count == 0 {
+		return cty.ListValEmpty(ety), nil
+	}
+
+	vals = make([]cty.Value, count)
+	for i := 0; i < count; i++ {
+		key := prefix + strconv.Itoa(i)
+		val, err := hcl2ValueFromFlatmapValue(m, key, ety)
+		if err != nil {
+			return cty.DynamicVal, err
+		}
+		vals[i] = val
+	}
+
+	return cty.ListVal(vals), nil
+}
+
+func hcl2ValueFromFlatmapSet(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) {
+	var vals []cty.Value
+	ety := ty.ElementType()
+
+	// if the container is unknown, there is no count string
+	listName := strings.TrimRight(prefix, ".")
+	if m[listName] == UnknownVariableValue {
+		return cty.UnknownVal(ty), nil
+	}
+
+	strCount, exists := m[prefix+"#"]
+	if !exists {
+		return cty.NullVal(ty), nil
+	} else if strCount == UnknownVariableValue {
+		return cty.UnknownVal(ty), nil
+	}
+
+	// Keep track of keys we've seen, so we don't add the same set value
+	// multiple times. The cty.Set will normally de-duplicate values, but we may
+	// have unknown values that would not show as equivalent.
+	seen := map[string]bool{}
+
+	for fullKey := range m {
+		if !strings.HasPrefix(fullKey, prefix) {
+			continue
+		}
+		subKey := fullKey[len(prefix):]
+		if subKey == "#" {
+			// Ignore the "count" key
+			continue
+		}
+		key := fullKey
+		if dot := strings.IndexByte(subKey, '.'); dot != -1 {
+			key = fullKey[:dot+len(prefix)]
+		}
+
+		if seen[key] {
+			continue
+		}
+
+		seen[key] = true
+
+		// The flatmap format doesn't allow us to distinguish between keys
+		// that contain periods and nested objects, so by convention a
+		// map is only ever of primitive type in flatmap, and we just assume
+		// that the remainder of the raw key (dots and all) is the key we
+		// want in the result value.
+
+		val, err := hcl2ValueFromFlatmapValue(m, key, ety)
+		if err != nil {
+			return cty.DynamicVal, err
+		}
+		vals = append(vals, val)
+	}
+
+	if len(vals) == 0 && strCount == "1" {
+		// An empty set wouldn't be represented in the flatmap, so this must be
+		// a single empty object since the count is actually 1.
+		// Add an appropriately typed null value to the set.
+		var val cty.Value
+		switch {
+		case ety.IsMapType():
+			val = cty.MapValEmpty(ety)
+		case ety.IsListType():
+			val = cty.ListValEmpty(ety)
+		case ety.IsSetType():
+			val = cty.SetValEmpty(ety)
+		case ety.IsObjectType():
+			// TODO: cty.ObjectValEmpty
+			objectMap := map[string]cty.Value{}
+			for attr, ty := range ety.AttributeTypes() {
+				objectMap[attr] = cty.NullVal(ty)
+			}
+			val = cty.ObjectVal(objectMap)
+		default:
+			val = cty.NullVal(ety)
+		}
+		vals = append(vals, val)
+
+	} else if len(vals) == 0 {
+		return cty.SetValEmpty(ety), nil
+	}
+
+	return cty.SetVal(vals), nil
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/hcl2shim/paths.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/hcl2shim/paths.go
new file mode 100644
index 00000000000..628a8bf6868
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/hcl2shim/paths.go
@@ -0,0 +1,279 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package hcl2shim
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+
+	"github.com/hashicorp/go-cty/cty"
+)
+
+// RequiresReplace takes a list of flatmapped paths from an
+// InstanceDiff.Attributes along with the corresponding cty.Type, and returns
+// the list of the cty.Paths that are flagged as causing the resource
+// replacement (RequiresNew).
+// This will filter out redundant paths, paths that refer to flatmapped indexes
+// (e.g. "#", "%"), and will return any changes within a set as the path to the
+// set itself.
+func RequiresReplace(attrs []string, ty cty.Type) ([]cty.Path, error) {
+	var paths []cty.Path
+
+	for _, attr := range attrs {
+		p, err := requiresReplacePath(attr, ty)
+		if err != nil {
+			return nil, err
+		}
+
+		paths = append(paths, p)
+	}
+
+	// now trim off any trailing paths that aren't GetAttrSteps, since only an
+	// attribute itself can require replacement
+	paths = trimPaths(paths)
+
+	// There may be redundant paths due to set elements or index attributes
+	// Do some ugly n^2 filtering, but these are always fairly small sets.
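+	// Each match is removed by swapping it with the current tail element and
+	// truncating the slice, so filtering happens in place; j is revisited
+	// because it now holds the value swapped in from the tail.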
+	for i := 0; i < len(paths)-1; i++ {
+		for j := i + 1; j < len(paths); j++ {
+			if reflect.DeepEqual(paths[i], paths[j]) {
+				// swap the tail and slice it off
+				paths[j], paths[len(paths)-1] = paths[len(paths)-1], paths[j]
+				paths = paths[:len(paths)-1]
+				j--
+			}
+		}
+	}
+
+	return paths, nil
+}
+
+// trimPaths removes any trailing steps that aren't of type GetAttrStep, since
+// only an attribute itself can require replacement
+func trimPaths(paths []cty.Path) []cty.Path {
+	var trimmed []cty.Path
+	for _, path := range paths {
+		path = trimPath(path)
+		if len(path) > 0 {
+			trimmed = append(trimmed, path)
+		}
+	}
+	return trimmed
+}
+
+func trimPath(path cty.Path) cty.Path {
+	for len(path) > 0 {
+		_, isGetAttr := path[len(path)-1].(cty.GetAttrStep)
+		if isGetAttr {
+			break
+		}
+		path = path[:len(path)-1]
+	}
+	return path
+}
+
+// requiresReplacePath takes a key from a flatmap along with the cty.Type
+// describing the structure, and returns the cty.Path that would be used to
+// reference the nested value in the data structure.
+// This is used specifically to record the RequiresReplace attributes from a
+// ResourceInstanceDiff.
+func requiresReplacePath(k string, ty cty.Type) (cty.Path, error) {
+	if k == "" {
+		return nil, nil
+	}
+	if !ty.IsObjectType() {
+		panic(fmt.Sprintf("requires replace path on non-object type: %#v", ty))
+	}
+
+	path, err := pathFromFlatmapKeyObject(k, ty.AttributeTypes())
+	if err != nil {
+		return path, fmt.Errorf("[%s] %s", k, err)
+	}
+	return path, nil
+}
+
+func pathSplit(p string) (string, string) {
+	parts := strings.SplitN(p, ".", 2)
+	head := parts[0]
+	rest := ""
+	if len(parts) > 1 {
+		rest = parts[1]
+	}
+	return head, rest
+}
+
+func pathFromFlatmapKeyObject(key string, atys map[string]cty.Type) (cty.Path, error) {
+	k, rest := pathSplit(key)
+
+	path := cty.Path{cty.GetAttrStep{Name: k}}
+
+	ty, ok := atys[k]
+	if !ok {
+		return path, fmt.Errorf("attribute %q not found", k)
+	}
+
+	if rest == "" {
+		return path, nil
+	}
+
+	p, err := pathFromFlatmapKeyValue(rest, ty)
+	if err != nil {
+		return path, err
+	}
+
+	return append(path, p...), nil
+}
+
+func pathFromFlatmapKeyValue(key string, ty cty.Type) (cty.Path, error) {
+	var path cty.Path
+	var err error
+
+	switch {
+	case ty.IsPrimitiveType():
+		err = fmt.Errorf("invalid step %q with type %#v", key, ty)
+	case ty.IsObjectType():
+		path, err = pathFromFlatmapKeyObject(key, ty.AttributeTypes())
+	case ty.IsTupleType():
+		path, err = pathFromFlatmapKeyTuple(key, ty.TupleElementTypes())
+	case ty.IsMapType():
+		path, err = pathFromFlatmapKeyMap(key, ty)
+	case ty.IsListType():
+		path, err = pathFromFlatmapKeyList(key, ty)
+	case ty.IsSetType():
+		path, err = pathFromFlatmapKeySet(key, ty)
+	default:
+		err = fmt.Errorf("unrecognized type: %s", ty.FriendlyName())
+	}
+
+	if err != nil {
+		return path, err
+	}
+
+	return path, nil
+}
+
+func pathFromFlatmapKeyTuple(key string, etys []cty.Type) (cty.Path, error) {
+	var path cty.Path
+	var err error
+
+	k, rest := pathSplit(key)
+
+	// we don't need to convert the index keys to paths
+	if k == "#" {
+		return path, nil
+	}
+
+	idx, err := strconv.Atoi(k)
+	if err != nil {
+		return path, err
+	}
+
+	path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}}
+
+	if idx >= len(etys) {
+		return path, fmt.Errorf("index %s out of range in %#v", key, etys)
+	}
+
+	if rest == "" {
+		return path, nil
+	}
+
+	ty := etys[idx]
+
+	p, err := pathFromFlatmapKeyValue(rest, ty.ElementType())
+	if err != nil {
+		return path, err
+	}
+
+	return append(path,
p...), nil +} + +func pathFromFlatmapKeyMap(key string, ty cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + k, rest := key, "" + if !ty.ElementType().IsPrimitiveType() { + k, rest = pathSplit(key) + } + + // we don't need to convert the index keys to paths + if k == "%" { + return path, nil + } + + path = cty.Path{cty.IndexStep{Key: cty.StringVal(k)}} + + if rest == "" { + return path, nil + } + + p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeyList(key string, ty cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + k, rest := pathSplit(key) + + // we don't need to convert the index keys to paths + if key == "#" { + return path, nil + } + + idx, err := strconv.Atoi(k) + if err != nil { + return path, err + } + + path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}} + + if rest == "" { + return path, nil + } + + p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeySet(key string, ty cty.Type) (cty.Path, error) { + // once we hit a set, we can't return consistent paths, so just mark the + // set as a whole changed. + return nil, nil +} + +// FlatmapKeyFromPath returns the flatmap equivalent of the given cty.Path for +// use in generating legacy style diffs. +func FlatmapKeyFromPath(path cty.Path) string { + var parts []string + + for _, step := range path { + switch step := step.(type) { + case cty.GetAttrStep: + parts = append(parts, step.Name) + case cty.IndexStep: + switch ty := step.Key.Type(); { + case ty == cty.String: + parts = append(parts, step.Key.AsString()) + case ty == cty.Number: + i, _ := step.Key.AsBigFloat().Int64() + parts = append(parts, strconv.Itoa(int(i))) + } + } + } + + return strings.Join(parts, ".") +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/hcl2shim/values.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/hcl2shim/values.go new file mode 100644 index 00000000000..191f1bc7530 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/hcl2shim/values.go @@ -0,0 +1,233 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package hcl2shim + +import ( + "fmt" + "math/big" + + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema" +) + +// UnknownVariableValue is a sentinel value that can be used +// to denote that the value of a variable is unknown at this time. +// RawConfig uses this information to build up data about +// unknown keys. +const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66" + +// ConfigValueFromHCL2Block is like ConfigValueFromHCL2 but it works only for +// known object values and uses the provided block schema to perform some +// additional normalization to better mimic the shape of value that the old +// HCL1/HIL-based codepaths would've produced. +// +// In particular, it discards the collections that we use to represent nested +// blocks (other than NestingSingle) if they are empty, which better mimics +// the HCL1 behavior because HCL1 had no knowledge of the schema and so didn't +// know that an unspecified block _could_ exist. 
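To make the flatmap/cty.Path correspondence implemented by paths.go above concrete: a hypothetical caller (the package is internal, so the qualified call site is illustrative only) can flatten a path back into legacy diff notation with FlatmapKeyFromPath:

path := cty.Path{
	cty.GetAttrStep{Name: "network"},
	cty.IndexStep{Key: cty.NumberIntVal(0)},
	cty.GetAttrStep{Name: "cidr"},
}

key := hcl2shim.FlatmapKeyFromPath(path) // "network.0.cidr"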
+// +// The given object value must conform to the schema's implied type or this +// function will panic or produce incorrect results. +// +// This is primarily useful for the final transition from new-style values to +// terraform.ResourceConfig before calling to a legacy provider, since +// helper/schema (the old provider SDK) is particularly sensitive to these +// subtle differences within its validation code. +func ConfigValueFromHCL2Block(v cty.Value, schema *configschema.Block) map[string]interface{} { + if v.IsNull() { + return nil + } + if !v.IsKnown() { + panic("ConfigValueFromHCL2Block used with unknown value") + } + if !v.Type().IsObjectType() { + panic(fmt.Sprintf("ConfigValueFromHCL2Block used with non-object value %#v", v)) + } + + atys := v.Type().AttributeTypes() + ret := make(map[string]interface{}) + + for name := range schema.Attributes { + if _, exists := atys[name]; !exists { + continue + } + + av := v.GetAttr(name) + if av.IsNull() { + // Skip nulls altogether, to better mimic how HCL1 would behave + continue + } + ret[name] = ConfigValueFromHCL2(av) + } + + for name, blockS := range schema.BlockTypes { + if _, exists := atys[name]; !exists { + continue + } + bv := v.GetAttr(name) + if !bv.IsKnown() { + ret[name] = UnknownVariableValue + continue + } + if bv.IsNull() { + continue + } + + switch blockS.Nesting { + + case configschema.NestingSingle, configschema.NestingGroup: + ret[name] = ConfigValueFromHCL2Block(bv, &blockS.Block) + + case configschema.NestingList, configschema.NestingSet: + l := bv.LengthInt() + if l == 0 { + // skip empty collections to better mimic how HCL1 would behave + continue + } + + elems := make([]interface{}, 0, l) + for it := bv.ElementIterator(); it.Next(); { + _, ev := it.Element() + if !ev.IsKnown() { + elems = append(elems, UnknownVariableValue) + continue + } + elems = append(elems, ConfigValueFromHCL2Block(ev, &blockS.Block)) + } + ret[name] = elems + + case configschema.NestingMap: + if bv.LengthInt() == 0 { + // skip empty collections to better mimic how HCL1 would behave + continue + } + + elems := make(map[string]interface{}) + for it := bv.ElementIterator(); it.Next(); { + ek, ev := it.Element() + if !ev.IsKnown() { + elems[ek.AsString()] = UnknownVariableValue + continue + } + elems[ek.AsString()] = ConfigValueFromHCL2Block(ev, &blockS.Block) + } + ret[name] = elems + } + } + + return ret +} + +// ConfigValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic +// types library that HCL2 uses) to a value type that matches what would've +// been produced from the HCL-based interpolator for an equivalent structure. +// +// This function will transform a cty null value into a Go nil value, which +// isn't a possible outcome of the HCL/HIL-based decoder and so callers may +// need to detect and reject any null values. +func ConfigValueFromHCL2(v cty.Value) interface{} { + if !v.IsKnown() { + return UnknownVariableValue + } + if v.IsNull() { + return nil + } + + switch v.Type() { + case cty.Bool: + return v.True() // like HCL.BOOL + case cty.String: + return v.AsString() // like HCL token.STRING or token.HEREDOC + case cty.Number: + // We can't match HCL _exactly_ here because it distinguishes between + // int and float values, but we'll get as close as we can by using + // an int if the number is exactly representable, and a float if not. 
+ // The conversion to float will force precision to that of a float64, + // which is potentially losing information from the specific number + // given, but no worse than what HCL would've done in its own conversion + // to float. + + f := v.AsBigFloat() + if i, acc := f.Int64(); acc == big.Exact { + // if we're on a 32-bit system and the number is too big for 32-bit + // int then we'll fall through here and use a float64. + const MaxInt = int(^uint(0) >> 1) + const MinInt = -MaxInt - 1 + if i <= int64(MaxInt) && i >= int64(MinInt) { + return int(i) // Like HCL token.NUMBER + } + } + + f64, _ := f.Float64() + return f64 // like HCL token.FLOAT + } + + if v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType() { + l := make([]interface{}, 0, v.LengthInt()) + it := v.ElementIterator() + for it.Next() { + _, ev := it.Element() + l = append(l, ConfigValueFromHCL2(ev)) + } + return l + } + + if v.Type().IsMapType() || v.Type().IsObjectType() { + l := make(map[string]interface{}) + it := v.ElementIterator() + for it.Next() { + ek, ev := it.Element() + cv := ConfigValueFromHCL2(ev) + if cv != nil { + l[ek.AsString()] = cv + } + } + return l + } + + // If we fall out here then we have some weird type that we haven't + // accounted for. This should never happen unless the caller is using + // capsule types, and we don't currently have any such types defined. + panic(fmt.Errorf("can't convert %#v to config value", v)) +} + +// HCL2ValueFromConfigValue is the opposite of configValueFromHCL2: it takes +// a value as would be returned from the old interpolator and turns it into +// a cty.Value so it can be used within, for example, an HCL2 EvalContext. +func HCL2ValueFromConfigValue(v interface{}) cty.Value { + if v == nil { + return cty.NullVal(cty.DynamicPseudoType) + } + if v == UnknownVariableValue { + return cty.DynamicVal + } + + switch tv := v.(type) { + case bool: + return cty.BoolVal(tv) + case string: + return cty.StringVal(tv) + case int: + return cty.NumberIntVal(int64(tv)) + case float64: + return cty.NumberFloatVal(tv) + case []interface{}: + vals := make([]cty.Value, len(tv)) + for i, ev := range tv { + vals[i] = HCL2ValueFromConfigValue(ev) + } + return cty.TupleVal(vals) + case map[string]interface{}: + vals := map[string]cty.Value{} + for k, ev := range tv { + vals[k] = HCL2ValueFromConfigValue(ev) + } + return cty.ObjectVal(vals) + default: + // HCL/HIL should never generate anything that isn't caught by + // the above, so if we get here something has gone very wrong. + panic(fmt.Errorf("can't convert %#v to cty.Value", v)) + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/hcl2shim/values_equiv.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/hcl2shim/values_equiv.go new file mode 100644 index 00000000000..6b2be2239d3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/configs/hcl2shim/values_equiv.go @@ -0,0 +1,217 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package hcl2shim + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// ValuesSDKEquivalent returns true if both of the given values seem equivalent +// as far as the legacy SDK diffing code would be concerned. +// +// Since SDK diffing is a fuzzy, inexact operation, this function is also +// fuzzy and inexact. It will err on the side of returning false if it +// encounters an ambiguous situation. 
Ambiguity is most common in the presence
+// of sets because in practice it is impossible to exactly correlate
+// nonequal-but-equivalent set elements because they have no identity separate
+// from their value.
+//
+// This must be used _only_ for comparing values for equivalence within the
+// SDK planning code. It is only meaningful to compare the "prior state"
+// provided by Terraform Core with the "planned new state" produced by the
+// legacy SDK code via shims. In particular it is not valid to use this
+// function with either the config value or the "proposed new state" value
+// because they contain only the subset of data that Terraform Core itself is
+// able to determine.
+func ValuesSDKEquivalent(a, b cty.Value) bool {
+ if a == cty.NilVal || b == cty.NilVal {
+ // We don't generally expect nils to appear, but we'll allow them
+ // for robustness since the data structures produced by legacy SDK code
+ // can sometimes be non-ideal.
+ return a == b // equivalent if they are _both_ nil
+ }
+ if a.RawEquals(b) {
+ // Easy case. We use RawEquals because we want two unknowns to be
+ // considered equal here, whereas "Equals" would return unknown.
+ return true
+ }
+ if !a.IsKnown() || !b.IsKnown() {
+ // Two unknown values are equivalent regardless of type. A known is
+ // never equivalent to an unknown.
+ return a.IsKnown() == b.IsKnown()
+ }
+ if aZero, bZero := valuesSDKEquivalentIsNullOrZero(a), valuesSDKEquivalentIsNullOrZero(b); aZero || bZero {
+ // Two null/zero values are equivalent regardless of type. A non-zero is
+ // never equivalent to a zero.
+ return aZero == bZero
+ }
+
+ // If we get down here then we are guaranteed that both a and b are known,
+ // non-null values.
+
+ aTy := a.Type()
+ bTy := b.Type()
+ switch {
+ case aTy.IsSetType() && bTy.IsSetType():
+ return valuesSDKEquivalentSets(a, b)
+ case aTy.IsListType() && bTy.IsListType():
+ return valuesSDKEquivalentSequences(a, b)
+ case aTy.IsTupleType() && bTy.IsTupleType():
+ return valuesSDKEquivalentSequences(a, b)
+ case aTy.IsMapType() && bTy.IsMapType():
+ return valuesSDKEquivalentMappings(a, b)
+ case aTy.IsObjectType() && bTy.IsObjectType():
+ return valuesSDKEquivalentMappings(a, b)
+ case aTy == cty.Number && bTy == cty.Number:
+ return valuesSDKEquivalentNumbers(a, b)
+ default:
+ // We've now covered all the interesting cases, so anything that falls
+ // down here cannot be equivalent.
+ return false
+ }
+}
+
+// valuesSDKEquivalentIsNullOrZero returns true if the given value is either
+// null or is the "zero value" (in the SDK/Go sense) for its type.
+func valuesSDKEquivalentIsNullOrZero(v cty.Value) bool {
+ if v == cty.NilVal {
+ return true
+ }
+
+ ty := v.Type()
+ switch {
+ case !v.IsKnown():
+ return false
+ case v.IsNull():
+ return true
+
+ // After this point, v is always known and non-null
+ case ty.IsListType() || ty.IsSetType() || ty.IsMapType() || ty.IsObjectType() || ty.IsTupleType():
+ return v.LengthInt() == 0
+ case ty == cty.String:
+ return v.RawEquals(cty.StringVal(""))
+ case ty == cty.Number:
+ return v.RawEquals(cty.Zero)
+ case ty == cty.Bool:
+ return v.RawEquals(cty.False)
+ default:
+ // The above is exhaustive, but for robustness we'll consider anything
+ // else to _not_ be zero unless it is null.
+ return false
+ }
+}
+
+// valuesSDKEquivalentSets returns true only if each of the elements in a can
+// be correlated with at least one equivalent element in b and vice-versa.
+// This is a fuzzy operation that prefers to signal non-equivalence if it cannot
+// be certain that all elements are accounted for.
+func valuesSDKEquivalentSets(a, b cty.Value) bool {
+ if aLen, bLen := a.LengthInt(), b.LengthInt(); aLen != bLen {
+ return false
+ }
+
+ // Our methodology here is a little tricky, to deal with the fact that
+ // it's impossible to directly correlate two non-equal set elements because
+ // they don't have identities separate from their values.
+ // The approach is to count the number of equivalent elements each element
+ // of a has in b and vice-versa, and then return true only if each element
+ // in both sets has at least one equivalent.
+ as := a.AsValueSlice()
+ bs := b.AsValueSlice()
+ aeqs := make([]bool, len(as))
+ beqs := make([]bool, len(bs))
+ for ai, av := range as {
+ for bi, bv := range bs {
+ if ValuesSDKEquivalent(av, bv) {
+ aeqs[ai] = true
+ beqs[bi] = true
+ }
+ }
+ }
+
+ for _, eq := range aeqs {
+ if !eq {
+ return false
+ }
+ }
+ for _, eq := range beqs {
+ if !eq {
+ return false
+ }
+ }
+ return true
+}
+
+// valuesSDKEquivalentSequences decides equivalence for two sequence values
+// (lists or tuples).
+func valuesSDKEquivalentSequences(a, b cty.Value) bool {
+ as := a.AsValueSlice()
+ bs := b.AsValueSlice()
+ if len(as) != len(bs) {
+ return false
+ }
+
+ for i := range as {
+ if !ValuesSDKEquivalent(as[i], bs[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// valuesSDKEquivalentMappings decides equivalence for two mapping values
+// (maps or objects).
+func valuesSDKEquivalentMappings(a, b cty.Value) bool {
+ as := a.AsValueMap()
+ bs := b.AsValueMap()
+ if len(as) != len(bs) {
+ return false
+ }
+
+ for k, av := range as {
+ bv, ok := bs[k]
+ if !ok {
+ return false
+ }
+ if !ValuesSDKEquivalent(av, bv) {
+ return false
+ }
+ }
+ return true
+}
+
+// valuesSDKEquivalentNumbers decides equivalence for two number values based
+// on the fact that the SDK uses int and float64 representations while
+// cty (and thus Terraform Core) uses big.Float, and so we expect to lose
+// precision in the round-trip.
+//
+// This does _not_ attempt to allow for an epsilon difference that may be
+// caused by accumulated inaccuracy in a float calculation, under the
+// expectation that providers generally do not actually do computations on
+// floats and instead just pass string representations of them on verbatim
+// to remote APIs. A remote API _itself_ may introduce inaccuracy, but that's
+// a problem for the provider itself to deal with, based on its knowledge of
+// the remote system, e.g. using DiffSuppressFunc.
+func valuesSDKEquivalentNumbers(a, b cty.Value) bool {
+ if a.RawEquals(b) {
+ return true // easy
+ }
+
+ af := a.AsBigFloat()
+ bf := b.AsBigFloat()
+
+ if af.IsInt() != bf.IsInt() {
+ return false
+ }
+ if af.IsInt() && bf.IsInt() {
+ return false // a.RawEquals(b) test above is good enough for integers
+ }
+
+ // The SDK supports only int and float64, so if it's not an integer
+ // we know that only a float64-level of precision can possibly be
+ // significant.
+ af64, _ := af.Float64() + bf64, _ := bf.Float64() + return af64 == bf64 +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/errorshim/error_join_shim.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/errorshim/error_join_shim.go new file mode 100644 index 00000000000..b7371af827c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/errorshim/error_join_shim.go @@ -0,0 +1,47 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// TODO: Once Go 1.20 is the minimum supported version delete this package, replace all usages with `errors` package +// - https://github.com/hashicorp/terraform-plugin-testing/issues/99 +package errorshim + +// Copied from -> https://cs.opensource.google/go/go/+/refs/tags/go1.20.2:src/errors/join.go +func Join(errs ...error) error { + n := 0 + for _, err := range errs { + if err != nil { + n++ + } + } + if n == 0 { + return nil + } + e := &joinError{ + errs: make([]error, 0, n), + } + for _, err := range errs { + if err != nil { + e.errs = append(e.errs, err) + } + } + return e +} + +type joinError struct { + errs []error +} + +func (e *joinError) Error() string { + var b []byte + for i, err := range e.errs { + if i > 0 { + b = append(b, '\n') + } + b = append(b, err.Error()...) + } + return string(b) +} + +func (e *joinError) Unwrap() []error { + return e.errs +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/logging/context.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/logging/context.go new file mode 100644 index 00000000000..0fe8002aa7a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/logging/context.go @@ -0,0 +1,78 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logging + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-log/tfsdklog" + helperlogging "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" + testing "github.com/mitchellh/go-testing-interface" +) + +// InitContext creates SDK logger contexts when the provider is running in +// "production" (not under acceptance testing). The incoming context will +// already have the root SDK logger and root provider logger setup from +// terraform-plugin-go tf5server RPC handlers. +func InitContext(ctx context.Context) context.Context { + ctx = tfsdklog.NewSubsystem(ctx, SubsystemHelperSchema, + // All calls are through the HelperSchema* helper functions + tfsdklog.WithAdditionalLocationOffset(1), + tfsdklog.WithLevelFromEnv(EnvTfLogSdkHelperSchema), + // Propagate tf_req_id, tf_rpc, etc. fields + tfsdklog.WithRootFields(), + ) + + return ctx +} + +// InitTestContext registers the terraform-plugin-log/tfsdklog test sink, +// configures the standard library log package, and creates SDK logger +// contexts. The incoming context is expected to be devoid of logging setup. +// +// The standard library log package handling is important as provider code +// under test may be using that package or another logging library outside of +// terraform-plugin-log. 
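A hedged usage sketch of the Join shim above: nil errors are dropped and the remaining messages are newline-joined, mirroring errors.Join from Go 1.20. The errorshim import is internal to this module, so the call site is illustrative only.

errA := errors.New("step failed")
errB := errors.New("cleanup failed")

err := errorshim.Join(errA, nil, errB)

fmt.Println(err)
// step failed
// cleanup failed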
+func InitTestContext(ctx context.Context, t testing.T) context.Context {
+ helperlogging.SetOutput(t)
+
+ ctx = tfsdklog.RegisterTestSink(ctx, t)
+ ctx = tfsdklog.NewRootSDKLogger(ctx, tfsdklog.WithLevelFromEnv(EnvTfLogSdk))
+ ctx = tfsdklog.NewSubsystem(ctx, SubsystemHelperResource,
+ // All calls are through the HelperResource* helper functions
+ tfsdklog.WithAdditionalLocationOffset(1),
+ tfsdklog.WithLevelFromEnv(EnvTfLogSdkHelperResource),
+ )
+ ctx = TestNameContext(ctx, t.Name())
+
+ return ctx
+}
+
+// TestNameContext adds the current test name to loggers.
+func TestNameContext(ctx context.Context, testName string) context.Context {
+ ctx = tfsdklog.SubsystemSetField(ctx, SubsystemHelperResource, KeyTestName, testName)
+
+ return ctx
+}
+
+// TestStepNumberContext adds the current test step number to loggers.
+func TestStepNumberContext(ctx context.Context, stepNumber int) context.Context {
+ ctx = tfsdklog.SubsystemSetField(ctx, SubsystemHelperResource, KeyTestStepNumber, stepNumber)
+
+ return ctx
+}
+
+// TestTerraformPathContext adds the current test Terraform CLI path to loggers.
+func TestTerraformPathContext(ctx context.Context, terraformPath string) context.Context {
+ ctx = tfsdklog.SubsystemSetField(ctx, SubsystemHelperResource, KeyTestTerraformPath, terraformPath)
+
+ return ctx
+}
+
+// TestWorkingDirectoryContext adds the current test working directory to loggers.
+func TestWorkingDirectoryContext(ctx context.Context, workingDirectory string) context.Context {
+ ctx = tfsdklog.SubsystemSetField(ctx, SubsystemHelperResource, KeyTestWorkingDirectory, workingDirectory)
+
+ return ctx
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/logging/environment_variables.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/logging/environment_variables.go
new file mode 100644
index 00000000000..2ffc73eee6c
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/logging/environment_variables.go
@@ -0,0 +1,27 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package logging
+
+// Environment variables.
+const (
+ // EnvTfLogSdk is an environment variable that sets the logging level of
+ // the root SDK logger, while the provider is under test. In "production"
+ // usage, this environment variable is handled by terraform-plugin-go.
+ //
+ // Terraform CLI's logging must be explicitly turned on before this
+ // environment variable can be used to reduce the SDK logging levels. It
+ // cannot be used to show only SDK logging unless all other logging levels
+ // are turned off.
+ EnvTfLogSdk = "TF_LOG_SDK"
+
+ // EnvTfLogSdkHelperResource is an environment variable that sets the logging
+ // level of SDK helper/resource loggers. Infers root SDK logging level, if
+ // unset.
+ EnvTfLogSdkHelperResource = "TF_LOG_SDK_HELPER_RESOURCE"
+
+ // EnvTfLogSdkHelperSchema is an environment variable that sets the logging
+ // level of SDK helper/schema loggers. Infers root SDK logging level, if
+ // unset.
+ EnvTfLogSdkHelperSchema = "TF_LOG_SDK_HELPER_SCHEMA" +) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/logging/helper_resource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/logging/helper_resource.go new file mode 100644 index 00000000000..1b1459f2461 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/logging/helper_resource.go @@ -0,0 +1,35 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logging + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-log/tfsdklog" +) + +const ( + // SubsystemHelperResource is the tfsdklog subsystem name for helper/resource. + SubsystemHelperResource = "helper_resource" +) + +// HelperResourceTrace emits a helper/resource subsystem log at TRACE level. +func HelperResourceTrace(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemTrace(ctx, SubsystemHelperResource, msg, additionalFields...) +} + +// HelperResourceDebug emits a helper/resource subsystem log at DEBUG level. +func HelperResourceDebug(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemDebug(ctx, SubsystemHelperResource, msg, additionalFields...) +} + +// HelperResourceWarn emits a helper/resource subsystem log at WARN level. +func HelperResourceWarn(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemWarn(ctx, SubsystemHelperResource, msg, additionalFields...) +} + +// HelperResourceError emits a helper/resource subsystem log at ERROR level. +func HelperResourceError(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemError(ctx, SubsystemHelperResource, msg, additionalFields...) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/logging/helper_schema.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/logging/helper_schema.go new file mode 100644 index 00000000000..0ecf6bf2e48 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/logging/helper_schema.go @@ -0,0 +1,35 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logging + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-log/tfsdklog" +) + +const ( + // SubsystemHelperSchema is the tfsdklog subsystem name for helper/schema. + SubsystemHelperSchema = "helper_schema" +) + +// HelperSchemaDebug emits a helper/schema subsystem log at DEBUG level. +func HelperSchemaDebug(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemDebug(ctx, SubsystemHelperSchema, msg, additionalFields...) +} + +// HelperSchemaError emits a helper/schema subsystem log at ERROR level. +func HelperSchemaError(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemError(ctx, SubsystemHelperSchema, msg, additionalFields...) +} + +// HelperSchemaTrace emits a helper/schema subsystem log at TRACE level. +func HelperSchemaTrace(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemTrace(ctx, SubsystemHelperSchema, msg, additionalFields...) +} + +// HelperSchemaWarn emits a helper/schema subsystem log at WARN level. 
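Putting the context and emit helpers above together, a test harness would wire them up roughly as follows (a sketch only; ctx, t, and the step number are assumed to come from the surrounding test runner):

// Register the test sink and subsystem loggers, then attach identifying fields.
ctx := logging.InitTestContext(context.Background(), t)
ctx = logging.TestStepNumberContext(ctx, 1)

// Emitted under the "helper_resource" subsystem at DEBUG level, carrying the
// test_name and test_step_number fields attached above.
logging.HelperResourceDebug(ctx, "starting TestStep")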
+func HelperSchemaWarn(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemWarn(ctx, SubsystemHelperSchema, msg, additionalFields...) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/logging/keys.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/logging/keys.go new file mode 100644 index 00000000000..983fde437a2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/logging/keys.go @@ -0,0 +1,63 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logging + +// Structured logging keys. +// +// Practitioners or tooling reading logs may be depending on these keys, so be +// conscious of that when changing them. +// +// Refer to the terraform-plugin-go logging keys as well, which should be +// equivalent to these when possible. +const ( + // Attribute path representation, which is typically in flatmap form such + // as parent.0.child in this project. + KeyAttributePath = "tf_attribute_path" + + // The type of data source being operated on, such as "archive_file" + KeyDataSourceType = "tf_data_source_type" + + // Underlying Go error string when logging an error. + KeyError = "error" + + // The full address of the provider, such as + // registry.terraform.io/hashicorp/random + KeyProviderAddress = "tf_provider_addr" + + // The type of resource being operated on, such as "random_pet" + KeyResourceType = "tf_resource_type" + + // The name of the test being executed. + KeyTestName = "test_name" + + // The TestStep number of the test being executed. Starts at 1. + KeyTestStepNumber = "test_step_number" + + // Terraform configuration used during acceptance testing Terraform operations. + KeyTestTerraformConfiguration = "test_terraform_configuration" + + // The Terraform CLI logging level (TF_LOG) used for an acceptance test. + KeyTestTerraformLogLevel = "test_terraform_log_level" + + // The Terraform CLI logging level (TF_LOG_CORE) used for an acceptance test. + KeyTestTerraformLogCoreLevel = "test_terraform_log_core_level" + + // The Terraform CLI logging level (TF_LOG_PROVIDER) used for an acceptance test. + KeyTestTerraformLogProviderLevel = "test_terraform_log_provider_level" + + // The path to the Terraform CLI logging file used for an acceptance test. + // + // This should match where the rest of the acceptance test logs are going + // already, but is provided for troubleshooting in case it does not. + KeyTestTerraformLogPath = "test_terraform_log_path" + + // The path to the Terraform CLI used for an acceptance test. + KeyTestTerraformPath = "test_terraform_path" + + // Terraform plan output generated during a TestStep. + KeyTestTerraformPlan = "test_terraform_plan" + + // The working directory of the acceptance test. 
+ KeyTestWorkingDirectory = "test_working_directory" +) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/plugintest/config.go similarity index 97% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/config.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/plugintest/config.go index d3cb35bcec9..b63a55e4ed5 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/config.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/plugintest/config.go @@ -16,7 +16,8 @@ import ( "github.com/hashicorp/hc-install/product" "github.com/hashicorp/hc-install/releases" "github.com/hashicorp/hc-install/src" - "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" + + "github.com/hashicorp/terraform-plugin-testing/internal/logging" ) // Config is used to configure the test helper. In most normal test programs diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/plugintest/doc.go similarity index 100% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/doc.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/plugintest/doc.go diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/environment_variables.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/plugintest/environment_variables.go similarity index 91% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/environment_variables.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/plugintest/environment_variables.go index 6df86f89f8c..361de8f1e0c 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/environment_variables.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/plugintest/environment_variables.go @@ -108,4 +108,12 @@ const ( // will be installed if a binary is not found at the given path. No version // checks are performed against an existing binary. EnvTfAccTerraformPath = "TF_ACC_TERRAFORM_PATH" + + // EnvTfAccPersistWorkingDir environment variable enables persisting + // the working directory and the files generated during execution of + // TestStep(s). Default is disabled, in which case the working directory + // and the files it contains are deleted at the end of each acceptance + // test. Can be set to any value to persist the working directory and + // its contents, however "1" is conventional. 
+ EnvTfAccPersistWorkingDir = "TF_ACC_PERSIST_WORKING_DIR" ) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/guard.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/plugintest/guard.go similarity index 100% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/guard.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/plugintest/guard.go diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/plugintest/helper.go similarity index 90% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/plugintest/helper.go index f9617887218..3c9772cfc4e 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/plugintest/helper.go @@ -10,8 +10,10 @@ import ( "os" "strings" + "github.com/hashicorp/go-version" "github.com/hashicorp/terraform-exec/tfexec" - "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" + + "github.com/hashicorp/terraform-plugin-testing/internal/logging" ) // AutoInitProviderHelper is the main entrypoint for testing provider plugins @@ -41,6 +43,7 @@ type Helper struct { // for tests that use fixture files. sourceDir string terraformExec string + terraformVer *version.Version // execTempDir is created during DiscoverConfig to store any downloaded // binaries @@ -77,11 +80,23 @@ func InitHelper(ctx context.Context, config *Config) (*Helper, error) { return nil, fmt.Errorf("failed to create temporary directory for test helper: %s", err) } + tf, err := tfexec.NewTerraform(baseDir, config.TerraformExec) + if err != nil { + return nil, fmt.Errorf("unable to create terraform-exec instance: %w", err) + } + + tfVersion, _, err := tf.Version(ctx, false) + + if err != nil { + return nil, fmt.Errorf("error calling terraform version command: %w", err) + } + return &Helper{ baseDir: baseDir, sourceDir: config.SourceDir, terraformExec: config.TerraformExec, execTempDir: config.execTempDir, + terraformVer: tfVersion, }, nil } @@ -91,12 +106,17 @@ func InitHelper(ctx context.Context, config *Config) (*Helper, error) { // Call this before returning from TestMain to minimize the amount of detritus // left behind in the filesystem after the tests complete. func (h *Helper) Close() error { + if os.Getenv(EnvTfAccPersistWorkingDir) != "" { + return nil + } + if h.execTempDir != "" { err := os.RemoveAll(h.execTempDir) if err != nil { return err } } + return os.RemoveAll(h.baseDir) } @@ -106,8 +126,15 @@ func (h *Helper) Close() error { // If the working directory object is not itself closed by the time the test // program exits, the Close method on the helper itself will attempt to // delete it. 
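The InitHelper change in this file resolves the Terraform CLI version once at startup through terraform-exec. A minimal sketch of that lookup, assuming only public terraform-exec APIs (baseDir, execPath, and ctx are placeholders):

tf, err := tfexec.NewTerraform(baseDir, execPath)
if err != nil {
	return err
}

// The boolean is tfexec's skipCache flag; false allows a cached result to be reused.
tfVersion, _, err := tf.Version(ctx, false)
if err != nil {
	return err
}

fmt.Println(tfVersion) // e.g. "1.5.7"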
-func (h *Helper) NewWorkingDir(ctx context.Context, t TestControl) (*WorkingDir, error) { - dir, err := os.MkdirTemp(h.baseDir, "work") +func (h *Helper) NewWorkingDir(ctx context.Context, t TestControl, wd string) (*WorkingDir, error) { + workingDir := h.baseDir + + if wd != "" { + workingDir = wd + h.baseDir = wd + } + + dir, err := os.MkdirTemp(workingDir, "work") if err != nil { return nil, err } @@ -266,10 +293,10 @@ func (h *Helper) NewWorkingDir(ctx context.Context, t TestControl) (*WorkingDir, // RequireNewWorkingDir is a variant of NewWorkingDir that takes a TestControl // object and will immediately fail the running test if the creation of the // working directory fails. -func (h *Helper) RequireNewWorkingDir(ctx context.Context, t TestControl) *WorkingDir { +func (h *Helper) RequireNewWorkingDir(ctx context.Context, t TestControl, workingDir string) *WorkingDir { t.Helper() - wd, err := h.NewWorkingDir(ctx, t) + wd, err := h.NewWorkingDir(ctx, t, workingDir) if err != nil { t := testingT{t} t.Fatalf("failed to create new working directory: %s", err) @@ -288,3 +315,8 @@ func (h *Helper) WorkingDirectory() string { func (h *Helper) TerraformExecPath() string { return h.terraformExec } + +// TerraformVersion returns the Terraform CLI version being used when running tests. +func (h *Helper) TerraformVersion() *version.Version { + return h.terraformVer +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/plugintest/util.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/plugintest/util.go new file mode 100644 index 00000000000..be187a01b92 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/plugintest/util.go @@ -0,0 +1,186 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugintest + +import ( + "fmt" + "io" + "io/fs" + "os" + "path" + "path/filepath" + "strings" + "testing" +) + +func symlinkFile(src string, dest string) error { + err := os.Symlink(src, dest) + + if err != nil { + return fmt.Errorf("unable to symlink %q to %q: %w", src, dest, err) + } + + srcInfo, err := os.Stat(src) + + if err != nil { + return fmt.Errorf("unable to stat %q: %w", src, err) + } + + err = os.Chmod(dest, srcInfo.Mode()) + + if err != nil { + return fmt.Errorf("unable to set %q permissions: %w", dest, err) + } + + return nil +} + +// symlinkDirectoriesOnly finds only the first-level child directories in srcDir +// and symlinks them into destDir. +// Unlike symlinkDir, this is done non-recursively in order to limit the number +// of file descriptors used. 
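With the signature change to NewWorkingDir above, callers now pass a working directory explicitly; an empty string keeps the helper's temporary base directory. A hypothetical call site:

// "" => create the work dir under the helper's temp base directory; a
// non-empty path pins the base directory instead (useful together with
// TF_ACC_PERSIST_WORKING_DIR).
wd := h.RequireNewWorkingDir(ctx, t, "")

defer func() {
	_ = wd.Close() // a no-op when TF_ACC_PERSIST_WORKING_DIR is set
}()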
+func symlinkDirectoriesOnly(srcDir string, destDir string) error { + srcInfo, err := os.Stat(srcDir) + if err != nil { + return fmt.Errorf("unable to stat source directory %q: %w", srcDir, err) + } + + err = os.MkdirAll(destDir, srcInfo.Mode()) + if err != nil { + return fmt.Errorf("unable to make destination directory %q: %w", destDir, err) + } + + dirEntries, err := os.ReadDir(srcDir) + + if err != nil { + return fmt.Errorf("unable to read source directory %q: %w", srcDir, err) + } + + for _, dirEntry := range dirEntries { + if !dirEntry.IsDir() { + continue + } + + srcPath := filepath.Join(srcDir, dirEntry.Name()) + destPath := filepath.Join(destDir, dirEntry.Name()) + err := symlinkFile(srcPath, destPath) + + if err != nil { + return fmt.Errorf("unable to symlink directory %q to %q: %w", srcPath, destPath, err) + } + } + + return nil +} + +// CopyFile copies a single file from src to dest. +func CopyFile(src, dest string) error { + var srcFileInfo os.FileInfo + + srcFile, err := os.Open(src) + if err != nil { + return fmt.Errorf("unable to open file: %w", err) + } + defer srcFile.Close() + + destFile, err := os.Create(dest) + if err != nil { + return fmt.Errorf("unable to create file: %w", err) + } + defer destFile.Close() + + if _, err = io.Copy(destFile, srcFile); err != nil { + return fmt.Errorf("unable to copy: %w", err) + } + + if srcFileInfo, err = os.Stat(src); err != nil { + return fmt.Errorf("unable to stat: %w", err) + } + + return os.Chmod(dest, srcFileInfo.Mode()) +} + +// CopyDir recursively copies directories and files +// from src to dest. +func CopyDir(src, dest, baseDirName string) error { + srcInfo, err := os.Stat(src) + if err != nil { + return fmt.Errorf("unable to stat: %w", err) + } + + if err = os.MkdirAll(dest, srcInfo.Mode()); err != nil { + return fmt.Errorf("unable to create dir: %w", err) + } + + dirEntries, err := os.ReadDir(src) + if err != nil { + return fmt.Errorf("unable to read dir: %w", err) + } + + for _, dirEntry := range dirEntries { + srcFilepath := path.Join(src, dirEntry.Name()) + destFilepath := path.Join(dest, dirEntry.Name()) + + if !strings.Contains(srcFilepath, baseDirName) { + continue + } + + fi, err := dirEntry.Info() + + if err != nil { + return fmt.Errorf("unable to get dir entry info: %w", err) + } + + if dirEntry.IsDir() || fi.Mode()&fs.ModeSymlink == fs.ModeSymlink { + if err = CopyDir(srcFilepath, destFilepath, baseDirName); err != nil { + return fmt.Errorf("unable to copy directory: %w", err) + } + } else { + if err = CopyFile(srcFilepath, destFilepath); err != nil { + return fmt.Errorf("unable to copy file: %w", err) + } + } + } + + return nil +} + +// TestExpectTFatal provides a wrapper for logic which should call +// (*testing.T).Fatal() or (*testing.T).Fatalf(). +// +// Since we do not want the wrapping test to fail when an expected test error +// occurs, it is required that the testLogic passed in uses +// github.com/mitchellh/go-testing-interface.RuntimeT instead of the real +// *testing.T. +// +// If Fatal() or Fatalf() is not called in the logic, the real (*testing.T).Fatal() will +// be called to fail the test. 
+func TestExpectTFatal(t *testing.T, testLogic func()) { + t.Helper() + + var recoverIface interface{} + + func() { + defer func() { + recoverIface = recover() + }() + + testLogic() + }() + + if recoverIface == nil { + t.Fatalf("expected t.Fatal(), got none") + } + + recoverStr, ok := recoverIface.(string) + + if !ok { + t.Fatalf("expected string from recover(), got: %v (%T)", recoverIface, recoverIface) + } + + // this string is hardcoded in github.com/mitchellh/go-testing-interface + if !strings.HasPrefix(recoverStr, "testing.T failed, see logs for output") { + t.Fatalf("expected t.Fatal(), got: %s", recoverStr) + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/plugintest/working_dir.go similarity index 88% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/plugintest/working_dir.go index 05b02844206..a6e58081223 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/plugintest/working_dir.go @@ -5,7 +5,6 @@ package plugintest import ( "context" - "encoding/json" "fmt" "os" "path/filepath" @@ -13,13 +12,14 @@ import ( "github.com/hashicorp/terraform-exec/tfexec" tfjson "github.com/hashicorp/terraform-json" - "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/internal/logging" + "github.com/hashicorp/terraform-plugin-testing/internal/teststep" ) const ( - ConfigFileName = "terraform_plugin_test.tf" - ConfigFileNameJSON = ConfigFileName + ".json" - PlanFileName = "tfplan" + ConfigFileName = "terraform_plugin_test.tf" + PlanFileName = "tfplan" ) // WorkingDir represents a distinct working directory that can be used for @@ -47,10 +47,19 @@ type WorkingDir struct { reattachInfo tfexec.ReattachInfo } +// BaseDir returns the path to the root of the working directory tree. +func (wd *WorkingDir) BaseDir() string { + return wd.baseDir +} + // Close deletes the directories and files created to represent the receiving // working directory. After this method is called, the working directory object // is invalid and may no longer be used. func (wd *WorkingDir) Close() error { + if os.Getenv(EnvTfAccPersistWorkingDir) != "" { + return nil + } + return os.RemoveAll(wd.baseDir) } @@ -73,29 +82,71 @@ func (wd *WorkingDir) GetHelper() *Helper { // This must be called at least once before any call to Init, Plan, Apply, or // Destroy to establish the configuration. Any previously-set configuration is // discarded and any saved plan is cleared. 
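A hypothetical in-package test built on the TestExpectTFatal wrapper defined above; per its doc comment, the logic under test must use go-testing-interface's RuntimeT so that the expected Fatal panics instead of failing the real test:

import (
	"testing"

	gotesting "github.com/mitchellh/go-testing-interface"
)

func TestSomethingFatals(t *testing.T) {
	TestExpectTFatal(t, func() {
		rt := new(gotesting.RuntimeT)
		rt.Fatal("boom") // panics with the sentinel string the wrapper checks for
	})
}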
-func (wd *WorkingDir) SetConfig(ctx context.Context, cfg string) error { +func (wd *WorkingDir) SetConfig(ctx context.Context, cfg teststep.Config, vars config.Variables) error { + // Remove old config and variables files first + d, err := os.Open(wd.baseDir) + + if err != nil { + return err + } + + defer d.Close() + + fi, err := d.Readdir(-1) + + if err != nil { + return err + } + + for _, file := range fi { + if file.Mode().IsRegular() { + if filepath.Ext(file.Name()) == ".tf" || filepath.Ext(file.Name()) == ".json" { + err = os.Remove(filepath.Join(d.Name(), file.Name())) + + if err != nil && !os.IsNotExist(err) { + return err + } + } + } + } + logging.HelperResourceTrace(ctx, "Setting Terraform configuration", map[string]any{logging.KeyTestTerraformConfiguration: cfg}) outFilename := filepath.Join(wd.baseDir, ConfigFileName) - rmFilename := filepath.Join(wd.baseDir, ConfigFileNameJSON) - bCfg := []byte(cfg) - if json.Valid(bCfg) { - outFilename, rmFilename = rmFilename, outFilename + + // This file has to be written otherwise wd.Init() will return an error. + err = os.WriteFile(outFilename, nil, 0700) + + if err != nil { + return err } - if err := os.Remove(rmFilename); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("unable to remove %q: %w", rmFilename, err) + + // wd.configFilename must be set otherwise wd.Init() will return an error. + wd.configFilename = outFilename + + // Write configuration + if cfg != nil { + err = cfg.Write(ctx, wd.baseDir) + + if err != nil { + return err + } } - err := os.WriteFile(outFilename, bCfg, 0700) + + //Write configuration variables + err = vars.Write(wd.baseDir) + if err != nil { return err } - wd.configFilename = outFilename // Changing configuration invalidates any saved plan. err = wd.ClearPlan(ctx) + if err != nil { return err } + return nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/teststep/config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/teststep/config.go new file mode 100644 index 00000000000..6bd94d3a372 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/teststep/config.go @@ -0,0 +1,241 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package teststep + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/hashicorp/terraform-plugin-testing/config" +) + +const ( + rawConfigFileName = "terraform_plugin_test.tf" + rawConfigFileNameJSON = rawConfigFileName + ".json" +) + +var ( + providerConfigBlockRegex = regexp.MustCompile(`provider "?[a-zA-Z0-9_-]+"? {`) + terraformConfigBlockRegex = regexp.MustCompile(`terraform {`) +) + +// Config defines an interface implemented by all types +// that represent Terraform configuration: +// +// - [config.configurationDirectory] +// - [config.configurationFile] +// - [config.configurationString] +type Config interface { + HasConfigurationFiles() bool + HasProviderBlock(context.Context) (bool, error) + HasTerraformBlock(context.Context) (bool, error) + Write(context.Context, string) error +} + +// PrepareConfigurationRequest is used to simplify the generation of +// a ConfigurationRequest which is required when calling the +// Configuration func. 
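The rewritten SetConfig above clears stale .tf/.json files, then delegates writing to the teststep Config and to config.Variables. A sketch of the variables side, assuming the public config package of terraform-plugin-testing (the variable names and workingDir are placeholders):

vars := config.Variables{
	"instance_name":  config.StringVariable("example"),
	"instance_count": config.IntegerVariable(2),
}

// Writes the variables as a .tfvars-style file into the working directory.
if err := vars.Write(workingDir); err != nil {
	return err
}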
+type PrepareConfigurationRequest struct { + Directory config.TestStepConfigFunc + File config.TestStepConfigFunc + Raw string + TestStepConfigRequest config.TestStepConfigRequest +} + +// Exec returns a Configuration request which is required when +// calling the Configuration func. +func (p PrepareConfigurationRequest) Exec() ConfigurationRequest { + directory := Pointer(p.Directory.Exec(p.TestStepConfigRequest)) + file := Pointer(p.File.Exec(p.TestStepConfigRequest)) + raw := Pointer(p.Raw) + + return ConfigurationRequest{ + Directory: directory, + File: file, + Raw: raw, + } +} + +// ConfigurationRequest is used by the Configuration func to determine +// the underlying type to instantiate. +type ConfigurationRequest struct { + Directory *string + File *string + Raw *string +} + +// Validate ensures that only one of Directory, File or Raw are non-empty. +func (c ConfigurationRequest) Validate() error { + var configSet []string + + if c.Directory != nil && *c.Directory != "" { + configSet = append(configSet, "directory") + } + + if c.File != nil && *c.File != "" { + configSet = append(configSet, "file") + } + + if c.Raw != nil && *c.Raw != "" { + configSet = append(configSet, "raw") + } + + if len(configSet) > 1 { + configSetStr := strings.Join(configSet, `, `) + + i := strings.LastIndex(configSetStr, ", ") + + if i != -1 { + configSetStr = configSetStr[:i] + " and " + configSetStr[i+len(", "):] + } + + return fmt.Errorf(`%s are populated, only one of "directory", "file", or "raw" is allowed`, configSetStr) + } + + return nil +} + +// Configuration uses the supplied ConfigurationRequest to determine +// which of the types that implement Config to instantiate. If none +// of the fields in ConfigurationRequest are populated nil is returned. +func Configuration(req ConfigurationRequest) Config { + if req.Directory != nil && *req.Directory != "" { + return configurationDirectory{ + directory: *req.Directory, + } + } + + if req.File != nil && *req.File != "" { + return configurationFile{ + file: *req.File, + } + } + + if req.Raw != nil && *req.Raw != "" { + return configurationString{ + raw: *req.Raw, + } + } + + return nil +} + +// copyFiles accepts a path to a directory and a destination. Only +// files in the path directory are copied, any nested directories +// are ignored. +func copyFiles(path string, dstPath string) error { + infos, err := os.ReadDir(path) + + if err != nil { + return err + } + + for _, info := range infos { + srcPath := filepath.Join(path, info.Name()) + + if info.IsDir() { + continue + } else { + err = copyFile(srcPath, dstPath) + + if err != nil { + return err + } + } + + } + return nil +} + +// copyFile accepts a path to a file and a destination, +// copying the file from path to destination. +func copyFile(path string, dstPath string) error { + srcF, err := os.Open(path) + + if err != nil { + return err + } + + defer srcF.Close() + + di, err := os.Stat(dstPath) + + if err != nil { + return err + } + + if di.IsDir() { + _, file := filepath.Split(path) + dstPath = filepath.Join(dstPath, file) + } + + dstF, err := os.Create(dstPath) + + if err != nil { + return err + } + + defer dstF.Close() + + if _, err := io.Copy(dstF, srcF); err != nil { + return err + } + + return nil +} + +// filesContains accepts a string representing a directory and a +// regular expression. For each file that is found within the +// directory fileContains func is called. Any nested directories +// within the directory specified by dir are ignored. 
+func filesContains(dir string, find *regexp.Regexp) (bool, error) { + dirEntries, err := os.ReadDir(dir) + + if err != nil { + return false, err + } + + for _, dirEntry := range dirEntries { + if dirEntry.IsDir() { + continue + } + + path := filepath.Join(dir, dirEntry.Name()) + + contains, err := fileContains(path, find) + + if err != nil { + return false, err + } + + if contains { + return true, nil + } + } + + return false, nil +} + +// fileContains accepts a path and a regular expression. The +// file is read and the supplied regular expression is used +// to determine whether the file contains the specified string. +func fileContains(path string, find *regexp.Regexp) (bool, error) { + f, err := os.ReadFile(path) + + if err != nil { + return false, err + } + + return find.MatchString(string(f)), nil +} + +// Pointer returns a pointer to any type. +func Pointer[T any](in T) *T { + return &in +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/teststep/directory.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/teststep/directory.go new file mode 100644 index 00000000000..0126e82aa03 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/teststep/directory.go @@ -0,0 +1,94 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package teststep + +import ( + "context" + "os" + "path/filepath" +) + +var _ Config = configurationDirectory{} + +type configurationDirectory struct { + directory string +} + +// HasConfigurationFiles is used during validation to ensure that +// ExternalProviders are not declared at the TestCase or TestStep +// level when using TestStep.ConfigDirectory. +func (c configurationDirectory) HasConfigurationFiles() bool { + return true +} + +// HasProviderBlock returns true if the Config has declared a provider +// configuration block, e.g. provider "examplecloud" {...} +func (c configurationDirectory) HasProviderBlock(ctx context.Context) (bool, error) { + configDirectory := c.directory + + if !filepath.IsAbs(configDirectory) { + pwd, err := os.Getwd() + + if err != nil { + return false, err + } + + configDirectory = filepath.Join(pwd, configDirectory) + } + + contains, err := filesContains(configDirectory, providerConfigBlockRegex) + + if err != nil { + return false, err + } + + return contains, nil +} + +// HasTerraformBlock returns true if the Config has declared a terraform +// configuration block, e.g. terraform {...} +func (c configurationDirectory) HasTerraformBlock(ctx context.Context) (bool, error) { + configDirectory := c.directory + + if !filepath.IsAbs(configDirectory) { + pwd, err := os.Getwd() + + if err != nil { + return false, err + } + + configDirectory = filepath.Join(pwd, configDirectory) + } + + contains, err := filesContains(configDirectory, terraformConfigBlockRegex) + + if err != nil { + return false, err + } + + return contains, nil +} + +// Write copies all files from directory to destination. 
+func (c configurationDirectory) Write(ctx context.Context, dest string) error { + configDirectory := c.directory + + if !filepath.IsAbs(configDirectory) { + pwd, err := os.Getwd() + + if err != nil { + return err + } + + configDirectory = filepath.Join(pwd, configDirectory) + } + + err := copyFiles(configDirectory, dest) + + if err != nil { + return err + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/teststep/file.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/teststep/file.go new file mode 100644 index 00000000000..6de3f0752c6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/teststep/file.go @@ -0,0 +1,94 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package teststep + +import ( + "context" + "os" + "path/filepath" +) + +var _ Config = configurationFile{} + +type configurationFile struct { + file string +} + +// HasConfigurationFiles is used during validation to ensure that +// ExternalProviders are not declared at the TestCase or TestStep +// level when using TestStep.ConfigFile. +func (c configurationFile) HasConfigurationFiles() bool { + return true +} + +// HasProviderBlock returns true if the Config has declared a provider +// configuration block, e.g. provider "examplecloud" {...} +func (c configurationFile) HasProviderBlock(ctx context.Context) (bool, error) { + configFile := c.file + + if !filepath.IsAbs(configFile) { + pwd, err := os.Getwd() + + if err != nil { + return false, err + } + + configFile = filepath.Join(pwd, configFile) + } + + contains, err := fileContains(configFile, providerConfigBlockRegex) + + if err != nil { + return false, err + } + + return contains, nil +} + +// HasTerraformBlock returns true if the Config has declared a terraform +// configuration block, e.g. terraform {...} +func (c configurationFile) HasTerraformBlock(ctx context.Context) (bool, error) { + configFile := c.file + + if !filepath.IsAbs(configFile) { + pwd, err := os.Getwd() + + if err != nil { + return false, err + } + + configFile = filepath.Join(pwd, configFile) + } + + contains, err := fileContains(configFile, terraformConfigBlockRegex) + + if err != nil { + return false, err + } + + return contains, nil +} + +// Write copies file from c.file to destination. +func (c configurationFile) Write(ctx context.Context, dest string) error { + configFile := c.file + + if !filepath.IsAbs(configFile) { + pwd, err := os.Getwd() + + if err != nil { + return err + } + + configFile = filepath.Join(pwd, configFile) + } + + err := copyFile(configFile, dest) + + if err != nil { + return err + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/teststep/string.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/teststep/string.go new file mode 100644 index 00000000000..4143b484d8a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/teststep/string.go @@ -0,0 +1,61 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package teststep + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" +) + +var _ Config = configurationString{} + +type configurationString struct { + raw string +} + +// HasConfigurationFiles is used during validation to allow declaration +// of ExternalProviders at the TestCase or TestStep level when using +// TestStep.Config. +func (c configurationString) HasConfigurationFiles() bool { + return false +} + +// HasProviderBlock returns true if the Config has declared a provider +// configuration block, e.g. provider "examplecloud" {...} +func (c configurationString) HasProviderBlock(ctx context.Context) (bool, error) { + return providerConfigBlockRegex.MatchString(c.raw), nil +} + +// HasTerraformBlock returns true if the Config has declared a terraform +// configuration block, e.g. terraform {...} +func (c configurationString) HasTerraformBlock(ctx context.Context) (bool, error) { + return terraformConfigBlockRegex.MatchString(c.raw), nil +} + +// Write creates a file and writes c.raw into it. +func (c configurationString) Write(ctx context.Context, dest string) error { + outFilename := filepath.Join(dest, rawConfigFileName) + rmFilename := filepath.Join(dest, rawConfigFileNameJSON) + + bCfg := []byte(c.raw) + + if json.Valid(bCfg) { + outFilename, rmFilename = rmFilename, outFilename + } + + if err := os.Remove(rmFilename); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("unable to remove %q: %w", rmFilename, err) + } + + err := os.WriteFile(outFilename, bCfg, 0700) + + if err != nil { + return err + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/config_traversals.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/config_traversals.go new file mode 100644 index 00000000000..6208117cbff --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/config_traversals.go @@ -0,0 +1,59 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfdiags + +import ( + "bytes" + "fmt" + "strconv" + + "github.com/hashicorp/go-cty/cty" +) + +// FormatCtyPath is a helper function to produce a user-friendly string +// representation of a cty.Path. The result uses a syntax similar to the +// HCL expression language in the hope of it being familiar to users. +func FormatCtyPath(path cty.Path) string { + var buf bytes.Buffer + for _, step := range path { + switch ts := step.(type) { + case cty.GetAttrStep: + fmt.Fprintf(&buf, ".%s", ts.Name) + case cty.IndexStep: + buf.WriteByte('[') + key := ts.Key + keyTy := key.Type() + switch { + case key.IsNull(): + buf.WriteString("null") + case !key.IsKnown(): + buf.WriteString("(not yet known)") + case keyTy == cty.Number: + bf := key.AsBigFloat() + buf.WriteString(bf.Text('g', -1)) + case keyTy == cty.String: + buf.WriteString(strconv.Quote(key.AsString())) + default: + buf.WriteString("...") + } + buf.WriteByte(']') + } + } + return buf.String() +} + +// FormatError is a helper function to produce a user-friendly string +// representation of certain special error types that we might want to +// include in diagnostic messages. +// +// This currently has special behavior only for cty.PathError, where a +// non-empty path is rendered in a HCL-like syntax as context. 
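+//
+// For example (illustrative values), a cty.PathError at path .network[0]
+// carrying the message "a string is required" renders as:
+//
+//	.network[0]: a string is required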
+func FormatError(err error) string { + perr, ok := err.(cty.PathError) + if !ok || len(perr.Path) == 0 { + return err.Error() + } + + return fmt.Sprintf("%s: %s", FormatCtyPath(perr.Path), perr.Error()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/contextual.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/contextual.go new file mode 100644 index 00000000000..a9b5c7e83e8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/contextual.go @@ -0,0 +1,84 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfdiags + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// AttributeValue returns a diagnostic about an attribute value in an implied current +// configuration context. This should be returned only from functions whose +// interface specifies a clear configuration context that this will be +// resolved in. +// +// The given path is relative to the implied configuration context. To describe +// a top-level attribute, it should be a single-element cty.Path with a +// cty.GetAttrStep. It's assumed that the path is returning into a structure +// that would be produced by our conventions in the configschema package; it +// may return unexpected results for structures that can't be represented by +// configschema. +// +// Since mapping attribute paths back onto configuration is an imprecise +// operation (e.g. dynamic block generation may cause the same block to be +// evaluated multiple times) the diagnostic detail should include the attribute +// name and other context required to help the user understand what is being +// referenced in case the identified source range is not unique. +// +// The returned attribute will not have source location information until +// context is applied to the containing diagnostics using diags.InConfigBody. +// After context is applied, the source location is the value assigned to the +// named attribute, or the containing body's "missing item range" if no +// value is present. +func AttributeValue(severity Severity, summary, detail string, attrPath cty.Path) Diagnostic { + return &attributeDiagnostic{ + diagnosticBase: diagnosticBase{ + severity: severity, + summary: summary, + detail: detail, + }, + attrPath: attrPath, + } +} + +// GetAttribute extracts an attribute cty.Path from a diagnostic if it contains +// one. Normally this is not accessed directly, and instead the config body is +// added to the Diagnostic to create a more complete message for the user. In +// some cases however, we may want to know just the name of the attribute that +// generated the Diagnostic message. +// This returns a nil cty.Path if it does not exist in the Diagnostic. +func GetAttribute(d Diagnostic) cty.Path { + if d, ok := d.(*attributeDiagnostic); ok { + return d.attrPath + } + return nil +} + +type attributeDiagnostic struct { + diagnosticBase + attrPath cty.Path +} + +// WholeContainingBody returns a diagnostic about the body that is an implied +// current configuration context. This should be returned only from +// functions whose interface specifies a clear configuration context that this +// will be resolved in. +// +// The returned attribute will not have source location information until +// context is applied to the containing diagnostics using diags.InConfigBody. 
+// After context is applied, the source location is currently the missing item
+// range of the body. In future, this may change to some other suitable
+// part of the containing body.
+func WholeContainingBody(severity Severity, summary, detail string) Diagnostic {
+	return &wholeBodyDiagnostic{
+		diagnosticBase: diagnosticBase{
+			severity: severity,
+			summary:  summary,
+			detail:   detail,
+		},
+	}
+}
+
+type wholeBodyDiagnostic struct {
+	diagnosticBase
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/diagnostic.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/diagnostic.go
new file mode 100644
index 00000000000..547271346aa
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/diagnostic.go
@@ -0,0 +1,26 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfdiags
+
+type Diagnostic interface {
+	Severity() Severity
+	Description() Description
+}
+
+type Severity rune
+
+// This code was previously generated with a go:generate directive calling:
+// go run golang.org/x/tools/cmd/stringer -type=Severity
+// However, it is now considered frozen and the tooling dependency has been
+// removed. The String method can be manually updated if necessary.
+
+const (
+	Error   Severity = 'E'
+	Warning Severity = 'W'
+)
+
+type Description struct {
+	Summary string
+	Detail  string
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/diagnostic_base.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/diagnostic_base.go
new file mode 100644
index 00000000000..505692ce51a
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/diagnostic_base.go
@@ -0,0 +1,34 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfdiags
+
+// diagnosticBase can be embedded in other diagnostic structs to get
+// default implementations of Severity and Description. This type also
+// has default implementations of Source that return no source
+// location or expression-related information, so embedders should generally
+// override those methods to return more useful results where possible.
+type diagnosticBase struct {
+	severity Severity
+	summary  string
+	detail   string
+}
+
+func (d diagnosticBase) Severity() Severity {
+	return d.severity
+}
+
+func (d diagnosticBase) Description() Description {
+	return Description{
+		Summary: d.summary,
+		Detail:  d.detail,
+	}
+}
+
+func Diag(sev Severity, summary, detail string) Diagnostic {
+	return &diagnosticBase{
+		severity: sev,
+		summary:  summary,
+		detail:   detail,
+	}
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/diagnostics.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/diagnostics.go
new file mode 100644
index 00000000000..4fc99c1bb70
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/diagnostics.go
@@ -0,0 +1,196 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfdiags
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+)
+
+// Diagnostics is a list of diagnostics.
Diagnostics is intended to be used
+// where a Go "error" might normally be used, allowing richer information
+// to be conveyed (more context, support for warnings).
+//
+// A nil Diagnostics is a valid, empty diagnostics list, thus allowing
+// heap allocation to be avoided in the common case where there are no
+// diagnostics to report at all.
+type Diagnostics []Diagnostic
+
+// HasErrors returns true if any of the diagnostics in the list have
+// a severity of Error.
+func (diags Diagnostics) HasErrors() bool {
+	for _, diag := range diags {
+		if diag.Severity() == Error {
+			return true
+		}
+	}
+	return false
+}
+
+// Err flattens a diagnostics list into a single Go error, or to nil
+// if the diagnostics list does not include any error-level diagnostics.
+//
+// This can be used to smuggle diagnostics through an API that deals in
+// native errors, but unfortunately it will lose naked warnings (warnings
+// that aren't accompanied by at least one error) since such APIs have no
+// mechanism through which to report these.
+//
+// return result, diags.Err()
+func (diags Diagnostics) Err() error {
+	if !diags.HasErrors() {
+		return nil
+	}
+	return diagnosticsAsError{diags}
+}
+
+// ErrWithWarnings is similar to Err except that it will also return a non-nil
+// error if the receiver contains only warnings.
+//
+// In the warnings-only situation, the result is guaranteed to be of dynamic
+// type NonFatalError, allowing diagnostics-aware callers to type-assert
+// and unwrap it, treating it as non-fatal.
+//
+// This should be used only in contexts where the caller is able to recognize
+// and handle NonFatalError. For normal callers that expect a lack of errors
+// to be signaled by nil, use just Diagnostics.Err.
+func (diags Diagnostics) ErrWithWarnings() error {
+	if len(diags) == 0 {
+		return nil
+	}
+	if diags.HasErrors() {
+		return diags.Err()
+	}
+	return NonFatalError{diags}
+}
+
+// NonFatalErr is similar to Err except that it always returns either nil
+// (if there are no diagnostics at all) or NonFatalError.
+//
+// This allows diagnostics to be returned over an error return channel while
+// being explicit that the diagnostics should not halt processing.
+//
+// This should be used only in contexts where the caller is able to recognize
+// and handle NonFatalError. For normal callers that expect a lack of errors
+// to be signaled by nil, use just Diagnostics.Err.
+func (diags Diagnostics) NonFatalErr() error {
+	if len(diags) == 0 {
+		return nil
+	}
+	return NonFatalError{diags}
+}
+
+type diagnosticsAsError struct {
+	Diagnostics
+}
+
+func (dae diagnosticsAsError) Error() string {
+	diags := dae.Diagnostics
+	switch {
+	case len(diags) == 0:
+		// should never happen, since we don't create this wrapper if
+		// there are no diagnostics in the list.
+		return "no errors"
+	case len(diags) == 1:
+		desc := diags[0].Description()
+		if desc.Detail == "" {
+			return desc.Summary
+		}
+		return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail)
+	default:
+		var ret bytes.Buffer
+		fmt.Fprintf(&ret, "%d problems:\n", len(diags))
+		for _, diag := range dae.Diagnostics {
+			desc := diag.Description()
+			if desc.Detail == "" {
+				fmt.Fprintf(&ret, "\n- %s", desc.Summary)
+			} else {
+				fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail)
+			}
+		}
+		return ret.String()
+	}
+}
+
+// WrappedErrors is an implementation of errwrap.Wrapper so that an error-wrapped
+// diagnostics object can be picked apart by errwrap-aware code.
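+//
+// Only diagnostics that wrap a native Go error (as produced by FromError)
+// contribute to the returned slice; other Diagnostic implementations are
+// skipped.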
+func (dae diagnosticsAsError) WrappedErrors() []error { + var errs []error + for _, diag := range dae.Diagnostics { + if wrapper, isErr := diag.(nativeError); isErr { + errs = append(errs, wrapper.err) + } + } + return errs +} + +// NonFatalError is a special error type, returned by +// Diagnostics.ErrWithWarnings and Diagnostics.NonFatalErr, +// that indicates that the wrapped diagnostics should be treated as non-fatal. +// Callers can conditionally type-assert an error to this type in order to +// detect the non-fatal scenario and handle it in a different way. +type NonFatalError struct { + Diagnostics +} + +func (woe NonFatalError) Error() string { + diags := woe.Diagnostics + switch { + case len(diags) == 0: + // should never happen, since we don't create this wrapper if + // there are no diagnostics in the list. + return "no errors or warnings" + case len(diags) == 1: + desc := diags[0].Description() + if desc.Detail == "" { + return desc.Summary + } + return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) + default: + var ret bytes.Buffer + if diags.HasErrors() { + fmt.Fprintf(&ret, "%d problems:\n", len(diags)) + } else { + fmt.Fprintf(&ret, "%d warnings:\n", len(diags)) + } + for _, diag := range woe.Diagnostics { + desc := diag.Description() + if desc.Detail == "" { + fmt.Fprintf(&ret, "\n- %s", desc.Summary) + } else { + fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) + } + } + return ret.String() + } +} + +// sortDiagnostics is an implementation of sort.Interface +type sortDiagnostics []Diagnostic + +var _ sort.Interface = sortDiagnostics(nil) + +func (sd sortDiagnostics) Len() int { + return len(sd) +} + +func (sd sortDiagnostics) Less(i, j int) bool { + iD, jD := sd[i], sd[j] + iSev, jSev := iD.Severity(), jD.Severity() + + switch { + case iSev != jSev: + return iSev == Warning + default: + // The remaining properties do not have a defined ordering, so + // we'll leave it unspecified. Since we use sort.Stable in + // the caller of this, the ordering of remaining items will + // be preserved. + return false + } +} + +func (sd sortDiagnostics) Swap(i, j int) { + sd[i], sd[j] = sd[j], sd[i] +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/doc.go new file mode 100644 index 00000000000..23be0a8bece --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/doc.go @@ -0,0 +1,19 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package tfdiags is a utility package for representing errors and +// warnings in a manner that allows us to produce good messages for the +// user. +// +// "diag" is short for "diagnostics", and is meant as a general word for +// feedback to a user about potential or actual problems. +// +// A design goal for this package is for it to be able to provide rich +// messaging where possible but to also be pragmatic about dealing with +// generic errors produced by system components that _can't_ provide +// such rich messaging. As a consequence, the main types in this package -- +// Diagnostics and Diagnostic -- are designed so that they can be "smuggled" +// over an error channel and then be unpacked at the other end, so that +// error diagnostics (at least) can transit through APIs that are not +// aware of this package. 
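+//
+// For example (an illustrative sketch using only helpers from this
+// package), diagnostics can be collected and then flattened for an
+// error-based API:
+//
+//	diags := Diagnostics{FromError(err), SimpleWarning("field is deprecated")}
+//	return diags.Err()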
+package tfdiags diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/error.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/error.go new file mode 100644 index 00000000000..f7c9c65d382 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/error.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfdiags + +// nativeError is a Diagnostic implementation that wraps a normal Go error +type nativeError struct { + err error +} + +var _ Diagnostic = nativeError{} + +func (e nativeError) Severity() Severity { + return Error +} + +func (e nativeError) Description() Description { + return Description{ + Summary: FormatError(e.err), + } +} + +func FromError(err error) Diagnostic { + return &nativeError{ + err: err, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/severity_string.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/severity_string.go new file mode 100644 index 00000000000..78a721068c3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/severity_string.go @@ -0,0 +1,29 @@ +// Code generated by "stringer -type=Severity"; DO NOT EDIT. + +package tfdiags + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[Error-69] + _ = x[Warning-87] +} + +const ( + _Severity_name_0 = "Error" + _Severity_name_1 = "Warning" +) + +func (i Severity) String() string { + switch { + case i == 69: + return _Severity_name_0 + case i == 87: + return _Severity_name_1 + default: + return "Severity(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/simple_warning.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/simple_warning.go new file mode 100644 index 00000000000..0c90c478891 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/internal/tfdiags/simple_warning.go @@ -0,0 +1,23 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfdiags + +type simpleWarning string + +var _ Diagnostic = simpleWarning("") + +// SimpleWarning constructs a simple (summary-only) warning diagnostic. +func SimpleWarning(msg string) Diagnostic { + return simpleWarning(msg) +} + +func (e simpleWarning) Severity() Severity { + return Warning +} + +func (e simpleWarning) Description() Description { + return Description{ + Summary: string(e), + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/doc.go new file mode 100644 index 00000000000..eceda6faaf1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/doc.go @@ -0,0 +1,5 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package plancheck contains the plan check interface, request/response structs, and common plan check implementations. 
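+//
+// Plan checks are typically wired into a test step, e.g. (an illustrative
+// sketch; the ConfigPlanChecks field lives in this module's helper/resource
+// package):
+//
+//	ConfigPlanChecks: resource.ConfigPlanChecks{
+//		PreApply: []plancheck.PlanCheck{
+//			plancheck.ExpectEmptyPlan(),
+//		},
+//	},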
+package plancheck diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_empty_plan.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_empty_plan.go new file mode 100644 index 00000000000..92d55f46537 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_empty_plan.go @@ -0,0 +1,36 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plancheck + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-testing/internal/errorshim" +) + +var _ PlanCheck = expectEmptyPlan{} + +type expectEmptyPlan struct{} + +// CheckPlan implements the plan check logic. +func (e expectEmptyPlan) CheckPlan(ctx context.Context, req CheckPlanRequest, resp *CheckPlanResponse) { + var result error + + for _, rc := range req.Plan.ResourceChanges { + if !rc.Change.Actions.NoOp() { + // TODO: Once Go 1.20 is the minimum supported version for this module, replace with `errors.Join` function + // - https://github.com/hashicorp/terraform-plugin-testing/issues/99 + result = errorshim.Join(result, fmt.Errorf("expected empty plan, but %s has planned action(s): %v", rc.Address, rc.Change.Actions)) + } + } + + resp.Error = result +} + +// ExpectEmptyPlan returns a plan check that asserts that there are no resource changes in the plan. +// All resource changes found will be aggregated and returned in a plan check error. +func ExpectEmptyPlan() PlanCheck { + return expectEmptyPlan{} +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_non_empty_plan.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_non_empty_plan.go new file mode 100644 index 00000000000..74acf034d24 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_non_empty_plan.go @@ -0,0 +1,29 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plancheck + +import ( + "context" + "errors" +) + +var _ PlanCheck = expectNonEmptyPlan{} + +type expectNonEmptyPlan struct{} + +// CheckPlan implements the plan check logic. +func (e expectNonEmptyPlan) CheckPlan(ctx context.Context, req CheckPlanRequest, resp *CheckPlanResponse) { + for _, rc := range req.Plan.ResourceChanges { + if !rc.Change.Actions.NoOp() { + return + } + } + + resp.Error = errors.New("expected a non-empty plan, but got an empty plan") +} + +// ExpectNonEmptyPlan returns a plan check that asserts there is at least one resource change in the plan. +func ExpectNonEmptyPlan() PlanCheck { + return expectNonEmptyPlan{} +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_resource_action.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_resource_action.go new file mode 100644 index 00000000000..37a2336d2f3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_resource_action.go @@ -0,0 +1,90 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plancheck + +import ( + "context" + "fmt" +) + +var _ PlanCheck = expectResourceAction{} + +type expectResourceAction struct { + resourceAddress string + actionType ResourceActionType +} + +// CheckPlan implements the plan check logic. 
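+// It scans req.Plan.ResourceChanges for e.resourceAddress and reports an
+// error if the resource is absent or its planned actions do not match
+// e.actionType.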
+func (e expectResourceAction) CheckPlan(ctx context.Context, req CheckPlanRequest, resp *CheckPlanResponse) { + foundResource := false + + for _, rc := range req.Plan.ResourceChanges { + if e.resourceAddress != rc.Address { + continue + } + + switch e.actionType { + case ResourceActionNoop: + if !rc.Change.Actions.NoOp() { + resp.Error = fmt.Errorf("'%s' - expected %s, got action(s): %v", rc.Address, e.actionType, rc.Change.Actions) + return + } + case ResourceActionCreate: + if !rc.Change.Actions.Create() { + resp.Error = fmt.Errorf("'%s' - expected %s, got action(s): %v", rc.Address, e.actionType, rc.Change.Actions) + return + } + case ResourceActionRead: + if !rc.Change.Actions.Read() { + resp.Error = fmt.Errorf("'%s' - expected %s, got action(s): %v", rc.Address, e.actionType, rc.Change.Actions) + return + } + case ResourceActionUpdate: + if !rc.Change.Actions.Update() { + resp.Error = fmt.Errorf("'%s' - expected %s, got action(s): %v", rc.Address, e.actionType, rc.Change.Actions) + return + } + case ResourceActionDestroy: + if !rc.Change.Actions.Delete() { + resp.Error = fmt.Errorf("'%s' - expected %s, got action(s): %v", rc.Address, e.actionType, rc.Change.Actions) + return + } + case ResourceActionDestroyBeforeCreate: + if !rc.Change.Actions.DestroyBeforeCreate() { + resp.Error = fmt.Errorf("'%s' - expected %s, got action(s): %v", rc.Address, e.actionType, rc.Change.Actions) + return + } + case ResourceActionCreateBeforeDestroy: + if !rc.Change.Actions.CreateBeforeDestroy() { + resp.Error = fmt.Errorf("'%s' - expected %s, got action(s): %v", rc.Address, e.actionType, rc.Change.Actions) + return + } + case ResourceActionReplace: + if !rc.Change.Actions.Replace() { + resp.Error = fmt.Errorf("%s - expected %s, got action(s): %v", rc.Address, e.actionType, rc.Change.Actions) + return + } + default: + resp.Error = fmt.Errorf("%s - unexpected ResourceActionType: %s", rc.Address, e.actionType) + return + } + + foundResource = true + break + } + + if !foundResource { + resp.Error = fmt.Errorf("%s - Resource not found in plan ResourceChanges", e.resourceAddress) + return + } +} + +// ExpectResourceAction returns a plan check that asserts that a given resource will have a specific resource change type in the plan. +// Valid actionType are an enum of type plancheck.ResourceActionType, examples: NoOp, DestroyBeforeCreate, Update (in-place), etc. +func ExpectResourceAction(resourceAddress string, actionType ResourceActionType) PlanCheck { + return expectResourceAction{ + resourceAddress: resourceAddress, + actionType: actionType, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_sensitive_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_sensitive_value.go new file mode 100644 index 00000000000..b6c3a51946f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_sensitive_value.go @@ -0,0 +1,61 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plancheck + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" +) + +var _ PlanCheck = expectSensitiveValue{} + +type expectSensitiveValue struct { + resourceAddress string + attributePath tfjsonpath.Path +} + +// CheckPlan implements the plan check logic. 
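+// It traverses the change's AfterSensitive value at e.attributePath and
+// reports an error unless that value is the boolean true.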
+func (e expectSensitiveValue) CheckPlan(ctx context.Context, req CheckPlanRequest, resp *CheckPlanResponse) { + + for _, rc := range req.Plan.ResourceChanges { + if e.resourceAddress != rc.Address { + continue + } + + result, err := tfjsonpath.Traverse(rc.Change.AfterSensitive, e.attributePath) + if err != nil { + resp.Error = err + return + } + + isSensitive, ok := result.(bool) + if !ok { + resp.Error = fmt.Errorf("invalid path: the path value cannot be asserted as bool") + return + } + + if !isSensitive { + resp.Error = fmt.Errorf("attribute at path is not sensitive") + return + } + + return + } + + resp.Error = fmt.Errorf("%s - Resource not found in plan ResourceChanges", e.resourceAddress) +} + +// ExpectSensitiveValue returns a plan check that asserts that the specified attribute at the given resource has a sensitive value. +// +// Due to implementation differences between the terraform-plugin-sdk and the terraform-plugin-framework, representation of sensitive +// values may differ. For example, terraform-plugin-sdk based providers may have less precise representations of sensitive values, such +// as marking whole maps as sensitive rather than individual element values. +func ExpectSensitiveValue(resourceAddress string, attributePath tfjsonpath.Path) PlanCheck { + return expectSensitiveValue{ + resourceAddress: resourceAddress, + attributePath: attributePath, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_unknown_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_unknown_value.go new file mode 100644 index 00000000000..1569397c21d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_unknown_value.go @@ -0,0 +1,61 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plancheck + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" +) + +var _ PlanCheck = expectUnknownValue{} + +type expectUnknownValue struct { + resourceAddress string + attributePath tfjsonpath.Path +} + +// CheckPlan implements the plan check logic. +func (e expectUnknownValue) CheckPlan(ctx context.Context, req CheckPlanRequest, resp *CheckPlanResponse) { + + for _, rc := range req.Plan.ResourceChanges { + if e.resourceAddress != rc.Address { + continue + } + + result, err := tfjsonpath.Traverse(rc.Change.AfterUnknown, e.attributePath) + if err != nil { + resp.Error = err + return + } + + isUnknown, ok := result.(bool) + if !ok { + resp.Error = fmt.Errorf("invalid path: the path value cannot be asserted as bool") + return + } + + if !isUnknown { + resp.Error = fmt.Errorf("attribute at path is known") + return + } + + return + } + + resp.Error = fmt.Errorf("%s - Resource not found in plan ResourceChanges", e.resourceAddress) +} + +// ExpectUnknownValue returns a plan check that asserts that the specified attribute at the given resource has an unknown value. +// +// Due to implementation differences between the terraform-plugin-sdk and the terraform-plugin-framework, representation of unknown +// values may differ. For example, terraform-plugin-sdk based providers may have less precise representations of unknown values, such +// as marking whole maps as unknown rather than individual element values. 
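+//
+// For example (illustrative resource address and attribute name):
+//
+//	plancheck.ExpectUnknownValue("examplecloud_thing.test", tfjsonpath.New("computed_attribute"))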
+func ExpectUnknownValue(resourceAddress string, attributePath tfjsonpath.Path) PlanCheck { + return expectUnknownValue{ + resourceAddress: resourceAddress, + attributePath: attributePath, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/plan_check.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/plan_check.go new file mode 100644 index 00000000000..b6ec0d1997a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/plan_check.go @@ -0,0 +1,30 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plancheck + +import ( + "context" + + tfjson "github.com/hashicorp/terraform-json" +) + +// PlanCheck defines an interface for implementing test logic that checks a plan file and then returns an error +// if the plan file does not match what is expected. +type PlanCheck interface { + // CheckPlan should perform the plan check. + CheckPlan(context.Context, CheckPlanRequest, *CheckPlanResponse) +} + +// CheckPlanRequest is a request for an invoke of the CheckPlan function. +type CheckPlanRequest struct { + // Plan represents a parsed plan file, retrieved via the `terraform show -json` command. + Plan *tfjson.Plan +} + +// CheckPlanResponse is a response to an invoke of the CheckPlan function. +type CheckPlanResponse struct { + // Error is used to report the failure of a plan check assertion and is combined with other PlanCheck errors + // to be reported as a test failure. + Error error +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/resource_action.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/resource_action.go new file mode 100644 index 00000000000..ff4afebcdb0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/resource_action.go @@ -0,0 +1,50 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plancheck + +// ResourceActionType is a string enum type that routes to a specific terraform-json.Actions function for asserting resource changes. +// - https://pkg.go.dev/github.com/hashicorp/terraform-json#Actions +// +// More information about expected resource behavior can be found at: https://developer.hashicorp.com/terraform/language/resources/behavior +type ResourceActionType string + +const ( + // ResourceActionNoop occurs when a resource is not planned to change (no-op). + // - Routes to: https://pkg.go.dev/github.com/hashicorp/terraform-json#Actions.NoOp + ResourceActionNoop ResourceActionType = "NoOp" + + // ResourceActionCreate occurs when a resource is planned to be created. + // - Routes to: https://pkg.go.dev/github.com/hashicorp/terraform-json#Actions.Create + ResourceActionCreate ResourceActionType = "Create" + + // ResourceActionRead occurs when a data source is planned to be read during the apply stage (data sources are read during plan stage when possible). + // See the data source documentation for more information on this behavior: https://developer.hashicorp.com/terraform/language/data-sources#data-resource-behavior + // - Routes to: https://pkg.go.dev/github.com/hashicorp/terraform-json#Actions.Read + ResourceActionRead ResourceActionType = "Read" + + // ResourceActionUpdate occurs when a resource is planned to be updated in-place. 
+ // - Routes to: https://pkg.go.dev/github.com/hashicorp/terraform-json#Actions.Update + ResourceActionUpdate ResourceActionType = "Update" + + // ResourceActionDestroy occurs when a resource is planned to be deleted. + // - Routes to: https://pkg.go.dev/github.com/hashicorp/terraform-json#Actions.Delete + ResourceActionDestroy ResourceActionType = "Destroy" + + // ResourceActionDestroyBeforeCreate occurs when a resource is planned to be deleted and then re-created. This is the default + // behavior when terraform must change a resource argument that cannot be updated in-place due to remote API limitations. + // - Routes to: https://pkg.go.dev/github.com/hashicorp/terraform-json#Actions.DestroyBeforeCreate + ResourceActionDestroyBeforeCreate ResourceActionType = "DestroyBeforeCreate" + + // ResourceActionCreateBeforeDestroy occurs when a resource is planned to be created and then deleted. This is opt-in behavior that + // is enabled with the [create_before_destroy] meta-argument. + // - Routes to: https://pkg.go.dev/github.com/hashicorp/terraform-json#Actions.CreateBeforeDestroy + // + // [create_before_destroy]: https://developer.hashicorp.com/terraform/language/meta-arguments/lifecycle#create_before_destroy + ResourceActionCreateBeforeDestroy ResourceActionType = "CreateBeforeDestroy" + + // ResourceActionReplace can be used to verify a resource is planned to be deleted and re-created (where the order of delete and create actions are not important). + // This action matches both ResourceActionDestroyBeforeCreate and ResourceActionCreateBeforeDestroy. + // - Routes to: https://pkg.go.dev/github.com/hashicorp/terraform-json#Actions.Replace + ResourceActionReplace ResourceActionType = "Replace" +) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/diff.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/diff.go new file mode 100644 index 00000000000..f70e46e4d04 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/diff.go @@ -0,0 +1,1055 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package terraform + +import ( + "fmt" + "log" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-testing/internal/configs/hcl2shim" +) + +// diffChangeType is an enum with the kind of changes a diff has planned. +type diffChangeType byte + +const ( + diffInvalid diffChangeType = iota //nolint:deadcode,varcheck + diffNone + diffCreate + diffUpdate + diffDestroy + diffDestroyCreate +) + +// multiVal matches the index key to a flatmapped set, list or map +var multiVal = regexp.MustCompile(`\.(#|%)$`) + +// InstanceDiff is the diff of a resource from some state to another. +// +// Deprecated: This type is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +type InstanceDiff struct { + mu sync.Mutex + Attributes map[string]*ResourceAttrDiff + Destroy bool + DestroyDeposed bool + DestroyTainted bool + + RawConfig cty.Value + RawState cty.Value + RawPlan cty.Value + + // Meta is a simple K/V map that is stored in a diff and persisted to + // plans but otherwise is completely ignored by Terraform core. 
It is
+	// meant to be used for additional data a resource may want to pass through.
+	// The value here must only contain Go primitives and collections.
+	Meta map[string]interface{}
+}
+
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (d *InstanceDiff) Lock() { d.mu.Lock() }
+
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (d *InstanceDiff) Unlock() { d.mu.Unlock() }
+
+// ApplyToValue merges the receiver into the given base value, returning a
+// new value that incorporates the planned changes. The given value must
+// conform to the given schema, or this method will panic.
+//
+// This method is intended for shimming old subsystems that still use this
+// legacy diff type to work with the new-style types.
+//
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (d *InstanceDiff) ApplyToValue(base cty.Value, schema *configschema.Block) (cty.Value, error) {
+	// Create an InstanceState attributes map from our existing state.
+	// We can use this to more easily apply the diff changes.
+	attrs := hcl2shim.FlatmapValueFromHCL2(base)
+	applied, err := d.Apply(attrs, schema)
+	if err != nil {
+		return base, err
+	}
+
+	val, err := hcl2shim.HCL2ValueFromFlatmap(applied, schema.ImpliedType())
+	if err != nil {
+		return base, err
+	}
+
+	return schema.CoerceValue(val)
+}
+
+// Apply applies the diff to the provided flatmapped attributes,
+// returning the new instance attributes.
+//
+// This method is intended for shimming old subsystems that still use this
+// legacy diff type to work with the new-style types.
+//
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (d *InstanceDiff) Apply(attrs map[string]string, schema *configschema.Block) (map[string]string, error) {
+	// We always build a new value here, even if the given diff is "empty",
+	// because we might be planning to create a new instance that happens
+	// to have no attributes set, and so we want to produce an empty object
+	// rather than just echoing back the null old value.
+	if attrs == nil {
+		attrs = map[string]string{}
+	}
+
+	// Rather than applying the diff to mutate the attrs, we'll copy new values into
+	// here to avoid the possibility of leaving stale values.
+	result := map[string]string{}
+
+	if d.Destroy || d.DestroyDeposed || d.DestroyTainted {
+		return result, nil
+	}
+
+	return d.applyBlockDiff(nil, attrs, schema)
+}
+
+func (d *InstanceDiff) applyBlockDiff(path []string, attrs map[string]string, schema *configschema.Block) (map[string]string, error) {
+	result := map[string]string{}
+	name := ""
+	if len(path) > 0 {
+		name = path[len(path)-1]
+	}
+
+	// localPrefix is used to build the local result map
+	localPrefix := ""
+	if name != "" {
+		localPrefix = name + "."
+	}
+
+	// iterate over the schema rather than the attributes, so we can handle
+	// different block types separately from plain attributes
+	for n, attrSchema := range schema.Attributes {
+		var err error
+		newAttrs, err := d.applyAttrDiff(append(path, n), attrs, attrSchema)
+
+		if err != nil {
+			return result, err
+		}
+
+		for k, v := range newAttrs {
+			result[localPrefix+k] = v
+		}
+	}
+
+	blockPrefix := strings.Join(path, ".")
+	if blockPrefix != "" {
+		blockPrefix += "."
+	}
+	for n, block := range schema.BlockTypes {
+		// we need to find the set of all keys that traverse this block
+		candidateKeys := map[string]bool{}
+		blockKey := blockPrefix + n + "."
+		localBlockPrefix := localPrefix + n + "."
+
+		// we can only trust the diff for sets, since the path changes, so don't
+		// count existing values as candidate keys. If it turns out we're
+		// keeping the attributes, we will catch it down below with "keepBlock"
+		// after we check the set count.
+		if block.Nesting != configschema.NestingSet {
+			for k := range attrs {
+				if strings.HasPrefix(k, blockKey) {
+					nextDot := strings.Index(k[len(blockKey):], ".")
+					if nextDot < 0 {
+						continue
+					}
+					nextDot += len(blockKey)
+					candidateKeys[k[len(blockKey):nextDot]] = true
+				}
+			}
+		}
+
+		for k, diff := range d.Attributes {
+			// helper/schema should not insert nil diff values, but don't panic
+			// if it does.
+			if diff == nil {
+				continue
+			}
+
+			if strings.HasPrefix(k, blockKey) {
+				nextDot := strings.Index(k[len(blockKey):], ".")
+				if nextDot < 0 {
+					continue
+				}
+
+				if diff.NewRemoved {
+					continue
+				}
+
+				nextDot += len(blockKey)
+				candidateKeys[k[len(blockKey):nextDot]] = true
+			}
+		}
+
+		// check each set candidate to see if it was removed.
+		// we need to do this, because when entire sets are removed, they may
+		// have the wrong key, and only show diffs going to ""
+		if block.Nesting == configschema.NestingSet {
+			for k := range candidateKeys {
+				indexPrefix := strings.Join(append(path, n, k), ".") + "."
+				keep := false
+				// now check each set element to see if it's a new diff, or one
+				// that we're dropping. Since we're only applying the "New"
+				// portion of the set, we can ignore diffs that only contain "Old"
+				for attr, diff := range d.Attributes {
+					// helper/schema should not insert nil diff values, but don't panic
+					// if it does.
+					if diff == nil {
+						continue
+					}
+
+					if !strings.HasPrefix(attr, indexPrefix) {
+						continue
+					}
+
+					// check for empty "count" keys
+					if (strings.HasSuffix(attr, ".#") || strings.HasSuffix(attr, ".%")) && diff.New == "0" {
+						continue
+					}
+
+					// removed items don't count either
+					if diff.NewRemoved {
+						continue
+					}
+
+					// this must be a diff to keep
+					keep = true
+					break
+				}
+				if !keep {
+					delete(candidateKeys, k)
+				}
+			}
+		}
+
+		for k := range candidateKeys {
+			newAttrs, err := d.applyBlockDiff(append(path, n, k), attrs, &block.Block)
+			if err != nil {
+				return result, err
+			}
+
+			for attr, v := range newAttrs {
+				result[localBlockPrefix+attr] = v
+			}
+		}
+
+		keepBlock := true
+		// check this block's count diff directly first, since we may not
+		// have candidates because it was removed and only set to "0"
+		if diff, ok := d.Attributes[blockKey+"#"]; ok {
+			if diff.New == "0" || diff.NewRemoved {
+				keepBlock = false
+			}
+		}
+
+		// if there was no diff at all, then we need to keep the block attributes
+		if len(candidateKeys) == 0 && keepBlock {
+			for k, v := range attrs {
+				if strings.HasPrefix(k, blockKey) {
+					// we need the key relative to this block, so remove the
+					// entire prefix, then re-insert the block name.
+					localKey := localBlockPrefix + k[len(blockKey):]
+					result[localKey] = v
+				}
+			}
+		}
+
+		countAddr := strings.Join(append(path, n, "#"), ".")
+		if countDiff, ok := d.Attributes[countAddr]; ok {
+			if countDiff.NewComputed {
+				result[localBlockPrefix+"#"] = hcl2shim.UnknownVariableValue
+			} else {
+				result[localBlockPrefix+"#"] = countDiff.New
+
+				// While sets are complete, lists are not, and we may not have all the
+				// information to track removals. If the list was truncated, we need to
+				// remove the extra items from the result.
+				if block.Nesting == configschema.NestingList &&
+					countDiff.New != "" && countDiff.New != hcl2shim.UnknownVariableValue {
+					length, _ := strconv.Atoi(countDiff.New)
+					for k := range result {
+						if !strings.HasPrefix(k, localBlockPrefix) {
+							continue
+						}
+
+						index := k[len(localBlockPrefix):]
+						nextDot := strings.Index(index, ".")
+						if nextDot < 1 {
+							continue
+						}
+						index = index[:nextDot]
+						i, err := strconv.Atoi(index)
+						if err != nil {
+							// this shouldn't happen since we added these
+							// ourselves, but make note of it just in case.
+							log.Printf("[ERROR] bad list index in %q: %s", k, err)
+							continue
+						}
+						if i >= length {
+							delete(result, k)
+						}
+					}
+				}
+			}
+		} else if origCount, ok := attrs[countAddr]; ok && keepBlock {
+			result[localBlockPrefix+"#"] = origCount
+		} else {
+			result[localBlockPrefix+"#"] = countFlatmapContainerValues(localBlockPrefix+"#", result)
+		}
+	}
+
+	return result, nil
+}
+
+func (d *InstanceDiff) applyAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) {
+	ty := attrSchema.Type
+	switch {
+	case ty.IsListType(), ty.IsTupleType(), ty.IsMapType():
+		return d.applyCollectionDiff(path, attrs, attrSchema)
+	case ty.IsSetType():
+		return d.applySetDiff(path, attrs, attrSchema)
+	default:
+		return d.applySingleAttrDiff(path, attrs, attrSchema)
+	}
+}
+
+func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) {
+	currentKey := strings.Join(path, ".")
+
+	attr := path[len(path)-1]
+
+	result := map[string]string{}
+	diff := d.Attributes[currentKey]
+	old, exists := attrs[currentKey]
+
+	if diff != nil && diff.NewComputed {
+		result[attr] = hcl2shim.UnknownVariableValue
+		return result, nil
+	}
+
+	// "id" must exist and not be an empty string, or it must be unknown.
+	// This only applies to top-level "id" fields.
+	if attr == "id" && len(path) == 1 {
+		if old == "" {
+			result[attr] = hcl2shim.UnknownVariableValue
+		} else {
+			result[attr] = old
+		}
+		return result, nil
+	}
+
+	// attribute diffs are sometimes missed, so assume no diff means keep the
+	// old value
+	if diff == nil {
+		if exists {
+			result[attr] = old
+		} else {
+			// We need required values, so set those with an empty value. It
+			// must be set in the config, since if it were missing it would have
+			// failed validation.
+			if attrSchema.Required {
+				// we only set a missing string here, since bool or number types
+				// would have distinct zero value which shouldn't have been
+				// lost.
+				if attrSchema.Type == cty.String {
+					result[attr] = ""
+				}
+			}
+		}
+		return result, nil
+	}
+
+	// check for mismatched diff values
+	if exists &&
+		old != diff.Old &&
+		old != hcl2shim.UnknownVariableValue &&
+		diff.Old != hcl2shim.UnknownVariableValue {
+		return result, fmt.Errorf("diff apply conflict for %s: diff expects %q, but prior value has %q", attr, diff.Old, old)
+	}
+
+	if diff.NewRemoved {
+		// don't set anything in the new value
+		return map[string]string{}, nil
+	}
+
+	if diff.Old == diff.New && diff.New == "" {
+		// this can only be a valid empty string
+		if attrSchema.Type == cty.String {
+			result[attr] = ""
+		}
+		return result, nil
+	}
+
+	if attrSchema.Computed && diff.NewComputed {
+		result[attr] = hcl2shim.UnknownVariableValue
+		return result, nil
+	}
+
+	result[attr] = diff.New
+
+	return result, nil
+}
+
+func (d *InstanceDiff) applyCollectionDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) {
+	result := map[string]string{}
+
+	prefix := ""
+	if len(path) > 1 {
+		prefix = strings.Join(path[:len(path)-1], ".") + "."
+	}
+
+	name := ""
+	if len(path) > 0 {
+		name = path[len(path)-1]
+	}
+
+	currentKey := prefix + name
+
+	// check the index first for special handling
+	for k, diff := range d.Attributes {
+		// check the index value, which can be set, and 0
+		if k == currentKey+".#" || k == currentKey+".%" || k == currentKey {
+			if diff.NewRemoved {
+				return result, nil
+			}
+
+			if diff.NewComputed {
+				result[k[len(prefix):]] = hcl2shim.UnknownVariableValue
+				return result, nil
+			}
+
+			// do what the diff tells us to here, so that it's consistent with applies
+			if diff.New == "0" {
+				result[k[len(prefix):]] = "0"
+				return result, nil
+			}
+		}
+	}
+
+	// collect all the keys from the diff and the old state
+	noDiff := true
+	keys := map[string]bool{}
+	for k := range d.Attributes {
+		if !strings.HasPrefix(k, currentKey+".") {
+			continue
+		}
+		noDiff = false
+		keys[k] = true
+	}
+
+	noAttrs := true
+	for k := range attrs {
+		if !strings.HasPrefix(k, currentKey+".") {
+			continue
+		}
+		noAttrs = false
+		keys[k] = true
+	}
+
+	// If there's no diff and no attrs, then there's no value at all.
+	// This prevents an unexpected zero-count attribute in the attributes.
+	if noDiff && noAttrs {
+		return result, nil
+	}
+
+	idx := "#"
+	if attrSchema.Type.IsMapType() {
+		idx = "%"
+	}
+
+	for k := range keys {
+		// generate a schema placeholder for the values
+		elSchema := &configschema.Attribute{
+			Type: attrSchema.Type.ElementType(),
+		}
+
+		res, err := d.applySingleAttrDiff(append(path, k[len(currentKey)+1:]), attrs, elSchema)
+		if err != nil {
+			return result, err
+		}
+
+		for k, v := range res {
+			result[name+"."+k] = v
+		}
+	}
+
+	// Just like in nested list blocks, for simple lists we may need to fill in
+	// missing empty strings.
+	countKey := name + "." + idx
+	count := result[countKey]
+	length, _ := strconv.Atoi(count)
+
+	if count != "" && count != hcl2shim.UnknownVariableValue &&
+		attrSchema.Type.Equals(cty.List(cty.String)) {
+		// insert empty strings into missing indexes
+		for i := 0; i < length; i++ {
+			key := fmt.Sprintf("%s.%d", name, i)
+			if _, ok := result[key]; !ok {
+				result[key] = ""
+			}
+		}
+	}
+
+	// now check for truncation in any type of list
+	if attrSchema.Type.IsListType() {
+		for key := range result {
+			if key == countKey {
+				continue
+			}
+
+			if len(key) <= len(name)+1 {
+				// not sure what this is, but don't panic
+				continue
+			}
+
+			index := key[len(name)+1:]
+
+			// It is possible to have nested sets or maps, so look for another dot
+			dot := strings.Index(index, ".")
+			if dot > 0 {
+				index = index[:dot]
+			}
+
+			// This shouldn't have any more dots, since the element type is only string.
+			num, err := strconv.Atoi(index)
+			if err != nil {
+				log.Printf("[ERROR] bad list index in %q: %s", currentKey, err)
+				continue
+			}
+
+			if num >= length {
+				delete(result, key)
+			}
+		}
+	}
+
+	// Fill in the count value if it wasn't present in the diff for some reason,
+	// or if there is no count at all.
+	_, countDiff := d.Attributes[countKey]
+	if result[countKey] == "" || (!countDiff && len(keys) != len(result)) {
+		result[countKey] = countFlatmapContainerValues(countKey, result)
+	}
+
+	return result, nil
+}
+
+func (d *InstanceDiff) applySetDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) {
+	// We only need this special behavior for sets of objects.
+	if !attrSchema.Type.ElementType().IsObjectType() {
+		// The normal collection apply behavior will work okay for this one, then.
+ return d.applyCollectionDiff(path, attrs, attrSchema) + } + + // When we're dealing with a set of an object type we actually want to + // use our normal _block type_ apply behaviors, so we'll construct ourselves + // a synthetic schema that treats the object type as a block type and + // then delegate to our block apply method. + synthSchema := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute), + } + + for name, ty := range attrSchema.Type.ElementType().AttributeTypes() { + // We can safely make everything into an attribute here because in the + // event that there are nested set attributes we'll end up back in + // here again recursively and can then deal with the next level of + // expansion. + synthSchema.Attributes[name] = &configschema.Attribute{ + Type: ty, + Optional: true, + } + } + + parentPath := path[:len(path)-1] + childName := path[len(path)-1] + containerSchema := &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + childName: { + Nesting: configschema.NestingSet, + Block: *synthSchema, + }, + }, + } + + return d.applyBlockDiff(parentPath, attrs, containerSchema) +} + +// countFlatmapContainerValues returns the number of values in the flatmapped container +// (set, map, list) indexed by key. The key argument is expected to include the +// trailing ".#", or ".%". +func countFlatmapContainerValues(key string, attrs map[string]string) string { + if len(key) < 3 || !(strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { + panic(fmt.Sprintf("invalid index value %q", key)) + } + + prefix := key[:len(key)-1] + items := map[string]int{} + + for k := range attrs { + if k == key { + continue + } + if !strings.HasPrefix(k, prefix) { + continue + } + + suffix := k[len(prefix):] + dot := strings.Index(suffix, ".") + if dot > 0 { + suffix = suffix[:dot] + } + + items[suffix]++ + } + return strconv.Itoa(len(items)) +} + +// ResourceAttrDiff is the diff of a single attribute of a resource. +// +// Deprecated: This type is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +type ResourceAttrDiff struct { + Old string // Old Value + New string // New Value + NewComputed bool // True if new value is computed (unknown currently) + NewRemoved bool // True if this attribute is being removed + NewExtra interface{} // Extra information for the provider + RequiresNew bool // True if change requires new resource + Sensitive bool // True if the data should not be displayed in UI output + Type diffAttrType +} + +func (d *ResourceAttrDiff) GoString() string { + return fmt.Sprintf("*%#v", *d) +} + +// DiffAttrType is an enum type that says whether a resource attribute +// diff is an input attribute (comes from the configuration) or an +// output attribute (comes as a result of applying the configuration). An +// example input would be "ami" for AWS and an example output would be +// "private_ip". +type diffAttrType byte + +// Deprecated: This function is unintentionally exported by this Go module and +// not supported for external consumption. It will be removed in the next major +// version. +func NewInstanceDiff() *InstanceDiff { + return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)} +} + +// ChangeType returns the diffChangeType represented by the diff +// for this single instance. +// +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. 
It will be removed in the next major +// version. +func (d *InstanceDiff) ChangeType() diffChangeType { + if d.Empty() { + return diffNone + } + + if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) { + return diffDestroyCreate + } + + if d.GetDestroy() || d.GetDestroyDeposed() { + return diffDestroy + } + + if d.RequiresNew() { + return diffCreate + } + + return diffUpdate +} + +// Empty returns true if this diff encapsulates no changes. +// +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +func (d *InstanceDiff) Empty() bool { + if d == nil { + return true + } + + d.mu.Lock() + defer d.mu.Unlock() + return !d.Destroy && + !d.DestroyTainted && + !d.DestroyDeposed && + len(d.Attributes) == 0 +} + +// Equal compares two diffs for exact equality. +// +// This is different from the Same comparison that is supported which +// checks for operation equality taking into account computed values. Equal +// instead checks for exact equality. +// TODO: investigate why removing this unused method causes panic in tests +func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool { + // If one is nil, they must both be nil + if d == nil || d2 == nil { + return d == d2 + } + + // Use DeepEqual + return reflect.DeepEqual(d, d2) +} + +func (d *InstanceDiff) GoString() string { + return fmt.Sprintf("*%#v", InstanceDiff{ + Attributes: d.Attributes, + Destroy: d.Destroy, + DestroyTainted: d.DestroyTainted, + DestroyDeposed: d.DestroyDeposed, + }) +} + +// RequiresNew returns true if the diff requires the creation of a new +// resource (implying the destruction of the old). +// +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +func (d *InstanceDiff) RequiresNew() bool { + if d == nil { + return false + } + + d.mu.Lock() + defer d.mu.Unlock() + + return d.requiresNew() +} + +func (d *InstanceDiff) requiresNew() bool { + if d == nil { + return false + } + + if d.DestroyTainted { + return true + } + + for _, rd := range d.Attributes { + if rd != nil && rd.RequiresNew { + return true + } + } + + return false +} + +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +func (d *InstanceDiff) GetDestroyDeposed() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.DestroyDeposed +} + +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +func (d *InstanceDiff) GetDestroyTainted() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.DestroyTainted +} + +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +func (d *InstanceDiff) GetDestroy() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.Destroy +} + +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. 
+func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ attr, ok := d.Attributes[key]
+ return attr, ok
+}
+
+// Safely copies the Attributes map
+//
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ attrs := make(map[string]*ResourceAttrDiff)
+ for k, v := range d.Attributes {
+ attrs[k] = v
+ }
+
+ return attrs
+}
+
+// Same checks whether or not two InstanceDiffs are the "same". When
+// we say "same", it is not necessarily exactly equal. Instead, it is
+// just checking that the same attributes are changing, a destroy
+// isn't suddenly happening, etc.
+//
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) {
+ // we can safely compare the pointers without a lock
+ switch {
+ case d == nil && d2 == nil:
+ return true, ""
+ case d == nil || d2 == nil:
+ return false, "one nil"
+ case d == d2:
+ return true, ""
+ }
+
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ // If we're going from requiring new to NOT requiring new, then we have
+ // to see if all required news were computed. If so, it is allowed since
+ // computed may also mean "same value and therefore not new".
+ oldNew := d.requiresNew()
+ newNew := d2.RequiresNew()
+ if oldNew && !newNew {
+ oldNew = false
+
+ // This section builds a list of ignorable attributes for requiresNew
+ // by removing any elements of collections going to zero elements.
+ // For collections going to zero, they may not exist at all in the
+ // new diff (and hence RequiresNew == false).
+ ignoreAttrs := make(map[string]struct{})
+ for k, diffOld := range d.Attributes {
+ if !strings.HasSuffix(k, ".%") && !strings.HasSuffix(k, ".#") {
+ continue
+ }
+
+ // This case is in here as a protection measure. The bug that this
+ // code originally fixed (GH-11349) didn't have to deal with computed
+ // so I'm not 100% sure what the correct behavior is. Best to leave
+ // the old behavior.
+ if diffOld.NewComputed {
+ continue
+ }
+
+ // We're looking for the case a map goes to exactly 0.
+ if diffOld.New != "0" {
+ continue
+ }
+
+ // Found it! Ignore all of these. The prefix here is stripping
+ // off the "%" so it is just "k."
+ prefix := k[:len(k)-1]
+ for k2 := range d.Attributes {
+ if strings.HasPrefix(k2, prefix) {
+ ignoreAttrs[k2] = struct{}{}
+ }
+ }
+ }
+
+ for k, rd := range d.Attributes {
+ if _, ok := ignoreAttrs[k]; ok {
+ continue
+ }
+
+ // If the field is requires new and NOT computed, then what
+ // we have is a diff mismatch for sure. We set that the old
+ // diff does REQUIRE a ForceNew.
+ if rd != nil && rd.RequiresNew && !rd.NewComputed {
+ oldNew = true
+ break
+ }
+ }
+ }
+
+ if oldNew != newNew {
+ return false, fmt.Sprintf(
+ "diff RequiresNew; old: %t, new: %t", oldNew, newNew)
+ }
+
+ // Verify that destroy matches. The second boolean here allows us to
+ // have mismatching Destroy if we're moving from RequiresNew true
+ // to false above. Therefore, the second boolean will only pass if
+ // we're moving from Destroy: true to false as well.
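+ // For instance, when oldNew was downgraded above, d.requiresNew() no
+ // longer equals oldNew, so a Destroy change from true to false is
+ // tolerated by the check below.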
+ if d.Destroy != d2.GetDestroy() && d.requiresNew() == oldNew { + return false, fmt.Sprintf( + "diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy()) + } + + // Go through the old diff and make sure the new diff has all the + // same attributes. To start, build up the check map to be all the keys. + checkOld := make(map[string]struct{}) + checkNew := make(map[string]struct{}) + for k := range d.Attributes { + checkOld[k] = struct{}{} + } + for k := range d2.CopyAttributes() { + checkNew[k] = struct{}{} + } + + // Make an ordered list so we are sure the approximated hashes are left + // to process at the end of the loop + keys := make([]string, 0, len(d.Attributes)) + for k := range d.Attributes { + keys = append(keys, k) + } + sort.StringSlice(keys).Sort() + + for _, k := range keys { + diffOld := d.Attributes[k] + + if _, ok := checkOld[k]; !ok { + // We're not checking this key for whatever reason (see where + // check is modified). + continue + } + + // Remove this key since we'll never hit it again + delete(checkOld, k) + delete(checkNew, k) + + _, ok := d2.GetAttribute(k) + if !ok { + // If there's no new attribute, and the old diff expected the attribute + // to be removed, that's just fine. + if diffOld.NewRemoved { + continue + } + + // If the last diff was a computed value then the absence of + // that value is allowed since it may mean the value ended up + // being the same. + if diffOld.NewComputed { + ok = true + } + + // No exact match, but maybe this is a set containing computed + // values. So check if there is an approximate hash in the key + // and if so, try to match the key. + if strings.Contains(k, "~") { + parts := strings.Split(k, ".") + parts2 := append([]string(nil), parts...) + + re := regexp.MustCompile(`^~\d+$`) + for i, part := range parts { + if re.MatchString(part) { + // we're going to consider this the base of a + // computed hash, and remove all longer matching fields + ok = true + + parts2[i] = `\d+` + parts2 = parts2[:i+1] + break + } + } + + re, err := regexp.Compile("^" + strings.Join(parts2, `\.`)) + if err != nil { + return false, fmt.Sprintf("regexp failed to compile; err: %#v", err) + } + + for k2 := range checkNew { + if re.MatchString(k2) { + delete(checkNew, k2) + } + } + } + + // This is a little tricky, but when a diff contains a computed + // list, set, or map that can only be interpolated after the apply + // command has created the dependent resources, it could turn out + // that the result is actually the same as the existing state which + // would remove the key from the diff. + if diffOld.NewComputed && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { + ok = true + } + + // Similarly, in a RequiresNew scenario, a list that shows up in the plan + // diff can disappear from the apply diff, which is calculated from an + // empty state. + if d.requiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { + ok = true + } + + if !ok { + return false, fmt.Sprintf("attribute mismatch: %s", k) + } + } + + // search for the suffix of the base of a [computed] map, list or set. + match := multiVal.FindStringSubmatch(k) + + if diffOld.NewComputed && len(match) == 2 { + matchLen := len(match[1]) + + // This is a computed list, set, or map, so remove any keys with + // this prefix from the check list. 
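+ // For example, with k = "security_groups.#" the prefix
+ // "security_groups." removes every element key of that collection
+ // from both check maps.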
+ kprefix := k[:len(k)-matchLen]
+ for k2 := range checkOld {
+ if strings.HasPrefix(k2, kprefix) {
+ delete(checkOld, k2)
+ }
+ }
+ for k2 := range checkNew {
+ if strings.HasPrefix(k2, kprefix) {
+ delete(checkNew, k2)
+ }
+ }
+ }
+
+ // We don't compare the values because we can't currently actually
+ // guarantee to generate the same value for two diffs created from
+ // the same state+config: we have some pesky interpolation functions
+ // that do not behave as pure functions (uuid, timestamp) and so they
+ // can be different each time a diff is produced.
+ // FIXME: Re-organize our config handling so that we don't re-evaluate
+ // expressions when we produce a second comparison diff during
+ // apply (for EvalCompareDiff).
+ }
+
+ // Check for leftover attributes
+ if len(checkNew) > 0 {
+ extras := make([]string, 0, len(checkNew))
+ for attr := range checkNew {
+ extras = append(extras, attr)
+ }
+ return false,
+ fmt.Sprintf("extra attributes: %s", strings.Join(extras, ", "))
+ }
+
+ return true, ""
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/instancetype.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/instancetype.go
new file mode 100644
index 00000000000..1871445819a
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/instancetype.go
@@ -0,0 +1,19 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package terraform
+
+// This code was previously generated with a go:generate directive calling:
+// go run golang.org/x/tools/cmd/stringer -type=instanceType instancetype.go
+// However, it is now considered frozen and the tooling dependency has been
+// removed. The String method can be manually updated if necessary.
+
+// instanceType is an enum of the various types of instances stored in the State
+type instanceType int
+
+const (
+ typeInvalid instanceType = iota
+ typePrimary
+ typeTainted
+ typeDeposed
+)
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/instancetype_string.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/instancetype_string.go
new file mode 100644
index 00000000000..782ef90c05c
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/instancetype_string.go
@@ -0,0 +1,26 @@
+// Code generated by "stringer -type=instanceType instancetype.go"; DO NOT EDIT.
+
+package terraform
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
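+ // Each index below must equal its constant's value, so this compiles
+ // only while typeInvalid..typeDeposed keep the values 0..3 assumed by
+ // _instanceType_name and _instanceType_index.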
+ var x [1]struct{} + _ = x[typeInvalid-0] + _ = x[typePrimary-1] + _ = x[typeTainted-2] + _ = x[typeDeposed-3] +} + +const _instanceType_name = "typeInvalidtypePrimarytypeTaintedtypeDeposed" + +var _instanceType_index = [...]uint8{0, 11, 22, 33, 44} + +func (i instanceType) String() string { + if i < 0 || i >= instanceType(len(_instanceType_index)-1) { + return "instanceType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _instanceType_name[_instanceType_index[i]:_instanceType_index[i+1]] +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/resource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/resource.go new file mode 100644 index 00000000000..0541a5fff67 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/resource.go @@ -0,0 +1,372 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package terraform + +import ( + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/go-cty/cty" + "github.com/mitchellh/copystructure" + "github.com/mitchellh/reflectwalk" + + "github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-testing/internal/configs/hcl2shim" +) + +// InstanceInfo is used to hold information about the instance and/or +// resource being modified. +// +// Deprecated: This type is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +type InstanceInfo struct { + // Id is a unique name to represent this instance. This is not related + // to InstanceState.ID in any way. + Id string + + // ModulePath is the complete path of the module containing this + // instance. + ModulePath []string + + // Type is the resource type of this instance + Type string +} + +// ResourceConfig is a legacy type that was formerly used to represent +// interpolatable configuration blocks. It is now only used to shim to old +// APIs that still use this type, via NewResourceConfigShimmed. +// +// Deprecated: This type is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +type ResourceConfig struct { + ComputedKeys []string + Raw map[string]interface{} + Config map[string]interface{} +} + +// NewResourceConfigRaw constructs a ResourceConfig whose content is exactly +// the given value. +// +// The given value may contain hcl2shim.UnknownVariableValue to signal that +// something is computed, but it must not contain unprocessed interpolation +// sequences as we might've seen in Terraform v0.11 and prior. +// +// Deprecated: This function is unintentionally exported by this Go module and +// not supported for external consumption. It will be removed in the next major +// version. Use real Terraform configuration instead. 
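+//
+// For illustration, a raw value like
+// map[string]interface{}{"name": "web", "port": hcl2shim.UnknownVariableValue}
+// yields a ResourceConfig whose ComputedKeys would include "port".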
+func NewResourceConfigRaw(raw map[string]interface{}) *ResourceConfig {
+ v := hcl2shim.HCL2ValueFromConfigValue(raw)
+
+ // This is a little weird but we round-trip the value through the hcl2shim
+ // package here for two reasons: firstly, because that reduces the risk
+ // of it including something unlike what NewResourceConfigShimmed would
+ // produce, and secondly because it creates a copy of "raw" just in case
+ // something is relying on the fact that in the old world the raw and
+ // config maps were always distinct, and thus you could in principle mutate
+ // one without affecting the other. (I sure hope nobody was doing that, though!)
+ //nolint:forcetypeassert
+ cfg := hcl2shim.ConfigValueFromHCL2(v).(map[string]interface{})
+
+ return &ResourceConfig{
+ Raw: raw,
+ Config: cfg,
+
+ ComputedKeys: newResourceConfigShimmedComputedKeys(v, ""),
+ }
+}
+
+// NewResourceConfigShimmed wraps a cty.Value of object type in a legacy
+// ResourceConfig object, so that it can be passed to older APIs that expect
+// this wrapping.
+//
+// The returned ResourceConfig is already interpolated and cannot be
+// re-interpolated. It is, therefore, useful only to functions that expect
+// an already-populated ResourceConfig which they then treat as read-only.
+//
+// If the given value is not of an object type that conforms to the given
+// schema then this function will panic.
+//
+// Deprecated: This function is unintentionally exported by this Go module and
+// not supported for external consumption. It will be removed in the next major
+// version.
+func NewResourceConfigShimmed(val cty.Value, schema *configschema.Block) *ResourceConfig {
+ if !val.Type().IsObjectType() {
+ panic(fmt.Errorf("NewResourceConfigShimmed given %#v; an object type is required", val.Type()))
+ }
+ ret := &ResourceConfig{}
+
+ legacyVal := hcl2shim.ConfigValueFromHCL2Block(val, schema)
+ if legacyVal != nil {
+ ret.Config = legacyVal
+
+ // Now we need to walk through our structure and find any unknown values,
+ // producing the separate list ComputedKeys to represent these. We use the
+ // schema here so that we can preserve the expected invariant
+ // that an attribute is always either wholly known or wholly unknown, while
+ // a child block can be partially unknown.
+ ret.ComputedKeys = newResourceConfigShimmedComputedKeys(val, "")
+ } else {
+ ret.Config = make(map[string]interface{})
+ }
+ ret.Raw = ret.Config
+
+ return ret
+}
+
+// Record any config values in ComputedKeys. This field had been unused in
+// helper/schema, but in the new protocol we're using this so that the SDK can
+// now handle having an unknown collection. The legacy diff code doesn't
+// properly handle the unknown, because it can't be expressed in the same way
+// between the config and diff.
+func newResourceConfigShimmedComputedKeys(val cty.Value, path string) []string {
+ var ret []string
+ ty := val.Type()
+
+ if val.IsNull() {
+ return ret
+ }
+
+ if !val.IsKnown() {
+ // we shouldn't have an entirely unknown resource, but prevent empty
+ // strings just in case
+ if len(path) > 0 {
+ ret = append(ret, path)
+ }
+ return ret
+ }
+
+ if path != "" {
+ path += "."
+ }
+ switch {
+ case ty.IsListType(), ty.IsTupleType(), ty.IsSetType():
+ i := 0
+ for it := val.ElementIterator(); it.Next(); i++ {
+ _, subVal := it.Element()
+ keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%d", path, i))
+ ret = append(ret, keys...)
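+ // (For a list attribute "subnets" whose second element is unknown,
+ // the key appended here would be "subnets.1".)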
+ }
+
+ case ty.IsMapType(), ty.IsObjectType():
+ for it := val.ElementIterator(); it.Next(); {
+ subK, subVal := it.Element()
+ keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%s", path, subK.AsString()))
+ ret = append(ret, keys...)
+ }
+ }
+
+ return ret
+}
+
+// DeepCopy performs a deep copy of the configuration. This makes it safe
+// to modify any of the structures that are part of the resource config without
+// affecting the original configuration.
+//
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (c *ResourceConfig) DeepCopy() *ResourceConfig {
+ // DeepCopying a nil should return a nil to avoid panics
+ if c == nil {
+ return nil
+ }
+
+ // Copy; this will copy all the exported attributes
+ copiedConfig, err := copystructure.Config{Lock: true}.Copy(c)
+ if err != nil {
+ panic(err)
+ }
+
+ // Force the type
+ result, ok := copiedConfig.(*ResourceConfig)
+ if !ok {
+ panic(fmt.Errorf("unexpected type %T for copiedConfig", copiedConfig))
+ }
+
+ return result
+}
+
+// Equal checks the equality of two resource configs.
+func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool {
+ // If either are nil, then they're only equal if they're both nil
+ if c == nil || c2 == nil {
+ return c == c2
+ }
+
+ // Sort the computed keys so they're deterministic
+ sort.Strings(c.ComputedKeys)
+ sort.Strings(c2.ComputedKeys)
+
+ // Two resource configs are equal if their exported properties are equal.
+ // We don't compare "raw" because it is never used again after
+ // initialization and for all intents and purposes they are equal
+ // if the exported properties are equal.
+ check := [][2]interface{}{
+ {c.ComputedKeys, c2.ComputedKeys},
+ {c.Raw, c2.Raw},
+ {c.Config, c2.Config},
+ }
+ for _, pair := range check {
+ if !reflect.DeepEqual(pair[0], pair[1]) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Get looks up a configuration value by key and returns the value.
+//
+// The second return value is true if the get was successful. Get will
+// return the raw value if the key is computed, so you should pair this
+// with IsComputed.
+//
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (c *ResourceConfig) Get(k string) (interface{}, bool) {
+ // We aim to get a value from the configuration. If it is computed,
+ // then we return the pure raw value.
+ source := c.Config
+ if c.IsComputed(k) {
+ source = c.Raw
+ }
+
+ return c.get(k, source)
+}
+
+// GetRaw looks up a configuration value by key and returns the value,
+// from the raw, uninterpolated config.
+//
+// The second return value is true if the get was successful. Get will
+// not succeed if the value is being computed.
+//
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (c *ResourceConfig) GetRaw(k string) (interface{}, bool) {
+ return c.get(k, c.Raw)
+}
+
+// IsComputed returns whether the given key is computed or not.
+//
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (c *ResourceConfig) IsComputed(k string) bool {
+ // The next thing we do is check whether the config gives us a computed
+ // value for this key.
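+ // For example, a Config in which "subnet_id" maps to
+ // hcl2shim.UnknownVariableValue makes IsComputed("subnet_id") return true.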
+ v, ok := c.get(k, c.Config)
+ if !ok {
+ return false
+ }
+
+ // If value is nil, then it isn't computed
+ if v == nil {
+ return false
+ }
+
+ // Test if the value contains an unknown value
+ var w unknownCheckWalker
+ if err := reflectwalk.Walk(v, &w); err != nil {
+ panic(err)
+ }
+
+ return w.Unknown
+}
+
+func (c *ResourceConfig) get(
+ k string, raw map[string]interface{}) (interface{}, bool) {
+ parts := strings.Split(k, ".")
+ if len(parts) == 1 && parts[0] == "" {
+ parts = nil
+ }
+
+ var current interface{} = raw
+ var previous interface{} = nil
+ for i, part := range parts {
+ if current == nil {
+ return nil, false
+ }
+
+ cv := reflect.ValueOf(current)
+ switch cv.Kind() {
+ case reflect.Map:
+ previous = current
+ v := cv.MapIndex(reflect.ValueOf(part))
+ if !v.IsValid() {
+ if i > 0 && i != (len(parts)-1) {
+ tryKey := strings.Join(parts[i:], ".")
+ v := cv.MapIndex(reflect.ValueOf(tryKey))
+ if !v.IsValid() {
+ return nil, false
+ }
+
+ return v.Interface(), true
+ }
+
+ return nil, false
+ }
+
+ current = v.Interface()
+ case reflect.Slice:
+ previous = current
+
+ if part == "#" {
+ // If any value in a list is computed, this whole thing
+ // is computed and we can't read any part of it.
+ for i := 0; i < cv.Len(); i++ {
+ if v := cv.Index(i).Interface(); v == hcl2shim.UnknownVariableValue {
+ return v, true
+ }
+ }
+
+ current = cv.Len()
+ } else {
+ i, err := strconv.ParseInt(part, 0, 0)
+ if err != nil {
+ return nil, false
+ }
+ if int(i) < 0 || int(i) >= cv.Len() {
+ return nil, false
+ }
+ current = cv.Index(int(i)).Interface()
+ }
+ case reflect.String:
+ // This happens when map keys contain "." and have a common
+ // prefix, and so were split as path components above.
+ actualKey := strings.Join(parts[i-1:], ".")
+ if prevMap, ok := previous.(map[string]interface{}); ok {
+ v, ok := prevMap[actualKey]
+ return v, ok
+ }
+
+ return nil, false
+ default:
+ panic(fmt.Sprintf("Unknown kind: %s", cv.Kind()))
+ }
+ }
+
+ return current, true
+}
+
+// unknownCheckWalker records whether a walked value contains
+// hcl2shim.UnknownVariableValue anywhere within it.
+type unknownCheckWalker struct {
+ Unknown bool
+}
+
+// TODO: investigate why deleting this causes odd runtime test failures;
+// it must satisfy some interface implementation check.
+func (w *unknownCheckWalker) Primitive(v reflect.Value) error {
+ if v.Interface() == hcl2shim.UnknownVariableValue {
+ w.Unknown = true
+ }
+
+ return nil
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/resource_address.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/resource_address.go
new file mode 100644
index 00000000000..8d92fbb5e45
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/resource_address.go
@@ -0,0 +1,229 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package terraform
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// resourceAddress is a way of identifying an individual resource (or,
+// eventually, a subset of resources) within the state. It is used for Targets.
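+// Addresses take forms like "aws_instance.web", "module.foo.aws_instance.web[0]"
+// or "data.aws_ami.ubuntu" (example names), as parsed by parseResourceAddress
+// below.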
+type resourceAddress struct { + // Addresses a resource falling somewhere in the module path + // When specified alone, addresses all resources within a module path + Path []string + + // Addresses a specific resource that occurs in a list + Index int + + InstanceType instanceType + InstanceTypeSet bool + Name string + Type string + Mode ResourceMode // significant only if InstanceTypeSet +} + +// String outputs the address that parses into this address. +func (r *resourceAddress) String() string { + var result []string + for _, p := range r.Path { + result = append(result, "module", p) + } + + switch r.Mode { + case ManagedResourceMode: + // nothing to do + case DataResourceMode: + result = append(result, "data") + default: + panic(fmt.Errorf("unsupported resource mode %s", r.Mode)) + } + + if r.Type != "" { + result = append(result, r.Type) + } + + if r.Name != "" { + name := r.Name + if r.InstanceTypeSet { + switch r.InstanceType { + case typePrimary: + name += ".primary" + case typeDeposed: + name += ".deposed" + case typeTainted: + name += ".tainted" + } + } + + if r.Index >= 0 { + name += fmt.Sprintf("[%d]", r.Index) + } + result = append(result, name) + } + + return strings.Join(result, ".") +} + +func parseResourceAddress(s string) (*resourceAddress, error) { + matches, err := tokenizeResourceAddress(s) + if err != nil { + return nil, err + } + mode := ManagedResourceMode + if matches["data_prefix"] != "" { + mode = DataResourceMode + } + resourceIndex, err := parseResourceIndex(matches["index"]) + if err != nil { + return nil, err + } + instanceType, err := parseInstanceType(matches["instance_type"]) + if err != nil { + return nil, err + } + path := parseResourcePath(matches["path"]) + + // not allowed to say "data." without a type following + if mode == DataResourceMode && matches["type"] == "" { + return nil, fmt.Errorf( + "invalid resource address %q: must target specific data instance", + s, + ) + } + + return &resourceAddress{ + Path: path, + Index: resourceIndex, + InstanceType: instanceType, + InstanceTypeSet: matches["instance_type"] != "", + Name: matches["name"], + Type: matches["type"], + Mode: mode, + }, nil +} + +// Less returns true if and only if the receiver should be sorted before +// the given address when presenting a list of resource addresses to +// an end-user. +// +// This sort uses lexicographic sorting for most components, but uses +// numeric sort for indices, thus causing index 10 to sort after +// index 9, rather than after index 1. +func (addr *resourceAddress) Less(other *resourceAddress) bool { + + switch { + + case len(addr.Path) != len(other.Path): + return len(addr.Path) < len(other.Path) + + case !reflect.DeepEqual(addr.Path, other.Path): + // If the two paths are the same length but don't match, we'll just + // cheat and compare the string forms since it's easier than + // comparing all of the path segments in turn, and lexicographic + // comparison is correct for the module path portion. + addrStr := addr.String() + otherStr := other.String() + return addrStr < otherStr + + case addr.Mode != other.Mode: + return addr.Mode == DataResourceMode + + case addr.Type != other.Type: + return addr.Type < other.Type + + case addr.Name != other.Name: + return addr.Name < other.Name + + case addr.Index != other.Index: + // Since "Index" is -1 for an un-indexed address, this also conveniently + // sorts unindexed addresses before indexed ones, should they both + // appear for some reason. 
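+ // e.g. an unindexed "aws_instance.web" (Index -1) sorts before
+ // "aws_instance.web[0]", and index 9 sorts before index 10.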
+ return addr.Index < other.Index
+
+ case addr.InstanceTypeSet != other.InstanceTypeSet:
+ return !addr.InstanceTypeSet
+
+ case addr.InstanceType != other.InstanceType:
+ // InstanceType is actually an enum, so this is just an arbitrary
+ // sort based on the enum numeric values, and thus not particularly
+ // meaningful.
+ return addr.InstanceType < other.InstanceType
+
+ default:
+ return false
+
+ }
+}
+
+func parseResourceIndex(s string) (int, error) {
+ if s == "" {
+ return -1, nil
+ }
+ return strconv.Atoi(s)
+}
+
+func parseResourcePath(s string) []string {
+ if s == "" {
+ return nil
+ }
+ parts := strings.Split(s, ".")
+ path := make([]string, 0, len(parts))
+ for _, s := range parts {
+ // Due to the limitations of the regexp match below, the path match has
+ // some noise in it we have to filter out :|
+ if s == "" || s == "module" {
+ continue
+ }
+ path = append(path, s)
+ }
+ return path
+}
+
+func parseInstanceType(s string) (instanceType, error) {
+ switch s {
+ case "", "primary":
+ return typePrimary, nil
+ case "deposed":
+ return typeDeposed, nil
+ case "tainted":
+ return typeTainted, nil
+ default:
+ return typeInvalid, fmt.Errorf("Unexpected value for instanceType field: %q", s)
+ }
+}
+
+func tokenizeResourceAddress(s string) (map[string]string, error) {
+ // Example of portions of the regexp below using the
+ // string "aws_instance.web.tainted[1]"
+ re := regexp.MustCompile(`\A` +
+ // "module.foo.module.bar" (optional)
+ `(?P<path>(?:module\.(?P<module_name>[^.]+)\.?)*)` +
+ // possibly "data.", if targeting a data resource
+ `(?P<data_prefix>(?:data\.)?)` +
+ // "aws_instance.web" (optional when module path specified)
+ `(?:(?P<type>[^.]+)\.(?P<name>[^.[]+))?` +
+ // "tainted" (optional, omission implies: "primary")
+ `(?:\.(?P<instance_type>\w+))?` +
+ // "1" (optional, omission implies: "0")
+ `(?:\[(?P<index>\d+)\])?` +
+ `\z`)
+
+ groupNames := re.SubexpNames()
+ rawMatches := re.FindAllStringSubmatch(s, -1)
+ if len(rawMatches) != 1 {
+ return nil, fmt.Errorf("invalid resource address %q", s)
+ }
+
+ matches := make(map[string]string)
+ for i, m := range rawMatches[0] {
+ matches[groupNames[i]] = m
+ }
+
+ return matches, nil
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/resource_mode.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/resource_mode.go
new file mode 100644
index 00000000000..2d7b10bcff7
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/resource_mode.go
@@ -0,0 +1,18 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package terraform
+
+// This code was previously generated with a go:generate directive calling:
+// go run golang.org/x/tools/cmd/stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go
+// However, it is now considered frozen and the tooling dependency has been
+// removed. The String method can be manually updated if necessary.
+
+// ResourceMode is deprecated, use addrs.ResourceMode instead.
+// It has been preserved for backwards compatibility.
+type ResourceMode int + +const ( + ManagedResourceMode ResourceMode = iota + DataResourceMode +) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/resource_mode_string.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/resource_mode_string.go new file mode 100644 index 00000000000..ba84346a218 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/resource_mode_string.go @@ -0,0 +1,24 @@ +// Code generated by "stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go"; DO NOT EDIT. + +package terraform + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ManagedResourceMode-0] + _ = x[DataResourceMode-1] +} + +const _ResourceMode_name = "ManagedResourceModeDataResourceMode" + +var _ResourceMode_index = [...]uint8{0, 19, 35} + +func (i ResourceMode) String() string { + if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) { + return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]] +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/resource_provider.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/resource_provider.go new file mode 100644 index 00000000000..6de283544bb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/resource_provider.go @@ -0,0 +1,37 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package terraform + +// ResourceType is a type of resource that a resource provider can manage. +// +// Deprecated: This type is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +type ResourceType struct { + Name string // Name of the resource, example "instance" (no provider prefix) + Importable bool // Whether this resource supports importing + + // SchemaAvailable is set if the provider supports the ProviderSchema, + // ResourceTypeSchema and DataSourceSchema methods. Although it is + // included on each resource type, it's actually a provider-wide setting + // that's smuggled here only because that avoids a breaking change to + // the plugin protocol. + SchemaAvailable bool +} + +// DataSource is a data source that a resource provider implements. +// +// Deprecated: This type is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +type DataSource struct { + Name string + + // SchemaAvailable is set if the provider supports the ProviderSchema, + // ResourceTypeSchema and DataSourceSchema methods. Although it is + // included on each resource type, it's actually a provider-wide setting + // that's smuggled here only because that avoids a breaking change to + // the plugin protocol. 
+ SchemaAvailable bool
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/schemas.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/schemas.go
new file mode 100644
index 00000000000..1cec2eb6fe8
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/schemas.go
@@ -0,0 +1,37 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package terraform
+
+import (
+ "github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema"
+)
+
+// ProviderSchema represents the schema for a provider's own configuration
+// and the configuration for some or all of its resources and data sources.
+//
+// The completeness of this structure depends on how it was constructed.
+// When constructed for a configuration, it will generally include only
+// resource types and data sources used by that configuration.
+//
+// Deprecated: This type is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+type ProviderSchema struct {
+ Provider *configschema.Block
+ ResourceTypes map[string]*configschema.Block
+ DataSources map[string]*configschema.Block
+
+ ResourceTypeSchemaVersions map[string]uint64
+}
+
+// ProviderSchemaRequest is used to describe to a ResourceProvider which
+// aspects of schema are required, when calling the GetSchema method.
+//
+// Deprecated: This type is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+type ProviderSchemaRequest struct {
+ ResourceTypes []string
+ DataSources []string
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/state.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/state.go
new file mode 100644
index 00000000000..1e0fcb28577
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/state.go
@@ -0,0 +1,1824 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package terraform
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/go-cty/cty"
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/go-uuid"
+ "github.com/mitchellh/copystructure"
+
+ "github.com/hashicorp/terraform-plugin-testing/internal/addrs"
+ "github.com/hashicorp/terraform-plugin-testing/internal/configs/hcl2shim"
+)
+
+const (
+ // stateVersion is the current version for our state file
+ stateVersion = 3
+)
+
+// rootModulePath is the path of the root module
+var rootModulePath = []string{"root"}
+
+// normalizeModulePath transforms a legacy module path (which may or may not
+// have a redundant "root" label at the start of it) into an
+// addrs.ModuleInstance representing the same module.
+//
+// For legacy reasons, different parts of Terraform disagree about whether the
+// root module has the path []string{} or []string{"root"}, and so this
+// function accepts both and trims off the "root". An implication of this is
+// that it's not possible to actually have a module call in the root module
+// that is itself named "root", since that would be ambiguous.
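+//
+// For example, both []string{"root", "network"} and []string{"network"}
+// normalize to the one-step module instance "module.network".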
+//
+// normalizeModulePath takes a raw module path and returns a path that
+// has the rootModulePath prepended to it. If I could go back in time I
+// would've never had a rootModulePath (empty path would be root). We can
+// still fix this but that's a big refactor that my branch doesn't make sense
+// for. Instead, this function normalizes paths.
+func normalizeModulePath(p []string) addrs.ModuleInstance {
+ // FIXME: Remove this once everyone is using addrs.ModuleInstance.
+
+ if len(p) > 0 && p[0] == "root" {
+ p = p[1:]
+ }
+
+ ret := make(addrs.ModuleInstance, len(p))
+ for i, name := range p {
+ // For now we don't actually support modules with multiple instances
+ // identified by keys, so we just treat every path element as a
+ // step with no key.
+ ret[i] = addrs.ModuleInstanceStep{
+ Name: name,
+ }
+ }
+ return ret
+}
+
+// State keeps track of a snapshot state-of-the-world that Terraform
+// can use to keep track of what real world resources it is actually
+// managing.
+type State struct {
+ // Version is the state file protocol version.
+ Version int `json:"version"`
+
+ // TFVersion is the version of Terraform that wrote this state.
+ TFVersion string `json:"terraform_version,omitempty"`
+
+ // Serial is incremented on any operation that modifies
+ // the State file. It is used to detect potentially conflicting
+ // updates.
+ Serial int64 `json:"serial"`
+
+ // Lineage is set when a new, blank state is created and then
+ // never updated. This allows us to determine whether the serials
+ // of two states can be meaningfully compared.
+ // Apart from the guarantee that collisions between two lineages
+ // are very unlikely, this value is opaque and external callers
+ // should only compare lineage strings byte-for-byte for equality.
+ Lineage string `json:"lineage"`
+
+ // Remote is used to track the metadata required to
+ // pull and push state files from a remote storage endpoint.
+ //
+ // Deprecated: This field is unintentionally exported by this Go module and
+ // external consumption is not supported. It will be removed in the next
+ // major version.
+ Remote *RemoteState `json:"remote,omitempty"`
+
+ // Backend tracks the configuration for the backend in use with
+ // this state. This is used to track any changes in the backend
+ // configuration.
+ //
+ // Deprecated: This field is unintentionally exported by this Go module and
+ // external consumption is not supported. It will be removed in the next
+ // major version.
+ Backend *BackendState `json:"backend,omitempty"`
+
+ // Modules contains all the modules in a breadth-first order
+ Modules []*ModuleState `json:"modules"`
+
+ mu sync.Mutex
+
+ // IsBinaryDrivenTest is a special flag that assists with a binary driver
+ // heuristic; it should not be set externally
+ //
+ // Deprecated: This field is unintentionally exported by this Go module and
+ // external consumption is not supported. It will be removed in the next
+ // major version.
+ IsBinaryDrivenTest bool
+}
+
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (s *State) Lock() { s.mu.Lock() }
+
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (s *State) Unlock() { s.mu.Unlock() } + +// NewState is used to initialize a blank state +// +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +func NewState() *State { + s := &State{} + s.init() + return s +} + +// Children returns the ModuleStates that are direct children of +// the given path. If the path is "root", for example, then children +// returned might be "root.child", but not "root.child.grandchild". +// +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +func (s *State) Children(path []string) []*ModuleState { + s.Lock() + defer s.Unlock() + // TODO: test + + return s.children(path) +} + +func (s *State) children(path []string) []*ModuleState { + result := make([]*ModuleState, 0) + for _, m := range s.Modules { + if m == nil { + continue + } + + if len(m.Path) != len(path)+1 { + continue + } + if !reflect.DeepEqual(path, m.Path[:len(path)]) { + continue + } + + result = append(result, m) + } + + return result +} + +// AddModule adds the module with the given path to the state. +// +// This should be the preferred method to add module states since it +// allows us to optimize lookups later as well as control sorting. +// +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +func (s *State) AddModule(path addrs.ModuleInstance) *ModuleState { + s.Lock() + defer s.Unlock() + + return s.addModule(path) +} + +func (s *State) addModule(path addrs.ModuleInstance) *ModuleState { + // check if the module exists first + m := s.moduleByPath(path) + if m != nil { + return m + } + + // Lower the new-style address into a legacy-style address. + // This requires that none of the steps have instance keys, which is + // true for all addresses at the time of implementing this because + // "count" and "for_each" are not yet implemented for modules. + // For the purposes of state, the legacy address format also includes + // a redundant extra prefix element "root". It is important to include + // this because the "prune" method will remove any module that has a + // path length less than one, and other parts of the state code will + // trim off the first element indiscriminately. + legacyPath := make([]string, len(path)+1) + legacyPath[0] = "root" + for i, step := range path { + if step.InstanceKey != addrs.NoKey { + // FIXME: Once the rest of Terraform is ready to use count and + // for_each, remove all of this and just write the addrs.ModuleInstance + // value itself into the ModuleState. + panic("state cannot represent modules with count or for_each keys") + } + + legacyPath[i+1] = step.Name + } + + m = &ModuleState{Path: legacyPath} + m.init() + s.Modules = append(s.Modules, m) + s.sort() + return m +} + +// ModuleByPath is used to lookup the module state for the given path. +// This should be the preferred lookup mechanism as it allows for future +// lookup optimizations. +// +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. 
+func (s *State) ModuleByPath(path addrs.ModuleInstance) *ModuleState {
+ if s == nil {
+ return nil
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ return s.moduleByPath(path)
+}
+
+func (s *State) moduleByPath(path addrs.ModuleInstance) *ModuleState {
+ for _, mod := range s.Modules {
+ if mod == nil {
+ continue
+ }
+ if mod.Path == nil {
+ panic("missing module path")
+ }
+ modPath := normalizeModulePath(mod.Path)
+ if modPath.String() == path.String() {
+ return mod
+ }
+ }
+ return nil
+}
+
+// Empty returns true if the state is empty.
+//
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (s *State) Empty() bool {
+ if s == nil {
+ return true
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ return len(s.Modules) == 0
+}
+
+// HasResources returns true if the state contains any resources.
+//
+// This is similar to !s.Empty, except that it returns false in the case where
+// the state has modules but all of them are devoid of resources.
+//
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (s *State) HasResources() bool {
+ if s.Empty() {
+ return false
+ }
+
+ for _, mod := range s.Modules {
+ if len(mod.Resources) > 0 {
+ return true
+ }
+ }
+
+ return false
+}
+
+// IsRemote returns true if State represents a state that exists and is
+// remote.
+//
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (s *State) IsRemote() bool {
+ if s == nil {
+ return false
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Remote == nil {
+ return false
+ }
+ if s.Remote.Type == "" {
+ return false
+ }
+
+ return true
+}
+
+// Validate validates the integrity of this state file.
+//
+// Certain properties of the statefile are expected by Terraform in order
+// to behave properly. The core of Terraform will assume that, once it
+// receives a State structure, it has been validated. This validation
+// check should be called to ensure that.
+//
+// If this returns an error, then the user should be notified. The error
+// response will include detailed information on the nature of the error.
+//
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (s *State) Validate() error {
+ s.Lock()
+ defer s.Unlock()
+
+ var result error
+
+ // !!!! FOR DEVELOPERS !!!!
+ //
+ // Any errors returned from this Validate function will BLOCK TERRAFORM
+ // from loading a state file. Therefore, this should only contain checks
+ // that are only resolvable through manual intervention.
+ //
+ // !!!! FOR DEVELOPERS !!!!
+
+ // Make sure there are no duplicate module states. We open a new
+ // block here so we can use basic variable names and future validations
+ // can do the same.
+ {
+ found := make(map[string]struct{})
+ for _, ms := range s.Modules {
+ if ms == nil {
+ continue
+ }
+
+ key := strings.Join(ms.Path, ".")
+ if _, ok := found[key]; ok {
+ result = multierror.Append(result, fmt.Errorf(
+ strings.TrimSpace(stateValidateErrMultiModule), key))
+ continue
+ }
+
+ found[key] = struct{}{}
+ }
+ }
+
+ return result
+}
+
+// Remove removes the item in the state at the given address, returning
+// any errors that may have occurred.
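+// For example, s.Remove("module.network") would target that module's state as
+// a whole.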
+//
+// If the address references a module state or resource, it will delete
+// all children as well. To check what will be deleted, use a StateFilter
+// first.
+//
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (s *State) Remove(addr ...string) error {
+ s.Lock()
+ defer s.Unlock()
+
+ // Filter out what we need to delete
+ filter := &stateFilter{State: s}
+ results, err := filter.filter(addr...)
+ if err != nil {
+ return err
+ }
+
+ // If we have no results, just exit early; we're not going to do anything.
+ // While what happens below is fairly fast, this is an important early
+ // exit since the prune below might modify the state more and we don't
+ // want to modify the state if we don't have to.
+ if len(results) == 0 {
+ return nil
+ }
+
+ // Go through each result and grab what we need
+ removed := make(map[interface{}]struct{})
+ for _, r := range results {
+ // Convert the path to our own type
+ path := append([]string{"root"}, r.Path...)
+
+ // If we removed this already, then ignore
+ if _, ok := removed[r.Value]; ok {
+ continue
+ }
+
+ // If we removed the parent already, then ignore
+ if r.Parent != nil {
+ if _, ok := removed[r.Parent.Value]; ok {
+ continue
+ }
+ }
+
+ // Add this to the removed list
+ removed[r.Value] = struct{}{}
+
+ switch v := r.Value.(type) {
+ case *ModuleState:
+ s.removeModule(v)
+ case *ResourceState:
+ s.removeResource(path, v)
+ case *InstanceState:
+ //nolint:forcetypeassert
+ s.removeInstance(r.Parent.Value.(*ResourceState), v)
+ default:
+ return fmt.Errorf("unknown type to delete: %T", r.Value)
+ }
+ }
+
+ // Prune since the removal functions often do the bare minimum to
+ // remove a thing and may leave around dangling empty modules, resources,
+ // etc. Prune will clean that all up.
+ s.prune()
+
+ return nil
+}
+
+func (s *State) removeModule(v *ModuleState) {
+ for i, m := range s.Modules {
+ if m == v {
+ s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil
+ return
+ }
+ }
+}
+
+func (s *State) removeResource(path []string, v *ResourceState) {
+ // Get the module this resource lives in. If it doesn't exist, we're done.
+ mod := s.moduleByPath(normalizeModulePath(path))
+ if mod == nil {
+ return
+ }
+
+ // Find this resource. This is an O(N) lookup; if we had the key it could
+ // be O(1), but even with thousands of resources this shouldn't matter
+ // right now. We can easily improve performance here when the time comes.
+ for k, r := range mod.Resources {
+ if r == v {
+ // Found it
+ delete(mod.Resources, k)
+ return
+ }
+ }
+}
+
+func (s *State) removeInstance(r *ResourceState, v *InstanceState) {
+ // Go through the resource and find the instance that matches this
+ // (if any) and remove it.
+
+ // Check primary
+ if r.Primary == v {
+ r.Primary = nil
+ return
+ }
+}
+
+// RootModule returns the ModuleState for the root module
+func (s *State) RootModule() *ModuleState {
+ root := s.ModuleByPath(addrs.RootModuleInstance)
+ if root == nil {
+ panic("missing root module")
+ }
+ return root
+}
+
+// Equal tests if one state is equal to another.
+//
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (s *State) Equal(other *State) bool { + // If one is nil, we do a direct check + if s == nil || other == nil { + return s == other + } + + s.Lock() + defer s.Unlock() + return s.equal(other) +} + +func (s *State) equal(other *State) bool { + if s == nil || other == nil { + return s == other + } + + // If the versions are different, they're certainly not equal + if s.Version != other.Version { + return false + } + + // If any of the modules are not equal, then this state isn't equal + if len(s.Modules) != len(other.Modules) { + return false + } + for _, m := range s.Modules { + // This isn't very optimal currently but works. + otherM := other.moduleByPath(normalizeModulePath(m.Path)) + if otherM == nil { + return false + } + + // If they're not equal, then we're not equal! + if !m.Equal(otherM) { + return false + } + } + + return true +} + +// Deprecated: This type is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +type StateAgeComparison int + +const ( + StateAgeEqual StateAgeComparison = 0 + StateAgeReceiverNewer StateAgeComparison = 1 + StateAgeReceiverOlder StateAgeComparison = -1 +) + +// CompareAges compares one state with another for which is "older". +// +// This is a simple check using the state's serial, and is thus only as +// reliable as the serial itself. In the normal case, only one state +// exists for a given combination of lineage/serial, but Terraform +// does not guarantee this and so the result of this method should be +// used with care. +// +// Returns an integer that is negative if the receiver is older than +// the argument, positive if the converse, and zero if they are equal. +// An error is returned if the two states are not of the same lineage, +// in which case the integer returned has no meaning. +// +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +func (s *State) CompareAges(other *State) (StateAgeComparison, error) { + // nil states are "older" than actual states + switch { + case s != nil && other == nil: + return StateAgeReceiverNewer, nil + case s == nil && other != nil: + return StateAgeReceiverOlder, nil + case s == nil && other == nil: + return StateAgeEqual, nil + } + + if !s.SameLineage(other) { + return StateAgeEqual, fmt.Errorf( + "can't compare two states of differing lineage", + ) + } + + s.Lock() + defer s.Unlock() + + switch { + case s.Serial < other.Serial: + return StateAgeReceiverOlder, nil + case s.Serial > other.Serial: + return StateAgeReceiverNewer, nil + default: + return StateAgeEqual, nil + } +} + +// SameLineage returns true only if the state given in argument belongs +// to the same "lineage" of states as the receiver. +// +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +func (s *State) SameLineage(other *State) bool { + s.Lock() + defer s.Unlock() + + // If one of the states has no lineage then it is assumed to predate + // this concept, and so we'll accept it as belonging to any lineage + // so that a lineage string can be assigned to newer versions + // without breaking compatibility with older versions. 
+ if s.Lineage == "" || other.Lineage == "" { + return true + } + + return s.Lineage == other.Lineage +} + +// DeepCopy performs a deep copy of the state structure and returns +// a new structure. +// +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +func (s *State) DeepCopy() *State { + if s == nil { + return nil + } + + copiedState, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(err) + } + + state, ok := copiedState.(*State) + if !ok { + panic(fmt.Errorf("unexpected type %T for copiedState", state)) + } + + return state +} + +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +func (s *State) Init() { + s.Lock() + defer s.Unlock() + s.init() +} + +func (s *State) init() { + if s.Version == 0 { + s.Version = stateVersion + } + + if s.moduleByPath(addrs.RootModuleInstance) == nil { + s.addModule(addrs.RootModuleInstance) + } + s.ensureHasLineage() + + for _, mod := range s.Modules { + if mod != nil { + mod.init() + } + } + + if s.Remote != nil { + s.Remote.init() + } + +} + +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +func (s *State) EnsureHasLineage() { + s.Lock() + defer s.Unlock() + + s.ensureHasLineage() +} + +func (s *State) ensureHasLineage() { + if s.Lineage == "" { + lineage, err := uuid.GenerateUUID() + if err != nil { + panic(fmt.Errorf("Failed to generate lineage: %v", err)) + } + s.Lineage = lineage + if os.Getenv("TF_ACC") == "" || os.Getenv("TF_ACC_STATE_LINEAGE") == "1" { + log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage) + } + } else { + if os.Getenv("TF_ACC") == "" || os.Getenv("TF_ACC_STATE_LINEAGE") == "1" { + log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage) + } + } +} + +// AddModuleState insert this module state and override any existing ModuleState +// +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +func (s *State) AddModuleState(mod *ModuleState) { + mod.init() + s.Lock() + defer s.Unlock() + + s.addModuleState(mod) +} + +func (s *State) addModuleState(mod *ModuleState) { + for i, m := range s.Modules { + if reflect.DeepEqual(m.Path, mod.Path) { + s.Modules[i] = mod + return + } + } + + s.Modules = append(s.Modules, mod) + s.sort() +} + +// prune is used to remove any resources that are no longer required +func (s *State) prune() { + if s == nil { + return + } + + // Filter out empty modules. + // A module is always assumed to have a path, and it's length isn't always + // bounds checked later on. Modules may be "emptied" during destroy, but we + // never want to store those in the state. + for i := 0; i < len(s.Modules); i++ { + if s.Modules[i] == nil || len(s.Modules[i].Path) == 0 { + s.Modules = append(s.Modules[:i], s.Modules[i+1:]...) 
+ i-- + } + } + + for _, mod := range s.Modules { + mod.prune() + } + if s.Remote != nil && s.Remote.Empty() { + s.Remote = nil + } +} + +// sort sorts the modules +func (s *State) sort() { + sort.Sort(moduleStateSort(s.Modules)) + + // Allow modules to be sorted + for _, m := range s.Modules { + if m != nil { + m.sort() + } + } +} + +func (s *State) String() string { + if s == nil { + return "" + } + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + for _, m := range s.Modules { + mStr := m.String() + + // If we're the root module, we just write the output directly. + if reflect.DeepEqual(m.Path, rootModulePath) { + buf.WriteString(mStr + "\n") + continue + } + + buf.WriteString(fmt.Sprintf("module.%s:\n", strings.Join(m.Path[1:], "."))) + + s := bufio.NewScanner(strings.NewReader(mStr)) + for s.Scan() { + text := s.Text() + if text != "" { + text = " " + text + } + + buf.WriteString(fmt.Sprintf("%s\n", text)) + } + } + + return strings.TrimSpace(buf.String()) +} + +// BackendState stores the configuration to connect to a remote backend. +// +// Deprecated: This type is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +type BackendState struct { + Type string `json:"type"` // Backend type + ConfigRaw json.RawMessage `json:"config"` // Backend raw config + Hash uint64 `json:"hash"` // Hash of portion of configuration from config files +} + +// RemoteState is used to track the information about a remote +// state store that we push/pull state to. +// +// Deprecated: This type is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +type RemoteState struct { + // Type controls the client we use for the remote state + Type string `json:"type"` + + // Config is used to store arbitrary configuration that + // is type specific + Config map[string]string `json:"config"` + + mu sync.Mutex +} + +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +func (s *RemoteState) Lock() { s.mu.Lock() } + +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +func (s *RemoteState) Unlock() { s.mu.Unlock() } + +func (r *RemoteState) init() { + r.Lock() + defer r.Unlock() + + if r.Config == nil { + r.Config = make(map[string]string) + } +} + +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +func (r *RemoteState) Empty() bool { + if r == nil { + return true + } + r.Lock() + defer r.Unlock() + + return r.Type == "" +} + +// OutputState is used to track the state relevant to a single output. +type OutputState struct { + // Sensitive describes whether the output is considered sensitive, + // which may lead to masking the value on screen in some cases. + Sensitive bool `json:"sensitive"` + // Type describes the structure of Value. Valid values are "string", + // "map" and "list" + Type string `json:"type"` + // Value contains the value of the output, in the structure described + // by the Type field. + Value interface{} `json:"value"` + + mu sync.Mutex +} + +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. 
It will be removed in the next major
+// version.
+func (s *OutputState) Lock() { s.mu.Lock() }
+
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (s *OutputState) Unlock() { s.mu.Unlock() }
+
+func (s *OutputState) String() string {
+ return fmt.Sprintf("%#v", s.Value)
+}
+
+// Equal compares two OutputState structures for equality. nil values are
+// considered equal.
+//
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (s *OutputState) Equal(other *OutputState) bool {
+ if s == nil && other == nil {
+ return true
+ }
+
+ if s == nil || other == nil {
+ return false
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Type != other.Type {
+ return false
+ }
+
+ if s.Sensitive != other.Sensitive {
+ return false
+ }
+
+ if !reflect.DeepEqual(s.Value, other.Value) {
+ return false
+ }
+
+ return true
+}
+
+// ModuleState is used to track all the state relevant to a single
+// module. Previous to Terraform 0.3, all state belonged to the "root"
+// module.
+type ModuleState struct {
+ // Path is the import path from the root module. Module imports are
+ // always disjoint, so the path represents a module tree.
+ Path []string `json:"path"`
+
+ // Locals are kept only transiently in-memory, because we can always
+ // re-compute them.
+ Locals map[string]interface{} `json:"-"`
+
+ // Outputs declared by the module and maintained for each module
+ // even though only the root module technically needs to be kept.
+ // This allows operators to inspect values at the boundaries.
+ Outputs map[string]*OutputState `json:"outputs"`
+
+ // Resources is a mapping of the logically named resource to
+ // the state of the resource. Each resource may actually have
+ // N instances underneath, although a user only needs to think
+ // about the 1:1 case.
+ Resources map[string]*ResourceState `json:"resources"`
+
+ // Dependencies are a list of things that this module relies on
+ // existing to remain intact. For example: a module may depend
+ // on a VPC ID given by an aws_vpc resource.
+ //
+ // Terraform uses this information to build valid destruction
+ // orders and to warn the user if they're destroying a module that
+ // another resource depends on.
+ //
+ // Things can be put into this list that may not be managed by
+ // Terraform. If Terraform doesn't find a matching ID in the
+ // overall state, then it assumes it isn't managed and doesn't
+ // worry about it.
+ Dependencies []string `json:"depends_on"`
+
+ mu sync.Mutex
+}
+
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (s *ModuleState) Lock() { s.mu.Lock() }
+
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (s *ModuleState) Unlock() { s.mu.Unlock() }
+
+// Equal tests whether one module state is equal to another.
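+// A minimal sketch of the semantics (illustrative only, not part of the
+// upstream docs): two freshly constructed states with the same path compare
+// equal.
+//
+//	a := &ModuleState{Path: []string{"root"}}
+//	b := &ModuleState{Path: []string{"root"}}
+//	_ = a.Equal(b) // true: same path, no outputs, resources, or dependencies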
+func (m *ModuleState) Equal(other *ModuleState) bool {
+ m.Lock()
+ defer m.Unlock()
+
+ // Paths must be equal
+ if !reflect.DeepEqual(m.Path, other.Path) {
+ return false
+ }
+
+ // Outputs must be equal
+ if len(m.Outputs) != len(other.Outputs) {
+ return false
+ }
+ for k, v := range m.Outputs {
+ if !other.Outputs[k].Equal(v) {
+ return false
+ }
+ }
+
+ // Dependencies must be equal. This sorts these in place but
+ // this shouldn't cause any problems.
+ sort.Strings(m.Dependencies)
+ sort.Strings(other.Dependencies)
+ if len(m.Dependencies) != len(other.Dependencies) {
+ return false
+ }
+ for i, d := range m.Dependencies {
+ if other.Dependencies[i] != d {
+ return false
+ }
+ }
+
+ // Resources must be equal
+ if len(m.Resources) != len(other.Resources) {
+ return false
+ }
+ for k, r := range m.Resources {
+ otherR, ok := other.Resources[k]
+ if !ok {
+ return false
+ }
+
+ if !r.Equal(otherR) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (m *ModuleState) init() {
+ m.Lock()
+ defer m.Unlock()
+
+ if m.Path == nil {
+ m.Path = []string{}
+ }
+ if m.Outputs == nil {
+ m.Outputs = make(map[string]*OutputState)
+ }
+ if m.Resources == nil {
+ m.Resources = make(map[string]*ResourceState)
+ }
+
+ if m.Dependencies == nil {
+ m.Dependencies = make([]string, 0)
+ }
+
+ for _, rs := range m.Resources {
+ rs.init()
+ }
+}
+
+// prune is used to remove any resources that are no longer required
+func (m *ModuleState) prune() {
+ m.Lock()
+ defer m.Unlock()
+
+ for k, v := range m.Resources {
+ if v == nil || (v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0 {
+ delete(m.Resources, k)
+ continue
+ }
+
+ v.prune()
+ }
+
+ for k, v := range m.Outputs {
+ if v.Value == hcl2shim.UnknownVariableValue {
+ delete(m.Outputs, k)
+ }
+ }
+
+ m.Dependencies = uniqueStrings(m.Dependencies)
+}
+
+func (m *ModuleState) sort() {
+ for _, v := range m.Resources {
+ v.sort()
+ }
+}
+
+func (m *ModuleState) String() string {
+ m.Lock()
+ defer m.Unlock()
+
+ var buf bytes.Buffer
+
+ if len(m.Resources) == 0 {
+ buf.WriteString("<no state>")
+ }
+
+ names := make([]string, 0, len(m.Resources))
+ for name := range m.Resources {
+ names = append(names, name)
+ }
+
+ sort.Sort(resourceNameSort(names))
+
+ for _, k := range names {
+ rs := m.Resources[k]
+ var id string
+ if rs.Primary != nil {
+ id = rs.Primary.ID
+ }
+ if id == "" {
+ id = "<not created>"
+ }
+
+ taintStr := ""
+ if rs.Primary.Tainted {
+ taintStr = " (tainted)"
+ }
+
+ deposedStr := ""
+ if len(rs.Deposed) > 0 {
+ deposedStr = fmt.Sprintf(" (%d deposed)", len(rs.Deposed))
+ }
+
+ buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr))
+ buf.WriteString(fmt.Sprintf(" ID = %s\n", id))
+ if rs.Provider != "" {
+ buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.Provider))
+ }
+
+ var attributes map[string]string
+ if rs.Primary != nil {
+ attributes = rs.Primary.Attributes
+ }
+ attrKeys := make([]string, 0, len(attributes))
+ for ak := range attributes {
+ if ak == "id" {
+ continue
+ }
+
+ attrKeys = append(attrKeys, ak)
+ }
+
+ sort.Strings(attrKeys)
+
+ for _, ak := range attrKeys {
+ av := attributes[ak]
+ buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av))
+ }
+
+ for idx, t := range rs.Deposed {
+ taintStr := ""
+ if t.Tainted {
+ taintStr = " (tainted)"
+ }
+ buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", idx+1, t.ID, taintStr))
+ }
+
+ if len(rs.Dependencies) > 0 {
+ buf.WriteString("\n Dependencies:\n")
+ for _, dep := range rs.Dependencies {
+ buf.WriteString(fmt.Sprintf(" %s\n", dep))
+ }
+ }
+ }
+
+ if len(m.Outputs)
> 0 {
+ buf.WriteString("\nOutputs:\n\n")
+
+ ks := make([]string, 0, len(m.Outputs))
+ for k := range m.Outputs {
+ ks = append(ks, k)
+ }
+
+ sort.Strings(ks)
+
+ for _, k := range ks {
+ v := m.Outputs[k]
+ switch vTyped := v.Value.(type) {
+ case string:
+ buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
+ case []interface{}:
+ buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
+ case map[string]interface{}:
+ var mapKeys []string
+ for key := range vTyped {
+ mapKeys = append(mapKeys, key)
+ }
+ sort.Strings(mapKeys)
+
+ var mapBuf bytes.Buffer
+ mapBuf.WriteString("{")
+ for _, key := range mapKeys {
+ mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key]))
+ }
+ mapBuf.WriteString("}")
+
+ buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String()))
+ }
+ }
+ }
+
+ return buf.String()
+}
+
+// ResourceStateKey is a structured representation of the key used for the
+// ModuleState.Resources mapping.
+type ResourceStateKey struct {
+ Name string
+ Type string
+ Mode ResourceMode
+ Index int
+}
+
+// Equal determines whether two ResourceStateKeys are the same.
+func (rsk *ResourceStateKey) Equal(other *ResourceStateKey) bool {
+ if rsk == nil || other == nil {
+ return false
+ }
+ if rsk.Mode != other.Mode {
+ return false
+ }
+ if rsk.Type != other.Type {
+ return false
+ }
+ if rsk.Name != other.Name {
+ return false
+ }
+ if rsk.Index != other.Index {
+ return false
+ }
+ return true
+}
+
+func (rsk *ResourceStateKey) String() string {
+ if rsk == nil {
+ return ""
+ }
+ var prefix string
+ switch rsk.Mode {
+ case ManagedResourceMode:
+ prefix = ""
+ case DataResourceMode:
+ prefix = "data."
+ default:
+ panic(fmt.Errorf("unknown resource mode %s", rsk.Mode))
+ }
+ if rsk.Index == -1 {
+ return fmt.Sprintf("%s%s.%s", prefix, rsk.Type, rsk.Name)
+ }
+ return fmt.Sprintf("%s%s.%s.%d", prefix, rsk.Type, rsk.Name, rsk.Index)
+}
+
+// parseResourceStateKey accepts a key in the format used by
+// ModuleState.Resources and returns a resource name and resource index. In the
+// state, a resource has the format "type.name.index" or "type.name". In the
+// latter case, the index is returned as -1.
+func parseResourceStateKey(k string) (*ResourceStateKey, error) {
+ parts := strings.Split(k, ".")
+ mode := ManagedResourceMode
+ if len(parts) > 0 && parts[0] == "data" {
+ mode = DataResourceMode
+ // Don't need the constant "data" prefix for parsing
+ // now that we've figured out the mode.
+ parts = parts[1:]
+ }
+ if len(parts) < 2 || len(parts) > 3 {
+ return nil, fmt.Errorf("Malformed resource state key: %s", k)
+ }
+ rsk := &ResourceStateKey{
+ Mode: mode,
+ Type: parts[0],
+ Name: parts[1],
+ Index: -1,
+ }
+ if len(parts) == 3 {
+ index, err := strconv.Atoi(parts[2])
+ if err != nil {
+ return nil, fmt.Errorf("Malformed resource state key index: %s", k)
+ }
+ rsk.Index = index
+ }
+ return rsk, nil
+}
+
+// ResourceState holds the state of a resource that is used so that
+// a provider can find and manage an existing resource as well as for
+// storing attributes that are used to populate variables of child
+// resources.
+//
+// Attributes has attributes about the created resource that are
+// queryable in interpolation: "${type.id.attr}"
+//
+// Extra is just extra data that a provider can return that we store
+// for later, but is not exposed in any way to the user.
+type ResourceState struct {
+ // This is filled in and managed by Terraform, and is the resource
+ // type itself such as "mycloud_instance". If a resource provider sets
+ // this value, it won't be persisted.
+ Type string `json:"type"`
+
+ // Dependencies are a list of things that this resource relies on
+ // existing to remain intact. For example: an AWS instance might
+ // depend on a subnet (which itself might depend on a VPC, and so
+ // on).
+ //
+ // Terraform uses this information to build valid destruction
+ // orders and to warn the user if they're destroying a resource that
+ // another resource depends on.
+ //
+ // Things can be put into this list that may not be managed by
+ // Terraform. If Terraform doesn't find a matching ID in the
+ // overall state, then it assumes it isn't managed and doesn't
+ // worry about it.
+ Dependencies []string `json:"depends_on"`
+
+ // Primary is the current active instance for this resource.
+ // It can be replaced but only after a successful creation.
+ // This is the instance on which providers will act.
+ Primary *InstanceState `json:"primary"`
+
+ // Deposed is used in the mechanics of CreateBeforeDestroy: the existing
+ // Primary is Deposed to get it out of the way for the replacement Primary to
+ // be created by Apply. If the replacement Primary creates successfully, the
+ // Deposed instance is cleaned up.
+ //
+ // If there were problems creating the replacement Primary, the Deposed
+ // instance and the (now tainted) replacement Primary will be swapped so the
+ // tainted replacement will be cleaned up instead.
+ //
+ // An instance will remain in the Deposed list until it is successfully
+ // destroyed and purged.
+ Deposed []*InstanceState `json:"deposed"`
+
+ // Provider is used when a resource is connected to a provider with an alias.
+ // If this string is empty, the resource is connected to the default provider,
+ // e.g. "aws_instance" goes with the "aws" provider.
+ // If the resource block contained a "provider" key, that value will be set here.
+ Provider string `json:"provider"`
+
+ mu sync.Mutex
+}
+
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (s *ResourceState) Lock() { s.mu.Lock() }
+
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (s *ResourceState) Unlock() { s.mu.Unlock() }
+
+// Equal tests whether two ResourceStates are equal.
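+// Equality covers Type, Provider, the sorted Dependencies list, and the
+// Primary instance (compared via InstanceState.Equal); Deposed instances
+// are not part of the comparison.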
+func (s *ResourceState) Equal(other *ResourceState) bool { + s.Lock() + defer s.Unlock() + + if s.Type != other.Type { + return false + } + + if s.Provider != other.Provider { + return false + } + + // Dependencies must be equal + sort.Strings(s.Dependencies) + sort.Strings(other.Dependencies) + if len(s.Dependencies) != len(other.Dependencies) { + return false + } + for i, d := range s.Dependencies { + if other.Dependencies[i] != d { + return false + } + } + + // States must be equal + return s.Primary.Equal(other.Primary) +} + +func (s *ResourceState) init() { + s.Lock() + defer s.Unlock() + + if s.Primary == nil { + s.Primary = &InstanceState{} + } + s.Primary.init() + + if s.Dependencies == nil { + s.Dependencies = []string{} + } + + if s.Deposed == nil { + s.Deposed = make([]*InstanceState, 0) + } +} + +// prune is used to remove any instances that are no longer required +func (s *ResourceState) prune() { + s.Lock() + defer s.Unlock() + + n := len(s.Deposed) + for i := 0; i < n; i++ { + inst := s.Deposed[i] + if inst == nil || inst.ID == "" { + copy(s.Deposed[i:], s.Deposed[i+1:]) + s.Deposed[n-1] = nil + n-- + i-- + } + } + s.Deposed = s.Deposed[:n] + + s.Dependencies = uniqueStrings(s.Dependencies) +} + +func (s *ResourceState) sort() { + s.Lock() + defer s.Unlock() + + sort.Strings(s.Dependencies) +} + +func (s *ResourceState) String() string { + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("Type = %s", s.Type)) + return buf.String() +} + +// InstanceState is used to track the unique state information belonging +// to a given instance. +type InstanceState struct { + // A unique ID for this resource. This is opaque to Terraform + // and is only meant as a lookup mechanism for the providers. + ID string `json:"id"` + + // Attributes are basic information about the resource. Any keys here + // are accessible in variable format within Terraform configurations: + // ${resourcetype.name.attribute}. + Attributes map[string]string `json:"attributes"` + + // Ephemeral is used to store any state associated with this instance + // that is necessary for the Terraform run to complete, but is not + // persisted to a state file. + Ephemeral EphemeralState `json:"-"` + + // Meta is a simple K/V map that is persisted to the State but otherwise + // ignored by Terraform core. It's meant to be used for accounting by + // external client code. The value here must only contain Go primitives + // and collections. + Meta map[string]interface{} `json:"meta"` + + ProviderMeta cty.Value + + RawConfig cty.Value + RawState cty.Value + RawPlan cty.Value + + // Tainted is used to mark a resource for recreation. + Tainted bool `json:"tainted"` + + mu sync.Mutex +} + +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +func (s *InstanceState) Lock() { s.mu.Lock() } + +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. 
+func (s *InstanceState) Unlock() { s.mu.Unlock() } + +func (s *InstanceState) init() { + s.Lock() + defer s.Unlock() + + if s.Attributes == nil { + s.Attributes = make(map[string]string) + } + if s.Meta == nil { + s.Meta = make(map[string]interface{}) + } + s.Ephemeral.init() +} + +// NewInstanceStateShimmedFromValue is a shim method to lower a new-style +// object value representing the attributes of an instance object into the +// legacy InstanceState representation. +// +// This is for shimming to old components only and should not be used in new code. +// +// Deprecated: This function is unintentionally exported by this Go module and +// not supported for external consumption. It will be removed in the next major +// version. +func NewInstanceStateShimmedFromValue(state cty.Value, schemaVersion int) *InstanceState { + attrs := hcl2shim.FlatmapValueFromHCL2(state) + return &InstanceState{ + ID: attrs["id"], + Attributes: attrs, + Meta: map[string]interface{}{ + "schema_version": schemaVersion, + }, + } +} + +// AttrsAsObjectValue shims from the legacy InstanceState representation to +// a new-style cty object value representation of the state attributes, using +// the given type for guidance. +// +// The given type must be the implied type of the schema of the resource type +// of the object whose state is being converted, or the result is undefined. +// +// This is for shimming from old components only and should not be used in +// new code. +// +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +func (s *InstanceState) AttrsAsObjectValue(ty cty.Type) (cty.Value, error) { + if s == nil { + // if the state is nil, we need to construct a complete cty.Value with + // null attributes, rather than a single cty.NullVal(ty) + s = &InstanceState{} + } + + if s.Attributes == nil { + s.Attributes = map[string]string{} + } + + // make sure ID is included in the attributes. The InstanceState.ID value + // takes precedence. + if s.ID != "" { + s.Attributes["id"] = s.ID + } + + return hcl2shim.HCL2ValueFromFlatmap(s.Attributes, ty) +} + +// Copy all the Fields from another InstanceState +// +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +func (s *InstanceState) Set(from *InstanceState) { + s.Lock() + defer s.Unlock() + + from.Lock() + defer from.Unlock() + + s.ID = from.ID + s.Attributes = from.Attributes + s.Ephemeral = from.Ephemeral + s.Meta = from.Meta + s.Tainted = from.Tainted +} + +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. +func (s *InstanceState) DeepCopy() *InstanceState { + copiedState, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(err) + } + + instanceState, ok := copiedState.(*InstanceState) + if !ok { + panic(fmt.Errorf("unexpected type %T for copiedState", copiedState)) + } + + return instanceState +} + +// Deprecated: This method is unintentionally exported by this Go module and not +// supported for external consumption. It will be removed in the next major +// version. 
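+//
+// Empty reports whether the instance has not been created: a nil receiver
+// and an empty ID both count as empty.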
+func (s *InstanceState) Empty() bool {
+ if s == nil {
+ return true
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ return s.ID == ""
+}
+
+func (s *InstanceState) Equal(other *InstanceState) bool {
+ // Short circuit some nil checks
+ if s == nil || other == nil {
+ return s == other
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ // IDs must be equal
+ if s.ID != other.ID {
+ return false
+ }
+
+ // Attributes must be equal
+ if len(s.Attributes) != len(other.Attributes) {
+ return false
+ }
+ for k, v := range s.Attributes {
+ otherV, ok := other.Attributes[k]
+ if !ok {
+ return false
+ }
+
+ if v != otherV {
+ return false
+ }
+ }
+
+ // Meta must be equal
+ if len(s.Meta) != len(other.Meta) {
+ return false
+ }
+ if s.Meta != nil && other.Meta != nil {
+ // We only do the deep check if both are non-nil. If one is nil
+ // we treat it as equal since their lengths are both zero (check
+ // above).
+ //
+ // Since this can contain numeric values that may change types during
+ // serialization, let's compare the serialized values.
+ sMeta, err := json.Marshal(s.Meta)
+ if err != nil {
+ // marshaling primitives shouldn't ever error out
+ panic(err)
+ }
+ otherMeta, err := json.Marshal(other.Meta)
+ if err != nil {
+ panic(err)
+ }
+
+ if !bytes.Equal(sMeta, otherMeta) {
+ return false
+ }
+ }
+
+ if s.Tainted != other.Tainted {
+ return false
+ }
+
+ return true
+}
+
+// MergeDiff takes a ResourceDiff and merges the attributes into
+// this resource state in order to generate a new state. This new
+// state can be used to provide updated attribute lookups for
+// variable interpolation.
+//
+// If the diff attribute requires computing the value, and hence
+// won't be available until apply, the value is replaced with the
+// computeID.
+//
+// Deprecated: This method is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState {
+ result := s.DeepCopy()
+ if result == nil {
+ result = new(InstanceState)
+ }
+ result.init()
+
+ if s != nil {
+ s.Lock()
+ defer s.Unlock()
+ for k, v := range s.Attributes {
+ result.Attributes[k] = v
+ }
+ }
+ if d != nil {
+ for k, diff := range d.CopyAttributes() {
+ if diff.NewRemoved {
+ delete(result.Attributes, k)
+ continue
+ }
+ if diff.NewComputed {
+ result.Attributes[k] = hcl2shim.UnknownVariableValue
+ continue
+ }
+
+ result.Attributes[k] = diff.New
+ }
+ }
+
+ return result
+}
+
+func (s *InstanceState) String() string {
+ notCreated := "<not created>"
+
+ if s == nil {
+ return notCreated
+ }
+
+ s.Lock()
+ defer s.Unlock()
+
+ var buf bytes.Buffer
+
+ if s.ID == "" {
+ return notCreated
+ }
+
+ buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID))
+
+ attributes := s.Attributes
+ attrKeys := make([]string, 0, len(attributes))
+ for ak := range attributes {
+ if ak == "id" {
+ continue
+ }
+
+ attrKeys = append(attrKeys, ak)
+ }
+ sort.Strings(attrKeys)
+
+ for _, ak := range attrKeys {
+ av := attributes[ak]
+ buf.WriteString(fmt.Sprintf("%s = %s\n", ak, av))
+ }
+
+ buf.WriteString(fmt.Sprintf("Tainted = %t\n", s.Tainted))
+
+ return buf.String()
+}
+
+// EphemeralState is used for transient state that is only kept in-memory
+//
+// Deprecated: This type is unintentionally exported by this Go module and not
+// supported for external consumption. It will be removed in the next major
+// version.
+type EphemeralState struct {
+ // ConnInfo is used for the providers to export information which is
+ // used to connect to the resource for provisioning. For example,
+ // this could contain SSH or WinRM credentials.
+ ConnInfo map[string]string `json:"-"`
+
+ // Type is used to specify the resource type for this instance. This is only
+ // required for import operations (as documented). If the documentation
+ // doesn't state that you need to set this, then don't worry about
+ // setting it.
+ Type string `json:"-"`
+}
+
+func (e *EphemeralState) init() {
+ if e.ConnInfo == nil {
+ e.ConnInfo = make(map[string]string)
+ }
+}
+
+// resourceNameSort implements the sort.Interface to sort name parts lexically for
+// strings and numerically for integer indexes.
+type resourceNameSort []string
+
+func (r resourceNameSort) Len() int { return len(r) }
+func (r resourceNameSort) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
+
+func (r resourceNameSort) Less(i, j int) bool {
+ iParts := strings.Split(r[i], ".")
+ jParts := strings.Split(r[j], ".")
+
+ end := len(iParts)
+ if len(jParts) < end {
+ end = len(jParts)
+ }
+
+ for idx := 0; idx < end; idx++ {
+ if iParts[idx] == jParts[idx] {
+ continue
+ }
+
+ // sort on the first non-matching part
+ iInt, iIntErr := strconv.Atoi(iParts[idx])
+ jInt, jIntErr := strconv.Atoi(jParts[idx])
+
+ switch {
+ case iIntErr == nil && jIntErr == nil:
+ // sort numerically if both parts are integers
+ return iInt < jInt
+ case iIntErr == nil:
+ // numbers sort before strings
+ return true
+ case jIntErr == nil:
+ return false
+ default:
+ return iParts[idx] < jParts[idx]
+ }
+ }
+
+ return r[i] < r[j]
+}
+
+// moduleStateSort implements sort.Interface to sort module states
+type moduleStateSort []*ModuleState
+
+func (s moduleStateSort) Len() int {
+ return len(s)
+}
+
+func (s moduleStateSort) Less(i, j int) bool {
+ a := s[i]
+ b := s[j]
+
+ // If either is nil, then the nil one sorts first.
+ if a == nil || b == nil {
+ return a == nil
+ }
+
+ // If the lengths are different, then the shorter one always wins
+ if len(a.Path) != len(b.Path) {
+ return len(a.Path) < len(b.Path)
+ }
+
+ // Otherwise, compare lexically
+ return strings.Join(a.Path, ".") < strings.Join(b.Path, ".")
+}
+
+func (s moduleStateSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+const stateValidateErrMultiModule = `
+Multiple modules with the same path: %s
+
+This means that there are multiple entries in the "modules" field
+in your state file that point to the same module. This will cause Terraform
+to behave in unexpected and error-prone ways and is invalid. Please back up
+and modify your state file manually to resolve this.
+`
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/state_filter.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/state_filter.go
new file mode 100644
index 00000000000..caf2c79674b
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/state_filter.go
@@ -0,0 +1,273 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package terraform
+
+import (
+ "fmt"
+ "sort"
+)
+
+// stateFilter is responsible for filtering and searching a state.
+//
+// This is a separate struct from State rather than a method on State
+// because StateFilter might create sidecar data structures to optimize
+// filtering on the state.
+//
+// If you change the State, the filter created is invalid and either
+// Reset should be called or a new one should be allocated. StateFilter
+// will not watch State for changes and do this for you. If you filter after
+// changing the State without calling Reset, the behavior is not defined.
+type stateFilter struct {
+ State *State
+}
+
+// filter takes the addresses specified by fs and finds all the matches.
+// The values of fs are resource addressing syntax that can be parsed by
+// parseResourceAddress.
+func (f *stateFilter) filter(fs ...string) ([]*stateFilterResult, error) {
+ // Parse all the addresses
+ var as []*resourceAddress
+
+ if len(fs) == 0 {
+ // If we weren't given any filters, then we list all
+ as = []*resourceAddress{{Index: -1}}
+ } else {
+ as = make([]*resourceAddress, len(fs))
+ }
+
+ for i, v := range fs {
+ a, err := parseResourceAddress(v)
+ if err != nil {
+ return nil, fmt.Errorf("Error parsing address '%s': %s", v, err)
+ }
+
+ as[i] = a
+ }
+
+ // Filter each of the addresses. We keep track of this in a map to
+ // strip duplicates.
+ resultSet := make(map[string]*stateFilterResult)
+ for _, a := range as {
+ for _, r := range f.filterSingle(a) {
+ resultSet[r.String()] = r
+ }
+ }
+
+ // Make the result list
+ results := make([]*stateFilterResult, 0, len(resultSet))
+ for _, v := range resultSet {
+ results = append(results, v)
+ }
+
+ // Sort them and return
+ sort.Sort(stateFilterResultSlice(results))
+ return results, nil
+}
+
+func (f *stateFilter) filterSingle(a *resourceAddress) []*stateFilterResult {
+ // The slice to keep track of results
+ var results []*stateFilterResult
+
+ // Go through modules first.
+ modules := make([]*ModuleState, 0, len(f.State.Modules))
+ for _, m := range f.State.Modules {
+ if f.relevant(a, m) {
+ modules = append(modules, m)
+
+ // Only add the module to the results if we haven't specified a type.
+ // We also ignore the root module.
+ if a.Type == "" && len(m.Path) > 1 {
+ results = append(results, &stateFilterResult{
+ Path: m.Path[1:],
+ Address: (&resourceAddress{Path: m.Path[1:]}).String(),
+ Value: m,
+ })
+ }
+ }
+ }
+
+ // With the modules set, go through all the resources within
+ // the modules to find relevant resources.
+ for _, m := range modules {
+ for n, r := range m.Resources {
+ // The name in the state contains valuable information. Parse.
+ key, err := parseResourceStateKey(n)
+ if err != nil {
+ // If we get an error parsing, just skip this resource and
+ // leave it out of the results.
+ continue
+ }
+
+ // Older states and test fixtures often don't contain the
+ // type directly on the ResourceState. We add this so StateFilter
+ // is a bit more robust.
+ if r.Type == "" { + r.Type = key.Type + } + + if f.relevant(a, r) { + if a.Name != "" && a.Name != key.Name { + // Name doesn't match + continue + } + + if a.Index >= 0 && key.Index != a.Index { + // Index doesn't match + continue + } + + if a.Name != "" && a.Name != key.Name { + continue + } + + // Build the address for this resource + addr := &resourceAddress{ + Path: m.Path[1:], + Name: key.Name, + Type: key.Type, + Index: key.Index, + } + + // Add the resource level result + resourceResult := &stateFilterResult{ + Path: addr.Path, + Address: addr.String(), + Value: r, + } + if !a.InstanceTypeSet { + results = append(results, resourceResult) + } + + // Add the instances + if r.Primary != nil { + addr.InstanceType = typePrimary + addr.InstanceTypeSet = false + results = append(results, &stateFilterResult{ + Path: addr.Path, + Address: addr.String(), + Parent: resourceResult, + Value: r.Primary, + }) + } + + for _, instance := range r.Deposed { + if f.relevant(a, instance) { + addr.InstanceType = typeDeposed + addr.InstanceTypeSet = true + results = append(results, &stateFilterResult{ + Path: addr.Path, + Address: addr.String(), + Parent: resourceResult, + Value: instance, + }) + } + } + } + } + } + + return results +} + +// relevant checks for relevance of this address against the given value. +func (f *stateFilter) relevant(addr *resourceAddress, raw interface{}) bool { + switch v := raw.(type) { + case *ModuleState: + path := v.Path[1:] + + if len(addr.Path) > len(path) { + // Longer path in address means there is no way we match. + return false + } + + // Check for a prefix match + for i, p := range addr.Path { + if path[i] != p { + // Any mismatches don't match. + return false + } + } + + return true + case *ResourceState: + if addr.Type == "" { + // If we have no resource type, then we're interested in all! + return true + } + + // If the type doesn't match we fail immediately + if v.Type != addr.Type { + return false + } + + return true + default: + // If we don't know about it, let's just say no + return false + } +} + +// stateFilterResult is a single result from a filter operation. Filter +// can match multiple things within a state (module, resource, instance, etc.) +// and this unifies that. +type stateFilterResult struct { + // Module path of the result + Path []string + + // Address is the address that can be used to reference this exact result. + Address string + + // Parent, if non-nil, is a parent of this result. For instances, the + // parent would be a resource. For resources, the parent would be + // a module. For modules, this is currently nil. + Parent *stateFilterResult + + // Value is the actual value. This must be type switched on. It can be + // any data structures that `State` can hold: `ModuleState`, + // `ResourceState`, `InstanceState`. + Value interface{} +} + +func (r *stateFilterResult) String() string { + return fmt.Sprintf("%T: %s", r.Value, r.Address) +} + +func (r *stateFilterResult) sortedType() int { + switch r.Value.(type) { + case *ModuleState: + return 0 + case *ResourceState: + return 1 + case *InstanceState: + return 2 + default: + return 50 + } +} + +// stateFilterResultSlice is a slice of results that implements +// sort.Interface. The sorting goal is what is most appealing to +// human output. 
+type stateFilterResultSlice []*stateFilterResult
+
+func (s stateFilterResultSlice) Len() int { return len(s) }
+func (s stateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s stateFilterResultSlice) Less(i, j int) bool {
+ a, b := s[i], s[j]
+
+ // if these addresses contain an index, we want to sort by index rather than name
+ addrA, errA := parseResourceAddress(a.Address)
+ addrB, errB := parseResourceAddress(b.Address)
+ if errA == nil && errB == nil && addrA.Name == addrB.Name && addrA.Index != addrB.Index {
+ return addrA.Index < addrB.Index
+ }
+
+ // If the addresses are different it is just lexicographic sorting
+ if a.Address != b.Address {
+ return a.Address < b.Address
+ }
+
+ // Addresses are the same, which means it matters on the type
+ return a.sortedType() < b.sortedType()
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/util.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/util.go
new file mode 100644
index 00000000000..6353ad27d95
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/terraform/util.go
@@ -0,0 +1,25 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package terraform
+
+import (
+ "sort"
+)
+
+// uniqueStrings deduplicates a slice of strings, sorting it in place.
+func uniqueStrings(s []string) []string {
+ if len(s) < 2 {
+ return s
+ }
+
+ sort.Strings(s)
+ result := make([]string, 1, len(s))
+ result[0] = s[0]
+ for i := 1; i < len(s); i++ {
+ if s[i] != result[len(result)-1] {
+ result = append(result, s[i])
+ }
+ }
+ return result
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfjsonpath/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfjsonpath/doc.go
new file mode 100644
index 00000000000..4b1a4923b93
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfjsonpath/doc.go
@@ -0,0 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Package tfjsonpath implements terraform-json path functionality, which defines
+// traversals into Terraform JSON data, for testing purposes.
+package tfjsonpath
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfjsonpath/path.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfjsonpath/path.go
new file mode 100644
index 00000000000..ef3930dfb5f
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfjsonpath/path.go
@@ -0,0 +1,107 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfjsonpath
+
+import (
+ "fmt"
+)
+
+// Path represents exact traversal steps specifying a value inside
+// Terraform JSON data. These steps always start from a MapStep with a key
+// specifying the name of a top-level JSON object or array.
+//
+// The [terraform-json] library serves as the de facto documentation
+// for the JSON format of Terraform data.
+//
+// Use the New() function to create a Path with an initial AtMapKey() step.
+// Path functionality follows a builder pattern, which allows for chaining method
+// calls to construct a full path.
The available traversal steps after Path +// creation are: +// +// - AtSliceIndex(): Step into a slice at a specific 0-based index +// - AtMapKey(): Step into a map at a specific key +// +// For example, to represent the first element of a JSON array +// underneath a "some_array" property of this JSON value: +// +// { +// "some_array": [true] +// } +// +// The path code would be represented by: +// +// tfjsonpath.New("some_array").AtSliceIndex(0) +// +// [terraform-json]: (https://pkg.go.dev/github.com/hashicorp/terraform-json) +type Path struct { + steps []step +} + +// New creates a new path with an initial MapStep. +func New(name string) Path { + return Path{ + steps: []step{ + MapStep(name), + }, + } +} + +// AtSliceIndex returns a copied Path with a new SliceStep at the end. +func (s Path) AtSliceIndex(index int) Path { + newSteps := append(s.steps, SliceStep(index)) + s.steps = newSteps + return s +} + +// AtMapKey returns a copied Path with a new MapStep at the end. +func (s Path) AtMapKey(key string) Path { + newSteps := append(s.steps, MapStep(key)) + s.steps = newSteps + return s +} + +// Traverse returns the element found when traversing the given +// object using the specified Path. The object is an unmarshalled +// JSON object representing Terraform data. +// +// Traverse returns an error if the value specified by the Path +// is not found in the given object or if the given object does not +// conform to format of Terraform JSON data. +func Traverse(object any, attrPath Path) (any, error) { + _, ok := object.(map[string]any) + + if !ok { + return nil, fmt.Errorf("cannot convert given object to map[string]any") + } + + result := object + + for _, step := range attrPath.steps { + switch s := step.(type) { + case MapStep: + mapObj, ok := result.(map[string]any) + if !ok { + return nil, fmt.Errorf("path not found: cannot convert object at MapStep %s to map[string]any", string(s)) + } + result, ok = mapObj[string(s)] + if !ok { + return nil, fmt.Errorf("path not found: specified key %s not found in map", string(s)) + } + + case SliceStep: + sliceObj, ok := result.([]any) + if !ok { + return nil, fmt.Errorf("path not found: cannot convert object at SliceStep %d to []any", s) + } + + if int(s) >= len(sliceObj) { + return nil, fmt.Errorf("path not found: SliceStep index %d is out of range with slice length %d", s, len(sliceObj)) + } + + result = sliceObj[s] + } + } + + return result, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfjsonpath/step.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfjsonpath/step.go new file mode 100644 index 00000000000..7b6813d60e8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfjsonpath/step.go @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfjsonpath + +// step represents a traversal type indicating the underlying Go type +// representation for a Terraform JSON value. 
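+//
+// As a hedged sketch of how steps compose (an illustration, not upstream
+// documentation): New("a").AtMapKey("b").AtSliceIndex(0) yields the steps
+// [MapStep("a"), MapStep("b"), SliceStep(0)], which Traverse applies in
+// order to {"a": {"b": [v]}} to reach v.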
+type step interface{}
+
+// MapStep represents a traversal for map[string]any
+type MapStep string
+
+// SliceStep represents a traversal for []any
+type SliceStep int
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/all.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/all.go
new file mode 100644
index 00000000000..78e1f178019
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/all.go
@@ -0,0 +1,45 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfversion
+
+import (
+ "context"
+)
+
+// All will return the first non-nil error or non-empty skip message
+// if any of the given checks return a non-nil error or non-empty skip message.
+// Otherwise, it will return a nil error and an empty skip message (run the test).
+//
+// Use of All is only necessary when used in conjunction with Any, as the
+// TerraformVersionChecks field automatically applies a logical AND.
+func All(terraformVersionChecks ...TerraformVersionCheck) TerraformVersionCheck {
+ return allCheck{
+ terraformVersionChecks: terraformVersionChecks,
+ }
+}
+
+// allCheck implements the TerraformVersionCheck interface
+type allCheck struct {
+ terraformVersionChecks []TerraformVersionCheck
+}
+
+// CheckTerraformVersion satisfies the TerraformVersionCheck interface.
+func (a allCheck) CheckTerraformVersion(ctx context.Context, req CheckTerraformVersionRequest, resp *CheckTerraformVersionResponse) {
+
+ for _, subCheck := range a.terraformVersionChecks {
+ checkResp := CheckTerraformVersionResponse{}
+
+ subCheck.CheckTerraformVersion(ctx, CheckTerraformVersionRequest{TerraformVersion: req.TerraformVersion}, &checkResp)
+
+ if checkResp.Error != nil {
+ resp.Error = checkResp.Error
+ return
+ }
+
+ if checkResp.Skip != "" {
+ resp.Skip = checkResp.Skip
+ return
+ }
+ }
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/any.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/any.go
new file mode 100644
index 00000000000..27088e1a58a
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/any.go
@@ -0,0 +1,59 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfversion
+
+import (
+ "context"
+ "strings"
+
+ "github.com/hashicorp/terraform-plugin-testing/internal/errorshim"
+)
+
+// Any will return a nil error and empty skip message (run the test)
+// if any of the given checks return a nil error and empty skip message.
+// Otherwise, if any of the given checks return a non-nil error, it will
+// return all errors and fail the test; if the checks only produce skip
+// messages, it will return all skip messages and skip (pass) the test.
+func Any(terraformVersionChecks ...TerraformVersionCheck) TerraformVersionCheck {
+ return anyCheck{
+ terraformVersionChecks: terraformVersionChecks,
+ }
+}
+
+// anyCheck implements the TerraformVersionCheck interface
+type anyCheck struct {
+ terraformVersionChecks []TerraformVersionCheck
+}
+
+// CheckTerraformVersion satisfies the TerraformVersionCheck interface.
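+// A hedged usage sketch, combining checks from this package in a TestCase's
+// TerraformVersionChecks field (the version choices are illustrative only):
+//
+//	tfversion.Any(
+//		tfversion.All(
+//			tfversion.RequireNot(tfversion.Version0_15_0),
+//			tfversion.RequireBelow(tfversion.Version1_0_0),
+//		),
+//		tfversion.RequireAbove(tfversion.Version1_2_0),
+//	)
+//
+// This passes when the CLI version is at most 1.0.0 but not 0.15.0, or is at
+// least 1.2.0.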
+func (a anyCheck) CheckTerraformVersion(ctx context.Context, req CheckTerraformVersionRequest, resp *CheckTerraformVersionResponse) { + var joinedErrors error + strBuilder := strings.Builder{} + + for _, subCheck := range a.terraformVersionChecks { + checkResp := CheckTerraformVersionResponse{} + + subCheck.CheckTerraformVersion(ctx, CheckTerraformVersionRequest{TerraformVersion: req.TerraformVersion}, &checkResp) + + if checkResp.Error == nil && checkResp.Skip == "" { + resp.Error = nil + resp.Skip = "" + return + } + + if checkResp.Error != nil { + // TODO: Once Go 1.20 is the minimum supported version for this module, replace with `errors.Join` function + // - https://github.com/hashicorp/terraform-plugin-testing/issues/99 + joinedErrors = errorshim.Join(joinedErrors, checkResp.Error) + } + + if checkResp.Skip != "" { + strBuilder.WriteString(checkResp.Skip) + strBuilder.WriteString("\n") + } + } + + resp.Error = joinedErrors + resp.Skip = strings.TrimSpace(strBuilder.String()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/doc.go new file mode 100644 index 00000000000..d73b474d254 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/doc.go @@ -0,0 +1,5 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package tfversion contains the Terraform version check interface, request/response structs, and common version check implementations. +package tfversion diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_above.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_above.go new file mode 100644 index 00000000000..4734bcf6ebf --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_above.go @@ -0,0 +1,35 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfversion + +import ( + "context" + "fmt" + + "github.com/hashicorp/go-version" +) + +// RequireAbove will fail the test if the Terraform CLI +// version is below the given version. For example, if given +// version.Must(version.NewVersion("0.15.0")), then 0.14.x or +// any other prior minor versions will fail the test. +func RequireAbove(minimumVersion *version.Version) TerraformVersionCheck { + return requireAboveCheck{ + minimumVersion: minimumVersion, + } +} + +// requireAboveCheck implements the TerraformVersionCheck interface +type requireAboveCheck struct { + minimumVersion *version.Version +} + +// CheckTerraformVersion satisfies the TerraformVersionCheck interface. 
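+// Sketch: RequireAbove(tfversion.Version1_0_0) errors (failing the test) when
+// the detected CLI version is below 1.0.0, and passes otherwise.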
+func (r requireAboveCheck) CheckTerraformVersion(ctx context.Context, req CheckTerraformVersionRequest, resp *CheckTerraformVersionResponse) {
+
+ if req.TerraformVersion.LessThan(r.minimumVersion) {
+ resp.Error = fmt.Errorf("expected Terraform CLI version above %s but detected version is %s",
+ r.minimumVersion, req.TerraformVersion)
+ }
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_below.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_below.go
new file mode 100644
index 00000000000..99efa53468c
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_below.go
@@ -0,0 +1,35 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfversion
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/hashicorp/go-version"
+)
+
+// RequireBelow will fail the test if the Terraform CLI
+// version is above the given maximum version. For example, if given
+// version.Must(version.NewVersion("0.15.0")), then versions above 0.15.0
+// will fail the test, while 0.15.0 itself will pass.
+func RequireBelow(maximumVersion *version.Version) TerraformVersionCheck {
+ return requireBelowCheck{
+ maximumVersion: maximumVersion,
+ }
+}
+
+// requireBelowCheck implements the TerraformVersionCheck interface
+type requireBelowCheck struct {
+ maximumVersion *version.Version
+}
+
+// CheckTerraformVersion satisfies the TerraformVersionCheck interface.
+func (s requireBelowCheck) CheckTerraformVersion(ctx context.Context, req CheckTerraformVersionRequest, resp *CheckTerraformVersionResponse) {
+
+ if req.TerraformVersion.GreaterThan(s.maximumVersion) {
+ resp.Error = fmt.Errorf("expected Terraform CLI version below %s but detected version is %s",
+ s.maximumVersion, req.TerraformVersion)
+ }
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_between.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_between.go
new file mode 100644
index 00000000000..b992979280e
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_between.go
@@ -0,0 +1,38 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfversion
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/hashicorp/go-version"
+)
+
+// RequireBetween will fail the test if the Terraform CLI
+// version is outside the given minimum (inclusive) and maximum (exclusive)
+// bounds. For example, if given a minimum version of version.Must(version.NewVersion("0.15.0"))
+// and a maximum version of version.Must(version.NewVersion("1.0.0")), then versions
+// below 0.15.0, and versions 1.0.0 and above, will fail the test.
+func RequireBetween(minimumVersion, maximumVersion *version.Version) TerraformVersionCheck {
+ return requireBetweenCheck{
+ minimumVersion: minimumVersion,
+ maximumVersion: maximumVersion,
+ }
+}
+
+// requireBetweenCheck implements the TerraformVersionCheck interface
+type requireBetweenCheck struct {
+ minimumVersion *version.Version
+ maximumVersion *version.Version
+}
+
+// CheckTerraformVersion satisfies the TerraformVersionCheck interface.
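+// Sketch: RequireBetween(tfversion.Version0_15_0, tfversion.Version1_0_0)
+// passes only for versions v satisfying 0.15.0 <= v < 1.0.0.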
+func (s requireBetweenCheck) CheckTerraformVersion(ctx context.Context, req CheckTerraformVersionRequest, resp *CheckTerraformVersionResponse) {
+
+ if req.TerraformVersion.LessThan(s.minimumVersion) || req.TerraformVersion.GreaterThanOrEqual(s.maximumVersion) {
+ resp.Error = fmt.Errorf("expected Terraform CLI version between %s and %s but detected version is %s",
+ s.minimumVersion, s.maximumVersion, req.TerraformVersion)
+ }
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_not.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_not.go
new file mode 100644
index 00000000000..18a9b68d13f
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_not.go
@@ -0,0 +1,32 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfversion
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/hashicorp/go-version"
+)
+
+// RequireNot will fail the test if the Terraform CLI
+// version matches the given version.
+func RequireNot(version *version.Version) TerraformVersionCheck {
+ return requireNotCheck{
+ version: version,
+ }
+}
+
+// requireNotCheck implements the TerraformVersionCheck interface
+type requireNotCheck struct {
+ version *version.Version
+}
+
+// CheckTerraformVersion satisfies the TerraformVersionCheck interface.
+func (s requireNotCheck) CheckTerraformVersion(ctx context.Context, req CheckTerraformVersionRequest, resp *CheckTerraformVersionResponse) {
+
+ if req.TerraformVersion.Equal(s.version) {
+ resp.Error = fmt.Errorf("unexpected Terraform CLI version: %s", s.version)
+ }
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_above.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_above.go
new file mode 100644
index 00000000000..ffc69b857be
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_above.go
@@ -0,0 +1,35 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfversion
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/hashicorp/go-version"
+)
+
+// SkipAbove will skip (pass) the test if the Terraform CLI
+// version is above the given maximum version. For example, if given
+// version.Must(version.NewVersion("0.15.0")), then versions above 0.15.0
+// will skip the test, while 0.15.0 itself will run it.
+func SkipAbove(maximumVersion *version.Version) TerraformVersionCheck {
+ return skipAboveCheck{
+ maximumVersion: maximumVersion,
+ }
+}
+
+// skipAboveCheck implements the TerraformVersionCheck interface
+type skipAboveCheck struct {
+ maximumVersion *version.Version
+}
+
+// CheckTerraformVersion satisfies the TerraformVersionCheck interface.
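+// Sketch: SkipAbove(tfversion.Version0_15_0) skips the test for any version
+// strictly greater than 0.15.0; version 0.15.0 itself still runs.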
+func (s skipAboveCheck) CheckTerraformVersion(ctx context.Context, req CheckTerraformVersionRequest, resp *CheckTerraformVersionResponse) { + + if req.TerraformVersion.GreaterThan(s.maximumVersion) { + resp.Skip = fmt.Sprintf("Terraform CLI version %s is above maximum version %s: skipping test", + req.TerraformVersion, s.maximumVersion) + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_below.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_below.go new file mode 100644 index 00000000000..0b3dffddcbf --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_below.go @@ -0,0 +1,35 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfversion + +import ( + "context" + "fmt" + + "github.com/hashicorp/go-version" +) + +// SkipBelow will skip (pass) the test if the Terraform CLI +// version is below the given version. For example, if given +// version.Must(version.NewVersion("0.15.0")), then 0.14.x or +// any other prior minor versions will skip the test. +func SkipBelow(minimumVersion *version.Version) TerraformVersionCheck { + return skipBelowCheck{ + minimumVersion: minimumVersion, + } +} + +// skipBelowCheck implements the TerraformVersionCheck interface +type skipBelowCheck struct { + minimumVersion *version.Version +} + +// CheckTerraformVersion satisfies the TerraformVersionCheck interface. +func (s skipBelowCheck) CheckTerraformVersion(ctx context.Context, req CheckTerraformVersionRequest, resp *CheckTerraformVersionResponse) { + + if req.TerraformVersion.LessThan(s.minimumVersion) { + resp.Skip = fmt.Sprintf("Terraform CLI version %s is below minimum version %s: skipping test", + req.TerraformVersion, s.minimumVersion) + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_between.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_between.go new file mode 100644 index 00000000000..fb6e941082c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_between.go @@ -0,0 +1,38 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfversion + +import ( + "context" + "fmt" + + "github.com/hashicorp/go-version" +) + +// SkipBetween will skip the test if the Terraform CLI +// version is between the given minimum (inclusive) and maximum (exclusive). +// For example, if given a minimum version of version.Must(version.NewVersion("0.15.0")) +// and a maximum version of version.Must(version.NewVersion("0.16.0")), then versions 0.15.x +// will skip the test. +func SkipBetween(minimumVersion, maximumVersion *version.Version) TerraformVersionCheck { + return skipBetweenCheck{ + minimumVersion: minimumVersion, + maximumVersion: maximumVersion, + } +} + +// skipBetweenCheck implements the TerraformVersionCheck interface +type skipBetweenCheck struct { + minimumVersion *version.Version + maximumVersion *version.Version +} + +// CheckTerraformVersion satisfies the TerraformVersionCheck interface. 
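+// Sketch: SkipBetween(version.Must(version.NewVersion("0.15.0")),
+// version.Must(version.NewVersion("0.16.0"))) skips exactly the 0.15.x
+// series: 0.15.0 is included, 0.16.0 is not.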
+func (s skipBetweenCheck) CheckTerraformVersion(ctx context.Context, req CheckTerraformVersionRequest, resp *CheckTerraformVersionResponse) {
+
+ if req.TerraformVersion.GreaterThanOrEqual(s.minimumVersion) && req.TerraformVersion.LessThan(s.maximumVersion) {
+ resp.Skip = fmt.Sprintf("Terraform CLI version %s is between %s and %s: skipping test.",
+ req.TerraformVersion, s.minimumVersion, s.maximumVersion)
+ }
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_if.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_if.go
new file mode 100644
index 00000000000..6ece5e05d50
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_if.go
@@ -0,0 +1,32 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfversion
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/hashicorp/go-version"
+)
+
+// SkipIf will skip (pass) the test if the Terraform CLI
+// version matches the given version.
+func SkipIf(version *version.Version) TerraformVersionCheck {
+ return skipIfCheck{
+ version: version,
+ }
+}
+
+// skipIfCheck implements the TerraformVersionCheck interface
+type skipIfCheck struct {
+ version *version.Version
+}
+
+// CheckTerraformVersion satisfies the TerraformVersionCheck interface.
+func (s skipIfCheck) CheckTerraformVersion(ctx context.Context, req CheckTerraformVersionRequest, resp *CheckTerraformVersionResponse) {
+
+ if req.TerraformVersion.Equal(s.version) {
+ resp.Skip = fmt.Sprintf("Terraform CLI version is %s: skipping test.", s.version)
+ }
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/version_check.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/version_check.go
new file mode 100644
index 00000000000..554ec2247d6
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/version_check.go
@@ -0,0 +1,39 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfversion
+
+import (
+ "context"
+
+ "github.com/hashicorp/go-version"
+)
+
+// TerraformVersionCheck is the interface for writing check logic against the Terraform CLI version.
+// The Terraform CLI version is determined by the binary selected by the TF_ACC_TERRAFORM_PATH environment
+// variable value, installed by the TF_ACC_TERRAFORM_VERSION value, or already existing based on the PATH environment
+// variable. This logic is executed at the beginning of the TestCase before any TestStep is executed.
+//
+// This package contains some built-in functionality that implements the interface; otherwise,
+// consumers can use this interface to implement their own custom logic.
+type TerraformVersionCheck interface {
+ // CheckTerraformVersion should implement the logic to either pass, error (failing the test), or skip (passing the test).
+ CheckTerraformVersion(context.Context, CheckTerraformVersionRequest, *CheckTerraformVersionResponse)
+}
+
+// CheckTerraformVersionRequest is the request received for the CheckTerraformVersion method of the
+// TerraformVersionCheck interface. The response of that method is CheckTerraformVersionResponse.
+type CheckTerraformVersionRequest struct {
+ // TerraformVersion is the version associated with the selected Terraform CLI binary.
+	TerraformVersion *version.Version
+}
+
+// CheckTerraformVersionResponse is the response returned for the CheckTerraformVersion method of the
+// TerraformVersionCheck interface. The request of that method is CheckTerraformVersionRequest.
+type CheckTerraformVersionResponse struct {
+	// Error will result in failing the test with a given error message.
+	Error error
+
+	// Skip will result in passing the test with a given skip message.
+	Skip string
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/versions.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/versions.go
new file mode 100644
index 00000000000..6cd04b27f0a
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/versions.go
@@ -0,0 +1,32 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfversion
+
+import "github.com/hashicorp/go-version"
+
+// Commonly used version variables to simplify provider testing implementations.
+// This list is not intended to be exhaustive of all Terraform versions;
+// however, these should at least include cases where Terraform
+// introduced new configuration language features.
+var (
+	// Version0_12_26 is the first Terraform CLI version supported
+	// by the testing code.
+	Version0_12_26 *version.Version = version.Must(version.NewVersion("0.12.26"))
+
+	// Major versions
+
+	Version1_0_0 *version.Version = version.Must(version.NewVersion("1.0.0"))
+	Version2_0_0 *version.Version = version.Must(version.NewVersion("2.0.0"))
+
+	// Minor versions
+
+	Version0_13_0 *version.Version = version.Must(version.NewVersion("0.13.0"))
+	Version0_14_0 *version.Version = version.Must(version.NewVersion("0.14.0"))
+	Version0_15_0 *version.Version = version.Must(version.NewVersion("0.15.0"))
+	Version1_1_0 *version.Version = version.Must(version.NewVersion("1.1.0"))
+	Version1_2_0 *version.Version = version.Must(version.NewVersion("1.2.0"))
+	Version1_3_0 *version.Version = version.Must(version.NewVersion("1.3.0"))
+	Version1_4_0 *version.Version = version.Must(version.NewVersion("1.4.0"))
+	Version1_5_0 *version.Version = version.Must(version.NewVersion("1.5.0"))
+)
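For context on how the vendored pieces above fit together: the four `Skip*` checks, the `TerraformVersionCheck` interface, and the shared `Version*` constants are consumed through the `TerraformVersionChecks` field of `resource.TestCase` in this module's `helper/resource` package, and they run once, before any `TestStep`, against the Terraform CLI binary selected via `TF_ACC_TERRAFORM_PATH` or installed via `TF_ACC_TERRAFORM_VERSION`. A minimal sketch follows; the test function and the custom `skipPrerelease` check are illustrative assumptions, not part of the vendored code:

```go
package provider_test

import (
	"context"
	"fmt"
	"testing"

	"github.com/hashicorp/go-version"
	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
	"github.com/hashicorp/terraform-plugin-testing/tfversion"
)

// skipPrerelease is an illustrative custom check: setting resp.Skip passes
// (skips) the test, while setting resp.Error would fail it.
type skipPrerelease struct{}

func (skipPrerelease) CheckTerraformVersion(ctx context.Context, req tfversion.CheckTerraformVersionRequest, resp *tfversion.CheckTerraformVersionResponse) {
	if req.TerraformVersion.Prerelease() != "" {
		resp.Skip = fmt.Sprintf("Terraform CLI version %s is a prerelease: skipping test", req.TerraformVersion)
	}
}

func TestAccExample(t *testing.T) {
	resource.Test(t, resource.TestCase{
		// Version checks run before the first TestStep, against the selected CLI binary.
		TerraformVersionChecks: []tfversion.TerraformVersionCheck{
			tfversion.SkipBelow(tfversion.Version1_0_0),                           // shared constant from versions.go
			tfversion.SkipBetween(tfversion.Version1_1_0, tfversion.Version1_2_0), // skips every 1.1.x release
			tfversion.SkipIf(version.Must(version.NewVersion("1.4.0"))),           // skips exactly 1.4.0
			skipPrerelease{},
		},
		// Provider factories and Steps elided; a real acceptance test must supply both.
		Steps: []resource.TestStep{},
	})
}
```

Because a check communicates only through `resp.Skip` and `resp.Error`, custom checks such as `skipPrerelease` compose with the built-ins without any extra wiring.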
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/CHANGELOG.md b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/CHANGELOG.md
index 8fac9566621..4f1b56d9f58 100644
--- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/CHANGELOG.md
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/CHANGELOG.md
@@ -1,7 +1,614 @@
-## 5.37.0 (Unreleased)
+## 6.10.0 (Unreleased)
+
+FEATURES:
+* **New Data Source:** `google_compute_instance_guest_attributes` ([#20095](https://github.com/hashicorp/terraform-provider-google/pull/20095))
+* **New Data Source:** `google_service_accounts` ([#20062](https://github.com/hashicorp/terraform-provider-google/pull/20062))
+* **New Resource:** `google_iap_settings` ([#20085](https://github.com/hashicorp/terraform-provider-google/pull/20085))
+
+IMPROVEMENTS:
+* apphub: added `GLOBAL` enum value to `scope.type` field in `google_apphub_application` resource ([#20015](https://github.com/hashicorp/terraform-provider-google/pull/20015))
+* assuredworkloads: added `workload_options` field to `google_assured_workloads_workload` resource ([#19985](https://github.com/hashicorp/terraform-provider-google/pull/19985))
+* bigquery: added `external_catalog_dataset_options` fields to `google_bigquery_dataset` resource (beta) ([#20097](https://github.com/hashicorp/terraform-provider-google/pull/20097))
+* bigquery: added descriptive validation errors for missing required fields in `google_bigquery_job` destination table configuration ([#20077](https://github.com/hashicorp/terraform-provider-google/pull/20077))
+* compute: `desired_status` on `google_compute_instance` can now be set to `TERMINATED` or `SUSPENDED` on instance creation ([#20031](https://github.com/hashicorp/terraform-provider-google/pull/20031))
+* compute: added `header_action` and `redirect_options` fields to `google_compute_security_policy_rule` resource ([#20079](https://github.com/hashicorp/terraform-provider-google/pull/20079))
+* compute: added `interface.ipv6-address` field in `google_compute_external_vpn_gateway` resource ([#20091](https://github.com/hashicorp/terraform-provider-google/pull/20091))
+* compute: added `propagated_connection_limit` and `connected_endpoints.propagated_connection_count` fields to `google_compute_service_attachment` resource ([#20016](https://github.com/hashicorp/terraform-provider-google/pull/20016))
+* compute: added plan-time validation to `name` on `google_compute_instance` ([#20036](https://github.com/hashicorp/terraform-provider-google/pull/20036))
+* compute: added support for `advanced_machine_features.turbo_mode` to `google_compute_instance`, `google_compute_instance_template`, and `google_compute_region_instance_template` ([#20090](https://github.com/hashicorp/terraform-provider-google/pull/20090))
+* container: added in-place update support for `labels`, `resource_manager_tags` and `workload_metadata_config` in `google_container_cluster.node_config` ([#20038](https://github.com/hashicorp/terraform-provider-google/pull/20038))
+* filestore: added `protocol` property to resource `google_filestore_instance` ([#19982](https://github.com/hashicorp/terraform-provider-google/pull/19982))
+* memorystore: added `mode` flag to `google_memorystore_instance` ([#19988](https://github.com/hashicorp/terraform-provider-google/pull/19988))
+* netapp: added `zone` and `replica_zone` fields to `google_netapp_storage_pool` resource ([#19980](https://github.com/hashicorp/terraform-provider-google/pull/19980))
+* netapp: added `zone` and `replica_zone` fields to `google_netapp_volume` resource ([#19980](https://github.com/hashicorp/terraform-provider-google/pull/19980))
+* networksecurity: added `tls_inspection_policy` field to `google_network_security_gateway_security_policy` ([#19986](https://github.com/hashicorp/terraform-provider-google/pull/19986))
+* resourcemanager: added `disabled` to `google_service_account` datasource ([#20034](https://github.com/hashicorp/terraform-provider-google/pull/20034))
+* spanner: added `asymmetric_autoscaling_options` field to `google_spanner_instance` ([#20014](https://github.com/hashicorp/terraform-provider-google/pull/20014))
+* sql: removed the client-side default of `ENTERPRISE` for `edition` in `google_sql_database_instance` so that `edition` is determined by the API when unset. This will cause new instances to use `ENTERPRISE_PLUS` as the default for POSTGRES_16.
([#19977](https://github.com/hashicorp/terraform-provider-google/pull/19977)) +* vmwareengine: added `autoscaling_settings` to `google_vmwareengine_private_cloud` resource ([#20057](https://github.com/hashicorp/terraform-provider-google/pull/20057)) + +BUG FIXES: +* accesscontextmanager: fixed permadiff for perimeter ingress / egress rule resources ([#20046](https://github.com/hashicorp/terraform-provider-google/pull/20046)) +* compute: fixed an error in `google_compute_security_policy_rule` that prevented updating the default rule ([#20066](https://github.com/hashicorp/terraform-provider-google/pull/20066)) +* container: fixed missing in-place updates for some `google_container_cluster.node_config` subfields ([#20038](https://github.com/hashicorp/terraform-provider-google/pull/20038)) + +## 6.9.0 (October 28, 2024) + +DEPRECATIONS: +* containerattached: deprecated `security_posture_config` field in `google_container_attached_cluster` resource ([#19912](https://github.com/hashicorp/terraform-provider-google/pull/19912)) + +FEATURES: +* **New Data Source:** `google_oracle_database_autonomous_database` ([#19903](https://github.com/hashicorp/terraform-provider-google/pull/19903)) +* **New Data Source:** `google_oracle_database_autonomous_databases` ([#19901](https://github.com/hashicorp/terraform-provider-google/pull/19901)) +* **New Data Source:** `google_oracle_database_cloud_exadata_infrastructures` ([#19884](https://github.com/hashicorp/terraform-provider-google/pull/19884)) +* **New Data Source:** `google_oracle_database_cloud_vm_clusters` ([#19900](https://github.com/hashicorp/terraform-provider-google/pull/19900)) +* **New Resource:** `google_apigee_app_group` ([#19921](https://github.com/hashicorp/terraform-provider-google/pull/19921)) +* **New Resource:** `google_apigee_developer` ([#19911](https://github.com/hashicorp/terraform-provider-google/pull/19911)) +* **New Resource:** `google_network_connectivity_group` ([#19902](https://github.com/hashicorp/terraform-provider-google/pull/19902)) + +IMPROVEMENTS: +* compute: `google_compute_network_firewall_policy_association` now uses MMv1 engine instead of DCL. ([#19976](https://github.com/hashicorp/terraform-provider-google/pull/19976)) +* compute: `google_compute_region_network_firewall_policy_association` now uses MMv1 engine instead of DCL. ([#19976](https://github.com/hashicorp/terraform-provider-google/pull/19976)) +* compute: added `creation_timestamp` field to `google_compute_instance`, `google_compute_instance_template`, `google_compute_region_instance_template` ([#19906](https://github.com/hashicorp/terraform-provider-google/pull/19906)) +* compute: added `key_revocation_action_type` to `google_compute_instance` and related resources ([#19952](https://github.com/hashicorp/terraform-provider-google/pull/19952)) +* looker: added `deletion_policy` to `google_looker_instance` to allow force-destroying instances with nested resources by setting `deletion_policy = FORCE` ([#19924](https://github.com/hashicorp/terraform-provider-google/pull/19924)) +* monitoring: added `alert_strategy.notification_prompts` field to `google_monitoring_alert_policy` ([#19928](https://github.com/hashicorp/terraform-provider-google/pull/19928)) +* storage: added `hierarchical_namespace` to `google_storage_bucket` resource ([#19882](https://github.com/hashicorp/terraform-provider-google/pull/19882)) +* sql: removed the client-side default of `ENTERPRISE` for `edition` in `google_sql_database_instance` so that `edition` is determined by the API when unset. 
This will cause new instances to use `ENTERPRISE_PLUS` as the default for POSTGRES_16. ([#19977](https://github.com/hashicorp/terraform-provider-google/pull/19977)) +* vmwareengine: added `autoscaling_settings` to `google_vmwareengine_cluster` resource ([#19962](https://github.com/hashicorp/terraform-provider-google/pull/19962)) +* workstations: added `max_usable_workstations` field to `google_workstations_workstation_config` resource. ([#19872](https://github.com/hashicorp/terraform-provider-google/pull/19872)) + +BUG FIXES: +* compute: fixed an issue where immutable `distribution_zones` was incorrectly sent to the API when updating `distribution_policy_target_shape` in `google_compute_region_instance_group_manager` resource ([#19949](https://github.com/hashicorp/terraform-provider-google/pull/19949)) +* container: fixed a crash in `google_container_node_pool` caused by an occasional nil pointer ([#19922](https://github.com/hashicorp/terraform-provider-google/pull/19922)) +* essentialcontacts: fixed `google_essential_contacts_contact` import to include required parent field. ([#19877](https://github.com/hashicorp/terraform-provider-google/pull/19877)) +* sql: made `google_sql_database_instance.0.settings.0.data_cache_config` accept server-side changes when unset. When unset, no diffs will be created when instances change in `edition` and the feature is enabled or disabled as a result. ([#19972](https://github.com/hashicorp/terraform-provider-google/pull/19972)) +* storage: removed retry on 404s during refresh for `google_storage_bucket`, preventing hanging when refreshing deleted buckets ([#19964](https://github.com/hashicorp/terraform-provider-google/pull/19964)) + +## 6.8.0 (October 21, 2024) + +FEATURES: +* **New Data Source:** `google_oracle_database_cloud_exadata_infrastructure` ([#19856](https://github.com/hashicorp/terraform-provider-google/pull/19856)) +* **New Data Source:** `google_oracle_database_cloud_vm_cluster` ([#19859](https://github.com/hashicorp/terraform-provider-google/pull/19859)) +* **New Data Source:** `google_oracle_database_db_nodes` ([#19871](https://github.com/hashicorp/terraform-provider-google/pull/19871)) +* **New Data Source:** `google_oracle_database_db_servers` ([#19823](https://github.com/hashicorp/terraform-provider-google/pull/19823)) +* **New Resource:** `google_oracle_database_autonomous_database` ([#19860](https://github.com/hashicorp/terraform-provider-google/pull/19860)) +* **New Resource:** `google_oracle_database_cloud_exadata_infrastructure` ([#19798](https://github.com/hashicorp/terraform-provider-google/pull/19798)) +* **New Resource:** `google_oracle_database_cloud_vm_cluster` ([#19837](https://github.com/hashicorp/terraform-provider-google/pull/19837)) +* **New Resource:** `google_transcoder_job_template` ([#19854](https://github.com/hashicorp/terraform-provider-google/pull/19854)) +* **New Resource:** `google_transcoder_job` ([#19854](https://github.com/hashicorp/terraform-provider-google/pull/19854)) + +IMPROVEMENTS: +* cloudfunctions: increased the timeouts to 20 minutes for `google_cloudfunctions_function` resource ([#19799](https://github.com/hashicorp/terraform-provider-google/pull/19799)) +* cloudrunv2: added `invoker_iam_disabled` field to `google_cloud_run_v2_service` ([#19833](https://github.com/hashicorp/terraform-provider-google/pull/19833)) +* compute: made `google_compute_network_firewall_policy_rule` use MMv1 engine instead of DCL. 
([#19862](https://github.com/hashicorp/terraform-provider-google/pull/19862))
+* compute: made `google_compute_region_network_firewall_policy_rule` use MMv1 engine instead of DCL. ([#19862](https://github.com/hashicorp/terraform-provider-google/pull/19862))
+* compute: added `ip_address_selection_policy` field to `google_compute_backend_service` and `google_compute_region_backend_service`. ([#19863](https://github.com/hashicorp/terraform-provider-google/pull/19863))
+* compute: added `provisioned_throughput` field to `google_compute_instance_template` resource ([#19852](https://github.com/hashicorp/terraform-provider-google/pull/19852))
+* compute: added `provisioned_throughput` field to `google_compute_region_instance_template` resource ([#19852](https://github.com/hashicorp/terraform-provider-google/pull/19852))
+* container: added support for additional values `KCP_CONNECTION` and `KCP_SSHD` in `google_container_cluster.logging_config` ([#19812](https://github.com/hashicorp/terraform-provider-google/pull/19812))
+* dialogflowcx: added `advanced_settings.logging_settings` and `advanced_settings.speech_settings` to `google_dialogflow_cx_agent` and `google_dialogflow_cx_flow` ([#19801](https://github.com/hashicorp/terraform-provider-google/pull/19801))
+* networkconnectivity: added `linked_producer_vpc_network` field to `google_network_connectivity_spoke` resource ([#19806](https://github.com/hashicorp/terraform-provider-google/pull/19806))
+* secretmanager: added `is_secret_data_base64` field to `google_secret_manager_secret_version` and `google_secret_manager_secret_version_access` datasources ([#19831](https://github.com/hashicorp/terraform-provider-google/pull/19831))
+* secretmanager: added `is_secret_data_base64` field to `google_secret_manager_regional_secret_version` and `google_secret_manager_regional_secret_version_access` datasources ([#19831](https://github.com/hashicorp/terraform-provider-google/pull/19831))
+* spanner: added `kms_key_names` to `encryption_config` in `google_spanner_database` ([#19846](https://github.com/hashicorp/terraform-provider-google/pull/19846))
+* workstations: added `max_usable_workstations` field to `google_workstations_workstation_config` resource ([#19872](https://github.com/hashicorp/terraform-provider-google/pull/19872))
+* workstations: added field `allowed_ports` to `google_workstations_workstation_config` ([#19845](https://github.com/hashicorp/terraform-provider-google/pull/19845))
+
+BUG FIXES:
+* bigquery: fixed a regression that caused `google_bigquery_dataset_iam_*` resources to attempt to set deleted IAM members, thereby triggering an API error ([#19857](https://github.com/hashicorp/terraform-provider-google/pull/19857))
+* compute: fixed an issue in `google_compute_backend_service` and `google_compute_region_backend_service` to allow sending `false` for `iap.enabled` ([#19795](https://github.com/hashicorp/terraform-provider-google/pull/19795))
+* container: `node_config.linux_node_config`, `node_config.workload_metadata_config` and `node_config.kubelet_config` will now successfully send empty messages to the API when `terraform plan` indicates they are being removed, rather than null, which caused an error. The sole reliable case is `node_config.linux_node_config` when the block is removed, where there will still be a permadiff, but the update request that's triggered will no longer error and other changes displayed in the plan should go through.
([#19842](https://github.com/hashicorp/terraform-provider-google/pull/19842))
+* pubsub: fixed a permadiff when configuring an empty `retry_policy` in `google_pubsub_subscription` ([#19784](https://github.com/hashicorp/terraform-provider-google/pull/19784))
+
+## 5.44.2 (October 14, 2024)
+
+NOTES:
+* 5.44.2 is a backport release, responding to a GKE rollout that created permadiffs for many users. The changes in this release will be available in 6.7.0 and users upgrading to 6.X should upgrade to that version or higher.
+
+IMPROVEMENTS:
+* container: `google_container_cluster` will now accept server-specified values for `node_pool_auto_config.0.node_kubelet_config` when it is not defined in configuration and will not detect drift. Note that this means that removing the value from configuration will now preserve old settings instead of reverting the old settings. ([#19817](https://github.com/hashicorp/terraform-provider-google/pull/19817))
+
+BUG FIXES:
+* container: fixed a diff triggered by a new API-side default value for `node_config.0.kubelet_config.0.insecure_kubelet_readonly_port_enabled`. Terraform will now accept server-specified values for `node_config.0.kubelet_config` when it is not defined in configuration and will not detect drift. Note that this means that removing the value from configuration will now preserve old settings instead of reverting the old settings. ([#19817](https://github.com/hashicorp/terraform-provider-google/pull/19817))
+
+## 6.7.0 (October 14, 2024)
+
+FEATURES:
+* **New Resource:** `google_healthcare_pipeline_job` ([#19717](https://github.com/hashicorp/terraform-provider-google/pull/19717))
+* **New Resource:** `google_secure_source_manager_branch_rule` ([#19773](https://github.com/hashicorp/terraform-provider-google/pull/19773))
+
+IMPROVEMENTS:
+* container: `google_container_cluster` will now accept server-specified values for `node_pool_auto_config.0.node_kubelet_config` when it is not defined in configuration and will not detect drift. Note that this means that removing the value from configuration will now preserve old settings instead of reverting the old settings. ([#19817](https://github.com/hashicorp/terraform-provider-google/pull/19817))
+* discoveryengine: added `chat_engine_config.dialogflow_agent_to_link` field to `google_discovery_engine_chat_engine` resource ([#19723](https://github.com/hashicorp/terraform-provider-google/pull/19723))
+* networkconnectivity: added field `migration` to resource `google_network_connectivity_internal_range` ([#19757](https://github.com/hashicorp/terraform-provider-google/pull/19757))
+* networkservices: added `routing_mode` field to `google_network_services_gateway` resource ([#19764](https://github.com/hashicorp/terraform-provider-google/pull/19764))
+
+BUG FIXES:
+* bigtable: fixed an error where BigTable IAM resources could be created with conditions but the condition was not stored in state ([#19725](https://github.com/hashicorp/terraform-provider-google/pull/19725))
+* container: fixed an issue that prevented disabling the `enable_cilium_clusterwide_network_policy` field on `google_container_cluster`. ([#19736](https://github.com/hashicorp/terraform-provider-google/pull/19736))
+* container: fixed a diff triggered by a new API-side default value for `node_config.0.kubelet_config.0.insecure_kubelet_readonly_port_enabled`. Terraform will now accept server-specified values for `node_config.0.kubelet_config` when it is not defined in configuration and will not detect drift.
Note that this means that removing the value from configuration will now preserve old settings instead of reverting the old settings. ([#19817](https://github.com/hashicorp/terraform-provider-google/pull/19817))
+* dataproc: fixed a bug in `google_dataproc_cluster` that prevented creation of clusters with `internal_ip_only` set to false ([#19782](https://github.com/hashicorp/terraform-provider-google/pull/19782))
+* iam: addressed `google_service_account` creation issues caused by the eventual consistency of the GCP IAM API by ignoring 403 errors returned on polling the service account after creation. ([#19727](https://github.com/hashicorp/terraform-provider-google/pull/19727))
+* logging: fixed the whitespace permadiff on `exclusions.filter` field in `google_logging_billing_account_sink`, `google_logging_folder_sink`, `google_logging_organization_sink` and `google_logging_project_sink` resources ([#19744](https://github.com/hashicorp/terraform-provider-google/pull/19744))
+* pubsub: fixed a permadiff when configuring an empty `retry_policy`. ([#19784](https://github.com/hashicorp/terraform-provider-google/pull/19784))
+* secretmanager: fixed the issue of unpopulated fields `labels`, `annotations` and `version_destroy_ttl` in the Terraform state for the `google_secret_manager_secrets` datasource ([#19748](https://github.com/hashicorp/terraform-provider-google/pull/19748))
+
+## 6.6.0 (October 7, 2024)
+
+FEATURES:
+* **New Resource:** `google_dataproc_batch` ([#19686](https://github.com/hashicorp/terraform-provider-google/pull/19686))
+* **New Resource:** `google_healthcare_pipeline_job` ([#19717](https://github.com/hashicorp/terraform-provider-google/pull/19717))
+* **New Resource:** `google_site_verification_owner` ([#19641](https://github.com/hashicorp/terraform-provider-google/pull/19641))
+
+IMPROVEMENTS:
+* assuredworkloads: added `HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS` and `HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_WITH_US_SUPPORT` enum values to `compliance_regime` in the `google_assured_workloads_workload` resource ([#19714](https://github.com/hashicorp/terraform-provider-google/pull/19714))
+* compute: added `bgp_best_path_selection_mode`, `bgp_bps_always_compare_med` and `bgp_bps_inter_region_cost` fields to `google_compute_network` resource ([#19708](https://github.com/hashicorp/terraform-provider-google/pull/19708))
+* compute: added `next_hop_origin`, `next_hop_med` and `next_hop_inter_region_cost` output fields to `google_compute_route` resource ([#19708](https://github.com/hashicorp/terraform-provider-google/pull/19708))
+* compute: added enum `STATEFUL_COOKIE_AFFINITY` and `strong_session_affinity_cookie` field to `google_compute_backend_service` and `google_compute_region_backend_service` resource ([#19665](https://github.com/hashicorp/terraform-provider-google/pull/19665))
+* compute: moved `TDX` instance option for `confidential_instance_type` in `google_compute_instance` from Beta to GA ([#19706](https://github.com/hashicorp/terraform-provider-google/pull/19706))
+* containeraws: added `kubelet_config` field group to the `google_container_aws_node_pool` resource ([#19714](https://github.com/hashicorp/terraform-provider-google/pull/19714))
+* pubsub: added GCS ingestion settings and platform log settings to `google_pubsub_topic` resource ([#19669](https://github.com/hashicorp/terraform-provider-google/pull/19669))
+* sourcerepo: added `create_ignore_already_exists` field to `google_sourcerepo_repository` resource
([#19716](https://github.com/hashicorp/terraform-provider-google/pull/19716))
+* sql: added in-place update support for `settings.time_zone` in `google_sql_database_instance` resource ([#19654](https://github.com/hashicorp/terraform-provider-google/pull/19654))
+* tags: increased maximum accepted input length for the `short_name` field in `google_tags_tag_key` and `google_tags_tag_value` resources ([#19712](https://github.com/hashicorp/terraform-provider-google/pull/19712))
+
+BUG FIXES:
+* bigquery: fixed `google_bigquery_dataset_iam_member` so that it can delete itself and overwrite the existing IAM members for a BigQuery dataset while keeping the authorized datasets as they are. ([#19682](https://github.com/hashicorp/terraform-provider-google/pull/19682))
+* bigquery: fixed an error which could occur with service account field values containing non-lower-case characters in `google_bigquery_dataset_access` ([#19705](https://github.com/hashicorp/terraform-provider-google/pull/19705))
+* compute: fixed an issue where the `boot_disk.initialize_params.resource_policies` field in `google_compute_instance` forced a resource recreation when used in combination with `google_compute_disk_resource_policy_attachment` ([#19692](https://github.com/hashicorp/terraform-provider-google/pull/19692))
+* compute: fixed an issue where `labels` was not set when creating the resource `google_compute_interconnect` ([#19632](https://github.com/hashicorp/terraform-provider-google/pull/19632))
+* tags: removed `google_tags_location_tag_binding` resource from the Terraform state when its parent resource has been removed outside of Terraform ([#19693](https://github.com/hashicorp/terraform-provider-google/pull/19693))
+* workbench: fixed a bug in the `google_workbench_instance` resource where the removal of `labels` was not functioning as expected. ([#19620](https://github.com/hashicorp/terraform-provider-google/pull/19620))
+
+## 6.5.0 (September 30, 2024)
+
+DEPRECATIONS:
+* compute: deprecated `macsec.pre_shared_keys.fail_open` field in `google_compute_interconnect` resource.
Use the new `macsec.fail_open` field instead ([#19572](https://github.com/hashicorp/terraform-provider-google/pull/19572)) + +FEATURES: +* **New Data Source:** `google_compute_region_instance_group_manager` ([#19589](https://github.com/hashicorp/terraform-provider-google/pull/19589)) +* **New Data Source:** `google_privileged_access_manager_entitlement` ([#19580](https://github.com/hashicorp/terraform-provider-google/pull/19580)) +* **New Data Source:** `google_secret_manager_regional_secret_version_access` ([#19538](https://github.com/hashicorp/terraform-provider-google/pull/19538)) +* **New Data Source:** `google_secret_manager_regional_secret_version` ([#19514](https://github.com/hashicorp/terraform-provider-google/pull/19514)) +* **New Data Source:** `google_secret_manager_regional_secrets` ([#19532](https://github.com/hashicorp/terraform-provider-google/pull/19532)) +* **New Resource:** `google_compute_router_nat_address` ([#19550](https://github.com/hashicorp/terraform-provider-google/pull/19550)) +* **New Resource:** `google_logging_log_scope` ([#19559](https://github.com/hashicorp/terraform-provider-google/pull/19559)) + +IMPROVEMENTS: +* apigee: added `activate` field to `google_apigee_nat_address` resource ([#19591](https://github.com/hashicorp/terraform-provider-google/pull/19591)) +* bigquery: added `biglake_configuration` field to `google_bigquery_table` resource to support BigLake Managed Tables ([#19541](https://github.com/hashicorp/terraform-provider-google/pull/19541)) +* cloudrunv2: promoted `scaling` field in `google_cloud_run_v2_service` resource to GA ([#19588](https://github.com/hashicorp/terraform-provider-google/pull/19588)) +* composer: promoted `config.workloads_config.cloud_data_lineage_integration` field in `google_composer_environment` resource to GA ([#19612](https://github.com/hashicorp/terraform-provider-google/pull/19612)) +* compute: added `existing_reservations` field to `google_compute_region_commitment` resource ([#19585](https://github.com/hashicorp/terraform-provider-google/pull/19585)) +* compute: added `hostname` field to `google_compute_instance` data source ([#19607](https://github.com/hashicorp/terraform-provider-google/pull/19607)) +* compute: added `initial_nat_ip` field to `google_compute_router_nat` resource ([#19550](https://github.com/hashicorp/terraform-provider-google/pull/19550)) +* compute: added `macsec.fail_open` field to `google_compute_interconnect` resource ([#19572](https://github.com/hashicorp/terraform-provider-google/pull/19572)) +* compute: added `SUSPENDED` as a possible value to `desired_state` field in `google_compute_instance` resource ([#19586](https://github.com/hashicorp/terraform-provider-google/pull/19586)) +* compute: added import support for `projects/{{project}}/meta-data/{{key}}` format for `google_compute_project_metadata_item` resource ([#19613](https://github.com/hashicorp/terraform-provider-google/pull/19613)) +* compute: marked `customer_name` and `location` fields as optional in `google_compute_interconnect` resource to support cross cloud interconnect ([#19619](https://github.com/hashicorp/terraform-provider-google/pull/19619)) +* container: added `linux_node_config.hugepages_config` field to `google_container_node_pool` resource ([#19521](https://github.com/hashicorp/terraform-provider-google/pull/19521)) +* container: promoted `gcfs_config` field in `google_container_cluster` resource to GA ([#19617](https://github.com/hashicorp/terraform-provider-google/pull/19617)) +* looker: added `psc_enabled` and 
`psc_config` fields to `google_looker_instance` resource ([#19523](https://github.com/hashicorp/terraform-provider-google/pull/19523)) +* networkconnectivity: added `include_import_ranges` field to `google_network_connectivity_spoke` resource for `linked_vpn_tunnels`, `linked_interconnect_attachments` and `linked_router_appliance_instances` ([#19530](https://github.com/hashicorp/terraform-provider-google/pull/19530)) +* secretmanagerregional: added `version_aliases` field to `google_secret_manager_regional_secret` resource ([#19514](https://github.com/hashicorp/terraform-provider-google/pull/19514)) +* workbench: increased create timeout to 20 minutes for `google_workbench_instance` resource ([#19551](https://github.com/hashicorp/terraform-provider-google/pull/19551)) + +BUG FIXES: +* bigquery: fixed in-place update of `google_bigquery_table` resource when `external_data_configuration.schema` field is set ([#19558](https://github.com/hashicorp/terraform-provider-google/pull/19558)) +* bigquerydatapolicy: fixed permadiff on `policy_tag` field in `google_bigquery_datapolicy_data_policy` resource ([#19563](https://github.com/hashicorp/terraform-provider-google/pull/19563)) +* composer: fixed `storage_config.bucket` field to support a bucket name with or without "gs://" prefix ([#19552](https://github.com/hashicorp/terraform-provider-google/pull/19552)) +* container: added support for setting `addons_config.gcp_filestore_csi_driver_config` and `enable_autopilot` in the same `google_container_cluster` ([#19590](https://github.com/hashicorp/terraform-provider-google/pull/19590)) +* container: fixed `node_config.kubelet_config` updates in `google_container_cluster` resource ([#19562](https://github.com/hashicorp/terraform-provider-google/pull/19562)) +* container: fixed a bug where specifying `node_pool_defaults.node_config_defaults` with `enable_autopilot = true` would cause `google_container_cluster` resource creation failure ([#19543](https://github.com/hashicorp/terraform-provider-google/pull/19543)) +* workbench: fixed a bug in the `google_workbench_instance` resource where the removal of `labels` was not functioning as expected ([#19620](https://github.com/hashicorp/terraform-provider-google/pull/19620)) + + +## 6.4.0 (September 23, 2024) + +DEPRECATIONS: +* securitycenterv2: deprecated `google_scc_v2_organization_scc_big_query_exports`. Use `google_scc_v2_organization_scc_big_query_export` instead. 
([#19457](https://github.com/hashicorp/terraform-provider-google/pull/19457)) + +FEATURES: +* **New Data Source:** `google_secret_manager_regional_secret_version` ([#19514](https://github.com/hashicorp/terraform-provider-google/pull/19514)) +* **New Data Source:** `google_secret_manager_regional_secret` ([#19491](https://github.com/hashicorp/terraform-provider-google/pull/19491)) +* **New Resource:** `google_database_migration_service_migration_job` ([#19488](https://github.com/hashicorp/terraform-provider-google/pull/19488)) +* **New Resource:** `google_discovery_engine_target_site` ([#19469](https://github.com/hashicorp/terraform-provider-google/pull/19469)) +* **New Resource:** `google_healthcare_workspace` ([#19476](https://github.com/hashicorp/terraform-provider-google/pull/19476)) +* **New Resource:** `google_scc_folder_scc_big_query_export` ([#19480](https://github.com/hashicorp/terraform-provider-google/pull/19480)) +* **New Resource:** `google_scc_organization_scc_big_query_export` ([#19465](https://github.com/hashicorp/terraform-provider-google/pull/19465)) +* **New Resource:** `google_scc_project_scc_big_query_export` ([#19466](https://github.com/hashicorp/terraform-provider-google/pull/19466)) +* **New Resource:** `google_scc_v2_organization_scc_big_query_export` ([#19457](https://github.com/hashicorp/terraform-provider-google/pull/19457)) +* **New Resource:** `google_secret_manager_regional_secret_version` ([#19504](https://github.com/hashicorp/terraform-provider-google/pull/19504)) +* **New Resource:** `google_secret_manager_regional_secret` ([#19461](https://github.com/hashicorp/terraform-provider-google/pull/19461)) +* **New Resource:** `google_site_verification_web_resource` ([#19477](https://github.com/hashicorp/terraform-provider-google/pull/19477)) +* **New Resource:** `google_spanner_backup_schedule` ([#19449](https://github.com/hashicorp/terraform-provider-google/pull/19449)) + +IMPROVEMENTS: +* alloydb: added `enable_outbound_public_ip` field to `google_alloydb_instance` resource ([#19444](https://github.com/hashicorp/terraform-provider-google/pull/19444)) +* apigee: added in-place update for `consumer_accept_list` field in `google_apigee_instance` resource ([#19442](https://github.com/hashicorp/terraform-provider-google/pull/19442)) +* compute: added `interface` field to `google_compute_attached_disk` resource ([#19440](https://github.com/hashicorp/terraform-provider-google/pull/19440)) +* compute: added in-place update in `google_compute_interconnect` resource, except for `remote_location` and `requested_features` fields ([#19508](https://github.com/hashicorp/terraform-provider-google/pull/19508)) +* filestore: added `deletion_protection_enabled` and `deletion_protection_reason` fields to `google_filestore_instance` resource ([#19446](https://github.com/hashicorp/terraform-provider-google/pull/19446)) +* looker: added `fips_enabled` field to `google_looker_instance` resource ([#19511](https://github.com/hashicorp/terraform-provider-google/pull/19511)) +* metastore: added `deletion_protection` field to `google_dataproc_metastore_service` resource ([#19505](https://github.com/hashicorp/terraform-provider-google/pull/19505)) +* netapp: added `allow_auto_tiering` field to `google_netapp_storage_pool` resource ([#19454](https://github.com/hashicorp/terraform-provider-google/pull/19454)) +* netapp: added `tiering_policy` field to `google_netapp_volume` resource ([#19454](https://github.com/hashicorp/terraform-provider-google/pull/19454)) +* secretmanagerregional: added 
`version_aliases` field to `google_secret_manager_regional_secret` resource ([#19514](https://github.com/hashicorp/terraform-provider-google/pull/19514))
+* spanner: added `edition` field to `google_spanner_instance` resource ([#19449](https://github.com/hashicorp/terraform-provider-google/pull/19449))
+
+BUG FIXES:
+* compute: fixed a permadiff on `iap` field in `google_compute_backend_service` and `google_compute_region_backend_service` resources ([#19509](https://github.com/hashicorp/terraform-provider-google/pull/19509))
+* container: fixed a bug where specifying `node_pool_defaults.node_config_defaults` with `enable_autopilot = true` would cause `google_container_cluster` resource creation failure ([#19543](https://github.com/hashicorp/terraform-provider-google/pull/19543))
+* container: fixed a permadiff on `node_config.gcfs_config` field in `google_container_cluster` and `google_container_node_pool` resources ([#19512](https://github.com/hashicorp/terraform-provider-google/pull/19512))
+* container: fixed the in-place update for `node_config.gcfs_config` field in `google_container_cluster` and `google_container_node_pool` resources ([#19512](https://github.com/hashicorp/terraform-provider-google/pull/19512))
+* container: made `node_config.kubelet_config.cpu_manager_policy` field optional to fix its update in `google_container_cluster` resource ([#19464](https://github.com/hashicorp/terraform-provider-google/pull/19464))
+* dns: fixed a permadiff on `dnssec_config` field in `google_dns_managed_zone` resource ([#19456](https://github.com/hashicorp/terraform-provider-google/pull/19456))
+* pubsub: allowed `filter` field to contain line breaks in `google_pubsub_subscription` resource ([#19451](https://github.com/hashicorp/terraform-provider-google/pull/19451))
+
+## 6.3.0 (September 16, 2024)
+
+FEATURES:
+* **New Data Source:** `google_bigquery_tables` ([#19402](https://github.com/hashicorp/terraform-provider-google/pull/19402))
+* **New Resource:** `google_developer_connect_connection` ([#19431](https://github.com/hashicorp/terraform-provider-google/pull/19431))
+* **New Resource:** `google_developer_connect_git_repository_link` ([#19431](https://github.com/hashicorp/terraform-provider-google/pull/19431))
+* **New Resource:** `google_memorystore_instance` ([#19398](https://github.com/hashicorp/terraform-provider-google/pull/19398))
+
+IMPROVEMENTS:
+* compute: added `connected_endpoints.consumer_network` and `connected_endpoints.psc_connection_id` fields to `google_compute_service_attachment` resource ([#19426](https://github.com/hashicorp/terraform-provider-google/pull/19426))
+* compute: added field `http_keep_alive_timeout_sec` to `google_compute_region_target_https_proxy` and `google_compute_region_target_http_proxy` resources ([#19432](https://github.com/hashicorp/terraform-provider-google/pull/19432))
+* compute: added support for `boot_disk.initialize_params.resource_policies` in `google_compute_instance` and `google_compute_instance_template` ([#19407](https://github.com/hashicorp/terraform-provider-google/pull/19407))
+* container: added `storage_pools` to `node_config` in `google_container_cluster` and `google_container_node_pool` ([#19423](https://github.com/hashicorp/terraform-provider-google/pull/19423))
+* containerattached: added `security_posture_config` field to `google_container_attached_cluster` resource ([#19411](https://github.com/hashicorp/terraform-provider-google/pull/19411))
+* netapp: added `large_capacity` and `multiple_endpoints` to `google_netapp_volume` resource
([#19384](https://github.com/hashicorp/terraform-provider-google/pull/19384))
+* resourcemanager: added `tags` field to `google_folder` to allow setting tags for folders at creation time ([#19380](https://github.com/hashicorp/terraform-provider-google/pull/19380))
+
+BUG FIXES:
+* compute: setting `network_ip` to "" will no longer cause a diff and will be treated the same as `null` ([#19400](https://github.com/hashicorp/terraform-provider-google/pull/19400))
+* dataproc: updated `google_dataproc_cluster` to protect against handling nil `kerberos_config` values ([#19401](https://github.com/hashicorp/terraform-provider-google/pull/19401))
+* dns: added a mutex to `google_dns_record_set` to prevent conflicts when multiple resources attempt to operate on the same record set ([#19416](https://github.com/hashicorp/terraform-provider-google/pull/19416))
+* managedkafka: added a 5-second wait after `google_managed_kafka_topic` creation to fix eventual consistency errors ([#19429](https://github.com/hashicorp/terraform-provider-google/pull/19429))
+
+## 6.2.0 (September 9, 2024)
+
+FEATURES:
+* **New Data Source:** `google_certificate_manager_certificates` ([#19361](https://github.com/hashicorp/terraform-provider-google/pull/19361))
+* **New Resource:** `google_network_security_server_tls_policy` ([#19314](https://github.com/hashicorp/terraform-provider-google/pull/19314))
+* **New Resource:** `google_scc_v2_folder_scc_big_query_export` ([#19327](https://github.com/hashicorp/terraform-provider-google/pull/19327))
+* **New Resource:** `google_scc_v2_project_scc_big_query_export` ([#19311](https://github.com/hashicorp/terraform-provider-google/pull/19311))
+
+IMPROVEMENTS:
+* assuredworkloads: added field `partner_service_billing_account` to `google_assured_workloads_workload` ([#19358](https://github.com/hashicorp/terraform-provider-google/pull/19358))
+* bigtable: added support for `column_family.type` in `google_bigtable_table` ([#19302](https://github.com/hashicorp/terraform-provider-google/pull/19302))
+* cloudrun: promoted support for NFS and CSI volumes (for Cloud Storage FUSE) for `google_cloud_run_service` to GA ([#19359](https://github.com/hashicorp/terraform-provider-google/pull/19359))
+* cloudrunv2: promoted support for NFS and GCS volumes for `google_cloud_run_v2_job` to GA ([#19359](https://github.com/hashicorp/terraform-provider-google/pull/19359))
+* compute: added `boot_disk.interface` field to `google_compute_instance` resource ([#19319](https://github.com/hashicorp/terraform-provider-google/pull/19319))
+* container: added `node_pool_auto_config.node_kubelet_config.insecure_kubelet_readonly_port_enabled` field to `google_container_cluster`. ([#19320](https://github.com/hashicorp/terraform-provider-google/pull/19320))
+* container: added `insecure_kubelet_readonly_port_enabled` to `node_pool.node_config.kubelet_config` and `node_config.kubelet_config` in `google_container_node_pool` resource. ([#19312](https://github.com/hashicorp/terraform-provider-google/pull/19312))
+* container: added `insecure_kubelet_readonly_port_enabled` to `node_pool_defaults.node_config_defaults`, `node_pool.node_config.kubelet_config`, and `node_config.kubelet_config` in `google_container_cluster` resource.
([#19312](https://github.com/hashicorp/terraform-provider-google/pull/19312))
+* container: added support for in-place updates for `google_container_node_pool.node_config.gcfs_config` and `google_container_cluster.node_config.gcfs_config` and `google_container_cluster.node_pool.node_config.gcfs_config` ([#19365](https://github.com/hashicorp/terraform-provider-google/pull/19365))
+* container: promoted the `additive_vpc_scope_dns_domain` field on the `google_container_cluster` resource to GA ([#19313](https://github.com/hashicorp/terraform-provider-google/pull/19313))
+* iambeta: added `x509` field to `google_iam_workload_identity_pool_provider` resource ([#19375](https://github.com/hashicorp/terraform-provider-google/pull/19375))
+* networkconnectivity: added `include_export_ranges` to `google_network_connectivity_spoke` ([#19346](https://github.com/hashicorp/terraform-provider-google/pull/19346))
+* pubsub: added `cloud_storage_config.max_messages` and `cloud_storage_config.avro_config.use_topic_schema` fields to `google_pubsub_subscription` resource ([#19338](https://github.com/hashicorp/terraform-provider-google/pull/19338))
+* redis: added the `maintenance_policy` field to the `google_redis_cluster` resource ([#19341](https://github.com/hashicorp/terraform-provider-google/pull/19341))
+* resourcemanager: added `tags` field to `google_project` to allow setting tags for projects at creation time ([#19351](https://github.com/hashicorp/terraform-provider-google/pull/19351))
+* securitycenter: added support for empty `streaming_config.filter` values in `google_scc_notification_config` resources ([#19369](https://github.com/hashicorp/terraform-provider-google/pull/19369))
+
+BUG FIXES:
+* compute: fixed `google_compute_interconnect` to support correct `available_features` option of `IF_MACSEC` ([#19330](https://github.com/hashicorp/terraform-provider-google/pull/19330))
+* compute: fixed a bug where `advertised_route_priority` was accidentally set to 0 during updates in `google_compute_router_peer` ([#19366](https://github.com/hashicorp/terraform-provider-google/pull/19366))
+* compute: fixed a permadiff caused by setting `start_time` in an incorrect H:mm format in `google_compute_resource_policies` resources ([#19297](https://github.com/hashicorp/terraform-provider-google/pull/19297))
+* compute: fixed `network_interface.subnetwork_project` validation to match with the project in `network_interface.subnetwork` field when `network_interface.subnetwork` has full self_link in `google_compute_instance` resource ([#19348](https://github.com/hashicorp/terraform-provider-google/pull/19348))
+* container: removed unnecessary force replacement in node pool `gcfs_config` ([#19365](https://github.com/hashicorp/terraform-provider-google/pull/19365))
+* kms: updated the `google_kms_autokey_config` resource's `folder` field to accept values that are either full resource names (`folders/{folder_id}`) or just the folder id (`{folder_id}` only) ([#19364](https://github.com/hashicorp/terraform-provider-google/pull/19364))
+* storage: added retry support for 429 errors in `google_storage_bucket` resource ([#19353](https://github.com/hashicorp/terraform-provider-google/pull/19353))
+
+## 6.1.0 (September 4, 2024)
+
+FEATURES:
+* **New Data Source:** `google_kms_crypto_key_latest_version` ([#19249](https://github.com/hashicorp/terraform-provider-google/pull/19249))
+* **New Data Source:** `google_kms_crypto_key_versions` ([#19241](https://github.com/hashicorp/terraform-provider-google/pull/19241))
+
+IMPROVEMENTS: +* databasemigrationservice: added support in `google_database_migration_service_connection_profile` for creating DMS connection profiles that link to existing Cloud SQL instances/AlloyDB clusters. ([#19291](https://github.com/hashicorp/terraform-provider-google/pull/19291)) +* alloydb: added `subscription_type` and `trial_metadata` field to `google_alloydb_cluster` resource ([#19262](https://github.com/hashicorp/terraform-provider-google/pull/19262)) +* bigquery: added `encryption_configuration` field to `google_bigquery_data_transfer_config` resource ([#19267](https://github.com/hashicorp/terraform-provider-google/pull/19267)) +* bigqueryanalyticshub: added `selected_resources`, and `restrict_direct_table_access` to `google_bigquery_analytics_hub_listing` resource ([#19244](https://github.com/hashicorp/terraform-provider-google/pull/19244)) +* bigqueryanalyticshub: added `sharing_environment_config` to `google_bigquery_analytics_hub_data_exchange` resource ([#19244](https://github.com/hashicorp/terraform-provider-google/pull/19244)) +* cloudtasks: added `http_target` field to `google_cloud_tasks_queue` resource ([#19253](https://github.com/hashicorp/terraform-provider-google/pull/19253)) +* compute: added `accelerators` field to `google_compute_node_template` resource ([#19292](https://github.com/hashicorp/terraform-provider-google/pull/19292)) +* compute: allowed disabling `server_tls_policy` during update in `google_compute_target_https_proxy` resources ([#19233](https://github.com/hashicorp/terraform-provider-google/pull/19233)) +* container: added `secret_manager_config` field to `google_container_cluster` resource ([#19288](https://github.com/hashicorp/terraform-provider-google/pull/19288)) +* datastream: added `transaction_logs` and `change_tables` to the `datastream_stream` resource ([#19248](https://github.com/hashicorp/terraform-provider-google/pull/19248)) +* discoveryengine: added `chunking_config` and `layout_parsing_config` fields to `google_discovery_engine_data_store` resource ([#19274](https://github.com/hashicorp/terraform-provider-google/pull/19274)) +* dlp: added `inspect_template_modified_cadence` field to `big_query_target` and `cloud_sql_target` in `google_data_loss_prevention_discovery_config` resource ([#19282](https://github.com/hashicorp/terraform-provider-google/pull/19282)) +* dlp: added `tag_resources` field to `google_data_loss_prevention_discovery_config` resource ([#19282](https://github.com/hashicorp/terraform-provider-google/pull/19282)) +* networksecurity: promoted `google_network_security_client_tls_policy` to GA ([#19293](https://github.com/hashicorp/terraform-provider-google/pull/19293)) + +BUG FIXES: +* bigquery: fixed an error which could occur with email field values containing non-lower-case characters in `google_bigquery_dataset_access` resource ([#19259](https://github.com/hashicorp/terraform-provider-google/pull/19259)) +* bigqueryanalyticshub: made `bigquery_dataset` immutable in `google_bigquery_analytics_hub_listing` as it was not updatable in the API. Now modifying the field in Terraform will correctly recreate the resource rather than causing Terraform to report it would attempt an invalid update. 
([#19244](https://github.com/hashicorp/terraform-provider-google/pull/19244))
+* container: fixed update inconsistency in `google_container_cluster` resource ([#19247](https://github.com/hashicorp/terraform-provider-google/pull/19247))
+* pubsub: fixed a validation bug that didn't allow empty filter definitions for `google_pubsub_subscription` resources ([#19284](https://github.com/hashicorp/terraform-provider-google/pull/19284))
+* resourcemanager: fixed a bug where `data.google_client_config` failed silently when inadequate credentials were used to configure the provider ([#19286](https://github.com/hashicorp/terraform-provider-google/pull/19286))
+* sql: fixed importing `google_sql_user` where `host` is an IPv4 CIDR ([#19243](https://github.com/hashicorp/terraform-provider-google/pull/19243))
+* sql: fixed overwriting of `name` field for IAM Group user in `google_sql_user` resource ([#19234](https://github.com/hashicorp/terraform-provider-google/pull/19234))
+
+## 6.0.1 (August 26, 2024)
+
+BREAKING CHANGES:
+
+* sql: removed `settings.ip_configuration.require_ssl` from `google_sql_database_instance` in favor of `settings.ip_configuration.ssl_mode`. This field was intended to be removed in 6.0.0. ([#19263](https://github.com/hashicorp/terraform-provider-google/pull/19263))
+
+## 6.0.0 (August 26, 2024)
+
+[Terraform Google Provider 6.0.0 Upgrade Guide](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/version_6_upgrade)
+
+BREAKING CHANGES:
+* provider: changed provider labels to add the `goog-terraform-provisioned: true` label by default. ([#19190](https://github.com/hashicorp/terraform-provider-google/pull/19190))
+* activedirectory: added `deletion_protection` field to `google_active_directory_domain` resource. This field defaults to `true`, preventing accidental deletions. To delete the resource, you must first set `deletion_protection = false` before destroying the resource. ([#18906](https://github.com/hashicorp/terraform-provider-google/pull/18906))
+* alloydb: removed `network` in `google_alloydb_cluster`. Use `network_config.network` instead. ([#19181](https://github.com/hashicorp/terraform-provider-google/pull/19181))
+* bigquery: added client-side validation to prevent table view creation if schema contains required fields for `google_bigquery_table` resource ([#18767](https://github.com/hashicorp/terraform-provider-google/pull/18767))
+* bigquery: removed `allow_resource_tags_on_deletion` from `google_bigquery_table`. Resource tags are now always allowed on table deletion. ([#19077](https://github.com/hashicorp/terraform-provider-google/pull/19077))
+* bigqueryreservation: removed `multi_region_auxiliary` from `google_bigquery_reservation` ([#18922](https://github.com/hashicorp/terraform-provider-google/pull/18922))
+* billing: revised the format of `id` for `google_billing_project_info` ([#18823](https://github.com/hashicorp/terraform-provider-google/pull/18823))
+* cloudrunv2: added `deletion_protection` field to `google_cloud_run_v2_service`. This field defaults to `true`, preventing accidental deletions. To delete the resource, you must first set `deletion_protection = false` before destroying the resource. ([#19019](https://github.com/hashicorp/terraform-provider-google/pull/19019))
+* cloudrunv2: changed `liveness_probe` to no longer infer a default value from the API on `google_cloud_run_v2_service`. Removing this field and applying the change will now remove the liveness probe from the Cloud Run service.
([#18764](https://github.com/hashicorp/terraform-provider-google/pull/18764))
+* cloudrunv2: retyped `containers.env` to SET from ARRAY for `google_cloud_run_v2_service` and `google_cloud_run_v2_job`. ([#18855](https://github.com/hashicorp/terraform-provider-google/pull/18855))
+* composer: `ip_allocation_policy = []` in `google_composer_environment` is no longer valid configuration. Removing the field from configuration should not produce a diff. ([#19207](https://github.com/hashicorp/terraform-provider-google/pull/19207))
+* compute: added new required field `enabled` in `google_compute_backend_service` and `google_compute_region_backend_service` ([#18772](https://github.com/hashicorp/terraform-provider-google/pull/18772))
+* compute: changed `certificate_id` in `google_compute_managed_ssl_certificate` to correctly be output only. ([#19069](https://github.com/hashicorp/terraform-provider-google/pull/19069))
+* compute: revised and in some cases removed default values of `connection_draining_timeout_sec`, `balancing_mode` and `outlier_detection` in `google_compute_region_backend_service` and `google_compute_backend_service`. ([#18720](https://github.com/hashicorp/terraform-provider-google/pull/18720))
+* compute: revised the format of `id` for `compute_network_endpoints` ([#18844](https://github.com/hashicorp/terraform-provider-google/pull/18844))
+* compute: `guest_accelerator = []` is no longer valid configuration in `google_compute_instance`. To explicitly set an empty list of objects, set `guest_accelerator.count = 0`. ([#19207](https://github.com/hashicorp/terraform-provider-google/pull/19207))
+* compute: `google_compute_instance_from_template` and `google_compute_instance_from_machine_image` `network_interface.alias_ip_range, network_interface.access_config, attached_disk, guest_accelerator, service_account, scratch_disk` can no longer be set to an empty block `[]`. Removing the fields from configuration should not produce a diff. ([#19207](https://github.com/hashicorp/terraform-provider-google/pull/19207))
+* compute: `secondary_ip_ranges = []` in `google_compute_subnetwork` is no longer valid configuration. To set an explicitly empty list, use `send_secondary_ip_range_if_empty` and completely remove `secondary_ip_range` from config. ([#19207](https://github.com/hashicorp/terraform-provider-google/pull/19207))
+* container: made `advanced_datapath_observability_config.enable_relay` required in `google_container_cluster` ([#19060](https://github.com/hashicorp/terraform-provider-google/pull/19060))
+* container: removed deprecated field `advanced_datapath_observability_config.relay_mode` from `google_container_cluster` resource. Users are expected to use the `enable_relay` field instead. ([#19060](https://github.com/hashicorp/terraform-provider-google/pull/19060))
+* container: three label-related fields are now in `google_container_cluster` resource. `resource_labels` field is non-authoritative and only manages the labels defined by the users on the resource through Terraform. The new output-only `terraform_labels` field merges the labels defined by the users on the resource through Terraform and the default labels configured on the provider. The new output-only `effective_labels` field lists all of the labels present on the resource in GCP, including the labels configured through Terraform, the system, and other clients.
([#19062](https://github.com/hashicorp/terraform-provider-google/pull/19062))
+* container: made the three fields `resource_labels`, `terraform_labels`, and `effective_labels` present in `google_container_cluster` datasources. All three fields will have all of the labels present on the resource in GCP, including the labels configured through Terraform, the system, and other clients, equivalent to `effective_labels` on the resource. ([#19062](https://github.com/hashicorp/terraform-provider-google/pull/19062))
+* container: `guest_accelerator = []` is no longer valid configuration in `google_container_cluster` and `google_container_node_pool`. To explicitly set an empty list of objects, set `guest_accelerator.count = 0`. ([#19207](https://github.com/hashicorp/terraform-provider-google/pull/19207))
+* container: `guest_accelerator.gpu_driver_installation_config = []` and `guest_accelerator.gpu_sharing_config = []` are no longer valid configuration in `google_container_cluster` and `google_container_node_pool`. Removing the fields from configuration should not produce a diff. ([#19207](https://github.com/hashicorp/terraform-provider-google/pull/19207))
+* datastore: removed `google_datastore_index` in favor of `google_firestore_index` ([#19160](https://github.com/hashicorp/terraform-provider-google/pull/19160))
+* edgenetwork: three label-related fields are now in `google_edgenetwork_network` and `google_edgenetwork_subnet` resources. `labels` field is non-authoritative and only manages the labels defined by the users on the resource through Terraform. The new output-only `terraform_labels` field merges the labels defined by the users on the resource through Terraform and the default labels configured on the provider. The new output-only `effective_labels` field lists all of the labels present on the resource in GCP, including the labels configured through Terraform, the system, and other clients. ([#19062](https://github.com/hashicorp/terraform-provider-google/pull/19062))
+* identityplatform: removed resource `google_identity_platform_project_default_config` in favor of `google_identity_platform_project_config` ([#18992](https://github.com/hashicorp/terraform-provider-google/pull/18992))
+* pubsub: allowed `schema_settings` in `google_pubsub_topic` to be removed ([#18631](https://github.com/hashicorp/terraform-provider-google/pull/18631))
+* integrations: removed `create_sample_workflows` and `provision_gmek` from `google_integrations_client` ([#19148](https://github.com/hashicorp/terraform-provider-google/pull/19148))
+* redis: added a `deletion_protection_enabled` field to the `google_redis_cluster` resource. This field defaults to `true`, preventing accidental deletions. To delete the resource, you must first set `deletion_protection_enabled = false` before destroying the resource. ([#19173](https://github.com/hashicorp/terraform-provider-google/pull/19173))
+* resourcemanager: added `deletion_protection` field to `google_folder` to make deleting them require an explicit intent. Folder resources now cannot be destroyed unless `deletion_protection = false` is set for the resource. ([#19021](https://github.com/hashicorp/terraform-provider-google/pull/19021))
+* resourcemanager: made `deletion_policy` in `google_project` 'PREVENT' by default. This makes deleting them require an explicit intent. `google_project` resources cannot be destroyed unless `deletion_policy` is set to 'ABANDON' or 'DELETE' for the resource.
+* sql: removed `settings.ip_configuration.require_ssl` in `google_sql_database_instance`. Please use `settings.ip_configuration.ssl_mode` instead. ([#18843](https://github.com/hashicorp/terraform-provider-google/pull/18843))
+* storage: removed `no_age` field from `lifecycle_rule.condition` in the `google_storage_bucket` resource ([#19048](https://github.com/hashicorp/terraform-provider-google/pull/19048))
+* vpcaccess: removed default values for `min_throughput` and `min_instances` fields on `google_vpc_access_connector` and made them default to values returned from the API when not provided by users ([#18697](https://github.com/hashicorp/terraform-provider-google/pull/18697))
+* vpcaccess: added a conflicting fields restriction between `min_throughput` and `min_instances` fields on `google_vpc_access_connector` ([#18697](https://github.com/hashicorp/terraform-provider-google/pull/18697))
+* vpcaccess: added a conflicting fields restriction between `max_throughput` and `max_instances` fields on `google_vpc_access_connector` ([#18697](https://github.com/hashicorp/terraform-provider-google/pull/18697))
+* workstation: defaulted `host.gce_instance.disable_ssh` to `true` for `google_workstations_workstation_config` ([#19101](https://github.com/hashicorp/terraform-provider-google/pull/19101))
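To make the empty-block migrations above concrete, here is a minimal Terraform sketch of the two patterns. It is illustrative only and not taken from the upstream upgrade guide; all names, ranges, and machine settings are assumptions.

```hcl
resource "google_compute_network" "example" {
  name                    = "example-net" # illustrative
  auto_create_subnetworks = false
}

resource "google_compute_subnetwork" "example" {
  name          = "example-subnet" # illustrative
  region        = "us-central1"
  ip_cidr_range = "10.0.0.0/24"
  network       = google_compute_network.example.id

  # Previously: secondary_ip_range = []
  # Now: remove the block entirely and send an explicitly empty list.
  send_secondary_ip_range_if_empty = true
}

resource "google_compute_instance" "example" {
  name         = "example-vm" # illustrative
  machine_type = "n1-standard-1"
  zone         = "us-central1-a"

  boot_disk {
    initialize_params {
      image = "debian-cloud/debian-12" # illustrative
    }
  }

  network_interface {
    subnetwork = google_compute_subnetwork.example.id
  }

  # Previously: guest_accelerator = []
  # Now: express "no accelerators" with an explicit count of zero.
  guest_accelerator {
    type  = "nvidia-tesla-t4" # illustrative
    count = 0
  }
}
```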
+IMPROVEMENTS:
+* compute: added fields `reserved_internal_range` and `secondary_ip_ranges[].reserved_internal_range` to `google_compute_subnetwork` resource ([#19151](https://github.com/hashicorp/terraform-provider-google/pull/19151))
+* compute: changed the behavior of `name_prefix` in multiple Compute resources to allow for a longer max length of 54 characters. See the upgrade guide and resource documentation for more details. ([#19152](https://github.com/hashicorp/terraform-provider-google/pull/19152))
+BUG FIXES:
+* compute: fixed an issue regarding sending the `enabled` field by default for a null `iap` message in `google_compute_backend_service` and `google_compute_region_backend_service` ([#18772](https://github.com/hashicorp/terraform-provider-google/pull/18772))
+
+## 5.44.1 (September 23, 2024)
+NOTES:
+* 5.44.1 is a backport release, intended to pull in critical container improvements and fixes for issues introduced in 5.44.0
+
+IMPROVEMENTS:
+* container: added in-place update support for `gcfs_config` in `google_container_cluster` and `google_container_node_pool` ([#19365](https://github.com/hashicorp/terraform-provider-google/pull/19365)) ([#19512](https://github.com/hashicorp/terraform-provider-google/pull/19512))
+
+BUG FIXES:
+* container: fixed a permadiff on `gcfs_config` in `google_container_cluster` and `google_container_node_pool` ([#19512](https://github.com/hashicorp/terraform-provider-google/pull/19512))
+* container: fixed a bug where specifying `node_pool_defaults.node_config_defaults` with `enable_autopilot = true` would cause `google_container_cluster` resource creation failure. ([#19543](https://github.com/hashicorp/terraform-provider-google/pull/19543))
+
+## 5.44.0 (September 9, 2024)
NOTES:
-* orgpolicy: converted `google_org_policy_policy` now to use MMv1 engine instead of DCL ([#18635](https://github.com/hashicorp/terraform-provider-google/pull/18635))
+* 5.44.0 is a backport release, intended to pull in critical container improvements from 6.2.0
+
+IMPROVEMENTS:
+* container: added `insecure_kubelet_readonly_port_enabled` to `node_pool.node_config.kubelet_config` and `node_config.kubelet_config` in `google_container_node_pool` resource. ([#19312](https://github.com/hashicorp/terraform-provider-google/pull/19312))
+* container: added `insecure_kubelet_readonly_port_enabled` to `node_pool_defaults.node_config_defaults`, `node_pool.node_config.kubelet_config`, and `node_config.kubelet_config` in `google_container_cluster` resource. ([#19312](https://github.com/hashicorp/terraform-provider-google/pull/19312))
+* container: added `node_pool_auto_config.node_kubelet_config.insecure_kubelet_readonly_port_enabled` field to `google_container_cluster`. ([#19320](https://github.com/hashicorp/terraform-provider-google/pull/19320))
+
+## 5.43.1 (August 30, 2024)
+
+NOTES:
+* 5.43.1 is a backport release, and some changes will not appear in 6.X series releases until 6.1.0
+
+BUG FIXES:
+* pubsub: fixed a validation bug that didn't allow empty filter definitions for `google_pubsub_subscription` resources ([#19284](https://github.com/hashicorp/terraform-provider-google/pull/19284))
+
+## 5.43.0 (August 26, 2024)
+
+DEPRECATIONS:
+* storage: deprecated `lifecycle_rule.condition.no_age` field in `google_storage_bucket`. Use the new `lifecycle_rule.condition.send_age_if_zero` field instead (see the sketch at the end of this release's notes). ([#19172](https://github.com/hashicorp/terraform-provider-google/pull/19172))
+
+FEATURES:
+* **New Resource:** `google_kms_ekm_connection_iam_binding` ([#19132](https://github.com/hashicorp/terraform-provider-google/pull/19132))
+* **New Resource:** `google_kms_ekm_connection_iam_member` ([#19132](https://github.com/hashicorp/terraform-provider-google/pull/19132))
+* **New Resource:** `google_kms_ekm_connection_iam_policy` ([#19132](https://github.com/hashicorp/terraform-provider-google/pull/19132))
+* **New Resource:** `google_scc_v2_organization_scc_big_query_exports` ([#19184](https://github.com/hashicorp/terraform-provider-google/pull/19184))
+
+IMPROVEMENTS:
+* compute: added `label_fingerprint` field to `google_compute_global_address` resource ([#19204](https://github.com/hashicorp/terraform-provider-google/pull/19204))
+* compute: exposed the service-side id as a new output field `forwarding_rule_id` on resource `google_compute_forwarding_rule` ([#19139](https://github.com/hashicorp/terraform-provider-google/pull/19139))
+* container: added EXTENDED as a valid option for `release_channel` field in `google_container_cluster` resource ([#19141](https://github.com/hashicorp/terraform-provider-google/pull/19141))
+* logging: changed `enable_analytics` in `google_logging_project_bucket_config` to leave the analytics setting unset ("no preference") when omitted, instead of explicitly disabling analytics ([#19126](https://github.com/hashicorp/terraform-provider-google/pull/19126))
+* pubsub: added validation to `filter` field in resource `google_pubsub_subscription` ([#19131](https://github.com/hashicorp/terraform-provider-google/pull/19131))
+* resourcemanager: added `default_labels` field to `google_client_config` data source ([#19170](https://github.com/hashicorp/terraform-provider-google/pull/19170))
+* vmwareengine: added PC undelete support in `google_vmwareengine_private_cloud` ([#19192](https://github.com/hashicorp/terraform-provider-google/pull/19192))
+
+BUG FIXES:
+* alloydb: fixed a permadiff on `psc_instance_config` in `google_alloydb_instance` resource ([#19143](https://github.com/hashicorp/terraform-provider-google/pull/19143))
+* compute: fixed a malformed URL that affected updating the `server_tls_policy` property on `google_compute_target_https_proxy` resources ([#19164](https://github.com/hashicorp/terraform-provider-google/pull/19164))
+* compute: fixed a bug where the `labels` field could not be updated on `google_compute_global_address` ([#19204](https://github.com/hashicorp/terraform-provider-google/pull/19204))
+* compute: fixed the force-replacement diff logic for `network_ip` on resource `google_compute_instance` ([#19135](https://github.com/hashicorp/terraform-provider-google/pull/19135))
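A minimal sketch of the `no_age` to `send_age_if_zero` migration flagged in this release's deprecation notice, assuming `send_age_if_zero = false` is the drop-in replacement for `no_age = true` as the entry describes; the bucket name and rule values are illustrative:

```hcl
resource "google_storage_bucket" "example" {
  name     = "example-bucket" # illustrative
  location = "US"

  lifecycle_rule {
    action {
      type = "Delete"
    }
    condition {
      # Previously: no_age = true kept the provider from sending an
      # age condition. Now opt out explicitly instead:
      send_age_if_zero           = false
      days_since_noncurrent_time = 7 # illustrative
    }
  }
}
```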
+
+## 5.42.0 (August 19, 2024)
+DEPRECATIONS:
+* compute: setting `google_compute_subnetwork.secondary_ip_range = []` to explicitly set a list of empty objects is deprecated and will produce an error in the upcoming major release. Use `send_secondary_ip_range_if_empty` while removing `secondary_ip_range` from config instead. ([#19122](https://github.com/hashicorp/terraform-provider-google/pull/19122))
+
+FEATURES:
+* **New Data Source:** `google_artifact_registry_locations` ([#19047](https://github.com/hashicorp/terraform-provider-google/pull/19047))
+* **New Data Source:** `google_cloud_identity_transitive_group_memberships` ([#19038](https://github.com/hashicorp/terraform-provider-google/pull/19038))
+* **New Resource:** `google_discovery_engine_schema` ([#19124](https://github.com/hashicorp/terraform-provider-google/pull/19124))
+* **New Resource:** `google_scc_folder_notification_config` ([#19057](https://github.com/hashicorp/terraform-provider-google/pull/19057))
+* **New Resource:** `google_scc_v2_folder_notification_config` ([#19055](https://github.com/hashicorp/terraform-provider-google/pull/19055))
+* **New Resource:** `google_vertex_ai_index_endpoint_deployed_index` ([#19061](https://github.com/hashicorp/terraform-provider-google/pull/19061))
+
+IMPROVEMENTS:
+* clouddeploy: added `serial_pipeline.stages.strategy.canary.runtime_config.kubernetes.gateway_service_mesh.pod_selector_label` and `serial_pipeline.stages.strategy.canary.runtime_config.kubernetes.service_networking.pod_selector_label` fields to `google_clouddeploy_delivery_pipeline` resource ([#19100](https://github.com/hashicorp/terraform-provider-google/pull/19100))
+* compute: added `send_secondary_ip_range_if_empty` to `google_compute_subnetwork` ([#19122](https://github.com/hashicorp/terraform-provider-google/pull/19122))
+* discoveryengine: added `skip_default_schema_creation` field to `google_data_store` resource ([#19017](https://github.com/hashicorp/terraform-provider-google/pull/19017))
+* dns: changed `load_balancer_type` field from required to optional in `google_dns_record_set` ([#19050](https://github.com/hashicorp/terraform-provider-google/pull/19050))
+* firestore: added `cmek_config` field to `google_firestore_database` resource ([#19107](https://github.com/hashicorp/terraform-provider-google/pull/19107))
+* servicenetworking: added `update_on_creation_fail` field to `google_service_networking_connection` resource. When set to `true`, a failed attempt to create a new connection forces an update of the reserved peering ranges on the existing service networking connection. ([#19035](https://github.com/hashicorp/terraform-provider-google/pull/19035))
+* sql: added `server_ca_mode` field to `google_sql_database_instance` resource ([#18998](https://github.com/hashicorp/terraform-provider-google/pull/18998))
+
+BUG FIXES:
+* bigquery: made `google_bigquery_dataset_iam_member` non-authoritative. To remove a bigquery dataset IAM member, use an authoritative resource like `google_bigquery_dataset_iam_policy` (see the sketch after this list) ([#19121](https://github.com/hashicorp/terraform-provider-google/pull/19121))
+* cloudfunctions2: fixed a "Provider produced inconsistent final plan" bug affecting the `service_config.environment_variables` field in `google_cloudfunctions2_function` resource ([#19024](https://github.com/hashicorp/terraform-provider-google/pull/19024))
+* cloudfunctions2: fixed a permadiff on `storage_source.generation` in `google_cloudfunctions2_function` resource ([#19031](https://github.com/hashicorp/terraform-provider-google/pull/19031))
+* compute: fixed an issue where sub-resources managed by `google_compute_forwarding_rule` prevented resource deletion ([#19117](https://github.com/hashicorp/terraform-provider-google/pull/19117))
+* logging: changed `google_logging_project_bucket_config.enable_analytics` behavior to leave the analytics setting unset ("no preference") when omitted, instead of explicitly disabling analytics. ([#19126](https://github.com/hashicorp/terraform-provider-google/pull/19126))
+* workbench: fixed a bug with `google_workbench_instance` metadata drifting when using custom containers. ([#19119](https://github.com/hashicorp/terraform-provider-google/pull/19119))
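A sketch of the authoritative alternative mentioned in the bigquery fix above; the dataset, role, and member values are illustrative assumptions:

```hcl
resource "google_bigquery_dataset" "example" {
  dataset_id = "example_dataset" # illustrative
}

data "google_iam_policy" "reader" {
  binding {
    role    = "roles/bigquery.dataViewer"
    members = ["user:jane@example.com"] # illustrative
  }
}

# Authoritative: this policy fully replaces any existing bindings,
# so removing a member here actually removes it from the dataset.
resource "google_bigquery_dataset_iam_policy" "example" {
  dataset_id  = google_bigquery_dataset.example.dataset_id
  policy_data = data.google_iam_policy.reader.policy_data
}
```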
+
+## 5.41.0 (August 13, 2024)
+
+DEPRECATIONS:
+* resourcemanager: deprecated `skip_delete` field in the `google_project` resource. Use `deletion_policy` instead. ([#18867](https://github.com/hashicorp/terraform-provider-google/pull/18867))
+
+FEATURES:
+* **New Data Source:** `google_logging_log_view_iam_policy` ([#18990](https://github.com/hashicorp/terraform-provider-google/pull/18990))
+* **New Data Source:** `google_scc_v2_organization_source_iam_policy` ([#19004](https://github.com/hashicorp/terraform-provider-google/pull/19004))
+* **New Resource:** `google_access_context_manager_service_perimeter_dry_run_egress_policy` ([#18994](https://github.com/hashicorp/terraform-provider-google/pull/18994))
+* **New Resource:** `google_access_context_manager_service_perimeter_dry_run_ingress_policy` ([#18994](https://github.com/hashicorp/terraform-provider-google/pull/18994))
+* **New Resource:** `google_scc_v2_folder_mute_config` ([#18924](https://github.com/hashicorp/terraform-provider-google/pull/18924))
+* **New Resource:** `google_scc_v2_project_mute_config` ([#18993](https://github.com/hashicorp/terraform-provider-google/pull/18993))
+* **New Resource:** `google_scc_v2_project_notification_config` ([#19008](https://github.com/hashicorp/terraform-provider-google/pull/19008))
+* **New Resource:** `google_scc_v2_organization_source` ([#19004](https://github.com/hashicorp/terraform-provider-google/pull/19004))
+* **New Resource:** `google_scc_v2_organization_source_iam_binding` ([#19004](https://github.com/hashicorp/terraform-provider-google/pull/19004))
+* **New Resource:** `google_scc_v2_organization_source_iam_member` ([#19004](https://github.com/hashicorp/terraform-provider-google/pull/19004))
+* **New Resource:** `google_scc_v2_organization_source_iam_policy` ([#19004](https://github.com/hashicorp/terraform-provider-google/pull/19004))
+* **New Resource:** `google_logging_log_view_iam_binding` ([#18990](https://github.com/hashicorp/terraform-provider-google/pull/18990))
+* **New Resource:** `google_logging_log_view_iam_member` ([#18990](https://github.com/hashicorp/terraform-provider-google/pull/18990))
+* **New Resource:** `google_logging_log_view_iam_policy` ([#18990](https://github.com/hashicorp/terraform-provider-google/pull/18990))
+
+IMPROVEMENTS:
+* clouddeploy: added `gke.proxy_url` field to `google_clouddeploy_target` ([#19016](https://github.com/hashicorp/terraform-provider-google/pull/19016))
+* cloudrunv2: added field `binary_authorization.policy` to resource `google_cloud_run_v2_job` and resource `google_cloud_run_v2_service` to support named binary authorization policy. ([#18995](https://github.com/hashicorp/terraform-provider-google/pull/18995))
+* compute: added `source_regions` field to `google_compute_healthcheck` resource ([#19006](https://github.com/hashicorp/terraform-provider-google/pull/19006))
+* compute: added update-in-place support for the `google_compute_target_https_proxy.server_tls_policy` field ([#18996](https://github.com/hashicorp/terraform-provider-google/pull/18996))
+* compute: added update-in-place support for the `google_compute_region_target_https_proxy.server_tls_policy` field ([#19007](https://github.com/hashicorp/terraform-provider-google/pull/19007))
+* container: added `auto_provisioning_locations` field to `google_container_cluster` ([#18928](https://github.com/hashicorp/terraform-provider-google/pull/18928))
+* dataform: added `kms_key_name` field to `google_dataform_repository` resource ([#18947](https://github.com/hashicorp/terraform-provider-google/pull/18947))
+* discoveryengine: added `skip_default_schema_creation` field to `google_discovery_engine_data_store` resource ([#19017](https://github.com/hashicorp/terraform-provider-google/pull/19017))
+* gkehub: added `configmanagement.management` and `configmanagement.config_sync.enabled` fields to `google_gkehub_feature_membership` ([#19016](https://github.com/hashicorp/terraform-provider-google/pull/19016))
+* gkehub: added `management` field to `google_gke_hub_feature.fleet_default_member_config.configmanagement` ([#18963](https://github.com/hashicorp/terraform-provider-google/pull/18963))
+* resourcemanager: added `deletion_policy` field to the `google_project` resource. Setting `deletion_policy` to `PREVENT` will protect the project against any destroy actions caused by `terraform apply` or `terraform destroy`. Setting `deletion_policy` to `ABANDON` allows the resource to be abandoned rather than deleted, behaving the same as `skip_delete = true`. The default value is `DELETE`. `skip_delete = true` takes precedence over `deletion_policy = "DELETE"` (see the sketch at the end of this release's notes).
+* storage: added `force_destroy` field to `google_storage_managed_folder` resource ([#18973](https://github.com/hashicorp/terraform-provider-google/pull/18973))
+* storage: added `generation` field to `google_storage_bucket_object` resource ([#18971](https://github.com/hashicorp/terraform-provider-google/pull/18971))
+
+BUG FIXES:
+* compute: fixed `google_compute_instance.alias_ip_range` update behavior to avoid temporarily deleting unchanged alias IP ranges ([#19015](https://github.com/hashicorp/terraform-provider-google/pull/19015))
+* compute: fixed a bug where creation of PSC forwarding rules failed in the `google_compute_forwarding_rule` resource when provider default labels were set ([#18984](https://github.com/hashicorp/terraform-provider-google/pull/18984))
+* sql: fixed a perma-diff in `settings.insights_config` in `google_sql_database_instance` ([#18962](https://github.com/hashicorp/terraform-provider-google/pull/18962))
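A minimal sketch of the `deletion_policy` usage described in this release's resourcemanager entry; the project identifiers are illustrative assumptions:

```hcl
resource "google_project" "example" {
  name       = "Example"            # illustrative
  project_id = "example-project-id" # illustrative
  org_id     = "123456789012"       # illustrative

  # Replaces the deprecated skip_delete = true: on destroy the project
  # is removed from state but left in place in GCP.
  deletion_policy = "ABANDON"
}
```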
+
+## 5.40.0 (August 5, 2024)
+
+IMPROVEMENTS:
+* bigquery: added support for value `DELTA_LAKE` to `source_format` in `google_bigquery_table` resource ([#18915](https://github.com/hashicorp/terraform-provider-google/pull/18915))
+* compute: added `access_mode` field to `google_compute_disk` resource ([#18857](https://github.com/hashicorp/terraform-provider-google/pull/18857))
+* compute: added `stack_type` and `gateway_ip_version` fields to `google_compute_router` resource ([#18839](https://github.com/hashicorp/terraform-provider-google/pull/18839))
+* container: added field `ray_operator_config` for `google_container_cluster` ([#18825](https://github.com/hashicorp/terraform-provider-google/pull/18825))
+* container: promoted `additional_node_network_configs` and `additional_pod_network_configs` fields to GA in the `google_container_node_pool` resource ([#18842](https://github.com/hashicorp/terraform-provider-google/pull/18842))
+* container: promoted `enable_multi_networking` to GA in the `google_container_cluster` resource ([#18842](https://github.com/hashicorp/terraform-provider-google/pull/18842))
+* monitoring: updated the `goal` field in the `google_monitoring_slo` resource to accept a max threshold of up to 0.9999 ([#18845](https://github.com/hashicorp/terraform-provider-google/pull/18845))
+* networkconnectivity: added `export_psc` field to `google_network_connectivity_hub` resource ([#18866](https://github.com/hashicorp/terraform-provider-google/pull/18866))
+* sql: added `enable_dataplex_integration` field to `google_sql_database_instance` resource ([#18852](https://github.com/hashicorp/terraform-provider-google/pull/18852))
+
+BUG FIXES:
+* bigquery: fixed a permadiff when handling "assets" in `params` in the `google_bigquery_data_transfer_config` resource ([#18898](https://github.com/hashicorp/terraform-provider-google/pull/18898))
+* bigquery: fixed an issue preventing certain keys in `params` from being assigned values in `google_bigquery_data_transfer_config` ([#18888](https://github.com/hashicorp/terraform-provider-google/pull/18888))
+* compute: fixed a perma-diff of the `advertised_ip_ranges` field in `google_compute_router` resource ([#18869](https://github.com/hashicorp/terraform-provider-google/pull/18869))
+* container: fixed a perma-diff on `node_config.guest_accelerator.gpu_driver_installation_config` field in GKE 1.30+ in `google_container_node_pool` resource ([#18835](https://github.com/hashicorp/terraform-provider-google/pull/18835))
+* sql: fixed a perma-diff in `settings.insights_config` in `google_sql_database_instance` ([#18962](https://github.com/hashicorp/terraform-provider-google/pull/18962))
+
+## 5.39.1 (July 30th, 2024)
+
+BUG FIXES:
+* datastream: fixed a breaking change in 5.39.0 where `google_datastream_stream` made one of `destination_config.bigquery_destination_config.merge` or `destination_config.bigquery_destination_config.append_only` required ([#18903](https://github.com/hashicorp/terraform-provider-google/pull/18903))
+
+## 5.39.0 (July 29th, 2024)
+
+NOTES:
+* networkconnectivity: migrated `google_network_connectivity_hub` from DCL to MMv1 ([#18724](https://github.com/hashicorp/terraform-provider-google/pull/18724))
+* networkconnectivity: migrated `google_network_connectivity_spoke` from DCL to MMv1 ([#18779](https://github.com/hashicorp/terraform-provider-google/pull/18779))
+
+DEPRECATIONS:
+* bigquery: deprecated `allow_resource_tags_on_deletion` in `google_bigquery_table`. ([#18811](https://github.com/hashicorp/terraform-provider-google/pull/18811))
+* bigqueryreservation: deprecated `multi_region_auxiliary` on `google_bigquery_reservation`. ([#18803](https://github.com/hashicorp/terraform-provider-google/pull/18803))
+* datastore: deprecated the resource `google_datastore_index`. Use the `google_firestore_index` resource instead. ([#18781](https://github.com/hashicorp/terraform-provider-google/pull/18781))
+
+FEATURES:
+* **New Resource:** `google_apigee_environment_keyvaluemaps_entries` ([#18707](https://github.com/hashicorp/terraform-provider-google/pull/18707))
+* **New Resource:** `google_apigee_environment_keyvaluemaps` ([#18707](https://github.com/hashicorp/terraform-provider-google/pull/18707))
+* **New Resource:** `google_compute_resize_request` ([#18725](https://github.com/hashicorp/terraform-provider-google/pull/18725))
+* **New Resource:** `google_compute_router_route_policy` ([#18759](https://github.com/hashicorp/terraform-provider-google/pull/18759))
+* **New Resource:** `google_scc_v2_organization_mute_config` ([#18752](https://github.com/hashicorp/terraform-provider-google/pull/18752))
+
+IMPROVEMENTS:
+* alloydb: added `observability_config` field to `google_alloydb_instance` resource ([#18743](https://github.com/hashicorp/terraform-provider-google/pull/18743))
+* bigquery: added `resource_tags` field to `google_bigquery_dataset` resource (GA) ([#18711](https://github.com/hashicorp/terraform-provider-google/pull/18711))
+* bigquery: added `resource_tags` field to `google_bigquery_table` resource ([#18741](https://github.com/hashicorp/terraform-provider-google/pull/18741))
+* bigtable: added `data_boost_isolation_read_only` and `data_boost_isolation_read_only.compute_billing_owner` fields to `google_bigtable_app_profile` resource ([#18819](https://github.com/hashicorp/terraform-provider-google/pull/18819))
+* cloudfunctions: added `build_service_account` field to `google_cloudfunctions_function` resource ([#18702](https://github.com/hashicorp/terraform-provider-google/pull/18702))
+* compute: added `aws_v4_authentication` fields to `google_compute_backend_service` resource ([#18796](https://github.com/hashicorp/terraform-provider-google/pull/18796))
+* compute: added `custom_learned_ip_ranges` and `custom_learned_route_priority` fields to `google_compute_router_peer` resource ([#18727](https://github.com/hashicorp/terraform-provider-google/pull/18727))
+* compute: added `export_policies` and `import_policies` fields to `google_compute_router_peer` resource ([#18759](https://github.com/hashicorp/terraform-provider-google/pull/18759))
+* compute: added `shared_secret` field to `google_compute_public_advertised_prefix` resource ([#18786](https://github.com/hashicorp/terraform-provider-google/pull/18786))
+* compute: added `storage_pool` under `boot_disk.initialize_params` to `google_compute_instance` resource ([#18817](https://github.com/hashicorp/terraform-provider-google/pull/18817))
+* compute: changed `target_service` field on the `google_compute_service_attachment` resource to accept a `ForwardingRule` or `Gateway` URL. ([#18742](https://github.com/hashicorp/terraform-provider-google/pull/18742))
+* container: added field `ray_operator_config` for `google_container_cluster` ([#18825](https://github.com/hashicorp/terraform-provider-google/pull/18825))
+* datastream: added `merge` and `append_only` fields to `google_datastream_stream` resource ([#18726](https://github.com/hashicorp/terraform-provider-google/pull/18726))
+* datastream: promoted `source_config.sql_server_source_config` and `backfill_all.sql_server_excluded_objects` fields in `google_datastream_stream` resource from beta to GA ([#18732](https://github.com/hashicorp/terraform-provider-google/pull/18732))
+* datastream: promoted `sql_server_profile` field in `google_datastream_connection_profile` resource from beta to GA ([#18732](https://github.com/hashicorp/terraform-provider-google/pull/18732))
+* dlp: added `cloud_storage_target` field to `google_data_loss_prevention_discovery_config` resource ([#18740](https://github.com/hashicorp/terraform-provider-google/pull/18740))
+* resourcemanager: added `check_if_service_has_usage_on_destroy` field to `google_project_service` resource (see the sketch after this list) ([#18753](https://github.com/hashicorp/terraform-provider-google/pull/18753))
+* resourcemanager: added the `member` property to `google_project_service_identity` ([#18695](https://github.com/hashicorp/terraform-provider-google/pull/18695))
+* vmwareengine: added `deletion_delay_hours` field to `google_vmwareengine_private_cloud` resource ([#18698](https://github.com/hashicorp/terraform-provider-google/pull/18698))
+* vmwareengine: supported type change from `TIME_LIMITED` to `STANDARD` for multi-node `google_vmwareengine_private_cloud` resource ([#18698](https://github.com/hashicorp/terraform-provider-google/pull/18698))
+* workbench: added `access_configs` to `google_workbench_instance` resource ([#18737](https://github.com/hashicorp/terraform-provider-google/pull/18737))
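A sketch of the new `check_if_service_has_usage_on_destroy` flag from the resourcemanager entry above, assuming from its name that it makes destroy fail while the service still has usage; the service is illustrative:

```hcl
resource "google_project_service" "example" {
  service = "compute.googleapis.com" # illustrative

  # Assumed semantics: when true, destroying this resource first checks
  # whether the service still has usage and errors out instead of
  # disabling a service that is in use.
  check_if_service_has_usage_on_destroy = true
}
```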
+
+BUG FIXES:
+* compute: fixed a perma-diff for `interconnect_type` being `DEDICATED` in `google_compute_interconnect` resource ([#18761](https://github.com/hashicorp/terraform-provider-google/pull/18761))
+* dialogflowcx: fixed intermittent issues with retrieving resource state soon after creating `google_dialogflow_cx_security_settings` resources ([#18792](https://github.com/hashicorp/terraform-provider-google/pull/18792))
+* firestore: fixed missing import of `field` for `google_firestore_field`. ([#18771](https://github.com/hashicorp/terraform-provider-google/pull/18771))
+* firestore: fixed a bug where fields `database`, `collection`, `document_id`, and `field` could not be updated on `google_firestore_document` and `google_firestore_field` resources. ([#18821](https://github.com/hashicorp/terraform-provider-google/pull/18821))
+* netapp: made the `smb_settings` field on the `google_netapp_volume` resource default to the value returned from the API. This solves permadiffs when the field is unset. ([#18790](https://github.com/hashicorp/terraform-provider-google/pull/18790))
+* networksecurity: added recreate functionality on update for `client_validation_mode` and `client_validation_trust_config` in `google_network_security_server_tls_policy` ([#18769](https://github.com/hashicorp/terraform-provider-google/pull/18769))
+
+## 5.38.0 (July 15, 2024)
+
+FEATURES:
+* **New Data Source:** `google_gke_hub_membership_binding` ([#18680](https://github.com/hashicorp/terraform-provider-google/pull/18680))
+* **New Data Source:** `google_site_verification_token` ([#18688](https://github.com/hashicorp/terraform-provider-google/pull/18688))
+* **New Resource:** `google_scc_project_notification_config` ([#18682](https://github.com/hashicorp/terraform-provider-google/pull/18682))
+
+IMPROVEMENTS:
+* compute: promoted `labels` field on `google_compute_global_address` resource from beta to GA ([#18646](https://github.com/hashicorp/terraform-provider-google/pull/18646))
+* compute: made the `google_compute_resource_policy` resource updatable in-place ([#18673](https://github.com/hashicorp/terraform-provider-google/pull/18673))
+* privilegedaccessmanager: promoted `google_privileged_access_manager_entitlement` resource from beta to GA ([#18686](https://github.com/hashicorp/terraform-provider-google/pull/18686))
+* vertexai: added `project_number` field to `google_vertex_ai_feature_online_store_featureview` resource ([#18637](https://github.com/hashicorp/terraform-provider-google/pull/18637))
+
+BUG FIXES:
+* cloudfunctions2: fixed permadiffs on `service_config.environment_variables` field in `google_cloudfunctions2_function` resource ([#18651](https://github.com/hashicorp/terraform-provider-google/pull/18651))
+
+## 5.37.0 (July 8, 2024)
FEATURES:
* **New Data Source:** `google_kms_crypto_keys` ([#18605](https://github.com/hashicorp/terraform-provider-google/pull/18605))
@@ -86,7 +693,7 @@ FEATURES:
* **New Resource:** `google_netapp_backup` ([#18357](https://github.com/hashicorp/terraform-provider-google/pull/18357))
* **New Resource:** `google_network_services_service_lb_policies` ([#18326](https://github.com/hashicorp/terraform-provider-google/pull/18326))
* **New Resource:** `google_scc_management_folder_security_health_analytics_custom_module` ([#18360](https://github.com/hashicorp/terraform-provider-google/pull/18360))
-* **New Resource:** `google_scc_management_organization_project_security_health_analytics_custom_module` ([#18369](https://github.com/hashicorp/terraform-provider-google/pull/18369))
+* **New Resource:** `google_scc_management_project_security_health_analytics_custom_module` ([#18369](https://github.com/hashicorp/terraform-provider-google/pull/18369))
* **New Resource:** `google_scc_management_organization_security_health_analytics_custom_module` ([#18374](https://github.com/hashicorp/terraform-provider-google/pull/18374))
IMPROVEMENTS:
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/envvar/envvar_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/envvar/envvar_utils.go
index 75f310984d4..7b4dff49fcc 100644
--- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/envvar/envvar_utils.go
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/envvar/envvar_utils.go
@@ -106,6 +106,10 @@ var PapDescriptionEnvVars = []string{
"GOOGLE_PUBLIC_AVERTISED_PREFIX_DESCRIPTION",
}
+var ImpersonateServiceAccountEnvVars =
[]string{ + "GOOGLE_IMPERSONATE_SERVICE_ACCOUNT", +} + // AccTestPreCheck ensures at least one of the project env variables is set. func GetTestProjectNumberFromEnv() string { return transport_tpg.MultiEnvSearch(ProjectNumberEnvVars) @@ -140,6 +144,10 @@ func GetTestZoneFromEnv() string { return transport_tpg.MultiEnvSearch(ZoneEnvVars) } +func GetTestImpersonateServiceAccountFromEnv() string { + return transport_tpg.MultiEnvSearch(ImpersonateServiceAccountEnvVars) +} + func GetTestCustIdFromEnv(t *testing.T) string { SkipIfEnvNotSet(t, CustIdEnvVars...) return transport_tpg.MultiEnvSearch(CustIdEnvVars) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwmodels/provider_model.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwmodels/provider_model.go index 4118b94315d..40c8d1d0e3d 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwmodels/provider_model.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwmodels/provider_model.go @@ -7,7 +7,9 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" ) -// ProviderModel describes the provider config data model. +// ProviderModel maps provider schema data to a Go type. +// When the plugin-framework provider is configured, the Configure function receives data about +// the provider block in the configuration. That data is used to populate this struct. type ProviderModel struct { Credentials types.String `tfsdk:"credentials"` AccessToken types.String `tfsdk:"access_token"` @@ -76,7 +78,6 @@ type ProviderModel struct { DataplexCustomEndpoint types.String `tfsdk:"dataplex_custom_endpoint"` DataprocCustomEndpoint types.String `tfsdk:"dataproc_custom_endpoint"` DataprocMetastoreCustomEndpoint types.String `tfsdk:"dataproc_metastore_custom_endpoint"` - DatastoreCustomEndpoint types.String `tfsdk:"datastore_custom_endpoint"` DatastreamCustomEndpoint types.String `tfsdk:"datastream_custom_endpoint"` DeploymentManagerCustomEndpoint types.String `tfsdk:"deployment_manager_custom_endpoint"` DialogflowCustomEndpoint types.String `tfsdk:"dialogflow_custom_endpoint"` @@ -116,16 +117,19 @@ type ProviderModel struct { NetworkSecurityCustomEndpoint types.String `tfsdk:"network_security_custom_endpoint"` NetworkServicesCustomEndpoint types.String `tfsdk:"network_services_custom_endpoint"` NotebooksCustomEndpoint types.String `tfsdk:"notebooks_custom_endpoint"` + OracleDatabaseCustomEndpoint types.String `tfsdk:"oracle_database_custom_endpoint"` OrgPolicyCustomEndpoint types.String `tfsdk:"org_policy_custom_endpoint"` OSConfigCustomEndpoint types.String `tfsdk:"os_config_custom_endpoint"` OSLoginCustomEndpoint types.String `tfsdk:"os_login_custom_endpoint"` PrivatecaCustomEndpoint types.String `tfsdk:"privateca_custom_endpoint"` + PrivilegedAccessManagerCustomEndpoint types.String `tfsdk:"privileged_access_manager_custom_endpoint"` PublicCACustomEndpoint types.String `tfsdk:"public_ca_custom_endpoint"` PubsubCustomEndpoint types.String `tfsdk:"pubsub_custom_endpoint"` PubsubLiteCustomEndpoint types.String `tfsdk:"pubsub_lite_custom_endpoint"` RedisCustomEndpoint types.String `tfsdk:"redis_custom_endpoint"` ResourceManagerCustomEndpoint types.String `tfsdk:"resource_manager_custom_endpoint"` SecretManagerCustomEndpoint types.String `tfsdk:"secret_manager_custom_endpoint"` + SecretManagerRegionalCustomEndpoint types.String 
`tfsdk:"secret_manager_regional_custom_endpoint"` SecureSourceManagerCustomEndpoint types.String `tfsdk:"secure_source_manager_custom_endpoint"` SecurityCenterCustomEndpoint types.String `tfsdk:"security_center_custom_endpoint"` SecurityCenterManagementCustomEndpoint types.String `tfsdk:"security_center_management_custom_endpoint"` @@ -134,6 +138,7 @@ type ProviderModel struct { ServiceManagementCustomEndpoint types.String `tfsdk:"service_management_custom_endpoint"` ServiceNetworkingCustomEndpoint types.String `tfsdk:"service_networking_custom_endpoint"` ServiceUsageCustomEndpoint types.String `tfsdk:"service_usage_custom_endpoint"` + SiteVerificationCustomEndpoint types.String `tfsdk:"site_verification_custom_endpoint"` SourceRepoCustomEndpoint types.String `tfsdk:"source_repo_custom_endpoint"` SpannerCustomEndpoint types.String `tfsdk:"spanner_custom_endpoint"` SQLCustomEndpoint types.String `tfsdk:"sql_custom_endpoint"` @@ -142,6 +147,7 @@ type ProviderModel struct { StorageTransferCustomEndpoint types.String `tfsdk:"storage_transfer_custom_endpoint"` TagsCustomEndpoint types.String `tfsdk:"tags_custom_endpoint"` TPUCustomEndpoint types.String `tfsdk:"tpu_custom_endpoint"` + TranscoderCustomEndpoint types.String `tfsdk:"transcoder_custom_endpoint"` VertexAICustomEndpoint types.String `tfsdk:"vertex_ai_custom_endpoint"` VmwareengineCustomEndpoint types.String `tfsdk:"vmwareengine_custom_endpoint"` VPCAccessCustomEndpoint types.String `tfsdk:"vpc_access_custom_endpoint"` diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwprovider/data_source_provider_config_plugin_framework.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwprovider/data_source_provider_config_plugin_framework.go new file mode 100644 index 00000000000..ff499b7f43a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwprovider/data_source_provider_config_plugin_framework.go @@ -0,0 +1,233 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package fwprovider + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-google/google/fwmodels" + "github.com/hashicorp/terraform-provider-google/google/fwresource" + "github.com/hashicorp/terraform-provider-google/google/fwtransport" +) + +// Ensure the data source satisfies the expected interfaces. +var ( + _ datasource.DataSource = &GoogleProviderConfigPluginFrameworkDataSource{} + _ datasource.DataSourceWithConfigure = &GoogleProviderConfigPluginFrameworkDataSource{} + _ fwresource.LocationDescriber = &GoogleProviderConfigPluginFrameworkModel{} +) + +func NewGoogleProviderConfigPluginFrameworkDataSource() datasource.DataSource { + return &GoogleProviderConfigPluginFrameworkDataSource{} +} + +type GoogleProviderConfigPluginFrameworkDataSource struct { + providerConfig *fwtransport.FrameworkProviderConfig +} + +type GoogleProviderConfigPluginFrameworkModel struct { + // Currently this reflects the FrameworkProviderConfig struct and ProviderModel in google/fwmodels/provider_model.go + // which means it uses the plugin-framework type system where values can be explicitly Null or Unknown. 
+ // + // As part of future muxing fixes/refactoring we'll change this struct to reflect structs used in the SDK code, and will move to + // using the SDK type system. + Credentials types.String `tfsdk:"credentials"` + AccessToken types.String `tfsdk:"access_token"` + ImpersonateServiceAccount types.String `tfsdk:"impersonate_service_account"` + ImpersonateServiceAccountDelegates types.List `tfsdk:"impersonate_service_account_delegates"` + Project types.String `tfsdk:"project"` + BillingProject types.String `tfsdk:"billing_project"` + Region types.String `tfsdk:"region"` + Zone types.String `tfsdk:"zone"` + Scopes types.List `tfsdk:"scopes"` + // omit Batching + UserProjectOverride types.Bool `tfsdk:"user_project_override"` + RequestTimeout types.String `tfsdk:"request_timeout"` + RequestReason types.String `tfsdk:"request_reason"` + UniverseDomain types.String `tfsdk:"universe_domain"` + DefaultLabels types.Map `tfsdk:"default_labels"` + AddTerraformAttributionLabel types.Bool `tfsdk:"add_terraform_attribution_label"` + TerraformAttributionLabelAdditionStrategy types.String `tfsdk:"terraform_attribution_label_addition_strategy"` +} + +func (m *GoogleProviderConfigPluginFrameworkModel) GetLocationDescription(providerConfig *fwtransport.FrameworkProviderConfig) fwresource.LocationDescription { + return fwresource.LocationDescription{ + RegionSchemaField: types.StringValue("region"), + ZoneSchemaField: types.StringValue("zone"), + ProviderRegion: providerConfig.Region, + ProviderZone: providerConfig.Zone, + } +} + +func (d *GoogleProviderConfigPluginFrameworkDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_provider_config_plugin_framework" +} + +func (d *GoogleProviderConfigPluginFrameworkDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + + resp.Schema = schema.Schema{ + + Description: "Use this data source to access the configuration of the Google Cloud provider. This data source is implemented with the SDK.", + MarkdownDescription: "Use this data source to access the configuration of the Google Cloud provider. 
This data source is implemented with the SDK.", + Attributes: map[string]schema.Attribute{ + // Start of user inputs + "access_token": schema.StringAttribute{ + Description: "The access_token argument used to configure the provider", + MarkdownDescription: "The access_token argument used to configure the provider", + Computed: true, + Sensitive: true, + }, + "credentials": schema.StringAttribute{ + Description: "The credentials argument used to configure the provider", + MarkdownDescription: "The credentials argument used to configure the provider", + Computed: true, + Sensitive: true, + }, + "impersonate_service_account": schema.StringAttribute{ + Description: "The impersonate_service_account argument used to configure the provider", + MarkdownDescription: "The impersonate_service_account argument used to configure the provider.", + Computed: true, + }, + "impersonate_service_account_delegates": schema.ListAttribute{ + ElementType: types.StringType, + Description: "The impersonate_service_account_delegates argument used to configure the provider", + MarkdownDescription: "The impersonate_service_account_delegates argument used to configure the provider.", + Computed: true, + }, + "project": schema.StringAttribute{ + Description: "The project argument used to configure the provider", + MarkdownDescription: "The project argument used to configure the provider.", + Computed: true, + }, + "region": schema.StringAttribute{ + Description: "The region argument used to configure the provider.", + MarkdownDescription: "The region argument used to configure the provider.", + Computed: true, + }, + "billing_project": schema.StringAttribute{ + Description: "The billing_project argument used to configure the provider.", + MarkdownDescription: "The billing_project argument used to configure the provider.", + Computed: true, + }, + "zone": schema.StringAttribute{ + Description: "The zone argument used to configure the provider.", + MarkdownDescription: "The zone argument used to configure the provider.", + Computed: true, + }, + "universe_domain": schema.StringAttribute{ + Description: "The universe_domain argument used to configure the provider.", + MarkdownDescription: "The universe_domain argument used to configure the provider.", + Computed: true, + }, + "scopes": schema.ListAttribute{ + ElementType: types.StringType, + Description: "The scopes argument used to configure the provider.", + MarkdownDescription: "The scopes argument used to configure the provider.", + Computed: true, + }, + "user_project_override": schema.BoolAttribute{ + Description: "The user_project_override argument used to configure the provider.", + MarkdownDescription: "The user_project_override argument used to configure the provider.", + Computed: true, + }, + "request_reason": schema.StringAttribute{ + Description: "The request_reason argument used to configure the provider.", + MarkdownDescription: "The request_reason argument used to configure the provider.", + Computed: true, + }, + "request_timeout": schema.StringAttribute{ + Description: "The request_timeout argument used to configure the provider.", + MarkdownDescription: "The request_timeout argument used to configure the provider.", + Computed: true, + }, + "default_labels": schema.MapAttribute{ + ElementType: types.StringType, + Description: "The default_labels argument used to configure the provider.", + MarkdownDescription: "The default_labels argument used to configure the provider.", + Computed: true, + }, + "add_terraform_attribution_label": schema.BoolAttribute{ 
+ Description: "The add_terraform_attribution_label argument used to configure the provider.", + MarkdownDescription: "The add_terraform_attribution_label argument used to configure the provider.", + Computed: true, + }, + "terraform_attribution_label_addition_strategy": schema.StringAttribute{ + Description: "The terraform_attribution_label_addition_strategy argument used to configure the provider.", + MarkdownDescription: "The terraform_attribution_label_addition_strategy argument used to configure the provider.", + Computed: true, + }, + // End of user inputs + + // Note - this data source excludes the default and custom endpoints for individual services + }, + } +} + +func (d *GoogleProviderConfigPluginFrameworkDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + p, ok := req.ProviderData.(*fwtransport.FrameworkProviderConfig) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *fwtransport.FrameworkProviderConfig, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + + // Required for accessing project, region, zone and tokenSource + d.providerConfig = p +} + +func (d *GoogleProviderConfigPluginFrameworkDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data GoogleProviderConfigPluginFrameworkModel + var metaData *fwmodels.ProviderMetaModel + + // Read Provider meta into the meta model + resp.Diagnostics.Append(req.ProviderMeta.Get(ctx, &metaData)...) + if resp.Diagnostics.HasError() { + return + } + + // Read Terraform configuration data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + // Copy all values from the provider config into this data source + + data.Credentials = d.providerConfig.Credentials + data.AccessToken = d.providerConfig.AccessToken + data.ImpersonateServiceAccount = d.providerConfig.ImpersonateServiceAccount + data.ImpersonateServiceAccountDelegates = d.providerConfig.ImpersonateServiceAccountDelegates + data.Project = d.providerConfig.Project + data.Region = d.providerConfig.Region + data.BillingProject = d.providerConfig.BillingProject + data.Zone = d.providerConfig.Zone + data.UniverseDomain = d.providerConfig.UniverseDomain + data.Scopes = d.providerConfig.Scopes + data.UserProjectOverride = d.providerConfig.UserProjectOverride + data.RequestReason = d.providerConfig.RequestReason + data.RequestTimeout = d.providerConfig.RequestTimeout + data.DefaultLabels = d.providerConfig.DefaultLabels + data.AddTerraformAttributionLabel = d.providerConfig.AddTerraformAttributionLabel + data.TerraformAttributionLabelAdditionStrategy = d.providerConfig.TerraformAttributionLabelAdditionStrategy + + // Warn users against using this data source + resp.Diagnostics.Append(diag.NewWarningDiagnostic( + "Data source google_provider_config_plugin_framework should not be used", + "Data source google_provider_config_plugin_framework is intended to be used only in acceptance tests for the provider. Instead, please use the google_client_config data source to access provider configuration details, or open a GitHub issue requesting new features in that datasource. 
Please go to: https://github.com/hashicorp/terraform-provider-google/issues/new/choose", + )) + + // Save data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwprovider/framework_provider.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwprovider/framework_provider.go index d54ae812900..14635ab88b6 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwprovider/framework_provider.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwprovider/framework_provider.go @@ -20,20 +20,22 @@ import ( "github.com/hashicorp/terraform-provider-google/google/fwmodels" "github.com/hashicorp/terraform-provider-google/google/fwtransport" "github.com/hashicorp/terraform-provider-google/google/services/resourcemanager" + "github.com/hashicorp/terraform-provider-google/version" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) // Ensure the implementation satisfies the expected interfaces var ( + _ provider.Provider = &FrameworkProvider{} _ provider.ProviderWithMetaSchema = &FrameworkProvider{} _ provider.ProviderWithFunctions = &FrameworkProvider{} ) // New is a helper function to simplify provider server and testing implementation. -func New(version string) provider.ProviderWithMetaSchema { +func New() provider.ProviderWithMetaSchema { return &FrameworkProvider{ - Version: version, + Version: version.ProviderVersion, } } @@ -43,7 +45,9 @@ type FrameworkProvider struct { Version string } -// Metadata returns the provider type name. +// Metadata returns +// - the provider type name : this controls how "google" is present at the start of all resource type names +// - the provider version : this is currently unused by Terraform core func (p *FrameworkProvider) Metadata(_ context.Context, _ provider.MetadataRequest, resp *provider.MetadataResponse) { resp.TypeName = "google" resp.Version = p.Version @@ -432,12 +436,6 @@ func (p *FrameworkProvider) Schema(_ context.Context, _ provider.SchemaRequest, transport_tpg.CustomEndpointValidator(), }, }, - "datastore_custom_endpoint": &schema.StringAttribute{ - Optional: true, - Validators: []validator.String{ - transport_tpg.CustomEndpointValidator(), - }, - }, "datastream_custom_endpoint": &schema.StringAttribute{ Optional: true, Validators: []validator.String{ @@ -672,6 +670,12 @@ func (p *FrameworkProvider) Schema(_ context.Context, _ provider.SchemaRequest, transport_tpg.CustomEndpointValidator(), }, }, + "oracle_database_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, "org_policy_custom_endpoint": &schema.StringAttribute{ Optional: true, Validators: []validator.String{ @@ -696,6 +700,12 @@ func (p *FrameworkProvider) Schema(_ context.Context, _ provider.SchemaRequest, transport_tpg.CustomEndpointValidator(), }, }, + "privileged_access_manager_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, "public_ca_custom_endpoint": &schema.StringAttribute{ Optional: true, Validators: []validator.String{ @@ -732,6 +742,12 @@ func (p *FrameworkProvider) Schema(_ context.Context, _ provider.SchemaRequest, transport_tpg.CustomEndpointValidator(), }, }, + 
"secret_manager_regional_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, "secure_source_manager_custom_endpoint": &schema.StringAttribute{ Optional: true, Validators: []validator.String{ @@ -780,6 +796,12 @@ func (p *FrameworkProvider) Schema(_ context.Context, _ provider.SchemaRequest, transport_tpg.CustomEndpointValidator(), }, }, + "site_verification_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, "source_repo_custom_endpoint": &schema.StringAttribute{ Optional: true, Validators: []validator.String{ @@ -828,6 +850,12 @@ func (p *FrameworkProvider) Schema(_ context.Context, _ provider.SchemaRequest, transport_tpg.CustomEndpointValidator(), }, }, + "transcoder_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, "vertex_ai_custom_endpoint": &schema.StringAttribute{ Optional: true, Validators: []validator.String{ @@ -939,7 +967,10 @@ func (p *FrameworkProvider) Schema(_ context.Context, _ provider.SchemaRequest, transport_tpg.ConfigureDCLCustomEndpointAttributesFramework(&resp.Schema) } -// Configure prepares an API client for data sources and resources. +// Configure prepares the metadata/'meta' required for data sources and resources to function. +// Configuration logic implemented here should take user inputs and use them to populate a struct +// with that necessary metadata, e.g. default project value, configured client, etc. +// That prepared 'meta' struct is then returned in the response, and that value will later be supplied to all resources/data sources when they need to configure themselves. func (p *FrameworkProvider) Configure(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) { var data fwmodels.ProviderModel @@ -955,7 +986,8 @@ func (p *FrameworkProvider) Configure(ctx context.Context, req provider.Configur return } - // Example client configuration for data sources and resources + // This is how we make provider configuration info (configured clients, default project, etc) available to resources and data sources + // implemented using the plugin-framework. The resources' Configure functions receive this data in the ConfigureRequest argument. 
resp.DataSourceData = &p.FrameworkProviderConfig resp.ResourceData = &p.FrameworkProviderConfig } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwresource/field_helpers.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwresource/field_helpers.go index 09bd59a4556..78e015085a7 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwresource/field_helpers.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwresource/field_helpers.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/fwtransport" "github.com/hashicorp/terraform-provider-google/google/tpgresource" ) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwtransport/framework_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwtransport/framework_config.go index 24bbcdfb6d4..012e6e7735e 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwtransport/framework_config.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwtransport/framework_config.go @@ -34,6 +34,18 @@ import ( ) type FrameworkProviderConfig struct { + // Temporary, as we'll replace use of FrameworkProviderConfig with transport_tpg.Config soon + // transport_tpg.Config has the fields below, hence these changes are needed + Credentials types.String + AccessToken types.String + ImpersonateServiceAccount types.String + ImpersonateServiceAccountDelegates types.List + RequestReason types.String + RequestTimeout types.String + AddTerraformAttributionLabel types.Bool + TerraformAttributionLabelAdditionStrategy types.String + // End temporary + BillingProject types.String Client *http.Client Context context.Context @@ -49,6 +61,7 @@ type FrameworkProviderConfig struct { UniverseDomain types.String UserAgent string UserProjectOverride types.Bool + DefaultLabels types.Map // paths for client setup AccessApprovalBasePath string @@ -99,7 +112,6 @@ type FrameworkProviderConfig struct { DataplexBasePath string DataprocBasePath string DataprocMetastoreBasePath string - DatastoreBasePath string DatastreamBasePath string DeploymentManagerBasePath string DialogflowBasePath string @@ -139,16 +151,19 @@ type FrameworkProviderConfig struct { NetworkSecurityBasePath string NetworkServicesBasePath string NotebooksBasePath string + OracleDatabaseBasePath string OrgPolicyBasePath string OSConfigBasePath string OSLoginBasePath string PrivatecaBasePath string + PrivilegedAccessManagerBasePath string PublicCABasePath string PubsubBasePath string PubsubLiteBasePath string RedisBasePath string ResourceManagerBasePath string SecretManagerBasePath string + SecretManagerRegionalBasePath string SecureSourceManagerBasePath string SecurityCenterBasePath string SecurityCenterManagementBasePath string @@ -157,6 +172,7 @@ type FrameworkProviderConfig struct { ServiceManagementBasePath string ServiceNetworkingBasePath string ServiceUsageBasePath string + SiteVerificationBasePath string SourceRepoBasePath string SpannerBasePath string SQLBasePath string @@ -165,6 +181,7
@@ type FrameworkProviderConfig struct { StorageTransferBasePath string TagsBasePath string TPUBasePath string + TranscoderBasePath string VertexAIBasePath string VmwareengineBasePath string VPCAccessBasePath string @@ -257,7 +274,6 @@ func (p *FrameworkProviderConfig) LoadAndValidateFramework(ctx context.Context, p.DataplexBasePath = data.DataplexCustomEndpoint.ValueString() p.DataprocBasePath = data.DataprocCustomEndpoint.ValueString() p.DataprocMetastoreBasePath = data.DataprocMetastoreCustomEndpoint.ValueString() - p.DatastoreBasePath = data.DatastoreCustomEndpoint.ValueString() p.DatastreamBasePath = data.DatastreamCustomEndpoint.ValueString() p.DeploymentManagerBasePath = data.DeploymentManagerCustomEndpoint.ValueString() p.DialogflowBasePath = data.DialogflowCustomEndpoint.ValueString() @@ -297,16 +313,19 @@ func (p *FrameworkProviderConfig) LoadAndValidateFramework(ctx context.Context, p.NetworkSecurityBasePath = data.NetworkSecurityCustomEndpoint.ValueString() p.NetworkServicesBasePath = data.NetworkServicesCustomEndpoint.ValueString() p.NotebooksBasePath = data.NotebooksCustomEndpoint.ValueString() + p.OracleDatabaseBasePath = data.OracleDatabaseCustomEndpoint.ValueString() p.OrgPolicyBasePath = data.OrgPolicyCustomEndpoint.ValueString() p.OSConfigBasePath = data.OSConfigCustomEndpoint.ValueString() p.OSLoginBasePath = data.OSLoginCustomEndpoint.ValueString() p.PrivatecaBasePath = data.PrivatecaCustomEndpoint.ValueString() + p.PrivilegedAccessManagerBasePath = data.PrivilegedAccessManagerCustomEndpoint.ValueString() p.PublicCABasePath = data.PublicCACustomEndpoint.ValueString() p.PubsubBasePath = data.PubsubCustomEndpoint.ValueString() p.PubsubLiteBasePath = data.PubsubLiteCustomEndpoint.ValueString() p.RedisBasePath = data.RedisCustomEndpoint.ValueString() p.ResourceManagerBasePath = data.ResourceManagerCustomEndpoint.ValueString() p.SecretManagerBasePath = data.SecretManagerCustomEndpoint.ValueString() + p.SecretManagerRegionalBasePath = data.SecretManagerRegionalCustomEndpoint.ValueString() p.SecureSourceManagerBasePath = data.SecureSourceManagerCustomEndpoint.ValueString() p.SecurityCenterBasePath = data.SecurityCenterCustomEndpoint.ValueString() p.SecurityCenterManagementBasePath = data.SecurityCenterManagementCustomEndpoint.ValueString() @@ -315,6 +334,7 @@ func (p *FrameworkProviderConfig) LoadAndValidateFramework(ctx context.Context, p.ServiceManagementBasePath = data.ServiceManagementCustomEndpoint.ValueString() p.ServiceNetworkingBasePath = data.ServiceNetworkingCustomEndpoint.ValueString() p.ServiceUsageBasePath = data.ServiceUsageCustomEndpoint.ValueString() + p.SiteVerificationBasePath = data.SiteVerificationCustomEndpoint.ValueString() p.SourceRepoBasePath = data.SourceRepoCustomEndpoint.ValueString() p.SpannerBasePath = data.SpannerCustomEndpoint.ValueString() p.SQLBasePath = data.SQLCustomEndpoint.ValueString() @@ -323,21 +343,35 @@ func (p *FrameworkProviderConfig) LoadAndValidateFramework(ctx context.Context, p.StorageTransferBasePath = data.StorageTransferCustomEndpoint.ValueString() p.TagsBasePath = data.TagsCustomEndpoint.ValueString() p.TPUBasePath = data.TPUCustomEndpoint.ValueString() + p.TranscoderBasePath = data.TranscoderCustomEndpoint.ValueString() p.VertexAIBasePath = data.VertexAICustomEndpoint.ValueString() p.VmwareengineBasePath = data.VmwareengineCustomEndpoint.ValueString() p.VPCAccessBasePath = data.VPCAccessCustomEndpoint.ValueString() p.WorkbenchBasePath = data.WorkbenchCustomEndpoint.ValueString() p.WorkflowsBasePath = 
data.WorkflowsCustomEndpoint.ValueString() + // Temporary + p.Credentials = data.Credentials + p.AccessToken = data.AccessToken + p.ImpersonateServiceAccount = data.ImpersonateServiceAccount + p.ImpersonateServiceAccountDelegates = data.ImpersonateServiceAccountDelegates + p.RequestReason = data.RequestReason + p.RequestTimeout = data.RequestTimeout + p.AddTerraformAttributionLabel = data.AddTerraformAttributionLabel + p.TerraformAttributionLabelAdditionStrategy = data.TerraformAttributionLabelAdditionStrategy + // End temporary + + // Copy values from the ProviderModel struct containing data about the provider configuration (present only when responding to ConfigureProvider RPC calls) + // to the FrameworkProviderConfig struct that will be passed and available to all resources/data sources p.Context = ctx p.BillingProject = data.BillingProject + p.DefaultLabels = data.DefaultLabels p.Project = data.Project p.Region = GetRegionFromRegionSelfLink(data.Region) p.Scopes = data.Scopes p.Zone = data.Zone p.UserProjectOverride = data.UserProjectOverride p.PollInterval = 10 * time.Second - p.Project = data.Project p.UniverseDomain = data.UniverseDomain p.RequestBatcherServiceUsage = transport_tpg.NewRequestBatcher("Service Usage", ctx, batchingConfig) p.RequestBatcherIam = transport_tpg.NewRequestBatcher("IAM", ctx, batchingConfig) @@ -839,14 +873,6 @@ func (p *FrameworkProviderConfig) HandleDefaults(ctx context.Context, data *fwmo data.DataprocMetastoreCustomEndpoint = types.StringValue(customEndpoint.(string)) } } - if data.DatastoreCustomEndpoint.IsNull() { - customEndpoint := transport_tpg.MultiEnvDefault([]string{ - "GOOGLE_DATASTORE_CUSTOM_ENDPOINT", - }, transport_tpg.DefaultBasePaths[transport_tpg.DatastoreBasePathKey]) - if customEndpoint != nil { - data.DatastoreCustomEndpoint = types.StringValue(customEndpoint.(string)) - } - } if data.DatastreamCustomEndpoint.IsNull() { customEndpoint := transport_tpg.MultiEnvDefault([]string{ "GOOGLE_DATASTREAM_CUSTOM_ENDPOINT", @@ -1159,6 +1185,14 @@ func (p *FrameworkProviderConfig) HandleDefaults(ctx context.Context, data *fwmo data.NotebooksCustomEndpoint = types.StringValue(customEndpoint.(string)) } } + if data.OracleDatabaseCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_ORACLE_DATABASE_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.OracleDatabaseBasePathKey]) + if customEndpoint != nil { + data.OracleDatabaseCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } if data.OrgPolicyCustomEndpoint.IsNull() { customEndpoint := transport_tpg.MultiEnvDefault([]string{ "GOOGLE_ORG_POLICY_CUSTOM_ENDPOINT", @@ -1191,6 +1225,14 @@ func (p *FrameworkProviderConfig) HandleDefaults(ctx context.Context, data *fwmo data.PrivatecaCustomEndpoint = types.StringValue(customEndpoint.(string)) } } + if data.PrivilegedAccessManagerCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_PRIVILEGED_ACCESS_MANAGER_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.PrivilegedAccessManagerBasePathKey]) + if customEndpoint != nil { + data.PrivilegedAccessManagerCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } if data.PublicCACustomEndpoint.IsNull() { customEndpoint := transport_tpg.MultiEnvDefault([]string{ "GOOGLE_PUBLIC_CA_CUSTOM_ENDPOINT", @@ -1239,6 +1281,14 @@ func (p *FrameworkProviderConfig) HandleDefaults(ctx context.Context, data *fwmo data.SecretManagerCustomEndpoint =
types.StringValue(customEndpoint.(string)) } } + if data.SecretManagerRegionalCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_SECRET_MANAGER_REGIONAL_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.SecretManagerRegionalBasePathKey]) + if customEndpoint != nil { + data.SecretManagerRegionalCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } if data.SecureSourceManagerCustomEndpoint.IsNull() { customEndpoint := transport_tpg.MultiEnvDefault([]string{ "GOOGLE_SECURE_SOURCE_MANAGER_CUSTOM_ENDPOINT", @@ -1303,6 +1353,14 @@ func (p *FrameworkProviderConfig) HandleDefaults(ctx context.Context, data *fwmo data.ServiceUsageCustomEndpoint = types.StringValue(customEndpoint.(string)) } } + if data.SiteVerificationCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_SITE_VERIFICATION_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.SiteVerificationBasePathKey]) + if customEndpoint != nil { + data.SiteVerificationCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } if data.SourceRepoCustomEndpoint.IsNull() { customEndpoint := transport_tpg.MultiEnvDefault([]string{ "GOOGLE_SOURCE_REPO_CUSTOM_ENDPOINT", @@ -1367,6 +1425,14 @@ func (p *FrameworkProviderConfig) HandleDefaults(ctx context.Context, data *fwmo data.TPUCustomEndpoint = types.StringValue(customEndpoint.(string)) } } + if data.TranscoderCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_TRANSCODER_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.TranscoderBasePathKey]) + if customEndpoint != nil { + data.TranscoderCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } if data.VertexAICustomEndpoint.IsNull() { customEndpoint := transport_tpg.MultiEnvDefault([]string{ "GOOGLE_VERTEX_AI_CUSTOM_ENDPOINT", @@ -1781,6 +1847,10 @@ func GetCredentials(ctx context.Context, data fwmodels.ProviderModel, initialCre diags.AddError(fmt.Sprintf("error loading credentials: %s", err), err.Error()) return googleoauth.Credentials{} } + if len(contents) == 0 { + diags.AddError("error loading credentials", "provided credentials are empty") + return googleoauth.Credentials{} + } if !data.ImpersonateServiceAccount.IsNull() && !initialCredentialsOnly { opts := []option.ClientOption{option.WithCredentialsJSON([]byte(contents)), option.ImpersonateCredentials(data.ImpersonateServiceAccount.ValueString(), delegates...), option.WithScopes(clientScopes...)} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/data_source_provider_config_sdk.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/data_source_provider_config_sdk.go new file mode 100644 index 00000000000..e97b084c858 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/data_source_provider_config_sdk.go @@ -0,0 +1,166 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package provider + +import ( + "crypto/sha1" + "encoding/base64" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleProviderConfigSdk() *schema.Resource { + return &schema.Resource{ + DeprecationMessage: "Data source google_provider_config_sdk is intended to be used only in acceptance tests for the provider. Instead, please use the google_client_config data source to access provider configuration details, or open a GitHub issue requesting new features in that datasource. Please go to: https://github.com/hashicorp/terraform-provider-google/issues/new/choose", + Read: dataSourceClientConfigRead, + Schema: map[string]*schema.Schema{ + // Start of user inputs + "access_token": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "credentials": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "impersonate_service_account": { + Type: schema.TypeString, + Computed: true, + }, + "impersonate_service_account_delegates": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "project": { + Type: schema.TypeString, + Computed: true, + }, + "region": { + Type: schema.TypeString, + Computed: true, + }, + "billing_project": { + Type: schema.TypeString, + Computed: true, + }, + "zone": { + Type: schema.TypeString, + Computed: true, + }, + "universe_domain": { + Type: schema.TypeString, + Computed: true, + }, + "scopes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "user_project_override": { + Type: schema.TypeBool, + Computed: true, + }, + "request_reason": { + Type: schema.TypeString, + Computed: true, + }, + "request_timeout": { + Type: schema.TypeString, + Computed: true, + }, + "default_labels": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "add_terraform_attribution_label": { + Type: schema.TypeBool, + Computed: true, + }, + "terraform_attribution_label_addition_strategy": { + Type: schema.TypeString, + Computed: true, + }, + // End of user inputs + + // Note - this data source excludes the default and custom endpoints for individual services + + // Start of values set during provider configuration + "user_agent": { + Type: schema.TypeString, + Computed: true, + }, + // End of values set during provider configuration + }, + } +} + +func dataSourceClientConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + if err := d.Set("access_token", config.AccessToken); err != nil { + return fmt.Errorf("error setting access_token: %s", err) + } + if err := d.Set("credentials", config.Credentials); err != nil { + return fmt.Errorf("error setting credentials: %s", err) + } + if err := d.Set("impersonate_service_account", config.ImpersonateServiceAccount); err != nil { + return fmt.Errorf("error setting impersonate_service_account: %s", err) + } + if err := d.Set("impersonate_service_account_delegates", config.ImpersonateServiceAccountDelegates); err != nil { + return fmt.Errorf("error setting impersonate_service_account_delegates: %s", err) + } + if err := d.Set("project", config.Project); err != nil { + return fmt.Errorf("error setting project: %s", err) + } + if err := d.Set("region", config.Region); err != nil { + return fmt.Errorf("error setting region: %s", err) + } + if err := 
d.Set("billing_project", config.BillingProject); err != nil { + return fmt.Errorf("error setting billing_project: %s", err) + } + if err := d.Set("zone", config.Zone); err != nil { + return fmt.Errorf("error setting zone: %s", err) + } + if err := d.Set("universe_domain", config.UniverseDomain); err != nil { + return fmt.Errorf("error setting universe_domain: %s", err) + } + if err := d.Set("scopes", config.Scopes); err != nil { + return fmt.Errorf("error setting scopes: %s", err) + } + if err := d.Set("user_project_override", config.UserProjectOverride); err != nil { + return fmt.Errorf("error setting user_project_override: %s", err) + } + if err := d.Set("request_reason", config.RequestReason); err != nil { + return fmt.Errorf("error setting request_reason: %s", err) + } + if err := d.Set("request_timeout", config.RequestTimeout.String()); err != nil { + return fmt.Errorf("error setting request_timeout: %s", err) + } + if err := d.Set("default_labels", config.DefaultLabels); err != nil { + return fmt.Errorf("error setting default_labels: %s", err) + } + if err := d.Set("add_terraform_attribution_label", config.AddTerraformAttributionLabel); err != nil { + return fmt.Errorf("error setting add_terraform_attribution_label: %s", err) + } + if err := d.Set("terraform_attribution_label_addition_strategy", config.TerraformAttributionLabelAdditionStrategy); err != nil { + return fmt.Errorf("error setting terraform_attribution_label_addition_strategy: %s", err) + } + if err := d.Set("user_agent", config.UserAgent); err != nil { + return fmt.Errorf("error setting user_agent: %s", err) + } + + // Id is a hash of the total transport.Config struct + configString := []byte(fmt.Sprintf("%#v", config)) + hasher := sha1.New() + hasher.Write(configString) + sha := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + d.SetId(string(sha)) + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/provider.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/provider.go index 5e3f70fea31..9d361ed473e 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/provider.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/provider.go @@ -136,6 +136,7 @@ func Provider() *schema.Provider { "add_terraform_attribution_label": { Type: schema.TypeBool, Optional: true, + Default: true, }, "terraform_attribution_label_addition_strategy": { @@ -384,11 +385,6 @@ func Provider() *schema.Provider { Optional: true, ValidateFunc: transport_tpg.ValidateCustomEndpoint, }, - "datastore_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: transport_tpg.ValidateCustomEndpoint, - }, "datastream_custom_endpoint": { Type: schema.TypeString, Optional: true, @@ -584,6 +580,11 @@ func Provider() *schema.Provider { Optional: true, ValidateFunc: transport_tpg.ValidateCustomEndpoint, }, + "oracle_database_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, "org_policy_custom_endpoint": { Type: schema.TypeString, Optional: true, @@ -604,6 +605,11 @@ func Provider() *schema.Provider { Optional: true, ValidateFunc: transport_tpg.ValidateCustomEndpoint, }, + "privileged_access_manager_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, 
"public_ca_custom_endpoint": { Type: schema.TypeString, Optional: true, @@ -634,6 +640,11 @@ func Provider() *schema.Provider { Optional: true, ValidateFunc: transport_tpg.ValidateCustomEndpoint, }, + "secret_manager_regional_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, "secure_source_manager_custom_endpoint": { Type: schema.TypeString, Optional: true, @@ -674,6 +685,11 @@ func Provider() *schema.Provider { Optional: true, ValidateFunc: transport_tpg.ValidateCustomEndpoint, }, + "site_verification_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, "source_repo_custom_endpoint": { Type: schema.TypeString, Optional: true, @@ -714,6 +730,11 @@ func Provider() *schema.Provider { Optional: true, ValidateFunc: transport_tpg.ValidateCustomEndpoint, }, + "transcoder_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, "vertex_ai_custom_endpoint": { Type: schema.TypeString, Optional: true, @@ -910,7 +931,6 @@ func ProviderConfigure(ctx context.Context, d *schema.ResourceData, p *schema.Pr config.DefaultLabels[k] = v.(string) } - // Attribution label is opt-in; if unset, the default for AddTerraformAttributionLabel is false. config.AddTerraformAttributionLabel = d.Get("add_terraform_attribution_label").(bool) if config.AddTerraformAttributionLabel { config.TerraformAttributionLabelAdditionStrategy = transport_tpg.CreateOnlyAttributionStrategy @@ -979,7 +999,6 @@ func ProviderConfigure(ctx context.Context, d *schema.ResourceData, p *schema.Pr config.DataplexBasePath = d.Get("dataplex_custom_endpoint").(string) config.DataprocBasePath = d.Get("dataproc_custom_endpoint").(string) config.DataprocMetastoreBasePath = d.Get("dataproc_metastore_custom_endpoint").(string) - config.DatastoreBasePath = d.Get("datastore_custom_endpoint").(string) config.DatastreamBasePath = d.Get("datastream_custom_endpoint").(string) config.DeploymentManagerBasePath = d.Get("deployment_manager_custom_endpoint").(string) config.DialogflowBasePath = d.Get("dialogflow_custom_endpoint").(string) @@ -1019,16 +1038,19 @@ func ProviderConfigure(ctx context.Context, d *schema.ResourceData, p *schema.Pr config.NetworkSecurityBasePath = d.Get("network_security_custom_endpoint").(string) config.NetworkServicesBasePath = d.Get("network_services_custom_endpoint").(string) config.NotebooksBasePath = d.Get("notebooks_custom_endpoint").(string) + config.OracleDatabaseBasePath = d.Get("oracle_database_custom_endpoint").(string) config.OrgPolicyBasePath = d.Get("org_policy_custom_endpoint").(string) config.OSConfigBasePath = d.Get("os_config_custom_endpoint").(string) config.OSLoginBasePath = d.Get("os_login_custom_endpoint").(string) config.PrivatecaBasePath = d.Get("privateca_custom_endpoint").(string) + config.PrivilegedAccessManagerBasePath = d.Get("privileged_access_manager_custom_endpoint").(string) config.PublicCABasePath = d.Get("public_ca_custom_endpoint").(string) config.PubsubBasePath = d.Get("pubsub_custom_endpoint").(string) config.PubsubLiteBasePath = d.Get("pubsub_lite_custom_endpoint").(string) config.RedisBasePath = d.Get("redis_custom_endpoint").(string) config.ResourceManagerBasePath = d.Get("resource_manager_custom_endpoint").(string) config.SecretManagerBasePath = d.Get("secret_manager_custom_endpoint").(string) + config.SecretManagerRegionalBasePath = 
d.Get("secret_manager_regional_custom_endpoint").(string) config.SecureSourceManagerBasePath = d.Get("secure_source_manager_custom_endpoint").(string) config.SecurityCenterBasePath = d.Get("security_center_custom_endpoint").(string) config.SecurityCenterManagementBasePath = d.Get("security_center_management_custom_endpoint").(string) @@ -1037,6 +1059,7 @@ func ProviderConfigure(ctx context.Context, d *schema.ResourceData, p *schema.Pr config.ServiceManagementBasePath = d.Get("service_management_custom_endpoint").(string) config.ServiceNetworkingBasePath = d.Get("service_networking_custom_endpoint").(string) config.ServiceUsageBasePath = d.Get("service_usage_custom_endpoint").(string) + config.SiteVerificationBasePath = d.Get("site_verification_custom_endpoint").(string) config.SourceRepoBasePath = d.Get("source_repo_custom_endpoint").(string) config.SpannerBasePath = d.Get("spanner_custom_endpoint").(string) config.SQLBasePath = d.Get("sql_custom_endpoint").(string) @@ -1045,6 +1068,7 @@ func ProviderConfigure(ctx context.Context, d *schema.ResourceData, p *schema.Pr config.StorageTransferBasePath = d.Get("storage_transfer_custom_endpoint").(string) config.TagsBasePath = d.Get("tags_custom_endpoint").(string) config.TPUBasePath = d.Get("tpu_custom_endpoint").(string) + config.TranscoderBasePath = d.Get("transcoder_custom_endpoint").(string) config.VertexAIBasePath = d.Get("vertex_ai_custom_endpoint").(string) config.VmwareengineBasePath = d.Get("vmwareengine_custom_endpoint").(string) config.VPCAccessBasePath = d.Get("vpc_access_custom_endpoint").(string) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/provider_dcl_resources.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/provider_dcl_resources.go index b680f80e3f8..f4e329e697d 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/provider_dcl_resources.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/provider_dcl_resources.go @@ -32,39 +32,32 @@ import ( "github.com/hashicorp/terraform-provider-google/google/services/eventarc" "github.com/hashicorp/terraform-provider-google/google/services/firebaserules" "github.com/hashicorp/terraform-provider-google/google/services/gkehub" - "github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity" "github.com/hashicorp/terraform-provider-google/google/services/recaptchaenterprise" ) var dclResources = map[string]*schema.Resource{ - "google_apikeys_key": apikeys.ResourceApikeysKey(), - "google_assured_workloads_workload": assuredworkloads.ResourceAssuredWorkloadsWorkload(), - "google_cloudbuild_worker_pool": cloudbuild.ResourceCloudbuildWorkerPool(), - "google_clouddeploy_delivery_pipeline": clouddeploy.ResourceClouddeployDeliveryPipeline(), - "google_clouddeploy_target": clouddeploy.ResourceClouddeployTarget(), - "google_compute_firewall_policy": compute.ResourceComputeFirewallPolicy(), - "google_compute_firewall_policy_association": compute.ResourceComputeFirewallPolicyAssociation(), - "google_compute_firewall_policy_rule": compute.ResourceComputeFirewallPolicyRule(), - "google_compute_network_firewall_policy_association": compute.ResourceComputeNetworkFirewallPolicyAssociation(), - "google_compute_region_network_firewall_policy_association": compute.ResourceComputeRegionNetworkFirewallPolicyAssociation(), - 
"google_compute_network_firewall_policy_rule": compute.ResourceComputeNetworkFirewallPolicyRule(), - "google_compute_region_network_firewall_policy_rule": compute.ResourceComputeRegionNetworkFirewallPolicyRule(), - "google_container_aws_cluster": containeraws.ResourceContainerAwsCluster(), - "google_container_aws_node_pool": containeraws.ResourceContainerAwsNodePool(), - "google_container_azure_client": containerazure.ResourceContainerAzureClient(), - "google_container_azure_cluster": containerazure.ResourceContainerAzureCluster(), - "google_container_azure_node_pool": containerazure.ResourceContainerAzureNodePool(), - "google_dataplex_asset": dataplex.ResourceDataplexAsset(), - "google_dataplex_lake": dataplex.ResourceDataplexLake(), - "google_dataplex_zone": dataplex.ResourceDataplexZone(), - "google_dataproc_workflow_template": dataproc.ResourceDataprocWorkflowTemplate(), - "google_eventarc_channel": eventarc.ResourceEventarcChannel(), - "google_eventarc_google_channel_config": eventarc.ResourceEventarcGoogleChannelConfig(), - "google_eventarc_trigger": eventarc.ResourceEventarcTrigger(), - "google_firebaserules_release": firebaserules.ResourceFirebaserulesRelease(), - "google_firebaserules_ruleset": firebaserules.ResourceFirebaserulesRuleset(), - "google_gke_hub_feature_membership": gkehub.ResourceGkeHubFeatureMembership(), - "google_network_connectivity_hub": networkconnectivity.ResourceNetworkConnectivityHub(), - "google_network_connectivity_spoke": networkconnectivity.ResourceNetworkConnectivitySpoke(), - "google_recaptcha_enterprise_key": recaptchaenterprise.ResourceRecaptchaEnterpriseKey(), + "google_apikeys_key": apikeys.ResourceApikeysKey(), + "google_assured_workloads_workload": assuredworkloads.ResourceAssuredWorkloadsWorkload(), + "google_cloudbuild_worker_pool": cloudbuild.ResourceCloudbuildWorkerPool(), + "google_clouddeploy_delivery_pipeline": clouddeploy.ResourceClouddeployDeliveryPipeline(), + "google_clouddeploy_target": clouddeploy.ResourceClouddeployTarget(), + "google_compute_firewall_policy": compute.ResourceComputeFirewallPolicy(), + "google_compute_firewall_policy_association": compute.ResourceComputeFirewallPolicyAssociation(), + "google_compute_firewall_policy_rule": compute.ResourceComputeFirewallPolicyRule(), + "google_container_aws_cluster": containeraws.ResourceContainerAwsCluster(), + "google_container_aws_node_pool": containeraws.ResourceContainerAwsNodePool(), + "google_container_azure_client": containerazure.ResourceContainerAzureClient(), + "google_container_azure_cluster": containerazure.ResourceContainerAzureCluster(), + "google_container_azure_node_pool": containerazure.ResourceContainerAzureNodePool(), + "google_dataplex_asset": dataplex.ResourceDataplexAsset(), + "google_dataplex_lake": dataplex.ResourceDataplexLake(), + "google_dataplex_zone": dataplex.ResourceDataplexZone(), + "google_dataproc_workflow_template": dataproc.ResourceDataprocWorkflowTemplate(), + "google_eventarc_channel": eventarc.ResourceEventarcChannel(), + "google_eventarc_google_channel_config": eventarc.ResourceEventarcGoogleChannelConfig(), + "google_eventarc_trigger": eventarc.ResourceEventarcTrigger(), + "google_firebaserules_release": firebaserules.ResourceFirebaserulesRelease(), + "google_firebaserules_ruleset": firebaserules.ResourceFirebaserulesRuleset(), + "google_gke_hub_feature_membership": gkehub.ResourceGkeHubFeatureMembership(), + "google_recaptcha_enterprise_key": recaptchaenterprise.ResourceRecaptchaEnterpriseKey(), } diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/provider_mmv1_resources.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/provider_mmv1_resources.go index 9f83f9eae92..58b54298c34 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/provider_mmv1_resources.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/provider_mmv1_resources.go @@ -4,7 +4,6 @@ package provider import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-provider-google/google/services/accessapproval" "github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager" "github.com/hashicorp/terraform-provider-google/google/services/activedirectory" @@ -52,7 +51,6 @@ import ( "github.com/hashicorp/terraform-provider-google/google/services/dataplex" "github.com/hashicorp/terraform-provider-google/google/services/dataproc" "github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore" - "github.com/hashicorp/terraform-provider-google/google/services/datastore" "github.com/hashicorp/terraform-provider-google/google/services/datastream" "github.com/hashicorp/terraform-provider-google/google/services/deploymentmanager" "github.com/hashicorp/terraform-provider-google/google/services/dialogflow" @@ -92,16 +90,19 @@ import ( "github.com/hashicorp/terraform-provider-google/google/services/networksecurity" "github.com/hashicorp/terraform-provider-google/google/services/networkservices" "github.com/hashicorp/terraform-provider-google/google/services/notebooks" + "github.com/hashicorp/terraform-provider-google/google/services/oracledatabase" "github.com/hashicorp/terraform-provider-google/google/services/orgpolicy" "github.com/hashicorp/terraform-provider-google/google/services/osconfig" "github.com/hashicorp/terraform-provider-google/google/services/oslogin" "github.com/hashicorp/terraform-provider-google/google/services/privateca" + "github.com/hashicorp/terraform-provider-google/google/services/privilegedaccessmanager" "github.com/hashicorp/terraform-provider-google/google/services/publicca" "github.com/hashicorp/terraform-provider-google/google/services/pubsub" "github.com/hashicorp/terraform-provider-google/google/services/pubsublite" "github.com/hashicorp/terraform-provider-google/google/services/redis" "github.com/hashicorp/terraform-provider-google/google/services/resourcemanager" "github.com/hashicorp/terraform-provider-google/google/services/secretmanager" + "github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional" "github.com/hashicorp/terraform-provider-google/google/services/securesourcemanager" "github.com/hashicorp/terraform-provider-google/google/services/securitycenter" "github.com/hashicorp/terraform-provider-google/google/services/securitycentermanagement" @@ -109,6 +110,7 @@ import ( "github.com/hashicorp/terraform-provider-google/google/services/securityposture" "github.com/hashicorp/terraform-provider-google/google/services/servicemanagement" "github.com/hashicorp/terraform-provider-google/google/services/servicenetworking" + "github.com/hashicorp/terraform-provider-google/google/services/siteverification" "github.com/hashicorp/terraform-provider-google/google/services/sourcerepo" "github.com/hashicorp/terraform-provider-google/google/services/spanner" 
"github.com/hashicorp/terraform-provider-google/google/services/sql" @@ -117,6 +119,7 @@ import ( "github.com/hashicorp/terraform-provider-google/google/services/storagetransfer" "github.com/hashicorp/terraform-provider-google/google/services/tags" "github.com/hashicorp/terraform-provider-google/google/services/tpu" + "github.com/hashicorp/terraform-provider-google/google/services/transcoder" "github.com/hashicorp/terraform-provider-google/google/services/vertexai" "github.com/hashicorp/terraform-provider-google/google/services/vmwareengine" "github.com/hashicorp/terraform-provider-google/google/services/vpcaccess" @@ -134,180 +137,203 @@ import ( // Datasources var handwrittenDatasources = map[string]*schema.Resource{ // ####### START handwritten datasources ########### - "google_access_approval_folder_service_account": accessapproval.DataSourceAccessApprovalFolderServiceAccount(), - "google_access_approval_organization_service_account": accessapproval.DataSourceAccessApprovalOrganizationServiceAccount(), - "google_access_approval_project_service_account": accessapproval.DataSourceAccessApprovalProjectServiceAccount(), - "google_active_folder": resourcemanager.DataSourceGoogleActiveFolder(), - "google_alloydb_locations": alloydb.DataSourceAlloydbLocations(), - "google_alloydb_supported_database_flags": alloydb.DataSourceAlloydbSupportedDatabaseFlags(), - "google_artifact_registry_docker_image": artifactregistry.DataSourceArtifactRegistryDockerImage(), - "google_artifact_registry_repository": artifactregistry.DataSourceArtifactRegistryRepository(), - "google_apphub_discovered_workload": apphub.DataSourceApphubDiscoveredWorkload(), - "google_app_engine_default_service_account": appengine.DataSourceGoogleAppEngineDefaultServiceAccount(), - "google_apphub_application": apphub.DataSourceGoogleApphubApplication(), - "google_apphub_discovered_service": apphub.DataSourceApphubDiscoveredService(), - "google_beyondcorp_app_connection": beyondcorp.DataSourceGoogleBeyondcorpAppConnection(), - "google_beyondcorp_app_connector": beyondcorp.DataSourceGoogleBeyondcorpAppConnector(), - "google_beyondcorp_app_gateway": beyondcorp.DataSourceGoogleBeyondcorpAppGateway(), - "google_billing_account": billing.DataSourceGoogleBillingAccount(), - "google_bigquery_dataset": bigquery.DataSourceGoogleBigqueryDataset(), - "google_bigquery_default_service_account": bigquery.DataSourceGoogleBigqueryDefaultServiceAccount(), - "google_certificate_manager_certificate_map": certificatemanager.DataSourceGoogleCertificateManagerCertificateMap(), - "google_cloudbuild_trigger": cloudbuild.DataSourceGoogleCloudBuildTrigger(), - "google_cloudfunctions_function": cloudfunctions.DataSourceGoogleCloudFunctionsFunction(), - "google_cloudfunctions2_function": cloudfunctions2.DataSourceGoogleCloudFunctions2Function(), - "google_cloud_asset_search_all_resources": cloudasset.DataSourceGoogleCloudAssetSearchAllResources(), - "google_cloud_identity_groups": cloudidentity.DataSourceGoogleCloudIdentityGroups(), - "google_cloud_identity_group_memberships": cloudidentity.DataSourceGoogleCloudIdentityGroupMemberships(), - "google_cloud_identity_group_lookup": cloudidentity.DataSourceGoogleCloudIdentityGroupLookup(), - "google_cloud_quotas_quota_info": cloudquotas.DataSourceGoogleCloudQuotasQuotaInfo(), - "google_cloud_quotas_quota_infos": cloudquotas.DataSourceGoogleCloudQuotasQuotaInfos(), - "google_cloud_run_locations": cloudrun.DataSourceGoogleCloudRunLocations(), - "google_cloud_run_service": cloudrun.DataSourceGoogleCloudRunService(), - 
"google_cloud_run_v2_job": cloudrunv2.DataSourceGoogleCloudRunV2Job(), - "google_cloud_run_v2_service": cloudrunv2.DataSourceGoogleCloudRunV2Service(), - "google_composer_environment": composer.DataSourceGoogleComposerEnvironment(), - "google_composer_image_versions": composer.DataSourceGoogleComposerImageVersions(), - "google_compute_address": compute.DataSourceGoogleComputeAddress(), - "google_compute_addresses": compute.DataSourceGoogleComputeAddresses(), - "google_compute_backend_service": compute.DataSourceGoogleComputeBackendService(), - "google_compute_backend_bucket": compute.DataSourceGoogleComputeBackendBucket(), - "google_compute_default_service_account": compute.DataSourceGoogleComputeDefaultServiceAccount(), - "google_compute_disk": compute.DataSourceGoogleComputeDisk(), - "google_compute_forwarding_rule": compute.DataSourceGoogleComputeForwardingRule(), - "google_compute_forwarding_rules": compute.DataSourceGoogleComputeForwardingRules(), - "google_compute_global_address": compute.DataSourceGoogleComputeGlobalAddress(), - "google_compute_global_forwarding_rule": compute.DataSourceGoogleComputeGlobalForwardingRule(), - "google_compute_ha_vpn_gateway": compute.DataSourceGoogleComputeHaVpnGateway(), - "google_compute_health_check": compute.DataSourceGoogleComputeHealthCheck(), - "google_compute_image": compute.DataSourceGoogleComputeImage(), - "google_compute_instance": compute.DataSourceGoogleComputeInstance(), - "google_compute_instance_group": compute.DataSourceGoogleComputeInstanceGroup(), - "google_compute_instance_group_manager": compute.DataSourceGoogleComputeInstanceGroupManager(), - "google_compute_instance_serial_port": compute.DataSourceGoogleComputeInstanceSerialPort(), - "google_compute_instance_template": compute.DataSourceGoogleComputeInstanceTemplate(), - "google_compute_lb_ip_ranges": compute.DataSourceGoogleComputeLbIpRanges(), - "google_compute_machine_types": compute.DataSourceGoogleComputeMachineTypes(), - "google_compute_network": compute.DataSourceGoogleComputeNetwork(), - "google_compute_networks": compute.DataSourceGoogleComputeNetworks(), - "google_compute_network_endpoint_group": compute.DataSourceGoogleComputeNetworkEndpointGroup(), - "google_compute_network_peering": compute.DataSourceComputeNetworkPeering(), - "google_compute_node_types": compute.DataSourceGoogleComputeNodeTypes(), - "google_compute_regions": compute.DataSourceGoogleComputeRegions(), - "google_compute_region_disk": compute.DataSourceGoogleComputeRegionDisk(), - "google_compute_region_instance_group": compute.DataSourceGoogleComputeRegionInstanceGroup(), - "google_compute_region_instance_template": compute.DataSourceGoogleComputeRegionInstanceTemplate(), - "google_compute_region_network_endpoint_group": compute.DataSourceGoogleComputeRegionNetworkEndpointGroup(), - "google_compute_region_ssl_certificate": compute.DataSourceGoogleRegionComputeSslCertificate(), - "google_compute_reservation": compute.DataSourceGoogleComputeReservation(), - "google_compute_resource_policy": compute.DataSourceGoogleComputeResourcePolicy(), - "google_compute_router": compute.DataSourceGoogleComputeRouter(), - "google_compute_router_nat": compute.DataSourceGoogleComputeRouterNat(), - "google_compute_router_status": compute.DataSourceGoogleComputeRouterStatus(), - "google_compute_security_policy": compute.DataSourceGoogleComputeSecurityPolicy(), - "google_compute_snapshot": compute.DataSourceGoogleComputeSnapshot(), - "google_compute_ssl_certificate": compute.DataSourceGoogleComputeSslCertificate(), - 
"google_compute_ssl_policy": compute.DataSourceGoogleComputeSslPolicy(), - "google_compute_subnetwork": compute.DataSourceGoogleComputeSubnetwork(), - "google_compute_subnetworks": compute.DataSourceGoogleComputeSubnetworks(), - "google_compute_vpn_gateway": compute.DataSourceGoogleComputeVpnGateway(), - "google_compute_zones": compute.DataSourceGoogleComputeZones(), - "google_container_azure_versions": containerazure.DataSourceGoogleContainerAzureVersions(), - "google_container_aws_versions": containeraws.DataSourceGoogleContainerAwsVersions(), - "google_container_attached_versions": containerattached.DataSourceGoogleContainerAttachedVersions(), - "google_container_attached_install_manifest": containerattached.DataSourceGoogleContainerAttachedInstallManifest(), - "google_container_cluster": container.DataSourceGoogleContainerCluster(), - "google_container_engine_versions": container.DataSourceGoogleContainerEngineVersions(), - "google_container_registry_image": containeranalysis.DataSourceGoogleContainerImage(), - "google_container_registry_repository": containeranalysis.DataSourceGoogleContainerRepo(), - "google_dataproc_metastore_service": dataprocmetastore.DataSourceDataprocMetastoreService(), - "google_datastream_static_ips": datastream.DataSourceGoogleDatastreamStaticIps(), - "google_dns_keys": dns.DataSourceDNSKeys(), - "google_dns_managed_zone": dns.DataSourceDnsManagedZone(), - "google_dns_managed_zones": dns.DataSourceDnsManagedZones(), - "google_dns_record_set": dns.DataSourceDnsRecordSet(), - "google_filestore_instance": filestore.DataSourceGoogleFilestoreInstance(), - "google_iam_policy": resourcemanager.DataSourceGoogleIamPolicy(), - "google_iam_role": resourcemanager.DataSourceGoogleIamRole(), - "google_iam_testable_permissions": resourcemanager.DataSourceGoogleIamTestablePermissions(), - "google_iap_client": iap.DataSourceGoogleIapClient(), - "google_kms_crypto_key": kms.DataSourceGoogleKmsCryptoKey(), - "google_kms_crypto_keys": kms.DataSourceGoogleKmsCryptoKeys(), - "google_kms_crypto_key_version": kms.DataSourceGoogleKmsCryptoKeyVersion(), - "google_kms_key_ring": kms.DataSourceGoogleKmsKeyRing(), - "google_kms_key_rings": kms.DataSourceGoogleKmsKeyRings(), - "google_kms_secret": kms.DataSourceGoogleKmsSecret(), - "google_kms_secret_ciphertext": kms.DataSourceGoogleKmsSecretCiphertext(), - "google_folder": resourcemanager.DataSourceGoogleFolder(), - "google_folders": resourcemanager.DataSourceGoogleFolders(), - "google_folder_organization_policy": resourcemanager.DataSourceGoogleFolderOrganizationPolicy(), - "google_logging_folder_settings": logging.DataSourceGoogleLoggingFolderSettings(), - "google_logging_organization_settings": logging.DataSourceGoogleLoggingOrganizationSettings(), - "google_logging_project_cmek_settings": logging.DataSourceGoogleLoggingProjectCmekSettings(), - "google_logging_project_settings": logging.DataSourceGoogleLoggingProjectSettings(), - "google_logging_sink": logging.DataSourceGoogleLoggingSink(), - "google_monitoring_notification_channel": monitoring.DataSourceMonitoringNotificationChannel(), - "google_monitoring_cluster_istio_service": monitoring.DataSourceMonitoringServiceClusterIstio(), - "google_monitoring_istio_canonical_service": monitoring.DataSourceMonitoringIstioCanonicalService(), - "google_monitoring_mesh_istio_service": monitoring.DataSourceMonitoringServiceMeshIstio(), - "google_monitoring_app_engine_service": monitoring.DataSourceMonitoringServiceAppEngine(), - "google_monitoring_uptime_check_ips": 
monitoring.DataSourceGoogleMonitoringUptimeCheckIps(), - "google_netblock_ip_ranges": resourcemanager.DataSourceGoogleNetblockIpRanges(), - "google_organization": resourcemanager.DataSourceGoogleOrganization(), - "google_privateca_certificate_authority": privateca.DataSourcePrivatecaCertificateAuthority(), - "google_project": resourcemanager.DataSourceGoogleProject(), - "google_projects": resourcemanager.DataSourceGoogleProjects(), - "google_project_organization_policy": resourcemanager.DataSourceGoogleProjectOrganizationPolicy(), - "google_project_service": resourcemanager.DataSourceGoogleProjectService(), - "google_pubsub_subscription": pubsub.DataSourceGooglePubsubSubscription(), - "google_pubsub_topic": pubsub.DataSourceGooglePubsubTopic(), - "google_secret_manager_secret": secretmanager.DataSourceSecretManagerSecret(), - "google_secret_manager_secrets": secretmanager.DataSourceSecretManagerSecrets(), - "google_secret_manager_secret_version": secretmanager.DataSourceSecretManagerSecretVersion(), - "google_secret_manager_secret_version_access": secretmanager.DataSourceSecretManagerSecretVersionAccess(), - "google_service_account": resourcemanager.DataSourceGoogleServiceAccount(), - "google_service_account_access_token": resourcemanager.DataSourceGoogleServiceAccountAccessToken(), - "google_service_account_id_token": resourcemanager.DataSourceGoogleServiceAccountIdToken(), - "google_service_account_jwt": resourcemanager.DataSourceGoogleServiceAccountJwt(), - "google_service_account_key": resourcemanager.DataSourceGoogleServiceAccountKey(), - "google_sourcerepo_repository": sourcerepo.DataSourceGoogleSourceRepoRepository(), - "google_spanner_instance": spanner.DataSourceSpannerInstance(), - "google_sql_ca_certs": sql.DataSourceGoogleSQLCaCerts(), - "google_sql_tiers": sql.DataSourceGoogleSQLTiers(), - "google_sql_database_instance_latest_recovery_time": sql.DataSourceSqlDatabaseInstanceLatestRecoveryTime(), - "google_sql_backup_run": sql.DataSourceSqlBackupRun(), - "google_sql_databases": sql.DataSourceSqlDatabases(), - "google_sql_database": sql.DataSourceSqlDatabase(), - "google_sql_database_instance": sql.DataSourceSqlDatabaseInstance(), - "google_sql_database_instances": sql.DataSourceSqlDatabaseInstances(), - "google_service_networking_peered_dns_domain": servicenetworking.DataSourceGoogleServiceNetworkingPeeredDNSDomain(), - "google_storage_bucket": storage.DataSourceGoogleStorageBucket(), - "google_storage_buckets": storage.DataSourceGoogleStorageBuckets(), - "google_storage_bucket_object": storage.DataSourceGoogleStorageBucketObject(), - "google_storage_bucket_objects": storage.DataSourceGoogleStorageBucketObjects(), - "google_storage_bucket_object_content": storage.DataSourceGoogleStorageBucketObjectContent(), - "google_storage_object_signed_url": storage.DataSourceGoogleSignedUrl(), - "google_storage_project_service_account": storage.DataSourceGoogleStorageProjectServiceAccount(), - "google_storage_transfer_project_service_account": storagetransfer.DataSourceGoogleStorageTransferProjectServiceAccount(), - "google_tags_tag_key": tags.DataSourceGoogleTagsTagKey(), - "google_tags_tag_keys": tags.DataSourceGoogleTagsTagKeys(), - "google_tags_tag_value": tags.DataSourceGoogleTagsTagValue(), - "google_tags_tag_values": tags.DataSourceGoogleTagsTagValues(), - "google_tpu_tensorflow_versions": tpu.DataSourceTpuTensorflowVersions(), - "google_vpc_access_connector": vpcaccess.DataSourceVPCAccessConnector(), - "google_redis_instance": redis.DataSourceGoogleRedisInstance(), - 
"google_vertex_ai_index": vertexai.DataSourceVertexAIIndex(), - "google_vmwareengine_cluster": vmwareengine.DataSourceVmwareengineCluster(), - "google_vmwareengine_external_access_rule": vmwareengine.DataSourceVmwareengineExternalAccessRule(), - "google_vmwareengine_external_address": vmwareengine.DataSourceVmwareengineExternalAddress(), - "google_vmwareengine_network": vmwareengine.DataSourceVmwareengineNetwork(), - "google_vmwareengine_network_peering": vmwareengine.DataSourceVmwareengineNetworkPeering(), - "google_vmwareengine_network_policy": vmwareengine.DataSourceVmwareengineNetworkPolicy(), - "google_vmwareengine_nsx_credentials": vmwareengine.DataSourceVmwareengineNsxCredentials(), - "google_vmwareengine_private_cloud": vmwareengine.DataSourceVmwareenginePrivateCloud(), - "google_vmwareengine_subnet": vmwareengine.DataSourceVmwareengineSubnet(), - "google_vmwareengine_vcenter_credentials": vmwareengine.DataSourceVmwareengineVcenterCredentials(), - + "google_access_approval_folder_service_account": accessapproval.DataSourceAccessApprovalFolderServiceAccount(), + "google_access_approval_organization_service_account": accessapproval.DataSourceAccessApprovalOrganizationServiceAccount(), + "google_access_approval_project_service_account": accessapproval.DataSourceAccessApprovalProjectServiceAccount(), + "google_active_folder": resourcemanager.DataSourceGoogleActiveFolder(), + "google_alloydb_locations": alloydb.DataSourceAlloydbLocations(), + "google_alloydb_supported_database_flags": alloydb.DataSourceAlloydbSupportedDatabaseFlags(), + "google_artifact_registry_docker_image": artifactregistry.DataSourceArtifactRegistryDockerImage(), + "google_artifact_registry_locations": artifactregistry.DataSourceGoogleArtifactRegistryLocations(), + "google_artifact_registry_repository": artifactregistry.DataSourceArtifactRegistryRepository(), + "google_apphub_discovered_workload": apphub.DataSourceApphubDiscoveredWorkload(), + "google_app_engine_default_service_account": appengine.DataSourceGoogleAppEngineDefaultServiceAccount(), + "google_apphub_application": apphub.DataSourceGoogleApphubApplication(), + "google_apphub_discovered_service": apphub.DataSourceApphubDiscoveredService(), + "google_beyondcorp_app_connection": beyondcorp.DataSourceGoogleBeyondcorpAppConnection(), + "google_beyondcorp_app_connector": beyondcorp.DataSourceGoogleBeyondcorpAppConnector(), + "google_beyondcorp_app_gateway": beyondcorp.DataSourceGoogleBeyondcorpAppGateway(), + "google_billing_account": billing.DataSourceGoogleBillingAccount(), + "google_bigquery_tables": bigquery.DataSourceGoogleBigQueryTables(), + "google_bigquery_dataset": bigquery.DataSourceGoogleBigqueryDataset(), + "google_bigquery_default_service_account": bigquery.DataSourceGoogleBigqueryDefaultServiceAccount(), + "google_certificate_manager_certificates": certificatemanager.DataSourceGoogleCertificateManagerCertificates(), + "google_certificate_manager_certificate_map": certificatemanager.DataSourceGoogleCertificateManagerCertificateMap(), + "google_cloudbuild_trigger": cloudbuild.DataSourceGoogleCloudBuildTrigger(), + "google_cloudfunctions_function": cloudfunctions.DataSourceGoogleCloudFunctionsFunction(), + "google_cloudfunctions2_function": cloudfunctions2.DataSourceGoogleCloudFunctions2Function(), + "google_cloud_asset_search_all_resources": cloudasset.DataSourceGoogleCloudAssetSearchAllResources(), + "google_cloud_identity_groups": cloudidentity.DataSourceGoogleCloudIdentityGroups(), + "google_cloud_identity_group_memberships": 
cloudidentity.DataSourceGoogleCloudIdentityGroupMemberships(), + "google_cloud_identity_group_transitive_memberships": cloudidentity.DataSourceGoogleCloudIdentityGroupTransitiveMemberships(), + "google_cloud_identity_group_lookup": cloudidentity.DataSourceGoogleCloudIdentityGroupLookup(), + "google_cloud_quotas_quota_info": cloudquotas.DataSourceGoogleCloudQuotasQuotaInfo(), + "google_cloud_quotas_quota_infos": cloudquotas.DataSourceGoogleCloudQuotasQuotaInfos(), + "google_cloud_run_locations": cloudrun.DataSourceGoogleCloudRunLocations(), + "google_cloud_run_service": cloudrun.DataSourceGoogleCloudRunService(), + "google_cloud_run_v2_job": cloudrunv2.DataSourceGoogleCloudRunV2Job(), + "google_cloud_run_v2_service": cloudrunv2.DataSourceGoogleCloudRunV2Service(), + "google_composer_environment": composer.DataSourceGoogleComposerEnvironment(), + "google_composer_image_versions": composer.DataSourceGoogleComposerImageVersions(), + "google_compute_address": compute.DataSourceGoogleComputeAddress(), + "google_compute_addresses": compute.DataSourceGoogleComputeAddresses(), + "google_compute_backend_service": compute.DataSourceGoogleComputeBackendService(), + "google_compute_backend_bucket": compute.DataSourceGoogleComputeBackendBucket(), + "google_compute_default_service_account": compute.DataSourceGoogleComputeDefaultServiceAccount(), + "google_compute_disk": compute.DataSourceGoogleComputeDisk(), + "google_compute_forwarding_rule": compute.DataSourceGoogleComputeForwardingRule(), + "google_compute_forwarding_rules": compute.DataSourceGoogleComputeForwardingRules(), + "google_compute_global_address": compute.DataSourceGoogleComputeGlobalAddress(), + "google_compute_global_forwarding_rule": compute.DataSourceGoogleComputeGlobalForwardingRule(), + "google_compute_ha_vpn_gateway": compute.DataSourceGoogleComputeHaVpnGateway(), + "google_compute_health_check": compute.DataSourceGoogleComputeHealthCheck(), + "google_compute_image": compute.DataSourceGoogleComputeImage(), + "google_compute_instance": compute.DataSourceGoogleComputeInstance(), + "google_compute_instance_group": compute.DataSourceGoogleComputeInstanceGroup(), + "google_compute_instance_group_manager": compute.DataSourceGoogleComputeInstanceGroupManager(), + "google_compute_instance_serial_port": compute.DataSourceGoogleComputeInstanceSerialPort(), + "google_compute_instance_template": compute.DataSourceGoogleComputeInstanceTemplate(), + "google_compute_instance_guest_attributes": compute.DataSourceGoogleComputeInstanceGuestAttributes(), + "google_compute_lb_ip_ranges": compute.DataSourceGoogleComputeLbIpRanges(), + "google_compute_machine_types": compute.DataSourceGoogleComputeMachineTypes(), + "google_compute_network": compute.DataSourceGoogleComputeNetwork(), + "google_compute_networks": compute.DataSourceGoogleComputeNetworks(), + "google_compute_network_endpoint_group": compute.DataSourceGoogleComputeNetworkEndpointGroup(), + "google_compute_network_peering": compute.DataSourceComputeNetworkPeering(), + "google_compute_node_types": compute.DataSourceGoogleComputeNodeTypes(), + "google_compute_regions": compute.DataSourceGoogleComputeRegions(), + "google_compute_region_disk": compute.DataSourceGoogleComputeRegionDisk(), + "google_compute_region_instance_group": compute.DataSourceGoogleComputeRegionInstanceGroup(), + "google_compute_region_instance_group_manager": compute.DataSourceGoogleComputeRegionInstanceGroupManager(), + "google_compute_region_instance_template": compute.DataSourceGoogleComputeRegionInstanceTemplate(), + 
"google_compute_region_network_endpoint_group": compute.DataSourceGoogleComputeRegionNetworkEndpointGroup(), + "google_compute_region_ssl_certificate": compute.DataSourceGoogleRegionComputeSslCertificate(), + "google_compute_reservation": compute.DataSourceGoogleComputeReservation(), + "google_compute_resource_policy": compute.DataSourceGoogleComputeResourcePolicy(), + "google_compute_router": compute.DataSourceGoogleComputeRouter(), + "google_compute_router_nat": compute.DataSourceGoogleComputeRouterNat(), + "google_compute_router_status": compute.DataSourceGoogleComputeRouterStatus(), + "google_compute_security_policy": compute.DataSourceGoogleComputeSecurityPolicy(), + "google_compute_snapshot": compute.DataSourceGoogleComputeSnapshot(), + "google_compute_ssl_certificate": compute.DataSourceGoogleComputeSslCertificate(), + "google_compute_ssl_policy": compute.DataSourceGoogleComputeSslPolicy(), + "google_compute_subnetwork": compute.DataSourceGoogleComputeSubnetwork(), + "google_compute_subnetworks": compute.DataSourceGoogleComputeSubnetworks(), + "google_compute_vpn_gateway": compute.DataSourceGoogleComputeVpnGateway(), + "google_compute_zones": compute.DataSourceGoogleComputeZones(), + "google_container_azure_versions": containerazure.DataSourceGoogleContainerAzureVersions(), + "google_container_aws_versions": containeraws.DataSourceGoogleContainerAwsVersions(), + "google_container_attached_versions": containerattached.DataSourceGoogleContainerAttachedVersions(), + "google_container_attached_install_manifest": containerattached.DataSourceGoogleContainerAttachedInstallManifest(), + "google_container_cluster": container.DataSourceGoogleContainerCluster(), + "google_container_engine_versions": container.DataSourceGoogleContainerEngineVersions(), + "google_container_registry_image": containeranalysis.DataSourceGoogleContainerImage(), + "google_container_registry_repository": containeranalysis.DataSourceGoogleContainerRepo(), + "google_dataproc_metastore_service": dataprocmetastore.DataSourceDataprocMetastoreService(), + "google_datastream_static_ips": datastream.DataSourceGoogleDatastreamStaticIps(), + "google_dns_keys": dns.DataSourceDNSKeys(), + "google_dns_managed_zone": dns.DataSourceDnsManagedZone(), + "google_dns_managed_zones": dns.DataSourceDnsManagedZones(), + "google_dns_record_set": dns.DataSourceDnsRecordSet(), + "google_gke_hub_membership_binding": gkehub2.DataSourceGoogleGkeHubMembershipBinding(), + "google_filestore_instance": filestore.DataSourceGoogleFilestoreInstance(), + "google_iam_policy": resourcemanager.DataSourceGoogleIamPolicy(), + "google_iam_role": resourcemanager.DataSourceGoogleIamRole(), + "google_iam_testable_permissions": resourcemanager.DataSourceGoogleIamTestablePermissions(), + "google_iap_client": iap.DataSourceGoogleIapClient(), + "google_kms_crypto_key": kms.DataSourceGoogleKmsCryptoKey(), + "google_kms_crypto_keys": kms.DataSourceGoogleKmsCryptoKeys(), + "google_kms_crypto_key_version": kms.DataSourceGoogleKmsCryptoKeyVersion(), + "google_kms_crypto_key_latest_version": kms.DataSourceGoogleKmsLatestCryptoKeyVersion(), + "google_kms_crypto_key_versions": kms.DataSourceGoogleKmsCryptoKeyVersions(), + "google_kms_key_ring": kms.DataSourceGoogleKmsKeyRing(), + "google_kms_key_rings": kms.DataSourceGoogleKmsKeyRings(), + "google_kms_secret": kms.DataSourceGoogleKmsSecret(), + "google_kms_secret_ciphertext": kms.DataSourceGoogleKmsSecretCiphertext(), + "google_folder": resourcemanager.DataSourceGoogleFolder(), + "google_folders": 
resourcemanager.DataSourceGoogleFolders(), + "google_folder_organization_policy": resourcemanager.DataSourceGoogleFolderOrganizationPolicy(), + "google_logging_folder_settings": logging.DataSourceGoogleLoggingFolderSettings(), + "google_logging_organization_settings": logging.DataSourceGoogleLoggingOrganizationSettings(), + "google_logging_project_cmek_settings": logging.DataSourceGoogleLoggingProjectCmekSettings(), + "google_logging_project_settings": logging.DataSourceGoogleLoggingProjectSettings(), + "google_logging_sink": logging.DataSourceGoogleLoggingSink(), + "google_monitoring_notification_channel": monitoring.DataSourceMonitoringNotificationChannel(), + "google_monitoring_cluster_istio_service": monitoring.DataSourceMonitoringServiceClusterIstio(), + "google_monitoring_istio_canonical_service": monitoring.DataSourceMonitoringIstioCanonicalService(), + "google_monitoring_mesh_istio_service": monitoring.DataSourceMonitoringServiceMeshIstio(), + "google_monitoring_app_engine_service": monitoring.DataSourceMonitoringServiceAppEngine(), + "google_monitoring_uptime_check_ips": monitoring.DataSourceGoogleMonitoringUptimeCheckIps(), + "google_netblock_ip_ranges": resourcemanager.DataSourceGoogleNetblockIpRanges(), + "google_oracle_database_autonomous_database": oracledatabase.DataSourceOracleDatabaseAutonomousDatabase(), + "google_oracle_database_autonomous_databases": oracledatabase.DataSourceOracleDatabaseAutonomousDatabases(), + "google_oracle_database_db_nodes": oracledatabase.DataSourceOracleDatabaseDbNodes(), + "google_oracle_database_db_servers": oracledatabase.DataSourceOracleDatabaseDbServers(), + "google_oracle_database_cloud_exadata_infrastructures": oracledatabase.DataSourceOracleDatabaseCloudExadataInfrastructures(), + "google_oracle_database_cloud_exadata_infrastructure": oracledatabase.DataSourceOracleDatabaseCloudExadataInfrastructure(), + "google_oracle_database_cloud_vm_clusters": oracledatabase.DataSourceOracleDatabaseCloudVmClusters(), + "google_oracle_database_cloud_vm_cluster": oracledatabase.DataSourceOracleDatabaseCloudVmCluster(), + "google_organization": resourcemanager.DataSourceGoogleOrganization(), + "google_privateca_certificate_authority": privateca.DataSourcePrivatecaCertificateAuthority(), + "google_privileged_access_manager_entitlement": privilegedaccessmanager.DataSourceGooglePrivilegedAccessManagerEntitlement(), + "google_project": resourcemanager.DataSourceGoogleProject(), + "google_projects": resourcemanager.DataSourceGoogleProjects(), + "google_project_organization_policy": resourcemanager.DataSourceGoogleProjectOrganizationPolicy(), + "google_project_service": resourcemanager.DataSourceGoogleProjectService(), + "google_pubsub_subscription": pubsub.DataSourceGooglePubsubSubscription(), + "google_pubsub_topic": pubsub.DataSourceGooglePubsubTopic(), + "google_secret_manager_regional_secret_version_access": secretmanagerregional.DataSourceSecretManagerRegionalRegionalSecretVersionAccess(), + "google_secret_manager_regional_secret_version": secretmanagerregional.DataSourceSecretManagerRegionalRegionalSecretVersion(), + "google_secret_manager_regional_secret": secretmanagerregional.DataSourceSecretManagerRegionalRegionalSecret(), + "google_secret_manager_regional_secrets": secretmanagerregional.DataSourceSecretManagerRegionalRegionalSecrets(), + "google_secret_manager_secret": secretmanager.DataSourceSecretManagerSecret(), + "google_secret_manager_secrets": secretmanager.DataSourceSecretManagerSecrets(), + "google_secret_manager_secret_version": 
secretmanager.DataSourceSecretManagerSecretVersion(), + "google_secret_manager_secret_version_access": secretmanager.DataSourceSecretManagerSecretVersionAccess(), + "google_service_account": resourcemanager.DataSourceGoogleServiceAccount(), + "google_service_account_access_token": resourcemanager.DataSourceGoogleServiceAccountAccessToken(), + "google_service_account_id_token": resourcemanager.DataSourceGoogleServiceAccountIdToken(), + "google_service_account_jwt": resourcemanager.DataSourceGoogleServiceAccountJwt(), + "google_service_account_key": resourcemanager.DataSourceGoogleServiceAccountKey(), + "google_service_accounts": resourcemanager.DataSourceGoogleServiceAccounts(), + "google_site_verification_token": siteverification.DataSourceSiteVerificationToken(), + "google_sourcerepo_repository": sourcerepo.DataSourceGoogleSourceRepoRepository(), + "google_spanner_instance": spanner.DataSourceSpannerInstance(), + "google_sql_ca_certs": sql.DataSourceGoogleSQLCaCerts(), + "google_sql_tiers": sql.DataSourceGoogleSQLTiers(), + "google_sql_database_instance_latest_recovery_time": sql.DataSourceSqlDatabaseInstanceLatestRecoveryTime(), + "google_sql_backup_run": sql.DataSourceSqlBackupRun(), + "google_sql_databases": sql.DataSourceSqlDatabases(), + "google_sql_database": sql.DataSourceSqlDatabase(), + "google_sql_database_instance": sql.DataSourceSqlDatabaseInstance(), + "google_sql_database_instances": sql.DataSourceSqlDatabaseInstances(), + "google_service_networking_peered_dns_domain": servicenetworking.DataSourceGoogleServiceNetworkingPeeredDNSDomain(), + "google_storage_bucket": storage.DataSourceGoogleStorageBucket(), + "google_storage_buckets": storage.DataSourceGoogleStorageBuckets(), + "google_storage_bucket_object": storage.DataSourceGoogleStorageBucketObject(), + "google_storage_bucket_objects": storage.DataSourceGoogleStorageBucketObjects(), + "google_storage_bucket_object_content": storage.DataSourceGoogleStorageBucketObjectContent(), + "google_storage_object_signed_url": storage.DataSourceGoogleSignedUrl(), + "google_storage_project_service_account": storage.DataSourceGoogleStorageProjectServiceAccount(), + "google_storage_transfer_project_service_account": storagetransfer.DataSourceGoogleStorageTransferProjectServiceAccount(), + "google_tags_tag_key": tags.DataSourceGoogleTagsTagKey(), + "google_tags_tag_keys": tags.DataSourceGoogleTagsTagKeys(), + "google_tags_tag_value": tags.DataSourceGoogleTagsTagValue(), + "google_tags_tag_values": tags.DataSourceGoogleTagsTagValues(), + "google_tpu_tensorflow_versions": tpu.DataSourceTpuTensorflowVersions(), + "google_vpc_access_connector": vpcaccess.DataSourceVPCAccessConnector(), + "google_redis_instance": redis.DataSourceGoogleRedisInstance(), + "google_vertex_ai_index": vertexai.DataSourceVertexAIIndex(), + "google_vmwareengine_cluster": vmwareengine.DataSourceVmwareengineCluster(), + "google_vmwareengine_external_access_rule": vmwareengine.DataSourceVmwareengineExternalAccessRule(), + "google_vmwareengine_external_address": vmwareengine.DataSourceVmwareengineExternalAddress(), + "google_vmwareengine_network": vmwareengine.DataSourceVmwareengineNetwork(), + "google_vmwareengine_network_peering": vmwareengine.DataSourceVmwareengineNetworkPeering(), + "google_vmwareengine_network_policy": vmwareengine.DataSourceVmwareengineNetworkPolicy(), + "google_vmwareengine_nsx_credentials": vmwareengine.DataSourceVmwareengineNsxCredentials(), + "google_vmwareengine_private_cloud": vmwareengine.DataSourceVmwareenginePrivateCloud(), + 
"google_vmwareengine_subnet": vmwareengine.DataSourceVmwareengineSubnet(), + "google_vmwareengine_vcenter_credentials": vmwareengine.DataSourceVmwareengineVcenterCredentials(), // ####### END handwritten datasources ########### } @@ -372,6 +398,8 @@ var generatedIAMDatasources = map[string]*schema.Resource{ "google_iap_web_region_backend_service_iam_policy": tpgiamresource.DataSourceIamPolicy(iap.IapWebRegionBackendServiceIamSchema, iap.IapWebRegionBackendServiceIamUpdaterProducer), "google_iap_web_type_app_engine_iam_policy": tpgiamresource.DataSourceIamPolicy(iap.IapWebTypeAppEngineIamSchema, iap.IapWebTypeAppEngineIamUpdaterProducer), "google_iap_web_type_compute_iam_policy": tpgiamresource.DataSourceIamPolicy(iap.IapWebTypeComputeIamSchema, iap.IapWebTypeComputeIamUpdaterProducer), + "google_kms_ekm_connection_iam_policy": tpgiamresource.DataSourceIamPolicy(kms.KMSEkmConnectionIamSchema, kms.KMSEkmConnectionIamUpdaterProducer), + "google_logging_log_view_iam_policy": tpgiamresource.DataSourceIamPolicy(logging.LoggingLogViewIamSchema, logging.LoggingLogViewIamUpdaterProducer), "google_network_security_address_group_iam_policy": tpgiamresource.DataSourceIamPolicy(networksecurity.NetworkSecurityProjectAddressGroupIamSchema, networksecurity.NetworkSecurityProjectAddressGroupIamUpdaterProducer), "google_notebooks_instance_iam_policy": tpgiamresource.DataSourceIamPolicy(notebooks.NotebooksInstanceIamSchema, notebooks.NotebooksInstanceIamUpdaterProducer), "google_notebooks_runtime_iam_policy": tpgiamresource.DataSourceIamPolicy(notebooks.NotebooksRuntimeIamSchema, notebooks.NotebooksRuntimeIamUpdaterProducer), @@ -380,9 +408,11 @@ var generatedIAMDatasources = map[string]*schema.Resource{ "google_pubsub_schema_iam_policy": tpgiamresource.DataSourceIamPolicy(pubsub.PubsubSchemaIamSchema, pubsub.PubsubSchemaIamUpdaterProducer), "google_pubsub_topic_iam_policy": tpgiamresource.DataSourceIamPolicy(pubsub.PubsubTopicIamSchema, pubsub.PubsubTopicIamUpdaterProducer), "google_secret_manager_secret_iam_policy": tpgiamresource.DataSourceIamPolicy(secretmanager.SecretManagerSecretIamSchema, secretmanager.SecretManagerSecretIamUpdaterProducer), + "google_secret_manager_regional_secret_iam_policy": tpgiamresource.DataSourceIamPolicy(secretmanagerregional.SecretManagerRegionalRegionalSecretIamSchema, secretmanagerregional.SecretManagerRegionalRegionalSecretIamUpdaterProducer), "google_secure_source_manager_instance_iam_policy": tpgiamresource.DataSourceIamPolicy(securesourcemanager.SecureSourceManagerInstanceIamSchema, securesourcemanager.SecureSourceManagerInstanceIamUpdaterProducer), "google_secure_source_manager_repository_iam_policy": tpgiamresource.DataSourceIamPolicy(securesourcemanager.SecureSourceManagerRepositoryIamSchema, securesourcemanager.SecureSourceManagerRepositoryIamUpdaterProducer), "google_scc_source_iam_policy": tpgiamresource.DataSourceIamPolicy(securitycenter.SecurityCenterSourceIamSchema, securitycenter.SecurityCenterSourceIamUpdaterProducer), + "google_scc_v2_organization_source_iam_policy": tpgiamresource.DataSourceIamPolicy(securitycenterv2.SecurityCenterV2OrganizationSourceIamSchema, securitycenterv2.SecurityCenterV2OrganizationSourceIamUpdaterProducer), "google_endpoints_service_iam_policy": tpgiamresource.DataSourceIamPolicy(servicemanagement.ServiceManagementServiceIamSchema, servicemanagement.ServiceManagementServiceIamUpdaterProducer), "google_endpoints_service_consumers_iam_policy": 
tpgiamresource.DataSourceIamPolicy(servicemanagement.ServiceManagementServiceConsumersIamSchema, servicemanagement.ServiceManagementServiceConsumersIamUpdaterProducer), "google_sourcerepo_repository_iam_policy": tpgiamresource.DataSourceIamPolicy(sourcerepo.SourceRepoRepositoryIamSchema, sourcerepo.SourceRepoRepositoryIamUpdaterProducer), @@ -419,9 +449,9 @@ var handwrittenIAMDatasources = map[string]*schema.Resource{ } // Resources -// Generated resources: 432 -// Generated IAM resources: 252 -// Total generated resources: 684 +// Generated resources: 482 +// Generated IAM resources: 261 +// Total generated resources: 743 var generatedResources = map[string]*schema.Resource{ "google_folder_access_approval_settings": accessapproval.ResourceAccessApprovalFolderSettings(), "google_organization_access_approval_settings": accessapproval.ResourceAccessApprovalOrganizationSettings(), @@ -438,6 +468,8 @@ var generatedResources = map[string]*schema.Resource{ "google_access_context_manager_gcp_user_access_binding": accesscontextmanager.ResourceAccessContextManagerGcpUserAccessBinding(), "google_access_context_manager_ingress_policy": accesscontextmanager.ResourceAccessContextManagerIngressPolicy(), "google_access_context_manager_service_perimeter": accesscontextmanager.ResourceAccessContextManagerServicePerimeter(), + "google_access_context_manager_service_perimeter_dry_run_egress_policy": accesscontextmanager.ResourceAccessContextManagerServicePerimeterDryRunEgressPolicy(), + "google_access_context_manager_service_perimeter_dry_run_ingress_policy": accesscontextmanager.ResourceAccessContextManagerServicePerimeterDryRunIngressPolicy(), "google_access_context_manager_service_perimeter_dry_run_resource": accesscontextmanager.ResourceAccessContextManagerServicePerimeterDryRunResource(), "google_access_context_manager_service_perimeter_egress_policy": accesscontextmanager.ResourceAccessContextManagerServicePerimeterEgressPolicy(), "google_access_context_manager_service_perimeter_ingress_policy": accesscontextmanager.ResourceAccessContextManagerServicePerimeterIngressPolicy(), @@ -450,6 +482,8 @@ var generatedResources = map[string]*schema.Resource{ "google_alloydb_instance": alloydb.ResourceAlloydbInstance(), "google_alloydb_user": alloydb.ResourceAlloydbUser(), "google_apigee_addons_config": apigee.ResourceApigeeAddonsConfig(), + "google_apigee_app_group": apigee.ResourceApigeeAppGroup(), + "google_apigee_developer": apigee.ResourceApigeeDeveloper(), "google_apigee_endpoint_attachment": apigee.ResourceApigeeEndpointAttachment(), "google_apigee_env_keystore": apigee.ResourceApigeeEnvKeystore(), "google_apigee_env_references": apigee.ResourceApigeeEnvReferences(), @@ -459,6 +493,8 @@ var generatedResources = map[string]*schema.Resource{ "google_apigee_environment_iam_binding": tpgiamresource.ResourceIamBinding(apigee.ApigeeEnvironmentIamSchema, apigee.ApigeeEnvironmentIamUpdaterProducer, apigee.ApigeeEnvironmentIdParseFunc), "google_apigee_environment_iam_member": tpgiamresource.ResourceIamMember(apigee.ApigeeEnvironmentIamSchema, apigee.ApigeeEnvironmentIamUpdaterProducer, apigee.ApigeeEnvironmentIdParseFunc), "google_apigee_environment_iam_policy": tpgiamresource.ResourceIamPolicy(apigee.ApigeeEnvironmentIamSchema, apigee.ApigeeEnvironmentIamUpdaterProducer, apigee.ApigeeEnvironmentIdParseFunc), + "google_apigee_environment_keyvaluemaps": apigee.ResourceApigeeEnvironmentKeyvaluemaps(), + "google_apigee_environment_keyvaluemaps_entries": apigee.ResourceApigeeEnvironmentKeyvaluemapsEntries(), 
"google_apigee_instance": apigee.ResourceApigeeInstance(), "google_apigee_instance_attachment": apigee.ResourceApigeeInstanceAttachment(), "google_apigee_keystores_aliases_self_signed_cert": apigee.ResourceApigeeKeystoresAliasesSelfSignedCert(), @@ -621,6 +657,8 @@ var generatedResources = map[string]*schema.Resource{ "google_compute_network_endpoint_group": compute.ResourceComputeNetworkEndpointGroup(), "google_compute_network_endpoints": compute.ResourceComputeNetworkEndpoints(), "google_compute_network_firewall_policy": compute.ResourceComputeNetworkFirewallPolicy(), + "google_compute_network_firewall_policy_association": compute.ResourceComputeNetworkFirewallPolicyAssociation(), + "google_compute_network_firewall_policy_rule": compute.ResourceComputeNetworkFirewallPolicyRule(), "google_compute_network_peering_routes_config": compute.ResourceComputeNetworkPeeringRoutesConfig(), "google_compute_node_group": compute.ResourceComputeNodeGroup(), "google_compute_node_template": compute.ResourceComputeNodeTemplate(), @@ -641,6 +679,8 @@ var generatedResources = map[string]*schema.Resource{ "google_compute_region_network_endpoint": compute.ResourceComputeRegionNetworkEndpoint(), "google_compute_region_network_endpoint_group": compute.ResourceComputeRegionNetworkEndpointGroup(), "google_compute_region_network_firewall_policy": compute.ResourceComputeRegionNetworkFirewallPolicy(), + "google_compute_region_network_firewall_policy_association": compute.ResourceComputeRegionNetworkFirewallPolicyAssociation(), + "google_compute_region_network_firewall_policy_rule": compute.ResourceComputeRegionNetworkFirewallPolicyRule(), "google_compute_region_per_instance_config": compute.ResourceComputeRegionPerInstanceConfig(), "google_compute_region_ssl_certificate": compute.ResourceComputeRegionSslCertificate(), "google_compute_region_ssl_policy": compute.ResourceComputeRegionSslPolicy(), @@ -649,10 +689,12 @@ var generatedResources = map[string]*schema.Resource{ "google_compute_region_target_tcp_proxy": compute.ResourceComputeRegionTargetTcpProxy(), "google_compute_region_url_map": compute.ResourceComputeRegionUrlMap(), "google_compute_reservation": compute.ResourceComputeReservation(), + "google_compute_resize_request": compute.ResourceComputeResizeRequest(), "google_compute_resource_policy": compute.ResourceComputeResourcePolicy(), "google_compute_route": compute.ResourceComputeRoute(), "google_compute_router": compute.ResourceComputeRouter(), "google_compute_router_nat": compute.ResourceComputeRouterNat(), + "google_compute_router_nat_address": compute.ResourceComputeRouterNatAddress(), "google_compute_security_policy_rule": compute.ResourceComputeSecurityPolicyRule(), "google_compute_service_attachment": compute.ResourceComputeServiceAttachment(), "google_compute_snapshot": compute.ResourceComputeSnapshot(), @@ -682,6 +724,7 @@ var generatedResources = map[string]*schema.Resource{ "google_container_attached_cluster": containerattached.ResourceContainerAttachedCluster(), "google_billing_project_info": corebilling.ResourceCoreBillingProjectInfo(), "google_database_migration_service_connection_profile": databasemigrationservice.ResourceDatabaseMigrationServiceConnectionProfile(), + "google_database_migration_service_migration_job": databasemigrationservice.ResourceDatabaseMigrationServiceMigrationJob(), "google_database_migration_service_private_connection": databasemigrationservice.ResourceDatabaseMigrationServicePrivateConnection(), "google_data_catalog_entry": datacatalog.ResourceDataCatalogEntry(), 
"google_data_catalog_entry_group": datacatalog.ResourceDataCatalogEntryGroup(), @@ -744,6 +787,7 @@ var generatedResources = map[string]*schema.Resource{ "google_dataproc_autoscaling_policy_iam_binding": tpgiamresource.ResourceIamBinding(dataproc.DataprocAutoscalingPolicyIamSchema, dataproc.DataprocAutoscalingPolicyIamUpdaterProducer, dataproc.DataprocAutoscalingPolicyIdParseFunc), "google_dataproc_autoscaling_policy_iam_member": tpgiamresource.ResourceIamMember(dataproc.DataprocAutoscalingPolicyIamSchema, dataproc.DataprocAutoscalingPolicyIamUpdaterProducer, dataproc.DataprocAutoscalingPolicyIdParseFunc), "google_dataproc_autoscaling_policy_iam_policy": tpgiamresource.ResourceIamPolicy(dataproc.DataprocAutoscalingPolicyIamSchema, dataproc.DataprocAutoscalingPolicyIamUpdaterProducer, dataproc.DataprocAutoscalingPolicyIdParseFunc), + "google_dataproc_batch": dataproc.ResourceDataprocBatch(), "google_dataproc_metastore_federation": dataprocmetastore.ResourceDataprocMetastoreFederation(), "google_dataproc_metastore_federation_iam_binding": tpgiamresource.ResourceIamBinding(dataprocmetastore.DataprocMetastoreFederationIamSchema, dataprocmetastore.DataprocMetastoreFederationIamUpdaterProducer, dataprocmetastore.DataprocMetastoreFederationIdParseFunc), "google_dataproc_metastore_federation_iam_member": tpgiamresource.ResourceIamMember(dataprocmetastore.DataprocMetastoreFederationIamSchema, dataprocmetastore.DataprocMetastoreFederationIamUpdaterProducer, dataprocmetastore.DataprocMetastoreFederationIdParseFunc), @@ -752,7 +796,6 @@ var generatedResources = map[string]*schema.Resource{ "google_dataproc_metastore_service_iam_binding": tpgiamresource.ResourceIamBinding(dataprocmetastore.DataprocMetastoreServiceIamSchema, dataprocmetastore.DataprocMetastoreServiceIamUpdaterProducer, dataprocmetastore.DataprocMetastoreServiceIdParseFunc), "google_dataproc_metastore_service_iam_member": tpgiamresource.ResourceIamMember(dataprocmetastore.DataprocMetastoreServiceIamSchema, dataprocmetastore.DataprocMetastoreServiceIamUpdaterProducer, dataprocmetastore.DataprocMetastoreServiceIdParseFunc), "google_dataproc_metastore_service_iam_policy": tpgiamresource.ResourceIamPolicy(dataprocmetastore.DataprocMetastoreServiceIamSchema, dataprocmetastore.DataprocMetastoreServiceIamUpdaterProducer, dataprocmetastore.DataprocMetastoreServiceIdParseFunc), - "google_datastore_index": datastore.ResourceDatastoreIndex(), "google_datastream_connection_profile": datastream.ResourceDatastreamConnectionProfile(), "google_datastream_private_connection": datastream.ResourceDatastreamPrivateConnection(), "google_datastream_stream": datastream.ResourceDatastreamStream(), @@ -773,7 +816,9 @@ var generatedResources = map[string]*schema.Resource{ "google_dialogflow_cx_webhook": dialogflowcx.ResourceDialogflowCXWebhook(), "google_discovery_engine_chat_engine": discoveryengine.ResourceDiscoveryEngineChatEngine(), "google_discovery_engine_data_store": discoveryengine.ResourceDiscoveryEngineDataStore(), + "google_discovery_engine_schema": discoveryengine.ResourceDiscoveryEngineSchema(), "google_discovery_engine_search_engine": discoveryengine.ResourceDiscoveryEngineSearchEngine(), + "google_discovery_engine_target_site": discoveryengine.ResourceDiscoveryEngineTargetSite(), "google_dns_managed_zone": dns.ResourceDNSManagedZone(), "google_dns_managed_zone_iam_binding": tpgiamresource.ResourceIamBinding(dns.DNSManagedZoneIamSchema, dns.DNSManagedZoneIamUpdaterProducer, dns.DNSManagedZoneIdParseFunc), "google_dns_managed_zone_iam_member": 
tpgiamresource.ResourceIamMember(dns.DNSManagedZoneIamSchema, dns.DNSManagedZoneIamUpdaterProducer, dns.DNSManagedZoneIdParseFunc), @@ -843,6 +888,8 @@ var generatedResources = map[string]*schema.Resource{ "google_healthcare_dicom_store": healthcare.ResourceHealthcareDicomStore(), "google_healthcare_fhir_store": healthcare.ResourceHealthcareFhirStore(), "google_healthcare_hl7_v2_store": healthcare.ResourceHealthcareHl7V2Store(), + "google_healthcare_pipeline_job": healthcare.ResourceHealthcarePipelineJob(), + "google_healthcare_workspace": healthcare.ResourceHealthcareWorkspace(), "google_iam_access_boundary_policy": iam2.ResourceIAM2AccessBoundaryPolicy(), "google_iam_deny_policy": iam2.ResourceIAM2DenyPolicy(), "google_iam_workload_identity_pool": iambeta.ResourceIAMBetaWorkloadIdentityPool(), @@ -857,6 +904,7 @@ var generatedResources = map[string]*schema.Resource{ "google_iap_app_engine_version_iam_policy": tpgiamresource.ResourceIamPolicy(iap.IapAppEngineVersionIamSchema, iap.IapAppEngineVersionIamUpdaterProducer, iap.IapAppEngineVersionIdParseFunc), "google_iap_brand": iap.ResourceIapBrand(), "google_iap_client": iap.ResourceIapClient(), + "google_iap_settings": iap.ResourceIapSettings(), "google_iap_tunnel_iam_binding": tpgiamresource.ResourceIamBinding(iap.IapTunnelIamSchema, iap.IapTunnelIamUpdaterProducer, iap.IapTunnelIdParseFunc), "google_iap_tunnel_iam_member": tpgiamresource.ResourceIamMember(iap.IapTunnelIamSchema, iap.IapTunnelIamUpdaterProducer, iap.IapTunnelIdParseFunc), "google_iap_tunnel_iam_policy": tpgiamresource.ResourceIamPolicy(iap.IapTunnelIamSchema, iap.IapTunnelIamUpdaterProducer, iap.IapTunnelIdParseFunc), @@ -886,7 +934,6 @@ var generatedResources = map[string]*schema.Resource{ "google_identity_platform_default_supported_idp_config": identityplatform.ResourceIdentityPlatformDefaultSupportedIdpConfig(), "google_identity_platform_inbound_saml_config": identityplatform.ResourceIdentityPlatformInboundSamlConfig(), "google_identity_platform_oauth_idp_config": identityplatform.ResourceIdentityPlatformOauthIdpConfig(), - "google_identity_platform_project_default_config": identityplatform.ResourceIdentityPlatformProjectDefaultConfig(), "google_identity_platform_tenant": identityplatform.ResourceIdentityPlatformTenant(), "google_identity_platform_tenant_default_supported_idp_config": identityplatform.ResourceIdentityPlatformTenantDefaultSupportedIdpConfig(), "google_identity_platform_tenant_inbound_saml_config": identityplatform.ResourceIdentityPlatformTenantInboundSamlConfig(), @@ -899,12 +946,19 @@ var generatedResources = map[string]*schema.Resource{ "google_kms_crypto_key": kms.ResourceKMSCryptoKey(), "google_kms_crypto_key_version": kms.ResourceKMSCryptoKeyVersion(), "google_kms_ekm_connection": kms.ResourceKMSEkmConnection(), + "google_kms_ekm_connection_iam_binding": tpgiamresource.ResourceIamBinding(kms.KMSEkmConnectionIamSchema, kms.KMSEkmConnectionIamUpdaterProducer, kms.KMSEkmConnectionIdParseFunc), + "google_kms_ekm_connection_iam_member": tpgiamresource.ResourceIamMember(kms.KMSEkmConnectionIamSchema, kms.KMSEkmConnectionIamUpdaterProducer, kms.KMSEkmConnectionIdParseFunc), + "google_kms_ekm_connection_iam_policy": tpgiamresource.ResourceIamPolicy(kms.KMSEkmConnectionIamSchema, kms.KMSEkmConnectionIamUpdaterProducer, kms.KMSEkmConnectionIdParseFunc), "google_kms_key_ring": kms.ResourceKMSKeyRing(), "google_kms_key_ring_import_job": kms.ResourceKMSKeyRingImportJob(), "google_kms_secret_ciphertext": kms.ResourceKMSSecretCiphertext(), 
"google_logging_folder_settings": logging.ResourceLoggingFolderSettings(), "google_logging_linked_dataset": logging.ResourceLoggingLinkedDataset(), + "google_logging_log_scope": logging.ResourceLoggingLogScope(), "google_logging_log_view": logging.ResourceLoggingLogView(), + "google_logging_log_view_iam_binding": tpgiamresource.ResourceIamBinding(logging.LoggingLogViewIamSchema, logging.LoggingLogViewIamUpdaterProducer, logging.LoggingLogViewIdParseFunc), + "google_logging_log_view_iam_member": tpgiamresource.ResourceIamMember(logging.LoggingLogViewIamSchema, logging.LoggingLogViewIamUpdaterProducer, logging.LoggingLogViewIdParseFunc), + "google_logging_log_view_iam_policy": tpgiamresource.ResourceIamPolicy(logging.LoggingLogViewIamSchema, logging.LoggingLogViewIamUpdaterProducer, logging.LoggingLogViewIdParseFunc), "google_logging_metric": logging.ResourceLoggingMetric(), "google_logging_organization_settings": logging.ResourceLoggingOrganizationSettings(), "google_looker_instance": looker.ResourceLookerInstance(), @@ -921,21 +975,25 @@ var generatedResources = map[string]*schema.Resource{ "google_monitoring_custom_service": monitoring.ResourceMonitoringService(), "google_monitoring_slo": monitoring.ResourceMonitoringSlo(), "google_monitoring_uptime_check_config": monitoring.ResourceMonitoringUptimeCheckConfig(), + "google_netapp_active_directory": netapp.ResourceNetappActiveDirectory(), + "google_netapp_backup": netapp.ResourceNetappBackup(), + "google_netapp_backup_policy": netapp.ResourceNetappBackupPolicy(), + "google_netapp_backup_vault": netapp.ResourceNetappBackupVault(), + "google_netapp_storage_pool": netapp.ResourceNetappStoragePool(), "google_netapp_volume": netapp.ResourceNetappVolume(), "google_netapp_volume_replication": netapp.ResourceNetappVolumeReplication(), "google_netapp_volume_snapshot": netapp.ResourceNetappVolumeSnapshot(), - "google_netapp_active_directory": netapp.ResourceNetappactiveDirectory(), - "google_netapp_backup": netapp.ResourceNetappbackup(), - "google_netapp_backup_policy": netapp.ResourceNetappbackupPolicy(), - "google_netapp_backup_vault": netapp.ResourceNetappbackupVault(), "google_netapp_kmsconfig": netapp.ResourceNetappkmsconfig(), - "google_netapp_storage_pool": netapp.ResourceNetappstoragePool(), + "google_network_connectivity_group": networkconnectivity.ResourceNetworkConnectivityGroup(), + "google_network_connectivity_hub": networkconnectivity.ResourceNetworkConnectivityHub(), "google_network_connectivity_internal_range": networkconnectivity.ResourceNetworkConnectivityInternalRange(), "google_network_connectivity_policy_based_route": networkconnectivity.ResourceNetworkConnectivityPolicyBasedRoute(), "google_network_connectivity_regional_endpoint": networkconnectivity.ResourceNetworkConnectivityRegionalEndpoint(), "google_network_connectivity_service_connection_policy": networkconnectivity.ResourceNetworkConnectivityServiceConnectionPolicy(), + "google_network_connectivity_spoke": networkconnectivity.ResourceNetworkConnectivitySpoke(), "google_network_management_connectivity_test": networkmanagement.ResourceNetworkManagementConnectivityTest(), "google_network_security_address_group": networksecurity.ResourceNetworkSecurityAddressGroup(), + "google_network_security_client_tls_policy": networksecurity.ResourceNetworkSecurityClientTlsPolicy(), "google_network_security_firewall_endpoint": networksecurity.ResourceNetworkSecurityFirewallEndpoint(), "google_network_security_firewall_endpoint_association": 
networksecurity.ResourceNetworkSecurityFirewallEndpointAssociation(), "google_network_security_gateway_security_policy": networksecurity.ResourceNetworkSecurityGatewaySecurityPolicy(), @@ -945,6 +1003,7 @@ var generatedResources = map[string]*schema.Resource{ "google_network_security_address_group_iam_policy": tpgiamresource.ResourceIamPolicy(networksecurity.NetworkSecurityProjectAddressGroupIamSchema, networksecurity.NetworkSecurityProjectAddressGroupIamUpdaterProducer, networksecurity.NetworkSecurityProjectAddressGroupIdParseFunc), "google_network_security_security_profile": networksecurity.ResourceNetworkSecuritySecurityProfile(), "google_network_security_security_profile_group": networksecurity.ResourceNetworkSecuritySecurityProfileGroup(), + "google_network_security_server_tls_policy": networksecurity.ResourceNetworkSecurityServerTlsPolicy(), "google_network_security_tls_inspection_policy": networksecurity.ResourceNetworkSecurityTlsInspectionPolicy(), "google_network_security_url_lists": networksecurity.ResourceNetworkSecurityUrlLists(), "google_network_services_edge_cache_keyset": networkservices.ResourceNetworkServicesEdgeCacheKeyset(), @@ -963,6 +1022,9 @@ var generatedResources = map[string]*schema.Resource{ "google_notebooks_runtime_iam_binding": tpgiamresource.ResourceIamBinding(notebooks.NotebooksRuntimeIamSchema, notebooks.NotebooksRuntimeIamUpdaterProducer, notebooks.NotebooksRuntimeIdParseFunc), "google_notebooks_runtime_iam_member": tpgiamresource.ResourceIamMember(notebooks.NotebooksRuntimeIamSchema, notebooks.NotebooksRuntimeIamUpdaterProducer, notebooks.NotebooksRuntimeIdParseFunc), "google_notebooks_runtime_iam_policy": tpgiamresource.ResourceIamPolicy(notebooks.NotebooksRuntimeIamSchema, notebooks.NotebooksRuntimeIamUpdaterProducer, notebooks.NotebooksRuntimeIdParseFunc), + "google_oracle_database_autonomous_database": oracledatabase.ResourceOracleDatabaseAutonomousDatabase(), + "google_oracle_database_cloud_exadata_infrastructure": oracledatabase.ResourceOracleDatabaseCloudExadataInfrastructure(), + "google_oracle_database_cloud_vm_cluster": oracledatabase.ResourceOracleDatabaseCloudVmCluster(), "google_org_policy_custom_constraint": orgpolicy.ResourceOrgPolicyCustomConstraint(), "google_org_policy_policy": orgpolicy.ResourceOrgPolicyPolicy(), "google_os_config_patch_deployment": osconfig.ResourceOSConfigPatchDeployment(), @@ -977,6 +1039,7 @@ var generatedResources = map[string]*schema.Resource{ "google_privateca_certificate_template_iam_binding": tpgiamresource.ResourceIamBinding(privateca.PrivatecaCertificateTemplateIamSchema, privateca.PrivatecaCertificateTemplateIamUpdaterProducer, privateca.PrivatecaCertificateTemplateIdParseFunc), "google_privateca_certificate_template_iam_member": tpgiamresource.ResourceIamMember(privateca.PrivatecaCertificateTemplateIamSchema, privateca.PrivatecaCertificateTemplateIamUpdaterProducer, privateca.PrivatecaCertificateTemplateIdParseFunc), "google_privateca_certificate_template_iam_policy": tpgiamresource.ResourceIamPolicy(privateca.PrivatecaCertificateTemplateIamSchema, privateca.PrivatecaCertificateTemplateIamUpdaterProducer, privateca.PrivatecaCertificateTemplateIdParseFunc), + "google_privileged_access_manager_entitlement": privilegedaccessmanager.ResourcePrivilegedAccessManagerEntitlement(), "google_public_ca_external_account_key": publicca.ResourcePublicCAExternalAccountKey(), "google_pubsub_schema": pubsub.ResourcePubsubSchema(), "google_pubsub_schema_iam_binding": 
tpgiamresource.ResourceIamBinding(pubsub.PubsubSchemaIamSchema, pubsub.PubsubSchemaIamUpdaterProducer, pubsub.PubsubSchemaIdParseFunc), @@ -998,6 +1061,12 @@ var generatedResources = map[string]*schema.Resource{ "google_secret_manager_secret_iam_member": tpgiamresource.ResourceIamMember(secretmanager.SecretManagerSecretIamSchema, secretmanager.SecretManagerSecretIamUpdaterProducer, secretmanager.SecretManagerSecretIdParseFunc), "google_secret_manager_secret_iam_policy": tpgiamresource.ResourceIamPolicy(secretmanager.SecretManagerSecretIamSchema, secretmanager.SecretManagerSecretIamUpdaterProducer, secretmanager.SecretManagerSecretIdParseFunc), "google_secret_manager_secret_version": secretmanager.ResourceSecretManagerSecretVersion(), + "google_secret_manager_regional_secret": secretmanagerregional.ResourceSecretManagerRegionalRegionalSecret(), + "google_secret_manager_regional_secret_iam_binding": tpgiamresource.ResourceIamBinding(secretmanagerregional.SecretManagerRegionalRegionalSecretIamSchema, secretmanagerregional.SecretManagerRegionalRegionalSecretIamUpdaterProducer, secretmanagerregional.SecretManagerRegionalRegionalSecretIdParseFunc), + "google_secret_manager_regional_secret_iam_member": tpgiamresource.ResourceIamMember(secretmanagerregional.SecretManagerRegionalRegionalSecretIamSchema, secretmanagerregional.SecretManagerRegionalRegionalSecretIamUpdaterProducer, secretmanagerregional.SecretManagerRegionalRegionalSecretIdParseFunc), + "google_secret_manager_regional_secret_iam_policy": tpgiamresource.ResourceIamPolicy(secretmanagerregional.SecretManagerRegionalRegionalSecretIamSchema, secretmanagerregional.SecretManagerRegionalRegionalSecretIamUpdaterProducer, secretmanagerregional.SecretManagerRegionalRegionalSecretIdParseFunc), + "google_secret_manager_regional_secret_version": secretmanagerregional.ResourceSecretManagerRegionalRegionalSecretVersion(), + "google_secure_source_manager_branch_rule": securesourcemanager.ResourceSecureSourceManagerBranchRule(), "google_secure_source_manager_instance": securesourcemanager.ResourceSecureSourceManagerInstance(), "google_secure_source_manager_instance_iam_binding": tpgiamresource.ResourceIamBinding(securesourcemanager.SecureSourceManagerInstanceIamSchema, securesourcemanager.SecureSourceManagerInstanceIamUpdaterProducer, securesourcemanager.SecureSourceManagerInstanceIdParseFunc), "google_secure_source_manager_instance_iam_member": tpgiamresource.ResourceIamMember(securesourcemanager.SecureSourceManagerInstanceIamSchema, securesourcemanager.SecureSourceManagerInstanceIamUpdaterProducer, securesourcemanager.SecureSourceManagerInstanceIdParseFunc), @@ -1008,10 +1077,15 @@ var generatedResources = map[string]*schema.Resource{ "google_secure_source_manager_repository_iam_policy": tpgiamresource.ResourceIamPolicy(securesourcemanager.SecureSourceManagerRepositoryIamSchema, securesourcemanager.SecureSourceManagerRepositoryIamUpdaterProducer, securesourcemanager.SecureSourceManagerRepositoryIdParseFunc), "google_scc_event_threat_detection_custom_module": securitycenter.ResourceSecurityCenterEventThreatDetectionCustomModule(), "google_scc_folder_custom_module": securitycenter.ResourceSecurityCenterFolderCustomModule(), + "google_scc_folder_notification_config": securitycenter.ResourceSecurityCenterFolderNotificationConfig(), + "google_scc_folder_scc_big_query_export": securitycenter.ResourceSecurityCenterFolderSccBigQueryExport(), "google_scc_mute_config": securitycenter.ResourceSecurityCenterMuteConfig(), "google_scc_notification_config": 
securitycenter.ResourceSecurityCenterNotificationConfig(), "google_scc_organization_custom_module": securitycenter.ResourceSecurityCenterOrganizationCustomModule(), + "google_scc_organization_scc_big_query_export": securitycenter.ResourceSecurityCenterOrganizationSccBigQueryExport(), "google_scc_project_custom_module": securitycenter.ResourceSecurityCenterProjectCustomModule(), + "google_scc_project_notification_config": securitycenter.ResourceSecurityCenterProjectNotificationConfig(), + "google_scc_project_scc_big_query_export": securitycenter.ResourceSecurityCenterProjectSccBigQueryExport(), "google_scc_source": securitycenter.ResourceSecurityCenterSource(), "google_scc_source_iam_binding": tpgiamresource.ResourceIamBinding(securitycenter.SecurityCenterSourceIamSchema, securitycenter.SecurityCenterSourceIamUpdaterProducer, securitycenter.SecurityCenterSourceIdParseFunc), "google_scc_source_iam_member": tpgiamresource.ResourceIamMember(securitycenter.SecurityCenterSourceIamSchema, securitycenter.SecurityCenterSourceIamUpdaterProducer, securitycenter.SecurityCenterSourceIdParseFunc), @@ -1020,7 +1094,20 @@ var generatedResources = map[string]*schema.Resource{ "google_scc_management_organization_event_threat_detection_custom_module": securitycentermanagement.ResourceSecurityCenterManagementOrganizationEventThreatDetectionCustomModule(), "google_scc_management_organization_security_health_analytics_custom_module": securitycentermanagement.ResourceSecurityCenterManagementOrganizationSecurityHealthAnalyticsCustomModule(), "google_scc_management_project_security_health_analytics_custom_module": securitycentermanagement.ResourceSecurityCenterManagementProjectSecurityHealthAnalyticsCustomModule(), + "google_scc_v2_folder_mute_config": securitycenterv2.ResourceSecurityCenterV2FolderMuteConfig(), + "google_scc_v2_folder_notification_config": securitycenterv2.ResourceSecurityCenterV2FolderNotificationConfig(), + "google_scc_v2_folder_scc_big_query_export": securitycenterv2.ResourceSecurityCenterV2FolderSccBigQueryExport(), + "google_scc_v2_organization_mute_config": securitycenterv2.ResourceSecurityCenterV2OrganizationMuteConfig(), "google_scc_v2_organization_notification_config": securitycenterv2.ResourceSecurityCenterV2OrganizationNotificationConfig(), + "google_scc_v2_organization_scc_big_query_export": securitycenterv2.ResourceSecurityCenterV2OrganizationSccBigQueryExport(), + "google_scc_v2_organization_scc_big_query_exports": securitycenterv2.ResourceSecurityCenterV2OrganizationSccBigQueryExports(), + "google_scc_v2_organization_source": securitycenterv2.ResourceSecurityCenterV2OrganizationSource(), + "google_scc_v2_organization_source_iam_binding": tpgiamresource.ResourceIamBinding(securitycenterv2.SecurityCenterV2OrganizationSourceIamSchema, securitycenterv2.SecurityCenterV2OrganizationSourceIamUpdaterProducer, securitycenterv2.SecurityCenterV2OrganizationSourceIdParseFunc), + "google_scc_v2_organization_source_iam_member": tpgiamresource.ResourceIamMember(securitycenterv2.SecurityCenterV2OrganizationSourceIamSchema, securitycenterv2.SecurityCenterV2OrganizationSourceIamUpdaterProducer, securitycenterv2.SecurityCenterV2OrganizationSourceIdParseFunc), + "google_scc_v2_organization_source_iam_policy": tpgiamresource.ResourceIamPolicy(securitycenterv2.SecurityCenterV2OrganizationSourceIamSchema, securitycenterv2.SecurityCenterV2OrganizationSourceIamUpdaterProducer, securitycenterv2.SecurityCenterV2OrganizationSourceIdParseFunc), + "google_scc_v2_project_mute_config": 
securitycenterv2.ResourceSecurityCenterV2ProjectMuteConfig(), + "google_scc_v2_project_notification_config": securitycenterv2.ResourceSecurityCenterV2ProjectNotificationConfig(), + "google_scc_v2_project_scc_big_query_export": securitycenterv2.ResourceSecurityCenterV2ProjectSccBigQueryExport(), "google_securityposture_posture": securityposture.ResourceSecurityposturePosture(), "google_securityposture_posture_deployment": securityposture.ResourceSecurityposturePostureDeployment(), "google_endpoints_service_iam_binding": tpgiamresource.ResourceIamBinding(servicemanagement.ServiceManagementServiceIamSchema, servicemanagement.ServiceManagementServiceIamUpdaterProducer, servicemanagement.ServiceManagementServiceIdParseFunc), @@ -1030,10 +1117,12 @@ var generatedResources = map[string]*schema.Resource{ "google_endpoints_service_consumers_iam_member": tpgiamresource.ResourceIamMember(servicemanagement.ServiceManagementServiceConsumersIamSchema, servicemanagement.ServiceManagementServiceConsumersIamUpdaterProducer, servicemanagement.ServiceManagementServiceConsumersIdParseFunc), "google_endpoints_service_consumers_iam_policy": tpgiamresource.ResourceIamPolicy(servicemanagement.ServiceManagementServiceConsumersIamSchema, servicemanagement.ServiceManagementServiceConsumersIamUpdaterProducer, servicemanagement.ServiceManagementServiceConsumersIdParseFunc), "google_service_networking_vpc_service_controls": servicenetworking.ResourceServiceNetworkingVPCServiceControls(), + "google_site_verification_web_resource": siteverification.ResourceSiteVerificationWebResource(), "google_sourcerepo_repository": sourcerepo.ResourceSourceRepoRepository(), "google_sourcerepo_repository_iam_binding": tpgiamresource.ResourceIamBinding(sourcerepo.SourceRepoRepositoryIamSchema, sourcerepo.SourceRepoRepositoryIamUpdaterProducer, sourcerepo.SourceRepoRepositoryIdParseFunc), "google_sourcerepo_repository_iam_member": tpgiamresource.ResourceIamMember(sourcerepo.SourceRepoRepositoryIamSchema, sourcerepo.SourceRepoRepositoryIamUpdaterProducer, sourcerepo.SourceRepoRepositoryIdParseFunc), "google_sourcerepo_repository_iam_policy": tpgiamresource.ResourceIamPolicy(sourcerepo.SourceRepoRepositoryIamSchema, sourcerepo.SourceRepoRepositoryIamUpdaterProducer, sourcerepo.SourceRepoRepositoryIdParseFunc), + "google_spanner_backup_schedule": spanner.ResourceSpannerBackupSchedule(), "google_spanner_database": spanner.ResourceSpannerDatabase(), "google_spanner_instance": spanner.ResourceSpannerInstance(), "google_spanner_instance_config": spanner.ResourceSpannerInstanceConfig(), @@ -1059,6 +1148,8 @@ var generatedResources = map[string]*schema.Resource{ "google_tags_tag_value_iam_member": tpgiamresource.ResourceIamMember(tags.TagsTagValueIamSchema, tags.TagsTagValueIamUpdaterProducer, tags.TagsTagValueIdParseFunc), "google_tags_tag_value_iam_policy": tpgiamresource.ResourceIamPolicy(tags.TagsTagValueIamSchema, tags.TagsTagValueIamUpdaterProducer, tags.TagsTagValueIdParseFunc), "google_tpu_node": tpu.ResourceTPUNode(), + "google_transcoder_job": transcoder.ResourceTranscoderJob(), + "google_transcoder_job_template": transcoder.ResourceTranscoderJobTemplate(), "google_vertex_ai_dataset": vertexai.ResourceVertexAIDataset(), "google_vertex_ai_deployment_resource_pool": vertexai.ResourceVertexAIDeploymentResourcePool(), "google_vertex_ai_endpoint": vertexai.ResourceVertexAIEndpoint(), @@ -1071,6 +1162,7 @@ var generatedResources = map[string]*schema.Resource{ "google_vertex_ai_featurestore_entitytype_feature": 
vertexai.ResourceVertexAIFeaturestoreEntitytypeFeature(), "google_vertex_ai_index": vertexai.ResourceVertexAIIndex(), "google_vertex_ai_index_endpoint": vertexai.ResourceVertexAIIndexEndpoint(), + "google_vertex_ai_index_endpoint_deployed_index": vertexai.ResourceVertexAIIndexEndpointDeployedIndex(), "google_vertex_ai_tensorboard": vertexai.ResourceVertexAITensorboard(), "google_vmwareengine_cluster": vmwareengine.ResourceVmwareengineCluster(), "google_vmwareengine_external_access_rule": vmwareengine.ResourceVmwareengineExternalAccessRule(), @@ -1163,6 +1255,7 @@ var handwrittenResources = map[string]*schema.Resource{ "google_service_account": resourcemanager.ResourceGoogleServiceAccount(), "google_service_account_key": resourcemanager.ResourceGoogleServiceAccountKey(), "google_service_networking_peered_dns_domain": servicenetworking.ResourceGoogleServiceNetworkingPeeredDNSDomain(), + "google_site_verification_owner": siteverification.ResourceSiteVerificationOwner(), "google_storage_bucket": storage.ResourceStorageBucket(), "google_storage_bucket_acl": storage.ResourceStorageBucketAcl(), "google_storage_bucket_object": storage.ResourceStorageBucketObject(), @@ -1183,7 +1276,7 @@ var handwrittenIAMResources = map[string]*schema.Resource{ "google_bigtable_table_iam_member": tpgiamresource.ResourceIamMember(bigtable.IamBigtableTableSchema, bigtable.NewBigtableTableUpdater, bigtable.BigtableTableIdParseFunc), "google_bigtable_table_iam_policy": tpgiamresource.ResourceIamPolicy(bigtable.IamBigtableTableSchema, bigtable.NewBigtableTableUpdater, bigtable.BigtableTableIdParseFunc), "google_bigquery_dataset_iam_binding": tpgiamresource.ResourceIamBinding(bigquery.IamBigqueryDatasetSchema, bigquery.NewBigqueryDatasetIamUpdater, bigquery.BigqueryDatasetIdParseFunc), - "google_bigquery_dataset_iam_member": tpgiamresource.ResourceIamMember(bigquery.IamBigqueryDatasetSchema, bigquery.NewBigqueryDatasetIamUpdater, bigquery.BigqueryDatasetIdParseFunc), + "google_bigquery_dataset_iam_member": tpgiamresource.ResourceIamMember(bigquery.IamMemberBigqueryDatasetSchema, bigquery.NewBigqueryDatasetIamMemberUpdater, bigquery.BigqueryDatasetIdParseFunc), "google_bigquery_dataset_iam_policy": tpgiamresource.ResourceIamPolicy(bigquery.IamBigqueryDatasetSchema, bigquery.NewBigqueryDatasetIamUpdater, bigquery.BigqueryDatasetIdParseFunc), "google_billing_account_iam_binding": tpgiamresource.ResourceIamBinding(billing.IamBillingAccountSchema, billing.NewBillingAccountIamUpdater, billing.BillingAccountIdParseFunc), "google_billing_account_iam_member": tpgiamresource.ResourceIamMember(billing.IamBillingAccountSchema, billing.NewBillingAccountIamUpdater, billing.BillingAccountIdParseFunc), diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_access_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_access_policy.go index 3616d09afe4..86da43776a4 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_access_policy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_access_policy.go @@ -54,7 +54,7 @@ func ResourceAccessContextManagerAccessPolicy() *schema.Resource { Required: true, ForceNew: true, 
Description: `The parent of this AccessPolicy in the Cloud Resource Hierarchy. -Format: organizations/{organization_id}`, +Format: 'organizations/{{organization_id}}'`, }, "title": { Type: schema.TypeString, @@ -65,7 +65,7 @@ Format: organizations/{organization_id}`, Type: schema.TypeList, Optional: true, Description: `Folder or project on which this policy is applicable. -Format: folders/{{folder_id}} or projects/{{project_id}}`, +Format: 'folders/{{folder_id}}' or 'projects/{{project_number}}'`, MaxItems: 1, Elem: &schema.Schema{ Type: schema.TypeString, @@ -79,7 +79,7 @@ Format: folders/{{folder_id}} or projects/{{project_id}}`, "name": { Type: schema.TypeString, Computed: true, - Description: `Resource name of the AccessPolicy. Format: {policy_id}`, + Description: `Resource name of the AccessPolicy. Format: '{{policy_id}}'`, }, "update_time": { Type: schema.TypeString, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter.go index 9096e98427c..9068ce3a10b 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter.go @@ -689,7 +689,10 @@ If * is specified, then all IngressSources will be allowed.`, Optional: true, Description: `A Google Cloud resource that is allowed to ingress the perimeter. Requests from these resources will be allowed to access perimeter data. -Currently only projects are allowed. Format 'projects/{project_number}' +Currently only projects and VPCs are allowed. +Project format: 'projects/{projectNumber}' +VPC network format: +'//compute.googleapis.com/projects/{PROJECT_ID}/global/networks/{NAME}'. The project may be in any Google Cloud organization, not just the organization that the perimeter is defined in. '*' is not allowed, the case of allowing all Google Cloud resources only is not supported.`, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_dry_run_egress_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_dry_run_egress_policy.go new file mode 100644 index 00000000000..884455c143f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_dry_run_egress_policy.go @@ -0,0 +1,984 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package accesscontextmanager + +import ( + "fmt" + "log" + "net/http" + "reflect" + "slices" + "sort" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func AccessContextManagerServicePerimeterDryRunEgressPolicyEgressToResourcesDiffSupressFunc(_, _, _ string, d *schema.ResourceData) bool { + old, new := d.GetChange("egress_to.0.resources") + + oldResources, err := tpgresource.InterfaceSliceToStringSlice(old) + if err != nil { + log.Printf("[ERROR] Failed to convert config value: %s", err) + return false + } + + newResources, err := tpgresource.InterfaceSliceToStringSlice(new) + if err != nil { + log.Printf("[ERROR] Failed to convert config value: %s", err) + return false + } + + sort.Strings(oldResources) + sort.Strings(newResources) + + return slices.Equal(oldResources, newResources) +} + +func AccessContextManagerServicePerimeterDryRunEgressPolicyIngressToResourcesDiffSupressFunc(_, _, _ string, d *schema.ResourceData) bool { + old, new := d.GetChange("ingress_to.0.resources") + + oldResources, err := tpgresource.InterfaceSliceToStringSlice(old) + if err != nil { + log.Printf("[ERROR] Failed to convert config value: %s", err) + return false + } + + newResources, err := tpgresource.InterfaceSliceToStringSlice(new) + if err != nil { + log.Printf("[ERROR] Failed to convert config value: %s", err) + return false + } + + sort.Strings(oldResources) + sort.Strings(newResources) + + return slices.Equal(oldResources, newResources) +} + +func ResourceAccessContextManagerServicePerimeterDryRunEgressPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceAccessContextManagerServicePerimeterDryRunEgressPolicyCreate, + Read: resourceAccessContextManagerServicePerimeterDryRunEgressPolicyRead, + Delete: resourceAccessContextManagerServicePerimeterDryRunEgressPolicyDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "perimeter": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the Service Perimeter to add this resource to.`, + }, + "egress_from": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Defines conditions on the source of a request causing this 'EgressPolicy' to apply.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "identities": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `A list of identities that are allowed access through this 'EgressPolicy'. +Should be in the format of email address. The email address should +represent individual user or service account only.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "identity_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}), + Description: `Specifies the type of identities that are allowed access to outside the +perimeter. If left unspecified, then members of 'identities' field will +be allowed access. 
Possible values: ["ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT"]`, + }, + "source_restriction": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"SOURCE_RESTRICTION_ENABLED", "SOURCE_RESTRICTION_DISABLED", ""}), + Description: `Whether to enforce traffic restrictions based on 'sources' field. If the 'sources' field is non-empty, then this field must be set to 'SOURCE_RESTRICTION_ENABLED'. Possible values: ["SOURCE_RESTRICTION_ENABLED", "SOURCE_RESTRICTION_DISABLED"]`, + }, + "sources": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Sources that this EgressPolicy authorizes access from.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "access_level": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside.`, + }, + }, + }, + }, + }, + }, + }, + "egress_to": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Defines the conditions on the 'ApiOperation' and destination resources that +cause this 'EgressPolicy' to apply.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "external_resources": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `A list of external resources that are allowed to be accessed. A request +matches if it contains an external resource in this list (Example: +s3://bucket/path). Currently '*' is not allowed.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "operations": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `A list of 'ApiOperations' that this egress rule applies to. A request matches +if it contains an operation/service in this list.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "method_selectors": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `API methods or permissions to allow. Method or permission must belong +to the service specified by 'serviceName' field. A single MethodSelector +entry with '*' specified for the 'method' field will allow all methods +AND permissions for the service specified in 'serviceName'.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "method": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Value for 'method' should be a valid method name for the corresponding +'serviceName' in 'ApiOperation'. If '*' used as value for method, +then ALL methods and permissions are allowed.`, + }, + "permission": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Value for permission should be a valid Cloud IAM permission for the +corresponding 'serviceName' in 'ApiOperation'.`, + }, + }, + }, + }, + "service_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The name of the API whose methods or permissions the 'IngressPolicy' or +'EgressPolicy' want to allow. A single 'ApiOperation' with serviceName +field set to '*' will allow all methods AND permissions for all services.`, + }, + }, + }, + }, + "resources": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + DiffSuppressFunc: AccessContextManagerServicePerimeterDryRunEgressPolicyEgressToResourcesDiffSupressFunc, + Description: `A list of resources, currently only projects in the form +'projects/', that match this to stanza. 
A request matches +if it contains a resource in this list. If * is specified for resources, +then this 'EgressTo' rule will authorize access to all resources outside +the perimeter.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + UseJSONNumber: true, + } +} + +func resourceAccessContextManagerServicePerimeterDryRunEgressPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + egressFromProp, err := expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFrom(d.Get("egress_from"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("egress_from"); !tpgresource.IsEmptyValue(reflect.ValueOf(egressFromProp)) && (ok || !reflect.DeepEqual(v, egressFromProp)) { + obj["egressFrom"] = egressFromProp + } + egressToProp, err := expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressTo(d.Get("egress_to"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("egress_to"); !tpgresource.IsEmptyValue(reflect.ValueOf(egressToProp)) && (ok || !reflect.DeepEqual(v, egressToProp)) { + obj["egressTo"] = egressToProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "{{perimeter}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ServicePerimeterDryRunEgressPolicy: %#v", obj) + + obj, err = resourceAccessContextManagerServicePerimeterDryRunEgressPolicyPatchCreateEncoder(d, meta, obj) + if err != nil { + return err + } + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": "spec.egressPolicies"}) + if err != nil { + return err + } + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + obj["use_explicit_dry_run_spec"] = true + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating ServicePerimeterDryRunEgressPolicy: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{perimeter}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = AccessContextManagerOperationWaitTimeWithResponse( + config, res, &opRes, "Creating ServicePerimeterDryRunEgressPolicy", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create ServicePerimeterDryRunEgressPolicy: %s", err) + } + + if _, ok := opRes["spec"]; ok { + opRes, err = flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicy(d, meta, opRes) + if err != nil { + return fmt.Errorf("Error getting nested object from operation 
response: %s", err) + } + if opRes == nil { + // Object isn't there any more - remove it from the state. + return fmt.Errorf("Error decoding response from operation, could not find nested object") + } + } + if err := d.Set("egress_from", flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFrom(opRes["egressFrom"], d, config)); err != nil { + return err + } + if err := d.Set("egress_to", flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressTo(opRes["egressTo"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "{{perimeter}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating ServicePerimeterDryRunEgressPolicy %q: %#v", d.Id(), res) + + return resourceAccessContextManagerServicePerimeterDryRunEgressPolicyRead(d, meta) +} + +func resourceAccessContextManagerServicePerimeterDryRunEgressPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AccessContextManagerServicePerimeterDryRunEgressPolicy %q", d.Id())) + } + + res, err = flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicy(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. 
+ log.Printf("[DEBUG] Removing AccessContextManagerServicePerimeterDryRunEgressPolicy because it couldn't be matched.") + d.SetId("") + return nil + } + + if err := d.Set("egress_from", flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFrom(res["egressFrom"], d, config)); err != nil { + return fmt.Errorf("Error reading ServicePerimeterDryRunEgressPolicy: %s", err) + } + if err := d.Set("egress_to", flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressTo(res["egressTo"], d, config)); err != nil { + return fmt.Errorf("Error reading ServicePerimeterDryRunEgressPolicy: %s", err) + } + + return nil +} + +func resourceAccessContextManagerServicePerimeterDryRunEgressPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + lockName, err := tpgresource.ReplaceVars(d, config, "{{perimeter}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + obj, err = resourceAccessContextManagerServicePerimeterDryRunEgressPolicyPatchDeleteEncoder(d, meta, obj) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ServicePerimeterDryRunEgressPolicy") + } + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": "spec.egressPolicies"}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + obj["use_explicit_dry_run_spec"] = true + + log.Printf("[DEBUG] Deleting ServicePerimeterDryRunEgressPolicy %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ServicePerimeterDryRunEgressPolicy") + } + + err = AccessContextManagerOperationWaitTime( + config, res, "Deleting ServicePerimeterDryRunEgressPolicy", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting ServicePerimeterDryRunEgressPolicy %q: %#v", d.Id(), res) + return nil +} + +func flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["identity_type"] = + flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFromIdentityType(original["identityType"], d, config) + transformed["identities"] = + flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFromIdentities(original["identities"], d, config) + transformed["sources"] = + flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFromSources(original["sources"], d, config) + transformed["source_restriction"] = + 
flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFromSourceRestriction(original["sourceRestriction"], d, config) + return []interface{}{transformed} +} +func flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFromIdentityType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFromIdentities(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFromSources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "access_level": flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFromSourcesAccessLevel(original["accessLevel"], d, config), + }) + } + return transformed +} +func flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFromSourcesAccessLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFromSourceRestriction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressTo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["resources"] = + flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToResources(original["resources"], d, config) + transformed["external_resources"] = + flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToExternalResources(original["externalResources"], d, config) + transformed["operations"] = + flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToOperations(original["operations"], d, config) + return []interface{}{transformed} +} +func flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + rawConfigValue := d.Get("egress_to.0.resources") + + // Convert config value to []string + configValue, err := tpgresource.InterfaceSliceToStringSlice(rawConfigValue) + if err != nil { + log.Printf("[ERROR] Failed to convert config value: %s", err) + return v + } + + // Convert v to []string + apiStringValue, err := tpgresource.InterfaceSliceToStringSlice(v) + if err != nil { + log.Printf("[ERROR] Failed to convert API value: %s", err) + return v + } + + sortedStrings, err := tpgresource.SortStringsByConfigOrder(configValue, apiStringValue) + if err != nil { + log.Printf("[ERROR] Could not sort API response value: %s", err) + return v + } + + return sortedStrings +} + +func flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToExternalResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} 
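+
+// Editor's note: the resources flattener above defers to
+// tpgresource.SortStringsByConfigOrder so that an API response that merely
+// reorders the configured strings does not surface as a diff. A minimal,
+// illustrative sketch of that idea follows; the helper name
+// sortByConfigOrder and its tie-breaking rules are hypothetical, not the
+// vendored implementation:
+//
+//	func sortByConfigOrder(config, api []string) []string {
+//		rank := make(map[string]int, len(config))
+//		for i, s := range config {
+//			rank[s] = i
+//		}
+//		out := append([]string(nil), api...)
+//		sort.SliceStable(out, func(i, j int) bool {
+//			ri, iOK := rank[out[i]]
+//			rj, jOK := rank[out[j]]
+//			if iOK && jOK {
+//				return ri < rj // both configured: keep config order
+//			}
+//			if iOK != jOK {
+//				return iOK // configured values sort before unknown ones
+//			}
+//			return out[i] < out[j] // neither configured: lexical fallback
+//		})
+//		return out
+//	}
+//
+// With config ["a", "b"] and an API value of ["b", "x", "a"], this yields
+// ["a", "b", "x"].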
+ +func flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToOperations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "service_name": flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToOperationsServiceName(original["serviceName"], d, config), + "method_selectors": flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToOperationsMethodSelectors(original["methodSelectors"], d, config), + }) + } + return transformed +} +func flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "method": flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToOperationsMethodSelectorsMethod(original["method"], d, config), + "permission": flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToOperationsMethodSelectorsPermission(original["permission"], d, config), + }) + } + return transformed +} +func flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFrom(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIdentityType, err := expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFromIdentityType(original["identity_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIdentityType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["identityType"] = transformedIdentityType + } + + transformedIdentities, err := expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFromIdentities(original["identities"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIdentities); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["identities"] = transformedIdentities + } + + transformedSources, err := 
expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFromSources(original["sources"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSources); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sources"] = transformedSources + } + + transformedSourceRestriction, err := expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFromSourceRestriction(original["source_restriction"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSourceRestriction); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sourceRestriction"] = transformedSourceRestriction + } + + return transformed, nil +} + +func expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFromIdentityType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFromIdentities(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFromSources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAccessLevel, err := expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFromSourcesAccessLevel(original["access_level"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAccessLevel); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["accessLevel"] = transformedAccessLevel + } + + req = append(req, transformed) + } + return req, nil +} + +func expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFromSourcesAccessLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFromSourceRestriction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressTo(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedResources, err := expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToResources(original["resources"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resources"] = transformedResources + } + + transformedExternalResources, err := expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToExternalResources(original["external_resources"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExternalResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["externalResources"] = transformedExternalResources + } + + transformedOperations, err := expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToOperations(original["operations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOperations); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["operations"] = transformedOperations + } + + return transformed, nil +} + +func expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToExternalResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToOperations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedServiceName, err := expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToOperationsServiceName(original["service_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serviceName"] = transformedServiceName + } + + transformedMethodSelectors, err := expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToOperationsMethodSelectors(original["method_selectors"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["methodSelectors"] = transformedMethodSelectors + } + + req = append(req, transformed) + } + return req, nil +} + +func expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToOperationsServiceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToOperationsMethodSelectors(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMethod, err := expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToOperationsMethodSelectorsMethod(original["method"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMethod); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["method"] = transformedMethod + } + + transformedPermission, err := expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToOperationsMethodSelectorsPermission(original["permission"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPermission); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["permission"] = transformedPermission + } + + req = append(req, 
transformed) + } + return req, nil +} + +func expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToOperationsMethodSelectorsMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressToOperationsMethodSelectorsPermission(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicy(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["spec"] + if !ok || v == nil { + return nil, nil + } + res = v.(map[string]interface{}) + + v, ok = res["egressPolicies"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value spec.egressPolicies. Actual value: %v", v) + } + + _, item, err := resourceAccessContextManagerServicePerimeterDryRunEgressPolicyFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceAccessContextManagerServicePerimeterDryRunEgressPolicyFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedEgressFrom, err := expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFrom(d.Get("egress_from"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedEgressFrom := flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFrom(expectedEgressFrom, d, meta.(*transport_tpg.Config)) + expectedEgressTo, err := expandNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressTo(d.Get("egress_to"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedEgressTo := flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressTo(expectedEgressTo, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. 
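+ // Both the expected values (expanded from config, then re-flattened) and
+ // each candidate item (flattened from the API response) are normalized to
+ // the same shape before the reflect.DeepEqual comparison below.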
+ for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + item := itemRaw.(map[string]interface{}) + + itemEgressFrom := flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressFrom(item["egressFrom"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemEgressFrom)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedEgressFrom))) && !reflect.DeepEqual(itemEgressFrom, expectedFlattenedEgressFrom) { + log.Printf("[DEBUG] Skipping item with egressFrom= %#v, looking for %#v)", itemEgressFrom, expectedFlattenedEgressFrom) + continue + } + itemEgressTo := flattenNestedAccessContextManagerServicePerimeterDryRunEgressPolicyEgressTo(item["egressTo"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemEgressTo)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedEgressTo))) && !reflect.DeepEqual(itemEgressTo, expectedFlattenedEgressTo) { + log.Printf("[DEBUG] Skipping item with egressTo= %#v, looking for %#v)", itemEgressTo, expectedFlattenedEgressTo) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} + +// PatchCreateEncoder handles creating request data to PATCH parent resource +// with list including new object. +func resourceAccessContextManagerServicePerimeterDryRunEgressPolicyPatchCreateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceAccessContextManagerServicePerimeterDryRunEgressPolicyListForPatch(d, meta) + if err != nil { + return nil, err + } + + _, found, err := resourceAccessContextManagerServicePerimeterDryRunEgressPolicyFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + + // Return error if item already created. + if found != nil { + return nil, fmt.Errorf("Unable to create ServicePerimeterDryRunEgressPolicy, existing object already found: %+v", found) + } + + // Return list with the resource to create appended + res := map[string]interface{}{ + "egressPolicies": append(currItems, obj), + } + wrapped := map[string]interface{}{ + "spec": res, + } + res = wrapped + + return res, nil +} + +// PatchDeleteEncoder handles creating request data to PATCH parent resource +// with list excluding object to delete. +func resourceAccessContextManagerServicePerimeterDryRunEgressPolicyPatchDeleteEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceAccessContextManagerServicePerimeterDryRunEgressPolicyListForPatch(d, meta) + if err != nil { + return nil, err + } + + idx, item, err := resourceAccessContextManagerServicePerimeterDryRunEgressPolicyFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + if item == nil { + // Spoof 404 error for proper handling by Delete (i.e. no-op) + return nil, tpgresource.Fake404("nested", "AccessContextManagerServicePerimeterDryRunEgressPolicy") + } + + updatedItems := append(currItems[:idx], currItems[idx+1:]...) 
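+ // (The PATCH body built below replaces the entire spec.egressPolicies
+ // array with the fetched list minus the matched entry; the updateMask is
+ // supplied by the caller in Delete.)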
+ res := map[string]interface{}{ + "egressPolicies": updatedItems, + } + wrapped := map[string]interface{}{ + "spec": res, + } + res = wrapped + + return res, nil +} + +// ListForPatch handles making API request to get parent resource and +// extracting list of objects. +func resourceAccessContextManagerServicePerimeterDryRunEgressPolicyListForPatch(d *schema.ResourceData, meta interface{}) ([]interface{}, error) { + config := meta.(*transport_tpg.Config) + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter}}") + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return nil, err + } + + var v interface{} + var ok bool + if v, ok = res["spec"]; ok && v != nil { + res = v.(map[string]interface{}) + } else { + return nil, nil + } + + v, ok = res["egressPolicies"] + if ok && v != nil { + ls, lsOk := v.([]interface{}) + if !lsOk { + return nil, fmt.Errorf(`expected list for nested field "egressPolicies"`) + } + return ls, nil + } + return nil, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_dry_run_ingress_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_dry_run_ingress_policy.go new file mode 100644 index 00000000000..ee6529a6aaa --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_dry_run_ingress_policy.go @@ -0,0 +1,970 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package accesscontextmanager + +import ( + "fmt" + "log" + "net/http" + "reflect" + "slices" + "sort" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func AccessContextManagerServicePerimeterDryRunIngressPolicyEgressToResourcesDiffSupressFunc(_, _, _ string, d *schema.ResourceData) bool { + old, new := d.GetChange("egress_to.0.resources") + + oldResources, err := tpgresource.InterfaceSliceToStringSlice(old) + if err != nil { + log.Printf("[ERROR] Failed to convert config value: %s", err) + return false + } + + newResources, err := tpgresource.InterfaceSliceToStringSlice(new) + if err != nil { + log.Printf("[ERROR] Failed to convert config value: %s", err) + return false + } + + sort.Strings(oldResources) + sort.Strings(newResources) + + return slices.Equal(oldResources, newResources) +} + +func AccessContextManagerServicePerimeterDryRunIngressPolicyIngressToResourcesDiffSupressFunc(_, _, _ string, d *schema.ResourceData) bool { + old, new := d.GetChange("ingress_to.0.resources") + + oldResources, err := tpgresource.InterfaceSliceToStringSlice(old) + if err != nil { + log.Printf("[ERROR] Failed to convert config value: %s", err) + return false + } + + newResources, err := tpgresource.InterfaceSliceToStringSlice(new) + if err != nil { + log.Printf("[ERROR] Failed to convert config value: %s", err) + return false + } + + sort.Strings(oldResources) + sort.Strings(newResources) + + return slices.Equal(oldResources, newResources) +} + +func ResourceAccessContextManagerServicePerimeterDryRunIngressPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceAccessContextManagerServicePerimeterDryRunIngressPolicyCreate, + Read: resourceAccessContextManagerServicePerimeterDryRunIngressPolicyRead, + Delete: resourceAccessContextManagerServicePerimeterDryRunIngressPolicyDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "perimeter": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the Service Perimeter to add this resource to.`, + }, + "ingress_from": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Defines the conditions on the source of a request causing this 'IngressPolicy' +to apply.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "identities": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `A list of identities that are allowed access through this ingress policy. +Should be in the format of email address. The email address should represent +individual user or service account only.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "identity_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}), + Description: `Specifies the type of identities that are allowed access from outside the +perimeter. If left unspecified, then members of 'identities' field will be +allowed access. 
Possible values: ["ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT"]`, + }, + "sources": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Sources that this 'IngressPolicy' authorizes access from.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "access_level": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An 'AccessLevel' resource name that allow resources within the +'ServicePerimeters' to be accessed from the internet. 'AccessLevels' listed +must be in the same policy as this 'ServicePerimeter'. Referencing a nonexistent +'AccessLevel' will cause an error. If no 'AccessLevel' names are listed, +resources within the perimeter can only be accessed via Google Cloud calls +with request origins within the perimeter. +Example 'accessPolicies/MY_POLICY/accessLevels/MY_LEVEL.' +If * is specified, then all IngressSources will be allowed.`, + }, + "resource": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `A Google Cloud resource that is allowed to ingress the perimeter. +Requests from these resources will be allowed to access perimeter data. +Currently only projects are allowed. Format 'projects/{project_number}' +The project may be in any Google Cloud organization, not just the +organization that the perimeter is defined in. '*' is not allowed, the case +of allowing all Google Cloud resources only is not supported.`, + }, + }, + }, + }, + }, + }, + }, + "ingress_to": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Defines the conditions on the 'ApiOperation' and request destination that cause +this 'IngressPolicy' to apply.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "operations": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `A list of 'ApiOperations' the sources specified in corresponding 'IngressFrom' +are allowed to perform in this 'ServicePerimeter'.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "method_selectors": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `API methods or permissions to allow. Method or permission must belong to +the service specified by serviceName field. A single 'MethodSelector' entry +with '*' specified for the method field will allow all methods AND +permissions for the service specified in 'serviceName'.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "method": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Value for method should be a valid method name for the corresponding +serviceName in 'ApiOperation'. If '*' used as value for 'method', then +ALL methods and permissions are allowed.`, + }, + "permission": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Value for permission should be a valid Cloud IAM permission for the +corresponding 'serviceName' in 'ApiOperation'.`, + }, + }, + }, + }, + "service_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The name of the API whose methods or permissions the 'IngressPolicy' or +'EgressPolicy' want to allow. 
A single 'ApiOperation' with 'serviceName' +field set to '*' will allow all methods AND permissions for all services.`, + }, + }, + }, + }, + "resources": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + DiffSuppressFunc: AccessContextManagerServicePerimeterDryRunIngressPolicyIngressToResourcesDiffSupressFunc, + Description: `A list of resources, currently only projects in the form +'projects/', protected by this 'ServicePerimeter' +that are allowed to be accessed by sources defined in the +corresponding 'IngressFrom'. A request matches if it contains +a resource in this list. If '*' is specified for resources, +then this 'IngressTo' rule will authorize access to all +resources inside the perimeter, provided that the request +also matches the 'operations' field.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + UseJSONNumber: true, + } +} + +func resourceAccessContextManagerServicePerimeterDryRunIngressPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + ingressFromProp, err := expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFrom(d.Get("ingress_from"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ingress_from"); !tpgresource.IsEmptyValue(reflect.ValueOf(ingressFromProp)) && (ok || !reflect.DeepEqual(v, ingressFromProp)) { + obj["ingressFrom"] = ingressFromProp + } + ingressToProp, err := expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressTo(d.Get("ingress_to"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ingress_to"); !tpgresource.IsEmptyValue(reflect.ValueOf(ingressToProp)) && (ok || !reflect.DeepEqual(v, ingressToProp)) { + obj["ingressTo"] = ingressToProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "{{perimeter}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ServicePerimeterDryRunIngressPolicy: %#v", obj) + + obj, err = resourceAccessContextManagerServicePerimeterDryRunIngressPolicyPatchCreateEncoder(d, meta, obj) + if err != nil { + return err + } + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": "spec.ingressPolicies"}) + if err != nil { + return err + } + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + obj["use_explicit_dry_run_spec"] = true + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating ServicePerimeterDryRunIngressPolicy: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{perimeter}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity 
fields and d.Id() before read + var opRes map[string]interface{} + err = AccessContextManagerOperationWaitTimeWithResponse( + config, res, &opRes, "Creating ServicePerimeterDryRunIngressPolicy", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create ServicePerimeterDryRunIngressPolicy: %s", err) + } + + if _, ok := opRes["spec"]; ok { + opRes, err = flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicy(d, meta, opRes) + if err != nil { + return fmt.Errorf("Error getting nested object from operation response: %s", err) + } + if opRes == nil { + // Object isn't there any more - remove it from the state. + return fmt.Errorf("Error decoding response from operation, could not find nested object") + } + } + if err := d.Set("ingress_from", flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFrom(opRes["ingressFrom"], d, config)); err != nil { + return err + } + if err := d.Set("ingress_to", flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressTo(opRes["ingressTo"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "{{perimeter}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating ServicePerimeterDryRunIngressPolicy %q: %#v", d.Id(), res) + + return resourceAccessContextManagerServicePerimeterDryRunIngressPolicyRead(d, meta) +} + +func resourceAccessContextManagerServicePerimeterDryRunIngressPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AccessContextManagerServicePerimeterDryRunIngressPolicy %q", d.Id())) + } + + res, err = flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicy(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. 
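+ // (As in the dry-run egress policy resource: the policy is matched by
+ // content against spec.ingressPolicies, so nil here means no entry
+ // matched this resource's ingress_from/ingress_to blocks.)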
+ log.Printf("[DEBUG] Removing AccessContextManagerServicePerimeterDryRunIngressPolicy because it couldn't be matched.") + d.SetId("") + return nil + } + + if err := d.Set("ingress_from", flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFrom(res["ingressFrom"], d, config)); err != nil { + return fmt.Errorf("Error reading ServicePerimeterDryRunIngressPolicy: %s", err) + } + if err := d.Set("ingress_to", flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressTo(res["ingressTo"], d, config)); err != nil { + return fmt.Errorf("Error reading ServicePerimeterDryRunIngressPolicy: %s", err) + } + + return nil +} + +func resourceAccessContextManagerServicePerimeterDryRunIngressPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + lockName, err := tpgresource.ReplaceVars(d, config, "{{perimeter}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + obj, err = resourceAccessContextManagerServicePerimeterDryRunIngressPolicyPatchDeleteEncoder(d, meta, obj) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ServicePerimeterDryRunIngressPolicy") + } + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": "spec.ingressPolicies"}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + obj["use_explicit_dry_run_spec"] = true + + log.Printf("[DEBUG] Deleting ServicePerimeterDryRunIngressPolicy %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ServicePerimeterDryRunIngressPolicy") + } + + err = AccessContextManagerOperationWaitTime( + config, res, "Deleting ServicePerimeterDryRunIngressPolicy", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting ServicePerimeterDryRunIngressPolicy %q: %#v", d.Id(), res) + return nil +} + +func flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["identity_type"] = + flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFromIdentityType(original["identityType"], d, config) + transformed["identities"] = + flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFromIdentities(original["identities"], d, config) + transformed["sources"] = + flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFromSources(original["sources"], d, config) + return []interface{}{transformed} +} +func 
flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFromIdentityType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFromIdentities(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFromSources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "access_level": flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFromSourcesAccessLevel(original["accessLevel"], d, config), + "resource": flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFromSourcesResource(original["resource"], d, config), + }) + } + return transformed +} +func flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFromSourcesAccessLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFromSourcesResource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressTo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["resources"] = + flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressToResources(original["resources"], d, config) + transformed["operations"] = + flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressToOperations(original["operations"], d, config) + return []interface{}{transformed} +} +func flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressToResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + rawConfigValue := d.Get("ingress_to.0.resources") + + // Convert config value to []string + configValue, err := tpgresource.InterfaceSliceToStringSlice(rawConfigValue) + if err != nil { + log.Printf("[ERROR] Failed to convert config value: %s", err) + return v + } + + // Convert v to []string + apiStringValue, err := tpgresource.InterfaceSliceToStringSlice(v) + if err != nil { + log.Printf("[ERROR] Failed to convert API value: %s", err) + return v + } + + sortedStrings, err := tpgresource.SortStringsByConfigOrder(configValue, apiStringValue) + if err != nil { + log.Printf("[ERROR] Could not sort API response value: %s", err) + return v + } + + return sortedStrings +} + +func flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressToOperations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not 
include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "service_name": flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressToOperationsServiceName(original["serviceName"], d, config), + "method_selectors": flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressToOperationsMethodSelectors(original["methodSelectors"], d, config), + }) + } + return transformed +} +func flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "method": flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressToOperationsMethodSelectorsMethod(original["method"], d, config), + "permission": flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressToOperationsMethodSelectorsPermission(original["permission"], d, config), + }) + } + return transformed +} +func flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFrom(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIdentityType, err := expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFromIdentityType(original["identity_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIdentityType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["identityType"] = transformedIdentityType + } + + transformedIdentities, err := expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFromIdentities(original["identities"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIdentities); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["identities"] = transformedIdentities + } + + transformedSources, err := expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFromSources(original["sources"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSources); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sources"] = transformedSources + } + + return transformed, nil +} + +func 
expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFromIdentityType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFromIdentities(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFromSources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAccessLevel, err := expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFromSourcesAccessLevel(original["access_level"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAccessLevel); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["accessLevel"] = transformedAccessLevel + } + + transformedResource, err := expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFromSourcesResource(original["resource"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedResource); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resource"] = transformedResource + } + + req = append(req, transformed) + } + return req, nil +} + +func expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFromSourcesAccessLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFromSourcesResource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressTo(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedResources, err := expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressToResources(original["resources"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resources"] = transformedResources + } + + transformedOperations, err := expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressToOperations(original["operations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOperations); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["operations"] = transformedOperations + } + + return transformed, nil +} + +func expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressToResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressToOperations(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedServiceName, err := expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressToOperationsServiceName(original["service_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serviceName"] = transformedServiceName + } + + transformedMethodSelectors, err := expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressToOperationsMethodSelectors(original["method_selectors"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["methodSelectors"] = transformedMethodSelectors + } + + req = append(req, transformed) + } + return req, nil +} + +func expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressToOperationsServiceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressToOperationsMethodSelectors(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMethod, err := expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressToOperationsMethodSelectorsMethod(original["method"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMethod); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["method"] = transformedMethod + } + + transformedPermission, err := expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressToOperationsMethodSelectorsPermission(original["permission"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPermission); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["permission"] = transformedPermission + } + + req = append(req, transformed) + } + return req, nil +} + +func expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressToOperationsMethodSelectorsMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressToOperationsMethodSelectorsPermission(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicy(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["spec"] + if !ok || v == nil { + return nil, nil + } + res = v.(map[string]interface{}) + + v, ok = res["ingressPolicies"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case 
map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value spec.ingressPolicies. Actual value: %v", v) + } + + _, item, err := resourceAccessContextManagerServicePerimeterDryRunIngressPolicyFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceAccessContextManagerServicePerimeterDryRunIngressPolicyFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedIngressFrom, err := expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFrom(d.Get("ingress_from"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedIngressFrom := flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFrom(expectedIngressFrom, d, meta.(*transport_tpg.Config)) + expectedIngressTo, err := expandNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressTo(d.Get("ingress_to"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedIngressTo := flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressTo(expectedIngressTo, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. + for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + item := itemRaw.(map[string]interface{}) + + itemIngressFrom := flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressFrom(item["ingressFrom"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemIngressFrom)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedIngressFrom))) && !reflect.DeepEqual(itemIngressFrom, expectedFlattenedIngressFrom) { + log.Printf("[DEBUG] Skipping item with ingressFrom= %#v, looking for %#v)", itemIngressFrom, expectedFlattenedIngressFrom) + continue + } + itemIngressTo := flattenNestedAccessContextManagerServicePerimeterDryRunIngressPolicyIngressTo(item["ingressTo"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemIngressTo)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedIngressTo))) && !reflect.DeepEqual(itemIngressTo, expectedFlattenedIngressTo) { + log.Printf("[DEBUG] Skipping item with ingressTo= %#v, looking for %#v)", itemIngressTo, expectedFlattenedIngressTo) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} + +// PatchCreateEncoder handles creating request data to PATCH parent resource +// with list including new object. +func resourceAccessContextManagerServicePerimeterDryRunIngressPolicyPatchCreateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceAccessContextManagerServicePerimeterDryRunIngressPolicyListForPatch(d, meta) + if err != nil { + return nil, err + } + + _, found, err := resourceAccessContextManagerServicePerimeterDryRunIngressPolicyFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + + // Return error if item already created. 
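+ // Nested policies carry no unique identifier, so an identical existing
+ // entry would be indistinguishable from the one being created. The
+ // encoder returns the full parent body for the PATCH, e.g.
+ //   {"spec": {"ingressPolicies": [<existing policies>, <new policy>]}}
+ // with updateMask=spec.ingressPolicies supplied by the caller.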
+ if found != nil { + return nil, fmt.Errorf("Unable to create ServicePerimeterDryRunIngressPolicy, existing object already found: %+v", found) + } + + // Return list with the resource to create appended + res := map[string]interface{}{ + "ingressPolicies": append(currItems, obj), + } + wrapped := map[string]interface{}{ + "spec": res, + } + res = wrapped + + return res, nil +} + +// PatchDeleteEncoder handles creating request data to PATCH parent resource +// with list excluding object to delete. +func resourceAccessContextManagerServicePerimeterDryRunIngressPolicyPatchDeleteEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceAccessContextManagerServicePerimeterDryRunIngressPolicyListForPatch(d, meta) + if err != nil { + return nil, err + } + + idx, item, err := resourceAccessContextManagerServicePerimeterDryRunIngressPolicyFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + if item == nil { + // Spoof 404 error for proper handling by Delete (i.e. no-op) + return nil, tpgresource.Fake404("nested", "AccessContextManagerServicePerimeterDryRunIngressPolicy") + } + + updatedItems := append(currItems[:idx], currItems[idx+1:]...) + res := map[string]interface{}{ + "ingressPolicies": updatedItems, + } + wrapped := map[string]interface{}{ + "spec": res, + } + res = wrapped + + return res, nil +} + +// ListForPatch handles making API request to get parent resource and +// extracting list of objects. +func resourceAccessContextManagerServicePerimeterDryRunIngressPolicyListForPatch(d *schema.ResourceData, meta interface{}) ([]interface{}, error) { + config := meta.(*transport_tpg.Config) + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter}}") + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return nil, err + } + + var v interface{} + var ok bool + if v, ok = res["spec"]; ok && v != nil { + res = v.(map[string]interface{}) + } else { + return nil, nil + } + + v, ok = res["ingressPolicies"] + if ok && v != nil { + ls, lsOk := v.([]interface{}) + if !lsOk { + return nil, fmt.Errorf(`expected list for nested field "ingressPolicies"`) + } + return ls, nil + } + return nil, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_egress_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_egress_policy.go index 7905e82bdbe..f533dbcc3bd 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_egress_policy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_egress_policy.go @@ -22,6 +22,8 @@ import ( "log" "net/http" "reflect" + "slices" + "sort" "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -31,16 +33,54 @@ import ( 
"github.com/hashicorp/terraform-provider-google/google/verify" ) +func AccessContextManagerServicePerimeterEgressPolicyEgressToResourcesDiffSupressFunc(_, _, _ string, d *schema.ResourceData) bool { + old, new := d.GetChange("egress_to.0.resources") + + oldResources, err := tpgresource.InterfaceSliceToStringSlice(old) + if err != nil { + log.Printf("[ERROR] Failed to convert config value: %s", err) + return false + } + + newResources, err := tpgresource.InterfaceSliceToStringSlice(new) + if err != nil { + log.Printf("[ERROR] Failed to convert config value: %s", err) + return false + } + + sort.Strings(oldResources) + sort.Strings(newResources) + + return slices.Equal(oldResources, newResources) +} + +func AccessContextManagerServicePerimeterEgressPolicyIngressToResourcesDiffSupressFunc(_, _, _ string, d *schema.ResourceData) bool { + old, new := d.GetChange("ingress_to.0.resources") + + oldResources, err := tpgresource.InterfaceSliceToStringSlice(old) + if err != nil { + log.Printf("[ERROR] Failed to convert config value: %s", err) + return false + } + + newResources, err := tpgresource.InterfaceSliceToStringSlice(new) + if err != nil { + log.Printf("[ERROR] Failed to convert config value: %s", err) + return false + } + + sort.Strings(oldResources) + sort.Strings(newResources) + + return slices.Equal(oldResources, newResources) +} + func ResourceAccessContextManagerServicePerimeterEgressPolicy() *schema.Resource { return &schema.Resource{ Create: resourceAccessContextManagerServicePerimeterEgressPolicyCreate, Read: resourceAccessContextManagerServicePerimeterEgressPolicyRead, Delete: resourceAccessContextManagerServicePerimeterEgressPolicyDelete, - Importer: &schema.ResourceImporter{ - State: resourceAccessContextManagerServicePerimeterEgressPolicyImport, - }, - Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(20 * time.Minute), Delete: schema.DefaultTimeout(20 * time.Minute), @@ -176,9 +216,10 @@ field set to '*' will allow all methods AND permissions for all services.`, }, }, "resources": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + DiffSuppressFunc: AccessContextManagerServicePerimeterEgressPolicyEgressToResourcesDiffSupressFunc, Description: `A list of resources, currently only projects in the form 'projects/', that match this to stanza. A request matches if it contains a resource in this list. 
If * is specified for resources, @@ -430,21 +471,6 @@ func resourceAccessContextManagerServicePerimeterEgressPolicyDelete(d *schema.Re return nil } -func resourceAccessContextManagerServicePerimeterEgressPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*transport_tpg.Config) - - // current import_formats can't import fields with forward slashes in their value - parts, err := tpgresource.GetImportIdQualifiers([]string{"accessPolicies/(?P[^/]+)/servicePerimeters/(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return nil, err - } - - if err := d.Set("perimeter", fmt.Sprintf("accessPolicies/%s/servicePerimeters/%s", parts["accessPolicy"], parts["perimeter"])); err != nil { - return nil, fmt.Errorf("Error setting perimeter: %s", err) - } - return []*schema.ResourceData{d}, nil -} - func flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil @@ -516,7 +542,29 @@ func flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressTo(v int return []interface{}{transformed} } func flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressToResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v + rawConfigValue := d.Get("egress_to.0.resources") + + // Convert config value to []string + configValue, err := tpgresource.InterfaceSliceToStringSlice(rawConfigValue) + if err != nil { + log.Printf("[ERROR] Failed to convert config value: %s", err) + return v + } + + // Convert v to []string + apiStringValue, err := tpgresource.InterfaceSliceToStringSlice(v) + if err != nil { + log.Printf("[ERROR] Failed to convert API value: %s", err) + return v + } + + sortedStrings, err := tpgresource.SortStringsByConfigOrder(configValue, apiStringValue) + if err != nil { + log.Printf("[ERROR] Could not sort API response value: %s", err) + return v + } + + return sortedStrings } func flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressToExternalResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_ingress_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_ingress_policy.go index c190f5b36b5..64e0c33d736 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_ingress_policy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_ingress_policy.go @@ -22,6 +22,8 @@ import ( "log" "net/http" "reflect" + "slices" + "sort" "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -31,16 +33,54 @@ import ( "github.com/hashicorp/terraform-provider-google/google/verify" ) +func AccessContextManagerServicePerimeterIngressPolicyEgressToResourcesDiffSupressFunc(_, _, _ string, d *schema.ResourceData) bool { + old, new := d.GetChange("egress_to.0.resources") + + oldResources, err := tpgresource.InterfaceSliceToStringSlice(old) + if err != nil { + log.Printf("[ERROR] Failed to 
convert config value: %s", err) + return false + } + + newResources, err := tpgresource.InterfaceSliceToStringSlice(new) + if err != nil { + log.Printf("[ERROR] Failed to convert config value: %s", err) + return false + } + + sort.Strings(oldResources) + sort.Strings(newResources) + + return slices.Equal(oldResources, newResources) +} + +func AccessContextManagerServicePerimeterIngressPolicyIngressToResourcesDiffSupressFunc(_, _, _ string, d *schema.ResourceData) bool { + old, new := d.GetChange("ingress_to.0.resources") + + oldResources, err := tpgresource.InterfaceSliceToStringSlice(old) + if err != nil { + log.Printf("[ERROR] Failed to convert config value: %s", err) + return false + } + + newResources, err := tpgresource.InterfaceSliceToStringSlice(new) + if err != nil { + log.Printf("[ERROR] Failed to convert config value: %s", err) + return false + } + + sort.Strings(oldResources) + sort.Strings(newResources) + + return slices.Equal(oldResources, newResources) +} + func ResourceAccessContextManagerServicePerimeterIngressPolicy() *schema.Resource { return &schema.Resource{ Create: resourceAccessContextManagerServicePerimeterIngressPolicyCreate, Read: resourceAccessContextManagerServicePerimeterIngressPolicyRead, Delete: resourceAccessContextManagerServicePerimeterIngressPolicyDelete, - Importer: &schema.ResourceImporter{ - State: resourceAccessContextManagerServicePerimeterIngressPolicyImport, - }, - Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(20 * time.Minute), Delete: schema.DefaultTimeout(20 * time.Minute), @@ -109,7 +149,10 @@ If * is specified, then all IngressSources will be allowed.`, ForceNew: true, Description: `A Google Cloud resource that is allowed to ingress the perimeter. Requests from these resources will be allowed to access perimeter data. -Currently only projects are allowed. Format 'projects/{project_number}' +Currently only projects and VPCs are allowed. +Project format: 'projects/{projectNumber}' +VPC network format: +'//compute.googleapis.com/projects/{PROJECT_ID}/global/networks/{NAME}'. The project may be in any Google Cloud organization, not just the organization that the perimeter is defined in. 
'*' is not allowed, the case of allowing all Google Cloud resources only is not supported.`, @@ -177,9 +220,10 @@ field set to '*' will allow all methods AND permissions for all services.`, }, }, "resources": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + DiffSuppressFunc: AccessContextManagerServicePerimeterIngressPolicyIngressToResourcesDiffSupressFunc, Description: `A list of resources, currently only projects in the form 'projects/', protected by this 'ServicePerimeter' that are allowed to be accessed by sources defined in the @@ -434,21 +478,6 @@ func resourceAccessContextManagerServicePerimeterIngressPolicyDelete(d *schema.R return nil } -func resourceAccessContextManagerServicePerimeterIngressPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*transport_tpg.Config) - - // current import_formats can't import fields with forward slashes in their value - parts, err := tpgresource.GetImportIdQualifiers([]string{"accessPolicies/(?P[^/]+)/servicePerimeters/(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return nil, err - } - - if err := d.Set("perimeter", fmt.Sprintf("accessPolicies/%s/servicePerimeters/%s", parts["accessPolicy"], parts["perimeter"])); err != nil { - return nil, fmt.Errorf("Error setting perimeter: %s", err) - } - return []*schema.ResourceData{d}, nil -} - func flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil @@ -517,7 +546,29 @@ func flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressTo(v i return []interface{}{transformed} } func flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressToResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v + rawConfigValue := d.Get("ingress_to.0.resources") + + // Convert config value to []string + configValue, err := tpgresource.InterfaceSliceToStringSlice(rawConfigValue) + if err != nil { + log.Printf("[ERROR] Failed to convert config value: %s", err) + return v + } + + // Convert v to []string + apiStringValue, err := tpgresource.InterfaceSliceToStringSlice(v) + if err != nil { + log.Printf("[ERROR] Failed to convert API value: %s", err) + return v + } + + sortedStrings, err := tpgresource.SortStringsByConfigOrder(configValue, apiStringValue) + if err != nil { + log.Printf("[ERROR] Could not sort API response value: %s", err) + return v + } + + return sortedStrings } func flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressToOperations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/activedirectory/resource_active_directory_domain.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/activedirectory/resource_active_directory_domain.go index 744621beaa7..29d6810c526 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/activedirectory/resource_active_directory_domain.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/activedirectory/resource_active_directory_domain.go @@ -131,6 +131,17 @@ Similar to what would be chosen for an Active Directory set up on an internal ne and 
default labels configured on the provider.`, Elem: &schema.Schema{Type: schema.TypeString}, }, + "deletion_protection": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether Terraform will be prevented from destroying the domain. Defaults to true. +When a 'terraform destroy' or 'terraform apply' would delete the domain, +the command will fail if this field is not set to false in Terraform state. +When the field is set to true or unset in Terraform state, a 'terraform apply' +or 'terraform destroy' that would delete the domain will fail. +When the field is set to false, deleting the domain is allowed.`, + Default: true, + }, "project": { Type: schema.TypeString, Optional: true, @@ -291,6 +302,12 @@ func resourceActiveDirectoryDomainRead(d *schema.ResourceData, meta interface{}) return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ActiveDirectoryDomain %q", d.Id())) } + // Explicitly set virtual fields to default values if unset + if _, ok := d.GetOkExists("deletion_protection"); !ok { + if err := d.Set("deletion_protection", true); err != nil { + return fmt.Errorf("Error setting deletion_protection: %s", err) + } + } if err := d.Set("project", project); err != nil { return fmt.Errorf("Error reading Domain: %s", err) } @@ -453,6 +470,9 @@ func resourceActiveDirectoryDomainDelete(d *schema.ResourceData, meta interface{ } headers := make(http.Header) + if d.Get("deletion_protection").(bool) { + return fmt.Errorf("cannot destroy domain without setting deletion_protection=false and running `terraform apply`") + } log.Printf("[DEBUG] Deleting Domain %q", d.Id()) res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_cluster.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_cluster.go index a8218fdb348..3895cc060c1 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_cluster.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_cluster.go @@ -383,17 +383,6 @@ Please refer to the field 'effective_labels' for all of the labels present on th }, }, }, - "network": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Deprecated: "`network` is deprecated and will be removed in a future major release. Instead, use `network_config` to define the network configuration.", - DiffSuppressFunc: tpgresource.ProjectNumberDiffSuppress, - Description: `The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: - -"projects/{projectNumber}/global/networks/{network_id}".`, - ExactlyOneOf: []string{"network", "network_config.0.network", "psc_config.0.psc_enabled"}, - }, "network_config": { Type: schema.TypeList, Computed: true, Optional: true, @@ -414,7 +403,7 @@ If set, the instance IPs for this cluster will be created in the allocated range DiffSuppressFunc: tpgresource.ProjectNumberDiffSuppress, Description: `The resource link for the VPC network in which cluster resources are created and from which they are accessible via Private IP. The network must belong to the same project as the cluster.
It is specified in the form: "projects/{projectNumber}/global/networks/{network_id}".`, - ExactlyOneOf: []string{"network", "network_config.0.network", "psc_config.0.psc_enabled"}, + ExactlyOneOf: []string{"network_config.0.network", "psc_config.0.psc_enabled"}, }, }, }, @@ -492,6 +481,13 @@ It is specified in the form: "projects/{projectNumber}/global/networks/{network_ }, }, }, + "subscription_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"TRIAL", "STANDARD", ""}), + Description: `The subscription type of the cluster. Possible values: ["TRIAL", "STANDARD"]`, + }, "backup_source": { Type: schema.TypeList, Computed: true, @@ -637,6 +633,35 @@ This can happen due to user-triggered updates or system actions like failover or and default labels configured on the provider.`, Elem: &schema.Schema{Type: schema.TypeString}, }, + "trial_metadata": { + Type: schema.TypeList, + Computed: true, + Description: `Contains information and all metadata related to TRIAL clusters.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "end_time": { + Type: schema.TypeString, + Optional: true, + Description: `End time of the trial cluster.`, + }, + "grace_end_time": { + Type: schema.TypeString, + Optional: true, + Description: `Grace end time of the trial cluster.`, + }, + "start_time": { + Type: schema.TypeString, + Optional: true, + Description: `Start time of the trial cluster.`, + }, + "upgrade_time": { + Type: schema.TypeString, + Optional: true, + Description: `Upgrade time of the trial cluster to standard cluster.`, + }, + }, + }, + }, "uid": { Type: schema.TypeString, Computed: true, @@ -645,10 +670,11 @@ This can happen due to user-triggered updates or system actions like failover or "deletion_policy": { Type: schema.TypeString, Optional: true, - Default: "DEFAULT", Description: `Policy to determine if the cluster should be deleted forcefully. Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. -Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance.`, +Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance.
+Possible values: DEFAULT, FORCE`, + Default: "DEFAULT", }, "project": { Type: schema.TypeString, @@ -675,12 +701,6 @@ func resourceAlloydbClusterCreate(d *schema.ResourceData, meta interface{}) erro } else if v, ok := d.GetOkExists("encryption_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(encryptionConfigProp)) && (ok || !reflect.DeepEqual(v, encryptionConfigProp)) { obj["encryptionConfig"] = encryptionConfigProp } - networkProp, err := expandAlloydbClusterNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } networkConfigProp, err := expandAlloydbClusterNetworkConfig(d.Get("network_config"), d, config) if err != nil { return err @@ -759,6 +779,12 @@ func resourceAlloydbClusterCreate(d *schema.ResourceData, meta interface{}) erro } else if v, ok := d.GetOkExists("maintenance_update_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(maintenanceUpdatePolicyProp)) && (ok || !reflect.DeepEqual(v, maintenanceUpdatePolicyProp)) { obj["maintenanceUpdatePolicy"] = maintenanceUpdatePolicyProp } + subscriptionTypeProp, err := expandAlloydbClusterSubscriptionType(d.Get("subscription_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("subscription_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(subscriptionTypeProp)) && (ok || !reflect.DeepEqual(v, subscriptionTypeProp)) { + obj["subscriptionType"] = subscriptionTypeProp + } labelsProp, err := expandAlloydbClusterEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err @@ -958,9 +984,6 @@ func resourceAlloydbClusterRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("continuous_backup_info", flattenAlloydbClusterContinuousBackupInfo(res["continuousBackupInfo"], d, config)); err != nil { return fmt.Errorf("Error reading Cluster: %s", err) } - if err := d.Set("network", flattenAlloydbClusterNetwork(res["network"], d, config)); err != nil { - return fmt.Errorf("Error reading Cluster: %s", err) - } if err := d.Set("network_config", flattenAlloydbClusterNetworkConfig(res["networkConfig"], d, config)); err != nil { return fmt.Errorf("Error reading Cluster: %s", err) } @@ -1006,6 +1029,12 @@ func resourceAlloydbClusterRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("maintenance_update_policy", flattenAlloydbClusterMaintenanceUpdatePolicy(res["maintenanceUpdatePolicy"], d, config)); err != nil { return fmt.Errorf("Error reading Cluster: %s", err) } + if err := d.Set("subscription_type", flattenAlloydbClusterSubscriptionType(res["subscriptionType"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + if err := d.Set("trial_metadata", flattenAlloydbClusterTrialMetadata(res["trialMetadata"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } if err := d.Set("terraform_labels", flattenAlloydbClusterTerraformLabels(res["labels"], d, config)); err != nil { return fmt.Errorf("Error reading Cluster: %s", err) } @@ -1041,12 +1070,6 @@ func resourceAlloydbClusterUpdate(d *schema.ResourceData, meta interface{}) erro } else if v, ok := d.GetOkExists("encryption_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, encryptionConfigProp)) { obj["encryptionConfig"] = encryptionConfigProp } - networkProp, err := expandAlloydbClusterNetwork(d.Get("network"), d, config) - if 
err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } networkConfigProp, err := expandAlloydbClusterNetworkConfig(d.Get("network_config"), d, config) if err != nil { return err @@ -1113,6 +1136,12 @@ func resourceAlloydbClusterUpdate(d *schema.ResourceData, meta interface{}) erro } else if v, ok := d.GetOkExists("maintenance_update_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, maintenanceUpdatePolicyProp)) { obj["maintenanceUpdatePolicy"] = maintenanceUpdatePolicyProp } + subscriptionTypeProp, err := expandAlloydbClusterSubscriptionType(d.Get("subscription_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("subscription_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, subscriptionTypeProp)) { + obj["subscriptionType"] = subscriptionTypeProp + } labelsProp, err := expandAlloydbClusterEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err @@ -1139,10 +1168,6 @@ func resourceAlloydbClusterUpdate(d *schema.ResourceData, meta interface{}) erro updateMask = append(updateMask, "encryptionConfig") } - if d.HasChange("network") { - updateMask = append(updateMask, "network") - } - if d.HasChange("network_config") { updateMask = append(updateMask, "networkConfig") } @@ -1187,6 +1212,10 @@ func resourceAlloydbClusterUpdate(d *schema.ResourceData, meta interface{}) erro updateMask = append(updateMask, "maintenanceUpdatePolicy") } + if d.HasChange("subscription_type") { + updateMask = append(updateMask, "subscriptionType") + } + if d.HasChange("effective_labels") { updateMask = append(updateMask, "labels") } @@ -1507,10 +1536,6 @@ func flattenAlloydbClusterContinuousBackupInfoEncryptionInfoKmsKeyVersions(v int return v } -func flattenAlloydbClusterNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v -} - func flattenAlloydbClusterNetworkConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil @@ -2047,6 +2072,45 @@ func flattenAlloydbClusterMaintenanceUpdatePolicyMaintenanceWindowsStartTimeNano return v // let terraform core handle it otherwise } +func flattenAlloydbClusterSubscriptionType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbClusterTrialMetadata(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["start_time"] = + flattenAlloydbClusterTrialMetadataStartTime(original["startTime"], d, config) + transformed["end_time"] = + flattenAlloydbClusterTrialMetadataEndTime(original["endTime"], d, config) + transformed["upgrade_time"] = + flattenAlloydbClusterTrialMetadataUpgradeTime(original["upgradeTime"], d, config) + transformed["grace_end_time"] = + flattenAlloydbClusterTrialMetadataGraceEndTime(original["graceEndTime"], d, config) + return []interface{}{transformed} +} +func flattenAlloydbClusterTrialMetadataStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbClusterTrialMetadataEndTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + return v +} + +func flattenAlloydbClusterTrialMetadataUpgradeTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbClusterTrialMetadataGraceEndTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenAlloydbClusterTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v @@ -2093,10 +2157,6 @@ func expandAlloydbClusterEncryptionConfigKmsKeyName(v interface{}, d tpgresource return v, nil } -func expandAlloydbClusterNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil -} - func expandAlloydbClusterNetworkConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { @@ -2705,6 +2765,10 @@ func expandAlloydbClusterMaintenanceUpdatePolicyMaintenanceWindowsStartTimeNanos return v, nil } +func expandAlloydbClusterSubscriptionType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandAlloydbClusterEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_instance.go index 4b07f532577..e4fa573cf97 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_instance.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_instance.go @@ -205,6 +205,11 @@ true.`, }, RequiredWith: []string{"network_config.0.enable_public_ip"}, }, + "enable_outbound_public_ip": { + Type: schema.TypeBool, + Optional: true, + Description: `Enables an outbound public IP for the instance.`, + }, "enable_public_ip": { Type: schema.TypeBool, Optional: true, @@ -217,6 +222,7 @@ the same instance.`, }, "psc_instance_config": { Type: schema.TypeList, + Computed: true, Optional: true, Description: `Configuration for Private Service Connect (PSC) for the instance.`, MaxItems: 1, @@ -321,6 +327,16 @@ The name of the resource will be in the format of Computed: true, Description: `The name of the instance resource.`, }, + "outbound_public_ip_addresses": { + Type: schema.TypeList, + Computed: true, + Description: `The outbound public IP addresses for the instance. This is available ONLY when +networkConfig.enableOutboundPublicIp is set to true.
These IP addresses are used +for outbound connections.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, "public_ip_address": { Type: schema.TypeString, Computed: true, @@ -595,6 +611,9 @@ func resourceAlloydbInstanceRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("public_ip_address", flattenAlloydbInstancePublicIpAddress(res["publicIpAddress"], d, config)); err != nil { return fmt.Errorf("Error reading Instance: %s", err) } + if err := d.Set("outbound_public_ip_addresses", flattenAlloydbInstanceOutboundPublicIpAddresses(res["outboundPublicIpAddresses"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } if err := d.Set("terraform_labels", flattenAlloydbInstanceTerraformLabels(res["labels"], d, config)); err != nil { return fmt.Errorf("Error reading Instance: %s", err) } @@ -1153,6 +1172,8 @@ func flattenAlloydbInstanceNetworkConfig(v interface{}, d *schema.ResourceData, flattenAlloydbInstanceNetworkConfigAuthorizedExternalNetworks(original["authorizedExternalNetworks"], d, config) transformed["enable_public_ip"] = flattenAlloydbInstanceNetworkConfigEnablePublicIp(original["enablePublicIp"], d, config) + transformed["enable_outbound_public_ip"] = + flattenAlloydbInstanceNetworkConfigEnableOutboundPublicIp(original["enableOutboundPublicIp"], d, config) return []interface{}{transformed} } func flattenAlloydbInstanceNetworkConfigAuthorizedExternalNetworks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { @@ -1181,10 +1202,18 @@ func flattenAlloydbInstanceNetworkConfigEnablePublicIp(v interface{}, d *schema. return v } +func flattenAlloydbInstanceNetworkConfigEnableOutboundPublicIp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenAlloydbInstancePublicIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } +func flattenAlloydbInstanceOutboundPublicIpAddresses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenAlloydbInstanceTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v @@ -1458,6 +1487,13 @@ func expandAlloydbInstanceNetworkConfig(v interface{}, d tpgresource.TerraformRe transformed["enablePublicIp"] = transformedEnablePublicIp } + transformedEnableOutboundPublicIp, err := expandAlloydbInstanceNetworkConfigEnableOutboundPublicIp(original["enable_outbound_public_ip"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnableOutboundPublicIp); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enableOutboundPublicIp"] = transformedEnableOutboundPublicIp + } + return transformed, nil } @@ -1491,6 +1527,10 @@ func expandAlloydbInstanceNetworkConfigEnablePublicIp(v interface{}, d tpgresour return v, nil } +func expandAlloydbInstanceNetworkConfigEnableOutboundPublicIp(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandAlloydbInstanceEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/apigee_utils.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/apigee_utils.go new file mode 100644 index 00000000000..8ea7d74d3a1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/apigee_utils.go @@ -0,0 +1,49 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package apigee + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func resourceApigeeNatAddressActivate(config *transport_tpg.Config, d *schema.ResourceData, billingProject string, userAgent string) error { + // 1. check that the address is ready for activation + name := d.Get("name").(string) + + if d.Get("state").(string) != "RESERVED" { + return fmt.Errorf("Activating a NAT address requires its state to be RESERVED") + } + + // 2. activation + activateUrl, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{instance_id}}/natAddresses/{{name}}:activate") + if err != nil { + return err + } + log.Printf("[DEBUG] Activating NAT address: %s", name) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: activateUrl, + UserAgent: userAgent, + }) + if err != nil { + return fmt.Errorf("Error activating NAT address: %s", err) + } + + var opRes map[string]interface{} + err = ApigeeOperationWaitTimeWithResponse( + config, res, &opRes, "Activating NAT address", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error waiting to activate NAT address: %s", err) + } else { + log.Printf("[DEBUG] Finished activating NatAddress %q: %#v", d.Id(), res) + } + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_app_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_app_group.go new file mode 100644 index 00000000000..473b6346db2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_app_group.go @@ -0,0 +1,567 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md.
+// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "fmt" + "log" + "net/http" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceApigeeAppGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceApigeeAppGroupCreate, + Read: resourceApigeeAppGroupRead, + Update: resourceApigeeAppGroupUpdate, + Delete: resourceApigeeAppGroupDelete, + + Importer: &schema.ResourceImporter{ + State: resourceApigeeAppGroupImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the AppGroup. Characters you can use in the name are restricted to: A-Z0-9._-$ %.`, + }, + "org_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The Apigee Organization associated with the Apigee app group, +in the format 'organizations/{{org_name}}'.`, + }, + "attributes": { + Type: schema.TypeList, + Optional: true, + Description: `A list of attributes`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Key of the attribute`, + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: `Value of the attribute`, + }, + }, + }, + }, + "channel_id": { + Type: schema.TypeString, + Optional: true, + Description: `Channel identifier identifies the owner maintaining this grouping.`, + }, + "channel_uri": { + Type: schema.TypeString, + Optional: true, + Description: `A reference to the associated storefront/marketplace.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `App group name displayed in the UI`, + }, + "status": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"active", "inactive", ""}), + Description: `Valid values are active or inactive. Note that the status of the AppGroup should be updated via UpdateAppGroupRequest by setting the action as active or inactive.
Possible values: ["active", "inactive"]`, + }, + "app_group_id": { + Type: schema.TypeString, + Computed: true, + Description: `Internal identifier that cannot be edited`, + }, + "created_at": { + Type: schema.TypeString, + Computed: true, + Description: `Created time as milliseconds since epoch.`, + }, + "last_modified_at": { + Type: schema.TypeString, + Computed: true, + Description: `Modified time as milliseconds since epoch.`, + }, + "organization": { + Type: schema.TypeString, + Computed: true, + Description: `App group name displayed in the UI`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceApigeeAppGroupCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandApigeeAppGroupName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + channelUriProp, err := expandApigeeAppGroupChannelUri(d.Get("channel_uri"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("channel_uri"); !tpgresource.IsEmptyValue(reflect.ValueOf(channelUriProp)) && (ok || !reflect.DeepEqual(v, channelUriProp)) { + obj["channelUri"] = channelUriProp + } + channelIdProp, err := expandApigeeAppGroupChannelId(d.Get("channel_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("channel_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(channelIdProp)) && (ok || !reflect.DeepEqual(v, channelIdProp)) { + obj["channelId"] = channelIdProp + } + displayNameProp, err := expandApigeeAppGroupDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + statusProp, err := expandApigeeAppGroupStatus(d.Get("status"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("status"); !tpgresource.IsEmptyValue(reflect.ValueOf(statusProp)) && (ok || !reflect.DeepEqual(v, statusProp)) { + obj["status"] = statusProp + } + attributesProp, err := expandApigeeAppGroupAttributes(d.Get("attributes"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("attributes"); !tpgresource.IsEmptyValue(reflect.ValueOf(attributesProp)) && (ok || !reflect.DeepEqual(v, attributesProp)) { + obj["attributes"] = attributesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/appgroups") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new AppGroup: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating AppGroup: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, 
"{{org_id}}/appgroups/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating AppGroup %q: %#v", d.Id(), res) + + return resourceApigeeAppGroupRead(d, meta) +} + +func resourceApigeeAppGroupRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/appgroups/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ApigeeAppGroup %q", d.Id())) + } + + if err := d.Set("app_group_id", flattenApigeeAppGroupAppGroupId(res["appGroupId"], d, config)); err != nil { + return fmt.Errorf("Error reading AppGroup: %s", err) + } + if err := d.Set("name", flattenApigeeAppGroupName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading AppGroup: %s", err) + } + if err := d.Set("channel_uri", flattenApigeeAppGroupChannelUri(res["channelUri"], d, config)); err != nil { + return fmt.Errorf("Error reading AppGroup: %s", err) + } + if err := d.Set("channel_id", flattenApigeeAppGroupChannelId(res["channelId"], d, config)); err != nil { + return fmt.Errorf("Error reading AppGroup: %s", err) + } + if err := d.Set("display_name", flattenApigeeAppGroupDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading AppGroup: %s", err) + } + if err := d.Set("organization", flattenApigeeAppGroupOrganization(res["organization"], d, config)); err != nil { + return fmt.Errorf("Error reading AppGroup: %s", err) + } + if err := d.Set("status", flattenApigeeAppGroupStatus(res["status"], d, config)); err != nil { + return fmt.Errorf("Error reading AppGroup: %s", err) + } + if err := d.Set("attributes", flattenApigeeAppGroupAttributes(res["attributes"], d, config)); err != nil { + return fmt.Errorf("Error reading AppGroup: %s", err) + } + if err := d.Set("created_at", flattenApigeeAppGroupCreatedAt(res["createdAt"], d, config)); err != nil { + return fmt.Errorf("Error reading AppGroup: %s", err) + } + if err := d.Set("last_modified_at", flattenApigeeAppGroupLastModifiedAt(res["lastModifiedAt"], d, config)); err != nil { + return fmt.Errorf("Error reading AppGroup: %s", err) + } + + return nil +} + +func resourceApigeeAppGroupUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + nameProp, err := expandApigeeAppGroupName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + channelUriProp, err := expandApigeeAppGroupChannelUri(d.Get("channel_uri"), d, config) + if err != nil { + return err + } else 
if v, ok := d.GetOkExists("channel_uri"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, channelUriProp)) { + obj["channelUri"] = channelUriProp + } + channelIdProp, err := expandApigeeAppGroupChannelId(d.Get("channel_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("channel_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, channelIdProp)) { + obj["channelId"] = channelIdProp + } + displayNameProp, err := expandApigeeAppGroupDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + statusProp, err := expandApigeeAppGroupStatus(d.Get("status"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("status"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, statusProp)) { + obj["status"] = statusProp + } + attributesProp, err := expandApigeeAppGroupAttributes(d.Get("attributes"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("attributes"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, attributesProp)) { + obj["attributes"] = attributesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/appgroups/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating AppGroup %q: %#v", d.Id(), obj) + headers := make(http.Header) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating AppGroup %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating AppGroup %q: %#v", d.Id(), res) + } + + return resourceApigeeAppGroupRead(d, meta) +} + +func resourceApigeeAppGroupDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/appgroups/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting AppGroup %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "AppGroup") + } + + log.Printf("[DEBUG] Finished deleting AppGroup %q: %#v", d.Id(), res) + return nil +} + +func resourceApigeeAppGroupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // 
current import_formats cannot import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + nameParts := strings.Split(d.Get("name").(string), "/") + if len(nameParts) == 4 { + // `organizations/{{org_name}}/appgroups/{{name}}` + orgId := fmt.Sprintf("organizations/%s", nameParts[1]) + if err := d.Set("org_id", orgId); err != nil { + return nil, fmt.Errorf("Error setting org_id: %s", err) + } + if err := d.Set("name", nameParts[3]); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + } else if len(nameParts) == 3 { + // `organizations/{{org_name}}/{{name}}` + orgId := fmt.Sprintf("organizations/%s", nameParts[1]) + if err := d.Set("org_id", orgId); err != nil { + return nil, fmt.Errorf("Error setting org_id: %s", err) + } + if err := d.Set("name", nameParts[2]); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + } else { + return nil, fmt.Errorf( + "Saw %s when the name is expected to have shape %s or %s", + d.Get("name"), + "organizations/{{org_name}}/appgroups/{{name}}", + "organizations/{{org_name}}/{{name}}") + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{org_id}}/appgroups/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenApigeeAppGroupAppGroupId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeAppGroupName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeAppGroupChannelUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeAppGroupChannelId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeAppGroupDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeAppGroupOrganization(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeAppGroupStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeAppGroupAttributes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenApigeeAppGroupAttributesName(original["name"], d, config), + "value": flattenApigeeAppGroupAttributesValue(original["value"], d, config), + }) + } + return transformed +} +func flattenApigeeAppGroupAttributesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeAppGroupAttributesValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeAppGroupCreatedAt(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeAppGroupLastModifiedAt(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func expandApigeeAppGroupName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeAppGroupChannelUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeAppGroupChannelId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeAppGroupDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeAppGroupStatus(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeAppGroupAttributes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandApigeeAppGroupAttributesName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedValue, err := expandApigeeAppGroupAttributesValue(original["value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["value"] = transformedValue + } + + req = append(req, transformed) + } + return req, nil +} + +func expandApigeeAppGroupAttributesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeAppGroupAttributesValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastore/resource_datastore_index_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_app_group_sweeper.go similarity index 90% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastore/resource_datastore_index_sweeper.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_app_group_sweeper.go index b907400cf69..3620dd54c4e 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastore/resource_datastore_index_sweeper.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_app_group_sweeper.go @@ -15,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package datastore +package apigee import ( "context" @@ -30,12 +30,12 @@ import ( ) func init() { - sweeper.AddTestSweepers("DatastoreIndex", testSweepDatastoreIndex) + sweeper.AddTestSweepers("ApigeeAppGroup", testSweepApigeeAppGroup) } // At the time of writing, the CI only passes us-central1 as the region -func 
testSweepDatastoreIndex(region string) error { - resourceName := "DatastoreIndex" +func testSweepApigeeAppGroup(region string) error { + resourceName := "ApigeeAppGroup" log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) config, err := sweeper.SharedConfigForRegion(region) @@ -64,7 +64,7 @@ func testSweepDatastoreIndex(region string) error { }, } - listTemplate := strings.Split("https://datastore.googleapis.com/v1/projects/{{project}}/indexes", "?")[0] + listTemplate := strings.Split("https://apigee.googleapis.com/v1/appgroups", "?")[0] listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) if err != nil { log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) @@ -83,7 +83,7 @@ func testSweepDatastoreIndex(region string) error { return nil } - resourceList, ok := res["indexes"] + resourceList, ok := res["appGroups"] if !ok { log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") return nil @@ -112,7 +112,7 @@ func testSweepDatastoreIndex(region string) error { continue } - deleteTemplate := "https://datastore.googleapis.com/v1/projects/{{project}}/indexes/{{index_id}}" + deleteTemplate := "https://apigee.googleapis.com/v1/{{org_id}}/appgroups/{{name}}" deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) if err != nil { log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_developer.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_developer.go new file mode 100644 index 00000000000..def31b13d2e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_developer.go @@ -0,0 +1,568 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "fmt" + "log" + "net/http" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceApigeeDeveloper() *schema.Resource { + return &schema.Resource{ + Create: resourceApigeeDeveloperCreate, + Read: resourceApigeeDeveloperRead, + Update: resourceApigeeDeveloperUpdate, + Delete: resourceApigeeDeveloperDelete, + + Importer: &schema.ResourceImporter{ + State: resourceApigeeDeveloperImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "email": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Email address of the developer. This value is used to uniquely identify the developer in Apigee hybrid. 
Note that the email address has to be in lowercase only.`,
+			},
+			"first_name": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: `First name of the developer.`,
+			},
+			"last_name": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: `Last name of the developer.`,
+			},
+			"org_id": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+				Description: `The Apigee Organization associated with the Apigee instance,
+in the format 'organizations/{{org_name}}'.`,
+			},
+			"user_name": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: `User name of the developer. Not used by Apigee hybrid.`,
+			},
+			"attributes": {
+				Type:        schema.TypeList,
+				Optional:    true,
+				Description: `Developer attributes (name/value pairs). The custom attribute limit is 18.`,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"name": {
+							Type:        schema.TypeString,
+							Optional:    true,
+							Description: `Key of the attribute`,
+						},
+						"value": {
+							Type:        schema.TypeString,
+							Optional:    true,
+							Description: `Value of the attribute`,
+						},
+					},
+				},
+			},
+			"created_at": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: `Time at which the developer was created in milliseconds since epoch.`,
+			},
+			"last_modified_at": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: `Time at which the developer was last modified in milliseconds since epoch.`,
+			},
+			"organizatio_name": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: `Name of the Apigee organization in which the developer resides.`,
+			},
+			"status": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: `Status of the developer. Valid values are active and inactive.`,
+			},
+		},
+		UseJSONNumber: true,
+	}
+}
+
+func resourceApigeeDeveloperCreate(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*transport_tpg.Config)
+	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return err
+	}
+
+	obj := make(map[string]interface{})
+	emailProp, err := expandApigeeDeveloperEmail(d.Get("email"), d, config)
+	if err != nil {
+		return err
+	} else if v, ok := d.GetOkExists("email"); !tpgresource.IsEmptyValue(reflect.ValueOf(emailProp)) && (ok || !reflect.DeepEqual(v, emailProp)) {
+		obj["email"] = emailProp
+	}
+	firstNameProp, err := expandApigeeDeveloperFirstName(d.Get("first_name"), d, config)
+	if err != nil {
+		return err
+	} else if v, ok := d.GetOkExists("first_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(firstNameProp)) && (ok || !reflect.DeepEqual(v, firstNameProp)) {
+		obj["firstName"] = firstNameProp
+	}
+	lastNameProp, err := expandApigeeDeveloperLastName(d.Get("last_name"), d, config)
+	if err != nil {
+		return err
+	} else if v, ok := d.GetOkExists("last_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(lastNameProp)) && (ok || !reflect.DeepEqual(v, lastNameProp)) {
+		obj["lastName"] = lastNameProp
+	}
+	userNameProp, err := expandApigeeDeveloperUserName(d.Get("user_name"), d, config)
+	if err != nil {
+		return err
+	} else if v, ok := d.GetOkExists("user_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(userNameProp)) && (ok || !reflect.DeepEqual(v, userNameProp)) {
+		obj["userName"] = userNameProp
+	}
+	attributesProp, err := expandApigeeDeveloperAttributes(d.Get("attributes"), d, config)
+	if err != nil {
+		return err
+	} else if v, ok := d.GetOkExists("attributes"); !tpgresource.IsEmptyValue(reflect.ValueOf(attributesProp)) && (ok || !reflect.DeepEqual(v, attributesProp)) {
+		obj["attributes"] = attributesProp
+	}
+
+	url, 
err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/developers") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Developer: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating Developer: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{org_id}}/developers/{{email}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Developer %q: %#v", d.Id(), res) + + return resourceApigeeDeveloperRead(d, meta) +} + +func resourceApigeeDeveloperRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/developers/{{email}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ApigeeDeveloper %q", d.Id())) + } + + if err := d.Set("email", flattenApigeeDeveloperEmail(res["email"], d, config)); err != nil { + return fmt.Errorf("Error reading Developer: %s", err) + } + if err := d.Set("first_name", flattenApigeeDeveloperFirstName(res["firstName"], d, config)); err != nil { + return fmt.Errorf("Error reading Developer: %s", err) + } + if err := d.Set("last_name", flattenApigeeDeveloperLastName(res["lastName"], d, config)); err != nil { + return fmt.Errorf("Error reading Developer: %s", err) + } + if err := d.Set("user_name", flattenApigeeDeveloperUserName(res["userName"], d, config)); err != nil { + return fmt.Errorf("Error reading Developer: %s", err) + } + if err := d.Set("attributes", flattenApigeeDeveloperAttributes(res["attributes"], d, config)); err != nil { + return fmt.Errorf("Error reading Developer: %s", err) + } + if err := d.Set("organizatio_name", flattenApigeeDeveloperOrganizatioName(res["organizatioName"], d, config)); err != nil { + return fmt.Errorf("Error reading Developer: %s", err) + } + if err := d.Set("status", flattenApigeeDeveloperStatus(res["status"], d, config)); err != nil { + return fmt.Errorf("Error reading Developer: %s", err) + } + if err := d.Set("created_at", flattenApigeeDeveloperCreatedAt(res["createdAt"], d, config)); err != nil { + return fmt.Errorf("Error reading Developer: %s", err) + } + if err := d.Set("last_modified_at", flattenApigeeDeveloperLastModifiedAt(res["lastModifiedAt"], d, config)); err != nil { + return fmt.Errorf("Error reading Developer: %s", err) + } + + return nil +} + +func 
resourceApigeeDeveloperUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + emailProp, err := expandApigeeDeveloperEmail(d.Get("email"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("email"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, emailProp)) { + obj["email"] = emailProp + } + firstNameProp, err := expandApigeeDeveloperFirstName(d.Get("first_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("first_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, firstNameProp)) { + obj["firstName"] = firstNameProp + } + lastNameProp, err := expandApigeeDeveloperLastName(d.Get("last_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("last_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, lastNameProp)) { + obj["lastName"] = lastNameProp + } + userNameProp, err := expandApigeeDeveloperUserName(d.Get("user_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("user_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, userNameProp)) { + obj["userName"] = userNameProp + } + attributesProp, err := expandApigeeDeveloperAttributes(d.Get("attributes"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("attributes"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, attributesProp)) { + obj["attributes"] = attributesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/developers/{{email}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Developer %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("email") { + updateMask = append(updateMask, "email") + } + + if d.HasChange("first_name") { + updateMask = append(updateMask, "firstName") + } + + if d.HasChange("last_name") { + updateMask = append(updateMask, "lastName") + } + + if d.HasChange("user_name") { + updateMask = append(updateMask, "userName") + } + + if d.HasChange("attributes") { + updateMask = append(updateMask, "attributes") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating Developer %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Developer %q: %#v", d.Id(), res) + } + + } + + return resourceApigeeDeveloperRead(d, meta) +} + +func resourceApigeeDeveloperDelete(d *schema.ResourceData, meta interface{}) error { + 
config := meta.(*transport_tpg.Config)
+	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return err
+	}
+
+	billingProject := ""
+
+	url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/developers/{{email}}")
+	if err != nil {
+		return err
+	}
+
+	var obj map[string]interface{}
+
+	// err == nil indicates that the billing_project value was found
+	if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
+		billingProject = bp
+	}
+
+	headers := make(http.Header)
+
+	log.Printf("[DEBUG] Deleting Developer %q", d.Id())
+	res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
+		Config:    config,
+		Method:    "DELETE",
+		Project:   billingProject,
+		RawURL:    url,
+		UserAgent: userAgent,
+		Body:      obj,
+		Timeout:   d.Timeout(schema.TimeoutDelete),
+		Headers:   headers,
+	})
+	if err != nil {
+		return transport_tpg.HandleNotFoundError(err, d, "Developer")
+	}
+
+	log.Printf("[DEBUG] Finished deleting Developer %q: %#v", d.Id(), res)
+	return nil
+}
+
+func resourceApigeeDeveloperImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	config := meta.(*transport_tpg.Config)
+
+	// current import_formats cannot import fields with forward slashes in their value
+	if err := tpgresource.ParseImportId([]string{"(?P<email>.+)"}, d, config); err != nil {
+		return nil, err
+	}
+
+	nameParts := strings.Split(d.Get("email").(string), "/")
+	if len(nameParts) == 4 {
+		// `organizations/{{org_name}}/developers/{{email}}`
+		orgId := fmt.Sprintf("organizations/%s", nameParts[1])
+		if err := d.Set("org_id", orgId); err != nil {
+			return nil, fmt.Errorf("Error setting org_id: %s", err)
+		}
+		if err := d.Set("email", nameParts[3]); err != nil {
+			return nil, fmt.Errorf("Error setting email: %s", err)
+		}
+	} else if len(nameParts) == 3 {
+		// `organizations/{{org_name}}/{{email}}`
+		orgId := fmt.Sprintf("organizations/%s", nameParts[1])
+		if err := d.Set("org_id", orgId); err != nil {
+			return nil, fmt.Errorf("Error setting org_id: %s", err)
+		}
+		if err := d.Set("email", nameParts[2]); err != nil {
+			return nil, fmt.Errorf("Error setting email: %s", err)
+		}
+	} else {
+		return nil, fmt.Errorf(
+			"Saw %s when the import id is expected to have shape %s or %s",
+			d.Get("email"),
+			"organizations/{{org_name}}/developers/{{email}}",
+			"organizations/{{org_name}}/{{email}}")
+	}
+
+	// Replace import id for the resource id
+	id, err := tpgresource.ReplaceVars(d, config, "{{org_id}}/developers/{{email}}")
+	if err != nil {
+		return nil, fmt.Errorf("Error constructing id: %s", err)
+	}
+	d.SetId(id)
+
+	return []*schema.ResourceData{d}, nil
+}
+
+func flattenApigeeDeveloperEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenApigeeDeveloperFirstName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenApigeeDeveloperLastName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenApigeeDeveloperUserName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenApigeeDeveloperAttributes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return v
+	}
+	l := v.([]interface{})
+	transformed := make([]interface{}, 0, len(l))
+	for _, raw := range l {
+		original := raw.(map[string]interface{})
+		if len(original) < 1 {
+			// Do not include empty json 
objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenApigeeDeveloperAttributesName(original["name"], d, config), + "value": flattenApigeeDeveloperAttributesValue(original["value"], d, config), + }) + } + return transformed +} +func flattenApigeeDeveloperAttributesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeDeveloperAttributesValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeDeveloperOrganizatioName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeDeveloperStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeDeveloperCreatedAt(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeDeveloperLastModifiedAt(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandApigeeDeveloperEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeDeveloperFirstName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeDeveloperLastName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeDeveloperUserName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeDeveloperAttributes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandApigeeDeveloperAttributesName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedValue, err := expandApigeeDeveloperAttributesValue(original["value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["value"] = transformedValue + } + + req = append(req, transformed) + } + return req, nil +} + +func expandApigeeDeveloperAttributesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeDeveloperAttributesValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_developer_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_developer_sweeper.go new file mode 100644 index 00000000000..29adbcfac89 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_developer_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ApigeeDeveloper", testSweepApigeeDeveloper) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepApigeeDeveloper(region string) error { + resourceName := "ApigeeDeveloper" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://apigee.googleapis.com/v1/developers", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["developers"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
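+		// If the list response exposes neither "id" nor "name" there is nothing
+		// to build a delete URL from, so the sweeper logs that and returns
+		// rather than guessing at an identifier.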
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://apigee.googleapis.com/v1/{{org_id}}/developers/{{email}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_environment_keyvaluemaps.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_environment_keyvaluemaps.go new file mode 100644 index 00000000000..4d6f4637414 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_environment_keyvaluemaps.go @@ -0,0 +1,253 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
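+//
+// A minimal Terraform configuration for this resource might look like the
+// following sketch (hypothetical names; assumes the resource is registered
+// as google_apigee_environment_keyvaluemaps and that the referenced Apigee
+// organization and environment already exist):
+//
+//	resource "google_apigee_environment_keyvaluemaps" "example" {
+//	  env_id = google_apigee_environment.example.id
+//	  name   = "example-kvm"
+//	}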
+// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "fmt" + "log" + "net/http" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceApigeeEnvironmentKeyvaluemaps() *schema.Resource { + return &schema.Resource{ + Create: resourceApigeeEnvironmentKeyvaluemapsCreate, + Read: resourceApigeeEnvironmentKeyvaluemapsRead, + Delete: resourceApigeeEnvironmentKeyvaluemapsDelete, + + Importer: &schema.ResourceImporter{ + State: resourceApigeeEnvironmentKeyvaluemapsImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(1 * time.Minute), + Delete: schema.DefaultTimeout(1 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "env_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The Apigee environment group associated with the Apigee environment, +in the format 'organizations/{{org_name}}/environments/{{env_name}}'.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Required. ID of the key value map.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceApigeeEnvironmentKeyvaluemapsCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandApigeeEnvironmentKeyvaluemapsName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{env_id}}/keyvaluemaps") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new EnvironmentKeyvaluemaps: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating EnvironmentKeyvaluemaps: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{env_id}}/keyvaluemaps/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating EnvironmentKeyvaluemaps %q: %#v", d.Id(), res) + + return resourceApigeeEnvironmentKeyvaluemapsRead(d, meta) +} + +func resourceApigeeEnvironmentKeyvaluemapsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{env_id}}/keyvaluemaps/{{name}}/entries") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == 
nil {
+		billingProject = bp
+	}
+
+	headers := make(http.Header)
+	res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
+		Config:    config,
+		Method:    "GET",
+		Project:   billingProject,
+		RawURL:    url,
+		UserAgent: userAgent,
+		Headers:   headers,
+	})
+	if err != nil {
+		return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ApigeeEnvironmentKeyvaluemaps %q", d.Id()))
+	}
+
+	res, err = resourceApigeeEnvironmentKeyvaluemapsDecoder(d, meta, res)
+	if err != nil {
+		return err
+	}
+
+	if res == nil {
+		// Decoding the object has resulted in it being gone. It may be marked deleted
+		log.Printf("[DEBUG] Removing ApigeeEnvironmentKeyvaluemaps because it no longer exists.")
+		d.SetId("")
+		return nil
+	}
+
+	if err := d.Set("name", flattenApigeeEnvironmentKeyvaluemapsName(res["name"], d, config)); err != nil {
+		return fmt.Errorf("Error reading EnvironmentKeyvaluemaps: %s", err)
+	}
+
+	return nil
+}
+
+func resourceApigeeEnvironmentKeyvaluemapsDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*transport_tpg.Config)
+	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return err
+	}
+
+	billingProject := ""
+
+	url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{env_id}}/keyvaluemaps/{{name}}")
+	if err != nil {
+		return err
+	}
+
+	var obj map[string]interface{}
+
+	// err == nil indicates that the billing_project value was found
+	if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
+		billingProject = bp
+	}
+
+	headers := make(http.Header)
+
+	log.Printf("[DEBUG] Deleting EnvironmentKeyvaluemaps %q", d.Id())
+	res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
+		Config:    config,
+		Method:    "DELETE",
+		Project:   billingProject,
+		RawURL:    url,
+		UserAgent: userAgent,
+		Body:      obj,
+		Timeout:   d.Timeout(schema.TimeoutDelete),
+		Headers:   headers,
+	})
+	if err != nil {
+		return transport_tpg.HandleNotFoundError(err, d, "EnvironmentKeyvaluemaps")
+	}
+
+	log.Printf("[DEBUG] Finished deleting EnvironmentKeyvaluemaps %q: %#v", d.Id(), res)
+	return nil
+}
+
+func resourceApigeeEnvironmentKeyvaluemapsImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	config := meta.(*transport_tpg.Config)
+
+	// current import_formats cannot import fields with forward slashes in their value
+	if err := tpgresource.ParseImportId([]string{
+		"(?P<env_id>.+)/keyvaluemaps/(?P<name>.+)",
+		"(?P<env_id>.+)/(?P<name>.+)",
+	}, d, config); err != nil {
+		return nil, err
+	}
+
+	// Replace import id for the resource id
+	id, err := tpgresource.ReplaceVars(d, config, "{{env_id}}/keyvaluemaps/{{name}}")
+	if err != nil {
+		return nil, fmt.Errorf("Error constructing id: %s", err)
+	}
+	d.SetId(id)
+
+	return []*schema.ResourceData{d}, nil
+}
+
+func flattenApigeeEnvironmentKeyvaluemapsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func expandApigeeEnvironmentKeyvaluemapsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func resourceApigeeEnvironmentKeyvaluemapsDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) {
+	config := meta.(*transport_tpg.Config)
+	name, err := tpgresource.ReplaceVars(d, config, "{{name}}")
+	if err != nil {
+		return nil, err
+	}
+	res["name"] = name
+	// "encrypted" field is retained for backward compatibility and the value of encrypted will always be true. 
Apigee X and hybrid do not support unencrypted key value maps. + res["encrypted"] = true + + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_environment_keyvaluemaps_entries.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_environment_keyvaluemaps_entries.go new file mode 100644 index 00000000000..7ceababc563 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_environment_keyvaluemaps_entries.go @@ -0,0 +1,253 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "fmt" + "log" + "net/http" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceApigeeEnvironmentKeyvaluemapsEntries() *schema.Resource { + return &schema.Resource{ + Create: resourceApigeeEnvironmentKeyvaluemapsEntriesCreate, + Read: resourceApigeeEnvironmentKeyvaluemapsEntriesRead, + Delete: resourceApigeeEnvironmentKeyvaluemapsEntriesDelete, + + Importer: &schema.ResourceImporter{ + State: resourceApigeeEnvironmentKeyvaluemapsEntriesImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(1 * time.Minute), + Delete: schema.DefaultTimeout(1 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "env_keyvaluemap_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The Apigee environment keyvalumaps Id associated with the Apigee environment, +in the format 'organizations/{{org_name}}/environments/{{env_name}}/keyvaluemaps/{{keyvaluemap_name}}'.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Required. Resource URI that can be used to identify the scope of the key value map entries.`, + }, + "value": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Required. 
Data or payload that is being retrieved and associated with the unique key.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceApigeeEnvironmentKeyvaluemapsEntriesCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandApigeeEnvironmentKeyvaluemapsEntriesName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + valueProp, err := expandApigeeEnvironmentKeyvaluemapsEntriesValue(d.Get("value"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("value"); !tpgresource.IsEmptyValue(reflect.ValueOf(valueProp)) && (ok || !reflect.DeepEqual(v, valueProp)) { + obj["value"] = valueProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{env_keyvaluemap_id}}/entries") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new EnvironmentKeyvaluemapsEntries: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating EnvironmentKeyvaluemapsEntries: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{env_keyvaluemap_id}}/entries/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating EnvironmentKeyvaluemapsEntries %q: %#v", d.Id(), res) + + return resourceApigeeEnvironmentKeyvaluemapsEntriesRead(d, meta) +} + +func resourceApigeeEnvironmentKeyvaluemapsEntriesRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{env_keyvaluemap_id}}/entries/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ApigeeEnvironmentKeyvaluemapsEntries %q", d.Id())) + } + + if err := d.Set("name", flattenApigeeEnvironmentKeyvaluemapsEntriesName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading EnvironmentKeyvaluemapsEntries: %s", err) + } + if err := d.Set("value", flattenApigeeEnvironmentKeyvaluemapsEntriesValue(res["value"], d, config)); err != nil { + return fmt.Errorf("Error reading EnvironmentKeyvaluemapsEntries: %s", err) + } + + 
return nil
+}
+
+func resourceApigeeEnvironmentKeyvaluemapsEntriesDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*transport_tpg.Config)
+	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return err
+	}
+
+	billingProject := ""
+
+	url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{env_keyvaluemap_id}}/entries/{{name}}")
+	if err != nil {
+		return err
+	}
+
+	var obj map[string]interface{}
+
+	// err == nil indicates that the billing_project value was found
+	if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
+		billingProject = bp
+	}
+
+	headers := make(http.Header)
+
+	log.Printf("[DEBUG] Deleting EnvironmentKeyvaluemapsEntries %q", d.Id())
+	res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
+		Config:    config,
+		Method:    "DELETE",
+		Project:   billingProject,
+		RawURL:    url,
+		UserAgent: userAgent,
+		Body:      obj,
+		Timeout:   d.Timeout(schema.TimeoutDelete),
+		Headers:   headers,
+	})
+	if err != nil {
+		return transport_tpg.HandleNotFoundError(err, d, "EnvironmentKeyvaluemapsEntries")
+	}
+
+	log.Printf("[DEBUG] Finished deleting EnvironmentKeyvaluemapsEntries %q: %#v", d.Id(), res)
+	return nil
+}
+
+func resourceApigeeEnvironmentKeyvaluemapsEntriesImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	config := meta.(*transport_tpg.Config)
+
+	// current import_formats cannot import fields with forward slashes in their value
+	if err := tpgresource.ParseImportId([]string{
+		"(?P<env_keyvaluemap_id>.+)/entries/(?P<name>.+)",
+		"(?P<env_keyvaluemap_id>.+)/(?P<name>.+)",
+	}, d, config); err != nil {
+		return nil, err
+	}
+
+	// Replace import id for the resource id
+	id, err := tpgresource.ReplaceVars(d, config, "{{env_keyvaluemap_id}}/entries/{{name}}")
+	if err != nil {
+		return nil, fmt.Errorf("Error constructing id: %s", err)
+	}
+	d.SetId(id)
+
+	return []*schema.ResourceData{d}, nil
+}
+
+func flattenApigeeEnvironmentKeyvaluemapsEntriesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenApigeeEnvironmentKeyvaluemapsEntriesValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func expandApigeeEnvironmentKeyvaluemapsEntriesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandApigeeEnvironmentKeyvaluemapsEntriesValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_instance.go
index 1b454440288..b86915ca930 100644
--- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_instance.go
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_instance.go
@@ -60,6 +60,7 @@ func ResourceApigeeInstance() *schema.Resource {
 	return &schema.Resource{
 		Create: resourceApigeeInstanceCreate,
 		Read:   resourceApigeeInstanceRead,
+		Update: resourceApigeeInstanceUpdate,
 		Delete: resourceApigeeInstanceDelete,
 
 		Importer: &schema.ResourceImporter{
@@ -68,6 +69,7 @@ func ResourceApigeeInstance() *schema.Resource {
 
 		Timeouts: 
&schema.ResourceTimeout{ Create: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), Delete: schema.DefaultTimeout(60 * time.Minute), }, @@ -95,7 +97,6 @@ in the format 'organizations/{{org_name}}'.`, Type: schema.TypeList, Computed: true, Optional: true, - ForceNew: true, DiffSuppressFunc: projectListDiffSuppress, Description: `Optional. Customer accept list represents the list of projects (id/number) on customer side that can privately connect to the service attachment. It is an optional field @@ -362,6 +363,86 @@ func resourceApigeeInstanceRead(d *schema.ResourceData, meta interface{}) error return nil } +func resourceApigeeInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + consumerAcceptListProp, err := expandApigeeInstanceConsumerAcceptList(d.Get("consumer_accept_list"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("consumer_accept_list"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, consumerAcceptListProp)) { + obj["consumerAcceptList"] = consumerAcceptListProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "{{org_id}}/apigeeInstances") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/instances/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Instance %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("consumer_accept_list") { + updateMask = append(updateMask, "consumerAcceptList") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsApigeeRetryableError}, + }) + + if err != nil { + return fmt.Errorf("Error updating Instance %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Instance %q: %#v", d.Id(), res) + } + + err = ApigeeOperationWaitTime( + config, res, "Updating Instance", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + } + + return resourceApigeeInstanceRead(d, meta) +} + func resourceApigeeInstanceDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_nat_address.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_nat_address.go
index 4d2cd80b6cb..327adffbea2 100644
--- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_nat_address.go
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_nat_address.go
@@ -24,16 +24,39 @@ import (
 	"reflect"
 	"time"
 
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 
 	"github.com/hashicorp/terraform-provider-google/google/tpgresource"
 	transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport"
 )
 
+// waitForNatAddressReserved waits for a NatAddress to leave the
+// "CREATING" state and become "RESERVED", to indicate that it's ready.
+func waitForNatAddressReserved(d *schema.ResourceData, config *transport_tpg.Config, timeout time.Duration) error {
+	return retry.Retry(timeout, func() *retry.RetryError {
+		if err := resourceApigeeNatAddressRead(d, config); err != nil {
+			return retry.NonRetryableError(err)
+		}
+
+		id := d.Id()
+		state := d.Get("state").(string)
+		if state == "CREATING" {
+			return retry.RetryableError(fmt.Errorf("NatAddress %q has state %q.", id, state))
+		} else if state == "RESERVED" {
+			log.Printf("[DEBUG] NatAddress %q has state %q.", id, state)
+			return nil
+		} else {
+			return retry.NonRetryableError(fmt.Errorf("NatAddress %q has state %q.", id, state))
+		}
+	})
+}
+
 func ResourceApigeeNatAddress() *schema.Resource {
 	return &schema.Resource{
 		Create: resourceApigeeNatAddressCreate,
 		Read:   resourceApigeeNatAddressRead,
+		Update: resourceApigeeNatAddressUpdate,
 		Delete: resourceApigeeNatAddressDelete,
 
 		Importer: &schema.ResourceImporter{
@@ -42,6 +65,7 @@ func ResourceApigeeNatAddress() *schema.Resource {
 
 		Timeouts: &schema.ResourceTimeout{
 			Create: schema.DefaultTimeout(30 * time.Minute),
+			Update: schema.DefaultTimeout(30 * time.Minute),
 			Delete: schema.DefaultTimeout(30 * time.Minute),
 		},
 
@@ -59,6 +83,12 @@ in the format 'organizations/{{org_name}}/instances/{{instance_name}}'.`,
 				ForceNew:    true,
 				Description: `Resource ID of the NAT address.`,
 			},
+			"activate": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Description: `Flag that specifies whether the reserved NAT address should be activated.`,
+				Default:     false,
+			},
 			"ip_address": {
 				Type:        schema.TypeString,
 				Computed:    true,
@@ -88,6 +118,17 @@ func resourceApigeeNatAddressCreate(d *schema.ResourceData, meta interface{}) er
 	} else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) {
 		obj["name"] = nameProp
 	}
+	activateProp, err := expandApigeeNatAddressActivate(d.Get("activate"), d, config)
+	if err != nil {
+		return err
+	} else if v, ok := d.GetOkExists("activate"); !tpgresource.IsEmptyValue(reflect.ValueOf(activateProp)) && (ok || !reflect.DeepEqual(v, activateProp)) {
+		obj["activate"] = activateProp
+	}
+
+	obj, err = resourceApigeeNatAddressEncoder(d, meta, obj)
+	if err != nil {
+		return err
+	}
 
 	url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{instance_id}}/natAddresses")
 	if err != nil {
@@ -137,6 +178,14 @@ func resourceApigeeNatAddressCreate(d *schema.ResourceData, meta interface{}) er
 		return fmt.Errorf("Error waiting to create NatAddress: %s", err)
 	}
 
+	opRes, err = resourceApigeeNatAddressDecoder(d, meta, opRes)
+	if err != nil {
+		return fmt.Errorf("Error 
decoding response from operation: %s", err) + } + if opRes == nil { + return fmt.Errorf("Error decoding response from operation, could not find object") + } + if err := d.Set("name", flattenApigeeNatAddressName(opRes["name"], d, config)); err != nil { return err } @@ -148,6 +197,17 @@ func resourceApigeeNatAddressCreate(d *schema.ResourceData, meta interface{}) er } d.SetId(id) + if d.Get("activate").(bool) { + if err := waitForNatAddressReserved(d, config, d.Timeout(schema.TimeoutCreate)-time.Minute); err != nil { + return fmt.Errorf("Error waiting for NatAddress %q to be RESERVED during creation: %q", d.Id(), err) + } + + log.Printf("[DEBUG] Activating for NatAddress %q to become ACTIVE", d.Id()) + if err := resourceApigeeNatAddressActivate(config, d, billingProject, userAgent); err != nil { + return fmt.Errorf("Error activating NatAddress: %s", err) + } + } + log.Printf("[DEBUG] Finished creating NatAddress %q: %#v", d.Id(), res) return resourceApigeeNatAddressRead(d, meta) @@ -185,9 +245,24 @@ func resourceApigeeNatAddressRead(d *schema.ResourceData, meta interface{}) erro return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ApigeeNatAddress %q", d.Id())) } + res, err = resourceApigeeNatAddressDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. It may be marked deleted + log.Printf("[DEBUG] Removing ApigeeNatAddress because it no longer exists.") + d.SetId("") + return nil + } + if err := d.Set("name", flattenApigeeNatAddressName(res["name"], d, config)); err != nil { return fmt.Errorf("Error reading NatAddress: %s", err) } + if err := d.Set("activate", flattenApigeeNatAddressActivate(res["activate"], d, config)); err != nil { + return fmt.Errorf("Error reading NatAddress: %s", err) + } if err := d.Set("ip_address", flattenApigeeNatAddressIpAddress(res["ipAddress"], d, config)); err != nil { return fmt.Errorf("Error reading NatAddress: %s", err) } @@ -198,6 +273,44 @@ func resourceApigeeNatAddressRead(d *schema.ResourceData, meta interface{}) erro return nil } +func resourceApigeeNatAddressUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + nameProp, err := expandApigeeNatAddressName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + log.Printf("[DEBUG] Updating NatAddress %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + if d.HasChange("activate") { + if !d.Get("activate").(bool) { + return fmt.Errorf("NatAddress %q allows only the activation action", d.Id()) + } else if d.Get("state").(string) == "RESERVED" { + log.Printf("[DEBUG] Activating for NatAddress %q to become ACTIVE", d.Id()) + if err := resourceApigeeNatAddressActivate(config, d, billingProject, userAgent); err != nil { + return fmt.Errorf("Error activating NatAddress: %s", err) + } + } + } + + return resourceApigeeNatAddressRead(d, meta) +} + func resourceApigeeNatAddressDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) @@ -273,6 +386,10 @@ func flattenApigeeNatAddressName(v interface{}, d *schema.ResourceData, config * return v } +func flattenApigeeNatAddressActivate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenApigeeNatAddressIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -284,3 +401,18 @@ func flattenApigeeNatAddressState(v interface{}, d *schema.ResourceData, config func expandApigeeNatAddressName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } + +func expandApigeeNatAddressActivate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceApigeeNatAddressEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // cannot include activate prop in the body + delete(obj, "activate") + return obj, nil +} + +func resourceApigeeNatAddressDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + res["activate"] = res["state"].(string) == "ACTIVE" + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_flexible_app_version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_flexible_app_version.go index 5e408e25aed..641cc782a81 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_flexible_app_version.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_flexible_app_version.go @@ -878,14 +878,14 @@ Reserved names,"default", "latest", and any name with the prefix "ah-".`, "noop_on_destroy": { Type: schema.TypeBool, Optional: true, - Default: false, Description: `If set to 'true', the application version will not be deleted.`, + Default: false, }, "delete_service_on_destroy": { Type: schema.TypeBool, Optional: true, - Default: false, Description: `If set to 'true', the service will be deleted if it is the last version.`, + Default: false, }, "project": { Type: schema.TypeString, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_standard_app_version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_standard_app_version.go index abf9de07c1c..796382b4f49 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_standard_app_version.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_standard_app_version.go @@ -457,14 +457,14 @@ Substitute '' with 'python', 'java', 'php', 'ruby', 'go' or 'nodejs'.` "noop_on_destroy": { Type: schema.TypeBool, Optional: true, - Default: false, Description: `If set to 'true', the application version will not be deleted.`, + Default: false, }, "delete_service_on_destroy": { Type: schema.TypeBool, Optional: true, - Default: false, Description: `If set to 'true', the 
service will be deleted if it is the last version.`, + Default: false, }, "project": { Type: schema.TypeString, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apphub/resource_apphub_application.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apphub/resource_apphub_application.go index f21392ed18a..cc2eda1cd7a 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apphub/resource_apphub_application.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apphub/resource_apphub_application.go @@ -18,6 +18,7 @@ package apphub import ( + "context" "fmt" "log" "net/http" @@ -33,6 +34,24 @@ import ( "github.com/hashicorp/terraform-provider-google/google/verify" ) +func apphubApplicationCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { + if diff.HasChange("location") || diff.HasChange("scope.0.type") { + location := diff.Get("location") + scope_type := diff.Get("scope.0.type") + + if scope_type == "GLOBAL" { + if location != "global" { + return fmt.Errorf("Error validating location %s with %s scope type", location, scope_type) + } + } else { + if location == "global" { + return fmt.Errorf("Error validating location %s with %s scope type", location, scope_type) + } + } + } + return nil +} + func ResourceApphubApplication() *schema.Resource { return &schema.Resource{ Create: resourceApphubApplicationCreate, @@ -51,6 +70,7 @@ func ResourceApphubApplication() *schema.Resource { }, CustomizeDiff: customdiff.All( + apphubApplicationCustomizeDiff, tpgresource.DefaultProviderProject, ), @@ -77,10 +97,11 @@ func ResourceApphubApplication() *schema.Resource { "type": { Type: schema.TypeString, Required: true, - ValidateFunc: verify.ValidateEnum([]string{"REGIONAL"}), + ValidateFunc: verify.ValidateEnum([]string{"REGIONAL", "GLOBAL"}), Description: `Required. Scope Type. Possible values: -REGIONAL Possible values: ["REGIONAL"]`, +REGIONAL +GLOBAL Possible values: ["REGIONAL", "GLOBAL"]`, }, }, }, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apphub/resource_apphub_service_project_attachment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apphub/resource_apphub_service_project_attachment.go index a5ff29e84df..4228c74d997 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apphub/resource_apphub_service_project_attachment.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apphub/resource_apphub_service_project_attachment.go @@ -69,7 +69,7 @@ func ResourceApphubServiceProjectAttachment() *schema.Resource { DiffSuppressFunc: ServiceProjectDiffSuppress, Description: `"Immutable. Service project name in the format: \"projects/abc\" or \"projects/123\". As input, project name with either project id or number -are accepted. As output, this field will contain project number. "`, +are accepted. As output, this field will contain project number."`, }, "create_time": { Type: schema.TypeString, @@ -79,7 +79,7 @@ are accepted. As output, this field will contain project number. "`, "name": { Type: schema.TypeString, Computed: true, - Description: `"Identifier. The resource name of a ServiceProjectAttachment. 
Format:\"projects/{host-project-id}/locations/global/serviceProjectAttachments/{service-project-id}.\" "`, + Description: `"Identifier. The resource name of a ServiceProjectAttachment. Format:\"projects/{host-project-id}/locations/global/serviceProjectAttachments/{service-project-id}.\""`, }, "state": { Type: schema.TypeString, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/artifactregistry/data_source_artifact_registry_locations.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/artifactregistry/data_source_artifact_registry_locations.go new file mode 100644 index 00000000000..1e2a482410e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/artifactregistry/data_source_artifact_registry_locations.go @@ -0,0 +1,91 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package artifactregistry + +import ( + "fmt" + "log" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleArtifactRegistryLocations() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleArtifactRegistryLocationsRead, + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "locations": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceGoogleArtifactRegistryLocationsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "https://artifactregistry.googleapis.com/v1/projects/{{project}}/locations") + if err != nil { + return err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return fmt.Errorf("Error listing Artifact Registry Locations : %s", err) + } + + locationsRaw := flattenArtifactRegistryLocations(res) + + locations := make([]string, len(locationsRaw)) + for i, loc := range locationsRaw { + locations[i] = loc.(string) + } + sort.Strings(locations) + + log.Printf("[DEBUG] Received Artifact Registry Locations: %q", locations) + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("locations", locations); err != nil { + return fmt.Errorf("Error setting location: %s", err) + } + + d.SetId(fmt.Sprintf("projects/%s", project)) + + return nil +} + +func flattenArtifactRegistryLocations(resp map[string]interface{}) []interface{} { + regionList := resp["locations"].([]interface{}) + regions := make([]interface{}, len(regionList)) + for i, v := range regionList { + regionObj := v.(map[string]interface{}) + regions[i] = regionObj["locationId"] + } + return regions +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/assuredworkloads/resource_assured_workloads_workload.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/assuredworkloads/resource_assured_workloads_workload.go index abd8dd10b6a..8443b2c0a8d 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/assuredworkloads/resource_assured_workloads_workload.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/assuredworkloads/resource_assured_workloads_workload.go @@ -60,7 +60,7 @@ func ResourceAssuredWorkloadsWorkload() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT", + Description: "Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_WITH_US_SUPPORT", }, "display_name": { @@ -118,7 +118,7 @@ func ResourceAssuredWorkloadsWorkload() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, - Description: "Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN", + Description: "Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM", }, "partner_permissions": { @@ -130,6 +130,13 @@ func ResourceAssuredWorkloadsWorkload() *schema.Resource { Elem: AssuredWorkloadsWorkloadPartnerPermissionsSchema(), }, + "partner_services_billing_account": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC.", + }, + "provisioned_resources_parent": { Type: schema.TypeString, Optional: true, @@ -153,6 +160,15 @@ func ResourceAssuredWorkloadsWorkload() *schema.Resource { Description: "Optional. Indicates whether the e-mail notification for a violation is enabled for a workload. This value will be by default True, and if not present will be considered as true. This should only be updated via updateWorkload call. Any Changes to this field during the createWorkload call will not be honored. 
This will always be true while creating the workload.", + "workload_options": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Used to specify certain options for a workload during workload creation - currently only supporting KAT Optionality for Regional Controls workloads.", + MaxItems: 1, + Elem: AssuredWorkloadsWorkloadWorkloadOptionsSchema(), + }, + "compliance_status": { Type: schema.TypeList, Computed: true, @@ -296,6 +312,19 @@ func AssuredWorkloadsWorkloadResourceSettingsSchema() *schema.Resource { } } +func AssuredWorkloadsWorkloadWorkloadOptionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kaj_enrollment_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Indicates type of KAJ enrollment for the workload. Currently, only specifying KEY_ACCESS_TRANSPARENCY_OFF is implemented to not enroll in KAT-level KAJ enrollment for Regional Controls workloads. Possible values: KAJ_ENROLLMENT_TYPE_UNSPECIFIED, FULL_KAJ, EKM_ONLY, KEY_ACCESS_TRANSPARENCY_OFF", + }, + }, + } +} + func AssuredWorkloadsWorkloadComplianceStatusSchema() *schema.Resource { return &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -391,9 +420,11 @@ func resourceAssuredWorkloadsWorkloadCreate(d *schema.ResourceData, meta interfa KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(d.Get("kms_settings")), Partner: assuredworkloads.WorkloadPartnerEnumRef(d.Get("partner").(string)), PartnerPermissions: expandAssuredWorkloadsWorkloadPartnerPermissions(d.Get("partner_permissions")), + PartnerServicesBillingAccount: dcl.String(d.Get("partner_services_billing_account").(string)), ProvisionedResourcesParent: dcl.String(d.Get("provisioned_resources_parent").(string)), ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")), ViolationNotificationsEnabled: dcl.Bool(d.Get("violation_notifications_enabled").(bool)), + WorkloadOptions: expandAssuredWorkloadsWorkloadWorkloadOptions(d.Get("workload_options")), } id, err := obj.ID() @@ -458,9 +489,11 @@ func resourceAssuredWorkloadsWorkloadRead(d *schema.ResourceData, meta interface KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(d.Get("kms_settings")), Partner: assuredworkloads.WorkloadPartnerEnumRef(d.Get("partner").(string)), PartnerPermissions: expandAssuredWorkloadsWorkloadPartnerPermissions(d.Get("partner_permissions")), + PartnerServicesBillingAccount: dcl.String(d.Get("partner_services_billing_account").(string)), ProvisionedResourcesParent: dcl.String(d.Get("provisioned_resources_parent").(string)), ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")), ViolationNotificationsEnabled: dcl.Bool(d.Get("violation_notifications_enabled").(bool)), + WorkloadOptions: expandAssuredWorkloadsWorkloadWorkloadOptions(d.Get("workload_options")), Name: dcl.StringOrNil(d.Get("name").(string)), } @@ -516,6 +549,9 @@ func resourceAssuredWorkloadsWorkloadRead(d *schema.ResourceData, meta interface if err = d.Set("partner_permissions", flattenAssuredWorkloadsWorkloadPartnerPermissions(res.PartnerPermissions)); err != nil { return fmt.Errorf("error setting partner_permissions in state: %s", err) } + if err = d.Set("partner_services_billing_account", res.PartnerServicesBillingAccount); err != nil { + return fmt.Errorf("error setting partner_services_billing_account in state: %s", err) + } if err = d.Set("provisioned_resources_parent", res.ProvisionedResourcesParent); err
!= nil { return fmt.Errorf("error setting provisioned_resources_parent in state: %s", err) } @@ -525,6 +561,9 @@ func resourceAssuredWorkloadsWorkloadRead(d *schema.ResourceData, meta interface if err = d.Set("violation_notifications_enabled", res.ViolationNotificationsEnabled); err != nil { return fmt.Errorf("error setting violation_notifications_enabled in state: %s", err) } + if err = d.Set("workload_options", flattenAssuredWorkloadsWorkloadWorkloadOptions(res.WorkloadOptions)); err != nil { + return fmt.Errorf("error setting workload_options in state: %s", err) + } if err = d.Set("compliance_status", flattenAssuredWorkloadsWorkloadComplianceStatus(res.ComplianceStatus)); err != nil { return fmt.Errorf("error setting compliance_status in state: %s", err) } @@ -572,9 +611,11 @@ func resourceAssuredWorkloadsWorkloadUpdate(d *schema.ResourceData, meta interfa KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(d.Get("kms_settings")), Partner: assuredworkloads.WorkloadPartnerEnumRef(d.Get("partner").(string)), PartnerPermissions: expandAssuredWorkloadsWorkloadPartnerPermissions(d.Get("partner_permissions")), + PartnerServicesBillingAccount: dcl.String(d.Get("partner_services_billing_account").(string)), ProvisionedResourcesParent: dcl.String(d.Get("provisioned_resources_parent").(string)), ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")), ViolationNotificationsEnabled: dcl.Bool(d.Get("violation_notifications_enabled").(bool)), + WorkloadOptions: expandAssuredWorkloadsWorkloadWorkloadOptions(d.Get("workload_options")), Name: dcl.StringOrNil(d.Get("name").(string)), } // Construct state hint from old values @@ -589,9 +630,11 @@ func resourceAssuredWorkloadsWorkloadUpdate(d *schema.ResourceData, meta interfa KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(tpgdclresource.OldValue(d.GetChange("kms_settings"))), Partner: assuredworkloads.WorkloadPartnerEnumRef(tpgdclresource.OldValue(d.GetChange("partner")).(string)), PartnerPermissions: expandAssuredWorkloadsWorkloadPartnerPermissions(tpgdclresource.OldValue(d.GetChange("partner_permissions"))), + PartnerServicesBillingAccount: dcl.String(tpgdclresource.OldValue(d.GetChange("partner_services_billing_account")).(string)), ProvisionedResourcesParent: dcl.String(tpgdclresource.OldValue(d.GetChange("provisioned_resources_parent")).(string)), ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(tpgdclresource.OldValue(d.GetChange("resource_settings"))), ViolationNotificationsEnabled: dcl.Bool(tpgdclresource.OldValue(d.GetChange("violation_notifications_enabled")).(bool)), + WorkloadOptions: expandAssuredWorkloadsWorkloadWorkloadOptions(tpgdclresource.OldValue(d.GetChange("workload_options"))), Name: dcl.StringOrNil(tpgdclresource.OldValue(d.GetChange("name")).(string)), } directive := tpgdclresource.UpdateDirective @@ -642,9 +685,11 @@ func resourceAssuredWorkloadsWorkloadDelete(d *schema.ResourceData, meta interfa KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(d.Get("kms_settings")), Partner: assuredworkloads.WorkloadPartnerEnumRef(d.Get("partner").(string)), PartnerPermissions: expandAssuredWorkloadsWorkloadPartnerPermissions(d.Get("partner_permissions")), + PartnerServicesBillingAccount: dcl.String(d.Get("partner_services_billing_account").(string)), ProvisionedResourcesParent: dcl.String(d.Get("provisioned_resources_parent").(string)), ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")), ViolationNotificationsEnabled: 
dcl.Bool(d.Get("violation_notifications_enabled").(bool)), + WorkloadOptions: expandAssuredWorkloadsWorkloadWorkloadOptions(d.Get("workload_options")), Name: dcl.StringOrNil(d.Get("name").(string)), } @@ -810,6 +855,32 @@ func flattenAssuredWorkloadsWorkloadResourceSettings(obj *assuredworkloads.Workl } +func expandAssuredWorkloadsWorkloadWorkloadOptions(o interface{}) *assuredworkloads.WorkloadWorkloadOptions { + if o == nil { + return assuredworkloads.EmptyWorkloadWorkloadOptions + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return assuredworkloads.EmptyWorkloadWorkloadOptions + } + obj := objArr[0].(map[string]interface{}) + return &assuredworkloads.WorkloadWorkloadOptions{ + KajEnrollmentType: assuredworkloads.WorkloadWorkloadOptionsKajEnrollmentTypeEnumRef(obj["kaj_enrollment_type"].(string)), + } +} + +func flattenAssuredWorkloadsWorkloadWorkloadOptions(obj *assuredworkloads.WorkloadWorkloadOptions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "kaj_enrollment_type": obj.KajEnrollmentType, + } + + return []interface{}{transformed} + +} + func flattenAssuredWorkloadsWorkloadComplianceStatus(obj *assuredworkloads.WorkloadComplianceStatus) interface{} { if obj == nil || obj.Empty() { return nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/data_source_google_bigquery_tables.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/data_source_google_bigquery_tables.go new file mode 100644 index 00000000000..ecc163985f7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/data_source_google_bigquery_tables.go @@ -0,0 +1,149 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package bigquery + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleBigQueryTables() *schema.Resource { + + dsSchema := map[string]*schema.Schema{ + "dataset_id": { + Type: schema.TypeString, + Required: true, + Description: "The ID of the dataset containing the tables.", + }, + "project": { + Type: schema.TypeString, + Optional: true, + Description: "The ID of the project in which the dataset is located. 
If it is not provided, the provider project is used.", + }, + "tables": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "labels": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "table_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + } + + return &schema.Resource{ + Read: DataSourceGoogleBigQueryTablesRead, + Schema: dsSchema, + } +} + +func DataSourceGoogleBigQueryTablesRead(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + datasetID := d.Get("dataset_id").(string) + + project, err := tpgresource.GetProject(d, config) + + if err != nil { + return fmt.Errorf("Error fetching project: %s", err) + } + + params := make(map[string]string) + tables := make([]map[string]interface{}, 0) + + for { + + url, err := tpgresource.ReplaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}/tables") + if err != nil { + return err + } + + url, err = transport_tpg.AddQueryParams(url, params) + if err != nil { + return err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return fmt.Errorf("Error retrieving tables: %s", err) + } + + pageTables := flattenDataSourceGoogleBigQueryTablesList(res["tables"]) + tables = append(tables, pageTables...) + + pToken, ok := res["nextPageToken"] + if ok && pToken != nil && pToken.(string) != "" { + params["pageToken"] = pToken.(string) + } else { + break + } + } + + if err := d.Set("tables", tables); err != nil { + return fmt.Errorf("Error retrieving tables: %s", err) + } + + id := fmt.Sprintf("projects/%s/datasets/%s/tables", project, datasetID) + d.SetId(id) + + return nil +} + +func flattenDataSourceGoogleBigQueryTablesList(res interface{}) []map[string]interface{} { + + if res == nil { + return make([]map[string]interface{}, 0) + } + + ls := res.([]interface{}) + + tables := make([]map[string]interface{}, 0, len(ls)) + + for _, raw := range ls { + output := raw.(map[string]interface{}) + + var mLabels map[string]interface{} + var mTableName string + + if oLabels, ok := output["labels"].(map[string]interface{}); ok { + mLabels = oLabels + } else { + mLabels = make(map[string]interface{}) // Initialize as an empty map if labels are missing + } + + if oTableReference, ok := output["tableReference"].(map[string]interface{}); ok { + if tableID, ok := oTableReference["tableId"].(string); ok { + mTableName = tableID + } + } + tables = append(tables, map[string]interface{}{ + "labels": mLabels, + "table_id": mTableName, + }) + } + + return tables +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/iam_bigquery_dataset.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/iam_bigquery_dataset.go index fc6fa3c7cde..f7037e4735b 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/iam_bigquery_dataset.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/iam_bigquery_dataset.go @@ -191,7 +191,7 @@ func policyToAccess(policy *cloudresourcemanager.Policy) ([]map[string]interface 
} for _, member := range binding.Members { // Do not append any deleted members - if strings.HasPrefix(member, "deleted:") { + if strings.HasPrefix(member, "iamMember:deleted:") { continue } access := map[string]interface{}{ @@ -213,7 +213,7 @@ func policyToAccess(policy *cloudresourcemanager.Policy) ([]map[string]interface // Dataset access uses different member types to identify groups, domains, etc. // these types are used as keys in the access JSON payload func iamMemberToAccess(member string) (string, string, error) { - if strings.HasPrefix(member, "deleted:") { + if strings.HasPrefix(member, "iamMember:deleted:") { return "", "", fmt.Errorf("BigQuery Dataset IAM member is deleted: %s", member) } pieces := strings.SplitN(member, ":", 2) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/iam_bigquery_member_dataset.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/iam_bigquery_member_dataset.go new file mode 100644 index 00000000000..f97a0bb5016 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/iam_bigquery_member_dataset.go @@ -0,0 +1,330 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +/* + This file is a copy of mmv1/third_party/terraform/services/bigquery/iam_bigquery_dataset.go + with new functions mergeAccess and GetCurrentResourceAccess +*/ +package bigquery + +import ( + "errors" + "fmt" + "strings" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var IamMemberBigqueryDatasetSchema = map[string]*schema.Schema{ + "dataset_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, +} + +var bigqueryIamMemberAccessPrimitiveToRoleMap = map[string]string{ + "OWNER": "roles/bigquery.dataOwner", + "WRITER": "roles/bigquery.dataEditor", + "READER": "roles/bigquery.dataViewer", +} + +type BigqueryDatasetIamMemberUpdater struct { + project string + datasetId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func NewBigqueryDatasetIamMemberUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + + return &BigqueryDatasetIamMemberUpdater{ + project: project, + datasetId: d.Get("dataset_id").(string), + d: d, + Config: config, + }, nil +} + +func (u *BigqueryDatasetIamMemberUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url := fmt.Sprintf("%s%s", u.Config.BigQueryBasePath, u.GetResourceId()) + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + 
Project: u.project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + policy, err := accessToPolicyForIamMember(res["access"]) + if err != nil { + return nil, err + } + return policy, nil +} + +func GetCurrentResourceAccess(u *BigqueryDatasetIamMemberUpdater) ([]interface{}, error) { + url := fmt.Sprintf("%s%s", u.Config.BigQueryBasePath, u.GetResourceId()) + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: u.project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + var access []interface{} + if accessVal, ok := res["access"].([]interface{}); ok { + access = accessVal + } else if res["access"] == nil { + access = []interface{}{} // Return an empty slice if the key is missing + } else { + return nil, fmt.Errorf("value under 'access' is not a slice of interface{}") + } + + return access, nil +} + +func mergeAccess(newAccess []map[string]interface{}, currAccess []interface{}) []map[string]interface{} { + mergedAccess := make([]map[string]interface{}, 0, len(newAccess)+len(currAccess)) + mergedAccess = append(mergedAccess, newAccess...) + + for _, item := range currAccess { + if itemMap, ok := item.(map[string]interface{}); ok { + // Check if the item has a "dataset" key + if _, ok := itemMap["dataset"]; ok { + mergedAccess = append(mergedAccess, itemMap) + } + } + } + return mergedAccess +} + +func (u *BigqueryDatasetIamMemberUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + url := fmt.Sprintf("%s%s", u.Config.BigQueryBasePath, u.GetResourceId()) + + newAccess, err := policyToAccessForIamMember(policy) + if err != nil { + return err + } + + currAccess, err := GetCurrentResourceAccess(u) + if err != nil { + return err + } + + access := mergeAccess(newAccess, currAccess) + + obj := map[string]interface{}{ + "access": access, + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "PATCH", + Project: u.project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return fmt.Errorf("Error creating DatasetAccess: %s", err) + } + + return nil +} + +func accessToPolicyForIamMember(access interface{}) (*cloudresourcemanager.Policy, error) { + if access == nil { + return nil, nil + } + roleToBinding := make(map[string]*cloudresourcemanager.Binding) + + accessArr := access.([]interface{}) + for _, v := range accessArr { + memberRole := v.(map[string]interface{}) + rawRole, ok := memberRole["role"] + if !ok { + // "view" allows role to not be defined. It is a special dataset access construct, so ignore + // If a user wants to manage "view" access they should use the `bigquery_dataset_access` resource + continue + } + role := rawRole.(string) + if iamRole, ok := bigqueryIamMemberAccessPrimitiveToRoleMap[role]; ok { + // API changes certain IAM roles to legacy roles. 
Revert these changes + role = iamRole + } + member, err := accessToIamMemberForIamMember(memberRole) + if err != nil { + return nil, err + } + // We have to combine bindings manually + binding, ok := roleToBinding[role] + if !ok { + binding = &cloudresourcemanager.Binding{Role: role, Members: []string{}} + } + binding.Members = append(binding.Members, member) + + roleToBinding[role] = binding + } + bindings := make([]*cloudresourcemanager.Binding, 0) + for _, v := range roleToBinding { + bindings = append(bindings, v) + } + + return &cloudresourcemanager.Policy{Bindings: bindings}, nil +} + +func policyToAccessForIamMember(policy *cloudresourcemanager.Policy) ([]map[string]interface{}, error) { + res := make([]map[string]interface{}, 0) + if len(policy.AuditConfigs) != 0 { + return nil, errors.New("Access policies not allowed on BigQuery Dataset IAM policies") + } + for _, binding := range policy.Bindings { + if binding.Condition != nil { + return nil, errors.New("IAM conditions not allowed on BigQuery Dataset IAM") + } + if fullRole, ok := bigqueryIamMemberAccessPrimitiveToRoleMap[binding.Role]; ok { + return nil, fmt.Errorf("BigQuery Dataset legacy role %s is not allowed when using google_bigquery_dataset_iam resources. Please use the full form: %s", binding.Role, fullRole) + } + for _, member := range binding.Members { + // Do not append any deleted members + if strings.HasPrefix(member, "iamMember:deleted:") { + continue + } + access := map[string]interface{}{ + "role": binding.Role, + } + memberType, member, err := iamMemberToAccessForIamMember(member) + if err != nil { + return nil, err + } + access[memberType] = member + res = append(res, access) + } + } + + return res, nil +} + +// Returns the member access type and member for an IAM member. +// Dataset access uses different member types to identify groups, domains, etc. 
+// these types are used as keys in the access JSON payload +func iamMemberToAccessForIamMember(member string) (string, string, error) { + if strings.HasPrefix(member, "iamMember:deleted:") { + return "", "", fmt.Errorf("BigQuery Dataset IAM member is deleted: %s", member) + } + pieces := strings.SplitN(member, ":", 2) + if len(pieces) > 1 { + switch pieces[0] { + case "group": + return "groupByEmail", pieces[1], nil + case "domain": + return "domain", pieces[1], nil + case "iamMember": + return "iamMember", pieces[1], nil + case "user": + return "userByEmail", pieces[1], nil + case "serviceAccount": + return "userByEmail", pieces[1], nil + } + } + if member == "projectOwners" || member == "projectReaders" || member == "projectWriters" || member == "allAuthenticatedUsers" { + // These are special BigQuery Dataset permissions + return "specialGroup", member, nil + } + return "", "", fmt.Errorf("Failed to parse BigQuery Dataset IAM member type: %s", member) +} + +func accessToIamMemberForIamMember(access map[string]interface{}) (string, error) { + // One of the fields must be set, we have to find which IAM member type this newAccess maps to + if member, ok := access["groupByEmail"]; ok { + return fmt.Sprintf("group:%s", member.(string)), nil + } + if member, ok := access["domain"]; ok { + return fmt.Sprintf("domain:%s", member.(string)), nil + } + if member, ok := access["specialGroup"]; ok { + return member.(string), nil + } + if member, ok := access["iamMember"]; ok { + return fmt.Sprintf("iamMember:%s", member.(string)), nil + } + if _, ok := access["view"]; ok { + // view does not map to an IAM member, use access instead + return "", fmt.Errorf("Failed to convert BigQuery Dataset access to IAM member. To use views with a dataset, please use dataset_access") + } + if _, ok := access["dataset"]; ok { + // dataset does not map to an IAM member, use access instead + return "", fmt.Errorf("Failed to convert BigQuery Dataset access to IAM member. To use views with a dataset, please use dataset_access") + } + if _, ok := access["routine"]; ok { + // routine does not map to an IAM member, use access instead + return "", fmt.Errorf("Failed to convert BigQuery Dataset access to IAM member. To use views with a dataset, please use dataset_access") + } + if member, ok := access["userByEmail"]; ok { + // service accounts have "gservice" in their email. 
This is best guess due to lost information + if strings.Contains(member.(string), "gserviceaccount") { + return fmt.Sprintf("serviceAccount:%s", member.(string)), nil + } + return fmt.Sprintf("user:%s", member.(string)), nil + } + return "", fmt.Errorf("Failed to identify IAM member from BigQuery Dataset access: %v", access) +} + +func (u *BigqueryDatasetIamMemberUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/datasets/%s", u.project, u.datasetId) +} + +// Matches the mutex of google_big_query_dataset_access +func (u *BigqueryDatasetIamMemberUpdater) GetMutexKey() string { + return fmt.Sprintf("%s", u.datasetId) +} + +func (u *BigqueryDatasetIamMemberUpdater) DescribeResource() string { + return fmt.Sprintf("Bigquery Dataset %s/%s", u.project, u.datasetId) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/iam_bigquery_table.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/iam_bigquery_table.go index ed2b37a6d87..3a693bcada0 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/iam_bigquery_table.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/iam_bigquery_table.go @@ -148,11 +148,6 @@ func (u *BigQueryTableIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager. return nil, err } var obj map[string]interface{} - obj = map[string]interface{}{ - "options": map[string]interface{}{ - "requestedPolicyVersion": 1, - }, - } userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) if err != nil { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_dataset.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_dataset.go index 25091dd07ea..de494667383 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_dataset.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_dataset.go @@ -140,8 +140,6 @@ access to this encryption key.`, Optional: true, Description: `The default partition expiration for all partitioned tables in the dataset, in milliseconds. - - Once this property is set, all newly-created partitioned tables in the dataset will have an 'expirationMs' property in the 'timePartitioning' settings set to this value, and changing the value will only @@ -160,8 +158,6 @@ over the default partition expiration time indicated by this property.`, ValidateFunc: validateDefaultTableExpirationMs, Description: `The default lifetime of all tables in the dataset, in milliseconds. The minimum value is 3600000 milliseconds (one hour). - - Once this property is set, all newly-created tables in the dataset will have an 'expirationTime' property set to the creation time plus the value in this property, and changing the value will only affect @@ -232,14 +228,10 @@ Please refer to the field 'effective_labels' for all of the labels present on th DiffSuppressFunc: tpgresource.CaseDiffSuppress, Description: `The geographic location where the dataset should reside. See [official docs](https://cloud.google.com/bigquery/docs/dataset-locations). 
- - There are two types of locations, regional or multi-regional. A regional location is a specific geographic place, such as Tokyo, and a multi-regional location is a large geographic area, such as the United States, that contains at least two geographic places. - - The default value is multi-regional location 'US'. Changing this forces a new resource to be created.`, Default: "US", @@ -250,6 +242,16 @@ Changing this forces a new resource to be created.`, Optional: true, Description: `Defines the time travel window in hours. The value can be from 48 to 168 hours (2 to 7 days).`, }, + "resource_tags": { + Type: schema.TypeMap, + Optional: true, + Description: `The tags attached to this dataset. Tag keys are globally unique. Tag key is expected to be +in the namespaced format, for example "123456789012/environment" where 123456789012 is the +ID of the parent organization or project resource for this tag key. Tag value is expected +to be the short name, for example "Production". See [Tag definitions](/iam/docs/tags-access-control#definitions) +for more details.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, "storage_billing_model": { Type: schema.TypeString, Computed: true, @@ -293,10 +295,10 @@ milliseconds since the epoch.`, "delete_contents_on_destroy": { Type: schema.TypeBool, Optional: true, - Default: false, Description: `If set to 'true', delete all the tables in the dataset when destroying the resource; otherwise, destroying the resource will fail if tables are present.`, + Default: false, }, "project": { Type: schema.TypeString, @@ -416,17 +418,9 @@ is 256 characters.`, Type: schema.TypeString, Optional: true, Description: `A special group to grant access to. Possible values include: - - * 'projectOwners': Owners of the enclosing project. - - * 'projectReaders': Readers of the enclosing project. - - * 'projectWriters': Writers of the enclosing project. 
- - * 'allAuthenticatedUsers': All authenticated BigQuery users.`, }, "user_by_email": { @@ -556,6 +550,12 @@ func resourceBigQueryDatasetCreate(d *schema.ResourceData, meta interface{}) err } else if v, ok := d.GetOkExists("storage_billing_model"); !tpgresource.IsEmptyValue(reflect.ValueOf(storageBillingModelProp)) && (ok || !reflect.DeepEqual(v, storageBillingModelProp)) { obj["storageBillingModel"] = storageBillingModelProp } + resourceTagsProp, err := expandBigQueryDatasetResourceTags(d.Get("resource_tags"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("resource_tags"); !tpgresource.IsEmptyValue(reflect.ValueOf(resourceTagsProp)) && (ok || !reflect.DeepEqual(v, resourceTagsProp)) { + obj["resourceTags"] = resourceTagsProp + } labelsProp, err := expandBigQueryDatasetEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err @@ -720,6 +720,9 @@ func resourceBigQueryDatasetRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("storage_billing_model", flattenBigQueryDatasetStorageBillingModel(res["storageBillingModel"], d, config)); err != nil { return fmt.Errorf("Error reading Dataset: %s", err) } + if err := d.Set("resource_tags", flattenBigQueryDatasetResourceTags(res["resourceTags"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } if err := d.Set("terraform_labels", flattenBigQueryDatasetTerraformLabels(res["labels"], d, config)); err != nil { return fmt.Errorf("Error reading Dataset: %s", err) } @@ -827,6 +830,12 @@ func resourceBigQueryDatasetUpdate(d *schema.ResourceData, meta interface{}) err } else if v, ok := d.GetOkExists("storage_billing_model"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, storageBillingModelProp)) { obj["storageBillingModel"] = storageBillingModelProp } + resourceTagsProp, err := expandBigQueryDatasetResourceTags(d.Get("resource_tags"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("resource_tags"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, resourceTagsProp)) { + obj["resourceTags"] = resourceTagsProp + } labelsProp, err := expandBigQueryDatasetEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err @@ -1268,6 +1277,10 @@ func flattenBigQueryDatasetStorageBillingModel(v interface{}, d *schema.Resource return v } +func flattenBigQueryDatasetResourceTags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenBigQueryDatasetTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v @@ -1653,6 +1666,17 @@ func expandBigQueryDatasetStorageBillingModel(v interface{}, d tpgresource.Terra return v, nil } +func expandBigQueryDatasetResourceTags(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + func expandBigQueryDatasetEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_dataset_access.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_dataset_access.go index f1d0d08644c..ac53a3e5dc5 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_dataset_access.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_dataset_access.go @@ -61,22 +61,30 @@ func resourceBigQueryDatasetAccessIamMemberDiffSuppress(k, old, new string, d *s } if memberInState := d.Get("user_by_email").(string); memberInState != "" { - return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) + return strings.ToLower(memberInState) == strings.ToLower(strippedIamMember) } if memberInState := d.Get("group_by_email").(string); memberInState != "" { - return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) + return strings.ToLower(memberInState) == strings.ToLower(strippedIamMember) } if memberInState := d.Get("domain").(string); memberInState != "" { - return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) + return strings.ToLower(memberInState) == strings.ToLower(strippedIamMember) } if memberInState := d.Get("special_group").(string); memberInState != "" { - return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) + return strings.ToLower(memberInState) == strings.ToLower(strippedIamMember) } } + if memberInState := d.Get("user_by_email").(string); memberInState != "" { + return strings.ToLower(old) == strings.ToLower(new) + } + + if memberInState := d.Get("group_by_email").(string); memberInState != "" { + return strings.ToLower(old) == strings.ToLower(new) + } + return false } @@ -296,17 +304,9 @@ is 256 characters.`, ForceNew: true, DiffSuppressFunc: resourceBigQueryDatasetAccessIamMemberDiffSuppress, Description: `A special group to grant access to. Possible values include: - - * 'projectOwners': Owners of the enclosing project. - - * 'projectReaders': Readers of the enclosing project. - - * 'projectWriters': Writers of the enclosing project. 
- - * 'allAuthenticatedUsers': All authenticated BigQuery users.`, ExactlyOneOf: []string{"user_by_email", "group_by_email", "domain", "special_group", "iam_member", "view", "dataset", "routine"}, }, @@ -679,11 +679,19 @@ func flattenNestedBigQueryDatasetAccessRole(v interface{}, d *schema.ResourceDat } func flattenNestedBigQueryDatasetAccessUserByEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v + if v == nil { + return nil + } + + return strings.ToLower(v.(string)) } func flattenNestedBigQueryDatasetAccessGroupByEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v + if v == nil { + return nil + } + + return strings.ToLower(v.(string)) } func flattenNestedBigQueryDatasetAccessDomain(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { @@ -814,11 +822,19 @@ func expandNestedBigQueryDatasetAccessRole(v interface{}, d tpgresource.Terrafor } func expandNestedBigQueryDatasetAccessUserByEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil + if v == nil { + return nil, nil + } + + return strings.ToLower(v.(string)), nil } func expandNestedBigQueryDatasetAccessGroupByEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil + if v == nil { + return nil, nil + } + + return strings.ToLower(v.(string)), nil } func expandNestedBigQueryDatasetAccessDomain(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_job.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_job.go index 94c7c797367..75a249eba8b 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_job.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_job.go @@ -2331,6 +2331,20 @@ func expandBigQueryJobConfigurationQueryDestinationTable(v interface{}, d tpgres transformed["tableId"] = parts[3] } + configError := "Invalid BigQuery job destination_table configuration. You must either:\n" + + "1. Set all of project_id, dataset_id, and table_id separately, or\n" + + "2. Provide table_id in the form 'projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}'" + + // Validate required fields + if projectId, ok := transformed["projectId"]; !ok || projectId == nil || + reflect.ValueOf(projectId).IsZero() { + return nil, fmt.Errorf("%s\nMissing or empty projectId", configError) + } + + if datasetId, ok := transformed["datasetId"]; !ok || datasetId == nil || + reflect.ValueOf(datasetId).IsZero() { + return nil, fmt.Errorf("%s\nMissing or empty datasetId", configError) + } return transformed, nil } @@ -2714,6 +2728,20 @@ func expandBigQueryJobConfigurationLoadDestinationTable(v interface{}, d tpgreso transformed["tableId"] = parts[3] } + configError := "Invalid BigQuery job destination_table configuration. You must either:\n" + + "1. Set all of project_id, dataset_id, and table_id separately, or\n" + + "2. 
Provide table_id in the form 'projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}'" + + // Validate required fields + if projectId, ok := transformed["projectId"]; !ok || projectId == nil || + reflect.ValueOf(projectId).IsZero() { + return nil, fmt.Errorf("%s\nMissing or empty projectId", configError) + } + + if datasetId, ok := transformed["datasetId"]; !ok || datasetId == nil || + reflect.ValueOf(datasetId).IsZero() { + return nil, fmt.Errorf("%s\nMissing or empty datasetId", configError) + } return transformed, nil } @@ -3008,6 +3036,20 @@ func expandBigQueryJobConfigurationCopyDestinationTable(v interface{}, d tpgreso transformed["tableId"] = parts[3] } + configError := "Invalid BigQuery job destination_table configuration. You must either:\n" + + "1. Set all of project_id, dataset_id, and table_id separately, or\n" + + "2. Provide table_id in the form 'projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}'" + + // Validate required fields + if projectId, ok := transformed["projectId"]; !ok || projectId == nil || + reflect.ValueOf(projectId).IsZero() { + return nil, fmt.Errorf("%s\nMissing or empty projectId", configError) + } + + if datasetId, ok := transformed["datasetId"]; !ok || datasetId == nil || + reflect.ValueOf(datasetId).IsZero() { + return nil, fmt.Errorf("%s\nMissing or empty datasetId", configError) + } return transformed, nil } @@ -3175,6 +3217,20 @@ func expandBigQueryJobConfigurationExtractSourceTable(v interface{}, d tpgresour transformed["tableId"] = parts[3] } + configError := "Invalid BigQuery job source_table configuration. You must either:\n" + + "1. Set all of project_id, dataset_id, and table_id separately, or\n" + + "2. Provide table_id in the form 'projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}'" + + // Validate required fields + if projectId, ok := transformed["projectId"]; !ok || projectId == nil || + reflect.ValueOf(projectId).IsZero() { + return nil, fmt.Errorf("%s\nMissing or empty projectId", configError) + } + + if datasetId, ok := transformed["datasetId"]; !ok || datasetId == nil || + reflect.ValueOf(datasetId).IsZero() { + return nil, fmt.Errorf("%s\nMissing or empty datasetId", configError) + } return transformed, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_table.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_table.go index f046c4966ef..ae9300ba1a1 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_table.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_table.go @@ -517,7 +517,7 @@ func ResourceBigQueryTable() *schema.Resource { Optional: true, Description: `Please see sourceFormat under ExternalDataConfiguration in Bigquery's public API documentation (https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externaldataconfiguration) for supported formats. 
To use "GOOGLE_SHEETS" the scopes must include "googleapis.com/auth/drive.readonly".`, ValidateFunc: validation.StringInSlice([]string{ - "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", "AVRO", "ICEBERG", "DATASTORE_BACKUP", "PARQUET", "ORC", "BIGTABLE", + "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", "AVRO", "ICEBERG", "DATASTORE_BACKUP", "PARQUET", "ORC", "BIGTABLE", "DELTA_LAKE", }, false), }, // SourceURIs [Required] The fully-qualified URIs that point to your data in Google Cloud. @@ -892,6 +892,53 @@ func ResourceBigQueryTable() *schema.Resource { }, }, + // BiglakeConfiguration [Optional] Specifies the configuration of a BigLake managed table. + "biglake_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ForceNew: true, + Description: "Specifies the configuration of a BigLake managed table.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // ConnectionId: [Required] The connection specifying the credentials to be used to read + // and write to external storage, such as Cloud Storage. The connection_id can have the + // form "<project\_id>.<location\_id>.<connection\_id>" or + // "projects/<project\_id>/locations/<location\_id>/connections/<connection\_id>". + "connection_id": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: bigQueryTableConnectionIdSuppress, + ForceNew: true, + Description: `The connection specifying the credentials to be used to read and write to external storage, such as Cloud Storage. The connection_id can have the form "<project\_id>.<location\_id>.<connection\_id>" or "projects/<project\_id>/locations/<location\_id>/connections/<connection\_id>".`, + }, + // StorageUri: [Required] The fully qualified location prefix of the external folder where + // table data is stored. The '*' wildcard character is not allowed. + // The URI should be in the format "gs://bucket/path_to_table/" + "storage_uri": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The fully qualified location prefix of the external folder where table data is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/"`, + }, + // FileFormat: [Required] The file format the data is stored in. + "file_format": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The file format the data is stored in.", + }, + // TableFormat: [Required] + "table_format": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The table format the metadata only snapshots are stored in.", + }, + }, + }, + }, + // FriendlyName: [Optional] A descriptive name for this table. "friendly_name": { Type: schema.TypeString, @@ -1401,6 +1448,12 @@ func ResourceBigQueryTable() *schema.Resource { }, }, }, + "resource_tags": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. 
Tag value is expected to be the short name, for example "Production".`, + }, }, UseJSONNumber: true, } @@ -1447,6 +1500,14 @@ func resourceTable(d *schema.ResourceData, meta interface{}) (*bigquery.Table, e table.ExternalDataConfiguration = externalDataConfiguration } + if v, ok := d.GetOk("biglake_configuration"); ok { + biglakeConfiguration, err := expandBigLakeConfiguration(v) + if err != nil { + return nil, err + } + table.BiglakeConfiguration = biglakeConfiguration + } + if v, ok := d.GetOk("friendly_name"); ok { table.FriendlyName = v.(string) } @@ -1515,6 +1576,8 @@ func resourceTable(d *schema.ResourceData, meta interface{}) (*bigquery.Table, e table.TableConstraints = tableConstraints } + table.ResourceTags = tpgresource.ExpandStringMap(d, "resource_tags") + return table, nil } @@ -1578,6 +1641,9 @@ func resourceBigQueryTableCreate(d *schema.ResourceData, meta interface{}) error } if table.View != nil && table.Schema != nil { + if schemaHasRequiredFields(table.Schema) { + return errors.New("Schema cannot contain required fields when creating a view") + } log.Printf("[INFO] Removing schema from table definition because BigQuery does not support setting schema on view creation") schemaBack := table.Schema @@ -1732,6 +1798,17 @@ func resourceBigQueryTableRead(d *schema.ResourceData, meta interface{}) error { } } + if res.BiglakeConfiguration != nil { + bigLakeConfiguration, err := flattenBigLakeConfiguration(res.BiglakeConfiguration) + if err != nil { + return err + } + + if err := d.Set("biglake_configuration", bigLakeConfiguration); err != nil { + return fmt.Errorf("Error setting biglake_configuration: %s", err) + } + } + if res.TimePartitioning != nil { if err := d.Set("time_partitioning", flattenTimePartitioning(res.TimePartitioning, use_old_rpf)); err != nil { return err @@ -1788,6 +1865,10 @@ func resourceBigQueryTableRead(d *schema.ResourceData, meta interface{}) error { } } + if err := d.Set("resource_tags", res.ResourceTags); err != nil { + return fmt.Errorf("Error setting resource tags: %s", err) + } + // TODO: Update when the Get API fields for TableReplicationInfo are available in the client library. 
url, err := tpgresource.ReplaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}") if err != nil { @@ -1835,6 +1916,11 @@ func resourceBigQueryTableUpdate(d *schema.ResourceData, meta interface{}) error return err } + if table.ExternalDataConfiguration != nil && table.ExternalDataConfiguration.Schema != nil { + log.Printf("[INFO] Removing ExternalDataConfiguration.Schema when updating BigQuery table %s", d.Id()) + table.ExternalDataConfiguration.Schema = nil + } + log.Printf("[INFO] Updating BigQuery table: %s", d.Id()) project, err := tpgresource.GetProject(d, config) @@ -1909,6 +1995,7 @@ func resourceBigQueryTableDelete(d *schema.ResourceData, meta interface{}) error if d.Get("deletion_protection").(bool) { return fmt.Errorf("cannot destroy table %v without setting deletion_protection=false and running `terraform apply`", d.Id()) } + config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { @@ -2520,6 +2607,15 @@ func setEmptyPolicyTagsInSchema(field *bigquery.TableFieldSchema) { } } +func schemaHasRequiredFields(schema *bigquery.TableSchema) bool { + for _, field := range schema.Fields { + if "REQUIRED" == field.Mode { + return true + } + } + return false +} + func expandTimePartitioning(configured interface{}) *bigquery.TimePartitioning { raw := configured.([]interface{})[0].(map[string]interface{}) tp := &bigquery.TimePartitioning{Type: raw["type"].(string)} @@ -2676,6 +2772,41 @@ func flattenMaterializedView(mvd *bigquery.MaterializedViewDefinition) []map[str return []map[string]interface{}{result} } +func flattenBigLakeConfiguration(blc *bigquery.BigLakeConfiguration) ([]map[string]interface{}, error) { + result := map[string]interface{}{} + + result["connection_id"] = blc.ConnectionId + result["storage_uri"] = blc.StorageUri + result["file_format"] = blc.FileFormat + result["table_format"] = blc.TableFormat + + return []map[string]interface{}{result}, nil +} + +func expandBigLakeConfiguration(cfg interface{}) (*bigquery.BigLakeConfiguration, error) { + raw := cfg.([]interface{})[0].(map[string]interface{}) + + blc := &bigquery.BigLakeConfiguration{} + + if v, ok := raw["connection_id"]; ok { + blc.ConnectionId = v.(string) + } + + if v, ok := raw["storage_uri"]; ok { + blc.StorageUri = v.(string) + } + + if v, ok := raw["file_format"]; ok { + blc.FileFormat = v.(string) + } + + if v, ok := raw["table_format"]; ok { + blc.TableFormat = v.(string) + } + + return blc, nil +} + func expandPrimaryKey(configured interface{}) *bigquery.TableConstraintsPrimaryKey { if len(configured.([]interface{})) == 0 { return nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub/resource_bigquery_analytics_hub_data_exchange.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub/resource_bigquery_analytics_hub_data_exchange.go index c7403b2a37e..bc442594129 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub/resource_bigquery_analytics_hub_data_exchange.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub/resource_bigquery_analytics_hub_data_exchange.go @@ -91,6 +91,41 @@ func ResourceBigqueryAnalyticsHubDataExchange() *schema.Resource { Optional: true, Description: `Email or URL of 
the primary point of contact of the data exchange.`, }, + "sharing_environment_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Configurable data sharing environment option for a data exchange. +This field is required for data clean room exchanges.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dcr_exchange_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Data Clean Room (DCR), used for privacy-safe and secured data sharing.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + ExactlyOneOf: []string{"sharing_environment_config.0.default_exchange_config", "sharing_environment_config.0.dcr_exchange_config"}, + }, + "default_exchange_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Default Analytics Hub data exchange, used for secured data sharing.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + ExactlyOneOf: []string{"sharing_environment_config.0.default_exchange_config", "sharing_environment_config.0.dcr_exchange_config"}, + }, + }, + }, + }, "listing_count": { Type: schema.TypeInt, Computed: true, @@ -151,6 +186,12 @@ func resourceBigqueryAnalyticsHubDataExchangeCreate(d *schema.ResourceData, meta } else if v, ok := d.GetOkExists("icon"); !tpgresource.IsEmptyValue(reflect.ValueOf(iconProp)) && (ok || !reflect.DeepEqual(v, iconProp)) { obj["icon"] = iconProp } + sharingEnvironmentConfigProp, err := expandBigqueryAnalyticsHubDataExchangeSharingEnvironmentConfig(d.Get("sharing_environment_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("sharing_environment_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(sharingEnvironmentConfigProp)) && (ok || !reflect.DeepEqual(v, sharingEnvironmentConfigProp)) { + obj["sharingEnvironmentConfig"] = sharingEnvironmentConfigProp + } url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryAnalyticsHubBasePath}}projects/{{project}}/locations/{{location}}/dataExchanges?data_exchange_id={{data_exchange_id}}") if err != nil { @@ -264,6 +305,9 @@ func resourceBigqueryAnalyticsHubDataExchangeRead(d *schema.ResourceData, meta i if err := d.Set("icon", flattenBigqueryAnalyticsHubDataExchangeIcon(res["icon"], d, config)); err != nil { return fmt.Errorf("Error reading DataExchange: %s", err) } + if err := d.Set("sharing_environment_config", flattenBigqueryAnalyticsHubDataExchangeSharingEnvironmentConfig(res["sharingEnvironmentConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading DataExchange: %s", err) + } return nil } @@ -489,6 +533,37 @@ func flattenBigqueryAnalyticsHubDataExchangeIcon(v interface{}, d *schema.Resour return v } +func flattenBigqueryAnalyticsHubDataExchangeSharingEnvironmentConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["default_exchange_config"] = + flattenBigqueryAnalyticsHubDataExchangeSharingEnvironmentConfigDefaultExchangeConfig(original["defaultExchangeConfig"], d, config) + transformed["dcr_exchange_config"] = + flattenBigqueryAnalyticsHubDataExchangeSharingEnvironmentConfigDcrExchangeConfig(original["dcrExchangeConfig"], d, config) + return []interface{}{transformed} +} +func 
flattenBigqueryAnalyticsHubDataExchangeSharingEnvironmentConfigDefaultExchangeConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenBigqueryAnalyticsHubDataExchangeSharingEnvironmentConfigDcrExchangeConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + func expandBigqueryAnalyticsHubDataExchangeDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -508,3 +583,59 @@ func expandBigqueryAnalyticsHubDataExchangeDocumentation(v interface{}, d tpgres func expandBigqueryAnalyticsHubDataExchangeIcon(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } + +func expandBigqueryAnalyticsHubDataExchangeSharingEnvironmentConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDefaultExchangeConfig, err := expandBigqueryAnalyticsHubDataExchangeSharingEnvironmentConfigDefaultExchangeConfig(original["default_exchange_config"], d, config) + if err != nil { + return nil, err + } else { + transformed["defaultExchangeConfig"] = transformedDefaultExchangeConfig + } + + transformedDcrExchangeConfig, err := expandBigqueryAnalyticsHubDataExchangeSharingEnvironmentConfigDcrExchangeConfig(original["dcr_exchange_config"], d, config) + if err != nil { + return nil, err + } else { + transformed["dcrExchangeConfig"] = transformedDcrExchangeConfig + } + + return transformed, nil +} + +func expandBigqueryAnalyticsHubDataExchangeSharingEnvironmentConfigDefaultExchangeConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandBigqueryAnalyticsHubDataExchangeSharingEnvironmentConfigDcrExchangeConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub/resource_bigquery_analytics_hub_listing.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub/resource_bigquery_analytics_hub_listing.go index 1b2584bbc25..7a72f36de4d 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub/resource_bigquery_analytics_hub_listing.go +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub/resource_bigquery_analytics_hub_listing.go @@ -57,6 +57,7 @@ func ResourceBigqueryAnalyticsHubListing() *schema.Resource { "bigquery_dataset": { Type: schema.TypeList, Required: true, + ForceNew: true, Description: `Shared dataset i.e. BigQuery dataset source.`, MaxItems: 1, Elem: &schema.Resource{ @@ -64,9 +65,27 @@ func ResourceBigqueryAnalyticsHubListing() *schema.Resource { "dataset": { Type: schema.TypeString, Required: true, + ForceNew: true, DiffSuppressFunc: tpgresource.ProjectNumberDiffSuppress, Description: `Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123`, }, + "selected_resources": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Resource in this dataset that is selectively shared. This field is required for data clean room exchanges.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "table": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.ProjectNumberDiffSuppress, + Description: `Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table"`, + }, + }, + }, + }, }, }, }, @@ -183,6 +202,11 @@ func ResourceBigqueryAnalyticsHubListing() *schema.Resource { Optional: true, Description: `If true, restrict export of query result derived from restricted linked dataset table.`, }, + "restrict_direct_table_access": { + Type: schema.TypeBool, + Computed: true, + Description: `If true, restrict direct table access(read api/tabledata.list) on linked table.`, + }, }, }, }, @@ -478,12 +502,6 @@ func resourceBigqueryAnalyticsHubListingUpdate(d *schema.ResourceData, meta inte } else if v, ok := d.GetOkExists("categories"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, categoriesProp)) { obj["categories"] = categoriesProp } - bigqueryDatasetProp, err := expandBigqueryAnalyticsHubListingBigqueryDataset(d.Get("bigquery_dataset"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bigquery_dataset"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bigqueryDatasetProp)) { - obj["bigqueryDataset"] = bigqueryDatasetProp - } restrictedExportConfigProp, err := expandBigqueryAnalyticsHubListingRestrictedExportConfig(d.Get("restricted_export_config"), d, config) if err != nil { return err @@ -536,10 +554,6 @@ func resourceBigqueryAnalyticsHubListingUpdate(d *schema.ResourceData, meta inte updateMask = append(updateMask, "categories") } - if d.HasChange("bigquery_dataset") { - updateMask = append(updateMask, "bigqueryDataset") - } - if d.HasChange("restricted_export_config") { updateMask = append(updateMask, "restrictedExportConfig") } @@ -736,12 +750,36 @@ func flattenBigqueryAnalyticsHubListingBigqueryDataset(v interface{}, d *schema. 
transformed := make(map[string]interface{}) transformed["dataset"] = flattenBigqueryAnalyticsHubListingBigqueryDatasetDataset(original["dataset"], d, config) + transformed["selected_resources"] = + flattenBigqueryAnalyticsHubListingBigqueryDatasetSelectedResources(original["selectedResources"], d, config) return []interface{}{transformed} } func flattenBigqueryAnalyticsHubListingBigqueryDatasetDataset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } +func flattenBigqueryAnalyticsHubListingBigqueryDatasetSelectedResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "table": flattenBigqueryAnalyticsHubListingBigqueryDatasetSelectedResourcesTable(original["table"], d, config), + }) + } + return transformed +} +func flattenBigqueryAnalyticsHubListingBigqueryDatasetSelectedResourcesTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenBigqueryAnalyticsHubListingRestrictedExportConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil @@ -753,6 +791,8 @@ func flattenBigqueryAnalyticsHubListingRestrictedExportConfig(v interface{}, d * transformed := make(map[string]interface{}) transformed["enabled"] = flattenBigqueryAnalyticsHubListingRestrictedExportConfigEnabled(original["enabled"], d, config) + transformed["restrict_direct_table_access"] = + flattenBigqueryAnalyticsHubListingRestrictedExportConfigRestrictDirectTableAccess(original["restrictDirectTableAccess"], d, config) transformed["restrict_query_result"] = flattenBigqueryAnalyticsHubListingRestrictedExportConfigRestrictQueryResult(original["restrictQueryResult"], d, config) return []interface{}{transformed} @@ -761,6 +801,10 @@ func flattenBigqueryAnalyticsHubListingRestrictedExportConfigEnabled(v interface return v } +func flattenBigqueryAnalyticsHubListingRestrictedExportConfigRestrictDirectTableAccess(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenBigqueryAnalyticsHubListingRestrictedExportConfigRestrictQueryResult(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -877,6 +921,13 @@ func expandBigqueryAnalyticsHubListingBigqueryDataset(v interface{}, d tpgresour transformed["dataset"] = transformedDataset } + transformedSelectedResources, err := expandBigqueryAnalyticsHubListingBigqueryDatasetSelectedResources(original["selected_resources"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSelectedResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["selectedResources"] = transformedSelectedResources + } + return transformed, nil } @@ -884,6 +935,32 @@ func expandBigqueryAnalyticsHubListingBigqueryDatasetDataset(v interface{}, d tp return v, nil } +func expandBigqueryAnalyticsHubListingBigqueryDatasetSelectedResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + 
continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTable, err := expandBigqueryAnalyticsHubListingBigqueryDatasetSelectedResourcesTable(original["table"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["table"] = transformedTable + } + + req = append(req, transformed) + } + return req, nil +} + +func expandBigqueryAnalyticsHubListingBigqueryDatasetSelectedResourcesTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandBigqueryAnalyticsHubListingRestrictedExportConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { @@ -900,6 +977,13 @@ func expandBigqueryAnalyticsHubListingRestrictedExportConfig(v interface{}, d tp transformed["enabled"] = transformedEnabled } + transformedRestrictDirectTableAccess, err := expandBigqueryAnalyticsHubListingRestrictedExportConfigRestrictDirectTableAccess(original["restrict_direct_table_access"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRestrictDirectTableAccess); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["restrictDirectTableAccess"] = transformedRestrictDirectTableAccess + } + transformedRestrictQueryResult, err := expandBigqueryAnalyticsHubListingRestrictedExportConfigRestrictQueryResult(original["restrict_query_result"], d, config) if err != nil { return nil, err @@ -914,6 +998,10 @@ func expandBigqueryAnalyticsHubListingRestrictedExportConfigEnabled(v interface{ return v, nil } +func expandBigqueryAnalyticsHubListingRestrictedExportConfigRestrictDirectTableAccess(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandBigqueryAnalyticsHubListingRestrictedExportConfigRestrictQueryResult(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquerydatapolicy/resource_bigquery_datapolicy_data_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquerydatapolicy/resource_bigquery_datapolicy_data_policy.go index 788cc466033..ece12672277 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquerydatapolicy/resource_bigquery_datapolicy_data_policy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquerydatapolicy/resource_bigquery_datapolicy_data_policy.go @@ -22,6 +22,7 @@ import ( "log" "net/http" "reflect" + "regexp" "strings" "time" @@ -430,7 +431,21 @@ func flattenBigqueryDatapolicyDataPolicyDataPolicyId(v interface{}, d *schema.Re } func flattenBigqueryDatapolicyDataPolicyPolicyTag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v + if v == nil { + return nil + } + + if _, ok := v.(string); !ok { + return v + } + + re := regexp.MustCompile(`(projects/.*)(/locations/.*/)(policyTags/.*)`) + result := re.ReplaceAllStringFunc(v.(string), func(match string) string { + matches := re.FindStringSubmatch(match) + return matches[1] + 
strings.ToLower(matches[2]) + matches[3] + }) + + return result } func flattenBigqueryDatapolicyDataPolicyDataPolicyType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquerydatatransfer/resource_bigquery_data_transfer_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquerydatatransfer/resource_bigquery_data_transfer_config.go index dbb72787078..4f177a5ce38 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquerydatatransfer/resource_bigquery_data_transfer_config.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquerydatatransfer/resource_bigquery_data_transfer_config.go @@ -19,6 +19,7 @@ package bigquerydatatransfer import ( "context" + "encoding/json" "fmt" "log" "net/http" @@ -175,6 +176,21 @@ email address of the user who owns this transfer config.`, }, }, }, + "encryption_configuration": { + Type: schema.TypeList, + Optional: true, + Description: `Represents the encryption configuration for a transfer.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_name": { + Type: schema.TypeString, + Required: true, + Description: `The name of the KMS key used for encrypting BigQuery data.`, + }, + }, + }, + }, "location": { Type: schema.TypeString, Optional: true, @@ -347,6 +363,12 @@ func resourceBigqueryDataTransferConfigCreate(d *schema.ResourceData, meta inter } else if v, ok := d.GetOkExists("data_refresh_window_days"); !tpgresource.IsEmptyValue(reflect.ValueOf(dataRefreshWindowDaysProp)) && (ok || !reflect.DeepEqual(v, dataRefreshWindowDaysProp)) { obj["dataRefreshWindowDays"] = dataRefreshWindowDaysProp } + encryptionConfigurationProp, err := expandBigqueryDataTransferConfigEncryptionConfiguration(d.Get("encryption_configuration"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("encryption_configuration"); !tpgresource.IsEmptyValue(reflect.ValueOf(encryptionConfigurationProp)) && (ok || !reflect.DeepEqual(v, encryptionConfigurationProp)) { + obj["encryptionConfiguration"] = encryptionConfigurationProp + } disabledProp, err := expandBigqueryDataTransferConfigDisabled(d.Get("disabled"), d, config) if err != nil { return err @@ -515,6 +537,9 @@ func resourceBigqueryDataTransferConfigRead(d *schema.ResourceData, meta interfa if err := d.Set("data_refresh_window_days", flattenBigqueryDataTransferConfigDataRefreshWindowDays(res["dataRefreshWindowDays"], d, config)); err != nil { return fmt.Errorf("Error reading Config: %s", err) } + if err := d.Set("encryption_configuration", flattenBigqueryDataTransferConfigEncryptionConfiguration(res["encryptionConfiguration"], d, config)); err != nil { + return fmt.Errorf("Error reading Config: %s", err) + } if err := d.Set("disabled", flattenBigqueryDataTransferConfigDisabled(res["disabled"], d, config)); err != nil { return fmt.Errorf("Error reading Config: %s", err) } @@ -583,6 +608,12 @@ func resourceBigqueryDataTransferConfigUpdate(d *schema.ResourceData, meta inter } else if v, ok := d.GetOkExists("data_refresh_window_days"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dataRefreshWindowDaysProp)) { obj["dataRefreshWindowDays"] = dataRefreshWindowDaysProp } + encryptionConfigurationProp, err := 
expandBigqueryDataTransferConfigEncryptionConfiguration(d.Get("encryption_configuration"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("encryption_configuration"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, encryptionConfigurationProp)) { + obj["encryptionConfiguration"] = encryptionConfigurationProp + } disabledProp, err := expandBigqueryDataTransferConfigDisabled(d.Get("disabled"), d, config) if err != nil { return err @@ -834,6 +865,23 @@ func flattenBigqueryDataTransferConfigDataRefreshWindowDays(v interface{}, d *sc return v // let terraform core handle it otherwise } +func flattenBigqueryDataTransferConfigEncryptionConfiguration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["kms_key_name"] = + flattenBigqueryDataTransferConfigEncryptionConfigurationKmsKeyName(original["kmsKeyName"], d, config) + return []interface{}{transformed} +} +func flattenBigqueryDataTransferConfigEncryptionConfigurationKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenBigqueryDataTransferConfigDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -944,6 +992,29 @@ func expandBigqueryDataTransferConfigDataRefreshWindowDays(v interface{}, d tpgr return v, nil } +func expandBigqueryDataTransferConfigEncryptionConfiguration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKmsKeyName, err := expandBigqueryDataTransferConfigEncryptionConfigurationKmsKeyName(original["kms_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyName"] = transformedKmsKeyName + } + + return transformed, nil +} + +func expandBigqueryDataTransferConfigEncryptionConfigurationKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandBigqueryDataTransferConfigDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -965,8 +1036,24 @@ func resourceBigqueryDataTransferConfigEncoder(d *schema.ResourceData, meta inte paramMap = make(map[string]string) } - var params map[string]string - params = paramMap.(map[string]string) + params := map[string]interface{}{} + + for k, v := range paramMap.(map[string]string) { + var value interface{} + if err := json.Unmarshal([]byte(v), &value); err != nil { + // If the value is a string, don't convert it to anything. + params[k] = v + } else { + switch value.(type) { + case float64: + // If the value is a number, keep the string representation. + params[k] = v + default: + // If the value is another JSON type, keep the unmarshalled type as is. + params[k] = value + } + } + } for _, sp := range sensitiveParams { if auth, _ := d.GetOkExists("sensitive_params.0." 
+ sp); auth != "" { @@ -991,6 +1078,19 @@ func resourceBigqueryDataTransferConfigDecoder(d *schema.ResourceData, meta inte } } } + for k, v := range params { + switch v.(type) { + case []interface{}, map[string]interface{}: + value, err := json.Marshal(v) + if err != nil { + return nil, err + } + params[k] = string(value) + default: + params[k] = v + } + } + res["params"] = params } return res, nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryreservation/resource_bigquery_reservation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryreservation/resource_bigquery_reservation.go index 4aef47f680a..d46f8adb0ed 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryreservation/resource_bigquery_reservation.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryreservation/resource_bigquery_reservation.go @@ -115,12 +115,6 @@ capacity specified above at most.`, Examples: US, EU, asia-northeast1. The default value is US.`, Default: "US", }, - "multi_region_auxiliary": { - Type: schema.TypeBool, - Optional: true, - Description: `Applicable only for reservations located within one of the BigQuery multi-regions (US or EU). -If set to true, this reservation is placed in the organization's secondary region which is designated for disaster recovery purposes. If false, this reservation is placed in the organization's default region.`, - }, "project": { Type: schema.TypeString, Optional: true, @@ -158,12 +152,6 @@ func resourceBigqueryReservationReservationCreate(d *schema.ResourceData, meta i } else if v, ok := d.GetOkExists("concurrency"); !tpgresource.IsEmptyValue(reflect.ValueOf(concurrencyProp)) && (ok || !reflect.DeepEqual(v, concurrencyProp)) { obj["concurrency"] = concurrencyProp } - multiRegionAuxiliaryProp, err := expandBigqueryReservationReservationMultiRegionAuxiliary(d.Get("multi_region_auxiliary"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("multi_region_auxiliary"); !tpgresource.IsEmptyValue(reflect.ValueOf(multiRegionAuxiliaryProp)) && (ok || !reflect.DeepEqual(v, multiRegionAuxiliaryProp)) { - obj["multiRegionAuxiliary"] = multiRegionAuxiliaryProp - } editionProp, err := expandBigqueryReservationReservationEdition(d.Get("edition"), d, config) if err != nil { return err @@ -274,9 +262,6 @@ func resourceBigqueryReservationReservationRead(d *schema.ResourceData, meta int if err := d.Set("concurrency", flattenBigqueryReservationReservationConcurrency(res["concurrency"], d, config)); err != nil { return fmt.Errorf("Error reading Reservation: %s", err) } - if err := d.Set("multi_region_auxiliary", flattenBigqueryReservationReservationMultiRegionAuxiliary(res["multiRegionAuxiliary"], d, config)); err != nil { - return fmt.Errorf("Error reading Reservation: %s", err) - } if err := d.Set("edition", flattenBigqueryReservationReservationEdition(res["edition"], d, config)); err != nil { return fmt.Errorf("Error reading Reservation: %s", err) } @@ -321,12 +306,6 @@ func resourceBigqueryReservationReservationUpdate(d *schema.ResourceData, meta i } else if v, ok := d.GetOkExists("concurrency"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, concurrencyProp)) { obj["concurrency"] = concurrencyProp } - multiRegionAuxiliaryProp, err := 
expandBigqueryReservationReservationMultiRegionAuxiliary(d.Get("multi_region_auxiliary"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("multi_region_auxiliary"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, multiRegionAuxiliaryProp)) { - obj["multiRegionAuxiliary"] = multiRegionAuxiliaryProp - } autoscaleProp, err := expandBigqueryReservationReservationAutoscale(d.Get("autoscale"), d, config) if err != nil { return err @@ -355,10 +334,6 @@ func resourceBigqueryReservationReservationUpdate(d *schema.ResourceData, meta i updateMask = append(updateMask, "concurrency") } - if d.HasChange("multi_region_auxiliary") { - updateMask = append(updateMask, "multiRegionAuxiliary") - } - if d.HasChange("autoscale") { updateMask = append(updateMask, "autoscale") } @@ -504,10 +479,6 @@ func flattenBigqueryReservationReservationConcurrency(v interface{}, d *schema.R return v // let terraform core handle it otherwise } -func flattenBigqueryReservationReservationMultiRegionAuxiliary(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v -} - func flattenBigqueryReservationReservationEdition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -573,10 +544,6 @@ func expandBigqueryReservationReservationConcurrency(v interface{}, d tpgresourc return v, nil } -func expandBigqueryReservationReservationMultiRegionAuxiliary(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil -} - func expandBigqueryReservationReservationEdition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/iam_bigtable_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/iam_bigtable_instance.go index 4d62d152069..74b3a38a844 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/iam_bigtable_instance.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/iam_bigtable_instance.go @@ -73,8 +73,11 @@ func BigtableInstanceIdParseFunc(d *schema.ResourceData, config *transport_tpg.C } func (u *BigtableInstanceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - req := &bigtableadmin.GetIamPolicyRequest{} - + req := &bigtableadmin.GetIamPolicyRequest{ + Options: &bigtableadmin.GetPolicyOptions{ + RequestedPolicyVersion: tpgiamresource.IamPolicyVersion, + }, + } userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) if err != nil { return nil, err diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_app_profile.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_app_profile.go index 6cf3dfd41e8..03b6573487d 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_app_profile.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_app_profile.go @@ -62,6 +62,23 @@ func ResourceBigtableAppProfile() 
*schema.Resource { ForceNew: true, Description: `The unique name of the app profile in the form '[_a-zA-Z0-9][-_.a-zA-Z0-9]*'.`, }, + "data_boost_isolation_read_only": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies that this app profile is intended for read-only usage via the Data Boost feature.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "compute_billing_owner": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"HOST_PAYS"}), + Description: `The Compute Billing Owner for this Data Boost App Profile. Possible values: ["HOST_PAYS"]`, + }, + }, + }, + ConflictsWith: []string{"standard_isolation"}, + }, "description": { Type: schema.TypeString, Optional: true, @@ -126,6 +143,7 @@ It is unsafe to send these requests to the same table/row/column in multiple clu }, }, }, + ConflictsWith: []string{"data_boost_isolation_read_only"}, }, "name": { Type: schema.TypeString, @@ -184,6 +202,12 @@ func resourceBigtableAppProfileCreate(d *schema.ResourceData, meta interface{}) } else if v, ok := d.GetOkExists("standard_isolation"); !tpgresource.IsEmptyValue(reflect.ValueOf(standardIsolationProp)) && (ok || !reflect.DeepEqual(v, standardIsolationProp)) { obj["standardIsolation"] = standardIsolationProp } + dataBoostIsolationReadOnlyProp, err := expandBigtableAppProfileDataBoostIsolationReadOnly(d.Get("data_boost_isolation_read_only"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("data_boost_isolation_read_only"); !tpgresource.IsEmptyValue(reflect.ValueOf(dataBoostIsolationReadOnlyProp)) && (ok || !reflect.DeepEqual(v, dataBoostIsolationReadOnlyProp)) { + obj["dataBoostIsolationReadOnly"] = dataBoostIsolationReadOnlyProp + } obj, err = resourceBigtableAppProfileEncoder(d, meta, obj) if err != nil { @@ -296,6 +320,9 @@ func resourceBigtableAppProfileRead(d *schema.ResourceData, meta interface{}) er if err := d.Set("standard_isolation", flattenBigtableAppProfileStandardIsolation(res["standardIsolation"], d, config)); err != nil { return fmt.Errorf("Error reading AppProfile: %s", err) } + if err := d.Set("data_boost_isolation_read_only", flattenBigtableAppProfileDataBoostIsolationReadOnly(res["dataBoostIsolationReadOnly"], d, config)); err != nil { + return fmt.Errorf("Error reading AppProfile: %s", err) + } return nil } @@ -340,6 +367,12 @@ func resourceBigtableAppProfileUpdate(d *schema.ResourceData, meta interface{}) } else if v, ok := d.GetOkExists("standard_isolation"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, standardIsolationProp)) { obj["standardIsolation"] = standardIsolationProp } + dataBoostIsolationReadOnlyProp, err := expandBigtableAppProfileDataBoostIsolationReadOnly(d.Get("data_boost_isolation_read_only"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("data_boost_isolation_read_only"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dataBoostIsolationReadOnlyProp)) { + obj["dataBoostIsolationReadOnly"] = dataBoostIsolationReadOnlyProp + } obj, err = resourceBigtableAppProfileEncoder(d, meta, obj) if err != nil { @@ -370,6 +403,10 @@ func resourceBigtableAppProfileUpdate(d *schema.ResourceData, meta interface{}) if d.HasChange("standard_isolation") { updateMask = append(updateMask, "standardIsolation") } + + if d.HasChange("data_boost_isolation_read_only") { + updateMask = append(updateMask, "dataBoostIsolationReadOnly") + } // updateMask is a URL parameter but not 
present in the schema, so ReplaceVars // won't set it url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) @@ -396,6 +433,16 @@ func resourceBigtableAppProfileUpdate(d *schema.ResourceData, meta interface{}) } } } + + _, hasStandardIsolation := obj["standardIsolation"] + _, hasDataBoostIsolationReadOnly := obj["dataBoostIsolationReadOnly"] + if hasStandardIsolation && hasDataBoostIsolationReadOnly { + // Due to the "conflicts" both fields should be present only if neither was + // previously specified and the user is now manually adding dataBoostIsolationReadOnly. + delete(obj, "standardIsolation") + updateMask = append(updateMask, "dataBoostIsolationReadOnly") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) @@ -566,6 +613,23 @@ func flattenBigtableAppProfileStandardIsolationPriority(v interface{}, d *schema return v } +func flattenBigtableAppProfileDataBoostIsolationReadOnly(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["compute_billing_owner"] = + flattenBigtableAppProfileDataBoostIsolationReadOnlyComputeBillingOwner(original["computeBillingOwner"], d, config) + return []interface{}{transformed} +} +func flattenBigtableAppProfileDataBoostIsolationReadOnlyComputeBillingOwner(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func expandBigtableAppProfileDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -643,6 +707,29 @@ func expandBigtableAppProfileStandardIsolationPriority(v interface{}, d tpgresou return v, nil } +func expandBigtableAppProfileDataBoostIsolationReadOnly(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedComputeBillingOwner, err := expandBigtableAppProfileDataBoostIsolationReadOnlyComputeBillingOwner(original["compute_billing_owner"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedComputeBillingOwner); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["computeBillingOwner"] = transformedComputeBillingOwner + } + + return transformed, nil +} + +func expandBigtableAppProfileDataBoostIsolationReadOnlyComputeBillingOwner(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func resourceBigtableAppProfileEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { // Instance is a URL parameter only, so replace self-link/path with resource name only. 
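A standalone sketch of the list-to-object expansion used for the new data_boost_isolation_read_only block, assuming only the standard library; the name mirrors the expand function above, but the provider's tpgresource emptiness helpers are replaced with a plain string check:

package main

import "fmt"

// Terraform stores a MaxItems:1 block as a one-element list of maps; the
// expander unwraps it and renames the key to the API's camelCase form.
func expandDataBoostIsolationReadOnly(v interface{}) map[string]interface{} {
	l := v.([]interface{})
	if len(l) == 0 || l[0] == nil {
		return nil
	}
	original := l[0].(map[string]interface{})
	transformed := make(map[string]interface{})
	if owner, ok := original["compute_billing_owner"].(string); ok && owner != "" {
		transformed["computeBillingOwner"] = owner
	}
	return transformed
}

func main() {
	cfg := []interface{}{map[string]interface{}{"compute_billing_owner": "HOST_PAYS"}}
	fmt.Println(expandDataBoostIsolationReadOnly(cfg)) // map[computeBillingOwner:HOST_PAYS]
}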
if err := d.Set("instance", tpgresource.GetResourceNameFromSelfLink(d.Get("instance").(string))); err != nil { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_instance.go index 4f6311d3456..2d007ddfb38 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_instance.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_instance.go @@ -184,7 +184,7 @@ func ResourceBigtableInstance() *schema.Resource { Type: schema.TypeBool, Optional: true, Default: true, - Description: ` When the field is set to true or unset in Terraform state, a terraform apply or terraform destroy that would delete the instance will fail. When the field is set to false, deleting the instance is allowed.`, + Description: `When the field is set to true or unset in Terraform state, a terraform apply or terraform destroy that would delete the instance will fail. When the field is set to false, deleting the instance is allowed.`, }, "labels": { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_table.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_table.go index fd5a5d94069..18501c541a1 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_table.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_table.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "log" + "strings" "time" "cloud.google.com/go/bigtable" @@ -18,6 +19,24 @@ import ( "github.com/hashicorp/terraform-provider-google/google/verify" ) +func familyHash(v interface{}) int { + m := v.(map[string]interface{}) + cf := m["family"].(string) + t, err := getType(m["type"]) + if err != nil { + panic(err) + } + if t == nil { + // no specified type. 
+ return tpgresource.Hashcode(cf) + } + b, err := bigtable.MarshalJSON(t) + if err != nil { + panic(err) + } + return tpgresource.Hashcode(cf + string(b)) +} + func ResourceBigtableTable() *schema.Resource { return &schema.Resource{ Create: resourceBigtableTableCreate, @@ -61,8 +80,15 @@ func ResourceBigtableTable() *schema.Resource { Required: true, Description: `The name of the column family.`, }, + "type": { + Type: schema.TypeString, + Optional: true, + Description: `The type of the column family.`, + DiffSuppressFunc: typeDiffFunc, + }, }, }, + Set: familyHash, }, "instance_name": { @@ -135,6 +161,18 @@ func ResourceBigtableTable() *schema.Resource { } } +func typeDiffFunc(k, oldValue, newValue string, d *schema.ResourceData) bool { + old, err := getType(oldValue) + if err != nil { + panic(fmt.Sprintf("old error: %v", err)) + } + new, err := getType(newValue) + if err != nil { + panic(fmt.Sprintf("new error: %v", err)) + } + return bigtable.Equal(old, new) +} + func resourceBigtableTableCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) @@ -214,7 +252,7 @@ func resourceBigtableTableCreate(d *schema.ResourceData, meta interface{}) error } // Set the column families if given. - columnFamilies := make(map[string]bigtable.GCPolicy) + columnFamilies := make(map[string]bigtable.Family) if d.Get("column_family.#").(int) > 0 { columns := d.Get("column_family").(*schema.Set).List() @@ -222,12 +260,19 @@ func resourceBigtableTableCreate(d *schema.ResourceData, meta interface{}) error column := co.(map[string]interface{}) if v, ok := column["family"]; ok { - // By default, there is no GC rules. - columnFamilies[v.(string)] = bigtable.NoGcPolicy() + valueType, err := getType(column["type"]) + if err != nil { + return err + } + columnFamilies[v.(string)] = bigtable.Family{ + // By default, there is no GC rules. + GCPolicy: bigtable.NoGcPolicy(), + ValueType: valueType, + } } } } - tblConf.Families = columnFamilies + tblConf.ColumnFamilies = columnFamilies // This method may return before the table's creation is complete - we may need to wait until // it exists in the future. 
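A short sketch of the column-family map the create path now builds for tblConf.ColumnFamilies, assuming the cloud.google.com/go/bigtable types used in this file; the aggregate value is what the "intsum" shorthand handled by getType (defined later in this file) resolves to:

package main

import (
	"fmt"

	"cloud.google.com/go/bigtable"
)

func main() {
	// Equivalent of getType("intsum"): int64 input aggregated with SUM.
	intSum := bigtable.AggregateType{
		Input:      bigtable.Int64Type{},
		Aggregator: bigtable.SumAggregator{},
	}

	// As in resourceBigtableTableCreate: every family starts with no GC
	// policy, and only typed families carry a ValueType.
	families := map[string]bigtable.Family{
		"cf1": {GCPolicy: bigtable.NoGcPolicy()},
		"cf2": {GCPolicy: bigtable.NoGcPolicy(), ValueType: intSum},
	}
	fmt.Println(len(families)) // 2
}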
@@ -283,7 +328,11 @@ func resourceBigtableTableRead(d *schema.ResourceData, meta interface{}) error { if err := d.Set("project", project); err != nil { return fmt.Errorf("Error setting project: %s", err) } - if err := d.Set("column_family", FlattenColumnFamily(table.Families)); err != nil { + families, err := FlattenColumnFamily(table.FamilyInfos) + if err != nil { + return fmt.Errorf("Error flattening column families: %v", err) + } + if err := d.Set("column_family", families); err != nil { return fmt.Errorf("Error setting column_family: %s", err) } @@ -330,6 +379,48 @@ func resourceBigtableTableRead(d *schema.ResourceData, meta interface{}) error { return nil } +func toFamilyMap(set *schema.Set) (map[string]bigtable.Family, error) { + result := map[string]bigtable.Family{} + for _, item := range set.List() { + column := item.(map[string]interface{}) + + if v, ok := column["family"]; ok && v != "" { + valueType, err := getType(column["type"]) + if err != nil { + return nil, err + } + result[v.(string)] = bigtable.Family{ + ValueType: valueType, + } + } + } + return result, nil +} + +// familyMapDiffKeys returns a new map that is the result of a-b, comparing keys +func familyMapDiffKeys(a, b map[string]bigtable.Family) map[string]bigtable.Family { + result := map[string]bigtable.Family{} + for k, v := range a { + if _, ok := b[k]; !ok { + result[k] = v + } + } + return result +} + +// familyMapDiffValueTypes returns a new map that is the result of a-b, where a and b share keys but have different value types +func familyMapDiffValueTypes(a, b map[string]bigtable.Family) map[string]bigtable.Family { + result := map[string]bigtable.Family{} + for k, va := range a { + if vb, ok := b[k]; ok { + if !bigtable.Equal(va.ValueType, vb.ValueType) { + result[k] = va + } + } + } + return result +} + func resourceBigtableTableUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) @@ -351,31 +442,33 @@ func resourceBigtableTableUpdate(d *schema.ResourceData, meta interface{}) error defer c.Close() o, n := d.GetChange("column_family") - oSet := o.(*schema.Set) - nSet := n.(*schema.Set) name := d.Get("name").(string) - // Add column families that are in new but not in old - for _, new := range nSet.Difference(oSet).List() { - column := new.(map[string]interface{}) + oMap, err := toFamilyMap(o.(*schema.Set)) + if err != nil { + return err + } + nMap, err := toFamilyMap(n.(*schema.Set)) + if err != nil { + return err + } - if v, ok := column["family"]; ok { - log.Printf("[DEBUG] adding column family %q", v) - if err := c.CreateColumnFamily(ctx, name, v.(string)); err != nil { - return fmt.Errorf("Error creating column family %q: %s", v, err) - } + for cfn, cf := range familyMapDiffKeys(nMap, oMap) { + log.Printf("[DEBUG] adding column family %q", cfn) + if err := c.CreateColumnFamilyWithConfig(ctx, name, cfn, cf); err != nil { + return fmt.Errorf("Error creating column family %q: %s", cfn, err) } } - - // Remove column families that are in old but not in new - for _, old := range oSet.Difference(nSet).List() { - column := old.(map[string]interface{}) - - if v, ok := column["family"]; ok { - log.Printf("[DEBUG] removing column family %q", v) - if err := c.DeleteColumnFamily(ctx, name, v.(string)); err != nil { - return fmt.Errorf("Error deleting column family %q: %s", v, err) - } + for cfn := range familyMapDiffKeys(oMap, nMap) { + log.Printf("[DEBUG] removing column family %q", cfn) + if err
:= c.DeleteColumnFamily(ctx, name, cfn); err != nil { + return fmt.Errorf("Error deleting column family %q: %s", cfn, err) + } + } + for cfn, cf := range familyMapDiffValueTypes(nMap, oMap) { + log.Printf("[DEBUG] updating column family: %q", cfn) + if err := c.UpdateFamily(ctx, name, cfn, cf); err != nil { + return fmt.Errorf("Error updating column family %q: %s", cfn, err) } } @@ -487,16 +580,23 @@ func resourceBigtableTableDestroy(d *schema.ResourceData, meta interface{}) erro return nil } -func FlattenColumnFamily(families []string) []map[string]interface{} { +func FlattenColumnFamily(families []bigtable.FamilyInfo) ([]map[string]interface{}, error) { result := make([]map[string]interface{}, 0, len(families)) for _, f := range families { data := make(map[string]interface{}) - data["family"] = f + data["family"] = f.Name + if _, ok := f.ValueType.(bigtable.AggregateType); ok { + marshalled, err := bigtable.MarshalJSON(f.ValueType) + if err != nil { + return nil, err + } + data["type"] = string(marshalled) + } result = append(result, data) } - return result + return result, nil } // TODO(rileykarson): Fix the stored import format after rebasing 3.0.0 @@ -519,3 +619,38 @@ func resourceBigtableTableImport(d *schema.ResourceData, meta interface{}) ([]*s return []*schema.ResourceData{d}, nil } + +func getType(input interface{}) (bigtable.Type, error) { + if input == nil || input.(string) == "" { + return nil, nil + } + inputType := strings.TrimSuffix(input.(string), "\n") + switch inputType { + case "intsum": + return bigtable.AggregateType{ + Input: bigtable.Int64Type{}, + Aggregator: bigtable.SumAggregator{}, + }, nil + case "intmin": + return bigtable.AggregateType{ + Input: bigtable.Int64Type{}, + Aggregator: bigtable.MinAggregator{}, + }, nil + case "intmax": + return bigtable.AggregateType{ + Input: bigtable.Int64Type{}, + Aggregator: bigtable.MaxAggregator{}, + }, nil + case "inthll": + return bigtable.AggregateType{ + Input: bigtable.Int64Type{}, + Aggregator: bigtable.HllppUniqueCountAggregator{}, + }, nil + } + + output, err := bigtable.UnmarshalJSON([]byte(inputType)) + if err != nil { + return nil, err + } + return output, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/data_source_google_certificate_manager_certificates.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/data_source_google_certificate_manager_certificates.go new file mode 100644 index 00000000000..c03b2be5597 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/data_source_google_certificate_manager_certificates.go @@ -0,0 +1,140 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 +package certificatemanager + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/certificatemanager/v1" +) + +func DataSourceGoogleCertificateManagerCertificates() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceCertificateManagerCertificate().Schema) + tpgresource.DeleteFieldsFromSchema(dsSchema, "self_managed") + + return &schema.Resource{ + Read: dataSourceGoogleCertificateManagerCertificatesRead, + Schema: map[string]*schema.Schema{ + "filter": { + Type: schema.TypeString, + Optional: true, + }, + "region": { + Type: schema.TypeString, + Optional: true, + Default: "global", + }, + "certificates": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: dsSchema, + }, + }, + }, + } +} + +func dataSourceGoogleCertificateManagerCertificatesRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("error fetching project for certificate: %s", err) + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return fmt.Errorf("error fetching region for certificate: %s", err) + } + + filter := d.Get("filter").(string) + + certificates := make([]map[string]interface{}, 0) + certificatesList, err := config.NewCertificateManagerClient(userAgent).Projects.Locations.Certificates.List(fmt.Sprintf("projects/%s/locations/%s", project, region)).Filter(filter).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Certificates : %s %s", project, region)) + } + + for _, certificate := range certificatesList.Certificates { + if certificate != nil { + certificates = append(certificates, map[string]interface{}{ + "name": certificate.Name, + "description": certificate.Description, + "labels": certificate.Labels, + "location": region, + "managed": flattenCertificateManaged(certificate.Managed), + "san_dnsnames": certificate.SanDnsnames, + "scope": certificate.Scope, + }) + } + } + + if err := d.Set("certificates", certificates); err != nil { + return fmt.Errorf("error setting certificates: %s", err) + } + + d.SetId(fmt.Sprintf( + "projects/%s/locations/%s/certificates", + project, + region, + )) + + return nil +} + +func flattenCertificateManaged(v *certificatemanager.ManagedCertificate) interface{} { + if v == nil { + return nil + } + + output := make(map[string]interface{}) + + output["authorization_attempt_info"] = flattenCertificateManagedAuthorizationAttemptInfo(v.AuthorizationAttemptInfo) + output["dns_authorizations"] = v.DnsAuthorizations + output["domains"] = v.Domains + output["issuance_config"] = v.IssuanceConfig + output["state"] = v.State + output["provisioning_issue"] = flattenCertificateManagedProvisioningIssue(v.ProvisioningIssue) + + return []interface{}{output} +} + +func flattenCertificateManagedAuthorizationAttemptInfo(v []*certificatemanager.AuthorizationAttemptInfo) interface{} { + if v == nil { + return nil + } + + output := make([]interface{}, 0, len(v)) + + for _, authorizationAttemptInfo := range v { + output = append(output, map[string]interface{}{ + "details": 
authorizationAttemptInfo.Details, + "domain": authorizationAttemptInfo.Domain, + "failure_reason": authorizationAttemptInfo.FailureReason, + "state": authorizationAttemptInfo.State, + }) + } + + return output +} + +func flattenCertificateManagedProvisioningIssue(v *certificatemanager.ProvisioningIssue) interface{} { + if v == nil { + return nil + } + + output := make(map[string]interface{}) + + output["details"] = v.Details + output["reason"] = v.Reason + + return []interface{}{output} +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate.go index a6f6aaa1fe1..27c436546f7 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate.go @@ -276,6 +276,14 @@ Leaf certificate comes first, followed by intermediate ones if any.`, Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, Elem: &schema.Schema{Type: schema.TypeString}, }, + "san_dnsnames": { + Type: schema.TypeList, + Computed: true, + Description: `The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6)`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, "terraform_labels": { Type: schema.TypeMap, Computed: true, @@ -440,6 +448,9 @@ func resourceCertificateManagerCertificateRead(d *schema.ResourceData, meta inte if err := d.Set("scope", flattenCertificateManagerCertificateScope(res["scope"], d, config)); err != nil { return fmt.Errorf("Error reading Certificate: %s", err) } + if err := d.Set("san_dnsnames", flattenCertificateManagerCertificateSanDnsnames(res["sanDnsnames"], d, config)); err != nil { + return fmt.Errorf("Error reading Certificate: %s", err) + } if err := d.Set("managed", flattenCertificateManagerCertificateManaged(res["managed"], d, config)); err != nil { return fmt.Errorf("Error reading Certificate: %s", err) } @@ -640,6 +651,10 @@ func flattenCertificateManagerCertificateScope(v interface{}, d *schema.Resource return v } +func flattenCertificateManagerCertificateSanDnsnames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenCertificateManagerCertificateManaged(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/clouddeploy/resource_clouddeploy_delivery_pipeline.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/clouddeploy/resource_clouddeploy_delivery_pipeline.go index 45e558609aa..448ccd0bd53 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/clouddeploy/resource_clouddeploy_delivery_pipeline.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/clouddeploy/resource_clouddeploy_delivery_pipeline.go @@ -531,6 +531,12 @@ func 
ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigK Description: "Required. Name of the Kubernetes Service.", }, + "pod_selector_label": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The label to use when selecting Pods for the Deployment and Service resources. This label must already be present in both resources.", + }, + "route_update_wait_time": { Type: schema.TypeString, Optional: true, @@ -566,6 +572,12 @@ func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigK Optional: true, Description: "Optional. Whether to disable Pod overprovisioning. If Pod overprovisioning is disabled then Cloud Deploy will limit the number of total Pods used for the deployment strategy to the number of Pods the Deployment has on the cluster.", }, + + "pod_selector_label": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The label to use when selecting Pods for the Deployment resource. This label must already be present in the Deployment.", + }, }, } } @@ -1502,6 +1514,7 @@ func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeC Deployment: dcl.String(obj["deployment"].(string)), HttpRoute: dcl.String(obj["http_route"].(string)), Service: dcl.String(obj["service"].(string)), + PodSelectorLabel: dcl.String(obj["pod_selector_label"].(string)), RouteUpdateWaitTime: dcl.String(obj["route_update_wait_time"].(string)), StableCutbackDuration: dcl.String(obj["stable_cutback_duration"].(string)), } @@ -1515,6 +1528,7 @@ func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntime "deployment": obj.Deployment, "http_route": obj.HttpRoute, "service": obj.Service, + "pod_selector_label": obj.PodSelectorLabel, "route_update_wait_time": obj.RouteUpdateWaitTime, "stable_cutback_duration": obj.StableCutbackDuration, } @@ -1536,6 +1550,7 @@ func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeC Deployment: dcl.String(obj["deployment"].(string)), Service: dcl.String(obj["service"].(string)), DisablePodOverprovisioning: dcl.Bool(obj["disable_pod_overprovisioning"].(bool)), + PodSelectorLabel: dcl.String(obj["pod_selector_label"].(string)), } } @@ -1547,6 +1562,7 @@ func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntime "deployment": obj.Deployment, "service": obj.Service, "disable_pod_overprovisioning": obj.DisablePodOverprovisioning, + "pod_selector_label": obj.PodSelectorLabel, } return []interface{}{transformed} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/clouddeploy/resource_clouddeploy_target.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/clouddeploy/resource_clouddeploy_target.go index 5f42cd84bd3..631206922b5 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/clouddeploy/resource_clouddeploy_target.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/clouddeploy/resource_clouddeploy_target.go @@ -306,6 +306,12 @@ func ClouddeployTargetGkeSchema() *schema.Resource { Optional: true, Description: "Optional. If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. 
Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept).", }, + + "proxy_url": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. If set, used to configure a [proxy](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#proxy) to the Kubernetes server.", + }, }, } } @@ -766,6 +772,7 @@ func expandClouddeployTargetGke(o interface{}) *clouddeploy.TargetGke { return &clouddeploy.TargetGke{ Cluster: dcl.String(obj["cluster"].(string)), InternalIP: dcl.Bool(obj["internal_ip"].(bool)), + ProxyUrl: dcl.String(obj["proxy_url"].(string)), } } @@ -776,6 +783,7 @@ func flattenClouddeployTargetGke(obj *clouddeploy.TargetGke) interface{} { transformed := map[string]interface{}{ "cluster": obj.Cluster, "internal_ip": obj.InternalIP, + "proxy_url": obj.ProxyUrl, } return []interface{}{transformed} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/clouddomains/resource_clouddomains_registration.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/clouddomains/resource_clouddomains_registration.go index 91efc29a3de..61fa83d98ea 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/clouddomains/resource_clouddomains_registration.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/clouddomains/resource_clouddomains_registration.go @@ -1952,7 +1952,6 @@ func expandClouddomainsRegistrationDomainName(v interface{}, d tpgresource.Terra } func resourceClouddomainsRegistrationEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // Request body is registration object with additional fields // See https://cloud.google.com/domains/docs/reference/rest/v1beta1/projects.locations.registrations/register diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions/resource_cloudfunctions_function.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions/resource_cloudfunctions_function.go index fbede76589a..2cdd0f07dc8 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions/resource_cloudfunctions_function.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions/resource_cloudfunctions_function.go @@ -138,10 +138,10 @@ func ResourceCloudFunctionsFunction() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(5 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(5 * time.Minute), - Delete: schema.DefaultTimeout(5 * time.Minute), + Create: schema.DefaultTimeout(20 * time.Minute), + Read: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), }, CustomizeDiff: customdiff.All( @@ -300,6 +300,13 @@ func ResourceCloudFunctionsFunction() *schema.Resource { Description: ` If provided, the self-provided service account to run the function with.`, }, + "build_service_account": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The fully-qualified name of the service 
account to be used for the build step of deploying this function`, + }, + "vpc_connector": { Type: schema.TypeString, Optional: true, @@ -629,6 +636,10 @@ func resourceCloudFunctionsCreate(d *schema.ResourceData, meta interface{}) erro function.MinInstances = int64(v.(int)) } + if v, ok := d.GetOk("build_service_account"); ok { + function.BuildServiceAccount = v.(string) + } + log.Printf("[DEBUG] Creating cloud function: %s", function.Name) // We retry the whole create-and-wait because Cloud Functions @@ -716,6 +727,9 @@ func resourceCloudFunctionsRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("service_account_email", function.ServiceAccountEmail); err != nil { return fmt.Errorf("Error setting service_account_email: %s", err) } + if err := d.Set("build_service_account", function.BuildServiceAccount); err != nil { + return fmt.Errorf("Error setting build_service_account: %s", err) + } if err := d.Set("environment_variables", function.EnvironmentVariables); err != nil { return fmt.Errorf("Error setting environment_variables: %s", err) } @@ -947,6 +961,11 @@ func resourceCloudFunctionsUpdate(d *schema.ResourceData, meta interface{}) erro updateMaskArr = append(updateMaskArr, "minInstances") } + if d.HasChange("build_service_account") { + function.BuildServiceAccount = d.Get("build_service_account").(string) + updateMaskArr = append(updateMaskArr, "buildServiceAccount") + } + if len(updateMaskArr) > 0 { log.Printf("[DEBUG] Send Patch CloudFunction Configuration request: %#v", function) updateMask := strings.Join(updateMaskArr, ",") diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions2/resource_cloudfunctions2_function.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions2/resource_cloudfunctions2_function.go index d9f19f333fd..4d5b976b335 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions2/resource_cloudfunctions2_function.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions2/resource_cloudfunctions2_function.go @@ -33,6 +33,21 @@ import ( "github.com/hashicorp/terraform-provider-google/google/verify" ) +// Suppress diffs for the system environment variables +func environmentVariablesDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + if k == "service_config.0.environment_variables.LOG_EXECUTION_ID" && new == "" { + return true + } + + // Let diff be determined by environment_variables (above) + if strings.HasPrefix(k, "service_config.0.environment_variables.%") { + return true + } + + // For other keys, don't suppress diff. + return false +} + func ResourceCloudfunctions2function() *schema.Resource { return &schema.Resource{ Create: resourceCloudfunctions2functionCreate, @@ -351,10 +366,12 @@ Defaults to 256M. Supported units are k, M, G, Mi, Gi. 
If no unit is supplied the value is interpreted as bytes.`, }, "environment_variables": { - Type: schema.TypeMap, - Optional: true, - Description: `Environment variables that shall be available during function execution.`, - Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeMap, + Computed: true, + Optional: true, + DiffSuppressFunc: environmentVariablesDiffSuppress, + Description: `Environment variables that shall be available during function execution.`, + Elem: &schema.Schema{Type: schema.TypeString}, }, "ingress_settings": { Type: schema.TypeString, @@ -1123,6 +1140,16 @@ func flattenCloudfunctions2functionBuildConfigSourceStorageSourceObject(v interf } func flattenCloudfunctions2functionBuildConfigSourceStorageSourceGeneration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // This flatten function is shared between the resource and the datasource. + // TF Input will use the generation from the source object + // GET Response will use the generation from the automatically created object + // As TF Input and GET response values have different format, + // we will return TF Input value to prevent state drift. + + if genVal, ok := d.GetOk("build_config.0.source.0.storage_source.0.generation"); ok { + v = genVal + } + // Handles the string fixed64 format if strVal, ok := v.(string); ok { if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudidentity/data_source_cloud_identity_group_transitive_memberships.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudidentity/data_source_cloud_identity_group_transitive_memberships.go new file mode 100644 index 00000000000..903191c60a3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudidentity/data_source_cloud_identity_group_transitive_memberships.go @@ -0,0 +1,177 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package cloudidentity + +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudidentity/v1" +) + +func DataSourceGoogleCloudIdentityGroupTransitiveMemberships() *schema.Resource { + + return &schema.Resource{ + Read: dataSourceGoogleCloudIdentityGroupTransitiveMembershipsRead, + + // We don't reuse schemas from google_cloud_identity_group_membership because data returned about + // transitive memberships is structured differently, with information like expiry missing. + Schema: map[string]*schema.Schema{ + "group": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the Group to get memberships from.`, + }, + "memberships": { + Type: schema.TypeList, + Computed: true, + Description: `List of Cloud Identity group memberships.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "roles": { + Type: schema.TypeSet, + // Default schema.HashSchema is used. + Computed: true, + Description: `The membership role details`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "role": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the TransitiveMembershipRole.
Possible values: ["OWNER", "MANAGER", "MEMBER"]`, + }, + }, + }, + }, + "preferred_member_key": { + Type: schema.TypeList, + Computed: true, + Description: `EntityKey of the member. Entity key has an id and a namespace. In case of discussion forums, the id will be an email address without a namespace.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: `The ID of the entity. + +For Google-managed entities, the id must be the email address of an existing +group or user. + +For external-identity-mapped entities, the id must be a string conforming +to the Identity Source's requirements. + +Must be unique within a namespace.`, + }, + "namespace": { + Type: schema.TypeString, + Computed: true, + Description: `The namespace in which the entity exists. + +If not specified, the EntityKey represents a Google-managed entity +such as a Google user or a Google Group. + +If specified, the EntityKey represents an external-identity-mapped group. +The namespace must correspond to an identity source created in Admin Console +and must be in the form of 'identitysources/{identity_source_id}'.`, + }, + }, + }, + }, + "member": { + Type: schema.TypeString, + Computed: true, + Description: `Resource name for this member.`, + }, + "relation_type": { + Type: schema.TypeString, + Computed: true, + Description: `The relation between the group and the transitive member. The value can be DIRECT, INDIRECT, or DIRECT_AND_INDIRECT`, + }, + }, + }, + }, + }, + } +} + +func dataSourceGoogleCloudIdentityGroupTransitiveMembershipsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + result := []map[string]interface{}{} + membershipsCall := config.NewCloudIdentityClient(userAgent).Groups.Memberships.SearchTransitiveMemberships(d.Get("group").(string)) + if config.UserProjectOverride { + billingProject := "" + // err may be nil - project isn't required for this resource + if project, err := tpgresource.GetProject(d, config); err == nil { + billingProject = project + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + if billingProject != "" { + membershipsCall.Header().Set("X-Goog-User-Project", billingProject) + } + } + + err = membershipsCall.Pages(config.Context, func(resp *cloudidentity.SearchTransitiveMembershipsResponse) error { + for _, member := range resp.Memberships { + result = append(result, map[string]interface{}{ + "member": member.Member, + "relation_type": member.RelationType, + "roles": flattenCloudIdentityGroupTransitiveMembershipsRoles(member.Roles), + "preferred_member_key": flattenCloudIdentityGroupsEntityKeyList(member.PreferredMemberKey), + }) + } + + return nil + }) + if err != nil { + return transport_tpg.HandleDataSourceNotFoundError(err, d, fmt.Sprintf("CloudIdentityGroupMemberships %q", d.Id()), "") + } + + if err := d.Set("memberships", result); err != nil { + return fmt.Errorf("Error setting memberships: %s", err) + } + + group := d.Get("group") + d.SetId(fmt.Sprintf("%s/transitiveMemberships", group.(string))) // groups/{group_id}/transitiveMemberships + return nil +} + +func flattenCloudIdentityGroupTransitiveMembershipsRoles(roles []*cloudidentity.TransitiveMembershipRole) []interface{} { + transformed := []interface{}{} + + for _, role := 
range roles { + transformed = append(transformed, map[string]interface{}{ + "role": role.Role, + }) + } + return transformed +} + +// flattenCloudIdentityGroupsEntityKeyList is a version of flattenCloudIdentityGroupsEntityKey that +// can accept a list of EntityKeys +func flattenCloudIdentityGroupsEntityKeyList(entityKeys []*cloudidentity.EntityKey) []interface{} { + transformed := []interface{}{} + + for _, key := range entityKeys { + transformed = append(transformed, map[string]interface{}{ + "id": key.Id, + "namespace": key.Namespace, + }) + } + + return transformed +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudquotas/resource_cloud_quotas_quota_preference.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudquotas/resource_cloud_quotas_quota_preference.go index 905787403ea..88504379f67 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudquotas/resource_cloud_quotas_quota_preference.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudquotas/resource_cloud_quotas_quota_preference.go @@ -645,7 +645,7 @@ func expandCloudQuotasQuotaPreferenceQuotaConfigGrantedValue(v interface{}, d tp } func expandCloudQuotasQuotaPreferenceQuotaConfigTraceId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil + return nil, nil } func expandCloudQuotasQuotaPreferenceQuotaConfigAnnotations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/resource_cloud_run_domain_mapping.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/resource_cloud_run_domain_mapping.go index 68838d096cf..25d406ae3fa 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/resource_cloud_run_domain_mapping.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/resource_cloud_run_domain_mapping.go @@ -145,8 +145,8 @@ project ID or project number.`, Optional: true, ForceNew: true, Description: `Annotations is a key value map stored with a resource that -may be set by external tools to store and retrieve arbitrary metadata. More -info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations +may be set by external tools to store and retrieve arbitrary metadata. +More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations **Note**: The Cloud Run API may add additional annotations that were not provided in your config. 
If terraform plan shows a diff where a server-side annotation is added, you can add it to your config diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/resource_cloud_run_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/resource_cloud_run_service.go index e177877c05e..804ba4d7c39 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/resource_cloud_run_service.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/resource_cloud_run_service.go @@ -614,11 +614,8 @@ might be configured in the container image.`, Computed: true, Optional: true, Description: `ContainerConcurrency specifies the maximum allowed in-flight (concurrent) -requests per container of the Revision. Values are: -- '0' thread-safe, the system should manage the max concurrency. This is - the default value. -- '1' not-thread-safe. Single concurrency -- '2-N' thread-safe, max concurrency of N`, +requests per container of the Revision. If not specified or 0, defaults to 80 when +requested CPU >= 1 and defaults to 1 when requested CPU < 1.`, }, "service_account_name": { Type: schema.TypeString, @@ -646,6 +643,63 @@ will use the project's default service account.`, Required: true, Description: `Volume's name.`, }, + "csi": { + Type: schema.TypeList, + Optional: true, + Description: `A filesystem specified by the Container Storage Interface (CSI).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver": { + Type: schema.TypeString, + Required: true, + Description: `Unique name representing the type of file system to be created. Cloud Run supports the following values: + * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the + run.googleapis.com/execution-environment annotation to be unset or set to "gen2"`, + }, + "read_only": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: `If true, all mounts created from this volume will be read-only.`, + }, + "volume_attributes": { + Type: schema.TypeMap, + Optional: true, + Description: `Driver-specific attributes. The following options are supported for available drivers: + * gcsfuse.run.googleapis.com + * bucketName: The name of the Cloud Storage Bucket that backs this volume. The Cloud Run Service identity must have access to this bucket.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "nfs": { + Type: schema.TypeList, + Optional: true, + Description: `A filesystem backed by a Network File System share. This filesystem requires the +run.googleapis.com/execution-environment annotation to be unset or set to "gen2"`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "path": { + Type: schema.TypeString, + Required: true, + Description: `Path exported by the NFS server`, + }, + "server": { + Type: schema.TypeString, + Required: true, + Description: `IP address or hostname of the NFS server`, + }, + "read_only": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, mount the NFS volume as read only in all mounts. 
Defaults to false.`, + }, + }, + }, + }, "secret": { Type: schema.TypeList, Optional: true, @@ -749,8 +803,8 @@ annotation key.`, Optional: true, DiffSuppressFunc: cloudrunTemplateAnnotationDiffSuppress, Description: `Annotations is a key value map stored with a resource that -may be set by external tools to store and retrieve arbitrary metadata. More -info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations +may be set by external tools to store and retrieve arbitrary metadata. +More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations **Note**: The Cloud Run API may add additional annotations that were not provided in your config. If terraform plan shows a diff where a server-side annotation is added, you can add it to your config @@ -901,8 +955,8 @@ and annotations.`, Type: schema.TypeMap, Optional: true, Description: `Annotations is a key value map stored with a resource that -may be set by external tools to store and retrieve arbitrary metadata. More -info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations +may be set by external tools to store and retrieve arbitrary metadata. +More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations **Note**: The Cloud Run API may add additional annotations that were not provided in your config. If terraform plan shows a diff where a server-side annotation is added, you can add it to your config @@ -1102,12 +1156,12 @@ https://{route-hash}-{project-hash}-{cluster-level-suffix}.a.run.app`, "autogenerate_revision_name": { Type: schema.TypeBool, Optional: true, - Default: false, Description: `If set to 'true', the revision name (template.metadata.name) will be omitted and autogenerated by Cloud Run. This cannot be set to 'true' while 'template.metadata.name' is also set. 
(For legacy support, if 'template.metadata.name' is unset in state while this field is set to false, the revision name will still autogenerate.)`, + Default: false, }, "project": { Type: schema.TypeString, @@ -2472,6 +2526,8 @@ func flattenCloudRunServiceSpecTemplateSpecVolumes(v interface{}, d *schema.Reso transformed = append(transformed, map[string]interface{}{ "name": flattenCloudRunServiceSpecTemplateSpecVolumesName(original["name"], d, config), "secret": flattenCloudRunServiceSpecTemplateSpecVolumesSecret(original["secret"], d, config), + "csi": flattenCloudRunServiceSpecTemplateSpecVolumesCsi(original["csi"], d, config), + "nfs": flattenCloudRunServiceSpecTemplateSpecVolumesNfs(original["nfs"], d, config), }) } return transformed @@ -2563,6 +2619,64 @@ func flattenCloudRunServiceSpecTemplateSpecVolumesSecretItemsMode(v interface{}, return v // let terraform core handle it otherwise } +func flattenCloudRunServiceSpecTemplateSpecVolumesCsi(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["driver"] = + flattenCloudRunServiceSpecTemplateSpecVolumesCsiDriver(original["driver"], d, config) + transformed["read_only"] = + flattenCloudRunServiceSpecTemplateSpecVolumesCsiReadOnly(original["readOnly"], d, config) + transformed["volume_attributes"] = + flattenCloudRunServiceSpecTemplateSpecVolumesCsiVolumeAttributes(original["volumeAttributes"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunServiceSpecTemplateSpecVolumesCsiDriver(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecVolumesCsiReadOnly(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecVolumesCsiVolumeAttributes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecVolumesNfs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["server"] = + flattenCloudRunServiceSpecTemplateSpecVolumesNfsServer(original["server"], d, config) + transformed["path"] = + flattenCloudRunServiceSpecTemplateSpecVolumesNfsPath(original["path"], d, config) + transformed["read_only"] = + flattenCloudRunServiceSpecTemplateSpecVolumesNfsReadOnly(original["readOnly"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunServiceSpecTemplateSpecVolumesNfsServer(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecVolumesNfsPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecVolumesNfsReadOnly(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenCloudRunServiceSpecTemplateSpecServingState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -4072,6 +4186,20 @@ func expandCloudRunServiceSpecTemplateSpecVolumes(v interface{}, d tpgresource.T transformed["secret"] 
= transformedSecret } + transformedCsi, err := expandCloudRunServiceSpecTemplateSpecVolumesCsi(original["csi"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCsi); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["csi"] = transformedCsi + } + + transformedNfs, err := expandCloudRunServiceSpecTemplateSpecVolumesNfs(original["nfs"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNfs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nfs"] = transformedNfs + } + req = append(req, transformed) } return req, nil @@ -4170,6 +4298,103 @@ func expandCloudRunServiceSpecTemplateSpecVolumesSecretItemsMode(v interface{}, return v, nil } +func expandCloudRunServiceSpecTemplateSpecVolumesCsi(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDriver, err := expandCloudRunServiceSpecTemplateSpecVolumesCsiDriver(original["driver"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDriver); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["driver"] = transformedDriver + } + + transformedReadOnly, err := expandCloudRunServiceSpecTemplateSpecVolumesCsiReadOnly(original["read_only"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReadOnly); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["readOnly"] = transformedReadOnly + } + + transformedVolumeAttributes, err := expandCloudRunServiceSpecTemplateSpecVolumesCsiVolumeAttributes(original["volume_attributes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVolumeAttributes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["volumeAttributes"] = transformedVolumeAttributes + } + + return transformed, nil +} + +func expandCloudRunServiceSpecTemplateSpecVolumesCsiDriver(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecVolumesCsiReadOnly(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecVolumesCsiVolumeAttributes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCloudRunServiceSpecTemplateSpecVolumesNfs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedServer, err := expandCloudRunServiceSpecTemplateSpecVolumesNfsServer(original["server"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServer); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["server"] = transformedServer + } + + transformedPath, err := 
expandCloudRunServiceSpecTemplateSpecVolumesNfsPath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + transformedReadOnly, err := expandCloudRunServiceSpecTemplateSpecVolumesNfsReadOnly(original["read_only"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReadOnly); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["readOnly"] = transformedReadOnly + } + + return transformed, nil +} + +func expandCloudRunServiceSpecTemplateSpecVolumesNfsServer(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecVolumesNfsPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecVolumesNfsReadOnly(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandCloudRunServiceSpecTemplateSpecServingState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_job.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_job.go index c8bb773541a..49189a055a8 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_job.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_job.go @@ -98,7 +98,7 @@ func ResourceCloudRunV2Job() *schema.Resource { "args": { Type: schema.TypeList, Optional: true, - Description: `Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell`, + Description: `Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references are not supported in Cloud Run.`, Elem: &schema.Schema{ Type: schema.TypeString, }, @@ -112,53 +112,11 @@ func ResourceCloudRunV2Job() *schema.Resource { }, }, "env": { - Type: schema.TypeList, + Type: schema.TypeSet, Optional: true, Description: `List of environment variables to set in the container.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Name of the environment variable. Must be a C_IDENTIFIER, and mnay not exceed 32768 characters.`, - }, - "value": { - Type: schema.TypeString, - Optional: true, - Description: `Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. 
If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "", and the maximum length is 32768 bytes`, - }, - "value_source": { - Type: schema.TypeList, - Optional: true, - Description: `Source for the environment variable's value.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "secret_key_ref": { - Type: schema.TypeList, - Optional: true, - Description: `Selects a secret and a specific version from Cloud Secret Manager.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "secret": { - Type: schema.TypeString, - Required: true, - Description: `The name of the secret in Cloud Secret Manager. Format: {secretName} if the secret is in the same project. projects/{project}/secrets/{secretName} if the secret is in a different project.`, - }, - "version": { - Type: schema.TypeString, - Required: true, - Description: `The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version.`, - }, - }, - }, - }, - }, - }, - }, - }, - }, + Elem: cloudrunv2JobTemplateTemplateContainersContainersEnvSchema(), + // Default schema.HashSchema is used. }, "name": { Type: schema.TypeString, @@ -293,6 +251,51 @@ A duration in seconds with up to nine fractional digits, ending with 's'. Exampl }, }, }, + "gcs": { + Type: schema.TypeList, + Optional: true, + Description: `Cloud Storage bucket mounted as a volume using GCSFuse.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: `Name of the cloud storage bucket to back the volume. The resource service account must have permission to access the bucket.`, + }, + "read_only": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, mount this volume as read-only in all mounts. If false, mount this volume as read-write.`, + }, + }, + }, + }, + "nfs": { + Type: schema.TypeList, + Optional: true, + Description: `NFS share mounted as a volume.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "server": { + Type: schema.TypeString, + Required: true, + Description: `Hostname or IP address of the NFS server.`, + }, + "path": { + Type: schema.TypeString, + Optional: true, + Description: `Path that is exported by the NFS server.`, + }, + "read_only": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, mount this volume as read-only in all mounts.`, + }, + }, + }, + }, "secret": { Type: schema.TypeList, Optional: true, @@ -461,10 +464,17 @@ Please refer to the field 'effective_annotations' for all of the annotations pre Optional: true, Description: `If present, indicates to use Breakglass using this justification. If useDefault is False, then it must be empty. For more information on breakglass, see https://cloud.google.com/binary-authorization/docs/using-breakglass`, }, + "policy": { + Type: schema.TypeString, + Optional: true, + Description: `The path to a binary authorization policy. Format: projects/{project}/platforms/cloudRun/{policy-name}`, + ConflictsWith: []string{}, + }, "use_default": { - Type: schema.TypeBool, - Optional: true, - Description: `If True, indicates to use the default project's binary authorization policy. 
If False, binary authorization will be disabled.`, + Type: schema.TypeBool, + Optional: true, + Description: `If True, indicates to use the default project's binary authorization policy. If False, binary authorization will be disabled.`, + ConflictsWith: []string{}, }, }, }, @@ -717,6 +727,17 @@ A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to n Computed: true, Description: `The last-modified time.`, }, + "deletion_protection": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether Terraform will be prevented from destroying the job. Defaults to true. +When a 'terraform destroy' or 'terraform apply' would delete the job, +the command will fail if this field is not set to false in Terraform state. +When the field is set to true or unset in Terraform state, a 'terraform apply' +or 'terraform destroy' that would delete the job will fail. +When the field is set to false, deleting the job is allowed.`, + Default: true, + }, "project": { Type: schema.TypeString, Optional: true, @@ -728,6 +749,53 @@ A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to n } } +func cloudrunv2JobTemplateTemplateContainersContainersEnvSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the environment variable. Must be a C_IDENTIFIER, and may not exceed 32768 characters.`, + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: `Literal value of the environment variable. Defaults to "" and the maximum allowed length is 32768 characters. Variable references are not supported in Cloud Run.`, + }, + "value_source": { + Type: schema.TypeList, + Optional: true, + Description: `Source for the environment variable's value.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret_key_ref": { + Type: schema.TypeList, + Optional: true, + Description: `Selects a secret and a specific version from Cloud Secret Manager.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret": { + Type: schema.TypeString, + Required: true, + Description: `The name of the secret in Cloud Secret Manager. Format: {secretName} if the secret is in the same project. projects/{project}/secrets/{secretName} if the secret is in a different project.`, + }, + "version": { + Type: schema.TypeString, + Required: true, + Description: `The Cloud Secret Manager secret version.
Can be 'latest' for the latest value or an integer for a specific version.`, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + func resourceCloudRunV2JobCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) @@ -880,6 +948,12 @@ func resourceCloudRunV2JobRead(d *schema.ResourceData, meta interface{}) error { return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("CloudRunV2Job %q", d.Id())) } + // Explicitly set virtual fields to default values if unset + if _, ok := d.GetOkExists("deletion_protection"); !ok { + if err := d.Set("deletion_protection", true); err != nil { + return fmt.Errorf("Error setting deletion_protection: %s", err) + } + } if err := d.Set("project", project); err != nil { return fmt.Errorf("Error reading Job: %s", err) } @@ -1091,6 +1165,9 @@ func resourceCloudRunV2JobDelete(d *schema.ResourceData, meta interface{}) error } headers := make(http.Header) + if d.Get("deletion_protection").(bool) { + return fmt.Errorf("cannot destroy job without setting deletion_protection=false and running `terraform apply`") + } log.Printf("[DEBUG] Deleting Job %q", d.Id()) res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ @@ -1136,6 +1213,11 @@ func resourceCloudRunV2JobImport(d *schema.ResourceData, meta interface{}) ([]*s } d.SetId(id) + // Explicitly set virtual fields to default values on import + if err := d.Set("deletion_protection", true); err != nil { + return nil, fmt.Errorf("Error setting deletion_protection: %s", err) + } + return []*schema.ResourceData{d}, nil } @@ -1226,6 +1308,8 @@ func flattenCloudRunV2JobBinaryAuthorization(v interface{}, d *schema.ResourceDa flattenCloudRunV2JobBinaryAuthorizationBreakglassJustification(original["breakglassJustification"], d, config) transformed["use_default"] = flattenCloudRunV2JobBinaryAuthorizationUseDefault(original["useDefault"], d, config) + transformed["policy"] = + flattenCloudRunV2JobBinaryAuthorizationPolicy(original["policy"], d, config) return []interface{}{transformed} } func flattenCloudRunV2JobBinaryAuthorizationBreakglassJustification(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { @@ -1236,6 +1320,10 @@ func flattenCloudRunV2JobBinaryAuthorizationUseDefault(v interface{}, d *schema. return v } +func flattenCloudRunV2JobBinaryAuthorizationPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenCloudRunV2JobTemplate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil @@ -1373,14 +1461,14 @@ func flattenCloudRunV2JobTemplateTemplateContainersEnv(v interface{}, d *schema. 
return v } l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) + transformed := schema.NewSet(schema.HashResource(cloudrunv2JobTemplateTemplateContainersContainersEnvSchema()), []interface{}{}) for _, raw := range l { original := raw.(map[string]interface{}) if len(original) < 1 { // Do not include empty json objects coming back from the api continue } - transformed = append(transformed, map[string]interface{}{ + transformed.Add(map[string]interface{}{ "name": flattenCloudRunV2JobTemplateTemplateContainersEnvName(original["name"], d, config), "value": flattenCloudRunV2JobTemplateTemplateContainersEnvValue(original["value"], d, config), "value_source": flattenCloudRunV2JobTemplateTemplateContainersEnvValueSource(original["valueSource"], d, config), @@ -1536,6 +1624,8 @@ func flattenCloudRunV2JobTemplateTemplateVolumes(v interface{}, d *schema.Resour "name": flattenCloudRunV2JobTemplateTemplateVolumesName(original["name"], d, config), "secret": flattenCloudRunV2JobTemplateTemplateVolumesSecret(original["secret"], d, config), "cloud_sql_instance": flattenCloudRunV2JobTemplateTemplateVolumesCloudSqlInstance(original["cloudSqlInstance"], d, config), + "gcs": flattenCloudRunV2JobTemplateTemplateVolumesGcs(original["gcs"], d, config), + "nfs": flattenCloudRunV2JobTemplateTemplateVolumesNfs(original["nfs"], d, config), }) } return transformed @@ -1644,6 +1734,58 @@ func flattenCloudRunV2JobTemplateTemplateVolumesCloudSqlInstanceInstances(v inte return v } +func flattenCloudRunV2JobTemplateTemplateVolumesGcs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["bucket"] = + flattenCloudRunV2JobTemplateTemplateVolumesGcsBucket(original["bucket"], d, config) + transformed["read_only"] = + flattenCloudRunV2JobTemplateTemplateVolumesGcsReadOnly(original["readOnly"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2JobTemplateTemplateVolumesGcsBucket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateVolumesGcsReadOnly(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateVolumesNfs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["server"] = + flattenCloudRunV2JobTemplateTemplateVolumesNfsServer(original["server"], d, config) + transformed["path"] = + flattenCloudRunV2JobTemplateTemplateVolumesNfsPath(original["path"], d, config) + transformed["read_only"] = + flattenCloudRunV2JobTemplateTemplateVolumesNfsReadOnly(original["readOnly"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2JobTemplateTemplateVolumesNfsServer(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateVolumesNfsPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateVolumesNfsReadOnly(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func 
flattenCloudRunV2JobTemplateTemplateTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -1966,6 +2108,13 @@ func expandCloudRunV2JobBinaryAuthorization(v interface{}, d tpgresource.Terrafo transformed["useDefault"] = transformedUseDefault } + transformedPolicy, err := expandCloudRunV2JobBinaryAuthorizationPolicy(original["policy"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["policy"] = transformedPolicy + } + return transformed, nil } @@ -1977,6 +2126,10 @@ func expandCloudRunV2JobBinaryAuthorizationUseDefault(v interface{}, d tpgresour return v, nil } +func expandCloudRunV2JobBinaryAuthorizationPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandCloudRunV2JobTemplate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { @@ -2217,6 +2370,7 @@ func expandCloudRunV2JobTemplateTemplateContainersArgs(v interface{}, d tpgresou } func expandCloudRunV2JobTemplateTemplateContainersEnv(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2452,6 +2606,20 @@ func expandCloudRunV2JobTemplateTemplateVolumes(v interface{}, d tpgresource.Ter transformed["cloudSqlInstance"] = transformedCloudSqlInstance } + transformedGcs, err := expandCloudRunV2JobTemplateTemplateVolumesGcs(original["gcs"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGcs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["gcs"] = transformedGcs + } + + transformedNfs, err := expandCloudRunV2JobTemplateTemplateVolumesNfs(original["nfs"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNfs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nfs"] = transformedNfs + } + req = append(req, transformed) } return req, nil @@ -2573,6 +2741,85 @@ func expandCloudRunV2JobTemplateTemplateVolumesCloudSqlInstanceInstances(v inter return v, nil } +func expandCloudRunV2JobTemplateTemplateVolumesGcs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBucket, err := expandCloudRunV2JobTemplateTemplateVolumesGcsBucket(original["bucket"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBucket); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bucket"] = transformedBucket + } + + transformedReadOnly, err := expandCloudRunV2JobTemplateTemplateVolumesGcsReadOnly(original["read_only"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReadOnly); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["readOnly"] = transformedReadOnly + } + + return transformed, nil +} + +func expandCloudRunV2JobTemplateTemplateVolumesGcsBucket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} 
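+
+// NOTE: the single-statement expanders in this block are passthroughs. As
+// elsewhere in this generated file, one expand function is emitted per schema
+// field so that composite expanders such as
+// expandCloudRunV2JobTemplateTemplateVolumesGcs can treat every field
+// uniformly: a field that needs no client-side conversion returns its value
+// unchanged, and the caller maps the snake_case schema key (for example
+// "read_only") to the camelCase API field ("readOnly").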
+ +func expandCloudRunV2JobTemplateTemplateVolumesGcsReadOnly(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateVolumesNfs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedServer, err := expandCloudRunV2JobTemplateTemplateVolumesNfsServer(original["server"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServer); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["server"] = transformedServer + } + + transformedPath, err := expandCloudRunV2JobTemplateTemplateVolumesNfsPath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + transformedReadOnly, err := expandCloudRunV2JobTemplateTemplateVolumesNfsReadOnly(original["read_only"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReadOnly); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["readOnly"] = transformedReadOnly + } + + return transformed, nil +} + +func expandCloudRunV2JobTemplateTemplateVolumesNfsServer(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateVolumesNfsPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateVolumesNfsReadOnly(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandCloudRunV2JobTemplateTemplateTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_service.go index cc95a7b5268..d63a9a470fc 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_service.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_service.go @@ -101,7 +101,7 @@ This field follows Kubernetes annotations' namespacing, limits, and rules.`, "args": { Type: schema.TypeList, Optional: true, - Description: `Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell`, + Description: `Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references are not supported in Cloud Run.`, Elem: &schema.Schema{ Type: schema.TypeString, }, @@ -123,57 +123,14 @@ This field follows Kubernetes annotations' namespacing, limits, and rules.`, }, }, "env": { - Type: schema.TypeList, + Type: schema.TypeSet, Optional: true, Description: `List of environment variables to set in the container.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Name of the environment variable. Must be a C_IDENTIFIER, and mnay not exceed 32768 characters.`, - }, - "value": { - Type: schema.TypeString, - Optional: true, - Description: `Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "", and the maximum length is 32768 bytes`, - }, - "value_source": { - Type: schema.TypeList, - Optional: true, - Description: `Source for the environment variable's value.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "secret_key_ref": { - Type: schema.TypeList, - Optional: true, - Description: `Selects a secret and a specific version from Cloud Secret Manager.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "secret": { - Type: schema.TypeString, - Required: true, - Description: `The name of the secret in Cloud Secret Manager. Format: {secretName} if the secret is in the same project. projects/{project}/secrets/{secretName} if the secret is in a different project.`, - }, - "version": { - Type: schema.TypeString, - Optional: true, - Description: `The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version.`, - }, - }, - }, - }, - }, - }, - }, - }, - }, + Elem: cloudrunv2ServiceTemplateContainersContainersEnvSchema(), + // Default schema.HashSchema is used. }, "liveness_probe": { Type: schema.TypeList, - Computed: true, Optional: true, Description: `Periodic probe of container liveness. Container will be restarted if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes`, MaxItems: 1, @@ -337,7 +294,7 @@ If omitted, a port number will be chosen and passed to the container through the Type: schema.TypeMap, Computed: true, Optional: true, - Description: `Only memory and CPU are supported. Use key 'cpu' for CPU limit and 'memory' for memory limit. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go`, + Description: `Only memory, CPU, and nvidia.com/gpu are supported. Use key 'cpu' for CPU limit, 'memory' for memory limit, 'nvidia.com/gpu' for gpu limit. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. 
The values of the map are in string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go`, Elem: &schema.Schema{Type: schema.TypeString}, }, "startup_cpu_boost": { @@ -516,10 +473,11 @@ All system labels in v1 now have a corresponding field in v2 RevisionTemplate.`, Elem: &schema.Schema{Type: schema.TypeString}, }, "max_instance_request_concurrency": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `Sets the maximum number of requests that each serving instance can receive.`, + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Sets the maximum number of requests that each serving instance can receive. +If not specified or 0, defaults to 80 when requested CPU >= 1 and defaults to 1 when requested CPU < 1.`, }, "revision": { Type: schema.TypeString, @@ -600,7 +558,7 @@ A duration in seconds with up to nine fractional digits, ending with 's'. Exampl "gcs": { Type: schema.TypeList, Optional: true, - Description: `Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA.`, + Description: `Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment.`, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -773,10 +731,17 @@ Please refer to the field 'effective_annotations' for all of the annotations pre Optional: true, Description: `If present, indicates to use Breakglass using this justification. If useDefault is False, then it must be empty. For more information on breakglass, see https://cloud.google.com/binary-authorization/docs/using-breakglass`, }, + "policy": { + Type: schema.TypeString, + Optional: true, + Description: `The path to a binary authorization policy. Format: projects/{project}/platforms/cloudRun/{policy-name}`, + ConflictsWith: []string{}, + }, "use_default": { - Type: schema.TypeBool, - Optional: true, - Description: `If True, indicates to use the default project's binary authorization policy. If False, binary authorization will be disabled.`, + Type: schema.TypeBool, + Optional: true, + Description: `If True, indicates to use the default project's binary authorization policy. If False, binary authorization will be disabled.`, + ConflictsWith: []string{}, }, }, }, @@ -812,6 +777,11 @@ For more information, see https://cloud.google.com/run/docs/configuring/custom-a ValidateFunc: verify.ValidateEnum([]string{"INGRESS_TRAFFIC_ALL", "INGRESS_TRAFFIC_INTERNAL_ONLY", "INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER", ""}), Description: `Provides the ingress settings for this Service. On output, returns the currently observed ingress settings, or INGRESS_TRAFFIC_UNSPECIFIED if no revision is active. Possible values: ["INGRESS_TRAFFIC_ALL", "INGRESS_TRAFFIC_INTERNAL_ONLY", "INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER"]`, }, + "invoker_iam_disabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Disables IAM permission check for run.routes.invoke for callers of this service. This feature is available by invitation only. For more information, visit https://cloud.google.com/run/docs/securing/managing-access#invoker_check.`, + }, "labels": { Type: schema.TypeMap, Optional: true, @@ -835,6 +805,21 @@ If no value is specified, GA is assumed.
Set the launch stage to a preview stage. For example, if ALPHA is provided as input, but only BETA and GA-level features are used, this field will be BETA on output. Possible values: ["UNIMPLEMENTED", "PRELAUNCH", "EARLY_ACCESS", "ALPHA", "BETA", "GA", "DEPRECATED"]`, }, + "scaling": { + Type: schema.TypeList, + Optional: true, + Description: `Scaling settings that apply to the whole service`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "min_instance_count": { + Type: schema.TypeInt, + Optional: true, + Description: `Minimum number of instances for the service, to be divided among all revisions receiving traffic.`, + }, + }, + }, + }, "traffic": { Type: schema.TypeList, Computed: true, @@ -1096,6 +1081,17 @@ If reconciliation failed, trafficStatuses, observedGeneration, and latestReadyRe Computed: true, Description: `The main URI in which this Service is serving traffic.`, }, + "deletion_protection": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether Terraform will be prevented from destroying the service. Defaults to true. +When a 'terraform destroy' or 'terraform apply' would delete the service, +the command will fail if this field is not set to false in Terraform state. +When the field is set to true or unset in Terraform state, a 'terraform apply' +or 'terraform destroy' that would delete the service will fail. +When the field is set to false, deleting the service is allowed.`, + Default: true, + }, "project": { Type: schema.TypeString, Optional: true, @@ -1107,6 +1103,53 @@ If reconciliation failed, trafficStatuses, observedGeneration, and latestReadyRe } } +func cloudrunv2ServiceTemplateContainersContainersEnvSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the environment variable. Must be a C_IDENTIFIER, and may not exceed 32768 characters.`, + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: `Literal value of the environment variable. Defaults to "" and the maximum allowed length is 32768 characters. Variable references are not supported in Cloud Run.`, + }, + "value_source": { + Type: schema.TypeList, + Optional: true, + Description: `Source for the environment variable's value.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret_key_ref": { + Type: schema.TypeList, + Optional: true, + Description: `Selects a secret and a specific version from Cloud Secret Manager.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret": { + Type: schema.TypeString, + Required: true, + Description: `The name of the secret in Cloud Secret Manager. Format: {secretName} if the secret is in the same project. projects/{project}/secrets/{secretName} if the secret is in a different project.`, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `The Cloud Secret Manager secret version.
Can be 'latest' for the latest value or an integer for a specific version.`, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + func resourceCloudRunV2ServiceCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) @@ -1157,6 +1200,12 @@ func resourceCloudRunV2ServiceCreate(d *schema.ResourceData, meta interface{}) e } else if v, ok := d.GetOkExists("custom_audiences"); !tpgresource.IsEmptyValue(reflect.ValueOf(customAudiencesProp)) && (ok || !reflect.DeepEqual(v, customAudiencesProp)) { obj["customAudiences"] = customAudiencesProp } + scalingProp, err := expandCloudRunV2ServiceScaling(d.Get("scaling"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("scaling"); !tpgresource.IsEmptyValue(reflect.ValueOf(scalingProp)) && (ok || !reflect.DeepEqual(v, scalingProp)) { + obj["scaling"] = scalingProp + } templateProp, err := expandCloudRunV2ServiceTemplate(d.Get("template"), d, config) if err != nil { return err @@ -1169,6 +1218,12 @@ func resourceCloudRunV2ServiceCreate(d *schema.ResourceData, meta interface{}) e } else if v, ok := d.GetOkExists("traffic"); !tpgresource.IsEmptyValue(reflect.ValueOf(trafficProp)) && (ok || !reflect.DeepEqual(v, trafficProp)) { obj["traffic"] = trafficProp } + invokerIamDisabledProp, err := expandCloudRunV2ServiceInvokerIamDisabled(d.Get("invoker_iam_disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("invoker_iam_disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(invokerIamDisabledProp)) && (ok || !reflect.DeepEqual(v, invokerIamDisabledProp)) { + obj["invokerIamDisabled"] = invokerIamDisabledProp + } labelsProp, err := expandCloudRunV2ServiceEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err @@ -1283,6 +1338,12 @@ func resourceCloudRunV2ServiceRead(d *schema.ResourceData, meta interface{}) err return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("CloudRunV2Service %q", d.Id())) } + // Explicitly set virtual fields to default values if unset + if _, ok := d.GetOkExists("deletion_protection"); !ok { + if err := d.Set("deletion_protection", true); err != nil { + return fmt.Errorf("Error setting deletion_protection: %s", err) + } + } if err := d.Set("project", project); err != nil { return fmt.Errorf("Error reading Service: %s", err) } @@ -1338,12 +1399,18 @@ func resourceCloudRunV2ServiceRead(d *schema.ResourceData, meta interface{}) err if err := d.Set("custom_audiences", flattenCloudRunV2ServiceCustomAudiences(res["customAudiences"], d, config)); err != nil { return fmt.Errorf("Error reading Service: %s", err) } + if err := d.Set("scaling", flattenCloudRunV2ServiceScaling(res["scaling"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } if err := d.Set("template", flattenCloudRunV2ServiceTemplate(res["template"], d, config)); err != nil { return fmt.Errorf("Error reading Service: %s", err) } if err := d.Set("traffic", flattenCloudRunV2ServiceTraffic(res["traffic"], d, config)); err != nil { return fmt.Errorf("Error reading Service: %s", err) } + if err := d.Set("invoker_iam_disabled", flattenCloudRunV2ServiceInvokerIamDisabled(res["invokerIamDisabled"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } if err := d.Set("observed_generation", flattenCloudRunV2ServiceObservedGeneration(res["observedGeneration"], d, config)); err != nil { return fmt.Errorf("Error reading Service: %s", 
err) } @@ -1442,6 +1509,12 @@ func resourceCloudRunV2ServiceUpdate(d *schema.ResourceData, meta interface{}) e } else if v, ok := d.GetOkExists("custom_audiences"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, customAudiencesProp)) { obj["customAudiences"] = customAudiencesProp } + scalingProp, err := expandCloudRunV2ServiceScaling(d.Get("scaling"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("scaling"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, scalingProp)) { + obj["scaling"] = scalingProp + } templateProp, err := expandCloudRunV2ServiceTemplate(d.Get("template"), d, config) if err != nil { return err @@ -1454,6 +1527,12 @@ func resourceCloudRunV2ServiceUpdate(d *schema.ResourceData, meta interface{}) e } else if v, ok := d.GetOkExists("traffic"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, trafficProp)) { obj["traffic"] = trafficProp } + invokerIamDisabledProp, err := expandCloudRunV2ServiceInvokerIamDisabled(d.Get("invoker_iam_disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("invoker_iam_disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, invokerIamDisabledProp)) { + obj["invokerIamDisabled"] = invokerIamDisabledProp + } labelsProp, err := expandCloudRunV2ServiceEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err @@ -1536,6 +1615,9 @@ func resourceCloudRunV2ServiceDelete(d *schema.ResourceData, meta interface{}) e } headers := make(http.Header) + if d.Get("deletion_protection").(bool) { + return fmt.Errorf("cannot destroy service without setting deletion_protection=false and running `terraform apply`") + } log.Printf("[DEBUG] Deleting Service %q", d.Id()) res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ @@ -1581,6 +1663,11 @@ func resourceCloudRunV2ServiceImport(d *schema.ResourceData, meta interface{}) ( } d.SetId(id) + // Explicitly set virtual fields to default values on import + if err := d.Set("deletion_protection", true); err != nil { + return nil, fmt.Errorf("Error setting deletion_protection: %s", err) + } + return []*schema.ResourceData{d}, nil } @@ -1679,6 +1766,8 @@ func flattenCloudRunV2ServiceBinaryAuthorization(v interface{}, d *schema.Resour flattenCloudRunV2ServiceBinaryAuthorizationBreakglassJustification(original["breakglassJustification"], d, config) transformed["use_default"] = flattenCloudRunV2ServiceBinaryAuthorizationUseDefault(original["useDefault"], d, config) + transformed["policy"] = + flattenCloudRunV2ServiceBinaryAuthorizationPolicy(original["policy"], d, config) return []interface{}{transformed} } func flattenCloudRunV2ServiceBinaryAuthorizationBreakglassJustification(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { @@ -1689,10 +1778,44 @@ func flattenCloudRunV2ServiceBinaryAuthorizationUseDefault(v interface{}, d *sch return v } +func flattenCloudRunV2ServiceBinaryAuthorizationPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenCloudRunV2ServiceCustomAudiences(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } +func flattenCloudRunV2ServiceScaling(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + 
transformed := make(map[string]interface{}) + transformed["min_instance_count"] = + flattenCloudRunV2ServiceScalingMinInstanceCount(original["minInstanceCount"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2ServiceScalingMinInstanceCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + func flattenCloudRunV2ServiceTemplate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil @@ -1906,14 +2029,14 @@ func flattenCloudRunV2ServiceTemplateContainersEnv(v interface{}, d *schema.Reso return v } l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) + transformed := schema.NewSet(schema.HashResource(cloudrunv2ServiceTemplateContainersContainersEnvSchema()), []interface{}{}) for _, raw := range l { original := raw.(map[string]interface{}) if len(original) < 1 { // Do not include empty json objects coming back from the api continue } - transformed = append(transformed, map[string]interface{}{ + transformed.Add(map[string]interface{}{ "name": flattenCloudRunV2ServiceTemplateContainersEnvName(original["name"], d, config), "value": flattenCloudRunV2ServiceTemplateContainersEnvValue(original["value"], d, config), "value_source": flattenCloudRunV2ServiceTemplateContainersEnvValueSource(original["valueSource"], d, config), @@ -2762,6 +2885,10 @@ func flattenCloudRunV2ServiceTrafficTag(v interface{}, d *schema.ResourceData, c return v } +func flattenCloudRunV2ServiceInvokerIamDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenCloudRunV2ServiceObservedGeneration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -3023,6 +3150,13 @@ func expandCloudRunV2ServiceBinaryAuthorization(v interface{}, d tpgresource.Ter transformed["useDefault"] = transformedUseDefault } + transformedPolicy, err := expandCloudRunV2ServiceBinaryAuthorizationPolicy(original["policy"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["policy"] = transformedPolicy + } + return transformed, nil } @@ -3034,10 +3168,37 @@ func expandCloudRunV2ServiceBinaryAuthorizationUseDefault(v interface{}, d tpgre return v, nil } +func expandCloudRunV2ServiceBinaryAuthorizationPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandCloudRunV2ServiceCustomAudiences(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } +func expandCloudRunV2ServiceScaling(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMinInstanceCount, err := expandCloudRunV2ServiceScalingMinInstanceCount(original["min_instance_count"], d, config) + if err != nil { + 
return nil, err + } else if val := reflect.ValueOf(transformedMinInstanceCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minInstanceCount"] = transformedMinInstanceCount + } + + return transformed, nil +} + +func expandCloudRunV2ServiceScalingMinInstanceCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandCloudRunV2ServiceTemplate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { @@ -3414,6 +3575,7 @@ func expandCloudRunV2ServiceTemplateContainersArgs(v interface{}, d tpgresource. } func expandCloudRunV2ServiceTemplateContainersEnv(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4414,6 +4576,10 @@ func expandCloudRunV2ServiceTrafficTag(v interface{}, d tpgresource.TerraformRes return v, nil } +func expandCloudRunV2ServiceInvokerIamDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandCloudRunV2ServiceEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudtasks/resource_cloud_tasks_queue.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudtasks/resource_cloud_tasks_queue.go index ea82fe4b1a5..3a9c69d5dda 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudtasks/resource_cloud_tasks_queue.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudtasks/resource_cloud_tasks_queue.go @@ -30,6 +30,7 @@ import ( "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func suppressOmittedMaxDuration(k, old, new string, d *schema.ResourceData) bool { @@ -105,6 +106,207 @@ By default, the task is sent to the version which is the default version when th }, }, }, + "http_target": { + Type: schema.TypeList, + Optional: true, + Description: `Modifies HTTP target for HTTP tasks.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "header_overrides": { + Type: schema.TypeList, + Optional: true, + Description: `HTTP target headers. + +This map contains the header field names and values. +Headers will be set when running the CreateTask and/or BufferTask. + +These headers represent a subset of the headers that will be configured for the task's HTTP request. +Some HTTP request headers will be ignored or replaced. + +Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + +The size of the headers must be less than 80KB. 
Queue-level headers to override headers of all the tasks in the queue.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "header": { + Type: schema.TypeList, + Required: true, + Description: `Header embodying a key and a value.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + Description: `The Key of the header.`, + }, + "value": { + Type: schema.TypeString, + Required: true, + Description: `The Value of the header.`, + }, + }, + }, + }, + }, + }, + }, + "http_method": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"HTTP_METHOD_UNSPECIFIED", "POST", "GET", "HEAD", "PUT", "DELETE", "PATCH", "OPTIONS", ""}), + Description: `The HTTP method to use for the request. + +When specified, it overrides HttpRequest for the task. +Note that if the value is set to GET the body of the task will be ignored at execution time. Possible values: ["HTTP_METHOD_UNSPECIFIED", "POST", "GET", "HEAD", "PUT", "DELETE", "PATCH", "OPTIONS"]`, + }, + "oauth_token": { + Type: schema.TypeList, + Optional: true, + Description: `If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + +This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. +Note that both the service account email and the scope MUST be specified when using the queue-level authorization override.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service_account_email": { + Type: schema.TypeString, + Required: true, + Description: `Service account email to be used for generating OAuth token. +The service account must be within the same project as the queue. +The caller must have iam.serviceAccounts.actAs permission for the service account.`, + }, + "scope": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `OAuth scope to be used for generating OAuth access token. +If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used.`, + }, + }, + }, + ConflictsWith: []string{}, + }, + "oidc_token": { + Type: schema.TypeList, + Optional: true, + Description: `If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + +This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. +Note that both the service account email and the audience MUST be specified when using the queue-level authorization override.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service_account_email": { + Type: schema.TypeString, + Required: true, + Description: `Service account email to be used for generating OIDC token. +The service account must be within the same project as the queue. +The caller must have iam.serviceAccounts.actAs permission for the service account.`, + }, + "audience": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used.`, + }, + }, + }, + ConflictsWith: []string{}, + }, + "uri_override": { + Type: schema.TypeList, + Optional: true, + Description: `URI override. 
+ +When specified, overrides the execution URI for all the tasks in the queue.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "host": { + Type: schema.TypeString, + Optional: true, + Description: `Host override. + +When specified, replaces the host part of the task URL. +For example, if the task URL is "https://www.google.com", and host value +is set to "example.net", the overridden URI will be changed to "https://example.net". +Host value cannot be an empty string (INVALID_ARGUMENT).`, + }, + "path_override": { + Type: schema.TypeList, + Optional: true, + Description: `URI path. + +When specified, replaces the existing path of the task URL. +Setting the path value to an empty string clears the URI path segment.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "path": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The URI path (e.g., /users/1234). Default is an empty string.`, + }, + }, + }, + }, + "port": { + Type: schema.TypeString, + Optional: true, + Description: `Port override. + +When specified, replaces the port part of the task URI. +For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. +Note that the port value must be a positive integer. +Setting the port to 0 (Zero) clears the URI port.`, + }, + "query_override": { + Type: schema.TypeList, + Optional: true, + Description: `URI query. + +When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "query_params": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string.`, + }, + }, + }, + }, + "scheme": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"HTTP", "HTTPS", ""}), + Description: `Scheme override. + +When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). Possible values: ["HTTP", "HTTPS"]`, + }, + "uri_override_enforce_mode": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"ALWAYS", "IF_NOT_EXISTS", ""}), + Description: `URI Override Enforce Mode + +When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. 
Possible values: ["ALWAYS", "IF_NOT_EXISTS"]`, + }, + }, + }, + }, + }, + }, + }, "name": { Type: schema.TypeString, Optional: true, @@ -290,6 +492,12 @@ func resourceCloudTasksQueueCreate(d *schema.ResourceData, meta interface{}) err } else if v, ok := d.GetOkExists("stackdriver_logging_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(stackdriverLoggingConfigProp)) && (ok || !reflect.DeepEqual(v, stackdriverLoggingConfigProp)) { obj["stackdriverLoggingConfig"] = stackdriverLoggingConfigProp } + httpTargetProp, err := expandCloudTasksQueueHttpTarget(d.Get("http_target"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("http_target"); !tpgresource.IsEmptyValue(reflect.ValueOf(httpTargetProp)) && (ok || !reflect.DeepEqual(v, httpTargetProp)) { + obj["httpTarget"] = httpTargetProp + } url, err := tpgresource.ReplaceVars(d, config, "{{CloudTasksBasePath}}projects/{{project}}/locations/{{location}}/queues") if err != nil { @@ -394,6 +602,9 @@ func resourceCloudTasksQueueRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("stackdriver_logging_config", flattenCloudTasksQueueStackdriverLoggingConfig(res["stackdriverLoggingConfig"], d, config)); err != nil { return fmt.Errorf("Error reading Queue: %s", err) } + if err := d.Set("http_target", flattenCloudTasksQueueHttpTarget(res["httpTarget"], d, config)); err != nil { + return fmt.Errorf("Error reading Queue: %s", err) + } return nil } @@ -438,6 +649,12 @@ func resourceCloudTasksQueueUpdate(d *schema.ResourceData, meta interface{}) err } else if v, ok := d.GetOkExists("stackdriver_logging_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, stackdriverLoggingConfigProp)) { obj["stackdriverLoggingConfig"] = stackdriverLoggingConfigProp } + httpTargetProp, err := expandCloudTasksQueueHttpTarget(d.Get("http_target"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("http_target"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, httpTargetProp)) { + obj["httpTarget"] = httpTargetProp + } url, err := tpgresource.ReplaceVars(d, config, "{{CloudTasksBasePath}}projects/{{project}}/locations/{{location}}/queues/{{name}}") if err != nil { @@ -463,6 +680,10 @@ func resourceCloudTasksQueueUpdate(d *schema.ResourceData, meta interface{}) err if d.HasChange("stackdriver_logging_config") { updateMask = append(updateMask, "stackdriverLoggingConfig") } + + if d.HasChange("http_target") { + updateMask = append(updateMask, "httpTarget") + } // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) @@ -732,6 +953,191 @@ func flattenCloudTasksQueueStackdriverLoggingConfigSamplingRatio(v interface{}, return v } +func flattenCloudTasksQueueHttpTarget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["http_method"] = + flattenCloudTasksQueueHttpTargetHttpMethod(original["httpMethod"], d, config) + transformed["uri_override"] = + flattenCloudTasksQueueHttpTargetUriOverride(original["uriOverride"], d, config) + transformed["header_overrides"] = + flattenCloudTasksQueueHttpTargetHeaderOverrides(original["headerOverrides"], d, config) + transformed["oauth_token"] = + 
flattenCloudTasksQueueHttpTargetOauthToken(original["oauthToken"], d, config) + transformed["oidc_token"] = + flattenCloudTasksQueueHttpTargetOidcToken(original["oidcToken"], d, config) + return []interface{}{transformed} +} +func flattenCloudTasksQueueHttpTargetHttpMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudTasksQueueHttpTargetUriOverride(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["scheme"] = + flattenCloudTasksQueueHttpTargetUriOverrideScheme(original["scheme"], d, config) + transformed["host"] = + flattenCloudTasksQueueHttpTargetUriOverrideHost(original["host"], d, config) + transformed["port"] = + flattenCloudTasksQueueHttpTargetUriOverridePort(original["port"], d, config) + transformed["path_override"] = + flattenCloudTasksQueueHttpTargetUriOverridePathOverride(original["pathOverride"], d, config) + transformed["query_override"] = + flattenCloudTasksQueueHttpTargetUriOverrideQueryOverride(original["queryOverride"], d, config) + transformed["uri_override_enforce_mode"] = + flattenCloudTasksQueueHttpTargetUriOverrideUriOverrideEnforceMode(original["uriOverrideEnforceMode"], d, config) + return []interface{}{transformed} +} +func flattenCloudTasksQueueHttpTargetUriOverrideScheme(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudTasksQueueHttpTargetUriOverrideHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudTasksQueueHttpTargetUriOverridePort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudTasksQueueHttpTargetUriOverridePathOverride(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["path"] = + flattenCloudTasksQueueHttpTargetUriOverridePathOverridePath(original["path"], d, config) + return []interface{}{transformed} +} +func flattenCloudTasksQueueHttpTargetUriOverridePathOverridePath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudTasksQueueHttpTargetUriOverrideQueryOverride(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["query_params"] = + flattenCloudTasksQueueHttpTargetUriOverrideQueryOverrideQueryParams(original["queryParams"], d, config) + return []interface{}{transformed} +} +func flattenCloudTasksQueueHttpTargetUriOverrideQueryOverrideQueryParams(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudTasksQueueHttpTargetUriOverrideUriOverrideEnforceMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudTasksQueueHttpTargetHeaderOverrides(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + 
transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "header": flattenCloudTasksQueueHttpTargetHeaderOverridesHeader(original["header"], d, config), + }) + } + return transformed +} +func flattenCloudTasksQueueHttpTargetHeaderOverridesHeader(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["key"] = + flattenCloudTasksQueueHttpTargetHeaderOverridesHeaderKey(original["key"], d, config) + transformed["value"] = + flattenCloudTasksQueueHttpTargetHeaderOverridesHeaderValue(original["value"], d, config) + return []interface{}{transformed} +} +func flattenCloudTasksQueueHttpTargetHeaderOverridesHeaderKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudTasksQueueHttpTargetHeaderOverridesHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudTasksQueueHttpTargetOauthToken(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["service_account_email"] = + flattenCloudTasksQueueHttpTargetOauthTokenServiceAccountEmail(original["serviceAccountEmail"], d, config) + transformed["scope"] = + flattenCloudTasksQueueHttpTargetOauthTokenScope(original["scope"], d, config) + return []interface{}{transformed} +} +func flattenCloudTasksQueueHttpTargetOauthTokenServiceAccountEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudTasksQueueHttpTargetOauthTokenScope(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudTasksQueueHttpTargetOidcToken(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["service_account_email"] = + flattenCloudTasksQueueHttpTargetOidcTokenServiceAccountEmail(original["serviceAccountEmail"], d, config) + transformed["audience"] = + flattenCloudTasksQueueHttpTargetOidcTokenAudience(original["audience"], d, config) + return []interface{}{transformed} +} +func flattenCloudTasksQueueHttpTargetOidcTokenServiceAccountEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudTasksQueueHttpTargetOidcTokenAudience(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func expandCloudTasksQueueName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/queues/{{name}}") } @@ -926,3 +1332,294 @@ func expandCloudTasksQueueStackdriverLoggingConfig(v interface{}, d tpgresource. 
func expandCloudTasksQueueStackdriverLoggingConfigSamplingRatio(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } + +func expandCloudTasksQueueHttpTarget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHttpMethod, err := expandCloudTasksQueueHttpTargetHttpMethod(original["http_method"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHttpMethod); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["httpMethod"] = transformedHttpMethod + } + + transformedUriOverride, err := expandCloudTasksQueueHttpTargetUriOverride(original["uri_override"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUriOverride); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uriOverride"] = transformedUriOverride + } + + transformedHeaderOverrides, err := expandCloudTasksQueueHttpTargetHeaderOverrides(original["header_overrides"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHeaderOverrides); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["headerOverrides"] = transformedHeaderOverrides + } + + transformedOauthToken, err := expandCloudTasksQueueHttpTargetOauthToken(original["oauth_token"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOauthToken); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["oauthToken"] = transformedOauthToken + } + + transformedOidcToken, err := expandCloudTasksQueueHttpTargetOidcToken(original["oidc_token"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOidcToken); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["oidcToken"] = transformedOidcToken + } + + return transformed, nil +} + +func expandCloudTasksQueueHttpTargetHttpMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudTasksQueueHttpTargetUriOverride(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScheme, err := expandCloudTasksQueueHttpTargetUriOverrideScheme(original["scheme"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScheme); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scheme"] = transformedScheme + } + + transformedHost, err := expandCloudTasksQueueHttpTargetUriOverrideHost(original["host"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["host"] = transformedHost + } + + transformedPort, err := expandCloudTasksQueueHttpTargetUriOverridePort(original["port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["port"] = transformedPort + } + + 
transformedPathOverride, err := expandCloudTasksQueueHttpTargetUriOverridePathOverride(original["path_override"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPathOverride); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pathOverride"] = transformedPathOverride + } + + transformedQueryOverride, err := expandCloudTasksQueueHttpTargetUriOverrideQueryOverride(original["query_override"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedQueryOverride); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["queryOverride"] = transformedQueryOverride + } + + transformedUriOverrideEnforceMode, err := expandCloudTasksQueueHttpTargetUriOverrideUriOverrideEnforceMode(original["uri_override_enforce_mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUriOverrideEnforceMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uriOverrideEnforceMode"] = transformedUriOverrideEnforceMode + } + + return transformed, nil +} + +func expandCloudTasksQueueHttpTargetUriOverrideScheme(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudTasksQueueHttpTargetUriOverrideHost(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudTasksQueueHttpTargetUriOverridePort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudTasksQueueHttpTargetUriOverridePathOverride(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPath, err := expandCloudTasksQueueHttpTargetUriOverridePathOverridePath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + return transformed, nil +} + +func expandCloudTasksQueueHttpTargetUriOverridePathOverridePath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudTasksQueueHttpTargetUriOverrideQueryOverride(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedQueryParams, err := expandCloudTasksQueueHttpTargetUriOverrideQueryOverrideQueryParams(original["query_params"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedQueryParams); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["queryParams"] = transformedQueryParams + } + + return transformed, nil +} + +func expandCloudTasksQueueHttpTargetUriOverrideQueryOverrideQueryParams(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudTasksQueueHttpTargetUriOverrideUriOverrideEnforceMode(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudTasksQueueHttpTargetHeaderOverrides(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHeader, err := expandCloudTasksQueueHttpTargetHeaderOverridesHeader(original["header"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHeader); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["header"] = transformedHeader + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudTasksQueueHttpTargetHeaderOverridesHeader(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandCloudTasksQueueHttpTargetHeaderOverridesHeaderKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } + + transformedValue, err := expandCloudTasksQueueHttpTargetHeaderOverridesHeaderValue(original["value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["value"] = transformedValue + } + + return transformed, nil +} + +func expandCloudTasksQueueHttpTargetHeaderOverridesHeaderKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudTasksQueueHttpTargetHeaderOverridesHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudTasksQueueHttpTargetOauthToken(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedServiceAccountEmail, err := expandCloudTasksQueueHttpTargetOauthTokenServiceAccountEmail(original["service_account_email"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceAccountEmail); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serviceAccountEmail"] = transformedServiceAccountEmail + } + + transformedScope, err := expandCloudTasksQueueHttpTargetOauthTokenScope(original["scope"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScope); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scope"] = transformedScope + } + + return transformed, nil +} + +func expandCloudTasksQueueHttpTargetOauthTokenServiceAccountEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudTasksQueueHttpTargetOauthTokenScope(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudTasksQueueHttpTargetOidcToken(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedServiceAccountEmail, err := expandCloudTasksQueueHttpTargetOidcTokenServiceAccountEmail(original["service_account_email"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceAccountEmail); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serviceAccountEmail"] = transformedServiceAccountEmail + } + + transformedAudience, err := expandCloudTasksQueueHttpTargetOidcTokenAudience(original["audience"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAudience); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["audience"] = transformedAudience + } + + return transformed, nil +} + +func expandCloudTasksQueueHttpTargetOidcTokenServiceAccountEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudTasksQueueHttpTargetOidcTokenAudience(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/composer/resource_composer_environment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/composer/resource_composer_environment.go index c9d47864b66..c0400e53621 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/composer/resource_composer_environment.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/composer/resource_composer_environment.go @@ -53,6 +53,7 @@ var ( "config.0.software_config.0.image_version", "config.0.software_config.0.python_version", "config.0.software_config.0.scheduler_count", + "config.0.software_config.0.cloud_data_lineage_integration", } composerConfigKeys = []string{ @@ -290,7 +291,6 @@ func ResourceComposerEnvironment() *schema.Resource { Optional: true, Computed: true, ForceNew: true, - ConfigMode: schema.SchemaConfigModeAttr, MaxItems: 1, Description: `Configuration for controlling how IPs are allocated in the GKE cluster. Cannot be updated.`, Elem: &schema.Resource{ @@ -440,6 +440,23 @@ func ResourceComposerEnvironment() *schema.Resource { Computed: true, Description: `The number of schedulers for Airflow. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*.`, }, + "cloud_data_lineage_integration": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: composerSoftwareConfigKeys, + MaxItems: 1, + Description: `The configuration for Cloud Data Lineage integration. 
Supported for Cloud Composer environments in versions composer-2.1.2-airflow-*.*.* and newer`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Whether or not Cloud Data Lineage integration is enabled.`, + }, + }, + }, + }, }, }, }, @@ -925,10 +942,11 @@ func ResourceComposerEnvironment() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "bucket": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Optional. Name of an existing Cloud Storage bucket to be used by the environment.`, + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: gscBucketNameDiffSuppress, + Description: `Optional. Name of an existing Cloud Storage bucket to be used by the environment.`, }, }, }, @@ -1105,6 +1123,21 @@ func resourceComposerEnvironmentUpdate(d *schema.ResourceData, meta interface{}) } } + if d.HasChange("config.0.software_config.0.cloud_data_lineage_integration") { + patchObj := &composer.Environment{ + Config: &composer.EnvironmentConfig{ + SoftwareConfig: &composer.SoftwareConfig{}, + }, + } + if config != nil && config.SoftwareConfig != nil { + patchObj.Config.SoftwareConfig.CloudDataLineageIntegration = config.SoftwareConfig.CloudDataLineageIntegration + } + err = resourceComposerEnvironmentPatchField("config.softwareConfig.cloudDataLineageIntegration", userAgent, patchObj, d, tfConfig) + if err != nil { + return err + } + } + if d.HasChange("config.0.software_config.0.airflow_config_overrides") { patchObj := &composer.Environment{ Config: &composer.EnvironmentConfig{ @@ -1690,6 +1723,18 @@ func flattenComposerEnvironmentConfigSoftwareConfig(softwareCfg *composer.Softwa transformed["pypi_packages"] = softwareCfg.PypiPackages transformed["env_variables"] = softwareCfg.EnvVariables transformed["scheduler_count"] = softwareCfg.SchedulerCount + transformed["cloud_data_lineage_integration"] = flattenComposerEnvironmentConfigSoftwareConfigCloudDataLineageIntegration(softwareCfg.CloudDataLineageIntegration) + return []interface{}{transformed} +} + +func flattenComposerEnvironmentConfigSoftwareConfigCloudDataLineageIntegration(cloudDataLineageIntegration *composer.CloudDataLineageIntegration) interface{} { + if cloudDataLineageIntegration == nil { + return nil + } + + transformed := make(map[string]interface{}) + transformed["enabled"] = cloudDataLineageIntegration.Enabled + return []interface{}{transformed} } @@ -2354,6 +2399,12 @@ func expandComposerEnvironmentConfigSoftwareConfig(v interface{}, d *schema.Reso transformed.EnvVariables = expandComposerEnvironmentConfigSoftwareConfigStringMap(original, "env_variables") transformed.SchedulerCount = int64(original["scheduler_count"].(int)) + transformedCloudDataLineageIntegration, err := expandComposerEnvironmentConfigSoftwareConfigCloudDataLineageIntegration(original["cloud_data_lineage_integration"], d, config) + if err != nil { + return nil, err + } + transformed.CloudDataLineageIntegration = transformedCloudDataLineageIntegration + return transformed, nil } @@ -2365,6 +2416,20 @@ func expandComposerEnvironmentConfigSoftwareConfigStringMap(softwareConfig map[s return map[string]string{} } +func expandComposerEnvironmentConfigSoftwareConfigCloudDataLineageIntegration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.CloudDataLineageIntegration, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + raw := l[0] + original := 
raw.(map[string]interface{}) + + transformed := &composer.CloudDataLineageIntegration{} + transformed.Enabled = original["enabled"].(bool) + + return transformed, nil +} + func validateComposerEnvironmentPypiPackages(v interface{}, k string) (ws []string, errors []error) { if v == nil { return ws, errors @@ -2702,3 +2767,11 @@ func validateComposerInternalIpv4CidrBlock(v any, k string) (warns []string, err } return } + +func gscBucketNameDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + prefix := "gs://" + if prefix+old == new || old == prefix+new { + return true + } + return false +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/compute_instance_helpers.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/compute_instance_helpers.go index 58b022cf3b5..4a68927c3db 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/compute_instance_helpers.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/compute_instance_helpers.go @@ -51,15 +51,28 @@ func expandAliasIpRanges(ranges []interface{}) []*compute.AliasIpRange { return ipRanges } -func flattenAliasIpRange(ranges []*compute.AliasIpRange) []map[string]interface{} { - rangesSchema := make([]map[string]interface{}, 0, len(ranges)) +func flattenAliasIpRange(d *schema.ResourceData, ranges []*compute.AliasIpRange, i int) []map[string]interface{} { + prefix := fmt.Sprintf("network_interface.%d", i) + + configData := []map[string]interface{}{} + for _, item := range d.Get(prefix + ".alias_ip_range").([]interface{}) { + configData = append(configData, item.(map[string]interface{})) + } + + apiData := make([]map[string]interface{}, 0, len(ranges)) for _, ipRange := range ranges { - rangesSchema = append(rangesSchema, map[string]interface{}{ + apiData = append(apiData, map[string]interface{}{ "ip_cidr_range": ipRange.IpCidrRange, "subnetwork_range_name": ipRange.SubnetworkRangeName, }) } - return rangesSchema + + //permadiff fix + sorted, err := tpgresource.SortMapsByConfigOrder(configData, apiData, "ip_cidr_range") + if err != nil { + return apiData + } + return sorted } func expandScheduling(v interface{}) (*compute.Scheduling, error) { @@ -363,7 +376,7 @@ func flattenNetworkInterfaces(d *schema.ResourceData, config *transport_tpg.Conf "subnetwork": tpgresource.ConvertSelfLinkToV1(iface.Subnetwork), "subnetwork_project": subnet.Project, "access_config": ac, - "alias_ip_range": flattenAliasIpRange(iface.AliasIpRanges), + "alias_ip_range": flattenAliasIpRange(d, iface.AliasIpRanges, i), "nic_type": iface.NicType, "stack_type": iface.StackType, "ipv6_access_config": flattenIpv6AccessConfigs(iface.Ipv6AccessConfigs), @@ -573,6 +586,7 @@ func expandAdvancedMachineFeatures(d tpgresource.TerraformResourceData) *compute return &compute.AdvancedMachineFeatures{ EnableNestedVirtualization: d.Get(prefix + ".enable_nested_virtualization").(bool), ThreadsPerCore: int64(d.Get(prefix + ".threads_per_core").(int)), + TurboMode: d.Get(prefix + ".turbo_mode").(string), VisibleCoreCount: int64(d.Get(prefix + ".visible_core_count").(int)), } } @@ -584,6 +598,7 @@ func flattenAdvancedMachineFeatures(AdvancedMachineFeatures *compute.AdvancedMac return []map[string]interface{}{{ "enable_nested_virtualization": AdvancedMachineFeatures.EnableNestedVirtualization, "threads_per_core": 
AdvancedMachineFeatures.ThreadsPerCore, + "turbo_mode": AdvancedMachineFeatures.TurboMode, "visible_core_count": AdvancedMachineFeatures.VisibleCoreCount, }} } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_compute_secutity_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_compute_security_policy.go similarity index 93% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_compute_secutity_policy.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_compute_security_policy.go index 74b9d7f9347..a2a79e183d4 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_compute_secutity_policy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_compute_security_policy.go @@ -21,12 +21,12 @@ func DataSourceGoogleComputeSecurityPolicy() *schema.Resource { tpgresource.AddOptionalFieldsToSchema(dsSchema, "self_link") return &schema.Resource{ - Read: dataSourceComputSecurityPolicyRead, + Read: dataSourceComputeSecurityPolicyRead, Schema: dsSchema, } } -func dataSourceComputSecurityPolicyRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceComputeSecurityPolicyRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) id := "" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_instance.go index acab1b38540..3e04ccdb47b 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_instance.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_instance.go @@ -53,6 +53,9 @@ func dataSourceGoogleComputeInstanceRead(d *schema.ResourceData, meta interface{ if err := d.Set("machine_type", tpgresource.GetResourceNameFromSelfLink(instance.MachineType)); err != nil { return fmt.Errorf("Error setting machine_type: %s", err) } + if err := d.Set("hostname", instance.Hostname); err != nil { + return fmt.Errorf("Error setting hostname: %s", err) + } // Set the networks // Use the first external IP found for the default connection info. 
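Reviewer note on the "permadiff fix" in compute_instance_helpers.go above: flattenAliasIpRange now reorders the alias IP ranges returned by the API to match the order already present in the user's config, so a server-side reordering no longer surfaces as a perpetual plan diff. A minimal sketch of that idea, assuming a simplified stand-in for the provider's tpgresource.SortMapsByConfigOrder helper (the real helper also returns an error; this signature and the sample data are illustrative only):

package main

import "fmt"

// sortMapsByConfigOrder is an illustrative stand-in for
// tpgresource.SortMapsByConfigOrder: it returns the API maps reordered so
// that entries whose idField value also appears in the config come first,
// in config order, followed by any API-only entries.
func sortMapsByConfigOrder(config, api []map[string]interface{}, idField string) []map[string]interface{} {
	byID := make(map[string]map[string]interface{}, len(api))
	for _, m := range api {
		byID[m[idField].(string)] = m
	}
	sorted := make([]map[string]interface{}, 0, len(api))
	seen := make(map[string]bool)
	for _, c := range config {
		id := c[idField].(string)
		if m, ok := byID[id]; ok {
			sorted = append(sorted, m)
			seen[id] = true
		}
	}
	// API entries not present in config are appended, so newly added
	// ranges still show up in the diff.
	for _, m := range api {
		if !seen[m[idField].(string)] {
			sorted = append(sorted, m)
		}
	}
	return sorted
}

func main() {
	config := []map[string]interface{}{{"ip_cidr_range": "10.0.1.0/24"}, {"ip_cidr_range": "10.0.2.0/24"}}
	api := []map[string]interface{}{{"ip_cidr_range": "10.0.2.0/24"}, {"ip_cidr_range": "10.0.1.0/24"}}
	fmt.Println(sortMapsByConfigOrder(config, api, "ip_cidr_range")) // config order restored
}

Falling back to the raw API order when the helper errors (as the vendored code does) keeps the flattener total: a sort failure degrades to the old permadiff behavior rather than breaking the read.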
@@ -201,6 +204,13 @@ func dataSourceGoogleComputeInstanceRead(d *schema.ResourceData, meta interface{ if err := d.Set("name", instance.Name); err != nil { return fmt.Errorf("Error setting name: %s", err) } + if err := d.Set("key_revocation_action_type", instance.KeyRevocationActionType); err != nil { + return fmt.Errorf("Error setting key_revocation_action_type: %s", err) + } + if err := d.Set("creation_timestamp", instance.CreationTimestamp); err != nil { + return fmt.Errorf("Error setting creation_timestamp: %s", err) + } + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, tpgresource.GetResourceNameFromSelfLink(instance.Zone), instance.Name)) return nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_instance_guest_attributes.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_instance_guest_attributes.go new file mode 100644 index 00000000000..3c315dbeb6c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_instance_guest_attributes.go @@ -0,0 +1,133 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + compute "google.golang.org/api/compute/v1" +) + +func DataSourceGoogleComputeInstanceGuestAttributes() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeInstanceGuestAttributesRead, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "region": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "query_path": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"variable_key"}, + }, + + "variable_key": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"query_path"}, + }, + + "variable_value": { + Type: schema.TypeString, + Computed: true, + }, + + "query_value": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Computed: true, + }, + "namespace": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceGoogleComputeInstanceGuestAttributesRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + project, zone, name, err := tpgresource.GetZonalResourcePropertiesFromSelfLinkOrSchema(d, config) + if err != nil { + return err + } + + id := fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, zone, name) + instanceGuestAttributes := &compute.GuestAttributes{} + + // You can either query based on variable_key, query_path or just get the first value + if d.Get("query_path").(string) != "" { + instanceGuestAttributes, err = 
config.NewComputeClient(userAgent).Instances.GetGuestAttributes(project, zone, name).QueryPath(d.Get("query_path").(string)).Do() + } else if d.Get("variable_key").(string) != "" { + instanceGuestAttributes, err = config.NewComputeClient(userAgent).Instances.GetGuestAttributes(project, zone, name).VariableKey(d.Get("variable_key").(string)).Do() + } else { + instanceGuestAttributes, err = config.NewComputeClient(userAgent).Instances.GetGuestAttributes(project, zone, name).Do() + } + if err != nil { + return transport_tpg.HandleDataSourceNotFoundError(err, d, fmt.Sprintf("Instance's Guest Attributes %s", name), id) + } + + // Set query results + if err := d.Set("variable_value", instanceGuestAttributes.VariableValue); err != nil { + return fmt.Errorf("Error variable_value: %s", err) + } + if err := d.Set("query_value", flattenQueryValues(instanceGuestAttributes.QueryValue)); err != nil { + return fmt.Errorf("Error query_value: %s", err) + } + + d.SetId(fmt.Sprintf(instanceGuestAttributes.SelfLink)) + return nil +} + +func flattenQueryValues(queryValue *compute.GuestAttributesValue) []map[string]interface{} { + if queryValue == nil { + return nil + } + queryValueItems := make([]map[string]interface{}, 0) + for _, item := range queryValue.Items { + queryValueItems = append(queryValueItems, map[string]interface{}{ + "key": item.Key, + "namespace": item.Namespace, + "value": item.Value, + }) + } + return queryValueItems +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_region_instance_group_manager.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_region_instance_group_manager.go new file mode 100644 index 00000000000..21dcba28f2c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_region_instance_group_manager.go @@ -0,0 +1,65 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "errors" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleComputeRegionInstanceGroupManager() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeRegionInstanceGroupManager().Schema) + tpgresource.AddOptionalFieldsToSchema(dsSchema, "name", "self_link", "project", "region") + + return &schema.Resource{ + Read: dataSourceComputeRegionInstanceGroupManagerRead, + Schema: dsSchema, + } +} + +func dataSourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + if selfLink, ok := d.Get("self_link").(string); ok && selfLink != "" { + parsed, err := tpgresource.ParseRegionalInstanceGroupManagersFieldValue(selfLink, d, config) + if err != nil { + return fmt.Errorf("InstanceGroup name, region or project could not be parsed from %s: %v", selfLink, err) + } + if err := d.Set("name", parsed.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("region", parsed.Region); err != nil { + return fmt.Errorf("Error setting region: %s", err) + } + if err := d.Set("project", parsed.Project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + d.SetId(fmt.Sprintf("projects/%s/regions/%s/instanceGroupManagers/%s", parsed.Project, parsed.Region, parsed.Name)) + } else if name, ok := d.Get("name").(string); ok && name != "" { + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + d.SetId(fmt.Sprintf("projects/%s/regions/%s/instanceGroupManagers/%s", project, region, name)) + } else { + return errors.New("Must provide either `self_link` or `region/name`") + } + + err := resourceComputeRegionInstanceGroupManagerRead(d, meta) + + if err != nil { + return err + } + if d.Id() == "" { + return errors.New("Regional Instance Manager Group not found") + } + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/metadata.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/metadata.go index a7671155756..3b82efda849 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/metadata.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/metadata.go @@ -86,7 +86,7 @@ func BetaMetadataUpdate(oldMDMap map[string]interface{}, newMDMap map[string]int } func expandComputeMetadata(m map[string]interface{}) []*compute.MetadataItems { - metadata := make([]*compute.MetadataItems, len(m)) + metadata := make([]*compute.MetadataItems, 0, len(m)) var keys []string for key := range m { keys = append(keys, key) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_attached_disk.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_attached_disk.go index b5bc57aee21..dafd6d81478 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_attached_disk.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_attached_disk.go @@ -84,6 +84,13 @@ func ResourceComputeAttachedDisk() *schema.Resource { Description: `The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode.`, ValidateFunc: validation.StringInSlice([]string{"READ_ONLY", "READ_WRITE"}, false), }, + "interface": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Description: `The disk interface used for attaching this disk. One of SCSI or NVME. (This field is only used for specific cases, please don't specify this field without advice from Google.)`, + ValidateFunc: validation.StringInSlice([]string{"SCSI", "NVME"}, false), + }, }, UseJSONNumber: true, } @@ -118,6 +125,7 @@ func resourceAttachedDiskCreate(d *schema.ResourceData, meta interface{}) error Source: diskSrc, Mode: d.Get("mode").(string), DeviceName: d.Get("device_name").(string), + Interface: d.Get("interface").(string), } op, err := config.NewComputeClient(userAgent).Instances.AttachDisk(zv.Project, zv.Zone, zv.Name, &attachedDisk).Do() diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_service.go index b0de8c749c8..51ac9a7d1be 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_service.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_service.go @@ -632,19 +632,25 @@ For internal load balancing, a URL to a HealthCheck resource must be specified i }, "iap": { Type: schema.TypeList, + Computed: true, Optional: true, Description: `Settings for enabling Cloud Identity Aware Proxy`, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Whether the serving infrastructure will authenticate and authorize all incoming requests.`, + }, "oauth2_client_id": { Type: schema.TypeString, - Required: true, + Optional: true, Description: `OAuth2 Client ID for IAP`, }, "oauth2_client_secret": { Type: schema.TypeString, - Required: true, + Optional: true, Description: `OAuth2 Client Secret for IAP`, Sensitive: true, }, @@ -657,6 +663,12 @@ For internal load balancing, a URL to a HealthCheck resource must be specified i }, }, }, + "ip_address_selection_policy": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"IPV4_ONLY", "PREFER_IPV6", "IPV6_ONLY", ""}), + Description: `Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). Possible values: ["IPV4_ONLY", "PREFER_IPV6", "IPV6_ONLY"]`, + }, "load_balancing_scheme": { Type: schema.TypeString, Optional: true, @@ -797,7 +809,8 @@ The possible values are: Maglev, refer to https://ai.google/research/pubs/pub44824 * 'WEIGHTED_MAGLEV': Per-instance weighted Load Balancing via health check - reported weights. If set, the Backend Service must + reported weights. 
Only applicable to loadBalancingScheme + EXTERNAL. If set, the Backend Service must configure a non legacy HTTP-based Health Check, and health check replies are expected to contain non-standard HTTP response header field @@ -809,7 +822,7 @@ The possible values are: UNAVAILABLE_WEIGHT. Otherwise, Load Balancing remains equal-weight. -This field is applicable to either: +locality_lb_policy is applicable to either: * A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and loadBalancingScheme set to INTERNAL_MANAGED. @@ -818,7 +831,7 @@ This field is applicable to either: Load Balancing). Only MAGLEV and WEIGHTED_MAGLEV values are possible for External Network Load Balancing. The default is MAGLEV. -If session_affinity is not NONE, and this field is not set to MAGLEV, WEIGHTED_MAGLEV, +If session_affinity is not NONE, and locality_lb_policy is not set to MAGLEV, WEIGHTED_MAGLEV, or RING_HASH, session affinity settings will not take effect. Only ROUND_ROBIN and RING_HASH are supported when the backend service is referenced @@ -859,10 +872,7 @@ The default value is 1.0.`, Optional: true, Description: `Settings controlling eviction of unhealthy hosts from the load balancing pool. Applicable backend service types can be a global backend service with the -loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. - -From version 6.0.0 outlierDetection default terraform values will be removed to match default GCP value. -Default values are enforce by GCP without providing them.`, +loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED.`, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -898,7 +908,6 @@ less than one second are represented with a 0 'seconds' field and a positive Description: `Number of errors before a host is ejected from the connection pool. When the backend host is accessed over HTTP, a 5xx return code qualifies as an error. Defaults to 5.`, - Default: 5, AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, }, "consecutive_gateway_failure": { @@ -907,7 +916,6 @@ Defaults to 5.`, Description: `The number of consecutive gateway failures (502, 503, 504 status or connection errors that are mapped to one of those status codes) before a consecutive gateway failure ejection occurs. Defaults to 5.`, - Default: 5, AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, }, "enforcing_consecutive_errors": { @@ -916,7 +924,6 @@ gateway failure ejection occurs. 
Defaults to 5.`, Description: `The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive 5xx. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.`, - Default: 100, AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, }, "enforcing_consecutive_gateway_failure": { @@ -925,7 +932,6 @@ ejection or to ramp it up slowly. Defaults to 100.`, Description: `The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive gateway failures. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 0.`, - Default: 0, AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, }, "enforcing_success_rate": { @@ -934,7 +940,6 @@ used to disable ejection or to ramp it up slowly. Defaults to 0.`, Description: `The percentage chance that a host will be actually ejected when an outlier status is detected through success rate statistics. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.`, - Default: 100, AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, }, "interval": { @@ -967,7 +972,6 @@ less than one second are represented with a 0 'seconds' field and a positive Optional: true, Description: `Maximum percentage of hosts in the load balancing pool for the backend service that can be ejected. Defaults to 10%.`, - Default: 10, AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, }, "success_rate_minimum_hosts": { @@ -977,7 +981,6 @@ that can be ejected. 
Defaults to 10%.`, success rate outliers. If the number of hosts is less than this setting, outlier detection via success rate statistics is not performed for any host in the cluster. Defaults to 5.`, - Default: 5, AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, }, "success_rate_request_volume": { @@ -988,7 +991,6 @@ defined by the interval duration above) to include this host in success rate based outlier detection. If the volume is lower than this setting, outlier detection via success rate statistics is not performed for that host. Defaults to 100.`, - Default: 100, AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, }, "success_rate_stdev_factor": { @@ -1000,7 +1002,6 @@ rate, and the product of this factor and the standard deviation of the mean success rate: mean - (stdev * success_rate_stdev_factor). This factor is divided by a thousand to get a double. That is, if the desired factor is 1.9, the runtime value should be 1900. Defaults to 1900.`, - Default: 1900, AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, }, }, @@ -1041,17 +1042,51 @@ load_balancing_scheme set to INTERNAL_SELF_MANAGED.`, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "aws_v4_authentication": { + Type: schema.TypeList, + Optional: true, + Description: `The configuration needed to generate a signature for access to private storage buckets that support AWS's Signature Version 4 for authentication. +Allowed only for INTERNET_IP_PORT and INTERNET_FQDN_PORT NEG backends.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "access_key": { + Type: schema.TypeString, + Optional: true, + Description: `The access key used for s3 bucket authentication. 
+Required for updating or creating a backend that uses AWS v4 signature authentication, but will not be returned as part of the configuration when queried with a REST API GET request.`, + Sensitive: true, + }, + "access_key_id": { + Type: schema.TypeString, + Optional: true, + Description: `The identifier of an access key used for s3 bucket authentication.`, + }, + "access_key_version": { + Type: schema.TypeString, + Optional: true, + Description: `The optional version identifier for the access key. You can use this to keep track of different iterations of your access key.`, + }, + "origin_region": { + Type: schema.TypeString, + Optional: true, + Description: `The name of the cloud region of your origin. This is a free-form field with the name of the region your cloud uses to host your origin. +For example, "us-east-1" for AWS or "us-ashburn-1" for OCI.`, + }, + }, + }, + }, "client_tls_policy": { Type: schema.TypeString, - Required: true, - DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Optional: true, + DiffSuppressFunc: tpgresource.ProjectNumberDiffSuppress, Description: `ClientTlsPolicy is a resource that specifies how a client should authenticate connections to backends of a service. This resource itself does not affect configuration unless it is attached to a backend service resource.`, }, "subject_alt_names": { Type: schema.TypeList, - Required: true, + Optional: true, Description: `A list of alternate names to verify the subject identity in the certificate. If specified, the client will verify that the server certificate's subject alt name matches one of the specified values.`, @@ -1072,9 +1107,56 @@ Can only be set if load balancing scheme is EXTERNAL, EXTERNAL_MANAGED, INTERNAL Type: schema.TypeString, Computed: true, Optional: true, - ValidateFunc: verify.ValidateEnum([]string{"NONE", "CLIENT_IP", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", "HEADER_FIELD", "HTTP_COOKIE", ""}), + ValidateFunc: verify.ValidateEnum([]string{"NONE", "CLIENT_IP", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", "HEADER_FIELD", "HTTP_COOKIE", "STRONG_COOKIE_AFFINITY", ""}), Description: `Type of session affinity to use. The default is NONE. Session affinity is -not applicable if the protocol is UDP. Possible values: ["NONE", "CLIENT_IP", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", "HEADER_FIELD", "HTTP_COOKIE"]`, +not applicable if the protocol is UDP. Possible values: ["NONE", "CLIENT_IP", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", "HEADER_FIELD", "HTTP_COOKIE", "STRONG_COOKIE_AFFINITY"]`, + }, + "strong_session_affinity_cookie": { + Type: schema.TypeList, + Optional: true, + Description: `Describes the HTTP cookie used for stateful session affinity. 
This field is applicable and required if the sessionAffinity is set to STRONG_COOKIE_AFFINITY.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the cookie.`, + AtLeastOneOf: []string{"strong_session_affinity_cookie.0.ttl", "strong_session_affinity_cookie.0.name", "strong_session_affinity_cookie.0.path"}, + }, + "path": { + Type: schema.TypeString, + Optional: true, + Description: `Path to set for the cookie.`, + AtLeastOneOf: []string{"strong_session_affinity_cookie.0.ttl", "strong_session_affinity_cookie.0.name", "strong_session_affinity_cookie.0.path"}, + }, + "ttl": { + Type: schema.TypeList, + Optional: true, + Description: `Lifetime of the cookie.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "seconds": { + Type: schema.TypeInt, + Required: true, + Description: `Span of time at a resolution of a second. +Must be from 0 to 315,576,000,000 inclusive.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + Description: `Span of time that's a fraction of a second at nanosecond +resolution. Durations less than one second are represented +with a 0 seconds field and a positive nanos field. Must +be from 0 to 999,999,999 inclusive.`, + }, + }, + }, + AtLeastOneOf: []string{"strong_session_affinity_cookie.0.ttl", "strong_session_affinity_cookie.0.name", "strong_session_affinity_cookie.0.path"}, + }, + }, + }, }, "timeout_sec": { Type: schema.TypeInt, @@ -1152,9 +1234,7 @@ UTILIZATION. Valid values are UTILIZATION, RATE (for HTTP(S)) and CONNECTION (for TCP/SSL). See the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode) -for an explanation of load balancing modes. - -From version 6.0.0 default value will be UTILIZATION to match default GCP value. Default value: "UTILIZATION" Possible values: ["UTILIZATION", "RATE", "CONNECTION"]`, +for an explanation of load balancing modes. 
Default value: "UTILIZATION" Possible values: ["UTILIZATION", "RATE", "CONNECTION"]`, Default: "UTILIZATION", }, "capacity_scaler": { @@ -1342,6 +1422,12 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{ } else if v, ok := d.GetOkExists("iap"); ok || !reflect.DeepEqual(v, iapProp) { obj["iap"] = iapProp } + ipAddressSelectionPolicyProp, err := expandComputeBackendServiceIpAddressSelectionPolicy(d.Get("ip_address_selection_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ip_address_selection_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(ipAddressSelectionPolicyProp)) && (ok || !reflect.DeepEqual(v, ipAddressSelectionPolicyProp)) { + obj["ipAddressSelectionPolicy"] = ipAddressSelectionPolicyProp + } loadBalancingSchemeProp, err := expandComputeBackendServiceLoadBalancingScheme(d.Get("load_balancing_scheme"), d, config) if err != nil { return err @@ -1408,6 +1494,12 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{ } else if v, ok := d.GetOkExists("session_affinity"); !tpgresource.IsEmptyValue(reflect.ValueOf(sessionAffinityProp)) && (ok || !reflect.DeepEqual(v, sessionAffinityProp)) { obj["sessionAffinity"] = sessionAffinityProp } + strongSessionAffinityCookieProp, err := expandComputeBackendServiceStrongSessionAffinityCookie(d.Get("strong_session_affinity_cookie"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("strong_session_affinity_cookie"); !tpgresource.IsEmptyValue(reflect.ValueOf(strongSessionAffinityCookieProp)) && (ok || !reflect.DeepEqual(v, strongSessionAffinityCookieProp)) { + obj["strongSessionAffinityCookie"] = strongSessionAffinityCookieProp + } timeoutSecProp, err := expandComputeBackendServiceTimeoutSec(d.Get("timeout_sec"), d, config) if err != nil { return err @@ -1641,6 +1733,9 @@ func resourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{}) if err := d.Set("iap", flattenComputeBackendServiceIap(res["iap"], d, config)); err != nil { return fmt.Errorf("Error reading BackendService: %s", err) } + if err := d.Set("ip_address_selection_policy", flattenComputeBackendServiceIpAddressSelectionPolicy(res["ipAddressSelectionPolicy"], d, config)); err != nil { + return fmt.Errorf("Error reading BackendService: %s", err) + } if err := d.Set("load_balancing_scheme", flattenComputeBackendServiceLoadBalancingScheme(res["loadBalancingScheme"], d, config)); err != nil { return fmt.Errorf("Error reading BackendService: %s", err) } @@ -1674,6 +1769,9 @@ func resourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{}) if err := d.Set("session_affinity", flattenComputeBackendServiceSessionAffinity(res["sessionAffinity"], d, config)); err != nil { return fmt.Errorf("Error reading BackendService: %s", err) } + if err := d.Set("strong_session_affinity_cookie", flattenComputeBackendServiceStrongSessionAffinityCookie(res["strongSessionAffinityCookie"], d, config)); err != nil { + return fmt.Errorf("Error reading BackendService: %s", err) + } if err := d.Set("timeout_sec", flattenComputeBackendServiceTimeoutSec(res["timeoutSec"], d, config)); err != nil { return fmt.Errorf("Error reading BackendService: %s", err) } @@ -1790,6 +1888,12 @@ func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{ } else if v, ok := d.GetOkExists("iap"); ok || !reflect.DeepEqual(v, iapProp) { obj["iap"] = iapProp } + ipAddressSelectionPolicyProp, err := 
expandComputeBackendServiceIpAddressSelectionPolicy(d.Get("ip_address_selection_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ip_address_selection_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ipAddressSelectionPolicyProp)) { + obj["ipAddressSelectionPolicy"] = ipAddressSelectionPolicyProp + } loadBalancingSchemeProp, err := expandComputeBackendServiceLoadBalancingScheme(d.Get("load_balancing_scheme"), d, config) if err != nil { return err @@ -1856,6 +1960,12 @@ func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{ } else if v, ok := d.GetOkExists("session_affinity"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sessionAffinityProp)) { obj["sessionAffinity"] = sessionAffinityProp } + strongSessionAffinityCookieProp, err := expandComputeBackendServiceStrongSessionAffinityCookie(d.Get("strong_session_affinity_cookie"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("strong_session_affinity_cookie"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, strongSessionAffinityCookieProp)) { + obj["strongSessionAffinityCookie"] = strongSessionAffinityCookieProp + } timeoutSecProp, err := expandComputeBackendServiceTimeoutSec(d.Get("timeout_sec"), d, config) if err != nil { return err @@ -2752,6 +2862,8 @@ func flattenComputeBackendServiceIap(v interface{}, d *schema.ResourceData, conf return nil } transformed := make(map[string]interface{}) + transformed["enabled"] = + flattenComputeBackendServiceIapEnabled(original["enabled"], d, config) transformed["oauth2_client_id"] = flattenComputeBackendServiceIapOauth2ClientId(original["oauth2ClientId"], d, config) transformed["oauth2_client_secret"] = @@ -2760,6 +2872,10 @@ func flattenComputeBackendServiceIap(v interface{}, d *schema.ResourceData, conf flattenComputeBackendServiceIapOauth2ClientSecretSha256(original["oauth2ClientSecretSha256"], d, config) return []interface{}{transformed} } +func flattenComputeBackendServiceIapEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenComputeBackendServiceIapOauth2ClientId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -2772,6 +2888,10 @@ func flattenComputeBackendServiceIapOauth2ClientSecretSha256(v interface{}, d *s return v } +func flattenComputeBackendServiceIpAddressSelectionPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenComputeBackendServiceLoadBalancingScheme(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -3156,6 +3276,8 @@ func flattenComputeBackendServiceSecuritySettings(v interface{}, d *schema.Resou flattenComputeBackendServiceSecuritySettingsClientTlsPolicy(original["clientTlsPolicy"], d, config) transformed["subject_alt_names"] = flattenComputeBackendServiceSecuritySettingsSubjectAltNames(original["subjectAltNames"], d, config) + transformed["aws_v4_authentication"] = + flattenComputeBackendServiceSecuritySettingsAwsV4Authentication(original["awsV4Authentication"], d, config) return []interface{}{transformed} } func flattenComputeBackendServiceSecuritySettingsClientTlsPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { @@ -3169,10 +3291,119 @@ func flattenComputeBackendServiceSecuritySettingsSubjectAltNames(v interface{}, return v } 
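
The `strong_session_affinity_cookie` TTL flatteners added just below (`...CookieTtlSeconds` and `...CookieTtlNanos`) repeat the provider's standard fixed64 normalization: the GCE API encodes some 64-bit integers as JSON strings, while plain JSON numbers decode to `float64`. The vendored `tpgresource.StringToFixed64` is not shown in this patch; the sketch below assumes it behaves like `strconv.ParseInt` in base 10.

```go
// Minimal sketch of the fixed64 normalization pattern used by the TTL
// flatteners below. Assumes StringToFixed64 ~ strconv.ParseInt(s, 10, 64).
package main

import (
	"fmt"
	"strconv"
)

// normalizeFixed64 mirrors the flatten pattern: parse string-encoded fixed64
// values, truncate float64 JSON numbers to int, and pass anything else
// through for Terraform core to handle.
func normalizeFixed64(v interface{}) interface{} {
	if s, ok := v.(string); ok {
		if n, err := strconv.ParseInt(s, 10, 64); err == nil {
			return n
		}
	}
	if f, ok := v.(float64); ok {
		return int(f)
	}
	return v
}

func main() {
	fmt.Println(normalizeFixed64("315576000000")) // string fixed64 -> int64
	fmt.Println(normalizeFixed64(500000.0))       // JSON number -> int
	fmt.Println(normalizeFixed64(nil))            // passthrough
}
```
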
+func flattenComputeBackendServiceSecuritySettingsAwsV4Authentication(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["access_key_id"] = + flattenComputeBackendServiceSecuritySettingsAwsV4AuthenticationAccessKeyId(original["accessKeyId"], d, config) + transformed["access_key"] = + flattenComputeBackendServiceSecuritySettingsAwsV4AuthenticationAccessKey(original["accessKey"], d, config) + transformed["access_key_version"] = + flattenComputeBackendServiceSecuritySettingsAwsV4AuthenticationAccessKeyVersion(original["accessKeyVersion"], d, config) + transformed["origin_region"] = + flattenComputeBackendServiceSecuritySettingsAwsV4AuthenticationOriginRegion(original["originRegion"], d, config) + return []interface{}{transformed} +} +func flattenComputeBackendServiceSecuritySettingsAwsV4AuthenticationAccessKeyId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeBackendServiceSecuritySettingsAwsV4AuthenticationAccessKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("security_settings.0.aws_v4_authentication.0.access_key") +} + +func flattenComputeBackendServiceSecuritySettingsAwsV4AuthenticationAccessKeyVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeBackendServiceSecuritySettingsAwsV4AuthenticationOriginRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenComputeBackendServiceSessionAffinity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } +func flattenComputeBackendServiceStrongSessionAffinityCookie(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["ttl"] = + flattenComputeBackendServiceStrongSessionAffinityCookieTtl(original["ttl"], d, config) + transformed["name"] = + flattenComputeBackendServiceStrongSessionAffinityCookieName(original["name"], d, config) + transformed["path"] = + flattenComputeBackendServiceStrongSessionAffinityCookiePath(original["path"], d, config) + return []interface{}{transformed} +} +func flattenComputeBackendServiceStrongSessionAffinityCookieTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["seconds"] = + flattenComputeBackendServiceStrongSessionAffinityCookieTtlSeconds(original["seconds"], d, config) + transformed["nanos"] = + flattenComputeBackendServiceStrongSessionAffinityCookieTtlNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func flattenComputeBackendServiceStrongSessionAffinityCookieTtlSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok 
:= v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeBackendServiceStrongSessionAffinityCookieTtlNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeBackendServiceStrongSessionAffinityCookieName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeBackendServiceStrongSessionAffinityCookiePath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenComputeBackendServiceTimeoutSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { @@ -3861,6 +4092,13 @@ func expandComputeBackendServiceIap(v interface{}, d tpgresource.TerraformResour original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) + transformedEnabled, err := expandComputeBackendServiceIapEnabled(original["enabled"], d, config) + if err != nil { + return nil, err + } else { + transformed["enabled"] = transformedEnabled + } + transformedOauth2ClientId, err := expandComputeBackendServiceIapOauth2ClientId(original["oauth2_client_id"], d, config) if err != nil { return nil, err @@ -3885,6 +4123,10 @@ func expandComputeBackendServiceIap(v interface{}, d tpgresource.TerraformResour return transformed, nil } +func expandComputeBackendServiceIapEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandComputeBackendServiceIapOauth2ClientId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -3897,6 +4139,10 @@ func expandComputeBackendServiceIapOauth2ClientSecretSha256(v interface{}, d tpg return v, nil } +func expandComputeBackendServiceIpAddressSelectionPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandComputeBackendServiceLoadBalancingScheme(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -4227,18 +4473,77 @@ func expandComputeBackendServiceSecuritySettings(v interface{}, d tpgresource.Te transformed["subjectAltNames"] = transformedSubjectAltNames } + transformedAwsV4Authentication, err := expandComputeBackendServiceSecuritySettingsAwsV4Authentication(original["aws_v4_authentication"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAwsV4Authentication); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["awsV4Authentication"] = transformedAwsV4Authentication + } + return transformed, nil } func expandComputeBackendServiceSecuritySettingsClientTlsPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) + return v, nil +} + +func 
expandComputeBackendServiceSecuritySettingsSubjectAltNames(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendServiceSecuritySettingsAwsV4Authentication(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAccessKeyId, err := expandComputeBackendServiceSecuritySettingsAwsV4AuthenticationAccessKeyId(original["access_key_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAccessKeyId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["accessKeyId"] = transformedAccessKeyId + } + + transformedAccessKey, err := expandComputeBackendServiceSecuritySettingsAwsV4AuthenticationAccessKey(original["access_key"], d, config) + if err != nil { + return nil, err + } else { + transformed["accessKey"] = transformedAccessKey + } + + transformedAccessKeyVersion, err := expandComputeBackendServiceSecuritySettingsAwsV4AuthenticationAccessKeyVersion(original["access_key_version"], d, config) if err != nil { - return nil, fmt.Errorf("Invalid value for client_tls_policy: %s", err) + return nil, err + } else if val := reflect.ValueOf(transformedAccessKeyVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["accessKeyVersion"] = transformedAccessKeyVersion } - return f.RelativeLink(), nil + + transformedOriginRegion, err := expandComputeBackendServiceSecuritySettingsAwsV4AuthenticationOriginRegion(original["origin_region"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOriginRegion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["originRegion"] = transformedOriginRegion + } + + return transformed, nil } -func expandComputeBackendServiceSecuritySettingsSubjectAltNames(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandComputeBackendServiceSecuritySettingsAwsV4AuthenticationAccessKeyId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendServiceSecuritySettingsAwsV4AuthenticationAccessKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendServiceSecuritySettingsAwsV4AuthenticationAccessKeyVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendServiceSecuritySettingsAwsV4AuthenticationOriginRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -4246,6 +4551,81 @@ func expandComputeBackendServiceSessionAffinity(v interface{}, d tpgresource.Ter return v, nil } +func expandComputeBackendServiceStrongSessionAffinityCookie(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTtl, err := 
expandComputeBackendServiceStrongSessionAffinityCookieTtl(original["ttl"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTtl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ttl"] = transformedTtl + } + + transformedName, err := expandComputeBackendServiceStrongSessionAffinityCookieName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedPath, err := expandComputeBackendServiceStrongSessionAffinityCookiePath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + return transformed, nil +} + +func expandComputeBackendServiceStrongSessionAffinityCookieTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSeconds, err := expandComputeBackendServiceStrongSessionAffinityCookieTtlSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandComputeBackendServiceStrongSessionAffinityCookieTtlNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func expandComputeBackendServiceStrongSessionAffinityCookieTtlSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendServiceStrongSessionAffinityCookieTtlNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendServiceStrongSessionAffinityCookieName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendServiceStrongSessionAffinityCookiePath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandComputeBackendServiceTimeoutSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -4289,24 +4669,6 @@ func expandComputeBackendServiceServiceLbPolicy(v interface{}, d tpgresource.Ter } func resourceComputeBackendServiceEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // The BackendService API's Update / PUT API is badly formed and behaves like - // a PATCH field for at least IAP. When sent a `null` `iap` field, the API - // doesn't disable an existing field. To work around this, we need to emulate - // the old Terraform behaviour of always sending the block (at both update and - // create), and force sending each subfield as empty when the block isn't - // present in config. 
- - iapVal := obj["iap"] - if iapVal == nil { - data := map[string]interface{}{} - data["enabled"] = false - obj["iap"] = data - } else { - iap := iapVal.(map[string]interface{}) - iap["enabled"] = true - obj["iap"] = iap - } - backendsRaw, ok := obj["backends"] if !ok { return obj, nil @@ -4365,18 +4727,6 @@ func resourceComputeBackendServiceEncoder(d *schema.ResourceData, meta interface } func resourceComputeBackendServiceDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - // We need to pretend IAP isn't there if it's disabled for Terraform to maintain - // BC behaviour with the handwritten resource. - v, ok := res["iap"] - if !ok || v == nil { - delete(res, "iap") - return res, nil - } - m := v.(map[string]interface{}) - if ok && m["enabled"] == false { - delete(res, "iap") - } - // Requests with consistentHash will error for specific values of // localityLbPolicy. However, the API will not remove it if the backend // service is updated to from supporting to non-supporting localityLbPolicy diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_disk.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_disk.go index a67cac2a160..d9abc3b1191 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_disk.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_disk.go @@ -329,6 +329,16 @@ the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.`, + }, + "access_mode": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The accessMode of the disk. 
+For example: +* READ_WRITE_SINGLE +* READ_WRITE_MANY +* READ_ONLY_SINGLE`, }, "async_primary_disk": { Type: schema.TypeList, @@ -861,6 +871,12 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { } else if v, ok := d.GetOkExists("storage_pool"); !tpgresource.IsEmptyValue(reflect.ValueOf(storagePoolProp)) && (ok || !reflect.DeepEqual(v, storagePoolProp)) { obj["storagePool"] = storagePoolProp } + accessModeProp, err := expandComputeDiskAccessMode(d.Get("access_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("access_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(accessModeProp)) && (ok || !reflect.DeepEqual(v, accessModeProp)) { + obj["accessMode"] = accessModeProp + } labelsProp, err := expandComputeDiskEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err @@ -1079,6 +1095,9 @@ func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error { if err := d.Set("storage_pool", flattenComputeDiskStoragePool(res["storagePool"], d, config)); err != nil { return fmt.Errorf("Error reading Disk: %s", err) } + if err := d.Set("access_mode", flattenComputeDiskAccessMode(res["accessMode"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } if err := d.Set("terraform_labels", flattenComputeDiskTerraformLabels(res["labels"], d, config)); err != nil { return fmt.Errorf("Error reading Disk: %s", err) } @@ -1336,6 +1355,56 @@ func resourceComputeDiskUpdate(d *schema.ResourceData, meta interface{}) error { return err } } + if d.HasChange("access_mode") { + obj := make(map[string]interface{}) + + accessModeProp, err := expandComputeDiskAccessMode(d.Get("access_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("access_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, accessModeProp)) { + obj["accessMode"] = accessModeProp + } + + obj, err = resourceComputeDiskUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{name}}?paths=accessMode") + if err != nil { + return err + } + + headers := make(http.Header) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error updating Disk %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Disk %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating Disk", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } d.Partial(false) @@ -1674,6 +1743,10 @@ func flattenComputeDiskStoragePool(v interface{}, d *schema.ResourceData, config return tpgresource.NameFromSelfLinkStateFunc(v) } +func flattenComputeDiskAccessMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenComputeDiskTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v @@ -1944,6 +2017,10 @@ func expandComputeDiskStoragePool(v interface{}, d 
tpgresource.TerraformResource return v, nil } +func expandComputeDiskAccessMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandComputeDiskEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil @@ -2193,7 +2270,7 @@ func resourceComputeDiskEncoder(d *schema.ResourceData, meta interface{}, obj ma func resourceComputeDiskUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - if (d.HasChange("provisioned_iops") && strings.Contains(d.Get("type").(string), "hyperdisk")) || (d.HasChange("provisioned_throughput") && strings.Contains(d.Get("type").(string), "hyperdisk")) { + if (d.HasChange("provisioned_iops") && strings.Contains(d.Get("type").(string), "hyperdisk")) || (d.HasChange("provisioned_throughput") && strings.Contains(d.Get("type").(string), "hyperdisk")) || (d.HasChange("access_mode") && strings.Contains(d.Get("type").(string), "hyperdisk")) { nameProp := d.Get("name") if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_external_vpn_gateway.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_external_vpn_gateway.go index 7e1f8492669..ec46bb6ccbd 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_external_vpn_gateway.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_external_vpn_gateway.go @@ -99,6 +99,17 @@ Only IPv4 is supported. This IP address can be either from your on-premise gateway or another Cloud provider's VPN gateway, it cannot be an IP address from Google Compute Engine.`, }, + "ipv6_address": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `IPv6 address of the interface in the external VPN gateway. This IPv6 +address can be either from your on-premise gateway or another Cloud +provider's VPN gateway, it cannot be an IP address from Google Compute +Engine. Must specify an IPv6 address (not IPV4-mapped) using any format +described in RFC 4291 (e.g. 2001:db8:0:0:2d9:51:0:0). The output format +is RFC 5952 format (e.g. 
2001:db8::2d9:51:0:0).`, + }, }, }, }, @@ -520,8 +531,9 @@ func flattenComputeExternalVpnGatewayInterface(v interface{}, d *schema.Resource continue } transformed = append(transformed, map[string]interface{}{ - "id": flattenComputeExternalVpnGatewayInterfaceId(original["id"], d, config), - "ip_address": flattenComputeExternalVpnGatewayInterfaceIpAddress(original["ipAddress"], d, config), + "id": flattenComputeExternalVpnGatewayInterfaceId(original["id"], d, config), + "ip_address": flattenComputeExternalVpnGatewayInterfaceIpAddress(original["ipAddress"], d, config), + "ipv6_address": flattenComputeExternalVpnGatewayInterfaceIpv6Address(original["ipv6Address"], d, config), }) } return transformed @@ -547,6 +559,10 @@ func flattenComputeExternalVpnGatewayInterfaceIpAddress(v interface{}, d *schema return v } +func flattenComputeExternalVpnGatewayInterfaceIpv6Address(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenComputeExternalVpnGatewayTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v @@ -606,6 +622,13 @@ func expandComputeExternalVpnGatewayInterface(v interface{}, d tpgresource.Terra transformed["ipAddress"] = transformedIpAddress } + transformedIpv6Address, err := expandComputeExternalVpnGatewayInterfaceIpv6Address(original["ipv6_address"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIpv6Address); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ipv6Address"] = transformedIpv6Address + } + req = append(req, transformed) } return req, nil @@ -619,6 +642,10 @@ func expandComputeExternalVpnGatewayInterfaceIpAddress(v interface{}, d tpgresou return v, nil } +func expandComputeExternalVpnGatewayInterfaceIpv6Address(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandComputeExternalVpnGatewayEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_firewall.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_firewall.go index e7c01d96e2b..acf7df2f19d 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_firewall.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_firewall.go @@ -403,7 +403,7 @@ is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. -Example inputs include: ["22"], ["80","443"], and +Example inputs include: [22], [80, 443], and ["12345-12349"].`, Elem: &schema.Schema{ Type: schema.TypeString, @@ -433,7 +433,7 @@ is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. 
-Example inputs include: ["22"], ["80","443"], and +Example inputs include: [22], [80, 443], and ["12345-12349"].`, Elem: &schema.Schema{ Type: schema.TypeString, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_forwarding_rule.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_forwarding_rule.go index 5d83e665fb6..9c92d9d4a0e 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_forwarding_rule.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_forwarding_rule.go @@ -123,7 +123,7 @@ func ResourceComputeForwardingRule() *schema.Resource { CustomizeDiff: customdiff.All( forwardingRuleCustomizeDiff, - tpgresource.SetLabelsDiff, + tpgresource.SetLabelsDiffWithoutAttributionLabel, tpgresource.DefaultProviderProject, ), @@ -528,6 +528,11 @@ For Private Service Connect forwarding rules that forward traffic to managed ser Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, Elem: &schema.Schema{Type: schema.TypeString}, }, + "forwarding_rule_id": { + Type: schema.TypeInt, + Computed: true, + Description: `The unique identifier number for the resource. This identifier is defined by the server.`, + }, "label_fingerprint": { Type: schema.TypeString, Computed: true, @@ -561,8 +566,8 @@ This field is only used for INTERNAL load balancing.`, "recreate_closed_psc": { Type: schema.TypeBool, Optional: true, - Default: false, Description: `This is used in PSC consumer ForwardingRule to make terraform recreate the ForwardingRule when the status is closed`, + Default: false, }, "project": { Type: schema.TypeString, @@ -752,6 +757,12 @@ func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{ } headers := make(http.Header) + // Labels cannot be set in a create for PSC forwarding rules, so remove it from the CREATE request. 
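The guard that follows strips labels from the CREATE body whenever the rule targets a service attachment, i.e. a Private Service Connect consumer rule, which the API rejects labels for at creation time. A minimal, self-contained sketch of that filtering, using a hypothetical helper name and a plain map standing in for the provider's JSON request body:

```go
package main

import (
	"fmt"
	"strings"
)

// stripLabelsForPSC mirrors the create-path guard in the hunk above: when the
// target points at a service attachment (a PSC consumer forwarding rule),
// the labels field is dropped from the request body before the POST.
// The helper name and map shape are illustrative, not the provider's API.
func stripLabelsForPSC(obj map[string]interface{}, target string) {
	if strings.Contains(target, "/serviceAttachments/") {
		delete(obj, "labels")
	}
}

func main() {
	obj := map[string]interface{}{
		"name":   "psc-rule",
		"labels": map[string]string{"team": "net"},
	}
	stripLabelsForPSC(obj, "projects/p/regions/us-central1/serviceAttachments/sa-1")
	fmt.Println(obj) // map[name:psc-rule]; labels removed before the POST
}
```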
+ if targetProp != nil && strings.Contains(targetProp.(string), "/serviceAttachments/") { + if _, ok := obj["labels"]; ok { + delete(obj, "labels") + } + } res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, Method: "POST", @@ -902,6 +913,9 @@ func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) if err := d.Set("is_mirroring_collector", flattenComputeForwardingRuleIsMirroringCollector(res["isMirroringCollector"], d, config)); err != nil { return fmt.Errorf("Error reading ForwardingRule: %s", err) } + if err := d.Set("forwarding_rule_id", flattenComputeForwardingRuleForwardingRuleId(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } if err := d.Set("psc_connection_id", flattenComputeForwardingRulePscConnectionId(res["pscConnectionId"], d, config)); err != nil { return fmt.Errorf("Error reading ForwardingRule: %s", err) } @@ -1315,6 +1329,23 @@ func flattenComputeForwardingRuleIsMirroringCollector(v interface{}, d *schema.R return v } +func flattenComputeForwardingRuleForwardingRuleId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + func flattenComputeForwardingRulePscConnectionId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_address.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_address.go index ceaf1d19e93..06b541bcdcb 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_address.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_address.go @@ -36,6 +36,7 @@ func ResourceComputeGlobalAddress() *schema.Resource { return &schema.Resource{ Create: resourceComputeGlobalAddressCreate, Read: resourceComputeGlobalAddressRead, + Update: resourceComputeGlobalAddressUpdate, Delete: resourceComputeGlobalAddressDelete, Importer: &schema.ResourceImporter{ @@ -44,6 +45,7 @@ func ResourceComputeGlobalAddress() *schema.Resource { Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), Delete: schema.DefaultTimeout(20 * time.Minute), }, @@ -100,6 +102,16 @@ address or omitted to allow GCP to choose a valid one for you.`, DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("IPV4"), Description: `The IP Version that will be used by this address. The default value is 'IPV4'. Possible values: ["IPV4", "IPV6"]`, }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels to apply to this address. A list of key->value pairs. + + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. 
+Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, "network": { Type: schema.TypeString, Optional: true, @@ -137,6 +149,25 @@ when purpose=PRIVATE_SERVICE_CONNECT`, Computed: true, Description: `Creation timestamp in RFC3339 text format.`, }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "label_fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `The fingerprint used for optimistic locking of this resource. Used +internally during updates.`, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, "project": { Type: schema.TypeString, Optional: true, @@ -178,6 +209,12 @@ func resourceComputeGlobalAddressCreate(d *schema.ResourceData, meta interface{} } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } + labelFingerprintProp, err := expandComputeGlobalAddressLabelFingerprint(d.Get("label_fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("label_fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelFingerprintProp)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { + obj["labelFingerprint"] = labelFingerprintProp + } ipVersionProp, err := expandComputeGlobalAddressIpVersion(d.Get("ip_version"), d, config) if err != nil { return err @@ -208,6 +245,12 @@ func resourceComputeGlobalAddressCreate(d *schema.ResourceData, meta interface{} } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { obj["network"] = networkProp } + labelsProp, err := expandComputeGlobalAddressEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/addresses") if err != nil { @@ -265,6 +308,66 @@ func resourceComputeGlobalAddressCreate(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error waiting to create GlobalAddress: %s", err) } + if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + labels := d.Get("labels") + terraformLables := d.Get("terraform_labels") + + // Labels cannot be set in a create. We'll have to set them here. + err = resourceComputeGlobalAddressRead(d, meta) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + // d.Get("effective_labels") will have been overridden by the Read call. 
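The surrounding create path works around the fact that labels cannot be set in the insert call: it re-reads the resource to pick up a labelFingerprint, then applies the labels through a separate setLabels request. A toy model of that fingerprint-guarded handshake follows, with made-up types in place of the Compute client; the fingerprint acts as the optimistic-locking token the schema describes.

```go
package main

import "fmt"

// address models just enough of a GCP global address for this sketch. The
// server issues a fresh labelFingerprint on every change; presenting a stale
// one means another writer got there first, so the caller must re-read and
// retry. Types and helper are illustrative, not the Compute API surface.
type address struct {
	labels      map[string]string
	fingerprint string
}

func setLabels(a *address, labels map[string]string, fingerprint string) error {
	if fingerprint != a.fingerprint {
		return fmt.Errorf("labelFingerprint %q is stale; re-read and retry", fingerprint)
	}
	a.labels = labels
	a.fingerprint = "fp-2" // the server mints a new fingerprint on every change
	return nil
}

func main() {
	a := &address{fingerprint: "fp-1"} // state right after create + read
	if err := setLabels(a, map[string]string{"env": "prod"}, "fp-1"); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(a.labels, a.fingerprint) // map[env:prod] fp-2
}
```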
+ labelsProp, err := expandComputeGlobalAddressEffectiveLabels(v, d, config) + if err != nil { + return err + } + obj["labels"] = labelsProp + labelFingerprintProp := d.Get("label_fingerprint") + obj["labelFingerprint"] = labelFingerprintProp + + url, err = tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/addresses/{{name}}/setLabels") + if err != nil { + return err + } + res, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return fmt.Errorf("Error adding labels to ComputeGlobalAddress %q: %s", d.Id(), err) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating ComputeGlobalAddress Labels", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + // Set back the labels field, as it is needed to decide the value of "labels" in the state in the read function. + if err := d.Set("labels", labels); err != nil { + return fmt.Errorf("Error setting back labels: %s", err) + } + + // Set back the terraform_labels field, as it is needed to decide the value of "terraform_labels" in the state in the read function. + if err := d.Set("terraform_labels", terraformLables); err != nil { + return fmt.Errorf("Error setting back terraform_labels: %s", err) + } + + // Set back the effective_labels field, as it is needed to decide the value of "effective_labels" in the state in the read function. + if err := d.Set("effective_labels", v); err != nil { + return fmt.Errorf("Error setting back effective_labels: %s", err) + } + } + log.Printf("[DEBUG] Finished creating GlobalAddress %q: %#v", d.Id(), res) return resourceComputeGlobalAddressRead(d, meta) @@ -324,6 +427,12 @@ func resourceComputeGlobalAddressRead(d *schema.ResourceData, meta interface{}) if err := d.Set("name", flattenComputeGlobalAddressName(res["name"], d, config)); err != nil { return fmt.Errorf("Error reading GlobalAddress: %s", err) } + if err := d.Set("labels", flattenComputeGlobalAddressLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalAddress: %s", err) + } + if err := d.Set("label_fingerprint", flattenComputeGlobalAddressLabelFingerprint(res["labelFingerprint"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalAddress: %s", err) + } if err := d.Set("ip_version", flattenComputeGlobalAddressIpVersion(res["ipVersion"], d, config)); err != nil { return fmt.Errorf("Error reading GlobalAddress: %s", err) } @@ -339,6 +448,12 @@ func resourceComputeGlobalAddressRead(d *schema.ResourceData, meta interface{}) if err := d.Set("network", flattenComputeGlobalAddressNetwork(res["network"], d, config)); err != nil { return fmt.Errorf("Error reading GlobalAddress: %s", err) } + if err := d.Set("terraform_labels", flattenComputeGlobalAddressTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalAddress: %s", err) + } + if err := d.Set("effective_labels", flattenComputeGlobalAddressEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalAddress: %s", err) + } if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { return fmt.Errorf("Error reading GlobalAddress: %s", err) } @@ -346,6 +461,80 @@ func resourceComputeGlobalAddressRead(d *schema.ResourceData, meta interface{}) return nil } +func resourceComputeGlobalAddressUpdate(d 
*schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GlobalAddress: %s", err) + } + billingProject = project + + d.Partial(true) + + if d.HasChange("label_fingerprint") || d.HasChange("effective_labels") { + obj := make(map[string]interface{}) + + labelFingerprintProp, err := expandComputeGlobalAddressLabelFingerprint(d.Get("label_fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("label_fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { + obj["labelFingerprint"] = labelFingerprintProp + } + labelsProp, err := expandComputeGlobalAddressEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/addresses/{{name}}/setLabels") + if err != nil { + return err + } + + headers := make(http.Header) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error updating GlobalAddress %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating GlobalAddress %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating GlobalAddress", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + d.Partial(false) + + return resourceComputeGlobalAddressRead(d, meta) +} + func resourceComputeGlobalAddressDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) @@ -438,6 +627,25 @@ func flattenComputeGlobalAddressName(v interface{}, d *schema.ResourceData, conf return v } +func flattenComputeGlobalAddressLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenComputeGlobalAddressLabelFingerprint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenComputeGlobalAddressIpVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -474,6 +682,25 @@ func flattenComputeGlobalAddressNetwork(v interface{}, d *schema.ResourceData, c return tpgresource.ConvertSelfLinkToV1(v.(string)) } +func flattenComputeGlobalAddressTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + 
return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("terraform_labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenComputeGlobalAddressEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func expandComputeGlobalAddressAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -486,6 +713,10 @@ func expandComputeGlobalAddressName(v interface{}, d tpgresource.TerraformResour return v, nil } +func expandComputeGlobalAddressLabelFingerprint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandComputeGlobalAddressIpVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -509,3 +740,14 @@ func expandComputeGlobalAddressNetwork(v interface{}, d tpgresource.TerraformRes } return f.RelativeLink(), nil } + +func expandComputeGlobalAddressEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_forwarding_rule.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_forwarding_rule.go index dd5213e5a47..83d353eca55 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_forwarding_rule.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_forwarding_rule.go @@ -51,7 +51,7 @@ func ResourceComputeGlobalForwardingRule() *schema.Resource { }, CustomizeDiff: customdiff.All( - tpgresource.SetLabelsDiff, + tpgresource.SetLabelsDiffWithoutAttributionLabel, tpgresource.DefaultProviderProject, ), diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ha_vpn_gateway.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ha_vpn_gateway.go index c6bc4c880bb..ae459fe1357 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ha_vpn_gateway.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ha_vpn_gateway.go @@ -78,6 +78,14 @@ character, which cannot be a dash.`, ForceNew: true, Description: `An optional description of this resource.`, }, + "gateway_ip_version": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"IPV4", "IPV6", ""}), + Description: `The IP family of the gateway IPs for the HA-VPN gateway interfaces. If not specified, IPV4 will be used. 
Default value: "IPV4" Possible values: ["IPV4", "IPV6"]`, + Default: "IPV4", + }, "region": { Type: schema.TypeString, Computed: true, @@ -90,9 +98,9 @@ character, which cannot be a dash.`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: verify.ValidateEnum([]string{"IPV4_ONLY", "IPV4_IPV6", ""}), + ValidateFunc: verify.ValidateEnum([]string{"IPV4_ONLY", "IPV4_IPV6", "IPV6_ONLY", ""}), Description: `The stack type for this VPN gateway to identify the IP protocols that are enabled. -If not specified, IPV4_ONLY will be used. Default value: "IPV4_ONLY" Possible values: ["IPV4_ONLY", "IPV4_IPV6"]`, +If not specified, IPV4_ONLY will be used. Default value: "IPV4_ONLY" Possible values: ["IPV4_ONLY", "IPV4_IPV6", "IPV6_ONLY"]`, Default: "IPV4_ONLY", }, "vpn_interfaces": { @@ -177,6 +185,12 @@ func resourceComputeHaVpnGatewayCreate(d *schema.ResourceData, meta interface{}) } else if v, ok := d.GetOkExists("stack_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(stackTypeProp)) && (ok || !reflect.DeepEqual(v, stackTypeProp)) { obj["stackType"] = stackTypeProp } + gatewayIpVersionProp, err := expandComputeHaVpnGatewayGatewayIpVersion(d.Get("gateway_ip_version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("gateway_ip_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(gatewayIpVersionProp)) && (ok || !reflect.DeepEqual(v, gatewayIpVersionProp)) { + obj["gatewayIpVersion"] = gatewayIpVersionProp + } vpnInterfacesProp, err := expandComputeHaVpnGatewayVpnInterfaces(d.Get("vpn_interfaces"), d, config) if err != nil { return err @@ -300,6 +314,9 @@ func resourceComputeHaVpnGatewayRead(d *schema.ResourceData, meta interface{}) e if err := d.Set("stack_type", flattenComputeHaVpnGatewayStackType(res["stackType"], d, config)); err != nil { return fmt.Errorf("Error reading HaVpnGateway: %s", err) } + if err := d.Set("gateway_ip_version", flattenComputeHaVpnGatewayGatewayIpVersion(res["gatewayIpVersion"], d, config)); err != nil { + return fmt.Errorf("Error reading HaVpnGateway: %s", err) + } if err := d.Set("vpn_interfaces", flattenComputeHaVpnGatewayVpnInterfaces(res["vpnInterfaces"], d, config)); err != nil { return fmt.Errorf("Error reading HaVpnGateway: %s", err) } @@ -413,6 +430,14 @@ func flattenComputeHaVpnGatewayStackType(v interface{}, d *schema.ResourceData, return v } +func flattenComputeHaVpnGatewayGatewayIpVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { + return "IPV4" + } + + return v +} + func flattenComputeHaVpnGatewayVpnInterfaces(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v @@ -488,6 +513,10 @@ func expandComputeHaVpnGatewayStackType(v interface{}, d tpgresource.TerraformRe return v, nil } +func expandComputeHaVpnGatewayGatewayIpVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandComputeHaVpnGatewayVpnInterfaces(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_health_check.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_health_check.go index 
81476fd7d0e..52bd43994cf 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_health_check.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_health_check.go @@ -485,6 +485,32 @@ which means no health check logging will be done.`, }, }, }, + "source_regions": { + Type: schema.TypeList, + Optional: true, + Description: `The list of cloud regions from which health checks are performed. If +any regions are specified, then exactly 3 regions should be specified. +The region names must be valid names of Google Cloud regions. This can +only be set for global health check. If this list is non-empty, then +there are restrictions on what other health check fields are supported +and what other resources can use this health check: + +* SSL, HTTP2, and GRPC protocols are not supported. + +* The TCP request field is not supported. + +* The proxyHeader field for HTTP, HTTPS, and TCP is not supported. + +* The checkIntervalSec field must be at least 30. + +* The health check cannot be used with BackendService nor with managed +instance group auto-healing.`, + MinItems: 3, + MaxItems: 3, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, "ssl_health_check": { Type: schema.TypeList, Optional: true, @@ -707,6 +733,12 @@ func resourceComputeHealthCheckCreate(d *schema.ResourceData, meta interface{}) } else if v, ok := d.GetOkExists("timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(timeoutSecProp)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { obj["timeoutSec"] = timeoutSecProp } + sourceRegionsProp, err := expandComputeHealthCheckSourceRegions(d.Get("source_regions"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_regions"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceRegionsProp)) && (ok || !reflect.DeepEqual(v, sourceRegionsProp)) { + obj["sourceRegions"] = sourceRegionsProp + } unhealthyThresholdProp, err := expandComputeHealthCheckUnhealthyThreshold(d.Get("unhealthy_threshold"), d, config) if err != nil { return err @@ -877,6 +909,9 @@ func resourceComputeHealthCheckRead(d *schema.ResourceData, meta interface{}) er if err := d.Set("timeout_sec", flattenComputeHealthCheckTimeoutSec(res["timeoutSec"], d, config)); err != nil { return fmt.Errorf("Error reading HealthCheck: %s", err) } + if err := d.Set("source_regions", flattenComputeHealthCheckSourceRegions(res["sourceRegions"], d, config)); err != nil { + return fmt.Errorf("Error reading HealthCheck: %s", err) + } if err := d.Set("unhealthy_threshold", flattenComputeHealthCheckUnhealthyThreshold(res["unhealthyThreshold"], d, config)); err != nil { return fmt.Errorf("Error reading HealthCheck: %s", err) } @@ -957,6 +992,12 @@ func resourceComputeHealthCheckUpdate(d *schema.ResourceData, meta interface{}) } else if v, ok := d.GetOkExists("timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { obj["timeoutSec"] = timeoutSecProp } + sourceRegionsProp, err := expandComputeHealthCheckSourceRegions(d.Get("source_regions"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_regions"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceRegionsProp)) { + obj["sourceRegions"] = sourceRegionsProp + } unhealthyThresholdProp, err := expandComputeHealthCheckUnhealthyThreshold(d.Get("unhealthy_threshold"), d, config) if err != 
nil { return err @@ -1191,6 +1232,10 @@ func flattenComputeHealthCheckTimeoutSec(v interface{}, d *schema.ResourceData, return v // let terraform core handle it otherwise } +func flattenComputeHealthCheckSourceRegions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenComputeHealthCheckUnhealthyThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { @@ -1611,6 +1656,10 @@ func expandComputeHealthCheckTimeoutSec(v interface{}, d tpgresource.TerraformRe return v, nil } +func expandComputeHealthCheckSourceRegions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandComputeHealthCheckUnhealthyThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance.go index b5d1f763296..e555cf2e2ed 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance.go @@ -22,6 +22,7 @@ import ( "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" "google.golang.org/api/compute/v1" ) @@ -51,6 +52,13 @@ func IpCidrRangeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { } var ( + advancedMachineFeaturesKeys = []string{ + "advanced_machine_features.0.enable_nested_virtualization", + "advanced_machine_features.0.threads_per_core", + "advanced_machine_features.0.turbo_mode", + "advanced_machine_features.0.visible_core_count", + } + bootDiskKeys = []string{ "boot_disk.0.auto_delete", "boot_disk.0.device_name", @@ -70,6 +78,8 @@ var ( "boot_disk.0.initialize_params.0.provisioned_iops", "boot_disk.0.initialize_params.0.provisioned_throughput", "boot_disk.0.initialize_params.0.enable_confidential_compute", + "boot_disk.0.initialize_params.0.storage_pool", + "boot_disk.0.initialize_params.0.resource_policies", } schedulingKeys = []string{ @@ -92,6 +102,36 @@ var ( } ) +// This checks if the project provided in subnetwork's self_link matches +// the project provided in subnetwork_project not to produce a confusing plan diff. 
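The customize-diff check declared below only fires when the subnetwork value is actually a self link, and then compares the project embedded in that link against subnetwork_project. A stripped-down illustration of the project extraction it relies on, assuming a simplified parser in place of the provider's tpgresource helper:

```go
package main

import (
	"fmt"
	"strings"
)

// projectFromRegionalSelfLink is a simplified stand-in for
// tpgresource.GetProjectFromRegionalSelfLink, included only to make the
// validation concrete: it returns the path segment following "projects".
// The real check additionally skips values that are not self links at all.
func projectFromRegionalSelfLink(link string) string {
	parts := strings.Split(link, "/")
	for i := 0; i+1 < len(parts); i++ {
		if parts[i] == "projects" {
			return parts[i+1]
		}
	}
	return ""
}

func main() {
	subnetwork := "projects/p1/regions/us-central1/subnetworks/subnet-a"
	subnetworkProject := "p2"
	if got := projectFromRegionalSelfLink(subnetwork); got != subnetworkProject {
		fmt.Printf("project in subnetwork's self_link %q must match subnetwork_project %q\n",
			subnetwork, subnetworkProject)
	}
}
```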
+func validateSubnetworkProject(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error { + // separate func to allow unit testing + return ValidateSubnetworkProjectFunc(d) +} + +func ValidateSubnetworkProjectFunc(d tpgresource.TerraformResourceDiff) error { + oldCount, newCount := d.GetChange("network_interface.#") + if oldCount.(int) != newCount.(int) { + return nil + } + for i := 0; i < newCount.(int); i++ { + prefix := fmt.Sprintf("network_interface.%d", i) + subnetworkProject := d.Get(prefix + ".subnetwork_project") + subnetwork := d.Get(prefix + ".subnetwork") + + _, err := tpgresource.GetRelativePath(subnetwork.(string)) + if err != nil { + log.Printf("[DEBUG] Subnetwork %q is not a selflink", subnetwork) + return nil + } + + if tpgresource.GetProjectFromRegionalSelfLink(subnetwork.(string)) != subnetworkProject.(string) { + return fmt.Errorf("project in subnetwork's self_link %q must match subnetwork_project %q", subnetwork, subnetworkProject) + } + } + return nil +} + // network_interface.[d].network_ip can only change when subnet/network // is also changing. Validate that if network_ip is changing this scenario // holds up to par. @@ -109,11 +149,13 @@ func forceNewIfNetworkIPNotUpdatableFunc(d tpgresource.TerraformResourceDiff) er for i := 0; i < newCount.(int); i++ { prefix := fmt.Sprintf("network_interface.%d", i) networkKey := prefix + ".network" + oldN, newN := d.GetChange(networkKey) subnetworkKey := prefix + ".subnetwork" + oldS, newS := d.GetChange(subnetworkKey) subnetworkProjectKey := prefix + ".subnetwork_project" networkIPKey := prefix + ".network_ip" - if d.HasChange(networkIPKey) { - if !d.HasChange(networkKey) && !d.HasChange(subnetworkKey) && !d.HasChange(subnetworkProjectKey) { + if d.HasChange(networkIPKey) && d.Get(networkIPKey).(string) != "" { + if tpgresource.CompareSelfLinkOrResourceName("", oldS.(string), newS.(string), nil) && !d.HasChange(subnetworkProjectKey) && tpgresource.CompareSelfLinkOrResourceName("", oldN.(string), newN.(string), nil) { if err := d.ForceNew(networkIPKey); err != nil { return err } @@ -197,6 +239,13 @@ func ResourceComputeInstance() *schema.Resource { Description: `The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource.`, }, + "interface": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"SCSI", "NVME"}, false), + Description: `The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.)`, + }, + "kms_key_self_link": { Type: schema.TypeString, Optional: true, @@ -264,6 +313,18 @@ func ResourceComputeInstance() *schema.Resource { Description: `A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.`, }, + "resource_policies": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + ForceNew: true, + Computed: true, + AtLeastOneOf: initializeParamsKeys, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + MaxItems: 1, + Description: `A list of self_links of resource policies to attach to the instance's boot disk. Modifying this list will cause the instance to recreate. 
Currently a max of 1 resource policy is supported.`, + }, + "provisioned_iops": { Type: schema.TypeInt, Optional: true, @@ -289,6 +350,15 @@ func ResourceComputeInstance() *schema.Resource { ForceNew: true, Description: `A flag to enable confidential compute mode on boot disk`, }, + + "storage_pool": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: initializeParamsKeys, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `The URL of the storage pool in which the new disk is created`, + }, }, }, }, @@ -325,10 +395,11 @@ func ResourceComputeInstance() *schema.Resource { }, "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the instance. One of name or self_link must be provided.`, + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateRFC1035Name(1, 63), + Description: `The name of the instance. One of name or self_link must be provided.`, }, "network_interface": { @@ -617,7 +688,6 @@ func ResourceComputeInstance() *schema.Resource { Optional: true, Computed: true, ForceNew: true, - ConfigMode: schema.SchemaConfigModeAttr, Description: `List of the type and count of accelerator cards attached to the instance.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -964,19 +1034,26 @@ be from 0 to 999,999,999 inclusive.`, "enable_nested_virtualization": { Type: schema.TypeBool, Optional: true, - AtLeastOneOf: []string{"advanced_machine_features.0.enable_nested_virtualization", "advanced_machine_features.0.threads_per_core"}, + AtLeastOneOf: advancedMachineFeaturesKeys, Description: `Whether to enable nested virtualization or not.`, }, "threads_per_core": { Type: schema.TypeInt, Optional: true, - AtLeastOneOf: []string{"advanced_machine_features.0.enable_nested_virtualization", "advanced_machine_features.0.threads_per_core"}, + AtLeastOneOf: advancedMachineFeaturesKeys, Description: `The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.`, }, + "turbo_mode": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: advancedMachineFeaturesKeys, + Description: `Turbo frequency mode to use for the instance. Currently supported modes is "ALL_CORE_MAX".`, + ValidateFunc: validation.StringInSlice([]string{"ALL_CORE_MAX"}, false), + }, "visible_core_count": { Type: schema.TypeInt, Optional: true, - AtLeastOneOf: []string{"advanced_machine_features.0.enable_nested_virtualization", "advanced_machine_features.0.threads_per_core", "advanced_machine_features.0.visible_core_count"}, + AtLeastOneOf: advancedMachineFeaturesKeys, Description: `The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance\'s nominal CPU count and the underlying platform\'s SMT width.`, }, }, @@ -1001,9 +1078,10 @@ be from 0 to 999,999,999 inclusive.`, Type: schema.TypeString, Optional: true, Description: ` - Specifies which confidential computing technology to use. - This could be one of the following values: SEV, SEV_SNP. - If SEV_SNP, min_cpu_platform = "AMD Milan" is currently required.`, + The confidential computing technology the instance uses. + SEV is an AMD feature. TDX is an Intel feature. One of the following + values is required: SEV, SEV_SNP, TDX. 
If SEV_SNP, min_cpu_platform = + "AMD Milan" is currently required.`, AtLeastOneOf: []string{"confidential_instance_config.0.enable_confidential_compute", "confidential_instance_config.0.confidential_instance_type"}, }, }, @@ -1012,8 +1090,8 @@ be from 0 to 999,999,999 inclusive.`, "desired_status": { Type: schema.TypeString, Optional: true, - ValidateFunc: validation.StringInSlice([]string{"RUNNING", "TERMINATED"}, false), - Description: `Desired status of the instance. Either "RUNNING" or "TERMINATED".`, + ValidateFunc: validation.StringInSlice([]string{"RUNNING", "TERMINATED", "SUSPENDED"}, false), + Description: `Desired status of the instance. Either "RUNNING", "SUSPENDED" or "TERMINATED".`, }, "current_status": { Type: schema.TypeString, @@ -1051,6 +1129,12 @@ be from 0 to 999,999,999 inclusive.`, Description: `The server-assigned unique identifier of this instance.`, }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "label_fingerprint": { Type: schema.TypeString, Computed: true, @@ -1136,6 +1220,14 @@ be from 0 to 999,999,999 inclusive.`, }, }, }, + + "key_revocation_action_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"STOP", "NONE", ""}, false), + Description: `Action to be taken when a customer's encryption key is revoked. Supports "STOP" and "NONE", with "NONE" being the default.`, + }, }, CustomizeDiff: customdiff.All( tpgresource.DefaultProviderProject, @@ -1146,7 +1238,7 @@ be from 0 to 999,999,999 inclusive.`, }, suppressEmptyGuestAcceleratorDiff, ), - desiredStatusDiff, + validateSubnetworkProject, forceNewIfNetworkIPNotUpdatable, tpgresource.SetLabelsDiff, ), @@ -1296,6 +1388,7 @@ func expandComputeInstance(project string, d *schema.ResourceData, config *trans DisplayDevice: expandDisplayDevice(d), ResourcePolicies: tpgresource.ConvertStringArr(d.Get("resource_policies").([]interface{})), ReservationAffinity: reservationAffinity, + KeyRevocationActionType: d.Get("key_revocation_action_type").(string), }, nil } @@ -1321,10 +1414,34 @@ func getAllStatusBut(status string) []string { return computeInstanceStatus } -func waitUntilInstanceHasDesiredStatus(config *transport_tpg.Config, d *schema.ResourceData) error { - desiredStatus := d.Get("desired_status").(string) +func changeInstanceStatusOnCreation(config *transport_tpg.Config, d *schema.ResourceData, project, zone, status, userAgent string) error { + var op *compute.Operation + var err error + if status == "TERMINATED" { + op, err = config.NewComputeClient(userAgent).Instances.Stop(project, zone, d.Get("name").(string)).Do() + } else if status == "SUSPENDED" { + op, err = config.NewComputeClient(userAgent).Instances.Suspend(project, zone, d.Get("name").(string)).Do() + } + if err != nil { + return fmt.Errorf("Error changing instance status after creation: %s", err) + } + + waitErr := ComputeOperationWaitTime(config, op, project, "changing instance status", userAgent, d.Timeout(schema.TimeoutCreate)) + if waitErr != nil { + d.SetId("") + return waitErr + } + + err = waitUntilInstanceHasDesiredStatus(config, d, status) + if err != nil { + return fmt.Errorf("Error waiting for status: %s", err) + } - if desiredStatus != "" { + return nil +} + +func waitUntilInstanceHasDesiredStatus(config *transport_tpg.Config, d *schema.ResourceData, status string) error { + if status != "" { stateRefreshFunc := func() (interface{}, string, error) { instance, err := 
getInstance(config, d) if err != nil || instance == nil { @@ -1335,9 +1452,9 @@ func waitUntilInstanceHasDesiredStatus(config *transport_tpg.Config, d *schema.R } stateChangeConf := retry.StateChangeConf{ Delay: 5 * time.Second, - Pending: getAllStatusBut(desiredStatus), + Pending: getAllStatusBut(status), Refresh: stateRefreshFunc, - Target: []string{desiredStatus}, + Target: []string{status}, Timeout: d.Timeout(schema.TimeoutUpdate), MinTimeout: 2 * time.Second, } @@ -1345,7 +1462,7 @@ func waitUntilInstanceHasDesiredStatus(config *transport_tpg.Config, d *schema.R if err != nil { return fmt.Errorf( - "Error waiting for instance to reach desired status %s: %s", desiredStatus, err) + "Error waiting for instance to reach desired status %s: %s", status, err) } } @@ -1398,11 +1515,20 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err return waitErr } - err = waitUntilInstanceHasDesiredStatus(config, d) + err = waitUntilInstanceHasDesiredStatus(config, d, "RUNNING") if err != nil { return fmt.Errorf("Error waiting for status: %s", err) } + if val, ok := d.GetOk("desired_status"); ok { + if val.(string) != "RUNNING" { + err = changeInstanceStatusOnCreation(config, d, project, zone.Name, val.(string), userAgent) + if err != nil { + return fmt.Errorf("Error changing instance status after creation: %s", err) + } + } + } + return resourceComputeInstanceRead(d, meta) } @@ -1574,6 +1700,7 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error di["disk_encryption_key_sha256"] = key.Sha256 } } + // We want the disks to remain in the order we set in the config, so if a disk // is present in the config, make sure it's at the correct index. Otherwise, append it. if inConfig { @@ -1635,6 +1762,9 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("instance_id", fmt.Sprintf("%d", instance.Id)); err != nil { return fmt.Errorf("Error setting instance_id: %s", err) } + if err := d.Set("creation_timestamp", instance.CreationTimestamp); err != nil { + return fmt.Errorf("Error setting creation_timestamp: %s", err) + } if err := d.Set("project", project); err != nil { return fmt.Errorf("Error setting project: %s", err) } @@ -1667,6 +1797,9 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("reservation_affinity", flattenReservationAffinity(instance.ReservationAffinity)); err != nil { return fmt.Errorf("Error setting reservation_affinity: %s", err) } + if err := d.Set("key_revocation_action_type", instance.KeyRevocationActionType); err != nil { + return fmt.Errorf("Error setting key_revocation_action_type: %s", err) + } d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, zone, instance.Name)) @@ -1978,6 +2111,9 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err Fingerprint: instNetworkInterface.Fingerprint, ForceSendFields: []string{"AliasIpRanges"}, } + if commonAliasIpRanges := CheckForCommonAliasIp(instNetworkInterface, networkInterface); len(commonAliasIpRanges) > 0 { + ni.AliasIpRanges = commonAliasIpRanges + } op, err := config.NewComputeClient(userAgent).Instances.UpdateNetworkInterface(project, zone, instance.Name, networkName, ni).Do() if err != nil { return errwrap.Wrapf("Error removing alias_ip_range: {{err}}", err) @@ -2248,21 +2384,33 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err needToStopInstanceBeforeUpdating := scopesChange || 
d.HasChange("service_account.0.email") || d.HasChange("machine_type") || d.HasChange("min_cpu_platform") || d.HasChange("enable_display") || d.HasChange("shielded_instance_config") || len(updatesToNIWhileStopped) > 0 || bootRequiredSchedulingChange || d.HasChange("advanced_machine_features") if d.HasChange("desired_status") && !needToStopInstanceBeforeUpdating { - desiredStatus := d.Get("desired_status").(string) + previousStatus, desiredStatus := d.GetChange("desired_status") if desiredStatus != "" { var op *compute.Operation if desiredStatus == "RUNNING" { - op, err = startInstanceOperation(d, config) - if err != nil { - return errwrap.Wrapf("Error starting instance: {{err}}", err) + if previousStatus == "SUSPENDED" { + op, err = config.NewComputeClient(userAgent).Instances.Resume(project, zone, instance.Name).Do() + if err != nil { + return err + } + } else { + op, err = startInstanceOperation(d, config) + if err != nil { + return errwrap.Wrapf("Error starting instance: {{err}}", err) + } } } else if desiredStatus == "TERMINATED" { op, err = config.NewComputeClient(userAgent).Instances.Stop(project, zone, instance.Name).Do() if err != nil { return err } + } else if desiredStatus == "SUSPENDED" { + op, err = config.NewComputeClient(userAgent).Instances.Suspend(project, zone, instance.Name).Do() + if err != nil { + return err + } } opErr := ComputeOperationWaitTime( config, op, project, "updating status", userAgent, @@ -2546,6 +2694,10 @@ func expandAttachedDisk(diskConfig map[string]interface{}, d *schema.ResourceDat disk.DeviceName = v.(string) } + if v, ok := d.GetOk("boot_disk.0.interface"); ok && v != "" { + disk.Interface = v.(string) + } + keyValue, keyOk := diskConfig["disk_encryption_key_raw"] if keyOk { if keyValue != "" { @@ -2640,25 +2792,6 @@ func suppressEmptyGuestAcceleratorDiff(_ context.Context, d *schema.ResourceDiff return nil } -// return an error if the desired_status field is set to a value other than RUNNING on Create. 
-func desiredStatusDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { - // when creating an instance, name is not set - oldName, _ := diff.GetChange("name") - - if oldName == nil || oldName == "" { - _, newDesiredStatus := diff.GetChange("desired_status") - - if newDesiredStatus == nil || newDesiredStatus == "" { - return nil - } else if newDesiredStatus != "RUNNING" { - return fmt.Errorf("When creating an instance, desired_status can only accept RUNNING value") - } - return nil - } - - return nil -} - func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) @@ -2746,6 +2879,10 @@ func expandBootDisk(d *schema.ResourceData, config *transport_tpg.Config, projec disk.DeviceName = v.(string) } + if v, ok := d.GetOk("boot_disk.0.interface"); ok { + disk.Interface = v.(string) + } + if v, ok := d.GetOk("boot_disk.0.disk_encryption_key_raw"); ok { if v != "" { disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{ @@ -2823,6 +2960,14 @@ func expandBootDisk(d *schema.ResourceData, config *transport_tpg.Config, projec if _, ok := d.GetOk("boot_disk.0.initialize_params.0.resource_manager_tags"); ok { disk.InitializeParams.ResourceManagerTags = tpgresource.ExpandStringMap(d, "boot_disk.0.initialize_params.0.resource_manager_tags") } + + if _, ok := d.GetOk("boot_disk.0.initialize_params.0.resource_policies"); ok { + disk.InitializeParams.ResourcePolicies = tpgresource.ConvertStringArr(d.Get("boot_disk.0.initialize_params.0.resource_policies").([]interface{})) + } + + if v, ok := d.GetOk("boot_disk.0.initialize_params.0.storage_pool"); ok { + disk.InitializeParams.StoragePool = v.(string) + } } if v, ok := d.GetOk("boot_disk.0.mode"); ok { @@ -2842,6 +2987,9 @@ func flattenBootDisk(d *schema.ResourceData, disk *compute.AttachedDisk, config // originally specified to avoid diffs. "disk_encryption_key_raw": d.Get("boot_disk.0.disk_encryption_key_raw"), } + if _, ok := d.GetOk("boot_disk.0.interface"); ok { + result["interface"] = disk.Interface + } diskDetails, err := getDisk(disk.Source, d, config) if err != nil { @@ -2862,9 +3010,11 @@ func flattenBootDisk(d *schema.ResourceData, disk *compute.AttachedDisk, config "size": diskDetails.SizeGb, "labels": diskDetails.Labels, "resource_manager_tags": d.Get("boot_disk.0.initialize_params.0.resource_manager_tags"), + "resource_policies": diskDetails.ResourcePolicies, "provisioned_iops": diskDetails.ProvisionedIops, "provisioned_throughput": diskDetails.ProvisionedThroughput, "enable_confidential_compute": diskDetails.EnableConfidentialCompute, + "storage_pool": tpgresource.GetResourceNameFromSelfLink(diskDetails.StoragePool), }} } @@ -2996,3 +3146,20 @@ func isEmptyServiceAccountBlock(d *schema.ResourceData) bool { } return false } + +// Alias ip ranges cannot be removed and created at the same time. 
This checks if there are any unchanged alias ip ranges +// to be kept in between the PATCH operations on Network Interface +func CheckForCommonAliasIp(old, new *compute.NetworkInterface) []*compute.AliasIpRange { + newAliasIpMap := make(map[string]bool) + for _, ipRange := range new.AliasIpRanges { + newAliasIpMap[ipRange.IpCidrRange] = true + } + + resultAliasIpRanges := make([]*compute.AliasIpRange, 0) + for _, val := range old.AliasIpRanges { + if newAliasIpMap[val.IpCidrRange] { + resultAliasIpRanges = append(resultAliasIpRanges, val) + } + } + return resultAliasIpRanges +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_from_template.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_from_template.go index bcf293fa016..754e9e81ba6 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_from_template.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_from_template.go @@ -48,17 +48,6 @@ func computeInstanceFromTemplateSchema() map[string]*schema.Schema { s[field].Optional = true } - // schema.SchemaConfigModeAttr allows these fields to be removed in Terraform 0.12. - // Passing field_name = [] in this mode differentiates between an intentionally empty - // block vs an ignored computed block. - nic := s["network_interface"].Elem.(*schema.Resource) - nic.Schema["alias_ip_range"].ConfigMode = schema.SchemaConfigModeAttr - nic.Schema["access_config"].ConfigMode = schema.SchemaConfigModeAttr - - for _, field := range []string{"attached_disk", "guest_accelerator", "service_account", "scratch_disk"} { - s[field].ConfigMode = schema.SchemaConfigModeAttr - } - // Remove deprecated/removed fields that are never d.Set. We can't // programmatically remove all of them, because some of them still have d.Set // calls. @@ -259,6 +248,15 @@ func adjustInstanceFromTemplateDisks(d *schema.ResourceData, config *transport_t // only have the name (since they're global). disk.InitializeParams.DiskType = fmt.Sprintf("zones/%s/diskTypes/%s", zone.Name, dt) } + if rp := disk.InitializeParams.ResourcePolicies; len(rp) > 0 { + // Instances need a URL for the resource policy, but instance templates + // only have the name (since they're global). + for i := range rp { + rp[i], _ = parseUniqueId(rp[i]) // in some cases the API translation doesn't work and returns entire url when only name is provided. 
And allows for id to be passed as well + rp[i] = fmt.Sprintf("projects/%s/regions/%s/resourcePolicies/%s", project, regionFromUrl(zone.Region), rp[i]) + } + disk.InitializeParams.ResourcePolicies = rp + } } disks = append(disks, disk) break diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_group_manager.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_group_manager.go index 0fa24f4563e..e2d692f26a7 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_group_manager.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_group_manager.go @@ -247,7 +247,7 @@ func ResourceComputeInstanceGroupManager() *schema.Resource { Optional: true, Computed: true, ConflictsWith: []string{"update_policy.0.max_surge_percent"}, - Description: `The maximum number of instances that can be created above the specified targetSize during the update process. Conflicts with max_surge_percent. If neither is set, defaults to 1`, + Description: `Specifies a fixed number of VM instances. This must be a positive integer. Conflicts with max_surge_percent. Both cannot be 0`, }, "max_surge_percent": { @@ -255,7 +255,7 @@ func ResourceComputeInstanceGroupManager() *schema.Resource { Optional: true, ConflictsWith: []string{"update_policy.0.max_surge_fixed"}, ValidateFunc: validation.IntBetween(0, 100), - Description: `The maximum number of instances(calculated as percentage) that can be created above the specified targetSize during the update process. Conflicts with max_surge_fixed.`, + Description: `Specifies a percentage of instances between 0 to 100%, inclusive. For example, specify 80 for 80%. Conflicts with max_surge_fixed.`, }, "max_unavailable_fixed": { @@ -263,7 +263,7 @@ func ResourceComputeInstanceGroupManager() *schema.Resource { Optional: true, Computed: true, ConflictsWith: []string{"update_policy.0.max_unavailable_percent"}, - Description: `The maximum number of instances that can be unavailable during the update process. Conflicts with max_unavailable_percent. If neither is set, defaults to 1.`, + Description: `Specifies a fixed number of VM instances. This must be a positive integer.`, }, "max_unavailable_percent": { @@ -271,7 +271,7 @@ func ResourceComputeInstanceGroupManager() *schema.Resource { Optional: true, ConflictsWith: []string{"update_policy.0.max_unavailable_fixed"}, ValidateFunc: validation.IntBetween(0, 100), - Description: `The maximum number of instances(calculated as percentage) that can be unavailable during the update process. Conflicts with max_unavailable_fixed.`, + Description: `Specifies a percentage of instances between 0 to 100%, inclusive. 
For example, specify 80 for 80%.`, }, "replacement_method": { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_template.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_template.go index 6dd2d5806d9..709fc0b8813 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_template.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_template.go @@ -89,14 +89,14 @@ func ResourceComputeInstanceTemplate() *schema.Resource { Optional: true, Computed: true, ForceNew: true, - Description: `Creates a unique name beginning with the specified prefix. Conflicts with name.`, + Description: `Creates a unique name beginning with the specified prefix. Conflicts with name. Max length is 54 characters. Prefixes with lengths longer than 37 characters will use a shortened UUID that will be more prone to collisions.`, ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { // https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource - // uuid is 26 characters, limit the prefix to 37. + // shortened uuid is 9 characters, limit the prefix to 55. value := v.(string) - if len(value) > 37 { + if len(value) > 54 { errors = append(errors, fmt.Errorf( - "%q cannot be longer than 37 characters, name is limited to 63", k)) + "%q cannot be longer than 54 characters, name is limited to 63", k)) } return }, @@ -171,7 +171,15 @@ func ResourceComputeInstanceTemplate() *schema.Resource { Optional: true, ForceNew: true, Computed: true, - Description: `Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. Values must be between 10,000 and 120,000. For more details, see the [Extreme persistent disk documentation](https://cloud.google.com/compute/docs/disks/extreme-persistent-disk).`, + Description: `Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. For more details, see the [Extreme persistent disk documentation](https://cloud.google.com/compute/docs/disks/extreme-persistent-disk) or the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks) depending on the selected disk_type.`, + }, + + "provisioned_throughput": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + Description: `Indicates how much throughput to provision for the disk, in MB/s. This sets the amount of data that can be read or written from the disk per second. Values must greater than or equal to 1. 
For more details, see the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks).`, }, "resource_manager_tags": { @@ -771,6 +779,13 @@ be from 0 to 999,999,999 inclusive.`, Description: `A special URI of the created resource that uniquely identifies this instance template.`, }, + "creation_timestamp": { + Type: schema.TypeString, + ForceNew: true, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "service_account": { Type: schema.TypeList, MaxItems: 1, @@ -866,9 +881,10 @@ be from 0 to 999,999,999 inclusive.`, Optional: true, ForceNew: true, Description: ` - Specifies which confidential computing technology to use. - This could be one of the following values: SEV, SEV_SNP. - If SEV_SNP, min_cpu_platform = "AMD Milan" is currently required.`, + The confidential computing technology the instance uses. + SEV is an AMD feature. TDX is an Intel feature. One of the following + values is required: SEV, SEV_SNP, TDX. If SEV_SNP, min_cpu_platform = + "AMD Milan" is currently required.`, AtLeastOneOf: []string{"confidential_instance_config.0.enable_confidential_compute", "confidential_instance_config.0.confidential_instance_type"}, }, }, @@ -896,6 +912,12 @@ be from 0 to 999,999,999 inclusive.`, ForceNew: true, Description: `The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.`, }, + "turbo_mode": { + Type: schema.TypeString, + Optional: true, + Description: `Turbo frequency mode to use for the instance. Currently supported modes is "ALL_CORE_MAX".`, + ValidateFunc: validation.StringInSlice([]string{"ALL_CORE_MAX"}, false), + }, "visible_core_count": { Type: schema.TypeInt, Optional: true, @@ -958,7 +980,7 @@ be from 0 to 999,999,999 inclusive.`, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, Description: `A set of key/value label pairs to assign to instances created from this template. - + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.`, }, @@ -1036,6 +1058,14 @@ be from 0 to 999,999,999 inclusive.`, }, }, }, + + "key_revocation_action_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"NONE", "STOP", ""}, false), + Description: `Action to be taken when a customer's encryption key is revoked. Supports "STOP" and "NONE", with "NONE" being the default.`, + }, }, UseJSONNumber: true, } @@ -1180,7 +1210,7 @@ func buildDisks(d *schema.ResourceData, config *transport_tpg.Config) ([]*comput } if v, ok := d.GetOk(prefix + ".source"); ok { disk.Source = v.(string) - conflicts := []string{"disk_size_gb", "disk_name", "disk_type", "provisioned_iops", "source_image", "source_snapshot", "labels"} + conflicts := []string{"disk_size_gb", "disk_name", "disk_type", "provisioned_iops", "provisioned_throughput", "source_image", "source_snapshot", "labels"} for _, conflict := range conflicts { if _, ok := d.GetOk(prefix + "." 
+ conflict); ok { return nil, fmt.Errorf("Cannot use `source` with any of the fields in %s", conflicts) @@ -1203,6 +1233,9 @@ func buildDisks(d *schema.ResourceData, config *transport_tpg.Config) ([]*comput if v, ok := d.GetOk(prefix + ".provisioned_iops"); ok { disk.InitializeParams.ProvisionedIops = int64(v.(int)) } + if v, ok := d.GetOk(prefix + ".provisioned_throughput"); ok { + disk.InitializeParams.ProvisionedThroughput = int64(v.(int)) + } if _, ok := d.GetOk(prefix + ".resource_manager_tags"); ok { disk.InitializeParams.ResourceManagerTags = tpgresource.ExpandStringMap(d, prefix+".resource_manager_tags") } @@ -1362,6 +1395,7 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac AdvancedMachineFeatures: expandAdvancedMachineFeatures(d), ResourcePolicies: resourcePolicies, ReservationAffinity: reservationAffinity, + KeyRevocationActionType: d.Get("key_revocation_action_type").(string), } if _, ok := d.GetOk("effective_labels"); ok { @@ -1376,7 +1410,12 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac if v, ok := d.GetOk("name"); ok { itName = v.(string) } else if v, ok := d.GetOk("name_prefix"); ok { - itName = id.PrefixedUniqueId(v.(string)) + prefix := v.(string) + if len(prefix) > 37 { + itName = tpgresource.ReducedPrefixedUniqueId(prefix) + } else { + itName = id.PrefixedUniqueId(prefix) + } } else { itName = id.UniqueId() } @@ -1410,12 +1449,13 @@ func resourceComputeInstanceTemplateUpdate(d *schema.ResourceData, meta interfac } type diskCharacteristics struct { - mode string - diskType string - diskSizeGb string - autoDelete bool - sourceImage string - provisionedIops string + mode string + diskType string + diskSizeGb string + autoDelete bool + sourceImage string + provisionedIops string + provisionedThroughput string } func diskCharacteristicsFromMap(m map[string]interface{}) diskCharacteristics { @@ -1451,6 +1491,12 @@ func diskCharacteristicsFromMap(m map[string]interface{}) diskCharacteristics { dc.provisionedIops = fmt.Sprintf("%v", v) } + if v := m["provisioned_throughput"]; v != nil { + // Terraform and GCP return ints as different types (int vs int64), so just + // use strings to compare for simplicity. + dc.provisionedThroughput = fmt.Sprintf("%v", v) + } + return dc } @@ -1474,6 +1520,7 @@ func flattenDisk(disk *compute.AttachedDisk, configDisk map[string]any, defaultP } diskMap["disk_type"] = disk.InitializeParams.DiskType diskMap["provisioned_iops"] = disk.InitializeParams.ProvisionedIops + diskMap["provisioned_throughput"] = disk.InitializeParams.ProvisionedThroughput diskMap["disk_name"] = disk.InitializeParams.DiskName diskMap["labels"] = disk.InitializeParams.Labels // The API does not return a disk size value for scratch disks. 
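
An aside on the arithmetic behind the name_prefix change above: GCE instance template names are capped at 63 characters, the unique suffix produced by id.PrefixedUniqueId is 26 characters (hence the old 37-character prefix cap), and the shortened suffix behind tpgresource.ReducedPrefixedUniqueId is 9 characters (hence the new 54-character cap). A minimal, self-contained sketch of that budget, with stand-in suffix values rather than the provider's helpers:

    package main

    import "fmt"

    const (
        maxNameLen     = 63 // GCE resource name limit
        fullSuffixLen  = 26 // suffix length produced by id.PrefixedUniqueId
        shortSuffixLen = 9  // suffix length of the shortened-UUID path
    )

    // chooseName mirrors the create-path branching: prefixes of up to 37
    // characters keep the long, collision-resistant suffix; prefixes of up
    // to 54 characters fall back to the shorter, more collision-prone one.
    func chooseName(prefix, fullSuffix, shortSuffix string) (string, error) {
        switch {
        case len(prefix) > maxNameLen-shortSuffixLen: // > 54: reject
            return "", fmt.Errorf("%q cannot be longer than %d characters", prefix, maxNameLen-shortSuffixLen)
        case len(prefix) > maxNameLen-fullSuffixLen: // > 37: shortened suffix
            return prefix + shortSuffix, nil
        default: // <= 37: full-length suffix
            return prefix + fullSuffix, nil
        }
    }

    func main() {
        name, err := chooseName("tpl-", "20260212004158000000000001", "a1b2c3d4e")
        fmt.Println(name, len(name), err) // short prefix keeps the 26-char suffix; total stays <= 63
    }
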
They are largely only one size, @@ -1713,6 +1760,9 @@ func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{ if err = d.Set("self_link_unique", fmt.Sprintf("%v?uniqueId=%v", instanceTemplate.SelfLink, instanceTemplate.Id)); err != nil { return fmt.Errorf("Error setting self_link_unique: %s", err) } + if err = d.Set("creation_timestamp", instanceTemplate.CreationTimestamp); err != nil { + return fmt.Errorf("Error setting creation_timestamp: %s", err) + } if err = d.Set("name", instanceTemplate.Name); err != nil { return fmt.Errorf("Error setting name: %s", err) } @@ -1742,6 +1792,9 @@ func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{ if err = d.Set("instance_description", instanceTemplate.Properties.Description); err != nil { return fmt.Errorf("Error setting instance_description: %s", err) } + if err = d.Set("key_revocation_action_type", instanceTemplate.Properties.KeyRevocationActionType); err != nil { + return fmt.Errorf("Error setting key_revocation_action_type: %s", err) + } if err = d.Set("project", project); err != nil { return fmt.Errorf("Error setting project: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_interconnect.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_interconnect.go index 93b986f3df7..b4725cf0d7a 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_interconnect.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_interconnect.go @@ -32,6 +32,10 @@ import ( "github.com/hashicorp/terraform-provider-google/google/verify" ) +func InterconnectTypeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + return old == "IT_PRIVATE" && new == "DEDICATED" +} + func ResourceComputeInterconnect() *schema.Resource { return &schema.Resource{ Create: resourceComputeInterconnectCreate, @@ -55,18 +59,12 @@ func ResourceComputeInterconnect() *schema.Resource { ), Schema: map[string]*schema.Schema{ - "customer_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Customer name, to put in the Letter of Authorization as the party authorized to request a -crossconnect.`, - }, "interconnect_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: verify.ValidateEnum([]string{"DEDICATED", "PARTNER", "IT_PRIVATE"}), + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"DEDICATED", "PARTNER", "IT_PRIVATE"}), + DiffSuppressFunc: InterconnectTypeDiffSuppress, Description: `Type of interconnect. Note that a value IT_PRIVATE has been deprecated in favor of DEDICATED. Can take one of the following values: - PARTNER: A partner-managed interconnection shared between customers though a partner. @@ -82,13 +80,6 @@ bundle, not the speed of the entire bundle. Can take one of the following values - LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics. - LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. 
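
For readers unfamiliar with DiffSuppressFunc: the InterconnectTypeDiffSuppress helper added above tells the SDK to treat a stored legacy interconnect_type of IT_PRIVATE as equal to a configured DEDICATED, so the deprecated-to-current rename does not surface as a perpetual plan diff. A standalone sketch of the same decision (the *schema.ResourceData parameter is unused in the real helper, so it is dropped here):

    package main

    import "fmt"

    // suppress reports whether a state/config pair should be treated as equal.
    // Only the exact legacy-to-current pair IT_PRIVATE -> DEDICATED is suppressed.
    func suppress(old, new string) bool {
        return old == "IT_PRIVATE" && new == "DEDICATED"
    }

    func main() {
        fmt.Println(suppress("IT_PRIVATE", "DEDICATED")) // true: no diff is shown
        fmt.Println(suppress("DEDICATED", "PARTNER"))    // false: a real change
    }
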
Possible values: ["LINK_TYPE_ETHERNET_10G_LR", "LINK_TYPE_ETHERNET_100G_LR"]`, }, - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: `URL of the InterconnectLocation object that represents where this connection is to be provisioned.`, - }, "name": { Type: schema.TypeString, Required: true, @@ -109,16 +100,22 @@ lowercase letter, or digit, except the last character, which cannot be a dash.`, "admin_enabled": { Type: schema.TypeBool, Optional: true, - ForceNew: true, Description: `Administrative status of the interconnect. When this is set to true, the Interconnect is functional and can carry traffic. When set to false, no packets can be carried over the interconnect and no BGP routes are exchanged over it. By default, the status is set to true.`, Default: true, }, + "customer_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Customer name, to put in the Letter of Authorization as the party authorized to request a +crossconnect. This field is required for Dedicated and Partner Interconnect, should not be specified +for cross-cloud interconnect.`, + }, "description": { Type: schema.TypeString, Optional: true, - ForceNew: true, Description: `An optional description of this resource. Provide this property when you create the resource.`, }, "labels": { @@ -132,10 +129,17 @@ method. Each label key/value pair must comply with RFC1035. Label values may be Please refer to the field 'effective_labels' for all of the labels present on the resource.`, Elem: &schema.Schema{Type: schema.TypeString}, }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `URL of the InterconnectLocation object that represents where this connection is to be provisioned. +Specifies the location inside Google's Networks, should not be passed in case of cross-cloud interconnect.`, + }, "macsec": { Type: schema.TypeList, Optional: true, - ForceNew: true, Description: `Configuration that enables Media Access Control security (MACsec) on the Cloud Interconnect connection between Google and your on-premises router.`, MaxItems: 1, @@ -144,7 +148,6 @@ Interconnect connection between Google and your on-premises router.`, "pre_shared_keys": { Type: schema.TypeList, Required: true, - ForceNew: true, Description: `A keychain placeholder describing a set of named key objects along with their start times. A MACsec CKN/CAK is generated for each key in the key chain. Google router automatically picks the key with the most recent startTime when establishing @@ -154,7 +157,6 @@ or re-establishing a MACsec secure link.`, "name": { Type: schema.TypeString, Required: true, - ForceNew: true, ValidateFunc: verify.ValidateRegexp(`^[a-z]([-a-z0-9]*[a-z0-9])?$`), Description: `A name for this pre-shared key. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match @@ -163,9 +165,9 @@ or re-establishing a MACsec secure link.`, letter, or digit, except the last character, which cannot be a dash.`, }, "fail_open": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, + Type: schema.TypeBool, + Optional: true, + Deprecated: "`failOpen` is deprecated and will be removed in a future major release. 
Use the top-level `fail_open` field in the `macsec` block instead.", Description: `If set to true, the Interconnect connection is configured with a should-secure MACsec security policy, that allows the Google router to fallback to cleartext traffic if the MKA session cannot be established. By default, the Interconnect @@ -175,7 +177,6 @@ if the MKA session cannot be established with your router.`, "start_time": { Type: schema.TypeString, Optional: true, - ForceNew: true, Description: `A RFC3339 timestamp on or after which the key is valid. startTime can be in the future. If the keychain has a single key, startTime can be omitted. If the keychain has multiple keys, startTime is mandatory for each key. The start times of keys must @@ -185,20 +186,27 @@ hours apart.`, }, }, }, + "fail_open": { + Type: schema.TypeBool, + Optional: true, + Description: `If set to true, the Interconnect connection is configured with a should-secure +MACsec security policy, that allows the Google router to fallback to cleartext +traffic if the MKA session cannot be established. By default, the Interconnect +connection is configured with a must-secure security policy that drops all traffic +if the MKA session cannot be established with your router.`, + }, }, }, }, "macsec_enabled": { Type: schema.TypeBool, Optional: true, - ForceNew: true, Description: `Enable or disable MACsec on this Interconnect connection. MACsec enablement fails if the MACsec object is not specified.`, }, "noc_contact_email": { Type: schema.TypeString, Optional: true, - ForceNew: true, Description: `Email address to contact the customer NOC for operations and maintenance notifications regarding this Interconnect. If specified, this will be used for notifications in addition to all other forms described, such as Cloud Monitoring logs alerting and Cloud Notifications. @@ -216,13 +224,14 @@ of Google's network that the interconnect is connected to.`, Type: schema.TypeList, Optional: true, ForceNew: true, - Description: `interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + Description: `interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( If specified then the connection is created on MACsec capable hardware ports. If not specified, the default value is false, which allocates non-MACsec capable ports first if -available). Possible values: ["MACSEC"]`, +available). Note that MACSEC is still technically allowed for compatibility reasons, but it +does not work with the API, and will be removed in an upcoming major version.
Possible values: ["MACSEC", "IF_MACSEC"]`, Elem: &schema.Schema{ Type: schema.TypeString, - ValidateFunc: verify.ValidateEnum([]string{"MACSEC"}), + ValidateFunc: verify.ValidateEnum([]string{"MACSEC", "IF_MACSEC"}), }, }, "available_features": { @@ -269,7 +278,6 @@ Google to the customer in the LOA.`, "effective_labels": { Type: schema.TypeMap, Computed: true, - ForceNew: true, Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, Elem: &schema.Schema{Type: schema.TypeString}, }, @@ -576,6 +584,66 @@ func resourceComputeInterconnectCreate(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error waiting to create Interconnect: %s", err) } + if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + labels := d.Get("labels") + terraformLables := d.Get("terraform_labels") + + // Labels cannot be set in a create. We'll have to set them here. + err = resourceComputeInterconnectRead(d, meta) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + // d.Get("effective_labels") will have been overridden by the Read call. + labelsProp, err := expandComputeInterconnectEffectiveLabels(v, d, config) + if err != nil { + return err + } + obj["labels"] = labelsProp + labelFingerprintProp := d.Get("label_fingerprint") + obj["labelFingerprint"] = labelFingerprintProp + + url, err = tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/interconnects/{{name}}/setLabels") + if err != nil { + return err + } + res, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return fmt.Errorf("Error adding labels to ComputeInterconnect %q: %s", d.Id(), err) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating ComputeInterconnect Labels", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + // Set back the labels field, as it is needed to decide the value of "labels" in the state in the read function. + if err := d.Set("labels", labels); err != nil { + return fmt.Errorf("Error setting back labels: %s", err) + } + + // Set back the terraform_labels field, as it is needed to decide the value of "terraform_labels" in the state in the read function. + if err := d.Set("terraform_labels", terraformLables); err != nil { + return fmt.Errorf("Error setting back terraform_labels: %s", err) + } + + // Set back the effective_labels field, as it is needed to decide the value of "effective_labels" in the state in the read function. 
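
The create-time label handling above exists because the interconnect insert API ignores labels: they can only be attached through a follow-up setLabels call, which is fingerprint-guarded, after a read has refreshed label_fingerprint. A compressed, runnable sketch of that sequence; insertResource, readLabelFingerprint, and setLabels are hypothetical stubs standing in for the provider's request plumbing, not real provider functions:

    package main

    import "fmt"

    // Stubs for the three HTTP round-trips involved (illustration only).
    func insertResource() error { fmt.Println("POST /interconnects"); return nil }
    func readLabelFingerprint() (string, error) {
        fmt.Println("GET /interconnects/{name}") // refreshes labelFingerprint
        return "fp-1", nil
    }
    func setLabels(labels map[string]string, fingerprint string) error {
        // setLabels is guarded by the fingerprint for optimistic concurrency.
        fmt.Printf("POST /interconnects/{name}/setLabels labels=%v fingerprint=%s\n", labels, fingerprint)
        return nil
    }

    // createWithLabels mirrors the flow above: create, re-read, then label.
    func createWithLabels(want map[string]string) error {
        if err := insertResource(); err != nil {
            return err
        }
        fp, err := readLabelFingerprint()
        if err != nil {
            return err
        }
        return setLabels(want, fp)
    }

    func main() { _ = createWithLabels(map[string]string{"env": "dev"}) }
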
+ if err := d.Set("effective_labels", v); err != nil { + return fmt.Errorf("Error setting back effective_labels: %s", err) + } + } + log.Printf("[DEBUG] Finished creating Interconnect %q: %#v", d.Id(), res) return resourceComputeInterconnectRead(d, meta) @@ -715,7 +783,145 @@ func resourceComputeInterconnectRead(d *schema.ResourceData, meta interface{}) e } func resourceComputeInterconnectUpdate(d *schema.ResourceData, meta interface{}) error { - // Only the root field "labels" and "terraform_labels" are mutable + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Interconnect: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputeInterconnectDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + adminEnabledProp, err := expandComputeInterconnectAdminEnabled(d.Get("admin_enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("admin_enabled"); ok || !reflect.DeepEqual(v, adminEnabledProp) { + obj["adminEnabled"] = adminEnabledProp + } + nocContactEmailProp, err := expandComputeInterconnectNocContactEmail(d.Get("noc_contact_email"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("noc_contact_email"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nocContactEmailProp)) { + obj["nocContactEmail"] = nocContactEmailProp + } + macsecProp, err := expandComputeInterconnectMacsec(d.Get("macsec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("macsec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, macsecProp)) { + obj["macsec"] = macsecProp + } + macsecEnabledProp, err := expandComputeInterconnectMacsecEnabled(d.Get("macsec_enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("macsec_enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, macsecEnabledProp)) { + obj["macsecEnabled"] = macsecEnabledProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/interconnects/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Interconnect %q: %#v", d.Id(), obj) + headers := make(http.Header) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating Interconnect %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Interconnect %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating Interconnect", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + d.Partial(true) + + if 
d.HasChange("label_fingerprint") || d.HasChange("effective_labels") { + obj := make(map[string]interface{}) + + labelFingerprintProp, err := expandComputeInterconnectLabelFingerprint(d.Get("label_fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("label_fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { + obj["labelFingerprint"] = labelFingerprintProp + } + labelsProp, err := expandComputeInterconnectEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/interconnects/{{name}}/setLabels") + if err != nil { + return err + } + + headers := make(http.Header) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error updating Interconnect %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Interconnect %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating Interconnect", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + d.Partial(false) + return resourceComputeInterconnectRead(d, meta) } @@ -1015,6 +1221,8 @@ func flattenComputeInterconnectMacsec(v interface{}, d *schema.ResourceData, con transformed := make(map[string]interface{}) transformed["pre_shared_keys"] = flattenComputeInterconnectMacsecPreSharedKeys(original["preSharedKeys"], d, config) + transformed["fail_open"] = + flattenComputeInterconnectMacsecFailOpen(original["failOpen"], d, config) return []interface{}{transformed} } func flattenComputeInterconnectMacsecPreSharedKeys(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { @@ -1049,6 +1257,10 @@ func flattenComputeInterconnectMacsecPreSharedKeysFailOpen(v interface{}, d *sch return v } +func flattenComputeInterconnectMacsecFailOpen(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenComputeInterconnectMacsecEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -1140,6 +1352,13 @@ func expandComputeInterconnectMacsec(v interface{}, d tpgresource.TerraformResou transformed["preSharedKeys"] = transformedPreSharedKeys } + transformedFailOpen, err := expandComputeInterconnectMacsecFailOpen(original["fail_open"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFailOpen); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["failOpen"] = transformedFailOpen + } + return transformed, nil } @@ -1191,6 +1410,10 @@ func expandComputeInterconnectMacsecPreSharedKeysFailOpen(v interface{}, d tpgre return v, nil } +func expandComputeInterconnectMacsecFailOpen(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func 
expandComputeInterconnectMacsecEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_managed_ssl_certificate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_managed_ssl_certificate.go index 3978516e18f..57604cbabce 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_managed_ssl_certificate.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_managed_ssl_certificate.go @@ -117,7 +117,6 @@ which type this is. Default value: "MANAGED" Possible values: ["MANAGED"]`, "certificate_id": { Type: schema.TypeInt, Computed: true, - Optional: true, Description: `The unique identifier for the resource.`, }, "creation_timestamp": { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network.go index 726dcc7fce2..9a3fb012ee1 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network.go @@ -150,9 +150,9 @@ is selected by GCP.`, "delete_default_routes_on_create": { Type: schema.TypeBool, Optional: true, - Default: false, Description: `If set to 'true', default routes ('0.0.0.0/0') will be deleted immediately after network creation. 
Defaults to 'false'.`, + Default: false, }, "project": { Type: schema.TypeString, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_endpoints.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_endpoints.go index fccc29756b5..465dd200647 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_endpoints.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_endpoints.go @@ -285,7 +285,7 @@ func resourceComputeNetworkEndpointsCreate(d *schema.ResourceData, meta interfac } // Store the ID now - id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{zone}}/{{network_endpoint_group}}/endpoints") + id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{zone}}/{{network_endpoint_group}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -628,7 +628,7 @@ func resourceComputeNetworkEndpointsImport(d *schema.ResourceData, meta interfac } // Replace import id for the resource id - id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{zone}}/{{network_endpoint_group}}/endpoints") + id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{zone}}/{{network_endpoint_group}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy_association.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy_association.go index 083d3e6a996..d43505ccca5 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy_association.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy_association.go @@ -3,34 +3,31 @@ // ---------------------------------------------------------------------------- // -// *** AUTO GENERATED CODE *** Type: DCL *** +// *** AUTO GENERATED CODE *** Type: MMv1 *** // // ---------------------------------------------------------------------------- // -// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) -// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). -// Changes will need to be made to the DCL or Magic Modules instead of here. +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. // -// We are not currently able to accept contributions to this file. If changes -// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- package compute import ( - "context" "fmt" "log" + "net/http" + "reflect" + "strings" "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" - - "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) @@ -49,9 +46,9 @@ func ResourceComputeNetworkFirewallPolicyAssociation() *schema.Resource { Create: schema.DefaultTimeout(20 * time.Minute), Delete: schema.DefaultTimeout(20 * time.Minute), }, + CustomizeDiff: customdiff.All( tpgresource.DefaultProviderProject, - tpgresource.DefaultProviderRegion, ), Schema: map[string]*schema.Schema{ @@ -60,86 +57,107 @@ func ResourceComputeNetworkFirewallPolicyAssociation() *schema.Resource { Required: true, ForceNew: true, DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: "The target that the firewall policy is attached to.", + Description: `The target that the firewall policy is attached to.`, }, - "firewall_policy": { Type: schema.TypeString, Required: true, ForceNew: true, DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: "The firewall policy ID of the association.", + Description: `The firewall policy of the resource.`, }, - "name": { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "The name for an association.", + Description: `The name for an association.`, }, - - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: "The project for the resource", - }, - "short_name": { Type: schema.TypeString, Computed: true, - Description: "The short name of the firewall policy of the association.", + Description: `The short name of the firewall policy of the association.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, }, }, + UseJSONNumber: true, } } func resourceComputeNetworkFirewallPolicyAssociationCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) - project, err := tpgresource.GetProject(d, config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - obj := &compute.NetworkFirewallPolicyAssociation{ - AttachmentTarget: dcl.String(d.Get("attachment_target").(string)), - FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), - Name: dcl.String(d.Get("name").(string)), - Project: dcl.String(project), + obj := make(map[string]interface{}) + nameProp, err := expandComputeNetworkFirewallPolicyAssociationName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp } - - id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/global/firewallPolicies/{{firewall_policy}}/associations/{{name}}") + attachmentTargetProp, err := expandComputeNetworkFirewallPolicyAssociationAttachmentTarget(d.Get("attachment_target"), d, config) if err != nil { 
- return fmt.Errorf("error constructing id: %s", err) + return err + } else if v, ok := d.GetOkExists("attachment_target"); !tpgresource.IsEmptyValue(reflect.ValueOf(attachmentTargetProp)) && (ok || !reflect.DeepEqual(v, attachmentTargetProp)) { + obj["attachmentTarget"] = attachmentTargetProp } - d.SetId(id) - directive := tpgdclresource.CreateDirective - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + + url, err := tpgresource.ReplaceVarsForId(d, config, "{{ComputeBasePath}}projects/{{project}}/global/firewallPolicies/{{firewall_policy}}/addAssociation") if err != nil { return err } - billingProject := project + + log.Printf("[DEBUG] Creating new NetworkFirewallPolicyAssociation: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NetworkFirewallPolicyAssociation: %s", err) + } + billingProject = strings.TrimPrefix(project, "projects/") + // err == nil indicates that the billing_project value was found if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating NetworkFirewallPolicyAssociation: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/global/firewallPolicies/{{firewall_policy}}/associations/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) } - res, err := client.ApplyNetworkFirewallPolicyAssociation(context.Background(), obj, directive...) 
+ d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, tpgresource.GetResourceNameFromSelfLink(project), "Creating NetworkFirewallPolicyAssociation", userAgent, + d.Timeout(schema.TimeoutCreate)) - if _, ok := err.(dcl.DiffAfterApplyError); ok { - log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { + if err != nil { // The resource didn't actually create d.SetId("") - return fmt.Errorf("Error creating NetworkFirewallPolicyAssociation: %s", err) + return fmt.Errorf("Error waiting to create NetworkFirewallPolicyAssociation: %s", err) } log.Printf("[DEBUG] Finished creating NetworkFirewallPolicyAssociation %q: %#v", d.Id(), res) @@ -149,54 +167,54 @@ func resourceComputeNetworkFirewallPolicyAssociationCreate(d *schema.ResourceDat func resourceComputeNetworkFirewallPolicyAssociationRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) - project, err := tpgresource.GetProject(d, config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - obj := &compute.NetworkFirewallPolicyAssociation{ - AttachmentTarget: dcl.String(d.Get("attachment_target").(string)), - FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), - Name: dcl.String(d.Get("name").(string)), - Project: dcl.String(project), + url, err := tpgresource.ReplaceVarsForId(d, config, "{{ComputeBasePath}}projects/{{project}}/global/firewallPolicies/{{firewall_policy}}/getAssociation?name={{name}}") + if err != nil { + return err } - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) if err != nil { - return err + return fmt.Errorf("Error fetching project for NetworkFirewallPolicyAssociation: %s", err) } - billingProject := project + billingProject = strings.TrimPrefix(project, "projects/") + // err == nil indicates that the billing_project value was found if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.GetNetworkFirewallPolicyAssociation(context.Background(), obj) + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) if err != nil { - resourceName := fmt.Sprintf("ComputeNetworkFirewallPolicyAssociation %q", d.Id()) - return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeNetworkFirewallPolicyAssociation %q", d.Id())) } - if err = d.Set("attachment_target", res.AttachmentTarget); err != nil { - return fmt.Errorf("error setting attachment_target in state: %s", err) + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading NetworkFirewallPolicyAssociation: %s", err) } - if err = d.Set("firewall_policy", res.FirewallPolicy); err != nil { - return fmt.Errorf("error setting firewall_policy in state: %s", err) - } - if err = d.Set("name", res.Name); err != nil { - return fmt.Errorf("error setting name in state: %s", 
err) + + if err := d.Set("name", flattenComputeNetworkFirewallPolicyAssociationName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkFirewallPolicyAssociation: %s", err) } - if err = d.Set("project", res.Project); err != nil { - return fmt.Errorf("error setting project in state: %s", err) + if err := d.Set("attachment_target", flattenComputeNetworkFirewallPolicyAssociationAttachmentTarget(res["attachmentTarget"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkFirewallPolicyAssociation: %s", err) } - if err = d.Set("short_name", res.ShortName); err != nil { - return fmt.Errorf("error setting short_name in state: %s", err) + if err := d.Set("short_name", flattenComputeNetworkFirewallPolicyAssociationShortName(res["shortName"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkFirewallPolicyAssociation: %s", err) } return nil @@ -204,49 +222,66 @@ func resourceComputeNetworkFirewallPolicyAssociationRead(d *schema.ResourceData, func resourceComputeNetworkFirewallPolicyAssociationDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) - project, err := tpgresource.GetProject(d, config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - obj := &compute.NetworkFirewallPolicyAssociation{ - AttachmentTarget: dcl.String(d.Get("attachment_target").(string)), - FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), - Name: dcl.String(d.Get("name").(string)), - Project: dcl.String(project), + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NetworkFirewallPolicyAssociation: %s", err) } + billingProject = strings.TrimPrefix(project, "projects/") - log.Printf("[DEBUG] Deleting NetworkFirewallPolicyAssociation %q", d.Id()) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + url, err := tpgresource.ReplaceVarsForId(d, config, "{{ComputeBasePath}}projects/{{project}}/global/firewallPolicies/{{firewall_policy}}/removeAssociation?name={{name}}") if err != nil { return err } - billingProject := project + + var obj map[string]interface{} + // err == nil indicates that the billing_project value was found if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting NetworkFirewallPolicyAssociation %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "NetworkFirewallPolicyAssociation") } - if err := client.DeleteNetworkFirewallPolicyAssociation(context.Background(), obj); err != nil { - return fmt.Errorf("Error deleting NetworkFirewallPolicyAssociation: %s", err) + + err = ComputeOperationWaitTime( + config, res, tpgresource.GetResourceNameFromSelfLink(project), "Deleting NetworkFirewallPolicyAssociation", userAgent, + 
d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err } - log.Printf("[DEBUG] Finished deleting NetworkFirewallPolicyAssociation %q", d.Id()) + log.Printf("[DEBUG] Finished deleting NetworkFirewallPolicyAssociation %q: %#v", d.Id(), res) return nil } func resourceComputeNetworkFirewallPolicyAssociationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { config := meta.(*transport_tpg.Config) - if err := tpgresource.ParseImportId([]string{ - "projects/(?P<project>[^/]+)/global/firewallPolicies/(?P<firewall_policy>[^/]+)/associations/(?P<name>[^/]+)", - "(?P<project>[^/]+)/(?P<firewall_policy>[^/]+)/(?P<name>[^/]+)", + "^projects/(?P<project>[^/]+)/global/firewallPolicies/(?P<firewall_policy>[^/]+)/associations/(?P<name>[^/]+)$", + "^(?P<project>[^/]+)/(?P<firewall_policy>[^/]+)/(?P<name>[^/]+)$", + "^(?P<firewall_policy>[^/]+)/(?P<name>[^/]+)$", }, d, config); err != nil { return nil, err } @@ -260,3 +295,23 @@ func resourceComputeNetworkFirewallPolicyAssociationImport(d *schema.ResourceDat return []*schema.ResourceData{d}, nil } + +func flattenComputeNetworkFirewallPolicyAssociationName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkFirewallPolicyAssociationAttachmentTarget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkFirewallPolicyAssociationShortName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandComputeNetworkFirewallPolicyAssociationName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkFirewallPolicyAssociationAttachmentTarget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy_association_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy_association_sweeper.go new file mode 100644 index 00000000000..dc179e80421 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy_association_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md.
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeNetworkFirewallPolicyAssociation", testSweepComputeNetworkFirewallPolicyAssociation) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeNetworkFirewallPolicyAssociation(region string) error { + resourceName := "ComputeNetworkFirewallPolicyAssociation" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/firewallPolicies/{{firewall_policy}}", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["networkFirewallPolicyAssociations"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
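
The sweeper below this point filters the listed associations before deleting: only names that sweeper.IsSweepableTestResource recognizes as test artifacts are removed, everything else is counted and skipped, and deletes are fired without waiting on their operations. A self-contained sketch of that loop; the "tf-test" prefix check is an assumption standing in for the real predicate:

    package main

    import (
        "fmt"
        "strings"
    )

    // isSweepableTestResource stands in for sweeper.IsSweepableTestResource;
    // the real predicate checks the provider's test-name prefixes. Assumed
    // here: anything starting with "tf-test" is fair game.
    func isSweepableTestResource(name string) bool {
        return strings.HasPrefix(name, "tf-test")
    }

    func main() {
        names := []string{"tf-test-assoc-1", "prod-assoc"}
        skipped := 0
        for _, n := range names {
            if !isSweepableTestResource(n) {
                skipped++
                continue
            }
            // Fire the delete without waiting on the resulting operation.
            fmt.Printf("POST removeAssociation?name=%s\n", n)
        }
        fmt.Printf("%d items were non-sweepable and skipped.\n", skipped)
    }
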
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/firewallPolicies/{{firewall_policy}}/removeAssociation?name={{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy_rule.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy_rule.go index 37072e4c0cb..8d29528cc5a 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy_rule.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy_rule.go @@ -3,36 +3,34 @@ // ---------------------------------------------------------------------------- // -// *** AUTO GENERATED CODE *** Type: DCL *** +// *** AUTO GENERATED CODE *** Type: MMv1 *** // // ---------------------------------------------------------------------------- // -// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) -// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). -// Changes will need to be made to the DCL or Magic Modules instead of here. +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. // -// We are not currently able to accept contributions to this file. If changes -// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- package compute import ( - "context" "fmt" "log" + "net/http" + "reflect" + "strings" "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" - - "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceComputeNetworkFirewallPolicyRule() *schema.Resource { @@ -51,322 +49,396 @@ func ResourceComputeNetworkFirewallPolicyRule() *schema.Resource { Update: schema.DefaultTimeout(20 * time.Minute), Delete: schema.DefaultTimeout(20 * time.Minute), }, + CustomizeDiff: customdiff.All( tpgresource.DefaultProviderProject, - tpgresource.DefaultProviderRegion, ), Schema: map[string]*schema.Schema{ "action": { Type: schema.TypeString, Required: true, - Description: "The Action to perform when the client connection triggers the rule. Valid actions are \"allow\", \"deny\", \"goto_next\" and \"apply_security_profile_group\".", + Description: `The Action to perform when the client connection triggers the rule. Valid actions are "allow", "deny", "goto_next" and "apply_security_profile_group".`, }, - "direction": { - Type: schema.TypeString, - Required: true, - Description: "The direction in which this rule applies. Possible values: INGRESS, EGRESS", + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"INGRESS", "EGRESS"}), + Description: `The direction in which this rule applies. Possible values: ["INGRESS", "EGRESS"]`, }, - "firewall_policy": { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: "The firewall policy of the resource.", + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `The firewall policy of the resource.`, }, - "match": { Type: schema.TypeList, Required: true, - Description: "A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced.", + Description: `A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced.`, MaxItems: 1, - Elem: ComputeNetworkFirewallPolicyRuleMatchSchema(), + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "layer4_configs": { + Type: schema.TypeList, + Required: true, + Description: `Pairs of IP protocols and ports that the rule should match.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_protocol": { + Type: schema.TypeString, + Required: true, + Description: `The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. +This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number.`, + }, + "ports": { + Type: schema.TypeList, + Optional: true, + Description: `An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. 
If not specified, this rule applies to connections through any port. +Example inputs include: ["22"], ["80","443"], and ["12345-12349"].`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "dest_address_groups": { + Type: schema.TypeList, + Optional: true, + Description: `Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "dest_fqdns": { + Type: schema.TypeList, + Optional: true, + Description: `Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination fqdn allowed is 100.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "dest_ip_ranges": { + Type: schema.TypeList, + Optional: true, + Description: `CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "dest_region_codes": { + Type: schema.TypeList, + Optional: true, + Description: `Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "dest_threat_intelligences": { + Type: schema.TypeList, + Optional: true, + Description: `Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic destination.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "src_address_groups": { + Type: schema.TypeList, + Optional: true, + Description: `Address groups which should be matched against the traffic source. Maximum number of source address groups is 10.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "src_fqdns": { + Type: schema.TypeList, + Optional: true, + Description: `Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source fqdn allowed is 100.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "src_ip_ranges": { + Type: schema.TypeList, + Optional: true, + Description: `CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "src_region_codes": { + Type: schema.TypeList, + Optional: true, + Description: `Region codes whose IP addresses will be used to match for source of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of source region codes allowed is 5000.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "src_secure_tags": { + Type: schema.TypeList, + Optional: true, + Description: `List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Name of the secure tag, created with TagManager's TagValue API.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the secure tag, either EFFECTIVE or INEFFECTIVE. 
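
The ports format described above (each entry a single integer or an inclusive range, an empty list matching any port) is easy to misread, so here is a sketch of the accepted shape. This is not the provider's evaluation code, since matching happens server-side; matchPort is a hypothetical illustration of the format only:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // matchPort reports whether port falls inside any spec, where each spec
    // is either a single integer ("22") or an inclusive range ("12345-12349").
    // An empty spec list matches all ports.
    func matchPort(specs []string, port int) (bool, error) {
        if len(specs) == 0 {
            return true, nil
        }
        for _, s := range specs {
            lo, hi := s, s
            if i := strings.IndexByte(s, '-'); i >= 0 {
                lo, hi = s[:i], s[i+1:] // split "12345-12349" into bounds
            }
            l, err := strconv.Atoi(lo)
            if err != nil {
                return false, fmt.Errorf("bad port spec %q: %v", s, err)
            }
            h, err := strconv.Atoi(hi)
            if err != nil {
                return false, fmt.Errorf("bad port spec %q: %v", s, err)
            }
            if port >= l && port <= h {
                return true, nil
            }
        }
        return false, nil
    }

    func main() {
        ok, _ := matchPort([]string{"80", "443", "12345-12349"}, 12347)
        fmt.Println(ok) // true: inside the 12345-12349 range
    }
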
A secure tag is INEFFECTIVE when it is deleted or its network is deleted.`, + }, + }, + }, + }, + "src_threat_intelligences": { + Type: schema.TypeList, + Optional: true, + Description: `Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic source.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, }, - "priority": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - Description: "An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority.", + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `An integer indicating the priority of a rule in the list. +The priority must be a positive value between 0 and 2147483647. +Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority.`, }, - "description": { Type: schema.TypeString, Optional: true, - Description: "An optional description for this resource.", + Description: `An optional description for this resource.`, }, - "disabled": { - Type: schema.TypeBool, - Optional: true, - Description: "Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled.", + Type: schema.TypeBool, + Optional: true, + Description: `Denotes whether the firewall policy rule is disabled. +When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. +If this is unspecified, the firewall policy rule will be enabled.`, }, - "enable_logging": { - Type: schema.TypeBool, - Optional: true, - Description: "Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on \"goto_next\" rules.", + Type: schema.TypeBool, + Optional: true, + Description: `Denotes whether to enable logging for a particular rule. +If logging is enabled, logs will be exported to the configured export destination in Stackdriver. +Logs may be exported to BigQuery or Pub/Sub. +Note: you cannot enable logging on "goto_next" rules.`, }, - - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: "The project for the resource", - }, - "rule_name": { Type: schema.TypeString, Optional: true, - Description: "An optional name for the rule. This field is not a unique identifier and can be updated.", + Description: `An optional name for the rule. This field is not a unique identifier and can be updated.`, }, - "security_profile_group": { - Type: schema.TypeString, - Optional: true, - Description: "A fully-qualified URL of a SecurityProfileGroup resource. Example: https://networksecurity.googleapis.com/v1/organizations/{organizationId}/locations/global/securityProfileGroups/my-security-profile-group. It must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions.", + Type: schema.TypeString, + Optional: true, + Description: `A fully-qualified URL of a SecurityProfile resource instance. 
+Example: https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group +Must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions.`, }, - "target_secure_tags": { - Type: schema.TypeList, - Optional: true, - Description: "A list of secure tags that controls which instances the firewall rule applies to. If targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the target_secure_tag are in INEFFECTIVE state, then this rule will be ignored. targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. Maximum number of target label tags allowed is 256.", - Elem: ComputeNetworkFirewallPolicyRuleTargetSecureTagsSchema(), + Type: schema.TypeList, + Optional: true, + Description: `A list of secure tags that controls which instances the firewall rule applies to. +If targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags; if all the targetSecureTag are in INEFFECTIVE state, then this rule will be ignored. +targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. Maximum number of target label tags allowed is 256.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Name of the secure tag, created with TagManager's TagValue API.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted.`, + }, + }, + }, }, - "target_service_accounts": { Type: schema.TypeList, Optional: true, - Description: "A list of service accounts indicating the sets of instances that are applied with this rule.", - Elem: &schema.Schema{Type: schema.TypeString}, + Description: `A list of service accounts indicating the sets of instances that are applied with this rule.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, }, - "tls_inspect": { - Type: schema.TypeBool, - Optional: true, - Description: "Boolean flag indicating if the traffic should be TLS decrypted. It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions.", + Type: schema.TypeBool, + Optional: true, + Description: `Boolean flag indicating if the traffic should be TLS decrypted. +Can be set only if action = 'apply_security_profile_group' and cannot be set for other actions.`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, }, - "kind": { Type: schema.TypeString, Computed: true, - Description: "Type of the resource. Always `compute#firewallPolicyRule` for firewall policy rules", + Description: `Type of the resource. 
Always 'compute#firewallPolicyRule' for firewall policy rules`, }, - "rule_tuple_count": { Type: schema.TypeInt, Computed: true, - Description: "Calculation of the complexity of a single firewall policy rule.", - }, - }, - } -} - -func ComputeNetworkFirewallPolicyRuleMatchSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "layer4_configs": { - Type: schema.TypeList, - Required: true, - Description: "Pairs of IP protocols and ports that the rule should match.", - Elem: ComputeNetworkFirewallPolicyRuleMatchLayer4ConfigsSchema(), - }, - - "dest_address_groups": { - Type: schema.TypeList, - Optional: true, - Description: "Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. Destination address groups is only supported in Egress rules.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "dest_fqdns": { - Type: schema.TypeList, - Optional: true, - Description: "Domain names that will be used to match against the resolved domain name of destination of traffic. Can only be specified if DIRECTION is egress.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "dest_ip_ranges": { - Type: schema.TypeList, - Optional: true, - Description: "CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000.", - Elem: &schema.Schema{Type: schema.TypeString}, + Description: `Calculation of the complexity of a single firewall policy rule.`, }, - - "dest_region_codes": { - Type: schema.TypeList, - Optional: true, - Description: "The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is egress.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "dest_threat_intelligences": { - Type: schema.TypeList, - Optional: true, - Description: "Name of the Google Cloud Threat Intelligence list.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "src_address_groups": { - Type: schema.TypeList, - Optional: true, - Description: "Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. Source address groups is only supported in Ingress rules.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "src_fqdns": { - Type: schema.TypeList, - Optional: true, - Description: "Domain names that will be used to match against the resolved domain name of source of traffic. Can only be specified if DIRECTION is ingress.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "src_ip_ranges": { - Type: schema.TypeList, - Optional: true, - Description: "CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "src_region_codes": { - Type: schema.TypeList, - Optional: true, - Description: "The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is ingress.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "src_secure_tags": { - Type: schema.TypeList, - Optional: true, - Description: "List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. 
Maximum number of source tag values allowed is 256.", - Elem: ComputeNetworkFirewallPolicyRuleMatchSrcSecureTagsSchema(), - }, - - "src_threat_intelligences": { - Type: schema.TypeList, - Optional: true, - Description: "Name of the Google Cloud Threat Intelligence list.", - Elem: &schema.Schema{Type: schema.TypeString}, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, }, }, + UseJSONNumber: true, } } -func ComputeNetworkFirewallPolicyRuleMatchLayer4ConfigsSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ip_protocol": { - Type: schema.TypeString, - Required: true, - Description: "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (`tcp`, `udp`, `icmp`, `esp`, `ah`, `ipip`, `sctp`), or the IP protocol number.", - }, - - "ports": { - Type: schema.TypeList, - Optional: true, - Description: "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ``.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, +func resourceComputeNetworkFirewallPolicyRuleCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err } -} -func ComputeNetworkFirewallPolicyRuleMatchSrcSecureTagsSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: "Name of the secure tag, created with TagManager's TagValue API. @pattern tagValues/[0-9]+", - }, - - "state": { - Type: schema.TypeString, - Computed: true, - Description: "[Output Only] State of the secure tag, either `EFFECTIVE` or `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted or its network is deleted.", - }, - }, + obj := make(map[string]interface{}) + ruleNameProp, err := expandComputeNetworkFirewallPolicyRuleRuleName(d.Get("rule_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("rule_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(ruleNameProp)) && (ok || !reflect.DeepEqual(v, ruleNameProp)) { + obj["ruleName"] = ruleNameProp } -} - -func ComputeNetworkFirewallPolicyRuleTargetSecureTagsSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: "Name of the secure tag, created with TagManager's TagValue API. @pattern tagValues/[0-9]+", - }, - - "state": { - Type: schema.TypeString, - Computed: true, - Description: "[Output Only] State of the secure tag, either `EFFECTIVE` or `INEFFECTIVE`. 
A secure tag is `INEFFECTIVE` when it is deleted or its network is deleted.", - }, - }, + descriptionProp, err := expandComputeNetworkFirewallPolicyRuleDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp } -} - -func resourceComputeNetworkFirewallPolicyRuleCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - project, err := tpgresource.GetProject(d, config) + priorityProp, err := expandComputeNetworkFirewallPolicyRulePriority(d.Get("priority"), d, config) if err != nil { return err + } else if v, ok := d.GetOkExists("priority"); !tpgresource.IsEmptyValue(reflect.ValueOf(priorityProp)) && (ok || !reflect.DeepEqual(v, priorityProp)) { + obj["priority"] = priorityProp } - - obj := &compute.NetworkFirewallPolicyRule{ - Action: dcl.String(d.Get("action").(string)), - Direction: compute.NetworkFirewallPolicyRuleDirectionEnumRef(d.Get("direction").(string)), - FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), - Match: expandComputeNetworkFirewallPolicyRuleMatch(d.Get("match")), - Priority: dcl.Int64(int64(d.Get("priority").(int))), - Description: dcl.String(d.Get("description").(string)), - Disabled: dcl.Bool(d.Get("disabled").(bool)), - EnableLogging: dcl.Bool(d.Get("enable_logging").(bool)), - Project: dcl.String(project), - RuleName: dcl.String(d.Get("rule_name").(string)), - SecurityProfileGroup: dcl.String(d.Get("security_profile_group").(string)), - TargetSecureTags: expandComputeNetworkFirewallPolicyRuleTargetSecureTagsArray(d.Get("target_secure_tags")), - TargetServiceAccounts: tpgdclresource.ExpandStringArray(d.Get("target_service_accounts")), - TlsInspect: dcl.Bool(d.Get("tls_inspect").(bool)), + matchProp, err := expandComputeNetworkFirewallPolicyRuleMatch(d.Get("match"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("match"); !tpgresource.IsEmptyValue(reflect.ValueOf(matchProp)) && (ok || !reflect.DeepEqual(v, matchProp)) { + obj["match"] = matchProp } - - id, err := obj.ID() + actionProp, err := expandComputeNetworkFirewallPolicyRuleAction(d.Get("action"), d, config) if err != nil { - return fmt.Errorf("error constructing id: %s", err) + return err + } else if v, ok := d.GetOkExists("action"); !tpgresource.IsEmptyValue(reflect.ValueOf(actionProp)) && (ok || !reflect.DeepEqual(v, actionProp)) { + obj["action"] = actionProp } - d.SetId(id) - directive := tpgdclresource.CreateDirective - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + securityProfileGroupProp, err := expandComputeNetworkFirewallPolicyRuleSecurityProfileGroup(d.Get("security_profile_group"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("security_profile_group"); !tpgresource.IsEmptyValue(reflect.ValueOf(securityProfileGroupProp)) && (ok || !reflect.DeepEqual(v, securityProfileGroupProp)) { + obj["securityProfileGroup"] = securityProfileGroupProp + } + tlsInspectProp, err := expandComputeNetworkFirewallPolicyRuleTlsInspect(d.Get("tls_inspect"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tls_inspect"); !tpgresource.IsEmptyValue(reflect.ValueOf(tlsInspectProp)) && (ok || !reflect.DeepEqual(v, tlsInspectProp)) { + obj["tlsInspect"] = tlsInspectProp + } + directionProp, err := 
expandComputeNetworkFirewallPolicyRuleDirection(d.Get("direction"), d, config) if err != nil { return err + } else if v, ok := d.GetOkExists("direction"); !tpgresource.IsEmptyValue(reflect.ValueOf(directionProp)) && (ok || !reflect.DeepEqual(v, directionProp)) { + obj["direction"] = directionProp + } + enableLoggingProp, err := expandComputeNetworkFirewallPolicyRuleEnableLogging(d.Get("enable_logging"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_logging"); ok || !reflect.DeepEqual(v, enableLoggingProp) { + obj["enableLogging"] = enableLoggingProp + } + targetServiceAccountsProp, err := expandComputeNetworkFirewallPolicyRuleTargetServiceAccounts(d.Get("target_service_accounts"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("target_service_accounts"); ok || !reflect.DeepEqual(v, targetServiceAccountsProp) { + obj["targetServiceAccounts"] = targetServiceAccountsProp + } + targetSecureTagsProp, err := expandComputeNetworkFirewallPolicyRuleTargetSecureTags(d.Get("target_secure_tags"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("target_secure_tags"); ok || !reflect.DeepEqual(v, targetSecureTagsProp) { + obj["targetSecureTags"] = targetSecureTagsProp + } + disabledProp, err := expandComputeNetworkFirewallPolicyRuleDisabled(d.Get("disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(disabledProp)) && (ok || !reflect.DeepEqual(v, disabledProp)) { + obj["disabled"] = disabledProp + } + + url, err := tpgresource.ReplaceVarsForId(d, config, "{{ComputeBasePath}}projects/{{project}}/global/firewallPolicies/{{firewall_policy}}/addRule") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new NetworkFirewallPolicyRule: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NetworkFirewallPolicyRule: %s", err) } - billingProject := project + billingProject = strings.TrimPrefix(project, "projects/") + // err == nil indicates that the billing_project value was found if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating NetworkFirewallPolicyRule: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/global/firewallPolicies/{{firewall_policy}}/rules/{{priority}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) } - res, err := client.ApplyNetworkFirewallPolicyRule(context.Background(), obj, directive...) 
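For context on the rewritten create path above: instead of going through the DCL client, the rule is now assembled as a plain map and POSTed to the parent policy's addRule method, after which the returned operation is polled. A minimal, standalone sketch of the JSON body that path produces — every field value below is invented for illustration; only the camelCase keys mirror the obj map built by the expanders above:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical rule body; mirrors the shape of the obj map built in
	// resourceComputeNetworkFirewallPolicyRuleCreate (values are made up).
	rule := map[string]interface{}{
		"priority":  1000,
		"action":    "allow",
		"direction": "INGRESS",
		"match": map[string]interface{}{
			"srcIpRanges": []string{"10.0.0.0/8"},
			"layer4Configs": []map[string]interface{}{
				{"ipProtocol": "tcp", "ports": []string{"80", "443"}},
			},
		},
		"enableLogging": true,
	}
	b, _ := json.MarshalIndent(rule, "", "  ")
	// This is the body sent to .../firewallPolicies/{policy}/addRule.
	fmt.Println(string(b))
}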
+ d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, tpgresource.GetResourceNameFromSelfLink(project), "Creating NetworkFirewallPolicyRule", userAgent, + d.Timeout(schema.TimeoutCreate)) - if _, ok := err.(dcl.DiffAfterApplyError); ok { - log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { + if err != nil { // The resource didn't actually create d.SetId("") - return fmt.Errorf("Error creating NetworkFirewallPolicyRule: %s", err) + return fmt.Errorf("Error waiting to create NetworkFirewallPolicyRule: %s", err) } log.Printf("[DEBUG] Finished creating NetworkFirewallPolicyRule %q: %#v", d.Id(), res) @@ -376,213 +448,287 @@ func resourceComputeNetworkFirewallPolicyRuleCreate(d *schema.ResourceData, meta func resourceComputeNetworkFirewallPolicyRuleRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) - project, err := tpgresource.GetProject(d, config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - obj := &compute.NetworkFirewallPolicyRule{ - Action: dcl.String(d.Get("action").(string)), - Direction: compute.NetworkFirewallPolicyRuleDirectionEnumRef(d.Get("direction").(string)), - FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), - Match: expandComputeNetworkFirewallPolicyRuleMatch(d.Get("match")), - Priority: dcl.Int64(int64(d.Get("priority").(int))), - Description: dcl.String(d.Get("description").(string)), - Disabled: dcl.Bool(d.Get("disabled").(bool)), - EnableLogging: dcl.Bool(d.Get("enable_logging").(bool)), - Project: dcl.String(project), - RuleName: dcl.String(d.Get("rule_name").(string)), - SecurityProfileGroup: dcl.String(d.Get("security_profile_group").(string)), - TargetSecureTags: expandComputeNetworkFirewallPolicyRuleTargetSecureTagsArray(d.Get("target_secure_tags")), - TargetServiceAccounts: tpgdclresource.ExpandStringArray(d.Get("target_service_accounts")), - TlsInspect: dcl.Bool(d.Get("tls_inspect").(bool)), + url, err := tpgresource.ReplaceVarsForId(d, config, "{{ComputeBasePath}}projects/{{project}}/global/firewallPolicies/{{firewall_policy}}/getRule?priority={{priority}}") + if err != nil { + return err } - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) if err != nil { - return err + return fmt.Errorf("Error fetching project for NetworkFirewallPolicyRule: %s", err) } - billingProject := project + billingProject = strings.TrimPrefix(project, "projects/") + // err == nil indicates that the billing_project value was found if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.GetNetworkFirewallPolicyRule(context.Background(), obj) + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) if err != nil { - resourceName := fmt.Sprintf("ComputeNetworkFirewallPolicyRule %q", d.Id()) - return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + 
return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeNetworkFirewallPolicyRule %q", d.Id())) } - if err = d.Set("action", res.Action); err != nil { - return fmt.Errorf("error setting action in state: %s", err) + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading NetworkFirewallPolicyRule: %s", err) } - if err = d.Set("direction", res.Direction); err != nil { - return fmt.Errorf("error setting direction in state: %s", err) + + if err := d.Set("creation_timestamp", flattenComputeNetworkFirewallPolicyRuleCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkFirewallPolicyRule: %s", err) } - if err = d.Set("firewall_policy", res.FirewallPolicy); err != nil { - return fmt.Errorf("error setting firewall_policy in state: %s", err) + if err := d.Set("kind", flattenComputeNetworkFirewallPolicyRuleKind(res["kind"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkFirewallPolicyRule: %s", err) } - if err = d.Set("match", flattenComputeNetworkFirewallPolicyRuleMatch(res.Match)); err != nil { - return fmt.Errorf("error setting match in state: %s", err) + if err := d.Set("rule_name", flattenComputeNetworkFirewallPolicyRuleRuleName(res["ruleName"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkFirewallPolicyRule: %s", err) } - if err = d.Set("priority", res.Priority); err != nil { - return fmt.Errorf("error setting priority in state: %s", err) + if err := d.Set("description", flattenComputeNetworkFirewallPolicyRuleDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkFirewallPolicyRule: %s", err) } - if err = d.Set("description", res.Description); err != nil { - return fmt.Errorf("error setting description in state: %s", err) + if err := d.Set("priority", flattenComputeNetworkFirewallPolicyRulePriority(res["priority"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkFirewallPolicyRule: %s", err) } - if err = d.Set("disabled", res.Disabled); err != nil { - return fmt.Errorf("error setting disabled in state: %s", err) + if err := d.Set("match", flattenComputeNetworkFirewallPolicyRuleMatch(res["match"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkFirewallPolicyRule: %s", err) } - if err = d.Set("enable_logging", res.EnableLogging); err != nil { - return fmt.Errorf("error setting enable_logging in state: %s", err) + if err := d.Set("action", flattenComputeNetworkFirewallPolicyRuleAction(res["action"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkFirewallPolicyRule: %s", err) } - if err = d.Set("project", res.Project); err != nil { - return fmt.Errorf("error setting project in state: %s", err) + if err := d.Set("security_profile_group", flattenComputeNetworkFirewallPolicyRuleSecurityProfileGroup(res["securityProfileGroup"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkFirewallPolicyRule: %s", err) } - if err = d.Set("rule_name", res.RuleName); err != nil { - return fmt.Errorf("error setting rule_name in state: %s", err) + if err := d.Set("tls_inspect", flattenComputeNetworkFirewallPolicyRuleTlsInspect(res["tlsInspect"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkFirewallPolicyRule: %s", err) } - if err = d.Set("security_profile_group", res.SecurityProfileGroup); err != nil { - return fmt.Errorf("error setting security_profile_group in state: %s", err) + if err := d.Set("direction", 
flattenComputeNetworkFirewallPolicyRuleDirection(res["direction"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkFirewallPolicyRule: %s", err) } - if err = d.Set("target_secure_tags", flattenComputeNetworkFirewallPolicyRuleTargetSecureTagsArray(res.TargetSecureTags)); err != nil { - return fmt.Errorf("error setting target_secure_tags in state: %s", err) + if err := d.Set("enable_logging", flattenComputeNetworkFirewallPolicyRuleEnableLogging(res["enableLogging"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkFirewallPolicyRule: %s", err) } - if err = d.Set("target_service_accounts", res.TargetServiceAccounts); err != nil { - return fmt.Errorf("error setting target_service_accounts in state: %s", err) + if err := d.Set("rule_tuple_count", flattenComputeNetworkFirewallPolicyRuleRuleTupleCount(res["ruleTupleCount"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkFirewallPolicyRule: %s", err) } - if err = d.Set("tls_inspect", res.TlsInspect); err != nil { - return fmt.Errorf("error setting tls_inspect in state: %s", err) + if err := d.Set("target_service_accounts", flattenComputeNetworkFirewallPolicyRuleTargetServiceAccounts(res["targetServiceAccounts"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkFirewallPolicyRule: %s", err) } - if err = d.Set("kind", res.Kind); err != nil { - return fmt.Errorf("error setting kind in state: %s", err) + if err := d.Set("target_secure_tags", flattenComputeNetworkFirewallPolicyRuleTargetSecureTags(res["targetSecureTags"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkFirewallPolicyRule: %s", err) } - if err = d.Set("rule_tuple_count", res.RuleTupleCount); err != nil { - return fmt.Errorf("error setting rule_tuple_count in state: %s", err) + if err := d.Set("disabled", flattenComputeNetworkFirewallPolicyRuleDisabled(res["disabled"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkFirewallPolicyRule: %s", err) } return nil } + func resourceComputeNetworkFirewallPolicyRuleUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) - project, err := tpgresource.GetProject(d, config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - obj := &compute.NetworkFirewallPolicyRule{ - Action: dcl.String(d.Get("action").(string)), - Direction: compute.NetworkFirewallPolicyRuleDirectionEnumRef(d.Get("direction").(string)), - FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), - Match: expandComputeNetworkFirewallPolicyRuleMatch(d.Get("match")), - Priority: dcl.Int64(int64(d.Get("priority").(int))), - Description: dcl.String(d.Get("description").(string)), - Disabled: dcl.Bool(d.Get("disabled").(bool)), - EnableLogging: dcl.Bool(d.Get("enable_logging").(bool)), - Project: dcl.String(project), - RuleName: dcl.String(d.Get("rule_name").(string)), - SecurityProfileGroup: dcl.String(d.Get("security_profile_group").(string)), - TargetSecureTags: expandComputeNetworkFirewallPolicyRuleTargetSecureTagsArray(d.Get("target_secure_tags")), - TargetServiceAccounts: tpgdclresource.ExpandStringArray(d.Get("target_service_accounts")), - TlsInspect: dcl.Bool(d.Get("tls_inspect").(bool)), + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NetworkFirewallPolicyRule: %s", err) } - directive := tpgdclresource.UpdateDirective - userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) + billingProject = strings.TrimPrefix(project, "projects/") + + obj := make(map[string]interface{}) + ruleNameProp, err := expandComputeNetworkFirewallPolicyRuleRuleName(d.Get("rule_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("rule_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ruleNameProp)) { + obj["ruleName"] = ruleNameProp + } + descriptionProp, err := expandComputeNetworkFirewallPolicyRuleDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + priorityProp, err := expandComputeNetworkFirewallPolicyRulePriority(d.Get("priority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("priority"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, priorityProp)) { + obj["priority"] = priorityProp + } + matchProp, err := expandComputeNetworkFirewallPolicyRuleMatch(d.Get("match"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("match"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, matchProp)) { + obj["match"] = matchProp + } + actionProp, err := expandComputeNetworkFirewallPolicyRuleAction(d.Get("action"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("action"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, actionProp)) { + obj["action"] = actionProp + } + securityProfileGroupProp, err := expandComputeNetworkFirewallPolicyRuleSecurityProfileGroup(d.Get("security_profile_group"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("security_profile_group"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, securityProfileGroupProp)) { + obj["securityProfileGroup"] = securityProfileGroupProp + } + tlsInspectProp, err := expandComputeNetworkFirewallPolicyRuleTlsInspect(d.Get("tls_inspect"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tls_inspect"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tlsInspectProp)) { + obj["tlsInspect"] = tlsInspectProp + } + directionProp, err := expandComputeNetworkFirewallPolicyRuleDirection(d.Get("direction"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("direction"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, directionProp)) { + obj["direction"] = directionProp + } + enableLoggingProp, err := expandComputeNetworkFirewallPolicyRuleEnableLogging(d.Get("enable_logging"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_logging"); ok || !reflect.DeepEqual(v, enableLoggingProp) { + obj["enableLogging"] = enableLoggingProp + } + targetServiceAccountsProp, err := expandComputeNetworkFirewallPolicyRuleTargetServiceAccounts(d.Get("target_service_accounts"), d, config) if err != nil { return err + } else if v, ok := d.GetOkExists("target_service_accounts"); ok || !reflect.DeepEqual(v, targetServiceAccountsProp) { + obj["targetServiceAccounts"] = targetServiceAccountsProp + } + targetSecureTagsProp, err := expandComputeNetworkFirewallPolicyRuleTargetSecureTags(d.Get("target_secure_tags"), d, config) + if err != nil { + return err + } 
else if v, ok := d.GetOkExists("target_secure_tags"); ok || !reflect.DeepEqual(v, targetSecureTagsProp) { + obj["targetSecureTags"] = targetSecureTagsProp + } + disabledProp, err := expandComputeNetworkFirewallPolicyRuleDisabled(d.Get("disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disabledProp)) { + obj["disabled"] = disabledProp } - billingProject := "" + url, err := tpgresource.ReplaceVarsForId(d, config, "{{ComputeBasePath}}projects/{{project}}/global/firewallPolicies/{{firewall_policy}}/patchRule?priority={{priority}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating NetworkFirewallPolicyRule %q: %#v", d.Id(), obj) + headers := make(http.Header) + // err == nil indicates that the billing_project value was found if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating NetworkFirewallPolicyRule %q: %s", d.Id(), err) } else { - client.Config.BasePath = bp + log.Printf("[DEBUG] Finished updating NetworkFirewallPolicyRule %q: %#v", d.Id(), res) } - res, err := client.ApplyNetworkFirewallPolicyRule(context.Background(), obj, directive...) 
- if _, ok := err.(dcl.DiffAfterApplyError); ok { - log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error updating NetworkFirewallPolicyRule: %s", err) - } + err = ComputeOperationWaitTime( + config, res, tpgresource.GetResourceNameFromSelfLink(project), "Updating NetworkFirewallPolicyRule", userAgent, + d.Timeout(schema.TimeoutUpdate)) - log.Printf("[DEBUG] Finished creating NetworkFirewallPolicyRule %q: %#v", d.Id(), res) + if err != nil { + return err + } return resourceComputeNetworkFirewallPolicyRuleRead(d, meta) } func resourceComputeNetworkFirewallPolicyRuleDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) - project, err := tpgresource.GetProject(d, config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - obj := &compute.NetworkFirewallPolicyRule{ - Action: dcl.String(d.Get("action").(string)), - Direction: compute.NetworkFirewallPolicyRuleDirectionEnumRef(d.Get("direction").(string)), - FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), - Match: expandComputeNetworkFirewallPolicyRuleMatch(d.Get("match")), - Priority: dcl.Int64(int64(d.Get("priority").(int))), - Description: dcl.String(d.Get("description").(string)), - Disabled: dcl.Bool(d.Get("disabled").(bool)), - EnableLogging: dcl.Bool(d.Get("enable_logging").(bool)), - Project: dcl.String(project), - RuleName: dcl.String(d.Get("rule_name").(string)), - SecurityProfileGroup: dcl.String(d.Get("security_profile_group").(string)), - TargetSecureTags: expandComputeNetworkFirewallPolicyRuleTargetSecureTagsArray(d.Get("target_secure_tags")), - TargetServiceAccounts: tpgdclresource.ExpandStringArray(d.Get("target_service_accounts")), - TlsInspect: dcl.Bool(d.Get("tls_inspect").(bool)), + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NetworkFirewallPolicyRule: %s", err) } + billingProject = strings.TrimPrefix(project, "projects/") - log.Printf("[DEBUG] Deleting NetworkFirewallPolicyRule %q", d.Id()) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + url, err := tpgresource.ReplaceVarsForId(d, config, "{{ComputeBasePath}}projects/{{project}}/global/firewallPolicies/{{firewall_policy}}/removeRule?priority={{priority}}") if err != nil { return err } - billingProject := project + + var obj map[string]interface{} + // err == nil indicates that the billing_project value was found if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting NetworkFirewallPolicyRule %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "NetworkFirewallPolicyRule") } - if err := 
client.DeleteNetworkFirewallPolicyRule(context.Background(), obj); err != nil { - return fmt.Errorf("Error deleting NetworkFirewallPolicyRule: %s", err) + + err = ComputeOperationWaitTime( + config, res, tpgresource.GetResourceNameFromSelfLink(project), "Deleting NetworkFirewallPolicyRule", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err } - log.Printf("[DEBUG] Finished deleting NetworkFirewallPolicyRule %q", d.Id()) + log.Printf("[DEBUG] Finished deleting NetworkFirewallPolicyRule %q: %#v", d.Id(), res) return nil } func resourceComputeNetworkFirewallPolicyRuleImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { config := meta.(*transport_tpg.Config) - if err := tpgresource.ParseImportId([]string{ - "projects/(?P[^/]+)/global/firewallPolicies/(?P[^/]+)/rules/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", + "^projects/(?P[^/]+)/global/firewallPolicies/(?P[^/]+)/rules/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)$", }, d, config); err != nil { return nil, err } @@ -597,219 +743,523 @@ func resourceComputeNetworkFirewallPolicyRuleImport(d *schema.ResourceData, meta return []*schema.ResourceData{d}, nil } -func expandComputeNetworkFirewallPolicyRuleMatch(o interface{}) *compute.NetworkFirewallPolicyRuleMatch { - if o == nil { - return compute.EmptyNetworkFirewallPolicyRuleMatch - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return compute.EmptyNetworkFirewallPolicyRuleMatch +func flattenComputeNetworkFirewallPolicyRuleCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkFirewallPolicyRuleKind(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkFirewallPolicyRuleRuleName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkFirewallPolicyRuleDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkFirewallPolicyRulePriority(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } } - obj := objArr[0].(map[string]interface{}) - return &compute.NetworkFirewallPolicyRuleMatch{ - Layer4Configs: expandComputeNetworkFirewallPolicyRuleMatchLayer4ConfigsArray(obj["layer4_configs"]), - DestAddressGroups: tpgdclresource.ExpandStringArray(obj["dest_address_groups"]), - DestFqdns: tpgdclresource.ExpandStringArray(obj["dest_fqdns"]), - DestIPRanges: tpgdclresource.ExpandStringArray(obj["dest_ip_ranges"]), - DestRegionCodes: tpgdclresource.ExpandStringArray(obj["dest_region_codes"]), - DestThreatIntelligences: tpgdclresource.ExpandStringArray(obj["dest_threat_intelligences"]), - SrcAddressGroups: tpgdclresource.ExpandStringArray(obj["src_address_groups"]), - SrcFqdns: tpgdclresource.ExpandStringArray(obj["src_fqdns"]), - SrcIPRanges: tpgdclresource.ExpandStringArray(obj["src_ip_ranges"]), - SrcRegionCodes: tpgdclresource.ExpandStringArray(obj["src_region_codes"]), - SrcSecureTags: expandComputeNetworkFirewallPolicyRuleMatchSrcSecureTagsArray(obj["src_secure_tags"]), - SrcThreatIntelligences: tpgdclresource.ExpandStringArray(obj["src_threat_intelligences"]), + + // 
number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal } + + return v // let terraform core handle it otherwise } -func flattenComputeNetworkFirewallPolicyRuleMatch(obj *compute.NetworkFirewallPolicyRuleMatch) interface{} { - if obj == nil || obj.Empty() { +func flattenComputeNetworkFirewallPolicyRuleMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { return nil } - transformed := map[string]interface{}{ - "layer4_configs": flattenComputeNetworkFirewallPolicyRuleMatchLayer4ConfigsArray(obj.Layer4Configs), - "dest_address_groups": obj.DestAddressGroups, - "dest_fqdns": obj.DestFqdns, - "dest_ip_ranges": obj.DestIPRanges, - "dest_region_codes": obj.DestRegionCodes, - "dest_threat_intelligences": obj.DestThreatIntelligences, - "src_address_groups": obj.SrcAddressGroups, - "src_fqdns": obj.SrcFqdns, - "src_ip_ranges": obj.SrcIPRanges, - "src_region_codes": obj.SrcRegionCodes, - "src_secure_tags": flattenComputeNetworkFirewallPolicyRuleMatchSrcSecureTagsArray(obj.SrcSecureTags), - "src_threat_intelligences": obj.SrcThreatIntelligences, + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil } - + transformed := make(map[string]interface{}) + transformed["src_ip_ranges"] = + flattenComputeNetworkFirewallPolicyRuleMatchSrcIpRanges(original["srcIpRanges"], d, config) + transformed["dest_ip_ranges"] = + flattenComputeNetworkFirewallPolicyRuleMatchDestIpRanges(original["destIpRanges"], d, config) + transformed["layer4_configs"] = + flattenComputeNetworkFirewallPolicyRuleMatchLayer4Configs(original["layer4Configs"], d, config) + transformed["src_secure_tags"] = + flattenComputeNetworkFirewallPolicyRuleMatchSrcSecureTags(original["srcSecureTags"], d, config) + transformed["dest_address_groups"] = + flattenComputeNetworkFirewallPolicyRuleMatchDestAddressGroups(original["destAddressGroups"], d, config) + transformed["src_address_groups"] = + flattenComputeNetworkFirewallPolicyRuleMatchSrcAddressGroups(original["srcAddressGroups"], d, config) + transformed["src_fqdns"] = + flattenComputeNetworkFirewallPolicyRuleMatchSrcFqdns(original["srcFqdns"], d, config) + transformed["dest_fqdns"] = + flattenComputeNetworkFirewallPolicyRuleMatchDestFqdns(original["destFqdns"], d, config) + transformed["src_region_codes"] = + flattenComputeNetworkFirewallPolicyRuleMatchSrcRegionCodes(original["srcRegionCodes"], d, config) + transformed["dest_region_codes"] = + flattenComputeNetworkFirewallPolicyRuleMatchDestRegionCodes(original["destRegionCodes"], d, config) + transformed["dest_threat_intelligences"] = + flattenComputeNetworkFirewallPolicyRuleMatchDestThreatIntelligences(original["destThreatIntelligences"], d, config) + transformed["src_threat_intelligences"] = + flattenComputeNetworkFirewallPolicyRuleMatchSrcThreatIntelligences(original["srcThreatIntelligences"], d, config) return []interface{}{transformed} +} +func flattenComputeNetworkFirewallPolicyRuleMatchSrcIpRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} +func flattenComputeNetworkFirewallPolicyRuleMatchDestIpRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } -func expandComputeNetworkFirewallPolicyRuleMatchLayer4ConfigsArray(o interface{}) []compute.NetworkFirewallPolicyRuleMatchLayer4Configs { - if o == nil { - return make([]compute.NetworkFirewallPolicyRuleMatchLayer4Configs, 0) - } - objs := o.([]interface{}) 
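The priority flattener above (and the ruleTupleCount flattener below) checks both a string and a float64 because encoding/json decodes every JSON number into float64, while int64 ("fixed64") fields can come back from the API serialized as decimal strings. A self-contained sketch of that fallback — flattenInt64 is an illustrative stand-in built on strconv, not the tpgresource.StringToFixed64 helper itself:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// flattenInt64 mimics the two-branch handling used by the flatteners above.
func flattenInt64(v interface{}) interface{} {
	if s, ok := v.(string); ok { // fixed64 serialized as a string
		if n, err := strconv.ParseInt(s, 10, 64); err == nil {
			return n
		}
	}
	if f, ok := v.(float64); ok { // ordinary JSON number
		return int(f)
	}
	return v // anything else is left for Terraform core to handle
}

func main() {
	for _, raw := range []string{`{"priority":"2147483647"}`, `{"priority":1000}`} {
		var m map[string]interface{}
		_ = json.Unmarshal([]byte(raw), &m)
		fmt.Println(flattenInt64(m["priority"])) // 2147483647, then 1000
	}
}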
- if len(objs) == 0 || objs[0] == nil { - return make([]compute.NetworkFirewallPolicyRuleMatchLayer4Configs, 0) +func flattenComputeNetworkFirewallPolicyRuleMatchLayer4Configs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "ip_protocol": flattenComputeNetworkFirewallPolicyRuleMatchLayer4ConfigsIpProtocol(original["ipProtocol"], d, config), + "ports": flattenComputeNetworkFirewallPolicyRuleMatchLayer4ConfigsPorts(original["ports"], d, config), + }) } + return transformed +} +func flattenComputeNetworkFirewallPolicyRuleMatchLayer4ConfigsIpProtocol(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} - items := make([]compute.NetworkFirewallPolicyRuleMatchLayer4Configs, 0, len(objs)) - for _, item := range objs { - i := expandComputeNetworkFirewallPolicyRuleMatchLayer4Configs(item) - items = append(items, *i) +func flattenComputeNetworkFirewallPolicyRuleMatchLayer4ConfigsPorts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkFirewallPolicyRuleMatchSrcSecureTags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenComputeNetworkFirewallPolicyRuleMatchSrcSecureTagsName(original["name"], d, config), + "state": flattenComputeNetworkFirewallPolicyRuleMatchSrcSecureTagsState(original["state"], d, config), + }) } + return transformed +} +func flattenComputeNetworkFirewallPolicyRuleMatchSrcSecureTagsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} - return items +func flattenComputeNetworkFirewallPolicyRuleMatchSrcSecureTagsState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } -func expandComputeNetworkFirewallPolicyRuleMatchLayer4Configs(o interface{}) *compute.NetworkFirewallPolicyRuleMatchLayer4Configs { - if o == nil { - return compute.EmptyNetworkFirewallPolicyRuleMatchLayer4Configs - } +func flattenComputeNetworkFirewallPolicyRuleMatchDestAddressGroups(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} - obj := o.(map[string]interface{}) - return &compute.NetworkFirewallPolicyRuleMatchLayer4Configs{ - IPProtocol: dcl.String(obj["ip_protocol"].(string)), - Ports: tpgdclresource.ExpandStringArray(obj["ports"]), - } +func flattenComputeNetworkFirewallPolicyRuleMatchSrcAddressGroups(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } -func flattenComputeNetworkFirewallPolicyRuleMatchLayer4ConfigsArray(objs []compute.NetworkFirewallPolicyRuleMatchLayer4Configs) []interface{} { - if objs == nil { - return nil - } +func flattenComputeNetworkFirewallPolicyRuleMatchSrcFqdns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} 
{ + return v +} - items := []interface{}{} - for _, item := range objs { - i := flattenComputeNetworkFirewallPolicyRuleMatchLayer4Configs(&item) - items = append(items, i) - } +func flattenComputeNetworkFirewallPolicyRuleMatchDestFqdns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} - return items +func flattenComputeNetworkFirewallPolicyRuleMatchSrcRegionCodes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } -func flattenComputeNetworkFirewallPolicyRuleMatchLayer4Configs(obj *compute.NetworkFirewallPolicyRuleMatchLayer4Configs) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "ip_protocol": obj.IPProtocol, - "ports": obj.Ports, - } +func flattenComputeNetworkFirewallPolicyRuleMatchDestRegionCodes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} - return transformed +func flattenComputeNetworkFirewallPolicyRuleMatchDestThreatIntelligences(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} +func flattenComputeNetworkFirewallPolicyRuleMatchSrcThreatIntelligences(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } -func expandComputeNetworkFirewallPolicyRuleMatchSrcSecureTagsArray(o interface{}) []compute.NetworkFirewallPolicyRuleMatchSrcSecureTags { - if o == nil { - return make([]compute.NetworkFirewallPolicyRuleMatchSrcSecureTags, 0) - } - objs := o.([]interface{}) - if len(objs) == 0 || objs[0] == nil { - return make([]compute.NetworkFirewallPolicyRuleMatchSrcSecureTags, 0) - } +func flattenComputeNetworkFirewallPolicyRuleAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} - items := make([]compute.NetworkFirewallPolicyRuleMatchSrcSecureTags, 0, len(objs)) - for _, item := range objs { - i := expandComputeNetworkFirewallPolicyRuleMatchSrcSecureTags(item) - items = append(items, *i) - } +func flattenComputeNetworkFirewallPolicyRuleSecurityProfileGroup(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} - return items +func flattenComputeNetworkFirewallPolicyRuleTlsInspect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } -func expandComputeNetworkFirewallPolicyRuleMatchSrcSecureTags(o interface{}) *compute.NetworkFirewallPolicyRuleMatchSrcSecureTags { - if o == nil { - return compute.EmptyNetworkFirewallPolicyRuleMatchSrcSecureTags - } +func flattenComputeNetworkFirewallPolicyRuleDirection(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} - obj := o.(map[string]interface{}) - return &compute.NetworkFirewallPolicyRuleMatchSrcSecureTags{ - Name: dcl.String(obj["name"].(string)), - } +func flattenComputeNetworkFirewallPolicyRuleEnableLogging(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } -func flattenComputeNetworkFirewallPolicyRuleMatchSrcSecureTagsArray(objs []compute.NetworkFirewallPolicyRuleMatchSrcSecureTags) []interface{} { - if objs == nil { - return nil +func flattenComputeNetworkFirewallPolicyRuleRuleTupleCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } } - 
items := []interface{}{} - for _, item := range objs { - i := flattenComputeNetworkFirewallPolicyRuleMatchSrcSecureTags(&item) - items = append(items, i) + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal } - return items + return v // let terraform core handle it otherwise } -func flattenComputeNetworkFirewallPolicyRuleMatchSrcSecureTags(obj *compute.NetworkFirewallPolicyRuleMatchSrcSecureTags) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "name": obj.Name, - "state": obj.State, - } +func flattenComputeNetworkFirewallPolicyRuleTargetServiceAccounts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} +func flattenComputeNetworkFirewallPolicyRuleTargetSecureTags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenComputeNetworkFirewallPolicyRuleTargetSecureTagsName(original["name"], d, config), + "state": flattenComputeNetworkFirewallPolicyRuleTargetSecureTagsState(original["state"], d, config), + }) + } return transformed +} +func flattenComputeNetworkFirewallPolicyRuleTargetSecureTagsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkFirewallPolicyRuleTargetSecureTagsState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkFirewallPolicyRuleDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandComputeNetworkFirewallPolicyRuleRuleName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} +func expandComputeNetworkFirewallPolicyRuleDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil } -func expandComputeNetworkFirewallPolicyRuleTargetSecureTagsArray(o interface{}) []compute.NetworkFirewallPolicyRuleTargetSecureTags { - if o == nil { - return make([]compute.NetworkFirewallPolicyRuleTargetSecureTags, 0) + +func expandComputeNetworkFirewallPolicyRulePriority(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkFirewallPolicyRuleMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) - objs := o.([]interface{}) - if len(objs) == 0 || objs[0] == nil { - return make([]compute.NetworkFirewallPolicyRuleTargetSecureTags, 0) + transformedSrcIpRanges, err := expandComputeNetworkFirewallPolicyRuleMatchSrcIpRanges(original["src_ip_ranges"], d, config) + if err != nil { + return nil, err + } else { + transformed["srcIpRanges"] = transformedSrcIpRanges } - items := make([]compute.NetworkFirewallPolicyRuleTargetSecureTags, 0, len(objs)) - for _, item := 
range objs { - i := expandComputeNetworkFirewallPolicyRuleTargetSecureTags(item) - items = append(items, *i) + transformedDestIpRanges, err := expandComputeNetworkFirewallPolicyRuleMatchDestIpRanges(original["dest_ip_ranges"], d, config) + if err != nil { + return nil, err + } else { + transformed["destIpRanges"] = transformedDestIpRanges } - return items -} + transformedLayer4Configs, err := expandComputeNetworkFirewallPolicyRuleMatchLayer4Configs(original["layer4_configs"], d, config) + if err != nil { + return nil, err + } else { + transformed["layer4Configs"] = transformedLayer4Configs + } -func expandComputeNetworkFirewallPolicyRuleTargetSecureTags(o interface{}) *compute.NetworkFirewallPolicyRuleTargetSecureTags { - if o == nil { - return compute.EmptyNetworkFirewallPolicyRuleTargetSecureTags + transformedSrcSecureTags, err := expandComputeNetworkFirewallPolicyRuleMatchSrcSecureTags(original["src_secure_tags"], d, config) + if err != nil { + return nil, err + } else { + transformed["srcSecureTags"] = transformedSrcSecureTags } - obj := o.(map[string]interface{}) - return &compute.NetworkFirewallPolicyRuleTargetSecureTags{ - Name: dcl.String(obj["name"].(string)), + transformedDestAddressGroups, err := expandComputeNetworkFirewallPolicyRuleMatchDestAddressGroups(original["dest_address_groups"], d, config) + if err != nil { + return nil, err + } else { + transformed["destAddressGroups"] = transformedDestAddressGroups } -} -func flattenComputeNetworkFirewallPolicyRuleTargetSecureTagsArray(objs []compute.NetworkFirewallPolicyRuleTargetSecureTags) []interface{} { - if objs == nil { - return nil + transformedSrcAddressGroups, err := expandComputeNetworkFirewallPolicyRuleMatchSrcAddressGroups(original["src_address_groups"], d, config) + if err != nil { + return nil, err + } else { + transformed["srcAddressGroups"] = transformedSrcAddressGroups } - items := []interface{}{} - for _, item := range objs { - i := flattenComputeNetworkFirewallPolicyRuleTargetSecureTags(&item) - items = append(items, i) + transformedSrcFqdns, err := expandComputeNetworkFirewallPolicyRuleMatchSrcFqdns(original["src_fqdns"], d, config) + if err != nil { + return nil, err + } else { + transformed["srcFqdns"] = transformedSrcFqdns } - return items -} + transformedDestFqdns, err := expandComputeNetworkFirewallPolicyRuleMatchDestFqdns(original["dest_fqdns"], d, config) + if err != nil { + return nil, err + } else { + transformed["destFqdns"] = transformedDestFqdns + } -func flattenComputeNetworkFirewallPolicyRuleTargetSecureTags(obj *compute.NetworkFirewallPolicyRuleTargetSecureTags) interface{} { - if obj == nil || obj.Empty() { - return nil + transformedSrcRegionCodes, err := expandComputeNetworkFirewallPolicyRuleMatchSrcRegionCodes(original["src_region_codes"], d, config) + if err != nil { + return nil, err + } else { + transformed["srcRegionCodes"] = transformedSrcRegionCodes } - transformed := map[string]interface{}{ - "name": obj.Name, - "state": obj.State, + + transformedDestRegionCodes, err := expandComputeNetworkFirewallPolicyRuleMatchDestRegionCodes(original["dest_region_codes"], d, config) + if err != nil { + return nil, err + } else { + transformed["destRegionCodes"] = transformedDestRegionCodes } - return transformed + transformedDestThreatIntelligences, err := expandComputeNetworkFirewallPolicyRuleMatchDestThreatIntelligences(original["dest_threat_intelligences"], d, config) + if err != nil { + return nil, err + } else { + transformed["destThreatIntelligences"] = transformedDestThreatIntelligences + } + + 
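+ // Each transformed* assignment in this expander copies one snake_case
+ // Terraform attribute (e.g. "dest_region_codes") into the camelCase field
+ // the compute API expects (e.g. "destRegionCodes"); for plain string-list
+ // fields the sub-expanders simply pass the value through unchanged.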
transformedSrcThreatIntelligences, err := expandComputeNetworkFirewallPolicyRuleMatchSrcThreatIntelligences(original["src_threat_intelligences"], d, config) + if err != nil { + return nil, err + } else { + transformed["srcThreatIntelligences"] = transformedSrcThreatIntelligences + } + + return transformed, nil +} + +func expandComputeNetworkFirewallPolicyRuleMatchSrcIpRanges(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkFirewallPolicyRuleMatchDestIpRanges(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkFirewallPolicyRuleMatchLayer4Configs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIpProtocol, err := expandComputeNetworkFirewallPolicyRuleMatchLayer4ConfigsIpProtocol(original["ip_protocol"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIpProtocol); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ipProtocol"] = transformedIpProtocol + } + + transformedPorts, err := expandComputeNetworkFirewallPolicyRuleMatchLayer4ConfigsPorts(original["ports"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPorts); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ports"] = transformedPorts + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeNetworkFirewallPolicyRuleMatchLayer4ConfigsIpProtocol(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkFirewallPolicyRuleMatchLayer4ConfigsPorts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkFirewallPolicyRuleMatchSrcSecureTags(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandComputeNetworkFirewallPolicyRuleMatchSrcSecureTagsName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedState, err := expandComputeNetworkFirewallPolicyRuleMatchSrcSecureTagsState(original["state"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedState); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["state"] = transformedState + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeNetworkFirewallPolicyRuleMatchSrcSecureTagsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkFirewallPolicyRuleMatchSrcSecureTagsState(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkFirewallPolicyRuleMatchDestAddressGroups(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkFirewallPolicyRuleMatchSrcAddressGroups(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkFirewallPolicyRuleMatchSrcFqdns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkFirewallPolicyRuleMatchDestFqdns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkFirewallPolicyRuleMatchSrcRegionCodes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkFirewallPolicyRuleMatchDestRegionCodes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkFirewallPolicyRuleMatchDestThreatIntelligences(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkFirewallPolicyRuleMatchSrcThreatIntelligences(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkFirewallPolicyRuleAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkFirewallPolicyRuleSecurityProfileGroup(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkFirewallPolicyRuleTlsInspect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkFirewallPolicyRuleDirection(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkFirewallPolicyRuleEnableLogging(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkFirewallPolicyRuleTargetServiceAccounts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkFirewallPolicyRuleTargetSecureTags(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandComputeNetworkFirewallPolicyRuleTargetSecureTagsName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedState, err := expandComputeNetworkFirewallPolicyRuleTargetSecureTagsState(original["state"], d, config) + if err != nil { + return nil, err + } else if val 
:= reflect.ValueOf(transformedState); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["state"] = transformedState + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeNetworkFirewallPolicyRuleTargetSecureTagsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkFirewallPolicyRuleTargetSecureTagsState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} +func expandComputeNetworkFirewallPolicyRuleDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_security_policy_rule_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy_rule_sweeper.go similarity index 90% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_security_policy_rule_sweeper.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy_rule_sweeper.go index e245c5fedba..f4ca5c5b92a 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_security_policy_rule_sweeper.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy_rule_sweeper.go @@ -30,12 +30,12 @@ import ( ) func init() { - sweeper.AddTestSweepers("ComputeSecurityPolicyRule", testSweepComputeSecurityPolicyRule) + sweeper.AddTestSweepers("ComputeNetworkFirewallPolicyRule", testSweepComputeNetworkFirewallPolicyRule) } // At the time of writing, the CI only passes us-central1 as the region -func testSweepComputeSecurityPolicyRule(region string) error { - resourceName := "ComputeSecurityPolicyRule" +func testSweepComputeNetworkFirewallPolicyRule(region string) error { + resourceName := "ComputeNetworkFirewallPolicyRule" log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) config, err := sweeper.SharedConfigForRegion(region) @@ -64,7 +64,7 @@ func testSweepComputeSecurityPolicyRule(region string) error { }, } - listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/securityPolicies/{{security_policy}}", "?")[0] + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/firewallPolicies/{{firewall_policy}}", "?")[0] listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) if err != nil { log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) @@ -83,7 +83,7 @@ func testSweepComputeSecurityPolicyRule(region string) error { return nil } - resourceList, ok := res["securityPolicyRules"] + resourceList, ok := res["networkFirewallPolicyRules"] if !ok { log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") return nil @@ -108,7 +108,7 @@ func testSweepComputeSecurityPolicyRule(region string) error { continue } - deleteTemplate := 
"https://compute.googleapis.com/compute/v1/projects/{{project}}/global/securityPolicies/{{security_policy}}/removeRule?priority={{priority}}" + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/firewallPolicies/{{firewall_policy}}/removeRule?priority={{priority}}" deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) if err != nil { log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_node_template.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_node_template.go index 78d57096afe..52b9b128529 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_node_template.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_node_template.go @@ -52,6 +52,31 @@ func ResourceComputeNodeTemplate() *schema.Resource { ), Schema: map[string]*schema.Schema{ + "accelerators": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `List of the type and count of accelerator cards attached to the +node template`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerator_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The number of the guest accelerator cards exposed to this +node template.`, + }, + "accelerator_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Full or partial URL of the accelerator type resource to expose +to this node template.`, + }, + }, + }, + }, "cpu_overcommit_type": { Type: schema.TypeString, Optional: true, @@ -226,6 +251,12 @@ func resourceComputeNodeTemplateCreate(d *schema.ResourceData, meta interface{}) } else if v, ok := d.GetOkExists("server_binding"); !tpgresource.IsEmptyValue(reflect.ValueOf(serverBindingProp)) && (ok || !reflect.DeepEqual(v, serverBindingProp)) { obj["serverBinding"] = serverBindingProp } + acceleratorsProp, err := expandComputeNodeTemplateAccelerators(d.Get("accelerators"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("accelerators"); !tpgresource.IsEmptyValue(reflect.ValueOf(acceleratorsProp)) && (ok || !reflect.DeepEqual(v, acceleratorsProp)) { + obj["accelerators"] = acceleratorsProp + } cpuOvercommitTypeProp, err := expandComputeNodeTemplateCpuOvercommitType(d.Get("cpu_overcommit_type"), d, config) if err != nil { return err @@ -358,6 +389,9 @@ func resourceComputeNodeTemplateRead(d *schema.ResourceData, meta interface{}) e if err := d.Set("server_binding", flattenComputeNodeTemplateServerBinding(res["serverBinding"], d, config)); err != nil { return fmt.Errorf("Error reading NodeTemplate: %s", err) } + if err := d.Set("accelerators", flattenComputeNodeTemplateAccelerators(res["accelerators"], d, config)); err != nil { + return fmt.Errorf("Error reading NodeTemplate: %s", err) + } if err := d.Set("cpu_overcommit_type", flattenComputeNodeTemplateCpuOvercommitType(res["cpuOvercommitType"], d, config)); err != nil { return fmt.Errorf("Error reading NodeTemplate: %s", err) } @@ -514,6 +548,46 @@ func flattenComputeNodeTemplateServerBindingType(v interface{}, d *schema.Resour return v } +func flattenComputeNodeTemplateAccelerators(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "accelerator_count": flattenComputeNodeTemplateAcceleratorsAcceleratorCount(original["acceleratorCount"], d, config), + "accelerator_type": flattenComputeNodeTemplateAcceleratorsAcceleratorType(original["acceleratorType"], d, config), + }) + } + return transformed +} +func flattenComputeNodeTemplateAcceleratorsAcceleratorCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeNodeTemplateAcceleratorsAcceleratorType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenComputeNodeTemplateCpuOvercommitType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -616,6 +690,43 @@ func expandComputeNodeTemplateServerBindingType(v interface{}, d tpgresource.Ter return v, nil } +func expandComputeNodeTemplateAccelerators(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAcceleratorCount, err := expandComputeNodeTemplateAcceleratorsAcceleratorCount(original["accelerator_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAcceleratorCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["acceleratorCount"] = transformedAcceleratorCount + } + + transformedAcceleratorType, err := expandComputeNodeTemplateAcceleratorsAcceleratorType(original["accelerator_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAcceleratorType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["acceleratorType"] = transformedAcceleratorType + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeNodeTemplateAcceleratorsAcceleratorCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNodeTemplateAcceleratorsAcceleratorType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandComputeNodeTemplateCpuOvercommitType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_per_instance_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_per_instance_config.go index 
8b07f0105a2..42c8abe85ec 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_per_instance_config.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_per_instance_config.go @@ -174,39 +174,39 @@ func ResourceComputePerInstanceConfig() *schema.Resource { "minimal_action": { Type: schema.TypeString, Optional: true, - Default: "NONE", Description: `The minimal action to perform on the instance during an update. Default is 'NONE'. Possible values are: * REPLACE * RESTART * REFRESH * NONE`, + Default: "NONE", }, "most_disruptive_allowed_action": { Type: schema.TypeString, Optional: true, - Default: "REPLACE", Description: `The most disruptive action to perform on the instance during an update. Default is 'REPLACE'. Possible values are: * REPLACE * RESTART * REFRESH * NONE`, + Default: "REPLACE", }, "remove_instance_on_destroy": { Type: schema.TypeBool, Optional: true, - Default: false, Description: `When true, deleting this config will immediately remove the underlying instance. When false, deleting this config will use the behavior as determined by remove_instance_state_on_destroy.`, + Default: false, }, "remove_instance_state_on_destroy": { Type: schema.TypeBool, Optional: true, - Default: false, Description: `When true, deleting this config will immediately remove any specified state from the underlying instance. When false, deleting this config will *not* immediately remove any state from the underlying instance. State will be removed on the next instance recreation or update.`, + Default: false, }, "project": { Type: schema.TypeString, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_project_metadata_item.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_project_metadata_item.go index d85ee38d42e..eb2ff1c6b97 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_project_metadata_item.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_project_metadata_item.go @@ -30,7 +30,7 @@ func ResourceComputeProjectMetadataItem() *schema.Resource { Update: resourceComputeProjectMetadataItemUpdate, Delete: resourceComputeProjectMetadataItemDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + State: resourceComputeProjectMetadataItemImportState, }, CustomizeDiff: customdiff.All( @@ -179,6 +179,25 @@ func resourceComputeProjectMetadataItemDelete(d *schema.ResourceData, meta inter return nil } +func resourceComputeProjectMetadataItemImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P<project>[^/]+)/meta-data/(?P<key>[^/]+)", + "(?P<key>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{key}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func updateComputeCommonInstanceMetadata(config *transport_tpg.Config, projectID, key, userAgent string, afterVal *string, timeout time.Duration,
failIfPresent metadataPresentBehavior) error { updateMD := func() error { lockName := fmt.Sprintf("projects/%s/commoninstancemetadata", projectID) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_public_advertised_prefix.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_public_advertised_prefix.go index 99c981cdaa3..aa6efaead81 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_public_advertised_prefix.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_public_advertised_prefix.go @@ -80,6 +80,11 @@ except the last character, which cannot be a dash.`, ForceNew: true, Description: `An optional description of this resource.`, }, + "shared_secret": { + Type: schema.TypeString, + Computed: true, + Description: `Output Only. The shared secret to be used for reverse DNS verification.`, + }, "project": { Type: schema.TypeString, Optional: true, @@ -238,6 +243,9 @@ func resourceComputePublicAdvertisedPrefixRead(d *schema.ResourceData, meta inte if err := d.Set("ip_cidr_range", flattenComputePublicAdvertisedPrefixIpCidrRange(res["ipCidrRange"], d, config)); err != nil { return fmt.Errorf("Error reading PublicAdvertisedPrefix: %s", err) } + if err := d.Set("shared_secret", flattenComputePublicAdvertisedPrefixSharedSecret(res["sharedSecret"], d, config)); err != nil { + return fmt.Errorf("Error reading PublicAdvertisedPrefix: %s", err) + } if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { return fmt.Errorf("Error reading PublicAdvertisedPrefix: %s", err) } @@ -337,6 +345,10 @@ func flattenComputePublicAdvertisedPrefixIpCidrRange(v interface{}, d *schema.Re return v } +func flattenComputePublicAdvertisedPrefixSharedSecret(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func expandComputePublicAdvertisedPrefixDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_backend_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_backend_service.go index 07dac285864..cbd2b3e76ef 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_backend_service.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_backend_service.go @@ -391,10 +391,8 @@ Defaults to 3.`, Type: schema.TypeInt, Optional: true, Description: `Time for which instance will be drained (not accept new -connections, but still work to finish started). 
- -From version 6.0.0 ConnectionDrainingTimeoutSec default value will be 300 to match default GCP value.`, - Default: 0, +connections, but still work to finish started).`, + Default: 300, }, "consistent_hash": { @@ -561,19 +559,25 @@ or serverless NEG as a backend.`, }, "iap": { Type: schema.TypeList, + Computed: true, Optional: true, Description: `Settings for enabling Cloud Identity Aware Proxy`, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Whether the serving infrastructure will authenticate and authorize all incoming requests.`, + }, "oauth2_client_id": { Type: schema.TypeString, - Required: true, + Optional: true, Description: `OAuth2 Client ID for IAP`, }, "oauth2_client_secret": { Type: schema.TypeString, - Required: true, + Optional: true, Description: `OAuth2 Client Secret for IAP`, Sensitive: true, }, @@ -586,6 +590,12 @@ or serverless NEG as a backend.`, }, }, }, + "ip_address_selection_policy": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"IPV4_ONLY", "PREFER_IPV6", "IPV6_ONLY", ""}), + Description: `Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). Possible values: ["IPV4_ONLY", "PREFER_IPV6", "IPV6_ONLY"]`, + }, "load_balancing_scheme": { Type: schema.TypeString, Optional: true, @@ -629,7 +639,8 @@ The possible values are: Maglev, refer to https://ai.google/research/pubs/pub44824 * 'WEIGHTED_MAGLEV': Per-instance weighted Load Balancing via health check - reported weights. If set, the Backend Service must + reported weights. Only applicable to loadBalancingScheme + EXTERNAL. If set, the Backend Service must configure a non legacy HTTP-based Health Check, and health check replies are expected to contain non-standard HTTP response header field @@ -641,7 +652,7 @@ The possible values are: UNAVAILABLE_WEIGHT. Otherwise, Load Balancing remains equal-weight. -This field is applicable to either: +locality_lb_policy is applicable to either: * A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and loadBalancingScheme set to INTERNAL_MANAGED. @@ -650,7 +661,7 @@ This field is applicable to either: Load Balancing). Only MAGLEV and WEIGHTED_MAGLEV values are possible for External Network Load Balancing. The default is MAGLEV. -If session_affinity is not NONE, and this field is not set to MAGLEV, WEIGHTED_MAGLEV, +If session_affinity is not NONE, and locality_lb_policy is not set to MAGLEV, WEIGHTED_MAGLEV, or RING_HASH, session affinity settings will not take effect. Only ROUND_ROBIN and RING_HASH are supported when the backend service is referenced @@ -698,10 +709,7 @@ This field can only be specified when the load balancing scheme is set to INTERN Optional: true, Description: `Settings controlling eviction of unhealthy hosts from the load balancing pool. This field is applicable only when the 'load_balancing_scheme' is set -to INTERNAL_MANAGED and the 'protocol' is set to HTTP, HTTPS, or HTTP2. - -From version 6.0.0 outlierDetection default terraform values will be removed to match default GCP value. 
-Default values are enforce by GCP without providing them.`, +to INTERNAL_MANAGED and the 'protocol' is set to HTTP, HTTPS, or HTTP2.`, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -737,7 +745,6 @@ less than one second are represented with a 0 'seconds' field and a positive Description: `Number of errors before a host is ejected from the connection pool. When the backend host is accessed over HTTP, a 5xx return code qualifies as an error. Defaults to 5.`, - Default: 5, AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, }, "consecutive_gateway_failure": { @@ -746,7 +753,6 @@ Defaults to 5.`, Description: `The number of consecutive gateway failures (502, 503, 504 status or connection errors that are mapped to one of those status codes) before a consecutive gateway failure ejection occurs. Defaults to 5.`, - Default: 5, AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, }, "enforcing_consecutive_errors": { @@ -755,7 +761,6 @@ gateway failure ejection occurs. Defaults to 5.`, Description: `The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive 5xx. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.`, - Default: 100, AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, }, "enforcing_consecutive_gateway_failure": { @@ -764,7 +769,6 @@ ejection or to ramp it up slowly. Defaults to 100.`, Description: `The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive gateway failures. This setting can be used to disable ejection or to ramp it up slowly. 
Defaults to 0.`, - Default: 0, AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, }, "enforcing_success_rate": { @@ -773,7 +777,6 @@ used to disable ejection or to ramp it up slowly. Defaults to 0.`, Description: `The percentage chance that a host will be actually ejected when an outlier status is detected through success rate statistics. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.`, - Default: 100, AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, }, "interval": { @@ -806,7 +809,6 @@ less than one second are represented with a 0 'seconds' field and a positive Optional: true, Description: `Maximum percentage of hosts in the load balancing pool for the backend service that can be ejected. Defaults to 10%.`, - Default: 10, AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, }, "success_rate_minimum_hosts": { @@ -816,7 +818,6 @@ that can be ejected. Defaults to 10%.`, success rate outliers. If the number of hosts is less than this setting, outlier detection via success rate statistics is not performed for any host in the cluster. Defaults to 5.`, - Default: 5, AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, }, "success_rate_request_volume": { @@ -827,7 +828,6 @@ defined by the interval duration above) to include this host in success rate based outlier detection. If the volume is lower than this setting, outlier detection via success rate statistics is not performed for that host. 
Defaults to 100.`, - Default: 100, AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, }, "success_rate_stdev_factor": { @@ -839,7 +839,6 @@ rate, and the product of this factor and the standard deviation of the mean success rate: mean - (stdev * success_rate_stdev_factor). This factor is divided by a thousand to get a double. That is, if the desired factor is 1.9, the runtime value should be 1900. Defaults to 1900.`, - Default: 1900, AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, }, }, @@ -878,9 +877,56 @@ If it is not provided, the provider region is used.`, Type: schema.TypeString, Computed: true, Optional: true, - ValidateFunc: verify.ValidateEnum([]string{"NONE", "CLIENT_IP", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", "HEADER_FIELD", "HTTP_COOKIE", "CLIENT_IP_NO_DESTINATION", ""}), + ValidateFunc: verify.ValidateEnum([]string{"NONE", "CLIENT_IP", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", "HEADER_FIELD", "HTTP_COOKIE", "CLIENT_IP_NO_DESTINATION", "STRONG_COOKIE_AFFINITY", ""}), Description: `Type of session affinity to use. The default is NONE. Session affinity is -not applicable if the protocol is UDP. Possible values: ["NONE", "CLIENT_IP", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", "HEADER_FIELD", "HTTP_COOKIE", "CLIENT_IP_NO_DESTINATION"]`, +not applicable if the protocol is UDP. Possible values: ["NONE", "CLIENT_IP", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", "HEADER_FIELD", "HTTP_COOKIE", "CLIENT_IP_NO_DESTINATION", "STRONG_COOKIE_AFFINITY"]`, + }, + "strong_session_affinity_cookie": { + Type: schema.TypeList, + Optional: true, + Description: `Describes the HTTP cookie used for stateful session affinity. 
This field is applicable and required if the sessionAffinity is set to STRONG_COOKIE_AFFINITY.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the cookie.`, + AtLeastOneOf: []string{"strong_session_affinity_cookie.0.ttl", "strong_session_affinity_cookie.0.name", "strong_session_affinity_cookie.0.path"}, + }, + "path": { + Type: schema.TypeString, + Optional: true, + Description: `Path to set for the cookie.`, + AtLeastOneOf: []string{"strong_session_affinity_cookie.0.ttl", "strong_session_affinity_cookie.0.name", "strong_session_affinity_cookie.0.path"}, + }, + "ttl": { + Type: schema.TypeList, + Optional: true, + Description: `Lifetime of the cookie.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "seconds": { + Type: schema.TypeInt, + Required: true, + Description: `Span of time at a resolution of a second. +Must be from 0 to 315,576,000,000 inclusive.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + Description: `Span of time that's a fraction of a second at nanosecond +resolution. Durations less than one second are represented +with a 0 seconds field and a positive nanos field. Must +be from 0 to 999,999,999 inclusive.`, + }, + }, + }, + AtLeastOneOf: []string{"strong_session_affinity_cookie.0.ttl", "strong_session_affinity_cookie.0.name", "strong_session_affinity_cookie.0.path"}, + }, + }, + }, }, "timeout_sec": { Type: schema.TypeInt, @@ -957,10 +1003,8 @@ partial URL.`, Description: `Specifies the balancing mode for this backend. See the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode) -for an explanation of load balancing modes. - -From version 6.0.0 default value will be UTILIZATION to match default GCP value. Default value: "CONNECTION" Possible values: ["UTILIZATION", "RATE", "CONNECTION"]`, - Default: "CONNECTION", +for an explanation of load balancing modes. 
Default value: "UTILIZATION" Possible values: ["UTILIZATION", "RATE", "CONNECTION"]`, + Default: "UTILIZATION", }, "capacity_scaler": { Type: schema.TypeFloat, @@ -1144,6 +1188,12 @@ func resourceComputeRegionBackendServiceCreate(d *schema.ResourceData, meta inte } else if v, ok := d.GetOkExists("iap"); ok || !reflect.DeepEqual(v, iapProp) { obj["iap"] = iapProp } + ipAddressSelectionPolicyProp, err := expandComputeRegionBackendServiceIpAddressSelectionPolicy(d.Get("ip_address_selection_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ip_address_selection_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(ipAddressSelectionPolicyProp)) && (ok || !reflect.DeepEqual(v, ipAddressSelectionPolicyProp)) { + obj["ipAddressSelectionPolicy"] = ipAddressSelectionPolicyProp + } loadBalancingSchemeProp, err := expandComputeRegionBackendServiceLoadBalancingScheme(d.Get("load_balancing_scheme"), d, config) if err != nil { return err @@ -1186,6 +1236,12 @@ func resourceComputeRegionBackendServiceCreate(d *schema.ResourceData, meta inte } else if v, ok := d.GetOkExists("session_affinity"); !tpgresource.IsEmptyValue(reflect.ValueOf(sessionAffinityProp)) && (ok || !reflect.DeepEqual(v, sessionAffinityProp)) { obj["sessionAffinity"] = sessionAffinityProp } + strongSessionAffinityCookieProp, err := expandComputeRegionBackendServiceStrongSessionAffinityCookie(d.Get("strong_session_affinity_cookie"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("strong_session_affinity_cookie"); !tpgresource.IsEmptyValue(reflect.ValueOf(strongSessionAffinityCookieProp)) && (ok || !reflect.DeepEqual(v, strongSessionAffinityCookieProp)) { + obj["strongSessionAffinityCookie"] = strongSessionAffinityCookieProp + } timeoutSecProp, err := expandComputeRegionBackendServiceTimeoutSec(d.Get("timeout_sec"), d, config) if err != nil { return err @@ -1380,6 +1436,9 @@ func resourceComputeRegionBackendServiceRead(d *schema.ResourceData, meta interf if err := d.Set("iap", flattenComputeRegionBackendServiceIap(res["iap"], d, config)); err != nil { return fmt.Errorf("Error reading RegionBackendService: %s", err) } + if err := d.Set("ip_address_selection_policy", flattenComputeRegionBackendServiceIpAddressSelectionPolicy(res["ipAddressSelectionPolicy"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionBackendService: %s", err) + } if err := d.Set("load_balancing_scheme", flattenComputeRegionBackendServiceLoadBalancingScheme(res["loadBalancingScheme"], d, config)); err != nil { return fmt.Errorf("Error reading RegionBackendService: %s", err) } @@ -1401,6 +1460,9 @@ func resourceComputeRegionBackendServiceRead(d *schema.ResourceData, meta interf if err := d.Set("session_affinity", flattenComputeRegionBackendServiceSessionAffinity(res["sessionAffinity"], d, config)); err != nil { return fmt.Errorf("Error reading RegionBackendService: %s", err) } + if err := d.Set("strong_session_affinity_cookie", flattenComputeRegionBackendServiceStrongSessionAffinityCookie(res["strongSessionAffinityCookie"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionBackendService: %s", err) + } if err := d.Set("timeout_sec", flattenComputeRegionBackendServiceTimeoutSec(res["timeoutSec"], d, config)); err != nil { return fmt.Errorf("Error reading RegionBackendService: %s", err) } @@ -1508,6 +1570,12 @@ func resourceComputeRegionBackendServiceUpdate(d *schema.ResourceData, meta inte } else if v, ok := d.GetOkExists("iap"); ok || !reflect.DeepEqual(v, iapProp) { 
obj["iap"] = iapProp } + ipAddressSelectionPolicyProp, err := expandComputeRegionBackendServiceIpAddressSelectionPolicy(d.Get("ip_address_selection_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ip_address_selection_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ipAddressSelectionPolicyProp)) { + obj["ipAddressSelectionPolicy"] = ipAddressSelectionPolicyProp + } loadBalancingSchemeProp, err := expandComputeRegionBackendServiceLoadBalancingScheme(d.Get("load_balancing_scheme"), d, config) if err != nil { return err @@ -1550,6 +1618,12 @@ func resourceComputeRegionBackendServiceUpdate(d *schema.ResourceData, meta inte } else if v, ok := d.GetOkExists("session_affinity"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sessionAffinityProp)) { obj["sessionAffinity"] = sessionAffinityProp } + strongSessionAffinityCookieProp, err := expandComputeRegionBackendServiceStrongSessionAffinityCookie(d.Get("strong_session_affinity_cookie"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("strong_session_affinity_cookie"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, strongSessionAffinityCookieProp)) { + obj["strongSessionAffinityCookie"] = strongSessionAffinityCookieProp + } timeoutSecProp, err := expandComputeRegionBackendServiceTimeoutSec(d.Get("timeout_sec"), d, config) if err != nil { return err @@ -2383,6 +2457,8 @@ func flattenComputeRegionBackendServiceIap(v interface{}, d *schema.ResourceData return nil } transformed := make(map[string]interface{}) + transformed["enabled"] = + flattenComputeRegionBackendServiceIapEnabled(original["enabled"], d, config) transformed["oauth2_client_id"] = flattenComputeRegionBackendServiceIapOauth2ClientId(original["oauth2ClientId"], d, config) transformed["oauth2_client_secret"] = @@ -2391,6 +2467,10 @@ func flattenComputeRegionBackendServiceIap(v interface{}, d *schema.ResourceData flattenComputeRegionBackendServiceIapOauth2ClientSecretSha256(original["oauth2ClientSecretSha256"], d, config) return []interface{}{transformed} } +func flattenComputeRegionBackendServiceIapEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenComputeRegionBackendServiceIapOauth2ClientId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -2403,6 +2483,10 @@ func flattenComputeRegionBackendServiceIapOauth2ClientSecretSha256(v interface{} return v } +func flattenComputeRegionBackendServiceIpAddressSelectionPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenComputeRegionBackendServiceLoadBalancingScheme(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -2711,6 +2795,80 @@ func flattenComputeRegionBackendServiceSessionAffinity(v interface{}, d *schema. 
return v } +func flattenComputeRegionBackendServiceStrongSessionAffinityCookie(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["ttl"] = + flattenComputeRegionBackendServiceStrongSessionAffinityCookieTtl(original["ttl"], d, config) + transformed["name"] = + flattenComputeRegionBackendServiceStrongSessionAffinityCookieName(original["name"], d, config) + transformed["path"] = + flattenComputeRegionBackendServiceStrongSessionAffinityCookiePath(original["path"], d, config) + return []interface{}{transformed} +} +func flattenComputeRegionBackendServiceStrongSessionAffinityCookieTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["seconds"] = + flattenComputeRegionBackendServiceStrongSessionAffinityCookieTtlSeconds(original["seconds"], d, config) + transformed["nanos"] = + flattenComputeRegionBackendServiceStrongSessionAffinityCookieTtlNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func flattenComputeRegionBackendServiceStrongSessionAffinityCookieTtlSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceStrongSessionAffinityCookieTtlNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceStrongSessionAffinityCookieName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceStrongSessionAffinityCookiePath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenComputeRegionBackendServiceTimeoutSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { @@ -3396,6 +3554,13 @@ func expandComputeRegionBackendServiceIap(v interface{}, d tpgresource.Terraform original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) + transformedEnabled, err := expandComputeRegionBackendServiceIapEnabled(original["enabled"], d, config) + if err != nil { + return nil, err + } else { + transformed["enabled"] = transformedEnabled + } + transformedOauth2ClientId, err := expandComputeRegionBackendServiceIapOauth2ClientId(original["oauth2_client_id"], d, config) if err != nil { return nil, err @@ -3420,6 +3585,10 @@ func expandComputeRegionBackendServiceIap(v 
interface{}, d tpgresource.Terraform return transformed, nil } +func expandComputeRegionBackendServiceIapEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandComputeRegionBackendServiceIapOauth2ClientId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -3432,6 +3601,10 @@ func expandComputeRegionBackendServiceIapOauth2ClientSecretSha256(v interface{}, return v, nil } +func expandComputeRegionBackendServiceIpAddressSelectionPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandComputeRegionBackendServiceLoadBalancingScheme(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -3649,6 +3822,81 @@ func expandComputeRegionBackendServiceSessionAffinity(v interface{}, d tpgresour return v, nil } +func expandComputeRegionBackendServiceStrongSessionAffinityCookie(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTtl, err := expandComputeRegionBackendServiceStrongSessionAffinityCookieTtl(original["ttl"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTtl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ttl"] = transformedTtl + } + + transformedName, err := expandComputeRegionBackendServiceStrongSessionAffinityCookieName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedPath, err := expandComputeRegionBackendServiceStrongSessionAffinityCookiePath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + return transformed, nil +} + +func expandComputeRegionBackendServiceStrongSessionAffinityCookieTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSeconds, err := expandComputeRegionBackendServiceStrongSessionAffinityCookieTtlSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandComputeRegionBackendServiceStrongSessionAffinityCookieTtlNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func expandComputeRegionBackendServiceStrongSessionAffinityCookieTtlSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil 
+} + +func expandComputeRegionBackendServiceStrongSessionAffinityCookieTtlNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionBackendServiceStrongSessionAffinityCookieName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionBackendServiceStrongSessionAffinityCookiePath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandComputeRegionBackendServiceTimeoutSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -3704,24 +3952,6 @@ func expandComputeRegionBackendServiceRegion(v interface{}, d tpgresource.Terraf } func resourceComputeRegionBackendServiceEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // The RegionBackendService API's Update / PUT API is badly formed and behaves like - // a PATCH field for at least IAP. When sent a `null` `iap` field, the API - // doesn't disable an existing field. To work around this, we need to emulate - // the old Terraform behaviour of always sending the block (at both update and - // create), and force sending each subfield as empty when the block isn't - // present in config. - - iapVal := obj["iap"] - if iapVal == nil { - data := map[string]interface{}{} - data["enabled"] = false - obj["iap"] = data - } else { - iap := iapVal.(map[string]interface{}) - iap["enabled"] = true - obj["iap"] = iap - } - if d.Get("load_balancing_scheme").(string) == "EXTERNAL_MANAGED" || d.Get("load_balancing_scheme").(string) == "INTERNAL_MANAGED" { return obj, nil } @@ -3760,17 +3990,6 @@ func resourceComputeRegionBackendServiceEncoder(d *schema.ResourceData, meta int } func resourceComputeRegionBackendServiceDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - // We need to pretend IAP isn't there if it's disabled for Terraform to maintain - // BC behaviour with the handwritten resource. - v, ok := res["iap"] - if !ok || v == nil { - delete(res, "iap") - return res, nil - } - m := v.(map[string]interface{}) - if ok && m["enabled"] == false { - delete(res, "iap") - } // Requests with consistentHash will error for specific values of // localityLbPolicy. However, the API will not remove it if the backend diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_commitment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_commitment.go index 31a5730b6eb..62433633aab 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_commitment.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_commitment.go @@ -98,6 +98,13 @@ Note that only MACHINE commitments should have a Type specified. 
Possible values ForceNew: true, Description: `An optional description of this resource.`, }, + "existing_reservations": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Specifies the already existing reservations to attach to the Commitment.`, + }, "license_resource": { Type: schema.TypeList, Optional: true, @@ -281,6 +288,12 @@ func resourceComputeRegionCommitmentCreate(d *schema.ResourceData, meta interfac } else if v, ok := d.GetOkExists("auto_renew"); !tpgresource.IsEmptyValue(reflect.ValueOf(autoRenewProp)) && (ok || !reflect.DeepEqual(v, autoRenewProp)) { obj["autoRenew"] = autoRenewProp } + existingReservationsProp, err := expandComputeRegionCommitmentExistingReservations(d.Get("existing_reservations"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("existing_reservations"); !tpgresource.IsEmptyValue(reflect.ValueOf(existingReservationsProp)) && (ok || !reflect.DeepEqual(v, existingReservationsProp)) { + obj["existingReservations"] = existingReservationsProp + } regionProp, err := expandComputeRegionCommitmentRegion(d.Get("region"), d, config) if err != nil { return err @@ -428,6 +441,9 @@ func resourceComputeRegionCommitmentRead(d *schema.ResourceData, meta interface{ if err := d.Set("auto_renew", flattenComputeRegionCommitmentAutoRenew(res["autoRenew"], d, config)); err != nil { return fmt.Errorf("Error reading RegionCommitment: %s", err) } + if err := d.Set("existing_reservations", flattenComputeRegionCommitmentExistingReservations(res["existingReservations"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionCommitment: %s", err) + } if err := d.Set("region", flattenComputeRegionCommitmentRegion(res["region"], d, config)); err != nil { return fmt.Errorf("Error reading RegionCommitment: %s", err) } @@ -590,6 +606,10 @@ func flattenComputeRegionCommitmentAutoRenew(v interface{}, d *schema.ResourceDa return v } +func flattenComputeRegionCommitmentExistingReservations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenComputeRegionCommitmentRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v @@ -714,6 +734,10 @@ func expandComputeRegionCommitmentAutoRenew(v interface{}, d tpgresource.Terrafo return v, nil } +func expandComputeRegionCommitmentExistingReservations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandComputeRegionCommitmentRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) if err != nil { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_instance_group_manager.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_instance_group_manager.go index 65b96dd3362..461a0779353 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_instance_group_manager.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_instance_group_manager.go @@ -312,14 +312,14 @@ func 
ResourceComputeRegionInstanceGroupManager() *schema.Resource { Optional: true, Computed: true, ConflictsWith: []string{"update_policy.0.max_surge_percent"}, - Description: `The maximum number of instances that can be created above the specified targetSize during the update process. Conflicts with max_surge_percent. It has to be either 0 or at least equal to the number of zones. If fixed values are used, at least one of max_unavailable_fixed or max_surge_fixed must be greater than 0.`, + Description: `Specifies a fixed number of VM instances. This must be a positive integer. Conflicts with max_surge_percent. Both cannot be 0.`, }, "max_surge_percent": { Type: schema.TypeInt, Optional: true, ConflictsWith: []string{"update_policy.0.max_surge_fixed"}, - Description: `The maximum number of instances(calculated as percentage) that can be created above the specified targetSize during the update process. Conflicts with max_surge_fixed. Percent value is only allowed for regional managed instance groups with size at least 10.`, + Description: `Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%. Conflicts with max_surge_fixed.`, ValidateFunc: validation.IntBetween(0, 100), }, @@ -327,7 +327,7 @@ func ResourceComputeRegionInstanceGroupManager() *schema.Resource { Type: schema.TypeInt, Optional: true, Computed: true, - Description: `The maximum number of instances that can be unavailable during the update process. Conflicts with max_unavailable_percent. It has to be either 0 or at least equal to the number of zones. If fixed values are used, at least one of max_unavailable_fixed or max_surge_fixed must be greater than 0.`, + Description: `Specifies a fixed number of VM instances. This must be a positive integer.`, ConflictsWith: []string{"update_policy.0.max_unavailable_percent"}, }, @@ -336,7 +336,7 @@ func ResourceComputeRegionInstanceGroupManager() *schema.Resource { Optional: true, ConflictsWith: []string{"update_policy.0.max_unavailable_fixed"}, ValidateFunc: validation.IntBetween(0, 100), - Description: `The maximum number of instances(calculated as percentage) that can be unavailable during the update process. Conflicts with max_unavailable_fixed. Percent value is only allowed for regional managed instance groups with size at least 10.`, + Description: `Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.`, }, "instance_redistribution_type": { @@ -555,7 +555,7 @@ func resourceComputeRegionInstanceGroupManagerCreate(d *schema.ResourceData, met UpdatePolicy: expandRegionUpdatePolicy(d.Get("update_policy").([]interface{})), InstanceLifecyclePolicy: expandInstanceLifecyclePolicy(d.Get("instance_lifecycle_policy").([]interface{})), AllInstancesConfig: expandAllInstancesConfig(nil, d.Get("all_instances_config").([]interface{})), - DistributionPolicy: expandDistributionPolicy(d), + DistributionPolicy: expandDistributionPolicyForCreate(d), StatefulPolicy: expandStatefulPolicy(d), // Force send TargetSize to allow size of 0.
ForceSendFields: []string{"TargetSize"}, @@ -815,7 +815,7 @@ func resourceComputeRegionInstanceGroupManagerUpdate(d *schema.ResourceData, met } if d.HasChange("distribution_policy_target_shape") { - updatedManager.DistributionPolicy = expandDistributionPolicy(d) + updatedManager.DistributionPolicy = expandDistributionPolicyForUpdate(d) change = true } @@ -1027,24 +1027,39 @@ func flattenRegionUpdatePolicy(updatePolicy *compute.InstanceGroupManagerUpdateP return results } -func expandDistributionPolicy(d *schema.ResourceData) *compute.DistributionPolicy { +func expandDistributionPolicyForUpdate(d *schema.ResourceData) *compute.DistributionPolicy { + dpts := d.Get("distribution_policy_target_shape").(string) + if dpts == "" { + return nil + } + // distributionPolicy.Zones is NOT updateable. + return &compute.DistributionPolicy{TargetShape: dpts} +} + +func expandDistributionPolicyForCreate(d *schema.ResourceData) *compute.DistributionPolicy { dpz := d.Get("distribution_policy_zones").(*schema.Set) dpts := d.Get("distribution_policy_target_shape").(string) if dpz.Len() == 0 && dpts == "" { return nil } + distributionPolicy := &compute.DistributionPolicy{} - distributionPolicyZoneConfigs := make([]*compute.DistributionPolicyZoneConfiguration, 0, dpz.Len()) - for _, raw := range dpz.List() { - data := raw.(string) - distributionPolicyZoneConfig := compute.DistributionPolicyZoneConfiguration{ - Zone: "zones/" + data, - } + if dpz.Len() > 0 { + distributionPolicyZoneConfigs := make([]*compute.DistributionPolicyZoneConfiguration, 0, dpz.Len()) + for _, raw := range dpz.List() { + data := raw.(string) + distributionPolicyZoneConfig := compute.DistributionPolicyZoneConfiguration{ + Zone: "zones/" + data, + } - distributionPolicyZoneConfigs = append(distributionPolicyZoneConfigs, &distributionPolicyZoneConfig) + distributionPolicyZoneConfigs = append(distributionPolicyZoneConfigs, &distributionPolicyZoneConfig) + } + distributionPolicy.Zones = distributionPolicyZoneConfigs } - - return &compute.DistributionPolicy{Zones: distributionPolicyZoneConfigs, TargetShape: dpts} + if dpts != "" { + distributionPolicy.TargetShape = dpts + } + return distributionPolicy } func flattenDistributionPolicy(distributionPolicy *compute.DistributionPolicy) []string { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_instance_template.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_instance_template.go index 4f30f160949..4b3c9ddaa8e 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_instance_template.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_instance_template.go @@ -73,11 +73,11 @@ func ResourceComputeRegionInstanceTemplate() *schema.Resource { Description: `Creates a unique name beginning with the specified prefix. Conflicts with name.`, ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { // https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource - // uuid is 26 characters, limit the prefix to 37. + // uuid is 9 characters, limit the prefix to 54. 
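+ // Both bounds follow from the 63-character limit on instance template
+ // names: id.PrefixedUniqueId appends a 26-character suffix (63 - 26 = 37),
+ // while tpgresource.ReducedPrefixedUniqueId, used in the create path below
+ // for longer prefixes, appends a 9-character suffix (63 - 9 = 54).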
value := v.(string) - if len(value) > 37 { + if len(value) > 54 { errors = append(errors, fmt.Errorf( - "%q cannot be longer than 37 characters, name is limited to 63", k)) + "%q cannot be longer than 54 characters, name is limited to 63", k)) } return }, @@ -152,7 +152,15 @@ func ResourceComputeRegionInstanceTemplate() *schema.Resource { Optional: true, ForceNew: true, Computed: true, - Description: `Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. Values must be between 10,000 and 120,000. For more details, see the [Extreme persistent disk documentation](https://cloud.google.com/compute/docs/disks/extreme-persistent-disk).`, + Description: `Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. For more details, see the [Extreme persistent disk documentation](https://cloud.google.com/compute/docs/disks/extreme-persistent-disk) or the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks) depending on the selected disk_type.`, + }, + + "provisioned_throughput": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + Description: `Indicates how much throughput to provision for the disk, in MB/s. This sets the amount of data that can be read or written from the disk per second. Values must be greater than or equal to 1. For more details, see the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks).`, }, "resource_manager_tags": { @@ -737,6 +745,13 @@ be from 0 to 999,999,999 inclusive.`, Description: `The URI of the created resource.`, }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Description: `The time at which the instance was created in RFC 3339 format.`, + }, + "service_account": { Type: schema.TypeList, MaxItems: 1, @@ -832,9 +847,10 @@ be from 0 to 999,999,999 inclusive.`, Optional: true, ForceNew: true, Description: ` - Specifies which confidential computing technology to use. - This could be one of the following values: SEV, SEV_SNP. - If SEV_SNP, min_cpu_platform = "AMD Milan" is currently required.`, + The confidential computing technology the instance uses. + SEV is an AMD feature. TDX is an Intel feature. One of the following + values is required: SEV, SEV_SNP, TDX. If SEV_SNP, min_cpu_platform = + "AMD Milan" is currently required.`, AtLeastOneOf: []string{"confidential_instance_config.0.enable_confidential_compute", "confidential_instance_config.0.confidential_instance_type"}, }, }, @@ -862,6 +878,12 @@ be from 0 to 999,999,999 inclusive.`, ForceNew: true, Description: `The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.`, }, + "turbo_mode": { + Type: schema.TypeString, + Optional: true, + Description: `Turbo frequency mode to use for the instance.
Currently the only supported mode is "ALL_CORE_MAX".`, + ValidateFunc: validation.StringInSlice([]string{"ALL_CORE_MAX"}, false), + }, "visible_core_count": { Type: schema.TypeInt, Optional: true, @@ -924,7 +946,7 @@ be from 0 to 999,999,999 inclusive.`, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, Description: `A set of key/value label pairs to assign to instances created from this template, - + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.`, }, @@ -1002,6 +1024,14 @@ be from 0 to 999,999,999 inclusive.`, }, }, }, + + "key_revocation_action_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"NONE", "STOP", ""}, false), + Description: `Action to be taken when a customer's encryption key is revoked. Supports "STOP" and "NONE", with "NONE" being the default.`, + }, }, UseJSONNumber: true, } @@ -1071,6 +1101,7 @@ func resourceComputeRegionInstanceTemplateCreate(d *schema.ResourceData, meta in AdvancedMachineFeatures: expandAdvancedMachineFeatures(d), ResourcePolicies: resourcePolicies, ReservationAffinity: reservationAffinity, + KeyRevocationActionType: d.Get("key_revocation_action_type").(string), } if _, ok := d.GetOk("effective_labels"); ok { @@ -1085,7 +1116,12 @@ func resourceComputeRegionInstanceTemplateCreate(d *schema.ResourceData, meta in if v, ok := d.GetOk("name"); ok { itName = v.(string) } else if v, ok := d.GetOk("name_prefix"); ok { - itName = id.PrefixedUniqueId(v.(string)) + prefix := v.(string) + if len(prefix) > 37 { + itName = tpgresource.ReducedPrefixedUniqueId(prefix) + } else { + itName = id.PrefixedUniqueId(prefix) + } } else { itName = id.UniqueId() } @@ -1222,6 +1258,9 @@ func resourceComputeRegionInstanceTemplateRead(d *schema.ResourceData, meta inte if err = d.Set("self_link", instanceTemplate["selfLink"]); err != nil { return fmt.Errorf("Error setting self_link: %s", err) } + if err := d.Set("creation_timestamp", instanceTemplate["creationTimestamp"]); err != nil { + return fmt.Errorf("Error setting creation_timestamp: %s", err) + } if err = d.Set("name", instanceTemplate["name"]); err != nil { return fmt.Errorf("Error setting name: %s", err) } @@ -1251,6 +1290,9 @@ func resourceComputeRegionInstanceTemplateRead(d *schema.ResourceData, meta inte if err = d.Set("instance_description", instanceProperties.Description); err != nil { return fmt.Errorf("Error setting instance_description: %s", err) } + if err = d.Set("key_revocation_action_type", instanceProperties.KeyRevocationActionType); err != nil { + return fmt.Errorf("Error setting key_revocation_action_type: %s", err) + } if err = d.Set("project", project); err != nil { return fmt.Errorf("Error setting project: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_association.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_association.go index b5b728befb1..1b9b675007b 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_association.go +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_association.go @@ -3,34 +3,31 @@ // ---------------------------------------------------------------------------- // -// *** AUTO GENERATED CODE *** Type: DCL *** +// *** AUTO GENERATED CODE *** Type: MMv1 *** // // ---------------------------------------------------------------------------- // -// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) -// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). -// Changes will need to be made to the DCL or Magic Modules instead of here. +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. // -// We are not currently able to accept contributions to this file. If changes -// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- package compute import ( - "context" "fmt" "log" + "net/http" + "reflect" + "strings" "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" - - "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) @@ -49,9 +46,9 @@ func ResourceComputeRegionNetworkFirewallPolicyAssociation() *schema.Resource { Create: schema.DefaultTimeout(20 * time.Minute), Delete: schema.DefaultTimeout(20 * time.Minute), }, + CustomizeDiff: customdiff.All( tpgresource.DefaultProviderProject, - tpgresource.DefaultProviderRegion, ), Schema: map[string]*schema.Schema{ @@ -60,164 +57,171 @@ func ResourceComputeRegionNetworkFirewallPolicyAssociation() *schema.Resource { Required: true, ForceNew: true, DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: "The target that the firewall policy is attached to.", + Description: `The target that the firewall policy is attached to.`, }, - "firewall_policy": { Type: schema.TypeString, Required: true, ForceNew: true, DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: "The firewall policy ID of the association.", + Description: `The firewall policy of the resource.`, }, - "name": { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "The name for an association.", + Description: `The name for an association.`, }, - - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: "The project for the resource", - }, - "region": { Type: schema.TypeString, Computed: true, Optional: true, ForceNew: true, - Description: "The location of this resource.", + Description: `The location of this resource.`, }, - "short_name": { Type: schema.TypeString, Computed: true, - Description: "The short name of the firewall policy of the association.", + Description: `The short name of the firewall policy of the 
association.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, }, }, + UseJSONNumber: true, } } func resourceComputeRegionNetworkFirewallPolicyAssociationCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) - project, err := tpgresource.GetProject(d, config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - region, err := tpgresource.GetRegion(d, config) + + obj := make(map[string]interface{}) + nameProp, err := expandComputeRegionNetworkFirewallPolicyAssociationName(d.Get("name"), d, config) if err != nil { return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp } - - obj := &compute.NetworkFirewallPolicyAssociation{ - AttachmentTarget: dcl.String(d.Get("attachment_target").(string)), - FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), - Name: dcl.String(d.Get("name").(string)), - Project: dcl.String(project), - Location: dcl.String(region), + attachmentTargetProp, err := expandComputeRegionNetworkFirewallPolicyAssociationAttachmentTarget(d.Get("attachment_target"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("attachment_target"); !tpgresource.IsEmptyValue(reflect.ValueOf(attachmentTargetProp)) && (ok || !reflect.DeepEqual(v, attachmentTargetProp)) { + obj["attachmentTarget"] = attachmentTargetProp } - id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}/associations/{{name}}") + url, err := tpgresource.ReplaceVarsForId(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}/addAssociation") if err != nil { - return fmt.Errorf("error constructing id: %s", err) + return err } - d.SetId(id) - directive := tpgdclresource.CreateDirective - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + + log.Printf("[DEBUG] Creating new RegionNetworkFirewallPolicyAssociation: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) if err != nil { - return err + return fmt.Errorf("Error fetching project for RegionNetworkFirewallPolicyAssociation: %s", err) } - billingProject := project + billingProject = strings.TrimPrefix(project, "projects/") + // err == nil indicates that the billing_project value was found if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating RegionNetworkFirewallPolicyAssociation: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}/associations/{{name}}") + if err != 
nil { + return fmt.Errorf("Error constructing id: %s", err) } - res, err := client.ApplyNetworkFirewallPolicyAssociation(context.Background(), obj, directive...) + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, tpgresource.GetResourceNameFromSelfLink(project), "Creating RegionNetworkFirewallPolicyAssociation", userAgent, + d.Timeout(schema.TimeoutCreate)) - if _, ok := err.(dcl.DiffAfterApplyError); ok { - log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { + if err != nil { // The resource didn't actually create d.SetId("") - return fmt.Errorf("Error creating NetworkFirewallPolicyAssociation: %s", err) + return fmt.Errorf("Error waiting to create RegionNetworkFirewallPolicyAssociation: %s", err) } - log.Printf("[DEBUG] Finished creating NetworkFirewallPolicyAssociation %q: %#v", d.Id(), res) + log.Printf("[DEBUG] Finished creating RegionNetworkFirewallPolicyAssociation %q: %#v", d.Id(), res) return resourceComputeRegionNetworkFirewallPolicyAssociationRead(d, meta) } func resourceComputeRegionNetworkFirewallPolicyAssociationRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) - project, err := tpgresource.GetProject(d, config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - region, err := tpgresource.GetRegion(d, config) + + url, err := tpgresource.ReplaceVarsForId(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}/getAssociation?name={{name}}") if err != nil { return err } - obj := &compute.NetworkFirewallPolicyAssociation{ - AttachmentTarget: dcl.String(d.Get("attachment_target").(string)), - FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), - Name: dcl.String(d.Get("name").(string)), - Project: dcl.String(project), - Location: dcl.String(region), - } + billingProject := "" - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + project, err := tpgresource.GetProject(d, config) if err != nil { - return err + return fmt.Errorf("Error fetching project for RegionNetworkFirewallPolicyAssociation: %s", err) } - billingProject := project + billingProject = strings.TrimPrefix(project, "projects/") + // err == nil indicates that the billing_project value was found if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.GetNetworkFirewallPolicyAssociation(context.Background(), obj) + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) if err != nil { - resourceName := fmt.Sprintf("ComputeRegionNetworkFirewallPolicyAssociation %q", d.Id()) - return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRegionNetworkFirewallPolicyAssociation %q", d.Id())) } - if err = d.Set("attachment_target", res.AttachmentTarget); err != nil { - return fmt.Errorf("error setting attachment_target in state: %s", err) - } - if err = 
d.Set("firewall_policy", res.FirewallPolicy); err != nil { - return fmt.Errorf("error setting firewall_policy in state: %s", err) - } - if err = d.Set("name", res.Name); err != nil { - return fmt.Errorf("error setting name in state: %s", err) + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading RegionNetworkFirewallPolicyAssociation: %s", err) } - if err = d.Set("project", res.Project); err != nil { - return fmt.Errorf("error setting project in state: %s", err) + + if err := d.Set("name", flattenComputeRegionNetworkFirewallPolicyAssociationName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkFirewallPolicyAssociation: %s", err) } - if err = d.Set("region", res.Location); err != nil { - return fmt.Errorf("error setting region in state: %s", err) + if err := d.Set("attachment_target", flattenComputeRegionNetworkFirewallPolicyAssociationAttachmentTarget(res["attachmentTarget"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkFirewallPolicyAssociation: %s", err) } - if err = d.Set("short_name", res.ShortName); err != nil { - return fmt.Errorf("error setting short_name in state: %s", err) + if err := d.Set("short_name", flattenComputeRegionNetworkFirewallPolicyAssociationShortName(res["shortName"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkFirewallPolicyAssociation: %s", err) } return nil @@ -225,54 +229,68 @@ func resourceComputeRegionNetworkFirewallPolicyAssociationRead(d *schema.Resourc func resourceComputeRegionNetworkFirewallPolicyAssociationDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - region, err := tpgresource.GetRegion(d, config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - obj := &compute.NetworkFirewallPolicyAssociation{ - AttachmentTarget: dcl.String(d.Get("attachment_target").(string)), - FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), - Name: dcl.String(d.Get("name").(string)), - Project: dcl.String(project), - Location: dcl.String(region), + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionNetworkFirewallPolicyAssociation: %s", err) } + billingProject = strings.TrimPrefix(project, "projects/") - log.Printf("[DEBUG] Deleting NetworkFirewallPolicyAssociation %q", d.Id()) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + url, err := tpgresource.ReplaceVarsForId(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}/removeAssociation?name={{name}}") if err != nil { return err } - billingProject := project + + var obj map[string]interface{} + // err == nil indicates that the billing_project value was found if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting RegionNetworkFirewallPolicyAssociation %q", d.Id()) + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "RegionNetworkFirewallPolicyAssociation") } - if err := client.DeleteNetworkFirewallPolicyAssociation(context.Background(), obj); err != nil { - return fmt.Errorf("Error deleting NetworkFirewallPolicyAssociation: %s", err) + + err = ComputeOperationWaitTime( + config, res, tpgresource.GetResourceNameFromSelfLink(project), "Deleting RegionNetworkFirewallPolicyAssociation", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err } - log.Printf("[DEBUG] Finished deleting NetworkFirewallPolicyAssociation %q", d.Id()) + log.Printf("[DEBUG] Finished deleting RegionNetworkFirewallPolicyAssociation %q: %#v", d.Id(), res) return nil } func resourceComputeRegionNetworkFirewallPolicyAssociationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { config := meta.(*transport_tpg.Config) - if err := tpgresource.ParseImportId([]string{ - "projects/(?P<project>[^/]+)/regions/(?P<region>[^/]+)/firewallPolicies/(?P<firewall_policy>[^/]+)/associations/(?P<name>[^/]+)", - "(?P<project>[^/]+)/(?P<region>[^/]+)/(?P<firewall_policy>[^/]+)/(?P<name>[^/]+)", + "^projects/(?P<project>[^/]+)/regions/(?P<region>[^/]+)/firewallPolicies/(?P<firewall_policy>[^/]+)/associations/(?P<name>[^/]+)$", + "^(?P<project>[^/]+)/(?P<region>[^/]+)/(?P<firewall_policy>[^/]+)/(?P<name>[^/]+)$", + "^(?P<region>[^/]+)/(?P<firewall_policy>[^/]+)/(?P<name>[^/]+)$", + "^(?P<project>[^/]+)/(?P<firewall_policy>[^/]+)/(?P<name>[^/]+)$", + "^(?P<firewall_policy>[^/]+)/(?P<name>[^/]+)$", }, d, config); err != nil { return nil, err } @@ -286,3 +304,23 @@ func resourceComputeRegionNetworkFirewallPolicyAssociationImport(d *schema.Resou return []*schema.ResourceData{d}, nil } + +func flattenComputeRegionNetworkFirewallPolicyAssociationName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionNetworkFirewallPolicyAssociationAttachmentTarget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionNetworkFirewallPolicyAssociationShortName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandComputeRegionNetworkFirewallPolicyAssociationName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkFirewallPolicyAssociationAttachmentTarget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_association_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_association_sweeper.go new file mode 100644 index 00000000000..b8bddce5684 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_association_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeRegionNetworkFirewallPolicyAssociation", testSweepComputeRegionNetworkFirewallPolicyAssociation) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeRegionNetworkFirewallPolicyAssociation(region string) error { + resourceName := "ComputeRegionNetworkFirewallPolicyAssociation" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["regionNetworkFirewallPolicyAssociations"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
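+ // Only resources whose names carry a recognized test prefix (checked
+ // via sweeper.IsSweepableTestResource below) are deleted; everything
+ // else increments nonPrefixCount and is skipped.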
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be swept + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}/removeAssociation?name={{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_rule.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_rule.go index 5d2c5537896..fac2795349c 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_rule.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_rule.go @@ -3,36 +3,34 @@ // ---------------------------------------------------------------------------- // -// *** AUTO GENERATED CODE *** Type: DCL *** +// *** AUTO GENERATED CODE *** Type: MMv1 *** // // ---------------------------------------------------------------------------- // -// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) -// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). -// Changes will need to be made to the DCL or Magic Modules instead of here. +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. // -// We are not currently able to accept contributions to this file. If changes -// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// Please read more about how to change this file in +// .github/CONTRIBUTING.md.
// // ---------------------------------------------------------------------------- package compute import ( - "context" "fmt" "log" + "net/http" + "reflect" + "strings" "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" - - "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceComputeRegionNetworkFirewallPolicyRule() *schema.Resource { @@ -51,570 +49,697 @@ func ResourceComputeRegionNetworkFirewallPolicyRule() *schema.Resource { Update: schema.DefaultTimeout(20 * time.Minute), Delete: schema.DefaultTimeout(20 * time.Minute), }, + CustomizeDiff: customdiff.All( - tpgresource.DefaultProviderProject, tpgresource.DefaultProviderRegion, + tpgresource.DefaultProviderProject, ), Schema: map[string]*schema.Schema{ "action": { Type: schema.TypeString, Required: true, - Description: "The Action to perform when the client connection triggers the rule. Valid actions are \"allow\", \"deny\", \"goto_next\" and \"apply_security_profile_group\".", + Description: `The Action to perform when the client connection triggers the rule. Valid actions are "allow", "deny", "goto_next" and "apply_security_profile_group".`, }, - "direction": { - Type: schema.TypeString, - Required: true, - Description: "The direction in which this rule applies. Possible values: INGRESS, EGRESS", + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"INGRESS", "EGRESS"}), + Description: `The direction in which this rule applies. Possible values: ["INGRESS", "EGRESS"]`, }, - "firewall_policy": { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: "The firewall policy of the resource.", + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `The firewall policy of the resource.`, }, - "match": { Type: schema.TypeList, Required: true, - Description: "A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced.", + Description: `A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced.`, MaxItems: 1, - Elem: ComputeRegionNetworkFirewallPolicyRuleMatchSchema(), + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "layer4_configs": { + Type: schema.TypeList, + Required: true, + Description: `Pairs of IP protocols and ports that the rule should match.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_protocol": { + Type: schema.TypeString, + Required: true, + Description: `The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. +This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number.`, + }, + "ports": { + Type: schema.TypeList, + Optional: true, + Description: `An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. 
If not specified, this rule applies to connections through any port. +Example inputs include: ["22"], ["80","443"], and ["12345-12349"].`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "dest_address_groups": { + Type: schema.TypeList, + Optional: true, + Description: `Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "dest_fqdns": { + Type: schema.TypeList, + Optional: true, + Description: `Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination fqdn allowed is 100.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "dest_ip_ranges": { + Type: schema.TypeList, + Optional: true, + Description: `CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "dest_region_codes": { + Type: schema.TypeList, + Optional: true, + Description: `Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "dest_threat_intelligences": { + Type: schema.TypeList, + Optional: true, + Description: `Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic destination.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "src_address_groups": { + Type: schema.TypeList, + Optional: true, + Description: `Address groups which should be matched against the traffic source. Maximum number of source address groups is 10.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "src_fqdns": { + Type: schema.TypeList, + Optional: true, + Description: `Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source fqdn allowed is 100.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "src_ip_ranges": { + Type: schema.TypeList, + Optional: true, + Description: `CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "src_region_codes": { + Type: schema.TypeList, + Optional: true, + Description: `Region codes whose IP addresses will be used to match for source of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of source region codes allowed is 5000.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "src_secure_tags": { + Type: schema.TypeList, + Optional: true, + Description: `List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Name of the secure tag, created with TagManager's TagValue API.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the secure tag, either EFFECTIVE or INEFFECTIVE. 
A secure tag is INEFFECTIVE when it is deleted or its network is deleted.`, + }, + }, + }, + "src_threat_intelligences": { + Type: schema.TypeList, + Optional: true, + Description: `Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic source.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, }, - "priority": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - Description: "An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority.", + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `An integer indicating the priority of a rule in the list. +The priority must be a positive value between 0 and 2147483647. +Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority.`, }, - "description": { Type: schema.TypeString, Optional: true, - Description: "An optional description for this resource.", + Description: `An optional description for this resource.`, }, - "disabled": { - Type: schema.TypeBool, - Optional: true, - Description: "Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled.", + Type: schema.TypeBool, + Optional: true, + Description: `Denotes whether the firewall policy rule is disabled. +When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. +If this is unspecified, the firewall policy rule will be enabled.`, }, - "enable_logging": { - Type: schema.TypeBool, - Optional: true, - Description: "Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on \"goto_next\" rules.", - }, - - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: "The project for the resource", + Type: schema.TypeBool, + Optional: true, + Description: `Denotes whether to enable logging for a particular rule. +If logging is enabled, logs will be exported to the configured export destination in Stackdriver. +Logs may be exported to BigQuery or Pub/Sub. +Note: you cannot enable logging on "goto_next" rules.`, }, - "region": { Type: schema.TypeString, Computed: true, Optional: true, ForceNew: true, - Description: "The location of this resource.", + Description: `The location of this resource.`, }, - "rule_name": { Type: schema.TypeString, Optional: true, - Description: "An optional name for the rule. This field is not a unique identifier and can be updated.", + Description: `An optional name for the rule. This field is not a unique identifier and can be updated.`, }, - "security_profile_group": { - Type: schema.TypeString, - Optional: true, - Description: "A fully-qualified URL of a SecurityProfileGroup resource. Example: https://networksecurity.googleapis.com/v1/organizations/{organizationId}/locations/global/securityProfileGroups/my-security-profile-group.
It must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions.", - }, + Type: schema.TypeString, + Optional: true, + Description: `A fully-qualified URL of a SecurityProfile resource instance. +Example: https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group +Must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions. +Security Profile Group and Firewall Policy Rule must be in the same scope.`, + }, "target_secure_tags": { - Type: schema.TypeList, - Optional: true, - Description: "A list of secure tags that controls which instances the firewall rule applies to. If targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the target_secure_tag are in INEFFECTIVE state, then this rule will be ignored. targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. Maximum number of target label tags allowed is 256.", - Elem: ComputeRegionNetworkFirewallPolicyRuleTargetSecureTagsSchema(), + Type: schema.TypeList, + Optional: true, + Description: `A list of secure tags that controls which instances the firewall rule applies to. +If targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the targetSecureTag are in INEFFECTIVE state, then this rule will be ignored. +targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. Maximum number of target label tags allowed is 256.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Name of the secure tag, created with TagManager's TagValue API.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted.`, + }, + }, + }, }, - "target_service_accounts": { Type: schema.TypeList, Optional: true, - Description: "A list of service accounts indicating the sets of instances that are applied with this rule.", - Elem: &schema.Schema{Type: schema.TypeString}, + Description: `A list of service accounts indicating the sets of instances that are applied with this rule.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, }, - "tls_inspect": { - Type: schema.TypeBool, - Optional: true, - Description: "Boolean flag indicating if the traffic should be TLS decrypted. It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions.", + Type: schema.TypeBool, + Optional: true, + Description: `Boolean flag indicating if the traffic should be TLS decrypted. +Can be set only if action = 'apply_security_profile_group' and cannot be set for other actions.`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, }, - "kind": { Type: schema.TypeString, Computed: true, - Description: "Type of the resource. 
Always `compute#firewallPolicyRule` for firewall policy rules", + Description: `Type of the resource. Always 'compute#firewallPolicyRule' for firewall policy rules`, }, - "rule_tuple_count": { Type: schema.TypeInt, Computed: true, - Description: "Calculation of the complexity of a single firewall policy rule.", - }, - }, - } -} - -func ComputeRegionNetworkFirewallPolicyRuleMatchSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "layer4_configs": { - Type: schema.TypeList, - Required: true, - Description: "Pairs of IP protocols and ports that the rule should match.", - Elem: ComputeRegionNetworkFirewallPolicyRuleMatchLayer4ConfigsSchema(), - }, - - "dest_address_groups": { - Type: schema.TypeList, - Optional: true, - Description: "Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. Destination address groups is only supported in Egress rules.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "dest_fqdns": { - Type: schema.TypeList, - Optional: true, - Description: "Domain names that will be used to match against the resolved domain name of destination of traffic. Can only be specified if DIRECTION is egress.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "dest_ip_ranges": { - Type: schema.TypeList, - Optional: true, - Description: "CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "dest_region_codes": { - Type: schema.TypeList, - Optional: true, - Description: "The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is egress.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "dest_threat_intelligences": { - Type: schema.TypeList, - Optional: true, - Description: "Name of the Google Cloud Threat Intelligence list.", - Elem: &schema.Schema{Type: schema.TypeString}, + Description: `Calculation of the complexity of a single firewall policy rule.`, }, - - "src_address_groups": { - Type: schema.TypeList, - Optional: true, - Description: "Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. Source address groups is only supported in Ingress rules.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "src_fqdns": { - Type: schema.TypeList, - Optional: true, - Description: "Domain names that will be used to match against the resolved domain name of source of traffic. Can only be specified if DIRECTION is ingress.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "src_ip_ranges": { - Type: schema.TypeList, - Optional: true, - Description: "CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "src_region_codes": { - Type: schema.TypeList, - Optional: true, - Description: "The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is ingress.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "src_secure_tags": { - Type: schema.TypeList, - Optional: true, - Description: "List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. 
Maximum number of source tag values allowed is 256.", - Elem: ComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTagsSchema(), - }, - - "src_threat_intelligences": { - Type: schema.TypeList, - Optional: true, - Description: "Name of the Google Cloud Threat Intelligence list.", - Elem: &schema.Schema{Type: schema.TypeString}, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, }, }, + UseJSONNumber: true, } } -func ComputeRegionNetworkFirewallPolicyRuleMatchLayer4ConfigsSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ip_protocol": { - Type: schema.TypeString, - Required: true, - Description: "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (`tcp`, `udp`, `icmp`, `esp`, `ah`, `ipip`, `sctp`), or the IP protocol number.", - }, - - "ports": { - Type: schema.TypeList, - Optional: true, - Description: "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, +func resourceComputeRegionNetworkFirewallPolicyRuleCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err } -} - -func ComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTagsSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: "Name of the secure tag, created with TagManager's TagValue API. @pattern tagValues/[0-9]+", - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: "[Output Only] State of the secure tag, either `EFFECTIVE` or `INEFFECTIVE`.
A secure tag is `INEFFECTIVE` when it is deleted or its network is deleted.", - }, - }, + descriptionProp, err := expandComputeRegionNetworkFirewallPolicyRuleDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp } -} - -func resourceComputeRegionNetworkFirewallPolicyRuleCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - project, err := tpgresource.GetProject(d, config) + priorityProp, err := expandComputeRegionNetworkFirewallPolicyRulePriority(d.Get("priority"), d, config) if err != nil { return err + } else if v, ok := d.GetOkExists("priority"); !tpgresource.IsEmptyValue(reflect.ValueOf(priorityProp)) && (ok || !reflect.DeepEqual(v, priorityProp)) { + obj["priority"] = priorityProp } - region, err := tpgresource.GetRegion(d, config) + matchProp, err := expandComputeRegionNetworkFirewallPolicyRuleMatch(d.Get("match"), d, config) if err != nil { return err + } else if v, ok := d.GetOkExists("match"); !tpgresource.IsEmptyValue(reflect.ValueOf(matchProp)) && (ok || !reflect.DeepEqual(v, matchProp)) { + obj["match"] = matchProp } - - obj := &compute.NetworkFirewallPolicyRule{ - Action: dcl.String(d.Get("action").(string)), - Direction: compute.NetworkFirewallPolicyRuleDirectionEnumRef(d.Get("direction").(string)), - FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), - Match: expandComputeRegionNetworkFirewallPolicyRuleMatch(d.Get("match")), - Priority: dcl.Int64(int64(d.Get("priority").(int))), - Description: dcl.String(d.Get("description").(string)), - Disabled: dcl.Bool(d.Get("disabled").(bool)), - EnableLogging: dcl.Bool(d.Get("enable_logging").(bool)), - Project: dcl.String(project), - Location: dcl.String(region), - RuleName: dcl.String(d.Get("rule_name").(string)), - SecurityProfileGroup: dcl.String(d.Get("security_profile_group").(string)), - TargetSecureTags: expandComputeRegionNetworkFirewallPolicyRuleTargetSecureTagsArray(d.Get("target_secure_tags")), - TargetServiceAccounts: tpgdclresource.ExpandStringArray(d.Get("target_service_accounts")), - TlsInspect: dcl.Bool(d.Get("tls_inspect").(bool)), + actionProp, err := expandComputeRegionNetworkFirewallPolicyRuleAction(d.Get("action"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("action"); !tpgresource.IsEmptyValue(reflect.ValueOf(actionProp)) && (ok || !reflect.DeepEqual(v, actionProp)) { + obj["action"] = actionProp } - - id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}/{{priority}}") + securityProfileGroupProp, err := expandComputeRegionNetworkFirewallPolicyRuleSecurityProfileGroup(d.Get("security_profile_group"), d, config) if err != nil { - return fmt.Errorf("error constructing id: %s", err) + return err + } else if v, ok := d.GetOkExists("security_profile_group"); !tpgresource.IsEmptyValue(reflect.ValueOf(securityProfileGroupProp)) && (ok || !reflect.DeepEqual(v, securityProfileGroupProp)) { + obj["securityProfileGroup"] = securityProfileGroupProp } - d.SetId(id) - directive := tpgdclresource.CreateDirective - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + tlsInspectProp, err := expandComputeRegionNetworkFirewallPolicyRuleTlsInspect(d.Get("tls_inspect"), d, config) + if err != nil { + return err + } else if v, 
ok := d.GetOkExists("tls_inspect"); !tpgresource.IsEmptyValue(reflect.ValueOf(tlsInspectProp)) && (ok || !reflect.DeepEqual(v, tlsInspectProp)) { + obj["tlsInspect"] = tlsInspectProp + } + directionProp, err := expandComputeRegionNetworkFirewallPolicyRuleDirection(d.Get("direction"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("direction"); !tpgresource.IsEmptyValue(reflect.ValueOf(directionProp)) && (ok || !reflect.DeepEqual(v, directionProp)) { + obj["direction"] = directionProp + } + enableLoggingProp, err := expandComputeRegionNetworkFirewallPolicyRuleEnableLogging(d.Get("enable_logging"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_logging"); ok || !reflect.DeepEqual(v, enableLoggingProp) { + obj["enableLogging"] = enableLoggingProp + } + targetServiceAccountsProp, err := expandComputeRegionNetworkFirewallPolicyRuleTargetServiceAccounts(d.Get("target_service_accounts"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("target_service_accounts"); ok || !reflect.DeepEqual(v, targetServiceAccountsProp) { + obj["targetServiceAccounts"] = targetServiceAccountsProp + } + targetSecureTagsProp, err := expandComputeRegionNetworkFirewallPolicyRuleTargetSecureTags(d.Get("target_secure_tags"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("target_secure_tags"); ok || !reflect.DeepEqual(v, targetSecureTagsProp) { + obj["targetSecureTags"] = targetSecureTagsProp + } + disabledProp, err := expandComputeRegionNetworkFirewallPolicyRuleDisabled(d.Get("disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(disabledProp)) && (ok || !reflect.DeepEqual(v, disabledProp)) { + obj["disabled"] = disabledProp + } + + url, err := tpgresource.ReplaceVarsForId(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}/addRule") if err != nil { return err } - billingProject := project + + log.Printf("[DEBUG] Creating new RegionNetworkFirewallPolicyRule: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionNetworkFirewallPolicyRule: %s", err) + } + billingProject = strings.TrimPrefix(project, "projects/") + // err == nil indicates that the billing_project value was found if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating RegionNetworkFirewallPolicyRule: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}/{{priority}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) } - res, err := 
client.ApplyNetworkFirewallPolicyRule(context.Background(), obj, directive...) + d.SetId(id) - if _, ok := err.(dcl.DiffAfterApplyError); ok { - log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { + err = ComputeOperationWaitTime( + config, res, tpgresource.GetResourceNameFromSelfLink(project), "Creating RegionNetworkFirewallPolicyRule", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { // The resource didn't actually create d.SetId("") - return fmt.Errorf("Error creating NetworkFirewallPolicyRule: %s", err) + return fmt.Errorf("Error waiting to create RegionNetworkFirewallPolicyRule: %s", err) } - log.Printf("[DEBUG] Finished creating NetworkFirewallPolicyRule %q: %#v", d.Id(), res) + log.Printf("[DEBUG] Finished creating RegionNetworkFirewallPolicyRule %q: %#v", d.Id(), res) return resourceComputeRegionNetworkFirewallPolicyRuleRead(d, meta) } func resourceComputeRegionNetworkFirewallPolicyRuleRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) - project, err := tpgresource.GetProject(d, config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - region, err := tpgresource.GetRegion(d, config) + + url, err := tpgresource.ReplaceVarsForId(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}/getRule?priority={{priority}}") if err != nil { return err } - obj := &compute.NetworkFirewallPolicyRule{ - Action: dcl.String(d.Get("action").(string)), - Direction: compute.NetworkFirewallPolicyRuleDirectionEnumRef(d.Get("direction").(string)), - FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), - Match: expandComputeRegionNetworkFirewallPolicyRuleMatch(d.Get("match")), - Priority: dcl.Int64(int64(d.Get("priority").(int))), - Description: dcl.String(d.Get("description").(string)), - Disabled: dcl.Bool(d.Get("disabled").(bool)), - EnableLogging: dcl.Bool(d.Get("enable_logging").(bool)), - Project: dcl.String(project), - Location: dcl.String(region), - RuleName: dcl.String(d.Get("rule_name").(string)), - SecurityProfileGroup: dcl.String(d.Get("security_profile_group").(string)), - TargetSecureTags: expandComputeRegionNetworkFirewallPolicyRuleTargetSecureTagsArray(d.Get("target_secure_tags")), - TargetServiceAccounts: tpgdclresource.ExpandStringArray(d.Get("target_service_accounts")), - TlsInspect: dcl.Bool(d.Get("tls_inspect").(bool)), - } + billingProject := "" - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + project, err := tpgresource.GetProject(d, config) if err != nil { - return err + return fmt.Errorf("Error fetching project for RegionNetworkFirewallPolicyRule: %s", err) } - billingProject := project + billingProject = strings.TrimPrefix(project, "projects/") + // err == nil indicates that the billing_project value was found if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.GetNetworkFirewallPolicyRule(context.Background(), obj) + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + 
Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) if err != nil { - resourceName := fmt.Sprintf("ComputeRegionNetworkFirewallPolicyRule %q", d.Id()) - return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRegionNetworkFirewallPolicyRule %q", d.Id())) } - if err = d.Set("action", res.Action); err != nil { - return fmt.Errorf("error setting action in state: %s", err) - } - if err = d.Set("direction", res.Direction); err != nil { - return fmt.Errorf("error setting direction in state: %s", err) + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading RegionNetworkFirewallPolicyRule: %s", err) } - if err = d.Set("firewall_policy", res.FirewallPolicy); err != nil { - return fmt.Errorf("error setting firewall_policy in state: %s", err) + + if err := d.Set("creation_timestamp", flattenComputeRegionNetworkFirewallPolicyRuleCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkFirewallPolicyRule: %s", err) } - if err = d.Set("match", flattenComputeRegionNetworkFirewallPolicyRuleMatch(res.Match)); err != nil { - return fmt.Errorf("error setting match in state: %s", err) + if err := d.Set("kind", flattenComputeRegionNetworkFirewallPolicyRuleKind(res["kind"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkFirewallPolicyRule: %s", err) } - if err = d.Set("priority", res.Priority); err != nil { - return fmt.Errorf("error setting priority in state: %s", err) + if err := d.Set("rule_name", flattenComputeRegionNetworkFirewallPolicyRuleRuleName(res["ruleName"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkFirewallPolicyRule: %s", err) } - if err = d.Set("description", res.Description); err != nil { - return fmt.Errorf("error setting description in state: %s", err) + if err := d.Set("description", flattenComputeRegionNetworkFirewallPolicyRuleDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkFirewallPolicyRule: %s", err) } - if err = d.Set("disabled", res.Disabled); err != nil { - return fmt.Errorf("error setting disabled in state: %s", err) + if err := d.Set("priority", flattenComputeRegionNetworkFirewallPolicyRulePriority(res["priority"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkFirewallPolicyRule: %s", err) } - if err = d.Set("enable_logging", res.EnableLogging); err != nil { - return fmt.Errorf("error setting enable_logging in state: %s", err) + if err := d.Set("match", flattenComputeRegionNetworkFirewallPolicyRuleMatch(res["match"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkFirewallPolicyRule: %s", err) } - if err = d.Set("project", res.Project); err != nil { - return fmt.Errorf("error setting project in state: %s", err) + if err := d.Set("action", flattenComputeRegionNetworkFirewallPolicyRuleAction(res["action"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkFirewallPolicyRule: %s", err) } - if err = d.Set("region", res.Location); err != nil { - return fmt.Errorf("error setting region in state: %s", err) + if err := d.Set("security_profile_group", flattenComputeRegionNetworkFirewallPolicyRuleSecurityProfileGroup(res["securityProfileGroup"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkFirewallPolicyRule: %s", err) } - if err = d.Set("rule_name", 
res.RuleName); err != nil { - return fmt.Errorf("error setting rule_name in state: %s", err) + if err := d.Set("tls_inspect", flattenComputeRegionNetworkFirewallPolicyRuleTlsInspect(res["tlsInspect"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkFirewallPolicyRule: %s", err) } - if err = d.Set("security_profile_group", res.SecurityProfileGroup); err != nil { - return fmt.Errorf("error setting security_profile_group in state: %s", err) + if err := d.Set("direction", flattenComputeRegionNetworkFirewallPolicyRuleDirection(res["direction"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkFirewallPolicyRule: %s", err) } - if err = d.Set("target_secure_tags", flattenComputeRegionNetworkFirewallPolicyRuleTargetSecureTagsArray(res.TargetSecureTags)); err != nil { - return fmt.Errorf("error setting target_secure_tags in state: %s", err) + if err := d.Set("enable_logging", flattenComputeRegionNetworkFirewallPolicyRuleEnableLogging(res["enableLogging"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkFirewallPolicyRule: %s", err) } - if err = d.Set("target_service_accounts", res.TargetServiceAccounts); err != nil { - return fmt.Errorf("error setting target_service_accounts in state: %s", err) + if err := d.Set("rule_tuple_count", flattenComputeRegionNetworkFirewallPolicyRuleRuleTupleCount(res["ruleTupleCount"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkFirewallPolicyRule: %s", err) } - if err = d.Set("tls_inspect", res.TlsInspect); err != nil { - return fmt.Errorf("error setting tls_inspect in state: %s", err) + if err := d.Set("target_service_accounts", flattenComputeRegionNetworkFirewallPolicyRuleTargetServiceAccounts(res["targetServiceAccounts"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkFirewallPolicyRule: %s", err) } - if err = d.Set("kind", res.Kind); err != nil { - return fmt.Errorf("error setting kind in state: %s", err) + if err := d.Set("target_secure_tags", flattenComputeRegionNetworkFirewallPolicyRuleTargetSecureTags(res["targetSecureTags"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkFirewallPolicyRule: %s", err) } - if err = d.Set("rule_tuple_count", res.RuleTupleCount); err != nil { - return fmt.Errorf("error setting rule_tuple_count in state: %s", err) + if err := d.Set("disabled", flattenComputeRegionNetworkFirewallPolicyRuleDisabled(res["disabled"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkFirewallPolicyRule: %s", err) } return nil } + func resourceComputeRegionNetworkFirewallPolicyRuleUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionNetworkFirewallPolicyRule: %s", err) + } + billingProject = strings.TrimPrefix(project, "projects/") + + obj := make(map[string]interface{}) + ruleNameProp, err := expandComputeRegionNetworkFirewallPolicyRuleRuleName(d.Get("rule_name"), d, config) if err != nil { return err + } else if v, ok := d.GetOkExists("rule_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ruleNameProp)) { + obj["ruleName"] = ruleNameProp } - region, err := tpgresource.GetRegion(d, config) + descriptionProp, err := 
expandComputeRegionNetworkFirewallPolicyRuleDescription(d.Get("description"), d, config) if err != nil { return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp } - - obj := &compute.NetworkFirewallPolicyRule{ - Action: dcl.String(d.Get("action").(string)), - Direction: compute.NetworkFirewallPolicyRuleDirectionEnumRef(d.Get("direction").(string)), - FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), - Match: expandComputeRegionNetworkFirewallPolicyRuleMatch(d.Get("match")), - Priority: dcl.Int64(int64(d.Get("priority").(int))), - Description: dcl.String(d.Get("description").(string)), - Disabled: dcl.Bool(d.Get("disabled").(bool)), - EnableLogging: dcl.Bool(d.Get("enable_logging").(bool)), - Project: dcl.String(project), - Location: dcl.String(region), - RuleName: dcl.String(d.Get("rule_name").(string)), - SecurityProfileGroup: dcl.String(d.Get("security_profile_group").(string)), - TargetSecureTags: expandComputeRegionNetworkFirewallPolicyRuleTargetSecureTagsArray(d.Get("target_secure_tags")), - TargetServiceAccounts: tpgdclresource.ExpandStringArray(d.Get("target_service_accounts")), - TlsInspect: dcl.Bool(d.Get("tls_inspect").(bool)), + priorityProp, err := expandComputeRegionNetworkFirewallPolicyRulePriority(d.Get("priority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("priority"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, priorityProp)) { + obj["priority"] = priorityProp } - directive := tpgdclresource.UpdateDirective - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + matchProp, err := expandComputeRegionNetworkFirewallPolicyRuleMatch(d.Get("match"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("match"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, matchProp)) { + obj["match"] = matchProp + } + actionProp, err := expandComputeRegionNetworkFirewallPolicyRuleAction(d.Get("action"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("action"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, actionProp)) { + obj["action"] = actionProp + } + securityProfileGroupProp, err := expandComputeRegionNetworkFirewallPolicyRuleSecurityProfileGroup(d.Get("security_profile_group"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("security_profile_group"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, securityProfileGroupProp)) { + obj["securityProfileGroup"] = securityProfileGroupProp + } + tlsInspectProp, err := expandComputeRegionNetworkFirewallPolicyRuleTlsInspect(d.Get("tls_inspect"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tls_inspect"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tlsInspectProp)) { + obj["tlsInspect"] = tlsInspectProp + } + directionProp, err := expandComputeRegionNetworkFirewallPolicyRuleDirection(d.Get("direction"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("direction"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, directionProp)) { + obj["direction"] = directionProp + } + enableLoggingProp, err := expandComputeRegionNetworkFirewallPolicyRuleEnableLogging(d.Get("enable_logging"), d, config) + if err != nil { + 
return err + } else if v, ok := d.GetOkExists("enable_logging"); ok || !reflect.DeepEqual(v, enableLoggingProp) { + obj["enableLogging"] = enableLoggingProp + } + targetServiceAccountsProp, err := expandComputeRegionNetworkFirewallPolicyRuleTargetServiceAccounts(d.Get("target_service_accounts"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("target_service_accounts"); ok || !reflect.DeepEqual(v, targetServiceAccountsProp) { + obj["targetServiceAccounts"] = targetServiceAccountsProp + } + targetSecureTagsProp, err := expandComputeRegionNetworkFirewallPolicyRuleTargetSecureTags(d.Get("target_secure_tags"), d, config) if err != nil { return err + } else if v, ok := d.GetOkExists("target_secure_tags"); ok || !reflect.DeepEqual(v, targetSecureTagsProp) { + obj["targetSecureTags"] = targetSecureTagsProp + } + disabledProp, err := expandComputeRegionNetworkFirewallPolicyRuleDisabled(d.Get("disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disabledProp)) { + obj["disabled"] = disabledProp } - billingProject := "" + url, err := tpgresource.ReplaceVarsForId(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}/patchRule?priority={{priority}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating RegionNetworkFirewallPolicyRule %q: %#v", d.Id(), obj) + headers := make(http.Header) + // err == nil indicates that the billing_project value was found if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating RegionNetworkFirewallPolicyRule %q: %s", d.Id(), err) } else { - client.Config.BasePath = bp + log.Printf("[DEBUG] Finished updating RegionNetworkFirewallPolicyRule %q: %#v", d.Id(), res) } - res, err := client.ApplyNetworkFirewallPolicyRule(context.Background(), obj, directive...) 
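// Editorial note (not part of the generated diff): in the MMv1 request bodies built above, scalar fields are guarded with tpgresource.IsEmptyValue so zero values are dropped from the payload, while enable_logging, target_service_accounts and target_secure_tags skip that guard (`ok || !reflect.DeepEqual(v, ...)`), so an explicit false or an empty list is still serialized and can clear the server-side value.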
- if _, ok := err.(dcl.DiffAfterApplyError); ok { - log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error updating NetworkFirewallPolicyRule: %s", err) - } + err = ComputeOperationWaitTime( + config, res, tpgresource.GetResourceNameFromSelfLink(project), "Updating RegionNetworkFirewallPolicyRule", userAgent, + d.Timeout(schema.TimeoutUpdate)) - log.Printf("[DEBUG] Finished creating NetworkFirewallPolicyRule %q: %#v", d.Id(), res) + if err != nil { + return err + } return resourceComputeRegionNetworkFirewallPolicyRuleRead(d, meta) } func resourceComputeRegionNetworkFirewallPolicyRuleDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - region, err := tpgresource.GetRegion(d, config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - obj := &compute.NetworkFirewallPolicyRule{ - Action: dcl.String(d.Get("action").(string)), - Direction: compute.NetworkFirewallPolicyRuleDirectionEnumRef(d.Get("direction").(string)), - FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), - Match: expandComputeRegionNetworkFirewallPolicyRuleMatch(d.Get("match")), - Priority: dcl.Int64(int64(d.Get("priority").(int))), - Description: dcl.String(d.Get("description").(string)), - Disabled: dcl.Bool(d.Get("disabled").(bool)), - EnableLogging: dcl.Bool(d.Get("enable_logging").(bool)), - Project: dcl.String(project), - Location: dcl.String(region), - RuleName: dcl.String(d.Get("rule_name").(string)), - SecurityProfileGroup: dcl.String(d.Get("security_profile_group").(string)), - TargetSecureTags: expandComputeRegionNetworkFirewallPolicyRuleTargetSecureTagsArray(d.Get("target_secure_tags")), - TargetServiceAccounts: tpgdclresource.ExpandStringArray(d.Get("target_service_accounts")), - TlsInspect: dcl.Bool(d.Get("tls_inspect").(bool)), + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionNetworkFirewallPolicyRule: %s", err) } + billingProject = strings.TrimPrefix(project, "projects/") - log.Printf("[DEBUG] Deleting NetworkFirewallPolicyRule %q", d.Id()) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + url, err := tpgresource.ReplaceVarsForId(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}/removeRule?priority={{priority}}") if err != nil { return err } - billingProject := project + + var obj map[string]interface{} + // err == nil indicates that the billing_project value was found if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting RegionNetworkFirewallPolicyRule %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: 
d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "RegionNetworkFirewallPolicyRule") } - if err := client.DeleteNetworkFirewallPolicyRule(context.Background(), obj); err != nil { - return fmt.Errorf("Error deleting NetworkFirewallPolicyRule: %s", err) + + err = ComputeOperationWaitTime( + config, res, tpgresource.GetResourceNameFromSelfLink(project), "Deleting RegionNetworkFirewallPolicyRule", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err } - log.Printf("[DEBUG] Finished deleting NetworkFirewallPolicyRule %q", d.Id()) + log.Printf("[DEBUG] Finished deleting RegionNetworkFirewallPolicyRule %q: %#v", d.Id(), res) return nil } func resourceComputeRegionNetworkFirewallPolicyRuleImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { config := meta.(*transport_tpg.Config) - if err := tpgresource.ParseImportId([]string{ - "projects/(?P<project>[^/]+)/regions/(?P<region>[^/]+)/firewallPolicies/(?P<firewall_policy>[^/]+)/(?P<priority>[^/]+)", - "(?P<project>[^/]+)/(?P<region>[^/]+)/(?P<firewall_policy>[^/]+)/(?P<priority>[^/]+)", - "(?P<region>[^/]+)/(?P<firewall_policy>[^/]+)/(?P<priority>[^/]+)", - "(?P<firewall_policy>[^/]+)/(?P<priority>[^/]+)", + "^projects/(?P<project>[^/]+)/regions/(?P<region>[^/]+)/firewallPolicies/(?P<firewall_policy>[^/]+)/(?P<priority>[^/]+)$", + "^(?P<project>[^/]+)/(?P<region>[^/]+)/(?P<firewall_policy>[^/]+)/(?P<priority>[^/]+)$", + "^(?P<region>[^/]+)/(?P<firewall_policy>[^/]+)/(?P<priority>[^/]+)$", + "^(?P<firewall_policy>[^/]+)/(?P<priority>[^/]+)$", }, d, config); err != nil { return nil, err } @@ -629,219 +754,523 @@ func resourceComputeRegionNetworkFirewallPolicyRuleImport(d *schema.ResourceData return []*schema.ResourceData{d}, nil } -func expandComputeRegionNetworkFirewallPolicyRuleMatch(o interface{}) *compute.NetworkFirewallPolicyRuleMatch { - if o == nil { - return compute.EmptyNetworkFirewallPolicyRuleMatch - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return compute.EmptyNetworkFirewallPolicyRuleMatch +func flattenComputeRegionNetworkFirewallPolicyRuleCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionNetworkFirewallPolicyRuleKind(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionNetworkFirewallPolicyRuleRuleName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionNetworkFirewallPolicyRuleDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionNetworkFirewallPolicyRulePriority(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } } - obj := objArr[0].(map[string]interface{}) - return &compute.NetworkFirewallPolicyRuleMatch{ - Layer4Configs: expandComputeRegionNetworkFirewallPolicyRuleMatchLayer4ConfigsArray(obj["layer4_configs"]), - DestAddressGroups: tpgdclresource.ExpandStringArray(obj["dest_address_groups"]), - DestFqdns: tpgdclresource.ExpandStringArray(obj["dest_fqdns"]), - DestIPRanges: tpgdclresource.ExpandStringArray(obj["dest_ip_ranges"]), - DestRegionCodes: tpgdclresource.ExpandStringArray(obj["dest_region_codes"]), - DestThreatIntelligences: tpgdclresource.ExpandStringArray(obj["dest_threat_intelligences"]), - SrcAddressGroups: tpgdclresource.ExpandStringArray(obj["src_address_groups"]), - SrcFqdns: tpgdclresource.ExpandStringArray(obj["src_fqdns"]),
SrcIPRanges: tpgdclresource.ExpandStringArray(obj["src_ip_ranges"]), - SrcRegionCodes: tpgdclresource.ExpandStringArray(obj["src_region_codes"]), - SrcSecureTags: expandComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTagsArray(obj["src_secure_tags"]), - SrcThreatIntelligences: tpgdclresource.ExpandStringArray(obj["src_threat_intelligences"]), + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal } + + return v // let terraform core handle it otherwise } -func flattenComputeRegionNetworkFirewallPolicyRuleMatch(obj *compute.NetworkFirewallPolicyRuleMatch) interface{} { - if obj == nil || obj.Empty() { +func flattenComputeRegionNetworkFirewallPolicyRuleMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { return nil } - transformed := map[string]interface{}{ - "layer4_configs": flattenComputeRegionNetworkFirewallPolicyRuleMatchLayer4ConfigsArray(obj.Layer4Configs), - "dest_address_groups": obj.DestAddressGroups, - "dest_fqdns": obj.DestFqdns, - "dest_ip_ranges": obj.DestIPRanges, - "dest_region_codes": obj.DestRegionCodes, - "dest_threat_intelligences": obj.DestThreatIntelligences, - "src_address_groups": obj.SrcAddressGroups, - "src_fqdns": obj.SrcFqdns, - "src_ip_ranges": obj.SrcIPRanges, - "src_region_codes": obj.SrcRegionCodes, - "src_secure_tags": flattenComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTagsArray(obj.SrcSecureTags), - "src_threat_intelligences": obj.SrcThreatIntelligences, + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil } - + transformed := make(map[string]interface{}) + transformed["src_ip_ranges"] = + flattenComputeRegionNetworkFirewallPolicyRuleMatchSrcIpRanges(original["srcIpRanges"], d, config) + transformed["dest_ip_ranges"] = + flattenComputeRegionNetworkFirewallPolicyRuleMatchDestIpRanges(original["destIpRanges"], d, config) + transformed["layer4_configs"] = + flattenComputeRegionNetworkFirewallPolicyRuleMatchLayer4Configs(original["layer4Configs"], d, config) + transformed["src_secure_tags"] = + flattenComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTags(original["srcSecureTags"], d, config) + transformed["dest_address_groups"] = + flattenComputeRegionNetworkFirewallPolicyRuleMatchDestAddressGroups(original["destAddressGroups"], d, config) + transformed["src_address_groups"] = + flattenComputeRegionNetworkFirewallPolicyRuleMatchSrcAddressGroups(original["srcAddressGroups"], d, config) + transformed["src_fqdns"] = + flattenComputeRegionNetworkFirewallPolicyRuleMatchSrcFqdns(original["srcFqdns"], d, config) + transformed["dest_fqdns"] = + flattenComputeRegionNetworkFirewallPolicyRuleMatchDestFqdns(original["destFqdns"], d, config) + transformed["src_region_codes"] = + flattenComputeRegionNetworkFirewallPolicyRuleMatchSrcRegionCodes(original["srcRegionCodes"], d, config) + transformed["dest_region_codes"] = + flattenComputeRegionNetworkFirewallPolicyRuleMatchDestRegionCodes(original["destRegionCodes"], d, config) + transformed["dest_threat_intelligences"] = + flattenComputeRegionNetworkFirewallPolicyRuleMatchDestThreatIntelligences(original["destThreatIntelligences"], d, config) + transformed["src_threat_intelligences"] = + flattenComputeRegionNetworkFirewallPolicyRuleMatchSrcThreatIntelligences(original["srcThreatIntelligences"], d, config) return []interface{}{transformed} +} +func flattenComputeRegionNetworkFirewallPolicyRuleMatchSrcIpRanges(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} +func flattenComputeRegionNetworkFirewallPolicyRuleMatchDestIpRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } -func expandComputeRegionNetworkFirewallPolicyRuleMatchLayer4ConfigsArray(o interface{}) []compute.NetworkFirewallPolicyRuleMatchLayer4Configs { - if o == nil { - return make([]compute.NetworkFirewallPolicyRuleMatchLayer4Configs, 0) - } - objs := o.([]interface{}) - if len(objs) == 0 || objs[0] == nil { - return make([]compute.NetworkFirewallPolicyRuleMatchLayer4Configs, 0) +func flattenComputeRegionNetworkFirewallPolicyRuleMatchLayer4Configs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "ip_protocol": flattenComputeRegionNetworkFirewallPolicyRuleMatchLayer4ConfigsIpProtocol(original["ipProtocol"], d, config), + "ports": flattenComputeRegionNetworkFirewallPolicyRuleMatchLayer4ConfigsPorts(original["ports"], d, config), + }) } + return transformed +} +func flattenComputeRegionNetworkFirewallPolicyRuleMatchLayer4ConfigsIpProtocol(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} - items := make([]compute.NetworkFirewallPolicyRuleMatchLayer4Configs, 0, len(objs)) - for _, item := range objs { - i := expandComputeRegionNetworkFirewallPolicyRuleMatchLayer4Configs(item) - items = append(items, *i) +func flattenComputeRegionNetworkFirewallPolicyRuleMatchLayer4ConfigsPorts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTagsName(original["name"], d, config), + "state": flattenComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTagsState(original["state"], d, config), + }) } + return transformed +} +func flattenComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTagsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} - return items +func flattenComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTagsState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } -func expandComputeRegionNetworkFirewallPolicyRuleMatchLayer4Configs(o interface{}) *compute.NetworkFirewallPolicyRuleMatchLayer4Configs { - if o == nil { - return compute.EmptyNetworkFirewallPolicyRuleMatchLayer4Configs - } +func flattenComputeRegionNetworkFirewallPolicyRuleMatchDestAddressGroups(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} - obj := o.(map[string]interface{}) - return &compute.NetworkFirewallPolicyRuleMatchLayer4Configs{ - IPProtocol: 
dcl.String(obj["ip_protocol"].(string)), - Ports: tpgdclresource.ExpandStringArray(obj["ports"]), - } +func flattenComputeRegionNetworkFirewallPolicyRuleMatchSrcAddressGroups(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } -func flattenComputeRegionNetworkFirewallPolicyRuleMatchLayer4ConfigsArray(objs []compute.NetworkFirewallPolicyRuleMatchLayer4Configs) []interface{} { - if objs == nil { - return nil - } +func flattenComputeRegionNetworkFirewallPolicyRuleMatchSrcFqdns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} - items := []interface{}{} - for _, item := range objs { - i := flattenComputeRegionNetworkFirewallPolicyRuleMatchLayer4Configs(&item) - items = append(items, i) - } +func flattenComputeRegionNetworkFirewallPolicyRuleMatchDestFqdns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} - return items +func flattenComputeRegionNetworkFirewallPolicyRuleMatchSrcRegionCodes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } -func flattenComputeRegionNetworkFirewallPolicyRuleMatchLayer4Configs(obj *compute.NetworkFirewallPolicyRuleMatchLayer4Configs) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "ip_protocol": obj.IPProtocol, - "ports": obj.Ports, - } +func flattenComputeRegionNetworkFirewallPolicyRuleMatchDestRegionCodes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} - return transformed +func flattenComputeRegionNetworkFirewallPolicyRuleMatchDestThreatIntelligences(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} +func flattenComputeRegionNetworkFirewallPolicyRuleMatchSrcThreatIntelligences(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } -func expandComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTagsArray(o interface{}) []compute.NetworkFirewallPolicyRuleMatchSrcSecureTags { - if o == nil { - return make([]compute.NetworkFirewallPolicyRuleMatchSrcSecureTags, 0) - } - objs := o.([]interface{}) - if len(objs) == 0 || objs[0] == nil { - return make([]compute.NetworkFirewallPolicyRuleMatchSrcSecureTags, 0) - } +func flattenComputeRegionNetworkFirewallPolicyRuleAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} - items := make([]compute.NetworkFirewallPolicyRuleMatchSrcSecureTags, 0, len(objs)) - for _, item := range objs { - i := expandComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTags(item) - items = append(items, *i) - } +func flattenComputeRegionNetworkFirewallPolicyRuleSecurityProfileGroup(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} - return items +func flattenComputeRegionNetworkFirewallPolicyRuleTlsInspect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } -func expandComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTags(o interface{}) *compute.NetworkFirewallPolicyRuleMatchSrcSecureTags { - if o == nil { - return compute.EmptyNetworkFirewallPolicyRuleMatchSrcSecureTags - } +func flattenComputeRegionNetworkFirewallPolicyRuleDirection(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} - obj := o.(map[string]interface{}) - return &compute.NetworkFirewallPolicyRuleMatchSrcSecureTags{ - 
Name: dcl.String(obj["name"].(string)), - } +func flattenComputeRegionNetworkFirewallPolicyRuleEnableLogging(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } -func flattenComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTagsArray(objs []compute.NetworkFirewallPolicyRuleMatchSrcSecureTags) []interface{} { - if objs == nil { - return nil +func flattenComputeRegionNetworkFirewallPolicyRuleRuleTupleCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } } - items := []interface{}{} - for _, item := range objs { - i := flattenComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTags(&item) - items = append(items, i) + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal } - return items + return v // let terraform core handle it otherwise } -func flattenComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTags(obj *compute.NetworkFirewallPolicyRuleMatchSrcSecureTags) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "name": obj.Name, - "state": obj.State, - } +func flattenComputeRegionNetworkFirewallPolicyRuleTargetServiceAccounts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} +func flattenComputeRegionNetworkFirewallPolicyRuleTargetSecureTags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenComputeRegionNetworkFirewallPolicyRuleTargetSecureTagsName(original["name"], d, config), + "state": flattenComputeRegionNetworkFirewallPolicyRuleTargetSecureTagsState(original["state"], d, config), + }) + } return transformed +} +func flattenComputeRegionNetworkFirewallPolicyRuleTargetSecureTagsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionNetworkFirewallPolicyRuleTargetSecureTagsState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionNetworkFirewallPolicyRuleDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandComputeRegionNetworkFirewallPolicyRuleRuleName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} +func expandComputeRegionNetworkFirewallPolicyRuleDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil } -func expandComputeRegionNetworkFirewallPolicyRuleTargetSecureTagsArray(o interface{}) []compute.NetworkFirewallPolicyRuleTargetSecureTags { - if o == nil { - return make([]compute.NetworkFirewallPolicyRuleTargetSecureTags, 0) + +func expandComputeRegionNetworkFirewallPolicyRulePriority(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandComputeRegionNetworkFirewallPolicyRuleMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) - objs := o.([]interface{}) - if len(objs) == 0 || objs[0] == nil { - return make([]compute.NetworkFirewallPolicyRuleTargetSecureTags, 0) + transformedSrcIpRanges, err := expandComputeRegionNetworkFirewallPolicyRuleMatchSrcIpRanges(original["src_ip_ranges"], d, config) + if err != nil { + return nil, err + } else { + transformed["srcIpRanges"] = transformedSrcIpRanges } - items := make([]compute.NetworkFirewallPolicyRuleTargetSecureTags, 0, len(objs)) - for _, item := range objs { - i := expandComputeRegionNetworkFirewallPolicyRuleTargetSecureTags(item) - items = append(items, *i) + transformedDestIpRanges, err := expandComputeRegionNetworkFirewallPolicyRuleMatchDestIpRanges(original["dest_ip_ranges"], d, config) + if err != nil { + return nil, err + } else { + transformed["destIpRanges"] = transformedDestIpRanges } - return items -} + transformedLayer4Configs, err := expandComputeRegionNetworkFirewallPolicyRuleMatchLayer4Configs(original["layer4_configs"], d, config) + if err != nil { + return nil, err + } else { + transformed["layer4Configs"] = transformedLayer4Configs + } -func expandComputeRegionNetworkFirewallPolicyRuleTargetSecureTags(o interface{}) *compute.NetworkFirewallPolicyRuleTargetSecureTags { - if o == nil { - return compute.EmptyNetworkFirewallPolicyRuleTargetSecureTags + transformedSrcSecureTags, err := expandComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTags(original["src_secure_tags"], d, config) + if err != nil { + return nil, err + } else { + transformed["srcSecureTags"] = transformedSrcSecureTags } - obj := o.(map[string]interface{}) - return &compute.NetworkFirewallPolicyRuleTargetSecureTags{ - Name: dcl.String(obj["name"].(string)), + transformedDestAddressGroups, err := expandComputeRegionNetworkFirewallPolicyRuleMatchDestAddressGroups(original["dest_address_groups"], d, config) + if err != nil { + return nil, err + } else { + transformed["destAddressGroups"] = transformedDestAddressGroups } -} -func flattenComputeRegionNetworkFirewallPolicyRuleTargetSecureTagsArray(objs []compute.NetworkFirewallPolicyRuleTargetSecureTags) []interface{} { - if objs == nil { - return nil + transformedSrcAddressGroups, err := expandComputeRegionNetworkFirewallPolicyRuleMatchSrcAddressGroups(original["src_address_groups"], d, config) + if err != nil { + return nil, err + } else { + transformed["srcAddressGroups"] = transformedSrcAddressGroups } - items := []interface{}{} - for _, item := range objs { - i := flattenComputeRegionNetworkFirewallPolicyRuleTargetSecureTags(&item) - items = append(items, i) + transformedSrcFqdns, err := expandComputeRegionNetworkFirewallPolicyRuleMatchSrcFqdns(original["src_fqdns"], d, config) + if err != nil { + return nil, err + } else { + transformed["srcFqdns"] = transformedSrcFqdns } - return items -} + transformedDestFqdns, err := expandComputeRegionNetworkFirewallPolicyRuleMatchDestFqdns(original["dest_fqdns"], d, config) + if err != nil { + return nil, err + } else { + transformed["destFqdns"] = transformedDestFqdns + } -func flattenComputeRegionNetworkFirewallPolicyRuleTargetSecureTags(obj *compute.NetworkFirewallPolicyRuleTargetSecureTags) interface{} { - if obj == nil || obj.Empty() { - return nil + 
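// Editorial note (not part of the generated diff): every stanza in this match expander follows the same shape: expand the snake_case Terraform field, propagate any error, then copy the result onto the camelCase API key. These top-level match fields are assigned unconditionally, unlike the nested layer4_configs/src_secure_tags elements below, which are only set when non-empty.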
transformedSrcRegionCodes, err := expandComputeRegionNetworkFirewallPolicyRuleMatchSrcRegionCodes(original["src_region_codes"], d, config) + if err != nil { + return nil, err + } else { + transformed["srcRegionCodes"] = transformedSrcRegionCodes } - transformed := map[string]interface{}{ - "name": obj.Name, - "state": obj.State, + + transformedDestRegionCodes, err := expandComputeRegionNetworkFirewallPolicyRuleMatchDestRegionCodes(original["dest_region_codes"], d, config) + if err != nil { + return nil, err + } else { + transformed["destRegionCodes"] = transformedDestRegionCodes } - return transformed + transformedDestThreatIntelligences, err := expandComputeRegionNetworkFirewallPolicyRuleMatchDestThreatIntelligences(original["dest_threat_intelligences"], d, config) + if err != nil { + return nil, err + } else { + transformed["destThreatIntelligences"] = transformedDestThreatIntelligences + } + + transformedSrcThreatIntelligences, err := expandComputeRegionNetworkFirewallPolicyRuleMatchSrcThreatIntelligences(original["src_threat_intelligences"], d, config) + if err != nil { + return nil, err + } else { + transformed["srcThreatIntelligences"] = transformedSrcThreatIntelligences + } + + return transformed, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleMatchSrcIpRanges(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleMatchDestIpRanges(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleMatchLayer4Configs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIpProtocol, err := expandComputeRegionNetworkFirewallPolicyRuleMatchLayer4ConfigsIpProtocol(original["ip_protocol"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIpProtocol); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ipProtocol"] = transformedIpProtocol + } + + transformedPorts, err := expandComputeRegionNetworkFirewallPolicyRuleMatchLayer4ConfigsPorts(original["ports"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPorts); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ports"] = transformedPorts + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleMatchLayer4ConfigsIpProtocol(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleMatchLayer4ConfigsPorts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTags(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := 
expandComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTagsName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedState, err := expandComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTagsState(original["state"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedState); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["state"] = transformedState + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTagsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTagsState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleMatchDestAddressGroups(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleMatchSrcAddressGroups(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleMatchSrcFqdns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleMatchDestFqdns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleMatchSrcRegionCodes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleMatchDestRegionCodes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleMatchDestThreatIntelligences(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleMatchSrcThreatIntelligences(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleSecurityProfileGroup(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleTlsInspect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleDirection(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleEnableLogging(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { 
+ return v, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleTargetServiceAccounts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleTargetSecureTags(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandComputeRegionNetworkFirewallPolicyRuleTargetSecureTagsName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedState, err := expandComputeRegionNetworkFirewallPolicyRuleTargetSecureTagsState(original["state"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedState); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["state"] = transformedState + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleTargetSecureTagsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkFirewallPolicyRuleTargetSecureTagsState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} +func expandComputeRegionNetworkFirewallPolicyRuleDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_rule_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_rule_sweeper.go new file mode 100644 index 00000000000..eed3af39ad3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_rule_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeRegionNetworkFirewallPolicyRule", testSweepComputeRegionNetworkFirewallPolicyRule) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeRegionNetworkFirewallPolicyRule(region string) error { + resourceName := "ComputeRegionNetworkFirewallPolicyRule" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["regionNetworkFirewallPolicyRules"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
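+	// Only resources whose names carry a recognized test prefix (checked below via
+	// sweeper.IsSweepableTestResource) are deleted; everything else is counted and
+	// skipped so the sweeper never touches non-test resources.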
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be swept + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}/removeRule?priority={{priority}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_per_instance_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_per_instance_config.go index 5cd4e3025b9..c6306f49fea 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_per_instance_config.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_per_instance_config.go @@ -174,39 +174,39 @@ func ResourceComputeRegionPerInstanceConfig() *schema.Resource { "minimal_action": { Type: schema.TypeString, Optional: true, - Default: "NONE", Description: `The minimal action to perform on the instance during an update. Default is 'NONE'. Possible values are: * REPLACE * RESTART * REFRESH * NONE`, + Default: "NONE", }, "most_disruptive_allowed_action": { Type: schema.TypeString, Optional: true, - Default: "REPLACE", Description: `The most disruptive action to perform on the instance during an update. Default is 'REPLACE'. Possible values are: * REPLACE * RESTART * REFRESH * NONE`, + Default: "REPLACE", }, "remove_instance_on_destroy": { Type: schema.TypeBool, Optional: true, - Default: false, Description: `When true, deleting this config will immediately remove the underlying instance. When false, deleting this config will use the behavior as determined by remove_instance_on_destroy.`, + Default: false, }, "remove_instance_state_on_destroy": { Type: schema.TypeBool, Optional: true, - Default: false, Description: `When true, deleting this config will immediately remove any specified state from the underlying instance. When false, deleting this config will *not* immediately remove any state from the underlying instance. 
State will be removed on the next instance recreation or update.`, + Default: false, }, "project": { Type: schema.TypeString, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_ssl_certificate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_ssl_certificate.go index e1cb4b9223b..f330f74798c 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_ssl_certificate.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_ssl_certificate.go @@ -133,11 +133,11 @@ If it is not provided, the provider region is used.`, Description: "Creates a unique name beginning with the specified prefix. Conflicts with name.", ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { // https://cloud.google.com/compute/docs/reference/latest/sslCertificates#resource - // uuid is 26 characters, limit the prefix to 37. + // uuid is 9 characters, limit the prefix to 54. value := v.(string) - if len(value) > 37 { + if len(value) > 54 { errors = append(errors, fmt.Errorf( - "%q cannot be longer than 37 characters, name is limited to 63", k)) + "%q cannot be longer than 54 characters, name is limited to 63", k)) } return }, @@ -456,7 +456,12 @@ func expandComputeRegionSslCertificateName(v interface{}, d tpgresource.Terrafor if v, ok := d.GetOk("name"); ok { certName = v.(string) } else if v, ok := d.GetOk("name_prefix"); ok { - certName = id.PrefixedUniqueId(v.(string)) + prefix := v.(string) + if len(prefix) > 37 { + certName = tpgresource.ReducedPrefixedUniqueId(prefix) + } else { + certName = id.PrefixedUniqueId(prefix) + } } else { certName = id.UniqueId() } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_http_proxy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_http_proxy.go index c81b7fcb46e..d6b23527e31 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_http_proxy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_http_proxy.go @@ -78,6 +78,16 @@ to the BackendService.`, ForceNew: true, Description: `An optional description of this resource.`, }, + "http_keep_alive_timeout_sec": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `Specifies how long to keep a connection open, after completing a response, +while there is no matching traffic (in seconds). If an HTTP keepalive is +not specified, a default value (600 seconds) will be used. 
For Regional +HTTP(S) load balancer, the minimum allowed value is 5 seconds and the +maximum allowed value is 600 seconds.`, + }, "region": { Type: schema.TypeString, Computed: true, @@ -138,6 +148,12 @@ func resourceComputeRegionTargetHttpProxyCreate(d *schema.ResourceData, meta int } else if v, ok := d.GetOkExists("url_map"); !tpgresource.IsEmptyValue(reflect.ValueOf(urlMapProp)) && (ok || !reflect.DeepEqual(v, urlMapProp)) { obj["urlMap"] = urlMapProp } + httpKeepAliveTimeoutSecProp, err := expandComputeRegionTargetHttpProxyHttpKeepAliveTimeoutSec(d.Get("http_keep_alive_timeout_sec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("http_keep_alive_timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(httpKeepAliveTimeoutSecProp)) && (ok || !reflect.DeepEqual(v, httpKeepAliveTimeoutSecProp)) { + obj["httpKeepAliveTimeoutSec"] = httpKeepAliveTimeoutSecProp + } regionProp, err := expandComputeRegionTargetHttpProxyRegion(d.Get("region"), d, config) if err != nil { return err @@ -258,6 +274,9 @@ func resourceComputeRegionTargetHttpProxyRead(d *schema.ResourceData, meta inter if err := d.Set("url_map", flattenComputeRegionTargetHttpProxyUrlMap(res["urlMap"], d, config)); err != nil { return fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) } + if err := d.Set("http_keep_alive_timeout_sec", flattenComputeRegionTargetHttpProxyHttpKeepAliveTimeoutSec(res["httpKeepAliveTimeoutSec"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) + } if err := d.Set("region", flattenComputeRegionTargetHttpProxyRegion(res["region"], d, config)); err != nil { return fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) } @@ -449,6 +468,23 @@ func flattenComputeRegionTargetHttpProxyUrlMap(v interface{}, d *schema.Resource return tpgresource.ConvertSelfLinkToV1(v.(string)) } +func flattenComputeRegionTargetHttpProxyHttpKeepAliveTimeoutSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + func flattenComputeRegionTargetHttpProxyRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v @@ -472,6 +508,10 @@ func expandComputeRegionTargetHttpProxyUrlMap(v interface{}, d tpgresource.Terra return f.RelativeLink(), nil } +func expandComputeRegionTargetHttpProxyHttpKeepAliveTimeoutSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandComputeRegionTargetHttpProxyRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) if err != nil { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_https_proxy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_https_proxy.go index bef239d00c6..031d730438e 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_https_proxy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_https_proxy.go @@ -92,6 +92,16 @@ Accepted format is '//certificatemanager.googleapis.com/projects/{project}/locat ForceNew: true, Description: `An optional description of this resource.`, }, + "http_keep_alive_timeout_sec": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `Specifies how long to keep a connection open, after completing a response, +while there is no matching traffic (in seconds). If an HTTP keepalive is +not specified, a default value (600 seconds) will be used. For Regional +HTTP(S) load balancer, the minimum allowed value is 5 seconds and the +maximum allowed value is 600 seconds.`, + }, "region": { Type: schema.TypeString, Computed: true, @@ -104,7 +114,6 @@ If it is not provided, the provider region is used.`, "server_tls_policy": { Type: schema.TypeString, Optional: true, - ForceNew: true, DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `A URL referring to a networksecurity.ServerTlsPolicy resource that describes how the proxy should authenticate inbound @@ -114,7 +123,12 @@ set to INTERNAL_SELF_MANAGED or EXTERNAL or EXTERNAL_MANAGED. For details which ServerTlsPolicy resources are accepted with INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. -If left blank, communications are not encrypted.`, +If left blank, communications are not encrypted. + +If you remove this field from your configuration at the same time as +deleting or recreating a referenced ServerTlsPolicy resource, you will +receive a resourceInUseByAnotherResource error. 
Use lifecycle.create_before_destroy +within the ServerTlsPolicy resource to avoid this.`, }, "ssl_certificates": { Type: schema.TypeList, @@ -205,6 +219,12 @@ func resourceComputeRegionTargetHttpsProxyCreate(d *schema.ResourceData, meta in } else if v, ok := d.GetOkExists("url_map"); !tpgresource.IsEmptyValue(reflect.ValueOf(urlMapProp)) && (ok || !reflect.DeepEqual(v, urlMapProp)) { obj["urlMap"] = urlMapProp } + httpKeepAliveTimeoutSecProp, err := expandComputeRegionTargetHttpsProxyHttpKeepAliveTimeoutSec(d.Get("http_keep_alive_timeout_sec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("http_keep_alive_timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(httpKeepAliveTimeoutSecProp)) && (ok || !reflect.DeepEqual(v, httpKeepAliveTimeoutSecProp)) { + obj["httpKeepAliveTimeoutSec"] = httpKeepAliveTimeoutSecProp + } serverTlsPolicyProp, err := expandComputeRegionTargetHttpsProxyServerTlsPolicy(d.Get("server_tls_policy"), d, config) if err != nil { return err @@ -357,6 +377,9 @@ func resourceComputeRegionTargetHttpsProxyRead(d *schema.ResourceData, meta inte if err := d.Set("url_map", flattenComputeRegionTargetHttpsProxyUrlMap(res["urlMap"], d, config)); err != nil { return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) } + if err := d.Set("http_keep_alive_timeout_sec", flattenComputeRegionTargetHttpsProxyHttpKeepAliveTimeoutSec(res["httpKeepAliveTimeoutSec"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) + } if err := d.Set("server_tls_policy", flattenComputeRegionTargetHttpsProxyServerTlsPolicy(res["serverTlsPolicy"], d, config)); err != nil { return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) } @@ -493,6 +516,79 @@ func resourceComputeRegionTargetHttpsProxyUpdate(d *schema.ResourceData, meta in return err } } + if d.HasChange("server_tls_policy") { + obj := make(map[string]interface{}) + + getUrl, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + getRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: getUrl, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRegionTargetHttpsProxy %q", d.Id())) + } + + obj["fingerprint"] = getRes["fingerprint"] + + serverTlsPolicyProp, err := expandComputeRegionTargetHttpsProxyServerTlsPolicy(d.Get("server_tls_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("server_tls_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, serverTlsPolicyProp)) { + obj["serverTlsPolicy"] = serverTlsPolicyProp + } + + obj, err = resourceComputeRegionTargetHttpsProxyUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}") + if err != nil { + return err + } + + headers := make(http.Header) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error updating RegionTargetHttpsProxy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating RegionTargetHttpsProxy %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating RegionTargetHttpsProxy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } if d.HasChange("ssl_policy") { obj := make(map[string]interface{}) @@ -703,6 +799,23 @@ func flattenComputeRegionTargetHttpsProxyUrlMap(v interface{}, d *schema.Resourc return tpgresource.ConvertSelfLinkToV1(v.(string)) } +func flattenComputeRegionTargetHttpsProxyHttpKeepAliveTimeoutSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + func flattenComputeRegionTargetHttpsProxyServerTlsPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v @@ -782,6 +895,10 @@ func expandComputeRegionTargetHttpsProxyUrlMap(v interface{}, d tpgresource.Terr return f.RelativeLink(), nil } +func expandComputeRegionTargetHttpsProxyHttpKeepAliveTimeoutSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandComputeRegionTargetHttpsProxyServerTlsPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -804,6 +921,14 @@ func resourceComputeRegionTargetHttpsProxyEncoder(d *schema.ResourceData, meta i obj["sslCertificates"] = obj["certificateManagerCertificates"] delete(obj, "certificateManagerCertificates") } + + // Send null if serverTlsPolicy is not set. Without this, Terraform would not send any value for `serverTlsPolicy` + // in the "PATCH" payload so if you were to remove a server TLS policy from a target HTTPS proxy, it would NOT remove + // the association. + if _, ok := obj["serverTlsPolicy"]; !ok { + obj["serverTlsPolicy"] = nil + } + return obj, nil } @@ -817,6 +942,14 @@ func resourceComputeRegionTargetHttpsProxyUpdateEncoder(d *schema.ResourceData, obj["sslCertificates"] = obj["certificateManagerCertificates"] delete(obj, "certificateManagerCertificates") } + + // Send null if serverTlsPolicy is not set. Without this, Terraform would not send any value for `serverTlsPolicy` + // in the "PATCH" payload so if you were to remove a server TLS policy from a target HTTPS proxy, it would NOT remove + // the association. 
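+	// Illustrative, hypothetical payloads: omitting the key ({"fingerprint": "..."})
+	// leaves an existing policy attached, while {"fingerprint": "...", "serverTlsPolicy": null}
+	// explicitly detaches it, which is what removing the field from configuration should do.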
+ if _, ok := obj["serverTlsPolicy"]; !ok { + obj["serverTlsPolicy"] = nil + } + return obj, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_resize_request.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_resize_request.go new file mode 100644 index 00000000000..c7a2cd232d3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_resize_request.go @@ -0,0 +1,1300 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "net/http" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceComputeResizeRequest() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeResizeRequestCreate, + Read: resourceComputeResizeRequestRead, + Delete: resourceComputeResizeRequestDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeResizeRequestImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "instance_group_manager": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the managed instance group. The name should conform to RFC1035 or be a resource ID. +Authorization requires the following IAM permission on the specified resource instanceGroupManager: +*compute.instanceGroupManagers.update`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of this resize request. The name must be 1-63 characters long, and comply with RFC1035.`, + }, + "resize_by": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `The number of instances to be created by this resize request. The group's target size will be increased by this number.`, + }, + "zone": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Name of the compute zone scoping this request. 
Name should conform to RFC1035.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resize-request.`, + }, + "requested_run_duration": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Requested run duration for instances that will be created by this request. At the end of the run duration, the instance will be deleted.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "seconds": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive.`, + }, + }, + }, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `The creation timestamp for this resize request in RFC3339 text format.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `[Output only] Current state of the request.`, + }, + "status": { + Type: schema.TypeList, + Computed: true, + Description: `[Output only] Status of the request.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "error": { + Type: schema.TypeList, + Computed: true, + Description: `[Output only] Fatal errors encountered during the queueing or provisioning phases of the ResizeRequest that caused the transition to the FAILED state. Contrary to the lastAttempt errors, this field is final and errors are never removed from here, as the ResizeRequest is not going to retry.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "errors": { + Type: schema.TypeList, + Computed: true, + Description: `[Output Only] The array of errors encountered while processing this operation.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "code": { + Type: schema.TypeString, + Computed: true, + Description: `[Output Only] The error type identifier for this error.`, + }, + "error_details": { + Type: schema.TypeList, + Computed: true, + Description: `[Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details. The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "error_info": { + Type: schema.TypeList, + Computed: true, + Description: `[Output Only]`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "domain": { + Type: schema.TypeString, + Computed: true, + Description: `The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is "googleapis.com".`, + }, + "metadatas": { + Type: schema.TypeMap, + Computed: true, + Description: `Additional structured details about this error. 
+Keys must match /[a-z][a-zA-Z0-9-_]+/ but should ideally be lowerCamelCase. Also they must be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. For example, rather than {"instanceLimit": "100/request"}, should be returned as, {"instanceLimitPerRequest": "100"}, if the client exceeds the number of instances that can be created in a single (batch) request.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "reason": { + Type: schema.TypeString, + Computed: true, + Description: `The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match a regular expression of [A-Z][A-Z0-9_]+[A-Z0-9], which represents UPPER_SNAKE_CASE.`, + }, + }, + }, + }, + "help": { + Type: schema.TypeList, + Computed: true, + Description: `[Output Only]`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "links": { + Type: schema.TypeList, + Computed: true, + Description: `[Output Only]`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "description": { + Type: schema.TypeString, + Computed: true, + Description: `Describes what the link offers.`, + }, + "url": { + Type: schema.TypeString, + Computed: true, + Description: `The URL of the link.`, + }, + }, + }, + }, + }, + }, + }, + "localized_message": { + Type: schema.TypeList, + Computed: true, + Description: `[Output Only]`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "locale": { + Type: schema.TypeString, + Computed: true, + Description: `The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX"`, + }, + "message": { + Type: schema.TypeString, + Computed: true, + Description: `The localized error message in the above locale.`, + }, + }, + }, + }, + "quota_info": { + Type: schema.TypeList, + Computed: true, + Description: `[Output Only]`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dimensions": { + Type: schema.TypeMap, + Computed: true, + Description: `The map holding related quota dimensions`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "future_limit": { + Type: schema.TypeInt, + Computed: true, + Description: `Future quota limit being rolled out. The limit's unit depends on the quota type or metric.`, + }, + "limit": { + Type: schema.TypeInt, + Computed: true, + Description: `Current effective quota limit. The limit's unit depends on the quota type or metric.`, + }, + "limit_name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the quota limit.`, + }, + "metric_name": { + Type: schema.TypeString, + Computed: true, + Description: `The Compute Engine quota metric name.`, + }, + "rollout_status": { + Type: schema.TypeString, + Computed: true, + Description: `Rollout status of the future quota limit.`, + }, + }, + }, + }, + }, + }, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Description: `[Output Only] Indicates the field in the request that caused the error. 
This property is optional.`, + }, + "message": { + Type: schema.TypeString, + Computed: true, + Description: `[Output Only] An optional, human-readable error message.`, + }, + }, + }, + }, + }, + }, + }, + "last_attempt": { + Type: schema.TypeList, + Computed: true, + Description: `[Output only] Information about the last attempt to fulfill the request. The value is temporary since the ResizeRequest can retry, as long as it's still active and the last attempt value can either be cleared or replaced with a different error. Since ResizeRequest retries infrequently, the value may be stale and no longer show an active problem. The value is cleared when ResizeRequest transitions to the final state (becomes inactive). If the final state is FAILED the error describing it will be stored in the "error" field only.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "error": { + Type: schema.TypeList, + Computed: true, + Description: `[Output only] Fatal errors encountered during the queueing or provisioning phases of the ResizeRequest that caused the transition to the FAILED state. Contrary to the lastAttempt errors, this field is final and errors are never removed from here, as the ResizeRequest is not going to retry.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "errors": { + Type: schema.TypeList, + Computed: true, + Description: `[Output Only] The array of errors encountered while processing this operation.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "code": { + Type: schema.TypeString, + Computed: true, + Description: `[Output Only] The error type identifier for this error.`, + }, + "error_details": { + Type: schema.TypeList, + Computed: true, + Description: `[Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details. The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "error_info": { + Type: schema.TypeList, + Computed: true, + Description: `[Output Only]`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "domain": { + Type: schema.TypeString, + Computed: true, + Description: `The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is "googleapis.com".`, + }, + "metadatas": { + Type: schema.TypeMap, + Computed: true, + Description: `Additional structured details about this error. +Keys must match /[a-z][a-zA-Z0-9-_]+/ but should ideally be lowerCamelCase. Also they must be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. For example, rather than {"instanceLimit": "100/request"}, should be returned as, {"instanceLimitPerRequest": "100"}, if the client exceeds the number of instances that can be created in a single (batch) request.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "reason": { + Type: schema.TypeString, + Computed: true, + Description: `The reason of the error. This is a constant value that identifies the proximate cause of the error. 
Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match a regular expression of [A-Z][A-Z0-9_]+[A-Z0-9], which represents UPPER_SNAKE_CASE.`, + }, + }, + }, + }, + "help": { + Type: schema.TypeList, + Computed: true, + Description: `[Output Only]`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "links": { + Type: schema.TypeList, + Computed: true, + Description: `[Output Only]`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "description": { + Type: schema.TypeString, + Computed: true, + Description: `Describes what the link offers.`, + }, + "url": { + Type: schema.TypeString, + Computed: true, + Description: `The URL of the link.`, + }, + }, + }, + }, + }, + }, + }, + "localized_message": { + Type: schema.TypeList, + Computed: true, + Description: `[Output Only]`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "locale": { + Type: schema.TypeString, + Computed: true, + Description: `The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX"`, + }, + "message": { + Type: schema.TypeString, + Computed: true, + Description: `The localized error message in the above locale.`, + }, + }, + }, + }, + "quota_info": { + Type: schema.TypeList, + Computed: true, + Description: `[Output Only]`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dimensions": { + Type: schema.TypeMap, + Computed: true, + Description: `The map holding related quota dimensions`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "future_limit": { + Type: schema.TypeInt, + Computed: true, + Description: `Future quota limit being rolled out. The limit's unit depends on the quota type or metric.`, + }, + "limit": { + Type: schema.TypeInt, + Computed: true, + Description: `Current effective quota limit. The limit's unit depends on the quota type or metric.`, + }, + "limit_name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the quota limit.`, + }, + "metric_name": { + Type: schema.TypeString, + Computed: true, + Description: `The Compute Engine quota metric name.`, + }, + "rollout_status": { + Type: schema.TypeString, + Computed: true, + Description: `Rollout status of the future quota limit.`, + }, + }, + }, + }, + }, + }, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Description: `[Output Only] Indicates the field in the request that caused the error. 
This property is optional.`, + }, + "message": { + Type: schema.TypeString, + Computed: true, + Description: `[Output Only] An optional, human-readable error message.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeResizeRequestCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandComputeResizeRequestName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandComputeResizeRequestDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + resizeByProp, err := expandComputeResizeRequestResizeBy(d.Get("resize_by"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("resize_by"); !tpgresource.IsEmptyValue(reflect.ValueOf(resizeByProp)) && (ok || !reflect.DeepEqual(v, resizeByProp)) { + obj["resizeBy"] = resizeByProp + } + requestedRunDurationProp, err := expandComputeResizeRequestRequestedRunDuration(d.Get("requested_run_duration"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("requested_run_duration"); !tpgresource.IsEmptyValue(reflect.ValueOf(requestedRunDurationProp)) && (ok || !reflect.DeepEqual(v, requestedRunDurationProp)) { + obj["requestedRunDuration"] = requestedRunDurationProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/resizeRequests") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ResizeRequest: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ResizeRequest: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating ResizeRequest: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/resizeRequests/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating ResizeRequest", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create ResizeRequest: %s", err) + } 
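+
+	// The insert call above returned a long-running compute operation; creation is
+	// only treated as successful once that operation completes, and the ID is
+	// cleared on failure so a half-created resize request is not persisted in state.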
+ + log.Printf("[DEBUG] Finished creating ResizeRequest %q: %#v", d.Id(), res) + + return resourceComputeResizeRequestRead(d, meta) +} + +func resourceComputeResizeRequestRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/resizeRequests/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ResizeRequest: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeResizeRequest %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading ResizeRequest: %s", err) + } + + if err := d.Set("creation_timestamp", flattenComputeResizeRequestCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading ResizeRequest: %s", err) + } + if err := d.Set("state", flattenComputeResizeRequestState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading ResizeRequest: %s", err) + } + if err := d.Set("name", flattenComputeResizeRequestName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading ResizeRequest: %s", err) + } + if err := d.Set("description", flattenComputeResizeRequestDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading ResizeRequest: %s", err) + } + if err := d.Set("resize_by", flattenComputeResizeRequestResizeBy(res["resizeBy"], d, config)); err != nil { + return fmt.Errorf("Error reading ResizeRequest: %s", err) + } + if err := d.Set("requested_run_duration", flattenComputeResizeRequestRequestedRunDuration(res["requestedRunDuration"], d, config)); err != nil { + return fmt.Errorf("Error reading ResizeRequest: %s", err) + } + if err := d.Set("status", flattenComputeResizeRequestStatus(res["status"], d, config)); err != nil { + return fmt.Errorf("Error reading ResizeRequest: %s", err) + } + + return nil +} + +func resourceComputeResizeRequestDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + // Get project id + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for resize request: %s", err) + } + + // Get cancel url + var cancelUrl string + cancelUrl, err = tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/resizeRequests/{{name}}/cancel") + + if err != nil { + return err + } + + // Get delete url + var deleteUrl string + deleteUrl, err = tpgresource.ReplaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/resizeRequests/{{name}}") + if err != nil { + return err + } + + // Get current state (accepted or not) and delete + if d.Get("state") == "ACCEPTED" { + // cancel resize request + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: cancelUrl, + UserAgent: userAgent, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ResizeRequest") + } + + err = ComputeOperationWaitTime( + config, res, project, "Cancelling the resize request", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + } + + // delete resize request + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: project, + RawURL: deleteUrl, + UserAgent: userAgent, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + + err = ComputeOperationWaitTime( + config, res, project, "Cancelling the resize request", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + return nil + +} + +func resourceComputeResizeRequestImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^projects/(?P[^/]+)/zones/(?P[^/]+)/instanceGroupManagers/(?P[^/]+)/resizeRequests/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/resizeRequests/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeResizeRequestCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestResizeBy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeResizeRequestRequestedRunDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["seconds"] = + flattenComputeResizeRequestRequestedRunDurationSeconds(original["seconds"], d, config) + transformed["nanos"] = + 
flattenComputeResizeRequestRequestedRunDurationNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func flattenComputeResizeRequestRequestedRunDurationSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestRequestedRunDurationNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeResizeRequestStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["error"] = + flattenComputeResizeRequestStatusError(original["error"], d, config) + transformed["last_attempt"] = + flattenComputeResizeRequestStatusLastAttempt(original["lastAttempt"], d, config) + return []interface{}{transformed} +} +func flattenComputeResizeRequestStatusError(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["errors"] = + flattenComputeResizeRequestStatusErrorErrors(original["errors"], d, config) + return []interface{}{transformed} +} +func flattenComputeResizeRequestStatusErrorErrors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["code"] = + flattenComputeResizeRequestStatusErrorErrorsCode(original["code"], d, config) + transformed["location"] = + flattenComputeResizeRequestStatusErrorErrorsLocation(original["location"], d, config) + transformed["message"] = + flattenComputeResizeRequestStatusErrorErrorsMessage(original["message"], d, config) + transformed["error_details"] = + flattenComputeResizeRequestStatusErrorErrorsErrorDetails(original["errorDetails"], d, config) + return []interface{}{transformed} +} +func flattenComputeResizeRequestStatusErrorErrorsCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusErrorErrorsLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusErrorErrorsMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusErrorErrorsErrorDetails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["error_info"] = + flattenComputeResizeRequestStatusErrorErrorsErrorDetailsErrorInfo(original["errorInfo"], d, config) + transformed["quota_info"] = + 
flattenComputeResizeRequestStatusErrorErrorsErrorDetailsQuotaInfo(original["quotaInfo"], d, config) + transformed["help"] = + flattenComputeResizeRequestStatusErrorErrorsErrorDetailsHelp(original["help"], d, config) + transformed["localized_message"] = + flattenComputeResizeRequestStatusErrorErrorsErrorDetailsLocalizedMessage(original["localizedMessage"], d, config) + return []interface{}{transformed} +} +func flattenComputeResizeRequestStatusErrorErrorsErrorDetailsErrorInfo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["reason"] = + flattenComputeResizeRequestStatusErrorErrorsErrorDetailsErrorInfoReason(original["reason"], d, config) + transformed["domain"] = + flattenComputeResizeRequestStatusErrorErrorsErrorDetailsErrorInfoDomain(original["domain"], d, config) + transformed["metadatas"] = + flattenComputeResizeRequestStatusErrorErrorsErrorDetailsErrorInfoMetadatas(original["metadatas"], d, config) + return []interface{}{transformed} +} +func flattenComputeResizeRequestStatusErrorErrorsErrorDetailsErrorInfoReason(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusErrorErrorsErrorDetailsErrorInfoDomain(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusErrorErrorsErrorDetailsErrorInfoMetadatas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusErrorErrorsErrorDetailsQuotaInfo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["metric_name"] = + flattenComputeResizeRequestStatusErrorErrorsErrorDetailsQuotaInfoMetricName(original["metricName"], d, config) + transformed["limit_name"] = + flattenComputeResizeRequestStatusErrorErrorsErrorDetailsQuotaInfoLimitName(original["limitName"], d, config) + transformed["dimensions"] = + flattenComputeResizeRequestStatusErrorErrorsErrorDetailsQuotaInfoDimensions(original["dimensions"], d, config) + transformed["limit"] = + flattenComputeResizeRequestStatusErrorErrorsErrorDetailsQuotaInfoLimit(original["limit"], d, config) + transformed["future_limit"] = + flattenComputeResizeRequestStatusErrorErrorsErrorDetailsQuotaInfoFutureLimit(original["futureLimit"], d, config) + transformed["rollout_status"] = + flattenComputeResizeRequestStatusErrorErrorsErrorDetailsQuotaInfoRolloutStatus(original["rolloutStatus"], d, config) + return []interface{}{transformed} +} +func flattenComputeResizeRequestStatusErrorErrorsErrorDetailsQuotaInfoMetricName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusErrorErrorsErrorDetailsQuotaInfoLimitName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusErrorErrorsErrorDetailsQuotaInfoDimensions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusErrorErrorsErrorDetailsQuotaInfoLimit(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeResizeRequestStatusErrorErrorsErrorDetailsQuotaInfoFutureLimit(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeResizeRequestStatusErrorErrorsErrorDetailsQuotaInfoRolloutStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusErrorErrorsErrorDetailsHelp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["links"] = + flattenComputeResizeRequestStatusErrorErrorsErrorDetailsHelpLinks(original["links"], d, config) + return []interface{}{transformed} +} +func flattenComputeResizeRequestStatusErrorErrorsErrorDetailsHelpLinks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["description"] = + flattenComputeResizeRequestStatusErrorErrorsErrorDetailsHelpLinksDescription(original["description"], d, config) + transformed["url"] = + flattenComputeResizeRequestStatusErrorErrorsErrorDetailsHelpLinksUrl(original["url"], d, config) + return []interface{}{transformed} +} +func flattenComputeResizeRequestStatusErrorErrorsErrorDetailsHelpLinksDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusErrorErrorsErrorDetailsHelpLinksUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusErrorErrorsErrorDetailsLocalizedMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["locale"] = + flattenComputeResizeRequestStatusErrorErrorsErrorDetailsLocalizedMessageLocale(original["locale"], d, config) + transformed["message"] = + flattenComputeResizeRequestStatusErrorErrorsErrorDetailsLocalizedMessageMessage(original["message"], d, config) + return []interface{}{transformed} +} +func flattenComputeResizeRequestStatusErrorErrorsErrorDetailsLocalizedMessageLocale(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusErrorErrorsErrorDetailsLocalizedMessageMessage(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusLastAttempt(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["error"] = + flattenComputeResizeRequestStatusLastAttemptError(original["error"], d, config) + return []interface{}{transformed} +} +func flattenComputeResizeRequestStatusLastAttemptError(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["errors"] = + flattenComputeResizeRequestStatusLastAttemptErrorErrors(original["errors"], d, config) + return []interface{}{transformed} +} +func flattenComputeResizeRequestStatusLastAttemptErrorErrors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["code"] = + flattenComputeResizeRequestStatusLastAttemptErrorErrorsCode(original["code"], d, config) + transformed["location"] = + flattenComputeResizeRequestStatusLastAttemptErrorErrorsLocation(original["location"], d, config) + transformed["message"] = + flattenComputeResizeRequestStatusLastAttemptErrorErrorsMessage(original["message"], d, config) + transformed["error_details"] = + flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetails(original["errorDetails"], d, config) + return []interface{}{transformed} +} +func flattenComputeResizeRequestStatusLastAttemptErrorErrorsCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusLastAttemptErrorErrorsLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusLastAttemptErrorErrorsMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["error_info"] = + flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsErrorInfo(original["errorInfo"], d, config) + transformed["quota_info"] = + flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsQuotaInfo(original["quotaInfo"], d, config) + transformed["help"] = + flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsHelp(original["help"], d, config) + transformed["localized_message"] = + flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsLocalizedMessage(original["localizedMessage"], d, config) + return []interface{}{transformed} +} +func flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsErrorInfo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := 
make(map[string]interface{}) + transformed["reason"] = + flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsErrorInfoReason(original["reason"], d, config) + transformed["domain"] = + flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsErrorInfoDomain(original["domain"], d, config) + transformed["metadatas"] = + flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsErrorInfoMetadatas(original["metadatas"], d, config) + return []interface{}{transformed} +} +func flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsErrorInfoReason(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsErrorInfoDomain(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsErrorInfoMetadatas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsQuotaInfo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["metric_name"] = + flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsQuotaInfoMetricName(original["metricName"], d, config) + transformed["limit_name"] = + flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsQuotaInfoLimitName(original["limitName"], d, config) + transformed["dimensions"] = + flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsQuotaInfoDimensions(original["dimensions"], d, config) + transformed["limit"] = + flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsQuotaInfoLimit(original["limit"], d, config) + transformed["future_limit"] = + flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsQuotaInfoFutureLimit(original["futureLimit"], d, config) + transformed["rollout_status"] = + flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsQuotaInfoRolloutStatus(original["rolloutStatus"], d, config) + return []interface{}{transformed} +} +func flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsQuotaInfoMetricName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsQuotaInfoLimitName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsQuotaInfoDimensions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsQuotaInfoLimit(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func 
flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsQuotaInfoFutureLimit(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsQuotaInfoRolloutStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsHelp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["links"] = + flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsHelpLinks(original["links"], d, config) + return []interface{}{transformed} +} +func flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsHelpLinks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["description"] = + flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsHelpLinksDescription(original["description"], d, config) + transformed["url"] = + flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsHelpLinksUrl(original["url"], d, config) + return []interface{}{transformed} +} +func flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsHelpLinksDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsHelpLinksUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsLocalizedMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["locale"] = + flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsLocalizedMessageLocale(original["locale"], d, config) + transformed["message"] = + flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsLocalizedMessageMessage(original["message"], d, config) + return []interface{}{transformed} +} +func flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsLocalizedMessageLocale(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeResizeRequestStatusLastAttemptErrorErrorsErrorDetailsLocalizedMessageMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandComputeResizeRequestName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeResizeRequestDescription(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeResizeRequestResizeBy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeResizeRequestRequestedRunDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSeconds, err := expandComputeResizeRequestRequestedRunDurationSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandComputeResizeRequestRequestedRunDurationNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func expandComputeResizeRequestRequestedRunDurationSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeResizeRequestRequestedRunDurationNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_resource_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_resource_policy.go index 5469836089c..173aa2a2155 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_resource_policy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_resource_policy.go @@ -32,10 +32,17 @@ import ( "github.com/hashicorp/terraform-provider-google/google/verify" ) +// Suppresses a diff on cases like 1:00 when it should be 01:00, +// because the API normalizes this value. +func HourlyFormatSuppressDiff(_, old, new string, _ *schema.ResourceData) bool { + return old == "0"+new +} + func ResourceComputeResourcePolicy() *schema.Resource { return &schema.Resource{ Create: resourceComputeResourcePolicyCreate, Read: resourceComputeResourcePolicyRead, + Update: resourceComputeResourcePolicyUpdate, Delete: resourceComputeResourcePolicyDelete, Importer: &schema.ResourceImporter{ @@ -44,6 +51,7 @@ func ResourceComputeResourcePolicy() *schema.Resource { Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), Delete: schema.DefaultTimeout(20 * time.Minute), }, @@ -56,7 +64,6 @@ func ResourceComputeResourcePolicy() *schema.Resource { "name": { Type: schema.TypeString, Required: true, - ForceNew: true, Description: `The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035.
Specifically, the name must be 1-63 characters long and @@ -68,13 +75,11 @@ which cannot be a dash.`, "description": { Type: schema.TypeString, Optional: true, - ForceNew: true, Description: `An optional description of this resource. Provide this property when you create the resource.`, }, "disk_consistency_group_policy": { Type: schema.TypeList, Optional: true, - ForceNew: true, Description: `Replication consistency group for asynchronous disk replication.`, MaxItems: 1, Elem: &schema.Resource{ @@ -92,7 +97,6 @@ which cannot be a dash.`, "group_placement_policy": { Type: schema.TypeList, Optional: true, - ForceNew: true, Description: `Resource policy for instances used for placement configuration.`, MaxItems: 1, Elem: &schema.Resource{ @@ -100,14 +104,12 @@ which cannot be a dash.`, "availability_domain_count": { Type: schema.TypeInt, Optional: true, - ForceNew: true, Description: `The number of availability domains instances will be spread across. If two instances are in different availability domain, they will not be put in the same low latency network`, }, "collocation": { Type: schema.TypeString, Optional: true, - ForceNew: true, ValidateFunc: verify.ValidateEnum([]string{"COLLOCATED", ""}), Description: `Collocation specifies whether to place VMs inside the same availability domain on the same low-latency network. Specify 'COLLOCATED' to enable collocation. Can only be specified with 'vm_count'. If compute instances are created @@ -117,7 +119,6 @@ attached. Possible values: ["COLLOCATED"]`, "vm_count": { Type: schema.TypeInt, Optional: true, - ForceNew: true, Description: `Number of VMs in this placement group. Google does not recommend that you use this field unless you use a compact policy and you want your policy to work only if it contains this exact number of VMs.`, @@ -129,7 +130,6 @@ exact number of VMs.`, "instance_schedule_policy": { Type: schema.TypeList, Optional: true, - ForceNew: true, Description: `Resource policy for scheduling instance operations.`, MaxItems: 1, Elem: &schema.Resource{ @@ -137,26 +137,22 @@ exact number of VMs.`, "time_zone": { Type: schema.TypeString, Required: true, - ForceNew: true, Description: `Specifies the time zone to be used in interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.`, }, "expiration_time": { Type: schema.TypeString, Optional: true, - ForceNew: true, Description: `The expiration time of the schedule. The timestamp is an RFC3339 string.`, }, "start_time": { Type: schema.TypeString, Optional: true, - ForceNew: true, Description: `The start time of the schedule. 
The timestamp is an RFC3339 string.`, }, "vm_start_schedule": { Type: schema.TypeList, Optional: true, - ForceNew: true, Description: `Specifies the schedule for starting instances.`, MaxItems: 1, Elem: &schema.Resource{ @@ -164,7 +160,6 @@ from the tz database: http://en.wikipedia.org/wiki/Tz_database.`, "schedule": { Type: schema.TypeString, Required: true, - ForceNew: true, Description: `Specifies the frequency for the operation, using the unix-cron format.`, }, }, @@ -174,7 +169,6 @@ from the tz database: http://en.wikipedia.org/wiki/Tz_database.`, "vm_stop_schedule": { Type: schema.TypeList, Optional: true, - ForceNew: true, Description: `Specifies the schedule for stopping instances.`, MaxItems: 1, Elem: &schema.Resource{ @@ -182,7 +176,6 @@ from the tz database: http://en.wikipedia.org/wiki/Tz_database.`, "schedule": { Type: schema.TypeString, Required: true, - ForceNew: true, Description: `Specifies the frequency for the operation, using the unix-cron format.`, }, }, @@ -204,7 +197,6 @@ from the tz database: http://en.wikipedia.org/wiki/Tz_database.`, "snapshot_schedule_policy": { Type: schema.TypeList, Optional: true, - ForceNew: true, Description: `Policy for creating snapshots of persistent disks.`, MaxItems: 1, Elem: &schema.Resource{ @@ -212,7 +204,6 @@ from the tz database: http://en.wikipedia.org/wiki/Tz_database.`, "schedule": { Type: schema.TypeList, Required: true, - ForceNew: true, Description: `Contains one of an 'hourlySchedule', 'dailySchedule', or 'weeklySchedule'.`, MaxItems: 1, Elem: &schema.Resource{ @@ -220,7 +211,6 @@ from the tz database: http://en.wikipedia.org/wiki/Tz_database.`, "daily_schedule": { Type: schema.TypeList, Optional: true, - ForceNew: true, Description: `The policy will execute every nth day at the specified time.`, MaxItems: 1, Elem: &schema.Resource{ @@ -228,14 +218,13 @@ from the tz database: http://en.wikipedia.org/wiki/Tz_database.`, "days_in_cycle": { Type: schema.TypeInt, Required: true, - ForceNew: true, Description: `Defines a schedule with units measured in days. The value determines how many days pass between the start of each cycle. Days in cycle for snapshot schedule policy must be 1.`, }, "start_time": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: verify.ValidateHourlyOnly, + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateHourlyOnly, + DiffSuppressFunc: HourlyFormatSuppressDiff, Description: `This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.`, @@ -247,7 +236,6 @@ both 13:00-5 and 08:00 are valid.`, "hourly_schedule": { Type: schema.TypeList, Optional: true, - ForceNew: true, Description: `The policy will execute every nth hour starting at the specified time.`, MaxItems: 1, Elem: &schema.Resource{ @@ -255,14 +243,13 @@ both 13:00-5 and 08:00 are valid.`, "hours_in_cycle": { Type: schema.TypeInt, Required: true, - ForceNew: true, Description: `The number of hours between snapshots.`, }, "start_time": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: verify.ValidateHourlyOnly, + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateHourlyOnly, + DiffSuppressFunc: HourlyFormatSuppressDiff, Description: `Time within the window to start the operations. It must be in an hourly format "HH:MM", where HH : [00-23] and MM : [00] GMT. eg: 21:00`, @@ -274,7 +261,6 @@ where HH : [00-23] and MM : [00] GMT. 
eg: 21:00`, "weekly_schedule": { Type: schema.TypeList, Optional: true, - ForceNew: true, Description: `Allows specifying a snapshot time for each day of the week.`, MaxItems: 1, Elem: &schema.Resource{ @@ -282,7 +268,6 @@ where HH : [00-23] and MM : [00] GMT. eg: 21:00`, "day_of_weeks": { Type: schema.TypeSet, Required: true, - ForceNew: true, Description: `May contain up to seven (one for each day of the week) snapshot times.`, MinItems: 1, MaxItems: 7, @@ -299,7 +284,6 @@ where HH : [00-23] and MM : [00] GMT. eg: 21:00`, "retention_policy": { Type: schema.TypeList, Optional: true, - ForceNew: true, Description: `Retention policy applied to snapshots created by this resource policy.`, MaxItems: 1, Elem: &schema.Resource{ @@ -307,13 +291,11 @@ where HH : [00-23] and MM : [00] GMT. eg: 21:00`, "max_retention_days": { Type: schema.TypeInt, Required: true, - ForceNew: true, Description: `Maximum age of the snapshot that is allowed to be kept.`, }, "on_source_disk_delete": { Type: schema.TypeString, Optional: true, - ForceNew: true, ValidateFunc: verify.ValidateEnum([]string{"KEEP_AUTO_SNAPSHOTS", "APPLY_RETENTION_POLICY", ""}), Description: `Specifies the behavior to apply to scheduled snapshots when the source disk is deleted. Default value: "KEEP_AUTO_SNAPSHOTS" Possible values: ["KEEP_AUTO_SNAPSHOTS", "APPLY_RETENTION_POLICY"]`, @@ -325,7 +307,6 @@ the source disk is deleted. Default value: "KEEP_AUTO_SNAPSHOTS" Possible values "snapshot_properties": { Type: schema.TypeList, Optional: true, - ForceNew: true, Description: `Properties with which the snapshots are created, such as labels.`, MaxItems: 1, Elem: &schema.Resource{ @@ -333,7 +314,6 @@ the source disk is deleted. Default value: "KEEP_AUTO_SNAPSHOTS" Possible values "chain_name": { Type: schema.TypeString, Optional: true, - ForceNew: true, Description: `Creates the new snapshot in the snapshot chain labeled with the specified name. The chain name must be 1-63 characters long and comply with RFC1035.`, @@ -341,14 +321,12 @@ with RFC1035.`, "guest_flush": { Type: schema.TypeBool, Optional: true, - ForceNew: true, Description: `Whether to perform a 'guest aware' snapshot.`, AtLeastOneOf: []string{"snapshot_schedule_policy.0.snapshot_properties.0.labels", "snapshot_schedule_policy.0.snapshot_properties.0.storage_locations", "snapshot_schedule_policy.0.snapshot_properties.0.guest_flush"}, }, "labels": { Type: schema.TypeMap, Optional: true, - ForceNew: true, Description: `A set of key-value pairs.`, Elem: &schema.Schema{Type: schema.TypeString}, AtLeastOneOf: []string{"snapshot_schedule_policy.0.snapshot_properties.0.labels", "snapshot_schedule_policy.0.snapshot_properties.0.storage_locations", "snapshot_schedule_policy.0.snapshot_properties.0.guest_flush"}, @@ -356,7 +334,6 @@ with RFC1035.`, "storage_locations": { Type: schema.TypeSet, Optional: true, - ForceNew: true, Description: `Cloud Storage bucket location to store the auto snapshot (regional or multi-regional)`, MaxItems: 1, @@ -394,14 +371,12 @@ func computeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeks "day": { Type: schema.TypeString, Required: true, - ForceNew: true, ValidateFunc: verify.ValidateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"}), Description: `The day of the week to create the snapshot. e.g. 
MONDAY Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, }, "start_time": { Type: schema.TypeString, Required: true, - ForceNew: true, Description: `Time within the window to start the operations. It must be in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT.`, }, @@ -591,6 +566,100 @@ func resourceComputeResourcePolicyRead(d *schema.ResourceData, meta interface{}) return nil } +func resourceComputeResourcePolicyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ResourcePolicy: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + nameProp, err := expandComputeResourcePolicyName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandComputeResourcePolicyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + snapshotSchedulePolicyProp, err := expandComputeResourcePolicySnapshotSchedulePolicy(d.Get("snapshot_schedule_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("snapshot_schedule_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, snapshotSchedulePolicyProp)) { + obj["snapshotSchedulePolicy"] = snapshotSchedulePolicyProp + } + groupPlacementPolicyProp, err := expandComputeResourcePolicyGroupPlacementPolicy(d.Get("group_placement_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("group_placement_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, groupPlacementPolicyProp)) { + obj["groupPlacementPolicy"] = groupPlacementPolicyProp + } + instanceSchedulePolicyProp, err := expandComputeResourcePolicyInstanceSchedulePolicy(d.Get("instance_schedule_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("instance_schedule_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, instanceSchedulePolicyProp)) { + obj["instanceSchedulePolicy"] = instanceSchedulePolicyProp + } + diskConsistencyGroupPolicyProp, err := expandComputeResourcePolicyDiskConsistencyGroupPolicy(d.Get("disk_consistency_group_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disk_consistency_group_policy"); ok || !reflect.DeepEqual(v, diskConsistencyGroupPolicyProp) { + obj["diskConsistencyGroupPolicy"] = diskConsistencyGroupPolicyProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/resourcePolicies/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating ResourcePolicy %q: %#v", d.Id(), obj) + headers := make(http.Header) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating ResourcePolicy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ResourcePolicy %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating ResourcePolicy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceComputeResourcePolicyRead(d, meta) +} + func resourceComputeResourcePolicyDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router.go index bc0cd9801ee..daf8120f938 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router.go @@ -646,7 +646,7 @@ func flattenComputeRouterBgpAdvertisedIpRanges(v interface{}, d *schema.Resource }) } configData := []map[string]interface{}{} - if v, ok := d.GetOk("advertised_ip_ranges"); ok { + if v, ok := d.GetOk("bgp.0.advertised_ip_ranges"); ok { for _, item := range v.([]interface{}) { configData = append(configData, item.(map[string]interface{})) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_nat.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_nat.go index fb758357e47..761d8060e2f 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_nat.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_nat.go @@ -231,6 +231,7 @@ project-level default tier is used. Possible values: ["PREMIUM", "STANDARD"]`, }, "drain_nat_ips": { Type: schema.TypeSet, + Computed: true, Optional: true, Description: `A list of URLs of the IP resources to be drained. These IPs must be valid static external IPs that have been assigned to the NAT.`, @@ -279,6 +280,19 @@ Supported values include: Description: `Timeout (in seconds) for ICMP connections. Defaults to 30s if not set.`, Default: 30, }, + "initial_nat_ips": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Description: `Self-links of NAT IPs to be used as initial value for creation alongside a RouterNatAddress resource. +Conflicts with natIps and drainNatIps. 
Only valid if natIpAllocateOption is set to MANUAL_ONLY.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + Set: computeRouterNatIPsHash, + ConflictsWith: []string{"nat_ips", "drain_nat_ips"}, + }, "log_config": { Type: schema.TypeList, Optional: true, @@ -322,9 +336,13 @@ Platform, or 'MANUAL_ONLY' for only user-allocated NAT IP addresses. Possible va }, "nat_ips": { Type: schema.TypeSet, + Computed: true, Optional: true, Description: `Self-links of NAT IPs. Only valid if natIpAllocateOption -is set to MANUAL_ONLY.`, +is set to MANUAL_ONLY. +If this field is used alongside a count-created list of address resources 'google_compute_address.foobar.*.self_link', +the address resource must have a 'lifecycle' block with 'create_before_destroy = true' so +the number of resources can be increased/decreased without triggering the 'resourceInUseByAnotherResource' error.`, Elem: &schema.Schema{ Type: schema.TypeString, DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, @@ -521,6 +539,12 @@ func resourceComputeRouterNatCreate(d *schema.ResourceData, meta interface{}) er } else if v, ok := d.GetOkExists("nat_ip_allocate_option"); !tpgresource.IsEmptyValue(reflect.ValueOf(natIpAllocateOptionProp)) && (ok || !reflect.DeepEqual(v, natIpAllocateOptionProp)) { obj["natIpAllocateOption"] = natIpAllocateOptionProp } + initialNatIpsProp, err := expandNestedComputeRouterNatInitialNatIps(d.Get("initial_nat_ips"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("initial_nat_ips"); ok || !reflect.DeepEqual(v, initialNatIpsProp) { + obj["initialNatIps"] = initialNatIpsProp + } natIpsProp, err := expandNestedComputeRouterNatNatIps(d.Get("nat_ips"), d, config) if err != nil { return err @@ -624,6 +648,11 @@ func resourceComputeRouterNatCreate(d *schema.ResourceData, meta interface{}) er obj["autoNetworkTier"] = autoNetworkTierProp } + obj, err = resourceComputeRouterNatEncoder(d, meta, obj) + if err != nil { + return err + } + lockName, err := tpgresource.ReplaceVars(d, config, "router/{{region}}/{{router}}") if err != nil { return err @@ -656,6 +685,7 @@ func resourceComputeRouterNatCreate(d *schema.ResourceData, meta interface{}) er } headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, Method: "PATCH", @@ -926,6 +956,11 @@ func resourceComputeRouterNatUpdate(d *schema.ResourceData, meta interface{}) er obj["autoNetworkTier"] = autoNetworkTierProp } + obj, err = resourceComputeRouterNatEncoder(d, meta, obj) + if err != nil { + return err + } + lockName, err := tpgresource.ReplaceVars(d, config, "router/{{region}}/{{router}}") if err != nil { return err @@ -1361,6 +1396,23 @@ func expandNestedComputeRouterNatNatIpAllocateOption(v interface{}, d tpgresourc return v, nil } +func expandNestedComputeRouterNatInitialNatIps(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + return nil, fmt.Errorf("Invalid value for initial_nat_ips: nil") + } + f, err := tpgresource.ParseRegionalFieldValue("addresses", raw.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for initial_nat_ips: %s", err) + } + req = append(req, f.RelativeLink()) + } + return req, nil +} + +func
expandNestedComputeRouterNatNatIps(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() l := v.([]interface{}) @@ -1648,6 +1700,24 @@ func expandNestedComputeRouterNatAutoNetworkTier(v interface{}, d tpgresource.Te return v, nil } +func resourceComputeRouterNatEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // initial_nat_ips uses the same api_name as nat_ips + if tpgresource.IsEmptyValue(reflect.ValueOf(obj["initialNatIps"])) { + return obj, nil + } + + newObj := make(map[string]interface{}) + for key, value := range obj { + newObj[key] = value + } + + newObj["natIps"] = obj["initialNatIps"] + delete(newObj, "initialNatIps") + + log.Printf("[DEBUG] Replacing initialNatIps value \n oldObj: %+v \n newObj: %+v", obj, newObj) + return newObj, nil +} + func flattenNestedComputeRouterNat(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { var v interface{} var ok bool diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_nat_address.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_nat_address.go new file mode 100644 index 00000000000..209cbb6daa1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_nat_address.go @@ -0,0 +1,814 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "fmt" + "log" + "net/http" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func addressResourceNameSetFromSelfLinkSet(v interface{}) *schema.Set { + if v == nil { + return schema.NewSet(schema.HashString, nil) + } + vSet := v.(*schema.Set) + ls := make([]interface{}, 0, vSet.Len()) + for _, v := range vSet.List() { + if v == nil { + continue + } + ls = append(ls, tpgresource.GetResourceNameFromSelfLink(v.(string))) + } + return schema.NewSet(schema.HashString, ls) +} + +// drain_nat_ips MUST be set from previously set values of nat_ips, +// so this customizeDiff func makes sure drainNatIps values: +// - aren't set at creation time +// - are in the old value of nat_ips but not in the new value +func resourceComputeRouterNatAddressDrainNatIpsCustomDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { + o, n := diff.GetChange("drain_nat_ips") + oSet := addressResourceNameSetFromSelfLinkSet(o) + nSet := addressResourceNameSetFromSelfLinkSet(n) + addDrainIps := nSet.Difference(oSet) + + // We don't care if there are no new drainNatIps + if addDrainIps.Len() == 0 { + return nil + } + + // Resource hasn't been created yet - return error + if diff.Id() == "" { + return fmt.Errorf("New RouterNat cannot have drain_nat_ips, got values %+v", addDrainIps.List()) + } + + o, n = diff.GetChange("nat_ips") + oNatSet := addressResourceNameSetFromSelfLinkSet(o) + nNatSet := addressResourceNameSetFromSelfLinkSet(n) + + // Resource is being updated - make sure new drainNatIps were previously set in natIps and are no longer in natIps. + for _, v := range addDrainIps.List() { + if !oNatSet.Contains(v) { + return fmt.Errorf("drain_nat_ip %q was not previously set in nat_ips %+v", v.(string), oNatSet.List()) + } + if nNatSet.Contains(v) { + return fmt.Errorf("drain_nat_ip %q cannot be drained if still set in nat_ips %+v", v.(string), nNatSet.List()) + } + } + return nil +} + +func resourceComputeRouterNatAddressDeleteOnlyNatIps(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + items, err := resourceComputeRouterNatAddressListForPatch(d, meta) + if err != nil { + return nil, err + } + + idx, item, err := resourceComputeRouterNatAddressFindNestedObjectInList(d, meta, items) + if err != nil { + return nil, err + } + + // Return error if item to update does not exist.
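+ // (for example, if the parent RouterNat was deleted out of band).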
+ if item == nil { + return nil, fmt.Errorf("Unable to update RouterNatAddress %q - not found in list", d.Id()) + } + + if item["natIps"] != nil { + croppedNatIps := item["natIps"].([]interface{})[:1] + item["natIps"] = croppedNatIps + } + + items[idx] = item + // Return the list with the cropped item + res := map[string]interface{}{ + "nats": items, + } + return res, nil +} + +func ResourceComputeRouterNatAddress() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRouterNatAddressCreate, + Read: resourceComputeRouterNatAddressRead, + Update: resourceComputeRouterNatAddressUpdate, + Delete: resourceComputeRouterNatAddressDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeRouterNatAddressImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + resourceComputeRouterNatAddressDrainNatIpsCustomDiff, + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "nat_ips": { + Type: schema.TypeSet, + Required: true, + Description: `Self-links of NAT IPs to be used in a Nat service. Only valid if the referenced RouterNat +natIpAllocateOption is set to MANUAL_ONLY.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + Set: computeRouterNatIPsHash, + }, + "router": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the Cloud Router in which the referenced NAT service is configured.`, + }, + "router_nat": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the Nat service in which this address will be configured.`, + }, + "drain_nat_ips": { + Type: schema.TypeSet, + Optional: true, + Description: `A list of URLs of the IP resources to be drained. These IPs must be +valid static external IPs that have been assigned to the NAT.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + // Default schema.HashSchema is used.
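+ // (nat_ips above uses computeRouterNatIPsHash instead).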
+ }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Region where the NAT service reside.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeRouterNatAddressCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + // A custom_create function similar to the generated code when using a nested_query, but replaces the encoder with a custom one instead of just injecting it; + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + natIpsProp, err := expandNestedComputeRouterNatAddressNatIps(d.Get("nat_ips"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("nat_ips"); ok || !reflect.DeepEqual(v, natIpsProp) { + obj["natIps"] = natIpsProp + } + drainNatIpsProp, err := expandNestedComputeRouterNatAddressDrainNatIps(d.Get("drain_nat_ips"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("drain_nat_ips"); ok || !reflect.DeepEqual(v, drainNatIpsProp) { + obj["drainNatIps"] = drainNatIpsProp + } + nameProp, err := expandNestedComputeRouterNatAddressRouterNat(d.Get("router_nat"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("router_nat"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + log.Printf("[DEBUG] Creating new RouterNatAddress: %#v", obj) + + obj, err = resourceComputeRouterNatAddressEncoder(d, meta, obj) + if err != nil { + return err + } + + lockName, err := tpgresource.ReplaceVars(d, config, "router/{{region}}/{{router}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RouterNatAddress: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating RouterNatAddress: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/routers/{{router}}/{{router_nat}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating RouterNatAddress", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create RouterNatAddress: %s", err) + } + + log.Printf("[DEBUG] Finished creating RouterNatAddress %q: %#v", d.Id(), res) + + 
return resourceComputeRouterNatAddressRead(d, meta) +} + +func resourceComputeRouterNatAddressRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RouterNatAddress: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRouterNatAddress %q", d.Id())) + } + + res, err = flattenNestedComputeRouterNatAddress(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. + log.Printf("[DEBUG] Removing ComputeRouterNatAddress because it couldn't be matched.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading RouterNatAddress: %s", err) + } + + if err := d.Set("nat_ips", flattenNestedComputeRouterNatAddressNatIps(res["natIps"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterNatAddress: %s", err) + } + if err := d.Set("drain_nat_ips", flattenNestedComputeRouterNatAddressDrainNatIps(res["drainNatIps"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterNatAddress: %s", err) + } + if err := d.Set("router_nat", flattenNestedComputeRouterNatAddressRouterNat(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterNatAddress: %s", err) + } + + return nil +} + +func resourceComputeRouterNatAddressUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RouterNatAddress: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + natIpsProp, err := expandNestedComputeRouterNatAddressNatIps(d.Get("nat_ips"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("nat_ips"); ok || !reflect.DeepEqual(v, natIpsProp) { + obj["natIps"] = natIpsProp + } + drainNatIpsProp, err := expandNestedComputeRouterNatAddressDrainNatIps(d.Get("drain_nat_ips"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("drain_nat_ips"); ok || !reflect.DeepEqual(v, drainNatIpsProp) { + obj["drainNatIps"] = drainNatIpsProp + } + + obj, err = resourceComputeRouterNatAddressUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + lockName, err := tpgresource.ReplaceVars(d, config, "router/{{region}}/{{router}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, 
err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating RouterNatAddress %q: %#v", d.Id(), obj) + headers := make(http.Header) + + obj, err = resourceComputeRouterNatAddressPatchUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating RouterNatAddress %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating RouterNatAddress %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating RouterNatAddress", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceComputeRouterNatAddressRead(d, meta) +} + +func resourceComputeRouterNatAddressDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RouterNatAddress: %s", err) + } + billingProject = project + + lockName, err := tpgresource.ReplaceVars(d, config, "router/{{region}}/{{router}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + obj, err = resourceComputeRouterNatAddressPatchDeleteEncoder(d, meta, obj) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "RouterNatAddress") + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + // Since RouterNatAddress represents only the natIps field, we must make sure we only remove this value and not the entire NAT + obj, err = resourceComputeRouterNatAddressDeleteOnlyNatIps(d, meta, obj) + if err != nil { + return err + } + + log.Printf("[DEBUG] Deleting RouterNatAddress %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "RouterNatAddress") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting RouterNatAddress", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting RouterNatAddress %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeRouterNatAddressImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) +
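+ // Accepted import ID formats, from fully qualified to shortest: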
if err := tpgresource.ParseImportId([]string{ + "^projects/(?P<project>[^/]+)/regions/(?P<region>[^/]+)/routers/(?P<router>[^/]+)/(?P<router_nat>[^/]+)$", + "^(?P<project>[^/]+)/(?P<region>[^/]+)/(?P<router>[^/]+)/(?P<router_nat>[^/]+)$", + "^(?P<region>[^/]+)/(?P<router>[^/]+)/(?P<router_nat>[^/]+)$", + "^(?P<router>[^/]+)/(?P<router_nat>[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/routers/{{router}}/{{router_nat}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNestedComputeRouterNatAddressNatIps(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertAndMapStringArr(v.([]interface{}), tpgresource.ConvertSelfLinkToV1) +} + +func flattenNestedComputeRouterNatAddressDrainNatIps(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertAndMapStringArr(v.([]interface{}), tpgresource.ConvertSelfLinkToV1) +} + +func flattenNestedComputeRouterNatAddressRouterNat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func expandNestedComputeRouterNatAddressNatIps(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + return nil, fmt.Errorf("Invalid value for nat_ips: nil") + } + f, err := tpgresource.ParseRegionalFieldValue("addresses", raw.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for nat_ips: %s", err) + } + req = append(req, f.RelativeLink()) + } + return req, nil +} + +func expandNestedComputeRouterNatAddressDrainNatIps(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + return nil, fmt.Errorf("Invalid value for drain_nat_ips: nil") + } + f, err := tpgresource.ParseRegionalFieldValue("addresses", raw.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for drain_nat_ips: %s", err) + } + req = append(req, f.RelativeLink()) + } + return req, nil +} + +func expandNestedComputeRouterNatAddressRouterNat(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceComputeRouterNatAddressEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceComputeRouterNatAddressListForPatch(d, meta) + if err != nil { + return nil, err + } + + idx, found, err := resourceComputeRouterNatAddressFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + + // Merge new with existing item if item was already created (with the router nat resource). + if found != nil { + // Merge new object into old.
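+ // Keys set in the request overwrite the values read from the API; fields not sent are preserved.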
+ for k, v := range obj { + found[k] = v + } + currItems[idx] = found + + // Return list with new item added + resPatch := map[string]interface{}{ + "nats": currItems, + } + + return resPatch, nil + } + + // Prevent creating a RouterNatAddress if no RouterNat has been found + log.Printf("[WARNING] No RouterNat resource %+v found, preventing RouterNatAddress creation", obj) + res := map[string]interface{}{ + "nats": nil, + } + + return res, nil +} + +func resourceComputeRouterNatAddressUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // Since we only want to change the handling of the CREATE function, this encoder just returns the unchanged obj value + return obj, nil +} + +func flattenNestedComputeRouterNatAddress(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["nats"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value nats. Actual value: %v", v) + } + + _, item, err := resourceComputeRouterNatAddressFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceComputeRouterNatAddressFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedRouterNat, err := expandNestedComputeRouterNatAddressRouterNat(d.Get("router_nat"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedRouterNat := flattenNestedComputeRouterNatAddressRouterNat(expectedRouterNat, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. + for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + item := itemRaw.(map[string]interface{}) + + itemRouterNat := flattenNestedComputeRouterNatAddressRouterNat(item["name"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemRouterNat)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedRouterNat))) && !reflect.DeepEqual(itemRouterNat, expectedFlattenedRouterNat) { + log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemRouterNat, expectedFlattenedRouterNat) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} + +// PatchCreateEncoder handles creating request data to PATCH parent resource +// with list including new object. +func resourceComputeRouterNatAddressPatchCreateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceComputeRouterNatAddressListForPatch(d, meta) + if err != nil { + return nil, err + } + + _, found, err := resourceComputeRouterNatAddressFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + + // Return error if item already created. 
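+ // Note: the custom create path uses resourceComputeRouterNatAddressEncoder, which merges into an existing NAT instead of erroring on a duplicate.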
+	if found != nil {
+		return nil, fmt.Errorf("Unable to create RouterNatAddress, existing object already found: %+v", found)
+	}
+
+	// Return list with the resource to create appended
+	res := map[string]interface{}{
+		"nats": append(currItems, obj),
+	}
+
+	return res, nil
+}
+
+// PatchUpdateEncoder handles creating request data to PATCH parent resource
+// with list including updated object.
+func resourceComputeRouterNatAddressPatchUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) {
+	items, err := resourceComputeRouterNatAddressListForPatch(d, meta)
+	if err != nil {
+		return nil, err
+	}
+
+	idx, item, err := resourceComputeRouterNatAddressFindNestedObjectInList(d, meta, items)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return error if item to update does not exist.
+	if item == nil {
+		return nil, fmt.Errorf("Unable to update RouterNatAddress %q - not found in list", d.Id())
+	}
+
+	// Merge new object into old.
+	for k, v := range obj {
+		item[k] = v
+	}
+	items[idx] = item
+
+	// Return list with the updated item
+	res := map[string]interface{}{
+		"nats": items,
+	}
+
+	return res, nil
+}
+
+// PatchDeleteEncoder handles creating request data to PATCH parent resource
+// with list excluding object to delete.
+func resourceComputeRouterNatAddressPatchDeleteEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) {
+	currItems, err := resourceComputeRouterNatAddressListForPatch(d, meta)
+	if err != nil {
+		return nil, err
+	}
+
+	idx, item, err := resourceComputeRouterNatAddressFindNestedObjectInList(d, meta, currItems)
+	if err != nil {
+		return nil, err
+	}
+	if item == nil {
+		// Spoof 404 error for proper handling by Delete (i.e. no-op)
+		return nil, tpgresource.Fake404("nested", "ComputeRouterNatAddress")
+	}
+
+	updatedItems := append(currItems[:idx], currItems[idx+1:]...)
+	res := map[string]interface{}{
+		"nats": updatedItems,
+	}
+
+	return res, nil
+}
+
+// ListForPatch handles making API request to get parent resource and
+// extracting list of objects.
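+// With the default base path, this resolves to
+//   GET https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/routers/{router}
+// and the router's top-level "nats" list is returned as-is, e.g.
+//   [{"name": "my-nat", "natIps": [...]}, ...]
+// (illustrative shape). A nil, nil result means the router exists but has
+// no NATs configured yet.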
+func resourceComputeRouterNatAddressListForPatch(d *schema.ResourceData, meta interface{}) ([]interface{}, error) {
+	config := meta.(*transport_tpg.Config)
+	url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}")
+	if err != nil {
+		return nil, err
+	}
+	project, err := tpgresource.GetProject(d, config)
+	if err != nil {
+		return nil, err
+	}
+
+	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return nil, err
+	}
+
+	res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
+		Config:    config,
+		Method:    "GET",
+		Project:   project,
+		RawURL:    url,
+		UserAgent: userAgent,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	var v interface{}
+	var ok bool
+
+	v, ok = res["nats"]
+	if ok && v != nil {
+		ls, lsOk := v.([]interface{})
+		if !lsOk {
+			return nil, fmt.Errorf(`expected list for nested field "nats"`)
+		}
+		return ls, nil
+	}
+	return nil, nil
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_peer.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_peer.go
index 42ef6ade516..bba2d95f0ca 100644
--- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_peer.go
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_peer.go
@@ -144,6 +144,30 @@ CIDR-formatted string.`,
 Where there is more than one matching route of maximum
 length, the routes with the lowest priority value win.`,
 			},
+			"custom_learned_ip_ranges": {
+				Type:     schema.TypeList,
+				Optional: true,
+				Description: `The custom learned route IP address range. Must be a valid CIDR-formatted prefix. If an
+IP address is provided without a subnet mask, it is interpreted as, for IPv4, a /32 singular IP address range, and, for IPv6, /128.`,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"range": {
+							Type:     schema.TypeString,
+							Required: true,
+							Description: `The IP range to advertise. The value must be a
+CIDR-formatted string.`,
+						},
+					},
+				},
+			},
+			"custom_learned_route_priority": {
+				Type:     schema.TypeInt,
+				Optional: true,
+				Description: `The user-defined custom learned route priority for a BGP session.
+This value is applied to all custom learned route ranges for the session. You can choose a value
+from 0 to 65535.
If you don't provide a value, Google Cloud assigns a priority of 100 to the ranges.`, + }, + "bfd": { Type: schema.TypeList, Computed: true, @@ -371,7 +395,7 @@ func resourceComputeRouterBgpPeerCreate(d *schema.ResourceData, meta interface{} advertisedRoutePriorityProp, err := expandNestedComputeRouterBgpPeerAdvertisedRoutePriority(d.Get("advertised_route_priority"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("advertised_route_priority"); ok || !reflect.DeepEqual(v, advertisedRoutePriorityProp) { + } else if v, ok := d.GetOk("advertised_route_priority"); ok || !reflect.DeepEqual(v, advertisedRoutePriorityProp) { obj["advertisedRoutePriority"] = advertisedRoutePriorityProp } advertiseModeProp, err := expandNestedComputeRouterBgpPeerAdvertiseMode(d.Get("advertise_mode"), d, config) @@ -392,6 +416,18 @@ func resourceComputeRouterBgpPeerCreate(d *schema.ResourceData, meta interface{} } else if v, ok := d.GetOkExists("advertised_ip_ranges"); ok || !reflect.DeepEqual(v, advertisedIpRangesProp) { obj["advertisedIpRanges"] = advertisedIpRangesProp } + customLearnedIpRangesProp, err := expandNestedComputeRouterBgpPeerCustomLearnedIpRanges(d.Get("custom_learned_ip_ranges"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("custom_learned_ip_ranges"); ok || !reflect.DeepEqual(v, customLearnedIpRangesProp) { + obj["customLearnedIpRanges"] = customLearnedIpRangesProp + } + customLearnedRoutePriorityProp, err := expandNestedComputeRouterBgpPeerCustomLearnedRoutePriority(d.Get("custom_learned_route_priority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("custom_learned_route_priority"); ok || !reflect.DeepEqual(v, customLearnedRoutePriorityProp) { + obj["customLearnedRoutePriority"] = customLearnedRoutePriorityProp + } bfdProp, err := expandNestedComputeRouterBgpPeerBfd(d.Get("bfd"), d, config) if err != nil { return err @@ -610,6 +646,12 @@ func resourceComputeRouterBgpPeerRead(d *schema.ResourceData, meta interface{}) if err := d.Set("advertised_ip_ranges", flattenNestedComputeRouterBgpPeerAdvertisedIpRanges(res["advertisedIpRanges"], d, config)); err != nil { return fmt.Errorf("Error reading RouterBgpPeer: %s", err) } + if err := d.Set("custom_learned_ip_ranges", flattenNestedComputeRouterBgpPeerCustomLearnedIpRanges(res["customLearnedIpRanges"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("custom_learned_route_priority", flattenNestedComputeRouterBgpPeerCustomLearnedRoutePriority(res["customLearnedRoutePriority"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } if err := d.Set("management_type", flattenNestedComputeRouterBgpPeerManagementType(res["managementType"], d, config)); err != nil { return fmt.Errorf("Error reading RouterBgpPeer: %s", err) } @@ -684,7 +726,7 @@ func resourceComputeRouterBgpPeerUpdate(d *schema.ResourceData, meta interface{} advertisedRoutePriorityProp, err := expandNestedComputeRouterBgpPeerAdvertisedRoutePriority(d.Get("advertised_route_priority"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("advertised_route_priority"); ok || !reflect.DeepEqual(v, advertisedRoutePriorityProp) { + } else if v, ok := d.GetOk("advertised_route_priority"); ok || !reflect.DeepEqual(v, advertisedRoutePriorityProp) { obj["advertisedRoutePriority"] = advertisedRoutePriorityProp } advertiseModeProp, err := expandNestedComputeRouterBgpPeerAdvertiseMode(d.Get("advertise_mode"), 
d, config) @@ -705,6 +747,18 @@ func resourceComputeRouterBgpPeerUpdate(d *schema.ResourceData, meta interface{} } else if v, ok := d.GetOkExists("advertised_ip_ranges"); ok || !reflect.DeepEqual(v, advertisedIpRangesProp) { obj["advertisedIpRanges"] = advertisedIpRangesProp } + customLearnedIpRangesProp, err := expandNestedComputeRouterBgpPeerCustomLearnedIpRanges(d.Get("custom_learned_ip_ranges"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("custom_learned_ip_ranges"); ok || !reflect.DeepEqual(v, customLearnedIpRangesProp) { + obj["customLearnedIpRanges"] = customLearnedIpRangesProp + } + customLearnedRoutePriorityProp, err := expandNestedComputeRouterBgpPeerCustomLearnedRoutePriority(d.Get("custom_learned_route_priority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("custom_learned_route_priority"); ok || !reflect.DeepEqual(v, customLearnedRoutePriorityProp) { + obj["customLearnedRoutePriority"] = customLearnedRoutePriorityProp + } bfdProp, err := expandNestedComputeRouterBgpPeerBfd(d.Get("bfd"), d, config) if err != nil { return err @@ -1003,7 +1057,44 @@ func flattenNestedComputeRouterBgpPeerAdvertisedIpRangesRange(v interface{}, d * func flattenNestedComputeRouterBgpPeerAdvertisedIpRangesDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } +func flattenNestedComputeRouterBgpPeerCustomLearnedIpRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "range": flattenNestedComputeRouterBgpPeerCustomLearnedIpRangesRange(original["range"], d, config), + }) + } + return transformed +} +func flattenNestedComputeRouterBgpPeerCustomLearnedIpRangesRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} +func flattenNestedComputeRouterBgpPeerCustomLearnedRoutePriority(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} func flattenNestedComputeRouterBgpPeerManagementType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -1208,7 +1299,35 @@ func expandNestedComputeRouterBgpPeerAdvertisedIpRangesRange(v interface{}, d tp func expandNestedComputeRouterBgpPeerAdvertisedIpRangesDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } +func expandNestedComputeRouterBgpPeerCustomLearnedIpRanges(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRange, err := 
expandNestedComputeRouterBgpPeerCustomLearnedIpRangesRange(original["range"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRange); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["range"] = transformedRange + } + + req = append(req, transformed) + } + return req, nil +} +func expandNestedComputeRouterBgpPeerCustomLearnedIpRangesRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerCustomLearnedRoutePriority(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} func expandNestedComputeRouterBgpPeerBfd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_security_policy_rule.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_security_policy_rule.go index 80aff5dde81..aed31128637 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_security_policy_rule.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_security_policy_rule.go @@ -90,6 +90,35 @@ Rules are evaluated from highest to lowest priority where 0 is the highest prior Optional: true, Description: `An optional description of this resource. Provide this property when you create the resource.`, }, + "header_action": { + Type: schema.TypeList, + Optional: true, + Description: `Optional, additional actions that are performed on headers. This field is only supported in Global Security Policies of type CLOUD_ARMOR.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "request_headers_to_adds": { + Type: schema.TypeList, + Optional: true, + Description: `The list of request headers to add or overwrite if they're already present.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "header_name": { + Type: schema.TypeString, + Optional: true, + Description: `The name of the header to set.`, + }, + "header_value": { + Type: schema.TypeString, + Optional: true, + Description: `The value to set the named header to.`, + }, + }, + }, + }, + }, + }, + }, "match": { Type: schema.TypeList, Optional: true, @@ -473,6 +502,26 @@ Valid options are deny(STATUS), where valid values for STATUS are 403, 404, 429, }, }, }, + "redirect_options": { + Type: schema.TypeList, + Optional: true, + Description: `Parameters defining the redirect action. Cannot be specified for any other actions. This field is only supported in Global Security Policies of type CLOUD_ARMOR.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "target": { + Type: schema.TypeString, + Optional: true, + Description: `Target for the redirect action. 
This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA.`, + }, + "type": { + Type: schema.TypeString, + Optional: true, + Description: `Type of the redirect action.`, + }, + }, + }, + }, "project": { Type: schema.TypeString, Optional: true, @@ -528,6 +577,18 @@ func resourceComputeSecurityPolicyRuleCreate(d *schema.ResourceData, meta interf } else if v, ok := d.GetOkExists("rate_limit_options"); !tpgresource.IsEmptyValue(reflect.ValueOf(rateLimitOptionsProp)) && (ok || !reflect.DeepEqual(v, rateLimitOptionsProp)) { obj["rateLimitOptions"] = rateLimitOptionsProp } + redirectOptionsProp, err := expandComputeSecurityPolicyRuleRedirectOptions(d.Get("redirect_options"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("redirect_options"); !tpgresource.IsEmptyValue(reflect.ValueOf(redirectOptionsProp)) && (ok || !reflect.DeepEqual(v, redirectOptionsProp)) { + obj["redirectOptions"] = redirectOptionsProp + } + headerActionProp, err := expandComputeSecurityPolicyRuleHeaderAction(d.Get("header_action"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("header_action"); !tpgresource.IsEmptyValue(reflect.ValueOf(headerActionProp)) && (ok || !reflect.DeepEqual(v, headerActionProp)) { + obj["headerAction"] = headerActionProp + } previewProp, err := expandComputeSecurityPolicyRulePreview(d.Get("preview"), d, config) if err != nil { return err @@ -555,6 +616,17 @@ func resourceComputeSecurityPolicyRuleCreate(d *schema.ResourceData, meta interf } headers := make(http.Header) + // We can't Create a default rule since one is automatically created with the policy + rulePriority, ok := d.GetOk("priority") + + if ok && rulePriority.(int) == 2147483647 { + log.Printf("[WARN] SecurityPolicyRule represents a default rule, will attempt an Update instead") + newUrl, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/securityPolicies/{{security_policy}}/patchRule?priority={{priority}}") + if err != nil { + return err + } + url = newUrl + } res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, Method: "POST", @@ -651,6 +723,12 @@ func resourceComputeSecurityPolicyRuleRead(d *schema.ResourceData, meta interfac if err := d.Set("rate_limit_options", flattenComputeSecurityPolicyRuleRateLimitOptions(res["rateLimitOptions"], d, config)); err != nil { return fmt.Errorf("Error reading SecurityPolicyRule: %s", err) } + if err := d.Set("redirect_options", flattenComputeSecurityPolicyRuleRedirectOptions(res["redirectOptions"], d, config)); err != nil { + return fmt.Errorf("Error reading SecurityPolicyRule: %s", err) + } + if err := d.Set("header_action", flattenComputeSecurityPolicyRuleHeaderAction(res["headerAction"], d, config)); err != nil { + return fmt.Errorf("Error reading SecurityPolicyRule: %s", err) + } if err := d.Set("preview", flattenComputeSecurityPolicyRulePreview(res["preview"], d, config)); err != nil { return fmt.Errorf("Error reading SecurityPolicyRule: %s", err) } @@ -710,6 +788,18 @@ func resourceComputeSecurityPolicyRuleUpdate(d *schema.ResourceData, meta interf } else if v, ok := d.GetOkExists("rate_limit_options"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, rateLimitOptionsProp)) { obj["rateLimitOptions"] = rateLimitOptionsProp } + redirectOptionsProp, err := expandComputeSecurityPolicyRuleRedirectOptions(d.Get("redirect_options"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("redirect_options"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, redirectOptionsProp)) { + obj["redirectOptions"] = redirectOptionsProp + } + headerActionProp, err := expandComputeSecurityPolicyRuleHeaderAction(d.Get("header_action"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("header_action"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, headerActionProp)) { + obj["headerAction"] = headerActionProp + } previewProp, err := expandComputeSecurityPolicyRulePreview(d.Get("preview"), d, config) if err != nil { return err @@ -758,6 +848,14 @@ func resourceComputeSecurityPolicyRuleUpdate(d *schema.ResourceData, meta interf "rateLimitOptions.banDurationSec") } + if d.HasChange("redirect_options") { + updateMask = append(updateMask, "redirectOptions") + } + + if d.HasChange("header_action") { + updateMask = append(updateMask, "headerAction") + } + if d.HasChange("preview") { updateMask = append(updateMask, "preview") } @@ -832,6 +930,13 @@ func resourceComputeSecurityPolicyRuleDelete(d *schema.ResourceData, meta interf } headers := make(http.Header) + // The default rule of a Security Policy cannot be removed + rulePriority, ok := d.GetOk("priority") + + if ok && rulePriority.(int) == 2147483647 { + log.Printf("[WARN] SecurityPolicyRule represents a default rule, skipping Delete request") + return nil + } log.Printf("[DEBUG] Deleting SecurityPolicyRule %q", d.Id()) res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ @@ -1360,6 +1465,69 @@ func flattenComputeSecurityPolicyRuleRateLimitOptionsBanDurationSec(v interface{ return v // let terraform core handle it otherwise } +func flattenComputeSecurityPolicyRuleRedirectOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["type"] = + flattenComputeSecurityPolicyRuleRedirectOptionsType(original["type"], d, config) + transformed["target"] = + flattenComputeSecurityPolicyRuleRedirectOptionsTarget(original["target"], d, config) + return []interface{}{transformed} +} +func flattenComputeSecurityPolicyRuleRedirectOptionsType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSecurityPolicyRuleRedirectOptionsTarget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSecurityPolicyRuleHeaderAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["request_headers_to_adds"] = + flattenComputeSecurityPolicyRuleHeaderActionRequestHeadersToAdds(original["requestHeadersToAdds"], d, config) + return []interface{}{transformed} +} +func flattenComputeSecurityPolicyRuleHeaderActionRequestHeadersToAdds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = 
append(transformed, map[string]interface{}{ + "header_name": flattenComputeSecurityPolicyRuleHeaderActionRequestHeadersToAddsHeaderName(original["headerName"], d, config), + "header_value": flattenComputeSecurityPolicyRuleHeaderActionRequestHeadersToAddsHeaderValue(original["headerValue"], d, config), + }) + } + return transformed +} +func flattenComputeSecurityPolicyRuleHeaderActionRequestHeadersToAddsHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSecurityPolicyRuleHeaderActionRequestHeadersToAddsHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenComputeSecurityPolicyRulePreview(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -1985,6 +2153,96 @@ func expandComputeSecurityPolicyRuleRateLimitOptionsBanDurationSec(v interface{} return v, nil } +func expandComputeSecurityPolicyRuleRedirectOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedType, err := expandComputeSecurityPolicyRuleRedirectOptionsType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + transformedTarget, err := expandComputeSecurityPolicyRuleRedirectOptionsTarget(original["target"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTarget); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["target"] = transformedTarget + } + + return transformed, nil +} + +func expandComputeSecurityPolicyRuleRedirectOptionsType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSecurityPolicyRuleRedirectOptionsTarget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSecurityPolicyRuleHeaderAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRequestHeadersToAdds, err := expandComputeSecurityPolicyRuleHeaderActionRequestHeadersToAdds(original["request_headers_to_adds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRequestHeadersToAdds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["requestHeadersToAdds"] = transformedRequestHeadersToAdds + } + + return transformed, nil +} + +func expandComputeSecurityPolicyRuleHeaderActionRequestHeadersToAdds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHeaderName, err := 
expandComputeSecurityPolicyRuleHeaderActionRequestHeadersToAddsHeaderName(original["header_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["headerName"] = transformedHeaderName + } + + transformedHeaderValue, err := expandComputeSecurityPolicyRuleHeaderActionRequestHeadersToAddsHeaderValue(original["header_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["headerValue"] = transformedHeaderValue + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeSecurityPolicyRuleHeaderActionRequestHeadersToAddsHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSecurityPolicyRuleHeaderActionRequestHeadersToAddsHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandComputeSecurityPolicyRulePreview(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_service_attachment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_service_attachment.go index 80b164f1f73..867defae0a1 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_service_attachment.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_service_attachment.go @@ -129,8 +129,7 @@ except the last character, which cannot be a dash.`, Required: true, ForceNew: true, DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: `The URL of a forwarding rule that represents the service identified by -this service attachment.`, + Description: `The URL of a service serving the endpoint identified by this service attachment.`, }, "consumer_accept_lists": { Type: schema.TypeSet, @@ -166,6 +165,18 @@ supported is 1.`, Type: schema.TypeString, }, }, + "propagated_connection_limit": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The number of consumer spokes that connected Private Service Connect endpoints can be propagated to through Network Connectivity Center. +This limit lets the service producer limit how many propagated Private Service Connect connections can be established to this service attachment from a single consumer. + +If the connection preference of the service attachment is ACCEPT_MANUAL, the limit applies to each project or network that is listed in the consumer accept list. +If the connection preference of the service attachment is ACCEPT_AUTOMATIC, the limit applies to each project that contains a connected endpoint. + +If unspecified, the default propagated connection limit is 250.`, + }, "reconcile_connections": { Type: schema.TypeBool, Computed: true, @@ -190,11 +201,26 @@ If true, update will affect both PENDING and ACCEPTED/REJECTED PSC endpoints. 
Fo attachment.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "consumer_network": { + Type: schema.TypeString, + Computed: true, + Description: `The url of the consumer network.`, + }, "endpoint": { Type: schema.TypeString, Computed: true, Description: `The URL of the consumer forwarding rule.`, }, + "propagated_connection_count": { + Type: schema.TypeInt, + Computed: true, + Description: `The number of consumer Network Connectivity Center spokes that the connected Private Service Connect endpoint has propagated to.`, + }, + "psc_connection_id": { + Type: schema.TypeString, + Computed: true, + Description: `The PSC connection id of the connected endpoint.`, + }, "status": { Type: schema.TypeString, Computed: true, @@ -325,6 +351,12 @@ func resourceComputeServiceAttachmentCreate(d *schema.ResourceData, meta interfa } else if v, ok := d.GetOkExists("reconcile_connections"); ok || !reflect.DeepEqual(v, reconcileConnectionsProp) { obj["reconcileConnections"] = reconcileConnectionsProp } + propagatedConnectionLimitProp, err := expandComputeServiceAttachmentPropagatedConnectionLimit(d.Get("propagated_connection_limit"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("propagated_connection_limit"); !tpgresource.IsEmptyValue(reflect.ValueOf(propagatedConnectionLimitProp)) && (ok || !reflect.DeepEqual(v, propagatedConnectionLimitProp)) { + obj["propagatedConnectionLimit"] = propagatedConnectionLimitProp + } regionProp, err := expandComputeServiceAttachmentRegion(d.Get("region"), d, config) if err != nil { return err @@ -474,6 +506,9 @@ func resourceComputeServiceAttachmentRead(d *schema.ResourceData, meta interface if err := d.Set("reconcile_connections", flattenComputeServiceAttachmentReconcileConnections(res["reconcileConnections"], d, config)); err != nil { return fmt.Errorf("Error reading ServiceAttachment: %s", err) } + if err := d.Set("propagated_connection_limit", flattenComputeServiceAttachmentPropagatedConnectionLimit(res["propagatedConnectionLimit"], d, config)); err != nil { + return fmt.Errorf("Error reading ServiceAttachment: %s", err) + } if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { return fmt.Errorf("Error reading ServiceAttachment: %s", err) } @@ -545,6 +580,12 @@ func resourceComputeServiceAttachmentUpdate(d *schema.ResourceData, meta interfa } else if v, ok := d.GetOkExists("reconcile_connections"); ok || !reflect.DeepEqual(v, reconcileConnectionsProp) { obj["reconcileConnections"] = reconcileConnectionsProp } + propagatedConnectionLimitProp, err := expandComputeServiceAttachmentPropagatedConnectionLimit(d.Get("propagated_connection_limit"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("propagated_connection_limit"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, propagatedConnectionLimitProp)) { + obj["propagatedConnectionLimit"] = propagatedConnectionLimitProp + } obj, err = resourceComputeServiceAttachmentUpdateEncoder(d, meta, obj) if err != nil { @@ -698,8 +739,11 @@ func flattenComputeServiceAttachmentConnectedEndpoints(v interface{}, d *schema. 
continue } transformed = append(transformed, map[string]interface{}{ - "endpoint": flattenComputeServiceAttachmentConnectedEndpointsEndpoint(original["endpoint"], d, config), - "status": flattenComputeServiceAttachmentConnectedEndpointsStatus(original["status"], d, config), + "endpoint": flattenComputeServiceAttachmentConnectedEndpointsEndpoint(original["endpoint"], d, config), + "status": flattenComputeServiceAttachmentConnectedEndpointsStatus(original["status"], d, config), + "consumer_network": flattenComputeServiceAttachmentConnectedEndpointsConsumerNetwork(original["consumerNetwork"], d, config), + "psc_connection_id": flattenComputeServiceAttachmentConnectedEndpointsPscConnectionId(original["pscConnectionId"], d, config), + "propagated_connection_count": flattenComputeServiceAttachmentConnectedEndpointsPropagatedConnectionCount(original["propagatedConnectionCount"], d, config), }) } return transformed @@ -712,11 +756,33 @@ func flattenComputeServiceAttachmentConnectedEndpointsStatus(v interface{}, d *s return v } -func flattenComputeServiceAttachmentTargetService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - if v == nil { - return v +func flattenComputeServiceAttachmentConnectedEndpointsConsumerNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeServiceAttachmentConnectedEndpointsPscConnectionId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeServiceAttachmentConnectedEndpointsPropagatedConnectionCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } } - return tpgresource.ConvertSelfLinkToV1(v.(string)) + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeServiceAttachmentTargetService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } func flattenComputeServiceAttachmentNatSubnets(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { @@ -787,6 +853,23 @@ func flattenComputeServiceAttachmentReconcileConnections(v interface{}, d *schem return v } +func flattenComputeServiceAttachmentPropagatedConnectionLimit(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + func expandComputeServiceAttachmentName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -804,11 +887,7 @@ func expandComputeServiceAttachmentConnectionPreference(v interface{}, d tpgreso } func expandComputeServiceAttachmentTargetService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - f, err := tpgresource.ParseRegionalFieldValue("forwardingRules", v.(string), "project", "region", "zone", d, config, true) 
- if err != nil { - return nil, fmt.Errorf("Invalid value for target_service: %s", err) - } - return f.RelativeLink(), nil + return v, nil } func expandComputeServiceAttachmentNatSubnets(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { @@ -892,6 +971,10 @@ func expandComputeServiceAttachmentReconcileConnections(v interface{}, d tpgreso return v, nil } +func expandComputeServiceAttachmentPropagatedConnectionLimit(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandComputeServiceAttachmentRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) if err != nil { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ssl_certificate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ssl_certificate.go index a8bde8eb789..24e9b790b94 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ssl_certificate.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ssl_certificate.go @@ -116,11 +116,11 @@ These are in the same namespace as the managed SSL certificates.`, Description: "Creates a unique name beginning with the specified prefix. Conflicts with name.", ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { // https://cloud.google.com/compute/docs/reference/latest/sslCertificates#resource - // uuid is 26 characters, limit the prefix to 37. + // uuid is 9 characters, limit the prefix to 54. value := v.(string) - if len(value) > 37 { + if len(value) > 54 { errors = append(errors, fmt.Errorf( - "%q cannot be longer than 37 characters, name is limited to 63", k)) + "%q cannot be longer than 54 characters, name is limited to 63", k)) } return }, @@ -422,7 +422,12 @@ func expandComputeSslCertificateName(v interface{}, d tpgresource.TerraformResou if v, ok := d.GetOk("name"); ok { certName = v.(string) } else if v, ok := d.GetOk("name_prefix"); ok { - certName = id.PrefixedUniqueId(v.(string)) + prefix := v.(string) + if len(prefix) > 37 { + certName = tpgresource.ReducedPrefixedUniqueId(prefix) + } else { + certName = id.PrefixedUniqueId(prefix) + } } else { certName = id.UniqueId() } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_storage_pool_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_storage_pool_sweeper.go new file mode 100644 index 00000000000..e759b3d628f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_storage_pool_sweeper.go @@ -0,0 +1,98 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+package compute
+
+import (
+	"context"
+	"log"
+
+	"github.com/hashicorp/terraform-provider-google/google/sweeper"
+	transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport"
+)
+
+// This will sweep GCE Storage Pool resources
+func init() {
+	sweeper.AddTestSweepers("ComputeStoragePool", testSweepStoragePool)
+}
+
+// At the time of writing, the CI only passes us-central1 as the region
+func testSweepStoragePool(region string) error {
+	resourceName := "ComputeStoragePool"
+	log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName)
+
+	config, err := sweeper.SharedConfigForRegion(region)
+	if err != nil {
+		log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err)
+		return err
+	}
+
+	err = config.LoadAndValidate(context.Background())
+	if err != nil {
+		log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err)
+		return err
+	}
+
+	zones := []string{"us-central1-a", "us-central1-b", "us-central1-c", "us-central1-f", "us-east1-b", "us-east1-c", "us-east1-d", "us-west1-a", "us-west1-b", "us-west1-c"}
+	for _, zone := range zones {
+		servicesUrl := "https://compute.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/storagePools"
+		res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
+			Config:    config,
+			Method:    "GET",
+			Project:   config.Project,
+			RawURL:    servicesUrl,
+			UserAgent: config.UserAgent,
+		})
+		if err != nil {
+			log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", servicesUrl, err)
+			return nil
+		}
+
+		resourceList, ok := res["items"]
+		if !ok {
+			log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.")
+			return nil
+		}
+
+		rl := resourceList.([]interface{})
+
+		log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName)
+		// Count items that weren't swept.
+		nonPrefixCount := 0
+		for _, ri := range rl {
+			obj := ri.(map[string]interface{})
+			if obj["id"] == nil {
+				log.Printf("[INFO][SWEEPER_LOG] %s resource id was nil", resourceName)
+				return nil
+			}
+
+			id := obj["name"].(string)
+			// Increment count and skip if resource is not sweepable.
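+			// Only names carrying a recognized test prefix (the "tf-test"
+			// prefix mentioned in the summary log below) are treated as
+			// sweepable; anything else is counted and left untouched so the
+			// sweeper never deletes real resources.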
+ if !sweeper.IsSweepableTestResource(id) { + nonPrefixCount++ + continue + } + + deleteUrl := servicesUrl + "/" + id + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, id) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items without tf-test prefix remain for zone %s", nonPrefixCount, zone) + } + + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_subnetwork.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_subnetwork.go index 71f30340b0c..2fa96397b87 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_subnetwork.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_subnetwork.go @@ -55,6 +55,37 @@ func IsShrinkageIpCidr(_ context.Context, old, new, _ interface{}) bool { return true } +func sendSecondaryIpRangeIfEmptyDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { + // on create, return immediately as we don't need to determine if the value is empty or not + if diff.Id() == "" { + return nil + } + + sendZero := diff.Get("send_secondary_ip_range_if_empty").(bool) + if !sendZero { + return nil + } + + configSecondaryIpRange := diff.GetRawConfig().GetAttr("secondary_ip_range") + if !configSecondaryIpRange.IsKnown() { + return nil + } + configValueIsEmpty := configSecondaryIpRange.IsNull() || configSecondaryIpRange.LengthInt() == 0 + + stateSecondaryIpRange := diff.GetRawState().GetAttr("secondary_ip_range") + if !stateSecondaryIpRange.IsKnown() { + return nil + } + stateValueIsEmpty := stateSecondaryIpRange.IsNull() || stateSecondaryIpRange.LengthInt() == 0 + + if configValueIsEmpty && !stateValueIsEmpty { + log.Printf("[DEBUG] setting secondary_ip_range to newly empty") + diff.SetNew("secondary_ip_range", make([]interface{}, 0)) + } + + return nil +} + func ResourceComputeSubnetwork() *schema.Resource { return &schema.Resource{ Create: resourceComputeSubnetworkCreate, @@ -75,19 +106,11 @@ func ResourceComputeSubnetwork() *schema.Resource { CustomizeDiff: customdiff.All( resourceComputeSubnetworkSecondaryIpRangeSetStyleDiff, customdiff.ForceNewIfChange("ip_cidr_range", IsShrinkageIpCidr), + sendSecondaryIpRangeIfEmptyDiff, tpgresource.DefaultProviderProject, ), Schema: map[string]*schema.Schema{ - "ip_cidr_range": { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidateIpCidrRange, - Description: `The range of internal addresses that are owned by this subnetwork. -Provide this property when you create the subnetwork. For example, -10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and -non-overlapping within a network. 
Only IPv4 is supported.`, - }, "name": { Type: schema.TypeString, Required: true, @@ -124,6 +147,17 @@ creation time.`, ForceNew: true, Description: `The range of external IPv6 addresses that are owned by this subnetwork.`, }, + "ip_cidr_range": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateIpCidrRange, + Description: `The range of internal addresses that are owned by this subnetwork. +Provide this property when you create the subnetwork. For example, +10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and +non-overlapping within a network. Only IPv4 is supported. +Field is optional when 'reserved_internal_range' is defined, otherwise required.`, + }, "ipv6_access_type": { Type: schema.TypeString, Optional: true, @@ -214,13 +248,13 @@ access Google APIs and services by using Private Google Access.`, Computed: true, Optional: true, ForceNew: true, - Description: `The purpose of the resource. This field can be either 'PRIVATE_RFC_1918', 'REGIONAL_MANAGED_PROXY', 'GLOBAL_MANAGED_PROXY', 'PRIVATE_SERVICE_CONNECT' or 'PRIVATE_NAT'([Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)). + Description: `The purpose of the resource. This field can be either 'PRIVATE', 'REGIONAL_MANAGED_PROXY', 'GLOBAL_MANAGED_PROXY', 'PRIVATE_SERVICE_CONNECT' or 'PRIVATE_NAT'([Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)). A subnet with purpose set to 'REGIONAL_MANAGED_PROXY' is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnetwork in a given region with purpose set to 'GLOBAL_MANAGED_PROXY' is a proxy-only subnet and is shared between all the cross-regional Envoy-based load balancers. A subnetwork with purpose set to 'PRIVATE_SERVICE_CONNECT' reserves the subnet for hosting a Private Service Connect published service. A subnetwork with purpose set to 'PRIVATE_NAT' is used as source range for Private NAT gateways. Note that 'REGIONAL_MANAGED_PROXY' is the preferred setting for all regional Envoy load balancers. -If unspecified, the purpose defaults to 'PRIVATE_RFC_1918'.`, +If unspecified, the purpose defaults to 'PRIVATE'.`, }, "region": { Type: schema.TypeString, @@ -230,6 +264,14 @@ If unspecified, the purpose defaults to 'PRIVATE_RFC_1918'.`, DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The GCP region for this subnetwork.`, }, + "reserved_internal_range": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The ID of the reserved internal range. Must be prefixed with 'networkconnectivity.googleapis.com' +E.g. 'networkconnectivity.googleapis.com/projects/{project}/locations/global/internalRanges/{rangeId}'`, + }, "role": { Type: schema.TypeString, Optional: true, @@ -241,31 +283,19 @@ An 'ACTIVE' subnetwork is one that is currently being used for Envoy-based load A 'BACKUP' subnetwork is one that is ready to be promoted to 'ACTIVE' or is currently draining. Possible values: ["ACTIVE", "BACKUP"]`, }, "secondary_ip_range": { - Type: schema.TypeList, - Computed: true, - Optional: true, - ConfigMode: schema.SchemaConfigModeAttr, + Type: schema.TypeList, + Computed: true, + Optional: true, Description: `An array of configurations for secondary IP ranges for VM instances contained in this subnetwork. The primary IP of such VM must belong to the primary ipCidrRange of the subnetwork. The alias IPs may belong to either primary or secondary ranges. 
**Note**: This field uses [attr-as-block mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html) to avoid -breaking users during the 0.12 upgrade. To explicitly send a list -of zero objects you must use the following syntax: -'example=[]' -For more details about this behavior, see [this section](https://www.terraform.io/docs/configuration/attr-as-blocks.html#defining-a-fixed-object-collection-value).`, +breaking users during the 0.12 upgrade. To explicitly send a list of zero objects, +set 'send_secondary_ip_range_if_empty = true'`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "ip_cidr_range": { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidateIpCidrRange, - Description: `The range of IP addresses belonging to this subnetwork secondary -range. Provide this property when you create the subnetwork. -Ranges must be unique and non-overlapping with all primary and -secondary IP ranges within a network. Only IPv4 is supported.`, - }, "range_name": { Type: schema.TypeString, Required: true, @@ -275,6 +305,24 @@ when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork.`, }, + "ip_cidr_range": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateIpCidrRange, + Description: `The range of IP addresses belonging to this subnetwork secondary +range. Provide this property when you create the subnetwork. +Ranges must be unique and non-overlapping with all primary and +secondary IP ranges within a network. Only IPv4 is supported. +Field is optional when 'reserved_internal_range' is defined, otherwise required.`, + }, + "reserved_internal_range": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The ID of the reserved internal range. Must be prefixed with 'networkconnectivity.googleapis.com' +E.g. 'networkconnectivity.googleapis.com/projects/{project}/locations/global/internalRanges/{rangeId}'`, + }, }, }, }, @@ -307,6 +355,16 @@ outside this subnetwork.`, Computed: true, Description: `The range of internal IPv6 addresses that are owned by this subnetwork.`, }, + "send_secondary_ip_range_if_empty": { + Type: schema.TypeBool, + Optional: true, + Description: `Controls the removal behavior of secondary_ip_range. +When false, removing secondary_ip_range from config will not produce a diff as +the provider will default to the API's value. +When true, the provider will treat removing secondary_ip_range as sending an +empty list of secondary IP ranges to the API. 
+Defaults to false.`, + }, "fingerprint": { Type: schema.TypeString, Computed: true, @@ -345,8 +403,8 @@ func resourceComputeSubnetworkSecondaryIpRangeSetStyleDiff(_ context.Context, di if count < 1 { return nil } - old := make([]interface{}, count) - new := make([]interface{}, count) + old := make([]interface{}, 0, count) + new := make([]interface{}, 0, count) for i := 0; i < count; i++ { o, n := diff.GetChange(fmt.Sprintf("secondary_ip_range.%d", i)) @@ -390,6 +448,12 @@ func resourceComputeSubnetworkCreate(d *schema.ResourceData, meta interface{}) e } else if v, ok := d.GetOkExists("ip_cidr_range"); !tpgresource.IsEmptyValue(reflect.ValueOf(ipCidrRangeProp)) && (ok || !reflect.DeepEqual(v, ipCidrRangeProp)) { obj["ipCidrRange"] = ipCidrRangeProp } + reservedInternalRangeProp, err := expandComputeSubnetworkReservedInternalRange(d.Get("reserved_internal_range"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("reserved_internal_range"); !tpgresource.IsEmptyValue(reflect.ValueOf(reservedInternalRangeProp)) && (ok || !reflect.DeepEqual(v, reservedInternalRangeProp)) { + obj["reservedInternalRange"] = reservedInternalRangeProp + } nameProp, err := expandComputeSubnetworkName(d.Get("name"), d, config) if err != nil { return err @@ -557,6 +621,7 @@ func resourceComputeSubnetworkRead(d *schema.ResourceData, meta interface{}) err return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeSubnetwork %q", d.Id())) } + // Explicitly set virtual fields to default values if unset if err := d.Set("project", project); err != nil { return fmt.Errorf("Error reading Subnetwork: %s", err) } @@ -573,6 +638,9 @@ func resourceComputeSubnetworkRead(d *schema.ResourceData, meta interface{}) err if err := d.Set("ip_cidr_range", flattenComputeSubnetworkIpCidrRange(res["ipCidrRange"], d, config)); err != nil { return fmt.Errorf("Error reading Subnetwork: %s", err) } + if err := d.Set("reserved_internal_range", flattenComputeSubnetworkReservedInternalRange(res["reservedInternalRange"], d, config)); err != nil { + return fmt.Errorf("Error reading Subnetwork: %s", err) + } if err := d.Set("name", flattenComputeSubnetworkName(res["name"], d, config)); err != nil { return fmt.Errorf("Error reading Subnetwork: %s", err) } @@ -1016,6 +1084,78 @@ func resourceComputeSubnetworkUpdate(d *schema.ResourceData, meta interface{}) e d.Partial(false) + if v, ok := d.GetOk("send_secondary_ip_range_if_empty"); ok && v.(bool) { + if sv, ok := d.GetOk("secondary_ip_range"); ok { + configValue := d.GetRawConfig().GetAttr("secondary_ip_range") + stateValue := sv.([]interface{}) + if configValue.LengthInt() == 0 && len(stateValue) != 0 { + log.Printf("[DEBUG] Sending empty secondary_ip_range in update") + obj := make(map[string]interface{}) + obj["secondaryIpRanges"] = make([]interface{}, 0) + + // The rest is the same as the secondary_ip_range generated update code + // without the secondaryIpRangesProp logic + + getUrl, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + getRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: getUrl, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, 
fmt.Sprintf("ComputeSubnetwork %q", d.Id())) + } + + obj["fingerprint"] = getRes["fingerprint"] + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") + if err != nil { + return err + } + + headers := make(http.Header) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error updating Subnetwork %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Subnetwork %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating Subnetwork", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + } + } return resourceComputeSubnetworkRead(d, meta) } @@ -1093,6 +1233,8 @@ func resourceComputeSubnetworkImport(d *schema.ResourceData, meta interface{}) ( } d.SetId(id) + // Explicitly set virtual fields to default values on import + return []*schema.ResourceData{d}, nil } @@ -1112,6 +1254,13 @@ func flattenComputeSubnetworkIpCidrRange(v interface{}, d *schema.ResourceData, return v } +func flattenComputeSubnetworkReservedInternalRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + func flattenComputeSubnetworkName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -1144,8 +1293,9 @@ func flattenComputeSubnetworkSecondaryIpRange(v interface{}, d *schema.ResourceD continue } transformed = append(transformed, map[string]interface{}{ - "range_name": flattenComputeSubnetworkSecondaryIpRangeRangeName(original["rangeName"], d, config), - "ip_cidr_range": flattenComputeSubnetworkSecondaryIpRangeIpCidrRange(original["ipCidrRange"], d, config), + "range_name": flattenComputeSubnetworkSecondaryIpRangeRangeName(original["rangeName"], d, config), + "ip_cidr_range": flattenComputeSubnetworkSecondaryIpRangeIpCidrRange(original["ipCidrRange"], d, config), + "reserved_internal_range": flattenComputeSubnetworkSecondaryIpRangeReservedInternalRange(original["reservedInternalRange"], d, config), }) } return transformed @@ -1158,6 +1308,13 @@ func flattenComputeSubnetworkSecondaryIpRangeIpCidrRange(v interface{}, d *schem return v } +func flattenComputeSubnetworkSecondaryIpRangeReservedInternalRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + func flattenComputeSubnetworkPrivateIpGoogleAccess(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -1234,6 +1391,10 @@ func expandComputeSubnetworkIpCidrRange(v interface{}, d tpgresource.TerraformRe return v, nil } +func expandComputeSubnetworkReservedInternalRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandComputeSubnetworkName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -1278,6 +1439,13 @@ func 
expandComputeSubnetworkSecondaryIpRange(v interface{}, d tpgresource.Terraf transformed["ipCidrRange"] = transformedIpCidrRange } + transformedReservedInternalRange, err := expandComputeSubnetworkSecondaryIpRangeReservedInternalRange(original["reserved_internal_range"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReservedInternalRange); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["reservedInternalRange"] = transformedReservedInternalRange + } + req = append(req, transformed) } return req, nil @@ -1291,6 +1459,10 @@ func expandComputeSubnetworkSecondaryIpRangeIpCidrRange(v interface{}, d tpgreso return v, nil } +func expandComputeSubnetworkSecondaryIpRangeReservedInternalRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandComputeSubnetworkPrivateIpGoogleAccess(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_http_proxy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_http_proxy.go index 027890a208f..4c9fcbcdf2c 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_http_proxy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_http_proxy.go @@ -84,10 +84,13 @@ to the BackendService.`, ForceNew: true, Description: `Specifies how long to keep a connection open, after completing a response, while there is no matching traffic (in seconds). If an HTTP keepalive is -not specified, a default value (610 seconds) will be used. For Global -external HTTP(S) load balancer, the minimum allowed value is 5 seconds and -the maximum allowed value is 1200 seconds. For Global external HTTP(S) -load balancer (classic), this option is not available publicly.`, +not specified, a default value will be used. For Global +external HTTP(S) load balancer, the default value is 610 seconds, the +minimum allowed value is 5 seconds and the maximum allowed value is 1200 +seconds. For cross-region internal HTTP(S) load balancer, the default +value is 600 seconds, the minimum allowed value is 5 seconds, and the +maximum allowed value is 600 seconds. 
For Global external HTTP(S) load +balancer (classic), this option is not available publicly.`, }, "proxy_bind": { Type: schema.TypeBool, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_https_proxy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_https_proxy.go index 7a4ebe74f7e..b60d0195cb0 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_https_proxy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_https_proxy.go @@ -109,10 +109,13 @@ Accepted format is '//certificatemanager.googleapis.com/projects/{project}/locat ForceNew: true, Description: `Specifies how long to keep a connection open, after completing a response, while there is no matching traffic (in seconds). If an HTTP keepalive is -not specified, a default value (610 seconds) will be used. For Global -external HTTP(S) load balancer, the minimum allowed value is 5 seconds and -the maximum allowed value is 1200 seconds. For Global external HTTP(S) -load balancer (classic), this option is not available publicly.`, +not specified, a default value will be used. For Global +external HTTP(S) load balancer, the default value is 610 seconds, the +minimum allowed value is 5 seconds and the maximum allowed value is 1200 +seconds. For cross-region internal HTTP(S) load balancer, the default +value is 600 seconds, the minimum allowed value is 5 seconds, and the +maximum allowed value is 600 seconds. For Global external HTTP(S) load +balancer (classic), this option is not available publicly.`, }, "proxy_bind": { Type: schema.TypeBool, @@ -135,7 +138,6 @@ specified, Google manages whether QUIC is used. Default value: "NONE" Possible v "server_tls_policy": { Type: schema.TypeString, Optional: true, - ForceNew: true, DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `A URL referring to a networksecurity.ServerTlsPolicy resource that describes how the proxy should authenticate inbound @@ -145,7 +147,12 @@ set to INTERNAL_SELF_MANAGED or EXTERNAL or EXTERNAL_MANAGED. For details which ServerTlsPolicy resources are accepted with INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. -If left blank, communications are not encrypted.`, +If left blank, communications are not encrypted. + +If you remove this field from your configuration at the same time as +deleting or recreating a referenced ServerTlsPolicy resource, you will +receive a resourceInUseByAnotherResource error. 
Use lifecycle.create_before_destroy +within the ServerTlsPolicy resource to avoid this.`, }, "ssl_certificates": { Type: schema.TypeList, @@ -721,6 +728,79 @@ func resourceComputeTargetHttpsProxyUpdate(d *schema.ResourceData, meta interfac return err } } + if d.HasChange("server_tls_policy") { + obj := make(map[string]interface{}) + + getUrl, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpsProxies/{{name}}") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + getRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: getUrl, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeTargetHttpsProxy %q", d.Id())) + } + + obj["fingerprint"] = getRes["fingerprint"] + + serverTlsPolicyProp, err := expandComputeTargetHttpsProxyServerTlsPolicy(d.Get("server_tls_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("server_tls_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, serverTlsPolicyProp)) { + obj["serverTlsPolicy"] = serverTlsPolicyProp + } + + obj, err = resourceComputeTargetHttpsProxyUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpsProxies/{{name}}") + if err != nil { + return err + } + + headers := make(http.Header) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error updating TargetHttpsProxy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating TargetHttpsProxy %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating TargetHttpsProxy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } d.Partial(false) @@ -1000,6 +1080,14 @@ func resourceComputeTargetHttpsProxyEncoder(d *schema.ResourceData, meta interfa obj["sslCertificates"] = obj["certificateManagerCertificates"] delete(obj, "certificateManagerCertificates") } + + // Send null if serverTlsPolicy is not set. Without this, Terraform would not send any value for `serverTlsPolicy` + // in the "PATCH" payload so if you were to remove a server TLS policy from a target HTTPS proxy, it would NOT remove + // the association. + if _, ok := obj["serverTlsPolicy"]; !ok { + obj["serverTlsPolicy"] = nil + } + return obj, nil } @@ -1013,6 +1101,14 @@ func resourceComputeTargetHttpsProxyUpdateEncoder(d *schema.ResourceData, meta i obj["sslCertificates"] = obj["certificateManagerCertificates"] delete(obj, "certificateManagerCertificates") } + + // Send null if serverTlsPolicy is not set. Without this, Terraform would not send any value for `serverTlsPolicy` + // in the "PATCH" payload so if you were to remove a server TLS policy from a target HTTPS proxy, it would NOT remove + // the association. 
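// Illustrative sketch (editorial note, not part of the vendored change): the
// explicit nil entry matters because encoding/json emits a present nil map
// value as JSON null, while an absent key is simply omitted -- and the PATCH
// endpoint only clears fields it actually receives. A minimal, runnable
// demonstration:
//
//	package main
//
//	import (
//		"encoding/json"
//		"fmt"
//	)
//
//	func main() {
//		omitted := map[string]interface{}{"fingerprint": "abc"}
//		cleared := map[string]interface{}{"fingerprint": "abc", "serverTlsPolicy": nil}
//		a, _ := json.Marshal(omitted) // {"fingerprint":"abc"}: policy association left in place
//		b, _ := json.Marshal(cleared) // {"fingerprint":"abc","serverTlsPolicy":null}: association cleared
//		fmt.Println(string(a))
//		fmt.Println(string(b))
//	}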
+ if _, ok := obj["serverTlsPolicy"]; !ok { + obj["serverTlsPolicy"] = nil + } + return obj, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/data_source_google_container_cluster.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/data_source_google_container_cluster.go index 93f9758e99b..b46db355e85 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/data_source_google_container_cluster.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/data_source_google_container_cluster.go @@ -49,6 +49,15 @@ func datasourceContainerClusterRead(d *schema.ResourceData, meta interface{}) er return err } + // Set the "resource_labels" and "terraform_labels" fields to the value of the "effective_labels" field. + effectiveLabels := d.Get("effective_labels") + if err := d.Set("resource_labels", effectiveLabels); err != nil { + return fmt.Errorf("Error setting labels in data source: %s", err) + } + if err := d.Set("terraform_labels", effectiveLabels); err != nil { + return fmt.Errorf("Error setting terraform_labels in data source: %s", err) + } + if d.Id() == "" { return fmt.Errorf("%s not found", id) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/node_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/node_config.go index 241355bbbaa..1725a21890f 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/node_config.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/node_config.go @@ -3,10 +3,14 @@ package container import ( + "log" + "time" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "google.golang.org/api/container/v1" ) @@ -71,6 +75,18 @@ func schemaContainerdConfig() *schema.Schema { } } +// Note: this is a bool in the API, but implemented as a string enum in the +// schema to make it easier to accept API-level defaults. +func schemaInsecureKubeletReadonlyPortEnabled() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`.
Possible values: `TRUE`, `FALSE`.", + ValidateFunc: validation.StringInSlice([]string{"FALSE", "TRUE"}, false), + } +} + func schemaLoggingVariant() *schema.Schema { return &schema.Schema{ Type: schema.TypeString, @@ -81,19 +97,18 @@ func schemaLoggingVariant() *schema.Schema { } } -func schemaGcfsConfig(forceNew bool) *schema.Schema { +func schemaGcfsConfig() *schema.Schema { return &schema.Schema{ Type: schema.TypeList, Optional: true, + Computed: true, MaxItems: 1, Description: `GCFS configuration for this node.`, - ForceNew: forceNew, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enabled": { Type: schema.TypeBool, Required: true, - ForceNew: forceNew, Description: `Whether or not GCFS is enabled`, }, }, @@ -128,13 +143,10 @@ func schemaNodeConfig() *schema.Schema { }, "guest_accelerator": { - Type: schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - // Legacy config mode allows removing GPU's from an existing resource - // See https://www.terraform.io/docs/configuration/attr-as-blocks.html - ConfigMode: schema.SchemaConfigModeAttr, + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, Description: `List of the type and count of accelerator cards attached to the instance.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -155,8 +167,8 @@ func schemaNodeConfig() *schema.Schema { Type: schema.TypeList, MaxItems: 1, Optional: true, + Computed: true, ForceNew: true, - ConfigMode: schema.SchemaConfigModeAttr, Description: `Configuration for auto installation of GPU driver.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -181,7 +193,6 @@ func schemaNodeConfig() *schema.Schema { MaxItems: 1, Optional: true, ForceNew: true, - ConfigMode: schema.SchemaConfigModeAttr, Description: `Configuration for GPU sharing.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -301,7 +312,7 @@ func schemaNodeConfig() *schema.Schema { }, }, - "gcfs_config": schemaGcfsConfig(true), + "gcfs_config": schemaGcfsConfig(), "gvnic": { Type: schema.TypeList, @@ -424,6 +435,14 @@ func schemaNodeConfig() *schema.Schema { Description: `The list of instance tags applied to all nodes.`, }, + "storage_pools": { + Type: schema.TypeList, + ForceNew: true, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The list of Storage Pools where boot disks are provisioned.`, + }, + "shielded_instance_config": { Type: schema.TypeList, Optional: true, @@ -531,13 +550,14 @@ func schemaNodeConfig() *schema.Schema { "kubelet_config": { Type: schema.TypeList, Optional: true, + Computed: true, MaxItems: 1, Description: `Node kubelet configs.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "cpu_manager_policy": { Type: schema.TypeString, - Required: true, + Optional: true, ValidateFunc: validation.StringInSlice([]string{"static", "none", ""}, false), Description: `Control the CPU management policy on the node.`, }, @@ -551,6 +571,7 @@ func schemaNodeConfig() *schema.Schema { Optional: true, Description: `Set the CPU CFS quota period value 'cpu.cfs_period_us'.`, }, + "insecure_kubelet_readonly_port_enabled": schemaInsecureKubeletReadonlyPortEnabled(), "pod_pids_limit": { Type: schema.TypeInt, Optional: true, @@ -581,6 +602,26 @@ func schemaNodeConfig() *schema.Schema { Description: `cgroupMode specifies the cgroup mode to be used on the node.`, DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("CGROUP_MODE_UNSPECIFIED"), }, + "hugepages_config": { + Type: schema.TypeList, + Optional: true, + 
MaxItems: 1, + Description: `Amounts for 2M and 1G hugepages.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hugepage_size_2m": { + Type: schema.TypeInt, + Optional: true, + Description: `Amount of 2M hugepages.`, + }, + "hugepage_size_1g": { + Type: schema.TypeInt, + Optional: true, + Description: `Amount of 1G hugepages.`, + }, + }, + }, + }, }, }, }, @@ -722,6 +763,23 @@ func schemaNodeConfig() *schema.Schema { } } +// Separate since this currently only supports a single value -- a subset of +// the overall NodeKubeletConfig +func schemaNodePoolAutoConfigNodeKubeletConfig() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Node kubelet configs.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "insecure_kubelet_readonly_port_enabled": schemaInsecureKubeletReadonlyPortEnabled(), + }, + }, + } +} + func expandNodeConfigDefaults(configured interface{}) *container.NodeConfigDefaults { configs := configured.([]interface{}) if len(configs) == 0 || configs[0] == nil { @@ -731,6 +789,11 @@ func expandNodeConfigDefaults(configured interface{}) *container.NodeConfigDefau nodeConfigDefaults := &container.NodeConfigDefaults{} nodeConfigDefaults.ContainerdConfig = expandContainerdConfig(config["containerd_config"]) + if v, ok := config["insecure_kubelet_readonly_port_enabled"]; ok { + nodeConfigDefaults.NodeKubeletConfig = &container.NodeKubeletConfig{ + InsecureKubeletReadonlyPortEnabled: expandInsecureKubeletReadonlyPortEnabled(v), + } + } if variant, ok := config["logging_variant"]; ok { nodeConfigDefaults.LoggingConfig = &container.NodePoolLoggingConfig{ VariantConfig: &container.LoggingVariantConfig{ @@ -738,6 +801,14 @@ func expandNodeConfigDefaults(configured interface{}) *container.NodeConfigDefau }, } } + + if v, ok := config["gcfs_config"]; ok && len(v.([]interface{})) > 0 { + gcfsConfig := v.([]interface{})[0].(map[string]interface{}) + nodeConfigDefaults.GcfsConfig = &container.GcfsConfig{ + Enabled: gcfsConfig["enabled"].(bool), + } + } + return nodeConfigDefaults } @@ -944,6 +1015,16 @@ func expandNodeConfig(v interface{}) *container.NodeConfig { nc.Tags = tags } + if v, ok := nodeConfig["storage_pools"]; ok { + spList := v.([]interface{}) + storagePools := []string{} + for _, v := range spList { + if v != nil { + storagePools = append(storagePools, v.(string)) + } + } + nc.StoragePools = storagePools + } if v, ok := nodeConfig["shielded_instance_config"]; ok && len(v.([]interface{})) > 0 { conf := v.([]interface{})[0].(map[string]interface{}) nc.ShieldedInstanceConfig = &container.ShieldedInstanceConfig{ @@ -1023,6 +1104,10 @@ func expandNodeConfig(v interface{}) *container.NodeConfig { } func expandResourceManagerTags(v interface{}) *container.ResourceManagerTags { + if v == nil { + return nil + } + rmts := make(map[string]string) if v != nil { @@ -1054,6 +1139,13 @@ func expandWorkloadMetadataConfig(v interface{}) *container.WorkloadMetadataConf return wmc } +func expandInsecureKubeletReadonlyPortEnabled(v interface{}) bool { + if v == "TRUE" { + return true + } + return false +} + func expandKubeletConfig(v interface{}) *container.NodeKubeletConfig { if v == nil { return nil @@ -1074,6 +1166,10 @@ func expandKubeletConfig(v interface{}) *container.NodeKubeletConfig { if cpuCfsQuotaPeriod, ok := cfg["cpu_cfs_quota_period"]; ok { kConfig.CpuCfsQuotaPeriod = cpuCfsQuotaPeriod.(string) } + if insecureKubeletReadonlyPortEnabled, ok := 
cfg["insecure_kubelet_readonly_port_enabled"]; ok { + kConfig.InsecureKubeletReadonlyPortEnabled = expandInsecureKubeletReadonlyPortEnabled(insecureKubeletReadonlyPortEnabled) + kConfig.ForceSendFields = append(kConfig.ForceSendFields, "InsecureKubeletReadonlyPortEnabled") + } if podPidsLimit, ok := cfg["pod_pids_limit"]; ok { kConfig.PodPidsLimit = int64(podPidsLimit.(int)) } @@ -1103,6 +1199,10 @@ func expandLinuxNodeConfig(v interface{}) *container.LinuxNodeConfig { linuxNodeConfig.CgroupMode = cgroupMode } + if v, ok := cfg["hugepages_config"]; ok { + linuxNodeConfig.Hugepages = expandHugepagesConfig(v) + } + return linuxNodeConfig } @@ -1127,6 +1227,32 @@ func expandCgroupMode(cfg map[string]interface{}) string { return cgroupMode.(string) } +func expandHugepagesConfig(v interface{}) *container.HugepagesConfig { + if v == nil { + return nil + } + ls := v.([]interface{}) + if len(ls) == 0 { + return nil + } + if ls[0] == nil { + return &container.HugepagesConfig{} + } + cfg := ls[0].(map[string]interface{}) + + hugepagesConfig := &container.HugepagesConfig{} + + if v, ok := cfg["hugepage_size_2m"]; ok { + hugepagesConfig.HugepageSize2m = int64(v.(int)) + } + + if v, ok := cfg["hugepage_size_1g"]; ok { + hugepagesConfig.HugepageSize1g = int64(v.(int)) + } + + return hugepagesConfig +} + func expandContainerdConfig(v interface{}) *container.ContainerdConfig { if v == nil { return nil @@ -1263,8 +1389,12 @@ func flattenNodeConfigDefaults(c *container.NodeConfigDefaults) []map[string]int result[0]["containerd_config"] = flattenContainerdConfig(c.ContainerdConfig) + result[0]["insecure_kubelet_readonly_port_enabled"] = flattenInsecureKubeletReadonlyPortEnabled(c.NodeKubeletConfig) + result[0]["logging_variant"] = flattenLoggingVariant(c.LoggingConfig) + result[0]["gcfs_config"] = flattenGcfsConfig(c.GcfsConfig) + return result } @@ -1307,6 +1437,7 @@ func flattenNodeConfig(c *container.NodeConfig, v interface{}) []map[string]inte "tags": c.Tags, "preemptible": c.Preemptible, "secondary_boot_disks": flattenSecondaryBootDisks(c.SecondaryBootDisks), + "storage_pools": c.StoragePools, "spot": c.Spot, "min_cpu_platform": c.MinCpuPlatform, "shielded_instance_config": flattenShieldedInstanceConfig(c.ShieldedInstanceConfig), @@ -1333,6 +1464,10 @@ func flattenNodeConfig(c *container.NodeConfig, v interface{}) []map[string]inte } func flattenResourceManagerTags(c *container.ResourceManagerTags) map[string]interface{} { + if c == nil { + return nil + } + rmt := make(map[string]interface{}) if c != nil { @@ -1428,6 +1563,14 @@ func flattenSecondaryBootDisks(c []*container.SecondaryBootDisk) []map[string]in return result } +func flattenInsecureKubeletReadonlyPortEnabled(c *container.NodeKubeletConfig) string { + // Convert bool from the API to the enum values used internally + if c != nil && c.InsecureKubeletReadonlyPortEnabled { + return "TRUE" + } + return "FALSE" +} + func flattenLoggingVariant(c *container.NodePoolLoggingConfig) string { variant := "DEFAULT" if c != nil && c.VariantConfig != nil && c.VariantConfig.Variant != "" { @@ -1519,10 +1662,21 @@ func flattenKubeletConfig(c *container.NodeKubeletConfig) []map[string]interface result := []map[string]interface{}{} if c != nil { result = append(result, map[string]interface{}{ - "cpu_cfs_quota": c.CpuCfsQuota, - "cpu_cfs_quota_period": c.CpuCfsQuotaPeriod, - "cpu_manager_policy": c.CpuManagerPolicy, - "pod_pids_limit": c.PodPidsLimit, + "cpu_cfs_quota": c.CpuCfsQuota, + "cpu_cfs_quota_period": c.CpuCfsQuotaPeriod, + "cpu_manager_policy": 
c.CpuManagerPolicy, + "insecure_kubelet_readonly_port_enabled": flattenInsecureKubeletReadonlyPortEnabled(c), + "pod_pids_limit": c.PodPidsLimit, + }) + } + return result +} + +func flattenNodePoolAutoConfigNodeKubeletConfig(c *container.NodeKubeletConfig) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "insecure_kubelet_readonly_port_enabled": flattenInsecureKubeletReadonlyPortEnabled(c), }) } return result @@ -1532,8 +1686,20 @@ func flattenLinuxNodeConfig(c *container.LinuxNodeConfig) []map[string]interface result := []map[string]interface{}{} if c != nil { result = append(result, map[string]interface{}{ - "sysctls": c.Sysctls, - "cgroup_mode": c.CgroupMode, + "sysctls": c.Sysctls, + "cgroup_mode": c.CgroupMode, + "hugepages_config": flattenHugepagesConfig(c.Hugepages), + }) + } + return result +} + +func flattenHugepagesConfig(c *container.HugepagesConfig) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "hugepage_size_2m": c.HugepageSize2m, + "hugepage_size_1g": c.HugepageSize1g, }) } return result @@ -1644,3 +1810,533 @@ func flattenFastSocket(c *container.FastSocket) []map[string]interface{} { } return result } + +// This portion of nodePoolUpdate() is moved here to be shared with +// node pool updates in `resource_container_cluster` +func nodePoolNodeConfigUpdate(d *schema.ResourceData, config *transport_tpg.Config, nodePoolInfo *NodePoolInformation, prefix, name string, timeout time.Duration) error { + + // Nodepool write-lock will be acquired when update function is called. + npLockKey := nodePoolInfo.nodePoolLockKey(name) + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + if d.HasChange(prefix + "node_config") { + + if d.HasChange(prefix + "node_config.0.logging_variant") { + if v, ok := d.GetOk(prefix + "node_config.0.logging_variant"); ok { + loggingVariant := v.(string) + req := &container.UpdateNodePoolRequest{ + Name: name, + LoggingConfig: &container.NodePoolLoggingConfig{ + VariantConfig: &container.LoggingVariantConfig{ + Variant: loggingVariant, + }, + }, + } + + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool logging_variant", userAgent, + timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] Updated logging_variant for node pool %s", name) + } + } + + if d.HasChange("node_config.0.disk_size_gb") || + d.HasChange("node_config.0.disk_type") || + d.HasChange("node_config.0.machine_type") || + d.HasChange("node_config.0.storage_pools") { + req := &container.UpdateNodePoolRequest{ + Name: name, + DiskSizeGb: int64(d.Get("node_config.0.disk_size_gb").(int)), + DiskType: d.Get("node_config.0.disk_type").(string), + MachineType: d.Get("node_config.0.machine_type").(string), + } + if v, ok := d.GetOk("node_config.0.storage_pools"); ok { + spList := v.([]interface{}) + 
storagePools := []string{} + for _, v := range spList { + if v != nil { + storagePools = append(storagePools, v.(string)) + } + } + req.StoragePools = storagePools + } + + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool disk_size_gb/disk_type/machine_type/storage_pools", userAgent, + timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] Updated disk disk_size_gb/disk_type/machine_type/storage_pools for Node Pool %s", d.Id()) + } + + if d.HasChange(prefix + "node_config.0.taint") { + req := &container.UpdateNodePoolRequest{ + Name: name, + } + if v, ok := d.GetOk(prefix + "node_config.0.taint"); ok { + taintsList := v.([]interface{}) + taints := make([]*container.NodeTaint, 0, len(taintsList)) + for _, v := range taintsList { + if v != nil { + data := v.(map[string]interface{}) + taint := &container.NodeTaint{ + Key: data["key"].(string), + Value: data["value"].(string), + Effect: data["effect"].(string), + } + taints = append(taints, taint) + } + } + ntaints := &container.NodeTaints{ + Taints: taints, + } + req.Taints = ntaints + } + + if req.Taints == nil { + taints := make([]*container.NodeTaint, 0) + ntaints := &container.NodeTaints{ + Taints: taints, + } + req.Taints = ntaints + } + + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool taints", userAgent, + timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] Updated taints for Node Pool %s", d.Id()) + } + + if d.HasChange(prefix + "node_config.0.tags") { + req := &container.UpdateNodePoolRequest{ + Name: name, + } + if v, ok := d.GetOk(prefix + "node_config.0.tags"); ok { + tagsList := v.([]interface{}) + tags := []string{} + for _, v := range tagsList { + if v != nil { + tags = append(tags, v.(string)) + } + } + ntags := &container.NetworkTags{ + Tags: tags, + } + req.Tags = ntags + } + + // sets tags to the empty list when the user removes a previously defined list of tags entirely + // aka the node pool goes from having tags to no longer having any + if req.Tags == nil { + tags := []string{} + ntags := &container.NetworkTags{ + Tags: tags, + } + req.Tags = ntags + } + + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err :=
clusterNodePoolsUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool tags", userAgent, + timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] Updated tags for node pool %s", name) + } + + if d.HasChange(prefix + "node_config.0.resource_manager_tags") { + req := &container.UpdateNodePoolRequest{ + Name: name, + } + if v, ok := d.GetOk(prefix + "node_config.0.resource_manager_tags"); ok { + req.ResourceManagerTags = expandResourceManagerTags(v) + } + + // sets resource manager tags to the empty list when the user removes a previously defined list of tags entirely + // aka the node pool goes from having tags to no longer having any + if req.ResourceManagerTags == nil { + tags := make(map[string]string) + rmTags := &container.ResourceManagerTags{ + Tags: tags, + } + req.ResourceManagerTags = rmTags + } + + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool resource manager tags", userAgent, + timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] Updated resource manager tags for node pool %s", name) + } + + if d.HasChange(prefix + "node_config.0.resource_labels") { + req := &container.UpdateNodePoolRequest{ + Name: name, + } + + if v, ok := d.GetOk(prefix + "node_config.0.resource_labels"); ok { + resourceLabels := v.(map[string]interface{}) + req.ResourceLabels = &container.ResourceLabels{ + Labels: tpgresource.ConvertStringMap(resourceLabels), + } + } + + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool resource labels", userAgent, + timeout) + } + + // Call update serially.
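// Illustrative sketch (editorial note, not part of the vendored change):
// every update block in this function follows the same shape -- build a
// request, wrap the API call in an updateF closure, then retry the closure
// while the node pool is locked by an incompatible operation. A standalone
// model of that control flow, using a hypothetical retryWhile helper in
// place of retryWhileIncompatibleOperation:
//
//	package main
//
//	import (
//		"errors"
//		"fmt"
//		"time"
//	)
//
//	var errIncompatible = errors.New("incompatible operation in progress")
//
//	// retryWhile re-runs fn until it stops failing with errIncompatible
//	// or the timeout elapses.
//	func retryWhile(timeout time.Duration, fn func() error) error {
//		deadline := time.Now().Add(timeout)
//		for {
//			err := fn()
//			if !errors.Is(err, errIncompatible) || time.Now().After(deadline) {
//				return err
//			}
//			time.Sleep(100 * time.Millisecond)
//		}
//	}
//
//	func main() {
//		attempts := 0
//		updateF := func() error {
//			if attempts++; attempts < 3 {
//				return errIncompatible
//			}
//			return nil // the real closure issues the Update call and waits on the operation
//		}
//		fmt.Println(retryWhile(5*time.Second, updateF), "after", attempts, "attempts")
//	}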
+ if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] Updated resource labels for node pool %s", name) + } + + if d.HasChange(prefix + "node_config.0.labels") { + req := &container.UpdateNodePoolRequest{ + Name: name, + } + + if v, ok := d.GetOk(prefix + "node_config.0.labels"); ok { + labels := v.(map[string]interface{}) + req.Labels = &container.NodeLabels{ + Labels: tpgresource.ConvertStringMap(labels), + } + } + + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool labels", userAgent, + timeout) + } + + // Call update serially. + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] Updated labels for node pool %s", name) + } + + if d.HasChange(prefix + "node_config.0.image_type") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredNodePoolId: name, + DesiredImageType: d.Get(prefix + "node_config.0.image_type").(string), + }, + } + + updateF := func() error { + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(nodePoolInfo.parent(), req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, "updating GKE node pool", userAgent, + timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] Updated image type in Node Pool %s", d.Id()) + } + + if d.HasChange(prefix + "node_config.0.workload_metadata_config") { + req := &container.UpdateNodePoolRequest{ + NodePoolId: name, + WorkloadMetadataConfig: expandWorkloadMetadataConfig( + d.Get(prefix + "node_config.0.workload_metadata_config")), + } + if req.WorkloadMetadataConfig == nil { + req.WorkloadMetadataConfig = &container.WorkloadMetadataConfig{} + req.ForceSendFields = []string{"WorkloadMetadataConfig"} + } + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool workload_metadata_config", userAgent, + timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] Updated workload_metadata_config for node pool %s", name) + } + + if d.HasChange(prefix + "node_config.0.gcfs_config") { + gcfsEnabled := bool(d.Get(prefix + 
"node_config.0.gcfs_config.0.enabled").(bool)) + req := &container.UpdateNodePoolRequest{ + NodePoolId: name, + GcfsConfig: &container.GcfsConfig{ + Enabled: gcfsEnabled, + }, + } + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool gcfs_config", userAgent, + timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] Updated gcfs_config for node pool %s", name) + } + + if d.HasChange(prefix + "node_config.0.kubelet_config") { + req := &container.UpdateNodePoolRequest{ + NodePoolId: name, + KubeletConfig: expandKubeletConfig( + d.Get(prefix + "node_config.0.kubelet_config")), + } + if req.KubeletConfig == nil { + req.KubeletConfig = &container.NodeKubeletConfig{} + req.ForceSendFields = []string{"KubeletConfig"} + } + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool kubelet_config", userAgent, + timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] Updated kubelet_config for node pool %s", name) + } + if d.HasChange(prefix + "node_config.0.linux_node_config") { + req := &container.UpdateNodePoolRequest{ + NodePoolId: name, + LinuxNodeConfig: expandLinuxNodeConfig( + d.Get(prefix + "node_config.0.linux_node_config")), + } + if req.LinuxNodeConfig == nil { + req.LinuxNodeConfig = &container.LinuxNodeConfig{} + req.ForceSendFields = []string{"LinuxNodeConfig"} + } + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool linux_node_config", userAgent, + timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] Updated linux_node_config for node pool %s", name) + } + if d.HasChange(prefix + "node_config.0.fast_socket") { + req := &container.UpdateNodePoolRequest{ + NodePoolId: name, + FastSocket: &container.FastSocket{}, + } + if v, ok := d.GetOk(prefix + "node_config.0.fast_socket"); ok { + fastSocket := v.([]interface{})[0].(map[string]interface{}) + req.FastSocket = &container.FastSocket{ + Enabled: 
fastSocket["enabled"].(bool), + } + } + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool fast_socket", userAgent, + timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] Updated fast_socket for node pool %s", name) + } + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_cluster.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_cluster.go index 2b4b499ac70..2dae99d84a4 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_cluster.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_cluster.go @@ -87,6 +87,7 @@ var ( "addons_config.0.config_connector_config", "addons_config.0.gcs_fuse_csi_driver_config", "addons_config.0.stateful_ha_config", + "addons_config.0.ray_operator_config", } privateClusterConfigKeys = []string{ @@ -97,12 +98,6 @@ var ( "private_cluster_config.0.master_global_access_config", } - forceNewClusterNodeConfigFields = []string{ - "labels", - "workload_metadata_config", - "resource_manager_tags", - } - suppressDiffForAutopilot = schema.SchemaDiffSuppressFunc(func(k, oldValue, newValue string, d *schema.ResourceData) bool { if v, _ := d.Get("enable_autopilot").(bool); v { return true @@ -119,19 +114,6 @@ var ( }) ) -// This uses the node pool nodeConfig schema but sets -// node-pool-only updatable fields to ForceNew -func clusterSchemaNodeConfig() *schema.Schema { - nodeConfigSch := schemaNodeConfig() - schemaMap := nodeConfigSch.Elem.(*schema.Resource).Schema - for _, k := range forceNewClusterNodeConfigFields { - if sch, ok := schemaMap[k]; ok { - tpgresource.ChangeFieldSchemaToForceNew(sch) - } - } - return nodeConfigSch -} - // Defines default node pool settings for the entire cluster. These settings are // overridden if specified on the specific NodePool object.
func clusterSchemaNodePoolDefaults() *schema.Schema { @@ -150,8 +132,10 @@ func clusterSchemaNodePoolDefaults() *schema.Schema { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "containerd_config": schemaContainerdConfig(), - "logging_variant": schemaLoggingVariant(), + "containerd_config": schemaContainerdConfig(), + "gcfs_config": schemaGcfsConfig(), + "insecure_kubelet_readonly_port_enabled": schemaInsecureKubeletReadonlyPortEnabled(), + "logging_variant": schemaLoggingVariant(), }, }, }, @@ -204,6 +188,7 @@ func ResourceContainerCluster() *schema.Resource { containerClusterSurgeSettingsCustomizeDiff, containerClusterEnableK8sBetaApisCustomizeDiff, containerClusterNodeVersionCustomizeDiff, + tpgresource.SetDiffForLabelsWithCustomizedName("resource_labels"), ), Timeouts: &schema.ResourceTimeout{ @@ -342,13 +327,12 @@ func ResourceContainerCluster() *schema.Resource { }, }, "gcp_filestore_csi_driver_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - AtLeastOneOf: addonsConfigKeys, - MaxItems: 1, - Description: `The status of the Filestore CSI driver addon, which allows the usage of filestore instance as volumes. Defaults to disabled; set enabled = true to enable.`, - ConflictsWith: []string{"enable_autopilot"}, + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `The status of the Filestore CSI driver addon, which allows the usage of filestore instance as volumes. Defaults to disabled for Standard clusters; set enabled = true to enable. It is enabled by default for Autopilot clusters; set enabled = true to enable it explicitly.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enabled": { @@ -477,6 +461,52 @@ func ResourceContainerCluster() *schema.Resource { }, }, }, + "ray_operator_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 3, + Description: `The status of the Ray Operator addon, which enables management of Ray AI/ML jobs on GKE. Defaults to disabled; set enabled = true to enable.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + "ray_cluster_logging_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `The status of Ray Logging, which scrapes Ray cluster logs to Cloud Logging. Defaults to disabled; set enabled = true to enable.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + "ray_cluster_monitoring_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `The status of Ray Cluster monitoring, which shows Ray cluster metrics in Cloud Console.
Defaults to disabled; set enabled = true to enable.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + }, + }, + }, }, }, }, @@ -744,6 +774,13 @@ func ResourceContainerCluster() *schema.Resource { }, }, }, + "auto_provisioning_locations": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The list of Google Compute Engine zones in which the NodePool's nodes can be created by NAP.`, + }, "autoscaling_profile": { Type: schema.TypeString, Default: "BALANCED", @@ -896,10 +933,10 @@ func ResourceContainerCluster() *schema.Resource { "enable_components": { Type: schema.TypeList, Required: true, - Description: `GKE components exposing logs. Valid values include SYSTEM_COMPONENTS, APISERVER, CONTROLLER_MANAGER, SCHEDULER, and WORKLOADS.`, + Description: `GKE components exposing logs. Valid values include SYSTEM_COMPONENTS, APISERVER, CONTROLLER_MANAGER, KCP_CONNECTION, KCP_SSHD, SCHEDULER, and WORKLOADS.`, Elem: &schema.Schema{ Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"SYSTEM_COMPONENTS", "APISERVER", "CONTROLLER_MANAGER", "SCHEDULER", "WORKLOADS"}, false), + ValidateFunc: validation.StringInSlice([]string{"SYSTEM_COMPONENTS", "APISERVER", "CONTROLLER_MANAGER", "KCP_CONNECTION", "KCP_SSHD", "SCHEDULER", "WORKLOADS"}, false), }, }, }, @@ -1092,20 +1129,9 @@ func ResourceContainerCluster() *schema.Resource { Description: `Whether or not the advanced datapath metrics are enabled.`, }, "enable_relay": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether or not Relay is enabled.`, - Default: false, - ConflictsWith: []string{"monitoring_config.0.advanced_datapath_observability_config.0.relay_mode"}, - }, - "relay_mode": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Deprecated: "Deprecated in favor of enable_relay field. Remove this attribute's configuration as this field will be removed in the next major release and enable_relay will become a required field.", - Description: `Mode used to make Relay available.`, - ValidateFunc: validation.StringInSlice([]string{"DISABLED", "INTERNAL_VPC_LB", "EXTERNAL_LB"}, false), - ConflictsWith: []string{"monitoring_config.0.advanced_datapath_observability_config.0.enable_relay"}, + Type: schema.TypeBool, + Required: true, + Description: `Whether or not Relay is enabled.`, }, }, }, @@ -1289,7 +1315,7 @@ func ResourceContainerCluster() *schema.Resource { }, }, - "node_config": clusterSchemaNodeConfig(), + "node_config": schemaNodeConfig(), "node_pool": { Type: schema.TypeList, @@ -1313,6 +1339,7 @@ func ResourceContainerCluster() *schema.Resource { Description: `Node pool configs that apply to all auto-provisioned node pools in autopilot clusters and node auto-provisioning enabled clusters.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "node_kubelet_config": schemaNodePoolAutoConfigNodeKubeletConfig(), "network_tags": { Type: schema.TypeList, Optional: true, @@ -1345,6 +1372,23 @@ func ResourceContainerCluster() *schema.Resource { Description: `The Kubernetes version on the nodes. Must either be unset or set to the same value as min_master_version on create. Defaults to the default version set by GKE which is not necessarily the latest version. This only affects nodes in the default node pool. 
While a fuzzy version can be specified, it's recommended that you specify explicit versions as Terraform will see spurious diffs when fuzzy versions are used. See the google_container_engine_versions data source's version_prefix field to approximate fuzzy versions in a Terraform-compatible way. To update nodes in other node pools, use the version attribute on the node pool.`, }, + "secret_manager_config": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration for the Secret Manager feature.`, + MaxItems: 1, + DiffSuppressFunc: SecretManagerCfgSuppress, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Enable the Secret Manager CSI component.`, + }, + }, + }, + }, + "project": { Type: schema.TypeString, Optional: true, @@ -1581,10 +1625,25 @@ func ResourceContainerCluster() *schema.Resource { }, "resource_labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The GCE resource labels (a map of key/value pairs) to be applied to the cluster. + + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + }, + "terraform_labels": { Type: schema.TypeMap, - Optional: true, + Computed: true, + Description: `The combination of labels configured directly on the resource and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `All labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, - Description: `The GCE resource labels (a map of key/value pairs) to be applied to the cluster.`, }, "label_fingerprint": { @@ -1723,12 +1782,13 @@ func ResourceContainerCluster() *schema.Resource { "channel": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{"UNSPECIFIED", "RAPID", "REGULAR", "STABLE"}, false), + ValidateFunc: validation.StringInSlice([]string{"UNSPECIFIED", "RAPID", "REGULAR", "STABLE", "EXTENDED"}, false), Description: `The selected release channel. Accepted values are: * UNSPECIFIED: Not set. * RAPID: Weekly upgrade cadence; Early testers and developers who require new features. * REGULAR: Multiple per month upgrade cadence; Production users who need features not yet offered in the Stable channel. -* STABLE: Every few months upgrade cadence; Production users who need stability above all else, and for whom frequent upgrades are too risky.`, +* STABLE: Every few months upgrade cadence; Production users who need stability above all else, and for whom frequent upgrades are too risky. +* EXTENDED: GKE provides extended support for Kubernetes minor versions through the Extended channel.
With this channel, you can stay on a minor version for up to 24 months.`, }, }, @@ -1785,6 +1845,13 @@ func ResourceContainerCluster() *schema.Resource { Description: `Whether L4ILB Subsetting is enabled for this cluster.`, Default: false, }, + "enable_multi_networking": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether multi-networking is enabled for this cluster.`, + Default: false, + }, "private_ipv6_google_access": { Type: schema.TypeString, Optional: true, @@ -1854,6 +1921,11 @@ func ResourceContainerCluster() *schema.Resource { Description: `Configuration for Cloud DNS for Kubernetes Engine.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "additive_vpc_scope_dns_domain": { + Type: schema.TypeString, + Description: `Enable additive VPC scope DNS in a GKE cluster.`, + Optional: true, + }, "cluster_dns": { Type: schema.TypeString, Default: "PROVIDER_UNSPECIFIED", @@ -1938,18 +2010,18 @@ func ResourceContainerCluster() *schema.Resource { // by only comparing the blocks with a positive count and ignoring those with count=0 // // One quirk with this approach is that configs with mixed count=0 and count>0 accelerator blocks will -// show a confusing diff if one of there are config changes that result in a legitimate diff as the count=0 +// show a confusing diff if there are config changes that result in a legitimate diff as the count=0 // blocks will not be in state. -// -// This could also be modelled by setting `guest_accelerator = []` in the config. However since the -// previous syntax requires that schema.SchemaConfigModeAttr is set on the field it is advisable that -// we have a work around for removing guest accelerators. Also Terraform 0.11 cannot use dynamic blocks -// so this isn't a solution for module authors who want to dynamically omit guest accelerators -// See https://github.com/hashicorp/terraform-provider-google/issues/3786 -func resourceNodeConfigEmptyGuestAccelerator(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { +func resourceNodeConfigEmptyGuestAccelerator(_ context.Context, diff *schema.ResourceDiff, meta any) error { old, new := diff.GetChange("node_config.0.guest_accelerator") - oList := old.([]interface{}) - nList := new.([]interface{}) + oList, ok := old.([]any) + if !ok { + return fmt.Errorf("type assertion failed, expected []any, got %T", old) + } + nList, ok := new.([]any) + if !ok { + return fmt.Errorf("type assertion failed, expected []any, got %T", new) + } if len(nList) == len(oList) || len(nList) == 0 { return nil @@ -1959,9 +2031,12 @@ func resourceNodeConfigEmptyGuestAccelerator(_ context.Context, diff *schema.Res // will be longer than the current state. // this index tracks the location of positive count accelerator blocks index := 0 - for i, item := range nList { - accel := item.(map[string]interface{}) - if accel["count"].(int) == 0 { + for _, item := range nList { + nAccel, ok := item.(map[string]any) + if !ok { + return fmt.Errorf("type assertion failed, expected map[string]any, got %T", item) + } + if nAccel["count"].(int) == 0 { hasAcceleratorWithEmptyCount = true // Ignore any 'empty' accelerators because they aren't sent to the API continue @@ -1972,7 +2047,14 @@ func resourceNodeConfigEmptyGuestAccelerator(_ context.Context, diff *schema.Res // This will prevent array index overruns return nil } - if !reflect.DeepEqual(nList[i], oList[index]) { + // Delete Optional + Computed field from old and new map.
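// Illustrative sketch (editorial note, not part of the vendored change): the
// customize-diff above reduces to "skip count=0 accelerator blocks, strip the
// Optional+Computed installation-config key, then compare the remaining
// blocks in order". Modeled standalone on plain data:
//
//	package main
//
//	import (
//		"fmt"
//		"reflect"
//	)
//
//	// equalIgnoringEmpty reports whether the count>0 entries of next match
//	// prior pairwise once the computed key is removed from both sides.
//	func equalIgnoringEmpty(prior, next []map[string]any) bool {
//		index := 0
//		for _, item := range next {
//			if item["count"].(int) == 0 {
//				continue // empty blocks are never sent to the API
//			}
//			if index >= len(prior) {
//				return false
//			}
//			delete(prior[index], "gpu_driver_installation_config")
//			delete(item, "gpu_driver_installation_config")
//			if !reflect.DeepEqual(prior[index], item) {
//				return false
//			}
//			index++
//		}
//		return index == len(prior)
//	}
//
//	func main() {
//		prior := []map[string]any{{"type": "nvidia-tesla-t4", "count": 1}}
//		next := []map[string]any{
//			{"type": "nvidia-tesla-t4", "count": 0}, // ignored
//			{"type": "nvidia-tesla-t4", "count": 1},
//		}
//		fmt.Println(equalIgnoringEmpty(prior, next)) // true
//	}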
+ oAccel, ok := oList[index].(map[string]any) + if !ok { + return fmt.Errorf("type assertion failed, expected map[string]any, got %T", oList[index]) + } + delete(nAccel, "gpu_driver_installation_config") + delete(oAccel, "gpu_driver_installation_config") + if !reflect.DeepEqual(oAccel, nAccel) { return nil } index += 1 @@ -2054,6 +2136,7 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er AddonsConfig: expandClusterAddonsConfig(d.Get("addons_config")), EnableKubernetesAlpha: d.Get("enable_kubernetes_alpha").(bool), IpAllocationPolicy: ipAllocationBlock, + SecretManagerConfig: expandSecretManagerConfig(d.Get("secret_manager_config")), Autoscaling: expandClusterAutoscaling(d.Get("cluster_autoscaling"), d), BinaryAuthorization: expandBinaryAuthorization(d.Get("binary_authorization")), Autopilot: &container.Autopilot{ @@ -2072,11 +2155,12 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er EnableL4ilbSubsetting: d.Get("enable_l4_ilb_subsetting").(bool), DnsConfig: expandDnsConfig(d.Get("dns_config")), GatewayApiConfig: expandGatewayApiConfig(d.Get("gateway_api_config")), + EnableMultiNetworking: d.Get("enable_multi_networking").(bool), }, MasterAuth: expandMasterAuth(d.Get("master_auth")), NotificationConfig: expandNotificationConfig(d.Get("notification_config")), ConfidentialNodes: expandConfidentialNodes(d.Get("confidential_nodes")), - ResourceLabels: tpgresource.ExpandStringMap(d, "resource_labels"), + ResourceLabels: tpgresource.ExpandStringMap(d, "effective_labels"), NodePoolAutoConfig: expandNodePoolAutoConfig(d.Get("node_pool_auto_config")), CostManagementConfig: expandCostManagementConfig(d.Get("cost_management_config")), EnableK8sBetaApis: expandEnableK8sBetaApis(d.Get("enable_k8s_beta_apis"), nil), @@ -2593,6 +2677,9 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro if err := d.Set("enable_intranode_visibility", cluster.NetworkConfig.EnableIntraNodeVisibility); err != nil { return fmt.Errorf("Error setting enable_intranode_visibility: %s", err) } + if err := d.Set("enable_multi_networking", cluster.NetworkConfig.EnableMultiNetworking); err != nil { + return fmt.Errorf("Error setting enable_multi_networking: %s", err) + } if err := d.Set("private_ipv6_google_access", cluster.NetworkConfig.PrivateIpv6GoogleAccess); err != nil { return fmt.Errorf("Error setting private_ipv6_google_access: %s", err) } @@ -2657,8 +2744,18 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro return err } - if err := d.Set("resource_labels", cluster.ResourceLabels); err != nil { - return fmt.Errorf("Error setting resource_labels: %s", err) + if err := d.Set("secret_manager_config", flattenSecretManagerConfig(cluster.SecretManagerConfig)); err != nil { + return err + } + + if err := tpgresource.SetLabels(cluster.ResourceLabels, d, "resource_labels"); err != nil { + return fmt.Errorf("Error setting labels: %s", err) + } + if err := tpgresource.SetLabels(cluster.ResourceLabels, d, "terraform_labels"); err != nil { + return fmt.Errorf("Error setting terraform_labels: %s", err) + } + if err := d.Set("effective_labels", cluster.ResourceLabels); err != nil { + return fmt.Errorf("Error setting effective_labels: %s", err) } if err := d.Set("label_fingerprint", cluster.LabelFingerprint); err != nil { return fmt.Errorf("Error setting label_fingerprint: %s", err) } @@ -3047,6 +3144,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er req :=
&container.UpdateClusterRequest{ Update: &container.ClusterUpdate{ DesiredEnableCiliumClusterwideNetworkPolicy: enabled, + ForceSendFields: []string{"DesiredEnableCiliumClusterwideNetworkPolicy"}, }, } updateF := updateFunc(req, "updating cilium clusterwide network policy") @@ -3419,36 +3517,15 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } if d.HasChange("node_config") { - if d.HasChange("node_config.0.image_type") { - it := d.Get("node_config.0.image_type").(string) - req := &container.UpdateClusterRequest{ - Update: &container.ClusterUpdate{ - DesiredImageType: it, - }, - } - updateF := func() error { - name := containerClusterFullName(project, location, clusterName) - clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) - if config.UserProjectOverride { - clusterUpdateCall.Header().Add("X-Goog-User-Project", project) - } - op, err := clusterUpdateCall.Do() - if err != nil { - return err - } - - // Wait until it's updated - return ContainerOperationWait(config, op, project, location, "updating GKE image type", userAgent, d.Timeout(schema.TimeoutUpdate)) - } + defaultPool := "default-pool" - // Call update serially. - if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { - return err - } - - log.Printf("[INFO] GKE cluster %s: image type has been updated to %s", d.Id(), it) + nodePoolInfo, err := extractNodePoolInformationFromCluster(d, config, clusterName) + if err != nil { + return err } + + if err := nodePoolNodeConfigUpdate(d, config, nodePoolInfo, "", defaultPool, d.Timeout(schema.TimeoutUpdate)); err != nil { + return err + } } if d.HasChange("notification_config") { @@ -3582,6 +3659,33 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er log.Printf("[INFO] GKE cluster %s database encryption config has been updated", d.Id()) } + if d.HasChange("secret_manager_config") { + c := d.Get("secret_manager_config") + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredSecretManagerConfig: expandSecretManagerConfig(c), + }, + } + + updateF := func() error { + name := containerClusterFullName(project, location, clusterName) + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterUpdateCall.Do() + if err != nil { + return err + } + // Wait until it's updated + return ContainerOperationWait(config, op, project, location, "updating secret manager csi driver config", userAgent, d.Timeout(schema.TimeoutUpdate)) + } + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] GKE cluster %s secret manager csi add-on has been updated", d.Id()) + } + if d.HasChange("workload_identity_config") { // Because GKE uses a non-RESTful update function, when removing the // feature you need to specify a fairly full request body or it fails: @@ -3662,8 +3766,8 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er log.Printf("[INFO] GKE cluster %s monitoring config has been updated", d.Id()) } - if d.HasChange("resource_labels") { - resourceLabels := d.Get("resource_labels").(map[string]interface{}) + if d.HasChange("effective_labels") { + resourceLabels := d.Get("effective_labels").(map[string]interface{}) labelFingerprint := d.Get("label_fingerprint").(string) req := &container.SetLabelsRequest{ ResourceLabels: 
tpgresource.ConvertStringMap(resourceLabels), @@ -3817,6 +3921,28 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } } + if d.HasChange("node_pool_defaults") && d.HasChange("node_pool_defaults.0.node_config_defaults.0.insecure_kubelet_readonly_port_enabled") { + if v, ok := d.GetOk("node_pool_defaults.0.node_config_defaults.0.insecure_kubelet_readonly_port_enabled"); ok { + insecureKubeletReadonlyPortEnabled := v.(string) + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredNodeKubeletConfig: &container.NodeKubeletConfig{ + InsecureKubeletReadonlyPortEnabled: expandInsecureKubeletReadonlyPortEnabled(insecureKubeletReadonlyPortEnabled), + ForceSendFields: []string{"InsecureKubeletReadonlyPortEnabled"}, + }, + }, + } + + updateF := updateFunc(req, "updating GKE cluster desired node pool insecure kubelet readonly port configuration defaults.") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s node pool insecure_kubelet_readonly_port_enabled default has been updated", d.Id()) + } + } + if d.HasChange("node_pool_defaults") && d.HasChange("node_pool_defaults.0.node_config_defaults.0.logging_variant") { if v, ok := d.GetOk("node_pool_defaults.0.node_config_defaults.0.logging_variant"); ok { loggingVariant := v.(string) @@ -3840,6 +3966,27 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } } + if d.HasChange("node_pool_defaults") && d.HasChange("node_pool_defaults.0.node_config_defaults.0.gcfs_config") { + if v, ok := d.GetOk("node_pool_defaults.0.node_config_defaults.0.gcfs_config"); ok { + gcfsConfig := v.([]interface{})[0].(map[string]interface{}) + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredGcfsConfig: &container.GcfsConfig{ + Enabled: gcfsConfig["enabled"].(bool), + }, + }, + } + + updateF := updateFunc(req, "updating GKE cluster desired gcfs config.") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s default gcfs config has been updated", d.Id()) + } + } + if d.HasChange("security_posture_config") { req := &container.UpdateClusterRequest{ Update: &container.ClusterUpdate{ @@ -3869,6 +4016,24 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } } + if d.HasChange("node_pool_auto_config.0.node_kubelet_config") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredNodePoolAutoConfigKubeletConfig: expandKubeletConfig( + d.Get("node_pool_auto_config.0.node_kubelet_config"), + ), + }, + } + + updateF := updateFunc(req, "updating GKE cluster node pool auto config node_kubelet_config parameters") + // Call update serially. 
+ if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s node pool auto config node_kubelet_config parameters have been updated", d.Id()) + } + if d.HasChange("node_pool_auto_config.0.network_tags.0.tags") { tags := d.Get("node_pool_auto_config.0.network_tags.0.tags").([]interface{}) @@ -4124,6 +4289,28 @@ func expandClusterAddonsConfig(configured interface{}) *container.AddonsConfig { } } + if v, ok := config["ray_operator_config"]; ok && len(v.([]interface{})) > 0 { + addon := v.([]interface{})[0].(map[string]interface{}) + ac.RayOperatorConfig = &container.RayOperatorConfig{ + Enabled: addon["enabled"].(bool), + ForceSendFields: []string{"Enabled"}, + } + if v, ok := addon["ray_cluster_logging_config"]; ok && len(v.([]interface{})) > 0 { + loggingConfig := v.([]interface{})[0].(map[string]interface{}) + ac.RayOperatorConfig.RayClusterLoggingConfig = &container.RayClusterLoggingConfig{ + Enabled: loggingConfig["enabled"].(bool), + ForceSendFields: []string{"Enabled"}, + } + } + if v, ok := addon["ray_cluster_monitoring_config"]; ok && len(v.([]interface{})) > 0 { + loggingConfig := v.([]interface{})[0].(map[string]interface{}) + ac.RayOperatorConfig.RayClusterMonitoringConfig = &container.RayClusterMonitoringConfig{ + Enabled: loggingConfig["enabled"].(bool), + ForceSendFields: []string{"Enabled"}, + } + } + } + return ac } @@ -4314,6 +4501,7 @@ func expandClusterAutoscaling(configured interface{}, d *schema.ResourceData) *c ResourceLimits: resourceLimits, AutoscalingProfile: config["autoscaling_profile"].(string), AutoprovisioningNodePoolDefaults: expandAutoProvisioningDefaults(config["auto_provisioning_defaults"], d), + AutoprovisioningLocations: tpgresource.ConvertStringArr(config["auto_provisioning_locations"].([]interface{})), } } @@ -4768,6 +4956,19 @@ func expandIdentityServiceConfig(configured interface{}) *container.IdentityServ return v } +func expandSecretManagerConfig(configured interface{}) *container.SecretManagerConfig { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + + config := l[0].(map[string]interface{}) + return &container.SecretManagerConfig{ + Enabled: config["enabled"].(bool), + ForceSendFields: []string{"Enabled"}, + } +} + func expandDefaultMaxPodsConstraint(v interface{}) *container.MaxPodsConstraint { if v == nil { return nil @@ -4829,9 +5030,10 @@ func expandDnsConfig(configured interface{}) *container.DNSConfig { config := l[0].(map[string]interface{}) return &container.DNSConfig{ - ClusterDns: config["cluster_dns"].(string), - ClusterDnsScope: config["cluster_dns_scope"].(string), - ClusterDnsDomain: config["cluster_dns_domain"].(string), + AdditiveVpcScopeDnsDomain: config["additive_vpc_scope_dns_domain"].(string), + ClusterDns: config["cluster_dns"].(string), + ClusterDnsScope: config["cluster_dns_scope"].(string), + ClusterDnsDomain: config["cluster_dns_domain"].(string), } } @@ -4923,21 +5125,10 @@ func expandMonitoringConfig(configured interface{}) *container.MonitoringConfig if v, ok := config["advanced_datapath_observability_config"]; ok && len(v.([]interface{})) > 0 { advanced_datapath_observability_config := v.([]interface{})[0].(map[string]interface{}) - mc.AdvancedDatapathObservabilityConfig = &container.AdvancedDatapathObservabilityConfig{ - EnableMetrics: advanced_datapath_observability_config["enable_metrics"].(bool), - } - - enable_relay := advanced_datapath_observability_config["enable_relay"].(bool) - relay_mode := 
advanced_datapath_observability_config["relay_mode"].(string) - if enable_relay { - mc.AdvancedDatapathObservabilityConfig.EnableRelay = enable_relay - } else if relay_mode == "INTERNAL_VPC_LB" || relay_mode == "EXTERNAL_LB" { - mc.AdvancedDatapathObservabilityConfig.RelayMode = relay_mode - } else { - mc.AdvancedDatapathObservabilityConfig.EnableRelay = enable_relay - mc.AdvancedDatapathObservabilityConfig.RelayMode = "DISABLED" - mc.AdvancedDatapathObservabilityConfig.ForceSendFields = []string{"EnableRelay"} + mc.AdvancedDatapathObservabilityConfig = &container.AdvancedDatapathObservabilityConfig{ + EnableMetrics: advanced_datapath_observability_config["enable_metrics"].(bool), + EnableRelay: advanced_datapath_observability_config["enable_relay"].(bool), + ForceSendFields: []string{"EnableRelay"}, } } @@ -4990,13 +5181,18 @@ func flattenNodePoolDefaults(c *container.NodePoolDefaults) []map[string]interfa } func expandNodePoolAutoConfig(configured interface{}) *container.NodePoolAutoConfig { - l := configured.([]interface{}) - if len(l) == 0 || l[0] == nil { + l, ok := configured.([]interface{}) + if !ok || l == nil || len(l) == 0 || l[0] == nil { return nil } + npac := &container.NodePoolAutoConfig{} config := l[0].(map[string]interface{}) + if v, ok := config["node_kubelet_config"]; ok { + npac.NodeKubeletConfig = expandKubeletConfig(v) + } + if v, ok := config["network_tags"]; ok && len(v.([]interface{})) > 0 { npac.NetworkTags = expandNodePoolAutoConfigNetworkTags(v) } @@ -5177,6 +5373,24 @@ func flattenClusterAddonsConfig(c *container.AddonsConfig) []map[string]interfac }, } } + if c.RayOperatorConfig != nil { + rayConfig := c.RayOperatorConfig + result["ray_operator_config"] = []map[string]interface{}{ + { + "enabled": rayConfig.Enabled, + }, + } + if rayConfig.RayClusterLoggingConfig != nil { + result["ray_operator_config"].([]map[string]any)[0]["ray_cluster_logging_config"] = []map[string]interface{}{{ + "enabled": rayConfig.RayClusterLoggingConfig.Enabled, + }} + } + if rayConfig.RayClusterMonitoringConfig != nil { + result["ray_operator_config"].([]map[string]any)[0]["ray_cluster_monitoring_config"] = []map[string]interface{}{{ + "enabled": rayConfig.RayClusterMonitoringConfig.Enabled, + }} + } + } return []map[string]interface{}{result} } @@ -5447,6 +5661,7 @@ func flattenClusterAutoscaling(a *container.ClusterAutoscaling) []map[string]int r["resource_limits"] = resourceLimits r["enabled"] = true r["auto_provisioning_defaults"] = flattenAutoProvisioningDefaults(a.AutoprovisioningNodePoolDefaults) + r["auto_provisioning_locations"] = a.AutoprovisioningLocations } else { r["enabled"] = false } @@ -5550,6 +5765,21 @@ func flattenMasterAuthorizedNetworksConfig(c *container.MasterAuthorizedNetworks return []map[string]interface{}{result} } +func flattenSecretManagerConfig(c *container.SecretManagerConfig) []map[string]interface{} { + if c == nil { + return []map[string]interface{}{ + { + "enabled": false, + }, + } + } + return []map[string]interface{}{ + { + "enabled": c.Enabled, + }, + } +} + func flattenResourceUsageExportConfig(c *container.ResourceUsageExportConfig) []map[string]interface{} { if c == nil { return nil @@ -5622,9 +5852,10 @@ func flattenDnsConfig(c *container.DNSConfig) []map[string]interface{} { } return []map[string]interface{}{ { - "cluster_dns": c.ClusterDns, - "cluster_dns_scope": c.ClusterDnsScope, - "cluster_dns_domain": c.ClusterDnsDomain, + "additive_vpc_scope_dns_domain": c.AdditiveVpcScopeDnsDomain, + "cluster_dns": c.ClusterDns, + "cluster_dns_scope": c.ClusterDnsScope, + "cluster_dns_domain": c.ClusterDnsDomain, }, } } @@ -5711,29 +5942,10 
@@ func flattenAdvancedDatapathObservabilityConfig(c *container.AdvancedDatapathObs return nil } - if c.EnableRelay { - return []map[string]interface{}{ - { - "enable_metrics": c.EnableMetrics, - "enable_relay": c.EnableRelay, - }, - } - } - - if c.RelayMode == "INTERNAL_VPC_LB" || c.RelayMode == "EXTERNAL_LB" { - return []map[string]interface{}{ - { - "enable_metrics": c.EnableMetrics, - "relay_mode": c.RelayMode, - }, - } - } - return []map[string]interface{}{ { "enable_metrics": c.EnableMetrics, - "enable_relay": false, - "relay_mode": "DISABLED", + "enable_relay": c.EnableRelay, }, } } @@ -5752,6 +5964,9 @@ func flattenNodePoolAutoConfig(c *container.NodePoolAutoConfig) []map[string]int } result := make(map[string]interface{}) + if c.NodeKubeletConfig != nil { + result["node_kubelet_config"] = flattenNodePoolAutoConfigNodeKubeletConfig(c.NodeKubeletConfig) + } if c.NetworkTags != nil { result["network_tags"] = flattenNodePoolAutoConfigNetworkTags(c.NetworkTags) } @@ -5967,6 +6182,20 @@ func containerClusterNetworkPolicyEmptyCustomizeDiff(_ context.Context, d *schem return nil } +func SecretManagerCfgSuppress(k, old, new string, r *schema.ResourceData) bool { + if k == "secret_manager_config.#" && old == "1" && new == "0" { + if v, ok := r.GetOk("secret_manager_config"); ok { + cfgList := v.([]interface{}) + if len(cfgList) > 0 { + d := cfgList[0].(map[string]interface{}) + // Suppress if old value was {enabled == false} + return !d["enabled"].(bool) + } + } + } + return false +} + func containerClusterNetworkPolicyDiffSuppress(k, old, new string, r *schema.ResourceData) bool { // if network_policy configuration is empty, we store it as populated and enabled=false, and // provider=PROVIDER_UNSPECIFIED, in the case that it was previously stored with this state, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_cluster_migratev1.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_cluster_migratev1.go index c3562aafa61..d9b65aeecb5 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_cluster_migratev1.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_cluster_migratev1.go @@ -131,13 +131,12 @@ func resourceContainerClusterResourceV1() *schema.Resource { }, }, "gcp_filestore_csi_driver_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - AtLeastOneOf: addonsConfigKeys, - MaxItems: 1, - Description: `The status of the Filestore CSI driver addon, which allows the usage of filestore instance as volumes. Defaults to disabled; set enabled = true to enable.`, - ConflictsWith: []string{"enable_autopilot"}, + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `The status of the Filestore CSI driver addon, which allows the usage of filestore instance as volumes. Defaults to disabled for Standard clusters; set enabled = true to enable. 
It is enabled by default for Autopilot clusters; set enabled = true to enable it explicitly.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enabled": { @@ -517,6 +516,13 @@ func resourceContainerClusterResourceV1() *schema.Resource { }, }, }, + "auto_provisioning_locations": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The list of Google Compute Engine zones in which the NodePool's nodes can be created by NAP.`, + }, }, }, }, @@ -660,10 +666,10 @@ func resourceContainerClusterResourceV1() *schema.Resource { "enable_components": { Type: schema.TypeList, Required: true, - Description: `GKE components exposing logs. Valid values include SYSTEM_COMPONENTS, APISERVER, CONTROLLER_MANAGER, SCHEDULER, and WORKLOADS.`, + Description: `GKE components exposing logs. Valid values include SYSTEM_COMPONENTS, APISERVER, CONTROLLER_MANAGER, KCP_CONNECTION, KCP_SSHD, SCHEDULER, and WORKLOADS.`, Elem: &schema.Schema{ Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"SYSTEM_COMPONENTS", "APISERVER", "CONTROLLER_MANAGER", "SCHEDULER", "WORKLOADS"}, false), + ValidateFunc: validation.StringInSlice([]string{"SYSTEM_COMPONENTS", "APISERVER", "CONTROLLER_MANAGER", "KCP_CONNECTION", "KCP_SSHD", "SCHEDULER", "WORKLOADS"}, false), }, }, }, @@ -1049,7 +1055,7 @@ func resourceContainerClusterResourceV1() *schema.Resource { }, }, - "node_config": clusterSchemaNodeConfig(), + "node_config": schemaNodeConfig(), "node_pool": { Type: schema.TypeList, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_node_pool.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_node_pool.go index bd07346c2bb..6777ce7dddd 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_node_pool.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_node_pool.go @@ -391,6 +391,57 @@ var schemaNodePool = map[string]*schema.Schema{ ValidateFunc: verify.ValidateIpCidrRange, Description: `The IP address range for pod IPs in this node pool. Only applicable if create_pod_range is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.`, }, + "additional_node_network_configs": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `We specify the additional node networks for this node pool using this list. Each node network corresponds to an additional interface`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "network": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Name of the VPC where the additional interface belongs.`, + }, + "subnetwork": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Name of the subnetwork where the additional interface belongs.`, + }, + }, + }, + }, + "additional_pod_network_configs": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `We specify the additional pod networks for this node pool using this list. 
Each pod network corresponds to an additional alias IP range for the node`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "subnetwork": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Name of the subnetwork where the additional pod network belongs.`, + }, + "secondary_pod_range": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The name of the secondary range on the subnet which provides IP address for this pod range.`, + }, + "max_pods_per_node": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The maximum number of pods per node which use this pod network.`, + }, + }, + }, + }, "pod_cidr_overprovision_config": { Type: schema.TypeList, Optional: true, @@ -1144,11 +1195,13 @@ func flattenNodePool(d *schema.ResourceData, config *transport_tpg.Config, np *c nodePool["max_pods_per_node"] = np.MaxPodsConstraint.MaxPodsPerNode } - nodePool["management"] = []map[string]interface{}{ - { - "auto_repair": np.Management.AutoRepair, - "auto_upgrade": np.Management.AutoUpgrade, - }, + if np.Management != nil { + nodePool["management"] = []map[string]interface{}{ + { + "auto_repair": np.Management.AutoRepair, + "auto_upgrade": np.Management.AutoUpgrade, + }, + } } if np.UpgradeSettings != nil { @@ -1164,12 +1217,14 @@ func flattenNodeNetworkConfig(c *container.NodeNetworkConfig, d *schema.Resource result := []map[string]interface{}{} if c != nil { result = append(result, map[string]interface{}{ - "create_pod_range": d.Get(prefix + "network_config.0.create_pod_range"), // API doesn't return this value so we set the old one. Field is ForceNew + Required - "pod_ipv4_cidr_block": c.PodIpv4CidrBlock, - "pod_range": c.PodRange, - "enable_private_nodes": c.EnablePrivateNodes, - "pod_cidr_overprovision_config": flattenPodCidrOverprovisionConfig(c.PodCidrOverprovisionConfig), - "network_performance_config": flattenNodeNetworkPerformanceConfig(c.NetworkPerformanceConfig), + "create_pod_range": d.Get(prefix + "network_config.0.create_pod_range"), // API doesn't return this value so we set the old one. 
Field is ForceNew + Required + "pod_ipv4_cidr_block": c.PodIpv4CidrBlock, + "pod_range": c.PodRange, + "enable_private_nodes": c.EnablePrivateNodes, + "pod_cidr_overprovision_config": flattenPodCidrOverprovisionConfig(c.PodCidrOverprovisionConfig), + "network_performance_config": flattenNodeNetworkPerformanceConfig(c.NetworkPerformanceConfig), + "additional_node_network_configs": flattenAdditionalNodeNetworkConfig(c.AdditionalNodeNetworkConfigs), + "additional_pod_network_configs": flattenAdditionalPodNetworkConfig(c.AdditionalPodNetworkConfigs), }) } return result @@ -1185,6 +1240,37 @@ func flattenNodeNetworkPerformanceConfig(c *container.NetworkPerformanceConfig) return result } +func flattenAdditionalNodeNetworkConfig(c []*container.AdditionalNodeNetworkConfig) []map[string]interface{} { + if c == nil { + return nil + } + + result := []map[string]interface{}{} + for _, nodeNetworkConfig := range c { + result = append(result, map[string]interface{}{ + "network": nodeNetworkConfig.Network, + "subnetwork": nodeNetworkConfig.Subnetwork, + }) + } + return result +} + +func flattenAdditionalPodNetworkConfig(c []*container.AdditionalPodNetworkConfig) []map[string]interface{} { + if c == nil { + return nil + } + + result := []map[string]interface{}{} + for _, podNetworkConfig := range c { + result = append(result, map[string]interface{}{ + "subnetwork": podNetworkConfig.Subnetwork, + "secondary_pod_range": podNetworkConfig.SecondaryPodRange, + "max_pods_per_node": podNetworkConfig.MaxPodsPerNode.MaxPodsPerNode, + }) + } + return result +} + func expandNodeNetworkConfig(v interface{}) *container.NodeNetworkConfig { networkNodeConfigs := v.([]interface{}) @@ -1213,6 +1299,37 @@ func expandNodeNetworkConfig(v interface{}) *container.NodeNetworkConfig { nnc.ForceSendFields = []string{"EnablePrivateNodes"} } + if v, ok := networkNodeConfig["additional_node_network_configs"]; ok && len(v.([]interface{})) > 0 { + node_network_configs := v.([]interface{}) + nodeNetworkConfigs := make([]*container.AdditionalNodeNetworkConfig, 0, len(node_network_configs)) + for _, raw := range node_network_configs { + data := raw.(map[string]interface{}) + networkConfig := &container.AdditionalNodeNetworkConfig{ + Network: data["network"].(string), + Subnetwork: data["subnetwork"].(string), + } + nodeNetworkConfigs = append(nodeNetworkConfigs, networkConfig) + } + nnc.AdditionalNodeNetworkConfigs = nodeNetworkConfigs + } + + if v, ok := networkNodeConfig["additional_pod_network_configs"]; ok && len(v.([]interface{})) > 0 { + pod_network_configs := v.([]interface{}) + podNetworkConfigs := make([]*container.AdditionalPodNetworkConfig, 0, len(pod_network_configs)) + for _, raw := range pod_network_configs { + data := raw.(map[string]interface{}) + podnetworkConfig := &container.AdditionalPodNetworkConfig{ + Subnetwork: data["subnetwork"].(string), + SecondaryPodRange: data["secondary_pod_range"].(string), + MaxPodsPerNode: &container.MaxPodsConstraint{ + MaxPodsPerNode: int64(data["max_pods_per_node"].(int)), + }, + } + podNetworkConfigs = append(podNetworkConfigs, podnetworkConfig) + } + nnc.AdditionalPodNetworkConfigs = podNetworkConfigs + } + nnc.PodCidrOverprovisionConfig = expandPodCidrOverprovisionConfig(networkNodeConfig["pod_cidr_overprovision_config"]) if v, ok := networkNodeConfig["network_performance_config"]; ok && len(v.([]interface{})) > 0 { @@ -1292,470 +1409,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node } if d.HasChange(prefix + "node_config") { - - if 
d.HasChange(prefix + "node_config.0.logging_variant") { - if v, ok := d.GetOk(prefix + "node_config.0.logging_variant"); ok { - loggingVariant := v.(string) - req := &container.UpdateNodePoolRequest{ - Name: name, - LoggingConfig: &container.NodePoolLoggingConfig{ - VariantConfig: &container.LoggingVariantConfig{ - Variant: loggingVariant, - }, - }, - } - - updateF := func() error { - clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) - if config.UserProjectOverride { - clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - op, err := clusterNodePoolsUpdateCall.Do() - if err != nil { - return err - } - - // Wait until it's updated - return ContainerOperationWait(config, op, - nodePoolInfo.project, - nodePoolInfo.location, - "updating GKE node pool logging_variant", userAgent, - timeout) - } - - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { - return err - } - - log.Printf("[INFO] Updated logging_variant for node pool %s", name) - } - } - - if d.HasChange("node_config.0.disk_size_gb") || - d.HasChange("node_config.0.disk_type") || - d.HasChange("node_config.0.machine_type") { - req := &container.UpdateNodePoolRequest{ - Name: name, - DiskSizeGb: int64(d.Get("node_config.0.disk_size_gb").(int)), - DiskType: d.Get("node_config.0.disk_type").(string), - MachineType: d.Get("node_config.0.machine_type").(string), - } - updateF := func() error { - clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) - if config.UserProjectOverride { - clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - op, err := clusterNodePoolsUpdateCall.Do() - if err != nil { - return err - } - - // Wait until it's updated - return ContainerOperationWait(config, op, - nodePoolInfo.project, - nodePoolInfo.location, - "updating GKE node pool disk_size_gb/disk_type/machine_type", userAgent, - timeout) - } - - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { - return err - } - log.Printf("[INFO] Updated disk disk_size_gb/disk_type/machine_type for Node Pool %s", d.Id()) - } - - if d.HasChange(prefix + "node_config.0.taint") { - req := &container.UpdateNodePoolRequest{ - Name: name, - } - if v, ok := d.GetOk(prefix + "node_config.0.taint"); ok { - taintsList := v.([]interface{}) - taints := make([]*container.NodeTaint, 0, len(taintsList)) - for _, v := range taintsList { - if v != nil { - data := v.(map[string]interface{}) - taint := &container.NodeTaint{ - Key: data["key"].(string), - Value: data["value"].(string), - Effect: data["effect"].(string), - } - taints = append(taints, taint) - } - } - ntaints := &container.NodeTaints{ - Taints: taints, - } - req.Taints = ntaints - } - - if req.Taints == nil { - taints := make([]*container.NodeTaint, 0, 0) - ntaints := &container.NodeTaints{ - Taints: taints, - } - req.Taints = ntaints - } - - updateF := func() error { - clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) - if config.UserProjectOverride { - clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - op, err := clusterNodePoolsUpdateCall.Do() - if err != nil { - return err - } - - // Wait until it's updated - return 
ContainerOperationWait(config, op, - nodePoolInfo.project, - nodePoolInfo.location, - "updating GKE node pool taints", userAgent, - timeout) - } - - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { - return err - } - log.Printf("[INFO] Updated taints for Node Pool %s", d.Id()) - } - - if d.HasChange(prefix + "node_config.0.tags") { - req := &container.UpdateNodePoolRequest{ - Name: name, - } - if v, ok := d.GetOk(prefix + "node_config.0.tags"); ok { - tagsList := v.([]interface{}) - tags := []string{} - for _, v := range tagsList { - if v != nil { - tags = append(tags, v.(string)) - } - } - ntags := &container.NetworkTags{ - Tags: tags, - } - req.Tags = ntags - } - - // sets tags to the empty list when user removes a previously defined list of tags entriely - // aka the node pool goes from having tags to no longer having any - if req.Tags == nil { - tags := []string{} - ntags := &container.NetworkTags{ - Tags: tags, - } - req.Tags = ntags - } - - updateF := func() error { - clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) - if config.UserProjectOverride { - clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - op, err := clusterNodePoolsUpdateCall.Do() - if err != nil { - return err - } - - // Wait until it's updated - return ContainerOperationWait(config, op, - nodePoolInfo.project, - nodePoolInfo.location, - "updating GKE node pool tags", userAgent, - timeout) - } - - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { - return err - } - log.Printf("[INFO] Updated tags for node pool %s", name) - } - - if d.HasChange(prefix + "node_config.0.resource_manager_tags") { - req := &container.UpdateNodePoolRequest{ - Name: name, - } - if v, ok := d.GetOk(prefix + "node_config.0.resource_manager_tags"); ok { - req.ResourceManagerTags = expandResourceManagerTags(v) - } - - // sets resource manager tags to the empty list when user removes a previously defined list of tags entriely - // aka the node pool goes from having tags to no longer having any - if req.ResourceManagerTags == nil { - tags := make(map[string]string) - rmTags := &container.ResourceManagerTags{ - Tags: tags, - } - req.ResourceManagerTags = rmTags - } - - updateF := func() error { - clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) - if config.UserProjectOverride { - clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - op, err := clusterNodePoolsUpdateCall.Do() - if err != nil { - return err - } - - // Wait until it's updated - return ContainerOperationWait(config, op, - nodePoolInfo.project, - nodePoolInfo.location, - "updating GKE node pool resource manager tags", userAgent, - timeout) - } - - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { - return err - } - log.Printf("[INFO] Updated resource manager tags for node pool %s", name) - } - - if d.HasChange(prefix + "node_config.0.resource_labels") { - req := &container.UpdateNodePoolRequest{ - Name: name, - } - - if v, ok := d.GetOk(prefix + "node_config.0.resource_labels"); ok { - resourceLabels := v.(map[string]interface{}) - req.ResourceLabels = &container.ResourceLabels{ - Labels: tpgresource.ConvertStringMap(resourceLabels), - } - } - - updateF := func() error { - clusterNodePoolsUpdateCall 
:= config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) - if config.UserProjectOverride { - clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - op, err := clusterNodePoolsUpdateCall.Do() - if err != nil { - return err - } - - // Wait until it's updated - return ContainerOperationWait(config, op, - nodePoolInfo.project, - nodePoolInfo.location, - "updating GKE node pool resource labels", userAgent, - timeout) - } - - // Call update serially. - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { - return err - } - - log.Printf("[INFO] Updated resource labels for node pool %s", name) - } - - if d.HasChange(prefix + "node_config.0.labels") { - req := &container.UpdateNodePoolRequest{ - Name: name, - } - - if v, ok := d.GetOk(prefix + "node_config.0.labels"); ok { - labels := v.(map[string]interface{}) - req.Labels = &container.NodeLabels{ - Labels: tpgresource.ConvertStringMap(labels), - } - } - - updateF := func() error { - clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) - if config.UserProjectOverride { - clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - op, err := clusterNodePoolsUpdateCall.Do() - if err != nil { - return err - } - - // Wait until it's updated - return ContainerOperationWait(config, op, - nodePoolInfo.project, - nodePoolInfo.location, - "updating GKE node pool labels", userAgent, - timeout) - } - - // Call update serially. - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { - return err - } - - log.Printf("[INFO] Updated labels for node pool %s", name) - } - - if d.HasChange(prefix + "node_config.0.image_type") { - req := &container.UpdateClusterRequest{ - Update: &container.ClusterUpdate{ - DesiredNodePoolId: name, - DesiredImageType: d.Get(prefix + "node_config.0.image_type").(string), - }, - } - - updateF := func() error { - clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(nodePoolInfo.parent(), req) - if config.UserProjectOverride { - clusterUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - op, err := clusterUpdateCall.Do() - if err != nil { - return err - } - - // Wait until it's updated - return ContainerOperationWait(config, op, - nodePoolInfo.project, - nodePoolInfo.location, "updating GKE node pool", userAgent, - timeout) - } - - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { - return err - } - log.Printf("[INFO] Updated image type in Node Pool %s", d.Id()) - } - - if d.HasChange(prefix + "node_config.0.workload_metadata_config") { - req := &container.UpdateNodePoolRequest{ - NodePoolId: name, - WorkloadMetadataConfig: expandWorkloadMetadataConfig( - d.Get(prefix + "node_config.0.workload_metadata_config")), - } - if req.WorkloadMetadataConfig == nil { - req.ForceSendFields = []string{"WorkloadMetadataConfig"} - } - updateF := func() error { - clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) - if config.UserProjectOverride { - clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - op, err := clusterNodePoolsUpdateCall.Do() - - if err != nil { - return err - } - - // Wait until it's updated - 
return ContainerOperationWait(config, op, - nodePoolInfo.project, - nodePoolInfo.location, - "updating GKE node pool workload_metadata_config", userAgent, - timeout) - } - - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { - return err - } - log.Printf("[INFO] Updated workload_metadata_config for node pool %s", name) - } - - if d.HasChange(prefix + "node_config.0.kubelet_config") { - req := &container.UpdateNodePoolRequest{ - NodePoolId: name, - KubeletConfig: expandKubeletConfig( - d.Get(prefix + "node_config.0.kubelet_config")), - } - if req.KubeletConfig == nil { - req.ForceSendFields = []string{"KubeletConfig"} - } - updateF := func() error { - clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) - if config.UserProjectOverride { - clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - op, err := clusterNodePoolsUpdateCall.Do() - if err != nil { - return err - } - - // Wait until it's updated - return ContainerOperationWait(config, op, - nodePoolInfo.project, - nodePoolInfo.location, - "updating GKE node pool kubelet_config", userAgent, - timeout) - } - - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { - return err - } - - log.Printf("[INFO] Updated kubelet_config for node pool %s", name) - } - if d.HasChange(prefix + "node_config.0.linux_node_config") { - req := &container.UpdateNodePoolRequest{ - NodePoolId: name, - LinuxNodeConfig: expandLinuxNodeConfig( - d.Get(prefix + "node_config.0.linux_node_config")), - } - if req.LinuxNodeConfig == nil { - req.ForceSendFields = []string{"LinuxNodeConfig"} - } - updateF := func() error { - clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) - if config.UserProjectOverride { - clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - op, err := clusterNodePoolsUpdateCall.Do() - if err != nil { - return err - } - - // Wait until it's updated - return ContainerOperationWait(config, op, - nodePoolInfo.project, - nodePoolInfo.location, - "updating GKE node pool linux_node_config", userAgent, - timeout) - } - - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { - return err - } - - log.Printf("[INFO] Updated linux_node_config for node pool %s", name) - } - if d.HasChange(prefix + "node_config.0.fast_socket") { - req := &container.UpdateNodePoolRequest{ - NodePoolId: name, - FastSocket: &container.FastSocket{}, - } - if v, ok := d.GetOk(prefix + "node_config.0.fast_socket"); ok { - fastSocket := v.([]interface{})[0].(map[string]interface{}) - req.FastSocket = &container.FastSocket{ - Enabled: fastSocket["enabled"].(bool), - } - } - updateF := func() error { - clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) - if config.UserProjectOverride { - clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - op, err := clusterNodePoolsUpdateCall.Do() - if err != nil { - return err - } - - // Wait until it's updated - return ContainerOperationWait(config, op, - nodePoolInfo.project, - nodePoolInfo.location, - "updating GKE node pool fast_socket", userAgent, - timeout) - } - - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); 
err != nil { - return err - } - - log.Printf("[INFO] Updated fast_socket for node pool %s", name) - } + if err := nodePoolNodeConfigUpdate(d, config, nodePoolInfo, prefix, name, timeout); err != nil { + return err + } } if d.HasChange(prefix + "node_count") { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerattached/resource_container_attached_cluster.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerattached/resource_container_attached_cluster.go index 2bd53b039f1..75d2fd3a392 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerattached/resource_container_attached_cluster.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerattached/resource_container_attached_cluster.go @@ -72,7 +72,8 @@ func ResourceContainerAttachedCluster() *schema.Resource { Required: true, ForceNew: true, Description: `The Kubernetes distribution of the underlying attached cluster. Supported values: -"eks", "aks".`, +"eks", "aks", "generic". The generic distribution provides the ability to register +or migrate any CNCF conformant cluster.`, }, "fleet": { Type: schema.TypeList, @@ -307,6 +308,24 @@ than 255 UTF-8 encoded bytes.`, }, }, }, + "security_posture_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Deprecated: "`security_posture_config` is deprecated and will be removed in a future major release.", + Description: `Enable/Disable Security Posture API features for the cluster.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "vulnerability_mode": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"VULNERABILITY_DISABLED", "VULNERABILITY_ENTERPRISE"}), + Description: `Sets the mode of the Kubernetes security posture API's workload vulnerability scanning. Possible values: ["VULNERABILITY_DISABLED", "VULNERABILITY_ENTERPRISE"]`, + }, + }, + }, + }, "cluster_region": { Type: schema.TypeString, Computed: true, @@ -395,8 +414,8 @@ the Workload Identity Pool.`, "deletion_policy": { Type: schema.TypeString, Optional: true, + Description: `Policy to determine what flags to send on delete. 
Possible values: DELETE, DELETE_IGNORE_ERRORS`, Default: "DELETE", - Description: `Policy to determine what flags to send on delete.`, }, "project": { Type: schema.TypeString, @@ -483,6 +502,12 @@ func resourceContainerAttachedClusterCreate(d *schema.ResourceData, meta interfa } else if v, ok := d.GetOkExists("proxy_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(proxyConfigProp)) && (ok || !reflect.DeepEqual(v, proxyConfigProp)) { obj["proxyConfig"] = proxyConfigProp } + securityPostureConfigProp, err := expandContainerAttachedClusterSecurityPostureConfig(d.Get("security_posture_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("security_posture_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(securityPostureConfigProp)) && (ok || !reflect.DeepEqual(v, securityPostureConfigProp)) { + obj["securityPostureConfig"] = securityPostureConfigProp + } annotationsProp, err := expandContainerAttachedClusterEffectiveAnnotations(d.Get("effective_annotations"), d, config) if err != nil { return err @@ -671,6 +696,9 @@ func resourceContainerAttachedClusterRead(d *schema.ResourceData, meta interface if err := d.Set("proxy_config", flattenContainerAttachedClusterProxyConfig(res["proxyConfig"], d, config)); err != nil { return fmt.Errorf("Error reading Cluster: %s", err) } + if err := d.Set("security_posture_config", flattenContainerAttachedClusterSecurityPostureConfig(res["securityPostureConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } if err := d.Set("effective_annotations", flattenContainerAttachedClusterEffectiveAnnotations(res["annotations"], d, config)); err != nil { return fmt.Errorf("Error reading Cluster: %s", err) } @@ -748,6 +776,12 @@ func resourceContainerAttachedClusterUpdate(d *schema.ResourceData, meta interfa } else if v, ok := d.GetOkExists("proxy_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, proxyConfigProp)) { obj["proxyConfig"] = proxyConfigProp } + securityPostureConfigProp, err := expandContainerAttachedClusterSecurityPostureConfig(d.Get("security_posture_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("security_posture_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, securityPostureConfigProp)) { + obj["securityPostureConfig"] = securityPostureConfigProp + } annotationsProp, err := expandContainerAttachedClusterEffectiveAnnotations(d.Get("effective_annotations"), d, config) if err != nil { return err @@ -800,6 +834,10 @@ func resourceContainerAttachedClusterUpdate(d *schema.ResourceData, meta interfa updateMask = append(updateMask, "proxyConfig") } + if d.HasChange("security_posture_config") { + updateMask = append(updateMask, "securityPostureConfig") + } + if d.HasChange("effective_annotations") { updateMask = append(updateMask, "annotations") } @@ -830,9 +868,12 @@ func resourceContainerAttachedClusterUpdate(d *schema.ResourceData, meta interfa newUpdateMask = append(newUpdateMask, "proxy_config.kubernetes_secret.name") newUpdateMask = append(newUpdateMask, "proxy_config.kubernetes_secret.namespace") } + if d.HasChange("security_posture_config") { + newUpdateMask = append(newUpdateMask, "security_posture_config.vulnerability_mode") + } // Pull out any other set fields from the generated mask. 
for _, mask := range updateMask { - if mask == "authorization" || mask == "loggingConfig" || mask == "monitoringConfig" || mask == "binaryAuthorization" || mask == "proxyConfig" { + if mask == "authorization" || mask == "loggingConfig" || mask == "monitoringConfig" || mask == "binaryAuthorization" || mask == "proxyConfig" || mask == "securityPostureConfig" { continue } newUpdateMask = append(newUpdateMask, mask) @@ -1278,6 +1319,23 @@ func flattenContainerAttachedClusterProxyConfigKubernetesSecretNamespace(v inter return v } +func flattenContainerAttachedClusterSecurityPostureConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["vulnerability_mode"] = + flattenContainerAttachedClusterSecurityPostureConfigVulnerabilityMode(original["vulnerabilityMode"], d, config) + return []interface{}{transformed} +} +func flattenContainerAttachedClusterSecurityPostureConfigVulnerabilityMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenContainerAttachedClusterEffectiveAnnotations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -1604,6 +1662,29 @@ func expandContainerAttachedClusterProxyConfigKubernetesSecretNamespace(v interf return v, nil } +func expandContainerAttachedClusterSecurityPostureConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedVulnerabilityMode, err := expandContainerAttachedClusterSecurityPostureConfigVulnerabilityMode(original["vulnerability_mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVulnerabilityMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["vulnerabilityMode"] = transformedVulnerabilityMode + } + + return transformed, nil +} + +func expandContainerAttachedClusterSecurityPostureConfigVulnerabilityMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandContainerAttachedClusterEffectiveAnnotations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeraws/resource_container_aws_node_pool.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeraws/resource_container_aws_node_pool.go index 40e58fd2e73..1203d3a4ff1 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeraws/resource_container_aws_node_pool.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeraws/resource_container_aws_node_pool.go @@ -123,6 +123,16 @@ func ResourceContainerAwsNodePool() *schema.Resource { Description: "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", }, + 
"kubelet_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "The kubelet configuration for the node pool.", + MaxItems: 1, + Elem: ContainerAwsNodePoolKubeletConfigSchema(), + }, + "management": { Type: schema.TypeList, Computed: true, @@ -446,6 +456,42 @@ func ContainerAwsNodePoolMaxPodsConstraintSchema() *schema.Resource { } } +func ContainerAwsNodePoolKubeletConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu_cfs_quota": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Whether or not to enable CPU CFS quota. Defaults to true.", + }, + + "cpu_cfs_quota_period": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The CPU CFS quota period to use for the node. Defaults to \"100ms\".", + }, + + "cpu_manager_policy": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "The CpuManagerPolicy to use for the node. Defaults to \"none\".", + }, + + "pod_pids_limit": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. The maximum number of PIDs in each pod running on the node. The limit scales automatically based on underlying machine size if left unset.", + }, + }, + } +} + func ContainerAwsNodePoolManagementSchema() *schema.Resource { return &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -511,6 +557,7 @@ func resourceContainerAwsNodePoolCreate(d *schema.ResourceData, meta interface{} SubnetId: dcl.String(d.Get("subnet_id").(string)), Version: dcl.String(d.Get("version").(string)), Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + KubeletConfig: expandContainerAwsNodePoolKubeletConfig(d.Get("kubelet_config")), Management: expandContainerAwsNodePoolManagement(d.Get("management")), Project: dcl.String(project), UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")), @@ -570,6 +617,7 @@ func resourceContainerAwsNodePoolRead(d *schema.ResourceData, meta interface{}) SubnetId: dcl.String(d.Get("subnet_id").(string)), Version: dcl.String(d.Get("version").(string)), Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + KubeletConfig: expandContainerAwsNodePoolKubeletConfig(d.Get("kubelet_config")), Management: expandContainerAwsNodePoolManagement(d.Get("management")), Project: dcl.String(project), UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")), @@ -624,6 +672,9 @@ func resourceContainerAwsNodePoolRead(d *schema.ResourceData, meta interface{}) if err = d.Set("effective_annotations", res.Annotations); err != nil { return fmt.Errorf("error setting effective_annotations in state: %s", err) } + if err = d.Set("kubelet_config", flattenContainerAwsNodePoolKubeletConfig(res.KubeletConfig)); err != nil { + return fmt.Errorf("error setting kubelet_config in state: %s", err) + } if err = d.Set("management", tpgresource.FlattenContainerAwsNodePoolManagement(res.Management, d, config)); err != nil { return fmt.Errorf("error setting management in state: %s", err) } @@ -674,6 +725,7 @@ func resourceContainerAwsNodePoolUpdate(d *schema.ResourceData, meta interface{} SubnetId: dcl.String(d.Get("subnet_id").(string)), Version: dcl.String(d.Get("version").(string)), Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + KubeletConfig: expandContainerAwsNodePoolKubeletConfig(d.Get("kubelet_config")), Management: 
expandContainerAwsNodePoolManagement(d.Get("management")), Project: dcl.String(project), UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")), @@ -728,6 +780,7 @@ func resourceContainerAwsNodePoolDelete(d *schema.ResourceData, meta interface{} SubnetId: dcl.String(d.Get("subnet_id").(string)), Version: dcl.String(d.Get("version").(string)), Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + KubeletConfig: expandContainerAwsNodePoolKubeletConfig(d.Get("kubelet_config")), Management: expandContainerAwsNodePoolManagement(d.Get("management")), Project: dcl.String(project), UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")), @@ -1080,6 +1133,38 @@ func flattenContainerAwsNodePoolMaxPodsConstraint(obj *containeraws.NodePoolMaxP } +func expandContainerAwsNodePoolKubeletConfig(o interface{}) *containeraws.NodePoolKubeletConfig { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.NodePoolKubeletConfig{ + CpuCfsQuota: dcl.Bool(obj["cpu_cfs_quota"].(bool)), + CpuCfsQuotaPeriod: dcl.String(obj["cpu_cfs_quota_period"].(string)), + CpuManagerPolicy: containeraws.NodePoolKubeletConfigCpuManagerPolicyEnumRef(obj["cpu_manager_policy"].(string)), + PodPidsLimit: dcl.Int64(int64(obj["pod_pids_limit"].(int))), + } +} + +func flattenContainerAwsNodePoolKubeletConfig(obj *containeraws.NodePoolKubeletConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cpu_cfs_quota": obj.CpuCfsQuota, + "cpu_cfs_quota_period": obj.CpuCfsQuotaPeriod, + "cpu_manager_policy": obj.CpuManagerPolicy, + "pod_pids_limit": obj.PodPidsLimit, + } + + return []interface{}{transformed} + +} + func expandContainerAwsNodePoolManagement(o interface{}) *containeraws.NodePoolManagement { if o == nil { return nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/corebilling/resource_billing_project_info.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/corebilling/resource_billing_project_info.go index 18f54ab2854..d7280be0c90 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/corebilling/resource_billing_project_info.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/corebilling/resource_billing_project_info.go @@ -127,7 +127,7 @@ func resourceCoreBillingProjectInfoCreate(d *schema.ResourceData, meta interface } // Store the ID now - id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/billingInfo") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -318,7 +318,7 @@ func resourceCoreBillingProjectInfoImport(d *schema.ResourceData, meta interface } // Replace import id for the resource id - id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/billingInfo") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/databasemigrationservice/resource_database_migration_service_connection_profile.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/databasemigrationservice/resource_database_migration_service_connection_profile.go index 7ca53663197..05e1f5fa490 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/databasemigrationservice/resource_database_migration_service_connection_profile.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/databasemigrationservice/resource_database_migration_service_connection_profile.go @@ -388,33 +388,30 @@ Please refer to the field 'effective_labels' for all of the labels present on th MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "host": { + "cloud_sql_id": { Type: schema.TypeString, - Required: true, - Description: `Required. The IP or hostname of the source MySQL database.`, + Optional: true, + Description: `If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source.`, + }, + "host": { + Type: schema.TypeString, + Optional: true, + Description: `The IP or hostname of the source MySQL database.`, + RequiredWith: []string{"mysql.0.port", "mysql.0.username"}, }, "password": { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, - Description: `Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + Description: `Input only. The password for the user that Database Migration Service will be using to connect to the database. This field is not returned on request, and the value is encrypted when stored in Database Migration Service.`, Sensitive: true, }, "port": { - Type: schema.TypeInt, - Required: true, - Description: `Required. The network port of the source MySQL database.`, - }, - "username": { - Type: schema.TypeString, - Required: true, - Description: `Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service.`, - }, - "cloud_sql_id": { - Type: schema.TypeString, - Optional: true, - Description: `If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source.`, + Type: schema.TypeInt, + Optional: true, + Description: `The network port of the source MySQL database.`, + RequiredWith: []string{"mysql.0.host", "mysql.0.username"}, }, "ssl": { Type: schema.TypeList, @@ -455,6 +452,12 @@ If this field is used then the 'clientCertificate' field is mandatory.`, }, }, }, + "username": { + Type: schema.TypeString, + Optional: true, + Description: `The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service.`, + RequiredWith: []string{"mysql.0.host", "mysql.0.port"}, + }, "password_set": { Type: schema.TypeBool, Computed: true, @@ -626,33 +629,36 @@ Static IP address connectivity configured on service project.`, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "host": { + "alloydb_cluster_id": { Type: schema.TypeString, - Required: true, - Description: `Required. 
The IP or hostname of the source MySQL database.`, + Optional: true, + Description: `If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID.`, + }, + "cloud_sql_id": { + Type: schema.TypeString, + Optional: true, + Description: `If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source.`, + }, + "host": { + Type: schema.TypeString, + Optional: true, + Description: `The IP or hostname of the source PostgreSQL database.`, + RequiredWith: []string{"postgresql.0.port", "postgresql.0.username", "postgresql.0.password"}, }, "password": { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, - Description: `Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + Description: `Input only. The password for the user that Database Migration Service will be using to connect to the database. This field is not returned on request, and the value is encrypted when stored in Database Migration Service.`, - Sensitive: true, + Sensitive: true, + RequiredWith: []string{"postgresql.0.host", "postgresql.0.port", "postgresql.0.username"}, }, "port": { - Type: schema.TypeInt, - Required: true, - Description: `Required. The network port of the source MySQL database.`, - }, - "username": { - Type: schema.TypeString, - Required: true, - Description: `Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service.`, - }, - "cloud_sql_id": { - Type: schema.TypeString, - Optional: true, - Description: `If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source.`, + Type: schema.TypeInt, + Optional: true, + Description: `The network port of the source PostgreSQL database.`, + RequiredWith: []string{"postgresql.0.host", "postgresql.0.username", "postgresql.0.password"}, }, "ssl": { Type: schema.TypeList, @@ -695,6 +701,12 @@ If this field is used then the 'clientCertificate' field is mandatory.`, }, }, }, + "username": { + Type: schema.TypeString, + Optional: true, + Description: `The username that Database Migration Service will use to connect to the database. 
The value is encrypted when stored in Database Migration Service.`, + RequiredWith: []string{"postgresql.0.host", "postgresql.0.port", "postgresql.0.password"}, + }, "network_architecture": { Type: schema.TypeString, Computed: true, @@ -1386,6 +1398,8 @@ func flattenDatabaseMigrationServiceConnectionProfilePostgresql(v interface{}, d flattenDatabaseMigrationServiceConnectionProfilePostgresqlSsl(original["ssl"], d, config) transformed["cloud_sql_id"] = flattenDatabaseMigrationServiceConnectionProfilePostgresqlCloudSqlId(original["cloudSqlId"], d, config) + transformed["alloydb_cluster_id"] = + flattenDatabaseMigrationServiceConnectionProfilePostgresqlAlloydbClusterId(original["alloydbClusterId"], d, config) transformed["network_architecture"] = flattenDatabaseMigrationServiceConnectionProfilePostgresqlNetworkArchitecture(original["networkArchitecture"], d, config) return []interface{}{transformed} @@ -1462,6 +1476,10 @@ func flattenDatabaseMigrationServiceConnectionProfilePostgresqlCloudSqlId(v inte return v } +func flattenDatabaseMigrationServiceConnectionProfilePostgresqlAlloydbClusterId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenDatabaseMigrationServiceConnectionProfilePostgresqlNetworkArchitecture(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -2220,6 +2238,13 @@ func expandDatabaseMigrationServiceConnectionProfilePostgresql(v interface{}, d transformed["cloudSqlId"] = transformedCloudSqlId } + transformedAlloydbClusterId, err := expandDatabaseMigrationServiceConnectionProfilePostgresqlAlloydbClusterId(original["alloydb_cluster_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAlloydbClusterId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["alloydbClusterId"] = transformedAlloydbClusterId + } + transformedNetworkArchitecture, err := expandDatabaseMigrationServiceConnectionProfilePostgresqlNetworkArchitecture(original["network_architecture"], d, config) if err != nil { return nil, err @@ -2310,6 +2335,10 @@ func expandDatabaseMigrationServiceConnectionProfilePostgresqlCloudSqlId(v inter return v, nil } +func expandDatabaseMigrationServiceConnectionProfilePostgresqlAlloydbClusterId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandDatabaseMigrationServiceConnectionProfilePostgresqlNetworkArchitecture(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/databasemigrationservice/resource_database_migration_service_migration_job.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/databasemigrationservice/resource_database_migration_service_migration_job.go new file mode 100644 index 00000000000..ec04173b72b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/databasemigrationservice/resource_database_migration_service_migration_job.go @@ -0,0 +1,1224 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package databasemigrationservice + +import ( + "fmt" + "log" + "net/http" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceDatabaseMigrationServiceMigrationJob() *schema.Resource { + return &schema.Resource{ + Create: resourceDatabaseMigrationServiceMigrationJobCreate, + Read: resourceDatabaseMigrationServiceMigrationJobRead, + Update: resourceDatabaseMigrationServiceMigrationJobUpdate, + Delete: resourceDatabaseMigrationServiceMigrationJobDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDatabaseMigrationServiceMigrationJobImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.SetLabelsDiff, + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "destination": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the destination connection profile resource in the form of projects/{project}/locations/{location}/connectionProfiles/{destinationConnectionProfile}.`, + }, + "migration_job_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the migration job.`, + }, + "source": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the source connection profile resource in the form of projects/{project}/locations/{location}/connectionProfiles/{sourceConnectionProfile}.`, + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"ONE_TIME", "CONTINUOUS"}), + Description: `The type of the migration job. 
Possible values: ["ONE_TIME", "CONTINUOUS"]`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `The migration job display name.`, + }, + "dump_flags": { + Type: schema.TypeList, + Optional: true, + Description: `The initial dump flags.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dump_flags": { + Type: schema.TypeList, + Optional: true, + Description: `A list of dump flags`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `The name of the flag`, + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: `The vale of the flag`, + }, + }, + }, + }, + }, + }, + }, + "dump_path": { + Type: schema.TypeString, + Optional: true, + Description: `The path to the dump file in Google Cloud Storage, +in the format: (gs://[BUCKET_NAME]/[OBJECT_NAME]). +This field and the "dump_flags" field are mutually exclusive.`, + }, + "dump_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"LOGICAL", "PHYSICAL", ""}), + Description: `The type of the data dump. Supported for MySQL to CloudSQL for MySQL +migrations only. Possible values: ["LOGICAL", "PHYSICAL"]`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `The resource labels for migration job to use to annotate any related underlying resources such as Compute Engine VMs. + + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The location where the migration job should reside.`, + }, + "performance_config": { + Type: schema.TypeList, + Optional: true, + Description: `Data dump parallelism settings used by the migration.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dump_parallel_level": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MIN", "OPTIMAL", "MAX", ""}), + Description: `Initial dump parallelism level. 
Possible values: ["MIN", "OPTIMAL", "MAX"]`, + }, + }, + }, + }, + "reverse_ssh_connectivity": { + Type: schema.TypeList, + Optional: true, + Description: `The details of the VPC network that the source database is located in.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "vm": { + Type: schema.TypeString, + Optional: true, + Description: `The name of the virtual machine (Compute Engine) used as the bastion server +for the SSH tunnel.`, + }, + "vm_ip": { + Type: schema.TypeString, + Optional: true, + Description: `The IP of the virtual machine (Compute Engine) used as the bastion server +for the SSH tunnel.`, + }, + "vm_port": { + Type: schema.TypeInt, + Optional: true, + Description: `The forwarding port of the virtual machine (Compute Engine) used as the +bastion server for the SSH tunnel.`, + }, + "vpc": { + Type: schema.TypeString, + Optional: true, + Description: `The name of the VPC to peer with the Cloud SQL private network.`, + }, + }, + }, + ExactlyOneOf: []string{"static_ip_connectivity", "vpc_peering_connectivity"}, + }, + "static_ip_connectivity": { + Type: schema.TypeList, + Optional: true, + Description: `If set to an empty object ('{}'), the source database will allow incoming +connections from the public IP of the destination database. +You can retrieve the public IP of the Cloud SQL instance from the +Cloud SQL console or using Cloud SQL APIs.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + ExactlyOneOf: []string{"reverse_ssh_connectivity", "vpc_peering_connectivity"}, + }, + "vpc_peering_connectivity": { + Type: schema.TypeList, + Optional: true, + Description: `The details of the VPC network that the source database is located in.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "vpc": { + Type: schema.TypeString, + Optional: true, + Description: `The name of the VPC network to peer with the Cloud SQL private network.`, + }, + }, + }, + ExactlyOneOf: []string{"static_ip_connectivity", "reverse_ssh_connectivity"}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The timestamp when the resource was created. A timestamp in RFC3339 UTC 'Zulu' format, accurate to nanoseconds. Example: '2014-10-02T15:01:23.045123456Z'.`, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "error": { + Type: schema.TypeList, + Computed: true, + Description: `Output only. 
The error details in case of state FAILED.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "code": { + Type: schema.TypeInt, + Computed: true, + Description: `The status code, which should be an enum value of google.rpc.Code.`, + }, + "details": { + Type: schema.TypeList, + Computed: true, + Description: `A list of messages that carry the error details.`, + Elem: &schema.Schema{ + Type: schema.TypeMap, + }, + }, + "message": { + Type: schema.TypeString, + Computed: true, + Description: `Human readable message indicating details about the current status.`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of this migration job resource in the form of projects/{project}/locations/{location}/migrationJobs/{migrationJob}.`, + }, + "phase": { + Type: schema.TypeString, + Computed: true, + Description: `The current migration job phase.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The current migration job state.`, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDatabaseMigrationServiceMigrationJobCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandDatabaseMigrationServiceMigrationJobDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + typeProp, err := expandDatabaseMigrationServiceMigrationJobType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + sourceProp, err := expandDatabaseMigrationServiceMigrationJobSource(d.Get("source"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceProp)) && (ok || !reflect.DeepEqual(v, sourceProp)) { + obj["source"] = sourceProp + } + destinationProp, err := expandDatabaseMigrationServiceMigrationJobDestination(d.Get("destination"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("destination"); !tpgresource.IsEmptyValue(reflect.ValueOf(destinationProp)) && (ok || !reflect.DeepEqual(v, destinationProp)) { + obj["destination"] = destinationProp + } + dumpFlagsProp, err := expandDatabaseMigrationServiceMigrationJobDumpFlags(d.Get("dump_flags"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dump_flags"); !tpgresource.IsEmptyValue(reflect.ValueOf(dumpFlagsProp)) && (ok || !reflect.DeepEqual(v, dumpFlagsProp)) { + obj["dumpFlags"] = dumpFlagsProp + } + performanceConfigProp, err := expandDatabaseMigrationServiceMigrationJobPerformanceConfig(d.Get("performance_config"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("performance_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(performanceConfigProp)) && (ok || !reflect.DeepEqual(v, performanceConfigProp)) { + obj["performanceConfig"] = performanceConfigProp + } + dumpPathProp, err := expandDatabaseMigrationServiceMigrationJobDumpPath(d.Get("dump_path"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dump_path"); !tpgresource.IsEmptyValue(reflect.ValueOf(dumpPathProp)) && (ok || !reflect.DeepEqual(v, dumpPathProp)) { + obj["dumpPath"] = dumpPathProp + } + dumpTypeProp, err := expandDatabaseMigrationServiceMigrationJobDumpType(d.Get("dump_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dump_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(dumpTypeProp)) && (ok || !reflect.DeepEqual(v, dumpTypeProp)) { + obj["dumpType"] = dumpTypeProp + } + staticIpConnectivityProp, err := expandDatabaseMigrationServiceMigrationJobStaticIpConnectivity(d.Get("static_ip_connectivity"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("static_ip_connectivity"); ok || !reflect.DeepEqual(v, staticIpConnectivityProp) { + obj["staticIpConnectivity"] = staticIpConnectivityProp + } + reverseSshConnectivityProp, err := expandDatabaseMigrationServiceMigrationJobReverseSshConnectivity(d.Get("reverse_ssh_connectivity"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("reverse_ssh_connectivity"); !tpgresource.IsEmptyValue(reflect.ValueOf(reverseSshConnectivityProp)) && (ok || !reflect.DeepEqual(v, reverseSshConnectivityProp)) { + obj["reverseSshConnectivity"] = reverseSshConnectivityProp + } + vpcPeeringConnectivityProp, err := expandDatabaseMigrationServiceMigrationJobVpcPeeringConnectivity(d.Get("vpc_peering_connectivity"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("vpc_peering_connectivity"); !tpgresource.IsEmptyValue(reflect.ValueOf(vpcPeeringConnectivityProp)) && (ok || !reflect.DeepEqual(v, vpcPeeringConnectivityProp)) { + obj["vpcPeeringConnectivity"] = vpcPeeringConnectivityProp + } + labelsProp, err := expandDatabaseMigrationServiceMigrationJobEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DatabaseMigrationServiceBasePath}}projects/{{project}}/locations/{{location}}/migrationJobs?migrationJobId={{migration_job_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new MigrationJob: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for MigrationJob: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating MigrationJob: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, 
"projects/{{project}}/locations/{{location}}/migrationJobs/{{migration_job_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = DatabaseMigrationServiceOperationWaitTime( + config, res, project, "Creating MigrationJob", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create MigrationJob: %s", err) + } + + log.Printf("[DEBUG] Finished creating MigrationJob %q: %#v", d.Id(), res) + + return resourceDatabaseMigrationServiceMigrationJobRead(d, meta) +} + +func resourceDatabaseMigrationServiceMigrationJobRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DatabaseMigrationServiceBasePath}}projects/{{project}}/locations/{{location}}/migrationJobs/{{migration_job_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for MigrationJob: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DatabaseMigrationServiceMigrationJob %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading MigrationJob: %s", err) + } + + if err := d.Set("name", flattenDatabaseMigrationServiceMigrationJobName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading MigrationJob: %s", err) + } + if err := d.Set("display_name", flattenDatabaseMigrationServiceMigrationJobDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading MigrationJob: %s", err) + } + if err := d.Set("create_time", flattenDatabaseMigrationServiceMigrationJobCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading MigrationJob: %s", err) + } + if err := d.Set("labels", flattenDatabaseMigrationServiceMigrationJobLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading MigrationJob: %s", err) + } + if err := d.Set("state", flattenDatabaseMigrationServiceMigrationJobState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading MigrationJob: %s", err) + } + if err := d.Set("phase", flattenDatabaseMigrationServiceMigrationJobPhase(res["phase"], d, config)); err != nil { + return fmt.Errorf("Error reading MigrationJob: %s", err) + } + if err := d.Set("error", flattenDatabaseMigrationServiceMigrationJobError(res["error"], d, config)); err != nil { + return fmt.Errorf("Error reading MigrationJob: %s", err) + } + if err := d.Set("type", flattenDatabaseMigrationServiceMigrationJobType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading MigrationJob: %s", err) + } + if err := d.Set("source", flattenDatabaseMigrationServiceMigrationJobSource(res["source"], d, config)); err != nil { + return fmt.Errorf("Error reading 
MigrationJob: %s", err) + } + if err := d.Set("destination", flattenDatabaseMigrationServiceMigrationJobDestination(res["destination"], d, config)); err != nil { + return fmt.Errorf("Error reading MigrationJob: %s", err) + } + if err := d.Set("dump_flags", flattenDatabaseMigrationServiceMigrationJobDumpFlags(res["dumpFlags"], d, config)); err != nil { + return fmt.Errorf("Error reading MigrationJob: %s", err) + } + if err := d.Set("performance_config", flattenDatabaseMigrationServiceMigrationJobPerformanceConfig(res["performanceConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading MigrationJob: %s", err) + } + if err := d.Set("dump_path", flattenDatabaseMigrationServiceMigrationJobDumpPath(res["dumpPath"], d, config)); err != nil { + return fmt.Errorf("Error reading MigrationJob: %s", err) + } + if err := d.Set("dump_type", flattenDatabaseMigrationServiceMigrationJobDumpType(res["dumpType"], d, config)); err != nil { + return fmt.Errorf("Error reading MigrationJob: %s", err) + } + if err := d.Set("static_ip_connectivity", flattenDatabaseMigrationServiceMigrationJobStaticIpConnectivity(res["staticIpConnectivity"], d, config)); err != nil { + return fmt.Errorf("Error reading MigrationJob: %s", err) + } + if err := d.Set("reverse_ssh_connectivity", flattenDatabaseMigrationServiceMigrationJobReverseSshConnectivity(res["reverseSshConnectivity"], d, config)); err != nil { + return fmt.Errorf("Error reading MigrationJob: %s", err) + } + if err := d.Set("vpc_peering_connectivity", flattenDatabaseMigrationServiceMigrationJobVpcPeeringConnectivity(res["vpcPeeringConnectivity"], d, config)); err != nil { + return fmt.Errorf("Error reading MigrationJob: %s", err) + } + if err := d.Set("terraform_labels", flattenDatabaseMigrationServiceMigrationJobTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading MigrationJob: %s", err) + } + if err := d.Set("effective_labels", flattenDatabaseMigrationServiceMigrationJobEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading MigrationJob: %s", err) + } + + return nil +} + +func resourceDatabaseMigrationServiceMigrationJobUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for MigrationJob: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandDatabaseMigrationServiceMigrationJobDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + dumpFlagsProp, err := expandDatabaseMigrationServiceMigrationJobDumpFlags(d.Get("dump_flags"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dump_flags"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dumpFlagsProp)) { + obj["dumpFlags"] = dumpFlagsProp + } + performanceConfigProp, err := expandDatabaseMigrationServiceMigrationJobPerformanceConfig(d.Get("performance_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("performance_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && 
(ok || !reflect.DeepEqual(v, performanceConfigProp)) { + obj["performanceConfig"] = performanceConfigProp + } + dumpPathProp, err := expandDatabaseMigrationServiceMigrationJobDumpPath(d.Get("dump_path"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dump_path"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dumpPathProp)) { + obj["dumpPath"] = dumpPathProp + } + dumpTypeProp, err := expandDatabaseMigrationServiceMigrationJobDumpType(d.Get("dump_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dump_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dumpTypeProp)) { + obj["dumpType"] = dumpTypeProp + } + staticIpConnectivityProp, err := expandDatabaseMigrationServiceMigrationJobStaticIpConnectivity(d.Get("static_ip_connectivity"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("static_ip_connectivity"); ok || !reflect.DeepEqual(v, staticIpConnectivityProp) { + obj["staticIpConnectivity"] = staticIpConnectivityProp + } + reverseSshConnectivityProp, err := expandDatabaseMigrationServiceMigrationJobReverseSshConnectivity(d.Get("reverse_ssh_connectivity"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("reverse_ssh_connectivity"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, reverseSshConnectivityProp)) { + obj["reverseSshConnectivity"] = reverseSshConnectivityProp + } + vpcPeeringConnectivityProp, err := expandDatabaseMigrationServiceMigrationJobVpcPeeringConnectivity(d.Get("vpc_peering_connectivity"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("vpc_peering_connectivity"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, vpcPeeringConnectivityProp)) { + obj["vpcPeeringConnectivity"] = vpcPeeringConnectivityProp + } + labelsProp, err := expandDatabaseMigrationServiceMigrationJobEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DatabaseMigrationServiceBasePath}}projects/{{project}}/locations/{{location}}/migrationJobs/{{migration_job_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating MigrationJob %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("dump_flags") { + updateMask = append(updateMask, "dumpFlags") + } + + if d.HasChange("performance_config") { + updateMask = append(updateMask, "performanceConfig") + } + + if d.HasChange("dump_path") { + updateMask = append(updateMask, "dumpPath") + } + + if d.HasChange("dump_type") { + updateMask = append(updateMask, "dumpType") + } + + if d.HasChange("static_ip_connectivity") { + updateMask = append(updateMask, "staticIpConnectivity") + } + + if d.HasChange("reverse_ssh_connectivity") { + updateMask = append(updateMask, "reverseSshConnectivity") + } + + if d.HasChange("vpc_peering_connectivity") { + updateMask = append(updateMask, "vpcPeeringConnectivity") + } + + if d.HasChange("effective_labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't 
set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating MigrationJob %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating MigrationJob %q: %#v", d.Id(), res) + } + + err = DatabaseMigrationServiceOperationWaitTime( + config, res, project, "Updating MigrationJob", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + } + + return resourceDatabaseMigrationServiceMigrationJobRead(d, meta) +} + +func resourceDatabaseMigrationServiceMigrationJobDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for MigrationJob: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{DatabaseMigrationServiceBasePath}}projects/{{project}}/locations/{{location}}/migrationJobs/{{migration_job_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting MigrationJob %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "MigrationJob") + } + + err = DatabaseMigrationServiceOperationWaitTime( + config, res, project, "Deleting MigrationJob", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting MigrationJob %q: %#v", d.Id(), res) + return nil +} + +func resourceDatabaseMigrationServiceMigrationJobImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^projects/(?P[^/]+)/locations/(?P[^/]+)/migrationJobs/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/migrationJobs/{{migration_job_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDatabaseMigrationServiceMigrationJobName(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceMigrationJobDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceMigrationJobCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceMigrationJobLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenDatabaseMigrationServiceMigrationJobState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceMigrationJobPhase(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceMigrationJobError(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["code"] = + flattenDatabaseMigrationServiceMigrationJobErrorCode(original["code"], d, config) + transformed["message"] = + flattenDatabaseMigrationServiceMigrationJobErrorMessage(original["message"], d, config) + transformed["details"] = + flattenDatabaseMigrationServiceMigrationJobErrorDetails(original["details"], d, config) + return []interface{}{transformed} +} +func flattenDatabaseMigrationServiceMigrationJobErrorCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatabaseMigrationServiceMigrationJobErrorMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceMigrationJobErrorDetails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceMigrationJobType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceMigrationJobSource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceMigrationJobDestination(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceMigrationJobDumpFlags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["dump_flags"] = + flattenDatabaseMigrationServiceMigrationJobDumpFlagsDumpFlags(original["dumpFlags"], d, config) + return []interface{}{transformed} +} +func 
flattenDatabaseMigrationServiceMigrationJobDumpFlagsDumpFlags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenDatabaseMigrationServiceMigrationJobDumpFlagsDumpFlagsName(original["name"], d, config), + "value": flattenDatabaseMigrationServiceMigrationJobDumpFlagsDumpFlagsValue(original["value"], d, config), + }) + } + return transformed +} +func flattenDatabaseMigrationServiceMigrationJobDumpFlagsDumpFlagsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceMigrationJobDumpFlagsDumpFlagsValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceMigrationJobPerformanceConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["dump_parallel_level"] = + flattenDatabaseMigrationServiceMigrationJobPerformanceConfigDumpParallelLevel(original["dumpParallelLevel"], d, config) + return []interface{}{transformed} +} +func flattenDatabaseMigrationServiceMigrationJobPerformanceConfigDumpParallelLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceMigrationJobDumpPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceMigrationJobDumpType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceMigrationJobStaticIpConnectivity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenDatabaseMigrationServiceMigrationJobReverseSshConnectivity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["vm_ip"] = + flattenDatabaseMigrationServiceMigrationJobReverseSshConnectivityVmIp(original["vmIp"], d, config) + transformed["vm_port"] = + flattenDatabaseMigrationServiceMigrationJobReverseSshConnectivityVmPort(original["vmPort"], d, config) + transformed["vm"] = + flattenDatabaseMigrationServiceMigrationJobReverseSshConnectivityVm(original["vm"], d, config) + transformed["vpc"] = + flattenDatabaseMigrationServiceMigrationJobReverseSshConnectivityVpc(original["vpc"], d, config) + return []interface{}{transformed} +} +func flattenDatabaseMigrationServiceMigrationJobReverseSshConnectivityVmIp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceMigrationJobReverseSshConnectivityVmPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // 
Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatabaseMigrationServiceMigrationJobReverseSshConnectivityVm(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceMigrationJobReverseSshConnectivityVpc(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceMigrationJobVpcPeeringConnectivity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["vpc"] = + flattenDatabaseMigrationServiceMigrationJobVpcPeeringConnectivityVpc(original["vpc"], d, config) + return []interface{}{transformed} +} +func flattenDatabaseMigrationServiceMigrationJobVpcPeeringConnectivityVpc(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceMigrationJobTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("terraform_labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenDatabaseMigrationServiceMigrationJobEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDatabaseMigrationServiceMigrationJobDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceMigrationJobType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceMigrationJobSource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceMigrationJobDestination(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceMigrationJobDumpFlags(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDumpFlags, err := expandDatabaseMigrationServiceMigrationJobDumpFlagsDumpFlags(original["dump_flags"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDumpFlags); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dumpFlags"] = transformedDumpFlags + } + + return transformed, nil +} + +func expandDatabaseMigrationServiceMigrationJobDumpFlagsDumpFlags(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := 
v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDatabaseMigrationServiceMigrationJobDumpFlagsDumpFlagsName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedValue, err := expandDatabaseMigrationServiceMigrationJobDumpFlagsDumpFlagsValue(original["value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["value"] = transformedValue + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDatabaseMigrationServiceMigrationJobDumpFlagsDumpFlagsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceMigrationJobDumpFlagsDumpFlagsValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceMigrationJobPerformanceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDumpParallelLevel, err := expandDatabaseMigrationServiceMigrationJobPerformanceConfigDumpParallelLevel(original["dump_parallel_level"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDumpParallelLevel); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dumpParallelLevel"] = transformedDumpParallelLevel + } + + return transformed, nil +} + +func expandDatabaseMigrationServiceMigrationJobPerformanceConfigDumpParallelLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceMigrationJobDumpPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceMigrationJobDumpType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceMigrationJobStaticIpConnectivity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandDatabaseMigrationServiceMigrationJobReverseSshConnectivity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedVmIp, err := expandDatabaseMigrationServiceMigrationJobReverseSshConnectivityVmIp(original["vm_ip"], d, config) + if err != nil { + 
return nil, err + } else if val := reflect.ValueOf(transformedVmIp); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["vmIp"] = transformedVmIp + } + + transformedVmPort, err := expandDatabaseMigrationServiceMigrationJobReverseSshConnectivityVmPort(original["vm_port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVmPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["vmPort"] = transformedVmPort + } + + transformedVm, err := expandDatabaseMigrationServiceMigrationJobReverseSshConnectivityVm(original["vm"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVm); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["vm"] = transformedVm + } + + transformedVpc, err := expandDatabaseMigrationServiceMigrationJobReverseSshConnectivityVpc(original["vpc"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVpc); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["vpc"] = transformedVpc + } + + return transformed, nil +} + +func expandDatabaseMigrationServiceMigrationJobReverseSshConnectivityVmIp(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceMigrationJobReverseSshConnectivityVmPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceMigrationJobReverseSshConnectivityVm(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceMigrationJobReverseSshConnectivityVpc(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceMigrationJobVpcPeeringConnectivity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedVpc, err := expandDatabaseMigrationServiceMigrationJobVpcPeeringConnectivityVpc(original["vpc"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVpc); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["vpc"] = transformedVpc + } + + return transformed, nil +} + +func expandDatabaseMigrationServiceMigrationJobVpcPeeringConnectivityVpc(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceMigrationJobEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/databasemigrationservice/resource_database_migration_service_migration_job_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/databasemigrationservice/resource_database_migration_service_migration_job_sweeper.go 
new file mode 100644 index 00000000000..bfceedc7433 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/databasemigrationservice/resource_database_migration_service_migration_job_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package databasemigrationservice + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("DatabaseMigrationServiceMigrationJob", testSweepDatabaseMigrationServiceMigrationJob) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepDatabaseMigrationServiceMigrationJob(region string) error { + resourceName := "DatabaseMigrationServiceMigrationJob" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://datamigration.googleapis.com/v1/projects/{{project}}/locations/{{location}}/migrationJobs", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["migrationJobs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
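+		// Illustrative example (hypothetical project and job names): a list
+		// entry such as
+		//   {"name": "projects/my-proj/locations/us-central1/migrationJobs/tf-test-job-1"}
+		// resolves to name == "tf-test-job-1" via GetResourceNameFromSelfLink;
+		// that trailing segment is then appended to the delete URL template below.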
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be swept + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://datamigration.googleapis.com/v1/projects/{{project}}/locations/{{location}}/migrationJobs/{{migration_job_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datalossprevention/resource_data_loss_prevention_discovery_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datalossprevention/resource_data_loss_prevention_discovery_config.go index 22f591fb6ae..31e6351a4e7 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datalossprevention/resource_data_loss_prevention_discovery_config.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datalossprevention/resource_data_loss_prevention_discovery_config.go @@ -180,6 +180,70 @@ func ResourceDataLossPreventionDiscoveryConfig() *schema.Resource { }, }, }, + "tag_resources": { + Type: schema.TypeList, + Optional: true, + Description: `Tag the profiled resources with the specified tag values.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "lower_data_risk_to_low": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. This also lowers the data risk of resources at the lower levels of the resource hierarchy. For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles.`, + }, + "profile_generations_to_tag": { + Type: schema.TypeList, + Optional: true, + Description: `The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both 'PROFILE_GENERATION_NEW' and 'PROFILE_GENERATION_UPDATE'. 
Possible values: ["PROFILE_GENERATION_NEW", "PROFILE_GENERATION_UPDATE"]`, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidateEnum([]string{"PROFILE_GENERATION_NEW", "PROFILE_GENERATION_UPDATE"}), + }, + }, + "tag_conditions": { + Type: schema.TypeList, + Optional: true, + Description: `The tags to associate with different conditions.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sensitivity_score": { + Type: schema.TypeList, + Optional: true, + Description: `Conditions attaching the tag to a resource on its profile having this sensitivity score.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "score": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"}), + Description: `The sensitivity score applied to the resource. Possible values: ["SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"]`, + }, + }, + }, + }, + "tag": { + Type: schema.TypeList, + Optional: true, + Description: `The tag value to attach to resources.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "namespaced_value": { + Type: schema.TypeString, + Optional: true, + Description: `The namespaced name for the tag value to attach to resources. Must be in the format '{parent_id}/{tag_key_short_name}/{short_name}', for example, "123456/environment/prod".`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, }, }, }, @@ -257,6 +321,22 @@ func ResourceDataLossPreventionDiscoveryConfig() *schema.Resource { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "inspect_template_modified_cadence": { + Type: schema.TypeList, + Optional: true, + Description: `Governs when to update data profiles when the inspection rules defined by the 'InspectTemplate' change. If not set, changing the template will not cause a data profile to update.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "frequency": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"UPDATE_FREQUENCY_NEVER", "UPDATE_FREQUENCY_DAILY", "UPDATE_FREQUENCY_MONTHLY", ""}), + Description: `How frequently data profiles can be updated when the template is modified. Defaults to never. Possible values: ["UPDATE_FREQUENCY_NEVER", "UPDATE_FREQUENCY_DAILY", "UPDATE_FREQUENCY_MONTHLY"]`, + }, + }, + }, + }, "schema_modified_cadence": { Type: schema.TypeList, Optional: true, @@ -614,6 +694,22 @@ func ResourceDataLossPreventionDiscoveryConfig() *schema.Resource { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "inspect_template_modified_cadence": { + Type: schema.TypeList, + Optional: true, + Description: `Governs when to update data profiles when the inspection rules defined by the 'InspectTemplate' change. If not set, changing the template will not cause a data profile to update.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "frequency": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"UPDATE_FREQUENCY_NEVER", "UPDATE_FREQUENCY_DAILY", "UPDATE_FREQUENCY_MONTHLY"}), + Description: `How frequently data profiles can be updated when the template is modified. Defaults to never. 
Possible values: ["UPDATE_FREQUENCY_NEVER", "UPDATE_FREQUENCY_DAILY", "UPDATE_FREQUENCY_MONTHLY"]`, + }, + }, + }, + }, "refresh_frequency": { Type: schema.TypeString, Optional: true, @@ -651,6 +747,193 @@ func ResourceDataLossPreventionDiscoveryConfig() *schema.Resource { }, }, }, + "cloud_storage_target": { + Type: schema.TypeList, + Optional: true, + Description: `Cloud Storage target for Discovery. The first target to match a bucket will be the one applied.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "filter": { + Type: schema.TypeList, + Required: true, + Description: `The buckets the generation_cadence applies to. The first target with a matching filter will be the one to apply to a bucket.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cloud_storage_resource_reference": { + Type: schema.TypeList, + Optional: true, + Description: `The bucket to scan. Targets including this can only include one target (the target with this bucket). This enables profiling the contents of a single bucket, while the other options allow for easy profiling of many buckets within a project or an organization.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket_name": { + Type: schema.TypeString, + Optional: true, + Description: `The bucket to scan.`, + }, + "project_id": { + Type: schema.TypeString, + Optional: true, + Description: `If within a project-level config, then this must match the config's project id.`, + }, + }, + }, + }, + "collection": { + Type: schema.TypeList, + Optional: true, + Description: `A specific set of buckets for this filter to apply to.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "include_regexes": { + Type: schema.TypeList, + Optional: true, + Description: `A collection of regular expressions to match a file store against.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "patterns": { + Type: schema.TypeList, + Optional: true, + Description: `The group of regular expression patterns to match against one or more file stores. Maximum of 100 entries. The sum of all lengths of regular expressions can't exceed 10 KiB.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cloud_storage_regex": { + Type: schema.TypeList, + Optional: true, + Description: `Regex for Cloud Storage.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket_name_regex": { + Type: schema.TypeString, + Optional: true, + Description: `Regex to test the bucket name against. If empty, all buckets match. 
Example: "marketing2021" or "(marketing)\d{4}" will both match the bucket gs://marketing2021`, + }, + "project_id_regex": { + Type: schema.TypeString, + Optional: true, + Description: `For organizations, if unset, will match all projects.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "others": { + Type: schema.TypeList, + Optional: true, + Description: `Match discovery resources not covered by any other filter.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + }, + }, + }, + "conditions": { + Type: schema.TypeList, + Optional: true, + Description: `In addition to matching the filter, these conditions must be true before a profile is generated.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cloud_storage_conditions": { + Type: schema.TypeList, + Optional: true, + Description: `Cloud Storage conditions.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "included_bucket_attributes": { + Type: schema.TypeList, + Optional: true, + Description: `Only objects with the specified attributes will be scanned. Defaults to [ALL_SUPPORTED_BUCKETS] if unset. Possible values: ["ALL_SUPPORTED_BUCKETS", "AUTOCLASS_DISABLED", "AUTOCLASS_ENABLED"]`, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidateEnum([]string{"ALL_SUPPORTED_BUCKETS", "AUTOCLASS_DISABLED", "AUTOCLASS_ENABLED"}), + }, + }, + "included_object_attributes": { + Type: schema.TypeList, + Optional: true, + Description: `Only objects with the specified attributes will be scanned. If an object has one of the specified attributes but is inside an excluded bucket, it will not be scanned. Defaults to [ALL_SUPPORTED_OBJECTS]. A profile will be created even if no objects match the included_object_attributes. Possible values: ["ALL_SUPPORTED_OBJECTS", "STANDARD", "NEARLINE", "COLDLINE", "ARCHIVE", "REGIONAL", "MULTI_REGIONAL", "DURABLE_REDUCED_AVAILABILITY"]`, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidateEnum([]string{"ALL_SUPPORTED_OBJECTS", "STANDARD", "NEARLINE", "COLDLINE", "ARCHIVE", "REGIONAL", "MULTI_REGIONAL", "DURABLE_REDUCED_AVAILABILITY"}), + }, + }, + }, + }, + }, + "created_after": { + Type: schema.TypeString, + Optional: true, + Description: `File store must have been created after this date. Used to avoid backfilling. A timestamp in RFC3339 UTC "Zulu" format with nanosecond resolution and upto nine fractional digits.`, + }, + "min_age": { + Type: schema.TypeString, + Optional: true, + Description: `Duration format. Minimum age a file store must have. If set, the value must be 1 hour or greater.`, + }, + }, + }, + }, + "disabled": { + Type: schema.TypeList, + Optional: true, + Description: `Disable profiling for buckets that match this filter.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "generation_cadence": { + Type: schema.TypeList, + Optional: true, + Description: `How often and when to update profiles. New buckets that match both the filter and conditions are scanned as quickly as possible depending on system capacity.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "inspect_template_modified_cadence": { + Type: schema.TypeList, + Optional: true, + Description: `Governs when to update data profiles when the inspection rules defined by the 'InspectTemplate' change. 
If not set, changing the template will not cause a data profile to update.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "frequency": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"UPDATE_FREQUENCY_NEVER", "UPDATE_FREQUENCY_DAILY", "UPDATE_FREQUENCY_MONTHLY", ""}), + Description: `How frequently data profiles can be updated when the template is modified. Defaults to never. Possible values: ["UPDATE_FREQUENCY_NEVER", "UPDATE_FREQUENCY_DAILY", "UPDATE_FREQUENCY_MONTHLY"]`, + }, + }, + }, + }, + "refresh_frequency": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"UPDATE_FREQUENCY_NEVER", "UPDATE_FREQUENCY_DAILY", "UPDATE_FREQUENCY_MONTHLY", ""}), + Description: `Data changes in Cloud Storage can't trigger reprofiling. If you set this field, profiles are refreshed at this frequency regardless of whether the underlying buckets have changes. Defaults to never. Possible values: ["UPDATE_FREQUENCY_NEVER", "UPDATE_FREQUENCY_DAILY", "UPDATE_FREQUENCY_MONTHLY"]`, + }, + }, + }, + }, + }, + }, + }, "secrets_target": { Type: schema.TypeList, Optional: true, @@ -1173,6 +1456,7 @@ func flattenDataLossPreventionDiscoveryConfigActions(v interface{}, d *schema.Re transformed = append(transformed, map[string]interface{}{ "export_data": flattenDataLossPreventionDiscoveryConfigActionsExportData(original["exportData"], d, config), "pub_sub_notification": flattenDataLossPreventionDiscoveryConfigActionsPubSubNotification(original["pubSubNotification"], d, config), + "tag_resources": flattenDataLossPreventionDiscoveryConfigActionsTagResources(original["tagResources"], d, config), }) } return transformed @@ -1309,7 +1593,24 @@ func flattenDataLossPreventionDiscoveryConfigActionsPubSubNotificationDetailOfMe return v } -func flattenDataLossPreventionDiscoveryConfigTargets(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenDataLossPreventionDiscoveryConfigActionsTagResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["tag_conditions"] = + flattenDataLossPreventionDiscoveryConfigActionsTagResourcesTagConditions(original["tagConditions"], d, config) + transformed["profile_generations_to_tag"] = + flattenDataLossPreventionDiscoveryConfigActionsTagResourcesProfileGenerationsToTag(original["profileGenerationsToTag"], d, config) + transformed["lower_data_risk_to_low"] = + flattenDataLossPreventionDiscoveryConfigActionsTagResourcesLowerDataRiskToLow(original["lowerDataRiskToLow"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDiscoveryConfigActionsTagResourcesTagConditions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1322,14 +1623,13 @@ func flattenDataLossPreventionDiscoveryConfigTargets(v interface{}, d *schema.Re continue } transformed = append(transformed, map[string]interface{}{ - "big_query_target": flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTarget(original["bigQueryTarget"], d, config), - "cloud_sql_target": flattenDataLossPreventionDiscoveryConfigTargetsCloudSqlTarget(original["cloudSqlTarget"], d, config), - "secrets_target": flattenDataLossPreventionDiscoveryConfigTargetsSecretsTarget(original["secretsTarget"], d, 
config), + "tag": flattenDataLossPreventionDiscoveryConfigActionsTagResourcesTagConditionsTag(original["tag"], d, config), + "sensitivity_score": flattenDataLossPreventionDiscoveryConfigActionsTagResourcesTagConditionsSensitivityScore(original["sensitivityScore"], d, config), }) } return transformed } -func flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTarget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenDataLossPreventionDiscoveryConfigActionsTagResourcesTagConditionsTag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1338,17 +1638,15 @@ func flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTarget(v interface{} return nil } transformed := make(map[string]interface{}) - transformed["filter"] = - flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetFilter(original["filter"], d, config) - transformed["conditions"] = - flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetConditions(original["conditions"], d, config) - transformed["cadence"] = - flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetCadence(original["cadence"], d, config) - transformed["disabled"] = - flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetDisabled(original["disabled"], d, config) + transformed["namespaced_value"] = + flattenDataLossPreventionDiscoveryConfigActionsTagResourcesTagConditionsTagNamespacedValue(original["namespacedValue"], d, config) return []interface{}{transformed} } -func flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenDataLossPreventionDiscoveryConfigActionsTagResourcesTagConditionsTagNamespacedValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDiscoveryConfigActionsTagResourcesTagConditionsSensitivityScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1357,23 +1655,88 @@ func flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetFilter(v inter return nil } transformed := make(map[string]interface{}) - transformed["tables"] = - flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetFilterTables(original["tables"], d, config) - transformed["other_tables"] = - flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetFilterOtherTables(original["otherTables"], d, config) - transformed["table_reference"] = - flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetFilterTableReference(original["tableReference"], d, config) + transformed["score"] = + flattenDataLossPreventionDiscoveryConfigActionsTagResourcesTagConditionsSensitivityScoreScore(original["score"], d, config) return []interface{}{transformed} } -func flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetFilterTables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenDataLossPreventionDiscoveryConfigActionsTagResourcesTagConditionsSensitivityScoreScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDiscoveryConfigActionsTagResourcesProfileGenerationsToTag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDiscoveryConfigActionsTagResourcesLowerDataRiskToLow(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDiscoveryConfigTargets(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil + return v } - transformed := make(map[string]interface{}) + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "big_query_target": flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTarget(original["bigQueryTarget"], d, config), + "cloud_sql_target": flattenDataLossPreventionDiscoveryConfigTargetsCloudSqlTarget(original["cloudSqlTarget"], d, config), + "secrets_target": flattenDataLossPreventionDiscoveryConfigTargetsSecretsTarget(original["secretsTarget"], d, config), + "cloud_storage_target": flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTarget(original["cloudStorageTarget"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTarget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["filter"] = + flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetFilter(original["filter"], d, config) + transformed["conditions"] = + flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetConditions(original["conditions"], d, config) + transformed["cadence"] = + flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetCadence(original["cadence"], d, config) + transformed["disabled"] = + flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetDisabled(original["disabled"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["tables"] = + flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetFilterTables(original["tables"], d, config) + transformed["other_tables"] = + flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetFilterOtherTables(original["otherTables"], d, config) + transformed["table_reference"] = + flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetFilterTableReference(original["tableReference"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetFilterTables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) transformed["include_regexes"] = flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetFilterTablesIncludeRegexes(original["includeRegexes"], d, config) return []interface{}{transformed} @@ -1547,6 +1910,8 @@ func flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetCadence(v inte 
flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetCadenceSchemaModifiedCadence(original["schemaModifiedCadence"], d, config) transformed["table_modified_cadence"] = flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetCadenceTableModifiedCadence(original["tableModifiedCadence"], d, config) + transformed["inspect_template_modified_cadence"] = + flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetCadenceInspectTemplateModifiedCadence(original["inspectTemplateModifiedCadence"], d, config) return []interface{}{transformed} } func flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetCadenceSchemaModifiedCadence(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { @@ -1595,6 +1960,23 @@ func flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetCadenceTableMo return v } +func flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetCadenceInspectTemplateModifiedCadence(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["frequency"] = + flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetCadenceInspectTemplateModifiedCadenceFrequency(original["frequency"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetCadenceInspectTemplateModifiedCadenceFrequency(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenDataLossPreventionDiscoveryConfigTargetsBigQueryTargetDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil @@ -1781,6 +2163,8 @@ func flattenDataLossPreventionDiscoveryConfigTargetsCloudSqlTargetGenerationCade flattenDataLossPreventionDiscoveryConfigTargetsCloudSqlTargetGenerationCadenceSchemaModifiedCadence(original["schemaModifiedCadence"], d, config) transformed["refresh_frequency"] = flattenDataLossPreventionDiscoveryConfigTargetsCloudSqlTargetGenerationCadenceRefreshFrequency(original["refreshFrequency"], d, config) + transformed["inspect_template_modified_cadence"] = + flattenDataLossPreventionDiscoveryConfigTargetsCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence(original["inspectTemplateModifiedCadence"], d, config) return []interface{}{transformed} } func flattenDataLossPreventionDiscoveryConfigTargetsCloudSqlTargetGenerationCadenceSchemaModifiedCadence(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { @@ -1810,6 +2194,23 @@ func flattenDataLossPreventionDiscoveryConfigTargetsCloudSqlTargetGenerationCade return v } +func flattenDataLossPreventionDiscoveryConfigTargetsCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["frequency"] = + flattenDataLossPreventionDiscoveryConfigTargetsCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceFrequency(original["frequency"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDiscoveryConfigTargetsCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceFrequency(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + func flattenDataLossPreventionDiscoveryConfigTargetsCloudSqlTargetDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil @@ -1826,7 +2227,69 @@ func flattenDataLossPreventionDiscoveryConfigTargetsSecretsTarget(v interface{}, return []interface{}{transformed} } -func flattenDataLossPreventionDiscoveryConfigErrors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTarget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["filter"] = + flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilter(original["filter"], d, config) + transformed["conditions"] = + flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetConditions(original["conditions"], d, config) + transformed["generation_cadence"] = + flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetGenerationCadence(original["generationCadence"], d, config) + transformed["disabled"] = + flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetDisabled(original["disabled"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["collection"] = + flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCollection(original["collection"], d, config) + transformed["cloud_storage_resource_reference"] = + flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCloudStorageResourceReference(original["cloudStorageResourceReference"], d, config) + transformed["others"] = + flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterOthers(original["others"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCollection(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["include_regexes"] = + flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCollectionIncludeRegexes(original["includeRegexes"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCollectionIncludeRegexes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["patterns"] = + flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCollectionIncludeRegexesPatterns(original["patterns"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCollectionIncludeRegexesPatterns(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1839,13 +2302,12 @@ func flattenDataLossPreventionDiscoveryConfigErrors(v interface{}, d *schema.Res continue } transformed = append(transformed, map[string]interface{}{ - "details": flattenDataLossPreventionDiscoveryConfigErrorsDetails(original["details"], d, config), - "timestamp": flattenDataLossPreventionDiscoveryConfigErrorsTimestamp(original["timestamp"], d, config), + "cloud_storage_regex": flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCollectionIncludeRegexesPatternsCloudStorageRegex(original["cloudStorageRegex"], d, config), }) } return transformed } -func flattenDataLossPreventionDiscoveryConfigErrorsDetails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCollectionIncludeRegexesPatternsCloudStorageRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1854,91 +2316,256 @@ func flattenDataLossPreventionDiscoveryConfigErrorsDetails(v interface{}, d *sch return nil } transformed := make(map[string]interface{}) - transformed["code"] = - flattenDataLossPreventionDiscoveryConfigErrorsDetailsCode(original["code"], d, config) - transformed["message"] = - flattenDataLossPreventionDiscoveryConfigErrorsDetailsMessage(original["message"], d, config) - transformed["details"] = - flattenDataLossPreventionDiscoveryConfigErrorsDetailsDetails(original["details"], d, config) + transformed["project_id_regex"] = + flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCollectionIncludeRegexesPatternsCloudStorageRegexProjectIdRegex(original["projectIdRegex"], d, config) + transformed["bucket_name_regex"] = + flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCollectionIncludeRegexesPatternsCloudStorageRegexBucketNameRegex(original["bucketNameRegex"], d, config) return []interface{}{transformed} } -func flattenDataLossPreventionDiscoveryConfigErrorsDetailsCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise +func flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCollectionIncludeRegexesPatternsCloudStorageRegexProjectIdRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } -func flattenDataLossPreventionDiscoveryConfigErrorsDetailsMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCollectionIncludeRegexesPatternsCloudStorageRegexBucketNameRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataLossPreventionDiscoveryConfigErrorsDetailsDetails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCloudStorageResourceReference(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := 
v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["bucket_name"] = + flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCloudStorageResourceReferenceBucketName(original["bucketName"], d, config) + transformed["project_id"] = + flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCloudStorageResourceReferenceProjectId(original["projectId"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCloudStorageResourceReferenceBucketName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataLossPreventionDiscoveryConfigErrorsTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCloudStorageResourceReferenceProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataLossPreventionDiscoveryConfigCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v +func flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterOthers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} } -func flattenDataLossPreventionDiscoveryConfigUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetConditions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["created_after"] = + flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetConditionsCreatedAfter(original["createdAfter"], d, config) + transformed["min_age"] = + flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetConditionsMinAge(original["minAge"], d, config) + transformed["cloud_storage_conditions"] = + flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetConditionsCloudStorageConditions(original["cloudStorageConditions"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetConditionsCreatedAfter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataLossPreventionDiscoveryConfigLastRunTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetConditionsMinAge(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataLossPreventionDiscoveryConfigStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetConditionsCloudStorageConditions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["included_object_attributes"] = 
+ flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetConditionsCloudStorageConditionsIncludedObjectAttributes(original["includedObjectAttributes"], d, config) + transformed["included_bucket_attributes"] = + flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetConditionsCloudStorageConditionsIncludedBucketAttributes(original["includedBucketAttributes"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetConditionsCloudStorageConditionsIncludedObjectAttributes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandDataLossPreventionDiscoveryConfigDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil +func flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetConditionsCloudStorageConditionsIncludedBucketAttributes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } -func expandDataLossPreventionDiscoveryConfigOrgConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil +func flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetGenerationCadence(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil } - raw := l[0] - original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) + transformed["refresh_frequency"] = + flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetGenerationCadenceRefreshFrequency(original["refreshFrequency"], d, config) + transformed["inspect_template_modified_cadence"] = + flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetGenerationCadenceInspectTemplateModifiedCadence(original["inspectTemplateModifiedCadence"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetGenerationCadenceRefreshFrequency(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} - transformedProjectId, err := expandDataLossPreventionDiscoveryConfigOrgConfigProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["projectId"] = transformedProjectId +func flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetGenerationCadenceInspectTemplateModifiedCadence(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil } - - transformedLocation, err := expandDataLossPreventionDiscoveryConfigOrgConfigLocation(original["location"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["location"] = transformedLocation + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil } - - return transformed, nil + transformed := make(map[string]interface{}) + transformed["frequency"] = + flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetGenerationCadenceInspectTemplateModifiedCadenceFrequency(original["frequency"], d, config) + return 
[]interface{}{transformed} +} +func flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetGenerationCadenceInspectTemplateModifiedCadenceFrequency(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } -func expandDataLossPreventionDiscoveryConfigOrgConfigProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil +func flattenDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenDataLossPreventionDiscoveryConfigErrors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "details": flattenDataLossPreventionDiscoveryConfigErrorsDetails(original["details"], d, config), + "timestamp": flattenDataLossPreventionDiscoveryConfigErrorsTimestamp(original["timestamp"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionDiscoveryConfigErrorsDetails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["code"] = + flattenDataLossPreventionDiscoveryConfigErrorsDetailsCode(original["code"], d, config) + transformed["message"] = + flattenDataLossPreventionDiscoveryConfigErrorsDetailsMessage(original["message"], d, config) + transformed["details"] = + flattenDataLossPreventionDiscoveryConfigErrorsDetailsDetails(original["details"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDiscoveryConfigErrorsDetailsCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDiscoveryConfigErrorsDetailsMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDiscoveryConfigErrorsDetailsDetails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDiscoveryConfigErrorsTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDiscoveryConfigCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDiscoveryConfigUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDiscoveryConfigLastRunTime(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDiscoveryConfigStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDataLossPreventionDiscoveryConfigDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDiscoveryConfigOrgConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedProjectId, err := expandDataLossPreventionDiscoveryConfigOrgConfigProjectId(original["project_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["projectId"] = transformedProjectId + } + + transformedLocation, err := expandDataLossPreventionDiscoveryConfigOrgConfigLocation(original["location"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["location"] = transformedLocation + } + + return transformed, nil +} + +func expandDataLossPreventionDiscoveryConfigOrgConfigProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil } func expandDataLossPreventionDiscoveryConfigOrgConfigLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { @@ -2003,6 +2630,13 @@ func expandDataLossPreventionDiscoveryConfigActions(v interface{}, d tpgresource transformed["pubSubNotification"] = transformedPubSubNotification } + transformedTagResources, err := expandDataLossPreventionDiscoveryConfigActionsTagResources(original["tag_resources"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTagResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tagResources"] = transformedTagResources + } + req = append(req, transformed) } return req, nil @@ -2210,6 +2844,122 @@ func expandDataLossPreventionDiscoveryConfigActionsPubSubNotificationDetailOfMes return v, nil } +func expandDataLossPreventionDiscoveryConfigActionsTagResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTagConditions, err := expandDataLossPreventionDiscoveryConfigActionsTagResourcesTagConditions(original["tag_conditions"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTagConditions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tagConditions"] = transformedTagConditions + } + + transformedProfileGenerationsToTag, err := expandDataLossPreventionDiscoveryConfigActionsTagResourcesProfileGenerationsToTag(original["profile_generations_to_tag"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProfileGenerationsToTag); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["profileGenerationsToTag"] = 
transformedProfileGenerationsToTag + } + + transformedLowerDataRiskToLow, err := expandDataLossPreventionDiscoveryConfigActionsTagResourcesLowerDataRiskToLow(original["lower_data_risk_to_low"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLowerDataRiskToLow); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["lowerDataRiskToLow"] = transformedLowerDataRiskToLow + } + + return transformed, nil +} + +func expandDataLossPreventionDiscoveryConfigActionsTagResourcesTagConditions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTag, err := expandDataLossPreventionDiscoveryConfigActionsTagResourcesTagConditionsTag(original["tag"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tag"] = transformedTag + } + + transformedSensitivityScore, err := expandDataLossPreventionDiscoveryConfigActionsTagResourcesTagConditionsSensitivityScore(original["sensitivity_score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSensitivityScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sensitivityScore"] = transformedSensitivityScore + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionDiscoveryConfigActionsTagResourcesTagConditionsTag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNamespacedValue, err := expandDataLossPreventionDiscoveryConfigActionsTagResourcesTagConditionsTagNamespacedValue(original["namespaced_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNamespacedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["namespacedValue"] = transformedNamespacedValue + } + + return transformed, nil +} + +func expandDataLossPreventionDiscoveryConfigActionsTagResourcesTagConditionsTagNamespacedValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDiscoveryConfigActionsTagResourcesTagConditionsSensitivityScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScore, err := expandDataLossPreventionDiscoveryConfigActionsTagResourcesTagConditionsSensitivityScoreScore(original["score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["score"] = transformedScore + } + + return transformed, nil +} + +func expandDataLossPreventionDiscoveryConfigActionsTagResourcesTagConditionsSensitivityScoreScore(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDiscoveryConfigActionsTagResourcesProfileGenerationsToTag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDiscoveryConfigActionsTagResourcesLowerDataRiskToLow(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandDataLossPreventionDiscoveryConfigTargets(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) @@ -2241,6 +2991,13 @@ func expandDataLossPreventionDiscoveryConfigTargets(v interface{}, d tpgresource transformed["secretsTarget"] = transformedSecretsTarget } + transformedCloudStorageTarget, err := expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTarget(original["cloud_storage_target"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCloudStorageTarget); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cloudStorageTarget"] = transformedCloudStorageTarget + } + req = append(req, transformed) } return req, nil @@ -2582,6 +3339,13 @@ func expandDataLossPreventionDiscoveryConfigTargetsBigQueryTargetCadence(v inter transformed["tableModifiedCadence"] = transformedTableModifiedCadence } + transformedInspectTemplateModifiedCadence, err := expandDataLossPreventionDiscoveryConfigTargetsBigQueryTargetCadenceInspectTemplateModifiedCadence(original["inspect_template_modified_cadence"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInspectTemplateModifiedCadence); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["inspectTemplateModifiedCadence"] = transformedInspectTemplateModifiedCadence + } + return transformed, nil } @@ -2653,6 +3417,29 @@ func expandDataLossPreventionDiscoveryConfigTargetsBigQueryTargetCadenceTableMod return v, nil } +func expandDataLossPreventionDiscoveryConfigTargetsBigQueryTargetCadenceInspectTemplateModifiedCadence(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFrequency, err := expandDataLossPreventionDiscoveryConfigTargetsBigQueryTargetCadenceInspectTemplateModifiedCadenceFrequency(original["frequency"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFrequency); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["frequency"] = transformedFrequency + } + + return transformed, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsBigQueryTargetCadenceInspectTemplateModifiedCadenceFrequency(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandDataLossPreventionDiscoveryConfigTargetsBigQueryTargetDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { @@ -2966,6 +3753,13 @@ func expandDataLossPreventionDiscoveryConfigTargetsCloudSqlTargetGenerationCaden transformed["refreshFrequency"] = transformedRefreshFrequency } + 
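+ // Expand the optional inspect_template_modified_cadence block into the
+ // API's inspectTemplateModifiedCadence field; the IsEmptyValue guard below
+ // drops it from the request when the block is unset.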
transformedInspectTemplateModifiedCadence, err := expandDataLossPreventionDiscoveryConfigTargetsCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence(original["inspect_template_modified_cadence"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInspectTemplateModifiedCadence); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["inspectTemplateModifiedCadence"] = transformedInspectTemplateModifiedCadence + } + return transformed, nil } @@ -3007,22 +3801,404 @@ func expandDataLossPreventionDiscoveryConfigTargetsCloudSqlTargetGenerationCaden return v, nil } -func expandDataLossPreventionDiscoveryConfigTargetsCloudSqlTargetDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDataLossPreventionDiscoveryConfigTargetsCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) - if len(l) == 0 { + if len(l) == 0 || l[0] == nil { return nil, nil } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) - if l[0] == nil { - transformed := make(map[string]interface{}) - return transformed, nil + transformedFrequency, err := expandDataLossPreventionDiscoveryConfigTargetsCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceFrequency(original["frequency"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFrequency); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["frequency"] = transformedFrequency } - transformed := make(map[string]interface{}) return transformed, nil } -func expandDataLossPreventionDiscoveryConfigTargetsSecretsTarget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDataLossPreventionDiscoveryConfigTargetsCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceFrequency(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsCloudSqlTargetDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsSecretsTarget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTarget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFilter, err := expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilter(original["filter"], d, config) + if err != nil { + return nil, err + } 
else if val := reflect.ValueOf(transformedFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["filter"] = transformedFilter + } + + transformedConditions, err := expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetConditions(original["conditions"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConditions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["conditions"] = transformedConditions + } + + transformedGenerationCadence, err := expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetGenerationCadence(original["generation_cadence"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGenerationCadence); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["generationCadence"] = transformedGenerationCadence + } + + transformedDisabled, err := expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetDisabled(original["disabled"], d, config) + if err != nil { + return nil, err + } else { + transformed["disabled"] = transformedDisabled + } + + return transformed, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCollection, err := expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCollection(original["collection"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCollection); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["collection"] = transformedCollection + } + + transformedCloudStorageResourceReference, err := expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCloudStorageResourceReference(original["cloud_storage_resource_reference"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCloudStorageResourceReference); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cloudStorageResourceReference"] = transformedCloudStorageResourceReference + } + + transformedOthers, err := expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterOthers(original["others"], d, config) + if err != nil { + return nil, err + } else { + transformed["others"] = transformedOthers + } + + return transformed, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCollection(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIncludeRegexes, err := expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCollectionIncludeRegexes(original["include_regexes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIncludeRegexes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["includeRegexes"] = transformedIncludeRegexes + } + + return transformed, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCollectionIncludeRegexes(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPatterns, err := expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCollectionIncludeRegexesPatterns(original["patterns"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPatterns); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["patterns"] = transformedPatterns + } + + return transformed, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCollectionIncludeRegexesPatterns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCloudStorageRegex, err := expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCollectionIncludeRegexesPatternsCloudStorageRegex(original["cloud_storage_regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCloudStorageRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cloudStorageRegex"] = transformedCloudStorageRegex + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCollectionIncludeRegexesPatternsCloudStorageRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedProjectIdRegex, err := expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCollectionIncludeRegexesPatternsCloudStorageRegexProjectIdRegex(original["project_id_regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectIdRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["projectIdRegex"] = transformedProjectIdRegex + } + + transformedBucketNameRegex, err := expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCollectionIncludeRegexesPatternsCloudStorageRegexBucketNameRegex(original["bucket_name_regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBucketNameRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bucketNameRegex"] = transformedBucketNameRegex + } + + return transformed, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCollectionIncludeRegexesPatternsCloudStorageRegexProjectIdRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCollectionIncludeRegexesPatternsCloudStorageRegexBucketNameRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCloudStorageResourceReference(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBucketName, err := expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCloudStorageResourceReferenceBucketName(original["bucket_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBucketName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bucketName"] = transformedBucketName + } + + transformedProjectId, err := expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCloudStorageResourceReferenceProjectId(original["project_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["projectId"] = transformedProjectId + } + + return transformed, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCloudStorageResourceReferenceBucketName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterCloudStorageResourceReferenceProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetFilterOthers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetConditions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCreatedAfter, err := expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetConditionsCreatedAfter(original["created_after"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCreatedAfter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["createdAfter"] = transformedCreatedAfter + } + + transformedMinAge, err := expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetConditionsMinAge(original["min_age"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinAge); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minAge"] = transformedMinAge + } + + transformedCloudStorageConditions, err := expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetConditionsCloudStorageConditions(original["cloud_storage_conditions"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCloudStorageConditions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cloudStorageConditions"] = transformedCloudStorageConditions + } + + return transformed, nil +} + +func 
expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetConditionsCreatedAfter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetConditionsMinAge(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetConditionsCloudStorageConditions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIncludedObjectAttributes, err := expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetConditionsCloudStorageConditionsIncludedObjectAttributes(original["included_object_attributes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIncludedObjectAttributes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["includedObjectAttributes"] = transformedIncludedObjectAttributes + } + + transformedIncludedBucketAttributes, err := expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetConditionsCloudStorageConditionsIncludedBucketAttributes(original["included_bucket_attributes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIncludedBucketAttributes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["includedBucketAttributes"] = transformedIncludedBucketAttributes + } + + return transformed, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetConditionsCloudStorageConditionsIncludedObjectAttributes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetConditionsCloudStorageConditionsIncludedBucketAttributes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetGenerationCadence(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRefreshFrequency, err := expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetGenerationCadenceRefreshFrequency(original["refresh_frequency"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRefreshFrequency); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["refreshFrequency"] = transformedRefreshFrequency + } + + transformedInspectTemplateModifiedCadence, err := expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetGenerationCadenceInspectTemplateModifiedCadence(original["inspect_template_modified_cadence"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInspectTemplateModifiedCadence); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["inspectTemplateModifiedCadence"] = transformedInspectTemplateModifiedCadence + } + + return 
transformed, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetGenerationCadenceRefreshFrequency(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetGenerationCadenceInspectTemplateModifiedCadence(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFrequency, err := expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetGenerationCadenceInspectTemplateModifiedCadenceFrequency(original["frequency"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFrequency); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["frequency"] = transformedFrequency + } + + return transformed, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetGenerationCadenceInspectTemplateModifiedCadenceFrequency(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDiscoveryConfigTargetsCloudStorageTargetDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datalossprevention/resource_data_loss_prevention_discovery_config_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datalossprevention/resource_data_loss_prevention_discovery_config_sweeper.go index a1a9bc7c344..0384c9130a2 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datalossprevention/resource_data_loss_prevention_discovery_config_sweeper.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datalossprevention/resource_data_loss_prevention_discovery_config_sweeper.go @@ -1,29 +1,12 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 - -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - package datalossprevention import ( "context" "log" "strings" - "testing" - "github.com/hashicorp/terraform-provider-google/google/envvar" "github.com/hashicorp/terraform-provider-google/google/sweeper" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" @@ -50,21 +33,17 @@ func testSweepDataLossPreventionDiscoveryConfig(region string) error { return err } - t := &testing.T{} - billingId := envvar.GetTestBillingAccountFromEnv(t) - // Setup variables to replace in list template d := &tpgresource.ResourceDataMock{ FieldsInSchema: map[string]interface{}{ - "project": config.Project, - "region": region, - "location": region, - "zone": "-", - "billing_account": billingId, + "project": config.Project, + "region": region, + "location": region, + "zone": "-", }, } - listTemplate := strings.Split("https://dlp.googleapis.com/v2/{{parent}}/discoveryConfigs", "?")[0] + listTemplate := strings.Split("https://dlp.googleapis.com/v2/projects/{{project}}/locations/{{location}}/discoveryConfigs", "?")[0] listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) if err != nil { log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) @@ -92,8 +71,6 @@ func testSweepDataLossPreventionDiscoveryConfig(region string) error { rl := resourceList.([]interface{}) log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) - // Keep count of items that aren't sweepable for logging. - nonPrefixCount := 0 for _, ri := range rl { obj := ri.(map[string]interface{}) if obj["name"] == nil { @@ -101,14 +78,11 @@ func testSweepDataLossPreventionDiscoveryConfig(region string) error { return nil } + // Note that we do not check for a sweepable prefix here. + // We can have at most 1 DiscoveryConfig for a storage type in the same project/location, so ensure we delete everything. 
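+		// Illustrative example (hypothetical ID, not from a real API response): a list-response name such as +		// "projects/my-project/locations/us-central1/discoveryConfigs/1234567890" +		// reduces to the short name "1234567890" via GetResourceNameFromSelfLink below.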
name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) - // Skip resources that shouldn't be sweeped - if !sweeper.IsSweepableTestResource(name) { - nonPrefixCount++ - continue - } - deleteTemplate := "https://dlp.googleapis.com/v2/{{parent}}/discoveryConfigs/{{name}}" + deleteTemplate := "https://dlp.googleapis.com/v2/projects/{{project}}/locations/{{location}}/discoveryConfigs/{{name}}" deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) if err != nil { log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) @@ -130,10 +104,5 @@ func testSweepDataLossPreventionDiscoveryConfig(region string) error { log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) } } - - if nonPrefixCount > 0 { - log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) - } - return nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datalossprevention/resource_data_loss_prevention_job_trigger.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datalossprevention/resource_data_loss_prevention_job_trigger.go index 698f4622bd0..74639223355 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datalossprevention/resource_data_loss_prevention_job_trigger.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datalossprevention/resource_data_loss_prevention_job_trigger.go @@ -5801,7 +5801,6 @@ func expandDataLossPreventionJobTriggerInspectJobActionsPublishToStackdriver(v i } func resourceDataLossPreventionJobTriggerEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - newObj := make(map[string]interface{}) newObj["jobTrigger"] = obj triggerIdProp, ok := d.GetOk("trigger_id") diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_datascan.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_datascan.go index 4d830c0954c..9119e5497a2 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_datascan.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_datascan.go @@ -51,7 +51,7 @@ func ResourceDataplexDatascan() *schema.Resource { }, CustomizeDiff: customdiff.All( - tpgresource.SetLabelsDiff, + tpgresource.SetLabelsDiffWithoutAttributionLabel, tpgresource.DefaultProviderProject, ), diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_task.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_task.go index 2e1599dbd03..a9a3c3558ac 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_task.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_task.go @@ -51,7 +51,7 @@ func ResourceDataplexTask() *schema.Resource { }, CustomizeDiff: customdiff.All( - tpgresource.SetLabelsDiff, + 
tpgresource.SetLabelsDiffWithoutAttributionLabel, tpgresource.DefaultProviderProject, ), diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastore/datastore_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/dataproc_operation.go similarity index 62% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastore/datastore_operation.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/dataproc_operation.go index 81f2507266c..3a19d4e9820 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastore/datastore_operation.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/dataproc_operation.go @@ -15,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package datastore +package dataproc import ( "encoding/json" @@ -27,32 +27,31 @@ import ( transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) -type DatastoreOperationWaiter struct { +type DataprocOperationWaiter struct { Config *transport_tpg.Config UserAgent string Project string tpgresource.CommonOperationWaiter } -func (w *DatastoreOperationWaiter) QueryOp() (interface{}, error) { +func (w *DataprocOperationWaiter) QueryOp() (interface{}, error) { if w == nil { return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") } // Returns the proper get. - url := fmt.Sprintf("%s%s", w.Config.DatastoreBasePath, w.CommonOperationWaiter.Op.Name) + url := fmt.Sprintf("%s%s", w.Config.DataprocBasePath, w.CommonOperationWaiter.Op.Name) return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: w.Config, - Method: "GET", - Project: w.Project, - RawURL: url, - UserAgent: w.UserAgent, - ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.DatastoreIndex409Contention}, + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, }) } -func createDatastoreWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*DatastoreOperationWaiter, error) { - w := &DatastoreOperationWaiter{ +func createDataprocWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*DataprocOperationWaiter, error) { + w := &DataprocOperationWaiter{ Config: config, UserAgent: userAgent, Project: project, @@ -64,8 +63,8 @@ func createDatastoreWaiter(config *transport_tpg.Config, op map[string]interface } // nolint: deadcode,unused -func DatastoreOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - w, err := createDatastoreWaiter(config, op, project, activity, userAgent) +func DataprocOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createDataprocWaiter(config, op, project, activity, userAgent) if err != nil { return err } @@ -79,12 +78,12 @@ func DatastoreOperationWaitTimeWithResponse(config *transport_tpg.Config, op map return json.Unmarshal(rawResponse, response) } -func 
DatastoreOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { +func DataprocOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { if val, ok := op["name"]; !ok || val == "" { // This was a synchronous call - there is no operation to wait for. return nil } - w, err := createDatastoreWaiter(config, op, project, activity, userAgent) + w, err := createDataprocWaiter(config, op, project, activity, userAgent) if err != nil { // If w is nil, the op was synchronous. return err diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_batch.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_batch.go new file mode 100644 index 00000000000..40f751f175c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_batch.go @@ -0,0 +1,2011 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package dataproc + +import ( + "fmt" + "log" + "net/http" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +/* + * The Dataproc Batch API appends a subminor version to the provided + * version. We are suppressing this server-generated subminor. + */ +func CloudDataprocBatchRuntimeConfigVersionDiffSuppressFunc(old, new string) bool { + if old != "" && strings.HasPrefix(new, old) || (new != "" && strings.HasPrefix(old, new)) { + return true + } + + return old == new +} + +func CloudDataprocBatchRuntimeConfigVersionDiffSuppress(_, old, new string, d *schema.ResourceData) bool { + return CloudDataprocBatchRuntimeConfigVersionDiffSuppressFunc(old, new) +} + +func ResourceDataprocBatch() *schema.Resource { + return &schema.Resource{ + Create: resourceDataprocBatchCreate, + Read: resourceDataprocBatchRead, + Update: resourceDataprocBatchUpdate, + Delete: resourceDataprocBatchDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataprocBatchImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(5 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.SetLabelsDiff, + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "batch_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The ID to use for the batch, which will become the final component of the batch's resource name.
+This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.`, + }, + "environment_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Environment configuration for the batch execution.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "execution_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Execution configuration for a workload.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The Cloud KMS key to use for encryption.`, + }, + "network_tags": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Tags used for network traffic control.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "network_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Network configuration for workload execution.`, + ConflictsWith: []string{"environment_config.0.execution_config.0.subnetwork_uri"}, + }, + "service_account": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Service account used to execute the workload.`, + }, + "staging_bucket": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `A Cloud Storage bucket used to stage workload dependencies, config files, and store +workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, +Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, +and then create and manage project-level, per-location staging and temporary buckets. +This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.`, + }, + "subnetwork_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Subnetwork configuration for workload execution.`, + ConflictsWith: []string{"environment_config.0.execution_config.0.network_uri"}, + }, + "ttl": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The duration after which the workload will be terminated. +When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing +work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it +exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, +it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. +Minimum value is 10 minutes; maximum value is 14 days.
If both ttl and idleTtl are specified (for an interactive session), +the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idleTtl or +when ttl has been exceeded, whichever occurs first.`, + }, + }, + }, + "peripherals_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Peripherals configuration that the workload has access to.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metastore_service": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Resource name of an existing Dataproc Metastore service.`, + }, + "spark_history_server_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The Spark History Server configuration for the workload.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataproc_cluster": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `The labels to associate with this batch. + + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The location in which the batch will be created.`, + }, + "pyspark_batch": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `PySpark batch config.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of archives to be extracted into the working directory of each executor. +Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The arguments to pass to the driver. Do not include arguments that can be set as batch +properties, such as --conf, since a collision can occur that causes an incorrect batch submission.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of files to be placed in the working directory of each executor.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "main_python_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.`, + }, + "python_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS file URIs of Python files to pass to the PySpark framework.
+Supported file types: .py, .egg, and .zip.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + ExactlyOneOf: []string{"pyspark_batch", "spark_batch", "spark_sql_batch", "spark_r_batch"}, + }, + "runtime_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Runtime configuration for the batch execution.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "container_image": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Optional custom container image for the job runtime environment. If not specified, a default container image will be used.`, + }, + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `A mapping of property names to values, which are used to configure workload execution.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "version": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: CloudDataprocBatchRuntimeConfigVersionDiffSuppress, + Description: `Version of the batch runtime.`, + }, + "effective_properties": { + Type: schema.TypeMap, + Computed: true, + Description: `A mapping of property names to values, which are used to configure workload execution.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "spark_batch": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Spark batch config.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of archives to be extracted into the working directory of each executor. +Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The arguments to pass to the driver. Do not include arguments that can be set as batch +properties, such as --conf, since a collision can occur that causes an incorrect batch submission.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of files to be placed in the working directory of each executor.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "main_class": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The name of the driver main class. 
The jar file that contains the class must be in the +classpath or specified in jarFileUris.`, + ExactlyOneOf: []string{"spark_batch.0.main_jar_file_uri"}, + }, + "main_jar_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The HCFS URI of the jar file that contains the main class.`, + ExactlyOneOf: []string{"spark_batch.0.main_class"}, + }, + }, + }, + ExactlyOneOf: []string{"pyspark_batch", "spark_batch", "spark_sql_batch", "spark_r_batch"}, + }, + "spark_r_batch": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `SparkR batch config.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of archives to be extracted into the working directory of each executor. +Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The arguments to pass to the driver. Do not include arguments that can be set as batch +properties, such as --conf, since a collision can occur that causes an incorrect batch submission.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of files to be placed in the working directory of each executor.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "main_r_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The HCFS URI of the main R file to use as the driver. Must be a .R or .r file.`, + }, + }, + }, + ExactlyOneOf: []string{"pyspark_batch", "spark_batch", "spark_sql_batch", "spark_r_batch"}, + }, + "spark_sql_batch": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Spark SQL batch config.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of jar files to be added to the Spark CLASSPATH.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The HCFS URI of the script that contains Spark SQL queries to execute.`, + }, + "query_variables": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + ExactlyOneOf: []string{"pyspark_batch", "spark_batch", "spark_sql_batch", "spark_r_batch"}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time when the batch was created.`, + }, + "creator": { + Type: schema.TypeString, + Computed: true, + Description: `The email address of the user who created the batch.`, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the batch.`, + }, + "operation": { + Type: schema.TypeString, + 
Computed: true, + Description: `The resource name of the operation associated with this batch.`, + }, + "runtime_info": { + Type: schema.TypeList, + Computed: true, + Description: `Runtime information about batch execution.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "approximate_usage": { + Type: schema.TypeList, + Computed: true, + Description: `Approximate workload resource usage, calculated when the workload completes (see [Dataproc Serverless pricing](https://cloud.google.com/dataproc-serverless/pricing))`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerator_type": { + Type: schema.TypeString, + Computed: true, + Description: `Accelerator type being used, if any.`, + }, + "milli_accelerator_seconds": { + Type: schema.TypeString, + Computed: true, + Description: `Accelerator usage in (milliAccelerator x seconds)`, + }, + "milli_dcu_seconds": { + Type: schema.TypeString, + Computed: true, + Description: `DCU (Dataproc Compute Units) usage in (milliDCU x seconds)`, + }, + "shuffle_storage_gb_seconds": { + Type: schema.TypeString, + Computed: true, + Description: `Shuffle storage usage in (GB x seconds)`, + }, + }, + }, + }, + "current_usage": { + Type: schema.TypeList, + Computed: true, + Description: `Snapshot of current workload resource usage (see [Dataproc Serverless pricing](https://cloud.google.com/dataproc-serverless/pricing))`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerator_type": { + Type: schema.TypeString, + Computed: true, + Description: `Accelerator type being used, if any.`, + }, + "milli_accelerator": { + Type: schema.TypeString, + Computed: true, + Description: `Milli (one-thousandth) accelerator.`, + }, + "milli_dcu": { + Type: schema.TypeString, + Computed: true, + Description: `Milli (one-thousandth) Dataproc Compute Units (DCUs).`, + }, + "milli_dcu_premium": { + Type: schema.TypeString, + Computed: true, + Description: `Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier.`, + }, + "shuffle_storage_gb": { + Type: schema.TypeString, + Computed: true, + Description: `Shuffle Storage in gigabytes (GB).`, + }, + "shuffle_storage_gb_premium": { + Type: schema.TypeString, + Computed: true, + Description: `Shuffle Storage in gigabytes (GB) charged at premium tier.`, + }, + "snapshot_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp of the usage snapshot.`, + }, + }, + }, + }, + "diagnostic_output_uri": { + Type: schema.TypeString, + Computed: true, + Description: `A URI pointing to the location of the diagnostics tarball.`, + }, + "endpoints": { + Type: schema.TypeMap, + Computed: true, + Description: `Map of remote access endpoints (such as web interfaces and APIs) to their URIs.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "output_uri": { + Type: schema.TypeString, + Computed: true, + Description: `A URI pointing to the location of the stdout and stderr of the workload.`, + }, + }, + }, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The state of the batch.
For possible values, see the [API documentation](https://cloud.google.com/dataproc-serverless/docs/reference/rest/v1/projects.locations.batches#State).`, + }, + "state_history": { + Type: schema.TypeList, + Computed: true, + Description: `Historical state information for the batch.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The state of the batch at this point in history. For possible values, see the [API documentation](https://cloud.google.com/dataproc-serverless/docs/reference/rest/v1/projects.locations.batches#State).`, + }, + "state_message": { + Type: schema.TypeString, + Computed: true, + Description: `Details about the state at this point in history.`, + }, + "state_start_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time when the batch entered the historical state.`, + }, + }, + }, + }, + "state_message": { + Type: schema.TypeString, + Computed: true, + Description: `Batch state details, such as a failure description if the state is FAILED.`, + }, + "state_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time when the batch entered its current state.`, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "uuid": { + Type: schema.TypeString, + Computed: true, + Description: `A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDataprocBatchCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + runtimeConfigProp, err := expandDataprocBatchRuntimeConfig(d.Get("runtime_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("runtime_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(runtimeConfigProp)) && (ok || !reflect.DeepEqual(v, runtimeConfigProp)) { + obj["runtimeConfig"] = runtimeConfigProp + } + environmentConfigProp, err := expandDataprocBatchEnvironmentConfig(d.Get("environment_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("environment_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(environmentConfigProp)) && (ok || !reflect.DeepEqual(v, environmentConfigProp)) { + obj["environmentConfig"] = environmentConfigProp + } + pysparkBatchProp, err := expandDataprocBatchPysparkBatch(d.Get("pyspark_batch"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("pyspark_batch"); !tpgresource.IsEmptyValue(reflect.ValueOf(pysparkBatchProp)) && (ok || !reflect.DeepEqual(v, pysparkBatchProp)) { + obj["pysparkBatch"] = pysparkBatchProp + } + sparkBatchProp, err := expandDataprocBatchSparkBatch(d.Get("spark_batch"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("spark_batch"); !tpgresource.IsEmptyValue(reflect.ValueOf(sparkBatchProp)) && (ok || !reflect.DeepEqual(v, sparkBatchProp)) { + obj["sparkBatch"] = sparkBatchProp + } + sparkRBatchProp, err :=
expandDataprocBatchSparkRBatch(d.Get("spark_r_batch"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("spark_r_batch"); !tpgresource.IsEmptyValue(reflect.ValueOf(sparkRBatchProp)) && (ok || !reflect.DeepEqual(v, sparkRBatchProp)) { + obj["sparkRBatch"] = sparkRBatchProp + } + sparkSqlBatchProp, err := expandDataprocBatchSparkSqlBatch(d.Get("spark_sql_batch"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("spark_sql_batch"); !tpgresource.IsEmptyValue(reflect.ValueOf(sparkSqlBatchProp)) && (ok || !reflect.DeepEqual(v, sparkSqlBatchProp)) { + obj["sparkSqlBatch"] = sparkSqlBatchProp + } + labelsProp, err := expandDataprocBatchEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataprocBasePath}}projects/{{project}}/locations/{{location}}/batches?batchId={{batch_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Batch: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Batch: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating Batch: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/batches/{{batch_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = DataprocOperationWaitTime( + config, res, project, "Creating Batch", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Batch: %s", err) + } + + log.Printf("[DEBUG] Finished creating Batch %q: %#v", d.Id(), res) + + return resourceDataprocBatchRead(d, meta) +} + +func resourceDataprocBatchRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataprocBasePath}}projects/{{project}}/locations/{{location}}/batches/{{batch_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Batch: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if 
err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DataprocBatch %q", d.Id())) + } + + res, err = resourceDataprocBatchDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. It may be marked deleted + log.Printf("[DEBUG] Removing DataprocBatch because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + + if err := d.Set("name", flattenDataprocBatchName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("uuid", flattenDataprocBatchUuid(res["uuid"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("create_time", flattenDataprocBatchCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("runtime_info", flattenDataprocBatchRuntimeInfo(res["runtimeInfo"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("state", flattenDataprocBatchState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("state_message", flattenDataprocBatchStateMessage(res["stateMessage"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("state_time", flattenDataprocBatchStateTime(res["stateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("creator", flattenDataprocBatchCreator(res["creator"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("labels", flattenDataprocBatchLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("runtime_config", flattenDataprocBatchRuntimeConfig(res["runtimeConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("environment_config", flattenDataprocBatchEnvironmentConfig(res["environmentConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("operation", flattenDataprocBatchOperation(res["operation"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("state_history", flattenDataprocBatchStateHistory(res["stateHistory"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("pyspark_batch", flattenDataprocBatchPysparkBatch(res["pysparkBatch"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("spark_batch", flattenDataprocBatchSparkBatch(res["sparkBatch"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("spark_r_batch", flattenDataprocBatchSparkRBatch(res["sparkRBatch"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("spark_sql_batch", flattenDataprocBatchSparkSqlBatch(res["sparkSqlBatch"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("terraform_labels", flattenDataprocBatchTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("effective_labels", 
flattenDataprocBatchEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + + return nil +} + +func resourceDataprocBatchUpdate(d *schema.ResourceData, meta interface{}) error { + // Only the root field "labels" and "terraform_labels" are mutable + return resourceDataprocBatchRead(d, meta) +} + +func resourceDataprocBatchDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Batch: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{DataprocBasePath}}projects/{{project}}/locations/{{location}}/batches/{{batch_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting Batch %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Batch") + } + + err = DataprocOperationWaitTime( + config, res, project, "Deleting Batch", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Batch %q: %#v", d.Id(), res) + return nil +} + +func resourceDataprocBatchImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^projects/(?P[^/]+)/locations/(?P[^/]+)/batches/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/batches/{{batch_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDataprocBatchName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchUuid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["output_uri"] = + flattenDataprocBatchRuntimeInfoOutputUri(original["outputUri"], d, config) + transformed["diagnostic_output_uri"] = + flattenDataprocBatchRuntimeInfoDiagnosticOutputUri(original["diagnosticOutputUri"], d, config) + transformed["endpoints"] = + flattenDataprocBatchRuntimeInfoEndpoints(original["endpoints"], d, config) + 
transformed["approximate_usage"] = + flattenDataprocBatchRuntimeInfoApproximateUsage(original["approximateUsage"], d, config) + transformed["current_usage"] = + flattenDataprocBatchRuntimeInfoCurrentUsage(original["currentUsage"], d, config) + return []interface{}{transformed} +} +func flattenDataprocBatchRuntimeInfoOutputUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoDiagnosticOutputUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoEndpoints(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoApproximateUsage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["milli_dcu_seconds"] = + flattenDataprocBatchRuntimeInfoApproximateUsageMilliDcuSeconds(original["milliDcuSeconds"], d, config) + transformed["shuffle_storage_gb_seconds"] = + flattenDataprocBatchRuntimeInfoApproximateUsageShuffleStorageGbSeconds(original["shuffleStorageGbSeconds"], d, config) + transformed["milli_accelerator_seconds"] = + flattenDataprocBatchRuntimeInfoApproximateUsageMilliAcceleratorSeconds(original["milliAcceleratorSeconds"], d, config) + transformed["accelerator_type"] = + flattenDataprocBatchRuntimeInfoApproximateUsageAcceleratorType(original["acceleratorType"], d, config) + return []interface{}{transformed} +} +func flattenDataprocBatchRuntimeInfoApproximateUsageMilliDcuSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoApproximateUsageShuffleStorageGbSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoApproximateUsageMilliAcceleratorSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoApproximateUsageAcceleratorType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoCurrentUsage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["milli_dcu"] = + flattenDataprocBatchRuntimeInfoCurrentUsageMilliDcu(original["milliDcu"], d, config) + transformed["shuffle_storage_gb"] = + flattenDataprocBatchRuntimeInfoCurrentUsageShuffleStorageGb(original["shuffleStorageGb"], d, config) + transformed["milli_dcu_premium"] = + flattenDataprocBatchRuntimeInfoCurrentUsageMilliDcuPremium(original["milliDcuPremium"], d, config) + transformed["shuffle_storage_gb_premium"] = + flattenDataprocBatchRuntimeInfoCurrentUsageShuffleStorageGbPremium(original["shuffleStorageGbPremium"], d, config) + transformed["milli_accelerator"] = + flattenDataprocBatchRuntimeInfoCurrentUsageMilliAccelerator(original["milliAccelerator"], d, config) + transformed["accelerator_type"] = + flattenDataprocBatchRuntimeInfoCurrentUsageAcceleratorType(original["acceleratorType"], d, config) + transformed["snapshot_time"] = + 
flattenDataprocBatchRuntimeInfoCurrentUsageSnapshotTime(original["snapshotTime"], d, config) + return []interface{}{transformed} +} +func flattenDataprocBatchRuntimeInfoCurrentUsageMilliDcu(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoCurrentUsageShuffleStorageGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoCurrentUsageMilliDcuPremium(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoCurrentUsageShuffleStorageGbPremium(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoCurrentUsageMilliAccelerator(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoCurrentUsageAcceleratorType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoCurrentUsageSnapshotTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchStateMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchStateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchCreator(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenDataprocBatchRuntimeConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["version"] = + flattenDataprocBatchRuntimeConfigVersion(original["version"], d, config) + transformed["container_image"] = + flattenDataprocBatchRuntimeConfigContainerImage(original["containerImage"], d, config) + transformed["properties"] = + flattenDataprocBatchRuntimeConfigProperties(original["properties"], d, config) + transformed["effective_properties"] = + flattenDataprocBatchRuntimeConfigEffectiveProperties(original["effective_properties"], d, config) + return []interface{}{transformed} +} +func flattenDataprocBatchRuntimeConfigVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeConfigContainerImage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeConfigProperties(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeConfigEffectiveProperties(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchEnvironmentConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["execution_config"] = + flattenDataprocBatchEnvironmentConfigExecutionConfig(original["executionConfig"], d, config) + transformed["peripherals_config"] = + flattenDataprocBatchEnvironmentConfigPeripheralsConfig(original["peripheralsConfig"], d, config) + return []interface{}{transformed} +} +func flattenDataprocBatchEnvironmentConfigExecutionConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["service_account"] = + flattenDataprocBatchEnvironmentConfigExecutionConfigServiceAccount(original["serviceAccount"], d, config) + transformed["network_tags"] = + flattenDataprocBatchEnvironmentConfigExecutionConfigNetworkTags(original["networkTags"], d, config) + transformed["kms_key"] = + flattenDataprocBatchEnvironmentConfigExecutionConfigKmsKey(original["kmsKey"], d, config) + transformed["ttl"] = + flattenDataprocBatchEnvironmentConfigExecutionConfigTtl(original["ttl"], d, config) + transformed["staging_bucket"] = + flattenDataprocBatchEnvironmentConfigExecutionConfigStagingBucket(original["stagingBucket"], d, config) + transformed["network_uri"] = + flattenDataprocBatchEnvironmentConfigExecutionConfigNetworkUri(original["networkUri"], d, config) + transformed["subnetwork_uri"] = + flattenDataprocBatchEnvironmentConfigExecutionConfigSubnetworkUri(original["subnetworkUri"], d, config) + return []interface{}{transformed} +} +func flattenDataprocBatchEnvironmentConfigExecutionConfigServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchEnvironmentConfigExecutionConfigNetworkTags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchEnvironmentConfigExecutionConfigKmsKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchEnvironmentConfigExecutionConfigTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchEnvironmentConfigExecutionConfigStagingBucket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchEnvironmentConfigExecutionConfigNetworkUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchEnvironmentConfigExecutionConfigSubnetworkUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchEnvironmentConfigPeripheralsConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["metastore_service"] = + flattenDataprocBatchEnvironmentConfigPeripheralsConfigMetastoreService(original["metastoreService"], d, config) + transformed["spark_history_server_config"] = + 
flattenDataprocBatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfig(original["sparkHistoryServerConfig"], d, config) + return []interface{}{transformed} +} +func flattenDataprocBatchEnvironmentConfigPeripheralsConfigMetastoreService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["dataproc_cluster"] = + flattenDataprocBatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigDataprocCluster(original["dataprocCluster"], d, config) + return []interface{}{transformed} +} +func flattenDataprocBatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigDataprocCluster(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchOperation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchStateHistory(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "state": flattenDataprocBatchStateHistoryState(original["state"], d, config), + "state_message": flattenDataprocBatchStateHistoryStateMessage(original["stateMessage"], d, config), + "state_start_time": flattenDataprocBatchStateHistoryStateStartTime(original["stateStartTime"], d, config), + }) + } + return transformed +} +func flattenDataprocBatchStateHistoryState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchStateHistoryStateMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchStateHistoryStateStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchPysparkBatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["main_python_file_uri"] = + flattenDataprocBatchPysparkBatchMainPythonFileUri(original["mainPythonFileUri"], d, config) + transformed["args"] = + flattenDataprocBatchPysparkBatchArgs(original["args"], d, config) + transformed["python_file_uris"] = + flattenDataprocBatchPysparkBatchPythonFileUris(original["pythonFileUris"], d, config) + transformed["jar_file_uris"] = + flattenDataprocBatchPysparkBatchJarFileUris(original["jarFileUris"], d, config) + transformed["file_uris"] = + flattenDataprocBatchPysparkBatchFileUris(original["fileUris"], d, config) + transformed["archive_uris"] = + flattenDataprocBatchPysparkBatchArchiveUris(original["archiveUris"], d, config) + return []interface{}{transformed} +} +func flattenDataprocBatchPysparkBatchMainPythonFileUri(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchPysparkBatchArgs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchPysparkBatchPythonFileUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchPysparkBatchJarFileUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchPysparkBatchFileUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchPysparkBatchArchiveUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchSparkBatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["args"] = + flattenDataprocBatchSparkBatchArgs(original["args"], d, config) + transformed["jar_file_uris"] = + flattenDataprocBatchSparkBatchJarFileUris(original["jarFileUris"], d, config) + transformed["file_uris"] = + flattenDataprocBatchSparkBatchFileUris(original["fileUris"], d, config) + transformed["archive_uris"] = + flattenDataprocBatchSparkBatchArchiveUris(original["archiveUris"], d, config) + transformed["main_jar_file_uri"] = + flattenDataprocBatchSparkBatchMainJarFileUri(original["mainJarFileUri"], d, config) + transformed["main_class"] = + flattenDataprocBatchSparkBatchMainClass(original["mainClass"], d, config) + return []interface{}{transformed} +} +func flattenDataprocBatchSparkBatchArgs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchSparkBatchJarFileUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchSparkBatchFileUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchSparkBatchArchiveUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchSparkBatchMainJarFileUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchSparkBatchMainClass(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchSparkRBatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["main_r_file_uri"] = + flattenDataprocBatchSparkRBatchMainRFileUri(original["mainRFileUri"], d, config) + transformed["args"] = + flattenDataprocBatchSparkRBatchArgs(original["args"], d, config) + transformed["file_uris"] = + flattenDataprocBatchSparkRBatchFileUris(original["fileUris"], d, config) + transformed["archive_uris"] = + flattenDataprocBatchSparkRBatchArchiveUris(original["archiveUris"], d, config) + return []interface{}{transformed} +} +func flattenDataprocBatchSparkRBatchMainRFileUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataprocBatchSparkRBatchArgs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchSparkRBatchFileUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchSparkRBatchArchiveUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchSparkSqlBatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["query_file_uri"] = + flattenDataprocBatchSparkSqlBatchQueryFileUri(original["queryFileUri"], d, config) + transformed["jar_file_uris"] = + flattenDataprocBatchSparkSqlBatchJarFileUris(original["jarFileUris"], d, config) + transformed["query_variables"] = + flattenDataprocBatchSparkSqlBatchQueryVariables(original["queryVariables"], d, config) + return []interface{}{transformed} +} +func flattenDataprocBatchSparkSqlBatchQueryFileUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchSparkSqlBatchJarFileUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchSparkSqlBatchQueryVariables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("terraform_labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenDataprocBatchEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDataprocBatchRuntimeConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedVersion, err := expandDataprocBatchRuntimeConfigVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedContainerImage, err := expandDataprocBatchRuntimeConfigContainerImage(original["container_image"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContainerImage); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["containerImage"] = transformedContainerImage + } + + transformedProperties, err := expandDataprocBatchRuntimeConfigProperties(original["properties"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProperties); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["properties"] = transformedProperties + } + + transformedEffectiveProperties, err := expandDataprocBatchRuntimeConfigEffectiveProperties(original["effective_properties"], d, config) + if err != nil { + return nil, err + } else 
if val := reflect.ValueOf(transformedEffectiveProperties); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["effective_properties"] = transformedEffectiveProperties + } + + return transformed, nil +} + +func expandDataprocBatchRuntimeConfigVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchRuntimeConfigContainerImage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchRuntimeConfigProperties(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDataprocBatchRuntimeConfigEffectiveProperties(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDataprocBatchEnvironmentConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedExecutionConfig, err := expandDataprocBatchEnvironmentConfigExecutionConfig(original["execution_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExecutionConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["executionConfig"] = transformedExecutionConfig + } + + transformedPeripheralsConfig, err := expandDataprocBatchEnvironmentConfigPeripheralsConfig(original["peripherals_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPeripheralsConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["peripheralsConfig"] = transformedPeripheralsConfig + } + + return transformed, nil +} + +func expandDataprocBatchEnvironmentConfigExecutionConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedServiceAccount, err := expandDataprocBatchEnvironmentConfigExecutionConfigServiceAccount(original["service_account"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceAccount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serviceAccount"] = transformedServiceAccount + } + + transformedNetworkTags, err := expandDataprocBatchEnvironmentConfigExecutionConfigNetworkTags(original["network_tags"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNetworkTags); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["networkTags"] = transformedNetworkTags + } + + transformedKmsKey, err := expandDataprocBatchEnvironmentConfigExecutionConfigKmsKey(original["kms_key"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedKmsKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKey"] = transformedKmsKey + } + + transformedTtl, err := expandDataprocBatchEnvironmentConfigExecutionConfigTtl(original["ttl"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTtl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ttl"] = transformedTtl + } + + transformedStagingBucket, err := expandDataprocBatchEnvironmentConfigExecutionConfigStagingBucket(original["staging_bucket"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStagingBucket); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["stagingBucket"] = transformedStagingBucket + } + + transformedNetworkUri, err := expandDataprocBatchEnvironmentConfigExecutionConfigNetworkUri(original["network_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNetworkUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["networkUri"] = transformedNetworkUri + } + + transformedSubnetworkUri, err := expandDataprocBatchEnvironmentConfigExecutionConfigSubnetworkUri(original["subnetwork_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSubnetworkUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["subnetworkUri"] = transformedSubnetworkUri + } + + return transformed, nil +} + +func expandDataprocBatchEnvironmentConfigExecutionConfigServiceAccount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchEnvironmentConfigExecutionConfigNetworkTags(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchEnvironmentConfigExecutionConfigKmsKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchEnvironmentConfigExecutionConfigTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchEnvironmentConfigExecutionConfigStagingBucket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchEnvironmentConfigExecutionConfigNetworkUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchEnvironmentConfigExecutionConfigSubnetworkUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchEnvironmentConfigPeripheralsConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMetastoreService, err := expandDataprocBatchEnvironmentConfigPeripheralsConfigMetastoreService(original["metastore_service"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMetastoreService); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["metastoreService"] = transformedMetastoreService + } + + transformedSparkHistoryServerConfig, err := expandDataprocBatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfig(original["spark_history_server_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSparkHistoryServerConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sparkHistoryServerConfig"] = transformedSparkHistoryServerConfig + } + + return transformed, nil +} + +func expandDataprocBatchEnvironmentConfigPeripheralsConfigMetastoreService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDataprocCluster, err := expandDataprocBatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigDataprocCluster(original["dataproc_cluster"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDataprocCluster); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dataprocCluster"] = transformedDataprocCluster + } + + return transformed, nil +} + +func expandDataprocBatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigDataprocCluster(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchPysparkBatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMainPythonFileUri, err := expandDataprocBatchPysparkBatchMainPythonFileUri(original["main_python_file_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMainPythonFileUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mainPythonFileUri"] = transformedMainPythonFileUri + } + + transformedArgs, err := expandDataprocBatchPysparkBatchArgs(original["args"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedArgs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["args"] = transformedArgs + } + + transformedPythonFileUris, err := expandDataprocBatchPysparkBatchPythonFileUris(original["python_file_uris"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPythonFileUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pythonFileUris"] = transformedPythonFileUris + } + + transformedJarFileUris, err := expandDataprocBatchPysparkBatchJarFileUris(original["jar_file_uris"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedJarFileUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["jarFileUris"] = transformedJarFileUris + } + + transformedFileUris, err := expandDataprocBatchPysparkBatchFileUris(original["file_uris"], d, config) + if err != nil { + return nil, err + } else if val 
:= reflect.ValueOf(transformedFileUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fileUris"] = transformedFileUris + } + + transformedArchiveUris, err := expandDataprocBatchPysparkBatchArchiveUris(original["archive_uris"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedArchiveUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["archiveUris"] = transformedArchiveUris + } + + return transformed, nil +} + +func expandDataprocBatchPysparkBatchMainPythonFileUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchPysparkBatchArgs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchPysparkBatchPythonFileUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchPysparkBatchJarFileUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchPysparkBatchFileUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchPysparkBatchArchiveUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkBatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedArgs, err := expandDataprocBatchSparkBatchArgs(original["args"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedArgs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["args"] = transformedArgs + } + + transformedJarFileUris, err := expandDataprocBatchSparkBatchJarFileUris(original["jar_file_uris"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedJarFileUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["jarFileUris"] = transformedJarFileUris + } + + transformedFileUris, err := expandDataprocBatchSparkBatchFileUris(original["file_uris"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFileUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fileUris"] = transformedFileUris + } + + transformedArchiveUris, err := expandDataprocBatchSparkBatchArchiveUris(original["archive_uris"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedArchiveUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["archiveUris"] = transformedArchiveUris + } + + transformedMainJarFileUri, err := expandDataprocBatchSparkBatchMainJarFileUri(original["main_jar_file_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMainJarFileUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mainJarFileUri"] = transformedMainJarFileUri + } + + transformedMainClass, err := expandDataprocBatchSparkBatchMainClass(original["main_class"], d, config) + if err != nil { + return nil, 
err + } else if val := reflect.ValueOf(transformedMainClass); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mainClass"] = transformedMainClass + } + + return transformed, nil +} + +func expandDataprocBatchSparkBatchArgs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkBatchJarFileUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkBatchFileUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkBatchArchiveUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkBatchMainJarFileUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkBatchMainClass(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkRBatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMainRFileUri, err := expandDataprocBatchSparkRBatchMainRFileUri(original["main_r_file_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMainRFileUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mainRFileUri"] = transformedMainRFileUri + } + + transformedArgs, err := expandDataprocBatchSparkRBatchArgs(original["args"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedArgs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["args"] = transformedArgs + } + + transformedFileUris, err := expandDataprocBatchSparkRBatchFileUris(original["file_uris"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFileUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fileUris"] = transformedFileUris + } + + transformedArchiveUris, err := expandDataprocBatchSparkRBatchArchiveUris(original["archive_uris"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedArchiveUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["archiveUris"] = transformedArchiveUris + } + + return transformed, nil +} + +func expandDataprocBatchSparkRBatchMainRFileUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkRBatchArgs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkRBatchFileUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkRBatchArchiveUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkSqlBatch(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedQueryFileUri, err := expandDataprocBatchSparkSqlBatchQueryFileUri(original["query_file_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedQueryFileUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["queryFileUri"] = transformedQueryFileUri + } + + transformedJarFileUris, err := expandDataprocBatchSparkSqlBatchJarFileUris(original["jar_file_uris"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedJarFileUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["jarFileUris"] = transformedJarFileUris + } + + transformedQueryVariables, err := expandDataprocBatchSparkSqlBatchQueryVariables(original["query_variables"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedQueryVariables); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["queryVariables"] = transformedQueryVariables + } + + return transformed, nil +} + +func expandDataprocBatchSparkSqlBatchQueryFileUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkSqlBatchJarFileUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkSqlBatchQueryVariables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDataprocBatchEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func resourceDataprocBatchDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + if obj1, ok := res["runtimeConfig"]; ok { + if rconfig, ok := obj1.(map[string]interface{}); ok { + if obj2, ok := rconfig["properties"]; ok { + if properties, ok := obj2.(map[string]interface{}); ok { + // Update effective_properties to include both server set and client set properties + propertiesCopy := make(map[string]interface{}) + for k, v := range properties { + propertiesCopy[k] = v + } + rconfig["effectiveProperties"] = propertiesCopy + + // Update properties back to original client set properties + originalPropertiesCopy := make(map[string]interface{}) + originalProperties := d.Get("runtime_config.0.properties").(interface{}).(map[string]interface{}) + for k, v := range originalProperties { + originalPropertiesCopy[k] = v + } + rconfig["properties"] = originalPropertiesCopy + return res, nil + } + } + } + } + + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_batch_sweeper.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_batch_sweeper.go new file mode 100644 index 00000000000..93ca47e2ccc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_batch_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package dataproc + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("DataprocBatch", testSweepDataprocBatch) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepDataprocBatch(region string) error { + resourceName := "DataprocBatch" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Set up variables to replace in the list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://dataproc.googleapis.com/v1/projects/{{project}}/locations/{{location}}/batches", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["batches"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // The delete URL expects an id, so use the id field when it is present.
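+ // Resolve the resource name from either the id or the name field, then + // delete it only if sweeper.IsSweepableTestResource recognizes it as a + // test resource (by its name prefix, typically a "tf-test"-style prefix); + // anything else is counted and skipped so non-test resources stay intact.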
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be swept + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://dataproc.googleapis.com/v1/projects/{{project}}/locations/{{location}}/batches/{{batch_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_cluster.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_cluster.go index a620bb03332..c262ed2535b 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_cluster.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_cluster.go @@ -189,7 +189,8 @@ func ResourceDataprocCluster() *schema.Resource { CustomizeDiff: customdiff.All( tpgresource.DefaultProviderProject, - tpgresource.SetLabelsDiff, + // User labels are not supported in Dataproc Virtual Cluster + tpgresource.SetLabelsDiffWithoutAttributionLabel, ), SchemaVersion: 1, @@ -2178,6 +2179,7 @@ func expandGceClusterConfig(d *schema.ResourceData, config *transport_tpg.Config } if v, ok := cfg["internal_ip_only"]; ok { conf.InternalIpOnly = v.(bool) + conf.ForceSendFields = append(conf.ForceSendFields, "InternalIpOnly") } if v, ok := cfg["metadata"]; ok { conf.Metadata = tpgresource.ConvertStringMap(v.(map[string]interface{})) @@ -2892,6 +2894,9 @@ func flattenSecurityConfig(d *schema.ResourceData, sc *dataproc.SecurityConfig) } func flattenKerberosConfig(d *schema.ResourceData, kfg *dataproc.KerberosConfig) []map[string]interface{} { + if kfg == nil { + return nil + } data := map[string]interface{}{ "enable_kerberos": kfg.EnableKerberos, "root_principal_password_uri": kfg.RootPrincipalPasswordUri, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore/resource_dataproc_metastore_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore/resource_dataproc_metastore_service.go index 054b4364996..f974b6cc8e9 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore/resource_dataproc_metastore_service.go +++
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore/resource_dataproc_metastore_service.go @@ -72,6 +72,11 @@ and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of Description: `The database type that the Metastore service stores its data. Default value: "MYSQL" Possible values: ["MYSQL", "SPANNER"]`, Default: "MYSQL", }, + "deletion_protection": { + Type: schema.TypeBool, + Optional: true, + Description: `Indicates whether the Dataproc Metastore service should be protected against accidental deletion.`, + }, "encryption_config": { Type: schema.TypeList, Optional: true, @@ -484,6 +489,12 @@ func resourceDataprocMetastoreServiceCreate(d *schema.ResourceData, meta interfa } else if v, ok := d.GetOkExists("scheduled_backup"); !tpgresource.IsEmptyValue(reflect.ValueOf(scheduledBackupProp)) && (ok || !reflect.DeepEqual(v, scheduledBackupProp)) { obj["scheduledBackup"] = scheduledBackupProp } + deletionProtectionProp, err := expandDataprocMetastoreServiceDeletionProtection(d.Get("deletion_protection"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deletion_protection"); !tpgresource.IsEmptyValue(reflect.ValueOf(deletionProtectionProp)) && (ok || !reflect.DeepEqual(v, deletionProtectionProp)) { + obj["deletionProtection"] = deletionProtectionProp + } maintenanceWindowProp, err := expandDataprocMetastoreServiceMaintenanceWindow(d.Get("maintenance_window"), d, config) if err != nil { return err @@ -670,6 +681,9 @@ func resourceDataprocMetastoreServiceRead(d *schema.ResourceData, meta interface if err := d.Set("scheduled_backup", flattenDataprocMetastoreServiceScheduledBackup(res["scheduledBackup"], d, config)); err != nil { return fmt.Errorf("Error reading Service: %s", err) } + if err := d.Set("deletion_protection", flattenDataprocMetastoreServiceDeletionProtection(res["deletionProtection"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } if err := d.Set("maintenance_window", flattenDataprocMetastoreServiceMaintenanceWindow(res["maintenanceWindow"], d, config)); err != nil { return fmt.Errorf("Error reading Service: %s", err) } @@ -747,6 +761,12 @@ func resourceDataprocMetastoreServiceUpdate(d *schema.ResourceData, meta interfa } else if v, ok := d.GetOkExists("scheduled_backup"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, scheduledBackupProp)) { obj["scheduledBackup"] = scheduledBackupProp } + deletionProtectionProp, err := expandDataprocMetastoreServiceDeletionProtection(d.Get("deletion_protection"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deletion_protection"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, deletionProtectionProp)) { + obj["deletionProtection"] = deletionProtectionProp + } maintenanceWindowProp, err := expandDataprocMetastoreServiceMaintenanceWindow(d.Get("maintenance_window"), d, config) if err != nil { return err @@ -809,6 +829,10 @@ func resourceDataprocMetastoreServiceUpdate(d *schema.ResourceData, meta interfa updateMask = append(updateMask, "scheduledBackup") } + if d.HasChange("deletion_protection") { + updateMask = append(updateMask, "deletionProtection") + } + if d.HasChange("maintenance_window") { updateMask = append(updateMask, "maintenanceWindow") } @@ -1069,6 +1093,10 @@ func flattenDataprocMetastoreServiceScheduledBackupBackupLocation(v interface{}, return v } +func flattenDataprocMetastoreServiceDeletionProtection(v
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenDataprocMetastoreServiceMaintenanceWindow(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil @@ -1441,6 +1469,10 @@ func expandDataprocMetastoreServiceScheduledBackupBackupLocation(v interface{}, return v, nil } +func expandDataprocMetastoreServiceDeletionProtection(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandDataprocMetastoreServiceMaintenanceWindow(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastore/resource_datastore_index.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastore/resource_datastore_index.go deleted file mode 100644 index ee45e264d10..00000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastore/resource_datastore_index.go +++ /dev/null @@ -1,425 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package datastore - -import ( - "fmt" - "log" - "net/http" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/hashicorp/terraform-provider-google/google/tpgresource" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" - "github.com/hashicorp/terraform-provider-google/google/verify" -) - -func ResourceDatastoreIndex() *schema.Resource { - return &schema.Resource{ - Create: resourceDatastoreIndexCreate, - Read: resourceDatastoreIndexRead, - Delete: resourceDatastoreIndexDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDatastoreIndexImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - CustomizeDiff: customdiff.All( - tpgresource.DefaultProviderProject, - ), - - Schema: map[string]*schema.Schema{ - "kind": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The entity kind which the index applies to.`, - }, - "ancestor": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: verify.ValidateEnum([]string{"NONE", "ALL_ANCESTORS", ""}), - Description: `Policy for including ancestors in the index. 
Default value: "NONE" Possible values: ["NONE", "ALL_ANCESTORS"]`, - Default: "NONE", - }, - "properties": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `An ordered list of properties to index on.`, - MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "direction": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: verify.ValidateEnum([]string{"ASCENDING", "DESCENDING"}), - Description: `The direction the index should optimize for sorting. Possible values: ["ASCENDING", "DESCENDING"]`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The property name to index.`, - }, - }, - }, - }, - "index_id": { - Type: schema.TypeString, - Computed: true, - Description: `The index id.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDatastoreIndexCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - kindProp, err := expandDatastoreIndexKind(d.Get("kind"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("kind"); !tpgresource.IsEmptyValue(reflect.ValueOf(kindProp)) && (ok || !reflect.DeepEqual(v, kindProp)) { - obj["kind"] = kindProp - } - ancestorProp, err := expandDatastoreIndexAncestor(d.Get("ancestor"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ancestor"); !tpgresource.IsEmptyValue(reflect.ValueOf(ancestorProp)) && (ok || !reflect.DeepEqual(v, ancestorProp)) { - obj["ancestor"] = ancestorProp - } - propertiesProp, err := expandDatastoreIndexProperties(d.Get("properties"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("properties"); !tpgresource.IsEmptyValue(reflect.ValueOf(propertiesProp)) && (ok || !reflect.DeepEqual(v, propertiesProp)) { - obj["properties"] = propertiesProp - } - - url, err := tpgresource.ReplaceVars(d, config, "{{DatastoreBasePath}}projects/{{project}}/indexes") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Index: %#v", obj) - billingProject := "" - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Index: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := tpgresource.GetBillingProject(d, config); err == nil { - billingProject = bp - } - - headers := make(http.Header) - res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "POST", - Project: billingProject, - RawURL: url, - UserAgent: userAgent, - Body: obj, - Timeout: d.Timeout(schema.TimeoutCreate), - Headers: headers, - ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.DatastoreIndex409Contention}, - }) - if err != nil { - return fmt.Errorf("Error creating Index: %s", err) - } - - // Store the ID now - id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/indexes/{{index_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = 
DatastoreOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Index", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Index: %s", err) - } - - if err := d.Set("index_id", flattenDatastoreIndexIndexId(opRes["indexId"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/indexes/{{index_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Index %q: %#v", d.Id(), res) - - return resourceDatastoreIndexRead(d, meta) -} - -func resourceDatastoreIndexRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := tpgresource.ReplaceVars(d, config, "{{DatastoreBasePath}}projects/{{project}}/indexes/{{index_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Index: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := tpgresource.GetBillingProject(d, config); err == nil { - billingProject = bp - } - - headers := make(http.Header) - res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "GET", - Project: billingProject, - RawURL: url, - UserAgent: userAgent, - Headers: headers, - ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.DatastoreIndex409Contention}, - }) - if err != nil { - return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DatastoreIndex %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - - if err := d.Set("index_id", flattenDatastoreIndexIndexId(res["indexId"], d, config)); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - if err := d.Set("kind", flattenDatastoreIndexKind(res["kind"], d, config)); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - if err := d.Set("ancestor", flattenDatastoreIndexAncestor(res["ancestor"], d, config)); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - if err := d.Set("properties", flattenDatastoreIndexProperties(res["properties"], d, config)); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - - return nil -} - -func resourceDatastoreIndexDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Index: %s", err) - } - billingProject = project - - url, err := tpgresource.ReplaceVars(d, config, "{{DatastoreBasePath}}projects/{{project}}/indexes/{{index_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - - // err == nil indicates that the billing_project value was found - if bp, err := tpgresource.GetBillingProject(d, config); err == nil { - billingProject = bp - } - - headers := make(http.Header) - - 
log.Printf("[DEBUG] Deleting Index %q", d.Id()) - res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "DELETE", - Project: billingProject, - RawURL: url, - UserAgent: userAgent, - Body: obj, - Timeout: d.Timeout(schema.TimeoutDelete), - Headers: headers, - ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.DatastoreIndex409Contention}, - }) - if err != nil { - return transport_tpg.HandleNotFoundError(err, d, "Index") - } - - err = DatastoreOperationWaitTime( - config, res, project, "Deleting Index", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Index %q: %#v", d.Id(), res) - return nil -} - -func resourceDatastoreIndexImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*transport_tpg.Config) - if err := tpgresource.ParseImportId([]string{ - "^projects/(?P[^/]+)/indexes/(?P[^/]+)$", - "^(?P[^/]+)/(?P[^/]+)$", - "^(?P[^/]+)$", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/indexes/{{index_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenDatastoreIndexIndexId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v -} - -func flattenDatastoreIndexKind(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v -} - -func flattenDatastoreIndexAncestor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v -} - -func flattenDatastoreIndexProperties(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenDatastoreIndexPropertiesName(original["name"], d, config), - "direction": flattenDatastoreIndexPropertiesDirection(original["direction"], d, config), - }) - } - return transformed -} -func flattenDatastoreIndexPropertiesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v -} - -func flattenDatastoreIndexPropertiesDirection(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v -} - -func expandDatastoreIndexKind(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil -} - -func expandDatastoreIndexAncestor(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil -} - -func expandDatastoreIndexProperties(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDatastoreIndexPropertiesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedDirection, err := expandDatastoreIndexPropertiesDirection(original["direction"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDirection); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["direction"] = transformedDirection - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDatastoreIndexPropertiesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil -} - -func expandDatastoreIndexPropertiesDirection(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_connection_profile.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_connection_profile.go index 7cd77b1d9b2..da95cd39813 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_connection_profile.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_connection_profile.go @@ -80,7 +80,7 @@ func ResourceDatastreamConnectionProfile() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{}, }, - ExactlyOneOf: []string{"oracle_profile", "gcs_profile", "mysql_profile", "bigquery_profile", "postgresql_profile"}, + ExactlyOneOf: []string{"oracle_profile", "gcs_profile", "mysql_profile", "bigquery_profile", "postgresql_profile", "sql_server_profile"}, }, "create_without_validation": { Type: schema.TypeBool, @@ -151,7 +151,7 @@ func ResourceDatastreamConnectionProfile() *schema.Resource { }, }, }, - ExactlyOneOf: []string{"oracle_profile", "gcs_profile", "mysql_profile", "bigquery_profile", "postgresql_profile"}, + ExactlyOneOf: []string{"oracle_profile", "gcs_profile", "mysql_profile", "bigquery_profile", "postgresql_profile", "sql_server_profile"}, }, "labels": { Type: schema.TypeMap, @@ -245,7 +245,7 @@ If this field is used then the 'client_certificate' and the }, }, }, - ExactlyOneOf: []string{"oracle_profile", "gcs_profile", "mysql_profile", "bigquery_profile", "postgresql_profile"}, + ExactlyOneOf: []string{"oracle_profile", "gcs_profile", "mysql_profile", "bigquery_profile", "postgresql_profile", "sql_server_profile"}, }, "oracle_profile": { Type: schema.TypeList, @@ -289,7 +289,7 @@ If this field is used then the 'client_certificate' and the }, }, }, - ExactlyOneOf: []string{"oracle_profile", "gcs_profile", "mysql_profile", "bigquery_profile", "postgresql_profile"}, + ExactlyOneOf: []string{"oracle_profile", "gcs_profile", "mysql_profile", "bigquery_profile", "postgresql_profile", "sql_server_profile"}, }, "postgresql_profile": { Type: schema.TypeList, @@ -327,7 +327,7 @@ If this field is used then the 'client_certificate' and the }, }, }, - ExactlyOneOf: []string{"oracle_profile", "gcs_profile", "mysql_profile", "bigquery_profile", "postgresql_profile"}, + ExactlyOneOf: []string{"oracle_profile", "gcs_profile", "mysql_profile", "bigquery_profile", "postgresql_profile", "sql_server_profile"}, }, "private_connectivity": { Type: schema.TypeList, @@ -345,6 +345,44 @@ If 
this field is used then the 'client_certificate' and the }, ConflictsWith: []string{"forward_ssh_connectivity"}, }, + "sql_server_profile": { + Type: schema.TypeList, + Optional: true, + Description: `SQL Server database profile.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "database": { + Type: schema.TypeString, + Required: true, + Description: `Database for the SQL Server connection.`, + }, + "hostname": { + Type: schema.TypeString, + Required: true, + Description: `Hostname for the SQL Server connection.`, + }, + "password": { + Type: schema.TypeString, + Required: true, + Description: `Password for the SQL Server connection.`, + Sensitive: true, + }, + "username": { + Type: schema.TypeString, + Required: true, + Description: `Username for the SQL Server connection.`, + }, + "port": { + Type: schema.TypeInt, + Optional: true, + Description: `Port for the SQL Server connection.`, + Default: 1433, + }, + }, + }, + ExactlyOneOf: []string{"oracle_profile", "gcs_profile", "mysql_profile", "bigquery_profile", "postgresql_profile", "sql_server_profile"}, + }, "effective_labels": { Type: schema.TypeMap, Computed: true, @@ -418,6 +456,12 @@ func resourceDatastreamConnectionProfileCreate(d *schema.ResourceData, meta inte } else if v, ok := d.GetOkExists("postgresql_profile"); !tpgresource.IsEmptyValue(reflect.ValueOf(postgresqlProfileProp)) && (ok || !reflect.DeepEqual(v, postgresqlProfileProp)) { obj["postgresqlProfile"] = postgresqlProfileProp } + sqlServerProfileProp, err := expandDatastreamConnectionProfileSqlServerProfile(d.Get("sql_server_profile"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("sql_server_profile"); !tpgresource.IsEmptyValue(reflect.ValueOf(sqlServerProfileProp)) && (ok || !reflect.DeepEqual(v, sqlServerProfileProp)) { + obj["sqlServerProfile"] = sqlServerProfileProp + } forwardSshConnectivityProp, err := expandDatastreamConnectionProfileForwardSshConnectivity(d.Get("forward_ssh_connectivity"), d, config) if err != nil { return err @@ -573,6 +617,9 @@ func resourceDatastreamConnectionProfileRead(d *schema.ResourceData, meta interf if err := d.Set("postgresql_profile", flattenDatastreamConnectionProfilePostgresqlProfile(res["postgresqlProfile"], d, config)); err != nil { return fmt.Errorf("Error reading ConnectionProfile: %s", err) } + if err := d.Set("sql_server_profile", flattenDatastreamConnectionProfileSqlServerProfile(res["sqlServerProfile"], d, config)); err != nil { + return fmt.Errorf("Error reading ConnectionProfile: %s", err) + } if err := d.Set("forward_ssh_connectivity", flattenDatastreamConnectionProfileForwardSshConnectivity(res["forwardSshConnectivity"], d, config)); err != nil { return fmt.Errorf("Error reading ConnectionProfile: %s", err) } @@ -641,6 +688,12 @@ func resourceDatastreamConnectionProfileUpdate(d *schema.ResourceData, meta inte } else if v, ok := d.GetOkExists("postgresql_profile"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, postgresqlProfileProp)) { obj["postgresqlProfile"] = postgresqlProfileProp } + sqlServerProfileProp, err := expandDatastreamConnectionProfileSqlServerProfile(d.Get("sql_server_profile"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("sql_server_profile"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sqlServerProfileProp)) { + obj["sqlServerProfile"] = sqlServerProfileProp + } forwardSshConnectivityProp, err := 
expandDatastreamConnectionProfileForwardSshConnectivity(d.Get("forward_ssh_connectivity"), d, config) if err != nil { return err @@ -693,6 +746,10 @@ func resourceDatastreamConnectionProfileUpdate(d *schema.ResourceData, meta inte updateMask = append(updateMask, "postgresqlProfile") } + if d.HasChange("sql_server_profile") { + updateMask = append(updateMask, "sqlServerProfile") + } + if d.HasChange("forward_ssh_connectivity") { updateMask = append(updateMask, "forwardSshConnectivity") } @@ -1089,6 +1146,60 @@ func flattenDatastreamConnectionProfilePostgresqlProfileDatabase(v interface{}, return v } +func flattenDatastreamConnectionProfileSqlServerProfile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["hostname"] = + flattenDatastreamConnectionProfileSqlServerProfileHostname(original["hostname"], d, config) + transformed["port"] = + flattenDatastreamConnectionProfileSqlServerProfilePort(original["port"], d, config) + transformed["username"] = + flattenDatastreamConnectionProfileSqlServerProfileUsername(original["username"], d, config) + transformed["password"] = + flattenDatastreamConnectionProfileSqlServerProfilePassword(original["password"], d, config) + transformed["database"] = + flattenDatastreamConnectionProfileSqlServerProfileDatabase(original["database"], d, config) + return []interface{}{transformed} +} +func flattenDatastreamConnectionProfileSqlServerProfileHostname(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatastreamConnectionProfileSqlServerProfilePort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamConnectionProfileSqlServerProfileUsername(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatastreamConnectionProfileSqlServerProfilePassword(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("sql_server_profile.0.password") +} + +func flattenDatastreamConnectionProfileSqlServerProfileDatabase(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenDatastreamConnectionProfileForwardSshConnectivity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil @@ -1525,6 +1636,73 @@ func expandDatastreamConnectionProfilePostgresqlProfileDatabase(v interface{}, d return v, nil } +func expandDatastreamConnectionProfileSqlServerProfile(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHostname, err := expandDatastreamConnectionProfileSqlServerProfileHostname(original["hostname"], d, config) + if err != nil { + return nil, err + } else if val 
:= reflect.ValueOf(transformedHostname); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hostname"] = transformedHostname + } + + transformedPort, err := expandDatastreamConnectionProfileSqlServerProfilePort(original["port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["port"] = transformedPort + } + + transformedUsername, err := expandDatastreamConnectionProfileSqlServerProfileUsername(original["username"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUsername); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["username"] = transformedUsername + } + + transformedPassword, err := expandDatastreamConnectionProfileSqlServerProfilePassword(original["password"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPassword); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["password"] = transformedPassword + } + + transformedDatabase, err := expandDatastreamConnectionProfileSqlServerProfileDatabase(original["database"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatabase); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["database"] = transformedDatabase + } + + return transformed, nil +} + +func expandDatastreamConnectionProfileSqlServerProfileHostname(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamConnectionProfileSqlServerProfilePort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamConnectionProfileSqlServerProfileUsername(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamConnectionProfileSqlServerProfilePassword(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamConnectionProfileSqlServerProfileDatabase(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandDatastreamConnectionProfileForwardSshConnectivity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_private_connection.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_private_connection.go index 478c8f9b689..9d18476a762 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_private_connection.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_private_connection.go @@ -24,6 +24,7 @@ import ( "log" "net/http" "reflect" + "strconv" "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" @@ -416,6 +417,11 @@ func resourceDatastreamPrivateConnectionDelete(d *schema.ResourceData, meta inte } headers := make(http.Header) + // Add force=true query 
param to force deletion of private connection sub resources like Routes + url, err = transport_tpg.AddQueryParams(url, map[string]string{"force": strconv.FormatBool(true)}) + if err != nil { + return err + } log.Printf("[DEBUG] Deleting PrivateConnection %q", d.Id()) res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_stream.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_stream.go index 4b128c72c85..1c415ac0a88 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_stream.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_stream.go @@ -143,10 +143,23 @@ func ResourceDatastreamStream() *schema.Resource { "bigquery_destination_config": { Type: schema.TypeList, Optional: true, - Description: `A configuration for how data should be loaded to Cloud Storage.`, + Description: `A configuration for how data should be loaded to Google BigQuery.`, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "append_only": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `AppendOnly mode defines that the stream of changes (INSERT, UPDATE-INSERT, UPDATE-DELETE and DELETE +events) to a source table will be written to the destination Google BigQuery table, retaining the +historical state of the data.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + ConflictsWith: []string{"destination_config.0.bigquery_destination_config.0.merge"}, + }, "data_freshness": { Type: schema.TypeString, Optional: true, @@ -155,6 +168,19 @@ Editing this field will only affect new tables created in the future, but existi will not be impacted. Lower values mean that queries will return fresher data, but may result in higher cost. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". Defaults to 900s.`, }, + "merge": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Merge mode defines that all changes to a table will be merged at the destination Google BigQuery +table. This is the default write mode. When selected, BigQuery reflects the way the data is stored +in the source database. 
With Merge mode, no historical record of the change events is kept.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + ConflictsWith: []string{"destination_config.0.bigquery_destination_config.0.append_only"}, + }, "single_target_dataset": { Type: schema.TypeList, Optional: true, @@ -506,7 +532,7 @@ If not set (or set to 0), the system's default value will be used.`, }, }, }, - ExactlyOneOf: []string{"source_config.0.mysql_source_config", "source_config.0.oracle_source_config", "source_config.0.postgresql_source_config"}, + ExactlyOneOf: []string{"source_config.0.mysql_source_config", "source_config.0.oracle_source_config", "source_config.0.postgresql_source_config", "source_config.0.sql_server_source_config"}, }, "oracle_source_config": { Type: schema.TypeList, @@ -743,7 +769,7 @@ If not set (or set to 0), the system's default value will be used.`, }, }, }, - ExactlyOneOf: []string{"source_config.0.mysql_source_config", "source_config.0.oracle_source_config", "source_config.0.postgresql_source_config"}, + ExactlyOneOf: []string{"source_config.0.mysql_source_config", "source_config.0.oracle_source_config", "source_config.0.postgresql_source_config", "source_config.0.sql_server_source_config"}, }, "postgresql_source_config": { Type: schema.TypeList, @@ -956,7 +982,232 @@ negative. If not set (or set to 0), the system's default value will be used.`, }, }, }, - ExactlyOneOf: []string{"source_config.0.mysql_source_config", "source_config.0.oracle_source_config", "source_config.0.postgresql_source_config"}, + ExactlyOneOf: []string{"source_config.0.mysql_source_config", "source_config.0.oracle_source_config", "source_config.0.postgresql_source_config", "source_config.0.sql_server_source_config"}, + }, + "sql_server_source_config": { + Type: schema.TypeList, + Optional: true, + Description: `SQL Server data source configuration.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "change_tables": { + Type: schema.TypeList, + Optional: true, + Description: `CDC reader reads from change tables.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "exclude_objects": { + Type: schema.TypeList, + Optional: true, + Description: `SQL Server objects to exclude from the stream.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "schemas": { + Type: schema.TypeList, + Required: true, + Description: `SQL Server schemas/databases in the database server`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "schema": { + Type: schema.TypeString, + Required: true, + Description: `Schema name.`, + }, + "tables": { + Type: schema.TypeList, + Optional: true, + Description: `Tables in the database.`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "table": { + Type: schema.TypeString, + Required: true, + Description: `Table name.`, + }, + "columns": { + Type: schema.TypeList, + Optional: true, + Description: `SQL Server columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything.`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "column": { + Type: schema.TypeString, + Optional: true, + Description: `Column name.`, + }, + "data_type": { + Type: schema.TypeString, + Optional: true, + Description: `The SQL Server data type. 
Full data types list can be found here: +https://learn.microsoft.com/en-us/sql/t-sql/data-types/data-types-transact-sql?view=sql-server-ver16`, + }, + "length": { + Type: schema.TypeInt, + Computed: true, + Description: `Column length.`, + }, + "nullable": { + Type: schema.TypeBool, + Computed: true, + Description: `Whether or not the column can accept a null value.`, + }, + "ordinal_position": { + Type: schema.TypeInt, + Computed: true, + Description: `The ordinal position of the column in the table.`, + }, + "precision": { + Type: schema.TypeInt, + Computed: true, + Description: `Column precision.`, + }, + "primary_key": { + Type: schema.TypeBool, + Computed: true, + Description: `Whether or not the column represents a primary key.`, + }, + "scale": { + Type: schema.TypeInt, + Computed: true, + Description: `Column scale.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "include_objects": { + Type: schema.TypeList, + Optional: true, + Description: `SQL Server objects to retrieve from the source.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "schemas": { + Type: schema.TypeList, + Required: true, + Description: `SQL Server schemas/databases in the database server`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "schema": { + Type: schema.TypeString, + Required: true, + Description: `Schema name.`, + }, + "tables": { + Type: schema.TypeList, + Optional: true, + Description: `Tables in the database.`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "table": { + Type: schema.TypeString, + Required: true, + Description: `Table name.`, + }, + "columns": { + Type: schema.TypeList, + Optional: true, + Description: `SQL Server columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything.`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "column": { + Type: schema.TypeString, + Optional: true, + Description: `Column name.`, + }, + "data_type": { + Type: schema.TypeString, + Optional: true, + Description: `The SQL Server data type. 
Full data types list can be found here: +https://learn.microsoft.com/en-us/sql/t-sql/data-types/data-types-transact-sql?view=sql-server-ver16`, + }, + "length": { + Type: schema.TypeInt, + Computed: true, + Description: `Column length.`, + }, + "nullable": { + Type: schema.TypeBool, + Computed: true, + Description: `Whether or not the column can accept a null value.`, + }, + "ordinal_position": { + Type: schema.TypeInt, + Computed: true, + Description: `The ordinal position of the column in the table.`, + }, + "precision": { + Type: schema.TypeInt, + Computed: true, + Description: `Column precision.`, + }, + "primary_key": { + Type: schema.TypeBool, + Computed: true, + Description: `Whether or not the column represents a primary key.`, + }, + "scale": { + Type: schema.TypeInt, + Computed: true, + Description: `Column scale.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "max_concurrent_backfill_tasks": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `Max concurrent backfill tasks.`, + }, + "max_concurrent_cdc_tasks": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `Max concurrent CDC tasks.`, + }, + "transaction_logs": { + Type: schema.TypeList, + Optional: true, + Description: `CDC reader reads from transaction logs.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + }, + }, + ExactlyOneOf: []string{"source_config.0.mysql_source_config", "source_config.0.oracle_source_config", "source_config.0.postgresql_source_config", "source_config.0.sql_server_source_config"}, }, }, }, @@ -1247,51 +1498,142 @@ https://www.postgresql.org/docs/current/datatype.html`, }, }, }, - }, - }, - ExactlyOneOf: []string{"backfill_all", "backfill_none"}, - }, - "backfill_none": { - Type: schema.TypeList, - Optional: true, - Description: `Backfill strategy to disable automatic backfill for the Stream's objects.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{}, - }, - ExactlyOneOf: []string{"backfill_all", "backfill_none"}, - }, - "create_without_validation": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Create the stream without validating it.`, - Default: false, - }, - "customer_managed_encryption_key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `A reference to a KMS encryption key. If provided, it will be used to encrypt the data. If left blank, data -will be encrypted using an internal Stream-specific encryption key provisioned through KMS.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Labels. - -**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. 
-Please refer to the field 'effective_labels' for all of the labels present on the resource.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "effective_labels": { - Type: schema.TypeMap, - Computed: true, - Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "name": { - Type: schema.TypeString, + "sql_server_excluded_objects": { + Type: schema.TypeList, + Optional: true, + Description: `SQL Server data source objects to avoid backfilling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "schemas": { + Type: schema.TypeList, + Required: true, + Description: `SQL Server schemas/databases in the database server`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "schema": { + Type: schema.TypeString, + Required: true, + Description: `Schema name.`, + }, + "tables": { + Type: schema.TypeList, + Optional: true, + Description: `Tables in the database.`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "table": { + Type: schema.TypeString, + Required: true, + Description: `Table name.`, + }, + "columns": { + Type: schema.TypeList, + Optional: true, + Description: `SQL Server columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything.`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "column": { + Type: schema.TypeString, + Optional: true, + Description: `Column name.`, + }, + "data_type": { + Type: schema.TypeString, + Optional: true, + Description: `The SQL Server data type. Full data types list can be found here: +https://learn.microsoft.com/en-us/sql/t-sql/data-types/data-types-transact-sql?view=sql-server-ver16`, + }, + "length": { + Type: schema.TypeInt, + Computed: true, + Description: `Column length.`, + }, + "nullable": { + Type: schema.TypeBool, + Computed: true, + Description: `Whether or not the column can accept a null value.`, + }, + "ordinal_position": { + Type: schema.TypeInt, + Computed: true, + Description: `The ordinal position of the column in the table.`, + }, + "precision": { + Type: schema.TypeInt, + Computed: true, + Description: `Column precision.`, + }, + "primary_key": { + Type: schema.TypeBool, + Computed: true, + Description: `Whether or not the column represents a primary key.`, + }, + "scale": { + Type: schema.TypeInt, + Computed: true, + Description: `Column scale.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + ExactlyOneOf: []string{"backfill_all", "backfill_none"}, + }, + "backfill_none": { + Type: schema.TypeList, + Optional: true, + Description: `Backfill strategy to disable automatic backfill for the Stream's objects.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + ExactlyOneOf: []string{"backfill_all", "backfill_none"}, + }, + "create_without_validation": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Create the stream without validating it.`, + Default: false, + }, + "customer_managed_encryption_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `A reference to a KMS encryption key. If provided, it will be used to encrypt the data. 
If left blank, data +will be encrypted using an internal Stream-specific encryption key provisioned through KMS.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels. + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "name": { + Type: schema.TypeString, Computed: true, Description: `The stream's name.`, }, @@ -1308,10 +1650,13 @@ Please refer to the field 'effective_labels' for all of the labels present on th Elem: &schema.Schema{Type: schema.TypeString}, }, "desired_state": { - Type: schema.TypeString, - Optional: true, - Default: "NOT_STARTED", - Description: `Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream.`, + Type: schema.TypeString, + Optional: true, + Description: `Desired state of the Stream. Set this field to 'RUNNING' to start the stream, +'NOT_STARTED' to create the stream without starting and 'PAUSED' to pause +the stream from a 'RUNNING' state. +Possible values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED`, + Default: "NOT_STARTED", }, "project": { Type: schema.TypeString, @@ -1833,6 +2178,8 @@ func flattenDatastreamStreamSourceConfig(v interface{}, d *schema.ResourceData, flattenDatastreamStreamSourceConfigOracleSourceConfig(original["oracleSourceConfig"], d, config) transformed["postgresql_source_config"] = flattenDatastreamStreamSourceConfigPostgresqlSourceConfig(original["postgresqlSourceConfig"], d, config) + transformed["sql_server_source_config"] = + flattenDatastreamStreamSourceConfigSqlServerSourceConfig(original["sqlServerSourceConfig"], d, config) return []interface{}{transformed} } func flattenDatastreamStreamSourceConfigSourceConnectionProfile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { @@ -2958,28 +3305,27 @@ func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigMaxConcurrentBackf return v // let terraform core handle it otherwise } -func flattenDatastreamStreamDestinationConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenDatastreamStreamSourceConfigSqlServerSourceConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } transformed := make(map[string]interface{}) - transformed["destination_connection_profile"] = - flattenDatastreamStreamDestinationConfigDestinationConnectionProfile(original["destinationConnectionProfile"], d, config) - transformed["gcs_destination_config"] = - flattenDatastreamStreamDestinationConfigGcsDestinationConfig(original["gcsDestinationConfig"], d, config) - transformed["bigquery_destination_config"] = - flattenDatastreamStreamDestinationConfigBigqueryDestinationConfig(original["bigqueryDestinationConfig"], d, config) + transformed["include_objects"] = + flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjects(original["includeObjects"], d, config) + transformed["exclude_objects"] = + 
flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjects(original["excludeObjects"], d, config) + transformed["max_concurrent_cdc_tasks"] = + flattenDatastreamStreamSourceConfigSqlServerSourceConfigMaxConcurrentCdcTasks(original["maxConcurrentCdcTasks"], d, config) + transformed["max_concurrent_backfill_tasks"] = + flattenDatastreamStreamSourceConfigSqlServerSourceConfigMaxConcurrentBackfillTasks(original["maxConcurrentBackfillTasks"], d, config) + transformed["transaction_logs"] = + flattenDatastreamStreamSourceConfigSqlServerSourceConfigTransactionLogs(original["transactionLogs"], d, config) + transformed["change_tables"] = + flattenDatastreamStreamSourceConfigSqlServerSourceConfigChangeTables(original["changeTables"], d, config) return []interface{}{transformed} } -func flattenDatastreamStreamDestinationConfigDestinationConnectionProfile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v -} - -func flattenDatastreamStreamDestinationConfigGcsDestinationConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2988,23 +3334,90 @@ func flattenDatastreamStreamDestinationConfigGcsDestinationConfig(v interface{}, return nil } transformed := make(map[string]interface{}) - transformed["path"] = - flattenDatastreamStreamDestinationConfigGcsDestinationConfigPath(original["path"], d, config) - transformed["file_rotation_mb"] = - flattenDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationMb(original["fileRotationMb"], d, config) - transformed["file_rotation_interval"] = - flattenDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationInterval(original["fileRotationInterval"], d, config) - transformed["avro_file_format"] = - flattenDatastreamStreamDestinationConfigGcsDestinationConfigAvroFileFormat(original["avroFileFormat"], d, config) - transformed["json_file_format"] = - flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormat(original["jsonFileFormat"], d, config) + transformed["schemas"] = + flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemas(original["schemas"], d, config) return []interface{}{transformed} } -func flattenDatastreamStreamDestinationConfigGcsDestinationConfigPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "schema": flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasSchema(original["schema"], d, config), + "tables": flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTables(original["tables"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasSchema(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationMb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "table": flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesTable(original["table"], d, config), + "columns": flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumns(original["columns"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "column": flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsColumn(original["column"], d, config), + "data_type": flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsDataType(original["dataType"], d, config), + "length": flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsLength(original["length"], d, config), + "precision": flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsPrecision(original["precision"], d, config), + "scale": flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsScale(original["scale"], d, config), + "primary_key": flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsPrimaryKey(original["primaryKey"], d, config), + "nullable": flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsNullable(original["nullable"], d, config), + "ordinal_position": flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsOrdinalPosition(original["ordinalPosition"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsColumn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsDataType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := 
v.(string); ok { if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { @@ -3021,140 +3434,66 @@ func flattenDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationMb( return v // let terraform core handle it otherwise } -func flattenDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationInterval(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v -} +func flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsPrecision(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } -func flattenDatastreamStreamDestinationConfigGcsDestinationConfigAvroFileFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - if v == nil { - return nil + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal } - transformed := make(map[string]interface{}) - return []interface{}{transformed} + + return v // let terraform core handle it otherwise } -func flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - if v == nil { - return nil +func flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsScale(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal } - transformed := make(map[string]interface{}) - transformed["schema_file_format"] = - flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatSchemaFileFormat(original["schemaFileFormat"], d, config) - transformed["compression"] = - flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatCompression(original["compression"], d, config) - return []interface{}{transformed} -} -func flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatSchemaFileFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v -} -func flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatCompression(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v + return v // let terraform core handle it otherwise } -func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["data_freshness"] = - flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigDataFreshness(original["dataFreshness"], d, config) - transformed["single_target_dataset"] = - flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDataset(original["singleTargetDataset"], d, config) - transformed["source_hierarchy_datasets"] = - 
flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasets(original["sourceHierarchyDatasets"], d, config) - return []interface{}{transformed} -} -func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigDataFreshness(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDataset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["dataset_id"] = - flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetDatasetId(original["datasetId"], d, config) - return []interface{}{transformed} -} -func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetDatasetId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsNullable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasets(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["dataset_template"] = - flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplate(original["datasetTemplate"], d, config) - return []interface{}{transformed} -} -func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil +func flattenDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } } - transformed := make(map[string]interface{}) - transformed["location"] = - flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateLocation(original["location"], d, config) - transformed["dataset_id_prefix"] = - flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateDatasetIdPrefix(original["datasetIdPrefix"], d, config) - transformed["kms_key_name"] = - flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateKmsKeyName(original["kmsKeyName"], d, config) - return []interface{}{transformed} -} -func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v -} - 
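The integer flatteners repeated throughout this diff (port, length, precision, scale, ordinal_position) all follow one normalization pattern: the Datastream API may return an integer field either as a string-encoded fixed64 or as a plain JSON number, which encoding/json decodes into a float64; any other value is passed through for Terraform core to handle. A minimal standalone sketch of that pattern follows; normalizeInt is a hypothetical name, and strconv.ParseInt stands in for the provider's tpgresource.StringToFixed64 helper.

package main

import (
	"fmt"
	"strconv"
)

// normalizeInt mirrors the flatten pattern used for the SQL Server port and
// column fields: accept a string-encoded fixed64 or a float64 (how
// encoding/json represents JSON numbers) and return an int; anything else
// is returned unchanged for Terraform core to handle.
func normalizeInt(v interface{}) interface{} {
	if s, ok := v.(string); ok {
		if n, err := strconv.ParseInt(s, 10, 64); err == nil {
			return int(n) // handles the string fixed64 format
		}
	}
	if f, ok := v.(float64); ok {
		return int(f) // JSON numbers arrive as float64
	}
	return v
}

func main() {
	fmt.Println(normalizeInt("1433")) // 1433
	fmt.Println(normalizeInt(1433.0)) // 1433
	fmt.Println(normalizeInt(nil))    // <nil>
}

Truncating through int(f) is safe here because the values in question (ports, column lengths, ordinal positions) are small non-negative integers well inside float64's exact-integer range.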
-func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateDatasetIdPrefix(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
-	return v
-}
-func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
-	return v
-}
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
 
-func flattenDatastreamStreamState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
-	return v
+	return v // let terraform core handle it otherwise
 }
 
-func flattenDatastreamStreamBackfillAll(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
-	if v == nil {
-		return nil
-	}
-	original := v.(map[string]interface{})
-	transformed := make(map[string]interface{})
-	transformed["mysql_excluded_objects"] =
-		flattenDatastreamStreamBackfillAllMysqlExcludedObjects(original["mysqlExcludedObjects"], d, config)
-	transformed["postgresql_excluded_objects"] =
-		flattenDatastreamStreamBackfillAllPostgresqlExcludedObjects(original["postgresqlExcludedObjects"], d, config)
-	transformed["oracle_excluded_objects"] =
-		flattenDatastreamStreamBackfillAllOracleExcludedObjects(original["oracleExcludedObjects"], d, config)
-	return []interface{}{transformed}
-}
-func flattenDatastreamStreamBackfillAllMysqlExcludedObjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	if v == nil {
 		return nil
 	}
@@ -3163,11 +3502,11 @@ func flattenDatastreamStreamBackfillAllMysqlExcludedObjects(v interface{}, d *sc
 		return nil
 	}
 	transformed := make(map[string]interface{})
-	transformed["mysql_databases"] =
-		flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabases(original["mysqlDatabases"], d, config)
+	transformed["schemas"] =
+		flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemas(original["schemas"], d, config)
 	return []interface{}{transformed}
 }
-func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabases(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	if v == nil {
 		return v
 	}
@@ -3180,17 +3519,17 @@ func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabases(v inte
 			continue
 		}
 		transformed = append(transformed, map[string]interface{}{
-			"database": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesDatabase(original["database"], d, config),
-			"mysql_tables": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTables(original["mysqlTables"], d, config),
+			"schema": flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasSchema(original["schema"], d, config),
+			"tables": flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTables(original["tables"], d, config),
 		})
 	}
 	return transformed
 }
-func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesDatabase(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasSchema(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	return v
 }
-func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	if v == nil {
 		return v
 	}
@@ -3203,17 +3542,17 @@ func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTa
 			continue
 		}
 		transformed = append(transformed, map[string]interface{}{
-			"table": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesTable(original["table"], d, config),
-			"mysql_columns": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumns(original["mysqlColumns"], d, config),
+			"table": flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesTable(original["table"], d, config),
+			"columns": flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumns(original["columns"], d, config),
 		})
 	}
 	return transformed
 }
-func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	return v
 }
-func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	if v == nil {
 		return v
 	}
@@ -3226,26 +3565,27 @@ func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTa
 			continue
 		}
 		transformed = append(transformed, map[string]interface{}{
-			"column": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(original["column"], d, config),
-			"data_type": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(original["dataType"], d, config),
-			"length": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(original["length"], d, config),
-			"collation": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(original["collation"], d, config),
-			"primary_key": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(original["primaryKey"], d, config),
-			"nullable": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(original["nullable"], d, config),
-			"ordinal_position": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(original["ordinalPosition"], d, config),
+			"column": flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsColumn(original["column"], d, config),
+			"data_type": flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsDataType(original["dataType"], d, config),
+			"length": flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsLength(original["length"], d, config),
+			"precision": flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsPrecision(original["precision"], d, config),
+			"scale": flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsScale(original["scale"], d, config),
+			"primary_key": flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsPrimaryKey(original["primaryKey"], d, config),
+			"nullable": flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsNullable(original["nullable"], d, config),
+			"ordinal_position": flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsOrdinalPosition(original["ordinalPosition"], d, config),
 		})
 	}
 	return transformed
 }
-func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsColumn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	return v
 }
 
-func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsDataType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	return v
 }
 
-func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	// Handles the string fixed64 format
 	if strVal, ok := v.(string); ok {
 		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
@@ -3262,19 +3602,7 @@ func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTa
 	return v // let terraform core handle it otherwise
 }
 
-func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
-	return v
-}
-
-func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
-	return v
-}
-
-func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
-	return v
-}
-
-func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsPrecision(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	// Handles the string fixed64 format
 	if strVal, ok := v.(string); ok {
 		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
@@ -3291,99 +3619,7 @@ func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTa
 	return v // let terraform core handle it otherwise
 }
 
-func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
-	if v == nil {
-		return nil
-	}
-	original := v.(map[string]interface{})
-	if len(original) == 0 {
-		return nil
-	}
-	transformed := make(map[string]interface{})
-	transformed["postgresql_schemas"] =
-		flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemas(original["postgresqlSchemas"], d, config)
-	return []interface{}{transformed}
-}
-func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
-	if v == nil {
-		return v
-	}
-	l := v.([]interface{})
-	transformed := make([]interface{}, 0, len(l))
-	for _, raw := range l {
-		original := raw.(map[string]interface{})
-		if len(original) < 1 {
-			// Do not include empty json objects coming back from the api
-			continue
-		}
-		transformed = append(transformed, map[string]interface{}{
-			"schema": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasSchema(original["schema"], d, config),
-			"postgresql_tables": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTables(original["postgresqlTables"], d, config),
-		})
-	}
-	return transformed
-}
-func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasSchema(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
-	return v
-}
-
-func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
-	if v == nil {
-		return v
-	}
-	l := v.([]interface{})
-	transformed := make([]interface{}, 0, len(l))
-	for _, raw := range l {
-		original := raw.(map[string]interface{})
-		if len(original) < 1 {
-			// Do not include empty json objects coming back from the api
-			continue
-		}
-		transformed = append(transformed, map[string]interface{}{
-			"table": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesTable(original["table"], d, config),
-			"postgresql_columns": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(original["postgresqlColumns"], d, config),
-		})
-	}
-	return transformed
-}
-func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
-	return v
-}
-
-func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
-	if v == nil {
-		return v
-	}
-	l := v.([]interface{})
-	transformed := make([]interface{}, 0, len(l))
-	for _, raw := range l {
-		original := raw.(map[string]interface{})
-		if len(original) < 1 {
-			// Do not include empty json objects coming back from the api
-			continue
-		}
-		transformed = append(transformed, map[string]interface{}{
-			"column": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(original["column"], d, config),
-			"data_type": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(original["dataType"], d, config),
-			"length": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(original["length"], d, config),
-			"precision": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(original["precision"], d, config),
-			"scale": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(original["scale"], d, config),
-			"primary_key": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(original["primaryKey"], d, config),
-			"nullable": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(original["nullable"], d, config),
-			"ordinal_position": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(original["ordinalPosition"], d, config),
-		})
-	}
-	return transformed
-}
-func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
-	return v
-}
-
-func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
-	return v
-}
-
-func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsScale(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	// Handles the string fixed64 format
 	if strVal, ok := v.(string); ok {
 		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
			return intVal
		}
	}
@@ -3400,7 +3636,15 @@ func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchema
 	return v // let terraform core handle it otherwise
 }
 
-func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsNullable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	// Handles the string fixed64 format
 	if strVal, ok := v.(string); ok {
 		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
@@ -3417,7 +3661,7 @@ func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchema
 	return v // let terraform core handle it otherwise
 }
 
-func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamSourceConfigSqlServerSourceConfigMaxConcurrentCdcTasks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	// Handles the string fixed64 format
 	if strVal, ok := v.(string); ok {
 		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
@@ -3434,15 +3678,7 @@ func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchema
 	return v // let terraform core handle it otherwise
 }
 
-func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
-	return v
-}
-
-func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
-	return v
-}
-
-func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamSourceConfigSqlServerSourceConfigMaxConcurrentBackfillTasks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	// Handles the string fixed64 format
 	if strVal, ok := v.(string); ok {
 		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
@@ -3459,100 +3695,69 @@ func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchema
 	return v // let terraform core handle it otherwise
 }
 
-func flattenDatastreamStreamBackfillAllOracleExcludedObjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamSourceConfigSqlServerSourceConfigTransactionLogs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	if v == nil {
 		return nil
 	}
-	original := v.(map[string]interface{})
-	if len(original) == 0 {
-		return nil
-	}
 	transformed := make(map[string]interface{})
-	transformed["oracle_schemas"] =
-		flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemas(original["oracleSchemas"], d, config)
 	return []interface{}{transformed}
 }
-func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+
+func flattenDatastreamStreamSourceConfigSqlServerSourceConfigChangeTables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	if v == nil {
-		return v
-	}
-	l := v.([]interface{})
-	transformed := make([]interface{}, 0, len(l))
-	for _, raw := range l {
-		original := raw.(map[string]interface{})
-		if len(original) < 1 {
-			// Do not include empty json objects coming back from the api
-			continue
-		}
-		transformed = append(transformed, map[string]interface{}{
-			"schema": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasSchema(original["schema"], d, config),
-			"oracle_tables": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTables(original["oracleTables"], d, config),
-		})
+		return nil
 	}
-	return transformed
-}
-func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasSchema(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
-	return v
+	transformed := make(map[string]interface{})
+	return []interface{}{transformed}
 }
 
-func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamDestinationConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	if v == nil {
-		return v
+		return nil
 	}
-	l := v.([]interface{})
-	transformed := make([]interface{}, 0, len(l))
-	for _, raw := range l {
-		original := raw.(map[string]interface{})
-		if len(original) < 1 {
-			// Do not include empty json objects coming back from the api
-			continue
-		}
-		transformed = append(transformed, map[string]interface{}{
-			"table": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesTable(original["table"], d, config),
-			"oracle_columns": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumns(original["oracleColumns"], d, config),
-		})
+	original := v.(map[string]interface{})
+	if len(original) == 0 {
+		return nil
 	}
-	return transformed
+	transformed := make(map[string]interface{})
+	transformed["destination_connection_profile"] =
+		flattenDatastreamStreamDestinationConfigDestinationConnectionProfile(original["destinationConnectionProfile"], d, config)
+	transformed["gcs_destination_config"] =
+		flattenDatastreamStreamDestinationConfigGcsDestinationConfig(original["gcsDestinationConfig"], d, config)
+	transformed["bigquery_destination_config"] =
+		flattenDatastreamStreamDestinationConfigBigqueryDestinationConfig(original["bigqueryDestinationConfig"], d, config)
+	return []interface{}{transformed}
 }
-func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamDestinationConfigDestinationConnectionProfile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	return v
 }
 
-func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamDestinationConfigGcsDestinationConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	if v == nil {
-		return v
+		return nil
 	}
-	l := v.([]interface{})
-	transformed := make([]interface{}, 0, len(l))
-	for _, raw := range l {
-		original := raw.(map[string]interface{})
-		if len(original) < 1 {
-			// Do not include empty json objects coming back from the api
-			continue
-		}
-		transformed = append(transformed, map[string]interface{}{
-			"column": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsColumn(original["column"], d, config),
-			"data_type": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsDataType(original["dataType"], d, config),
-			"length": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsLength(original["length"], d, config),
-			"precision": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrecision(original["precision"], d, config),
-			"scale": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsScale(original["scale"], d, config),
-			"encoding": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsEncoding(original["encoding"], d, config),
-			"primary_key": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(original["primaryKey"], d, config),
-			"nullable": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsNullable(original["nullable"], d, config),
-			"ordinal_position": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(original["ordinalPosition"], d, config),
-		})
+	original := v.(map[string]interface{})
+	if len(original) == 0 {
+		return nil
 	}
-	return transformed
-}
-func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsColumn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
-	return v
+	transformed := make(map[string]interface{})
+	transformed["path"] =
+		flattenDatastreamStreamDestinationConfigGcsDestinationConfigPath(original["path"], d, config)
+	transformed["file_rotation_mb"] =
+		flattenDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationMb(original["fileRotationMb"], d, config)
+	transformed["file_rotation_interval"] =
+		flattenDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationInterval(original["fileRotationInterval"], d, config)
+	transformed["avro_file_format"] =
+		flattenDatastreamStreamDestinationConfigGcsDestinationConfigAvroFileFormat(original["avroFileFormat"], d, config)
+	transformed["json_file_format"] =
+		flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormat(original["jsonFileFormat"], d, config)
+	return []interface{}{transformed}
 }
-
-func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsDataType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamDestinationConfigGcsDestinationConfigPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	return v
 }
 
-func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationMb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	// Handles the string fixed64 format
 	if strVal, ok := v.(string); ok {
 		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
@@ -3569,149 +3774,1301 @@ func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleT
 	return v // let terraform core handle it otherwise
 }
 
-func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrecision(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
-	// Handles the string fixed64 format
-	if strVal, ok := v.(string); ok {
-		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
-			return intVal
-		}
-	}
+func flattenDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationInterval(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
 
-	// number values are represented as float64
-	if floatVal, ok := v.(float64); ok {
-		intVal := int(floatVal)
-		return intVal
+func flattenDatastreamStreamDestinationConfigGcsDestinationConfigAvroFileFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return nil
 	}
-
-	return v // let terraform core handle it otherwise
+	transformed := make(map[string]interface{})
+	return []interface{}{transformed}
 }
 
-func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsScale(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
-	// Handles the string fixed64 format
-	if strVal, ok := v.(string); ok {
-		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
-			return intVal
-		}
+func flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return nil
 	}
-
-	// number values are represented as float64
-	if floatVal, ok := v.(float64); ok {
-		intVal := int(floatVal)
-		return intVal
+	original := v.(map[string]interface{})
+	if len(original) == 0 {
+		return nil
 	}
-
-	return v // let terraform core handle it otherwise
-}
-
-func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsEncoding(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
-	return v
+	transformed := make(map[string]interface{})
+	transformed["schema_file_format"] =
+		flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatSchemaFileFormat(original["schemaFileFormat"], d, config)
+	transformed["compression"] =
+		flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatCompression(original["compression"], d, config)
+	return []interface{}{transformed}
 }
-
-func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatSchemaFileFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	return v
 }
 
-func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsNullable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatCompression(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	return v
 }
 
-func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
-	// Handles the string fixed64 format
-	if strVal, ok := v.(string); ok {
-		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
-			return intVal
-		}
+func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return nil
 	}
-
-	// number values are represented as float64
-	if floatVal, ok := v.(float64); ok {
-		intVal := int(floatVal)
-		return intVal
+	original := v.(map[string]interface{})
+	if len(original) == 0 {
+		return nil
 	}
-
-	return v // let terraform core handle it otherwise
+	transformed := make(map[string]interface{})
+	transformed["data_freshness"] =
+		flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigDataFreshness(original["dataFreshness"], d, config)
+	transformed["single_target_dataset"] =
+		flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDataset(original["singleTargetDataset"], d, config)
+	transformed["source_hierarchy_datasets"] =
+		flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasets(original["sourceHierarchyDatasets"], d, config)
+	transformed["merge"] =
+		flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigMerge(original["merge"], d, config)
+	transformed["append_only"] =
+		flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigAppendOnly(original["appendOnly"], d, config)
+	return []interface{}{transformed}
+}
+func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigDataFreshness(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
 }
 
-func flattenDatastreamStreamBackfillNone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDataset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	if v == nil {
 		return nil
 	}
+	original := v.(map[string]interface{})
+	if len(original) == 0 {
+		return nil
+	}
 	transformed := make(map[string]interface{})
+	transformed["dataset_id"] =
+		flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetDatasetId(original["datasetId"], d, config)
 	return []interface{}{transformed}
 }
-
-func flattenDatastreamStreamCustomerManagedEncryptionKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetDatasetId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	return v
 }
 
-func flattenDatastreamStreamTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasets(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	if v == nil {
-		return v
+		return nil
+	}
+	original := v.(map[string]interface{})
+	if len(original) == 0 {
+		return nil
 	}
-
 	transformed := make(map[string]interface{})
-	if l, ok := d.GetOkExists("terraform_labels"); ok {
-		for k := range l.(map[string]interface{}) {
-			transformed[k] = v.(map[string]interface{})[k]
-		}
+	transformed["dataset_template"] =
+		flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplate(original["datasetTemplate"], d, config)
+	return []interface{}{transformed}
+}
+func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return nil
 	}
-
-	return transformed
+	original := v.(map[string]interface{})
+	if len(original) == 0 {
+		return nil
+	}
+	transformed := make(map[string]interface{})
+	transformed["location"] =
+		flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateLocation(original["location"], d, config)
+	transformed["dataset_id_prefix"] =
+		flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateDatasetIdPrefix(original["datasetIdPrefix"], d, config)
+	transformed["kms_key_name"] =
+		flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateKmsKeyName(original["kmsKeyName"], d, config)
+	return []interface{}{transformed}
+}
+func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
 }
 
-func flattenDatastreamStreamEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateDatasetIdPrefix(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	return v
 }
 
-func expandDatastreamStreamDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
-	return v, nil
+func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
 }
 
-func expandDatastreamStreamSourceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
-	l := v.([]interface{})
-	if len(l) == 0 || l[0] == nil {
-		return nil, nil
+func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigMerge(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return nil
 	}
-	raw := l[0]
-	original := raw.(map[string]interface{})
 	transformed := make(map[string]interface{})
+	return []interface{}{transformed}
+}
 
-	transformedSourceConnectionProfile, err := expandDatastreamStreamSourceConfigSourceConnectionProfile(original["source_connection_profile"], d, config)
+func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigAppendOnly(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return nil
+	}
+	transformed := make(map[string]interface{})
+	return []interface{}{transformed}
+}
+
+func flattenDatastreamStreamState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAll(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return nil
+	}
+	original := v.(map[string]interface{})
+	transformed := make(map[string]interface{})
+	transformed["mysql_excluded_objects"] =
+		flattenDatastreamStreamBackfillAllMysqlExcludedObjects(original["mysqlExcludedObjects"], d, config)
+	transformed["postgresql_excluded_objects"] =
+		flattenDatastreamStreamBackfillAllPostgresqlExcludedObjects(original["postgresqlExcludedObjects"], d, config)
+	transformed["oracle_excluded_objects"] =
+		flattenDatastreamStreamBackfillAllOracleExcludedObjects(original["oracleExcludedObjects"], d, config)
+	transformed["sql_server_excluded_objects"] =
+		flattenDatastreamStreamBackfillAllSqlServerExcludedObjects(original["sqlServerExcludedObjects"], d, config)
+	return []interface{}{transformed}
+}
+func flattenDatastreamStreamBackfillAllMysqlExcludedObjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return nil
+	}
+	original := v.(map[string]interface{})
+	if len(original) == 0 {
+		return nil
+	}
+	transformed := make(map[string]interface{})
+	transformed["mysql_databases"] =
+		flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabases(original["mysqlDatabases"], d, config)
+	return []interface{}{transformed}
+}
+func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabases(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return v
+	}
+	l := v.([]interface{})
+	transformed := make([]interface{}, 0, len(l))
+	for _, raw := range l {
+		original := raw.(map[string]interface{})
+		if len(original) < 1 {
+			// Do not include empty json objects coming back from the api
+			continue
+		}
+		transformed = append(transformed, map[string]interface{}{
+			"database": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesDatabase(original["database"], d, config),
+			"mysql_tables": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTables(original["mysqlTables"], d, config),
+		})
+	}
+	return transformed
+}
+func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesDatabase(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return v
+	}
+	l := v.([]interface{})
+	transformed := make([]interface{}, 0, len(l))
+	for _, raw := range l {
+		original := raw.(map[string]interface{})
+		if len(original) < 1 {
+			// Do not include empty json objects coming back from the api
+			continue
+		}
+		transformed = append(transformed, map[string]interface{}{
+			"table": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesTable(original["table"], d, config),
+			"mysql_columns": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumns(original["mysqlColumns"], d, config),
+		})
+	}
+	return transformed
+}
+func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return v
+	}
+	l := v.([]interface{})
+	transformed := make([]interface{}, 0, len(l))
+	for _, raw := range l {
+		original := raw.(map[string]interface{})
+		if len(original) < 1 {
+			// Do not include empty json objects coming back from the api
+			continue
+		}
+		transformed = append(transformed, map[string]interface{}{
+			"column": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(original["column"], d, config),
+			"data_type": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(original["dataType"], d, config),
+			"length": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(original["length"], d, config),
+			"collation": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(original["collation"], d, config),
+			"primary_key": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(original["primaryKey"], d, config),
+			"nullable": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(original["nullable"], d, config),
+			"ordinal_position": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(original["ordinalPosition"], d, config),
+		})
+	}
+	return transformed
+}
+func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
+
+	return v // let terraform core handle it otherwise
+}
+
+func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
+
+	return v // let terraform core handle it otherwise
+}
+
+func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return nil
+	}
+	original := v.(map[string]interface{})
+	if len(original) == 0 {
+		return nil
+	}
+	transformed := make(map[string]interface{})
+	transformed["postgresql_schemas"] =
+		flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemas(original["postgresqlSchemas"], d, config)
+	return []interface{}{transformed}
+}
+func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return v
+	}
+	l := v.([]interface{})
+	transformed := make([]interface{}, 0, len(l))
+	for _, raw := range l {
+		original := raw.(map[string]interface{})
+		if len(original) < 1 {
+			// Do not include empty json objects coming back from the api
+			continue
+		}
+		transformed = append(transformed, map[string]interface{}{
+			"schema": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasSchema(original["schema"], d, config),
+			"postgresql_tables": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTables(original["postgresqlTables"], d, config),
+		})
+	}
+	return transformed
+}
+func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasSchema(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return v
+	}
+	l := v.([]interface{})
+	transformed := make([]interface{}, 0, len(l))
+	for _, raw := range l {
+		original := raw.(map[string]interface{})
+		if len(original) < 1 {
+			// Do not include empty json objects coming back from the api
+			continue
+		}
+		transformed = append(transformed, map[string]interface{}{
+			"table": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesTable(original["table"], d, config),
+			"postgresql_columns": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(original["postgresqlColumns"], d, config),
+		})
+	}
+	return transformed
+}
+func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return v
+	}
+	l := v.([]interface{})
+	transformed := make([]interface{}, 0, len(l))
+	for _, raw := range l {
+		original := raw.(map[string]interface{})
+		if len(original) < 1 {
+			// Do not include empty json objects coming back from the api
+			continue
+		}
+		transformed = append(transformed, map[string]interface{}{
+			"column": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(original["column"], d, config),
+			"data_type": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(original["dataType"], d, config),
+			"length": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(original["length"], d, config),
+			"precision": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(original["precision"], d, config),
+			"scale": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(original["scale"], d, config),
+			"primary_key": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(original["primaryKey"], d, config),
+			"nullable": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(original["nullable"], d, config),
+			"ordinal_position": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(original["ordinalPosition"], d, config),
+		})
+	}
+	return transformed
+}
+func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
+
+	return v // let terraform core handle it otherwise
+}
+
+func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
+
+	return v // let terraform core handle it otherwise
+}
+
+func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
+
+	return v // let terraform core handle it otherwise
+}
+
+func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
+
+	return v // let terraform core handle it otherwise
+}
+
+func flattenDatastreamStreamBackfillAllOracleExcludedObjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return nil
+	}
+	original := v.(map[string]interface{})
+	if len(original) == 0 {
+		return nil
+	}
+	transformed := make(map[string]interface{})
+	transformed["oracle_schemas"] =
+		flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemas(original["oracleSchemas"], d, config)
+	return []interface{}{transformed}
+}
+func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return v
+	}
+	l := v.([]interface{})
+	transformed := make([]interface{}, 0, len(l))
+	for _, raw := range l {
+		original := raw.(map[string]interface{})
+		if len(original) < 1 {
+			// Do not include empty json objects coming back from the api
+			continue
+		}
+		transformed = append(transformed, map[string]interface{}{
+			"schema": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasSchema(original["schema"], d, config),
+			"oracle_tables": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTables(original["oracleTables"], d, config),
+		})
+	}
+	return transformed
+}
+func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasSchema(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return v
+	}
+	l := v.([]interface{})
+	transformed := make([]interface{}, 0, len(l))
+	for _, raw := range l {
+		original := raw.(map[string]interface{})
+		if len(original) < 1 {
+			// Do not include empty json objects coming back from the api
+			continue
+		}
+		transformed = append(transformed, map[string]interface{}{
+			"table": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesTable(original["table"], d, config),
+			"oracle_columns": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumns(original["oracleColumns"], d, config),
+		})
+	}
+	return transformed
+}
+func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return v
+	}
+	l := v.([]interface{})
+	transformed := make([]interface{}, 0, len(l))
+	for _, raw := range l {
+		original := raw.(map[string]interface{})
+		if len(original) < 1 {
+			// Do not include empty json objects coming back from the api
+			continue
+		}
+		transformed = append(transformed, map[string]interface{}{
+			"column": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsColumn(original["column"], d, config),
+			"data_type": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsDataType(original["dataType"], d, config),
+			"length": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsLength(original["length"], d, config),
+			"precision": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrecision(original["precision"], d, config),
+			"scale": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsScale(original["scale"], d, config),
+			"encoding": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsEncoding(original["encoding"], d, config),
+			"primary_key": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(original["primaryKey"], d, config),
+			"nullable": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsNullable(original["nullable"], d, config),
+			"ordinal_position": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(original["ordinalPosition"], d, config),
+		})
+	}
+	return transformed
+}
+func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsColumn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsDataType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
+
+	return v // let terraform core handle it otherwise
+}
+
+func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrecision(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
+
+	return v // let terraform core handle it otherwise
+}
+
+func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsScale(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
+
+	return v // let terraform core handle it otherwise
+}
+
+func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsEncoding(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsNullable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
+
+	return v // let terraform core handle it otherwise
+}
+
+func flattenDatastreamStreamBackfillAllSqlServerExcludedObjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return nil
+	}
+	original := v.(map[string]interface{})
+	if len(original) == 0 {
+		return nil
+	}
+	transformed := make(map[string]interface{})
+	transformed["schemas"] =
+		flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemas(original["schemas"], d, config)
+	return []interface{}{transformed}
+}
+func flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return v
+	}
+	l := v.([]interface{})
+	transformed := make([]interface{}, 0, len(l))
+	for _, raw := range l {
+		original := raw.(map[string]interface{})
+		if len(original) < 1 {
+			// Do not include empty json objects coming back from the api
+			continue
+		}
+		transformed = append(transformed, map[string]interface{}{
+			"schema": flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasSchema(original["schema"], d, config),
+			"tables": flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTables(original["tables"], d, config),
+		})
+	}
+	return transformed
+}
+func flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasSchema(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return v
+	}
+	l := v.([]interface{})
+	transformed := make([]interface{}, 0, len(l))
+	for _, raw := range l {
+		original := raw.(map[string]interface{})
+		if len(original) < 1 {
+			// Do not include empty json objects coming back from the api
+			continue
+		}
+		transformed = append(transformed, map[string]interface{}{
+			"table": flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesTable(original["table"], d, config),
+			"columns": flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumns(original["columns"], d, config),
+		})
+	}
+	return transformed
+}
+func flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return v
+	}
+	l := v.([]interface{})
+	transformed := make([]interface{}, 0, len(l))
+	for _, raw := range l {
+		original := raw.(map[string]interface{})
+		if len(original) < 1 {
+			// Do not include empty json objects coming back from the api
+			continue
+		}
+		transformed = append(transformed, map[string]interface{}{
+			"column": flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsColumn(original["column"], d, config),
+			"data_type": flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsDataType(original["dataType"], d, config),
+			"length": flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsLength(original["length"], d, config),
+			"precision": flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsPrecision(original["precision"], d, config),
+			"scale": flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsScale(original["scale"], d, config),
+			"primary_key": flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsPrimaryKey(original["primaryKey"], d, config),
+			"nullable": flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsNullable(original["nullable"], d, config),
+			"ordinal_position": flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsOrdinalPosition(original["ordinalPosition"], d, config),
+		})
+	}
+	return transformed
+}
+func flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsColumn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsDataType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
+
+	return v // let terraform core handle it otherwise
+}
+
+func flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsPrecision(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
+
+	return v // let terraform core handle it otherwise
+}
+
+func flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsScale(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
+
+	return v // let terraform core handle it otherwise
+}
+
+func flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsNullable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
+
+	return v // let terraform core handle it otherwise
+}
+
+func flattenDatastreamStreamBackfillNone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return nil
+	}
+	transformed := make(map[string]interface{})
+	return []interface{}{transformed}
+}
+
+func flattenDatastreamStreamCustomerManagedEncryptionKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenDatastreamStreamTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return v
+	}
+
+	transformed := make(map[string]interface{})
+	if l, ok := d.GetOkExists("terraform_labels"); ok {
+		for k := range l.(map[string]interface{}) {
+			transformed[k] = v.(map[string]interface{})[k]
+		}
+	}
+
+	return transformed
+}
+
+func flattenDatastreamStreamEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func expandDatastreamStreamDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandDatastreamStreamSourceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	l := v.([]interface{})
+	if len(l) == 0 || l[0] == nil {
+		return nil, nil
+	}
+	raw := l[0]
+	original := raw.(map[string]interface{})
+	transformed := make(map[string]interface{})
+
+	transformedSourceConnectionProfile, err := expandDatastreamStreamSourceConfigSourceConnectionProfile(original["source_connection_profile"], d, config)
 	if err != nil {
 		return nil, err
 	} else if val := reflect.ValueOf(transformedSourceConnectionProfile); val.IsValid() && !tpgresource.IsEmptyValue(val) {
 		transformed["sourceConnectionProfile"] = transformedSourceConnectionProfile
 	}
 
-	transformedMysqlSourceConfig, err := expandDatastreamStreamSourceConfigMysqlSourceConfig(original["mysql_source_config"], d, config)
-	if err != nil {
-		return nil, err
-	} else {
-		transformed["mysqlSourceConfig"] = transformedMysqlSourceConfig
+	transformedMysqlSourceConfig, err := expandDatastreamStreamSourceConfigMysqlSourceConfig(original["mysql_source_config"], d, config)
+	if err != nil {
+		return nil, err
+	} else {
+		transformed["mysqlSourceConfig"] = transformedMysqlSourceConfig
+	}
+
+	transformedOracleSourceConfig, err := expandDatastreamStreamSourceConfigOracleSourceConfig(original["oracle_source_config"], d, config)
+	if err != nil {
+		return nil, err
+	} else {
+		transformed["oracleSourceConfig"] = transformedOracleSourceConfig
+	}
+
+	transformedPostgresqlSourceConfig, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfig(original["postgresql_source_config"], d, config)
+	if err != nil {
+		return nil, err
+	} else {
+		transformed["postgresqlSourceConfig"] = transformedPostgresqlSourceConfig
+	}
+
+	transformedSqlServerSourceConfig, err := expandDatastreamStreamSourceConfigSqlServerSourceConfig(original["sql_server_source_config"], d, config)
+	if err != nil {
+		return nil, err
+	} else {
+		transformed["sqlServerSourceConfig"] = transformedSqlServerSourceConfig
+	}
+
+	return transformed, nil
+}
+
+func expandDatastreamStreamSourceConfigSourceConnectionProfile(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandDatastreamStreamSourceConfigMysqlSourceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	l := v.([]interface{})
+	if len(l) == 0 {
+		return nil, nil
+	}
+
+	if l[0] == 
nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIncludeObjects, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjects(original["include_objects"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIncludeObjects); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["includeObjects"] = transformedIncludeObjects + } + + transformedExcludeObjects, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjects(original["exclude_objects"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExcludeObjects); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["excludeObjects"] = transformedExcludeObjects + } + + transformedMaxConcurrentCdcTasks, err := expandDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentCdcTasks(original["max_concurrent_cdc_tasks"], d, config) + if err != nil { + return nil, err + } else { + transformed["maxConcurrentCdcTasks"] = transformedMaxConcurrentCdcTasks + } + + transformedMaxConcurrentBackfillTasks, err := expandDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentBackfillTasks(original["max_concurrent_backfill_tasks"], d, config) + if err != nil { + return nil, err + } else { + transformed["maxConcurrentBackfillTasks"] = transformedMaxConcurrentBackfillTasks + } + + return transformed, nil +} + +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMysqlDatabases, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabases(original["mysql_databases"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMysqlDatabases); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mysqlDatabases"] = transformedMysqlDatabases + } + + return transformed, nil +} + +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabases(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDatabase, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesDatabase(original["database"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatabase); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["database"] = transformedDatabase + } + + transformedMysqlTables, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTables(original["mysql_tables"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMysqlTables); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mysqlTables"] = transformedMysqlTables + } + + req = append(req, transformed) + } + return req, nil +} + +func 
expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesDatabase(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTable, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesTable(original["table"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["table"] = transformedTable + } + + transformedMysqlColumns, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(original["mysql_columns"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMysqlColumns); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mysqlColumns"] = transformedMysqlColumns + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedColumn, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(original["column"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["column"] = transformedColumn + } + + transformedDataType, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(original["data_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dataType"] = transformedDataType + } + + transformedLength, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(original["length"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["length"] = transformedLength + } + + transformedCollation, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(original["collation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCollation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["collation"] = transformedCollation + } + + 
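// Note on the pattern used for the column fields here and below: the
// reflect.ValueOf/IsEmptyValue guard drops zero values ("", 0, false) from the
// request body, whereas numeric knobs such as maxConcurrentCdcTasks above are
// assigned unconditionally so that an explicit 0 round-trips to the API.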
+		transformedPrimaryKey, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(original["primary_key"], d, config)
+		if err != nil {
+			return nil, err
+		} else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+			transformed["primaryKey"] = transformedPrimaryKey
+		}
+
+		transformedNullable, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(original["nullable"], d, config)
+		if err != nil {
+			return nil, err
+		} else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+			transformed["nullable"] = transformedNullable
+		}
+
+		transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(original["ordinal_position"], d, config)
+		if err != nil {
+			return nil, err
+		} else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+			transformed["ordinalPosition"] = transformedOrdinalPosition
+		}
+
+		req = append(req, transformed)
 	}
+	return req, nil
+}
-	transformedOracleSourceConfig, err := expandDatastreamStreamSourceConfigOracleSourceConfig(original["oracle_source_config"], d, config)
-	if err != nil {
-		return nil, err
-	} else {
-		transformed["oracleSourceConfig"] = transformedOracleSourceConfig
+func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	l := v.([]interface{})
+	if len(l) == 0 || l[0] == nil {
+		return nil, nil
 	}
+	raw := l[0]
+	original := raw.(map[string]interface{})
+	transformed := make(map[string]interface{})
-	transformedPostgresqlSourceConfig, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfig(original["postgresql_source_config"], d, config)
+	transformedMysqlDatabases, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabases(original["mysql_databases"], d, config)
 	if err != nil {
 		return nil, err
-	} else {
-		transformed["postgresqlSourceConfig"] = transformedPostgresqlSourceConfig
+	} else if val := reflect.ValueOf(transformedMysqlDatabases); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["mysqlDatabases"] = transformedMysqlDatabases
 	}
 
 	return transformed, nil
 }
 
-func expandDatastreamStreamSourceConfigSourceConnectionProfile(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabases(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	l := v.([]interface{})
+	req := make([]interface{}, 0, len(l))
+	for _, raw := range l {
+		if raw == nil {
+			continue
+		}
+		original := raw.(map[string]interface{})
+		transformed := make(map[string]interface{})
+
+		transformedDatabase, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesDatabase(original["database"], d, config)
+		if err != nil {
+			return nil, err
+		} else if val := reflect.ValueOf(transformedDatabase); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+			transformed["database"] = transformedDatabase
+		}
+
+		transformedMysqlTables, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTables(original["mysql_tables"], d, config)
+		if err != nil {
+			return nil, err
+		} else if val := reflect.ValueOf(transformedMysqlTables); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+			transformed["mysqlTables"] = transformedMysqlTables
+		}
+
+		req = append(req, transformed)
+	}
+	return req, nil
+}
+
+func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesDatabase(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
 	return v, nil
 }
 
-func expandDatastreamStreamSourceConfigMysqlSourceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	l := v.([]interface{})
+	req := make([]interface{}, 0, len(l))
+	for _, raw := range l {
+		if raw == nil {
+			continue
+		}
+		original := raw.(map[string]interface{})
+		transformed := make(map[string]interface{})
+
+		transformedTable, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesTable(original["table"], d, config)
+		if err != nil {
+			return nil, err
+		} else if val := reflect.ValueOf(transformedTable); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+			transformed["table"] = transformedTable
+		}
+
+		transformedMysqlColumns, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(original["mysql_columns"], d, config)
+		if err != nil {
+			return nil, err
+		} else if val := reflect.ValueOf(transformedMysqlColumns); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+			transformed["mysqlColumns"] = transformedMysqlColumns
+		}
+
+		req = append(req, transformed)
+	}
+	return req, nil
+}
+
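The generated functions above and below reduce to two small patterns: expanders copy a Terraform field into the API request map only when it is non-empty, and flatteners coerce int64 fields that the API may return either as fixed64 decimal strings or as JSON numbers (float64). A minimal, self-contained sketch of both, assuming stand-ins for the package helpers (setIfNonEmpty and coerceFixed64 are illustrative names that do not exist in the vendored file; reflect.Value.IsZero and strconv.ParseInt stand in for tpgresource.IsEmptyValue and tpgresource.StringToFixed64):

package main

import (
	"fmt"
	"reflect"
	"strconv"
)

// setIfNonEmpty mirrors the per-field guard the generated expanders inline:
// only non-zero values make it into the API request body.
func setIfNonEmpty(m map[string]interface{}, key string, v interface{}) {
	if val := reflect.ValueOf(v); val.IsValid() && !val.IsZero() {
		m[key] = v
	}
}

// coerceFixed64 mirrors the flatten-side normalization: a fixed64 field may
// arrive as a decimal string or as a JSON number, and both are folded to a
// Go int before being stored in state.
func coerceFixed64(v interface{}) interface{} {
	if s, ok := v.(string); ok {
		if n, err := strconv.ParseInt(s, 10, 64); err == nil {
			return int(n)
		}
	}
	if f, ok := v.(float64); ok {
		return int(f)
	}
	return v // let terraform core handle it otherwise
}

func main() {
	req := make(map[string]interface{})
	setIfNonEmpty(req, "table", "orders")
	setIfNonEmpty(req, "collation", "") // dropped: zero value
	fmt.Println(req)                    // map[table:orders]
	fmt.Println(coerceFixed64("42"), coerceFixed64(42.0))
}

The generated code inlines these checks per field rather than sharing helpers, which keeps every field independently overridable by the code generator.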
+func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedColumn, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(original["column"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["column"] = transformedColumn + } + + transformedDataType, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(original["data_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dataType"] = transformedDataType + } + + transformedLength, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(original["length"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["length"] = transformedLength + } + + transformedCollation, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(original["collation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCollation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["collation"] = transformedCollation + } + + transformedPrimaryKey, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(original["primary_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["primaryKey"] = transformedPrimaryKey + } + + transformedNullable, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(original["nullable"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nullable"] = transformedNullable + } + + transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(original["ordinal_position"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ordinalPosition"] = transformedOrdinalPosition + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + return v, nil +} + +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentCdcTasks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentBackfillTasks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamSourceConfigOracleSourceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -3725,38 +5082,52 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfig(v interface{}, d tpgres original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedIncludeObjects, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjects(original["include_objects"], d, config) + transformedIncludeObjects, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjects(original["include_objects"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedIncludeObjects); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["includeObjects"] = transformedIncludeObjects } - transformedExcludeObjects, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjects(original["exclude_objects"], d, config) + transformedExcludeObjects, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjects(original["exclude_objects"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedExcludeObjects); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["excludeObjects"] = transformedExcludeObjects } - transformedMaxConcurrentCdcTasks, err := expandDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentCdcTasks(original["max_concurrent_cdc_tasks"], d, config) + transformedMaxConcurrentCdcTasks, err := 
expandDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentCdcTasks(original["max_concurrent_cdc_tasks"], d, config) if err != nil { return nil, err } else { transformed["maxConcurrentCdcTasks"] = transformedMaxConcurrentCdcTasks } - transformedMaxConcurrentBackfillTasks, err := expandDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentBackfillTasks(original["max_concurrent_backfill_tasks"], d, config) + transformedMaxConcurrentBackfillTasks, err := expandDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentBackfillTasks(original["max_concurrent_backfill_tasks"], d, config) if err != nil { return nil, err } else { transformed["maxConcurrentBackfillTasks"] = transformedMaxConcurrentBackfillTasks } + transformedDropLargeObjects, err := expandDatastreamStreamSourceConfigOracleSourceConfigDropLargeObjects(original["drop_large_objects"], d, config) + if err != nil { + return nil, err + } else { + transformed["dropLargeObjects"] = transformedDropLargeObjects + } + + transformedStreamLargeObjects, err := expandDatastreamStreamSourceConfigOracleSourceConfigStreamLargeObjects(original["stream_large_objects"], d, config) + if err != nil { + return nil, err + } else { + transformed["streamLargeObjects"] = transformedStreamLargeObjects + } + return transformed, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3765,17 +5136,17 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjects(v interfa original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedMysqlDatabases, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabases(original["mysql_databases"], d, config) + transformedOracleSchemas, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemas(original["oracle_schemas"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMysqlDatabases); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["mysqlDatabases"] = transformedMysqlDatabases + } else if val := reflect.ValueOf(transformedOracleSchemas); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["oracleSchemas"] = transformedOracleSchemas } return transformed, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabases(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -3785,18 +5156,18 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatab original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedDatabase, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesDatabase(original["database"], d, config) + transformedSchema, err := 
expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasSchema(original["schema"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDatabase); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["database"] = transformedDatabase + } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["schema"] = transformedSchema } - transformedMysqlTables, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTables(original["mysql_tables"], d, config) + transformedOracleTables, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTables(original["oracle_tables"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMysqlTables); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["mysqlTables"] = transformedMysqlTables + } else if val := reflect.ValueOf(transformedOracleTables); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["oracleTables"] = transformedOracleTables } req = append(req, transformed) @@ -3804,11 +5175,11 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatab return req, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesDatabase(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasSchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -3818,18 +5189,18 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatab original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedTable, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesTable(original["table"], d, config) + transformedTable, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesTable(original["table"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["table"] = transformedTable } - transformedMysqlColumns, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(original["mysql_columns"], d, config) + transformedOracleColumns, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumns(original["oracle_columns"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMysqlColumns); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["mysqlColumns"] = transformedMysqlColumns + } else if val := reflect.ValueOf(transformedOracleColumns); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["oracleColumns"] = transformedOracleColumns } req = append(req, transformed) @@ -3837,11 +5208,11 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatab return req, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -3851,49 +5222,63 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatab original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedColumn, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(original["column"], d, config) + transformedColumn, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(original["column"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["column"] = transformedColumn } - transformedDataType, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(original["data_type"], d, config) + transformedDataType, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(original["data_type"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dataType"] = transformedDataType } - transformedLength, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(original["length"], d, config) + transformedLength, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsLength(original["length"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["length"] = transformedLength } - transformedCollation, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(original["collation"], d, config) + transformedPrecision, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(original["precision"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCollation); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["collation"] = transformedCollation + } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["precision"] = transformedPrecision } - transformedPrimaryKey, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(original["primary_key"], d, config) + transformedScale, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsScale(original["scale"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scale"] = transformedScale + } + + transformedEncoding, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(original["encoding"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEncoding); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["encoding"] = transformedEncoding + } + + transformedPrimaryKey, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(original["primary_key"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["primaryKey"] = transformedPrimaryKey } - transformedNullable, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(original["nullable"], d, config) + transformedNullable, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(original["nullable"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nullable"] = transformedNullable } - transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(original["ordinal_position"], d, config) + transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(original["ordinal_position"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !tpgresource.IsEmptyValue(val) { @@ -3905,35 +5290,43 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatab return req, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsScale(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3942,17 +5335,17 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjects(v interfa original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedMysqlDatabases, err := 
expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabases(original["mysql_databases"], d, config) + transformedOracleSchemas, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemas(original["oracle_schemas"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMysqlDatabases); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["mysqlDatabases"] = transformedMysqlDatabases + } else if val := reflect.ValueOf(transformedOracleSchemas); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["oracleSchemas"] = transformedOracleSchemas } return transformed, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabases(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -3962,18 +5355,18 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatab original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedDatabase, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesDatabase(original["database"], d, config) + transformedSchema, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasSchema(original["schema"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDatabase); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["database"] = transformedDatabase + } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["schema"] = transformedSchema } - transformedMysqlTables, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTables(original["mysql_tables"], d, config) + transformedOracleTables, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTables(original["oracle_tables"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMysqlTables); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["mysqlTables"] = transformedMysqlTables + } else if val := reflect.ValueOf(transformedOracleTables); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["oracleTables"] = transformedOracleTables } req = append(req, transformed) @@ -3981,11 +5374,11 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatab return req, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesDatabase(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasSchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTables(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -3995,18 +5388,18 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatab original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedTable, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesTable(original["table"], d, config) + transformedTable, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesTable(original["table"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["table"] = transformedTable } - transformedMysqlColumns, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(original["mysql_columns"], d, config) + transformedOracleColumns, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumns(original["oracle_columns"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMysqlColumns); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["mysqlColumns"] = transformedMysqlColumns + } else if val := reflect.ValueOf(transformedOracleColumns); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["oracleColumns"] = transformedOracleColumns } req = append(req, transformed) @@ -4014,11 +5407,11 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatab return req, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4028,49 +5421,63 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatab original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedColumn, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(original["column"], d, config) + transformedColumn, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(original["column"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["column"] = transformedColumn } - transformedDataType, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(original["data_type"], d, config) + 
transformedDataType, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(original["data_type"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dataType"] = transformedDataType } - transformedLength, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(original["length"], d, config) + transformedLength, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsLength(original["length"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["length"] = transformedLength } - transformedCollation, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(original["collation"], d, config) + transformedPrecision, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(original["precision"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCollation); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["collation"] = transformedCollation + } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["precision"] = transformedPrecision } - transformedPrimaryKey, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(original["primary_key"], d, config) + transformedScale, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsScale(original["scale"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scale"] = transformedScale + } + + transformedEncoding, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(original["encoding"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEncoding); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["encoding"] = transformedEncoding + } + + transformedPrimaryKey, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(original["primary_key"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["primaryKey"] = transformedPrimaryKey } - transformedNullable, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(original["nullable"], d, config) + transformedNullable, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(original["nullable"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nullable"] = transformedNullable } - transformedOrdinalPosition, err := 
expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(original["ordinal_position"], d, config) + transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(original["ordinal_position"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !tpgresource.IsEmptyValue(val) { @@ -4082,43 +5489,81 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatab return req, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsScale(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentCdcTasks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentBackfillTasks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentCdcTasks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentBackfillTasks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamSourceConfigOracleSourceConfigDropLargeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandDatastreamStreamSourceConfigOracleSourceConfigStreamLargeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandDatastreamStreamSourceConfigPostgresqlSourceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -4132,52 +5577,45 @@ func expandDatastreamStreamSourceConfigOracleSourceConfig(v interface{}, d tpgre original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedIncludeObjects, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjects(original["include_objects"], d, config) + transformedIncludeObjects, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjects(original["include_objects"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedIncludeObjects); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["includeObjects"] = transformedIncludeObjects } - transformedExcludeObjects, 
err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjects(original["exclude_objects"], d, config) + transformedExcludeObjects, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjects(original["exclude_objects"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedExcludeObjects); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["excludeObjects"] = transformedExcludeObjects } - transformedMaxConcurrentCdcTasks, err := expandDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentCdcTasks(original["max_concurrent_cdc_tasks"], d, config) - if err != nil { - return nil, err - } else { - transformed["maxConcurrentCdcTasks"] = transformedMaxConcurrentCdcTasks - } - - transformedMaxConcurrentBackfillTasks, err := expandDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentBackfillTasks(original["max_concurrent_backfill_tasks"], d, config) + transformedReplicationSlot, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigReplicationSlot(original["replication_slot"], d, config) if err != nil { - return nil, err - } else { - transformed["maxConcurrentBackfillTasks"] = transformedMaxConcurrentBackfillTasks + return nil, err + } else if val := reflect.ValueOf(transformedReplicationSlot); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["replicationSlot"] = transformedReplicationSlot } - transformedDropLargeObjects, err := expandDatastreamStreamSourceConfigOracleSourceConfigDropLargeObjects(original["drop_large_objects"], d, config) + transformedPublication, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigPublication(original["publication"], d, config) if err != nil { return nil, err - } else { - transformed["dropLargeObjects"] = transformedDropLargeObjects + } else if val := reflect.ValueOf(transformedPublication); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["publication"] = transformedPublication } - transformedStreamLargeObjects, err := expandDatastreamStreamSourceConfigOracleSourceConfigStreamLargeObjects(original["stream_large_objects"], d, config) + transformedMaxConcurrentBackfillTasks, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigMaxConcurrentBackfillTasks(original["max_concurrent_backfill_tasks"], d, config) if err != nil { return nil, err } else { - transformed["streamLargeObjects"] = transformedStreamLargeObjects + transformed["maxConcurrentBackfillTasks"] = transformedMaxConcurrentBackfillTasks } return transformed, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -4186,17 +5624,17 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjects(v interf original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedOracleSchemas, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemas(original["oracle_schemas"], d, config) + transformedPostgresqlSchemas, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemas(original["postgresql_schemas"], d, config) if err != nil { return nil, err - } else if val := 
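// [Editorial note, not part of the upstream vendor patch.] The renamed
// expandDatastreamStreamSourceConfigPostgresqlSourceConfig above follows the
// generator's standard shape for a MaxItems:1 block: unwrap the singleton
// list, convert each set field from Terraform snake_case to API camelCase,
// and skip empty values via tpgresource.IsEmptyValue. A condensed,
// hypothetical sketch of that pattern (expandBlock is invented, and the real
// code routes each field through its own leaf expander first):
//
//	func expandBlock(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
//		l := v.([]interface{}) // MaxItems:1 blocks arrive as a one-element list
//		if len(l) == 0 || l[0] == nil {
//			return nil, nil // block unset: omit it from the API request
//		}
//		original := l[0].(map[string]interface{})
//		transformed := make(map[string]interface{})
//		if val := reflect.ValueOf(original["replication_slot"]); val.IsValid() && !tpgresource.IsEmptyValue(val) {
//			transformed["replicationSlot"] = original["replication_slot"] // snake_case key becomes camelCase
//		}
//		return transformed, nil
//	}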
reflect.ValueOf(transformedOracleSchemas); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["oracleSchemas"] = transformedOracleSchemas + } else if val := reflect.ValueOf(transformedPostgresqlSchemas); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["postgresqlSchemas"] = transformedPostgresqlSchemas } return transformed, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4206,18 +5644,18 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSch original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedSchema, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasSchema(original["schema"], d, config) + transformedSchema, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasSchema(original["schema"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["schema"] = transformedSchema } - transformedOracleTables, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTables(original["oracle_tables"], d, config) + transformedPostgresqlTables, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTables(original["postgresql_tables"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOracleTables); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["oracleTables"] = transformedOracleTables + } else if val := reflect.ValueOf(transformedPostgresqlTables); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["postgresqlTables"] = transformedPostgresqlTables } req = append(req, transformed) @@ -4225,11 +5663,11 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSch return req, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasSchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasSchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4239,18 +5677,18 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSch original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedTable, err := 
expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesTable(original["table"], d, config) + transformedTable, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesTable(original["table"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["table"] = transformedTable } - transformedOracleColumns, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumns(original["oracle_columns"], d, config) + transformedPostgresqlColumns, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(original["postgresql_columns"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOracleColumns); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["oracleColumns"] = transformedOracleColumns + } else if val := reflect.ValueOf(transformedPostgresqlColumns); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["postgresqlColumns"] = transformedPostgresqlColumns } req = append(req, transformed) @@ -4258,11 +5696,11 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSch return req, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4272,63 +5710,56 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSch original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedColumn, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(original["column"], d, config) + transformedColumn, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(original["column"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["column"] = transformedColumn } - transformedDataType, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(original["data_type"], d, config) + transformedDataType, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(original["data_type"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedDataType); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dataType"] = transformedDataType } - transformedLength, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsLength(original["length"], d, config) + transformedLength, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(original["length"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["length"] = transformedLength } - transformedPrecision, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(original["precision"], d, config) + transformedPrecision, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(original["precision"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["precision"] = transformedPrecision } - transformedScale, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsScale(original["scale"], d, config) + transformedScale, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(original["scale"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["scale"] = transformedScale } - transformedEncoding, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(original["encoding"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEncoding); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["encoding"] = transformedEncoding - } - - transformedPrimaryKey, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(original["primary_key"], d, config) + transformedPrimaryKey, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(original["primary_key"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["primaryKey"] = transformedPrimaryKey } - transformedNullable, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(original["nullable"], d, config) + transformedNullable, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(original["nullable"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nullable"] = transformedNullable } - transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(original["ordinal_position"], d, config) + transformedOrdinalPosition, err := 
expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(original["ordinal_position"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !tpgresource.IsEmptyValue(val) { @@ -4340,43 +5771,39 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSch return req, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil -} - -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsScale(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, 
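// [Editorial note, not part of the upstream vendor patch.] The one-line
// helpers being renamed here (...Column, ...DataType, ...Nullable, and so on)
// are generated identity expanders: leaf fields need no conversion, so each
// helper simply returns its input unchanged, e.g.
//
//	func expandLeafField(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
//		return v, nil // leaf values map 1:1 onto the Datastream API
//	}
//
// Keeping them as separate functions lets the generator treat every schema
// field uniformly, which is why this rename touches so many near-identical
// declarations.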
nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -4385,17 +5812,17 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjects(v interf original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedOracleSchemas, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemas(original["oracle_schemas"], d, config) + transformedPostgresqlSchemas, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemas(original["postgresql_schemas"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOracleSchemas); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["oracleSchemas"] = transformedOracleSchemas + } else if val := reflect.ValueOf(transformedPostgresqlSchemas); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["postgresqlSchemas"] = transformedPostgresqlSchemas } return transformed, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4405,18 +5832,18 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSch original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedSchema, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasSchema(original["schema"], d, config) + transformedSchema, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasSchema(original["schema"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["schema"] = transformedSchema } - transformedOracleTables, err := 
expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTables(original["oracle_tables"], d, config) + transformedPostgresqlTables, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTables(original["postgresql_tables"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOracleTables); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["oracleTables"] = transformedOracleTables + } else if val := reflect.ValueOf(transformedPostgresqlTables); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["postgresqlTables"] = transformedPostgresqlTables } req = append(req, transformed) @@ -4424,11 +5851,11 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSch return req, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasSchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasSchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4438,18 +5865,18 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSch original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedTable, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesTable(original["table"], d, config) + transformedTable, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesTable(original["table"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["table"] = transformedTable } - transformedOracleColumns, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumns(original["oracle_columns"], d, config) + transformedPostgresqlColumns, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(original["postgresql_columns"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOracleColumns); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["oracleColumns"] = transformedOracleColumns + } else if val := reflect.ValueOf(transformedPostgresqlColumns); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["postgresqlColumns"] = transformedPostgresqlColumns } req = append(req, transformed) @@ -4457,11 +5884,11 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSch return req, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesTable(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4471,63 +5898,56 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSch original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedColumn, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(original["column"], d, config) + transformedColumn, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(original["column"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["column"] = transformedColumn } - transformedDataType, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(original["data_type"], d, config) + transformedDataType, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(original["data_type"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dataType"] = transformedDataType } - transformedLength, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsLength(original["length"], d, config) + transformedLength, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(original["length"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["length"] = transformedLength } - transformedPrecision, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(original["precision"], d, config) + transformedPrecision, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(original["precision"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["precision"] = transformedPrecision } - transformedScale, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsScale(original["scale"], d, config) + transformedScale, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(original["scale"], d, config) if err != 
nil { return nil, err } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["scale"] = transformedScale } - transformedEncoding, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(original["encoding"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEncoding); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["encoding"] = transformedEncoding - } - - transformedPrimaryKey, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(original["primary_key"], d, config) + transformedPrimaryKey, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(original["primary_key"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["primaryKey"] = transformedPrimaryKey } - transformedNullable, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(original["nullable"], d, config) + transformedNullable, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(original["nullable"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nullable"] = transformedNullable } - transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(original["ordinal_position"], d, config) + transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(original["ordinal_position"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !tpgresource.IsEmptyValue(val) { @@ -4539,81 +5959,51 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSch return req, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func 
expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsScale(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigReplicationSlot(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentCdcTasks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigPublication(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentBackfillTasks(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigMaxConcurrentBackfillTasks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigDropLargeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 { - return nil, nil - } - - if l[0] == nil { - transformed := make(map[string]interface{}) - return transformed, nil - } - transformed := make(map[string]interface{}) - - return transformed, nil -} - -func expandDatastreamStreamSourceConfigOracleSourceConfigStreamLargeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 { - return nil, nil - } - - if l[0] == nil { - transformed := make(map[string]interface{}) - return transformed, nil - } - transformed := make(map[string]interface{}) - - return transformed, nil -} - -func expandDatastreamStreamSourceConfigPostgresqlSourceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -4627,45 +6017,52 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfig(v interface{}, d t original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedIncludeObjects, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjects(original["include_objects"], d, config) + transformedIncludeObjects, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjects(original["include_objects"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedIncludeObjects); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["includeObjects"] = transformedIncludeObjects } - transformedExcludeObjects, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjects(original["exclude_objects"], d, config) + transformedExcludeObjects, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjects(original["exclude_objects"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedExcludeObjects); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["excludeObjects"] = transformedExcludeObjects } - transformedReplicationSlot, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigReplicationSlot(original["replication_slot"], d, config) + transformedMaxConcurrentCdcTasks, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigMaxConcurrentCdcTasks(original["max_concurrent_cdc_tasks"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplicationSlot); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["replicationSlot"] = transformedReplicationSlot + } else { + transformed["maxConcurrentCdcTasks"] = transformedMaxConcurrentCdcTasks } - transformedPublication, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigPublication(original["publication"], d, config) + transformedMaxConcurrentBackfillTasks, err := 
expandDatastreamStreamSourceConfigSqlServerSourceConfigMaxConcurrentBackfillTasks(original["max_concurrent_backfill_tasks"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPublication); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["publication"] = transformedPublication + } else { + transformed["maxConcurrentBackfillTasks"] = transformedMaxConcurrentBackfillTasks } - transformedMaxConcurrentBackfillTasks, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigMaxConcurrentBackfillTasks(original["max_concurrent_backfill_tasks"], d, config) + transformedTransactionLogs, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigTransactionLogs(original["transaction_logs"], d, config) if err != nil { return nil, err } else { - transformed["maxConcurrentBackfillTasks"] = transformedMaxConcurrentBackfillTasks + transformed["transactionLogs"] = transformedTransactionLogs + } + + transformedChangeTables, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigChangeTables(original["change_tables"], d, config) + if err != nil { + return nil, err + } else { + transformed["changeTables"] = transformedChangeTables } return transformed, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -4674,17 +6071,17 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjects(v in original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedPostgresqlSchemas, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemas(original["postgresql_schemas"], d, config) + transformedSchemas, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemas(original["schemas"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPostgresqlSchemas); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["postgresqlSchemas"] = transformedPostgresqlSchemas + } else if val := reflect.ValueOf(transformedSchemas); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["schemas"] = transformedSchemas } return transformed, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4694,18 +6091,18 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostg original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedSchema, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasSchema(original["schema"], d, config) + transformedSchema, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasSchema(original["schema"], d, config) if err != nil { return nil, err } else if 
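// [Editorial note, not part of the upstream vendor patch.] Note the two
// branch styles visible in expandDatastreamStreamSourceConfigSqlServerSourceConfig
// above: most fields are gated with tpgresource.IsEmptyValue and dropped when
// empty, while maxConcurrentCdcTasks, maxConcurrentBackfillTasks,
// transactionLogs, and changeTables use a plain else and are always written.
// That appears deliberate for fields where the zero value or an empty
// message is meaningful to the API:
//
//	// always sent, even when the transformed value is 0 or an empty map
//	transformed["maxConcurrentCdcTasks"] = transformedMaxConcurrentCdcTasks
//
// as opposed to the IsEmptyValue-guarded assignment used for optional
// strings and nested lists.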
val := reflect.ValueOf(transformedSchema); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["schema"] = transformedSchema } - transformedPostgresqlTables, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTables(original["postgresql_tables"], d, config) + transformedTables, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTables(original["tables"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPostgresqlTables); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["postgresqlTables"] = transformedPostgresqlTables + } else if val := reflect.ValueOf(transformedTables); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tables"] = transformedTables } req = append(req, transformed) @@ -4713,11 +6110,11 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostg return req, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasSchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasSchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4727,18 +6124,18 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostg original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedTable, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesTable(original["table"], d, config) + transformedTable, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesTable(original["table"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["table"] = transformedTable } - transformedPostgresqlColumns, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(original["postgresql_columns"], d, config) + transformedColumns, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumns(original["columns"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPostgresqlColumns); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["postgresqlColumns"] = transformedPostgresqlColumns + } else if val := reflect.ValueOf(transformedColumns); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["columns"] = transformedColumns } req = append(req, transformed) @@ -4746,11 +6143,11 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostg return req, nil } -func 
expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4760,56 +6157,56 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostg original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedColumn, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(original["column"], d, config) + transformedColumn, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsColumn(original["column"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["column"] = transformedColumn } - transformedDataType, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(original["data_type"], d, config) + transformedDataType, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsDataType(original["data_type"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dataType"] = transformedDataType } - transformedLength, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(original["length"], d, config) + transformedLength, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsLength(original["length"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["length"] = transformedLength } - transformedPrecision, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(original["precision"], d, config) + transformedPrecision, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsPrecision(original["precision"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["precision"] = transformedPrecision } - transformedScale, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(original["scale"], d, config) + transformedScale, err := 
expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsScale(original["scale"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["scale"] = transformedScale } - transformedPrimaryKey, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(original["primary_key"], d, config) + transformedPrimaryKey, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsPrimaryKey(original["primary_key"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["primaryKey"] = transformedPrimaryKey } - transformedNullable, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(original["nullable"], d, config) + transformedNullable, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsNullable(original["nullable"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nullable"] = transformedNullable } - transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(original["ordinal_position"], d, config) + transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsOrdinalPosition(original["ordinal_position"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !tpgresource.IsEmptyValue(val) { @@ -4821,39 +6218,39 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostg return req, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsColumn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsPrecision(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsScale(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsPrimaryKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsNullable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemasTablesColumnsOrdinalPosition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -4862,17 +6259,17 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjects(v in original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedPostgresqlSchemas, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemas(original["postgresql_schemas"], d, config) + transformedSchemas, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemas(original["schemas"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPostgresqlSchemas); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["postgresqlSchemas"] = transformedPostgresqlSchemas + } else if val := reflect.ValueOf(transformedSchemas); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["schemas"] = transformedSchemas } return transformed, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4882,18 +6279,18 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostg original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedSchema, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasSchema(original["schema"], d, config) + transformedSchema, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasSchema(original["schema"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["schema"] = transformedSchema } - transformedPostgresqlTables, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTables(original["postgresql_tables"], d, config) + transformedTables, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTables(original["tables"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPostgresqlTables); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["postgresqlTables"] = transformedPostgresqlTables + } else if val := reflect.ValueOf(transformedTables); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tables"] = transformedTables } req = append(req, transformed) @@ -4901,11 +6298,11 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostg return req, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasSchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasSchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4915,18 +6312,18 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostg original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedTable, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesTable(original["table"], d, config) + transformedTable, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesTable(original["table"], d, config) if err != nil { return nil, err } else if val := 
reflect.ValueOf(transformedTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["table"] = transformedTable } - transformedPostgresqlColumns, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(original["postgresql_columns"], d, config) + transformedColumns, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumns(original["columns"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPostgresqlColumns); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["postgresqlColumns"] = transformedPostgresqlColumns + } else if val := reflect.ValueOf(transformedColumns); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["columns"] = transformedColumns } req = append(req, transformed) @@ -4934,11 +6331,11 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostg return req, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4948,56 +6345,56 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostg original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedColumn, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(original["column"], d, config) + transformedColumn, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsColumn(original["column"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["column"] = transformedColumn } - transformedDataType, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(original["data_type"], d, config) + transformedDataType, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsDataType(original["data_type"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dataType"] = transformedDataType } - transformedLength, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(original["length"], d, config) + transformedLength, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsLength(original["length"], d, config) if err 
!= nil { return nil, err } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["length"] = transformedLength } - transformedPrecision, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(original["precision"], d, config) + transformedPrecision, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsPrecision(original["precision"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["precision"] = transformedPrecision } - transformedScale, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(original["scale"], d, config) + transformedScale, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsScale(original["scale"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["scale"] = transformedScale } - transformedPrimaryKey, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(original["primary_key"], d, config) + transformedPrimaryKey, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsPrimaryKey(original["primary_key"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["primaryKey"] = transformedPrimaryKey } - transformedNullable, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(original["nullable"], d, config) + transformedNullable, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsNullable(original["nullable"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nullable"] = transformedNullable } - transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(original["ordinal_position"], d, config) + transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsOrdinalPosition(original["ordinal_position"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !tpgresource.IsEmptyValue(val) { @@ -5009,48 +6406,74 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostg return req, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsColumn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsPrecision(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsScale(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsPrimaryKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsNullable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemasTablesColumnsOrdinalPosition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigReplicationSlot(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigMaxConcurrentCdcTasks(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigPublication(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSqlServerSourceConfigMaxConcurrentBackfillTasks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigMaxConcurrentBackfillTasks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil +func expandDatastreamStreamSourceConfigSqlServerSourceConfigTransactionLogs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandDatastreamStreamSourceConfigSqlServerSourceConfigChangeTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil } func expandDatastreamStreamDestinationConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { @@ -5228,6 +6651,20 @@ func expandDatastreamStreamDestinationConfigBigqueryDestinationConfig(v interfac transformed["sourceHierarchyDatasets"] = transformedSourceHierarchyDatasets } + transformedMerge, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigMerge(original["merge"], d, config) + if err != nil { + return nil, err + } else { + transformed["merge"] = transformedMerge + } + + transformedAppendOnly, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigAppendOnly(original["append_only"], d, config) + if err != nil { + return nil, err + } else { + transformed["appendOnly"] = transformedAppendOnly + } + return transformed, nil } @@ -5331,6 +6768,36 @@ func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHiera return v, nil } +func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigMerge(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigAppendOnly(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + func expandDatastreamStreamBackfillAll(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { @@ -5366,6 +6833,13 @@ func 
expandDatastreamStreamBackfillAll(v interface{}, d tpgresource.TerraformRes transformed["oracleExcludedObjects"] = transformedOracleExcludedObjects } + transformedSqlServerExcludedObjects, err := expandDatastreamStreamBackfillAllSqlServerExcludedObjects(original["sql_server_excluded_objects"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSqlServerExcludedObjects); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sqlServerExcludedObjects"] = transformedSqlServerExcludedObjects + } + return transformed, nil } @@ -5933,6 +7407,194 @@ func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTa return v, nil } +func expandDatastreamStreamBackfillAllSqlServerExcludedObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSchemas, err := expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemas(original["schemas"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSchemas); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["schemas"] = transformedSchemas + } + + return transformed, nil +} + +func expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSchema, err := expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasSchema(original["schema"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["schema"] = transformedSchema + } + + transformedTables, err := expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTables(original["tables"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTables); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tables"] = transformedTables + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasSchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTable, err := expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesTable(original["table"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["table"] = transformedTable + } + + transformedColumns, err := expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumns(original["columns"], d, 
config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedColumns); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["columns"] = transformedColumns + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedColumn, err := expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsColumn(original["column"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["column"] = transformedColumn + } + + transformedDataType, err := expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsDataType(original["data_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dataType"] = transformedDataType + } + + transformedLength, err := expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsLength(original["length"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["length"] = transformedLength + } + + transformedPrecision, err := expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsPrecision(original["precision"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["precision"] = transformedPrecision + } + + transformedScale, err := expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsScale(original["scale"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scale"] = transformedScale + } + + transformedPrimaryKey, err := expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsPrimaryKey(original["primary_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["primaryKey"] = transformedPrimaryKey + } + + transformedNullable, err := expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsNullable(original["nullable"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nullable"] = transformedNullable + } + + transformedOrdinalPosition, err := expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsOrdinalPosition(original["ordinal_position"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOrdinalPosition); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ordinalPosition"] = transformedOrdinalPosition + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsColumn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsPrecision(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsScale(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsPrimaryKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsNullable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamBackfillAllSqlServerExcludedObjectsSchemasTablesColumnsOrdinalPosition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandDatastreamStreamBackfillNone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_agent.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_agent.go index 586e76a17b6..ba7c4ae4205 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_agent.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_agent.go @@ -141,6 +141,69 @@ Format: gs://bucket/object-name-or-prefix`, }, }, }, + "logging_settings": { + Type: schema.TypeList, + Optional: true, + Description: `Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. 
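The empty-block expanders above (transaction_logs and change_tables on the SQL Server source, merge and append_only on the BigQuery destination) all reduce to the same shape: an absent list means "omit the field from the request", while a configured block, even one whose single element is nil, serializes as an empty JSON object so the API can treat it as a oneof selector. Below is a minimal, self-contained sketch of that pattern; the provider's d and config parameters and error return are dropped for brevity, so this is an illustration of the generated pattern, not the vendored code itself.

package main

import "fmt"

// expandEmptyBlock mirrors the generated expanders for message fields with no
// settable sub-fields: nil or an empty list -> nil (field omitted), a
// configured block -> an empty object ({} on the wire).
func expandEmptyBlock(v interface{}) interface{} {
	l, ok := v.([]interface{})
	if !ok || len(l) == 0 {
		return nil
	}
	// Whether l[0] is nil or an empty map, the result is the same: {}.
	return map[string]interface{}{}
}

func main() {
	fmt.Println(expandEmptyBlock(nil))                // <nil>: field omitted
	fmt.Println(expandEmptyBlock([]interface{}{nil})) // map[]: sent as {}
}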
Exposed at the following levels: +* Agent level`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_consent_based_redaction": { + Type: schema.TypeBool, + Optional: true, + Description: `Enables consent-based end-user input redaction, if true, a pre-defined session parameter **$session.params.conversation-redaction** will be used to determine if the utterance should be redacted.`, + }, + "enable_interaction_logging": { + Type: schema.TypeBool, + Optional: true, + Description: `Enables DF Interaction logging.`, + }, + "enable_stackdriver_logging": { + Type: schema.TypeBool, + Optional: true, + Description: `Enables Google Cloud Logging.`, + }, + }, + }, + }, + "speech_settings": { + Type: schema.TypeList, + Optional: true, + Description: `Settings for speech to text detection. Exposed at the following levels: +* Agent level +* Flow level +* Page level +* Parameter level`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "endpointer_sensitivity": { + Type: schema.TypeInt, + Optional: true, + Description: `Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100.`, + }, + "models": { + Type: schema.TypeMap, + Optional: true, + Description: `Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models). +An object containing a list of **"key": value** pairs. Example: **{ "name": "wrench", "mass": "1.3kg", "count": "3" }**.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "no_speech_timeout": { + Type: schema.TypeString, + Optional: true, + Description: `Timeout before detecting no speech. +A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s".`, + }, + "use_timeout_based_endpointing": { + Type: schema.TypeBool, + Optional: true, + Description: `Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value.`, + }, + }, + }, + }, }, }, }, @@ -163,6 +226,7 @@ Format: gs://bucket/object-name-or-prefix`, "enable_stackdriver_logging": { Type: schema.TypeBool, Optional: true, + Deprecated: "`enable_stackdriver_logging` is deprecated and will be removed in a future major release. 
Please use `advanced_settings.logging_settings.enable_stackdriver_logging` instead.", Description: `Determines whether this agent should log conversation queries.`, }, "git_integration_settings": { @@ -491,9 +555,6 @@ func resourceDialogflowCXAgentRead(d *schema.ResourceData, meta interface{}) err if err := d.Set("security_settings", flattenDialogflowCXAgentSecuritySettings(res["securitySettings"], d, config)); err != nil { return fmt.Errorf("Error reading Agent: %s", err) } - if err := d.Set("enable_stackdriver_logging", flattenDialogflowCXAgentEnableStackdriverLogging(res["enableStackdriverLogging"], d, config)); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } if err := d.Set("enable_spell_correction", flattenDialogflowCXAgentEnableSpellCorrection(res["enableSpellCorrection"], d, config)); err != nil { return fmt.Errorf("Error reading Agent: %s", err) } @@ -815,10 +876,6 @@ func flattenDialogflowCXAgentSecuritySettings(v interface{}, d *schema.ResourceD return v } -func flattenDialogflowCXAgentEnableStackdriverLogging(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v -} - func flattenDialogflowCXAgentEnableSpellCorrection(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -834,8 +891,12 @@ func flattenDialogflowCXAgentAdvancedSettings(v interface{}, d *schema.ResourceD transformed := make(map[string]interface{}) transformed["audio_export_gcs_destination"] = flattenDialogflowCXAgentAdvancedSettingsAudioExportGcsDestination(original["audioExportGcsDestination"], d, config) + transformed["speech_settings"] = + flattenDialogflowCXAgentAdvancedSettingsSpeechSettings(original["speechSettings"], d, config) transformed["dtmf_settings"] = flattenDialogflowCXAgentAdvancedSettingsDtmfSettings(original["dtmfSettings"], d, config) + transformed["logging_settings"] = + flattenDialogflowCXAgentAdvancedSettingsLoggingSettings(original["loggingSettings"], d, config) return []interface{}{transformed} } func flattenDialogflowCXAgentAdvancedSettingsAudioExportGcsDestination(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { @@ -855,6 +916,54 @@ func flattenDialogflowCXAgentAdvancedSettingsAudioExportGcsDestinationUri(v inte return v } +func flattenDialogflowCXAgentAdvancedSettingsSpeechSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["endpointer_sensitivity"] = + flattenDialogflowCXAgentAdvancedSettingsSpeechSettingsEndpointerSensitivity(original["endpointerSensitivity"], d, config) + transformed["no_speech_timeout"] = + flattenDialogflowCXAgentAdvancedSettingsSpeechSettingsNoSpeechTimeout(original["noSpeechTimeout"], d, config) + transformed["use_timeout_based_endpointing"] = + flattenDialogflowCXAgentAdvancedSettingsSpeechSettingsUseTimeoutBasedEndpointing(original["useTimeoutBasedEndpointing"], d, config) + transformed["models"] = + flattenDialogflowCXAgentAdvancedSettingsSpeechSettingsModels(original["models"], d, config) + return []interface{}{transformed} +} +func flattenDialogflowCXAgentAdvancedSettingsSpeechSettingsEndpointerSensitivity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := 
tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDialogflowCXAgentAdvancedSettingsSpeechSettingsNoSpeechTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXAgentAdvancedSettingsSpeechSettingsUseTimeoutBasedEndpointing(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXAgentAdvancedSettingsSpeechSettingsModels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenDialogflowCXAgentAdvancedSettingsDtmfSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil @@ -897,6 +1006,21 @@ func flattenDialogflowCXAgentAdvancedSettingsDtmfSettingsFinishDigit(v interface return v } +func flattenDialogflowCXAgentAdvancedSettingsLoggingSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("advanced_settings.0.logging_settings") +} +func flattenDialogflowCXAgentAdvancedSettingsLoggingSettingsEnableStackdriverLogging(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXAgentAdvancedSettingsLoggingSettingsEnableInteractionLogging(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXAgentAdvancedSettingsLoggingSettingsEnableConsentBasedRedaction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenDialogflowCXAgentGitIntegrationSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil @@ -1023,6 +1147,13 @@ func expandDialogflowCXAgentAdvancedSettings(v interface{}, d tpgresource.Terraf transformed["audioExportGcsDestination"] = transformedAudioExportGcsDestination } + transformedSpeechSettings, err := expandDialogflowCXAgentAdvancedSettingsSpeechSettings(original["speech_settings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSpeechSettings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["speechSettings"] = transformedSpeechSettings + } + transformedDtmfSettings, err := expandDialogflowCXAgentAdvancedSettingsDtmfSettings(original["dtmf_settings"], d, config) if err != nil { return nil, err @@ -1030,6 +1161,13 @@ func expandDialogflowCXAgentAdvancedSettings(v interface{}, d tpgresource.Terraf transformed["dtmfSettings"] = transformedDtmfSettings } + transformedLoggingSettings, err := expandDialogflowCXAgentAdvancedSettingsLoggingSettings(original["logging_settings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLoggingSettings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["loggingSettings"] = transformedLoggingSettings + } + return transformed, nil } @@ -1056,6 +1194,69 @@ func expandDialogflowCXAgentAdvancedSettingsAudioExportGcsDestinationUri(v inter return v, nil } +func expandDialogflowCXAgentAdvancedSettingsSpeechSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == 
nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEndpointerSensitivity, err := expandDialogflowCXAgentAdvancedSettingsSpeechSettingsEndpointerSensitivity(original["endpointer_sensitivity"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEndpointerSensitivity); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["endpointerSensitivity"] = transformedEndpointerSensitivity + } + + transformedNoSpeechTimeout, err := expandDialogflowCXAgentAdvancedSettingsSpeechSettingsNoSpeechTimeout(original["no_speech_timeout"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNoSpeechTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["noSpeechTimeout"] = transformedNoSpeechTimeout + } + + transformedUseTimeoutBasedEndpointing, err := expandDialogflowCXAgentAdvancedSettingsSpeechSettingsUseTimeoutBasedEndpointing(original["use_timeout_based_endpointing"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUseTimeoutBasedEndpointing); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["useTimeoutBasedEndpointing"] = transformedUseTimeoutBasedEndpointing + } + + transformedModels, err := expandDialogflowCXAgentAdvancedSettingsSpeechSettingsModels(original["models"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedModels); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["models"] = transformedModels + } + + return transformed, nil +} + +func expandDialogflowCXAgentAdvancedSettingsSpeechSettingsEndpointerSensitivity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXAgentAdvancedSettingsSpeechSettingsNoSpeechTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXAgentAdvancedSettingsSpeechSettingsUseTimeoutBasedEndpointing(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXAgentAdvancedSettingsSpeechSettingsModels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + func expandDialogflowCXAgentAdvancedSettingsDtmfSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { @@ -1101,6 +1302,51 @@ func expandDialogflowCXAgentAdvancedSettingsDtmfSettingsFinishDigit(v interface{ return v, nil } +func expandDialogflowCXAgentAdvancedSettingsLoggingSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEnableStackdriverLogging, err := expandDialogflowCXAgentAdvancedSettingsLoggingSettingsEnableStackdriverLogging(original["enable_stackdriver_logging"], d, config) + if err != nil { + return nil, err 
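The EndpointerSensitivity flattener above has to cope with the API returning the field either as a JSON number (which encoding/json decodes to float64) or as a string-encoded 64-bit integer. A standalone sketch of the same branching follows; it assumes tpgresource.StringToFixed64 is essentially strconv.ParseInt(s, 10, 64), which is an assumption, since the helper's body is not part of this patch.

package main

import (
	"fmt"
	"strconv"
)

// flattenFixed64 mirrors the generated pattern: try the string fixed64 form
// first, then the float64 form produced by encoding/json, otherwise hand the
// raw value back for Terraform core to deal with.
func flattenFixed64(v interface{}) interface{} {
	if s, ok := v.(string); ok {
		if n, err := strconv.ParseInt(s, 10, 64); err == nil {
			return n
		}
	}
	if f, ok := v.(float64); ok {
		return int(f)
	}
	return v
}

func main() {
	fmt.Println(flattenFixed64("30"))        // 30, from the string fixed64 form
	fmt.Println(flattenFixed64(float64(30))) // 30, from the JSON number form
}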
+ } else if val := reflect.ValueOf(transformedEnableStackdriverLogging); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enableStackdriverLogging"] = transformedEnableStackdriverLogging + } + + transformedEnableInteractionLogging, err := expandDialogflowCXAgentAdvancedSettingsLoggingSettingsEnableInteractionLogging(original["enable_interaction_logging"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnableInteractionLogging); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enableInteractionLogging"] = transformedEnableInteractionLogging + } + + transformedEnableConsentBasedRedaction, err := expandDialogflowCXAgentAdvancedSettingsLoggingSettingsEnableConsentBasedRedaction(original["enable_consent_based_redaction"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnableConsentBasedRedaction); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enableConsentBasedRedaction"] = transformedEnableConsentBasedRedaction + } + + return transformed, nil +} + +func expandDialogflowCXAgentAdvancedSettingsLoggingSettingsEnableStackdriverLogging(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXAgentAdvancedSettingsLoggingSettingsEnableInteractionLogging(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXAgentAdvancedSettingsLoggingSettingsEnableConsentBasedRedaction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandDialogflowCXAgentGitIntegrationSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_entity_type.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_entity_type.go index 023c6da949e..7e355cff751 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_entity_type.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_entity_type.go @@ -218,7 +218,6 @@ func resourceDialogflowCXEntityTypeCreate(d *schema.ResourceData, meta interface } headers := make(http.Header) - // extract location from the parent location := "" @@ -282,7 +281,6 @@ func resourceDialogflowCXEntityTypeRead(d *schema.ResourceData, meta interface{} } headers := make(http.Header) - // extract location from the parent location := "" @@ -432,7 +430,6 @@ func resourceDialogflowCXEntityTypeUpdate(d *schema.ResourceData, meta interface if err != nil { return err } - // extract location from the parent location := "" @@ -499,7 +496,6 @@ func resourceDialogflowCXEntityTypeDelete(d *schema.ResourceData, meta interface } headers := make(http.Header) - // extract location from the parent location := "" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_environment.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_environment.go index e44d16a51cd..0cba5a92f29 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_environment.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_environment.go @@ -145,7 +145,6 @@ func resourceDialogflowCXEnvironmentCreate(d *schema.ResourceData, meta interfac } headers := make(http.Header) - // extract location from the parent location := "" @@ -230,7 +229,6 @@ func resourceDialogflowCXEnvironmentRead(d *schema.ResourceData, meta interface{ } headers := make(http.Header) - // extract location from the parent location := "" @@ -331,7 +329,6 @@ func resourceDialogflowCXEnvironmentUpdate(d *schema.ResourceData, meta interfac if err != nil { return err } - // extract location from the parent location := "" @@ -405,7 +402,6 @@ func resourceDialogflowCXEnvironmentDelete(d *schema.ResourceData, meta interfac } headers := make(http.Header) - // extract location from the parent location := "" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_flow.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_flow.go index 7446da3d287..9e223fe04fc 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_flow.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_flow.go @@ -114,6 +114,69 @@ Format: gs://bucket/object-name-or-prefix`, }, }, }, + "logging_settings": { + Type: schema.TypeList, + Optional: true, + Description: `Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: +* Agent level`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_consent_based_redaction": { + Type: schema.TypeBool, + Optional: true, + Description: `Enables consent-based end-user input redaction, if true, a pre-defined session parameter **$session.params.conversation-redaction** will be used to determine if the utterance should be redacted.`, + }, + "enable_interaction_logging": { + Type: schema.TypeBool, + Optional: true, + Description: `Enables DF Interaction logging.`, + }, + "enable_stackdriver_logging": { + Type: schema.TypeBool, + Optional: true, + Description: `Enables Google Cloud Logging.`, + }, + }, + }, + }, + "speech_settings": { + Type: schema.TypeList, + Optional: true, + Description: `Settings for speech to text detection. Exposed at the following levels: +* Agent level +* Flow level +* Page level +* Parameter level`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "endpointer_sensitivity": { + Type: schema.TypeInt, + Optional: true, + Description: `Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100.`, + }, + "models": { + Type: schema.TypeMap, + Optional: true, + Description: `Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. 
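The models field declared in this schema is a schema.TypeMap with string elements; on the expand side (the SpeechSettingsModels expanders in this patch) Terraform hands it over as map[string]interface{} and the API expects map[string]string. A small sketch of that conversion is below; the language/model values are illustrative only, not taken from this patch.

package main

import "fmt"

// expandStringMap mirrors the generated TypeMap expander: every value is
// asserted to string, which the schema (Elem: TypeString) guarantees.
func expandStringMap(v interface{}) map[string]string {
	m := make(map[string]string)
	if v == nil {
		return m
	}
	for k, val := range v.(map[string]interface{}) {
		m[k] = val.(string)
	}
	return m
}

func main() {
	raw := map[string]interface{}{"en-US": "telephony", "fr-FR": "long"}
	fmt.Println(expandStringMap(raw)) // map[en-US:telephony fr-FR:long]
}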
For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models). +An object containing a list of **"key": value** pairs. Example: **{ "name": "wrench", "mass": "1.3kg", "count": "3" }**.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "no_speech_timeout": { + Type: schema.TypeString, + Optional: true, + Description: `Timeout before detecting no speech. +A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s".`, + }, + "use_timeout_based_endpointing": { + Type: schema.TypeBool, + Optional: true, + Description: `Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value.`, + }, + }, + }, + }, }, }, }, @@ -773,7 +836,6 @@ func resourceDialogflowCXFlowCreate(d *schema.ResourceData, meta interface{}) er } headers := make(http.Header) - // extract location from the parent location := "" @@ -865,7 +927,6 @@ func resourceDialogflowCXFlowRead(d *schema.ResourceData, meta interface{}) erro } headers := make(http.Header) - // extract location from the parent location := "" @@ -1020,7 +1081,6 @@ func resourceDialogflowCXFlowUpdate(d *schema.ResourceData, meta interface{}) er if err != nil { return err } - // extract location from the parent location := "" @@ -1087,7 +1147,6 @@ func resourceDialogflowCXFlowDelete(d *schema.ResourceData, meta interface{}) er } headers := make(http.Header) - // extract location from the parent location := "" @@ -1865,8 +1924,12 @@ func flattenDialogflowCXFlowAdvancedSettings(v interface{}, d *schema.ResourceDa transformed := make(map[string]interface{}) transformed["audio_export_gcs_destination"] = flattenDialogflowCXFlowAdvancedSettingsAudioExportGcsDestination(original["audioExportGcsDestination"], d, config) + transformed["speech_settings"] = + flattenDialogflowCXFlowAdvancedSettingsSpeechSettings(original["speechSettings"], d, config) transformed["dtmf_settings"] = flattenDialogflowCXFlowAdvancedSettingsDtmfSettings(original["dtmfSettings"], d, config) + transformed["logging_settings"] = + flattenDialogflowCXFlowAdvancedSettingsLoggingSettings(original["loggingSettings"], d, config) return []interface{}{transformed} } func flattenDialogflowCXFlowAdvancedSettingsAudioExportGcsDestination(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { @@ -1886,6 +1949,54 @@ func flattenDialogflowCXFlowAdvancedSettingsAudioExportGcsDestinationUri(v inter return v } +func flattenDialogflowCXFlowAdvancedSettingsSpeechSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["endpointer_sensitivity"] = + flattenDialogflowCXFlowAdvancedSettingsSpeechSettingsEndpointerSensitivity(original["endpointerSensitivity"], d, config) + transformed["no_speech_timeout"] = + flattenDialogflowCXFlowAdvancedSettingsSpeechSettingsNoSpeechTimeout(original["noSpeechTimeout"], d, config) + transformed["use_timeout_based_endpointing"] = + flattenDialogflowCXFlowAdvancedSettingsSpeechSettingsUseTimeoutBasedEndpointing(original["useTimeoutBasedEndpointing"], d, config) + transformed["models"] = + flattenDialogflowCXFlowAdvancedSettingsSpeechSettingsModels(original["models"], d, config) + return []interface{}{transformed} +} +func flattenDialogflowCXFlowAdvancedSettingsSpeechSettingsEndpointerSensitivity(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDialogflowCXFlowAdvancedSettingsSpeechSettingsNoSpeechTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXFlowAdvancedSettingsSpeechSettingsUseTimeoutBasedEndpointing(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXFlowAdvancedSettingsSpeechSettingsModels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenDialogflowCXFlowAdvancedSettingsDtmfSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil @@ -1928,6 +2039,21 @@ func flattenDialogflowCXFlowAdvancedSettingsDtmfSettingsFinishDigit(v interface{ return v } +func flattenDialogflowCXFlowAdvancedSettingsLoggingSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("advanced_settings.0.logging_settings") +} +func flattenDialogflowCXFlowAdvancedSettingsLoggingSettingsEnableStackdriverLogging(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXFlowAdvancedSettingsLoggingSettingsEnableInteractionLogging(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXFlowAdvancedSettingsLoggingSettingsEnableConsentBasedRedaction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenDialogflowCXFlowLanguageCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -3008,6 +3134,13 @@ func expandDialogflowCXFlowAdvancedSettings(v interface{}, d tpgresource.Terrafo transformed["audioExportGcsDestination"] = transformedAudioExportGcsDestination } + transformedSpeechSettings, err := expandDialogflowCXFlowAdvancedSettingsSpeechSettings(original["speech_settings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSpeechSettings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["speechSettings"] = transformedSpeechSettings + } + transformedDtmfSettings, err := expandDialogflowCXFlowAdvancedSettingsDtmfSettings(original["dtmf_settings"], d, config) if err != nil { return nil, err @@ -3015,6 +3148,13 @@ func expandDialogflowCXFlowAdvancedSettings(v interface{}, d tpgresource.Terrafo transformed["dtmfSettings"] = transformedDtmfSettings } + transformedLoggingSettings, err := expandDialogflowCXFlowAdvancedSettingsLoggingSettings(original["logging_settings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLoggingSettings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["loggingSettings"] = transformedLoggingSettings + } + return transformed, nil } @@ -3041,6 +3181,69 @@ func expandDialogflowCXFlowAdvancedSettingsAudioExportGcsDestinationUri(v interf return v, nil } +func expandDialogflowCXFlowAdvancedSettingsSpeechSettings(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEndpointerSensitivity, err := expandDialogflowCXFlowAdvancedSettingsSpeechSettingsEndpointerSensitivity(original["endpointer_sensitivity"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEndpointerSensitivity); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["endpointerSensitivity"] = transformedEndpointerSensitivity + } + + transformedNoSpeechTimeout, err := expandDialogflowCXFlowAdvancedSettingsSpeechSettingsNoSpeechTimeout(original["no_speech_timeout"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNoSpeechTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["noSpeechTimeout"] = transformedNoSpeechTimeout + } + + transformedUseTimeoutBasedEndpointing, err := expandDialogflowCXFlowAdvancedSettingsSpeechSettingsUseTimeoutBasedEndpointing(original["use_timeout_based_endpointing"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUseTimeoutBasedEndpointing); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["useTimeoutBasedEndpointing"] = transformedUseTimeoutBasedEndpointing + } + + transformedModels, err := expandDialogflowCXFlowAdvancedSettingsSpeechSettingsModels(original["models"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedModels); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["models"] = transformedModels + } + + return transformed, nil +} + +func expandDialogflowCXFlowAdvancedSettingsSpeechSettingsEndpointerSensitivity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXFlowAdvancedSettingsSpeechSettingsNoSpeechTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXFlowAdvancedSettingsSpeechSettingsUseTimeoutBasedEndpointing(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXFlowAdvancedSettingsSpeechSettingsModels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + func expandDialogflowCXFlowAdvancedSettingsDtmfSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { @@ -3086,6 +3289,51 @@ func expandDialogflowCXFlowAdvancedSettingsDtmfSettingsFinishDigit(v interface{} return v, nil } +func expandDialogflowCXFlowAdvancedSettingsLoggingSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEnableStackdriverLogging, err := 
expandDialogflowCXFlowAdvancedSettingsLoggingSettingsEnableStackdriverLogging(original["enable_stackdriver_logging"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnableStackdriverLogging); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enableStackdriverLogging"] = transformedEnableStackdriverLogging + } + + transformedEnableInteractionLogging, err := expandDialogflowCXFlowAdvancedSettingsLoggingSettingsEnableInteractionLogging(original["enable_interaction_logging"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnableInteractionLogging); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enableInteractionLogging"] = transformedEnableInteractionLogging + } + + transformedEnableConsentBasedRedaction, err := expandDialogflowCXFlowAdvancedSettingsLoggingSettingsEnableConsentBasedRedaction(original["enable_consent_based_redaction"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnableConsentBasedRedaction); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enableConsentBasedRedaction"] = transformedEnableConsentBasedRedaction + } + + return transformed, nil +} + +func expandDialogflowCXFlowAdvancedSettingsLoggingSettingsEnableStackdriverLogging(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXFlowAdvancedSettingsLoggingSettingsEnableInteractionLogging(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXFlowAdvancedSettingsLoggingSettingsEnableConsentBasedRedaction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandDialogflowCXFlowLanguageCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_intent.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_intent.go index f893fddc0d6..4f283b25bff 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_intent.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_intent.go @@ -297,7 +297,6 @@ func resourceDialogflowCXIntentCreate(d *schema.ResourceData, meta interface{}) } headers := make(http.Header) - // extract location from the parent location := "" @@ -389,7 +388,6 @@ func resourceDialogflowCXIntentRead(d *schema.ResourceData, meta interface{}) er } headers := make(http.Header) - // extract location from the parent location := "" @@ -550,7 +548,6 @@ func resourceDialogflowCXIntentUpdate(d *schema.ResourceData, meta interface{}) if err != nil { return err } - // extract location from the parent location := "" @@ -617,7 +614,6 @@ func resourceDialogflowCXIntentDelete(d *schema.ResourceData, meta interface{}) } headers := make(http.Header) - // extract location from the parent location := "" diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_page.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_page.go index 55dd1ae2301..4157b8eec60 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_page.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_page.go @@ -1483,7 +1483,6 @@ func resourceDialogflowCXPageCreate(d *schema.ResourceData, meta interface{}) er } headers := make(http.Header) - // extract location from the parent location := "" @@ -1547,7 +1546,6 @@ func resourceDialogflowCXPageRead(d *schema.ResourceData, meta interface{}) erro } headers := make(http.Header) - // extract location from the parent location := "" @@ -1700,7 +1698,6 @@ func resourceDialogflowCXPageUpdate(d *schema.ResourceData, meta interface{}) er if err != nil { return err } - // extract location from the parent location := "" @@ -1767,7 +1764,6 @@ func resourceDialogflowCXPageDelete(d *schema.ResourceData, meta interface{}) er } headers := make(http.Header) - // extract location from the parent location := "" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_security_settings.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_security_settings.go index a409696e192..e8456aac827 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_security_settings.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_security_settings.go @@ -302,6 +302,11 @@ func resourceDialogflowCXSecuritySettingsCreate(d *schema.ResourceData, meta int } d.SetId(id) + // This is useful if the resource in question doesn't have a perfectly consistent API + // That is, the Operation for Create might return before the Get operation shows the + // completed state of the resource. + time.Sleep(5 * time.Second) + log.Printf("[DEBUG] Finished creating SecuritySettings %q: %#v", d.Id(), res) return resourceDialogflowCXSecuritySettingsRead(d, meta) @@ -544,6 +549,10 @@ func resourceDialogflowCXSecuritySettingsUpdate(d *schema.ResourceData, meta int } + // This is useful if the resource in question doesn't have a perfectly consistent API + // That is, the Operation for Update might return before the Get operation shows the + // completed state of the resource.
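+ // Without this pause, the Read that immediately follows can still observe the + // pre-update state of the SecuritySettings resource and report a spurious diff.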
+ time.Sleep(5 * time.Second) return resourceDialogflowCXSecuritySettingsRead(d, meta) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_test_case.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_test_case.go index 65236586847..ed84f3ba2d3 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_test_case.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_test_case.go @@ -628,7 +628,6 @@ func resourceDialogflowCXTestCaseCreate(d *schema.ResourceData, meta interface{} } headers := make(http.Header) - // extract location from the parent location := "" @@ -692,7 +691,6 @@ func resourceDialogflowCXTestCaseRead(d *schema.ResourceData, meta interface{}) } headers := make(http.Header) - // extract location from the parent location := "" @@ -822,7 +820,6 @@ func resourceDialogflowCXTestCaseUpdate(d *schema.ResourceData, meta interface{} if err != nil { return err } - // extract location from the parent location := "" @@ -889,7 +886,6 @@ func resourceDialogflowCXTestCaseDelete(d *schema.ResourceData, meta interface{} } headers := make(http.Header) - // extract location from the parent location := "" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_version.go index f3092060a76..7d859ac37b5 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_version.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_version.go @@ -165,7 +165,6 @@ func resourceDialogflowCXVersionCreate(d *schema.ResourceData, meta interface{}) } headers := make(http.Header) - // extract location from the parent location := "" @@ -250,7 +249,6 @@ func resourceDialogflowCXVersionRead(d *schema.ResourceData, meta interface{}) e } headers := make(http.Header) - // extract location from the parent location := "" @@ -344,7 +342,6 @@ func resourceDialogflowCXVersionUpdate(d *schema.ResourceData, meta interface{}) if err != nil { return err } - // extract location from the parent location := "" @@ -411,7 +408,6 @@ func resourceDialogflowCXVersionDelete(d *schema.ResourceData, meta interface{}) } headers := make(http.Header) - // extract location from the parent location := "" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_webhook.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_webhook.go index d10681fd8a2..90cfd666ff8 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_webhook.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_webhook.go @@ -249,7 +249,6 @@ func 
resourceDialogflowCXWebhookCreate(d *schema.ResourceData, meta interface{}) } headers := make(http.Header) - // extract location from the parent location := "" @@ -313,7 +312,6 @@ func resourceDialogflowCXWebhookRead(d *schema.ResourceData, meta interface{}) e } headers := make(http.Header) - // extract location from the parent location := "" @@ -479,7 +477,6 @@ func resourceDialogflowCXWebhookUpdate(d *schema.ResourceData, meta interface{}) if err != nil { return err } - // extract location from the parent location := "" @@ -546,7 +543,6 @@ func resourceDialogflowCXWebhookDelete(d *schema.ResourceData, meta interface{}) } headers := make(http.Header) - // extract location from the parent location := "" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/discoveryengine/resource_discovery_engine_chat_engine.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/discoveryengine/resource_discovery_engine_chat_engine.go index 279a214b811..7a2d34fade6 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/discoveryengine/resource_discovery_engine_chat_engine.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/discoveryengine/resource_discovery_engine_chat_engine.go @@ -64,10 +64,12 @@ func ResourceDiscoveryEngineChatEngine() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "agent_creation_config": { - Type: schema.TypeList, - Required: true, - Description: `The configuration to generate the Dialogflow agent that is associated to this Engine.`, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The configuration to generate the Dialogflow agent that is associated to this Engine. +Exactly one of 'agent_creation_config' or 'dialogflow_agent_to_link' must be set.`, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "default_language_code": { @@ -92,6 +94,16 @@ func ResourceDiscoveryEngineChatEngine() *schema.Resource { }, }, }, + ExactlyOneOf: []string{"chat_engine_config.0.agent_creation_config", "chat_engine_config.0.dialogflow_agent_to_link"}, + }, + "dialogflow_agent_to_link": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateRegexp(`^projects\/[a-zA-Z0-9-]+(?:\/locations\/[a-zA-Z0-9-]+)?\/agents\/[a-zA-Z0-9-]+$`), + Description: `The resource name of an existing Dialogflow agent to link to this Chat Engine. Format: 'projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>'. +Exactly one of 'agent_creation_config' or 'dialogflow_agent_to_link' must be set.`, + ExactlyOneOf: []string{"chat_engine_config.0.agent_creation_config", "chat_engine_config.0.dialogflow_agent_to_link"}, }, }, }, @@ -105,7 +117,6 @@ func ResourceDiscoveryEngineChatEngine() *schema.Resource { "data_store_ids": { Type: schema.TypeList, Required: true, - ForceNew: true, Description: `The data stores associated with this engine. Multiple DataStores in the same Collection can be associated here. All listed DataStores must be 'SOLUTION_TYPE_CHAT'.
Adding or removing data stores will force recreation.`, MinItems: 1, Elem: &schema.Schema{ @@ -403,6 +414,12 @@ func resourceDiscoveryEngineChatEngineUpdate(d *schema.ResourceData, meta interf } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { obj["displayName"] = displayNameProp } + dataStoreIdsProp, err := expandDiscoveryEngineChatEngineDataStoreIds(d.Get("data_store_ids"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("data_store_ids"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dataStoreIdsProp)) { + obj["dataStoreIds"] = dataStoreIdsProp + } obj, err = resourceDiscoveryEngineChatEngineEncoder(d, meta, obj) if err != nil { @@ -421,6 +438,10 @@ func resourceDiscoveryEngineChatEngineUpdate(d *schema.ResourceData, meta interf if d.HasChange("display_name") { updateMask = append(updateMask, "displayName") } + + if d.HasChange("data_store_ids") { + updateMask = append(updateMask, "dataStoreIds") + } // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) @@ -619,6 +640,13 @@ func expandDiscoveryEngineChatEngineChatEngineConfig(v interface{}, d tpgresourc transformed["agentCreationConfig"] = transformedAgentCreationConfig } + transformedDialogflowAgentToLink, err := expandDiscoveryEngineChatEngineChatEngineConfigDialogflowAgentToLink(original["dialogflow_agent_to_link"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDialogflowAgentToLink); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dialogflowAgentToLink"] = transformedDialogflowAgentToLink + } + return transformed, nil } @@ -678,6 +706,10 @@ func expandDiscoveryEngineChatEngineChatEngineConfigAgentCreationConfigLocation( return v, nil } +func expandDiscoveryEngineChatEngineChatEngineConfigDialogflowAgentToLink(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandDiscoveryEngineChatEngineCommonConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { @@ -702,7 +734,7 @@ func expandDiscoveryEngineChatEngineCommonConfigCompanyName(v interface{}, d tpg } func resourceDiscoveryEngineChatEngineEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - //hard code solutionType to "SOLUTION_TYPE_CHAT" for chat engine resource + // hard code solutionType to "SOLUTION_TYPE_CHAT" for chat engine resource obj["solutionType"] = "SOLUTION_TYPE_CHAT" return obj, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/discoveryengine/resource_discovery_engine_data_store.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/discoveryengine/resource_discovery_engine_data_store.go index 3d8e9b145fd..b7a083ba859 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/discoveryengine/resource_discovery_engine_data_store.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/discoveryengine/resource_discovery_engine_data_store.go @@ -45,9 
+45,9 @@ func ResourceDiscoveryEngineDataStore() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(60 * time.Minute), - Update: schema.DefaultTimeout(60 * time.Minute), - Delete: schema.DefaultTimeout(60 * time.Minute), + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), }, CustomizeDiff: customdiff.All( @@ -78,8 +78,8 @@ string with a length limit of 128 characters.`, Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: verify.ValidateEnum([]string{"GENERIC", "MEDIA"}), - Description: `The industry vertical that the data store registers. Possible values: ["GENERIC", "MEDIA"]`, + ValidateFunc: verify.ValidateEnum([]string{"GENERIC", "MEDIA", "HEALTHCARE_FHIR"}), + Description: `The industry vertical that the data store registers. Possible values: ["GENERIC", "MEDIA", "HEALTHCARE_FHIR"]`, }, "location": { Type: schema.TypeString, @@ -104,6 +104,38 @@ PUBLIC_WEBSITE contentConfig), this flag will be ignored.`, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "chunking_config": { + Type: schema.TypeList, + Optional: true, + Description: `Whether chunking mode is enabled.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "layout_based_chunking_config": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration for the layout based chunking.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "chunk_size": { + Type: schema.TypeInt, + Optional: true, + Description: `The token size limit for each chunk. +Supported values: 100-500 (inclusive). Default value: 500.`, + }, + "include_ancestor_headings": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss. +Default value: False.`, + }, + }, + }, + }, + }, + }, + }, "default_parsing_config": { Type: schema.TypeList, Optional: true, @@ -123,6 +155,16 @@ config will be applied to all file types for Document parsing.`, }, ExactlyOneOf: []string{}, }, + "layout_parsing_config": { + Type: schema.TypeList, + Optional: true, + Description: `Configurations applied to layout parser.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + ExactlyOneOf: []string{}, + }, "ocr_parsing_config": { Type: schema.TypeList, Optional: true, @@ -165,6 +207,16 @@ config will be applied to all file types for Document parsing.`, }, ExactlyOneOf: []string{}, }, + "layout_parsing_config": { + Type: schema.TypeList, + Optional: true, + Description: `Configurations applied to layout parser.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + ExactlyOneOf: []string{}, + }, "ocr_parsing_config": { Type: schema.TypeList, Optional: true, @@ -193,14 +245,26 @@ config will be applied to all file types for Document parsing.`, }, }, }, + "skip_default_schema_creation": { + Type: schema.TypeBool, + Optional: true, + Description: `A boolean flag indicating whether to skip the default schema creation for +the data store. Only enable this flag if you are certain that the default +schema is incompatible with your use case. +If set to true, you must manually create a schema for the data store +before any documents can be ingested. 
+This flag cannot be specified if 'data_store.starting_schema' is +specified.`, + Default: false, + }, "solution_types": { Type: schema.TypeList, Optional: true, ForceNew: true, - Description: `The solutions that the data store enrolls. Possible values: ["SOLUTION_TYPE_RECOMMENDATION", "SOLUTION_TYPE_SEARCH", "SOLUTION_TYPE_CHAT"]`, + Description: `The solutions that the data store enrolls. Possible values: ["SOLUTION_TYPE_RECOMMENDATION", "SOLUTION_TYPE_SEARCH", "SOLUTION_TYPE_CHAT", "SOLUTION_TYPE_GENERATIVE_CHAT"]`, Elem: &schema.Schema{ Type: schema.TypeString, - ValidateFunc: verify.ValidateEnum([]string{"SOLUTION_TYPE_RECOMMENDATION", "SOLUTION_TYPE_SEARCH", "SOLUTION_TYPE_CHAT"}), + ValidateFunc: verify.ValidateEnum([]string{"SOLUTION_TYPE_RECOMMENDATION", "SOLUTION_TYPE_SEARCH", "SOLUTION_TYPE_CHAT", "SOLUTION_TYPE_GENERATIVE_CHAT"}), }, }, "create_time": { @@ -271,7 +335,7 @@ func resourceDiscoveryEngineDataStoreCreate(d *schema.ResourceData, meta interfa obj["documentProcessingConfig"] = documentProcessingConfigProp } - url, err := tpgresource.ReplaceVars(d, config, "{{DiscoveryEngineBasePath}}projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores?dataStoreId={{data_store_id}}&createAdvancedSiteSearch={{create_advanced_site_search}}") + url, err := tpgresource.ReplaceVars(d, config, "{{DiscoveryEngineBasePath}}projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores?dataStoreId={{data_store_id}}&createAdvancedSiteSearch={{create_advanced_site_search}}&skipDefaultSchemaCreation={{skip_default_schema_creation}}") if err != nil { return err } @@ -463,13 +527,6 @@ func resourceDiscoveryEngineDataStoreUpdate(d *schema.ResourceData, meta interfa log.Printf("[DEBUG] Finished updating DataStore %q: %#v", d.Id(), res) } - err = DiscoveryEngineOperationWaitTime( - config, res, project, "Updating DataStore", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } } return resourceDiscoveryEngineDataStoreRead(d, meta) @@ -586,6 +643,8 @@ func flattenDiscoveryEngineDataStoreDocumentProcessingConfig(v interface{}, d *s transformed := make(map[string]interface{}) transformed["name"] = flattenDiscoveryEngineDataStoreDocumentProcessingConfigName(original["name"], d, config) + transformed["chunking_config"] = + flattenDiscoveryEngineDataStoreDocumentProcessingConfigChunkingConfig(original["chunkingConfig"], d, config) transformed["default_parsing_config"] = flattenDiscoveryEngineDataStoreDocumentProcessingConfigDefaultParsingConfig(original["defaultParsingConfig"], d, config) transformed["parsing_config_overrides"] = @@ -596,6 +655,52 @@ func flattenDiscoveryEngineDataStoreDocumentProcessingConfigName(v interface{}, return v } +func flattenDiscoveryEngineDataStoreDocumentProcessingConfigChunkingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["layout_based_chunking_config"] = + flattenDiscoveryEngineDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig(original["layoutBasedChunkingConfig"], d, config) + return []interface{}{transformed} +} +func flattenDiscoveryEngineDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := 
v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["chunk_size"] = + flattenDiscoveryEngineDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigChunkSize(original["chunkSize"], d, config) + transformed["include_ancestor_headings"] = + flattenDiscoveryEngineDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigIncludeAncestorHeadings(original["includeAncestorHeadings"], d, config) + return []interface{}{transformed} +} +func flattenDiscoveryEngineDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigChunkSize(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDiscoveryEngineDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigIncludeAncestorHeadings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenDiscoveryEngineDataStoreDocumentProcessingConfigDefaultParsingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil @@ -609,6 +714,8 @@ func flattenDiscoveryEngineDataStoreDocumentProcessingConfigDefaultParsingConfig flattenDiscoveryEngineDataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfig(original["digitalParsingConfig"], d, config) transformed["ocr_parsing_config"] = flattenDiscoveryEngineDataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfig(original["ocrParsingConfig"], d, config) + transformed["layout_parsing_config"] = + flattenDiscoveryEngineDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig(original["layoutParsingConfig"], d, config) return []interface{}{transformed} } func flattenDiscoveryEngineDataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { @@ -636,6 +743,14 @@ func flattenDiscoveryEngineDataStoreDocumentProcessingConfigDefaultParsingConfig return v } +func flattenDiscoveryEngineDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + func flattenDiscoveryEngineDataStoreDocumentProcessingConfigParsingConfigOverrides(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v @@ -648,6 +763,7 @@ func flattenDiscoveryEngineDataStoreDocumentProcessingConfigParsingConfigOverrid "file_type": k, "digital_parsing_config": flattenDiscoveryEngineDataStoreDocumentProcessingConfigParsingConfigOverridesDigitalParsingConfig(original["digitalParsingConfig"], d, config), "ocr_parsing_config": flattenDiscoveryEngineDataStoreDocumentProcessingConfigParsingConfigOverridesOcrParsingConfig(original["ocrParsingConfig"], d, config), + "layout_parsing_config": flattenDiscoveryEngineDataStoreDocumentProcessingConfigParsingConfigOverridesLayoutParsingConfig(original["layoutParsingConfig"], d, config), }) } return transformed @@ -677,6 +793,14 @@ func 
flattenDiscoveryEngineDataStoreDocumentProcessingConfigParsingConfigOverrid return v } +func flattenDiscoveryEngineDataStoreDocumentProcessingConfigParsingConfigOverridesLayoutParsingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + func flattenDiscoveryEngineDataStoreCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -713,6 +837,13 @@ func expandDiscoveryEngineDataStoreDocumentProcessingConfig(v interface{}, d tpg transformed["name"] = transformedName } + transformedChunkingConfig, err := expandDiscoveryEngineDataStoreDocumentProcessingConfigChunkingConfig(original["chunking_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedChunkingConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["chunkingConfig"] = transformedChunkingConfig + } + transformedDefaultParsingConfig, err := expandDiscoveryEngineDataStoreDocumentProcessingConfigDefaultParsingConfig(original["default_parsing_config"], d, config) if err != nil { return nil, err @@ -734,6 +865,64 @@ func expandDiscoveryEngineDataStoreDocumentProcessingConfigName(v interface{}, d return v, nil } +func expandDiscoveryEngineDataStoreDocumentProcessingConfigChunkingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLayoutBasedChunkingConfig, err := expandDiscoveryEngineDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig(original["layout_based_chunking_config"], d, config) + if err != nil { + return nil, err + } else { + transformed["layoutBasedChunkingConfig"] = transformedLayoutBasedChunkingConfig + } + + return transformed, nil +} + +func expandDiscoveryEngineDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedChunkSize, err := expandDiscoveryEngineDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigChunkSize(original["chunk_size"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedChunkSize); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["chunkSize"] = transformedChunkSize + } + + transformedIncludeAncestorHeadings, err := expandDiscoveryEngineDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigIncludeAncestorHeadings(original["include_ancestor_headings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIncludeAncestorHeadings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["includeAncestorHeadings"] = transformedIncludeAncestorHeadings + } + + return transformed, nil +} + +func expandDiscoveryEngineDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigChunkSize(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDiscoveryEngineDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigIncludeAncestorHeadings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandDiscoveryEngineDataStoreDocumentProcessingConfigDefaultParsingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { @@ -757,6 +946,13 @@ func expandDiscoveryEngineDataStoreDocumentProcessingConfigDefaultParsingConfig( transformed["ocrParsingConfig"] = transformedOcrParsingConfig } + transformedLayoutParsingConfig, err := expandDiscoveryEngineDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig(original["layout_parsing_config"], d, config) + if err != nil { + return nil, err + } else { + transformed["layoutParsingConfig"] = transformedLayoutParsingConfig + } + return transformed, nil } @@ -798,6 +994,21 @@ func expandDiscoveryEngineDataStoreDocumentProcessingConfigDefaultParsingConfigO return v, nil } +func expandDiscoveryEngineDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + func expandDiscoveryEngineDataStoreDocumentProcessingConfigParsingConfigOverrides(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { if v == nil { return map[string]interface{}{}, nil @@ -821,6 +1032,13 @@ func expandDiscoveryEngineDataStoreDocumentProcessingConfigParsingConfigOverride transformed["ocrParsingConfig"] = transformedOcrParsingConfig } + transformedLayoutParsingConfig, err := expandDiscoveryEngineDataStoreDocumentProcessingConfigParsingConfigOverridesLayoutParsingConfig(original["layout_parsing_config"], d, config) + if err != nil { + return nil, err + } else { + transformed["layoutParsingConfig"] = transformedLayoutParsingConfig + } + transformedFileType, err := tpgresource.ExpandString(original["file_type"], d, config) if err != nil { return nil, err @@ -867,3 +1085,18 @@ func expandDiscoveryEngineDataStoreDocumentProcessingConfigParsingConfigOverride func expandDiscoveryEngineDataStoreDocumentProcessingConfigParsingConfigOverridesOcrParsingConfigUseNativeText(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } + +func expandDiscoveryEngineDataStoreDocumentProcessingConfigParsingConfigOverridesLayoutParsingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/discoveryengine/resource_discovery_engine_schema.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/discoveryengine/resource_discovery_engine_schema.go new file mode 100644 index 00000000000..dc98d3eac2b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/discoveryengine/resource_discovery_engine_schema.go @@ -0,0 +1,319 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package discoveryengine + +import ( + "fmt" + "log" + "net/http" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceDiscoveryEngineSchema() *schema.Resource { + return &schema.Resource{ + Create: resourceDiscoveryEngineSchemaCreate, + Read: resourceDiscoveryEngineSchemaRead, + Delete: resourceDiscoveryEngineSchemaDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDiscoveryEngineSchemaImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "data_store_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The unique id of the data store.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The geographic location where the data store should reside. The value can +only be one of "global", "us" and "eu".`, + }, + "schema_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The unique id of the schema.`, + }, + "json_schema": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringIsJSON, + StateFunc: func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }, + Description: `The JSON representation of the schema.`, + ExactlyOneOf: []string{"json_schema"}, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The unique full resource name of the schema. Values are of the format +'projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/schemas/{schema_id}'. 
+This field must be a UTF-8 encoded string with a length limit of 1024 +characters.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDiscoveryEngineSchemaCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + jsonSchemaProp, err := expandDiscoveryEngineSchemaJsonSchema(d.Get("json_schema"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("json_schema"); !tpgresource.IsEmptyValue(reflect.ValueOf(jsonSchemaProp)) && (ok || !reflect.DeepEqual(v, jsonSchemaProp)) { + obj["jsonSchema"] = jsonSchemaProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DiscoveryEngineBasePath}}projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}/schemas?schemaId={{schema_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Schema: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Schema: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating Schema: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}/schemas/{{schema_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = DiscoveryEngineOperationWaitTime( + config, res, project, "Creating Schema", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Schema: %s", err) + } + + log.Printf("[DEBUG] Finished creating Schema %q: %#v", d.Id(), res) + + return resourceDiscoveryEngineSchemaRead(d, meta) +} + +func resourceDiscoveryEngineSchemaRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DiscoveryEngineBasePath}}projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}/schemas/{{schema_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Schema: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: 
"GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DiscoveryEngineSchema %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Schema: %s", err) + } + + if err := d.Set("name", flattenDiscoveryEngineSchemaName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Schema: %s", err) + } + if err := d.Set("json_schema", flattenDiscoveryEngineSchemaJsonSchema(res["jsonSchema"], d, config)); err != nil { + return fmt.Errorf("Error reading Schema: %s", err) + } + + return nil +} + +func resourceDiscoveryEngineSchemaDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Schema: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{DiscoveryEngineBasePath}}projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}/schemas/{{schema_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting Schema %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Schema") + } + + err = DiscoveryEngineOperationWaitTime( + config, res, project, "Deleting Schema", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Schema %q: %#v", d.Id(), res) + return nil +} + +func resourceDiscoveryEngineSchemaImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^projects/(?P[^/]+)/locations/(?P[^/]+)/collections/default_collection/dataStores/(?P[^/]+)/schemas/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}/schemas/{{schema_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDiscoveryEngineSchemaName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDiscoveryEngineSchemaJsonSchema(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + s, err := structure.NormalizeJsonString(v) + if err != nil { + log.Printf("[ERROR] failed to normalize JSON string: %v", err) + } + return s +} + +func 
expandDiscoveryEngineSchemaJsonSchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/discoveryengine/resource_discovery_engine_schema_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/discoveryengine/resource_discovery_engine_schema_sweeper.go new file mode 100644 index 00000000000..b9daafe42c0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/discoveryengine/resource_discovery_engine_schema_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package discoveryengine + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("DiscoveryEngineSchema", testSweepDiscoveryEngineSchema) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepDiscoveryEngineSchema(region string) error { + resourceName := "DiscoveryEngineSchema" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://{{location}}-discoveryengine.googleapis.com/v1/projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}/schemas", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["schemas"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } 
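+ // Each entry in the "schemas" list is a map describing one schema; the name used + // for deletion comes from its "id" or "name" field, and entries without the + // sweepable test prefix are counted and skipped.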
+ + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be swept + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://{{location}}-discoveryengine.googleapis.com/v1/projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}/schemas/{{schema_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/discoveryengine/resource_discovery_engine_target_site.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/discoveryengine/resource_discovery_engine_target_site.go new file mode 100644 index 00000000000..c4db9cdb7fe --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/discoveryengine/resource_discovery_engine_target_site.go @@ -0,0 +1,540 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md.
+// +// ---------------------------------------------------------------------------- + +package discoveryengine + +import ( + "fmt" + "log" + "net/http" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceDiscoveryEngineTargetSite() *schema.Resource { + return &schema.Resource{ + Create: resourceDiscoveryEngineTargetSiteCreate, + Read: resourceDiscoveryEngineTargetSiteRead, + Delete: resourceDiscoveryEngineTargetSiteDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDiscoveryEngineTargetSiteImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "data_store_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The unique id of the data store.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The geographic location where the data store should reside. The value can +only be one of "global", "us" and "eu".`, + }, + "provided_uri_pattern": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The user provided URI pattern from which the 'generated_uri_pattern' is +generated.`, + }, + "exact_match": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `If set to false, a uri_pattern is generated to include all pages whose +address contains the provided_uri_pattern. If set to true, an uri_pattern +is generated to try to be an exact match of the provided_uri_pattern or +just the specific page if the provided_uri_pattern is a specific one. +provided_uri_pattern is always normalized to generate the URI pattern to +be used by the search engine.`, + Default: false, + }, + "type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"INCLUDE", "EXCLUDE", ""}), + Description: `The possible target site types. Possible values: ["INCLUDE", "EXCLUDE"]`, + }, + "failure_reason": { + Type: schema.TypeList, + Computed: true, + Description: `Site search indexing failure reasons.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "quota_failure": { + Type: schema.TypeList, + Optional: true, + Description: `Site verification state indicating the ownership and validity.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "total_required_quota": { + Type: schema.TypeInt, + Optional: true, + Description: `This number is an estimation on how much total quota this project +needs to successfully complete indexing.`, + }, + }, + }, + }, + }, + }, + }, + "generated_uri_pattern": { + Type: schema.TypeString, + Computed: true, + Description: `This is system-generated based on the 'provided_uri_pattern'.`, + }, + "indexing_status": { + Type: schema.TypeString, + Computed: true, + Description: `The indexing status.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The unique full resource name of the target site. 
Values are of the format +'projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/siteSearchEngine/targetSites/{target_site_id}'. +This field must be a UTF-8 encoded string with a length limit of 1024 +characters.`, + }, + "root_domain_uri": { + Type: schema.TypeString, + Computed: true, + Description: `Root domain of the 'provided_uri_pattern'.`, + }, + "site_verification_info": { + Type: schema.TypeList, + Computed: true, + Description: `Site ownership and validity verification status.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "site_verification_state": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"VERIFIED", "UNVERIFIED", "EXEMPTED", ""}), + Description: `Site verification state indicating the ownership and validity. Possible values: ["VERIFIED", "UNVERIFIED", "EXEMPTED"]`, + }, + "verify_time": { + Type: schema.TypeString, + Optional: true, + Description: `Latest site verification time.`, + }, + }, + }, + }, + "target_site_id": { + Type: schema.TypeString, + Computed: true, + Description: `The unique id of the target site.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The target site's last updated time.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDiscoveryEngineTargetSiteCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + providedUriPatternProp, err := expandDiscoveryEngineTargetSiteProvidedUriPattern(d.Get("provided_uri_pattern"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("provided_uri_pattern"); !tpgresource.IsEmptyValue(reflect.ValueOf(providedUriPatternProp)) && (ok || !reflect.DeepEqual(v, providedUriPatternProp)) { + obj["providedUriPattern"] = providedUriPatternProp + } + typeProp, err := expandDiscoveryEngineTargetSiteType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + exactMatchProp, err := expandDiscoveryEngineTargetSiteExactMatch(d.Get("exact_match"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("exact_match"); !tpgresource.IsEmptyValue(reflect.ValueOf(exactMatchProp)) && (ok || !reflect.DeepEqual(v, exactMatchProp)) { + obj["exactMatch"] = exactMatchProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DiscoveryEngineBasePath}}projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}/siteSearchEngine/targetSites") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new TargetSite: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetSite: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: 
billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating TargetSite: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = DiscoveryEngineOperationWaitTime( + config, res, project, "Creating TargetSite", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create TargetSite: %s", err) + } + + // `name` is autogenerated from the api so needs to be set post-create + name, ok := res["name"] + if !ok { + respBody, ok := res["response"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + + name, ok = respBody.(map[string]interface{})["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + } + if err := d.Set("name", name.(string)); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name.(string)) + + log.Printf("[DEBUG] Finished creating TargetSite %q: %#v", d.Id(), res) + + return resourceDiscoveryEngineTargetSiteRead(d, meta) +} + +func resourceDiscoveryEngineTargetSiteRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DiscoveryEngineBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetSite: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DiscoveryEngineTargetSite %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading TargetSite: %s", err) + } + + if err := d.Set("name", flattenDiscoveryEngineTargetSiteName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetSite: %s", err) + } + if err := d.Set("type", flattenDiscoveryEngineTargetSiteType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetSite: %s", err) + } + if err := d.Set("exact_match", flattenDiscoveryEngineTargetSiteExactMatch(res["exactMatch"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetSite: %s", err) + } + if err := d.Set("generated_uri_pattern", flattenDiscoveryEngineTargetSiteGeneratedUriPattern(res["generatedUriPattern"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetSite: %s", err) + } + if err := d.Set("root_domain_uri", flattenDiscoveryEngineTargetSiteRootDomainUri(res["rootDomainUri"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetSite: %s", err) + } + if err := d.Set("site_verification_info", 
flattenDiscoveryEngineTargetSiteSiteVerificationInfo(res["siteVerificationInfo"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetSite: %s", err) + } + if err := d.Set("indexing_status", flattenDiscoveryEngineTargetSiteIndexingStatus(res["indexingStatus"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetSite: %s", err) + } + if err := d.Set("update_time", flattenDiscoveryEngineTargetSiteUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetSite: %s", err) + } + if err := d.Set("failure_reason", flattenDiscoveryEngineTargetSiteFailureReason(res["failureReason"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetSite: %s", err) + } + + return nil +} + +func resourceDiscoveryEngineTargetSiteDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetSite: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{DiscoveryEngineBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting TargetSite %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "TargetSite") + } + + err = DiscoveryEngineOperationWaitTime( + config, res, project, "Deleting TargetSite", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting TargetSite %q: %#v", d.Id(), res) + return nil +} + +func resourceDiscoveryEngineTargetSiteImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/collections/default_collection/dataStores/(?P<data_store_id>[^/]+)/siteSearchEngine/targetSites/(?P<target_site_id>[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Set name based on the components + if err := d.Set("name", "projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}/siteSearchEngine/targetSites/{{target_site_id}}"); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, d.Get("name").(string)) + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDiscoveryEngineTargetSiteName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDiscoveryEngineTargetSiteType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDiscoveryEngineTargetSiteExactMatch(v interface{}, d
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDiscoveryEngineTargetSiteGeneratedUriPattern(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDiscoveryEngineTargetSiteRootDomainUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDiscoveryEngineTargetSiteSiteVerificationInfo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["site_verification_state"] = + flattenDiscoveryEngineTargetSiteSiteVerificationInfoSiteVerificationState(original["siteVerificationState"], d, config) + transformed["verify_time"] = + flattenDiscoveryEngineTargetSiteSiteVerificationInfoVerifyTime(original["verifyTime"], d, config) + return []interface{}{transformed} +} +func flattenDiscoveryEngineTargetSiteSiteVerificationInfoSiteVerificationState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDiscoveryEngineTargetSiteSiteVerificationInfoVerifyTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDiscoveryEngineTargetSiteIndexingStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDiscoveryEngineTargetSiteUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDiscoveryEngineTargetSiteFailureReason(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["quota_failure"] = + flattenDiscoveryEngineTargetSiteFailureReasonQuotaFailure(original["quotaFailure"], d, config) + return []interface{}{transformed} +} +func flattenDiscoveryEngineTargetSiteFailureReasonQuotaFailure(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["total_required_quota"] = + flattenDiscoveryEngineTargetSiteFailureReasonQuotaFailureTotalRequiredQuota(original["totalRequiredQuota"], d, config) + return []interface{}{transformed} +} +func flattenDiscoveryEngineTargetSiteFailureReasonQuotaFailureTotalRequiredQuota(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func expandDiscoveryEngineTargetSiteProvidedUriPattern(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDiscoveryEngineTargetSiteType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDiscoveryEngineTargetSiteExactMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/discoveryengine/resource_discovery_engine_target_site_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/discoveryengine/resource_discovery_engine_target_site_sweeper.go new file mode 100644 index 00000000000..fd32891c0e9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/discoveryengine/resource_discovery_engine_target_site_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package discoveryengine + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("DiscoveryEngineTargetSite", testSweepDiscoveryEngineTargetSite) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepDiscoveryEngineTargetSite(region string) error { + resourceName := "DiscoveryEngineTargetSite" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://{{location}}-discoveryengine.googleapis.com/v1/projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}/siteSearchEngine/targetSites", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["targetSites"] + if !ok { + 
log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://{{location}}-discoveryengine.googleapis.com/v1/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_managed_zone.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_managed_zone.go index 4e484c83fb3..fa1d15719eb 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_managed_zone.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_managed_zone.go @@ -97,6 +97,7 @@ Must be unique within the project.`, }, "dnssec_config": { Type: schema.TypeList, + Computed: true, Optional: true, Description: `DNSSEC configuration`, MaxItems: 1, @@ -334,8 +335,8 @@ defined by the server`, "force_destroy": { Type: schema.TypeBool, Optional: true, - Default: false, Description: `Set this true to delete all records in the zone.`, + Default: false, }, "project": { Type: schema.TypeString, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_record_set.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_record_set.go index 0f81ac4ad1c..a9ced351104 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_record_set.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_record_set.go @@ -18,6 +18,18 @@ import ( "google.golang.org/api/dns/v1" ) +func lbTypeNoneDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + // Extract the index from the key + var index int + _, err := fmt.Sscanf(k, "routing_policy.0.primary_backup.0.primary.0.internal_load_balancers.%d.load_balancer_type", &index) + if err != nil { + return false // Key doesn't match 
the expected format + } + + // Check if the value is changing between "none" and "" (null) + return (old == "none" && new == "") || (old == "" && new == "none") +} + func rrdatasDnsDiffSuppress(k, old, new string, d *schema.ResourceData) bool { if k == "rrdatas.#" && (new == "0" || new == "") && old != new { return false @@ -263,10 +275,11 @@ var healthCheckedTargetSchema *schema.Resource = &schema.Resource{ Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "load_balancer_type": { - Type: schema.TypeString, - Required: true, - Description: `The type of load balancer. This value is case-sensitive. Possible values: ["regionalL4ilb", "regionalL7ilb", "globalL7ilb"]`, - ValidateFunc: validation.StringInSlice([]string{"regionalL4ilb", "regionalL7ilb", "globalL7ilb"}, false), + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: lbTypeNoneDiffSuppress, + Description: `The type of load balancer. This value is case-sensitive. Possible values: ["regionalL4ilb", "regionalL7ilb", "globalL7ilb"]`, + ValidateFunc: validation.StringInSlice([]string{"regionalL4ilb", "regionalL7ilb", "globalL7ilb"}, false), }, "ip_address": { Type: schema.TypeString, @@ -364,6 +377,14 @@ func resourceDnsRecordSetCreate(d *schema.ResourceData, meta interface{}) error chg.Deletions = deletions } + // Mutex + lockName := fmt.Sprintf("projects/%s/managedZones/%s/rrsets/%s/%s", project, zone, name, rType) + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + log.Printf("[DEBUG] DNS Record create request: %#v", chg) chg, err = config.NewDnsClient(userAgent).Changes.Create(project, zone, chg).Do() if err != nil { @@ -457,7 +478,9 @@ func resourceDnsRecordSetDelete(d *schema.ResourceData, meta interface{}) error return err } + name := d.Get("name").(string) zone := d.Get("managed_zone").(string) + rType := d.Get("type").(string) // NS and SOA records on the root zone must always have a value, // so we short-circuit delete this allows terraform delete to work, @@ -500,6 +523,14 @@ func resourceDnsRecordSetDelete(d *schema.ResourceData, meta interface{}) error }, } + // Mutex + lockName := fmt.Sprintf("projects/%s/managedZones/%s/rrsets/%s/%s", project, zone, name, rType) + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + log.Printf("[DEBUG] DNS Record delete request: %#v", chg) chg, err = config.NewDnsClient(userAgent).Changes.Create(project, zone, chg).Do() if err != nil { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/edgenetwork/resource_edgenetwork_network.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/edgenetwork/resource_edgenetwork_network.go index 195d72f2028..deaaba40079 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/edgenetwork/resource_edgenetwork_network.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/edgenetwork/resource_edgenetwork_network.go @@ -35,6 +35,7 @@ func ResourceEdgenetworkNetwork() *schema.Resource { return &schema.Resource{ Create: resourceEdgenetworkNetworkCreate, Read: resourceEdgenetworkNetworkRead, + Update: resourceEdgenetworkNetworkUpdate, Delete: resourceEdgenetworkNetworkDelete, Importer: &schema.ResourceImporter{ @@ -43,10 +44,12 @@ func ResourceEdgenetworkNetwork() *schema.Resource { 
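The lbTypeNoneDiffSuppress helper added above only fires for keys of the form routing_policy.0.primary_backup.0.primary.0.internal_load_balancers.N.load_balancer_type, and treats "none" and an empty (unset) value as equivalent, so the newly Optional field does not produce spurious plans. A minimal standalone sketch of that equivalence check; the helper name suppressNone and the sample inputs are illustrative, not provider code:

    package main

    import "fmt"

    // suppressNone mirrors the value comparison in lbTypeNoneDiffSuppress:
    // a transition between "none" and the empty string is not a real change.
    func suppressNone(old, new string) bool {
    	return (old == "none" && new == "") || (old == "" && new == "none")
    }

    func main() {
    	fmt.Println(suppressNone("none", ""))          // true: diff suppressed
    	fmt.Println(suppressNone("", "none"))          // true: diff suppressed
    	fmt.Println(suppressNone("regionalL4ilb", "")) // false: real change
    }
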
Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), Delete: schema.DefaultTimeout(30 * time.Minute), }, CustomizeDiff: customdiff.All( + tpgresource.SetLabelsDiff, tpgresource.DefaultProviderProject, ), @@ -76,11 +79,14 @@ func ResourceEdgenetworkNetwork() *schema.Resource { Description: `A free-text description of the resource. Max length 1024 characters.`, }, "labels": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `Labels associated with this resource.`, - Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeMap, + Optional: true, + Description: `Labels associated with this resource. + + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, }, "mtu": { Type: schema.TypeInt, @@ -96,12 +102,26 @@ func ResourceEdgenetworkNetwork() *schema.Resource { A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: '2014-10-02T15:01:23Z' and '2014-10-02T15:01:23.045123456Z'.`, }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, "name": { Type: schema.TypeString, Computed: true, Description: `The canonical name of this resource, with format 'projects/{{project}}/locations/{{location}}/zones/{{zone}}/networks/{{network_id}}'`, }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, "update_time": { Type: schema.TypeString, Computed: true, @@ -128,12 +148,6 @@ func resourceEdgenetworkNetworkCreate(d *schema.ResourceData, meta interface{}) } obj := make(map[string]interface{}) - labelsProp, err := expandEdgenetworkNetworkLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } descriptionProp, err := expandEdgenetworkNetworkDescription(d.Get("description"), d, config) if err != nil { return err @@ -146,6 +160,12 @@ func resourceEdgenetworkNetworkCreate(d *schema.ResourceData, meta interface{}) } else if v, ok := d.GetOkExists("mtu"); !tpgresource.IsEmptyValue(reflect.ValueOf(mtuProp)) && (ok || !reflect.DeepEqual(v, mtuProp)) { obj["mtu"] = mtuProp } + labelsProp, err := expandEdgenetworkNetworkEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } url, err := tpgresource.ReplaceVars(d, config, "{{EdgenetworkBasePath}}projects/{{project}}/locations/{{location}}/zones/{{zone}}/networks?networkId={{network_id}}") if err != nil { @@ -263,10 +283,21 @@ func resourceEdgenetworkNetworkRead(d *schema.ResourceData, meta interface{}) er if err := d.Set("mtu", flattenEdgenetworkNetworkMtu(res["mtu"], 
d, config)); err != nil { return fmt.Errorf("Error reading Network: %s", err) } + if err := d.Set("terraform_labels", flattenEdgenetworkNetworkTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Network: %s", err) + } + if err := d.Set("effective_labels", flattenEdgenetworkNetworkEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Network: %s", err) + } return nil } +func resourceEdgenetworkNetworkUpdate(d *schema.ResourceData, meta interface{}) error { + // Only the root field "labels" and "terraform_labels" are mutable + return resourceEdgenetworkNetworkRead(d, meta) +} + func resourceEdgenetworkNetworkDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) @@ -350,7 +381,18 @@ func flattenEdgenetworkNetworkName(v interface{}, d *schema.ResourceData, config } func flattenEdgenetworkNetworkLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed } func flattenEdgenetworkNetworkDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { @@ -382,15 +424,23 @@ func flattenEdgenetworkNetworkMtu(v interface{}, d *schema.ResourceData, config return v // let terraform core handle it otherwise } -func expandEdgenetworkNetworkLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { +func flattenEdgenetworkNetworkTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { - return map[string]string{}, nil + return v } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("terraform_labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } } - return m, nil + + return transformed +} + +func flattenEdgenetworkNetworkEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } func expandEdgenetworkNetworkDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { @@ -400,3 +450,14 @@ func expandEdgenetworkNetworkDescription(v interface{}, d tpgresource.TerraformR func expandEdgenetworkNetworkMtu(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } + +func expandEdgenetworkNetworkEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/edgenetwork/resource_edgenetwork_subnet.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/edgenetwork/resource_edgenetwork_subnet.go index d27fe211001..42ea2404929 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/edgenetwork/resource_edgenetwork_subnet.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/edgenetwork/resource_edgenetwork_subnet.go @@ -35,6 +35,7 @@ func ResourceEdgenetworkSubnet() *schema.Resource { return &schema.Resource{ Create: resourceEdgenetworkSubnetCreate, Read: resourceEdgenetworkSubnetRead, + Update: resourceEdgenetworkSubnetUpdate, Delete: resourceEdgenetworkSubnetDelete, Importer: &schema.ResourceImporter{ @@ -43,10 +44,12 @@ func ResourceEdgenetworkSubnet() *schema.Resource { Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), Delete: schema.DefaultTimeout(30 * time.Minute), }, CustomizeDiff: customdiff.All( + tpgresource.SetLabelsDiff, tpgresource.DefaultProviderProject, ), @@ -102,11 +105,14 @@ Must be of the form: 'projects/{{project}}/locations/{{location}}/zones/{{zone}} }, }, "labels": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `Labels associated with this resource.`, - Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeMap, + Optional: true, + Description: `Labels associated with this resource. + + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, }, "vlan_id": { Type: schema.TypeInt, @@ -122,6 +128,13 @@ Must be of the form: 'projects/{{project}}/locations/{{location}}/zones/{{zone}} A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: '2014-10-02T15:01:23Z' and '2014-10-02T15:01:23.045123456Z'.`, }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, "name": { Type: schema.TypeString, Computed: true, @@ -133,6 +146,13 @@ fractional digits. 
Examples: '2014-10-02T15:01:23Z' and '2014-10-02T15:01:23.045 Computed: true, Description: `Current stage of the resource to the device by config push.`, }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, "update_time": { Type: schema.TypeString, Computed: true, @@ -159,12 +179,6 @@ func resourceEdgenetworkSubnetCreate(d *schema.ResourceData, meta interface{}) e } obj := make(map[string]interface{}) - labelsProp, err := expandEdgenetworkSubnetLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } descriptionProp, err := expandEdgenetworkSubnetDescription(d.Get("description"), d, config) if err != nil { return err @@ -195,6 +209,12 @@ func resourceEdgenetworkSubnetCreate(d *schema.ResourceData, meta interface{}) e } else if v, ok := d.GetOkExists("vlan_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(vlanIdProp)) && (ok || !reflect.DeepEqual(v, vlanIdProp)) { obj["vlanId"] = vlanIdProp } + labelsProp, err := expandEdgenetworkSubnetEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } url, err := tpgresource.ReplaceVars(d, config, "{{EdgenetworkBasePath}}projects/{{project}}/locations/{{location}}/zones/{{zone}}/subnets?subnetId={{subnet_id}}") if err != nil { @@ -324,10 +344,21 @@ func resourceEdgenetworkSubnetRead(d *schema.ResourceData, meta interface{}) err if err := d.Set("state", flattenEdgenetworkSubnetState(res["state"], d, config)); err != nil { return fmt.Errorf("Error reading Subnet: %s", err) } + if err := d.Set("terraform_labels", flattenEdgenetworkSubnetTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Subnet: %s", err) + } + if err := d.Set("effective_labels", flattenEdgenetworkSubnetEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Subnet: %s", err) + } return nil } +func resourceEdgenetworkSubnetUpdate(d *schema.ResourceData, meta interface{}) error { + // Only the root field "labels" and "terraform_labels" are mutable + return resourceEdgenetworkSubnetRead(d, meta) +} + func resourceEdgenetworkSubnetDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) @@ -411,7 +442,18 @@ func flattenEdgenetworkSubnetName(v interface{}, d *schema.ResourceData, config } func flattenEdgenetworkSubnetLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed } func flattenEdgenetworkSubnetDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { @@ -462,15 +504,23 @@ func flattenEdgenetworkSubnetState(v interface{}, d *schema.ResourceData, config return v } -func 
expandEdgenetworkSubnetLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { +func flattenEdgenetworkSubnetTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { - return map[string]string{}, nil + return v } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("terraform_labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } } - return m, nil + + return transformed +} + +func flattenEdgenetworkSubnetEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } func expandEdgenetworkSubnetDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { @@ -496,3 +546,14 @@ func expandEdgenetworkSubnetIpv6Cidr(v interface{}, d tpgresource.TerraformResou func expandEdgenetworkSubnetVlanId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } + +func expandEdgenetworkSubnetEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/essentialcontacts/resource_essential_contacts_contact.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/essentialcontacts/resource_essential_contacts_contact.go index ddf55b54509..f2644eacd5d 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/essentialcontacts/resource_essential_contacts_contact.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/essentialcontacts/resource_essential_contacts_contact.go @@ -334,6 +334,13 @@ func resourceEssentialContactsContactImport(d *schema.ResourceData, meta interfa } d.SetId(id) + // Split resource name into tokens + nameTokens := strings.SplitAfterN(d.Id(), "/", 3) + + if err := d.Set("parent", nameTokens[0]+strings.Trim(nameTokens[1], "/")); err != nil { + return nil, fmt.Errorf("error getting parent for the contact : %s", err) + } + return []*schema.ResourceData{d}, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/resource_filestore_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/resource_filestore_instance.go index ad045853f81..5abbfb5e1f1 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/resource_filestore_instance.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/resource_filestore_instance.go @@ -217,6 +217,16 @@ addresses reserved for this instance.`, Description: `The service tier of the instance. 
Possible values include: STANDARD, PREMIUM, BASIC_HDD, BASIC_SSD, HIGH_SCALE_SSD, ZONAL, REGIONAL and ENTERPRISE`, }, + "deletion_protection_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Indicates whether the instance is protected against deletion.`, + }, + "deletion_protection_reason": { + Type: schema.TypeString, + Optional: true, + Description: `The reason for enabling deletion protection.`, + }, "description": { Type: schema.TypeString, Optional: true, @@ -246,6 +256,17 @@ Please refer to the field 'effective_labels' for all of the labels present on th Description: `The name of the location of the instance. This can be a region for ENTERPRISE tier instances.`, ExactlyOneOf: []string{}, }, + "protocol": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"NFS_V3", "NFS_V4_1", ""}), + Description: `Either NFSv3, for using NFS version 3 as file sharing protocol, +or NFSv4.1, for using NFS version 4.1 as file sharing protocol. +NFSv4.1 can be used with HIGH_SCALE_SSD, ZONAL, REGIONAL and ENTERPRISE. +The default is NFSv3. Default value: "NFS_V3" Possible values: ["NFS_V3", "NFS_V4_1"]`, + Default: "NFS_V3", + }, "zone": { Type: schema.TypeString, Computed: true, @@ -310,6 +331,12 @@ func resourceFilestoreInstanceCreate(d *schema.ResourceData, meta interface{}) e } else if v, ok := d.GetOkExists("tier"); !tpgresource.IsEmptyValue(reflect.ValueOf(tierProp)) && (ok || !reflect.DeepEqual(v, tierProp)) { obj["tier"] = tierProp } + protocolProp, err := expandFilestoreInstanceProtocol(d.Get("protocol"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("protocol"); !tpgresource.IsEmptyValue(reflect.ValueOf(protocolProp)) && (ok || !reflect.DeepEqual(v, protocolProp)) { + obj["protocol"] = protocolProp + } fileSharesProp, err := expandFilestoreInstanceFileShares(d.Get("file_shares"), d, config) if err != nil { return err @@ -328,6 +355,18 @@ func resourceFilestoreInstanceCreate(d *schema.ResourceData, meta interface{}) e } else if v, ok := d.GetOkExists("kms_key_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(kmsKeyNameProp)) && (ok || !reflect.DeepEqual(v, kmsKeyNameProp)) { obj["kmsKeyName"] = kmsKeyNameProp } + deletionProtectionEnabledProp, err := expandFilestoreInstanceDeletionProtectionEnabled(d.Get("deletion_protection_enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deletion_protection_enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(deletionProtectionEnabledProp)) && (ok || !reflect.DeepEqual(v, deletionProtectionEnabledProp)) { + obj["deletionProtectionEnabled"] = deletionProtectionEnabledProp + } + deletionProtectionReasonProp, err := expandFilestoreInstanceDeletionProtectionReason(d.Get("deletion_protection_reason"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deletion_protection_reason"); !tpgresource.IsEmptyValue(reflect.ValueOf(deletionProtectionReasonProp)) && (ok || !reflect.DeepEqual(v, deletionProtectionReasonProp)) { + obj["deletionProtectionReason"] = deletionProtectionReasonProp + } labelsProp, err := expandFilestoreInstanceEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err @@ -471,6 +510,9 @@ func resourceFilestoreInstanceRead(d *schema.ResourceData, meta interface{}) err if err := d.Set("tier", flattenFilestoreInstanceTier(res["tier"], d, config)); err != nil { return fmt.Errorf("Error reading Instance: %s", err) } + if err := d.Set("protocol", 
flattenFilestoreInstanceProtocol(res["protocol"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } if err := d.Set("labels", flattenFilestoreInstanceLabels(res["labels"], d, config)); err != nil { return fmt.Errorf("Error reading Instance: %s", err) } @@ -486,6 +528,12 @@ func resourceFilestoreInstanceRead(d *schema.ResourceData, meta interface{}) err if err := d.Set("kms_key_name", flattenFilestoreInstanceKmsKeyName(res["kmsKeyName"], d, config)); err != nil { return fmt.Errorf("Error reading Instance: %s", err) } + if err := d.Set("deletion_protection_enabled", flattenFilestoreInstanceDeletionProtectionEnabled(res["deletionProtectionEnabled"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("deletion_protection_reason", flattenFilestoreInstanceDeletionProtectionReason(res["deletionProtectionReason"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } if err := d.Set("terraform_labels", flattenFilestoreInstanceTerraformLabels(res["labels"], d, config)); err != nil { return fmt.Errorf("Error reading Instance: %s", err) } @@ -524,6 +572,18 @@ func resourceFilestoreInstanceUpdate(d *schema.ResourceData, meta interface{}) e } else if v, ok := d.GetOkExists("file_shares"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fileSharesProp)) { obj["fileShares"] = fileSharesProp } + deletionProtectionEnabledProp, err := expandFilestoreInstanceDeletionProtectionEnabled(d.Get("deletion_protection_enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deletion_protection_enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, deletionProtectionEnabledProp)) { + obj["deletionProtectionEnabled"] = deletionProtectionEnabledProp + } + deletionProtectionReasonProp, err := expandFilestoreInstanceDeletionProtectionReason(d.Get("deletion_protection_reason"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deletion_protection_reason"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, deletionProtectionReasonProp)) { + obj["deletionProtectionReason"] = deletionProtectionReasonProp + } labelsProp, err := expandFilestoreInstanceEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err @@ -548,6 +608,14 @@ func resourceFilestoreInstanceUpdate(d *schema.ResourceData, meta interface{}) e updateMask = append(updateMask, "fileShares") } + if d.HasChange("deletion_protection_enabled") { + updateMask = append(updateMask, "deletionProtectionEnabled") + } + + if d.HasChange("deletion_protection_reason") { + updateMask = append(updateMask, "deletionProtectionReason") + } + if d.HasChange("effective_labels") { updateMask = append(updateMask, "labels") } @@ -684,6 +752,14 @@ func flattenFilestoreInstanceTier(v interface{}, d *schema.ResourceData, config return v } +func flattenFilestoreInstanceProtocol(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { + return "NFS_V3" + } + + return v +} + func flattenFilestoreInstanceLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v @@ -867,6 +943,14 @@ func flattenFilestoreInstanceKmsKeyName(v interface{}, d *schema.ResourceData, c return v } +func flattenFilestoreInstanceDeletionProtectionEnabled(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFilestoreInstanceDeletionProtectionReason(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenFilestoreInstanceTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v @@ -894,6 +978,10 @@ func expandFilestoreInstanceTier(v interface{}, d tpgresource.TerraformResourceD return v, nil } +func expandFilestoreInstanceProtocol(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandFilestoreInstanceFileShares(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) @@ -1093,6 +1181,14 @@ func expandFilestoreInstanceKmsKeyName(v interface{}, d tpgresource.TerraformRes return v, nil } +func expandFilestoreInstanceDeletionProtectionEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandFilestoreInstanceDeletionProtectionReason(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandFilestoreInstanceEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_database.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_database.go index 8ccdc5cff3a..7fee90ee16e 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_database.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_database.go @@ -88,6 +88,50 @@ for information about how to choose. Possible values: ["FIRESTORE_NATIVE", "DATA ValidateFunc: verify.ValidateEnum([]string{"ENABLED", "DISABLED", ""}), Description: `The App Engine integration mode to use for this database. Possible values: ["ENABLED", "DISABLED"]`, }, + "cmek_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The CMEK (Customer Managed Encryption Key) configuration for a Firestore +database. If not present, the database is secured by the default Google +encryption key.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource ID of a Cloud KMS key. If set, the database created will +be a Customer-managed Encryption Key (CMEK) database encrypted with +this key. This feature is allowlist only in initial launch. + +Only keys in the same location as this database are allowed to be used +for encryption. For Firestore's nam5 multi-region, this corresponds to Cloud KMS +multi-region us. For Firestore's eur3 multi-region, this corresponds to +Cloud KMS multi-region europe. See https://cloud.google.com/kms/docs/locations. 
+ +This value should be the KMS key resource ID in the format of +'projects/{project_id}/locations/{kms_location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}'. +How to retrieve this resource ID is listed at +https://cloud.google.com/kms/docs/getting-resource-ids#getting_the_id_for_a_key_and_version.`, + }, + "active_key_version": { + Type: schema.TypeList, + Computed: true, + Description: `Currently in-use KMS key versions (https://cloud.google.com/kms/docs/resource-hierarchy#key_versions). +During key rotation (https://cloud.google.com/kms/docs/key-rotation), there can be +multiple in-use key versions. + +The expected format is +'projects/{project_id}/locations/{kms_location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{key_version}'.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, "concurrency_mode": { Type: schema.TypeString, Computed: true, @@ -164,12 +208,12 @@ A duration in seconds with up to nine fractional digits, ending with 's'. Exampl "deletion_policy": { Type: schema.TypeString, Optional: true, - Default: "ABANDON", Description: `Deletion behavior for this database. If the deletion policy is 'ABANDON', the database will be removed from Terraform state but not deleted from Google Cloud upon destruction. If the deletion policy is 'DELETE', the database will both be removed from Terraform state and deleted from Google Cloud upon destruction. The default value is 'ABANDON'. See also 'delete_protection'.`, + Default: "ABANDON", }, "project": { Type: schema.TypeString, @@ -238,6 +282,12 @@ func resourceFirestoreDatabaseCreate(d *schema.ResourceData, meta interface{}) e } else if v, ok := d.GetOkExists("etag"); !tpgresource.IsEmptyValue(reflect.ValueOf(etagProp)) && (ok || !reflect.DeepEqual(v, etagProp)) { obj["etag"] = etagProp } + cmekConfigProp, err := expandFirestoreDatabaseCmekConfig(d.Get("cmek_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("cmek_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(cmekConfigProp)) && (ok || !reflect.DeepEqual(v, cmekConfigProp)) { + obj["cmekConfig"] = cmekConfigProp + } url, err := tpgresource.ReplaceVars(d, config, "{{FirestoreBasePath}}projects/{{project}}/databases?databaseId={{name}}") if err != nil { @@ -399,6 +449,9 @@ func resourceFirestoreDatabaseRead(d *schema.ResourceData, meta interface{}) err if err := d.Set("earliest_version_time", flattenFirestoreDatabaseEarliestVersionTime(res["earliestVersionTime"], d, config)); err != nil { return fmt.Errorf("Error reading Database: %s", err) } + if err := d.Set("cmek_config", flattenFirestoreDatabaseCmekConfig(res["cmekConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } return nil } @@ -670,6 +723,29 @@ func flattenFirestoreDatabaseEarliestVersionTime(v interface{}, d *schema.Resour return v } +func flattenFirestoreDatabaseCmekConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["kms_key_name"] = + flattenFirestoreDatabaseCmekConfigKmsKeyName(original["kmsKeyName"], d, config) + transformed["active_key_version"] = + flattenFirestoreDatabaseCmekConfigActiveKeyVersion(original["activeKeyVersion"], d, config) + return []interface{}{transformed} +} +func flattenFirestoreDatabaseCmekConfigKmsKeyName(v interface{}, d *schema.ResourceData, config
*transport_tpg.Config) interface{} { + return v +} + +func flattenFirestoreDatabaseCmekConfigActiveKeyVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func expandFirestoreDatabaseName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return tpgresource.ReplaceVars(d, config, "projects/{{project}}/databases/{{name}}") } @@ -701,3 +777,37 @@ func expandFirestoreDatabaseDeleteProtectionState(v interface{}, d tpgresource.T func expandFirestoreDatabaseEtag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } + +func expandFirestoreDatabaseCmekConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKmsKeyName, err := expandFirestoreDatabaseCmekConfigKmsKeyName(original["kms_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyName"] = transformedKmsKeyName + } + + transformedActiveKeyVersion, err := expandFirestoreDatabaseCmekConfigActiveKeyVersion(original["active_key_version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedActiveKeyVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["activeKeyVersion"] = transformedActiveKeyVersion + } + + return transformed, nil +} + +func expandFirestoreDatabaseCmekConfigKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandFirestoreDatabaseCmekConfigActiveKeyVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_database_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_database_sweeper.go index 4537750529a..8f18b79c6fc 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_database_sweeper.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_database_sweeper.go @@ -19,9 +19,7 @@ func init() { } // At the time of writing, the CI only passes us-central1 as the region -// But all Firestore examples use nam5, so we will force that instead func testSweepFirestoreDatabase(region string) error { - actualRegion := "nam5" resourceName := "FirestoreDatabase" log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) @@ -40,12 +38,12 @@ func testSweepFirestoreDatabase(region string) error { t := &testing.T{} billingId := envvar.GetTestBillingAccountFromEnv(t) - // Set up variables to replace in list template + // Setup variables to replace in list template d := &tpgresource.ResourceDataMock{ FieldsInSchema: map[string]interface{}{ "project": config.Project, - "region": actualRegion, - "location": actualRegion, + "region": region, + "location": region, "zone": "-", 
"billing_account": billingId, }, @@ -90,7 +88,7 @@ func testSweepFirestoreDatabase(region string) error { name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) // Skip resources that shouldn't be sweeped - if !sweeper.IsSweepableTestResource(name) && name != "(default)" { + if !sweeper.IsSweepableTestResource(name) { nonPrefixCount++ continue } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_document.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_document.go index 1b97ae29179..41b3e8eb1a6 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_document.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_document.go @@ -60,11 +60,13 @@ func ResourceFirestoreDocument() *schema.Resource { "collection": { Type: schema.TypeString, Required: true, + ForceNew: true, Description: `The collection ID, relative to database. For example: chatrooms or chatrooms/my-document/private-messages.`, }, "document_id": { Type: schema.TypeString, Required: true, + ForceNew: true, Description: `The client-assigned document ID to use for this document during creation.`, }, "fields": { @@ -77,6 +79,7 @@ func ResourceFirestoreDocument() *schema.Resource { "database": { Type: schema.TypeString, Optional: true, + ForceNew: true, Description: `The Firestore database id. Defaults to '"(default)"'.`, Default: "(default)", }, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_field.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_field.go index 5f17e6bd4e0..a8875e33e86 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_field.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_field.go @@ -22,6 +22,7 @@ import ( "log" "net/http" "reflect" + "regexp" "strings" "time" @@ -58,16 +59,19 @@ func ResourceFirestoreField() *schema.Resource { "collection": { Type: schema.TypeString, Required: true, + ForceNew: true, Description: `The id of the collection group to configure.`, }, "field": { Type: schema.TypeString, Required: true, + ForceNew: true, Description: `The id of the field to configure.`, }, "database": { Type: schema.TypeString, Optional: true, + ForceNew: true, Description: `The Firestore database id. Defaults to '"(default)"'.`, Default: "(default)", }, @@ -475,24 +479,26 @@ func resourceFirestoreFieldImport(d *schema.ResourceData, meta interface{}) ([]* return nil, err } - stringParts := strings.Split(d.Get("name").(string), "/") - if len(stringParts) != 8 { - return nil, fmt.Errorf( - "Saw %s when the name is expected to have shape %s", - d.Get("name"), - "projects/{{project}}/databases/{{database}}/collectionGroups/{{collection}}/indexes/{{server_generated_id}}", - ) + // Re-populate split fields from the name. 
+ re := regexp.MustCompile("^projects/([^/]+)/databases/([^/]+)/collectionGroups/([^/]+)/fields/(.+)$") + match := re.FindStringSubmatch(d.Get("name").(string)) + if len(match) > 0 { + if err := d.Set("project", match[1]); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("database", match[2]); err != nil { + return nil, fmt.Errorf("Error setting database: %s", err) + } + if err := d.Set("collection", match[3]); err != nil { + return nil, fmt.Errorf("Error setting collection: %s", err) + } + if err := d.Set("field", match[4]); err != nil { + return nil, fmt.Errorf("Error setting field: %s", err) + } + } else { + return nil, fmt.Errorf("import did not match the regex ^projects/([^/]+)/databases/([^/]+)/collectionGroups/([^/]+)/fields/(.+)$") } - if err := d.Set("project", stringParts[1]); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("database", stringParts[3]); err != nil { - return nil, fmt.Errorf("Error setting database: %s", err) - } - if err := d.Set("collection", stringParts[5]); err != nil { - return nil, fmt.Errorf("Error setting collection: %s", err) - } return []*schema.ResourceData{d}, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub/resource_gke_hub_feature_membership.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub/resource_gke_hub_feature_membership.go index 1fdce92b3dd..702a82f2a0e 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub/resource_gke_hub_feature_membership.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub/resource_gke_hub_feature_membership.go @@ -146,10 +146,17 @@ func GkeHubFeatureMembershipConfigmanagementSchema() *schema.Resource { Elem: GkeHubFeatureMembershipConfigmanagementHierarchyControllerSchema(), }, + "management": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Set this field to MANAGEMENT_AUTOMATIC to enable Config Sync auto-upgrades, and set this field to MANAGEMENT_MANUAL or MANAGEMENT_UNSPECIFIED to disable Config Sync auto-upgrades.", + }, + "policy_controller": { Type: schema.TypeList, Optional: true, - Description: "Policy Controller configuration for the cluster.", + Description: "**DEPRECATED** Configuring Policy Controller through the configmanagement feature is no longer recommended. Use the policycontroller feature instead.", MaxItems: 1, Elem: GkeHubFeatureMembershipConfigmanagementPolicyControllerSchema(), }, @@ -179,6 +186,12 @@ func GkeHubFeatureMembershipConfigmanagementBinauthzSchema() *schema.Resource { func GkeHubFeatureMembershipConfigmanagementConfigSyncSchema() *schema.Resource { return &schema.Resource{ Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if they exist. If set to false, all other ConfigSync fields will be ignored, and ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depending on the presence of the git or oci field.", + }, + "git": { Type: schema.TypeList, Optional: true, @@ -328,7 +341,7 @@ func GkeHubFeatureMembershipConfigmanagementHierarchyControllerSchema() *schema.
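The reworked Firestore field importer above replaces positional string splitting with a single anchored regex, which also repopulates the field id (the old code never set field). A small standalone sketch of how that pattern decomposes an import name; the sample ID is hypothetical:

    package main

    import (
    	"fmt"
    	"regexp"
    )

    func main() {
    	// Same pattern as the importer above; the name below is illustrative.
    	re := regexp.MustCompile("^projects/([^/]+)/databases/([^/]+)/collectionGroups/([^/]+)/fields/(.+)$")
    	name := "projects/my-proj/databases/(default)/collectionGroups/chatrooms/fields/title"
    	m := re.FindStringSubmatch(name)
    	if len(m) == 0 {
    		fmt.Println("no match")
    		return
    	}
    	fmt.Printf("project=%s database=%s collection=%s field=%s\n", m[1], m[2], m[3], m[4])
    }
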
"enabled": { Type: schema.TypeBool, Optional: true, - Description: "Whether Hierarchy Controller is enabled in this cluster.", + Description: "**DEPRECATED** Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead.", }, }, } @@ -985,6 +998,7 @@ func expandGkeHubFeatureMembershipConfigmanagement(o interface{}) *gkehub.Featur Binauthz: expandGkeHubFeatureMembershipConfigmanagementBinauthz(obj["binauthz"]), ConfigSync: expandGkeHubFeatureMembershipConfigmanagementConfigSync(obj["config_sync"]), HierarchyController: expandGkeHubFeatureMembershipConfigmanagementHierarchyController(obj["hierarchy_controller"]), + Management: gkehub.FeatureMembershipConfigmanagementManagementEnumRef(obj["management"].(string)), PolicyController: expandGkeHubFeatureMembershipConfigmanagementPolicyController(obj["policy_controller"]), Version: dcl.StringOrNil(obj["version"].(string)), } @@ -998,6 +1012,7 @@ func flattenGkeHubFeatureMembershipConfigmanagement(obj *gkehub.FeatureMembershi "binauthz": flattenGkeHubFeatureMembershipConfigmanagementBinauthz(obj.Binauthz), "config_sync": flattenGkeHubFeatureMembershipConfigmanagementConfigSync(obj.ConfigSync), "hierarchy_controller": flattenGkeHubFeatureMembershipConfigmanagementHierarchyController(obj.HierarchyController), + "management": obj.Management, "policy_controller": flattenGkeHubFeatureMembershipConfigmanagementPolicyController(obj.PolicyController), "version": obj.Version, } @@ -1042,6 +1057,7 @@ func expandGkeHubFeatureMembershipConfigmanagementConfigSync(o interface{}) *gke } obj := objArr[0].(map[string]interface{}) return &gkehub.FeatureMembershipConfigmanagementConfigSync{ + Enabled: dcl.Bool(obj["enabled"].(bool)), Git: expandGkeHubFeatureMembershipConfigmanagementConfigSyncGit(obj["git"]), MetricsGcpServiceAccountEmail: dcl.String(obj["metrics_gcp_service_account_email"].(string)), Oci: expandGkeHubFeatureMembershipConfigmanagementConfigSyncOci(obj["oci"]), @@ -1055,6 +1071,7 @@ func flattenGkeHubFeatureMembershipConfigmanagementConfigSync(obj *gkehub.Featur return nil } transformed := map[string]interface{}{ + "enabled": obj.Enabled, "git": flattenGkeHubFeatureMembershipConfigmanagementConfigSyncGit(obj.Git), "metrics_gcp_service_account_email": obj.MetricsGcpServiceAccountEmail, "oci": flattenGkeHubFeatureMembershipConfigmanagementConfigSyncOci(obj.Oci), diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub/resource_gke_hub_membership.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub/resource_gke_hub_membership.go index a2777bd0494..02e7ed867a3 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub/resource_gke_hub_membership.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub/resource_gke_hub_membership.go @@ -95,7 +95,7 @@ https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity`, Required: true, ForceNew: true, Description: `A JSON Web Token (JWT) issuer URI. 'issuer' must start with 'https://' and // be a valid -with length <2000 characters. For example: 'https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster' (must be 'locations' rather than 'zones'). 
If the cluster is provisioned with Terraform, this is '"https://container.googleapis.com/v1/${google_container_cluster.my-cluster.id}"'.`, +with length <2000 characters. For example: 'https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster'. If the cluster is provisioned with Terraform, this is '"https://container.googleapis.com/v1/${google_container_cluster.my-cluster.id}"'.`, }, }, }, @@ -122,7 +122,7 @@ with length <2000 characters. For example: 'https://container.googleapis.com/v1/ ForceNew: true, DiffSuppressFunc: suppressGkeHubEndpointSelfLinkDiff, Description: `Self-link of the GCP resource for the GKE cluster. -For example: '//container.googleapis.com/projects/my-project/zones/us-west1-a/clusters/my-cluster'. +For example: '//container.googleapis.com/projects/my-project/locations/us-west1-a/clusters/my-cluster'. It can be at the most 1000 characters in length. If the cluster is provisioned with Terraform, this can be '"//container.googleapis.com/${google_container_cluster.my-cluster.id}"' or 'google_container_cluster.my-cluster.id'.`, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub2/data_source_google_gke_hub_membership_binding.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub2/data_source_google_gke_hub_membership_binding.go new file mode 100644 index 00000000000..c23fcd73445 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub2/data_source_google_gke_hub_membership_binding.go @@ -0,0 +1,53 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package gkehub2 + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleGkeHubMembershipBinding() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceGKEHub2MembershipBinding().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "membership_binding_id") + tpgresource.AddRequiredFieldsToSchema(dsSchema, "membership_id") + tpgresource.AddRequiredFieldsToSchema(dsSchema, "location") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceGoogleGkeHubMembershipBindingRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleGkeHubMembershipBindingRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/memberships/{{membership_id}}/bindings/{{membership_binding_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + + d.SetId(id) + + err = resourceGKEHub2MembershipBindingRead(d, meta) + if err != nil { + return err + } + + if err := tpgresource.SetDataSourceLabels(d); err != nil { + return err + } + + if err := tpgresource.SetDataSourceAnnotations(d); err != nil { + return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found", id) + } + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub2/resource_gke_hub_feature.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub2/resource_gke_hub_feature.go 
index 372f0e30d77..98bbaa0c1f4 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub2/resource_gke_hub_feature.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub2/resource_gke_hub_feature.go @@ -173,8 +173,8 @@ func ResourceGKEHub2Feature() *schema.Resource { "version": { Type: schema.TypeString, Optional: true, - Deprecated: "The `configmanagement.config_sync.oci.version` field is deprecated and will be removed in a future major release. Please use `configmanagement.version` field to specify the version of ACM installed instead.", - Description: `Version of ACM installed`, + Deprecated: "The `configmanagement.config_sync.oci.version` field is deprecated and will be removed in a future major release. Please use `configmanagement.version` field to specify the version of Config Sync installed instead.", + Description: `Version of Config Sync installed`, }, }, }, @@ -192,10 +192,16 @@ func ResourceGKEHub2Feature() *schema.Resource { }, }, }, + "management": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MANAGEMENT_UNSPECIFIED", "MANAGEMENT_AUTOMATIC", "MANAGEMENT_MANUAL", ""}), + Description: `Set this field to MANAGEMENT_AUTOMATIC to enable Config Sync auto-upgrades, and set this field to MANAGEMENT_MANUAL or MANAGEMENT_UNSPECIFIED to disable Config Sync auto-upgrades. Possible values: ["MANAGEMENT_UNSPECIFIED", "MANAGEMENT_AUTOMATIC", "MANAGEMENT_MANUAL"]`, + }, "version": { Type: schema.TypeString, Optional: true, - Description: `Version of ACM installed`, + Description: `Version of Config Sync installed`, }, }, }, @@ -1340,6 +1346,8 @@ func flattenGKEHub2FeatureFleetDefaultMemberConfigConfigmanagement(v interface{} transformed := make(map[string]interface{}) transformed["version"] = flattenGKEHub2FeatureFleetDefaultMemberConfigConfigmanagementVersion(original["version"], d, config) + transformed["management"] = + flattenGKEHub2FeatureFleetDefaultMemberConfigConfigmanagementManagement(original["management"], d, config) transformed["config_sync"] = flattenGKEHub2FeatureFleetDefaultMemberConfigConfigmanagementConfigSync(original["configSync"], d, config) return []interface{}{transformed} @@ -1348,6 +1356,10 @@ func flattenGKEHub2FeatureFleetDefaultMemberConfigConfigmanagementVersion(v inte return v } +func flattenGKEHub2FeatureFleetDefaultMemberConfigConfigmanagementManagement(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenGKEHub2FeatureFleetDefaultMemberConfigConfigmanagementConfigSync(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil @@ -2234,6 +2246,13 @@ func expandGKEHub2FeatureFleetDefaultMemberConfigConfigmanagement(v interface{}, transformed["version"] = transformedVersion } + transformedManagement, err := expandGKEHub2FeatureFleetDefaultMemberConfigConfigmanagementManagement(original["management"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedManagement); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["management"] = transformedManagement + } + transformedConfigSync, err := expandGKEHub2FeatureFleetDefaultMemberConfigConfigmanagementConfigSync(original["config_sync"], d, config) if err != nil { return nil, err @@ -2248,6 +2267,10 @@ func expandGKEHub2FeatureFleetDefaultMemberConfigConfigmanagementVersion(v inter return v, nil } 
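+// expandGKEHub2FeatureFleetDefaultMemberConfigConfigmanagementManagement passes the management enum string through to the API unchanged; the schema-level verify.ValidateEnum check already restricts it to MANAGEMENT_UNSPECIFIED, MANAGEMENT_AUTOMATIC, or MANAGEMENT_MANUAL.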
+func expandGKEHub2FeatureFleetDefaultMemberConfigConfigmanagementManagement(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandGKEHub2FeatureFleetDefaultMemberConfigConfigmanagementConfigSync(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkeonprem/resource_gkeonprem_bare_metal_admin_cluster.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkeonprem/resource_gkeonprem_bare_metal_admin_cluster.go index 5d17207e35f..ec0b435a20b 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkeonprem/resource_gkeonprem_bare_metal_admin_cluster.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkeonprem/resource_gkeonprem_bare_metal_admin_cluster.go @@ -136,9 +136,9 @@ label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided. For more information, including usage and the valid values, see: - http://kubernetes.io/v1.1/docs/user-guide/labels.html + - http://kubernetes.io/v1.1/docs/user-guide/labels.html An object containing a list of "key": value pairs. -Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, +For example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, Elem: &schema.Schema{Type: schema.TypeString}, }, "node_configs": { @@ -157,9 +157,9 @@ label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided. For more information, including usage and the valid values, see: - http://kubernetes.io/v1.1/docs/user-guide/labels.html + - http://kubernetes.io/v1.1/docs/user-guide/labels.html An object containing a list of "key": value pairs. -Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, +For example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, Elem: &schema.Schema{Type: schema.TypeString}, }, "node_ip": { @@ -393,15 +393,15 @@ assigned to the node will be derived from this parameter.`, Type: schema.TypeString, Required: true, Description: `Specifies the address of your proxy server. -Examples: http://domain +For example: http://domain WARNING: Do not provide credentials in the format -http://(username:password@)domain these will be rejected by the server.`, +of http://(username:password@)domain; these will be rejected by the server.`, }, "no_proxy": { Type: schema.TypeList, Optional: true, Description: `A list of IPs, hostnames, and domains that should skip the proxy.
-Examples: ["127.0.0.1", "example.com", ".corp", "localhost"].`, +For example: ["127.0.0.1", "example.com", ".corp", "localhost"].`, Elem: &schema.Schema{ Type: schema.TypeString, }, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkeonprem/resource_gkeonprem_bare_metal_cluster.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkeonprem/resource_gkeonprem_bare_metal_cluster.go index afe6962e1a0..54238db39f5 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkeonprem/resource_gkeonprem_bare_metal_cluster.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkeonprem/resource_gkeonprem_bare_metal_cluster.go @@ -101,9 +101,9 @@ label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided. For more information, including usage and the valid values, see: - http://kubernetes.io/v1.1/docs/user-guide/labels.html + - http://kubernetes.io/v1.1/docs/user-guide/labels.html An object containing a list of "key": value pairs. -Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, +For example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, Elem: &schema.Schema{Type: schema.TypeString}, }, "node_configs": { @@ -122,7 +122,7 @@ label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided. For more information, including usage and the valid values, see: - http://kubernetes.io/v1.1/docs/user-guide/labels.html + - http://kubernetes.io/v1.1/docs/user-guide/labels.html An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, Elem: &schema.Schema{Type: schema.TypeString}, @@ -391,9 +391,9 @@ label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided. For more information, including usage and the valid values, see: - http://kubernetes.io/v1.1/docs/user-guide/labels.html + - http://kubernetes.io/v1.1/docs/user-guide/labels.html An object containing a list of "key": value pairs. -Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, +For example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, Elem: &schema.Schema{Type: schema.TypeString}, }, "node_configs": { @@ -412,9 +412,9 @@ label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided. For more information, including usage and the valid values, see: - http://kubernetes.io/v1.1/docs/user-guide/labels.html + - http://kubernetes.io/v1.1/docs/user-guide/labels.html An object containing a list of "key": value pairs. -Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, +For example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, Elem: &schema.Schema{Type: schema.TypeString}, }, "node_ip": { @@ -549,9 +549,9 @@ label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided. 
For more information, including usage and the valid values, see: - http://kubernetes.io/v1.1/docs/user-guide/labels.html + - http://kubernetes.io/v1.1/docs/user-guide/labels.html An object containing a list of "key": value pairs. -Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, +For example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, Elem: &schema.Schema{Type: schema.TypeString}, }, "node_configs": { @@ -570,9 +570,9 @@ label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided. For more information, including usage and the valid values, see: - http://kubernetes.io/v1.1/docs/user-guide/labels.html + - http://kubernetes.io/v1.1/docs/user-guide/labels.html An object containing a list of "key": value pairs. -Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, +For example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, Elem: &schema.Schema{Type: schema.TypeString}, }, "node_ip": { @@ -930,15 +930,15 @@ bare metal machines.`, Type: schema.TypeString, Required: true, Description: `Specifies the address of your proxy server. -Examples: http://domain +For example: http://domain WARNING: Do not provide credentials in the format -http://(username:password@)domain these will be rejected by the server.`, +of http://(username:password@)domain; these will be rejected by the server.`, }, "no_proxy": { Type: schema.TypeList, Optional: true, Description: `A list of IPs, hostnames, and domains that should skip the proxy. -Examples: ["127.0.0.1", "example.com", ".corp", "localhost"].`, +For example: ["127.0.0.1", "example.com", ".corp", "localhost"].`, Elem: &schema.Schema{ Type: schema.TypeString, }, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkeonprem/resource_gkeonprem_bare_metal_node_pool.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkeonprem/resource_gkeonprem_bare_metal_node_pool.go index a2ac54718c9..6222fdfc82b 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkeonprem/resource_gkeonprem_bare_metal_node_pool.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkeonprem/resource_gkeonprem_bare_metal_node_pool.go @@ -98,9 +98,9 @@ label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided. For more information, including usage and the valid values, see: - http://kubernetes.io/v1.1/docs/user-guide/labels.html + - http://kubernetes.io/v1.1/docs/user-guide/labels.html An object containing a list of "key": value pairs. -Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, +For example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, Elem: &schema.Schema{Type: schema.TypeString}, }, "node_ip": { @@ -123,9 +123,9 @@ label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided. For more information, including usage and the valid values, see: - http://kubernetes.io/v1.1/docs/user-guide/labels.html + - http://kubernetes.io/v1.1/docs/user-guide/labels.html An object containing a list of "key": value pairs.
-Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, +For example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, Elem: &schema.Schema{Type: schema.TypeString}, }, "operating_system": { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkeonprem/resource_gkeonprem_vmware_node_pool.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkeonprem/resource_gkeonprem_vmware_node_pool.go index 9ab00492f86..c306a8b5eec 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkeonprem/resource_gkeonprem_vmware_node_pool.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkeonprem/resource_gkeonprem_vmware_node_pool.go @@ -67,7 +67,7 @@ func ResourceGkeonpremVmwareNodePool() *schema.Resource { Type: schema.TypeString, Required: true, Description: `The OS image to be used for each node in a node pool. -Currently 'cos', 'ubuntu', 'ubuntu_containerd' and 'windows' are supported.`, +Currently 'cos', 'cos_cgv2', 'ubuntu', 'ubuntu_cgv2', 'ubuntu_containerd' and 'windows' are supported.`, }, "boot_disk_size_gb": { Type: schema.TypeInt, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_pipeline_job.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_pipeline_job.go new file mode 100644 index 00000000000..d12a7975c5d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_pipeline_job.go @@ -0,0 +1,1148 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package healthcare + +import ( + "fmt" + "log" + "net/http" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceHealthcarePipelineJob() *schema.Resource { + return &schema.Resource{ + Create: resourceHealthcarePipelineJobCreate, + Read: resourceHealthcarePipelineJobRead, + Update: resourceHealthcarePipelineJobUpdate, + Delete: resourceHealthcarePipelineJobDelete, + + Importer: &schema.ResourceImporter{ + State: resourceHealthcarePipelineJobImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.SetLabelsDiff, + ), + + Schema: map[string]*schema.Schema{ + "dataset": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Healthcare Dataset under which the Pipeline Job is to run`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Location where the Pipeline Job is to run`, + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: `Specifies the name of the pipeline job. This field is user-assigned.`, + }, + "backfill_pipeline_job": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies the backfill configuration.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mapping_pipeline_job": { + Type: schema.TypeString, + Optional: true, + Description: `Specifies the mapping pipeline job to backfill, the name format +should follow: projects/{projectId}/locations/{locationId}/datasets/{datasetId}/pipelineJobs/{pipelineJobId}.`, + }, + }, + }, + ConflictsWith: []string{"mapping_pipeline_job", "reconciliation_pipeline_job"}, + }, + "disable_lineage": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, disables writing lineage for the pipeline.`, + Default: false, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `User-supplied key-value pairs used to organize Pipeline Jobs. +Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of +maximum 128 bytes, and must conform to the following PCRE regular expression: +[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} +Label values are optional, must be between 1 and 63 characters long, have a +UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE +regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} +No more than 64 labels can be associated with a given pipeline. +An object containing a list of "key": value pairs. +Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. 
+Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "mapping_pipeline_job": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies the mapping configuration.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mapping_config": { + Type: schema.TypeList, + Required: true, + Description: `The location of the mapping configuration.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Describes the mapping configuration.`, + }, + "whistle_config_source": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies the path to the mapping configuration for the harmonization pipeline.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "import_uri_prefix": { + Type: schema.TypeString, + Required: true, + Description: `Directory path where all the Whistle files are located. +Example: gs://{bucket-id}/{path/to/import-root/dir}`, + }, + "uri": { + Type: schema.TypeString, + Required: true, + Description: `Main configuration file which has the entrypoint or the root function. +Example: gs://{bucket-id}/{path/to/import-root/dir}/entrypoint-file-name.wstl.`, + }, + }, + }, + }, + }, + }, + }, + "fhir_store_destination": { + Type: schema.TypeString, + Optional: true, + Description: `If set, the mapping pipeline will write snapshots to this +FHIR store without assigning stable IDs. You must +grant your pipeline project's Cloud Healthcare Service +Agent service account healthcare.fhirResources.executeBundle +and healthcare.fhirResources.create permissions on the +destination store. The destination store must set +[disableReferentialIntegrity][FhirStore.disable_referential_integrity] +to true. The destination store must use FHIR version R4. +Format: project/{projectID}/locations/{locationID}/datasets/{datasetName}/fhirStores/{fhirStoreID}.`, + ConflictsWith: []string{}, + }, + "fhir_streaming_source": { + Type: schema.TypeList, + Optional: true, + Description: `A streaming FHIR data source.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fhir_store": { + Type: schema.TypeString, + Required: true, + Description: `The path to the FHIR store in the format projects/{projectId}/locations/{locationId}/datasets/{datasetId}/fhirStores/{fhirStoreId}.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Describes the streaming FHIR data source.`, + }, + }, + }, + }, + "reconciliation_destination": { + Type: schema.TypeBool, + Optional: true, + Description: `If set to true, a mapping pipeline will send output snapshots +to the reconciliation pipeline in its dataset. A reconciliation +pipeline must exist in this dataset before a mapping pipeline +with a reconciliation destination can be created.`, + ConflictsWith: []string{}, + }, + }, + }, + ConflictsWith: []string{"reconciliation_pipeline_job", "backfill_pipeline_job"}, + }, + "reconciliation_pipeline_job": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies the reconciliation configuration.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "matching_uri_prefix": { + Type: schema.TypeString, + Required: true, + Description: `Specifies the top level directory of the matching configs used +in all mapping pipelines, which extract properties for resources +to be matched on.
+Example: gs://{bucket-id}/{path/to/matching/configs}`, + }, + "merge_config": { + Type: schema.TypeList, + Required: true, + Description: `Specifies the location of the reconciliation configuration.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "whistle_config_source": { + Type: schema.TypeList, + Required: true, + Description: `Specifies the path to the mapping configuration for the harmonization pipeline.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "import_uri_prefix": { + Type: schema.TypeString, + Required: true, + Description: `Directory path where all the Whistle files are located. +Example: gs://{bucket-id}/{path/to/import-root/dir}`, + }, + "uri": { + Type: schema.TypeString, + Required: true, + Description: `Main configuration file which has the entrypoint or the root function. +Example: gs://{bucket-id}/{path/to/import-root/dir}/entrypoint-file-name.wstl.`, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Describes the mapping configuration.`, + }, + }, + }, + }, + "fhir_store_destination": { + Type: schema.TypeString, + Optional: true, + Description: `The harmonized FHIR store to write harmonized FHIR resources to, +in the format of: project/{projectID}/locations/{locationID}/datasets/{datasetName}/fhirStores/{id}`, + }, + }, + }, + ConflictsWith: []string{"mapping_pipeline_job", "backfill_pipeline_job"}, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `The fully qualified name of this pipeline job`, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + UseJSONNumber: true, + } +} + +func resourceHealthcarePipelineJobCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandHealthcarePipelineJobName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + disableLineageProp, err := expandHealthcarePipelineJobDisableLineage(d.Get("disable_lineage"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disable_lineage"); !tpgresource.IsEmptyValue(reflect.ValueOf(disableLineageProp)) && (ok || !reflect.DeepEqual(v, disableLineageProp)) { + obj["disableLineage"] = disableLineageProp + } + mappingPipelineJobProp, err := expandHealthcarePipelineJobMappingPipelineJob(d.Get("mapping_pipeline_job"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("mapping_pipeline_job"); !tpgresource.IsEmptyValue(reflect.ValueOf(mappingPipelineJobProp)) && (ok || !reflect.DeepEqual(v, mappingPipelineJobProp)) { + obj["mappingPipelineJob"] = mappingPipelineJobProp + } + reconciliationPipelineJobProp, err :=
expandHealthcarePipelineJobReconciliationPipelineJob(d.Get("reconciliation_pipeline_job"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("reconciliation_pipeline_job"); !tpgresource.IsEmptyValue(reflect.ValueOf(reconciliationPipelineJobProp)) && (ok || !reflect.DeepEqual(v, reconciliationPipelineJobProp)) { + obj["reconciliationPipelineJob"] = reconciliationPipelineJobProp + } + backfillPipelineJobProp, err := expandHealthcarePipelineJobBackfillPipelineJob(d.Get("backfill_pipeline_job"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("backfill_pipeline_job"); !tpgresource.IsEmptyValue(reflect.ValueOf(backfillPipelineJobProp)) && (ok || !reflect.DeepEqual(v, backfillPipelineJobProp)) { + obj["backfillPipelineJob"] = backfillPipelineJobProp + } + labelsProp, err := expandHealthcarePipelineJobEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/pipelineJobs?pipelineJobId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new PipelineJob: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating PipelineJob: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{dataset}}/pipelineJobs/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating PipelineJob %q: %#v", d.Id(), res) + + return resourceHealthcarePipelineJobRead(d, meta) +} + +func resourceHealthcarePipelineJobRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/pipelineJobs/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("HealthcarePipelineJob %q", d.Id())) + } + + res, err = resourceHealthcarePipelineJobDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing HealthcarePipelineJob because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("name", flattenHealthcarePipelineJobName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading PipelineJob: %s", err) + } + if err := d.Set("disable_lineage", flattenHealthcarePipelineJobDisableLineage(res["disableLineage"], d, config)); err != nil { + return fmt.Errorf("Error reading PipelineJob: %s", err) + } + if err := d.Set("labels", flattenHealthcarePipelineJobLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading PipelineJob: %s", err) + } + if err := d.Set("mapping_pipeline_job", flattenHealthcarePipelineJobMappingPipelineJob(res["mappingPipelineJob"], d, config)); err != nil { + return fmt.Errorf("Error reading PipelineJob: %s", err) + } + if err := d.Set("reconciliation_pipeline_job", flattenHealthcarePipelineJobReconciliationPipelineJob(res["reconciliationPipelineJob"], d, config)); err != nil { + return fmt.Errorf("Error reading PipelineJob: %s", err) + } + if err := d.Set("backfill_pipeline_job", flattenHealthcarePipelineJobBackfillPipelineJob(res["backfillPipelineJob"], d, config)); err != nil { + return fmt.Errorf("Error reading PipelineJob: %s", err) + } + if err := d.Set("terraform_labels", flattenHealthcarePipelineJobTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading PipelineJob: %s", err) + } + if err := d.Set("effective_labels", flattenHealthcarePipelineJobEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading PipelineJob: %s", err) + } + + return nil +} + +func resourceHealthcarePipelineJobUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + nameProp, err := expandHealthcarePipelineJobName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + disableLineageProp, err := expandHealthcarePipelineJobDisableLineage(d.Get("disable_lineage"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disable_lineage"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disableLineageProp)) { + obj["disableLineage"] = disableLineageProp + } + mappingPipelineJobProp, err := expandHealthcarePipelineJobMappingPipelineJob(d.Get("mapping_pipeline_job"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("mapping_pipeline_job"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, mappingPipelineJobProp)) { + obj["mappingPipelineJob"] = mappingPipelineJobProp + } + reconciliationPipelineJobProp, err := expandHealthcarePipelineJobReconciliationPipelineJob(d.Get("reconciliation_pipeline_job"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("reconciliation_pipeline_job"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, reconciliationPipelineJobProp)) { + obj["reconciliationPipelineJob"] = reconciliationPipelineJobProp + } + backfillPipelineJobProp, err := expandHealthcarePipelineJobBackfillPipelineJob(d.Get("backfill_pipeline_job"), d, config) + if err != nil { + 
return err + } else if v, ok := d.GetOkExists("backfill_pipeline_job"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, backfillPipelineJobProp)) { + obj["backfillPipelineJob"] = backfillPipelineJobProp + } + labelsProp, err := expandHealthcarePipelineJobEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/pipelineJobs/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating PipelineJob %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("name") { + updateMask = append(updateMask, "name") + } + + if d.HasChange("disable_lineage") { + updateMask = append(updateMask, "disableLineage") + } + + if d.HasChange("mapping_pipeline_job") { + updateMask = append(updateMask, "mappingPipelineJob") + } + + if d.HasChange("reconciliation_pipeline_job") { + updateMask = append(updateMask, "reconciliationPipelineJob") + } + + if d.HasChange("backfill_pipeline_job") { + updateMask = append(updateMask, "backfillPipelineJob") + } + + if d.HasChange("effective_labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating PipelineJob %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating PipelineJob %q: %#v", d.Id(), res) + } + + } + + return resourceHealthcarePipelineJobRead(d, meta) +} + +func resourceHealthcarePipelineJobDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/pipelineJobs/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting PipelineJob %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "PipelineJob") + } + + log.Printf("[DEBUG] Finished deleting PipelineJob %q: %#v", 
d.Id(), res) + return nil +} + +func resourceHealthcarePipelineJobImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^(?P<dataset>.+)/pipelineJobs/(?P<name>[^/]+)$", + "^(?P<dataset>[^/]+)/pipelineJobs?pipelineJobId=(?P<name>[^/]+)$", + "^(?P<name>[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{dataset}}/pipelineJobs/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenHealthcarePipelineJobName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcarePipelineJobDisableLineage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcarePipelineJobLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenHealthcarePipelineJobMappingPipelineJob(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["mapping_config"] = + flattenHealthcarePipelineJobMappingPipelineJobMappingConfig(original["mappingConfig"], d, config) + transformed["fhir_streaming_source"] = + flattenHealthcarePipelineJobMappingPipelineJobFhirStreamingSource(original["fhirStreamingSource"], d, config) + transformed["fhir_store_destination"] = + flattenHealthcarePipelineJobMappingPipelineJobFhirStoreDestination(original["fhirStoreDestination"], d, config) + transformed["reconciliation_destination"] = + flattenHealthcarePipelineJobMappingPipelineJobReconciliationDestination(original["reconciliationDestination"], d, config) + return []interface{}{transformed} +} +func flattenHealthcarePipelineJobMappingPipelineJobMappingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["description"] = + flattenHealthcarePipelineJobMappingPipelineJobMappingConfigDescription(original["description"], d, config) + transformed["whistle_config_source"] = + flattenHealthcarePipelineJobMappingPipelineJobMappingConfigWhistleConfigSource(original["whistleConfigSource"], d, config) + return []interface{}{transformed} +} +func flattenHealthcarePipelineJobMappingPipelineJobMappingConfigDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcarePipelineJobMappingPipelineJobMappingConfigWhistleConfigSource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["uri"] =
flattenHealthcarePipelineJobMappingPipelineJobMappingConfigWhistleConfigSourceUri(original["uri"], d, config) + transformed["import_uri_prefix"] = + flattenHealthcarePipelineJobMappingPipelineJobMappingConfigWhistleConfigSourceImportUriPrefix(original["importUriPrefix"], d, config) + return []interface{}{transformed} +} +func flattenHealthcarePipelineJobMappingPipelineJobMappingConfigWhistleConfigSourceUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcarePipelineJobMappingPipelineJobMappingConfigWhistleConfigSourceImportUriPrefix(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcarePipelineJobMappingPipelineJobFhirStreamingSource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["fhir_store"] = + flattenHealthcarePipelineJobMappingPipelineJobFhirStreamingSourceFhirStore(original["fhirStore"], d, config) + transformed["description"] = + flattenHealthcarePipelineJobMappingPipelineJobFhirStreamingSourceDescription(original["description"], d, config) + return []interface{}{transformed} +} +func flattenHealthcarePipelineJobMappingPipelineJobFhirStreamingSourceFhirStore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcarePipelineJobMappingPipelineJobFhirStreamingSourceDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcarePipelineJobMappingPipelineJobFhirStoreDestination(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcarePipelineJobMappingPipelineJobReconciliationDestination(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcarePipelineJobReconciliationPipelineJob(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["merge_config"] = + flattenHealthcarePipelineJobReconciliationPipelineJobMergeConfig(original["mergeConfig"], d, config) + transformed["matching_uri_prefix"] = + flattenHealthcarePipelineJobReconciliationPipelineJobMatchingUriPrefix(original["matchingUriPrefix"], d, config) + transformed["fhir_store_destination"] = + flattenHealthcarePipelineJobReconciliationPipelineJobFhirStoreDestination(original["fhirStoreDestination"], d, config) + return []interface{}{transformed} +} +func flattenHealthcarePipelineJobReconciliationPipelineJobMergeConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["description"] = + flattenHealthcarePipelineJobReconciliationPipelineJobMergeConfigDescription(original["description"], d, config) + transformed["whistle_config_source"] = + flattenHealthcarePipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSource(original["whistleConfigSource"], d, config) + return []interface{}{transformed} +} +func 
flattenHealthcarePipelineJobReconciliationPipelineJobMergeConfigDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcarePipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["uri"] = + flattenHealthcarePipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSourceUri(original["uri"], d, config) + transformed["import_uri_prefix"] = + flattenHealthcarePipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSourceImportUriPrefix(original["importUriPrefix"], d, config) + return []interface{}{transformed} +} +func flattenHealthcarePipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSourceUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcarePipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSourceImportUriPrefix(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcarePipelineJobReconciliationPipelineJobMatchingUriPrefix(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcarePipelineJobReconciliationPipelineJobFhirStoreDestination(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcarePipelineJobBackfillPipelineJob(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["mapping_pipeline_job"] = + flattenHealthcarePipelineJobBackfillPipelineJobMappingPipelineJob(original["mappingPipelineJob"], d, config) + return []interface{}{transformed} +} +func flattenHealthcarePipelineJobBackfillPipelineJobMappingPipelineJob(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcarePipelineJobTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("terraform_labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenHealthcarePipelineJobEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandHealthcarePipelineJobName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcarePipelineJobDisableLineage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcarePipelineJobMappingPipelineJob(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMappingConfig, err := 
expandHealthcarePipelineJobMappingPipelineJobMappingConfig(original["mapping_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMappingConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mappingConfig"] = transformedMappingConfig + } + + transformedFhirStreamingSource, err := expandHealthcarePipelineJobMappingPipelineJobFhirStreamingSource(original["fhir_streaming_source"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFhirStreamingSource); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fhirStreamingSource"] = transformedFhirStreamingSource + } + + transformedFhirStoreDestination, err := expandHealthcarePipelineJobMappingPipelineJobFhirStoreDestination(original["fhir_store_destination"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFhirStoreDestination); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fhirStoreDestination"] = transformedFhirStoreDestination + } + + transformedReconciliationDestination, err := expandHealthcarePipelineJobMappingPipelineJobReconciliationDestination(original["reconciliation_destination"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReconciliationDestination); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["reconciliationDestination"] = transformedReconciliationDestination + } + + return transformed, nil +} + +func expandHealthcarePipelineJobMappingPipelineJobMappingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDescription, err := expandHealthcarePipelineJobMappingPipelineJobMappingConfigDescription(original["description"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["description"] = transformedDescription + } + + transformedWhistleConfigSource, err := expandHealthcarePipelineJobMappingPipelineJobMappingConfigWhistleConfigSource(original["whistle_config_source"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWhistleConfigSource); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["whistleConfigSource"] = transformedWhistleConfigSource + } + + return transformed, nil +} + +func expandHealthcarePipelineJobMappingPipelineJobMappingConfigDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcarePipelineJobMappingPipelineJobMappingConfigWhistleConfigSource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUri, err := expandHealthcarePipelineJobMappingPipelineJobMappingConfigWhistleConfigSourceUri(original["uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uri"] = transformedUri + } + + 
transformedImportUriPrefix, err := expandHealthcarePipelineJobMappingPipelineJobMappingConfigWhistleConfigSourceImportUriPrefix(original["import_uri_prefix"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedImportUriPrefix); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["importUriPrefix"] = transformedImportUriPrefix + } + + return transformed, nil +} + +func expandHealthcarePipelineJobMappingPipelineJobMappingConfigWhistleConfigSourceUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcarePipelineJobMappingPipelineJobMappingConfigWhistleConfigSourceImportUriPrefix(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcarePipelineJobMappingPipelineJobFhirStreamingSource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFhirStore, err := expandHealthcarePipelineJobMappingPipelineJobFhirStreamingSourceFhirStore(original["fhir_store"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFhirStore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fhirStore"] = transformedFhirStore + } + + transformedDescription, err := expandHealthcarePipelineJobMappingPipelineJobFhirStreamingSourceDescription(original["description"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["description"] = transformedDescription + } + + return transformed, nil +} + +func expandHealthcarePipelineJobMappingPipelineJobFhirStreamingSourceFhirStore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcarePipelineJobMappingPipelineJobFhirStreamingSourceDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcarePipelineJobMappingPipelineJobFhirStoreDestination(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcarePipelineJobMappingPipelineJobReconciliationDestination(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcarePipelineJobReconciliationPipelineJob(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMergeConfig, err := expandHealthcarePipelineJobReconciliationPipelineJobMergeConfig(original["merge_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMergeConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mergeConfig"] = transformedMergeConfig + } + + transformedMatchingUriPrefix, err := 
expandHealthcarePipelineJobReconciliationPipelineJobMatchingUriPrefix(original["matching_uri_prefix"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMatchingUriPrefix); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["matchingUriPrefix"] = transformedMatchingUriPrefix + } + + transformedFhirStoreDestination, err := expandHealthcarePipelineJobReconciliationPipelineJobFhirStoreDestination(original["fhir_store_destination"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFhirStoreDestination); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fhirStoreDestination"] = transformedFhirStoreDestination + } + + return transformed, nil +} + +func expandHealthcarePipelineJobReconciliationPipelineJobMergeConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDescription, err := expandHealthcarePipelineJobReconciliationPipelineJobMergeConfigDescription(original["description"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["description"] = transformedDescription + } + + transformedWhistleConfigSource, err := expandHealthcarePipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSource(original["whistle_config_source"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWhistleConfigSource); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["whistleConfigSource"] = transformedWhistleConfigSource + } + + return transformed, nil +} + +func expandHealthcarePipelineJobReconciliationPipelineJobMergeConfigDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcarePipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUri, err := expandHealthcarePipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSourceUri(original["uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uri"] = transformedUri + } + + transformedImportUriPrefix, err := expandHealthcarePipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSourceImportUriPrefix(original["import_uri_prefix"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedImportUriPrefix); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["importUriPrefix"] = transformedImportUriPrefix + } + + return transformed, nil +} + +func expandHealthcarePipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSourceUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandHealthcarePipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSourceImportUriPrefix(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcarePipelineJobReconciliationPipelineJobMatchingUriPrefix(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcarePipelineJobReconciliationPipelineJobFhirStoreDestination(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcarePipelineJobBackfillPipelineJob(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMappingPipelineJob, err := expandHealthcarePipelineJobBackfillPipelineJobMappingPipelineJob(original["mapping_pipeline_job"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMappingPipelineJob); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mappingPipelineJob"] = transformedMappingPipelineJob + } + + return transformed, nil +} + +func expandHealthcarePipelineJobBackfillPipelineJobMappingPipelineJob(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcarePipelineJobEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func resourceHealthcarePipelineJobDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + // Take the returned long form of the name and use it as `self_link`. + // Then modify the name to be the user specified form. + // We can't just ignore_read on `name` as the linter will + // complain that the returned `res` is never used afterwards. + // Some field needs to be actually set, and we chose `name`. + if err := d.Set("self_link", res["name"].(string)); err != nil { + return nil, fmt.Errorf("Error setting self_link: %s", err) + } + res["name"] = d.Get("name").(string) + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_workspace.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_workspace.go new file mode 100644 index 00000000000..955941a5029 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_workspace.go @@ -0,0 +1,464 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. 
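+//
+// Example usage (an illustrative sketch only; the resource names and the
+// data project ID below are placeholder assumptions, not values from this
+// patch):
+//
+//	resource "google_healthcare_workspace" "default" {
+//	  name    = "example-dm-workspace"
+//	  dataset = google_healthcare_dataset.default.id
+//
+//	  settings {
+//	    data_project_ids = ["example-data-project"]
+//	  }
+//	}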
+// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package healthcare + +import ( + "fmt" + "log" + "net/http" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceHealthcareWorkspace() *schema.Resource { + return &schema.Resource{ + Create: resourceHealthcareWorkspaceCreate, + Read: resourceHealthcareWorkspaceRead, + Update: resourceHealthcareWorkspaceUpdate, + Delete: resourceHealthcareWorkspaceDelete, + + Importer: &schema.ResourceImporter{ + State: resourceHealthcareWorkspaceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.SetLabelsDiff, + ), + + Schema: map[string]*schema.Schema{ + "dataset": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Identifies the dataset addressed by this request. Must be in the format +'projects/{project}/locations/{location}/datasets/{dataset}'`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the workspace, in the format 'projects/{projectId}/locations/{location}/datasets/{datasetId}/dataMapperWorkspaces/{workspaceId}'`, + }, + "settings": { + Type: schema.TypeList, + Required: true, + Description: `Settings associated with this workspace.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "data_project_ids": { + Type: schema.TypeList, + Required: true, + Description: `Project IDs for data projects hosted in a workspace.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `The user labels. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" } + + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. 
+Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + UseJSONNumber: true, + } +} + +func resourceHealthcareWorkspaceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandHealthcareWorkspaceName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + settingsProp, err := expandHealthcareWorkspaceSettings(d.Get("settings"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(settingsProp)) && (ok || !reflect.DeepEqual(v, settingsProp)) { + obj["settings"] = settingsProp + } + labelsProp, err := expandHealthcareWorkspaceEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/dataMapperWorkspaces?workspaceId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Workspace: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating Workspace: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{dataset}}/dataMapperWorkspaces/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Workspace %q: %#v", d.Id(), res) + + return resourceHealthcareWorkspaceRead(d, meta) +} + +func resourceHealthcareWorkspaceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/dataMapperWorkspaces/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("HealthcareWorkspace %q", d.Id())) + } + + if err := d.Set("name", flattenHealthcareWorkspaceName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Workspace: %s", err) + } + if err := d.Set("settings", flattenHealthcareWorkspaceSettings(res["settings"], d, config)); err != nil { + return fmt.Errorf("Error reading Workspace: %s", err) + } + if err := d.Set("labels", flattenHealthcareWorkspaceLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Workspace: %s", err) + } + if err := d.Set("terraform_labels", flattenHealthcareWorkspaceTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Workspace: %s", err) + } + if err := d.Set("effective_labels", flattenHealthcareWorkspaceEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Workspace: %s", err) + } + + return nil +} + +func resourceHealthcareWorkspaceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + settingsProp, err := expandHealthcareWorkspaceSettings(d.Get("settings"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, settingsProp)) { + obj["settings"] = settingsProp + } + labelsProp, err := expandHealthcareWorkspaceEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/dataMapperWorkspaces/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Workspace %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("settings") { + updateMask = append(updateMask, "settings") + } + + if d.HasChange("effective_labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating Workspace %q: %s", d.Id(), err) + } else { + 
log.Printf("[DEBUG] Finished updating Workspace %q: %#v", d.Id(), res) + } + + } + + return resourceHealthcareWorkspaceRead(d, meta) +} + +func resourceHealthcareWorkspaceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/dataMapperWorkspaces/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting Workspace %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Workspace") + } + + log.Printf("[DEBUG] Finished deleting Workspace %q: %#v", d.Id(), res) + return nil +} + +func resourceHealthcareWorkspaceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^(?P.+)/dataMapperWorkspaces/(?P[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{dataset}}/dataMapperWorkspaces/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenHealthcareWorkspaceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenHealthcareWorkspaceSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["data_project_ids"] = + flattenHealthcareWorkspaceSettingsDataProjectIds(original["dataProjectIds"], d, config) + return []interface{}{transformed} +} +func flattenHealthcareWorkspaceSettingsDataProjectIds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcareWorkspaceLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenHealthcareWorkspaceTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("terraform_labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenHealthcareWorkspaceEffectiveLabels(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func expandHealthcareWorkspaceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcareWorkspaceSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDataProjectIds, err := expandHealthcareWorkspaceSettingsDataProjectIds(original["data_project_ids"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDataProjectIds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dataProjectIds"] = transformedDataProjectIds + } + + return transformed, nil +} + +func expandHealthcareWorkspaceSettingsDataProjectIds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcareWorkspaceEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/resource_iam_workload_identity_pool_provider.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/resource_iam_workload_identity_pool_provider.go index c1fcde21c1a..f6612b9d6ce 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/resource_iam_workload_identity_pool_provider.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/resource_iam_workload_identity_pool_provider.go @@ -202,7 +202,7 @@ For OIDC providers, the following rules apply: }, }, }, - ExactlyOneOf: []string{"aws", "oidc", "saml"}, + ExactlyOneOf: []string{"aws", "oidc", "saml", "x509"}, }, "description": { Type: schema.TypeString, @@ -280,7 +280,7 @@ the following fields: }, }, }, - ExactlyOneOf: []string{"aws", "oidc", "saml"}, + ExactlyOneOf: []string{"aws", "oidc", "saml", "x509"}, }, "saml": { Type: schema.TypeList, @@ -296,7 +296,67 @@ the following fields: }, }, }, - ExactlyOneOf: []string{"aws", "oidc", "saml"}, + ExactlyOneOf: []string{"aws", "oidc", "saml", "x509"}, + }, + "x509": { + Type: schema.TypeList, + Optional: true, + Description: `An X.509-type identity provider represents a CA. It is trusted to assert a +client identity if the client has a certificate that chains up to this CA.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "trust_store": { + Type: schema.TypeList, + Required: true, + Description: `A Trust store, use this trust store as a wrapper to config the trust +anchor and optional intermediate cas to help build the trust chain for +the incoming end entity certificate. Follow the x509 guidelines to +define those PEM encoded certs. 
Only 1 trust store is currently +supported.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "trust_anchors": { + Type: schema.TypeList, + Required: true, + Description: `List of Trust Anchors to be used while performing validation +against a given TrustStore. The incoming end entity's certificate +must be chained up to one of the trust anchors here.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pem_certificate": { + Type: schema.TypeString, + Optional: true, + Description: `PEM certificate of the PKI used for validation. Must only contain one +ca certificate(either root or intermediate cert).`, + }, + }, + }, + }, + "intermediate_cas": { + Type: schema.TypeList, + Optional: true, + Description: `Set of intermediate CA certificates used for building the trust chain to +trust anchor. +IMPORTANT: Intermediate CAs are only supported when configuring x509 federation.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pem_certificate": { + Type: schema.TypeString, + Optional: true, + Description: `PEM certificate of the PKI used for validation. Must only contain one +ca certificate(either root or intermediate cert).`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + ExactlyOneOf: []string{"aws", "oidc", "saml", "x509"}, }, "name": { Type: schema.TypeString, @@ -382,6 +442,12 @@ func resourceIAMBetaWorkloadIdentityPoolProviderCreate(d *schema.ResourceData, m } else if v, ok := d.GetOkExists("saml"); !tpgresource.IsEmptyValue(reflect.ValueOf(samlProp)) && (ok || !reflect.DeepEqual(v, samlProp)) { obj["saml"] = samlProp } + x509Prop, err := expandIAMBetaWorkloadIdentityPoolProviderX509(d.Get("x509"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("x509"); !tpgresource.IsEmptyValue(reflect.ValueOf(x509Prop)) && (ok || !reflect.DeepEqual(v, x509Prop)) { + obj["x509"] = x509Prop + } url, err := tpgresource.ReplaceVars(d, config, "{{IAMBetaBasePath}}projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers?workloadIdentityPoolProviderId={{workload_identity_pool_provider_id}}") if err != nil { @@ -523,6 +589,9 @@ func resourceIAMBetaWorkloadIdentityPoolProviderRead(d *schema.ResourceData, met if err := d.Set("saml", flattenIAMBetaWorkloadIdentityPoolProviderSaml(res["saml"], d, config)); err != nil { return fmt.Errorf("Error reading WorkloadIdentityPoolProvider: %s", err) } + if err := d.Set("x509", flattenIAMBetaWorkloadIdentityPoolProviderX509(res["x509"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkloadIdentityPoolProvider: %s", err) + } return nil } @@ -591,6 +660,12 @@ func resourceIAMBetaWorkloadIdentityPoolProviderUpdate(d *schema.ResourceData, m } else if v, ok := d.GetOkExists("saml"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, samlProp)) { obj["saml"] = samlProp } + x509Prop, err := expandIAMBetaWorkloadIdentityPoolProviderX509(d.Get("x509"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("x509"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, x509Prop)) { + obj["x509"] = x509Prop + } url, err := tpgresource.ReplaceVars(d, config, "{{IAMBetaBasePath}}projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers/{{workload_identity_pool_provider_id}}") if err != nil { @@ -634,6 +709,10 @@ func resourceIAMBetaWorkloadIdentityPoolProviderUpdate(d *schema.ResourceData, m if d.HasChange("saml") { 
updateMask = append(updateMask, "saml") } + + if d.HasChange("x509") { + updateMask = append(updateMask, "x509") + } // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) @@ -844,6 +923,78 @@ func flattenIAMBetaWorkloadIdentityPoolProviderSamlIdpMetadataXml(v interface{}, return v } +func flattenIAMBetaWorkloadIdentityPoolProviderX509(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["trust_store"] = + flattenIAMBetaWorkloadIdentityPoolProviderX509TrustStore(original["trustStore"], d, config) + return []interface{}{transformed} +} +func flattenIAMBetaWorkloadIdentityPoolProviderX509TrustStore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["trust_anchors"] = + flattenIAMBetaWorkloadIdentityPoolProviderX509TrustStoreTrustAnchors(original["trustAnchors"], d, config) + transformed["intermediate_cas"] = + flattenIAMBetaWorkloadIdentityPoolProviderX509TrustStoreIntermediateCas(original["intermediateCas"], d, config) + return []interface{}{transformed} +} +func flattenIAMBetaWorkloadIdentityPoolProviderX509TrustStoreTrustAnchors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "pem_certificate": flattenIAMBetaWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorsPemCertificate(original["pemCertificate"], d, config), + }) + } + return transformed +} +func flattenIAMBetaWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorsPemCertificate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMBetaWorkloadIdentityPoolProviderX509TrustStoreIntermediateCas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "pem_certificate": flattenIAMBetaWorkloadIdentityPoolProviderX509TrustStoreIntermediateCasPemCertificate(original["pemCertificate"], d, config), + }) + } + return transformed +} +func flattenIAMBetaWorkloadIdentityPoolProviderX509TrustStoreIntermediateCasPemCertificate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func expandIAMBetaWorkloadIdentityPoolProviderDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -962,6 +1113,103 @@ func expandIAMBetaWorkloadIdentityPoolProviderSamlIdpMetadataXml(v interface{}, return 
v, nil } +func expandIAMBetaWorkloadIdentityPoolProviderX509(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTrustStore, err := expandIAMBetaWorkloadIdentityPoolProviderX509TrustStore(original["trust_store"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTrustStore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["trustStore"] = transformedTrustStore + } + + return transformed, nil +} + +func expandIAMBetaWorkloadIdentityPoolProviderX509TrustStore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTrustAnchors, err := expandIAMBetaWorkloadIdentityPoolProviderX509TrustStoreTrustAnchors(original["trust_anchors"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTrustAnchors); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["trustAnchors"] = transformedTrustAnchors + } + + transformedIntermediateCas, err := expandIAMBetaWorkloadIdentityPoolProviderX509TrustStoreIntermediateCas(original["intermediate_cas"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIntermediateCas); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["intermediateCas"] = transformedIntermediateCas + } + + return transformed, nil +} + +func expandIAMBetaWorkloadIdentityPoolProviderX509TrustStoreTrustAnchors(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPemCertificate, err := expandIAMBetaWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorsPemCertificate(original["pem_certificate"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPemCertificate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pemCertificate"] = transformedPemCertificate + } + + req = append(req, transformed) + } + return req, nil +} + +func expandIAMBetaWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorsPemCertificate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAMBetaWorkloadIdentityPoolProviderX509TrustStoreIntermediateCas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPemCertificate, err := expandIAMBetaWorkloadIdentityPoolProviderX509TrustStoreIntermediateCasPemCertificate(original["pem_certificate"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPemCertificate); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["pemCertificate"] = transformedPemCertificate + } + + req = append(req, transformed) + } + return req, nil +} + +func expandIAMBetaWorkloadIdentityPoolProviderX509TrustStoreIntermediateCasPemCertificate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func resourceIAMBetaWorkloadIdentityPoolProviderDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { if v := res["state"]; v == "DELETED" { return nil, nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/resource_iap_client.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/resource_iap_client.go index b521e33ee1a..0b5dde2ae5d 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/resource_iap_client.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/resource_iap_client.go @@ -53,7 +53,7 @@ func ResourceIapClient() *schema.Resource { ForceNew: true, Description: `Identifier of the brand to which this client is attached to. The format is -'projects/{project_number}/brands/{brand_id}/identityAwareProxyClients/{client_id}'.`, +'projects/{project_number}/brands/{brand_id}'.`, }, "display_name": { Type: schema.TypeString, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/resource_iap_settings.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/resource_iap_settings.go new file mode 100644 index 00000000000..38e9624bf82 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/resource_iap_settings.go @@ -0,0 +1,1367 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
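+//
+// Example usage (an illustrative sketch only; the project ID and the reauth
+// values below are placeholder assumptions, not values from this patch):
+//
+//	resource "google_iap_settings" "default" {
+//	  name = "projects/example-project/iap_web/compute"
+//
+//	  access_settings {
+//	    reauth_settings {
+//	      method      = "LOGIN"
+//	      max_age     = "300s"
+//	      policy_type = "MINIMUM"
+//	    }
+//	  }
+//	}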
+// +// ---------------------------------------------------------------------------- + +package iap + +import ( + "fmt" + "log" + "net/http" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceIapSettings() *schema.Resource { + return &schema.Resource{ + Create: resourceIapSettingsCreate, + Read: resourceIapSettingsRead, + Update: resourceIapSettingsUpdate, + Delete: resourceIapSettingsDelete, + + Importer: &schema.ResourceImporter{ + State: resourceIapSettingsImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource name of the IAP protected resource. Name can have below resources: +* organizations/{organization_id} +* folders/{folder_id} +* projects/{projects_id} +* projects/{projects_id}/iap_web +* projects/{projects_id}/iap_web/compute +* projects/{projects_id}/iap_web/compute-{region} +* projects/{projects_id}/iap_web/compute/service/{service_id} +* projects/{projects_id}/iap_web/compute-{region}/service/{service_id} +* projects/{projects_id}/iap_web/appengine-{app_id} +* projects/{projects_id}/iap_web/appengine-{app_id}/service/{service_id} +* projects/{projects_id}/iap_web/appengine-{app_id}/service/{service_id}/version/{version_id}`, + }, + "access_settings": { + Type: schema.TypeList, + Optional: true, + Description: `Top level wrapper for all access related setting in IAP.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_domains_settings": { + Type: schema.TypeList, + Optional: true, + Description: `Settings to configure and enable allowed domains.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "domains": { + Type: schema.TypeList, + Optional: true, + Description: `List of trusted domains.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "enable": { + Type: schema.TypeBool, + Optional: true, + Description: `Configuration for customers to opt in for the feature.`, + }, + }, + }, + }, + "cors_settings": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration to allow cross-origin requests via IAP.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_http_options": { + Type: schema.TypeBool, + Optional: true, + Description: `Configuration to allow HTTP OPTIONS calls to skip authorization. +If undefined, IAP will not apply any special logic to OPTIONS requests.`, + }, + }, + }, + }, + "gcip_settings": { + Type: schema.TypeList, + Optional: true, + Description: `GCIP claims and endpoint configurations for 3p identity providers.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "login_page_uri": { + Type: schema.TypeString, + Optional: true, + Description: `Login page URI associated with the GCIP tenants. 
Typically, all resources within +the same project share the same login page, though it could be overridden at the +sub resource level.`, + }, + "tenant_ids": { + Type: schema.TypeList, + Optional: true, + Description: `GCIP tenant ids that are linked to the IAP resource. tenantIds could be a string +beginning with a number character to indicate authenticating with GCIP tenant flow, +or in the format of _<ProjectNumber> to indicate authenticating with GCIP agent flow. If agent flow +is used, tenantIds should only contain one single element, while for tenant flow, +tenantIds can contain multiple elements.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + "identity_sources": { + Type: schema.TypeList, + Optional: true, + Description: `Identity sources that IAP can use to authenticate the end user. Only one identity source +can be configured. The possible values are: + +* 'WORKFORCE_IDENTITY_FEDERATION': Use external identities set up on Google Cloud Workforce + Identity Federation. Possible values: ["WORKFORCE_IDENTITY_FEDERATION"]`, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidateEnum([]string{"WORKFORCE_IDENTITY_FEDERATION"}), + }, + }, + "oauth_settings": { + Type: schema.TypeList, + Optional: true, + Description: `Settings to configure IAP's OAuth behavior.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "login_hint": { + Type: schema.TypeString, + Optional: true, + Description: `Domain hint to send as hd=? parameter in OAuth request flow. +Enables redirect to primary IDP by skipping Google's login screen. +(https://developers.google.com/identity/protocols/OpenIDConnect#hd-param) +Note: IAP does not verify that the id token's hd claim matches this value +since access behavior is managed by IAM policies.`, + }, + "programmatic_clients": { + Type: schema.TypeList, + Optional: true, + Description: `List of client ids allowed to use IAP programmatically.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "reauth_settings": { + Type: schema.TypeList, + Optional: true, + Description: `Settings to configure reauthentication policies in IAP.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_age": { + Type: schema.TypeString, + Required: true, + Description: `Reauth session lifetime, how long before a user has to reauthenticate again. +A duration in seconds with up to nine fractional digits, ending with 's'. +Example: "3.5s".`, + }, + "method": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"LOGIN", "SECURE_KEY", "ENROLLED_SECOND_FACTORS"}), + Description: `Reauth method requested. The possible values are: + +* 'LOGIN': Prompts the user to log in again. +* 'SECURE_KEY': User must use their secure key 2nd factor device. +* 'ENROLLED_SECOND_FACTORS': User can use any enabled 2nd factor. Possible values: ["LOGIN", "SECURE_KEY", "ENROLLED_SECOND_FACTORS"]`, + }, + "policy_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"MINIMUM", "DEFAULT"}), + Description: `How IAP determines the effective policy in cases of hierarchical policies. +Policies are merged from higher in the hierarchy to lower in the hierarchy. +The possible values are: + +* 'MINIMUM': This policy acts as a minimum to other policies, lower in the hierarchy. + Effective policy may only be the same or stricter. +* 'DEFAULT': This policy acts as a default if no other reauth policy is set.
Possible values: ["MINIMUM", "DEFAULT"]`, + }, + }, + }, + }, + "workforce_identity_settings": { + Type: schema.TypeList, + Optional: true, + Description: `Settings to configure the workforce identity federation, including workforce pools +and OAuth 2.0 settings.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "oauth2": { + Type: schema.TypeList, + Optional: true, + Description: `OAuth 2.0 settings for IAP to perform OIDC flow with workforce identity +federation services.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_id": { + Type: schema.TypeString, + Optional: true, + Description: `The OAuth 2.0 client ID registered in the workforce identity +federation OAuth 2.0 Server.`, + }, + "client_secret": { + Type: schema.TypeString, + Optional: true, + Description: `Input only. The OAuth 2.0 client secret created while registering +the client ID.`, + Sensitive: true, + }, + "client_secret_sha256": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. SHA256 hash value for the client secret. This field +is returned by IAP when the settings are retrieved.`, + }, + }, + }, + }, + "workforce_pools": { + Type: schema.TypeList, + Optional: true, + Description: `The workforce pool resources. Only one workforce pool is accepted.`, + MaxItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + "application_settings": { + Type: schema.TypeList, + Optional: true, + Description: `Top level wrapper for all application related settings in IAP.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "access_denied_page_settings": { + Type: schema.TypeList, + Optional: true, + Description: `Customization for Access Denied page. IAP allows customers to define a custom URI +to use as the error page when access is denied to users. If IAP prevents access +to this page, the default IAP error page will be displayed instead.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "access_denied_page_uri": { + Type: schema.TypeString, + Optional: true, + Description: `The URI to be redirected to when access is denied.`, + }, + "generate_troubleshooting_uri": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether to generate a troubleshooting URL on access denied events to this application.`, + }, + "remediation_token_generation_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether to generate remediation token on access denied events to this application.`, + }, + }, + }, + }, + "attribute_propagation_settings": { + Type: schema.TypeList, + Optional: true, + Description: `Settings to configure attribute propagation.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether the provided attribute propagation settings should be evaluated on user requests. +If set to true, attributes returned from the expression will be propagated in the set output credentials.`, + }, + "expression": { + Type: schema.TypeString, + Optional: true, + Description: `Raw string CEL expression. Must return a list of attributes. A maximum of 45 attributes can +be selected. 
Expressions can select different attribute types from attributes: +attributes.saml_attributes, attributes.iap_attributes.`, + }, + "output_credentials": { + Type: schema.TypeList, + Optional: true, + Description: `Which output credentials attributes selected by the CEL expression should be propagated in. +All attributes will be fully duplicated in each selected output credential. +Possible values are: + +* 'HEADER': Propagate attributes in the headers with "x-goog-iap-attr-" prefix. +* 'JWT': Propagate attributes in the JWT of the form: + "additional_claims": { "my_attribute": ["value1", "value2"] } +* 'RCTOKEN': Propagate attributes in the RCToken of the form: " + additional_claims": { "my_attribute": ["value1", "value2"] } Possible values: ["HEADER", "JWT", "RCTOKEN"]`, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidateEnum([]string{"HEADER", "JWT", "RCTOKEN"}), + }, + }, + }, + }, + }, + "cookie_domain": { + Type: schema.TypeString, + Optional: true, + Description: `The Domain value to set for cookies generated by IAP. This value is not validated by the API, +but will be ignored at runtime if invalid.`, + }, + "csm_settings": { + Type: schema.TypeList, + Optional: true, + Description: `Settings to configure IAP's behavior for a service mesh.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rctoken_aud": { + Type: schema.TypeString, + Optional: true, + Description: `Audience claim set in the generated RCToken. This value is not validated by IAP.`, + }, + }, + }, + }, + }, + }, + }, + }, + UseJSONNumber: true, + } +} + +func resourceIapSettingsCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandIapSettingsName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + accessSettingsProp, err := expandIapSettingsAccessSettings(d.Get("access_settings"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("access_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(accessSettingsProp)) && (ok || !reflect.DeepEqual(v, accessSettingsProp)) { + obj["accessSettings"] = accessSettingsProp + } + applicationSettingsProp, err := expandIapSettingsApplicationSettings(d.Get("application_settings"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("application_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(applicationSettingsProp)) && (ok || !reflect.DeepEqual(v, applicationSettingsProp)) { + obj["applicationSettings"] = applicationSettingsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IapBasePath}}{{name}}:iapSettings") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Settings: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: 
headers, + }) + if err != nil { + return fmt.Errorf("Error creating Settings: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}/iapSettings") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Settings %q: %#v", d.Id(), res) + + return resourceIapSettingsRead(d, meta) +} + +func resourceIapSettingsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IapBasePath}}{{name}}:iapSettings") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("IapSettings %q", d.Id())) + } + + if err := d.Set("access_settings", flattenIapSettingsAccessSettings(res["accessSettings"], d, config)); err != nil { + return fmt.Errorf("Error reading Settings: %s", err) + } + if err := d.Set("application_settings", flattenIapSettingsApplicationSettings(res["applicationSettings"], d, config)); err != nil { + return fmt.Errorf("Error reading Settings: %s", err) + } + + return nil +} + +func resourceIapSettingsUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + accessSettingsProp, err := expandIapSettingsAccessSettings(d.Get("access_settings"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("access_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, accessSettingsProp)) { + obj["accessSettings"] = accessSettingsProp + } + applicationSettingsProp, err := expandIapSettingsApplicationSettings(d.Get("application_settings"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("application_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, applicationSettingsProp)) { + obj["applicationSettings"] = applicationSettingsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IapBasePath}}{{name}}:iapSettings") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Settings %q: %#v", d.Id(), obj) + headers := make(http.Header) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating Settings %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Settings %q: %#v", d.Id(), res) + } + + return resourceIapSettingsRead(d, 
meta) +} + +func resourceIapSettingsDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IapBasePath}}{{name}}:iapSettings") + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Settings: %s", err) + } + + headers := make(http.Header) + + obj := make(map[string]interface{}) + + log.Printf("[DEBUG] Updating Settings %q: %#v", d.Id(), obj) + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating Settings %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Settings %q: %#v", d.Id(), res) + } + + return nil + +} + +func resourceIapSettingsImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^(?P.+)/iapSettings$", + "^(?P.+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{name}}/iapSettings") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil + +} + +func flattenIapSettingsAccessSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["gcip_settings"] = + flattenIapSettingsAccessSettingsGcipSettings(original["gcipSettings"], d, config) + transformed["cors_settings"] = + flattenIapSettingsAccessSettingsCorsSettings(original["corsSettings"], d, config) + transformed["oauth_settings"] = + flattenIapSettingsAccessSettingsOauthSettings(original["oauthSettings"], d, config) + transformed["reauth_settings"] = + flattenIapSettingsAccessSettingsReauthSettings(original["reauthSettings"], d, config) + transformed["allowed_domains_settings"] = + flattenIapSettingsAccessSettingsAllowedDomainsSettings(original["allowedDomainsSettings"], d, config) + transformed["workforce_identity_settings"] = + flattenIapSettingsAccessSettingsWorkforceIdentitySettings(original["workforceIdentitySettings"], d, config) + transformed["identity_sources"] = + flattenIapSettingsAccessSettingsIdentitySources(original["identitySources"], d, config) + return []interface{}{transformed} +} +func flattenIapSettingsAccessSettingsGcipSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["tenant_ids"] = + flattenIapSettingsAccessSettingsGcipSettingsTenantIds(original["tenantIds"], d, config) + transformed["login_page_uri"] = + flattenIapSettingsAccessSettingsGcipSettingsLoginPageUri(original["loginPageUri"], d, config) + return []interface{}{transformed} +} +func flattenIapSettingsAccessSettingsGcipSettingsTenantIds(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapSettingsAccessSettingsGcipSettingsLoginPageUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapSettingsAccessSettingsCorsSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["allow_http_options"] = + flattenIapSettingsAccessSettingsCorsSettingsAllowHttpOptions(original["allowHttpOptions"], d, config) + return []interface{}{transformed} +} +func flattenIapSettingsAccessSettingsCorsSettingsAllowHttpOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapSettingsAccessSettingsOauthSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["login_hint"] = + flattenIapSettingsAccessSettingsOauthSettingsLoginHint(original["loginHint"], d, config) + transformed["programmatic_clients"] = + flattenIapSettingsAccessSettingsOauthSettingsProgrammaticClients(original["programmaticClients"], d, config) + return []interface{}{transformed} +} +func flattenIapSettingsAccessSettingsOauthSettingsLoginHint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapSettingsAccessSettingsOauthSettingsProgrammaticClients(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapSettingsAccessSettingsReauthSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["method"] = + flattenIapSettingsAccessSettingsReauthSettingsMethod(original["method"], d, config) + transformed["max_age"] = + flattenIapSettingsAccessSettingsReauthSettingsMaxAge(original["maxAge"], d, config) + transformed["policy_type"] = + flattenIapSettingsAccessSettingsReauthSettingsPolicyType(original["policyType"], d, config) + return []interface{}{transformed} +} +func flattenIapSettingsAccessSettingsReauthSettingsMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapSettingsAccessSettingsReauthSettingsMaxAge(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapSettingsAccessSettingsReauthSettingsPolicyType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapSettingsAccessSettingsAllowedDomainsSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["domains"] = + flattenIapSettingsAccessSettingsAllowedDomainsSettingsDomains(original["domains"], d, config) + transformed["enable"] = + flattenIapSettingsAccessSettingsAllowedDomainsSettingsEnable(original["enable"], d, config) + return 
[]interface{}{transformed} +} +func flattenIapSettingsAccessSettingsAllowedDomainsSettingsDomains(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapSettingsAccessSettingsAllowedDomainsSettingsEnable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapSettingsAccessSettingsWorkforceIdentitySettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["workforce_pools"] = + flattenIapSettingsAccessSettingsWorkforceIdentitySettingsWorkforcePools(original["workforcePools"], d, config) + transformed["oauth2"] = + flattenIapSettingsAccessSettingsWorkforceIdentitySettingsOauth2(original["oauth2"], d, config) + return []interface{}{transformed} +} +func flattenIapSettingsAccessSettingsWorkforceIdentitySettingsWorkforcePools(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapSettingsAccessSettingsWorkforceIdentitySettingsOauth2(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["client_id"] = + flattenIapSettingsAccessSettingsWorkforceIdentitySettingsOauth2ClientId(original["clientId"], d, config) + transformed["client_secret"] = + flattenIapSettingsAccessSettingsWorkforceIdentitySettingsOauth2ClientSecret(original["clientSecret"], d, config) + transformed["client_secret_sha256"] = + flattenIapSettingsAccessSettingsWorkforceIdentitySettingsOauth2ClientSecretSha256(original["clientSecretSha256"], d, config) + return []interface{}{transformed} +} +func flattenIapSettingsAccessSettingsWorkforceIdentitySettingsOauth2ClientId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapSettingsAccessSettingsWorkforceIdentitySettingsOauth2ClientSecret(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("access_settings.0.workforce_identity_settings.0.oauth2.0.client_secret") +} + +func flattenIapSettingsAccessSettingsWorkforceIdentitySettingsOauth2ClientSecretSha256(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapSettingsAccessSettingsIdentitySources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapSettingsApplicationSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["csm_settings"] = + flattenIapSettingsApplicationSettingsCsmSettings(original["csmSettings"], d, config) + transformed["access_denied_page_settings"] = + flattenIapSettingsApplicationSettingsAccessDeniedPageSettings(original["accessDeniedPageSettings"], d, config) + transformed["cookie_domain"] = + flattenIapSettingsApplicationSettingsCookieDomain(original["cookieDomain"], d, config) + transformed["attribute_propagation_settings"] = + 
flattenIapSettingsApplicationSettingsAttributePropagationSettings(original["attributePropagationSettings"], d, config) + return []interface{}{transformed} +} +func flattenIapSettingsApplicationSettingsCsmSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["rctoken_aud"] = + flattenIapSettingsApplicationSettingsCsmSettingsRctokenAud(original["rctokenAud"], d, config) + return []interface{}{transformed} +} +func flattenIapSettingsApplicationSettingsCsmSettingsRctokenAud(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapSettingsApplicationSettingsAccessDeniedPageSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["access_denied_page_uri"] = + flattenIapSettingsApplicationSettingsAccessDeniedPageSettingsAccessDeniedPageUri(original["accessDeniedPageUri"], d, config) + transformed["generate_troubleshooting_uri"] = + flattenIapSettingsApplicationSettingsAccessDeniedPageSettingsGenerateTroubleshootingUri(original["generateTroubleshootingUri"], d, config) + transformed["remediation_token_generation_enabled"] = + flattenIapSettingsApplicationSettingsAccessDeniedPageSettingsRemediationTokenGenerationEnabled(original["remediationTokenGenerationEnabled"], d, config) + return []interface{}{transformed} +} +func flattenIapSettingsApplicationSettingsAccessDeniedPageSettingsAccessDeniedPageUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapSettingsApplicationSettingsAccessDeniedPageSettingsGenerateTroubleshootingUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapSettingsApplicationSettingsAccessDeniedPageSettingsRemediationTokenGenerationEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapSettingsApplicationSettingsCookieDomain(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapSettingsApplicationSettingsAttributePropagationSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["output_credentials"] = + flattenIapSettingsApplicationSettingsAttributePropagationSettingsOutputCredentials(original["outputCredentials"], d, config) + transformed["expression"] = + flattenIapSettingsApplicationSettingsAttributePropagationSettingsExpression(original["expression"], d, config) + transformed["enable"] = + flattenIapSettingsApplicationSettingsAttributePropagationSettingsEnable(original["enable"], d, config) + return []interface{}{transformed} +} +func flattenIapSettingsApplicationSettingsAttributePropagationSettingsOutputCredentials(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapSettingsApplicationSettingsAttributePropagationSettingsExpression(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapSettingsApplicationSettingsAttributePropagationSettingsEnable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandIapSettingsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIapSettingsAccessSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedGcipSettings, err := expandIapSettingsAccessSettingsGcipSettings(original["gcip_settings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGcipSettings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["gcipSettings"] = transformedGcipSettings + } + + transformedCorsSettings, err := expandIapSettingsAccessSettingsCorsSettings(original["cors_settings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCorsSettings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["corsSettings"] = transformedCorsSettings + } + + transformedOauthSettings, err := expandIapSettingsAccessSettingsOauthSettings(original["oauth_settings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOauthSettings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["oauthSettings"] = transformedOauthSettings + } + + transformedReauthSettings, err := expandIapSettingsAccessSettingsReauthSettings(original["reauth_settings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReauthSettings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["reauthSettings"] = transformedReauthSettings + } + + transformedAllowedDomainsSettings, err := expandIapSettingsAccessSettingsAllowedDomainsSettings(original["allowed_domains_settings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllowedDomainsSettings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allowedDomainsSettings"] = transformedAllowedDomainsSettings + } + + transformedWorkforceIdentitySettings, err := expandIapSettingsAccessSettingsWorkforceIdentitySettings(original["workforce_identity_settings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWorkforceIdentitySettings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["workforceIdentitySettings"] = transformedWorkforceIdentitySettings + } + + transformedIdentitySources, err := expandIapSettingsAccessSettingsIdentitySources(original["identity_sources"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIdentitySources); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["identitySources"] = transformedIdentitySources + } + + return transformed, nil +} + +func expandIapSettingsAccessSettingsGcipSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + 
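// Generated MMv1 expand pattern: each nested Terraform field is expanded
// individually and copied into the camelCase API payload only when the
// reflected value is valid and non-empty, so unset blocks are omitted from
// the request body.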
transformedTenantIds, err := expandIapSettingsAccessSettingsGcipSettingsTenantIds(original["tenant_ids"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTenantIds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tenantIds"] = transformedTenantIds + } + + transformedLoginPageUri, err := expandIapSettingsAccessSettingsGcipSettingsLoginPageUri(original["login_page_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLoginPageUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["loginPageUri"] = transformedLoginPageUri + } + + return transformed, nil +} + +func expandIapSettingsAccessSettingsGcipSettingsTenantIds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIapSettingsAccessSettingsGcipSettingsLoginPageUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIapSettingsAccessSettingsCorsSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAllowHttpOptions, err := expandIapSettingsAccessSettingsCorsSettingsAllowHttpOptions(original["allow_http_options"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllowHttpOptions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allowHttpOptions"] = transformedAllowHttpOptions + } + + return transformed, nil +} + +func expandIapSettingsAccessSettingsCorsSettingsAllowHttpOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIapSettingsAccessSettingsOauthSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLoginHint, err := expandIapSettingsAccessSettingsOauthSettingsLoginHint(original["login_hint"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLoginHint); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["loginHint"] = transformedLoginHint + } + + transformedProgrammaticClients, err := expandIapSettingsAccessSettingsOauthSettingsProgrammaticClients(original["programmatic_clients"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProgrammaticClients); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["programmaticClients"] = transformedProgrammaticClients + } + + return transformed, nil +} + +func expandIapSettingsAccessSettingsOauthSettingsLoginHint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIapSettingsAccessSettingsOauthSettingsProgrammaticClients(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIapSettingsAccessSettingsReauthSettings(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMethod, err := expandIapSettingsAccessSettingsReauthSettingsMethod(original["method"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMethod); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["method"] = transformedMethod + } + + transformedMaxAge, err := expandIapSettingsAccessSettingsReauthSettingsMaxAge(original["max_age"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxAge); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxAge"] = transformedMaxAge + } + + transformedPolicyType, err := expandIapSettingsAccessSettingsReauthSettingsPolicyType(original["policy_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPolicyType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["policyType"] = transformedPolicyType + } + + return transformed, nil +} + +func expandIapSettingsAccessSettingsReauthSettingsMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIapSettingsAccessSettingsReauthSettingsMaxAge(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIapSettingsAccessSettingsReauthSettingsPolicyType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIapSettingsAccessSettingsAllowedDomainsSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDomains, err := expandIapSettingsAccessSettingsAllowedDomainsSettingsDomains(original["domains"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDomains); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["domains"] = transformedDomains + } + + transformedEnable, err := expandIapSettingsAccessSettingsAllowedDomainsSettingsEnable(original["enable"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnable); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enable"] = transformedEnable + } + + return transformed, nil +} + +func expandIapSettingsAccessSettingsAllowedDomainsSettingsDomains(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIapSettingsAccessSettingsAllowedDomainsSettingsEnable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIapSettingsAccessSettingsWorkforceIdentitySettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWorkforcePools, err := 
expandIapSettingsAccessSettingsWorkforceIdentitySettingsWorkforcePools(original["workforce_pools"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWorkforcePools); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["workforcePools"] = transformedWorkforcePools + } + + transformedOauth2, err := expandIapSettingsAccessSettingsWorkforceIdentitySettingsOauth2(original["oauth2"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOauth2); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["oauth2"] = transformedOauth2 + } + + return transformed, nil +} + +func expandIapSettingsAccessSettingsWorkforceIdentitySettingsWorkforcePools(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIapSettingsAccessSettingsWorkforceIdentitySettingsOauth2(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedClientId, err := expandIapSettingsAccessSettingsWorkforceIdentitySettingsOauth2ClientId(original["client_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedClientId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["clientId"] = transformedClientId + } + + transformedClientSecret, err := expandIapSettingsAccessSettingsWorkforceIdentitySettingsOauth2ClientSecret(original["client_secret"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedClientSecret); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["clientSecret"] = transformedClientSecret + } + + transformedClientSecretSha256, err := expandIapSettingsAccessSettingsWorkforceIdentitySettingsOauth2ClientSecretSha256(original["client_secret_sha256"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedClientSecretSha256); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["clientSecretSha256"] = transformedClientSecretSha256 + } + + return transformed, nil +} + +func expandIapSettingsAccessSettingsWorkforceIdentitySettingsOauth2ClientId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIapSettingsAccessSettingsWorkforceIdentitySettingsOauth2ClientSecret(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIapSettingsAccessSettingsWorkforceIdentitySettingsOauth2ClientSecretSha256(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIapSettingsAccessSettingsIdentitySources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIapSettingsApplicationSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCsmSettings, err := 
expandIapSettingsApplicationSettingsCsmSettings(original["csm_settings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCsmSettings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["csmSettings"] = transformedCsmSettings + } + + transformedAccessDeniedPageSettings, err := expandIapSettingsApplicationSettingsAccessDeniedPageSettings(original["access_denied_page_settings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAccessDeniedPageSettings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["accessDeniedPageSettings"] = transformedAccessDeniedPageSettings + } + + transformedCookieDomain, err := expandIapSettingsApplicationSettingsCookieDomain(original["cookie_domain"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCookieDomain); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cookieDomain"] = transformedCookieDomain + } + + transformedAttributePropagationSettings, err := expandIapSettingsApplicationSettingsAttributePropagationSettings(original["attribute_propagation_settings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAttributePropagationSettings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["attributePropagationSettings"] = transformedAttributePropagationSettings + } + + return transformed, nil +} + +func expandIapSettingsApplicationSettingsCsmSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRctokenAud, err := expandIapSettingsApplicationSettingsCsmSettingsRctokenAud(original["rctoken_aud"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRctokenAud); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rctokenAud"] = transformedRctokenAud + } + + return transformed, nil +} + +func expandIapSettingsApplicationSettingsCsmSettingsRctokenAud(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIapSettingsApplicationSettingsAccessDeniedPageSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAccessDeniedPageUri, err := expandIapSettingsApplicationSettingsAccessDeniedPageSettingsAccessDeniedPageUri(original["access_denied_page_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAccessDeniedPageUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["accessDeniedPageUri"] = transformedAccessDeniedPageUri + } + + transformedGenerateTroubleshootingUri, err := expandIapSettingsApplicationSettingsAccessDeniedPageSettingsGenerateTroubleshootingUri(original["generate_troubleshooting_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGenerateTroubleshootingUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["generateTroubleshootingUri"] = 
transformedGenerateTroubleshootingUri + } + + transformedRemediationTokenGenerationEnabled, err := expandIapSettingsApplicationSettingsAccessDeniedPageSettingsRemediationTokenGenerationEnabled(original["remediation_token_generation_enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRemediationTokenGenerationEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["remediationTokenGenerationEnabled"] = transformedRemediationTokenGenerationEnabled + } + + return transformed, nil +} + +func expandIapSettingsApplicationSettingsAccessDeniedPageSettingsAccessDeniedPageUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIapSettingsApplicationSettingsAccessDeniedPageSettingsGenerateTroubleshootingUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIapSettingsApplicationSettingsAccessDeniedPageSettingsRemediationTokenGenerationEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIapSettingsApplicationSettingsCookieDomain(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIapSettingsApplicationSettingsAttributePropagationSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedOutputCredentials, err := expandIapSettingsApplicationSettingsAttributePropagationSettingsOutputCredentials(original["output_credentials"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOutputCredentials); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["outputCredentials"] = transformedOutputCredentials + } + + transformedExpression, err := expandIapSettingsApplicationSettingsAttributePropagationSettingsExpression(original["expression"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExpression); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["expression"] = transformedExpression + } + + transformedEnable, err := expandIapSettingsApplicationSettingsAttributePropagationSettingsEnable(original["enable"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnable); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enable"] = transformedEnable + } + + return transformed, nil +} + +func expandIapSettingsApplicationSettingsAttributePropagationSettingsOutputCredentials(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIapSettingsApplicationSettingsAttributePropagationSettingsExpression(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIapSettingsApplicationSettingsAttributePropagationSettingsEnable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_project_default_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_project_default_config.go
deleted file mode 100644
index f33f61bb41f..00000000000
--- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_project_default_config.go
+++ /dev/null
@@ -1,810 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-// ----------------------------------------------------------------------------
-//
-//     *** AUTO GENERATED CODE *** Type: MMv1 ***
-//
-// ----------------------------------------------------------------------------
-//
-//     This file is automatically generated by Magic Modules and manual
-//     changes will be clobbered when the file is regenerated.
-//
-//     Please read more about how to change this file in
-//     .github/CONTRIBUTING.md.
-//
-// ----------------------------------------------------------------------------
-
-package identityplatform
-
-import (
-	"fmt"
-	"log"
-	"net/http"
-	"reflect"
-	"strings"
-	"time"
-
-	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff"
-	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
-
-	"github.com/hashicorp/terraform-provider-google/google/tpgresource"
-	transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport"
-)
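Both this file's import function (further down) and the IAP one above rely on tpgresource.ParseImportId: the import id is matched against an ordered list of patterns, and each named capture group such as (?P<project>...) is copied into the corresponding resource field. A standalone sketch of that technique using only the standard regexp package; parseImportID and its field map are illustrative stand-ins, not the provider's actual helper:

package main

import (
	"fmt"
	"regexp"
)

// parseImportID tries each pattern in order and, on the first match, copies
// every named capture group into a field map (the real helper writes into
// the Terraform resource data instead).
func parseImportID(patterns []string, id string) (map[string]string, bool) {
	for _, p := range patterns {
		re := regexp.MustCompile(p)
		m := re.FindStringSubmatch(id)
		if m == nil {
			continue
		}
		fields := map[string]string{}
		for i, name := range re.SubexpNames() {
			if i > 0 && name != "" {
				fields[name] = m[i]
			}
		}
		return fields, true
	}
	return nil, false
}

func main() {
	fields, ok := parseImportID([]string{
		"^projects/(?P<project>[^/]+)/config$",
		"^(?P<project>[^/]+)$",
	}, "projects/my-project/config")
	fmt.Println(fields, ok) // map[project:my-project] true
}

The first pattern that matches wins, which is why the most specific import formats are listed first.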
-func ResourceIdentityPlatformProjectDefaultConfig() *schema.Resource {
-	return &schema.Resource{
-		Create: resourceIdentityPlatformProjectDefaultConfigCreate,
-		Read:   resourceIdentityPlatformProjectDefaultConfigRead,
-		Update: resourceIdentityPlatformProjectDefaultConfigUpdate,
-		Delete: resourceIdentityPlatformProjectDefaultConfigDelete,
-
-		Importer: &schema.ResourceImporter{
-			State: resourceIdentityPlatformProjectDefaultConfigImport,
-		},
-
-		Timeouts: &schema.ResourceTimeout{
-			Create: schema.DefaultTimeout(20 * time.Minute),
-			Update: schema.DefaultTimeout(20 * time.Minute),
-			Delete: schema.DefaultTimeout(20 * time.Minute),
-		},
-
-		CustomizeDiff: customdiff.All(
-			tpgresource.DefaultProviderProject,
-		),
-
-		DeprecationMessage: "`google_identity_platform_project_default_config` is deprecated and will be removed in the next major release of the provider. Use the `google_identity_platform_config` resource instead. It contains a more comprehensive list of fields, and was created before `google_identity_platform_project_default_config` was added.",
-
-		Schema: map[string]*schema.Schema{
-			"sign_in": {
-				Type:        schema.TypeList,
-				Optional:    true,
-				Description: `Configuration related to local sign in methods.`,
-				MaxItems:    1,
-				Elem: &schema.Resource{
-					Schema: map[string]*schema.Schema{
-						"allow_duplicate_emails": {
-							Type:        schema.TypeBool,
-							Optional:    true,
-							Description: `Whether to allow more than one account to have the same email.`,
-						},
-						"anonymous": {
-							Type:        schema.TypeList,
-							Optional:    true,
-							Description: `Configuration options related to authenticating an anonymous user.`,
-							MaxItems:    1,
-							Elem: &schema.Resource{
-								Schema: map[string]*schema.Schema{
-									"enabled": {
-										Type:        schema.TypeBool,
-										Required:    true,
-										Description: `Whether anonymous user auth is enabled for the project or not.`,
-									},
-								},
-							},
-						},
-						"email": {
-							Type:        schema.TypeList,
-							Optional:    true,
-							Description: `Configuration options related to authenticating a user by their email address.`,
-							MaxItems:    1,
-							Elem: &schema.Resource{
-								Schema: map[string]*schema.Schema{
-									"enabled": {
-										Type:        schema.TypeBool,
-										Optional:    true,
-										Description: `Whether email auth is enabled for the project or not.`,
-									},
-									"password_required": {
-										Type:     schema.TypeBool,
-										Optional: true,
-										Description: `Whether a password is required for email auth or not. If true, both an email and
-password must be provided to sign in. If false, a user may sign in via either
-email/password or email link.`,
-									},
-								},
-							},
-						},
-						"phone_number": {
-							Type:        schema.TypeList,
-							Optional:    true,
-							Description: `Configuration options related to authenticating a user by their phone number.`,
-							MaxItems:    1,
-							Elem: &schema.Resource{
-								Schema: map[string]*schema.Schema{
-									"enabled": {
-										Type:        schema.TypeBool,
-										Optional:    true,
-										Description: `Whether phone number auth is enabled for the project or not.`,
-									},
-									"test_phone_numbers": {
-										Type:        schema.TypeMap,
-										Optional:    true,
-										Description: `A map of <test phone number, fake code> that can be used for phone auth testing.`,
-										Elem:        &schema.Schema{Type: schema.TypeString},
-									},
-								},
-							},
-						},
-						"hash_config": {
-							Type:        schema.TypeList,
-							Computed:    true,
-							Description: `Output only. Hash config information.`,
-							Elem: &schema.Resource{
-								Schema: map[string]*schema.Schema{
-									"algorithm": {
-										Type:        schema.TypeString,
-										Computed:    true,
-										Description: `Different password hash algorithms used in Identity Toolkit.`,
-									},
-									"memory_cost": {
-										Type:        schema.TypeInt,
-										Computed:    true,
-										Description: `Memory cost for hash calculation. Used by scrypt and other similar password derivation algorithms. See https://tools.ietf.org/html/rfc7914 for explanation of field.`,
-									},
-									"rounds": {
-										Type:        schema.TypeInt,
-										Computed:    true,
-										Description: `How many rounds for hash calculation. Used by scrypt and other similar password derivation algorithms.`,
-									},
-									"salt_separator": {
-										Type:        schema.TypeString,
-										Computed:    true,
-										Description: `Non-printable character to be inserted between the salt and plain text password in base64.`,
-									},
-									"signer_key": {
-										Type:        schema.TypeString,
-										Computed:    true,
-										Description: `Signer key in base64.`,
-									},
-								},
-							},
-						},
-					},
-				},
-			},
-			"name": {
-				Type:        schema.TypeString,
-				Computed:    true,
-				Description: `The name of the Config resource.
Example: "projects/my-awesome-project/config"`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIdentityPlatformProjectDefaultConfigCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - signInProp, err := expandIdentityPlatformProjectDefaultConfigSignIn(d.Get("sign_in"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("sign_in"); !tpgresource.IsEmptyValue(reflect.ValueOf(signInProp)) && (ok || !reflect.DeepEqual(v, signInProp)) { - obj["signIn"] = signInProp - } - - url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/config") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new ProjectDefaultConfig: %#v", obj) - billingProject := "" - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ProjectDefaultConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := tpgresource.GetBillingProject(d, config); err == nil { - billingProject = bp - } - - headers := make(http.Header) - res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "PATCH", - Project: billingProject, - RawURL: url, - UserAgent: userAgent, - Body: obj, - Timeout: d.Timeout(schema.TimeoutCreate), - Headers: headers, - }) - if err != nil { - return fmt.Errorf("Error creating ProjectDefaultConfig: %s", err) - } - if err := d.Set("name", flattenIdentityPlatformProjectDefaultConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := tpgresource.ReplaceVars(d, config, "{{project}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating ProjectDefaultConfig %q: %#v", d.Id(), res) - - return resourceIdentityPlatformProjectDefaultConfigRead(d, meta) -} - -func resourceIdentityPlatformProjectDefaultConfigRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/config") - if err != nil { - return err - } - - billingProject := "" - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ProjectDefaultConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := tpgresource.GetBillingProject(d, config); err == nil { - billingProject = bp - } - - headers := make(http.Header) - res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "GET", - Project: billingProject, - RawURL: url, - UserAgent: userAgent, - Headers: headers, - }) - if err != nil { - return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("IdentityPlatformProjectDefaultConfig %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading 
ProjectDefaultConfig: %s", err) - } - - if err := d.Set("name", flattenIdentityPlatformProjectDefaultConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading ProjectDefaultConfig: %s", err) - } - if err := d.Set("sign_in", flattenIdentityPlatformProjectDefaultConfigSignIn(res["signIn"], d, config)); err != nil { - return fmt.Errorf("Error reading ProjectDefaultConfig: %s", err) - } - - return nil -} - -func resourceIdentityPlatformProjectDefaultConfigUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ProjectDefaultConfig: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - signInProp, err := expandIdentityPlatformProjectDefaultConfigSignIn(d.Get("sign_in"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("sign_in"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, signInProp)) { - obj["signIn"] = signInProp - } - - url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/config") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating ProjectDefaultConfig %q: %#v", d.Id(), obj) - headers := make(http.Header) - updateMask := []string{} - - if d.HasChange("sign_in") { - updateMask = append(updateMask, "signIn") - } - // updateMask is a URL parameter but not present in the schema, so ReplaceVars - // won't set it - url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := tpgresource.GetBillingProject(d, config); err == nil { - billingProject = bp - } - - // if updateMask is empty we are not updating anything so skip the post - if len(updateMask) > 0 { - res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "PATCH", - Project: billingProject, - RawURL: url, - UserAgent: userAgent, - Body: obj, - Timeout: d.Timeout(schema.TimeoutUpdate), - Headers: headers, - }) - - if err != nil { - return fmt.Errorf("Error updating ProjectDefaultConfig %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating ProjectDefaultConfig %q: %#v", d.Id(), res) - } - - } - - return resourceIdentityPlatformProjectDefaultConfigRead(d, meta) -} - -func resourceIdentityPlatformProjectDefaultConfigDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ProjectDefaultConfig: %s", err) - } - billingProject = project - - url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/config") - if err != nil { - return err - } - - var obj map[string]interface{} - - // err == nil indicates that the billing_project value was found - if bp, err := tpgresource.GetBillingProject(d, config); err == nil { - billingProject = bp - } - - headers := make(http.Header) - - log.Printf("[DEBUG] Deleting 
ProjectDefaultConfig %q", d.Id()) - res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "PATCH", - Project: billingProject, - RawURL: url, - UserAgent: userAgent, - Body: obj, - Timeout: d.Timeout(schema.TimeoutDelete), - Headers: headers, - }) - if err != nil { - return transport_tpg.HandleNotFoundError(err, d, "ProjectDefaultConfig") - } - - log.Printf("[DEBUG] Finished deleting ProjectDefaultConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceIdentityPlatformProjectDefaultConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*transport_tpg.Config) - if err := tpgresource.ParseImportId([]string{ - "^projects/(?P[^/]+)/config/(?P[^/]+)$", - "^(?P[^/]+)/(?P[^/]+)$", - "^(?P[^/]+)$", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := tpgresource.ReplaceVars(d, config, "{{project}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenIdentityPlatformProjectDefaultConfigName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v -} - -func flattenIdentityPlatformProjectDefaultConfigSignIn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["email"] = - flattenIdentityPlatformProjectDefaultConfigSignInEmail(original["email"], d, config) - transformed["phone_number"] = - flattenIdentityPlatformProjectDefaultConfigSignInPhoneNumber(original["phoneNumber"], d, config) - transformed["anonymous"] = - flattenIdentityPlatformProjectDefaultConfigSignInAnonymous(original["anonymous"], d, config) - transformed["allow_duplicate_emails"] = - flattenIdentityPlatformProjectDefaultConfigSignInAllowDuplicateEmails(original["allowDuplicateEmails"], d, config) - transformed["hash_config"] = - flattenIdentityPlatformProjectDefaultConfigSignInHashConfig(original["hashConfig"], d, config) - return []interface{}{transformed} -} -func flattenIdentityPlatformProjectDefaultConfigSignInEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enabled"] = - flattenIdentityPlatformProjectDefaultConfigSignInEmailEnabled(original["enabled"], d, config) - transformed["password_required"] = - flattenIdentityPlatformProjectDefaultConfigSignInEmailPasswordRequired(original["passwordRequired"], d, config) - return []interface{}{transformed} -} -func flattenIdentityPlatformProjectDefaultConfigSignInEmailEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v -} - -func flattenIdentityPlatformProjectDefaultConfigSignInEmailPasswordRequired(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v -} - -func flattenIdentityPlatformProjectDefaultConfigSignInPhoneNumber(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enabled"] 
= - flattenIdentityPlatformProjectDefaultConfigSignInPhoneNumberEnabled(original["enabled"], d, config) - transformed["test_phone_numbers"] = - flattenIdentityPlatformProjectDefaultConfigSignInPhoneNumberTestPhoneNumbers(original["testPhoneNumbers"], d, config) - return []interface{}{transformed} -} -func flattenIdentityPlatformProjectDefaultConfigSignInPhoneNumberEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v -} - -func flattenIdentityPlatformProjectDefaultConfigSignInPhoneNumberTestPhoneNumbers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v -} - -func flattenIdentityPlatformProjectDefaultConfigSignInAnonymous(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enabled"] = - flattenIdentityPlatformProjectDefaultConfigSignInAnonymousEnabled(original["enabled"], d, config) - return []interface{}{transformed} -} -func flattenIdentityPlatformProjectDefaultConfigSignInAnonymousEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v -} - -func flattenIdentityPlatformProjectDefaultConfigSignInAllowDuplicateEmails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v -} - -func flattenIdentityPlatformProjectDefaultConfigSignInHashConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["algorithm"] = - flattenIdentityPlatformProjectDefaultConfigSignInHashConfigAlgorithm(original["algorithm"], d, config) - transformed["signer_key"] = - flattenIdentityPlatformProjectDefaultConfigSignInHashConfigSignerKey(original["signerKey"], d, config) - transformed["salt_separator"] = - flattenIdentityPlatformProjectDefaultConfigSignInHashConfigSaltSeparator(original["saltSeparator"], d, config) - transformed["rounds"] = - flattenIdentityPlatformProjectDefaultConfigSignInHashConfigRounds(original["rounds"], d, config) - transformed["memory_cost"] = - flattenIdentityPlatformProjectDefaultConfigSignInHashConfigMemoryCost(original["memoryCost"], d, config) - return []interface{}{transformed} -} -func flattenIdentityPlatformProjectDefaultConfigSignInHashConfigAlgorithm(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v -} - -func flattenIdentityPlatformProjectDefaultConfigSignInHashConfigSignerKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v -} - -func flattenIdentityPlatformProjectDefaultConfigSignInHashConfigSaltSeparator(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v -} - -func flattenIdentityPlatformProjectDefaultConfigSignInHashConfigRounds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - 
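The rounds flattener above and the memory_cost flattener below share the same fixed64 normalization: the API may serialize an int64 either as a decimal string (proto fixed64 in JSON) or as a JSON number, which encoding/json decodes to float64. A minimal standalone sketch of that conversion, with strconv.ParseInt standing in for the provider's tpgresource.StringToFixed64 helper (whose exact internals are not shown in this patch):

package main

import (
	"fmt"
	"strconv"
)

// normalizeFixed64 mirrors the two flatteners: decimal strings become int64,
// JSON numbers (float64) become int, and anything else is passed through for
// Terraform core to handle.
func normalizeFixed64(v interface{}) interface{} {
	if s, ok := v.(string); ok {
		if n, err := strconv.ParseInt(s, 10, 64); err == nil {
			return n
		}
	}
	if f, ok := v.(float64); ok {
		return int(f)
	}
	return v
}

func main() {
	fmt.Println(normalizeFixed64("8"), normalizeFixed64(14.0), normalizeFixed64(true))
	// Output: 8 14 true
}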
-func flattenIdentityPlatformProjectDefaultConfigSignInHashConfigMemoryCost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func expandIdentityPlatformProjectDefaultConfigSignIn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEmail, err := expandIdentityPlatformProjectDefaultConfigSignInEmail(original["email"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEmail); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["email"] = transformedEmail - } - - transformedPhoneNumber, err := expandIdentityPlatformProjectDefaultConfigSignInPhoneNumber(original["phone_number"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPhoneNumber); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["phoneNumber"] = transformedPhoneNumber - } - - transformedAnonymous, err := expandIdentityPlatformProjectDefaultConfigSignInAnonymous(original["anonymous"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAnonymous); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["anonymous"] = transformedAnonymous - } - - transformedAllowDuplicateEmails, err := expandIdentityPlatformProjectDefaultConfigSignInAllowDuplicateEmails(original["allow_duplicate_emails"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAllowDuplicateEmails); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["allowDuplicateEmails"] = transformedAllowDuplicateEmails - } - - transformedHashConfig, err := expandIdentityPlatformProjectDefaultConfigSignInHashConfig(original["hash_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHashConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["hashConfig"] = transformedHashConfig - } - - return transformed, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnabled, err := expandIdentityPlatformProjectDefaultConfigSignInEmailEnabled(original["enabled"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["enabled"] = transformedEnabled - } - - transformedPasswordRequired, err := expandIdentityPlatformProjectDefaultConfigSignInEmailPasswordRequired(original["password_required"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPasswordRequired); val.IsValid() && !tpgresource.IsEmptyValue(val) { - 
transformed["passwordRequired"] = transformedPasswordRequired - } - - return transformed, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInEmailEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInEmailPasswordRequired(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInPhoneNumber(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnabled, err := expandIdentityPlatformProjectDefaultConfigSignInPhoneNumberEnabled(original["enabled"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["enabled"] = transformedEnabled - } - - transformedTestPhoneNumbers, err := expandIdentityPlatformProjectDefaultConfigSignInPhoneNumberTestPhoneNumbers(original["test_phone_numbers"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTestPhoneNumbers); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["testPhoneNumbers"] = transformedTestPhoneNumbers - } - - return transformed, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInPhoneNumberEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInPhoneNumberTestPhoneNumbers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInAnonymous(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnabled, err := expandIdentityPlatformProjectDefaultConfigSignInAnonymousEnabled(original["enabled"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["enabled"] = transformedEnabled - } - - return transformed, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInAnonymousEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInAllowDuplicateEmails(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInHashConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := 
raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAlgorithm, err := expandIdentityPlatformProjectDefaultConfigSignInHashConfigAlgorithm(original["algorithm"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAlgorithm); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["algorithm"] = transformedAlgorithm - } - - transformedSignerKey, err := expandIdentityPlatformProjectDefaultConfigSignInHashConfigSignerKey(original["signer_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSignerKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["signerKey"] = transformedSignerKey - } - - transformedSaltSeparator, err := expandIdentityPlatformProjectDefaultConfigSignInHashConfigSaltSeparator(original["salt_separator"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSaltSeparator); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["saltSeparator"] = transformedSaltSeparator - } - - transformedRounds, err := expandIdentityPlatformProjectDefaultConfigSignInHashConfigRounds(original["rounds"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRounds); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["rounds"] = transformedRounds - } - - transformedMemoryCost, err := expandIdentityPlatformProjectDefaultConfigSignInHashConfigMemoryCost(original["memory_cost"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMemoryCost); val.IsValid() && !tpgresource.IsEmptyValue(val) { - transformed["memoryCost"] = transformedMemoryCost - } - - return transformed, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInHashConfigAlgorithm(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInHashConfigSignerKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInHashConfigSaltSeparator(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInHashConfigRounds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInHashConfigMemoryCost(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/integrationconnectors/resource_integration_connectors_connection.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/integrationconnectors/resource_integration_connectors_connection.go index c2f9cf33168..d5b8343f223 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/integrationconnectors/resource_integration_connectors_connection.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/integrationconnectors/resource_integration_connectors_connection.go @@ -138,8 +138,8 @@ func 
ResourceIntegrationConnectorsConnection() *schema.Resource { "kms_key_name": { Type: schema.TypeString, Optional: true, - Description: `The [KMS key name] with which the content of the Operation is encrypted. The expected -format: projects/*/locations/*/keyRings/*/cryptoKeys/*. + Description: `The [KMS key name] with which the content of the Operation is encrypted. The +expected format: projects/*/locations/*/keyRings/*/cryptoKeys/*. Will be empty string if google managed.`, }, }, @@ -434,8 +434,8 @@ format as: projects/*/secrets/*/versions/*.`, "kms_key_name": { Type: schema.TypeString, Optional: true, - Description: `The [KMS key name] with which the content of the Operation is encrypted. The expected -format: projects/*/locations/*/keyRings/*/cryptoKeys/*. + Description: `The [KMS key name] with which the content of the Operation is encrypted. The +expected format: projects/*/locations/*/keyRings/*/cryptoKeys/*. Will be empty string if google managed.`, }, }, @@ -584,8 +584,8 @@ Will be empty string if google managed.`, "kms_key_name": { Type: schema.TypeString, Optional: true, - Description: `The [KMS key name] with which the content of the Operation is encrypted. The expected -format: projects/*/locations/*/keyRings/*/cryptoKeys/*. + Description: `The [KMS key name] with which the content of the Operation is encrypted. The +expected format: projects/*/locations/*/keyRings/*/cryptoKeys/*. Will be empty string if google managed.`, }, "type": { @@ -695,8 +695,8 @@ format as: projects/*/secrets/*/versions/*.`, "kms_key_name": { Type: schema.TypeString, Optional: true, - Description: `The [KMS key name] with which the content of the Operation is encrypted. The expected -format: projects/*/locations/*/keyRings/*/cryptoKeys/*. + Description: `The [KMS key name] with which the content of the Operation is encrypted. The +expected format: projects/*/locations/*/keyRings/*/cryptoKeys/*. Will be empty string if google managed.`, }, "type": { @@ -871,8 +871,8 @@ Please refer to the field 'effective_labels' for all of the labels present on th "kms_key_name": { Type: schema.TypeString, Optional: true, - Description: `The [KMS key name] with which the content of the Operation is encrypted. The expected -format: projects/*/locations/*/keyRings/*/cryptoKeys/*. + Description: `The [KMS key name] with which the content of the Operation is encrypted. The +expected format: projects/*/locations/*/keyRings/*/cryptoKeys/*. 
Will be empty string if google managed.`, }, "type": { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/integrations/resource_integrations_client.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/integrations/resource_integrations_client.go index 6c013ce7ea8..99aa38384d5 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/integrations/resource_integrations_client.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/integrations/resource_integrations_client.go @@ -107,30 +107,12 @@ encrypted with GMEK.`, }, }, }, - ConflictsWith: []string{"provision_gmek"}, }, "create_sample_integrations": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Indicates if sample integrations should be created along with provisioning.`, - ConflictsWith: []string{"create_sample_workflows"}, - }, - "create_sample_workflows": { - Type: schema.TypeBool, - Optional: true, - Deprecated: "`create_sample_workflows` is deprecated and will be removed in a future major release. Use `create_sample_integrations` instead.", - ForceNew: true, - Description: `Indicates if sample workflow should be created along with provisioning.`, - ConflictsWith: []string{"create_sample_integrations"}, - }, - "provision_gmek": { - Type: schema.TypeBool, - Optional: true, - Deprecated: "`provision_gmek` is deprecated and will be removed in a future major release. Client would be provisioned as gmek if `cloud_kms_config` is not given.", - ForceNew: true, - Description: `Indicates provision with GMEK or CMEK.`, - ConflictsWith: []string{"cloud_kms_config"}, + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Indicates if sample integrations should be created along with provisioning.`, }, "run_as_service_account": { Type: schema.TypeString, @@ -163,24 +145,12 @@ func resourceIntegrationsClientCreate(d *schema.ResourceData, meta interface{}) } else if v, ok := d.GetOkExists("cloud_kms_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(cloudKmsConfigProp)) && (ok || !reflect.DeepEqual(v, cloudKmsConfigProp)) { obj["cloudKmsConfig"] = cloudKmsConfigProp } - createSampleWorkflowsProp, err := expandIntegrationsClientCreateSampleWorkflows(d.Get("create_sample_workflows"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("create_sample_workflows"); !tpgresource.IsEmptyValue(reflect.ValueOf(createSampleWorkflowsProp)) && (ok || !reflect.DeepEqual(v, createSampleWorkflowsProp)) { - obj["createSampleWorkflows"] = createSampleWorkflowsProp - } createSampleIntegrationsProp, err := expandIntegrationsClientCreateSampleIntegrations(d.Get("create_sample_integrations"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("create_sample_integrations"); !tpgresource.IsEmptyValue(reflect.ValueOf(createSampleIntegrationsProp)) && (ok || !reflect.DeepEqual(v, createSampleIntegrationsProp)) { obj["createSampleIntegrations"] = createSampleIntegrationsProp } - provisionGmekProp, err := expandIntegrationsClientProvisionGmek(d.Get("provision_gmek"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("provision_gmek"); !tpgresource.IsEmptyValue(reflect.ValueOf(provisionGmekProp)) && (ok || !reflect.DeepEqual(v, provisionGmekProp)) { - obj["provisionGmek"] = provisionGmekProp - } runAsServiceAccountProp, err := 
expandIntegrationsClientRunAsServiceAccount(d.Get("run_as_service_account"), d, config) if err != nil { return err @@ -445,18 +415,10 @@ func expandIntegrationsClientCloudKmsConfigKmsProjectId(v interface{}, d tpgreso return v, nil } -func expandIntegrationsClientCreateSampleWorkflows(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil -} - func expandIntegrationsClientCreateSampleIntegrations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandIntegrationsClientProvisionGmek(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { - return v, nil -} - func expandIntegrationsClientRunAsServiceAccount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_crypto_key_latest_version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_crypto_key_latest_version.go new file mode 100644 index 00000000000..c3af8248682 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_crypto_key_latest_version.go @@ -0,0 +1,181 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package kms + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceGoogleKmsLatestCryptoKeyVersion() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleKmsLatestCryptoKeyVersionRead, + Schema: map[string]*schema.Schema{ + "crypto_key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "version": { + Type: schema.TypeInt, + Computed: true, + }, + "algorithm": { + Type: schema.TypeString, + Computed: true, + }, + "protection_level": { + Type: schema.TypeString, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + "filter": { + Type: schema.TypeString, + Optional: true, + Description: ` + The filter argument is used to add a filter query parameter that limits which type of cryptoKeyVersion is retrieved as the latest by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + + Example filter values if filtering on state. + + * "state:ENABLED" will retrieve the latest cryptoKeyVersion that has the state "ENABLED". 
+ + [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + `, + }, + "public_key": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "algorithm": { + Type: schema.TypeString, + Computed: true, + }, + "pem": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceGoogleKmsLatestCryptoKeyVersionRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + cryptoKeyId, err := ParseKmsCryptoKeyId(d.Get("crypto_key").(string), config) + if err != nil { + return err + } + + id := fmt.Sprintf("%s/latestCryptoKeyVersion", cryptoKeyId.CryptoKeyId()) + if filter, ok := d.GetOk("filter"); ok { + id += "/filter=" + filter.(string) + } + d.SetId(id) + + versions, err := dataSourceKMSCryptoKeyVersionsList(d, meta, cryptoKeyId.CryptoKeyId(), userAgent) + if err != nil { + return err + } + + // Grab the latest version (the last element of the returned list) + lv := len(versions) - 1 + if lv < 0 { + return fmt.Errorf("No CryptoKeyVersions found in crypto key %s", cryptoKeyId.CryptoKeyId()) + } + + latestVersion := versions[lv].(map[string]interface{}) + + // The google_kms_crypto_key resource and data source set + // id to the full resource name (projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{name}}) + // and set name to just {{name}}. + + if err := d.Set("name", flattenKmsCryptoKeyVersionName(latestVersion["name"], d)); err != nil { + return fmt.Errorf("Error setting LatestCryptoKeyVersion: %s", err) + } + if err := d.Set("version", flattenKmsCryptoKeyVersionVersion(latestVersion["name"], d)); err != nil { + return fmt.Errorf("Error setting CryptoKeyVersion: %s", err) + } + if err := d.Set("state", flattenKmsCryptoKeyVersionState(latestVersion["state"], d)); err != nil { + return fmt.Errorf("Error setting LatestCryptoKeyVersion: %s", err) + } + if err := d.Set("protection_level", flattenKmsCryptoKeyVersionProtectionLevel(latestVersion["protectionLevel"], d)); err != nil { + return fmt.Errorf("Error setting LatestCryptoKeyVersion: %s", err) + } + if err := d.Set("algorithm", flattenKmsCryptoKeyVersionAlgorithm(latestVersion["algorithm"], d)); err != nil { + return fmt.Errorf("Error setting LatestCryptoKeyVersion: %s", err) + } + + url, err := tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}{{crypto_key}}/cryptoKeyVersions/{{version}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Getting attributes for CryptoKeyVersion: %#v", url) + + url, err = tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}{{crypto_key}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Getting purpose of CryptoKey: %#v", url) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: cryptoKeyId.KeyRingId.Project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleDataSourceNotFoundError(err, d, fmt.Sprintf("KmsCryptoKey %q", d.Id()), url) + } + + if res["purpose"] == "ASYMMETRIC_SIGN" || res["purpose"] == "ASYMMETRIC_DECRYPT" { + url, err = tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}{{crypto_key}}/cryptoKeyVersions/{{version}}/publicKey") + if err != nil { + return err + } + log.Printf("[DEBUG] Getting public key of CryptoKeyVersion: %#v", url) + + res, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ +
Config: config, + Method: "GET", + Project: cryptoKeyId.KeyRingId.Project, + RawURL: url, + UserAgent: userAgent, + Timeout: d.Timeout(schema.TimeoutRead), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsCryptoKeyVersionsPendingGeneration}, + }) + + if err != nil { + log.Printf("Error generating public key: %s", err) + return err + } + + if err := d.Set("public_key", flattenKmsCryptoKeyVersionPublicKey(res, d)); err != nil { + return fmt.Errorf("Error setting CryptoKeyVersion public key: %s", err) + } + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_crypto_key_versions.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_crypto_key_versions.go new file mode 100644 index 00000000000..93732a3452f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_crypto_key_versions.go @@ -0,0 +1,255 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package kms + +import ( + "fmt" + "log" + "net/http" + "regexp" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceGoogleKmsCryptoKeyVersions() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(DataSourceGoogleKmsCryptoKeyVersion().Schema) + + dsSchema["id"] = &schema.Schema{ + Type: schema.TypeString, + Computed: true, + } + + return &schema.Resource{ + Read: dataSourceGoogleKmsCryptoKeyVersionsRead, + Schema: map[string]*schema.Schema{ + "crypto_key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "versions": { + Type: schema.TypeList, + Computed: true, + Description: "A list of all the retrieved cryptoKeyVersions from the provided crypto key", + Elem: &schema.Resource{ + Schema: dsSchema, + }, + }, + "filter": { + Type: schema.TypeString, + Optional: true, + Description: ` + The filter argument is used to add a filter query parameter that limits which cryptoKeyVersions are retrieved by the data source: ?filter={{filter}}. + Example values: + + * "name:my-cryptokey-version-" will retrieve cryptoKeyVersions that contain "my-cryptokey-version-" anywhere in their name. Note: names take the form projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}/cryptoKeyVersions/{{cryptoKeyVersion}}. + * "name=projects/my-project/locations/global/keyRings/my-key-ring/cryptoKeys/my-key-1/cryptoKeyVersions/1" will only retrieve a key with that exact name. 
+ + [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + `, + }, + "public_key": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "algorithm": { + Type: schema.TypeString, + Computed: true, + }, + "pem": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceGoogleKmsCryptoKeyVersionsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + cryptoKeyId, err := ParseKmsCryptoKeyId(d.Get("crypto_key").(string), config) + if err != nil { + return err + } + + id := fmt.Sprintf("%s/cryptoKeyVersions", cryptoKeyId.CryptoKeyId()) + if filter, ok := d.GetOk("filter"); ok { + id += "/filter=" + filter.(string) + } + d.SetId(id) + + log.Printf("[DEBUG] Searching for cryptoKeyVersions in crypto key %s", cryptoKeyId.CryptoKeyId()) + versions, err := dataSourceKMSCryptoKeyVersionsList(d, meta, cryptoKeyId.CryptoKeyId(), userAgent) + if err != nil { + return err + } + + log.Printf("[DEBUG] Found %d cryptoKeyVersions in crypto key %s", len(versions), cryptoKeyId.CryptoKeyId()) + value, err := flattenKMSCryptoKeyVersionsList(d, config, versions, cryptoKeyId.CryptoKeyId()) + if err != nil { + return fmt.Errorf("error flattening cryptoKeyVersions list: %s", err) + } + if err := d.Set("versions", value); err != nil { + return fmt.Errorf("error setting versions: %s", err) + } + + if len(value) == 0 { + return nil + } + + url, err := tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}{{crypto_key}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Getting purpose of CryptoKey: %#v", url) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: cryptoKeyId.KeyRingId.Project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleDataSourceNotFoundError(err, d, fmt.Sprintf("KmsCryptoKey %q", d.Id()), url) + } + + if res["purpose"] == "ASYMMETRIC_SIGN" || res["purpose"] == "ASYMMETRIC_DECRYPT" { + url, err = tpgresource.ReplaceVars(d, config, fmt.Sprintf("{{KMSBasePath}}{{crypto_key}}/cryptoKeyVersions/%d/publicKey", d.Get("versions.0.version"))) + if err != nil { + return err + } + log.Printf("[DEBUG] Getting public key of CryptoKeyVersion: %#v", url) + + res, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: cryptoKeyId.KeyRingId.Project, + RawURL: url, + UserAgent: userAgent, + Timeout: d.Timeout(schema.TimeoutRead), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsCryptoKeyVersionsPendingGeneration}, + }) + + if err != nil { + log.Printf("Error generating public key: %s", err) + return err + } + + if err := d.Set("public_key", flattenKmsCryptoKeyVersionPublicKey(res, d)); err != nil { + return fmt.Errorf("Error setting CryptoKeyVersion public key: %s", err) + } + } + + return nil +} + +func dataSourceKMSCryptoKeyVersionsList(d *schema.ResourceData, meta interface{}, cryptoKeyId string, userAgent string) ([]interface{}, error) { + config := meta.(*transport_tpg.Config) + + url, err := tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}{{crypto_key}}/cryptoKeyVersions") + if err != nil { + return nil, err + } + + billingProject := "" + + if parts := 
regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { + billingProject = parts[1] + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // Always include the filter param, and optionally include the pageToken parameter for subsequent requests + var params = make(map[string]string, 0) + if filter, ok := d.GetOk("filter"); ok { + log.Printf("[DEBUG] Search for cryptoKeyVersions in crypto key %s is using filter ?filter=%s", cryptoKeyId, filter.(string)) + params["filter"] = filter.(string) + } + + cryptoKeyVersions := make([]interface{}, 0) + for { + // Depending on previous iterations, params might contain a pageToken param + url, err = transport_tpg.AddQueryParams(url, params) + if err != nil { + return nil, err + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + // ErrorRetryPredicates used to allow retrying if rate limits are hit when requesting multiple pages in a row + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429RetryableQuotaError}, + }) + if err != nil { + return nil, transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("KMSCryptoKeyVersions %q", d.Id())) + } + + if res == nil { + // Decoding the object has resulted in it being gone. It may be marked deleted + log.Printf("[DEBUG] Removing KMSCryptoKeyVersion because it no longer exists.") + d.SetId("") + return nil, nil + } + + // Store info from this page + if v, ok := res["cryptoKeyVersions"].([]interface{}); ok { + cryptoKeyVersions = append(cryptoKeyVersions, v...) + } + + // Handle pagination for next loop, or break loop + v, ok := res["nextPageToken"] + if ok { + params["pageToken"] = v.(string) + } + if !ok { + break + } + } + + return cryptoKeyVersions, nil +} + +func flattenKMSCryptoKeyVersionsList(d *schema.ResourceData, meta interface{}, versionsList []interface{}, cryptoKeyId string) ([]interface{}, error) { + var versions []interface{} + for _, v := range versionsList { + version := v.(map[string]interface{}) + + data := map[string]interface{}{} + // The google_kms_crypto_key resource and data source set + // id to the full resource name (projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{name}}) + // and set name to just {{name}}. 
+ data["id"] = version["name"] + data["name"] = flattenKmsCryptoKeyVersionName(version["name"], d) + data["crypto_key"] = cryptoKeyId + data["version"] = flattenKmsCryptoKeyVersionVersion(version["name"], d) + + data["state"] = flattenKmsCryptoKeyVersionState(version["state"], d) + data["protection_level"] = flattenKmsCryptoKeyVersionProtectionLevel(version["protectionLevel"], d) + data["algorithm"] = flattenKmsCryptoKeyVersionAlgorithm(version["algorithm"], d) + + versions = append(versions, data) + } + + return versions, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/iam_kms_ekm_connection.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/iam_kms_ekm_connection.go new file mode 100644 index 00000000000..dc5c65e51a0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/iam_kms_ekm_connection.go @@ -0,0 +1,249 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package kms + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var KMSEkmConnectionIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type KMSEkmConnectionIamUpdater struct { + project string + location string + name string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func KMSEkmConnectionIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("name"); ok { + values["name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/ekmConnections/(?P[^/]+)", 
"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)"}, d, config, d.Get("name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &KMSEkmConnectionIamUpdater{ + project: values["project"], + location: values["location"], + name: values["name"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return u, nil +} + +func KMSEkmConnectionIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/ekmConnections/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &KMSEkmConnectionIamUpdater{ + project: values["project"], + location: values["location"], + name: values["name"], + d: d, + Config: config, + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *KMSEkmConnectionIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyEkmConnectionUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + url, err = transport_tpg.AddQueryParams(url, map[string]string{"options.requestedPolicyVersion": fmt.Sprintf("%d", tpgiamresource.IamPolicyVersion)}) + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *KMSEkmConnectionIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyEkmConnectionUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: 
url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *KMSEkmConnectionIamUpdater) qualifyEkmConnectionUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{KMSBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/ekmConnections/%s", u.project, u.location, u.name), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *KMSEkmConnectionIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/ekmConnections/%s", u.project, u.location, u.name) +} + +func (u *KMSEkmConnectionIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-kms-ekmconnection-%s", u.GetResourceId()) +} + +func (u *KMSEkmConnectionIamUpdater) DescribeResource() string { + return fmt.Sprintf("kms ekmconnection %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/resource_kms_autokey_config_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/resource_kms_autokey_config_sweeper.go new file mode 100644 index 00000000000..c0cc643b9cb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/resource_kms_autokey_config_sweeper.go @@ -0,0 +1,126 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package kms + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("KMSAutokeyConfig", testSweepKMSAutokeyConfig) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepKMSAutokeyConfig(region string) error { + resourceName := "KMSAutokeyConfig" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://cloudkms.googleapis.com/v1/folders/{{folder}}/autokeyConfig", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + listUrl = strings.Replace(listUrl, "folders/folders/", "folders/", 1) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: 
config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["autokeyConfigs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be swept + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://cloudkms.googleapis.com/v1/folders/{{folder}}/autokeyConfig?updateMask=keyProject" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + deleteUrl = strings.Replace(deleteUrl, "folders/folders/", "folders/", 1) + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/iam_logging_log_view.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/iam_logging_log_view.go new file mode 100644 index 00000000000..90edb687443 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/iam_logging_log_view.go @@ -0,0 +1,246 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package logging + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var LoggingLogViewIamSchema = map[string]*schema.Schema{ + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "bucket": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type LoggingLogViewIamUpdater struct { + parent string + location string + bucket string + name string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func LoggingLogViewIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + if v, ok := d.GetOk("parent"); ok { + values["parent"] = v.(string) + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("bucket"); ok { + values["bucket"] = v.(string) + } + + if v, ok := d.GetOk("name"); ok { + values["name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"(?P.+)/locations/(?P[^/]+)/buckets/(?P[^/]+)/views/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &LoggingLogViewIamUpdater{ + parent: values["parent"], + location: values["location"], + bucket: values["bucket"], + name: values["name"], + d: d, + Config: config, + } + + if err := d.Set("parent", u.parent); err != nil { + return nil, fmt.Errorf("Error setting parent: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("bucket", u.bucket); err != nil { + return nil, fmt.Errorf("Error setting bucket: %s", err) + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return u, nil +} + +func LoggingLogViewIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"(?P.+)/locations/(?P[^/]+)/buckets/(?P[^/]+)/views/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &LoggingLogViewIamUpdater{ + parent: values["parent"], + location: values["location"], + bucket: values["bucket"], + name: values["name"], + d: d, + Config: config, + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error 
setting name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *LoggingLogViewIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyLogViewUrl("getIamPolicy") + if err != nil { + return nil, err + } + + var obj map[string]interface{} + obj = map[string]interface{}{ + "options": map[string]interface{}{ + "requestedPolicyVersion": tpgiamresource.IamPolicyVersion, + }, + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *LoggingLogViewIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyLogViewUrl("setIamPolicy") + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *LoggingLogViewIamUpdater) qualifyLogViewUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{LoggingBasePath}}%s:%s", fmt.Sprintf("%s/locations/%s/buckets/%s/views/%s", u.parent, u.location, u.bucket, u.name), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *LoggingLogViewIamUpdater) GetResourceId() string { + return fmt.Sprintf("%s/locations/%s/buckets/%s/views/%s", u.parent, u.location, u.bucket, u.name) +} + +func (u *LoggingLogViewIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-logging-logview-%s", u.GetResourceId()) +} + +func (u *LoggingLogViewIamUpdater) DescribeResource() string { + return fmt.Sprintf("logging logview %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_log_scope.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_log_scope.go new file mode 100644 index 00000000000..8445927ad31 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_log_scope.go @@ -0,0 +1,411 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package logging + +import ( + "fmt" + "log" + "net/http" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceLoggingLogScope() *schema.Resource { + return &schema.Resource{ + Create: resourceLoggingLogScopeCreate, + Read: resourceLoggingLogScopeRead, + Update: resourceLoggingLogScopeUpdate, + Delete: resourceLoggingLogScopeDelete, + + Importer: &schema.ResourceImporter{ + State: resourceLoggingLogScopeImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `The resource name of the log scope. For example: \'projects/my-project/locations/global/logScopes/my-log-scope\'`, + }, + "resource_names": { + Type: schema.TypeList, + Required: true, + Description: `Names of one or more parent resources: * \'projects/[PROJECT_ID]\' May alternatively be one or more views: * \'projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]\' A log scope can include a maximum of 50 projects and a maximum of 100 resources in total.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Describes this log scope.`, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The location of the resource. The only supported location is global so far.`, + }, + "parent": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The parent of the resource.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The creation timestamp of the log scope.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. 
The last update timestamp of the log scope.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceLoggingLogScopeCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandLoggingLogScopeName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + resourceNamesProp, err := expandLoggingLogScopeResourceNames(d.Get("resource_names"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("resource_names"); !tpgresource.IsEmptyValue(reflect.ValueOf(resourceNamesProp)) && (ok || !reflect.DeepEqual(v, resourceNamesProp)) { + obj["resourceNames"] = resourceNamesProp + } + descriptionProp, err := expandLoggingLogScopeDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + + obj, err = resourceLoggingLogScopeEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{LoggingBasePath}}{{parent}}/locations/{{location}}/logScopes?logScopeId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new LogScope: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating LogScope: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/locations/{{location}}/logScopes/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating LogScope %q: %#v", d.Id(), res) + + return resourceLoggingLogScopeRead(d, meta) +} + +func resourceLoggingLogScopeRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{LoggingBasePath}}{{parent}}/locations/{{location}}/logScopes/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("LoggingLogScope %q", d.Id())) + } + + if err := d.Set("resource_names", 
flattenLoggingLogScopeResourceNames(res["resourceNames"], d, config)); err != nil { + return fmt.Errorf("Error reading LogScope: %s", err) + } + if err := d.Set("description", flattenLoggingLogScopeDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading LogScope: %s", err) + } + if err := d.Set("create_time", flattenLoggingLogScopeCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading LogScope: %s", err) + } + if err := d.Set("update_time", flattenLoggingLogScopeUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading LogScope: %s", err) + } + + return nil +} + +func resourceLoggingLogScopeUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + resourceNamesProp, err := expandLoggingLogScopeResourceNames(d.Get("resource_names"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("resource_names"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, resourceNamesProp)) { + obj["resourceNames"] = resourceNamesProp + } + descriptionProp, err := expandLoggingLogScopeDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + + obj, err = resourceLoggingLogScopeEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{LoggingBasePath}}{{parent}}/locations/{{location}}/logScopes/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating LogScope %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("resource_names") { + updateMask = append(updateMask, "resourceNames") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating LogScope %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating LogScope %q: %#v", d.Id(), res) + } + + } + + return resourceLoggingLogScopeRead(d, meta) +} + +func resourceLoggingLogScopeDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, 
"{{LoggingBasePath}}{{parent}}/locations/{{location}}/logScopes/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting LogScope %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "LogScope") + } + + log.Printf("[DEBUG] Finished deleting LogScope %q: %#v", d.Id(), res) + return nil +} + +func resourceLoggingLogScopeImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^(?P.+)/locations/(?P[^/]+)/logScopes/(?P[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/locations/{{location}}/logScopes/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenLoggingLogScopeResourceNames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingLogScopeDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingLogScopeCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingLogScopeUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandLoggingLogScopeName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLoggingLogScopeResourceNames(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLoggingLogScopeDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceLoggingLogScopeEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // Extract any empty fields from the bucket field. + // Extract parent, location from name + parent := d.Get("parent").(string) + name := d.Get("name").(string) + parent, err := tpgresource.ExtractFieldByPattern("parent", parent, name, "((projects|folders|organizations|billingAccounts)/[a-z0-9A-Z-]*)/locations/.*") + if err != nil { + return nil, fmt.Errorf("error extracting parent field: %s", err) + } + location := d.Get("location").(string) + location, err = tpgresource.ExtractFieldByPattern("location", location, name, "[a-zA-Z]*/[a-z0-9A-Z-]*/locations/([a-z0-9-]*)/logScopes/.*") + if err != nil { + return nil, fmt.Errorf("error extracting location field: %s", err) + } + // Set parent to the extracted value. + d.Set("parent", parent) + // Set all the other fields to their short forms before forming url and setting ID. 
+ name = tpgresource.GetResourceNameFromSelfLink(name) + d.Set("location", location) + d.Set("name", name) + return obj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_service_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_log_scope_sweeper.go similarity index 89% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_service_sweeper.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_log_scope_sweeper.go index d90dbf6a3ca..0e2cc402b52 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_service_sweeper.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_log_scope_sweeper.go @@ -15,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package cloudrunv2 +package logging import ( "context" @@ -30,12 +30,12 @@ import ( ) func init() { - sweeper.AddTestSweepers("CloudRunV2Service", testSweepCloudRunV2Service) + sweeper.AddTestSweepers("LoggingLogScope", testSweepLoggingLogScope) } // At the time of writing, the CI only passes us-central1 as the region -func testSweepCloudRunV2Service(region string) error { - resourceName := "CloudRunV2Service" +func testSweepLoggingLogScope(region string) error { + resourceName := "LoggingLogScope" log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) config, err := sweeper.SharedConfigForRegion(region) @@ -64,7 +64,7 @@ func testSweepCloudRunV2Service(region string) error { }, } - listTemplate := strings.Split("https://run.googleapis.com/v2/projects/{{project}}/locations/{{location}}/services", "?")[0] + listTemplate := strings.Split("https://logging.googleapis.com/v2/{{parent}}/locations/{{location}}/logScopes", "?")[0] listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) if err != nil { log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) @@ -83,7 +83,7 @@ func testSweepCloudRunV2Service(region string) error { return nil } - resourceList, ok := res["services"] + resourceList, ok := res["logScopes"] if !ok { log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") return nil @@ -108,7 +108,7 @@ func testSweepCloudRunV2Service(region string) error { continue } - deleteTemplate := "https://run.googleapis.com/v2/projects/{{project}}/locations/{{location}}/services/{{name}}" + deleteTemplate := "https://logging.googleapis.com/v2/{{parent}}/locations/{{location}}/logScopes/{{name}}" deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) if err != nil { log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_log_view.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_log_view.go index fd50ea2a5aa..59229e8465b 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_log_view.go +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_log_view.go @@ -195,6 +195,11 @@ func resourceLoggingLogViewRead(d *schema.ResourceData, meta interface{}) error } headers := make(http.Header) + resourceLoggingLogViewEncoder(d, nil, nil) + url, err = tpgresource.ReplaceVars(d, config, "{{LoggingBasePath}}{{parent}}/locations/{{location}}/buckets/{{bucket}}/views/{{name}}") + if err != nil { + return err + } res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, Method: "GET", diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_project_bucket_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_project_bucket_config.go index 2a4a79a12b0..8322268c6be 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_project_bucket_config.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_project_bucket_config.go @@ -189,7 +189,7 @@ func resourceLoggingProjectBucketConfigAcquireOrCreate(parentType string, iDFunc UserAgent: userAgent, }) if res == nil { - log.Printf("[DEGUG] Loggin Bucket not exist %s", id) + log.Printf("[DEBUG] Logging Bucket does not exist %s", id) // we need to pass the id in here because we don't want to set it in state // until we know there won't be any errors on create return resourceLoggingProjectBucketConfigCreate(d, meta, id) @@ -214,7 +214,11 @@ func resourceLoggingProjectBucketConfigCreate(d *schema.ResourceData, meta inter obj["description"] = d.Get("description") obj["locked"] = d.Get("locked") obj["retentionDays"] = d.Get("retention_days") - obj["analyticsEnabled"] = d.Get("enable_analytics") + // Only set analyticsEnabled if it has been explicitly configured. + analyticsRawValue := d.GetRawConfig().GetAttr("enable_analytics") + if !analyticsRawValue.IsNull() { + obj["analyticsEnabled"] = analyticsRawValue.True() + } obj["cmekSettings"] = expandCmekSettings(d.Get("cmek_settings")) obj["indexConfigs"] = expandIndexConfigs(d.Get("index_configs")) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_sink.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_sink.go index c08e5c705cc..6f6cd5c4afa 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_sink.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_sink.go @@ -65,9 +65,10 @@ func resourceLoggingSinkSchema() map[string]*schema.Schema { Description: `A description of this exclusion.`, }, "filter": { - Type: schema.TypeString, - Required: true, - Description: `An advanced logs filter that matches the log entries to be excluded. By using the sample function, you can exclude less than 100% of the matching log entries`, + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: OptionalSurroundingSpacesSuppress, + Description: `An advanced logs filter that matches the log entries to be excluded.
By using the sample function, you can exclude less than 100% of the matching log entries`, }, "disabled": { Type: schema.TypeBool, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/looker/resource_looker_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/looker/resource_looker_instance.go index 904687e1eaa..67d216f5fdf 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/looker/resource_looker_instance.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/looker/resource_looker_instance.go @@ -248,6 +248,11 @@ a year.`, }, }, }, + "fips_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `FIPS 140-2 Encryption enablement for Looker (Google Cloud Core).`, + }, "maintenance_window": { Type: schema.TypeList, Optional: true, @@ -349,6 +354,58 @@ disrupt service.`, Description: `Whether private IP is enabled on the Looker instance.`, Default: false, }, + "psc_config": { + Type: schema.TypeList, + Optional: true, + Description: `Information for Private Service Connect (PSC) setup for a Looker instance.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_vpcs": { + Type: schema.TypeList, + Optional: true, + Description: `List of VPCs that are allowed ingress into the Looker instance.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "service_attachments": { + Type: schema.TypeList, + Optional: true, + Description: `List of egress service attachment configurations.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "local_fqdn": { + Type: schema.TypeString, + Optional: true, + Description: `Fully qualified domain name that will be used in the private DNS record created for the service attachment.`, + }, + "target_service_attachment_uri": { + Type: schema.TypeString, + Optional: true, + Description: `URI of the service attachment to connect to.`, + }, + "connection_status": { + Type: schema.TypeString, + Computed: true, + Description: `Status of the service attachment connection.`, + }, + }, + }, + }, + "looker_service_attachment_uri": { + Type: schema.TypeString, + Computed: true, + Description: `URI of the Looker service attachment.`, + }, + }, + }, + }, + "psc_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether Private Service Connect (PSC) is enabled on the Looker instance`, + }, "public_ip_enabled": { Type: schema.TypeBool, Optional: true, @@ -439,6 +496,15 @@ accurate to nanoseconds.`, Description: `The time the instance was updated in RFC3339 UTC "Zulu" format, accurate to nanoseconds.`, }, + "deletion_policy": { + Type: schema.TypeString, + Optional: true, + Description: `Policy to determine if the instance should be deleted forcefully. +If setting deletion_policy = "FORCE", the Looker instance will be deleted regardless +of its nested resources. If set to "DEFAULT", Looker instances that still have +nested resources will return an error.
Possible values: DEFAULT, FORCE`, + Default: "DEFAULT", + }, "project": { Type: schema.TypeString, Optional: true, @@ -482,6 +548,12 @@ func resourceLookerInstanceCreate(d *schema.ResourceData, meta interface{}) erro } else if v, ok := d.GetOkExists("encryption_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(encryptionConfigProp)) && (ok || !reflect.DeepEqual(v, encryptionConfigProp)) { obj["encryptionConfig"] = encryptionConfigProp } + fipsEnabledProp, err := expandLookerInstanceFipsEnabled(d.Get("fips_enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("fips_enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(fipsEnabledProp)) && (ok || !reflect.DeepEqual(v, fipsEnabledProp)) { + obj["fipsEnabled"] = fipsEnabledProp + } maintenanceWindowProp, err := expandLookerInstanceMaintenanceWindow(d.Get("maintenance_window"), d, config) if err != nil { return err @@ -506,6 +578,18 @@ func resourceLookerInstanceCreate(d *schema.ResourceData, meta interface{}) erro } else if v, ok := d.GetOkExists("private_ip_enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(privateIpEnabledProp)) && (ok || !reflect.DeepEqual(v, privateIpEnabledProp)) { obj["privateIpEnabled"] = privateIpEnabledProp } + pscConfigProp, err := expandLookerInstancePscConfig(d.Get("psc_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("psc_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(pscConfigProp)) && (ok || !reflect.DeepEqual(v, pscConfigProp)) { + obj["pscConfig"] = pscConfigProp + } + pscEnabledProp, err := expandLookerInstancePscEnabled(d.Get("psc_enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("psc_enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(pscEnabledProp)) && (ok || !reflect.DeepEqual(v, pscEnabledProp)) { + obj["pscEnabled"] = pscEnabledProp + } publicIpEnabledProp, err := expandLookerInstancePublicIpEnabled(d.Get("public_ip_enabled"), d, config) if err != nil { return err @@ -637,6 +721,12 @@ func resourceLookerInstanceRead(d *schema.ResourceData, meta interface{}) error return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("LookerInstance %q", d.Id())) } + // Explicitly set virtual fields to default values if unset + if _, ok := d.GetOkExists("deletion_policy"); !ok { + if err := d.Set("deletion_policy", "DEFAULT"); err != nil { + return fmt.Errorf("Error setting deletion_policy: %s", err) + } + } if err := d.Set("project", project); err != nil { return fmt.Errorf("Error reading Instance: %s", err) } @@ -659,6 +749,9 @@ func resourceLookerInstanceRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("encryption_config", flattenLookerInstanceEncryptionConfig(res["encryptionConfig"], d, config)); err != nil { return fmt.Errorf("Error reading Instance: %s", err) } + if err := d.Set("fips_enabled", flattenLookerInstanceFipsEnabled(res["fipsEnabled"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } if err := d.Set("ingress_private_ip", flattenLookerInstanceIngressPrivateIp(res["ingressPrivateIp"], d, config)); err != nil { return fmt.Errorf("Error reading Instance: %s", err) } @@ -680,6 +773,12 @@ func resourceLookerInstanceRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("private_ip_enabled", flattenLookerInstancePrivateIpEnabled(res["privateIpEnabled"], d, config)); err != nil { return fmt.Errorf("Error reading Instance: %s", err) } + if err := d.Set("psc_config", flattenLookerInstancePscConfig(res["pscConfig"], d, 
config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("psc_enabled", flattenLookerInstancePscEnabled(res["pscEnabled"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } if err := d.Set("public_ip_enabled", flattenLookerInstancePublicIpEnabled(res["publicIpEnabled"], d, config)); err != nil { return fmt.Errorf("Error reading Instance: %s", err) } @@ -739,6 +838,12 @@ func resourceLookerInstanceUpdate(d *schema.ResourceData, meta interface{}) erro } else if v, ok := d.GetOkExists("encryption_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, encryptionConfigProp)) { obj["encryptionConfig"] = encryptionConfigProp } + fipsEnabledProp, err := expandLookerInstanceFipsEnabled(d.Get("fips_enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("fips_enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fipsEnabledProp)) { + obj["fipsEnabled"] = fipsEnabledProp + } maintenanceWindowProp, err := expandLookerInstanceMaintenanceWindow(d.Get("maintenance_window"), d, config) if err != nil { return err @@ -757,6 +862,18 @@ func resourceLookerInstanceUpdate(d *schema.ResourceData, meta interface{}) erro } else if v, ok := d.GetOkExists("private_ip_enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, privateIpEnabledProp)) { obj["privateIpEnabled"] = privateIpEnabledProp } + pscConfigProp, err := expandLookerInstancePscConfig(d.Get("psc_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("psc_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, pscConfigProp)) { + obj["pscConfig"] = pscConfigProp + } + pscEnabledProp, err := expandLookerInstancePscEnabled(d.Get("psc_enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("psc_enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, pscEnabledProp)) { + obj["pscEnabled"] = pscEnabledProp + } publicIpEnabledProp, err := expandLookerInstancePublicIpEnabled(d.Get("public_ip_enabled"), d, config) if err != nil { return err @@ -807,6 +924,10 @@ func resourceLookerInstanceUpdate(d *schema.ResourceData, meta interface{}) erro updateMask = append(updateMask, "encryptionConfig") } + if d.HasChange("fips_enabled") { + updateMask = append(updateMask, "fipsEnabled") + } + if d.HasChange("maintenance_window") { updateMask = append(updateMask, "maintenanceWindow") } @@ -819,6 +940,15 @@ func resourceLookerInstanceUpdate(d *schema.ResourceData, meta interface{}) erro updateMask = append(updateMask, "privateIpEnabled") } + if d.HasChange("psc_config") { + updateMask = append(updateMask, "psc_config.allowed_vpcs", + "psc_config.service_attachments") + } + + if d.HasChange("psc_enabled") { + updateMask = append(updateMask, "pscEnabled") + } + if d.HasChange("public_ip_enabled") { updateMask = append(updateMask, "publicIpEnabled") } @@ -906,6 +1036,10 @@ func resourceLookerInstanceDelete(d *schema.ResourceData, meta interface{}) erro } headers := make(http.Header) + // Forcefully delete the Looker instance + if deletionPolicy := d.Get("deletion_policy"); deletionPolicy == "FORCE" { + url = url + "?force=True" + } log.Printf("[DEBUG] Deleting Instance %q", d.Id()) res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ @@ -953,6 +1087,11 @@ func resourceLookerInstanceImport(d *schema.ResourceData, meta 
interface{}) ([]* } d.SetId(id) + // Explicitly set virtual fields to default values on import + if err := d.Set("deletion_policy", "DEFAULT"); err != nil { + return nil, fmt.Errorf("Error setting deletion_policy: %s", err) + } + return []*schema.ResourceData{d}, nil } @@ -1254,6 +1393,10 @@ func flattenLookerInstanceEncryptionConfigKmsKeyNameVersion(v interface{}, d *sc return v } +func flattenLookerInstanceFipsEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenLookerInstanceIngressPrivateIp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -1384,6 +1527,67 @@ func flattenLookerInstancePrivateIpEnabled(v interface{}, d *schema.ResourceData return v } +func flattenLookerInstancePscConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["allowed_vpcs"] = + flattenLookerInstancePscConfigAllowedVpcs(original["allowedVpcs"], d, config) + transformed["looker_service_attachment_uri"] = + flattenLookerInstancePscConfigLookerServiceAttachmentUri(original["lookerServiceAttachmentUri"], d, config) + transformed["service_attachments"] = + flattenLookerInstancePscConfigServiceAttachments(original["serviceAttachments"], d, config) + return []interface{}{transformed} +} +func flattenLookerInstancePscConfigAllowedVpcs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLookerInstancePscConfigLookerServiceAttachmentUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLookerInstancePscConfigServiceAttachments(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "connection_status": flattenLookerInstancePscConfigServiceAttachmentsConnectionStatus(original["connectionStatus"], d, config), + "local_fqdn": flattenLookerInstancePscConfigServiceAttachmentsLocalFqdn(original["localFqdn"], d, config), + "target_service_attachment_uri": flattenLookerInstancePscConfigServiceAttachmentsTargetServiceAttachmentUri(original["targetServiceAttachmentUri"], d, config), + }) + } + return transformed +} +func flattenLookerInstancePscConfigServiceAttachmentsConnectionStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLookerInstancePscConfigServiceAttachmentsLocalFqdn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLookerInstancePscConfigServiceAttachmentsTargetServiceAttachmentUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLookerInstancePscEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenLookerInstancePublicIpEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -1738,6 +1942,10 @@ func 
expandLookerInstanceEncryptionConfigKmsKeyNameVersion(v interface{}, d tpgr return v, nil } +func expandLookerInstanceFipsEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandLookerInstanceMaintenanceWindow(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { @@ -1866,6 +2074,99 @@ func expandLookerInstancePrivateIpEnabled(v interface{}, d tpgresource.Terraform return v, nil } +func expandLookerInstancePscConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAllowedVpcs, err := expandLookerInstancePscConfigAllowedVpcs(original["allowed_vpcs"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllowedVpcs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allowedVpcs"] = transformedAllowedVpcs + } + + transformedLookerServiceAttachmentUri, err := expandLookerInstancePscConfigLookerServiceAttachmentUri(original["looker_service_attachment_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLookerServiceAttachmentUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["lookerServiceAttachmentUri"] = transformedLookerServiceAttachmentUri + } + + transformedServiceAttachments, err := expandLookerInstancePscConfigServiceAttachments(original["service_attachments"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceAttachments); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serviceAttachments"] = transformedServiceAttachments + } + + return transformed, nil +} + +func expandLookerInstancePscConfigAllowedVpcs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstancePscConfigLookerServiceAttachmentUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstancePscConfigServiceAttachments(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedConnectionStatus, err := expandLookerInstancePscConfigServiceAttachmentsConnectionStatus(original["connection_status"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConnectionStatus); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["connectionStatus"] = transformedConnectionStatus + } + + transformedLocalFqdn, err := expandLookerInstancePscConfigServiceAttachmentsLocalFqdn(original["local_fqdn"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocalFqdn); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["localFqdn"] = transformedLocalFqdn + } + + transformedTargetServiceAttachmentUri, err := 
expandLookerInstancePscConfigServiceAttachmentsTargetServiceAttachmentUri(original["target_service_attachment_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTargetServiceAttachmentUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["targetServiceAttachmentUri"] = transformedTargetServiceAttachmentUri + } + + req = append(req, transformed) + } + return req, nil +} + +func expandLookerInstancePscConfigServiceAttachmentsConnectionStatus(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstancePscConfigServiceAttachmentsLocalFqdn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstancePscConfigServiceAttachmentsTargetServiceAttachmentUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstancePscEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandLookerInstancePublicIpEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/migrationcenter/resource_migration_center_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/migrationcenter/resource_migration_center_group.go index 2ae2652e97d..72fdb7db737 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/migrationcenter/resource_migration_center_group.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/migrationcenter/resource_migration_center_group.go @@ -80,7 +80,7 @@ func ResourceMigrationCenterGroup() *schema.Resource { "labels": { Type: schema.TypeMap, Optional: true, - Description: `Labels as key value pairs. + Description: `Labels as key value pairs. **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.`, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/migrationcenter/resource_migration_center_preference_set.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/migrationcenter/resource_migration_center_preference_set.go index d38971d26d3..8308083ddca 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/migrationcenter/resource_migration_center_preference_set.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/migrationcenter/resource_migration_center_preference_set.go @@ -84,14 +84,9 @@ func ResourceMigrationCenterPreferenceSet() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "commitment_plan": { - Type: schema.TypeString, - Optional: true, - Description: `Commitment plan to consider when calculating costs for virtual machine insights and recommendations. If you are unsure which value to set, a 3 year commitment plan is often a good value to start with. 
- Possible values: - COMMITMENT_PLAN_UNSPECIFIED -COMMITMENT_PLAN_NONE -COMMITMENT_PLAN_ONE_YEAR -COMMITMENT_PLAN_THREE_YEARS`, + Type: schema.TypeString, + Optional: true, + Description: `Commitment plan to consider when calculating costs for virtual machine insights and recommendations. If you are unsure which value to set, a 3 year commitment plan is often a good value to start with. Possible values: 'COMMITMENT_PLAN_UNSPECIFIED', 'COMMITMENT_PLAN_NONE', 'COMMITMENT_PLAN_ONE_YEAR', 'COMMITMENT_PLAN_THREE_YEARS'`, }, "compute_engine_preferences": { Type: schema.TypeList, @@ -101,13 +96,9 @@ COMMITMENT_PLAN_THREE_YEARS`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "license_type": { - Type: schema.TypeString, - Optional: true, - Description: `License type to consider when calculating costs for virtual machine insights and recommendations. If unspecified, costs are calculated based on the default licensing plan. - Possible values: - LICENSE_TYPE_UNSPECIFIED -LICENSE_TYPE_DEFAULT -LICENSE_TYPE_BRING_YOUR_OWN_LICENSE`, + Type: schema.TypeString, + Optional: true, + Description: `License type to consider when calculating costs for virtual machine insights and recommendations. If unspecified, costs are calculated based on the default licensing plan. Possible values: 'LICENSE_TYPE_UNSPECIFIED', 'LICENSE_TYPE_DEFAULT', 'LICENSE_TYPE_BRING_YOUR_OWN_LICENSE'`, }, "machine_preferences": { Type: schema.TypeList, @@ -155,14 +146,9 @@ LICENSE_TYPE_BRING_YOUR_OWN_LICENSE`, }, }, "sizing_optimization_strategy": { - Type: schema.TypeString, - Optional: true, - Description: `Sizing optimization strategy specifies the preferred strategy used when extrapolating usage data to calculate insights and recommendations for a virtual machine. If you are unsure which value to set, a moderate sizing optimization strategy is often a good value to start with. - Possible values: - SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED -SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE -SIZING_OPTIMIZATION_STRATEGY_MODERATE -SIZING_OPTIMIZATION_STRATEGY_AGGRESSIVE`, + Type: schema.TypeString, + Optional: true, + Description: `Sizing optimization strategy specifies the preferred strategy used when extrapolating usage data to calculate insights and recommendations for a virtual machine. If you are unsure which value to set, a moderate sizing optimization strategy is often a good value to start with. Possible values: 'SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED', 'SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE', 'SIZING_OPTIMIZATION_STRATEGY_MODERATE', 'SIZING_OPTIMIZATION_STRATEGY_AGGRESSIVE'`, }, "sole_tenancy_preferences": { Type: schema.TypeList, @@ -172,14 +158,9 @@ SIZING_OPTIMIZATION_STRATEGY_AGGRESSIVE`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "commitment_plan": { - Type: schema.TypeString, - Optional: true, - Description: `Commitment plan to consider when calculating costs for virtual machine insights and recommendations. If you are unsure which value to set, a 3 year commitment plan is often a good value to start with. - Possible values: - COMMITMENT_PLAN_UNSPECIFIED -ON_DEMAND -COMMITMENT_1_YEAR -COMMITMENT_3_YEAR`, + Type: schema.TypeString, + Optional: true, + Description: `Commitment plan to consider when calculating costs for virtual machine insights and recommendations. If you are unsure which value to set, a 3 year commitment plan is often a good value to start with. 
Possible values: 'COMMITMENT_PLAN_UNSPECIFIED', 'ON_DEMAND', 'COMMITMENT_1_YEAR', 'COMMITMENT_3_YEAR'`, }, "cpu_overcommit_ratio": { Type: schema.TypeFloat, @@ -187,14 +168,9 @@ COMMITMENT_3_YEAR`, Description: `CPU overcommit ratio. Acceptable values are between 1.0 and 2.0 inclusive.`, }, "host_maintenance_policy": { - Type: schema.TypeString, - Optional: true, - Description: `Sole Tenancy nodes maintenance policy. - Possible values: - HOST_MAINTENANCE_POLICY_UNSPECIFIED -HOST_MAINTENANCE_POLICY_DEFAULT -HOST_MAINTENANCE_POLICY_RESTART_IN_PLACE -HOST_MAINTENANCE_POLICY_MIGRATE_WITHIN_NODE_GROUP`, + Type: schema.TypeString, + Optional: true, + Description: `Sole Tenancy nodes maintenance policy. Possible values: 'HOST_MAINTENANCE_POLICY_UNSPECIFIED', 'HOST_MAINTENANCE_POLICY_DEFAULT', 'HOST_MAINTENANCE_POLICY_RESTART_IN_PLACE', 'HOST_MAINTENANCE_POLICY_MIGRATE_WITHIN_NODE_GROUP'`, }, "node_types": { Type: schema.TypeList, @@ -214,14 +190,9 @@ HOST_MAINTENANCE_POLICY_MIGRATE_WITHIN_NODE_GROUP`, }, }, "target_product": { - Type: schema.TypeString, - Optional: true, - Description: `Target product for assets using this preference set. Specify either target product or business goal, but not both. - Possible values: - COMPUTE_MIGRATION_TARGET_PRODUCT_UNSPECIFIED -COMPUTE_MIGRATION_TARGET_PRODUCT_COMPUTE_ENGINE -COMPUTE_MIGRATION_TARGET_PRODUCT_VMWARE_ENGINE -COMPUTE_MIGRATION_TARGET_PRODUCT_SOLE_TENANCY`, + Type: schema.TypeString, + Optional: true, + Description: `Target product for assets using this preference set. Specify either target product or business goal, but not both. Possible values: 'COMPUTE_MIGRATION_TARGET_PRODUCT_UNSPECIFIED', 'COMPUTE_MIGRATION_TARGET_PRODUCT_COMPUTE_ENGINE', 'COMPUTE_MIGRATION_TARGET_PRODUCT_VMWARE_ENGINE', 'COMPUTE_MIGRATION_TARGET_PRODUCT_SOLE_TENANCY'`, }, "vmware_engine_preferences": { Type: schema.TypeList, @@ -231,16 +202,9 @@ COMPUTE_MIGRATION_TARGET_PRODUCT_SOLE_TENANCY`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "commitment_plan": { - Type: schema.TypeString, - Optional: true, - Description: `Commitment plan to consider when calculating costs for virtual machine insights and recommendations. If you are unsure which value to set, a 3 year commitment plan is often a good value to start with. - Possible values: - COMMITMENT_PLAN_UNSPECIFIED -ON_DEMAND -COMMITMENT_1_YEAR_MONTHLY_PAYMENTS -COMMITMENT_3_YEAR_MONTHLY_PAYMENTS -COMMITMENT_1_YEAR_UPFRONT_PAYMENT -COMMITMENT_3_YEAR_UPFRONT_PAYMENT`, + Type: schema.TypeString, + Optional: true, + Description: `Commitment plan to consider when calculating costs for virtual machine insights and recommendations. If you are unsure which value to set, a 3 year commitment plan is often a good value to start with. 
Possible values: 'COMMITMENT_PLAN_UNSPECIFIED', 'ON_DEMAND', 'COMMITMENT_1_YEAR_MONTHLY_PAYMENTS', 'COMMITMENT_3_YEAR_MONTHLY_PAYMENTS', 'COMMITMENT_1_YEAR_UPFRONT_PAYMENT', 'COMMITMENT_3_YEAR_UPFRONT_PAYMENT'`, }, "cpu_overcommit_ratio": { Type: schema.TypeFloat, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_alert_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_alert_policy.go index 9e0c6d22945..2a8485deb94 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_alert_policy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_alert_policy.go @@ -882,6 +882,15 @@ referenced in the notification_channels field of this AlertPolicy. The format is }, }, }, + "notification_prompts": { + Type: schema.TypeList, + Optional: true, + Description: `Control when notifications will be sent out. Possible values: ["NOTIFICATION_PROMPT_UNSPECIFIED", "OPENED", "CLOSED"]`, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidateEnum([]string{"NOTIFICATION_PROMPT_UNSPECIFIED", "OPENED", "CLOSED"}), + }, + }, "notification_rate_limit": { Type: schema.TypeList, Optional: true, @@ -1994,6 +2003,8 @@ func flattenMonitoringAlertPolicyAlertStrategy(v interface{}, d *schema.Resource flattenMonitoringAlertPolicyAlertStrategyNotificationRateLimit(original["notificationRateLimit"], d, config) transformed["auto_close"] = flattenMonitoringAlertPolicyAlertStrategyAutoClose(original["autoClose"], d, config) + transformed["notification_prompts"] = + flattenMonitoringAlertPolicyAlertStrategyNotificationPrompts(original["notificationPrompts"], d, config) transformed["notification_channel_strategy"] = flattenMonitoringAlertPolicyAlertStrategyNotificationChannelStrategy(original["notificationChannelStrategy"], d, config) return []interface{}{transformed} @@ -2019,6 +2030,10 @@ func flattenMonitoringAlertPolicyAlertStrategyAutoClose(v interface{}, d *schema return v } +func flattenMonitoringAlertPolicyAlertStrategyNotificationPrompts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenMonitoringAlertPolicyAlertStrategyNotificationChannelStrategy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v @@ -2857,6 +2872,13 @@ func expandMonitoringAlertPolicyAlertStrategy(v interface{}, d tpgresource.Terra transformed["autoClose"] = transformedAutoClose } + transformedNotificationPrompts, err := expandMonitoringAlertPolicyAlertStrategyNotificationPrompts(original["notification_prompts"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNotificationPrompts); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["notificationPrompts"] = transformedNotificationPrompts + } + transformedNotificationChannelStrategy, err := expandMonitoringAlertPolicyAlertStrategyNotificationChannelStrategy(original["notification_channel_strategy"], d, config) if err != nil { return nil, err @@ -2894,6 +2916,10 @@ func expandMonitoringAlertPolicyAlertStrategyAutoClose(v interface{}, d tpgresou return v, nil } +func expandMonitoringAlertPolicyAlertStrategyNotificationPrompts(v interface{}, d
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandMonitoringAlertPolicyAlertStrategyNotificationChannelStrategy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_notification_channel.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_notification_channel.go index f0900828f7f..13e1ed325b4 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_notification_channel.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_notification_channel.go @@ -160,12 +160,12 @@ The [CHANNEL_ID] is automatically assigned by the server on creation.`, "force_delete": { Type: schema.TypeBool, Optional: true, - Default: false, Description: `If true, the notification channel will be deleted regardless of its use in alert policies (the policies will be updated to remove the channel). If false, channels that are still referenced by an existing alerting policy will fail to be deleted in a delete operation.`, + Default: false, }, "project": { Type: schema.TypeString, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_slo.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_slo.go index 2a7af5db83a..9344e269539 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_slo.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_slo.go @@ -38,8 +38,8 @@ import ( func validateMonitoringSloGoal(v interface{}, k string) (warnings []string, errors []error) { goal := v.(float64) - if goal <= 0 || goal > 0.999 { - errors = append(errors, fmt.Errorf("goal %f must be > 0 and <= 0.999", goal)) + if goal <= 0 || goal > 0.9999 { + errors = append(errors, fmt.Errorf("goal %f must be > 0 and <= 0.9999", goal)) } return } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_active_directory.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_active_directory.go index 5e3754fdef0..c4e4789dcd5 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_active_directory.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_active_directory.go @@ -32,15 +32,15 @@ import ( transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) -func ResourceNetappactiveDirectory() *schema.Resource { +func ResourceNetappActiveDirectory() *schema.Resource { return &schema.Resource{ - Create: resourceNetappactiveDirectoryCreate, - Read: resourceNetappactiveDirectoryRead, - Update: resourceNetappactiveDirectoryUpdate, - Delete: 
resourceNetappactiveDirectoryDelete, + Create: resourceNetappActiveDirectoryCreate, + Read: resourceNetappActiveDirectoryRead, + Update: resourceNetappActiveDirectoryUpdate, + Delete: resourceNetappActiveDirectoryDelete, Importer: &schema.ResourceImporter{ - State: resourceNetappactiveDirectoryImport, + State: resourceNetappActiveDirectoryImport, }, Timeouts: &schema.ResourceTimeout{ @@ -218,7 +218,7 @@ Use when Active Directory domain controllers in multiple regions are configured. } } -func resourceNetappactiveDirectoryCreate(d *schema.ResourceData, meta interface{}) error { +func resourceNetappActiveDirectoryCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { @@ -226,109 +226,109 @@ func resourceNetappactiveDirectoryCreate(d *schema.ResourceData, meta interface{ } obj := make(map[string]interface{}) - domainProp, err := expandNetappactiveDirectoryDomain(d.Get("domain"), d, config) + domainProp, err := expandNetappActiveDirectoryDomain(d.Get("domain"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("domain"); !tpgresource.IsEmptyValue(reflect.ValueOf(domainProp)) && (ok || !reflect.DeepEqual(v, domainProp)) { obj["domain"] = domainProp } - siteProp, err := expandNetappactiveDirectorySite(d.Get("site"), d, config) + siteProp, err := expandNetappActiveDirectorySite(d.Get("site"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("site"); !tpgresource.IsEmptyValue(reflect.ValueOf(siteProp)) && (ok || !reflect.DeepEqual(v, siteProp)) { obj["site"] = siteProp } - dnsProp, err := expandNetappactiveDirectoryDns(d.Get("dns"), d, config) + dnsProp, err := expandNetappActiveDirectoryDns(d.Get("dns"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("dns"); !tpgresource.IsEmptyValue(reflect.ValueOf(dnsProp)) && (ok || !reflect.DeepEqual(v, dnsProp)) { obj["dns"] = dnsProp } - netBiosPrefixProp, err := expandNetappactiveDirectoryNetBiosPrefix(d.Get("net_bios_prefix"), d, config) + netBiosPrefixProp, err := expandNetappActiveDirectoryNetBiosPrefix(d.Get("net_bios_prefix"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("net_bios_prefix"); !tpgresource.IsEmptyValue(reflect.ValueOf(netBiosPrefixProp)) && (ok || !reflect.DeepEqual(v, netBiosPrefixProp)) { obj["netBiosPrefix"] = netBiosPrefixProp } - organizationalUnitProp, err := expandNetappactiveDirectoryOrganizationalUnit(d.Get("organizational_unit"), d, config) + organizationalUnitProp, err := expandNetappActiveDirectoryOrganizationalUnit(d.Get("organizational_unit"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("organizational_unit"); !tpgresource.IsEmptyValue(reflect.ValueOf(organizationalUnitProp)) && (ok || !reflect.DeepEqual(v, organizationalUnitProp)) { obj["organizationalUnit"] = organizationalUnitProp } - aesEncryptionProp, err := expandNetappactiveDirectoryAesEncryption(d.Get("aes_encryption"), d, config) + aesEncryptionProp, err := expandNetappActiveDirectoryAesEncryption(d.Get("aes_encryption"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("aes_encryption"); !tpgresource.IsEmptyValue(reflect.ValueOf(aesEncryptionProp)) && (ok || !reflect.DeepEqual(v, aesEncryptionProp)) { obj["aesEncryption"] = aesEncryptionProp } - usernameProp, err := expandNetappactiveDirectoryUsername(d.Get("username"), d, config) + usernameProp, err := 
expandNetappActiveDirectoryUsername(d.Get("username"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("username"); !tpgresource.IsEmptyValue(reflect.ValueOf(usernameProp)) && (ok || !reflect.DeepEqual(v, usernameProp)) { obj["username"] = usernameProp } - passwordProp, err := expandNetappactiveDirectoryPassword(d.Get("password"), d, config) + passwordProp, err := expandNetappActiveDirectoryPassword(d.Get("password"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("password"); !tpgresource.IsEmptyValue(reflect.ValueOf(passwordProp)) && (ok || !reflect.DeepEqual(v, passwordProp)) { obj["password"] = passwordProp } - backupOperatorsProp, err := expandNetappactiveDirectoryBackupOperators(d.Get("backup_operators"), d, config) + backupOperatorsProp, err := expandNetappActiveDirectoryBackupOperators(d.Get("backup_operators"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("backup_operators"); !tpgresource.IsEmptyValue(reflect.ValueOf(backupOperatorsProp)) && (ok || !reflect.DeepEqual(v, backupOperatorsProp)) { obj["backupOperators"] = backupOperatorsProp } - administratorsProp, err := expandNetappactiveDirectoryAdministrators(d.Get("administrators"), d, config) + administratorsProp, err := expandNetappActiveDirectoryAdministrators(d.Get("administrators"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("administrators"); !tpgresource.IsEmptyValue(reflect.ValueOf(administratorsProp)) && (ok || !reflect.DeepEqual(v, administratorsProp)) { obj["administrators"] = administratorsProp } - securityOperatorsProp, err := expandNetappactiveDirectorySecurityOperators(d.Get("security_operators"), d, config) + securityOperatorsProp, err := expandNetappActiveDirectorySecurityOperators(d.Get("security_operators"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("security_operators"); !tpgresource.IsEmptyValue(reflect.ValueOf(securityOperatorsProp)) && (ok || !reflect.DeepEqual(v, securityOperatorsProp)) { obj["securityOperators"] = securityOperatorsProp } - kdcHostnameProp, err := expandNetappactiveDirectoryKdcHostname(d.Get("kdc_hostname"), d, config) + kdcHostnameProp, err := expandNetappActiveDirectoryKdcHostname(d.Get("kdc_hostname"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("kdc_hostname"); !tpgresource.IsEmptyValue(reflect.ValueOf(kdcHostnameProp)) && (ok || !reflect.DeepEqual(v, kdcHostnameProp)) { obj["kdcHostname"] = kdcHostnameProp } - kdcIpProp, err := expandNetappactiveDirectoryKdcIp(d.Get("kdc_ip"), d, config) + kdcIpProp, err := expandNetappActiveDirectoryKdcIp(d.Get("kdc_ip"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("kdc_ip"); !tpgresource.IsEmptyValue(reflect.ValueOf(kdcIpProp)) && (ok || !reflect.DeepEqual(v, kdcIpProp)) { obj["kdcIp"] = kdcIpProp } - nfsUsersWithLdapProp, err := expandNetappactiveDirectoryNfsUsersWithLdap(d.Get("nfs_users_with_ldap"), d, config) + nfsUsersWithLdapProp, err := expandNetappActiveDirectoryNfsUsersWithLdap(d.Get("nfs_users_with_ldap"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("nfs_users_with_ldap"); !tpgresource.IsEmptyValue(reflect.ValueOf(nfsUsersWithLdapProp)) && (ok || !reflect.DeepEqual(v, nfsUsersWithLdapProp)) { obj["nfsUsersWithLdap"] = nfsUsersWithLdapProp } - descriptionProp, err := expandNetappactiveDirectoryDescription(d.Get("description"), d, config) + descriptionProp, err := expandNetappActiveDirectoryDescription(d.Get("description"), d, config) 
if err != nil { return err } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } - ldapSigningProp, err := expandNetappactiveDirectoryLdapSigning(d.Get("ldap_signing"), d, config) + ldapSigningProp, err := expandNetappActiveDirectoryLdapSigning(d.Get("ldap_signing"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("ldap_signing"); !tpgresource.IsEmptyValue(reflect.ValueOf(ldapSigningProp)) && (ok || !reflect.DeepEqual(v, ldapSigningProp)) { obj["ldapSigning"] = ldapSigningProp } - encryptDcConnectionsProp, err := expandNetappactiveDirectoryEncryptDcConnections(d.Get("encrypt_dc_connections"), d, config) + encryptDcConnectionsProp, err := expandNetappActiveDirectoryEncryptDcConnections(d.Get("encrypt_dc_connections"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("encrypt_dc_connections"); !tpgresource.IsEmptyValue(reflect.ValueOf(encryptDcConnectionsProp)) && (ok || !reflect.DeepEqual(v, encryptDcConnectionsProp)) { obj["encryptDcConnections"] = encryptDcConnectionsProp } - labelsProp, err := expandNetappactiveDirectoryEffectiveLabels(d.Get("effective_labels"), d, config) + labelsProp, err := expandNetappActiveDirectoryEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { @@ -340,12 +340,12 @@ func resourceNetappactiveDirectoryCreate(d *schema.ResourceData, meta interface{ return err } - log.Printf("[DEBUG] Creating new activeDirectory: %#v", obj) + log.Printf("[DEBUG] Creating new ActiveDirectory: %#v", obj) billingProject := "" project, err := tpgresource.GetProject(d, config) if err != nil { - return fmt.Errorf("Error fetching project for activeDirectory: %s", err) + return fmt.Errorf("Error fetching project for ActiveDirectory: %s", err) } billingProject = project @@ -366,7 +366,7 @@ func resourceNetappactiveDirectoryCreate(d *schema.ResourceData, meta interface{ Headers: headers, }) if err != nil { - return fmt.Errorf("Error creating activeDirectory: %s", err) + return fmt.Errorf("Error creating ActiveDirectory: %s", err) } // Store the ID now @@ -377,21 +377,21 @@ func resourceNetappactiveDirectoryCreate(d *schema.ResourceData, meta interface{ d.SetId(id) err = NetappOperationWaitTime( - config, res, project, "Creating activeDirectory", userAgent, + config, res, project, "Creating ActiveDirectory", userAgent, d.Timeout(schema.TimeoutCreate)) if err != nil { // The resource didn't actually create d.SetId("") - return fmt.Errorf("Error waiting to create activeDirectory: %s", err) + return fmt.Errorf("Error waiting to create ActiveDirectory: %s", err) } - log.Printf("[DEBUG] Finished creating activeDirectory %q: %#v", d.Id(), res) + log.Printf("[DEBUG] Finished creating ActiveDirectory %q: %#v", d.Id(), res) - return resourceNetappactiveDirectoryRead(d, meta) + return resourceNetappActiveDirectoryRead(d, meta) } -func resourceNetappactiveDirectoryRead(d *schema.ResourceData, meta interface{}) error { +func resourceNetappActiveDirectoryRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { @@ -407,7 +407,7 @@ func resourceNetappactiveDirectoryRead(d *schema.ResourceData, meta interface{}) 
project, err := tpgresource.GetProject(d, config) if err != nil { - return fmt.Errorf("Error fetching project for activeDirectory: %s", err) + return fmt.Errorf("Error fetching project for ActiveDirectory: %s", err) } billingProject = project @@ -426,84 +426,84 @@ func resourceNetappactiveDirectoryRead(d *schema.ResourceData, meta interface{}) Headers: headers, }) if err != nil { - return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NetappactiveDirectory %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NetappActiveDirectory %q", d.Id())) } if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading activeDirectory: %s", err) + return fmt.Errorf("Error reading ActiveDirectory: %s", err) } - if err := d.Set("create_time", flattenNetappactiveDirectoryCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading activeDirectory: %s", err) + if err := d.Set("create_time", flattenNetappActiveDirectoryCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading ActiveDirectory: %s", err) } - if err := d.Set("state", flattenNetappactiveDirectoryState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading activeDirectory: %s", err) + if err := d.Set("state", flattenNetappActiveDirectoryState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading ActiveDirectory: %s", err) } - if err := d.Set("domain", flattenNetappactiveDirectoryDomain(res["domain"], d, config)); err != nil { - return fmt.Errorf("Error reading activeDirectory: %s", err) + if err := d.Set("domain", flattenNetappActiveDirectoryDomain(res["domain"], d, config)); err != nil { + return fmt.Errorf("Error reading ActiveDirectory: %s", err) } - if err := d.Set("site", flattenNetappactiveDirectorySite(res["site"], d, config)); err != nil { - return fmt.Errorf("Error reading activeDirectory: %s", err) + if err := d.Set("site", flattenNetappActiveDirectorySite(res["site"], d, config)); err != nil { + return fmt.Errorf("Error reading ActiveDirectory: %s", err) } - if err := d.Set("dns", flattenNetappactiveDirectoryDns(res["dns"], d, config)); err != nil { - return fmt.Errorf("Error reading activeDirectory: %s", err) + if err := d.Set("dns", flattenNetappActiveDirectoryDns(res["dns"], d, config)); err != nil { + return fmt.Errorf("Error reading ActiveDirectory: %s", err) } - if err := d.Set("net_bios_prefix", flattenNetappactiveDirectoryNetBiosPrefix(res["netBiosPrefix"], d, config)); err != nil { - return fmt.Errorf("Error reading activeDirectory: %s", err) + if err := d.Set("net_bios_prefix", flattenNetappActiveDirectoryNetBiosPrefix(res["netBiosPrefix"], d, config)); err != nil { + return fmt.Errorf("Error reading ActiveDirectory: %s", err) } - if err := d.Set("organizational_unit", flattenNetappactiveDirectoryOrganizationalUnit(res["organizationalUnit"], d, config)); err != nil { - return fmt.Errorf("Error reading activeDirectory: %s", err) + if err := d.Set("organizational_unit", flattenNetappActiveDirectoryOrganizationalUnit(res["organizationalUnit"], d, config)); err != nil { + return fmt.Errorf("Error reading ActiveDirectory: %s", err) } - if err := d.Set("aes_encryption", flattenNetappactiveDirectoryAesEncryption(res["aesEncryption"], d, config)); err != nil { - return fmt.Errorf("Error reading activeDirectory: %s", err) + if err := d.Set("aes_encryption", flattenNetappActiveDirectoryAesEncryption(res["aesEncryption"], d, config)); err != nil { + return fmt.Errorf("Error 
reading ActiveDirectory: %s", err) } - if err := d.Set("username", flattenNetappactiveDirectoryUsername(res["username"], d, config)); err != nil { - return fmt.Errorf("Error reading activeDirectory: %s", err) + if err := d.Set("username", flattenNetappActiveDirectoryUsername(res["username"], d, config)); err != nil { + return fmt.Errorf("Error reading ActiveDirectory: %s", err) } - if err := d.Set("backup_operators", flattenNetappactiveDirectoryBackupOperators(res["backupOperators"], d, config)); err != nil { - return fmt.Errorf("Error reading activeDirectory: %s", err) + if err := d.Set("backup_operators", flattenNetappActiveDirectoryBackupOperators(res["backupOperators"], d, config)); err != nil { + return fmt.Errorf("Error reading ActiveDirectory: %s", err) } - if err := d.Set("administrators", flattenNetappactiveDirectoryAdministrators(res["administrators"], d, config)); err != nil { - return fmt.Errorf("Error reading activeDirectory: %s", err) + if err := d.Set("administrators", flattenNetappActiveDirectoryAdministrators(res["administrators"], d, config)); err != nil { + return fmt.Errorf("Error reading ActiveDirectory: %s", err) } - if err := d.Set("security_operators", flattenNetappactiveDirectorySecurityOperators(res["securityOperators"], d, config)); err != nil { - return fmt.Errorf("Error reading activeDirectory: %s", err) + if err := d.Set("security_operators", flattenNetappActiveDirectorySecurityOperators(res["securityOperators"], d, config)); err != nil { + return fmt.Errorf("Error reading ActiveDirectory: %s", err) } - if err := d.Set("kdc_hostname", flattenNetappactiveDirectoryKdcHostname(res["kdcHostname"], d, config)); err != nil { - return fmt.Errorf("Error reading activeDirectory: %s", err) + if err := d.Set("kdc_hostname", flattenNetappActiveDirectoryKdcHostname(res["kdcHostname"], d, config)); err != nil { + return fmt.Errorf("Error reading ActiveDirectory: %s", err) } - if err := d.Set("kdc_ip", flattenNetappactiveDirectoryKdcIp(res["kdcIp"], d, config)); err != nil { - return fmt.Errorf("Error reading activeDirectory: %s", err) + if err := d.Set("kdc_ip", flattenNetappActiveDirectoryKdcIp(res["kdcIp"], d, config)); err != nil { + return fmt.Errorf("Error reading ActiveDirectory: %s", err) } - if err := d.Set("nfs_users_with_ldap", flattenNetappactiveDirectoryNfsUsersWithLdap(res["nfsUsersWithLdap"], d, config)); err != nil { - return fmt.Errorf("Error reading activeDirectory: %s", err) + if err := d.Set("nfs_users_with_ldap", flattenNetappActiveDirectoryNfsUsersWithLdap(res["nfsUsersWithLdap"], d, config)); err != nil { + return fmt.Errorf("Error reading ActiveDirectory: %s", err) } - if err := d.Set("description", flattenNetappactiveDirectoryDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading activeDirectory: %s", err) + if err := d.Set("description", flattenNetappActiveDirectoryDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading ActiveDirectory: %s", err) } - if err := d.Set("ldap_signing", flattenNetappactiveDirectoryLdapSigning(res["ldapSigning"], d, config)); err != nil { - return fmt.Errorf("Error reading activeDirectory: %s", err) + if err := d.Set("ldap_signing", flattenNetappActiveDirectoryLdapSigning(res["ldapSigning"], d, config)); err != nil { + return fmt.Errorf("Error reading ActiveDirectory: %s", err) } - if err := d.Set("encrypt_dc_connections", flattenNetappactiveDirectoryEncryptDcConnections(res["encryptDcConnections"], d, config)); err != nil { - return 
fmt.Errorf("Error reading activeDirectory: %s", err) + if err := d.Set("encrypt_dc_connections", flattenNetappActiveDirectoryEncryptDcConnections(res["encryptDcConnections"], d, config)); err != nil { + return fmt.Errorf("Error reading ActiveDirectory: %s", err) } - if err := d.Set("labels", flattenNetappactiveDirectoryLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading activeDirectory: %s", err) + if err := d.Set("labels", flattenNetappActiveDirectoryLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading ActiveDirectory: %s", err) } - if err := d.Set("state_details", flattenNetappactiveDirectoryStateDetails(res["stateDetails"], d, config)); err != nil { - return fmt.Errorf("Error reading activeDirectory: %s", err) + if err := d.Set("state_details", flattenNetappActiveDirectoryStateDetails(res["stateDetails"], d, config)); err != nil { + return fmt.Errorf("Error reading ActiveDirectory: %s", err) } - if err := d.Set("terraform_labels", flattenNetappactiveDirectoryTerraformLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading activeDirectory: %s", err) + if err := d.Set("terraform_labels", flattenNetappActiveDirectoryTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading ActiveDirectory: %s", err) } - if err := d.Set("effective_labels", flattenNetappactiveDirectoryEffectiveLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading activeDirectory: %s", err) + if err := d.Set("effective_labels", flattenNetappActiveDirectoryEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading ActiveDirectory: %s", err) } return nil } -func resourceNetappactiveDirectoryUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceNetappActiveDirectoryUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { @@ -514,114 +514,114 @@ func resourceNetappactiveDirectoryUpdate(d *schema.ResourceData, meta interface{ project, err := tpgresource.GetProject(d, config) if err != nil { - return fmt.Errorf("Error fetching project for activeDirectory: %s", err) + return fmt.Errorf("Error fetching project for ActiveDirectory: %s", err) } billingProject = project obj := make(map[string]interface{}) - domainProp, err := expandNetappactiveDirectoryDomain(d.Get("domain"), d, config) + domainProp, err := expandNetappActiveDirectoryDomain(d.Get("domain"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("domain"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, domainProp)) { obj["domain"] = domainProp } - siteProp, err := expandNetappactiveDirectorySite(d.Get("site"), d, config) + siteProp, err := expandNetappActiveDirectorySite(d.Get("site"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("site"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, siteProp)) { obj["site"] = siteProp } - dnsProp, err := expandNetappactiveDirectoryDns(d.Get("dns"), d, config) + dnsProp, err := expandNetappActiveDirectoryDns(d.Get("dns"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("dns"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dnsProp)) { obj["dns"] = dnsProp } - netBiosPrefixProp, err := expandNetappactiveDirectoryNetBiosPrefix(d.Get("net_bios_prefix"), d, 
config) + netBiosPrefixProp, err := expandNetappActiveDirectoryNetBiosPrefix(d.Get("net_bios_prefix"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("net_bios_prefix"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, netBiosPrefixProp)) { obj["netBiosPrefix"] = netBiosPrefixProp } - organizationalUnitProp, err := expandNetappactiveDirectoryOrganizationalUnit(d.Get("organizational_unit"), d, config) + organizationalUnitProp, err := expandNetappActiveDirectoryOrganizationalUnit(d.Get("organizational_unit"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("organizational_unit"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, organizationalUnitProp)) { obj["organizationalUnit"] = organizationalUnitProp } - aesEncryptionProp, err := expandNetappactiveDirectoryAesEncryption(d.Get("aes_encryption"), d, config) + aesEncryptionProp, err := expandNetappActiveDirectoryAesEncryption(d.Get("aes_encryption"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("aes_encryption"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, aesEncryptionProp)) { obj["aesEncryption"] = aesEncryptionProp } - usernameProp, err := expandNetappactiveDirectoryUsername(d.Get("username"), d, config) + usernameProp, err := expandNetappActiveDirectoryUsername(d.Get("username"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("username"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, usernameProp)) { obj["username"] = usernameProp } - passwordProp, err := expandNetappactiveDirectoryPassword(d.Get("password"), d, config) + passwordProp, err := expandNetappActiveDirectoryPassword(d.Get("password"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("password"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, passwordProp)) { obj["password"] = passwordProp } - backupOperatorsProp, err := expandNetappactiveDirectoryBackupOperators(d.Get("backup_operators"), d, config) + backupOperatorsProp, err := expandNetappActiveDirectoryBackupOperators(d.Get("backup_operators"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("backup_operators"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, backupOperatorsProp)) { obj["backupOperators"] = backupOperatorsProp } - administratorsProp, err := expandNetappactiveDirectoryAdministrators(d.Get("administrators"), d, config) + administratorsProp, err := expandNetappActiveDirectoryAdministrators(d.Get("administrators"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("administrators"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, administratorsProp)) { obj["administrators"] = administratorsProp } - securityOperatorsProp, err := expandNetappactiveDirectorySecurityOperators(d.Get("security_operators"), d, config) + securityOperatorsProp, err := expandNetappActiveDirectorySecurityOperators(d.Get("security_operators"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("security_operators"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, securityOperatorsProp)) { obj["securityOperators"] = securityOperatorsProp } - kdcHostnameProp, err := expandNetappactiveDirectoryKdcHostname(d.Get("kdc_hostname"), d, config) + kdcHostnameProp, err := 
expandNetappActiveDirectoryKdcHostname(d.Get("kdc_hostname"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("kdc_hostname"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, kdcHostnameProp)) { obj["kdcHostname"] = kdcHostnameProp } - kdcIpProp, err := expandNetappactiveDirectoryKdcIp(d.Get("kdc_ip"), d, config) + kdcIpProp, err := expandNetappActiveDirectoryKdcIp(d.Get("kdc_ip"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("kdc_ip"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, kdcIpProp)) { obj["kdcIp"] = kdcIpProp } - nfsUsersWithLdapProp, err := expandNetappactiveDirectoryNfsUsersWithLdap(d.Get("nfs_users_with_ldap"), d, config) + nfsUsersWithLdapProp, err := expandNetappActiveDirectoryNfsUsersWithLdap(d.Get("nfs_users_with_ldap"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("nfs_users_with_ldap"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nfsUsersWithLdapProp)) { obj["nfsUsersWithLdap"] = nfsUsersWithLdapProp } - descriptionProp, err := expandNetappactiveDirectoryDescription(d.Get("description"), d, config) + descriptionProp, err := expandNetappActiveDirectoryDescription(d.Get("description"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } - ldapSigningProp, err := expandNetappactiveDirectoryLdapSigning(d.Get("ldap_signing"), d, config) + ldapSigningProp, err := expandNetappActiveDirectoryLdapSigning(d.Get("ldap_signing"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("ldap_signing"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ldapSigningProp)) { obj["ldapSigning"] = ldapSigningProp } - encryptDcConnectionsProp, err := expandNetappactiveDirectoryEncryptDcConnections(d.Get("encrypt_dc_connections"), d, config) + encryptDcConnectionsProp, err := expandNetappActiveDirectoryEncryptDcConnections(d.Get("encrypt_dc_connections"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("encrypt_dc_connections"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, encryptDcConnectionsProp)) { obj["encryptDcConnections"] = encryptDcConnectionsProp } - labelsProp, err := expandNetappactiveDirectoryEffectiveLabels(d.Get("effective_labels"), d, config) + labelsProp, err := expandNetappActiveDirectoryEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { @@ -633,7 +633,7 @@ func resourceNetappactiveDirectoryUpdate(d *schema.ResourceData, meta interface{ return err } - log.Printf("[DEBUG] Updating activeDirectory %q: %#v", d.Id(), obj) + log.Printf("[DEBUG] Updating ActiveDirectory %q: %#v", d.Id(), obj) headers := make(http.Header) updateMask := []string{} @@ -734,13 +734,13 @@ func resourceNetappactiveDirectoryUpdate(d *schema.ResourceData, meta interface{ }) if err != nil { - return fmt.Errorf("Error updating activeDirectory %q: %s", d.Id(), err) + return fmt.Errorf("Error updating ActiveDirectory %q: %s", d.Id(), err) } else { - log.Printf("[DEBUG] Finished updating activeDirectory %q: %#v", d.Id(), res) + log.Printf("[DEBUG] Finished updating ActiveDirectory %q: %#v", d.Id(), 
res) } err = NetappOperationWaitTime( - config, res, project, "Updating activeDirectory", userAgent, + config, res, project, "Updating ActiveDirectory", userAgent, d.Timeout(schema.TimeoutUpdate)) if err != nil { @@ -748,10 +748,10 @@ func resourceNetappactiveDirectoryUpdate(d *schema.ResourceData, meta interface{ } } - return resourceNetappactiveDirectoryRead(d, meta) + return resourceNetappActiveDirectoryRead(d, meta) } -func resourceNetappactiveDirectoryDelete(d *schema.ResourceData, meta interface{}) error { +func resourceNetappActiveDirectoryDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { @@ -762,7 +762,7 @@ func resourceNetappactiveDirectoryDelete(d *schema.ResourceData, meta interface{ project, err := tpgresource.GetProject(d, config) if err != nil { - return fmt.Errorf("Error fetching project for activeDirectory: %s", err) + return fmt.Errorf("Error fetching project for ActiveDirectory: %s", err) } billingProject = project @@ -780,7 +780,7 @@ func resourceNetappactiveDirectoryDelete(d *schema.ResourceData, meta interface{ headers := make(http.Header) - log.Printf("[DEBUG] Deleting activeDirectory %q", d.Id()) + log.Printf("[DEBUG] Deleting ActiveDirectory %q", d.Id()) res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, Method: "DELETE", @@ -792,22 +792,22 @@ func resourceNetappactiveDirectoryDelete(d *schema.ResourceData, meta interface{ Headers: headers, }) if err != nil { - return transport_tpg.HandleNotFoundError(err, d, "activeDirectory") + return transport_tpg.HandleNotFoundError(err, d, "ActiveDirectory") } err = NetappOperationWaitTime( - config, res, project, "Deleting activeDirectory", userAgent, + config, res, project, "Deleting ActiveDirectory", userAgent, d.Timeout(schema.TimeoutDelete)) if err != nil { return err } - log.Printf("[DEBUG] Finished deleting activeDirectory %q: %#v", d.Id(), res) + log.Printf("[DEBUG] Finished deleting ActiveDirectory %q: %#v", d.Id(), res) return nil } -func resourceNetappactiveDirectoryImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { +func resourceNetappActiveDirectoryImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { config := meta.(*transport_tpg.Config) if err := tpgresource.ParseImportId([]string{ "^projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/activeDirectories/(?P<name>[^/]+)$", @@ -827,79 +827,79 @@ func resourceNetappactiveDirectoryImport(d *schema.ResourceData, meta interface{ return []*schema.ResourceData{d}, nil } -func flattenNetappactiveDirectoryCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappActiveDirectoryCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappactiveDirectoryState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappActiveDirectoryState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappactiveDirectoryDomain(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappActiveDirectoryDomain(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappactiveDirectorySite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { 
+func flattenNetappActiveDirectorySite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappactiveDirectoryDns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappActiveDirectoryDns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappactiveDirectoryNetBiosPrefix(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappActiveDirectoryNetBiosPrefix(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappactiveDirectoryOrganizationalUnit(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappActiveDirectoryOrganizationalUnit(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappactiveDirectoryAesEncryption(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappActiveDirectoryAesEncryption(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappactiveDirectoryUsername(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappActiveDirectoryUsername(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappactiveDirectoryBackupOperators(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappActiveDirectoryBackupOperators(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappactiveDirectoryAdministrators(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappActiveDirectoryAdministrators(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappactiveDirectorySecurityOperators(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappActiveDirectorySecurityOperators(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappactiveDirectoryKdcHostname(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappActiveDirectoryKdcHostname(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappactiveDirectoryKdcIp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappActiveDirectoryKdcIp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappactiveDirectoryNfsUsersWithLdap(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappActiveDirectoryNfsUsersWithLdap(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappactiveDirectoryDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappActiveDirectoryDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappactiveDirectoryLdapSigning(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func 
flattenNetappActiveDirectoryLdapSigning(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappactiveDirectoryEncryptDcConnections(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappActiveDirectoryEncryptDcConnections(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappactiveDirectoryLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappActiveDirectoryLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -914,11 +914,11 @@ func flattenNetappactiveDirectoryLabels(v interface{}, d *schema.ResourceData, c return transformed } -func flattenNetappactiveDirectoryStateDetails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappActiveDirectoryStateDetails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappactiveDirectoryTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappActiveDirectoryTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -933,79 +933,79 @@ func flattenNetappactiveDirectoryTerraformLabels(v interface{}, d *schema.Resour return transformed } -func flattenNetappactiveDirectoryEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappActiveDirectoryEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandNetappactiveDirectoryDomain(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappActiveDirectoryDomain(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappactiveDirectorySite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappActiveDirectorySite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappactiveDirectoryDns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappActiveDirectoryDns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappactiveDirectoryNetBiosPrefix(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappActiveDirectoryNetBiosPrefix(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappactiveDirectoryOrganizationalUnit(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappActiveDirectoryOrganizationalUnit(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappactiveDirectoryAesEncryption(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappActiveDirectoryAesEncryption(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappactiveDirectoryUsername(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappActiveDirectoryUsername(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappactiveDirectoryPassword(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappActiveDirectoryPassword(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappactiveDirectoryBackupOperators(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappActiveDirectoryBackupOperators(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappactiveDirectoryAdministrators(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappActiveDirectoryAdministrators(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappactiveDirectorySecurityOperators(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappActiveDirectorySecurityOperators(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappactiveDirectoryKdcHostname(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappActiveDirectoryKdcHostname(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappactiveDirectoryKdcIp(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappActiveDirectoryKdcIp(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappactiveDirectoryNfsUsersWithLdap(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappActiveDirectoryNfsUsersWithLdap(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappactiveDirectoryDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappActiveDirectoryDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappactiveDirectoryLdapSigning(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappActiveDirectoryLdapSigning(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappactiveDirectoryEncryptDcConnections(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappActiveDirectoryEncryptDcConnections(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappactiveDirectoryEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { +func expandNetappActiveDirectoryEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_active_directory_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_active_directory_sweeper.go index de2b49060de..7becca64f34 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_active_directory_sweeper.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_active_directory_sweeper.go @@ -1,20 +1,5 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 - -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - package netapp import ( @@ -30,12 +15,12 @@ import ( ) func init() { - sweeper.AddTestSweepers("NetappactiveDirectory", testSweepNetappactiveDirectory) + sweeper.AddTestSweepers("NetappActiveDirectory", testSweepNetappActiveDirectory) } // At the time of writing, the CI only passes us-central1 as the region -func testSweepNetappactiveDirectory(region string) error { - resourceName := "NetappactiveDirectory" +func testSweepNetappActiveDirectory(region string) error { + resourceName := "NetappActiveDirectory" log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) config, err := sweeper.SharedConfigForRegion(region) @@ -53,86 +38,91 @@ func testSweepNetappactiveDirectory(region string) error { t := &testing.T{} billingId := envvar.GetTestBillingAccountFromEnv(t) - // Setup variables to replace in list template - d := &tpgresource.ResourceDataMock{ - FieldsInSchema: map[string]interface{}{ - "project": config.Project, - "region": region, - "location": region, - "zone": "-", - "billing_account": billingId, - }, - } - - listTemplate := strings.Split("https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/activeDirectories", "?")[0] - listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) - return nil - } - - res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "GET", - Project: config.Project, - RawURL: listUrl, - UserAgent: config.UserAgent, - }) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) - return nil - } - - resourceList, ok := res["activeDirectories"] - if !ok { - log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") - return nil - } - - rl := 
resourceList.([]interface{}) - - log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) - // Keep count of items that aren't sweepable for logging. - nonPrefixCount := 0 - for _, ri := range rl { - obj := ri.(map[string]interface{}) - if obj["name"] == nil { - log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) - return nil + regions := []string{"us-central1", "us-west2", "us-east4"} + for _, r := range regions { + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s in %s", resourceName, r) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": r, + "location": r, + "zone": "-", + "billing_account": billingId, + }, } - name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) - // Skip resources that shouldn't be sweeped - if !sweeper.IsSweepableTestResource(name) { - nonPrefixCount++ - continue - } - - deleteTemplate := "https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/activeDirectories/{{name}}" - deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + listTemplate := strings.Split("https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/activeDirectories", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) - return nil + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + continue } - deleteUrl = deleteUrl + name - // Don't wait on operations as we may have a lot to delete - _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, - Method: "DELETE", + Method: "GET", Project: config.Project, - RawURL: deleteUrl, + RawURL: listUrl, UserAgent: config.UserAgent, }) if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) - } else { - log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + continue } - } - if nonPrefixCount > 0 { - log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + resourceList, ok := res["activeDirectories"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + continue + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + continue + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/activeDirectories/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + continue + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } } return nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_backup.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_backup.go index 47855759302..d5ce7a0a1d6 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_backup.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_backup.go @@ -32,15 +32,15 @@ import ( transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) -func ResourceNetappbackup() *schema.Resource { +func ResourceNetappBackup() *schema.Resource { return &schema.Resource{ - Create: resourceNetappbackupCreate, - Read: resourceNetappbackupRead, - Update: resourceNetappbackupUpdate, - Delete: resourceNetappbackupDelete, + Create: resourceNetappBackupCreate, + Read: resourceNetappBackupRead, + Update: resourceNetappBackupUpdate, + Delete: resourceNetappBackupDelete, Importer: &schema.ResourceImporter{ - State: resourceNetappbackupImport, + State: resourceNetappBackupImport, }, Timeouts: &schema.ResourceTimeout{ @@ -153,7 +153,7 @@ Total size of all backups in a chain in bytes = baseline backup size + sum(incre } } -func resourceNetappbackupCreate(d *schema.ResourceData, meta interface{}) error { +func resourceNetappBackupCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { @@ -161,25 +161,25 @@ func resourceNetappbackupCreate(d *schema.ResourceData, meta interface{}) error } obj := make(map[string]interface{}) - descriptionProp, err := expandNetappbackupDescription(d.Get("description"), d, config) + descriptionProp, err := expandNetappBackupDescription(d.Get("description"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } - 
sourceVolumeProp, err := expandNetappbackupSourceVolume(d.Get("source_volume"), d, config) + sourceVolumeProp, err := expandNetappBackupSourceVolume(d.Get("source_volume"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("source_volume"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceVolumeProp)) && (ok || !reflect.DeepEqual(v, sourceVolumeProp)) { obj["sourceVolume"] = sourceVolumeProp } - sourceSnapshotProp, err := expandNetappbackupSourceSnapshot(d.Get("source_snapshot"), d, config) + sourceSnapshotProp, err := expandNetappBackupSourceSnapshot(d.Get("source_snapshot"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("source_snapshot"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceSnapshotProp)) && (ok || !reflect.DeepEqual(v, sourceSnapshotProp)) { obj["sourceSnapshot"] = sourceSnapshotProp } - labelsProp, err := expandNetappbackupEffectiveLabels(d.Get("effective_labels"), d, config) + labelsProp, err := expandNetappBackupEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { @@ -191,12 +191,12 @@ func resourceNetappbackupCreate(d *schema.ResourceData, meta interface{}) error return err } - log.Printf("[DEBUG] Creating new backup: %#v", obj) + log.Printf("[DEBUG] Creating new Backup: %#v", obj) billingProject := "" project, err := tpgresource.GetProject(d, config) if err != nil { - return fmt.Errorf("Error fetching project for backup: %s", err) + return fmt.Errorf("Error fetching project for Backup: %s", err) } billingProject = project @@ -217,7 +217,7 @@ func resourceNetappbackupCreate(d *schema.ResourceData, meta interface{}) error Headers: headers, }) if err != nil { - return fmt.Errorf("Error creating backup: %s", err) + return fmt.Errorf("Error creating Backup: %s", err) } // Store the ID now @@ -228,21 +228,21 @@ func resourceNetappbackupCreate(d *schema.ResourceData, meta interface{}) error d.SetId(id) err = NetappOperationWaitTime( - config, res, project, "Creating backup", userAgent, + config, res, project, "Creating Backup", userAgent, d.Timeout(schema.TimeoutCreate)) if err != nil { // The resource didn't actually create d.SetId("") - return fmt.Errorf("Error waiting to create backup: %s", err) + return fmt.Errorf("Error waiting to create Backup: %s", err) } - log.Printf("[DEBUG] Finished creating backup %q: %#v", d.Id(), res) + log.Printf("[DEBUG] Finished creating Backup %q: %#v", d.Id(), res) - return resourceNetappbackupRead(d, meta) + return resourceNetappBackupRead(d, meta) } -func resourceNetappbackupRead(d *schema.ResourceData, meta interface{}) error { +func resourceNetappBackupRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { @@ -258,7 +258,7 @@ func resourceNetappbackupRead(d *schema.ResourceData, meta interface{}) error { project, err := tpgresource.GetProject(d, config) if err != nil { - return fmt.Errorf("Error fetching project for backup: %s", err) + return fmt.Errorf("Error fetching project for Backup: %s", err) } billingProject = project @@ -277,51 +277,51 @@ func resourceNetappbackupRead(d *schema.ResourceData, meta interface{}) error { Headers: headers, }) if err != nil { - return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Netappbackup %q", d.Id())) + return 
transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NetappBackup %q", d.Id())) } if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading backup: %s", err) + return fmt.Errorf("Error reading Backup: %s", err) } - if err := d.Set("state", flattenNetappbackupState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading backup: %s", err) + if err := d.Set("state", flattenNetappBackupState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) } - if err := d.Set("description", flattenNetappbackupDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading backup: %s", err) + if err := d.Set("description", flattenNetappBackupDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) } - if err := d.Set("volume_usage_bytes", flattenNetappbackupVolumeUsageBytes(res["volumeUsageBytes"], d, config)); err != nil { - return fmt.Errorf("Error reading backup: %s", err) + if err := d.Set("volume_usage_bytes", flattenNetappBackupVolumeUsageBytes(res["volumeUsageBytes"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) } - if err := d.Set("backup_type", flattenNetappbackupBackupType(res["backupType"], d, config)); err != nil { - return fmt.Errorf("Error reading backup: %s", err) + if err := d.Set("backup_type", flattenNetappBackupBackupType(res["backupType"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) } - if err := d.Set("source_volume", flattenNetappbackupSourceVolume(res["sourceVolume"], d, config)); err != nil { - return fmt.Errorf("Error reading backup: %s", err) + if err := d.Set("source_volume", flattenNetappBackupSourceVolume(res["sourceVolume"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) } - if err := d.Set("create_time", flattenNetappbackupCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading backup: %s", err) + if err := d.Set("create_time", flattenNetappBackupCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) } - if err := d.Set("labels", flattenNetappbackupLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading backup: %s", err) + if err := d.Set("labels", flattenNetappBackupLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) } - if err := d.Set("chain_storage_bytes", flattenNetappbackupChainStorageBytes(res["chainStorageBytes"], d, config)); err != nil { - return fmt.Errorf("Error reading backup: %s", err) + if err := d.Set("chain_storage_bytes", flattenNetappBackupChainStorageBytes(res["chainStorageBytes"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) } - if err := d.Set("source_snapshot", flattenNetappbackupSourceSnapshot(res["sourceSnapshot"], d, config)); err != nil { - return fmt.Errorf("Error reading backup: %s", err) + if err := d.Set("source_snapshot", flattenNetappBackupSourceSnapshot(res["sourceSnapshot"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) } - if err := d.Set("terraform_labels", flattenNetappbackupTerraformLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading backup: %s", err) + if err := d.Set("terraform_labels", flattenNetappBackupTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", 
err) } - if err := d.Set("effective_labels", flattenNetappbackupEffectiveLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading backup: %s", err) + if err := d.Set("effective_labels", flattenNetappBackupEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) } return nil } -func resourceNetappbackupUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceNetappBackupUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { @@ -332,24 +332,24 @@ func resourceNetappbackupUpdate(d *schema.ResourceData, meta interface{}) error project, err := tpgresource.GetProject(d, config) if err != nil { - return fmt.Errorf("Error fetching project for backup: %s", err) + return fmt.Errorf("Error fetching project for Backup: %s", err) } billingProject = project obj := make(map[string]interface{}) - descriptionProp, err := expandNetappbackupDescription(d.Get("description"), d, config) + descriptionProp, err := expandNetappBackupDescription(d.Get("description"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } - sourceSnapshotProp, err := expandNetappbackupSourceSnapshot(d.Get("source_snapshot"), d, config) + sourceSnapshotProp, err := expandNetappBackupSourceSnapshot(d.Get("source_snapshot"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("source_snapshot"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceSnapshotProp)) { obj["sourceSnapshot"] = sourceSnapshotProp } - labelsProp, err := expandNetappbackupEffectiveLabels(d.Get("effective_labels"), d, config) + labelsProp, err := expandNetappBackupEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { @@ -361,7 +361,7 @@ func resourceNetappbackupUpdate(d *schema.ResourceData, meta interface{}) error return err } - log.Printf("[DEBUG] Updating backup %q: %#v", d.Id(), obj) + log.Printf("[DEBUG] Updating Backup %q: %#v", d.Id(), obj) headers := make(http.Header) updateMask := []string{} @@ -402,13 +402,13 @@ func resourceNetappbackupUpdate(d *schema.ResourceData, meta interface{}) error }) if err != nil { - return fmt.Errorf("Error updating backup %q: %s", d.Id(), err) + return fmt.Errorf("Error updating Backup %q: %s", d.Id(), err) } else { - log.Printf("[DEBUG] Finished updating backup %q: %#v", d.Id(), res) + log.Printf("[DEBUG] Finished updating Backup %q: %#v", d.Id(), res) } err = NetappOperationWaitTime( - config, res, project, "Updating backup", userAgent, + config, res, project, "Updating Backup", userAgent, d.Timeout(schema.TimeoutUpdate)) if err != nil { @@ -416,10 +416,10 @@ func resourceNetappbackupUpdate(d *schema.ResourceData, meta interface{}) error } } - return resourceNetappbackupRead(d, meta) + return resourceNetappBackupRead(d, meta) } -func resourceNetappbackupDelete(d *schema.ResourceData, meta interface{}) error { +func resourceNetappBackupDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != 
nil { @@ -430,7 +430,7 @@ func resourceNetappbackupDelete(d *schema.ResourceData, meta interface{}) error project, err := tpgresource.GetProject(d, config) if err != nil { - return fmt.Errorf("Error fetching project for backup: %s", err) + return fmt.Errorf("Error fetching project for Backup: %s", err) } billingProject = project @@ -448,7 +448,7 @@ func resourceNetappbackupDelete(d *schema.ResourceData, meta interface{}) error headers := make(http.Header) - log.Printf("[DEBUG] Deleting backup %q", d.Id()) + log.Printf("[DEBUG] Deleting Backup %q", d.Id()) res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, Method: "DELETE", @@ -460,22 +460,22 @@ func resourceNetappbackupDelete(d *schema.ResourceData, meta interface{}) error Headers: headers, }) if err != nil { - return transport_tpg.HandleNotFoundError(err, d, "backup") + return transport_tpg.HandleNotFoundError(err, d, "Backup") } err = NetappOperationWaitTime( - config, res, project, "Deleting backup", userAgent, + config, res, project, "Deleting Backup", userAgent, d.Timeout(schema.TimeoutDelete)) if err != nil { return err } - log.Printf("[DEBUG] Finished deleting backup %q: %#v", d.Id(), res) + log.Printf("[DEBUG] Finished deleting Backup %q: %#v", d.Id(), res) return nil } -func resourceNetappbackupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { +func resourceNetappBackupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { config := meta.(*transport_tpg.Config) if err := tpgresource.ParseImportId([]string{ "^projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/backupVaults/(?P<vault_name>[^/]+)/backups/(?P<name>[^/]+)$", @@ -495,31 +495,31 @@ func resourceNetappbackupImport(d *schema.ResourceData, meta interface{}) ([]*sc return []*schema.ResourceData{d}, nil } -func flattenNetappbackupState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappbackupDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappbackupVolumeUsageBytes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupVolumeUsageBytes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappbackupBackupType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupBackupType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappbackupSourceVolume(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupSourceVolume(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappbackupCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappbackupLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v 
== nil { return v } @@ -534,15 +534,15 @@ func flattenNetappbackupLabels(v interface{}, d *schema.ResourceData, config *tr return transformed } -func flattenNetappbackupChainStorageBytes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupChainStorageBytes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappbackupSourceSnapshot(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupSourceSnapshot(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappbackupTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -557,23 +557,23 @@ func flattenNetappbackupTerraformLabels(v interface{}, d *schema.ResourceData, c return transformed } -func flattenNetappbackupEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandNetappbackupDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappBackupDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappbackupSourceVolume(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappBackupSourceVolume(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappbackupSourceSnapshot(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappBackupSourceSnapshot(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappbackupEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { +func expandNetappBackupEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_backup_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_backup_policy.go index 6a2426d436a..eb5f3e913e9 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_backup_policy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_backup_policy.go @@ -32,15 +32,15 @@ import ( transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) -func ResourceNetappbackupPolicy() *schema.Resource { +func ResourceNetappBackupPolicy() *schema.Resource { return &schema.Resource{ - Create: resourceNetappbackupPolicyCreate, - Read: resourceNetappbackupPolicyRead, - Update: 
resourceNetappbackupPolicyUpdate, - Delete: resourceNetappbackupPolicyDelete, + Create: resourceNetappBackupPolicyCreate, + Read: resourceNetappBackupPolicyRead, + Update: resourceNetappBackupPolicyUpdate, + Delete: resourceNetappBackupPolicyDelete, Importer: &schema.ResourceImporter{ - State: resourceNetappbackupPolicyImport, + State: resourceNetappBackupPolicyImport, }, Timeouts: &schema.ResourceTimeout{ @@ -143,7 +143,7 @@ Please refer to the field 'effective_labels' for all of the labels present on th } } -func resourceNetappbackupPolicyCreate(d *schema.ResourceData, meta interface{}) error { +func resourceNetappBackupPolicyCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { @@ -151,37 +151,37 @@ func resourceNetappbackupPolicyCreate(d *schema.ResourceData, meta interface{}) } obj := make(map[string]interface{}) - dailyBackupLimitProp, err := expandNetappbackupPolicyDailyBackupLimit(d.Get("daily_backup_limit"), d, config) + dailyBackupLimitProp, err := expandNetappBackupPolicyDailyBackupLimit(d.Get("daily_backup_limit"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("daily_backup_limit"); !tpgresource.IsEmptyValue(reflect.ValueOf(dailyBackupLimitProp)) && (ok || !reflect.DeepEqual(v, dailyBackupLimitProp)) { obj["dailyBackupLimit"] = dailyBackupLimitProp } - weeklyBackupLimitProp, err := expandNetappbackupPolicyWeeklyBackupLimit(d.Get("weekly_backup_limit"), d, config) + weeklyBackupLimitProp, err := expandNetappBackupPolicyWeeklyBackupLimit(d.Get("weekly_backup_limit"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("weekly_backup_limit"); !tpgresource.IsEmptyValue(reflect.ValueOf(weeklyBackupLimitProp)) && (ok || !reflect.DeepEqual(v, weeklyBackupLimitProp)) { obj["weeklyBackupLimit"] = weeklyBackupLimitProp } - monthlyBackupLimitProp, err := expandNetappbackupPolicyMonthlyBackupLimit(d.Get("monthly_backup_limit"), d, config) + monthlyBackupLimitProp, err := expandNetappBackupPolicyMonthlyBackupLimit(d.Get("monthly_backup_limit"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("monthly_backup_limit"); !tpgresource.IsEmptyValue(reflect.ValueOf(monthlyBackupLimitProp)) && (ok || !reflect.DeepEqual(v, monthlyBackupLimitProp)) { obj["monthlyBackupLimit"] = monthlyBackupLimitProp } - descriptionProp, err := expandNetappbackupPolicyDescription(d.Get("description"), d, config) + descriptionProp, err := expandNetappBackupPolicyDescription(d.Get("description"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } - enabledProp, err := expandNetappbackupPolicyEnabled(d.Get("enabled"), d, config) + enabledProp, err := expandNetappBackupPolicyEnabled(d.Get("enabled"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("enabled"); ok || !reflect.DeepEqual(v, enabledProp) { obj["enabled"] = enabledProp } - labelsProp, err := expandNetappbackupPolicyEffectiveLabels(d.Get("effective_labels"), d, config) + labelsProp, err := expandNetappBackupPolicyEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { @@ -193,12 
+193,12 @@ func resourceNetappbackupPolicyCreate(d *schema.ResourceData, meta interface{}) return err } - log.Printf("[DEBUG] Creating new backupPolicy: %#v", obj) + log.Printf("[DEBUG] Creating new BackupPolicy: %#v", obj) billingProject := "" project, err := tpgresource.GetProject(d, config) if err != nil { - return fmt.Errorf("Error fetching project for backupPolicy: %s", err) + return fmt.Errorf("Error fetching project for BackupPolicy: %s", err) } billingProject = project @@ -219,7 +219,7 @@ func resourceNetappbackupPolicyCreate(d *schema.ResourceData, meta interface{}) Headers: headers, }) if err != nil { - return fmt.Errorf("Error creating backupPolicy: %s", err) + return fmt.Errorf("Error creating BackupPolicy: %s", err) } // Store the ID now @@ -230,21 +230,21 @@ func resourceNetappbackupPolicyCreate(d *schema.ResourceData, meta interface{}) d.SetId(id) err = NetappOperationWaitTime( - config, res, project, "Creating backupPolicy", userAgent, + config, res, project, "Creating BackupPolicy", userAgent, d.Timeout(schema.TimeoutCreate)) if err != nil { // The resource didn't actually create d.SetId("") - return fmt.Errorf("Error waiting to create backupPolicy: %s", err) + return fmt.Errorf("Error waiting to create BackupPolicy: %s", err) } - log.Printf("[DEBUG] Finished creating backupPolicy %q: %#v", d.Id(), res) + log.Printf("[DEBUG] Finished creating BackupPolicy %q: %#v", d.Id(), res) - return resourceNetappbackupPolicyRead(d, meta) + return resourceNetappBackupPolicyRead(d, meta) } -func resourceNetappbackupPolicyRead(d *schema.ResourceData, meta interface{}) error { +func resourceNetappBackupPolicyRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { @@ -260,7 +260,7 @@ func resourceNetappbackupPolicyRead(d *schema.ResourceData, meta interface{}) er project, err := tpgresource.GetProject(d, config) if err != nil { - return fmt.Errorf("Error fetching project for backupPolicy: %s", err) + return fmt.Errorf("Error fetching project for BackupPolicy: %s", err) } billingProject = project @@ -279,51 +279,51 @@ func resourceNetappbackupPolicyRead(d *schema.ResourceData, meta interface{}) er Headers: headers, }) if err != nil { - return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NetappbackupPolicy %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NetappBackupPolicy %q", d.Id())) } if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading backupPolicy: %s", err) + return fmt.Errorf("Error reading BackupPolicy: %s", err) } - if err := d.Set("create_time", flattenNetappbackupPolicyCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading backupPolicy: %s", err) + if err := d.Set("create_time", flattenNetappBackupPolicyCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupPolicy: %s", err) } - if err := d.Set("labels", flattenNetappbackupPolicyLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading backupPolicy: %s", err) + if err := d.Set("labels", flattenNetappBackupPolicyLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupPolicy: %s", err) } - if err := d.Set("state", flattenNetappbackupPolicyState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading backupPolicy: %s", err) + if err := d.Set("state", 
flattenNetappBackupPolicyState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupPolicy: %s", err) } - if err := d.Set("daily_backup_limit", flattenNetappbackupPolicyDailyBackupLimit(res["dailyBackupLimit"], d, config)); err != nil { - return fmt.Errorf("Error reading backupPolicy: %s", err) + if err := d.Set("daily_backup_limit", flattenNetappBackupPolicyDailyBackupLimit(res["dailyBackupLimit"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupPolicy: %s", err) } - if err := d.Set("weekly_backup_limit", flattenNetappbackupPolicyWeeklyBackupLimit(res["weeklyBackupLimit"], d, config)); err != nil { - return fmt.Errorf("Error reading backupPolicy: %s", err) + if err := d.Set("weekly_backup_limit", flattenNetappBackupPolicyWeeklyBackupLimit(res["weeklyBackupLimit"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupPolicy: %s", err) } - if err := d.Set("monthly_backup_limit", flattenNetappbackupPolicyMonthlyBackupLimit(res["monthlyBackupLimit"], d, config)); err != nil { - return fmt.Errorf("Error reading backupPolicy: %s", err) + if err := d.Set("monthly_backup_limit", flattenNetappBackupPolicyMonthlyBackupLimit(res["monthlyBackupLimit"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupPolicy: %s", err) } - if err := d.Set("description", flattenNetappbackupPolicyDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading backupPolicy: %s", err) + if err := d.Set("description", flattenNetappBackupPolicyDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupPolicy: %s", err) } - if err := d.Set("enabled", flattenNetappbackupPolicyEnabled(res["enabled"], d, config)); err != nil { - return fmt.Errorf("Error reading backupPolicy: %s", err) + if err := d.Set("enabled", flattenNetappBackupPolicyEnabled(res["enabled"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupPolicy: %s", err) } - if err := d.Set("assigned_volume_count", flattenNetappbackupPolicyAssignedVolumeCount(res["assignedVolumeCount"], d, config)); err != nil { - return fmt.Errorf("Error reading backupPolicy: %s", err) + if err := d.Set("assigned_volume_count", flattenNetappBackupPolicyAssignedVolumeCount(res["assignedVolumeCount"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupPolicy: %s", err) } - if err := d.Set("terraform_labels", flattenNetappbackupPolicyTerraformLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading backupPolicy: %s", err) + if err := d.Set("terraform_labels", flattenNetappBackupPolicyTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupPolicy: %s", err) } - if err := d.Set("effective_labels", flattenNetappbackupPolicyEffectiveLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading backupPolicy: %s", err) + if err := d.Set("effective_labels", flattenNetappBackupPolicyEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupPolicy: %s", err) } return nil } -func resourceNetappbackupPolicyUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceNetappBackupPolicyUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { @@ -334,42 +334,42 @@ func resourceNetappbackupPolicyUpdate(d *schema.ResourceData, meta interface{}) project, err := 
tpgresource.GetProject(d, config) if err != nil { - return fmt.Errorf("Error fetching project for backupPolicy: %s", err) + return fmt.Errorf("Error fetching project for BackupPolicy: %s", err) } billingProject = project obj := make(map[string]interface{}) - dailyBackupLimitProp, err := expandNetappbackupPolicyDailyBackupLimit(d.Get("daily_backup_limit"), d, config) + dailyBackupLimitProp, err := expandNetappBackupPolicyDailyBackupLimit(d.Get("daily_backup_limit"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("daily_backup_limit"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dailyBackupLimitProp)) { obj["dailyBackupLimit"] = dailyBackupLimitProp } - weeklyBackupLimitProp, err := expandNetappbackupPolicyWeeklyBackupLimit(d.Get("weekly_backup_limit"), d, config) + weeklyBackupLimitProp, err := expandNetappBackupPolicyWeeklyBackupLimit(d.Get("weekly_backup_limit"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("weekly_backup_limit"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, weeklyBackupLimitProp)) { obj["weeklyBackupLimit"] = weeklyBackupLimitProp } - monthlyBackupLimitProp, err := expandNetappbackupPolicyMonthlyBackupLimit(d.Get("monthly_backup_limit"), d, config) + monthlyBackupLimitProp, err := expandNetappBackupPolicyMonthlyBackupLimit(d.Get("monthly_backup_limit"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("monthly_backup_limit"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, monthlyBackupLimitProp)) { obj["monthlyBackupLimit"] = monthlyBackupLimitProp } - descriptionProp, err := expandNetappbackupPolicyDescription(d.Get("description"), d, config) + descriptionProp, err := expandNetappBackupPolicyDescription(d.Get("description"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } - enabledProp, err := expandNetappbackupPolicyEnabled(d.Get("enabled"), d, config) + enabledProp, err := expandNetappBackupPolicyEnabled(d.Get("enabled"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("enabled"); ok || !reflect.DeepEqual(v, enabledProp) { obj["enabled"] = enabledProp } - labelsProp, err := expandNetappbackupPolicyEffectiveLabels(d.Get("effective_labels"), d, config) + labelsProp, err := expandNetappBackupPolicyEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { @@ -381,7 +381,7 @@ func resourceNetappbackupPolicyUpdate(d *schema.ResourceData, meta interface{}) return err } - log.Printf("[DEBUG] Updating backupPolicy %q: %#v", d.Id(), obj) + log.Printf("[DEBUG] Updating BackupPolicy %q: %#v", d.Id(), obj) headers := make(http.Header) updateMask := []string{} @@ -434,13 +434,13 @@ func resourceNetappbackupPolicyUpdate(d *schema.ResourceData, meta interface{}) }) if err != nil { - return fmt.Errorf("Error updating backupPolicy %q: %s", d.Id(), err) + return fmt.Errorf("Error updating BackupPolicy %q: %s", d.Id(), err) } else { - log.Printf("[DEBUG] Finished updating backupPolicy %q: %#v", d.Id(), res) + log.Printf("[DEBUG] Finished updating BackupPolicy %q: %#v", d.Id(), res) } err = NetappOperationWaitTime( - config, res, project, "Updating 
backupPolicy", userAgent, + config, res, project, "Updating BackupPolicy", userAgent, d.Timeout(schema.TimeoutUpdate)) if err != nil { @@ -448,10 +448,10 @@ func resourceNetappbackupPolicyUpdate(d *schema.ResourceData, meta interface{}) } } - return resourceNetappbackupPolicyRead(d, meta) + return resourceNetappBackupPolicyRead(d, meta) } -func resourceNetappbackupPolicyDelete(d *schema.ResourceData, meta interface{}) error { +func resourceNetappBackupPolicyDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { @@ -462,7 +462,7 @@ func resourceNetappbackupPolicyDelete(d *schema.ResourceData, meta interface{}) project, err := tpgresource.GetProject(d, config) if err != nil { - return fmt.Errorf("Error fetching project for backupPolicy: %s", err) + return fmt.Errorf("Error fetching project for BackupPolicy: %s", err) } billingProject = project @@ -480,7 +480,7 @@ func resourceNetappbackupPolicyDelete(d *schema.ResourceData, meta interface{}) headers := make(http.Header) - log.Printf("[DEBUG] Deleting backupPolicy %q", d.Id()) + log.Printf("[DEBUG] Deleting BackupPolicy %q", d.Id()) res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, Method: "DELETE", @@ -492,22 +492,22 @@ func resourceNetappbackupPolicyDelete(d *schema.ResourceData, meta interface{}) Headers: headers, }) if err != nil { - return transport_tpg.HandleNotFoundError(err, d, "backupPolicy") + return transport_tpg.HandleNotFoundError(err, d, "BackupPolicy") } err = NetappOperationWaitTime( - config, res, project, "Deleting backupPolicy", userAgent, + config, res, project, "Deleting BackupPolicy", userAgent, d.Timeout(schema.TimeoutDelete)) if err != nil { return err } - log.Printf("[DEBUG] Finished deleting backupPolicy %q: %#v", d.Id(), res) + log.Printf("[DEBUG] Finished deleting BackupPolicy %q: %#v", d.Id(), res) return nil } -func resourceNetappbackupPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { +func resourceNetappBackupPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { config := meta.(*transport_tpg.Config) if err := tpgresource.ParseImportId([]string{ "^projects/(?P[^/]+)/locations/(?P[^/]+)/backupPolicies/(?P[^/]+)$", @@ -527,11 +527,11 @@ func resourceNetappbackupPolicyImport(d *schema.ResourceData, meta interface{}) return []*schema.ResourceData{d}, nil } -func flattenNetappbackupPolicyCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupPolicyCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappbackupPolicyLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupPolicyLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -546,11 +546,11 @@ func flattenNetappbackupPolicyLabels(v interface{}, d *schema.ResourceData, conf return transformed } -func flattenNetappbackupPolicyState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupPolicyState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappbackupPolicyDailyBackupLimit(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func 
flattenNetappBackupPolicyDailyBackupLimit(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { @@ -567,7 +567,7 @@ func flattenNetappbackupPolicyDailyBackupLimit(v interface{}, d *schema.Resource return v // let terraform core handle it otherwise } -func flattenNetappbackupPolicyWeeklyBackupLimit(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupPolicyWeeklyBackupLimit(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { @@ -584,7 +584,7 @@ func flattenNetappbackupPolicyWeeklyBackupLimit(v interface{}, d *schema.Resourc return v // let terraform core handle it otherwise } -func flattenNetappbackupPolicyMonthlyBackupLimit(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupPolicyMonthlyBackupLimit(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { @@ -601,15 +601,15 @@ func flattenNetappbackupPolicyMonthlyBackupLimit(v interface{}, d *schema.Resour return v // let terraform core handle it otherwise } -func flattenNetappbackupPolicyDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupPolicyDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappbackupPolicyEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupPolicyEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappbackupPolicyAssignedVolumeCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupPolicyAssignedVolumeCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { @@ -626,7 +626,7 @@ func flattenNetappbackupPolicyAssignedVolumeCount(v interface{}, d *schema.Resou return v // let terraform core handle it otherwise } -func flattenNetappbackupPolicyTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupPolicyTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -641,31 +641,31 @@ func flattenNetappbackupPolicyTerraformLabels(v interface{}, d *schema.ResourceD return transformed } -func flattenNetappbackupPolicyEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupPolicyEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandNetappbackupPolicyDailyBackupLimit(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappBackupPolicyDailyBackupLimit(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappbackupPolicyWeeklyBackupLimit(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappBackupPolicyWeeklyBackupLimit(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappbackupPolicyMonthlyBackupLimit(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappBackupPolicyMonthlyBackupLimit(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappbackupPolicyDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappBackupPolicyDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappbackupPolicyEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappBackupPolicyEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappbackupPolicyEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { +func expandNetappBackupPolicyEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_backup_policy_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_backup_policy_sweeper.go index 4d9ae2d7343..15ce2c8e520 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_backup_policy_sweeper.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_backup_policy_sweeper.go @@ -1,20 +1,5 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 - -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
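(Note: the hunks above rename every generated Go identifier for the NetApp backup and backup-policy resources from the Netappbackup... casing to NetappBackup..., along with the matching log and error strings. Only internal Go symbols change; the Terraform type names and state layout are untouched, so no migration is involved, but every call site, including the provider's resource map, must be renamed in the same commit. A minimal sketch of why the rename is user-invisible, assuming a hypothetical registration map; the stub constructor below is illustrative, not code from this patch:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// Stand-in for the renamed generated constructor; the real one lives in
// google/services/netapp and returns the full resource schema.
func ResourceNetappBackupPolicy() *schema.Resource {
	return &schema.Resource{}
}

func main() {
	// The map key is the user-facing Terraform type and stays stable; only
	// the Go identifier casing changed, so practitioner configs and state
	// are unaffected by this patch.
	resources := map[string]*schema.Resource{
		"google_netapp_backup_policy": ResourceNetappBackupPolicy(),
	}
	fmt.Println(len(resources), "resource registered")
}
)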
-// -// ---------------------------------------------------------------------------- - package netapp import ( @@ -30,12 +15,12 @@ import ( ) func init() { - sweeper.AddTestSweepers("NetappbackupPolicy", testSweepNetappbackupPolicy) + sweeper.AddTestSweepers("NetappBackupPolicy", testSweepNetappBackupPolicy) } // At the time of writing, the CI only passes us-central1 as the region -func testSweepNetappbackupPolicy(region string) error { - resourceName := "NetappbackupPolicy" +func testSweepNetappBackupPolicy(region string) error { + resourceName := "NetappBackupPolicy" log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) config, err := sweeper.SharedConfigForRegion(region) @@ -53,86 +38,91 @@ func testSweepNetappbackupPolicy(region string) error { t := &testing.T{} billingId := envvar.GetTestBillingAccountFromEnv(t) - // Setup variables to replace in list template - d := &tpgresource.ResourceDataMock{ - FieldsInSchema: map[string]interface{}{ - "project": config.Project, - "region": region, - "location": region, - "zone": "-", - "billing_account": billingId, - }, - } - - listTemplate := strings.Split("https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/backupPolicies", "?")[0] - listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) - return nil - } - - res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "GET", - Project: config.Project, - RawURL: listUrl, - UserAgent: config.UserAgent, - }) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) - return nil - } - - resourceList, ok := res["backupPolicies"] - if !ok { - log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") - return nil - } - - rl := resourceList.([]interface{}) - - log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) - // Keep count of items that aren't sweepable for logging. 
- nonPrefixCount := 0 - for _, ri := range rl { - obj := ri.(map[string]interface{}) - if obj["name"] == nil { - log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) - return nil + regions := []string{"us-central1", "us-west2", "us-east4"} + for _, r := range regions { + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s in %s", resourceName, r) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": r, + "location": r, + "zone": "-", + "billing_account": billingId, + }, } - name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) - // Skip resources that shouldn't be sweeped - if !sweeper.IsSweepableTestResource(name) { - nonPrefixCount++ - continue - } - - deleteTemplate := "https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/backupPolicies/{{name}}" - deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + listTemplate := strings.Split("https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/backupPolicies", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) - return nil + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + continue } - deleteUrl = deleteUrl + name - // Don't wait on operations as we may have a lot to delete - _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, - Method: "DELETE", + Method: "GET", Project: config.Project, - RawURL: deleteUrl, + RawURL: listUrl, UserAgent: config.UserAgent, }) if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) - } else { - log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + continue } - } - if nonPrefixCount > 0 { - log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + resourceList, ok := res["backupPolicies"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + continue + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + continue + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/backupPolicies/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + continue + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } } return nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_backup_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_backup_sweeper.go index bf490800f8d..0d784327c21 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_backup_sweeper.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_backup_sweeper.go @@ -1,20 +1,5 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 - -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
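(Note: the sweeper rewrite, repeated for each NetApp resource in this patch, makes two behavioral changes: the single region handed in by CI gives way to an explicit region list (us-central1, us-west2, us-east4, plus us-west1 for backup vaults), and failures now continue to the next region or item instead of return nil, which previously aborted the entire sweep on the first bad response. A condensed sketch of that control flow, with the list/delete transport calls reduced to illustrative function parameters:

package main

import "log"

// sweepAllRegions mirrors the loop structure introduced above: iterate a
// fixed region list and skip, rather than abort, on per-region failures.
func sweepAllRegions(list func(region string) ([]string, error),
	del func(region, name string) error) error {
	regions := []string{"us-central1", "us-west2", "us-east4"}
	for _, r := range regions {
		names, err := list(r)
		if err != nil {
			log.Printf("[INFO][SWEEPER_LOG] list failed in %s: %s", r, err)
			continue // next region; the earlier code returned nil here
		}
		for _, name := range names {
			if err := del(r, name); err != nil {
				// Log and keep going; one stuck resource should not
				// stop the rest of the sweep.
				log.Printf("[INFO][SWEEPER_LOG] delete %s failed: %s", name, err)
			}
		}
	}
	return nil
}

func main() {
	_ = sweepAllRegions(
		func(string) ([]string, error) { return []string{"tf-test-demo"}, nil },
		func(_, name string) error { log.Println("deleted", name); return nil },
	)
}
)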
-// -// ---------------------------------------------------------------------------- - package netapp import ( @@ -30,12 +15,12 @@ import ( ) func init() { - sweeper.AddTestSweepers("Netappbackup", testSweepNetappbackup) + sweeper.AddTestSweepers("NetappBackup", testSweepNetappBackup) } // At the time of writing, the CI only passes us-central1 as the region -func testSweepNetappbackup(region string) error { - resourceName := "Netappbackup" +func testSweepNetappBackup(region string) error { + resourceName := "NetappBackup" log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) config, err := sweeper.SharedConfigForRegion(region) @@ -53,86 +38,91 @@ func testSweepNetappbackup(region string) error { t := &testing.T{} billingId := envvar.GetTestBillingAccountFromEnv(t) - // Setup variables to replace in list template - d := &tpgresource.ResourceDataMock{ - FieldsInSchema: map[string]interface{}{ - "project": config.Project, - "region": region, - "location": region, - "zone": "-", - "billing_account": billingId, - }, - } - - listTemplate := strings.Split("https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups", "?")[0] - listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) - return nil - } - - res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "GET", - Project: config.Project, - RawURL: listUrl, - UserAgent: config.UserAgent, - }) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) - return nil - } - - resourceList, ok := res["backups"] - if !ok { - log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") - return nil - } - - rl := resourceList.([]interface{}) - - log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) - // Keep count of items that aren't sweepable for logging. 
- nonPrefixCount := 0 - for _, ri := range rl { - obj := ri.(map[string]interface{}) - if obj["name"] == nil { - log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) - return nil + regions := []string{"us-central1", "us-west2", "us-east4"} + for _, r := range regions { + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s in %s", resourceName, r) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": r, + "location": r, + "zone": "-", + "billing_account": billingId, + }, } - name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) - // Skip resources that shouldn't be sweeped - if !sweeper.IsSweepableTestResource(name) { - nonPrefixCount++ - continue - } - - deleteTemplate := "https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups/{{name}}" - deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + listTemplate := strings.Split("https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) - return nil + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + continue } - deleteUrl = deleteUrl + name - // Don't wait on operations as we may have a lot to delete - _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, - Method: "DELETE", + Method: "GET", Project: config.Project, - RawURL: deleteUrl, + RawURL: listUrl, UserAgent: config.UserAgent, }) if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) - } else { - log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + continue } - } - if nonPrefixCount > 0 { - log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + resourceList, ok := res["backups"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + continue + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + continue + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + continue + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } } return nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_backup_vault.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_backup_vault.go index cc7fa94590e..ccb214f2bbe 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_backup_vault.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_backup_vault.go @@ -32,15 +32,15 @@ import ( transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) -func ResourceNetappbackupVault() *schema.Resource { +func ResourceNetappBackupVault() *schema.Resource { return &schema.Resource{ - Create: resourceNetappbackupVaultCreate, - Read: resourceNetappbackupVaultRead, - Update: resourceNetappbackupVaultUpdate, - Delete: resourceNetappbackupVaultDelete, + Create: resourceNetappBackupVaultCreate, + Read: resourceNetappBackupVaultRead, + Update: resourceNetappBackupVaultUpdate, + Delete: resourceNetappBackupVaultDelete, Importer: &schema.ResourceImporter{ - State: resourceNetappbackupVaultImport, + State: resourceNetappBackupVaultImport, }, Timeouts: &schema.ResourceTimeout{ @@ -116,7 +116,7 @@ Please refer to the field 'effective_labels' for all of the labels present on th } } -func resourceNetappbackupVaultCreate(d *schema.ResourceData, meta interface{}) error { +func resourceNetappBackupVaultCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { @@ -124,13 +124,13 @@ func resourceNetappbackupVaultCreate(d *schema.ResourceData, meta interface{}) e } obj := make(map[string]interface{}) - descriptionProp, err := expandNetappbackupVaultDescription(d.Get("description"), d, config) + descriptionProp, err := expandNetappBackupVaultDescription(d.Get("description"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("description"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } - labelsProp, err := expandNetappbackupVaultEffectiveLabels(d.Get("effective_labels"), d, config) + labelsProp, err := expandNetappBackupVaultEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { @@ -142,12 +142,12 @@ func resourceNetappbackupVaultCreate(d *schema.ResourceData, meta interface{}) e return err } - log.Printf("[DEBUG] Creating new backupVault: %#v", obj) + log.Printf("[DEBUG] Creating new BackupVault: %#v", obj) billingProject := "" project, err := tpgresource.GetProject(d, config) if err != nil { - return fmt.Errorf("Error fetching project for backupVault: %s", err) + return fmt.Errorf("Error fetching project for BackupVault: %s", err) } billingProject = project @@ -168,7 +168,7 @@ func resourceNetappbackupVaultCreate(d *schema.ResourceData, meta interface{}) e Headers: headers, }) if err != nil { - return fmt.Errorf("Error creating backupVault: %s", err) + return fmt.Errorf("Error creating BackupVault: %s", err) } // Store the ID now @@ -179,21 +179,21 @@ func resourceNetappbackupVaultCreate(d *schema.ResourceData, meta interface{}) e d.SetId(id) err = NetappOperationWaitTime( - config, res, project, "Creating backupVault", userAgent, + config, res, project, "Creating BackupVault", userAgent, d.Timeout(schema.TimeoutCreate)) if err != nil { // The resource didn't actually create d.SetId("") - return fmt.Errorf("Error waiting to create backupVault: %s", err) + return fmt.Errorf("Error waiting to create BackupVault: %s", err) } - log.Printf("[DEBUG] Finished creating backupVault %q: %#v", d.Id(), res) + log.Printf("[DEBUG] Finished creating BackupVault %q: %#v", d.Id(), res) - return resourceNetappbackupVaultRead(d, meta) + return resourceNetappBackupVaultRead(d, meta) } -func resourceNetappbackupVaultRead(d *schema.ResourceData, meta interface{}) error { +func resourceNetappBackupVaultRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { @@ -209,7 +209,7 @@ func resourceNetappbackupVaultRead(d *schema.ResourceData, meta interface{}) err project, err := tpgresource.GetProject(d, config) if err != nil { - return fmt.Errorf("Error fetching project for backupVault: %s", err) + return fmt.Errorf("Error fetching project for BackupVault: %s", err) } billingProject = project @@ -228,36 +228,36 @@ func resourceNetappbackupVaultRead(d *schema.ResourceData, meta interface{}) err Headers: headers, }) if err != nil { - return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NetappbackupVault %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NetappBackupVault %q", d.Id())) } if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading backupVault: %s", err) + return fmt.Errorf("Error reading BackupVault: %s", err) } - if err := d.Set("state", flattenNetappbackupVaultState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading backupVault: %s", err) + if err := d.Set("state", flattenNetappBackupVaultState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupVault: %s", err) } - if err := d.Set("create_time", 
flattenNetappbackupVaultCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading backupVault: %s", err) + if err := d.Set("create_time", flattenNetappBackupVaultCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupVault: %s", err) } - if err := d.Set("description", flattenNetappbackupVaultDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading backupVault: %s", err) + if err := d.Set("description", flattenNetappBackupVaultDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupVault: %s", err) } - if err := d.Set("labels", flattenNetappbackupVaultLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading backupVault: %s", err) + if err := d.Set("labels", flattenNetappBackupVaultLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupVault: %s", err) } - if err := d.Set("terraform_labels", flattenNetappbackupVaultTerraformLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading backupVault: %s", err) + if err := d.Set("terraform_labels", flattenNetappBackupVaultTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupVault: %s", err) } - if err := d.Set("effective_labels", flattenNetappbackupVaultEffectiveLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading backupVault: %s", err) + if err := d.Set("effective_labels", flattenNetappBackupVaultEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupVault: %s", err) } return nil } -func resourceNetappbackupVaultUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceNetappBackupVaultUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { @@ -268,18 +268,18 @@ func resourceNetappbackupVaultUpdate(d *schema.ResourceData, meta interface{}) e project, err := tpgresource.GetProject(d, config) if err != nil { - return fmt.Errorf("Error fetching project for backupVault: %s", err) + return fmt.Errorf("Error fetching project for BackupVault: %s", err) } billingProject = project obj := make(map[string]interface{}) - descriptionProp, err := expandNetappbackupVaultDescription(d.Get("description"), d, config) + descriptionProp, err := expandNetappBackupVaultDescription(d.Get("description"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } - labelsProp, err := expandNetappbackupVaultEffectiveLabels(d.Get("effective_labels"), d, config) + labelsProp, err := expandNetappBackupVaultEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { @@ -291,7 +291,7 @@ func resourceNetappbackupVaultUpdate(d *schema.ResourceData, meta interface{}) e return err } - log.Printf("[DEBUG] Updating backupVault %q: %#v", d.Id(), obj) + log.Printf("[DEBUG] Updating BackupVault %q: %#v", d.Id(), obj) headers := make(http.Header) updateMask := []string{} @@ -328,13 +328,13 @@ func resourceNetappbackupVaultUpdate(d *schema.ResourceData, meta interface{}) 
e }) if err != nil { - return fmt.Errorf("Error updating backupVault %q: %s", d.Id(), err) + return fmt.Errorf("Error updating BackupVault %q: %s", d.Id(), err) } else { - log.Printf("[DEBUG] Finished updating backupVault %q: %#v", d.Id(), res) + log.Printf("[DEBUG] Finished updating BackupVault %q: %#v", d.Id(), res) } err = NetappOperationWaitTime( - config, res, project, "Updating backupVault", userAgent, + config, res, project, "Updating BackupVault", userAgent, d.Timeout(schema.TimeoutUpdate)) if err != nil { @@ -342,10 +342,10 @@ func resourceNetappbackupVaultUpdate(d *schema.ResourceData, meta interface{}) e } } - return resourceNetappbackupVaultRead(d, meta) + return resourceNetappBackupVaultRead(d, meta) } -func resourceNetappbackupVaultDelete(d *schema.ResourceData, meta interface{}) error { +func resourceNetappBackupVaultDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { @@ -356,7 +356,7 @@ func resourceNetappbackupVaultDelete(d *schema.ResourceData, meta interface{}) e project, err := tpgresource.GetProject(d, config) if err != nil { - return fmt.Errorf("Error fetching project for backupVault: %s", err) + return fmt.Errorf("Error fetching project for BackupVault: %s", err) } billingProject = project @@ -374,7 +374,7 @@ func resourceNetappbackupVaultDelete(d *schema.ResourceData, meta interface{}) e headers := make(http.Header) - log.Printf("[DEBUG] Deleting backupVault %q", d.Id()) + log.Printf("[DEBUG] Deleting BackupVault %q", d.Id()) res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, Method: "DELETE", @@ -386,22 +386,22 @@ func resourceNetappbackupVaultDelete(d *schema.ResourceData, meta interface{}) e Headers: headers, }) if err != nil { - return transport_tpg.HandleNotFoundError(err, d, "backupVault") + return transport_tpg.HandleNotFoundError(err, d, "BackupVault") } err = NetappOperationWaitTime( - config, res, project, "Deleting backupVault", userAgent, + config, res, project, "Deleting BackupVault", userAgent, d.Timeout(schema.TimeoutDelete)) if err != nil { return err } - log.Printf("[DEBUG] Finished deleting backupVault %q: %#v", d.Id(), res) + log.Printf("[DEBUG] Finished deleting BackupVault %q: %#v", d.Id(), res) return nil } -func resourceNetappbackupVaultImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { +func resourceNetappBackupVaultImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { config := meta.(*transport_tpg.Config) if err := tpgresource.ParseImportId([]string{ "^projects/(?P[^/]+)/locations/(?P[^/]+)/backupVaults/(?P[^/]+)$", @@ -421,19 +421,19 @@ func resourceNetappbackupVaultImport(d *schema.ResourceData, meta interface{}) ( return []*schema.ResourceData{d}, nil } -func flattenNetappbackupVaultState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupVaultState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappbackupVaultCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupVaultCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappbackupVaultDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func 
flattenNetappBackupVaultDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappbackupVaultLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupVaultLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -448,7 +448,7 @@ func flattenNetappbackupVaultLabels(v interface{}, d *schema.ResourceData, confi return transformed } -func flattenNetappbackupVaultTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupVaultTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -463,15 +463,15 @@ func flattenNetappbackupVaultTerraformLabels(v interface{}, d *schema.ResourceDa return transformed } -func flattenNetappbackupVaultEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappBackupVaultEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandNetappbackupVaultDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappBackupVaultDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappbackupVaultEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { +func expandNetappBackupVaultEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_backup_vault_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_backup_vault_sweeper.go index 31b59fba338..4075932c906 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_backup_vault_sweeper.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_backup_vault_sweeper.go @@ -1,20 +1,5 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 - -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
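(Note: each sweeper deletes only resources whose names look like test artifacts; everything else increments nonPrefixCount, is skipped, and is reported in one summary line at the end. The exact prefixes live in the provider's internal sweeper package; the guard below is an illustrative approximation with an assumed prefix list, not the vendored implementation:

package main

import (
	"fmt"
	"strings"
)

// isSweepableTestResource approximates sweeper.IsSweepableTestResource:
// only names carrying a recognizable test prefix may be deleted. The
// prefix list here is assumed for illustration.
func isSweepableTestResource(name string) bool {
	for _, p := range []string{"tf-test", "tf_test", "tftest"} {
		if strings.HasPrefix(name, p) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isSweepableTestResource("tf-test-backup-vault")) // true
	fmt.Println(isSweepableTestResource("prod-backup-vault"))    // false
}
)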
-// -// ---------------------------------------------------------------------------- - package netapp import ( @@ -30,12 +15,12 @@ import ( ) func init() { - sweeper.AddTestSweepers("NetappbackupVault", testSweepNetappbackupVault) + sweeper.AddTestSweepers("NetappBackupVault", testSweepNetappBackupVault) } // At the time of writing, the CI only passes us-central1 as the region -func testSweepNetappbackupVault(region string) error { - resourceName := "NetappbackupVault" +func testSweepNetappBackupVault(region string) error { + resourceName := "NetappBackupVault" log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) config, err := sweeper.SharedConfigForRegion(region) @@ -53,86 +38,91 @@ func testSweepNetappbackupVault(region string) error { t := &testing.T{} billingId := envvar.GetTestBillingAccountFromEnv(t) - // Setup variables to replace in list template - d := &tpgresource.ResourceDataMock{ - FieldsInSchema: map[string]interface{}{ - "project": config.Project, - "region": region, - "location": region, - "zone": "-", - "billing_account": billingId, - }, - } - - listTemplate := strings.Split("https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/backupVaults", "?")[0] - listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) - return nil - } - - res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "GET", - Project: config.Project, - RawURL: listUrl, - UserAgent: config.UserAgent, - }) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) - return nil - } - - resourceList, ok := res["backupVaults"] - if !ok { - log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") - return nil - } - - rl := resourceList.([]interface{}) - - log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) - // Keep count of items that aren't sweepable for logging. 
- nonPrefixCount := 0 - for _, ri := range rl { - obj := ri.(map[string]interface{}) - if obj["name"] == nil { - log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) - return nil + regions := []string{"us-central1", "us-west1", "us-west2", "us-east4"} + for _, r := range regions { + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s in %s", resourceName, r) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": r, + "location": r, + "zone": "-", + "billing_account": billingId, + }, } - name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) - // Skip resources that shouldn't be sweeped - if !sweeper.IsSweepableTestResource(name) { - nonPrefixCount++ - continue - } - - deleteTemplate := "https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/backupVaults/{{name}}" - deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + listTemplate := strings.Split("https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/backupVaults", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) - return nil + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + continue } - deleteUrl = deleteUrl + name - // Don't wait on operations as we may have a lot to delete - _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, - Method: "DELETE", + Method: "GET", Project: config.Project, - RawURL: deleteUrl, + RawURL: listUrl, UserAgent: config.UserAgent, }) if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) - } else { - log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + continue } - } - if nonPrefixCount > 0 { - log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + resourceList, ok := res["backupVaults"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + continue + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + continue + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/backupVaults/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + continue + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } } return nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_kmsconfig_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_kmsconfig_sweeper.go index d1f15e7b35c..53d6c4af9b0 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_kmsconfig_sweeper.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_kmsconfig_sweeper.go @@ -1,20 +1,5 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 - -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
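(Note: the list and delete URLs in every sweeper are built from {{project}}/{{location}}-style templates expanded against a ResourceDataMock. A toy stand-in for that expansion step; the real tpgresource.ReplaceVars also resolves values from the schema and provider config and performs validation this sketch omits:

package main

import (
	"fmt"
	"strings"
)

// replaceVars is a toy version of tpgresource.ReplaceVars: substitute
// {{key}} placeholders from a flat map of known values.
func replaceVars(tmpl string, vars map[string]string) string {
	out := tmpl
	for k, v := range vars {
		out = strings.ReplaceAll(out, "{{"+k+"}}", v)
	}
	return out
}

func main() {
	url := replaceVars(
		"https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/kmsConfigs",
		map[string]string{"project": "my-project", "location": "us-central1"},
	)
	fmt.Println(url)
}
)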
-// -// ---------------------------------------------------------------------------- - package netapp import ( @@ -53,86 +38,91 @@ func testSweepNetappkmsconfig(region string) error { t := &testing.T{} billingId := envvar.GetTestBillingAccountFromEnv(t) - // Setup variables to replace in list template - d := &tpgresource.ResourceDataMock{ - FieldsInSchema: map[string]interface{}{ - "project": config.Project, - "region": region, - "location": region, - "zone": "-", - "billing_account": billingId, - }, - } - - listTemplate := strings.Split("https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/kmsConfigs", "?")[0] - listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) - return nil - } - - res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "GET", - Project: config.Project, - RawURL: listUrl, - UserAgent: config.UserAgent, - }) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) - return nil - } - - resourceList, ok := res["kmsconfigs"] - if !ok { - log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") - return nil - } - - rl := resourceList.([]interface{}) - - log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) - // Keep count of items that aren't sweepable for logging. - nonPrefixCount := 0 - for _, ri := range rl { - obj := ri.(map[string]interface{}) - if obj["name"] == nil { - log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) - return nil + regions := []string{"us-central1", "us-west2", "us-east4"} + for _, r := range regions { + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s in %s", resourceName, r) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": r, + "location": r, + "zone": "-", + "billing_account": billingId, + }, } - name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) - // Skip resources that shouldn't be sweeped - if !sweeper.IsSweepableTestResource(name) { - nonPrefixCount++ - continue - } - - deleteTemplate := "https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/kmsConfigs/{{name}}" - deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + listTemplate := strings.Split("https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/kmsConfigs", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) - return nil + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + continue } - deleteUrl = deleteUrl + name - // Don't wait on operations as we may have a lot to delete - _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, - Method: "DELETE", + Method: "GET", Project: config.Project, - RawURL: deleteUrl, + RawURL: listUrl, UserAgent: config.UserAgent, }) if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) - } else { - log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + 
continue
 		}
-	}

-	if nonPrefixCount > 0 {
-		log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount)
+		resourceList, ok := res["kmsconfigs"]
+		if !ok {
+			log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.")
+			continue
+		}
+
+		rl := resourceList.([]interface{})
+
+		log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName)
+		// Keep count of items that aren't sweepable for logging.
+		nonPrefixCount := 0
+		for _, ri := range rl {
+			obj := ri.(map[string]interface{})
+			if obj["name"] == nil {
+				log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName)
+				continue
+			}
+
+			name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string))
+			// Skip resources that shouldn't be swept
+			if !sweeper.IsSweepableTestResource(name) {
+				nonPrefixCount++
+				continue
+			}
+
+			deleteTemplate := "https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/kmsConfigs/{{name}}"
+			deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate)
+			if err != nil {
+				log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err)
+				continue
+			}
+			deleteUrl = deleteUrl + name
+
+			// Don't wait on operations as we may have a lot to delete
+			_, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
+				Config:    config,
+				Method:    "DELETE",
+				Project:   config.Project,
+				RawURL:    deleteUrl,
+				UserAgent: config.UserAgent,
+			})
+			if err != nil {
+				log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err)
+			} else {
+				log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name)
+			}
+		}
+
+		if nonPrefixCount > 0 {
+			log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount)
+		}
 	}

 	return nil
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_storage_pool.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_storage_pool.go
index 9a9032d2040..d64ff99fcdc 100644
--- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_storage_pool.go
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_storage_pool.go
@@ -33,15 +33,15 @@ import (
 	"github.com/hashicorp/terraform-provider-google/google/verify"
 )

-func ResourceNetappstoragePool() *schema.Resource {
+func ResourceNetappStoragePool() *schema.Resource {
 	return &schema.Resource{
-		Create: resourceNetappstoragePoolCreate,
-		Read:   resourceNetappstoragePoolRead,
-		Update: resourceNetappstoragePoolUpdate,
-		Delete: resourceNetappstoragePoolDelete,
+		Create: resourceNetappStoragePoolCreate,
+		Read:   resourceNetappStoragePoolRead,
+		Update: resourceNetappStoragePoolUpdate,
+		Delete: resourceNetappStoragePoolDelete,

 		Importer: &schema.ResourceImporter{
-			State: resourceNetappstoragePoolImport,
+			State: resourceNetappStoragePoolImport,
 		},

 		Timeouts: &schema.ResourceTimeout{
@@ -92,6 +92,13 @@ func ResourceNetappstoragePool() *schema.Resource {
 				Optional:    true,
 				Description: `Specifies the Active Directory policy to be used. Format: 'projects/{{project}}/locations/{{location}}/activeDirectories/{{name}}'.
 The policy needs to be in the same location as the storage pool.`,
+			},
+			"allow_auto_tiering": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				ForceNew: true,
+				Description: `Optional.
True if the storage pool supports Auto Tiering enabled volumes. Default is false. +Auto-tiering can be enabled after storage pool creation but it can't be disabled once enabled.`, }, "description": { Type: schema.TypeString, @@ -121,6 +128,19 @@ Please refer to the field 'effective_labels' for all of the labels present on th ForceNew: true, Description: `When enabled, the volumes uses Active Directory as LDAP name service for UID/GID lookups. Required to enable extended group support for NFSv3, using security identifiers for NFSv4.1 or principal names for kerberized NFSv4.1.`, + }, + "replica_zone": { + Type: schema.TypeString, + Optional: true, + Description: `Specifies the replica zone for regional Flex pools. 'zone' and 'replica_zone' values can be swapped to initiate a +[zone switch](https://cloud.google.com/netapp/volumes/docs/configure-and-use/storage-pools/edit-or-delete-storage-pool#switch_active_and_replica_zones).`, + }, + "zone": { + Type: schema.TypeString, + Optional: true, + Description: `Specifies the active zone for regional Flex pools. 'zone' and 'replica_zone' values can be swapped to initiate a +[zone switch](https://cloud.google.com/netapp/volumes/docs/configure-and-use/storage-pools/edit-or-delete-storage-pool#switch_active_and_replica_zones). +If you want to create a zonal Flex pool, specify a zone name for 'location' and omit 'zone'.`, }, "effective_labels": { Type: schema.TypeMap, @@ -161,7 +181,7 @@ using security identifiers for NFSv4.1 or principal names for kerberized NFSv4.1 } } -func resourceNetappstoragePoolCreate(d *schema.ResourceData, meta interface{}) error { +func resourceNetappStoragePoolCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { @@ -169,49 +189,67 @@ func resourceNetappstoragePoolCreate(d *schema.ResourceData, meta interface{}) e } obj := make(map[string]interface{}) - serviceLevelProp, err := expandNetappstoragePoolServiceLevel(d.Get("service_level"), d, config) + serviceLevelProp, err := expandNetappStoragePoolServiceLevel(d.Get("service_level"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("service_level"); !tpgresource.IsEmptyValue(reflect.ValueOf(serviceLevelProp)) && (ok || !reflect.DeepEqual(v, serviceLevelProp)) { obj["serviceLevel"] = serviceLevelProp } - capacityGibProp, err := expandNetappstoragePoolCapacityGib(d.Get("capacity_gib"), d, config) + capacityGibProp, err := expandNetappStoragePoolCapacityGib(d.Get("capacity_gib"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("capacity_gib"); !tpgresource.IsEmptyValue(reflect.ValueOf(capacityGibProp)) && (ok || !reflect.DeepEqual(v, capacityGibProp)) { obj["capacityGib"] = capacityGibProp } - descriptionProp, err := expandNetappstoragePoolDescription(d.Get("description"), d, config) + descriptionProp, err := expandNetappStoragePoolDescription(d.Get("description"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } - networkProp, err := expandNetappstoragePoolNetwork(d.Get("network"), d, config) + networkProp, err := expandNetappStoragePoolNetwork(d.Get("network"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || 
!reflect.DeepEqual(v, networkProp)) { obj["network"] = networkProp } - activeDirectoryProp, err := expandNetappstoragePoolActiveDirectory(d.Get("active_directory"), d, config) + activeDirectoryProp, err := expandNetappStoragePoolActiveDirectory(d.Get("active_directory"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("active_directory"); !tpgresource.IsEmptyValue(reflect.ValueOf(activeDirectoryProp)) && (ok || !reflect.DeepEqual(v, activeDirectoryProp)) { obj["activeDirectory"] = activeDirectoryProp } - kmsConfigProp, err := expandNetappstoragePoolKmsConfig(d.Get("kms_config"), d, config) + kmsConfigProp, err := expandNetappStoragePoolKmsConfig(d.Get("kms_config"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("kms_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(kmsConfigProp)) && (ok || !reflect.DeepEqual(v, kmsConfigProp)) { obj["kmsConfig"] = kmsConfigProp } - ldapEnabledProp, err := expandNetappstoragePoolLdapEnabled(d.Get("ldap_enabled"), d, config) + ldapEnabledProp, err := expandNetappStoragePoolLdapEnabled(d.Get("ldap_enabled"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("ldap_enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(ldapEnabledProp)) && (ok || !reflect.DeepEqual(v, ldapEnabledProp)) { obj["ldapEnabled"] = ldapEnabledProp } - labelsProp, err := expandNetappstoragePoolEffectiveLabels(d.Get("effective_labels"), d, config) + zoneProp, err := expandNetappStoragePoolZone(d.Get("zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(zoneProp)) && (ok || !reflect.DeepEqual(v, zoneProp)) { + obj["zone"] = zoneProp + } + replicaZoneProp, err := expandNetappStoragePoolReplicaZone(d.Get("replica_zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("replica_zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(replicaZoneProp)) && (ok || !reflect.DeepEqual(v, replicaZoneProp)) { + obj["replicaZone"] = replicaZoneProp + } + allowAutoTieringProp, err := expandNetappStoragePoolAllowAutoTiering(d.Get("allow_auto_tiering"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("allow_auto_tiering"); !tpgresource.IsEmptyValue(reflect.ValueOf(allowAutoTieringProp)) && (ok || !reflect.DeepEqual(v, allowAutoTieringProp)) { + obj["allowAutoTiering"] = allowAutoTieringProp + } + labelsProp, err := expandNetappStoragePoolEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { @@ -223,12 +261,12 @@ func resourceNetappstoragePoolCreate(d *schema.ResourceData, meta interface{}) e return err } - log.Printf("[DEBUG] Creating new storagePool: %#v", obj) + log.Printf("[DEBUG] Creating new StoragePool: %#v", obj) billingProject := "" project, err := tpgresource.GetProject(d, config) if err != nil { - return fmt.Errorf("Error fetching project for storagePool: %s", err) + return fmt.Errorf("Error fetching project for StoragePool: %s", err) } billingProject = project @@ -249,7 +287,7 @@ func resourceNetappstoragePoolCreate(d *schema.ResourceData, meta interface{}) e Headers: headers, }) if err != nil { - return fmt.Errorf("Error creating storagePool: %s", err) + return fmt.Errorf("Error creating StoragePool: %s", err) } // Store the ID now @@ -260,21 +298,21 @@ func resourceNetappstoragePoolCreate(d 
*schema.ResourceData, meta interface{}) e d.SetId(id) err = NetappOperationWaitTime( - config, res, project, "Creating storagePool", userAgent, + config, res, project, "Creating StoragePool", userAgent, d.Timeout(schema.TimeoutCreate)) if err != nil { // The resource didn't actually create d.SetId("") - return fmt.Errorf("Error waiting to create storagePool: %s", err) + return fmt.Errorf("Error waiting to create StoragePool: %s", err) } - log.Printf("[DEBUG] Finished creating storagePool %q: %#v", d.Id(), res) + log.Printf("[DEBUG] Finished creating StoragePool %q: %#v", d.Id(), res) - return resourceNetappstoragePoolRead(d, meta) + return resourceNetappStoragePoolRead(d, meta) } -func resourceNetappstoragePoolRead(d *schema.ResourceData, meta interface{}) error { +func resourceNetappStoragePoolRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { @@ -290,7 +328,7 @@ func resourceNetappstoragePoolRead(d *schema.ResourceData, meta interface{}) err project, err := tpgresource.GetProject(d, config) if err != nil { - return fmt.Errorf("Error fetching project for storagePool: %s", err) + return fmt.Errorf("Error fetching project for StoragePool: %s", err) } billingProject = project @@ -309,57 +347,66 @@ func resourceNetappstoragePoolRead(d *schema.ResourceData, meta interface{}) err Headers: headers, }) if err != nil { - return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NetappstoragePool %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NetappStoragePool %q", d.Id())) } if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading storagePool: %s", err) + return fmt.Errorf("Error reading StoragePool: %s", err) } - if err := d.Set("service_level", flattenNetappstoragePoolServiceLevel(res["serviceLevel"], d, config)); err != nil { - return fmt.Errorf("Error reading storagePool: %s", err) + if err := d.Set("service_level", flattenNetappStoragePoolServiceLevel(res["serviceLevel"], d, config)); err != nil { + return fmt.Errorf("Error reading StoragePool: %s", err) } - if err := d.Set("capacity_gib", flattenNetappstoragePoolCapacityGib(res["capacityGib"], d, config)); err != nil { - return fmt.Errorf("Error reading storagePool: %s", err) + if err := d.Set("capacity_gib", flattenNetappStoragePoolCapacityGib(res["capacityGib"], d, config)); err != nil { + return fmt.Errorf("Error reading StoragePool: %s", err) } - if err := d.Set("volume_capacity_gib", flattenNetappstoragePoolVolumeCapacityGib(res["volumeCapacityGib"], d, config)); err != nil { - return fmt.Errorf("Error reading storagePool: %s", err) + if err := d.Set("volume_capacity_gib", flattenNetappStoragePoolVolumeCapacityGib(res["volumeCapacityGib"], d, config)); err != nil { + return fmt.Errorf("Error reading StoragePool: %s", err) } - if err := d.Set("volume_count", flattenNetappstoragePoolVolumeCount(res["volumeCount"], d, config)); err != nil { - return fmt.Errorf("Error reading storagePool: %s", err) + if err := d.Set("volume_count", flattenNetappStoragePoolVolumeCount(res["volumeCount"], d, config)); err != nil { + return fmt.Errorf("Error reading StoragePool: %s", err) } - if err := d.Set("description", flattenNetappstoragePoolDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading storagePool: %s", err) + if err := d.Set("description", flattenNetappStoragePoolDescription(res["description"], d, config)); err != 
nil { + return fmt.Errorf("Error reading StoragePool: %s", err) } - if err := d.Set("labels", flattenNetappstoragePoolLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading storagePool: %s", err) + if err := d.Set("labels", flattenNetappStoragePoolLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading StoragePool: %s", err) } - if err := d.Set("network", flattenNetappstoragePoolNetwork(res["network"], d, config)); err != nil { - return fmt.Errorf("Error reading storagePool: %s", err) + if err := d.Set("network", flattenNetappStoragePoolNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading StoragePool: %s", err) } - if err := d.Set("active_directory", flattenNetappstoragePoolActiveDirectory(res["activeDirectory"], d, config)); err != nil { - return fmt.Errorf("Error reading storagePool: %s", err) + if err := d.Set("active_directory", flattenNetappStoragePoolActiveDirectory(res["activeDirectory"], d, config)); err != nil { + return fmt.Errorf("Error reading StoragePool: %s", err) } - if err := d.Set("kms_config", flattenNetappstoragePoolKmsConfig(res["kmsConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading storagePool: %s", err) + if err := d.Set("kms_config", flattenNetappStoragePoolKmsConfig(res["kmsConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading StoragePool: %s", err) } - if err := d.Set("ldap_enabled", flattenNetappstoragePoolLdapEnabled(res["ldapEnabled"], d, config)); err != nil { - return fmt.Errorf("Error reading storagePool: %s", err) + if err := d.Set("ldap_enabled", flattenNetappStoragePoolLdapEnabled(res["ldapEnabled"], d, config)); err != nil { + return fmt.Errorf("Error reading StoragePool: %s", err) } - if err := d.Set("encryption_type", flattenNetappstoragePoolEncryptionType(res["encryptionType"], d, config)); err != nil { - return fmt.Errorf("Error reading storagePool: %s", err) + if err := d.Set("encryption_type", flattenNetappStoragePoolEncryptionType(res["encryptionType"], d, config)); err != nil { + return fmt.Errorf("Error reading StoragePool: %s", err) } - if err := d.Set("terraform_labels", flattenNetappstoragePoolTerraformLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading storagePool: %s", err) + if err := d.Set("zone", flattenNetappStoragePoolZone(res["zone"], d, config)); err != nil { + return fmt.Errorf("Error reading StoragePool: %s", err) } - if err := d.Set("effective_labels", flattenNetappstoragePoolEffectiveLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading storagePool: %s", err) + if err := d.Set("replica_zone", flattenNetappStoragePoolReplicaZone(res["replicaZone"], d, config)); err != nil { + return fmt.Errorf("Error reading StoragePool: %s", err) + } + if err := d.Set("allow_auto_tiering", flattenNetappStoragePoolAllowAutoTiering(res["allowAutoTiering"], d, config)); err != nil { + return fmt.Errorf("Error reading StoragePool: %s", err) + } + if err := d.Set("terraform_labels", flattenNetappStoragePoolTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading StoragePool: %s", err) + } + if err := d.Set("effective_labels", flattenNetappStoragePoolEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading StoragePool: %s", err) } return nil } -func resourceNetappstoragePoolUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceNetappStoragePoolUpdate(d *schema.ResourceData, meta 
interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { @@ -370,30 +417,42 @@ func resourceNetappstoragePoolUpdate(d *schema.ResourceData, meta interface{}) e project, err := tpgresource.GetProject(d, config) if err != nil { - return fmt.Errorf("Error fetching project for storagePool: %s", err) + return fmt.Errorf("Error fetching project for StoragePool: %s", err) } billingProject = project obj := make(map[string]interface{}) - capacityGibProp, err := expandNetappstoragePoolCapacityGib(d.Get("capacity_gib"), d, config) + capacityGibProp, err := expandNetappStoragePoolCapacityGib(d.Get("capacity_gib"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("capacity_gib"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, capacityGibProp)) { obj["capacityGib"] = capacityGibProp } - descriptionProp, err := expandNetappstoragePoolDescription(d.Get("description"), d, config) + descriptionProp, err := expandNetappStoragePoolDescription(d.Get("description"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } - activeDirectoryProp, err := expandNetappstoragePoolActiveDirectory(d.Get("active_directory"), d, config) + activeDirectoryProp, err := expandNetappStoragePoolActiveDirectory(d.Get("active_directory"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("active_directory"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, activeDirectoryProp)) { obj["activeDirectory"] = activeDirectoryProp } - labelsProp, err := expandNetappstoragePoolEffectiveLabels(d.Get("effective_labels"), d, config) + zoneProp, err := expandNetappStoragePoolZone(d.Get("zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, zoneProp)) { + obj["zone"] = zoneProp + } + replicaZoneProp, err := expandNetappStoragePoolReplicaZone(d.Get("replica_zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("replica_zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, replicaZoneProp)) { + obj["replicaZone"] = replicaZoneProp + } + labelsProp, err := expandNetappStoragePoolEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { @@ -405,7 +464,7 @@ func resourceNetappstoragePoolUpdate(d *schema.ResourceData, meta interface{}) e return err } - log.Printf("[DEBUG] Updating storagePool %q: %#v", d.Id(), obj) + log.Printf("[DEBUG] Updating StoragePool %q: %#v", d.Id(), obj) headers := make(http.Header) updateMask := []string{} @@ -421,6 +480,14 @@ func resourceNetappstoragePoolUpdate(d *schema.ResourceData, meta interface{}) e updateMask = append(updateMask, "activeDirectory") } + if d.HasChange("zone") { + updateMask = append(updateMask, "zone") + } + + if d.HasChange("replica_zone") { + updateMask = append(updateMask, "replicaZone") + } + if d.HasChange("effective_labels") { updateMask = append(updateMask, "labels") } @@ -515,13 +582,13 @@ func resourceNetappstoragePoolUpdate(d *schema.ResourceData, meta interface{}) e }) if err != nil { 
- return fmt.Errorf("Error updating storagePool %q: %s", d.Id(), err) + return fmt.Errorf("Error updating StoragePool %q: %s", d.Id(), err) } else { - log.Printf("[DEBUG] Finished updating storagePool %q: %#v", d.Id(), res) + log.Printf("[DEBUG] Finished updating StoragePool %q: %#v", d.Id(), res) } err = NetappOperationWaitTime( - config, res, project, "Updating storagePool", userAgent, + config, res, project, "Updating StoragePool", userAgent, d.Timeout(schema.TimeoutUpdate)) if err != nil { @@ -529,10 +596,10 @@ func resourceNetappstoragePoolUpdate(d *schema.ResourceData, meta interface{}) e } } - return resourceNetappstoragePoolRead(d, meta) + return resourceNetappStoragePoolRead(d, meta) } -func resourceNetappstoragePoolDelete(d *schema.ResourceData, meta interface{}) error { +func resourceNetappStoragePoolDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { @@ -543,7 +610,7 @@ func resourceNetappstoragePoolDelete(d *schema.ResourceData, meta interface{}) e project, err := tpgresource.GetProject(d, config) if err != nil { - return fmt.Errorf("Error fetching project for storagePool: %s", err) + return fmt.Errorf("Error fetching project for StoragePool: %s", err) } billingProject = project @@ -561,7 +628,7 @@ func resourceNetappstoragePoolDelete(d *schema.ResourceData, meta interface{}) e headers := make(http.Header) - log.Printf("[DEBUG] Deleting storagePool %q", d.Id()) + log.Printf("[DEBUG] Deleting StoragePool %q", d.Id()) res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, Method: "DELETE", @@ -573,22 +640,22 @@ func resourceNetappstoragePoolDelete(d *schema.ResourceData, meta interface{}) e Headers: headers, }) if err != nil { - return transport_tpg.HandleNotFoundError(err, d, "storagePool") + return transport_tpg.HandleNotFoundError(err, d, "StoragePool") } err = NetappOperationWaitTime( - config, res, project, "Deleting storagePool", userAgent, + config, res, project, "Deleting StoragePool", userAgent, d.Timeout(schema.TimeoutDelete)) if err != nil { return err } - log.Printf("[DEBUG] Finished deleting storagePool %q: %#v", d.Id(), res) + log.Printf("[DEBUG] Finished deleting StoragePool %q: %#v", d.Id(), res) return nil } -func resourceNetappstoragePoolImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { +func resourceNetappStoragePoolImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { config := meta.(*transport_tpg.Config) if err := tpgresource.ParseImportId([]string{ "^projects/(?P[^/]+)/locations/(?P[^/]+)/storagePools/(?P[^/]+)$", @@ -608,19 +675,19 @@ func resourceNetappstoragePoolImport(d *schema.ResourceData, meta interface{}) ( return []*schema.ResourceData{d}, nil } -func flattenNetappstoragePoolServiceLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappStoragePoolServiceLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappstoragePoolCapacityGib(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappStoragePoolCapacityGib(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappstoragePoolVolumeCapacityGib(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func 
flattenNetappStoragePoolVolumeCapacityGib(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappstoragePoolVolumeCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappStoragePoolVolumeCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { @@ -637,11 +704,11 @@ func flattenNetappstoragePoolVolumeCount(v interface{}, d *schema.ResourceData, return v // let terraform core handle it otherwise } -func flattenNetappstoragePoolDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappStoragePoolDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappstoragePoolLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappStoragePoolLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -656,27 +723,39 @@ func flattenNetappstoragePoolLabels(v interface{}, d *schema.ResourceData, confi return transformed } -func flattenNetappstoragePoolNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappStoragePoolNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetappStoragePoolActiveDirectory(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetappStoragePoolKmsConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappstoragePoolActiveDirectory(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappStoragePoolLdapEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappstoragePoolKmsConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappStoragePoolEncryptionType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappstoragePoolLdapEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappStoragePoolZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappstoragePoolEncryptionType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappStoragePoolReplicaZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetappstoragePoolTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenNetappStoragePoolAllowAutoTiering(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetappStoragePoolTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -691,39 +770,51 @@ func flattenNetappstoragePoolTerraformLabels(v interface{}, d *schema.ResourceDa return transformed } -func flattenNetappstoragePoolEffectiveLabels(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { +func flattenNetappStoragePoolEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandNetappstoragePoolServiceLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappStoragePoolServiceLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetappStoragePoolCapacityGib(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetappStoragePoolDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetappStoragePoolNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappstoragePoolCapacityGib(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappStoragePoolActiveDirectory(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappstoragePoolDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappStoragePoolKmsConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappstoragePoolNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappStoragePoolLdapEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappstoragePoolActiveDirectory(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappStoragePoolZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappstoragePoolKmsConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappStoragePoolReplicaZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappstoragePoolLdapEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandNetappStoragePoolAllowAutoTiering(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetappstoragePoolEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { +func expandNetappStoragePoolEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_storage_pool_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_storage_pool_sweeper.go index 0af6167ed9c..8e0540f26c6 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_storage_pool_sweeper.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_storage_pool_sweeper.go @@ -1,20 +1,5 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 - -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - package netapp import ( @@ -30,12 +15,12 @@ import ( ) func init() { - sweeper.AddTestSweepers("NetappstoragePool", testSweepNetappstoragePool) + sweeper.AddTestSweepers("NetappStoragePool", testSweepNetappStoragePool) } // At the time of writing, the CI only passes us-central1 as the region -func testSweepNetappstoragePool(region string) error { - resourceName := "NetappstoragePool" +func testSweepNetappStoragePool(region string) error { + resourceName := "NetappStoragePool" log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) config, err := sweeper.SharedConfigForRegion(region) @@ -53,86 +38,91 @@ func testSweepNetappstoragePool(region string) error { t := &testing.T{} billingId := envvar.GetTestBillingAccountFromEnv(t) - // Setup variables to replace in list template - d := &tpgresource.ResourceDataMock{ - FieldsInSchema: map[string]interface{}{ - "project": config.Project, - "region": region, - "location": region, - "zone": "-", - "billing_account": billingId, - }, - } - - listTemplate := strings.Split("https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/storagePools", "?")[0] - listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) - return nil - } - - res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "GET", - Project: config.Project, - RawURL: listUrl, - UserAgent: config.UserAgent, - }) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) - return nil - } - - resourceList, ok := res["storagePools"] - if !ok { - log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") - return nil - } - - rl := resourceList.([]interface{}) - - log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) - // Keep count of items that aren't sweepable for logging. 
- nonPrefixCount := 0 - for _, ri := range rl { - obj := ri.(map[string]interface{}) - if obj["name"] == nil { - log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) - return nil + regions := []string{"us-central1", "us-west2", "us-east4"} + for _, r := range regions { + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s in %s", resourceName, r) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": r, + "location": r, + "zone": "-", + "billing_account": billingId, + }, } - name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) - // Skip resources that shouldn't be sweeped - if !sweeper.IsSweepableTestResource(name) { - nonPrefixCount++ - continue - } - - deleteTemplate := "https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/storagePools/{{name}}" - deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + listTemplate := strings.Split("https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/storagePools", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) - return nil + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + continue } - deleteUrl = deleteUrl + name - // Don't wait on operations as we may have a lot to delete - _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, - Method: "DELETE", + Method: "GET", Project: config.Project, - RawURL: deleteUrl, + RawURL: listUrl, UserAgent: config.UserAgent, }) if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) - } else { - log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + continue } - } - if nonPrefixCount > 0 { - log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + resourceList, ok := res["storagePools"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + continue + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+		nonPrefixCount := 0
+		for _, ri := range rl {
+			obj := ri.(map[string]interface{})
+			if obj["name"] == nil {
+				log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName)
+				continue
+			}
+
+			name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string))
+			// Skip resources that shouldn't be swept
+			if !sweeper.IsSweepableTestResource(name) {
+				nonPrefixCount++
+				continue
+			}
+
+			deleteTemplate := "https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/storagePools/{{name}}"
+			deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate)
+			if err != nil {
+				log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err)
+				continue
+			}
+			deleteUrl = deleteUrl + name
+
+			// Don't wait on operations as we may have a lot to delete
+			_, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
+				Config:    config,
+				Method:    "DELETE",
+				Project:   config.Project,
+				RawURL:    deleteUrl,
+				UserAgent: config.UserAgent,
+			})
+			if err != nil {
+				log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err)
+			} else {
+				log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name)
+			}
+		}
+
+		if nonPrefixCount > 0 {
+			log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount)
+		}
 	}

 	return nil
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_volume.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_volume.go
index e40af340d64..af332e26bf2 100644
--- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_volume.go
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_volume.go
@@ -220,6 +220,17 @@ Format: 'projects/{{projectId}}/locations/{{location}}/backupVaults/{{backupVaul
 Please refer to the field 'effective_labels' for all of the labels present on the resource.`,
 				Elem: &schema.Schema{Type: schema.TypeString},
 			},
+			"large_capacity": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Description: `Optional. Flag indicating if the volume will be a large capacity volume or a regular volume.`,
+			},
+			"multiple_endpoints": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				Description: `Optional. Flag indicating if the volume will have an IP address per node for volumes supporting multiple IP endpoints.
+Only volumes with largeCapacity enabled are allowed to have multiple endpoints.`,
+			},
 			"restore_parameters": {
 				Type:     schema.TypeList,
 				Optional: true,
@@ -269,6 +280,7 @@ Use NTFS to use NTFS ACLs for file permissions. Can only be set for volumes whic
 			},
 			"smb_settings": {
 				Type:     schema.TypeList,
+				Computed: true,
 				Optional: true,
 				Description: `Settings for volumes with SMB access. Possible values: ["ENCRYPT_DATA", "BROWSABLE", "CHANGE_NOTIFY", "NON_BROWSABLE", "OPLOCKS", "SHOW_SNAPSHOT", "SHOW_PREVIOUS_VERSIONS", "ACCESS_BASED_ENUMERATION", "CONTINUOUSLY_AVAILABLE"]`,
 				Elem: &schema.Schema{
@@ -413,6 +425,29 @@ To disable automatic snapshot creation you have to remove the whole snapshot_pol
 						},
 					},
 				},
+			},
+			"tiering_policy": {
+				Type:        schema.TypeList,
+				Optional:    true,
+				Description: `Tiering policy for the volume.`,
+				MaxItems:    1,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"cooling_threshold_days": {
+							Type:     schema.TypeInt,
+							Optional: true,
+							Description: `Optional.
Time in days to mark the volume's data block as cold and make it eligible for tiering; can range from 7 to 183.
+Default is 31.`,
+						},
+						"tier_action": {
+							Type:         schema.TypeString,
+							Optional:     true,
+							ValidateFunc: verify.ValidateEnum([]string{"ENABLED", "PAUSED", ""}),
+							Description:  `Optional. Flag indicating whether the volume's tiering policy is enabled or paused. Default is PAUSED. Default value: "PAUSED" Possible values: ["ENABLED", "PAUSED"]`,
+							Default:      "PAUSED",
+						},
+					},
+				},
+			},
 			"unix_permissions": {
 				Type:     schema.TypeString,
 				Computed: true,
@@ -424,6 +459,11 @@ To disable automatic snapshot creation you have to remove the whole snapshot_pol
 				Computed:    true,
 				Description: `Reports the resource name of the Active Directory policy being used. Inherited from storage pool.`,
 			},
+			"cold_tier_size_gib": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: `Output only. Size of the volume cold tier data in GiB.`,
+			},
 			"create_time": {
 				Type:     schema.TypeString,
 				Computed: true,
@@ -498,6 +538,11 @@ Format for SMB volumes: '\\\\netbios_prefix-four_random_hex_letters.domain_name\
 				Computed:    true,
 				Description: `Name of the Private Service Access allocated range. Inherited from storage pool.`,
 			},
+			"replica_zone": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: `Specifies the replica zone for a regional volume.`,
+			},
 			"service_level": {
 				Type:     schema.TypeString,
 				Computed: true,
@@ -525,13 +570,19 @@ Format for SMB volumes: '\\\\netbios_prefix-four_random_hex_letters.domain_name\
 				Computed:    true,
 				Description: `Used capacity of the volume (in GiB). This is computed periodically and it does not represent the realtime usage.`,
 			},
+			"zone": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: `Specifies the active zone for a regional volume.`,
+			},
 			"deletion_policy": {
 				Type:     schema.TypeString,
 				Optional: true,
-				Default:  "DEFAULT",
 				Description: `Policy to determine if the volume should be deleted forcefully.
 Volumes may have nested snapshot resources. Deleting such a volume will fail.
-Setting this parameter to FORCE will delete volumes including nested snapshots.`,
+Setting this parameter to FORCE will delete volumes including nested snapshots.
+Possible values: DEFAULT, FORCE.`, + Default: "DEFAULT", }, "project": { Type: schema.TypeString, @@ -642,6 +693,24 @@ func resourceNetappVolumeCreate(d *schema.ResourceData, meta interface{}) error } else if v, ok := d.GetOkExists("backup_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(backupConfigProp)) && (ok || !reflect.DeepEqual(v, backupConfigProp)) { obj["backupConfig"] = backupConfigProp } + largeCapacityProp, err := expandNetappVolumeLargeCapacity(d.Get("large_capacity"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("large_capacity"); !tpgresource.IsEmptyValue(reflect.ValueOf(largeCapacityProp)) && (ok || !reflect.DeepEqual(v, largeCapacityProp)) { + obj["largeCapacity"] = largeCapacityProp + } + multipleEndpointsProp, err := expandNetappVolumeMultipleEndpoints(d.Get("multiple_endpoints"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("multiple_endpoints"); !tpgresource.IsEmptyValue(reflect.ValueOf(multipleEndpointsProp)) && (ok || !reflect.DeepEqual(v, multipleEndpointsProp)) { + obj["multipleEndpoints"] = multipleEndpointsProp + } + tieringPolicyProp, err := expandNetappVolumeTieringPolicy(d.Get("tiering_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tiering_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(tieringPolicyProp)) && (ok || !reflect.DeepEqual(v, tieringPolicyProp)) { + obj["tieringPolicy"] = tieringPolicyProp + } labelsProp, err := expandNetappVolumeEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err @@ -837,6 +906,24 @@ func resourceNetappVolumeRead(d *schema.ResourceData, meta interface{}) error { if err := d.Set("backup_config", flattenNetappVolumeBackupConfig(res["backupConfig"], d, config)); err != nil { return fmt.Errorf("Error reading Volume: %s", err) } + if err := d.Set("zone", flattenNetappVolumeZone(res["zone"], d, config)); err != nil { + return fmt.Errorf("Error reading Volume: %s", err) + } + if err := d.Set("replica_zone", flattenNetappVolumeReplicaZone(res["replicaZone"], d, config)); err != nil { + return fmt.Errorf("Error reading Volume: %s", err) + } + if err := d.Set("large_capacity", flattenNetappVolumeLargeCapacity(res["largeCapacity"], d, config)); err != nil { + return fmt.Errorf("Error reading Volume: %s", err) + } + if err := d.Set("multiple_endpoints", flattenNetappVolumeMultipleEndpoints(res["multipleEndpoints"], d, config)); err != nil { + return fmt.Errorf("Error reading Volume: %s", err) + } + if err := d.Set("cold_tier_size_gib", flattenNetappVolumeColdTierSizeGib(res["coldTierSizeGib"], d, config)); err != nil { + return fmt.Errorf("Error reading Volume: %s", err) + } + if err := d.Set("tiering_policy", flattenNetappVolumeTieringPolicy(res["tieringPolicy"], d, config)); err != nil { + return fmt.Errorf("Error reading Volume: %s", err) + } if err := d.Set("terraform_labels", flattenNetappVolumeTerraformLabels(res["labels"], d, config)); err != nil { return fmt.Errorf("Error reading Volume: %s", err) } @@ -923,6 +1010,24 @@ func resourceNetappVolumeUpdate(d *schema.ResourceData, meta interface{}) error } else if v, ok := d.GetOkExists("backup_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, backupConfigProp)) { obj["backupConfig"] = backupConfigProp } + largeCapacityProp, err := expandNetappVolumeLargeCapacity(d.Get("large_capacity"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("large_capacity"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, largeCapacityProp)) { + obj["largeCapacity"] = largeCapacityProp + } + multipleEndpointsProp, err := expandNetappVolumeMultipleEndpoints(d.Get("multiple_endpoints"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("multiple_endpoints"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, multipleEndpointsProp)) { + obj["multipleEndpoints"] = multipleEndpointsProp + } + tieringPolicyProp, err := expandNetappVolumeTieringPolicy(d.Get("tiering_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tiering_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tieringPolicyProp)) { + obj["tieringPolicy"] = tieringPolicyProp + } labelsProp, err := expandNetappVolumeEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err @@ -981,6 +1086,19 @@ func resourceNetappVolumeUpdate(d *schema.ResourceData, meta interface{}) error "backup_config.scheduled_backup_enabled") } + if d.HasChange("large_capacity") { + updateMask = append(updateMask, "largeCapacity") + } + + if d.HasChange("multiple_endpoints") { + updateMask = append(updateMask, "multipleEndpoints") + } + + if d.HasChange("tiering_policy") { + updateMask = append(updateMask, "tiering_policy.cooling_threshold_days", + "tiering_policy.tier_action") + } + if d.HasChange("effective_labels") { updateMask = append(updateMask, "labels") } @@ -1660,6 +1778,62 @@ func flattenNetappVolumeBackupConfigScheduledBackupEnabled(v interface{}, d *sch return v } +func flattenNetappVolumeZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetappVolumeReplicaZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetappVolumeLargeCapacity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetappVolumeMultipleEndpoints(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetappVolumeColdTierSizeGib(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetappVolumeTieringPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["cooling_threshold_days"] = + flattenNetappVolumeTieringPolicyCoolingThresholdDays(original["coolingThresholdDays"], d, config) + transformed["tier_action"] = + flattenNetappVolumeTieringPolicyTierAction(original["tierAction"], d, config) + return []interface{}{transformed} +} +func flattenNetappVolumeTieringPolicyCoolingThresholdDays(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenNetappVolumeTieringPolicyTierAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + 
func flattenNetappVolumeTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v @@ -2199,6 +2373,48 @@ func expandNetappVolumeBackupConfigScheduledBackupEnabled(v interface{}, d tpgre return v, nil } +func expandNetappVolumeLargeCapacity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetappVolumeMultipleEndpoints(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetappVolumeTieringPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCoolingThresholdDays, err := expandNetappVolumeTieringPolicyCoolingThresholdDays(original["cooling_threshold_days"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCoolingThresholdDays); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["coolingThresholdDays"] = transformedCoolingThresholdDays + } + + transformedTierAction, err := expandNetappVolumeTieringPolicyTierAction(original["tier_action"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTierAction); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tierAction"] = transformedTierAction + } + + return transformed, nil +} + +func expandNetappVolumeTieringPolicyCoolingThresholdDays(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetappVolumeTieringPolicyTierAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandNetappVolumeEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_volume_replication.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_volume_replication.go index c2518ac827a..47a0d1ffe60 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_volume_replication.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_volume_replication.go @@ -34,7 +34,7 @@ import ( ) // Custom function to wait for mirrorState target states -func NetAppVolumeReplicationWaitForMirror(d *schema.ResourceData, meta interface{}, targetState string) error { +func NetappVolumeReplicationWaitForMirror(d *schema.ResourceData, meta interface{}, targetState string) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { @@ -279,12 +279,12 @@ data in relation to the source volume data.`, "total_transfer_duration": { Type: schema.TypeString, Computed: true, - Description: `Total time taken so far during current transfer.`, + Description: `Cumulative time taken across all transfers for the 
replication relationship.`, }, "transfer_bytes": { Type: schema.TypeString, Computed: true, - Description: `Number of bytes transferred so far in current transfer.`, + Description: `Cumulative bytes transferred so far for the replication relationship.`, }, "update_time": { Type: schema.TypeString, @@ -297,7 +297,6 @@ data in relation to the source volume data.`, "delete_destination_volume": { Type: schema.TypeBool, Optional: true, - Default: false, Description: `A destination volume is created as part of replication creation. The destination volume will not became under Terraform management unless you import it manually. If you delete the replication, this volume will remain. @@ -305,32 +304,33 @@ Setting this parameter to true will delete the *current* destination volume when replication. If you reversed the replication direction, this will be your former source volume! For production use, it is recommended to keep this parameter false to avoid accidental volume deletion. Handle with care. Default is false.`, + Default: false, }, "replication_enabled": { Type: schema.TypeBool, Optional: true, - Default: true, Description: `Set to false to stop/break the mirror. Stopping the mirror makes the destination volume read-write and act independently from the source volume. Set to true to enable/resume the mirror. WARNING: Resuming a mirror overwrites any changes done to the destination volume with the content of the source volume.`, + Default: true, }, "force_stopping": { Type: schema.TypeBool, Optional: true, - Default: false, Description: `Only replications with mirror_state=MIRRORED can be stopped. A replication in mirror_state=TRANSFERRING currently receives an update and stopping the update might be undesirable. Set this parameter to true to stop anyway. All data transferred to the destination will be discarded and content of destination volume will remain at the state of the last successful update. Default is false.`, + Default: false, }, "wait_for_mirror": { Type: schema.TypeBool, Optional: true, - Default: false, Description: `Replication resource state is independent of mirror_state. With enough data, it can take many hours for mirror_state to reach MIRRORED. If you want Terraform to wait for the mirror to finish on create/stop/resume operations, set this parameter to true. 
Default is false.`, + Default: false, }, "project": { Type: schema.TypeString, @@ -429,7 +429,7 @@ func resourceNetappVolumeReplicationCreate(d *schema.ResourceData, meta interfac if d.Get("wait_for_mirror").(bool) == true { // Wait for mirrorState=MIRRORED before treating the resource as created - err = NetAppVolumeReplicationWaitForMirror(d, meta, "MIRRORED") + err = NetappVolumeReplicationWaitForMirror(d, meta, "MIRRORED") if err != nil { return fmt.Errorf("Error waiting for volume replication to reach mirror_state==MIRRORED: %s", err) } @@ -737,7 +737,7 @@ func resourceNetappVolumeReplicationUpdate(d *schema.ResourceData, meta interfac // If user specified to wait for mirror operations, wait to reach target state if d.Get("wait_for_mirror").(bool) == true { - err = NetAppVolumeReplicationWaitForMirror(d, meta, targetState) + err = NetappVolumeReplicationWaitForMirror(d, meta, targetState) if err != nil { return fmt.Errorf("Error waiting for volume replication to reach mirror_state==%s: %s", targetState, err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_volume_snapshot_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_volume_snapshot_sweeper.go index a8b9627e2dc..fee2931bd12 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_volume_snapshot_sweeper.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/netapp/resource_netapp_volume_snapshot_sweeper.go @@ -1,20 +1,5 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 - -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - package netapp import ( @@ -53,86 +38,91 @@ func testSweepNetappVolumeSnapshot(region string) error { t := &testing.T{} billingId := envvar.GetTestBillingAccountFromEnv(t) - // Setup variables to replace in list template - d := &tpgresource.ResourceDataMock{ - FieldsInSchema: map[string]interface{}{ - "project": config.Project, - "region": region, - "location": region, - "zone": "-", - "billing_account": billingId, - }, - } - - listTemplate := strings.Split("https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/volumes/{{volume_name}}/snapshots", "?")[0] - listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) - return nil - } - - res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "GET", - Project: config.Project, - RawURL: listUrl, - UserAgent: config.UserAgent, - }) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) - return nil - } - - resourceList, ok := res["volumeSnapshots"] - if !ok { - log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") - return nil - } - - rl := resourceList.([]interface{}) - - log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) - // Keep count of items that aren't sweepable for logging. - nonPrefixCount := 0 - for _, ri := range rl { - obj := ri.(map[string]interface{}) - if obj["name"] == nil { - log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) - return nil + regions := []string{"us-central1", "us-west2", "us-east4"} + for _, r := range regions { + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s in %s", resourceName, r) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": r, + "location": r, + "zone": "-", + "billing_account": billingId, + }, } - name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) - // Skip resources that shouldn't be sweeped - if !sweeper.IsSweepableTestResource(name) { - nonPrefixCount++ - continue - } - - deleteTemplate := "https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/volumes/{{volume_name}}/snapshots/{{name}}" - deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + listTemplate := strings.Split("https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/volumes/{{volume_name}}/snapshots", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) - return nil + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + continue } - deleteUrl = deleteUrl + name - // Don't wait on operations as we may have a lot to delete - _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, - Method: "DELETE", + Method: "GET", Project: config.Project, - RawURL: deleteUrl, + RawURL: listUrl, UserAgent: config.UserAgent, }) if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) - } else { - log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + continue } - } - if nonPrefixCount > 0 { - log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + resourceList, ok := res["volumeSnapshots"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + continue + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + continue + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://netapp.googleapis.com/v1/projects/{{project}}/locations/{{location}}/volumes/{{volume_name}}/snapshots/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + continue + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } } return nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_group.go new file mode 100644 index 00000000000..d92aa5869b5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_group.go @@ -0,0 +1,598 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package networkconnectivity + +import ( + "fmt" + "log" + "net/http" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceNetworkConnectivityGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkConnectivityGroupCreate, + Read: resourceNetworkConnectivityGroupRead, + Update: resourceNetworkConnectivityGroupUpdate, + Delete: resourceNetworkConnectivityGroupDelete, + + Importer: &schema.ResourceImporter{ + State: resourceNetworkConnectivityGroupImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.SetLabelsDiff, + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "hub": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the hub. Hub names must be unique. They use the following form: projects/{projectNumber}/locations/global/hubs/{hubId}`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"default", "center", "edge"}), + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the group. Group names must be unique. Possible values: ["default", "center", "edge"]`, + }, + "auto_accept": { + Type: schema.TypeList, + Optional: true, + Description: `Optional. The auto-accept setting for this group.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_accept_projects": { + Type: schema.TypeList, + Required: true, + Description: `A list of project ids or project numbers for which you want to enable auto-accept. The auto-accept setting is applied to spokes being created or updated in these projects.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `An optional description of the group.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Optional labels in key:value format. For more information about labels, see [Requirements for labels](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements). + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. 
The time the hub was created.`, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "route_table": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The name of the route table that corresponds to this group. They use the following form: 'projects/{projectNumber}/locations/global/hubs/{hubId}/routeTables/{route_table_id}'`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The current lifecycle state of this hub.`, + ExactlyOneOf: []string{}, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The Google-generated UUID for the group. This value is unique across all group resources. If a group is deleted and another with the same name is created, the new route table is assigned a different uniqueId.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The time the hub was last updated.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceNetworkConnectivityGroupCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandNetworkConnectivityGroupName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandNetworkConnectivityGroupDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + autoAcceptProp, err := expandNetworkConnectivityGroupAutoAccept(d.Get("auto_accept"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("auto_accept"); !tpgresource.IsEmptyValue(reflect.ValueOf(autoAcceptProp)) && (ok || !reflect.DeepEqual(v, autoAcceptProp)) { + obj["autoAccept"] = autoAcceptProp + } + labelsProp, err := expandNetworkConnectivityGroupEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVarsForId(d, config, "{{NetworkConnectivityBasePath}}projects/{{project}}/locations/global/hubs/{{hub}}/groups/{{name}}?updateMask=autoAccept.autoAcceptProjects,labels,description") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Group: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + 
return fmt.Errorf("Error fetching project for Group: %s", err) + } + billingProject = strings.TrimPrefix(project, "projects/") + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating Group: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/global/hubs/{{hub}}/groups/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = NetworkConnectivityOperationWaitTime( + config, res, tpgresource.GetResourceNameFromSelfLink(project), "Creating Group", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Group: %s", err) + } + + log.Printf("[DEBUG] Finished creating Group %q: %#v", d.Id(), res) + + return resourceNetworkConnectivityGroupRead(d, meta) +} + +func resourceNetworkConnectivityGroupRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVarsForId(d, config, "{{NetworkConnectivityBasePath}}projects/{{project}}/locations/global/hubs/{{hub}}/groups/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Group: %s", err) + } + billingProject = strings.TrimPrefix(project, "projects/") + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NetworkConnectivityGroup %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Group: %s", err) + } + + if err := d.Set("name", flattenNetworkConnectivityGroupName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Group: %s", err) + } + if err := d.Set("create_time", flattenNetworkConnectivityGroupCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Group: %s", err) + } + if err := d.Set("update_time", flattenNetworkConnectivityGroupUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Group: %s", err) + } + if err := d.Set("labels", flattenNetworkConnectivityGroupLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Group: %s", err) + } + if err := d.Set("description", flattenNetworkConnectivityGroupDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Group: %s", err) + } + if err := d.Set("uid", 
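// Every request body in these generated resources is assembled with the same
// guard: a field is sent only when its expanded value is non-empty or the user
// explicitly set it in config. Factored out, the guard looks like this (a
// hypothetical helper, not part of the generated code; assumes "reflect" plus
// this file's existing imports):
func setIfPresentSketch(d *schema.ResourceData, obj map[string]interface{}, schemaKey, apiKey string, expanded interface{}) {
	v, ok := d.GetOkExists(schemaKey)
	if !tpgresource.IsEmptyValue(reflect.ValueOf(expanded)) && (ok || !reflect.DeepEqual(v, expanded)) {
		obj[apiKey] = expanded // e.g. schemaKey "auto_accept" maps to apiKey "autoAccept"
	}
}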
flattenNetworkConnectivityGroupUid(res["uid"], d, config)); err != nil { + return fmt.Errorf("Error reading Group: %s", err) + } + if err := d.Set("state", flattenNetworkConnectivityGroupState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Group: %s", err) + } + if err := d.Set("auto_accept", flattenNetworkConnectivityGroupAutoAccept(res["autoAccept"], d, config)); err != nil { + return fmt.Errorf("Error reading Group: %s", err) + } + if err := d.Set("route_table", flattenNetworkConnectivityGroupRouteTable(res["routeTable"], d, config)); err != nil { + return fmt.Errorf("Error reading Group: %s", err) + } + if err := d.Set("terraform_labels", flattenNetworkConnectivityGroupTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Group: %s", err) + } + if err := d.Set("effective_labels", flattenNetworkConnectivityGroupEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Group: %s", err) + } + + return nil +} + +func resourceNetworkConnectivityGroupUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Group: %s", err) + } + billingProject = strings.TrimPrefix(project, "projects/") + + obj := make(map[string]interface{}) + descriptionProp, err := expandNetworkConnectivityGroupDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + autoAcceptProp, err := expandNetworkConnectivityGroupAutoAccept(d.Get("auto_accept"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("auto_accept"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, autoAcceptProp)) { + obj["autoAccept"] = autoAcceptProp + } + labelsProp, err := expandNetworkConnectivityGroupEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVarsForId(d, config, "{{NetworkConnectivityBasePath}}projects/{{project}}/locations/global/hubs/{{hub}}/groups/{{name}}?updateMask=autoAccept.autoAcceptProjects,labels,description") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Group %q: %#v", d.Id(), obj) + headers := make(http.Header) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating Group %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Group %q: %#v", d.Id(), res) + } + + err = NetworkConnectivityOperationWaitTime( + config, res, tpgresource.GetResourceNameFromSelfLink(project), 
"Updating Group", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceNetworkConnectivityGroupRead(d, meta) +} + +func resourceNetworkConnectivityGroupDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Group: %s", err) + } + billingProject = strings.TrimPrefix(project, "projects/") + + url, err := tpgresource.ReplaceVarsForId(d, config, "{{NetworkConnectivityBasePath}}projects/{{project}}/locations/global/hubs/{{hub}}/groups/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting Group %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Group") + } + + err = NetworkConnectivityOperationWaitTime( + config, res, tpgresource.GetResourceNameFromSelfLink(project), "Deleting Group", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Group %q: %#v", d.Id(), res) + return nil +} + +func resourceNetworkConnectivityGroupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^projects/(?P[^/]+)/locations/global/hubs/(?P[^/]+)/groups/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/global/hubs/{{hub}}/groups/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNetworkConnectivityGroupName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenNetworkConnectivityGroupCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkConnectivityGroupUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkConnectivityGroupLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenNetworkConnectivityGroupDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkConnectivityGroupUid(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkConnectivityGroupState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkConnectivityGroupAutoAccept(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["auto_accept_projects"] = + flattenNetworkConnectivityGroupAutoAcceptAutoAcceptProjects(original["autoAcceptProjects"], d, config) + return []interface{}{transformed} +} +func flattenNetworkConnectivityGroupAutoAcceptAutoAcceptProjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkConnectivityGroupRouteTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkConnectivityGroupTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("terraform_labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenNetworkConnectivityGroupEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNetworkConnectivityGroupName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkConnectivityGroupDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkConnectivityGroupAutoAccept(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAutoAcceptProjects, err := expandNetworkConnectivityGroupAutoAcceptAutoAcceptProjects(original["auto_accept_projects"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAutoAcceptProjects); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["autoAcceptProjects"] = transformedAutoAcceptProjects + } + + return transformed, nil +} + +func expandNetworkConnectivityGroupAutoAcceptAutoAcceptProjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkConnectivityGroupEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_hub.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_hub.go index 9c9dcbdd0ac..34f7c938e7d 
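// Three label views recur across the resources in this patch: `labels` is
// non-authoritative and filtered down to the keys present in the user's config,
// `terraform_labels` adds provider-default labels, and `effective_labels` is
// everything actually on the resource in GCP. The filtering step the flatteners
// above perform, distilled into a standalone sketch:
func filterToConfiguredKeysSketch(apiLabels, configured map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{})
	for k := range configured {
		out[k] = apiLabels[k] // keep only the keys this configuration manages
	}
	return out
}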
100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_hub.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_hub.go @@ -3,34 +3,31 @@ // ---------------------------------------------------------------------------- // -// *** AUTO GENERATED CODE *** Type: DCL *** +// *** AUTO GENERATED CODE *** Type: MMv1 *** // // ---------------------------------------------------------------------------- // -// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) -// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). -// Changes will need to be made to the DCL or Magic Modules instead of here. +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. // -// We are not currently able to accept contributions to this file. If changes -// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- package networkconnectivity import ( - "context" "fmt" "log" + "net/http" + "reflect" + "strings" "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - networkconnectivity "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity" - - "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) @@ -51,143 +48,182 @@ func ResourceNetworkConnectivityHub() *schema.Resource { Update: schema.DefaultTimeout(20 * time.Minute), Delete: schema.DefaultTimeout(20 * time.Minute), }, + CustomizeDiff: customdiff.All( - tpgresource.DefaultProviderProject, tpgresource.SetLabelsDiff, + tpgresource.DefaultProviderProject, ), Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Immutable. The name of the hub. Hub names must be unique. They use the following form: `projects/{project_number}/locations/global/hubs/{hub_id}`", + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Immutable. The name of the hub. Hub names must be unique. They use the following form: 'projects/{project_number}/locations/global/hubs/{hub_id}'`, }, - "description": { Type: schema.TypeString, Optional: true, - Description: "An optional description of the hub.", + Description: `An optional description of the hub.`, }, - - "effective_labels": { - Type: schema.TypeMap, + "export_psc": { + Type: schema.TypeBool, Computed: true, - Description: "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + Optional: true, + Description: `Whether Private Service Connect transitivity is enabled for the hub. 
If true, Private Service Connect endpoints in VPC spokes attached to the hub are made accessible to other VPC spokes attached to the hub. The default value is false.`, }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Optional labels in key:value format. For more information about labels, see [Requirements for labels](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements). - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: "The project for the resource", +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, }, - "create_time": { Type: schema.TypeString, Computed: true, - Description: "Output only. The time the hub was created.", + Description: `Output only. The time the hub was created.`, }, - - "labels": { + "effective_labels": { Type: schema.TypeMap, - Optional: true, - Description: "Optional labels in key:value format. For more information about labels, see [Requirements for labels](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements).\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.", + Computed: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, Elem: &schema.Schema{Type: schema.TypeString}, }, - "routing_vpcs": { Type: schema.TypeList, Computed: true, - Description: "The VPC network associated with this hub's spokes. All of the VPN tunnels, VLAN attachments, and router appliance instances referenced by this hub's spokes must belong to this VPC network. This field is read-only. Network Connectivity Center automatically populates it based on the set of spokes attached to the hub.", - Elem: NetworkConnectivityHubRoutingVpcsSchema(), + Description: `The VPC network associated with this hub's spokes. All of the VPN tunnels, VLAN attachments, and router appliance instances referenced by this hub's spokes must belong to this VPC network. This field is read-only. Network Connectivity Center automatically populates it based on the set of spokes attached to the hub.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uri": { + Type: schema.TypeString, + Optional: true, + Description: `The URI of the VPC network.`, + }, + }, + }, }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: "Output only. The current lifecycle state of this hub. Possible values: STATE_UNSPECIFIED, CREATING, ACTIVE, DELETING", + Type: schema.TypeString, + Computed: true, + Description: `Output only. 
The current lifecycle state of this hub.`, + ExactlyOneOf: []string{}, }, - "terraform_labels": { - Type: schema.TypeMap, - Computed: true, - Description: "The combination of labels configured directly on the resource and default labels configured on the provider.", + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, }, - "unique_id": { Type: schema.TypeString, Computed: true, - Description: "Output only. The Google-generated UUID for the hub. This value is unique across all hub resources. If a hub is deleted and another with the same name is created, the new hub is assigned a different unique_id.", + Description: `Output only. The Google-generated UUID for the hub. This value is unique across all hub resources. If a hub is deleted and another with the same name is created, the new hub is assigned a different unique_id.`, }, - "update_time": { Type: schema.TypeString, Computed: true, - Description: "Output only. The time the hub was last updated.", + Description: `Output only. The time the hub was last updated.`, }, - }, - } -} - -func NetworkConnectivityHubRoutingVpcsSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "uri": { - Type: schema.TypeString, - Computed: true, - Description: "The URI of the VPC network.", + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, }, }, + UseJSONNumber: true, } } func resourceNetworkConnectivityHubCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) - project, err := tpgresource.GetProject(d, config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - obj := &networkconnectivity.Hub{ - Name: dcl.String(d.Get("name").(string)), - Description: dcl.String(d.Get("description").(string)), - Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), - Project: dcl.String(project), + obj := make(map[string]interface{}) + nameProp, err := expandNetworkConnectivityHubName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp } - - id, err := obj.ID() + descriptionProp, err := expandNetworkConnectivityHubDescription(d.Get("description"), d, config) if err != nil { - return fmt.Errorf("error constructing id: %s", err) + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp } - d.SetId(id) - directive := tpgdclresource.CreateDirective - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + exportPscProp, err := expandNetworkConnectivityHubExportPsc(d.Get("export_psc"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("export_psc"); !tpgresource.IsEmptyValue(reflect.ValueOf(exportPscProp)) && (ok || !reflect.DeepEqual(v, exportPscProp)) { + obj["exportPsc"] = exportPscProp + } + labelsProp, err := expandNetworkConnectivityHubEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || 
!reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp } - billingProject := project + + url, err := tpgresource.ReplaceVarsForId(d, config, "{{NetworkConnectivityBasePath}}projects/{{project}}/locations/global/hubs?hubId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Hub: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Hub: %s", err) + } + billingProject = strings.TrimPrefix(project, "projects/") + // err == nil indicates that the billing_project value was found if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating Hub: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/global/hubs/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) } - res, err := client.ApplyHub(context.Background(), obj, directive...) + d.SetId(id) + + err = NetworkConnectivityOperationWaitTime( + config, res, tpgresource.GetResourceNameFromSelfLink(project), "Creating Hub", userAgent, + d.Timeout(schema.TimeoutCreate)) - if _, ok := err.(dcl.DiffAfterApplyError); ok { - log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { + if err != nil { // The resource didn't actually create d.SetId("") - return fmt.Errorf("Error creating Hub: %s", err) + return fmt.Errorf("Error waiting to create Hub: %s", err) } log.Printf("[DEBUG] Finished creating Hub %q: %#v", d.Id(), res) @@ -197,168 +233,243 @@ func resourceNetworkConnectivityHubCreate(d *schema.ResourceData, meta interface func resourceNetworkConnectivityHubRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) - project, err := tpgresource.GetProject(d, config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - obj := &networkconnectivity.Hub{ - Name: dcl.String(d.Get("name").(string)), - Description: dcl.String(d.Get("description").(string)), - Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), - Project: dcl.String(project), + url, err := tpgresource.ReplaceVarsForId(d, config, "{{NetworkConnectivityBasePath}}projects/{{project}}/locations/global/hubs/{{name}}") + if err != nil { + return err } - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) if err != nil { - return err + return fmt.Errorf("Error fetching project for Hub: %s", err) } - billingProject := project + billingProject = strings.TrimPrefix(project, "projects/") + // err == nil indicates that the billing_project value was found if bp, err := tpgresource.GetBillingProject(d, config); err == nil { 
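// Two different create shapes appear in this patch. The hub is created with a
// POST to the hubs collection plus a hubId query parameter, while the group
// earlier was "created" with a PATCH to its full name and a fixed updateMask,
// which suggests the named groups (default/center/edge) already exist once the
// hub does. The templates, copied from the code above, side by side:
const (
	// POST: a genuinely new resource in a collection
	hubCreateTemplate = "{{NetworkConnectivityBasePath}}projects/{{project}}/locations/global/hubs?hubId={{name}}"
	// PATCH: configure a group the service provisions together with the hub
	groupCreateTemplate = "{{NetworkConnectivityBasePath}}projects/{{project}}/locations/global/hubs/{{hub}}/groups/{{name}}?updateMask=autoAccept.autoAcceptProjects,labels,description"
)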
billingProject = bp } - client := transport_tpg.NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.GetHub(context.Background(), obj) + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) if err != nil { - resourceName := fmt.Sprintf("NetworkConnectivityHub %q", d.Id()) - return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NetworkConnectivityHub %q", d.Id())) } - if err = d.Set("name", res.Name); err != nil { - return fmt.Errorf("error setting name in state: %s", err) + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Hub: %s", err) } - if err = d.Set("description", res.Description); err != nil { - return fmt.Errorf("error setting description in state: %s", err) + + if err := d.Set("name", flattenNetworkConnectivityHubName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Hub: %s", err) + } + if err := d.Set("create_time", flattenNetworkConnectivityHubCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Hub: %s", err) } - if err = d.Set("effective_labels", res.Labels); err != nil { - return fmt.Errorf("error setting effective_labels in state: %s", err) + if err := d.Set("update_time", flattenNetworkConnectivityHubUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Hub: %s", err) } - if err = d.Set("project", res.Project); err != nil { - return fmt.Errorf("error setting project in state: %s", err) + if err := d.Set("labels", flattenNetworkConnectivityHubLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Hub: %s", err) } - if err = d.Set("create_time", res.CreateTime); err != nil { - return fmt.Errorf("error setting create_time in state: %s", err) + if err := d.Set("description", flattenNetworkConnectivityHubDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Hub: %s", err) } - if err = d.Set("labels", flattenNetworkConnectivityHubLabels(res.Labels, d)); err != nil { - return fmt.Errorf("error setting labels in state: %s", err) + if err := d.Set("unique_id", flattenNetworkConnectivityHubUniqueId(res["uniqueId"], d, config)); err != nil { + return fmt.Errorf("Error reading Hub: %s", err) } - if err = d.Set("routing_vpcs", flattenNetworkConnectivityHubRoutingVpcsArray(res.RoutingVpcs)); err != nil { - return fmt.Errorf("error setting routing_vpcs in state: %s", err) + if err := d.Set("state", flattenNetworkConnectivityHubState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Hub: %s", err) } - if err = d.Set("state", res.State); err != nil { - return fmt.Errorf("error setting state in state: %s", err) + if err := d.Set("routing_vpcs", flattenNetworkConnectivityHubRoutingVpcs(res["routingVpcs"], d, config)); err != nil { + return fmt.Errorf("Error reading Hub: %s", err) } - if err = d.Set("terraform_labels", flattenNetworkConnectivityHubTerraformLabels(res.Labels, d)); err != nil { - return fmt.Errorf("error setting 
terraform_labels in state: %s", err) + if err := d.Set("export_psc", flattenNetworkConnectivityHubExportPsc(res["exportPsc"], d, config)); err != nil { + return fmt.Errorf("Error reading Hub: %s", err) } - if err = d.Set("unique_id", res.UniqueId); err != nil { - return fmt.Errorf("error setting unique_id in state: %s", err) + if err := d.Set("terraform_labels", flattenNetworkConnectivityHubTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Hub: %s", err) } - if err = d.Set("update_time", res.UpdateTime); err != nil { - return fmt.Errorf("error setting update_time in state: %s", err) + if err := d.Set("effective_labels", flattenNetworkConnectivityHubEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Hub: %s", err) } return nil } + func resourceNetworkConnectivityHubUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Hub: %s", err) + } + billingProject = strings.TrimPrefix(project, "projects/") + + obj := make(map[string]interface{}) + descriptionProp, err := expandNetworkConnectivityHubDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + exportPscProp, err := expandNetworkConnectivityHubExportPsc(d.Get("export_psc"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("export_psc"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, exportPscProp)) { + obj["exportPsc"] = exportPscProp + } + labelsProp, err := expandNetworkConnectivityHubEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp } - obj := &networkconnectivity.Hub{ - Name: dcl.String(d.Get("name").(string)), - Description: dcl.String(d.Get("description").(string)), - Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), - Project: dcl.String(project), + url, err := tpgresource.ReplaceVarsForId(d, config, "{{NetworkConnectivityBasePath}}projects/{{project}}/locations/global/hubs/{{name}}") + if err != nil { + return err } - directive := tpgdclresource.UpdateDirective - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + + log.Printf("[DEBUG] Updating Hub %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("export_psc") { + updateMask = append(updateMask, "exportPsc") + } + + if d.HasChange("effective_labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } - billingProject := "" // err == nil indicates that the billing_project value was found if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.ApplyHub(context.Background(), obj, directive...) - if _, ok := err.(dcl.DiffAfterApplyError); ok { - log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error updating Hub: %s", err) - } + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating Hub %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Hub %q: %#v", d.Id(), res) + } - log.Printf("[DEBUG] Finished creating Hub %q: %#v", d.Id(), res) + err = NetworkConnectivityOperationWaitTime( + config, res, tpgresource.GetResourceNameFromSelfLink(project), "Updating Hub", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + } return resourceNetworkConnectivityHubRead(d, meta) } func resourceNetworkConnectivityHubDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) - project, err := tpgresource.GetProject(d, config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - obj := &networkconnectivity.Hub{ - Name: dcl.String(d.Get("name").(string)), - Description: dcl.String(d.Get("description").(string)), - Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), - Project: dcl.String(project), + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Hub: %s", err) } + billingProject = strings.TrimPrefix(project, "projects/") - log.Printf("[DEBUG] Deleting Hub %q", d.Id()) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + url, err := tpgresource.ReplaceVarsForId(d, config, "{{NetworkConnectivityBasePath}}projects/{{project}}/locations/global/hubs/{{name}}") if err != nil { return err } - billingProject := project + + var obj map[string]interface{} + // err == nil indicates that the billing_project value was found if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting Hub %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, 
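// The hub update above only issues a PATCH when something actually changed,
// mapping changed schema keys to API field paths and joining them into the
// updateMask query parameter. Distilled (the field list matches the hub's
// updatable fields; an empty result means the PATCH is skipped entirely):
func hubUpdateMaskSketch(d *schema.ResourceData) string {
	mask := []string{}
	if d.HasChange("description") {
		mask = append(mask, "description")
	}
	if d.HasChange("export_psc") {
		mask = append(mask, "exportPsc") // schema snake_case maps to API camelCase
	}
	if d.HasChange("effective_labels") {
		mask = append(mask, "labels")
	}
	return strings.Join(mask, ",")
}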
+ }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Hub") } - if err := client.DeleteHub(context.Background(), obj); err != nil { - return fmt.Errorf("Error deleting Hub: %s", err) + + err = NetworkConnectivityOperationWaitTime( + config, res, tpgresource.GetResourceNameFromSelfLink(project), "Deleting Hub", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err } - log.Printf("[DEBUG] Finished deleting Hub %q", d.Id()) + log.Printf("[DEBUG] Finished deleting Hub %q: %#v", d.Id(), res) return nil } func resourceNetworkConnectivityHubImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { config := meta.(*transport_tpg.Config) - if err := tpgresource.ParseImportId([]string{ - "projects/(?P<project>[^/]+)/locations/global/hubs/(?P<name>[^/]+)", - "(?P<project>[^/]+)/(?P<name>[^/]+)", - "(?P<name>[^/]+)", + "^projects/(?P<project>[^/]+)/locations/global/hubs/(?P<name>[^/]+)$", + "^(?P<project>[^/]+)/(?P<name>[^/]+)$", + "^(?P<name>[^/]+)$", }, d, config); err != nil { return nil, err } @@ -373,58 +484,109 @@ func resourceNetworkConnectivityHubImport(d *schema.ResourceData, meta interface return []*schema.ResourceData{d}, nil } -func flattenNetworkConnectivityHubRoutingVpcsArray(objs []networkconnectivity.HubRoutingVpcs) []interface{} { - if objs == nil { - return nil - } +func flattenNetworkConnectivityHubName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} - items := []interface{}{} - for _, item := range objs { - i := flattenNetworkConnectivityHubRoutingVpcs(&item) - items = append(items, i) - } +func flattenNetworkConnectivityHubCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} - return items +func flattenNetworkConnectivityHubUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } -func flattenNetworkConnectivityHubRoutingVpcs(obj *networkconnectivity.HubRoutingVpcs) interface{} { - if obj == nil || obj.Empty() { - return nil +func flattenNetworkConnectivityHubLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v } - transformed := map[string]interface{}{ - "uri": obj.Uri, + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } } return transformed +} +func flattenNetworkConnectivityHubDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } -func flattenNetworkConnectivityHubLabels(v map[string]string, d *schema.ResourceData) interface{} { - if v == nil { - return nil - } +func flattenNetworkConnectivityHubUniqueId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} - transformed := make(map[string]interface{}) - if l, ok := d.Get("labels").(map[string]interface{}); ok { - for k, _ := range l { - transformed[k] = v[k] +func flattenNetworkConnectivityHubState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkConnectivityHubRoutingVpcs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api +
continue } + transformed = append(transformed, map[string]interface{}{ + "uri": flattenNetworkConnectivityHubRoutingVpcsUri(original["uri"], d, config), + }) } - return transformed } +func flattenNetworkConnectivityHubRoutingVpcsUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkConnectivityHubExportPsc(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} -func flattenNetworkConnectivityHubTerraformLabels(v map[string]string, d *schema.ResourceData) interface{} { +func flattenNetworkConnectivityHubTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { - return nil + return v } transformed := make(map[string]interface{}) - if l, ok := d.Get("terraform_labels").(map[string]interface{}); ok { - for k, _ := range l { - transformed[k] = v[k] + if l, ok := d.GetOkExists("terraform_labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] } } return transformed } + +func flattenNetworkConnectivityHubEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNetworkConnectivityHubName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkConnectivityHubDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkConnectivityHubExportPsc(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkConnectivityHubEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_hub_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_hub_sweeper.go index 23bc57337b1..e0a447ebec9 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_hub_sweeper.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_hub_sweeper.go @@ -3,16 +3,15 @@ // ---------------------------------------------------------------------------- // -// *** AUTO GENERATED CODE *** Type: DCL *** +// *** AUTO GENERATED CODE *** Type: MMv1 *** // // ---------------------------------------------------------------------------- // -// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) -// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). -// Changes will need to be made to the DCL or Magic Modules instead of here. +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. 
// -// We are not currently able to accept contributions to this file. If changes -// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- @@ -21,11 +20,12 @@ package networkconnectivity import ( "context" "log" + "strings" "testing" - networkconnectivity "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity" "github.com/hashicorp/terraform-provider-google/google/envvar" "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) @@ -33,8 +33,10 @@ func init() { sweeper.AddTestSweepers("NetworkConnectivityHub", testSweepNetworkConnectivityHub) } +// At the time of writing, the CI only passes us-central1 as the region func testSweepNetworkConnectivityHub(region string) error { - log.Print("[INFO][SWEEPER_LOG] Starting sweeper for NetworkConnectivityHub") + resourceName := "NetworkConnectivityHub" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) config, err := sweeper.SharedConfigForRegion(region) if err != nil { @@ -51,23 +53,87 @@ func testSweepNetworkConnectivityHub(region string) error { t := &testing.T{} billingId := envvar.GetTestBillingAccountFromEnv(t) - // Setup variables to be used for Delete arguments. - d := map[string]string{ - "project": config.Project, - "region": region, - "location": region, - "zone": "-", - "billing_account": billingId, + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, } - client := transport_tpg.NewDCLNetworkConnectivityClient(config, config.UserAgent, "", 0) - err = client.DeleteAllHub(context.Background(), d["project"], isDeletableNetworkConnectivityHub) + listTemplate := strings.Split("https://networkconnectivity.googleapis.com/v1/projects/{{project}}/locations/global/hubs", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) if err != nil { - return err + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil } - return nil -} -func isDeletableNetworkConnectivityHub(r *networkconnectivity.Hub) bool { - return sweeper.IsSweepableTestResource(*r.Name) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["hubs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
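+	// Only resources whose names match the test-prefix check below
+	// (sweeper.IsSweepableTestResource) are deleted; all other items are
+	// merely counted and skipped.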
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://networkconnectivity.googleapis.com/v1/projects/{{project}}/locations/global/hubs/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_internal_range.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_internal_range.go index 8ee35762f43..dd74813f8b1 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_internal_range.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_internal_range.go @@ -77,8 +77,8 @@ func ResourceNetworkConnectivityInternalRange() *schema.Resource { "usage": { Type: schema.TypeString, Required: true, - ValidateFunc: verify.ValidateEnum([]string{"FOR_VPC", "EXTERNAL_TO_VPC"}), - Description: `The type of usage set for this InternalRange. Possible values: ["FOR_VPC", "EXTERNAL_TO_VPC"]`, + ValidateFunc: verify.ValidateEnum([]string{"FOR_VPC", "EXTERNAL_TO_VPC", "FOR_MIGRATION"}), + Description: `The type of usage set for this InternalRange. Possible values: ["FOR_VPC", "EXTERNAL_TO_VPC", "FOR_MIGRATION"]`, }, "description": { Type: schema.TypeString, @@ -101,6 +101,33 @@ func ResourceNetworkConnectivityInternalRange() *schema.Resource { Please refer to the field 'effective_labels' for all of the labels present on the resource.`, Elem: &schema.Schema{Type: schema.TypeString}, }, + "migration": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Specification for migration with source and target resource names.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source": { + Type: schema.TypeString, + Required: true, + Description: `Resource path as an URI of the source resource, for example a subnet. +The project for the source resource should match the project for the +InternalRange. 
+An example /projects/{project}/regions/{region}/subnetworks/{subnet}`, + }, + "target": { + Type: schema.TypeString, + Required: true, + Description: `Resource path of the target resource. The target project can be +different, as in the cases when migrating to peer networks. The resource +may not exist yet. +For example /projects/{project}/regions/{region}/subnetworks/{subnet}`, + }, + }, + }, + }, "overlaps": { Type: schema.TypeList, Optional: true, @@ -215,6 +242,12 @@ func resourceNetworkConnectivityInternalRangeCreate(d *schema.ResourceData, meta } else if v, ok := d.GetOkExists("overlaps"); !tpgresource.IsEmptyValue(reflect.ValueOf(overlapsProp)) && (ok || !reflect.DeepEqual(v, overlapsProp)) { obj["overlaps"] = overlapsProp } + migrationProp, err := expandNetworkConnectivityInternalRangeMigration(d.Get("migration"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("migration"); !tpgresource.IsEmptyValue(reflect.ValueOf(migrationProp)) && (ok || !reflect.DeepEqual(v, migrationProp)) { + obj["migration"] = migrationProp + } labelsProp, err := expandNetworkConnectivityInternalRangeEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err @@ -350,6 +383,9 @@ func resourceNetworkConnectivityInternalRangeRead(d *schema.ResourceData, meta i if err := d.Set("overlaps", flattenNetworkConnectivityInternalRangeOverlaps(res["overlaps"], d, config)); err != nil { return fmt.Errorf("Error reading InternalRange: %s", err) } + if err := d.Set("migration", flattenNetworkConnectivityInternalRangeMigration(res["migration"], d, config)); err != nil { + return fmt.Errorf("Error reading InternalRange: %s", err) + } if err := d.Set("terraform_labels", flattenNetworkConnectivityInternalRangeTerraformLabels(res["labels"], d, config)); err != nil { return fmt.Errorf("Error reading InternalRange: %s", err) } @@ -661,6 +697,29 @@ func flattenNetworkConnectivityInternalRangeOverlaps(v interface{}, d *schema.Re return v } +func flattenNetworkConnectivityInternalRangeMigration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["source"] = + flattenNetworkConnectivityInternalRangeMigrationSource(original["source"], d, config) + transformed["target"] = + flattenNetworkConnectivityInternalRangeMigrationTarget(original["target"], d, config) + return []interface{}{transformed} +} +func flattenNetworkConnectivityInternalRangeMigrationSource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkConnectivityInternalRangeMigrationTarget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenNetworkConnectivityInternalRangeTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v @@ -712,6 +771,40 @@ func expandNetworkConnectivityInternalRangeOverlaps(v interface{}, d tpgresource return v, nil } +func expandNetworkConnectivityInternalRangeMigration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSource, err := 
expandNetworkConnectivityInternalRangeMigrationSource(original["source"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSource); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["source"] = transformedSource + } + + transformedTarget, err := expandNetworkConnectivityInternalRangeMigrationTarget(original["target"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTarget); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["target"] = transformedTarget + } + + return transformed, nil +} + +func expandNetworkConnectivityInternalRangeMigrationSource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkConnectivityInternalRangeMigrationTarget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandNetworkConnectivityInternalRangeEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_spoke.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_spoke.go index f371cbbff7c..57c2884ad29 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_spoke.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_spoke.go @@ -3,34 +3,31 @@ // ---------------------------------------------------------------------------- // -// *** AUTO GENERATED CODE *** Type: DCL *** +// *** AUTO GENERATED CODE *** Type: MMv1 *** // // ---------------------------------------------------------------------------- // -// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) -// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). -// Changes will need to be made to the DCL or Magic Modules instead of here. +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. // -// We are not currently able to accept contributions to this file. If changes -// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- package networkconnectivity import ( - "context" "fmt" "log" + "net/http" + "reflect" + "strings" "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - networkconnectivity "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity" - - "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) @@ -51,9 +48,10 @@ func ResourceNetworkConnectivitySpoke() *schema.Resource { Update: schema.DefaultTimeout(20 * time.Minute), Delete: schema.DefaultTimeout(20 * time.Minute), }, + CustomizeDiff: customdiff.All( - tpgresource.DefaultProviderProject, tpgresource.SetLabelsDiff, + tpgresource.DefaultProviderProject, ), Schema: map[string]*schema.Schema{ @@ -62,280 +60,399 @@ func ResourceNetworkConnectivitySpoke() *schema.Resource { Required: true, ForceNew: true, DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: "Immutable. The URI of the hub that this spoke is attached to.", + Description: `Immutable. The URI of the hub that this spoke is attached to.`, }, - "location": { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "The location for the resource", + Description: `The location for the resource`, }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Immutable. The name of the spoke. Spoke names must be unique.", + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Immutable. The name of the spoke. Spoke names must be unique.`, }, - "description": { Type: schema.TypeString, Optional: true, - Description: "An optional description of the spoke.", + Description: `An optional description of the spoke.`, }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Optional labels in key:value format. For more information about labels, see [Requirements for labels](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements). - "effective_labels": { - Type: schema.TypeMap, - Computed: true, - Description: "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, }, - "linked_interconnect_attachments": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: "A collection of VLAN attachment resources. These resources should be redundant attachments that all advertise the same prefixes to Google Cloud. 
Alternatively, in active/passive configurations, all attachments should be capable of advertising the same prefixes.", - MaxItems: 1, - Elem: NetworkConnectivitySpokeLinkedInterconnectAttachmentsSchema(), - ConflictsWith: []string{"linked_vpn_tunnels", "linked_router_appliance_instances", "linked_vpc_network"}, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `A collection of VLAN attachment resources. These resources should be redundant attachments that all advertise the same prefixes to Google Cloud. Alternatively, in active/passive configurations, all attachments should be capable of advertising the same prefixes.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "site_to_site_data_transfer": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + Description: `A value that controls whether site-to-site data transfer is enabled for these resources. Note that data transfer is available only in supported locations.`, + }, + "uris": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `The URIs of linked interconnect attachment resources`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "include_import_ranges": { + Type: schema.TypeList, + Optional: true, + Description: `IP ranges allowed to be included during import from hub (does not control transit connectivity). +The only allowed value for now is "ALL_IPV4_RANGES".`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + ConflictsWith: []string{"linked_vpn_tunnels", "linked_router_appliance_instances", "linked_vpc_network", "linked_producer_vpc_network"}, + }, + "linked_producer_vpc_network": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Producer VPC network that is associated with the spoke.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "network": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The URI of the Service Consumer VPC that the Producer VPC is peered with.`, + }, + "peering": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the VPC peering between the Service Consumer VPC and the Producer VPC (defined in the Tenant project) which is added to the NCC hub. 
This peering must be in ACTIVE state.`, + }, + "exclude_export_ranges": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `IP ranges encompassing the subnets to be excluded from peering.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "include_export_ranges": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `IP ranges allowed to be included from peering.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "producer_network": { + Type: schema.TypeString, + Computed: true, + Description: `The URI of the Producer VPC.`, + }, + }, + }, + ConflictsWith: []string{"linked_interconnect_attachments", "linked_router_appliance_instances", "linked_vpn_tunnels", "linked_vpc_network"}, }, - "linked_router_appliance_instances": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: "The URIs of linked Router appliance resources", - MaxItems: 1, - Elem: NetworkConnectivitySpokeLinkedRouterApplianceInstancesSchema(), - ConflictsWith: []string{"linked_vpn_tunnels", "linked_interconnect_attachments", "linked_vpc_network"}, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The URIs of linked Router appliance resources`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instances": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `The list of router appliance instances`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_address": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The IP address on the VM to use for peering.`, + }, + "virtual_machine": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The URI of the virtual machine resource`, + }, + }, + }, + }, + "site_to_site_data_transfer": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + Description: `A value that controls whether site-to-site data transfer is enabled for these resources. Note that data transfer is available only in supported locations.`, + }, + "include_import_ranges": { + Type: schema.TypeList, + Optional: true, + Description: `IP ranges allowed to be included during import from hub (does not control transit connectivity). 
+The only allowed value for now is "ALL_IPV4_RANGES".`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + ConflictsWith: []string{"linked_interconnect_attachments", "linked_vpn_tunnels", "linked_vpc_network", "linked_producer_vpc_network"}, }, - "linked_vpc_network": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: "VPC network that is associated with the spoke.", - MaxItems: 1, - Elem: NetworkConnectivitySpokeLinkedVPCNetworkSchema(), - ConflictsWith: []string{"linked_vpn_tunnels", "linked_interconnect_attachments", "linked_router_appliance_instances"}, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `VPC network that is associated with the spoke.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uri": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The URI of the VPC network resource.`, + }, + "exclude_export_ranges": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `IP ranges encompassing the subnets to be excluded from peering.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "include_export_ranges": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `IP ranges allowed to be included from peering.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + ConflictsWith: []string{"linked_interconnect_attachments", "linked_router_appliance_instances", "linked_vpn_tunnels", "linked_producer_vpc_network"}, }, - "linked_vpn_tunnels": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: "The URIs of linked VPN tunnel resources", - MaxItems: 1, - Elem: NetworkConnectivitySpokeLinkedVpnTunnelsSchema(), - ConflictsWith: []string{"linked_interconnect_attachments", "linked_router_appliance_instances", "linked_vpc_network"}, - }, - - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: "The project for the resource", + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The URIs of linked VPN tunnel resources`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "site_to_site_data_transfer": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + Description: `A value that controls whether site-to-site data transfer is enabled for these resources. Note that data transfer is available only in supported locations.`, + }, + "uris": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `The URIs of linked VPN tunnel resources.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "include_import_ranges": { + Type: schema.TypeList, + Optional: true, + Description: `IP ranges allowed to be included during import from hub (does not control transit connectivity). +The only allowed value for now is "ALL_IPV4_RANGES".`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + ConflictsWith: []string{"linked_interconnect_attachments", "linked_router_appliance_instances", "linked_vpc_network", "linked_producer_vpc_network"}, }, - "create_time": { Type: schema.TypeString, Computed: true, - Description: "Output only. The time the spoke was created.", + Description: `Output only. 
The time the spoke was created.`, }, - - "labels": { + "effective_labels": { Type: schema.TypeMap, - Optional: true, - Description: "Optional labels in key:value format. For more information about labels, see [Requirements for labels](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements).\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.", + Computed: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, Elem: &schema.Schema{Type: schema.TypeString}, }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: "Output only. The current lifecycle state of this spoke. Possible values: STATE_UNSPECIFIED, CREATING, ACTIVE, DELETING", + Type: schema.TypeString, + Computed: true, + Description: `Output only. The current lifecycle state of this spoke.`, + ExactlyOneOf: []string{}, }, - "terraform_labels": { - Type: schema.TypeMap, - Computed: true, - Description: "The combination of labels configured directly on the resource and default labels configured on the provider.", + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, }, - "unique_id": { Type: schema.TypeString, Computed: true, - Description: "Output only. The Google-generated UUID for the spoke. This value is unique across all spoke resources. If a spoke is deleted and another with the same name is created, the new spoke is assigned a different unique_id.", + Description: `Output only. The Google-generated UUID for the spoke. This value is unique across all spoke resources. If a spoke is deleted and another with the same name is created, the new spoke is assigned a different unique_id.`, }, - "update_time": { Type: schema.TypeString, Computed: true, - Description: "Output only. The time the spoke was last updated.", + Description: `Output only. The time the spoke was last updated.`, }, - }, - } -} - -func NetworkConnectivitySpokeLinkedInterconnectAttachmentsSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "site_to_site_data_transfer": { - Type: schema.TypeBool, - Required: true, - ForceNew: true, - Description: "A value that controls whether site-to-site data transfer is enabled for these resources. 
Note that data transfer is available only in supported locations.", - }, - - "uris": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - Description: "The URIs of linked interconnect attachment resources", - Elem: &schema.Schema{Type: schema.TypeString}, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, }, }, + UseJSONNumber: true, } } -func NetworkConnectivitySpokeLinkedRouterApplianceInstancesSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instances": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - Description: "The list of router appliance instances", - Elem: NetworkConnectivitySpokeLinkedRouterApplianceInstancesInstancesSchema(), - }, - - "site_to_site_data_transfer": { - Type: schema.TypeBool, - Required: true, - ForceNew: true, - Description: "A value that controls whether site-to-site data transfer is enabled for these resources. Note that data transfer is available only in supported locations.", - }, - }, +func resourceNetworkConnectivitySpokeCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err } -} - -func NetworkConnectivitySpokeLinkedRouterApplianceInstancesInstancesSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ip_address": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "The IP address on the VM to use for peering.", - }, - "virtual_machine": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: "The URI of the virtual machine resource", - }, - }, + obj := make(map[string]interface{}) + nameProp, err := expandNetworkConnectivitySpokeName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp } -} - -func NetworkConnectivitySpokeLinkedVPCNetworkSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "uri": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: "The URI of the VPC network resource.", - }, - - "exclude_export_ranges": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: "IP ranges encompassing the subnets to be excluded from peering.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, + descriptionProp, err := expandNetworkConnectivitySpokeDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp } -} - -func NetworkConnectivitySpokeLinkedVpnTunnelsSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "site_to_site_data_transfer": { - Type: schema.TypeBool, - Required: true, - ForceNew: true, - Description: "A value that controls whether site-to-site data transfer is enabled for these resources. 
Note that data transfer is available only in supported locations.", - }, - - "uris": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - Description: "The URIs of linked VPN tunnel resources.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, + hubProp, err := expandNetworkConnectivitySpokeHub(d.Get("hub"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("hub"); !tpgresource.IsEmptyValue(reflect.ValueOf(hubProp)) && (ok || !reflect.DeepEqual(v, hubProp)) { + obj["hub"] = hubProp } -} - -func resourceNetworkConnectivitySpokeCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - project, err := tpgresource.GetProject(d, config) + linkedVpnTunnelsProp, err := expandNetworkConnectivitySpokeLinkedVpnTunnels(d.Get("linked_vpn_tunnels"), d, config) if err != nil { return err + } else if v, ok := d.GetOkExists("linked_vpn_tunnels"); !tpgresource.IsEmptyValue(reflect.ValueOf(linkedVpnTunnelsProp)) && (ok || !reflect.DeepEqual(v, linkedVpnTunnelsProp)) { + obj["linkedVpnTunnels"] = linkedVpnTunnelsProp } - - obj := &networkconnectivity.Spoke{ - Hub: dcl.String(d.Get("hub").(string)), - Location: dcl.String(d.Get("location").(string)), - Name: dcl.String(d.Get("name").(string)), - Description: dcl.String(d.Get("description").(string)), - Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), - LinkedInterconnectAttachments: expandNetworkConnectivitySpokeLinkedInterconnectAttachments(d.Get("linked_interconnect_attachments")), - LinkedRouterApplianceInstances: expandNetworkConnectivitySpokeLinkedRouterApplianceInstances(d.Get("linked_router_appliance_instances")), - LinkedVPCNetwork: expandNetworkConnectivitySpokeLinkedVPCNetwork(d.Get("linked_vpc_network")), - LinkedVpnTunnels: expandNetworkConnectivitySpokeLinkedVpnTunnels(d.Get("linked_vpn_tunnels")), - Project: dcl.String(project), + linkedInterconnectAttachmentsProp, err := expandNetworkConnectivitySpokeLinkedInterconnectAttachments(d.Get("linked_interconnect_attachments"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("linked_interconnect_attachments"); !tpgresource.IsEmptyValue(reflect.ValueOf(linkedInterconnectAttachmentsProp)) && (ok || !reflect.DeepEqual(v, linkedInterconnectAttachmentsProp)) { + obj["linkedInterconnectAttachments"] = linkedInterconnectAttachmentsProp } - - id, err := obj.ID() + linkedRouterApplianceInstancesProp, err := expandNetworkConnectivitySpokeLinkedRouterApplianceInstances(d.Get("linked_router_appliance_instances"), d, config) if err != nil { - return fmt.Errorf("error constructing id: %s", err) + return err + } else if v, ok := d.GetOkExists("linked_router_appliance_instances"); !tpgresource.IsEmptyValue(reflect.ValueOf(linkedRouterApplianceInstancesProp)) && (ok || !reflect.DeepEqual(v, linkedRouterApplianceInstancesProp)) { + obj["linkedRouterApplianceInstances"] = linkedRouterApplianceInstancesProp } - d.SetId(id) - directive := tpgdclresource.CreateDirective - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + linkedVpcNetworkProp, err := expandNetworkConnectivitySpokeLinkedVpcNetwork(d.Get("linked_vpc_network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("linked_vpc_network"); !tpgresource.IsEmptyValue(reflect.ValueOf(linkedVpcNetworkProp)) && (ok || !reflect.DeepEqual(v, linkedVpcNetworkProp)) { + obj["linkedVpcNetwork"] = linkedVpcNetworkProp + } + linkedProducerVpcNetworkProp, err := 
expandNetworkConnectivitySpokeLinkedProducerVpcNetwork(d.Get("linked_producer_vpc_network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("linked_producer_vpc_network"); !tpgresource.IsEmptyValue(reflect.ValueOf(linkedProducerVpcNetworkProp)) && (ok || !reflect.DeepEqual(v, linkedProducerVpcNetworkProp)) { + obj["linkedProducerVpcNetwork"] = linkedProducerVpcNetworkProp + } + labelsProp, err := expandNetworkConnectivitySpokeEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVarsForId(d, config, "{{NetworkConnectivityBasePath}}projects/{{project}}/locations/{{location}}/spokes?spokeId={{name}}") if err != nil { return err } - billingProject := project + + log.Printf("[DEBUG] Creating new Spoke: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Spoke: %s", err) + } + billingProject = strings.TrimPrefix(project, "projects/") + // err == nil indicates that the billing_project value was found if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating Spoke: %s", err) } - res, err := client.ApplySpoke(context.Background(), obj, directive...) 
- if _, ok := err.(dcl.DiffAfterApplyError); ok { - log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { + // Store the ID now + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/spokes/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = NetworkConnectivityOperationWaitTime( + config, res, tpgresource.GetResourceNameFromSelfLink(project), "Creating Spoke", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { // The resource didn't actually create d.SetId("") - return fmt.Errorf("Error creating Spoke: %s", err) + return fmt.Errorf("Error waiting to create Spoke: %s", err) } log.Printf("[DEBUG] Finished creating Spoke %q: %#v", d.Id(), res) @@ -345,201 +462,245 @@ func resourceNetworkConnectivitySpokeCreate(d *schema.ResourceData, meta interfa func resourceNetworkConnectivitySpokeRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) - project, err := tpgresource.GetProject(d, config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - obj := &networkconnectivity.Spoke{ - Hub: dcl.String(d.Get("hub").(string)), - Location: dcl.String(d.Get("location").(string)), - Name: dcl.String(d.Get("name").(string)), - Description: dcl.String(d.Get("description").(string)), - Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), - LinkedInterconnectAttachments: expandNetworkConnectivitySpokeLinkedInterconnectAttachments(d.Get("linked_interconnect_attachments")), - LinkedRouterApplianceInstances: expandNetworkConnectivitySpokeLinkedRouterApplianceInstances(d.Get("linked_router_appliance_instances")), - LinkedVPCNetwork: expandNetworkConnectivitySpokeLinkedVPCNetwork(d.Get("linked_vpc_network")), - LinkedVpnTunnels: expandNetworkConnectivitySpokeLinkedVpnTunnels(d.Get("linked_vpn_tunnels")), - Project: dcl.String(project), + url, err := tpgresource.ReplaceVarsForId(d, config, "{{NetworkConnectivityBasePath}}projects/{{project}}/locations/{{location}}/spokes/{{name}}") + if err != nil { + return err } - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) if err != nil { - return err + return fmt.Errorf("Error fetching project for Spoke: %s", err) } - billingProject := project + billingProject = strings.TrimPrefix(project, "projects/") + // err == nil indicates that the billing_project value was found if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.GetSpoke(context.Background(), obj) + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) if err != nil { - resourceName := fmt.Sprintf("NetworkConnectivitySpoke %q", d.Id()) - return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + return transport_tpg.HandleNotFoundError(err, d, 
fmt.Sprintf("NetworkConnectivitySpoke %q", d.Id())) } - if err = d.Set("hub", res.Hub); err != nil { - return fmt.Errorf("error setting hub in state: %s", err) + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Spoke: %s", err) } - if err = d.Set("location", res.Location); err != nil { - return fmt.Errorf("error setting location in state: %s", err) + + if err := d.Set("name", flattenNetworkConnectivitySpokeName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Spoke: %s", err) } - if err = d.Set("name", res.Name); err != nil { - return fmt.Errorf("error setting name in state: %s", err) + if err := d.Set("create_time", flattenNetworkConnectivitySpokeCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Spoke: %s", err) } - if err = d.Set("description", res.Description); err != nil { - return fmt.Errorf("error setting description in state: %s", err) + if err := d.Set("update_time", flattenNetworkConnectivitySpokeUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Spoke: %s", err) } - if err = d.Set("effective_labels", res.Labels); err != nil { - return fmt.Errorf("error setting effective_labels in state: %s", err) + if err := d.Set("labels", flattenNetworkConnectivitySpokeLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Spoke: %s", err) } - if err = d.Set("linked_interconnect_attachments", flattenNetworkConnectivitySpokeLinkedInterconnectAttachments(res.LinkedInterconnectAttachments)); err != nil { - return fmt.Errorf("error setting linked_interconnect_attachments in state: %s", err) + if err := d.Set("description", flattenNetworkConnectivitySpokeDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Spoke: %s", err) } - if err = d.Set("linked_router_appliance_instances", flattenNetworkConnectivitySpokeLinkedRouterApplianceInstances(res.LinkedRouterApplianceInstances)); err != nil { - return fmt.Errorf("error setting linked_router_appliance_instances in state: %s", err) + if err := d.Set("hub", flattenNetworkConnectivitySpokeHub(res["hub"], d, config)); err != nil { + return fmt.Errorf("Error reading Spoke: %s", err) } - if err = d.Set("linked_vpc_network", flattenNetworkConnectivitySpokeLinkedVPCNetwork(res.LinkedVPCNetwork)); err != nil { - return fmt.Errorf("error setting linked_vpc_network in state: %s", err) + if err := d.Set("linked_vpn_tunnels", flattenNetworkConnectivitySpokeLinkedVpnTunnels(res["linkedVpnTunnels"], d, config)); err != nil { + return fmt.Errorf("Error reading Spoke: %s", err) } - if err = d.Set("linked_vpn_tunnels", flattenNetworkConnectivitySpokeLinkedVpnTunnels(res.LinkedVpnTunnels)); err != nil { - return fmt.Errorf("error setting linked_vpn_tunnels in state: %s", err) + if err := d.Set("linked_interconnect_attachments", flattenNetworkConnectivitySpokeLinkedInterconnectAttachments(res["linkedInterconnectAttachments"], d, config)); err != nil { + return fmt.Errorf("Error reading Spoke: %s", err) } - if err = d.Set("project", res.Project); err != nil { - return fmt.Errorf("error setting project in state: %s", err) + if err := d.Set("linked_router_appliance_instances", flattenNetworkConnectivitySpokeLinkedRouterApplianceInstances(res["linkedRouterApplianceInstances"], d, config)); err != nil { + return fmt.Errorf("Error reading Spoke: %s", err) } - if err = d.Set("create_time", res.CreateTime); err != nil { - return fmt.Errorf("error setting create_time in state: 
%s", err) + if err := d.Set("linked_vpc_network", flattenNetworkConnectivitySpokeLinkedVpcNetwork(res["linkedVpcNetwork"], d, config)); err != nil { + return fmt.Errorf("Error reading Spoke: %s", err) } - if err = d.Set("labels", flattenNetworkConnectivitySpokeLabels(res.Labels, d)); err != nil { - return fmt.Errorf("error setting labels in state: %s", err) + if err := d.Set("linked_producer_vpc_network", flattenNetworkConnectivitySpokeLinkedProducerVpcNetwork(res["linkedProducerVpcNetwork"], d, config)); err != nil { + return fmt.Errorf("Error reading Spoke: %s", err) } - if err = d.Set("state", res.State); err != nil { - return fmt.Errorf("error setting state in state: %s", err) + if err := d.Set("unique_id", flattenNetworkConnectivitySpokeUniqueId(res["uniqueId"], d, config)); err != nil { + return fmt.Errorf("Error reading Spoke: %s", err) } - if err = d.Set("terraform_labels", flattenNetworkConnectivitySpokeTerraformLabels(res.Labels, d)); err != nil { - return fmt.Errorf("error setting terraform_labels in state: %s", err) + if err := d.Set("state", flattenNetworkConnectivitySpokeState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Spoke: %s", err) } - if err = d.Set("unique_id", res.UniqueId); err != nil { - return fmt.Errorf("error setting unique_id in state: %s", err) + if err := d.Set("terraform_labels", flattenNetworkConnectivitySpokeTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Spoke: %s", err) } - if err = d.Set("update_time", res.UpdateTime); err != nil { - return fmt.Errorf("error setting update_time in state: %s", err) + if err := d.Set("effective_labels", flattenNetworkConnectivitySpokeEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Spoke: %s", err) } return nil } + func resourceNetworkConnectivitySpokeUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Spoke: %s", err) + } + billingProject = strings.TrimPrefix(project, "projects/") + + obj := make(map[string]interface{}) + descriptionProp, err := expandNetworkConnectivitySpokeDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandNetworkConnectivitySpokeEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp } - obj := &networkconnectivity.Spoke{ - Hub: dcl.String(d.Get("hub").(string)), - Location: dcl.String(d.Get("location").(string)), - Name: dcl.String(d.Get("name").(string)), - Description: dcl.String(d.Get("description").(string)), - Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), - LinkedInterconnectAttachments: expandNetworkConnectivitySpokeLinkedInterconnectAttachments(d.Get("linked_interconnect_attachments")), - LinkedRouterApplianceInstances: expandNetworkConnectivitySpokeLinkedRouterApplianceInstances(d.Get("linked_router_appliance_instances")), - 
LinkedVPCNetwork: expandNetworkConnectivitySpokeLinkedVPCNetwork(d.Get("linked_vpc_network")), - LinkedVpnTunnels: expandNetworkConnectivitySpokeLinkedVpnTunnels(d.Get("linked_vpn_tunnels")), - Project: dcl.String(project), + url, err := tpgresource.ReplaceVarsForId(d, config, "{{NetworkConnectivityBasePath}}projects/{{project}}/locations/{{location}}/spokes/{{name}}") + if err != nil { + return err } - directive := tpgdclresource.UpdateDirective - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + + log.Printf("[DEBUG] Updating Spoke %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("effective_labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } - billingProject := "" // err == nil indicates that the billing_project value was found if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.ApplySpoke(context.Background(), obj, directive...) - if _, ok := err.(dcl.DiffAfterApplyError); ok { - log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error updating Spoke: %s", err) - } + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating Spoke %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Spoke %q: %#v", d.Id(), res) + } - log.Printf("[DEBUG] Finished creating Spoke %q: %#v", d.Id(), res) + err = NetworkConnectivityOperationWaitTime( + config, res, tpgresource.GetResourceNameFromSelfLink(project), "Updating Spoke", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + } return resourceNetworkConnectivitySpokeRead(d, meta) } func resourceNetworkConnectivitySpokeDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) - project, err := tpgresource.GetProject(d, config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - obj := &networkconnectivity.Spoke{ - Hub: dcl.String(d.Get("hub").(string)), - Location: dcl.String(d.Get("location").(string)), - Name: dcl.String(d.Get("name").(string)), - Description: dcl.String(d.Get("description").(string)), - Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), - LinkedInterconnectAttachments: expandNetworkConnectivitySpokeLinkedInterconnectAttachments(d.Get("linked_interconnect_attachments")), - LinkedRouterApplianceInstances: 
expandNetworkConnectivitySpokeLinkedRouterApplianceInstances(d.Get("linked_router_appliance_instances")),
-		LinkedVPCNetwork:               expandNetworkConnectivitySpokeLinkedVPCNetwork(d.Get("linked_vpc_network")),
-		LinkedVpnTunnels:               expandNetworkConnectivitySpokeLinkedVpnTunnels(d.Get("linked_vpn_tunnels")),
-		Project:                        dcl.String(project),
+	billingProject := ""
+
+	project, err := tpgresource.GetProject(d, config)
+	if err != nil {
+		return fmt.Errorf("Error fetching project for Spoke: %s", err)
 	}
+	billingProject = strings.TrimPrefix(project, "projects/")
 
-	log.Printf("[DEBUG] Deleting Spoke %q", d.Id())
-	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	url, err := tpgresource.ReplaceVarsForId(d, config, "{{NetworkConnectivityBasePath}}projects/{{project}}/locations/{{location}}/spokes/{{name}}")
 	if err != nil {
 		return err
 	}
-	billingProject := project
+
+	var obj map[string]interface{}
+
 	// err == nil indicates that the billing_project value was found
 	if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
 		billingProject = bp
 	}
-	client := transport_tpg.NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete))
-	if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil {
-		d.SetId("")
-		return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err)
-	} else {
-		client.Config.BasePath = bp
+
+	headers := make(http.Header)
+
+	log.Printf("[DEBUG] Deleting Spoke %q", d.Id())
+	res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
+		Config:    config,
+		Method:    "DELETE",
+		Project:   billingProject,
+		RawURL:    url,
+		UserAgent: userAgent,
+		Body:      obj,
+		Timeout:   d.Timeout(schema.TimeoutDelete),
+		Headers:   headers,
+	})
+	if err != nil {
+		return transport_tpg.HandleNotFoundError(err, d, "Spoke")
 	}
-	if err := client.DeleteSpoke(context.Background(), obj); err != nil {
-		return fmt.Errorf("Error deleting Spoke: %s", err)
+
+	err = NetworkConnectivityOperationWaitTime(
+		config, res, tpgresource.GetResourceNameFromSelfLink(project), "Deleting Spoke", userAgent,
+		d.Timeout(schema.TimeoutDelete))
+
+	if err != nil {
+		return err
 	}
-	log.Printf("[DEBUG] Finished deleting Spoke %q", d.Id())
+	log.Printf("[DEBUG] Finished deleting Spoke %q: %#v", d.Id(), res)
 
 	return nil
 }
 
 func resourceNetworkConnectivitySpokeImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
 	config := meta.(*transport_tpg.Config)
 	if err := tpgresource.ParseImportId([]string{
-		"projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/spokes/(?P<name>[^/]+)",
-		"(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<name>[^/]+)",
-		"(?P<location>[^/]+)/(?P<name>[^/]+)",
+		"^projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/spokes/(?P<name>[^/]+)$",
+		"^(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<name>[^/]+)$",
+		"^(?P<location>[^/]+)/(?P<name>[^/]+)$",
 	}, d, config); err != nil {
 		return nil, err
 	}
@@ -554,201 +715,550 @@ func resourceNetworkConnectivitySpokeImport(d *schema.ResourceData, meta interfa
 	return []*schema.ResourceData{d}, nil
 }
 
-func expandNetworkConnectivitySpokeLinkedInterconnectAttachments(o interface{}) *networkconnectivity.SpokeLinkedInterconnectAttachments {
-	if o == nil {
-		return networkconnectivity.EmptySpokeLinkedInterconnectAttachments
+func flattenNetworkConnectivitySpokeName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenNetworkConnectivitySpokeCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenNetworkConnectivitySpokeUpdateTime(v interface{}, d
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkConnectivitySpokeLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return networkconnectivity.EmptySpokeLinkedInterconnectAttachments + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } } - obj := objArr[0].(map[string]interface{}) - return &networkconnectivity.SpokeLinkedInterconnectAttachments{ - SiteToSiteDataTransfer: dcl.Bool(obj["site_to_site_data_transfer"].(bool)), - Uris: tpgdclresource.ExpandStringArray(obj["uris"]), + + return transformed +} + +func flattenNetworkConnectivitySpokeDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkConnectivitySpokeHub(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v } + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenNetworkConnectivitySpokeLinkedInterconnectAttachments(obj *networkconnectivity.SpokeLinkedInterconnectAttachments) interface{} { - if obj == nil || obj.Empty() { +func flattenNetworkConnectivitySpokeLinkedVpnTunnels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { return nil } - transformed := map[string]interface{}{ - "site_to_site_data_transfer": obj.SiteToSiteDataTransfer, - "uris": obj.Uris, + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil } + transformed := make(map[string]interface{}) + transformed["uris"] = + flattenNetworkConnectivitySpokeLinkedVpnTunnelsUris(original["uris"], d, config) + transformed["site_to_site_data_transfer"] = + flattenNetworkConnectivitySpokeLinkedVpnTunnelsSiteToSiteDataTransfer(original["siteToSiteDataTransfer"], d, config) + transformed["include_import_ranges"] = + flattenNetworkConnectivitySpokeLinkedVpnTunnelsIncludeImportRanges(original["includeImportRanges"], d, config) + return []interface{}{transformed} +} +func flattenNetworkConnectivitySpokeLinkedVpnTunnelsUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkConnectivitySpokeLinkedVpnTunnelsSiteToSiteDataTransfer(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} +func flattenNetworkConnectivitySpokeLinkedVpnTunnelsIncludeImportRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkConnectivitySpokeLinkedInterconnectAttachments(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["uris"] = + flattenNetworkConnectivitySpokeLinkedInterconnectAttachmentsUris(original["uris"], d, config) + transformed["site_to_site_data_transfer"] = + flattenNetworkConnectivitySpokeLinkedInterconnectAttachmentsSiteToSiteDataTransfer(original["siteToSiteDataTransfer"], d, config) + transformed["include_import_ranges"] = + flattenNetworkConnectivitySpokeLinkedInterconnectAttachmentsIncludeImportRanges(original["includeImportRanges"], d, config) return 
[]interface{}{transformed} +} +func flattenNetworkConnectivitySpokeLinkedInterconnectAttachmentsUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} +func flattenNetworkConnectivitySpokeLinkedInterconnectAttachmentsSiteToSiteDataTransfer(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } -func expandNetworkConnectivitySpokeLinkedRouterApplianceInstances(o interface{}) *networkconnectivity.SpokeLinkedRouterApplianceInstances { - if o == nil { - return networkconnectivity.EmptySpokeLinkedRouterApplianceInstances +func flattenNetworkConnectivitySpokeLinkedInterconnectAttachmentsIncludeImportRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkConnectivitySpokeLinkedRouterApplianceInstances(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return networkconnectivity.EmptySpokeLinkedRouterApplianceInstances + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil } - obj := objArr[0].(map[string]interface{}) - return &networkconnectivity.SpokeLinkedRouterApplianceInstances{ - Instances: expandNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstancesArray(obj["instances"]), - SiteToSiteDataTransfer: dcl.Bool(obj["site_to_site_data_transfer"].(bool)), + transformed := make(map[string]interface{}) + transformed["instances"] = + flattenNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstances(original["instances"], d, config) + transformed["site_to_site_data_transfer"] = + flattenNetworkConnectivitySpokeLinkedRouterApplianceInstancesSiteToSiteDataTransfer(original["siteToSiteDataTransfer"], d, config) + transformed["include_import_ranges"] = + flattenNetworkConnectivitySpokeLinkedRouterApplianceInstancesIncludeImportRanges(original["includeImportRanges"], d, config) + return []interface{}{transformed} +} +func flattenNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstances(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "virtual_machine": flattenNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstancesVirtualMachine(original["virtualMachine"], d, config), + "ip_address": flattenNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstancesIpAddress(original["ipAddress"], d, config), + }) } + return transformed +} +func flattenNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstancesVirtualMachine(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } -func flattenNetworkConnectivitySpokeLinkedRouterApplianceInstances(obj *networkconnectivity.SpokeLinkedRouterApplianceInstances) interface{} { - if obj == nil || obj.Empty() { +func flattenNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstancesIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkConnectivitySpokeLinkedRouterApplianceInstancesSiteToSiteDataTransfer(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkConnectivitySpokeLinkedRouterApplianceInstancesIncludeImportRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkConnectivitySpokeLinkedVpcNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { return nil } - transformed := map[string]interface{}{ - "instances": flattenNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstancesArray(obj.Instances), - "site_to_site_data_transfer": obj.SiteToSiteDataTransfer, + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil } - + transformed := make(map[string]interface{}) + transformed["uri"] = + flattenNetworkConnectivitySpokeLinkedVpcNetworkUri(original["uri"], d, config) + transformed["exclude_export_ranges"] = + flattenNetworkConnectivitySpokeLinkedVpcNetworkExcludeExportRanges(original["excludeExportRanges"], d, config) + transformed["include_export_ranges"] = + flattenNetworkConnectivitySpokeLinkedVpcNetworkIncludeExportRanges(original["includeExportRanges"], d, config) return []interface{}{transformed} +} +func flattenNetworkConnectivitySpokeLinkedVpcNetworkUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkConnectivitySpokeLinkedVpcNetworkExcludeExportRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} +func flattenNetworkConnectivitySpokeLinkedVpcNetworkIncludeExportRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } -func expandNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstancesArray(o interface{}) []networkconnectivity.SpokeLinkedRouterApplianceInstancesInstances { - if o == nil { - return make([]networkconnectivity.SpokeLinkedRouterApplianceInstancesInstances, 0) + +func flattenNetworkConnectivitySpokeLinkedProducerVpcNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["network"] = + flattenNetworkConnectivitySpokeLinkedProducerVpcNetworkNetwork(original["network"], d, config) + transformed["peering"] = + flattenNetworkConnectivitySpokeLinkedProducerVpcNetworkPeering(original["peering"], d, config) + transformed["producer_network"] = + flattenNetworkConnectivitySpokeLinkedProducerVpcNetworkProducerNetwork(original["producerNetwork"], d, config) + transformed["include_export_ranges"] = + flattenNetworkConnectivitySpokeLinkedProducerVpcNetworkIncludeExportRanges(original["includeExportRanges"], d, config) + transformed["exclude_export_ranges"] = + flattenNetworkConnectivitySpokeLinkedProducerVpcNetworkExcludeExportRanges(original["excludeExportRanges"], d, config) + return []interface{}{transformed} +} +func flattenNetworkConnectivitySpokeLinkedProducerVpcNetworkNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkConnectivitySpokeLinkedProducerVpcNetworkPeering(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkConnectivitySpokeLinkedProducerVpcNetworkProducerNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenNetworkConnectivitySpokeLinkedProducerVpcNetworkIncludeExportRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkConnectivitySpokeLinkedProducerVpcNetworkExcludeExportRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkConnectivitySpokeUniqueId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkConnectivitySpokeState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} - objs := o.([]interface{}) - if len(objs) == 0 || objs[0] == nil { - return make([]networkconnectivity.SpokeLinkedRouterApplianceInstancesInstances, 0) +func flattenNetworkConnectivitySpokeTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v } - items := make([]networkconnectivity.SpokeLinkedRouterApplianceInstancesInstances, 0, len(objs)) - for _, item := range objs { - i := expandNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstances(item) - items = append(items, *i) + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("terraform_labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } } - return items + return transformed +} + +func flattenNetworkConnectivitySpokeEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNetworkConnectivitySpokeName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkConnectivitySpokeDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil } -func expandNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstances(o interface{}) *networkconnectivity.SpokeLinkedRouterApplianceInstancesInstances { - if o == nil { - return networkconnectivity.EmptySpokeLinkedRouterApplianceInstancesInstances +func expandNetworkConnectivitySpokeHub(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkConnectivitySpokeLinkedVpnTunnels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) - obj := o.(map[string]interface{}) - return &networkconnectivity.SpokeLinkedRouterApplianceInstancesInstances{ - IPAddress: dcl.String(obj["ip_address"].(string)), - VirtualMachine: dcl.String(obj["virtual_machine"].(string)), + transformedUris, err := expandNetworkConnectivitySpokeLinkedVpnTunnelsUris(original["uris"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uris"] = transformedUris } -} -func flattenNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstancesArray(objs []networkconnectivity.SpokeLinkedRouterApplianceInstancesInstances) []interface{} { - if objs == nil { - return nil + transformedSiteToSiteDataTransfer, err := 
expandNetworkConnectivitySpokeLinkedVpnTunnelsSiteToSiteDataTransfer(original["site_to_site_data_transfer"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSiteToSiteDataTransfer); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["siteToSiteDataTransfer"] = transformedSiteToSiteDataTransfer } - items := []interface{}{} - for _, item := range objs { - i := flattenNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstances(&item) - items = append(items, i) + transformedIncludeImportRanges, err := expandNetworkConnectivitySpokeLinkedVpnTunnelsIncludeImportRanges(original["include_import_ranges"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIncludeImportRanges); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["includeImportRanges"] = transformedIncludeImportRanges } - return items + return transformed, nil } -func flattenNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstances(obj *networkconnectivity.SpokeLinkedRouterApplianceInstancesInstances) interface{} { - if obj == nil || obj.Empty() { - return nil +func expandNetworkConnectivitySpokeLinkedVpnTunnelsUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkConnectivitySpokeLinkedVpnTunnelsSiteToSiteDataTransfer(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkConnectivitySpokeLinkedVpnTunnelsIncludeImportRanges(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkConnectivitySpokeLinkedInterconnectAttachments(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil } - transformed := map[string]interface{}{ - "ip_address": obj.IPAddress, - "virtual_machine": obj.VirtualMachine, + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUris, err := expandNetworkConnectivitySpokeLinkedInterconnectAttachmentsUris(original["uris"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uris"] = transformedUris } - return transformed + transformedSiteToSiteDataTransfer, err := expandNetworkConnectivitySpokeLinkedInterconnectAttachmentsSiteToSiteDataTransfer(original["site_to_site_data_transfer"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSiteToSiteDataTransfer); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["siteToSiteDataTransfer"] = transformedSiteToSiteDataTransfer + } + transformedIncludeImportRanges, err := expandNetworkConnectivitySpokeLinkedInterconnectAttachmentsIncludeImportRanges(original["include_import_ranges"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIncludeImportRanges); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["includeImportRanges"] = transformedIncludeImportRanges + } + + return transformed, nil } -func expandNetworkConnectivitySpokeLinkedVPCNetwork(o interface{}) *networkconnectivity.SpokeLinkedVPCNetwork { - if o == nil { - return networkconnectivity.EmptySpokeLinkedVPCNetwork +func 
expandNetworkConnectivitySpokeLinkedInterconnectAttachmentsUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkConnectivitySpokeLinkedInterconnectAttachmentsSiteToSiteDataTransfer(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkConnectivitySpokeLinkedInterconnectAttachmentsIncludeImportRanges(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkConnectivitySpokeLinkedRouterApplianceInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return networkconnectivity.EmptySpokeLinkedVPCNetwork + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInstances, err := expandNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstances(original["instances"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["instances"] = transformedInstances } - obj := objArr[0].(map[string]interface{}) - return &networkconnectivity.SpokeLinkedVPCNetwork{ - Uri: dcl.String(obj["uri"].(string)), - ExcludeExportRanges: tpgdclresource.ExpandStringArray(obj["exclude_export_ranges"]), + + transformedSiteToSiteDataTransfer, err := expandNetworkConnectivitySpokeLinkedRouterApplianceInstancesSiteToSiteDataTransfer(original["site_to_site_data_transfer"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSiteToSiteDataTransfer); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["siteToSiteDataTransfer"] = transformedSiteToSiteDataTransfer } -} -func flattenNetworkConnectivitySpokeLinkedVPCNetwork(obj *networkconnectivity.SpokeLinkedVPCNetwork) interface{} { - if obj == nil || obj.Empty() { - return nil + transformedIncludeImportRanges, err := expandNetworkConnectivitySpokeLinkedRouterApplianceInstancesIncludeImportRanges(original["include_import_ranges"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIncludeImportRanges); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["includeImportRanges"] = transformedIncludeImportRanges } - transformed := map[string]interface{}{ - "uri": obj.Uri, - "exclude_export_ranges": obj.ExcludeExportRanges, + + return transformed, nil +} + +func expandNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedVirtualMachine, err := expandNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstancesVirtualMachine(original["virtual_machine"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVirtualMachine); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["virtualMachine"] = transformedVirtualMachine + } + + 
transformedIpAddress, err := expandNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstancesIpAddress(original["ip_address"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIpAddress); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ipAddress"] = transformedIpAddress + } + + req = append(req, transformed) } + return req, nil +} - return []interface{}{transformed} +func expandNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstancesVirtualMachine(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstancesIpAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkConnectivitySpokeLinkedRouterApplianceInstancesSiteToSiteDataTransfer(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} +func expandNetworkConnectivitySpokeLinkedRouterApplianceInstancesIncludeImportRanges(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil } -func expandNetworkConnectivitySpokeLinkedVpnTunnels(o interface{}) *networkconnectivity.SpokeLinkedVpnTunnels { - if o == nil { - return networkconnectivity.EmptySpokeLinkedVpnTunnels +func expandNetworkConnectivitySpokeLinkedVpcNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return networkconnectivity.EmptySpokeLinkedVpnTunnels + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUri, err := expandNetworkConnectivitySpokeLinkedVpcNetworkUri(original["uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uri"] = transformedUri } - obj := objArr[0].(map[string]interface{}) - return &networkconnectivity.SpokeLinkedVpnTunnels{ - SiteToSiteDataTransfer: dcl.Bool(obj["site_to_site_data_transfer"].(bool)), - Uris: tpgdclresource.ExpandStringArray(obj["uris"]), + + transformedExcludeExportRanges, err := expandNetworkConnectivitySpokeLinkedVpcNetworkExcludeExportRanges(original["exclude_export_ranges"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExcludeExportRanges); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["excludeExportRanges"] = transformedExcludeExportRanges } + + transformedIncludeExportRanges, err := expandNetworkConnectivitySpokeLinkedVpcNetworkIncludeExportRanges(original["include_export_ranges"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIncludeExportRanges); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["includeExportRanges"] = transformedIncludeExportRanges + } + + return transformed, nil } -func flattenNetworkConnectivitySpokeLinkedVpnTunnels(obj *networkconnectivity.SpokeLinkedVpnTunnels) interface{} { - if obj == nil || obj.Empty() { - return nil +func expandNetworkConnectivitySpokeLinkedVpcNetworkUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + return v, nil +} + +func expandNetworkConnectivitySpokeLinkedVpcNetworkExcludeExportRanges(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkConnectivitySpokeLinkedVpcNetworkIncludeExportRanges(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkConnectivitySpokeLinkedProducerVpcNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil } - transformed := map[string]interface{}{ - "site_to_site_data_transfer": obj.SiteToSiteDataTransfer, - "uris": obj.Uris, + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNetwork, err := expandNetworkConnectivitySpokeLinkedProducerVpcNetworkNetwork(original["network"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNetwork); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["network"] = transformedNetwork } - return []interface{}{transformed} + transformedPeering, err := expandNetworkConnectivitySpokeLinkedProducerVpcNetworkPeering(original["peering"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPeering); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["peering"] = transformedPeering + } -} + transformedProducerNetwork, err := expandNetworkConnectivitySpokeLinkedProducerVpcNetworkProducerNetwork(original["producer_network"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProducerNetwork); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["producerNetwork"] = transformedProducerNetwork + } -func flattenNetworkConnectivitySpokeLabels(v map[string]string, d *schema.ResourceData) interface{} { - if v == nil { - return nil + transformedIncludeExportRanges, err := expandNetworkConnectivitySpokeLinkedProducerVpcNetworkIncludeExportRanges(original["include_export_ranges"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIncludeExportRanges); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["includeExportRanges"] = transformedIncludeExportRanges } - transformed := make(map[string]interface{}) - if l, ok := d.Get("labels").(map[string]interface{}); ok { - for k, _ := range l { - transformed[k] = v[k] - } + transformedExcludeExportRanges, err := expandNetworkConnectivitySpokeLinkedProducerVpcNetworkExcludeExportRanges(original["exclude_export_ranges"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExcludeExportRanges); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["excludeExportRanges"] = transformedExcludeExportRanges } - return transformed + return transformed, nil +} + +func expandNetworkConnectivitySpokeLinkedProducerVpcNetworkNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil } -func flattenNetworkConnectivitySpokeTerraformLabels(v map[string]string, d *schema.ResourceData) interface{} { +func expandNetworkConnectivitySpokeLinkedProducerVpcNetworkPeering(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} 
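+
+// As with the other expanders in this file, each nested Terraform block is
+// translated from its snake_case schema key to the camelCase field name the
+// Network Connectivity API expects, and any value that is empty according to
+// tpgresource.IsEmptyValue is omitted from the request body rather than being
+// sent as a zero value.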
+ +func expandNetworkConnectivitySpokeLinkedProducerVpcNetworkProducerNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkConnectivitySpokeLinkedProducerVpcNetworkIncludeExportRanges(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkConnectivitySpokeLinkedProducerVpcNetworkExcludeExportRanges(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkConnectivitySpokeEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { - return nil + return map[string]string{}, nil } - - transformed := make(map[string]interface{}) - if l, ok := d.Get("terraform_labels").(map[string]interface{}); ok { - for k, _ := range l { - transformed[k] = v[k] - } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) } - - return transformed + return m, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_spoke_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_spoke_sweeper.go index e1c264b8682..d13481b23f3 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_spoke_sweeper.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_spoke_sweeper.go @@ -3,16 +3,15 @@ // ---------------------------------------------------------------------------- // -// *** AUTO GENERATED CODE *** Type: DCL *** +// *** AUTO GENERATED CODE *** Type: MMv1 *** // // ---------------------------------------------------------------------------- // -// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) -// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). -// Changes will need to be made to the DCL or Magic Modules instead of here. +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. // -// We are not currently able to accept contributions to this file. If changes -// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- @@ -21,11 +20,12 @@ package networkconnectivity import ( "context" "log" + "strings" "testing" - networkconnectivity "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity" "github.com/hashicorp/terraform-provider-google/google/envvar" "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) @@ -33,8 +33,10 @@ func init() { sweeper.AddTestSweepers("NetworkConnectivitySpoke", testSweepNetworkConnectivitySpoke) } +// At the time of writing, the CI only passes us-central1 as the region func testSweepNetworkConnectivitySpoke(region string) error { - log.Print("[INFO][SWEEPER_LOG] Starting sweeper for NetworkConnectivitySpoke") + resourceName := "NetworkConnectivitySpoke" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) config, err := sweeper.SharedConfigForRegion(region) if err != nil { @@ -51,23 +53,87 @@ func testSweepNetworkConnectivitySpoke(region string) error { t := &testing.T{} billingId := envvar.GetTestBillingAccountFromEnv(t) - // Setup variables to be used for Delete arguments. - d := map[string]string{ - "project": config.Project, - "region": region, - "location": region, - "zone": "-", - "billing_account": billingId, + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, } - client := transport_tpg.NewDCLNetworkConnectivityClient(config, config.UserAgent, "", 0) - err = client.DeleteAllSpoke(context.Background(), d["project"], d["location"], isDeletableNetworkConnectivitySpoke) + listTemplate := strings.Split("https://networkconnectivity.googleapis.com/v1/projects/{{project}}/locations/{{location}}/spokes", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) if err != nil { - return err + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil } - return nil -} -func isDeletableNetworkConnectivitySpoke(r *networkconnectivity.Spoke) bool { - return sweeper.IsSweepableTestResource(*r.Name) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["spokes"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
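+	// Only resources whose names carry a recognized test prefix (checked via
+	// sweeper.IsSweepableTestResource below) are deleted; everything else is
+	// skipped so that non-test infrastructure is never swept.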
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://networkconnectivity.googleapis.com/v1/projects/{{project}}/locations/{{location}}/spokes/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_client_tls_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_client_tls_policy.go new file mode 100644 index 00000000000..f8260a57906 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_client_tls_policy.go @@ -0,0 +1,885 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package networksecurity + +import ( + "fmt" + "log" + "net/http" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceNetworkSecurityClientTlsPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkSecurityClientTlsPolicyCreate, + Read: resourceNetworkSecurityClientTlsPolicyRead, + Update: resourceNetworkSecurityClientTlsPolicyUpdate, + Delete: resourceNetworkSecurityClientTlsPolicyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceNetworkSecurityClientTlsPolicyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.SetLabelsDiff, + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the ClientTlsPolicy resource.`, + }, + "client_certificate": { + Type: schema.TypeList, + Optional: true, + Description: `Defines a mechanism to provision client identity (public and private keys) for peer to peer authentication. The presence of this dictates mTLS.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "certificate_provider_instance": { + Type: schema.TypeList, + Optional: true, + Description: `The certificate provider instance specification that will be passed to the data plane, which will be used to load necessary credential information.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "plugin_instance": { + Type: schema.TypeString, + Required: true, + Description: `Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "google_cloud_private_spiffe" to use Certificate Authority Service certificate provider instance.`, + }, + }, + }, + ExactlyOneOf: []string{}, + }, + "grpc_endpoint": { + Type: schema.TypeList, + Optional: true, + Description: `gRPC specific configuration to access the gRPC server to obtain the cert and private key.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "target_uri": { + Type: schema.TypeString, + Required: true, + Description: `The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:".`, + }, + }, + }, + ExactlyOneOf: []string{}, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A free-text description of the resource. Max length 1024 characters.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Set of label tags associated with the ClientTlsPolicy resource. + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "location": { + Type: schema.TypeString, + Optional: true, + Description: `The location of the client tls policy. 
+The default value is 'global'.`, + Default: "global", + }, + "server_validation_ca": { + Type: schema.TypeList, + Optional: true, + Description: `Defines the mechanism to obtain the Certificate Authority certificate to validate the server certificate. If empty, client does not validate the server certificate.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "certificate_provider_instance": { + Type: schema.TypeList, + Optional: true, + Description: `The certificate provider instance specification that will be passed to the data plane, which will be used to load necessary credential information.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "plugin_instance": { + Type: schema.TypeString, + Required: true, + Description: `Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "google_cloud_private_spiffe" to use Certificate Authority Service certificate provider instance.`, + }, + }, + }, + ExactlyOneOf: []string{}, + }, + "grpc_endpoint": { + Type: schema.TypeList, + Optional: true, + Description: `gRPC specific configuration to access the gRPC server to obtain the cert and private key.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "target_uri": { + Type: schema.TypeString, + Required: true, + Description: `The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:".`, + }, + }, + }, + ExactlyOneOf: []string{}, + }, + }, + }, + }, + "sni": { + Type: schema.TypeString, + Optional: true, + Description: `Server Name Indication string to present to the server during TLS handshake. E.g: "secure.example.com".`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the ClientTlsPolicy was created in UTC.`, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the ClientTlsPolicy was updated in UTC.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceNetworkSecurityClientTlsPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandNetworkSecurityClientTlsPolicyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + sniProp, err := expandNetworkSecurityClientTlsPolicySni(d.Get("sni"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("sni"); !tpgresource.IsEmptyValue(reflect.ValueOf(sniProp)) && (ok || !reflect.DeepEqual(v, sniProp)) { + obj["sni"] = sniProp + } + 
clientCertificateProp, err := expandNetworkSecurityClientTlsPolicyClientCertificate(d.Get("client_certificate"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client_certificate"); !tpgresource.IsEmptyValue(reflect.ValueOf(clientCertificateProp)) && (ok || !reflect.DeepEqual(v, clientCertificateProp)) { + obj["clientCertificate"] = clientCertificateProp + } + serverValidationCaProp, err := expandNetworkSecurityClientTlsPolicyServerValidationCa(d.Get("server_validation_ca"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("server_validation_ca"); !tpgresource.IsEmptyValue(reflect.ValueOf(serverValidationCaProp)) && (ok || !reflect.DeepEqual(v, serverValidationCaProp)) { + obj["serverValidationCa"] = serverValidationCaProp + } + labelsProp, err := expandNetworkSecurityClientTlsPolicyEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkSecurityBasePath}}projects/{{project}}/locations/{{location}}/clientTlsPolicies?clientTlsPolicyId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ClientTlsPolicy: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ClientTlsPolicy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating ClientTlsPolicy: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/clientTlsPolicies/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = NetworkSecurityOperationWaitTime( + config, res, project, "Creating ClientTlsPolicy", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create ClientTlsPolicy: %s", err) + } + + log.Printf("[DEBUG] Finished creating ClientTlsPolicy %q: %#v", d.Id(), res) + + return resourceNetworkSecurityClientTlsPolicyRead(d, meta) +} + +func resourceNetworkSecurityClientTlsPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkSecurityBasePath}}projects/{{project}}/locations/{{location}}/clientTlsPolicies/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ClientTlsPolicy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NetworkSecurityClientTlsPolicy %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading ClientTlsPolicy: %s", err) + } + + if err := d.Set("create_time", flattenNetworkSecurityClientTlsPolicyCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading ClientTlsPolicy: %s", err) + } + if err := d.Set("update_time", flattenNetworkSecurityClientTlsPolicyUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading ClientTlsPolicy: %s", err) + } + if err := d.Set("labels", flattenNetworkSecurityClientTlsPolicyLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading ClientTlsPolicy: %s", err) + } + if err := d.Set("description", flattenNetworkSecurityClientTlsPolicyDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading ClientTlsPolicy: %s", err) + } + if err := d.Set("sni", flattenNetworkSecurityClientTlsPolicySni(res["sni"], d, config)); err != nil { + return fmt.Errorf("Error reading ClientTlsPolicy: %s", err) + } + if err := d.Set("client_certificate", flattenNetworkSecurityClientTlsPolicyClientCertificate(res["clientCertificate"], d, config)); err != nil { + return fmt.Errorf("Error reading ClientTlsPolicy: %s", err) + } + if err := d.Set("server_validation_ca", flattenNetworkSecurityClientTlsPolicyServerValidationCa(res["serverValidationCa"], d, config)); err != nil { + return fmt.Errorf("Error reading ClientTlsPolicy: %s", err) + } + if err := d.Set("terraform_labels", flattenNetworkSecurityClientTlsPolicyTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading ClientTlsPolicy: %s", err) + } + if err := d.Set("effective_labels", flattenNetworkSecurityClientTlsPolicyEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading ClientTlsPolicy: %s", err) + } + + return nil +} + +func resourceNetworkSecurityClientTlsPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ClientTlsPolicy: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandNetworkSecurityClientTlsPolicyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + sniProp, err := expandNetworkSecurityClientTlsPolicySni(d.Get("sni"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("sni"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sniProp)) { + obj["sni"] = sniProp + } + clientCertificateProp, err := 
expandNetworkSecurityClientTlsPolicyClientCertificate(d.Get("client_certificate"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client_certificate"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientCertificateProp)) { + obj["clientCertificate"] = clientCertificateProp + } + serverValidationCaProp, err := expandNetworkSecurityClientTlsPolicyServerValidationCa(d.Get("server_validation_ca"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("server_validation_ca"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, serverValidationCaProp)) { + obj["serverValidationCa"] = serverValidationCaProp + } + labelsProp, err := expandNetworkSecurityClientTlsPolicyEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkSecurityBasePath}}projects/{{project}}/locations/{{location}}/clientTlsPolicies/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating ClientTlsPolicy %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("sni") { + updateMask = append(updateMask, "sni") + } + + if d.HasChange("client_certificate") { + updateMask = append(updateMask, "clientCertificate") + } + + if d.HasChange("server_validation_ca") { + updateMask = append(updateMask, "serverValidationCa") + } + + if d.HasChange("effective_labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating ClientTlsPolicy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ClientTlsPolicy %q: %#v", d.Id(), res) + } + + err = NetworkSecurityOperationWaitTime( + config, res, project, "Updating ClientTlsPolicy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + } + + return resourceNetworkSecurityClientTlsPolicyRead(d, meta) +} + +func resourceNetworkSecurityClientTlsPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ClientTlsPolicy: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, 
"{{NetworkSecurityBasePath}}projects/{{project}}/locations/{{location}}/clientTlsPolicies/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting ClientTlsPolicy %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ClientTlsPolicy") + } + + err = NetworkSecurityOperationWaitTime( + config, res, project, "Deleting ClientTlsPolicy", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting ClientTlsPolicy %q: %#v", d.Id(), res) + return nil +} + +func resourceNetworkSecurityClientTlsPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^projects/(?P[^/]+)/locations/(?P[^/]+)/clientTlsPolicies/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/clientTlsPolicies/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNetworkSecurityClientTlsPolicyCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityClientTlsPolicyUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityClientTlsPolicyLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenNetworkSecurityClientTlsPolicyDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityClientTlsPolicySni(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityClientTlsPolicyClientCertificate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["grpc_endpoint"] = + flattenNetworkSecurityClientTlsPolicyClientCertificateGrpcEndpoint(original["grpcEndpoint"], d, config) + transformed["certificate_provider_instance"] = + flattenNetworkSecurityClientTlsPolicyClientCertificateCertificateProviderInstance(original["certificateProviderInstance"], d, config) + return []interface{}{transformed} +} +func flattenNetworkSecurityClientTlsPolicyClientCertificateGrpcEndpoint(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["target_uri"] = + flattenNetworkSecurityClientTlsPolicyClientCertificateGrpcEndpointTargetUri(original["targetUri"], d, config) + return []interface{}{transformed} +} +func flattenNetworkSecurityClientTlsPolicyClientCertificateGrpcEndpointTargetUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityClientTlsPolicyClientCertificateCertificateProviderInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["plugin_instance"] = + flattenNetworkSecurityClientTlsPolicyClientCertificateCertificateProviderInstancePluginInstance(original["pluginInstance"], d, config) + return []interface{}{transformed} +} +func flattenNetworkSecurityClientTlsPolicyClientCertificateCertificateProviderInstancePluginInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityClientTlsPolicyServerValidationCa(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "grpc_endpoint": flattenNetworkSecurityClientTlsPolicyServerValidationCaGrpcEndpoint(original["grpcEndpoint"], d, config), + "certificate_provider_instance": flattenNetworkSecurityClientTlsPolicyServerValidationCaCertificateProviderInstance(original["certificateProviderInstance"], d, config), + }) + } + return transformed +} +func flattenNetworkSecurityClientTlsPolicyServerValidationCaGrpcEndpoint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["target_uri"] = + flattenNetworkSecurityClientTlsPolicyServerValidationCaGrpcEndpointTargetUri(original["targetUri"], d, config) + return []interface{}{transformed} +} +func flattenNetworkSecurityClientTlsPolicyServerValidationCaGrpcEndpointTargetUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityClientTlsPolicyServerValidationCaCertificateProviderInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["plugin_instance"] = + flattenNetworkSecurityClientTlsPolicyServerValidationCaCertificateProviderInstancePluginInstance(original["pluginInstance"], d, config) + return []interface{}{transformed} +} +func flattenNetworkSecurityClientTlsPolicyServerValidationCaCertificateProviderInstancePluginInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v 
+} + +func flattenNetworkSecurityClientTlsPolicyTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("terraform_labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenNetworkSecurityClientTlsPolicyEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNetworkSecurityClientTlsPolicyDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkSecurityClientTlsPolicySni(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkSecurityClientTlsPolicyClientCertificate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedGrpcEndpoint, err := expandNetworkSecurityClientTlsPolicyClientCertificateGrpcEndpoint(original["grpc_endpoint"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGrpcEndpoint); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["grpcEndpoint"] = transformedGrpcEndpoint + } + + transformedCertificateProviderInstance, err := expandNetworkSecurityClientTlsPolicyClientCertificateCertificateProviderInstance(original["certificate_provider_instance"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCertificateProviderInstance); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["certificateProviderInstance"] = transformedCertificateProviderInstance + } + + return transformed, nil +} + +func expandNetworkSecurityClientTlsPolicyClientCertificateGrpcEndpoint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTargetUri, err := expandNetworkSecurityClientTlsPolicyClientCertificateGrpcEndpointTargetUri(original["target_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTargetUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["targetUri"] = transformedTargetUri + } + + return transformed, nil +} + +func expandNetworkSecurityClientTlsPolicyClientCertificateGrpcEndpointTargetUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkSecurityClientTlsPolicyClientCertificateCertificateProviderInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPluginInstance, err := 
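The three label fields flattened above are deliberately asymmetric: labels and terraform_labels are produced by filtering the API's label map down to the keys declared in configuration (plus provider defaults), while effective_labels passes the full server-side map through. That filtering is what keeps server-added labels from surfacing as diffs. A sketch of the filtering step, with illustrative data:

package main

import "fmt"

// filterLabels mirrors the labels/terraform_labels flatteners: only keys
// declared in configuration survive, so labels added server-side never
// appear as a diff; effective_labels would return api unfiltered.
func filterLabels(api, declared map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{})
	for k := range declared {
		out[k] = api[k]
	}
	return out
}

func main() {
	api := map[string]interface{}{"team": "net", "goog-managed": "true"}
	declared := map[string]interface{}{"team": nil} // values are ignored; only keys matter
	fmt.Println(filterLabels(api, declared))        // map[team:net]
}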
expandNetworkSecurityClientTlsPolicyClientCertificateCertificateProviderInstancePluginInstance(original["plugin_instance"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPluginInstance); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pluginInstance"] = transformedPluginInstance + } + + return transformed, nil +} + +func expandNetworkSecurityClientTlsPolicyClientCertificateCertificateProviderInstancePluginInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkSecurityClientTlsPolicyServerValidationCa(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedGrpcEndpoint, err := expandNetworkSecurityClientTlsPolicyServerValidationCaGrpcEndpoint(original["grpc_endpoint"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGrpcEndpoint); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["grpcEndpoint"] = transformedGrpcEndpoint + } + + transformedCertificateProviderInstance, err := expandNetworkSecurityClientTlsPolicyServerValidationCaCertificateProviderInstance(original["certificate_provider_instance"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCertificateProviderInstance); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["certificateProviderInstance"] = transformedCertificateProviderInstance + } + + req = append(req, transformed) + } + return req, nil +} + +func expandNetworkSecurityClientTlsPolicyServerValidationCaGrpcEndpoint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTargetUri, err := expandNetworkSecurityClientTlsPolicyServerValidationCaGrpcEndpointTargetUri(original["target_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTargetUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["targetUri"] = transformedTargetUri + } + + return transformed, nil +} + +func expandNetworkSecurityClientTlsPolicyServerValidationCaGrpcEndpointTargetUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkSecurityClientTlsPolicyServerValidationCaCertificateProviderInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPluginInstance, err := expandNetworkSecurityClientTlsPolicyServerValidationCaCertificateProviderInstancePluginInstance(original["plugin_instance"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPluginInstance); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pluginInstance"] = transformedPluginInstance + } + + 
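The expanders run the opposite direction: a schema.TypeList block arrives as a []interface{}; an absent or empty block expands to nil so the field is omitted from the request body, and snake_case schema keys map back to the API's camelCase names. A self-contained sketch of the convention (names and data are illustrative):

package main

import "fmt"

// expandGrpcEndpoint mirrors the generated expanders: an unset or empty
// block expands to nil (omitted from the request), otherwise snake_case
// schema keys are mapped back to the API's camelCase names.
func expandGrpcEndpoint(v interface{}) interface{} {
	l, ok := v.([]interface{})
	if !ok || len(l) == 0 || l[0] == nil {
		return nil
	}
	original := l[0].(map[string]interface{})
	transformed := map[string]interface{}{}
	if s, ok := original["target_uri"].(string); ok && s != "" {
		transformed["targetUri"] = s
	}
	return transformed
}

func main() {
	cfg := []interface{}{map[string]interface{}{"target_uri": "unix:/var/run/sds.sock"}}
	fmt.Println(expandGrpcEndpoint(cfg)) // map[targetUri:unix:/var/run/sds.sock]
}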
return transformed, nil +} + +func expandNetworkSecurityClientTlsPolicyServerValidationCaCertificateProviderInstancePluginInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkSecurityClientTlsPolicyEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_client_tls_policy_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_client_tls_policy_sweeper.go new file mode 100644 index 00000000000..02672954234 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_client_tls_policy_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package networksecurity + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("NetworkSecurityClientTlsPolicy", testSweepNetworkSecurityClientTlsPolicy) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepNetworkSecurityClientTlsPolicy(region string) error { + resourceName := "NetworkSecurityClientTlsPolicy" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://networksecurity.googleapis.com/v1/projects/{{project}}/locations/{{location}}/clientTlsPolicies", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return 
nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["clientTlsPolicies"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be swept + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://networksecurity.googleapis.com/v1/projects/{{project}}/locations/{{location}}/clientTlsPolicies/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_gateway_security_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_gateway_security_policy.go index b66a7d42f6c..6bf0a9e3293 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_gateway_security_policy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_gateway_security_policy.go @@ -72,6 +72,12 @@ gatewaySecurityPolicy should match the pattern:(^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$).`, The default value is 'global'.`, Default: "global", }, + "tls_inspection_policy": { + Type: schema.TypeString, + Optional: true, + Description: `Name of a TlsInspectionPolicy resource that defines how TLS inspection is performed for any rule that enables it.
+Note: google_network_security_tls_inspection_policy resource is still in [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html), therefore using it requires importing the google-beta provider.`, + }, "create_time": { Type: schema.TypeString, Computed: true, @@ -116,6 +122,12 @@ func resourceNetworkSecurityGatewaySecurityPolicyCreate(d *schema.ResourceData, } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } + tlsInspectionPolicyProp, err := expandNetworkSecurityGatewaySecurityPolicyTlsInspectionPolicy(d.Get("tls_inspection_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tls_inspection_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(tlsInspectionPolicyProp)) && (ok || !reflect.DeepEqual(v, tlsInspectionPolicyProp)) { + obj["tlsInspectionPolicy"] = tlsInspectionPolicyProp + } url, err := tpgresource.ReplaceVars(d, config, "{{NetworkSecurityBasePath}}projects/{{project}}/locations/{{location}}/gatewaySecurityPolicies?gatewaySecurityPolicyId={{name}}") if err != nil { @@ -253,6 +265,12 @@ func resourceNetworkSecurityGatewaySecurityPolicyUpdate(d *schema.ResourceData, } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } + tlsInspectionPolicyProp, err := expandNetworkSecurityGatewaySecurityPolicyTlsInspectionPolicy(d.Get("tls_inspection_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tls_inspection_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tlsInspectionPolicyProp)) { + obj["tlsInspectionPolicy"] = tlsInspectionPolicyProp + } url, err := tpgresource.ReplaceVars(d, config, "{{NetworkSecurityBasePath}}projects/{{project}}/locations/{{location}}/gatewaySecurityPolicies/{{name}}") if err != nil { @@ -266,6 +284,10 @@ func resourceNetworkSecurityGatewaySecurityPolicyUpdate(d *schema.ResourceData, if d.HasChange("description") { updateMask = append(updateMask, "description") } + + if d.HasChange("tls_inspection_policy") { + updateMask = append(updateMask, "tlsInspectionPolicy") + } // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) @@ -404,3 +426,7 @@ func flattenNetworkSecurityGatewaySecurityPolicyDescription(v interface{}, d *sc func expandNetworkSecurityGatewaySecurityPolicyDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } + +func expandNetworkSecurityGatewaySecurityPolicyTlsInspectionPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_server_tls_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_server_tls_policy.go new file mode 100644 index 00000000000..c76b55dde93 --- /dev/null +++
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_server_tls_policy.go @@ -0,0 +1,984 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package networksecurity + +import ( + "fmt" + "log" + "net/http" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceNetworkSecurityServerTlsPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkSecurityServerTlsPolicyCreate, + Read: resourceNetworkSecurityServerTlsPolicyRead, + Update: resourceNetworkSecurityServerTlsPolicyUpdate, + Delete: resourceNetworkSecurityServerTlsPolicyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceNetworkSecurityServerTlsPolicyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.SetLabelsDiff, + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the ServerTlsPolicy resource.`, + }, + "allow_open": { + Type: schema.TypeBool, + Optional: true, + Description: `This field applies only for Traffic Director policies. It must be set to false for external HTTPS load balancer policies. +Determines if server allows plaintext connections. If set to true, server allows plain text connections. By default, it is set to false. This setting is not exclusive of other encryption modes. For example, if allowOpen and mtlsPolicy are set, server allows both plain text and mTLS connections. See documentation of other encryption modes to confirm compatibility. +Consider using it if you wish to upgrade your deployment to TLS in place while having mixed TLS and non-TLS traffic reaching port :80.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A free-text description of the resource. Max length 1024 characters.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Set of label tags associated with the ServerTlsPolicy resource. + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "location": { + Type: schema.TypeString, + Optional: true, + Description: `The location of the server tls policy.
+The default value is 'global'.`, + Default: "global", + }, + "mtls_policy": { + Type: schema.TypeList, + Optional: true, + Description: `This field is required if the policy is used with external HTTPS load balancers. This field can be empty for Traffic Director. +Defines a mechanism to provision peer validation certificates for peer to peer authentication (Mutual TLS - mTLS). If not specified, client certificate will not be requested. The connection is treated as TLS and not mTLS. If allowOpen and mtlsPolicy are set, server allows both plain text and mTLS connections.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_validation_ca": { + Type: schema.TypeList, + Optional: true, + Description: `Required if the policy is to be used with Traffic Director. For external HTTPS load balancers it must be empty. +Defines the mechanism to obtain the Certificate Authority certificate to validate the client certificate.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "certificate_provider_instance": { + Type: schema.TypeList, + Optional: true, + Description: `Optional if policy is to be used with Traffic Director. For external HTTPS load balancers it must be empty. +Defines a mechanism to provision server identity (public and private keys). Cannot be combined with allowOpen as a permissive mode that allows both plain text and TLS is not supported.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "plugin_instance": { + Type: schema.TypeString, + Required: true, + Description: `Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "google_cloud_private_spiffe" to use Certificate Authority Service certificate provider instance.`, + }, + }, + }, + ExactlyOneOf: []string{}, + }, + "grpc_endpoint": { + Type: schema.TypeList, + Optional: true, + Description: `gRPC specific configuration to access the gRPC server to obtain the cert and private key.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "target_uri": { + Type: schema.TypeString, + Required: true, + Description: `The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:".`, + }, + }, + }, + ExactlyOneOf: []string{}, + }, + }, + }, + }, + "client_validation_mode": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"CLIENT_VALIDATION_MODE_UNSPECIFIED", "ALLOW_INVALID_OR_MISSING_CLIENT_CERT", "REJECT_INVALID", ""}), + Description: `When the client presents an invalid certificate or no certificate to the load balancer, the clientValidationMode specifies how the client connection is handled. +Required if the policy is to be used with external HTTPS load balancing. For Traffic Director it must be empty. Possible values: ["CLIENT_VALIDATION_MODE_UNSPECIFIED", "ALLOW_INVALID_OR_MISSING_CLIENT_CERT", "REJECT_INVALID"]`, + }, + "client_validation_trust_config": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Reference to the TrustConfig from certificatemanager.googleapis.com namespace. +If specified, the chain validation will be performed against certificates configured in the given TrustConfig.
+Allowed only if the policy is to be used with external HTTPS load balancers.`, + }, + }, + }, + }, + "server_certificate": { + Type: schema.TypeList, + Optional: true, + Description: `Defines a mechanism to provision client identity (public and private keys) for peer to peer authentication. The presence of this dictates mTLS.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "certificate_provider_instance": { + Type: schema.TypeList, + Optional: true, + Description: `Optional if policy is to be used with Traffic Director. For external HTTPS load balancers it must be empty. +Defines a mechanism to provision server identity (public and private keys). Cannot be combined with allowOpen as a permissive mode that allows both plain text and TLS is not supported.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "plugin_instance": { + Type: schema.TypeString, + Required: true, + Description: `Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "google_cloud_private_spiffe" to use Certificate Authority Service certificate provider instance.`, + }, + }, + }, + ExactlyOneOf: []string{}, + }, + "grpc_endpoint": { + Type: schema.TypeList, + Optional: true, + Description: `gRPC specific configuration to access the gRPC server to obtain the cert and private key.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "target_uri": { + Type: schema.TypeString, + Required: true, + Description: `The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:".`, + }, + }, + }, + ExactlyOneOf: []string{}, + }, + }, + }, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the ServerTlsPolicy was created in UTC.`, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `All labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the ServerTlsPolicy was updated in UTC.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceNetworkSecurityServerTlsPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandNetworkSecurityServerTlsPolicyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + allowOpenProp, err := expandNetworkSecurityServerTlsPolicyAllowOpen(d.Get("allow_open"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("allow_open"); !tpgresource.IsEmptyValue(reflect.ValueOf(allowOpenProp)) && (ok || !reflect.DeepEqual(v, allowOpenProp)) {
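Create and update both assemble the request body field by field: each property is included only when the user explicitly set it or the expanded value is non-empty, which is what the GetOkExists/IsEmptyValue guards here implement. A simplified, self-contained sketch of that guard (isEmptyValue below only approximates tpgresource.IsEmptyValue):

package main

import (
	"fmt"
	"reflect"
)

// isEmptyValue approximates tpgresource.IsEmptyValue for the common cases
// the generated code relies on (zero strings, bools, maps, slices).
func isEmptyValue(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.String:
		return v.Len() == 0
	case reflect.Bool:
		return !v.Bool()
	case reflect.Map, reflect.Slice:
		return v.Len() == 0
	}
	return !v.IsValid()
}

// setIfPresent mirrors the generated pattern: copy a field into the API
// request body only if it expanded to something non-empty.
func setIfPresent(obj map[string]interface{}, apiKey string, val interface{}) {
	if rv := reflect.ValueOf(val); rv.IsValid() && !isEmptyValue(rv) {
		obj[apiKey] = val
	}
}

func main() {
	obj := map[string]interface{}{}
	setIfPresent(obj, "description", "server policy")
	setIfPresent(obj, "allowOpen", false) // zero bool: omitted
	fmt.Println(obj)                      // map[description:server policy]
}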
+ obj["allowOpen"] = allowOpenProp + } + serverCertificateProp, err := expandNetworkSecurityServerTlsPolicyServerCertificate(d.Get("server_certificate"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("server_certificate"); !tpgresource.IsEmptyValue(reflect.ValueOf(serverCertificateProp)) && (ok || !reflect.DeepEqual(v, serverCertificateProp)) { + obj["serverCertificate"] = serverCertificateProp + } + mtlsPolicyProp, err := expandNetworkSecurityServerTlsPolicyMtlsPolicy(d.Get("mtls_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("mtls_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(mtlsPolicyProp)) && (ok || !reflect.DeepEqual(v, mtlsPolicyProp)) { + obj["mtlsPolicy"] = mtlsPolicyProp + } + labelsProp, err := expandNetworkSecurityServerTlsPolicyEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkSecurityBasePath}}projects/{{project}}/locations/{{location}}/serverTlsPolicies?serverTlsPolicyId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ServerTlsPolicy: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ServerTlsPolicy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating ServerTlsPolicy: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/serverTlsPolicies/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = NetworkSecurityOperationWaitTime( + config, res, project, "Creating ServerTlsPolicy", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create ServerTlsPolicy: %s", err) + } + + log.Printf("[DEBUG] Finished creating ServerTlsPolicy %q: %#v", d.Id(), res) + + return resourceNetworkSecurityServerTlsPolicyRead(d, meta) +} + +func resourceNetworkSecurityServerTlsPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkSecurityBasePath}}projects/{{project}}/locations/{{location}}/serverTlsPolicies/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ServerTlsPolicy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, 
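Creation returns a long-running operation; the code above blocks on it via NetworkSecurityOperationWaitTime and clears the resource ID when the wait fails, so a resource that never finished creating is not recorded in state. An illustrative stand-in for that wait loop (the polling callback and fixed interval are assumptions, not the provider's implementation):

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitForOperation is an illustrative stand-in for the
// NetworkSecurityOperationWaitTime call above: poll until the operation
// reports done or the timeout elapses.
func waitForOperation(poll func() (done bool, err error), timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		done, err := poll()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		time.Sleep(2 * time.Second) // fixed interval; the real helper backs off
	}
	return errors.New("timed out waiting for operation")
}

func main() {
	tries := 0
	err := waitForOperation(func() (bool, error) {
		tries++
		return tries >= 2, nil // pretend the LRO finishes on the second poll
	}, 30*time.Second)
	fmt.Println(err) // <nil>
}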
config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NetworkSecurityServerTlsPolicy %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading ServerTlsPolicy: %s", err) + } + + if err := d.Set("create_time", flattenNetworkSecurityServerTlsPolicyCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading ServerTlsPolicy: %s", err) + } + if err := d.Set("update_time", flattenNetworkSecurityServerTlsPolicyUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading ServerTlsPolicy: %s", err) + } + if err := d.Set("labels", flattenNetworkSecurityServerTlsPolicyLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading ServerTlsPolicy: %s", err) + } + if err := d.Set("description", flattenNetworkSecurityServerTlsPolicyDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading ServerTlsPolicy: %s", err) + } + if err := d.Set("allow_open", flattenNetworkSecurityServerTlsPolicyAllowOpen(res["allowOpen"], d, config)); err != nil { + return fmt.Errorf("Error reading ServerTlsPolicy: %s", err) + } + if err := d.Set("server_certificate", flattenNetworkSecurityServerTlsPolicyServerCertificate(res["serverCertificate"], d, config)); err != nil { + return fmt.Errorf("Error reading ServerTlsPolicy: %s", err) + } + if err := d.Set("mtls_policy", flattenNetworkSecurityServerTlsPolicyMtlsPolicy(res["mtlsPolicy"], d, config)); err != nil { + return fmt.Errorf("Error reading ServerTlsPolicy: %s", err) + } + if err := d.Set("terraform_labels", flattenNetworkSecurityServerTlsPolicyTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading ServerTlsPolicy: %s", err) + } + if err := d.Set("effective_labels", flattenNetworkSecurityServerTlsPolicyEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading ServerTlsPolicy: %s", err) + } + + return nil +} + +func resourceNetworkSecurityServerTlsPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ServerTlsPolicy: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandNetworkSecurityServerTlsPolicyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + allowOpenProp, err := expandNetworkSecurityServerTlsPolicyAllowOpen(d.Get("allow_open"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("allow_open"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, allowOpenProp)) { + obj["allowOpen"] = allowOpenProp + } + serverCertificateProp, err := 
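The update flow below only PATCHes what changed: each top-level field with a diff contributes its camelCase API name to updateMask, which is then attached as a query parameter since it is not part of the schema. A standalone sketch of assembling that URL (the field table and helper are illustrative):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// buildPatchURL mirrors the update flow: collect the camelCase API names
// of changed fields and attach them as the updateMask query parameter.
func buildPatchURL(base string, fields map[string]string, hasChange func(string) bool) string {
	mask := []string{}
	for schemaKey, apiKey := range fields {
		if hasChange(schemaKey) {
			mask = append(mask, apiKey)
		}
	}
	q := url.Values{"updateMask": {strings.Join(mask, ",")}}
	return base + "?" + q.Encode()
}

func main() {
	fields := map[string]string{"description": "description", "mtls_policy": "mtlsPolicy"}
	hasChange := func(k string) bool { return k == "mtls_policy" } // pretend only mtls_policy changed
	fmt.Println(buildPatchURL("https://networksecurity.googleapis.com/v1/projects/p/locations/global/serverTlsPolicies/x", fields, hasChange))
	// ...serverTlsPolicies/x?updateMask=mtlsPolicy
}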
expandNetworkSecurityServerTlsPolicyServerCertificate(d.Get("server_certificate"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("server_certificate"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, serverCertificateProp)) { + obj["serverCertificate"] = serverCertificateProp + } + mtlsPolicyProp, err := expandNetworkSecurityServerTlsPolicyMtlsPolicy(d.Get("mtls_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("mtls_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, mtlsPolicyProp)) { + obj["mtlsPolicy"] = mtlsPolicyProp + } + labelsProp, err := expandNetworkSecurityServerTlsPolicyEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkSecurityBasePath}}projects/{{project}}/locations/{{location}}/serverTlsPolicies/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating ServerTlsPolicy %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("allow_open") { + updateMask = append(updateMask, "allowOpen") + } + + if d.HasChange("server_certificate") { + updateMask = append(updateMask, "serverCertificate") + } + + if d.HasChange("mtls_policy") { + updateMask = append(updateMask, "mtlsPolicy") + } + + if d.HasChange("effective_labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating ServerTlsPolicy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ServerTlsPolicy %q: %#v", d.Id(), res) + } + + err = NetworkSecurityOperationWaitTime( + config, res, project, "Updating ServerTlsPolicy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + } + + return resourceNetworkSecurityServerTlsPolicyRead(d, meta) +} + +func resourceNetworkSecurityServerTlsPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ServerTlsPolicy: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config,
"{{NetworkSecurityBasePath}}projects/{{project}}/locations/{{location}}/serverTlsPolicies/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting ServerTlsPolicy %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ServerTlsPolicy") + } + + err = NetworkSecurityOperationWaitTime( + config, res, project, "Deleting ServerTlsPolicy", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting ServerTlsPolicy %q: %#v", d.Id(), res) + return nil +} + +func resourceNetworkSecurityServerTlsPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/serverTlsPolicies/(?P<name>[^/]+)$", + "^(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<name>[^/]+)$", + "^(?P<location>[^/]+)/(?P<name>[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/serverTlsPolicies/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNetworkSecurityServerTlsPolicyCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityServerTlsPolicyUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityServerTlsPolicyLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenNetworkSecurityServerTlsPolicyDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityServerTlsPolicyAllowOpen(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityServerTlsPolicyServerCertificate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["grpc_endpoint"] = + flattenNetworkSecurityServerTlsPolicyServerCertificateGrpcEndpoint(original["grpcEndpoint"], d, config) + transformed["certificate_provider_instance"] = + flattenNetworkSecurityServerTlsPolicyServerCertificateCertificateProviderInstance(original["certificateProviderInstance"], d, config) + return []interface{}{transformed} +} +func flattenNetworkSecurityServerTlsPolicyServerCertificateGrpcEndpoint(v interface{}, d
*schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["target_uri"] = + flattenNetworkSecurityServerTlsPolicyServerCertificateGrpcEndpointTargetUri(original["targetUri"], d, config) + return []interface{}{transformed} +} +func flattenNetworkSecurityServerTlsPolicyServerCertificateGrpcEndpointTargetUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityServerTlsPolicyServerCertificateCertificateProviderInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["plugin_instance"] = + flattenNetworkSecurityServerTlsPolicyServerCertificateCertificateProviderInstancePluginInstance(original["pluginInstance"], d, config) + return []interface{}{transformed} +} +func flattenNetworkSecurityServerTlsPolicyServerCertificateCertificateProviderInstancePluginInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityServerTlsPolicyMtlsPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["client_validation_mode"] = + flattenNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationMode(original["clientValidationMode"], d, config) + transformed["client_validation_trust_config"] = + flattenNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationTrustConfig(original["clientValidationTrustConfig"], d, config) + transformed["client_validation_ca"] = + flattenNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationCa(original["clientValidationCa"], d, config) + return []interface{}{transformed} +} +func flattenNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationTrustConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationCa(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "grpc_endpoint": flattenNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpoint(original["grpcEndpoint"], d, config), + "certificate_provider_instance": flattenNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationCaCertificateProviderInstance(original["certificateProviderInstance"], d, config), + }) + } + return transformed +} +func flattenNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpoint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + 
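The ParseImportId patterns in the import function above accept three ID spellings (full resource name, project/location/name, and location/name) and fill the schema fields from named capture groups. A minimal sketch of how such patterns resolve an import ID (parseImportID is an illustrative stand-in for tpgresource.ParseImportId):

package main

import (
	"fmt"
	"regexp"
)

// parseImportID tries each pattern in order and returns the named
// captures of the first match, mirroring how the schema fields get set.
func parseImportID(id string, patterns []string) map[string]string {
	for _, p := range patterns {
		re := regexp.MustCompile(p)
		m := re.FindStringSubmatch(id)
		if m == nil {
			continue
		}
		out := map[string]string{}
		for i, name := range re.SubexpNames() {
			if i > 0 && name != "" {
				out[name] = m[i]
			}
		}
		return out
	}
	return nil
}

func main() {
	patterns := []string{
		"^projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/serverTlsPolicies/(?P<name>[^/]+)$",
		"^(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<name>[^/]+)$",
	}
	fmt.Println(parseImportID("projects/p1/locations/global/serverTlsPolicies/my-policy", patterns))
	// map[location:global name:my-policy project:p1]
}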
} + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["target_uri"] = + flattenNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpointTargetUri(original["targetUri"], d, config) + return []interface{}{transformed} +} +func flattenNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpointTargetUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationCaCertificateProviderInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["plugin_instance"] = + flattenNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationCaCertificateProviderInstancePluginInstance(original["pluginInstance"], d, config) + return []interface{}{transformed} +} +func flattenNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationCaCertificateProviderInstancePluginInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityServerTlsPolicyTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("terraform_labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenNetworkSecurityServerTlsPolicyEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNetworkSecurityServerTlsPolicyDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkSecurityServerTlsPolicyAllowOpen(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkSecurityServerTlsPolicyServerCertificate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedGrpcEndpoint, err := expandNetworkSecurityServerTlsPolicyServerCertificateGrpcEndpoint(original["grpc_endpoint"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGrpcEndpoint); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["grpcEndpoint"] = transformedGrpcEndpoint + } + + transformedCertificateProviderInstance, err := expandNetworkSecurityServerTlsPolicyServerCertificateCertificateProviderInstance(original["certificate_provider_instance"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCertificateProviderInstance); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["certificateProviderInstance"] = transformedCertificateProviderInstance + } + + return transformed, nil +} + +func expandNetworkSecurityServerTlsPolicyServerCertificateGrpcEndpoint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
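client_validation_ca differs from the MaxItems: 1 blocks: it is a repeated nested object, so its flattener above walks a JSON array and drops empty objects instead of collapsing the whole field. A self-contained sketch of that list handling (names and data are illustrative; in the real code each nested object is itself flattened):

package main

import "fmt"

// flattenCaList mirrors the repeated-block flatteners: empty objects
// returned by the API are skipped so they never surface as diffs.
func flattenCaList(v interface{}) []interface{} {
	if v == nil {
		return nil
	}
	out := make([]interface{}, 0)
	for _, raw := range v.([]interface{}) {
		original := raw.(map[string]interface{})
		if len(original) == 0 {
			continue // skip empty objects returned by the API
		}
		out = append(out, map[string]interface{}{
			"grpc_endpoint": original["grpcEndpoint"], // kept raw here for brevity
		})
	}
	return out
}

func main() {
	api := []interface{}{
		map[string]interface{}{},
		map[string]interface{}{"grpcEndpoint": map[string]interface{}{"targetUri": "unix:/sock"}},
	}
	fmt.Println(flattenCaList(api)) // one element; the empty object was dropped
}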
error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTargetUri, err := expandNetworkSecurityServerTlsPolicyServerCertificateGrpcEndpointTargetUri(original["target_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTargetUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["targetUri"] = transformedTargetUri + } + + return transformed, nil +} + +func expandNetworkSecurityServerTlsPolicyServerCertificateGrpcEndpointTargetUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkSecurityServerTlsPolicyServerCertificateCertificateProviderInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPluginInstance, err := expandNetworkSecurityServerTlsPolicyServerCertificateCertificateProviderInstancePluginInstance(original["plugin_instance"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPluginInstance); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pluginInstance"] = transformedPluginInstance + } + + return transformed, nil +} + +func expandNetworkSecurityServerTlsPolicyServerCertificateCertificateProviderInstancePluginInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkSecurityServerTlsPolicyMtlsPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedClientValidationMode, err := expandNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationMode(original["client_validation_mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedClientValidationMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["clientValidationMode"] = transformedClientValidationMode + } + + transformedClientValidationTrustConfig, err := expandNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationTrustConfig(original["client_validation_trust_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedClientValidationTrustConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["clientValidationTrustConfig"] = transformedClientValidationTrustConfig + } + + transformedClientValidationCa, err := expandNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationCa(original["client_validation_ca"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedClientValidationCa); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["clientValidationCa"] = transformedClientValidationCa + } + + return transformed, nil +} + +func expandNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + 
return v, nil +} + +func expandNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationTrustConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationCa(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedGrpcEndpoint, err := expandNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpoint(original["grpc_endpoint"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGrpcEndpoint); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["grpcEndpoint"] = transformedGrpcEndpoint + } + + transformedCertificateProviderInstance, err := expandNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationCaCertificateProviderInstance(original["certificate_provider_instance"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCertificateProviderInstance); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["certificateProviderInstance"] = transformedCertificateProviderInstance + } + + req = append(req, transformed) + } + return req, nil +} + +func expandNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpoint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTargetUri, err := expandNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpointTargetUri(original["target_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTargetUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["targetUri"] = transformedTargetUri + } + + return transformed, nil +} + +func expandNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpointTargetUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationCaCertificateProviderInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPluginInstance, err := expandNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationCaCertificateProviderInstancePluginInstance(original["plugin_instance"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPluginInstance); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pluginInstance"] = transformedPluginInstance + } + + return transformed, nil +} + +func expandNetworkSecurityServerTlsPolicyMtlsPolicyClientValidationCaCertificateProviderInstancePluginInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandNetworkSecurityServerTlsPolicyEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_server_tls_policy_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_server_tls_policy_sweeper.go new file mode 100644 index 00000000000..7f011c0c063 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_server_tls_policy_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package networksecurity + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("NetworkSecurityServerTlsPolicy", testSweepNetworkSecurityServerTlsPolicy) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepNetworkSecurityServerTlsPolicy(region string) error { + resourceName := "NetworkSecurityServerTlsPolicy" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://networksecurity.googleapis.com/v1/projects/{{project}}/locations/{{location}}/serverTlsPolicies", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in 
response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["serverTlsPolicies"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be swept + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://networksecurity.googleapis.com/v1/projects/{{project}}/locations/{{location}}/serverTlsPolicies/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_gateway.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_gateway.go index e23a6380fe6..3d25e8aaa7a 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_gateway.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_gateway.go @@ -251,6 +251,12 @@ The default value is 'global'.`, For example: 'projects/*/global/networks/network-1'. Currently, this field is specific to gateways of type 'SECURE_WEB_GATEWAY'.`, }, + "routing_mode": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"NEXT_HOP_ROUTING_MODE", ""}), + Description: `The routing mode of the Gateway. This field is configurable only for gateways of type SECURE_WEB_GATEWAY. This field is required for gateways of type SECURE_WEB_GATEWAY. Possible values: ["NEXT_HOP_ROUTING_MODE"]`, + }, "scope": { Type: schema.TypeString, Optional: true, @@ -305,9 +311,9 @@ Currently, this field is specific to gateways of type 'SECURE_WEB_GATEWAY.`, "delete_swg_autogen_router_on_destroy": { Type: schema.TypeBool, Optional: true, - Default: false, Description: `When deleting a gateway of type 'SECURE_WEB_GATEWAY', this boolean option will also delete auto generated router by the gateway creation. 
If there is no other gateway of type 'SECURE_WEB_GATEWAY' remaining for that region and network it will be deleted.`, + Default: false, }, "project": { Type: schema.TypeString, @@ -388,6 +394,12 @@ func resourceNetworkServicesGatewayCreate(d *schema.ResourceData, meta interface } else if v, ok := d.GetOkExists("certificate_urls"); !tpgresource.IsEmptyValue(reflect.ValueOf(certificateUrlsProp)) && (ok || !reflect.DeepEqual(v, certificateUrlsProp)) { obj["certificateUrls"] = certificateUrlsProp } + routingModeProp, err := expandNetworkServicesGatewayRoutingMode(d.Get("routing_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("routing_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(routingModeProp)) && (ok || !reflect.DeepEqual(v, routingModeProp)) { + obj["routingMode"] = routingModeProp + } labelsProp, err := expandNetworkServicesGatewayEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err @@ -541,6 +553,9 @@ func resourceNetworkServicesGatewayRead(d *schema.ResourceData, meta interface{} if err := d.Set("certificate_urls", flattenNetworkServicesGatewayCertificateUrls(res["certificateUrls"], d, config)); err != nil { return fmt.Errorf("Error reading Gateway: %s", err) } + if err := d.Set("routing_mode", flattenNetworkServicesGatewayRoutingMode(res["routingMode"], d, config)); err != nil { + return fmt.Errorf("Error reading Gateway: %s", err) + } if err := d.Set("terraform_labels", flattenNetworkServicesGatewayTerraformLabels(res["labels"], d, config)); err != nil { return fmt.Errorf("Error reading Gateway: %s", err) } @@ -591,6 +606,12 @@ func resourceNetworkServicesGatewayUpdate(d *schema.ResourceData, meta interface } else if v, ok := d.GetOkExists("certificate_urls"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, certificateUrlsProp)) { obj["certificateUrls"] = certificateUrlsProp } + routingModeProp, err := expandNetworkServicesGatewayRoutingMode(d.Get("routing_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("routing_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, routingModeProp)) { + obj["routingMode"] = routingModeProp + } labelsProp, err := expandNetworkServicesGatewayEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err @@ -623,6 +644,10 @@ func resourceNetworkServicesGatewayUpdate(d *schema.ResourceData, meta interface updateMask = append(updateMask, "certificateUrls") } + if d.HasChange("routing_mode") { + updateMask = append(updateMask, "routingMode") + } + if d.HasChange("effective_labels") { updateMask = append(updateMask, "labels") } @@ -635,6 +660,7 @@ func resourceNetworkServicesGatewayUpdate(d *schema.ResourceData, meta interface if d.Get("type") == "SECURE_WEB_GATEWAY" { obj["name"] = d.Get("name") obj["type"] = d.Get("type") + obj["routingMode"] = d.Get("routing_mode") } // err == nil indicates that the billing_project value was found @@ -836,6 +862,10 @@ func flattenNetworkServicesGatewayCertificateUrls(v interface{}, d *schema.Resou return v } +func flattenNetworkServicesGatewayRoutingMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenNetworkServicesGatewayTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v @@ -895,6 +925,10 @@ func expandNetworkServicesGatewayCertificateUrls(v interface{}, d tpgresource.Te return v, nil } +func 
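+// routing_mode is a plain pass-through expander. Note the update path above:
+// for gateways of type SECURE_WEB_GATEWAY the request body re-sends name,
+// type, and routingMode even when they are unchanged, which appears to be
+// required by the gateways.patch API.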
expandNetworkServicesGatewayRoutingMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandNetworkServicesGatewayEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_instance.go index b373a557215..2f7884fd02a 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_instance.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_instance.go @@ -123,7 +123,6 @@ func modifyNotebooksInstanceState(config *transport_tpg.Config, d *schema.Resour } return res, nil } - func waitForNotebooksOperation(config *transport_tpg.Config, d *schema.ResourceData, project string, billingProject string, userAgent string, response map[string]interface{}) error { var opRes map[string]interface{} err := NotebooksOperationWaitTimeWithResponse( @@ -566,8 +565,8 @@ the population of this value.`, "desired_state": { Type: schema.TypeString, Optional: true, - Default: "ACTIVE", Description: `Desired state of the Notebook Instance. Set this field to 'ACTIVE' to start the Instance, and 'STOPPED' to stop the Instance.`, + Default: "ACTIVE", }, "project": { Type: schema.TypeString, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_autonomous_database.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_autonomous_database.go new file mode 100644 index 00000000000..399bb07c2b1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_autonomous_database.go @@ -0,0 +1,43 @@ +// Copyright (c) HashiCorp, Inc. 
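+//
+// This data source is a thin wrapper around the managed resource: it derives
+// its schema with tpgresource.DatasourceSchemaFromResourceSchema, promotes
+// "location" and "autonomous_database_id" to required inputs, sets the
+// composite id, and then delegates to the resource's Read function; an empty
+// id after the read is reported as "not found".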
+// SPDX-License-Identifier: MPL-2.0 +package oracledatabase + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceOracleDatabaseAutonomousDatabase() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceOracleDatabaseAutonomousDatabase().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "location", "autonomous_database_id") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + return &schema.Resource{ + Read: dataSourceOracleDatabaseAutonomousDatabaseRead, + Schema: dsSchema, + } + +} + +func dataSourceOracleDatabaseAutonomousDatabaseRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/autonomousDatabases/{{autonomous_database_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + + d.SetId(id) + + err = resourceOracleDatabaseAutonomousDatabaseRead(d, meta) + if err != nil { + return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found", id) + } + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_autonomous_databases.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_autonomous_databases.go new file mode 100644 index 00000000000..eadf26bac89 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_autonomous_databases.go @@ -0,0 +1,114 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package oracledatabase + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceOracleDatabaseAutonomousDatabases() *schema.Resource { + dsSchema := map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Description: "The ID of the project in which the dataset is located. 
If it is not provided, the provider project is used.", + }, + "location": { + Type: schema.TypeString, + Required: true, + Description: "location", + }, + "autonomous_databases": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: tpgresource.DatasourceSchemaFromResourceSchema(ResourceOracleDatabaseAutonomousDatabase().Schema), + }, + }, + } + return &schema.Resource{ + Read: dataSourceOracleDatabaseAutonomousDatabasesRead, + Schema: dsSchema, + } + +} + +func dataSourceOracleDatabaseAutonomousDatabasesRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{OracleDatabaseBasePath}}projects/{{project}}/locations/{{location}}/autonomousDatabases") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + + billingProject := "" + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for autonomousDatabases: %s", err) + } + billingProject = project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + + if err != nil { + return fmt.Errorf("Error reading autonomousDatabases: %s", err) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting autonomousDatabases project: %s", err) + } + + if err := d.Set("autonomous_databases", flattenOracleDatabaseautonomousDatabases(res["autonomousDatabases"], d, config)); err != nil { + return fmt.Errorf("Error setting autonomousDatabases: %s", err) + } + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/autonomousDatabases") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return nil +} + +func flattenOracleDatabaseautonomousDatabases(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) []map[string]interface{} { + if v == nil { + return nil + } + l := v.([]interface{}) + transformed := make([]map[string]interface{}, 0) + for _, raw := range l { + original := raw.(map[string]interface{}) + transformed = append(transformed, map[string]interface{}{ + "name": flattenOracleDatabaseAutonomousDatabaseName(original["name"], d, config), + "database": flattenOracleDatabaseAutonomousDatabaseDatabase(original["database"], d, config), + "display_name": flattenOracleDatabaseAutonomousDatabaseDisplayName(original["displayName"], d, config), + "entitlement_id": flattenOracleDatabaseAutonomousDatabaseEntitlementId(original["entitlementId"], d, config), + "properties": flattenOracleDatabaseAutonomousDatabaseProperties(original["properties"], d, config), + "labels": flattenOracleDatabaseAutonomousDatabaseLabels(original["labels"], d, config), + "network": flattenOracleDatabaseAutonomousDatabaseNetwork(original["network"], d, config), + "cidr": flattenOracleDatabaseAutonomousDatabaseCidr(original["cidr"], d, config), + "create_time": flattenOracleDatabaseAutonomousDatabaseCreateTime(original["createTime"], d, config), + "terraform_labels": flattenOracleDatabaseAutonomousDatabaseTerraformLabels(original["labels"], d, config), + "effective_labels": 
flattenOracleDatabaseAutonomousDatabaseEffectiveLabels(original["labels"], d, config), + }) + } + return transformed +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_cloud_exadata_infrastructure.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_cloud_exadata_infrastructure.go new file mode 100644 index 00000000000..314437e0132 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_cloud_exadata_infrastructure.go @@ -0,0 +1,38 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package oracledatabase + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceOracleDatabaseCloudExadataInfrastructure() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceOracleDatabaseCloudExadataInfrastructure().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "location", "cloud_exadata_infrastructure_id") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + return &schema.Resource{ + Read: dataSourceOracleDatabaseCloudExadataInfrastructureRead, + Schema: dsSchema, + } + +} + +func dataSourceOracleDatabaseCloudExadataInfrastructureRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/cloudExadataInfrastructures/{{cloud_exadata_infrastructure_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + err = resourceOracleDatabaseCloudExadataInfrastructureRead(d, meta) + if err != nil { + return err + } + d.SetId(id) + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_cloud_exadata_infrastructures.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_cloud_exadata_infrastructures.go new file mode 100644 index 00000000000..4c6739823de --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_cloud_exadata_infrastructures.go @@ -0,0 +1,111 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package oracledatabase + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceOracleDatabaseCloudExadataInfrastructures() *schema.Resource { + dsSchema := map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Description: "The ID of the project in which the dataset is located. 
If it is not provided, the provider project is used.", + }, + "location": { + Type: schema.TypeString, + Required: true, + Description: "location", + }, + "cloud_exadata_infrastructures": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: tpgresource.DatasourceSchemaFromResourceSchema(ResourceOracleDatabaseCloudExadataInfrastructure().Schema), + }, + }, + } + return &schema.Resource{ + Read: dataSourceOracleDatabaseCloudExadataInfrastructuresRead, + Schema: dsSchema, + } + +} + +func dataSourceOracleDatabaseCloudExadataInfrastructuresRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{OracleDatabaseBasePath}}projects/{{project}}/locations/{{location}}/cloudExadataInfrastructures") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + + billingProject := "" + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for cloudExadataInfrastructures: %s", err) + } + billingProject = project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + + if err != nil { + return fmt.Errorf("Error reading cloudExadataInfrastructures: %s", err) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting cloudExadataInfrastructures project: %s", err) + } + + if err := d.Set("cloud_exadata_infrastructures", flattenOracleDatabaseCloudExadataInfrastructures(res["cloudExadataInfrastructures"], d, config)); err != nil { + return fmt.Errorf("Error setting cloudExadataInfrastructures: %s", err) + } + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/cloudExadataInfrastructures") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + return nil +} + +func flattenOracleDatabaseCloudExadataInfrastructures(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) []map[string]interface{} { + if v == nil { + return nil + } + l := v.([]interface{}) + transformed := make([]map[string]interface{}, 0) + for _, raw := range l { + original := raw.(map[string]interface{}) + transformed = append(transformed, map[string]interface{}{ + "name": flattenOracleDatabaseCloudExadataInfrastructureName(original["name"], d, config), + "display_name": flattenOracleDatabaseCloudExadataInfrastructureDisplayName(original["displayName"], d, config), + "gcp_oracle_zone": flattenOracleDatabaseCloudExadataInfrastructureGcpOracleZone(original["gcpOracleZone"], d, config), + "entitlement_id": flattenOracleDatabaseCloudExadataInfrastructureEntitlementId(original["entitlementId"], d, config), + "properties": flattenOracleDatabaseCloudExadataInfrastructureProperties(original["properties"], d, config), + "labels": flattenOracleDatabaseCloudExadataInfrastructureLabels(original["labels"], d, config), + "create_time": flattenOracleDatabaseCloudExadataInfrastructureCreateTime(original["createTime"], d, config), + "terraform_labels": flattenOracleDatabaseCloudExadataInfrastructureTerraformLabels(original["labels"], d, config), + 
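+ // The API returns a single "labels" map; it is fanned out into "labels",
+ // "terraform_labels", and "effective_labels" here so each list entry
+ // matches the provider-wide labels model of the underlying resource.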
"effective_labels": flattenOracleDatabaseCloudExadataInfrastructureEffectiveLabels(original["labels"], d, config), + }) + } + return transformed +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_cloud_vm_cluster.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_cloud_vm_cluster.go new file mode 100644 index 00000000000..89bc08d9f8c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_cloud_vm_cluster.go @@ -0,0 +1,38 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package oracledatabase + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceOracleDatabaseCloudVmCluster() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceOracleDatabaseCloudVmCluster().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "location", "cloud_vm_cluster_id") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + return &schema.Resource{ + Read: dataSourceOracleDatabaseCloudVmClusterRead, + Schema: dsSchema, + } + +} + +func dataSourceOracleDatabaseCloudVmClusterRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/cloudVmClusters/{{cloud_vm_cluster_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + err = resourceOracleDatabaseCloudVmClusterRead(d, meta) + if err != nil { + return err + } + d.SetId(id) + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_cloud_vm_clusters.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_cloud_vm_clusters.go new file mode 100644 index 00000000000..60c2b899d5f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_cloud_vm_clusters.go @@ -0,0 +1,115 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package oracledatabase + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceOracleDatabaseCloudVmClusters() *schema.Resource { + dsSchema := map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Description: "The ID of the project in which the dataset is located. 
If it is not provided, the provider project is used.", + }, + "location": { + Type: schema.TypeString, + Required: true, + Description: "location", + }, + "cloud_vm_clusters": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: tpgresource.DatasourceSchemaFromResourceSchema(ResourceOracleDatabaseCloudVmCluster().Schema), + }, + }, + } + return &schema.Resource{ + Read: dataSourceOracleDatabaseCloudVmClustersRead, + Schema: dsSchema, + } + +} + +func dataSourceOracleDatabaseCloudVmClustersRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{OracleDatabaseBasePath}}projects/{{project}}/locations/{{location}}/cloudVmClusters") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/cloudVmClusters") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + billingProject := "" + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for cloudVmClusters: %s", err) + } + billingProject = project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + + if err != nil { + return fmt.Errorf("Error reading cloudVmClusters: %s", err) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting cloudVmClusters project: %s", err) + } + + if err := d.Set("cloud_vm_clusters", flattenOracleDatabaseCloudVmClusters(res["cloudVmClusters"], d, config)); err != nil { + return fmt.Errorf("Error setting cloudVmClusters: %s", err) + } + + return nil +} + +func flattenOracleDatabaseCloudVmClusters(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) []map[string]interface{} { + if v == nil { + return nil + } + l := v.([]interface{}) + transformed := make([]map[string]interface{}, 0) + for _, raw := range l { + original := raw.(map[string]interface{}) + transformed = append(transformed, map[string]interface{}{ + "name": flattenOracleDatabaseCloudVmClusterName(original["name"], d, config), + "exadata_infrastructure": flattenOracleDatabaseCloudVmClusterExadataInfrastructure(original["exadataInfrastructure"], d, config), + "display_name": flattenOracleDatabaseCloudVmClusterDisplayName(original["displayName"], d, config), + "gcp_oracle_zone": flattenOracleDatabaseCloudVmClusterGcpOracleZone(original["gcpOracleZone"], d, config), + "properties": flattenOracleDatabaseCloudVmClusterProperties(original["properties"], d, config), + "labels": flattenOracleDatabaseCloudVmClusterLabels(original["labels"], d, config), + "create_time": flattenOracleDatabaseCloudVmClusterCreateTime(original["createTime"], d, config), + "cidr": flattenOracleDatabaseCloudVmClusterCidr(original["cidr"], d, config), + "backup_subnet_cidr": flattenOracleDatabaseCloudVmClusterBackupSubnetCidr(original["backupSubnetCidr"], d, config), + "network": flattenOracleDatabaseCloudVmClusterNetwork(original["network"], d, config), + "terraform_labels": 
flattenOracleDatabaseCloudVmClusterTerraformLabels(original["labels"], d, config), + "effective_labels": flattenOracleDatabaseCloudVmClusterEffectiveLabels(original["labels"], d, config), + }) + } + return transformed +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_db_nodes.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_db_nodes.go new file mode 100644 index 00000000000..53e05d073cb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_db_nodes.go @@ -0,0 +1,216 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package oracledatabase + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceOracleDatabaseDbNodes() *schema.Resource { + dsSchema := map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Description: "The ID of the project in which the dataset is located. If it is not provided, the provider project is used.", + }, + "location": { + Type: schema.TypeString, + Required: true, + Description: "location", + }, + "cloud_vm_cluster": { + Type: schema.TypeString, + Required: true, + Description: "vmcluster", + }, + "db_nodes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The dbnode name", + }, + "properties": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ocid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only", + }, + "ocpu_count": { + Type: schema.TypeInt, + Computed: true, + Description: "Output only", + }, + "memory_size_gb": { + Type: schema.TypeInt, + Computed: true, + Description: "Output only", + }, + "db_node_storage_size_gb": { + Type: schema.TypeInt, + Computed: true, + Description: "Output only", + }, + "db_server_ocid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only", + }, + "hostname": { + Type: schema.TypeString, + Computed: true, + Description: "Output only", + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only", + }, + "total_cpu_core_count": { + Type: schema.TypeInt, + Computed: true, + Description: "Output only", + }, + }, + }, + }, + }, + }, + }, + } + return &schema.Resource{ + Read: DataSourceOracleDatabaseDbNodesRead, + Schema: dsSchema, + } +} + +func DataSourceOracleDatabaseDbNodesRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + url, err := tpgresource.ReplaceVars(d, config, "{{OracleDatabaseBasePath}}projects/{{project}}/locations/{{location}}/cloudVmClusters/{{cloud_vm_cluster}}/dbNodes") + if err != nil { + return err + } + billingProject := "" + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DbNode: %s", err) + } + billingProject = project + // err == nil indicates that the 
billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + + if err != nil { + return fmt.Errorf("Error reading DbNode: %s", err) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading DbNode: %s", err) + } + if err := d.Set("db_nodes", flattenOracleDatabaseDbNodes(res["dbNodes"], d, config)); err != nil { + return fmt.Errorf("Error reading DbNode: %s", err) + } + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/cloudVmClusters/{{cloud_vm_cluster}}/dbNodes") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + return nil +} + +func flattenOracleDatabaseDbNodes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) []map[string]interface{} { + if v == nil { + return nil + } + l := v.([]interface{}) + transformed := make([]map[string]interface{}, 0) + for _, raw := range l { + original := raw.(map[string]interface{}) + transformed = append(transformed, map[string]interface{}{ + "name": flattenOracleDatabaseDbNodeName(original["name"], d, config), + "properties": flattenOracleDatabaseDbNodeProperties(original["properties"], d, config), + }) + } + + return transformed +} + +func flattenOracleDatabaseDbNodeName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseDbNodeProperties(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["ocid"] = flattenOracleDatabaseDbNodePropertiesOcid(original["ocid"], d, config) + transformed["ocpu_count"] = flattenOracleDatabaseDbNodePropertiesOcpuCount(original["ocpuCount"], d, config) + transformed["memory_size_gb"] = flattenOracleDatabaseDbNodePropertiesMemorySizeGb(original["memorySizeGb"], d, config) + transformed["db_node_storage_size_gb"] = flattenOracleDatabaseDbNodePropertiesDbNodeStorageSizeGb(original["dbNodeStorageSizeGb"], d, config) + transformed["db_server_ocid"] = flattenOracleDatabaseDbNodePropertiesDbServerOcid(original["dbServerOcid"], d, config) + transformed["hostname"] = flattenOracleDatabaseDbNodePropertiesHostname(original["hostname"], d, config) + transformed["state"] = flattenOracleDatabaseDbNodePropertiesState(original["state"], d, config) + transformed["total_cpu_core_count"] = flattenOracleDatabaseDbNodePropertiesTotalCpuCoreCount(original["totalCpuCoreCount"], d, config) + + return []interface{}{transformed} +} + +func flattenOracleDatabaseDbNodePropertiesOcid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseDbNodePropertiesOcpuCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseDbNodePropertiesMemorySizeGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseDbNodePropertiesDbNodeStorageSizeGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseDbNodePropertiesDbServerOcid(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseDbNodePropertiesHostname(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseDbNodePropertiesState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseDbNodePropertiesTotalCpuCoreCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_db_servers.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_db_servers.go new file mode 100644 index 00000000000..2c60fefcf23 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/data_source_oracle_database_db_servers.go @@ -0,0 +1,249 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package oracledatabase + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceOracleDatabaseDbServers() *schema.Resource { + dsSchema := map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Description: "The ID of the project in which the dataset is located. If it is not provided, the provider project is used.", + }, + "location": { + Type: schema.TypeString, + Required: true, + Description: "location", + }, + "cloud_exadata_infrastructure": { + Type: schema.TypeString, + Required: true, + Description: "exadata", + }, + "db_servers": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Computed: true, + Description: "The Display name", + }, + "properties": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ocid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only", + }, + "ocpu_count": { + Type: schema.TypeInt, + Computed: true, + Description: "Output only", + }, + "max_ocpu_count": { + Type: schema.TypeInt, + Computed: true, + Description: "Output only", + }, + "memory_size_gb": { + Type: schema.TypeInt, + Computed: true, + Description: "Output only", + }, + "max_memory_size_gb": { + Type: schema.TypeInt, + Computed: true, + Description: "Output only", + }, + "db_node_storage_size_gb": { + Type: schema.TypeInt, + Computed: true, + Description: "Output only", + }, + "max_db_node_storage_size_gb": { + Type: schema.TypeInt, + Computed: true, + Description: "Output only", + }, + "vm_count": { + Type: schema.TypeInt, + Computed: true, + Description: "Output only", + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only", + }, + "db_node_ids": { + Type: schema.TypeList, + Computed: true, + Description: "Output only", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + } + return &schema.Resource{ + Read: DataSourceOracleDatabaseDbServersRead, + Schema: dsSchema, + UseJSONNumber: true, + } +} + +func 
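+// DataSourceOracleDatabaseDbServersRead (below) follows the usual list
+// data source flow: resolve the project and billing project, GET the
+// dbServers collection, flatten the camelCase API fields into the
+// snake_case schema above, and set a synthetic id from the request path.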
DataSourceOracleDatabaseDbServersRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DbServer: %s", err) + } + billingProject = project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{OracleDatabaseBasePath}}projects/{{project}}/locations/{{location}}/cloudExadataInfrastructures/{{cloud_exadata_infrastructure}}/dbServers") + if err != nil { + return err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return fmt.Errorf("Error reading DbServer: %s", err) + } + if res["dbServers"] == nil { + return fmt.Errorf("Error reading DbServer: dbServers not found in response") + } + dbServers, err := flattenOracleDatabaseDbServerList(config, res["dbServers"]) + if err != nil { + return fmt.Errorf("error flattening dbserver list: %s", err) + } + + if err := d.Set("db_servers", dbServers); err != nil { + return fmt.Errorf("error setting dbserver: %s", err) + } + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/cloudExadataInfrastructures/{{cloud_exadata_infrastructure}}/dbServers") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + return nil +} + +func flattenOracleDatabaseDbServerList(config *transport_tpg.Config, dbServerList interface{}) (interface{}, error) { + + if dbServerList == nil { + return nil, nil + } + + l := dbServerList.([]interface{}) + transformed := make([]interface{}, 0) + for _, raw := range l { + original := raw.(map[string]interface{}) + transformed = append(transformed, map[string]interface{}{ + "display_name": flattenOracleDatabaseDbServerDisplayName(original["displayName"], config), + "properties": flattenOracleDatabaseDbServerProperties(original["properties"], config), + }) + } + return transformed, nil + +} + +func flattenOracleDatabaseDbServerDisplayName(v interface{}, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseDbServerProperties(v interface{}, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["ocid"] = flattenOracleDatabaseDbServerPropertiesOcid(original["ocid"], config) + transformed["ocpu_count"] = flattenOracleDatabaseDbServerPropertiesOcpuCount(original["ocpuCount"], config) + transformed["max_ocpu_count"] = flattenOracleDatabaseDbServerPropertiesMaxOcpuCount(original["maxOcpuCount"], config) + transformed["memory_size_gb"] = flattenOracleDatabaseDbServerPropertiesMemorySizeGb(original["memorySizeGb"], config) + transformed["max_memory_size_gb"] = flattenOracleDatabaseDbServerPropertiesMaxMemorySizeGb(original["maxMemorySizeGb"], config) + transformed["db_node_storage_size_gb"] = flattenOracleDatabaseDbServerPropertiesDbNodeStorageSizeGb(original["dbNodeStorageSizeGb"], config) + transformed["max_db_node_storage_size_gb"] = 
flattenOracleDatabaseDbServerPropertiesMaxDbNodeStorageSizeGb(original["maxDbNodeStorageSizeGb"], config) + transformed["vm_count"] = flattenOracleDatabaseDbServerPropertiesVmcount(original["vmCount"], config) + transformed["state"] = flattenOracleDatabaseDbServerPropertiesState(original["state"], config) + transformed["db_node_ids"] = flattenOracleDatabaseDbServerPropertiesDbNodeIds(original["dbNodeIds"], config) + + return []interface{}{transformed} +} + +func flattenOracleDatabaseDbServerPropertiesOcid(v interface{}, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseDbServerPropertiesOcpuCount(v interface{}, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseDbServerPropertiesMaxOcpuCount(v interface{}, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseDbServerPropertiesMemorySizeGb(v interface{}, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseDbServerPropertiesMaxMemorySizeGb(v interface{}, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseDbServerPropertiesDbNodeStorageSizeGb(v interface{}, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseDbServerPropertiesMaxDbNodeStorageSizeGb(v interface{}, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseDbServerPropertiesVmcount(v interface{}, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseDbServerPropertiesState(v interface{}, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseDbServerPropertiesDbNodeIds(v interface{}, config *transport_tpg.Config) interface{} { + return v +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/oracle_database_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/oracle_database_operation.go new file mode 100644 index 00000000000..d1131797f71 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/oracle_database_operation.go @@ -0,0 +1,92 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package oracledatabase + +import ( + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type OracleDatabaseOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *OracleDatabaseOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
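+ // The operation name returned by the API is a full relative resource name
+ // (for example "projects/p/locations/l/operations/op-123", illustrative
+ // only), so appending it to OracleDatabaseBasePath yields the
+ // operations.get URL that is polled until the operation is done.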
+ url := fmt.Sprintf("%s%s", w.Config.OracleDatabaseBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createOracleDatabaseWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*OracleDatabaseOperationWaiter, error) { + w := &OracleDatabaseOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func OracleDatabaseOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createOracleDatabaseWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + rawResponse := []byte(w.CommonOperationWaiter.Op.Response) + if len(rawResponse) == 0 { + return errors.New("`resource` not set in operation response") + } + return json.Unmarshal(rawResponse, response) +} + +func OracleDatabaseOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createOracleDatabaseWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/resource_oracle_database_autonomous_database.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/resource_oracle_database_autonomous_database.go new file mode 100644 index 00000000000..824b9409e9f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/resource_oracle_database_autonomous_database.go @@ -0,0 +1,3381 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
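+//
+// Nearly every configurable field on the resource below is ForceNew: apart
+// from labels, changing an Autonomous Database (cidr, database, properties,
+// and so on) replaces the instance rather than updating it in place, which
+// is why the create timeout (240 minutes) is the long one.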
+// +// ---------------------------------------------------------------------------- + +package oracledatabase + +import ( + "fmt" + "log" + "net/http" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceOracleDatabaseAutonomousDatabase() *schema.Resource { + return &schema.Resource{ + Create: resourceOracleDatabaseAutonomousDatabaseCreate, + Read: resourceOracleDatabaseAutonomousDatabaseRead, + Update: resourceOracleDatabaseAutonomousDatabaseUpdate, + Delete: resourceOracleDatabaseAutonomousDatabaseDelete, + + Importer: &schema.ResourceImporter{ + State: resourceOracleDatabaseAutonomousDatabaseImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(240 * time.Minute), + Update: schema.DefaultTimeout(120 * time.Minute), + Delete: schema.DefaultTimeout(120 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.SetLabelsDiff, + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "autonomous_database_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the Autonomous Database to create. This value is restricted +to (^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$) and must be a maximum of 63 +characters in length. The value must start with a letter and end with +a letter or a number.`, + }, + "cidr": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The subnet CIDR range for the Autonomous Database.`, + }, + "database": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the Autonomous Database. The database name must be unique in +the project. The name must begin with a letter and can +contain a maximum of 30 alphanumeric characters.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Resource ID segment making up resource 'name'. See documentation for resource type 'oracledatabase.googleapis.com/AutonomousDatabaseBackup'.`, + }, + "network": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the VPC network used by the Autonomous Database. +Format: projects/{project}/global/networks/{network}`, + }, + "properties": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `The properties of an Autonomous Database.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "db_workload": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Possible values: + DB_WORKLOAD_UNSPECIFIED +OLTP +DW +AJD +APEX`, + }, + "license_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The license type used for the Autonomous Database. + Possible values: + LICENSE_TYPE_UNSPECIFIED +LICENSE_INCLUDED +BRING_YOUR_OWN_LICENSE`, + }, + "backup_retention_period_days": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The retention period for the Autonomous Database. This field is specified +in days, can range from 1 day to 60 days, and has a default value of +60 days.`, + }, + "character_set": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The character set for the Autonomous Database. 
The default is AL32UTF8.`, + Default: "AL32UTF8", + }, + "compute_count": { + Type: schema.TypeFloat, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The number of compute servers for the Autonomous Database.`, + }, + "customer_contacts": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The list of customer contacts.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "email": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The email address used by Oracle to send notifications regarding databases +and infrastructure.`, + }, + }, + }, + }, + "data_storage_size_gb": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The size of the data stored in the database, in gigabytes.`, + }, + "data_storage_size_tb": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The size of the data stored in the database, in terabytes.`, + }, + "db_edition": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The edition of the Autonomous Databases. + Possible values: + DATABASE_EDITION_UNSPECIFIED +STANDARD_EDITION +ENTERPRISE_EDITION`, + }, + "db_version": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The Oracle Database version for the Autonomous Database.`, + }, + "is_auto_scaling_enabled": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `This field indicates if auto scaling is enabled for the Autonomous Database +CPU core count.`, + }, + "is_storage_auto_scaling_enabled": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + ForceNew: true, + Description: `This field indicates if auto scaling is enabled for the Autonomous Database +storage.`, + }, + "maintenance_schedule_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The maintenance schedule of the Autonomous Database. + Possible values: + MAINTENANCE_SCHEDULE_TYPE_UNSPECIFIED +EARLY +REGULAR`, + }, + "mtls_connection_required": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `This field specifies if the Autonomous Database requires mTLS connections.`, + }, + "n_character_set": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The national character set for the Autonomous Database. 
The default is +AL16UTF16.`, + Default: "AL16UTF16", + }, + "operations_insights_state": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Possible values: + OPERATIONS_INSIGHTS_STATE_UNSPECIFIED +ENABLING +ENABLED +DISABLING +NOT_ENABLED +FAILED_ENABLING +FAILED_DISABLING`, + }, + "private_endpoint_ip": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The private endpoint IP address for the Autonomous Database.`, + }, + "private_endpoint_label": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The private endpoint label for the Autonomous Database.`, + }, + "actual_used_data_storage_size_tb": { + Type: schema.TypeFloat, + Computed: true, + Description: `The amount of storage currently being used for user and system data, in +terabytes.`, + }, + "allocated_storage_size_tb": { + Type: schema.TypeFloat, + Computed: true, + Description: `The amount of storage currently allocated for the database tables and +billed for, rounded up in terabytes.`, + }, + "apex_details": { + Type: schema.TypeList, + Computed: true, + Description: `Oracle APEX Application Development. +https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/AutonomousDatabaseApex`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "apex_version": { + Type: schema.TypeString, + Computed: true, + Description: `The Oracle APEX Application Development version.`, + }, + "ords_version": { + Type: schema.TypeString, + Computed: true, + Description: `The Oracle REST Data Services (ORDS) version.`, + }, + }, + }, + }, + "are_primary_allowlisted_ips_used": { + Type: schema.TypeBool, + Computed: true, + Description: `This field indicates the status of Data Guard and Access control for the +Autonomous Database. The field's value is null if Data Guard is disabled +or Access Control is disabled. The field's value is TRUE if both Data Guard +and Access Control are enabled, and the Autonomous Database is using +primary IP access control list (ACL) for standby. The field's value is +FALSE if both Data Guard and Access Control are enabled, and the Autonomous +Database is using a different IP access control list (ACL) for standby +compared to primary.`, + }, + "autonomous_container_database_id": { + Type: schema.TypeString, + Computed: true, + Description: `The Autonomous Container Database OCID.`, + }, + "available_upgrade_versions": { + Type: schema.TypeList, + Computed: true, + Description: `The list of available Oracle Database upgrade versions for an Autonomous +Database.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "connection_strings": { + Type: schema.TypeList, + Computed: true, + Description: `The connection string used to connect to the Autonomous Database. 
+https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/AutonomousDatabaseConnectionStrings`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "all_connection_strings": { + Type: schema.TypeList, + Computed: true, + Description: `A list of all connection strings that can be used to connect to the +Autonomous Database.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "high": { + Type: schema.TypeString, + Computed: true, + Description: `The database service provides the highest level of resources to each SQL +statement.`, + }, + "low": { + Type: schema.TypeString, + Computed: true, + Description: `The database service provides the least level of resources to each SQL +statement.`, + }, + "medium": { + Type: schema.TypeString, + Computed: true, + Description: `The database service provides a lower level of resources to each SQL +statement.`, + }, + }, + }, + }, + "dedicated": { + Type: schema.TypeString, + Computed: true, + Description: `The database service provides the least level of resources to each SQL +statement, but supports the most number of concurrent SQL statements.`, + }, + "high": { + Type: schema.TypeString, + Computed: true, + Description: `The database service provides the highest level of resources to each SQL +statement.`, + }, + "low": { + Type: schema.TypeString, + Computed: true, + Description: `The database service provides the least level of resources to each SQL +statement.`, + }, + "medium": { + Type: schema.TypeString, + Computed: true, + Description: `The database service provides a lower level of resources to each SQL +statement.`, + }, + "profiles": { + Type: schema.TypeList, + Computed: true, + Description: `A list of connection string profiles to allow clients to group, filter, and +select values based on the structured metadata.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "consumer_group": { + Type: schema.TypeString, + Computed: true, + Description: `The current consumer group being used by the connection. + Possible values: + CONSUMER_GROUP_UNSPECIFIED +HIGH +MEDIUM +LOW +TP +TPURGENT`, + }, + "display_name": { + Type: schema.TypeString, + Computed: true, + Description: `The display name for the database connection.`, + }, + "host_format": { + Type: schema.TypeString, + Computed: true, + Description: `The host name format being currently used in connection string. + Possible values: + HOST_FORMAT_UNSPECIFIED +FQDN +IP`, + }, + "is_regional": { + Type: schema.TypeBool, + Computed: true, + Description: `This field indicates if the connection string is regional and is only +applicable for cross-region Data Guard.`, + }, + "protocol": { + Type: schema.TypeString, + Computed: true, + Description: `The protocol being used by the connection. + Possible values: + PROTOCOL_UNSPECIFIED +TCP +TCPS`, + }, + "session_mode": { + Type: schema.TypeString, + Computed: true, + Description: `The current session mode of the connection. + Possible values: + SESSION_MODE_UNSPECIFIED +DIRECT +INDIRECT`, + }, + "syntax_format": { + Type: schema.TypeString, + Computed: true, + Description: `The syntax of the connection string. + Possible values: + SYNTAX_FORMAT_UNSPECIFIED +LONG +EZCONNECT +EZCONNECTPLUS`, + }, + "tls_authentication": { + Type: schema.TypeString, + Computed: true, + Description: `This field indicates the TLS authentication type of the connection. 
+ Possible values: + TLS_AUTHENTICATION_UNSPECIFIED +SERVER +MUTUAL`, + }, + "value": { + Type: schema.TypeString, + Computed: true, + Description: `The value of the connection string.`, + }, + }, + }, + }, + }, + }, + }, + "connection_urls": { + Type: schema.TypeList, + Computed: true, + Description: `The URLs for accessing Oracle Application Express (APEX) and SQL Developer +Web with a browser from a Compute instance. +https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/AutonomousDatabaseConnectionUrls`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "apex_uri": { + Type: schema.TypeString, + Computed: true, + Description: `Oracle Application Express (APEX) URL.`, + }, + "database_transforms_uri": { + Type: schema.TypeString, + Computed: true, + Description: `The URL of the Database Transforms for the Autonomous Database.`, + }, + "graph_studio_uri": { + Type: schema.TypeString, + Computed: true, + Description: `The URL of the Graph Studio for the Autonomous Database.`, + }, + "machine_learning_notebook_uri": { + Type: schema.TypeString, + Computed: true, + Description: `The URL of the Oracle Machine Learning (OML) Notebook for the Autonomous +Database.`, + }, + "machine_learning_user_management_uri": { + Type: schema.TypeString, + Computed: true, + Description: `The URL of Machine Learning user management for the Autonomous Database.`, + }, + "mongo_db_uri": { + Type: schema.TypeString, + Computed: true, + Description: `The URL of the MongoDB API for the Autonomous Database.`, + }, + "ords_uri": { + Type: schema.TypeString, + Computed: true, + Description: `The Oracle REST Data Services (ORDS) URL of the Web Access for the +Autonomous Database.`, + }, + "sql_dev_web_uri": { + Type: schema.TypeString, + Computed: true, + Description: `The URL of the Oracle SQL Developer Web for the Autonomous Database.`, + }, + }, + }, + }, + "data_safe_state": { + Type: schema.TypeString, + Computed: true, + Description: `The current state of the Data Safe registration for the +Autonomous Database. + Possible values: + DATA_SAFE_STATE_UNSPECIFIED +REGISTERING +REGISTERED +DEREGISTERING +NOT_REGISTERED +FAILED`, + }, + "database_management_state": { + Type: schema.TypeString, + Computed: true, + Description: `The current state of database management for the Autonomous Database. + Possible values: + DATABASE_MANAGEMENT_STATE_UNSPECIFIED +ENABLING +ENABLED +DISABLING +NOT_ENABLED +FAILED_ENABLING +FAILED_DISABLING`, + }, + "failed_data_recovery_duration": { + Type: schema.TypeString, + Computed: true, + Description: `This field indicates the number of seconds of data loss during a Data +Guard failover.`, + }, + "is_local_data_guard_enabled": { + Type: schema.TypeBool, + Computed: true, + Description: `This field indicates whether the Autonomous Database has local (in-region) +Data Guard enabled.`, + }, + "lifecycle_details": { + Type: schema.TypeString, + Computed: true, + Description: `The details of the current lifecycle state of the Autonomous Database.`, + }, + "local_adg_auto_failover_max_data_loss_limit": { + Type: schema.TypeInt, + Computed: true, + Description: `This field indicates the maximum data loss limit for an Autonomous +Database, in seconds.`, + }, + "local_disaster_recovery_type": { + Type: schema.TypeString, + Computed: true, + Description: `This field indicates the local disaster recovery (DR) type of an +Autonomous Database. 
+ Possible values: + LOCAL_DISASTER_RECOVERY_TYPE_UNSPECIFIED +ADG +BACKUP_BASED`, + }, + "local_standby_db": { + Type: schema.TypeList, + Computed: true, + Description: `Autonomous Data Guard standby database details. +https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/AutonomousDatabaseStandbySummary`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "data_guard_role_changed_time": { + Type: schema.TypeString, + Computed: true, + Description: `The date and time the Autonomous Data Guard role was switched for the +standby Autonomous Database.`, + }, + "disaster_recovery_role_changed_time": { + Type: schema.TypeString, + Computed: true, + Description: `The date and time the Disaster Recovery role was switched for the standby +Autonomous Database.`, + }, + "lag_time_duration": { + Type: schema.TypeString, + Computed: true, + Description: `The amount of time, in seconds, that the data of the standby database lags +in comparison to the data of the primary database.`, + }, + "lifecycle_details": { + Type: schema.TypeString, + Computed: true, + Description: `The additional details about the current lifecycle state of the +Autonomous Database.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `Possible values: + STATE_UNSPECIFIED +PROVISIONING +AVAILABLE +STOPPING +STOPPED +STARTING +TERMINATING +TERMINATED +UNAVAILABLE +RESTORE_IN_PROGRESS +RESTORE_FAILED +BACKUP_IN_PROGRESS +SCALE_IN_PROGRESS +AVAILABLE_NEEDS_ATTENTION +UPDATING +MAINTENANCE_IN_PROGRESS +RESTARTING +RECREATING +ROLE_CHANGE_IN_PROGRESS +UPGRADING +INACCESSIBLE +STANDBY`, + }, + }, + }, + }, + "maintenance_begin_time": { + Type: schema.TypeString, + Computed: true, + Description: `The date and time when maintenance will begin.`, + }, + "maintenance_end_time": { + Type: schema.TypeString, + Computed: true, + Description: `The date and time when maintenance will end.`, + }, + "memory_per_oracle_compute_unit_gbs": { + Type: schema.TypeInt, + Computed: true, + Description: `The amount of memory enabled per ECPU, in gigabytes.`, + }, + "memory_table_gbs": { + Type: schema.TypeInt, + Computed: true, + Description: `The memory assigned to in-memory tables in an Autonomous Database.`, + }, + "next_long_term_backup_time": { + Type: schema.TypeString, + Computed: true, + Description: `The long term backup schedule of the Autonomous Database.`, + }, + "oci_url": { + Type: schema.TypeString, + Computed: true, + Description: `The Oracle Cloud Infrastructure link for the Autonomous Database.`, + }, + "ocid": { + Type: schema.TypeString, + Computed: true, + Description: `OCID of the Autonomous Database. +https://docs.oracle.com/en-us/iaas/Content/General/Concepts/identifiers.htm#Oracle`, + }, + "open_mode": { + Type: schema.TypeString, + Computed: true, + Description: `This field indicates the current mode of the Autonomous Database. + Possible values: + OPEN_MODE_UNSPECIFIED +READ_ONLY +READ_WRITE`, + }, + "peer_db_ids": { + Type: schema.TypeList, + Computed: true, + Description: `The list of OCIDs of standby databases located in Autonomous Data Guard +remote regions that are associated with the source database.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "permission_level": { + Type: schema.TypeString, + Computed: true, + Description: `The permission level of the Autonomous Database. 
+ Possible values: + PERMISSION_LEVEL_UNSPECIFIED +RESTRICTED +UNRESTRICTED`, + }, + "private_endpoint": { + Type: schema.TypeString, + Computed: true, + Description: `The private endpoint for the Autonomous Database.`, + }, + "refreshable_mode": { + Type: schema.TypeString, + Computed: true, + Description: `The refresh mode of the cloned Autonomous Database. + Possible values: + REFRESHABLE_MODE_UNSPECIFIED +AUTOMATIC +MANUAL`, + }, + "refreshable_state": { + Type: schema.TypeString, + Computed: true, + Description: `The refresh state of the clone. + Possible values: + REFRESHABLE_STATE_UNSPECIFIED +REFRESHING +NOT_REFRESHING`, + }, + "role": { + Type: schema.TypeString, + Computed: true, + Description: `The Data Guard role of the Autonomous Database. + Possible values: + ROLE_UNSPECIFIED +PRIMARY +STANDBY +DISABLED_STANDBY +BACKUP_COPY +SNAPSHOT_STANDBY`, + }, + "scheduled_operation_details": { + Type: schema.TypeList, + Computed: true, + Description: `The list and details of the scheduled operations of the Autonomous +Database.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day_of_week": { + Type: schema.TypeString, + Computed: true, + Description: `Possible values: + DAY_OF_WEEK_UNSPECIFIED +MONDAY +TUESDAY +WEDNESDAY +THURSDAY +FRIDAY +SATURDAY +SUNDAY`, + }, + "start_time": { + Type: schema.TypeList, + Computed: true, + Description: `Represents a time of day. The date and time zone are either not significant +or are specified elsewhere. An API may choose to allow leap seconds. Related +types are 'google.type.Date' and 'google.protobuf.Timestamp'.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Computed: true, + Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose +to allow the value "24:00:00" for scenarios like business closing time.`, + }, + "minutes": { + Type: schema.TypeInt, + Computed: true, + Description: `Minutes of hour of day. Must be from 0 to 59.`, + }, + "nanos": { + Type: schema.TypeInt, + Computed: true, + Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, + }, + "seconds": { + Type: schema.TypeInt, + Computed: true, + Description: `Seconds of minutes of the time. Must normally be from 0 to 59. An API may +allow the value 60 if it allows leap-seconds.`, + }, + }, + }, + }, + "stop_time": { + Type: schema.TypeList, + Computed: true, + Description: `Represents a time of day. The date and time zone are either not significant +or are specified elsewhere. An API may choose to allow leap seconds. Related +types are 'google.type.Date' and 'google.protobuf.Timestamp'.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Computed: true, + Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose +to allow the value "24:00:00" for scenarios like business closing time.`, + }, + "minutes": { + Type: schema.TypeInt, + Computed: true, + Description: `Minutes of hour of day. Must be from 0 to 59.`, + }, + "nanos": { + Type: schema.TypeInt, + Computed: true, + Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, + }, + "seconds": { + Type: schema.TypeInt, + Computed: true, + Description: `Seconds of minutes of the time. Must normally be from 0 to 59. 
An API may +allow the value 60 if it allows leap-seconds.`, + }, + }, + }, + }, + }, + }, + }, + "sql_web_developer_url": { + Type: schema.TypeString, + Computed: true, + Description: `The SQL Web Developer URL for the Autonomous Database.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `Possible values: + STATE_UNSPECIFIED +PROVISIONING +AVAILABLE +STOPPING +STOPPED +STARTING +TERMINATING +TERMINATED +UNAVAILABLE +RESTORE_IN_PROGRESS +RESTORE_FAILED +BACKUP_IN_PROGRESS +SCALE_IN_PROGRESS +AVAILABLE_NEEDS_ATTENTION +UPDATING +MAINTENANCE_IN_PROGRESS +RESTARTING +RECREATING +ROLE_CHANGE_IN_PROGRESS +UPGRADING +INACCESSIBLE +STANDBY`, + }, + "supported_clone_regions": { + Type: schema.TypeList, + Computed: true, + Description: `The list of available regions that can be used to create a clone for the +Autonomous Database.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "total_auto_backup_storage_size_gbs": { + Type: schema.TypeFloat, + Computed: true, + Description: `The storage space used by automatic backups of Autonomous Database, in +gigabytes.`, + }, + "used_data_storage_size_tbs": { + Type: schema.TypeInt, + Computed: true, + Description: `The storage space used by Autonomous Database, in terabytes.`, + }, + }, + }, + }, + "admin_password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The password for the default ADMIN user.`, + }, + "display_name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The display name for the Autonomous Database. The name does not have to +be unique within your project.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `The labels or tags associated with the Autonomous Database. + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The date and time that the Autonomous Database was created.`, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "entitlement_id": { + Type: schema.TypeString, + Computed: true, + Description: `The ID of the subscription entitlement associated with the Autonomous +Database.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Identifier. 
The name of the Autonomous Database resource in the following format: +projects/{project}/locations/{region}/autonomousDatabases/{autonomous_database}`, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceOracleDatabaseAutonomousDatabaseCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + databaseProp, err := expandOracleDatabaseAutonomousDatabaseDatabase(d.Get("database"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("database"); !tpgresource.IsEmptyValue(reflect.ValueOf(databaseProp)) && (ok || !reflect.DeepEqual(v, databaseProp)) { + obj["database"] = databaseProp + } + displayNameProp, err := expandOracleDatabaseAutonomousDatabaseDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + adminPasswordProp, err := expandOracleDatabaseAutonomousDatabaseAdminPassword(d.Get("admin_password"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("admin_password"); !tpgresource.IsEmptyValue(reflect.ValueOf(adminPasswordProp)) && (ok || !reflect.DeepEqual(v, adminPasswordProp)) { + obj["adminPassword"] = adminPasswordProp + } + propertiesProp, err := expandOracleDatabaseAutonomousDatabaseProperties(d.Get("properties"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("properties"); !tpgresource.IsEmptyValue(reflect.ValueOf(propertiesProp)) && (ok || !reflect.DeepEqual(v, propertiesProp)) { + obj["properties"] = propertiesProp + } + networkProp, err := expandOracleDatabaseAutonomousDatabaseNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + cidrProp, err := expandOracleDatabaseAutonomousDatabaseCidr(d.Get("cidr"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("cidr"); !tpgresource.IsEmptyValue(reflect.ValueOf(cidrProp)) && (ok || !reflect.DeepEqual(v, cidrProp)) { + obj["cidr"] = cidrProp + } + labelsProp, err := expandOracleDatabaseAutonomousDatabaseEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{OracleDatabaseBasePath}}projects/{{project}}/locations/{{location}}/autonomousDatabases?autonomousDatabaseId={{autonomous_database_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new AutonomousDatabase: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return 
fmt.Errorf("Error fetching project for AutonomousDatabase: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating AutonomousDatabase: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/autonomousDatabases/{{autonomous_database_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = OracleDatabaseOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating AutonomousDatabase", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create AutonomousDatabase: %s", err) + } + + if err := d.Set("name", flattenOracleDatabaseAutonomousDatabaseName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/autonomousDatabases/{{autonomous_database_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating AutonomousDatabase %q: %#v", d.Id(), res) + + return resourceOracleDatabaseAutonomousDatabaseRead(d, meta) +} + +func resourceOracleDatabaseAutonomousDatabaseRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{OracleDatabaseBasePath}}projects/{{project}}/locations/{{location}}/autonomousDatabases/{{autonomous_database_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for AutonomousDatabase: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("OracleDatabaseAutonomousDatabase %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading AutonomousDatabase: %s", err) + } + + if err := d.Set("name", flattenOracleDatabaseAutonomousDatabaseName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading AutonomousDatabase: %s", err) + } + if err := d.Set("database", flattenOracleDatabaseAutonomousDatabaseDatabase(res["database"], d, config)); err != 
nil { + return fmt.Errorf("Error reading AutonomousDatabase: %s", err) + } + if err := d.Set("display_name", flattenOracleDatabaseAutonomousDatabaseDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading AutonomousDatabase: %s", err) + } + if err := d.Set("entitlement_id", flattenOracleDatabaseAutonomousDatabaseEntitlementId(res["entitlementId"], d, config)); err != nil { + return fmt.Errorf("Error reading AutonomousDatabase: %s", err) + } + if err := d.Set("properties", flattenOracleDatabaseAutonomousDatabaseProperties(res["properties"], d, config)); err != nil { + return fmt.Errorf("Error reading AutonomousDatabase: %s", err) + } + if err := d.Set("labels", flattenOracleDatabaseAutonomousDatabaseLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading AutonomousDatabase: %s", err) + } + if err := d.Set("network", flattenOracleDatabaseAutonomousDatabaseNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading AutonomousDatabase: %s", err) + } + if err := d.Set("cidr", flattenOracleDatabaseAutonomousDatabaseCidr(res["cidr"], d, config)); err != nil { + return fmt.Errorf("Error reading AutonomousDatabase: %s", err) + } + if err := d.Set("create_time", flattenOracleDatabaseAutonomousDatabaseCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading AutonomousDatabase: %s", err) + } + if err := d.Set("terraform_labels", flattenOracleDatabaseAutonomousDatabaseTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading AutonomousDatabase: %s", err) + } + if err := d.Set("effective_labels", flattenOracleDatabaseAutonomousDatabaseEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading AutonomousDatabase: %s", err) + } + + return nil +} + +func resourceOracleDatabaseAutonomousDatabaseUpdate(d *schema.ResourceData, meta interface{}) error { + // Only the root field "labels" and "terraform_labels" are mutable + return resourceOracleDatabaseAutonomousDatabaseRead(d, meta) +} + +func resourceOracleDatabaseAutonomousDatabaseDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for AutonomousDatabase: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{OracleDatabaseBasePath}}projects/{{project}}/locations/{{location}}/autonomousDatabases/{{autonomous_database_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting AutonomousDatabase %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "AutonomousDatabase") + } + + err = OracleDatabaseOperationWaitTime( + config, res, project, "Deleting AutonomousDatabase", userAgent, + d.Timeout(schema.TimeoutDelete)) 
+ + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting AutonomousDatabase %q: %#v", d.Id(), res) + return nil +} + +func resourceOracleDatabaseAutonomousDatabaseImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/autonomousDatabases/(?P<autonomous_database_id>[^/]+)$", + "^(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<autonomous_database_id>[^/]+)$", + "^(?P<location>[^/]+)/(?P<autonomous_database_id>[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/autonomousDatabases/{{autonomous_database_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenOracleDatabaseAutonomousDatabaseName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabaseDatabase(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabaseDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabaseEntitlementId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabaseProperties(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["ocid"] = + flattenOracleDatabaseAutonomousDatabasePropertiesOcid(original["ocid"], d, config) + transformed["compute_count"] = + flattenOracleDatabaseAutonomousDatabasePropertiesComputeCount(original["computeCount"], d, config) + transformed["data_storage_size_tb"] = + flattenOracleDatabaseAutonomousDatabasePropertiesDataStorageSizeTb(original["dataStorageSizeTb"], d, config) + transformed["data_storage_size_gb"] = + flattenOracleDatabaseAutonomousDatabasePropertiesDataStorageSizeGb(original["dataStorageSizeGb"], d, config) + transformed["db_workload"] = + flattenOracleDatabaseAutonomousDatabasePropertiesDbWorkload(original["dbWorkload"], d, config) + transformed["db_edition"] = + flattenOracleDatabaseAutonomousDatabasePropertiesDbEdition(original["dbEdition"], d, config) + transformed["character_set"] = + flattenOracleDatabaseAutonomousDatabasePropertiesCharacterSet(original["characterSet"], d, config) + transformed["n_character_set"] = + flattenOracleDatabaseAutonomousDatabasePropertiesNCharacterSet(original["nCharacterSet"], d, config) + transformed["private_endpoint_ip"] = + flattenOracleDatabaseAutonomousDatabasePropertiesPrivateEndpointIp(original["privateEndpointIp"], d, config) + transformed["private_endpoint_label"] = + flattenOracleDatabaseAutonomousDatabasePropertiesPrivateEndpointLabel(original["privateEndpointLabel"], d, config) + transformed["db_version"] = + flattenOracleDatabaseAutonomousDatabasePropertiesDbVersion(original["dbVersion"], d, config) + transformed["is_auto_scaling_enabled"] = + flattenOracleDatabaseAutonomousDatabasePropertiesIsAutoScalingEnabled(original["isAutoScalingEnabled"], d, config) + transformed["is_storage_auto_scaling_enabled"] = + 
flattenOracleDatabaseAutonomousDatabasePropertiesIsStorageAutoScalingEnabled(original["isStorageAutoScalingEnabled"], d, config) + transformed["license_type"] = + flattenOracleDatabaseAutonomousDatabasePropertiesLicenseType(original["licenseType"], d, config) + transformed["customer_contacts"] = + flattenOracleDatabaseAutonomousDatabasePropertiesCustomerContacts(original["customerContacts"], d, config) + transformed["maintenance_schedule_type"] = + flattenOracleDatabaseAutonomousDatabasePropertiesMaintenanceScheduleType(original["maintenanceScheduleType"], d, config) + transformed["mtls_connection_required"] = + flattenOracleDatabaseAutonomousDatabasePropertiesMtlsConnectionRequired(original["mtlsConnectionRequired"], d, config) + transformed["backup_retention_period_days"] = + flattenOracleDatabaseAutonomousDatabasePropertiesBackupRetentionPeriodDays(original["backupRetentionPeriodDays"], d, config) + transformed["actual_used_data_storage_size_tb"] = + flattenOracleDatabaseAutonomousDatabasePropertiesActualUsedDataStorageSizeTb(original["actualUsedDataStorageSizeTb"], d, config) + transformed["allocated_storage_size_tb"] = + flattenOracleDatabaseAutonomousDatabasePropertiesAllocatedStorageSizeTb(original["allocatedStorageSizeTb"], d, config) + transformed["apex_details"] = + flattenOracleDatabaseAutonomousDatabasePropertiesApexDetails(original["apexDetails"], d, config) + transformed["are_primary_allowlisted_ips_used"] = + flattenOracleDatabaseAutonomousDatabasePropertiesArePrimaryAllowlistedIpsUsed(original["arePrimaryAllowlistedIpsUsed"], d, config) + transformed["lifecycle_details"] = + flattenOracleDatabaseAutonomousDatabasePropertiesLifecycleDetails(original["lifecycleDetails"], d, config) + transformed["state"] = + flattenOracleDatabaseAutonomousDatabasePropertiesState(original["state"], d, config) + transformed["autonomous_container_database_id"] = + flattenOracleDatabaseAutonomousDatabasePropertiesAutonomousContainerDatabaseId(original["autonomousContainerDatabaseId"], d, config) + transformed["available_upgrade_versions"] = + flattenOracleDatabaseAutonomousDatabasePropertiesAvailableUpgradeVersions(original["availableUpgradeVersions"], d, config) + transformed["connection_strings"] = + flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStrings(original["connectionStrings"], d, config) + transformed["connection_urls"] = + flattenOracleDatabaseAutonomousDatabasePropertiesConnectionUrls(original["connectionUrls"], d, config) + transformed["failed_data_recovery_duration"] = + flattenOracleDatabaseAutonomousDatabasePropertiesFailedDataRecoveryDuration(original["failedDataRecoveryDuration"], d, config) + transformed["memory_table_gbs"] = + flattenOracleDatabaseAutonomousDatabasePropertiesMemoryTableGbs(original["memoryTableGbs"], d, config) + transformed["is_local_data_guard_enabled"] = + flattenOracleDatabaseAutonomousDatabasePropertiesIsLocalDataGuardEnabled(original["isLocalDataGuardEnabled"], d, config) + transformed["local_adg_auto_failover_max_data_loss_limit"] = + flattenOracleDatabaseAutonomousDatabasePropertiesLocalAdgAutoFailoverMaxDataLossLimit(original["localAdgAutoFailoverMaxDataLossLimit"], d, config) + transformed["local_standby_db"] = + flattenOracleDatabaseAutonomousDatabasePropertiesLocalStandbyDb(original["localStandbyDb"], d, config) + transformed["memory_per_oracle_compute_unit_gbs"] = + flattenOracleDatabaseAutonomousDatabasePropertiesMemoryPerOracleComputeUnitGbs(original["memoryPerOracleComputeUnitGbs"], d, config) + 
transformed["local_disaster_recovery_type"] = + flattenOracleDatabaseAutonomousDatabasePropertiesLocalDisasterRecoveryType(original["localDisasterRecoveryType"], d, config) + transformed["data_safe_state"] = + flattenOracleDatabaseAutonomousDatabasePropertiesDataSafeState(original["dataSafeState"], d, config) + transformed["database_management_state"] = + flattenOracleDatabaseAutonomousDatabasePropertiesDatabaseManagementState(original["databaseManagementState"], d, config) + transformed["open_mode"] = + flattenOracleDatabaseAutonomousDatabasePropertiesOpenMode(original["openMode"], d, config) + transformed["operations_insights_state"] = + flattenOracleDatabaseAutonomousDatabasePropertiesOperationsInsightsState(original["operationsInsightsState"], d, config) + transformed["peer_db_ids"] = + flattenOracleDatabaseAutonomousDatabasePropertiesPeerDbIds(original["peerDbIds"], d, config) + transformed["permission_level"] = + flattenOracleDatabaseAutonomousDatabasePropertiesPermissionLevel(original["permissionLevel"], d, config) + transformed["private_endpoint"] = + flattenOracleDatabaseAutonomousDatabasePropertiesPrivateEndpoint(original["privateEndpoint"], d, config) + transformed["refreshable_mode"] = + flattenOracleDatabaseAutonomousDatabasePropertiesRefreshableMode(original["refreshableMode"], d, config) + transformed["refreshable_state"] = + flattenOracleDatabaseAutonomousDatabasePropertiesRefreshableState(original["refreshableState"], d, config) + transformed["role"] = + flattenOracleDatabaseAutonomousDatabasePropertiesRole(original["role"], d, config) + transformed["scheduled_operation_details"] = + flattenOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetails(original["scheduledOperationDetails"], d, config) + transformed["sql_web_developer_url"] = + flattenOracleDatabaseAutonomousDatabasePropertiesSqlWebDeveloperUrl(original["sqlWebDeveloperUrl"], d, config) + transformed["supported_clone_regions"] = + flattenOracleDatabaseAutonomousDatabasePropertiesSupportedCloneRegions(original["supportedCloneRegions"], d, config) + transformed["used_data_storage_size_tbs"] = + flattenOracleDatabaseAutonomousDatabasePropertiesUsedDataStorageSizeTbs(original["usedDataStorageSizeTbs"], d, config) + transformed["oci_url"] = + flattenOracleDatabaseAutonomousDatabasePropertiesOciUrl(original["ociUrl"], d, config) + transformed["total_auto_backup_storage_size_gbs"] = + flattenOracleDatabaseAutonomousDatabasePropertiesTotalAutoBackupStorageSizeGbs(original["totalAutoBackupStorageSizeGbs"], d, config) + transformed["next_long_term_backup_time"] = + flattenOracleDatabaseAutonomousDatabasePropertiesNextLongTermBackupTime(original["nextLongTermBackupTime"], d, config) + transformed["maintenance_begin_time"] = + flattenOracleDatabaseAutonomousDatabasePropertiesMaintenanceBeginTime(original["maintenanceBeginTime"], d, config) + transformed["maintenance_end_time"] = + flattenOracleDatabaseAutonomousDatabasePropertiesMaintenanceEndTime(original["maintenanceEndTime"], d, config) + return []interface{}{transformed} +} +func flattenOracleDatabaseAutonomousDatabasePropertiesOcid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesComputeCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesDataStorageSizeTb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles 
the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesDataStorageSizeGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesDbWorkload(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesDbEdition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesCharacterSet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesNCharacterSet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesPrivateEndpointIp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesPrivateEndpointLabel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesDbVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesIsAutoScalingEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesIsStorageAutoScalingEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesLicenseType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesCustomerContacts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "email": flattenOracleDatabaseAutonomousDatabasePropertiesCustomerContactsEmail(original["email"], d, config), + }) + } + return transformed +} +func flattenOracleDatabaseAutonomousDatabasePropertiesCustomerContactsEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesMaintenanceScheduleType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} 
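
A note on the repeated "Handles the string fixed64 format" helpers in this file: Go's encoding/json decodes untyped JSON numbers into float64, while the API may serialize int64/fixed64 fields as decimal strings to avoid precision loss, so each integer flattener has to accept both shapes. Below is a minimal, standalone sketch of the same idea; flattenInt64 is a hypothetical stand-in for the generated helpers, which call tpgresource.StringToFixed64 (a wrapper around strconv.ParseInt) rather than calling strconv directly.

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// flattenInt64 mirrors the generated pattern: try the string fixed64 form
// first, then the float64 form produced by encoding/json for plain numbers,
// and otherwise return the value unchanged for Terraform core to handle.
func flattenInt64(v interface{}) interface{} {
	if strVal, ok := v.(string); ok {
		if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil {
			return intVal
		}
	}
	if floatVal, ok := v.(float64); ok {
		return int(floatVal)
	}
	return v
}

func main() {
	var payload map[string]interface{}
	_ = json.Unmarshal([]byte(`{"backupRetentionPeriodDays":"60","memoryTableGbs":24}`), &payload)
	fmt.Println(flattenInt64(payload["backupRetentionPeriodDays"])) // 60 (decoded from a string)
	fmt.Println(flattenInt64(payload["memoryTableGbs"]))            // 24 (decoded from a float64)
}
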
+ +func flattenOracleDatabaseAutonomousDatabasePropertiesMtlsConnectionRequired(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesBackupRetentionPeriodDays(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesActualUsedDataStorageSizeTb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesAllocatedStorageSizeTb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesApexDetails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["apex_version"] = + flattenOracleDatabaseAutonomousDatabasePropertiesApexDetailsApexVersion(original["apexVersion"], d, config) + transformed["ords_version"] = + flattenOracleDatabaseAutonomousDatabasePropertiesApexDetailsOrdsVersion(original["ordsVersion"], d, config) + return []interface{}{transformed} +} +func flattenOracleDatabaseAutonomousDatabasePropertiesApexDetailsApexVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesApexDetailsOrdsVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesArePrimaryAllowlistedIpsUsed(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesLifecycleDetails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesAutonomousContainerDatabaseId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesAvailableUpgradeVersions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStrings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["all_connection_strings"] = + flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsAllConnectionStrings(original["allConnectionStrings"], d, config) + transformed["dedicated"] = + 
flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsDedicated(original["dedicated"], d, config) + transformed["high"] = + flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsHigh(original["high"], d, config) + transformed["low"] = + flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsLow(original["low"], d, config) + transformed["medium"] = + flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsMedium(original["medium"], d, config) + transformed["profiles"] = + flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfiles(original["profiles"], d, config) + return []interface{}{transformed} +} +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsAllConnectionStrings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["high"] = + flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsAllConnectionStringsHigh(original["high"], d, config) + transformed["low"] = + flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsAllConnectionStringsLow(original["low"], d, config) + transformed["medium"] = + flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsAllConnectionStringsMedium(original["medium"], d, config) + return []interface{}{transformed} +} +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsAllConnectionStringsHigh(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsAllConnectionStringsLow(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsAllConnectionStringsMedium(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsDedicated(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsHigh(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsLow(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsMedium(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfiles(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "consumer_group": flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesConsumerGroup(original["consumerGroup"], d, config), + "display_name": 
flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesDisplayName(original["displayName"], d, config), + "host_format": flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesHostFormat(original["hostFormat"], d, config), + "is_regional": flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesIsRegional(original["isRegional"], d, config), + "protocol": flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesProtocol(original["protocol"], d, config), + "session_mode": flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesSessionMode(original["sessionMode"], d, config), + "syntax_format": flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesSyntaxFormat(original["syntaxFormat"], d, config), + "tls_authentication": flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesTlsAuthentication(original["tlsAuthentication"], d, config), + "value": flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesValue(original["value"], d, config), + }) + } + return transformed +} +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesConsumerGroup(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesHostFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesIsRegional(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesProtocol(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesSessionMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesSyntaxFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesTlsAuthentication(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionUrls(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["apex_uri"] = + flattenOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsApexUri(original["apexUri"], d, config) + transformed["database_transforms_uri"] = + flattenOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsDatabaseTransformsUri(original["databaseTransformsUri"], d, config) + transformed["graph_studio_uri"] = + 
flattenOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsGraphStudioUri(original["graphStudioUri"], d, config) + transformed["machine_learning_notebook_uri"] = + flattenOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsMachineLearningNotebookUri(original["machineLearningNotebookUri"], d, config) + transformed["machine_learning_user_management_uri"] = + flattenOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsMachineLearningUserManagementUri(original["machineLearningUserManagementUri"], d, config) + transformed["mongo_db_uri"] = + flattenOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsMongoDbUri(original["mongoDbUri"], d, config) + transformed["ords_uri"] = + flattenOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsOrdsUri(original["ordsUri"], d, config) + transformed["sql_dev_web_uri"] = + flattenOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsSqlDevWebUri(original["sqlDevWebUri"], d, config) + return []interface{}{transformed} +} +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsApexUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsDatabaseTransformsUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsGraphStudioUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsMachineLearningNotebookUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsMachineLearningUserManagementUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsMongoDbUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsOrdsUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsSqlDevWebUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesFailedDataRecoveryDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesMemoryTableGbs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesIsLocalDataGuardEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesLocalAdgAutoFailoverMaxDataLossLimit(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format 
+ if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesLocalStandbyDb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["lag_time_duration"] = + flattenOracleDatabaseAutonomousDatabasePropertiesLocalStandbyDbLagTimeDuration(original["lagTimeDuration"], d, config) + transformed["lifecycle_details"] = + flattenOracleDatabaseAutonomousDatabasePropertiesLocalStandbyDbLifecycleDetails(original["lifecycleDetails"], d, config) + transformed["state"] = + flattenOracleDatabaseAutonomousDatabasePropertiesLocalStandbyDbState(original["state"], d, config) + transformed["data_guard_role_changed_time"] = + flattenOracleDatabaseAutonomousDatabasePropertiesLocalStandbyDbDataGuardRoleChangedTime(original["dataGuardRoleChangedTime"], d, config) + transformed["disaster_recovery_role_changed_time"] = + flattenOracleDatabaseAutonomousDatabasePropertiesLocalStandbyDbDisasterRecoveryRoleChangedTime(original["disasterRecoveryRoleChangedTime"], d, config) + return []interface{}{transformed} +} +func flattenOracleDatabaseAutonomousDatabasePropertiesLocalStandbyDbLagTimeDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesLocalStandbyDbLifecycleDetails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesLocalStandbyDbState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesLocalStandbyDbDataGuardRoleChangedTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesLocalStandbyDbDisasterRecoveryRoleChangedTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesMemoryPerOracleComputeUnitGbs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesLocalDisasterRecoveryType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesDataSafeState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseAutonomousDatabasePropertiesDatabaseManagementState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
+func flattenOracleDatabaseAutonomousDatabasePropertiesOpenMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenOracleDatabaseAutonomousDatabasePropertiesOperationsInsightsState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenOracleDatabaseAutonomousDatabasePropertiesPeerDbIds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenOracleDatabaseAutonomousDatabasePropertiesPermissionLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenOracleDatabaseAutonomousDatabasePropertiesPrivateEndpoint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenOracleDatabaseAutonomousDatabasePropertiesRefreshableMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenOracleDatabaseAutonomousDatabasePropertiesRefreshableState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenOracleDatabaseAutonomousDatabasePropertiesRole(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return v
+	}
+	l := v.([]interface{})
+	transformed := make([]interface{}, 0, len(l))
+	for _, raw := range l {
+		original := raw.(map[string]interface{})
+		if len(original) < 1 {
+			// Do not include empty json objects coming back from the api
+			continue
+		}
+		transformed = append(transformed, map[string]interface{}{
+			"day_of_week": flattenOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsDayOfWeek(original["dayOfWeek"], d, config),
+			"start_time":  flattenOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStartTime(original["startTime"], d, config),
+			"stop_time":   flattenOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStopTime(original["stopTime"], d, config),
+		})
+	}
+	return transformed
+}
+func flattenOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsDayOfWeek(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return nil
+	}
+	original := v.(map[string]interface{})
+	if len(original) == 0 {
+		return nil
+	}
+	transformed := make(map[string]interface{})
+	transformed["hours"] =
+		flattenOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStartTimeHours(original["hours"], d, config)
+	transformed["minutes"] =
+		flattenOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStartTimeMinutes(original["minutes"], d, config)
+	transformed["seconds"] =
+		flattenOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStartTimeSeconds(original["seconds"], d, config)
+	transformed["nanos"] =
+		flattenOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStartTimeNanos(original["nanos"], d, config)
+	return []interface{}{transformed}
+}
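+// startTime and stopTime are TimeOfDay-style objects (hours, minutes, seconds,
+// nanos); each component goes through the same int normalization below.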
+func flattenOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStartTimeHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
+
+	return v // let terraform core handle it otherwise
+}
+
+func flattenOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStartTimeMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
+
+	return v // let terraform core handle it otherwise
+}
+
+func flattenOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStartTimeSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
+
+	return v // let terraform core handle it otherwise
+}
+
+func flattenOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStartTimeNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
+
+	return v // let terraform core handle it otherwise
+}
+
+func flattenOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStopTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return nil
+	}
+	original := v.(map[string]interface{})
+	if len(original) == 0 {
+		return nil
+	}
+	transformed := make(map[string]interface{})
+	transformed["hours"] =
+		flattenOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStopTimeHours(original["hours"], d, config)
+	transformed["minutes"] =
+		flattenOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStopTimeMinutes(original["minutes"], d, config)
+	transformed["seconds"] =
+		flattenOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStopTimeSeconds(original["seconds"], d, config)
+	transformed["nanos"] =
+		flattenOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStopTimeNanos(original["nanos"], d, config)
+	return []interface{}{transformed}
+}
+func flattenOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStopTimeHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
+
+	return v // let terraform core handle it otherwise
+}
+
+func flattenOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStopTimeMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
+
+	return v // let terraform core handle it otherwise
+}
+
+func flattenOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStopTimeSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
+
+	return v // let terraform core handle it otherwise
+}
+
+func flattenOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStopTimeNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
+
+	return v // let terraform core handle it otherwise
+}
+
+func flattenOracleDatabaseAutonomousDatabasePropertiesSqlWebDeveloperUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenOracleDatabaseAutonomousDatabasePropertiesSupportedCloneRegions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenOracleDatabaseAutonomousDatabasePropertiesUsedDataStorageSizeTbs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
+
+	return v // let terraform core handle it otherwise
+}
+
+func flattenOracleDatabaseAutonomousDatabasePropertiesOciUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenOracleDatabaseAutonomousDatabasePropertiesTotalAutoBackupStorageSizeGbs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenOracleDatabaseAutonomousDatabasePropertiesNextLongTermBackupTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenOracleDatabaseAutonomousDatabasePropertiesMaintenanceBeginTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenOracleDatabaseAutonomousDatabasePropertiesMaintenanceEndTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
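+// The labels and terraform_labels flatteners intersect the API response with
+// the keys already tracked in state, so labels managed outside Terraform do
+// not show up as diffs.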
+func flattenOracleDatabaseAutonomousDatabaseLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return v
+	}
+
+	transformed := make(map[string]interface{})
+	if l, ok := d.GetOkExists("labels"); ok {
+		for k := range l.(map[string]interface{}) {
+			transformed[k] = v.(map[string]interface{})[k]
+		}
+	}
+
+	return transformed
+}
+
+func flattenOracleDatabaseAutonomousDatabaseNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenOracleDatabaseAutonomousDatabaseCidr(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenOracleDatabaseAutonomousDatabaseCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenOracleDatabaseAutonomousDatabaseTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return v
+	}
+
+	transformed := make(map[string]interface{})
+	if l, ok := d.GetOkExists("terraform_labels"); ok {
+		for k := range l.(map[string]interface{}) {
+			transformed[k] = v.(map[string]interface{})[k]
+		}
+	}
+
+	return transformed
+}
+
+func flattenOracleDatabaseAutonomousDatabaseEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func expandOracleDatabaseAutonomousDatabaseDatabase(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabaseDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabaseAdminPassword(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
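+// The generated expanders below mirror the flatteners: each field is expanded
+// on its own and only written into the request map when reflect reports a
+// valid, non-empty value.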
+func expandOracleDatabaseAutonomousDatabaseProperties(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	l := v.([]interface{})
+	if len(l) == 0 || l[0] == nil {
+		return nil, nil
+	}
+	raw := l[0]
+	original := raw.(map[string]interface{})
+	transformed := make(map[string]interface{})
+
+	transformedOcid, err := expandOracleDatabaseAutonomousDatabasePropertiesOcid(original["ocid"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedOcid); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["ocid"] = transformedOcid
+	}
+
+	transformedComputeCount, err := expandOracleDatabaseAutonomousDatabasePropertiesComputeCount(original["compute_count"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedComputeCount); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["computeCount"] = transformedComputeCount
+	}
+
+	transformedDataStorageSizeTb, err := expandOracleDatabaseAutonomousDatabasePropertiesDataStorageSizeTb(original["data_storage_size_tb"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedDataStorageSizeTb); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["dataStorageSizeTb"] = transformedDataStorageSizeTb
+	}
+
+	transformedDataStorageSizeGb, err := expandOracleDatabaseAutonomousDatabasePropertiesDataStorageSizeGb(original["data_storage_size_gb"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedDataStorageSizeGb); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["dataStorageSizeGb"] = transformedDataStorageSizeGb
+	}
+
+	transformedDbWorkload, err := expandOracleDatabaseAutonomousDatabasePropertiesDbWorkload(original["db_workload"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedDbWorkload); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["dbWorkload"] = transformedDbWorkload
+	}
+
+	transformedDbEdition, err := expandOracleDatabaseAutonomousDatabasePropertiesDbEdition(original["db_edition"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedDbEdition); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["dbEdition"] = transformedDbEdition
+	}
+
+	transformedCharacterSet, err := expandOracleDatabaseAutonomousDatabasePropertiesCharacterSet(original["character_set"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedCharacterSet); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["characterSet"] = transformedCharacterSet
+	}
+
+	transformedNCharacterSet, err := expandOracleDatabaseAutonomousDatabasePropertiesNCharacterSet(original["n_character_set"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedNCharacterSet); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["nCharacterSet"] = transformedNCharacterSet
+	}
+
+	transformedPrivateEndpointIp, err := expandOracleDatabaseAutonomousDatabasePropertiesPrivateEndpointIp(original["private_endpoint_ip"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedPrivateEndpointIp); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["privateEndpointIp"] = transformedPrivateEndpointIp
+	}
+
+	transformedPrivateEndpointLabel, err := expandOracleDatabaseAutonomousDatabasePropertiesPrivateEndpointLabel(original["private_endpoint_label"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedPrivateEndpointLabel); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["privateEndpointLabel"] = transformedPrivateEndpointLabel
+	}
+
+	transformedDbVersion, err := expandOracleDatabaseAutonomousDatabasePropertiesDbVersion(original["db_version"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedDbVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["dbVersion"] = transformedDbVersion
+	}
+
+	transformedIsAutoScalingEnabled, err := expandOracleDatabaseAutonomousDatabasePropertiesIsAutoScalingEnabled(original["is_auto_scaling_enabled"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedIsAutoScalingEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["isAutoScalingEnabled"] = transformedIsAutoScalingEnabled
+	}
+
+	transformedIsStorageAutoScalingEnabled, err := expandOracleDatabaseAutonomousDatabasePropertiesIsStorageAutoScalingEnabled(original["is_storage_auto_scaling_enabled"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedIsStorageAutoScalingEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["isStorageAutoScalingEnabled"] = transformedIsStorageAutoScalingEnabled
+	}
+
+	transformedLicenseType, err := expandOracleDatabaseAutonomousDatabasePropertiesLicenseType(original["license_type"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedLicenseType); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["licenseType"] = transformedLicenseType
+	}
+
+	transformedCustomerContacts, err := expandOracleDatabaseAutonomousDatabasePropertiesCustomerContacts(original["customer_contacts"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedCustomerContacts); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["customerContacts"] = transformedCustomerContacts
+	}
+
+	transformedMaintenanceScheduleType, err := expandOracleDatabaseAutonomousDatabasePropertiesMaintenanceScheduleType(original["maintenance_schedule_type"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedMaintenanceScheduleType); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["maintenanceScheduleType"] = transformedMaintenanceScheduleType
+	}
+
+	transformedMtlsConnectionRequired, err := expandOracleDatabaseAutonomousDatabasePropertiesMtlsConnectionRequired(original["mtls_connection_required"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedMtlsConnectionRequired); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["mtlsConnectionRequired"] = transformedMtlsConnectionRequired
+	}
+
+	transformedBackupRetentionPeriodDays, err := expandOracleDatabaseAutonomousDatabasePropertiesBackupRetentionPeriodDays(original["backup_retention_period_days"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedBackupRetentionPeriodDays); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["backupRetentionPeriodDays"] = transformedBackupRetentionPeriodDays
+	}
+
+	transformedActualUsedDataStorageSizeTb, err := expandOracleDatabaseAutonomousDatabasePropertiesActualUsedDataStorageSizeTb(original["actual_used_data_storage_size_tb"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedActualUsedDataStorageSizeTb); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["actualUsedDataStorageSizeTb"] = transformedActualUsedDataStorageSizeTb
+	}
+
+	transformedAllocatedStorageSizeTb, err := expandOracleDatabaseAutonomousDatabasePropertiesAllocatedStorageSizeTb(original["allocated_storage_size_tb"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedAllocatedStorageSizeTb); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["allocatedStorageSizeTb"] = transformedAllocatedStorageSizeTb
+	}
+
+	transformedApexDetails, err := expandOracleDatabaseAutonomousDatabasePropertiesApexDetails(original["apex_details"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedApexDetails); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["apexDetails"] = transformedApexDetails
+	}
+
+	transformedArePrimaryAllowlistedIpsUsed, err := expandOracleDatabaseAutonomousDatabasePropertiesArePrimaryAllowlistedIpsUsed(original["are_primary_allowlisted_ips_used"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedArePrimaryAllowlistedIpsUsed); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["arePrimaryAllowlistedIpsUsed"] = transformedArePrimaryAllowlistedIpsUsed
+	}
+
+	transformedLifecycleDetails, err := expandOracleDatabaseAutonomousDatabasePropertiesLifecycleDetails(original["lifecycle_details"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedLifecycleDetails); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["lifecycleDetails"] = transformedLifecycleDetails
+	}
+
+	transformedState, err := expandOracleDatabaseAutonomousDatabasePropertiesState(original["state"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedState); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["state"] = transformedState
+	}
+
+	transformedAutonomousContainerDatabaseId, err := expandOracleDatabaseAutonomousDatabasePropertiesAutonomousContainerDatabaseId(original["autonomous_container_database_id"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedAutonomousContainerDatabaseId); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["autonomousContainerDatabaseId"] = transformedAutonomousContainerDatabaseId
+	}
+
+	transformedAvailableUpgradeVersions, err := expandOracleDatabaseAutonomousDatabasePropertiesAvailableUpgradeVersions(original["available_upgrade_versions"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedAvailableUpgradeVersions); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["availableUpgradeVersions"] = transformedAvailableUpgradeVersions
+	}
+
+	transformedConnectionStrings, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionStrings(original["connection_strings"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedConnectionStrings); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["connectionStrings"] = transformedConnectionStrings
+	}
+
+	transformedConnectionUrls, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionUrls(original["connection_urls"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedConnectionUrls); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["connectionUrls"] = transformedConnectionUrls
+	}
+
+	transformedFailedDataRecoveryDuration, err := expandOracleDatabaseAutonomousDatabasePropertiesFailedDataRecoveryDuration(original["failed_data_recovery_duration"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedFailedDataRecoveryDuration); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["failedDataRecoveryDuration"] = transformedFailedDataRecoveryDuration
+	}
+
+	transformedMemoryTableGbs, err := expandOracleDatabaseAutonomousDatabasePropertiesMemoryTableGbs(original["memory_table_gbs"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedMemoryTableGbs); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["memoryTableGbs"] = transformedMemoryTableGbs
+	}
+
+	transformedIsLocalDataGuardEnabled, err := expandOracleDatabaseAutonomousDatabasePropertiesIsLocalDataGuardEnabled(original["is_local_data_guard_enabled"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedIsLocalDataGuardEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["isLocalDataGuardEnabled"] = transformedIsLocalDataGuardEnabled
+	}
+
+	transformedLocalAdgAutoFailoverMaxDataLossLimit, err := expandOracleDatabaseAutonomousDatabasePropertiesLocalAdgAutoFailoverMaxDataLossLimit(original["local_adg_auto_failover_max_data_loss_limit"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedLocalAdgAutoFailoverMaxDataLossLimit); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["localAdgAutoFailoverMaxDataLossLimit"] = transformedLocalAdgAutoFailoverMaxDataLossLimit
+	}
+
+	transformedLocalStandbyDb, err := expandOracleDatabaseAutonomousDatabasePropertiesLocalStandbyDb(original["local_standby_db"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedLocalStandbyDb); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["localStandbyDb"] = transformedLocalStandbyDb
+	}
+
+	transformedMemoryPerOracleComputeUnitGbs, err := expandOracleDatabaseAutonomousDatabasePropertiesMemoryPerOracleComputeUnitGbs(original["memory_per_oracle_compute_unit_gbs"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedMemoryPerOracleComputeUnitGbs); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["memoryPerOracleComputeUnitGbs"] = transformedMemoryPerOracleComputeUnitGbs
+	}
+
+	transformedLocalDisasterRecoveryType, err := expandOracleDatabaseAutonomousDatabasePropertiesLocalDisasterRecoveryType(original["local_disaster_recovery_type"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedLocalDisasterRecoveryType); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["localDisasterRecoveryType"] = transformedLocalDisasterRecoveryType
+	}
+
+	transformedDataSafeState, err := expandOracleDatabaseAutonomousDatabasePropertiesDataSafeState(original["data_safe_state"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedDataSafeState); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["dataSafeState"] = transformedDataSafeState
+	}
+
+	transformedDatabaseManagementState, err := expandOracleDatabaseAutonomousDatabasePropertiesDatabaseManagementState(original["database_management_state"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedDatabaseManagementState); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["databaseManagementState"] = transformedDatabaseManagementState
+	}
+
+	transformedOpenMode, err := expandOracleDatabaseAutonomousDatabasePropertiesOpenMode(original["open_mode"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedOpenMode); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["openMode"] = transformedOpenMode
+	}
+
+	transformedOperationsInsightsState, err := expandOracleDatabaseAutonomousDatabasePropertiesOperationsInsightsState(original["operations_insights_state"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedOperationsInsightsState); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["operationsInsightsState"] = transformedOperationsInsightsState
+	}
+
+	transformedPeerDbIds, err := expandOracleDatabaseAutonomousDatabasePropertiesPeerDbIds(original["peer_db_ids"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedPeerDbIds); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["peerDbIds"] = transformedPeerDbIds
+	}
+
+	transformedPermissionLevel, err := expandOracleDatabaseAutonomousDatabasePropertiesPermissionLevel(original["permission_level"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedPermissionLevel); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["permissionLevel"] = transformedPermissionLevel
+	}
+
+	transformedPrivateEndpoint, err := expandOracleDatabaseAutonomousDatabasePropertiesPrivateEndpoint(original["private_endpoint"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedPrivateEndpoint); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["privateEndpoint"] = transformedPrivateEndpoint
+	}
+
+	transformedRefreshableMode, err := expandOracleDatabaseAutonomousDatabasePropertiesRefreshableMode(original["refreshable_mode"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedRefreshableMode); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["refreshableMode"] = transformedRefreshableMode
+	}
+
+	transformedRefreshableState, err := expandOracleDatabaseAutonomousDatabasePropertiesRefreshableState(original["refreshable_state"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedRefreshableState); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["refreshableState"] = transformedRefreshableState
+	}
+
+	transformedRole, err := expandOracleDatabaseAutonomousDatabasePropertiesRole(original["role"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedRole); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["role"] = transformedRole
+	}
+
+	transformedScheduledOperationDetails, err := expandOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetails(original["scheduled_operation_details"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedScheduledOperationDetails); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["scheduledOperationDetails"] = transformedScheduledOperationDetails
+	}
+
+	transformedSqlWebDeveloperUrl, err := expandOracleDatabaseAutonomousDatabasePropertiesSqlWebDeveloperUrl(original["sql_web_developer_url"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedSqlWebDeveloperUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["sqlWebDeveloperUrl"] = transformedSqlWebDeveloperUrl
+	}
+
+	transformedSupportedCloneRegions, err := expandOracleDatabaseAutonomousDatabasePropertiesSupportedCloneRegions(original["supported_clone_regions"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedSupportedCloneRegions); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["supportedCloneRegions"] = transformedSupportedCloneRegions
+	}
+
+	transformedUsedDataStorageSizeTbs, err := expandOracleDatabaseAutonomousDatabasePropertiesUsedDataStorageSizeTbs(original["used_data_storage_size_tbs"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedUsedDataStorageSizeTbs); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["usedDataStorageSizeTbs"] = transformedUsedDataStorageSizeTbs
+	}
+
+	transformedOciUrl, err := expandOracleDatabaseAutonomousDatabasePropertiesOciUrl(original["oci_url"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedOciUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["ociUrl"] = transformedOciUrl
+	}
+
+	transformedTotalAutoBackupStorageSizeGbs, err := expandOracleDatabaseAutonomousDatabasePropertiesTotalAutoBackupStorageSizeGbs(original["total_auto_backup_storage_size_gbs"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedTotalAutoBackupStorageSizeGbs); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["totalAutoBackupStorageSizeGbs"] = transformedTotalAutoBackupStorageSizeGbs
+	}
+
+	transformedNextLongTermBackupTime, err := expandOracleDatabaseAutonomousDatabasePropertiesNextLongTermBackupTime(original["next_long_term_backup_time"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedNextLongTermBackupTime); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["nextLongTermBackupTime"] = transformedNextLongTermBackupTime
+	}
+
+	transformedMaintenanceBeginTime, err := expandOracleDatabaseAutonomousDatabasePropertiesMaintenanceBeginTime(original["maintenance_begin_time"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedMaintenanceBeginTime); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["maintenanceBeginTime"] = transformedMaintenanceBeginTime
+	}
+
+	transformedMaintenanceEndTime, err := expandOracleDatabaseAutonomousDatabasePropertiesMaintenanceEndTime(original["maintenance_end_time"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedMaintenanceEndTime); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["maintenanceEndTime"] = transformedMaintenanceEndTime
+	}
+
+	return transformed, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesOcid(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesComputeCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesDataStorageSizeTb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesDataStorageSizeGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesDbWorkload(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesDbEdition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesCharacterSet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesNCharacterSet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesPrivateEndpointIp(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesPrivateEndpointLabel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesDbVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesIsAutoScalingEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesIsStorageAutoScalingEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesLicenseType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesCustomerContacts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	l := v.([]interface{})
+	req := make([]interface{}, 0, len(l))
+	for _, raw := range l {
+		if raw == nil {
+			continue
+		}
+		original := raw.(map[string]interface{})
+		transformed := make(map[string]interface{})
+
+		transformedEmail, err := expandOracleDatabaseAutonomousDatabasePropertiesCustomerContactsEmail(original["email"], d, config)
+		if err != nil {
+			return nil, err
+		} else if val := reflect.ValueOf(transformedEmail); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+			transformed["email"] = transformedEmail
+		}
+
+		req = append(req, transformed)
+	}
+	return req, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesCustomerContactsEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesMaintenanceScheduleType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesMtlsConnectionRequired(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesBackupRetentionPeriodDays(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesActualUsedDataStorageSizeTb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesAllocatedStorageSizeTb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
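+// Nested objects come out of Terraform as single-element lists; expanders for
+// them unwrap l[0] and return nil for an empty or unset block.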
+func expandOracleDatabaseAutonomousDatabasePropertiesApexDetails(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	l := v.([]interface{})
+	if len(l) == 0 || l[0] == nil {
+		return nil, nil
+	}
+	raw := l[0]
+	original := raw.(map[string]interface{})
+	transformed := make(map[string]interface{})
+
+	transformedApexVersion, err := expandOracleDatabaseAutonomousDatabasePropertiesApexDetailsApexVersion(original["apex_version"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedApexVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["apexVersion"] = transformedApexVersion
+	}
+
+	transformedOrdsVersion, err := expandOracleDatabaseAutonomousDatabasePropertiesApexDetailsOrdsVersion(original["ords_version"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedOrdsVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["ordsVersion"] = transformedOrdsVersion
+	}
+
+	return transformed, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesApexDetailsApexVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesApexDetailsOrdsVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesArePrimaryAllowlistedIpsUsed(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesLifecycleDetails(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesAutonomousContainerDatabaseId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesAvailableUpgradeVersions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionStrings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	l := v.([]interface{})
+	if len(l) == 0 || l[0] == nil {
+		return nil, nil
+	}
+	raw := l[0]
+	original := raw.(map[string]interface{})
+	transformed := make(map[string]interface{})
+
+	transformedAllConnectionStrings, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsAllConnectionStrings(original["all_connection_strings"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedAllConnectionStrings); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["allConnectionStrings"] = transformedAllConnectionStrings
+	}
+
+	transformedDedicated, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsDedicated(original["dedicated"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedDedicated); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["dedicated"] = transformedDedicated
+	}
+
+	transformedHigh, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsHigh(original["high"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedHigh); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["high"] = transformedHigh
+	}
+
+	transformedLow, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsLow(original["low"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedLow); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["low"] = transformedLow
+	}
+
+	transformedMedium, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsMedium(original["medium"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedMedium); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["medium"] = transformedMedium
+	}
+
+	transformedProfiles, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfiles(original["profiles"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedProfiles); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["profiles"] = transformedProfiles
+	}
+
+	return transformed, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsAllConnectionStrings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	l := v.([]interface{})
+	if len(l) == 0 || l[0] == nil {
+		return nil, nil
+	}
+	raw := l[0]
+	original := raw.(map[string]interface{})
+	transformed := make(map[string]interface{})
+
+	transformedHigh, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsAllConnectionStringsHigh(original["high"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedHigh); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["high"] = transformedHigh
+	}
+
+	transformedLow, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsAllConnectionStringsLow(original["low"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedLow); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["low"] = transformedLow
+	}
+
+	transformedMedium, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsAllConnectionStringsMedium(original["medium"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedMedium); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["medium"] = transformedMedium
+	}
+
+	return transformed, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsAllConnectionStringsHigh(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsAllConnectionStringsLow(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsAllConnectionStringsMedium(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsDedicated(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsHigh(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsLow(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsMedium(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfiles(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	l := v.([]interface{})
+	req := make([]interface{}, 0, len(l))
+	for _, raw := range l {
+		if raw == nil {
+			continue
+		}
+		original := raw.(map[string]interface{})
+		transformed := make(map[string]interface{})
+
+		transformedConsumerGroup, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesConsumerGroup(original["consumer_group"], d, config)
+		if err != nil {
+			return nil, err
+		} else if val := reflect.ValueOf(transformedConsumerGroup); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+			transformed["consumerGroup"] = transformedConsumerGroup
+		}
+
+		transformedDisplayName, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesDisplayName(original["display_name"], d, config)
+		if err != nil {
+			return nil, err
+		} else if val := reflect.ValueOf(transformedDisplayName); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+			transformed["displayName"] = transformedDisplayName
+		}
+
+		transformedHostFormat, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesHostFormat(original["host_format"], d, config)
+		if err != nil {
+			return nil, err
+		} else if val := reflect.ValueOf(transformedHostFormat); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+			transformed["hostFormat"] = transformedHostFormat
+		}
+
+		transformedIsRegional, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesIsRegional(original["is_regional"], d, config)
+		if err != nil {
+			return nil, err
+		} else if val := reflect.ValueOf(transformedIsRegional); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+			transformed["isRegional"] = transformedIsRegional
+		}
+
+		transformedProtocol, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesProtocol(original["protocol"], d, config)
+		if err != nil {
+			return nil, err
+		} else if val := reflect.ValueOf(transformedProtocol); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+			transformed["protocol"] = transformedProtocol
+		}
+
+		transformedSessionMode, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesSessionMode(original["session_mode"], d, config)
+		if err != nil {
+			return nil, err
+		} else if val := reflect.ValueOf(transformedSessionMode); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+			transformed["sessionMode"] = transformedSessionMode
+		}
+
+		transformedSyntaxFormat, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesSyntaxFormat(original["syntax_format"], d, config)
+		if err != nil {
+			return nil, err
+		} else if val := reflect.ValueOf(transformedSyntaxFormat); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+			transformed["syntaxFormat"] = transformedSyntaxFormat
+		}
+
+		transformedTlsAuthentication, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesTlsAuthentication(original["tls_authentication"], d, config)
+		if err != nil {
+			return nil, err
+		} else if val := reflect.ValueOf(transformedTlsAuthentication); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+			transformed["tlsAuthentication"] = transformedTlsAuthentication
+		}
+
+		transformedValue, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesValue(original["value"], d, config)
+		if err != nil {
+			return nil, err
+		} else if val := reflect.ValueOf(transformedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+			transformed["value"] = transformedValue
+		}
+
+		req = append(req, transformed)
+	}
+	return req, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesConsumerGroup(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesHostFormat(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesIsRegional(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesProtocol(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesSessionMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesSyntaxFormat(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesTlsAuthentication(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionStringsProfilesValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionUrls(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	l := v.([]interface{})
+	if len(l) == 0 || l[0] == nil {
+		return nil, nil
+	}
+	raw := l[0]
+	original := raw.(map[string]interface{})
+	transformed := make(map[string]interface{})
+
+	transformedApexUri, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsApexUri(original["apex_uri"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedApexUri); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["apexUri"] = transformedApexUri
+	}
+
+	transformedDatabaseTransformsUri, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsDatabaseTransformsUri(original["database_transforms_uri"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedDatabaseTransformsUri); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["databaseTransformsUri"] = transformedDatabaseTransformsUri
+	}
+
+	transformedGraphStudioUri, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsGraphStudioUri(original["graph_studio_uri"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedGraphStudioUri); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["graphStudioUri"] = transformedGraphStudioUri
+	}
+
+	transformedMachineLearningNotebookUri, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsMachineLearningNotebookUri(original["machine_learning_notebook_uri"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedMachineLearningNotebookUri); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["machineLearningNotebookUri"] = transformedMachineLearningNotebookUri
+	}
+
+	transformedMachineLearningUserManagementUri, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsMachineLearningUserManagementUri(original["machine_learning_user_management_uri"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedMachineLearningUserManagementUri); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["machineLearningUserManagementUri"] = transformedMachineLearningUserManagementUri
+	}
+
+	transformedMongoDbUri, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsMongoDbUri(original["mongo_db_uri"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedMongoDbUri); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["mongoDbUri"] = transformedMongoDbUri
+	}
+
+	transformedOrdsUri, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsOrdsUri(original["ords_uri"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedOrdsUri); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["ordsUri"] = transformedOrdsUri
+	}
+
+	transformedSqlDevWebUri, err := expandOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsSqlDevWebUri(original["sql_dev_web_uri"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedSqlDevWebUri); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["sqlDevWebUri"] = transformedSqlDevWebUri
+	}
+
+	return transformed, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsApexUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsDatabaseTransformsUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsGraphStudioUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsMachineLearningNotebookUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsMachineLearningUserManagementUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsMongoDbUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsOrdsUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesConnectionUrlsSqlDevWebUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesFailedDataRecoveryDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesMemoryTableGbs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesIsLocalDataGuardEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesLocalAdgAutoFailoverMaxDataLossLimit(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesLocalStandbyDb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	l := v.([]interface{})
+	if len(l) == 0 || l[0] == nil {
+		return nil, nil
+	}
+	raw := l[0]
+	original := raw.(map[string]interface{})
+	transformed := make(map[string]interface{})
+
+	transformedLagTimeDuration, err := expandOracleDatabaseAutonomousDatabasePropertiesLocalStandbyDbLagTimeDuration(original["lag_time_duration"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedLagTimeDuration); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["lagTimeDuration"] = transformedLagTimeDuration
+	}
+
+	transformedLifecycleDetails, err := expandOracleDatabaseAutonomousDatabasePropertiesLocalStandbyDbLifecycleDetails(original["lifecycle_details"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedLifecycleDetails); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["lifecycleDetails"] = transformedLifecycleDetails
+	}
+
+	transformedState, err := expandOracleDatabaseAutonomousDatabasePropertiesLocalStandbyDbState(original["state"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedState); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["state"] = transformedState
+	}
+
+	transformedDataGuardRoleChangedTime, err := expandOracleDatabaseAutonomousDatabasePropertiesLocalStandbyDbDataGuardRoleChangedTime(original["data_guard_role_changed_time"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedDataGuardRoleChangedTime); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["dataGuardRoleChangedTime"] = transformedDataGuardRoleChangedTime
+	}
+
+	transformedDisasterRecoveryRoleChangedTime, err := expandOracleDatabaseAutonomousDatabasePropertiesLocalStandbyDbDisasterRecoveryRoleChangedTime(original["disaster_recovery_role_changed_time"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedDisasterRecoveryRoleChangedTime); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["disasterRecoveryRoleChangedTime"] = transformedDisasterRecoveryRoleChangedTime
+	}
+
+	return transformed, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesLocalStandbyDbLagTimeDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandOracleDatabaseAutonomousDatabasePropertiesLocalStandbyDbLifecycleDetails(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
expandOracleDatabaseAutonomousDatabasePropertiesLocalStandbyDbState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesLocalStandbyDbDataGuardRoleChangedTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesLocalStandbyDbDisasterRecoveryRoleChangedTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesMemoryPerOracleComputeUnitGbs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesLocalDisasterRecoveryType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesDataSafeState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesDatabaseManagementState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesOpenMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesOperationsInsightsState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesPeerDbIds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesPermissionLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesPrivateEndpoint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesRefreshableMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesRefreshableState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesRole(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetails(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDayOfWeek, err := expandOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsDayOfWeek(original["day_of_week"], d, 
config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDayOfWeek); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dayOfWeek"] = transformedDayOfWeek + } + + transformedStartTime, err := expandOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStartTime(original["start_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStartTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["startTime"] = transformedStartTime + } + + transformedStopTime, err := expandOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStopTime(original["stop_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStopTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["stopTime"] = transformedStopTime + } + + req = append(req, transformed) + } + return req, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsDayOfWeek(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStartTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHours, err := expandOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStartTimeHours(original["hours"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hours"] = transformedHours + } + + transformedMinutes, err := expandOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStartTimeMinutes(original["minutes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minutes"] = transformedMinutes + } + + transformedSeconds, err := expandOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStartTimeSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStartTimeNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStartTimeHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStartTimeMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStartTimeSeconds(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStartTimeNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStopTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHours, err := expandOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStopTimeHours(original["hours"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hours"] = transformedHours + } + + transformedMinutes, err := expandOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStopTimeMinutes(original["minutes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minutes"] = transformedMinutes + } + + transformedSeconds, err := expandOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStopTimeSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStopTimeNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStopTimeHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStopTimeMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStopTimeSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesScheduledOperationDetailsStopTimeNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesSqlWebDeveloperUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesSupportedCloneRegions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesUsedDataStorageSizeTbs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandOracleDatabaseAutonomousDatabasePropertiesOciUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesTotalAutoBackupStorageSizeGbs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesNextLongTermBackupTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesMaintenanceBeginTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabasePropertiesMaintenanceEndTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabaseNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabaseCidr(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseAutonomousDatabaseEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/resource_oracle_database_autonomous_database_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/resource_oracle_database_autonomous_database_sweeper.go new file mode 100644 index 00000000000..2ae999ddfee --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/resource_oracle_database_autonomous_database_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package oracledatabase + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("OracleDatabaseAutonomousDatabase", testSweepOracleDatabaseAutonomousDatabase) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepOracleDatabaseAutonomousDatabase(region string) error { + resourceName := "OracleDatabaseAutonomousDatabase" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Set up variables to replace in the list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://oracledatabase.googleapis.com/v1/projects/{{project}}/locations/{{location}}/autonomousDatabases", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["autonomousDatabases"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Prefer the id field for the delete URL when present, falling back to name.
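+ // Both id and name hold full resource paths (self links); GetResourceNameFromSelfLink keeps only the trailing segment, which is appended to the delete URL below.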
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be swept + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://oracledatabase.googleapis.com/v1/projects/{{project}}/locations/{{location}}/autonomousDatabases/{{autonomous_database_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s: %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/resource_oracle_database_cloud_exadata_infrastructure.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/resource_oracle_database_cloud_exadata_infrastructure.go new file mode 100644 index 00000000000..1401a9b56ee --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/resource_oracle_database_cloud_exadata_infrastructure.go @@ -0,0 +1,1623 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md.
+// +// ---------------------------------------------------------------------------- + +package oracledatabase + +import ( + "fmt" + "log" + "net/http" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceOracleDatabaseCloudExadataInfrastructure() *schema.Resource { + return &schema.Resource{ + Create: resourceOracleDatabaseCloudExadataInfrastructureCreate, + Read: resourceOracleDatabaseCloudExadataInfrastructureRead, + Update: resourceOracleDatabaseCloudExadataInfrastructureUpdate, + Delete: resourceOracleDatabaseCloudExadataInfrastructureDelete, + + Importer: &schema.ResourceImporter{ + State: resourceOracleDatabaseCloudExadataInfrastructureImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(240 * time.Minute), + Update: schema.DefaultTimeout(120 * time.Minute), + Delete: schema.DefaultTimeout(120 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.SetLabelsDiff, + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "cloud_exadata_infrastructure_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the Exadata Infrastructure to create. This value is restricted +to (^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$) and must be a maximum of 63 +characters in length. The value must start with a letter and end with +a letter or a number.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Resource ID segment making up resource 'name'. See documentation for resource type 'oracledatabase.googleapis.com/DbServer'.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `User friendly name for this resource.`, + }, + "gcp_oracle_zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `GCP location where Oracle Exadata is hosted.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels or tags associated with the resource. + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "properties": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Various properties of Exadata Infrastructure.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "shape": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The shape of the Exadata Infrastructure. 
The shape determines the +amount of CPU, storage, and memory resources allocated to the instance.`, + }, + "compute_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The number of compute servers for the Exadata Infrastructure.`, + }, + "customer_contacts": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The list of customer contacts.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "email": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The email address used by Oracle to send notifications regarding databases +and infrastructure.`, + }, + }, + }, + }, + "maintenance_window": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Maintenance window as defined by Oracle. +https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/MaintenanceWindow`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "custom_action_timeout_mins": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Determines the amount of time the system will wait before the start of each +database server patching operation. Custom action timeout is in minutes and +valid value is between 15 to 120 (inclusive).`, + }, + "days_of_week": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Days during the week when maintenance should be performed.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "hours_of_day": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The window of hours during the day when maintenance should be performed. +The window is a 4 hour slot. Valid values are: + 0 - represents time slot 0:00 - 3:59 UTC + 4 - represents time slot 4:00 - 7:59 UTC + 8 - represents time slot 8:00 - 11:59 UTC + 12 - represents time slot 12:00 - 15:59 UTC + 16 - represents time slot 16:00 - 19:59 UTC + 20 - represents time slot 20:00 - 23:59 UTC`, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "is_custom_action_timeout_enabled": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + ForceNew: true, + Description: `If true, enables the configuration of a custom action timeout (waiting +period) between database server patching operations.`, + }, + "lead_time_week": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Lead time window allows user to set a lead time to prepare for a down time. +The lead time is in weeks and valid value is between 1 to 4.`, + }, + "months": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Months during the year when maintenance should be performed.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "patching_mode": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Cloud CloudExadataInfrastructure node patching method, either "ROLLING" + or "NONROLLING". Default value is ROLLING. + Possible values: + PATCHING_MODE_UNSPECIFIED +ROLLING +NON_ROLLING`, + }, + "preference": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The maintenance window scheduling preference. 
+ Possible values: + MAINTENANCE_WINDOW_PREFERENCE_UNSPECIFIED +CUSTOM_PREFERENCE +NO_PREFERENCE`, + }, + "weeks_of_month": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Weeks during the month when maintenance should be performed. Weeks start on +the 1st, 8th, 15th, and 22nd days of the month, and have a duration of 7 +days. Weeks start and end based on calendar dates, not days of the week.`, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + "storage_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The number of Cloud Exadata storage servers for the Exadata Infrastructure.`, + }, + "total_storage_size_gb": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The total storage allocated to the Exadata Infrastructure +resource, in gigabytes (GB).`, + }, + "activated_storage_count": { + Type: schema.TypeInt, + Computed: true, + Description: `The requested number of additional storage servers activated for the +Exadata Infrastructure.`, + }, + "additional_storage_count": { + Type: schema.TypeInt, + Computed: true, + Description: `The requested number of additional storage servers for the Exadata +Infrastructure.`, + }, + "available_storage_size_gb": { + Type: schema.TypeInt, + Computed: true, + Description: `The available storage can be allocated to the Exadata Infrastructure +resource, in gigabytes (GB).`, + }, + "cpu_count": { + Type: schema.TypeInt, + Computed: true, + Description: `The number of enabled CPU cores.`, + }, + "data_storage_size_tb": { + Type: schema.TypeFloat, + Computed: true, + Description: `Size, in terabytes, of the DATA disk group.`, + }, + "db_node_storage_size_gb": { + Type: schema.TypeInt, + Computed: true, + Description: `The local node storage allocated in GBs.`, + }, + "db_server_version": { + Type: schema.TypeString, + Computed: true, + Description: `The software version of the database servers (dom0) in the Exadata +Infrastructure.`, + }, + "max_cpu_count": { + Type: schema.TypeInt, + Computed: true, + Description: `The total number of CPU cores available.`, + }, + "max_data_storage_tb": { + Type: schema.TypeFloat, + Computed: true, + Description: `The total available DATA disk group size.`, + }, + "max_db_node_storage_size_gb": { + Type: schema.TypeInt, + Computed: true, + Description: `The total local node storage available in GBs.`, + }, + "max_memory_gb": { + Type: schema.TypeInt, + Computed: true, + Description: `The total memory available in GBs.`, + }, + "memory_size_gb": { + Type: schema.TypeInt, + Computed: true, + Description: `The memory allocated in GBs.`, + }, + "monthly_db_server_version": { + Type: schema.TypeString, + Computed: true, + Description: `The monthly software version of the database servers (dom0) +in the Exadata Infrastructure. Example: 20.1.15`, + }, + "monthly_storage_server_version": { + Type: schema.TypeString, + Computed: true, + Description: `The monthly software version of the storage servers (cells) +in the Exadata Infrastructure. 
Example: 20.1.15`, + }, + "next_maintenance_run_id": { + Type: schema.TypeString, + Computed: true, + Description: `The OCID of the next maintenance run.`, + }, + "next_maintenance_run_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time when the next maintenance run will occur.`, + }, + "next_security_maintenance_run_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time when the next security maintenance run will occur.`, + }, + "oci_url": { + Type: schema.TypeString, + Computed: true, + Description: `Deep link to the OCI console to view this resource.`, + }, + "ocid": { + Type: schema.TypeString, + Computed: true, + Description: `OCID of created infra. +https://docs.oracle.com/en-us/iaas/Content/General/Concepts/identifiers.htm#Oracle`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The current lifecycle state of the Exadata Infrastructure. + Possible values: + STATE_UNSPECIFIED +PROVISIONING +AVAILABLE +UPDATING +TERMINATING +TERMINATED +FAILED +MAINTENANCE_IN_PROGRESS`, + }, + "storage_server_version": { + Type: schema.TypeString, + Computed: true, + Description: `The software version of the storage servers (cells) in the Exadata +Infrastructure.`, + }, + }, + }, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The date and time that the Exadata Infrastructure was created.`, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "entitlement_id": { + Type: schema.TypeString, + Computed: true, + Description: `Entitlement ID of the private offer against which this infrastructure +resource is provisioned.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Identifier. 
The name of the Exadata Infrastructure resource with the following format: +projects/{project}/locations/{region}/cloudExadataInfrastructures/{cloud_exadata_infrastructure}`, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceOracleDatabaseCloudExadataInfrastructureCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandOracleDatabaseCloudExadataInfrastructureDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + gcpOracleZoneProp, err := expandOracleDatabaseCloudExadataInfrastructureGcpOracleZone(d.Get("gcp_oracle_zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("gcp_oracle_zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(gcpOracleZoneProp)) && (ok || !reflect.DeepEqual(v, gcpOracleZoneProp)) { + obj["gcpOracleZone"] = gcpOracleZoneProp + } + propertiesProp, err := expandOracleDatabaseCloudExadataInfrastructureProperties(d.Get("properties"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("properties"); !tpgresource.IsEmptyValue(reflect.ValueOf(propertiesProp)) && (ok || !reflect.DeepEqual(v, propertiesProp)) { + obj["properties"] = propertiesProp + } + labelsProp, err := expandOracleDatabaseCloudExadataInfrastructureEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{OracleDatabaseBasePath}}projects/{{project}}/locations/{{location}}/cloudExadataInfrastructures?cloudExadataInfrastructureId={{cloud_exadata_infrastructure_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new CloudExadataInfrastructure: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CloudExadataInfrastructure: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating CloudExadataInfrastructure: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, 
"projects/{{project}}/locations/{{location}}/cloudExadataInfrastructures/{{cloud_exadata_infrastructure_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = OracleDatabaseOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating CloudExadataInfrastructure", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create CloudExadataInfrastructure: %s", err) + } + + if err := d.Set("name", flattenOracleDatabaseCloudExadataInfrastructureName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/cloudExadataInfrastructures/{{cloud_exadata_infrastructure_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating CloudExadataInfrastructure %q: %#v", d.Id(), res) + + return resourceOracleDatabaseCloudExadataInfrastructureRead(d, meta) +} + +func resourceOracleDatabaseCloudExadataInfrastructureRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{OracleDatabaseBasePath}}projects/{{project}}/locations/{{location}}/cloudExadataInfrastructures/{{cloud_exadata_infrastructure_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CloudExadataInfrastructure: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("OracleDatabaseCloudExadataInfrastructure %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading CloudExadataInfrastructure: %s", err) + } + + if err := d.Set("name", flattenOracleDatabaseCloudExadataInfrastructureName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading CloudExadataInfrastructure: %s", err) + } + if err := d.Set("display_name", flattenOracleDatabaseCloudExadataInfrastructureDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading CloudExadataInfrastructure: %s", err) + } + if err := d.Set("gcp_oracle_zone", flattenOracleDatabaseCloudExadataInfrastructureGcpOracleZone(res["gcpOracleZone"], d, config)); err != nil { + return fmt.Errorf("Error reading CloudExadataInfrastructure: %s", err) + } + if err := d.Set("entitlement_id", flattenOracleDatabaseCloudExadataInfrastructureEntitlementId(res["entitlementId"], d, config)); err != nil { + return fmt.Errorf("Error reading CloudExadataInfrastructure: %s", err) + } + if err := d.Set("properties", 
flattenOracleDatabaseCloudExadataInfrastructureProperties(res["properties"], d, config)); err != nil { + return fmt.Errorf("Error reading CloudExadataInfrastructure: %s", err) + } + if err := d.Set("labels", flattenOracleDatabaseCloudExadataInfrastructureLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading CloudExadataInfrastructure: %s", err) + } + if err := d.Set("create_time", flattenOracleDatabaseCloudExadataInfrastructureCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading CloudExadataInfrastructure: %s", err) + } + if err := d.Set("terraform_labels", flattenOracleDatabaseCloudExadataInfrastructureTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading CloudExadataInfrastructure: %s", err) + } + if err := d.Set("effective_labels", flattenOracleDatabaseCloudExadataInfrastructureEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading CloudExadataInfrastructure: %s", err) + } + + return nil +} + +func resourceOracleDatabaseCloudExadataInfrastructureUpdate(d *schema.ResourceData, meta interface{}) error { + // Only the root fields "labels" and "terraform_labels" are mutable + return resourceOracleDatabaseCloudExadataInfrastructureRead(d, meta) +} + +func resourceOracleDatabaseCloudExadataInfrastructureDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CloudExadataInfrastructure: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{OracleDatabaseBasePath}}projects/{{project}}/locations/{{location}}/cloudExadataInfrastructures/{{cloud_exadata_infrastructure_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting CloudExadataInfrastructure %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "CloudExadataInfrastructure") + } + + err = OracleDatabaseOperationWaitTime( + config, res, project, "Deleting CloudExadataInfrastructure", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting CloudExadataInfrastructure %q: %#v", d.Id(), res) + return nil +} + +func resourceOracleDatabaseCloudExadataInfrastructureImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/cloudExadataInfrastructures/(?P<cloud_exadata_infrastructure_id>[^/]+)$", + "^(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<cloud_exadata_infrastructure_id>[^/]+)$", + "^(?P<location>[^/]+)/(?P<cloud_exadata_infrastructure_id>[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config,
"projects/{{project}}/locations/{{location}}/cloudExadataInfrastructures/{{cloud_exadata_infrastructure_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenOracleDatabaseCloudExadataInfrastructureName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructureDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructureGcpOracleZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructureEntitlementId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructureProperties(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["ocid"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesOcid(original["ocid"], d, config) + transformed["compute_count"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesComputeCount(original["computeCount"], d, config) + transformed["storage_count"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesStorageCount(original["storageCount"], d, config) + transformed["total_storage_size_gb"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesTotalStorageSizeGb(original["totalStorageSizeGb"], d, config) + transformed["available_storage_size_gb"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesAvailableStorageSizeGb(original["availableStorageSizeGb"], d, config) + transformed["maintenance_window"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindow(original["maintenanceWindow"], d, config) + transformed["state"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesState(original["state"], d, config) + transformed["shape"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesShape(original["shape"], d, config) + transformed["oci_url"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesOciUrl(original["ociUrl"], d, config) + transformed["cpu_count"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesCpuCount(original["cpuCount"], d, config) + transformed["max_cpu_count"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaxCpuCount(original["maxCpuCount"], d, config) + transformed["memory_size_gb"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesMemorySizeGb(original["memorySizeGb"], d, config) + transformed["max_memory_gb"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaxMemoryGb(original["maxMemoryGb"], d, config) + transformed["db_node_storage_size_gb"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesDbNodeStorageSizeGb(original["dbNodeStorageSizeGb"], d, config) + transformed["max_db_node_storage_size_gb"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaxDbNodeStorageSizeGb(original["maxDbNodeStorageSizeGb"], d, config) + transformed["data_storage_size_tb"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesDataStorageSizeTb(original["dataStorageSizeTb"], d, config) + 
transformed["max_data_storage_tb"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaxDataStorageTb(original["maxDataStorageTb"], d, config) + transformed["activated_storage_count"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesActivatedStorageCount(original["activatedStorageCount"], d, config) + transformed["additional_storage_count"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesAdditionalStorageCount(original["additionalStorageCount"], d, config) + transformed["db_server_version"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesDbServerVersion(original["dbServerVersion"], d, config) + transformed["storage_server_version"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesStorageServerVersion(original["storageServerVersion"], d, config) + transformed["next_maintenance_run_id"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesNextMaintenanceRunId(original["nextMaintenanceRunId"], d, config) + transformed["next_maintenance_run_time"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesNextMaintenanceRunTime(original["nextMaintenanceRunTime"], d, config) + transformed["next_security_maintenance_run_time"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesNextSecurityMaintenanceRunTime(original["nextSecurityMaintenanceRunTime"], d, config) + transformed["customer_contacts"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesCustomerContacts(original["customerContacts"], d, config) + transformed["monthly_storage_server_version"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesMonthlyStorageServerVersion(original["monthlyStorageServerVersion"], d, config) + transformed["monthly_db_server_version"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesMonthlyDbServerVersion(original["monthlyDbServerVersion"], d, config) + return []interface{}{transformed} +} +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesOcid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesComputeCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesStorageCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesTotalStorageSizeGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := 
int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesAvailableStorageSizeGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindow(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["preference"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowPreference(original["preference"], d, config) + transformed["months"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowMonths(original["months"], d, config) + transformed["weeks_of_month"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowWeeksOfMonth(original["weeksOfMonth"], d, config) + transformed["days_of_week"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowDaysOfWeek(original["daysOfWeek"], d, config) + transformed["hours_of_day"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowHoursOfDay(original["hoursOfDay"], d, config) + transformed["lead_time_week"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowLeadTimeWeek(original["leadTimeWeek"], d, config) + transformed["patching_mode"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowPatchingMode(original["patchingMode"], d, config) + transformed["custom_action_timeout_mins"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowCustomActionTimeoutMins(original["customActionTimeoutMins"], d, config) + transformed["is_custom_action_timeout_enabled"] = + flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowIsCustomActionTimeoutEnabled(original["isCustomActionTimeoutEnabled"], d, config) + return []interface{}{transformed} +} +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowPreference(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowMonths(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowWeeksOfMonth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowDaysOfWeek(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowHoursOfDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowLeadTimeWeek(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowPatchingMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowCustomActionTimeoutMins(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowIsCustomActionTimeoutEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesShape(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesOciUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesCpuCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaxCpuCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesMemorySizeGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaxMemoryGb(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesDbNodeStorageSizeGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaxDbNodeStorageSizeGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesDataStorageSizeTb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesMaxDataStorageTb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesActivatedStorageCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesAdditionalStorageCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesDbServerVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesStorageServerVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesNextMaintenanceRunId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesNextMaintenanceRunTime(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesNextSecurityMaintenanceRunTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesCustomerContacts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "email": flattenOracleDatabaseCloudExadataInfrastructurePropertiesCustomerContactsEmail(original["email"], d, config), + }) + } + return transformed +} +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesCustomerContactsEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesMonthlyStorageServerVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructurePropertiesMonthlyDbServerVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructureLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenOracleDatabaseCloudExadataInfrastructureCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudExadataInfrastructureTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("terraform_labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenOracleDatabaseCloudExadataInfrastructureEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandOracleDatabaseCloudExadataInfrastructureDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructureGcpOracleZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructureProperties(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedOcid, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesOcid(original["ocid"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOcid); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ocid"] = transformedOcid + } + + transformedComputeCount, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesComputeCount(original["compute_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedComputeCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["computeCount"] = transformedComputeCount + } + + transformedStorageCount, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesStorageCount(original["storage_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStorageCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["storageCount"] = transformedStorageCount + } + + transformedTotalStorageSizeGb, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesTotalStorageSizeGb(original["total_storage_size_gb"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTotalStorageSizeGb); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["totalStorageSizeGb"] = transformedTotalStorageSizeGb + } + + transformedAvailableStorageSizeGb, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesAvailableStorageSizeGb(original["available_storage_size_gb"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAvailableStorageSizeGb); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["availableStorageSizeGb"] = transformedAvailableStorageSizeGb + } + + transformedMaintenanceWindow, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindow(original["maintenance_window"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaintenanceWindow); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maintenanceWindow"] = transformedMaintenanceWindow + } + + transformedState, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesState(original["state"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedState); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["state"] = transformedState + } + + transformedShape, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesShape(original["shape"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedShape); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["shape"] = transformedShape + } + + transformedOciUrl, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesOciUrl(original["oci_url"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOciUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ociUrl"] = transformedOciUrl + } + + transformedCpuCount, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesCpuCount(original["cpu_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCpuCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cpuCount"] = transformedCpuCount + } + + transformedMaxCpuCount, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesMaxCpuCount(original["max_cpu_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxCpuCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxCpuCount"] = 
transformedMaxCpuCount + } + + transformedMemorySizeGb, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesMemorySizeGb(original["memory_size_gb"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMemorySizeGb); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["memorySizeGb"] = transformedMemorySizeGb + } + + transformedMaxMemoryGb, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesMaxMemoryGb(original["max_memory_gb"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxMemoryGb); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxMemoryGb"] = transformedMaxMemoryGb + } + + transformedDbNodeStorageSizeGb, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesDbNodeStorageSizeGb(original["db_node_storage_size_gb"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDbNodeStorageSizeGb); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dbNodeStorageSizeGb"] = transformedDbNodeStorageSizeGb + } + + transformedMaxDbNodeStorageSizeGb, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesMaxDbNodeStorageSizeGb(original["max_db_node_storage_size_gb"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxDbNodeStorageSizeGb); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxDbNodeStorageSizeGb"] = transformedMaxDbNodeStorageSizeGb + } + + transformedDataStorageSizeTb, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesDataStorageSizeTb(original["data_storage_size_tb"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDataStorageSizeTb); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dataStorageSizeTb"] = transformedDataStorageSizeTb + } + + transformedMaxDataStorageTb, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesMaxDataStorageTb(original["max_data_storage_tb"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxDataStorageTb); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxDataStorageTb"] = transformedMaxDataStorageTb + } + + transformedActivatedStorageCount, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesActivatedStorageCount(original["activated_storage_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedActivatedStorageCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["activatedStorageCount"] = transformedActivatedStorageCount + } + + transformedAdditionalStorageCount, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesAdditionalStorageCount(original["additional_storage_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAdditionalStorageCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["additionalStorageCount"] = transformedAdditionalStorageCount + } + + transformedDbServerVersion, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesDbServerVersion(original["db_server_version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDbServerVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dbServerVersion"] = transformedDbServerVersion + } + + transformedStorageServerVersion, err := 
expandOracleDatabaseCloudExadataInfrastructurePropertiesStorageServerVersion(original["storage_server_version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStorageServerVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["storageServerVersion"] = transformedStorageServerVersion + } + + transformedNextMaintenanceRunId, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesNextMaintenanceRunId(original["next_maintenance_run_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNextMaintenanceRunId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nextMaintenanceRunId"] = transformedNextMaintenanceRunId + } + + transformedNextMaintenanceRunTime, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesNextMaintenanceRunTime(original["next_maintenance_run_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNextMaintenanceRunTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nextMaintenanceRunTime"] = transformedNextMaintenanceRunTime + } + + transformedNextSecurityMaintenanceRunTime, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesNextSecurityMaintenanceRunTime(original["next_security_maintenance_run_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNextSecurityMaintenanceRunTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nextSecurityMaintenanceRunTime"] = transformedNextSecurityMaintenanceRunTime + } + + transformedCustomerContacts, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesCustomerContacts(original["customer_contacts"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCustomerContacts); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["customerContacts"] = transformedCustomerContacts + } + + transformedMonthlyStorageServerVersion, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesMonthlyStorageServerVersion(original["monthly_storage_server_version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonthlyStorageServerVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["monthlyStorageServerVersion"] = transformedMonthlyStorageServerVersion + } + + transformedMonthlyDbServerVersion, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesMonthlyDbServerVersion(original["monthly_db_server_version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonthlyDbServerVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["monthlyDbServerVersion"] = transformedMonthlyDbServerVersion + } + + return transformed, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesOcid(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesComputeCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesStorageCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandOracleDatabaseCloudExadataInfrastructurePropertiesTotalStorageSizeGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesAvailableStorageSizeGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindow(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPreference, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowPreference(original["preference"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPreference); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["preference"] = transformedPreference + } + + transformedMonths, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowMonths(original["months"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonths); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["months"] = transformedMonths + } + + transformedWeeksOfMonth, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowWeeksOfMonth(original["weeks_of_month"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWeeksOfMonth); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["weeksOfMonth"] = transformedWeeksOfMonth + } + + transformedDaysOfWeek, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowDaysOfWeek(original["days_of_week"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDaysOfWeek); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["daysOfWeek"] = transformedDaysOfWeek + } + + transformedHoursOfDay, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowHoursOfDay(original["hours_of_day"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHoursOfDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hoursOfDay"] = transformedHoursOfDay + } + + transformedLeadTimeWeek, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowLeadTimeWeek(original["lead_time_week"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLeadTimeWeek); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["leadTimeWeek"] = transformedLeadTimeWeek + } + + transformedPatchingMode, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowPatchingMode(original["patching_mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPatchingMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["patchingMode"] = transformedPatchingMode + } + + transformedCustomActionTimeoutMins, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowCustomActionTimeoutMins(original["custom_action_timeout_mins"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedCustomActionTimeoutMins); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["customActionTimeoutMins"] = transformedCustomActionTimeoutMins + } + + transformedIsCustomActionTimeoutEnabled, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowIsCustomActionTimeoutEnabled(original["is_custom_action_timeout_enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIsCustomActionTimeoutEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["isCustomActionTimeoutEnabled"] = transformedIsCustomActionTimeoutEnabled + } + + return transformed, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowPreference(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowMonths(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowWeeksOfMonth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowDaysOfWeek(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowHoursOfDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowLeadTimeWeek(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowPatchingMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowCustomActionTimeoutMins(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesMaintenanceWindowIsCustomActionTimeoutEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesShape(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesOciUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesCpuCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesMaxCpuCount(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesMemorySizeGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesMaxMemoryGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesDbNodeStorageSizeGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesMaxDbNodeStorageSizeGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesDataStorageSizeTb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesMaxDataStorageTb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesActivatedStorageCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesAdditionalStorageCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesDbServerVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesStorageServerVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesNextMaintenanceRunId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesNextMaintenanceRunTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesNextSecurityMaintenanceRunTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesCustomerContacts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEmail, err := expandOracleDatabaseCloudExadataInfrastructurePropertiesCustomerContactsEmail(original["email"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEmail); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["email"] = transformedEmail + } + + req = append(req, 
transformed) + } + return req, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesCustomerContactsEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesMonthlyStorageServerVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructurePropertiesMonthlyDbServerVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudExadataInfrastructureEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/resource_oracle_database_cloud_exadata_infrastructure_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/resource_oracle_database_cloud_exadata_infrastructure_sweeper.go new file mode 100644 index 00000000000..81e3f7976c8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/resource_oracle_database_cloud_exadata_infrastructure_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package oracledatabase + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("OracleDatabaseCloudExadataInfrastructure", testSweepOracleDatabaseCloudExadataInfrastructure) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepOracleDatabaseCloudExadataInfrastructure(region string) error { + resourceName := "OracleDatabaseCloudExadataInfrastructure" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://oracledatabase.googleapis.com/v1/projects/{{project}}/locations/{{location}}/cloudExadataInfrastructures", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["cloudExadataInfrastructures"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
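+ // For example (hypothetical values, not from a real API response): a list + // entry with id "projects/my-proj/locations/us-central1/cloudExadataInfrastructures/tf-test-exa1" + // is reduced by GetResourceNameFromSelfLink to its trailing segment, + // "tf-test-exa1"; only names carrying a recognized test prefix (such as + // "tf-test") pass the IsSweepableTestResource check below, so hand-created + // resources are counted and skipped rather than deleted.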
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://oracledatabase.googleapis.com/v1/projects/{{project}}/locations/{{location}}/cloudExadataInfrastructures/{{cloud_exadata_infrastructure_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/resource_oracle_database_cloud_vm_cluster.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/resource_oracle_database_cloud_vm_cluster.go new file mode 100644 index 00000000000..83eda28c9a8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/resource_oracle_database_cloud_vm_cluster.go @@ -0,0 +1,1556 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package oracledatabase + +import ( + "fmt" + "log" + "net/http" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceOracleDatabaseCloudVmCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceOracleDatabaseCloudVmClusterCreate, + Read: resourceOracleDatabaseCloudVmClusterRead, + Update: resourceOracleDatabaseCloudVmClusterUpdate, + Delete: resourceOracleDatabaseCloudVmClusterDelete, + + Importer: &schema.ResourceImporter{ + State: resourceOracleDatabaseCloudVmClusterImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(120 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.SetLabelsDiff, + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "backup_subnet_cidr": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `CIDR range of the backup subnet.`, + }, + "cidr": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Network settings. CIDR to use for cluster IP allocation.`, + }, + "cloud_vm_cluster_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the VM Cluster to create. This value is restricted +to (^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$) and must be a maximum of 63 +characters in length. The value must start with a letter and end with +a letter or a number.`, + }, + "exadata_infrastructure": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the Exadata Infrastructure resource on which VM cluster +resource is created, in the following format: +projects/{project}/locations/{region}/cloudExadataInfrastructures/{cloud_exadata_infrastructure}`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Resource ID segment making up resource 'name'. See documentation for resource type 'oracledatabase.googleapis.com/DbNode'.`, + }, + "network": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the VPC network. +Format: projects/{project}/global/networks/{network}`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `User-friendly name for this resource.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels or tags associated with the VM Cluster. + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.
+Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "properties": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Various properties and settings associated with Exadata VM cluster.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu_core_count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `Number of enabled CPU cores.`, + }, + "license_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `License type of VM Cluster. + Possible values: + LICENSE_TYPE_UNSPECIFIED +LICENSE_INCLUDED +BRING_YOUR_OWN_LICENSE`, + }, + "cluster_name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `OCI Cluster name.`, + }, + "data_storage_size_tb": { + Type: schema.TypeFloat, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The data disk group size to be allocated in TBs.`, + }, + "db_node_storage_size_gb": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Local storage per VM`, + }, + "db_server_ocids": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `OCID of database servers.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "diagnostics_data_collection_options": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Data collection options for diagnostics.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "diagnostics_events_enabled": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Indicates whether diagnostic collection is enabled for the VM cluster`, + }, + "health_monitoring_enabled": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Indicates whether health monitoring is enabled for the VM cluster`, + }, + "incident_logs_enabled": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Indicates whether incident logs and trace collection are enabled for the VM +cluster`, + }, + }, + }, + }, + "disk_redundancy": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The type of redundancy. + Possible values: + DISK_REDUNDANCY_UNSPECIFIED +HIGH +NORMAL`, + }, + "gi_version": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Grid Infrastructure Version.`, + }, + "hostname_prefix": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Prefix for VM cluster host names.`, + }, + "local_backup_enabled": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Use local backup.`, + }, + "memory_size_gb": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Memory allocated in GBs.`, + }, + "node_count": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Number of database servers.`, + }, + "ocpu_count": { + Type: schema.TypeFloat, + Computed: true, + Optional: true, + ForceNew: true, + Description: `OCPU count per VM. 
Minimum is 0.1.`, + }, + "sparse_diskgroup_enabled": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Use exadata sparse snapshots.`, + }, + "ssh_public_keys": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `SSH public keys to be stored with cluster.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "time_zone": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Represents a time zone from the +[IANA Time Zone Database](https://www.iana.org/time-zones).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `IANA Time Zone Database time zone, e.g. "America/New_York".`, + }, + }, + }, + }, + "compartment_id": { + Type: schema.TypeString, + Computed: true, + Description: `Compartment ID of cluster.`, + }, + "dns_listener_ip": { + Type: schema.TypeString, + Computed: true, + Description: `DNS listener IP.`, + }, + "domain": { + Type: schema.TypeString, + Computed: true, + Description: `Parent DNS domain where SCAN DNS and hosts names are qualified. +ex: ocispdelegated.ocisp10jvnet.oraclevcn.com`, + }, + "hostname": { + Type: schema.TypeString, + Computed: true, + Description: `host name without domain. +format: "-" with some suffix. +ex: sp2-yi0xq where "sp2" is the hostname_prefix.`, + }, + "oci_url": { + Type: schema.TypeString, + Computed: true, + Description: `Deep link to the OCI console to view this resource.`, + }, + "ocid": { + Type: schema.TypeString, + Computed: true, + Description: `Oracle Cloud Infrastructure ID of VM Cluster.`, + }, + "scan_dns": { + Type: schema.TypeString, + Computed: true, + Description: `SCAN DNS name. +ex: sp2-yi0xq-scan.ocispdelegated.ocisp10jvnet.oraclevcn.com`, + }, + "scan_dns_record_id": { + Type: schema.TypeString, + Computed: true, + Description: `OCID of scan DNS record.`, + }, + "scan_ip_ids": { + Type: schema.TypeList, + Computed: true, + Description: `OCIDs of scan IPs.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "scan_listener_port_tcp": { + Type: schema.TypeInt, + Computed: true, + Description: `SCAN listener port - TCP`, + }, + "scan_listener_port_tcp_ssl": { + Type: schema.TypeInt, + Computed: true, + Description: `SCAN listener port - TLS`, + }, + "shape": { + Type: schema.TypeString, + Computed: true, + Description: `Shape of VM Cluster.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the cluster. 
+ Possible values: + STATE_UNSPECIFIED +PROVISIONING +AVAILABLE +UPDATING +TERMINATING +TERMINATED +FAILED +MAINTENANCE_IN_PROGRESS`, + }, + "storage_size_gb": { + Type: schema.TypeInt, + Computed: true, + Description: `The storage allocation for the disk group, in gigabytes (GB).`, + }, + "system_version": { + Type: schema.TypeString, + Computed: true, + Description: `Operating system version of the image.`, + }, + }, + }, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The date and time that the VM cluster was created.`, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "gcp_oracle_zone": { + Type: schema.TypeString, + Computed: true, + Description: `GCP location where Oracle Exadata is hosted. It is same as GCP Oracle zone +of Exadata infrastructure.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Identifier. The name of the VM Cluster resource with the format: +projects/{project}/locations/{region}/cloudVmClusters/{cloud_vm_cluster}`, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceOracleDatabaseCloudVmClusterCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + exadataInfrastructureProp, err := expandOracleDatabaseCloudVmClusterExadataInfrastructure(d.Get("exadata_infrastructure"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("exadata_infrastructure"); !tpgresource.IsEmptyValue(reflect.ValueOf(exadataInfrastructureProp)) && (ok || !reflect.DeepEqual(v, exadataInfrastructureProp)) { + obj["exadataInfrastructure"] = exadataInfrastructureProp + } + displayNameProp, err := expandOracleDatabaseCloudVmClusterDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + propertiesProp, err := expandOracleDatabaseCloudVmClusterProperties(d.Get("properties"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("properties"); !tpgresource.IsEmptyValue(reflect.ValueOf(propertiesProp)) && (ok || !reflect.DeepEqual(v, propertiesProp)) { + obj["properties"] = propertiesProp + } + cidrProp, err := expandOracleDatabaseCloudVmClusterCidr(d.Get("cidr"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("cidr"); !tpgresource.IsEmptyValue(reflect.ValueOf(cidrProp)) && (ok || !reflect.DeepEqual(v, cidrProp)) { + obj["cidr"] = cidrProp + } + backupSubnetCidrProp, err := expandOracleDatabaseCloudVmClusterBackupSubnetCidr(d.Get("backup_subnet_cidr"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("backup_subnet_cidr"); !tpgresource.IsEmptyValue(reflect.ValueOf(backupSubnetCidrProp)) && (ok || !reflect.DeepEqual(v, backupSubnetCidrProp)) { + obj["backupSubnetCidr"] = backupSubnetCidrProp + } + networkProp, err := expandOracleDatabaseCloudVmClusterNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + labelsProp, err := expandOracleDatabaseCloudVmClusterEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{OracleDatabaseBasePath}}projects/{{project}}/locations/{{location}}/cloudVmClusters?cloudVmClusterId={{cloud_vm_cluster_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new CloudVmCluster: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CloudVmCluster: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating CloudVmCluster: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/cloudVmClusters/{{cloud_vm_cluster_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = OracleDatabaseOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating CloudVmCluster", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create CloudVmCluster: %s", err) + } + + if err := d.Set("name", flattenOracleDatabaseCloudVmClusterName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
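+ // Note: the operation response above is the created resource itself, so + // identity fields (for example a project or location defaulted server-side) + // may only be final once the long-running operation completes; setting + // "name" from the response and re-running ReplaceVars keeps d.Id() + // consistent with what the API actually created.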
+ id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/cloudVmClusters/{{cloud_vm_cluster_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating CloudVmCluster %q: %#v", d.Id(), res) + + return resourceOracleDatabaseCloudVmClusterRead(d, meta) +} + +func resourceOracleDatabaseCloudVmClusterRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{OracleDatabaseBasePath}}projects/{{project}}/locations/{{location}}/cloudVmClusters/{{cloud_vm_cluster_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CloudVmCluster: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("OracleDatabaseCloudVmCluster %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading CloudVmCluster: %s", err) + } + + if err := d.Set("name", flattenOracleDatabaseCloudVmClusterName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading CloudVmCluster: %s", err) + } + if err := d.Set("exadata_infrastructure", flattenOracleDatabaseCloudVmClusterExadataInfrastructure(res["exadataInfrastructure"], d, config)); err != nil { + return fmt.Errorf("Error reading CloudVmCluster: %s", err) + } + if err := d.Set("display_name", flattenOracleDatabaseCloudVmClusterDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading CloudVmCluster: %s", err) + } + if err := d.Set("gcp_oracle_zone", flattenOracleDatabaseCloudVmClusterGcpOracleZone(res["gcpOracleZone"], d, config)); err != nil { + return fmt.Errorf("Error reading CloudVmCluster: %s", err) + } + if err := d.Set("properties", flattenOracleDatabaseCloudVmClusterProperties(res["properties"], d, config)); err != nil { + return fmt.Errorf("Error reading CloudVmCluster: %s", err) + } + if err := d.Set("labels", flattenOracleDatabaseCloudVmClusterLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading CloudVmCluster: %s", err) + } + if err := d.Set("create_time", flattenOracleDatabaseCloudVmClusterCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading CloudVmCluster: %s", err) + } + if err := d.Set("cidr", flattenOracleDatabaseCloudVmClusterCidr(res["cidr"], d, config)); err != nil { + return fmt.Errorf("Error reading CloudVmCluster: %s", err) + } + if err := d.Set("backup_subnet_cidr", flattenOracleDatabaseCloudVmClusterBackupSubnetCidr(res["backupSubnetCidr"], d, config)); err != nil { + return fmt.Errorf("Error reading CloudVmCluster: %s", err) + } + if err := d.Set("network", flattenOracleDatabaseCloudVmClusterNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading CloudVmCluster: %s", 
err) + } + if err := d.Set("terraform_labels", flattenOracleDatabaseCloudVmClusterTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading CloudVmCluster: %s", err) + } + if err := d.Set("effective_labels", flattenOracleDatabaseCloudVmClusterEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading CloudVmCluster: %s", err) + } + + return nil +} + +func resourceOracleDatabaseCloudVmClusterUpdate(d *schema.ResourceData, meta interface{}) error { + // Only the root fields "labels" and "terraform_labels" are mutable + return resourceOracleDatabaseCloudVmClusterRead(d, meta) +} + +func resourceOracleDatabaseCloudVmClusterDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CloudVmCluster: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{OracleDatabaseBasePath}}projects/{{project}}/locations/{{location}}/cloudVmClusters/{{cloud_vm_cluster_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting CloudVmCluster %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "CloudVmCluster") + } + + err = OracleDatabaseOperationWaitTime( + config, res, project, "Deleting CloudVmCluster", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting CloudVmCluster %q: %#v", d.Id(), res) + return nil +} + +func resourceOracleDatabaseCloudVmClusterImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/cloudVmClusters/(?P<cloud_vm_cluster_id>[^/]+)$", + "^(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<cloud_vm_cluster_id>[^/]+)$", + "^(?P<location>[^/]+)/(?P<cloud_vm_cluster_id>[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/cloudVmClusters/{{cloud_vm_cluster_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenOracleDatabaseCloudVmClusterName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterExadataInfrastructure(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterGcpOracleZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return
v +} + +func flattenOracleDatabaseCloudVmClusterProperties(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["ocid"] = + flattenOracleDatabaseCloudVmClusterPropertiesOcid(original["ocid"], d, config) + transformed["license_type"] = + flattenOracleDatabaseCloudVmClusterPropertiesLicenseType(original["licenseType"], d, config) + transformed["gi_version"] = + flattenOracleDatabaseCloudVmClusterPropertiesGiVersion(original["giVersion"], d, config) + transformed["time_zone"] = + flattenOracleDatabaseCloudVmClusterPropertiesTimeZone(original["timeZone"], d, config) + transformed["ssh_public_keys"] = + flattenOracleDatabaseCloudVmClusterPropertiesSshPublicKeys(original["sshPublicKeys"], d, config) + transformed["node_count"] = + flattenOracleDatabaseCloudVmClusterPropertiesNodeCount(original["nodeCount"], d, config) + transformed["shape"] = + flattenOracleDatabaseCloudVmClusterPropertiesShape(original["shape"], d, config) + transformed["ocpu_count"] = + flattenOracleDatabaseCloudVmClusterPropertiesOcpuCount(original["ocpuCount"], d, config) + transformed["memory_size_gb"] = + flattenOracleDatabaseCloudVmClusterPropertiesMemorySizeGb(original["memorySizeGb"], d, config) + transformed["db_node_storage_size_gb"] = + flattenOracleDatabaseCloudVmClusterPropertiesDbNodeStorageSizeGb(original["dbNodeStorageSizeGb"], d, config) + transformed["storage_size_gb"] = + flattenOracleDatabaseCloudVmClusterPropertiesStorageSizeGb(original["storageSizeGb"], d, config) + transformed["data_storage_size_tb"] = + flattenOracleDatabaseCloudVmClusterPropertiesDataStorageSizeTb(original["dataStorageSizeTb"], d, config) + transformed["disk_redundancy"] = + flattenOracleDatabaseCloudVmClusterPropertiesDiskRedundancy(original["diskRedundancy"], d, config) + transformed["sparse_diskgroup_enabled"] = + flattenOracleDatabaseCloudVmClusterPropertiesSparseDiskgroupEnabled(original["sparseDiskgroupEnabled"], d, config) + transformed["local_backup_enabled"] = + flattenOracleDatabaseCloudVmClusterPropertiesLocalBackupEnabled(original["localBackupEnabled"], d, config) + transformed["hostname_prefix"] = + flattenOracleDatabaseCloudVmClusterPropertiesHostnamePrefix(original["hostnamePrefix"], d, config) + transformed["diagnostics_data_collection_options"] = + flattenOracleDatabaseCloudVmClusterPropertiesDiagnosticsDataCollectionOptions(original["diagnosticsDataCollectionOptions"], d, config) + transformed["state"] = + flattenOracleDatabaseCloudVmClusterPropertiesState(original["state"], d, config) + transformed["scan_listener_port_tcp"] = + flattenOracleDatabaseCloudVmClusterPropertiesScanListenerPortTcp(original["scanListenerPortTcp"], d, config) + transformed["scan_listener_port_tcp_ssl"] = + flattenOracleDatabaseCloudVmClusterPropertiesScanListenerPortTcpSsl(original["scanListenerPortTcpSsl"], d, config) + transformed["domain"] = + flattenOracleDatabaseCloudVmClusterPropertiesDomain(original["domain"], d, config) + transformed["scan_dns"] = + flattenOracleDatabaseCloudVmClusterPropertiesScanDns(original["scanDns"], d, config) + transformed["hostname"] = + flattenOracleDatabaseCloudVmClusterPropertiesHostname(original["hostname"], d, config) + transformed["cpu_core_count"] = + flattenOracleDatabaseCloudVmClusterPropertiesCpuCoreCount(original["cpuCoreCount"], d, config) + transformed["system_version"] = + 
flattenOracleDatabaseCloudVmClusterPropertiesSystemVersion(original["systemVersion"], d, config) + transformed["scan_ip_ids"] = + flattenOracleDatabaseCloudVmClusterPropertiesScanIpIds(original["scanIpIds"], d, config) + transformed["scan_dns_record_id"] = + flattenOracleDatabaseCloudVmClusterPropertiesScanDnsRecordId(original["scanDnsRecordId"], d, config) + transformed["oci_url"] = + flattenOracleDatabaseCloudVmClusterPropertiesOciUrl(original["ociUrl"], d, config) + transformed["db_server_ocids"] = + flattenOracleDatabaseCloudVmClusterPropertiesDbServerOcids(original["dbServerOcids"], d, config) + transformed["compartment_id"] = + flattenOracleDatabaseCloudVmClusterPropertiesCompartmentId(original["compartmentId"], d, config) + transformed["dns_listener_ip"] = + flattenOracleDatabaseCloudVmClusterPropertiesDnsListenerIp(original["dnsListenerIp"], d, config) + transformed["cluster_name"] = + flattenOracleDatabaseCloudVmClusterPropertiesClusterName(original["clusterName"], d, config) + return []interface{}{transformed} +} +func flattenOracleDatabaseCloudVmClusterPropertiesOcid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterPropertiesLicenseType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterPropertiesGiVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("properties.0.gi_version") +} + +func flattenOracleDatabaseCloudVmClusterPropertiesTimeZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["id"] = + flattenOracleDatabaseCloudVmClusterPropertiesTimeZoneId(original["id"], d, config) + return []interface{}{transformed} +} +func flattenOracleDatabaseCloudVmClusterPropertiesTimeZoneId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterPropertiesSshPublicKeys(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterPropertiesNodeCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseCloudVmClusterPropertiesShape(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterPropertiesOcpuCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterPropertiesMemorySizeGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + 
intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseCloudVmClusterPropertiesDbNodeStorageSizeGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseCloudVmClusterPropertiesStorageSizeGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseCloudVmClusterPropertiesDataStorageSizeTb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterPropertiesDiskRedundancy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterPropertiesSparseDiskgroupEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterPropertiesLocalBackupEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterPropertiesHostnamePrefix(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("properties.0.hostname_prefix") +} + +func flattenOracleDatabaseCloudVmClusterPropertiesDiagnosticsDataCollectionOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["diagnostics_events_enabled"] = + flattenOracleDatabaseCloudVmClusterPropertiesDiagnosticsDataCollectionOptionsDiagnosticsEventsEnabled(original["diagnosticsEventsEnabled"], d, config) + transformed["health_monitoring_enabled"] = + flattenOracleDatabaseCloudVmClusterPropertiesDiagnosticsDataCollectionOptionsHealthMonitoringEnabled(original["healthMonitoringEnabled"], d, config) + transformed["incident_logs_enabled"] = + flattenOracleDatabaseCloudVmClusterPropertiesDiagnosticsDataCollectionOptionsIncidentLogsEnabled(original["incidentLogsEnabled"], d, config) + return []interface{}{transformed} +} +func flattenOracleDatabaseCloudVmClusterPropertiesDiagnosticsDataCollectionOptionsDiagnosticsEventsEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterPropertiesDiagnosticsDataCollectionOptionsHealthMonitoringEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterPropertiesDiagnosticsDataCollectionOptionsIncidentLogsEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + 
return v +} + +func flattenOracleDatabaseCloudVmClusterPropertiesState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterPropertiesScanListenerPortTcp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseCloudVmClusterPropertiesScanListenerPortTcpSsl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseCloudVmClusterPropertiesDomain(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterPropertiesScanDns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterPropertiesHostname(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterPropertiesCpuCoreCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOracleDatabaseCloudVmClusterPropertiesSystemVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterPropertiesScanIpIds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterPropertiesScanDnsRecordId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterPropertiesOciUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterPropertiesDbServerOcids(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterPropertiesCompartmentId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterPropertiesDnsListenerIp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterPropertiesClusterName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterLabels(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenOracleDatabaseCloudVmClusterCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterCidr(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterBackupSubnetCidr(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOracleDatabaseCloudVmClusterTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("terraform_labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenOracleDatabaseCloudVmClusterEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandOracleDatabaseCloudVmClusterExadataInfrastructure(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterProperties(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedOcid, err := expandOracleDatabaseCloudVmClusterPropertiesOcid(original["ocid"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOcid); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ocid"] = transformedOcid + } + + transformedLicenseType, err := expandOracleDatabaseCloudVmClusterPropertiesLicenseType(original["license_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLicenseType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["licenseType"] = transformedLicenseType + } + + transformedGiVersion, err := expandOracleDatabaseCloudVmClusterPropertiesGiVersion(original["gi_version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGiVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["giVersion"] = transformedGiVersion + } + + transformedTimeZone, err := expandOracleDatabaseCloudVmClusterPropertiesTimeZone(original["time_zone"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeZone); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeZone"] = transformedTimeZone + } + + transformedSshPublicKeys, err := 
expandOracleDatabaseCloudVmClusterPropertiesSshPublicKeys(original["ssh_public_keys"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSshPublicKeys); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sshPublicKeys"] = transformedSshPublicKeys + } + + transformedNodeCount, err := expandOracleDatabaseCloudVmClusterPropertiesNodeCount(original["node_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNodeCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nodeCount"] = transformedNodeCount + } + + transformedShape, err := expandOracleDatabaseCloudVmClusterPropertiesShape(original["shape"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedShape); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["shape"] = transformedShape + } + + transformedOcpuCount, err := expandOracleDatabaseCloudVmClusterPropertiesOcpuCount(original["ocpu_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOcpuCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ocpuCount"] = transformedOcpuCount + } + + transformedMemorySizeGb, err := expandOracleDatabaseCloudVmClusterPropertiesMemorySizeGb(original["memory_size_gb"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMemorySizeGb); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["memorySizeGb"] = transformedMemorySizeGb + } + + transformedDbNodeStorageSizeGb, err := expandOracleDatabaseCloudVmClusterPropertiesDbNodeStorageSizeGb(original["db_node_storage_size_gb"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDbNodeStorageSizeGb); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dbNodeStorageSizeGb"] = transformedDbNodeStorageSizeGb + } + + transformedStorageSizeGb, err := expandOracleDatabaseCloudVmClusterPropertiesStorageSizeGb(original["storage_size_gb"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStorageSizeGb); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["storageSizeGb"] = transformedStorageSizeGb + } + + transformedDataStorageSizeTb, err := expandOracleDatabaseCloudVmClusterPropertiesDataStorageSizeTb(original["data_storage_size_tb"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDataStorageSizeTb); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dataStorageSizeTb"] = transformedDataStorageSizeTb + } + + transformedDiskRedundancy, err := expandOracleDatabaseCloudVmClusterPropertiesDiskRedundancy(original["disk_redundancy"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDiskRedundancy); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["diskRedundancy"] = transformedDiskRedundancy + } + + transformedSparseDiskgroupEnabled, err := expandOracleDatabaseCloudVmClusterPropertiesSparseDiskgroupEnabled(original["sparse_diskgroup_enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSparseDiskgroupEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sparseDiskgroupEnabled"] = transformedSparseDiskgroupEnabled + } + + transformedLocalBackupEnabled, err := 
expandOracleDatabaseCloudVmClusterPropertiesLocalBackupEnabled(original["local_backup_enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocalBackupEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["localBackupEnabled"] = transformedLocalBackupEnabled + } + + transformedHostnamePrefix, err := expandOracleDatabaseCloudVmClusterPropertiesHostnamePrefix(original["hostname_prefix"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHostnamePrefix); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hostnamePrefix"] = transformedHostnamePrefix + } + + transformedDiagnosticsDataCollectionOptions, err := expandOracleDatabaseCloudVmClusterPropertiesDiagnosticsDataCollectionOptions(original["diagnostics_data_collection_options"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDiagnosticsDataCollectionOptions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["diagnosticsDataCollectionOptions"] = transformedDiagnosticsDataCollectionOptions + } + + transformedState, err := expandOracleDatabaseCloudVmClusterPropertiesState(original["state"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedState); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["state"] = transformedState + } + + transformedScanListenerPortTcp, err := expandOracleDatabaseCloudVmClusterPropertiesScanListenerPortTcp(original["scan_listener_port_tcp"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScanListenerPortTcp); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scanListenerPortTcp"] = transformedScanListenerPortTcp + } + + transformedScanListenerPortTcpSsl, err := expandOracleDatabaseCloudVmClusterPropertiesScanListenerPortTcpSsl(original["scan_listener_port_tcp_ssl"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScanListenerPortTcpSsl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scanListenerPortTcpSsl"] = transformedScanListenerPortTcpSsl + } + + transformedDomain, err := expandOracleDatabaseCloudVmClusterPropertiesDomain(original["domain"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDomain); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["domain"] = transformedDomain + } + + transformedScanDns, err := expandOracleDatabaseCloudVmClusterPropertiesScanDns(original["scan_dns"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScanDns); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scanDns"] = transformedScanDns + } + + transformedHostname, err := expandOracleDatabaseCloudVmClusterPropertiesHostname(original["hostname"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHostname); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hostname"] = transformedHostname + } + + transformedCpuCoreCount, err := expandOracleDatabaseCloudVmClusterPropertiesCpuCoreCount(original["cpu_core_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCpuCoreCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cpuCoreCount"] = transformedCpuCoreCount + } + + transformedSystemVersion, err := 
expandOracleDatabaseCloudVmClusterPropertiesSystemVersion(original["system_version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSystemVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["systemVersion"] = transformedSystemVersion + } + + transformedScanIpIds, err := expandOracleDatabaseCloudVmClusterPropertiesScanIpIds(original["scan_ip_ids"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScanIpIds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scanIpIds"] = transformedScanIpIds + } + + transformedScanDnsRecordId, err := expandOracleDatabaseCloudVmClusterPropertiesScanDnsRecordId(original["scan_dns_record_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScanDnsRecordId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scanDnsRecordId"] = transformedScanDnsRecordId + } + + transformedOciUrl, err := expandOracleDatabaseCloudVmClusterPropertiesOciUrl(original["oci_url"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOciUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ociUrl"] = transformedOciUrl + } + + transformedDbServerOcids, err := expandOracleDatabaseCloudVmClusterPropertiesDbServerOcids(original["db_server_ocids"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDbServerOcids); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dbServerOcids"] = transformedDbServerOcids + } + + transformedCompartmentId, err := expandOracleDatabaseCloudVmClusterPropertiesCompartmentId(original["compartment_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCompartmentId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["compartmentId"] = transformedCompartmentId + } + + transformedDnsListenerIp, err := expandOracleDatabaseCloudVmClusterPropertiesDnsListenerIp(original["dns_listener_ip"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDnsListenerIp); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dnsListenerIp"] = transformedDnsListenerIp + } + + transformedClusterName, err := expandOracleDatabaseCloudVmClusterPropertiesClusterName(original["cluster_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedClusterName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["clusterName"] = transformedClusterName + } + + return transformed, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesOcid(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesLicenseType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesGiVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesTimeZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + 
transformed := make(map[string]interface{}) + + transformedId, err := expandOracleDatabaseCloudVmClusterPropertiesTimeZoneId(original["id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["id"] = transformedId + } + + return transformed, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesTimeZoneId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesSshPublicKeys(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesNodeCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesShape(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesOcpuCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesMemorySizeGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesDbNodeStorageSizeGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesStorageSizeGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesDataStorageSizeTb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesDiskRedundancy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesSparseDiskgroupEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesLocalBackupEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesHostnamePrefix(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesDiagnosticsDataCollectionOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDiagnosticsEventsEnabled, err := expandOracleDatabaseCloudVmClusterPropertiesDiagnosticsDataCollectionOptionsDiagnosticsEventsEnabled(original["diagnostics_events_enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDiagnosticsEventsEnabled); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["diagnosticsEventsEnabled"] = transformedDiagnosticsEventsEnabled + } + + transformedHealthMonitoringEnabled, err := expandOracleDatabaseCloudVmClusterPropertiesDiagnosticsDataCollectionOptionsHealthMonitoringEnabled(original["health_monitoring_enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHealthMonitoringEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["healthMonitoringEnabled"] = transformedHealthMonitoringEnabled + } + + transformedIncidentLogsEnabled, err := expandOracleDatabaseCloudVmClusterPropertiesDiagnosticsDataCollectionOptionsIncidentLogsEnabled(original["incident_logs_enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIncidentLogsEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["incidentLogsEnabled"] = transformedIncidentLogsEnabled + } + + return transformed, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesDiagnosticsDataCollectionOptionsDiagnosticsEventsEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesDiagnosticsDataCollectionOptionsHealthMonitoringEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesDiagnosticsDataCollectionOptionsIncidentLogsEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesScanListenerPortTcp(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesScanListenerPortTcpSsl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesDomain(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesScanDns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesHostname(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesCpuCoreCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesSystemVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesScanIpIds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesScanDnsRecordId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesOciUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesDbServerOcids(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesCompartmentId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesDnsListenerIp(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterPropertiesClusterName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterCidr(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterBackupSubnetCidr(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOracleDatabaseCloudVmClusterEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/resource_oracle_database_cloud_vm_cluster_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/resource_oracle_database_cloud_vm_cluster_sweeper.go new file mode 100644 index 00000000000..98090af8be5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oracledatabase/resource_oracle_database_cloud_vm_cluster_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
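Note: the integer flatteners in the CloudVmCluster hunks above all repeat one normalization step: the API can return a fixed64 value as a JSON string or a plain number as float64, and either form is converted to an integer before being written to state. Below is a minimal standalone sketch of that pattern, assuming tpgresource.StringToFixed64 behaves like a thin wrapper over strconv.ParseInt; flattenInt is illustrative, not the provider's helper.

package main

import (
	"fmt"
	"strconv"
)

// flattenInt mirrors the generated flatteners: fixed64 strings and float64
// numbers are both normalized to integers; any other type is passed through
// unchanged for Terraform core to handle.
func flattenInt(v interface{}) interface{} {
	if s, ok := v.(string); ok {
		if n, err := strconv.ParseInt(s, 10, 64); err == nil {
			return n
		}
	}
	if f, ok := v.(float64); ok {
		return int(f)
	}
	return v
}

func main() {
	fmt.Println(flattenInt("42"), flattenInt(7.0), flattenInt(true)) // 42 7 true
}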
+// +// ---------------------------------------------------------------------------- + +package oracledatabase + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("OracleDatabaseCloudVmCluster", testSweepOracleDatabaseCloudVmCluster) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepOracleDatabaseCloudVmCluster(region string) error { + resourceName := "OracleDatabaseCloudVmCluster" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://oracledatabase.googleapis.com/v1/projects/{{project}}/locations/{{location}}/cloudVmClusters", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["cloudVmClusters"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
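Note: the loop that follows derives the deletable resource name from either the "id" or "name" field of each list entry. For intuition, here is a tiny standalone sketch, assuming tpgresource.GetResourceNameFromSelfLink keeps only the final path segment of a self link; lastSegment is an illustrative stand-in, not the provider's implementation.

package main

import (
	"fmt"
	"strings"
)

// lastSegment returns the final "/"-separated component of a self link,
// e.g. the cluster ID at the end of a fully qualified resource name.
func lastSegment(selfLink string) string {
	parts := strings.Split(selfLink, "/")
	return parts[len(parts)-1]
}

func main() {
	link := "projects/p/locations/us-central1/cloudVmClusters/tf-test-abc123"
	fmt.Println(lastSegment(link)) // tf-test-abc123
}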
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://oracledatabase.googleapis.com/v1/projects/{{project}}/locations/{{location}}/cloudVmClusters/{{cloud_vm_cluster_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/orgpolicy/resource_org_policy_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/orgpolicy/resource_org_policy_policy.go index 5d940dbf577..00684bb57a2 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/orgpolicy/resource_org_policy_policy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/orgpolicy/resource_org_policy_policy.go @@ -201,7 +201,7 @@ func ResourceOrgPolicyPolicy() *schema.Resource { "rules": { Type: schema.TypeList, Optional: true, - Description: `Up to 10 PolicyRules are allowed. In Policies for boolean constraints, the following requirements apply: - There must be one and only one PolicyRule where condition is unset. - BooleanPolicyRules with conditions must set 'enforced' to the opposite of the PolicyRule without a condition. - During policy evaluation, PolicyRules with conditions that are true for a target resource take precedence.`, + Description: `In Policies for boolean constraints, the following requirements apply: - There must be one and only one PolicyRule where condition is unset. - BooleanPolicyRules with conditions must set 'enforced' to the opposite of the PolicyRule without a condition. 
- During policy evaluation, PolicyRules with conditions that are true for a target resource take precedence.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "allow_all": { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_certificate_authority.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_certificate_authority.go index d91db1e0aff..ec5cfbf5d7d 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_certificate_authority.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_certificate_authority.go @@ -838,16 +838,17 @@ fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045 "deletion_protection": { Type: schema.TypeBool, Optional: true, - Default: true, Description: `Whether Terraform will be prevented from destroying the CertificateAuthority. When the field is set to true or unset in Terraform state, a 'terraform apply' or 'terraform destroy' that would delete the CertificateAuthority will fail. When the field is set to false, deleting the CertificateAuthority is allowed.`, + Default: true, }, "desired_state": { - Type: schema.TypeString, - Optional: true, - Description: `Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA.`, + Type: schema.TypeString, + Optional: true, + Description: `Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. +Possible values: ENABLED, DISABLED, STAGED.`, }, "project": { Type: schema.TypeString, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privilegedaccessmanager/data_source_privileged_access_manager_entitlement.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privilegedaccessmanager/data_source_privileged_access_manager_entitlement.go new file mode 100644 index 00000000000..edff7609f6e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privilegedaccessmanager/data_source_privileged_access_manager_entitlement.go @@ -0,0 +1,46 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package privilegedaccessmanager + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGooglePrivilegedAccessManagerEntitlement() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourcePrivilegedAccessManagerEntitlement().Schema) + tpgresource.AddOptionalFieldsToSchema(dsSchema, "entitlement_id") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "parent") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "location") + + return &schema.Resource{ + Read: dataSourceGooglePrivilegedAccessManagerEntitlementRead, + Schema: dsSchema, + } +} + +func dataSourceGooglePrivilegedAccessManagerEntitlementRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/locations/{{location}}/entitlements/{{entitlement_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + err = resourcePrivilegedAccessManagerEntitlementRead(d, meta) + if err != nil { + return err + } + + if err := tpgresource.SetDataSourceLabels(d); err != nil { + return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found", id) + } + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privilegedaccessmanager/privileged_access_manager_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privilegedaccessmanager/privileged_access_manager_operation.go new file mode 100644 index 00000000000..a7aeff3a4a0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privilegedaccessmanager/privileged_access_manager_operation.go @@ -0,0 +1,89 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package privilegedaccessmanager + +import ( + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type PrivilegedAccessManagerOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + tpgresource.CommonOperationWaiter +} + +func (w *PrivilegedAccessManagerOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.PrivilegedAccessManagerBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createPrivilegedAccessManagerWaiter(config *transport_tpg.Config, op map[string]interface{}, activity, userAgent string) (*PrivilegedAccessManagerOperationWaiter, error) { + w := &PrivilegedAccessManagerOperationWaiter{ + Config: config, + UserAgent: userAgent, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func PrivilegedAccessManagerOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, activity, userAgent string, timeout time.Duration) error { + w, err := createPrivilegedAccessManagerWaiter(config, op, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + rawResponse := []byte(w.CommonOperationWaiter.Op.Response) + if len(rawResponse) == 0 { + return errors.New("`resource` not set in operation response") + } + return json.Unmarshal(rawResponse, response) +} + +func PrivilegedAccessManagerOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createPrivilegedAccessManagerWaiter(config, op, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privilegedaccessmanager/resource_privileged_access_manager_entitlement.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privilegedaccessmanager/resource_privileged_access_manager_entitlement.go new file mode 100644 index 00000000000..f75b38b1860 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privilegedaccessmanager/resource_privileged_access_manager_entitlement.go @@ -0,0 +1,1355 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
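Note: the PrivilegedAccessManager waiter in the previous hunk queries the operation's name URL via GET and delegates the retry loop to tpgresource.OperationWait. Below is a self-contained sketch of what such a poll loop amounts to; the types, intervals, and names are illustrative assumptions, not the provider's implementation.

package main

import (
	"errors"
	"fmt"
	"time"
)

// fakeOp stands in for a long-running API operation; it reports done after
// three status queries.
type fakeOp struct{ polls int }

func (o *fakeOp) done() (bool, error) {
	o.polls++
	return o.polls >= 3, nil
}

// waitForOp polls the operation until it completes, a query fails, or the
// timeout elapses, which is the contract the generated code relies on.
func waitForOp(o *fakeOp, timeout, interval time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		ok, err := o.done()
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
		time.Sleep(interval)
	}
	return errors.New("timed out waiting for operation")
}

func main() {
	fmt.Println(waitForOp(&fakeOp{}, time.Second, 10*time.Millisecond)) // <nil>
}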
+// +// ---------------------------------------------------------------------------- + +package privilegedaccessmanager + +import ( + "fmt" + "log" + "net/http" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +const deletedRegexp = `^deleted:` + +func validateDeletedPrincipals(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if regexp.MustCompile(deletedRegexp).MatchString(value) { + errors = append(errors, fmt.Errorf( + "Terraform does not support IAM policies for deleted principals: %s", k)) + } + + return +} + +const entitlementIdRegexp = `^[a-z][a-z0-9-]{3,62}$` + +func validateEntitlementId(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(entitlementIdRegexp).MatchString(value) { + errors = append(errors, fmt.Errorf( + "Entitlement Id should be 4-63 characters, and valid characters are '[a-z]', '[0-9]', and '-'. The first character should be from [a-z]: %s", k)) + } + + return +} + +func ResourcePrivilegedAccessManagerEntitlement() *schema.Resource { + return &schema.Resource{ + Create: resourcePrivilegedAccessManagerEntitlementCreate, + Read: resourcePrivilegedAccessManagerEntitlementRead, + Update: resourcePrivilegedAccessManagerEntitlementUpdate, + Delete: resourcePrivilegedAccessManagerEntitlementDelete, + + Importer: &schema.ResourceImporter{ + State: resourcePrivilegedAccessManagerEntitlementImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "eligible_users": { + Type: schema.TypeList, + Required: true, + Description: `Who can create Grants using this Entitlement. This list should contain at most one entry.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "principals": { + Type: schema.TypeSet, + Required: true, + Description: `Users who are being allowed for the operation. Each entry should be a valid v1 IAM Principal Identifier. Format for these is documented at "https://cloud.google.com/iam/docs/principal-identifiers#v1"`, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateDeletedPrincipals, + }, + Set: schema.HashString, + }, + }, + }, + }, + "entitlement_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateEntitlementId, + Description: `The ID to use for this Entitlement. This will become the last part of the resource name. +This value should be 4-63 characters, and valid characters are "[a-z]", "[0-9]", and "-". The first character should be from [a-z]. +This value should be unique among all other Entitlements under the specified 'parent'.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The region of the Entitlement resource.`, + }, + "max_request_duration": { + Type: schema.TypeString, + Required: true, + Description: `The maximum amount of time for which access would be granted for a request. +A requester can choose to ask for access for less than this duration but never more. +Format: calculate the time in seconds and concatenate it with 's', e.g.
2 hours = "7200s", 45 minutes = "2700s"`, + }, + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Format: projects/{project-id|project-number} or organizations/{organization-number} or folders/{folder-number}`, + }, + "privileged_access": { + Type: schema.TypeList, + Required: true, + Description: `Privileged access that this service can be used to gate.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "gcp_iam_access": { + Type: schema.TypeList, + Required: true, + Description: `GcpIamAccess represents IAM based access control on a GCP resource. Refer to https://cloud.google.com/iam/docs to understand more about IAM.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource": { + Type: schema.TypeString, + Required: true, + Description: `Name of the resource.`, + }, + "resource_type": { + Type: schema.TypeString, + Required: true, + Description: `The type of this resource.`, + }, + "role_bindings": { + Type: schema.TypeList, + Required: true, + Description: `Role bindings to be created on successful grant.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "role": { + Type: schema.TypeString, + Required: true, + Description: `IAM role to be granted. https://cloud.google.com/iam/docs/roles-overview.`, + }, + "condition_expression": { + Type: schema.TypeString, + Optional: true, + Description: `The expression field of the IAM condition to be associated with the role. If specified, a user with an active grant for this entitlement would be able to access the resource only if this condition evaluates to true for their request. +https://cloud.google.com/iam/docs/conditions-overview#attributes.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "requester_justification_config": { + Type: schema.TypeList, + Required: true, + Description: `Defines the ways in which a requester should provide the justification while requesting for access.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "not_mandatory": { + Type: schema.TypeList, + Optional: true, + Description: `The justification is not mandatory but can be provided in any of the supported formats.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + ConflictsWith: []string{}, + }, + "unstructured": { + Type: schema.TypeList, + Optional: true, + Description: `The requester has to provide a justification in the form of free flowing text.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + ConflictsWith: []string{}, + }, + }, + }, + }, + "additional_notification_targets": { + Type: schema.TypeList, + Optional: true, + Description: `AdditionalNotificationTargets includes email addresses to be notified.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "admin_email_recipients": { + Type: schema.TypeSet, + Optional: true, + Description: `Optional. Additional email addresses to be notified when a principal(requester) is granted access.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + }, + "requester_email_recipients": { + Type: schema.TypeSet, + Optional: true, + Description: `Optional. 
Additional email addresses to be notified about an eligible entitlement.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + }, + }, + }, + }, + "approval_workflow": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The approvals needed before access will be granted to a requester. +No approvals will be needed if this field is null. Different types of approval workflows that can be used to gate privileged access granting.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "manual_approvals": { + Type: schema.TypeList, + Required: true, + Description: `A manual approval workflow where users who are designated as approvers need to call the ApproveGrant/DenyGrant APIs for a Grant. +The workflow can consist of multiple serial steps where each step defines who can act as Approver in that step and how many of those users should approve before the workflow moves to the next step. +This can be used to create approval workflows such as +* Require an approval from any user in a group G. +* Require an approval from any k number of users from a Group G. +* Require an approval from any user in a group G and then from a user U, etc. +A single user might be part of the 'approvers' ACL for multiple steps in this workflow, but they can only approve once and that approval will only be considered to satisfy the approval step at which it was granted.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "steps": { + Type: schema.TypeList, + Required: true, + Description: `List of approval steps in this workflow. These steps would be followed in the specified order sequentially. 1 step is supported for now.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "approvers": { + Type: schema.TypeList, + Required: true, + Description: `The potential set of approvers in this step. This list should contain only one entry.`, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "principals": { + Type: schema.TypeSet, + Required: true, + Description: `Users who are being allowed for the operation. Each entry should be a valid v1 IAM Principal Identifier. Format for these is documented at: https://cloud.google.com/iam/docs/principal-identifiers#v1`, + MinItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateDeletedPrincipals, + }, + Set: schema.HashString, + }, + }, + }, + }, + "approvals_needed": { + Type: schema.TypeInt, + Optional: true, + Description: `How many users from the above list need to approve. +If there are not enough distinct users in the list above then the workflow +will indefinitely block. Should always be greater than 0. Currently 1 is the only +supported value.`, + }, + "approver_email_recipients": { + Type: schema.TypeSet, + Optional: true, + Description: `Optional. Additional email addresses to be notified when a grant is pending approval.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + }, + }, + }, + }, + "require_approver_justification": { + Type: schema.TypeBool, + Optional: true, + Description: `Optional. Do the approvers need to provide a justification for their actions?`, + }, + }, + }, + }, + }, + }, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Create time stamp. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
+Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z"`, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + Description: `For Resource freshness validation (https://google.aip.dev/154)`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Output Only. The entitlement's name follows a hierarchical structure, comprising the organization, folder, or project, alongside the region and a unique entitlement ID. +Formats: organizations/{organization-number}/locations/{region}/entitlements/{entitlement-id}, folders/{folder-number}/locations/{region}/entitlements/{entitlement-id}, and projects/{project-id|project-number}/locations/{region}/entitlements/{entitlement-id}.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The current state of the Entitlement.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Update time stamp. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + UseJSONNumber: true, + } +} + +func resourcePrivilegedAccessManagerEntitlementCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + eligibleUsersProp, err := expandPrivilegedAccessManagerEntitlementEligibleUsers(d.Get("eligible_users"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("eligible_users"); !tpgresource.IsEmptyValue(reflect.ValueOf(eligibleUsersProp)) && (ok || !reflect.DeepEqual(v, eligibleUsersProp)) { + obj["eligibleUsers"] = eligibleUsersProp + } + approvalWorkflowProp, err := expandPrivilegedAccessManagerEntitlementApprovalWorkflow(d.Get("approval_workflow"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("approval_workflow"); !tpgresource.IsEmptyValue(reflect.ValueOf(approvalWorkflowProp)) && (ok || !reflect.DeepEqual(v, approvalWorkflowProp)) { + obj["approvalWorkflow"] = approvalWorkflowProp + } + privilegedAccessProp, err := expandPrivilegedAccessManagerEntitlementPrivilegedAccess(d.Get("privileged_access"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("privileged_access"); !tpgresource.IsEmptyValue(reflect.ValueOf(privilegedAccessProp)) && (ok || !reflect.DeepEqual(v, privilegedAccessProp)) { + obj["privilegedAccess"] = privilegedAccessProp + } + maxRequestDurationProp, err := expandPrivilegedAccessManagerEntitlementMaxRequestDuration(d.Get("max_request_duration"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("max_request_duration"); !tpgresource.IsEmptyValue(reflect.ValueOf(maxRequestDurationProp)) && (ok || !reflect.DeepEqual(v, maxRequestDurationProp)) { + obj["maxRequestDuration"] = maxRequestDurationProp + } + etagProp, err := expandPrivilegedAccessManagerEntitlementEtag(d.Get("etag"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("etag"); !tpgresource.IsEmptyValue(reflect.ValueOf(etagProp)) && (ok || !reflect.DeepEqual(v, etagProp)) { + obj["etag"] = etagProp + } + requesterJustificationConfigProp, err := expandPrivilegedAccessManagerEntitlementRequesterJustificationConfig(d.Get("requester_justification_config"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("requester_justification_config"); ok || !reflect.DeepEqual(v, requesterJustificationConfigProp) { + obj["requesterJustificationConfig"] = requesterJustificationConfigProp + } + additionalNotificationTargetsProp, err := expandPrivilegedAccessManagerEntitlementAdditionalNotificationTargets(d.Get("additional_notification_targets"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("additional_notification_targets"); ok || !reflect.DeepEqual(v, additionalNotificationTargetsProp) { + obj["additionalNotificationTargets"] = additionalNotificationTargetsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{PrivilegedAccessManagerBasePath}}{{parent}}/locations/{{location}}/entitlements?entitlementId={{entitlement_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Entitlement: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating Entitlement: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/locations/{{location}}/entitlements/{{entitlement_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = PrivilegedAccessManagerOperationWaitTimeWithResponse( + config, res, &opRes, "Creating Entitlement", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Entitlement: %s", err) + } + + if err := d.Set("name", flattenPrivilegedAccessManagerEntitlementName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
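+ // Illustrative aside (not part of the generated code): ReplaceVars re-renders
+ // the ID template from the identity fields that were just set, so for a
+ // hypothetical parent "projects/my-proj" and entitlement_id "my-ent" the
+ // stored ID becomes "projects/my-proj/locations/global/entitlements/my-ent",
+ // keeping d.Id() in sync with whatever name the operation returned.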
+ id, err = tpgresource.ReplaceVars(d, config, "{{parent}}/locations/{{location}}/entitlements/{{entitlement_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Entitlement %q: %#v", d.Id(), res) + + return resourcePrivilegedAccessManagerEntitlementRead(d, meta) +} + +func resourcePrivilegedAccessManagerEntitlementRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{PrivilegedAccessManagerBasePath}}{{parent}}/locations/{{location}}/entitlements/{{entitlement_id}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("PrivilegedAccessManagerEntitlement %q", d.Id())) + } + + if err := d.Set("name", flattenPrivilegedAccessManagerEntitlementName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Entitlement: %s", err) + } + if err := d.Set("create_time", flattenPrivilegedAccessManagerEntitlementCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Entitlement: %s", err) + } + if err := d.Set("update_time", flattenPrivilegedAccessManagerEntitlementUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Entitlement: %s", err) + } + if err := d.Set("eligible_users", flattenPrivilegedAccessManagerEntitlementEligibleUsers(res["eligibleUsers"], d, config)); err != nil { + return fmt.Errorf("Error reading Entitlement: %s", err) + } + if err := d.Set("approval_workflow", flattenPrivilegedAccessManagerEntitlementApprovalWorkflow(res["approvalWorkflow"], d, config)); err != nil { + return fmt.Errorf("Error reading Entitlement: %s", err) + } + if err := d.Set("privileged_access", flattenPrivilegedAccessManagerEntitlementPrivilegedAccess(res["privilegedAccess"], d, config)); err != nil { + return fmt.Errorf("Error reading Entitlement: %s", err) + } + if err := d.Set("max_request_duration", flattenPrivilegedAccessManagerEntitlementMaxRequestDuration(res["maxRequestDuration"], d, config)); err != nil { + return fmt.Errorf("Error reading Entitlement: %s", err) + } + if err := d.Set("state", flattenPrivilegedAccessManagerEntitlementState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Entitlement: %s", err) + } + if err := d.Set("etag", flattenPrivilegedAccessManagerEntitlementEtag(res["etag"], d, config)); err != nil { + return fmt.Errorf("Error reading Entitlement: %s", err) + } + if err := d.Set("requester_justification_config", flattenPrivilegedAccessManagerEntitlementRequesterJustificationConfig(res["requesterJustificationConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Entitlement: %s", err) + } + if err := d.Set("additional_notification_targets", flattenPrivilegedAccessManagerEntitlementAdditionalNotificationTargets(res["additionalNotificationTargets"], d, config)); err != nil { + return 
fmt.Errorf("Error reading Entitlement: %s", err) + } + + return nil +} + +func resourcePrivilegedAccessManagerEntitlementUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + eligibleUsersProp, err := expandPrivilegedAccessManagerEntitlementEligibleUsers(d.Get("eligible_users"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("eligible_users"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, eligibleUsersProp)) { + obj["eligibleUsers"] = eligibleUsersProp + } + privilegedAccessProp, err := expandPrivilegedAccessManagerEntitlementPrivilegedAccess(d.Get("privileged_access"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("privileged_access"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, privilegedAccessProp)) { + obj["privilegedAccess"] = privilegedAccessProp + } + maxRequestDurationProp, err := expandPrivilegedAccessManagerEntitlementMaxRequestDuration(d.Get("max_request_duration"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("max_request_duration"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, maxRequestDurationProp)) { + obj["maxRequestDuration"] = maxRequestDurationProp + } + etagProp, err := expandPrivilegedAccessManagerEntitlementEtag(d.Get("etag"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("etag"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, etagProp)) { + obj["etag"] = etagProp + } + requesterJustificationConfigProp, err := expandPrivilegedAccessManagerEntitlementRequesterJustificationConfig(d.Get("requester_justification_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("requester_justification_config"); ok || !reflect.DeepEqual(v, requesterJustificationConfigProp) { + obj["requesterJustificationConfig"] = requesterJustificationConfigProp + } + additionalNotificationTargetsProp, err := expandPrivilegedAccessManagerEntitlementAdditionalNotificationTargets(d.Get("additional_notification_targets"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("additional_notification_targets"); ok || !reflect.DeepEqual(v, additionalNotificationTargetsProp) { + obj["additionalNotificationTargets"] = additionalNotificationTargetsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{PrivilegedAccessManagerBasePath}}{{parent}}/locations/{{location}}/entitlements/{{entitlement_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Entitlement %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("eligible_users") { + updateMask = append(updateMask, "eligibleUsers") + } + + if d.HasChange("privileged_access") { + updateMask = append(updateMask, "privilegedAccess") + } + + if d.HasChange("max_request_duration") { + updateMask = append(updateMask, "maxRequestDuration") + } + + if d.HasChange("etag") { + updateMask = append(updateMask, "etag") + } + + if d.HasChange("requester_justification_config") { + updateMask = append(updateMask, "requesterJustificationConfig") + } + + if d.HasChange("additional_notification_targets") { + updateMask = append(updateMask, "additionalNotificationTargets") + } + // 
updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + approvalWorkflowProp, err := expandPrivilegedAccessManagerEntitlementApprovalWorkflow(d.Get("approval_workflow"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("approval_workflow"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, approvalWorkflowProp)) { + obj["approvalWorkflow"] = approvalWorkflowProp + } + if d.HasChange("approval_workflow") { + updateMask = append(updateMask, "approvalWorkflow") + } + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating Entitlement %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Entitlement %q: %#v", d.Id(), res) + } + + err = PrivilegedAccessManagerOperationWaitTime( + config, res, "Updating Entitlement", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + } + + return resourcePrivilegedAccessManagerEntitlementRead(d, meta) +} + +func resourcePrivilegedAccessManagerEntitlementDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{PrivilegedAccessManagerBasePath}}{{parent}}/locations/{{location}}/entitlements/{{entitlement_id}}?force=true") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting Entitlement %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Entitlement") + } + + err = PrivilegedAccessManagerOperationWaitTime( + config, res, "Deleting Entitlement", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Entitlement %q: %#v", d.Id(), res) + return nil +} + +func resourcePrivilegedAccessManagerEntitlementImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^(?P.+)/locations/(?P[^/]+)/entitlements/(?P[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace 
import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/locations/{{location}}/entitlements/{{entitlement_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenPrivilegedAccessManagerEntitlementName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPrivilegedAccessManagerEntitlementCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPrivilegedAccessManagerEntitlementUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPrivilegedAccessManagerEntitlementEligibleUsers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "principals": flattenPrivilegedAccessManagerEntitlementEligibleUsersPrincipals(original["principals"], d, config), + }) + } + return transformed +} +func flattenPrivilegedAccessManagerEntitlementEligibleUsersPrincipals(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenPrivilegedAccessManagerEntitlementApprovalWorkflow(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["manual_approvals"] = + flattenPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovals(original["manualApprovals"], d, config) + return []interface{}{transformed} +} +func flattenPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovals(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["require_approver_justification"] = + flattenPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovalsRequireApproverJustification(original["requireApproverJustification"], d, config) + transformed["steps"] = + flattenPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovalsSteps(original["steps"], d, config) + return []interface{}{transformed} +} +func flattenPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovalsRequireApproverJustification(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovalsSteps(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + 
"approvers": flattenPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovalsStepsApprovers(original["approvers"], d, config), + "approvals_needed": flattenPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovalsStepsApprovalsNeeded(original["approvalsNeeded"], d, config), + "approver_email_recipients": flattenPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovalsStepsApproverEmailRecipients(original["approverEmailRecipients"], d, config), + }) + } + return transformed +} +func flattenPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovalsStepsApprovers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "principals": flattenPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovalsStepsApproversPrincipals(original["principals"], d, config), + }) + } + return transformed +} +func flattenPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovalsStepsApproversPrincipals(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovalsStepsApprovalsNeeded(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovalsStepsApproverEmailRecipients(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenPrivilegedAccessManagerEntitlementPrivilegedAccess(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["gcp_iam_access"] = + flattenPrivilegedAccessManagerEntitlementPrivilegedAccessGcpIamAccess(original["gcpIamAccess"], d, config) + return []interface{}{transformed} +} +func flattenPrivilegedAccessManagerEntitlementPrivilegedAccessGcpIamAccess(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["resource_type"] = + flattenPrivilegedAccessManagerEntitlementPrivilegedAccessGcpIamAccessResourceType(original["resourceType"], d, config) + transformed["resource"] = + flattenPrivilegedAccessManagerEntitlementPrivilegedAccessGcpIamAccessResource(original["resource"], d, config) + transformed["role_bindings"] = + 
flattenPrivilegedAccessManagerEntitlementPrivilegedAccessGcpIamAccessRoleBindings(original["roleBindings"], d, config) + return []interface{}{transformed} +} +func flattenPrivilegedAccessManagerEntitlementPrivilegedAccessGcpIamAccessResourceType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPrivilegedAccessManagerEntitlementPrivilegedAccessGcpIamAccessResource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPrivilegedAccessManagerEntitlementPrivilegedAccessGcpIamAccessRoleBindings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "role": flattenPrivilegedAccessManagerEntitlementPrivilegedAccessGcpIamAccessRoleBindingsRole(original["role"], d, config), + "condition_expression": flattenPrivilegedAccessManagerEntitlementPrivilegedAccessGcpIamAccessRoleBindingsConditionExpression(original["conditionExpression"], d, config), + }) + } + return transformed +} +func flattenPrivilegedAccessManagerEntitlementPrivilegedAccessGcpIamAccessRoleBindingsRole(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPrivilegedAccessManagerEntitlementPrivilegedAccessGcpIamAccessRoleBindingsConditionExpression(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPrivilegedAccessManagerEntitlementMaxRequestDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPrivilegedAccessManagerEntitlementState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPrivilegedAccessManagerEntitlementEtag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPrivilegedAccessManagerEntitlementRequesterJustificationConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["not_mandatory"] = + flattenPrivilegedAccessManagerEntitlementRequesterJustificationConfigNotMandatory(original["notMandatory"], d, config) + transformed["unstructured"] = + flattenPrivilegedAccessManagerEntitlementRequesterJustificationConfigUnstructured(original["unstructured"], d, config) + return []interface{}{transformed} +} +func flattenPrivilegedAccessManagerEntitlementRequesterJustificationConfigNotMandatory(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenPrivilegedAccessManagerEntitlementRequesterJustificationConfigUnstructured(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenPrivilegedAccessManagerEntitlementAdditionalNotificationTargets(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["admin_email_recipients"] = + flattenPrivilegedAccessManagerEntitlementAdditionalNotificationTargetsAdminEmailRecipients(original["adminEmailRecipients"], d, config) + transformed["requester_email_recipients"] = + flattenPrivilegedAccessManagerEntitlementAdditionalNotificationTargetsRequesterEmailRecipients(original["requesterEmailRecipients"], d, config) + return []interface{}{transformed} +} +func flattenPrivilegedAccessManagerEntitlementAdditionalNotificationTargetsAdminEmailRecipients(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenPrivilegedAccessManagerEntitlementAdditionalNotificationTargetsRequesterEmailRecipients(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func expandPrivilegedAccessManagerEntitlementEligibleUsers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPrincipals, err := expandPrivilegedAccessManagerEntitlementEligibleUsersPrincipals(original["principals"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrincipals); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["principals"] = transformedPrincipals + } + + req = append(req, transformed) + } + return req, nil +} + +func expandPrivilegedAccessManagerEntitlementEligibleUsersPrincipals(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandPrivilegedAccessManagerEntitlementApprovalWorkflow(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedManualApprovals, err := expandPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovals(original["manual_approvals"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedManualApprovals); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["manualApprovals"] = transformedManualApprovals + } + + return transformed, nil +} + +func expandPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovals(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRequireApproverJustification, err := expandPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovalsRequireApproverJustification(original["require_approver_justification"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedRequireApproverJustification); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["requireApproverJustification"] = transformedRequireApproverJustification + } + + transformedSteps, err := expandPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovalsSteps(original["steps"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSteps); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["steps"] = transformedSteps + } + + return transformed, nil +} + +func expandPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovalsRequireApproverJustification(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovalsSteps(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedApprovers, err := expandPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovalsStepsApprovers(original["approvers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedApprovers); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["approvers"] = transformedApprovers + } + + transformedApprovalsNeeded, err := expandPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovalsStepsApprovalsNeeded(original["approvals_needed"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedApprovalsNeeded); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["approvalsNeeded"] = transformedApprovalsNeeded + } + + transformedApproverEmailRecipients, err := expandPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovalsStepsApproverEmailRecipients(original["approver_email_recipients"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedApproverEmailRecipients); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["approverEmailRecipients"] = transformedApproverEmailRecipients + } + + req = append(req, transformed) + } + return req, nil +} + +func expandPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovalsStepsApprovers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPrincipals, err := expandPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovalsStepsApproversPrincipals(original["principals"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrincipals); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["principals"] = transformedPrincipals + } + + req = append(req, transformed) + } + return req, nil +} + +func expandPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovalsStepsApproversPrincipals(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func 
expandPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovalsStepsApprovalsNeeded(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandPrivilegedAccessManagerEntitlementApprovalWorkflowManualApprovalsStepsApproverEmailRecipients(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandPrivilegedAccessManagerEntitlementPrivilegedAccess(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedGcpIamAccess, err := expandPrivilegedAccessManagerEntitlementPrivilegedAccessGcpIamAccess(original["gcp_iam_access"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGcpIamAccess); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["gcpIamAccess"] = transformedGcpIamAccess + } + + return transformed, nil +} + +func expandPrivilegedAccessManagerEntitlementPrivilegedAccessGcpIamAccess(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedResourceType, err := expandPrivilegedAccessManagerEntitlementPrivilegedAccessGcpIamAccessResourceType(original["resource_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedResourceType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resourceType"] = transformedResourceType + } + + transformedResource, err := expandPrivilegedAccessManagerEntitlementPrivilegedAccessGcpIamAccessResource(original["resource"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedResource); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resource"] = transformedResource + } + + transformedRoleBindings, err := expandPrivilegedAccessManagerEntitlementPrivilegedAccessGcpIamAccessRoleBindings(original["role_bindings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRoleBindings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["roleBindings"] = transformedRoleBindings + } + + return transformed, nil +} + +func expandPrivilegedAccessManagerEntitlementPrivilegedAccessGcpIamAccessResourceType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandPrivilegedAccessManagerEntitlementPrivilegedAccessGcpIamAccessResource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandPrivilegedAccessManagerEntitlementPrivilegedAccessGcpIamAccessRoleBindings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRole, err := 
expandPrivilegedAccessManagerEntitlementPrivilegedAccessGcpIamAccessRoleBindingsRole(original["role"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRole); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["role"] = transformedRole + } + + transformedConditionExpression, err := expandPrivilegedAccessManagerEntitlementPrivilegedAccessGcpIamAccessRoleBindingsConditionExpression(original["condition_expression"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConditionExpression); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["conditionExpression"] = transformedConditionExpression + } + + req = append(req, transformed) + } + return req, nil +} + +func expandPrivilegedAccessManagerEntitlementPrivilegedAccessGcpIamAccessRoleBindingsRole(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandPrivilegedAccessManagerEntitlementPrivilegedAccessGcpIamAccessRoleBindingsConditionExpression(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandPrivilegedAccessManagerEntitlementMaxRequestDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandPrivilegedAccessManagerEntitlementEtag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandPrivilegedAccessManagerEntitlementRequesterJustificationConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNotMandatory, err := expandPrivilegedAccessManagerEntitlementRequesterJustificationConfigNotMandatory(original["not_mandatory"], d, config) + if err != nil { + return nil, err + } else { + transformed["notMandatory"] = transformedNotMandatory + } + + transformedUnstructured, err := expandPrivilegedAccessManagerEntitlementRequesterJustificationConfigUnstructured(original["unstructured"], d, config) + if err != nil { + return nil, err + } else { + transformed["unstructured"] = transformedUnstructured + } + + return transformed, nil +} + +func expandPrivilegedAccessManagerEntitlementRequesterJustificationConfigNotMandatory(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandPrivilegedAccessManagerEntitlementRequesterJustificationConfigUnstructured(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandPrivilegedAccessManagerEntitlementAdditionalNotificationTargets(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAdminEmailRecipients, err := expandPrivilegedAccessManagerEntitlementAdditionalNotificationTargetsAdminEmailRecipients(original["admin_email_recipients"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAdminEmailRecipients); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["adminEmailRecipients"] = transformedAdminEmailRecipients + } + + transformedRequesterEmailRecipients, err := expandPrivilegedAccessManagerEntitlementAdditionalNotificationTargetsRequesterEmailRecipients(original["requester_email_recipients"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRequesterEmailRecipients); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["requesterEmailRecipients"] = transformedRequesterEmailRecipients + } + + return transformed, nil +} + +func expandPrivilegedAccessManagerEntitlementAdditionalNotificationTargetsAdminEmailRecipients(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandPrivilegedAccessManagerEntitlementAdditionalNotificationTargetsRequesterEmailRecipients(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privilegedaccessmanager/resource_privileged_access_manager_entitlement_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privilegedaccessmanager/resource_privileged_access_manager_entitlement_sweeper.go new file mode 100644 index 00000000000..551c78be9e2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privilegedaccessmanager/resource_privileged_access_manager_entitlement_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
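+//
+// Illustrative sketch (not part of the generated header): a sweeper is just a
+// named cleanup function registered with the acceptance-test harness, roughly
+//
+//	sweeper.AddTestSweepers("PrivilegedAccessManagerEntitlement",
+//		testSweepPrivilegedAccessManagerEntitlement)
+//
+// which the harness later invokes once per test region (for example
+// "us-central1") to delete leaked test resources.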
+// +// ---------------------------------------------------------------------------- + +package privilegedaccessmanager + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("PrivilegedAccessManagerEntitlement", testSweepPrivilegedAccessManagerEntitlement) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepPrivilegedAccessManagerEntitlement(region string) error { + resourceName := "PrivilegedAccessManagerEntitlement" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://privilegedaccessmanager.googleapis.com/v1/{{parent}}/locations/{{location}}/entitlements", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["entitlements"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
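+ // Illustrative aside (not part of the generated code): a list entry's "name"
+ // is a full resource path such as
+ // "projects/my-proj/locations/global/entitlements/tf-test-ent" (hypothetical),
+ // and GetResourceNameFromSelfLink keeps only the trailing segment
+ // ("tf-test-ent"); the branch below prefers "id" when the API supplies one.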
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be swept + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://privilegedaccessmanager.googleapis.com/v1/{{parent}}/locations/{{location}}/entitlements/{{entitlement_id}}?force=true" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_subscription.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_subscription.go index 4bd81d7e623..249c5522673 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_subscription.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_subscription.go @@ -32,6 +32,7 @@ import ( "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func comparePubsubSubscriptionExpirationPolicy(_, old, new string, _ *schema.ResourceData) bool { @@ -202,6 +203,11 @@ If all three are empty, then the subscriber will pull and ack messages using API MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "use_topic_schema": { + Type: schema.TypeBool, + Optional: true, + Description: `When true, the output Cloud Storage file will be serialized using the topic schema, if it exists.`, + }, "write_metadata": { Type: schema.TypeBool, Optional: true, @@ -239,6 +245,11 @@ May not exceed the subscription's acknowledgement deadline. A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s".`, Default: "300s", }, + "max_messages": { + Type: schema.TypeInt, + Optional: true, + Description: `The maximum number of messages that can be written to a Cloud Storage file before a new file is created. 
Min 1000 messages.`, + }, "service_account_email": { Type: schema.TypeString, Optional: true, @@ -352,9 +363,10 @@ Example - "3.5s".`, }, }, "filter": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateRegexp(`^[\s\S]{0,256}$`), Description: `The subscription only delivers the messages that match the filter. Pub/Sub automatically acknowledges the messages that don't match the filter. You can filter messages by their attributes. The maximum length of a filter is 256 bytes. After creating the subscription, @@ -378,7 +390,7 @@ backlog, from the moment a message is published. If retain_acked_messages is true, then this also configures the retention of acknowledged messages, and thus configures how far back in time a subscriptions.seek can be done. Defaults to 7 days. Cannot be more -than 7 days ('"604800s"') or less than 10 minutes ('"600s"'). +than 31 days ('"2678400s"') or less than 10 minutes ('"600s"'). A duration in seconds with up to nine fractional digits, terminated by 's'. Example: '"600.5s"'.`, @@ -618,7 +630,7 @@ func resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{}) retryPolicyProp, err := expandPubsubSubscriptionRetryPolicy(d.Get("retry_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("retry_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(retryPolicyProp)) && (ok || !reflect.DeepEqual(v, retryPolicyProp)) { + } else if v, ok := d.GetOkExists("retry_policy"); ok || !reflect.DeepEqual(v, retryPolicyProp) { obj["retryPolicy"] = retryPolicyProp } enableMessageOrderingProp, err := expandPubsubSubscriptionEnableMessageOrdering(d.Get("enable_message_ordering"), d, config) @@ -902,7 +914,7 @@ func resourcePubsubSubscriptionUpdate(d *schema.ResourceData, meta interface{}) retryPolicyProp, err := expandPubsubSubscriptionRetryPolicy(d.Get("retry_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("retry_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, retryPolicyProp)) { + } else if v, ok := d.GetOkExists("retry_policy"); ok || !reflect.DeepEqual(v, retryPolicyProp) { obj["retryPolicy"] = retryPolicyProp } enableExactlyOnceDeliveryProp, err := expandPubsubSubscriptionEnableExactlyOnceDelivery(d.Get("enable_exactly_once_delivery"), d, config) @@ -1176,6 +1188,8 @@ func flattenPubsubSubscriptionCloudStorageConfig(v interface{}, d *schema.Resour flattenPubsubSubscriptionCloudStorageConfigMaxDuration(original["maxDuration"], d, config) transformed["max_bytes"] = flattenPubsubSubscriptionCloudStorageConfigMaxBytes(original["maxBytes"], d, config) + transformed["max_messages"] = + flattenPubsubSubscriptionCloudStorageConfigMaxMessages(original["maxMessages"], d, config) transformed["state"] = flattenPubsubSubscriptionCloudStorageConfigState(original["state"], d, config) transformed["avro_config"] = @@ -1221,6 +1235,23 @@ func flattenPubsubSubscriptionCloudStorageConfigMaxBytes(v interface{}, d *schem return v // let terraform core handle it otherwise } +func flattenPubsubSubscriptionCloudStorageConfigMaxMessages(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := 
int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + func flattenPubsubSubscriptionCloudStorageConfigState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -1236,12 +1267,18 @@ func flattenPubsubSubscriptionCloudStorageConfigAvroConfig(v interface{}, d *sch transformed := make(map[string]interface{}) transformed["write_metadata"] = flattenPubsubSubscriptionCloudStorageConfigAvroConfigWriteMetadata(original["writeMetadata"], d, config) + transformed["use_topic_schema"] = + flattenPubsubSubscriptionCloudStorageConfigAvroConfigUseTopicSchema(original["useTopicSchema"], d, config) return []interface{}{transformed} } func flattenPubsubSubscriptionCloudStorageConfigAvroConfigWriteMetadata(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } +func flattenPubsubSubscriptionCloudStorageConfigAvroConfigUseTopicSchema(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenPubsubSubscriptionCloudStorageConfigServiceAccountEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -1397,9 +1434,6 @@ func flattenPubsubSubscriptionRetryPolicy(v interface{}, d *schema.ResourceData, return nil } original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } transformed := make(map[string]interface{}) transformed["minimum_backoff"] = flattenPubsubSubscriptionRetryPolicyMinimumBackoff(original["minimumBackoff"], d, config) @@ -1597,6 +1631,13 @@ func expandPubsubSubscriptionCloudStorageConfig(v interface{}, d tpgresource.Ter transformed["maxBytes"] = transformedMaxBytes } + transformedMaxMessages, err := expandPubsubSubscriptionCloudStorageConfigMaxMessages(original["max_messages"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxMessages); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxMessages"] = transformedMaxMessages + } + transformedState, err := expandPubsubSubscriptionCloudStorageConfigState(original["state"], d, config) if err != nil { return nil, err @@ -1645,6 +1686,10 @@ func expandPubsubSubscriptionCloudStorageConfigMaxBytes(v interface{}, d tpgreso return v, nil } +func expandPubsubSubscriptionCloudStorageConfigMaxMessages(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandPubsubSubscriptionCloudStorageConfigState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -1665,6 +1710,13 @@ func expandPubsubSubscriptionCloudStorageConfigAvroConfig(v interface{}, d tpgre transformed["writeMetadata"] = transformedWriteMetadata } + transformedUseTopicSchema, err := expandPubsubSubscriptionCloudStorageConfigAvroConfigUseTopicSchema(original["use_topic_schema"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUseTopicSchema); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["useTopicSchema"] = transformedUseTopicSchema + } + return transformed, nil } @@ -1672,6 +1724,10 @@ func expandPubsubSubscriptionCloudStorageConfigAvroConfigWriteMetadata(v interfa return v, nil } +func expandPubsubSubscriptionCloudStorageConfigAvroConfigUseTopicSchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func 
expandPubsubSubscriptionCloudStorageConfigServiceAccountEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -1868,9 +1924,14 @@ func expandPubsubSubscriptionDeadLetterPolicyMaxDeliveryAttempts(v interface{}, func expandPubsubSubscriptionRetryPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { + if len(l) == 0 { return nil, nil } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } raw := l[0] original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_topic.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_topic.go index b85fde5cff6..62fff814c89 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_topic.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_topic.go @@ -106,6 +106,103 @@ equals to this service account number.`, }, }, }, + ConflictsWith: []string{}, + }, + "cloud_storage": { + Type: schema.TypeList, + Optional: true, + Description: `Settings for ingestion from Cloud Storage.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: `Cloud Storage bucket. The bucket name must be without any +prefix like "gs://". See the bucket naming requirements: +https://cloud.google.com/storage/docs/buckets#naming.`, + }, + "avro_format": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration for reading Cloud Storage data in Avro binary format. The +bytes of each object will be set to the 'data' field of a Pub/Sub message.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + ExactlyOneOf: []string{}, + }, + "match_glob": { + Type: schema.TypeString, + Optional: true, + Description: `Glob pattern used to match objects that will be ingested. If unset, all +objects will be ingested. See the supported patterns: +https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob`, + }, + "minimum_object_create_time": { + Type: schema.TypeString, + Optional: true, + Description: `The timestamp set in RFC3339 text format. If set, only objects with a +larger or equal timestamp will be ingested. Unset by default, meaning +all objects will be ingested.`, + }, + "pubsub_avro_format": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration for reading Cloud Storage data written via Cloud Storage +subscriptions(See https://cloud.google.com/pubsub/docs/cloudstorage). The +data and attributes fields of the originally exported Pub/Sub message +will be restored when publishing.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + ExactlyOneOf: []string{}, + }, + "text_format": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration for reading Cloud Storage data in text format. 
Each line of +text as specified by the delimiter will be set to the 'data' field of a +Pub/Sub message.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "delimiter": { + Type: schema.TypeString, + Optional: true, + Description: `The delimiter to use when using the 'text' format. Each line of text as +specified by the delimiter will be set to the 'data' field of a Pub/Sub +message. When unset, '\n' is used.`, + Default: "\n", + }, + }, + }, + ExactlyOneOf: []string{}, + }, + }, + }, + ConflictsWith: []string{}, + }, + "platform_logs_settings": { + Type: schema.TypeList, + Optional: true, + Description: `Settings for Platform Logs regarding ingestion to Pub/Sub. If unset, +no Platform Logs will be generated.'`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "severity": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"SEVERITY_UNSPECIFIED", "DISABLED", "DEBUG", "INFO", "WARNING", "ERROR", ""}), + Description: `The minimum severity level of Platform Logs that will be written. If unspecified, +no Platform Logs will be written. Default value: "SEVERITY_UNSPECIFIED" Possible values: ["SEVERITY_UNSPECIFIED", "DISABLED", "DEBUG", "INFO", "WARNING", "ERROR"]`, + Default: "SEVERITY_UNSPECIFIED", + }, + }, + }, }, }, }, @@ -169,7 +266,6 @@ and is not a valid configuration.`, }, "schema_settings": { Type: schema.TypeList, - Computed: true, Optional: true, Description: `Settings for validating messages published against a schema.`, MaxItems: 1, @@ -719,6 +815,10 @@ func flattenPubsubTopicIngestionDataSourceSettings(v interface{}, d *schema.Reso transformed := make(map[string]interface{}) transformed["aws_kinesis"] = flattenPubsubTopicIngestionDataSourceSettingsAwsKinesis(original["awsKinesis"], d, config) + transformed["cloud_storage"] = + flattenPubsubTopicIngestionDataSourceSettingsCloudStorage(original["cloudStorage"], d, config) + transformed["platform_logs_settings"] = + flattenPubsubTopicIngestionDataSourceSettingsPlatformLogsSettings(original["platformLogsSettings"], d, config) return []interface{}{transformed} } func flattenPubsubTopicIngestionDataSourceSettingsAwsKinesis(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { @@ -756,6 +856,91 @@ func flattenPubsubTopicIngestionDataSourceSettingsAwsKinesisGcpServiceAccount(v return v } +func flattenPubsubTopicIngestionDataSourceSettingsCloudStorage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["bucket"] = + flattenPubsubTopicIngestionDataSourceSettingsCloudStorageBucket(original["bucket"], d, config) + transformed["text_format"] = + flattenPubsubTopicIngestionDataSourceSettingsCloudStorageTextFormat(original["textFormat"], d, config) + transformed["avro_format"] = + flattenPubsubTopicIngestionDataSourceSettingsCloudStorageAvroFormat(original["avroFormat"], d, config) + transformed["pubsub_avro_format"] = + flattenPubsubTopicIngestionDataSourceSettingsCloudStoragePubsubAvroFormat(original["pubsubAvroFormat"], d, config) + transformed["minimum_object_create_time"] = + flattenPubsubTopicIngestionDataSourceSettingsCloudStorageMinimumObjectCreateTime(original["minimumObjectCreateTime"], d, config) + transformed["match_glob"] = + 
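// The flatteners in this block translate the API's camelCase JSON keys
// (bucket, textFormat, avroFormat, pubsubAvroFormat,
// minimumObjectCreateTime, matchGlob) into the snake_case attribute names
// declared in the schema above, one helper per field, so Terraform can
// diff the refreshed values against configuration.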
flattenPubsubTopicIngestionDataSourceSettingsCloudStorageMatchGlob(original["matchGlob"], d, config) + return []interface{}{transformed} +} +func flattenPubsubTopicIngestionDataSourceSettingsCloudStorageBucket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPubsubTopicIngestionDataSourceSettingsCloudStorageTextFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["delimiter"] = + flattenPubsubTopicIngestionDataSourceSettingsCloudStorageTextFormatDelimiter(original["delimiter"], d, config) + return []interface{}{transformed} +} +func flattenPubsubTopicIngestionDataSourceSettingsCloudStorageTextFormatDelimiter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPubsubTopicIngestionDataSourceSettingsCloudStorageAvroFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenPubsubTopicIngestionDataSourceSettingsCloudStoragePubsubAvroFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenPubsubTopicIngestionDataSourceSettingsCloudStorageMinimumObjectCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPubsubTopicIngestionDataSourceSettingsCloudStorageMatchGlob(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPubsubTopicIngestionDataSourceSettingsPlatformLogsSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["severity"] = + flattenPubsubTopicIngestionDataSourceSettingsPlatformLogsSettingsSeverity(original["severity"], d, config) + return []interface{}{transformed} +} +func flattenPubsubTopicIngestionDataSourceSettingsPlatformLogsSettingsSeverity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenPubsubTopicTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v @@ -860,6 +1045,20 @@ func expandPubsubTopicIngestionDataSourceSettings(v interface{}, d tpgresource.T transformed["awsKinesis"] = transformedAwsKinesis } + transformedCloudStorage, err := expandPubsubTopicIngestionDataSourceSettingsCloudStorage(original["cloud_storage"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCloudStorage); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cloudStorage"] = transformedCloudStorage + } + + transformedPlatformLogsSettings, err := expandPubsubTopicIngestionDataSourceSettingsPlatformLogsSettings(original["platform_logs_settings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPlatformLogsSettings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
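// The reflect.ValueOf / tpgresource.IsEmptyValue guard at this step is the
// generated provider's standard "only send what was actually set" check:
// empty expansions are omitted from the request body so that server-side
// defaults are not clobbered by zero values.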
transformed["platformLogsSettings"] = transformedPlatformLogsSettings + } + return transformed, nil } @@ -919,6 +1118,148 @@ func expandPubsubTopicIngestionDataSourceSettingsAwsKinesisGcpServiceAccount(v i return v, nil } +func expandPubsubTopicIngestionDataSourceSettingsCloudStorage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBucket, err := expandPubsubTopicIngestionDataSourceSettingsCloudStorageBucket(original["bucket"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBucket); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bucket"] = transformedBucket + } + + transformedTextFormat, err := expandPubsubTopicIngestionDataSourceSettingsCloudStorageTextFormat(original["text_format"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTextFormat); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["textFormat"] = transformedTextFormat + } + + transformedAvroFormat, err := expandPubsubTopicIngestionDataSourceSettingsCloudStorageAvroFormat(original["avro_format"], d, config) + if err != nil { + return nil, err + } else { + transformed["avroFormat"] = transformedAvroFormat + } + + transformedPubsubAvroFormat, err := expandPubsubTopicIngestionDataSourceSettingsCloudStoragePubsubAvroFormat(original["pubsub_avro_format"], d, config) + if err != nil { + return nil, err + } else { + transformed["pubsubAvroFormat"] = transformedPubsubAvroFormat + } + + transformedMinimumObjectCreateTime, err := expandPubsubTopicIngestionDataSourceSettingsCloudStorageMinimumObjectCreateTime(original["minimum_object_create_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinimumObjectCreateTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minimumObjectCreateTime"] = transformedMinimumObjectCreateTime + } + + transformedMatchGlob, err := expandPubsubTopicIngestionDataSourceSettingsCloudStorageMatchGlob(original["match_glob"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMatchGlob); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["matchGlob"] = transformedMatchGlob + } + + return transformed, nil +} + +func expandPubsubTopicIngestionDataSourceSettingsCloudStorageBucket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandPubsubTopicIngestionDataSourceSettingsCloudStorageTextFormat(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDelimiter, err := expandPubsubTopicIngestionDataSourceSettingsCloudStorageTextFormatDelimiter(original["delimiter"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDelimiter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["delimiter"] = transformedDelimiter + } + + return transformed, nil +} + +func expandPubsubTopicIngestionDataSourceSettingsCloudStorageTextFormatDelimiter(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandPubsubTopicIngestionDataSourceSettingsCloudStorageAvroFormat(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandPubsubTopicIngestionDataSourceSettingsCloudStoragePubsubAvroFormat(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandPubsubTopicIngestionDataSourceSettingsCloudStorageMinimumObjectCreateTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandPubsubTopicIngestionDataSourceSettingsCloudStorageMatchGlob(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandPubsubTopicIngestionDataSourceSettingsPlatformLogsSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSeverity, err := expandPubsubTopicIngestionDataSourceSettingsPlatformLogsSettingsSeverity(original["severity"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeverity); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["severity"] = transformedSeverity + } + + return transformed, nil +} + +func expandPubsubTopicIngestionDataSourceSettingsPlatformLogsSettingsSeverity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandPubsubTopicEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/redis/resource_redis_cluster.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/redis/resource_redis_cluster.go index 5d3a833df7a..aeed268ba8a 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/redis/resource_redis_cluster.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/redis/resource_redis_cluster.go @@ -27,6 +27,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" @@ -95,6 +96,108 @@ projects/{network_project_id_or_number}/global/networks/{network_id}.`, Description: 
`Optional. The authorization mode of the Redis cluster. If not provided, auth feature is disabled for the cluster. Default value: "AUTH_MODE_DISABLED" Possible values: ["AUTH_MODE_UNSPECIFIED", "AUTH_MODE_IAM_AUTH", "AUTH_MODE_DISABLED"]`, Default: "AUTH_MODE_DISABLED", }, + "deletion_protection_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Optional. Indicates if the cluster is deletion protected or not. +If the value if set to true, any delete cluster operation will fail. +Default value is true.`, + Default: true, + }, + "maintenance_policy": { + Type: schema.TypeList, + Optional: true, + Description: `Maintenance policy for a cluster`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "weekly_maintenance_window": { + Type: schema.TypeList, + Optional: true, + Description: `Optional. Maintenance window that is applied to resources covered by this policy. +Minimum 1. For the current version, the maximum number +of weekly_window is expected to be one.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"DAY_OF_WEEK_UNSPECIFIED", "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"}), + Description: `Required. The day of week that maintenance updates occur. + +- DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. +- MONDAY: Monday +- TUESDAY: Tuesday +- WEDNESDAY: Wednesday +- THURSDAY: Thursday +- FRIDAY: Friday +- SATURDAY: Saturday +- SUNDAY: Sunday Possible values: ["DAY_OF_WEEK_UNSPECIFIED", "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, + }, + "start_time": { + Type: schema.TypeList, + Required: true, + Description: `Required. Start time of the window in UTC time.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 23), + Description: `Hours of day in 24 hour format. Should be from 0 to 23. +An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, + }, + "minutes": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 59), + Description: `Minutes of hour of day. Must be from 0 to 59.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 999999999), + Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, + }, + "seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 60), + Description: `Seconds of minutes of the time. Must normally be from 0 to 59. +An API may allow the value 60 if it allows leap-seconds.`, + }, + }, + }, + }, + "duration": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Duration of the maintenance window. +The current window is fixed at 1 hour. +A duration in seconds with up to nine fractional digits, +terminated by 's'. Example: "3.5s".`, + }, + }, + }, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The time when the policy was created. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond +resolution and up to nine fractional digits.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The time when the policy was last updated. 
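// A minimal sketch, assuming only the bounds that validation.IntBetween
// enforces in the start_time schema above (hours 0-23, minutes 0-59,
// seconds 0-60 to allow leap seconds, nanos 0-999999999); the helper name
// is illustrative:
func validTimeOfDay(hours, minutes, seconds, nanos int) bool {
	return hours >= 0 && hours <= 23 &&
		minutes >= 0 && minutes <= 59 &&
		seconds >= 0 && seconds <= 60 &&
		nanos >= 0 && nanos <= 999999999
}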
+A timestamp in RFC3339 UTC "Zulu" format, with nanosecond +resolution and up to nine fractional digits.`, + }, + }, + }, + }, "node_type": { Type: schema.TypeString, Computed: true, @@ -203,6 +306,37 @@ projects/{network_project_id}/global/networks/{network_id}.`, }, }, }, + "maintenance_schedule": { + Type: schema.TypeList, + Computed: true, + Description: `Upcoming maintenance schedule.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "end_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The end time of any upcoming scheduled maintenance for this cluster. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond +resolution and up to nine fractional digits.`, + }, + "schedule_deadline_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The deadline that the maintenance schedule start time +can not go beyond, including reschedule. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond +resolution and up to nine fractional digits.`, + }, + "start_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The start time of any upcoming scheduled maintenance for this cluster. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond +resolution and up to nine fractional digits.`, + }, + }, + }, + }, "precise_size_gb": { Type: schema.TypeFloat, Computed: true, @@ -347,12 +481,24 @@ func resourceRedisClusterCreate(d *schema.ResourceData, meta interface{}) error } else if v, ok := d.GetOkExists("shard_count"); !tpgresource.IsEmptyValue(reflect.ValueOf(shardCountProp)) && (ok || !reflect.DeepEqual(v, shardCountProp)) { obj["shardCount"] = shardCountProp } + deletionProtectionEnabledProp, err := expandRedisClusterDeletionProtectionEnabled(d.Get("deletion_protection_enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deletion_protection_enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(deletionProtectionEnabledProp)) && (ok || !reflect.DeepEqual(v, deletionProtectionEnabledProp)) { + obj["deletionProtectionEnabled"] = deletionProtectionEnabledProp + } redisConfigsProp, err := expandRedisClusterRedisConfigs(d.Get("redis_configs"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("redis_configs"); !tpgresource.IsEmptyValue(reflect.ValueOf(redisConfigsProp)) && (ok || !reflect.DeepEqual(v, redisConfigsProp)) { obj["redisConfigs"] = redisConfigsProp } + maintenancePolicyProp, err := expandRedisClusterMaintenancePolicy(d.Get("maintenance_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("maintenance_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(maintenancePolicyProp)) && (ok || !reflect.DeepEqual(v, maintenancePolicyProp)) { + obj["maintenancePolicy"] = maintenancePolicyProp + } url, err := tpgresource.ReplaceVars(d, config, "{{RedisBasePath}}projects/{{project}}/locations/{{region}}/clusters?clusterId={{name}}") if err != nil { @@ -502,9 +648,18 @@ func resourceRedisClusterRead(d *schema.ResourceData, meta interface{}) error { if err := d.Set("shard_count", flattenRedisClusterShardCount(res["shardCount"], d, config)); err != nil { return fmt.Errorf("Error reading Cluster: %s", err) } + if err := d.Set("deletion_protection_enabled", flattenRedisClusterDeletionProtectionEnabled(res["deletionProtectionEnabled"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } if err := d.Set("redis_configs", flattenRedisClusterRedisConfigs(res["redisConfigs"], d, 
config)); err != nil { return fmt.Errorf("Error reading Cluster: %s", err) } + if err := d.Set("maintenance_policy", flattenRedisClusterMaintenancePolicy(res["maintenancePolicy"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + if err := d.Set("maintenance_schedule", flattenRedisClusterMaintenanceSchedule(res["maintenanceSchedule"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } return nil } @@ -543,12 +698,24 @@ func resourceRedisClusterUpdate(d *schema.ResourceData, meta interface{}) error } else if v, ok := d.GetOkExists("shard_count"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, shardCountProp)) { obj["shardCount"] = shardCountProp } + deletionProtectionEnabledProp, err := expandRedisClusterDeletionProtectionEnabled(d.Get("deletion_protection_enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deletion_protection_enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, deletionProtectionEnabledProp)) { + obj["deletionProtectionEnabled"] = deletionProtectionEnabledProp + } redisConfigsProp, err := expandRedisClusterRedisConfigs(d.Get("redis_configs"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("redis_configs"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, redisConfigsProp)) { obj["redisConfigs"] = redisConfigsProp } + maintenancePolicyProp, err := expandRedisClusterMaintenancePolicy(d.Get("maintenance_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("maintenance_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, maintenancePolicyProp)) { + obj["maintenancePolicy"] = maintenancePolicyProp + } url, err := tpgresource.ReplaceVars(d, config, "{{RedisBasePath}}projects/{{project}}/locations/{{region}}/clusters/{{name}}") if err != nil { @@ -571,9 +738,17 @@ func resourceRedisClusterUpdate(d *schema.ResourceData, meta interface{}) error updateMask = append(updateMask, "shardCount") } + if d.HasChange("deletion_protection_enabled") { + updateMask = append(updateMask, "deletionProtectionEnabled") + } + if d.HasChange("redis_configs") { updateMask = append(updateMask, "redisConfigs") } + + if d.HasChange("maintenance_policy") { + updateMask = append(updateMask, "maintenancePolicy") + } // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) @@ -958,10 +1133,180 @@ func flattenRedisClusterShardCount(v interface{}, d *schema.ResourceData, config return v // let terraform core handle it otherwise } +func flattenRedisClusterDeletionProtectionEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenRedisClusterRedisConfigs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } +func flattenRedisClusterMaintenancePolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["create_time"] = + flattenRedisClusterMaintenancePolicyCreateTime(original["createTime"], d, config) + transformed["update_time"] = + 
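// The update path earlier in this hunk appends "deletionProtectionEnabled"
// and "maintenancePolicy" to updateMask only when d.HasChange reports a
// diff, then joins the mask into one query parameter. A minimal standalone
// sketch of that pattern (field list taken from the hunk, helper name
// illustrative; assumes import "strings"):
func buildUpdateMask(changed map[string]bool) string {
	mask := []string{}
	for _, field := range []string{"shardCount", "deletionProtectionEnabled", "redisConfigs", "maintenancePolicy"} {
		if changed[field] {
			mask = append(mask, field)
		}
	}
	return strings.Join(mask, ",")
}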
flattenRedisClusterMaintenancePolicyUpdateTime(original["updateTime"], d, config) + transformed["weekly_maintenance_window"] = + flattenRedisClusterMaintenancePolicyWeeklyMaintenanceWindow(original["weeklyMaintenanceWindow"], d, config) + return []interface{}{transformed} +} +func flattenRedisClusterMaintenancePolicyCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisClusterMaintenancePolicyUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisClusterMaintenancePolicyWeeklyMaintenanceWindow(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "day": flattenRedisClusterMaintenancePolicyWeeklyMaintenanceWindowDay(original["day"], d, config), + "duration": flattenRedisClusterMaintenancePolicyWeeklyMaintenanceWindowDuration(original["duration"], d, config), + "start_time": flattenRedisClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime(original["startTime"], d, config), + }) + } + return transformed +} +func flattenRedisClusterMaintenancePolicyWeeklyMaintenanceWindowDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisClusterMaintenancePolicyWeeklyMaintenanceWindowDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["hours"] = + flattenRedisClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeHours(original["hours"], d, config) + transformed["minutes"] = + flattenRedisClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeMinutes(original["minutes"], d, config) + transformed["seconds"] = + flattenRedisClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeSeconds(original["seconds"], d, config) + transformed["nanos"] = + flattenRedisClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func flattenRedisClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenRedisClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok 
:= v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenRedisClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenRedisClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenRedisClusterMaintenanceSchedule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["start_time"] = + flattenRedisClusterMaintenanceScheduleStartTime(original["startTime"], d, config) + transformed["end_time"] = + flattenRedisClusterMaintenanceScheduleEndTime(original["endTime"], d, config) + transformed["schedule_deadline_time"] = + flattenRedisClusterMaintenanceScheduleScheduleDeadlineTime(original["scheduleDeadlineTime"], d, config) + return []interface{}{transformed} +} +func flattenRedisClusterMaintenanceScheduleStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisClusterMaintenanceScheduleEndTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisClusterMaintenanceScheduleScheduleDeadlineTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func expandRedisClusterAuthorizationMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -1042,6 +1387,10 @@ func expandRedisClusterShardCount(v interface{}, d tpgresource.TerraformResource return v, nil } +func expandRedisClusterDeletionProtectionEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandRedisClusterRedisConfigs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil @@ -1052,3 +1401,149 @@ func expandRedisClusterRedisConfigs(v interface{}, d tpgresource.TerraformResour } return m, nil } + +func expandRedisClusterMaintenancePolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCreateTime, err := 
expandRedisClusterMaintenancePolicyCreateTime(original["create_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCreateTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["createTime"] = transformedCreateTime + } + + transformedUpdateTime, err := expandRedisClusterMaintenancePolicyUpdateTime(original["update_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUpdateTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["updateTime"] = transformedUpdateTime + } + + transformedWeeklyMaintenanceWindow, err := expandRedisClusterMaintenancePolicyWeeklyMaintenanceWindow(original["weekly_maintenance_window"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWeeklyMaintenanceWindow); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["weeklyMaintenanceWindow"] = transformedWeeklyMaintenanceWindow + } + + return transformed, nil +} + +func expandRedisClusterMaintenancePolicyCreateTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisClusterMaintenancePolicyUpdateTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisClusterMaintenancePolicyWeeklyMaintenanceWindow(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDay, err := expandRedisClusterMaintenancePolicyWeeklyMaintenanceWindowDay(original["day"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["day"] = transformedDay + } + + transformedDuration, err := expandRedisClusterMaintenancePolicyWeeklyMaintenanceWindowDuration(original["duration"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDuration); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["duration"] = transformedDuration + } + + transformedStartTime, err := expandRedisClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime(original["start_time"], d, config) + if err != nil { + return nil, err + } else { + transformed["startTime"] = transformedStartTime + } + + req = append(req, transformed) + } + return req, nil +} + +func expandRedisClusterMaintenancePolicyWeeklyMaintenanceWindowDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisClusterMaintenancePolicyWeeklyMaintenanceWindowDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHours, err 
:= expandRedisClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeHours(original["hours"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hours"] = transformedHours + } + + transformedMinutes, err := expandRedisClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeMinutes(original["minutes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minutes"] = transformedMinutes + } + + transformedSeconds, err := expandRedisClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandRedisClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func expandRedisClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_client_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_client_config.go index 14fb1c54c66..2149f6a4959 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_client_config.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_client_config.go @@ -8,7 +8,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/datasource/schema" - "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-provider-google/google/fwmodels" "github.com/hashicorp/terraform-provider-google/google/fwresource" @@ -33,11 +32,12 @@ type GoogleClientConfigDataSource struct { type GoogleClientConfigModel struct { // Id could/should be removed in future as it's not necessary in the plugin framework // https://github.com/hashicorp/terraform-plugin-testing/issues/84 - Id types.String `tfsdk:"id"` - Project types.String `tfsdk:"project"` - Region types.String `tfsdk:"region"` - 
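// In the plugin-framework data source modified here, every model field is
// bound to a schema attribute through its tfsdk struct tag; the
// DefaultLabels field added in this hunk is a types.Map of strings and is
// filled directly from d.providerConfig.DefaultLabels in Read, so exposing
// it requires no additional API call.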
Zone types.String `tfsdk:"zone"` - AccessToken types.String `tfsdk:"access_token"` + Id types.String `tfsdk:"id"` + Project types.String `tfsdk:"project"` + Region types.String `tfsdk:"region"` + Zone types.String `tfsdk:"zone"` + AccessToken types.String `tfsdk:"access_token"` + DefaultLabels types.Map `tfsdk:"default_labels"` } func (m *GoogleClientConfigModel) GetLocationDescription(providerConfig *fwtransport.FrameworkProviderConfig) fwresource.LocationDescription { @@ -88,6 +88,12 @@ func (d *GoogleClientConfigDataSource) Schema(ctx context.Context, req datasourc Computed: true, Sensitive: true, }, + "default_labels": schema.MapAttribute{ + Description: "The default labels configured on the provider.", + MarkdownDescription: "The default labels configured on the provider.", + Computed: true, + ElementType: types.StringType, + }, }, } } @@ -114,7 +120,6 @@ func (d *GoogleClientConfigDataSource) Configure(ctx context.Context, req dataso func (d *GoogleClientConfigDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { var data GoogleClientConfigModel var metaData *fwmodels.ProviderMetaModel - var diags diag.Diagnostics // Read Provider meta into the meta model resp.Diagnostics.Append(req.ProviderMeta.Get(ctx, &metaData)...) @@ -136,10 +141,11 @@ func (d *GoogleClientConfigDataSource) Read(ctx context.Context, req datasource. data.Project = d.providerConfig.Project data.Region = region data.Zone = zone + data.DefaultLabels = d.providerConfig.DefaultLabels token, err := d.providerConfig.TokenSource.Token() if err != nil { - diags.AddError("Error setting access_token", err.Error()) + resp.Diagnostics.AddError("Error setting access_token", err.Error()) return } data.AccessToken = types.StringValue(token.AccessToken) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_folder.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_folder.go index 5ddb5447bf7..d692875a3e1 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_folder.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_folder.go @@ -52,6 +52,10 @@ func DataSourceGoogleFolder() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "deletion_protection": { + Type: schema.TypeBool, + Computed: true, + }, }, } } @@ -73,6 +77,10 @@ func dataSourceFolderRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("%s not found", id) } + if err := d.Set("deletion_protection", nil); err != nil { + return fmt.Errorf("Error setting deletion_protection: %s", err) + } + if v, ok := d.GetOk("lookup_organization"); ok && v.(bool) { organization, err := lookupOrganizationName(d.Id(), userAgent, d, config) if err != nil { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_service_account.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_service_account.go index bc17d636dac..5b5460eff24 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_service_account.go +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_service_account.go @@ -43,6 +43,10 @@ func DataSourceGoogleServiceAccount() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "disabled": { + Type: schema.TypeBool, + Computed: true, + }, }, } } @@ -86,6 +90,9 @@ func dataSourceGoogleServiceAccountRead(d *schema.ResourceData, meta interface{} if err := d.Set("member", "serviceAccount:"+sa.Email); err != nil { return fmt.Errorf("Error setting member: %s", err) } + if err := d.Set("disabled", sa.Disabled); err != nil { + return fmt.Errorf("Error setting disabled: %s", err) + } return nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_service_accounts.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_service_accounts.go new file mode 100644 index 00000000000..38e19612822 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_service_accounts.go @@ -0,0 +1,105 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleServiceAccounts() *schema.Resource { + return &schema.Resource{ + Read: datasourceGoogleServiceAccountsRead, + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + }, + "accounts": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeString, + Required: true, + }, + "disabled": { + Type: schema.TypeBool, + Computed: true, + }, + "display_name": { + Type: schema.TypeString, + Computed: true, + }, + "email": { + Type: schema.TypeString, + Computed: true, + }, + "member": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "unique_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func datasourceGoogleServiceAccountsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for service accounts: %s", err) + } + + accounts := make([]map[string]interface{}, 0) + + accountList, err := config.NewIamClient(userAgent).Projects.ServiceAccounts.List("projects/" + project).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Service accounts: %s", project)) + } + + for _, account := range accountList.Accounts { + accounts = append(accounts, map[string]interface{}{ + "account_id": strings.Split(account.Email, "@")[0], + "disabled": account.Disabled, + "email": account.Email, + "display_name": account.DisplayName, + "member": "serviceAccount:" + account.Email, + "name": account.Name, + "unique_id": 
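// Each ServiceAccount returned by the IAM list call is projected into a
// flat state map at this step: account_id is the local part of the email
// (strings.Split(account.Email, "@")[0], e.g.
// "my-sa@my-project.iam.gserviceaccount.com" -> "my-sa") and member is the
// IAM member string "serviceAccount:" + email.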
account.UniqueId, + }) + } + + if err := d.Set("accounts", accounts); err != nil { + return fmt.Errorf("Error retrieving service accounts: %s", err) + } + + d.SetId(fmt.Sprintf( + "projects/%s", + project, + )) + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_folder.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_folder.go index 074017ba0da..d622bcb13c1 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_folder.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_folder.go @@ -67,6 +67,19 @@ func ResourceGoogleFolder() *schema.Resource { Computed: true, Description: `Timestamp when the Folder was created. Assigned by the server. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".`, }, + "deletion_protection": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: `When the field is set to true or unset in Terraform state, a terraform apply or terraform destroy that would delete the instance will fail. When the field is set to false, deleting the instance is allowed.`, + }, + "tags": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. This field is only set at create time and modifying this field after creation will trigger recreation. 
To apply tags to an existing resource, see the google_tags_tag_value resource.`, + }, }, UseJSONNumber: true, } @@ -82,14 +95,19 @@ func resourceGoogleFolderCreate(d *schema.ResourceData, meta interface{}) error displayName := d.Get("display_name").(string) parent := d.Get("parent").(string) + folder := &resourceManagerV3.Folder{ + DisplayName: displayName, + Parent: parent, + } + if _, ok := d.GetOk("tags"); ok { + folder.Tags = tpgresource.ExpandStringMap(d, "tags") + } + var op *resourceManagerV3.Operation err = transport_tpg.Retry(transport_tpg.RetryOptions{ RetryFunc: func() error { var reqErr error - op, reqErr = config.NewResourceManagerV3Client(userAgent).Folders.Create(&resourceManagerV3.Folder{ - DisplayName: displayName, - Parent: parent, - }).Do() + op, reqErr = config.NewResourceManagerV3Client(userAgent).Folders.Create(folder).Do() return reqErr }, Timeout: d.Timeout(schema.TimeoutCreate), @@ -138,7 +156,12 @@ func resourceGoogleFolderRead(d *schema.ResourceData, meta interface{}) error { if err != nil { return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Folder Not Found : %s", d.Id())) } - + // Explicitly set client-side fields to default values if unset + if _, ok := d.GetOkExists("deletion_protection"); !ok { + if err := d.Set("deletion_protection", true); err != nil { + return fmt.Errorf("Error setting deletion_protection: %s", err) + } + } if err := d.Set("name", folder.Name); err != nil { return fmt.Errorf("Error setting name: %s", err) } @@ -168,6 +191,19 @@ func resourceGoogleFolderUpdate(d *schema.ResourceData, meta interface{}) error if err != nil { return err } + + clientSideFields := map[string]bool{"deletion_protection": true} + clientSideOnly := true + for field := range ResourceGoogleFolder().Schema { + if d.HasChange(field) && !clientSideFields[field] { + clientSideOnly = false + break + } + } + if clientSideOnly { + return nil + } + displayName := d.Get("display_name").(string) d.Partial(true) @@ -224,6 +260,11 @@ func resourceGoogleFolderDelete(d *schema.ResourceData, meta interface{}) error if err != nil { return err } + + if d.Get("deletion_protection").(bool) { + return fmt.Errorf("cannot destroy folder without setting deletion_protection=false and running `terraform apply`") + } + displayName := d.Get("display_name").(string) var op *resourceManagerV3.Operation diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project.go index 8c3017559d6..b98b199143e 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" tpgserviceusage "github.com/hashicorp/terraform-provider-google/google/services/serviceusage" @@ -68,11 +69,13 @@ func ResourceGoogleProject() *schema.Resource { ValidateFunc: verify.ValidateProjectID(), Description: `The project ID. 
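// A minimal sketch of the client-side deletion-protection guard the folder
// resource gains above: Delete refuses to proceed while the flag is true,
// so unprotecting requires an explicit apply first. The helper name is
// illustrative, the error text is the hunk's own (assumes import "fmt"):
func checkFolderDeletionProtection(protected bool) error {
	if protected {
		return fmt.Errorf("cannot destroy folder without setting deletion_protection=false and running `terraform apply`")
	}
	return nil
}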
Changing this forces a new project to be created.`, }, - "skip_delete": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Description: `If true, the Terraform resource can be deleted without deleting the Project via the Google API.`, + "deletion_policy": { + Type: schema.TypeString, + Optional: true, + Default: "PREVENT", + Description: `The deletion policy for the Project. Setting PREVENT will protect the project against any destroy actions caused by a terraform apply or terraform destroy. Setting ABANDON allows the resource + to be abandoned rather than deleted. Possible values are: "PREVENT", "ABANDON", "DELETE"`, + ValidateFunc: validation.StringInSlice([]string{"PREVENT", "ABANDON", "DELETE"}, false), }, "auto_create_network": { Type: schema.TypeBool, @@ -132,6 +135,14 @@ func ResourceGoogleProject() *schema.Resource { Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, Elem: &schema.Schema{Type: schema.TypeString}, }, + + "tags": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. This field is only set at create time and modifying this field after creation will trigger recreation. To apply tags to an existing resource, see the google_tags_tag_value resource.`, + }, }, UseJSONNumber: true, } @@ -165,6 +176,10 @@ func resourceGoogleProjectCreate(d *schema.ResourceData, meta interface{}) error project.Labels = tpgresource.ExpandEffectiveLabels(d) } + if _, ok := d.GetOk("tags"); ok { + project.Tags = tpgresource.ExpandStringMap(d, "tags") + } + var op *cloudresourcemanager.Operation err = transport_tpg.Retry(transport_tpg.RetryOptions{ RetryFunc: func() (reqErr error) { @@ -306,7 +321,12 @@ func resourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error { d.SetId("") return nil } - + // Explicitly set client-side fields to default values if unset + if _, ok := d.GetOkExists("deletion_policy"); !ok { + if err := d.Set("deletion_policy", "PREVENT"); err != nil { + return fmt.Errorf("Error setting deletion_policy: %s", err) + } + } if err := d.Set("project_id", pid); err != nil { return fmt.Errorf("Error setting project_id: %s", err) } @@ -500,8 +520,16 @@ func resourceGoogleProjectDelete(d *schema.ResourceData, meta interface{}) error if err != nil { return err } - // Only delete projects if skip_delete isn't set - if !d.Get("skip_delete").(bool) { + deletionPolicy := d.Get("deletion_policy").(string) + + if deletionPolicy == "PREVENT" { + return fmt.Errorf("Cannot destroy project as deletion_policy is set to PREVENT.") + } else if deletionPolicy == "ABANDON" { + log.Printf("[WARN] The project has been abandoned as deletion_policy set to ABANDON.") + d.SetId("") + return nil + } else { + // Only delete projects if deletion_policy isn't PREVENT or ABANDON parts := strings.Split(d.Id(), "/") pid := parts[len(parts)-1] if err := transport_tpg.Retry(transport_tpg.RetryOptions{ diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project_service.go 
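// skip_delete gives way to a three-valued deletion_policy above; a minimal
// sketch of the destroy-time dispatch mirroring the hunk's logic (helper
// name illustrative, error string from the hunk; assumes import "fmt"):
func planProjectDestroy(policy string) (callDeleteAPI bool, err error) {
	switch policy {
	case "PREVENT":
		return false, fmt.Errorf("Cannot destroy project as deletion_policy is set to PREVENT.")
	case "ABANDON":
		return false, nil // resource is removed from state, project is kept
	default: // "DELETE"
		return true, nil
	}
}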
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project_service.go index 721dcc0396b..6092ee0761e 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project_service.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project_service.go @@ -258,7 +258,8 @@ func resourceGoogleProjectServiceDelete(d *schema.ResourceData, meta interface{} service := d.Get("service").(string) disableDependencies := d.Get("disable_dependent_services").(bool) - if err = disableServiceUsageProjectService(service, project, d, config, disableDependencies); err != nil { + err = disableServiceUsageProjectService(service, project, d, config, disableDependencies) + if err != nil { return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Project Service %s", d.Id())) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_service_account.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_service_account.go index 30cf8684213..a679be21e7a 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_service_account.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_service_account.go @@ -121,53 +121,61 @@ func resourceGoogleServiceAccountCreate(d *schema.ResourceData, meta interface{} ServiceAccount: sa, } - sa, err = config.NewIamClient(userAgent).Projects.ServiceAccounts.Create("projects/"+project, r).Do() + iamClient := config.NewIamClient(userAgent) + sa, err = iamClient.Projects.ServiceAccounts.Create("projects/"+project, r).Do() if err != nil { gerr, ok := err.(*googleapi.Error) alreadyExists := ok && gerr.Code == 409 && d.Get("create_ignore_already_exists").(bool) if alreadyExists { - sa = &iam.ServiceAccount{ - Name: fmt.Sprintf("projects/%s/serviceAccounts/%s@%s.iam.gserviceaccount.com", project, aid, project), - } + fullServiceAccountName := fmt.Sprintf("projects/%s/serviceAccounts/%s@%s.iam.gserviceaccount.com", project, aid, project) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (operr error) { + sa, saerr := iamClient.Projects.ServiceAccounts.Get(fullServiceAccountName).Do() + + if saerr != nil { + return saerr + } + + d.SetId(sa.Name) + return populateResourceData(d, sa) + }, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{ + transport_tpg.IsNotFoundRetryableError("service account creation"), + }, + }) + + if err != nil { + return fmt.Errorf("Error reading service account after creation: %s", err) + } + return nil + } else { + return fmt.Errorf("Error creating service account: %s", err) + } } d.SetId(sa.Name)
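The 409 branch above adopts a pre-existing service account by reading it back instead of failing the apply. Reduced to its shape, as a minimal sketch (createOrAdopt and its function arguments are illustrative, not provider API; assumes the errors and google.golang.org/api/googleapi imports, and errors.As stands in for the err.(*googleapi.Error) assertion used above):

func createOrAdopt(create, get func() error, ignoreExisting bool) error {
	// Try the create first; on a 409 Conflict, fall back to reading
	// the resource that already exists instead of returning the error.
	err := create()
	var gerr *googleapi.Error
	if errors.As(err, &gerr) && gerr.Code == 409 && ignoreExisting {
		return get()
	}
	return err
}

- - err = transport_tpg.Retry(transport_tpg.RetryOptions{ - RetryFunc: func() (operr error) { - _, saerr := config.NewIamClient(userAgent).Projects.ServiceAccounts.Get(d.Id()).Do() - return saerr - }, - Timeout: d.Timeout(schema.TimeoutCreate), - ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{ - transport_tpg.IsNotFoundRetryableError("service account creation"), - transport_tpg.IsForbiddenIamServiceAccountRetryableError("service account creation"), - }, - }) - - if err != nil { - return fmt.Errorf("Error reading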
service account after creation: %s", err) - } + if err := populateResourceData(d, sa); err != nil { + return err + } // We poll until the resource is found due to eventual consistency issue - // on part of the api https://cloud.google.com/iam/docs/overview#consistency - err = transport_tpg.PollingWaitTime(resourceServiceAccountPollRead(d, meta), transport_tpg.PollCheckForExistence, "Creating Service Account", d.Timeout(schema.TimeoutCreate), 1) - - if err != nil { - return err - } + // on the part of the API https://cloud.google.com/iam/docs/overview#consistency. + // Wait for at least 3 successful responses in a row to ensure result is consistent. + // IAM API returns 403 when the queried SA is not found, so we must ignore both 404 & 403 errors + transport_tpg.PollingWaitTime( + resourceServiceAccountPollRead(d, meta), + transport_tpg.PollCheckForExistence, + "Creating Service Account", + d.Timeout(schema.TimeoutCreate), + 3, // Number of consecutive occurrences. + ) // We can't guarantee complete consistency even after polling, // so sleep for some additional time to reduce the likelihood of // eventual consistency failures. time.Sleep(10 * time.Second) - return resourceGoogleServiceAccountRead(d, meta) + return nil } +// PollReadFunc for checking Service Account existence. +// If resourceData is not nil, it will be updated with the response. func resourceServiceAccountPollRead(d *schema.ResourceData, meta interface{}) transport_tpg.PollReadFunc { return func() (map[string]interface{}, error) { config := meta.(*transport_tpg.Config) @@ -199,6 +207,10 @@ func resourceGoogleServiceAccountRead(d *schema.ResourceData, meta interface{}) return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Service Account %q", d.Id())) } + return populateResourceData(d, sa) +} + +func populateResourceData(d *schema.ResourceData, sa *iam.ServiceAccount) error { if err := d.Set("email", sa.Email); err != nil { return fmt.Errorf("Error setting email: %s", err) }
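The polling change above waits for several consecutive successful reads, not a single one, before treating the service account as visible. The underlying idea, as a standalone sketch (waitConsistent and read are illustrative stand-ins for transport_tpg.PollingWaitTime and the poll read func; assumes the fmt and time imports):

func waitConsistent(read func() error, n int, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	streak := 0
	for time.Now().Before(deadline) {
		if err := read(); err != nil {
			streak = 0 // e.g. a 404/403 while the SA is not yet visible
		} else if streak++; streak >= n {
			return nil // n successes in a row: likely consistent
		}
		time.Sleep(2 * time.Second)
	}
	return fmt.Errorf("did not observe %d consecutive successful reads", n)
}

diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/data_source_secret_manager_secret_version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/data_source_secret_manager_secret_version.go index d099bcb74b2..698f8e125b9 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/data_source_secret_manager_secret_version.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/data_source_secret_manager_secret_version.go @@ -54,6 +54,11 @@ func DataSourceSecretManagerSecretVersion() *schema.Resource { Computed: true, Sensitive: true, }, + "is_secret_data_base64": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, }, } } @@ -149,11 +154,17 @@ func dataSourceSecretManagerSecretVersionRead(d *schema.ResourceData, meta inter } data := resp["payload"].(map[string]interface{}) - secretData, err := base64.StdEncoding.DecodeString(data["data"].(string)) - if err != nil { - return fmt.Errorf("Error decoding secret manager secret version data: %s", err.Error()) + var secretData string + if d.Get("is_secret_data_base64").(bool) { + secretData = data["data"].(string) + } else { + payloadData, err := base64.StdEncoding.DecodeString(data["data"].(string)) + if err != nil { + return fmt.Errorf("error decoding secret manager secret version data: %s", err.Error()) + } + secretData = string(payloadData) } - if err :=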
d.Set("secret_data", string(secretData)); err != nil { + if err := d.Set("secret_data", secretData); err != nil { return fmt.Errorf("Error setting secret_data: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/data_source_secret_manager_secret_version_access.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/data_source_secret_manager_secret_version_access.go index 230d2956086..24c5a1117fc 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/data_source_secret_manager_secret_version_access.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/data_source_secret_manager_secret_version_access.go @@ -42,6 +42,11 @@ func DataSourceSecretManagerSecretVersionAccess() *schema.Resource { Computed: true, Sensitive: true, }, + "is_secret_data_base64": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, }, } } @@ -114,11 +119,17 @@ func dataSourceSecretManagerSecretVersionAccessRead(d *schema.ResourceData, meta } data := resp["payload"].(map[string]interface{}) - secretData, err := base64.StdEncoding.DecodeString(data["data"].(string)) - if err != nil { - return fmt.Errorf("Error decoding secret manager secret version data: %s", err.Error()) + var secretData string + if d.Get("is_secret_data_base64").(bool) { + secretData = data["data"].(string) + } else { + payloadData, err := base64.StdEncoding.DecodeString(data["data"].(string)) + if err != nil { + return fmt.Errorf("error decoding secret manager secret version data: %s", err.Error()) + } + secretData = string(payloadData) } - if err := d.Set("secret_data", string(secretData)); err != nil { + if err := d.Set("secret_data", secretData); err != nil { return fmt.Errorf("Error setting secret_data: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/data_source_secret_manager_secrets.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/data_source_secret_manager_secrets.go index 959cb7fb46b..79ba8f3d0c4 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/data_source_secret_manager_secrets.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/data_source_secret_manager_secrets.go @@ -146,17 +146,21 @@ func flattenSecretManagerSecretsSecrets(v interface{}, d *schema.ResourceData, c continue } transformed = append(transformed, map[string]interface{}{ - "replication": flattenSecretManagerSecretReplication(original["replication"], d, config), - "annotations": flattenSecretManagerSecretAnnotations(original["annotations"], d, config), - "expire_time": flattenSecretManagerSecretExpireTime(original["expireTime"], d, config), - "labels": flattenSecretManagerSecretLabels(original["labels"], d, config), - "rotation": flattenSecretManagerSecretRotation(original["rotation"], d, config), - "topics": flattenSecretManagerSecretTopics(original["topics"], d, config), - "version_aliases": flattenSecretManagerSecretVersionAliases(original["versionAliases"], d, config), - "create_time": flattenSecretManagerSecretCreateTime(original["createTime"], d, config), - "name": 
flattenSecretManagerSecretName(original["name"], d, config), - "project": getDataFromName(original["name"], 1), - "secret_id": getDataFromName(original["name"], 3), + "replication": flattenSecretManagerSecretReplication(original["replication"], d, config), + "annotations": flattenSecretManagerSecretEffectiveAnnotations(original["annotations"], d, config), + "effective_annotations": flattenSecretManagerSecretEffectiveAnnotations(original["annotations"], d, config), + "expire_time": flattenSecretManagerSecretExpireTime(original["expireTime"], d, config), + "labels": flattenSecretManagerSecretEffectiveLabels(original["labels"], d, config), + "effective_labels": flattenSecretManagerSecretEffectiveLabels(original["labels"], d, config), + "terraform_labels": flattenSecretManagerSecretEffectiveLabels(original["labels"], d, config), + "rotation": flattenSecretManagerSecretRotation(original["rotation"], d, config), + "topics": flattenSecretManagerSecretTopics(original["topics"], d, config), + "version_aliases": flattenSecretManagerSecretVersionAliases(original["versionAliases"], d, config), + "version_destroy_ttl": flattenSecretManagerSecretVersionDestroyTtl(original["versionDestroyTtl"], d, config), + "create_time": flattenSecretManagerSecretCreateTime(original["createTime"], d, config), + "name": flattenSecretManagerSecretName(original["name"], d, config), + "project": getDataFromName(original["name"], 1), + "secret_id": getDataFromName(original["name"], 3), }) } return transformed diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/resource_secret_manager_secret_version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/resource_secret_manager_secret_version.go index d0217d2662e..244cc815d56 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/resource_secret_manager_secret_version.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/resource_secret_manager_secret_version.go @@ -98,13 +98,13 @@ func ResourceSecretManagerSecretVersion() *schema.Resource { "deletion_policy": { Type: schema.TypeString, Optional: true, - Default: "DELETE", Description: `The deletion policy for the secret version. Setting 'ABANDON' allows the resource to be abandoned rather than deleted. Setting 'DISABLE' allows the resource to be disabled rather than deleted. Default is 'DELETE'. Possible values are: * DELETE * DISABLE * ABANDON`, + Default: "DELETE", }, "is_secret_data_base64": { Type: schema.TypeBool, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/data_source_secret_manager_regional_secret.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/data_source_secret_manager_regional_secret.go new file mode 100644 index 00000000000..60c99620fc1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/data_source_secret_manager_regional_secret.go @@ -0,0 +1,49 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package secretmanagerregional + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceSecretManagerRegionalRegionalSecret() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceSecretManagerRegionalRegionalSecret().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "secret_id") + tpgresource.AddRequiredFieldsToSchema(dsSchema, "location") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceSecretManagerRegionalRegionalSecretRead, + Schema: dsSchema, + } +} + +func dataSourceSecretManagerRegionalRegionalSecretRead(d *schema.ResourceData, meta interface{}) error { + id, err := tpgresource.ReplaceVars(d, meta.(*transport_tpg.Config), "projects/{{project}}/locations/{{location}}/secrets/{{secret_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + err = resourceSecretManagerRegionalRegionalSecretRead(d, meta) + if err != nil { + return err + } + + if err := tpgresource.SetDataSourceLabels(d); err != nil { + return err + } + + if err := tpgresource.SetDataSourceAnnotations(d); err != nil { + return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found", id) + } + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/data_source_secret_manager_regional_secret_version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/data_source_secret_manager_regional_secret_version.go new file mode 100644 index 00000000000..acafa1ff6da --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/data_source_secret_manager_regional_secret_version.go @@ -0,0 +1,219 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package secretmanagerregional + +import ( + "encoding/base64" + "fmt" + "log" + "regexp" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceSecretManagerRegionalRegionalSecretVersion() *schema.Resource { + return &schema.Resource{ + Read: dataSourceSecretManagerRegionalRegionalSecretVersionRead, + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "location": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "secret": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + }, + "destroy_time": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + "secret_data": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "customer_managed_encryption": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_version_name": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "is_secret_data_base64": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + } +} + +func dataSourceSecretManagerRegionalRegionalSecretVersionRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + secretRegex := regexp.MustCompile("projects/(.+)/locations/(.+)/secrets/(.+)$") + parts := secretRegex.FindStringSubmatch(d.Get("secret").(string)) + + var project string + + // if reference of the secret is provided in the secret field + if len(parts) == 4 { + // Store values of project to set in state + project = parts[1] + if d.Get("project").(string) != "" && d.Get("project").(string) != parts[1] { + return fmt.Errorf("The project set on this secret version (%s) is not equal to the project where this secret exists (%s).", d.Get("project").(string), parts[1]) + } + if d.Get("location").(string) != "" && d.Get("location").(string) != parts[2] { + return fmt.Errorf("The location set on this secret version (%s) is not equal to the location where this secret exists (%s).", d.Get("location").(string), parts[2]) + } + if err := d.Set("location", parts[2]); err != nil { + return fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("secret", parts[3]); err != nil { + return fmt.Errorf("Error setting secret: %s", err) + } + } else { // if secret name is provided in the secret field + // Store values of project to set in state + project, err = tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Secret: %s", err) + } + if d.Get("location").(string) == "" { + return fmt.Errorf("Location must be set when providing only secret name") + } + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + + var url string + versionNum := d.Get("version") + + // set version if provided, else set version to latest + if 
versionNum != "" { + url, err = tpgresource.ReplaceVars(d, config, "{{SecretManagerRegionalBasePath}}projects/{{project}}/locations/{{location}}/secrets/{{secret}}/versions/{{version}}") + if err != nil { + return err + } + } else { + url, err = tpgresource.ReplaceVars(d, config, "{{SecretManagerRegionalBasePath}}projects/{{project}}/locations/{{location}}/secrets/{{secret}}/versions/latest") + if err != nil { + return err + } + } + + var secretVersion map[string]interface{} + secretVersion, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + + if err != nil { + return fmt.Errorf("Error retrieving available secret manager regional secret versions: %s", err.Error()) + } + + secretVersionRegex := regexp.MustCompile("projects/(.+)/locations/(.+)/secrets/(.+)/versions/(.+)$") + parts = secretVersionRegex.FindStringSubmatch(secretVersion["name"].(string)) + + if len(parts) != 5 { + return fmt.Errorf("secret name, %s, does not match format, projects/{{project}}/locations/{{location}}/secrets/{{secret}}/versions/{{version}}", secretVersion["name"].(string)) + } + + log.Printf("[DEBUG] Received Google Secret Manager Regional Secret Version: %q", secretVersion) + + if err := d.Set("version", parts[4]); err != nil { + return fmt.Errorf("Error setting version: %s", err) + } + + url = fmt.Sprintf("%s:access", url) + resp, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + + if err != nil { + return fmt.Errorf("Error retrieving available secret manager regional secret version access: %s", err.Error()) + } + + if err := d.Set("customer_managed_encryption", flattenSecretManagerRegionalRegionalSecretVersionCustomerManagedEncryption(secretVersion["customerManagedEncryption"], d, config)); err != nil { + return fmt.Errorf("Error setting customer_managed_encryption: %s", err) + } + + if err := d.Set("create_time", secretVersion["createTime"].(string)); err != nil { + return fmt.Errorf("Error setting create_time: %s", err) + } + + if secretVersion["destroyTime"] != nil { + if err := d.Set("destroy_time", secretVersion["destroyTime"].(string)); err != nil { + return fmt.Errorf("Error setting destroy_time: %s", err) + } + } + + if err := d.Set("name", secretVersion["name"].(string)); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + + if err := d.Set("enabled", true); err != nil { + return fmt.Errorf("Error setting enabled: %s", err) + } + + data := resp["payload"].(map[string]interface{}) + var secretData string + if d.Get("is_secret_data_base64").(bool) { + secretData = data["data"].(string) + } else { + payloadData, err := base64.StdEncoding.DecodeString(data["data"].(string)) + if err != nil { + return fmt.Errorf("error decoding secret manager regional secret version data: %s", err.Error()) + } + secretData = string(payloadData) + } + if err := d.Set("secret_data", secretData); err != nil { + return fmt.Errorf("Error setting secret_data: %s", err) + } + + d.SetId(secretVersion["name"].(string)) + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/data_source_secret_manager_regional_secret_version_access.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/data_source_secret_manager_regional_secret_version_access.go new file mode 100644 index 00000000000..606824c9715 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/data_source_secret_manager_regional_secret_version_access.go @@ -0,0 +1,174 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package secretmanagerregional + +import ( + "encoding/base64" + "fmt" + "log" + "regexp" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceSecretManagerRegionalRegionalSecretVersionAccess() *schema.Resource { + return &schema.Resource{ + Read: dataSourceSecretManagerRegionalRegionalSecretVersionAccessRead, + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "location": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "secret": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "secret_data": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "is_secret_data_base64": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + } +} +func dataSourceSecretManagerRegionalRegionalSecretVersionAccessRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + secretRegex := regexp.MustCompile("projects/(.+)/locations/(.+)/secrets/(.+)$") + dSecret, ok := d.Get("secret").(string) + if !ok { + return fmt.Errorf("wrong type for secret field (%T), expected string", d.Get("secret")) + } + parts := secretRegex.FindStringSubmatch(dSecret) + + var project string + + // if reference of the secret is provided in the secret field + if len(parts) == 4 { + // Stores value of project to set in state + project = parts[1] + if dProject, ok := d.Get("project").(string); !ok { + return fmt.Errorf("wrong type for project (%T), expected string", d.Get("project")) + } else if dProject != "" && dProject != project { + return fmt.Errorf("project field value (%s) does not match project of secret (%s).", d.Get("project").(string), project) + } + if dLocation, ok := d.Get("location").(string); !ok { + return fmt.Errorf("wrong type for location (%T), expected string", d.Get("location")) + } else if dLocation != "" && dLocation != parts[2] { + return fmt.Errorf("location field value (%s) does not match location of secret (%s).", dLocation, parts[2]) + } + if err := d.Set("location", parts[2]); err != nil { + return fmt.Errorf("error setting location: %s", err) + } + if err := d.Set("secret", parts[3]); err != nil { + return fmt.Errorf("error setting secret: %s", err) + } + } else { // if secret name is provided in the secret field + // Stores value of project to set in state + project, err = tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("error fetching project for Secret: %s", err) + } + if dLocation, ok := 
d.Get("location").(string); ok && dLocation == "" { + return fmt.Errorf("location must be set when providing only secret name") + } + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("error setting project: %s", err) + } + + var url string + versionNum := d.Get("version") + + // set version if provided, else set version to latest + if versionNum != "" { + url, err = tpgresource.ReplaceVars(d, config, "{{SecretManagerRegionalBasePath}}projects/{{project}}/locations/{{location}}/secrets/{{secret}}/versions/{{version}}") + if err != nil { + return err + } + } else { + url, err = tpgresource.ReplaceVars(d, config, "{{SecretManagerRegionalBasePath}}projects/{{project}}/locations/{{location}}/secrets/{{secret}}/versions/latest") + if err != nil { + return err + } + } + + url = fmt.Sprintf("%s:access", url) + resp, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + + if err != nil { + return fmt.Errorf("error retrieving available secret manager regional secret version access: %s", err.Error()) + } + + nameValue, ok := resp["name"] + if !ok { + return fmt.Errorf("read response didn't contain critical fields. Read may not have succeeded.") + } + if err := d.Set("name", nameValue.(string)); err != nil { + return fmt.Errorf("error setting name: %s", err) + } + + secretVersionRegex := regexp.MustCompile("projects/(.+)/locations/(.+)/secrets/(.+)/versions/(.+)$") + parts = secretVersionRegex.FindStringSubmatch(nameValue.(string)) + if len(parts) != 5 { + return fmt.Errorf("secret name, %s, does not match format, projects/{{project}}/locations/{{location}}/secrets/{{secret}}/versions/{{version}}", nameValue.(string)) + } + + log.Printf("[DEBUG] Received Google SecretManager Version: %q", parts[3]) + + if err := d.Set("version", parts[4]); err != nil { + return fmt.Errorf("error setting version: %s", err) + } + + data := resp["payload"].(map[string]interface{}) + var secretData string + if d.Get("is_secret_data_base64").(bool) { + secretData = data["data"].(string) + } else { + payloadData, err := base64.StdEncoding.DecodeString(data["data"].(string)) + if err != nil { + return fmt.Errorf("error decoding secret manager regional secret version data: %s", err.Error()) + } + secretData = string(payloadData) + } + if err := d.Set("secret_data", secretData); err != nil { + return fmt.Errorf("error setting secret_data: %s", err) + } + + d.SetId(nameValue.(string)) + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/data_source_secret_manager_regional_secrets.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/data_source_secret_manager_regional_secrets.go new file mode 100644 index 00000000000..b32d99da15c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/data_source_secret_manager_regional_secrets.go @@ -0,0 +1,178 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package secretmanagerregional + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceSecretManagerRegionalRegionalSecrets() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceSecretManagerRegionalRegionalSecret().Schema) + return &schema.Resource{ + Read: dataSourceSecretManagerRegionalRegionalSecretsRead, + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "location": { + Type: schema.TypeString, + Required: true, + }, + "filter": { + Type: schema.TypeString, + Description: `Filter string, adhering to the rules in List-operation filtering (https://cloud.google.com/secret-manager/docs/filtering). +List only secrets matching the filter. If filter is empty, all regional secrets are listed from the specified location.`, + Optional: true, + }, + "secrets": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: dsSchema, + }, + }, + }, + } +} + +func dataSourceSecretManagerRegionalRegionalSecretsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecretManagerRegionalBasePath}}projects/{{project}}/locations/{{location}}/secrets") + if err != nil { + return err + } + + filter, has_filter := d.GetOk("filter") + + if has_filter { + url, err = transport_tpg.AddQueryParams(url, map[string]string{"filter": filter.(string)}) + if err != nil { + return err + } + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Secret: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // To handle pagination locally + allSecrets := make([]interface{}, 0) + token := "" + for paginate := true; paginate; { + if token != "" { + url, err = transport_tpg.AddQueryParams(url, map[string]string{"pageToken": token}) + if err != nil { + return err + } + } + secrets, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecretManagerRegionalSecrets %q", d.Id())) + } + secretsInterface := secrets["secrets"] + if secretsInterface == nil { + break + } + allSecrets = append(allSecrets, secretsInterface.([]interface{})...) 
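+ // The API pages results via nextPageToken: keep requesting with the
+ // token from each response until a response comes back without one.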
+ tokenInterface := secrets["nextPageToken"] + if tokenInterface == nil { + paginate = false + } else { + paginate = true + token = tokenInterface.(string) + } + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("error setting project: %s", err) + } + + if err := d.Set("filter", filter); err != nil { + return fmt.Errorf("error setting filter: %s", err) + } + + if err := d.Set("secrets", flattenSecretManagerRegionalRegionalSecretsSecrets(allSecrets, d, config)); err != nil { + return fmt.Errorf("error setting secrets: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/secrets") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + if has_filter { + id += "/filter=" + filter.(string) + } + d.SetId(id) + + return nil +} + +func flattenSecretManagerRegionalRegionalSecretsSecrets(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + + transformed = append(transformed, map[string]interface{}{ + "annotations": flattenSecretManagerRegionalRegionalSecretEffectiveAnnotations(original["annotations"], d, config), + "effective_annotations": flattenSecretManagerRegionalRegionalSecretEffectiveAnnotations(original["annotations"], d, config), + "expire_time": flattenSecretManagerRegionalRegionalSecretExpireTime(original["expireTime"], d, config), + "labels": flattenSecretManagerRegionalRegionalSecretEffectiveLabels(original["labels"], d, config), + "effective_labels": flattenSecretManagerRegionalRegionalSecretEffectiveLabels(original["labels"], d, config), + "terraform_labels": flattenSecretManagerRegionalRegionalSecretEffectiveLabels(original["labels"], d, config), + "version_aliases": flattenSecretManagerRegionalRegionalSecretVersionAliases(original["versionAliases"], d, config), + "rotation": flattenSecretManagerRegionalRegionalSecretRotation(original["rotation"], d, config), + "topics": flattenSecretManagerRegionalRegionalSecretTopics(original["topics"], d, config), + "version_destroy_ttl": flattenSecretManagerRegionalRegionalSecretVersionDestroyTtl(original["versionDestroyTtl"], d, config), + "customer_managed_encryption": flattenSecretManagerRegionalRegionalSecretCustomerManagedEncryption(original["customerManagedEncryption"], d, config), + "create_time": flattenSecretManagerRegionalRegionalSecretCreateTime(original["createTime"], d, config), + "name": flattenSecretManagerRegionalRegionalSecretName(original["name"], d, config), + "project": getDataFromName(original["name"], 1), + "location": getDataFromName(original["name"], 3), + "secret_id": getDataFromName(original["name"], 5), + }) + } + return transformed +} + +func getDataFromName(v interface{}, part int) string { + name := v.(string) + split := strings.Split(name, "/") + return split[part] +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/iam_secret_manager_regional_secret.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/iam_secret_manager_regional_secret.go new file mode 100644 index 00000000000..5c69959c821 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/iam_secret_manager_regional_secret.go @@ -0,0 +1,249 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package secretmanagerregional + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var SecretManagerRegionalRegionalSecretIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "secret_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type SecretManagerRegionalRegionalSecretIamUpdater struct { + project string + location string + secretId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func SecretManagerRegionalRegionalSecretIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("secret_id"); ok { + values["secret_id"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/secrets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("secret_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &SecretManagerRegionalRegionalSecretIamUpdater{ + project: values["project"], + location: values["location"], + secretId: values["secret_id"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("secret_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting secret_id: %s", err) + } + + return u, nil +} + +func 
SecretManagerRegionalRegionalSecretIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/secrets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &SecretManagerRegionalRegionalSecretIamUpdater{ + project: values["project"], + location: values["location"], + secretId: values["secret_id"], + d: d, + Config: config, + } + if err := d.Set("secret_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting secret_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *SecretManagerRegionalRegionalSecretIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyRegionalSecretUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + url, err = transport_tpg.AddQueryParams(url, map[string]string{"options.requestedPolicyVersion": fmt.Sprintf("%d", tpgiamresource.IamPolicyVersion)}) + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *SecretManagerRegionalRegionalSecretIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyRegionalSecretUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *SecretManagerRegionalRegionalSecretIamUpdater) qualifyRegionalSecretUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{SecretManagerRegionalBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/secrets/%s", u.project, u.location, u.secretId), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if 
err != nil { + return "", err + } + return url, nil +} + +func (u *SecretManagerRegionalRegionalSecretIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/secrets/%s", u.project, u.location, u.secretId) +} + +func (u *SecretManagerRegionalRegionalSecretIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-secretmanagerregional-regionalsecret-%s", u.GetResourceId()) +} + +func (u *SecretManagerRegionalRegionalSecretIamUpdater) DescribeResource() string { + return fmt.Sprintf("secretmanagerregional regionalsecret %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/resource_secret_manager_regional_secret.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/resource_secret_manager_regional_secret.go new file mode 100644 index 00000000000..f5701e0eccb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/resource_secret_manager_regional_secret.go @@ -0,0 +1,936 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package secretmanagerregional + +import ( + "fmt" + "log" + "net/http" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceSecretManagerRegionalRegionalSecret() *schema.Resource { + return &schema.Resource{ + Create: resourceSecretManagerRegionalRegionalSecretCreate, + Read: resourceSecretManagerRegionalRegionalSecretRead, + Update: resourceSecretManagerRegionalRegionalSecretUpdate, + Delete: resourceSecretManagerRegionalRegionalSecretDelete, + + Importer: &schema.ResourceImporter{ + State: resourceSecretManagerRegionalRegionalSecretImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.SetLabelsDiff, + tpgresource.SetAnnotationsDiff, + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location of the regional secret. eg us-central1`, + }, + "secret_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `This must be unique within the project.`, + }, + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: `Custom metadata about the regional secret. + +Annotations are distinct from various forms of labels. 
Annotations exist to allow +client tools to store their own state information without requiring a database. + +Annotation keys must be between 1 and 63 characters long, have a UTF-8 encoding of +maximum 128 bytes, begin and end with an alphanumeric character ([a-z0-9A-Z]), and +may have dashes (-), underscores (_), dots (.), and alphanumerics in between these +symbols. + +The total size of annotation keys and values must be less than 16KiB. + +An object containing a list of "key": value pairs. Example: +{ "name": "wrench", "mass": "1.3kg", "count": "3" }. + + +**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. +Please refer to the field 'effective_annotations' for all of the annotations present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "customer_managed_encryption": { + Type: schema.TypeList, + Optional: true, + Description: `The customer-managed encryption configuration of the regional secret.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_name": { + Type: schema.TypeString, + Required: true, + Description: `The resource name of the Cloud KMS CryptoKey used to encrypt secret payloads.`, + }, + }, + }, + }, + "expire_time": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Timestamp in UTC when the regional secret is scheduled to expire. This is always provided on +output, regardless of what was sent on input. A timestamp in RFC3339 UTC "Zulu" format, with +nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and +"2014-10-02T15:01:23.045123456Z". Only one of 'expire_time' or 'ttl' can be provided.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `The labels assigned to this regional secret. + +Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, +and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} + +Label values must be between 0 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, +and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} + +No more than 64 labels can be assigned to a given resource. + +An object containing a list of "key": value pairs. Example: +{ "name": "wrench", "mass": "1.3kg", "count": "3" }. + + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "rotation": { + Type: schema.TypeList, + Optional: true, + Description: `The rotation time and period for a regional secret. At 'next_rotation_time', Secret Manager +will send a Pub/Sub notification to the topics configured on the Secret. 'topics' must be +set to configure rotation.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "next_rotation_time": { + Type: schema.TypeString, + Optional: true, + Description: `Timestamp in UTC at which the Secret is scheduled to rotate. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine +fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "rotation_period": { + Type: schema.TypeString, + Optional: true, + Description: `The Duration between rotation notifications. 
Must be in seconds and at least 3600s (1h) +and at most 3153600000s (100 years). If rotationPeriod is set, 'next_rotation_time' must +be set. 'next_rotation_time' will be advanced by this period when the service +automatically sends rotation notifications.`, + RequiredWith: []string{"rotation.0.next_rotation_time"}, + }, + }, + }, + RequiredWith: []string{"topics"}, + }, + "topics": { + Type: schema.TypeList, + Optional: true, + Description: `A list of up to 10 Pub/Sub topics to which messages are published when control plane +operations are called on the regional secret or its versions.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `The resource name of the Pub/Sub topic that will be published to, in the following format: +projects/*/topics/*. For publication to succeed, the Secret Manager Service +Agent service account must have pubsub.publisher permissions on the topic.`, + }, + }, + }, + }, + "ttl": { + Type: schema.TypeString, + Optional: true, + Description: `The TTL for the regional secret. A duration in seconds with up to nine fractional digits, +terminated by 's'. Example: "3.5s". Only one of 'ttl' or 'expire_time' can be provided.`, + }, + "version_aliases": { + Type: schema.TypeMap, + Optional: true, + Description: `Mapping from version alias to version name. + +A version alias is a string with a maximum length of 63 characters and can contain +uppercase and lowercase letters, numerals, and the hyphen (-) and underscore ('_') +characters. An alias string must start with a letter and cannot be the string +'latest' or 'NEW'. No more than 50 aliases can be assigned to a given secret. + +An object containing a list of "key": value pairs. Example: +{ "name": "wrench", "mass": "1.3kg", "count": "3" }.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "version_destroy_ttl": { + Type: schema.TypeString, + Optional: true, + Description: `Secret Version TTL after destruction request. +This is a part of the delayed delete feature on Secret Version. +For secret with versionDestroyTtl>0, version destruction doesn't happen immediately +on calling destroy; instead, the version goes to a disabled state and +the actual destruction happens after this TTL expires. It must be at least 24h.`, + },
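To make the 24h lower bound on version_destroy_ttl concrete, a hedged validation sketch (validateVersionDestroyTTL is illustrative and not part of this schema; time.ParseDuration only approximates the API's 's'-suffixed duration strings; assumes the fmt and time imports):

func validateVersionDestroyTTL(s string) error {
	d, err := time.ParseDuration(s) // accepts e.g. "86400s"
	if err != nil {
		return fmt.Errorf("invalid duration %q: %v", s, err)
	}
	if d < 24*time.Hour {
		return fmt.Errorf("version_destroy_ttl must be at least 24h, got %s", d)
	}
	return nil
}

+ "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time at which the regional secret was created.`, + }, + "effective_annotations": { + Type: schema.TypeMap, + Computed: true, + Description: `All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the regional secret.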
Format: +'projects/{{project}}/locations/{{location}}/secrets/{{secret_id}}'`, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSecretManagerRegionalRegionalSecretCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + versionAliasesProp, err := expandSecretManagerRegionalRegionalSecretVersionAliases(d.Get("version_aliases"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("version_aliases"); !tpgresource.IsEmptyValue(reflect.ValueOf(versionAliasesProp)) && (ok || !reflect.DeepEqual(v, versionAliasesProp)) { + obj["versionAliases"] = versionAliasesProp + } + customerManagedEncryptionProp, err := expandSecretManagerRegionalRegionalSecretCustomerManagedEncryption(d.Get("customer_managed_encryption"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("customer_managed_encryption"); !tpgresource.IsEmptyValue(reflect.ValueOf(customerManagedEncryptionProp)) && (ok || !reflect.DeepEqual(v, customerManagedEncryptionProp)) { + obj["customerManagedEncryption"] = customerManagedEncryptionProp + } + topicsProp, err := expandSecretManagerRegionalRegionalSecretTopics(d.Get("topics"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("topics"); !tpgresource.IsEmptyValue(reflect.ValueOf(topicsProp)) && (ok || !reflect.DeepEqual(v, topicsProp)) { + obj["topics"] = topicsProp + } + rotationProp, err := expandSecretManagerRegionalRegionalSecretRotation(d.Get("rotation"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("rotation"); !tpgresource.IsEmptyValue(reflect.ValueOf(rotationProp)) && (ok || !reflect.DeepEqual(v, rotationProp)) { + obj["rotation"] = rotationProp + } + expireTimeProp, err := expandSecretManagerRegionalRegionalSecretExpireTime(d.Get("expire_time"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("expire_time"); !tpgresource.IsEmptyValue(reflect.ValueOf(expireTimeProp)) && (ok || !reflect.DeepEqual(v, expireTimeProp)) { + obj["expireTime"] = expireTimeProp + } + ttlProp, err := expandSecretManagerRegionalRegionalSecretTtl(d.Get("ttl"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ttl"); !tpgresource.IsEmptyValue(reflect.ValueOf(ttlProp)) && (ok || !reflect.DeepEqual(v, ttlProp)) { + obj["ttl"] = ttlProp + } + versionDestroyTtlProp, err := expandSecretManagerRegionalRegionalSecretVersionDestroyTtl(d.Get("version_destroy_ttl"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("version_destroy_ttl"); !tpgresource.IsEmptyValue(reflect.ValueOf(versionDestroyTtlProp)) && (ok || !reflect.DeepEqual(v, versionDestroyTtlProp)) { + obj["versionDestroyTtl"] = versionDestroyTtlProp + } + labelsProp, err := expandSecretManagerRegionalRegionalSecretEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || 
!reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + annotationsProp, err := expandSecretManagerRegionalRegionalSecretEffectiveAnnotations(d.Get("effective_annotations"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_annotations"); !tpgresource.IsEmptyValue(reflect.ValueOf(annotationsProp)) && (ok || !reflect.DeepEqual(v, annotationsProp)) { + obj["annotations"] = annotationsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecretManagerRegionalBasePath}}projects/{{project}}/locations/{{location}}/secrets?secretId={{secret_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new RegionalSecret: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionalSecret: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating RegionalSecret: %s", err) + } + if err := d.Set("name", flattenSecretManagerRegionalRegionalSecretName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/secrets/{{secret_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating RegionalSecret %q: %#v", d.Id(), res) + + return resourceSecretManagerRegionalRegionalSecretRead(d, meta) +} + +func resourceSecretManagerRegionalRegionalSecretRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecretManagerRegionalBasePath}}projects/{{project}}/locations/{{location}}/secrets/{{secret_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionalSecret: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecretManagerRegionalRegionalSecret %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading RegionalSecret: %s", err) + } + + if err := d.Set("name", flattenSecretManagerRegionalRegionalSecretName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionalSecret: %s", err) + } + if err := d.Set("create_time", 
flattenSecretManagerRegionalRegionalSecretCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionalSecret: %s", err) + } + if err := d.Set("labels", flattenSecretManagerRegionalRegionalSecretLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionalSecret: %s", err) + } + if err := d.Set("annotations", flattenSecretManagerRegionalRegionalSecretAnnotations(res["annotations"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionalSecret: %s", err) + } + if err := d.Set("version_aliases", flattenSecretManagerRegionalRegionalSecretVersionAliases(res["versionAliases"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionalSecret: %s", err) + } + if err := d.Set("customer_managed_encryption", flattenSecretManagerRegionalRegionalSecretCustomerManagedEncryption(res["customerManagedEncryption"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionalSecret: %s", err) + } + if err := d.Set("topics", flattenSecretManagerRegionalRegionalSecretTopics(res["topics"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionalSecret: %s", err) + } + if err := d.Set("rotation", flattenSecretManagerRegionalRegionalSecretRotation(res["rotation"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionalSecret: %s", err) + } + if err := d.Set("expire_time", flattenSecretManagerRegionalRegionalSecretExpireTime(res["expireTime"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionalSecret: %s", err) + } + if err := d.Set("version_destroy_ttl", flattenSecretManagerRegionalRegionalSecretVersionDestroyTtl(res["versionDestroyTtl"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionalSecret: %s", err) + } + if err := d.Set("terraform_labels", flattenSecretManagerRegionalRegionalSecretTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionalSecret: %s", err) + } + if err := d.Set("effective_labels", flattenSecretManagerRegionalRegionalSecretEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionalSecret: %s", err) + } + if err := d.Set("effective_annotations", flattenSecretManagerRegionalRegionalSecretEffectiveAnnotations(res["annotations"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionalSecret: %s", err) + } + + return nil +} + +func resourceSecretManagerRegionalRegionalSecretUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionalSecret: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + versionAliasesProp, err := expandSecretManagerRegionalRegionalSecretVersionAliases(d.Get("version_aliases"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("version_aliases"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, versionAliasesProp)) { + obj["versionAliases"] = versionAliasesProp + } + customerManagedEncryptionProp, err := expandSecretManagerRegionalRegionalSecretCustomerManagedEncryption(d.Get("customer_managed_encryption"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("customer_managed_encryption"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, customerManagedEncryptionProp)) { + obj["customerManagedEncryption"] = customerManagedEncryptionProp + } + topicsProp, err := expandSecretManagerRegionalRegionalSecretTopics(d.Get("topics"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("topics"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, topicsProp)) { + obj["topics"] = topicsProp + } + rotationProp, err := expandSecretManagerRegionalRegionalSecretRotation(d.Get("rotation"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("rotation"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, rotationProp)) { + obj["rotation"] = rotationProp + } + expireTimeProp, err := expandSecretManagerRegionalRegionalSecretExpireTime(d.Get("expire_time"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("expire_time"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, expireTimeProp)) { + obj["expireTime"] = expireTimeProp + } + ttlProp, err := expandSecretManagerRegionalRegionalSecretTtl(d.Get("ttl"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ttl"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ttlProp)) { + obj["ttl"] = ttlProp + } + versionDestroyTtlProp, err := expandSecretManagerRegionalRegionalSecretVersionDestroyTtl(d.Get("version_destroy_ttl"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("version_destroy_ttl"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, versionDestroyTtlProp)) { + obj["versionDestroyTtl"] = versionDestroyTtlProp + } + labelsProp, err := expandSecretManagerRegionalRegionalSecretEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + annotationsProp, err := expandSecretManagerRegionalRegionalSecretEffectiveAnnotations(d.Get("effective_annotations"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_annotations"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, annotationsProp)) { + obj["annotations"] = annotationsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecretManagerRegionalBasePath}}projects/{{project}}/locations/{{location}}/secrets/{{secret_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating RegionalSecret %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("version_aliases") { + updateMask = append(updateMask, "versionAliases") + } + + if d.HasChange("customer_managed_encryption") { + updateMask = append(updateMask, "customerManagedEncryption") + } + + if d.HasChange("topics") { + updateMask = append(updateMask, "topics") + } + + if d.HasChange("rotation") { + updateMask = append(updateMask, "rotation") + } + + if d.HasChange("expire_time") { + updateMask = append(updateMask, "expireTime") + } + + if d.HasChange("ttl") { + updateMask = append(updateMask, "ttl") + } + + if d.HasChange("version_destroy_ttl") { + updateMask = append(updateMask, "versionDestroyTtl") + } + + if d.HasChange("effective_labels") { + updateMask = append(updateMask, "labels") + } + + if 
d.HasChange("effective_annotations") { + updateMask = append(updateMask, "annotations") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + // As the API expects only one of ttl or expireTime + if d.HasChange("ttl") && !d.HasChange("expire_time") { + delete(obj, "expireTime") + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating RegionalSecret %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating RegionalSecret %q: %#v", d.Id(), res) + } + + } + + return resourceSecretManagerRegionalRegionalSecretRead(d, meta) +} + +func resourceSecretManagerRegionalRegionalSecretDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionalSecret: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{SecretManagerRegionalBasePath}}projects/{{project}}/locations/{{location}}/secrets/{{secret_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting RegionalSecret %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "RegionalSecret") + } + + log.Printf("[DEBUG] Finished deleting RegionalSecret %q: %#v", d.Id(), res) + return nil +} + +func resourceSecretManagerRegionalRegionalSecretImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^projects/(?P[^/]+)/locations/(?P[^/]+)/secrets/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/secrets/{{secret_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenSecretManagerRegionalRegionalSecretName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenSecretManagerRegionalRegionalSecretCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecretManagerRegionalRegionalSecretLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenSecretManagerRegionalRegionalSecretAnnotations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("annotations"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenSecretManagerRegionalRegionalSecretVersionAliases(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecretManagerRegionalRegionalSecretCustomerManagedEncryption(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["kms_key_name"] = + flattenSecretManagerRegionalRegionalSecretCustomerManagedEncryptionKmsKeyName(original["kmsKeyName"], d, config) + return []interface{}{transformed} +} +func flattenSecretManagerRegionalRegionalSecretCustomerManagedEncryptionKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecretManagerRegionalRegionalSecretTopics(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenSecretManagerRegionalRegionalSecretTopicsName(original["name"], d, config), + }) + } + return transformed +} +func flattenSecretManagerRegionalRegionalSecretTopicsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecretManagerRegionalRegionalSecretRotation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["next_rotation_time"] = + flattenSecretManagerRegionalRegionalSecretRotationNextRotationTime(original["nextRotationTime"], d, config) + transformed["rotation_period"] = + flattenSecretManagerRegionalRegionalSecretRotationRotationPeriod(original["rotationPeriod"], d, config) + return []interface{}{transformed} +} +func flattenSecretManagerRegionalRegionalSecretRotationNextRotationTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecretManagerRegionalRegionalSecretRotationRotationPeriod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenSecretManagerRegionalRegionalSecretExpireTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecretManagerRegionalRegionalSecretVersionDestroyTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecretManagerRegionalRegionalSecretTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("terraform_labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenSecretManagerRegionalRegionalSecretEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecretManagerRegionalRegionalSecretEffectiveAnnotations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandSecretManagerRegionalRegionalSecretVersionAliases(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandSecretManagerRegionalRegionalSecretCustomerManagedEncryption(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKmsKeyName, err := expandSecretManagerRegionalRegionalSecretCustomerManagedEncryptionKmsKeyName(original["kms_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyName"] = transformedKmsKeyName + } + + return transformed, nil +} + +func expandSecretManagerRegionalRegionalSecretCustomerManagedEncryptionKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecretManagerRegionalRegionalSecretTopics(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandSecretManagerRegionalRegionalSecretTopicsName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + req = append(req, transformed) + } + return req, nil +} + +func expandSecretManagerRegionalRegionalSecretTopicsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecretManagerRegionalRegionalSecretRotation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := 
raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNextRotationTime, err := expandSecretManagerRegionalRegionalSecretRotationNextRotationTime(original["next_rotation_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNextRotationTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nextRotationTime"] = transformedNextRotationTime + } + + transformedRotationPeriod, err := expandSecretManagerRegionalRegionalSecretRotationRotationPeriod(original["rotation_period"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRotationPeriod); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rotationPeriod"] = transformedRotationPeriod + } + + return transformed, nil +} + +func expandSecretManagerRegionalRegionalSecretRotationNextRotationTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecretManagerRegionalRegionalSecretRotationRotationPeriod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecretManagerRegionalRegionalSecretExpireTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecretManagerRegionalRegionalSecretTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecretManagerRegionalRegionalSecretVersionDestroyTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecretManagerRegionalRegionalSecretEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandSecretManagerRegionalRegionalSecretEffectiveAnnotations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/resource_secret_manager_regional_secret_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/resource_secret_manager_regional_secret_sweeper.go new file mode 100644 index 00000000000..3fccc809e65 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/resource_secret_manager_regional_secret_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. 
+// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package secretmanagerregional + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("SecretManagerRegionalRegionalSecret", testSweepSecretManagerRegionalRegionalSecret) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepSecretManagerRegionalRegionalSecret(region string) error { + resourceName := "SecretManagerRegionalRegionalSecret" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://secretmanager.{{location}}.rep.googleapis.com/v1/projects/{{project}}/locations/{{location}}/secrets", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["regionalSecrets"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
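+ // GetResourceNameFromSelfLink keeps only the final path segment, so either the "id" or the "name" field yields the bare secret ID that is checked for a sweepable test prefix and appended to the delete URL below.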
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://secretmanager.{{location}}.rep.googleapis.com/v1/projects/{{project}}/locations/{{location}}/secrets/{{secret_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/resource_secret_manager_regional_secret_version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/resource_secret_manager_regional_secret_version.go new file mode 100644 index 00000000000..b72a08d82e3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional/resource_secret_manager_regional_secret_version.go @@ -0,0 +1,610 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package secretmanagerregional + +import ( + "encoding/base64" + "fmt" + "log" + "net/http" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "google.golang.org/api/googleapi" +) + +func ResourceSecretManagerRegionalRegionalSecretVersion() *schema.Resource { + return &schema.Resource{ + Create: resourceSecretManagerRegionalRegionalSecretVersionCreate, + Read: resourceSecretManagerRegionalRegionalSecretVersionRead, + Update: resourceSecretManagerRegionalRegionalSecretVersionUpdate, + Delete: resourceSecretManagerRegionalRegionalSecretVersionDelete, + + Importer: &schema.ResourceImporter{ + State: resourceSecretManagerRegionalRegionalSecretVersionImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "secret_data": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The secret data. Must be no larger than 64KiB.`, + Sensitive: true, + }, + + "secret": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Secret Manager regional secret resource.`, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `The current state of the regional secret version.`, + Default: true, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time at which the regional secret version was created.`, + }, + "customer_managed_encryption": { + Type: schema.TypeList, + Computed: true, + Description: `The customer-managed encryption configuration of the regional secret.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_version_name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the Cloud KMS CryptoKey used to encrypt secret payloads.`, + }, + }, + }, + }, + "destroy_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time at which the regional secret version was destroyed. Only present if state is DESTROYED.`, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Description: `Location of Secret Manager regional secret resource.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the regional secret version. Format: +'projects/{{project}}/locations/{{location}}/secrets/{{secret_id}}/versions/{{version}}'`, + }, + "version": { + Type: schema.TypeString, + Computed: true, + Description: `The version of the Regional Secret.`, + }, + "deletion_policy": { + Type: schema.TypeString, + Optional: true, + Description: `The deletion policy for the regional secret version. Setting 'ABANDON' allows the resource +to be abandoned rather than deleted. Setting 'DISABLE' allows the resource to be +disabled rather than deleted. Default is 'DELETE'. 
Possible values are: + * DELETE + * DISABLE + * ABANDON`, + Default: "DELETE", + }, + "is_secret_data_base64": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + Description: `If set to 'true', the secret data is expected to be a base64-encoded string and will be sent as is.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSecretManagerRegionalRegionalSecretVersionCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + stateProp, err := expandSecretManagerRegionalRegionalSecretVersionEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(stateProp)) && (ok || !reflect.DeepEqual(v, stateProp)) { + obj["state"] = stateProp + } + payloadProp, err := expandSecretManagerRegionalRegionalSecretVersionPayload(nil, d, config) + if err != nil { + return err + } else if !tpgresource.IsEmptyValue(reflect.ValueOf(payloadProp)) { + obj["payload"] = payloadProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecretManagerRegionalBasePath}}{{secret}}:addVersion") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new RegionalSecretVersion: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + secret := d.Get("secret").(string) + secretRegex := regexp.MustCompile("projects/(.+)/locations/(.+)/secrets/(.+)$") + + parts := secretRegex.FindStringSubmatch(secret) + if len(parts) != 4 { + return fmt.Errorf("secret does not fit the format `projects/{{project}}/locations/{{location}}/secrets/{{secret}}`") + } + + if err := d.Set("location", parts[2]); err != nil { + return fmt.Errorf("Error setting location: %s", err) + } + + // Override the url after setting the location + url, err = tpgresource.ReplaceVars(d, config, "{{SecretManagerRegionalBasePath}}{{secret}}:addVersion") + if err != nil { + return err + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating RegionalSecretVersion: %s", err) + } + if err := d.Set("name", flattenSecretManagerRegionalRegionalSecretVersionName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // `name` is autogenerated from the API so it needs to be set post-create + name, ok := res["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") + } + if err := d.Set("name", name.(string)); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name.(string)) + + _, err = expandSecretManagerRegionalRegionalSecretVersionEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished creating RegionalSecretVersion %q: %#v", d.Id(), res) + + return resourceSecretManagerRegionalRegionalSecretVersionRead(d, meta) +} + +func resourceSecretManagerRegionalRegionalSecretVersionRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecretManagerRegionalBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + secret := d.Get("secret").(string) + secretRegex := regexp.MustCompile("projects/(.+)/locations/(.+)/secrets/(.+)$") + + parts := secretRegex.FindStringSubmatch(secret) + if len(parts) != 4 { + return fmt.Errorf("secret does not fit the format `projects/{{project}}/locations/{{location}}/secrets/{{secret}}`") + } + + if err := d.Set("location", parts[2]); err != nil { + return fmt.Errorf("Error setting location: %s", err) + } + + // Override the url after setting the location + url, err = tpgresource.ReplaceVars(d, config, "{{SecretManagerRegionalBasePath}}{{name}}") + if err != nil { + return err + } + + // Explicitly set the field to default value if unset + if _, ok := d.GetOkExists("is_secret_data_base64"); !ok { + if err := d.Set("is_secret_data_base64", false); err != nil { + return fmt.Errorf("Error setting is_secret_data_base64: %s", err) + } + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecretManagerRegionalRegionalSecretVersion %q", d.Id())) + } + + res, err = resourceSecretManagerRegionalRegionalSecretVersionDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing SecretManagerRegionalRegionalSecretVersion because it no longer exists.") + d.SetId("") + return nil + } + + // Explicitly set virtual fields to default values if unset + if _, ok := d.GetOkExists("deletion_policy"); !ok { + if err := d.Set("deletion_policy", "DELETE"); err != nil { + return fmt.Errorf("Error setting deletion_policy: %s", err) + } + } + + if err := d.Set("name", flattenSecretManagerRegionalRegionalSecretVersionName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionalSecretVersion: %s", err) + } + if err := d.Set("create_time", flattenSecretManagerRegionalRegionalSecretVersionCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionalSecretVersion: %s", err) + } + if err := d.Set("destroy_time", flattenSecretManagerRegionalRegionalSecretVersionDestroyTime(res["destroyTime"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionalSecretVersion: %s", err) + } + if err := d.Set("customer_managed_encryption", flattenSecretManagerRegionalRegionalSecretVersionCustomerManagedEncryption(res["customerManagedEncryption"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionalSecretVersion: %s", err) + } + if err := d.Set("version", flattenSecretManagerRegionalRegionalSecretVersionVersion(res["version"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionalSecretVersion: %s", err) + } + if err := d.Set("enabled", flattenSecretManagerRegionalRegionalSecretVersionEnabled(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionalSecretVersion: %s", err) + } + // Terraform must set the top level schema field, but since this object contains collapsed properties + // it's difficult to know what the top level should be. Instead we just loop over the map returned from flatten. 
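+ // The payload flattener below issues a live :access request and can return an error value instead of the expected single-element list; the *googleapi.Error assertion surfaces API failures before the list cast.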
+ if flattenedProp := flattenSecretManagerRegionalRegionalSecretVersionPayload(res["payload"], d, config); flattenedProp != nil { + if gerr, ok := flattenedProp.(*googleapi.Error); ok { + return fmt.Errorf("Error reading RegionalSecretVersion: %s", gerr) + } + casted := flattenedProp.([]interface{})[0] + if casted != nil { + for k, v := range casted.(map[string]interface{}) { + if err := d.Set(k, v); err != nil { + return fmt.Errorf("Error setting %s: %s", k, err) + } + } + } + } + + return nil +} + +func resourceSecretManagerRegionalRegionalSecretVersionUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + _, err := expandSecretManagerRegionalRegionalSecretVersionEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } + + return resourceSecretManagerRegionalRegionalSecretVersionRead(d, meta) +} + +func resourceSecretManagerRegionalRegionalSecretVersionDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{SecretManagerRegionalBasePath}}{{name}}:destroy") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + deletionPolicy := d.Get("deletion_policy") + + if deletionPolicy == "ABANDON" { + return nil + } else if deletionPolicy == "DISABLE" { + url, err = tpgresource.ReplaceVars(d, config, "{{SecretManagerRegionalBasePath}}{{name}}:disable") + if err != nil { + return err + } + } + + log.Printf("[DEBUG] Deleting RegionalSecretVersion %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "RegionalSecretVersion") + } + + log.Printf("[DEBUG] Finished deleting RegionalSecretVersion %q: %#v", d.Id(), res) + return nil +} + +func resourceSecretManagerRegionalRegionalSecretVersionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P<name>.+)"}, d, config); err != nil { + return nil, err + } + + name := d.Get("name").(string) + secretRegex := regexp.MustCompile("(projects/.+/locations/.+/secrets/.+)/versions/.+$") + versionRegex := regexp.MustCompile("projects/(.+)/locations/(.+)/secrets/(.+)/versions/(.+)$") + + parts := secretRegex.FindStringSubmatch(name) + if len(parts) != 2 { + return nil, fmt.Errorf("Version name does not fit the format `projects/{{project}}/locations/{{location}}/secrets/{{secret}}/versions/{{version}}`") + } + if err := d.Set("secret", parts[1]); err != nil { + return nil, fmt.Errorf("Error setting secret: %s", err) + } + + parts = versionRegex.FindStringSubmatch(name) + + if err := d.Set("version", parts[4]); err != nil { + return nil, fmt.Errorf("Error setting version: %s", err) + } + + // Explicitly set virtual fields to default values on import + if err := d.Set("deletion_policy", 
"DELETE"); err != nil { + return nil, fmt.Errorf("Error setting deletion policy: %s", err) + } + + if err := d.Set("location", parts[2]); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenSecretManagerRegionalRegionalSecretVersionName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecretManagerRegionalRegionalSecretVersionCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecretManagerRegionalRegionalSecretVersionDestroyTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecretManagerRegionalRegionalSecretVersionCustomerManagedEncryption(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["kms_key_version_name"] = + flattenSecretManagerRegionalRegionalSecretVersionCustomerManagedEncryptionKmsKeyVersionName(original["kmsKeyVersionName"], d, config) + return []interface{}{transformed} +} +func flattenSecretManagerRegionalRegionalSecretVersionCustomerManagedEncryptionKmsKeyVersionName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecretManagerRegionalRegionalSecretVersionVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + name := d.Get("name").(string) + secretRegex := regexp.MustCompile("projects/(.+)/locations/(.+)/secrets/(.+)/versions/(.+)$") + + parts := secretRegex.FindStringSubmatch(name) + if len(parts) != 5 { + return fmt.Errorf("Version name does not fit the format `projects/{{project}}/locations/{{location}}/secrets/{{secret}}/versions/{{version}}`") + } + + return parts[4] +} + +func flattenSecretManagerRegionalRegionalSecretVersionEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v.(string) == "ENABLED" { + return true + } + + return false +} + +func flattenSecretManagerRegionalRegionalSecretVersionPayload(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + transformed := make(map[string]interface{}) + + // if this secret version is disabled, the api will return an error, as the value cannot be accessed, return what we have + if d.Get("enabled").(bool) == false { + transformed["secret_data"] = d.Get("secret_data") + return []interface{}{transformed} + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecretManagerRegionalBasePath}}{{name}}:access") + if err != nil { + return err + } + + parts := strings.Split(d.Get("name").(string), "/") + project := parts[1] + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + accessRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return err + } + + if d.Get("is_secret_data_base64").(bool) { + transformed["secret_data"] = accessRes["payload"].(map[string]interface{})["data"].(string) + } else { + data, err := base64.StdEncoding.DecodeString(accessRes["payload"].(map[string]interface{})["data"].(string)) + if err != nil { + return err + } + transformed["secret_data"] 
= string(data) + } + return []interface{}{transformed} +} + +func expandSecretManagerRegionalRegionalSecretVersionEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + name := d.Get("name").(string) + if name == "" { + return "", nil + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecretManagerRegionalBasePath}}{{name}}") + if err != nil { + return nil, err + } + + if v == true { + url = fmt.Sprintf("%s:enable", url) + } else { + url = fmt.Sprintf("%s:disable", url) + } + + parts := strings.Split(name, "/") + project := parts[1] + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return nil, err + } + + return nil, nil +} + +func expandSecretManagerRegionalRegionalSecretVersionPayload(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + transformed := make(map[string]interface{}) + transformedSecretData, err := expandSecretManagerRegionalRegionalSecretVersionPayloadSecretData(d.Get("secret_data"), d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecretData); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["data"] = transformedSecretData + } + + return transformed, nil +} + +func expandSecretManagerRegionalRegionalSecretVersionPayloadSecretData(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil { + return nil, nil + } + + if d.Get("is_secret_data_base64").(bool) { + return v, nil + } + return base64.StdEncoding.EncodeToString([]byte(v.(string))), nil +} + +func resourceSecretManagerRegionalRegionalSecretVersionDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + if v := res["state"]; v == "DESTROYED" { + return nil, nil + } + + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securesourcemanager/resource_secure_source_manager_branch_rule.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securesourcemanager/resource_secure_source_manager_branch_rule.go new file mode 100644 index 00000000000..7702c81f29c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securesourcemanager/resource_secure_source_manager_branch_rule.go @@ -0,0 +1,660 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
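+// A minimal configuration for the branch rule resource defined in this file
+// might look like the following (a sketch with illustrative values; the
+// resource type name assumes the provider's standard registration):
+//
+//   resource "google_secure_source_manager_branch_rule" "example" {
+//     branch_rule_id  = "example-rule"
+//     repository_id   = "example-repo"
+//     location        = "us-central1"
+//     include_pattern = "main"
+//   }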
+// +// ---------------------------------------------------------------------------- + +package securesourcemanager + +import ( + "fmt" + "log" + "net/http" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceSecureSourceManagerBranchRule() *schema.Resource { + return &schema.Resource{ + Create: resourceSecureSourceManagerBranchRuleCreate, + Read: resourceSecureSourceManagerBranchRuleRead, + Update: resourceSecureSourceManagerBranchRuleUpdate, + Delete: resourceSecureSourceManagerBranchRuleDelete, + + Importer: &schema.ResourceImporter{ + State: resourceSecureSourceManagerBranchRuleImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "branch_rule_id": { + Type: schema.TypeString, + Required: true, + Description: `The ID for the BranchRule.`, + }, + "include_pattern": { + Type: schema.TypeString, + Required: true, + Description: `The BranchRule matches branches based on the specified regular expression. Use .* to match all branches.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + Description: `The location for the Repository.`, + }, + "repository_id": { + Type: schema.TypeString, + Required: true, + Description: `The ID for the Repository.`, + }, + "allow_stale_reviews": { + Type: schema.TypeBool, + Optional: true, + Description: `Determines whether stale reviews or approvals are allowed before merging to the branch.`, + }, + "disabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Determines whether the branch rule is disabled.`, + }, + "minimum_approvals_count": { + Type: schema.TypeInt, + Optional: true, + Description: `The minimum number of approvals required for the branch rule to be matched.`, + }, + "minimum_reviews_count": { + Type: schema.TypeInt, + Optional: true, + Description: `The minimum number of reviews required for the branch rule to be matched.`, + }, + "require_comments_resolved": { + Type: schema.TypeBool, + Optional: true, + Description: `Determines whether comments must be resolved before merging to the branch.`, + }, + "require_linear_history": { + Type: schema.TypeBool, + Optional: true, + Description: `Determines whether a linear history is required before merging to the branch.`, + }, + "require_pull_request": { + Type: schema.TypeBool, + Optional: true, + Description: `Determines whether the branch rule requires a pull request.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the BranchRule was created in UTC.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name for the BranchRule.`, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `Unique identifier of the BranchRule.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the BranchRule was updated in UTC.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func 
resourceSecureSourceManagerBranchRuleCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + includePatternProp, err := expandSecureSourceManagerBranchRuleIncludePattern(d.Get("include_pattern"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("include_pattern"); !tpgresource.IsEmptyValue(reflect.ValueOf(includePatternProp)) && (ok || !reflect.DeepEqual(v, includePatternProp)) { + obj["includePattern"] = includePatternProp + } + disabledProp, err := expandSecureSourceManagerBranchRuleDisabled(d.Get("disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(disabledProp)) && (ok || !reflect.DeepEqual(v, disabledProp)) { + obj["disabled"] = disabledProp + } + requirePullRequestProp, err := expandSecureSourceManagerBranchRuleRequirePullRequest(d.Get("require_pull_request"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("require_pull_request"); !tpgresource.IsEmptyValue(reflect.ValueOf(requirePullRequestProp)) && (ok || !reflect.DeepEqual(v, requirePullRequestProp)) { + obj["requirePullRequest"] = requirePullRequestProp + } + minimumReviewsCountProp, err := expandSecureSourceManagerBranchRuleMinimumReviewsCount(d.Get("minimum_reviews_count"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("minimum_reviews_count"); !tpgresource.IsEmptyValue(reflect.ValueOf(minimumReviewsCountProp)) && (ok || !reflect.DeepEqual(v, minimumReviewsCountProp)) { + obj["minimumReviewsCount"] = minimumReviewsCountProp + } + minimumApprovalsCountProp, err := expandSecureSourceManagerBranchRuleMinimumApprovalsCount(d.Get("minimum_approvals_count"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("minimum_approvals_count"); !tpgresource.IsEmptyValue(reflect.ValueOf(minimumApprovalsCountProp)) && (ok || !reflect.DeepEqual(v, minimumApprovalsCountProp)) { + obj["minimumApprovalsCount"] = minimumApprovalsCountProp + } + requireCommentsResolvedProp, err := expandSecureSourceManagerBranchRuleRequireCommentsResolved(d.Get("require_comments_resolved"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("require_comments_resolved"); !tpgresource.IsEmptyValue(reflect.ValueOf(requireCommentsResolvedProp)) && (ok || !reflect.DeepEqual(v, requireCommentsResolvedProp)) { + obj["requireCommentsResolved"] = requireCommentsResolvedProp + } + allowStaleReviewsProp, err := expandSecureSourceManagerBranchRuleAllowStaleReviews(d.Get("allow_stale_reviews"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("allow_stale_reviews"); !tpgresource.IsEmptyValue(reflect.ValueOf(allowStaleReviewsProp)) && (ok || !reflect.DeepEqual(v, allowStaleReviewsProp)) { + obj["allowStaleReviews"] = allowStaleReviewsProp + } + requireLinearHistoryProp, err := expandSecureSourceManagerBranchRuleRequireLinearHistory(d.Get("require_linear_history"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("require_linear_history"); !tpgresource.IsEmptyValue(reflect.ValueOf(requireLinearHistoryProp)) && (ok || !reflect.DeepEqual(v, requireLinearHistoryProp)) { + obj["requireLinearHistory"] = requireLinearHistoryProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{SecureSourceManagerBasePath}}projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}/branchRules?branch_rule_id={{branch_rule_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new BranchRule: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BranchRule: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating BranchRule: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}/branchRules/{{branch_rule_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = SecureSourceManagerOperationWaitTime( + config, res, project, "Creating BranchRule", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create BranchRule: %s", err) + } + + log.Printf("[DEBUG] Finished creating BranchRule %q: %#v", d.Id(), res) + + return resourceSecureSourceManagerBranchRuleRead(d, meta) +} + +func resourceSecureSourceManagerBranchRuleRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecureSourceManagerBasePath}}projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}/branchRules/{{branch_rule_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BranchRule: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecureSourceManagerBranchRule %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading BranchRule: %s", err) + } + + if err := d.Set("name", flattenSecureSourceManagerBranchRuleName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading BranchRule: %s", err) + } + if err := d.Set("uid", flattenSecureSourceManagerBranchRuleUid(res["uid"], d, config)); err != nil { + return fmt.Errorf("Error reading BranchRule: %s", err) + } + if err := d.Set("create_time", flattenSecureSourceManagerBranchRuleCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading BranchRule: %s", err) + } + if err := d.Set("update_time", 
flattenSecureSourceManagerBranchRuleUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading BranchRule: %s", err) + } + if err := d.Set("include_pattern", flattenSecureSourceManagerBranchRuleIncludePattern(res["includePattern"], d, config)); err != nil { + return fmt.Errorf("Error reading BranchRule: %s", err) + } + if err := d.Set("disabled", flattenSecureSourceManagerBranchRuleDisabled(res["disabled"], d, config)); err != nil { + return fmt.Errorf("Error reading BranchRule: %s", err) + } + if err := d.Set("require_pull_request", flattenSecureSourceManagerBranchRuleRequirePullRequest(res["requirePullRequest"], d, config)); err != nil { + return fmt.Errorf("Error reading BranchRule: %s", err) + } + if err := d.Set("minimum_reviews_count", flattenSecureSourceManagerBranchRuleMinimumReviewsCount(res["minimumReviewsCount"], d, config)); err != nil { + return fmt.Errorf("Error reading BranchRule: %s", err) + } + if err := d.Set("minimum_approvals_count", flattenSecureSourceManagerBranchRuleMinimumApprovalsCount(res["minimumApprovalsCount"], d, config)); err != nil { + return fmt.Errorf("Error reading BranchRule: %s", err) + } + if err := d.Set("require_comments_resolved", flattenSecureSourceManagerBranchRuleRequireCommentsResolved(res["requireCommentsResolved"], d, config)); err != nil { + return fmt.Errorf("Error reading BranchRule: %s", err) + } + if err := d.Set("allow_stale_reviews", flattenSecureSourceManagerBranchRuleAllowStaleReviews(res["allowStaleReviews"], d, config)); err != nil { + return fmt.Errorf("Error reading BranchRule: %s", err) + } + if err := d.Set("require_linear_history", flattenSecureSourceManagerBranchRuleRequireLinearHistory(res["requireLinearHistory"], d, config)); err != nil { + return fmt.Errorf("Error reading BranchRule: %s", err) + } + + return nil +} + +func resourceSecureSourceManagerBranchRuleUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BranchRule: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + includePatternProp, err := expandSecureSourceManagerBranchRuleIncludePattern(d.Get("include_pattern"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("include_pattern"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, includePatternProp)) { + obj["includePattern"] = includePatternProp + } + disabledProp, err := expandSecureSourceManagerBranchRuleDisabled(d.Get("disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disabledProp)) { + obj["disabled"] = disabledProp + } + requirePullRequestProp, err := expandSecureSourceManagerBranchRuleRequirePullRequest(d.Get("require_pull_request"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("require_pull_request"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, requirePullRequestProp)) { + obj["requirePullRequest"] = requirePullRequestProp + } + minimumReviewsCountProp, err := expandSecureSourceManagerBranchRuleMinimumReviewsCount(d.Get("minimum_reviews_count"), d, config) + if err != nil { + return err + } else if 
v, ok := d.GetOkExists("minimum_reviews_count"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, minimumReviewsCountProp)) { + obj["minimumReviewsCount"] = minimumReviewsCountProp + } + minimumApprovalsCountProp, err := expandSecureSourceManagerBranchRuleMinimumApprovalsCount(d.Get("minimum_approvals_count"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("minimum_approvals_count"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, minimumApprovalsCountProp)) { + obj["minimumApprovalsCount"] = minimumApprovalsCountProp + } + requireCommentsResolvedProp, err := expandSecureSourceManagerBranchRuleRequireCommentsResolved(d.Get("require_comments_resolved"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("require_comments_resolved"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, requireCommentsResolvedProp)) { + obj["requireCommentsResolved"] = requireCommentsResolvedProp + } + allowStaleReviewsProp, err := expandSecureSourceManagerBranchRuleAllowStaleReviews(d.Get("allow_stale_reviews"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("allow_stale_reviews"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, allowStaleReviewsProp)) { + obj["allowStaleReviews"] = allowStaleReviewsProp + } + requireLinearHistoryProp, err := expandSecureSourceManagerBranchRuleRequireLinearHistory(d.Get("require_linear_history"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("require_linear_history"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, requireLinearHistoryProp)) { + obj["requireLinearHistory"] = requireLinearHistoryProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecureSourceManagerBasePath}}projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}/branchRules/{{branch_rule_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating BranchRule %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("include_pattern") { + updateMask = append(updateMask, "includePattern") + } + + if d.HasChange("disabled") { + updateMask = append(updateMask, "disabled") + } + + if d.HasChange("require_pull_request") { + updateMask = append(updateMask, "requirePullRequest") + } + + if d.HasChange("minimum_reviews_count") { + updateMask = append(updateMask, "minimumReviewsCount") + } + + if d.HasChange("minimum_approvals_count") { + updateMask = append(updateMask, "minimumApprovalsCount") + } + + if d.HasChange("require_comments_resolved") { + updateMask = append(updateMask, "requireCommentsResolved") + } + + if d.HasChange("allow_stale_reviews") { + updateMask = append(updateMask, "allowStaleReviews") + } + + if d.HasChange("require_linear_history") { + updateMask = append(updateMask, "requireLinearHistory") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating BranchRule %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating BranchRule %q: %#v", d.Id(), res) + } + + } + + return resourceSecureSourceManagerBranchRuleRead(d, meta) +} + +func resourceSecureSourceManagerBranchRuleDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BranchRule: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{SecureSourceManagerBasePath}}projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}/branchRules/{{branch_rule_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting BranchRule %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "BranchRule") + } + + err = SecureSourceManagerOperationWaitTime( + config, res, project, "Deleting BranchRule", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting BranchRule %q: %#v", d.Id(), res) + return nil +} + +func resourceSecureSourceManagerBranchRuleImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/repositories/(?P<repository_id>[^/]+)/branchRules/(?P<branch_rule_id>[^/]+)$", + "^(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<repository_id>[^/]+)/(?P<branch_rule_id>[^/]+)$", + "^(?P<location>[^/]+)/(?P<repository_id>[^/]+)/(?P<branch_rule_id>[^/]+)$", + "^(?P<branch_rule_id>[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}/branchRules/{{branch_rule_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenSecureSourceManagerBranchRuleName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecureSourceManagerBranchRuleUid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecureSourceManagerBranchRuleCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecureSourceManagerBranchRuleUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecureSourceManagerBranchRuleIncludePattern(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecureSourceManagerBranchRuleDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecureSourceManagerBranchRuleRequirePullRequest(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecureSourceManagerBranchRuleMinimumReviewsCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenSecureSourceManagerBranchRuleMinimumApprovalsCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenSecureSourceManagerBranchRuleRequireCommentsResolved(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecureSourceManagerBranchRuleAllowStaleReviews(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecureSourceManagerBranchRuleRequireLinearHistory(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandSecureSourceManagerBranchRuleIncludePattern(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecureSourceManagerBranchRuleDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecureSourceManagerBranchRuleRequirePullRequest(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecureSourceManagerBranchRuleMinimumReviewsCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecureSourceManagerBranchRuleMinimumApprovalsCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecureSourceManagerBranchRuleRequireCommentsResolved(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecureSourceManagerBranchRuleAllowStaleReviews(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecureSourceManagerBranchRuleRequireLinearHistory(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securesourcemanager/resource_secure_source_manager_branch_rule_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securesourcemanager/resource_secure_source_manager_branch_rule_sweeper.go new file mode 100644 index 00000000000..2fed8329f93 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securesourcemanager/resource_secure_source_manager_branch_rule_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package securesourcemanager + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("SecureSourceManagerBranchRule", testSweepSecureSourceManagerBranchRule) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepSecureSourceManagerBranchRule(region string) error { + resourceName := "SecureSourceManagerBranchRule" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://securesourcemanager.googleapis.com/v1/projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}/branchRules?branch_rule_id={{branch_rule_id}}", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["branchRules"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items 
in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be swept + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://securesourcemanager.googleapis.com/v1/projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}/branchRules/{{branch_rule_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_folder_notification_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_folder_notification_config.go new file mode 100644 index 00000000000..154b2efc373 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_folder_notification_config.go @@ -0,0 +1,475 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package securitycenter + +import ( + "fmt" + "log" + "net/http" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceSecurityCenterFolderNotificationConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceSecurityCenterFolderNotificationConfigCreate, + Read: resourceSecurityCenterFolderNotificationConfigRead, + Update: resourceSecurityCenterFolderNotificationConfigUpdate, + Delete: resourceSecurityCenterFolderNotificationConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceSecurityCenterFolderNotificationConfigImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "config_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `This must be unique within the organization.`, + }, + "folder": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Numerical ID of the parent folder.`, + }, + "pubsub_topic": { + Type: schema.TypeString, + Required: true, + Description: `The Pub/Sub topic to send notifications to. Its format is +"projects/[project_id]/topics/[topic]".`, + }, + "streaming_config": { + Type: schema.TypeList, + Required: true, + Description: `The config for triggering streaming-based notifications.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "filter": { + Type: schema.TypeString, + Required: true, + Description: `Expression that defines the filter to apply across create/update +events of assets or findings as specified by the event type. The +expression is a list of zero or more restrictions combined via +logical operators AND and OR. Parentheses are supported, and OR +has higher precedence than AND. + +Restrictions have the form <field> <operator> <value> and may have +a - character in front of them to indicate negation. The fields +map to those defined in the corresponding resource. + +The supported operators are: + +* = for all value types. +* >, <, >=, <= for integer values. +* :, meaning substring matching, for strings. + +The supported value types are: + +* string literals in quotes. +* integer literals without quotes. +* boolean literals true and false without quotes. 
+ +See +[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) +for information on how to write a filter.`, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), + Description: `The description of the notification config (max of 1024 characters).`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of this notification config, in the format +'folders/{{folder}}/notificationConfigs/{{config_id}}'.`, + }, + "service_account": { + Type: schema.TypeString, + Computed: true, + Description: `The service account that needs "pubsub.topics.publish" permission to +publish to the Pub/Sub topic.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSecurityCenterFolderNotificationConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterFolderNotificationConfigDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + pubsubTopicProp, err := expandSecurityCenterFolderNotificationConfigPubsubTopic(d.Get("pubsub_topic"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("pubsub_topic"); !tpgresource.IsEmptyValue(reflect.ValueOf(pubsubTopicProp)) && (ok || !reflect.DeepEqual(v, pubsubTopicProp)) { + obj["pubsubTopic"] = pubsubTopicProp + } + streamingConfigProp, err := expandSecurityCenterFolderNotificationConfigStreamingConfig(d.Get("streaming_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("streaming_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(streamingConfigProp)) && (ok || !reflect.DeepEqual(v, streamingConfigProp)) { + obj["streamingConfig"] = streamingConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}folders/{{folder}}/notificationConfigs?configId={{config_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new FolderNotificationConfig: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating FolderNotificationConfig: %s", err) + } + if err := d.Set("name", flattenSecurityCenterFolderNotificationConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "folders/{{folder}}/notificationConfigs/{{config_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating FolderNotificationConfig %q: %#v", d.Id(), res) + + 
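+ // Descriptive note (editorial sketch, not upstream generated code): creation
+ // of this resource is not operation-based -- the POST above returns the
+ // notification config directly, so state is refreshed by the Read call below
+ // rather than by an OperationWaitTime helper as in the BranchRule resource.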
return resourceSecurityCenterFolderNotificationConfigRead(d, meta) +} + +func resourceSecurityCenterFolderNotificationConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}folders/{{folder}}/notificationConfigs/{{config_id}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecurityCenterFolderNotificationConfig %q", d.Id())) + } + + if err := d.Set("name", flattenSecurityCenterFolderNotificationConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderNotificationConfig: %s", err) + } + if err := d.Set("description", flattenSecurityCenterFolderNotificationConfigDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderNotificationConfig: %s", err) + } + if err := d.Set("pubsub_topic", flattenSecurityCenterFolderNotificationConfigPubsubTopic(res["pubsubTopic"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderNotificationConfig: %s", err) + } + if err := d.Set("service_account", flattenSecurityCenterFolderNotificationConfigServiceAccount(res["serviceAccount"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderNotificationConfig: %s", err) + } + if err := d.Set("streaming_config", flattenSecurityCenterFolderNotificationConfigStreamingConfig(res["streamingConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderNotificationConfig: %s", err) + } + + return nil +} + +func resourceSecurityCenterFolderNotificationConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterFolderNotificationConfigDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + pubsubTopicProp, err := expandSecurityCenterFolderNotificationConfigPubsubTopic(d.Get("pubsub_topic"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("pubsub_topic"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, pubsubTopicProp)) { + obj["pubsubTopic"] = pubsubTopicProp + } + streamingConfigProp, err := expandSecurityCenterFolderNotificationConfigStreamingConfig(d.Get("streaming_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("streaming_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, streamingConfigProp)) { + obj["streamingConfig"] = streamingConfigProp + } + + url, err := 
tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}folders/{{folder}}/notificationConfigs/{{config_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating FolderNotificationConfig %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("pubsub_topic") { + updateMask = append(updateMask, "pubsubTopic") + } + + if d.HasChange("streaming_config") { + updateMask = append(updateMask, "streamingConfig.filter") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating FolderNotificationConfig %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating FolderNotificationConfig %q: %#v", d.Id(), res) + } + + } + + return resourceSecurityCenterFolderNotificationConfigRead(d, meta) +} + +func resourceSecurityCenterFolderNotificationConfigDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}folders/{{folder}}/notificationConfigs/{{config_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting FolderNotificationConfig %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "FolderNotificationConfig") + } + + log.Printf("[DEBUG] Finished deleting FolderNotificationConfig %q: %#v", d.Id(), res) + return nil +} + +func resourceSecurityCenterFolderNotificationConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^folders/(?P<folder>[^/]+)/notificationConfigs/(?P<config_id>[^/]+)$", + "^(?P<folder>[^/]+)/(?P<config_id>[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "folders/{{folder}}/notificationConfigs/{{config_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + idParts := strings.Split(d.Id(), "/") + if len(idParts) != 4 { + return nil, 
fmt.Errorf("unexpected format of ID (%q), expected folders/{{folder}}/notificationConfigs/{{config_id}}", d.Id()) + } + + if err := d.Set("folder", idParts[1]); err != nil { + return nil, fmt.Errorf("error setting folder: %s", err) + } + + if err := d.Set("config_id", idParts[3]); err != nil { + return nil, fmt.Errorf("error setting config_id: %s", err) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenSecurityCenterFolderNotificationConfigName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterFolderNotificationConfigDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterFolderNotificationConfigPubsubTopic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterFolderNotificationConfigServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterFolderNotificationConfigStreamingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["filter"] = + flattenSecurityCenterFolderNotificationConfigStreamingConfigFilter(original["filter"], d, config) + return []interface{}{transformed} +} +func flattenSecurityCenterFolderNotificationConfigStreamingConfigFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandSecurityCenterFolderNotificationConfigDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterFolderNotificationConfigPubsubTopic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterFolderNotificationConfigStreamingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFilter, err := expandSecurityCenterFolderNotificationConfigStreamingConfigFilter(original["filter"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["filter"] = transformedFilter + } + + return transformed, nil +} + +func expandSecurityCenterFolderNotificationConfigStreamingConfigFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_folder_notification_config_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_folder_notification_config_sweeper.go new file mode 100644 index 00000000000..01ea5f7abcc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_folder_notification_config_sweeper.go 
@@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package securitycenter + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("SecurityCenterFolderNotificationConfig", testSweepSecurityCenterFolderNotificationConfig) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepSecurityCenterFolderNotificationConfig(region string) error { + resourceName := "SecurityCenterFolderNotificationConfig" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://securitycenter.googleapis.com/v1/folders/{{folder}}/notificationConfigs", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["folderNotificationConfigs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
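+ // List entries may expose either an "id" or a "name" field; "id" is
+ // preferred when present because it matches the delete URL format, and
+ // GetResourceNameFromSelfLink reduces either value to its trailing path
+ // segment.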
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://securitycenter.googleapis.com/v1/folders/{{folder}}/notificationConfigs/{{config_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_folder_scc_big_query_export.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_folder_scc_big_query_export.go new file mode 100644 index 00000000000..4655e2e42b5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_folder_scc_big_query_export.go @@ -0,0 +1,462 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package securitycenter + +import ( + "fmt" + "log" + "net/http" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceSecurityCenterFolderSccBigQueryExport() *schema.Resource { + return &schema.Resource{ + Create: resourceSecurityCenterFolderSccBigQueryExportCreate, + Read: resourceSecurityCenterFolderSccBigQueryExportRead, + Update: resourceSecurityCenterFolderSccBigQueryExportUpdate, + Delete: resourceSecurityCenterFolderSccBigQueryExportDelete, + + Importer: &schema.ResourceImporter{ + State: resourceSecurityCenterFolderSccBigQueryExportImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "big_query_export_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `This must be unique within the organization.`, + }, + "dataset": { + Type: schema.TypeString, + Required: true, + Description: `The dataset to write findings' updates to. +Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". +BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).`, + }, + "description": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 1024), + Description: `The description of the export (max of 1024 characters).`, + }, + "filter": { + Type: schema.TypeString, + Required: true, + Description: `Expression that defines the filter to apply across create/update +events of findings. The +expression is a list of zero or more restrictions combined via +logical operators AND and OR. Parentheses are supported, and OR +has higher precedence than AND. + +Restrictions have the form <field> <operator> <value> and may have +a - character in front of them to indicate negation. The fields +map to those defined in the corresponding resource. + +The supported operators are: + +* = for all value types. +* >, <, >=, <= for integer values. +* :, meaning substring matching, for strings. + +The supported value types are: + +* string literals in quotes. +* integer literals without quotes. +* boolean literals true and false without quotes. + +See +[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) +for information on how to write a filter.`, + }, + "folder": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The folder where the Cloud Security Command Center Big Query Export +Config lives.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time at which the BigQuery export was created. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
+Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "most_recent_editor": { + Type: schema.TypeString, + Computed: true, + Description: `Email address of the user who last edited the BigQuery export.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of this export, in the format +'projects/{{project}}/bigQueryExports/{{big_query_export_id}}'. +This field is provided in responses, and is ignored when provided in create requests.`, + }, + "principal": { + Type: schema.TypeString, + Computed: true, + Description: `The service account that needs permission to create table and upload data to the BigQuery dataset.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The most recent time at which the BigQuery export was updated. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSecurityCenterFolderSccBigQueryExportCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterFolderSccBigQueryExportDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + datasetProp, err := expandSecurityCenterFolderSccBigQueryExportDataset(d.Get("dataset"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dataset"); !tpgresource.IsEmptyValue(reflect.ValueOf(datasetProp)) && (ok || !reflect.DeepEqual(v, datasetProp)) { + obj["dataset"] = datasetProp + } + filterProp, err := expandSecurityCenterFolderSccBigQueryExportFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); ok || !reflect.DeepEqual(v, filterProp) { + obj["filter"] = filterProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}folders/{{folder}}/bigQueryExports?bigQueryExportId={{big_query_export_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new FolderSccBigQueryExport: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating FolderSccBigQueryExport: %s", err) + } + if err := d.Set("name", flattenSecurityCenterFolderSccBigQueryExportName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "folders/{{folder}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + 
log.Printf("[DEBUG] Finished creating FolderSccBigQueryExport %q: %#v", d.Id(), res) + + return resourceSecurityCenterFolderSccBigQueryExportRead(d, meta) +} + +func resourceSecurityCenterFolderSccBigQueryExportRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}folders/{{folder}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecurityCenterFolderSccBigQueryExport %q", d.Id())) + } + + if err := d.Set("name", flattenSecurityCenterFolderSccBigQueryExportName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderSccBigQueryExport: %s", err) + } + if err := d.Set("description", flattenSecurityCenterFolderSccBigQueryExportDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderSccBigQueryExport: %s", err) + } + if err := d.Set("dataset", flattenSecurityCenterFolderSccBigQueryExportDataset(res["dataset"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderSccBigQueryExport: %s", err) + } + if err := d.Set("create_time", flattenSecurityCenterFolderSccBigQueryExportCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderSccBigQueryExport: %s", err) + } + if err := d.Set("update_time", flattenSecurityCenterFolderSccBigQueryExportUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderSccBigQueryExport: %s", err) + } + if err := d.Set("most_recent_editor", flattenSecurityCenterFolderSccBigQueryExportMostRecentEditor(res["mostRecentEditor"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderSccBigQueryExport: %s", err) + } + if err := d.Set("principal", flattenSecurityCenterFolderSccBigQueryExportPrincipal(res["principal"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderSccBigQueryExport: %s", err) + } + if err := d.Set("filter", flattenSecurityCenterFolderSccBigQueryExportFilter(res["filter"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderSccBigQueryExport: %s", err) + } + + return nil +} + +func resourceSecurityCenterFolderSccBigQueryExportUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterFolderSccBigQueryExportDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + datasetProp, err := 
expandSecurityCenterFolderSccBigQueryExportDataset(d.Get("dataset"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dataset"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, datasetProp)) { + obj["dataset"] = datasetProp + } + filterProp, err := expandSecurityCenterFolderSccBigQueryExportFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); ok || !reflect.DeepEqual(v, filterProp) { + obj["filter"] = filterProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}folders/{{folder}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating FolderSccBigQueryExport %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("dataset") { + updateMask = append(updateMask, "dataset") + } + + if d.HasChange("filter") { + updateMask = append(updateMask, "filter") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating FolderSccBigQueryExport %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating FolderSccBigQueryExport %q: %#v", d.Id(), res) + } + + } + + return resourceSecurityCenterFolderSccBigQueryExportRead(d, meta) +} + +func resourceSecurityCenterFolderSccBigQueryExportDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}folders/{{folder}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting FolderSccBigQueryExport %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "FolderSccBigQueryExport") + } + + log.Printf("[DEBUG] Finished deleting FolderSccBigQueryExport %q: %#v", d.Id(), res) + return nil +} + +func resourceSecurityCenterFolderSccBigQueryExportImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := 
meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^folders/(?P<folder>[^/]+)/bigQueryExports/(?P<big_query_export_id>[^/]+)$", + "^(?P<folder>[^/]+)/(?P<big_query_export_id>[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "folders/{{folder}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenSecurityCenterFolderSccBigQueryExportName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterFolderSccBigQueryExportDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterFolderSccBigQueryExportDataset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterFolderSccBigQueryExportCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterFolderSccBigQueryExportUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterFolderSccBigQueryExportMostRecentEditor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterFolderSccBigQueryExportPrincipal(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterFolderSccBigQueryExportFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandSecurityCenterFolderSccBigQueryExportDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterFolderSccBigQueryExportDataset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterFolderSccBigQueryExportFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_folder_scc_big_query_export_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_folder_scc_big_query_export_sweeper.go new file mode 100644 index 00000000000..d0e315c638d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_folder_scc_big_query_export_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md.
+// +// ---------------------------------------------------------------------------- + +package securitycenter + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("SecurityCenterFolderSccBigQueryExport", testSweepSecurityCenterFolderSccBigQueryExport) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepSecurityCenterFolderSccBigQueryExport(region string) error { + resourceName := "SecurityCenterFolderSccBigQueryExport" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://securitycenter.googleapis.com/v1/folders/{{folder}}/bigQueryExports", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["folderSccBigQueryExports"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
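+ // Prefer the id field when the API returns one; otherwise fall back to
+ // name. Both are treated as self links, so only the trailing segment is kept.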
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be swept + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://securitycenter.googleapis.com/v1/folders/{{folder}}/bigQueryExports/{{big_query_export_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_notification_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_notification_config.go index 3bdd8045c8f..e8a710b6bdd 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_notification_config.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_notification_config.go @@ -154,7 +154,7 @@ func resourceSecurityCenterNotificationConfigCreate(d *schema.ResourceData, meta streamingConfigProp, err := expandSecurityCenterNotificationConfigStreamingConfig(d.Get("streaming_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("streaming_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(streamingConfigProp)) && (ok || !reflect.DeepEqual(v, streamingConfigProp)) { + } else if v, ok := d.GetOkExists("streaming_config"); ok || !reflect.DeepEqual(v, streamingConfigProp) { obj["streamingConfig"] = streamingConfigProp } @@ -295,7 +295,7 @@ func resourceSecurityCenterNotificationConfigUpdate(d *schema.ResourceData, meta streamingConfigProp, err := expandSecurityCenterNotificationConfigStreamingConfig(d.Get("streaming_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("streaming_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, streamingConfigProp)) { + } else if v, ok := d.GetOkExists("streaming_config"); ok || !reflect.DeepEqual(v, streamingConfigProp) { obj["streamingConfig"] = streamingConfigProp } @@ -441,9 +441,6 @@ func flattenSecurityCenterNotificationConfigStreamingConfig(v interface{}, d *sc return nil } original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } transformed := make(map[string]interface{}) transformed["filter"] = flattenSecurityCenterNotificationConfigStreamingConfigFilter(original["filter"], d, config) @@ -463,9 +460,14 @@
func expandSecurityCenterNotificationConfigPubsubTopic(v interface{}, d tpgresou func expandSecurityCenterNotificationConfigStreamingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { + if len(l) == 0 { return nil, nil } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } raw := l[0] original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) @@ -473,7 +475,7 @@ func expandSecurityCenterNotificationConfigStreamingConfig(v interface{}, d tpgr transformedFilter, err := expandSecurityCenterNotificationConfigStreamingConfigFilter(original["filter"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + } else { transformed["filter"] = transformedFilter } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_organization_scc_big_query_export.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_organization_scc_big_query_export.go new file mode 100644 index 00000000000..7d6343ff167 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_organization_scc_big_query_export.go @@ -0,0 +1,463 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package securitycenter + +import ( + "fmt" + "log" + "net/http" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceSecurityCenterOrganizationSccBigQueryExport() *schema.Resource { + return &schema.Resource{ + Create: resourceSecurityCenterOrganizationSccBigQueryExportCreate, + Read: resourceSecurityCenterOrganizationSccBigQueryExportRead, + Update: resourceSecurityCenterOrganizationSccBigQueryExportUpdate, + Delete: resourceSecurityCenterOrganizationSccBigQueryExportDelete, + + Importer: &schema.ResourceImporter{ + State: resourceSecurityCenterOrganizationSccBigQueryExportImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "big_query_export_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `This must be unique within the organization.`, + }, + "organization": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The organization whose Cloud Security Command Center the Big Query Export +Config lives in.`, + }, + "dataset": { + Type: schema.TypeString, + Optional: true, + Description: `The dataset to write findings' updates to. +Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". +BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), + Description: `The description of the notification config (max of 1024 characters).`, + }, + "filter": { + Type: schema.TypeString, + Optional: true, + Description: `Expression that defines the filter to apply across create/update +events of findings. The +expression is a list of zero or more restrictions combined via +logical operators AND and OR. Parentheses are supported, and OR +has higher precedence than AND. + +Restrictions have the form <field> <operator> <value> and may have +a - character in front of them to indicate negation. The fields +map to those defined in the corresponding resource. + +The supported operators are: + +* = for all value types. +* \>, <, >=, <= for integer values. +* :, meaning substring matching, for strings. + +The supported value types are: + +* string literals in quotes. +* integer literals without quotes. +* boolean literals true and false without quotes. + +See +[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) +for information on how to write a filter.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export creation. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
+Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "most_recent_editor": { + Type: schema.TypeString, + Computed: true, + Description: `Email address of the user who last edited the BigQuery export. +This field is set by the server and will be ignored if provided on export creation or update.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of this export, in the format +'organizations/{{organization}}/bigQueryExports/{{big_query_export_id}}'. +This field is provided in responses, and is ignored when provided in create requests.`, + }, + "principal": { + Type: schema.TypeString, + Computed: true, + Description: `The service account that needs permission to create table and upload data to the BigQuery dataset.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSecurityCenterOrganizationSccBigQueryExportCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterOrganizationSccBigQueryExportDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + datasetProp, err := expandSecurityCenterOrganizationSccBigQueryExportDataset(d.Get("dataset"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dataset"); !tpgresource.IsEmptyValue(reflect.ValueOf(datasetProp)) && (ok || !reflect.DeepEqual(v, datasetProp)) { + obj["dataset"] = datasetProp + } + filterProp, err := expandSecurityCenterOrganizationSccBigQueryExportFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(filterProp)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}organizations/{{organization}}/bigQueryExports?bigQueryExportId={{big_query_export_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new OrganizationSccBigQueryExport: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating OrganizationSccBigQueryExport: %s", err) + } + if err := d.Set("name", flattenSecurityCenterOrganizationSccBigQueryExportName(res["name"], d, config)); err != nil { + 
return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{organization}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating OrganizationSccBigQueryExport %q: %#v", d.Id(), res) + + return resourceSecurityCenterOrganizationSccBigQueryExportRead(d, meta) +} + +func resourceSecurityCenterOrganizationSccBigQueryExportRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}organizations/{{organization}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecurityCenterOrganizationSccBigQueryExport %q", d.Id())) + } + + if err := d.Set("name", flattenSecurityCenterOrganizationSccBigQueryExportName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSccBigQueryExport: %s", err) + } + if err := d.Set("description", flattenSecurityCenterOrganizationSccBigQueryExportDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSccBigQueryExport: %s", err) + } + if err := d.Set("dataset", flattenSecurityCenterOrganizationSccBigQueryExportDataset(res["dataset"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSccBigQueryExport: %s", err) + } + if err := d.Set("create_time", flattenSecurityCenterOrganizationSccBigQueryExportCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSccBigQueryExport: %s", err) + } + if err := d.Set("update_time", flattenSecurityCenterOrganizationSccBigQueryExportUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSccBigQueryExport: %s", err) + } + if err := d.Set("most_recent_editor", flattenSecurityCenterOrganizationSccBigQueryExportMostRecentEditor(res["mostRecentEditor"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSccBigQueryExport: %s", err) + } + if err := d.Set("principal", flattenSecurityCenterOrganizationSccBigQueryExportPrincipal(res["principal"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSccBigQueryExport: %s", err) + } + if err := d.Set("filter", flattenSecurityCenterOrganizationSccBigQueryExportFilter(res["filter"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSccBigQueryExport: %s", err) + } + + return nil +} + +func resourceSecurityCenterOrganizationSccBigQueryExportUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + 
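+	// Only the user-configurable fields (description, dataset, filter) are
+	// assembled into the request body; server-managed output fields are never sent.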
obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterOrganizationSccBigQueryExportDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + datasetProp, err := expandSecurityCenterOrganizationSccBigQueryExportDataset(d.Get("dataset"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dataset"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, datasetProp)) { + obj["dataset"] = datasetProp + } + filterProp, err := expandSecurityCenterOrganizationSccBigQueryExportFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}organizations/{{organization}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating OrganizationSccBigQueryExport %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("dataset") { + updateMask = append(updateMask, "dataset") + } + + if d.HasChange("filter") { + updateMask = append(updateMask, "filter") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating OrganizationSccBigQueryExport %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating OrganizationSccBigQueryExport %q: %#v", d.Id(), res) + } + + } + + return resourceSecurityCenterOrganizationSccBigQueryExportRead(d, meta) +} + +func resourceSecurityCenterOrganizationSccBigQueryExportDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}organizations/{{organization}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting OrganizationSccBigQueryExport %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: 
"DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "OrganizationSccBigQueryExport") + } + + log.Printf("[DEBUG] Finished deleting OrganizationSccBigQueryExport %q: %#v", d.Id(), res) + return nil +} + +func resourceSecurityCenterOrganizationSccBigQueryExportImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^organizations/(?P[^/]+)/bigQueryExports/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{organization}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenSecurityCenterOrganizationSccBigQueryExportName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterOrganizationSccBigQueryExportDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterOrganizationSccBigQueryExportDataset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterOrganizationSccBigQueryExportCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterOrganizationSccBigQueryExportUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterOrganizationSccBigQueryExportMostRecentEditor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterOrganizationSccBigQueryExportPrincipal(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterOrganizationSccBigQueryExportFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandSecurityCenterOrganizationSccBigQueryExportDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterOrganizationSccBigQueryExportDataset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterOrganizationSccBigQueryExportFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_organization_scc_big_query_export_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_organization_scc_big_query_export_sweeper.go new file mode 100644 index 00000000000..7325040f53e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_organization_scc_big_query_export_sweeper.go @@ -0,0 +1,143 @@ 
+// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package securitycenter + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("SecurityCenterOrganizationSccBigQueryExport", testSweepSecurityCenterOrganizationSccBigQueryExport) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepSecurityCenterOrganizationSccBigQueryExport(region string) error { + resourceName := "SecurityCenterOrganizationSccBigQueryExport" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://securitycenter.googleapis.com/v1/organizations/{{organization}}/bigQueryExports", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["organizationSccBigQueryExports"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
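+ // Use the id field if present and fall back to name otherwise; either way
+ // only the trailing path segment of the self link is used as the resource name.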
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be swept + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://securitycenter.googleapis.com/v1/organizations/{{organization}}/bigQueryExports/{{big_query_export_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_project_notification_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_project_notification_config.go new file mode 100644 index 00000000000..83f1cf86065 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_project_notification_config.go @@ -0,0 +1,519 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md.
+// +// ---------------------------------------------------------------------------- + +package securitycenter + +import ( + "fmt" + "log" + "net/http" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceSecurityCenterProjectNotificationConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceSecurityCenterProjectNotificationConfigCreate, + Read: resourceSecurityCenterProjectNotificationConfigRead, + Update: resourceSecurityCenterProjectNotificationConfigUpdate, + Delete: resourceSecurityCenterProjectNotificationConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceSecurityCenterProjectNotificationConfigImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "config_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `This must be unique within the organization.`, + }, + "pubsub_topic": { + Type: schema.TypeString, + Required: true, + Description: `The Pub/Sub topic to send notifications to. Its format is +"projects/[project_id]/topics/[topic]".`, + }, + "streaming_config": { + Type: schema.TypeList, + Required: true, + Description: `The config for triggering streaming-based notifications.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "filter": { + Type: schema.TypeString, + Required: true, + Description: `Expression that defines the filter to apply across create/update +events of assets or findings as specified by the event type. The +expression is a list of zero or more restrictions combined via +logical operators AND and OR. Parentheses are supported, and OR +has higher precedence than AND. + +Restrictions have the form <field> <operator> <value> and may have +a - character in front of them to indicate negation. The fields +map to those defined in the corresponding resource. + +The supported operators are: + +* = for all value types. +* >, <, >=, <= for integer values. +* :, meaning substring matching, for strings. + +The supported value types are: + +* string literals in quotes. +* integer literals without quotes. +* boolean literals true and false without quotes.
+ +See +[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) +for information on how to write a filter.`, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), + Description: `The description of the notification config (max of 1024 characters).`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of this notification config, in the format +'projects/{{projectId}}/notificationConfigs/{{config_id}}'.`, + }, + "service_account": { + Type: schema.TypeString, + Computed: true, + Description: `The service account that needs "pubsub.topics.publish" permission to +publish to the Pub/Sub topic.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSecurityCenterProjectNotificationConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterProjectNotificationConfigDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + pubsubTopicProp, err := expandSecurityCenterProjectNotificationConfigPubsubTopic(d.Get("pubsub_topic"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("pubsub_topic"); !tpgresource.IsEmptyValue(reflect.ValueOf(pubsubTopicProp)) && (ok || !reflect.DeepEqual(v, pubsubTopicProp)) { + obj["pubsubTopic"] = pubsubTopicProp + } + streamingConfigProp, err := expandSecurityCenterProjectNotificationConfigStreamingConfig(d.Get("streaming_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("streaming_config"); ok || !reflect.DeepEqual(v, streamingConfigProp) { + obj["streamingConfig"] = streamingConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}projects/{{project}}/notificationConfigs?configId={{config_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ProjectNotificationConfig: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectNotificationConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating ProjectNotificationConfig: %s", err) + } + if err := d.Set("name", flattenSecurityCenterProjectNotificationConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") 
+ if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // `name` is autogenerated from the api so needs to be set post-create + name, ok := res["name"] + if !ok { + respBody, ok := res["response"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + + name, ok = respBody.(map[string]interface{})["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + } + if err := d.Set("name", name.(string)); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name.(string)) + + log.Printf("[DEBUG] Finished creating ProjectNotificationConfig %q: %#v", d.Id(), res) + + return resourceSecurityCenterProjectNotificationConfigRead(d, meta) +} + +func resourceSecurityCenterProjectNotificationConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectNotificationConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecurityCenterProjectNotificationConfig %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading ProjectNotificationConfig: %s", err) + } + + if err := d.Set("name", flattenSecurityCenterProjectNotificationConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectNotificationConfig: %s", err) + } + if err := d.Set("description", flattenSecurityCenterProjectNotificationConfigDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectNotificationConfig: %s", err) + } + if err := d.Set("pubsub_topic", flattenSecurityCenterProjectNotificationConfigPubsubTopic(res["pubsubTopic"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectNotificationConfig: %s", err) + } + if err := d.Set("service_account", flattenSecurityCenterProjectNotificationConfigServiceAccount(res["serviceAccount"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectNotificationConfig: %s", err) + } + if err := d.Set("streaming_config", flattenSecurityCenterProjectNotificationConfigStreamingConfig(res["streamingConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectNotificationConfig: %s", err) + } + + return nil +} + +func resourceSecurityCenterProjectNotificationConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + 
return fmt.Errorf("Error fetching project for ProjectNotificationConfig: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterProjectNotificationConfigDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + pubsubTopicProp, err := expandSecurityCenterProjectNotificationConfigPubsubTopic(d.Get("pubsub_topic"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("pubsub_topic"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, pubsubTopicProp)) { + obj["pubsubTopic"] = pubsubTopicProp + } + streamingConfigProp, err := expandSecurityCenterProjectNotificationConfigStreamingConfig(d.Get("streaming_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("streaming_config"); ok || !reflect.DeepEqual(v, streamingConfigProp) { + obj["streamingConfig"] = streamingConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating ProjectNotificationConfig %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("pubsub_topic") { + updateMask = append(updateMask, "pubsubTopic") + } + + if d.HasChange("streaming_config") { + updateMask = append(updateMask, "streamingConfig.filter") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating ProjectNotificationConfig %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ProjectNotificationConfig %q: %#v", d.Id(), res) + } + + } + + return resourceSecurityCenterProjectNotificationConfigRead(d, meta) +} + +func resourceSecurityCenterProjectNotificationConfigDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectNotificationConfig: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + 
billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting ProjectNotificationConfig %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ProjectNotificationConfig") + } + + log.Printf("[DEBUG] Finished deleting ProjectNotificationConfig %q: %#v", d.Id(), res) + return nil +} + +func resourceSecurityCenterProjectNotificationConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P<name>.+)"}, d, config); err != nil { + return nil, err + } + + stringParts := strings.Split(d.Get("name").(string), "/") + if len(stringParts) < 2 { + return nil, fmt.Errorf( + "Could not split project from name: %s", + d.Get("name"), + ) + } + + if err := d.Set("project", stringParts[1]); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + return []*schema.ResourceData{d}, nil +} + +func flattenSecurityCenterProjectNotificationConfigName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterProjectNotificationConfigDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterProjectNotificationConfigPubsubTopic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterProjectNotificationConfigServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterProjectNotificationConfigStreamingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["filter"] = + flattenSecurityCenterProjectNotificationConfigStreamingConfigFilter(original["filter"], d, config) + return []interface{}{transformed} +} +func flattenSecurityCenterProjectNotificationConfigStreamingConfigFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandSecurityCenterProjectNotificationConfigDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterProjectNotificationConfigPubsubTopic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterProjectNotificationConfigStreamingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFilter, err := expandSecurityCenterProjectNotificationConfigStreamingConfigFilter(original["filter"], d, config) + if err != nil { + return
nil, err + } else { + transformed["filter"] = transformedFilter + } + + return transformed, nil +} + +func expandSecurityCenterProjectNotificationConfigStreamingConfigFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_project_default_config_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_project_notification_config_sweeper.go similarity index 87% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_project_default_config_sweeper.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_project_notification_config_sweeper.go index e2743acf41d..a358836c690 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_project_default_config_sweeper.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_project_notification_config_sweeper.go @@ -15,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package identityplatform +package securitycenter import ( "context" @@ -30,12 +30,12 @@ import ( ) func init() { - sweeper.AddTestSweepers("IdentityPlatformProjectDefaultConfig", testSweepIdentityPlatformProjectDefaultConfig) + sweeper.AddTestSweepers("SecurityCenterProjectNotificationConfig", testSweepSecurityCenterProjectNotificationConfig) } // At the time of writing, the CI only passes us-central1 as the region -func testSweepIdentityPlatformProjectDefaultConfig(region string) error { - resourceName := "IdentityPlatformProjectDefaultConfig" +func testSweepSecurityCenterProjectNotificationConfig(region string) error { + resourceName := "SecurityCenterProjectNotificationConfig" log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) config, err := sweeper.SharedConfigForRegion(region) @@ -64,7 +64,7 @@ func testSweepIdentityPlatformProjectDefaultConfig(region string) error { }, } - listTemplate := strings.Split("https://identitytoolkit.googleapis.com/v2/projects/{{project}}/config", "?")[0] + listTemplate := strings.Split("https://securitycenter.googleapis.com/v1/projects/{{project}}/notificationConfigs", "?")[0] listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) if err != nil { log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) @@ -83,7 +83,7 @@ func testSweepIdentityPlatformProjectDefaultConfig(region string) error { return nil } - resourceList, ok := res["projectDefaultConfigs"] + resourceList, ok := res["projectNotificationConfigs"] if !ok { log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") return nil @@ -108,7 +108,7 @@ func testSweepIdentityPlatformProjectDefaultConfig(region string) error { continue } - deleteTemplate := "https://identitytoolkit.googleapis.com/v2/projects/{{project}}/config" + deleteTemplate := "https://securitycenter.googleapis.com/v1/{{name}}" deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) if err != nil { log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) diff 
--git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_project_scc_big_query_export.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_project_scc_big_query_export.go new file mode 100644 index 00000000000..8ad596560ef --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_project_scc_big_query_export.go @@ -0,0 +1,496 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package securitycenter + +import ( + "fmt" + "log" + "net/http" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceSecurityCenterProjectSccBigQueryExport() *schema.Resource { + return &schema.Resource{ + Create: resourceSecurityCenterProjectSccBigQueryExportCreate, + Read: resourceSecurityCenterProjectSccBigQueryExportRead, + Update: resourceSecurityCenterProjectSccBigQueryExportUpdate, + Delete: resourceSecurityCenterProjectSccBigQueryExportDelete, + + Importer: &schema.ResourceImporter{ + State: resourceSecurityCenterProjectSccBigQueryExportImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "big_query_export_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `This must be unique within the organization.`, + }, + "dataset": { + Type: schema.TypeString, + Optional: true, + Description: `The dataset to write findings' updates to. +Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". +BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), + Description: `The description of the notification config (max of 1024 characters).`, + }, + "filter": { + Type: schema.TypeString, + Optional: true, + Description: `Expression that defines the filter to apply across create/update +events of findings. The +expression is a list of zero or more restrictions combined via +logical operators AND and OR. Parentheses are supported, and OR +has higher precedence than AND. + +Restrictions have the form <field> <operator> <value> and may have +a - character in front of them to indicate negation. The fields +map to those defined in the corresponding resource.
+ +The supported operators are: + +* = for all value types. +* \>, <, >=, <= for integer values. +* :, meaning substring matching, for strings. + +The supported value types are: + +* string literals in quotes. +* integer literals without quotes. +* boolean literals true and false without quotes. + +See +[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) +for information on how to write a filter.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "most_recent_editor": { + Type: schema.TypeString, + Computed: true, + Description: `Email address of the user who last edited the BigQuery export. +This field is set by the server and will be ignored if provided on export creation or update.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of this export, in the format +'projects/{{project}}/bigQueryExports/{{big_query_export_id}}'. +This field is provided in responses, and is ignored when provided in create requests.`, + }, + "principal": { + Type: schema.TypeString, + Computed: true, + Description: `The service account that needs permission to create table and upload data to the BigQuery dataset.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
+Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSecurityCenterProjectSccBigQueryExportCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterProjectSccBigQueryExportDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + datasetProp, err := expandSecurityCenterProjectSccBigQueryExportDataset(d.Get("dataset"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dataset"); !tpgresource.IsEmptyValue(reflect.ValueOf(datasetProp)) && (ok || !reflect.DeepEqual(v, datasetProp)) { + obj["dataset"] = datasetProp + } + filterProp, err := expandSecurityCenterProjectSccBigQueryExportFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(filterProp)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}projects/{{project}}/bigQueryExports?bigQueryExportId={{big_query_export_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ProjectSccBigQueryExport: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectSccBigQueryExport: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating ProjectSccBigQueryExport: %s", err) + } + if err := d.Set("name", flattenSecurityCenterProjectSccBigQueryExportName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating ProjectSccBigQueryExport %q: %#v", d.Id(), res) + + return resourceSecurityCenterProjectSccBigQueryExportRead(d, meta) +} + +func resourceSecurityCenterProjectSccBigQueryExportRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}projects/{{project}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return err + } 
+ + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectSccBigQueryExport: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecurityCenterProjectSccBigQueryExport %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading ProjectSccBigQueryExport: %s", err) + } + + if err := d.Set("name", flattenSecurityCenterProjectSccBigQueryExportName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectSccBigQueryExport: %s", err) + } + if err := d.Set("description", flattenSecurityCenterProjectSccBigQueryExportDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectSccBigQueryExport: %s", err) + } + if err := d.Set("dataset", flattenSecurityCenterProjectSccBigQueryExportDataset(res["dataset"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectSccBigQueryExport: %s", err) + } + if err := d.Set("create_time", flattenSecurityCenterProjectSccBigQueryExportCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectSccBigQueryExport: %s", err) + } + if err := d.Set("update_time", flattenSecurityCenterProjectSccBigQueryExportUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectSccBigQueryExport: %s", err) + } + if err := d.Set("most_recent_editor", flattenSecurityCenterProjectSccBigQueryExportMostRecentEditor(res["mostRecentEditor"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectSccBigQueryExport: %s", err) + } + if err := d.Set("principal", flattenSecurityCenterProjectSccBigQueryExportPrincipal(res["principal"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectSccBigQueryExport: %s", err) + } + if err := d.Set("filter", flattenSecurityCenterProjectSccBigQueryExportFilter(res["filter"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectSccBigQueryExport: %s", err) + } + + return nil +} + +func resourceSecurityCenterProjectSccBigQueryExportUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectSccBigQueryExport: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterProjectSccBigQueryExportDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + datasetProp, err := expandSecurityCenterProjectSccBigQueryExportDataset(d.Get("dataset"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("dataset"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, datasetProp)) { + obj["dataset"] = datasetProp + } + filterProp, err := expandSecurityCenterProjectSccBigQueryExportFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}projects/{{project}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating ProjectSccBigQueryExport %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("dataset") { + updateMask = append(updateMask, "dataset") + } + + if d.HasChange("filter") { + updateMask = append(updateMask, "filter") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating ProjectSccBigQueryExport %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ProjectSccBigQueryExport %q: %#v", d.Id(), res) + } + + } + + return resourceSecurityCenterProjectSccBigQueryExportRead(d, meta) +} + +func resourceSecurityCenterProjectSccBigQueryExportDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectSccBigQueryExport: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}projects/{{project}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting ProjectSccBigQueryExport %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ProjectSccBigQueryExport") + } + + log.Printf("[DEBUG] Finished deleting ProjectSccBigQueryExport %q: %#v", d.Id(), res) + return nil +} + +func 
resourceSecurityCenterProjectSccBigQueryExportImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+ config := meta.(*transport_tpg.Config)
+ if err := tpgresource.ParseImportId([]string{
+ "^projects/(?P<project>[^/]+)/bigQueryExports/(?P<big_query_export_id>[^/]+)$",
+ "^(?P<project>[^/]+)/(?P<big_query_export_id>[^/]+)$",
+ "^(?P<big_query_export_id>[^/]+)$",
+ }, d, config); err != nil {
+ return nil, err
+ }
+
+ // Replace import id for the resource id
+ id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/bigQueryExports/{{big_query_export_id}}")
+ if err != nil {
+ return nil, fmt.Errorf("Error constructing id: %s", err)
+ }
+ d.SetId(id)
+
+ return []*schema.ResourceData{d}, nil
+}
+
+func flattenSecurityCenterProjectSccBigQueryExportName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+ return v
+}
+
+func flattenSecurityCenterProjectSccBigQueryExportDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+ return v
+}
+
+func flattenSecurityCenterProjectSccBigQueryExportDataset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+ return v
+}
+
+func flattenSecurityCenterProjectSccBigQueryExportCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+ return v
+}
+
+func flattenSecurityCenterProjectSccBigQueryExportUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+ return v
+}
+
+func flattenSecurityCenterProjectSccBigQueryExportMostRecentEditor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+ return v
+}
+
+func flattenSecurityCenterProjectSccBigQueryExportPrincipal(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+ return v
+}
+
+func flattenSecurityCenterProjectSccBigQueryExportFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+ return v
+}
+
+func expandSecurityCenterProjectSccBigQueryExportDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+ return v, nil
+}
+
+func expandSecurityCenterProjectSccBigQueryExportDataset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+ return v, nil
+}
+
+func expandSecurityCenterProjectSccBigQueryExportFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+ return v, nil
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_project_scc_big_query_export_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_project_scc_big_query_export_sweeper.go
new file mode 100644
index 00000000000..18fe5820e4d
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_project_scc_big_query_export_sweeper.go
@@ -0,0 +1,143 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// ----------------------------------------------------------------------------
+//
+// *** AUTO GENERATED CODE *** Type: MMv1 ***
+//
+// ----------------------------------------------------------------------------
+//
+// This file is automatically generated by Magic Modules and manual
+// changes will be clobbered when the file is regenerated. 
+// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package securitycenter + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("SecurityCenterProjectSccBigQueryExport", testSweepSecurityCenterProjectSccBigQueryExport) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepSecurityCenterProjectSccBigQueryExport(region string) error { + resourceName := "SecurityCenterProjectSccBigQueryExport" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://securitycenter.googleapis.com/v1/projects/{{project}}/bigQueryExports", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["projectSccBigQueryExports"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
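+ // For example (illustrative value), an entry whose "name" is
+ // "projects/my-proj/bigQueryExports/tf-test-export" is reduced to
+ // "tf-test-export": GetResourceNameFromSelfLink keeps only the segment
+ // after the final "/".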
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://securitycenter.googleapis.com/v1/projects/{{project}}/bigQueryExports/{{big_query_export_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/iam_scc_v2_organization_source.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/iam_scc_v2_organization_source.go new file mode 100644 index 00000000000..b715a463f55 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/iam_scc_v2_organization_source.go @@ -0,0 +1,202 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+//
+// ----------------------------------------------------------------------------
+
+package securitycenterv2
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ "google.golang.org/api/cloudresourcemanager/v1"
+
+ "github.com/hashicorp/terraform-provider-google/google/tpgiamresource"
+ "github.com/hashicorp/terraform-provider-google/google/tpgresource"
+ transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport"
+)
+
+var SecurityCenterV2OrganizationSourceIamSchema = map[string]*schema.Schema{
+ "organization": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "source": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName,
+ },
+}
+
+type SecurityCenterV2OrganizationSourceIamUpdater struct {
+ organization string
+ source string
+ d tpgresource.TerraformResourceData
+ Config *transport_tpg.Config
+}
+
+func SecurityCenterV2OrganizationSourceIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) {
+ values := make(map[string]string)
+
+ if v, ok := d.GetOk("organization"); ok {
+ values["organization"] = v.(string)
+ }
+
+ if v, ok := d.GetOk("source"); ok {
+ values["source"] = v.(string)
+ }
+
+ // We may have gotten either a long or short name, so attempt to parse long name if possible
+ m, err := tpgresource.GetImportIdQualifiers([]string{"organizations/(?P<organization>[^/]+)/sources/(?P<source>[^/]+)", "(?P<organization>[^/]+)/(?P<source>[^/]+)", "(?P<source>[^/]+)"}, d, config, d.Get("source").(string))
+ if err != nil {
+ return nil, err
+ }
+
+ for k, v := range m {
+ values[k] = v
+ }
+
+ u := &SecurityCenterV2OrganizationSourceIamUpdater{
+ organization: values["organization"],
+ source: values["source"],
+ d: d,
+ Config: config,
+ }
+
+ if err := d.Set("organization", u.organization); err != nil {
+ return nil, fmt.Errorf("Error setting organization: %s", err)
+ }
+ if err := d.Set("source", u.GetResourceId()); err != nil {
+ return nil, fmt.Errorf("Error setting source: %s", err)
+ }
+
+ return u, nil
+}
+
+func SecurityCenterV2OrganizationSourceIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error {
+ values := make(map[string]string)
+
+ m, err := tpgresource.GetImportIdQualifiers([]string{"organizations/(?P<organization>[^/]+)/sources/(?P<source>[^/]+)", "(?P<organization>[^/]+)/(?P<source>[^/]+)", "(?P<source>[^/]+)"}, d, config, d.Id())
+ if err != nil {
+ return err
+ }
+
+ for k, v := range m {
+ values[k] = v
+ }
+
+ u := &SecurityCenterV2OrganizationSourceIamUpdater{
+ organization: values["organization"],
+ source: values["source"],
+ d: d,
+ Config: config,
+ }
+ if err := d.Set("source", u.GetResourceId()); err != nil {
+ return fmt.Errorf("Error setting source: %s", err)
+ }
+ d.SetId(u.GetResourceId())
+ return nil
+}
+
+func (u *SecurityCenterV2OrganizationSourceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) {
+ url, err := u.qualifyOrganizationSourceUrl("getIamPolicy")
+ if err != nil {
+ return nil, err
+ }
+
+ var obj map[string]interface{}
+
+ userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent)
+ if err != nil {
+ return nil, err
+ }
+
+ policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
+ Config: u.Config,
+ Method: "POST",
+ RawURL: url,
+ UserAgent: userAgent,
+ Body: obj,
+ })
+ if err != nil {
+ return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), 
err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *SecurityCenterV2OrganizationSourceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyOrganizationSourceUrl("setIamPolicy") + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *SecurityCenterV2OrganizationSourceIamUpdater) qualifyOrganizationSourceUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{SecurityCenterV2BasePath}}%s:%s", fmt.Sprintf("organizations/%s/sources/%s", u.organization, u.source), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *SecurityCenterV2OrganizationSourceIamUpdater) GetResourceId() string { + return fmt.Sprintf("organizations/%s/sources/%s", u.organization, u.source) +} + +func (u *SecurityCenterV2OrganizationSourceIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-securitycenterv2-organizationsource-%s", u.GetResourceId()) +} + +func (u *SecurityCenterV2OrganizationSourceIamUpdater) DescribeResource() string { + return fmt.Sprintf("securitycenterv2 organizationsource %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_folder_mute_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_folder_mute_config.go new file mode 100644 index 00000000000..6e9fb636e84 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_folder_mute_config.go @@ -0,0 +1,435 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package securitycenterv2 + +import ( + "fmt" + "log" + "net/http" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceSecurityCenterV2FolderMuteConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceSecurityCenterV2FolderMuteConfigCreate, + Read: resourceSecurityCenterV2FolderMuteConfigRead, + Update: resourceSecurityCenterV2FolderMuteConfigUpdate, + Delete: resourceSecurityCenterV2FolderMuteConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceSecurityCenterV2FolderMuteConfigImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "filter": { + Type: schema.TypeString, + Required: true, + Description: `An expression that defines the filter to apply across create/update +events of findings. While creating a filter string, be mindful of +the scope in which the mute configuration is being created. E.g., +If a filter contains project = X but is created under the +project = Y scope, it might not match any findings.`, + }, + "folder": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The folder whose Cloud Security Command Center the Mute +Config lives in.`, + }, + "mute_config_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Unique identifier provided by the client within the parent scope.`, + }, + "type": { + Type: schema.TypeString, + Required: true, + Description: `The type of the mute config.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A description of the mute config.`, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `location Id is provided by folder. If not provided, Use global as default.`, + Default: "global", + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time at which the mute config was created. This field is set by +the server and will be ignored if provided on config creation.`, + }, + "most_recent_editor": { + Type: schema.TypeString, + Computed: true, + Description: `Email address of the user who last edited the mute config. This +field is set by the server and will be ignored if provided on +config creation or update.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Name of the mute config. Its format is +organizations/{organization}/locations/global/muteConfigs/{configId}, +folders/{folder}/locations/global/muteConfigs/{configId}, +or projects/{project}/locations/global/muteConfigs/{configId}`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The most recent time at which the mute config was +updated. 
This field is set by the server and will be ignored if +provided on config creation or update.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSecurityCenterV2FolderMuteConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterV2FolderMuteConfigDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + filterProp, err := expandSecurityCenterV2FolderMuteConfigFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(filterProp)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + typeProp, err := expandSecurityCenterV2FolderMuteConfigType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}folders/{{folder}}/locations/{{location}}/muteConfigs?muteConfigId={{mute_config_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new FolderMuteConfig: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating FolderMuteConfig: %s", err) + } + if err := d.Set("name", flattenSecurityCenterV2FolderMuteConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "folders/{{folder}}/locations/{{location}}/muteConfigs/{{mute_config_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating FolderMuteConfig %q: %#v", d.Id(), res) + + return resourceSecurityCenterV2FolderMuteConfigRead(d, meta) +} + +func resourceSecurityCenterV2FolderMuteConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}folders/{{folder}}/locations/{{location}}/muteConfigs/{{mute_config_id}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + 
Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecurityCenterV2FolderMuteConfig %q", d.Id())) + } + + if err := d.Set("name", flattenSecurityCenterV2FolderMuteConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderMuteConfig: %s", err) + } + if err := d.Set("description", flattenSecurityCenterV2FolderMuteConfigDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderMuteConfig: %s", err) + } + if err := d.Set("filter", flattenSecurityCenterV2FolderMuteConfigFilter(res["filter"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderMuteConfig: %s", err) + } + if err := d.Set("create_time", flattenSecurityCenterV2FolderMuteConfigCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderMuteConfig: %s", err) + } + if err := d.Set("update_time", flattenSecurityCenterV2FolderMuteConfigUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderMuteConfig: %s", err) + } + if err := d.Set("most_recent_editor", flattenSecurityCenterV2FolderMuteConfigMostRecentEditor(res["mostRecentEditor"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderMuteConfig: %s", err) + } + if err := d.Set("type", flattenSecurityCenterV2FolderMuteConfigType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderMuteConfig: %s", err) + } + + return nil +} + +func resourceSecurityCenterV2FolderMuteConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterV2FolderMuteConfigDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + filterProp, err := expandSecurityCenterV2FolderMuteConfigFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + typeProp, err := expandSecurityCenterV2FolderMuteConfigType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}folders/{{folder}}/locations/{{location}}/muteConfigs/{{mute_config_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating FolderMuteConfig %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("filter") { + updateMask = append(updateMask, "filter") + } + + if d.HasChange("type") { + updateMask = append(updateMask, "type") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, 
map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating FolderMuteConfig %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating FolderMuteConfig %q: %#v", d.Id(), res) + } + + } + + return resourceSecurityCenterV2FolderMuteConfigRead(d, meta) +} + +func resourceSecurityCenterV2FolderMuteConfigDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}folders/{{folder}}/locations/{{location}}/muteConfigs/{{mute_config_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting FolderMuteConfig %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "FolderMuteConfig") + } + + log.Printf("[DEBUG] Finished deleting FolderMuteConfig %q: %#v", d.Id(), res) + return nil +} + +func resourceSecurityCenterV2FolderMuteConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^folders/(?P[^/]+)/locations/(?P[^/]+)/muteConfigs/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "folders/{{folder}}/locations/{{location}}/muteConfigs/{{mute_config_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenSecurityCenterV2FolderMuteConfigName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2FolderMuteConfigDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2FolderMuteConfigFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2FolderMuteConfigCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2FolderMuteConfigUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + 
return v +} + +func flattenSecurityCenterV2FolderMuteConfigMostRecentEditor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2FolderMuteConfigType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandSecurityCenterV2FolderMuteConfigDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterV2FolderMuteConfigFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterV2FolderMuteConfigType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_folder_mute_config_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_folder_mute_config_sweeper.go new file mode 100644 index 00000000000..ad85a5fe747 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_folder_mute_config_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package securitycenterv2 + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("SecurityCenterV2FolderMuteConfig", testSweepSecurityCenterV2FolderMuteConfig) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepSecurityCenterV2FolderMuteConfig(region string) error { + resourceName := "SecurityCenterV2FolderMuteConfig" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://securitycenter.googleapis.com/v2/folders/{{folder}}/locations/{{location}}/muteConfigs", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["folderMuteConfigs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
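+ // For example (illustrative value), an entry named
+ // "folders/123/locations/global/muteConfigs/tf-test-mute" is reduced to
+ // "tf-test-mute", which is then appended to the delete URL built below.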
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://securitycenter.googleapis.com/v2/folders/{{folder}}/locations/{{location}}/muteConfigs/{{mute_config_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_folder_notification_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_folder_notification_config.go new file mode 100644 index 00000000000..935119a6cf8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_folder_notification_config.go @@ -0,0 +1,482 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+//
+// ----------------------------------------------------------------------------
+
+package securitycenterv2
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+
+ "github.com/hashicorp/terraform-provider-google/google/tpgresource"
+ transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport"
+)
+
+func ResourceSecurityCenterV2FolderNotificationConfig() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceSecurityCenterV2FolderNotificationConfigCreate,
+ Read: resourceSecurityCenterV2FolderNotificationConfigRead,
+ Update: resourceSecurityCenterV2FolderNotificationConfigUpdate,
+ Delete: resourceSecurityCenterV2FolderNotificationConfigDelete,
+
+ Importer: &schema.ResourceImporter{
+ State: resourceSecurityCenterV2FolderNotificationConfigImport,
+ },
+
+ Timeouts: &schema.ResourceTimeout{
+ Create: schema.DefaultTimeout(20 * time.Minute),
+ Update: schema.DefaultTimeout(20 * time.Minute),
+ Delete: schema.DefaultTimeout(20 * time.Minute),
+ },
+
+ Schema: map[string]*schema.Schema{
+ "config_id": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ Description: `This must be unique within the organization.`,
+ },
+ "folder": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ Description: `Numerical ID of the parent folder.`,
+ },
+ "pubsub_topic": {
+ Type: schema.TypeString,
+ Required: true,
+ Description: `The Pub/Sub topic to send notifications to. Its format is
+"projects/[project_id]/topics/[topic]".`,
+ },
+ "streaming_config": {
+ Type: schema.TypeList,
+ Required: true,
+ Description: `The config for triggering streaming-based notifications.`,
+ MaxItems: 1,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "filter": {
+ Type: schema.TypeString,
+ Required: true,
+ Description: `Expression that defines the filter to apply across create/update
+events of assets or findings as specified by the event type. The
+expression is a list of zero or more restrictions combined via
+logical operators AND and OR. Parentheses are supported, and OR
+has higher precedence than AND.
+
+Restrictions have the form <field> <operator> <value> and may have
+a - character in front of them to indicate negation. The fields
+map to those defined in the corresponding resource.
+
+The supported operators are:
+
+* = for all value types.
+* >, <, >=, <= for integer values.
+* :, meaning substring matching, for strings.
+
+The supported value types are:
+
+* string literals in quotes.
+* integer literals without quotes.
+* boolean literals true and false without quotes.
+
+See
+[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications)
+for information on how to write a filter.`,
+ },
+ },
+ },
+ },
+ "description": {
+ Type: schema.TypeString,
+ Optional: true,
+ ValidateFunc: validation.StringLenBetween(0, 1024),
+ Description: `The description of the notification config (max of 1024 characters).`,
+ },
+ "location": {
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ Description: `Location ID of the parent organization. 
If not provided, 'global' will be used as the default location.`, + Default: "global", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of this notification config, in the format +'folders/{{folder}}/locations/{{location}}/notificationConfigs/{{config_id}}'.`, + }, + "service_account": { + Type: schema.TypeString, + Computed: true, + Description: `The service account that needs "pubsub.topics.publish" permission to +publish to the Pub/Sub topic.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSecurityCenterV2FolderNotificationConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterV2FolderNotificationConfigDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + pubsubTopicProp, err := expandSecurityCenterV2FolderNotificationConfigPubsubTopic(d.Get("pubsub_topic"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("pubsub_topic"); !tpgresource.IsEmptyValue(reflect.ValueOf(pubsubTopicProp)) && (ok || !reflect.DeepEqual(v, pubsubTopicProp)) { + obj["pubsubTopic"] = pubsubTopicProp + } + streamingConfigProp, err := expandSecurityCenterV2FolderNotificationConfigStreamingConfig(d.Get("streaming_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("streaming_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(streamingConfigProp)) && (ok || !reflect.DeepEqual(v, streamingConfigProp)) { + obj["streamingConfig"] = streamingConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}folders/{{folder}}/locations/{{location}}/notificationConfigs?configId={{config_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new FolderNotificationConfig: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating FolderNotificationConfig: %s", err) + } + if err := d.Set("name", flattenSecurityCenterV2FolderNotificationConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "folders/{{folder}}/locations/{{location}}/notificationConfigs/{{config_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating FolderNotificationConfig %q: %#v", d.Id(), res) + + return resourceSecurityCenterV2FolderNotificationConfigRead(d, meta) +} + +func resourceSecurityCenterV2FolderNotificationConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + 
userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}folders/{{folder}}/locations/{{location}}/notificationConfigs/{{config_id}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecurityCenterV2FolderNotificationConfig %q", d.Id())) + } + + if err := d.Set("name", flattenSecurityCenterV2FolderNotificationConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderNotificationConfig: %s", err) + } + if err := d.Set("description", flattenSecurityCenterV2FolderNotificationConfigDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderNotificationConfig: %s", err) + } + if err := d.Set("pubsub_topic", flattenSecurityCenterV2FolderNotificationConfigPubsubTopic(res["pubsubTopic"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderNotificationConfig: %s", err) + } + if err := d.Set("service_account", flattenSecurityCenterV2FolderNotificationConfigServiceAccount(res["serviceAccount"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderNotificationConfig: %s", err) + } + if err := d.Set("streaming_config", flattenSecurityCenterV2FolderNotificationConfigStreamingConfig(res["streamingConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderNotificationConfig: %s", err) + } + + return nil +} + +func resourceSecurityCenterV2FolderNotificationConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterV2FolderNotificationConfigDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + pubsubTopicProp, err := expandSecurityCenterV2FolderNotificationConfigPubsubTopic(d.Get("pubsub_topic"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("pubsub_topic"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, pubsubTopicProp)) { + obj["pubsubTopic"] = pubsubTopicProp + } + streamingConfigProp, err := expandSecurityCenterV2FolderNotificationConfigStreamingConfig(d.Get("streaming_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("streaming_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, streamingConfigProp)) { + obj["streamingConfig"] = streamingConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}folders/{{folder}}/locations/{{location}}/notificationConfigs/{{config_id}}") + if err != nil { + return err + } + + 
log.Printf("[DEBUG] Updating FolderNotificationConfig %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("pubsub_topic") { + updateMask = append(updateMask, "pubsubTopic") + } + + if d.HasChange("streaming_config") { + updateMask = append(updateMask, "streamingConfig.filter") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating FolderNotificationConfig %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating FolderNotificationConfig %q: %#v", d.Id(), res) + } + + } + + return resourceSecurityCenterV2FolderNotificationConfigRead(d, meta) +} + +func resourceSecurityCenterV2FolderNotificationConfigDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}folders/{{folder}}/locations/{{location}}/notificationConfigs/{{config_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting FolderNotificationConfig %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "FolderNotificationConfig") + } + + log.Printf("[DEBUG] Finished deleting FolderNotificationConfig %q: %#v", d.Id(), res) + return nil +} + +func resourceSecurityCenterV2FolderNotificationConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^folders/(?P[^/]+)/locations/(?P[^/]+)/notificationConfigs/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "folders/{{folder}}/locations/{{location}}/notificationConfigs/{{config_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + idParts := strings.Split(d.Id(), "/") + if len(idParts) != 6 { + return nil, fmt.Errorf("unexpected format of ID (%q), expected 
folders/{{folder}}/locations/{{location}}/notificationConfigs/{{config_id}}", d.Id()) + } + + if err := d.Set("folder", idParts[1]); err != nil { + return nil, fmt.Errorf("error setting folder: %s", err) + } + + if err := d.Set("config_id", idParts[5]); err != nil { + return nil, fmt.Errorf("error setting config_id: %s", err) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenSecurityCenterV2FolderNotificationConfigName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2FolderNotificationConfigDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2FolderNotificationConfigPubsubTopic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2FolderNotificationConfigServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2FolderNotificationConfigStreamingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["filter"] = + flattenSecurityCenterV2FolderNotificationConfigStreamingConfigFilter(original["filter"], d, config) + return []interface{}{transformed} +} +func flattenSecurityCenterV2FolderNotificationConfigStreamingConfigFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandSecurityCenterV2FolderNotificationConfigDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterV2FolderNotificationConfigPubsubTopic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterV2FolderNotificationConfigStreamingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFilter, err := expandSecurityCenterV2FolderNotificationConfigStreamingConfigFilter(original["filter"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["filter"] = transformedFilter + } + + return transformed, nil +} + +func expandSecurityCenterV2FolderNotificationConfigStreamingConfigFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_folder_notification_config_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_folder_notification_config_sweeper.go new file mode 100644 index 00000000000..ddfc371a4f0 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_folder_notification_config_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package securitycenterv2 + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("SecurityCenterV2FolderNotificationConfig", testSweepSecurityCenterV2FolderNotificationConfig) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepSecurityCenterV2FolderNotificationConfig(region string) error { + resourceName := "SecurityCenterV2FolderNotificationConfig" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://securitycenter.googleapis.com/v2/folders/{{folder}}/locations/{{location}}/notificationConfigs", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["folderNotificationConfigs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
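+		// Falling back to "name" when "id" is absent; either way GetResourceNameFromSelfLink
+		// keeps only the trailing path segment, e.g.
+		// folders/123/locations/global/notificationConfigs/tf-test-cfg -> "tf-test-cfg"
+		// ("123" and "tf-test-cfg" are example values for illustration).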
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://securitycenter.googleapis.com/v2/folders/{{folder}}/locations/{{location}}/notificationConfigs/{{config_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_folder_scc_big_query_export.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_folder_scc_big_query_export.go new file mode 100644 index 00000000000..5811289673a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_folder_scc_big_query_export.go @@ -0,0 +1,472 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package securitycenterv2 + +import ( + "fmt" + "log" + "net/http" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceSecurityCenterV2FolderSccBigQueryExport() *schema.Resource { + return &schema.Resource{ + Create: resourceSecurityCenterV2FolderSccBigQueryExportCreate, + Read: resourceSecurityCenterV2FolderSccBigQueryExportRead, + Update: resourceSecurityCenterV2FolderSccBigQueryExportUpdate, + Delete: resourceSecurityCenterV2FolderSccBigQueryExportDelete, + + Importer: &schema.ResourceImporter{ + State: resourceSecurityCenterV2FolderSccBigQueryExportImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "big_query_export_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `This must be unique within the organization. It must consist of only lowercase letters, +numbers, and hyphens, must start with a letter, must end with either a letter or a number, +and must be 63 characters or less.`, + }, + "folder": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The folder where the Cloud Security Command Center BigQuery +export config lives.`, + }, + "dataset": { + Type: schema.TypeString, + Optional: true, + Description: `The dataset to write findings' updates to. +Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". +BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), + Description: `The description of the BigQuery export (max of 1024 characters).`, + }, + "filter": { + Type: schema.TypeString, + Optional: true, + Description: `Expression that defines the filter to apply across create/update +events of findings. The +expression is a list of zero or more restrictions combined via +logical operators AND and OR. Parentheses are supported, and OR +has higher precedence than AND. + +Restrictions have the form <field> <operator> <value> and may have +a - character in front of them to indicate negation. The fields +map to those defined in the corresponding resource. + +The supported operators are: + +* = for all value types. +* >, <, >=, <= for integer values. +* :, meaning substring matching, for strings. + +The supported value types are: + +* string literals in quotes. +* integer literals without quotes. +* boolean literals true and false without quotes. + +See +[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) +for information on how to write a filter.`, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The BigQuery export configuration is stored in this location. If not provided, 'global' will be used as the default.`, + Default: "global", + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time at which the BigQuery export was created. 
This field is set by the server and will be ignored if provided on export creation. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "most_recent_editor": { + Type: schema.TypeString, + Computed: true, + Description: `Email address of the user who last edited the BigQuery export. +This field is set by the server and will be ignored if provided on export creation or update.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of this export, in the format +'folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}'. +This field is provided in responses, and is ignored when provided in create requests.`, + }, + "principal": { + Type: schema.TypeString, + Computed: true, + Description: `The service account that needs permission to create table and upload data to the BigQuery dataset.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSecurityCenterV2FolderSccBigQueryExportCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterV2FolderSccBigQueryExportDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + datasetProp, err := expandSecurityCenterV2FolderSccBigQueryExportDataset(d.Get("dataset"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dataset"); !tpgresource.IsEmptyValue(reflect.ValueOf(datasetProp)) && (ok || !reflect.DeepEqual(v, datasetProp)) { + obj["dataset"] = datasetProp + } + filterProp, err := expandSecurityCenterV2FolderSccBigQueryExportFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(filterProp)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}folders/{{folder}}/locations/{{location}}/bigQueryExports?bigQueryExportId={{big_query_export_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new FolderSccBigQueryExport: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return 
fmt.Errorf("Error creating FolderSccBigQueryExport: %s", err) + } + if err := d.Set("name", flattenSecurityCenterV2FolderSccBigQueryExportName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating FolderSccBigQueryExport %q: %#v", d.Id(), res) + + return resourceSecurityCenterV2FolderSccBigQueryExportRead(d, meta) +} + +func resourceSecurityCenterV2FolderSccBigQueryExportRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecurityCenterV2FolderSccBigQueryExport %q", d.Id())) + } + + if err := d.Set("name", flattenSecurityCenterV2FolderSccBigQueryExportName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderSccBigQueryExport: %s", err) + } + if err := d.Set("description", flattenSecurityCenterV2FolderSccBigQueryExportDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderSccBigQueryExport: %s", err) + } + if err := d.Set("dataset", flattenSecurityCenterV2FolderSccBigQueryExportDataset(res["dataset"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderSccBigQueryExport: %s", err) + } + if err := d.Set("create_time", flattenSecurityCenterV2FolderSccBigQueryExportCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderSccBigQueryExport: %s", err) + } + if err := d.Set("update_time", flattenSecurityCenterV2FolderSccBigQueryExportUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderSccBigQueryExport: %s", err) + } + if err := d.Set("most_recent_editor", flattenSecurityCenterV2FolderSccBigQueryExportMostRecentEditor(res["mostRecentEditor"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderSccBigQueryExport: %s", err) + } + if err := d.Set("principal", flattenSecurityCenterV2FolderSccBigQueryExportPrincipal(res["principal"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderSccBigQueryExport: %s", err) + } + if err := d.Set("filter", flattenSecurityCenterV2FolderSccBigQueryExportFilter(res["filter"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderSccBigQueryExport: %s", err) + } + + return nil +} + +func resourceSecurityCenterV2FolderSccBigQueryExportUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterV2FolderSccBigQueryExportDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + datasetProp, err := expandSecurityCenterV2FolderSccBigQueryExportDataset(d.Get("dataset"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dataset"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, datasetProp)) { + obj["dataset"] = datasetProp + } + filterProp, err := expandSecurityCenterV2FolderSccBigQueryExportFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating FolderSccBigQueryExport %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("dataset") { + updateMask = append(updateMask, "dataset") + } + + if d.HasChange("filter") { + updateMask = append(updateMask, "filter") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating FolderSccBigQueryExport %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating FolderSccBigQueryExport %q: %#v", d.Id(), res) + } + + } + + return resourceSecurityCenterV2FolderSccBigQueryExportRead(d, meta) +} + +func resourceSecurityCenterV2FolderSccBigQueryExportDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting FolderSccBigQueryExport %q", d.Id()) + res, 
err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "FolderSccBigQueryExport") + } + + log.Printf("[DEBUG] Finished deleting FolderSccBigQueryExport %q: %#v", d.Id(), res) + return nil +} + +func resourceSecurityCenterV2FolderSccBigQueryExportImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^folders/(?P<folder>[^/]+)/locations/(?P<location>[^/]+)/bigQueryExports/(?P<big_query_export_id>[^/]+)$", + "^(?P<folder>[^/]+)/(?P<location>[^/]+)/(?P<big_query_export_id>[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenSecurityCenterV2FolderSccBigQueryExportName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2FolderSccBigQueryExportDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2FolderSccBigQueryExportDataset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2FolderSccBigQueryExportCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2FolderSccBigQueryExportUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2FolderSccBigQueryExportMostRecentEditor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2FolderSccBigQueryExportPrincipal(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2FolderSccBigQueryExportFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandSecurityCenterV2FolderSccBigQueryExportDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterV2FolderSccBigQueryExportDataset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterV2FolderSccBigQueryExportFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_folder_scc_big_query_export_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_folder_scc_big_query_export_sweeper.go new file mode 100644 index 00000000000..efaf10e069b --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_folder_scc_big_query_export_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package securitycenterv2 + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("SecurityCenterV2FolderSccBigQueryExport", testSweepSecurityCenterV2FolderSccBigQueryExport) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepSecurityCenterV2FolderSccBigQueryExport(region string) error { + resourceName := "SecurityCenterV2FolderSccBigQueryExport" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://securitycenter.googleapis.com/v2/folders/{{folder}}/locations/{{location}}/bigQueryExports", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["folderSccBigQueryExports"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
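+		// Falling back to "name" when "id" is absent. Only obviously test-created exports get
+		// swept: IsSweepableTestResource below is assumed to match well-known test prefixes
+		// (such as "tf-test"), so any long-lived export is skipped and merely counted.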
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://securitycenter.googleapis.com/v2/folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_mute_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_mute_config.go new file mode 100644 index 00000000000..67eaf66b32c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_mute_config.go @@ -0,0 +1,435 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package securitycenterv2 + +import ( + "fmt" + "log" + "net/http" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceSecurityCenterV2OrganizationMuteConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceSecurityCenterV2OrganizationMuteConfigCreate, + Read: resourceSecurityCenterV2OrganizationMuteConfigRead, + Update: resourceSecurityCenterV2OrganizationMuteConfigUpdate, + Delete: resourceSecurityCenterV2OrganizationMuteConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceSecurityCenterV2OrganizationMuteConfigImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "filter": { + Type: schema.TypeString, + Required: true, + Description: `An expression that defines the filter to apply across create/update +events of findings. While creating a filter string, be mindful of +the scope in which the mute configuration is being created. E.g., +if a filter contains project = X but is created under the +project = Y scope, it might not match any findings.`, + }, + "mute_config_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Unique identifier provided by the client within the parent scope.`, + }, + "organization": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The organization where the Cloud Security Command Center Mute +Config lives.`, + }, + "type": { + Type: schema.TypeString, + Required: true, + Description: `The type of the mute config.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A description of the mute config.`, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The location ID is provided by the organization. If not provided, 'global' will be used as the default.`, + Default: "global", + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time at which the mute config was created. This field is set by +the server and will be ignored if provided on config creation.`, + }, + "most_recent_editor": { + Type: schema.TypeString, + Computed: true, + Description: `Email address of the user who last edited the mute config. This +field is set by the server and will be ignored if provided on +config creation or update.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Name of the mute config. Its format is +organizations/{organization}/locations/global/muteConfigs/{configId}, +folders/{folder}/locations/global/muteConfigs/{configId}, +or projects/{project}/locations/global/muteConfigs/{configId}`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The most recent time at which the mute config was +updated. 
This field is set by the server and will be ignored if +provided on config creation or update.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSecurityCenterV2OrganizationMuteConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterV2OrganizationMuteConfigDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + filterProp, err := expandSecurityCenterV2OrganizationMuteConfigFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(filterProp)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + typeProp, err := expandSecurityCenterV2OrganizationMuteConfigType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}organizations/{{organization}}/locations/{{location}}/muteConfigs?muteConfigId={{mute_config_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new OrganizationMuteConfig: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating OrganizationMuteConfig: %s", err) + } + if err := d.Set("name", flattenSecurityCenterV2OrganizationMuteConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{organization}}/locations/{{location}}/muteConfigs/{{mute_config_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating OrganizationMuteConfig %q: %#v", d.Id(), res) + + return resourceSecurityCenterV2OrganizationMuteConfigRead(d, meta) +} + +func resourceSecurityCenterV2OrganizationMuteConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}organizations/{{organization}}/locations/{{location}}/muteConfigs/{{mute_config_id}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := 
make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecurityCenterV2OrganizationMuteConfig %q", d.Id())) + } + + if err := d.Set("name", flattenSecurityCenterV2OrganizationMuteConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationMuteConfig: %s", err) + } + if err := d.Set("description", flattenSecurityCenterV2OrganizationMuteConfigDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationMuteConfig: %s", err) + } + if err := d.Set("filter", flattenSecurityCenterV2OrganizationMuteConfigFilter(res["filter"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationMuteConfig: %s", err) + } + if err := d.Set("create_time", flattenSecurityCenterV2OrganizationMuteConfigCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationMuteConfig: %s", err) + } + if err := d.Set("update_time", flattenSecurityCenterV2OrganizationMuteConfigUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationMuteConfig: %s", err) + } + if err := d.Set("most_recent_editor", flattenSecurityCenterV2OrganizationMuteConfigMostRecentEditor(res["mostRecentEditor"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationMuteConfig: %s", err) + } + if err := d.Set("type", flattenSecurityCenterV2OrganizationMuteConfigType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationMuteConfig: %s", err) + } + + return nil +} + +func resourceSecurityCenterV2OrganizationMuteConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterV2OrganizationMuteConfigDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + filterProp, err := expandSecurityCenterV2OrganizationMuteConfigFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + typeProp, err := expandSecurityCenterV2OrganizationMuteConfigType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}organizations/{{organization}}/locations/{{location}}/muteConfigs/{{mute_config_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating OrganizationMuteConfig %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("filter") { + updateMask = append(updateMask, 
"filter") + } + + if d.HasChange("type") { + updateMask = append(updateMask, "type") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating OrganizationMuteConfig %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating OrganizationMuteConfig %q: %#v", d.Id(), res) + } + + } + + return resourceSecurityCenterV2OrganizationMuteConfigRead(d, meta) +} + +func resourceSecurityCenterV2OrganizationMuteConfigDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}organizations/{{organization}}/locations/{{location}}/muteConfigs/{{mute_config_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting OrganizationMuteConfig %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "OrganizationMuteConfig") + } + + log.Printf("[DEBUG] Finished deleting OrganizationMuteConfig %q: %#v", d.Id(), res) + return nil +} + +func resourceSecurityCenterV2OrganizationMuteConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^organizations/(?P[^/]+)/locations/(?P[^/]+)/muteConfigs/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{organization}}/locations/{{location}}/muteConfigs/{{mute_config_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenSecurityCenterV2OrganizationMuteConfigName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2OrganizationMuteConfigDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2OrganizationMuteConfigFilter(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2OrganizationMuteConfigCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2OrganizationMuteConfigUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2OrganizationMuteConfigMostRecentEditor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2OrganizationMuteConfigType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandSecurityCenterV2OrganizationMuteConfigDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterV2OrganizationMuteConfigFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterV2OrganizationMuteConfigType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_mute_config_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_mute_config_sweeper.go new file mode 100644 index 00000000000..0f9b5bc72a9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_mute_config_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package securitycenterv2 + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("SecurityCenterV2OrganizationMuteConfig", testSweepSecurityCenterV2OrganizationMuteConfig) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepSecurityCenterV2OrganizationMuteConfig(region string) error { + resourceName := "SecurityCenterV2OrganizationMuteConfig" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://securitycenter.googleapis.com/v2/organizations/{{organization}}/locations/{{location}}/muteConfigs", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["organizationMuteConfigs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
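+		// Falling back to "name" when "id" is absent. As in the other sweepers in this patch,
+		// every failure below is logged and swallowed (return nil) rather than propagated, so
+		// one broken cleanup cannot abort the entire sweep run.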
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://securitycenter.googleapis.com/v2/organizations/{{organization}}/locations/{{location}}/muteConfigs/{{mute_config_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_notification_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_notification_config.go index 4fa815681af..62af22d4178 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_notification_config.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_notification_config.go @@ -161,7 +161,7 @@ func resourceSecurityCenterV2OrganizationNotificationConfigCreate(d *schema.Reso streamingConfigProp, err := expandSecurityCenterV2OrganizationNotificationConfigStreamingConfig(d.Get("streaming_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("streaming_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(streamingConfigProp)) && (ok || !reflect.DeepEqual(v, streamingConfigProp)) { + } else if v, ok := d.GetOkExists("streaming_config"); ok || !reflect.DeepEqual(v, streamingConfigProp) { obj["streamingConfig"] = streamingConfigProp } @@ -302,7 +302,7 @@ func resourceSecurityCenterV2OrganizationNotificationConfigUpdate(d *schema.Reso streamingConfigProp, err := expandSecurityCenterV2OrganizationNotificationConfigStreamingConfig(d.Get("streaming_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("streaming_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, streamingConfigProp)) { + } else if v, ok := d.GetOkExists("streaming_config"); ok || !reflect.DeepEqual(v, streamingConfigProp) { obj["streamingConfig"] = streamingConfigProp } @@ -454,9 +454,6 @@ func flattenSecurityCenterV2OrganizationNotificationConfigStreamingConfig(v inte return nil } original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } transformed := make(map[string]interface{}) 
transformed["filter"] = flattenSecurityCenterV2OrganizationNotificationConfigStreamingConfigFilter(original["filter"], d, config) @@ -476,9 +473,14 @@ func expandSecurityCenterV2OrganizationNotificationConfigPubsubTopic(v interface func expandSecurityCenterV2OrganizationNotificationConfigStreamingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { + if len(l) == 0 { return nil, nil } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } raw := l[0] original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) @@ -486,7 +488,7 @@ func expandSecurityCenterV2OrganizationNotificationConfigStreamingConfig(v inter transformedFilter, err := expandSecurityCenterV2OrganizationNotificationConfigStreamingConfigFilter(original["filter"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + } else { transformed["filter"] = transformedFilter } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_scc_big_query_export.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_scc_big_query_export.go new file mode 100644 index 00000000000..e55b586f67d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_scc_big_query_export.go @@ -0,0 +1,500 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+//
+// ----------------------------------------------------------------------------
+
+package securitycenterv2
+
+import (
+	"fmt"
+	"log"
+	"net/http"
+	"reflect"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+
+	"github.com/hashicorp/terraform-provider-google/google/tpgresource"
+	transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport"
+)
+
+func ResourceSecurityCenterV2OrganizationSccBigQueryExport() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceSecurityCenterV2OrganizationSccBigQueryExportCreate,
+		Read:   resourceSecurityCenterV2OrganizationSccBigQueryExportRead,
+		Update: resourceSecurityCenterV2OrganizationSccBigQueryExportUpdate,
+		Delete: resourceSecurityCenterV2OrganizationSccBigQueryExportDelete,
+
+		Importer: &schema.ResourceImporter{
+			State: resourceSecurityCenterV2OrganizationSccBigQueryExportImport,
+		},
+
+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(20 * time.Minute),
+			Update: schema.DefaultTimeout(20 * time.Minute),
+			Delete: schema.DefaultTimeout(20 * time.Minute),
+		},
+
+		Schema: map[string]*schema.Schema{
+			"big_query_export_id": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: `This must be unique within the organization.`,
+			},
+			"organization": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+				Description: `The organization whose Cloud Security Command Center the Big Query Export
+Config lives in.`,
+			},
+			"dataset": {
+				Type:     schema.TypeString,
+				Optional: true,
+				Description: `The dataset to write findings' updates to.
+Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]".
+BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).`,
+			},
+			"description": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				ValidateFunc: validation.StringLenBetween(0, 1024),
+				Description:  `The description of the export (max of 1024 characters).`,
+			},
+			"filter": {
+				Type:     schema.TypeString,
+				Optional: true,
+				Description: `Expression that defines the filter to apply across create/update
+events of findings. The
+expression is a list of zero or more restrictions combined via
+logical operators AND and OR. Parentheses are supported, and OR
+has higher precedence than AND.
+
+Restrictions have the form <field> <operator> <value> and may have
+a - character in front of them to indicate negation. The fields
+map to those defined in the corresponding resource.
+
+The supported operators are:
+
+* = for all value types.
+* >, <, >=, <= for integer values.
+* :, meaning substring matching, for strings.
+
+The supported value types are:
+
+* string literals in quotes.
+* integer literals without quotes.
+* boolean literals true and false without quotes.
+
+See
+[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications)
+for information on how to write a filter.`,
+			},
+			"location": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				ForceNew:    true,
+				Description: `location Id is provided by organization. If not provided, use global as default.`,
+				Default:     "global",
+			},
+			"name": {
+				Type:     schema.TypeString,
+				Optional: true,
+				Description: `The resource name of this export, in the format
+'organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}'.
+This field is provided in responses, and is ignored when provided in create requests.`,
+			},
+			"create_time": {
+				Type:     schema.TypeString,
+				Computed: true,
+				Description: `The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export creation.
+A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
+Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`,
+			},
+			"most_recent_editor": {
+				Type:     schema.TypeString,
+				Computed: true,
+				Description: `Email address of the user who last edited the BigQuery export.
+This field is set by the server and will be ignored if provided on export creation or update.`,
+			},
+			"principal": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: `The service account that needs permission to create table and upload data to the BigQuery dataset.`,
+			},
+			"update_time": {
+				Type:     schema.TypeString,
+				Computed: true,
+				Description: `The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update.
+A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
+Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`,
+			},
+		},
+		UseJSONNumber: true,
+	}
+}
+
+func resourceSecurityCenterV2OrganizationSccBigQueryExportCreate(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*transport_tpg.Config)
+	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return err
+	}
+
+	obj := make(map[string]interface{})
+	nameProp, err := expandSecurityCenterV2OrganizationSccBigQueryExportName(d.Get("name"), d, config)
+	if err != nil {
+		return err
+	} else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) {
+		obj["name"] = nameProp
+	}
+	descriptionProp, err := expandSecurityCenterV2OrganizationSccBigQueryExportDescription(d.Get("description"), d, config)
+	if err != nil {
+		return err
+	} else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) {
+		obj["description"] = descriptionProp
+	}
+	datasetProp, err := expandSecurityCenterV2OrganizationSccBigQueryExportDataset(d.Get("dataset"), d, config)
+	if err != nil {
+		return err
+	} else if v, ok := d.GetOkExists("dataset"); !tpgresource.IsEmptyValue(reflect.ValueOf(datasetProp)) && (ok || !reflect.DeepEqual(v, datasetProp)) {
+		obj["dataset"] = datasetProp
+	}
+	filterProp, err := expandSecurityCenterV2OrganizationSccBigQueryExportFilter(d.Get("filter"), d, config)
+	if err != nil {
+		return err
+	} else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(filterProp)) && (ok || !reflect.DeepEqual(v, filterProp)) {
+		obj["filter"] = filterProp
+	}
+
+	url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}organizations/{{organization}}/locations/{{location}}/bigQueryExports?bigQueryExportId={{big_query_export_id}}")
+	if err != nil {
+		return err
+	}
+
+	log.Printf("[DEBUG] Creating new OrganizationSccBigQueryExport: %#v", obj)
+	billingProject := ""
+
+	// err == nil indicates that the billing_project value was found
+	if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
+		billingProject = bp
+	}
+
+	headers := make(http.Header)
+	res, err :=
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating OrganizationSccBigQueryExport: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating OrganizationSccBigQueryExport %q: %#v", d.Id(), res) + + return resourceSecurityCenterV2OrganizationSccBigQueryExportRead(d, meta) +} + +func resourceSecurityCenterV2OrganizationSccBigQueryExportRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecurityCenterV2OrganizationSccBigQueryExport %q", d.Id())) + } + + if err := d.Set("name", flattenSecurityCenterV2OrganizationSccBigQueryExportName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSccBigQueryExport: %s", err) + } + if err := d.Set("description", flattenSecurityCenterV2OrganizationSccBigQueryExportDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSccBigQueryExport: %s", err) + } + if err := d.Set("dataset", flattenSecurityCenterV2OrganizationSccBigQueryExportDataset(res["dataset"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSccBigQueryExport: %s", err) + } + if err := d.Set("create_time", flattenSecurityCenterV2OrganizationSccBigQueryExportCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSccBigQueryExport: %s", err) + } + if err := d.Set("update_time", flattenSecurityCenterV2OrganizationSccBigQueryExportUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSccBigQueryExport: %s", err) + } + if err := d.Set("most_recent_editor", flattenSecurityCenterV2OrganizationSccBigQueryExportMostRecentEditor(res["mostRecentEditor"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSccBigQueryExport: %s", err) + } + if err := d.Set("principal", flattenSecurityCenterV2OrganizationSccBigQueryExportPrincipal(res["principal"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSccBigQueryExport: %s", err) + } + if err := d.Set("filter", flattenSecurityCenterV2OrganizationSccBigQueryExportFilter(res["filter"], d, config)); err != nil { + return fmt.Errorf("Error reading 
OrganizationSccBigQueryExport: %s", err) + } + + return nil +} + +func resourceSecurityCenterV2OrganizationSccBigQueryExportUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + nameProp, err := expandSecurityCenterV2OrganizationSccBigQueryExportName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandSecurityCenterV2OrganizationSccBigQueryExportDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + datasetProp, err := expandSecurityCenterV2OrganizationSccBigQueryExportDataset(d.Get("dataset"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dataset"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, datasetProp)) { + obj["dataset"] = datasetProp + } + filterProp, err := expandSecurityCenterV2OrganizationSccBigQueryExportFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating OrganizationSccBigQueryExport %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("name") { + updateMask = append(updateMask, "name") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("dataset") { + updateMask = append(updateMask, "dataset") + } + + if d.HasChange("filter") { + updateMask = append(updateMask, "filter") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating OrganizationSccBigQueryExport %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating OrganizationSccBigQueryExport %q: %#v", d.Id(), res) + } + + } + + return resourceSecurityCenterV2OrganizationSccBigQueryExportRead(d, meta) +} + +func resourceSecurityCenterV2OrganizationSccBigQueryExportDelete(d *schema.ResourceData, meta interface{}) error { + config := 
meta.(*transport_tpg.Config)
+	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return err
+	}
+
+	billingProject := ""
+
+	url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}")
+	if err != nil {
+		return err
+	}
+
+	var obj map[string]interface{}
+
+	// err == nil indicates that the billing_project value was found
+	if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
+		billingProject = bp
+	}
+
+	headers := make(http.Header)
+
+	log.Printf("[DEBUG] Deleting OrganizationSccBigQueryExport %q", d.Id())
+	res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
+		Config:    config,
+		Method:    "DELETE",
+		Project:   billingProject,
+		RawURL:    url,
+		UserAgent: userAgent,
+		Body:      obj,
+		Timeout:   d.Timeout(schema.TimeoutDelete),
+		Headers:   headers,
+	})
+	if err != nil {
+		return transport_tpg.HandleNotFoundError(err, d, "OrganizationSccBigQueryExport")
+	}
+
+	log.Printf("[DEBUG] Finished deleting OrganizationSccBigQueryExport %q: %#v", d.Id(), res)
+	return nil
+}
+
+func resourceSecurityCenterV2OrganizationSccBigQueryExportImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	config := meta.(*transport_tpg.Config)
+	if err := tpgresource.ParseImportId([]string{
+		"^organizations/(?P<organization>[^/]+)/locations/(?P<location>[^/]+)/bigQueryExports/(?P<big_query_export_id>[^/]+)$",
+		"^(?P<organization>[^/]+)/(?P<location>[^/]+)/(?P<big_query_export_id>[^/]+)$",
+	}, d, config); err != nil {
+		return nil, err
+	}
+
+	// Replace import id for the resource id
+	id, err := tpgresource.ReplaceVars(d, config, "organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}")
+	if err != nil {
+		return nil, fmt.Errorf("Error constructing id: %s", err)
+	}
+	d.SetId(id)
+
+	idParts := strings.Split(d.Id(), "/")
+	if len(idParts) != 6 {
+		return nil, fmt.Errorf("unexpected format of ID (%q), expected organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}", d.Id())
+	}
+
+	if err := d.Set("organization", idParts[1]); err != nil {
+		return nil, fmt.Errorf("error setting organization: %s", err)
+	}
+
+	if err := d.Set("big_query_export_id", idParts[5]); err != nil {
+		return nil, fmt.Errorf("error setting big_query_export_id: %s", err)
+	}
+
+	return []*schema.ResourceData{d}, nil
+}
+
+func flattenSecurityCenterV2OrganizationSccBigQueryExportName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenSecurityCenterV2OrganizationSccBigQueryExportDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenSecurityCenterV2OrganizationSccBigQueryExportDataset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenSecurityCenterV2OrganizationSccBigQueryExportCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenSecurityCenterV2OrganizationSccBigQueryExportUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenSecurityCenterV2OrganizationSccBigQueryExportMostRecentEditor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenSecurityCenterV2OrganizationSccBigQueryExportPrincipal(v interface{}, d *schema.ResourceData, config *transport_tpg.Config)
interface{} { + return v +} + +func flattenSecurityCenterV2OrganizationSccBigQueryExportFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandSecurityCenterV2OrganizationSccBigQueryExportName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterV2OrganizationSccBigQueryExportDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterV2OrganizationSccBigQueryExportDataset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterV2OrganizationSccBigQueryExportFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_scc_big_query_export_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_scc_big_query_export_sweeper.go new file mode 100644 index 00000000000..660abe985c7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_scc_big_query_export_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package securitycenterv2 + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("SecurityCenterV2OrganizationSccBigQueryExport", testSweepSecurityCenterV2OrganizationSccBigQueryExport) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepSecurityCenterV2OrganizationSccBigQueryExport(region string) error { + resourceName := "SecurityCenterV2OrganizationSccBigQueryExport" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://securitycenter.googleapis.com/v2/organizations/{{organization}}/locations/{{location}}/bigQueryExports", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["organizationSccBigQueryExports"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
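+		// If neither id nor name is present the sweeper bails out below with a
+		// nil error, so one malformed list entry never fails the whole sweep run.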
+		if obj["id"] != nil {
+			name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string))
+		} else if obj["name"] != nil {
+			name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string))
+		} else {
+			log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName)
+			return nil
+		}
+		// Skip resources that shouldn't be swept
+		if !sweeper.IsSweepableTestResource(name) {
+			nonPrefixCount++
+			continue
+		}
+
+		deleteTemplate := "https://securitycenter.googleapis.com/v2/organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}"
+		deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate)
+		if err != nil {
+			log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err)
+			return nil
+		}
+		deleteUrl = deleteUrl + name
+
+		// Don't wait on operations as we may have a lot to delete
+		_, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
+			Config:    config,
+			Method:    "DELETE",
+			Project:   config.Project,
+			RawURL:    deleteUrl,
+			UserAgent: config.UserAgent,
+		})
+		if err != nil {
+			log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s: %s", deleteUrl, err)
+		} else {
+			log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name)
+		}
+	}
+
+	if nonPrefixCount > 0 {
+		log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount)
+	}
+
+	return nil
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_scc_big_query_exports.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_scc_big_query_exports.go
new file mode 100644
index 00000000000..1ae4349ef23
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_scc_big_query_exports.go
@@ -0,0 +1,502 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// ----------------------------------------------------------------------------
+//
+// *** AUTO GENERATED CODE *** Type: MMv1 ***
+//
+// ----------------------------------------------------------------------------
+//
+// This file is automatically generated by Magic Modules and manual
+// changes will be clobbered when the file is regenerated.
+//
+// Please read more about how to change this file in
+// .github/CONTRIBUTING.md.
+//
+// ----------------------------------------------------------------------------
+
+package securitycenterv2
+
+import (
+	"fmt"
+	"log"
+	"net/http"
+	"reflect"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+
+	"github.com/hashicorp/terraform-provider-google/google/tpgresource"
+	transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport"
+)
+
+func ResourceSecurityCenterV2OrganizationSccBigQueryExports() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceSecurityCenterV2OrganizationSccBigQueryExportsCreate,
+		Read:   resourceSecurityCenterV2OrganizationSccBigQueryExportsRead,
+		Update: resourceSecurityCenterV2OrganizationSccBigQueryExportsUpdate,
+		Delete: resourceSecurityCenterV2OrganizationSccBigQueryExportsDelete,
+
+		Importer: &schema.ResourceImporter{
+			State: resourceSecurityCenterV2OrganizationSccBigQueryExportsImport,
+		},
+
+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(20 * time.Minute),
+			Update: schema.DefaultTimeout(20 * time.Minute),
+			Delete: schema.DefaultTimeout(20 * time.Minute),
+		},
+
+		DeprecationMessage: "`google_scc_v2_organization_scc_big_query_exports` is deprecated and will be removed in a future major release. Use `google_scc_v2_organization_scc_big_query_export` instead.",
+
+		Schema: map[string]*schema.Schema{
+			"big_query_export_id": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: `This must be unique within the organization.`,
+			},
+			"organization": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+				Description: `The organization whose Cloud Security Command Center the Big Query Export
+Config lives in.`,
+			},
+			"dataset": {
+				Type:     schema.TypeString,
+				Optional: true,
+				Description: `The dataset to write findings' updates to.
+Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]".
+BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).`,
+			},
+			"description": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				ValidateFunc: validation.StringLenBetween(0, 1024),
+				Description:  `The description of the export (max of 1024 characters).`,
+			},
+			"filter": {
+				Type:     schema.TypeString,
+				Optional: true,
+				Description: `Expression that defines the filter to apply across create/update
+events of findings. The
+expression is a list of zero or more restrictions combined via
+logical operators AND and OR. Parentheses are supported, and OR
+has higher precedence than AND.
+
+Restrictions have the form <field> <operator> <value> and may have
+a - character in front of them to indicate negation. The fields
+map to those defined in the corresponding resource.
+
+The supported operators are:
+
+* = for all value types.
+* >, <, >=, <= for integer values.
+* :, meaning substring matching, for strings.
+
+The supported value types are:
+
+* string literals in quotes.
+* integer literals without quotes.
+* boolean literals true and false without quotes.
+
+See
+[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications)
+for information on how to write a filter.`,
+			},
+			"location": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				ForceNew:    true,
+				Description: `location Id is provided by organization. If not provided, use global as default.`,
+				Default:     "global",
+			},
+			"name": {
+				Type:     schema.TypeString,
+				Optional: true,
+				Description: `The resource name of this export, in the format
+'organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}'.
+This field is provided in responses, and is ignored when provided in create requests.`,
+			},
+			"create_time": {
+				Type:     schema.TypeString,
+				Computed: true,
+				Description: `The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export creation.
+A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
+Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`,
+			},
+			"most_recent_editor": {
+				Type:     schema.TypeString,
+				Computed: true,
+				Description: `Email address of the user who last edited the BigQuery export.
+This field is set by the server and will be ignored if provided on export creation or update.`,
+			},
+			"principal": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: `The service account that needs permission to create table and upload data to the BigQuery dataset.`,
+			},
+			"update_time": {
+				Type:     schema.TypeString,
+				Computed: true,
+				Description: `The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update.
+A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
+Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`,
+			},
+		},
+		UseJSONNumber: true,
+	}
+}
+
+func resourceSecurityCenterV2OrganizationSccBigQueryExportsCreate(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*transport_tpg.Config)
+	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return err
+	}
+
+	obj := make(map[string]interface{})
+	nameProp, err := expandSecurityCenterV2OrganizationSccBigQueryExportsName(d.Get("name"), d, config)
+	if err != nil {
+		return err
+	} else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) {
+		obj["name"] = nameProp
+	}
+	descriptionProp, err := expandSecurityCenterV2OrganizationSccBigQueryExportsDescription(d.Get("description"), d, config)
+	if err != nil {
+		return err
+	} else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) {
+		obj["description"] = descriptionProp
+	}
+	datasetProp, err := expandSecurityCenterV2OrganizationSccBigQueryExportsDataset(d.Get("dataset"), d, config)
+	if err != nil {
+		return err
+	} else if v, ok := d.GetOkExists("dataset"); !tpgresource.IsEmptyValue(reflect.ValueOf(datasetProp)) && (ok || !reflect.DeepEqual(v, datasetProp)) {
+		obj["dataset"] = datasetProp
+	}
+	filterProp, err := expandSecurityCenterV2OrganizationSccBigQueryExportsFilter(d.Get("filter"), d, config)
+	if err != nil {
+		return err
+	} else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(filterProp)) && (ok || !reflect.DeepEqual(v, filterProp)) {
+		obj["filter"] = filterProp
+	}
+
+	url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}organizations/{{organization}}/locations/{{location}}/bigQueryExports?bigQueryExportId={{big_query_export_id}}")
+	if err != nil {
+		return err
+	}
+
+	log.Printf("[DEBUG]
Creating new OrganizationSccBigQueryExports: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating OrganizationSccBigQueryExports: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating OrganizationSccBigQueryExports %q: %#v", d.Id(), res) + + return resourceSecurityCenterV2OrganizationSccBigQueryExportsRead(d, meta) +} + +func resourceSecurityCenterV2OrganizationSccBigQueryExportsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecurityCenterV2OrganizationSccBigQueryExports %q", d.Id())) + } + + if err := d.Set("name", flattenSecurityCenterV2OrganizationSccBigQueryExportsName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSccBigQueryExports: %s", err) + } + if err := d.Set("description", flattenSecurityCenterV2OrganizationSccBigQueryExportsDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSccBigQueryExports: %s", err) + } + if err := d.Set("dataset", flattenSecurityCenterV2OrganizationSccBigQueryExportsDataset(res["dataset"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSccBigQueryExports: %s", err) + } + if err := d.Set("create_time", flattenSecurityCenterV2OrganizationSccBigQueryExportsCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSccBigQueryExports: %s", err) + } + if err := d.Set("update_time", flattenSecurityCenterV2OrganizationSccBigQueryExportsUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSccBigQueryExports: %s", err) + } + if err := d.Set("most_recent_editor", flattenSecurityCenterV2OrganizationSccBigQueryExportsMostRecentEditor(res["mostRecentEditor"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSccBigQueryExports: %s", err) + } + if err := d.Set("principal", 
flattenSecurityCenterV2OrganizationSccBigQueryExportsPrincipal(res["principal"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSccBigQueryExports: %s", err) + } + if err := d.Set("filter", flattenSecurityCenterV2OrganizationSccBigQueryExportsFilter(res["filter"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSccBigQueryExports: %s", err) + } + + return nil +} + +func resourceSecurityCenterV2OrganizationSccBigQueryExportsUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + nameProp, err := expandSecurityCenterV2OrganizationSccBigQueryExportsName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandSecurityCenterV2OrganizationSccBigQueryExportsDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + datasetProp, err := expandSecurityCenterV2OrganizationSccBigQueryExportsDataset(d.Get("dataset"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dataset"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, datasetProp)) { + obj["dataset"] = datasetProp + } + filterProp, err := expandSecurityCenterV2OrganizationSccBigQueryExportsFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating OrganizationSccBigQueryExports %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("name") { + updateMask = append(updateMask, "name") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("dataset") { + updateMask = append(updateMask, "dataset") + } + + if d.HasChange("filter") { + updateMask = append(updateMask, "filter") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating 
OrganizationSccBigQueryExports %q: %s", d.Id(), err)
+		} else {
+			log.Printf("[DEBUG] Finished updating OrganizationSccBigQueryExports %q: %#v", d.Id(), res)
+		}
+
+	}
+
+	return resourceSecurityCenterV2OrganizationSccBigQueryExportsRead(d, meta)
+}
+
+func resourceSecurityCenterV2OrganizationSccBigQueryExportsDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*transport_tpg.Config)
+	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return err
+	}
+
+	billingProject := ""
+
+	url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}")
+	if err != nil {
+		return err
+	}
+
+	var obj map[string]interface{}
+
+	// err == nil indicates that the billing_project value was found
+	if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
+		billingProject = bp
+	}
+
+	headers := make(http.Header)
+
+	log.Printf("[DEBUG] Deleting OrganizationSccBigQueryExports %q", d.Id())
+	res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
+		Config:    config,
+		Method:    "DELETE",
+		Project:   billingProject,
+		RawURL:    url,
+		UserAgent: userAgent,
+		Body:      obj,
+		Timeout:   d.Timeout(schema.TimeoutDelete),
+		Headers:   headers,
+	})
+	if err != nil {
+		return transport_tpg.HandleNotFoundError(err, d, "OrganizationSccBigQueryExports")
+	}
+
+	log.Printf("[DEBUG] Finished deleting OrganizationSccBigQueryExports %q: %#v", d.Id(), res)
+	return nil
+}
+
+func resourceSecurityCenterV2OrganizationSccBigQueryExportsImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	config := meta.(*transport_tpg.Config)
+	if err := tpgresource.ParseImportId([]string{
+		"^organizations/(?P<organization>[^/]+)/locations/(?P<location>[^/]+)/bigQueryExports/(?P<big_query_export_id>[^/]+)$",
+		"^(?P<organization>[^/]+)/(?P<location>[^/]+)/(?P<big_query_export_id>[^/]+)$",
+	}, d, config); err != nil {
+		return nil, err
+	}
+
+	// Replace import id for the resource id
+	id, err := tpgresource.ReplaceVars(d, config, "organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}")
+	if err != nil {
+		return nil, fmt.Errorf("Error constructing id: %s", err)
+	}
+	d.SetId(id)
+
+	idParts := strings.Split(d.Id(), "/")
+	if len(idParts) != 6 {
+		return nil, fmt.Errorf("unexpected format of ID (%q), expected organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}", d.Id())
+	}
+
+	if err := d.Set("organization", idParts[1]); err != nil {
+		return nil, fmt.Errorf("error setting organization: %s", err)
+	}
+
+	if err := d.Set("big_query_export_id", idParts[5]); err != nil {
+		return nil, fmt.Errorf("error setting big_query_export_id: %s", err)
+	}
+
+	return []*schema.ResourceData{d}, nil
+}
+
+func flattenSecurityCenterV2OrganizationSccBigQueryExportsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenSecurityCenterV2OrganizationSccBigQueryExportsDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenSecurityCenterV2OrganizationSccBigQueryExportsDataset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenSecurityCenterV2OrganizationSccBigQueryExportsCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenSecurityCenterV2OrganizationSccBigQueryExportsUpdateTime(v interface{}, d
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2OrganizationSccBigQueryExportsMostRecentEditor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2OrganizationSccBigQueryExportsPrincipal(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2OrganizationSccBigQueryExportsFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandSecurityCenterV2OrganizationSccBigQueryExportsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterV2OrganizationSccBigQueryExportsDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterV2OrganizationSccBigQueryExportsDataset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterV2OrganizationSccBigQueryExportsFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_scc_big_query_exports_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_scc_big_query_exports_sweeper.go new file mode 100644 index 00000000000..eda75035d41 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_scc_big_query_exports_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package securitycenterv2 + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("SecurityCenterV2OrganizationSccBigQueryExports", testSweepSecurityCenterV2OrganizationSccBigQueryExports) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepSecurityCenterV2OrganizationSccBigQueryExports(region string) error { + resourceName := "SecurityCenterV2OrganizationSccBigQueryExports" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://securitycenter.googleapis.com/v2/organizations/{{organization}}/locations/{{location}}/bigQueryExports", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["organizationSccBigQueryExportss"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
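+		// Only resources whose trimmed names carry the shared test prefix are
+		// deleted; sweeper.IsSweepableTestResource is the guard that keeps the
+		// sweeper away from non-test infrastructure.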
+		if obj["id"] != nil {
+			name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string))
+		} else if obj["name"] != nil {
+			name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string))
+		} else {
+			log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName)
+			return nil
+		}
+		// Skip resources that shouldn't be swept
+		if !sweeper.IsSweepableTestResource(name) {
+			nonPrefixCount++
+			continue
+		}
+
+		deleteTemplate := "https://securitycenter.googleapis.com/v2/organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}"
+		deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate)
+		if err != nil {
+			log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err)
+			return nil
+		}
+		deleteUrl = deleteUrl + name
+
+		// Don't wait on operations as we may have a lot to delete
+		_, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
+			Config:    config,
+			Method:    "DELETE",
+			Project:   config.Project,
+			RawURL:    deleteUrl,
+			UserAgent: config.UserAgent,
+		})
+		if err != nil {
+			log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s: %s", deleteUrl, err)
+		} else {
+			log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name)
+		}
+	}
+
+	if nonPrefixCount > 0 {
+		log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount)
+	}
+
+	return nil
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_source.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_source.go
new file mode 100644
index 00000000000..c436264002e
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_organization_source.go
@@ -0,0 +1,340 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// ----------------------------------------------------------------------------
+//
+// *** AUTO GENERATED CODE *** Type: MMv1 ***
+//
+// ----------------------------------------------------------------------------
+//
+// This file is automatically generated by Magic Modules and manual
+// changes will be clobbered when the file is regenerated.
+//
+// Please read more about how to change this file in
+// .github/CONTRIBUTING.md.
+//
+// ----------------------------------------------------------------------------
+
+package securitycenterv2
+
+import (
+	"fmt"
+	"log"
+	"net/http"
+	"reflect"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+
+	"github.com/hashicorp/terraform-provider-google/google/tpgresource"
+	transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport"
+	"github.com/hashicorp/terraform-provider-google/google/verify"
+)
+
+func ResourceSecurityCenterV2OrganizationSource() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceSecurityCenterV2OrganizationSourceCreate,
+		Read:   resourceSecurityCenterV2OrganizationSourceRead,
+		Update: resourceSecurityCenterV2OrganizationSourceUpdate,
+		Delete: resourceSecurityCenterV2OrganizationSourceDelete,
+
+		Importer: &schema.ResourceImporter{
+			State: resourceSecurityCenterV2OrganizationSourceImport,
+		},
+
+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(20 * time.Minute),
+			Update: schema.DefaultTimeout(20 * time.Minute),
+			Delete: schema.DefaultTimeout(20 * time.Minute),
+		},
+
+		Schema: map[string]*schema.Schema{
+			"display_name": {
+				Type:         schema.TypeString,
+				Required:     true,
+				ValidateFunc: verify.ValidateRegexp(`[\p{L}\p{N}]([\p{L}\p{N}_- ]{0,30}[\p{L}\p{N}])?`),
+				Description: `The source's display name. A source's display name must be unique
+amongst its siblings, for example, two sources with the same parent
+can't share the same display name. The display name must start and end
+with a letter or digit, may contain letters, digits, spaces, hyphens,
+and underscores, and can be no longer than 32 characters.`,
+			},
+			"organization": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+				Description: `The organization whose Cloud Security Command Center the Source
+lives in.`,
+			},
+			"description": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				ValidateFunc: validation.StringLenBetween(0, 1024),
+				Description:  `The description of the source (max of 1024 characters).`,
+			},
+			"name": {
+				Type:     schema.TypeString,
+				Computed: true,
+				Description: `The resource name of this source, in the format
+'organizations/{{organization}}/sources/{{source}}'.`,
+			},
+		},
+		UseJSONNumber: true,
+	}
+}
+
+func resourceSecurityCenterV2OrganizationSourceCreate(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*transport_tpg.Config)
+	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return err
+	}
+
+	obj := make(map[string]interface{})
+	descriptionProp, err := expandSecurityCenterV2OrganizationSourceDescription(d.Get("description"), d, config)
+	if err != nil {
+		return err
+	} else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) {
+		obj["description"] = descriptionProp
+	}
+	displayNameProp, err := expandSecurityCenterV2OrganizationSourceDisplayName(d.Get("display_name"), d, config)
+	if err != nil {
+		return err
+	} else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) {
+		obj["displayName"] = displayNameProp
+	}
+
+	url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}organizations/{{organization}}/sources")
+	if err != nil {
+		return err
+	}
+
+	log.Printf("[DEBUG] Creating new OrganizationSource: %#v", obj)
+
billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating OrganizationSource: %s", err) + } + if err := d.Set("name", flattenSecurityCenterV2OrganizationSourceName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // `name` is autogenerated from the api so needs to be set post-create + name, ok := res["name"] + if !ok { + respBody, ok := res["response"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + + name, ok = respBody.(map[string]interface{})["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + } + if err := d.Set("name", name.(string)); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name.(string)) + + log.Printf("[DEBUG] Finished creating OrganizationSource %q: %#v", d.Id(), res) + + return resourceSecurityCenterV2OrganizationSourceRead(d, meta) +} + +func resourceSecurityCenterV2OrganizationSourceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecurityCenterV2OrganizationSource %q", d.Id())) + } + + if err := d.Set("name", flattenSecurityCenterV2OrganizationSourceName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSource: %s", err) + } + if err := d.Set("description", flattenSecurityCenterV2OrganizationSourceDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSource: %s", err) + } + if err := d.Set("display_name", flattenSecurityCenterV2OrganizationSourceDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSource: %s", err) + } + + return nil +} + +func resourceSecurityCenterV2OrganizationSourceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + descriptionProp, 
err := expandSecurityCenterV2OrganizationSourceDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + displayNameProp, err := expandSecurityCenterV2OrganizationSourceDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating OrganizationSource %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating OrganizationSource %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating OrganizationSource %q: %#v", d.Id(), res) + } + + } + + return resourceSecurityCenterV2OrganizationSourceRead(d, meta) +} + +func resourceSecurityCenterV2OrganizationSourceDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[WARNING] SecurityCenterV2 OrganizationSource resources"+ + " cannot be deleted from Google Cloud. 
The resource %s will be removed from Terraform"+
+ " state, but will still be present on Google Cloud.", d.Id())
+ d.SetId("")
+
+ return nil
+}
+
+func resourceSecurityCenterV2OrganizationSourceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+ config := meta.(*transport_tpg.Config)
+
+ // current import_formats can't import fields with forward slashes in their value
+ if err := tpgresource.ParseImportId([]string{"(?P<name>.+)"}, d, config); err != nil {
+ return nil, err
+ }
+
+ stringParts := strings.Split(d.Get("name").(string), "/")
+ if len(stringParts) != 4 {
+ return nil, fmt.Errorf(
+ "Saw %s when the name is expected to have shape %s",
+ d.Get("name"),
+ "organizations/{{organization}}/sources/{{source}}",
+ )
+ }
+
+ if err := d.Set("organization", stringParts[1]); err != nil {
+ return nil, fmt.Errorf("Error setting organization: %s", err)
+ }
+ return []*schema.ResourceData{d}, nil
+}
+
+func flattenSecurityCenterV2OrganizationSourceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+ return v
+}
+
+func flattenSecurityCenterV2OrganizationSourceDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+ return v
+}
+
+func flattenSecurityCenterV2OrganizationSourceDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+ return v
+}
+
+func expandSecurityCenterV2OrganizationSourceDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+ return v, nil
+}
+
+func expandSecurityCenterV2OrganizationSourceDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+ return v, nil
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_project_mute_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_project_mute_config.go
new file mode 100644
index 00000000000..ee904136142
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_project_mute_config.go
@@ -0,0 +1,468 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// ----------------------------------------------------------------------------
+//
+// *** AUTO GENERATED CODE *** Type: MMv1 ***
+//
+// ----------------------------------------------------------------------------
+//
+// This file is automatically generated by Magic Modules and manual
+// changes will be clobbered when the file is regenerated.
+//
+// Please read more about how to change this file in
+// .github/CONTRIBUTING.md. 
+//
+// ----------------------------------------------------------------------------
+
+package securitycenterv2
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+
+ "github.com/hashicorp/terraform-provider-google/google/tpgresource"
+ transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport"
+)
+
+func ResourceSecurityCenterV2ProjectMuteConfig() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceSecurityCenterV2ProjectMuteConfigCreate,
+ Read: resourceSecurityCenterV2ProjectMuteConfigRead,
+ Update: resourceSecurityCenterV2ProjectMuteConfigUpdate,
+ Delete: resourceSecurityCenterV2ProjectMuteConfigDelete,
+
+ Importer: &schema.ResourceImporter{
+ State: resourceSecurityCenterV2ProjectMuteConfigImport,
+ },
+
+ Timeouts: &schema.ResourceTimeout{
+ Create: schema.DefaultTimeout(20 * time.Minute),
+ Update: schema.DefaultTimeout(20 * time.Minute),
+ Delete: schema.DefaultTimeout(20 * time.Minute),
+ },
+
+ CustomizeDiff: customdiff.All(
+ tpgresource.DefaultProviderProject,
+ ),
+
+ Schema: map[string]*schema.Schema{
+ "filter": {
+ Type: schema.TypeString,
+ Required: true,
+ Description: `An expression that defines the filter to apply across create/update
+events of findings. While creating a filter string, be mindful of
+the scope in which the mute configuration is being created. For example,
+if a filter contains project = X but is created under the
+project = Y scope, it might not match any findings.`,
+ },
+ "mute_config_id": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ Description: `Unique identifier provided by the client within the parent scope.`,
+ },
+ "type": {
+ Type: schema.TypeString,
+ Required: true,
+ Description: `The type of the mute config.`,
+ },
+ "description": {
+ Type: schema.TypeString,
+ Optional: true,
+ Description: `A description of the mute config.`,
+ },
+ "location": {
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ Description: `Location ID is provided by the project. If not provided, global is used as the default.`,
+ Default: "global",
+ },
+ "create_time": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: `The time at which the mute config was created. This field is set by
+the server and will be ignored if provided on config creation.`,
+ },
+ "most_recent_editor": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: `Email address of the user who last edited the mute config. This
+field is set by the server and will be ignored if provided on
+config creation or update.`,
+ },
+ "name": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: `Name of the mute config. Its format is
+projects/{project}/locations/global/muteConfigs/{configId},
+folders/{folder}/locations/global/muteConfigs/{configId},
+or organizations/{organization}/locations/global/muteConfigs/{configId}`,
+ },
+ "update_time": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: `Output only. The most recent time at which the mute config was
+updated. 
This field is set by the server and will be ignored if +provided on config creation or update.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSecurityCenterV2ProjectMuteConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterV2ProjectMuteConfigDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + filterProp, err := expandSecurityCenterV2ProjectMuteConfigFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(filterProp)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + typeProp, err := expandSecurityCenterV2ProjectMuteConfigType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}projects/{{project}}/locations/{{location}}/muteConfigs?muteConfigId={{mute_config_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ProjectMuteConfig: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectMuteConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating ProjectMuteConfig: %s", err) + } + if err := d.Set("name", flattenSecurityCenterV2ProjectMuteConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/muteConfigs/{{mute_config_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating ProjectMuteConfig %q: %#v", d.Id(), res) + + return resourceSecurityCenterV2ProjectMuteConfigRead(d, meta) +} + +func resourceSecurityCenterV2ProjectMuteConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}projects/{{project}}/locations/{{location}}/muteConfigs/{{mute_config_id}}") + if err != nil { + return err + } + + billingProject := 
"" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectMuteConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecurityCenterV2ProjectMuteConfig %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading ProjectMuteConfig: %s", err) + } + + if err := d.Set("name", flattenSecurityCenterV2ProjectMuteConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectMuteConfig: %s", err) + } + if err := d.Set("description", flattenSecurityCenterV2ProjectMuteConfigDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectMuteConfig: %s", err) + } + if err := d.Set("filter", flattenSecurityCenterV2ProjectMuteConfigFilter(res["filter"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectMuteConfig: %s", err) + } + if err := d.Set("create_time", flattenSecurityCenterV2ProjectMuteConfigCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectMuteConfig: %s", err) + } + if err := d.Set("update_time", flattenSecurityCenterV2ProjectMuteConfigUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectMuteConfig: %s", err) + } + if err := d.Set("most_recent_editor", flattenSecurityCenterV2ProjectMuteConfigMostRecentEditor(res["mostRecentEditor"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectMuteConfig: %s", err) + } + if err := d.Set("type", flattenSecurityCenterV2ProjectMuteConfigType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectMuteConfig: %s", err) + } + + return nil +} + +func resourceSecurityCenterV2ProjectMuteConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectMuteConfig: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterV2ProjectMuteConfigDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + filterProp, err := expandSecurityCenterV2ProjectMuteConfigFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + typeProp, err := expandSecurityCenterV2ProjectMuteConfigType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && 
(ok || !reflect.DeepEqual(v, typeProp)) {
+ obj["type"] = typeProp
+ }
+
+ url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}projects/{{project}}/locations/{{location}}/muteConfigs/{{mute_config_id}}")
+ if err != nil {
+ return err
+ }
+
+ log.Printf("[DEBUG] Updating ProjectMuteConfig %q: %#v", d.Id(), obj)
+ headers := make(http.Header)
+ updateMask := []string{}
+
+ if d.HasChange("description") {
+ updateMask = append(updateMask, "description")
+ }
+
+ if d.HasChange("filter") {
+ updateMask = append(updateMask, "filter")
+ }
+
+ if d.HasChange("type") {
+ updateMask = append(updateMask, "type")
+ }
+ // updateMask is a URL parameter but not present in the schema, so ReplaceVars
+ // won't set it
+ url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")})
+ if err != nil {
+ return err
+ }
+
+ // err == nil indicates that the billing_project value was found
+ if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
+ billingProject = bp
+ }
+
+ // if updateMask is empty we are not updating anything so skip the post
+ if len(updateMask) > 0 {
+ res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
+ Config: config,
+ Method: "PATCH",
+ Project: billingProject,
+ RawURL: url,
+ UserAgent: userAgent,
+ Body: obj,
+ Timeout: d.Timeout(schema.TimeoutUpdate),
+ Headers: headers,
+ })
+
+ if err != nil {
+ return fmt.Errorf("Error updating ProjectMuteConfig %q: %s", d.Id(), err)
+ } else {
+ log.Printf("[DEBUG] Finished updating ProjectMuteConfig %q: %#v", d.Id(), res)
+ }
+
+ }
+
+ return resourceSecurityCenterV2ProjectMuteConfigRead(d, meta)
+}
+
+func resourceSecurityCenterV2ProjectMuteConfigDelete(d *schema.ResourceData, meta interface{}) error {
+ config := meta.(*transport_tpg.Config)
+ userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+ if err != nil {
+ return err
+ }
+
+ billingProject := ""
+
+ project, err := tpgresource.GetProject(d, config)
+ if err != nil {
+ return fmt.Errorf("Error fetching project for ProjectMuteConfig: %s", err)
+ }
+ billingProject = project
+
+ url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}projects/{{project}}/locations/{{location}}/muteConfigs/{{mute_config_id}}")
+ if err != nil {
+ return err
+ }
+
+ var obj map[string]interface{}
+
+ // err == nil indicates that the billing_project value was found
+ if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
+ billingProject = bp
+ }
+
+ headers := make(http.Header)
+
+ log.Printf("[DEBUG] Deleting ProjectMuteConfig %q", d.Id())
+ res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
+ Config: config,
+ Method: "DELETE",
+ Project: billingProject,
+ RawURL: url,
+ UserAgent: userAgent,
+ Body: obj,
+ Timeout: d.Timeout(schema.TimeoutDelete),
+ Headers: headers,
+ })
+ if err != nil {
+ return transport_tpg.HandleNotFoundError(err, d, "ProjectMuteConfig")
+ }
+
+ log.Printf("[DEBUG] Finished deleting ProjectMuteConfig %q: %#v", d.Id(), res)
+ return nil
+}
+
+func resourceSecurityCenterV2ProjectMuteConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+ config := meta.(*transport_tpg.Config)
+ if err := tpgresource.ParseImportId([]string{
+ "^projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/muteConfigs/(?P<mute_config_id>[^/]+)$",
+ "^(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<mute_config_id>[^/]+)$",
+ "^(?P<location>[^/]+)/(?P<mute_config_id>[^/]+)$",
+ }, d, config); err != nil {
+ return nil, err
+ }
+
+ // Replace import id for the resource id
+ id, err := 
tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/muteConfigs/{{mute_config_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenSecurityCenterV2ProjectMuteConfigName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2ProjectMuteConfigDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2ProjectMuteConfigFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2ProjectMuteConfigCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2ProjectMuteConfigUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2ProjectMuteConfigMostRecentEditor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterV2ProjectMuteConfigType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandSecurityCenterV2ProjectMuteConfigDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterV2ProjectMuteConfigFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterV2ProjectMuteConfigType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_project_mute_config_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_project_mute_config_sweeper.go new file mode 100644 index 00000000000..1d92eac3121 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_project_mute_config_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package securitycenterv2 + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("SecurityCenterV2ProjectMuteConfig", testSweepSecurityCenterV2ProjectMuteConfig) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepSecurityCenterV2ProjectMuteConfig(region string) error { + resourceName := "SecurityCenterV2ProjectMuteConfig" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://securitycenter.googleapis.com/v2/projects/{{project}}/locations/{{location}}/muteConfigs", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["projectMuteConfigs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
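+ // Illustrative example (hypothetical values): a list item such as
+ //   {"name": "projects/my-proj/locations/global/muteConfigs/tf-test-abc123"}
+ // resolves to the short name "tf-test-abc123" below, and only names that
+ // IsSweepableTestResource recognizes as test-prefixed (e.g. "tf-test...")
+ // are deleted.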
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://securitycenter.googleapis.com/v2/projects/{{project}}/locations/{{location}}/muteConfigs/{{mute_config_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_project_notification_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_project_notification_config.go new file mode 100644 index 00000000000..6454bcc2e64 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_project_notification_config.go @@ -0,0 +1,526 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+//
+// ----------------------------------------------------------------------------
+
+package securitycenterv2
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+
+ "github.com/hashicorp/terraform-provider-google/google/tpgresource"
+ transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport"
+)
+
+func ResourceSecurityCenterV2ProjectNotificationConfig() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceSecurityCenterV2ProjectNotificationConfigCreate,
+ Read: resourceSecurityCenterV2ProjectNotificationConfigRead,
+ Update: resourceSecurityCenterV2ProjectNotificationConfigUpdate,
+ Delete: resourceSecurityCenterV2ProjectNotificationConfigDelete,
+
+ Importer: &schema.ResourceImporter{
+ State: resourceSecurityCenterV2ProjectNotificationConfigImport,
+ },
+
+ Timeouts: &schema.ResourceTimeout{
+ Create: schema.DefaultTimeout(20 * time.Minute),
+ Update: schema.DefaultTimeout(20 * time.Minute),
+ Delete: schema.DefaultTimeout(20 * time.Minute),
+ },
+
+ CustomizeDiff: customdiff.All(
+ tpgresource.DefaultProviderProject,
+ ),
+
+ Schema: map[string]*schema.Schema{
+ "config_id": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ Description: `This must be unique within the project.`,
+ },
+ "streaming_config": {
+ Type: schema.TypeList,
+ Required: true,
+ Description: `The config for triggering streaming-based notifications.`,
+ MaxItems: 1,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "filter": {
+ Type: schema.TypeString,
+ Required: true,
+ Description: `Expression that defines the filter to apply across create/update
+events of assets or findings as specified by the event type. The
+expression is a list of zero or more restrictions combined via
+logical operators AND and OR. Parentheses are supported, and OR
+has higher precedence than AND.
+
+Restrictions have the form <field> <operator> <value> and may have
+a - character in front of them to indicate negation. The fields
+map to those defined in the corresponding resource.
+
+The supported operators are:
+
+* = for all value types.
+* >, <, >=, <= for integer values.
+* :, meaning substring matching, for strings.
+
+The supported value types are:
+
+* string literals in quotes.
+* integer literals without quotes.
+* boolean literals true and false without quotes.
+
+See
+[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications)
+for information on how to write a filter.`,
+ },
+ },
+ },
+ },
+ "description": {
+ Type: schema.TypeString,
+ Optional: true,
+ ValidateFunc: validation.StringLenBetween(0, 1024),
+ Description: `The description of the notification config (max of 1024 characters).`,
+ },
+ "location": {
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ Description: `Location ID of the parent project. Only global is supported at the moment.`,
+ Default: "global",
+ },
+ "pubsub_topic": {
+ Type: schema.TypeString,
+ Optional: true,
+ Description: `The Pub/Sub topic to send notifications to. 
Its format is +"projects/[project_id]/topics/[topic]".`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of this notification config, in the format +'projects/{{projectId}}/locations/{{location}}/notificationConfigs/{{config_id}}'.`, + }, + "service_account": { + Type: schema.TypeString, + Computed: true, + Description: `The service account that needs "pubsub.topics.publish" permission to +publish to the Pub/Sub topic.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSecurityCenterV2ProjectNotificationConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterV2ProjectNotificationConfigDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + pubsubTopicProp, err := expandSecurityCenterV2ProjectNotificationConfigPubsubTopic(d.Get("pubsub_topic"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("pubsub_topic"); !tpgresource.IsEmptyValue(reflect.ValueOf(pubsubTopicProp)) && (ok || !reflect.DeepEqual(v, pubsubTopicProp)) { + obj["pubsubTopic"] = pubsubTopicProp + } + streamingConfigProp, err := expandSecurityCenterV2ProjectNotificationConfigStreamingConfig(d.Get("streaming_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("streaming_config"); ok || !reflect.DeepEqual(v, streamingConfigProp) { + obj["streamingConfig"] = streamingConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}projects/{{project}}/locations/{{location}}/notificationConfigs?configId={{config_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ProjectNotificationConfig: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectNotificationConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating ProjectNotificationConfig: %s", err) + } + if err := d.Set("name", flattenSecurityCenterV2ProjectNotificationConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // `name` is autogenerated from the api so needs to be set post-create + name, ok := res["name"] + if !ok { + respBody, ok := res["response"] + if !ok { + return fmt.Errorf("Create 
response didn't contain critical fields. Create may not have succeeded.") + } + + name, ok = respBody.(map[string]interface{})["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + } + if err := d.Set("name", name.(string)); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name.(string)) + + log.Printf("[DEBUG] Finished creating ProjectNotificationConfig %q: %#v", d.Id(), res) + + return resourceSecurityCenterV2ProjectNotificationConfigRead(d, meta) +} + +func resourceSecurityCenterV2ProjectNotificationConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectNotificationConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecurityCenterV2ProjectNotificationConfig %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading ProjectNotificationConfig: %s", err) + } + + if err := d.Set("name", flattenSecurityCenterV2ProjectNotificationConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectNotificationConfig: %s", err) + } + if err := d.Set("description", flattenSecurityCenterV2ProjectNotificationConfigDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectNotificationConfig: %s", err) + } + if err := d.Set("pubsub_topic", flattenSecurityCenterV2ProjectNotificationConfigPubsubTopic(res["pubsubTopic"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectNotificationConfig: %s", err) + } + if err := d.Set("service_account", flattenSecurityCenterV2ProjectNotificationConfigServiceAccount(res["serviceAccount"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectNotificationConfig: %s", err) + } + if err := d.Set("streaming_config", flattenSecurityCenterV2ProjectNotificationConfigStreamingConfig(res["streamingConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectNotificationConfig: %s", err) + } + + return nil +} + +func resourceSecurityCenterV2ProjectNotificationConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectNotificationConfig: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := 
expandSecurityCenterV2ProjectNotificationConfigDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + pubsubTopicProp, err := expandSecurityCenterV2ProjectNotificationConfigPubsubTopic(d.Get("pubsub_topic"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("pubsub_topic"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, pubsubTopicProp)) { + obj["pubsubTopic"] = pubsubTopicProp + } + streamingConfigProp, err := expandSecurityCenterV2ProjectNotificationConfigStreamingConfig(d.Get("streaming_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("streaming_config"); ok || !reflect.DeepEqual(v, streamingConfigProp) { + obj["streamingConfig"] = streamingConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating ProjectNotificationConfig %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("pubsub_topic") { + updateMask = append(updateMask, "pubsubTopic") + } + + if d.HasChange("streaming_config") { + updateMask = append(updateMask, "streamingConfig.filter") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating ProjectNotificationConfig %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ProjectNotificationConfig %q: %#v", d.Id(), res) + } + + } + + return resourceSecurityCenterV2ProjectNotificationConfigRead(d, meta) +} + +func resourceSecurityCenterV2ProjectNotificationConfigDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectNotificationConfig: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting ProjectNotificationConfig %q", d.Id()) + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
+ Config: config,
+ Method: "DELETE",
+ Project: billingProject,
+ RawURL: url,
+ UserAgent: userAgent,
+ Body: obj,
+ Timeout: d.Timeout(schema.TimeoutDelete),
+ Headers: headers,
+ })
+ if err != nil {
+ return transport_tpg.HandleNotFoundError(err, d, "ProjectNotificationConfig")
+ }
+
+ log.Printf("[DEBUG] Finished deleting ProjectNotificationConfig %q: %#v", d.Id(), res)
+ return nil
+}
+
+func resourceSecurityCenterV2ProjectNotificationConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+
+ config := meta.(*transport_tpg.Config)
+
+ // current import_formats can't import fields with forward slashes in their value
+ if err := tpgresource.ParseImportId([]string{"(?P<name>.+)"}, d, config); err != nil {
+ return nil, err
+ }
+
+ stringParts := strings.Split(d.Get("name").(string), "/")
+ if len(stringParts) < 2 {
+ return nil, fmt.Errorf(
+ "Could not split project from name: %s",
+ d.Get("name"),
+ )
+ }
+
+ if err := d.Set("project", stringParts[1]); err != nil {
+ return nil, fmt.Errorf("Error setting project: %s", err)
+ }
+ return []*schema.ResourceData{d}, nil
+}
+
+func flattenSecurityCenterV2ProjectNotificationConfigName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+ return v
+}
+
+func flattenSecurityCenterV2ProjectNotificationConfigDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+ return v
+}
+
+func flattenSecurityCenterV2ProjectNotificationConfigPubsubTopic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+ return v
+}
+
+func flattenSecurityCenterV2ProjectNotificationConfigServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+ return v
+}
+
+func flattenSecurityCenterV2ProjectNotificationConfigStreamingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+ if v == nil {
+ return nil
+ }
+ original := v.(map[string]interface{})
+ transformed := make(map[string]interface{})
+ transformed["filter"] =
+ flattenSecurityCenterV2ProjectNotificationConfigStreamingConfigFilter(original["filter"], d, config)
+ return []interface{}{transformed}
+}
+func flattenSecurityCenterV2ProjectNotificationConfigStreamingConfigFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+ return v
+}
+
+func expandSecurityCenterV2ProjectNotificationConfigDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+ return v, nil
+}
+
+func expandSecurityCenterV2ProjectNotificationConfigPubsubTopic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+ return v, nil
+}
+
+func expandSecurityCenterV2ProjectNotificationConfigStreamingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+ l := v.([]interface{})
+ if len(l) == 0 {
+ return nil, nil
+ }
+
+ if l[0] == nil {
+ transformed := make(map[string]interface{})
+ return transformed, nil
+ }
+ raw := l[0]
+ original := raw.(map[string]interface{})
+ transformed := make(map[string]interface{})
+
+ transformedFilter, err := expandSecurityCenterV2ProjectNotificationConfigStreamingConfigFilter(original["filter"], d, config)
+ if err != nil {
+ return nil, err
+ } else {
+ transformed["filter"] = transformedFilter
+ }
+
+ return transformed, nil
+}
+
+func 
expandSecurityCenterV2ProjectNotificationConfigStreamingConfigFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_project_notification_config_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_project_notification_config_sweeper.go new file mode 100644 index 00000000000..9034f71db04 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_project_notification_config_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package securitycenterv2 + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("SecurityCenterV2ProjectNotificationConfig", testSweepSecurityCenterV2ProjectNotificationConfig) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepSecurityCenterV2ProjectNotificationConfig(region string) error { + resourceName := "SecurityCenterV2ProjectNotificationConfig" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://securitycenter.googleapis.com/v2/projects/{{project}}/locations/{{location}}/notificationConfigs", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := 
res["projectNotificationConfigs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://securitycenter.googleapis.com/v2/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_project_scc_big_query_export.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_project_scc_big_query_export.go new file mode 100644 index 00000000000..27bb68a9e4d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_project_scc_big_query_export.go @@ -0,0 +1,503 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+//
+// ----------------------------------------------------------------------------
+
+package securitycenterv2
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+
+ "github.com/hashicorp/terraform-provider-google/google/tpgresource"
+ transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport"
+)
+
+func ResourceSecurityCenterV2ProjectSccBigQueryExport() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceSecurityCenterV2ProjectSccBigQueryExportCreate,
+ Read: resourceSecurityCenterV2ProjectSccBigQueryExportRead,
+ Update: resourceSecurityCenterV2ProjectSccBigQueryExportUpdate,
+ Delete: resourceSecurityCenterV2ProjectSccBigQueryExportDelete,
+
+ Importer: &schema.ResourceImporter{
+ State: resourceSecurityCenterV2ProjectSccBigQueryExportImport,
+ },
+
+ Timeouts: &schema.ResourceTimeout{
+ Create: schema.DefaultTimeout(20 * time.Minute),
+ Update: schema.DefaultTimeout(20 * time.Minute),
+ Delete: schema.DefaultTimeout(20 * time.Minute),
+ },
+
+ CustomizeDiff: customdiff.All(
+ tpgresource.DefaultProviderProject,
+ ),
+
+ Schema: map[string]*schema.Schema{
+ "big_query_export_id": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ Description: `This must be unique within the project.`,
+ },
+ "dataset": {
+ Type: schema.TypeString,
+ Optional: true,
+ Description: `The dataset to write findings' updates to.
+Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]".
+BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).`,
+ },
+ "description": {
+ Type: schema.TypeString,
+ Optional: true,
+ ValidateFunc: validation.StringLenBetween(0, 1024),
+ Description: `The description of the export (max of 1024 characters).`,
+ },
+ "filter": {
+ Type: schema.TypeString,
+ Optional: true,
+ Description: `Expression that defines the filter to apply across create/update
+events of findings. The
+expression is a list of zero or more restrictions combined via
+logical operators AND and OR. Parentheses are supported, and OR
+has higher precedence than AND.
+
+Restrictions have the form <field> <operator> <value> and may have
+a - character in front of them to indicate negation. The fields
+map to those defined in the corresponding resource.
+
+The supported operators are:
+
+* = for all value types.
+* >, <, >=, <= for integer values.
+* :, meaning substring matching, for strings.
+
+The supported value types are:
+
+* string literals in quotes.
+* integer literals without quotes.
+* boolean literals true and false without quotes.
+
+See
+[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications)
+for information on how to write a filter.`,
+ },
+ "location": {
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ Description: `Location ID is provided by the project. If not provided, global is used as the default.`,
+ Default: "global",
+ },
+ "create_time": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: `The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export creation.
+A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
+Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "most_recent_editor": { + Type: schema.TypeString, + Computed: true, + Description: `Email address of the user who last edited the BigQuery export. +This field is set by the server and will be ignored if provided on export creation or update.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of this export, in the format +'projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}'. +This field is provided in responses, and is ignored when provided in create requests.`, + }, + "principal": { + Type: schema.TypeString, + Computed: true, + Description: `The service account that needs permission to create table and upload data to the BigQuery dataset.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSecurityCenterV2ProjectSccBigQueryExportCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterV2ProjectSccBigQueryExportDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + datasetProp, err := expandSecurityCenterV2ProjectSccBigQueryExportDataset(d.Get("dataset"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dataset"); !tpgresource.IsEmptyValue(reflect.ValueOf(datasetProp)) && (ok || !reflect.DeepEqual(v, datasetProp)) { + obj["dataset"] = datasetProp + } + filterProp, err := expandSecurityCenterV2ProjectSccBigQueryExportFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(filterProp)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}projects/{{project}}/locations/{{location}}/bigQueryExports?bigQueryExportId={{big_query_export_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ProjectSccBigQueryExport: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectSccBigQueryExport: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: 
obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating ProjectSccBigQueryExport: %s", err) + } + if err := d.Set("name", flattenSecurityCenterV2ProjectSccBigQueryExportName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating ProjectSccBigQueryExport %q: %#v", d.Id(), res) + + return resourceSecurityCenterV2ProjectSccBigQueryExportRead(d, meta) +} + +func resourceSecurityCenterV2ProjectSccBigQueryExportRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectSccBigQueryExport: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecurityCenterV2ProjectSccBigQueryExport %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading ProjectSccBigQueryExport: %s", err) + } + + if err := d.Set("name", flattenSecurityCenterV2ProjectSccBigQueryExportName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectSccBigQueryExport: %s", err) + } + if err := d.Set("description", flattenSecurityCenterV2ProjectSccBigQueryExportDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectSccBigQueryExport: %s", err) + } + if err := d.Set("dataset", flattenSecurityCenterV2ProjectSccBigQueryExportDataset(res["dataset"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectSccBigQueryExport: %s", err) + } + if err := d.Set("create_time", flattenSecurityCenterV2ProjectSccBigQueryExportCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectSccBigQueryExport: %s", err) + } + if err := d.Set("update_time", flattenSecurityCenterV2ProjectSccBigQueryExportUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectSccBigQueryExport: %s", err) + } + if err := d.Set("most_recent_editor", flattenSecurityCenterV2ProjectSccBigQueryExportMostRecentEditor(res["mostRecentEditor"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectSccBigQueryExport: %s", err) + } + if err := d.Set("principal", flattenSecurityCenterV2ProjectSccBigQueryExportPrincipal(res["principal"], d, config)); err != nil { + return fmt.Errorf("Error reading 
ProjectSccBigQueryExport: %s", err) + } + if err := d.Set("filter", flattenSecurityCenterV2ProjectSccBigQueryExportFilter(res["filter"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectSccBigQueryExport: %s", err) + } + + return nil +} + +func resourceSecurityCenterV2ProjectSccBigQueryExportUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectSccBigQueryExport: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterV2ProjectSccBigQueryExportDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + datasetProp, err := expandSecurityCenterV2ProjectSccBigQueryExportDataset(d.Get("dataset"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dataset"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, datasetProp)) { + obj["dataset"] = datasetProp + } + filterProp, err := expandSecurityCenterV2ProjectSccBigQueryExportFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating ProjectSccBigQueryExport %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("dataset") { + updateMask = append(updateMask, "dataset") + } + + if d.HasChange("filter") { + updateMask = append(updateMask, "filter") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating ProjectSccBigQueryExport %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ProjectSccBigQueryExport %q: %#v", d.Id(), res) + } + + } + + return resourceSecurityCenterV2ProjectSccBigQueryExportRead(d, meta) +} + +func resourceSecurityCenterV2ProjectSccBigQueryExportDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return err
+	}
+
+	billingProject := ""
+
+	project, err := tpgresource.GetProject(d, config)
+	if err != nil {
+		return fmt.Errorf("Error fetching project for ProjectSccBigQueryExport: %s", err)
+	}
+	billingProject = project
+
+	url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterV2BasePath}}projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}")
+	if err != nil {
+		return err
+	}
+
+	var obj map[string]interface{}
+
+	// err == nil indicates that the billing_project value was found
+	if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
+		billingProject = bp
+	}
+
+	headers := make(http.Header)
+
+	log.Printf("[DEBUG] Deleting ProjectSccBigQueryExport %q", d.Id())
+	res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
+		Config:    config,
+		Method:    "DELETE",
+		Project:   billingProject,
+		RawURL:    url,
+		UserAgent: userAgent,
+		Body:      obj,
+		Timeout:   d.Timeout(schema.TimeoutDelete),
+		Headers:   headers,
+	})
+	if err != nil {
+		return transport_tpg.HandleNotFoundError(err, d, "ProjectSccBigQueryExport")
+	}
+
+	log.Printf("[DEBUG] Finished deleting ProjectSccBigQueryExport %q: %#v", d.Id(), res)
+	return nil
+}
+
+func resourceSecurityCenterV2ProjectSccBigQueryExportImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	config := meta.(*transport_tpg.Config)
+	if err := tpgresource.ParseImportId([]string{
+		"^projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/bigQueryExports/(?P<big_query_export_id>[^/]+)$",
+		"^(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<big_query_export_id>[^/]+)$",
+		"^(?P<location>[^/]+)/(?P<big_query_export_id>[^/]+)$",
+	}, d, config); err != nil {
+		return nil, err
+	}
+
+	// Replace import id for the resource id
+	id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}")
+	if err != nil {
+		return nil, fmt.Errorf("Error constructing id: %s", err)
+	}
+	d.SetId(id)
+
+	return []*schema.ResourceData{d}, nil
+}
+
+func flattenSecurityCenterV2ProjectSccBigQueryExportName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenSecurityCenterV2ProjectSccBigQueryExportDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenSecurityCenterV2ProjectSccBigQueryExportDataset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenSecurityCenterV2ProjectSccBigQueryExportCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenSecurityCenterV2ProjectSccBigQueryExportUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenSecurityCenterV2ProjectSccBigQueryExportMostRecentEditor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenSecurityCenterV2ProjectSccBigQueryExportPrincipal(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenSecurityCenterV2ProjectSccBigQueryExportFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func expandSecurityCenterV2ProjectSccBigQueryExportDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func 
expandSecurityCenterV2ProjectSccBigQueryExportDataset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterV2ProjectSccBigQueryExportFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_project_scc_big_query_export_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_project_scc_big_query_export_sweeper.go new file mode 100644 index 00000000000..86e49a875e5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenterv2/resource_scc_v2_project_scc_big_query_export_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package securitycenterv2 + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("SecurityCenterV2ProjectSccBigQueryExport", testSweepSecurityCenterV2ProjectSccBigQueryExport) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepSecurityCenterV2ProjectSccBigQueryExport(region string) error { + resourceName := "SecurityCenterV2ProjectSccBigQueryExport" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://securitycenter.googleapis.com/v2/projects/{{project}}/locations/{{location}}/bigQueryExports", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + 
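+	// Shape note (assumption inferred from the handling below): the list call
+	// is expected to return a JSON object keyed by resource type, e.g.
+	// {"projectSccBigQueryExports": [{"name": "projects/p/locations/global/bigQueryExports/export-1"}]},
+	// so the sweeper reads the "projectSccBigQueryExports" key and derives each
+	// sweepable name from the trailing path segment of "name" or "id".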
if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["projectSccBigQueryExports"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://securitycenter.googleapis.com/v2/projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicenetworking/resource_service_networking_connection.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicenetworking/resource_service_networking_connection.go index 0c95ae62ddd..9dc9dd2f297 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicenetworking/resource_service_networking_connection.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicenetworking/resource_service_networking_connection.go @@ -71,6 +71,11 @@ func ResourceServiceNetworkingConnection() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "update_on_creation_fail": { + Type: schema.TypeBool, + Optional: true, + Description: `When set to true, enforce an update of the reserved peering ranges on the existing service networking connection in case of a new connection creation failure.`, + }, }, UseJSONNumber: true, } @@ -120,7 +125,21 @@ func resourceServiceNetworkingConnectionCreate(d *schema.ResourceData, meta inte } if err := ServiceNetworkingOperationWaitTimeHW(config, op, "Create Service Networking Connection", userAgent, project, d.Timeout(schema.TimeoutCreate)); err != nil { - return err + if strings.Contains(err.Error(), "Cannot modify allocated ranges in CreateConnection.") && d.Get("update_on_creation_fail").(bool) { + 
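+			// Recovery path: the reserved ranges are already allocated to an
+			// existing connection, so patch that connection in place rather than
+			// failing. The "-" connection id appears to address the service's
+			// existing connection; UpdateMask limits the change to
+			// reservedPeeringRanges, and Force(true) allows currently allocated
+			// ranges to be replaced.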
patchCall := config.NewServiceNetworkingClient(userAgent).Services.Connections.Patch(parentService+"/connections/-", connection).UpdateMask("reservedPeeringRanges").Force(true) + if config.UserProjectOverride { + patchCall.Header().Add("X-Goog-User-Project", project) + } + op, err := patchCall.Do() + if err != nil { + return err + } + if err := ServiceNetworkingOperationWaitTimeHW(config, op, "Update Service Networking Connection", userAgent, project, d.Timeout(schema.TimeoutUpdate)); err != nil { + return err + } + } else { + return err + } } connectionId := &connectionId{ diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/siteverification/data_source_google_site_verification_token.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/siteverification/data_source_google_site_verification_token.go new file mode 100644 index 00000000000..b0bbf20fecc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/siteverification/data_source_google_site_verification_token.go @@ -0,0 +1,149 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package siteverification + +import ( + "fmt" + "log" + "net/http" + "reflect" + "regexp" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func DataSourceSiteVerificationToken() *schema.Resource { + return &schema.Resource{ + Read: dataSourceSiteVerificationTokenRead, + + Timeouts: &schema.ResourceTimeout{ + Read: schema.DefaultTimeout(5 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "identifier": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The site identifier. If the type is set to SITE, the identifier is a URL. If the type is +set to INET_DOMAIN, the identifier is a domain name.`, + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"INET_DOMAIN", "SITE"}), + Description: `The type of resource to be verified, either a domain or a web site. Possible values: ["INET_DOMAIN", "SITE"]`, + }, + "verification_method": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"ANALYTICS", "DNS_CNAME", "DNS_TXT", "FILE", "META", "TAG_MANAGER"}), + Description: `The verification method for the Site Verification system to use to verify +this site or domain. 
Possible values: ["ANALYTICS", "DNS_CNAME", "DNS_TXT", "FILE", "META", "TAG_MANAGER"]`, + }, + "token": { + Type: schema.TypeString, + Computed: true, + Description: `The returned token for use in subsequent verification steps.`, + }, + }, + UseJSONNumber: true, + } +} + +func dataSourceSiteVerificationTokenRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + site := make(map[string]interface{}) + typeProp, err := expandSiteVerificationTokenType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + site["type"] = typeProp + } + identifierProp, err := expandSiteVerificationTokenIdentifier(d.Get("identifier"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("identifier"); !tpgresource.IsEmptyValue(reflect.ValueOf(identifierProp)) && (ok || !reflect.DeepEqual(v, identifierProp)) { + site["identifier"] = identifierProp + } + obj["site"] = site + verification_methodProp, err := expandSiteVerificationTokenVerificationMethod(d.Get("verification_method"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("verification_method"); !tpgresource.IsEmptyValue(reflect.ValueOf(verification_methodProp)) && (ok || !reflect.DeepEqual(v, verification_methodProp)) { + obj["verificationMethod"] = verification_methodProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SiteVerificationBasePath}}token") + if err != nil { + return err + } + + log.Printf("[DEBUG] Reading Token: %#v", obj) + billingProject := "" + + if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { + billingProject = parts[1] + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error reading Token: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{identifier}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + if token, ok := res["token"].(string); ok { + d.Set("token", token) + } + + log.Printf("[DEBUG] Finished reading Token %q: %#v", d.Id(), res) + + return nil +} + +func expandSiteVerificationTokenType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSiteVerificationTokenIdentifier(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSiteVerificationTokenVerificationMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/siteverification/resource_site_verification_owner.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/siteverification/resource_site_verification_owner.go
new file mode 100644
index 00000000000..fdc2820a240
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/siteverification/resource_site_verification_owner.go
@@ -0,0 +1,277 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+package siteverification
+
+import (
+	"fmt"
+	"log"
+	"net/http"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/hashicorp/terraform-provider-google/google/tpgresource"
+	transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport"
+)
+
+func ResourceSiteVerificationOwner() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceSiteVerificationOwnerCreate,
+		Read:   resourceSiteVerificationOwnerRead,
+		Delete: resourceSiteVerificationOwnerDelete,
+
+		Importer: &schema.ResourceImporter{
+			State: resourceSiteVerificationOwnerImport,
+		},
+
+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(20 * time.Minute),
+			Delete: schema.DefaultTimeout(20 * time.Minute),
+		},
+
+		Schema: map[string]*schema.Schema{
+			"email": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: `The email address of the owner.`,
+			},
+			"web_resource_id": {
+				Type:             schema.TypeString,
+				Required:         true,
+				ForceNew:         true,
+				DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName,
+				Description:      `The id of the Web Resource to add this owner to, in the form "webResource/<resource_id>".`,
+			},
+		},
+		UseJSONNumber: true,
+	}
+}
+
+func resourceSiteVerificationOwnerCreate(d *schema.ResourceData, meta interface{}) error {
+	email := d.Get("email").(string)
+
+	config := meta.(*transport_tpg.Config)
+	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return err
+	}
+
+	log.Printf("[DEBUG] Reading existing WebResource")
+
+	url, err := tpgresource.ReplaceVars(d, config, "{{SiteVerificationBasePath}}{{web_resource_id}}")
+	if err != nil {
+		return err
+	}
+
+	billingProject := ""
+
+	// err == nil indicates that the billing_project value was found
+	if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
+		billingProject = bp
+	}
+
+	headers := make(http.Header)
+	obj, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
+		Config:    config,
+		Method:    "GET",
+		Project:   billingProject,
+		RawURL:    url,
+		UserAgent: userAgent,
+		Headers:   headers,
+	})
+	if err != nil {
+		return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SiteVerificationWebResource %q", d.Id()))
+	}
+
+	log.Printf("[DEBUG] Finished reading WebResource: %#v", obj)
+
+	owners, ok := obj["owners"].([]interface{})
+	if !ok {
+		return fmt.Errorf("WebResource has no existing owners")
+	}
+	found := false
+	for _, owner := range owners {
+		if s, ok := owner.(string); ok && s == email {
+			found = true
+		}
+	}
+	if !found {
+		owners = append(owners, email)
+		obj["owners"] = owners
+
+		log.Printf("[DEBUG] Creating new Owner: %#v", obj)
+
+		headers = make(http.Header)
+		res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
+			Config:    config,
+			Method:    "PUT",
+			Project:   billingProject,
+			RawURL:    url,
+			UserAgent: userAgent,
+			Body:      obj,
+			Timeout:   d.Timeout(schema.TimeoutCreate),
+			Headers:   headers,
+		})
+		if err != nil {
+			return fmt.Errorf("Error creating Owner: %s", err)
+		}
+
+		log.Printf("[DEBUG] Finished creating Owner %q: 
%#v", d.Id(), res) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{web_resource_id}}/{{email}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return resourceSiteVerificationOwnerRead(d, meta) +} + +func resourceSiteVerificationOwnerRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SiteVerificationBasePath}}{{web_resource_id}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SiteVerificationOwner %q", d.Id())) + } + + owners, ok := res["owners"].([]interface{}) + if !ok { + return fmt.Errorf("WebResource has no owners") + } + + found := false + email := d.Get("email").(string) + for _, owner := range owners { + if s, ok := owner.(string); ok && s == email { + found = true + } + } + + if !found { + // Owner isn't there any more - remove from the state. + log.Printf("[DEBUG] Removing SiteVerificationOwner because it couldn't be matched.") + d.SetId("") + return nil + } + + return nil +} + +func resourceSiteVerificationOwnerDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{SiteVerificationBasePath}}{{web_resource_id}}") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + log.Printf("[DEBUG] Reading existing WebResource") + + headers := make(http.Header) + obj, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SiteVerificationWebResource %q", d.Id())) + } + + log.Printf("[DEBUG] Finished reading WebResource: %#v", obj) + + owners, ok := obj["owners"].([]interface{}) + if !ok { + return fmt.Errorf("WebResource has no existing owners") + } + var updatedOwners []interface{} + email := d.Get("email").(string) + for _, owner := range owners { + if s, ok := owner.(string); ok { + if s != email { + updatedOwners = append(updatedOwners, s) + } + } + } + obj["owners"] = updatedOwners + + headers = make(http.Header) + + log.Printf("[DEBUG] Deleting Owner %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Owner") + } + + log.Printf("[DEBUG] 
Finished deleting Owner %q: %#v", d.Id(), res)
+	return nil
+}
+
+func resourceSiteVerificationOwnerImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	config := meta.(*transport_tpg.Config)
+	if err := tpgresource.ParseImportId([]string{
+		"^(?P<web_resource_id>webResource/[^/]+)/(?P<email>[^/]+)$",
+	}, d, config); err != nil {
+		return nil, err
+	}
+
+	// Replace import id for the resource id
+	id, err := tpgresource.ReplaceVars(d, config, "{{web_resource_id}}/{{email}}")
+	if err != nil {
+		return nil, fmt.Errorf("Error constructing id: %s", err)
+	}
+	d.SetId(id)
+
+	log.Printf("[DEBUG] Finished importing Owner %q", d.Id())
+
+	return []*schema.ResourceData{d}, nil
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/siteverification/resource_site_verification_web_resource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/siteverification/resource_site_verification_web_resource.go
new file mode 100644
index 00000000000..e4439074243
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/siteverification/resource_site_verification_web_resource.go
@@ -0,0 +1,344 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// ----------------------------------------------------------------------------
+//
+//	*** AUTO GENERATED CODE *** Type: MMv1 ***
+//
+// ----------------------------------------------------------------------------
+//
+// This file is automatically generated by Magic Modules and manual
+// changes will be clobbered when the file is regenerated.
+//
+// Please read more about how to change this file in
+// .github/CONTRIBUTING.md.
+//
+// ----------------------------------------------------------------------------
+
+package siteverification
+
+import (
+	"fmt"
+	"log"
+	"net/http"
+	"reflect"
+	"regexp"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+
+	"github.com/hashicorp/terraform-provider-google/google/tpgresource"
+	transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport"
+	"github.com/hashicorp/terraform-provider-google/google/verify"
+)
+
+func ResourceSiteVerificationWebResource() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceSiteVerificationWebResourceCreate,
+		Read:   resourceSiteVerificationWebResourceRead,
+		Delete: resourceSiteVerificationWebResourceDelete,
+
+		Importer: &schema.ResourceImporter{
+			State: resourceSiteVerificationWebResourceImport,
+		},
+
+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(60 * time.Minute),
+			Delete: schema.DefaultTimeout(20 * time.Minute),
+		},
+
+		Schema: map[string]*schema.Schema{
+			"site": {
+				Type:        schema.TypeList,
+				Required:    true,
+				ForceNew:    true,
+				Description: `Container for the address and type of a site for which a verification token will be verified.`,
+				MaxItems:    1,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"identifier": {
+							Type:     schema.TypeString,
+							Required: true,
+							ForceNew: true,
+							Description: `The site identifier. If the type is set to SITE, the identifier is a URL. If the type is
+set to INET_DOMAIN, the identifier is a domain name.`,
+						},
+						"type": {
+							Type:         schema.TypeString,
+							Required:     true,
+							ForceNew:     true,
+							ValidateFunc: verify.ValidateEnum([]string{"INET_DOMAIN", "SITE"}),
+							Description:  `The type of resource to be verified. 
Possible values: ["INET_DOMAIN", "SITE"]`, + }, + }, + }, + }, + "verification_method": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"ANALYTICS", "DNS_CNAME", "DNS_TXT", "FILE", "META", "TAG_MANAGER"}), + Description: `The verification method for the Site Verification system to use to verify +this site or domain. Possible values: ["ANALYTICS", "DNS_CNAME", "DNS_TXT", "FILE", "META", "TAG_MANAGER"]`, + }, + "owners": { + Type: schema.TypeList, + Computed: true, + Description: `The email addresses of all direct, verified owners of this exact property. Indirect owners — +for example verified owners of the containing domain—are not included in this list.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "web_resource_id": { + Type: schema.TypeString, + Computed: true, + Description: `The string used to identify this web resource.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSiteVerificationWebResourceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + siteProp, err := expandSiteVerificationWebResourceSite(d.Get("site"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("site"); !tpgresource.IsEmptyValue(reflect.ValueOf(siteProp)) && (ok || !reflect.DeepEqual(v, siteProp)) { + obj["site"] = siteProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SiteVerificationBasePath}}webResource?verificationMethod={{verification_method}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new WebResource: %#v", obj) + billingProject := "" + + if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { + billingProject = parts[1] + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSiteVerificationRetryableError}, + }) + if err != nil { + return fmt.Errorf("Error creating WebResource: %s", err) + } + if err := d.Set("web_resource_id", flattenSiteVerificationWebResourceWebResourceId(res["id"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "web_resource_id": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "webResource/{{web_resource_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating WebResource %q: %#v", d.Id(), res) + + return resourceSiteVerificationWebResourceRead(d, meta) +} + +func resourceSiteVerificationWebResourceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SiteVerificationBasePath}}webResource/{{web_resource_id}}") + if err != nil { + return err + } + + billingProject := "" + 
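+	// Best-effort billing project detection: if the request URL embeds a
+	// ".../projects/<project>/..." segment, reuse that project for billing;
+	// an explicit billing_project on the provider still takes precedence below.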
+	if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil {
+		billingProject = parts[1]
+	}
+
+	// err == nil indicates that the billing_project value was found
+	if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
+		billingProject = bp
+	}
+
+	headers := make(http.Header)
+	res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
+		Config:               config,
+		Method:               "GET",
+		Project:              billingProject,
+		RawURL:               url,
+		UserAgent:            userAgent,
+		Headers:              headers,
+		ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSiteVerificationRetryableError},
+	})
+	if err != nil {
+		return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SiteVerificationWebResource %q", d.Id()))
+	}
+
+	if err := d.Set("web_resource_id", flattenSiteVerificationWebResourceWebResourceId(res["id"], d, config)); err != nil {
+		return fmt.Errorf("Error reading WebResource: %s", err)
+	}
+	if err := d.Set("site", flattenSiteVerificationWebResourceSite(res["site"], d, config)); err != nil {
+		return fmt.Errorf("Error reading WebResource: %s", err)
+	}
+	if err := d.Set("owners", flattenSiteVerificationWebResourceOwners(res["owners"], d, config)); err != nil {
+		return fmt.Errorf("Error reading WebResource: %s", err)
+	}
+
+	return nil
+}
+
+func resourceSiteVerificationWebResourceDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*transport_tpg.Config)
+	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return err
+	}
+
+	billingProject := ""
+
+	url, err := tpgresource.ReplaceVars(d, config, "{{SiteVerificationBasePath}}webResource/{{web_resource_id}}")
+	if err != nil {
+		return err
+	}
+
+	var obj map[string]interface{}
+	if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil {
+		billingProject = parts[1]
+	}
+
+	// err == nil indicates that the billing_project value was found
+	if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
+		billingProject = bp
+	}
+
+	headers := make(http.Header)
+
+	log.Printf("[DEBUG] Deleting WebResource %q", d.Id())
+	res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
+		Config:               config,
+		Method:               "DELETE",
+		Project:              billingProject,
+		RawURL:               url,
+		UserAgent:            userAgent,
+		Body:                 obj,
+		Timeout:              d.Timeout(schema.TimeoutDelete),
+		Headers:              headers,
+		ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSiteVerificationRetryableError},
+	})
+	if err != nil {
+		return transport_tpg.HandleNotFoundError(err, d, "WebResource")
+	}
+
+	log.Printf("[DEBUG] Finished deleting WebResource %q: %#v", d.Id(), res)
+	return nil
+}
+
+func resourceSiteVerificationWebResourceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	config := meta.(*transport_tpg.Config)
+	if err := tpgresource.ParseImportId([]string{
+		"^webResource/(?P<web_resource_id>[^/]+)$",
+		"^(?P<web_resource_id>[^/]+)$",
+	}, d, config); err != nil {
+		return nil, err
+	}
+
+	// Replace import id for the resource id
+	id, err := tpgresource.ReplaceVars(d, config, "webResource/{{web_resource_id}}")
+	if err != nil {
+		return nil, fmt.Errorf("Error constructing id: %s", err)
+	}
+	d.SetId(id)
+
+	return []*schema.ResourceData{d}, nil
+}
+
+func flattenSiteVerificationWebResourceWebResourceId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenSiteVerificationWebResourceSite(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["type"] = + flattenSiteVerificationWebResourceSiteType(original["type"], d, config) + transformed["identifier"] = + flattenSiteVerificationWebResourceSiteIdentifier(original["identifier"], d, config) + return []interface{}{transformed} +} +func flattenSiteVerificationWebResourceSiteType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSiteVerificationWebResourceSiteIdentifier(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSiteVerificationWebResourceOwners(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandSiteVerificationWebResourceSite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedType, err := expandSiteVerificationWebResourceSiteType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + transformedIdentifier, err := expandSiteVerificationWebResourceSiteIdentifier(original["identifier"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIdentifier); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["identifier"] = transformedIdentifier + } + + return transformed, nil +} + +func expandSiteVerificationWebResourceSiteType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSiteVerificationWebResourceSiteIdentifier(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/siteverification/resource_site_verification_web_resource_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/siteverification/resource_site_verification_web_resource_sweeper.go new file mode 100644 index 00000000000..390b2ac4548 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/siteverification/resource_site_verification_web_resource_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package siteverification + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("SiteVerificationWebResource", testSweepSiteVerificationWebResource) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepSiteVerificationWebResource(region string) error { + resourceName := "SiteVerificationWebResource" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://www.googleapis.com/siteVerification/v1/webResource", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["webResources"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
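+		// For example (illustrative), an item {"id": ".../webResource/abc123"}
+		// sweeps under the name "abc123", i.e. the last path segment of the
+		// self link.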
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://www.googleapis.com/siteVerification/v1/webResource/{{web_resource_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sourcerepo/resource_sourcerepo_repository.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sourcerepo/resource_sourcerepo_repository.go index a2aadb12040..8015446f0aa 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sourcerepo/resource_sourcerepo_repository.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sourcerepo/resource_sourcerepo_repository.go @@ -28,6 +28,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/googleapi" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" @@ -51,6 +52,50 @@ func resourceSourceRepoRepositoryPubSubConfigsHash(v interface{}) int { return tpgresource.Hashcode(buf.String()) } +func resourceSourceRepoRepositoryPollRead(d *schema.ResourceData, meta interface{}) transport_tpg.PollReadFunc { + return func() (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SourceRepoBasePath}}projects/{{project}}/repos") + if err != nil { + return nil, err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, fmt.Errorf("error fetching project for Repository: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // Confirm the source repository exists + headers := make(http.Header) + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + 
Headers: headers, + }) + + if err != nil { + return nil, err + } + return nil, nil + } +} + func ResourceSourceRepoRepository() *schema.Resource { return &schema.Resource{ Create: resourceSourceRepoRepositoryCreate, @@ -122,6 +167,12 @@ If unspecified, it defaults to the compute engine default service account.`, Computed: true, Description: `URL to clone the repository from Google Cloud Source Repositories.`, }, + "create_ignore_already_exists": { + Type: schema.TypeBool, + Optional: true, + Computed: false, + Description: `If set to true, skip repository creation if a repository with the same name already exists.`, + }, "project": { Type: schema.TypeString, Optional: true, @@ -185,9 +236,39 @@ func resourceSourceRepoRepositoryCreate(d *schema.ResourceData, meta interface{} Headers: headers, }) if err != nil { - return fmt.Errorf("Error creating Repository: %s", err) + gerr, ok := err.(*googleapi.Error) + alreadyExists := ok && gerr.Code == 409 && d.Get("create_ignore_already_exists").(bool) + if alreadyExists { + log.Printf("[DEBUG] Calling get Repository after already exists error") + res, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SourceRepoRepository %q", d.Id())) + } + } else { + return fmt.Errorf("Error creating Repository: %s", err) + } } + // We poll until the resource is found due to eventual consistency issue + // on part of the api https://cloud.google.com/iam/docs/overview#consistency + err = transport_tpg.PollingWaitTime(resourceSourceRepoRepositoryPollRead(d, meta), transport_tpg.PollCheckForExistence, "Creating Source Repository", d.Timeout(schema.TimeoutCreate), 1) + + if err != nil { + return err + } + + // We can't guarantee complete consistency even after polling, + // so sleep for some additional time to reduce the likelihood of + // eventual consistency failures. + time.Sleep(10 * time.Second) + // Store the ID now id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/repos/{{name}}") if err != nil { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/resource_spanner_backup_schedule.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/resource_spanner_backup_schedule.go new file mode 100644 index 00000000000..d37f67a53d7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/resource_spanner_backup_schedule.go @@ -0,0 +1,663 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package spanner + +import ( + "fmt" + "log" + "net/http" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceSpannerBackupSchedule() *schema.Resource { + return &schema.Resource{ + Create: resourceSpannerBackupScheduleCreate, + Read: resourceSpannerBackupScheduleRead, + Update: resourceSpannerBackupScheduleUpdate, + Delete: resourceSpannerBackupScheduleDelete, + + Importer: &schema.ResourceImporter{ + State: resourceSpannerBackupScheduleImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "database": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The database to create the backup schedule on.`, + }, + "instance": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The instance to create the backup schedule on.`, + }, + "retention_duration": { + Type: schema.TypeString, + Required: true, + Description: `At what relative time in the future, compared to its creation time, the backup should be deleted, e.g. keep backups for 7 days. +A duration in seconds with up to nine fractional digits, ending with 's'. Example: '3.5s'. +You can set this to a value up to 366 days.`, + }, + "full_backup_spec": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The schedule creates only full backups.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + ExactlyOneOf: []string{"full_backup_spec", "incremental_backup_spec"}, + }, + "incremental_backup_spec": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The schedule creates incremental backup chains.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + ExactlyOneOf: []string{"full_backup_spec", "incremental_backup_spec"}, + }, + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateRegexp(`^[a-z][a-z0-9_\-]*[a-z0-9]$`), + Description: `A unique identifier for the backup schedule, which cannot be changed after +the backup schedule is created. Values are of the form [a-z][-a-z0-9]*[a-z0-9].`, + }, + "spec": { + Type: schema.TypeList, + Optional: true, + Description: `Defines specifications of the backup schedule.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cron_spec": { + Type: schema.TypeList, + Optional: true, + Description: `Cron style schedule specification.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "text": { + Type: schema.TypeString, + Optional: true, + Description: `Textual representation of the crontab. 
User can customize the +backup frequency and the backup version time using the cron +expression. The version time must be in UTC timezone. +The backup will contain an externally consistent copy of the +database at the version time. Allowed frequencies are 12 hour, 1 day, +1 week and 1 month. Examples of valid cron specifications: + 0 2/12 * * * : every 12 hours at (2, 14) hours past midnight in UTC. + 0 2,14 * * * : every 12 hours at (2,14) hours past midnight in UTC. + 0 2 * * * : once a day at 2 past midnight in UTC. + 0 2 * * 0 : once a week every Sunday at 2 past midnight in UTC. + 0 2 8 * * : once a month on 8th day at 2 past midnight in UTC.`, + }, + }, + }, + }, + }, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSpannerBackupScheduleCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandSpannerBackupScheduleName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + retentionDurationProp, err := expandSpannerBackupScheduleRetentionDuration(d.Get("retention_duration"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("retention_duration"); !tpgresource.IsEmptyValue(reflect.ValueOf(retentionDurationProp)) && (ok || !reflect.DeepEqual(v, retentionDurationProp)) { + obj["retentionDuration"] = retentionDurationProp + } + specProp, err := expandSpannerBackupScheduleSpec(d.Get("spec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("spec"); ok || !reflect.DeepEqual(v, specProp) { + obj["spec"] = specProp + } + fullBackupSpecProp, err := expandSpannerBackupScheduleFullBackupSpec(d.Get("full_backup_spec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("full_backup_spec"); ok || !reflect.DeepEqual(v, fullBackupSpecProp) { + obj["fullBackupSpec"] = fullBackupSpecProp + } + incrementalBackupSpecProp, err := expandSpannerBackupScheduleIncrementalBackupSpec(d.Get("incremental_backup_spec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("incremental_backup_spec"); ok || !reflect.DeepEqual(v, incrementalBackupSpecProp) { + obj["incrementalBackupSpec"] = incrementalBackupSpecProp + } + + obj, err = resourceSpannerBackupScheduleEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{database}}/backupSchedules?backup_schedule_id={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new BackupSchedule: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BackupSchedule: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: 
billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating BackupSchedule: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/instances/{{instance}}/databases/{{database}}/backupSchedules/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating BackupSchedule %q: %#v", d.Id(), res) + + return resourceSpannerBackupScheduleRead(d, meta) +} + +func resourceSpannerBackupScheduleRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{database}}/backupSchedules/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BackupSchedule: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SpannerBackupSchedule %q", d.Id())) + } + + res, err = resourceSpannerBackupScheduleDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing SpannerBackupSchedule because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading BackupSchedule: %s", err) + } + + if err := d.Set("name", flattenSpannerBackupScheduleName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupSchedule: %s", err) + } + if err := d.Set("retention_duration", flattenSpannerBackupScheduleRetentionDuration(res["retentionDuration"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupSchedule: %s", err) + } + if err := d.Set("spec", flattenSpannerBackupScheduleSpec(res["spec"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupSchedule: %s", err) + } + if err := d.Set("full_backup_spec", flattenSpannerBackupScheduleFullBackupSpec(res["fullBackupSpec"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupSchedule: %s", err) + } + if err := d.Set("incremental_backup_spec", flattenSpannerBackupScheduleIncrementalBackupSpec(res["incrementalBackupSpec"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupSchedule: %s", err) + } + + return nil +} + +func resourceSpannerBackupScheduleUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BackupSchedule: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + retentionDurationProp, err := expandSpannerBackupScheduleRetentionDuration(d.Get("retention_duration"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("retention_duration"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, retentionDurationProp)) { + obj["retentionDuration"] = retentionDurationProp + } + specProp, err := expandSpannerBackupScheduleSpec(d.Get("spec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("spec"); ok || !reflect.DeepEqual(v, specProp) { + obj["spec"] = specProp + } + + obj, err = resourceSpannerBackupScheduleEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{database}}/backupSchedules/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating BackupSchedule %q: %#v", d.Id(), obj) + headers := make(http.Header) + updateMask := []string{} + + if d.HasChange("retention_duration") { + updateMask = append(updateMask, "retentionDuration") + } + + if d.HasChange("spec") { + updateMask = append(updateMask, "spec") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + // The generated code sets the wrong masks for the following fields. + newUpdateMask := []string{} + if d.HasChange("spec.0.cron_spec.0.text") { + newUpdateMask = append(newUpdateMask, "spec.cron_spec.text") + } + // Pull out any other set fields from the generated mask. 
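	// For illustration (a sketch of the effective request, not generated code):
	// when only spec.0.cron_spec.0.text changes, the generated mask is just
	// "spec", while the Spanner API expects the leaf path, so the request sent
	// is effectively:
	//
	//	PATCH .../backupSchedules/{{name}}?updateMask=spec.cron_spec.text
	//
	// The loop below drops the too-broad "spec" entry and carries over any
	// other generated entries (such as "retentionDuration") unchanged.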
+ for _, mask := range updateMask { + if mask == "spec" { + continue + } + newUpdateMask = append(newUpdateMask, mask) + } + // Overwrite the previously set mask. + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(newUpdateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating BackupSchedule %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating BackupSchedule %q: %#v", d.Id(), res) + } + + } + + return resourceSpannerBackupScheduleRead(d, meta) +} + +func resourceSpannerBackupScheduleDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BackupSchedule: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{database}}/backupSchedules/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting BackupSchedule %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "BackupSchedule") + } + + log.Printf("[DEBUG] Finished deleting BackupSchedule %q: %#v", d.Id(), res) + return nil +} + +func resourceSpannerBackupScheduleImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^projects/(?P<project>[^/]+)/instances/(?P<instance>[^/]+)/databases/(?P<database>[^/]+)/backupSchedules/(?P<name>[^/]+)$", + "^(?P<project>[^/]+)/(?P<instance>[^/]+)/(?P<database>[^/]+)/(?P<name>[^/]+)$", + "^(?P<instance>[^/]+)/(?P<database>[^/]+)/(?P<name>[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/instances/{{instance}}/databases/{{database}}/backupSchedules/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenSpannerBackupScheduleName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenSpannerBackupScheduleRetentionDuration(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenSpannerBackupScheduleSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["cron_spec"] = + flattenSpannerBackupScheduleSpecCronSpec(original["cronSpec"], d, config) + return []interface{}{transformed} +} +func flattenSpannerBackupScheduleSpecCronSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["text"] = + flattenSpannerBackupScheduleSpecCronSpecText(original["text"], d, config) + return []interface{}{transformed} +} +func flattenSpannerBackupScheduleSpecCronSpecText(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSpannerBackupScheduleFullBackupSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenSpannerBackupScheduleIncrementalBackupSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func expandSpannerBackupScheduleName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSpannerBackupScheduleRetentionDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSpannerBackupScheduleSpec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCronSpec, err := expandSpannerBackupScheduleSpecCronSpec(original["cron_spec"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCronSpec); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cronSpec"] = transformedCronSpec + } + + return transformed, nil +} + +func expandSpannerBackupScheduleSpecCronSpec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedText, err := expandSpannerBackupScheduleSpecCronSpecText(original["text"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedText); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["text"] = transformedText + } + + return transformed, nil +} + +func expandSpannerBackupScheduleSpecCronSpecText(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSpannerBackupScheduleFullBackupSpec(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandSpannerBackupScheduleIncrementalBackupSpec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func resourceSpannerBackupScheduleEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + obj["name"] = d.Get("name").(string) + if obj["name"] == nil || obj["name"] == "" { + if err := d.Set("name", id.PrefixedUniqueId("tfgen-spanid-")[:30]); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + } + config := meta.(*transport_tpg.Config) + var err error + obj["name"], err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/instances/{{instance}}/databases/{{database}}/backupSchedules/{{name}}") + if err != nil { + return obj, err + } + delete(obj, "instance") + delete(obj, "database") + return obj, nil +} + +func resourceSpannerBackupScheduleDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + d.SetId(res["name"].(string)) + if err := tpgresource.ParseImportId([]string{"projects/(?P<project>[^/]+)/instances/(?P<instance>[^/]+)/databases/(?P<database>[^/]+)/backupSchedules/(?P<name>[^/]+)"}, d, config); err != nil { + return nil, err + } + res["project"] = d.Get("project").(string) + res["instance"] = d.Get("instance").(string) + res["database"] = d.Get("database").(string) + res["name"] = d.Get("name").(string) + id, err := tpgresource.ReplaceVars(d, config, "{{instance}}/{{database}}/{{name}}") + if err != nil { + return nil, err + } + d.SetId(id) + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/activedirectory/resource_active_directory_domain_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/resource_spanner_backup_schedule_sweeper.go similarity index 87% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/activedirectory/resource_active_directory_domain_sweeper.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/resource_spanner_backup_schedule_sweeper.go index 8189a43e189..238df7132bf 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/activedirectory/resource_active_directory_domain_sweeper.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/resource_spanner_backup_schedule_sweeper.go @@ -15,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package activedirectory +package spanner import ( "context" @@ -30,12 +30,12 @@ import ( ) func init() { - sweeper.AddTestSweepers("ActiveDirectoryDomain", testSweepActiveDirectoryDomain) + sweeper.AddTestSweepers("SpannerBackupSchedule", testSweepSpannerBackupSchedule) } // At the time of 
writing, the CI only passes us-central1 as the region -func testSweepActiveDirectoryDomain(region string) error { - resourceName := "ActiveDirectoryDomain" +func testSweepSpannerBackupSchedule(region string) error { + resourceName := "SpannerBackupSchedule" log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) config, err := sweeper.SharedConfigForRegion(region) @@ -64,7 +64,7 @@ func testSweepActiveDirectoryDomain(region string) error { }, } - listTemplate := strings.Split("https://managedidentities.googleapis.com/v1/projects/{{project}}/locations/global/domains", "?")[0] + listTemplate := strings.Split("https://spanner.googleapis.com/v1/projects/{{project}}/instances/{{instance}}/databases/{{database}}/backupSchedules", "?")[0] listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) if err != nil { log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) @@ -83,7 +83,7 @@ func testSweepActiveDirectoryDomain(region string) error { return nil } - resourceList, ok := res["domains"] + resourceList, ok := res["backupSchedules"] if !ok { log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") return nil @@ -108,7 +108,7 @@ func testSweepActiveDirectoryDomain(region string) error { continue } - deleteTemplate := "https://managedidentities.googleapis.com/v1/projects/{{project}}/locations/global/domains/{{domain_name}}" + deleteTemplate := "https://spanner.googleapis.com/v1/projects/{{project}}/instances/{{instance}}/databases/{{database}}/backupSchedules/{{name}}" deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) if err != nil { log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/resource_spanner_database.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/resource_spanner_database.go index a7d7f7ecf28..c2a5b4a30ad 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/resource_spanner_database.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/resource_spanner_database.go @@ -162,10 +162,22 @@ whereas setting “enableDropProtection” to true protects the database from de Schema: map[string]*schema.Schema{ "kms_key_name": { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, Description: `Fully qualified name of the KMS key to use to encrypt this database. This key must exist in the same location as the Spanner Database.`, + ExactlyOneOf: []string{"encryption_config.0.kms_key_name", "encryption_config.0.kms_key_names"}, + }, + "kms_key_names": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Fully qualified name of the KMS keys to use to encrypt this database. The keys must exist +in the same locations as the Spanner Database.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + ExactlyOneOf: []string{"encryption_config.0.kms_key_name", "encryption_config.0.kms_key_names"}, }, }, }, @@ -188,13 +200,13 @@ update the database's version_retention_period.`, "deletion_protection": { Type: schema.TypeBool, Optional: true, - Default: true, Description: `Whether Terraform will be prevented from destroying the database. Defaults to true. 
When a 'terraform destroy' or 'terraform apply' would delete the database, the command will fail if this field is not set to false in Terraform state. When the field is set to true or unset in Terraform state, a 'terraform apply' or 'terraform destroy' that would delete the database will fail. When the field is set to false, deleting the database is allowed.`, + Default: true, }, "project": { Type: schema.TypeString, @@ -579,7 +591,6 @@ func resourceSpannerDatabaseUpdate(d *schema.ResourceData, meta interface{}) err if err != nil { return err } - if obj["statements"] != nil { if len(obj["statements"].([]string)) == 0 { // Return early to avoid making an API call that errors, @@ -659,7 +670,6 @@ func resourceSpannerDatabaseUpdate(d *schema.ResourceData, meta interface{}) err } headers := make(http.Header) - if obj["statements"] != nil { if len(obj["statements"].([]string)) == 0 { // Return early to avoid making an API call that errors, @@ -823,12 +833,46 @@ func flattenSpannerDatabaseEncryptionConfig(v interface{}, d *schema.ResourceDat transformed := make(map[string]interface{}) transformed["kms_key_name"] = flattenSpannerDatabaseEncryptionConfigKmsKeyName(original["kmsKeyName"], d, config) + transformed["kms_key_names"] = + flattenSpannerDatabaseEncryptionConfigKmsKeyNames(original["kmsKeyNames"], d, config) return []interface{}{transformed} } func flattenSpannerDatabaseEncryptionConfigKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } +func flattenSpannerDatabaseEncryptionConfigKmsKeyNames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Ignore `kms_key_names` if `kms_key_name` is set, because that field takes precedence. + _, kmsNameSet := d.GetOk("encryption_config.0.kms_key_name") + if kmsNameSet { + return nil + } + + rawConfigValue := d.Get("encryption_config.0.kms_key_names") + + // Convert config value to []string + configValue, err := tpgresource.InterfaceSliceToStringSlice(rawConfigValue) + if err != nil { + log.Printf("[ERROR] Failed to convert config value: %s", err) + return v + } + + // Convert v to []string + apiStringValue, err := tpgresource.InterfaceSliceToStringSlice(v) + if err != nil { + log.Printf("[ERROR] Failed to convert API value: %s", err) + return v + } + + sortedStrings, err := tpgresource.SortStringsByConfigOrder(configValue, apiStringValue) + if err != nil { + log.Printf("[ERROR] Could not sort API response value: %s", err) + return v + } + + return sortedStrings +} + func flattenSpannerDatabaseDatabaseDialect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -872,6 +916,13 @@ func expandSpannerDatabaseEncryptionConfig(v interface{}, d tpgresource.Terrafor transformed["kmsKeyName"] = transformedKmsKeyName } + transformedKmsKeyNames, err := expandSpannerDatabaseEncryptionConfigKmsKeyNames(original["kms_key_names"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyNames); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyNames"] = transformedKmsKeyNames + } + return transformed, nil } @@ -879,6 +930,10 @@ func expandSpannerDatabaseEncryptionConfigKmsKeyName(v interface{}, d tpgresourc return v, nil } +func expandSpannerDatabaseEncryptionConfigKmsKeyNames(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandSpannerDatabaseDatabaseDialect(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -916,7 +971,6 @@ func resourceSpannerDatabaseEncoder(d *schema.ResourceData, meta interface{}, ob } func resourceSpannerDatabaseUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - if obj["versionRetentionPeriod"] != nil || obj["extraStatements"] != nil { old, new := d.GetChange("ddl") oldDdls := old.([]interface{}) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/resource_spanner_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/resource_spanner_instance.go index 8aa422069be..68698b4d9d4 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/resource_spanner_instance.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/resource_spanner_instance.go @@ -142,8 +142,6 @@ unique per project and between 4 and 30 characters in length.`, Description: `A unique identifier for the instance, which cannot be changed after the instance is created. The name must be between 6 and 30 characters in length. - - If not provided, a random string starting with 'tf-' will be selected.`, }, "autoscaling_config": { @@ -156,6 +154,60 @@ the instance.`, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "asymmetric_autoscaling_options": { + Type: schema.TypeList, + Optional: true, + Description: `Asymmetric autoscaling options for specific replicas.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "overrides": { + Type: schema.TypeList, + Required: true, + Description: `A nested object resource`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "autoscaling_limits": { + Type: schema.TypeList, + Required: true, + Description: `A nested object resource`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_nodes": { + Type: schema.TypeInt, + Required: true, + Description: `The maximum number of nodes for this specific replica.`, + }, + "min_nodes": { + Type: schema.TypeInt, + Required: true, + Description: `The minimum number of nodes for this specific replica.`, + }, + }, + }, + }, + }, + }, + }, + "replica_selection": { + Type: schema.TypeList, + Required: true, + Description: `A nested object resource`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + Description: `The location of the replica to apply asymmetric autoscaling options.`, + }, + }, + }, + }, + }, + }, + }, "autoscaling_limits": { Type: schema.TypeList, Optional: true, @@ -231,6 +283,13 @@ This number is on a scale from 0 (no utilization) to 100 (full utilization).`, }, ExactlyOneOf: []string{"num_nodes", "processing_units", "autoscaling_config"}, }, + "edition": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"EDITION_UNSPECIFIED", "STANDARD", "ENTERPRISE", "ENTERPRISE_PLUS", ""}), + Description: `The edition selected for this instance. Different editions provide different capabilities at different price points. 
Possible values: ["EDITION_UNSPECIFIED", "STANDARD", "ENTERPRISE", "ENTERPRISE_PLUS"]`, + }, "labels": { Type: schema.TypeMap, Optional: true, @@ -279,9 +338,9 @@ or node_count must be present in terraform.`, "force_destroy": { Type: schema.TypeBool, Optional: true, - Default: false, Description: `When deleting a spanner instance, this boolean option will delete all backups of this instance. This must be set to true if you created a backup manually in the console.`, + Default: false, }, "project": { Type: schema.TypeString, @@ -338,6 +397,12 @@ func resourceSpannerInstanceCreate(d *schema.ResourceData, meta interface{}) err } else if v, ok := d.GetOkExists("autoscaling_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(autoscalingConfigProp)) && (ok || !reflect.DeepEqual(v, autoscalingConfigProp)) { obj["autoscalingConfig"] = autoscalingConfigProp } + editionProp, err := expandSpannerInstanceEdition(d.Get("edition"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("edition"); !tpgresource.IsEmptyValue(reflect.ValueOf(editionProp)) && (ok || !reflect.DeepEqual(v, editionProp)) { + obj["edition"] = editionProp + } labelsProp, err := expandSpannerInstanceEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err @@ -517,6 +582,9 @@ func resourceSpannerInstanceRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("autoscaling_config", flattenSpannerInstanceAutoscalingConfig(res["autoscalingConfig"], d, config)); err != nil { return fmt.Errorf("Error reading Instance: %s", err) } + if err := d.Set("edition", flattenSpannerInstanceEdition(res["edition"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } if err := d.Set("terraform_labels", flattenSpannerInstanceTerraformLabels(res["labels"], d, config)); err != nil { return fmt.Errorf("Error reading Instance: %s", err) } @@ -567,6 +635,12 @@ func resourceSpannerInstanceUpdate(d *schema.ResourceData, meta interface{}) err } else if v, ok := d.GetOkExists("autoscaling_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, autoscalingConfigProp)) { obj["autoscalingConfig"] = autoscalingConfigProp } + editionProp, err := expandSpannerInstanceEdition(d.Get("edition"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("edition"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, editionProp)) { + obj["edition"] = editionProp + } labelsProp, err := expandSpannerInstanceEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err @@ -809,6 +883,8 @@ func flattenSpannerInstanceAutoscalingConfig(v interface{}, d *schema.ResourceDa flattenSpannerInstanceAutoscalingConfigAutoscalingLimits(original["autoscalingLimits"], d, config) transformed["autoscaling_targets"] = flattenSpannerInstanceAutoscalingConfigAutoscalingTargets(original["autoscalingTargets"], d, config) + transformed["asymmetric_autoscaling_options"] = + flattenSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptions(original["asymmetricAutoscalingOptions"], d, config) return []interface{}{transformed} } func flattenSpannerInstanceAutoscalingConfigAutoscalingLimits(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { @@ -947,6 +1023,108 @@ func flattenSpannerInstanceAutoscalingConfigAutoscalingTargetsStorageUtilization return v // let terraform core handle it otherwise } +func flattenSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptions(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "replica_selection": flattenSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptionsReplicaSelection(original["replicaSelection"], d, config), + "overrides": flattenSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptionsOverrides(original["overrides"], d, config), + }) + } + return transformed +} +func flattenSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptionsReplicaSelection(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["location"] = + flattenSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptionsReplicaSelectionLocation(original["location"], d, config) + return []interface{}{transformed} +} +func flattenSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptionsReplicaSelectionLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptionsOverrides(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["autoscaling_limits"] = + flattenSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptionsOverridesAutoscalingLimits(original["autoscalingLimits"], d, config) + return []interface{}{transformed} +} +func flattenSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptionsOverridesAutoscalingLimits(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["min_nodes"] = + flattenSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptionsOverridesAutoscalingLimitsMinNodes(original["minNodes"], d, config) + transformed["max_nodes"] = + flattenSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptionsOverridesAutoscalingLimitsMaxNodes(original["maxNodes"], d, config) + return []interface{}{transformed} +} +func flattenSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptionsOverridesAutoscalingLimitsMinNodes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptionsOverridesAutoscalingLimitsMaxNodes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := 
tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenSpannerInstanceEdition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenSpannerInstanceTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v @@ -1019,6 +1197,13 @@ func expandSpannerInstanceAutoscalingConfig(v interface{}, d tpgresource.Terrafo transformed["autoscalingTargets"] = transformedAutoscalingTargets } + transformedAsymmetricAutoscalingOptions, err := expandSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptions(original["asymmetric_autoscaling_options"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAsymmetricAutoscalingOptions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["asymmetricAutoscalingOptions"] = transformedAsymmetricAutoscalingOptions + } + return transformed, nil } @@ -1112,6 +1297,115 @@ func expandSpannerInstanceAutoscalingConfigAutoscalingTargetsStorageUtilizationP return v, nil } +func expandSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedReplicaSelection, err := expandSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptionsReplicaSelection(original["replica_selection"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReplicaSelection); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["replicaSelection"] = transformedReplicaSelection + } + + transformedOverrides, err := expandSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptionsOverrides(original["overrides"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOverrides); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["overrides"] = transformedOverrides + } + + req = append(req, transformed) + } + return req, nil +} + +func expandSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptionsReplicaSelection(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLocation, err := expandSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptionsReplicaSelectionLocation(original["location"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["location"] = transformedLocation + } + + return transformed, nil +} + +func expandSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptionsReplicaSelectionLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptionsOverrides(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAutoscalingLimits, err := expandSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptionsOverridesAutoscalingLimits(original["autoscaling_limits"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAutoscalingLimits); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["autoscalingLimits"] = transformedAutoscalingLimits + } + + return transformed, nil +} + +func expandSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptionsOverridesAutoscalingLimits(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMinNodes, err := expandSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptionsOverridesAutoscalingLimitsMinNodes(original["min_nodes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinNodes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minNodes"] = transformedMinNodes + } + + transformedMaxNodes, err := expandSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptionsOverridesAutoscalingLimitsMaxNodes(original["max_nodes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxNodes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxNodes"] = transformedMaxNodes + } + + return transformed, nil +} + +func expandSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptionsOverridesAutoscalingLimitsMinNodes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSpannerInstanceAutoscalingConfigAsymmetricAutoscalingOptionsOverridesAutoscalingLimitsMaxNodes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSpannerInstanceEdition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandSpannerInstanceEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil @@ -1151,6 +1445,9 @@ func resourceSpannerInstanceUpdateEncoder(d *schema.ResourceData, meta interface newObj := make(map[string]interface{}) newObj["instance"] = obj updateMask := make([]string, 0) + if d.HasChange("edition") { + updateMask = append(updateMask, "edition") + } if d.HasChange("num_nodes") { updateMask = append(updateMask, "nodeCount") } @@ -1188,6 +1485,9 @@ func resourceSpannerInstanceUpdateEncoder(d *schema.ResourceData, meta interface if d.HasChange("autoscaling_config.0.autoscaling_targets.0.storage_utilization_percent") { updateMask = append(updateMask, "autoscalingConfig.autoscalingTargets.storageUtilizationPercent") } + if d.HasChange("autoscaling_config.0.asymmetric_autoscaling_options") { + updateMask = append(updateMask, "autoscalingConfig.asymmetricAutoscalingOptions") + } } } newObj["fieldMask"] = strings.Join(updateMask, 
",") diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_database.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_database.go index bcd1bd19d63..3f76b524ff2 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_database.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_database.go @@ -91,11 +91,11 @@ a value of 'en_US.UTF8' at creation time.`, "deletion_policy": { Type: schema.TypeString, Optional: true, - Default: "DELETE", Description: `The deletion policy for the database. Setting ABANDON allows the resource to be abandoned rather than deleted. This is useful for Postgres, where databases cannot be deleted from the API if there are users other than cloudsqlsuperuser with access. Possible values are: "ABANDON", "DELETE". Defaults to "DELETE".`, + Default: "DELETE", }, "project": { Type: schema.TypeString, @@ -234,6 +234,14 @@ func resourceSQLDatabaseRead(d *schema.ResourceData, meta interface{}) error { } headers := make(http.Header) + instance := d.Get("instance").(string) + databaseInstance, err := config.NewSqlAdminClient(userAgent).Instances.Get(project, instance).Do() + if err != nil { + return err + } + if databaseInstance.Settings.ActivationPolicy != "ALWAYS" { + return nil + } res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, Method: "GET", diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_database_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_database_instance.go index fbc6b9b7cd2..c9f03c31c94 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_database_instance.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_database_instance.go @@ -76,12 +76,12 @@ var ( ipConfigurationKeys = []string{ "settings.0.ip_configuration.0.authorized_networks", "settings.0.ip_configuration.0.ipv4_enabled", - "settings.0.ip_configuration.0.require_ssl", "settings.0.ip_configuration.0.private_network", "settings.0.ip_configuration.0.allocated_ip_range", "settings.0.ip_configuration.0.enable_private_path_for_google_cloud_services", "settings.0.ip_configuration.0.psc_config", "settings.0.ip_configuration.0.ssl_mode", + "settings.0.ip_configuration.0.server_ca_mode", } maintenanceWindowKeys = []string{ @@ -179,7 +179,7 @@ func ResourceSqlDatabaseInstance() *schema.Resource { "edition": { Type: schema.TypeString, Optional: true, - Default: "ENTERPRISE", + Computed: true, ValidateFunc: validation.StringInSlice([]string{"ENTERPRISE", "ENTERPRISE_PLUS"}, false), Description: `The edition of the instance, can be ENTERPRISE or ENTERPRISE_PLUS.`, }, @@ -200,6 +200,7 @@ func ResourceSqlDatabaseInstance() *schema.Resource { "data_cache_config": { Type: schema.TypeList, Optional: true, + Computed: true, MaxItems: 1, Description: `Data cache configurations.`, Elem: &schema.Resource{ @@ -286,7 +287,6 @@ func ResourceSqlDatabaseInstance() *schema.Resource { }, "time_zone": { Type: schema.TypeString, - ForceNew: true, Optional: true, Description: `The time_zone to be used by the database 
engine (supported only for SQL Server), in SQL Server timezone format.`, }, @@ -402,6 +402,11 @@ is set to true. Defaults to ZONAL.`, Optional: true, Description: `Enables Vertex AI Integration.`, }, + "enable_dataplex_integration": { + Type: schema.TypeBool, + Optional: true, + Description: `Enables Dataplex Integration.`, + }, "disk_size": { Type: schema.TypeInt, Optional: true, @@ -438,13 +443,6 @@ is set to true. Defaults to ZONAL.`, AtLeastOneOf: ipConfigurationKeys, Description: `Whether this Cloud SQL instance should be assigned a public IPV4 address. At least ipv4_enabled must be enabled or a private_network must be configured.`, }, - "require_ssl": { - Type: schema.TypeBool, - Optional: true, - AtLeastOneOf: ipConfigurationKeys, - Description: `Whether SSL connections over IP are enforced or not. To change this field, also set the corresponding value in ssl_mode if it has been set too.`, - Deprecated: "`require_ssl` will be fully deprecated in a future major release. For now, please use `ssl_mode` with a compatible `require_ssl` value instead.", - }, "private_network": { Type: schema.TypeString, Optional: true, @@ -493,7 +491,15 @@ is set to true. Defaults to ZONAL.`, Optional: true, Computed: true, ValidateFunc: validation.StringInSlice([]string{"ALLOW_UNENCRYPTED_AND_ENCRYPTED", "ENCRYPTED_ONLY", "TRUSTED_CLIENT_CERTIFICATE_REQUIRED"}, false), - Description: `Specify how SSL connection should be enforced in DB connections. This field provides more SSL enforcment options compared to require_ssl. To change this field, also set the correspoding value in require_ssl until next major release.`, + Description: `Specify how SSL connection should be enforced in DB connections.`, + AtLeastOneOf: ipConfigurationKeys, + }, + "server_ca_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"CA_MODE_UNSPECIFIED", "GOOGLE_MANAGED_INTERNAL_CA", "GOOGLE_MANAGED_CAS_CA"}, false), + Description: `Specify how the server certificate's Certificate Authority is hosted.`, AtLeastOneOf: ipConfigurationKeys, }, }, @@ -572,6 +578,7 @@ is set to true. Defaults to ZONAL.`, "insights_config": { Type: schema.TypeList, Optional: true, + Computed: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -660,7 +667,7 @@ is set to true. Defaults to ZONAL.`, Optional: true, Computed: true, ValidateFunc: validation.StringInSlice([]string{"NOT_REQUIRED", "REQUIRED"}, false), - Description: `Specifies if connections must use Cloud SQL connectors.`, + Description: `Enables the enforcement of Cloud SQL Auth Proxy or Cloud SQL connectors for all the connections. 
If enabled, all the direct connections are rejected.`, }, "deletion_protection_enabled": { Type: schema.TypeBool, @@ -1274,7 +1281,7 @@ func expandSqlDatabaseInstanceSettings(configured []interface{}, databaseVersion Tier: _settings["tier"].(string), Edition: _settings["edition"].(string), AdvancedMachineFeatures: expandSqlServerAdvancedMachineFeatures(_settings["advanced_machine_features"].([]interface{})), - ForceSendFields: []string{"StorageAutoResize", "EnableGoogleMlIntegration"}, + ForceSendFields: []string{"StorageAutoResize", "EnableGoogleMlIntegration", "EnableDataplexIntegration"}, ActivationPolicy: _settings["activation_policy"].(string), ActiveDirectoryConfig: expandActiveDirectoryConfig(_settings["active_directory_config"].([]interface{})), DenyMaintenancePeriods: expandDenyMaintenancePeriod(_settings["deny_maintenance_period"].([]interface{})), @@ -1288,6 +1295,7 @@ PricingPlan: _settings["pricing_plan"].(string), DeletionProtectionEnabled: _settings["deletion_protection_enabled"].(bool), EnableGoogleMlIntegration: _settings["enable_google_ml_integration"].(bool), + EnableDataplexIntegration: _settings["enable_dataplex_integration"].(bool), UserLabels: tpgresource.ConvertStringMap(_settings["user_labels"].(map[string]interface{})), BackupConfiguration: expandBackupConfiguration(_settings["backup_configuration"].([]interface{})), DatabaseFlags: expandDatabaseFlags(_settings["database_flags"].(*schema.Set).List()), @@ -1386,7 +1394,8 @@ func expandIpConfiguration(configured []interface{}, databaseVersion string) *sq _ipConfiguration := configured[0].(map[string]interface{}) - forceSendFields := []string{"Ipv4Enabled", "RequireSsl"} + forceSendFields := []string{"Ipv4Enabled"} + nullFields := []string{"RequireSsl"} if !strings.HasPrefix(databaseVersion, "SQLSERVER") { forceSendFields = append(forceSendFields, "EnablePrivatePathForGoogleCloudServices") @@ -1394,14 +1403,15 @@ return &sqladmin.IpConfiguration{ Ipv4Enabled: _ipConfiguration["ipv4_enabled"].(bool), - RequireSsl: _ipConfiguration["require_ssl"].(bool), PrivateNetwork: _ipConfiguration["private_network"].(string), AllocatedIpRange: _ipConfiguration["allocated_ip_range"].(string), AuthorizedNetworks: expandAuthorizedNetworks(_ipConfiguration["authorized_networks"].(*schema.Set).List()), EnablePrivatePathForGoogleCloudServices: _ipConfiguration["enable_private_path_for_google_cloud_services"].(bool), ForceSendFields: forceSendFields, + NullFields: nullFields, PscConfig: expandPscConfig(_ipConfiguration["psc_config"].(*schema.Set).List()), SslMode: _ipConfiguration["ssl_mode"].(string), + ServerCaMode: _ipConfiguration["server_ca_mode"].(string), } } @@ -1971,6 +1981,31 @@ func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) } } + // Check if timezone is updated + if d.HasChange("settings.0.time_zone") { + timezone := d.Get("settings.0.time_zone").(string) + instance = &sqladmin.DatabaseInstance{Settings: &sqladmin.Settings{TimeZone: timezone}} + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, d.Get("name").(string), instance).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) + if err != nil 
{ + return fmt.Errorf("Error, failed to patch instance settings for %s: %s", instance.Name, err) + } + err = SqlAdminOperationWaitTime(config, op, project, "Patch Instance", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + err = resourceSqlDatabaseInstanceRead(d, meta) + if err != nil { + return err + } + } + return resourceSqlDatabaseInstanceRead(d, meta) } @@ -2112,6 +2147,7 @@ func flattenSettings(settings *sqladmin.Settings, d *schema.ResourceData) []map[ data["disk_autoresize_limit"] = settings.StorageAutoResizeLimit data["enable_google_ml_integration"] = settings.EnableGoogleMlIntegration + data["enable_dataplex_integration"] = settings.EnableDataplexIntegration if settings.UserLabels != nil { data["user_labels"] = settings.UserLabels @@ -2240,8 +2276,9 @@ func flattenIpConfiguration(ipConfiguration *sqladmin.IpConfiguration, d *schema "ipv4_enabled": ipConfiguration.Ipv4Enabled, "private_network": ipConfiguration.PrivateNetwork, "allocated_ip_range": ipConfiguration.AllocatedIpRange, - "require_ssl": ipConfiguration.RequireSsl, "enable_private_path_for_google_cloud_services": ipConfiguration.EnablePrivatePathForGoogleCloudServices, + "ssl_mode": ipConfiguration.SslMode, + "server_ca_mode": ipConfiguration.ServerCaMode, } if ipConfiguration.AuthorizedNetworks != nil { @@ -2252,11 +2289,6 @@ func flattenIpConfiguration(ipConfiguration *sqladmin.IpConfiguration, d *schema data["psc_config"] = flattenPscConfigs(ipConfiguration.PscConfig) } - // We store the ssl_mode value only if the customer already uses `ssl_mode`. - if _, ok := d.GetOk("settings.0.ip_configuration.0.ssl_mode"); ok { - data["ssl_mode"] = ipConfiguration.SslMode - } - return []map[string]interface{}{data} } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_user.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_user.go index 06aecb190ec..ac11555a22e 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_user.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_user.go @@ -328,6 +328,13 @@ func resourceSqlUserRead(d *schema.ResourceData, meta interface{}) error { instance := d.Get("instance").(string) name := d.Get("name").(string) host := d.Get("host").(string) + databaseInstance, err := config.NewSqlAdminClient(userAgent).Instances.Get(project, instance).Do() + if err != nil { + return err + } + if databaseInstance.Settings.ActivationPolicy != "ALWAYS" { + return nil + } var users *sqladmin.UsersListResponse err = nil @@ -344,16 +351,14 @@ func resourceSqlUserRead(d *schema.ResourceData, meta interface{}) error { } var user *sqladmin.User - databaseInstance, err := config.NewSqlAdminClient(userAgent).Instances.Get(project, instance).Do() - if err != nil { - return err - } - for _, currentUser := range users.Items { + var username string if !(strings.Contains(databaseInstance.DatabaseVersion, "POSTGRES") || currentUser.Type == "CLOUD_IAM_GROUP") { - name = strings.Split(name, "@")[0] + username = strings.Split(name, "@")[0] + } else { + username = name } - if currentUser.Name == name { + if currentUser.Name == username { // Host can only be empty for postgres instances, // so don't compare the host if the API host is empty. 
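			// For example (an assumption for illustration, not taken from
			// upstream docs): a MySQL IAM service-account user configured as
			// "sa-name@project.iam.gserviceaccount.com" is listed by the Admin
			// API as "sa-name", so the lookup above compares the name truncated
			// at '@'; Postgres and CLOUD_IAM_GROUP users are compared by their
			// full configured name.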
if host == "" || currentUser.Host == host { @@ -572,6 +577,19 @@ func resourceSqlUserImporter(d *schema.ResourceData, meta interface{}) ([]*schem if err := d.Set("name", parts[3]); err != nil { return nil, fmt.Errorf("Error setting name: %s", err) } + } else if len(parts) == 5 { + if err := d.Set("project", parts[0]); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("instance", parts[1]); err != nil { + return nil, fmt.Errorf("Error setting instance: %s", err) + } + if err := d.Set("host", fmt.Sprintf("%s/%s", parts[2], parts[3])); err != nil { + return nil, fmt.Errorf("Error setting host: %s", err) + } + if err := d.Set("name", parts[4]); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } } else { return nil, fmt.Errorf("Invalid specifier. Expecting {project}/{instance}/{name} for postgres instance and {project}/{instance}/{host}/{name} for MySQL instance") } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/data_source_google_storage_bucket_object.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/data_source_google_storage_bucket_object.go index 42cd71d6c11..8ad9d958c6e 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/data_source_google_storage_bucket_object.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/data_source_google_storage_bucket_object.go @@ -91,8 +91,28 @@ func dataSourceGoogleStorageBucketObjectRead(d *schema.ResourceData, meta interf if err := d.Set("metadata", res["metadata"]); err != nil { return fmt.Errorf("Error setting metadata: %s", err) } + if err := d.Set("generation", flattenStorageBucketObjectGeneration(res["generation"], d, config)); err != nil { + return fmt.Errorf("Error setting generation: %s", err) + } d.SetId(bucket + "-" + name) return nil } + +func flattenStorageBucketObjectGeneration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket.go index 1ac1a847b75..56862be511f 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket.go @@ -49,13 +49,23 @@ func ResourceStorageBucket() *schema.Resource { Read: schema.DefaultTimeout(4 * time.Minute), }, - SchemaVersion: 1, + SchemaVersion: 3, StateUpgraders: []schema.StateUpgrader{ { Type: resourceStorageBucketV0().CoreConfigSchema().ImpliedType(), Upgrade: ResourceStorageBucketStateUpgradeV0, Version: 0, }, + { + Type: resourceStorageBucketV1().CoreConfigSchema().ImpliedType(), + Upgrade: 
ResourceStorageBucketStateUpgradeV1, + Version: 1, + }, + { + Type: resourceStorageBucketV2().CoreConfigSchema().ImpliedType(), + Upgrade: ResourceStorageBucketStateUpgradeV2, + Version: 2, + }, }, Schema: map[string]*schema.Schema{ @@ -228,11 +238,6 @@ func ResourceStorageBucket() *schema.Resource { Optional: true, Description: `Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition.`, }, - "no_age": { - Type: schema.TypeBool, - Optional: true, - Description: `While set true, age value will be omitted.Required to set true when age is unset in the config file.`, - }, "with_state": { Type: schema.TypeString, Computed: true, @@ -263,6 +268,11 @@ func ResourceStorageBucket() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, Description: `One or more matching name suffixes to satisfy this condition.`, }, + "send_age_if_zero": { + Type: schema.TypeBool, + Optional: true, + Description: `While set true, age value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the age field. It can be used alone or together with age.`, + }, "send_days_since_noncurrent_time_if_zero": { Type: schema.TypeBool, Optional: true, @@ -541,6 +551,24 @@ func ResourceStorageBucket() *schema.Resource { }, }, }, + "hierarchical_namespace": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + DiffSuppressFunc: hierachicalNamespaceDiffSuppress, + Description: `The bucket's HNS configuration, which defines whether the bucket can organize folders in a logical file system structure.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + Description: `Set this field to true to organize the bucket with a logical file system structure.`, + }, + }, + }, + }, }, UseJSONNumber: true, } @@ -688,6 +716,10 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error sb.SoftDeletePolicy = expandBucketSoftDeletePolicy(v.([]interface{})) } + if v, ok := d.GetOk("hierarchical_namespace"); ok { + sb.HierarchicalNamespace = expandBucketHierachicalNamespace(v.([]interface{})) + } + var res *storage.Bucket err = transport_tpg.Retry(transport_tpg.RetryOptions{ @@ -699,7 +731,8 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error res, err = insertCall.Do() return err }, - Timeout: d.Timeout(schema.TimeoutCreate), + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429RetryableQuotaError}, }) if err != nil { @@ -930,18 +963,9 @@ func resourceStorageBucketRead(d *schema.ResourceData, meta interface{}) error { // Get the bucket and acl bucket := d.Get("name").(string) - var res *storage.Bucket // There seems to be some eventual consistency errors in some cases, so we want to check a few times // to make sure it exists before moving on - err = transport_tpg.Retry(transport_tpg.RetryOptions{ - RetryFunc: func() (operr error) { - var retryErr error - res, retryErr = config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() - return retryErr - }, - Timeout: d.Timeout(schema.TimeoutRead), - ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsNotFoundRetryableError("bucket read")}, - }) + res, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() if err != nil { return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Storage Bucket %q", d.Get("name").(string))) @@ -1285,6 +1309,38 @@ func 
flattenBucketSoftDeletePolicy(softDeletePolicy *storage.BucketSoftDeletePol return policies } +func expandBucketHierachicalNamespace(configured interface{}) *storage.BucketHierarchicalNamespace { + configuredHierachicalNamespace := configured.([]interface{}) + if len(configuredHierachicalNamespace) == 0 { + return nil + } + configuredHierachicalNamespacePolicy := configuredHierachicalNamespace[0].(map[string]interface{}) + hierachicalNamespacePolicy := &storage.BucketHierarchicalNamespace{ + Enabled: (configuredHierachicalNamespacePolicy["enabled"].(bool)), + } + hierachicalNamespacePolicy.ForceSendFields = append(hierachicalNamespacePolicy.ForceSendFields, "Enabled") + return hierachicalNamespacePolicy +} + +func flattenBucketHierarchicalNamespacePolicy(hierachicalNamespacePolicy *storage.BucketHierarchicalNamespace) []map[string]interface{} { + policies := make([]map[string]interface{}, 0, 1) + if hierachicalNamespacePolicy == nil { + // a null object returned from the API is equivalent to a block with enabled = false + // to handle this consistently, always write a null response as a hydrated block with false + defaultPolicy := map[string]interface{}{ + "enabled": false, + } + + policies = append(policies, defaultPolicy) + return policies + } + policy := map[string]interface{}{ + "enabled": hierachicalNamespacePolicy.Enabled, + } + policies = append(policies, policy) + return policies +} + func expandBucketVersioning(configured interface{}) *storage.BucketVersioning { versionings := configured.([]interface{}) if len(versionings) == 0 { @@ -1397,13 +1453,19 @@ func flattenBucketLifecycleRuleCondition(index int, d *schema.ResourceData, cond ruleCondition["with_state"] = "ARCHIVED" } } - // setting no_age value from state config since it is terraform only variable and not getting value from backend. + // Setting the lifecycle condition virtual fields from the state file if they + // are already present otherwise setting them to individual default values. if v, ok := d.GetOk(fmt.Sprintf("lifecycle_rule.%d.condition", index)); ok { state_condition := v.(*schema.Set).List()[0].(map[string]interface{}) - ruleCondition["no_age"] = state_condition["no_age"].(bool) ruleCondition["send_days_since_noncurrent_time_if_zero"] = state_condition["send_days_since_noncurrent_time_if_zero"].(bool) ruleCondition["send_days_since_custom_time_if_zero"] = state_condition["send_days_since_custom_time_if_zero"].(bool) ruleCondition["send_num_newer_versions_if_zero"] = state_condition["send_num_newer_versions_if_zero"].(bool) + ruleCondition["send_age_if_zero"] = state_condition["send_age_if_zero"].(bool) + } else { + ruleCondition["send_age_if_zero"] = false + ruleCondition["send_days_since_noncurrent_time_if_zero"] = false + ruleCondition["send_days_since_custom_time_if_zero"] = false + ruleCondition["send_num_newer_versions_if_zero"] = false } return ruleCondition @@ -1553,13 +1615,10 @@ func expandStorageBucketLifecycleRuleCondition(v interface{}) (*storage.BucketLi condition := conditions[0].(map[string]interface{}) transformed := &storage.BucketLifecycleRuleCondition{} - // Setting high precedence of no_age over age when both used together. 
- // Only sets age value when no_age is not present or no_age is present and has false value - if v, ok := condition["no_age"]; !ok || !(v.(bool)) { - if v, ok := condition["age"]; ok { - age := int64(v.(int)) + if v, ok := condition["age"]; ok { + age := int64(v.(int)) + if u, ok := condition["send_age_if_zero"]; age > 0 || (ok && u.(bool)) { transformed.Age = &age - transformed.ForceSendFields = append(transformed.ForceSendFields, "Age") } } @@ -1670,12 +1729,8 @@ func resourceGCSBucketLifecycleRuleConditionHash(v interface{}) int { var buf bytes.Buffer m := v.(map[string]interface{}) - if v, ok := m["no_age"]; ok && v.(bool) { - buf.WriteString(fmt.Sprintf("%t-", v.(bool))) - } else { - if v, ok := m["age"]; ok { - buf.WriteString(fmt.Sprintf("%d-", v.(int))) - } + if v, ok := m["age"]; ok { + buf.WriteString(fmt.Sprintf("%d-", v.(int))) } if v, ok := m["days_since_custom_time"]; ok { @@ -1719,6 +1774,10 @@ func resourceGCSBucketLifecycleRuleConditionHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%d-", v.(int))) } + if v, ok := m["send_age_if_zero"]; ok { + buf.WriteString(fmt.Sprintf("%t-", v.(bool))) + } + if v, ok := m["send_days_since_noncurrent_time_if_zero"]; ok { buf.WriteString(fmt.Sprintf("%t-", v.(bool))) } @@ -1841,8 +1900,7 @@ func setStorageBucket(d *schema.ResourceData, config *transport_tpg.Config, res if err := d.Set("autoclass", flattenBucketAutoclass(res.Autoclass)); err != nil { return fmt.Errorf("Error setting autoclass: %s", err) } - // lifecycle_rule contains terraform only variable no_age. - // Passing config("d") to flattener function to set no_age separately. + // Passing config("d") to flattener function to set virtual fields separately. if err := d.Set("lifecycle_rule", flattenBucketLifecycle(d, res.Lifecycle)); err != nil { return fmt.Errorf("Error setting lifecycle_rule: %s", err) } @@ -1874,6 +1932,9 @@ func setStorageBucket(d *schema.ResourceData, config *transport_tpg.Config, res if err := d.Set("soft_delete_policy", flattenBucketSoftDeletePolicy(res.SoftDeletePolicy)); err != nil { return fmt.Errorf("Error setting soft_delete_policy: %s", err) } + if err := d.Set("hierarchical_namespace", flattenBucketHierarchicalNamespacePolicy(res.HierarchicalNamespace)); err != nil { + return fmt.Errorf("Error setting hierarchical namespace: %s", err) + } if res.IamConfiguration != nil && res.IamConfiguration.UniformBucketLevelAccess != nil { if err := d.Set("uniform_bucket_level_access", res.IamConfiguration.UniformBucketLevelAccess.Enabled); err != nil { return fmt.Errorf("Error setting uniform_bucket_level_access: %s", err) @@ -1903,3 +1964,14 @@ func setStorageBucket(d *schema.ResourceData, config *transport_tpg.Config, res d.SetId(res.Id) return nil } + +func hierachicalNamespaceDiffSuppress(k, old, new string, r *schema.ResourceData) bool { + if k == "hierarchical_namespace.#" && old == "1" && new == "0" { + o, _ := r.GetChange("hierarchical_namespace.0.enabled") + if !o.(bool) { + return true + } + } + + return false +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket_600_migration.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket_600_migration.go new file mode 100644 index 00000000000..a9f3a81f503 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket_600_migration.go @@ -0,0 +1,1058 @@ 
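A note on the RequireSsl change in expandIpConfiguration above: the generated google-api-go-client structs drop zero values via omitempty unless the field name is listed in ForceSendFields, while NullFields goes further and serializes an explicit JSON null, asking the API to clear the server-side value. A minimal illustrative sketch of the difference (not part of the patch; assumes google.golang.org/api/sqladmin/v1beta4 is importable):

package main

import (
	"encoding/json"
	"fmt"

	sqladmin "google.golang.org/api/sqladmin/v1beta4"
)

func main() {
	cfg := &sqladmin.IpConfiguration{
		Ipv4Enabled:     false,                   // zero value: dropped unless force-sent
		ForceSendFields: []string{"Ipv4Enabled"}, // keeps "ipv4Enabled": false in the payload
		NullFields:      []string{"RequireSsl"},  // emits "requireSsl": null
	}
	b, err := json.Marshal(cfg) // the generated MarshalJSON honors both lists
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"ipv4Enabled":false,"requireSsl":null}
}

Moving RequireSsl from ForceSendFields to NullFields therefore stops the provider from re-asserting require_ssl=false on every request and instead clears the legacy field, consistent with its deprecation in favor of ssl_mode.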
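The SchemaVersion bump from 1 to 3 in resource_storage_bucket.go works because the plugin SDK replays, in ascending order, every registered StateUpgrader whose Version is at or above the schema version recorded in state before diffing against the current schema; the migration file that follows snapshots the v1 and v2 schemas so their ImpliedType() can decode old state. A self-contained sketch of that chaining (illustrative only; upgradeState and the upgrader type are simplified stand-ins for the SDK internals, and the state shape is trimmed down to the lifecycle condition):

package main

import (
	"context"
	"fmt"
)

// upgrader mirrors the shape of schema.StateUpgrader.
type upgrader struct {
	Version int
	Upgrade func(context.Context, map[string]interface{}, interface{}) (map[string]interface{}, error)
}

// upgradeState applies each upgrader from the stored schema version up to
// the current one, the way the SDK chains StateUpgraders.
func upgradeState(ctx context.Context, stored, current int, ups []upgrader, raw map[string]interface{}) (map[string]interface{}, error) {
	for v := stored; v < current; v++ {
		for _, u := range ups {
			if u.Version != v {
				continue
			}
			var err error
			if raw, err = u.Upgrade(ctx, raw, nil); err != nil {
				return nil, fmt.Errorf("upgrade from v%d: %w", v, err)
			}
		}
	}
	return raw, nil
}

func main() {
	// State written by a release with SchemaVersion: 1 (shape simplified;
	// real state nests this under lifecycle_rule).
	raw := map[string]interface{}{
		"condition": []interface{}{map[string]interface{}{"age": 0, "no_age": true}},
	}
	ups := []upgrader{
		{Version: 1, Upgrade: func(_ context.Context, s map[string]interface{}, _ interface{}) (map[string]interface{}, error) {
			// v1 -> v2: introduce send_age_if_zero; the patch's V1 upgrader sets it to true.
			s["condition"].([]interface{})[0].(map[string]interface{})["send_age_if_zero"] = true
			return s, nil
		}},
		{Version: 2, Upgrade: func(_ context.Context, s map[string]interface{}, _ interface{}) (map[string]interface{}, error) {
			// v2 -> v3: the patch's V2 upgrader resets it to false.
			s["condition"].([]interface{})[0].(map[string]interface{})["send_age_if_zero"] = false
			return s, nil
		}},
	}
	out, err := upgradeState(context.Background(), 1, 3, ups, raw)
	fmt.Println(out, err) // v1 state passes through both upgraders before use
}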
+// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package storage + +import ( + "context" + "log" + "math" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func resourceStorageBucketV1() *schema.Resource { + return &schema.Resource{ + StateUpgraders: []schema.StateUpgrader{ + { + Type: resourceStorageBucketV0().CoreConfigSchema().ImpliedType(), + Upgrade: ResourceStorageBucketStateUpgradeV0, + Version: 0, + }, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the bucket.`, + ValidateFunc: verify.ValidateGCSName, + }, + + "encryption": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default_kms_key_name": { + Type: schema.TypeString, + Required: true, + Description: `A Cloud KMS key that will be used to encrypt objects inserted into this bucket, if no encryption method is specified. You must pay attention to whether the crypto key is available in the location that this bucket is created in. See the docs for more details.`, + }, + }, + }, + Description: `The bucket's encryption configuration.`, + }, + + "requester_pays": { + Type: schema.TypeBool, + Optional: true, + Description: `Enables Requester Pays on a storage bucket.`, + }, + + "force_destroy": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `When deleting a bucket, this boolean option will delete all contained objects. If you try to delete a bucket that contains objects, Terraform will fail that run.`, + }, + + "labels": { + Type: schema.TypeMap, + ValidateFunc: labelKeyValidator, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `A set of key/value label pairs to assign to the bucket.`, + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + StateFunc: func(s interface{}) string { + return strings.ToUpper(s.(string)) + }, + Description: `The Google Cloud Storage location`, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, + }, + + "project_number": { + Type: schema.TypeInt, + Computed: true, + Description: `The project number of the project in which the resource belongs.`, + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `The URI of the created resource.`, + }, + + "url": { + Type: schema.TypeString, + Computed: true, + Description: `The base URL of the bucket, in the format gs://.`, + }, + + "storage_class": { + Type: schema.TypeString, + Optional: true, + Default: "STANDARD", + Description: `The Storage Class of the new bucket. 
Supported values include: STANDARD, MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE.`, + }, + + "lifecycle_rule": { + Type: schema.TypeList, + Optional: true, + MaxItems: 100, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "action": { + Type: schema.TypeSet, + Required: true, + MinItems: 1, + MaxItems: 1, + Set: resourceGCSBucketLifecycleRuleActionHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + Description: `The type of the action of this Lifecycle Rule. Supported values include: Delete, SetStorageClass and AbortIncompleteMultipartUpload.`, + }, + "storage_class": { + Type: schema.TypeString, + Optional: true, + Description: `The target Storage Class of objects affected by this Lifecycle Rule. Supported values include: MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE.`, + }, + }, + }, + Description: `The Lifecycle Rule's action configuration. A single block of this type is supported.`, + }, + "condition": { + Type: schema.TypeSet, + Required: true, + MinItems: 1, + MaxItems: 1, + Set: resourceGCSBucketLifecycleRuleConditionHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "age": { + Type: schema.TypeInt, + Optional: true, + Description: `Minimum age of an object in days to satisfy this condition.`, + }, + "created_before": { + Type: schema.TypeString, + Optional: true, + Description: `Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition.`, + }, + "custom_time_before": { + Type: schema.TypeString, + Optional: true, + Description: `Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition.`, + }, + "days_since_custom_time": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of days elapsed since the user-specified timestamp set on an object.`, + }, + "days_since_noncurrent_time": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of days elapsed since the noncurrent timestamp of an object. This + condition is relevant only for versioned objects.`, + }, + "noncurrent_time_before": { + Type: schema.TypeString, + Optional: true, + Description: `Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition.`, + }, + "no_age": { + Type: schema.TypeBool, + Optional: true, + Description: `While set true, age value will be omitted.Required to set true when age is unset in the config file.`, + }, + "with_state": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"LIVE", "ARCHIVED", "ANY", ""}, false), + Description: `Match to live and/or archived objects. Unversioned buckets have only live objects. Supported values include: "LIVE", "ARCHIVED", "ANY".`, + }, + "matches_storage_class": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `Storage Class of objects to satisfy this condition. Supported values include: MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE, STANDARD, DURABLE_REDUCED_AVAILABILITY.`, + }, + "num_newer_versions": { + Type: schema.TypeInt, + Optional: true, + Description: `Relevant only for versioned objects. 
The number of newer versions of an object to satisfy this condition.`, + }, + "matches_prefix": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `One or more matching name prefixes to satisfy this condition.`, + }, + "matches_suffix": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `One or more matching name suffixes to satisfy this condition.`, + }, + "send_days_since_noncurrent_time_if_zero": { + Type: schema.TypeBool, + Optional: true, + Description: `While set true, days_since_noncurrent_time value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the days_since_noncurrent_time field. It can be used alone or together with days_since_noncurrent_time.`, + }, + "send_days_since_custom_time_if_zero": { + Type: schema.TypeBool, + Optional: true, + Description: `While set true, days_since_custom_time value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the days_since_custom_time field. It can be used alone or together with days_since_custom_time.`, + }, + "send_num_newer_versions_if_zero": { + Type: schema.TypeBool, + Optional: true, + Description: `While set true, num_newer_versions value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the num_newer_versions field. It can be used alone or together with num_newer_versions.`, + }, + }, + }, + Description: `The Lifecycle Rule's condition configuration.`, + }, + }, + }, + Description: `The bucket's Lifecycle Rules configuration.`, + }, + + "enable_object_retention": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Enables each object in the bucket to have its own retention policy, which prevents deletion until stored for a specific length of time.`, + }, + + "versioning": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `While set to true, versioning is fully enabled for this bucket.`, + }, + }, + }, + Description: `The bucket's Versioning configuration.`, + }, + + "autoclass": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `While set to true, autoclass automatically transitions objects in your bucket to appropriate storage classes based on each object's access pattern.`, + }, + "terminal_storage_class": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The storage class that objects in the bucket eventually transition to if they are not read for a certain length of time. 
Supported values include: NEARLINE, ARCHIVE.`, + }, + }, + }, + Description: `The bucket's autoclass configuration.`, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + _, n := d.GetChange(strings.TrimSuffix(k, ".#")) + if !strings.HasSuffix(k, ".#") { + return false + } + var l []interface{} + if new == "1" && old == "0" { + l = n.([]interface{}) + contents, ok := l[0].(map[string]interface{}) + if !ok { + return false + } + if contents["enabled"] == false { + return true + } + } + if new == "0" && old == "1" { + n := d.Get(strings.TrimSuffix(k, ".#")) + l = n.([]interface{}) + contents := l[0].(map[string]interface{}) + if contents["enabled"] == false { + return true + } + } + return false + }, + }, + "website": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "main_page_suffix": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"website.0.not_found_page", "website.0.main_page_suffix"}, + Description: `Behaves as the bucket's directory index where missing objects are treated as potential directories.`, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + return old != "" && new == "" + }, + }, + "not_found_page": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"website.0.main_page_suffix", "website.0.not_found_page"}, + Description: `The custom object to return when a requested resource is not found.`, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + return old != "" && new == "" + }, + }, + }, + }, + Description: `Configuration if the bucket acts as a website.`, + }, + + "retention_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "is_locked": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `If set to true, the bucket will be locked and permanently restrict edits to the bucket's retention policy. Caution: Locking a bucket is an irreversible action.`, + }, + "retention_period": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, math.MaxInt32), + Description: `The period of time, in seconds, that objects in the bucket must be retained and cannot be deleted, overwritten, or archived. The value must be less than 3,155,760,000 seconds.`, + }, + }, + }, + Description: `Configuration of the bucket's data retention policy for how long objects in the bucket should be retained.`, + }, + + "cors": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "origin": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: `The list of Origins eligible to receive CORS response headers. 
Note: "*" is permitted in the list of origins, and means "any Origin".`, + }, + "method": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: `The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list of methods, and means "any method".`, + }, + "response_header": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: `The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.`, + }, + "max_age_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses.`, + }, + }, + }, + Description: `The bucket's Cross-Origin Resource Sharing (CORS) configuration.`, + }, + + "default_event_based_hold": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether or not to automatically apply an eventBasedHold to new objects added to the bucket.`, + }, + + "logging": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "log_bucket": { + Type: schema.TypeString, + Required: true, + Description: `The bucket that will receive log objects.`, + }, + "log_object_prefix": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The object prefix for log objects. If it's not provided, by default Google Cloud Storage sets this to this bucket's name.`, + }, + }, + }, + Description: `The bucket's Access & Storage Logs configuration.`, + }, + "uniform_bucket_level_access": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: `Enables uniform bucket-level access on a bucket.`, + }, + "custom_placement_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "data_locations": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + MaxItems: 2, + MinItems: 2, + Elem: &schema.Schema{ + Type: schema.TypeString, + StateFunc: func(s interface{}) string { + return strings.ToUpper(s.(string)) + }, + }, + Description: `The list of individual regions that comprise a dual-region bucket. See the docs for a list of acceptable regions. Note: If any of the data_locations changes, it will recreate the bucket.`, + }, + }, + }, + Description: `The bucket's custom location configuration, which specifies the individual regions that comprise a dual-region bucket. If the bucket is designated a single or multi-region, the parameters are empty.`, + }, + "rpo": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Specifies the RPO setting of bucket. If set 'ASYNC_TURBO', The Turbo Replication will be enabled for the dual-region bucket. Value 'DEFAULT' will set RPO setting to default. Turbo Replication is only for buckets in dual-regions.See the docs for more details.`, + }, + "public_access_prevention": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Prevents public access to a bucket.`, + }, + "soft_delete_policy": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Description: `The bucket's soft delete policy, which defines the period of time that soft-deleted objects will be retained, and cannot be permanently deleted. 
If it is not provided, by default Google Cloud Storage sets this to default soft delete policy`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "retention_duration_seconds": { + Type: schema.TypeInt, + Default: 604800, + Optional: true, + Description: `The duration in seconds that soft-deleted objects in the bucket will be retained and cannot be permanently deleted. Default value is 604800.`, + }, + "effective_time": { + Type: schema.TypeString, + Computed: true, + Description: `Server-determined value that indicates the time from which the policy, or one with a greater retention, was effective. This value is in RFC 3339 format.`, + }, + }, + }, + }, + }, + UseJSONNumber: true, + } +} + +func ResourceStorageBucketStateUpgradeV1(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", rawState) + if rawState["lifecycle_rule"] != nil { + rawRules := rawState["lifecycle_rule"].([]interface{}) + for i, r := range rawRules { + newRule := r.(map[string]interface{}) + if newRule["condition"] != nil { + newCondition := newRule["condition"].([]interface{})[0].(map[string]interface{}) + newCondition["send_age_if_zero"] = true + newRule["condition"].([]interface{})[0] = newCondition + } + rawState["lifecycle_rule"].([]interface{})[i] = newRule + } + } + log.Printf("[DEBUG] Attributes after migration: %#v", rawState) + return rawState, nil +} + +func resourceStorageBucketV2() *schema.Resource { + return &schema.Resource{ + StateUpgraders: []schema.StateUpgrader{ + { + Type: resourceStorageBucketV0().CoreConfigSchema().ImpliedType(), + Upgrade: ResourceStorageBucketStateUpgradeV0, + Version: 0, + }, + { + Type: resourceStorageBucketV1().CoreConfigSchema().ImpliedType(), + Upgrade: ResourceStorageBucketStateUpgradeV1, + Version: 1, + }, + }, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the bucket.`, + ValidateFunc: verify.ValidateGCSName, + }, + + "encryption": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default_kms_key_name": { + Type: schema.TypeString, + Required: true, + Description: `A Cloud KMS key that will be used to encrypt objects inserted into this bucket, if no encryption method is specified. You must pay attention to whether the crypto key is available in the location that this bucket is created in. See the docs for more details.`, + }, + }, + }, + Description: `The bucket's encryption configuration.`, + }, + + "requester_pays": { + Type: schema.TypeBool, + Optional: true, + Description: `Enables Requester Pays on a storage bucket.`, + }, + + "force_destroy": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `When deleting a bucket, this boolean option will delete all contained objects. 
If you try to delete a bucket that contains objects, Terraform will fail that run.`, + }, + + "labels": { + Type: schema.TypeMap, + ValidateFunc: labelKeyValidator, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `A set of key/value label pairs to assign to the bucket.`, + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + StateFunc: func(s interface{}) string { + return strings.ToUpper(s.(string)) + }, + Description: `The Google Cloud Storage location`, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, + }, + + "project_number": { + Type: schema.TypeInt, + Computed: true, + Description: `The project number of the project in which the resource belongs.`, + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `The URI of the created resource.`, + }, + + "url": { + Type: schema.TypeString, + Computed: true, + Description: `The base URL of the bucket, in the format gs://.`, + }, + + "storage_class": { + Type: schema.TypeString, + Optional: true, + Default: "STANDARD", + Description: `The Storage Class of the new bucket. Supported values include: STANDARD, MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE.`, + }, + + "lifecycle_rule": { + Type: schema.TypeList, + Optional: true, + MaxItems: 100, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "action": { + Type: schema.TypeSet, + Required: true, + MinItems: 1, + MaxItems: 1, + Set: resourceGCSBucketLifecycleRuleActionHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + Description: `The type of the action of this Lifecycle Rule. Supported values include: Delete, SetStorageClass and AbortIncompleteMultipartUpload.`, + }, + "storage_class": { + Type: schema.TypeString, + Optional: true, + Description: `The target Storage Class of objects affected by this Lifecycle Rule. Supported values include: MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE.`, + }, + }, + }, + Description: `The Lifecycle Rule's action configuration. A single block of this type is supported.`, + }, + "condition": { + Type: schema.TypeSet, + Required: true, + MinItems: 1, + MaxItems: 1, + Set: resourceGCSBucketLifecycleRuleConditionHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "age": { + Type: schema.TypeInt, + Optional: true, + Description: `Minimum age of an object in days to satisfy this condition.`, + }, + "created_before": { + Type: schema.TypeString, + Optional: true, + Description: `Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition.`, + }, + "custom_time_before": { + Type: schema.TypeString, + Optional: true, + Description: `Creation date of an object in RFC 3339 (e.g. 
2017-06-13) to satisfy this condition.`, + }, + "days_since_custom_time": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of days elapsed since the user-specified timestamp set on an object.`, + }, + "days_since_noncurrent_time": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of days elapsed since the noncurrent timestamp of an object. This + condition is relevant only for versioned objects.`, + }, + "noncurrent_time_before": { + Type: schema.TypeString, + Optional: true, + Description: `Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition.`, + }, + "no_age": { + Type: schema.TypeBool, + Deprecated: "`no_age` is deprecated and will be removed in a future major release. Use `send_age_if_zero` instead.", + Optional: true, + Description: `While set true, age value will be omitted.Required to set true when age is unset in the config file.`, + }, + "with_state": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"LIVE", "ARCHIVED", "ANY", ""}, false), + Description: `Match to live and/or archived objects. Unversioned buckets have only live objects. Supported values include: "LIVE", "ARCHIVED", "ANY".`, + }, + "matches_storage_class": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `Storage Class of objects to satisfy this condition. Supported values include: MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE, STANDARD, DURABLE_REDUCED_AVAILABILITY.`, + }, + "num_newer_versions": { + Type: schema.TypeInt, + Optional: true, + Description: `Relevant only for versioned objects. The number of newer versions of an object to satisfy this condition.`, + }, + "matches_prefix": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `One or more matching name prefixes to satisfy this condition.`, + }, + "matches_suffix": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `One or more matching name suffixes to satisfy this condition.`, + }, + "send_age_if_zero": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: `While set true, age value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the age field. It can be used alone or together with age.`, + }, + "send_days_since_noncurrent_time_if_zero": { + Type: schema.TypeBool, + Optional: true, + Description: `While set true, days_since_noncurrent_time value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the days_since_noncurrent_time field. It can be used alone or together with days_since_noncurrent_time.`, + }, + "send_days_since_custom_time_if_zero": { + Type: schema.TypeBool, + Optional: true, + Description: `While set true, days_since_custom_time value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the days_since_custom_time field. It can be used alone or together with days_since_custom_time.`, + }, + "send_num_newer_versions_if_zero": { + Type: schema.TypeBool, + Optional: true, + Description: `While set true, num_newer_versions value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the num_newer_versions field. 
It can be used alone or together with num_newer_versions.`, + }, + }, + }, + Description: `The Lifecycle Rule's condition configuration.`, + }, + }, + }, + Description: `The bucket's Lifecycle Rules configuration.`, + }, + + "enable_object_retention": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Enables each object in the bucket to have its own retention policy, which prevents deletion until stored for a specific length of time.`, + }, + + "versioning": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `While set to true, versioning is fully enabled for this bucket.`, + }, + }, + }, + Description: `The bucket's Versioning configuration.`, + }, + + "autoclass": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `While set to true, autoclass automatically transitions objects in your bucket to appropriate storage classes based on each object's access pattern.`, + }, + "terminal_storage_class": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The storage class that objects in the bucket eventually transition to if they are not read for a certain length of time. Supported values include: NEARLINE, ARCHIVE.`, + }, + }, + }, + Description: `The bucket's autoclass configuration.`, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + _, n := d.GetChange(strings.TrimSuffix(k, ".#")) + if !strings.HasSuffix(k, ".#") { + return false + } + var l []interface{} + if new == "1" && old == "0" { + l = n.([]interface{}) + contents, ok := l[0].(map[string]interface{}) + if !ok { + return false + } + if contents["enabled"] == false { + return true + } + } + if new == "0" && old == "1" { + n := d.Get(strings.TrimSuffix(k, ".#")) + l = n.([]interface{}) + contents := l[0].(map[string]interface{}) + if contents["enabled"] == false { + return true + } + } + return false + }, + }, + "website": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "main_page_suffix": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"website.0.not_found_page", "website.0.main_page_suffix"}, + Description: `Behaves as the bucket's directory index where missing objects are treated as potential directories.`, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + return old != "" && new == "" + }, + }, + "not_found_page": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"website.0.main_page_suffix", "website.0.not_found_page"}, + Description: `The custom object to return when a requested resource is not found.`, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + return old != "" && new == "" + }, + }, + }, + }, + Description: `Configuration if the bucket acts as a website.`, + }, + + "retention_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "is_locked": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `If set to true, the bucket will be locked and permanently restrict edits to the bucket's retention policy. 
Caution: Locking a bucket is an irreversible action.`, + }, + "retention_period": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, math.MaxInt32), + Description: `The period of time, in seconds, that objects in the bucket must be retained and cannot be deleted, overwritten, or archived. The value must be less than 3,155,760,000 seconds.`, + }, + }, + }, + Description: `Configuration of the bucket's data retention policy for how long objects in the bucket should be retained.`, + }, + + "cors": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "origin": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: `The list of Origins eligible to receive CORS response headers. Note: "*" is permitted in the list of origins, and means "any Origin".`, + }, + "method": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: `The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list of methods, and means "any method".`, + }, + "response_header": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: `The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.`, + }, + "max_age_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses.`, + }, + }, + }, + Description: `The bucket's Cross-Origin Resource Sharing (CORS) configuration.`, + }, + + "default_event_based_hold": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether or not to automatically apply an eventBasedHold to new objects added to the bucket.`, + }, + + "logging": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "log_bucket": { + Type: schema.TypeString, + Required: true, + Description: `The bucket that will receive log objects.`, + }, + "log_object_prefix": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The object prefix for log objects. If it's not provided, by default Google Cloud Storage sets this to this bucket's name.`, + }, + }, + }, + Description: `The bucket's Access & Storage Logs configuration.`, + }, + "uniform_bucket_level_access": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: `Enables uniform bucket-level access on a bucket.`, + }, + "custom_placement_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "data_locations": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + MaxItems: 2, + MinItems: 2, + Elem: &schema.Schema{ + Type: schema.TypeString, + StateFunc: func(s interface{}) string { + return strings.ToUpper(s.(string)) + }, + }, + Description: `The list of individual regions that comprise a dual-region bucket. See the docs for a list of acceptable regions. Note: If any of the data_locations changes, it will recreate the bucket.`, + }, + }, + }, + Description: `The bucket's custom location configuration, which specifies the individual regions that comprise a dual-region bucket. 
If the bucket is designated a single or multi-region, the parameters are empty.`, + }, + "rpo": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Specifies the RPO setting of bucket. If set 'ASYNC_TURBO', The Turbo Replication will be enabled for the dual-region bucket. Value 'DEFAULT' will set RPO setting to default. Turbo Replication is only for buckets in dual-regions.See the docs for more details.`, + }, + "public_access_prevention": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Prevents public access to a bucket.`, + }, + "soft_delete_policy": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Description: `The bucket's soft delete policy, which defines the period of time that soft-deleted objects will be retained, and cannot be permanently deleted. If it is not provided, by default Google Cloud Storage sets this to default soft delete policy`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "retention_duration_seconds": { + Type: schema.TypeInt, + Default: 604800, + Optional: true, + Description: `The duration in seconds that soft-deleted objects in the bucket will be retained and cannot be permanently deleted. Default value is 604800.`, + }, + "effective_time": { + Type: schema.TypeString, + Computed: true, + Description: `Server-determined value that indicates the time from which the policy, or one with a greater retention, was effective. This value is in RFC 3339 format.`, + }, + }, + }, + }, + }, + UseJSONNumber: true, + } +} + +func ResourceStorageBucketStateUpgradeV2(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", rawState) + if rawState["lifecycle_rule"] != nil { + rawRules := rawState["lifecycle_rule"].([]interface{}) + for i, r := range rawRules { + newRule := r.(map[string]interface{}) + if newRule["condition"] != nil { + newCondition := newRule["condition"].([]interface{})[0].(map[string]interface{}) + newCondition["send_age_if_zero"] = false + newRule["condition"].([]interface{})[0] = newCondition + } + rawState["lifecycle_rule"].([]interface{})[i] = newRule + } + } + log.Printf("[DEBUG] Attributes after migration: %#v", rawState) + return rawState, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket_object.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket_object.go index de943829d13..06812ce8a62 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket_object.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket_object.go @@ -97,6 +97,12 @@ func ResourceStorageBucketObject() *schema.Resource { Description: `Data as string to be uploaded. Must be defined if source is not. Note: The content field is marked as sensitive. To view the raw contents of the object, please define an output.`, }, + "generation": { + Type: schema.TypeInt, + Computed: true, + Description: `The content generation of this object. 
Used for object versioning and soft delete.`, + }, + "crc32c": { Type: schema.TypeString, Computed: true, @@ -456,6 +462,9 @@ func resourceStorageBucketObjectRead(d *schema.ResourceData, meta interface{}) e if err := d.Set("detect_md5hash", res.Md5Hash); err != nil { return fmt.Errorf("Error setting detect_md5hash: %s", err) } + if err := d.Set("generation", res.Generation); err != nil { + return fmt.Errorf("Error setting generation: %s", err) + } if err := d.Set("crc32c", res.Crc32c); err != nil { return fmt.Errorf("Error setting crc32c: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_managed_folder.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_managed_folder.go index 86fce4baf82..0d7bb084550 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_managed_folder.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_managed_folder.go @@ -35,6 +35,7 @@ func ResourceStorageManagedFolder() *schema.Resource { return &schema.Resource{ Create: resourceStorageManagedFolderCreate, Read: resourceStorageManagedFolderRead, + Update: resourceStorageManagedFolderUpdate, Delete: resourceStorageManagedFolderDelete, Importer: &schema.ResourceImporter{ @@ -43,6 +44,7 @@ func ResourceStorageManagedFolder() *schema.Resource { Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), Delete: schema.DefaultTimeout(20 * time.Minute), }, @@ -77,6 +79,16 @@ trailing '/'. For example, 'example_dir/example_dir2/'.`, Computed: true, Description: `The timestamp at which this managed folder was most recently updated.`, }, + "force_destroy": { + Type: schema.TypeBool, + Optional: true, + Description: `Allows the deletion of a managed folder even if it contains +objects. 
If a non-empty managed folder is deleted, any objects +within the folder will remain in a simulated folder with the +same name.`, + Default: false, + }, + "self_link": { Type: schema.TypeString, Computed: true, @@ -179,6 +191,13 @@ func resourceStorageManagedFolderRead(d *schema.ResourceData, meta interface{}) return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("StorageManagedFolder %q", d.Id())) } + // Explicitly set virtual fields to default values if unset + if _, ok := d.GetOkExists("force_destroy"); !ok { + if err := d.Set("force_destroy", false); err != nil { + return fmt.Errorf("Error setting force_destroy: %s", err) + } + } + if err := d.Set("create_time", flattenStorageManagedFolderCreateTime(res["createTime"], d, config)); err != nil { return fmt.Errorf("Error reading ManagedFolder: %s", err) } @@ -201,6 +220,21 @@ func resourceStorageManagedFolderRead(d *schema.ResourceData, meta interface{}) return nil } +func resourceStorageManagedFolderUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + _ = config + + // we can only get here if force_destroy was updated + if d.Get("force_destroy") != nil { + if err := d.Set("force_destroy", d.Get("force_destroy")); err != nil { + return fmt.Errorf("Error updating force_destroy: %s", err) + } + } + + // all other fields are immutable, don't do anything else + return nil +} + func resourceStorageManagedFolderDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) @@ -210,7 +244,7 @@ func resourceStorageManagedFolderDelete(d *schema.ResourceData, meta interface{} billingProject := "" - url, err := tpgresource.ReplaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/managedFolders/{{%name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/managedFolders/{{%name}}?allowNonEmpty={{force_destroy}}") if err != nil { return err } @@ -259,6 +293,11 @@ func resourceStorageManagedFolderImport(d *schema.ResourceData, meta interface{} } d.SetId(id) + // Explicitly set virtual fields to default values on import + if err := d.Set("force_destroy", false); err != nil { + return nil, fmt.Errorf("Error setting force_destroy: %s", err) + } + return []*schema.ResourceData{d}, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_location_tag_bindings.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_location_tag_bindings.go index 66f18576ad2..9995b8e1734 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_location_tag_bindings.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_location_tag_bindings.go @@ -9,8 +9,10 @@ import ( "strings" "time" + "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/googleapi" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -182,7 +184,7 @@ func resourceTagsLocationTagBindingRead(d *schema.ResourceData, meta interface{} UserAgent: userAgent, }) if err != nil { - return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("TagsLocationTagBinding %q", d.Id())) + return 
transport_tpg.HandleNotFoundError(transformTagsLocationTagBindingReadError(err), d, fmt.Sprintf("TagsLocationTagBinding %q", d.Id())) } log.Printf("[DEBUG] Skipping res with name for import = %#v,)", res) @@ -197,7 +199,7 @@ func resourceTagsLocationTagBindingRead(d *schema.ResourceData, meta interface{} for pageToken != "" { url, err = transport_tpg.AddQueryParams(url, map[string]string{"pageToken": fmt.Sprintf("%s", res["nextPageToken"])}) if err != nil { - return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("TagsLocationTagBinding %q", d.Id())) + return transport_tpg.HandleNotFoundError(transformTagsLocationTagBindingReadError(err), d, fmt.Sprintf("TagsLocationTagBinding %q", d.Id())) } resp, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, @@ -388,3 +390,27 @@ func resourceTagsLocationTagBindingFindNestedObjectInList(d *schema.ResourceData } return -1, nil, nil } + +func transformTagsLocationTagBindingReadError(err error) error { + if gErr, ok := errwrap.GetType(err, &googleapi.Error{}).(*googleapi.Error); ok && gErr.Code == 403 { + for _, detail := range gErr.Details { + if detailMap, ok := detail.(map[string]interface{}); ok { + if detailType, ok := detailMap["@type"].(string); ok && detailType == "type.googleapis.com/google.rpc.ResourceInfo" { + if description, ok := detailMap["description"].(string); ok && strings.Contains(description, "(or the resource may not exist in this location)") { + // This error occurs when either the tag binding parent does not exist, or permission is denied. It is + // deliberately ambiguous so that existence information is not revealed to the caller. However, for + // the Read function, we can only assume that the membership does not exist, and proceed with attempting + // other operations. Since HandleNotFoundError(...) expects an error code of 404 when a resource does not + // exist, to get the desired behavior, we modify the error code to be 404. + gErr.Code = 404 + } + } + } + } + + log.Printf("[DEBUG] Transformed TagsLocationTagBinding error") + return gErr + } + + return err +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_key.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_key.go index c82deb40282..46051664406 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_key.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_key.go @@ -62,10 +62,10 @@ func ResourceTagsTagKey() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validation.StringLenBetween(1, 63), + ValidateFunc: validation.StringLenBetween(1, 256), Description: `Input only. The user friendly name for a TagKey. The short name should be unique for TagKeys within the same tag namespace. -The short name must be 1-63 characters, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between.`, +The short name can have a maximum length of 256 characters. 
The permitted character set for the shortName includes all UTF-8 encoded Unicode characters except single quotes ('), double quotes ("), backslashes (\\), and forward slashes (/).`, }, "description": { Type: schema.TypeString, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_value.go index f9d40f2aa5e..40decd9f454 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_value.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_value.go @@ -60,10 +60,10 @@ func ResourceTagsTagValue() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validation.StringLenBetween(1, 63), + ValidateFunc: validation.StringLenBetween(1, 256), Description: `Input only. User-assigned short name for TagValue. The short name should be unique for TagValues within the same parent TagKey. -The short name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between.`, +The short name can have a maximum length of 256 characters. The permitted character set for the shortName includes all UTF-8 encoded Unicode characters except single quotes ('), double quotes ("), backslashes (\\), and forward slashes (/).`, }, "description": { Type: schema.TypeString, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/transcoder/resource_transcoder_job.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/transcoder/resource_transcoder_job.go new file mode 100644 index 00000000000..9fe9319568a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/transcoder/resource_transcoder_job.go @@ -0,0 +1,2976 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
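+// This file implements the google_transcoder_job resource: the job
+// configuration schema, the Create/Read/Update/Delete handlers that call
+// the Transcoder API, and the expand/flatten helpers that translate
+// between Terraform state and the API's JSON representation.
+//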
+// +// ---------------------------------------------------------------------------- + +package transcoder + +import ( + "fmt" + "log" + "net/http" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceTranscoderJob() *schema.Resource { + return &schema.Resource{ + Create: resourceTranscoderJobCreate, + Read: resourceTranscoderJobRead, + Update: resourceTranscoderJobUpdate, + Delete: resourceTranscoderJobDelete, + + Importer: &schema.ResourceImporter{ + State: resourceTranscoderJobImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.SetLabelsDiff, + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location of the transcoding job resource.`, + }, + "config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The configuration for this template.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ad_breaks": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Ad break.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "start_time_offset": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Start time in seconds for the ad break, relative to the output file timeline`, + }, + }, + }, + }, + "edit_list": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `List of input assets stored in Cloud Storage.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "inputs": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `List of values identifying files that should be used in this atom.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "key": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `A unique key for this atom.`, + }, + "start_time_offset": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Start time in seconds for the atom, relative to the input file timeline. The default is '0s'.`, + }, + }, + }, + }, + "elementary_streams": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `List of input assets stored in Cloud Storage.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "audio_stream": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Encoding of an audio stream.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bitrate_bps": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `Audio bitrate in bits per second.`, + }, + "channel_count": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Number of audio channels. 
The default is '2'.`, + }, + "channel_layout": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `A list of channel names specifying layout of the audio channels. The default is ["fl", "fr"].`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "codec": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The codec for this audio stream. The default is 'aac'.`, + }, + "sample_rate_hertz": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The audio sample rate in Hertz. The default is '48000'.`, + }, + }, + }, + }, + "key": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `A unique key for this atom.`, + }, + "video_stream": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Encoding of a video stream.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "h264": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `H264 codec settings`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bitrate_bps": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `The video bitrate in bits per second.`, + }, + "frame_rate": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `The target video frame rate in frames per second (FPS).`, + }, + "crf_level": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Target CRF level. The default is '21'.`, + }, + "entropy_coder": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The entropy coder to use. The default is 'cabac'.`, + }, + "gop_duration": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Select the GOP size based on the specified duration. The default is '3s'.`, + }, + "height_pixels": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The height of the video in pixels.`, + }, + "hlg": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HLG color format setting for H264.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "pixel_format": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Pixel format to use. The default is 'yuv420p'.`, + }, + "preset": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Enforces the specified codec preset. The default is 'veryfast'.`, + }, + "profile": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Enforces the specified codec profile.`, + }, + "rate_control_mode": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Specify the mode. 
The default is 'vbr'.`, + }, + "sdr": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `SDR color format setting for H264.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "vbv_fullness_bits": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Initial fullness of the Video Buffering Verifier (VBV) buffer in bits.`, + }, + "vbv_size_bits": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Size of the Video Buffering Verifier (VBV) buffer in bits.`, + }, + "width_pixels": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The width of the video in pixels.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "encryptions": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `List of encryption configurations for the content.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Identifier for this set of encryption options.`, + }, + "aes128": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Configuration for AES-128 encryption.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "drm_systems": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `DRM system(s) to use; at least one must be specified. If a DRM system is omitted, it is considered disabled.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "clearkey": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Clearkey configuration.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "fairplay": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Fairplay configuration.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "playready": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Playready configuration.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "widevine": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Widevine configuration.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + }, + }, + }, + "mpeg_cenc": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Configuration for MPEG Common Encryption (MPEG-CENC).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "scheme": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Specify the encryption scheme.`, + }, + }, + }, + }, + "sample_aes": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Configuration for SAMPLE-AES encryption.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "secret_manager_key_source": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Configuration for secrets stored in Google Secret Manager.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret_version": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + 
Description: `The name of the Secret Version containing the encryption key in the following format: projects/{project}/secrets/{secret_id}/versions/{version_number}.`, + }, + }, + }, + }, + }, + }, + }, + "inputs": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `List of input assets stored in Cloud Storage.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `A unique key for this input. Must be specified when using advanced mapping and edit lists.`, + }, + "uri": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `URI of the media. Input files must be at least 5 seconds in duration and stored in Cloud Storage (for example, gs://bucket/inputs/file.mp4). +If empty, the value is populated from Job.input_uri.`, + }, + }, + }, + }, + "manifests": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Manifest configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"MANIFEST_TYPE_UNSPECIFIED", "HLS", "DASH"}), + Description: `Type of the manifest. Possible values: ["MANIFEST_TYPE_UNSPECIFIED", "HLS", "DASH"]`, + }, + "file_name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The name of the generated file. The default is 'manifest'.`, + }, + "mux_streams": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `List of user supplied MuxStream.key values that should appear in this manifest.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "mux_streams": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Multiplexing settings for output stream.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "container": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The container format. The default is 'mp4'.`, + }, + "elementary_streams": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `List of ElementaryStream.key values multiplexed in this stream.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "encryption_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Identifier of the encryption configuration to use.`, + }, + "file_name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The name of the generated file.`, + }, + "key": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `A unique key for this multiplexed stream.`, + }, + "segment_settings": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Segment settings for ts, fmp4 and vtt.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "segment_duration": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Duration of the segments in seconds. 
The default is '6.0s'.`, + }, + }, + }, + }, + }, + }, + }, + "output": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Location of output file(s) in a Cloud Storage bucket.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uri": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `URI for the output file(s). For example, gs://my-bucket/outputs/.`, + }, + }, + }, + }, + "overlays": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `List of overlays on the output video, in descending Z-order.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "animations": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `List of animations. The list should be chronological, without any time overlap.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "animation_fade": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Display overlay object with fade animation.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fade_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"FADE_TYPE_UNSPECIFIED", "FADE_IN", "FADE_OUT"}), + Description: `Required. Type of fade animation: 'FADE_IN' or 'FADE_OUT'. +The possible values are: + +* 'FADE_TYPE_UNSPECIFIED': The fade type is not specified. + +* 'FADE_IN': Fade the overlay object into view. + +* 'FADE_OUT': Fade the overlay object out of view. Possible values: ["FADE_TYPE_UNSPECIFIED", "FADE_IN", "FADE_OUT"]`, + }, + "end_time_offset": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The time to end the fade animation, in seconds.`, + }, + "start_time_offset": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The time to start the fade animation, in seconds.`, + }, + "xy": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Normalized coordinates based on output video resolution.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "x": { + Type: schema.TypeFloat, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Normalized x coordinate.`, + }, + "y": { + Type: schema.TypeFloat, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Normalized y coordinate.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "image": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Image overlay.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uri": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `URI of the image in Cloud Storage. For example, gs://bucket/inputs/image.png.`, + }, + }, + }, + }, + }, + }, + }, + "pubsub_destination": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Pub/Sub destination.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "topic": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The name of the Pub/Sub topic to publish job completion notification to. 
For example: projects/{project}/topics/{topic}.`, + }, + }, + }, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `The labels associated with this job. You can use these to organize and group your jobs. + + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "template_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Specify the templateId to use for populating Job.config. +The default is preset/web-hd, which is the only supported preset.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time the job was created.`, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "end_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time the transcoding finished.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the job.`, + }, + "start_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time the transcoding started.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The current state of the job.`, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceTranscoderJobCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + templateIdProp, err := expandTranscoderJobTemplateId(d.Get("template_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("template_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(templateIdProp)) && (ok || !reflect.DeepEqual(v, templateIdProp)) { + obj["templateId"] = templateIdProp + } + configProp, err := expandTranscoderJobConfig(d.Get("config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("config"); !tpgresource.IsEmptyValue(reflect.ValueOf(configProp)) && (ok || !reflect.DeepEqual(v, configProp)) { + obj["config"] = configProp + } + labelsProp, err := expandTranscoderJobEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{TranscoderBasePath}}projects/{{project}}/locations/{{location}}/jobs") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Job: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error 
fetching project for Job: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating Job: %s", err) + } + if err := d.Set("name", flattenTranscoderJobName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Job %q: %#v", d.Id(), res) + + return resourceTranscoderJobRead(d, meta) +} + +func resourceTranscoderJobRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{TranscoderBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Job: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("TranscoderJob %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + + if err := d.Set("name", flattenTranscoderJobName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("create_time", flattenTranscoderJobCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("start_time", flattenTranscoderJobStartTime(res["startTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("end_time", flattenTranscoderJobEndTime(res["endTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("state", flattenTranscoderJobState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("labels", flattenTranscoderJobLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("config", flattenTranscoderJobConfig(res["config"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("terraform_labels", flattenTranscoderJobTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("effective_labels", 
flattenTranscoderJobEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + + return nil +} + +func resourceTranscoderJobUpdate(d *schema.ResourceData, meta interface{}) error { + // Only the root field "labels" and "terraform_labels" are mutable + return resourceTranscoderJobRead(d, meta) +} + +func resourceTranscoderJobDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Job: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{TranscoderBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting Job %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Job") + } + + log.Printf("[DEBUG] Finished deleting Job %q: %#v", d.Id(), res) + return nil +} + +func resourceTranscoderJobImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { + return nil, err + } + + return []*schema.ResourceData{d}, nil +} + +func flattenTranscoderJobName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobEndTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenTranscoderJobConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["inputs"] = + flattenTranscoderJobConfigInputs(original["inputs"], d, config) + transformed["edit_list"] = + flattenTranscoderJobConfigEditList(original["editList"], d, config) + 
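	// The remaining config fields follow the same pattern: the API's camelCase
	// JSON key (for example "elementaryStreams") is flattened into the
	// corresponding snake_case schema key ("elementary_streams") by the
	// helper functions defined below, and list flatteners skip empty objects
	// returned by the API so they do not register as spurious diffs.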
transformed["elementary_streams"] = + flattenTranscoderJobConfigElementaryStreams(original["elementaryStreams"], d, config) + transformed["mux_streams"] = + flattenTranscoderJobConfigMuxStreams(original["muxStreams"], d, config) + transformed["manifests"] = + flattenTranscoderJobConfigManifests(original["manifests"], d, config) + transformed["output"] = + flattenTranscoderJobConfigOutput(original["output"], d, config) + transformed["ad_breaks"] = + flattenTranscoderJobConfigAdBreaks(original["adBreaks"], d, config) + transformed["pubsub_destination"] = + flattenTranscoderJobConfigPubsubDestination(original["pubsubDestination"], d, config) + transformed["overlays"] = + flattenTranscoderJobConfigOverlays(original["overlays"], d, config) + transformed["encryptions"] = + flattenTranscoderJobConfigEncryptions(original["encryptions"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobConfigInputs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "key": flattenTranscoderJobConfigInputsKey(original["key"], d, config), + "uri": flattenTranscoderJobConfigInputsUri(original["uri"], d, config), + }) + } + return transformed +} +func flattenTranscoderJobConfigInputsKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigInputsUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigEditList(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "key": flattenTranscoderJobConfigEditListKey(original["key"], d, config), + "inputs": flattenTranscoderJobConfigEditListInputs(original["inputs"], d, config), + "start_time_offset": flattenTranscoderJobConfigEditListStartTimeOffset(original["startTimeOffset"], d, config), + }) + } + return transformed +} +func flattenTranscoderJobConfigEditListKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigEditListInputs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigEditListStartTimeOffset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigElementaryStreams(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "key": 
flattenTranscoderJobConfigElementaryStreamsKey(original["key"], d, config), + "video_stream": flattenTranscoderJobConfigElementaryStreamsVideoStream(original["videoStream"], d, config), + "audio_stream": flattenTranscoderJobConfigElementaryStreamsAudioStream(original["audioStream"], d, config), + }) + } + return transformed +} +func flattenTranscoderJobConfigElementaryStreamsKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigElementaryStreamsVideoStream(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["h264"] = + flattenTranscoderJobConfigElementaryStreamsVideoStreamH264(original["h264"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobConfigElementaryStreamsVideoStreamH264(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["width_pixels"] = + flattenTranscoderJobConfigElementaryStreamsVideoStreamH264WidthPixels(original["widthPixels"], d, config) + transformed["height_pixels"] = + flattenTranscoderJobConfigElementaryStreamsVideoStreamH264HeightPixels(original["heightPixels"], d, config) + transformed["frame_rate"] = + flattenTranscoderJobConfigElementaryStreamsVideoStreamH264FrameRate(original["frameRate"], d, config) + transformed["bitrate_bps"] = + flattenTranscoderJobConfigElementaryStreamsVideoStreamH264BitrateBps(original["bitrateBps"], d, config) + transformed["pixel_format"] = + flattenTranscoderJobConfigElementaryStreamsVideoStreamH264PixelFormat(original["pixelFormat"], d, config) + transformed["rate_control_mode"] = + flattenTranscoderJobConfigElementaryStreamsVideoStreamH264RateControlMode(original["rateControlMode"], d, config) + transformed["crf_level"] = + flattenTranscoderJobConfigElementaryStreamsVideoStreamH264CrfLevel(original["crfLevel"], d, config) + transformed["vbv_size_bits"] = + flattenTranscoderJobConfigElementaryStreamsVideoStreamH264VbvSizeBits(original["vbvSizeBits"], d, config) + transformed["vbv_fullness_bits"] = + flattenTranscoderJobConfigElementaryStreamsVideoStreamH264VbvFullnessBits(original["vbvFullnessBits"], d, config) + transformed["entropy_coder"] = + flattenTranscoderJobConfigElementaryStreamsVideoStreamH264EntropyCoder(original["entropyCoder"], d, config) + transformed["profile"] = + flattenTranscoderJobConfigElementaryStreamsVideoStreamH264Profile(original["profile"], d, config) + transformed["preset"] = + flattenTranscoderJobConfigElementaryStreamsVideoStreamH264Preset(original["preset"], d, config) + transformed["gop_duration"] = + flattenTranscoderJobConfigElementaryStreamsVideoStreamH264GopDuration(original["gopDuration"], d, config) + transformed["sdr"] = + flattenTranscoderJobConfigElementaryStreamsVideoStreamH264Sdr(original["sdr"], d, config) + transformed["hlg"] = + flattenTranscoderJobConfigElementaryStreamsVideoStreamH264Hlg(original["hlg"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobConfigElementaryStreamsVideoStreamH264WidthPixels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := 
v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenTranscoderJobConfigElementaryStreamsVideoStreamH264HeightPixels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenTranscoderJobConfigElementaryStreamsVideoStreamH264FrameRate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenTranscoderJobConfigElementaryStreamsVideoStreamH264BitrateBps(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenTranscoderJobConfigElementaryStreamsVideoStreamH264PixelFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigElementaryStreamsVideoStreamH264RateControlMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigElementaryStreamsVideoStreamH264CrfLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenTranscoderJobConfigElementaryStreamsVideoStreamH264VbvSizeBits(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenTranscoderJobConfigElementaryStreamsVideoStreamH264VbvFullnessBits(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); 
err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenTranscoderJobConfigElementaryStreamsVideoStreamH264EntropyCoder(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigElementaryStreamsVideoStreamH264Profile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigElementaryStreamsVideoStreamH264Preset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigElementaryStreamsVideoStreamH264GopDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigElementaryStreamsVideoStreamH264Sdr(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenTranscoderJobConfigElementaryStreamsVideoStreamH264Hlg(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenTranscoderJobConfigElementaryStreamsAudioStream(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["codec"] = + flattenTranscoderJobConfigElementaryStreamsAudioStreamCodec(original["codec"], d, config) + transformed["bitrate_bps"] = + flattenTranscoderJobConfigElementaryStreamsAudioStreamBitrateBps(original["bitrateBps"], d, config) + transformed["channel_count"] = + flattenTranscoderJobConfigElementaryStreamsAudioStreamChannelCount(original["channelCount"], d, config) + transformed["channel_layout"] = + flattenTranscoderJobConfigElementaryStreamsAudioStreamChannelLayout(original["channelLayout"], d, config) + transformed["sample_rate_hertz"] = + flattenTranscoderJobConfigElementaryStreamsAudioStreamSampleRateHertz(original["sampleRateHertz"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobConfigElementaryStreamsAudioStreamCodec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigElementaryStreamsAudioStreamBitrateBps(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenTranscoderJobConfigElementaryStreamsAudioStreamChannelCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if 
floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenTranscoderJobConfigElementaryStreamsAudioStreamChannelLayout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigElementaryStreamsAudioStreamSampleRateHertz(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenTranscoderJobConfigMuxStreams(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "key": flattenTranscoderJobConfigMuxStreamsKey(original["key"], d, config), + "file_name": flattenTranscoderJobConfigMuxStreamsFileName(original["fileName"], d, config), + "container": flattenTranscoderJobConfigMuxStreamsContainer(original["container"], d, config), + "elementary_streams": flattenTranscoderJobConfigMuxStreamsElementaryStreams(original["elementaryStreams"], d, config), + "segment_settings": flattenTranscoderJobConfigMuxStreamsSegmentSettings(original["segmentSettings"], d, config), + "encryption_id": flattenTranscoderJobConfigMuxStreamsEncryptionId(original["encryptionId"], d, config), + }) + } + return transformed +} +func flattenTranscoderJobConfigMuxStreamsKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigMuxStreamsFileName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigMuxStreamsContainer(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigMuxStreamsElementaryStreams(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigMuxStreamsSegmentSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["segment_duration"] = + flattenTranscoderJobConfigMuxStreamsSegmentSettingsSegmentDuration(original["segmentDuration"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobConfigMuxStreamsSegmentSettingsSegmentDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigMuxStreamsEncryptionId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigManifests(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed 
:= make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "file_name": flattenTranscoderJobConfigManifestsFileName(original["fileName"], d, config), + "type": flattenTranscoderJobConfigManifestsType(original["type"], d, config), + "mux_streams": flattenTranscoderJobConfigManifestsMuxStreams(original["muxStreams"], d, config), + }) + } + return transformed +} +func flattenTranscoderJobConfigManifestsFileName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigManifestsType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigManifestsMuxStreams(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigOutput(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["uri"] = + flattenTranscoderJobConfigOutputUri(original["uri"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobConfigOutputUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigAdBreaks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "start_time_offset": flattenTranscoderJobConfigAdBreaksStartTimeOffset(original["startTimeOffset"], d, config), + }) + } + return transformed +} +func flattenTranscoderJobConfigAdBreaksStartTimeOffset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigPubsubDestination(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["topic"] = + flattenTranscoderJobConfigPubsubDestinationTopic(original["topic"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobConfigPubsubDestinationTopic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigOverlays(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "image": flattenTranscoderJobConfigOverlaysImage(original["image"], d, config), + "animations": 
flattenTranscoderJobConfigOverlaysAnimations(original["animations"], d, config), + }) + } + return transformed +} +func flattenTranscoderJobConfigOverlaysImage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["uri"] = + flattenTranscoderJobConfigOverlaysImageUri(original["uri"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobConfigOverlaysImageUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigOverlaysAnimations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "animation_fade": flattenTranscoderJobConfigOverlaysAnimationsAnimationFade(original["animationFade"], d, config), + }) + } + return transformed +} +func flattenTranscoderJobConfigOverlaysAnimationsAnimationFade(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["xy"] = + flattenTranscoderJobConfigOverlaysAnimationsAnimationFadeXy(original["xy"], d, config) + transformed["start_time_offset"] = + flattenTranscoderJobConfigOverlaysAnimationsAnimationFadeStartTimeOffset(original["startTimeOffset"], d, config) + transformed["end_time_offset"] = + flattenTranscoderJobConfigOverlaysAnimationsAnimationFadeEndTimeOffset(original["endTimeOffset"], d, config) + transformed["fade_type"] = + flattenTranscoderJobConfigOverlaysAnimationsAnimationFadeFadeType(original["fadeType"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobConfigOverlaysAnimationsAnimationFadeXy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["x"] = + flattenTranscoderJobConfigOverlaysAnimationsAnimationFadeXyX(original["x"], d, config) + transformed["y"] = + flattenTranscoderJobConfigOverlaysAnimationsAnimationFadeXyY(original["y"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobConfigOverlaysAnimationsAnimationFadeXyX(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigOverlaysAnimationsAnimationFadeXyY(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigOverlaysAnimationsAnimationFadeStartTimeOffset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigOverlaysAnimationsAnimationFadeEndTimeOffset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigOverlaysAnimationsAnimationFadeFadeType(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigEncryptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "id": flattenTranscoderJobConfigEncryptionsId(original["id"], d, config), + "drm_systems": flattenTranscoderJobConfigEncryptionsDrmSystems(original["drmSystems"], d, config), + "aes128": flattenTranscoderJobConfigEncryptionsAes128(original["aes128"], d, config), + "sample_aes": flattenTranscoderJobConfigEncryptionsSampleAes(original["sampleAes"], d, config), + "mpeg_cenc": flattenTranscoderJobConfigEncryptionsMpegCenc(original["mpegCenc"], d, config), + "secret_manager_key_source": flattenTranscoderJobConfigEncryptionsSecretManagerKeySource(original["secretManagerKeySource"], d, config), + }) + } + return transformed +} +func flattenTranscoderJobConfigEncryptionsId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigEncryptionsDrmSystems(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["widevine"] = + flattenTranscoderJobConfigEncryptionsDrmSystemsWidevine(original["widevine"], d, config) + transformed["fairplay"] = + flattenTranscoderJobConfigEncryptionsDrmSystemsFairplay(original["fairplay"], d, config) + transformed["playready"] = + flattenTranscoderJobConfigEncryptionsDrmSystemsPlayready(original["playready"], d, config) + transformed["clearkey"] = + flattenTranscoderJobConfigEncryptionsDrmSystemsClearkey(original["clearkey"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobConfigEncryptionsDrmSystemsWidevine(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenTranscoderJobConfigEncryptionsDrmSystemsFairplay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenTranscoderJobConfigEncryptionsDrmSystemsPlayready(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenTranscoderJobConfigEncryptionsDrmSystemsClearkey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenTranscoderJobConfigEncryptionsAes128(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenTranscoderJobConfigEncryptionsSampleAes(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenTranscoderJobConfigEncryptionsMpegCenc(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["scheme"] = + flattenTranscoderJobConfigEncryptionsMpegCencScheme(original["scheme"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobConfigEncryptionsMpegCencScheme(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobConfigEncryptionsSecretManagerKeySource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["secret_version"] = + flattenTranscoderJobConfigEncryptionsSecretManagerKeySourceSecretVersion(original["secretVersion"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobConfigEncryptionsSecretManagerKeySourceSecretVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("terraform_labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenTranscoderJobEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandTranscoderJobTemplateId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInputs, err := expandTranscoderJobConfigInputs(original["inputs"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInputs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["inputs"] = transformedInputs + } + + transformedEditList, err := expandTranscoderJobConfigEditList(original["edit_list"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEditList); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["editList"] = transformedEditList + } + + transformedElementaryStreams, err := expandTranscoderJobConfigElementaryStreams(original["elementary_streams"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedElementaryStreams); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["elementaryStreams"] = transformedElementaryStreams + } + + transformedMuxStreams, err := expandTranscoderJobConfigMuxStreams(original["mux_streams"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedMuxStreams); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["muxStreams"] = transformedMuxStreams + } + + transformedManifests, err := expandTranscoderJobConfigManifests(original["manifests"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedManifests); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["manifests"] = transformedManifests + } + + transformedOutput, err := expandTranscoderJobConfigOutput(original["output"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOutput); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["output"] = transformedOutput + } + + transformedAdBreaks, err := expandTranscoderJobConfigAdBreaks(original["ad_breaks"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAdBreaks); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["adBreaks"] = transformedAdBreaks + } + + transformedPubsubDestination, err := expandTranscoderJobConfigPubsubDestination(original["pubsub_destination"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPubsubDestination); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pubsubDestination"] = transformedPubsubDestination + } + + transformedOverlays, err := expandTranscoderJobConfigOverlays(original["overlays"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOverlays); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["overlays"] = transformedOverlays + } + + transformedEncryptions, err := expandTranscoderJobConfigEncryptions(original["encryptions"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEncryptions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["encryptions"] = transformedEncryptions + } + + return transformed, nil +} + +func expandTranscoderJobConfigInputs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandTranscoderJobConfigInputsKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } + + transformedUri, err := expandTranscoderJobConfigInputsUri(original["uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uri"] = transformedUri + } + + req = append(req, transformed) + } + return req, nil +} + +func expandTranscoderJobConfigInputsKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigInputsUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigEditList(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil 
{ + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandTranscoderJobConfigEditListKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } + + transformedInputs, err := expandTranscoderJobConfigEditListInputs(original["inputs"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInputs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["inputs"] = transformedInputs + } + + transformedStartTimeOffset, err := expandTranscoderJobConfigEditListStartTimeOffset(original["start_time_offset"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStartTimeOffset); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["startTimeOffset"] = transformedStartTimeOffset + } + + req = append(req, transformed) + } + return req, nil +} + +func expandTranscoderJobConfigEditListKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigEditListInputs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigEditListStartTimeOffset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigElementaryStreams(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandTranscoderJobConfigElementaryStreamsKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } + + transformedVideoStream, err := expandTranscoderJobConfigElementaryStreamsVideoStream(original["video_stream"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVideoStream); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["videoStream"] = transformedVideoStream + } + + transformedAudioStream, err := expandTranscoderJobConfigElementaryStreamsAudioStream(original["audio_stream"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAudioStream); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["audioStream"] = transformedAudioStream + } + + req = append(req, transformed) + } + return req, nil +} + +func expandTranscoderJobConfigElementaryStreamsKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigElementaryStreamsVideoStream(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedH264, err := 
expandTranscoderJobConfigElementaryStreamsVideoStreamH264(original["h264"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedH264); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["h264"] = transformedH264 + } + + return transformed, nil +} + +func expandTranscoderJobConfigElementaryStreamsVideoStreamH264(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWidthPixels, err := expandTranscoderJobConfigElementaryStreamsVideoStreamH264WidthPixels(original["width_pixels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWidthPixels); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["widthPixels"] = transformedWidthPixels + } + + transformedHeightPixels, err := expandTranscoderJobConfigElementaryStreamsVideoStreamH264HeightPixels(original["height_pixels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHeightPixels); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["heightPixels"] = transformedHeightPixels + } + + transformedFrameRate, err := expandTranscoderJobConfigElementaryStreamsVideoStreamH264FrameRate(original["frame_rate"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFrameRate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["frameRate"] = transformedFrameRate + } + + transformedBitrateBps, err := expandTranscoderJobConfigElementaryStreamsVideoStreamH264BitrateBps(original["bitrate_bps"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBitrateBps); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bitrateBps"] = transformedBitrateBps + } + + transformedPixelFormat, err := expandTranscoderJobConfigElementaryStreamsVideoStreamH264PixelFormat(original["pixel_format"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPixelFormat); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pixelFormat"] = transformedPixelFormat + } + + transformedRateControlMode, err := expandTranscoderJobConfigElementaryStreamsVideoStreamH264RateControlMode(original["rate_control_mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRateControlMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rateControlMode"] = transformedRateControlMode + } + + transformedCrfLevel, err := expandTranscoderJobConfigElementaryStreamsVideoStreamH264CrfLevel(original["crf_level"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCrfLevel); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["crfLevel"] = transformedCrfLevel + } + + transformedVbvSizeBits, err := expandTranscoderJobConfigElementaryStreamsVideoStreamH264VbvSizeBits(original["vbv_size_bits"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVbvSizeBits); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["vbvSizeBits"] = transformedVbvSizeBits + } + + transformedVbvFullnessBits, err := expandTranscoderJobConfigElementaryStreamsVideoStreamH264VbvFullnessBits(original["vbv_fullness_bits"], 
d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVbvFullnessBits); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["vbvFullnessBits"] = transformedVbvFullnessBits + } + + transformedEntropyCoder, err := expandTranscoderJobConfigElementaryStreamsVideoStreamH264EntropyCoder(original["entropy_coder"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEntropyCoder); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["entropyCoder"] = transformedEntropyCoder + } + + transformedProfile, err := expandTranscoderJobConfigElementaryStreamsVideoStreamH264Profile(original["profile"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProfile); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["profile"] = transformedProfile + } + + transformedPreset, err := expandTranscoderJobConfigElementaryStreamsVideoStreamH264Preset(original["preset"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPreset); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["preset"] = transformedPreset + } + + transformedGopDuration, err := expandTranscoderJobConfigElementaryStreamsVideoStreamH264GopDuration(original["gop_duration"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGopDuration); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["gopDuration"] = transformedGopDuration + } + + transformedSdr, err := expandTranscoderJobConfigElementaryStreamsVideoStreamH264Sdr(original["sdr"], d, config) + if err != nil { + return nil, err + } else { + transformed["sdr"] = transformedSdr + } + + transformedHlg, err := expandTranscoderJobConfigElementaryStreamsVideoStreamH264Hlg(original["hlg"], d, config) + if err != nil { + return nil, err + } else { + transformed["hlg"] = transformedHlg + } + + return transformed, nil +} + +func expandTranscoderJobConfigElementaryStreamsVideoStreamH264WidthPixels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigElementaryStreamsVideoStreamH264HeightPixels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigElementaryStreamsVideoStreamH264FrameRate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigElementaryStreamsVideoStreamH264BitrateBps(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigElementaryStreamsVideoStreamH264PixelFormat(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigElementaryStreamsVideoStreamH264RateControlMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigElementaryStreamsVideoStreamH264CrfLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigElementaryStreamsVideoStreamH264VbvSizeBits(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigElementaryStreamsVideoStreamH264VbvFullnessBits(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigElementaryStreamsVideoStreamH264EntropyCoder(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigElementaryStreamsVideoStreamH264Profile(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigElementaryStreamsVideoStreamH264Preset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigElementaryStreamsVideoStreamH264GopDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigElementaryStreamsVideoStreamH264Sdr(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandTranscoderJobConfigElementaryStreamsVideoStreamH264Hlg(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandTranscoderJobConfigElementaryStreamsAudioStream(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCodec, err := expandTranscoderJobConfigElementaryStreamsAudioStreamCodec(original["codec"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCodec); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["codec"] = transformedCodec + } + + transformedBitrateBps, err := expandTranscoderJobConfigElementaryStreamsAudioStreamBitrateBps(original["bitrate_bps"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBitrateBps); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bitrateBps"] = transformedBitrateBps + } + + transformedChannelCount, err := expandTranscoderJobConfigElementaryStreamsAudioStreamChannelCount(original["channel_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedChannelCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["channelCount"] = transformedChannelCount + } + + transformedChannelLayout, err := expandTranscoderJobConfigElementaryStreamsAudioStreamChannelLayout(original["channel_layout"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedChannelLayout); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["channelLayout"] = 
transformedChannelLayout + } + + transformedSampleRateHertz, err := expandTranscoderJobConfigElementaryStreamsAudioStreamSampleRateHertz(original["sample_rate_hertz"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSampleRateHertz); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sampleRateHertz"] = transformedSampleRateHertz + } + + return transformed, nil +} + +func expandTranscoderJobConfigElementaryStreamsAudioStreamCodec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigElementaryStreamsAudioStreamBitrateBps(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigElementaryStreamsAudioStreamChannelCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigElementaryStreamsAudioStreamChannelLayout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigElementaryStreamsAudioStreamSampleRateHertz(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigMuxStreams(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandTranscoderJobConfigMuxStreamsKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } + + transformedFileName, err := expandTranscoderJobConfigMuxStreamsFileName(original["file_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFileName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fileName"] = transformedFileName + } + + transformedContainer, err := expandTranscoderJobConfigMuxStreamsContainer(original["container"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContainer); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["container"] = transformedContainer + } + + transformedElementaryStreams, err := expandTranscoderJobConfigMuxStreamsElementaryStreams(original["elementary_streams"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedElementaryStreams); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["elementaryStreams"] = transformedElementaryStreams + } + + transformedSegmentSettings, err := expandTranscoderJobConfigMuxStreamsSegmentSettings(original["segment_settings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSegmentSettings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["segmentSettings"] = transformedSegmentSettings + } + + transformedEncryptionId, err := expandTranscoderJobConfigMuxStreamsEncryptionId(original["encryption_id"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedEncryptionId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["encryptionId"] = transformedEncryptionId + } + + req = append(req, transformed) + } + return req, nil +} + +func expandTranscoderJobConfigMuxStreamsKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigMuxStreamsFileName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigMuxStreamsContainer(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigMuxStreamsElementaryStreams(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigMuxStreamsSegmentSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSegmentDuration, err := expandTranscoderJobConfigMuxStreamsSegmentSettingsSegmentDuration(original["segment_duration"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSegmentDuration); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["segmentDuration"] = transformedSegmentDuration + } + + return transformed, nil +} + +func expandTranscoderJobConfigMuxStreamsSegmentSettingsSegmentDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigMuxStreamsEncryptionId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigManifests(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFileName, err := expandTranscoderJobConfigManifestsFileName(original["file_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFileName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fileName"] = transformedFileName + } + + transformedType, err := expandTranscoderJobConfigManifestsType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + transformedMuxStreams, err := expandTranscoderJobConfigManifestsMuxStreams(original["mux_streams"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMuxStreams); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["muxStreams"] = transformedMuxStreams + } + + req = append(req, transformed) + } + return req, nil +} + +func expandTranscoderJobConfigManifestsFileName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandTranscoderJobConfigManifestsType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigManifestsMuxStreams(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigOutput(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUri, err := expandTranscoderJobConfigOutputUri(original["uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uri"] = transformedUri + } + + return transformed, nil +} + +func expandTranscoderJobConfigOutputUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigAdBreaks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedStartTimeOffset, err := expandTranscoderJobConfigAdBreaksStartTimeOffset(original["start_time_offset"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStartTimeOffset); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["startTimeOffset"] = transformedStartTimeOffset + } + + req = append(req, transformed) + } + return req, nil +} + +func expandTranscoderJobConfigAdBreaksStartTimeOffset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigPubsubDestination(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTopic, err := expandTranscoderJobConfigPubsubDestinationTopic(original["topic"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTopic); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["topic"] = transformedTopic + } + + return transformed, nil +} + +func expandTranscoderJobConfigPubsubDestinationTopic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigOverlays(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedImage, err := expandTranscoderJobConfigOverlaysImage(original["image"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedImage); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["image"] = 
transformedImage + } + + transformedAnimations, err := expandTranscoderJobConfigOverlaysAnimations(original["animations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAnimations); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["animations"] = transformedAnimations + } + + req = append(req, transformed) + } + return req, nil +} + +func expandTranscoderJobConfigOverlaysImage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUri, err := expandTranscoderJobConfigOverlaysImageUri(original["uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uri"] = transformedUri + } + + return transformed, nil +} + +func expandTranscoderJobConfigOverlaysImageUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigOverlaysAnimations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAnimationFade, err := expandTranscoderJobConfigOverlaysAnimationsAnimationFade(original["animation_fade"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAnimationFade); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["animationFade"] = transformedAnimationFade + } + + req = append(req, transformed) + } + return req, nil +} + +func expandTranscoderJobConfigOverlaysAnimationsAnimationFade(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedXy, err := expandTranscoderJobConfigOverlaysAnimationsAnimationFadeXy(original["xy"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedXy); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["xy"] = transformedXy + } + + transformedStartTimeOffset, err := expandTranscoderJobConfigOverlaysAnimationsAnimationFadeStartTimeOffset(original["start_time_offset"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStartTimeOffset); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["startTimeOffset"] = transformedStartTimeOffset + } + + transformedEndTimeOffset, err := expandTranscoderJobConfigOverlaysAnimationsAnimationFadeEndTimeOffset(original["end_time_offset"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEndTimeOffset); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["endTimeOffset"] = transformedEndTimeOffset + } + + transformedFadeType, err := expandTranscoderJobConfigOverlaysAnimationsAnimationFadeFadeType(original["fade_type"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedFadeType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fadeType"] = transformedFadeType + } + + return transformed, nil +} + +func expandTranscoderJobConfigOverlaysAnimationsAnimationFadeXy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedX, err := expandTranscoderJobConfigOverlaysAnimationsAnimationFadeXyX(original["x"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedX); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["x"] = transformedX + } + + transformedY, err := expandTranscoderJobConfigOverlaysAnimationsAnimationFadeXyY(original["y"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedY); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["y"] = transformedY + } + + return transformed, nil +} + +func expandTranscoderJobConfigOverlaysAnimationsAnimationFadeXyX(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigOverlaysAnimationsAnimationFadeXyY(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigOverlaysAnimationsAnimationFadeStartTimeOffset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigOverlaysAnimationsAnimationFadeEndTimeOffset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigOverlaysAnimationsAnimationFadeFadeType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigEncryptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedId, err := expandTranscoderJobConfigEncryptionsId(original["id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["id"] = transformedId + } + + transformedDrmSystems, err := expandTranscoderJobConfigEncryptionsDrmSystems(original["drm_systems"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDrmSystems); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["drmSystems"] = transformedDrmSystems + } + + transformedAes128, err := expandTranscoderJobConfigEncryptionsAes128(original["aes128"], d, config) + if err != nil { + return nil, err + } else { + transformed["aes128"] = transformedAes128 + } + + transformedSampleAes, err := expandTranscoderJobConfigEncryptionsSampleAes(original["sample_aes"], d, config) + if err != nil { + return nil, err + } else { + transformed["sampleAes"] = transformedSampleAes + } + + transformedMpegCenc, err := 
expandTranscoderJobConfigEncryptionsMpegCenc(original["mpeg_cenc"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMpegCenc); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mpegCenc"] = transformedMpegCenc + } + + transformedSecretManagerKeySource, err := expandTranscoderJobConfigEncryptionsSecretManagerKeySource(original["secret_manager_key_source"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecretManagerKeySource); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["secretManagerKeySource"] = transformedSecretManagerKeySource + } + + req = append(req, transformed) + } + return req, nil +} + +func expandTranscoderJobConfigEncryptionsId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigEncryptionsDrmSystems(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWidevine, err := expandTranscoderJobConfigEncryptionsDrmSystemsWidevine(original["widevine"], d, config) + if err != nil { + return nil, err + } else { + transformed["widevine"] = transformedWidevine + } + + transformedFairplay, err := expandTranscoderJobConfigEncryptionsDrmSystemsFairplay(original["fairplay"], d, config) + if err != nil { + return nil, err + } else { + transformed["fairplay"] = transformedFairplay + } + + transformedPlayready, err := expandTranscoderJobConfigEncryptionsDrmSystemsPlayready(original["playready"], d, config) + if err != nil { + return nil, err + } else { + transformed["playready"] = transformedPlayready + } + + transformedClearkey, err := expandTranscoderJobConfigEncryptionsDrmSystemsClearkey(original["clearkey"], d, config) + if err != nil { + return nil, err + } else { + transformed["clearkey"] = transformedClearkey + } + + return transformed, nil +} + +func expandTranscoderJobConfigEncryptionsDrmSystemsWidevine(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandTranscoderJobConfigEncryptionsDrmSystemsFairplay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandTranscoderJobConfigEncryptionsDrmSystemsPlayready(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandTranscoderJobConfigEncryptionsDrmSystemsClearkey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := 
v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandTranscoderJobConfigEncryptionsAes128(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandTranscoderJobConfigEncryptionsSampleAes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandTranscoderJobConfigEncryptionsMpegCenc(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScheme, err := expandTranscoderJobConfigEncryptionsMpegCencScheme(original["scheme"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScheme); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scheme"] = transformedScheme + } + + return transformed, nil +} + +func expandTranscoderJobConfigEncryptionsMpegCencScheme(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobConfigEncryptionsSecretManagerKeySource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSecretVersion, err := expandTranscoderJobConfigEncryptionsSecretManagerKeySourceSecretVersion(original["secret_version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecretVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["secretVersion"] = transformedSecretVersion + } + + return transformed, nil +} + +func expandTranscoderJobConfigEncryptionsSecretManagerKeySourceSecretVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_job_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/transcoder/resource_transcoder_job_sweeper.go similarity index 90% rename from 
terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_job_sweeper.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/transcoder/resource_transcoder_job_sweeper.go index 88c7e7d47c8..63567478ec9 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_job_sweeper.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/transcoder/resource_transcoder_job_sweeper.go @@ -15,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package cloudrunv2 +package transcoder import ( "context" @@ -30,12 +30,12 @@ import ( ) func init() { - sweeper.AddTestSweepers("CloudRunV2Job", testSweepCloudRunV2Job) + sweeper.AddTestSweepers("TranscoderJob", testSweepTranscoderJob) } // At the time of writing, the CI only passes us-central1 as the region -func testSweepCloudRunV2Job(region string) error { - resourceName := "CloudRunV2Job" +func testSweepTranscoderJob(region string) error { + resourceName := "TranscoderJob" log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) config, err := sweeper.SharedConfigForRegion(region) @@ -64,7 +64,7 @@ func testSweepCloudRunV2Job(region string) error { }, } - listTemplate := strings.Split("https://run.googleapis.com/v2/projects/{{project}}/locations/{{location}}/jobs", "?")[0] + listTemplate := strings.Split("https://transcoder.googleapis.com/v1/projects/{{project}}/locations/{{location}}/jobs", "?")[0] listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) if err != nil { log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) @@ -108,7 +108,7 @@ func testSweepCloudRunV2Job(region string) error { continue } - deleteTemplate := "https://run.googleapis.com/v2/projects/{{project}}/locations/{{location}}/jobs/{{name}}" + deleteTemplate := "https://transcoder.googleapis.com/v1/{{name}}" deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) if err != nil { log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/transcoder/resource_transcoder_job_template.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/transcoder/resource_transcoder_job_template.go new file mode 100644 index 00000000000..68c25d28b05 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/transcoder/resource_transcoder_job_template.go @@ -0,0 +1,2927 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package transcoder + +import ( + "fmt" + "log" + "net/http" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceTranscoderJobTemplate() *schema.Resource { + return &schema.Resource{ + Create: resourceTranscoderJobTemplateCreate, + Read: resourceTranscoderJobTemplateRead, + Update: resourceTranscoderJobTemplateUpdate, + Delete: resourceTranscoderJobTemplateDelete, + + Importer: &schema.ResourceImporter{ + State: resourceTranscoderJobTemplateImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.SetLabelsDiff, + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "job_template_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `ID to use for the Transcoding job template.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location of the transcoding job template resource.`, + }, + "config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The configuration for this template.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ad_breaks": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Ad break.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "start_time_offset": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Start time in seconds for the ad break, relative to the output file timeline`, + }, + }, + }, + }, + "edit_list": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `List of input assets stored in Cloud Storage.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "inputs": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `List of values identifying files that should be used in this atom.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "key": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `A unique key for this atom.`, + }, + "start_time_offset": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Start time in seconds for the atom, relative to the input file timeline. 
The default is '0s'.`, + }, + }, + }, + }, + "elementary_streams": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `List of input assets stored in Cloud Storage.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "audio_stream": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Encoding of an audio stream.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bitrate_bps": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `Audio bitrate in bits per second.`, + }, + "channel_count": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Number of audio channels. The default is '2'.`, + }, + "channel_layout": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `A list of channel names specifying layout of the audio channels. The default is ["fl", "fr"].`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "codec": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The codec for this audio stream. The default is 'aac'.`, + }, + "sample_rate_hertz": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The audio sample rate in Hertz. The default is '48000'.`, + }, + }, + }, + }, + "key": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `A unique key for this atom.`, + }, + "video_stream": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Encoding of a video stream.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "h264": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `H264 codec settings`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bitrate_bps": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `The video bitrate in bits per second.`, + }, + "frame_rate": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `The target video frame rate in frames per second (FPS).`, + }, + "crf_level": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Target CRF level. The default is '21'.`, + }, + "entropy_coder": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The entropy coder to use. The default is 'cabac'.`, + }, + "gop_duration": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Select the GOP size based on the specified duration. The default is '3s'.`, + }, + "height_pixels": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The height of the video in pixels.`, + }, + "hlg": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HLG color format setting for H264.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "pixel_format": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Pixel format to use. The default is 'yuv420p'.`, + }, + "preset": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Enforces the specified codec preset. 
The default is 'veryfast'.`, + }, + "profile": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Enforces the specified codec profile.`, + }, + "rate_control_mode": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Specify the mode. The default is 'vbr'.`, + }, + "sdr": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `SDR color format setting for H264.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "vbv_fullness_bits": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Initial fullness of the Video Buffering Verifier (VBV) buffer in bits.`, + }, + "vbv_size_bits": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Size of the Video Buffering Verifier (VBV) buffer in bits.`, + }, + "width_pixels": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The width of the video in pixels.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "encryptions": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `List of encryption configurations for the content.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Identifier for this set of encryption options.`, + }, + "aes128": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Configuration for AES-128 encryption.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "drm_systems": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `DRM system(s) to use; at least one must be specified. 
If a DRM system is omitted, it is considered disabled.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "clearkey": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Clearkey configuration.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "fairplay": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Fairplay configuration.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "playready": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Playready configuration.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "widevine": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Widevine configuration.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + }, + }, + }, + "mpeg_cenc": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Configuration for MPEG Common Encryption (MPEG-CENC).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "scheme": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Specify the encryption scheme.`, + }, + }, + }, + }, + "sample_aes": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Configuration for SAMPLE-AES encryption.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "secret_manager_key_source": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Configuration for secrets stored in Google Secret Manager.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret_version": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the Secret Version containing the encryption key in the following format: projects/{project}/secrets/{secret_id}/versions/{version_number}.`, + }, + }, + }, + }, + }, + }, + }, + "inputs": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `List of input assets stored in Cloud Storage.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `A unique key for this input. Must be specified when using advanced mapping and edit lists.`, + }, + "uri": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `URI of the media. Input files must be at least 5 seconds in duration and stored in Cloud Storage (for example, gs://bucket/inputs/file.mp4). +If empty, the value is populated from Job.input_uri.`, + }, + }, + }, + }, + "manifests": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Manifest configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"MANIFEST_TYPE_UNSPECIFIED", "HLS", "DASH"}), + Description: `Type of the manifest. Possible values: ["MANIFEST_TYPE_UNSPECIFIED", "HLS", "DASH"]`, + }, + "file_name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The name of the generated file. 
The default is 'manifest'.`, + }, + "mux_streams": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `List of user supplied MuxStream.key values that should appear in this manifest.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "mux_streams": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Multiplexing settings for output stream.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "container": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The container format. The default is 'mp4'.`, + }, + "elementary_streams": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `List of ElementaryStream.key values multiplexed in this stream.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "encryption_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Identifier of the encryption configuration to use.`, + }, + "file_name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The name of the generated file.`, + }, + "key": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `A unique key for this multiplexed stream.`, + }, + "segment_settings": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Segment settings for ts, fmp4 and vtt.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "segment_duration": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Duration of the segments in seconds. The default is '6.0s'.`, + }, + }, + }, + }, + }, + }, + }, + "output": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Location of output file(s) in a Cloud Storage bucket.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uri": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `URI for the output file(s). For example, gs://my-bucket/outputs/.`, + }, + }, + }, + }, + "overlays": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `List of overlays on the output video, in descending Z-order.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "animations": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `List of animations. The list should be chronological, without any time overlap.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "animation_fade": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Display overlay object with fade animation.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fade_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"FADE_TYPE_UNSPECIFIED", "FADE_IN", "FADE_OUT"}), + Description: `Required. Type of fade animation: 'FADE_IN' or 'FADE_OUT'. +The possible values are: + +* 'FADE_TYPE_UNSPECIFIED': The fade type is not specified. + +* 'FADE_IN': Fade the overlay object into view. + +* 'FADE_OUT': Fade the overlay object out of view. 
Possible values: ["FADE_TYPE_UNSPECIFIED", "FADE_IN", "FADE_OUT"]`, + }, + "end_time_offset": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The time to end the fade animation, in seconds.`, + }, + "start_time_offset": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The time to start the fade animation, in seconds.`, + }, + "xy": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Normalized coordinates based on output video resolution.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "x": { + Type: schema.TypeFloat, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Normalized x coordinate.`, + }, + "y": { + Type: schema.TypeFloat, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Normalized y coordinate.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "image": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Image overlay.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uri": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `URI of the image in Cloud Storage. For example, gs://bucket/inputs/image.png.`, + }, + }, + }, + }, + }, + }, + }, + "pubsub_destination": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Pub/Sub destination.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "topic": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The name of the Pub/Sub topic to publish job completion notification to. For example: projects/{project}/topics/{topic}.`, + }, + }, + }, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `The labels associated with this job template. You can use these to organize and group your job templates. + + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. 
+Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the job template.`, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceTranscoderJobTemplateCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + configProp, err := expandTranscoderJobTemplateConfig(d.Get("config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("config"); !tpgresource.IsEmptyValue(reflect.ValueOf(configProp)) && (ok || !reflect.DeepEqual(v, configProp)) { + obj["config"] = configProp + } + labelsProp, err := expandTranscoderJobTemplateEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{TranscoderBasePath}}projects/{{project}}/locations/{{location}}/jobTemplates?jobTemplateId={{job_template_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new JobTemplate: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for JobTemplate: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating JobTemplate: %s", err) + } + if err := d.Set("name", flattenTranscoderJobTemplateName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/jobTemplates/{{job_template_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating JobTemplate %q: %#v", d.Id(), res) + + return resourceTranscoderJobTemplateRead(d, meta) +} + +func resourceTranscoderJobTemplateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + 
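+ // Note on the read path below (annotation, values illustrative): the refresh
+ // is the inverse of create. It issues a GET against the jobTemplates resource
+ // and flattens each camelCase API field back into its snake_case schema
+ // attribute. If the API reports the template as gone, HandleNotFoundError
+ // clears the resource from state so the next plan proposes a re-create
+ // instead of failing.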
userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{TranscoderBasePath}}projects/{{project}}/locations/{{location}}/jobTemplates/{{job_template_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for JobTemplate: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("TranscoderJobTemplate %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading JobTemplate: %s", err) + } + + if err := d.Set("name", flattenTranscoderJobTemplateName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading JobTemplate: %s", err) + } + if err := d.Set("labels", flattenTranscoderJobTemplateLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading JobTemplate: %s", err) + } + if err := d.Set("config", flattenTranscoderJobTemplateConfig(res["config"], d, config)); err != nil { + return fmt.Errorf("Error reading JobTemplate: %s", err) + } + if err := d.Set("terraform_labels", flattenTranscoderJobTemplateTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading JobTemplate: %s", err) + } + if err := d.Set("effective_labels", flattenTranscoderJobTemplateEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading JobTemplate: %s", err) + } + + return nil +} + +func resourceTranscoderJobTemplateUpdate(d *schema.ResourceData, meta interface{}) error { + // Only the root fields "labels" and "terraform_labels" are mutable + return resourceTranscoderJobTemplateRead(d, meta) +} + +func resourceTranscoderJobTemplateDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for JobTemplate: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{TranscoderBasePath}}projects/{{project}}/locations/{{location}}/jobTemplates/{{job_template_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting JobTemplate %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "JobTemplate") + } + + log.Printf("[DEBUG] Finished deleting JobTemplate %q: %#v", d.Id(), res) + return nil +} +
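+// The flatten helpers later in this file normalize numeric fields that the
+// API may return either as JSON numbers (decoded into float64) or as
+// string-encoded fixed64 values. A minimal, self-contained sketch of that
+// conversion, using the standard library's strconv.ParseInt as a stand-in
+// for the provider's tpgresource.StringToFixed64 helper (the function name
+// below is hypothetical and assumes "strconv" is imported):
+func flattenInt64Sketch(v interface{}) interface{} {
+ // String-encoded fixed64, e.g. "2500000".
+ if strVal, ok := v.(string); ok {
+ if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil {
+ return intVal
+ }
+ }
+ // JSON numbers are decoded as float64 by encoding/json.
+ if floatVal, ok := v.(float64); ok {
+ return int(floatVal)
+ }
+ return v // let terraform core handle it otherwise
+}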
+func resourceTranscoderJobTemplateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/jobTemplates/(?P<job_template_id>[^/]+)$", + "^(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<job_template_id>[^/]+)$", + "^(?P<location>[^/]+)/(?P<job_template_id>[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/jobTemplates/{{job_template_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenTranscoderJobTemplateName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenTranscoderJobTemplateLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenTranscoderJobTemplateConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["inputs"] = + flattenTranscoderJobTemplateConfigInputs(original["inputs"], d, config) + transformed["edit_list"] = + flattenTranscoderJobTemplateConfigEditList(original["editList"], d, config) + transformed["elementary_streams"] = + flattenTranscoderJobTemplateConfigElementaryStreams(original["elementaryStreams"], d, config) + transformed["mux_streams"] = + flattenTranscoderJobTemplateConfigMuxStreams(original["muxStreams"], d, config) + transformed["manifests"] = + flattenTranscoderJobTemplateConfigManifests(original["manifests"], d, config) + transformed["output"] = + flattenTranscoderJobTemplateConfigOutput(original["output"], d, config) + transformed["ad_breaks"] = + flattenTranscoderJobTemplateConfigAdBreaks(original["adBreaks"], d, config) + transformed["pubsub_destination"] = + flattenTranscoderJobTemplateConfigPubsubDestination(original["pubsubDestination"], d, config) + transformed["overlays"] = + flattenTranscoderJobTemplateConfigOverlays(original["overlays"], d, config) + transformed["encryptions"] = + flattenTranscoderJobTemplateConfigEncryptions(original["encryptions"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobTemplateConfigInputs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "key": flattenTranscoderJobTemplateConfigInputsKey(original["key"], d, config), + "uri": flattenTranscoderJobTemplateConfigInputsUri(original["uri"], d, config), + }) + } + return transformed +} +func flattenTranscoderJobTemplateConfigInputsKey(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigInputsUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigEditList(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "key": flattenTranscoderJobTemplateConfigEditListKey(original["key"], d, config), + "inputs": flattenTranscoderJobTemplateConfigEditListInputs(original["inputs"], d, config), + "start_time_offset": flattenTranscoderJobTemplateConfigEditListStartTimeOffset(original["startTimeOffset"], d, config), + }) + } + return transformed +} +func flattenTranscoderJobTemplateConfigEditListKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigEditListInputs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigEditListStartTimeOffset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigElementaryStreams(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "key": flattenTranscoderJobTemplateConfigElementaryStreamsKey(original["key"], d, config), + "video_stream": flattenTranscoderJobTemplateConfigElementaryStreamsVideoStream(original["videoStream"], d, config), + "audio_stream": flattenTranscoderJobTemplateConfigElementaryStreamsAudioStream(original["audioStream"], d, config), + }) + } + return transformed +} +func flattenTranscoderJobTemplateConfigElementaryStreamsKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigElementaryStreamsVideoStream(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["h264"] = + flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264(original["h264"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["width_pixels"] = + flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264WidthPixels(original["widthPixels"], d, config) + transformed["height_pixels"] = + 
flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264HeightPixels(original["heightPixels"], d, config) + transformed["frame_rate"] = + flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264FrameRate(original["frameRate"], d, config) + transformed["bitrate_bps"] = + flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264BitrateBps(original["bitrateBps"], d, config) + transformed["pixel_format"] = + flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264PixelFormat(original["pixelFormat"], d, config) + transformed["rate_control_mode"] = + flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264RateControlMode(original["rateControlMode"], d, config) + transformed["crf_level"] = + flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264CrfLevel(original["crfLevel"], d, config) + transformed["vbv_size_bits"] = + flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264VbvSizeBits(original["vbvSizeBits"], d, config) + transformed["vbv_fullness_bits"] = + flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264VbvFullnessBits(original["vbvFullnessBits"], d, config) + transformed["entropy_coder"] = + flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264EntropyCoder(original["entropyCoder"], d, config) + transformed["profile"] = + flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264Profile(original["profile"], d, config) + transformed["preset"] = + flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264Preset(original["preset"], d, config) + transformed["gop_duration"] = + flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264GopDuration(original["gopDuration"], d, config) + transformed["sdr"] = + flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264Sdr(original["sdr"], d, config) + transformed["hlg"] = + flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264Hlg(original["hlg"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264WidthPixels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264HeightPixels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264FrameRate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // 
let terraform core handle it otherwise +} + +func flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264BitrateBps(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264PixelFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264RateControlMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264CrfLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264VbvSizeBits(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264VbvFullnessBits(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264EntropyCoder(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264Profile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264Preset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264GopDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264Sdr(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func 
flattenTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264Hlg(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenTranscoderJobTemplateConfigElementaryStreamsAudioStream(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["codec"] = + flattenTranscoderJobTemplateConfigElementaryStreamsAudioStreamCodec(original["codec"], d, config) + transformed["bitrate_bps"] = + flattenTranscoderJobTemplateConfigElementaryStreamsAudioStreamBitrateBps(original["bitrateBps"], d, config) + transformed["channel_count"] = + flattenTranscoderJobTemplateConfigElementaryStreamsAudioStreamChannelCount(original["channelCount"], d, config) + transformed["channel_layout"] = + flattenTranscoderJobTemplateConfigElementaryStreamsAudioStreamChannelLayout(original["channelLayout"], d, config) + transformed["sample_rate_hertz"] = + flattenTranscoderJobTemplateConfigElementaryStreamsAudioStreamSampleRateHertz(original["sampleRateHertz"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobTemplateConfigElementaryStreamsAudioStreamCodec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigElementaryStreamsAudioStreamBitrateBps(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenTranscoderJobTemplateConfigElementaryStreamsAudioStreamChannelCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenTranscoderJobTemplateConfigElementaryStreamsAudioStreamChannelLayout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigElementaryStreamsAudioStreamSampleRateHertz(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenTranscoderJobTemplateConfigMuxStreams(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original 
:= raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "key": flattenTranscoderJobTemplateConfigMuxStreamsKey(original["key"], d, config), + "file_name": flattenTranscoderJobTemplateConfigMuxStreamsFileName(original["fileName"], d, config), + "container": flattenTranscoderJobTemplateConfigMuxStreamsContainer(original["container"], d, config), + "elementary_streams": flattenTranscoderJobTemplateConfigMuxStreamsElementaryStreams(original["elementaryStreams"], d, config), + "segment_settings": flattenTranscoderJobTemplateConfigMuxStreamsSegmentSettings(original["segmentSettings"], d, config), + "encryption_id": flattenTranscoderJobTemplateConfigMuxStreamsEncryptionId(original["encryptionId"], d, config), + }) + } + return transformed +} +func flattenTranscoderJobTemplateConfigMuxStreamsKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigMuxStreamsFileName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigMuxStreamsContainer(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigMuxStreamsElementaryStreams(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigMuxStreamsSegmentSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["segment_duration"] = + flattenTranscoderJobTemplateConfigMuxStreamsSegmentSettingsSegmentDuration(original["segmentDuration"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobTemplateConfigMuxStreamsSegmentSettingsSegmentDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigMuxStreamsEncryptionId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigManifests(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "file_name": flattenTranscoderJobTemplateConfigManifestsFileName(original["fileName"], d, config), + "type": flattenTranscoderJobTemplateConfigManifestsType(original["type"], d, config), + "mux_streams": flattenTranscoderJobTemplateConfigManifestsMuxStreams(original["muxStreams"], d, config), + }) + } + return transformed +} +func flattenTranscoderJobTemplateConfigManifestsFileName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigManifestsType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigManifestsMuxStreams(v interface{}, 
d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigOutput(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["uri"] = + flattenTranscoderJobTemplateConfigOutputUri(original["uri"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobTemplateConfigOutputUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigAdBreaks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "start_time_offset": flattenTranscoderJobTemplateConfigAdBreaksStartTimeOffset(original["startTimeOffset"], d, config), + }) + } + return transformed +} +func flattenTranscoderJobTemplateConfigAdBreaksStartTimeOffset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigPubsubDestination(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["topic"] = + flattenTranscoderJobTemplateConfigPubsubDestinationTopic(original["topic"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobTemplateConfigPubsubDestinationTopic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigOverlays(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "image": flattenTranscoderJobTemplateConfigOverlaysImage(original["image"], d, config), + "animations": flattenTranscoderJobTemplateConfigOverlaysAnimations(original["animations"], d, config), + }) + } + return transformed +} +func flattenTranscoderJobTemplateConfigOverlaysImage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["uri"] = + flattenTranscoderJobTemplateConfigOverlaysImageUri(original["uri"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobTemplateConfigOverlaysImageUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigOverlaysAnimations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := 
v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "animation_fade": flattenTranscoderJobTemplateConfigOverlaysAnimationsAnimationFade(original["animationFade"], d, config), + }) + } + return transformed +} +func flattenTranscoderJobTemplateConfigOverlaysAnimationsAnimationFade(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["xy"] = + flattenTranscoderJobTemplateConfigOverlaysAnimationsAnimationFadeXy(original["xy"], d, config) + transformed["start_time_offset"] = + flattenTranscoderJobTemplateConfigOverlaysAnimationsAnimationFadeStartTimeOffset(original["startTimeOffset"], d, config) + transformed["end_time_offset"] = + flattenTranscoderJobTemplateConfigOverlaysAnimationsAnimationFadeEndTimeOffset(original["endTimeOffset"], d, config) + transformed["fade_type"] = + flattenTranscoderJobTemplateConfigOverlaysAnimationsAnimationFadeFadeType(original["fadeType"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobTemplateConfigOverlaysAnimationsAnimationFadeXy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["x"] = + flattenTranscoderJobTemplateConfigOverlaysAnimationsAnimationFadeXyX(original["x"], d, config) + transformed["y"] = + flattenTranscoderJobTemplateConfigOverlaysAnimationsAnimationFadeXyY(original["y"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobTemplateConfigOverlaysAnimationsAnimationFadeXyX(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigOverlaysAnimationsAnimationFadeXyY(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigOverlaysAnimationsAnimationFadeStartTimeOffset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigOverlaysAnimationsAnimationFadeEndTimeOffset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigOverlaysAnimationsAnimationFadeFadeType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigEncryptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "id": flattenTranscoderJobTemplateConfigEncryptionsId(original["id"], d, config), + "drm_systems": flattenTranscoderJobTemplateConfigEncryptionsDrmSystems(original["drmSystems"], d, config), + "aes128": 
flattenTranscoderJobTemplateConfigEncryptionsAes128(original["aes128"], d, config), + "sample_aes": flattenTranscoderJobTemplateConfigEncryptionsSampleAes(original["sampleAes"], d, config), + "mpeg_cenc": flattenTranscoderJobTemplateConfigEncryptionsMpegCenc(original["mpegCenc"], d, config), + "secret_manager_key_source": flattenTranscoderJobTemplateConfigEncryptionsSecretManagerKeySource(original["secretManagerKeySource"], d, config), + }) + } + return transformed +} +func flattenTranscoderJobTemplateConfigEncryptionsId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigEncryptionsDrmSystems(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["widevine"] = + flattenTranscoderJobTemplateConfigEncryptionsDrmSystemsWidevine(original["widevine"], d, config) + transformed["fairplay"] = + flattenTranscoderJobTemplateConfigEncryptionsDrmSystemsFairplay(original["fairplay"], d, config) + transformed["playready"] = + flattenTranscoderJobTemplateConfigEncryptionsDrmSystemsPlayready(original["playready"], d, config) + transformed["clearkey"] = + flattenTranscoderJobTemplateConfigEncryptionsDrmSystemsClearkey(original["clearkey"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobTemplateConfigEncryptionsDrmSystemsWidevine(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenTranscoderJobTemplateConfigEncryptionsDrmSystemsFairplay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenTranscoderJobTemplateConfigEncryptionsDrmSystemsPlayready(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenTranscoderJobTemplateConfigEncryptionsDrmSystemsClearkey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenTranscoderJobTemplateConfigEncryptionsAes128(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenTranscoderJobTemplateConfigEncryptionsSampleAes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenTranscoderJobTemplateConfigEncryptionsMpegCenc(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["scheme"] = + flattenTranscoderJobTemplateConfigEncryptionsMpegCencScheme(original["scheme"], d, config) + return 
[]interface{}{transformed} +} +func flattenTranscoderJobTemplateConfigEncryptionsMpegCencScheme(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateConfigEncryptionsSecretManagerKeySource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["secret_version"] = + flattenTranscoderJobTemplateConfigEncryptionsSecretManagerKeySourceSecretVersion(original["secretVersion"], d, config) + return []interface{}{transformed} +} +func flattenTranscoderJobTemplateConfigEncryptionsSecretManagerKeySourceSecretVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTranscoderJobTemplateTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("terraform_labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenTranscoderJobTemplateEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandTranscoderJobTemplateConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInputs, err := expandTranscoderJobTemplateConfigInputs(original["inputs"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInputs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["inputs"] = transformedInputs + } + + transformedEditList, err := expandTranscoderJobTemplateConfigEditList(original["edit_list"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEditList); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["editList"] = transformedEditList + } + + transformedElementaryStreams, err := expandTranscoderJobTemplateConfigElementaryStreams(original["elementary_streams"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedElementaryStreams); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["elementaryStreams"] = transformedElementaryStreams + } + + transformedMuxStreams, err := expandTranscoderJobTemplateConfigMuxStreams(original["mux_streams"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMuxStreams); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["muxStreams"] = transformedMuxStreams + } + + transformedManifests, err := expandTranscoderJobTemplateConfigManifests(original["manifests"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedManifests); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["manifests"] = transformedManifests + } + + transformedOutput, err := expandTranscoderJobTemplateConfigOutput(original["output"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOutput); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["output"] = transformedOutput + } + + transformedAdBreaks, err := expandTranscoderJobTemplateConfigAdBreaks(original["ad_breaks"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAdBreaks); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["adBreaks"] = transformedAdBreaks + } + + transformedPubsubDestination, err := expandTranscoderJobTemplateConfigPubsubDestination(original["pubsub_destination"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPubsubDestination); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pubsubDestination"] = transformedPubsubDestination + } + + transformedOverlays, err := expandTranscoderJobTemplateConfigOverlays(original["overlays"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOverlays); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["overlays"] = transformedOverlays + } + + transformedEncryptions, err := expandTranscoderJobTemplateConfigEncryptions(original["encryptions"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEncryptions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["encryptions"] = transformedEncryptions + } + + return transformed, nil +} + +func expandTranscoderJobTemplateConfigInputs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandTranscoderJobTemplateConfigInputsKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } + + transformedUri, err := expandTranscoderJobTemplateConfigInputsUri(original["uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uri"] = transformedUri + } + + req = append(req, transformed) + } + return req, nil +} + +func expandTranscoderJobTemplateConfigInputsKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigInputsUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigEditList(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandTranscoderJobTemplateConfigEditListKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } + + transformedInputs, err := expandTranscoderJobTemplateConfigEditListInputs(original["inputs"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedInputs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["inputs"] = transformedInputs + } + + transformedStartTimeOffset, err := expandTranscoderJobTemplateConfigEditListStartTimeOffset(original["start_time_offset"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStartTimeOffset); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["startTimeOffset"] = transformedStartTimeOffset + } + + req = append(req, transformed) + } + return req, nil +} + +func expandTranscoderJobTemplateConfigEditListKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigEditListInputs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigEditListStartTimeOffset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigElementaryStreams(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandTranscoderJobTemplateConfigElementaryStreamsKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } + + transformedVideoStream, err := expandTranscoderJobTemplateConfigElementaryStreamsVideoStream(original["video_stream"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVideoStream); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["videoStream"] = transformedVideoStream + } + + transformedAudioStream, err := expandTranscoderJobTemplateConfigElementaryStreamsAudioStream(original["audio_stream"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAudioStream); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["audioStream"] = transformedAudioStream + } + + req = append(req, transformed) + } + return req, nil +} + +func expandTranscoderJobTemplateConfigElementaryStreamsKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigElementaryStreamsVideoStream(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedH264, err := expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264(original["h264"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedH264); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["h264"] = transformedH264 + } + + return transformed, nil +} + +func expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) 
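+ // Annotation on the expanders in this region: each expander mirrors its
+ // flattener. Snake_case schema keys such as "width_pixels" are re-mapped to
+ // the API's camelCase "widthPixels", and the reflect.ValueOf /
+ // tpgresource.IsEmptyValue guard drops empty values so that unset optional
+ // fields are omitted from the request body entirely.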
{ + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWidthPixels, err := expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264WidthPixels(original["width_pixels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWidthPixels); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["widthPixels"] = transformedWidthPixels + } + + transformedHeightPixels, err := expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264HeightPixels(original["height_pixels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHeightPixels); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["heightPixels"] = transformedHeightPixels + } + + transformedFrameRate, err := expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264FrameRate(original["frame_rate"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFrameRate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["frameRate"] = transformedFrameRate + } + + transformedBitrateBps, err := expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264BitrateBps(original["bitrate_bps"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBitrateBps); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bitrateBps"] = transformedBitrateBps + } + + transformedPixelFormat, err := expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264PixelFormat(original["pixel_format"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPixelFormat); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pixelFormat"] = transformedPixelFormat + } + + transformedRateControlMode, err := expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264RateControlMode(original["rate_control_mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRateControlMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rateControlMode"] = transformedRateControlMode + } + + transformedCrfLevel, err := expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264CrfLevel(original["crf_level"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCrfLevel); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["crfLevel"] = transformedCrfLevel + } + + transformedVbvSizeBits, err := expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264VbvSizeBits(original["vbv_size_bits"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVbvSizeBits); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["vbvSizeBits"] = transformedVbvSizeBits + } + + transformedVbvFullnessBits, err := expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264VbvFullnessBits(original["vbv_fullness_bits"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVbvFullnessBits); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["vbvFullnessBits"] = transformedVbvFullnessBits + } + + transformedEntropyCoder, err := expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264EntropyCoder(original["entropy_coder"], d, config) + if err != nil { 
+ return nil, err + } else if val := reflect.ValueOf(transformedEntropyCoder); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["entropyCoder"] = transformedEntropyCoder + } + + transformedProfile, err := expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264Profile(original["profile"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProfile); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["profile"] = transformedProfile + } + + transformedPreset, err := expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264Preset(original["preset"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPreset); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["preset"] = transformedPreset + } + + transformedGopDuration, err := expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264GopDuration(original["gop_duration"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGopDuration); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["gopDuration"] = transformedGopDuration + } + + transformedSdr, err := expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264Sdr(original["sdr"], d, config) + if err != nil { + return nil, err + } else { + transformed["sdr"] = transformedSdr + } + + transformedHlg, err := expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264Hlg(original["hlg"], d, config) + if err != nil { + return nil, err + } else { + transformed["hlg"] = transformedHlg + } + + return transformed, nil +} + +func expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264WidthPixels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264HeightPixels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264FrameRate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264BitrateBps(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264PixelFormat(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264RateControlMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264CrfLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264VbvSizeBits(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264VbvFullnessBits(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264EntropyCoder(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264Profile(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264Preset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264GopDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264Sdr(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandTranscoderJobTemplateConfigElementaryStreamsVideoStreamH264Hlg(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandTranscoderJobTemplateConfigElementaryStreamsAudioStream(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCodec, err := expandTranscoderJobTemplateConfigElementaryStreamsAudioStreamCodec(original["codec"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCodec); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["codec"] = transformedCodec + } + + transformedBitrateBps, err := expandTranscoderJobTemplateConfigElementaryStreamsAudioStreamBitrateBps(original["bitrate_bps"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBitrateBps); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bitrateBps"] = transformedBitrateBps + } + + transformedChannelCount, err := expandTranscoderJobTemplateConfigElementaryStreamsAudioStreamChannelCount(original["channel_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedChannelCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["channelCount"] = transformedChannelCount + } + + transformedChannelLayout, err := expandTranscoderJobTemplateConfigElementaryStreamsAudioStreamChannelLayout(original["channel_layout"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedChannelLayout); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["channelLayout"] = transformedChannelLayout + } + + transformedSampleRateHertz, err := expandTranscoderJobTemplateConfigElementaryStreamsAudioStreamSampleRateHertz(original["sample_rate_hertz"], d, config) + 
if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSampleRateHertz); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sampleRateHertz"] = transformedSampleRateHertz + } + + return transformed, nil +} + +func expandTranscoderJobTemplateConfigElementaryStreamsAudioStreamCodec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigElementaryStreamsAudioStreamBitrateBps(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigElementaryStreamsAudioStreamChannelCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigElementaryStreamsAudioStreamChannelLayout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigElementaryStreamsAudioStreamSampleRateHertz(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigMuxStreams(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandTranscoderJobTemplateConfigMuxStreamsKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } + + transformedFileName, err := expandTranscoderJobTemplateConfigMuxStreamsFileName(original["file_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFileName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fileName"] = transformedFileName + } + + transformedContainer, err := expandTranscoderJobTemplateConfigMuxStreamsContainer(original["container"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContainer); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["container"] = transformedContainer + } + + transformedElementaryStreams, err := expandTranscoderJobTemplateConfigMuxStreamsElementaryStreams(original["elementary_streams"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedElementaryStreams); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["elementaryStreams"] = transformedElementaryStreams + } + + transformedSegmentSettings, err := expandTranscoderJobTemplateConfigMuxStreamsSegmentSettings(original["segment_settings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSegmentSettings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["segmentSettings"] = transformedSegmentSettings + } + + transformedEncryptionId, err := expandTranscoderJobTemplateConfigMuxStreamsEncryptionId(original["encryption_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEncryptionId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["encryptionId"] = transformedEncryptionId + } + + req = append(req, transformed) + } + return req, nil +} + +func expandTranscoderJobTemplateConfigMuxStreamsKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigMuxStreamsFileName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigMuxStreamsContainer(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigMuxStreamsElementaryStreams(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigMuxStreamsSegmentSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSegmentDuration, err := expandTranscoderJobTemplateConfigMuxStreamsSegmentSettingsSegmentDuration(original["segment_duration"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSegmentDuration); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["segmentDuration"] = transformedSegmentDuration + } + + return transformed, nil +} + +func expandTranscoderJobTemplateConfigMuxStreamsSegmentSettingsSegmentDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigMuxStreamsEncryptionId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigManifests(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFileName, err := expandTranscoderJobTemplateConfigManifestsFileName(original["file_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFileName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fileName"] = transformedFileName + } + + transformedType, err := expandTranscoderJobTemplateConfigManifestsType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + transformedMuxStreams, err := expandTranscoderJobTemplateConfigManifestsMuxStreams(original["mux_streams"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMuxStreams); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["muxStreams"] = transformedMuxStreams + } + + req = append(req, transformed) + } + return req, nil +} + +func expandTranscoderJobTemplateConfigManifestsFileName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandTranscoderJobTemplateConfigManifestsType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigManifestsMuxStreams(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigOutput(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUri, err := expandTranscoderJobTemplateConfigOutputUri(original["uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uri"] = transformedUri + } + + return transformed, nil +} + +func expandTranscoderJobTemplateConfigOutputUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigAdBreaks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedStartTimeOffset, err := expandTranscoderJobTemplateConfigAdBreaksStartTimeOffset(original["start_time_offset"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStartTimeOffset); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["startTimeOffset"] = transformedStartTimeOffset + } + + req = append(req, transformed) + } + return req, nil +} + +func expandTranscoderJobTemplateConfigAdBreaksStartTimeOffset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigPubsubDestination(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTopic, err := expandTranscoderJobTemplateConfigPubsubDestinationTopic(original["topic"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTopic); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["topic"] = transformedTopic + } + + return transformed, nil +} + +func expandTranscoderJobTemplateConfigPubsubDestinationTopic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigOverlays(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedImage, err := expandTranscoderJobTemplateConfigOverlaysImage(original["image"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedImage); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["image"] = transformedImage + } + + transformedAnimations, err := expandTranscoderJobTemplateConfigOverlaysAnimations(original["animations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAnimations); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["animations"] = transformedAnimations + } + + req = append(req, transformed) + } + return req, nil +} + +func expandTranscoderJobTemplateConfigOverlaysImage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUri, err := expandTranscoderJobTemplateConfigOverlaysImageUri(original["uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uri"] = transformedUri + } + + return transformed, nil +} + +func expandTranscoderJobTemplateConfigOverlaysImageUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigOverlaysAnimations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAnimationFade, err := expandTranscoderJobTemplateConfigOverlaysAnimationsAnimationFade(original["animation_fade"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAnimationFade); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["animationFade"] = transformedAnimationFade + } + + req = append(req, transformed) + } + return req, nil +} + +func expandTranscoderJobTemplateConfigOverlaysAnimationsAnimationFade(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedXy, err := expandTranscoderJobTemplateConfigOverlaysAnimationsAnimationFadeXy(original["xy"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedXy); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["xy"] = transformedXy + } + + transformedStartTimeOffset, err := expandTranscoderJobTemplateConfigOverlaysAnimationsAnimationFadeStartTimeOffset(original["start_time_offset"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStartTimeOffset); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["startTimeOffset"] = transformedStartTimeOffset + } + + transformedEndTimeOffset, err := expandTranscoderJobTemplateConfigOverlaysAnimationsAnimationFadeEndTimeOffset(original["end_time_offset"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEndTimeOffset); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["endTimeOffset"] = transformedEndTimeOffset + } + + 
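+ // fade_type below follows the same generated pattern as the fields above: + // expand the Terraform value, surface any error, and only set the camelCase + // API key when the expanded result is non-empty.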
transformedFadeType, err := expandTranscoderJobTemplateConfigOverlaysAnimationsAnimationFadeFadeType(original["fade_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFadeType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fadeType"] = transformedFadeType + } + + return transformed, nil +} + +func expandTranscoderJobTemplateConfigOverlaysAnimationsAnimationFadeXy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedX, err := expandTranscoderJobTemplateConfigOverlaysAnimationsAnimationFadeXyX(original["x"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedX); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["x"] = transformedX + } + + transformedY, err := expandTranscoderJobTemplateConfigOverlaysAnimationsAnimationFadeXyY(original["y"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedY); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["y"] = transformedY + } + + return transformed, nil +} + +func expandTranscoderJobTemplateConfigOverlaysAnimationsAnimationFadeXyX(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigOverlaysAnimationsAnimationFadeXyY(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigOverlaysAnimationsAnimationFadeStartTimeOffset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigOverlaysAnimationsAnimationFadeEndTimeOffset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigOverlaysAnimationsAnimationFadeFadeType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigEncryptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedId, err := expandTranscoderJobTemplateConfigEncryptionsId(original["id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["id"] = transformedId + } + + transformedDrmSystems, err := expandTranscoderJobTemplateConfigEncryptionsDrmSystems(original["drm_systems"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDrmSystems); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["drmSystems"] = transformedDrmSystems + } + + transformedAes128, err := expandTranscoderJobTemplateConfigEncryptionsAes128(original["aes128"], d, config) + if err != nil { + return nil, err + } else { + transformed["aes128"] = transformedAes128 + } + + 
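+ // Note that message-typed fields such as aes128 (and sample_aes below) are + // assigned without an IsEmptyValue guard: an empty object is a meaningful + // API value here (it selects that encryption mode), so it must still be sent.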
transformedSampleAes, err := expandTranscoderJobTemplateConfigEncryptionsSampleAes(original["sample_aes"], d, config) + if err != nil { + return nil, err + } else { + transformed["sampleAes"] = transformedSampleAes + } + + transformedMpegCenc, err := expandTranscoderJobTemplateConfigEncryptionsMpegCenc(original["mpeg_cenc"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMpegCenc); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mpegCenc"] = transformedMpegCenc + } + + transformedSecretManagerKeySource, err := expandTranscoderJobTemplateConfigEncryptionsSecretManagerKeySource(original["secret_manager_key_source"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecretManagerKeySource); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["secretManagerKeySource"] = transformedSecretManagerKeySource + } + + req = append(req, transformed) + } + return req, nil +} + +func expandTranscoderJobTemplateConfigEncryptionsId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigEncryptionsDrmSystems(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWidevine, err := expandTranscoderJobTemplateConfigEncryptionsDrmSystemsWidevine(original["widevine"], d, config) + if err != nil { + return nil, err + } else { + transformed["widevine"] = transformedWidevine + } + + transformedFairplay, err := expandTranscoderJobTemplateConfigEncryptionsDrmSystemsFairplay(original["fairplay"], d, config) + if err != nil { + return nil, err + } else { + transformed["fairplay"] = transformedFairplay + } + + transformedPlayready, err := expandTranscoderJobTemplateConfigEncryptionsDrmSystemsPlayready(original["playready"], d, config) + if err != nil { + return nil, err + } else { + transformed["playready"] = transformedPlayready + } + + transformedClearkey, err := expandTranscoderJobTemplateConfigEncryptionsDrmSystemsClearkey(original["clearkey"], d, config) + if err != nil { + return nil, err + } else { + transformed["clearkey"] = transformedClearkey + } + + return transformed, nil +} + +func expandTranscoderJobTemplateConfigEncryptionsDrmSystemsWidevine(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandTranscoderJobTemplateConfigEncryptionsDrmSystemsFairplay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandTranscoderJobTemplateConfigEncryptionsDrmSystemsPlayready(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] 
== nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandTranscoderJobTemplateConfigEncryptionsDrmSystemsClearkey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandTranscoderJobTemplateConfigEncryptionsAes128(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandTranscoderJobTemplateConfigEncryptionsSampleAes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandTranscoderJobTemplateConfigEncryptionsMpegCenc(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScheme, err := expandTranscoderJobTemplateConfigEncryptionsMpegCencScheme(original["scheme"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScheme); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scheme"] = transformedScheme + } + + return transformed, nil +} + +func expandTranscoderJobTemplateConfigEncryptionsMpegCencScheme(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateConfigEncryptionsSecretManagerKeySource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSecretVersion, err := expandTranscoderJobTemplateConfigEncryptionsSecretManagerKeySourceSecretVersion(original["secret_version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecretVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["secretVersion"] = transformedSecretVersion + } + + return transformed, nil +} + +func expandTranscoderJobTemplateConfigEncryptionsSecretManagerKeySourceSecretVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTranscoderJobTemplateEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = 
val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/transcoder/resource_transcoder_job_template_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/transcoder/resource_transcoder_job_template_sweeper.go new file mode 100644 index 00000000000..3162ab341fb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/transcoder/resource_transcoder_job_template_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package transcoder + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("TranscoderJobTemplate", testSweepTranscoderJobTemplate) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepTranscoderJobTemplate(region string) error { + resourceName := "TranscoderJobTemplate" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://transcoder.googleapis.com/v1/projects/{{project}}/locations/{{location}}/jobTemplates", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["jobTemplates"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
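+ // Only names carrying a recognized test prefix are deleted below; all other + // resources are tallied and skipped so shared projects are never swept clean.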
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be swept + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://transcoder.googleapis.com/v1/projects/{{project}}/locations/{{location}}/jobTemplates/{{job_template_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_feature_online_store.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_feature_online_store.go index 45700a0a098..36044dea115 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_feature_online_store.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_feature_online_store.go @@ -205,8 +205,8 @@ Please refer to the field 'effective_labels' for all of the labels present on th "force_destroy": { Type: schema.TypeBool, Optional: true, - Default: false, Description: `If set to true, any FeatureViews and Features for this FeatureOnlineStore will also be deleted.`, + Default: false, }, "project": { Type: schema.TypeString, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_featurestore.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_featurestore.go index cb0209697f3..f676de05ae3 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_featurestore.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_featurestore.go @@ -161,8 +161,8 @@ Please refer to the field 'effective_labels' for all of the labels present on th "force_destroy": { Type: schema.TypeBool, Optional: true, - Default: false, Description: `If set to true, any EntityTypes and Features for this Featurestore will also be deleted`, + Default: false, }, "project": { Type: schema.TypeString, diff --git
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_index_endpoint_deployed_index.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_index_endpoint_deployed_index.go new file mode 100644 index 00000000000..18a93fe8862 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_index_endpoint_deployed_index.go @@ -0,0 +1,1163 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package vertexai + +import ( + "fmt" + "log" + "net/http" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceVertexAIIndexEndpointDeployedIndex() *schema.Resource { + return &schema.Resource{ + Create: resourceVertexAIIndexEndpointDeployedIndexCreate, + Read: resourceVertexAIIndexEndpointDeployedIndexRead, + Update: resourceVertexAIIndexEndpointDeployedIndexUpdate, + Delete: resourceVertexAIIndexEndpointDeployedIndexDelete, + + Importer: &schema.ResourceImporter{ + State: resourceVertexAIIndexEndpointDeployedIndexImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(45 * time.Minute), + Update: schema.DefaultTimeout(45 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "deployed_index_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The user-specified ID of the DeployedIndex. The ID can be up to 128 characters long and must start with a letter and only contain letters, numbers, and underscores. The ID must be unique within the project it is created in.`, + }, + "index": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.ProjectNumberDiffSuppress, + Description: `The name of the Index this is the deployment of.`, + }, + "index_endpoint": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Identifies the index endpoint. Must be in the format +'projects/{{project}}/locations/{{region}}/indexEndpoints/{{indexEndpoint}}'`, + }, + "automatic_resources": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `A description of resources that the DeployedIndex uses, which to a large degree are decided by Vertex AI, and which optionally allow only a modest additional configuration.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_replica_count": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases.
If maxReplicaCount is not set, the default value is minReplicaCount. The max allowed replica count is 1000. + +The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, no upper bound for scaling under heavy traffic is assumed, though Vertex AI may be unable to scale beyond a certain replica number.`, + }, + "min_replica_count": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The minimum number of replicas this DeployedModel will always be deployed on. If minReplicaCount is not set, the default value is 2 (we don't provide SLA when minReplicaCount=1). + +If traffic against it increases, it may dynamically be deployed onto more replicas up to [maxReplicaCount](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/AutomaticResources#FIELDS.max_replica_count), and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error.`, + }, + }, + }, + }, + "dedicated_resources": { + Type: schema.TypeList, + Optional: true, + Description: `A description of resources that are dedicated to the DeployedIndex, and that need a higher degree of manual configuration. The field minReplicaCount must be set to a value strictly greater than 0, or else validation will fail. We don't provide SLA when minReplicaCount=1. If maxReplicaCount is not set, the default value is minReplicaCount. The max allowed replica count is 1000. + +Available machine types for SMALL shard: e2-standard-2 and all machine types available for MEDIUM and LARGE shard. + +Available machine types for MEDIUM shard: e2-standard-16 and all machine types available for LARGE shard. + +Available machine types for LARGE shard: e2-highmem-16, n2d-standard-32. + +n1-standard-16 and n1-standard-32 are still available, but we recommend e2-standard-16 and e2-highmem-16 for cost efficiency.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "machine_spec": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `The specification of a single machine used by the DeployedIndex.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "machine_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The type of the machine. + +See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) + +See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). + +For [DeployedModel](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.endpoints#DeployedModel) this field is optional, and the default value is n1-standard-2.
For [BatchPredictionJob](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchPredictionJob) or as part of [WorkerPoolSpec](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/CustomJobSpec#WorkerPoolSpec) this field is required.`, + }, + }, + }, + }, + "min_replica_count": { + Type: schema.TypeInt, + Required: true, + Description: `The minimum number of machine replicas this DeployedModel will always be deployed on. This value must be greater than or equal to 1.`, + }, + "max_replica_count": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If maxReplicaCount is not set, the default value is minReplicaCount`, + }, + }, + }, + }, + "deployed_index_auth_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `If set, the authentication is enabled for the private endpoint.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auth_provider": { + Type: schema.TypeList, + Optional: true, + Description: `Defines the authentication provider that the DeployedIndex uses.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_issuers": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `A list of allowed JWT issuers. Each entry must be a valid Google service account, in the following format: service-account-name@project-id.iam.gserviceaccount.com`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "audiences": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The list of JWT audiences that are allowed access. A JWT containing any of these audiences will be accepted.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + "deployment_group": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The deployment group can be no longer than 64 characters (e.g. 'test', 'prod'). If not set, we will use the 'default' deployment group. +Creating deployment_groups with reserved_ip_ranges is a recommended practice when the peered network has multiple peering ranges. This creates your deployments from predictable IP spaces for easier traffic administration. Also, one deployment_group (except 'default') can only be used with the same reserved_ip_ranges, which means if the deployment_group has been used with reserved_ip_ranges: [a, b, c], using it with [a, b] or [d, e] is disallowed. [See the official documentation here](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.indexEndpoints#DeployedIndex.FIELDS.deployment_group). +Note: we only support up to 5 deployment groups (not including 'default').`, + Default: "default", + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The display name of the Index. The name can be up to 128 characters long and can consist of any UTF-8 characters.`, + }, + "enable_access_logging": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `If true, private endpoint's access logs are sent to Cloud Logging.`, + Default: false, + }, + "reserved_ip_ranges": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `A list of reserved IP ranges under the VPC network that can be used for this DeployedIndex.
+If set, we will deploy the index within the provided IP ranges. Otherwise, the index might be deployed to any IP ranges under the provided VPC network. + +The value should be the name of the address (https://cloud.google.com/compute/docs/reference/rest/v1/addresses). Example: ['vertex-ai-ip-range']. + +For more information about subnets and network IP ranges, please see https://cloud.google.com/vpc/docs/subnets#manually_created_subnet_ip_ranges.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp of when the DeployedIndex was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, + }, + "index_sync_time": { + Type: schema.TypeString, + Computed: true, + Description: `The DeployedIndex may depend on various data on its original Index. Additionally, when certain changes to the original Index are being done (e.g. when what the Index contains is being changed) the DeployedIndex may be asynchronously updated in the background to reflect these changes. If this timestamp's value is at least the [Index.update_time](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.indexes#Index.FIELDS.update_time) of the original Index, it means that this DeployedIndex and the original Index are in sync. If this timestamp is older, then to see which updates this DeployedIndex already contains (and which it does not), one must [list](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.operations/list#google.longrunning.Operations.ListOperations) the operations that are running on the original Index. Only the successfully completed Operations with updateTime equal to or before this sync time are contained in this DeployedIndex. + +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the DeployedIndex resource.`, + }, + "private_endpoints": { + Type: schema.TypeList, + Computed: true, + Description: `Provides paths for users to send requests directly to the deployed index services running on Cloud via private services access. This field is populated if [network](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.indexEndpoints#IndexEndpoint.FIELDS.network) is configured.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "match_grpc_address": { + Type: schema.TypeString, + Computed: true, + Description: `The IP address used to send match gRPC requests.`, + }, + "psc_automated_endpoints": { + Type: schema.TypeList, + Computed: true, + Description: `PscAutomatedEndpoints is populated if private service connect is enabled and pscAutomationConfigs is set.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "match_address": { + Type: schema.TypeString, + Computed: true, + Description: `IP address created by the automated forwarding rule.`, + }, + "network": { + Type: schema.TypeString, + Computed: true, + Description: `Corresponding network in pscAutomationConfigs.`, + }, + "project_id": { + Type: schema.TypeString, + Computed: true, + Description: `Corresponding projectId in pscAutomationConfigs.`, + }, + }, + }, + }, + "service_attachment": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the service attachment resource.
Populated if private service connect is enabled.`, + }, + }, + }, + }, + }, + UseJSONNumber: true, + } +} + +func resourceVertexAIIndexEndpointDeployedIndexCreate(d *schema.ResourceData, meta interface{}) error { + var project string + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + deployedIndexIdProp, err := expandVertexAIIndexEndpointDeployedIndexDeployedIndexId(d.Get("deployed_index_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deployed_index_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(deployedIndexIdProp)) && (ok || !reflect.DeepEqual(v, deployedIndexIdProp)) { + obj["deployedIndexId"] = deployedIndexIdProp + } + indexProp, err := expandVertexAIIndexEndpointDeployedIndexIndex(d.Get("index"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("index"); !tpgresource.IsEmptyValue(reflect.ValueOf(indexProp)) && (ok || !reflect.DeepEqual(v, indexProp)) { + obj["index"] = indexProp + } + displayNameProp, err := expandVertexAIIndexEndpointDeployedIndexDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + automaticResourcesProp, err := expandVertexAIIndexEndpointDeployedIndexAutomaticResources(d.Get("automatic_resources"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("automatic_resources"); !tpgresource.IsEmptyValue(reflect.ValueOf(automaticResourcesProp)) && (ok || !reflect.DeepEqual(v, automaticResourcesProp)) { + obj["automaticResources"] = automaticResourcesProp + } + dedicatedResourcesProp, err := expandVertexAIIndexEndpointDeployedIndexDedicatedResources(d.Get("dedicated_resources"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dedicated_resources"); !tpgresource.IsEmptyValue(reflect.ValueOf(dedicatedResourcesProp)) && (ok || !reflect.DeepEqual(v, dedicatedResourcesProp)) { + obj["dedicatedResources"] = dedicatedResourcesProp + } + enableAccessLoggingProp, err := expandVertexAIIndexEndpointDeployedIndexEnableAccessLogging(d.Get("enable_access_logging"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_access_logging"); !tpgresource.IsEmptyValue(reflect.ValueOf(enableAccessLoggingProp)) && (ok || !reflect.DeepEqual(v, enableAccessLoggingProp)) { + obj["enableAccessLogging"] = enableAccessLoggingProp + } + deployedIndexAuthConfigProp, err := expandVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfig(d.Get("deployed_index_auth_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deployed_index_auth_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(deployedIndexAuthConfigProp)) && (ok || !reflect.DeepEqual(v, deployedIndexAuthConfigProp)) { + obj["deployedIndexAuthConfig"] = deployedIndexAuthConfigProp + } + reservedIpRangesProp, err := expandVertexAIIndexEndpointDeployedIndexReservedIpRanges(d.Get("reserved_ip_ranges"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("reserved_ip_ranges"); !tpgresource.IsEmptyValue(reflect.ValueOf(reservedIpRangesProp)) && (ok || !reflect.DeepEqual(v, reservedIpRangesProp)) { + obj["reservedIpRanges"] = reservedIpRangesProp + } + deploymentGroupProp, 
err := expandVertexAIIndexEndpointDeployedIndexDeploymentGroup(d.Get("deployment_group"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deployment_group"); !tpgresource.IsEmptyValue(reflect.ValueOf(deploymentGroupProp)) && (ok || !reflect.DeepEqual(v, deploymentGroupProp)) { + obj["deploymentGroup"] = deploymentGroupProp + } + + obj, err = resourceVertexAIIndexEndpointDeployedIndexEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}{{index_endpoint}}:deployIndex") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new IndexEndpointDeployedIndex: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating IndexEndpointDeployedIndex: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{index_endpoint}}/deployedIndex/{{deployed_index_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = VertexAIOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating IndexEndpointDeployedIndex", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create IndexEndpointDeployedIndex: %s", err) + } + + opRes, err = resourceVertexAIIndexEndpointDeployedIndexDecoder(d, meta, opRes) + if err != nil { + return fmt.Errorf("Error decoding response from operation: %s", err) + } + if opRes == nil { + return fmt.Errorf("Error decoding response from operation, could not find object") + } + + if err := d.Set("name", flattenVertexAIIndexEndpointDeployedIndexName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
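+	// (d.Id() is derived from index_endpoint and deployed_index_id, so rebuilding it
+	// after the decode keeps the stored ID consistent with the fields set above.)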
+ id, err = tpgresource.ReplaceVars(d, config, "{{index_endpoint}}/deployedIndex/{{deployed_index_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating IndexEndpointDeployedIndex %q: %#v", d.Id(), res) + + return resourceVertexAIIndexEndpointDeployedIndexRead(d, meta) +} + +func resourceVertexAIIndexEndpointDeployedIndexRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}{{index_endpoint}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("VertexAIIndexEndpointDeployedIndex %q", d.Id())) + } + + res, err = resourceVertexAIIndexEndpointDeployedIndexDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. It may be marked deleted + log.Printf("[DEBUG] Removing VertexAIIndexEndpointDeployedIndex because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("name", flattenVertexAIIndexEndpointDeployedIndexName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + if err := d.Set("deployed_index_id", flattenVertexAIIndexEndpointDeployedIndexDeployedIndexId(res["deployedIndexId"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + if err := d.Set("index", flattenVertexAIIndexEndpointDeployedIndexIndex(res["index"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + if err := d.Set("display_name", flattenVertexAIIndexEndpointDeployedIndexDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + if err := d.Set("create_time", flattenVertexAIIndexEndpointDeployedIndexCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + if err := d.Set("private_endpoints", flattenVertexAIIndexEndpointDeployedIndexPrivateEndpoints(res["privateEndpoints"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + if err := d.Set("index_sync_time", flattenVertexAIIndexEndpointDeployedIndexIndexSyncTime(res["indexSyncTime"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + if err := d.Set("automatic_resources", flattenVertexAIIndexEndpointDeployedIndexAutomaticResources(res["automaticResources"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + if err := d.Set("dedicated_resources", flattenVertexAIIndexEndpointDeployedIndexDedicatedResources(res["dedicatedResources"], d, config)); err != nil { + return fmt.Errorf("Error reading 
IndexEndpointDeployedIndex: %s", err) + } + if err := d.Set("enable_access_logging", flattenVertexAIIndexEndpointDeployedIndexEnableAccessLogging(res["enableAccessLogging"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + if err := d.Set("deployed_index_auth_config", flattenVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfig(res["deployedIndexAuthConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + if err := d.Set("reserved_ip_ranges", flattenVertexAIIndexEndpointDeployedIndexReservedIpRanges(res["reservedIpRanges"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + if err := d.Set("deployment_group", flattenVertexAIIndexEndpointDeployedIndexDeploymentGroup(res["deploymentGroup"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + + return nil +} + +func resourceVertexAIIndexEndpointDeployedIndexUpdate(d *schema.ResourceData, meta interface{}) error { + var project string + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + deployedIndexIdProp, err := expandVertexAIIndexEndpointDeployedIndexDeployedIndexId(d.Get("deployed_index_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deployed_index_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, deployedIndexIdProp)) { + obj["deployedIndexId"] = deployedIndexIdProp + } + indexProp, err := expandVertexAIIndexEndpointDeployedIndexIndex(d.Get("index"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("index"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, indexProp)) { + obj["index"] = indexProp + } + displayNameProp, err := expandVertexAIIndexEndpointDeployedIndexDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + automaticResourcesProp, err := expandVertexAIIndexEndpointDeployedIndexAutomaticResources(d.Get("automatic_resources"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("automatic_resources"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, automaticResourcesProp)) { + obj["automaticResources"] = automaticResourcesProp + } + dedicatedResourcesProp, err := expandVertexAIIndexEndpointDeployedIndexDedicatedResources(d.Get("dedicated_resources"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dedicated_resources"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dedicatedResourcesProp)) { + obj["dedicatedResources"] = dedicatedResourcesProp + } + enableAccessLoggingProp, err := expandVertexAIIndexEndpointDeployedIndexEnableAccessLogging(d.Get("enable_access_logging"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_access_logging"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableAccessLoggingProp)) { + obj["enableAccessLogging"] = enableAccessLoggingProp + } + deployedIndexAuthConfigProp, err := 
expandVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfig(d.Get("deployed_index_auth_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deployed_index_auth_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, deployedIndexAuthConfigProp)) { + obj["deployedIndexAuthConfig"] = deployedIndexAuthConfigProp + } + reservedIpRangesProp, err := expandVertexAIIndexEndpointDeployedIndexReservedIpRanges(d.Get("reserved_ip_ranges"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("reserved_ip_ranges"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, reservedIpRangesProp)) { + obj["reservedIpRanges"] = reservedIpRangesProp + } + deploymentGroupProp, err := expandVertexAIIndexEndpointDeployedIndexDeploymentGroup(d.Get("deployment_group"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deployment_group"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, deploymentGroupProp)) { + obj["deploymentGroup"] = deploymentGroupProp + } + + obj, err = resourceVertexAIIndexEndpointDeployedIndexUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}{{index_endpoint}}:mutateDeployedIndex") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating IndexEndpointDeployedIndex %q: %#v", d.Id(), obj) + headers := make(http.Header) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating IndexEndpointDeployedIndex %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating IndexEndpointDeployedIndex %q: %#v", d.Id(), res) + } + + err = VertexAIOperationWaitTime( + config, res, project, "Updating IndexEndpointDeployedIndex", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceVertexAIIndexEndpointDeployedIndexRead(d, meta) +} + +func resourceVertexAIIndexEndpointDeployedIndexDelete(d *schema.ResourceData, meta interface{}) error { + var project string + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}{{index_endpoint}}:undeployIndex") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + obj = map[string]interface{}{ + "deployedIndexId": d.Get("deployed_index_id"), + } + + log.Printf("[DEBUG] Deleting IndexEndpointDeployedIndex %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return 
transport_tpg.HandleNotFoundError(err, d, "IndexEndpointDeployedIndex")
+	}
+
+	err = VertexAIOperationWaitTime(
+		config, res, project, "Deleting IndexEndpointDeployedIndex", userAgent,
+		d.Timeout(schema.TimeoutDelete))
+
+	if err != nil {
+		return err
+	}
+
+	log.Printf("[DEBUG] Finished deleting IndexEndpointDeployedIndex %q: %#v", d.Id(), res)
+	return nil
+}
+
+func resourceVertexAIIndexEndpointDeployedIndexImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	config := meta.(*transport_tpg.Config)
+
+	// current import_formats can't import fields with forward slashes in their value
+	if err := tpgresource.ParseImportId([]string{
+		"(?P<index_endpoint>.+)/deployedIndex/(?P<deployed_index_id>[^/]+)",
+	}, d, config); err != nil {
+		return nil, err
+	}
+
+	// Replace import id for the resource id
+	id, err := tpgresource.ReplaceVars(d, config, "{{index_endpoint}}/deployedIndex/{{deployed_index_id}}")
+	if err != nil {
+		return nil, fmt.Errorf("Error constructing id: %s", err)
+	}
+	d.SetId(id)
+
+	return []*schema.ResourceData{d}, nil
+}
+
+func flattenVertexAIIndexEndpointDeployedIndexName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenVertexAIIndexEndpointDeployedIndexDeployedIndexId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenVertexAIIndexEndpointDeployedIndexIndex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenVertexAIIndexEndpointDeployedIndexDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenVertexAIIndexEndpointDeployedIndexCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenVertexAIIndexEndpointDeployedIndexPrivateEndpoints(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return nil
+	}
+	original := v.(map[string]interface{})
+	if len(original) == 0 {
+		return nil
+	}
+	transformed := make(map[string]interface{})
+	transformed["match_grpc_address"] =
+		flattenVertexAIIndexEndpointDeployedIndexPrivateEndpointsMatchGrpcAddress(original["matchGrpcAddress"], d, config)
+	transformed["service_attachment"] =
+		flattenVertexAIIndexEndpointDeployedIndexPrivateEndpointsServiceAttachment(original["serviceAttachment"], d, config)
+	transformed["psc_automated_endpoints"] =
+		flattenVertexAIIndexEndpointDeployedIndexPrivateEndpointsPscAutomatedEndpoints(original["pscAutomatedEndpoints"], d, config)
+	return []interface{}{transformed}
+}
+func flattenVertexAIIndexEndpointDeployedIndexPrivateEndpointsMatchGrpcAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenVertexAIIndexEndpointDeployedIndexPrivateEndpointsServiceAttachment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenVertexAIIndexEndpointDeployedIndexPrivateEndpointsPscAutomatedEndpoints(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return v
+	}
+	l := v.([]interface{})
+	transformed := make([]interface{}, 0, len(l))
+	for _, raw := range l {
+		original := raw.(map[string]interface{})
+		if len(original) < 1 {
+			// Do not include empty json objects coming back from the api
+			continue
+		}
+		transformed = append(transformed, map[string]interface{}{
+			"project_id":
flattenVertexAIIndexEndpointDeployedIndexPrivateEndpointsPscAutomatedEndpointsProjectId(original["projectId"], d, config), + "network": flattenVertexAIIndexEndpointDeployedIndexPrivateEndpointsPscAutomatedEndpointsNetwork(original["network"], d, config), + "match_address": flattenVertexAIIndexEndpointDeployedIndexPrivateEndpointsPscAutomatedEndpointsMatchAddress(original["matchAddress"], d, config), + }) + } + return transformed +} +func flattenVertexAIIndexEndpointDeployedIndexPrivateEndpointsPscAutomatedEndpointsProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointDeployedIndexPrivateEndpointsPscAutomatedEndpointsNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointDeployedIndexPrivateEndpointsPscAutomatedEndpointsMatchAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointDeployedIndexIndexSyncTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointDeployedIndexAutomaticResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["min_replica_count"] = + flattenVertexAIIndexEndpointDeployedIndexAutomaticResourcesMinReplicaCount(original["minReplicaCount"], d, config) + transformed["max_replica_count"] = + flattenVertexAIIndexEndpointDeployedIndexAutomaticResourcesMaxReplicaCount(original["maxReplicaCount"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIIndexEndpointDeployedIndexAutomaticResourcesMinReplicaCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVertexAIIndexEndpointDeployedIndexAutomaticResourcesMaxReplicaCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVertexAIIndexEndpointDeployedIndexDedicatedResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["machine_spec"] = + flattenVertexAIIndexEndpointDeployedIndexDedicatedResourcesMachineSpec(original["machineSpec"], d, config) + transformed["min_replica_count"] = + flattenVertexAIIndexEndpointDeployedIndexDedicatedResourcesMinReplicaCount(original["minReplicaCount"], d, config) + transformed["max_replica_count"] = + 
flattenVertexAIIndexEndpointDeployedIndexDedicatedResourcesMaxReplicaCount(original["maxReplicaCount"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIIndexEndpointDeployedIndexDedicatedResourcesMachineSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["machine_type"] = + flattenVertexAIIndexEndpointDeployedIndexDedicatedResourcesMachineSpecMachineType(original["machineType"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIIndexEndpointDeployedIndexDedicatedResourcesMachineSpecMachineType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointDeployedIndexDedicatedResourcesMinReplicaCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVertexAIIndexEndpointDeployedIndexDedicatedResourcesMaxReplicaCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVertexAIIndexEndpointDeployedIndexEnableAccessLogging(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["auth_provider"] = + flattenVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProvider(original["authProvider"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProvider(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["audiences"] = + flattenVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProviderAudiences(original["audiences"], d, config) + transformed["allowed_issuers"] = + flattenVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProviderAllowedIssuers(original["allowedIssuers"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProviderAudiences(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProviderAllowedIssuers(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointDeployedIndexReservedIpRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointDeployedIndexDeploymentGroup(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandVertexAIIndexEndpointDeployedIndexDeployedIndexId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexEndpointDeployedIndexIndex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexEndpointDeployedIndexDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexEndpointDeployedIndexAutomaticResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMinReplicaCount, err := expandVertexAIIndexEndpointDeployedIndexAutomaticResourcesMinReplicaCount(original["min_replica_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinReplicaCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minReplicaCount"] = transformedMinReplicaCount + } + + transformedMaxReplicaCount, err := expandVertexAIIndexEndpointDeployedIndexAutomaticResourcesMaxReplicaCount(original["max_replica_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxReplicaCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxReplicaCount"] = transformedMaxReplicaCount + } + + return transformed, nil +} + +func expandVertexAIIndexEndpointDeployedIndexAutomaticResourcesMinReplicaCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexEndpointDeployedIndexAutomaticResourcesMaxReplicaCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexEndpointDeployedIndexDedicatedResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMachineSpec, err := expandVertexAIIndexEndpointDeployedIndexDedicatedResourcesMachineSpec(original["machine_spec"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMachineSpec); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["machineSpec"] = transformedMachineSpec + } + + transformedMinReplicaCount, err := expandVertexAIIndexEndpointDeployedIndexDedicatedResourcesMinReplicaCount(original["min_replica_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinReplicaCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minReplicaCount"] = 
transformedMinReplicaCount + } + + transformedMaxReplicaCount, err := expandVertexAIIndexEndpointDeployedIndexDedicatedResourcesMaxReplicaCount(original["max_replica_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxReplicaCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxReplicaCount"] = transformedMaxReplicaCount + } + + return transformed, nil +} + +func expandVertexAIIndexEndpointDeployedIndexDedicatedResourcesMachineSpec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMachineType, err := expandVertexAIIndexEndpointDeployedIndexDedicatedResourcesMachineSpecMachineType(original["machine_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMachineType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["machineType"] = transformedMachineType + } + + return transformed, nil +} + +func expandVertexAIIndexEndpointDeployedIndexDedicatedResourcesMachineSpecMachineType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexEndpointDeployedIndexDedicatedResourcesMinReplicaCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexEndpointDeployedIndexDedicatedResourcesMaxReplicaCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexEndpointDeployedIndexEnableAccessLogging(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAuthProvider, err := expandVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProvider(original["auth_provider"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAuthProvider); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["authProvider"] = transformedAuthProvider + } + + return transformed, nil +} + +func expandVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProvider(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAudiences, err := expandVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProviderAudiences(original["audiences"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAudiences); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["audiences"] = transformedAudiences + } + + transformedAllowedIssuers, err := 
expandVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProviderAllowedIssuers(original["allowed_issuers"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedAllowedIssuers); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["allowedIssuers"] = transformedAllowedIssuers
+	}
+
+	return transformed, nil
+}
+
+func expandVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProviderAudiences(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProviderAllowedIssuers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandVertexAIIndexEndpointDeployedIndexReservedIpRanges(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandVertexAIIndexEndpointDeployedIndexDeploymentGroup(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func resourceVertexAIIndexEndpointDeployedIndexEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) {
+	// The deployIndex call expects the object nested under "deployedIndex",
+	// with the user-specified ID in an "id" field rather than "deployedIndexId".
+	req := make(map[string]interface{})
+	obj["id"] = d.Get("deployed_index_id")
+	delete(obj, "deployedIndexId")
+	delete(obj, "name")
+	delete(obj, "indexEndpoint")
+	req["deployedIndex"] = obj
+	return req, nil
+}
+
+func resourceVertexAIIndexEndpointDeployedIndexUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) {
+	obj["id"] = obj["deployedIndexId"]
+	delete(obj, "deployedIndexId")
+	return obj, nil
+}
+
+func resourceVertexAIIndexEndpointDeployedIndexDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) {
+	v, ok := res["deployedIndexes"]
+	if !ok || v == nil { // CREATE
+		res["name"] = res["deployedIndexId"]
+		delete(res, "deployedIndexId")
+		return res, nil
+	}
+	// dpIndex stays nil unless a deployed index matching deployed_index_id is found.
+	var dpIndex map[string]interface{}
+	for _, raw := range v.([]interface{}) {
+		dpI := raw.(map[string]interface{})
+		if dpI["id"] == d.Get("deployed_index_id").(string) {
+			dpI["indexEndpoint"] = d.Get("index_endpoint")
+			dpI["deployedIndexId"] = d.Get("deployed_index_id")
+			dpIndex = dpI
+			break
+		}
+	}
+	if dpIndex == nil {
+		return nil, fmt.Errorf("Error: deployed index %q not found", d.Get("deployed_index_id"))
+	}
+	return dpIndex, nil
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vmwareengine/resource_vmwareengine_cluster.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vmwareengine/resource_vmwareengine_cluster.go
index 5f0b81de8c2..e8adc3b1c6e 100644
--- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vmwareengine/resource_vmwareengine_cluster.go
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vmwareengine/resource_vmwareengine_cluster.go
@@ -63,6 +63,131 @@ func ResourceVmwareengineCluster() *schema.Resource {
 Resource names are schemeless URIs that follow the conventions in https://cloud.google.com/apis/design/resource_names.
For example: projects/my-project/locations/us-west1-a/privateClouds/my-cloud`, }, + "autoscaling_settings": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration of the autoscaling applied to this cluster`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "autoscaling_policies": { + Type: schema.TypeSet, + Required: true, + Description: `The map with autoscaling policies applied to the cluster. +The key is the identifier of the policy. +It must meet the following requirements: + * Only contains 1-63 alphanumeric characters and hyphens + * Begins with an alphabetical character + * Ends with a non-hyphen character + * Not formatted as a UUID + * Complies with [RFC 1034](https://datatracker.ietf.org/doc/html/rfc1034) (section 3.5) + +Currently the map must contain only one element +that describes the autoscaling policy for compute nodes.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "autoscale_policy_id": { + Type: schema.TypeString, + Required: true, + }, + "node_type_id": { + Type: schema.TypeString, + Required: true, + Description: `The canonical identifier of the node type to add or remove.`, + }, + "scale_out_size": { + Type: schema.TypeInt, + Required: true, + Description: `Number of nodes to add to a cluster during a scale-out operation. +Must be divisible by 2 for stretched clusters.`, + }, + "consumed_memory_thresholds": { + Type: schema.TypeList, + Optional: true, + Description: `Utilization thresholds pertaining to amount of consumed memory.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "scale_in": { + Type: schema.TypeInt, + Required: true, + Description: `The utilization triggering the scale-in operation in percent.`, + }, + "scale_out": { + Type: schema.TypeInt, + Required: true, + Description: `The utilization triggering the scale-out operation in percent.`, + }, + }, + }, + }, + "cpu_thresholds": { + Type: schema.TypeList, + Optional: true, + Description: `Utilization thresholds pertaining to CPU utilization.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "scale_in": { + Type: schema.TypeInt, + Required: true, + Description: `The utilization triggering the scale-in operation in percent.`, + }, + "scale_out": { + Type: schema.TypeInt, + Required: true, + Description: `The utilization triggering the scale-out operation in percent.`, + }, + }, + }, + }, + "storage_thresholds": { + Type: schema.TypeList, + Optional: true, + Description: `Utilization thresholds pertaining to amount of consumed storage.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "scale_in": { + Type: schema.TypeInt, + Required: true, + Description: `The utilization triggering the scale-in operation in percent.`, + }, + "scale_out": { + Type: schema.TypeInt, + Required: true, + Description: `The utilization triggering the scale-out operation in percent.`, + }, + }, + }, + }, + }, + }, + }, + "cool_down_period": { + Type: schema.TypeString, + Optional: true, + Description: `The minimum duration between consecutive autoscale operations. +It starts once addition or removal of nodes is fully completed. +Minimum cool down period is 30m. +Cool down period must be in whole minutes (for example, 30m, 31m, 50m). +Mandatory for successful addition of autoscaling settings in cluster.`, + }, + "max_cluster_node_count": { + Type: schema.TypeInt, + Optional: true, + Description: `Maximum number of nodes of any type in a cluster. 
+Mandatory for successful addition of autoscaling settings in cluster.`, + }, + "min_cluster_node_count": { + Type: schema.TypeInt, + Optional: true, + Description: `Minimum number of nodes of any type in a cluster. +Mandatory for successful addition of autoscaling settings in cluster.`, + }, + }, + }, + }, "node_type_configs": { Type: schema.TypeSet, Optional: true, @@ -127,6 +252,12 @@ func resourceVmwareengineClusterCreate(d *schema.ResourceData, meta interface{}) } else if v, ok := d.GetOkExists("node_type_configs"); !tpgresource.IsEmptyValue(reflect.ValueOf(nodeTypeConfigsProp)) && (ok || !reflect.DeepEqual(v, nodeTypeConfigsProp)) { obj["nodeTypeConfigs"] = nodeTypeConfigsProp } + autoscalingSettingsProp, err := expandVmwareengineClusterAutoscalingSettings(d.Get("autoscaling_settings"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("autoscaling_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(autoscalingSettingsProp)) && (ok || !reflect.DeepEqual(v, autoscalingSettingsProp)) { + obj["autoscalingSettings"] = autoscalingSettingsProp + } url, err := tpgresource.ReplaceVars(d, config, "{{VmwareengineBasePath}}{{parent}}/clusters?clusterId={{name}}") if err != nil { @@ -224,6 +355,9 @@ func resourceVmwareengineClusterRead(d *schema.ResourceData, meta interface{}) e if err := d.Set("node_type_configs", flattenVmwareengineClusterNodeTypeConfigs(res["nodeTypeConfigs"], d, config)); err != nil { return fmt.Errorf("Error reading Cluster: %s", err) } + if err := d.Set("autoscaling_settings", flattenVmwareengineClusterAutoscalingSettings(res["autoscalingSettings"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } return nil } @@ -245,6 +379,12 @@ func resourceVmwareengineClusterUpdate(d *schema.ResourceData, meta interface{}) } else if v, ok := d.GetOkExists("node_type_configs"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nodeTypeConfigsProp)) { obj["nodeTypeConfigs"] = nodeTypeConfigsProp } + autoscalingSettingsProp, err := expandVmwareengineClusterAutoscalingSettings(d.Get("autoscaling_settings"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("autoscaling_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, autoscalingSettingsProp)) { + obj["autoscalingSettings"] = autoscalingSettingsProp + } url, err := tpgresource.ReplaceVars(d, config, "{{VmwareengineBasePath}}{{parent}}/clusters/{{name}}") if err != nil { @@ -258,6 +398,10 @@ func resourceVmwareengineClusterUpdate(d *schema.ResourceData, meta interface{}) if d.HasChange("node_type_configs") { updateMask = append(updateMask, "nodeTypeConfigs.*.nodeCount") } + + if d.HasChange("autoscaling_settings") { + updateMask = append(updateMask, "autoscalingSettings") + } // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) @@ -434,6 +578,250 @@ func flattenVmwareengineClusterNodeTypeConfigsCustomCoreCount(v interface{}, d * return v // let terraform core handle it otherwise } +func flattenVmwareengineClusterAutoscalingSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["autoscaling_policies"] = + 
flattenVmwareengineClusterAutoscalingSettingsAutoscalingPolicies(original["autoscalingPolicies"], d, config) + transformed["min_cluster_node_count"] = + flattenVmwareengineClusterAutoscalingSettingsMinClusterNodeCount(original["minClusterNodeCount"], d, config) + transformed["max_cluster_node_count"] = + flattenVmwareengineClusterAutoscalingSettingsMaxClusterNodeCount(original["maxClusterNodeCount"], d, config) + transformed["cool_down_period"] = + flattenVmwareengineClusterAutoscalingSettingsCoolDownPeriod(original["coolDownPeriod"], d, config) + return []interface{}{transformed} +} +func flattenVmwareengineClusterAutoscalingSettingsAutoscalingPolicies(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.(map[string]interface{}) + transformed := make([]interface{}, 0, len(l)) + for k, raw := range l { + original := raw.(map[string]interface{}) + transformed = append(transformed, map[string]interface{}{ + "autoscale_policy_id": k, + "node_type_id": flattenVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesNodeTypeId(original["nodeTypeId"], d, config), + "scale_out_size": flattenVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesScaleOutSize(original["scaleOutSize"], d, config), + "cpu_thresholds": flattenVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesCpuThresholds(original["cpuThresholds"], d, config), + "consumed_memory_thresholds": flattenVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesConsumedMemoryThresholds(original["consumedMemoryThresholds"], d, config), + "storage_thresholds": flattenVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesStorageThresholds(original["storageThresholds"], d, config), + }) + } + return transformed +} +func flattenVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesNodeTypeId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesScaleOutSize(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesCpuThresholds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["scale_out"] = + flattenVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesCpuThresholdsScaleOut(original["scaleOut"], d, config) + transformed["scale_in"] = + flattenVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesCpuThresholdsScaleIn(original["scaleIn"], d, config) + return []interface{}{transformed} +} +func flattenVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesCpuThresholdsScaleOut(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok 
:= v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesCpuThresholdsScaleIn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesConsumedMemoryThresholds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["scale_out"] = + flattenVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesConsumedMemoryThresholdsScaleOut(original["scaleOut"], d, config) + transformed["scale_in"] = + flattenVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesConsumedMemoryThresholdsScaleIn(original["scaleIn"], d, config) + return []interface{}{transformed} +} +func flattenVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesConsumedMemoryThresholdsScaleOut(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesConsumedMemoryThresholdsScaleIn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesStorageThresholds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["scale_out"] = + flattenVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesStorageThresholdsScaleOut(original["scaleOut"], d, config) + transformed["scale_in"] = + flattenVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesStorageThresholdsScaleIn(original["scaleIn"], d, config) + return []interface{}{transformed} +} +func flattenVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesStorageThresholdsScaleOut(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := 
v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesStorageThresholdsScaleIn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVmwareengineClusterAutoscalingSettingsMinClusterNodeCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVmwareengineClusterAutoscalingSettingsMaxClusterNodeCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVmwareengineClusterAutoscalingSettingsCoolDownPeriod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func expandVmwareengineClusterNodeTypeConfigs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { if v == nil { return map[string]interface{}{}, nil @@ -473,3 +861,218 @@ func expandVmwareengineClusterNodeTypeConfigsNodeCount(v interface{}, d tpgresou func expandVmwareengineClusterNodeTypeConfigsCustomCoreCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } + +func expandVmwareengineClusterAutoscalingSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAutoscalingPolicies, err := expandVmwareengineClusterAutoscalingSettingsAutoscalingPolicies(original["autoscaling_policies"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAutoscalingPolicies); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["autoscalingPolicies"] = transformedAutoscalingPolicies + } + + transformedMinClusterNodeCount, err := expandVmwareengineClusterAutoscalingSettingsMinClusterNodeCount(original["min_cluster_node_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinClusterNodeCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minClusterNodeCount"] = transformedMinClusterNodeCount + } + + transformedMaxClusterNodeCount, err := 
expandVmwareengineClusterAutoscalingSettingsMaxClusterNodeCount(original["max_cluster_node_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxClusterNodeCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxClusterNodeCount"] = transformedMaxClusterNodeCount + } + + transformedCoolDownPeriod, err := expandVmwareengineClusterAutoscalingSettingsCoolDownPeriod(original["cool_down_period"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCoolDownPeriod); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["coolDownPeriod"] = transformedCoolDownPeriod + } + + return transformed, nil +} + +func expandVmwareengineClusterAutoscalingSettingsAutoscalingPolicies(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { + if v == nil { + return map[string]interface{}{}, nil + } + m := make(map[string]interface{}) + for _, raw := range v.(*schema.Set).List() { + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNodeTypeId, err := expandVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesNodeTypeId(original["node_type_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNodeTypeId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nodeTypeId"] = transformedNodeTypeId + } + + transformedScaleOutSize, err := expandVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesScaleOutSize(original["scale_out_size"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScaleOutSize); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scaleOutSize"] = transformedScaleOutSize + } + + transformedCpuThresholds, err := expandVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesCpuThresholds(original["cpu_thresholds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCpuThresholds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cpuThresholds"] = transformedCpuThresholds + } + + transformedConsumedMemoryThresholds, err := expandVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesConsumedMemoryThresholds(original["consumed_memory_thresholds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConsumedMemoryThresholds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["consumedMemoryThresholds"] = transformedConsumedMemoryThresholds + } + + transformedStorageThresholds, err := expandVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesStorageThresholds(original["storage_thresholds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStorageThresholds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["storageThresholds"] = transformedStorageThresholds + } + + transformedAutoscalePolicyId, err := tpgresource.ExpandString(original["autoscale_policy_id"], d, config) + if err != nil { + return nil, err + } + m[transformedAutoscalePolicyId] = transformed + } + return m, nil +} + +func expandVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesNodeTypeId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesScaleOutSize(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesCpuThresholds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScaleOut, err := expandVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesCpuThresholdsScaleOut(original["scale_out"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScaleOut); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scaleOut"] = transformedScaleOut + } + + transformedScaleIn, err := expandVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesCpuThresholdsScaleIn(original["scale_in"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScaleIn); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scaleIn"] = transformedScaleIn + } + + return transformed, nil +} + +func expandVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesCpuThresholdsScaleOut(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesCpuThresholdsScaleIn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesConsumedMemoryThresholds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScaleOut, err := expandVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesConsumedMemoryThresholdsScaleOut(original["scale_out"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScaleOut); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scaleOut"] = transformedScaleOut + } + + transformedScaleIn, err := expandVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesConsumedMemoryThresholdsScaleIn(original["scale_in"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScaleIn); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scaleIn"] = transformedScaleIn + } + + return transformed, nil +} + +func expandVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesConsumedMemoryThresholdsScaleOut(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesConsumedMemoryThresholdsScaleIn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesStorageThresholds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + 
transformed := make(map[string]interface{}) + + transformedScaleOut, err := expandVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesStorageThresholdsScaleOut(original["scale_out"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScaleOut); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scaleOut"] = transformedScaleOut + } + + transformedScaleIn, err := expandVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesStorageThresholdsScaleIn(original["scale_in"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScaleIn); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scaleIn"] = transformedScaleIn + } + + return transformed, nil +} + +func expandVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesStorageThresholdsScaleOut(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVmwareengineClusterAutoscalingSettingsAutoscalingPoliciesStorageThresholdsScaleIn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVmwareengineClusterAutoscalingSettingsMinClusterNodeCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVmwareengineClusterAutoscalingSettingsMaxClusterNodeCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVmwareengineClusterAutoscalingSettingsCoolDownPeriod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vmwareengine/resource_vmwareengine_cluster_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vmwareengine/resource_vmwareengine_cluster_sweeper.go index 85d223f3457..cee327b0061 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vmwareengine/resource_vmwareengine_cluster_sweeper.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vmwareengine/resource_vmwareengine_cluster_sweeper.go @@ -4,9 +4,7 @@ package vmwareengine import ( "context" - "fmt" "log" - "strings" "testing" "github.com/hashicorp/terraform-provider-google/google/envvar" @@ -59,15 +57,26 @@ func testSweepVmwareengineCluster(region string) error { } log.Printf("[INFO][SWEEPER_LOG] looking for parent resources in location '%s'.", location) - privateCloudNames, err := listPrivateCloudsInLocation(d, config) + + parentResponseField := "privateClouds" + parentListUrlTemplate := "https://vmwareengine.googleapis.com/v1/projects/{{project}}/locations/{{location}}/privateClouds" + parentNames, err := sweeper.ListParentResourcesInLocation(d, config, parentListUrlTemplate, parentResponseField) if err != nil { log.Printf("[INFO][SWEEPER_LOG] error finding parent resources in location %s: %s", location, err) continue } - for _, parent := range privateCloudNames { + for _, parent := range parentNames { // `parent` will be string of form projects/my-project/locations/us-central1-a/privateClouds/my-cloud - listUrl := fmt.Sprintf("https://vmwareengine.googleapis.com/v1/projects/%s/clusters", parent) + // 
Change on each loop, so new value used in tpgresource.ReplaceVars + d.Set("parent", parent) + + listTemplate := "https://vmwareengine.googleapis.com/v1/{{parent}}/clusters" + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + continue + } res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, @@ -136,41 +145,3 @@ func testSweepVmwareengineCluster(region string) error { } return nil } - -func listPrivateCloudsInLocation(d *tpgresource.ResourceDataMock, config *transport_tpg.Config) ([]string, error) { - listTemplate := strings.Split("https://vmwareengine.googleapis.com/v1/projects/{{project}}/locations/{{location}}/privateClouds", "?")[0] - listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) - return nil, err - } - - res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "GET", - Project: config.Project, - RawURL: listUrl, - UserAgent: config.UserAgent, - }) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) - return nil, err - } - - resourceList, ok := res["privateClouds"] - if !ok { - log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") - return nil, fmt.Errorf("nothing found in response") - } - - rl := resourceList.([]interface{}) - privateCloudNames := []string{} - for _, r := range rl { - resource := r.(map[string]interface{}) - if name, ok := resource["name"]; ok { - privateCloudNames = append(privateCloudNames, name.(string)) - } - - } - return privateCloudNames, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vmwareengine/resource_vmwareengine_external_access_rule_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vmwareengine/resource_vmwareengine_external_access_rule_sweeper.go index 7012b80410b..b5b19fa4277 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vmwareengine/resource_vmwareengine_external_access_rule_sweeper.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vmwareengine/resource_vmwareengine_external_access_rule_sweeper.go @@ -1,26 +1,10 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 - -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - package vmwareengine import ( "context" "log" - "strings" "testing" "github.com/hashicorp/terraform-provider-google/google/envvar" @@ -53,87 +37,111 @@ func testSweepVmwareengineExternalAccessRule(region string) error { t := &testing.T{} billingId := envvar.GetTestBillingAccountFromEnv(t) - // Setup variables to replace in list template - d := &tpgresource.ResourceDataMock{ - FieldsInSchema: map[string]interface{}{ - "project": config.Project, - "region": region, - "location": region, - "zone": "-", - "billing_account": billingId, - }, - } - - listTemplate := strings.Split("https://vmwareengine.googleapis.com/v1/{{parent}}/externalAccessRules", "?")[0] - listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) - return nil - } - - res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "GET", - Project: config.Project, - RawURL: listUrl, - UserAgent: config.UserAgent, - }) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) - return nil - } - - resourceList, ok := res["externalAccessRules"] - if !ok { - log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") - return nil - } - - rl := resourceList.([]interface{}) - - log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) - // Keep count of items that aren't sweepable for logging. - nonPrefixCount := 0 - for _, ri := range rl { - obj := ri.(map[string]interface{}) - if obj["name"] == nil { - log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) - return nil + // List of location values includes: + // * zones used for this resource type's acc tests in the past + // * the 'region' passed to the sweeper + locations := []string{region, "us-central1", "southamerica-west1", "me-west1"} + log.Printf("[INFO][SWEEPER_LOG] Sweeping will include these locations: %v.", locations) + for _, location := range locations { + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": location, + "location": location, + "zone": location, + "billing_account": billingId, + }, } - name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) - // Skip resources that shouldn't be sweeped - if !sweeper.IsSweepableTestResource(name) { - nonPrefixCount++ - continue - } + log.Printf("[INFO][SWEEPER_LOG] looking for parent resources in location '%s'.", location) - deleteTemplate := "https://vmwareengine.googleapis.com/v1/{{parent}}/externalAccessRules/{{name}}" - deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) - return nil - } - deleteUrl = deleteUrl + name - - // Don't wait on operations as we may have a lot to delete - _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "DELETE", - Project: config.Project, - RawURL: deleteUrl, - UserAgent: config.UserAgent, - }) + parentResponseField := "networkPolicies" + parentListUrlTemplate := "https://vmwareengine.googleapis.com/v1/projects/{{project}}/locations/{{location}}/networkPolicies" + parentNames, err := sweeper.ListParentResourcesInLocation(d, config, parentListUrlTemplate, parentResponseField) if err != 
nil { - log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) - } else { - log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + log.Printf("[INFO][SWEEPER_LOG] error finding parent resources in location %s: %s", location, err) + continue } - } - if nonPrefixCount > 0 { - log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + for _, parent := range parentNames { + + // `parent` will be string of form projects/my-project/locations/us-central1/networkPolicies/my-policy + // Change on each loop, so new value used in tpgresource.ReplaceVars + d.Set("parent", parent) + + listTemplate := "https://vmwareengine.googleapis.com/v1/{{parent}}/externalAccessRules" + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["externalAccessRules"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be swept + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://vmwareengine.googleapis.com/v1/{{parent}}/externalAccessRules/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + } } - return nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vmwareengine/resource_vmwareengine_external_address_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vmwareengine/resource_vmwareengine_external_address_sweeper.go index 3f90b490b20..4a2def60245 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vmwareengine/resource_vmwareengine_external_address_sweeper.go +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vmwareengine/resource_vmwareengine_external_address_sweeper.go @@ -1,26 +1,10 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 - -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - package vmwareengine import ( "context" "log" - "strings" "testing" "github.com/hashicorp/terraform-provider-google/google/envvar" @@ -53,87 +37,111 @@ func testSweepVmwareengineExternalAddress(region string) error { t := &testing.T{} billingId := envvar.GetTestBillingAccountFromEnv(t) - // Setup variables to replace in list template - d := &tpgresource.ResourceDataMock{ - FieldsInSchema: map[string]interface{}{ - "project": config.Project, - "region": region, - "location": region, - "zone": "-", - "billing_account": billingId, - }, - } - - listTemplate := strings.Split("https://vmwareengine.googleapis.com/v1/{{parent}}/externalAddresses", "?")[0] - listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) - return nil - } - - res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "GET", - Project: config.Project, - RawURL: listUrl, - UserAgent: config.UserAgent, - }) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) - return nil - } - - resourceList, ok := res["externalAddresss"] - if !ok { - log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") - return nil - } - - rl := resourceList.([]interface{}) - - log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) - // Keep count of items that aren't sweepable for logging. 
- nonPrefixCount := 0 - for _, ri := range rl { - obj := ri.(map[string]interface{}) - if obj["name"] == nil { - log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) - return nil + // List of location values includes: + // * zones used for this resource type's acc tests in the past + // * the 'region' passed to the sweeper + locations := []string{region, "us-central1-a", "us-central1-b", "southamerica-west1-a", "southamerica-west1-b", "me-west1-a", "me-west1-b"} + log.Printf("[INFO][SWEEPER_LOG] Sweeping will include these locations: %v.", locations) + for _, location := range locations { + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": location, + "location": location, + "zone": location, + "billing_account": billingId, + "parent": "", // Set in loop below + }, } - name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) - // Skip resources that shouldn't be sweeped - if !sweeper.IsSweepableTestResource(name) { - nonPrefixCount++ - continue - } + log.Printf("[INFO][SWEEPER_LOG] looking for parent resources in location '%s'.", location) - deleteTemplate := "https://vmwareengine.googleapis.com/v1/{{parent}}/externalAddresses/{{name}}" - deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + parentResponseField := "privateClouds" + parentListUrlTemplate := "https://vmwareengine.googleapis.com/v1/projects/{{project}}/locations/{{location}}/privateClouds" + parentNames, err := sweeper.ListParentResourcesInLocation(d, config, parentListUrlTemplate, parentResponseField) if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) - return nil + log.Printf("[INFO][SWEEPER_LOG] error finding parent resources in location %s: %s", location, err) + continue } - deleteUrl = deleteUrl + name - - // Don't wait on operations as we may have a lot to delete - _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "DELETE", - Project: config.Project, - RawURL: deleteUrl, - UserAgent: config.UserAgent, - }) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) - } else { - log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + for _, parent := range parentNames { + + // `parent` will be string of form projects/my-project/locations/us-central1-a/privateClouds/my-cloud + // Change on each loop, so new value used in tpgresource.ReplaceVars + d.Set("parent", parent) + + listTemplate := "https://vmwareengine.googleapis.com/v1/{{parent}}/externalAddresses" + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + continue + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + continue + } + + resourceList, ok := res["externalAddresses"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + continue + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
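+ // Only test-created resources, identified by a sweepable name prefix, are deleted; everything else is tallied in nonPrefixCount and logged.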
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + continue + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be swept + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://vmwareengine.googleapis.com/v1/{{parent}}/externalAddresses/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + continue + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + } } - - if nonPrefixCount > 0 { - log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) - } - return nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vmwareengine/resource_vmwareengine_private_cloud.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vmwareengine/resource_vmwareengine_private_cloud.go index e64d0ebe1aa..e4ecf04449b 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vmwareengine/resource_vmwareengine_private_cloud.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vmwareengine/resource_vmwareengine_private_cloud.go @@ -27,19 +27,93 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/googleapi" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "github.com/hashicorp/terraform-provider-google/google/verify" ) -func vmwareenginePrivateCloudStandardTypeDiffSuppressFunc(_, old, new string, _ *schema.ResourceData) bool { +func vmwareenginePrivateCloudStandardTypeDiffSuppressFunc(_, old, new string, d *schema.ResourceData) bool { if (old == "STANDARD" && new == "") || (old == "" && new == "STANDARD") { return true } + if isMultiNodePrivateCloud(d) && old == "TIME_LIMITED" && new == "STANDARD" { + log.Printf("[DEBUG] Multinode Private Cloud found, facilitating TYPE change to STANDARD") + return true + } return false } +func isMultiNodePrivateCloud(d *schema.ResourceData) bool { + nodeConfigMap := d.Get("management_cluster.0.node_type_configs").(*schema.Set).List() + totalNodeCount := 0 + for _, nodeConfig := range nodeConfigMap { + configMap, ok := nodeConfig.(map[string]interface{}) + if !ok { + log.Printf("[DEBUG] Invalid node configuration format for private cloud.") + continue + } + nodeCount, ok := configMap["node_count"].(int) + if !ok { + log.Printf("[DEBUG] Invalid node_count format for private cloud.") + continue + } + 
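+ // Sum node_count across all node type configs; a total above 2 marks the private cloud as multinode, which is what permits the TIME_LIMITED to STANDARD type change above.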
totalNodeCount += nodeCount + } + log.Printf("[DEBUG] The node count of the private cloud is found to be %v nodes.", totalNodeCount) + if totalNodeCount > 2 { + return true + } + return false +} + +func isPrivateCloudInDeletedState(config *transport_tpg.Config, d *schema.ResourceData, billingProject string, userAgent string) (bool, error) { + baseurl, err := tpgresource.ReplaceVars(d, config, "{{VmwareengineBasePath}}projects/{{project}}/locations/{{location}}/privateClouds/{{name}}") + if err != nil { + return false, err + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: baseurl, + UserAgent: userAgent, + }) + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[DEBUG] No existing private cloud found") + return false, nil + } + return false, err + } + // if resource exists but is marked for deletion + v, ok := res["state"] + if ok && v.(string) == "DELETED" { + log.Printf("[DEBUG] The Private cloud exists and is marked for deletion.") + return true, nil + } + return false, nil +} + +// Check if private cloud is absent or if it exists in a deleted state. +func pollCheckForPrivateCloudAbsence(resp map[string]interface{}, respErr error) transport_tpg.PollResult { + if respErr != nil { + if transport_tpg.IsGoogleApiErrorWithCode(respErr, 404) { + return transport_tpg.SuccessPollResult() + } + return transport_tpg.ErrorPollResult(respErr) + } + // if resource exists but is marked for deletion + log.Printf("[DEBUG] Fetching state of the private cloud.") + v, ok := resp["state"] + if ok && v.(string) == "DELETED" { + log.Printf("[DEBUG] The Private cloud has been successfully marked for delayed deletion.") + return transport_tpg.SuccessPollResult() + } + return transport_tpg.PendingStatusPollResult("found") +} + func ResourceVmwareenginePrivateCloud() *schema.Resource { return &schema.Resource{ Create: resourceVmwareenginePrivateCloudCreate, @@ -85,6 +159,132 @@ func ResourceVmwareenginePrivateCloud() *schema.Resource { * Not formatted as a UUID * Complies with RFC 1034 (https://datatracker.ietf.org/doc/html/rfc1034) (section 3.5)`, }, + "autoscaling_settings": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration of the autoscaling applied to this cluster +Private cloud must have a minimum of 3 nodes to add autoscale settings`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "autoscaling_policies": { + Type: schema.TypeSet, + Required: true, + Description: `The map with autoscaling policies applied to the cluster. +The key is the identifier of the policy. +It must meet the following requirements: + * Only contains 1-63 alphanumeric characters and hyphens + * Begins with an alphabetical character + * Ends with a non-hyphen character + * Not formatted as a UUID + * Complies with [RFC 1034](https://datatracker.ietf.org/doc/html/rfc1034) (section 3.5) + +Currently the map must contain only one element +that describes the autoscaling policy for compute nodes.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "autoscale_policy_id": { + Type: schema.TypeString, + Required: true, + }, + "node_type_id": { + Type: schema.TypeString, + Required: true, + Description: `The canonical identifier of the node type to add or remove.`, + }, + "scale_out_size": { + Type: schema.TypeInt, + Required: true, + Description: `Number of nodes to add to a cluster during a scale-out operation. 
+Must be divisible by 2 for stretched clusters.`, + }, + "consumed_memory_thresholds": { + Type: schema.TypeList, + Optional: true, + Description: `Utilization thresholds pertaining to amount of consumed memory.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "scale_in": { + Type: schema.TypeInt, + Required: true, + Description: `The utilization triggering the scale-in operation in percent.`, + }, + "scale_out": { + Type: schema.TypeInt, + Required: true, + Description: `The utilization triggering the scale-out operation in percent.`, + }, + }, + }, + }, + "cpu_thresholds": { + Type: schema.TypeList, + Optional: true, + Description: `Utilization thresholds pertaining to CPU utilization.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "scale_in": { + Type: schema.TypeInt, + Required: true, + Description: `The utilization triggering the scale-in operation in percent.`, + }, + "scale_out": { + Type: schema.TypeInt, + Required: true, + Description: `The utilization triggering the scale-out operation in percent.`, + }, + }, + }, + }, + "storage_thresholds": { + Type: schema.TypeList, + Optional: true, + Description: `Utilization thresholds pertaining to amount of consumed storage.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "scale_in": { + Type: schema.TypeInt, + Required: true, + Description: `The utilization triggering the scale-in operation in percent.`, + }, + "scale_out": { + Type: schema.TypeInt, + Required: true, + Description: `The utilization triggering the scale-out operation in percent.`, + }, + }, + }, + }, + }, + }, + }, + "cool_down_period": { + Type: schema.TypeString, + Optional: true, + Description: `The minimum duration between consecutive autoscale operations. +It starts once addition or removal of nodes is fully completed. +Minimum cool down period is 30m. +Cool down period must be in whole minutes (for example, 30m, 31m, 50m). +Mandatory for successful addition of autoscaling settings in cluster.`, + }, + "max_cluster_node_count": { + Type: schema.TypeInt, + Optional: true, + Description: `Maximum number of nodes of any type in a cluster. +Mandatory for successful addition of autoscaling settings in cluster.`, + }, + "min_cluster_node_count": { + Type: schema.TypeInt, + Optional: true, + Description: `Minimum number of nodes of any type in a cluster. +Mandatory for successful addition of autoscaling settings in cluster.`, + }, + }, + }, + }, "node_type_configs": { Type: schema.TypeSet, Optional: true, @@ -299,6 +499,16 @@ the form: projects/{project_number}/locations/{location}/vmwareEngineNetworks/{v }, }, }, + "deletion_delay_hours": { + Type: schema.TypeInt, + Optional: true, + Description: `The number of hours to delay this request. You can set this value to an hour between 0 to 8, where setting it to 0 starts the deletion request immediately. If no value is set, a default value is set at the API Level.`, + }, + "send_deletion_delay_hours_if_zero": { + Type: schema.TypeBool, + Optional: true, + Description: `While set true, deletion_delay_hours value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the deletion_delay_hours field. 
It can be used both alone and together with deletion_delay_hours.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, @@ -363,6 +573,21 @@ func resourceVmwareenginePrivateCloudCreate(d *schema.ResourceData, meta interfa } headers := make(http.Header) + // Check if the private cloud exists in a deleted state + pcMarkedForDeletion, err := isPrivateCloudInDeletedState(config, d, billingProject, userAgent) + if err != nil { + return fmt.Errorf("Error checking if Private Cloud exists and is marked for deletion: %s", err) + } + if pcMarkedForDeletion { + log.Printf("[DEBUG] Private Cloud exists and is marked for deletion. Triggering UNDELETE of the Private Cloud.\n") + url, err = tpgresource.ReplaceVars(d, config, "{{VmwareengineBasePath}}projects/{{project}}/locations/{{location}}/privateClouds/{{name}}:undelete") + if err != nil { + return err + } + obj = make(map[string]interface{}) + } else { + log.Printf("[DEBUG] Private Cloud is not marked for deletion. Triggering CREATE of the Private Cloud.\n") + } res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, Method: "POST", @@ -395,6 +620,61 @@ func resourceVmwareenginePrivateCloudCreate(d *schema.ResourceData, meta interfa return fmt.Errorf("Error waiting to create PrivateCloud: %s", err) } + mgmtClusterProp, err := expandVmwareenginePrivateCloudManagementCluster(d.Get("management_cluster"), d, config) + if v, ok := d.GetOkExists("management_cluster"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, mgmtClusterProp)) { + obj["managementCluster"] = mgmtClusterProp + } + + mgmtMap := mgmtClusterProp.(map[string]interface{}) + parentUrl, err := tpgresource.ReplaceVars(d, config, "{{VmwareengineBasePath}}projects/{{project}}/locations/{{location}}/privateClouds/{{name}}") + if err != nil { + return err + } + + clusterUrl := fmt.Sprintf("%s/clusters/%s", parentUrl, mgmtMap["clusterId"]) + clusterUpdateMask := []string{} + clusterObj := make(map[string]interface{}) + + if v, ok := d.GetOkExists("management_cluster"); !tpgresource.IsEmptyValue(reflect.ValueOf(mgmtClusterProp)) && (ok || !reflect.DeepEqual(v, mgmtClusterProp)) { + clusterObj["autoscalingSettings"] = mgmtMap["autoscalingSettings"] + } + + if d.HasChange("management_cluster") { + clusterUpdateMask = append(clusterUpdateMask, "autoscalingSettings") + } + + clusterPatchUrl, err := transport_tpg.AddQueryParams(clusterUrl, map[string]string{"updateMask": strings.Join(clusterUpdateMask, ",")}) + if err != nil { + return err + } + + // Check if there is anything to update, to avoid an unnecessary API call. 
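+ // Autoscaling settings live on the management cluster rather than on the private cloud itself, so they are applied here with a follow-up PATCH (updateMask=autoscalingSettings) once the CREATE or UNDELETE operation has completed.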
+ if len(clusterUpdateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: clusterPatchUrl, + UserAgent: userAgent, + Body: clusterObj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating management cluster %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating management cluster %q: %#v", d.Id(), res) + } + + err = VmwareengineOperationWaitTime( + config, res, project, "Updating Management Cluster", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + } + log.Printf("[DEBUG] Finished creating PrivateCloud %q: %#v", d.Id(), res) return resourceVmwareenginePrivateCloudRead(d, meta) @@ -451,6 +731,7 @@ func resourceVmwareenginePrivateCloudRead(d *schema.ResourceData, meta interface return nil } + // Explicitly set virtual fields to default values if unset if err := d.Set("project", project); err != nil { return fmt.Errorf("Error reading PrivateCloud: %s", err) } @@ -573,10 +854,12 @@ func resourceVmwareenginePrivateCloudUpdate(d *schema.ResourceData, meta interfa if v, ok := d.GetOkExists("management_cluster"); !tpgresource.IsEmptyValue(reflect.ValueOf(mgmtClusterProp)) && (ok || !reflect.DeepEqual(v, mgmtClusterProp)) { clusterObj["nodeTypeConfigs"] = mgmtMap["nodeTypeConfigs"] + clusterObj["autoscalingSettings"] = mgmtMap["autoscalingSettings"] } if d.HasChange("management_cluster") { clusterUpdateMask = append(clusterUpdateMask, "nodeTypeConfigs.*.nodeCount") + clusterUpdateMask = append(clusterUpdateMask, "autoscalingSettings") } clusterPatchUrl, err := transport_tpg.AddQueryParams(clusterUrl, map[string]string{"updateMask": strings.Join(clusterUpdateMask, ",")}) @@ -628,7 +911,7 @@ func resourceVmwareenginePrivateCloudDelete(d *schema.ResourceData, meta interfa } billingProject = project - url, err := tpgresource.ReplaceVars(d, config, "{{VmwareengineBasePath}}projects/{{project}}/locations/{{location}}/privateClouds/{{name}}?delay_hours=0") + url, err := tpgresource.ReplaceVars(d, config, "{{VmwareengineBasePath}}projects/{{project}}/locations/{{location}}/privateClouds/{{name}}") if err != nil { return err } @@ -641,6 +924,14 @@ func resourceVmwareenginePrivateCloudDelete(d *schema.ResourceData, meta interfa } headers := make(http.Header) + // Delay deletion of the Private Cloud if a deletionDelayHours value is set + deletionDelayHours := d.Get("deletion_delay_hours").(int) + if deletionDelayHours > 0 || (deletionDelayHours == 0 && d.Get("send_deletion_delay_hours_if_zero").(bool)) { + log.Printf("[DEBUG] Triggering delete of the Private Cloud with a delay of %v hours.\n", deletionDelayHours) + url = url + "?delay_hours=" + fmt.Sprintf("%v", deletionDelayHours) + } else { + log.Printf("[DEBUG] No deletion delay provided, triggering DELETE API without setting delay hours.\n") + } log.Printf("[DEBUG] Deleting PrivateCloud %q", d.Id()) res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ @@ -700,7 +991,7 @@ func resourceVmwareenginePrivateCloudDelete(d *schema.ResourceData, meta interfa } } - err = transport_tpg.PollingWaitTime(privateCloudPollRead(d, meta), transport_tpg.PollCheckForAbsence, "Deleting PrivateCloud", d.Timeout(schema.TimeoutDelete), 10) + err = transport_tpg.PollingWaitTime(privateCloudPollRead(d, meta), pollCheckForPrivateCloudAbsence, "Deleting PrivateCloud", d.Timeout(schema.TimeoutDelete), 10) if err != nil { return fmt.Errorf("Error waiting 
to delete PrivateCloud: %s", err) } @@ -726,6 +1017,8 @@ func resourceVmwareenginePrivateCloudImport(d *schema.ResourceData, meta interfa } d.SetId(id) + // Explicitly set virtual fields to default values on import + return []*schema.ResourceData{d}, nil } @@ -810,6 +1103,8 @@ func flattenVmwareenginePrivateCloudManagementCluster(v interface{}, d *schema.R flattenVmwareenginePrivateCloudManagementClusterNodeTypeConfigs(original["nodeTypeConfigs"], d, config) transformed["stretched_cluster_config"] = flattenVmwareenginePrivateCloudManagementClusterStretchedClusterConfig(original["stretchedClusterConfig"], d, config) + transformed["autoscaling_settings"] = + flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettings(original["autoscalingSettings"], d, config) return []interface{}{transformed} } func flattenVmwareenginePrivateCloudManagementClusterClusterId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { @@ -889,6 +1184,250 @@ func flattenVmwareenginePrivateCloudManagementClusterStretchedClusterConfigSecon return v } +func flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["autoscaling_policies"] = + flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPolicies(original["autoscalingPolicies"], d, config) + transformed["min_cluster_node_count"] = + flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsMinClusterNodeCount(original["minClusterNodeCount"], d, config) + transformed["max_cluster_node_count"] = + flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsMaxClusterNodeCount(original["maxClusterNodeCount"], d, config) + transformed["cool_down_period"] = + flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsCoolDownPeriod(original["coolDownPeriod"], d, config) + return []interface{}{transformed} +} +func flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPolicies(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.(map[string]interface{}) + transformed := make([]interface{}, 0, len(l)) + for k, raw := range l { + original := raw.(map[string]interface{}) + transformed = append(transformed, map[string]interface{}{ + "autoscale_policy_id": k, + "node_type_id": flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesNodeTypeId(original["nodeTypeId"], d, config), + "scale_out_size": flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesScaleOutSize(original["scaleOutSize"], d, config), + "cpu_thresholds": flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesCpuThresholds(original["cpuThresholds"], d, config), + "consumed_memory_thresholds": flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesConsumedMemoryThresholds(original["consumedMemoryThresholds"], d, config), + "storage_thresholds": flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesStorageThresholds(original["storageThresholds"], d, config), + }) + } + return transformed +} +func flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesNodeTypeId(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesScaleOutSize(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesCpuThresholds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["scale_out"] = + flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesCpuThresholdsScaleOut(original["scaleOut"], d, config) + transformed["scale_in"] = + flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesCpuThresholdsScaleIn(original["scaleIn"], d, config) + return []interface{}{transformed} +} +func flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesCpuThresholdsScaleOut(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesCpuThresholdsScaleIn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesConsumedMemoryThresholds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["scale_out"] = + flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesConsumedMemoryThresholdsScaleOut(original["scaleOut"], d, config) + transformed["scale_in"] = + flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesConsumedMemoryThresholdsScaleIn(original["scaleIn"], d, config) + return []interface{}{transformed} +} +func flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesConsumedMemoryThresholdsScaleOut(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := 
tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesConsumedMemoryThresholdsScaleIn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesStorageThresholds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["scale_out"] = + flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesStorageThresholdsScaleOut(original["scaleOut"], d, config) + transformed["scale_in"] = + flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesStorageThresholdsScaleIn(original["scaleIn"], d, config) + return []interface{}{transformed} +} +func flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesStorageThresholdsScaleOut(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesStorageThresholdsScaleIn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsMinClusterNodeCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsMaxClusterNodeCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err 
== nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVmwareenginePrivateCloudManagementClusterAutoscalingSettingsCoolDownPeriod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenVmwareenginePrivateCloudHcx(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil @@ -1095,6 +1634,13 @@ func expandVmwareenginePrivateCloudManagementCluster(v interface{}, d tpgresourc transformed["stretchedClusterConfig"] = transformedStretchedClusterConfig } + transformedAutoscalingSettings, err := expandVmwareenginePrivateCloudManagementClusterAutoscalingSettings(original["autoscaling_settings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAutoscalingSettings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["autoscalingSettings"] = transformedAutoscalingSettings + } + return transformed, nil } @@ -1176,6 +1722,221 @@ func expandVmwareenginePrivateCloudManagementClusterStretchedClusterConfigSecond return v, nil } +func expandVmwareenginePrivateCloudManagementClusterAutoscalingSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAutoscalingPolicies, err := expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPolicies(original["autoscaling_policies"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAutoscalingPolicies); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["autoscalingPolicies"] = transformedAutoscalingPolicies + } + + transformedMinClusterNodeCount, err := expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsMinClusterNodeCount(original["min_cluster_node_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinClusterNodeCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minClusterNodeCount"] = transformedMinClusterNodeCount + } + + transformedMaxClusterNodeCount, err := expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsMaxClusterNodeCount(original["max_cluster_node_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxClusterNodeCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxClusterNodeCount"] = transformedMaxClusterNodeCount + } + + transformedCoolDownPeriod, err := expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsCoolDownPeriod(original["cool_down_period"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCoolDownPeriod); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["coolDownPeriod"] = transformedCoolDownPeriod + } + + return transformed, nil +} + +func expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPolicies(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { + if v == nil { + return map[string]interface{}{}, nil + } + m := make(map[string]interface{}) + 
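+ // Terraform models the policies as a set of blocks, while the API expects a map; each block becomes a map entry keyed by its autoscale_policy_id.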
for _, raw := range v.(*schema.Set).List() { + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNodeTypeId, err := expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesNodeTypeId(original["node_type_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNodeTypeId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nodeTypeId"] = transformedNodeTypeId + } + + transformedScaleOutSize, err := expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesScaleOutSize(original["scale_out_size"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScaleOutSize); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scaleOutSize"] = transformedScaleOutSize + } + + transformedCpuThresholds, err := expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesCpuThresholds(original["cpu_thresholds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCpuThresholds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cpuThresholds"] = transformedCpuThresholds + } + + transformedConsumedMemoryThresholds, err := expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesConsumedMemoryThresholds(original["consumed_memory_thresholds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConsumedMemoryThresholds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["consumedMemoryThresholds"] = transformedConsumedMemoryThresholds + } + + transformedStorageThresholds, err := expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesStorageThresholds(original["storage_thresholds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStorageThresholds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["storageThresholds"] = transformedStorageThresholds + } + + transformedAutoscalePolicyId, err := tpgresource.ExpandString(original["autoscale_policy_id"], d, config) + if err != nil { + return nil, err + } + m[transformedAutoscalePolicyId] = transformed + } + return m, nil +} + +func expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesNodeTypeId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesScaleOutSize(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesCpuThresholds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScaleOut, err := expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesCpuThresholdsScaleOut(original["scale_out"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScaleOut); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scaleOut"] = 
transformedScaleOut + } + + transformedScaleIn, err := expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesCpuThresholdsScaleIn(original["scale_in"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScaleIn); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scaleIn"] = transformedScaleIn + } + + return transformed, nil +} + +func expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesCpuThresholdsScaleOut(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesCpuThresholdsScaleIn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesConsumedMemoryThresholds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScaleOut, err := expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesConsumedMemoryThresholdsScaleOut(original["scale_out"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScaleOut); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scaleOut"] = transformedScaleOut + } + + transformedScaleIn, err := expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesConsumedMemoryThresholdsScaleIn(original["scale_in"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScaleIn); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scaleIn"] = transformedScaleIn + } + + return transformed, nil +} + +func expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesConsumedMemoryThresholdsScaleOut(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesConsumedMemoryThresholdsScaleIn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesStorageThresholds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScaleOut, err := expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesStorageThresholdsScaleOut(original["scale_out"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScaleOut); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scaleOut"] = transformedScaleOut + } + + transformedScaleIn, err := expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesStorageThresholdsScaleIn(original["scale_in"], d, config) + if err != nil { + 
return nil, err + } else if val := reflect.ValueOf(transformedScaleIn); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scaleIn"] = transformedScaleIn + } + + return transformed, nil +} + +func expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesStorageThresholdsScaleOut(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsAutoscalingPoliciesStorageThresholdsScaleIn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsMinClusterNodeCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsMaxClusterNodeCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVmwareenginePrivateCloudManagementClusterAutoscalingSettingsCoolDownPeriod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandVmwareenginePrivateCloudType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vmwareengine/resource_vmwareengine_private_cloud_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vmwareengine/resource_vmwareengine_private_cloud_sweeper.go index ee26c9ac01f..2cbcd9d5a49 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vmwareengine/resource_vmwareengine_private_cloud_sweeper.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vmwareengine/resource_vmwareengine_private_cloud_sweeper.go @@ -42,7 +42,7 @@ func testSweepVmwareenginePrivateCloud(region string) error { // List of location values includes: // * zones used for this resource type's acc tests in the past // * the 'region' passed to the sweeper - locations := []string{region, "southamerica-west1-a", "me-west1-a"} + locations := []string{region, "southamerica-west1-a", "me-west1-a", "me-west1-b"} log.Printf("[INFO][SWEEPER_LOG] Sweeping will include these locations: %v.", locations) for _, location := range locations { log.Printf("[INFO][SWEEPER_LOG] Beginning the process of sweeping location '%s'.", location) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vpcaccess/resource_vpc_access_connector.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vpcaccess/resource_vpc_access_connector.go index 9d3879fb7b9..bd064cd6d01 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vpcaccess/resource_vpc_access_connector.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vpcaccess/resource_vpc_access_connector.go @@ -79,17 +79,18 @@ func ResourceVPCAccessConnector() *schema.Resource { ForceNew: true, Description: `Maximum value of instances in autoscaling 
group underlying the connector. Value must be between 3 and 10, inclusive. Must be higher than the value specified by min_instances.`, + ConflictsWith: []string{"max_throughput"}, }, "max_throughput": { Type: schema.TypeInt, + Computed: true, Optional: true, ForceNew: true, ValidateFunc: validation.IntBetween(200, 1000), Description: `Maximum throughput of the connector in Mbps, must be greater than 'min_throughput'. Default is 300. Refers to the expected throughput when using an e2-micro machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by -min_throughput. If both max_throughput and max_instances are provided, max_instances takes precedence over max_throughput. The use of -max_throughput is discouraged in favor of max_instances.`, - Default: 300, +min_throughput. Only one of 'max_throughput' and 'max_instances' can be specified. The use of max_throughput is discouraged in favor of max_instances.`, + ConflictsWith: []string{"max_instances"}, }, "min_instances": { Type: schema.TypeInt, @@ -98,16 +99,18 @@ max_throughput is discouraged in favor of max_instances.`, ForceNew: true, Description: `Minimum value of instances in autoscaling group underlying the connector. Value must be between 2 and 9, inclusive. Must be lower than the value specified by max_instances.`, + ConflictsWith: []string{"min_throughput"}, }, "min_throughput": { Type: schema.TypeInt, + Computed: true, Optional: true, ForceNew: true, ValidateFunc: validation.IntBetween(200, 1000), Description: `Minimum throughput of the connector in Mbps. Default and min is 200. Refers to the expected throughput when using an e2-micro machine type. -Value must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. If both min_throughput and -min_instances are provided, min_instances takes precedence over min_throughput. The use of min_throughput is discouraged in favor of min_instances.`, - Default: 200, +Value must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. +Only one of 'min_throughput' and 'min_instances' can be specified. 
The use of min_throughput is discouraged in favor of min_instances.`, + ConflictsWith: []string{"min_instances"}, }, "network": { Type: schema.TypeString, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/workbench/resource_workbench_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/workbench/resource_workbench_instance.go index 67dd0156921..1d7953aed12 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/workbench/resource_workbench_instance.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/workbench/resource_workbench_instance.go @@ -63,6 +63,7 @@ var WorkbenchInstanceProvidedMetadata = []string{ "agent-health-check-interval-seconds", "agent-health-check-path", "container", + "cos-update-strategy", "custom-container-image", "custom-container-payload", "data-disk-uri", @@ -81,6 +82,7 @@ var WorkbenchInstanceProvidedMetadata = []string{ "generate-diagnostics-bucket", "generate-diagnostics-file", "generate-diagnostics-options", + "google-logging-enabled", "image-url", "install-monitoring-agent", "install-nvidia-driver", @@ -103,6 +105,7 @@ var WorkbenchInstanceProvidedMetadata = []string{ "report-system-status", "restriction", "serial-port-logging-enable", + "service-account-mode", "shutdown-script", "title", "use-collaborative", @@ -215,7 +218,6 @@ func WorkbenchInstanceKmsDiffSuppress(_, old, new string, _ *schema.ResourceData } return false } - func waitForWorkbenchOperation(config *transport_tpg.Config, d *schema.ResourceData, project string, billingProject string, userAgent string, response map[string]interface{}) error { var opRes map[string]interface{} err := WorkbenchOperationWaitTimeWithResponse( @@ -271,6 +273,26 @@ func resizeWorkbenchInstanceDisk(config *transport_tpg.Config, d *schema.Resourc return nil } +// mergeLabels takes two maps of labels and returns a new map with the labels merged. +// If a key exists in old_labels but not in new_labels, it is added to the new map with an empty value. +func mergeLabels(oldLabels, newLabels map[string]interface{}) map[string]string { + modifiedLabels := make(map[string]string) + + // Add all labels from newLabels to modifiedLabels + for k, v := range newLabels { + modifiedLabels[k] = v.(string) + } + + // Add any keys from oldLabels that are not in newLabels with an empty value + for k := range oldLabels { + if _, ok := newLabels[k]; !ok { + modifiedLabels[k] = "" + } + } + + return modifiedLabels +} + func ResourceWorkbenchInstance() *schema.Resource { return &schema.Resource{ Create: resourceWorkbenchInstanceCreate, @@ -283,7 +305,7 @@ func ResourceWorkbenchInstance() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), + Create: schema.DefaultTimeout(20 * time.Minute), Update: schema.DefaultTimeout(20 * time.Minute), Delete: schema.DefaultTimeout(20 * time.Minute), }, @@ -491,6 +513,30 @@ https://cloud.google.com/vpc/docs/using-routes#canipforward`, Description: `The network interfaces for the VM. Supports only one interface.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "access_configs": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Optional. An array of configurations for this interface. Currently, only one access +config, ONE_TO_ONE_NAT, is supported. 
If no accessConfigs are specified, the +instance will have external internet access through an ephemeral +external IP address.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "external_ip": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `An external IP address associated with this instance. Specify an unused +static external IP address available to the project or leave this field +undefined to use an IP from a shared ephemeral IP address pool. If you +specify a static external IP address, it must live in the same region as +the zone of the instance.`, + }, + }, + }, + }, "network": { Type: schema.TypeString, Computed: true, @@ -609,14 +655,12 @@ a workbench instance with the environment installed directly on the VM.`, ForceNew: true, Description: `Optional. Use this VM image family to find the image; the newest image in this family will be used.`, - ExactlyOneOf: []string{}, }, "name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Optional. Use VM image name to find the image.`, - ExactlyOneOf: []string{}, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Optional. Use VM image name to find the image.`, }, "project": { Type: schema.TypeString, @@ -774,8 +818,8 @@ The milliseconds portion (".SSS") is optional.`, "desired_state": { Type: schema.TypeString, Optional: true, - Default: "ACTIVE", Description: `Desired state of the Workbench Instance. Set this field to 'ACTIVE' to start the Instance, and 'STOPPED' to stop the Instance.`, + Default: "ACTIVE", }, "project": { Type: schema.TypeString, @@ -1079,6 +1123,13 @@ func resourceWorkbenchInstanceUpdate(d *schema.ResourceData, meta interface{}) e return err } + if d.HasChange("effective_labels") { + old_labels_interface, new_labels_interface := d.GetChange("effective_labels") + old_labels := old_labels_interface.(map[string]interface{}) + new_labels := new_labels_interface.(map[string]interface{}) + obj["labels"] = mergeLabels(old_labels, new_labels) + } + name := d.Get("name").(string) if stopInstance { state := d.Get("state").(string) @@ -1488,9 +1539,10 @@ func flattenWorkbenchInstanceGceSetupNetworkInterfaces(v interface{}, d *schema.
continue } transformed = append(transformed, map[string]interface{}{ - "network": flattenWorkbenchInstanceGceSetupNetworkInterfacesNetwork(original["network"], d, config), - "subnet": flattenWorkbenchInstanceGceSetupNetworkInterfacesSubnet(original["subnet"], d, config), - "nic_type": flattenWorkbenchInstanceGceSetupNetworkInterfacesNicType(original["nicType"], d, config), + "network": flattenWorkbenchInstanceGceSetupNetworkInterfacesNetwork(original["network"], d, config), + "subnet": flattenWorkbenchInstanceGceSetupNetworkInterfacesSubnet(original["subnet"], d, config), + "nic_type": flattenWorkbenchInstanceGceSetupNetworkInterfacesNicType(original["nicType"], d, config), + "access_configs": flattenWorkbenchInstanceGceSetupNetworkInterfacesAccessConfigs(original["accessConfigs"], d, config), }) } return transformed @@ -1507,6 +1559,28 @@ func flattenWorkbenchInstanceGceSetupNetworkInterfacesNicType(v interface{}, d * return v } +func flattenWorkbenchInstanceGceSetupNetworkInterfacesAccessConfigs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "external_ip": flattenWorkbenchInstanceGceSetupNetworkInterfacesAccessConfigsExternalIp(original["externalIp"], d, config), + }) + } + return transformed +} +func flattenWorkbenchInstanceGceSetupNetworkInterfacesAccessConfigsExternalIp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenWorkbenchInstanceGceSetupDisablePublicIp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -2115,6 +2189,13 @@ func expandWorkbenchInstanceGceSetupNetworkInterfaces(v interface{}, d tpgresour transformed["nicType"] = transformedNicType } + transformedAccessConfigs, err := expandWorkbenchInstanceGceSetupNetworkInterfacesAccessConfigs(original["access_configs"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAccessConfigs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["accessConfigs"] = transformedAccessConfigs + } + req = append(req, transformed) } return req, nil @@ -2132,6 +2213,32 @@ func expandWorkbenchInstanceGceSetupNetworkInterfacesNicType(v interface{}, d tp return v, nil } +func expandWorkbenchInstanceGceSetupNetworkInterfacesAccessConfigs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedExternalIp, err := expandWorkbenchInstanceGceSetupNetworkInterfacesAccessConfigsExternalIp(original["external_ip"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExternalIp); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["externalIp"] = transformedExternalIp + } + + req = append(req, transformed) + } + return req, nil +} + +func expandWorkbenchInstanceGceSetupNetworkInterfacesAccessConfigsExternalIp(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { + return v, nil +} + func expandWorkbenchInstanceGceSetupDisablePublicIp(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/workflows/resource_workflows_workflow.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/workflows/resource_workflows_workflow.go index cb80bd1f16d..75db8259139 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/workflows/resource_workflows_workflow.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/workflows/resource_workflows_workflow.go @@ -733,7 +733,12 @@ func resourceWorkflowsWorkflowEncoder(d *schema.ResourceData, meta interface{}, if v, ok := d.GetOk("name"); ok { ResName = v.(string) } else if v, ok := d.GetOk("name_prefix"); ok { - ResName = id.PrefixedUniqueId(v.(string)) + prefix := v.(string) + if len(prefix) > 37 { + ResName = tpgresource.ReducedPrefixedUniqueId(prefix) + } else { + ResName = id.PrefixedUniqueId(prefix) + } } else { ResName = id.UniqueId() } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/sweeper/gcp_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/sweeper/gcp_sweeper.go index d4252497c09..aa77680877d 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/sweeper/gcp_sweeper.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/sweeper/gcp_sweeper.go @@ -6,12 +6,14 @@ import ( "encoding/hex" "fmt" "hash/crc32" + "log" "runtime" "strings" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) @@ -62,6 +64,45 @@ func IsSweepableTestResource(resourceName string) bool { return false } +// ListParentResourcesInLocation calls a provided list endpoint and returns the names of any resources found in the response. +// This function is intended to be used in sweepers where the resources being swept can only be found with knowledge of existing parent resources.
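+// A hedged usage sketch (the endpoint, response field, and mock values below
+// are illustrative, not taken from a real sweeper):
+//
+//	d := &tpgresource.ResourceDataMock{FieldsInSchema: map[string]interface{}{
+//		"project": config.Project,
+//		"region":  region,
+//	}}
+//	parents, err := ListParentResourcesInLocation(d, config,
+//		"https://example.googleapis.com/v1/projects/{{project}}/locations/{{region}}/widgets", "widgets")
+//
+// Each returned name can then be substituted into a child list URL before sweeping.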
+func ListParentResourcesInLocation(d *tpgresource.ResourceDataMock, config *transport_tpg.Config, listTemplate, responseField string) ([]string, error) { + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil, err + } + + resourceList, ok := res[responseField] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil, fmt.Errorf("nothing found in response") + } + + rl := resourceList.([]interface{}) + names := []string{} + for _, r := range rl { + resource := r.(map[string]interface{}) + if name, ok := resource["name"]; ok { + names = append(names, name.(string)) + } + + } + return names, nil +} + func AddTestSweepers(name string, sweeper func(region string) error) { _, filename, _, _ := runtime.Caller(0) hash := crc32.NewIEEE() diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/datasource_helpers.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/datasource_helpers.go index 71309d7f3d4..b0445ab3a19 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/datasource_helpers.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/datasource_helpers.go @@ -73,3 +73,9 @@ func AddRequiredFieldsToSchema(schema map[string]*schema.Schema, keys ...string) func AddOptionalFieldsToSchema(schema map[string]*schema.Schema, keys ...string) { FixDatasourceSchemaFlags(schema, false, keys...) 
} + +func DeleteFieldsFromSchema(schema map[string]*schema.Schema, keys ...string) { + for _, key := range keys { + delete(schema, key) + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/field_helpers.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/field_helpers.go index 4147d13f571..0ff72268637 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/field_helpers.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/field_helpers.go @@ -76,6 +76,9 @@ func ParseInstanceFieldValue(instance string, d TerraformResourceData, config *t func ParseInstanceGroupFieldValue(instanceGroup string, d TerraformResourceData, config *transport_tpg.Config) (*ZonalFieldValue, error) { return ParseZonalFieldValue("instanceGroups", instanceGroup, "project", "zone", d, config, false) } +func ParseRegionalInstanceGroupManagersFieldValue(instanceGroupManager string, d TerraformResourceData, config *transport_tpg.Config) (*RegionalFieldValue, error) { + return ParseRegionalFieldValue("instanceGroupManagers", instanceGroupManager, "project", "region", "zone", d, config, false) +} func ParseInstanceTemplateFieldValue(instanceTemplate string, d TerraformResourceData, config *transport_tpg.Config) (*GlobalFieldValue, error) { return ParseGlobalFieldValue("instanceTemplates", instanceTemplate, "project", d, config, false) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/labels.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/labels.go index b64bc794753..ac6db4f4f90 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/labels.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/labels.go @@ -57,8 +57,9 @@ func SetDataSourceLabels(d *schema.ResourceData) error { return nil } -func SetLabelsDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { - raw := d.Get("labels") +// Sets the values of the terraform_labels and effective_labels fields when the labels field is at the root level +func setLabelsFields(labelsField string, d *schema.ResourceDiff, meta interface{}, skipAttribution bool) error { + raw := d.Get(labelsField) if raw == nil { return nil } @@ -73,7 +74,7 @@ func SetLabelsDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) // If "labels" field is computed, set "terraform_labels" and "effective_labels" to computed.
// https://github.com/hashicorp/terraform-provider-google/issues/16217 - if !d.GetRawPlan().GetAttr("labels").IsWhollyKnown() { + if !d.GetRawPlan().GetAttr(labelsField).IsWhollyKnown() { if err := d.SetNewComputed("terraform_labels"); err != nil { return fmt.Errorf("error setting terraform_labels to computed: %w", err) } @@ -93,7 +94,7 @@ func SetLabelsDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) } // Append optional label indicating the resource was provisioned using Terraform - if config.AddTerraformAttributionLabel { + if !skipAttribution && config.AddTerraformAttributionLabel { if el, ok := d.Get("effective_labels").(map[string]any); ok { _, hasExistingLabel := el[transport_tpg.AttributionKey] if hasExistingLabel || @@ -133,6 +134,24 @@ func SetLabelsDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) return nil } +func SetLabelsDiffWithoutAttributionLabel(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { + return setLabelsFields("labels", d, meta, true) +} + +// The CustomizeDiff func to set the values of terraform_labels and effective_labels fields +// when labels field is at the root level and named "labels". +func SetLabelsDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { + return setLabelsFields("labels", d, meta, false) +} + +// The CustomizeDiff func to set the values of terraform_labels and effective_labels fields +// when the labels field is at the root level and has a different name (e.g. resource_labels) from "labels" +func SetDiffForLabelsWithCustomizedName(labelsField string) func(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { + return func(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { + return setLabelsFields(labelsField, d, meta, false) + } +} + func SetMetadataLabelsDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { l := d.Get("metadata").([]interface{}) if len(l) == 0 || l[0] == nil { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/resource_test_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/resource_test_utils.go index fc8093c2d6b..1b25c2226ae 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/resource_test_utils.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/resource_test_utils.go @@ -11,7 +11,7 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-plugin-testing/terraform" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/self_link_helpers.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/self_link_helpers.go index 3efc9e82544..79cb1d75591 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/self_link_helpers.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/self_link_helpers.go @@ -172,3 +172,14 @@ func GetRegionFromRegionalSelfLink(selfLink string) string { } return selfLink } + +func GetProjectFromRegionalSelfLink(selfLink string) string { + re := 
regexp.MustCompile("projects/([a-zA-Z0-9-]*)/(?:locations|regions)/[a-zA-Z0-9-]*") + switch { + case re.MatchString(selfLink): + if res := re.FindStringSubmatch(selfLink); len(res) == 2 && res[1] != "" { + return res[1] + } + } + return selfLink +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/utils.go index ab376408eb9..0d12a711955 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/utils.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/utils.go @@ -24,6 +24,7 @@ import ( "github.com/hashicorp/go-cty/cty" fwDiags "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "golang.org/x/exp/maps" @@ -239,6 +240,25 @@ func ExpandStringMap(d TerraformResourceData, key string) map[string]string { return ConvertStringMap(v.(map[string]interface{})) } +// InterfaceSliceToStringSlice converts a []interface{} containing strings to []string +func InterfaceSliceToStringSlice(v interface{}) ([]string, error) { + interfaceSlice, ok := v.([]interface{}) + if !ok { + return nil, fmt.Errorf("expected []interface{}, got %T", v) + } + + stringSlice := make([]string, len(interfaceSlice)) + for i, item := range interfaceSlice { + strItem, ok := item.(string) + if !ok { + return nil, fmt.Errorf("expected string, got %T at index %d", item, i) + } + stringSlice[i] = strItem + } + + return stringSlice, nil +} + // SortStringsByConfigOrder takes a slice of map[string]interface{} from a TF config // and API data, and returns a new slice containing the API data, reordered to match // the TF config as closely as possible (with new items at the end of the list). 
@@ -880,3 +900,16 @@ func DefaultProviderZone(_ context.Context, diff *schema.ResourceDiff, meta inte return nil } + +// id.UniqueId() returns a timestamp + incremental hash +// This function truncates the timestamp to provide a prefix + 9 using +// YYmmdd + last 3 digits of the incremental hash +func ReducedPrefixedUniqueId(prefix string) string { + // uniqueID is timestamp + 8 digit counter (YYYYmmddHHMMSSssss + 12345678) + uniqueId := id.PrefixedUniqueId("") + // last three digits of the counter (678) + counter := uniqueId[len(uniqueId)-3:] + // YYmmdd of date + date := uniqueId[2:8] + return prefix + date + counter +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/config.go index c6423664439..6738c6191cd 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/config.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/config.go @@ -34,6 +34,7 @@ import ( appengine "google.golang.org/api/appengine/v1" "google.golang.org/api/bigquery/v2" "google.golang.org/api/bigtableadmin/v2" + "google.golang.org/api/certificatemanager/v1" "google.golang.org/api/cloudbilling/v1" "google.golang.org/api/cloudbuild/v1" "google.golang.org/api/cloudfunctions/v1" @@ -235,7 +236,6 @@ type Config struct { DataplexBasePath string DataprocBasePath string DataprocMetastoreBasePath string - DatastoreBasePath string DatastreamBasePath string DeploymentManagerBasePath string DialogflowBasePath string @@ -275,16 +275,19 @@ type Config struct { NetworkSecurityBasePath string NetworkServicesBasePath string NotebooksBasePath string + OracleDatabaseBasePath string OrgPolicyBasePath string OSConfigBasePath string OSLoginBasePath string PrivatecaBasePath string + PrivilegedAccessManagerBasePath string PublicCABasePath string PubsubBasePath string PubsubLiteBasePath string RedisBasePath string ResourceManagerBasePath string SecretManagerBasePath string + SecretManagerRegionalBasePath string SecureSourceManagerBasePath string SecurityCenterBasePath string SecurityCenterManagementBasePath string @@ -293,6 +296,7 @@ type Config struct { ServiceManagementBasePath string ServiceNetworkingBasePath string ServiceUsageBasePath string + SiteVerificationBasePath string SourceRepoBasePath string SpannerBasePath string SQLBasePath string @@ -301,6 +305,7 @@ type Config struct { StorageTransferBasePath string TagsBasePath string TPUBasePath string + TranscoderBasePath string VertexAIBasePath string VmwareengineBasePath string VPCAccessBasePath string @@ -373,7 +378,6 @@ const DataPipelineBasePathKey = "DataPipeline" const DataplexBasePathKey = "Dataplex" const DataprocBasePathKey = "Dataproc" const DataprocMetastoreBasePathKey = "DataprocMetastore" -const DatastoreBasePathKey = "Datastore" const DatastreamBasePathKey = "Datastream" const DeploymentManagerBasePathKey = "DeploymentManager" const DialogflowBasePathKey = "Dialogflow" @@ -413,16 +417,19 @@ const NetworkManagementBasePathKey = "NetworkManagement" const NetworkSecurityBasePathKey = "NetworkSecurity" const NetworkServicesBasePathKey = "NetworkServices" const NotebooksBasePathKey = "Notebooks" +const OracleDatabaseBasePathKey = "OracleDatabase" const OrgPolicyBasePathKey = "OrgPolicy" const OSConfigBasePathKey = "OSConfig" const OSLoginBasePathKey = "OSLogin" const PrivatecaBasePathKey = "Privateca" 
+const PrivilegedAccessManagerBasePathKey = "PrivilegedAccessManager" const PublicCABasePathKey = "PublicCA" const PubsubBasePathKey = "Pubsub" const PubsubLiteBasePathKey = "PubsubLite" const RedisBasePathKey = "Redis" const ResourceManagerBasePathKey = "ResourceManager" const SecretManagerBasePathKey = "SecretManager" +const SecretManagerRegionalBasePathKey = "SecretManagerRegional" const SecureSourceManagerBasePathKey = "SecureSourceManager" const SecurityCenterBasePathKey = "SecurityCenter" const SecurityCenterManagementBasePathKey = "SecurityCenterManagement" @@ -431,6 +438,7 @@ const SecuritypostureBasePathKey = "Securityposture" const ServiceManagementBasePathKey = "ServiceManagement" const ServiceNetworkingBasePathKey = "ServiceNetworking" const ServiceUsageBasePathKey = "ServiceUsage" +const SiteVerificationBasePathKey = "SiteVerification" const SourceRepoBasePathKey = "SourceRepo" const SpannerBasePathKey = "Spanner" const SQLBasePathKey = "SQL" @@ -439,6 +447,7 @@ const StorageInsightsBasePathKey = "StorageInsights" const StorageTransferBasePathKey = "StorageTransfer" const TagsBasePathKey = "Tags" const TPUBasePathKey = "TPU" +const TranscoderBasePathKey = "Transcoder" const VertexAIBasePathKey = "VertexAI" const VmwareengineBasePathKey = "Vmwareengine" const VPCAccessBasePathKey = "VPCAccess" @@ -505,7 +514,6 @@ var DefaultBasePaths = map[string]string{ DataplexBasePathKey: "https://dataplex.googleapis.com/v1/", DataprocBasePathKey: "https://dataproc.googleapis.com/v1/", DataprocMetastoreBasePathKey: "https://metastore.googleapis.com/v1/", - DatastoreBasePathKey: "https://datastore.googleapis.com/v1/", DatastreamBasePathKey: "https://datastream.googleapis.com/v1/", DeploymentManagerBasePathKey: "https://www.googleapis.com/deploymentmanager/v2/", DialogflowBasePathKey: "https://dialogflow.googleapis.com/v2/", @@ -545,16 +553,19 @@ var DefaultBasePaths = map[string]string{ NetworkSecurityBasePathKey: "https://networksecurity.googleapis.com/v1/", NetworkServicesBasePathKey: "https://networkservices.googleapis.com/v1/", NotebooksBasePathKey: "https://notebooks.googleapis.com/v1/", + OracleDatabaseBasePathKey: "https://oracledatabase.googleapis.com/v1/", OrgPolicyBasePathKey: "https://orgpolicy.googleapis.com/v2/", OSConfigBasePathKey: "https://osconfig.googleapis.com/v1/", OSLoginBasePathKey: "https://oslogin.googleapis.com/v1/", PrivatecaBasePathKey: "https://privateca.googleapis.com/v1/", + PrivilegedAccessManagerBasePathKey: "https://privilegedaccessmanager.googleapis.com/v1/", PublicCABasePathKey: "https://publicca.googleapis.com/v1/", PubsubBasePathKey: "https://pubsub.googleapis.com/v1/", PubsubLiteBasePathKey: "https://{{region}}-pubsublite.googleapis.com/v1/admin/", RedisBasePathKey: "https://redis.googleapis.com/v1/", ResourceManagerBasePathKey: "https://cloudresourcemanager.googleapis.com/v1/", SecretManagerBasePathKey: "https://secretmanager.googleapis.com/v1/", + SecretManagerRegionalBasePathKey: "https://secretmanager.{{location}}.rep.googleapis.com/v1/", SecureSourceManagerBasePathKey: "https://securesourcemanager.googleapis.com/v1/", SecurityCenterBasePathKey: "https://securitycenter.googleapis.com/v1/", SecurityCenterManagementBasePathKey: "https://securitycentermanagement.googleapis.com/v1/", @@ -563,6 +574,7 @@ var DefaultBasePaths = map[string]string{ ServiceManagementBasePathKey: "https://servicemanagement.googleapis.com/v1/", ServiceNetworkingBasePathKey: "https://servicenetworking.googleapis.com/v1/", ServiceUsageBasePathKey: 
"https://serviceusage.googleapis.com/v1/", + SiteVerificationBasePathKey: "https://www.googleapis.com/siteVerification/v1/", SourceRepoBasePathKey: "https://sourcerepo.googleapis.com/v1/", SpannerBasePathKey: "https://spanner.googleapis.com/v1/", SQLBasePathKey: "https://sqladmin.googleapis.com/sql/v1beta4/", @@ -571,6 +583,7 @@ var DefaultBasePaths = map[string]string{ StorageTransferBasePathKey: "https://storagetransfer.googleapis.com/v1/", TagsBasePathKey: "https://cloudresourcemanager.googleapis.com/v3/", TPUBasePathKey: "https://tpu.googleapis.com/v1/", + TranscoderBasePathKey: "https://transcoder.googleapis.com/v1/", VertexAIBasePathKey: "https://{{region}}-aiplatform.googleapis.com/v1/", VmwareengineBasePathKey: "https://vmwareengine.googleapis.com/v1/", VPCAccessBasePathKey: "https://vpcaccess.googleapis.com/v1/", @@ -900,11 +913,6 @@ func SetEndpointDefaults(d *schema.ResourceData) error { "GOOGLE_DATAPROC_METASTORE_CUSTOM_ENDPOINT", }, DefaultBasePaths[DataprocMetastoreBasePathKey])) } - if d.Get("datastore_custom_endpoint") == "" { - d.Set("datastore_custom_endpoint", MultiEnvDefault([]string{ - "GOOGLE_DATASTORE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[DatastoreBasePathKey])) - } if d.Get("datastream_custom_endpoint") == "" { d.Set("datastream_custom_endpoint", MultiEnvDefault([]string{ "GOOGLE_DATASTREAM_CUSTOM_ENDPOINT", @@ -1100,6 +1108,11 @@ func SetEndpointDefaults(d *schema.ResourceData) error { "GOOGLE_NOTEBOOKS_CUSTOM_ENDPOINT", }, DefaultBasePaths[NotebooksBasePathKey])) } + if d.Get("oracle_database_custom_endpoint") == "" { + d.Set("oracle_database_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_ORACLE_DATABASE_CUSTOM_ENDPOINT", + }, DefaultBasePaths[OracleDatabaseBasePathKey])) + } if d.Get("org_policy_custom_endpoint") == "" { d.Set("org_policy_custom_endpoint", MultiEnvDefault([]string{ "GOOGLE_ORG_POLICY_CUSTOM_ENDPOINT", @@ -1120,6 +1133,11 @@ func SetEndpointDefaults(d *schema.ResourceData) error { "GOOGLE_PRIVATECA_CUSTOM_ENDPOINT", }, DefaultBasePaths[PrivatecaBasePathKey])) } + if d.Get("privileged_access_manager_custom_endpoint") == "" { + d.Set("privileged_access_manager_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_PRIVILEGED_ACCESS_MANAGER_CUSTOM_ENDPOINT", + }, DefaultBasePaths[PrivilegedAccessManagerBasePathKey])) + } if d.Get("public_ca_custom_endpoint") == "" { d.Set("public_ca_custom_endpoint", MultiEnvDefault([]string{ "GOOGLE_PUBLIC_CA_CUSTOM_ENDPOINT", @@ -1150,6 +1168,11 @@ func SetEndpointDefaults(d *schema.ResourceData) error { "GOOGLE_SECRET_MANAGER_CUSTOM_ENDPOINT", }, DefaultBasePaths[SecretManagerBasePathKey])) } + if d.Get("secret_manager_regional_custom_endpoint") == "" { + d.Set("secret_manager_regional_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_SECRET_MANAGER_REGIONAL_CUSTOM_ENDPOINT", + }, DefaultBasePaths[SecretManagerRegionalBasePathKey])) + } if d.Get("secure_source_manager_custom_endpoint") == "" { d.Set("secure_source_manager_custom_endpoint", MultiEnvDefault([]string{ "GOOGLE_SECURE_SOURCE_MANAGER_CUSTOM_ENDPOINT", @@ -1190,6 +1213,11 @@ func SetEndpointDefaults(d *schema.ResourceData) error { "GOOGLE_SERVICE_USAGE_CUSTOM_ENDPOINT", }, DefaultBasePaths[ServiceUsageBasePathKey])) } + if d.Get("site_verification_custom_endpoint") == "" { + d.Set("site_verification_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_SITE_VERIFICATION_CUSTOM_ENDPOINT", + }, DefaultBasePaths[SiteVerificationBasePathKey])) + } if d.Get("source_repo_custom_endpoint") == "" { d.Set("source_repo_custom_endpoint", 
MultiEnvDefault([]string{ "GOOGLE_SOURCE_REPO_CUSTOM_ENDPOINT", @@ -1230,6 +1258,11 @@ func SetEndpointDefaults(d *schema.ResourceData) error { "GOOGLE_TPU_CUSTOM_ENDPOINT", }, DefaultBasePaths[TPUBasePathKey])) } + if d.Get("transcoder_custom_endpoint") == "" { + d.Set("transcoder_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_TRANSCODER_CUSTOM_ENDPOINT", + }, DefaultBasePaths[TranscoderBasePathKey])) + } if d.Get("vertex_ai_custom_endpoint") == "" { d.Set("vertex_ai_custom_endpoint", MultiEnvDefault([]string{ "GOOGLE_VERTEX_AI_CUSTOM_ENDPOINT", @@ -1510,6 +1543,20 @@ func (c *Config) getTokenSource(clientScopes []string, initialCredentialsOnly bo // while most only want the host URL, some older ones also want the version and some // of those "projects" as well. You can find out if this is required by looking at // the basePath value in the client library file. +func (c *Config) NewCertificateManagerClient(userAgent string) *certificatemanager.Service { + certificateManagerClientBasePath := RemoveBasePathVersion(c.CertificateManagerBasePath) + log.Printf("[INFO] Instantiating Certificate Manager client for path %s", certificateManagerClientBasePath) + clientCertificateManager, err := certificatemanager.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client certificate manager: %s", err) + return nil + } + clientCertificateManager.UserAgent = userAgent + clientCertificateManager.BasePath = certificateManagerClientBasePath + + return clientCertificateManager +} + func (c *Config) NewComputeClient(userAgent string) *compute.Service { log.Printf("[INFO] Instantiating GCE client for path %s", c.ComputeBasePath) clientCompute, err := compute.NewService(c.Context, option.WithHTTPClient(c.Client)) @@ -2196,7 +2243,6 @@ func ConfigureBasePaths(c *Config) { c.DataplexBasePath = DefaultBasePaths[DataplexBasePathKey] c.DataprocBasePath = DefaultBasePaths[DataprocBasePathKey] c.DataprocMetastoreBasePath = DefaultBasePaths[DataprocMetastoreBasePathKey] - c.DatastoreBasePath = DefaultBasePaths[DatastoreBasePathKey] c.DatastreamBasePath = DefaultBasePaths[DatastreamBasePathKey] c.DeploymentManagerBasePath = DefaultBasePaths[DeploymentManagerBasePathKey] c.DialogflowBasePath = DefaultBasePaths[DialogflowBasePathKey] @@ -2236,16 +2282,19 @@ func ConfigureBasePaths(c *Config) { c.NetworkSecurityBasePath = DefaultBasePaths[NetworkSecurityBasePathKey] c.NetworkServicesBasePath = DefaultBasePaths[NetworkServicesBasePathKey] c.NotebooksBasePath = DefaultBasePaths[NotebooksBasePathKey] + c.OracleDatabaseBasePath = DefaultBasePaths[OracleDatabaseBasePathKey] c.OrgPolicyBasePath = DefaultBasePaths[OrgPolicyBasePathKey] c.OSConfigBasePath = DefaultBasePaths[OSConfigBasePathKey] c.OSLoginBasePath = DefaultBasePaths[OSLoginBasePathKey] c.PrivatecaBasePath = DefaultBasePaths[PrivatecaBasePathKey] + c.PrivilegedAccessManagerBasePath = DefaultBasePaths[PrivilegedAccessManagerBasePathKey] c.PublicCABasePath = DefaultBasePaths[PublicCABasePathKey] c.PubsubBasePath = DefaultBasePaths[PubsubBasePathKey] c.PubsubLiteBasePath = DefaultBasePaths[PubsubLiteBasePathKey] c.RedisBasePath = DefaultBasePaths[RedisBasePathKey] c.ResourceManagerBasePath = DefaultBasePaths[ResourceManagerBasePathKey] c.SecretManagerBasePath = DefaultBasePaths[SecretManagerBasePathKey] + c.SecretManagerRegionalBasePath = DefaultBasePaths[SecretManagerRegionalBasePathKey] c.SecureSourceManagerBasePath = DefaultBasePaths[SecureSourceManagerBasePathKey] c.SecurityCenterBasePath = 
DefaultBasePaths[SecurityCenterBasePathKey] c.SecurityCenterManagementBasePath = DefaultBasePaths[SecurityCenterManagementBasePathKey] @@ -2254,6 +2303,7 @@ func ConfigureBasePaths(c *Config) { c.ServiceManagementBasePath = DefaultBasePaths[ServiceManagementBasePathKey] c.ServiceNetworkingBasePath = DefaultBasePaths[ServiceNetworkingBasePathKey] c.ServiceUsageBasePath = DefaultBasePaths[ServiceUsageBasePathKey] + c.SiteVerificationBasePath = DefaultBasePaths[SiteVerificationBasePathKey] c.SourceRepoBasePath = DefaultBasePaths[SourceRepoBasePathKey] c.SpannerBasePath = DefaultBasePaths[SpannerBasePathKey] c.SQLBasePath = DefaultBasePaths[SQLBasePathKey] @@ -2262,6 +2312,7 @@ func ConfigureBasePaths(c *Config) { c.StorageTransferBasePath = DefaultBasePaths[StorageTransferBasePathKey] c.TagsBasePath = DefaultBasePaths[TagsBasePathKey] c.TPUBasePath = DefaultBasePaths[TPUBasePathKey] + c.TranscoderBasePath = DefaultBasePaths[TranscoderBasePathKey] c.VertexAIBasePath = DefaultBasePaths[VertexAIBasePathKey] c.VmwareengineBasePath = DefaultBasePaths[VmwareengineBasePathKey] c.VPCAccessBasePath = DefaultBasePaths[VPCAccessBasePathKey] diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/error_retry_predicates.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/error_retry_predicates.go index b43ef816e15..01f22f105d4 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/error_retry_predicates.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/error_retry_predicates.go @@ -574,3 +574,13 @@ func ExternalIpServiceNotActive(err error) (bool, string) { } return false, "" } + +// Site verification may return a 400 error while waiting for DNS propagation. 
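+// A minimal wiring sketch (the request values shown are illustrative); callers
+// opt in per request through SendRequestOptions.ErrorRetryPredicates:
+//
+//	res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
+//		Config:               config,
+//		Method:               "POST",
+//		RawURL:               url,
+//		UserAgent:            userAgent,
+//		Body:                 obj,
+//		ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSiteVerificationRetryableError},
+//	})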
+func IsSiteVerificationRetryableError(err error) (bool, string) { + if gerr, ok := err.(*googleapi.Error); ok { + if gerr.Code == 400 && strings.Contains(strings.ToLower(gerr.Body), "verification token could not be found") { + return true, "Waiting for verification token to be visible" + } + } + return false, "" +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/provider_dcl_client_creation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/provider_dcl_client_creation.go index 46bd039cb56..bcf800d8bbf 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/provider_dcl_client_creation.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/provider_dcl_client_creation.go @@ -37,7 +37,6 @@ import ( eventarc "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc" firebaserules "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules" gkehub "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub" - networkconnectivity "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity" recaptchaenterprise "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise" ) @@ -340,29 +339,6 @@ func NewDCLGkeHubClient(config *Config, userAgent, billingProject string, timeou return gkehub.NewClient(dclConfig) } -func NewDCLNetworkConnectivityClient(config *Config, userAgent, billingProject string, timeout time.Duration) *networkconnectivity.Client { - configOptions := []dcl.ConfigOption{ - dcl.WithHTTPClient(config.Client), - dcl.WithUserAgent(userAgent), - dcl.WithLogger(dclLogger{}), - dcl.WithBasePath(config.NetworkConnectivityBasePath), - } - - if timeout != 0 { - configOptions = append(configOptions, dcl.WithTimeout(timeout)) - } - - if config.UserProjectOverride { - configOptions = append(configOptions, dcl.WithUserProjectOverride()) - if billingProject != "" { - configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) - } - } - - dclConfig := dcl.NewConfig(configOptions...) - return networkconnectivity.NewClient(dclConfig) -} - func NewDCLRecaptchaEnterpriseClient(config *Config, userAgent, billingProject string, timeout time.Duration) *recaptchaenterprise.Client { configOptions := []dcl.ConfigOption{ dcl.WithHTTPClient(config.Client), diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/verify/validation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/verify/validation.go index 710a1a106ef..b046ff60e6f 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/verify/validation.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/verify/validation.go @@ -24,7 +24,7 @@ const ( SubnetworkLinkRegex = "projects/(" + ProjectRegex + ")/regions/(" + RegionRegex + ")/subnetworks/(" + SubnetworkRegex + ")$" - RFC1035NameTemplate = "[a-z](?:[-a-z0-9]{%d,%d}[a-z0-9])" + RFC1035NameTemplate = "[a-z]([-a-z0-9]%v[a-z0-9])?" 
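+	// Note: callers now supply the repetition quantifier through the %v placeholder;
+	// e.g. fmt.Sprintf(RFC1035NameTemplate, "{4,28}") (as used for
+	// ServiceAccountNameRegex below) yields "[a-z]([-a-z0-9]{4,28}[a-z0-9])?".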
CloudIoTIdRegex = "^[a-zA-Z][-a-zA-Z0-9._+~%]{2,254}$" // Format of default Compute service accounts created by Google @@ -43,7 +43,7 @@ var ( // The first and last characters have different restrictions than // the middle characters. The middle characters length must be between // 4 and 28 since the first and last character are excluded. - ServiceAccountNameRegex = fmt.Sprintf(RFC1035NameTemplate, 4, 28) + ServiceAccountNameRegex = fmt.Sprintf(RFC1035NameTemplate, "{4,28}") ServiceAccountLinkRegexPrefix = "projects/" + ProjectRegexWildCard + "/serviceAccounts/" PossibleServiceAccountNames = []string{ @@ -56,7 +56,7 @@ var ( ServiceAccountKeyNameRegex = ServiceAccountLinkRegexPrefix + "(.+)/keys/(.+)" // Format of service accounts created through the API - CreatedServiceAccountNameRegex = fmt.Sprintf(RFC1035NameTemplate, 4, 28) + "@" + ProjectNameInDNSFormRegex + "\\.iam\\.gserviceaccount\\.com$" + CreatedServiceAccountNameRegex = fmt.Sprintf(RFC1035NameTemplate, "{4,28}") + "@" + ProjectNameInDNSFormRegex + "\\.iam\\.gserviceaccount\\.com$" // Format of service-created service account // examples are: @@ -196,19 +196,26 @@ func ValidateRFC3339Time(v interface{}, k string) (warnings []string, errors []e } func ValidateRFC1035Name(min, max int) schema.SchemaValidateFunc { - if min < 2 || max < min { - return func(i interface{}, k string) (s []string, errors []error) { - if min < 2 { - errors = append(errors, fmt.Errorf("min must be at least 2. Got: %d", min)) - } - if max < min { - errors = append(errors, fmt.Errorf("max must greater than min. Got [%d, %d]", min, max)) - } - return + return func(i interface{}, k string) (s []string, errors []error) { + value := i.(string) + re := fmt.Sprintf("^"+RFC1035NameTemplate+"$", "*") + if min < 1 { + errors = append(errors, fmt.Errorf("min must be at least 1. Got: %d", min)) + } + if max < min { + errors = append(errors, fmt.Errorf("max must be greater than min. 
Got [%d, %d]", min, max)) + } + + if len(value) < min || len(value) > max { + errors = append(errors, fmt.Errorf("%q (%q) must be between %d and %d characters long", k, value, min, max)) + } + + if !regexp.MustCompile(re).MatchString(value) { + errors = append(errors, fmt.Errorf("%q (%q) must match regex %q", k, value, re)) } - } - return ValidateRegexp(fmt.Sprintf("^"+RFC1035NameTemplate+"$", min-2, max-2)) + return + } } func ValidateIpCidrRange(v interface{}, k string) (warnings []string, errors []error) { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/main.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/main.go index eae0f9a400b..fd0d9ce0d6d 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/main.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/main.go @@ -14,16 +14,6 @@ import ( "github.com/hashicorp/terraform-provider-google/google/fwprovider" "github.com/hashicorp/terraform-provider-google/google/provider" - ver "github.com/hashicorp/terraform-provider-google/version" -) - -var ( - // these will be set by the goreleaser configuration - // to appropriate values for the compiled binary - version string = ver.ProviderVersion - - // goreleaser can also pass the specific commit if you want - // commit string = "" ) func main() { @@ -34,8 +24,8 @@ func main() { // concat with sdkv2 provider providers := []func() tfprotov5.ProviderServer{ - providerserver.NewProtocol5(fwprovider.New(version)), // framework provider - provider.Provider().GRPCProvider, // sdk provider + providerserver.NewProtocol5(fwprovider.New()), // framework provider + provider.Provider().GRPCProvider, // sdk provider } // use the muxer diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/version/version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/version/version.go index e47ba87d557..80e56e768ce 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/version/version.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/version/version.go @@ -4,5 +4,5 @@ package version var ( // ProviderVersion is set during the release process to the release version of the binary - ProviderVersion = "dev" + ProviderVersion = "dev6" ) diff --git a/terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/LICENSE b/terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/LICENSE new file mode 100644 index 00000000000..dc61de8465c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/LICENSE @@ -0,0 +1,29 @@ +Copyright (c) 2021, PlanetScale Inc. All rights reserved. +Copyright (c) 2013, The GoGo Authors. All rights reserved. +Copyright (c) 2018 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/protohelpers/protohelpers.go b/terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/protohelpers/protohelpers.go new file mode 100644 index 00000000000..64bde83ed0c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/protohelpers/protohelpers.go @@ -0,0 +1,122 @@ +// Package protohelpers provides helper functions for encoding and decoding protobuf messages. +// The spec can be found at https://protobuf.dev/programming-guides/encoding/. +package protohelpers + +import ( + "fmt" + "io" + "math/bits" +) + +var ( + // ErrInvalidLength is returned when decoding a negative length. + ErrInvalidLength = fmt.Errorf("proto: negative length found during unmarshaling") + // ErrIntOverflow is returned when decoding a varint representation of an integer that overflows 64 bits. + ErrIntOverflow = fmt.Errorf("proto: integer overflow") + // ErrUnexpectedEndOfGroup is returned when decoding a group end without a corresponding group start. + ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group") +) + +// EncodeVarint encodes a uint64 into a varint-encoded byte slice and returns the offset of the encoded value. +// The provided offset is the offset after the last byte of the encoded value. +func EncodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= SizeOfVarint(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} + +// SizeOfVarint returns the size of the varint-encoded value. +func SizeOfVarint(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} + +// SizeOfZigzag returns the size of the zigzag-encoded value. +func SizeOfZigzag(x uint64) (n int) { + return SizeOfVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// Skip the first record of the byte slice and return the offset of the next record. 
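+// Worked example (bytes invented for illustration): for input
+// []byte{0x08, 0x96, 0x01} (a field-1 tag with wire type 0, followed by the
+// varint 150), Skip consumes the tag byte plus the two varint bytes and
+// returns n = 3, so callers resume decoding at dAtA[3:].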
+func Skip(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLength + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLength + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} diff --git a/terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/types/known/anypb/any_vtproto.pb.go b/terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/types/known/anypb/any_vtproto.pb.go new file mode 100644 index 00000000000..c99b9e6106e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/types/known/anypb/any_vtproto.pb.go @@ -0,0 +1,389 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: (devel) +// source: google/protobuf/any.proto + +package anypb + +import ( + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + io "io" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Any anypb.Any + +func (m *Any) CloneVT() *Any { + if m == nil { + return (*Any)(nil) + } + r := new(Any) + r.TypeUrl = m.TypeUrl + if rhs := m.Value; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Value = tmpBytes + } + return r +} + +func (this *Any) EqualVT(that *Any) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.TypeUrl != that.TypeUrl { + return false + } + if string(this.Value) != string(that.Value) { + return false + } + return true +} + +func (m *Any) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Any) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Any) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Any) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Any) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Any) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Any) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + return n +} + +func (m *Any) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Any: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Any: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Any) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Any: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Any: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var stringValue string + if intStringLen > 0 { + stringValue = unsafe.String(&dAtA[iNdEx], intStringLen) + } + m.TypeUrl = stringValue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return 
protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = dAtA[iNdEx:postIndex] + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/types/known/durationpb/duration_vtproto.pb.go b/terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/types/known/durationpb/duration_vtproto.pb.go new file mode 100644 index 00000000000..681ae9f72e8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/types/known/durationpb/duration_vtproto.pb.go @@ -0,0 +1,317 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: (devel) +// source: google/protobuf/duration.proto + +package durationpb + +import ( + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + io "io" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Duration durationpb.Duration + +func (m *Duration) CloneVT() *Duration { + if m == nil { + return (*Duration)(nil) + } + r := new(Duration) + r.Seconds = m.Seconds + r.Nanos = m.Nanos + return r +} + +func (this *Duration) EqualVT(that *Duration) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Seconds != that.Seconds { + return false + } + if this.Nanos != that.Nanos { + return false + } + return true +} + +func (m *Duration) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Duration) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Duration) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Nanos != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Duration) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Duration) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Duration) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Nanos != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + 
if m.Seconds != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Duration) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Seconds != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Nanos)) + } + return n +} + +func (m *Duration) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Duration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Duration) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Duration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos 
|= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/types/known/emptypb/empty_vtproto.pb.go b/terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/types/known/emptypb/empty_vtproto.pb.go new file mode 100644 index 00000000000..470c82ac52d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/types/known/emptypb/empty_vtproto.pb.go @@ -0,0 +1,207 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: (devel) +// source: google/protobuf/empty.proto + +package emptypb + +import ( + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + io "io" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Empty emptypb.Empty + +func (m *Empty) CloneVT() *Empty { + if m == nil { + return (*Empty)(nil) + } + r := new(Empty) + return r +} + +func (this *Empty) EqualVT(that *Empty) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + return true +} + +func (m *Empty) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Empty) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Empty) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *Empty) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Empty) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Empty) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *Empty) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *Empty) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
Empty: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Empty: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Empty) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Empty: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Empty: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/types/known/structpb/struct_vtproto.pb.go b/terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/types/known/structpb/struct_vtproto.pb.go new file mode 100644 index 00000000000..be8b40e338d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/types/known/structpb/struct_vtproto.pb.go @@ -0,0 +1,2004 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: (devel) +// source: google/protobuf/struct.proto + +package structpb + +import ( + binary "encoding/binary" + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + io "io" + math "math" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Struct structpb.Struct +type Value structpb.Value +type Value_NullValue structpb.Value_NullValue +type Value_NumberValue structpb.Value_NumberValue +type Value_StringValue structpb.Value_StringValue +type Value_BoolValue structpb.Value_BoolValue +type Value_StructValue structpb.Value_StructValue +type Value_ListValue structpb.Value_ListValue +type ListValue structpb.ListValue + +func (m *Struct) CloneVT() *Struct { + if m == nil { + return (*Struct)(nil) + } + r := new(Struct) + if rhs := m.Fields; rhs != nil { + tmpContainer := make(map[string]*structpb.Value, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = (*structpb.Value)((*Value)(v).CloneVT()) + } + r.Fields = tmpContainer + } + return r +} + +func (m *Value) CloneVT() *Value { + if m == nil { + return (*Value)(nil) + } + r := new(Value) + if m.Kind != nil { + switch c := m.Kind.(type) { + case *structpb.Value_NullValue: + r.Kind = (*structpb.Value_NullValue)((*Value_NullValue)(c).CloneVT()) + case *structpb.Value_NumberValue: + r.Kind = (*structpb.Value_NumberValue)((*Value_NumberValue)(c).CloneVT()) + case *structpb.Value_StringValue: + r.Kind = (*structpb.Value_StringValue)((*Value_StringValue)(c).CloneVT()) + case *structpb.Value_BoolValue: + r.Kind = (*structpb.Value_BoolValue)((*Value_BoolValue)(c).CloneVT()) + case *structpb.Value_StructValue: + r.Kind = (*structpb.Value_StructValue)((*Value_StructValue)(c).CloneVT()) + case *structpb.Value_ListValue: + r.Kind = (*structpb.Value_ListValue)((*Value_ListValue)(c).CloneVT()) + } + } + return r +} + +func (m *Value_NullValue) CloneVT() *Value_NullValue { + if m == nil { + return (*Value_NullValue)(nil) + } + r := new(Value_NullValue) + r.NullValue = m.NullValue + return r +} + +func (m *Value_NumberValue) CloneVT() *Value_NumberValue { + if m == nil { + return (*Value_NumberValue)(nil) + } + r := new(Value_NumberValue) + r.NumberValue = m.NumberValue + return r +} + +func (m *Value_StringValue) CloneVT() *Value_StringValue { + if m == nil { + return (*Value_StringValue)(nil) + } + r := new(Value_StringValue) + r.StringValue = m.StringValue + return r +} + +func (m *Value_BoolValue) CloneVT() *Value_BoolValue { + if m == nil { + return (*Value_BoolValue)(nil) + } + r := new(Value_BoolValue) + r.BoolValue = m.BoolValue + return r +} + +func (m *Value_StructValue) CloneVT() *Value_StructValue { + if m == nil { + return (*Value_StructValue)(nil) + } + r := new(Value_StructValue) + r.StructValue = (*structpb.Struct)((*Struct)(m.StructValue).CloneVT()) + return r +} + +func (m *Value_ListValue) CloneVT() *Value_ListValue { + if m == nil { + return (*Value_ListValue)(nil) + } + r := new(Value_ListValue) + r.ListValue = (*structpb.ListValue)((*ListValue)(m.ListValue).CloneVT()) + return r +} + +func (m *ListValue) CloneVT() *ListValue { + if m == nil { + return (*ListValue)(nil) + } + r := new(ListValue) + if rhs := m.Values; rhs != nil { + tmpContainer := make([]*structpb.Value, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = (*structpb.Value)((*Value)(v).CloneVT()) + } + r.Values = tmpContainer + } + return r +} + +func (this *Struct) EqualVT(that *Struct) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if len(this.Fields) != len(that.Fields) { + return false + } + for i, vx := range this.Fields { + vy, ok := that.Fields[i] + if !ok { + return false + } + if p, q := vx, vy; p != q { + if p == nil { + p = &structpb.Value{} + } + if q == nil { + q = 
&structpb.Value{} + } + if !(*Value)(p).EqualVT((*Value)(q)) { + return false + } + } + } + return true +} + +func (this *Value) EqualVT(that *Value) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Kind == nil && that.Kind != nil { + return false + } else if this.Kind != nil { + if that.Kind == nil { + return false + } + switch c := this.Kind.(type) { + case *structpb.Value_NullValue: + if !(*Value_NullValue)(c).EqualVT(that.Kind) { + return false + } + case *structpb.Value_NumberValue: + if !(*Value_NumberValue)(c).EqualVT(that.Kind) { + return false + } + case *structpb.Value_StringValue: + if !(*Value_StringValue)(c).EqualVT(that.Kind) { + return false + } + case *structpb.Value_BoolValue: + if !(*Value_BoolValue)(c).EqualVT(that.Kind) { + return false + } + case *structpb.Value_StructValue: + if !(*Value_StructValue)(c).EqualVT(that.Kind) { + return false + } + case *structpb.Value_ListValue: + if !(*Value_ListValue)(c).EqualVT(that.Kind) { + return false + } + } + } + return true +} + +func (this *Value_NullValue) EqualVT(thatIface any) bool { + that, ok := thatIface.(*Value_NullValue) + if !ok { + if ot, ok := thatIface.(*structpb.Value_NullValue); ok { + that = (*Value_NullValue)(ot) + } else { + return false + } + } + if this == that { + return true + } + if this == nil && that != nil || this != nil && that == nil { + return false + } + if this.NullValue != that.NullValue { + return false + } + return true +} + +func (this *Value_NumberValue) EqualVT(thatIface any) bool { + that, ok := thatIface.(*Value_NumberValue) + if !ok { + if ot, ok := thatIface.(*structpb.Value_NumberValue); ok { + that = (*Value_NumberValue)(ot) + } else { + return false + } + } + if this == that { + return true + } + if this == nil && that != nil || this != nil && that == nil { + return false + } + if this.NumberValue != that.NumberValue { + return false + } + return true +} + +func (this *Value_StringValue) EqualVT(thatIface any) bool { + that, ok := thatIface.(*Value_StringValue) + if !ok { + if ot, ok := thatIface.(*structpb.Value_StringValue); ok { + that = (*Value_StringValue)(ot) + } else { + return false + } + } + if this == that { + return true + } + if this == nil && that != nil || this != nil && that == nil { + return false + } + if this.StringValue != that.StringValue { + return false + } + return true +} + +func (this *Value_BoolValue) EqualVT(thatIface any) bool { + that, ok := thatIface.(*Value_BoolValue) + if !ok { + if ot, ok := thatIface.(*structpb.Value_BoolValue); ok { + that = (*Value_BoolValue)(ot) + } else { + return false + } + } + if this == that { + return true + } + if this == nil && that != nil || this != nil && that == nil { + return false + } + if this.BoolValue != that.BoolValue { + return false + } + return true +} + +func (this *Value_StructValue) EqualVT(thatIface any) bool { + that, ok := thatIface.(*Value_StructValue) + if !ok { + if ot, ok := thatIface.(*structpb.Value_StructValue); ok { + that = (*Value_StructValue)(ot) + } else { + return false + } + } + if this == that { + return true + } + if this == nil && that != nil || this != nil && that == nil { + return false + } + if p, q := this.StructValue, that.StructValue; p != q { + if p == nil { + p = &structpb.Struct{} + } + if q == nil { + q = &structpb.Struct{} + } + if !(*Struct)(p).EqualVT((*Struct)(q)) { + return false + } + } + return true +} + +func (this *Value_ListValue) EqualVT(thatIface any) bool { + that, ok := thatIface.(*Value_ListValue) + if 
!ok { + if ot, ok := thatIface.(*structpb.Value_ListValue); ok { + that = (*Value_ListValue)(ot) + } else { + return false + } + } + if this == that { + return true + } + if this == nil && that != nil || this != nil && that == nil { + return false + } + if p, q := this.ListValue, that.ListValue; p != q { + if p == nil { + p = &structpb.ListValue{} + } + if q == nil { + q = &structpb.ListValue{} + } + if !(*ListValue)(p).EqualVT((*ListValue)(q)) { + return false + } + } + return true +} + +func (this *ListValue) EqualVT(that *ListValue) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if len(this.Values) != len(that.Values) { + return false + } + for i, vx := range this.Values { + vy := that.Values[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &structpb.Value{} + } + if q == nil { + q = &structpb.Value{} + } + if !(*Value)(p).EqualVT((*Value)(q)) { + return false + } + } + } + return true +} + +func (m *Struct) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Struct) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Struct) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Fields) > 0 { + for k := range m.Fields { + v := m.Fields[k] + baseI := i + size, err := (*Value)(v).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Value) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Value) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Value) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + switch c := m.Kind.(type) { + case *structpb.Value_NullValue: + size, err := (*Value_NullValue)(c).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + case *structpb.Value_NumberValue: + size, err := (*Value_NumberValue)(c).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + case *structpb.Value_StringValue: + size, err := (*Value_StringValue)(c).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + case *structpb.Value_BoolValue: + size, err := (*Value_BoolValue)(c).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + case *structpb.Value_StructValue: + size, err := (*Value_StructValue)(c).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + case *structpb.Value_ListValue: + size, err := (*Value_ListValue)(c).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, 
nil +} + +func (m *Value_NullValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Value_NullValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.NullValue)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Value_NumberValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Value_NumberValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.NumberValue)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *Value_StringValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Value_StringValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringValue) + copy(dAtA[i:], m.StringValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StringValue))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *Value_BoolValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Value_BoolValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *Value_StructValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Value_StructValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StructValue != nil { + size, err := (*Struct)(m.StructValue).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Value_ListValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Value_ListValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListValue != nil { + size, err := (*ListValue)(m.ListValue).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *ListValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ListValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + size, err := (*Value)(m.Values[iNdEx]).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, 
nil +} + +func (m *Struct) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Struct) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Struct) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Fields) > 0 { + for k := range m.Fields { + v := m.Fields[k] + baseI := i + size, err := (*Value)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Value) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Value) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Value) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m, ok := m.Kind.(*structpb.Value_ListValue); ok { + msg := ((*Value_ListValue)(m)) + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m, ok := m.Kind.(*structpb.Value_StructValue); ok { + msg := ((*Value_StructValue)(m)) + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m, ok := m.Kind.(*structpb.Value_BoolValue); ok { + msg := ((*Value_BoolValue)(m)) + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m, ok := m.Kind.(*structpb.Value_StringValue); ok { + msg := ((*Value_StringValue)(m)) + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m, ok := m.Kind.(*structpb.Value_NumberValue); ok { + msg := ((*Value_NumberValue)(m)) + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m, ok := m.Kind.(*structpb.Value_NullValue); ok { + msg := ((*Value_NullValue)(m)) + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Value_NullValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Value_NullValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.NullValue)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Value_NumberValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Value_NumberValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + 
binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.NumberValue)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *Value_StringValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Value_StringValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringValue) + copy(dAtA[i:], m.StringValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StringValue))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *Value_BoolValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Value_BoolValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *Value_StructValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Value_StructValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StructValue != nil { + size, err := (*Struct)(m.StructValue).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Value_ListValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Value_ListValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListValue != nil { + size, err := (*ListValue)(m.ListValue).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *ListValue) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + size, err := (*Value)(m.Values[iNdEx]).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Struct) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Fields) > 0 { + for k, v := range m.Fields { + _ = k + _ = v + l = 0 + if v != nil { + l = (*Value)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + return n +} + +func (m *Value) SizeVT() (n int) { + if m 
== nil { + return 0 + } + var l int + _ = l + switch c := m.Kind.(type) { + case *structpb.Value_NullValue: + n += (*Value_NullValue)(c).SizeVT() + case *structpb.Value_NumberValue: + n += (*Value_NumberValue)(c).SizeVT() + case *structpb.Value_StringValue: + n += (*Value_StringValue)(c).SizeVT() + case *structpb.Value_BoolValue: + n += (*Value_BoolValue)(c).SizeVT() + case *structpb.Value_StructValue: + n += (*Value_StructValue)(c).SizeVT() + case *structpb.Value_ListValue: + n += (*Value_ListValue)(c).SizeVT() + } + return n +} + +func (m *Value_NullValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.NullValue)) + return n +} +func (m *Value_NumberValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *Value_StringValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StringValue) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *Value_BoolValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *Value_StructValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StructValue != nil { + l = (*Struct)(m.StructValue).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *Value_ListValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListValue != nil { + l = (*ListValue)(m.ListValue).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *ListValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Values) > 0 { + for _, e := range m.Values { + l = (*Value)(e).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + return n +} + +func (m *Struct) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Struct: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Struct: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Fields == nil { + m.Fields = make(map[string]*structpb.Value) + } + var mapkey string + var mapvalue *structpb.Value + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return protohelpers.ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &structpb.Value{} + if err := (*Value)(mapvalue).UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Fields[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Value) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NullValue", wireType) + } + var v structpb.NullValue + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= structpb.NullValue(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Kind = &structpb.Value_NullValue{NullValue: v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberValue", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Kind = 
&structpb.Value_NumberValue{NumberValue: float64(math.Float64frombits(v))} + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = &structpb.Value_StringValue{StringValue: string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Kind = &structpb.Value_BoolValue{BoolValue: b} + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StructValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if oneof, ok := m.Kind.(*structpb.Value_StructValue); ok { + if err := (*Struct)(oneof.StructValue).UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + v := &structpb.Struct{} + if err := (*Struct)(v).UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &structpb.Value_StructValue{StructValue: v} + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if oneof, ok := m.Kind.(*structpb.Value_ListValue); ok { + if err := (*ListValue)(oneof.ListValue).UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + v := &structpb.ListValue{} + if err := (*ListValue)(v).UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &structpb.Value_ListValue{ListValue: v} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*ListValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, &structpb.Value{}) + if err := (*Value)(m.Values[len(m.Values)-1]).UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Struct) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Struct: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Struct: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Fields == nil { + m.Fields = make(map[string]*structpb.Value) + } + var mapkey string + var mapvalue *structpb.Value + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + if intStringLenmapkey == 0 { + mapkey = "" + } else { + mapkey = unsafe.String(&dAtA[iNdEx], intStringLenmapkey) + } + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return protohelpers.ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &structpb.Value{} + if err := (*Value)(mapvalue).UnmarshalVTUnsafe(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Fields[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Value) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NullValue", wireType) + } + var v structpb.NullValue + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= structpb.NullValue(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Kind = &structpb.Value_NullValue{NullValue: v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberValue", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Kind = &structpb.Value_NumberValue{NumberValue: 
float64(math.Float64frombits(v))} + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var stringValue string + if intStringLen > 0 { + stringValue = unsafe.String(&dAtA[iNdEx], intStringLen) + } + m.Kind = &structpb.Value_StringValue{StringValue: stringValue} + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Kind = &structpb.Value_BoolValue{BoolValue: b} + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StructValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if oneof, ok := m.Kind.(*structpb.Value_StructValue); ok { + if err := (*Struct)(oneof.StructValue).UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + v := &structpb.Struct{} + if err := (*Struct)(v).UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &structpb.Value_StructValue{StructValue: v} + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if oneof, ok := m.Kind.(*structpb.Value_ListValue); ok { + if err := (*ListValue)(oneof.ListValue).UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + v := &structpb.ListValue{} + if err := (*ListValue)(v).UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &structpb.Value_ListValue{ListValue: v} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if 
iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListValue) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, &structpb.Value{}) + if err := (*Value)(m.Values[len(m.Values)-1]).UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/types/known/timestamppb/timestamp_vtproto.pb.go b/terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/types/known/timestamppb/timestamp_vtproto.pb.go new file mode 100644 index 00000000000..5c63f308f08 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/types/known/timestamppb/timestamp_vtproto.pb.go @@ -0,0 +1,317 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: (devel) +// source: google/protobuf/timestamp.proto + +package timestamppb + +import ( + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + io "io" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Timestamp timestamppb.Timestamp + +func (m *Timestamp) CloneVT() *Timestamp { + if m == nil { + return (*Timestamp)(nil) + } + r := new(Timestamp) + r.Seconds = m.Seconds + r.Nanos = m.Nanos + return r +} + +func (this *Timestamp) EqualVT(that *Timestamp) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Seconds != that.Seconds { + return false + } + if this.Nanos != that.Nanos { + return false + } + return true +} + +func (m *Timestamp) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Timestamp) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Timestamp) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Nanos != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Timestamp) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Timestamp) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Timestamp) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Nanos != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Timestamp) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Seconds != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Nanos)) + } + return n +} + +func (m *Timestamp) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Timestamp) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/types/known/wrapperspb/wrappers_vtproto.pb.go b/terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/types/known/wrapperspb/wrappers_vtproto.pb.go new file mode 100644 index 00000000000..0dac9b28229 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/planetscale/vtprotobuf/types/known/wrapperspb/wrappers_vtproto.pb.go @@ -0,0 +1,2240 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: (devel) +// source: google/protobuf/wrappers.proto + +package wrapperspb + +import ( + binary "encoding/binary" + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + io "io" + math "math" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type DoubleValue wrapperspb.DoubleValue +type FloatValue wrapperspb.FloatValue +type Int64Value wrapperspb.Int64Value +type UInt64Value wrapperspb.UInt64Value +type Int32Value wrapperspb.Int32Value +type UInt32Value wrapperspb.UInt32Value +type BoolValue wrapperspb.BoolValue +type StringValue wrapperspb.StringValue +type BytesValue wrapperspb.BytesValue + +func (m *DoubleValue) CloneVT() *DoubleValue { + if m == nil { + return (*DoubleValue)(nil) + } + r := new(DoubleValue) + r.Value = m.Value + return r +} + +func (m *FloatValue) CloneVT() *FloatValue { + if m == nil { + return (*FloatValue)(nil) + } + r := new(FloatValue) + r.Value = m.Value + return r +} + +func (m *Int64Value) CloneVT() *Int64Value { + if m == nil { + return (*Int64Value)(nil) + } + r := new(Int64Value) + r.Value = m.Value + return r +} + +func (m *UInt64Value) CloneVT() *UInt64Value { + if m == nil { + return (*UInt64Value)(nil) + } + r := new(UInt64Value) + r.Value = m.Value + return r +} + +func (m *Int32Value) CloneVT() *Int32Value { + if m == nil { + return (*Int32Value)(nil) + } + r := new(Int32Value) + r.Value = m.Value + return r +} + +func (m *UInt32Value) CloneVT() *UInt32Value { + if m == nil { + return (*UInt32Value)(nil) + } + r := new(UInt32Value) + r.Value = m.Value + return r +} + +func (m *BoolValue) CloneVT() *BoolValue { + if m == nil { + return (*BoolValue)(nil) + } + r := new(BoolValue) + r.Value = m.Value + return r +} + +func (m *StringValue) CloneVT() *StringValue { + if m == nil { + return (*StringValue)(nil) + } + r := new(StringValue) + r.Value = m.Value + return r +} + +func (m *BytesValue) CloneVT() *BytesValue { + if m == nil { + return (*BytesValue)(nil) + } + r := new(BytesValue) + if rhs := m.Value; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Value = tmpBytes + } + return r +} + +func (this *DoubleValue) EqualVT(that *DoubleValue) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Value != that.Value { + return false + } + return true +} + +func (this *FloatValue) EqualVT(that *FloatValue) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Value != that.Value { + return false + } + return true +} + +func (this *Int64Value) EqualVT(that *Int64Value) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Value != that.Value { + return false + } + return true +} + +func (this *UInt64Value) EqualVT(that *UInt64Value) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Value != that.Value { + return false + } + return true +} + +func (this *Int32Value) EqualVT(that *Int32Value) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Value != that.Value { + return false + } + return true +} + +func (this *UInt32Value) EqualVT(that *UInt32Value) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Value != that.Value { + return false + } + return true +} + +func (this *BoolValue) EqualVT(that *BoolValue) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Value != that.Value { + 
return false + } + return true +} + +func (this *StringValue) EqualVT(that *StringValue) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Value != that.Value { + return false + } + return true +} + +func (this *BytesValue) EqualVT(that *BytesValue) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if string(this.Value) != string(that.Value) { + return false + } + return true +} + +func (m *DoubleValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *DoubleValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *FloatValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FloatValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *FloatValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i -= 4 + binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Value)))) + i-- + dAtA[i] = 0xd + } + return len(dAtA) - i, nil +} + +func (m *Int64Value) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int64Value) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Int64Value) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UInt64Value) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt64Value) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UInt64Value) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Int32Value) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err 
!= nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int32Value) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Int32Value) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UInt32Value) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt32Value) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UInt32Value) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BoolValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BoolValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *BoolValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value { + i-- + if m.Value { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StringValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StringValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StringValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BytesValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BytesValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *BytesValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DoubleValue) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DoubleValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *FloatValue) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FloatValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FloatValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i -= 4 + binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Value)))) + i-- + dAtA[i] = 0xd + } + return len(dAtA) - i, nil +} + +func (m *Int64Value) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int64Value) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Int64Value) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UInt64Value) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt64Value) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UInt64Value) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Int32Value) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int32Value) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Int32Value) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UInt32Value) 
MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt32Value) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UInt32Value) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BoolValue) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BoolValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *BoolValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value { + i-- + if m.Value { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StringValue) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StringValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StringValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BytesValue) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BytesValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *BytesValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DoubleValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + return n +} + +func (m *FloatValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 5 + } + return n +} + +func (m *Int64Value) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Value)) + } + return n +} + +func (m *UInt64Value) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value 
!= 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Value)) + } + return n +} + +func (m *Int32Value) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Value)) + } + return n +} + +func (m *UInt32Value) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Value)) + } + return n +} + +func (m *BoolValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value { + n += 2 + } + return n +} + +func (m *StringValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + return n +} + +func (m *BytesValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + return n +} + +func (m *DoubleValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DoubleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DoubleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FloatValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FloatValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FloatValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.Value = float32(math.Float32frombits(v)) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int64Value) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt64Value) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int32Value) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int32Value: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt32Value) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BoolValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BoolValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BoolValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *StringValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StringValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StringValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BytesValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BytesValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BytesValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DoubleValue) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DoubleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DoubleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FloatValue) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FloatValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FloatValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.Value = float32(math.Float32frombits(v)) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int64Value) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if 
wireType == 4 { + return fmt.Errorf("proto: Int64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt64Value) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int32Value) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy 
< 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt32Value) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BoolValue) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BoolValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BoolValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StringValue) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: StringValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StringValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var stringValue string + if intStringLen > 0 { + stringValue = unsafe.String(&dAtA[iNdEx], intStringLen) + } + m.Value = stringValue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BytesValue) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BytesValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BytesValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = dAtA[iNdEx:postIndex] + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/terraform/providers/google/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go b/terraform/providers/google/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go index 1816bb9c968..25df19b0a2b 100644 --- a/terraform/providers/google/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go +++ b/terraform/providers/google/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go @@ -1506,8 +1506,8 @@ func Keys(inputMap cty.Value) (cty.Value, error) { } // 
Lookup performs a dynamic lookup into a map. -// There are two required arguments, map and key, plus an optional default, -// which is a value to return if no key is found in map. +// There are three required arguments, inputMap and key, plus a defaultValue, +// which is a value to return if the given key is not found in the inputMap. func Lookup(inputMap, key, defaultValue cty.Value) (cty.Value, error) { return LookupFunc.Call([]cty.Value{inputMap, key, defaultValue}) } diff --git a/terraform/providers/google/vendor/github.com/zclconf/go-cty/cty/function/stdlib/conversion.go b/terraform/providers/google/vendor/github.com/zclconf/go-cty/cty/function/stdlib/conversion.go index 5d06a4519ed..406dea23325 100644 --- a/terraform/providers/google/vendor/github.com/zclconf/go-cty/cty/function/stdlib/conversion.go +++ b/terraform/providers/google/vendor/github.com/zclconf/go-cty/cty/function/stdlib/conversion.go @@ -30,8 +30,9 @@ func MakeToFunc(wantTy cty.Type) function.Function { // messages to be more appropriate for an explicit type // conversion, whereas the cty function system produces // messages aimed at _implicit_ type conversions. - Type: cty.DynamicPseudoType, - AllowNull: true, + Type: cty.DynamicPseudoType, + AllowNull: true, + AllowDynamicType: true, }, }, Type: func(args []cty.Value) (cty.Type, error) { diff --git a/terraform/providers/google/vendor/github.com/zclconf/go-cty/cty/json/marshal.go b/terraform/providers/google/vendor/github.com/zclconf/go-cty/cty/json/marshal.go index 7a14ce81a42..07d9f33178b 100644 --- a/terraform/providers/google/vendor/github.com/zclconf/go-cty/cty/json/marshal.go +++ b/terraform/providers/google/vendor/github.com/zclconf/go-cty/cty/json/marshal.go @@ -12,6 +12,9 @@ func marshal(val cty.Value, t cty.Type, path cty.Path, b *bytes.Buffer) error { if val.IsMarked() { return path.NewErrorf("value has marks, so it cannot be serialized as JSON") } + if !val.IsKnown() { + return path.NewErrorf("value is not known") + } // If we're going to decode as DynamicPseudoType then we need to save // dynamic type information to recover the real type. @@ -24,10 +27,6 @@ func marshal(val cty.Value, t cty.Type, path cty.Path, b *bytes.Buffer) error { return nil } - if !val.IsKnown() { - return path.NewErrorf("value is not known") - } - // The caller should've guaranteed that the given val is conformant with // the given type t, so we'll proceed under that assumption here. @@ -185,7 +184,10 @@ func marshalDynamic(val cty.Value, path cty.Path, b *bytes.Buffer) error { return path.NewErrorf("failed to serialize type: %s", err) } b.WriteString(`{"value":`) - marshal(val, val.Type(), path, b) + err = marshal(val, val.Type(), path, b) + if err != nil { + return path.NewErrorf("failed to serialize value: %s", err) + } b.WriteString(`,"type":`) b.Write(typeJSON) b.WriteRune('}') diff --git a/terraform/providers/google/vendor/github.com/zclconf/go-cty/cty/primitive_type.go b/terraform/providers/google/vendor/github.com/zclconf/go-cty/cty/primitive_type.go index 3ce2540bb6b..2beea652d87 100644 --- a/terraform/providers/google/vendor/github.com/zclconf/go-cty/cty/primitive_type.go +++ b/terraform/providers/google/vendor/github.com/zclconf/go-cty/cty/primitive_type.go @@ -1,6 +1,8 @@ package cty -import "math/big" +import ( + "math/big" +) // primitiveType is the hidden implementation of the various primitive types // that are exposed as variables in this package. 
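Reviewer note (not part of the vendored diff): the UnmarshalVTUnsafe bodies above are generated by vtprotobuf, and every field inlines the same base-128 varint decoding loop. A minimal standalone sketch of that decoding step, for readers following the generated loops (decodeUvarint and the sample bytes are illustrative names, not part of the patch):

package main

import (
    "errors"
    "fmt"
)

// decodeUvarint decodes a protobuf base-128 varint from b and returns the
// value plus the number of bytes consumed. Each byte contributes its low
// seven bits; a set high bit means another byte follows. This mirrors the
// "wire |= uint64(b&0x7F) << shift" loops in the generated code above.
func decodeUvarint(b []byte) (uint64, int, error) {
    var v uint64
    for i, shift := 0, uint(0); i < len(b); i, shift = i+1, shift+7 {
        if shift >= 64 {
            // Corresponds to protohelpers.ErrIntOverflow in the generated code.
            return 0, 0, errors.New("varint overflows uint64")
        }
        v |= uint64(b[i]&0x7F) << shift
        if b[i] < 0x80 { // high bit clear: final byte
            return v, i + 1, nil
        }
    }
    // Corresponds to io.ErrUnexpectedEOF in the generated code.
    return 0, 0, errors.New("unexpected EOF")
}

func main() {
    v, n, err := decodeUvarint([]byte{0xAC, 0x02}) // 0x2C | 0x02<<7 = 300
    fmt.Println(v, n, err)                         // 300 2 <nil>
}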
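Reviewer note (not part of the vendored diff): the go-cty hunks above tighten the Lookup documentation (defaultValue is required, not optional), let MakeToFunc accept unknown and dynamic values, and move the unknown-value check in cty/json ahead of the DynamicPseudoType path so marshal errors propagate instead of writing partial JSON. A minimal usage sketch of the Lookup contract as now documented (the map contents are illustrative):

package main

import (
    "fmt"

    "github.com/zclconf/go-cty/cty"
    "github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
    m := cty.MapVal(map[string]cty.Value{
        "region": cty.StringVal("us-central1"),
    })

    // Key present: the stored value is returned.
    got, _ := stdlib.Lookup(m, cty.StringVal("region"), cty.StringVal("none"))
    fmt.Println(got.AsString()) // us-central1

    // Key absent: the required third argument (defaultValue) is returned.
    got, _ = stdlib.Lookup(m, cty.StringVal("zone"), cty.StringVal("none"))
    fmt.Println(got.AsString()) // none
}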
@@ -77,6 +79,18 @@ func rawNumberEqual(a, b *big.Float) bool { case a.Sign() != b.Sign(): return false default: + // First check if these are integers, and compare them directly. Floats + // need a more nuanced approach. + aInt, aAcc := a.Int(nil) + bInt, bAcc := b.Int(nil) + if aAcc != bAcc { + // only one is an exact integer value, so they can't be equal + return false + } + if aAcc == big.Exact { + return aInt.Cmp(bInt) == 0 + } + // This format and precision matches that used by cty/json.Marshal, // and thus achieves our definition of "two numbers are equal if // we'd use the same JSON serialization for both of them". diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go index 06282ce79c6..18436eaedff 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -1,20 +1,11 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" import ( + "google.golang.org/grpc/stats" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" @@ -31,18 +22,28 @@ const ( GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) -// Filter is a predicate used to determine whether a given request in -// interceptor info should be traced. A Filter must return true if +// InterceptorFilter is a predicate used to determine whether a given request in +// interceptor info should be instrumented. An InterceptorFilter must return true if // the request should be traced. -type Filter func(*InterceptorInfo) bool +// +// Deprecated: Use stats handlers instead. +type InterceptorFilter func(*InterceptorInfo) bool + +// Filter is a predicate used to determine whether a given request +// should be instrumented by the attached RPC tag info. +// A Filter must return true if the request should be instrumented. +type Filter func(*stats.RPCTagInfo) bool // config is a group of options for this instrumentation.
type config struct { - Filter Filter - Propagators propagation.TextMapPropagator - TracerProvider trace.TracerProvider - MeterProvider metric.MeterProvider - SpanStartOptions []trace.SpanStartOption + Filter Filter + InterceptorFilter InterceptorFilter + Propagators propagation.TextMapPropagator + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider + SpanStartOptions []trace.SpanStartOption + SpanAttributes []attribute.KeyValue + MetricAttributes []attribute.KeyValue ReceivedEvent bool SentEvent bool @@ -163,15 +164,30 @@ func (o tracerProviderOption) apply(c *config) { // WithInterceptorFilter returns an Option to use the request filter. // // Deprecated: Use stats handlers instead. -func WithInterceptorFilter(f Filter) Option { +func WithInterceptorFilter(f InterceptorFilter) Option { return interceptorFilterOption{f: f} } type interceptorFilterOption struct { - f Filter + f InterceptorFilter } func (o interceptorFilterOption) apply(c *config) { + if o.f != nil { + c.InterceptorFilter = o.f + } +} + +// WithFilter returns an Option to use the request filter. +func WithFilter(f Filter) Option { + return filterOption{f: f} +} + +type filterOption struct { + f Filter +} + +func (o filterOption) apply(c *config) { if o.f != nil { c.Filter = o.f } @@ -243,3 +259,29 @@ func (o spanStartOption) apply(c *config) { func WithSpanOptions(opts ...trace.SpanStartOption) Option { return spanStartOption{opts} } + +type spanAttributesOption struct{ a []attribute.KeyValue } + +func (o spanAttributesOption) apply(c *config) { + if o.a != nil { + c.SpanAttributes = o.a + } +} + +// WithSpanAttributes returns an Option to add custom attributes to the spans. +func WithSpanAttributes(a ...attribute.KeyValue) Option { + return spanAttributesOption{a: a} +} + +type metricAttributesOption struct{ a []attribute.KeyValue } + +func (o metricAttributesOption) apply(c *config) { + if o.a != nil { + c.MetricAttributes = o.a + } +} + +// WithMetricAttributes returns an Option to add custom attributes to the metrics. +func WithMetricAttributes(a ...attribute.KeyValue) Option { + return metricAttributesOption{a: a} +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go index 958dcd87a4c..b8b836b00fb 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package otelgrpc is the instrumentation library for [google.golang.org/grpc]. 
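Reviewer note (not part of the vendored diff): the config.go changes above split the old Filter type into a deprecated InterceptorFilter (for the deprecated interceptors) and a new Filter over *stats.RPCTagInfo for the stats handlers, and add WithSpanAttributes / WithMetricAttributes for static extra attributes. A sketch of how a consumer would use the new surface (the endpoint and attribute values are placeholders):

package main

import (
    "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
    "go.opentelemetry.io/otel/attribute"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
    "google.golang.org/grpc/stats"
)

func main() {
    // Stats-handler based instrumentation, replacing the deprecated interceptors.
    handler := otelgrpc.NewClientHandler(
        // Skip health checks; every other RPC is recorded.
        otelgrpc.WithFilter(func(info *stats.RPCTagInfo) bool {
            return info.FullMethodName != "/grpc.health.v1.Health/Check"
        }),
        // Extra attributes stamped onto every metric data point.
        otelgrpc.WithMetricAttributes(attribute.String("component", "terraform-provider-google")),
    )

    conn, err := grpc.NewClient("localhost:4317",
        grpc.WithTransportCredentials(insecure.NewCredentials()),
        grpc.WithStatsHandler(handler),
    )
    if err != nil {
        panic(err)
    }
    defer conn.Close()
}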
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go index 3b487a93623..7d5ed058082 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" @@ -18,6 +7,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md import ( "context" + "errors" "io" "net" "strconv" @@ -59,7 +49,7 @@ var ( ) // UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable -// for use in a grpc.Dial call. +// for use in a grpc.NewClient call. // // Deprecated: Use [NewClientHandler] instead. func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { @@ -81,7 +71,7 @@ func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { Method: method, Type: UnaryClient, } - if cfg.Filter != nil && !cfg.Filter(i) { + if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { return invoker(ctx, method, req, reply, cc, callOpts...) } @@ -147,7 +137,7 @@ func (w *clientStream) RecvMsg(m interface{}) error { if err == nil && !w.desc.ServerStreams { w.endSpan(nil) - } else if err == io.EOF { + } else if errors.Is(err, io.EOF) { w.endSpan(nil) } else if err != nil { w.endSpan(err) @@ -196,7 +186,7 @@ func (w *clientStream) CloseSend() error { return err } -func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.StreamDesc, span trace.Span, cfg *config) *clientStream { +func wrapClientStream(s grpc.ClientStream, desc *grpc.StreamDesc, span trace.Span, cfg *config) *clientStream { return &clientStream{ ClientStream: s, span: span, @@ -219,7 +209,7 @@ func (w *clientStream) endSpan(err error) { } // StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable -// for use in a grpc.Dial call. +// for use in a grpc.NewClient call. // // Deprecated: Use [NewClientHandler] instead. func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { @@ -241,7 +231,7 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { Method: method, Type: StreamClient, } - if cfg.Filter != nil && !cfg.Filter(i) { + if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { return streamer(ctx, desc, cc, method, callOpts...) 
} @@ -270,7 +260,7 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { span.End() return s, err } - stream := wrapClientStream(ctx, s, desc, span, cfg) + stream := wrapClientStream(s, desc, span, cfg) return stream, nil } } @@ -296,7 +286,7 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { UnaryServerInfo: info, Type: UnaryServer, } - if cfg.Filter != nil && !cfg.Filter(i) { + if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { return handler(ctx, req) } @@ -344,7 +334,7 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { elapsedTime := float64(time.Since(before)) / float64(time.Millisecond) metricAttrs = append(metricAttrs, grpcStatusCodeAttr) - cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...)) + cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) return resp, err } @@ -422,7 +412,7 @@ func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { StreamServerInfo: info, Type: StreamServer, } - if cfg.Filter != nil && !cfg.Filter(i) { + if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { return handler(srv, wrapServerStream(ctx, ss, cfg)) } diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go index f6116946bfd..b62f7cd7c46 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go index cf32a9e978c..bef07b7a3ca 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go index f585fb6ae0c..3aa37915df2 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go index b65fab308f3..409c621b74c 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go index 73d2b8b6b27..fbcbfb84e04 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" @@ -38,6 +27,7 @@ type gRPCContext struct { messagesReceived int64 messagesSent int64 metricAttrs []attribute.KeyValue + record bool } type serverHandler struct { @@ -72,11 +62,15 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), name, trace.WithSpanKind(trace.SpanKindServer), - trace.WithAttributes(attrs...), + trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), ) gctx := gRPCContext{ - metricAttrs: attrs, + metricAttrs: append(attrs, h.config.MetricAttributes...), + record: true, + } + if h.config.Filter != nil { + gctx.record = h.config.Filter(info) } return context.WithValue(ctx, gRPCContextKey{}, &gctx) } @@ -108,11 +102,15 @@ func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont ctx, name, trace.WithSpanKind(trace.SpanKindClient), - trace.WithAttributes(attrs...), + trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), ) gctx := gRPCContext{ - metricAttrs: attrs, + metricAttrs: append(attrs, h.config.MetricAttributes...), + record: true, + } + if h.config.Filter != nil { + gctx.record = h.config.Filter(info) } return inject(context.WithValue(ctx, gRPCContextKey{}, &gctx), h.config.Propagators) @@ -141,6 +139,9 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext) if gctx != nil { + if !gctx.record { + return + } metricAttrs = make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1) metricAttrs = append(metricAttrs, gctx.metricAttrs...) 
} @@ -150,7 +151,7 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool case *stats.InPayload: if gctx != nil { messageId = atomic.AddInt64(&gctx.messagesReceived, 1) - c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) + c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) } if c.ReceivedEvent { @@ -166,7 +167,7 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool case *stats.OutPayload: if gctx != nil { messageId = atomic.AddInt64(&gctx.messagesSent, 1) - c.rpcResponseSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) + c.rpcResponseSize.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) } if c.SentEvent { @@ -203,14 +204,17 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool span.End() metricAttrs = append(metricAttrs, rpcStatusAttr) + // Allocate vararg slice once. + recordOpts := []metric.RecordOption{metric.WithAttributeSet(attribute.NewSet(metricAttrs...))} // Use floating point division here for higher precision (instead of Millisecond method). + // Measure right before calling Record() to capture as much elapsed time as possible. elapsedTime := float64(rs.EndTime.Sub(rs.BeginTime)) / float64(time.Millisecond) - c.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...)) + c.rpcDuration.Record(ctx, elapsedTime, recordOpts...) if gctx != nil { - c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), metric.WithAttributes(metricAttrs...)) - c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), metric.WithAttributes(metricAttrs...)) + c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), recordOpts...) + c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), recordOpts...) } default: return diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index d633c4bef0c..04f425edfef 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -1,22 +1,11 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" // Version is the current release version of the gRPC instrumentation. 
func Version() string { - return "0.49.0" + return "0.54.0" // This string is updated by the pre_release.sh script during release } diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index 92b8cf73c97..6aae83bfd20 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -23,7 +12,7 @@ import ( ) // DefaultClient is the default Client and is used by Get, Head, Post and PostForm. -// Please be careful of intitialization order - for example, if you change +// Please be careful of initialization order - for example, if you change // the global propagator, the DefaultClient might still be using the old one. var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index cabf645a5b5..5d6e6156b7b 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -29,13 +18,6 @@ const ( WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) ) -// Server HTTP metrics. -const ( - serverRequestSize = "http.server.request.size" // Incoming request bytes total - serverResponseSize = "http.server.response.size" // Incoming response bytes total - serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds -) - // Client HTTP metrics. 
const ( clientRequestSize = "http.client.request.size" // Outgoing request bytes total diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index a1b5b5e5aa8..a01bfafbe07 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -19,6 +8,8 @@ import ( "net/http" "net/http/httptrace" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" @@ -44,8 +35,9 @@ type config struct { SpanNameFormatter func(string, *http.Request) string ClientTrace func(context.Context) *httptrace.ClientTrace - TracerProvider trace.TracerProvider - MeterProvider metric.MeterProvider + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider + MetricAttributesFn func(*http.Request) []attribute.KeyValue } // Option interface used for setting optional config properties. @@ -111,7 +103,7 @@ func WithPublicEndpoint() Option { }) } -// WithPublicEndpointFn runs with every request, and allows conditionnally +// WithPublicEndpointFn runs with every request, and allows conditionally // configuring the Handler to link the span with an incoming span context. If // this option is not provided or returns false, then the association is a // child association instead of a link. @@ -205,3 +197,11 @@ func WithServerName(server string) Option { c.ServerName = server }) } + +// WithMetricAttributesFn returns an Option to set a function that maps an HTTP request to a slice of attribute.KeyValue. +// These attributes will be included in metrics for every request. +func WithMetricAttributesFn(metricAttributesFn func(r *http.Request) []attribute.KeyValue) Option { + return optionFunc(func(c *config) { + c.MetricAttributesFn = metricAttributesFn + }) +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go index 38c7f01c71a..56b24b982ae 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package otelhttp provides an http.Handler and functions that are intended // to be used to add tracing by wrapping existing handlers (with Handler) and diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index 1fc15019e65..33580a35b77 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -1,32 +1,18 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" import ( - "io" "net/http" "time" "github.com/felixge/httpsnoop" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" "go.opentelemetry.io/otel/trace" ) @@ -36,7 +22,6 @@ type middleware struct { server string tracer trace.Tracer - meter metric.Meter propagators propagation.TextMapPropagator spanStartOptions []trace.SpanStartOption readEvent bool @@ -46,9 +31,7 @@ type middleware struct { publicEndpoint bool publicEndpointFn func(*http.Request) bool - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - serverLatencyMeasure metric.Float64Histogram + semconv semconv.HTTPServer } func defaultHandlerFormatter(operation string, _ *http.Request) string { @@ -76,7 +59,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han c := newConfig(append(defaultOpts, opts...)...) 
h.configure(c) - h.createMeasures() return func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -87,7 +69,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han func (h *middleware) configure(c *config) { h.tracer = c.Tracer - h.meter = c.Meter h.propagators = c.Propagators h.spanStartOptions = c.SpanStartOptions h.readEvent = c.ReadEvent @@ -97,6 +78,7 @@ func (h *middleware) configure(c *config) { h.publicEndpoint = c.PublicEndpoint h.publicEndpointFn = c.PublicEndpointFn h.server = c.ServerName + h.semconv = semconv.NewHTTPServer(c.Meter) } func handleErr(err error) { @@ -105,30 +87,6 @@ func handleErr(err error) { } } -func (h *middleware) createMeasures() { - var err error - h.requestBytesCounter, err = h.meter.Int64Counter( - serverRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - h.responseBytesCounter, err = h.meter.Int64Counter( - serverResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - h.serverLatencyMeasure, err = h.meter.Float64Histogram( - serverDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of inbound HTTP requests."), - ) - handleErr(err) -} - // serveHTTP sets up tracing and calls the given next http.Handler with the span // context injected into the request context. func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) { @@ -143,12 +101,9 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) opts := []trace.SpanStartOption{ - trace.WithAttributes(semconvutil.HTTPServerRequest(h.server, r)...), - } - if h.server != "" { - hostAttr := semconv.NetHostName(h.server) - opts = append(opts, trace.WithAttributes(hostAttr)) + trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r)...), } + opts = append(opts, h.spanStartOptions...) if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) { opts = append(opts, trace.WithNewRoot()) @@ -178,14 +133,12 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } - var bw bodyWrapper // if request body is nil or NoBody, we don't want to mutate the body as it // will affect the identity of it in an unforeseeable way because we assert // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. 
+ bw := request.NewBodyWrapper(r.Body, readRecordFunc) if r.Body != nil && r.Body != http.NoBody { - bw.ReadCloser = r.Body - bw.record = readRecordFunc - r.Body = &bw + r.Body = bw } writeRecordFunc := func(int64) {} @@ -195,13 +148,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } - rww := &respWriterWrapper{ - ResponseWriter: w, - record: writeRecordFunc, - ctx: ctx, - props: h.propagators, - statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything - } + rww := request.NewRespWriterWrapper(w, writeRecordFunc) // Wrap w to use our ResponseWriter methods while also exposing // other interfaces that w may implement (http.CloseNotifier, @@ -217,61 +164,48 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { return rww.WriteHeader }, + Flush: func(httpsnoop.FlushFunc) httpsnoop.FlushFunc { + return rww.Flush + }, }) - labeler := &Labeler{} - ctx = injectLabeler(ctx, labeler) + labeler, found := LabelerFromContext(ctx) + if !found { + ctx = ContextWithLabeler(ctx, labeler) + } next.ServeHTTP(w, r.WithContext(ctx)) - setAfterServeAttributes(span, bw.read.Load(), rww.written, rww.statusCode, bw.err, rww.err) - - // Add metrics - attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...) - if rww.statusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) - } - o := metric.WithAttributes(attributes...) - h.requestBytesCounter.Add(ctx, bw.read.Load(), o) - h.responseBytesCounter.Add(ctx, rww.written, o) + statusCode := rww.StatusCode() + bytesWritten := rww.BytesWritten() + span.SetStatus(h.semconv.Status(statusCode)) + span.SetAttributes(h.semconv.ResponseTraceAttrs(semconv.ResponseTelemetry{ + StatusCode: statusCode, + ReadBytes: bw.BytesRead(), + ReadError: bw.Error(), + WriteBytes: bytesWritten, + WriteError: rww.Error(), + })...) // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - h.serverLatencyMeasure.Record(ctx, elapsedTime, o) -} - -func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, rerr, werr error) { - attributes := []attribute.KeyValue{} - - // TODO: Consider adding an event after each read and write, possibly as an - // option (defaulting to off), so as to not create needlessly verbose spans. - if read > 0 { - attributes = append(attributes, ReadBytesKey.Int64(read)) - } - if rerr != nil && rerr != io.EOF { - attributes = append(attributes, ReadErrorKey.String(rerr.Error())) - } - if wrote > 0 { - attributes = append(attributes, WroteBytesKey.Int64(wrote)) - } - if statusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) - } - span.SetStatus(semconvutil.HTTPServerStatus(statusCode)) - - if werr != nil && werr != io.EOF { - attributes = append(attributes, WriteErrorKey.String(werr.Error())) - } - span.SetAttributes(attributes...) + h.semconv.RecordMetrics(ctx, semconv.MetricData{ + ServerName: h.server, + Req: r, + StatusCode: statusCode, + AdditionalAttributes: labeler.Get(), + RequestSize: bw.BytesRead(), + ResponseSize: bytesWritten, + ElapsedTime: elapsedTime, + }) } // WithRouteTag annotates spans and metrics with the provided route name // with HTTP route attribute. 
func WithRouteTag(route string, h http.Handler) http.Handler { + attr := semconv.NewHTTPServer(nil).Route(route) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - attr := semconv.HTTPRouteKey.String(route) - span := trace.SpanFromContext(r.Context()) span.SetAttributes(attr) diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go new file mode 100644 index 00000000000..a945f556616 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + +import ( + "io" + "sync" +) + +var _ io.ReadCloser = &BodyWrapper{} + +// BodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number +// of bytes read and the last error. +type BodyWrapper struct { + io.ReadCloser + OnRead func(n int64) // must not be nil + + mu sync.Mutex + read int64 + err error +} + +// NewBodyWrapper creates a new BodyWrapper. +// +// The onRead attribute is a callback that will be called every time the data +// is read, with the number of bytes being read. +func NewBodyWrapper(body io.ReadCloser, onRead func(int64)) *BodyWrapper { + return &BodyWrapper{ + ReadCloser: body, + OnRead: onRead, + } +} + +// Read reads the data from the io.ReadCloser, and stores the number of bytes +// read and the error. +func (w *BodyWrapper) Read(b []byte) (int, error) { + n, err := w.ReadCloser.Read(b) + n1 := int64(n) + + w.updateReadData(n1, err) + w.OnRead(n1) + return n, err +} + +func (w *BodyWrapper) updateReadData(n int64, err error) { + w.mu.Lock() + defer w.mu.Unlock() + + w.read += n + if err != nil { + w.err = err + } +} + +// Close closes the io.ReadCloser. +func (w *BodyWrapper) Close() error { + return w.ReadCloser.Close() +} + +// BytesRead returns the number of bytes read up to this point. +func (w *BodyWrapper) BytesRead() int64 { + w.mu.Lock() + defer w.mu.Unlock() + + return w.read +} + +// Error returns the last error. +func (w *BodyWrapper) Error() error { + w.mu.Lock() + defer w.mu.Unlock() + + return w.err +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go new file mode 100644 index 00000000000..aea171fb260 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go @@ -0,0 +1,112 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + +import ( + "net/http" + "sync" +) + +var _ http.ResponseWriter = &RespWriterWrapper{} + +// RespWriterWrapper wraps a http.ResponseWriter in order to track the number of +// bytes written, the last error, and to catch the first written statusCode.
+// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional +// types (http.Hijacker, http.Pusher, http.CloseNotifier, etc) +// that may be useful when using it in real life situations. +type RespWriterWrapper struct { + http.ResponseWriter + OnWrite func(n int64) // must not be nil + + mu sync.RWMutex + written int64 + statusCode int + err error + wroteHeader bool +} + +// NewRespWriterWrapper creates a new RespWriterWrapper. +// +// The onWrite attribute is a callback that will be called every time the data +// is written, with the number of bytes that were written. +func NewRespWriterWrapper(w http.ResponseWriter, onWrite func(int64)) *RespWriterWrapper { + return &RespWriterWrapper{ + ResponseWriter: w, + OnWrite: onWrite, + statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything + } +} + +// Write writes the bytes array into the [ResponseWriter], and tracks the +// number of bytes written and last error. +func (w *RespWriterWrapper) Write(p []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + + w.writeHeader(http.StatusOK) + + n, err := w.ResponseWriter.Write(p) + n1 := int64(n) + w.OnWrite(n1) + w.written += n1 + w.err = err + return n, err +} + +// WriteHeader persists initial statusCode for span attribution. +// All calls to WriteHeader will be propagated to the underlying ResponseWriter +// and will persist the statusCode from the first call. +// Consecutive calls are intentionally not blocked, because blocking them would alter +// the expected behavior and suppress the warning logs from net/http that let developers notice incorrect handler implementations. +func (w *RespWriterWrapper) WriteHeader(statusCode int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.writeHeader(statusCode) +} + +// writeHeader persists the status code for span attribution, and propagates +// the call to the underlying ResponseWriter. +// It does not acquire a lock, and therefore assumes that locking is being handled +// by a parent method. +func (w *RespWriterWrapper) writeHeader(statusCode int) { + if !w.wroteHeader { + w.wroteHeader = true + w.statusCode = statusCode + } + w.ResponseWriter.WriteHeader(statusCode) +} + +// Flush implements [http.Flusher]. +func (w *RespWriterWrapper) Flush() { + w.WriteHeader(http.StatusOK) + + if f, ok := w.ResponseWriter.(http.Flusher); ok { + f.Flush() + } +} + +// BytesWritten returns the number of bytes written. +func (w *RespWriterWrapper) BytesWritten() int64 { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.written +} + +// StatusCode returns the HTTP status code that was sent. +func (w *RespWriterWrapper) StatusCode() int { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.statusCode +} + +// Error returns the last error.
+func (w *RespWriterWrapper) Error() error { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.err +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go new file mode 100644 index 00000000000..9cae4cab86a --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -0,0 +1,165 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "context" + "fmt" + "net/http" + "os" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" +) + +type ResponseTelemetry struct { + StatusCode int + ReadBytes int64 + ReadError error + WriteBytes int64 + WriteError error +} + +type HTTPServer struct { + duplicate bool + + // Old metrics + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + serverLatencyMeasure metric.Float64Histogram +} + +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + if s.duplicate { + return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...) + } + return oldHTTPServer{}.RequestTraceAttrs(server, req) +} + +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. +func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + if s.duplicate { + return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...) + } + return oldHTTPServer{}.ResponseTraceAttrs(resp) +} + +// Route returns the attribute for the route. +func (s HTTPServer) Route(route string) attribute.KeyValue { + return oldHTTPServer{}.Route(route) +} + +// Status returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. 
+func (s HTTPServer) Status(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 500 { + return codes.Error, "" + } + return codes.Unset, "" +} + +type MetricData struct { + ServerName string + Req *http.Request + StatusCode int + AdditionalAttributes []attribute.KeyValue + + RequestSize int64 + ResponseSize int64 + ElapsedTime float64 +} + +func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) { + if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil { + // This will happen if an HTTPServer{} is used instead of NewHTTPServer. + return + } + + attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + addOpts := []metric.AddOption{o} // Allocate vararg slice once. + s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...) + s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...) + s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) + + // TODO: Duplicate Metrics +} + +func NewHTTPServer(meter metric.Meter) HTTPServer { + env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + duplicate := env == "http/dup" + server := HTTPServer{ + duplicate: duplicate, + } + server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter) + return server +} + +type HTTPClient struct { + duplicate bool +} + +func NewHTTPClient() HTTPClient { + env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + return HTTPClient{duplicate: env == "http/dup"} +} + +// RequestTraceAttrs returns attributes for an HTTP request made by a client. +func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + if c.duplicate { + return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...) + } + return oldHTTPClient{}.RequestTraceAttrs(req) +} + +// ResponseTraceAttrs returns trace attributes for an HTTP response received by a client. +func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + if c.duplicate { + return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...)
+ } + + return oldHTTPClient{}.ResponseTraceAttrs(resp) +} + +func (c HTTPClient) Status(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 400 { + return codes.Error, "" + } + return codes.Unset, "" +} + +func (c HTTPClient) ErrorType(err error) attribute.KeyValue { + if c.duplicate { + return newHTTPClient{}.ErrorType(err) + } + + return attribute.KeyValue{} +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go new file mode 100644 index 00000000000..745b8c67bc4 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -0,0 +1,348 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "fmt" + "net/http" + "reflect" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type newHTTPServer struct{} + +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + count := 3 // ServerAddress, Method, Scheme + + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + count++ + } + + method, methodOriginal := n.method(req.Method) + if methodOriginal != (attribute.KeyValue{}) { + count++ + } + + scheme := n.scheme(req.TLS != nil) + + if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", so this will not be a + // file-path that would be interpreted with a sock family.
+		count++
+		if peerPort > 0 {
+			count++
+		}
+	}
+
+	useragent := req.UserAgent()
+	if useragent != "" {
+		count++
+	}
+
+	clientIP := serverClientIP(req.Header.Get("X-Forwarded-For"))
+	if clientIP != "" {
+		count++
+	}
+
+	if req.URL != nil && req.URL.Path != "" {
+		count++
+	}
+
+	protoName, protoVersion := netProtocol(req.Proto)
+	if protoName != "" && protoName != "http" {
+		count++
+	}
+	if protoVersion != "" {
+		count++
+	}
+
+	attrs := make([]attribute.KeyValue, 0, count)
+	attrs = append(attrs,
+		semconvNew.ServerAddress(host),
+		method,
+		scheme,
+	)
+
+	if hostPort > 0 {
+		attrs = append(attrs, semconvNew.ServerPort(hostPort))
+	}
+	if methodOriginal != (attribute.KeyValue{}) {
+		attrs = append(attrs, methodOriginal)
+	}
+
+	if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" {
+		// The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
+		// file-path that would be interpreted with a sock family.
+		attrs = append(attrs, semconvNew.NetworkPeerAddress(peer))
+		if peerPort > 0 {
+			attrs = append(attrs, semconvNew.NetworkPeerPort(peerPort))
+		}
+	}
+
+	if useragent := req.UserAgent(); useragent != "" {
+		attrs = append(attrs, semconvNew.UserAgentOriginal(useragent))
+	}
+
+	if clientIP != "" {
+		attrs = append(attrs, semconvNew.ClientAddress(clientIP))
+	}
+
+	if req.URL != nil && req.URL.Path != "" {
+		attrs = append(attrs, semconvNew.URLPath(req.URL.Path))
+	}
+
+	if protoName != "" && protoName != "http" {
+		attrs = append(attrs, semconvNew.NetworkProtocolName(protoName))
+	}
+	if protoVersion != "" {
+		attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion))
+	}
+
+	return attrs
+}
+
+func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) {
+	if method == "" {
+		return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{}
+	}
+	if attr, ok := methodLookup[method]; ok {
+		return attr, attribute.KeyValue{}
+	}
+
+	orig := semconvNew.HTTPRequestMethodOriginal(method)
+	if attr, ok := methodLookup[strings.ToUpper(method)]; ok {
+		return attr, orig
+	}
+	return semconvNew.HTTPRequestMethodGet, orig
+}
+
+func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
+	if https {
+		return semconvNew.URLScheme("https")
+	}
+	return semconvNew.URLScheme("http")
+}
+
+// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP
+// response.
+//
+// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted.
+func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue {
+	var count int
+
+	if resp.ReadBytes > 0 {
+		count++
+	}
+	if resp.WriteBytes > 0 {
+		count++
+	}
+	if resp.StatusCode > 0 {
+		count++
+	}
+
+	attributes := make([]attribute.KeyValue, 0, count)
+
+	if resp.ReadBytes > 0 {
+		attributes = append(attributes,
+			semconvNew.HTTPRequestBodySize(int(resp.ReadBytes)),
+		)
+	}
+	if resp.WriteBytes > 0 {
+		attributes = append(attributes,
+			semconvNew.HTTPResponseBodySize(int(resp.WriteBytes)),
+		)
+	}
+	if resp.StatusCode > 0 {
+		attributes = append(attributes,
+			semconvNew.HTTPResponseStatusCode(resp.StatusCode),
+		)
+	}
+
+	return attributes
+}
+
+// Route returns the attribute for the route.
+func (n newHTTPServer) Route(route string) attribute.KeyValue {
+	return semconvNew.HTTPRoute(route)
+}
+
+type newHTTPClient struct{}
+
+// RequestTraceAttrs returns trace attributes for an HTTP request made by a client.
+func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + /* + below attributes are returned: + - http.request.method + - http.request.method.original + - url.full + - server.address + - server.port + - network.protocol.name + - network.protocol.version + */ + numOfAttributes := 3 // URL, server address, proto, and method. + + var urlHost string + if req.URL != nil { + urlHost = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{urlHost, req.Header.Get("Host")} { + requestHost, requestPort = splitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if eligiblePort > 0 { + numOfAttributes++ + } + useragent := req.UserAgent() + if useragent != "" { + numOfAttributes++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + numOfAttributes++ + } + if protoVersion != "" { + numOfAttributes++ + } + + method, originalMethod := n.method(req.Method) + if originalMethod != (attribute.KeyValue{}) { + numOfAttributes++ + } + + attrs := make([]attribute.KeyValue, 0, numOfAttributes) + + attrs = append(attrs, method) + if originalMethod != (attribute.KeyValue{}) { + attrs = append(attrs, originalMethod) + } + + var u string + if req.URL != nil { + // Remove any username/password info that may be in the URL. + userinfo := req.URL.User + req.URL.User = nil + u = req.URL.String() + // Restore any username/password info that was removed. + req.URL.User = userinfo + } + attrs = append(attrs, semconvNew.URLFull(u)) + + attrs = append(attrs, semconvNew.ServerAddress(requestHost)) + if eligiblePort > 0 { + attrs = append(attrs, semconvNew.ServerPort(eligiblePort)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + return attrs +} + +// ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. +func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + /* + below attributes are returned: + - http.response.status_code + - error.type + */ + var count int + if resp.StatusCode > 0 { + count++ + } + + if isErrorStatusCode(resp.StatusCode) { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + if resp.StatusCode > 0 { + attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode)) + } + + if isErrorStatusCode(resp.StatusCode) { + errorType := strconv.Itoa(resp.StatusCode) + attrs = append(attrs, semconvNew.ErrorTypeKey.String(errorType)) + } + return attrs +} + +func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { + t := reflect.TypeOf(err) + var value string + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. 
+		value = t.String()
+	} else {
+		value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+	}
+
+	if value == "" {
+		return semconvNew.ErrorTypeOther
+	}
+
+	return semconvNew.ErrorTypeKey.String(value)
+}
+
+func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) {
+	if method == "" {
+		return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{}
+	}
+	if attr, ok := methodLookup[method]; ok {
+		return attr, attribute.KeyValue{}
+	}
+
+	orig := semconvNew.HTTPRequestMethodOriginal(method)
+	if attr, ok := methodLookup[strings.ToUpper(method)]; ok {
+		return attr, orig
+	}
+	return semconvNew.HTTPRequestMethodGet, orig
+}
+
+func isErrorStatusCode(code int) bool {
+	return code >= 400 || code < 100
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
new file mode 100644
index 00000000000..e6e14924f57
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
@@ -0,0 +1,98 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+
+import (
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+// splitHostPort splits a network address hostport of the form "host",
+// "host%zone", "[host]", "[host%zone]", "host:port", "host%zone:port",
+// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
+// port.
+//
+// An empty host is returned if it is not provided or unparsable. A negative
+// port is returned if it is not provided or unparsable.
+func splitHostPort(hostport string) (host string, port int) {
+	port = -1
+
+	if strings.HasPrefix(hostport, "[") {
+		addrEnd := strings.LastIndex(hostport, "]")
+		if addrEnd < 0 {
+			// Invalid hostport.
+			return
+		}
+		if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
+			host = hostport[1:addrEnd]
+			return
+		}
+	} else {
+		if i := strings.LastIndex(hostport, ":"); i < 0 {
+			host = hostport
+			return
+		}
+	}
+
+	host, pStr, err := net.SplitHostPort(hostport)
+	if err != nil {
+		return
+	}
+
+	p, err := strconv.ParseUint(pStr, 10, 16)
+	if err != nil {
+		return
+	}
+	return host, int(p) // nolint: gosec // Bit size checked to be 16 above.
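+	// [Editor's note: illustrative examples, not upstream code.] Expected
+	// behavior of this helper, given the parsing logic above:
+	//
+	//	splitHostPort("example.com:8080") // ("example.com", 8080)
+	//	splitHostPort("[::1]:443")        // ("::1", 443)
+	//	splitHostPort("example.com")      // ("example.com", -1)
+	//	splitHostPort(":8080")            // ("", 8080)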
+} + +func requiredHTTPPort(https bool, port int) int { // nolint:revive + if https { + if port > 0 && port != 443 { + return port + } + } else { + if port > 0 && port != 80 { + return port + } + } + return -1 +} + +func serverClientIP(xForwardedFor string) string { + if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + xForwardedFor = xForwardedFor[:idx] + } + return xForwardedFor +} + +func netProtocol(proto string) (name string, version string) { + name, version, _ = strings.Cut(proto, "/") + name = strings.ToLower(name) + return name, version +} + +var methodLookup = map[string]attribute.KeyValue{ + http.MethodConnect: semconvNew.HTTPRequestMethodConnect, + http.MethodDelete: semconvNew.HTTPRequestMethodDelete, + http.MethodGet: semconvNew.HTTPRequestMethodGet, + http.MethodHead: semconvNew.HTTPRequestMethodHead, + http.MethodOptions: semconvNew.HTTPRequestMethodOptions, + http.MethodPatch: semconvNew.HTTPRequestMethodPatch, + http.MethodPost: semconvNew.HTTPRequestMethodPost, + http.MethodPut: semconvNew.HTTPRequestMethodPut, + http.MethodTrace: semconvNew.HTTPRequestMethodTrace, +} + +func handleErr(err error) { + if err != nil { + otel.Handle(err) + } +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go new file mode 100644 index 00000000000..c999b05e675 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -0,0 +1,192 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "errors" + "io" + "net/http" + "slices" + "strings" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" +) + +type oldHTTPServer struct{} + +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (o oldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + return semconvutil.HTTPServerRequest(server, req) +} + +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. 
+func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + attributes := []attribute.KeyValue{} + + if resp.ReadBytes > 0 { + attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes))) + } + if resp.ReadError != nil && !errors.Is(resp.ReadError, io.EOF) { + // This is not in the semantic conventions, but is historically provided + attributes = append(attributes, attribute.String("http.read_error", resp.ReadError.Error())) + } + if resp.WriteBytes > 0 { + attributes = append(attributes, semconv.HTTPResponseContentLength(int(resp.WriteBytes))) + } + if resp.StatusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(resp.StatusCode)) + } + if resp.WriteError != nil && !errors.Is(resp.WriteError, io.EOF) { + // This is not in the semantic conventions, but is historically provided + attributes = append(attributes, attribute.String("http.write_error", resp.WriteError.Error())) + } + + return attributes +} + +// Route returns the attribute for the route. +func (o oldHTTPServer) Route(route string) attribute.KeyValue { + return semconv.HTTPRoute(route) +} + +// HTTPStatusCode returns the attribute for the HTTP status code. +// This is a temporary function needed by metrics. This will be removed when MetricsRequest is added. +func HTTPStatusCode(status int) attribute.KeyValue { + return semconv.HTTPStatusCode(status) +} + +// Server HTTP metrics. +const ( + serverRequestSize = "http.server.request.size" // Incoming request bytes total + serverResponseSize = "http.server.response.size" // Incoming response bytes total + serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds +) + +func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} + } + var err error + requestBytesCounter, err := meter.Int64Counter( + serverRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + responseBytesCounter, err := meter.Int64Counter( + serverResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + serverLatencyMeasure, err := meter.Float64Histogram( + serverDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of inbound HTTP requests."), + ) + handleErr(err) + + return requestBytesCounter, responseBytesCounter, serverLatencyMeasure +} + +func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + n := len(additionalAttributes) + 3 + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. 
+ host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + n++ + } + if protoVersion != "" { + n++ + } + + if statusCode > 0 { + n++ + } + + attributes := slices.Grow(additionalAttributes, n) + attributes = append(attributes, + o.methodMetric(req.Method), + o.scheme(req.TLS != nil), + semconv.NetHostName(host)) + + if hostPort > 0 { + attributes = append(attributes, semconv.NetHostPort(hostPort)) + } + if protoName != "" { + attributes = append(attributes, semconv.NetProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconv.NetProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + return attributes +} + +func (o oldHTTPServer) methodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return semconv.HTTPMethod(method) +} + +func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconv.HTTPSchemeHTTPS + } + return semconv.HTTPSchemeHTTP +} + +type oldHTTPClient struct{} + +func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + return semconvutil.HTTPClientRequest(req) +} + +func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + return semconvutil.HTTPClientResponse(resp) +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go index edf4ce3d315..7aa5f99e815 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go index 0efd5261f62..a73bb06e90e 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go @@ -2,18 +2,7 @@ // source: internal/shared/semconvutil/httpconv.go.tmpl // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go index d3a06e0cada..b80a1db61fa 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -2,17 +2,7 @@ // source: internal/shared/semconvutil/netconv.go.tmpl // Copyright The OpenTelemetry Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" @@ -102,7 +92,7 @@ func (c *netConv) Host(address string) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.HostName(h)) if p > 0 { - attrs = append(attrs, c.HostPort(int(p))) + attrs = append(attrs, c.HostPort(p)) } return attrs } @@ -148,7 +138,7 @@ func (c *netConv) Peer(address string) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.PeerName(h)) if p > 0 { - attrs = append(attrs, c.PeerPort(int(p))) + attrs = append(attrs, c.PeerPort(p)) } return attrs } @@ -205,7 +195,7 @@ func splitHostPort(hostport string) (host string, port int) { if err != nil { return } - return host, int(p) + return host, int(p) // nolint: gosec // Bitsize checked to be 16 above. } func netProtocol(proto string) (name string, version string) { diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go index 26a51a18050..ea504e396f1 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -48,8 +37,12 @@ type labelerContextKeyType int const lablelerContextKey labelerContextKeyType = 0 -func injectLabeler(ctx context.Context, l *Labeler) context.Context { - return context.WithValue(ctx, lablelerContextKey, l) +// ContextWithLabeler returns a new context with the provided Labeler instance. +// Attributes added to the specified labeler will be injected into metrics +// emitted by the instrumentation. Only one labeller can be injected into the +// context. Injecting it multiple times will override the previous calls. 
+func ContextWithLabeler(parent context.Context, l *Labeler) context.Context { + return context.WithValue(parent, lablelerContextKey, l) } // LabelerFromContext retrieves a Labeler instance from the provided context if diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 43e937a67a6..b4119d3438b 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -22,15 +11,16 @@ import ( "sync/atomic" "time" - "go.opentelemetry.io/otel/metric" - + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" + "go.opentelemetry.io/otel/trace" ) // Transport implements the http.RoundTripper interface and wraps @@ -38,14 +28,16 @@ import ( type Transport struct { rt http.RoundTripper - tracer trace.Tracer - meter metric.Meter - propagators propagation.TextMapPropagator - spanStartOptions []trace.SpanStartOption - filters []Filter - spanNameFormatter func(string, *http.Request) string - clientTrace func(context.Context) *httptrace.ClientTrace + tracer trace.Tracer + meter metric.Meter + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + filters []Filter + spanNameFormatter func(string, *http.Request) string + clientTrace func(context.Context) *httptrace.ClientTrace + metricAttributesFn func(*http.Request) []attribute.KeyValue + semconv semconv.HTTPClient requestBytesCounter metric.Int64Counter responseBytesCounter metric.Int64Counter latencyMeasure metric.Float64Histogram @@ -65,7 +57,8 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { } t := Transport{ - rt: base, + rt: base, + semconv: semconv.NewHTTPClient(), } defaultOpts := []Option{ @@ -88,6 +81,7 @@ func (t *Transport) applyConfig(c *config) { t.filters = c.Filters t.spanNameFormatter = c.SpanNameFormatter t.clientTrace = c.ClientTrace + t.metricAttributesFn = c.MetricAttributesFn } func (t *Transport) createMeasures() { @@ -148,49 +142,56 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { ctx = 
httptrace.WithClientTrace(ctx, t.clientTrace(ctx)) } - labeler := &Labeler{} - ctx = injectLabeler(ctx, labeler) + labeler, found := LabelerFromContext(ctx) + if !found { + ctx = ContextWithLabeler(ctx, labeler) + } r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request. - // use a body wrapper to determine the request size - var bw bodyWrapper // if request body is nil or NoBody, we don't want to mutate the body as it // will affect the identity of it in an unforeseeable way because we assert // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. + bw := request.NewBodyWrapper(r.Body, func(int64) {}) if r.Body != nil && r.Body != http.NoBody { - bw.ReadCloser = r.Body - // noop to prevent nil panic. not using this record fun yet. - bw.record = func(int64) {} - r.Body = &bw + r.Body = bw } - span.SetAttributes(semconvutil.HTTPClientRequest(r)...) + span.SetAttributes(t.semconv.RequestTraceAttrs(r)...) t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header)) res, err := t.rt.RoundTrip(r) if err != nil { - span.RecordError(err) + // set error type attribute if the error is part of the predefined + // error types. + // otherwise, record it as an exception + if errType := t.semconv.ErrorType(err); errType.Valid() { + span.SetAttributes(errType) + } else { + span.RecordError(err) + } + span.SetStatus(codes.Error, err.Error()) span.End() return res, err } // metrics - metricAttrs := append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...) + metricAttrs := append(append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...), t.metricAttributesFromRequest(r)...) if res.StatusCode > 0 { metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) } - o := metric.WithAttributes(metricAttrs...) - t.requestBytesCounter.Add(ctx, bw.read.Load(), o) + o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) + + t.requestBytesCounter.Add(ctx, bw.BytesRead(), o) // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { t.responseBytesCounter.Add(ctx, n, o) } // traces - span.SetAttributes(semconvutil.HTTPClientResponse(res)...) - span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode)) + span.SetAttributes(t.semconv.ResponseTraceAttrs(res)...) + span.SetStatus(t.semconv.Status(res.StatusCode)) res.Body = newWrappedBody(span, readRecordFunc, res.Body) @@ -202,6 +203,14 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { return res, err } +func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { + var attributeForRequest []attribute.KeyValue + if t.metricAttributesFn != nil { + attributeForRequest = t.metricAttributesFn(r) + } + return attributeForRequest +} + // newWrappedBody returns a new and appropriately scoped *wrappedBody as an // io.ReadCloser. If the passed body implements io.Writer, the returned value // will implement io.ReadWriteCloser. 
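[Editor's note: a minimal usage sketch, not part of the vendored patch. With
the Labeler change above, a Labeler placed in the request context by the
caller is now reused by the Transport instead of being overwritten; the
"tenant" attribute and URL below are placeholder values.]

	labeler := &otelhttp.Labeler{}
	labeler.Add(attribute.String("tenant", "example"))
	ctx := otelhttp.ContextWithLabeler(context.Background(), labeler)
	req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "https://example.com", nil)
	client := &http.Client{Transport: otelhttp.NewTransport(http.DefaultTransport)}
	if resp, err := client.Do(req); err == nil {
		resp.Body.Close() // metrics for this request carry the "tenant" attribute
	}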
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 35254e888fb..502c1bdafc7 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -1,22 +1,11 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.49.0" + return "0.54.0" // This string is updated by the pre_release.sh script during release } diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go deleted file mode 100644 index 2852ec97171..00000000000 --- a/terraform/providers/google/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" - -import ( - "context" - "io" - "net/http" - "sync/atomic" - - "go.opentelemetry.io/otel/propagation" -) - -var _ io.ReadCloser = &bodyWrapper{} - -// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number -// of bytes read and the last error. -type bodyWrapper struct { - io.ReadCloser - record func(n int64) // must not be nil - - read atomic.Int64 - err error -} - -func (w *bodyWrapper) Read(b []byte) (int, error) { - n, err := w.ReadCloser.Read(b) - n1 := int64(n) - w.read.Add(n1) - w.err = err - w.record(n1) - return n, err -} - -func (w *bodyWrapper) Close() error { - return w.ReadCloser.Close() -} - -var _ http.ResponseWriter = &respWriterWrapper{} - -// respWriterWrapper wraps a http.ResponseWriter in order to track the number of -// bytes written, the last error, and to catch the first written statusCode. 
-// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional -// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc) -// that may be useful when using it in real life situations. -type respWriterWrapper struct { - http.ResponseWriter - record func(n int64) // must not be nil - - // used to inject the header - ctx context.Context - - props propagation.TextMapPropagator - - written int64 - statusCode int - err error - wroteHeader bool -} - -func (w *respWriterWrapper) Header() http.Header { - return w.ResponseWriter.Header() -} - -func (w *respWriterWrapper) Write(p []byte) (int, error) { - if !w.wroteHeader { - w.WriteHeader(http.StatusOK) - } - n, err := w.ResponseWriter.Write(p) - n1 := int64(n) - w.record(n1) - w.written += n1 - w.err = err - return n, err -} - -// WriteHeader persists initial statusCode for span attribution. -// All calls to WriteHeader will be propagated to the underlying ResponseWriter -// and will persist the statusCode from the first call. -// Blocking consecutive calls to WriteHeader alters expected behavior and will -// remove warning logs from net/http where developers will notice incorrect handler implementations. -func (w *respWriterWrapper) WriteHeader(statusCode int) { - if !w.wroteHeader { - w.wroteHeader = true - w.statusCode = statusCode - } - w.ResponseWriter.WriteHeader(statusCode) -} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/.codespellignore b/terraform/providers/google/vendor/go.opentelemetry.io/otel/.codespellignore index 120b63a9c7d..6bf3abc41e7 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/.codespellignore +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/.codespellignore @@ -5,3 +5,5 @@ collison consequentially ans nam +valu +thirdparty diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/.codespellrc b/terraform/providers/google/vendor/go.opentelemetry.io/otel/.codespellrc index 4afbb1fb3bd..e2cb3ea944b 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/.codespellrc +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/.codespellrc @@ -5,6 +5,6 @@ check-filenames = check-hidden = ignore-words = .codespellignore interactive = 1 -skip = .git,go.mod,go.sum,semconv,venv,.tools +skip = .git,go.mod,go.sum,go.work,go.work.sum,semconv,venv,.tools uri-ignore-words-list = * write = diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/.gitmodules b/terraform/providers/google/vendor/go.opentelemetry.io/otel/.gitmodules deleted file mode 100644 index 38a1f56982b..00000000000 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "opentelemetry-proto"] - path = exporters/otlp/internal/opentelemetry-proto - url = https://github.com/open-telemetry/opentelemetry-proto diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/.golangci.yml b/terraform/providers/google/vendor/go.opentelemetry.io/otel/.golangci.yml index a62511f382e..d9abe194d94 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -9,8 +9,11 @@ linters: disable-all: true # Specifically enable linters we want to use. 
enable: + - asasalint + - bodyclose - depguard - errcheck + - errorlint - godot - gofumpt - goimports @@ -21,8 +24,11 @@ linters: - misspell - revive - staticcheck + - tenv - typecheck + - unconvert - unused + - unparam issues: # Maximum issues count per one linter. @@ -124,6 +130,8 @@ linters-settings: - "**/example/**/*.go" - "**/trace/*.go" - "**/trace/**/*.go" + - "**/log/*.go" + - "**/log/**/*.go" deny: - pkg: "go.opentelemetry.io/otel/internal$" desc: Do not use cross-module internal packages. diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/terraform/providers/google/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 98f2d204384..6107c17b89f 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,218 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] + + + +## [1.29.0/0.51.0/0.5.0] 2024-08-23 + +This release is the last to support [Go 1.21]. +The next release will require at least [Go 1.22]. + +### Added + +- Add MacOS ARM64 platform to the compatibility testing suite. (#5577) +- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627) +- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. + This new module contains an OTLP exporter that transmits log telemetry using gRPC. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629) +- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651) +- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651) +- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665) +- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`. + This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not. + It replaces the existing `Enabled` method that is removed from the `Processor` interface itself. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692) +- Support [Go 1.23]. (#5720) + +### Changed + +- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132) +- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636) +- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665) +- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666) +- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666) +- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method. 
+ See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692) +- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) +- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) + +### Fixed + +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584) +- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541) +- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612) +- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612) +- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612) +- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650) +- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) +- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) + +### Removed + +- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) +- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) + +## [1.28.0/0.50.0/0.4.0] 2024-07-02 + +### Added + +- The `IsEmpty` method is added to the `Instrument` type in `go.opentelemetry.io/otel/sdk/metric`. + This method is used to check if an `Instrument` instance is a zero-value. (#5431) +- Store and provide the emitted `context.Context` in `ScopeRecords` of `go.opentelemetry.io/otel/sdk/log/logtest`. (#5468) +- The `go.opentelemetry.io/otel/semconv/v1.26.0` package. + The package contains semantic conventions from the `v1.26.0` version of the OpenTelemetry Semantic Conventions. 
(#5476) +- The `AssertRecordEqual` method to `go.opentelemetry.io/otel/log/logtest` to allow comparison of two log records in tests. (#5499) +- The `WithHeaders` option to `go.opentelemetry.io/otel/exporters/zipkin` to allow configuring custom http headers while exporting spans. (#5530) + +### Changed + +- `Tracer.Start` in `go.opentelemetry.io/otel/trace/noop` no longer allocates a span for empty span context. (#5457) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/otel-collector`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/zipkin`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#5490) + - The exporter no longer exports the deprecated "otel.library.name" or "otel.library.version" attributes. +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/resource`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/trace`. (#5490) +- `SimpleProcessor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` no longer allocates a slice which makes it possible to have a zero-allocation log processing using `SimpleProcessor`. (#5493) +- Use non-generic functions in the `Start` method of `"go.opentelemetry.io/otel/sdk/trace".Trace` to reduce memory allocation. (#5497) +- `service.instance.id` is populated for a `Resource` created with `"go.opentelemetry.io/otel/sdk/resource".Default` with a default value when `OTEL_GO_X_RESOURCE` is set. (#5520) +- Improve performance of metric instruments in `go.opentelemetry.io/otel/sdk/metric` by removing unnecessary calls to `time.Now`. (#5545) + +### Fixed + +- Log a warning to the OpenTelemetry internal logger when a `Record` in `go.opentelemetry.io/otel/sdk/log` drops an attribute due to a limit being reached. (#5376) +- Identify the `Tracer` returned from the global `TracerProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426) +- Identify the `Meter` returned from the global `MeterProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426) +- Log a warning to the OpenTelemetry internal logger when a `Span` in `go.opentelemetry.io/otel/sdk/trace` drops an attribute, event, or link due to a limit being reached. (#5434) +- Document instrument name requirements in `go.opentelemetry.io/otel/metric`. (#5435) +- Prevent random number generation data-race for experimental rand exemplars in `go.opentelemetry.io/otel/sdk/metric`. (#5456) +- Fix counting number of dropped attributes of `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5464) +- Fix panic in baggage creation when a member contains `0x80` char in key or value. (#5494) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5508) +- Retry trace and span ID generation if it generated an invalid one in `go.opentelemetry.io/otel/sdk/trace`. (#5514) +- Fix stale timestamps reported by the last-value aggregation. (#5517) +- Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. 
(#5521) +- Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. (#5549) +- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528) + +## [1.27.0/0.49.0/0.3.0] 2024-05-21 + +### Added + +- Add example for `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5242) +- Add `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest` to facilitate testing exporter and processor implementations. (#5258) +- Add `RecordFactory` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing bridge implementations. (#5263) +- The count of dropped records from the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is logged. (#5276) +- Add metrics in the `otel-collector` example. (#5283) +- Add the synchronous gauge instrument to `go.opentelemetry.io/otel/metric`. (#5304) + - An `int64` or `float64` synchronous gauge instrument can now be created from a `Meter`. + - All implementations of the API (`go.opentelemetry.io/otel/metric/noop`, `go.opentelemetry.io/otel/sdk/metric`) are updated to support this instrument. +- Add logs to `go.opentelemetry.io/otel/example/dice`. (#5349) + +### Changed + +- The `Shutdown` method of `Exporter` in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` ignores the context cancellation and always returns `nil`. (#5189) +- The `ForceFlush` and `Shutdown` methods of the exporter returned by `New` in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` ignore the context cancellation and always return `nil`. (#5189) +- Apply the value length limits to `Record` attributes in `go.opentelemetry.io/otel/sdk/log`. (#5230) +- De-duplicate map attributes added to a `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5230) +- `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` won't print timestamps when `WithoutTimestamps` option is set. (#5241) +- The `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` exporter won't print `AttributeValueLengthLimit` and `AttributeCountLimit` fields now, instead it prints the `DroppedAttributes` field. (#5272) +- Improved performance in the `Stringer` implementation of `go.opentelemetry.io/otel/baggage.Member` by reducing the number of allocations. (#5286) +- Set the start time for last-value aggregates in `go.opentelemetry.io/otel/sdk/metric`. (#5305) +- The `Span` in `go.opentelemetry.io/otel/sdk/trace` will record links without span context if either non-empty `TraceState` or attributes are provided. (#5315) +- Upgrade all dependencies of `go.opentelemetry.io/otel/semconv/v1.24.0` to `go.opentelemetry.io/otel/semconv/v1.25.0`. (#5374) + +### Fixed + +- Comparison of unordered maps for `go.opentelemetry.io/otel/log.KeyValue` and `go.opentelemetry.io/otel/log.Value`. (#5306) +- Fix the empty output of `go.opentelemetry.io/otel/log.Value` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5311) +- Split the behavior of `Recorder` in `go.opentelemetry.io/otel/log/logtest` so it behaves as a `LoggerProvider` only. (#5365) +- Fix wrong package name of the error message when parsing endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5371) +- Identify the `Logger` returned from the global `LoggerProvider` in `go.opentelemetry.io/otel/log/global` with its schema URL. 
(#5375)
+
+## [1.26.0/0.48.0/0.2.0-alpha] 2024-04-24
+
+### Added
+
+- Add `Recorder` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing the log bridge implementations. (#5134)
+- Add span flags to OTLP spans and links exported by `go.opentelemetry.io/otel/exporters/otlp/otlptrace`. (#5194)
+- Make the initial alpha release of `go.opentelemetry.io/otel/sdk/log`.
+  This new module contains the Go implementation of the OpenTelemetry Logs SDK.
+  This module is unstable and breaking changes may be introduced.
+  See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`.
+  This new module contains an OTLP exporter that transmits log telemetry using HTTP.
+  This module is unstable and breaking changes may be introduced.
+  See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`.
+  This new module contains an exporter that prints log records to STDOUT.
+  This module is unstable and breaking changes may be introduced.
+  See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- The `go.opentelemetry.io/otel/semconv/v1.25.0` package.
+  The package contains semantic conventions from the `v1.25.0` version of the OpenTelemetry Semantic Conventions. (#5254)
+
+### Changed
+
+- Update `go.opentelemetry.io/proto/otlp` from v1.1.0 to v1.2.0. (#5177)
+- Improve performance of baggage member character validation in `go.opentelemetry.io/otel/baggage`. (#5214)
+- The `otel-collector` example now uses docker compose to bring up services instead of kubernetes. (#5244)
+
+### Fixed
+
+- Slice attribute values in `go.opentelemetry.io/otel/attribute` are now emitted as their JSON representation. (#5159)
+
+## [1.25.0/0.47.0/0.0.8/0.1.0-alpha] 2024-04-05
+
+### Added
+
+- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4906)
+- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracehttp`. (#4906)
+- Add `AddLink` method to the `Span` interface in `go.opentelemetry.io/otel/trace`. (#5032)
+- The `Enabled` method is added to the `Logger` interface in `go.opentelemetry.io/otel/log`.
+  This method is used to notify users if a log record will be emitted or not. (#5071)
+- Add `SeverityUndefined` `const` to `go.opentelemetry.io/otel/log`.
+  This value represents an unset severity level. (#5072)
+- Add `Empty` function in `go.opentelemetry.io/otel/log` to return a `KeyValue` for an empty value. (#5076)
+- Add `go.opentelemetry.io/otel/log/global` to manage the global `LoggerProvider`.
+  This package is provided with the anticipation that all functionality will be migrated to `go.opentelemetry.io/otel` when `go.opentelemetry.io/otel/log` stabilizes.
+  At that point, users will be required to migrate their code, and this package will be deprecated then removed. (#5085)
+- Add support for `Summary` metrics in the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` exporters. (#5100)
+- Add `otel.scope.name` and `otel.scope.version` tags to spans exported by `go.opentelemetry.io/otel/exporters/zipkin`. (#5108)
+- Add support for `AddLink` to `go.opentelemetry.io/otel/bridge/opencensus`.
(#5116) +- Add `String` method to `Value` and `KeyValue` in `go.opentelemetry.io/otel/log`. (#5117) +- Add Exemplar support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5111) +- Add metric semantic conventions to `go.opentelemetry.io/otel/semconv/v1.24.0`. Future `semconv` packages will include metric semantic conventions as well. (#4528) + +### Changed + +- `SpanFromContext` and `SpanContextFromContext` in `go.opentelemetry.io/otel/trace` no longer make a heap allocation when the passed context has no span. (#5049) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now create a gRPC client in idle mode and with "dns" as the default resolver using [`grpc.NewClient`](https://pkg.go.dev/google.golang.org/grpc#NewClient). (#5151) + Because of that `WithDialOption` ignores [`grpc.WithBlock`](https://pkg.go.dev/google.golang.org/grpc#WithBlock), [`grpc.WithTimeout`](https://pkg.go.dev/google.golang.org/grpc#WithTimeout), and [`grpc.WithReturnConnectionError`](https://pkg.go.dev/google.golang.org/grpc#WithReturnConnectionError). + Notice that [`grpc.DialContext`](https://pkg.go.dev/google.golang.org/grpc#DialContext) which was used before is now deprecated. + +### Fixed + +- Clarify the documentation about equivalence guarantees for the `Set` and `Distinct` types in `go.opentelemetry.io/otel/attribute`. (#5027) +- Prevent default `ErrorHandler` self-delegation. (#5137) +- Update all dependencies to address [GO-2024-2687]. (#5139) + +### Removed + +- Drop support for [Go 1.20]. (#4967) + +### Deprecated + +- Deprecate `go.opentelemetry.io/otel/attribute.Sortable` type. (#4734) +- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortable` function. (#4734) +- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortableFiltered` function. (#4734) + ## [1.24.0/0.46.0/0.0.1-alpha] 2024-02-23 This release is the last to support [Go 1.20]. @@ -22,6 +234,7 @@ The next release will require at least [Go 1.21]. This module includes OpenTelemetry Go's implementation of the Logs Bridge API. This module is in an alpha state, it is subject to breaking changes. See our [versioning policy](./VERSIONING.md) for more info. (#4961) +- Add ARM64 platform to the compatibility testing suite. (#4994) ### Fixed @@ -138,7 +351,7 @@ See our [versioning policy](VERSIONING.md) for more information about these stab ## [1.20.0/0.43.0] 2023-11-10 -This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. +This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementers need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. ### Added @@ -170,15 +383,15 @@ This release brings a breaking change for custom trace API implementations. 
Some
- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583)
- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type.
  This extends the `TracerProvider` interface and is a breaking change for any existing implementation.
-  Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+  Implementers need to update their implementations based on what they want the default behavior of the interface to be.
  See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type.
  This extends the `Tracer` interface and is a breaking change for any existing implementation.
-  Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+  Implementers need to update their implementations based on what they want the default behavior of the interface to be.
  See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type.
  This extends the `Span` interface and is a breaking change for any existing implementation.
-  Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+  Implementers need to update their implementations based on what they want the default behavior of the interface to be.
  See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
@@ -814,7 +1027,7 @@ The next release will require at least [Go 1.19].
 - Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
 - `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
 - Re-enabled Attribute Filters in the Metric SDK. (#3396)
-- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggragation. (#3408)
+- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408)
 - Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432)
 - Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
 - Prevent duplicate Prometheus description, unit, and type. (#3469)
@@ -1859,7 +2072,7 @@ with major version 0.
 - `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369)
 - Many OTLP Exporter options became gRPC ProtocolDriver options.
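
Because the embedded-type change above recurs for `TracerProvider`, `Tracer`, and `Span`, a short sketch may help. The wrapper type below is hypothetical; embedding `embedded.TracerProvider` is what keeps a custom implementation compiling once the interface grows:

```go
package main

import (
	"go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/embedded"
	"go.opentelemetry.io/otel/trace/noop"
)

// wrappingProvider is a hypothetical custom implementation. Embedding
// embedded.TracerProvider satisfies the unexported method added in v1.20.0;
// without it, this type no longer compiles as a trace.TracerProvider.
type wrappingProvider struct {
	embedded.TracerProvider
	wrapped trace.TracerProvider
}

func (p *wrappingProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
	// Delegate; a real implementation would add behavior here.
	return p.wrapped.Tracer(name, opts...)
}

// Compile-time check that the interface is still satisfied.
var _ trace.TracerProvider = (*wrappingProvider)(nil)

func main() {
	p := &wrappingProvider{wrapped: noop.NewTracerProvider()}
	_ = p.Tracer("example")
}
```

Wrapping a `noop` provider here is only for the sketch; the same embedding pattern applies to any delegate.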
(#1369) - Unify endpoint API that related to OTel exporter. (#1401) -- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435) +- Optimize metric histogram aggregator to reuse its slice of buckets. (#1435) - Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (1430) - Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434) - `SamplingResult` now passed a `Tracestate` from the parent `SpanContext` (#1432) @@ -2849,7 +3062,12 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.24.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...HEAD +[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 +[1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 +[1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 +[1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0 +[1.25.0/0.47.0/0.0.8/0.1.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.25.0 [1.24.0/0.46.0/0.0.1-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.24.0 [1.23.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.1 [1.23.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0 @@ -2928,6 +3146,9 @@ It contains api and sdk for trace and meter. [0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 [0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 + + +[Go 1.23]: https://go.dev/doc/go1.23 [Go 1.22]: https://go.dev/doc/go1.22 [Go 1.21]: https://go.dev/doc/go1.21 [Go 1.20]: https://go.dev/doc/go1.20 @@ -2937,3 +3158,5 @@ It contains api and sdk for trace and meter. 
[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric [metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric [trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace + +[GO-2024-2687]: https://pkg.go.dev/vuln/GO-2024-2687 diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/CODEOWNERS b/terraform/providers/google/vendor/go.opentelemetry.io/otel/CODEOWNERS index 31d336d9222..5904bb7070e 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -5,13 +5,13 @@ ##################################################### # # Learn about membership in OpenTelemetry community: -# https://github.com/open-telemetry/community/blob/main/community-membership.md +# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md # # # Learn about CODEOWNERS file format: # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu +* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu -CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole \ No newline at end of file +CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/terraform/providers/google/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index c9f2bac55bf..b7402576f98 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -201,6 +201,16 @@ You can install and run a "local Go Doc site" in the following way: [`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric) is an example of a very well-documented package. +### README files + +Each (non-internal, non-test, non-documentation) package must contain a +`README.md` file containing at least a title, and a `pkg.go.dev` badge. + +The README should not be a repetition of Go doc comments. + +You can verify the presence of all README files with the `make verify-readmes` +command. + ## Style Guide One of the primary goals of this project is that it is actually used by @@ -560,6 +570,9 @@ functionality should be added, each one will need their own super-set interfaces and will duplicate the pattern. For this reason, the simple targeted interface that defines the specific functionality should be preferred. +See also: +[Keeping Your Modules Compatible: Working with interfaces](https://go.dev/blog/module-compatibility#working-with-interfaces). + ### Testing The tests should never leak goroutines. @@ -615,17 +628,15 @@ should be canceled. 
### Approvers -- [Evan Torrie](https://github.com/evantorrie), Verizon Media -- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [Chester Cheung](https://github.com/hanyuancheung), Tencent -- [Damien Mathieu](https://github.com/dmathieu), Elastic -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS ### Maintainers -- [David Ashpole](https://github.com/dashpole), Google - [Aaron Clawson](https://github.com/MadVikingGod), LightStep +- [Damien Mathieu](https://github.com/dmathieu), Elastic +- [David Ashpole](https://github.com/dashpole), Google - [Robert Pająk](https://github.com/pellared), Splunk +- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [Tyler Yahn](https://github.com/MrAlias), Splunk ### Emeritus @@ -633,11 +644,13 @@ should be canceled. - [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb - [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep - [Josh MacDonald](https://github.com/jmacd), LightStep +- [Anthony Mirabella](https://github.com/Aneurysm9), AWS +- [Evan Torrie](https://github.com/evantorrie), Yahoo ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community -repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). +repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md). [Approver]: #approvers [Maintainer]: #maintainers diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/Makefile b/terraform/providers/google/vendor/go.opentelemetry.io/otel/Makefile index 6de95219be7..070b1e57df1 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/Makefile +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/Makefile @@ -1,16 +1,5 @@ # Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
+# SPDX-License-Identifier: Apache-2.0 TOOLS_MOD_DIR := ./internal/tools @@ -25,8 +14,8 @@ TIMEOUT = 60 .DEFAULT_GOAL := precommit .PHONY: precommit ci -precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix test-default -ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage +precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default +ci: generate license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage # Tools @@ -34,7 +23,7 @@ TOOLS = $(CURDIR)/.tools $(TOOLS): @mkdir -p $@ -$(TOOLS)/%: | $(TOOLS) +$(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS) cd $(TOOLS_MOD_DIR) && \ $(GO) build -o $@ $(PACKAGE) @@ -50,9 +39,6 @@ $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink SEMCONVKIT = $(TOOLS)/semconvkit $(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit -DBOTCONF = $(TOOLS)/dbotconf -$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf - GOLANGCI_LINT = $(TOOLS)/golangci-lint $(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint @@ -81,7 +67,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker @@ -110,7 +96,7 @@ $(PYTOOLS): @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip" # Install python packages into the virtual environment. -$(PYTOOLS)/%: | $(PYTOOLS) +$(PYTOOLS)/%: $(PYTOOLS) @$(DOCKERPY) $(PIP) install -r requirements.txt CODESPELL = $(PYTOOLS)/codespell @@ -124,18 +110,18 @@ generate: go-generate vanity-import-fix .PHONY: go-generate go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%) go-generate/%: DIR=$* -go-generate/%: | $(STRINGER) $(GOTMPL) +go-generate/%: $(STRINGER) $(GOTMPL) @echo "$(GO) generate $(DIR)/..." \ && cd $(DIR) \ && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... .PHONY: vanity-import-fix -vanity-import-fix: | $(PORTO) +vanity-import-fix: $(PORTO) @$(PORTO) --include-internal -w . # Generate go.work file for local development. .PHONY: go-work -go-work: | $(CROSSLINK) +go-work: $(CROSSLINK) $(CROSSLINK) work --root=$(shell pwd) # Build @@ -178,7 +164,7 @@ test/%: COVERAGE_MODE = atomic COVERAGE_PROFILE = coverage.out .PHONY: test-coverage -test-coverage: | $(GOCOVMERGE) +test-coverage: $(GOCOVMERGE) @set -e; \ printf "" > coverage.txt; \ for dir in $(ALL_COVERAGE_MOD_DIRS); do \ @@ -192,40 +178,37 @@ test-coverage: | $(GOCOVMERGE) done; \ $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt -# Adding a directory will include all benchmarks in that directory if a filter is not specified. -BENCHMARK_TARGETS := sdk/trace .PHONY: benchmark -benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) -BENCHMARK_FILTER = . -# You can override the filter for a particular directory by adding a rule here. 
-benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample +benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%) benchmark/%: - @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \ + @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \ && cd $* \ - $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter)) + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=. .PHONY: golangci-lint golangci-lint-fix golangci-lint-fix: ARGS=--fix golangci-lint-fix: golangci-lint golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%) golangci-lint/%: DIR=$* -golangci-lint/%: | $(GOLANGCI_LINT) +golangci-lint/%: $(GOLANGCI_LINT) @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \ && cd $(DIR) \ && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS) .PHONY: crosslink -crosslink: | $(CROSSLINK) +crosslink: $(CROSSLINK) @echo "Updating intra-repository dependencies in all go modules" \ && $(CROSSLINK) --root=$(shell pwd) --prune .PHONY: go-mod-tidy go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) go-mod-tidy/%: DIR=$* -go-mod-tidy/%: | crosslink +go-mod-tidy/%: crosslink @echo "$(GO) mod tidy in $(DIR)" \ && cd $(DIR) \ - && $(GO) mod tidy -compat=1.20 + && $(GO) mod tidy -compat=1.21 .PHONY: lint-modules lint-modules: go-mod-tidy @@ -234,23 +217,23 @@ lint-modules: go-mod-tidy lint: misspell lint-modules golangci-lint govulncheck .PHONY: vanity-import-check -vanity-import-check: | $(PORTO) +vanity-import-check: $(PORTO) @$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 ) .PHONY: misspell -misspell: | $(MISSPELL) +misspell: $(MISSPELL) @$(MISSPELL) -w $(ALL_DOCS) .PHONY: govulncheck govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%) govulncheck/%: DIR=$* -govulncheck/%: | $(GOVULNCHECK) +govulncheck/%: $(GOVULNCHECK) @echo "govulncheck ./... in $(DIR)" \ && cd $(DIR) \ && $(GOVULNCHECK) ./... .PHONY: codespell -codespell: | $(CODESPELL) +codespell: $(CODESPELL) @$(DOCKERPY) $(CODESPELL) .PHONY: license-check @@ -263,15 +246,6 @@ license-check: exit 1; \ fi -DEPENDABOT_CONFIG = .github/dependabot.yml -.PHONY: dependabot-check -dependabot-check: | $(DBOTCONF) - @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 ) - -.PHONY: dependabot-generate -dependabot-generate: | $(DBOTCONF) - @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG) - .PHONY: check-clean-work-tree check-clean-work-tree: @if ! git diff --quiet; then \ @@ -284,13 +258,11 @@ check-clean-work-tree: SEMCONVPKG ?= "semconv/" .PHONY: semconv-generate -semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) +semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." 
--only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" .PHONY: gorelease @@ -302,17 +274,25 @@ gorelease/%:| $(GORELEASE) && $(GORELEASE) \ || echo "" +.PHONY: verify-mods +verify-mods: $(MULTIMOD) + $(MULTIMOD) verify + .PHONY: prerelease -prerelease: | $(MULTIMOD) +prerelease: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET} + $(MULTIMOD) prerelease -m ${MODSET} COMMIT ?= "HEAD" .PHONY: add-tags -add-tags: | $(MULTIMOD) +add-tags: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} + $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} .PHONY: lint-markdown -lint-markdown: +lint-markdown: docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md + +.PHONY: verify-readmes +verify-readmes: + ./verify_readmes.sh diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/README.md b/terraform/providers/google/vendor/go.opentelemetry.io/otel/README.md index 7766259a5c1..657df347103 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/README.md +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/README.md @@ -15,7 +15,7 @@ It provides a set of APIs to directly measure performance and behavior of your s |---------|--------------------| | Traces | Stable | | Metrics | Stable | -| Logs | In development[^1] | +| Logs | Beta[^1] | Progress and status specific to this repository is tracked in our [project boards](https://github.com/open-telemetry/opentelemetry-go/projects) @@ -47,23 +47,29 @@ stop ensuring compatibility with these versions in the following manner: Currently, this project supports the following environments. -| OS | Go Version | Architecture | -|---------|------------|--------------| -| Ubuntu | 1.22 | amd64 | -| Ubuntu | 1.21 | amd64 | -| Ubuntu | 1.20 | amd64 | -| Ubuntu | 1.22 | 386 | -| Ubuntu | 1.21 | 386 | -| Ubuntu | 1.20 | 386 | -| MacOS | 1.22 | amd64 | -| MacOS | 1.21 | amd64 | -| MacOS | 1.20 | amd64 | -| Windows | 1.22 | amd64 | -| Windows | 1.21 | amd64 | -| Windows | 1.20 | amd64 | -| Windows | 1.22 | 386 | -| Windows | 1.21 | 386 | -| Windows | 1.20 | 386 | +| OS | Go Version | Architecture | +|----------|------------|--------------| +| Ubuntu | 1.23 | amd64 | +| Ubuntu | 1.22 | amd64 | +| Ubuntu | 1.21 | amd64 | +| Ubuntu | 1.23 | 386 | +| Ubuntu | 1.22 | 386 | +| Ubuntu | 1.21 | 386 | +| Linux | 1.23 | arm64 | +| Linux | 1.22 | arm64 | +| Linux | 1.21 | arm64 | +| macOS 13 | 1.23 | amd64 | +| macOS 13 | 1.22 | amd64 | +| macOS 13 | 1.21 | amd64 | +| macOS | 1.23 | arm64 | +| macOS | 1.22 | arm64 | +| macOS | 1.21 | arm64 | +| Windows | 1.23 | amd64 | +| Windows | 1.22 | amd64 | +| Windows | 1.21 | amd64 | +| Windows | 1.23 | 386 | +| Windows | 1.22 | 386 | +| Windows | 1.21 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. @@ -100,12 +106,12 @@ export pipeline to send that telemetry to an observability platform. All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters). 
-| Exporter                              | Metrics | Traces |
-|---------------------------------------|:-------:|:------:|
-| [OTLP](./exporters/otlp/)             | ✓       | ✓      |
-| [Prometheus](./exporters/prometheus/) | ✓       |        |
-| [stdout](./exporters/stdout/)         | ✓       | ✓      |
-| [Zipkin](./exporters/zipkin/)         |         | ✓      |
+| Exporter                              | Logs | Metrics | Traces |
+|---------------------------------------|:----:|:-------:|:------:|
+| [OTLP](./exporters/otlp/)             | ✓    | ✓       | ✓      |
+| [Prometheus](./exporters/prometheus/) |      | ✓       |        |
+| [stdout](./exporters/stdout/)         | ✓    | ✓       | ✓      |
+| [Zipkin](./exporters/zipkin/)         |      |         | ✓      |
 
 ## Contributing
 
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/RELEASING.md b/terraform/providers/google/vendor/go.opentelemetry.io/otel/RELEASING.md
index d2691d0bd8b..59992984d42 100644
--- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -27,6 +27,12 @@ You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org
 
 You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
 
+## Verify changes for contrib repository
+
+If the changes in the main repository are going to affect the contrib repository, it is important to verify that the changes are compatible with the contrib repository.
+
+Follow [the steps](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md#verify-otel-changes) in the contrib repository to verify OTel changes.
+
 ## Pre-Release
 
 First, decide which module sets will be released and update their versions
@@ -63,6 +69,7 @@ Update go.mod for submodules to depend on the new release which will happen in t
    ```
 
 - Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`).
+  - Make sure the new section is under the comment for released section, like `<!-- Released section -->`, so it is protected from being overwritten in the future.
 - Update all the appropriate links at the bottom.
 
 4. Push the changes to upstream and create a Pull Request on GitHub.
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/README.md b/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/README.md
new file mode 100644
index 00000000000..5b3da8f14ca
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/README.md
@@ -0,0 +1,3 @@
+# Attribute
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/attribute)](https://pkg.go.dev/go.opentelemetry.io/otel/attribute)
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/doc.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/doc.go
index dafe7424dfb..eef51ebc2a2 100644
--- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/doc.go
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/doc.go
@@ -1,16 +1,5 @@
 // Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
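
As a hedged illustration of the exporters listed in the table above, the sketch below constructs the OTLP trace exporter over gRPC; the `localhost:4317` collector endpoint is an assumption. Per the v1.25.0 changelog entry earlier in this file, the client is now created via `grpc.NewClient` in idle mode, so `grpc.WithBlock` passed through `WithDialOption` is ignored:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()

	// The underlying gRPC client is created lazily (idle mode), so this
	// call returns without contacting the collector.
	exporter, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpoint("localhost:4317"), // assumed local collector
		otlptracegrpc.WithInsecure(),
	)
	if err != nil {
		panic(err)
	}

	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exporter))
	defer func() { _ = tp.Shutdown(ctx) }()
	otel.SetTracerProvider(tp)
}
```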
+// SPDX-License-Identifier: Apache-2.0 // Package attribute provides key and value attributes. package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/encoder.go index fe2bc5766cf..318e42fcabe 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/filter.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/filter.go index 638c213d59a..be9cd922d87 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/filter.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/filter.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/iterator.go index 841b271fb7d..f2ba89ce4bc 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/iterator.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/key.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/key.go index 0656a04e43b..d9a22c65020 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/key.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/kv.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/kv.go index 1ddf3ce0580..3028f9a40f8 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/kv.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/kv.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/set.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/set.go index fb6da51450c..bff9c7fdbb9 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -1,24 +1,14 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0
 
 package attribute // import "go.opentelemetry.io/otel/attribute"
 
 import (
+	"cmp"
 	"encoding/json"
 	"reflect"
+	"slices"
 	"sort"
-	"sync"
 )
 
 type (
@@ -26,23 +16,33 @@ type (
 	// immutable set of attributes, with an internal cache for storing
 	// attribute encodings.
 	//
-	// This type supports the Equivalent method of comparison using values of
-	// type Distinct.
+	// This type will remain comparable for backwards compatibility. The
+	// equivalence of Sets across versions is not guaranteed to be stable.
+	// Prior versions may find two Sets to be equal or not when compared
+	// directly (i.e. ==), but subsequent versions may not. Users should use
+	// the Equals method to ensure stable equivalence checking.
+	//
+	// Users should also use the Distinct returned from Equivalent as a map key
+	// instead of a Set directly. In addition to that type providing guarantees
+	// on stable equivalence, it may also provide performance improvements.
 	Set struct {
 		equivalent Distinct
 	}
 
-	// Distinct wraps a variable-size array of KeyValue, constructed with keys
-	// in sorted order. This can be used as a map key or for equality checking
-	// between Sets.
+	// Distinct is a unique identifier of a Set.
+	//
+	// Distinct is designed to ensure equivalence stability: comparisons
+	// will return the same value across versions. For this reason, Distinct
+	// should always be used as a map key instead of a Set.
 	Distinct struct {
 		iface interface{}
 	}
 
-	// Sortable implements sort.Interface, used for sorting KeyValue. This is
-	// an exported type to support a memory optimization. A pointer to one of
-	// these is needed for the call to sort.Stable(), which the caller may
-	// provide in order to avoid an allocation. See NewSetWithSortable().
+	// Sortable implements sort.Interface, used for sorting KeyValue.
+	//
+	// Deprecated: This type is no longer used. It was added as a performance
+	// optimization for Go < 1.21 that is no longer needed (Go < 1.21 is no
+	// longer supported by the module).
 	Sortable []KeyValue
 )
 
@@ -56,12 +56,6 @@ var (
 			iface: [0]KeyValue{},
 		},
 	}
-
-	// sortables is a pool of Sortables used to create Sets with a user does
-	// not provide one.
-	sortables = sync.Pool{
-		New: func() interface{} { return new(Sortable) },
-	}
 )
 
 // EmptySet returns a reference to a Set with no elements.
@@ -187,13 +181,7 @@ func empty() Set {
 // Except for empty sets, this method adds an additional allocation compared
 // with calls that include a Sortable.
 func NewSet(kvs ...KeyValue) Set {
-	// Check for empty set.
-	if len(kvs) == 0 {
-		return empty()
-	}
-	srt := sortables.Get().(*Sortable)
-	s, _ := NewSetWithSortableFiltered(kvs, srt, nil)
-	sortables.Put(srt)
+	s, _ := NewSetWithFiltered(kvs, nil)
 	return s
 }
 
@@ -201,12 +189,10 @@ func NewSet(kvs ...KeyValue) Set {
 // NewSetWithSortableFiltered for more details.
 //
 // This call includes a Sortable option as a memory optimization.
-func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set {
-	// Check for empty set.
-	if len(kvs) == 0 {
-		return empty()
-	}
-	s, _ := NewSetWithSortableFiltered(kvs, tmp, nil)
+//
+// Deprecated: Use [NewSet] instead.
+func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set { + s, _ := NewSetWithFiltered(kvs, nil) return s } @@ -220,48 +206,12 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { if len(kvs) == 0 { return empty(), nil } - srt := sortables.Get().(*Sortable) - s, filtered := NewSetWithSortableFiltered(kvs, srt, filter) - sortables.Put(srt) - return s, filtered -} - -// NewSetWithSortableFiltered returns a new Set. -// -// Duplicate keys are eliminated by taking the last value. This -// re-orders the input slice so that unique last-values are contiguous -// at the end of the slice. -// -// This ensures the following: -// -// - Last-value-wins semantics -// - Caller sees the reordering, but doesn't lose values -// - Repeated call preserve last-value wins. -// -// Note that methods are defined on Set, although this returns Set. Callers -// can avoid memory allocations by: -// -// - allocating a Sortable for use as a temporary in this method -// - allocating a Set for storing the return value of this constructor. -// -// The result maintains a cache of encoded attributes, by attribute.EncoderID. -// This value should not be copied after its first use. -// -// The second []KeyValue return value is a list of attributes that were -// excluded by the Filter (if non-nil). -func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) { - // Check for empty set. - if len(kvs) == 0 { - return empty(), nil - } - - *tmp = kvs // Stable sort so the following de-duplication can implement // last-value-wins semantics. - sort.Stable(tmp) - - *tmp = nil + slices.SortStableFunc(kvs, func(a, b KeyValue) int { + return cmp.Compare(a.Key, b.Key) + }) position := len(kvs) - 1 offset := position - 1 @@ -289,6 +239,35 @@ func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (S return Set{equivalent: computeDistinct(kvs)}, nil } +// NewSetWithSortableFiltered returns a new Set. +// +// Duplicate keys are eliminated by taking the last value. This +// re-orders the input slice so that unique last-values are contiguous +// at the end of the slice. +// +// This ensures the following: +// +// - Last-value-wins semantics +// - Caller sees the reordering, but doesn't lose values +// - Repeated call preserve last-value wins. +// +// Note that methods are defined on Set, although this returns Set. Callers +// can avoid memory allocations by: +// +// - allocating a Sortable for use as a temporary in this method +// - allocating a Set for storing the return value of this constructor. +// +// The result maintains a cache of encoded attributes, by attribute.EncoderID. +// This value should not be copied after its first use. +// +// The second []KeyValue return value is a list of attributes that were +// excluded by the Filter (if non-nil). +// +// Deprecated: Use [NewSetWithFiltered] instead. +func NewSetWithSortableFiltered(kvs []KeyValue, _ *Sortable, filter Filter) (Set, []KeyValue) { + return NewSetWithFiltered(kvs, filter) +} + // filteredToFront filters slice in-place using keep function. All KeyValues that need to // be removed are moved to the front. All KeyValues that need to be kept are // moved (in-order) to the back. 
The index for the first KeyValue to be kept is diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/value.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/value.go index cb21dd5c096..9ea0ecbbd27 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" @@ -242,15 +231,27 @@ func (v Value) Emit() string { case BOOL: return strconv.FormatBool(v.AsBool()) case INT64SLICE: - return fmt.Sprint(v.asInt64Slice()) + j, err := json.Marshal(v.asInt64Slice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asInt64Slice()) + } + return string(j) case INT64: return strconv.FormatInt(v.AsInt64(), 10) case FLOAT64SLICE: - return fmt.Sprint(v.asFloat64Slice()) + j, err := json.Marshal(v.asFloat64Slice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asFloat64Slice()) + } + return string(j) case FLOAT64: return fmt.Sprint(v.AsFloat64()) case STRINGSLICE: - return fmt.Sprint(v.asStringSlice()) + j, err := json.Marshal(v.asStringSlice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asStringSlice()) + } + return string(j) case STRING: return v.stringly default: diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/baggage/README.md b/terraform/providers/google/vendor/go.opentelemetry.io/otel/baggage/README.md new file mode 100644 index 00000000000..7d798435e12 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/baggage/README.md @@ -0,0 +1,3 @@ +# Baggage + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/baggage)](https://pkg.go.dev/go.opentelemetry.io/otel/baggage) diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 7d27cf77d5c..b3569e95e5c 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
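
To make the `attribute` changes above concrete (deduplication without a `Sortable`, `Distinct` as the stable map key, and JSON emission of slice values), here is a small sketch; the keys and values are illustrative:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// NewSet sorts and de-duplicates with last-value-wins semantics;
	// no temporary Sortable is needed anymore.
	set := attribute.NewSet(
		attribute.String("k", "old"),
		attribute.String("k", "new"),
		attribute.StringSlice("peers", []string{"a", "b"}),
	)
	fmt.Println(set.Len()) // 2

	// Use Distinct, not Set, as a map key for stable equivalence.
	counts := map[attribute.Distinct]int{}
	counts[set.Equivalent()]++

	// Slice values now emit their JSON representation (#5159).
	if v, ok := set.Value("peers"); ok {
		fmt.Println(v.Emit()) // ["a","b"]
	}
}
```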
+// SPDX-License-Identifier: Apache-2.0
 
 package baggage // import "go.opentelemetry.io/otel/baggage"
 
@@ -19,6 +8,7 @@ import (
 	"fmt"
 	"net/url"
 	"strings"
+	"unicode/utf8"
 
 	"go.opentelemetry.io/otel/internal/baggage"
 )
@@ -54,9 +44,15 @@ type Property struct {
 
 // NewKeyProperty returns a new Property for key.
 //
+// The passed key must be a valid, non-empty UTF-8 string.
 // If key is invalid, an error will be returned.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on Property keys.
+// For example, the W3C Baggage specification restricts the Property keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alpha-numeric values are strongly recommended to be used as Property keys.
 func NewKeyProperty(key string) (Property, error) {
-	if !validateKey(key) {
+	if !validateBaggageName(key) {
 		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
 	}
 
@@ -67,11 +63,15 @@ func NewKeyProperty(key string) (Property, error) {
 // NewKeyValueProperty returns a new Property for key with value.
 //
 // The passed key must be compliant with W3C Baggage specification.
-// The passed value must be precent-encoded as defined in W3C Baggage specification.
+// The passed value must be percent-encoded as defined in W3C Baggage specification.
 //
 // Notice: Consider using [NewKeyValuePropertyRaw] instead
-// that does not require precent-encoding of the value.
+// that does not require percent-encoding of the value.
 func NewKeyValueProperty(key, value string) (Property, error) {
+	if !validateKey(key) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
 	if !validateValue(value) {
 		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
 	}
@@ -84,11 +84,20 @@ func NewKeyValueProperty(key, value string) (Property, error) {
 
 // NewKeyValuePropertyRaw returns a new Property for key with value.
 //
-// The passed key must be compliant with W3C Baggage specification.
+// The passed key must be a valid, non-empty UTF-8 string.
+// The passed value must be a valid UTF-8 string.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on Property keys.
+// For example, the W3C Baggage specification restricts the Property keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alpha-numeric values are strongly recommended to be used as Property keys.
 func NewKeyValuePropertyRaw(key, value string) (Property, error) {
-	if !validateKey(key) {
+	if !validateBaggageName(key) {
 		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
 	}
+	if !validateBaggageValue(value) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
 
 	p := Property{
 		key:   key,
@@ -125,12 +134,15 @@ func (p Property) validate() error {
 		return fmt.Errorf("invalid property: %w", err)
 	}
 
-	if !validateKey(p.key) {
+	if !validateBaggageName(p.key) {
 		return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
 	}
 
 	if !p.hasValue && p.value != "" {
 		return errFunc(errors.New("inconsistent value"))
 	}
+	if p.hasValue && !validateBaggageValue(p.value) {
+		return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
+	}
 	return nil
 }
 
@@ -148,7 +160,15 @@ func (p Property) Value() (string, bool) {
 
 // String encodes Property into a header string compliant with the W3C Baggage
 // specification.
+// It returns an empty string if the key does not comply with the W3C Baggage
+// specification. This can happen for a UTF-8 key, as it may contain
+// invalid characters.
 func (p Property) String() string {
+	// W3C Baggage specification does not allow percent-encoded keys.
+	if !validateKey(p.key) {
+		return ""
+	}
+
 	if p.hasValue {
 		return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value))
 	}
@@ -213,9 +233,14 @@ func (p properties) validate() error {
 
 // String encodes properties into a header string compliant with the W3C Baggage
 // specification.
 func (p properties) String() string {
-	props := make([]string, len(p))
-	for i, prop := range p {
-		props[i] = prop.String()
+	props := make([]string, 0, len(p))
+	for _, prop := range p {
+		s := prop.String()
+
+		// Ignore empty properties.
+		if s != "" {
+			props = append(props, s)
+		}
 	}
 	return strings.Join(props, propertyDelimiter)
 }
@@ -232,14 +257,18 @@ type Member struct {
 	hasData bool
 }
 
-// NewMemberRaw returns a new Member from the passed arguments.
+// NewMember returns a new Member from the passed arguments.
 //
 // The passed key must be compliant with W3C Baggage specification.
-// The passed value must be precent-encoded as defined in W3C Baggage specification.
+// The passed value must be percent-encoded as defined in W3C Baggage specification.
 //
 // Notice: Consider using [NewMemberRaw] instead
-// that does not require precent-encoding of the value.
+// that does not require percent-encoding of the value.
 func NewMember(key, value string, props ...Property) (Member, error) {
+	if !validateKey(key) {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
 	if !validateValue(value) {
 		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
 	}
@@ -252,7 +281,13 @@ func NewMember(key, value string, props ...Property) (Member, error) {
 
 // NewMemberRaw returns a new Member from the passed arguments.
 //
-// The passed key must be compliant with W3C Baggage specification.
+// The passed key must be a valid, non-empty UTF-8 string.
+// The passed value must be a valid UTF-8 string.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on baggage keys.
+// For example, the W3C Baggage specification restricts the baggage keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alpha-numeric values are strongly recommended to be used as baggage keys.
 func NewMemberRaw(key, value string, props ...Property) (Member, error) {
 	m := Member{
 		key:   key,
@@ -304,19 +339,45 @@ func parseMember(member string) (Member, error) {
 		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
 	}
 
-	val := strings.TrimSpace(v)
-	if !validateValue(val) {
+	rawVal := strings.TrimSpace(v)
+	if !validateValue(rawVal) {
 		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v)
 	}
 
-	// Decode a precent-encoded value.
-	value, err := url.PathUnescape(val)
+	// Decode a percent-encoded value.
+	unescapeVal, err := url.PathUnescape(rawVal)
 	if err != nil {
-		return newInvalidMember(), fmt.Errorf("%w: %v", errInvalidValue, err)
+		return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err)
 	}
+
+	value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
 	return Member{key: key, value: value, properties: props, hasData: true}, nil
 }
 
+// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'.
+func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string {
+	if utf8.ValidString(unescapeVal) {
+		return unescapeVal
+	}
+	// W3C baggage spec:
+	// https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69
+
+	var b strings.Builder
+	b.Grow(cap)
+	for i := 0; i < len(unescapeVal); {
+		r, size := utf8.DecodeRuneInString(unescapeVal[i:])
+		if r == utf8.RuneError && size == 1 {
+			// Invalid UTF-8 sequence found, replace it with '�'
+			_, _ = b.WriteString("�")
+		} else {
+			_, _ = b.WriteRune(r)
+		}
+		i += size
+	}
+
+	return b.String()
+}
+
 // validate ensures m conforms to the W3C Baggage specification.
 // A key must be an ASCII string, returning an error otherwise.
 func (m Member) validate() error {
@@ -324,9 +385,12 @@ func (m Member) validate() error {
 		return fmt.Errorf("%w: %q", errInvalidMember, m)
 	}
 
-	if !validateKey(m.key) {
+	if !validateBaggageName(m.key) {
 		return fmt.Errorf("%w: %q", errInvalidKey, m.key)
 	}
+	if !validateBaggageValue(m.value) {
+		return fmt.Errorf("%w: %q", errInvalidValue, m.value)
+	}
 	return m.properties.validate()
 }
 
@@ -341,13 +405,18 @@ func (m Member) Properties() []Property { return m.properties.Copy() }
 
 // String encodes Member into a header string compliant with the W3C Baggage
 // specification.
+// It returns an empty string if the key does not comply with the W3C Baggage
+// specification. This can happen for a UTF-8 key, as it may contain
+// invalid characters.
 func (m Member) String() string {
-	// A key is just an ASCII string. A value is restricted to be
-	// US-ASCII characters excluding CTLs, whitespace,
-	// DQUOTE, comma, semicolon, and backslash.
-	s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, valueEscape(m.value))
+	// W3C Baggage specification does not allow percent-encoded keys.
+	if !validateKey(m.key) {
+		return ""
+	}
+
+	s := m.key + keyValueDelimiter + valueEscape(m.value)
 	if len(m.properties) > 0 {
-		s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String())
+		s += propertyDelimiter + m.properties.String()
 	}
 	return s
 }
@@ -458,7 +527,7 @@ func (b Baggage) Member(key string) Member {
 }
 
 // Members returns all the baggage list-members.
-// The order of the returned list-members does not have significance.
+// The order of the returned list-members is not significant.
 //
 // The returned members are not validated, as we assume the validation happened
 // when they were added to the Baggage.
@@ -479,8 +548,8 @@ func (b Baggage) Members() []Member {
 	return members
 }
 
-// SetMember returns a copy the Baggage with the member included. If the
-// baggage contains a Member with the same key the existing Member is
+// SetMember returns a copy of the Baggage with the member included. If the
+// baggage contains a Member with the same key, the existing Member is
 replaced.
 //
 // If member is invalid according to the W3C Baggage specification, an error
@@ -538,14 +607,22 @@ func (b Baggage) Len() int {
 
 // String encodes Baggage into a header string compliant with the W3C Baggage
 // specification.
+// It ignores members whose keys do not comply with the W3C Baggage
+// specification. This can happen for a UTF-8 key, as it may contain
+// invalid characters.
 func (b Baggage) String() string {
 	members := make([]string, 0, len(b.list))
 	for k, v := range b.list {
-		members = append(members, Member{
+		s := Member{
 			key:        k,
 			value:      v.Value,
 			properties: fromInternalProperties(v.Properties),
-		}.String())
+		}.String()
+
+		// Ignore empty members.
+ if s != "" { + members = append(members, s) + } } return strings.Join(members, listDelimiter) } @@ -616,11 +693,13 @@ func parsePropertyInternal(s string) (p Property, ok bool) { return } - // Decode a precent-encoded value. - value, err := url.PathUnescape(s[valueStart:valueEnd]) + // Decode a percent-encoded value. + rawVal := s[valueStart:valueEnd] + unescapeVal, err := url.PathUnescape(rawVal) if err != nil { return } + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) ok = true p.key = s[keyStart:keyEnd] @@ -641,6 +720,113 @@ func skipSpace(s string, offset int) int { return i } +var safeKeyCharset = [utf8.RuneSelf]bool{ + // 0x23 to 0x27 + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + + // 0x30 to 0x39 + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + + // 0x41 to 0x5a + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'V': true, + 'W': true, + 'X': true, + 'Y': true, + 'Z': true, + + // 0x5e to 0x7a + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + + // remainder + '!': true, + '*': true, + '+': true, + '-': true, + '.': true, + '|': true, + '~': true, +} + +// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name. +// Baggage name is a valid, non-empty UTF-8 string. +func validateBaggageName(s string) bool { + if len(s) == 0 { + return false + } + + return utf8.ValidString(s) +} + +// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value. +// Baggage value is a valid UTF-8 strings. +// Empty string is also a valid UTF-8 string. +func validateBaggageValue(s string) bool { + return utf8.ValidString(s) +} + +// validateKey checks if the string is a valid W3C Baggage key. func validateKey(s string) bool { if len(s) == 0 { return false @@ -656,19 +842,10 @@ func validateKey(s string) bool { } func validateKeyChar(c int32) bool { - return (c >= 0x23 && c <= 0x27) || - (c >= 0x30 && c <= 0x39) || - (c >= 0x41 && c <= 0x5a) || - (c >= 0x5e && c <= 0x7a) || - c == 0x21 || - c == 0x2a || - c == 0x2b || - c == 0x2d || - c == 0x2e || - c == 0x7c || - c == 0x7e + return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c] } +// validateValue checks if the string is a valid W3C Baggage value. 
func validateValue(s string) bool { for _, c := range s { if !validateValueChar(c) { @@ -679,12 +856,109 @@ func validateValue(s string) bool { return true } +var safeValueCharset = [utf8.RuneSelf]bool{ + '!': true, // 0x21 + + // 0x23 to 0x2b + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '(': true, + ')': true, + '*': true, + '+': true, + + // 0x2d to 0x3a + '-': true, + '.': true, + '/': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + ':': true, + + // 0x3c to 0x5b + '<': true, // 0x3C + '=': true, // 0x3D + '>': true, // 0x3E + '?': true, // 0x3F + '@': true, // 0x40 + 'A': true, // 0x41 + 'B': true, // 0x42 + 'C': true, // 0x43 + 'D': true, // 0x44 + 'E': true, // 0x45 + 'F': true, // 0x46 + 'G': true, // 0x47 + 'H': true, // 0x48 + 'I': true, // 0x49 + 'J': true, // 0x4A + 'K': true, // 0x4B + 'L': true, // 0x4C + 'M': true, // 0x4D + 'N': true, // 0x4E + 'O': true, // 0x4F + 'P': true, // 0x50 + 'Q': true, // 0x51 + 'R': true, // 0x52 + 'S': true, // 0x53 + 'T': true, // 0x54 + 'U': true, // 0x55 + 'V': true, // 0x56 + 'W': true, // 0x57 + 'X': true, // 0x58 + 'Y': true, // 0x59 + 'Z': true, // 0x5A + '[': true, // 0x5B + + // 0x5d to 0x7e + ']': true, // 0x5D + '^': true, // 0x5E + '_': true, // 0x5F + '`': true, // 0x60 + 'a': true, // 0x61 + 'b': true, // 0x62 + 'c': true, // 0x63 + 'd': true, // 0x64 + 'e': true, // 0x65 + 'f': true, // 0x66 + 'g': true, // 0x67 + 'h': true, // 0x68 + 'i': true, // 0x69 + 'j': true, // 0x6A + 'k': true, // 0x6B + 'l': true, // 0x6C + 'm': true, // 0x6D + 'n': true, // 0x6E + 'o': true, // 0x6F + 'p': true, // 0x70 + 'q': true, // 0x71 + 'r': true, // 0x72 + 's': true, // 0x73 + 't': true, // 0x74 + 'u': true, // 0x75 + 'v': true, // 0x76 + 'w': true, // 0x77 + 'x': true, // 0x78 + 'y': true, // 0x79 + 'z': true, // 0x7A + '{': true, // 0x7B + '|': true, // 0x7C + '}': true, // 0x7D + '~': true, // 0x7E +} + func validateValueChar(c int32) bool { - return c == 0x21 || - (c >= 0x23 && c <= 0x2b) || - (c >= 0x2d && c <= 0x3a) || - (c >= 0x3c && c <= 0x5b) || - (c >= 0x5d && c <= 0x7e) + return c >= 0 && c < int32(utf8.RuneSelf) && safeValueCharset[c] } // valueEscape escapes the string so it can be safely placed inside a baggage value, diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/baggage/context.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/baggage/context.go index 24b34b7564a..a572461a05f 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/baggage/context.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/baggage/context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
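
The baggage changes above relax keys and values to arbitrary valid UTF-8 at the API boundary while keeping the serialized header W3C-compliant. A small sketch of the resulting behavior (member names are illustrative):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// Raw UTF-8 values are accepted and percent-encoded on serialization.
	member, err := baggage.NewMemberRaw("user", "Alice Smith")
	if err != nil {
		panic(err)
	}
	bag, _ := baggage.New(member)
	fmt.Println(bag.String()) // user=Alice%20Smith

	// A key that is valid UTF-8 but not a W3C token is accepted by the
	// API, yet dropped when the header string is encoded.
	utf8Member, _ := baggage.NewMemberRaw("clé", "v")
	utf8Bag, _ := baggage.New(utf8Member)
	fmt.Println(utf8Bag.String() == "") // true
}
```

The lookup-table validators (`safeKeyCharset`, `safeValueCharset`) replace the previous range comparisons, which is the performance change noted in the v1.26.0 changelog entry (#5214).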
+// SPDX-License-Identifier: Apache-2.0 package baggage // import "go.opentelemetry.io/otel/baggage" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/baggage/doc.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/baggage/doc.go index 4545100df67..b51d87cab70 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/baggage/doc.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/baggage/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package baggage provides functionality for storing and retrieving diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/codes/README.md b/terraform/providers/google/vendor/go.opentelemetry.io/otel/codes/README.md new file mode 100644 index 00000000000..24c52b387d2 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/codes/README.md @@ -0,0 +1,3 @@ +# Codes + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/codes)](https://pkg.go.dev/go.opentelemetry.io/otel/codes) diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/codes/codes.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/codes/codes.go index 587ebae4e30..2acbac35466 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package codes // import "go.opentelemetry.io/otel/codes" @@ -94,7 +83,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return fmt.Errorf("invalid code: %q", ci) } - *c = Code(ci) + *c = Code(ci) // nolint: gosec // Bit size of 32 check above. return nil } return fmt.Errorf("invalid code: %q", string(b)) diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/codes/doc.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/codes/doc.go index 4e328fbb4b3..ee8db448b8b 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/codes/doc.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/codes/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package codes defines the canonical error codes used by OpenTelemetry. diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/doc.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/doc.go index 36d7c24e88e..921f85961ad 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/doc.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package otel provides global access to the OpenTelemetry API. The subpackages of @@ -28,6 +17,8 @@ To read more about tracing, see go.opentelemetry.io/otel/trace. To read more about metrics, see go.opentelemetry.io/otel/metric. +To read more about logs, see go.opentelemetry.io/otel/log. + To read more about propagation, see go.opentelemetry.io/otel/propagation and go.opentelemetry.io/otel/baggage. */ diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/error_handler.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/error_handler.go index 72fad85412b..67414c71e05 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/error_handler.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/error_handler.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/terraform/providers/google/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh index 9a58fb1d372..93e80ea306c 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh @@ -1,18 +1,7 @@ #!/usr/bin/env bash # Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# SPDX-License-Identifier: Apache-2.0 set -euo pipefail diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/handler.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/handler.go index 4115fe3bbb5..07623b67914 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/handler.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/handler.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" @@ -18,12 +7,8 @@ import ( "go.opentelemetry.io/otel/internal/global" ) -var ( - // Compile-time check global.ErrDelegator implements ErrorHandler. - _ ErrorHandler = (*global.ErrDelegator)(nil) - // Compile-time check global.ErrLogger implements ErrorHandler. - _ ErrorHandler = (*global.ErrLogger)(nil) -) +// Compile-time check global.ErrDelegator implements ErrorHandler. +var _ ErrorHandler = (*global.ErrDelegator)(nil) // GetErrorHandler returns the global ErrorHandler instance. // @@ -44,5 +29,5 @@ func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() } // delegate errors to h. func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) } -// Handle is a convenience function for ErrorHandler().Handle(err). -func Handle(err error) { global.Handle(err) } +// Handle is a convenience function for GetErrorHandler().Handle(err). 
+func Handle(err error) { global.GetErrorHandler().Handle(err) } diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go index 622c3ee3f27..822d8479474 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package attribute provide several helper functions for some commonly used @@ -25,33 +14,33 @@ import ( // BoolSliceValue converts a bool slice into an array with same elements as slice. func BoolSliceValue(v []bool) interface{} { var zero bool - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // Int64SliceValue converts an int64 slice into an array with same elements as slice. func Int64SliceValue(v []int64) interface{} { var zero int64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // Float64SliceValue converts a float64 slice into an array with same elements as slice. func Float64SliceValue(v []float64) interface{} { var zero float64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // StringSliceValue converts a string slice into an array with same elements as slice. func StringSliceValue(v []string) interface{} { var zero string - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // AsBoolSlice converts a bool array into a slice into with same elements as array. 
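The `attribute.go` hunk above swaps the old `copy(cp.Elem().Slice(0, len(v)).Interface().([]T), v)` pattern for `reflect.Copy` into the dereferenced array value, dropping the intermediate slice header and type assertion. A hedged standalone sketch of that pattern for one element type (`toArray` is an illustrative name, not vendored code):

```go
package main

import (
	"fmt"
	"reflect"
)

// toArray copies v into a freshly allocated [len(v)]int64 array value and
// returns it as an interface{}, mirroring the shape of the vendored
// Int64SliceValue helper.
func toArray(v []int64) interface{} {
	var zero int64
	// reflect.New yields a *[N]T; Elem dereferences to the addressable array.
	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
	// reflect.Copy behaves like the built-in copy: it copies
	// min(dst.Len(), src.Len()) elements.
	reflect.Copy(cp, reflect.ValueOf(v))
	return cp.Interface()
}

func main() {
	arr := toArray([]int64{1, 2, 3})
	fmt.Printf("%T %v\n", arr, arr) // [3]int64 [1 2 3]
}
```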
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go index b96e5408e69..b4f85f44a93 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package baggage provides base types and functionality to store and retrieve diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/baggage/context.go index 4469700d9cb..3aea9c491f0 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/baggage/context.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/baggage/context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package baggage // import "go.opentelemetry.io/otel/internal/baggage" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/gen.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/gen.go index f532f07e9e5..4259f0320d4 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/gen.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/gen.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/otel/internal" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/handler.go index 5e9b8304792..c657ff8e755 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/handler.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/handler.go @@ -1,38 +1,13 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" import ( "log" - "os" "sync/atomic" ) -var ( - // GlobalErrorHandler provides an ErrorHandler that can be used - // throughout an OpenTelemetry instrumented project. When a user - // specified ErrorHandler is registered (`SetErrorHandler`) all calls to - // `Handle` and will be delegated to the registered ErrorHandler. - GlobalErrorHandler = defaultErrorHandler() - - // Compile-time check that delegator implements ErrorHandler. - _ ErrorHandler = (*ErrDelegator)(nil) - // Compile-time check that errLogger implements ErrorHandler. - _ ErrorHandler = (*ErrLogger)(nil) -) - // ErrorHandler handles irremediable events. type ErrorHandler interface { // Handle handles any error deemed irremediable by an OpenTelemetry @@ -44,59 +19,18 @@ type ErrDelegator struct { delegate atomic.Pointer[ErrorHandler] } -func (d *ErrDelegator) Handle(err error) { - d.getDelegate().Handle(err) -} +// Compile-time check that delegator implements ErrorHandler. +var _ ErrorHandler = (*ErrDelegator)(nil) -func (d *ErrDelegator) getDelegate() ErrorHandler { - return *d.delegate.Load() +func (d *ErrDelegator) Handle(err error) { + if eh := d.delegate.Load(); eh != nil { + (*eh).Handle(err) + return + } + log.Print(err) } // setDelegate sets the ErrorHandler delegate. func (d *ErrDelegator) setDelegate(eh ErrorHandler) { d.delegate.Store(&eh) } - -func defaultErrorHandler() *ErrDelegator { - d := &ErrDelegator{} - d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) - return d -} - -// ErrLogger logs errors if no delegate is set, otherwise they are delegated. -type ErrLogger struct { - l *log.Logger -} - -// Handle logs err if no delegate is set, otherwise it is delegated. -func (h *ErrLogger) Handle(err error) { - h.l.Print(err) -} - -// GetErrorHandler returns the global ErrorHandler instance. -// -// The default ErrorHandler instance returned will log all errors to STDERR -// until an override ErrorHandler is set with SetErrorHandler. All -// ErrorHandler returned prior to this will automatically forward errors to -// the set instance instead of logging. -// -// Subsequent calls to SetErrorHandler after the first will not forward errors -// to the new ErrorHandler for prior returned instances. 
-func GetErrorHandler() ErrorHandler { - return GlobalErrorHandler -} - -// SetErrorHandler sets the global ErrorHandler to h. -// -// The first time this is called all ErrorHandler previously returned from -// GetErrorHandler will send errors to h instead of the default logging -// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not -// delegate errors to h. -func SetErrorHandler(h ErrorHandler) { - GlobalErrorHandler.setDelegate(h) -} - -// Handle is a convenience function for ErrorHandler().Handle(err). -func Handle(err error) { - GetErrorHandler().Handle(err) -} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/instruments.go index ebb13c20678..3a0cc42f6a4 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -292,6 +281,32 @@ func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.Reco } } +type sfGauge struct { + embedded.Float64Gauge + + name string + opts []metric.Float64GaugeOption + + delegate atomic.Value // metric.Float64Gauge +} + +var _ metric.Float64Gauge = (*sfGauge)(nil) + +func (i *sfGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfGauge) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Gauge).Record(ctx, x, opts...) + } +} + type siCounter struct { embedded.Int64Counter @@ -369,3 +384,29 @@ func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.Record ctr.(metric.Int64Histogram).Record(ctx, x, opts...) } } + +type siGauge struct { + embedded.Int64Gauge + + name string + opts []metric.Int64GaugeOption + + delegate atomic.Value // metric.Int64Gauge +} + +var _ metric.Int64Gauge = (*siGauge)(nil) + +func (i *siGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siGauge) Record(ctx context.Context, x int64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Gauge).Record(ctx, x, opts...) 
+ } +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go index c6f305a2b76..adbca7d3477 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -23,17 +12,20 @@ import ( "github.com/go-logr/stdr" ) -// globalLogger is the logging interface used within the otel api and sdk provide details of the internals. +// globalLogger holds a reference to the [logr.Logger] used within +// go.opentelemetry.io/otel. // // The default logger uses stdr which is backed by the standard `log.Logger` // interface. This logger will only show messages at the Error Level. -var globalLogger atomic.Pointer[logr.Logger] +var globalLogger = func() *atomic.Pointer[logr.Logger] { + l := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) -func init() { - SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) -} + p := new(atomic.Pointer[logr.Logger]) + p.Store(&l) + return p +}() -// SetLogger overrides the globalLogger with l. +// SetLogger sets the global Logger to l. // // To see Warn messages use a logger with `l.V(1).Enabled() == true` // To see Info messages use a logger with `l.V(4).Enabled() == true` @@ -42,28 +34,29 @@ func SetLogger(l logr.Logger) { globalLogger.Store(&l) } -func getLogger() logr.Logger { +// GetLogger returns the global logger. +func GetLogger() logr.Logger { return *globalLogger.Load() } // Info prints messages about the general state of the API or SDK. // This should usually be less than 5 messages a minute. func Info(msg string, keysAndValues ...interface{}) { - getLogger().V(4).Info(msg, keysAndValues...) + GetLogger().V(4).Info(msg, keysAndValues...) } // Error prints messages about exceptional states of the API or SDK. func Error(err error, msg string, keysAndValues ...interface{}) { - getLogger().Error(err, msg, keysAndValues...) + GetLogger().Error(err, msg, keysAndValues...) } // Debug prints messages about all internal changes in the API or SDK. func Debug(msg string, keysAndValues ...interface{}) { - getLogger().V(8).Info(msg, keysAndValues...) + GetLogger().V(8).Info(msg, keysAndValues...) } // Warn prints messages about warnings in the API or SDK. // Not an error but is likely more important than an informational event. func Warn(msg string, keysAndValues ...interface{}) { - getLogger().V(1).Info(msg, keysAndValues...) + GetLogger().V(1).Info(msg, keysAndValues...) 
} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/meter.go index 7ed61c0e256..cfd1df9bfa2 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -76,6 +65,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me key := il{ name: name, version: c.InstrumentationVersion(), + schema: c.SchemaURL(), } if p.meters == nil { @@ -175,6 +165,17 @@ func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOpti return i, nil } +func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Gauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Int64ObservableCounter(name, options...) @@ -241,6 +242,17 @@ func (m *meter) Float64Histogram(name string, options ...metric.Float64Histogram return i, nil } +func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Gauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Float64ObservableCounter(name, options...) diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/propagator.go index 06bac35c2fe..38560ff9915 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/propagator.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/propagator.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
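The new `siGauge`/`sfGauge` types and the `Int64Gauge`/`Float64Gauge` meter methods above follow the same delegation pattern as the other global instruments: until a real `metric.Meter` is installed, records go nowhere, and once `setDelegate` runs, every call is forwarded through an atomically loaded delegate. A simplified sketch of that pattern under assumed names (`recorder` and `lazyGauge` are illustrative, not the vendored types):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// recorder is a stand-in for metric.Int64Gauge.
type recorder interface{ Record(v int64) }

type printRecorder struct{}

func (printRecorder) Record(v int64) { fmt.Println("recorded", v) }

// lazyGauge drops measurements until a delegate is installed, mirroring
// how the global gauge forwards only after a real provider is configured.
type lazyGauge struct {
	delegate atomic.Value // holds a recorder
}

func (g *lazyGauge) setDelegate(r recorder) { g.delegate.Store(r) }

func (g *lazyGauge) Record(v int64) {
	if d := g.delegate.Load(); d != nil {
		d.(recorder).Record(v)
		return
	}
	// No delegate yet: the measurement is silently dropped, as in the
	// SDK-less global API.
}

func main() {
	var g lazyGauge
	g.Record(1) // dropped
	g.setDelegate(printRecorder{})
	g.Record(2) // forwarded
}
```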
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/state.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/state.go index 386c8bfdc08..204ea142a50 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/state.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/state.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -25,6 +14,10 @@ import ( ) type ( + errorHandlerHolder struct { + eh ErrorHandler + } + tracerProviderHolder struct { tp trace.TracerProvider } @@ -39,15 +32,59 @@ type ( ) var ( + globalErrorHandler = defaultErrorHandler() globalTracer = defaultTracerValue() globalPropagators = defaultPropagatorsValue() globalMeterProvider = defaultMeterProvider() + delegateErrorHandlerOnce sync.Once delegateTraceOnce sync.Once delegateTextMapPropagatorOnce sync.Once delegateMeterOnce sync.Once ) +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { + return globalErrorHandler.Load().(errorHandlerHolder).eh +} + +// SetErrorHandler sets the global ErrorHandler to h. +// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. +func SetErrorHandler(h ErrorHandler) { + current := GetErrorHandler() + + if _, cOk := current.(*ErrDelegator); cOk { + if _, ehOk := h.(*ErrDelegator); ehOk && current == h { + // Do not assign the default ErrDelegator as its own + // delegate.
+ Error( + errors.New("no ErrorHandler delegate configured"), + "ErrorHandler remains its current value.", + ) + return + } + } + + delegateErrorHandlerOnce.Do(func() { + if def, ok := current.(*ErrDelegator); ok { + def.setDelegate(h) + } + }) + globalErrorHandler.Store(errorHandlerHolder{eh: h}) +} + // TracerProvider is the internal implementation for global.TracerProvider. func TracerProvider() trace.TracerProvider { return globalTracer.Load().(tracerProviderHolder).tp @@ -137,6 +174,12 @@ func SetMeterProvider(mp metric.MeterProvider) { globalMeterProvider.Store(meterProviderHolder{mp: mp}) } +func defaultErrorHandler() *atomic.Value { + v := &atomic.Value{} + v.Store(errorHandlerHolder{eh: &ErrDelegator{}}) + return v +} + func defaultTracerValue() *atomic.Value { v := &atomic.Value{} v.Store(tracerProviderHolder{tp: &tracerProvider{}}) diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/trace.go index 3f61ec12a34..e31f442b48f 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -97,6 +86,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T key := il{ name: name, version: c.InstrumentationVersion(), + schema: c.SchemaURL(), } if p.tracers == nil { @@ -112,10 +102,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return t } -type il struct { - name string - version string -} +type il struct{ name, version, schema string } // tracer is a placeholder for a trace.Tracer. // @@ -193,6 +180,9 @@ func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} // AddEvent does nothing. func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} +// AddLink does nothing. +func (nonRecordingSpan) AddLink(trace.Link) {} + // SetName does nothing. func (nonRecordingSpan) SetName(string) {} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index e07e7940004..9b1da2c02b9 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
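With the `state.go` change above, the global error handler joins the other globals: the default value is an empty `ErrDelegator` whose `Handle` falls back to `log.Print`, and the first `SetErrorHandler` call wires that delegator to the user's handler. A hedged usage sketch against the public `otel` API (the `stderrHandler` type is illustrative):

```go
package main

import (
	"errors"
	"log"

	"go.opentelemetry.io/otel"
)

// stderrHandler is a hypothetical custom ErrorHandler for illustration.
type stderrHandler struct{ l *log.Logger }

func (h stderrHandler) Handle(err error) { h.l.Printf("otel error: %v", err) }

func main() {
	// Before SetErrorHandler is called, otel.Handle falls back to
	// log.Print via the default ErrDelegator.
	otel.Handle(errors.New("pre-delegate error"))

	// The first SetErrorHandler call also wires the default delegator to
	// h, so handlers captured earlier forward here as well.
	otel.SetErrorHandler(stderrHandler{l: log.Default()})
	otel.Handle(errors.New("post-delegate error"))
}
```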
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/otel/internal" @@ -35,7 +24,8 @@ func Int64ToRaw(i int64) uint64 { } func RawToInt64(r uint64) int64 { - return int64(r) + // Assumes original was a valid int64 (overflow not checked). + return int64(r) // nolint: gosec } func Float64ToRaw(f float64) uint64 { @@ -47,9 +37,11 @@ func RawToFloat64(r uint64) float64 { } func RawPtrToFloat64Ptr(r *uint64) *float64 { - return (*float64)(unsafe.Pointer(r)) + // Assumes original was a valid *float64 (overflow not checked). + return (*float64)(unsafe.Pointer(r)) // nolint: gosec } func RawPtrToInt64Ptr(r *uint64) *int64 { - return (*int64)(unsafe.Pointer(r)) + // Assumes original was a valid *int64 (overflow not checked). + return (*int64)(unsafe.Pointer(r)) // nolint: gosec } diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal_logging.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal_logging.go index c4f8acd5d83..6de7f2e4d88 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal_logging.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/internal_logging.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric.go index f955171951f..1e6473b32f3 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/README.md b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/README.md new file mode 100644 index 00000000000..0cf902e01f0 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/README.md @@ -0,0 +1,3 @@ +# Metric API + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/metric) diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go index 072baa8e8d0..cf23db77803 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -50,7 +39,7 @@ type Float64ObservableCounter interface { } // Float64ObservableCounterConfig contains options for asynchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64ObservableCounterConfig struct { description string unit string @@ -108,7 +97,7 @@ type Float64ObservableUpDownCounter interface { } // Float64ObservableUpDownCounterConfig contains options for asynchronous -// counter instruments that record int64 values. +// counter instruments that record float64 values. type Float64ObservableUpDownCounterConfig struct { description string unit string @@ -165,7 +154,7 @@ type Float64ObservableGauge interface { } // Float64ObservableGaugeConfig contains options for asynchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64ObservableGaugeConfig struct { description string unit string diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/asyncint64.go index 9bd6ebf0205..c82ba5324e2 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/config.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/config.go index 778ad2d748b..d9e3b13e4d1 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/config.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/config.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/doc.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/doc.go index 54716e13b35..f153745b005 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/doc.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package metric provides the OpenTelemetry API used to measure metrics about @@ -68,6 +57,23 @@ asynchronous measurement, a Gauge ([Int64ObservableGauge] and See the [OpenTelemetry documentation] for more information about instruments and their intended use. +# Instrument Name + +OpenTelemetry defines an [instrument name syntax] that restricts what +instrument names are allowed. + +Instrument names should ... + + - Not be empty. + - Have an alphabetic character as their first letter. + - Have any letter after the first be an alphanumeric character, ‘_’, ‘.’, + ‘-’, or ‘/’. + - Have a maximum length of 255 letters. + +To ensure compatibility with observability platforms, all instruments created +need to conform to this syntax. Not all implementations of the API will validate +these names; it is the caller's responsibility to ensure compliance. + # Measurements Measurements are made by recording values and information about the values with @@ -164,6 +170,7 @@ It is strongly recommended that authors only embed That implementation is the only one OpenTelemetry authors can guarantee will fully implement all the API interfaces when a user updates their API.
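The new Instrument Name section notes that implementations are not required to validate names, leaving conformance to callers. A hedged sketch of such a caller-side check; the regular expression is one reading of the listed rules, not code from this patch:

```go
package main

import (
	"fmt"
	"regexp"
)

// instrumentNameRE encodes the documented rules: non-empty, starts with a
// letter, followed by letters, digits, '_', '.', '-', or '/', and at most
// 255 characters in total.
var instrumentNameRE = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9_./-]{0,254}$`)

// validInstrumentName reports whether name conforms to the syntax above.
func validInstrumentName(name string) bool {
	return instrumentNameRE.MatchString(name)
}

func main() {
	fmt.Println(validInstrumentName("http.server.request_count")) // true
	fmt.Println(validInstrumentName("9lives"))                    // false: must start with a letter
}
```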
+[instrument name syntax]: https://opentelemetry.io/docs/specs/otel/metrics/api/#instrument-name-syntax [OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/ [GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider */ diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/embedded/README.md b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/embedded/README.md new file mode 100644 index 00000000000..1f6e0efa73d --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/embedded/README.md @@ -0,0 +1,3 @@ +# Metric Embedded + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/embedded) diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go index ae0bdbd2e64..1a9dc68093f 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package embedded provides interfaces embedded within the [OpenTelemetry // metric API]. @@ -113,6 +102,16 @@ type Float64Counter interface{ float64Counter() } // the API package). type Float64Histogram interface{ float64Histogram() } +// Float64Gauge is embedded in [go.opentelemetry.io/otel/metric.Float64Gauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Gauge] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Float64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64Gauge interface{ float64Gauge() } + // Float64ObservableCounter is embedded in // [go.opentelemetry.io/otel/metric.Float64ObservableCounter]. // @@ -185,6 +184,16 @@ type Int64Counter interface{ int64Counter() } // the API package). type Int64Histogram interface{ int64Histogram() } +// Int64Gauge is embedded in [go.opentelemetry.io/otel/metric.Int64Gauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Gauge] if you want users to experience +// a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Int64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64Gauge interface{ int64Gauge() } + // Int64ObservableCounter is embedded in // [go.opentelemetry.io/otel/metric.Int64ObservableCounter]. 
// diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/instrument.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/instrument.go index be89cd53341..ea52e402331 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -27,6 +16,7 @@ type InstrumentOption interface { Int64CounterOption Int64UpDownCounterOption Int64HistogramOption + Int64GaugeOption Int64ObservableCounterOption Int64ObservableUpDownCounterOption Int64ObservableGaugeOption @@ -34,6 +24,7 @@ type InstrumentOption interface { Float64CounterOption Float64UpDownCounterOption Float64HistogramOption + Float64GaugeOption Float64ObservableCounterOption Float64ObservableUpDownCounterOption Float64ObservableGaugeOption @@ -62,6 +53,11 @@ func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64Histogra return c } +func (o descOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.description = string(o) + return c +} + func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { c.description = string(o) return c @@ -92,6 +88,11 @@ func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfi return c } +func (o descOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.description = string(o) + return c +} + func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { c.description = string(o) return c @@ -127,6 +128,11 @@ func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64Histogra return c } +func (o unitOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.unit = string(o) + return c +} + func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { c.unit = string(o) return c @@ -157,6 +163,11 @@ func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfi return c } +func (o unitOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.unit = string(o) + return c +} + func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { c.unit = string(o) return c diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/meter.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/meter.go index 2520bc74af1..14e08c24a4b 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); 
-// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -58,17 +47,41 @@ type Meter interface { // Int64Counter returns a new Int64Counter instrument identified by name // and configured with options. The instrument is used to synchronously // record increasing int64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) + // Int64UpDownCounter returns a new Int64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record int64 measurements during a computational // operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) + // Int64Histogram returns a new Int64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of int64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) + + // Int64Gauge returns a new Int64Gauge instrument identified by name and + // configured with options. The instrument is used to synchronously record + // instantaneous int64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) + // Int64ObservableCounter returns a new Int64ObservableCounter identified // by name and configured with options. The instrument is used to // asynchronously record increasing int64 measurements once per a @@ -78,7 +91,12 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) + // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter // instrument identified by name and configured with options. 
The // instrument is used to asynchronously record int64 measurements once per @@ -88,7 +106,12 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) + // Int64ObservableGauge returns a new Int64ObservableGauge instrument // identified by name and configured with options. The instrument is used // to asynchronously record instantaneous int64 measurements once per a @@ -98,23 +121,51 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error) // Float64Counter returns a new Float64Counter instrument identified by // name and configured with options. The instrument is used to // synchronously record increasing float64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) + // Float64UpDownCounter returns a new Float64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record float64 measurements during a computational // operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) + // Float64Histogram returns a new Float64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of float64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) + + // Float64Gauge returns a new Float64Gauge instrument identified by name and + // configured with options. The instrument is used to synchronously record + // instantaneous float64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) + // Float64ObservableCounter returns a new Float64ObservableCounter // instrument identified by name and configured with options. 
The // instrument is used to asynchronously record increasing float64 @@ -124,7 +175,12 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) + // Float64ObservableUpDownCounter returns a new // Float64ObservableUpDownCounter instrument identified by name and // configured with options. The instrument is used to asynchronously record @@ -134,7 +190,12 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) + // Float64ObservableGauge returns a new Float64ObservableGauge instrument // identified by name and configured with options. The instrument is used // to asynchronously record instantaneous float64 measurements once per a @@ -144,6 +205,10 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error) // RegisterCallback registers f to be called during the collection of a @@ -189,6 +254,7 @@ type Observer interface { // ObserveFloat64 records the float64 value for obsrv. ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption) + // ObserveInt64 records the int64 value for obsrv. 
ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption) } diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/noop/README.md b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/noop/README.md new file mode 100644 index 00000000000..bb89694356b --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/noop/README.md @@ -0,0 +1,3 @@ +# Metric Noop + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/noop) diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/noop/noop.go index acc9a670b22..ca6fcbdc099 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/noop/noop.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/noop/noop.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package noop provides an implementation of the OpenTelemetry metric API that // produces no telemetry and minimizes used computation resources. @@ -43,6 +32,8 @@ var ( _ metric.Float64UpDownCounter = Float64UpDownCounter{} _ metric.Int64Histogram = Int64Histogram{} _ metric.Float64Histogram = Float64Histogram{} + _ metric.Int64Gauge = Int64Gauge{} + _ metric.Float64Gauge = Float64Gauge{} _ metric.Int64ObservableCounter = Int64ObservableCounter{} _ metric.Float64ObservableCounter = Float64ObservableCounter{} _ metric.Int64ObservableGauge = Int64ObservableGauge{} @@ -87,6 +78,12 @@ func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int6 return Int64Histogram{}, nil } +// Int64Gauge returns a Gauge used to record int64 measurements that +// produces no telemetry. +func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + return Int64Gauge{}, nil +} + // Int64ObservableCounter returns an ObservableCounter used to record int64 // measurements that produces no telemetry. func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { @@ -123,6 +120,12 @@ func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric. return Float64Histogram{}, nil } +// Float64Gauge returns a Gauge used to record float64 measurements that +// produces no telemetry. +func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + return Float64Gauge{}, nil +} + // Float64ObservableCounter returns an ObservableCounter used to record int64 // measurements that produces no telemetry. func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { @@ -208,6 +211,20 @@ type Float64Histogram struct{ embedded.Float64Histogram } // Record performs no operation. 
func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {} +// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64 +// measurements. It produces no telemetry. +type Int64Gauge struct{ embedded.Int64Gauge } + +// Record performs no operation. +func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {} + +// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64 +// measurements. It produces no telemetry. +type Float64Gauge struct{ embedded.Float64Gauge } + +// Record performs no operation. +func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {} + // Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record // int64 measurements. It produces no telemetry. type Int64ObservableCounter struct { diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go index 0a4825ae6a7..8403a4bad2d 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -39,7 +28,7 @@ type Float64Counter interface { } // Float64CounterConfig contains options for synchronous counter instruments that -// record int64 values. +// record float64 values. type Float64CounterConfig struct { description string unit string @@ -92,7 +81,7 @@ type Float64UpDownCounter interface { } // Float64UpDownCounterConfig contains options for synchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64UpDownCounterConfig struct { description string unit string @@ -144,8 +133,8 @@ type Float64Histogram interface { Record(ctx context.Context, incr float64, options ...RecordOption) } -// Float64HistogramConfig contains options for synchronous counter instruments -// that record int64 values. +// Float64HistogramConfig contains options for synchronous histogram +// instruments that record float64 values. type Float64HistogramConfig struct { description string unit string @@ -183,3 +172,55 @@ func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 { type Float64HistogramOption interface { applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig } + +// Float64Gauge is an instrument that records instantaneous float64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. 
See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Gauge + + // Record records the instantaneous value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value float64, options ...RecordOption) +} + +// Float64GaugeConfig contains options for synchronous gauge instruments that +// record float64 values. +type Float64GaugeConfig struct { + description string + unit string +} + +// NewFloat64GaugeConfig returns a new [Float64GaugeConfig] with all opts +// applied. +func NewFloat64GaugeConfig(opts ...Float64GaugeOption) Float64GaugeConfig { + var config Float64GaugeConfig + for _, o := range opts { + config = o.applyFloat64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Float64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64GaugeConfig) Unit() string { + return c.unit +} + +// Float64GaugeOption applies options to a [Float64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Float64GaugeOption. +type Float64GaugeOption interface { + applyFloat64Gauge(Float64GaugeConfig) Float64GaugeConfig +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/syncint64.go index 56667d32fc0..783fdfba773 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/syncint64.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/metric/syncint64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -144,7 +133,7 @@ type Int64Histogram interface { Record(ctx context.Context, incr int64, options ...RecordOption) } -// Int64HistogramConfig contains options for synchronous counter instruments +// Int64HistogramConfig contains options for synchronous histogram instruments // that record int64 values. type Int64HistogramConfig struct { description string @@ -183,3 +172,55 @@ func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 { type Int64HistogramOption interface { applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig } + +// Int64Gauge is an instrument that records instantaneous int64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. 
+ embedded.Int64Gauge + + // Record records the instantaneous value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value int64, options ...RecordOption) +} + +// Int64GaugeConfig contains options for synchronous gauge instruments that +// record int64 values. +type Int64GaugeConfig struct { + description string + unit string +} + +// NewInt64GaugeConfig returns a new [Int64GaugeConfig] with all opts +// applied. +func NewInt64GaugeConfig(opts ...Int64GaugeOption) Int64GaugeConfig { + var config Int64GaugeConfig + for _, o := range opts { + config = o.applyInt64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Int64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64GaugeConfig) Unit() string { + return c.unit +} + +// Int64GaugeOption applies options to a [Int64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Int64GaugeOption. +type Int64GaugeOption interface { + applyInt64Gauge(Int64GaugeConfig) Int64GaugeConfig +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/propagation.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/propagation.go index d29aaa32c0b..2fd9497338f 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/propagation.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/propagation.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/propagation/README.md b/terraform/providers/google/vendor/go.opentelemetry.io/otel/propagation/README.md new file mode 100644 index 00000000000..e2959ac747a --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/propagation/README.md @@ -0,0 +1,3 @@ +# Propagation + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/propagation)](https://pkg.go.dev/go.opentelemetry.io/otel/propagation) diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/propagation/baggage.go index 303cdf1cbff..552263ba734 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/propagation/baggage.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/propagation/baggage.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package propagation // import "go.opentelemetry.io/otel/propagation" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/propagation/doc.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/propagation/doc.go index c119eb2858b..33a3baf15f1 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/propagation/doc.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/propagation/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package propagation contains OpenTelemetry context propagators. diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/propagation/propagation.go index c94438f73a5..8c8286aab4d 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/propagation/propagation.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/propagation/propagation.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package propagation // import "go.opentelemetry.io/otel/propagation" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/propagation/trace_context.go index 63e5d62221f..6870e316dc0 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package propagation // import "go.opentelemetry.io/otel/propagation" @@ -46,7 +35,7 @@ var ( versionPart = fmt.Sprintf("%.2X", supportedVersion) ) -// Inject set tracecontext from the Context into the carrier. +// Inject injects the trace context from ctx into carrier. func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { sc := trace.SpanContextFromContext(ctx) if !sc.IsValid() { diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/renovate.json b/terraform/providers/google/vendor/go.opentelemetry.io/otel/renovate.json new file mode 100644 index 00000000000..8c5ac55ca93 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/renovate.json @@ -0,0 +1,24 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "config:recommended" + ], + "ignorePaths": [], + "labels": ["Skip Changelog", "dependencies"], + "postUpdateOptions" : [ + "gomodTidy" + ], + "packageRules": [ + { + "matchManagers": ["gomod"], + "matchDepTypes": ["indirect"], + "enabled": true + }, + { + "matchFileNames": ["internal/tools/**"], + "matchManagers": ["gomod"], + "matchDepTypes": ["indirect"], + "enabled": false + } + ] +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/requirements.txt b/terraform/providers/google/vendor/go.opentelemetry.io/otel/requirements.txt index e0a43e13840..ab09daf9d53 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/requirements.txt +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/requirements.txt @@ -1 +1 @@ -codespell==2.2.6 +codespell==2.3.0 diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/LICENSE b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
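The go.opentelemetry.io/otel/metric diffs above introduce synchronous Int64Gauge and Float64Gauge instruments: new Meter methods, option plumbing in instrument.go, the config types in syncint64.go/syncfloat64.go, and matching no-op implementations. A minimal sketch of how a consumer of the vendored module would use the new gauge; the meter name, instrument name, unit, and attribute below are invented for illustration and are not part of this patch:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/noop"
)

func main() {
	ctx := context.Background()

	// noop.NewMeterProvider satisfies metric.MeterProvider but records
	// nothing, which is handy for tests; an SDK provider plugs in the same way.
	var provider metric.MeterProvider = noop.NewMeterProvider()
	meter := provider.Meter("example/queue") // hypothetical scope name

	// Float64Gauge is the new synchronous gauge: the caller pushes each
	// instantaneous value, unlike Float64ObservableGauge, which pulls
	// values through a registered callback.
	depth, err := meter.Float64Gauge(
		"queue.depth", // must follow the OTel instrument name syntax
		metric.WithDescription("Current depth of the work queue."),
		metric.WithUnit("{item}"),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Record captures the instantaneous value, optionally with attributes.
	depth.Record(ctx, 17, metric.WithAttributes(attribute.String("pool", "default")))
}
```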
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/README.md b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/README.md new file mode 100644 index 00000000000..f81b1576ad4 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/README.md @@ -0,0 +1,3 @@ +# SDK + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk) diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md new file mode 100644 index 00000000000..06e6d868548 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md @@ -0,0 +1,3 @@ +# SDK Instrumentation + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/instrumentation)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/instrumentation) diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go new file mode 100644 index 00000000000..a4faa6a03d5 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go @@ -0,0 +1,13 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package instrumentation provides types to represent the code libraries that +// provide OpenTelemetry instrumentation. These types are used in the +// OpenTelemetry signal pipelines to identify the source of telemetry. +// +// See +// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0083-component.md +// and +// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0201-scope-attributes.md +// for more information. +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go new file mode 100644 index 00000000000..f2cdf3c6518 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" + +// Library represents the instrumentation library. +// +// Deprecated: use [Scope] instead. +type Library = Scope diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go new file mode 100644 index 00000000000..728115045bb --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" + +// Scope represents the instrumentation scope. +type Scope struct { + // Name is the name of the instrumentation scope. This should be the + // Go package name of that scope. + Name string + // Version is the version of the instrumentation scope. + Version string + // SchemaURL of the telemetry emitted by the scope. 
+	SchemaURL string
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md
new file mode 100644
index 00000000000..fab61647c2d
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md
@@ -0,0 +1,46 @@
+# Experimental Features
+
+The SDK contains features that have not yet stabilized in the OpenTelemetry specification.
+These features are added to the OpenTelemetry Go SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
+
+These features may change in backwards incompatible ways as feedback is applied.
+See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
+
+## Features
+
+- [Resource](#resource)
+
+### Resource
+
+[OpenTelemetry resource semantic conventions] include many attribute definitions that are defined as experimental.
+To have experimental semantic conventions be added by [resource detectors], set the `OTEL_GO_X_RESOURCE` environment variable.
+The value set must be the case-insensitive string of `"true"` to enable the feature.
+All other values are ignored.
+
+
+[OpenTelemetry resource semantic conventions]: https://opentelemetry.io/docs/specs/semconv/resource/
+[resource detectors]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource#Detector
+
+#### Examples
+
+Enable experimental resource semantic conventions.
+
+```console
+export OTEL_GO_X_RESOURCE=true
+```
+
+Disable experimental resource semantic conventions.
+
+```console
+unset OTEL_GO_X_RESOURCE
+```
+
+## Compatibility and Stability
+
+Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../VERSIONING.md).
+These features may be removed or modified in successive version releases, including patch versions.
+
+When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
+There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version.
+If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support.
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
new file mode 100644
index 00000000000..68d296cbed3
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package x contains support for OTel SDK experimental features.
+//
+// This package should only be used for features defined in the specification.
+// It should not be used for experiments or new project ideas.
+package x // import "go.opentelemetry.io/otel/sdk/internal/x"
+
+import (
+	"os"
+	"strings"
+)
+
+// Resource is an experimental feature flag that defines whether resource
+// detectors should include experimental semantic conventions.
+//
+// To enable this feature set the OTEL_GO_X_RESOURCE environment variable
+// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
+// will also enable this).
+var Resource = newFeature("RESOURCE", func(v string) (string, bool) { + if strings.ToLower(v) == "true" { + return v, true + } + return "", false +}) + +// Feature is an experimental feature control flag. It provides a uniform way +// to interact with these feature flags and parse their values. +type Feature[T any] struct { + key string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + return Feature[T]{ + key: envKeyRoot + suffix, + parse: parse, + } +} + +// Key returns the environment variable key that needs to be set to enable the +// feature. +func (f Feature[T]) Key() string { return f.key } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. +func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + vRaw := os.Getenv(f.key) + if vRaw == "" { + return v, ok + } + return f.parse(vRaw) +} + +// Enabled returns if the feature is enabled. +func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
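The sdk/internal/x package vendored above gates experimental SDK behavior behind OTEL_GO_X_* environment variables via the generic Feature helper. Because the package is internal to the SDK module, the sketch below imagines code inside package x itself; the EXAMPLE flag and its integer parser are hypothetical, shown only to illustrate how newFeature, Lookup, and Enabled compose:

```go
package x

import (
	"fmt"
	"os"
	"strconv"
)

// maxWidgets is a hypothetical flag: OTEL_GO_X_EXAMPLE must parse as a
// positive integer for the feature to be considered enabled.
var maxWidgets = newFeature("EXAMPLE", func(v string) (int, bool) {
	n, err := strconv.Atoi(v)
	return n, err == nil && n > 0
})

func demo() {
	os.Setenv("OTEL_GO_X_EXAMPLE", "8")
	if n, ok := maxWidgets.Lookup(); ok {
		fmt.Println("enabled, limit =", n) // enabled, limit = 8
	}

	// An empty or unset variable reads as "disabled", matching the SDK
	// environment-variable specification quoted in x.go.
	os.Unsetenv("OTEL_GO_X_EXAMPLE")
	fmt.Println(maxWidgets.Enabled()) // false
}
```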
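The sdk/metric files that follow define the Aggregation types the SDK uses for view-based aggregation selection. As an orientation sketch for the aggregation.go diff below (see the Boundaries documentation there for the bucket semantics): wiring AggregationExplicitBucketHistogram onto one instrument through a View. The reader choice, instrument name, and bucket boundaries here are assumptions for illustration, not anything this patch configures:

```go
package main

import (
	"context"
	"log"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	ctx := context.Background()

	// A View routes one instrument onto explicit latency buckets. Each
	// boundary is an upper bound, so a measurement of 42 lands in (25, 50].
	reader := sdkmetric.NewManualReader()
	provider := sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(reader),
		sdkmetric.WithView(sdkmetric.NewView(
			sdkmetric.Instrument{Name: "request.duration"}, // hypothetical name
			sdkmetric.Stream{Aggregation: sdkmetric.AggregationExplicitBucketHistogram{
				Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000},
			}},
		)),
	)
	defer func() { _ = provider.Shutdown(ctx) }()

	hist, err := provider.Meter("example").Float64Histogram("request.duration")
	if err != nil {
		log.Fatal(err)
	}
	hist.Record(ctx, 42)
}
```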
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/README.md b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/README.md new file mode 100644 index 00000000000..017f072a51b --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/README.md @@ -0,0 +1,3 @@ +# Metric SDK + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric) diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go new file mode 100644 index 00000000000..e6f5cfb2ad9 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "errors" + "fmt" + "slices" +) + +// errAgg is wrapped by misconfigured aggregations. +var errAgg = errors.New("aggregation") + +// Aggregation is the aggregation used to summarize recorded measurements. +type Aggregation interface { + // copy returns a deep copy of the Aggregation. + copy() Aggregation + + // err returns an error for any misconfigured Aggregation. + err() error +} + +// AggregationDrop is an Aggregation that drops all recorded data. +type AggregationDrop struct{} // AggregationDrop has no parameters. + +var _ Aggregation = AggregationDrop{} + +// copy returns a deep copy of d. +func (d AggregationDrop) copy() Aggregation { return d } + +// err returns an error for any misconfiguration. A drop aggregation has no +// parameters and cannot be misconfigured, therefore this always returns nil. +func (AggregationDrop) err() error { return nil } + +// AggregationDefault is an Aggregation that uses the default instrument kind selection +// mapping to select another Aggregation. A metric reader can be configured to +// make an aggregation selection based on instrument kind that differs from +// the default. This Aggregation ensures the default is used. +// +// See the [DefaultAggregationSelector] for information about the default +// instrument kind selection mapping. +type AggregationDefault struct{} // AggregationDefault has no parameters. + +var _ Aggregation = AggregationDefault{} + +// copy returns a deep copy of d. +func (d AggregationDefault) copy() Aggregation { return d } + +// err returns an error for any misconfiguration. A default aggregation has no +// parameters and cannot be misconfigured, therefore this always returns nil. +func (AggregationDefault) err() error { return nil } + +// AggregationSum is an Aggregation that summarizes a set of measurements as their +// arithmetic sum. +type AggregationSum struct{} // AggregationSum has no parameters. + +var _ Aggregation = AggregationSum{} + +// copy returns a deep copy of s. +func (s AggregationSum) copy() Aggregation { return s } + +// err returns an error for any misconfiguration. A sum aggregation has no +// parameters and cannot be misconfigured, therefore this always returns nil. +func (AggregationSum) err() error { return nil } + +// AggregationLastValue is an Aggregation that summarizes a set of measurements as the +// last one made. +type AggregationLastValue struct{} // AggregationLastValue has no parameters. + +var _ Aggregation = AggregationLastValue{} + +// copy returns a deep copy of l. 
+func (l AggregationLastValue) copy() Aggregation { return l } + +// err returns an error for any misconfiguration. A last-value aggregation has +// no parameters and cannot be misconfigured, therefore this always returns +// nil. +func (AggregationLastValue) err() error { return nil } + +// AggregationExplicitBucketHistogram is an Aggregation that summarizes a set of +// measurements as an histogram with explicitly defined buckets. +type AggregationExplicitBucketHistogram struct { + // Boundaries are the increasing bucket boundary values. Boundary values + // define bucket upper bounds. Buckets are exclusive of their lower + // boundary and inclusive of their upper bound (except at positive + // infinity). A measurement is defined to fall into the greatest-numbered + // bucket with a boundary that is greater than or equal to the + // measurement. As an example, boundaries defined as: + // + // []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000} + // + // Will define these buckets: + // + // (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, 25.0], (25.0, 50.0], + // (50.0, 75.0], (75.0, 100.0], (100.0, 250.0], (250.0, 500.0], + // (500.0, 1000.0], (1000.0, +∞) + Boundaries []float64 + // NoMinMax indicates whether to not record the min and max of the + // distribution. By default, these extrema are recorded. + // + // Recording these extrema for cumulative data is expected to have little + // value, they will represent the entire life of the instrument instead of + // just the current collection cycle. It is recommended to set this to true + // for that type of data to avoid computing the low-value extrema. + NoMinMax bool +} + +var _ Aggregation = AggregationExplicitBucketHistogram{} + +// errHist is returned by misconfigured ExplicitBucketHistograms. +var errHist = fmt.Errorf("%w: explicit bucket histogram", errAgg) + +// err returns an error for any misconfiguration. +func (h AggregationExplicitBucketHistogram) err() error { + if len(h.Boundaries) <= 1 { + return nil + } + + // Check boundaries are monotonic. + i := h.Boundaries[0] + for _, j := range h.Boundaries[1:] { + if i >= j { + return fmt.Errorf("%w: non-monotonic boundaries: %v", errHist, h.Boundaries) + } + i = j + } + + return nil +} + +// copy returns a deep copy of h. +func (h AggregationExplicitBucketHistogram) copy() Aggregation { + return AggregationExplicitBucketHistogram{ + Boundaries: slices.Clone(h.Boundaries), + NoMinMax: h.NoMinMax, + } +} + +// AggregationBase2ExponentialHistogram is an Aggregation that summarizes a set of +// measurements as an histogram with bucket widths that grow exponentially. +type AggregationBase2ExponentialHistogram struct { + // MaxSize is the maximum number of buckets to use for the histogram. + MaxSize int32 + // MaxScale is the maximum resolution scale to use for the histogram. + // + // MaxScale has a maximum value of 20. Using a value of 20 means the + // maximum number of buckets that can fit within the range of a + // signed 32-bit integer index could be used. + // + // MaxScale has a minimum value of -10. Using a value of -10 means only + // two buckets will be used. + MaxScale int32 + + // NoMinMax indicates whether to not record the min and max of the + // distribution. By default, these extrema are recorded. + // + // Recording these extrema for cumulative data is expected to have little + // value, they will represent the entire life of the instrument instead of + // just the current collection cycle. 
It is recommended to set this to true
+	// for that type of data to avoid computing the low-value extrema.
+	NoMinMax bool
+}
+
+var _ Aggregation = AggregationBase2ExponentialHistogram{}
+
+// copy returns a deep copy of the Aggregation.
+func (e AggregationBase2ExponentialHistogram) copy() Aggregation {
+	return e
+}
+
+const (
+	expoMaxScale = 20
+	expoMinScale = -10
+)
+
+// errExpoHist is returned by misconfigured Base2ExponentialBucketHistograms.
+var errExpoHist = fmt.Errorf("%w: exponential histogram", errAgg)
+
+// err returns an error for any misconfigured Aggregation.
+func (e AggregationBase2ExponentialHistogram) err() error {
+	if e.MaxScale > expoMaxScale {
+		return fmt.Errorf("%w: max scale %d is greater than maximum scale %d", errExpoHist, e.MaxScale, expoMaxScale)
+	}
+	if e.MaxSize <= 0 {
+		return fmt.Errorf("%w: max size %d is less than or equal to zero", errExpoHist, e.MaxSize)
+	}
+	return nil
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go
new file mode 100644
index 00000000000..63b88f08664
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go
@@ -0,0 +1,83 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"sync"
+)
+
+// cache is a locking storage used to quickly return already computed values.
+//
+// The zero value of a cache is empty and ready to use.
+//
+// A cache must not be copied after first use.
+//
+// All methods of a cache are safe to call concurrently.
+type cache[K comparable, V any] struct {
+	sync.Mutex
+	data map[K]V
+}
+
+// Lookup returns the value stored in the cache with the associated key if it
+// exists. Otherwise, f is called and its returned value is set in the cache
+// for key and returned.
+//
+// Lookup is safe to call concurrently. It will hold the cache lock, so f
+// should not block excessively.
+func (c *cache[K, V]) Lookup(key K, f func() V) V {
+	c.Lock()
+	defer c.Unlock()
+
+	if c.data == nil {
+		val := f()
+		c.data = map[K]V{key: val}
+		return val
+	}
+	if v, ok := c.data[key]; ok {
+		return v
+	}
+	val := f()
+	c.data[key] = val
+	return val
+}
+
+// HasKey returns true if Lookup has previously been called with that key.
+//
+// HasKey is safe to call concurrently.
+func (c *cache[K, V]) HasKey(key K) bool {
+	c.Lock()
+	defer c.Unlock()
+	_, ok := c.data[key]
+	return ok
+}
+
+// cacheWithErr is a locking storage used to quickly return already computed values and an error.
+//
+// The zero value of a cacheWithErr is empty and ready to use.
+//
+// A cacheWithErr must not be copied after first use.
+//
+// All methods of a cacheWithErr are safe to call concurrently.
+type cacheWithErr[K comparable, V any] struct {
+	cache[K, valAndErr[V]]
+}
+
+type valAndErr[V any] struct {
+	val V
+	err error
+}
+
+// Lookup returns the value stored in the cacheWithErr with the associated key
+// if it exists. Otherwise, f is called and its returned value is set in the
+// cacheWithErr for key and returned.
+//
+// Lookup is safe to call concurrently. It will hold the cacheWithErr lock, so f
+// should not block excessively.
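+//
+// A minimal usage sketch (the key and value types here are illustrative, not
+// part of this type's API):
+//
+//	var c cacheWithErr[string, int]
+//	v, err := c.Lookup("meter-a", func() (int, error) { return 42, nil })
+//	// Subsequent lookups for "meter-a" return the memoized (42, nil)
+//	// without invoking the callback again.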
+func (c *cacheWithErr[K, V]) Lookup(key K, f func() (V, error)) (V, error) { + combined := c.cache.Lookup(key, func() valAndErr[V] { + val, err := f() + return valAndErr[V]{val: val, err: err} + }) + return combined.val, combined.err +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/config.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/config.go new file mode 100644 index 00000000000..bbe7bf671fd --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/config.go @@ -0,0 +1,137 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "fmt" + "sync" + + "go.opentelemetry.io/otel/sdk/resource" +) + +// config contains configuration options for a MeterProvider. +type config struct { + res *resource.Resource + readers []Reader + views []View +} + +// readerSignals returns a force-flush and shutdown function for a +// MeterProvider to call in their respective options. All Readers c contains +// will have their force-flush and shutdown methods unified into returned +// single functions. +func (c config) readerSignals() (forceFlush, shutdown func(context.Context) error) { + var fFuncs, sFuncs []func(context.Context) error + for _, r := range c.readers { + sFuncs = append(sFuncs, r.Shutdown) + if f, ok := r.(interface{ ForceFlush(context.Context) error }); ok { + fFuncs = append(fFuncs, f.ForceFlush) + } + } + + return unify(fFuncs), unifyShutdown(sFuncs) +} + +// unify unifies calling all of funcs into a single function call. All errors +// returned from calls to funcs will be unify into a single error return +// value. +func unify(funcs []func(context.Context) error) func(context.Context) error { + return func(ctx context.Context) error { + var errs []error + for _, f := range funcs { + if err := f(ctx); err != nil { + errs = append(errs, err) + } + } + return unifyErrors(errs) + } +} + +// unifyErrors combines multiple errors into a single error. +func unifyErrors(errs []error) error { + switch len(errs) { + case 0: + return nil + case 1: + return errs[0] + default: + return fmt.Errorf("%v", errs) + } +} + +// unifyShutdown unifies calling all of funcs once for a shutdown. If called +// more than once, an ErrReaderShutdown error is returned. +func unifyShutdown(funcs []func(context.Context) error) func(context.Context) error { + f := unify(funcs) + var once sync.Once + return func(ctx context.Context) error { + err := ErrReaderShutdown + once.Do(func() { err = f(ctx) }) + return err + } +} + +// newConfig returns a config configured with options. +func newConfig(options []Option) config { + conf := config{res: resource.Default()} + for _, o := range options { + conf = o.apply(conf) + } + return conf +} + +// Option applies a configuration option value to a MeterProvider. +type Option interface { + apply(config) config +} + +// optionFunc applies a set of options to a config. +type optionFunc func(config) config + +// apply returns a config with option(s) applied. +func (o optionFunc) apply(conf config) config { + return o(conf) +} + +// WithResource associates a Resource with a MeterProvider. This Resource +// represents the entity producing telemetry and is associated with all Meters +// the MeterProvider will create. +// +// By default, if this Option is not used, the default Resource from the +// go.opentelemetry.io/otel/sdk/resource package will be used. 
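+//
+// A minimal sketch (the attribute value is illustrative):
+//
+//	res := resource.NewSchemaless(attribute.String("service.name", "my-service"))
+//	provider := NewMeterProvider(WithResource(res))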
+func WithResource(res *resource.Resource) Option { + return optionFunc(func(conf config) config { + conf.res = res + return conf + }) +} + +// WithReader associates Reader r with a MeterProvider. +// +// By default, if this option is not used, the MeterProvider will perform no +// operations; no data will be exported without a Reader. +func WithReader(r Reader) Option { + return optionFunc(func(cfg config) config { + if r == nil { + return cfg + } + cfg.readers = append(cfg.readers, r) + return cfg + }) +} + +// WithView associates views with a MeterProvider. +// +// Views are appended to existing ones in a MeterProvider if this option is +// used multiple times. +// +// By default, if this option is not used, the MeterProvider will use the +// default view. +func WithView(views ...View) Option { + return optionFunc(func(cfg config) config { + cfg.views = append(cfg.views, views...) + return cfg + }) +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go new file mode 100644 index 00000000000..90a4ae16c1a --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package metric provides an implementation of the OpenTelemetry metrics SDK. +// +// See https://opentelemetry.io/docs/concepts/signals/metrics/ for information +// about the concept of OpenTelemetry metrics and +// https://opentelemetry.io/docs/concepts/components/ for more information +// about OpenTelemetry SDKs. +// +// The entry point for the metric package is the MeterProvider. It is the +// object that all API calls use to create Meters, instruments, and ultimately +// make metric measurements. Also, it is an object that should be used to +// control the life-cycle (start, flush, and shutdown) of the SDK. +// +// A MeterProvider needs to be configured to export the measured data, this is +// done by configuring it with a Reader implementation (using the WithReader +// MeterProviderOption). Readers take two forms: ones that push to an endpoint +// (NewPeriodicReader), and ones that an endpoint pulls from. See +// [go.opentelemetry.io/otel/exporters] for exporters that can be used as +// or with these Readers. +// +// Each Reader, when registered with the MeterProvider, can be augmented with a +// View. Views allow users that run OpenTelemetry instrumented code to modify +// the generated data of that instrumentation. +// +// The data generated by a MeterProvider needs to include information about its +// origin. A MeterProvider needs to be configured with a Resource, using the +// WithResource MeterProviderOption, to include this information. This Resource +// should be used to describe the unique runtime environment instrumented code +// is being run on. That way when multiple instances of the code are collected +// at a single endpoint their origin is decipherable. +// +// To avoid leaking memory, the SDK returns the same instrument for calls to +// create new instruments with the same Name, Unit, and Description. +// Importantly, callbacks provided using metric.WithFloat64Callback or +// metric.WithInt64Callback will only apply for the first instrument created +// with a given Name, Unit, and Description. Instead, use +// Meter.RegisterCallback and Registration.Unregister to add and remove +// callbacks without leaking memory. 
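+//
+// A minimal push-pipeline sketch (exp stands in for any configured Exporter
+// implementation):
+//
+//	provider := NewMeterProvider(
+//		WithReader(NewPeriodicReader(exp)),
+//		WithResource(resource.Default()),
+//	)
+//	defer func() { _ = provider.Shutdown(context.Background()) }()
+//	meter := provider.Meter("example")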
+// +// See [go.opentelemetry.io/otel/metric] for more information about +// the metric API. +// +// See [go.opentelemetry.io/otel/sdk/metric/internal/x] for information about +// the experimental features. +package metric // import "go.opentelemetry.io/otel/sdk/metric" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/env.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/env.go new file mode 100644 index 00000000000..a6c403797f6 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/env.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "os" + "strconv" + "time" + + "go.opentelemetry.io/otel/internal/global" +) + +// Environment variable names. +const ( + // The time interval (in milliseconds) between the start of two export attempts. + envInterval = "OTEL_METRIC_EXPORT_INTERVAL" + // Maximum allowed time (in milliseconds) to export data. + envTimeout = "OTEL_METRIC_EXPORT_TIMEOUT" +) + +// envDuration returns an environment variable's value as duration in milliseconds if it is exists, +// or the defaultValue if the environment variable is not defined or the value is not valid. +func envDuration(key string, defaultValue time.Duration) time.Duration { + v := os.Getenv(key) + if v == "" { + return defaultValue + } + d, err := strconv.Atoi(v) + if err != nil { + global.Error(err, "parse duration", "environment variable", key, "value", v) + return defaultValue + } + if d <= 0 { + global.Error(errNonPositiveDuration, "non-positive duration", "environment variable", key, "value", v) + return defaultValue + } + return time.Duration(d) * time.Millisecond +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go new file mode 100644 index 00000000000..82619da78ec --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "os" + "runtime" + "slices" + + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/internal/x" +) + +// reservoirFunc returns the appropriately configured exemplar reservoir +// creation func based on the passed InstrumentKind and user defined +// environment variables. +// +// Note: This will only return non-nil values when the experimental exemplar +// feature is enabled and the OTEL_METRICS_EXEMPLAR_FILTER environment variable +// is not set to always_off. 
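+//
+// The recognized values, mirroring the switch below, are:
+//
+//	always_on   -> exemplar.AlwaysOnFilter
+//	always_off  -> no reservoir (exemplar.Drop)
+//	trace_based -> exemplar.SampledFilter (the default)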
+func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.FilteredReservoir[N] { + if !x.Exemplars.Enabled() { + return nil + } + // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar + const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER" + + var filter exemplar.Filter + + switch os.Getenv(filterEnvKey) { + case "always_on": + filter = exemplar.AlwaysOnFilter + case "always_off": + return exemplar.Drop + case "trace_based": + fallthrough + default: + filter = exemplar.SampledFilter + } + + // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults + // Explicit bucket histogram aggregation with more than 1 bucket will + // use AlignedHistogramBucketExemplarReservoir. + a, ok := agg.(AggregationExplicitBucketHistogram) + if ok && len(a.Boundaries) > 0 { + cp := slices.Clone(a.Boundaries) + return func() exemplar.FilteredReservoir[N] { + bounds := cp + return exemplar.NewFilteredReservoir[N](filter, exemplar.Histogram(bounds)) + } + } + + var n int + if a, ok := agg.(AggregationBase2ExponentialHistogram); ok { + // Base2 Exponential Histogram Aggregation SHOULD use a + // SimpleFixedSizeExemplarReservoir with a reservoir equal to the + // smaller of the maximum number of buckets configured on the + // aggregation or twenty (e.g. min(20, max_buckets)). + n = int(a.MaxSize) + if n > 20 { + n = 20 + } + } else { + // https://github.com/open-telemetry/opentelemetry-specification/blob/e94af89e3d0c01de30127a0f423e912f6cda7bed/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir + // This Exemplar reservoir MAY take a configuration parameter for + // the size of the reservoir. If no size configuration is + // provided, the default size MAY be the number of possible + // concurrent threads (e.g. number of CPUs) to help reduce + // contention. Otherwise, a default size of 1 SHOULD be used. + n = runtime.NumCPU() + if n < 1 { + // Should never be the case, but be defensive. + n = 1 + } + } + + return func() exemplar.FilteredReservoir[N] { + return exemplar.NewFilteredReservoir[N](filter, exemplar.FixedSize(n)) + } +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go new file mode 100644 index 00000000000..1a3cccb6775 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go @@ -0,0 +1,77 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// ErrExporterShutdown is returned if Export or Shutdown are called after an +// Exporter has been Shutdown. +var ErrExporterShutdown = fmt.Errorf("exporter is shutdown") + +// Exporter handles the delivery of metric data to external receivers. This is +// the final component in the metric push pipeline. +type Exporter interface { + // Temporality returns the Temporality to use for an instrument kind. + // + // This method needs to be concurrent safe with itself and all the other + // Exporter methods. 
+ Temporality(InstrumentKind) metricdata.Temporality + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Aggregation returns the Aggregation to use for an instrument kind. + // + // This method needs to be concurrent safe with itself and all the other + // Exporter methods. + Aggregation(InstrumentKind) Aggregation + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Export serializes and transmits metric data to a receiver. + // + // This is called synchronously, there is no concurrency safety + // requirement. Because of this, it is critical that all timeouts and + // cancellations of the passed context be honored. + // + // All retry logic must be contained in this function. The SDK does not + // implement any retry logic. All errors returned by this function are + // considered unrecoverable and will be reported to a configured error + // Handler. + // + // The passed ResourceMetrics may be reused when the call completes. If an + // exporter needs to hold this data after it returns, it needs to make a + // copy. + Export(context.Context, *metricdata.ResourceMetrics) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // ForceFlush flushes any metric data held by an exporter. + // + // The deadline or cancellation of the passed context must be honored. An + // appropriate error should be returned in these situations. + // + // This method needs to be concurrent safe. + ForceFlush(context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Shutdown flushes all metric data held by an exporter and releases any + // held computational resources. + // + // The deadline or cancellation of the passed context must be honored. An + // appropriate error should be returned in these situations. + // + // After Shutdown is called, calls to Export will perform no operation and + // instead will return an error indicating the shutdown state. + // + // This method needs to be concurrent safe. + Shutdown(context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go new file mode 100644 index 00000000000..b52a330b3bc --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go @@ -0,0 +1,347 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate stringer -type=InstrumentKind -trimprefix=InstrumentKind + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "errors" + "fmt" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" +) + +var zeroScope instrumentation.Scope + +// InstrumentKind is the identifier of a group of instruments that all +// performing the same function. 
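+//
+// For example, synchronous metric.Int64Counter and metric.Float64Counter
+// instruments both report InstrumentKindCounter, while their asynchronous
+// counterparts report InstrumentKindObservableCounter.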
+type InstrumentKind uint8 + +const ( + // instrumentKindUndefined is an undefined instrument kind, it should not + // be used by any initialized type. + instrumentKindUndefined InstrumentKind = 0 // nolint:deadcode,varcheck,unused + // InstrumentKindCounter identifies a group of instruments that record + // increasing values synchronously with the code path they are measuring. + InstrumentKindCounter InstrumentKind = 1 + // InstrumentKindUpDownCounter identifies a group of instruments that + // record increasing and decreasing values synchronously with the code path + // they are measuring. + InstrumentKindUpDownCounter InstrumentKind = 2 + // InstrumentKindHistogram identifies a group of instruments that record a + // distribution of values synchronously with the code path they are + // measuring. + InstrumentKindHistogram InstrumentKind = 3 + // InstrumentKindObservableCounter identifies a group of instruments that + // record increasing values in an asynchronous callback. + InstrumentKindObservableCounter InstrumentKind = 4 + // InstrumentKindObservableUpDownCounter identifies a group of instruments + // that record increasing and decreasing values in an asynchronous + // callback. + InstrumentKindObservableUpDownCounter InstrumentKind = 5 + // InstrumentKindObservableGauge identifies a group of instruments that + // record current values in an asynchronous callback. + InstrumentKindObservableGauge InstrumentKind = 6 + // InstrumentKindGauge identifies a group of instruments that record + // instantaneous values synchronously with the code path they are + // measuring. + InstrumentKindGauge InstrumentKind = 7 +) + +type nonComparable [0]func() // nolint: unused // This is indeed used. + +// Instrument describes properties an instrument is created with. +type Instrument struct { + // Name is the human-readable identifier of the instrument. + Name string + // Description describes the purpose of the instrument. + Description string + // Kind defines the functional group of the instrument. + Kind InstrumentKind + // Unit is the unit of measurement recorded by the instrument. + Unit string + // Scope identifies the instrumentation that created the instrument. + Scope instrumentation.Scope + + // Ensure forward compatibility if non-comparable fields need to be added. + nonComparable // nolint: unused +} + +// IsEmpty returns if all Instrument fields are their zero-value. +func (i Instrument) IsEmpty() bool { + return i.Name == "" && + i.Description == "" && + i.Kind == instrumentKindUndefined && + i.Unit == "" && + i.Scope == zeroScope +} + +// matches returns whether all the non-zero-value fields of i match the +// corresponding fields of other. If i is empty it will match all other, and +// true will always be returned. +func (i Instrument) matches(other Instrument) bool { + return i.matchesName(other) && + i.matchesDescription(other) && + i.matchesKind(other) && + i.matchesUnit(other) && + i.matchesScope(other) +} + +// matchesName returns true if the Name of i is "" or it equals the Name of +// other, otherwise false. +func (i Instrument) matchesName(other Instrument) bool { + return i.Name == "" || i.Name == other.Name +} + +// matchesDescription returns true if the Description of i is "" or it equals +// the Description of other, otherwise false. 
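+//
+// As with the other matches helpers, a zero-value field acts as a wildcard;
+// for example (a sketch):
+//
+//	Instrument{}.matches(Instrument{Name: "latency"})               // true
+//	Instrument{Name: "errors"}.matches(Instrument{Name: "latency"}) // false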
+func (i Instrument) matchesDescription(other Instrument) bool { + return i.Description == "" || i.Description == other.Description +} + +// matchesKind returns true if the Kind of i is its zero-value or it equals the +// Kind of other, otherwise false. +func (i Instrument) matchesKind(other Instrument) bool { + return i.Kind == instrumentKindUndefined || i.Kind == other.Kind +} + +// matchesUnit returns true if the Unit of i is its zero-value or it equals the +// Unit of other, otherwise false. +func (i Instrument) matchesUnit(other Instrument) bool { + return i.Unit == "" || i.Unit == other.Unit +} + +// matchesScope returns true if the Scope of i is its zero-value or it equals +// the Scope of other, otherwise false. +func (i Instrument) matchesScope(other Instrument) bool { + return (i.Scope.Name == "" || i.Scope.Name == other.Scope.Name) && + (i.Scope.Version == "" || i.Scope.Version == other.Scope.Version) && + (i.Scope.SchemaURL == "" || i.Scope.SchemaURL == other.Scope.SchemaURL) +} + +// Stream describes the stream of data an instrument produces. +type Stream struct { + // Name is the human-readable identifier of the stream. + Name string + // Description describes the purpose of the data. + Description string + // Unit is the unit of measurement recorded. + Unit string + // Aggregation the stream uses for an instrument. + Aggregation Aggregation + // AttributeFilter is an attribute Filter applied to the attributes + // recorded for an instrument's measurement. If the filter returns false + // the attribute will not be recorded, otherwise, if it returns true, it + // will record the attribute. + // + // Use NewAllowKeysFilter from "go.opentelemetry.io/otel/attribute" to + // provide an allow-list of attribute keys here. + AttributeFilter attribute.Filter +} + +// instID are the identifying properties of a instrument. +type instID struct { + // Name is the name of the stream. + Name string + // Description is the description of the stream. + Description string + // Kind defines the functional group of the instrument. + Kind InstrumentKind + // Unit is the unit of the stream. + Unit string + // Number is the number type of the stream. + Number string +} + +// Returns a normalized copy of the instID i. +// +// Instrument names are considered case-insensitive. Standardize the instrument +// name to always be lowercase for the returned instID so it can be compared +// without the name casing affecting the comparison. +func (i instID) normalize() instID { + i.Name = strings.ToLower(i.Name) + return i +} + +type int64Inst struct { + measures []aggregate.Measure[int64] + + embedded.Int64Counter + embedded.Int64UpDownCounter + embedded.Int64Histogram + embedded.Int64Gauge +} + +var ( + _ metric.Int64Counter = (*int64Inst)(nil) + _ metric.Int64UpDownCounter = (*int64Inst)(nil) + _ metric.Int64Histogram = (*int64Inst)(nil) + _ metric.Int64Gauge = (*int64Inst)(nil) +) + +func (i *int64Inst) Add(ctx context.Context, val int64, opts ...metric.AddOption) { + c := metric.NewAddConfig(opts) + i.aggregate(ctx, val, c.Attributes()) +} + +func (i *int64Inst) Record(ctx context.Context, val int64, opts ...metric.RecordOption) { + c := metric.NewRecordConfig(opts) + i.aggregate(ctx, val, c.Attributes()) +} + +func (i *int64Inst) aggregate(ctx context.Context, val int64, s attribute.Set) { // nolint:revive // okay to shadow pkg with method. 
+ for _, in := range i.measures { + in(ctx, val, s) + } +} + +type float64Inst struct { + measures []aggregate.Measure[float64] + + embedded.Float64Counter + embedded.Float64UpDownCounter + embedded.Float64Histogram + embedded.Float64Gauge +} + +var ( + _ metric.Float64Counter = (*float64Inst)(nil) + _ metric.Float64UpDownCounter = (*float64Inst)(nil) + _ metric.Float64Histogram = (*float64Inst)(nil) + _ metric.Float64Gauge = (*float64Inst)(nil) +) + +func (i *float64Inst) Add(ctx context.Context, val float64, opts ...metric.AddOption) { + c := metric.NewAddConfig(opts) + i.aggregate(ctx, val, c.Attributes()) +} + +func (i *float64Inst) Record(ctx context.Context, val float64, opts ...metric.RecordOption) { + c := metric.NewRecordConfig(opts) + i.aggregate(ctx, val, c.Attributes()) +} + +func (i *float64Inst) aggregate(ctx context.Context, val float64, s attribute.Set) { + for _, in := range i.measures { + in(ctx, val, s) + } +} + +// observablID is a comparable unique identifier of an observable. +type observablID[N int64 | float64] struct { + name string + description string + kind InstrumentKind + unit string + scope instrumentation.Scope +} + +type float64Observable struct { + metric.Float64Observable + *observable[float64] + + embedded.Float64ObservableCounter + embedded.Float64ObservableUpDownCounter + embedded.Float64ObservableGauge +} + +var ( + _ metric.Float64ObservableCounter = float64Observable{} + _ metric.Float64ObservableUpDownCounter = float64Observable{} + _ metric.Float64ObservableGauge = float64Observable{} +) + +func newFloat64Observable(m *meter, kind InstrumentKind, name, desc, u string) float64Observable { + return float64Observable{ + observable: newObservable[float64](m, kind, name, desc, u), + } +} + +type int64Observable struct { + metric.Int64Observable + *observable[int64] + + embedded.Int64ObservableCounter + embedded.Int64ObservableUpDownCounter + embedded.Int64ObservableGauge +} + +var ( + _ metric.Int64ObservableCounter = int64Observable{} + _ metric.Int64ObservableUpDownCounter = int64Observable{} + _ metric.Int64ObservableGauge = int64Observable{} +) + +func newInt64Observable(m *meter, kind InstrumentKind, name, desc, u string) int64Observable { + return int64Observable{ + observable: newObservable[int64](m, kind, name, desc, u), + } +} + +type observable[N int64 | float64] struct { + metric.Observable + observablID[N] + + meter *meter + measures measures[N] + dropAggregation bool +} + +func newObservable[N int64 | float64](m *meter, kind InstrumentKind, name, desc, u string) *observable[N] { + return &observable[N]{ + observablID: observablID[N]{ + name: name, + description: desc, + kind: kind, + unit: u, + scope: m.scope, + }, + meter: m, + } +} + +// observe records the val for the set of attrs. +func (o *observable[N]) observe(val N, s attribute.Set) { + o.measures.observe(val, s) +} + +func (o *observable[N]) appendMeasures(meas []aggregate.Measure[N]) { + o.measures = append(o.measures, meas...) +} + +type measures[N int64 | float64] []aggregate.Measure[N] + +// observe records the val for the set of attrs. +func (m measures[N]) observe(val N, s attribute.Set) { + for _, in := range m { + in(context.Background(), val, s) + } +} + +var errEmptyAgg = errors.New("no aggregators for observable instrument") + +// registerable returns an error if the observable o should not be registered, +// and nil if it should. An errEmptyAgg error is returned if o is effectively a +// no-op because it does not have any aggregators. 
Also, an error is returned +// if scope defines a Meter other than the one o was created by. +func (o *observable[N]) registerable(m *meter) error { + if len(o.measures) == 0 { + return errEmptyAgg + } + if m != o.meter { + return fmt.Errorf( + "invalid registration: observable %q from Meter %q, registered with Meter %q", + o.name, + o.scope.Name, + m.scope.Name, + ) + } + return nil +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go new file mode 100644 index 00000000000..25ea6244e57 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go @@ -0,0 +1,30 @@ +// Code generated by "stringer -type=InstrumentKind -trimprefix=InstrumentKind"; DO NOT EDIT. + +package metric + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[instrumentKindUndefined-0] + _ = x[InstrumentKindCounter-1] + _ = x[InstrumentKindUpDownCounter-2] + _ = x[InstrumentKindHistogram-3] + _ = x[InstrumentKindObservableCounter-4] + _ = x[InstrumentKindObservableUpDownCounter-5] + _ = x[InstrumentKindObservableGauge-6] + _ = x[InstrumentKindGauge-7] +} + +const _InstrumentKind_name = "instrumentKindUndefinedCounterUpDownCounterHistogramObservableCounterObservableUpDownCounterObservableGaugeGauge" + +var _InstrumentKind_index = [...]uint8{0, 23, 30, 43, 52, 69, 92, 107, 112} + +func (i InstrumentKind) String() string { + if i >= InstrumentKind(len(_InstrumentKind_index)-1) { + return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]] +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go new file mode 100644 index 00000000000..b18ee719bd1 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go @@ -0,0 +1,154 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// now is used to return the current local time while allowing tests to +// override the default time.Now function. +var now = time.Now + +// Measure receives measurements to be aggregated. +type Measure[N int64 | float64] func(context.Context, N, attribute.Set) + +// ComputeAggregation stores the aggregate of measurements into dest and +// returns the number of aggregate data-points output. +type ComputeAggregation func(dest *metricdata.Aggregation) int + +// Builder builds an aggregate function. +type Builder[N int64 | float64] struct { + // Temporality is the temporality used for the returned aggregate function. + // + // If this is not provided a default of cumulative will be used (except for + // the last-value aggregate function where delta is the only appropriate + // temporality). 
+ Temporality metricdata.Temporality + // Filter is the attribute filter the aggregate function will use on the + // input of measurements. + Filter attribute.Filter + // ReservoirFunc is the factory function used by aggregate functions to + // create new exemplar reservoirs for a new seen attribute set. + // + // If this is not provided a default factory function that returns an + // exemplar.Drop reservoir will be used. + ReservoirFunc func() exemplar.FilteredReservoir[N] + // AggregationLimit is the cardinality limit of measurement attributes. Any + // measurement for new attributes once the limit has been reached will be + // aggregated into a single aggregate for the "otel.metric.overflow" + // attribute. + // + // If AggregationLimit is less than or equal to zero there will not be an + // aggregation limit imposed (i.e. unlimited attribute sets). + AggregationLimit int +} + +func (b Builder[N]) resFunc() func() exemplar.FilteredReservoir[N] { + if b.ReservoirFunc != nil { + return b.ReservoirFunc + } + + return exemplar.Drop +} + +type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) + +func (b Builder[N]) filter(f fltrMeasure[N]) Measure[N] { + if b.Filter != nil { + fltr := b.Filter // Copy to make it immutable after assignment. + return func(ctx context.Context, n N, a attribute.Set) { + fAttr, dropped := a.Filter(fltr) + f(ctx, n, fAttr, dropped) + } + } + return func(ctx context.Context, n N, a attribute.Set) { + f(ctx, n, a, nil) + } +} + +// LastValue returns a last-value aggregate function input and output. +func (b Builder[N]) LastValue() (Measure[N], ComputeAggregation) { + lv := newLastValue[N](b.AggregationLimit, b.resFunc()) + switch b.Temporality { + case metricdata.DeltaTemporality: + return b.filter(lv.measure), lv.delta + default: + return b.filter(lv.measure), lv.cumulative + } +} + +// PrecomputedLastValue returns a last-value aggregate function input and +// output. The aggregation returned from the returned ComputeAggregation +// function will always only return values from the previous collection cycle. +func (b Builder[N]) PrecomputedLastValue() (Measure[N], ComputeAggregation) { + lv := newPrecomputedLastValue[N](b.AggregationLimit, b.resFunc()) + switch b.Temporality { + case metricdata.DeltaTemporality: + return b.filter(lv.measure), lv.delta + default: + return b.filter(lv.measure), lv.cumulative + } +} + +// PrecomputedSum returns a sum aggregate function input and output. The +// arguments passed to the input are expected to be the precomputed sum values. +func (b Builder[N]) PrecomputedSum(monotonic bool) (Measure[N], ComputeAggregation) { + s := newPrecomputedSum[N](monotonic, b.AggregationLimit, b.resFunc()) + switch b.Temporality { + case metricdata.DeltaTemporality: + return b.filter(s.measure), s.delta + default: + return b.filter(s.measure), s.cumulative + } +} + +// Sum returns a sum aggregate function input and output. +func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) { + s := newSum[N](monotonic, b.AggregationLimit, b.resFunc()) + switch b.Temporality { + case metricdata.DeltaTemporality: + return b.filter(s.measure), s.delta + default: + return b.filter(s.measure), s.cumulative + } +} + +// ExplicitBucketHistogram returns a histogram aggregate function input and +// output. 
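+//
+// A minimal sketch of wiring the returned pair together (ctx and attrs are as
+// in Measure's signature; the boundaries and recorded value are illustrative):
+//
+//	b := Builder[float64]{Temporality: metricdata.DeltaTemporality}
+//	meas, comp := b.ExplicitBucketHistogram([]float64{0, 5, 10}, false, false)
+//	meas(ctx, 7.5, attrs) // falls in the (5.0, 10.0] bucket
+//	var dest metricdata.Aggregation
+//	n := comp(&dest) // writes the delta data points into dest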
+func (b Builder[N]) ExplicitBucketHistogram(boundaries []float64, noMinMax, noSum bool) (Measure[N], ComputeAggregation) {
+	h := newHistogram[N](boundaries, noMinMax, noSum, b.AggregationLimit, b.resFunc())
+	switch b.Temporality {
+	case metricdata.DeltaTemporality:
+		return b.filter(h.measure), h.delta
+	default:
+		return b.filter(h.measure), h.cumulative
+	}
+}
+
+// ExponentialBucketHistogram returns a histogram aggregate function input and
+// output.
+func (b Builder[N]) ExponentialBucketHistogram(maxSize, maxScale int32, noMinMax, noSum bool) (Measure[N], ComputeAggregation) {
+	h := newExponentialHistogram[N](maxSize, maxScale, noMinMax, noSum, b.AggregationLimit, b.resFunc())
+	switch b.Temporality {
+	case metricdata.DeltaTemporality:
+		return b.filter(h.measure), h.delta
+	default:
+		return b.filter(h.measure), h.cumulative
+	}
+}
+
+// reset ensures s has capacity and sets its length. If the capacity of s is
+// too small, a new slice is returned with the specified capacity and length.
+func reset[T any](s []T, length, capacity int) []T {
+	if cap(s) < capacity {
+		return make([]T, length, capacity)
+	}
+	return s[:length]
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go
new file mode 100644
index 00000000000..7b7225e6ef9
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go
@@ -0,0 +1,7 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package aggregate provides aggregate types used to compute aggregations and
+// cycle the state of metric measurements made by the SDK. These types and
+// functionality are meant only for internal SDK use.
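+//
+// The central pattern is a pair of functions per aggregate: a Measure that
+// accepts measurements on the hot path, and a ComputeAggregation that drains
+// the accumulated state into a metricdata.Aggregation on collection.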
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go new file mode 100644 index 00000000000..170ae8e58e2 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "sync" + + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +var exemplarPool = sync.Pool{ + New: func() any { return new([]exemplar.Exemplar) }, +} + +func collectExemplars[N int64 | float64](out *[]metricdata.Exemplar[N], f func(*[]exemplar.Exemplar)) { + dest := exemplarPool.Get().(*[]exemplar.Exemplar) + defer func() { + *dest = (*dest)[:0] + exemplarPool.Put(dest) + }() + + *dest = reset(*dest, len(*out), cap(*out)) + + f(dest) + + *out = reset(*out, len(*dest), cap(*dest)) + for i, e := range *dest { + (*out)[i].FilteredAttributes = e.FilteredAttributes + (*out)[i].Time = e.Time + (*out)[i].SpanID = e.SpanID + (*out)[i].TraceID = e.TraceID + + switch e.Value.Type() { + case exemplar.Int64ValueType: + (*out)[i].Value = N(e.Value.Int64()) + case exemplar.Float64ValueType: + (*out)[i].Value = N(e.Value.Float64()) + } + } +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go new file mode 100644 index 00000000000..707342408ac --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go @@ -0,0 +1,444 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "context" + "errors" + "math" + "sync" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +const ( + expoMaxScale = 20 + expoMinScale = -10 + + smallestNonZeroNormalFloat64 = 0x1p-1022 + + // These redefine the Math constants with a type, so the compiler won't coerce + // them into an int on 32 bit platforms. + maxInt64 int64 = math.MaxInt64 + minInt64 int64 = math.MinInt64 +) + +// expoHistogramDataPoint is a single data point in an exponential histogram. 
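+//
+// Bucket boundaries are powers of 2 scaled by 2^(-scale). As a worked
+// example, at scale 0 a measurement of 6.0 has getBin(6.0) = 2, i.e. it is
+// counted in the bucket covering (4, 8]; raising the scale by one doubles
+// the bucket resolution, lowering it by one halves it.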
+type expoHistogramDataPoint[N int64 | float64] struct {
+	attrs attribute.Set
+	res   exemplar.FilteredReservoir[N]
+
+	count uint64
+	min   N
+	max   N
+	sum   N
+
+	maxSize  int
+	noMinMax bool
+	noSum    bool
+
+	scale int32
+
+	posBuckets expoBuckets
+	negBuckets expoBuckets
+	zeroCount  uint64
+}
+
+func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize int, maxScale int32, noMinMax, noSum bool) *expoHistogramDataPoint[N] {
+	f := math.MaxFloat64
+	max := N(f) // if N is int64, max will overflow to -9223372036854775808
+	min := N(-f)
+	if N(maxInt64) > N(f) {
+		max = N(maxInt64)
+		min = N(minInt64)
+	}
+	return &expoHistogramDataPoint[N]{
+		attrs:    attrs,
+		min:      max,
+		max:      min,
+		maxSize:  maxSize,
+		noMinMax: noMinMax,
+		noSum:    noSum,
+		scale:    maxScale,
+	}
+}
+
+// record adds a new measurement to the histogram. It will rescale the buckets if needed.
+func (p *expoHistogramDataPoint[N]) record(v N) {
+	p.count++
+
+	if !p.noMinMax {
+		if v < p.min {
+			p.min = v
+		}
+		if v > p.max {
+			p.max = v
+		}
+	}
+	if !p.noSum {
+		p.sum += v
+	}
+
+	absV := math.Abs(float64(v))
+
+	if float64(absV) == 0.0 {
+		p.zeroCount++
+		return
+	}
+
+	bin := p.getBin(absV)
+
+	bucket := &p.posBuckets
+	if v < 0 {
+		bucket = &p.negBuckets
+	}
+
+	// If the new bin would make the counts larger than maxSize, we need to
+	// downscale current measurements.
+	if scaleDelta := p.scaleChange(bin, bucket.startBin, len(bucket.counts)); scaleDelta > 0 {
+		if p.scale-scaleDelta < expoMinScale {
+			// With a scale of -10 there are only two buckets for the whole range of float64 values.
+			// This can only happen if there is a max size of 1.
+			otel.Handle(errors.New("exponential histogram scale underflow"))
+			return
+		}
+		// Downscale
+		p.scale -= scaleDelta
+		p.posBuckets.downscale(scaleDelta)
+		p.negBuckets.downscale(scaleDelta)
+
+		bin = p.getBin(absV)
+	}
+
+	bucket.record(bin)
+}
+
+// getBin returns the bin v should be recorded into.
+func (p *expoHistogramDataPoint[N]) getBin(v float64) int32 {
+	frac, expInt := math.Frexp(v)
+	// 11-bit exponential.
+	exp := int32(expInt) // nolint: gosec
+	if p.scale <= 0 {
+		// Because of the choice of fraction, exp is always 1 power of two higher than we want.
+		var correction int32 = 1
+		if frac == .5 {
+			// If v is an exact power of two the frac will be .5 and the exp
+			// will be one higher than we want.
+			correction = 2
+		}
+		return (exp - correction) >> (-p.scale)
+	}
+	return exp << p.scale
+}
+
+// scaleChange returns the magnitude of the scale change needed to fit bin in
+// the bucket. If no scale change is needed 0 is returned.
+func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin int32, length int) int32 {
+	if length == 0 {
+		// No need to rescale if there are no buckets.
+		return 0
+	}
+
+	low := int(startBin)
+	high := int(bin)
+	if startBin >= bin {
+		low = int(bin)
+		high = int(startBin) + length - 1
+	}
+
+	var count int32
+	for high-low >= p.maxSize {
+		low = low >> 1
+		high = high >> 1
+		count++
+		if count > expoMaxScale-expoMinScale {
+			return count
+		}
+	}
+	return count
+}
+
+// expoBuckets is a set of buckets in an exponential histogram.
+type expoBuckets struct {
+	startBin int32
+	counts   []uint64
+}
+
+// record increments the count for the given bin, and expands the buckets if needed.
+// Size changes must be done before calling this function.
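+//
+// For example (a sketch), with startBin = -1 and counts = [1, 2], calling
+// record(-3) grows the buckets to startBin = -3 and counts = [1, 0, 1, 2],
+// zero-filling the gap between the new bin and the old range.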
+func (b *expoBuckets) record(bin int32) { + if len(b.counts) == 0 { + b.counts = []uint64{1} + b.startBin = bin + return + } + + endBin := int(b.startBin) + len(b.counts) - 1 + + // if the new bin is inside the current range + if bin >= b.startBin && int(bin) <= endBin { + b.counts[bin-b.startBin]++ + return + } + // if the new bin is before the current start add spaces to the counts + if bin < b.startBin { + origLen := len(b.counts) + newLength := endBin - int(bin) + 1 + shift := b.startBin - bin + + if newLength > cap(b.counts) { + b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...) + } + + copy(b.counts[shift:origLen+int(shift)], b.counts[:]) + b.counts = b.counts[:newLength] + for i := 1; i < int(shift); i++ { + b.counts[i] = 0 + } + b.startBin = bin + b.counts[0] = 1 + return + } + // if the new is after the end add spaces to the end + if int(bin) > endBin { + if int(bin-b.startBin) < cap(b.counts) { + b.counts = b.counts[:bin-b.startBin+1] + for i := endBin + 1 - int(b.startBin); i < len(b.counts); i++ { + b.counts[i] = 0 + } + b.counts[bin-b.startBin] = 1 + return + } + + end := make([]uint64, int(bin-b.startBin)-len(b.counts)+1) + b.counts = append(b.counts, end...) + b.counts[bin-b.startBin] = 1 + } +} + +// downscale shrinks a bucket by a factor of 2*s. It will sum counts into the +// correct lower resolution bucket. +func (b *expoBuckets) downscale(delta int32) { + // Example + // delta = 2 + // Original offset: -6 + // Counts: [ 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + // bins: -6 -5, -4, -3, -2, -1, 0, 1, 2, 3, 4 + // new bins:-2, -2, -1, -1, -1, -1, 0, 0, 0, 0, 1 + // new Offset: -2 + // new Counts: [4, 14, 30, 10] + + if len(b.counts) <= 1 || delta < 1 { + b.startBin = b.startBin >> delta + return + } + + steps := int32(1) << delta + offset := b.startBin % steps + offset = (offset + steps) % steps // to make offset positive + for i := 1; i < len(b.counts); i++ { + idx := i + int(offset) + if idx%int(steps) == 0 { + b.counts[idx/int(steps)] = b.counts[i] + continue + } + b.counts[idx/int(steps)] += b.counts[i] + } + + lastIdx := (len(b.counts) - 1 + int(offset)) / int(steps) + b.counts = b.counts[:lastIdx+1] + b.startBin = b.startBin >> delta +} + +// newExponentialHistogram returns an Aggregator that summarizes a set of +// measurements as an exponential histogram. Each histogram is scoped by attributes +// and the aggregation cycle the measurements were made in. +func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *expoHistogram[N] { + return &expoHistogram[N]{ + noSum: noSum, + noMinMax: noMinMax, + maxSize: int(maxSize), + maxScale: maxScale, + + newRes: r, + limit: newLimiter[*expoHistogramDataPoint[N]](limit), + values: make(map[attribute.Distinct]*expoHistogramDataPoint[N]), + + start: now(), + } +} + +// expoHistogram summarizes a set of measurements as an histogram with exponentially +// defined buckets. +type expoHistogram[N int64 | float64] struct { + noSum bool + noMinMax bool + maxSize int + maxScale int32 + + newRes func() exemplar.FilteredReservoir[N] + limit limiter[*expoHistogramDataPoint[N]] + values map[attribute.Distinct]*expoHistogramDataPoint[N] + valuesMu sync.Mutex + + start time.Time +} + +func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { + // Ignore NaN and infinity. 
+ if math.IsInf(float64(value), 0) || math.IsNaN(float64(value)) { + return + } + + e.valuesMu.Lock() + defer e.valuesMu.Unlock() + + attr := e.limit.Attributes(fltrAttr, e.values) + v, ok := e.values[attr.Equivalent()] + if !ok { + v = newExpoHistogramDataPoint[N](attr, e.maxSize, e.maxScale, e.noMinMax, e.noSum) + v.res = e.newRes() + + e.values[attr.Equivalent()] = v + } + v.record(value) + v.res.Offer(ctx, value, droppedAttr) +} + +func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int { + t := now() + + // If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed. + // In that case, use the zero-value h and hope for better alignment next cycle. + h, _ := (*dest).(metricdata.ExponentialHistogram[N]) + h.Temporality = metricdata.DeltaTemporality + + e.valuesMu.Lock() + defer e.valuesMu.Unlock() + + n := len(e.values) + hDPts := reset(h.DataPoints, n, n) + + var i int + for _, val := range e.values { + hDPts[i].Attributes = val.attrs + hDPts[i].StartTime = e.start + hDPts[i].Time = t + hDPts[i].Count = val.count + hDPts[i].Scale = val.scale + hDPts[i].ZeroCount = val.zeroCount + hDPts[i].ZeroThreshold = 0.0 + + hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin + hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts)) + copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts) + + hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin + hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts)) + copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts) + + if !e.noSum { + hDPts[i].Sum = val.sum + } + if !e.noMinMax { + hDPts[i].Min = metricdata.NewExtrema(val.min) + hDPts[i].Max = metricdata.NewExtrema(val.max) + } + + collectExemplars(&hDPts[i].Exemplars, val.res.Collect) + + i++ + } + // Unused attribute sets do not report. + clear(e.values) + + e.start = t + h.DataPoints = hDPts + *dest = h + return n +} + +func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() + + // If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed. + // In that case, use the zero-value h and hope for better alignment next cycle. 
+ h, _ := (*dest).(metricdata.ExponentialHistogram[N]) + h.Temporality = metricdata.CumulativeTemporality + + e.valuesMu.Lock() + defer e.valuesMu.Unlock() + + n := len(e.values) + hDPts := reset(h.DataPoints, n, n) + + var i int + for _, val := range e.values { + hDPts[i].Attributes = val.attrs + hDPts[i].StartTime = e.start + hDPts[i].Time = t + hDPts[i].Count = val.count + hDPts[i].Scale = val.scale + hDPts[i].ZeroCount = val.zeroCount + hDPts[i].ZeroThreshold = 0.0 + + hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin + hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts)) + copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts) + + hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin + hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts)) + copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts) + + if !e.noSum { + hDPts[i].Sum = val.sum + } + if !e.noMinMax { + hDPts[i].Min = metricdata.NewExtrema(val.min) + hDPts[i].Max = metricdata.NewExtrema(val.max) + } + + collectExemplars(&hDPts[i].Exemplars, val.res.Collect) + + i++ + // TODO (#3006): This will use an unbounded amount of memory if there + // are unbounded number of attribute sets being aggregated. Attribute + // sets that become "stale" need to be forgotten so this will not + // overload the system. + } + + h.DataPoints = hDPts + *dest = h + return n +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go new file mode 100644 index 00000000000..ade0941f5f5 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go @@ -0,0 +1,233 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "context" + "slices" + "sort" + "sync" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +type buckets[N int64 | float64] struct { + attrs attribute.Set + res exemplar.FilteredReservoir[N] + + counts []uint64 + count uint64 + total N + min, max N +} + +// newBuckets returns buckets with n bins. +func newBuckets[N int64 | float64](attrs attribute.Set, n int) *buckets[N] { + return &buckets[N]{attrs: attrs, counts: make([]uint64, n)} +} + +func (b *buckets[N]) sum(value N) { b.total += value } + +func (b *buckets[N]) bin(idx int, value N) { + b.counts[idx]++ + b.count++ + if value < b.min { + b.min = value + } else if value > b.max { + b.max = value + } +} + +// histValues summarizes a set of measurements as an histValues with +// explicitly defined buckets. +type histValues[N int64 | float64] struct { + noSum bool + bounds []float64 + + newRes func() exemplar.FilteredReservoir[N] + limit limiter[*buckets[N]] + values map[attribute.Distinct]*buckets[N] + valuesMu sync.Mutex +} + +func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histValues[N] { + // The responsibility of keeping all buckets correctly associated with the + // passed boundaries is ultimately this type's responsibility. Make a copy + // here so we can always guarantee this. 
Or, in the case of failure, have + // complete control over the fix. + b := slices.Clone(bounds) + slices.Sort(b) + return &histValues[N]{ + noSum: noSum, + bounds: b, + newRes: r, + limit: newLimiter[*buckets[N]](limit), + values: make(map[attribute.Distinct]*buckets[N]), + } +} + +// Aggregate records the measurement value, scoped by attr, and aggregates it +// into a histogram. +func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { + // This search will return an index in the range [0, len(s.bounds)], where + // it will return len(s.bounds) if value is greater than the last element + // of s.bounds. This aligns with the buckets in that the length of buckets + // is len(s.bounds)+1, with the last bucket representing: + // (s.bounds[len(s.bounds)-1], +∞). + idx := sort.SearchFloat64s(s.bounds, float64(value)) + + s.valuesMu.Lock() + defer s.valuesMu.Unlock() + + attr := s.limit.Attributes(fltrAttr, s.values) + b, ok := s.values[attr.Equivalent()] + if !ok { + // N+1 buckets. For example: + // + // bounds = [0, 5, 10] + // + // Then, + // + // buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞) + b = newBuckets[N](attr, len(s.bounds)+1) + b.res = s.newRes() + + // Ensure min and max are recorded values (not zero), for new buckets. + b.min, b.max = value, value + s.values[attr.Equivalent()] = b + } + b.bin(idx, value) + if !s.noSum { + b.sum(value) + } + b.res.Offer(ctx, value, droppedAttr) +} + +// newHistogram returns an Aggregator that summarizes a set of measurements as +// an histogram. +func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histogram[N] { + return &histogram[N]{ + histValues: newHistValues[N](boundaries, noSum, limit, r), + noMinMax: noMinMax, + start: now(), + } +} + +// histogram summarizes a set of measurements as an histogram with explicitly +// defined buckets. +type histogram[N int64 | float64] struct { + *histValues[N] + + noMinMax bool + start time.Time +} + +func (s *histogram[N]) delta(dest *metricdata.Aggregation) int { + t := now() + + // If *dest is not a metricdata.Histogram, memory reuse is missed. In that + // case, use the zero-value h and hope for better alignment next cycle. + h, _ := (*dest).(metricdata.Histogram[N]) + h.Temporality = metricdata.DeltaTemporality + + s.valuesMu.Lock() + defer s.valuesMu.Unlock() + + // Do not allow modification of our copy of bounds. + bounds := slices.Clone(s.bounds) + + n := len(s.values) + hDPts := reset(h.DataPoints, n, n) + + var i int + for _, val := range s.values { + hDPts[i].Attributes = val.attrs + hDPts[i].StartTime = s.start + hDPts[i].Time = t + hDPts[i].Count = val.count + hDPts[i].Bounds = bounds + hDPts[i].BucketCounts = val.counts + + if !s.noSum { + hDPts[i].Sum = val.total + } + + if !s.noMinMax { + hDPts[i].Min = metricdata.NewExtrema(val.min) + hDPts[i].Max = metricdata.NewExtrema(val.max) + } + + collectExemplars(&hDPts[i].Exemplars, val.res.Collect) + + i++ + } + // Unused attribute sets do not report. + clear(s.values) + // The delta collection cycle resets. + s.start = t + + h.DataPoints = hDPts + *dest = h + + return n +} + +func (s *histogram[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() + + // If *dest is not a metricdata.Histogram, memory reuse is missed. In that + // case, use the zero-value h and hope for better alignment next cycle. 
+ h, _ := (*dest).(metricdata.Histogram[N]) + h.Temporality = metricdata.CumulativeTemporality + + s.valuesMu.Lock() + defer s.valuesMu.Unlock() + + // Do not allow modification of our copy of bounds. + bounds := slices.Clone(s.bounds) + + n := len(s.values) + hDPts := reset(h.DataPoints, n, n) + + var i int + for _, val := range s.values { + hDPts[i].Attributes = val.attrs + hDPts[i].StartTime = s.start + hDPts[i].Time = t + hDPts[i].Count = val.count + hDPts[i].Bounds = bounds + + // The HistogramDataPoint field values returned need to be copies of + // the buckets value as we will keep updating them. + // + // TODO (#3047): Making copies for bounds and counts incurs a large + // memory allocation footprint. Alternatives should be explored. + hDPts[i].BucketCounts = slices.Clone(val.counts) + + if !s.noSum { + hDPts[i].Sum = val.total + } + + if !s.noMinMax { + hDPts[i].Min = metricdata.NewExtrema(val.min) + hDPts[i].Max = metricdata.NewExtrema(val.max) + } + + collectExemplars(&hDPts[i].Exemplars, val.res.Collect) + + i++ + // TODO (#3006): This will use an unbounded amount of memory if there + // are unbounded number of attribute sets being aggregated. Attribute + // sets that become "stale" need to be forgotten so this will not + // overload the system. + } + + h.DataPoints = hDPts + *dest = h + + return n +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go new file mode 100644 index 00000000000..c359368403e --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go @@ -0,0 +1,162 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "context" + "sync" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// datapoint is timestamped measurement data. +type datapoint[N int64 | float64] struct { + attrs attribute.Set + value N + res exemplar.FilteredReservoir[N] +} + +func newLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *lastValue[N] { + return &lastValue[N]{ + newRes: r, + limit: newLimiter[datapoint[N]](limit), + values: make(map[attribute.Distinct]datapoint[N]), + start: now(), + } +} + +// lastValue summarizes a set of measurements as the last one made. +type lastValue[N int64 | float64] struct { + sync.Mutex + + newRes func() exemplar.FilteredReservoir[N] + limit limiter[datapoint[N]] + values map[attribute.Distinct]datapoint[N] + start time.Time +} + +func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { + s.Lock() + defer s.Unlock() + + attr := s.limit.Attributes(fltrAttr, s.values) + d, ok := s.values[attr.Equivalent()] + if !ok { + d.res = s.newRes() + } + + d.attrs = attr + d.value = value + d.res.Offer(ctx, value, droppedAttr) + + s.values[attr.Equivalent()] = d +} + +func (s *lastValue[N]) delta(dest *metricdata.Aggregation) int { + t := now() + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). 
+ gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints, t) + // Do not report stale values. + clear(s.values) + // Update start time for delta temporality. + s.start = t + + *dest = gData + + return n +} + +func (s *lastValue[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints, t) + // TODO (#3006): This will use an unbounded amount of memory if there + // are unbounded number of attribute sets being aggregated. Attribute + // sets that become "stale" need to be forgotten so this will not + // overload the system. + *dest = gData + + return n +} + +// copyDpts copies the datapoints held by s into dest. The number of datapoints +// copied is returned. +func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N], t time.Time) int { + n := len(s.values) + *dest = reset(*dest, n, n) + + var i int + for _, v := range s.values { + (*dest)[i].Attributes = v.attrs + (*dest)[i].StartTime = s.start + (*dest)[i].Time = t + (*dest)[i].Value = v.value + collectExemplars(&(*dest)[i].Exemplars, v.res.Collect) + i++ + } + return n +} + +// newPrecomputedLastValue returns an aggregator that summarizes a set of +// observations as the last one made. +func newPrecomputedLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *precomputedLastValue[N] { + return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)} +} + +// precomputedLastValue summarizes a set of observations as the last one made. +type precomputedLastValue[N int64 | float64] struct { + *lastValue[N] +} + +func (s *precomputedLastValue[N]) delta(dest *metricdata.Aggregation) int { + t := now() + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints, t) + // Do not report stale values. + clear(s.values) + // Update start time for delta temporality. + s.start = t + + *dest = gData + + return n +} + +func (s *precomputedLastValue[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints, t) + // Do not report stale values. + clear(s.values) + *dest = gData + + return n +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go new file mode 100644 index 00000000000..9ea0251edd7 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import "go.opentelemetry.io/otel/attribute" + +// overflowSet is the attribute set used to record a measurement when adding +// another distinct attribute set to the aggregate would exceed the aggregate +// limit. 
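+//
+// Illustrative behavior (an editorial sketch, not upstream code; it uses only
+// names defined in this file): with an aggregation limit of 3, two distinct
+// attribute sets are stored normally and any further new set is diverted to
+// this overflow set:
+//
+//	lim := newLimiter[int64](3)
+//	vals := map[attribute.Distinct]int64{}
+//	a := lim.Attributes(attribute.NewSet(attribute.String("k", "a")), vals) // {"k":"a"}
+//	vals[a.Equivalent()] = 1
+//	b := lim.Attributes(attribute.NewSet(attribute.String("k", "b")), vals) // {"k":"b"}
+//	vals[b.Equivalent()] = 1
+//	c := lim.Attributes(attribute.NewSet(attribute.String("k", "c")), vals) // overflowSet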
+var overflowSet = attribute.NewSet(attribute.Bool("otel.metric.overflow", true))
+
+// limiter limits aggregate values.
+type limiter[V any] struct {
+	// aggLimit is the maximum number of metric streams that can be aggregated.
+	//
+	// Any metric stream with attributes distinct from any set already
+	// aggregated, once the aggLimit has been reached, will instead be
+	// aggregated into an "overflow" metric stream. That stream will only
+	// contain the "otel.metric.overflow"=true attribute.
+	aggLimit int
+}
+
+// newLimiter returns a new limiter with the provided aggregation limit.
+func newLimiter[V any](aggregation int) limiter[V] {
+	return limiter[V]{aggLimit: aggregation}
+}
+
+// Attributes checks if adding a measurement for attrs will exceed the
+// aggregation cardinality limit for the existing measurements. If it will,
+// overflowSet is returned. Otherwise, if it will not exceed the limit, or the
+// limit is not set (limit <= 0), attrs is returned.
+func (l limiter[V]) Attributes(attrs attribute.Set, measurements map[attribute.Distinct]V) attribute.Set {
+	if l.aggLimit > 0 {
+		_, exists := measurements[attrs.Equivalent()]
+		if !exists && len(measurements) >= l.aggLimit-1 {
+			return overflowSet
+		}
+	}
+
+	return attrs
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
new file mode 100644
index 00000000000..89136692260
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
@@ -0,0 +1,238 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+type sumValue[N int64 | float64] struct {
+	n     N
+	res   exemplar.FilteredReservoir[N]
+	attrs attribute.Set
+}
+
+// valueMap is the storage for sums.
+type valueMap[N int64 | float64] struct {
+	sync.Mutex
+	newRes func() exemplar.FilteredReservoir[N]
+	limit  limiter[sumValue[N]]
+	values map[attribute.Distinct]sumValue[N]
+}
+
+func newValueMap[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *valueMap[N] {
+	return &valueMap[N]{
+		newRes: r,
+		limit:  newLimiter[sumValue[N]](limit),
+		values: make(map[attribute.Distinct]sumValue[N]),
+	}
+}
+
+func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+	s.Lock()
+	defer s.Unlock()
+
+	attr := s.limit.Attributes(fltrAttr, s.values)
+	v, ok := s.values[attr.Equivalent()]
+	if !ok {
+		v.res = s.newRes()
+	}
+
+	v.attrs = attr
+	v.n += value
+	v.res.Offer(ctx, value, droppedAttr)
+
+	s.values[attr.Equivalent()] = v
+}
+
+// newSum returns an aggregator that summarizes a set of measurements as their
+// arithmetic sum. Each sum is scoped by attributes and the aggregation cycle
+// the measurements were made in.
+func newSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *sum[N] {
+	return &sum[N]{
+		valueMap:  newValueMap[N](limit, r),
+		monotonic: monotonic,
+		start:     now(),
+	}
+}
+
+// sum summarizes a set of measurements made as their arithmetic sum.
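+//
+// Hypothetical usage (an editorial sketch, not upstream code; ctx is an
+// assumed context.Context and exemplar.Drop is the no-op reservoir factory
+// defined in this module):
+//
+//	s := newSum[int64](true, 0, exemplar.Drop[int64])
+//	s.measure(ctx, 3, attribute.NewSet(), nil)
+//	s.measure(ctx, 4, attribute.NewSet(), nil)
+//	var agg metricdata.Aggregation
+//	_ = s.delta(&agg) // agg holds one data point with Value 7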
+type sum[N int64 | float64] struct {
+	*valueMap[N]
+
+	monotonic bool
+	start     time.Time
+}
+
+func (s *sum[N]) delta(dest *metricdata.Aggregation) int {
+	t := now()
+
+	// If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
+	// use the zero-value sData and hope for better alignment next cycle.
+	sData, _ := (*dest).(metricdata.Sum[N])
+	sData.Temporality = metricdata.DeltaTemporality
+	sData.IsMonotonic = s.monotonic
+
+	s.Lock()
+	defer s.Unlock()
+
+	n := len(s.values)
+	dPts := reset(sData.DataPoints, n, n)
+
+	var i int
+	for _, val := range s.values {
+		dPts[i].Attributes = val.attrs
+		dPts[i].StartTime = s.start
+		dPts[i].Time = t
+		dPts[i].Value = val.n
+		collectExemplars(&dPts[i].Exemplars, val.res.Collect)
+		i++
+	}
+	// Do not report stale values.
+	clear(s.values)
+	// The delta collection cycle resets.
+	s.start = t
+
+	sData.DataPoints = dPts
+	*dest = sData
+
+	return n
+}
+
+func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int {
+	t := now()
+
+	// If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
+	// use the zero-value sData and hope for better alignment next cycle.
+	sData, _ := (*dest).(metricdata.Sum[N])
+	sData.Temporality = metricdata.CumulativeTemporality
+	sData.IsMonotonic = s.monotonic
+
+	s.Lock()
+	defer s.Unlock()
+
+	n := len(s.values)
+	dPts := reset(sData.DataPoints, n, n)
+
+	var i int
+	for _, value := range s.values {
+		dPts[i].Attributes = value.attrs
+		dPts[i].StartTime = s.start
+		dPts[i].Time = t
+		dPts[i].Value = value.n
+		collectExemplars(&dPts[i].Exemplars, value.res.Collect)
+		// TODO (#3006): This will use an unbounded amount of memory if there
+		// are unbounded number of attribute sets being aggregated. Attribute
+		// sets that become "stale" need to be forgotten so this will not
+		// overload the system.
+		i++
+	}
+
+	sData.DataPoints = dPts
+	*dest = sData
+
+	return n
+}
+
+// newPrecomputedSum returns an aggregator that summarizes a set of
+// observations as their arithmetic sum. Each sum is scoped by attributes and
+// the aggregation cycle the measurements were made in.
+func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *precomputedSum[N] {
+	return &precomputedSum[N]{
+		valueMap:  newValueMap[N](limit, r),
+		monotonic: monotonic,
+		start:     now(),
+	}
+}
+
+// precomputedSum summarizes a set of observations as their arithmetic sum.
+type precomputedSum[N int64 | float64] struct {
+	*valueMap[N]
+
+	monotonic bool
+	start     time.Time
+
+	reported map[attribute.Distinct]N
+}
+
+func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int {
+	t := now()
+	newReported := make(map[attribute.Distinct]N)
+
+	// If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
+	// use the zero-value sData and hope for better alignment next cycle.
+	sData, _ := (*dest).(metricdata.Sum[N])
+	sData.Temporality = metricdata.DeltaTemporality
+	sData.IsMonotonic = s.monotonic
+
+	s.Lock()
+	defer s.Unlock()
+
+	n := len(s.values)
+	dPts := reset(sData.DataPoints, n, n)
+
+	var i int
+	for key, value := range s.values {
+		delta := value.n - s.reported[key]
+
+		dPts[i].Attributes = value.attrs
+		dPts[i].StartTime = s.start
+		dPts[i].Time = t
+		dPts[i].Value = delta
+		collectExemplars(&dPts[i].Exemplars, value.res.Collect)
+
+		newReported[key] = value.n
+		i++
+	}
+	// Unused attribute sets do not report.
+	clear(s.values)
+	s.reported = newReported
+	// The delta collection cycle resets.
+ s.start = t + + sData.DataPoints = dPts + *dest = sData + + return n +} + +func (s *precomputedSum[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() + + // If *dest is not a metricdata.Sum, memory reuse is missed. In that case, + // use the zero-value sData and hope for better alignment next cycle. + sData, _ := (*dest).(metricdata.Sum[N]) + sData.Temporality = metricdata.CumulativeTemporality + sData.IsMonotonic = s.monotonic + + s.Lock() + defer s.Unlock() + + n := len(s.values) + dPts := reset(sData.DataPoints, n, n) + + var i int + for _, val := range s.values { + dPts[i].Attributes = val.attrs + dPts[i].StartTime = s.start + dPts[i].Time = t + dPts[i].Value = val.n + collectExemplars(&dPts[i].Exemplars, val.res.Collect) + + i++ + } + // Unused attribute sets do not report. + clear(s.values) + + sData.DataPoints = dPts + *dest = sData + + return n +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go new file mode 100644 index 00000000000..5394f48e0df --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package exemplar provides an implementation of the OpenTelemetry exemplar +// reservoir to be used in metric collection pipelines. +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go new file mode 100644 index 00000000000..5a0f39ae147 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go @@ -0,0 +1,23 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" +) + +// Drop returns a [FilteredReservoir] that drops all measurements it is offered. +func Drop[N int64 | float64]() FilteredReservoir[N] { return &dropRes[N]{} } + +type dropRes[N int64 | float64] struct{} + +// Offer does nothing, all measurements offered will be dropped. +func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {} + +// Collect resets dest. No exemplars will ever be returned. +func (r *dropRes[N]) Collect(dest *[]Exemplar) { + *dest = (*dest)[:0] +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go new file mode 100644 index 00000000000..fcaa6a4697c --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// Exemplar is a measurement sampled from a timeseries providing a typical +// example. 
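+//
+// A populated value might look like the following (field values are
+// illustrative only, not produced by upstream code):
+//
+//	Exemplar{
+//		FilteredAttributes: []attribute.KeyValue{attribute.String("user", "alice")},
+//		Time:               time.Now(),
+//		Value:              NewValue(int64(42)),
+//		TraceID:            traceID[:], // 16 bytes when a sampled span was active
+//		SpanID:             spanID[:],  // 8 bytes when a sampled span was active
+//	}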
+type Exemplar struct { + // FilteredAttributes are the attributes recorded with the measurement but + // filtered out of the timeseries' aggregated data. + FilteredAttributes []attribute.KeyValue + // Time is the time when the measurement was recorded. + Time time.Time + // Value is the measured value. + Value Value + // SpanID is the ID of the span that was active during the measurement. If + // no span was active or the span was not sampled this will be empty. + SpanID []byte `json:",omitempty"` + // TraceID is the ID of the trace the active span belonged to during the + // measurement. If no span was active or the span was not sampled this will + // be empty. + TraceID []byte `json:",omitempty"` +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go new file mode 100644 index 00000000000..152a069a09e --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + + "go.opentelemetry.io/otel/trace" +) + +// Filter determines if a measurement should be offered. +// +// The passed ctx needs to contain any baggage or span that were active +// when the measurement was made. This information may be used by the +// Reservoir in making a sampling decision. +type Filter func(context.Context) bool + +// SampledFilter is a [Filter] that will only offer measurements +// if the passed context associated with the measurement contains a sampled +// [go.opentelemetry.io/otel/trace.SpanContext]. +func SampledFilter(ctx context.Context) bool { + return trace.SpanContextFromContext(ctx).IsSampled() +} + +// AlwaysOnFilter is a [Filter] that always offers measurements. +func AlwaysOnFilter(ctx context.Context) bool { + return true +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go new file mode 100644 index 00000000000..9fedfa4be68 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// FilteredReservoir wraps a [Reservoir] with a filter. +type FilteredReservoir[N int64 | float64] interface { + // Offer accepts the parameters associated with a measurement. The + // parameters will be stored as an exemplar if the filter decides to + // sample the measurement. + // + // The passed ctx needs to contain any baggage or span that were active + // when the measurement was made. This information may be used by the + // Reservoir in making a sampling decision. + Offer(ctx context.Context, val N, attr []attribute.KeyValue) + // Collect returns all the held exemplars in the reservoir. + Collect(dest *[]Exemplar) +} + +// filteredReservoir handles the pre-sampled exemplar of measurements made. 
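+//
+// A FilteredReservoir is typically built by pairing a [Filter] with a
+// [Reservoir] (an editorial sketch, not upstream code; ctx is an assumed
+// context.Context):
+//
+//	res := NewFilteredReservoir[float64](SampledFilter, FixedSize(4))
+//	res.Offer(ctx, 0.5, nil) // kept only if ctx carries a sampled span
+//	var out []Exemplar
+//	res.Collect(&out)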
+type filteredReservoir[N int64 | float64] struct {
+	filter    Filter
+	reservoir Reservoir
+}
+
+// NewFilteredReservoir creates a [FilteredReservoir] which only offers values
+// that are allowed by the filter.
+func NewFilteredReservoir[N int64 | float64](f Filter, r Reservoir) FilteredReservoir[N] {
+	return &filteredReservoir[N]{
+		filter:    f,
+		reservoir: r,
+	}
+}
+
+func (f *filteredReservoir[N]) Offer(ctx context.Context, val N, attr []attribute.KeyValue) {
+	if f.filter(ctx) {
+		// Only record the current time if we are sampling this measurement.
+		f.reservoir.Offer(ctx, time.Now(), NewValue(val), attr)
+	}
+}
+
+func (f *filteredReservoir[N]) Collect(dest *[]Exemplar) { f.reservoir.Collect(dest) }
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go
new file mode 100644
index 00000000000..a6ff86d0271
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go
@@ -0,0 +1,46 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+	"context"
+	"slices"
+	"sort"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+// Histogram returns a [Reservoir] that samples the last measurement that falls
+// within a histogram bucket. The histogram bucket upper-boundaries are defined
+// by bounds.
+//
+// The passed bounds will be sorted by this function.
+func Histogram(bounds []float64) Reservoir {
+	slices.Sort(bounds)
+	return &histRes{
+		bounds:  bounds,
+		storage: newStorage(len(bounds) + 1),
+	}
+}
+
+type histRes struct {
+	*storage
+
+	// bounds are bucket bounds in ascending order.
+	bounds []float64
+}
+
+func (r *histRes) Offer(ctx context.Context, t time.Time, v Value, a []attribute.KeyValue) {
+	var x float64
+	switch v.Type() {
+	case Int64ValueType:
+		x = float64(v.Int64())
+	case Float64ValueType:
+		x = v.Float64()
+	default:
+		panic("unknown value type")
+	}
+	r.store[sort.SearchFloat64s(r.bounds, x)] = newMeasurement(ctx, t, v, a)
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go
new file mode 100644
index 00000000000..199a2608f71
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go
@@ -0,0 +1,191 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+	"context"
+	"math"
+	"math/rand"
+	"sync"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+var (
+	// rng is used to make sampling decisions.
+	//
+	// Do not use crypto/rand. There is no reason for the decrease in performance
+	// given this is not a security-sensitive decision.
+	rng = rand.New(rand.NewSource(time.Now().UnixNano()))
+	// Ensure concurrent safe access to rng and its underlying source.
+	rngMu sync.Mutex
+)
+
+// random returns, as a float64, a uniform pseudo-random number in the open
+// interval (0.0,1.0).
+func random() float64 {
+	// TODO: This does not return a uniform number. rng.Float64 returns a
+	// uniformly random int in [0,2^53) that is divided by 2^53. Meaning it
+	// returns multiples of 2^-53, and not all floating point numbers between 0
+	// and 1 (i.e. for values less than 2^-4 the last 4 bits of the significand
+	// are always going to be 0).
+	//
+	// An alternative algorithm should be considered that will actually return
+	// a uniform number in the interval (0,1). For example, since the default
+	// rand source provides a uniform distribution for Int63, this can be
+	// converted following the prototypical code of Mersenne Twister 64 (Takuji
+	// Nishimura and Makoto Matsumoto:
+	// http://www.math.sci.hiroshima-u.ac.jp/m-mat/MT/VERSIONS/C-LANG/mt19937-64.c)
+	//
+	//   (float64(rng.Int63()>>11) + 0.5) * (1.0 / 4503599627370496.0)
+	//
+	// There are likely many other methods to explore here as well.
+
+	rngMu.Lock()
+	defer rngMu.Unlock()
+
+	f := rng.Float64()
+	for f == 0 {
+		f = rng.Float64()
+	}
+	return f
+}
+
+// FixedSize returns a [Reservoir] that samples at most k exemplars. If there
+// are k or fewer measurements made, the Reservoir will sample each one. If
+// there are more than k, the Reservoir will then randomly sample all
+// additional measurements with a decreasing probability.
+func FixedSize(k int) Reservoir {
+	r := &randRes{storage: newStorage(k)}
+	r.reset()
+	return r
+}
+
+type randRes struct {
+	*storage
+
+	// count is the number of measurements seen.
+	count int64
+	// next is the next count that will store a measurement at a random index
+	// once the reservoir has been filled.
+	next int64
+	// w is the largest random number in a distribution that is used to compute
+	// the next next.
+	w float64
+}
+
+func (r *randRes) Offer(ctx context.Context, t time.Time, n Value, a []attribute.KeyValue) {
+	// The following algorithm is "Algorithm L" from Li, Kim-Hung (4 December
+	// 1994). "Reservoir-Sampling Algorithms of Time Complexity
+	// O(n(1+log(N/n)))". ACM Transactions on Mathematical Software. 20 (4):
+	// 481–493 (https://dl.acm.org/doi/10.1145/198429.198435).
+	//
+	// A high-level overview of "Algorithm L":
+	//   0) Pre-calculate the random count greater than the storage size when
+	//      an exemplar will be replaced.
+	//   1) Accept all measurements offered until the configured storage size is
+	//      reached.
+	//   2) Loop:
+	//      a) When the pre-calculated count is reached, replace a random
+	//         existing exemplar with the offered measurement.
+	//      b) Calculate the next random count greater than the existing one
+	//         which will replace another exemplar.
+	//
+	// The way a "replacement" count is computed is by looking at `n` number of
+	// independent random numbers each corresponding to an offered measurement.
+	// Of these numbers the smallest `k` (the same size as the storage
+	// capacity) of them are kept as a subset. The maximum value in this
+	// subset, called `w`, is used to weight another random number generation
+	// for the next count that will be considered.
+	//
+	// By weighting the next count computation as described, it is able to
+	// perform a uniformly-weighted sampling algorithm based on the number of
+	// samples the reservoir has seen so far. The sampling will "slow down" as
+	// more and more samples are offered so as to reduce a bias towards those
+	// offered just prior to the end of the collection.
+	//
+	// This algorithm is preferred because of its balance of simplicity and
+	// performance. It will compute three random numbers (the bulk of
+	// computation time) for each item that becomes part of the reservoir, but
+	// it does not spend any time on items that do not. In particular it has an
+	// asymptotic runtime of O(k(1 + log(n/k))) where n is the number of
+	// measurements offered and k is the reservoir size.
+	//
+	// See https://en.wikipedia.org/wiki/Reservoir_sampling for an overview of
+	// this and other reservoir sampling algorithms. See
+	// https://github.com/MrAlias/reservoir-sampling for a performance
+	// comparison of reservoir sampling algorithms.
+
+	if int(r.count) < cap(r.store) {
+		r.store[r.count] = newMeasurement(ctx, t, n, a)
+	} else {
+		if r.count == r.next {
+			// Overwrite a random existing measurement with the one offered.
+			idx := int(rng.Int63n(int64(cap(r.store))))
+			r.store[idx] = newMeasurement(ctx, t, n, a)
+			r.advance()
+		}
+	}
+	r.count++
+}
+
+// reset resets r to the initial state.
+func (r *randRes) reset() {
+	// This resets the number of exemplars known.
+	r.count = 0
+	// Random index inserts should only happen after the storage is full.
+	r.next = int64(cap(r.store))
+
+	// Initial random number in the series used to generate r.next.
+	//
+	// This is set before r.advance to reset or initialize the random number
+	// series. Without doing so it would always be 0 or never restart a new
+	// random number series.
+	//
+	// This maps the uniform random number in (0,1) to a geometric distribution
+	// over the same interval. The mean of the distribution is inversely
+	// proportional to the storage capacity.
+	r.w = math.Exp(math.Log(random()) / float64(cap(r.store)))
+
+	r.advance()
+}
+
+// advance updates the count at which the offered measurement will overwrite an
+// existing exemplar.
+func (r *randRes) advance() {
+	// Calculate the next value in the random number series.
+	//
+	// The current value of r.w is based on the max of a distribution of random
+	// numbers (i.e. `w = max(u_1,u_2,...,u_k)` for `k` equal to the capacity
+	// of the storage and each `u` in the interval (0,w)). To calculate the
+	// next r.w we use the fact that when the next exemplar is selected to be
+	// included in the storage an existing one will be dropped, and the
+	// corresponding random number in the set used to calculate r.w will also
+	// be replaced. The replacement random number will also be within (0,w),
+	// therefore the next r.w will be based on the same distribution (i.e.
+	// `max(u_1,u_2,...,u_k)`). Therefore, we can sample the next r.w by
+	// computing the next random number `u` and take r.w as `w * u^(1/k)`.
+	r.w *= math.Exp(math.Log(random()) / float64(cap(r.store)))
+	// Use the new random number in the series to calculate the count of the
+	// next measurement that will be stored.
+	//
+	// Given 0 < r.w < 1, each iteration will result in subsequent r.w being
+	// smaller. This translates here into the next next being selected against
+	// a distribution with a higher mean (i.e. the expected value will increase
+	// and replacements become less likely).
+	//
+	// Important to note, the new r.next will always be at least 1 more than
+	// the last r.next.
+	r.next += int64(math.Log(random())/math.Log(1-r.w)) + 1
+}
+
+func (r *randRes) Collect(dest *[]Exemplar) {
+	r.storage.Collect(dest)
+	// Call reset here even though it will reset r.count and restart the random
+	// number series. This will persist any old exemplars as long as no new
+	// measurements are offered, but it will also prioritize those new
+	// measurements that are made over the older collection cycle ones.
+ r.reset() +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go new file mode 100644 index 00000000000..80fa59554f2 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go @@ -0,0 +1,32 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// Reservoir holds the sampled exemplar of measurements made. +type Reservoir interface { + // Offer accepts the parameters associated with a measurement. The + // parameters will be stored as an exemplar if the Reservoir decides to + // sample the measurement. + // + // The passed ctx needs to contain any baggage or span that were active + // when the measurement was made. This information may be used by the + // Reservoir in making a sampling decision. + // + // The time t is the time when the measurement was made. The val and attr + // parameters are the value and dropped (filtered) attributes of the + // measurement respectively. + Offer(ctx context.Context, t time.Time, val Value, attr []attribute.KeyValue) + + // Collect returns all the held exemplars. + // + // The Reservoir state is preserved after this call. + Collect(dest *[]Exemplar) +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go new file mode 100644 index 00000000000..10b2976f796 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go @@ -0,0 +1,95 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// storage is an exemplar storage for [Reservoir] implementations. +type storage struct { + // store are the measurements sampled. + // + // This does not use []metricdata.Exemplar because it potentially would + // require an allocation for trace and span IDs in the hot path of Offer. + store []measurement +} + +func newStorage(n int) *storage { + return &storage{store: make([]measurement, n)} +} + +// Collect returns all the held exemplars. +// +// The Reservoir state is preserved after this call. +func (r *storage) Collect(dest *[]Exemplar) { + *dest = reset(*dest, len(r.store), len(r.store)) + var n int + for _, m := range r.store { + if !m.valid { + continue + } + + m.Exemplar(&(*dest)[n]) + n++ + } + *dest = (*dest)[:n] +} + +// measurement is a measurement made by a telemetry system. +type measurement struct { + // FilteredAttributes are the attributes dropped during the measurement. + FilteredAttributes []attribute.KeyValue + // Time is the time when the measurement was made. + Time time.Time + // Value is the value of the measurement. + Value Value + // SpanContext is the SpanContext active when a measurement was made. + SpanContext trace.SpanContext + + valid bool +} + +// newMeasurement returns a new non-empty Measurement. 
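+//
+// For example (an editorial sketch, not upstream code; ctx is an assumed
+// context.Context):
+//
+//	m := newMeasurement(ctx, time.Now(), NewValue(int64(1)), nil)
+//	var e Exemplar
+//	m.Exemplar(&e) // e.Value holds 1; trace/span IDs are set if ctx has a span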
+func newMeasurement(ctx context.Context, ts time.Time, v Value, droppedAttr []attribute.KeyValue) measurement {
+	return measurement{
+		FilteredAttributes: droppedAttr,
+		Time:               ts,
+		Value:              v,
+		SpanContext:        trace.SpanContextFromContext(ctx),
+		valid:              true,
+	}
+}
+
+// Exemplar returns m as an [Exemplar].
+func (m measurement) Exemplar(dest *Exemplar) {
+	dest.FilteredAttributes = m.FilteredAttributes
+	dest.Time = m.Time
+	dest.Value = m.Value
+
+	if m.SpanContext.HasTraceID() {
+		traceID := m.SpanContext.TraceID()
+		dest.TraceID = traceID[:]
+	} else {
+		dest.TraceID = dest.TraceID[:0]
+	}
+
+	if m.SpanContext.HasSpanID() {
+		spanID := m.SpanContext.SpanID()
+		dest.SpanID = spanID[:]
+	} else {
+		dest.SpanID = dest.SpanID[:0]
+	}
+}
+
+func reset[T any](s []T, length, capacity int) []T {
+	if cap(s) < capacity {
+		return make([]T, length, capacity)
+	}
+	return s[:length]
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go
new file mode 100644
index 00000000000..1957d6b1e3a
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import "math"
+
+// ValueType identifies the type of value used in exemplar data.
+type ValueType uint8
+
+const (
+	// UnknownValueType should not be used. It represents a misconfigured
+	// Value.
+	UnknownValueType ValueType = 0
+	// Int64ValueType represents a Value with int64 data.
+	Int64ValueType ValueType = 1
+	// Float64ValueType represents a Value with float64 data.
+	Float64ValueType ValueType = 2
+)
+
+// Value is the value of data held by an exemplar.
+type Value struct {
+	t   ValueType
+	val uint64
+}
+
+// NewValue returns a new [Value] for the provided value.
+func NewValue[N int64 | float64](value N) Value {
+	switch v := any(value).(type) {
+	case int64:
+		return Value{t: Int64ValueType, val: uint64(v)}
+	case float64:
+		return Value{t: Float64ValueType, val: math.Float64bits(v)}
+	}
+	return Value{}
+}
+
+// Type returns the [ValueType] of data held by v.
+func (v Value) Type() ValueType { return v.t }
+
+// Int64 returns the value of v as an int64. If the ValueType of v is not an
+// Int64ValueType, 0 is returned.
+func (v Value) Int64() int64 {
+	if v.t == Int64ValueType {
+		// Assumes the correct int64 was stored in v.val based on type.
+		return int64(v.val) // nolint: gosec
+	}
+	return 0
+}
+
+// Float64 returns the value of v as a float64. If the ValueType of v is not
+// a Float64ValueType, 0 is returned.
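+//
+// For example, round-tripping a float64 through the bit-packed storage
+// (an editorial illustration, not upstream code):
+//
+//	v := NewValue(3.14)
+//	v.Type()    // Float64ValueType
+//	v.Float64() // 3.14
+//	v.Int64()   // 0; the wrong-type accessor returns the zero value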
+func (v Value) Float64() float64 {
+	if v.t == Float64ValueType {
+		return math.Float64frombits(v.val)
+	}
+	return 0
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go
new file mode 100644
index 00000000000..19ec6806ff7
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go
@@ -0,0 +1,13 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"
+
+// ReuseSlice returns a zeroed view of slice if its capacity is greater than or
+// equal to n. Otherwise, it returns a new []T with capacity equal to n.
+func ReuseSlice[T any](slice []T, n int) []T {
+	if cap(slice) >= n {
+		return slice[:n]
+	}
+	return make([]T, n)
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md
new file mode 100644
index 00000000000..aba69d65471
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md
@@ -0,0 +1,112 @@
+# Experimental Features
+
+The metric SDK contains features that have not yet stabilized in the OpenTelemetry specification.
+These features are added to the OpenTelemetry Go metric SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
+
+These features may change in backwards-incompatible ways as feedback is applied.
+See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
+
+## Features
+
+- [Cardinality Limit](#cardinality-limit)
+- [Exemplars](#exemplars)
+
+### Cardinality Limit
+
+The cardinality limit is the hard limit on the number of metric streams that can be collected for a single instrument.
+
+This experimental feature can be enabled by setting the `OTEL_GO_X_CARDINALITY_LIMIT` environment variable.
+The value must be an integer.
+All other values are ignored.
+
+If the value set is less than or equal to `0`, no limit will be applied.
+
+#### Examples
+
+Set the cardinality limit to 2000.
+
+```console
+export OTEL_GO_X_CARDINALITY_LIMIT=2000
+```
+
+Set an infinite cardinality limit (functionally equivalent to disabling the feature).
+
+```console
+export OTEL_GO_X_CARDINALITY_LIMIT=-1
+```
+
+Disable the cardinality limit.
+
+```console
+unset OTEL_GO_X_CARDINALITY_LIMIT
+```
+
+### Exemplars
+
+A sample of measurements made may be exported directly as a set of exemplars.
+
+This experimental feature can be enabled by setting the `OTEL_GO_X_EXEMPLAR` environment variable.
+The value must be the case-insensitive string `"true"` to enable the feature.
+All other values are ignored.
+
+Exemplar filters are also supported.
+The exemplar filter applies to all measurements made.
+They filter these measurements, only allowing certain measurements to be passed to the underlying exemplar reservoir.
+
+To change the exemplar filter from the default `"trace_based"` filter, set the `OTEL_METRICS_EXEMPLAR_FILTER` environment variable.
+The value must be the case-sensitive string defined by the [OpenTelemetry specification].
+ +- `"always_on"`: allows all measurements +- `"always_off"`: denies all measurements +- `"trace_based"`: allows only sampled measurements + +All values other than these will result in the default, `"trace_based"`, exemplar filter being used. + +[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification/blob/a6ca2fd484c9e76fe1d8e1c79c99f08f4745b5ee/specification/configuration/sdk-environment-variables.md#exemplar + +#### Examples + +Enable exemplars to be exported. + +```console +export OTEL_GO_X_EXEMPLAR=true +``` + +Disable exemplars from being exported. + +```console +unset OTEL_GO_X_EXEMPLAR +``` + +Set the exemplar filter to allow all measurements. + +```console +export OTEL_METRICS_EXEMPLAR_FILTER=always_on +``` + +Set the exemplar filter to deny all measurements. + +```console +export OTEL_METRICS_EXEMPLAR_FILTER=always_off +``` + +Set the exemplar filter to only allow sampled measurements. + +```console +export OTEL_METRICS_EXEMPLAR_FILTER=trace_based +``` + +Revert to the default exemplar filter (`"trace_based"`) + +```console +unset OTEL_METRICS_EXEMPLAR_FILTER +``` + +## Compatibility and Stability + +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md). +These features may be removed or modified in successive version releases, including patch versions. + +When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. +There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. +If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support. diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go new file mode 100644 index 00000000000..8cd2f37417b --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go @@ -0,0 +1,85 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x contains support for OTel metric SDK experimental features. +// +// This package should only be used for features defined in the specification. +// It should not be used for experiments or new project ideas. +package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x" + +import ( + "os" + "strconv" + "strings" +) + +var ( + // Exemplars is an experimental feature flag that defines if exemplars + // should be recorded for metric data-points. + // + // To enable this feature set the OTEL_GO_X_EXEMPLAR environment variable + // to the case-insensitive string value of "true" (i.e. "True" and "TRUE" + // will also enable this). + Exemplars = newFeature("EXEMPLAR", func(v string) (string, bool) { + if strings.ToLower(v) == "true" { + return v, true + } + return "", false + }) + + // CardinalityLimit is an experimental feature flag that defines if + // cardinality limits should be applied to the recorded metric data-points. + // + // To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment + // variable to the integer limit value you want to use. + // + // Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0 + // will disable the cardinality limits. 
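+	//
+	// For example (an editorial illustration, not upstream code): with
+	// OTEL_GO_X_CARDINALITY_LIMIT=2000 exported, CardinalityLimit.Lookup()
+	// returns (2000, true); with the variable unset or empty, it returns
+	// (0, false).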
+ CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) { + n, err := strconv.Atoi(v) + if err != nil { + return 0, false + } + return n, true + }) +) + +// Feature is an experimental feature control flag. It provides a uniform way +// to interact with these feature flags and parse their values. +type Feature[T any] struct { + key string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + return Feature[T]{ + key: envKeyRoot + suffix, + parse: parse, + } +} + +// Key returns the environment variable key that needs to be set to enable the +// feature. +func (f Feature[T]) Key() string { return f.key } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. +func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + vRaw := os.Getenv(f.key) + if vRaw == "" { + return v, ok + } + return f.parse(vRaw) +} + +// Enabled returns if the feature is enabled. +func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go new file mode 100644 index 00000000000..e0fd86ca78d --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go @@ -0,0 +1,203 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// ManualReader is a simple Reader that allows an application to +// read metrics on demand. +type ManualReader struct { + sdkProducer atomic.Value + shutdownOnce sync.Once + + mu sync.Mutex + isShutdown bool + externalProducers atomic.Value + + temporalitySelector TemporalitySelector + aggregationSelector AggregationSelector +} + +// Compile time check the manualReader implements Reader and is comparable. +var _ = map[Reader]struct{}{&ManualReader{}: {}} + +// NewManualReader returns a Reader which is directly called to collect metrics. +func NewManualReader(opts ...ManualReaderOption) *ManualReader { + cfg := newManualReaderConfig(opts) + r := &ManualReader{ + temporalitySelector: cfg.temporalitySelector, + aggregationSelector: cfg.aggregationSelector, + } + r.externalProducers.Store(cfg.producers) + return r +} + +// register stores the sdkProducer which enables the caller +// to read metrics from the SDK on demand. +func (mr *ManualReader) register(p sdkProducer) { + // Only register once. If producer is already set, do nothing. + if !mr.sdkProducer.CompareAndSwap(nil, produceHolder{produce: p.produce}) { + msg := "did not register manual reader" + global.Error(errDuplicateRegister, msg) + } +} + +// temporality reports the Temporality for the instrument kind provided. 
+func (mr *ManualReader) temporality(kind InstrumentKind) metricdata.Temporality {
+	return mr.temporalitySelector(kind)
+}
+
+// aggregation returns what Aggregation to use for kind.
+func (mr *ManualReader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive  // import-shadow for method scoped by type.
+	return mr.aggregationSelector(kind)
+}
+
+// Shutdown closes any connections and frees any resources used by the reader.
+//
+// This method is safe to call concurrently.
+func (mr *ManualReader) Shutdown(context.Context) error {
+	err := ErrReaderShutdown
+	mr.shutdownOnce.Do(func() {
+		// Any future call to Collect will now return ErrReaderShutdown.
+		mr.sdkProducer.Store(produceHolder{
+			produce: shutdownProducer{}.produce,
+		})
+		mr.mu.Lock()
+		defer mr.mu.Unlock()
+		mr.isShutdown = true
+		// release references to Producer(s)
+		mr.externalProducers.Store([]Producer{})
+		err = nil
+	})
+	return err
+}
+
+// Collect gathers all metric data related to the Reader from
+// the SDK and other Producers and stores the result in rm.
+//
+// Collect will return an error if called after shutdown.
+// Collect will return an error if rm is a nil ResourceMetrics.
+// Collect will return an error if the context's Done channel is closed.
+//
+// This method is safe to call concurrently.
+func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+	if rm == nil {
+		return errors.New("manual reader: *metricdata.ResourceMetrics is nil")
+	}
+	p := mr.sdkProducer.Load()
+	if p == nil {
+		return ErrReaderNotRegistered
+	}
+
+	ph, ok := p.(produceHolder)
+	if !ok {
+		// The atomic.Value is entirely in the ManualReader's control so this
+		// should never happen. In the unforeseen case that it does happen,
+		// return an error instead of panicking so a user's code does not halt
+		// in the process.
+		err := fmt.Errorf("manual reader: invalid producer: %T", p)
+		return err
+	}
+
+	err := ph.produce(ctx, rm)
+	if err != nil {
+		return err
+	}
+	var errs []error
+	for _, producer := range mr.externalProducers.Load().([]Producer) {
+		externalMetrics, err := producer.Produce(ctx)
+		if err != nil {
+			errs = append(errs, err)
+		}
+		rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...)
+	}
+
+	global.Debug("ManualReader collection", "Data", rm)
+
+	return unifyErrors(errs)
+}
+
+// MarshalLog returns logging data about the ManualReader.
+func (r *ManualReader) MarshalLog() interface{} {
+	r.mu.Lock()
+	down := r.isShutdown
+	r.mu.Unlock()
+	return struct {
+		Type       string
+		Registered bool
+		Shutdown   bool
+	}{
+		Type:       "ManualReader",
+		Registered: r.sdkProducer.Load() != nil,
+		Shutdown:   down,
+	}
+}
+
+// manualReaderConfig contains configuration options for a ManualReader.
+type manualReaderConfig struct {
+	temporalitySelector TemporalitySelector
+	aggregationSelector AggregationSelector
+	producers           []Producer
+}
+
+// newManualReaderConfig returns a manualReaderConfig configured with options.
+func newManualReaderConfig(opts []ManualReaderOption) manualReaderConfig {
+	cfg := manualReaderConfig{
+		temporalitySelector: DefaultTemporalitySelector,
+		aggregationSelector: DefaultAggregationSelector,
+	}
+	for _, opt := range opts {
+		cfg = opt.applyManual(cfg)
+	}
+	return cfg
+}
+
+// ManualReaderOption applies a configuration option value to a ManualReader.
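+//
+// For example (an editorial sketch, not upstream code; mySelector is an
+// assumed user-supplied TemporalitySelector):
+//
+//	r := NewManualReader(WithTemporalitySelector(mySelector))
+//	var rm metricdata.ResourceMetrics
+//	_ = r.Collect(context.Background(), &rm)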
+type ManualReaderOption interface { + applyManual(manualReaderConfig) manualReaderConfig +} + +// WithTemporalitySelector sets the TemporalitySelector a reader will use to +// determine the Temporality of an instrument based on its kind. If this +// option is not used, the reader will use the DefaultTemporalitySelector. +func WithTemporalitySelector(selector TemporalitySelector) ManualReaderOption { + return temporalitySelectorOption{selector: selector} +} + +type temporalitySelectorOption struct { + selector func(instrument InstrumentKind) metricdata.Temporality +} + +// applyManual returns a manualReaderConfig with option applied. +func (t temporalitySelectorOption) applyManual(mrc manualReaderConfig) manualReaderConfig { + mrc.temporalitySelector = t.selector + return mrc +} + +// WithAggregationSelector sets the AggregationSelector a reader will use to +// determine the aggregation to use for an instrument based on its kind. If +// this option is not used, the reader will use the DefaultAggregationSelector +// or the aggregation explicitly passed for a view matching an instrument. +func WithAggregationSelector(selector AggregationSelector) ManualReaderOption { + return aggregationSelectorOption{selector: selector} +} + +type aggregationSelectorOption struct { + selector AggregationSelector +} + +// applyManual returns a manualReaderConfig with option applied. +func (t aggregationSelectorOption) applyManual(c manualReaderConfig) manualReaderConfig { + c.aggregationSelector = t.selector + return c +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go new file mode 100644 index 00000000000..2309e5b2b0f --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go @@ -0,0 +1,729 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "errors" + "fmt" + + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" + "go.opentelemetry.io/otel/sdk/instrumentation" + + "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" +) + +// ErrInstrumentName indicates the created instrument has an invalid name. +// Valid names must consist of 255 or fewer characters including alphanumeric, _, ., -, / and start with a letter. +var ErrInstrumentName = errors.New("invalid instrument name") + +// meter handles the creation and coordination of all metric instruments. A +// meter represents a single instrumentation scope; all metric telemetry +// produced by an instrumentation scope will use metric instruments from a +// single meter. +type meter struct { + embedded.Meter + + scope instrumentation.Scope + pipes pipelines + + int64Insts *cacheWithErr[instID, *int64Inst] + float64Insts *cacheWithErr[instID, *float64Inst] + int64ObservableInsts *cacheWithErr[instID, int64Observable] + float64ObservableInsts *cacheWithErr[instID, float64Observable] + + int64Resolver resolver[int64] + float64Resolver resolver[float64] +} + +func newMeter(s instrumentation.Scope, p pipelines) *meter { + // viewCache ensures instrument conflicts, including number conflicts, this + // meter is asked to create are logged to the user. 
+	var viewCache cache[string, instID]
+
+	var int64Insts cacheWithErr[instID, *int64Inst]
+	var float64Insts cacheWithErr[instID, *float64Inst]
+	var int64ObservableInsts cacheWithErr[instID, int64Observable]
+	var float64ObservableInsts cacheWithErr[instID, float64Observable]
+
+	return &meter{
+		scope:                  s,
+		pipes:                  p,
+		int64Insts:             &int64Insts,
+		float64Insts:           &float64Insts,
+		int64ObservableInsts:   &int64ObservableInsts,
+		float64ObservableInsts: &float64ObservableInsts,
+		int64Resolver:          newResolver[int64](p, &viewCache),
+		float64Resolver:        newResolver[float64](p, &viewCache),
+	}
+}
+
+// Compile-time check meter implements metric.Meter.
+var _ metric.Meter = (*meter)(nil)
+
+// Int64Counter returns a new instrument identified by name and configured with
+// options. The instrument is used to synchronously record increasing int64
+// measurements during a computational operation.
+func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) {
+	cfg := metric.NewInt64CounterConfig(options...)
+	const kind = InstrumentKindCounter
+	p := int64InstProvider{m}
+	i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+	if err != nil {
+		return i, err
+	}
+
+	return i, validateInstrumentName(name)
+}
+
+// Int64UpDownCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to synchronously record
+// int64 measurements during a computational operation.
+func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
+	cfg := metric.NewInt64UpDownCounterConfig(options...)
+	const kind = InstrumentKindUpDownCounter
+	p := int64InstProvider{m}
+	i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+	if err != nil {
+		return i, err
+	}
+
+	return i, validateInstrumentName(name)
+}
+
+// Int64Histogram returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record the
+// distribution of int64 measurements during a computational operation.
+func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
+	cfg := metric.NewInt64HistogramConfig(options...)
+	p := int64InstProvider{m}
+	i, err := p.lookupHistogram(name, cfg)
+	if err != nil {
+		return i, err
+	}
+
+	return i, validateInstrumentName(name)
+}
+
+// Int64Gauge returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record
+// instantaneous int64 measurements during a computational operation.
+func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
+	cfg := metric.NewInt64GaugeConfig(options...)
+	const kind = InstrumentKindGauge
+	p := int64InstProvider{m}
+	i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+	if err != nil {
+		return i, err
+	}
+
+	return i, validateInstrumentName(name)
+}
+
+// int64ObservableInstrument returns a new observable identified by the Instrument.
+// It registers callbacks for each reader's pipeline.
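+// Creation is memoized by (Name, Description, Unit, Kind): a repeated request
+// for the same identity returns the cached observable, and any callbacks
+// passed on that repeated request are ignored with a warning.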
+func (m *meter) int64ObservableInstrument(id Instrument, callbacks []metric.Int64Callback) (int64Observable, error) {
+	key := instID{
+		Name:        id.Name,
+		Description: id.Description,
+		Unit:        id.Unit,
+		Kind:        id.Kind,
+	}
+	if m.int64ObservableInsts.HasKey(key) && len(callbacks) > 0 {
+		warnRepeatedObservableCallbacks(id)
+	}
+	return m.int64ObservableInsts.Lookup(key, func() (int64Observable, error) {
+		inst := newInt64Observable(m, id.Kind, id.Name, id.Description, id.Unit)
+		for _, insert := range m.int64Resolver.inserters {
+			// Connect the measure functions for instruments in this pipeline with the
+			// callbacks for this pipeline.
+			in, err := insert.Instrument(id, insert.readerDefaultAggregation(id.Kind))
+			if err != nil {
+				return inst, err
+			}
+			// Drop aggregation
+			if len(in) == 0 {
+				inst.dropAggregation = true
+				continue
+			}
+			inst.appendMeasures(in)
+			for _, cback := range callbacks {
+				inst := int64Observer{measures: in}
+				fn := cback
+				insert.addCallback(func(ctx context.Context) error { return fn(ctx, inst) })
+			}
+		}
+		return inst, validateInstrumentName(id.Name)
+	})
+}
+
+// Int64ObservableCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// increasing int64 measurements once per measurement collection cycle.
+// Only the measurements recorded during the collection cycle are exported.
+//
+// If Int64ObservableCounter is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
+func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
+	cfg := metric.NewInt64ObservableCounterConfig(options...)
+	id := Instrument{
+		Name:        name,
+		Description: cfg.Description(),
+		Unit:        cfg.Unit(),
+		Kind:        InstrumentKindObservableCounter,
+		Scope:       m.scope,
+	}
+	return m.int64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Int64ObservableUpDownCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// int64 measurements once per measurement collection cycle. Only the
+// measurements recorded during the collection cycle are exported.
+//
+// If Int64ObservableUpDownCounter is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
+func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
+	cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
+	id := Instrument{
+		Name:        name,
+		Description: cfg.Description(),
+		Unit:        cfg.Unit(),
+		Kind:        InstrumentKindObservableUpDownCounter,
+		Scope:       m.scope,
+	}
+	return m.int64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Int64ObservableGauge returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// instantaneous int64 measurements once per measurement collection cycle.
+// Only the measurements recorded during the collection cycle are exported.
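+//
+// An illustrative sketch of callback registration (the instrument name and
+// the countOpenFDs helper are hypothetical, not part of this SDK):
+//
+//	_, _ = meter.Int64ObservableGauge("process.open_fds",
+//		metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
+//			o.Observe(countOpenFDs())
+//			return nil
+//		}),
+//	)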
+//
+// If Int64ObservableGauge is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
+func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
+	cfg := metric.NewInt64ObservableGaugeConfig(options...)
+	id := Instrument{
+		Name:        name,
+		Description: cfg.Description(),
+		Unit:        cfg.Unit(),
+		Kind:        InstrumentKindObservableGauge,
+		Scope:       m.scope,
+	}
+	return m.int64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Float64Counter returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record increasing
+// float64 measurements during a computational operation.
+func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) {
+	cfg := metric.NewFloat64CounterConfig(options...)
+	const kind = InstrumentKindCounter
+	p := float64InstProvider{m}
+	i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+	if err != nil {
+		return i, err
+	}
+
+	return i, validateInstrumentName(name)
+}
+
+// Float64UpDownCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to synchronously record
+// float64 measurements during a computational operation.
+func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
+	cfg := metric.NewFloat64UpDownCounterConfig(options...)
+	const kind = InstrumentKindUpDownCounter
+	p := float64InstProvider{m}
+	i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+	if err != nil {
+		return i, err
+	}
+
+	return i, validateInstrumentName(name)
+}
+
+// Float64Histogram returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record the
+// distribution of float64 measurements during a computational operation.
+func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
+	cfg := metric.NewFloat64HistogramConfig(options...)
+	p := float64InstProvider{m}
+	i, err := p.lookupHistogram(name, cfg)
+	if err != nil {
+		return i, err
+	}
+
+	return i, validateInstrumentName(name)
+}
+
+// Float64Gauge returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record
+// instantaneous float64 measurements during a computational operation.
+func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
+	cfg := metric.NewFloat64GaugeConfig(options...)
+	const kind = InstrumentKindGauge
+	p := float64InstProvider{m}
+	i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+	if err != nil {
+		return i, err
+	}
+
+	return i, validateInstrumentName(name)
+}
+
+// float64ObservableInstrument returns a new observable identified by the Instrument.
+// It registers callbacks for each reader's pipeline.
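+// It mirrors int64ObservableInstrument; see the note there about memoized
+// creation and ignored repeat callbacks.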
+func (m *meter) float64ObservableInstrument(id Instrument, callbacks []metric.Float64Callback) (float64Observable, error) {
+	key := instID{
+		Name:        id.Name,
+		Description: id.Description,
+		Unit:        id.Unit,
+		Kind:        id.Kind,
+	}
+	if m.float64ObservableInsts.HasKey(key) && len(callbacks) > 0 {
+		warnRepeatedObservableCallbacks(id)
+	}
+	return m.float64ObservableInsts.Lookup(key, func() (float64Observable, error) {
+		inst := newFloat64Observable(m, id.Kind, id.Name, id.Description, id.Unit)
+		for _, insert := range m.float64Resolver.inserters {
+			// Connect the measure functions for instruments in this pipeline with the
+			// callbacks for this pipeline.
+			in, err := insert.Instrument(id, insert.readerDefaultAggregation(id.Kind))
+			if err != nil {
+				return inst, err
+			}
+			// Drop aggregation
+			if len(in) == 0 {
+				inst.dropAggregation = true
+				continue
+			}
+			inst.appendMeasures(in)
+			for _, cback := range callbacks {
+				inst := float64Observer{measures: in}
+				fn := cback
+				insert.addCallback(func(ctx context.Context) error { return fn(ctx, inst) })
+			}
+		}
+		return inst, validateInstrumentName(id.Name)
+	})
+}
+
+// Float64ObservableCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// increasing float64 measurements once per measurement collection cycle.
+// Only the measurements recorded during the collection cycle are exported.
+//
+// If Float64ObservableCounter is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
+func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
+	cfg := metric.NewFloat64ObservableCounterConfig(options...)
+	id := Instrument{
+		Name:        name,
+		Description: cfg.Description(),
+		Unit:        cfg.Unit(),
+		Kind:        InstrumentKindObservableCounter,
+		Scope:       m.scope,
+	}
+	return m.float64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Float64ObservableUpDownCounter returns a new instrument identified by name
+// and configured with options. The instrument is used to asynchronously record
+// float64 measurements once per measurement collection cycle. Only the
+// measurements recorded during the collection cycle are exported.
+//
+// If Float64ObservableUpDownCounter is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
+func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
+	cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
+	id := Instrument{
+		Name:        name,
+		Description: cfg.Description(),
+		Unit:        cfg.Unit(),
+		Kind:        InstrumentKindObservableUpDownCounter,
+		Scope:       m.scope,
+	}
+	return m.float64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Float64ObservableGauge returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// instantaneous float64 measurements once per measurement collection cycle.
+// Only the measurements recorded during the collection cycle are exported.
+//
+// If Float64ObservableGauge is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
+func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
+	cfg := metric.NewFloat64ObservableGaugeConfig(options...)
+	id := Instrument{
+		Name:        name,
+		Description: cfg.Description(),
+		Unit:        cfg.Unit(),
+		Kind:        InstrumentKindObservableGauge,
+		Scope:       m.scope,
+	}
+	return m.float64ObservableInstrument(id, cfg.Callbacks())
+}
+
+func validateInstrumentName(name string) error {
+	if len(name) == 0 {
+		return fmt.Errorf("%w: %s: is empty", ErrInstrumentName, name)
+	}
+	if len(name) > 255 {
+		return fmt.Errorf("%w: %s: longer than 255 characters", ErrInstrumentName, name)
+	}
+	if !isAlpha([]rune(name)[0]) {
+		return fmt.Errorf("%w: %s: must start with a letter", ErrInstrumentName, name)
+	}
+	if len(name) == 1 {
+		return nil
+	}
+	for _, c := range name[1:] {
+		if !isAlphanumeric(c) && c != '_' && c != '.' && c != '-' && c != '/' {
+			return fmt.Errorf("%w: %s: must only contain [A-Za-z0-9_.-/]", ErrInstrumentName, name)
+		}
+	}
+	return nil
+}
+
+func isAlpha(c rune) bool {
+	return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z')
+}
+
+func isAlphanumeric(c rune) bool {
+	return isAlpha(c) || ('0' <= c && c <= '9')
+}
+
+func warnRepeatedObservableCallbacks(id Instrument) {
+	inst := fmt.Sprintf(
+		"Instrument{Name: %q, Description: %q, Kind: %q, Unit: %q}",
+		id.Name, id.Description, "InstrumentKind"+id.Kind.String(), id.Unit,
+	)
+	global.Warn("Repeated observable instrument creation with callbacks. Ignoring new callbacks. Use meter.RegisterCallback and Registration.Unregister to manage callbacks.",
+		"instrument", inst,
+	)
+}
+
+// RegisterCallback registers f to be called each collection cycle so it will
+// make observations for insts during those cycles.
+//
+// The only instruments f can make observations for are insts. All other
+// observations will be dropped and an error will be logged.
+//
+// Only instruments from this meter can be registered with f; an error is
+// returned if other instruments are provided.
+//
+// Only observations made in the callback will be exported. Unlike synchronous
+// instruments, asynchronous callbacks can "forget" attribute sets that are no
+// longer relevant by omitting the observation during the callback.
+//
+// The returned Registration can be used to unregister f.
+func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
+	if len(insts) == 0 {
+		// Don't allocate an observer if not needed.
+		return noopRegister{}, nil
+	}
+
+	reg := newObserver()
+	var errs multierror
+	for _, inst := range insts {
+		// Unwrap any global.
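+		// Instruments obtained from the global API before the SDK was set
+		// are delegating wrappers; Unwrap exposes the SDK-backed instrument
+		// so the type switch below can recognize it.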
+ if u, ok := inst.(interface { + Unwrap() metric.Observable + }); ok { + inst = u.Unwrap() + } + + switch o := inst.(type) { + case int64Observable: + if err := o.registerable(m); err != nil { + if !errors.Is(err, errEmptyAgg) { + errs.append(err) + } + continue + } + reg.registerInt64(o.observablID) + case float64Observable: + if err := o.registerable(m); err != nil { + if !errors.Is(err, errEmptyAgg) { + errs.append(err) + } + continue + } + reg.registerFloat64(o.observablID) + default: + // Instrument external to the SDK. + return nil, fmt.Errorf("invalid observable: from different implementation") + } + } + + err := errs.errorOrNil() + if reg.len() == 0 { + // All insts use drop aggregation or are invalid. + return noopRegister{}, err + } + + // Some or all instruments were valid. + cback := func(ctx context.Context) error { return f(ctx, reg) } + return m.pipes.registerMultiCallback(cback), err +} + +type observer struct { + embedded.Observer + + float64 map[observablID[float64]]struct{} + int64 map[observablID[int64]]struct{} +} + +func newObserver() observer { + return observer{ + float64: make(map[observablID[float64]]struct{}), + int64: make(map[observablID[int64]]struct{}), + } +} + +func (r observer) len() int { + return len(r.float64) + len(r.int64) +} + +func (r observer) registerFloat64(id observablID[float64]) { + r.float64[id] = struct{}{} +} + +func (r observer) registerInt64(id observablID[int64]) { + r.int64[id] = struct{}{} +} + +var ( + errUnknownObserver = errors.New("unknown observable instrument") + errUnregObserver = errors.New("observable instrument not registered for callback") +) + +func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ...metric.ObserveOption) { + var oImpl float64Observable + switch conv := o.(type) { + case float64Observable: + oImpl = conv + case interface { + Unwrap() metric.Observable + }: + // Unwrap any global. + async := conv.Unwrap() + var ok bool + if oImpl, ok = async.(float64Observable); !ok { + global.Error(errUnknownObserver, "failed to record asynchronous") + return + } + default: + global.Error(errUnknownObserver, "failed to record") + return + } + + if _, registered := r.float64[oImpl.observablID]; !registered { + if !oImpl.dropAggregation { + global.Error(errUnregObserver, "failed to record", + "name", oImpl.name, + "description", oImpl.description, + "unit", oImpl.unit, + "number", fmt.Sprintf("%T", float64(0)), + ) + } + return + } + c := metric.NewObserveConfig(opts) + oImpl.observe(v, c.Attributes()) +} + +func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric.ObserveOption) { + var oImpl int64Observable + switch conv := o.(type) { + case int64Observable: + oImpl = conv + case interface { + Unwrap() metric.Observable + }: + // Unwrap any global. 
+ async := conv.Unwrap() + var ok bool + if oImpl, ok = async.(int64Observable); !ok { + global.Error(errUnknownObserver, "failed to record asynchronous") + return + } + default: + global.Error(errUnknownObserver, "failed to record") + return + } + + if _, registered := r.int64[oImpl.observablID]; !registered { + if !oImpl.dropAggregation { + global.Error(errUnregObserver, "failed to record", + "name", oImpl.name, + "description", oImpl.description, + "unit", oImpl.unit, + "number", fmt.Sprintf("%T", int64(0)), + ) + } + return + } + c := metric.NewObserveConfig(opts) + oImpl.observe(v, c.Attributes()) +} + +type noopRegister struct{ embedded.Registration } + +func (noopRegister) Unregister() error { + return nil +} + +// int64InstProvider provides int64 OpenTelemetry instruments. +type int64InstProvider struct{ *meter } + +func (p int64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]aggregate.Measure[int64], error) { + inst := Instrument{ + Name: name, + Description: desc, + Unit: u, + Kind: kind, + Scope: p.scope, + } + return p.int64Resolver.Aggregators(inst) +} + +func (p int64InstProvider) histogramAggs(name string, cfg metric.Int64HistogramConfig) ([]aggregate.Measure[int64], error) { + boundaries := cfg.ExplicitBucketBoundaries() + aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err() + if aggError != nil { + // If boundaries are invalid, ignore them. + boundaries = nil + } + inst := Instrument{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindHistogram, + Scope: p.scope, + } + measures, err := p.int64Resolver.HistogramAggregators(inst, boundaries) + return measures, errors.Join(aggError, err) +} + +// lookup returns the resolved instrumentImpl. +func (p int64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*int64Inst, error) { + return p.meter.int64Insts.Lookup(instID{ + Name: name, + Description: desc, + Unit: u, + Kind: kind, + }, func() (*int64Inst, error) { + aggs, err := p.aggs(kind, name, desc, u) + return &int64Inst{measures: aggs}, err + }) +} + +// lookupHistogram returns the resolved instrumentImpl. +func (p int64InstProvider) lookupHistogram(name string, cfg metric.Int64HistogramConfig) (*int64Inst, error) { + return p.meter.int64Insts.Lookup(instID{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindHistogram, + }, func() (*int64Inst, error) { + aggs, err := p.histogramAggs(name, cfg) + return &int64Inst{measures: aggs}, err + }) +} + +// float64InstProvider provides float64 OpenTelemetry instruments. +type float64InstProvider struct{ *meter } + +func (p float64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]aggregate.Measure[float64], error) { + inst := Instrument{ + Name: name, + Description: desc, + Unit: u, + Kind: kind, + Scope: p.scope, + } + return p.float64Resolver.Aggregators(inst) +} + +func (p float64InstProvider) histogramAggs(name string, cfg metric.Float64HistogramConfig) ([]aggregate.Measure[float64], error) { + boundaries := cfg.ExplicitBucketBoundaries() + aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err() + if aggError != nil { + // If boundaries are invalid, ignore them. 
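+		// The invalid-boundary error is still surfaced to the caller through
+		// errors.Join below; only the aggregation falls back to the default
+		// or view-provided boundaries.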
+		boundaries = nil
+	}
+	inst := Instrument{
+		Name:        name,
+		Description: cfg.Description(),
+		Unit:        cfg.Unit(),
+		Kind:        InstrumentKindHistogram,
+		Scope:       p.scope,
+	}
+	measures, err := p.float64Resolver.HistogramAggregators(inst, boundaries)
+	return measures, errors.Join(aggError, err)
+}
+
+// lookup returns the resolved instrumentImpl.
+func (p float64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*float64Inst, error) {
+	return p.meter.float64Insts.Lookup(instID{
+		Name:        name,
+		Description: desc,
+		Unit:        u,
+		Kind:        kind,
+	}, func() (*float64Inst, error) {
+		aggs, err := p.aggs(kind, name, desc, u)
+		return &float64Inst{measures: aggs}, err
+	})
+}
+
+// lookupHistogram returns the resolved instrumentImpl.
+func (p float64InstProvider) lookupHistogram(name string, cfg metric.Float64HistogramConfig) (*float64Inst, error) {
+	return p.meter.float64Insts.Lookup(instID{
+		Name:        name,
+		Description: cfg.Description(),
+		Unit:        cfg.Unit(),
+		Kind:        InstrumentKindHistogram,
+	}, func() (*float64Inst, error) {
+		aggs, err := p.histogramAggs(name, cfg)
+		return &float64Inst{measures: aggs}, err
+	})
+}
+
+type int64Observer struct {
+	embedded.Int64Observer
+	measures[int64]
+}
+
+func (o int64Observer) Observe(val int64, opts ...metric.ObserveOption) {
+	c := metric.NewObserveConfig(opts)
+	o.observe(val, c.Attributes())
+}
+
+type float64Observer struct {
+	embedded.Float64Observer
+	measures[float64]
+}
+
+func (o float64Observer) Observe(val float64, opts ...metric.ObserveOption) {
+	c := metric.NewObserveConfig(opts)
+	o.observe(val, c.Attributes())
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/README.md b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/README.md
new file mode 100644
index 00000000000..d1390df1b5e
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/README.md
@@ -0,0 +1,3 @@
+# SDK Metric data
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/metric/metricdata)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric/metricdata)
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go
new file mode 100644
index 00000000000..d32cfc67d92
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go
@@ -0,0 +1,296 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata"
+
+import (
+	"encoding/json"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+)
+
+// ResourceMetrics is a collection of ScopeMetrics and the associated Resource
+// that created them.
+type ResourceMetrics struct {
+	// Resource represents the entity that collected the metrics.
+	Resource *resource.Resource
+	// ScopeMetrics are the collection of metrics with unique Scopes.
+	ScopeMetrics []ScopeMetrics
+}
+
+// ScopeMetrics is a collection of Metrics produced by a Meter.
+type ScopeMetrics struct {
+	// Scope is the Scope that the Meter was created with.
+	Scope instrumentation.Scope
+	// Metrics are a list of aggregations created by the Meter.
+	Metrics []Metrics
+}
+
+// Metrics is a collection of one or more aggregated timeseries from an Instrument.
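+//
+// Consumers typically type switch on Data to recover the concrete
+// aggregation; a hedged sketch:
+//
+//	switch data := m.Data.(type) {
+//	case metricdata.Sum[int64]:
+//		_ = data.DataPoints
+//	case metricdata.Histogram[float64]:
+//		_ = data.DataPoints
+//	}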
+type Metrics struct {
+	// Name is the name of the Instrument that created this data.
+	Name string
+	// Description is the description of the Instrument, which can be used in documentation.
+	Description string
+	// Unit is the unit in which the Instrument reports.
+	Unit string
+	// Data is the aggregated data from an Instrument.
+	Data Aggregation
+}
+
+// Aggregation is the store of data reported by an Instrument.
+// It will be one of: Gauge, Sum, Histogram, ExponentialHistogram, or Summary.
+type Aggregation interface {
+	privateAggregation()
+}
+
+// Gauge represents a measurement of the current value of an instrument.
+type Gauge[N int64 | float64] struct {
+	// DataPoints are the individual aggregated measurements with unique
+	// Attributes.
+	DataPoints []DataPoint[N]
+}
+
+func (Gauge[N]) privateAggregation() {}
+
+// Sum represents the sum of all measurements of values from an instrument.
+type Sum[N int64 | float64] struct {
+	// DataPoints are the individual aggregated measurements with unique
+	// Attributes.
+	DataPoints []DataPoint[N]
+	// Temporality describes whether the aggregation is reported as the change from the
+	// last report time, or the cumulative changes since a fixed start time.
+	Temporality Temporality
+	// IsMonotonic indicates whether this aggregation only increases or decreases.
+	IsMonotonic bool
+}
+
+func (Sum[N]) privateAggregation() {}
+
+// DataPoint is a single data point in a timeseries.
+type DataPoint[N int64 | float64] struct {
+	// Attributes is the set of key value pairs that uniquely identify the
+	// timeseries.
+	Attributes attribute.Set
+	// StartTime is when the timeseries was started. (optional)
+	StartTime time.Time `json:",omitempty"`
+	// Time is the time when the timeseries was recorded. (optional)
+	Time time.Time `json:",omitempty"`
+	// Value is the value of this data point.
+	Value N
+
+	// Exemplars is the sampled Exemplars collected during the timeseries.
+	Exemplars []Exemplar[N] `json:",omitempty"`
+}
+
+// Histogram represents the histogram of all measurements of values from an instrument.
+type Histogram[N int64 | float64] struct {
+	// DataPoints are the individual aggregated measurements with unique
+	// Attributes.
+	DataPoints []HistogramDataPoint[N]
+	// Temporality describes whether the aggregation is reported as the change from the
+	// last report time, or the cumulative changes since a fixed start time.
+	Temporality Temporality
+}
+
+func (Histogram[N]) privateAggregation() {}
+
+// HistogramDataPoint is a single histogram data point in a timeseries.
+type HistogramDataPoint[N int64 | float64] struct {
+	// Attributes is the set of key value pairs that uniquely identify the
+	// timeseries.
+	Attributes attribute.Set
+	// StartTime is when the timeseries was started.
+	StartTime time.Time
+	// Time is the time when the timeseries was recorded.
+	Time time.Time
+
+	// Count is the number of updates this histogram has been calculated with.
+	Count uint64
+	// Bounds are the upper bounds of the buckets of the histogram. Because the
+	// last boundary is +infinity, it is implied and not included.
+	Bounds []float64
+	// BucketCounts is the count of each of the buckets.
+	BucketCounts []uint64
+
+	// Min is the minimum value recorded. (optional)
+	Min Extrema[N]
+	// Max is the maximum value recorded. (optional)
+	Max Extrema[N]
+	// Sum is the sum of the values recorded.
+	Sum N
+
+	// Exemplars is the sampled Exemplars collected during the timeseries.
+	Exemplars []Exemplar[N] `json:",omitempty"`
+}
+
+// ExponentialHistogram represents the histogram of all measurements of values from an instrument.
+type ExponentialHistogram[N int64 | float64] struct {
+	// DataPoints are the individual aggregated measurements with unique
+	// attributes.
+	DataPoints []ExponentialHistogramDataPoint[N]
+	// Temporality describes whether the aggregation is reported as the change from the
+	// last report time, or the cumulative changes since a fixed start time.
+	Temporality Temporality
+}
+
+func (ExponentialHistogram[N]) privateAggregation() {}
+
+// ExponentialHistogramDataPoint is a single exponential histogram data point in a timeseries.
+type ExponentialHistogramDataPoint[N int64 | float64] struct {
+	// Attributes is the set of key value pairs that uniquely identify the
+	// timeseries.
+	Attributes attribute.Set
+	// StartTime is when the timeseries was started.
+	StartTime time.Time
+	// Time is the time when the timeseries was recorded.
+	Time time.Time
+
+	// Count is the number of updates this histogram has been calculated with.
+	Count uint64
+	// Min is the minimum value recorded. (optional)
+	Min Extrema[N]
+	// Max is the maximum value recorded. (optional)
+	Max Extrema[N]
+	// Sum is the sum of the values recorded.
+	Sum N
+
+	// Scale describes the resolution of the histogram. Boundaries are
+	// located at powers of the base, where:
+	//
+	//   base = 2 ^ (2 ^ -Scale)
+	Scale int32
+	// ZeroCount is the number of values whose absolute value
+	// is less than or equal to [ZeroThreshold].
+	// When ZeroThreshold is 0, this is the number of values that
+	// cannot be expressed using the standard exponential formula
+	// as well as values that have been rounded to zero.
+	// ZeroCount represents the special zero count bucket.
+	ZeroCount uint64
+
+	// PositiveBucket is the range of positive value bucket counts.
+	PositiveBucket ExponentialBucket
+	// NegativeBucket is the range of negative value bucket counts.
+	NegativeBucket ExponentialBucket
+
+	// ZeroThreshold is the width of the zero region, where the zero region is
+	// defined as the closed interval [-ZeroThreshold, ZeroThreshold].
+	ZeroThreshold float64
+
+	// Exemplars is the sampled Exemplars collected during the timeseries.
+	Exemplars []Exemplar[N] `json:",omitempty"`
+}
+
+// ExponentialBucket is a set of bucket counts, encoded in a contiguous array
+// of counts.
+type ExponentialBucket struct {
+	// Offset is the bucket index of the first entry in the Counts slice.
+	Offset int32
+	// Counts is a slice where Counts[i] carries the count of the bucket at
+	// index (Offset+i). Counts[i] is the count of values greater than
+	// base^(Offset+i) and less than or equal to base^(Offset+i+1).
+	Counts []uint64
+}
+
+// Extrema is the minimum or maximum value of a dataset.
+type Extrema[N int64 | float64] struct {
+	value N
+	valid bool
+}
+
+// MarshalText converts the Extrema value to text.
+func (e Extrema[N]) MarshalText() ([]byte, error) {
+	if !e.valid {
+		return json.Marshal(nil)
+	}
+	return json.Marshal(e.value)
+}
+
+// MarshalJSON converts the Extrema value to a JSON number.
+func (e *Extrema[N]) MarshalJSON() ([]byte, error) {
+	return e.MarshalText()
+}
+
+// NewExtrema returns an Extrema set to v.
+func NewExtrema[N int64 | float64](v N) Extrema[N] {
+	return Extrema[N]{value: v, valid: true}
+}
+
+// Value returns the Extrema value and true if the Extrema is defined.
+// Otherwise, if the Extrema is its zero-value, defined will be false.
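+//
+// For example (dp is a hypothetical HistogramDataPoint):
+//
+//	if v, defined := dp.Min.Value(); defined {
+//		_ = v // a minimum was recorded
+//	}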
+func (e Extrema[N]) Value() (v N, defined bool) { + return e.value, e.valid +} + +// Exemplar is a measurement sampled from a timeseries providing a typical +// example. +type Exemplar[N int64 | float64] struct { + // FilteredAttributes are the attributes recorded with the measurement but + // filtered out of the timeseries' aggregated data. + FilteredAttributes []attribute.KeyValue + // Time is the time when the measurement was recorded. + Time time.Time + // Value is the measured value. + Value N + // SpanID is the ID of the span that was active during the measurement. If + // no span was active or the span was not sampled this will be empty. + SpanID []byte `json:",omitempty"` + // TraceID is the ID of the trace the active span belonged to during the + // measurement. If no span was active or the span was not sampled this will + // be empty. + TraceID []byte `json:",omitempty"` +} + +// Summary metric data are used to convey quantile summaries, +// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary) +// data type. +// +// These data points cannot always be merged in a meaningful way. The Summary +// type is only used by bridges from other metrics libraries, and cannot be +// produced using OpenTelemetry instrumentation. +type Summary struct { + // DataPoints are the individual aggregated measurements with unique + // attributes. + DataPoints []SummaryDataPoint +} + +func (Summary) privateAggregation() {} + +// SummaryDataPoint is a single data point in a timeseries that describes the +// time-varying values of a Summary metric. +type SummaryDataPoint struct { + // Attributes is the set of key value pairs that uniquely identify the + // timeseries. + Attributes attribute.Set + + // StartTime is when the timeseries was started. + StartTime time.Time + // Time is the time when the timeseries was recorded. + Time time.Time + + // Count is the number of updates this summary has been calculated with. + Count uint64 + + // Sum is the sum of the values recorded. + Sum float64 + + // (Optional) list of values at different quantiles of the distribution calculated + // from the current snapshot. The quantiles must be strictly increasing. + QuantileValues []QuantileValue +} + +// QuantileValue is the value at a given quantile of a summary. +type QuantileValue struct { + // Quantile is the quantile of this value. + // + // Must be in the interval [0.0, 1.0]. + Quantile float64 + + // Value is the value at the given quantile of a summary. + // + // Quantile values must NOT be negative. + Value float64 +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go new file mode 100644 index 00000000000..187713dadf7 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go @@ -0,0 +1,30 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate stringer -type=Temporality + +package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata" + +// Temporality defines the window that an aggregation was calculated over. +type Temporality uint8 + +const ( + // undefinedTemporality represents an unset Temporality. + //nolint:deadcode,unused,varcheck + undefinedTemporality Temporality = iota + + // CumulativeTemporality defines a measurement interval that continues to + // expand forward in time from a starting point. 
New measurements are
+	// added to all previous measurements since a start time.
+	CumulativeTemporality
+
+	// DeltaTemporality defines a measurement interval that resets each cycle.
+	// Measurements from one cycle are recorded independently; measurements
+	// from other cycles do not affect them.
+	DeltaTemporality
+)
+
+// MarshalText returns the text encoding of t.
+func (t Temporality) MarshalText() ([]byte, error) {
+	return []byte(t.String()), nil
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go
new file mode 100644
index 00000000000..4da833cdce2
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go
@@ -0,0 +1,25 @@
+// Code generated by "stringer -type=Temporality"; DO NOT EDIT.
+
+package metricdata
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[undefinedTemporality-0]
+	_ = x[CumulativeTemporality-1]
+	_ = x[DeltaTemporality-2]
+}
+
+const _Temporality_name = "undefinedTemporalityCumulativeTemporalityDeltaTemporality"
+
+var _Temporality_index = [...]uint8{0, 20, 41, 57}
+
+func (i Temporality) String() string {
+	if i >= Temporality(len(_Temporality_index)-1) {
+		return "Temporality(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _Temporality_name[_Temporality_index[i]:_Temporality_index[i+1]]
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
new file mode 100644
index 00000000000..67ee1b11a2e
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
@@ -0,0 +1,370 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// Default periodic reader timing.
+const (
+	defaultTimeout  = time.Millisecond * 30000
+	defaultInterval = time.Millisecond * 60000
+)
+
+// periodicReaderConfig contains configuration options for a PeriodicReader.
+type periodicReaderConfig struct {
+	interval  time.Duration
+	timeout   time.Duration
+	producers []Producer
+}
+
+// newPeriodicReaderConfig returns a periodicReaderConfig configured with
+// options.
+func newPeriodicReaderConfig(options []PeriodicReaderOption) periodicReaderConfig {
+	c := periodicReaderConfig{
+		interval: envDuration(envInterval, defaultInterval),
+		timeout:  envDuration(envTimeout, defaultTimeout),
+	}
+	for _, o := range options {
+		c = o.applyPeriodic(c)
+	}
+	return c
+}
+
+// PeriodicReaderOption applies a configuration option value to a PeriodicReader.
+type PeriodicReaderOption interface {
+	applyPeriodic(periodicReaderConfig) periodicReaderConfig
+}
+
+// periodicReaderOptionFunc applies a set of options to a periodicReaderConfig.
+type periodicReaderOptionFunc func(periodicReaderConfig) periodicReaderConfig
+
+// applyPeriodic returns a periodicReaderConfig with option(s) applied.
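+//
+// This indirection lets ordinary functions act as options: WithTimeout and
+// WithInterval below are implemented as periodicReaderOptionFuncs that
+// mutate the config copy they receive.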
+func (o periodicReaderOptionFunc) applyPeriodic(conf periodicReaderConfig) periodicReaderConfig { + return o(conf) +} + +// WithTimeout configures the time a PeriodicReader waits for an export to +// complete before canceling it. This includes an export which occurs as part +// of Shutdown or ForceFlush if the user passed context does not have a +// deadline. If the user passed context does have a deadline, it will be used +// instead. +// +// This option overrides any value set for the +// OTEL_METRIC_EXPORT_TIMEOUT environment variable. +// +// If this option is not used or d is less than or equal to zero, 30 seconds +// is used as the default. +func WithTimeout(d time.Duration) PeriodicReaderOption { + return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig { + if d <= 0 { + return conf + } + conf.timeout = d + return conf + }) +} + +// WithInterval configures the intervening time between exports for a +// PeriodicReader. +// +// This option overrides any value set for the +// OTEL_METRIC_EXPORT_INTERVAL environment variable. +// +// If this option is not used or d is less than or equal to zero, 60 seconds +// is used as the default. +func WithInterval(d time.Duration) PeriodicReaderOption { + return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig { + if d <= 0 { + return conf + } + conf.interval = d + return conf + }) +} + +// NewPeriodicReader returns a Reader that collects and exports metric data to +// the exporter at a defined interval. By default, the returned Reader will +// collect and export data every 60 seconds, and will cancel any attempts that +// exceed 30 seconds, collect and export combined. The collect and export time +// are not counted towards the interval between attempts. +// +// The Collect method of the returned Reader continues to gather and return +// metric data to the user. It will not automatically send that data to the +// exporter. That is left to the user to accomplish. +func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) *PeriodicReader { + conf := newPeriodicReaderConfig(options) + ctx, cancel := context.WithCancel(context.Background()) + r := &PeriodicReader{ + interval: conf.interval, + timeout: conf.timeout, + exporter: exporter, + flushCh: make(chan chan error), + cancel: cancel, + done: make(chan struct{}), + rmPool: sync.Pool{ + New: func() interface{} { + return &metricdata.ResourceMetrics{} + }, + }, + } + r.externalProducers.Store(conf.producers) + + go func() { + defer func() { close(r.done) }() + r.run(ctx, conf.interval) + }() + + return r +} + +// PeriodicReader is a Reader that continuously collects and exports metric +// data at a set interval. +type PeriodicReader struct { + sdkProducer atomic.Value + + mu sync.Mutex + isShutdown bool + externalProducers atomic.Value + + interval time.Duration + timeout time.Duration + exporter Exporter + flushCh chan chan error + + done chan struct{} + cancel context.CancelFunc + shutdownOnce sync.Once + + rmPool sync.Pool +} + +// Compile time check the periodicReader implements Reader and is comparable. +var _ = map[Reader]struct{}{&PeriodicReader{}: {}} + +// newTicker allows testing override. +var newTicker = time.NewTicker + +// run continuously collects and exports metric data at the specified +// interval. This will run until ctx is canceled or times out. 
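+// A flush request arriving on r.flushCh triggers an immediate
+// collect-and-export and resets the interval timer.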
+func (r *PeriodicReader) run(ctx context.Context, interval time.Duration) {
+	ticker := newTicker(interval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			err := r.collectAndExport(ctx)
+			if err != nil {
+				otel.Handle(err)
+			}
+		case errCh := <-r.flushCh:
+			errCh <- r.collectAndExport(ctx)
+			ticker.Reset(interval)
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+// register registers p as the producer of this reader.
+func (r *PeriodicReader) register(p sdkProducer) {
+	// Only register once. If producer is already set, do nothing.
+	if !r.sdkProducer.CompareAndSwap(nil, produceHolder{produce: p.produce}) {
+		msg := "did not register periodic reader"
+		global.Error(errDuplicateRegister, msg)
+	}
+}
+
+// temporality reports the Temporality for the instrument kind provided.
+func (r *PeriodicReader) temporality(kind InstrumentKind) metricdata.Temporality {
+	return r.exporter.Temporality(kind)
+}
+
+// aggregation returns what Aggregation to use for kind.
+func (r *PeriodicReader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive // import-shadow for method scoped by type.
+	return r.exporter.Aggregation(kind)
+}
+
+// collectAndExport gathers all metric data related to the periodicReader r from
+// the SDK and exports it with r's exporter.
+func (r *PeriodicReader) collectAndExport(ctx context.Context) error {
+	ctx, cancel := context.WithTimeout(ctx, r.timeout)
+	defer cancel()
+
+	// Reuse a ResourceMetrics from the pool (see #3047) instead of allocating
+	// one on every collect.
+	rm := r.rmPool.Get().(*metricdata.ResourceMetrics)
+	err := r.Collect(ctx, rm)
+	if err == nil {
+		err = r.export(ctx, rm)
+	}
+	r.rmPool.Put(rm)
+	return err
+}
+
+// Collect gathers all metric data related to the Reader from
+// the SDK and other Producers and stores the result in rm. The metric
+// data is not exported to the configured exporter; it is left to the caller to
+// handle that if desired.
+//
+// Collect will return an error if called after shutdown.
+// Collect will return an error if rm is a nil ResourceMetrics.
+// Collect will return an error if the context's Done channel is closed.
+//
+// This method is safe to call concurrently.
+func (r *PeriodicReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+	if rm == nil {
+		return errors.New("periodic reader: *metricdata.ResourceMetrics is nil")
+	}
+	return r.collect(ctx, r.sdkProducer.Load(), rm)
+}
+
+// collect unwraps p as a produceHolder and returns its produce results.
+func (r *PeriodicReader) collect(ctx context.Context, p interface{}, rm *metricdata.ResourceMetrics) error {
+	if p == nil {
+		return ErrReaderNotRegistered
+	}
+
+	ph, ok := p.(produceHolder)
+	if !ok {
+		// The atomic.Value is entirely in the periodicReader's control so
+		// this should never happen. In the unforeseen case that this does
+		// happen, return an error instead of panicking so a user's code does
+		// not halt in the process.
+		err := fmt.Errorf("periodic reader: invalid producer: %T", p)
+		return err
+	}
+
+	err := ph.produce(ctx, rm)
+	if err != nil {
+		return err
+	}
+	var errs []error
+	for _, producer := range r.externalProducers.Load().([]Producer) {
+		externalMetrics, err := producer.Produce(ctx)
+		if err != nil {
+			errs = append(errs, err)
+		}
+		rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...)
+ } + + global.Debug("PeriodicReader collection", "Data", rm) + + return unifyErrors(errs) +} + +// export exports metric data m using r's exporter. +func (r *PeriodicReader) export(ctx context.Context, m *metricdata.ResourceMetrics) error { + return r.exporter.Export(ctx, m) +} + +// ForceFlush flushes pending telemetry. +// +// This method is safe to call concurrently. +func (r *PeriodicReader) ForceFlush(ctx context.Context) error { + // Prioritize the ctx timeout if it is set. + if _, ok := ctx.Deadline(); !ok { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, r.timeout) + defer cancel() + } + + errCh := make(chan error, 1) + select { + case r.flushCh <- errCh: + select { + case err := <-errCh: + if err != nil { + return err + } + close(errCh) + case <-ctx.Done(): + return ctx.Err() + } + case <-r.done: + return ErrReaderShutdown + case <-ctx.Done(): + return ctx.Err() + } + return r.exporter.ForceFlush(ctx) +} + +// Shutdown flushes pending telemetry and then stops the export pipeline. +// +// This method is safe to call concurrently. +func (r *PeriodicReader) Shutdown(ctx context.Context) error { + err := ErrReaderShutdown + r.shutdownOnce.Do(func() { + // Prioritize the ctx timeout if it is set. + if _, ok := ctx.Deadline(); !ok { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, r.timeout) + defer cancel() + } + + // Stop the run loop. + r.cancel() + <-r.done + + // Any future call to Collect will now return ErrReaderShutdown. + ph := r.sdkProducer.Swap(produceHolder{ + produce: shutdownProducer{}.produce, + }) + + if ph != nil { // Reader was registered. + // Flush pending telemetry. + m := r.rmPool.Get().(*metricdata.ResourceMetrics) + err = r.collect(ctx, ph, m) + if err == nil { + err = r.export(ctx, m) + } + r.rmPool.Put(m) + } + + sErr := r.exporter.Shutdown(ctx) + if err == nil || errors.Is(err, ErrReaderShutdown) { + err = sErr + } + + r.mu.Lock() + defer r.mu.Unlock() + r.isShutdown = true + // release references to Producer(s) + r.externalProducers.Store([]Producer{}) + }) + return err +} + +// MarshalLog returns logging data about the PeriodicReader. 
+func (r *PeriodicReader) MarshalLog() interface{} {
+	r.mu.Lock()
+	down := r.isShutdown
+	r.mu.Unlock()
+	return struct {
+		Type       string
+		Exporter   Exporter
+		Registered bool
+		Shutdown   bool
+		Interval   time.Duration
+		Timeout    time.Duration
+	}{
+		Type:       "PeriodicReader",
+		Exporter:   r.exporter,
+		Registered: r.sdkProducer.Load() != nil,
+		Shutdown:   down,
+		Interval:   r.interval,
+		Timeout:    r.timeout,
+	}
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
new file mode 100644
index 00000000000..823bf2fe3d2
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
@@ -0,0 +1,655 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"container/list"
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/embedded"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/metric/internal"
+	"go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+	"go.opentelemetry.io/otel/sdk/metric/internal/x"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+	"go.opentelemetry.io/otel/sdk/resource"
+)
+
+var (
+	errCreatingAggregators     = errors.New("could not create all aggregators")
+	errIncompatibleAggregation = errors.New("incompatible aggregation")
+	errUnknownAggregation      = errors.New("unrecognized aggregation")
+)
+
+// instrumentSync is a synchronization point between a pipeline and an
+// instrument's aggregate function.
+type instrumentSync struct {
+	name        string
+	description string
+	unit        string
+	compAgg     aggregate.ComputeAggregation
+}
+
+func newPipeline(res *resource.Resource, reader Reader, views []View) *pipeline {
+	if res == nil {
+		res = resource.Empty()
+	}
+	return &pipeline{
+		resource: res,
+		reader:   reader,
+		views:    views,
+		// aggregations is lazy allocated when needed.
+	}
+}
+
+// pipeline connects all of the instruments created by a meter provider to a Reader.
+// This is the object that will be registered with Reader.register() when a
+// meter provider is created.
+//
+// As instruments are created, the instrument should be checked against the
+// views of the Reader, and if it matches, each aggregate function should be
+// added to the pipeline.
+type pipeline struct {
+	resource *resource.Resource
+
+	reader Reader
+	views  []View
+
+	sync.Mutex
+	aggregations   map[instrumentation.Scope][]instrumentSync
+	callbacks      []func(context.Context) error
+	multiCallbacks list.List
+}
+
+// addSync adds the instrumentSync to pipeline p with scope. This method is not
+// idempotent. Duplicate calls will result in duplicate additions; it is the
+// caller's responsibility to ensure this is called with unique values.
+func (p *pipeline) addSync(scope instrumentation.Scope, iSync instrumentSync) {
+	p.Lock()
+	defer p.Unlock()
+	if p.aggregations == nil {
+		p.aggregations = map[instrumentation.Scope][]instrumentSync{
+			scope: {iSync},
+		}
+		return
+	}
+	p.aggregations[scope] = append(p.aggregations[scope], iSync)
+}
+
+type multiCallback func(context.Context) error
+
+// addMultiCallback registers a multi-instrument callback to be run when
+// `produce()` is called.
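+// The returned unregister function removes the callback from the pipeline;
+// it is what ultimately backs Registration.Unregister for callbacks
+// registered through meter.RegisterCallback.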
+func (p *pipeline) addMultiCallback(c multiCallback) (unregister func()) { + p.Lock() + defer p.Unlock() + e := p.multiCallbacks.PushBack(c) + return func() { + p.Lock() + p.multiCallbacks.Remove(e) + p.Unlock() + } +} + +// produce returns aggregated metrics from a single collection. +// +// This method is safe to call concurrently. +func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) error { + p.Lock() + defer p.Unlock() + + var errs multierror + for _, c := range p.callbacks { + // TODO make the callbacks parallel. ( #3034 ) + if err := c(ctx); err != nil { + errs.append(err) + } + if err := ctx.Err(); err != nil { + rm.Resource = nil + rm.ScopeMetrics = rm.ScopeMetrics[:0] + return err + } + } + for e := p.multiCallbacks.Front(); e != nil; e = e.Next() { + // TODO make the callbacks parallel. ( #3034 ) + f := e.Value.(multiCallback) + if err := f(ctx); err != nil { + errs.append(err) + } + if err := ctx.Err(); err != nil { + // This means the context expired before we finished running callbacks. + rm.Resource = nil + rm.ScopeMetrics = rm.ScopeMetrics[:0] + return err + } + } + + rm.Resource = p.resource + rm.ScopeMetrics = internal.ReuseSlice(rm.ScopeMetrics, len(p.aggregations)) + + i := 0 + for scope, instruments := range p.aggregations { + rm.ScopeMetrics[i].Metrics = internal.ReuseSlice(rm.ScopeMetrics[i].Metrics, len(instruments)) + j := 0 + for _, inst := range instruments { + data := rm.ScopeMetrics[i].Metrics[j].Data + if n := inst.compAgg(&data); n > 0 { + rm.ScopeMetrics[i].Metrics[j].Name = inst.name + rm.ScopeMetrics[i].Metrics[j].Description = inst.description + rm.ScopeMetrics[i].Metrics[j].Unit = inst.unit + rm.ScopeMetrics[i].Metrics[j].Data = data + j++ + } + } + rm.ScopeMetrics[i].Metrics = rm.ScopeMetrics[i].Metrics[:j] + if len(rm.ScopeMetrics[i].Metrics) > 0 { + rm.ScopeMetrics[i].Scope = scope + i++ + } + } + + rm.ScopeMetrics = rm.ScopeMetrics[:i] + + return errs.errorOrNil() +} + +// inserter facilitates inserting of new instruments from a single scope into a +// pipeline. +type inserter[N int64 | float64] struct { + // aggregators is a cache that holds aggregate function inputs whose + // outputs have been inserted into the underlying reader pipeline. This + // cache ensures no duplicate aggregate functions are inserted into the + // reader pipeline and if a new request during an instrument creation asks + // for the same aggregate function input the same instance is returned. + aggregators *cache[instID, aggVal[N]] + + // views is a cache that holds instrument identifiers for all the + // instruments a Meter has created, it is provided from the Meter that owns + // this inserter. This cache ensures during the creation of instruments + // with the same name but different options (e.g. description, unit) a + // warning message is logged. + views *cache[string, instID] + + pipeline *pipeline +} + +func newInserter[N int64 | float64](p *pipeline, vc *cache[string, instID]) *inserter[N] { + if vc == nil { + vc = &cache[string, instID]{} + } + return &inserter[N]{ + aggregators: &cache[instID, aggVal[N]]{}, + views: vc, + pipeline: p, + } +} + +// Instrument inserts the instrument inst with instUnit into a pipeline. All +// views the pipeline contains are matched against, and any matching view that +// creates a unique aggregate function will have its output inserted into the +// pipeline and its input included in the returned slice. +// +// The returned aggregate function inputs are ensured to be deduplicated and +// unique. 
If another view in another pipeline that is cached by this +// inserter's cache has already inserted the same aggregate function for the +// same instrument, that functions input instance is returned. +// +// If another instrument has already been inserted by this inserter, or any +// other using the same cache, and it conflicts with the instrument being +// inserted in this call, an aggregate function input matching the arguments +// will still be returned but an Info level log message will also be logged to +// the OTel global logger. +// +// If the passed instrument would result in an incompatible aggregate function, +// an error is returned and that aggregate function output is not inserted nor +// is its input returned. +// +// If an instrument is determined to use a Drop aggregation, that instrument is +// not inserted nor returned. +func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) ([]aggregate.Measure[N], error) { + var ( + matched bool + measures []aggregate.Measure[N] + ) + + errs := &multierror{wrapped: errCreatingAggregators} + seen := make(map[uint64]struct{}) + for _, v := range i.pipeline.views { + stream, match := v(inst) + if !match { + continue + } + matched = true + in, id, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) + if err != nil { + errs.append(err) + } + if in == nil { // Drop aggregation. + continue + } + if _, ok := seen[id]; ok { + // This aggregate function has already been added. + continue + } + seen[id] = struct{}{} + measures = append(measures, in) + } + + if matched { + return measures, errs.errorOrNil() + } + + // Apply implicit default view if no explicit matched. + stream := Stream{ + Name: inst.Name, + Description: inst.Description, + Unit: inst.Unit, + } + in, _, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) + if err != nil { + errs.append(err) + } + if in != nil { + // Ensured to have not seen given matched was false. + measures = append(measures, in) + } + return measures, errs.errorOrNil() +} + +// addCallback registers a single instrument callback to be run when +// `produce()` is called. +func (i *inserter[N]) addCallback(cback func(context.Context) error) { + i.pipeline.Lock() + defer i.pipeline.Unlock() + i.pipeline.callbacks = append(i.pipeline.callbacks, cback) +} + +var aggIDCount uint64 + +// aggVal is the cached value in an aggregators cache. +type aggVal[N int64 | float64] struct { + ID uint64 + Measure aggregate.Measure[N] + Err error +} + +// readerDefaultAggregation returns the default aggregation for the instrument +// kind based on the reader's aggregation preferences. This is used unless the +// aggregation is overridden with a view. +func (i *inserter[N]) readerDefaultAggregation(kind InstrumentKind) Aggregation { + aggregation := i.pipeline.reader.aggregation(kind) + switch aggregation.(type) { + case nil, AggregationDefault: + // If the reader returns default or nil use the default selector. + aggregation = DefaultAggregationSelector(kind) + default: + // Deep copy and validate before using. + aggregation = aggregation.copy() + if err := aggregation.err(); err != nil { + orig := aggregation + aggregation = DefaultAggregationSelector(kind) + global.Error( + err, "using default aggregation instead", + "aggregation", orig, + "replacement", aggregation, + ) + } + } + return aggregation +} + +// cachedAggregator returns the appropriate aggregate input and output +// functions for an instrument configuration. 
If the exact instrument has been
+// created within the inst.Scope, those aggregate function instances will be
+// returned. Otherwise, new computed aggregate functions will be cached and
+// returned.
+//
+// If the instrument configuration conflicts with an instrument that has
+// already been created (e.g. description, unit, data type) a warning will be
+// logged at the "Info" level with the global OTel logger. Valid new aggregate
+// functions for the instrument configuration will still be returned without an
+// error.
+//
+// If the instrument defines an unknown or incompatible aggregation, an error
+// is returned.
+func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind InstrumentKind, stream Stream, readerAggregation Aggregation) (meas aggregate.Measure[N], aggID uint64, err error) {
+	switch stream.Aggregation.(type) {
+	case nil:
+		// The aggregation was not overridden with a view. Use the aggregation
+		// provided by the reader.
+		stream.Aggregation = readerAggregation
+	case AggregationDefault:
+		// The view explicitly requested the default aggregation.
+		stream.Aggregation = DefaultAggregationSelector(kind)
+	}
+
+	if err := isAggregatorCompatible(kind, stream.Aggregation); err != nil {
+		return nil, 0, fmt.Errorf(
+			"creating aggregator with instrumentKind: %d, aggregation %v: %w",
+			kind, stream.Aggregation, err,
+		)
+	}
+
+	id := i.instID(kind, stream)
+	// If there is a conflict, the specification says the view should
+	// still be applied and a warning should be logged.
+	i.logConflict(id)
+
+	// If there are requests for the same instrument with different name
+	// casing, the first-seen needs to be returned. Use a normalized ID for the
+	// cache lookup to ensure the correct comparison.
+	normID := id.normalize()
+	cv := i.aggregators.Lookup(normID, func() aggVal[N] {
+		b := aggregate.Builder[N]{
+			Temporality:   i.pipeline.reader.temporality(kind),
+			ReservoirFunc: reservoirFunc[N](stream.Aggregation),
+		}
+		b.Filter = stream.AttributeFilter
+		// A value less than or equal to zero will disable the aggregation
+		// limits for the builder (and all the created aggregates).
+		// CardinalityLimit.Lookup returns 0 by default if unset (or
+		// unrecognized input). Use that value directly.
+		b.AggregationLimit, _ = x.CardinalityLimit.Lookup()
+
+		in, out, err := i.aggregateFunc(b, stream.Aggregation, kind)
+		if err != nil {
+			return aggVal[N]{0, nil, err}
+		}
+		if in == nil { // Drop aggregator.
+			return aggVal[N]{0, nil, nil}
+		}
+		i.pipeline.addSync(scope, instrumentSync{
+			// Use the first-seen name casing for this and all subsequent
+			// requests of this instrument.
+			name:        stream.Name,
+			description: stream.Description,
+			unit:        stream.Unit,
+			compAgg:     out,
+		})
+		id := atomic.AddUint64(&aggIDCount, 1)
+		return aggVal[N]{id, in, err}
+	})
+	return cv.Measure, cv.ID, cv.Err
+}
+
+// logConflict checks whether an instrument with the same case-insensitive
+// name as id has already been created. If that instrument conflicts with id,
+// a warning is logged.
+func (i *inserter[N]) logConflict(id instID) {
+	// The API specification defines names as case-insensitive. Any difference
+	// in the casing of a name must be treated as a conflict. 
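+	// Editorial illustration (hypothetical values, not upstream code): two
+	// streams registered as "Requests" and "requests" normalize to the same
+	// cache key, so the second registration is reported below as a duplicate
+	// metric stream definition.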
+ name := id.normalize().Name + existing := i.views.Lookup(name, func() instID { return id }) + if id == existing { + return + } + + const msg = "duplicate metric stream definitions" + args := []interface{}{ + "names", fmt.Sprintf("%q, %q", existing.Name, id.Name), + "descriptions", fmt.Sprintf("%q, %q", existing.Description, id.Description), + "kinds", fmt.Sprintf("%s, %s", existing.Kind, id.Kind), + "units", fmt.Sprintf("%s, %s", existing.Unit, id.Unit), + "numbers", fmt.Sprintf("%s, %s", existing.Number, id.Number), + } + + // The specification recommends logging a suggested view to resolve + // conflicts if possible. + // + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#duplicate-instrument-registration + if id.Unit != existing.Unit || id.Number != existing.Number { + // There is no view resolution for these, don't make a suggestion. + global.Warn(msg, args...) + return + } + + var stream string + if id.Name != existing.Name || id.Kind != existing.Kind { + stream = `Stream{Name: "{{NEW_NAME}}"}` + } else if id.Description != existing.Description { + stream = fmt.Sprintf("Stream{Description: %q}", existing.Description) + } + + inst := fmt.Sprintf( + "Instrument{Name: %q, Description: %q, Kind: %q, Unit: %q}", + id.Name, id.Description, "InstrumentKind"+id.Kind.String(), id.Unit, + ) + args = append(args, "suggested.view", fmt.Sprintf("NewView(%s, %s)", inst, stream)) + + global.Warn(msg, args...) +} + +func (i *inserter[N]) instID(kind InstrumentKind, stream Stream) instID { + var zero N + return instID{ + Name: stream.Name, + Description: stream.Description, + Unit: stream.Unit, + Kind: kind, + Number: fmt.Sprintf("%T", zero), + } +} + +// aggregateFunc returns new aggregate functions matching agg, kind, and +// monotonic. If the agg is unknown or temporality is invalid, an error is +// returned. +func (i *inserter[N]) aggregateFunc(b aggregate.Builder[N], agg Aggregation, kind InstrumentKind) (meas aggregate.Measure[N], comp aggregate.ComputeAggregation, err error) { + switch a := agg.(type) { + case AggregationDefault: + return i.aggregateFunc(b, DefaultAggregationSelector(kind), kind) + case AggregationDrop: + // Return nil in and out to signify the drop aggregator. + case AggregationLastValue: + switch kind { + case InstrumentKindGauge: + meas, comp = b.LastValue() + case InstrumentKindObservableGauge: + meas, comp = b.PrecomputedLastValue() + } + case AggregationSum: + switch kind { + case InstrumentKindObservableCounter: + meas, comp = b.PrecomputedSum(true) + case InstrumentKindObservableUpDownCounter: + meas, comp = b.PrecomputedSum(false) + case InstrumentKindCounter, InstrumentKindHistogram: + meas, comp = b.Sum(true) + default: + // InstrumentKindUpDownCounter, InstrumentKindObservableGauge, and + // instrumentKindUndefined or other invalid instrument kinds. 
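+			// Editorial note (a sketch of the builder semantics, inferred
+			// from the calls in this file, where Sum's boolean parameter
+			// selects monotonicity): b.Sum(true) above builds monotonic sums
+			// for Counter and Histogram kinds, while the b.Sum(false)
+			// fallback below permits negative increments, as an
+			// UpDownCounter requires.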
+			meas, comp = b.Sum(false)
+		}
+	case AggregationExplicitBucketHistogram:
+		var noSum bool
+		switch kind {
+		case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge, InstrumentKindGauge:
+			// The sum should not be collected for any instrument that can make
+			// negative measurements:
+			// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations
+			noSum = true
+		}
+		meas, comp = b.ExplicitBucketHistogram(a.Boundaries, a.NoMinMax, noSum)
+	case AggregationBase2ExponentialHistogram:
+		var noSum bool
+		switch kind {
+		case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge, InstrumentKindGauge:
+			// The sum should not be collected for any instrument that can make
+			// negative measurements:
+			// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations
+			noSum = true
+		}
+		meas, comp = b.ExponentialBucketHistogram(a.MaxSize, a.MaxScale, a.NoMinMax, noSum)
+
+	default:
+		err = errUnknownAggregation
+	}
+
+	return meas, comp, err
+}
+
+// isAggregatorCompatible checks if the aggregation can be used by the instrument.
+// Current compatibility:
+//
+//	| Instrument Kind          | Drop | LastValue | Sum | Histogram | Exponential Histogram |
+//	|--------------------------|------|-----------|-----|-----------|-----------------------|
+//	| Counter                  | ✓    |           | ✓   | ✓         | ✓                     |
+//	| UpDownCounter            | ✓    |           | ✓   | ✓         | ✓                     |
+//	| Histogram                | ✓    |           | ✓   | ✓         | ✓                     |
+//	| Gauge                    | ✓    | ✓         |     | ✓         | ✓                     |
+//	| Observable Counter       | ✓    |           | ✓   | ✓         | ✓                     |
+//	| Observable UpDownCounter | ✓    |           | ✓   | ✓         | ✓                     |
+//	| Observable Gauge         | ✓    | ✓         |     | ✓         | ✓                     |
+func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error {
+	switch agg.(type) {
+	case AggregationDefault:
+		return nil
+	case AggregationExplicitBucketHistogram, AggregationBase2ExponentialHistogram:
+		switch kind {
+		case InstrumentKindCounter,
+			InstrumentKindUpDownCounter,
+			InstrumentKindHistogram,
+			InstrumentKindGauge,
+			InstrumentKindObservableCounter,
+			InstrumentKindObservableUpDownCounter,
+			InstrumentKindObservableGauge:
+			return nil
+		default:
+			return errIncompatibleAggregation
+		}
+	case AggregationSum:
+		switch kind {
+		case InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter, InstrumentKindCounter, InstrumentKindHistogram, InstrumentKindUpDownCounter:
+			return nil
+		default:
+			// TODO: review need for aggregation check after
+			// https://github.com/open-telemetry/opentelemetry-specification/issues/2710
+			return errIncompatibleAggregation
+		}
+	case AggregationLastValue:
+		switch kind {
+		case InstrumentKindObservableGauge, InstrumentKindGauge:
+			return nil
+		}
+		// TODO: review need for aggregation check after
+		// https://github.com/open-telemetry/opentelemetry-specification/issues/2710
+		return errIncompatibleAggregation
+	case AggregationDrop:
+		return nil
+	default:
+		// This is reached after checking for AggregationDefault; any other
+		// aggregation type is an error at this point.
+		return fmt.Errorf("%w: %v", errUnknownAggregation, agg)
+	}
+}
+
+// pipelines is the group of pipelines connecting Readers with instrument
+// measurement. 
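+//
+// A rough usage sketch (editorial, using only names defined in this file):
+// newPipelines builds one pipeline per Reader, and a resolver created over
+// those pipelines fans each measurement out to every pipeline, so each
+// registered Reader observes the same instruments independently.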
+type pipelines []*pipeline + +func newPipelines(res *resource.Resource, readers []Reader, views []View) pipelines { + pipes := make([]*pipeline, 0, len(readers)) + for _, r := range readers { + p := newPipeline(res, r, views) + r.register(p) + pipes = append(pipes, p) + } + return pipes +} + +func (p pipelines) registerMultiCallback(c multiCallback) metric.Registration { + unregs := make([]func(), len(p)) + for i, pipe := range p { + unregs[i] = pipe.addMultiCallback(c) + } + return unregisterFuncs{f: unregs} +} + +type unregisterFuncs struct { + embedded.Registration + f []func() +} + +func (u unregisterFuncs) Unregister() error { + for _, f := range u.f { + f() + } + return nil +} + +// resolver facilitates resolving aggregate functions an instrument calls to +// aggregate measurements with while updating all pipelines that need to pull +// from those aggregations. +type resolver[N int64 | float64] struct { + inserters []*inserter[N] +} + +func newResolver[N int64 | float64](p pipelines, vc *cache[string, instID]) resolver[N] { + in := make([]*inserter[N], len(p)) + for i := range in { + in[i] = newInserter[N](p[i], vc) + } + return resolver[N]{in} +} + +// Aggregators returns the Aggregators that must be updated by the instrument +// defined by key. +func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) { + var measures []aggregate.Measure[N] + + errs := &multierror{} + for _, i := range r.inserters { + in, err := i.Instrument(id, i.readerDefaultAggregation(id.Kind)) + if err != nil { + errs.append(err) + } + measures = append(measures, in...) + } + return measures, errs.errorOrNil() +} + +// HistogramAggregators returns the histogram Aggregators that must be updated by the instrument +// defined by key. If boundaries were provided on instrument instantiation, those take precedence +// over boundaries provided by the reader. +func (r resolver[N]) HistogramAggregators(id Instrument, boundaries []float64) ([]aggregate.Measure[N], error) { + var measures []aggregate.Measure[N] + + errs := &multierror{} + for _, i := range r.inserters { + agg := i.readerDefaultAggregation(id.Kind) + if histAgg, ok := agg.(AggregationExplicitBucketHistogram); ok && len(boundaries) > 0 { + histAgg.Boundaries = boundaries + agg = histAgg + } + in, err := i.Instrument(id, agg) + if err != nil { + errs.append(err) + } + measures = append(measures, in...) 
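+		// Editorial note: when the caller supplied explicit bucket
+		// boundaries above, they replace the reader's default
+		// ExplicitBucketHistogram boundaries for this instrument only;
+		// the reader's other aggregation preferences are kept as-is.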
+ } + return measures, errs.errorOrNil() +} + +type multierror struct { + wrapped error + errors []string +} + +func (m *multierror) errorOrNil() error { + if len(m.errors) == 0 { + return nil + } + if m.wrapped == nil { + return errors.New(strings.Join(m.errors, "; ")) + } + return fmt.Errorf("%w: %s", m.wrapped, strings.Join(m.errors, "; ")) +} + +func (m *multierror) append(err error) { + m.errors = append(m.errors, err.Error()) +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go new file mode 100644 index 00000000000..a82af538e67 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go @@ -0,0 +1,143 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "sync/atomic" + + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" + "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/sdk/instrumentation" +) + +// MeterProvider handles the creation and coordination of Meters. All Meters +// created by a MeterProvider will be associated with the same Resource, have +// the same Views applied to them, and have their produced metric telemetry +// passed to the configured Readers. +type MeterProvider struct { + embedded.MeterProvider + + pipes pipelines + meters cache[instrumentation.Scope, *meter] + + forceFlush, shutdown func(context.Context) error + stopped atomic.Bool +} + +// Compile-time check MeterProvider implements metric.MeterProvider. +var _ metric.MeterProvider = (*MeterProvider)(nil) + +// NewMeterProvider returns a new and configured MeterProvider. +// +// By default, the returned MeterProvider is configured with the default +// Resource and no Readers. Readers cannot be added after a MeterProvider is +// created. This means the returned MeterProvider, one created with no +// Readers, will perform no operations. +func NewMeterProvider(options ...Option) *MeterProvider { + conf := newConfig(options) + flush, sdown := conf.readerSignals() + + mp := &MeterProvider{ + pipes: newPipelines(conf.res, conf.readers, conf.views), + forceFlush: flush, + shutdown: sdown, + } + // Log after creation so all readers show correctly they are registered. + global.Info("MeterProvider created", + "Resource", conf.res, + "Readers", conf.readers, + "Views", len(conf.views), + ) + return mp +} + +// Meter returns a Meter with the given name and configured with options. +// +// The name should be the name of the instrumentation scope creating +// telemetry. This name may be the same as the instrumented code only if that +// code provides built-in instrumentation. +// +// Calls to the Meter method after Shutdown has been called will return Meters +// that perform no operations. +// +// This method is safe to call concurrently. +func (mp *MeterProvider) Meter(name string, options ...metric.MeterOption) metric.Meter { + if name == "" { + global.Warn("Invalid Meter name.", "name", name) + } + + if mp.stopped.Load() { + return noop.Meter{} + } + + c := metric.NewMeterConfig(options...) 
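+	// Editorial note (an assumption drawn from the Lookup call below, not
+	// upstream text): the instrumentation.Scope built next is the cache key,
+	// so repeated calls such as
+	//
+	//	mp.Meter("svc", metric.WithInstrumentationVersion("v1"))
+	//
+	// return the same *meter for an identical name, version, and schema URL.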
+	s := instrumentation.Scope{
+		Name:      name,
+		Version:   c.InstrumentationVersion(),
+		SchemaURL: c.SchemaURL(),
+	}
+
+	global.Info("Meter created",
+		"Name", s.Name,
+		"Version", s.Version,
+		"SchemaURL", s.SchemaURL,
+	)
+
+	return mp.meters.Lookup(s, func() *meter {
+		return newMeter(s, mp.pipes)
+	})
+}
+
+// ForceFlush flushes all pending telemetry.
+//
+// This method honors the deadline or cancellation of ctx. An appropriate
+// error will be returned in these situations. There is no guarantee that all
+// telemetry is flushed or all resources have been released in these
+// situations.
+//
+// ForceFlush calls ForceFlush(context.Context) error
+// on all Readers that implement this method.
+//
+// This method is safe to call concurrently.
+func (mp *MeterProvider) ForceFlush(ctx context.Context) error {
+	if mp.forceFlush != nil {
+		return mp.forceFlush(ctx)
+	}
+	return nil
+}
+
+// Shutdown shuts down the MeterProvider flushing all pending telemetry and
+// releasing any held computational resources.
+//
+// This call is idempotent. The first call will perform all flush and
+// releasing operations. Subsequent calls will perform no action and will
+// return an error stating this.
+//
+// Measurements made by instruments from meters this MeterProvider created
+// will not be exported after Shutdown is called.
+//
+// This method honors the deadline or cancellation of ctx. An appropriate
+// error will be returned in these situations. There is no guarantee that all
+// telemetry is flushed or all resources have been released in these
+// situations.
+//
+// This method is safe to call concurrently.
+func (mp *MeterProvider) Shutdown(ctx context.Context) error {
+	// Even though it may seem like there is a synchronization issue between the
+	// call to `Store` and checking `shutdown`, the Go concurrency model ensures
+	// that is not the case, as all the atomic operations executed in a program
+	// behave as though executed in some sequentially consistent order. This
+	// definition provides the same semantics as C++'s sequentially consistent
+	// atomics and Java's volatile variables.
+	// See https://go.dev/ref/mem#atomic and https://pkg.go.dev/sync/atomic.
+
+	mp.stopped.Store(true)
+	if mp.shutdown != nil {
+		return mp.shutdown(ctx)
+	}
+	return nil
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
new file mode 100644
index 00000000000..d94bdee75b7
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
@@ -0,0 +1,189 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// errDuplicateRegister is logged by a Reader when an attempt is made to
+// register it more than once.
+var errDuplicateRegister = fmt.Errorf("duplicate reader registration")
+
+// ErrReaderNotRegistered is returned if Collect or Shutdown are called before
+// the reader is registered with a MeterProvider.
+var ErrReaderNotRegistered = fmt.Errorf("reader is not registered")
+
+// ErrReaderShutdown is returned if Collect or Shutdown are called after a
+// reader has been Shutdown once.
+var ErrReaderShutdown = fmt.Errorf("reader is shutdown")
+
+// errNonPositiveDuration is logged when an environment variable
+// has a non-positive value. 
+var errNonPositiveDuration = fmt.Errorf("non-positive duration")
+
+// Reader is the interface used between the SDK and an
+// exporter. Control flow is bi-directional through the
+// Reader, since the SDK initiates ForceFlush and Shutdown
+// while the exporter initiates collection. The register() method here
+// informs the Reader that it can begin reading, signaling the
+// start of bi-directional control flow.
+//
+// Typically, push-based exporters that are periodic will
+// implement PeriodicExporter themselves and construct a
+// PeriodicReader to satisfy this interface.
+//
+// Pull-based exporters will typically implement Register
+// themselves, since they read on demand.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Reader interface {
+	// register registers a Reader with a MeterProvider.
+	// The producer argument allows the Reader to signal the sdk to collect
+	// and send aggregated metric measurements.
+	register(sdkProducer)
+
+	// temporality reports the Temporality for the instrument kind provided.
+	//
+	// This method needs to be concurrent safe with itself and all the other
+	// Reader methods.
+	temporality(InstrumentKind) metricdata.Temporality
+
+	// aggregation returns what Aggregation to use for an instrument kind.
+	//
+	// This method needs to be concurrent safe with itself and all the other
+	// Reader methods.
+	aggregation(InstrumentKind) Aggregation // nolint:revive // import-shadow for method scoped by type.
+
+	// Collect gathers and returns all metric data related to the Reader from
+	// the SDK and stores it in rm. An error is returned if this is called
+	// after Shutdown or if rm is nil.
+	//
+	// This method needs to be concurrent safe, and the cancellation of the
+	// passed context is expected to be honored.
+	Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Shutdown flushes all metric measurements held in an export pipeline and releases any
+	// held computational resources.
+	//
+	// The deadline or cancellation of the passed context is honored. An appropriate
+	// error will be returned in these situations. There is no guarantee that all
+	// telemetry is flushed or all resources have been released in these
+	// situations.
+	//
+	// After Shutdown is called, calls to Collect will perform no operation and instead will return
+	// an error indicating the shutdown state.
+	//
+	// This method needs to be concurrent safe.
+	Shutdown(context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// sdkProducer produces metrics for a Reader.
+type sdkProducer interface {
+	// produce returns aggregated metrics from a single collection.
+	//
+	// This method is safe to call concurrently.
+	produce(context.Context, *metricdata.ResourceMetrics) error
+}
+
+// Producer produces metrics for a Reader from an external source.
+type Producer interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Produce returns aggregated metrics from an external source.
+	//
+	// This method should be safe to call concurrently. 
+	Produce(context.Context) ([]metricdata.ScopeMetrics, error)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// produceHolder is used as an atomic.Value to wrap the non-concrete producer
+// type.
+type produceHolder struct {
+	produce func(context.Context, *metricdata.ResourceMetrics) error
+}
+
+// shutdownProducer always produces an ErrReaderShutdown error.
+type shutdownProducer struct{}
+
+// produce returns an ErrReaderShutdown error.
+func (p shutdownProducer) produce(context.Context, *metricdata.ResourceMetrics) error {
+	return ErrReaderShutdown
+}
+
+// TemporalitySelector selects the temporality to use based on the InstrumentKind.
+type TemporalitySelector func(InstrumentKind) metricdata.Temporality
+
+// DefaultTemporalitySelector is the default TemporalitySelector used if
+// WithTemporalitySelector is not provided. CumulativeTemporality will be used
+// for all instrument kinds if this TemporalitySelector is used.
+func DefaultTemporalitySelector(InstrumentKind) metricdata.Temporality {
+	return metricdata.CumulativeTemporality
+}
+
+// AggregationSelector selects the aggregation and the parameters to use for
+// that aggregation based on the InstrumentKind.
+//
+// If the Aggregation returned is nil or DefaultAggregation, the selection from
+// DefaultAggregationSelector will be used.
+type AggregationSelector func(InstrumentKind) Aggregation
+
+// DefaultAggregationSelector returns the default aggregation and parameters
+// that will be used to summarize measurements made from an instrument of
+// InstrumentKind. This AggregationSelector uses the following selection
+// mapping: Counter ⇨ Sum, Observable Counter ⇨ Sum, UpDownCounter ⇨ Sum,
+// Observable UpDownCounter ⇨ Sum, Gauge ⇨ LastValue,
+// Observable Gauge ⇨ LastValue, Histogram ⇨ ExplicitBucketHistogram.
+func DefaultAggregationSelector(ik InstrumentKind) Aggregation {
+	switch ik {
+	case InstrumentKindCounter, InstrumentKindUpDownCounter, InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter:
+		return AggregationSum{}
+	case InstrumentKindObservableGauge, InstrumentKindGauge:
+		return AggregationLastValue{}
+	case InstrumentKindHistogram:
+		return AggregationExplicitBucketHistogram{
+			Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+			NoMinMax:   false,
+		}
+	}
+	panic("unknown instrument kind")
+}
+
+// ReaderOption is an option which can be applied to manual or Periodic
+// readers.
+type ReaderOption interface {
+	PeriodicReaderOption
+	ManualReaderOption
+}
+
+// WithProducer registers p as an external Producer of metric data
+// for this Reader.
+func WithProducer(p Producer) ReaderOption {
+	return producerOption{p: p}
+}
+
+type producerOption struct {
+	p Producer
+}
+
+// applyManual returns a manualReaderConfig with option applied.
+func (o producerOption) applyManual(c manualReaderConfig) manualReaderConfig {
+	c.producers = append(c.producers, o.p)
+	return c
+}
+
+// applyPeriodic returns a periodicReaderConfig with option applied. 
+func (o producerOption) applyPeriodic(c periodicReaderConfig) periodicReaderConfig {
+	c.producers = append(c.producers, o.p)
+	return c
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
new file mode 100644
index 00000000000..44316caa11b
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+// version is the current release version of the metric SDK in use.
+func version() string {
+	return "1.29.0"
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/view.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/view.go
new file mode 100644
index 00000000000..cd08c673248
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/metric/view.go
@@ -0,0 +1,117 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"errors"
+	"regexp"
+	"strings"
+
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+var (
+	errMultiInst = errors.New("name replacement for multiple instruments")
+	errEmptyView = errors.New("no criteria provided for view")
+
+	emptyView = func(Instrument) (Stream, bool) { return Stream{}, false }
+)
+
+// View is an override to the default behavior of the SDK. It defines how data
+// should be collected for certain instruments. It returns true and the exact
+// Stream to use for matching Instruments. Otherwise, if the view does not
+// match, false is returned.
+type View func(Instrument) (Stream, bool)
+
+// NewView returns a View that applies the Stream mask for all instruments that
+// match criteria. The returned View will only apply mask if all non-zero-value
+// fields of criteria match the corresponding Instrument passed to the view. If
+// no criteria are provided (all fields of criteria are their zero-values), a
+// view that matches no instruments is returned. If you need to match a
+// zero-value field, create a View directly.
+//
+// The Name field of criteria supports wildcard pattern matching. The "*"
+// wildcard is recognized as matching zero or more characters, and "?" is
+// recognized as matching exactly one character. For example, a pattern of "*"
+// matches all instrument names.
+//
+// The Stream mask only applies updates for non-zero-value fields. By default,
+// the Instrument the View matches against will be used for the Name,
+// Description, and Unit of the returned Stream, and no Aggregation or
+// AttributeFilter is set. All non-zero-value fields of mask are used instead
+// of the default. If you need to zero out a Stream field returned from a
+// View, create a View directly.
+func NewView(criteria Instrument, mask Stream) View {
+	if criteria.IsEmpty() {
+		global.Error(
+			errEmptyView, "dropping view",
+			"mask", mask,
+		)
+		return emptyView
+	}
+
+	var matchFunc func(Instrument) bool
+	if strings.ContainsAny(criteria.Name, "*?") {
+		if mask.Name != "" {
+			global.Error(
+				errMultiInst, "dropping view",
+				"criteria", criteria,
+				"mask", mask,
+			)
+			return emptyView
+		}
+
+		// Handle branching here in NewView instead of criteria.matches so
+		// criteria.matches remains inlinable for the simple case. 
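+		// Editorial illustration (derived from the rewrites below): a
+		// criteria name of "http.*.duration" becomes the anchored regexp
+		// `^http\..*\.duration$` after QuoteMeta and the `\*` → `.*` and
+		// `\?` → `.` replacements.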
+ pattern := regexp.QuoteMeta(criteria.Name) + pattern = "^" + pattern + "$" + pattern = strings.ReplaceAll(pattern, `\?`, ".") + pattern = strings.ReplaceAll(pattern, `\*`, ".*") + re := regexp.MustCompile(pattern) + matchFunc = func(i Instrument) bool { + return re.MatchString(i.Name) && + criteria.matchesDescription(i) && + criteria.matchesKind(i) && + criteria.matchesUnit(i) && + criteria.matchesScope(i) + } + } else { + matchFunc = criteria.matches + } + + var agg Aggregation + if mask.Aggregation != nil { + agg = mask.Aggregation.copy() + if err := agg.err(); err != nil { + global.Error( + err, "not using aggregation with view", + "criteria", criteria, + "mask", mask, + ) + agg = nil + } + } + + return func(i Instrument) (Stream, bool) { + if matchFunc(i) { + return Stream{ + Name: nonZero(mask.Name, i.Name), + Description: nonZero(mask.Description, i.Description), + Unit: nonZero(mask.Unit, i.Unit), + Aggregation: agg, + AttributeFilter: mask.AttributeFilter, + }, true + } + return Stream{}, false + } +} + +// nonZero returns v if it is non-zero-valued, otherwise alt. +func nonZero[T comparable](v, alt T) T { + var zero T + if v != zero { + return v + } + return alt +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/README.md b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/README.md new file mode 100644 index 00000000000..4ad864d716e --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/README.md @@ -0,0 +1,3 @@ +# SDK Resource + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/resource)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource) diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go new file mode 100644 index 00000000000..95a61d61d49 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go @@ -0,0 +1,118 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "errors" + "fmt" + "strings" +) + +// ErrPartialResource is returned by a detector when complete source +// information for a Resource is unavailable or the source information +// contains invalid values that are omitted from the returned Resource. +var ErrPartialResource = errors.New("partial resource") + +// Detector detects OpenTelemetry resource information. +type Detector interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Detect returns an initialized Resource based on gathered information. + // If the source information to construct a Resource contains invalid + // values, a Resource is returned with the valid parts of the source + // information used for initialization along with an appropriately + // wrapped ErrPartialResource error. + Detect(ctx context.Context) (*Resource, error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// Detect returns a new [Resource] merged from all the Resources each of the +// detectors produces. Each of the detectors are called sequentially, in the +// order they are passed, merging the produced resource into the previous. 
+// +// This may return a partial Resource along with an error containing +// [ErrPartialResource] if that error is returned from a detector. It may also +// return a merge-conflicting Resource along with an error containing +// [ErrSchemaURLConflict] if merging Resources from different detectors results +// in a schema URL conflict. It is up to the caller to determine if this +// returned Resource should be used or not. +// +// If one of the detectors returns an error that is not [ErrPartialResource], +// the resource produced by the detector will not be merged and the returned +// error will wrap that detector's error. +func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { + r := new(Resource) + return r, detect(ctx, r, detectors) +} + +// detect runs all detectors using ctx and merges the result into res. This +// assumes res is allocated and not nil, it will panic otherwise. +// +// If the detectors or merging resources produces any errors (i.e. +// [ErrPartialResource] [ErrSchemaURLConflict]), a single error wrapping all of +// these errors will be returned. Otherwise, nil is returned. +func detect(ctx context.Context, res *Resource, detectors []Detector) error { + var ( + r *Resource + errs detectErrs + err error + ) + + for _, detector := range detectors { + if detector == nil { + continue + } + r, err = detector.Detect(ctx) + if err != nil { + errs = append(errs, err) + if !errors.Is(err, ErrPartialResource) { + continue + } + } + r, err = Merge(res, r) + if err != nil { + errs = append(errs, err) + } + *res = *r + } + + if len(errs) == 0 { + return nil + } + if errors.Is(errs, ErrSchemaURLConflict) { + // If there has been a merge conflict, ensure the resource has no + // schema URL. + res.schemaURL = "" + } + return errs +} + +type detectErrs []error + +func (e detectErrs) Error() string { + errStr := make([]string, len(e)) + for i, err := range e { + errStr[i] = fmt.Sprintf("* %s", err) + } + + format := "%d errors occurred detecting resource:\n\t%s" + return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t")) +} + +func (e detectErrs) Unwrap() error { + switch len(e) { + case 0: + return nil + case 1: + return e[0] + } + return e[1:] +} + +func (e detectErrs) Is(target error) bool { + return len(e) != 0 && errors.Is(e[0], target) +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go new file mode 100644 index 00000000000..6ac1cdbf7b4 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -0,0 +1,118 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/google/uuid" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type ( + // telemetrySDK is a Detector that provides information about + // the OpenTelemetry SDK used. This Detector is included as a + // builtin. If these resource attributes are not wanted, use + // the WithTelemetrySDK(nil) or WithoutBuiltin() options to + // explicitly disable them. + telemetrySDK struct{} + + // host is a Detector that provides information about the host + // being run on. This Detector is included as a builtin. 
If + // these resource attributes are not wanted, use the + // WithHost(nil) or WithoutBuiltin() options to explicitly + // disable them. + host struct{} + + stringDetector struct { + schemaURL string + K attribute.Key + F func() (string, error) + } + + defaultServiceNameDetector struct{} + + defaultServiceInstanceIDDetector struct{} +) + +var ( + _ Detector = telemetrySDK{} + _ Detector = host{} + _ Detector = stringDetector{} + _ Detector = defaultServiceNameDetector{} + _ Detector = defaultServiceInstanceIDDetector{} +) + +// Detect returns a *Resource that describes the OpenTelemetry SDK used. +func (telemetrySDK) Detect(context.Context) (*Resource, error) { + return NewWithAttributes( + semconv.SchemaURL, + semconv.TelemetrySDKName("opentelemetry"), + semconv.TelemetrySDKLanguageGo, + semconv.TelemetrySDKVersion(sdk.Version()), + ), nil +} + +// Detect returns a *Resource that describes the host being run on. +func (host) Detect(ctx context.Context) (*Resource, error) { + return StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname).Detect(ctx) +} + +// StringDetector returns a Detector that will produce a *Resource +// containing the string as a value corresponding to k. The resulting Resource +// will have the specified schemaURL. +func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) Detector { + return stringDetector{schemaURL: schemaURL, K: k, F: f} +} + +// Detect returns a *Resource that describes the string as a value +// corresponding to attribute.Key as well as the specific schemaURL. +func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) { + value, err := sd.F() + if err != nil { + return nil, fmt.Errorf("%s: %w", string(sd.K), err) + } + a := sd.K.String(value) + if !a.Valid() { + return nil, fmt.Errorf("invalid attribute: %q -> %q", a.Key, a.Value.Emit()) + } + return NewWithAttributes(sd.schemaURL, sd.K.String(value)), nil +} + +// Detect implements Detector. +func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) { + return StringDetector( + semconv.SchemaURL, + semconv.ServiceNameKey, + func() (string, error) { + executable, err := os.Executable() + if err != nil { + return "unknown_service:go", nil + } + return "unknown_service:" + filepath.Base(executable), nil + }, + ).Detect(ctx) +} + +// Detect implements Detector. +func (defaultServiceInstanceIDDetector) Detect(ctx context.Context) (*Resource, error) { + return StringDetector( + semconv.SchemaURL, + semconv.ServiceInstanceIDKey, + func() (string, error) { + version4Uuid, err := uuid.NewRandom() + if err != nil { + return "", err + } + + return version4Uuid.String(), nil + }, + ).Detect(ctx) +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/config.go new file mode 100644 index 00000000000..0d6e213d924 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/config.go @@ -0,0 +1,195 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" +) + +// config contains configuration for Resource creation. +type config struct { + // detectors that will be evaluated. + detectors []Detector + // SchemaURL to associate with the Resource. + schemaURL string +} + +// Option is the interface that applies a configuration option. 
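+//
+// A minimal usage sketch (editorial; New is this package's constructor that
+// consumes these options):
+//
+//	res, err := resource.New(ctx,
+//		resource.WithFromEnv(),
+//		resource.WithTelemetrySDK(),
+//	)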
+type Option interface { + // apply sets the Option value of a config. + apply(config) config +} + +// WithAttributes adds attributes to the configured Resource. +func WithAttributes(attributes ...attribute.KeyValue) Option { + return WithDetectors(detectAttributes{attributes}) +} + +type detectAttributes struct { + attributes []attribute.KeyValue +} + +func (d detectAttributes) Detect(context.Context) (*Resource, error) { + return NewSchemaless(d.attributes...), nil +} + +// WithDetectors adds detectors to be evaluated for the configured resource. +func WithDetectors(detectors ...Detector) Option { + return detectorsOption{detectors: detectors} +} + +type detectorsOption struct { + detectors []Detector +} + +func (o detectorsOption) apply(cfg config) config { + cfg.detectors = append(cfg.detectors, o.detectors...) + return cfg +} + +// WithFromEnv adds attributes from environment variables to the configured resource. +func WithFromEnv() Option { + return WithDetectors(fromEnv{}) +} + +// WithHost adds attributes from the host to the configured resource. +func WithHost() Option { + return WithDetectors(host{}) +} + +// WithHostID adds host ID information to the configured resource. +func WithHostID() Option { + return WithDetectors(hostIDDetector{}) +} + +// WithTelemetrySDK adds TelemetrySDK version info to the configured resource. +func WithTelemetrySDK() Option { + return WithDetectors(telemetrySDK{}) +} + +// WithSchemaURL sets the schema URL for the configured resource. +func WithSchemaURL(schemaURL string) Option { + return schemaURLOption(schemaURL) +} + +type schemaURLOption string + +func (o schemaURLOption) apply(cfg config) config { + cfg.schemaURL = string(o) + return cfg +} + +// WithOS adds all the OS attributes to the configured Resource. +// See individual WithOS* functions to configure specific attributes. +func WithOS() Option { + return WithDetectors( + osTypeDetector{}, + osDescriptionDetector{}, + ) +} + +// WithOSType adds an attribute with the operating system type to the configured Resource. +func WithOSType() Option { + return WithDetectors(osTypeDetector{}) +} + +// WithOSDescription adds an attribute with the operating system description to the +// configured Resource. The formatted string is equivalent to the output of the +// `uname -snrvm` command. +func WithOSDescription() Option { + return WithDetectors(osDescriptionDetector{}) +} + +// WithProcess adds all the Process attributes to the configured Resource. +// +// Warning! This option will include process command line arguments. If these +// contain sensitive information it will be included in the exported resource. +// +// This option is equivalent to calling WithProcessPID, +// WithProcessExecutableName, WithProcessExecutablePath, +// WithProcessCommandArgs, WithProcessOwner, WithProcessRuntimeName, +// WithProcessRuntimeVersion, and WithProcessRuntimeDescription. See each +// option function for information about what resource attributes each +// includes. +func WithProcess() Option { + return WithDetectors( + processPIDDetector{}, + processExecutableNameDetector{}, + processExecutablePathDetector{}, + processCommandArgsDetector{}, + processOwnerDetector{}, + processRuntimeNameDetector{}, + processRuntimeVersionDetector{}, + processRuntimeDescriptionDetector{}, + ) +} + +// WithProcessPID adds an attribute with the process identifier (PID) to the +// configured Resource. 
+func WithProcessPID() Option { + return WithDetectors(processPIDDetector{}) +} + +// WithProcessExecutableName adds an attribute with the name of the process +// executable to the configured Resource. +func WithProcessExecutableName() Option { + return WithDetectors(processExecutableNameDetector{}) +} + +// WithProcessExecutablePath adds an attribute with the full path to the process +// executable to the configured Resource. +func WithProcessExecutablePath() Option { + return WithDetectors(processExecutablePathDetector{}) +} + +// WithProcessCommandArgs adds an attribute with all the command arguments (including +// the command/executable itself) as received by the process to the configured +// Resource. +// +// Warning! This option will include process command line arguments. If these +// contain sensitive information it will be included in the exported resource. +func WithProcessCommandArgs() Option { + return WithDetectors(processCommandArgsDetector{}) +} + +// WithProcessOwner adds an attribute with the username of the user that owns the process +// to the configured Resource. +func WithProcessOwner() Option { + return WithDetectors(processOwnerDetector{}) +} + +// WithProcessRuntimeName adds an attribute with the name of the runtime of this +// process to the configured Resource. +func WithProcessRuntimeName() Option { + return WithDetectors(processRuntimeNameDetector{}) +} + +// WithProcessRuntimeVersion adds an attribute with the version of the runtime of +// this process to the configured Resource. +func WithProcessRuntimeVersion() Option { + return WithDetectors(processRuntimeVersionDetector{}) +} + +// WithProcessRuntimeDescription adds an attribute with an additional description +// about the runtime of the process to the configured Resource. +func WithProcessRuntimeDescription() Option { + return WithDetectors(processRuntimeDescriptionDetector{}) +} + +// WithContainer adds all the Container attributes to the configured Resource. +// See individual WithContainer* functions to configure specific attributes. +func WithContainer() Option { + return WithDetectors( + cgroupContainerIDDetector{}, + ) +} + +// WithContainerID adds an attribute with the id of the container to the configured Resource. +// Note: WithContainerID will not extract the correct container ID in an ECS environment. +// Please use the ECS resource detector instead (https://pkg.go.dev/go.opentelemetry.io/contrib/detectors/aws/ecs). +func WithContainerID() Option { + return WithDetectors(cgroupContainerIDDetector{}) +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/container.go new file mode 100644 index 00000000000..5ecd859a52d --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -0,0 +1,89 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "bufio" + "context" + "errors" + "io" + "os" + "regexp" + + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type containerIDProvider func() (string, error) + +var ( + containerID containerIDProvider = getContainerIDFromCGroup + cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*[-:])?([0-9a-f]+)(?:\.|\s*$)`) +) + +type cgroupContainerIDDetector struct{} + +const cgroupPath = "/proc/self/cgroup" + +// Detect returns a *Resource that describes the id of the container. 
+// If no container ID is found, an empty resource is returned.
+func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) {
+	containerID, err := containerID()
+	if err != nil {
+		return nil, err
+	}
+
+	if containerID == "" {
+		return Empty(), nil
+	}
+	return NewWithAttributes(semconv.SchemaURL, semconv.ContainerID(containerID)), nil
+}
+
+var (
+	defaultOSStat = os.Stat
+	osStat        = defaultOSStat
+
+	defaultOSOpen = func(name string) (io.ReadCloser, error) {
+		return os.Open(name)
+	}
+	osOpen = defaultOSOpen
+)
+
+// getContainerIDFromCGroup returns the ID of the container from the cgroup file.
+// If no container ID is found, an empty string is returned.
+func getContainerIDFromCGroup() (string, error) {
+	if _, err := osStat(cgroupPath); errors.Is(err, os.ErrNotExist) {
+		// File does not exist, skip
+		return "", nil
+	}
+
+	file, err := osOpen(cgroupPath)
+	if err != nil {
+		return "", err
+	}
+	defer file.Close()
+
+	return getContainerIDFromReader(file), nil
+}
+
+// getContainerIDFromReader returns the ID of the container from reader.
+func getContainerIDFromReader(reader io.Reader) string {
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() {
+		line := scanner.Text()
+
+		if id := getContainerIDFromLine(line); id != "" {
+			return id
+		}
+	}
+	return ""
+}
+
+// getContainerIDFromLine returns the ID of the container from one string line.
+func getContainerIDFromLine(line string) string {
+	matches := cgroupContainerIDRe.FindStringSubmatch(line)
+	if len(matches) <= 1 {
+		return ""
+	}
+	return matches[1]
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
new file mode 100644
index 00000000000..64939a27131
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package resource provides detecting and representing resources.
+//
+// The fundamental struct is a Resource which holds identifying information
+// about the entities for which telemetry is exported.
+//
+// To automatically construct Resources from an environment a Detector
+// interface is defined. Implementations of this interface can be passed to
+// the Detect function to generate a Resource from the merged information.
+//
+// To load a user defined Resource from the environment variable
+// OTEL_RESOURCE_ATTRIBUTES the FromEnv Detector can be used. It will interpret
+// the value as a list of comma delimited key/value pairs
+// (e.g. `key1=value1,key2=value2,...`).
+//
+// While this package provides a stable API,
+// the attributes added by resource detectors may change. 
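+//
+// For example (an editorial illustration), with
+// OTEL_RESOURCE_ATTRIBUTES="service.namespace=shop,team=payments" the
+// environment detector contributes those two attributes to the resulting
+// Resource.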
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
new file mode 100644
index 00000000000..813f0562424
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
@@ -0,0 +1,95 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"os"
+	"strings"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+const (
+	// resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from.
+	resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" //nolint:gosec // False positive G101: Potential hardcoded credentials
+
+	// svcNameKey is the environment variable name that Service Name information will be read from.
+	svcNameKey = "OTEL_SERVICE_NAME"
+)
+
+// errMissingValue is returned when a resource value is missing.
+var errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource)
+
+// fromEnv is a Detector that implements the Detector interface and collects
+// resources from the environment. This Detector is included as a builtin.
+type fromEnv struct{}
+
+// Compile-time assertion that fromEnv implements the Detector interface.
+var _ Detector = fromEnv{}
+
+// Detect collects resources from the environment.
+func (fromEnv) Detect(context.Context) (*Resource, error) {
+	attrs := strings.TrimSpace(os.Getenv(resourceAttrKey))
+	svcName := strings.TrimSpace(os.Getenv(svcNameKey))
+
+	if attrs == "" && svcName == "" {
+		return Empty(), nil
+	}
+
+	var res *Resource
+
+	if svcName != "" {
+		res = NewSchemaless(semconv.ServiceName(svcName))
+	}
+
+	r2, err := constructOTResources(attrs)
+
+	// Ensure that the resource with the service name from OTEL_SERVICE_NAME
+	// takes precedence, if it was defined.
+	res, err2 := Merge(r2, res)
+
+	if err == nil {
+		err = err2
+	} else if err2 != nil {
+		err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()})
+	}
+
+	return res, err
+}
+
+func constructOTResources(s string) (*Resource, error) {
+	if s == "" {
+		return Empty(), nil
+	}
+	pairs := strings.Split(s, ",")
+	var attrs []attribute.KeyValue
+	var invalid []string
+	for _, p := range pairs {
+		k, v, found := strings.Cut(p, "=")
+		if !found {
+			invalid = append(invalid, p)
+			continue
+		}
+		key := strings.TrimSpace(k)
+		val, err := url.PathUnescape(strings.TrimSpace(v))
+		if err != nil {
+			// Retain the original value if decoding fails; otherwise it
+			// would be an empty string. 
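+			// Editorial illustration: "k=v%20w" decodes to the value "v w",
+			// while "k=v%zz" fails to decode and keeps the raw "v%zz".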
+ val = v + otel.Handle(err) + } + attrs = append(attrs, attribute.String(key, val)) + } + var err error + if len(invalid) > 0 { + err = fmt.Errorf("%w: %v", errMissingValue, invalid) + } + return NewSchemaless(attrs...), err +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go new file mode 100644 index 00000000000..2d0f65498a0 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go @@ -0,0 +1,109 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "errors" + "strings" + + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type hostIDProvider func() (string, error) + +var defaultHostIDProvider hostIDProvider = platformHostIDReader.read + +var hostID = defaultHostIDProvider + +type hostIDReader interface { + read() (string, error) +} + +type fileReader func(string) (string, error) + +type commandExecutor func(string, ...string) (string, error) + +// hostIDReaderBSD implements hostIDReader. +type hostIDReaderBSD struct { + execCommand commandExecutor + readFile fileReader +} + +// read attempts to read the machine-id from /etc/hostid. If not found it will +// execute `kenv -q smbios.system.uuid`. If neither location yields an id an +// error will be returned. +func (r *hostIDReaderBSD) read() (string, error) { + if result, err := r.readFile("/etc/hostid"); err == nil { + return strings.TrimSpace(result), nil + } + + if result, err := r.execCommand("kenv", "-q", "smbios.system.uuid"); err == nil { + return strings.TrimSpace(result), nil + } + + return "", errors.New("host id not found in: /etc/hostid or kenv") +} + +// hostIDReaderDarwin implements hostIDReader. +type hostIDReaderDarwin struct { + execCommand commandExecutor +} + +// read executes `ioreg -rd1 -c "IOPlatformExpertDevice"` and parses host id +// from the IOPlatformUUID line. If the command fails or the uuid cannot be +// parsed an error will be returned. +func (r *hostIDReaderDarwin) read() (string, error) { + result, err := r.execCommand("ioreg", "-rd1", "-c", "IOPlatformExpertDevice") + if err != nil { + return "", err + } + + lines := strings.Split(result, "\n") + for _, line := range lines { + if strings.Contains(line, "IOPlatformUUID") { + parts := strings.Split(line, " = ") + if len(parts) == 2 { + return strings.Trim(parts[1], "\""), nil + } + break + } + } + + return "", errors.New("could not parse IOPlatformUUID") +} + +type hostIDReaderLinux struct { + readFile fileReader +} + +// read attempts to read the machine-id from /etc/machine-id followed by +// /var/lib/dbus/machine-id. If neither location yields an ID an error will +// be returned. +func (r *hostIDReaderLinux) read() (string, error) { + if result, err := r.readFile("/etc/machine-id"); err == nil { + return strings.TrimSpace(result), nil + } + + if result, err := r.readFile("/var/lib/dbus/machine-id"); err == nil { + return strings.TrimSpace(result), nil + } + + return "", errors.New("host id not found in: /etc/machine-id or /var/lib/dbus/machine-id") +} + +type hostIDDetector struct{} + +// Detect returns a *Resource containing the platform specific host id. 
+func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) { + hostID, err := hostID() + if err != nil { + return nil, err + } + + return NewWithAttributes( + semconv.SchemaURL, + semconv.HostID(hostID), + ), nil +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go new file mode 100644 index 00000000000..cc8b8938ed5 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go @@ -0,0 +1,12 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build dragonfly || freebsd || netbsd || openbsd || solaris +// +build dragonfly freebsd netbsd openbsd solaris + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +var platformHostIDReader hostIDReader = &hostIDReaderBSD{ + execCommand: execCommand, + readFile: readFile, +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go new file mode 100644 index 00000000000..b09fde3b735 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +var platformHostIDReader hostIDReader = &hostIDReaderDarwin{ + execCommand: execCommand, +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go new file mode 100644 index 00000000000..d9e5d1a8fff --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build darwin || dragonfly || freebsd || netbsd || openbsd || solaris + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import "os/exec" + +func execCommand(name string, arg ...string) (string, error) { + cmd := exec.Command(name, arg...) 
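+	// Editorial note: cmd.Output runs the command and captures stdout only;
+	// a non-zero exit surfaces as an *exec.ExitError (with stderr attached
+	// to it, since cmd.Stderr is unset).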
+ b, err := cmd.Output()
+ if err != nil {
+ return "", err
+ }
+
+ return string(b), nil
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go
new file mode 100644
index 00000000000..f84f173240f
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go
@@ -0,0 +1,11 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build linux
+// +build linux
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+var platformHostIDReader hostIDReader = &hostIDReaderLinux{
+ readFile: readFile,
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go
new file mode 100644
index 00000000000..6354b356022
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build linux || dragonfly || freebsd || netbsd || openbsd || solaris
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import "os"
+
+func readFile(filename string) (string, error) {
+ b, err := os.ReadFile(filename)
+ if err != nil {
+ return "", err
+ }
+
+ return string(b), nil
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go
new file mode 100644
index 00000000000..df12c44c564
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go
@@ -0,0 +1,19 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows
+// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+// hostIDReaderUnsupported is a placeholder implementation for operating systems
+// for which this project currently doesn't support host.id
+// attribute detection. See the build tags declared at the top of this file
+// for a list of unsupported OSes.
+type hostIDReaderUnsupported struct{} + +func (*hostIDReaderUnsupported) read() (string, error) { + return "", nil +} + +var platformHostIDReader hostIDReader = &hostIDReaderUnsupported{} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go new file mode 100644 index 00000000000..71386e2da4c --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build windows +// +build windows + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "golang.org/x/sys/windows/registry" +) + +// implements hostIDReader +type hostIDReaderWindows struct{} + +// read reads MachineGuid from the windows registry key: +// SOFTWARE\Microsoft\Cryptography +func (*hostIDReaderWindows) read() (string, error) { + k, err := registry.OpenKey( + registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`, + registry.QUERY_VALUE|registry.WOW64_64KEY, + ) + + if err != nil { + return "", err + } + defer k.Close() + + guid, _, err := k.GetStringValue("MachineGuid") + if err != nil { + return "", err + } + + return guid, nil +} + +var platformHostIDReader hostIDReader = &hostIDReaderWindows{} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/os.go new file mode 100644 index 00000000000..8a48ab4fa32 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -0,0 +1,89 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type osDescriptionProvider func() (string, error) + +var defaultOSDescriptionProvider osDescriptionProvider = platformOSDescription + +var osDescription = defaultOSDescriptionProvider + +func setDefaultOSDescriptionProvider() { + setOSDescriptionProvider(defaultOSDescriptionProvider) +} + +func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) { + osDescription = osDescriptionProvider +} + +type ( + osTypeDetector struct{} + osDescriptionDetector struct{} +) + +// Detect returns a *Resource that describes the operating system type the +// service is running on. +func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { + osType := runtimeOS() + + osTypeAttribute := mapRuntimeOSToSemconvOSType(osType) + + return NewWithAttributes( + semconv.SchemaURL, + osTypeAttribute, + ), nil +} + +// Detect returns a *Resource that describes the operating system the +// service is running on. +func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { + description, err := osDescription() + if err != nil { + return nil, err + } + + return NewWithAttributes( + semconv.SchemaURL, + semconv.OSDescription(description), + ), nil +} + +// mapRuntimeOSToSemconvOSType translates the OS name as provided by the Go runtime +// into an OS type attribute with the corresponding value defined by the semantic +// conventions. In case the provided OS name isn't mapped, it's transformed to lowercase +// and used as the value for the returned OS type attribute. 
+func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue {
+ // the elements in this map are the intersection between
+ // available GOOS values and defined semconv OS types
+ osTypeAttributeMap := map[string]attribute.KeyValue{
+ "aix": semconv.OSTypeAIX,
+ "darwin": semconv.OSTypeDarwin,
+ "dragonfly": semconv.OSTypeDragonflyBSD,
+ "freebsd": semconv.OSTypeFreeBSD,
+ "linux": semconv.OSTypeLinux,
+ "netbsd": semconv.OSTypeNetBSD,
+ "openbsd": semconv.OSTypeOpenBSD,
+ "solaris": semconv.OSTypeSolaris,
+ "windows": semconv.OSTypeWindows,
+ "zos": semconv.OSTypeZOS,
+ }
+
+ var osTypeAttribute attribute.KeyValue
+
+ if attr, ok := osTypeAttributeMap[osType]; ok {
+ osTypeAttribute = attr
+ } else {
+ osTypeAttribute = semconv.OSTypeKey.String(strings.ToLower(osType))
+ }
+
+ return osTypeAttribute
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
new file mode 100644
index 00000000000..ce455dc544b
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
@@ -0,0 +1,91 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "os"
+)
+
+type plist struct {
+ XMLName xml.Name `xml:"plist"`
+ Dict dict `xml:"dict"`
+}
+
+type dict struct {
+ Key []string `xml:"key"`
+ String []string `xml:"string"`
+}
+
+// osRelease builds a string describing the operating system release based on the
+// contents of the property list (.plist) system files. If no .plist files are found,
+// or if the required properties to build the release description string are missing,
+// an empty string is returned instead. The generated string resembles the output of
+// the `sw_vers` commandline program, but in a single-line string. For more information
+// about the `sw_vers` program, see: https://www.unix.com/man-page/osx/1/SW_VERS.
+func osRelease() string {
+ file, err := getPlistFile()
+ if err != nil {
+ return ""
+ }
+
+ defer file.Close()
+
+ values, err := parsePlistFile(file)
+ if err != nil {
+ return ""
+ }
+
+ return buildOSRelease(values)
+}
+
+// getPlistFile returns a *os.File pointing to one of the well-known .plist files
+// available on macOS. If no file can be opened, it returns an error.
+func getPlistFile() (*os.File, error) {
+ return getFirstAvailableFile([]string{
+ "/System/Library/CoreServices/SystemVersion.plist",
+ "/System/Library/CoreServices/ServerVersion.plist",
+ })
+}
+
+// parsePlistFile processes the file pointed to by `file` as a .plist file and returns
+// a map with the key-values for each pair of correlated <key> and <string> elements
+// contained in it.
+func parsePlistFile(file io.Reader) (map[string]string, error) {
+ var v plist
+
+ err := xml.NewDecoder(file).Decode(&v)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(v.Dict.Key) != len(v.Dict.String) {
+ return nil, fmt.Errorf("the number of <key> and <string> elements doesn't match")
+ }
+
+ properties := make(map[string]string, len(v.Dict.Key))
+ for i, key := range v.Dict.Key {
+ properties[key] = v.Dict.String[i]
+ }
+
+ return properties, nil
+}
+
+// buildOSRelease builds a string describing the OS release based on the properties
+// available on the provided map. It tries to find the `ProductName`, `ProductVersion`
+// and `ProductBuildVersion` properties.
If some of these properties are not found,
+// it returns an empty string.
+func buildOSRelease(properties map[string]string) string {
+ productName := properties["ProductName"]
+ productVersion := properties["ProductVersion"]
+ productBuildVersion := properties["ProductBuildVersion"]
+
+ if productName == "" || productVersion == "" || productBuildVersion == "" {
+ return ""
+ }
+
+ return fmt.Sprintf("%s %s (%s)", productName, productVersion, productBuildVersion)
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
new file mode 100644
index 00000000000..f537e5ca5c4
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
@@ -0,0 +1,143 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix dragonfly freebsd linux netbsd openbsd solaris zos
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+// osRelease builds a string describing the operating system release based on the
+// properties of the os-release file. If no os-release file is found, or if the
+// required properties to build the release description string are missing, an empty
+// string is returned instead. For more information about os-release files, see:
+// https://www.freedesktop.org/software/systemd/man/os-release.html
+func osRelease() string {
+ file, err := getOSReleaseFile()
+ if err != nil {
+ return ""
+ }
+
+ defer file.Close()
+
+ values := parseOSReleaseFile(file)
+
+ return buildOSRelease(values)
+}
+
+// getOSReleaseFile returns a *os.File pointing to one of the well-known os-release
+// files, according to their order of preference. If no file can be opened, it
+// returns an error.
+func getOSReleaseFile() (*os.File, error) {
+ return getFirstAvailableFile([]string{"/etc/os-release", "/usr/lib/os-release"})
+}
+
+// parseOSReleaseFile processes the file pointed to by `file` as an os-release file and
+// returns a map with the key-values contained in it. Empty lines or lines starting
+// with a '#' character are ignored, as well as lines missing the key=value
+// separator. Values are unquoted and unescaped.
+func parseOSReleaseFile(file io.Reader) map[string]string {
+ values := make(map[string]string)
+ scanner := bufio.NewScanner(file)
+
+ for scanner.Scan() {
+ line := scanner.Text()
+
+ if skip(line) {
+ continue
+ }
+
+ key, value, ok := parse(line)
+ if ok {
+ values[key] = value
+ }
+ }
+
+ return values
+}
+
+// skip returns true if the line is blank or starts with a '#' character, and
+// therefore should be skipped from processing.
+func skip(line string) bool {
+ line = strings.TrimSpace(line)
+
+ return len(line) == 0 || strings.HasPrefix(line, "#")
+}
+
+// parse attempts to split the provided line on the first '=' character, and then
+// sanitize each side of the split before returning them as a key-value pair.
+func parse(line string) (string, string, bool) { + k, v, found := strings.Cut(line, "=") + + if !found || len(k) == 0 { + return "", "", false + } + + key := strings.TrimSpace(k) + value := unescape(unquote(strings.TrimSpace(v))) + + return key, value, true +} + +// unquote checks whether the string `s` is quoted with double or single quotes +// and, if so, returns a version of the string without them. Otherwise it returns +// the provided string unchanged. +func unquote(s string) string { + if len(s) < 2 { + return s + } + + if (s[0] == '"' || s[0] == '\'') && s[0] == s[len(s)-1] { + return s[1 : len(s)-1] + } + + return s +} + +// unescape removes the `\` prefix from some characters that are expected +// to have it added in front of them for escaping purposes. +func unescape(s string) string { + return strings.NewReplacer( + `\$`, `$`, + `\"`, `"`, + `\'`, `'`, + `\\`, `\`, + "\\`", "`", + ).Replace(s) +} + +// buildOSRelease builds a string describing the OS release based on the properties +// available on the provided map. It favors a combination of the `NAME` and `VERSION` +// properties as first option (falling back to `VERSION_ID` if `VERSION` isn't +// found), and using `PRETTY_NAME` alone if some of the previous are not present. If +// none of these properties are found, it returns an empty string. +// +// The rationale behind not using `PRETTY_NAME` as first choice was that, for some +// Linux distributions, it doesn't include the same detail that can be found on the +// individual `NAME` and `VERSION` properties, and combining `PRETTY_NAME` with +// other properties can produce "pretty" redundant strings in some cases. +func buildOSRelease(values map[string]string) string { + var osRelease string + + name := values["NAME"] + version := values["VERSION"] + + if version == "" { + version = values["VERSION_ID"] + } + + if name != "" && version != "" { + osRelease = fmt.Sprintf("%s %s", name, version) + } else { + osRelease = values["PRETTY_NAME"] + } + + return osRelease +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go new file mode 100644 index 00000000000..a6ff26a4d27 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go @@ -0,0 +1,79 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "fmt" + "os" + + "golang.org/x/sys/unix" +) + +type unameProvider func(buf *unix.Utsname) (err error) + +var defaultUnameProvider unameProvider = unix.Uname + +var currentUnameProvider = defaultUnameProvider + +func setDefaultUnameProvider() { + setUnameProvider(defaultUnameProvider) +} + +func setUnameProvider(unameProvider unameProvider) { + currentUnameProvider = unameProvider +} + +// platformOSDescription returns a human readable OS version information string. +// The final string combines OS release information (where available) and the +// result of the `uname` system call. 
+func platformOSDescription() (string, error) {
+ uname, err := uname()
+ if err != nil {
+ return "", err
+ }
+
+ osRelease := osRelease()
+ if osRelease != "" {
+ return fmt.Sprintf("%s (%s)", osRelease, uname), nil
+ }
+
+ return uname, nil
+}
+
+// uname issues a uname(2) system call (or equivalent on systems that don't
+// have one) and formats the output in a single string, similar to the output
+// of the `uname` commandline program. The final string resembles the one
+// obtained with a call to `uname -snrvm`.
+func uname() (string, error) {
+ var utsName unix.Utsname
+
+ err := currentUnameProvider(&utsName)
+ if err != nil {
+ return "", err
+ }
+
+ return fmt.Sprintf("%s %s %s %s %s",
+ unix.ByteSliceToString(utsName.Sysname[:]),
+ unix.ByteSliceToString(utsName.Nodename[:]),
+ unix.ByteSliceToString(utsName.Release[:]),
+ unix.ByteSliceToString(utsName.Version[:]),
+ unix.ByteSliceToString(utsName.Machine[:]),
+ ), nil
+}
+
+// getFirstAvailableFile returns an *os.File of the first available
+// file from a list of candidate file paths.
+func getFirstAvailableFile(candidates []string) (*os.File, error) {
+ for _, c := range candidates {
+ file, err := os.Open(c)
+ if err == nil {
+ return file, nil
+ }
+ }
+
+ return nil, fmt.Errorf("no candidate file available: %v", candidates)
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
new file mode 100644
index 00000000000..a77742b0771
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
@@ -0,0 +1,15 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
+// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+// platformOSDescription is a placeholder implementation for OSes
+// for which this project currently doesn't support os.description
+// attribute detection. See the build tags declared at the top of this file
+// for a list of unsupported OSes.
+func platformOSDescription() (string, error) {
+ return "", nil
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go
new file mode 100644
index 00000000000..5e3d199d785
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go
@@ -0,0 +1,90 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "fmt"
+ "strconv"
+
+ "golang.org/x/sys/windows/registry"
+)
+
+// platformOSDescription returns a human readable OS version information string.
+// It does so by querying registry values under the
+// `SOFTWARE\Microsoft\Windows NT\CurrentVersion` key. The final string
+// resembles the one displayed by the Version Reporter Applet (winver.exe).
+func platformOSDescription() (string, error) { + k, err := registry.OpenKey( + registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) + + if err != nil { + return "", err + } + + defer k.Close() + + var ( + productName = readProductName(k) + displayVersion = readDisplayVersion(k) + releaseID = readReleaseID(k) + currentMajorVersionNumber = readCurrentMajorVersionNumber(k) + currentMinorVersionNumber = readCurrentMinorVersionNumber(k) + currentBuildNumber = readCurrentBuildNumber(k) + ubr = readUBR(k) + ) + + if displayVersion != "" { + displayVersion += " " + } + + return fmt.Sprintf("%s %s(%s) [Version %s.%s.%s.%s]", + productName, + displayVersion, + releaseID, + currentMajorVersionNumber, + currentMinorVersionNumber, + currentBuildNumber, + ubr, + ), nil +} + +func getStringValue(name string, k registry.Key) string { + value, _, _ := k.GetStringValue(name) + + return value +} + +func getIntegerValue(name string, k registry.Key) uint64 { + value, _, _ := k.GetIntegerValue(name) + + return value +} + +func readProductName(k registry.Key) string { + return getStringValue("ProductName", k) +} + +func readDisplayVersion(k registry.Key) string { + return getStringValue("DisplayVersion", k) +} + +func readReleaseID(k registry.Key) string { + return getStringValue("ReleaseID", k) +} + +func readCurrentMajorVersionNumber(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("CurrentMajorVersionNumber", k), 10) +} + +func readCurrentMinorVersionNumber(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("CurrentMinorVersionNumber", k), 10) +} + +func readCurrentBuildNumber(k registry.Key) string { + return getStringValue("CurrentBuildNumber", k) +} + +func readUBR(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("UBR", k), 10) +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/process.go new file mode 100644 index 00000000000..085fe68fd77 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -0,0 +1,173 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "fmt" + "os" + "os/user" + "path/filepath" + "runtime" + + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type ( + pidProvider func() int + executablePathProvider func() (string, error) + commandArgsProvider func() []string + ownerProvider func() (*user.User, error) + runtimeNameProvider func() string + runtimeVersionProvider func() string + runtimeOSProvider func() string + runtimeArchProvider func() string +) + +var ( + defaultPidProvider pidProvider = os.Getpid + defaultExecutablePathProvider executablePathProvider = os.Executable + defaultCommandArgsProvider commandArgsProvider = func() []string { return os.Args } + defaultOwnerProvider ownerProvider = user.Current + defaultRuntimeNameProvider runtimeNameProvider = func() string { + if runtime.Compiler == "gc" { + return "go" + } + return runtime.Compiler + } + defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version + defaultRuntimeOSProvider runtimeOSProvider = func() string { return runtime.GOOS } + defaultRuntimeArchProvider runtimeArchProvider = func() string { return runtime.GOARCH } +) + +var ( + pid = defaultPidProvider + executablePath = defaultExecutablePathProvider + 
commandArgs = defaultCommandArgsProvider + owner = defaultOwnerProvider + runtimeName = defaultRuntimeNameProvider + runtimeVersion = defaultRuntimeVersionProvider + runtimeOS = defaultRuntimeOSProvider + runtimeArch = defaultRuntimeArchProvider +) + +func setDefaultOSProviders() { + setOSProviders( + defaultPidProvider, + defaultExecutablePathProvider, + defaultCommandArgsProvider, + ) +} + +func setOSProviders( + pidProvider pidProvider, + executablePathProvider executablePathProvider, + commandArgsProvider commandArgsProvider, +) { + pid = pidProvider + executablePath = executablePathProvider + commandArgs = commandArgsProvider +} + +func setDefaultRuntimeProviders() { + setRuntimeProviders( + defaultRuntimeNameProvider, + defaultRuntimeVersionProvider, + defaultRuntimeOSProvider, + defaultRuntimeArchProvider, + ) +} + +func setRuntimeProviders( + runtimeNameProvider runtimeNameProvider, + runtimeVersionProvider runtimeVersionProvider, + runtimeOSProvider runtimeOSProvider, + runtimeArchProvider runtimeArchProvider, +) { + runtimeName = runtimeNameProvider + runtimeVersion = runtimeVersionProvider + runtimeOS = runtimeOSProvider + runtimeArch = runtimeArchProvider +} + +func setDefaultUserProviders() { + setUserProviders(defaultOwnerProvider) +} + +func setUserProviders(ownerProvider ownerProvider) { + owner = ownerProvider +} + +type ( + processPIDDetector struct{} + processExecutableNameDetector struct{} + processExecutablePathDetector struct{} + processCommandArgsDetector struct{} + processOwnerDetector struct{} + processRuntimeNameDetector struct{} + processRuntimeVersionDetector struct{} + processRuntimeDescriptionDetector struct{} +) + +// Detect returns a *Resource that describes the process identifier (PID) of the +// executing process. +func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil +} + +// Detect returns a *Resource that describes the name of the process executable. +func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) { + executableName := filepath.Base(commandArgs()[0]) + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil +} + +// Detect returns a *Resource that describes the full path of the process executable. +func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) { + executablePath, err := executablePath() + if err != nil { + return nil, err + } + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePath(executablePath)), nil +} + +// Detect returns a *Resource that describes all the command arguments as received +// by the process. +func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil +} + +// Detect returns a *Resource that describes the username of the user that owns the +// process. +func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { + owner, err := owner() + if err != nil { + return nil, err + } + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwner(owner.Username)), nil +} + +// Detect returns a *Resource that describes the name of the compiler used to compile +// this process image. 
+func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil +} + +// Detect returns a *Resource that describes the version of the runtime of this process. +func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil +} + +// Detect returns a *Resource that describes the runtime of this process. +func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { + runtimeDescription := fmt.Sprintf( + "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch()) + + return NewWithAttributes( + semconv.SchemaURL, + semconv.ProcessRuntimeDescription(runtimeDescription), + ), nil +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go new file mode 100644 index 00000000000..ad4b50df404 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -0,0 +1,294 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "errors" + "fmt" + "sync" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/internal/x" +) + +// Resource describes an entity about which identifying information +// and metadata is exposed. Resource is an immutable object, +// equivalent to a map from key to unique value. +// +// Resources should be passed and stored as pointers +// (`*resource.Resource`). The `nil` value is equivalent to an empty +// Resource. +type Resource struct { + attrs attribute.Set + schemaURL string +} + +var ( + defaultResource *Resource + defaultResourceOnce sync.Once +) + +// ErrSchemaURLConflict is an error returned when two Resources are merged +// together that contain different, non-empty, schema URLs. +var ErrSchemaURLConflict = errors.New("conflicting Schema URL") + +// New returns a [Resource] built using opts. +// +// This may return a partial Resource along with an error containing +// [ErrPartialResource] if options that provide a [Detector] are used and that +// error is returned from one or more of the Detectors. It may also return a +// merge-conflict Resource along with an error containing +// [ErrSchemaURLConflict] if merging Resources from the opts results in a +// schema URL conflict (see [Resource.Merge] for more information). It is up to +// the caller to determine if this returned Resource should be used or not +// based on these errors. +func New(ctx context.Context, opts ...Option) (*Resource, error) { + cfg := config{} + for _, opt := range opts { + cfg = opt.apply(cfg) + } + + r := &Resource{schemaURL: cfg.schemaURL} + return r, detect(ctx, r, cfg.detectors) +} + +// NewWithAttributes creates a resource from attrs and associates the resource with a +// schema URL. If attrs contains duplicate keys, the last value will be used. If attrs +// contains any invalid items those items will be dropped. The attrs are assumed to be +// in a schema identified by schemaURL. +func NewWithAttributes(schemaURL string, attrs ...attribute.KeyValue) *Resource { + resource := NewSchemaless(attrs...) 
+ resource.schemaURL = schemaURL + return resource +} + +// NewSchemaless creates a resource from attrs. If attrs contains duplicate keys, +// the last value will be used. If attrs contains any invalid items those items will +// be dropped. The resource will not be associated with a schema URL. If the schema +// of the attrs is known use NewWithAttributes instead. +func NewSchemaless(attrs ...attribute.KeyValue) *Resource { + if len(attrs) == 0 { + return &Resource{} + } + + // Ensure attributes comply with the specification: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/common/README.md#attribute + s, _ := attribute.NewSetWithFiltered(attrs, func(kv attribute.KeyValue) bool { + return kv.Valid() + }) + + // If attrs only contains invalid entries do not allocate a new resource. + if s.Len() == 0 { + return &Resource{} + } + + return &Resource{attrs: s} //nolint +} + +// String implements the Stringer interface and provides a +// human-readable form of the resource. +// +// Avoid using this representation as the key in a map of resources, +// use Equivalent() as the key instead. +func (r *Resource) String() string { + if r == nil { + return "" + } + return r.attrs.Encoded(attribute.DefaultEncoder()) +} + +// MarshalLog is the marshaling function used by the logging system to represent this Resource. +func (r *Resource) MarshalLog() interface{} { + return struct { + Attributes attribute.Set + SchemaURL string + }{ + Attributes: r.attrs, + SchemaURL: r.schemaURL, + } +} + +// Attributes returns a copy of attributes from the resource in a sorted order. +// To avoid allocating a new slice, use an iterator. +func (r *Resource) Attributes() []attribute.KeyValue { + if r == nil { + r = Empty() + } + return r.attrs.ToSlice() +} + +// SchemaURL returns the schema URL associated with Resource r. +func (r *Resource) SchemaURL() string { + if r == nil { + return "" + } + return r.schemaURL +} + +// Iter returns an iterator of the Resource attributes. +// This is ideal to use if you do not want a copy of the attributes. +func (r *Resource) Iter() attribute.Iterator { + if r == nil { + r = Empty() + } + return r.attrs.Iter() +} + +// Equal returns true when a Resource is equivalent to this Resource. +func (r *Resource) Equal(eq *Resource) bool { + if r == nil { + r = Empty() + } + if eq == nil { + eq = Empty() + } + return r.Equivalent() == eq.Equivalent() +} + +// Merge creates a new [Resource] by merging a and b. +// +// If there are common keys between a and b, then the value from b will +// overwrite the value from a, even if b's value is empty. +// +// The SchemaURL of the resources will be merged according to the +// [OpenTelemetry specification rules]: +// +// - If a's schema URL is empty then the returned Resource's schema URL will +// be set to the schema URL of b, +// - Else if b's schema URL is empty then the returned Resource's schema URL +// will be set to the schema URL of a, +// - Else if the schema URLs of a and b are the same then that will be the +// schema URL of the returned Resource, +// - Else this is a merging error. If the resources have different, +// non-empty, schema URLs an error containing [ErrSchemaURLConflict] will +// be returned with the merged Resource. The merged Resource will have an +// empty schema URL. It may be the case that some unintended attributes +// have been overwritten or old semantic conventions persisted in the +// returned Resource. 
It is up to the caller to determine if this returned +// Resource should be used or not. +// +// [OpenTelemetry specification rules]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#merge +func Merge(a, b *Resource) (*Resource, error) { + if a == nil && b == nil { + return Empty(), nil + } + if a == nil { + return b, nil + } + if b == nil { + return a, nil + } + + // Note: 'b' attributes will overwrite 'a' with last-value-wins in attribute.Key() + // Meaning this is equivalent to: append(a.Attributes(), b.Attributes()...) + mi := attribute.NewMergeIterator(b.Set(), a.Set()) + combine := make([]attribute.KeyValue, 0, a.Len()+b.Len()) + for mi.Next() { + combine = append(combine, mi.Attribute()) + } + + switch { + case a.schemaURL == "": + return NewWithAttributes(b.schemaURL, combine...), nil + case b.schemaURL == "": + return NewWithAttributes(a.schemaURL, combine...), nil + case a.schemaURL == b.schemaURL: + return NewWithAttributes(a.schemaURL, combine...), nil + } + // Return the merged resource with an appropriate error. It is up to + // the user to decide if the returned resource can be used or not. + return NewSchemaless(combine...), fmt.Errorf( + "%w: %s and %s", + ErrSchemaURLConflict, + a.schemaURL, + b.schemaURL, + ) +} + +// Empty returns an instance of Resource with no attributes. It is +// equivalent to a `nil` Resource. +func Empty() *Resource { + return &Resource{} +} + +// Default returns an instance of Resource with a default +// "service.name" and OpenTelemetrySDK attributes. +func Default() *Resource { + defaultResourceOnce.Do(func() { + var err error + defaultDetectors := []Detector{ + defaultServiceNameDetector{}, + fromEnv{}, + telemetrySDK{}, + } + if x.Resource.Enabled() { + defaultDetectors = append([]Detector{defaultServiceInstanceIDDetector{}}, defaultDetectors...) + } + defaultResource, err = Detect( + context.Background(), + defaultDetectors..., + ) + if err != nil { + otel.Handle(err) + } + // If Detect did not return a valid resource, fall back to emptyResource. + if defaultResource == nil { + defaultResource = &Resource{} + } + }) + return defaultResource +} + +// Environment returns an instance of Resource with attributes +// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable. +func Environment() *Resource { + detector := &fromEnv{} + resource, err := detector.Detect(context.Background()) + if err != nil { + otel.Handle(err) + } + return resource +} + +// Equivalent returns an object that can be compared for equality +// between two resources. This value is suitable for use as a key in +// a map. +func (r *Resource) Equivalent() attribute.Distinct { + return r.Set().Equivalent() +} + +// Set returns the equivalent *attribute.Set of this resource's attributes. +func (r *Resource) Set() *attribute.Set { + if r == nil { + r = Empty() + } + return &r.attrs +} + +// MarshalJSON encodes the resource attributes as a JSON list of { "Key": +// "...", "Value": ... } pairs in order sorted by key. +func (r *Resource) MarshalJSON() ([]byte, error) { + if r == nil { + r = Empty() + } + return r.attrs.MarshalJSON() +} + +// Len returns the number of unique key-values in this Resource. +func (r *Resource) Len() int { + if r == nil { + return 0 + } + return r.attrs.Len() +} + +// Encoded returns an encoded representation of the resource. 
+func (r *Resource) Encoded(enc attribute.Encoder) string { + if r == nil { + return "" + } + return r.attrs.Encoded(enc) +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/version.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/version.go new file mode 100644 index 00000000000..b7cede891c4 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk // import "go.opentelemetry.io/otel/sdk" + +// Version is the current release version of the OpenTelemetry SDK in use. +func Version() string { + return "1.29.0" +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md new file mode 100644 index 00000000000..87b842c5d11 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.17.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.17.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.17.0) diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go index 71a1f7748d5..e087c9c04d9 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package semconv implements OpenTelemetry semantic conventions. // diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go index 679c40c4de4..c7b804bbe2e 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. 
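
Reviewer note: the schema-URL rules documented on the vendored resource.Merge above are easier to see in a small sketch than in the doc comment. The snippet below is illustrative only and is not part of the vendored code; the attribute keys, values, and schema URL are made up for the example.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// Merge is last-value-wins: on key conflicts, attributes from the
	// second argument (b) overwrite those from the first (a).
	a := resource.NewWithAttributes("https://opentelemetry.io/schemas/1.26.0",
		attribute.String("service.name", "installer"))
	b := resource.NewWithAttributes("https://opentelemetry.io/schemas/1.26.0",
		attribute.String("service.name", "terraform"),
		attribute.String("service.version", "6.10.0"))

	merged, err := resource.Merge(a, b)
	if err != nil {
		// err is non-nil only when the two schema URLs differ and are both
		// non-empty; it wraps ErrSchemaURLConflict, and merged is still a
		// usable, schemaless Resource.
		fmt.Println("merge conflict:", err)
	}
	fmt.Println(merged) // service.name=terraform,service.version=6.10.0
}

Swapping the arguments would flip which service.name survives, since only the second resource's values win on key conflicts.
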
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go index 9b8c559de42..137acc67de0 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go index d5c4b5c136a..d318221e59f 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go index 39a2eab3a6a..7e365e82ce8 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. 
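
The semconv packages below are what the resource detectors added earlier in this patch build on (for example semconv.HostID and semconv.OSDescription). As a rough sketch of the same Detector pattern, and not part of the vendored code: a custom detector only needs to wrap a value in a semconv constructor and return it under the package's schema URL. The versionDetector type and the version string here are hypothetical.

package main

import (
	"context"
	"fmt"

	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/sdk/resource"
)

// versionDetector mirrors the shape of the vendored hostIDDetector: fetch one
// value, wrap it in a semconv attribute, and tag it with the schema URL.
type versionDetector struct{ version string }

func (d versionDetector) Detect(ctx context.Context) (*resource.Resource, error) {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceVersion(d.version),
	), nil
}

func main() {
	res, err := resource.New(context.Background(),
		resource.WithDetectors(versionDetector{version: "6.10.0"}),
	)
	if err != nil {
		fmt.Println("detect error:", err)
	}
	fmt.Println(res) // service.version=6.10.0
}

resource.New runs each configured detector and merges the results, so several small detectors like this compose the same way the vendored host, OS, and process detectors do.
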
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go index 42fc525d165..634a1dce07a 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go index 8c4a7299d27..21497bb6bc6 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md new file mode 100644 index 00000000000..82e1f46b4ea --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.20.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.20.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.20.0) diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go index 67d1d4c44d7..6685c392b50 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go index 359c5a69624..0d1f55a8fe9 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package semconv implements OpenTelemetry semantic conventions. // diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go index 8ac9350d2b2..63776393217 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go index 09ff4dfdbf7..f40c97825aa 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go index 342aede95f1..9c1840631b6 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go index a2b906742a8..3d44dae2750 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go index e449e5c3b9f..95d0210e38f 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
index 8517741485c..90b1b0452cc 100644
--- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
@@ -1,16 +1,5 @@
 // Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 // Code generated from semantic convention specification. DO NOT EDIT.
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
new file mode 100644
index 00000000000..2de1fc3c6be
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.26.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0)
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
new file mode 100644
index 00000000000..d8dc822b263
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
@@ -0,0 +1,8996 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The Android platform on which the Android application is running.
+const (
+ // AndroidOSAPILevelKey is the attribute Key conforming to the
+ // "android.os.api_level" semantic conventions. It uniquely
+ // identifies the framework API revision offered by a version
+ // (`os.version`) of the Android operating system. More information can be
+ // found
+ // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '33', '32' + AndroidOSAPILevelKey = attribute.Key("android.os.api_level") +) + +// AndroidOSAPILevel returns an attribute KeyValue conforming to the +// "android.os.api_level" semantic conventions. It represents the uniquely +// identifies the framework API revision offered by a version (`os.version`) of +// the android operating system. More information can be found +// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). +func AndroidOSAPILevel(val string) attribute.KeyValue { + return AndroidOSAPILevelKey.String(val) +} + +// ASP.NET Core attributes +const ( + // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.result" semantic conventions. It represents + // the rate-limiting result, shows whether the lease was acquired or + // contains a rejection reason + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Examples: 'acquired', 'request_canceled' + AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") + + // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to + // the "aspnetcore.diagnostics.handler.type" semantic conventions. It + // represents the full type name of the + // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) + // implementation that handled the exception. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if and only if the exception + // was handled by this handler.) + // Stability: stable + // Examples: 'Contoso.MyHandler' + AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") + + // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming + // to the "aspnetcore.diagnostics.exception.result" semantic conventions. + // It represents the aSP.NET Core exception middleware handling result + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'handled', 'unhandled' + AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result") + + // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.policy" semantic conventions. It represents + // the rate limiting policy name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fixed', 'sliding', 'token' + AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") + + // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the + // "aspnetcore.request.is_unhandled" semantic conventions. It represents + // the flag indicating if request was handled by the application pipeline. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Examples: True + AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") + + // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the + // "aspnetcore.routing.is_fallback" semantic conventions. It represents a + // value that indicates whether the matched route is a fallback route. 
+ // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Examples: True + AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") + + // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the + // "aspnetcore.routing.match_status" semantic conventions. It represents + // the match result - success or failure + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'success', 'failure' + AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status") +) + +var ( + // Lease was acquired + AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") + // Lease request was rejected by the endpoint limiter + AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") + // Lease request was rejected by the global limiter + AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") + // Lease request was canceled + AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") +) + +var ( + // Exception was handled by the exception handling middleware + AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled") + // Exception was not handled by the exception handling middleware + AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled") + // Exception handling was skipped because the response had started + AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped") + // Exception handling didn't run because the request was aborted + AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted") +) + +var ( + // Match succeeded + AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success") + // Match failed + AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure") +) + +// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming +// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It +// represents the full type name of the +// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) +// implementation that handled the exception. +func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { + return AspnetcoreDiagnosticsHandlerTypeKey.String(val) +} + +// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to +// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents +// the rate limiting policy name. +func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { + return AspnetcoreRateLimitingPolicyKey.String(val) +} + +// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to +// the "aspnetcore.request.is_unhandled" semantic conventions. It represents +// the flag indicating if request was handled by the application pipeline. +func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { + return AspnetcoreRequestIsUnhandledKey.Bool(val) +} + +// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to +// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a +// value that indicates whether the matched route is a fallback route. 
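Every export in this generated file follows the same three-part pattern visible above: a raw `attribute.Key` constant, prebuilt `KeyValue` variables for enum members, and a typed constructor function for open-ended values. A minimal, self-contained sketch of how instrumentation code typically mixes the three; the policy name and values here are illustrative, not taken from this patch:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	attrs := []attribute.KeyValue{
		// Enum members ship as ready-made KeyValues, steering callers
		// toward the values the specification allows.
		semconv.AspnetcoreRateLimitingResultAcquired,
		// Open-ended values go through the typed constructor functions.
		semconv.AspnetcoreRateLimitingPolicy("fixed"),
		// The raw Keys stay exported for building values directly.
		semconv.AspnetcoreRequestIsUnhandledKey.Bool(false),
	}
	for _, kv := range attrs {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
}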
+func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { + return AspnetcoreRoutingIsFallbackKey.Bool(val) +} + +// Generic attributes for AWS services. +const ( + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes for AWS DynamoDB. +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") + + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the + // number of items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. 
It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. +func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. 
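As a sketch of how the DynamoDB constructors above are consumed, assuming an active `trace.Span` handed in by surrounding instrumentation; the table, index, and counts are illustrative values:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateQuerySpan decorates a span for a hypothetical DynamoDB Query
// call: request parameters up front, response counts once the call returns.
func annotateQuerySpan(span trace.Span) {
	span.SetAttributes(
		semconv.AWSDynamoDBTableNames("Users"),
		semconv.AWSDynamoDBIndexName("name_to_group"),
		semconv.AWSDynamoDBConsistentRead(true),
		semconv.AWSDynamoDBLimit(10),
		semconv.AWSDynamoDBCount(10),
		semconv.AWSDynamoDBScannedCount(50),
	)
}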
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. +func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the number of +// items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// Attributes for AWS Elastic Container Service (ECS). +const ( + // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" + // semantic conventions. It represents the ID of a running ECS task. The ID + // MUST be extracted from `task.arn`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is + // populated.) + // Stability: experimental + // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', + // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a + // running [ECS + // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', + // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the family + // name of the [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) + // used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for the task definition used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSTaskID returns an attribute KeyValue conforming to the +// "aws.ecs.task.id" semantic conventions. It represents the ID of a running +// ECS task. The ID MUST be extracted from `task.arn`. +func AWSECSTaskID(val string) attribute.KeyValue { + return AWSECSTaskIDKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. 
It represents the ARN of a running +// [ECS +// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the family name of +// the [ECS task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) +// used to create the ECS task. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// the task definition used to create the ECS task. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Attributes for AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Attributes for AWS Logs. +const ( + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). 
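A hedged sketch of assembling the ECS identity attributes defined above into one slice; the ARNs are placeholders, and, as the doc comments require, the task ID is the segment extracted from the task ARN:

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// ecsTaskAttributes describes the identity of a running Fargate task.
func ecsTaskAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.AWSECSLaunchtypeFargate,
		semconv.AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
		semconv.AWSECSTaskARN("arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b"),
		// The ID must match the final segment of the task ARN above.
		semconv.AWSECSTaskID("10838bed-421f-43ef-870a-f43feacbbb5b"),
		semconv.AWSECSTaskFamily("opentelemetry-family"),
		semconv.AWSECSTaskRevision("8"),
	}
}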
+ // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") +) + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// Attributes for AWS Lambda. +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `cloud.resource_id` if an alias is + // involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for AWS S3. +const ( + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request + // refers to. Corresponds to the `--bucket` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'some-bucket-name' + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. 
+ // This applies to almost all S3 operations except `list-buckets`. + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source + // object (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e. that require the object key as a mandatory parameter. 
+ // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
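For the S3 attributes, a short sketch of annotating a hypothetical upload-part call, again assuming a caller-supplied `trace.Span`; every value is a placeholder drawn from the examples above:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateUploadPartSpan records the S3 coordinates of one part upload.
func annotateUploadPartSpan(span trace.Span) {
	span.SetAttributes(
		semconv.AWSS3Bucket("some-bucket-name"),
		semconv.AWSS3Key("someFile.yml"),
		semconv.AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
		semconv.AWSS3PartNumber(3456),
	)
}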
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. +func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// The web browser attributes +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. 
It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). 
This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ClientAddressKey is the attribute Key conforming to the "client.address" + // semantic conventions. It represents the client address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.address` SHOULD represent the client address + // behind any intermediaries, for example proxies, if it's available. + ClientAddressKey = attribute.Key("client.address") + + // ClientPortKey is the attribute Key conforming to the "client.port" + // semantic conventions. It represents the client port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.port` SHOULD represent the client port behind + // any intermediaries, for example proxies, if it's available. + ClientPortKey = attribute.Key("client.port") +) + +// ClientAddress returns an attribute KeyValue conforming to the +// "client.address" semantic conventions. It represents the client address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func ClientAddress(val string) attribute.KeyValue { + return ClientAddressKey.String(val) +} + +// ClientPort returns an attribute KeyValue conforming to the "client.port" +// semantic conventions. It represents the client port number. +func ClientPort(val int) attribute.KeyValue { + return ClientPortKey.Int(val) +} + +// A cloud environment (e.g. GCP, Azure, AWS). +const ( + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. It represents the cloud + // regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the + // resource is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. 
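A small helper sketch for the two client.* attributes, splitting a `host:port` string such as `http.Request.RemoteAddr`. Note the caveat above: behind an intermediary, `client.address` should reflect the original client, which `RemoteAddr` alone cannot provide, so treat this as the fallback path:

package example

import (
	"net"
	"strconv"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// clientAttributes derives client.address and client.port from a
// "host:port" remote address.
func clientAttributes(remoteAddr string) ([]attribute.KeyValue, error) {
	host, portStr, err := net.SplitHostPort(remoteAddr)
	if err != nil {
		return nil, err
	}
	port, err := strconv.Atoi(portStr)
	if err != nil {
		return nil, err
	}
	return []attribute.KeyValue{
		semconv.ClientAddress(host),
		semconv.ClientPort(port),
	}, nil
}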
+ CloudPlatformKey = attribute.Key("cloud.platform") + + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://www.tencentcloud.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the + // "cloud.resource_id" semantic conventions. It represents the cloud + // provider-specific native identifier of the monitored cloud resource + // (e.g. an + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // on AWS, a [fully qualified resource + // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) + // on Azure, a [full resource + // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) + // on GCP) + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', + // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', + // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud + // provider. + // The following well-known definitions MUST be used if you set this + // attribute and they apply: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider. 
+ CloudResourceIDKey = attribute.Key("cloud.resource_id") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Apps + CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Bare Metal Solution (BMS) + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + 
// IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. It represents the cloud account ID +// the resource is assigned to. +func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. It represents the cloud +// regions often have multiple, isolated locations known as zones to increase +// availability. Availability zone represents the zone where the resource is +// running. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the +// "cloud.region" semantic conventions. It represents the geographical region +// the resource is running. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudResourceID returns an attribute KeyValue conforming to the +// "cloud.resource_id" semantic conventions. It represents the cloud +// provider-specific native identifier of the monitored cloud resource (e.g. an +// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) +// on AWS, a [fully qualified resource +// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on +// Azure, a [full resource +// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) +// on GCP) +func CloudResourceID(val string) attribute.KeyValue { + return CloudResourceIDKey.String(val) +} + +// Attributes for CloudEvents. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) + // uniquely identifies the event. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. 
It represents the
+ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+ // which contains a value describing the type of event related to the
+ // originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+// which uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
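+
+// Example (editor's illustrative sketch, not generated from the semantic
+// conventions): these helpers are plain attribute.KeyValue constructors, so
+// the attributes describing one consumed CloudEvent might be assembled as:
+//
+//	attrs := []attribute.KeyValue{
+//		CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		CloudeventsEventSource("https://github.com/cloudevents"),
+//		CloudeventsEventSpecVersion("1.0"),
+//		CloudeventsEventType("com.github.pull_request.opened"),
+//	}
+
+// These attributes allow reporting this unit of code and therefore provide
+// more context about the span.
+const (
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.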
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeStacktraceKey is the attribute Key conforming to the + // "code.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'at + // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). 
+func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeStacktrace returns an attribute KeyValue conforming to the +// "code.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func CodeStacktrace(val string) attribute.KeyValue { + return CodeStacktraceKey.String(val) +} + +// A container instance. +const ( + // ContainerCommandKey is the attribute Key conforming to the + // "container.command" semantic conventions. It represents the command used + // to run the container (i.e. the command name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcontribcol' + // Note: If using embedded credentials or sensitive data, it is recommended + // to remove them to prevent potential leakage. + ContainerCommandKey = attribute.Key("container.command") + + // ContainerCommandArgsKey is the attribute Key conforming to the + // "container.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) run by the + // container. [2] + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcontribcol, --config, config.yaml' + ContainerCommandArgsKey = attribute.Key("container.command_args") + + // ContainerCommandLineKey is the attribute Key conforming to the + // "container.command_line" semantic conventions. It represents the full + // command run by the container as a single string representing the full + // command. [2] + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcontribcol --config config.yaml' + ContainerCommandLineKey = attribute.Key("container.command_line") + + // ContainerCPUStateKey is the attribute Key conforming to the + // "container.cpu.state" semantic conventions. It represents the CPU state + // for this data point. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'user', 'kernel' + ContainerCPUStateKey = attribute.Key("container.cpu.state") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. 
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerImageIDKey is the attribute Key conforming to the
+ // "container.image.id" semantic conventions. It represents the runtime
+ // specific image identifier. Usually a hash algorithm followed by a UUID.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+ // Note: Docker defines a sha256 of the image id; `container.image.id`
+ // corresponds to the `Image` field from the Docker container inspect
+ // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+ // endpoint.
+ // K8S defines a link to the container registry repository with digest
+ // `"imageID": "registry.azurecr.io
+ // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+ // The ID is assigned by the container runtime and can vary in different
+ // environments. Consider using `oci.manifest.digest` if it is important to
+ // identify the same image in different environments/runtimes.
+ ContainerImageIDKey = attribute.Key("container.image.id")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of
+ // the image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageRepoDigestsKey is the attribute Key conforming to the
+ // "container.image.repo_digests" semantic conventions. It represents the
+ // repo digests of the container image as provided by the container
+ // runtime.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
+ // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
+ // Note:
+ // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
+ // and
+ // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
+ // report those under the `RepoDigests` field.
+ ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+ // ContainerImageTagsKey is the attribute Key conforming to the
+ // "container.image.tags" semantic conventions. It represents the container
+ // image tags. An example can be found in [Docker Image
+ // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+ // Should be only the `<tag>` section of the full name for example from
+ // `registry.example.com/my-org/my-image:<tag>`.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'v1.27.1', '3.5.7-0'
+ ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") +) + +var ( + // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows) + ContainerCPUStateUser = ContainerCPUStateKey.String("user") + // When CPU is used by the system (host OS) + ContainerCPUStateSystem = ContainerCPUStateKey.String("system") + // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows) + ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel") +) + +// ContainerCommand returns an attribute KeyValue conforming to the +// "container.command" semantic conventions. It represents the command used to +// run the container (i.e. the command name). +func ContainerCommand(val string) attribute.KeyValue { + return ContainerCommandKey.String(val) +} + +// ContainerCommandArgs returns an attribute KeyValue conforming to the +// "container.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) run by the +// container. [2] +func ContainerCommandArgs(val ...string) attribute.KeyValue { + return ContainerCommandArgsKey.StringSlice(val) +} + +// ContainerCommandLine returns an attribute KeyValue conforming to the +// "container.command_line" semantic conventions. It represents the full +// command run by the container as a single string representing the full +// command. [2] +func ContainerCommandLine(val string) attribute.KeyValue { + return ContainerCommandLineKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerImageID returns an attribute KeyValue conforming to the +// "container.image.id" semantic conventions. It represents the runtime +// specific image identifier. Usually a hash algorithm followed by a UUID. +func ContainerImageID(val string) attribute.KeyValue { + return ContainerImageIDKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageRepoDigests returns an attribute KeyValue conforming to the +// "container.image.repo_digests" semantic conventions. It represents the repo +// digests of the container image as provided by the container runtime. +func ContainerImageRepoDigests(val ...string) attribute.KeyValue { + return ContainerImageRepoDigestsKey.StringSlice(val) +} + +// ContainerImageTags returns an attribute KeyValue conforming to the +// "container.image.tags" semantic conventions. 
It represents the container
+// image tags. An example can be found in [Docker Image
+// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+// Should be only the `<tag>` section of the full name for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+func ContainerImageTags(val ...string) attribute.KeyValue {
+ return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
+
+// This group defines the attributes used to describe telemetry in the context
+// of databases.
+const (
+ // DBClientConnectionsPoolNameKey is the attribute Key conforming to the
+ // "db.client.connections.pool.name" semantic conventions. It represents
+ // the name of the connection pool; unique within the instrumented
+ // application. In case the connection pool implementation doesn't provide
+ // a name, instrumentation should use a combination of `server.address` and
+ // `server.port` attributes formatted as `server.address:server.port`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myDataSource'
+ DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name")
+
+ // DBClientConnectionsStateKey is the attribute Key conforming to the
+ // "db.client.connections.state" semantic conventions. It represents the
+ // state of a connection in the pool
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'idle'
+ DBClientConnectionsStateKey = attribute.Key("db.client.connections.state")
+
+ // DBCollectionNameKey is the attribute Key conforming to the
+ // "db.collection.name" semantic conventions. It represents the name of a
+ // collection (table, container) within the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'public.users', 'customers'
+ // Note: If the collection name is parsed from the query, it SHOULD match
+ // the value provided in the query and may be qualified with the schema and
+ // database name.
+ // It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ DBCollectionNameKey = attribute.Key("db.collection.name")
+
+ // DBNamespaceKey is the attribute Key conforming to the "db.namespace"
+ // semantic conventions. It represents the name of the database, fully
+ // qualified within the server address and port.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'customers', 'test.users'
+ // Note: If a database system has multiple namespace components, they
+ // SHOULD be concatenated (potentially using database system specific
+ // conventions) from most general to most specific namespace component, and
+ // more specific namespaces SHOULD NOT be captured without the more general
+ // namespaces, to ensure that "startswith" queries for the more general
+ // namespaces will be valid.
+ // Semantic conventions for individual database systems SHOULD document + // what `db.namespace` means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + DBNamespaceKey = attribute.Key("db.namespace") + + // DBOperationNameKey is the attribute Key conforming to the + // "db.operation.name" semantic conventions. It represents the name of the + // operation or command being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: It is RECOMMENDED to capture the value as provided by the + // application without attempting to do any case normalization. + DBOperationNameKey = attribute.Key("db.operation.name") + + // DBQueryTextKey is the attribute Key conforming to the "db.query.text" + // semantic conventions. It represents the database query being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey + // "WuValue"' + DBQueryTextKey = attribute.Key("db.query.text") + + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents the database management system (DBMS) product + // as identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The actual DBMS may differ from the one identified by the client. + // For example, when using PostgreSQL client libraries to connect to a + // CockroachDB, the `db.system` is set to `postgresql` based on the + // instrumentation's best knowledge. + DBSystemKey = attribute.Key("db.system") +) + +var ( + // idle + DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle") + // used + DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used") +) + +var ( + // Some other SQL database. Fallback only. 
See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") + // Cloud Spanner + DBSystemSpanner = DBSystemKey.String("spanner") + // Trino + DBSystemTrino = DBSystemKey.String("trino") +) + +// DBClientConnectionsPoolName returns 
an attribute KeyValue conforming to +// the "db.client.connections.pool.name" semantic conventions. It represents +// the name of the connection pool; unique within the instrumented application. +// In case the connection pool implementation doesn't provide a name, +// instrumentation should use a combination of `server.address` and +// `server.port` attributes formatted as `server.address:server.port`. +func DBClientConnectionsPoolName(val string) attribute.KeyValue { + return DBClientConnectionsPoolNameKey.String(val) +} + +// DBCollectionName returns an attribute KeyValue conforming to the +// "db.collection.name" semantic conventions. It represents the name of a +// collection (table, container) within the database. +func DBCollectionName(val string) attribute.KeyValue { + return DBCollectionNameKey.String(val) +} + +// DBNamespace returns an attribute KeyValue conforming to the +// "db.namespace" semantic conventions. It represents the name of the database, +// fully qualified within the server address and port. +func DBNamespace(val string) attribute.KeyValue { + return DBNamespaceKey.String(val) +} + +// DBOperationName returns an attribute KeyValue conforming to the +// "db.operation.name" semantic conventions. It represents the name of the +// operation or command being executed. +func DBOperationName(val string) attribute.KeyValue { + return DBOperationNameKey.String(val) +} + +// DBQueryText returns an attribute KeyValue conforming to the +// "db.query.text" semantic conventions. It represents the database query being +// executed. +func DBQueryText(val string) attribute.KeyValue { + return DBQueryTextKey.String(val) +} + +// This group defines attributes for Cassandra. +const ( + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents the + // whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e. 
how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents whether or
+// not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
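+
+// Example (editor's illustrative sketch, not generated from the semantic
+// conventions): a client instrumentation recording a Cassandra query might
+// attach these attributes to its span, reusing the enum values above:
+//
+//	attrs := []attribute.KeyValue{
+//		DBCassandraConsistencyLevelQuorum,
+//		DBCassandraCoordinatorDC("us-west-2"),
+//		DBCassandraIdempotence(true),
+//		DBCassandraPageSize(5000),
+//	}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.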
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// This group defines attributes for Azure Cosmos DB. +const ( + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // cosmosDB Operation Type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. It represents the rU + // consumed for that operation + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 46.18, 1.0 + DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") + + // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the + // "db.cosmosdb.request_content_length" semantic conventions. It represents + // the request payload size in bytes + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") + + // DBCosmosDBStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos + // DB status code. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 200, 201 + DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") + + // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.sub_status_code" semantic conventions. It represents the + // cosmos DB sub status code. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000, 1002 + DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") +) + +var ( + // Gateway (HTTP) connections mode + DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") + // Direct connection + DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") +) + +var ( + // invalid + DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") + // create + DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") + // patch + DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") + // read + DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") + // read_feed + DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") + // delete + DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") + // replace + DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") + // execute + DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") + // query + DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") + // head + DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") + // head_feed + DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") + // upsert + DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") + // batch + DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") + // query_plan + DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") + // execute_javascript + DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") +) + +// DBCosmosDBClientID returns an attribute KeyValue conforming to the +// "db.cosmosdb.client_id" semantic conventions. It represents the unique +// Cosmos client instance id. +func DBCosmosDBClientID(val string) attribute.KeyValue { + return DBCosmosDBClientIDKey.String(val) +} + +// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the +// "db.cosmosdb.request_charge" semantic conventions. It represents the rU +// consumed for that operation +func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { + return DBCosmosDBRequestChargeKey.Float64(val) +} + +// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming +// to the "db.cosmosdb.request_content_length" semantic conventions. It +// represents the request payload size in bytes +func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { + return DBCosmosDBRequestContentLengthKey.Int(val) +} + +// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB +// status code. +func DBCosmosDBStatusCode(val int) attribute.KeyValue { + return DBCosmosDBStatusCodeKey.Int(val) +} + +// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos +// DB sub status code. +func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { + return DBCosmosDBSubStatusCodeKey.Int(val) +} + +// This group defines attributes for Elasticsearch. +const ( + // DBElasticsearchClusterNameKey is the attribute Key conforming to the + // "db.elasticsearch.cluster.name" semantic conventions. 
It represents the
+ // identifier of an Elasticsearch cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
+ DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
+
+ // DBElasticsearchNodeNameKey is the attribute Key conforming to the
+ // "db.elasticsearch.node.name" semantic conventions. It represents the
+ // human-readable identifier of the node/instance to which a request was
+ // routed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'instance-0000000001'
+ DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
+)
+
+// DBElasticsearchClusterName returns an attribute KeyValue conforming to
+// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
+// identifier of an Elasticsearch cluster.
+func DBElasticsearchClusterName(val string) attribute.KeyValue {
+ return DBElasticsearchClusterNameKey.String(val)
+}
+
+// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "db.elasticsearch.node.name" semantic conventions. It represents the
+// human-readable identifier of the node/instance to which a request was
+// routed.
+func DBElasticsearchNodeName(val string) attribute.KeyValue {
+ return DBElasticsearchNodeNameKey.String(val)
+}
+
+// Attributes for software deployments.
+const (
+ // DeploymentEnvironmentKey is the attribute Key conforming to the
+ // "deployment.environment" semantic conventions. It represents the name of
+ // the [deployment
+ // environment](https://wikipedia.org/wiki/Deployment_environment) (aka
+ // deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'staging', 'production'
+ // Note: `deployment.environment` does not affect the uniqueness
+ // constraints defined through
+ // the `service.namespace`, `service.name` and `service.instance.id`
+ // resource attributes.
+ // This implies that resources carrying the following attribute
+ // combinations MUST be
+ // considered to be identifying the same service:
+ //
+ // * `service.name=frontend`, `deployment.environment=production`
+ // * `service.name=frontend`, `deployment.environment=staging`.
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
+// (aka deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+ return DeploymentEnvironmentKey.String(val)
+}
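+
+// Example (editor's illustrative sketch, not generated from the semantic
+// conventions; assumes the importing code uses the SDK's resource package and
+// this package's SchemaURL constant): `deployment.environment` is typically
+// set once, on the resource:
+//
+//	import "go.opentelemetry.io/otel/sdk/resource"
+//
+//	res := resource.NewWithAttributes(SchemaURL,
+//		DeploymentEnvironment("staging"))
+
+// Attributes that represent an occurrence of a lifecycle transition on the
+// Android platform.
+const (
+ // AndroidStateKey is the attribute Key conforming to the "android.state"
+ // semantic conventions. It is deprecated; use the `device.app.lifecycle`
+ // event definition, including `android.state` as a payload field, instead.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The Android lifecycle states are defined in [Activity lifecycle
+ // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
+ // from which the `OS identifiers` are derived.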
+ AndroidStateKey = attribute.Key("android.state") +) + +var ( + // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time + AndroidStateCreated = AndroidStateKey.String("created") + // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state + AndroidStateBackground = AndroidStateKey.String("background") + // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states + AndroidStateForeground = AndroidStateKey.String("foreground") +) + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the + // destination address - domain name if available without reverse DNS + // lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the source side, and when communicating through + // an intermediary, `destination.address` SHOULD represent the destination + // address behind any intermediaries, for example proxies, if it's + // available. + DestinationAddressKey = attribute.Key("destination.address") + + // DestinationPortKey is the attribute Key conforming to the + // "destination.port" semantic conventions. It represents the destination + // port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) + +// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the destination +// address - domain name if available without reverse DNS lookup; otherwise, IP +// address or Unix domain socket name. +func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) +} + +// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the destination port +// number +func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) +} + +// Describes device attributes. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. 
On iOS (Swift or Objective-C), this value MUST be equal
+ // to the [vendor
+ // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+ // On Android (Java or Kotlin), this value MUST be equal to the Firebase
+ // Installation ID or a globally unique UUID which is persisted across
+ // sessions in your application. More information on best practices and
+ // exact implementation details can be found
+ // [here](https://developer.android.com/training/articles/user-data-ids).
+ // Caution should be taken when storing personal data or anything which can
+ // identify a user. GDPR and data protection laws may apply; ensure you do
+ // your own due diligence.
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of
+ // the device manufacturer
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Apple', 'Samsung'
+ // Note: The Android OS provides this field via
+ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+ // iOS apps SHOULD hardcode the value `Apple`.
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'iPhone3,4', 'SM-G920F'
+ // Note: It's recommended this value represents a machine-readable version
+ // of the model identifier rather than the market or consumer-friendly name
+ // of the device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the
+ // "device.model.name" semantic conventions. It represents the marketing
+ // name for the device model
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+ // Note: It's recommended this value represents a human-readable version of
+ // the device model rather than a machine-readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
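+
+// Example (editor's illustrative sketch, not generated from the semantic
+// conventions): a mobile resource might carry, using the documented example
+// values:
+//
+//	attrs := []attribute.KeyValue{
+//		DeviceManufacturer("Samsung"),
+//		DeviceModelIdentifier("SM-G920F"),
+//		DeviceModelName("Samsung Galaxy S6"),
+//	}
+
+// These attributes may be used for any disk related operation.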
+const ( + // DiskIoDirectionKey is the attribute Key conforming to the + // "disk.io.direction" semantic conventions. It represents the disk IO + // operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read' + DiskIoDirectionKey = attribute.Key("disk.io.direction") +) + +var ( + // read + DiskIoDirectionRead = DiskIoDirectionKey.String("read") + // write + DiskIoDirectionWrite = DiskIoDirectionKey.String("write") +) + +// The shared attributes used to report a DNS query. +const ( + // DNSQuestionNameKey is the attribute Key conforming to the + // "dns.question.name" semantic conventions. It represents the name being + // queried. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'www.example.com', 'opentelemetry.io' + // Note: If the name field contains non-printable characters (below 32 or + // above 126), those characters should be represented as escaped base 10 + // integers (\DDD). Back slashes and quotes should be escaped. Tabs, + // carriage returns, and line feeds should be converted to \t, \r, and \n + // respectively. + DNSQuestionNameKey = attribute.Key("dns.question.name") +) + +// DNSQuestionName returns an attribute KeyValue conforming to the +// "dns.question.name" semantic conventions. It represents the name being +// queried. +func DNSQuestionName(val string) attribute.KeyValue { + return DNSQuestionNameKey.String(val) +} + +// Attributes for operations with an authenticated and/or authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. 
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+ return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+ return EnduserScopeKey.String(val)
+}
+
+// The shared attributes used to report an error.
+const (
+ // ErrorTypeKey is the attribute Key conforming to the "error.type"
+ // semantic conventions. It describes a class of error the operation ended
+ // with.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'timeout', 'java.net.UnknownHostException',
+ // 'server_certificate_invalid', '500'
+ // Note: The `error.type` SHOULD be predictable, and SHOULD have low
+ // cardinality.
+ //
+ // When `error.type` is set to a type (e.g., an exception type), its
+ // canonical class name identifying the type within the artifact SHOULD be
+ // used.
+ //
+ // Instrumentations SHOULD document the list of errors they report.
+ //
+ // The cardinality of `error.type` within one instrumentation library
+ // SHOULD be low.
+ // Telemetry consumers that aggregate data from multiple instrumentation
+ // libraries and applications
+ // should be prepared for `error.type` to have high cardinality at query
+ // time when no
+ // additional filters are applied.
+ //
+ // If the operation has completed successfully, instrumentations SHOULD NOT
+ // set `error.type`.
+ //
+ // If a specific domain defines its own set of error identifiers (such as
+ // HTTP or gRPC status codes),
+ // it's RECOMMENDED to:
+ //
+ // * Use a domain-specific attribute
+ // * Set `error.type` to capture all errors, regardless of whether they are
+ // defined within the domain-specific set or not.
+ ErrorTypeKey = attribute.Key("error.type")
+)
+
+var (
+ // A fallback error value to be used when the instrumentation doesn't define a custom value
+ ErrorTypeOther = ErrorTypeKey.String("_OTHER")
+)
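+
+// Example (editor's illustrative sketch, not generated from the semantic
+// conventions; `classify` is a hypothetical helper): on failure an
+// instrumentation might record the error class, falling back to the generic
+// value when nothing more specific is known:
+//
+//	var attr attribute.KeyValue
+//	if errType := classify(err); errType != "" {
+//		attr = ErrorTypeKey.String(errType)
+//	} else {
+//		attr = ErrorTypeOther
+//	}
+
+// Attributes for Events represented using Log Records.
+const (
+ // EventNameKey is the attribute Key conforming to the "event.name"
+ // semantic conventions. It identifies the class / type of event.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'browser.mouse.click', 'device.app.lifecycle'
+ // Note: Event names are subject to the same rules as [attribute
+ // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md).
+ // Notably, event names are namespaced to avoid collisions and provide a
+ // clean separation of semantics for events in separate domains like
+ // browser, mobile, and kubernetes.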
+ EventNameKey = attribute.Key("event.name")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It identifies the class / type of event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It SHOULD be
+ // set to true if the exception event is recorded at a point where it is
+ // known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span,
+ // if that span is ended while the exception is still logically "in
+ // flight".
+ // This may be actually "in flight" in some languages (e.g. if the
+ // exception
+ // is passed to a Context manager's `__exit__` method in Python) but will
+ // usually be caught at the point of recording the exception in most
+ // languages.
+ //
+ // It is usually not possible to determine at the point where an exception
+ // is thrown
+ // whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception
+ // will escape, if one checks for an active exception just before ending
+ // the span,
+ // as done in the [example for recording span
+ // exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str
+ // implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace
+ // as a string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the
+ // exception should be preferred over the static type in languages that
+ // support it.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It SHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
+
+// FaaS attributes
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the
+ // serverless function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron
+ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 corresponds to the bucket name, and in
+ // Cosmos DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+ // S3 is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It describes
+ // the type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a
+ // string, that will be potentially reused for other invocations to the
+ // same function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note: * **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation
+ // ID of the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+
+ // FaaSInvokedNameKey is the attribute Key conforming to the
+ // "faas.invoked_name" semantic conventions. It represents the name of the
+ // invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-function'
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the
+ // invoked function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud
+ // region of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'eu-central-1'
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the
+ // invoked function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the
+ // "faas.max_memory" semantic conventions. It represents the amount of
+ // memory available to the serverless function converted to Bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 134217728
+ // Note: It's recommended to set this attribute since e.g. too little
+ // memory can easily stop a Java AWS Lambda function from working
+ // correctly. On AWS Lambda, the environment variable
+ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+ // be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this
+ // runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the
+ // FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The
+ // following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud
+ // providers/products:
+ //
+ // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `cloud.resource_id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+ // semantic conventions. It represents the type of the trigger which caused
+ // this function invocation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version"
+ // semantic conventions. It represents the immutable version of the
+ // function being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run (Services):** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable.
Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. 
It represents the execution
+// environment ID as a string, that will be potentially reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
+
+// Attributes for Feature Flags.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It SHOULD be
+ // a semantic identifier for a value. If one is unavailable, a stringified
+ // version of the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means
+ // for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` may be used for the value `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It SHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
+
+// Describes file attributes.
+const (
+ // FileDirectoryKey is the attribute Key conforming to the "file.directory"
+ // semantic conventions. It represents the directory where the file is
+ // located. It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/home/user', 'C:\\Program Files\\MyApp'
+ FileDirectoryKey = attribute.Key("file.directory")
+
+ // FileExtensionKey is the attribute Key conforming to the "file.extension"
+ // semantic conventions. It represents the file extension, excluding the
+ // leading dot.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'png', 'gz'
+ // Note: When the file name has multiple extensions (example.tar.gz), only
+ // the last one should be captured ("gz", not "tar.gz").
+ FileExtensionKey = attribute.Key("file.extension")
+
+ // FileNameKey is the attribute Key conforming to the "file.name" semantic
+ // conventions. It represents the name of the file including the extension,
+ // without the directory.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'example.png'
+ FileNameKey = attribute.Key("file.name")
+
+ // FilePathKey is the attribute Key conforming to the "file.path" semantic
+ // conventions. It represents the full path to the file, including the file
+ // name. It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/home/alice/example.png', 'C:\\Program
+ // Files\\MyApp\\myapp.exe'
+ FilePathKey = attribute.Key("file.path")
+
+ // FileSizeKey is the attribute Key conforming to the "file.size" semantic
+ // conventions. It represents the file size in bytes.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + FileSizeKey = attribute.Key("file.size") +) + +// FileDirectory returns an attribute KeyValue conforming to the +// "file.directory" semantic conventions. It represents the directory where the +// file is located. It should include the drive letter, when appropriate. +func FileDirectory(val string) attribute.KeyValue { + return FileDirectoryKey.String(val) +} + +// FileExtension returns an attribute KeyValue conforming to the +// "file.extension" semantic conventions. It represents the file extension, +// excluding the leading dot. +func FileExtension(val string) attribute.KeyValue { + return FileExtensionKey.String(val) +} + +// FileName returns an attribute KeyValue conforming to the "file.name" +// semantic conventions. It represents the name of the file including the +// extension, without the directory. +func FileName(val string) attribute.KeyValue { + return FileNameKey.String(val) +} + +// FilePath returns an attribute KeyValue conforming to the "file.path" +// semantic conventions. It represents the full path to the file, including the +// file name. It should include the drive letter, when appropriate. +func FilePath(val string) attribute.KeyValue { + return FilePathKey.String(val) +} + +// FileSize returns an attribute KeyValue conforming to the "file.size" +// semantic conventions. It represents the file size in bytes. +func FileSize(val int) attribute.KeyValue { + return FileSizeKey.Int(val) +} + +// Attributes for Google Cloud Run. +const ( + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the + // name of the Cloud Run + // [execution](https://cloud.google.com/run/docs/managing/job-executions) + // being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the + // index for a task within an execution as provided by the + // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1 + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") +) + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name +// of the Cloud Run +// [execution](https://cloud.google.com/run/docs/managing/job-executions) being +// run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index +// for a task within an execution as provided by the +// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// Attributes for Google Compute Engine (GCE). +const ( + // GCPGceInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the + // hostname of a GCE instance. This is the full value of the default or + // [custom + // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', + // 'sample-vm.us-west1-b.c.my-project.internal' + GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") + + // GCPGceInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance + // name of a GCE instance. This is the value provided by `host.name`, the + // visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the [default internal + // DNS + // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") +) + +// GCPGceInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. This is the full value of the default or [custom +// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). +func GCPGceInstanceHostname(val string) attribute.KeyValue { + return GCPGceInstanceHostnameKey.String(val) +} + +// GCPGceInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance +// name of a GCE instance. This is the value provided by `host.name`, the +// visible name of the instance in the Cloud Console UI, and the prefix for the +// default hostname of the instance as defined by the [default internal DNS +// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). +func GCPGceInstanceName(val string) attribute.KeyValue { + return GCPGceInstanceNameKey.String(val) +} + +// The attributes used to describe telemetry in the context of LLM (Large +// Language Models) requests and responses. +const ( + // GenAiCompletionKey is the attribute Key conforming to the + // "gen_ai.completion" semantic conventions. It represents the full + // response received from the LLM. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: "[{'role': 'assistant', 'content': 'The capital of France is + // Paris.'}]" + // Note: It's RECOMMENDED to format completions as JSON string matching + // [OpenAI messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiCompletionKey = attribute.Key("gen_ai.completion") + + // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt" + // semantic conventions. It represents the full prompt sent to an LLM. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: "[{'role': 'user', 'content': 'What is the capital of + // France?'}]" + // Note: It's RECOMMENDED to format prompts as JSON string matching [OpenAI + // messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiPromptKey = attribute.Key("gen_ai.prompt") + + // GenAiRequestMaxTokensKey is the attribute Key conforming to the + // "gen_ai.request.max_tokens" semantic conventions. It represents the + // maximum number of tokens the LLM generates for a request. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") + + // GenAiRequestModelKey is the attribute Key conforming to the + // "gen_ai.request.model" semantic conventions. It represents the name of + // the LLM a request is being made to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4' + GenAiRequestModelKey = attribute.Key("gen_ai.request.model") + + // GenAiRequestTemperatureKey is the attribute Key conforming to the + // "gen_ai.request.temperature" semantic conventions. It represents the + // temperature setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0.0 + GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") + + // GenAiRequestTopPKey is the attribute Key conforming to the + // "gen_ai.request.top_p" semantic conventions. It represents the top_p + // sampling setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0 + GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p") + + // GenAiResponseFinishReasonsKey is the attribute Key conforming to the + // "gen_ai.response.finish_reasons" semantic conventions. It represents the + // array of reasons the model stopped generating tokens, corresponding to + // each generation received. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'stop' + GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") + + // GenAiResponseIDKey is the attribute Key conforming to the + // "gen_ai.response.id" semantic conventions. It represents the unique + // identifier for the completion. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'chatcmpl-123' + GenAiResponseIDKey = attribute.Key("gen_ai.response.id") + + // GenAiResponseModelKey is the attribute Key conforming to the + // "gen_ai.response.model" semantic conventions. It represents the name of + // the LLM a response was generated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4-0613' + GenAiResponseModelKey = attribute.Key("gen_ai.response.model") + + // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system" + // semantic conventions. It represents the Generative AI product as + // identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'openai' + // Note: The actual GenAI product may differ from the one identified by the + // client. 
For example, when using OpenAI client libraries to communicate + // with Mistral, the `gen_ai.system` is set to `openai` based on the + // instrumentation's best knowledge. + GenAiSystemKey = attribute.Key("gen_ai.system") + + // GenAiUsageCompletionTokensKey is the attribute Key conforming to the + // "gen_ai.usage.completion_tokens" semantic conventions. It represents the + // number of tokens used in the LLM response (completion). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 180 + GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens") + + // GenAiUsagePromptTokensKey is the attribute Key conforming to the + // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the + // number of tokens used in the LLM prompt. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens") +) + +var ( + // OpenAI + GenAiSystemOpenai = GenAiSystemKey.String("openai") +) + +// GenAiCompletion returns an attribute KeyValue conforming to the +// "gen_ai.completion" semantic conventions. It represents the full response +// received from the LLM. +func GenAiCompletion(val string) attribute.KeyValue { + return GenAiCompletionKey.String(val) +} + +// GenAiPrompt returns an attribute KeyValue conforming to the +// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to +// an LLM. +func GenAiPrompt(val string) attribute.KeyValue { + return GenAiPromptKey.String(val) +} + +// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the +// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum +// number of tokens the LLM generates for a request. +func GenAiRequestMaxTokens(val int) attribute.KeyValue { + return GenAiRequestMaxTokensKey.Int(val) +} + +// GenAiRequestModel returns an attribute KeyValue conforming to the +// "gen_ai.request.model" semantic conventions. It represents the name of the +// LLM a request is being made to. +func GenAiRequestModel(val string) attribute.KeyValue { + return GenAiRequestModelKey.String(val) +} + +// GenAiRequestTemperature returns an attribute KeyValue conforming to the +// "gen_ai.request.temperature" semantic conventions. It represents the +// temperature setting for the LLM request. +func GenAiRequestTemperature(val float64) attribute.KeyValue { + return GenAiRequestTemperatureKey.Float64(val) +} + +// GenAiRequestTopP returns an attribute KeyValue conforming to the +// "gen_ai.request.top_p" semantic conventions. It represents the top_p +// sampling setting for the LLM request. +func GenAiRequestTopP(val float64) attribute.KeyValue { + return GenAiRequestTopPKey.Float64(val) +} + +// GenAiResponseFinishReasons returns an attribute KeyValue conforming to +// the "gen_ai.response.finish_reasons" semantic conventions. It represents the +// array of reasons the model stopped generating tokens, corresponding to each +// generation received. +func GenAiResponseFinishReasons(val ...string) attribute.KeyValue { + return GenAiResponseFinishReasonsKey.StringSlice(val) +} + +// GenAiResponseID returns an attribute KeyValue conforming to the +// "gen_ai.response.id" semantic conventions. It represents the unique +// identifier for the completion. 
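+//
+// Illustrative usage (editor's sketch, not part of the upstream generated
+// file; the span variable and values are hypothetical):
+//
+//	span.SetAttributes(
+//		GenAiResponseID("chatcmpl-123"),
+//		GenAiResponseModel("gpt-4-0613"),
+//		GenAiUsageCompletionTokens(180),
+//	)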
+func GenAiResponseID(val string) attribute.KeyValue {
+ return GenAiResponseIDKey.String(val)
+}
+
+// GenAiResponseModel returns an attribute KeyValue conforming to the
+// "gen_ai.response.model" semantic conventions. It represents the name of the
+// LLM a response was generated from.
+func GenAiResponseModel(val string) attribute.KeyValue {
+ return GenAiResponseModelKey.String(val)
+}
+
+// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to
+// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the
+// number of tokens used in the LLM response (completion).
+func GenAiUsageCompletionTokens(val int) attribute.KeyValue {
+ return GenAiUsageCompletionTokensKey.Int(val)
+}
+
+// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the
+// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number
+// of tokens used in the LLM prompt.
+func GenAiUsagePromptTokens(val int) attribute.KeyValue {
+ return GenAiUsagePromptTokensKey.Int(val)
+}
+
+// Attributes for GraphQL.
+const (
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
+
+// Attributes for the Heroku platform.
+const (
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions.
It represents the unique identifier for the + // application + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit + // hash for the current release + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents + // the time and date the release was created + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2022-10-23T18:00:42Z' + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the +// "heroku.app.id" semantic conventions. It represents the unique identifier +// for the application +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming +// to the "heroku.release.creation_timestamp" semantic conventions. It +// represents the time and date the release was created +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// A host is defined as a computing instance. For example, physical servers, +// virtual machines, switches or disk array. +const ( + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + HostArchKey = attribute.Key("host.arch") + + // HostCPUCacheL2SizeKey is the attribute Key conforming to the + // "host.cpu.cache.l2.size" semantic conventions. It represents the amount + // of level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 12288000 + HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + + // HostCPUFamilyKey is the attribute Key conforming to the + // "host.cpu.family" semantic conventions. It represents the family or + // generation of the CPU. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '6', 'PA-RISC 1.1e' + HostCPUFamilyKey = attribute.Key("host.cpu.family") + + // HostCPUModelIDKey is the attribute Key conforming to the + // "host.cpu.model.id" semantic conventions. It represents the model + // identifier. It provides more granular information about the CPU, + // distinguishing it from other CPUs within the same family. 
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', '9000/778/B180L'
+ HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
+
+ // HostCPUModelNameKey is the attribute Key conforming to the
+ // "host.cpu.model.name" semantic conventions. It represents the model
+ // designation of the processor.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
+ HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
+
+ // HostCPUSteppingKey is the attribute Key conforming to the
+ // "host.cpu.stepping" semantic conventions. It represents the stepping or
+ // core revisions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1', 'r1p1'
+ HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+
+ // HostCPUVendorIDKey is the attribute Key conforming to the
+ // "host.cpu.vendor.id" semantic conventions. It represents the processor
+ // manufacturer identifier. A maximum 12-character string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'GenuineIntel'
+ // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
+ // ID string in EBX, EDX and ECX registers. Writing these to memory in this
+ // order results in a 12-character string.
+ HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // systems, this should be the `machine-id`. See the table below for the
+ // sources to use to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID or host OS image ID.
+ // For Cloud, this value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image or host OS as defined in [Version
+ // Attributes](/docs/resource/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+
+ // HostIPKey is the attribute Key conforming to the "host.ip" semantic
+ // conventions. It represents the available IP addresses of the host,
+ // excluding loopback interfaces.
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC + // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, + // excluding loopback interfaces. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal + // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): + // as hyphen-separated octets in uppercase hexadecimal form from most to + // least significant. + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or +// generation of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model +// identifier. It provides more granular information about the CPU, +// distinguishing it from other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. 
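+//
+// Illustrative usage (editor's sketch, not part of the upstream generated
+// file; the values are hypothetical): CPU attributes are typically emitted
+// together on a resource, e.g.
+//
+//	HostCPUVendorID("GenuineIntel")
+//	HostCPUFamily("6")
+//	HostCPUModelName("11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz")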
+func HostCPUModelName(val string) attribute.KeyValue {
+ return HostCPUModelNameKey.String(val)
+}
+
+// HostCPUStepping returns an attribute KeyValue conforming to the
+// "host.cpu.stepping" semantic conventions. It represents the stepping or core
+// revisions.
+func HostCPUStepping(val string) attribute.KeyValue {
+ return HostCPUSteppingKey.String(val)
+}
+
+// HostCPUVendorID returns an attribute KeyValue conforming to the
+// "host.cpu.vendor.id" semantic conventions. It represents the processor
+// manufacturer identifier. A maximum 12-character string.
+func HostCPUVendorID(val string) attribute.KeyValue {
+ return HostCPUVendorIDKey.String(val)
+}
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the VM image ID or host
+// OS image ID. For Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image or host OS as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+ return HostIPKey.StringSlice(val)
+}
+
+// HostMac returns an attribute KeyValue conforming to the "host.mac"
+// semantic conventions. It represents the available MAC addresses of the host,
+// excluding loopback interfaces.
+func HostMac(val ...string) attribute.KeyValue {
+ return HostMacKey.StringSlice(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// Semantic convention attributes in the HTTP namespace.
+const (
+ // HTTPConnectionStateKey is the attribute Key conforming to the
+ // "http.connection.state" semantic conventions. It represents the state of
+ // the HTTP connection in the HTTP connection pool.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'active', 'idle'
+ HTTPConnectionStateKey = attribute.Key("http.connection.state")
+
+ // HTTPRequestBodySizeKey is the attribute Key conforming to the
+ // "http.request.body.size" semantic conventions. It represents the size of
+ // the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3495
+ HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+ // HTTPRequestMethodKey is the attribute Key conforming to the
+ // "http.request.method" semantic conventions. It represents the HTTP
+ // request method.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ // Note: HTTP request method value SHOULD be "known" to the
+ // instrumentation.
+ // By default, this convention defines "known" methods as the ones listed
+ // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
+ // and the PATCH method defined in
+ // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
+ //
+ // If the HTTP request method is not known to instrumentation, it MUST set
+ // the `http.request.method` attribute to `_OTHER`.
+ //
+ // If the HTTP instrumentation could end up converting valid HTTP request
+ // methods to `_OTHER`, then it MUST provide a way to override
+ // the list of known HTTP methods. If this override is done via environment
+ // variable, then the environment variable MUST be named
+ // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
+ // list of case-sensitive known HTTP methods
+ // (this list MUST be a full override of the default known methods, it is
+ // not a list of known methods in addition to the defaults).
+ //
+ // HTTP method names are case-sensitive and `http.request.method` attribute
+ // value MUST match a known HTTP method name exactly.
+ // Instrumentations for specific web frameworks that consider HTTP methods
+ // to be case insensitive, SHOULD populate a canonical equivalent.
+ // Tracing instrumentations that do so, MUST also set
+ // `http.request.method_original` to the original value.
+ HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+ // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+ // "http.request.method_original" semantic conventions. It represents the
+ // original HTTP method sent by the client in the request line.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'GeT', 'ACL', 'foo'
+ HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+ // HTTPRequestResendCountKey is the attribute Key conforming to the
+ // "http.request.resend_count" semantic conventions. It represents the
+ // ordinal number of request resending attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of what was the cause of the resending
+ // (e.g.
redirection, authorization failure, 503 Service Unavailable, + // network issues, or any other cause). + HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") + + // HTTPRequestSizeKey is the attribute Key conforming to the + // "http.request.size" semantic conventions. It represents the total size + // of the request in bytes. This should be the total number of bytes sent + // over the wire, including the request line (HTTP/1.1), framing (HTTP/2 + // and HTTP/3), headers, and request body if any. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1437 + HTTPRequestSizeKey = attribute.Key("http.request.size") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size + // of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") + + // HTTPResponseSizeKey is the attribute Key conforming to the + // "http.response.size" semantic conventions. It represents the total size + // of the response in bytes. This should be the total number of bytes sent + // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and + // HTTP/3), headers, and response body and trailers if any. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1437 + HTTPResponseSizeKey = attribute.Key("http.response.size") + + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions. It represents the + // [HTTP response status + // code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 200 + HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. It represents the matched route, that is, the path + // template in the format used by the respective server framework. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP + // server framework as the route attribute should have low-cardinality and + // the URI path can NOT substitute for it. + // SHOULD include the [application + // root](/docs/http/http-spans.md#http-server-definitions) if there is one.
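+ // + // Illustrative usage (editor's sketch, not part of the upstream file; it + // assumes this package is imported as `semconv` and that `span` is the + // trace.Span for the matched server request): + // + // span.SetAttributes( + // semconv.HTTPRequestMethodGet, + // semconv.HTTPResponseStatusCode(200), + // semconv.HTTPRoute("/users/{id}"), + // )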
+ HTTPRouteKey = attribute.Key("http.route") +) + +var ( + // active state + HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") + // idle state + HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") +) + +var ( + // CONNECT method + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestResendCount returns an attribute KeyValue conforming to the +// "http.request.resend_count" semantic conventions. It represents the ordinal +// number of request resending attempt (for any reason, including redirects). +func HTTPRequestResendCount(val int) attribute.KeyValue { + return HTTPRequestResendCountKey.Int(val) +} + +// HTTPRequestSize returns an attribute KeyValue conforming to the +// "http.request.size" semantic conventions. It represents the total size of +// the request in bytes. This should be the total number of bytes sent over the +// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and request body if any. +func HTTPRequestSize(val int) attribute.KeyValue { + return HTTPRequestSizeKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of +// the response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// HTTPResponseSize returns an attribute KeyValue conforming to the +// "http.response.size" semantic conventions. 
It represents the total size of +// the response in bytes. This should be the total number of bytes sent over +// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and response body and trailers if any. +func HTTPResponseSize(val int) attribute.KeyValue { + return HTTPResponseSizeKey.Int(val) +} + +// HTTPResponseStatusCode returns an attribute KeyValue conforming to the +// "http.response.status_code" semantic conventions. It represents the [HTTP +// response status code](https://tools.ietf.org/html/rfc7231#section-6). +func HTTPResponseStatusCode(val int) attribute.KeyValue { + return HTTPResponseStatusCodeKey.Int(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route, that is, the path +// template in the format used by the respective server framework. +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Java Virtual Machine related attributes. +const ( + // JvmBufferPoolNameKey is the attribute Key conforming to the + // "jvm.buffer.pool.name" semantic conventions. It represents the name of + // the buffer pool. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mapped', 'direct' + // Note: Pool names are generally obtained via + // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). + JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") + + // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action" + // semantic conventions. It represents the name of the garbage collector + // action. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'end of minor GC', 'end of major GC' + // Note: Garbage collector action is generally obtained via + // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()). + JvmGcActionKey = attribute.Key("jvm.gc.action") + + // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name" + // semantic conventions. It represents the name of the garbage collector. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'G1 Young Generation', 'G1 Old Generation' + // Note: Garbage collector name is generally obtained via + // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()). + JvmGcNameKey = attribute.Key("jvm.gc.name") + + // JvmMemoryPoolNameKey is the attribute Key conforming to the + // "jvm.memory.pool.name" semantic conventions. It represents the name of + // the memory pool. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' + // Note: Pool names are generally obtained via + // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). + JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") + + // JvmMemoryTypeKey is the attribute Key conforming to the + // "jvm.memory.type" semantic conventions. It represents the type of + // memory.
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'heap', 'non_heap' + JvmMemoryTypeKey = attribute.Key("jvm.memory.type") + + // JvmThreadDaemonKey is the attribute Key conforming to the + // "jvm.thread.daemon" semantic conventions. It represents whether the + // thread is a daemon or not. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon") + + // JvmThreadStateKey is the attribute Key conforming to the + // "jvm.thread.state" semantic conventions. It represents the state of the + // thread. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'runnable', 'blocked' + JvmThreadStateKey = attribute.Key("jvm.thread.state") +) + +var ( + // Heap memory + JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap") + // Non-heap memory + JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap") +) + +var ( + // A thread that has not yet started is in this state + JvmThreadStateNew = JvmThreadStateKey.String("new") + // A thread executing in the Java virtual machine is in this state + JvmThreadStateRunnable = JvmThreadStateKey.String("runnable") + // A thread that is blocked waiting for a monitor lock is in this state + JvmThreadStateBlocked = JvmThreadStateKey.String("blocked") + // A thread that is waiting indefinitely for another thread to perform a particular action is in this state + JvmThreadStateWaiting = JvmThreadStateKey.String("waiting") + // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state + JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting") + // A thread that has exited is in this state + JvmThreadStateTerminated = JvmThreadStateKey.String("terminated") +) + +// JvmBufferPoolName returns an attribute KeyValue conforming to the +// "jvm.buffer.pool.name" semantic conventions. It represents the name of the +// buffer pool. +func JvmBufferPoolName(val string) attribute.KeyValue { + return JvmBufferPoolNameKey.String(val) +} + +// JvmGcAction returns an attribute KeyValue conforming to the +// "jvm.gc.action" semantic conventions. It represents the name of the garbage +// collector action. +func JvmGcAction(val string) attribute.KeyValue { + return JvmGcActionKey.String(val) +} + +// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name" +// semantic conventions. It represents the name of the garbage collector. +func JvmGcName(val string) attribute.KeyValue { + return JvmGcNameKey.String(val) +} + +// JvmMemoryPoolName returns an attribute KeyValue conforming to the +// "jvm.memory.pool.name" semantic conventions. It represents the name of the +// memory pool. +func JvmMemoryPoolName(val string) attribute.KeyValue { + return JvmMemoryPoolNameKey.String(val) +} + +// JvmThreadDaemon returns an attribute KeyValue conforming to the +// "jvm.thread.daemon" semantic conventions. It represents whether the +// thread is a daemon or not. +func JvmThreadDaemon(val bool) attribute.KeyValue { + return JvmThreadDaemonKey.Bool(val) +} + +// Kubernetes resource attributes. +const ( + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") + + // K8SClusterUIDKey is the attribute Key conforming to the + // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for + // the cluster, set to the UID of the `kube-system` namespace. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' + // Note: K8S doesn't have support for obtaining a cluster ID. If this is + // ever + // added, we will recommend collecting the `k8s.cluster.uid` through the + // official APIs. In the meantime, we are able to use the `uid` of the + // `kube-system` namespace as a proxy for cluster ID. Read on for the + // rationale. + // + // Every object created in a K8S cluster is assigned a distinct UID. The + // `kube-system` namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the `uid` of the `kube-system` + // namespace is a reasonable proxy for the K8S ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // [ISO/IEC 9834-8 and ITU-T + // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). + // Which states: + // + // > If generated according to one of the mechanisms defined in Rec. + // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // different from all other UUIDs generated before 3603 A.D., or is + // extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") + + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from the Pod specification; it must be unique within a Pod. + // Container runtime usually uses a different globally unique name + // (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") + + // K8SContainerStatusLastTerminatedReasonKey is the attribute Key + // conforming to the "k8s.container.status.last_terminated_reason" semantic + // conventions. It represents the last terminated reason of the Container. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Evicted', 'Error' + K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") + + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// K8SClusterUID returns an attribute KeyValue conforming to the +// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the +// cluster, set to the UID of the `kube-system` namespace. +func K8SClusterUID(val string) attribute.KeyValue { + return K8SClusterUIDKey.String(val) +} + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from the Pod specification; it must be unique within a Pod. +// Container runtime usually uses a different globally unique name +// (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec.
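+// +// Illustrative usage (editor's sketch, not part of the upstream file; `attrs` +// is a hypothetical attribute slice for a container that has restarted twice): +// +// attrs := []attribute.KeyValue{ +// semconv.K8SContainerName("redis"), +// semconv.K8SContainerRestartCount(2), +// }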
+func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue +// conforming to the "k8s.container.status.last_terminated_reason" semantic +// conventions. It represents the last terminated reason of the Container. +func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { + return K8SContainerStatusLastTerminatedReasonKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. 
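+// +// Illustrative usage (editor's sketch; assumes the OpenTelemetry SDK's +// `resource` package and that this package is imported as `semconv`): +// +// res := resource.NewWithAttributes(semconv.SchemaURL, +// semconv.K8SNamespaceName("default"), +// semconv.K8SPodName("opentelemetry-pod-autoconf"), +// semconv.K8SPodUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"), +// )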
+func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// Log attributes +const ( + // LogIostreamKey is the attribute Key conforming to the "log.iostream" + // semantic conventions. It represents the stream associated with the log. + // See below for a list of well-known values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + LogIostreamKey = attribute.Key("log.iostream") +) + +var ( + // Logs from stdout stream + LogIostreamStdout = LogIostreamKey.String("stdout") + // Events from stderr stream + LogIostreamStderr = LogIostreamKey.String("stderr") +) + +// Attributes for a file to which log was emitted. +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'audit.log' + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the + // basename of the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'uuid.log' + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/log/mysql/audit.log' + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full + // path to the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/lib/docker/uuid.log' + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") +) + +// LogFileName returns an attribute KeyValue conforming to the +// "log.file.name" semantic conventions. It represents the basename of the +// file. 
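+// +// Illustrative usage (editor's sketch; the values mirror the Examples noted +// above): +// +// attrs := []attribute.KeyValue{ +// semconv.LogFileName("audit.log"), +// semconv.LogFilePath("/var/log/mysql/audit.log"), +// }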
+func LogFileName(val string) attribute.KeyValue { + return LogFileNameKey.String(val) +} + +// LogFileNameResolved returns an attribute KeyValue conforming to the +// "log.file.name_resolved" semantic conventions. It represents the basename of +// the file, with symlinks resolved. +func LogFileNameResolved(val string) attribute.KeyValue { + return LogFileNameResolvedKey.String(val) +} + +// LogFilePath returns an attribute KeyValue conforming to the +// "log.file.path" semantic conventions. It represents the full path to the +// file. +func LogFilePath(val string) attribute.KeyValue { + return LogFilePathKey.String(val) +} + +// LogFilePathResolved returns an attribute KeyValue conforming to the +// "log.file.path_resolved" semantic conventions. It represents the full path +// to the file, with symlinks resolved. +func LogFilePathResolved(val string) attribute.KeyValue { + return LogFilePathResolvedKey.String(val) +} + +// The generic attributes that may be used in any Log Record. +const ( + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log + // Record. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means that two + // distinguishable log records MUST have different values. + // The id MAY be a [Universally Unique Lexicographically Sortable + // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers + // (e.g. UUID) may be used as needed. + LogRecordUIDKey = attribute.Key("log.record.uid") +) + +// LogRecordUID returns an attribute KeyValue conforming to the +// "log.record.uid" semantic conventions. It represents a unique identifier for +// the Log Record. +func LogRecordUID(val string) attribute.KeyValue { + return LogRecordUIDKey.String(val) +} + +// Attributes describing telemetry around messaging systems and messaging +// activities. +const ( + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") + + // MessagingClientIDKey is the attribute Key conforming to the + // "messaging.client.id" semantic conventions. It represents a unique + // identifier for the client that consumes or produces a message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'client-5', 'myhost@8742@s8083jm' + MessagingClientIDKey = attribute.Key("messaging.client.id") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions.
It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationPartitionIDKey is the attribute Key conforming to + // the "messaging.destination.partition.id" semantic conventions. It + // represents the identifier of the partition messages are sent to or + // received from, unique within the `messaging.destination.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1' + MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationPublishAnonymousKey is the attribute Key conforming + // to the "messaging.destination_publish.anonymous" semantic conventions. + // It represents a boolean that is true if the publish message destination + // is anonymous (could be unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") + + // MessagingDestinationPublishNameKey is the attribute Key conforming to + // the "messaging.destination_publish.name" semantic conventions. 
It + // represents the name of the original destination the message was + // published to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: The name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker doesn't have such notion, the original destination name + // SHOULD uniquely identify the broker. + MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") + + // MessagingMessageBodySizeKey is the attribute Key conforming to the + // "messaging.message.body.size" semantic conventions. It represents the + // size of the message body in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1439 + // Note: This can refer to either the compressed or uncompressed body size. + // If both sizes are known, the uncompressed + // body size should be used. + MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the conversation ID identifying the conversation to which the message + // belongs, represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the + // "messaging.message.envelope.size" semantic conventions. It represents + // the size of the message body and metadata in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2738 + // Note: This can refer to either the compressed or uncompressed size. If + // both sizes are known, the uncompressed + // size should be used. + MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") + + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingOperationNameKey is the attribute Key conforming to the + // "messaging.operation.name" semantic conventions. It represents the + // system-specific name of the messaging operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ack', 'nack', 'send' + MessagingOperationNameKey = attribute.Key("messaging.operation.name") + + // MessagingOperationTypeKey is the attribute Key conforming to the + // "messaging.operation.type" semantic conventions. It represents a string + // identifying the type of the messaging operation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationTypeKey = attribute.Key("messaging.operation.type") + + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents the messaging + // system as identified by the client instrumentation.
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The actual messaging system may differ from the one known by the + // client. For example, when using Kafka client libraries to communicate + // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on + // the instrumentation's best knowledge. + MessagingSystemKey = attribute.Key("messaging.system") +) + +var ( + // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created + MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") + // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios + MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") + // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages + MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") + // One or more messages are delivered to or processed by a consumer + MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process") + // One or more messages are settled + MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") +) + +var ( + // Apache ActiveMQ + MessagingSystemActivemq = MessagingSystemKey.String("activemq") + // Amazon Simple Queue Service (SQS) + MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid") + // Azure Event Hubs + MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs") + // Azure Service Bus + MessagingSystemServicebus = MessagingSystemKey.String("servicebus") + // Google Cloud Pub/Sub + MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + MessagingSystemJms = MessagingSystemKey.String("jms") + // Apache Kafka + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client.id" semantic conventions. It represents a unique +// identifier for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). 
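+// +// Illustrative usage (editor's sketch, not part of the upstream file; `span` +// is assumed to be a producer span publishing to a temporary, auto-named +// reply queue): +// +// span.SetAttributes( +// semconv.MessagingSystemRabbitmq, +// semconv.MessagingDestinationAnonymous(true), +// semconv.MessagingDestinationTemporary(true), +// )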
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationPartitionID returns an attribute KeyValue conforming +// to the "messaging.destination.partition.id" semantic conventions. It +// represents the identifier of the partition messages are sent to or received +// from, unique within the `messaging.destination.name`. +func MessagingDestinationPartitionID(val string) attribute.KeyValue { + return MessagingDestinationPartitionIDKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationPublishAnonymous returns an attribute KeyValue +// conforming to the "messaging.destination_publish.anonymous" semantic +// conventions. It represents a boolean that is true if the publish message +// destination is anonymous (could be unnamed or have auto-generated name). +func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationPublishAnonymousKey.Bool(val) +} + +// MessagingDestinationPublishName returns an attribute KeyValue conforming +// to the "messaging.destination_publish.name" semantic conventions. It +// represents the name of the original destination the message was published to +func MessagingDestinationPublishName(val string) attribute.KeyValue { + return MessagingDestinationPublishNameKey.String(val) +} + +// MessagingMessageBodySize returns an attribute KeyValue conforming to the +// "messaging.message.body.size" semantic conventions. It represents the size +// of the message body in bytes. +func MessagingMessageBodySize(val int) attribute.KeyValue { + return MessagingMessageBodySizeKey.Int(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the conversation ID identifying the conversation to which the +// message belongs, represented as a string. Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to +// the "messaging.message.envelope.size" semantic conventions. It represents +// the size of the message body and metadata in bytes. 
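+// +// Illustrative usage (editor's sketch; per the note above, the uncompressed +// sizes are preferred when both are known): +// +// span.SetAttributes( +// semconv.MessagingMessageBodySize(1439), +// semconv.MessagingMessageEnvelopeSize(2738), +// )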
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { + return MessagingMessageEnvelopeSizeKey.Int(val) +} + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingOperationName returns an attribute KeyValue conforming to the +// "messaging.operation.name" semantic conventions. It represents the +// system-specific name of the messaging operation. +func MessagingOperationName(val string) attribute.KeyValue { + return MessagingOperationNameKey.String(val) +} + +// This group describes attributes specific to Apache Kafka. +const ( + // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the + // "messaging.kafka.consumer.group" semantic conventions. It represents the + // name of the Kafka Consumer Group that is handling the message. Only + // applies to consumers, not producers. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") + + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the + // message keys in Kafka that are used for grouping alike messages to + // ensure they're processed on the same partition. They differ from + // `messaging.message.id` in that they're not unique. If the key is `null`, + // the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myKey' + // Note: If the key type is not string, its string representation has to + // be supplied for the attribute. If the key has no unambiguous, canonical + // string form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the + // "messaging.kafka.message.offset" semantic conventions. It represents the + // offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. It represents + // a boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") +) + +// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to +// the "messaging.kafka.consumer.group" semantic conventions. It represents the +// name of the Kafka Consumer Group that is handling the message. Only applies +// to consumers, not producers. +func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { + return MessagingKafkaConsumerGroupKey.String(val) +} + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the +// message keys in Kafka that are used for grouping alike messages to ensure +// they're processed on the same partition.
They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition. +func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// This group describes attributes specific to RabbitMQ. +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the RabbitMQ message routing key. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") + + // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming + // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. + // It represents the RabbitMQ message delivery tag. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 123 + MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") +) + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the RabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic +// conventions. It represents the RabbitMQ message delivery tag. +func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue { + return MessagingRabbitmqMessageDeliveryTagKey.Int(val) +} + +// This group describes attributes specific to RocketMQ. +const ( + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans.
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") + + // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delay_time_level" semantic + // conventions. It represents the delay time level for a delay message, + // which determines the message delay time. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3 + MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delivery_timestamp" + // semantic conventions. It represents the timestamp in milliseconds that + // the delay message is expected to be delivered to the consumer. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1665987217045 + MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents + // the message group, which is essential for FIFO messages. Messages that + // belong to the same message group are always processed one by one within + // the same consumer group. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myMessageGroup' + MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents + // the key(s) of the message, another way to mark a message besides the + // message ID. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketmqMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of the message besides the topic. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents + // the type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources; resources in different namespaces are + // individual.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.client_group" semantic conventions. It represents +// the name of the RocketMQ producer/consumer group that is handling the +// message. The client type is identified by the SpanKind. +func MessagingRocketmqClientGroup(val string) attribute.KeyValue { + return MessagingRocketmqClientGroupKey.String(val) +} + +// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for delay message, which +// determines the message delay time. +func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to consumer. +func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { + return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) +} + +// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.group" semantic conventions. It represents +// the it is essential for FIFO message. Messages that belong to the same +// message group are always processed one by one within the same consumer +// group. +func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { + return MessagingRocketmqMessageGroupKey.String(val) +} + +// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.keys" semantic conventions. It represents +// the key(s) of message, another way to mark message besides message id. +func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketmqMessageKeysKey.StringSlice(val) +} + +// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. +func MessagingRocketmqMessageTag(val string) attribute.KeyValue { + return MessagingRocketmqMessageTagKey.String(val) +} + +// MessagingRocketmqNamespace returns an attribute KeyValue conforming to +// the "messaging.rocketmq.namespace" semantic conventions. 
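+
+// Editorial note: an illustrative, hypothetical combination of the RocketMQ
+// helpers in this section, not part of the generated conventions; values come
+// from the Examples in the attribute documentation above.
+//
+//	attrs := []attribute.KeyValue{
+//		MessagingRocketmqClientGroup("myConsumerGroup"),
+//		MessagingRocketmqMessageTag("tagA"),
+//		MessagingRocketmqMessageKeys("keyA", "keyB"),
+//	}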
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// individual.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+	return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// This group describes attributes specific to GCP Pub/Sub.
+const (
+	// MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming
+	// to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions.
+	// It represents the ack deadline in seconds set for the modify ack
+	// deadline request.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 10
+	MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline")
+
+	// MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the
+	// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
+	// represents the ack id for a given message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ack_id'
+	MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id")
+
+	// MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key
+	// conforming to the "messaging.gcp_pubsub.message.delivery_attempt"
+	// semantic conventions. It represents the delivery attempt for a given
+	// message.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 2
+	MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt")
+
+	// MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming
+	// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions.
+	// It represents the ordering key for a given message. If the attribute is
+	// not present, the message does not have an ordering key.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ordering_key'
+	MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+)
+
+// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic
+// conventions. It represents the ack deadline in seconds set for the modify
+// ack deadline request.
+func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue {
+	return MessagingGCPPubsubMessageAckDeadlineKey.Int(val)
+}
+
+// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
+// represents the ack id for a given message.
+func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue {
+	return MessagingGCPPubsubMessageAckIDKey.String(val)
+}
+
+// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic
+// conventions. It represents the delivery attempt for a given message.
+func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue {
+	return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val)
+}
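+
+// Editorial note: an illustrative, hypothetical combination of the GCP
+// Pub/Sub helpers in this section, not part of the generated conventions;
+// values come from the documented Examples.
+//
+//	attrs := []attribute.KeyValue{
+//		MessagingGCPPubsubMessageAckID("ack_id"),
+//		MessagingGCPPubsubMessageDeliveryAttempt(2),
+//	}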
+
+// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic
+// conventions. It represents the ordering key for a given message. If the
+// attribute is not present, the message does not have an ordering key.
+func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
+	return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
+}
+
+// This group describes attributes specific to Azure Service Bus.
+const (
+	// MessagingServicebusDestinationSubscriptionNameKey is the attribute Key
+	// conforming to the "messaging.servicebus.destination.subscription_name"
+	// semantic conventions. It represents the name of the subscription in the
+	// topic messages are received from.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'mySubscription'
+	MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name")
+
+	// MessagingServicebusDispositionStatusKey is the attribute Key conforming
+	// to the "messaging.servicebus.disposition_status" semantic conventions.
+	// It describes the [settlement
+	// type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
+
+	// MessagingServicebusMessageDeliveryCountKey is the attribute Key
+	// conforming to the "messaging.servicebus.message.delivery_count" semantic
+	// conventions. It represents the number of deliveries that have been
+	// attempted for this message.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 2
+	MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
+
+	// MessagingServicebusMessageEnqueuedTimeKey is the attribute Key
+	// conforming to the "messaging.servicebus.message.enqueued_time" semantic
+	// conventions. It represents the UTC epoch seconds at which the message
+	// has been accepted and stored in the entity.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1701393730
+	MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
+)
+
+var (
+	// Message is completed
+	MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete")
+	// Message is abandoned
+	MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon")
+	// Message is sent to dead letter queue
+	MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter")
+	// Message is deferred
+	MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer")
+)
+
+// MessagingServicebusDestinationSubscriptionName returns an attribute
+// KeyValue conforming to the
+// "messaging.servicebus.destination.subscription_name" semantic conventions.
+// It represents the name of the subscription in the topic messages are
+// received from.
+func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue {
+	return MessagingServicebusDestinationSubscriptionNameKey.String(val)
+}
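+
+// Editorial note: an illustrative, hypothetical combination of the Azure
+// Service Bus attributes in this section, not part of the generated
+// conventions, pairing a helper with one of the disposition-status enum
+// values defined above.
+//
+//	attrs := []attribute.KeyValue{
+//		MessagingServicebusDestinationSubscriptionName("mySubscription"),
+//		MessagingServicebusDispositionStatusComplete,
+//	}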
+
+// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.delivery_count" semantic
+// conventions. It represents the number of deliveries that have been attempted
+// for this message.
+func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue {
+	return MessagingServicebusMessageDeliveryCountKey.Int(val)
+}
+
+// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.enqueued_time" semantic
+// conventions. It represents the UTC epoch seconds at which the message has
+// been accepted and stored in the entity.
+func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue {
+	return MessagingServicebusMessageEnqueuedTimeKey.Int(val)
+}
+
+// This group describes attributes specific to Azure Event Hubs.
+const (
+	// MessagingEventhubsConsumerGroupKey is the attribute Key conforming to
+	// the "messaging.eventhubs.consumer.group" semantic conventions. It
+	// represents the name of the consumer group the event consumer is
+	// associated with.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'indexer'
+	MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group")
+
+	// MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming
+	// to the "messaging.eventhubs.message.enqueued_time" semantic conventions.
+	// It represents the UTC epoch seconds at which the message has been
+	// accepted and stored in the entity.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1701393730
+	MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time")
+)
+
+// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming
+// to the "messaging.eventhubs.consumer.group" semantic conventions. It
+// represents the name of the consumer group the event consumer is associated
+// with.
+func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue {
+	return MessagingEventhubsConsumerGroupKey.String(val)
+}
+
+// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue
+// conforming to the "messaging.eventhubs.message.enqueued_time" semantic
+// conventions. It represents the UTC epoch seconds at which the message has
+// been accepted and stored in the entity.
+func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue {
+	return MessagingEventhubsMessageEnqueuedTimeKey.Int(val)
+}
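+
+// Editorial note: an illustrative, hypothetical combination of the Azure
+// Event Hubs helpers above, not part of the generated conventions; values
+// come from the documented Examples.
+//
+//	attrs := []attribute.KeyValue{
+//		MessagingEventhubsConsumerGroup("indexer"),
+//		MessagingEventhubsMessageEnqueuedTime(1701393730),
+//	}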
+
+// These attributes may be used for any network-related operation.
+const (
+	// NetworkCarrierIccKey is the attribute Key conforming to the
+	// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+	// alpha-2 2-character country code associated with the mobile carrier
+	// network.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'DE'
+	NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
+
+	// NetworkCarrierMccKey is the attribute Key conforming to the
+	// "network.carrier.mcc" semantic conventions. It represents the mobile
+	// carrier country code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '310'
+	NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
+
+	// NetworkCarrierMncKey is the attribute Key conforming to the
+	// "network.carrier.mnc" semantic conventions. It represents the mobile
+	// carrier network code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '001'
+	NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+	// NetworkCarrierNameKey is the attribute Key conforming to the
+	// "network.carrier.name" semantic conventions. It represents the name of
+	// the mobile carrier.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'sprint'
+	NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+	// NetworkConnectionSubtypeKey is the attribute Key conforming to the
+	// "network.connection.subtype" semantic conventions. It describes more
+	// details regarding the connection.type. It may be the type of cell
+	// technology connection, but it could be used for describing details about
+	// a wifi connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'LTE'
+	NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+	// NetworkConnectionTypeKey is the attribute Key conforming to the
+	// "network.connection.type" semantic conventions. It represents the
+	// internet connection type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'wifi'
+	NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+	// NetworkIoDirectionKey is the attribute Key conforming to the
+	// "network.io.direction" semantic conventions. It represents the network
+	// IO operation direction.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'transmit'
+	NetworkIoDirectionKey = attribute.Key("network.io.direction")
+
+	// NetworkLocalAddressKey is the attribute Key conforming to the
+	// "network.local.address" semantic conventions. It represents the local
+	// address of the network connection - IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10.1.2.80', '/tmp/my.sock'
+	NetworkLocalAddressKey = attribute.Key("network.local.address")
+
+	// NetworkLocalPortKey is the attribute Key conforming to the
+	// "network.local.port" semantic conventions. It represents the local port
+	// number of the network connection.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	NetworkLocalPortKey = attribute.Key("network.local.port")
+
+	// NetworkPeerAddressKey is the attribute Key conforming to the
+	// "network.peer.address" semantic conventions. It represents the peer
+	// address of the network connection - IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10.1.2.80', '/tmp/my.sock'
+	NetworkPeerAddressKey = attribute.Key("network.peer.address")
+
+	// NetworkPeerPortKey is the attribute Key conforming to the
+	// "network.peer.port" semantic conventions. It represents the peer port
+	// number of the network connection.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	NetworkPeerPortKey = attribute.Key("network.peer.port")
+
+	// NetworkProtocolNameKey is the attribute Key conforming to the
+	// "network.protocol.name" semantic conventions. It represents the [OSI
+	// application layer](https://osi-model.com/application-layer/) or non-OSI
+	// equivalent.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + // Note: The value SHOULD be normalized to lowercase. + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the + // actual version of the protocol used for network communication. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.1', '2' + // Note: If protocol version is subject to negotiation (for example using + // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute + // SHOULD be set to the negotiated version. If the actual protocol version + // is not known, this attribute SHOULD NOT be set. + NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the [OSI + // transport layer](https://osi-model.com/transport-layer/) or + // [inter-process communication + // method](https://wikipedia.org/wiki/Inter-process_communication). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port + // 12345. + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" + // semantic conventions. It represents the [OSI network + // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ipv4', 'ipv6' + // Note: The value SHOULD be normalized to lowercase. + NetworkTypeKey = attribute.Key("network.type") +) + +var ( + // GPRS + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +var ( + // wifi + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +var ( + // transmit + NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") + // receive + NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") +) + +var ( + // TCP + NetworkTransportTCP = NetworkTransportKey.String("tcp") + // UDP + NetworkTransportUDP = NetworkTransportKey.String("udp") + // Named or anonymous pipe + NetworkTransportPipe = NetworkTransportKey.String("pipe") + // Unix domain socket + NetworkTransportUnix = NetworkTransportKey.String("unix") +) + +var ( + // IPv4 + NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") + // IPv6 + NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") +) + +// NetworkCarrierIcc returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierIcc(val string) attribute.KeyValue { + return NetworkCarrierIccKey.String(val) +} + +// NetworkCarrierMcc returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMcc(val string) attribute.KeyValue { + return NetworkCarrierMccKey.String(val) +} + +// NetworkCarrierMnc returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMnc(val string) attribute.KeyValue { + return NetworkCarrierMncKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local +// address of the network connection - IP address or Unix domain socket name. 
+func NetworkLocalAddress(val string) attribute.KeyValue {
+	return NetworkLocalAddressKey.String(val)
+}
+
+// NetworkLocalPort returns an attribute KeyValue conforming to the
+// "network.local.port" semantic conventions. It represents the local port
+// number of the network connection.
+func NetworkLocalPort(val int) attribute.KeyValue {
+	return NetworkLocalPortKey.Int(val)
+}
+
+// NetworkPeerAddress returns an attribute KeyValue conforming to the
+// "network.peer.address" semantic conventions. It represents the peer address
+// of the network connection - IP address or Unix domain socket name.
+func NetworkPeerAddress(val string) attribute.KeyValue {
+	return NetworkPeerAddressKey.String(val)
+}
+
+// NetworkPeerPort returns an attribute KeyValue conforming to the
+// "network.peer.port" semantic conventions. It represents the peer port number
+// of the network connection.
+func NetworkPeerPort(val int) attribute.KeyValue {
+	return NetworkPeerPortKey.Int(val)
+}
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the [OSI
+// application layer](https://osi-model.com/application-layer/) or non-OSI
+// equivalent.
+func NetworkProtocolName(val string) attribute.KeyValue {
+	return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the actual
+// version of the protocol used for network communication.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+	return NetworkProtocolVersionKey.String(val)
+}
+
+// An OCI image manifest.
+const (
+	// OciManifestDigestKey is the attribute Key conforming to the
+	// "oci.manifest.digest" semantic conventions. It represents the digest of
+	// the OCI image manifest. For container images specifically, this is the
+	// digest by which the container image is known.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
+	// Note: Follows [OCI Image Manifest
+	// Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
+	// and specifically the [Digest
+	// property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
+	// An example can be found in [Example Image
+	// Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
+	OciManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OciManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically, this is the digest by
+// which the container image is known.
+func OciManifestDigest(val string) attribute.KeyValue {
+	return OciManifestDigestKey.String(val)
+}
+
+// Attributes used by the OpenTracing Shim layer.
+const (
+	// OpentracingRefTypeKey is the attribute Key conforming to the
+	// "opentracing.ref_type" semantic conventions. It represents the
+	// parent-child Reference type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The causal relationship between a child Span and a parent Span.
+	OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+	// The parent Span depends on the child Span in some capacity
+	OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+	// The parent Span doesn't depend in any way on the result of the child Span
+	OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+	// OSBuildIDKey is the attribute Key conforming to the "os.build_id"
+	// semantic conventions. It represents the unique identifier for a
+	// particular build or compilation of the operating system.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
+	OSBuildIDKey = attribute.Key("os.build_id")
+
+	// OSDescriptionKey is the attribute Key conforming to the "os.description"
+	// semantic conventions. It represents the human-readable (not intended to
+	// be parsed) OS version information, e.g. as reported by the `ver` or
+	// `lsb_release -a` commands.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+	// LTS'
+	OSDescriptionKey = attribute.Key("os.description")
+
+	// OSNameKey is the attribute Key conforming to the "os.name" semantic
+	// conventions. It represents the human-readable operating system name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'iOS', 'Android', 'Ubuntu'
+	OSNameKey = attribute.Key("os.name")
+
+	// OSTypeKey is the attribute Key conforming to the "os.type" semantic
+	// conventions. It represents the operating system type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	OSTypeKey = attribute.Key("os.type")
+
+	// OSVersionKey is the attribute Key conforming to the "os.version"
+	// semantic conventions. It represents the version string of the operating
+	// system as defined in [Version
+	// Attributes](/docs/resource/README.md#version-attributes).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '14.2.1', '18.04.1'
+	OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+	// Microsoft Windows
+	OSTypeWindows = OSTypeKey.String("windows")
+	// Linux
+	OSTypeLinux = OSTypeKey.String("linux")
+	// Apple Darwin
+	OSTypeDarwin = OSTypeKey.String("darwin")
+	// FreeBSD
+	OSTypeFreeBSD = OSTypeKey.String("freebsd")
+	// NetBSD
+	OSTypeNetBSD = OSTypeKey.String("netbsd")
+	// OpenBSD
+	OSTypeOpenBSD = OSTypeKey.String("openbsd")
+	// DragonFly BSD
+	OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+	// HP-UX (Hewlett Packard Unix)
+	OSTypeHPUX = OSTypeKey.String("hpux")
+	// AIX (Advanced Interactive eXecutive)
+	OSTypeAIX = OSTypeKey.String("aix")
+	// SunOS, Oracle Solaris
+	OSTypeSolaris = OSTypeKey.String("solaris")
+	// IBM z/OS
+	OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the operating system.
+func OSBuildID(val string) attribute.KeyValue {
+	return OSBuildIDKey.String(val)
+}
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human-readable (not
+// intended to be parsed) OS version information, e.g. as reported by the `ver`
+// or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+	return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human-readable operating system name.
+func OSName(val string) attribute.KeyValue {
+	return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+	return OSVersionKey.String(val)
+}
+
+// Attributes reserved for OpenTelemetry
+const (
+	// OTelStatusCodeKey is the attribute Key conforming to the
+	// "otel.status_code" semantic conventions. It represents the name of the
+	// code, either "OK" or "ERROR". MUST NOT be set if the status code is
+	// UNSET.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+	// OTelStatusDescriptionKey is the attribute Key conforming to the
+	// "otel.status_description" semantic conventions. It represents the
+	// description of the Status if it has a value, otherwise not set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'resource not found'
+	OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+	// The operation has been validated by an Application developer or Operator to have completed successfully
+	OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+	// The operation contains an error
+	OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+	return OTelStatusDescriptionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+	// OTelScopeNameKey is the attribute Key conforming to the
+	// "otel.scope.name" semantic conventions. It represents the name of the
+	// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'io.opentelemetry.contrib.mongodb'
+	OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+	// OTelScopeVersionKey is the attribute Key conforming to the
+	// "otel.scope.version" semantic conventions. It represents the version of
+	// the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '1.0.0'
+	OTelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+	return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+	return OTelScopeVersionKey.String(val)
+}
+
+// Operations that access some remote service.
+const (
+	// PeerServiceKey is the attribute Key conforming to the "peer.service"
+	// semantic conventions. It represents the
+	// [`service.name`](/docs/resource/README.md#service) of the remote
+	// service. SHOULD be equal to the actual `service.name` resource attribute
+	// of the remote service if any.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'AuthTokenCache'
+	PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](/docs/resource/README.md#service) of the remote service.
+// SHOULD be equal to the actual `service.name` resource attribute of the
+// remote service if any.
+func PeerService(val string) attribute.KeyValue {
+	return PeerServiceKey.String(val)
+}
+
+// An operating system process.
+const (
+	// ProcessCommandKey is the attribute Key conforming to the
+	// "process.command" semantic conventions. It represents the command used
+	// to launch the process (i.e. the command name). On Linux based systems,
+	// can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+	// be set to the first parameter extracted from `GetCommandLineW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'cmd/otelcol'
+	ProcessCommandKey = attribute.Key("process.command")
+
+	// ProcessCommandArgsKey is the attribute Key conforming to the
+	// "process.command_args" semantic conventions. It represents all the
+	// command arguments (including the command/executable itself) as received
+	// by the process. On Linux-based systems (and some other Unixoid systems
+	// supporting procfs), can be set according to the list of null-delimited
+	// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+	// this would be the full argv vector passed to `main`.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'cmd/otecol', '--config=config.yaml'
+	ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+	// ProcessCommandLineKey is the attribute Key conforming to the
+	// "process.command_line" semantic conventions. It represents the full
+	// command used to launch the process as a single string representing the
+	// full command. On Windows, can be set to the result of `GetCommandLineW`.
+	// Do not set this if you have to assemble it just for monitoring; use
+	// `process.command_args` instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
+	ProcessCommandLineKey = attribute.Key("process.command_line")
+
+	// ProcessContextSwitchTypeKey is the attribute Key conforming to the
+	// "process.context_switch_type" semantic conventions. It specifies whether
+	// the context switches for this data point were voluntary or involuntary.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
+
+	// ProcessCreationTimeKey is the attribute Key conforming to the
+	// "process.creation.time" semantic conventions. It represents the date and
+	// time the process was created, in ISO 8601 format.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2023-11-21T09:25:34.853Z'
+	ProcessCreationTimeKey = attribute.Key("process.creation.time")
+
+	// ProcessExecutableNameKey is the attribute Key conforming to the
+	// "process.executable.name" semantic conventions. It represents the name
+	// of the process executable. On Linux based systems, can be set to the
+	// `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+	// of `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcol'
+	ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+	// ProcessExecutablePathKey is the attribute Key conforming to the
+	// "process.executable.path" semantic conventions. It represents the full
+	// path to the process executable. On Linux based systems, can be set to
+	// the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+	// `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/usr/bin/cmd/otelcol'
+	ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+	// ProcessExitCodeKey is the attribute Key conforming to the
+	// "process.exit.code" semantic conventions. It represents the exit code of
+	// the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 127
+	ProcessExitCodeKey = attribute.Key("process.exit.code")
+
+	// ProcessExitTimeKey is the attribute Key conforming to the
+	// "process.exit.time" semantic conventions. It represents the date and
+	// time the process exited, in ISO 8601 format.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2023-11-21T09:26:12.315Z'
+	ProcessExitTimeKey = attribute.Key("process.exit.time")
+
+	// ProcessGroupLeaderPIDKey is the attribute Key conforming to the
+	// "process.group_leader.pid" semantic conventions. It represents the PID
+	// of the process's group leader. This is also the process group ID (PGID)
+	// of the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 23
+	ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
+
+	// ProcessInteractiveKey is the attribute Key conforming to the
+	// "process.interactive" semantic conventions. It represents whether
+	// the process is connected to an interactive shell.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	ProcessInteractiveKey = attribute.Key("process.interactive")
+
+	// ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+	// semantic conventions. It represents the username of the user that owns
+	// the process.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'root'
+	ProcessOwnerKey = attribute.Key("process.owner")
+
+	// ProcessPagingFaultTypeKey is the attribute Key conforming to the
+	// "process.paging.fault_type" semantic conventions.
It represents the type + // of page fault for this data point. Type `major` is for major/hard page + // faults, and `minor` is for minor/soft page faults. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PPID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRealUserIDKey is the attribute Key conforming to the + // "process.real_user.id" semantic conventions. It represents the real user + // ID (RUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000 + ProcessRealUserIDKey = attribute.Key("process.real_user.id") + + // ProcessRealUserNameKey is the attribute Key conforming to the + // "process.real_user.name" semantic conventions. It represents the + // username of the real user of the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'operator' + ProcessRealUserNameKey = attribute.Key("process.real_user.name") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessSavedUserIDKey is the attribute Key conforming to the + // "process.saved_user.id" semantic conventions. It represents the saved + // user ID (SUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1002 + ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") + + // ProcessSavedUserNameKey is the attribute Key conforming to the + // "process.saved_user.name" semantic conventions. It represents the + // username of the saved user. 
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'operator'
+	ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
+
+	// ProcessSessionLeaderPIDKey is the attribute Key conforming to the
+	// "process.session_leader.pid" semantic conventions. It represents the PID
+	// of the process's session leader. This is also the session ID (SID) of
+	// the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 14
+	ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
+
+	// ProcessUserIDKey is the attribute Key conforming to the
+	// "process.user.id" semantic conventions. It represents the effective user
+	// ID (EUID) of the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1001
+	ProcessUserIDKey = attribute.Key("process.user.id")
+
+	// ProcessUserNameKey is the attribute Key conforming to the
+	// "process.user.name" semantic conventions. It represents the username of
+	// the effective user of the process.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'root'
+	ProcessUserNameKey = attribute.Key("process.user.name")
+
+	// ProcessVpidKey is the attribute Key conforming to the "process.vpid"
+	// semantic conventions. It represents the virtual process identifier.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 12
+	// Note: The process ID within a PID namespace. This is not necessarily
+	// unique across all processes on the host but it is unique within the
+	// process namespace that the process exists within.
+	ProcessVpidKey = attribute.Key("process.vpid")
+)
+
+var (
+	// voluntary
+	ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
+	// involuntary
+	ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
+)
+
+var (
+	// major
+	ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
+	// minor
+	ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
+)
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+	return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+	return ProcessCommandArgsKey.StringSlice(val)
+}
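+
+// Editorial note: an illustrative, hypothetical combination of the process
+// command helpers above, not part of the generated conventions; values are
+// adapted from the documented Examples.
+//
+//	attrs := []attribute.KeyValue{
+//		ProcessCommand("cmd/otelcol"),
+//		ProcessCommandArgs("cmd/otelcol", "--config=config.yaml"),
+//	}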
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+	return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCreationTime returns an attribute KeyValue conforming to the
+// "process.creation.time" semantic conventions. It represents the date and
+// time the process was created, in ISO 8601 format.
+func ProcessCreationTime(val string) attribute.KeyValue {
+	return ProcessCreationTimeKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+	return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+	return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessExitCode returns an attribute KeyValue conforming to the
+// "process.exit.code" semantic conventions. It represents the exit code of the
+// process.
+func ProcessExitCode(val int) attribute.KeyValue {
+	return ProcessExitCodeKey.Int(val)
+}
+
+// ProcessExitTime returns an attribute KeyValue conforming to the
+// "process.exit.time" semantic conventions. It represents the date and time
+// the process exited, in ISO 8601 format.
+func ProcessExitTime(val string) attribute.KeyValue {
+	return ProcessExitTimeKey.String(val)
+}
+
+// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
+// "process.group_leader.pid" semantic conventions. It represents the PID of
+// the process's group leader. This is also the process group ID (PGID) of the
+// process.
+func ProcessGroupLeaderPID(val int) attribute.KeyValue {
+	return ProcessGroupLeaderPIDKey.Int(val)
+}
+
+// ProcessInteractive returns an attribute KeyValue conforming to the
+// "process.interactive" semantic conventions. It represents whether the
+// process is connected to an interactive shell.
+func ProcessInteractive(val bool) attribute.KeyValue {
+	return ProcessInteractiveKey.Bool(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+	return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+	return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+	return ProcessPIDKey.Int(val)
+}
+
+// ProcessRealUserID returns an attribute KeyValue conforming to the
+// "process.real_user.id" semantic conventions. It represents the real user ID
+// (RUID) of the process.
+func ProcessRealUserID(val int) attribute.KeyValue { + return ProcessRealUserIDKey.Int(val) +} + +// ProcessRealUserName returns an attribute KeyValue conforming to the +// "process.real_user.name" semantic conventions. It represents the username of +// the real user of the process. +func ProcessRealUserName(val string) attribute.KeyValue { + return ProcessRealUserNameKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessSavedUserID returns an attribute KeyValue conforming to the +// "process.saved_user.id" semantic conventions. It represents the saved user +// ID (SUID) of the process. +func ProcessSavedUserID(val int) attribute.KeyValue { + return ProcessSavedUserIDKey.Int(val) +} + +// ProcessSavedUserName returns an attribute KeyValue conforming to the +// "process.saved_user.name" semantic conventions. It represents the username +// of the saved user. +func ProcessSavedUserName(val string) attribute.KeyValue { + return ProcessSavedUserNameKey.String(val) +} + +// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the +// "process.session_leader.pid" semantic conventions. It represents the PID of +// the process's session leader. This is also the session ID (SID) of the +// process. +func ProcessSessionLeaderPID(val int) attribute.KeyValue { + return ProcessSessionLeaderPIDKey.Int(val) +} + +// ProcessUserID returns an attribute KeyValue conforming to the +// "process.user.id" semantic conventions. It represents the effective user ID +// (EUID) of the process. +func ProcessUserID(val int) attribute.KeyValue { + return ProcessUserIDKey.Int(val) +} + +// ProcessUserName returns an attribute KeyValue conforming to the +// "process.user.name" semantic conventions. It represents the username of the +// effective user of the process. +func ProcessUserName(val string) attribute.KeyValue { + return ProcessUserNameKey.String(val) +} + +// ProcessVpid returns an attribute KeyValue conforming to the +// "process.vpid" semantic conventions. It represents the virtual process +// identifier. +func ProcessVpid(val int) attribute.KeyValue { + return ProcessVpidKey.Int(val) +} + +// Attributes for process CPU +const ( + // ProcessCPUStateKey is the attribute Key conforming to the + // "process.cpu.state" semantic conventions. It represents the CPU state of + // the process. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessCPUStateKey = attribute.Key("process.cpu.state") +) + +var ( + // system + ProcessCPUStateSystem = ProcessCPUStateKey.String("system") + // user + ProcessCPUStateUser = ProcessCPUStateKey.String("user") + // wait + ProcessCPUStateWait = ProcessCPUStateKey.String("wait") +) + +// Attributes for remote procedure calls. +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes](https://connect.build/docs/protocol/#error-codes) of the + // Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") + + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // doesn't specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCMessageCompressedSizeKey is the attribute Key conforming to the + // "rpc.message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. 
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
+
+ // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
+ // semantic conventions. It represents the message ID, which MUST be
+ // calculated as two different counters starting from `1`: one for sent
+ // messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ RPCMessageIDKey = attribute.Key("rpc.message.id")
+
+ // RPCMessageTypeKey is the attribute Key conforming to the
+ // "rpc.message.type" semantic conventions. It represents whether this
+ // is a received or sent message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCMessageTypeKey = attribute.Key("rpc.message.type")
+
+ // RPCMessageUncompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method"
+ // semantic conventions. It represents the name of the (logical) method
+ // being called, which must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service"
+ // semantic conventions. It represents the full (logical) name of the
+ // service being called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // class. The `code.namespace` attribute may be used to store the latter
+ // (despite the attribute name, it may include a class name; e.g., class
+ // with method actually executing the call on the server side, RPC client
+ // stub class on the client side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system"
+ // semantic conventions. It represents a string identifying the remoting
+ // system. See below for a list of well-known identifiers.
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCSystemKey = attribute.Key("rpc.system") +) + +var ( + // cancelled + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +var ( + // sent + RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + // received + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = 
RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+ // Connect RPC
+ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+ return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+ return RPCJsonrpcErrorMessageKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of request or response. Since protocol allows id to be int, string,
+// `null` or missing (for notifications), value is expected to be cast to
+// string for simplicity. Use empty string in case of `null` value. Omit
+// entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+ return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// doesn't specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+ return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.compressed_size" semantic conventions. It represents the
+// compressed size of the message in bytes.
+func RPCMessageCompressedSize(val int) attribute.KeyValue {
+ return RPCMessageCompressedSizeKey.Int(val)
+}
+
+// RPCMessageID returns an attribute KeyValue conforming to the
+// "rpc.message.id" semantic conventions. It represents the message ID, which
+// MUST be calculated as two different counters starting from `1`: one for
+// sent messages and one for received messages.
+func RPCMessageID(val int) attribute.KeyValue {
+ return RPCMessageIDKey.Int(val)
+}
+
+// RPCMessageUncompressedSize returns an attribute KeyValue conforming to
+// the "rpc.message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func RPCMessageUncompressedSize(val int) attribute.KeyValue {
+ return RPCMessageUncompressedSizeKey.Int(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, which must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
+
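+// Example (editor's illustration, not part of the generated conventions):
+// annotating a gRPC server span with the RPC attributes defined above,
+// assuming a trace.Span `span` obtained from go.opentelemetry.io/otel/trace:
+//
+//	span.SetAttributes(
+//		semconv.RPCSystemGRPC,
+//		semconv.RPCService("myservice.EchoService"),
+//		semconv.RPCMethod("Echo"),
+//		semconv.RPCGRPCStatusCodeOk,
+//	)
+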
+// These attributes may be used to describe the server in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+ // ServerAddressKey is the attribute Key conforming to the "server.address"
+ // semantic conventions. It represents the server domain name if available
+ // without reverse DNS lookup; otherwise, IP address or Unix domain socket
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the client side, and when communicating through
+ // an intermediary, `server.address` SHOULD represent the server address
+ // behind any intermediaries, for example proxies, if it's available.
+ ServerAddressKey = attribute.Key("server.address")
+
+ // ServerPortKey is the attribute Key conforming to the "server.port"
+ // semantic conventions. It represents the server port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ // Note: When observed from the client side, and when communicating through
+ // an intermediary, `server.port` SHOULD represent the server port behind
+ // any intermediaries, for example proxies, if it's available.
+ ServerPortKey = attribute.Key("server.port")
+)
+
+// ServerAddress returns an attribute KeyValue conforming to the
+// "server.address" semantic conventions. It represents the server domain name
+// if available without reverse DNS lookup; otherwise, IP address or Unix
+// domain socket name.
+func ServerAddress(val string) attribute.KeyValue {
+ return ServerAddressKey.String(val)
+}
+
+// ServerPort returns an attribute KeyValue conforming to the "server.port"
+// semantic conventions. It represents the server port number.
+func ServerPort(val int) attribute.KeyValue {
+ return ServerPortKey.Int(val)
+}
+
+// A service instance.
+const (
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID
+ // of the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be
+ // globally unique). The ID helps to
+ // distinguish instances of the same service that exist at the same time
+ // (e.g. instances of a horizontally scaled
+ // service).
+ //
+ // Implementations, such as SDKs, are recommended to generate a random
+ // Version 1 or Version 4 [RFC
+ // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an
+ // inherent unique ID as the source of
+ // this value if stability is desirable. In that case, the ID SHOULD be
+ // used as source of a UUID Version 5 and
+ // SHOULD use the following UUID as the namespace:
+ // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`.
+ //
+ // UUIDs are typically recommended, as only an opaque value for the
+ // purposes of identifying a service instance is
+ // needed.
+ // Similar to what can be seen in the man page for the
+ // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html)
+ // file, the underlying
+ // data, such as pod name and namespace, should be treated as confidential;
+ // it is the user's choice to expose it
+ // or not via another resource attribute.
+ //
+ // For applications running behind an application server (like unicorn), we
+ // do not recommend using one identifier
+ // for all processes participating in the application. Instead, it's
+ // recommended that each division (e.g. a worker
+ // thread in unicorn) have its own instance.id.
+ //
+ // It's not recommended for a Collector to set `service.instance.id` if it
+ // can't unambiguously determine the
+ // service instance that is generating that telemetry. For instance,
+ // creating a UUID based on `pod.name` will
+ // likely be wrong, as the Collector might not know from which container
+ // within that pod the telemetry originated.
+ // However, Collectors can set the `service.instance.id` if they can
+ // unambiguously determine the service instance
+ // for that telemetry. This is typically the case for scraping receivers,
+ // as they know the target address and
+ // port.
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceNameKey is the attribute Key conforming to the "service.name"
+ // semantic conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled
+ // services. If the value was not specified, SDKs MUST fallback to
+ // `unknown_service:` concatenated with
+ // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If
+ // `process.executable.name` is not available, the value MUST be set to
+ // `unknown_service`.
+ ServiceNameKey = attribute.Key("service.name")
+
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group
+ // of services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name`
+ // is expected to be unique for all services that have no explicit
+ // namespace defined (so the empty/unspecified namespace is simply one more
+ // valid namespace). Zero-length namespace string is assumed equal to
+ // unspecified namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+
+ // ServiceVersionKey is the attribute Key conforming to the
+ // "service.version" semantic conventions. It represents the version string
+ // of the service API or implementation. The format is not defined by these
+ // conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2.0.0', 'a01dbef8a'
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
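+// Example (editor's illustration, not part of the generated conventions):
+// describing a service identity on an SDK resource, assuming
+// go.opentelemetry.io/otel/sdk/resource is available:
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.ServiceName("shoppingcart"),
+//		semconv.ServiceNamespace("Shop"),
+//		semconv.ServiceVersion("2.0.0"),
+//		semconv.ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
+//	)
+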
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation. The format is not defined by these
+// conventions.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
+
+// Session is defined as the period of time encompassing all activities
+// performed by the application and the actions executed by the end user.
+// Consequently, a Session is represented as a collection of Logs, Events, and
+// Spans emitted by the Client Application throughout the Session's duration.
+// Each Session is assigned a unique identifier, which is included as an
+// attribute in the Logs, Events, and Spans generated during the Session's
+// lifecycle.
+// When a session reaches end of life, typically due to user inactivity or
+// session timeout, a new session identifier will be assigned. The previous
+// session identifier may be provided by the instrumentation so that telemetry
+// backends can link the two sessions.
+const (
+ // SessionIDKey is the attribute Key conforming to the "session.id"
+ // semantic conventions. It represents a unique id to identify a session.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '00112233-4455-6677-8899-aabbccddeeff'
+ SessionIDKey = attribute.Key("session.id")
+
+ // SessionPreviousIDKey is the attribute Key conforming to the
+ // "session.previous_id" semantic conventions. It represents the previous
+ // `session.id` for this user, when known.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '00112233-4455-6677-8899-aabbccddeeff'
+ SessionPreviousIDKey = attribute.Key("session.previous_id")
+)
+
+// SessionID returns an attribute KeyValue conforming to the "session.id"
+// semantic conventions. It represents a unique id to identify a session.
+func SessionID(val string) attribute.KeyValue {
+ return SessionIDKey.String(val)
+}
+
+// SessionPreviousID returns an attribute KeyValue conforming to the
+// "session.previous_id" semantic conventions. It represents the previous
+// `session.id` for this user, when known.
+func SessionPreviousID(val string) attribute.KeyValue {
+ return SessionPreviousIDKey.String(val)
+}
+
+// SignalR attributes
+const (
+ // SignalrConnectionStatusKey is the attribute Key conforming to the
+ // "signalr.connection.status" semantic conventions. It represents the
+ // SignalR HTTP connection closure status.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'app_shutdown', 'timeout'
+ SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
+
+ // SignalrTransportKey is the attribute Key conforming to the
+ // "signalr.transport" semantic conventions.
It represents the [SignalR + // transport + // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'web_sockets', 'long_polling' + SignalrTransportKey = attribute.Key("signalr.transport") +) + +var ( + // The connection was closed normally + SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout + SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down + SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") +) + +var ( + // ServerSentEvents protocol + SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") + // LongPolling protocol + SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") + // WebSockets protocol + SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") +) + +// These attributes may be used to describe the sender of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the destination side, and when communicating + // through an intermediary, `source.address` SHOULD represent the source + // address behind any intermediaries, for example proxies, if it's + // available. + SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" + // semantic conventions. It represents the source port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceAddress returns an attribute KeyValue conforming to the +// "source.address" semantic conventions. It represents the source address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number +func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// Describes System attributes +const ( + // SystemDeviceKey is the attribute Key conforming to the "system.device" + // semantic conventions. 
It represents the device identifier + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '(identifier)' + SystemDeviceKey = attribute.Key("system.device") +) + +// SystemDevice returns an attribute KeyValue conforming to the +// "system.device" semantic conventions. It represents the device identifier +func SystemDevice(val string) attribute.KeyValue { + return SystemDeviceKey.String(val) +} + +// Describes System CPU attributes +const ( + // SystemCPULogicalNumberKey is the attribute Key conforming to the + // "system.cpu.logical_number" semantic conventions. It represents the + // logical CPU number [0..n-1] + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1 + SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") + + // SystemCPUStateKey is the attribute Key conforming to the + // "system.cpu.state" semantic conventions. It represents the state of the + // CPU + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'idle', 'interrupt' + SystemCPUStateKey = attribute.Key("system.cpu.state") +) + +var ( + // user + SystemCPUStateUser = SystemCPUStateKey.String("user") + // system + SystemCPUStateSystem = SystemCPUStateKey.String("system") + // nice + SystemCPUStateNice = SystemCPUStateKey.String("nice") + // idle + SystemCPUStateIdle = SystemCPUStateKey.String("idle") + // iowait + SystemCPUStateIowait = SystemCPUStateKey.String("iowait") + // interrupt + SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") + // steal + SystemCPUStateSteal = SystemCPUStateKey.String("steal") +) + +// SystemCPULogicalNumber returns an attribute KeyValue conforming to the +// "system.cpu.logical_number" semantic conventions. It represents the logical +// CPU number [0..n-1] +func SystemCPULogicalNumber(val int) attribute.KeyValue { + return SystemCPULogicalNumberKey.Int(val) +} + +// Describes System Memory attributes +const ( + // SystemMemoryStateKey is the attribute Key conforming to the + // "system.memory.state" semantic conventions. It represents the memory + // state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free', 'cached' + SystemMemoryStateKey = attribute.Key("system.memory.state") +) + +var ( + // used + SystemMemoryStateUsed = SystemMemoryStateKey.String("used") + // free + SystemMemoryStateFree = SystemMemoryStateKey.String("free") + // shared + SystemMemoryStateShared = SystemMemoryStateKey.String("shared") + // buffers + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Describes System Memory Paging attributes +const ( + // SystemPagingDirectionKey is the attribute Key conforming to the + // "system.paging.direction" semantic conventions. It represents the paging + // access direction + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'in' + SystemPagingDirectionKey = attribute.Key("system.paging.direction") + + // SystemPagingStateKey is the attribute Key conforming to the + // "system.paging.state" semantic conventions. 
It represents the memory + // paging state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free' + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory + // paging type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'minor' + SystemPagingTypeKey = attribute.Key("system.paging.type") +) + +var ( + // in + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +var ( + // used + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +var ( + // major + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Describes Filesystem attributes +const ( + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the + // filesystem mode + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'rw, ro' + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/mnt/data' + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the + // filesystem state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'used' + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. It represents the + // filesystem type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ext4' + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") +) + +var ( + // used + SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") + // free + SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") + // reserved + SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") +) + +var ( + // fat32 + SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") + // exfat + SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") + // ntfs + SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") + // refs + SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") + // hfsplus + SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") + // ext4 + SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") +) + +// SystemFilesystemMode returns an attribute KeyValue conforming to the +// "system.filesystem.mode" semantic conventions. 
It represents the filesystem
+// mode
+func SystemFilesystemMode(val string) attribute.KeyValue {
+ return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
+// the "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+ return SystemFilesystemMountpointKey.String(val)
+}
+
+// Describes Network attributes
+const (
+ // SystemNetworkStateKey is the attribute Key conforming to the
+ // "system.network.state" semantic conventions. It represents the state of
+ // a network connection. A stateless protocol MUST NOT set this attribute.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'close_wait'
+ SystemNetworkStateKey = attribute.Key("system.network.state")
+)
+
+var (
+ // close
+ SystemNetworkStateClose = SystemNetworkStateKey.String("close")
+ // close_wait
+ SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
+ // closing
+ SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
+ // delete
+ SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
+ // established
+ SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
+ // fin_wait_1
+ SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
+ // fin_wait_2
+ SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
+ // last_ack
+ SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
+ // listen
+ SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
+ // syn_recv
+ SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
+ // syn_sent
+ SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
+ // time_wait
+ SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
+)
+
+// Describes System Process attributes
+const (
+ // SystemProcessStatusKey is the attribute Key conforming to the
+ // "system.process.status" semantic conventions. It represents the process
+ // state, e.g., [Linux Process State
+ // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'running'
+ SystemProcessStatusKey = attribute.Key("system.process.status")
+)
+
+var (
+ // running
+ SystemProcessStatusRunning = SystemProcessStatusKey.String("running")
+ // sleeping
+ SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping")
+ // stopped
+ SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped")
+ // defunct
+ SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct")
+)
+
+// Attributes for telemetry SDK.
+const (
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the
+ // language of the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
+ // to `opentelemetry`.
+ // If another SDK, like a fork or a vendor-provided implementation, is + // used, this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module + // name of this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this + // case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of + // the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set + // the `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the + // version string of the auto instrumentation agent or distribution, if + // used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2.3' + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. 
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
+
+// TelemetryDistroName returns an attribute KeyValue conforming to the
+// "telemetry.distro.name" semantic conventions. It represents the name of the
+// auto instrumentation agent or distribution, if used.
+func TelemetryDistroName(val string) attribute.KeyValue {
+ return TelemetryDistroNameKey.String(val)
+}
+
+// TelemetryDistroVersion returns an attribute KeyValue conforming to the
+// "telemetry.distro.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent or distribution, if used.
+func TelemetryDistroVersion(val string) attribute.KeyValue {
+ return TelemetryDistroVersionKey.String(val)
+}
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
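+// Example (editor's illustration, not part of the generated conventions):
+// recording the originating thread on a span; Go has no "managed" thread ID,
+// so the values below are purely illustrative:
+//
+//	span.SetAttributes(
+//		semconv.ThreadID(42),
+//		semconv.ThreadName("main"),
+//	)
+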
+// Semantic convention attributes in the TLS namespace.
+const (
+ // TLSCipherKey is the attribute Key conforming to the "tls.cipher"
+ // semantic conventions. It represents the string indicating the
+ // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
+ // used during the current connection.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
+ // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
+ // Note: The values allowed for `tls.cipher` MUST be one of the
+ // `Descriptions` of the [registered TLS Cipher
+ // Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
+ TLSCipherKey = attribute.Key("tls.cipher")
+
+ // TLSClientCertificateKey is the attribute Key conforming to the
+ // "tls.client.certificate" semantic conventions. It represents the
+ // PEM-encoded stand-alone certificate offered by the client. This is
+ // usually mutually-exclusive of `client.certificate_chain` since this
+ // value also exists in that list.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...'
+ TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+ // TLSClientCertificateChainKey is the attribute Key conforming to the
+ // "tls.client.certificate_chain" semantic conventions. It represents the
+ // array of PEM-encoded certificates that make up the certificate chain
+ // offered by the client. This is usually mutually-exclusive of
+ // `client.certificate` since that value should be the first certificate in
+ // the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...', 'MI...'
+ TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
+
+ // TLSClientHashMd5Key is the attribute Key conforming to the
+ // "tls.client.hash.md5" semantic conventions. It represents the
+ // certificate fingerprint using the MD5 digest of DER-encoded version of
+ // certificate offered by the client. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+ TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
+
+ // TLSClientHashSha1Key is the attribute Key conforming to the
+ // "tls.client.hash.sha1" semantic conventions. It represents the
+ // certificate fingerprint using the SHA1 digest of DER-encoded version of
+ // certificate offered by the client. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+ TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
+
+ // TLSClientHashSha256Key is the attribute Key conforming to the
+ // "tls.client.hash.sha256" semantic conventions. It represents the
+ // certificate fingerprint using the SHA256 digest of DER-encoded version
+ // of certificate offered by the client. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+ TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
+
+ // TLSClientIssuerKey is the attribute Key conforming to the
+ // "tls.client.issuer" semantic conventions. It represents the
+ // distinguished name of
+ // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+ // of the issuer of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+ // DC=com'
+ TLSClientIssuerKey = attribute.Key("tls.client.issuer")
+
+ // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
+ // semantic conventions. It represents a hash that identifies clients based
+ // on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ TLSClientJa3Key = attribute.Key("tls.client.ja3")
+
+ // TLSClientNotAfterKey is the attribute Key conforming to the
+ // "tls.client.not_after" semantic conventions. It represents the date/time
+ // indicating when the client certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
+
+ // TLSClientNotBeforeKey is the attribute Key conforming to the
+ // "tls.client.not_before" semantic conventions.
+ // It represents the date/time indicating when the client certificate is
+ // first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+ // TLSClientServerNameKey is the attribute Key conforming to the
+ // "tls.client.server_name" semantic conventions. It represents the
+ // hostname the client is attempting to connect to, also called the SNI
+ // (Server Name Indication).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry.io'
+ TLSClientServerNameKey = attribute.Key("tls.client.server_name")
+
+ // TLSClientSubjectKey is the attribute Key conforming to the
+ // "tls.client.subject" semantic conventions. It represents the
+ // distinguished name of subject of the x.509 certificate presented by the
+ // client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
+ TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+ // TLSClientSupportedCiphersKey is the attribute Key conforming to the
+ // "tls.client.supported_ciphers" semantic conventions. It represents the
+ // array of ciphers offered by the client during the client hello.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
+ TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
+
+ // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
+ // conventions. It represents the string indicating the curve used for the
+ // given cipher, when applicable
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'secp256r1'
+ TLSCurveKey = attribute.Key("tls.curve")
+
+ // TLSEstablishedKey is the attribute Key conforming to the
+ // "tls.established" semantic conventions. It represents the boolean flag
+ // indicating if the TLS negotiation was successful and transitioned to an
+ // encrypted tunnel.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: True
+ TLSEstablishedKey = attribute.Key("tls.established")
+
+ // TLSNextProtocolKey is the attribute Key conforming to the
+ // "tls.next_protocol" semantic conventions. It represents the string
+ // indicating the protocol being tunneled. Per the values in the [IANA
+ // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+ // this string should be lower case.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'http/1.1'
+ TLSNextProtocolKey = attribute.Key("tls.next_protocol")
+
+ // TLSProtocolNameKey is the attribute Key conforming to the
+ // "tls.protocol.name" semantic conventions. It represents the normalized
+ // lowercase protocol name parsed from original string of the negotiated
+ // [SSL/TLS protocol
+ // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ TLSProtocolNameKey = attribute.Key("tls.protocol.name")
+
+ // TLSProtocolVersionKey is the attribute Key conforming to the
+ // "tls.protocol.version" semantic conventions.
+ // It represents the numeric part of the version parsed from the original
+ // string of the negotiated [SSL/TLS protocol
+ // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.2', '3'
+ TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
+
+ // TLSResumedKey is the attribute Key conforming to the "tls.resumed"
+ // semantic conventions. It represents the boolean flag indicating if this
+ // TLS connection was resumed from an existing TLS negotiation.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: True
+ TLSResumedKey = attribute.Key("tls.resumed")
+
+ // TLSServerCertificateKey is the attribute Key conforming to the
+ // "tls.server.certificate" semantic conventions. It represents the
+ // PEM-encoded stand-alone certificate offered by the server. This is
+ // usually mutually-exclusive of `server.certificate_chain` since this
+ // value also exists in that list.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...'
+ TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+ // TLSServerCertificateChainKey is the attribute Key conforming to the
+ // "tls.server.certificate_chain" semantic conventions. It represents the
+ // array of PEM-encoded certificates that make up the certificate chain
+ // offered by the server. This is usually mutually-exclusive of
+ // `server.certificate` since that value should be the first certificate in
+ // the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...', 'MI...'
+ TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+ // TLSServerHashMd5Key is the attribute Key conforming to the
+ // "tls.server.hash.md5" semantic conventions. It represents the
+ // certificate fingerprint using the MD5 digest of DER-encoded version of
+ // certificate offered by the server. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+ TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+ // TLSServerHashSha1Key is the attribute Key conforming to the
+ // "tls.server.hash.sha1" semantic conventions. It represents the
+ // certificate fingerprint using the SHA1 digest of DER-encoded version of
+ // certificate offered by the server. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+ TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+ // TLSServerHashSha256Key is the attribute Key conforming to the
+ // "tls.server.hash.sha256" semantic conventions. It represents the
+ // certificate fingerprint using the SHA256 digest of DER-encoded version
+ // of certificate offered by the server. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+ TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+ // TLSServerIssuerKey is the attribute Key conforming to the
+ // "tls.server.issuer" semantic conventions. It represents the
+ // distinguished name of
+ // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+ // of the issuer of the x.509 certificate presented by the server.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+ // DC=com'
+ TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+ // TLSServerJa3sKey is the attribute Key conforming to the
+ // "tls.server.ja3s" semantic conventions. It represents a hash that
+ // identifies servers based on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+ // TLSServerNotAfterKey is the attribute Key conforming to the
+ // "tls.server.not_after" semantic conventions. It represents the date/time
+ // indicating when the server certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
+
+ // TLSServerNotBeforeKey is the attribute Key conforming to the
+ // "tls.server.not_before" semantic conventions. It represents the
+ // date/time indicating when the server certificate is first considered
+ // valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
+
+ // TLSServerSubjectKey is the attribute Key conforming to the
+ // "tls.server.subject" semantic conventions. It represents the
+ // distinguished name of subject of the x.509 certificate presented by the
+ // server.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
+ TLSServerSubjectKey = attribute.Key("tls.server.subject")
+)
+
+var (
+ // ssl
+ TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
+ // tls
+ TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
+)
+
+// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
+// semantic conventions. It represents the string indicating the
+// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
+// during the current connection.
+func TLSCipher(val string) attribute.KeyValue {
+ return TLSCipherKey.String(val)
+}
+
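+// Example (editor's illustration, not part of the generated conventions):
+// deriving TLS attributes from a crypto/tls.ConnectionState `cs`; the
+// version-to-string mapping below is a simplified sketch:
+//
+//	version := map[uint16]string{
+//		tls.VersionTLS12: "1.2",
+//		tls.VersionTLS13: "1.3",
+//	}[cs.Version]
+//	span.SetAttributes(
+//		semconv.TLSCipher(tls.CipherSuiteName(cs.CipherSuite)),
+//		semconv.TLSProtocolVersion(version),
+//		semconv.TLSEstablished(cs.HandshakeComplete),
+//	)
+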
+// TLSClientCertificate returns an attribute KeyValue conforming to the
+// "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the client. This is usually
+// mutually-exclusive of `client.certificate_chain` since this value also
+// exists in that list.
+func TLSClientCertificate(val string) attribute.KeyValue {
+ return TLSClientCertificateKey.String(val)
+}
+
+// TLSClientCertificateChain returns an attribute KeyValue conforming to the
+// "tls.client.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the client. This is usually mutually-exclusive of `client.certificate` since
+// that value should be the first certificate in the chain.
+func TLSClientCertificateChain(val ...string) attribute.KeyValue {
+ return TLSClientCertificateChainKey.StringSlice(val)
+}
+
+// TLSClientHashMd5 returns an attribute KeyValue conforming to the
+// "tls.client.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashMd5(val string) attribute.KeyValue {
+ return TLSClientHashMd5Key.String(val)
+}
+
+// TLSClientHashSha1 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha1(val string) attribute.KeyValue {
+ return TLSClientHashSha1Key.String(val)
+}
+
+// TLSClientHashSha256 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha256(val string) attribute.KeyValue {
+ return TLSClientHashSha256Key.String(val)
+}
+
+// TLSClientIssuer returns an attribute KeyValue conforming to the
+// "tls.client.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the client.
+func TLSClientIssuer(val string) attribute.KeyValue {
+ return TLSClientIssuerKey.String(val)
+}
+
+// TLSClientJa3 returns an attribute KeyValue conforming to the
+// "tls.client.ja3" semantic conventions. It represents a hash that identifies
+// clients based on how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+ return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/time
+// indicating when the client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+ return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/time
+// indicating when the client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+ return TLSClientNotBeforeKey.String(val)
+}
+
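+// Example (editor's illustration, not part of the generated conventions):
+// computing the uppercase SHA256 fingerprint of a client certificate `cert`
+// (*crypto/x509.Certificate) for tls.client.hash.sha256, using crypto/sha256,
+// encoding/hex, and strings:
+//
+//	sum := sha256.Sum256(cert.Raw)
+//	fingerprint := strings.ToUpper(hex.EncodeToString(sum[:]))
+//	span.SetAttributes(semconv.TLSClientHashSha256(fingerprint))
+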
+func TLSClientServerName(val string) attribute.KeyValue {
+ return TLSClientServerNameKey.String(val)
+}
+
+// TLSClientSubject returns an attribute KeyValue conforming to the
+// "tls.client.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the client.
+func TLSClientSubject(val string) attribute.KeyValue {
+ return TLSClientSubjectKey.String(val)
+}
+
+// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
+// "tls.client.supported_ciphers" semantic conventions. It represents the array
+// of ciphers offered by the client during the client hello.
+func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
+ return TLSClientSupportedCiphersKey.StringSlice(val)
+}
+
+// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
+// semantic conventions. It represents the string indicating the curve used for
+// the given cipher, when applicable.
+func TLSCurve(val string) attribute.KeyValue {
+ return TLSCurveKey.String(val)
+}
+
+// TLSEstablished returns an attribute KeyValue conforming to the
+// "tls.established" semantic conventions. It represents the boolean flag
+// indicating if the TLS negotiation was successful and transitioned to an
+// encrypted tunnel.
+func TLSEstablished(val bool) attribute.KeyValue {
+ return TLSEstablishedKey.Bool(val)
+}
+
+// TLSNextProtocol returns an attribute KeyValue conforming to the
+// "tls.next_protocol" semantic conventions. It represents the string
+// indicating the protocol being tunneled. Per the values in the [IANA
+// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+// this string should be lower case.
+func TLSNextProtocol(val string) attribute.KeyValue {
+ return TLSNextProtocolKey.String(val)
+}
+
+// TLSProtocolVersion returns an attribute KeyValue conforming to the
+// "tls.protocol.version" semantic conventions. It represents the numeric part
+// of the version parsed from the original string of the negotiated [SSL/TLS
+// protocol
+// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES).
+func TLSProtocolVersion(val string) attribute.KeyValue {
+ return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+ return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually-exclusive of `server.certificate_chain` since this value also
+// exists in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+ return TLSServerCertificateKey.String(val)
+}
+
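These constructors all return plain attribute.KeyValue values, so they compose
directly with the trace API. A minimal usage sketch (the tracer name and the
attribute values below are illustrative, not taken from this file):

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// recordTLSDetails annotates a span with TLS handshake details using the
// semconv constructors defined above. All values are placeholders.
func recordTLSDetails(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "tls.handshake")
	defer span.End()

	span.SetAttributes(
		semconv.TLSEstablished(true),
		semconv.TLSProtocolVersion("1.3"),
		semconv.TLSCipher("TLS_AES_128_GCM_SHA256"),
		semconv.TLSClientServerName("example.com"),
	)
}
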
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the server. This is usually mutually-exclusive of `server.certificate` since
+// that value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+ return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+ return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+ return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+ return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the server.
+func TLSServerIssuer(val string) attribute.KeyValue {
+ return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
+// servers based on how they perform an SSL/TLS handshake.
+func TLSServerJa3s(val string) attribute.KeyValue {
+ return TLSServerJa3sKey.String(val)
+}
+
+// TLSServerNotAfter returns an attribute KeyValue conforming to the
+// "tls.server.not_after" semantic conventions. It represents the date/time
+// indicating when the server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue {
+ return TLSServerNotAfterKey.String(val)
+}
+
+// TLSServerNotBefore returns an attribute KeyValue conforming to the
+// "tls.server.not_before" semantic conventions. It represents the date/time
+// indicating when the server certificate is first considered valid.
+func TLSServerNotBefore(val string) attribute.KeyValue {
+ return TLSServerNotBeforeKey.String(val)
+}
+
+// TLSServerSubject returns an attribute KeyValue conforming to the
+// "tls.server.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the server.
+func TLSServerSubject(val string) attribute.KeyValue {
+ return TLSServerSubjectKey.String(val)
+}
+
+// Attributes describing URL.
+const (
+ // URLDomainKey is the attribute Key conforming to the "url.domain"
+ // semantic conventions. It represents the domain extracted from the
+ // `url.full`, such as "opentelemetry.io".
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2', + // '[1080:0:0:0:8:800:200C:417A]' + // Note: In some cases a URL may refer to an IP and/or port directly, + // without a domain name. In this case, the IP address would go to the + // domain field. If the URL contains a [literal IPv6 + // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by + // `[` and `]`, the `[` and `]` characters should also be captured in the + // domain field. + URLDomainKey = attribute.Key("url.domain") + + // URLExtensionKey is the attribute Key conforming to the "url.extension" + // semantic conventions. It represents the file extension extracted from + // the `url.full`, excluding the leading dot. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: The file extension is only set if it exists, as not every url has + // a file extension. When the file name has multiple extensions + // `example.tar.gz`, only the last one should be captured `gz`, not + // `tar.gz`. + URLExtensionKey = attribute.Key("url.extension") + + // URLFragmentKey is the attribute Key conforming to the "url.fragment" + // semantic conventions. It represents the [URI + // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'SemConv' + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network + // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // '//localhost' + // Note: For network calls, URL usually has + // `scheme://host[:port][path][?query][#fragment]` format, where the + // fragment is not transmitted over HTTP, but if it is known, it SHOULD be + // included nevertheless. + // `url.full` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case username and + // password SHOULD be redacted and attribute's value SHOULD be + // `https://REDACTED:REDACTED@www.example.com/`. + // `url.full` SHOULD capture the absolute URL when it is available (or can + // be reconstructed). Sensitive content provided in `url.full` SHOULD be + // scrubbed when instrumentations can identify it. + URLFullKey = attribute.Key("url.full") + + // URLOriginalKey is the attribute Key conforming to the "url.original" + // semantic conventions. It represents the unmodified original URL as seen + // in the event source. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // 'search?q=OpenTelemetry' + // Note: In network monitoring, the observed URL may be a full URL, whereas + // in access logs, the URL is often just represented as a path. This field + // is meant to represent the URL as it was observed, complete or not. + // `url.original` might contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case password and + // username SHOULD NOT be redacted and attribute's value SHOULD remain the + // same. 
+ URLOriginalKey = attribute.Key("url.original")
+
+ // URLPathKey is the attribute Key conforming to the "url.path" semantic
+ // conventions. It represents the [URI
+ // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/search'
+ // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ URLPathKey = attribute.Key("url.path")
+
+ // URLPortKey is the attribute Key conforming to the "url.port" semantic
+ // conventions. It represents the port extracted from the `url.full`
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 443
+ URLPortKey = attribute.Key("url.port")
+
+ // URLQueryKey is the attribute Key conforming to the "url.query" semantic
+ // conventions. It represents the [URI
+ // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'q=OpenTelemetry'
+ // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ URLQueryKey = attribute.Key("url.query")
+
+ // URLRegisteredDomainKey is the attribute Key conforming to the
+ // "url.registered_domain" semantic conventions. It represents the highest
+ // registered URL domain, stripped of the subdomain.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'example.com', 'foo.co.uk'
+ // Note: This value can be determined precisely with the [public suffix
+ // list](http://publicsuffix.org). For example, the registered domain for
+ // `foo.example.com` is `example.com`. Trying to approximate this by simply
+ // taking the last two labels will not work well for TLDs such as `co.uk`.
+ URLRegisteredDomainKey = attribute.Key("url.registered_domain")
+
+ // URLSchemeKey is the attribute Key conforming to the "url.scheme"
+ // semantic conventions. It represents the [URI
+ // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+ // identifying the used protocol.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'https', 'ftp', 'telnet'
+ URLSchemeKey = attribute.Key("url.scheme")
+
+ // URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
+ // semantic conventions. It represents the subdomain portion of a fully
+ // qualified domain name, which includes all of the names except the host
+ // name under the registered_domain. In a partially qualified domain, or if
+ // the qualification level of the full name cannot be determined, subdomain
+ // contains all of the names below the registered domain.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'east', 'sub2.sub1'
+ // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If
+ // the domain has multiple levels of subdomain, such as
+ // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`,
+ // with no trailing period.
+ URLSubdomainKey = attribute.Key("url.subdomain")
+
+ // URLTemplateKey is the attribute Key conforming to the "url.template"
+ // semantic conventions. It represents the low-cardinality template of an
+ // [absolute path
+ // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/users/{id}', '/users/:id', '/users?id={id}'
+ URLTemplateKey = attribute.Key("url.template")
+
+ // URLTopLevelDomainKey is the attribute Key conforming to the
+ // "url.top_level_domain" semantic conventions. It represents the effective
+ // top level domain (eTLD), also known as the domain suffix, i.e. the last
+ // part of the domain name. For example, the top level domain for
+ // example.com is `com`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com', 'co.uk'
+ // Note: This value can be determined precisely with the [public suffix
+ // list](http://publicsuffix.org).
+ URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
+)
+
+// URLDomain returns an attribute KeyValue conforming to the "url.domain"
+// semantic conventions. It represents the domain extracted from the
+// `url.full`, such as "opentelemetry.io".
+func URLDomain(val string) attribute.KeyValue {
+ return URLDomainKey.String(val)
+}
+
+// URLExtension returns an attribute KeyValue conforming to the
+// "url.extension" semantic conventions. It represents the file extension
+// extracted from the `url.full`, excluding the leading dot.
+func URLExtension(val string) attribute.KeyValue {
+ return URLExtensionKey.String(val)
+}
+
+// URLFragment returns an attribute KeyValue conforming to the
+// "url.fragment" semantic conventions. It represents the [URI
+// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+func URLFragment(val string) attribute.KeyValue {
+ return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full"
+// semantic conventions. It represents the absolute URL describing a network
+// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+func URLFull(val string) attribute.KeyValue {
+ return URLFullKey.String(val)
+}
+
+// URLOriginal returns an attribute KeyValue conforming to the
+// "url.original" semantic conventions. It represents the unmodified original
+// URL as seen in the event source.
+func URLOriginal(val string) attribute.KeyValue {
+ return URLOriginalKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path"
+// semantic conventions. It represents the [URI
+// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+func URLPath(val string) attribute.KeyValue {
+ return URLPathKey.String(val)
+}
+
+// URLPort returns an attribute KeyValue conforming to the "url.port"
+// semantic conventions. It represents the port extracted from the `url.full`
+func URLPort(val int) attribute.KeyValue {
+ return URLPortKey.Int(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query"
+// semantic conventions. It represents the [URI
+// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+func URLQuery(val string) attribute.KeyValue {
+ return URLQueryKey.String(val)
+}
+
+// URLRegisteredDomain returns an attribute KeyValue conforming to the
+// "url.registered_domain" semantic conventions. It represents the highest
+// registered URL domain, stripped of the subdomain.
+func URLRegisteredDomain(val string) attribute.KeyValue {
+ return URLRegisteredDomainKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI
+// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+// identifying the used protocol.
+func URLScheme(val string) attribute.KeyValue {
+ return URLSchemeKey.String(val)
+}
+
+// URLSubdomain returns an attribute KeyValue conforming to the
+// "url.subdomain" semantic conventions. It represents the subdomain portion of
+// a fully qualified domain name, which includes all of the names except the
+// host name under the registered_domain. In a partially qualified domain, or
+// if the qualification level of the full name cannot be determined, subdomain
+// contains all of the names below the registered domain.
+func URLSubdomain(val string) attribute.KeyValue {
+ return URLSubdomainKey.String(val)
+}
+
+// URLTemplate returns an attribute KeyValue conforming to the
+// "url.template" semantic conventions. It represents the low-cardinality
+// template of an [absolute path
+// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
+func URLTemplate(val string) attribute.KeyValue {
+ return URLTemplateKey.String(val)
+}
+
+// URLTopLevelDomain returns an attribute KeyValue conforming to the
+// "url.top_level_domain" semantic conventions. It represents the effective top
+// level domain (eTLD), also known as the domain suffix, i.e. the last part of
+// the domain name. For example, the top level domain for example.com is `com`.
+func URLTopLevelDomain(val string) attribute.KeyValue {
+ return URLTopLevelDomainKey.String(val)
+}
+
+// Describes user-agent attributes.
+const (
+ // UserAgentNameKey is the attribute Key conforming to the
+ // "user_agent.name" semantic conventions. It represents the name of the
+ // user-agent extracted from original. Usually refers to the browser's
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Safari', 'YourApp'
+ // Note: [Example](https://www.whatsmyua.info) of extracting browser's name
+ // from original string. In the case of using a user-agent for non-browser
+ // products, such as microservices with multiple names/versions inside the
+ // `user_agent.original`, the most significant name SHOULD be selected. In
+ // such a scenario it should align with `user_agent.version`.
+ UserAgentNameKey = attribute.Key("user_agent.name")
+
+ // UserAgentOriginalKey is the attribute Key conforming to the
+ // "user_agent.original" semantic conventions. It represents the value of
+ // the [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
+ // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+ // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0
+ // grpc-java-okhttp/1.27.2'
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+
+ // UserAgentVersionKey is the attribute Key conforming to the
+ // "user_agent.version" semantic conventions. It represents the version of
+ // the user-agent extracted from original. Usually refers to the browser's
+ // version.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '14.1.2', '1.0.0'
+ // Note: [Example](https://www.whatsmyua.info) of extracting browser's
+ // version from original string. In the case of using a user-agent for
+ // non-browser products, such as microservices with multiple names/versions
+ // inside the `user_agent.original`, the most significant version SHOULD be
+ // selected. In such a scenario it should align with `user_agent.name`.
+ UserAgentVersionKey = attribute.Key("user_agent.version")
+)
+
+// UserAgentName returns an attribute KeyValue conforming to the
+// "user_agent.name" semantic conventions. It represents the name of the
+// user-agent extracted from original. Usually refers to the browser's name.
+func UserAgentName(val string) attribute.KeyValue {
+ return UserAgentNameKey.String(val)
+}
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+ return UserAgentOriginalKey.String(val)
+}
+
+// UserAgentVersion returns an attribute KeyValue conforming to the
+// "user_agent.version" semantic conventions. It represents the version of the
+// user-agent extracted from original. Usually refers to the browser's version.
+func UserAgentVersion(val string) attribute.KeyValue {
+ return UserAgentVersionKey.String(val)
+}
+
+// The attributes used to describe the packaged software running the
+// application code.
+const (
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the
+ // additional description of the web engine (e.g. detailed version and
+ // edition information).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final'
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'WildFly'
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of
+ // the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '21.0.0'
+ WebEngineVersionKey = attribute.Key("webengine.version")
+)
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
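The user-agent attributes follow the same pattern; only `user_agent.original`
is stable, so a conservative instrumentation records just the raw header. A
minimal sketch (the tracer and span names are illustrative):

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// annotateWithUserAgent records the raw User-Agent header on a server span.
// The name/version split (UserAgentName, UserAgentVersion) is experimental
// and is deliberately left out here.
func annotateWithUserAgent(ctx context.Context, rawUA string) {
	_, span := otel.Tracer("example").Start(ctx, "http.server.request")
	defer span.End()

	span.SetAttributes(semconv.UserAgentOriginal(rawUA))
}
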
+func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go new file mode 100644 index 00000000000..d031bbea784 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.26.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go new file mode 100644 index 00000000000..bfaee0d56e3 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go new file mode 100644 index 00000000000..fcdb9f48596 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go @@ -0,0 +1,1307 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +const ( + + // ContainerCPUTime is the metric conforming to the "container.cpu.time" + // semantic conventions. It represents the total CPU time consumed. + // Instrument: counter + // Unit: s + // Stability: Experimental + ContainerCPUTimeName = "container.cpu.time" + ContainerCPUTimeUnit = "s" + ContainerCPUTimeDescription = "Total CPU time consumed" + + // ContainerMemoryUsage is the metric conforming to the + // "container.memory.usage" semantic conventions. It represents the memory + // usage of the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerMemoryUsageName = "container.memory.usage" + ContainerMemoryUsageUnit = "By" + ContainerMemoryUsageDescription = "Memory usage of the container." + + // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic + // conventions. It represents the disk bytes for the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerDiskIoName = "container.disk.io" + ContainerDiskIoUnit = "By" + ContainerDiskIoDescription = "Disk bytes for the container." + + // ContainerNetworkIo is the metric conforming to the "container.network.io" + // semantic conventions. It represents the network bytes for the container. 
+ // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerNetworkIoName = "container.network.io" + ContainerNetworkIoUnit = "By" + ContainerNetworkIoDescription = "Network bytes for the container." + + // DBClientOperationDuration is the metric conforming to the + // "db.client.operation.duration" semantic conventions. It represents the + // duration of database client operations. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientOperationDurationName = "db.client.operation.duration" + DBClientOperationDurationUnit = "s" + DBClientOperationDurationDescription = "Duration of database client operations." + + // DBClientConnectionCount is the metric conforming to the + // "db.client.connection.count" semantic conventions. It represents the number + // of connections that are currently in state described by the `state` + // attribute. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionCountName = "db.client.connection.count" + DBClientConnectionCountUnit = "{connection}" + DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute" + + // DBClientConnectionIdleMax is the metric conforming to the + // "db.client.connection.idle.max" semantic conventions. It represents the + // maximum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMaxName = "db.client.connection.idle.max" + DBClientConnectionIdleMaxUnit = "{connection}" + DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed" + + // DBClientConnectionIdleMin is the metric conforming to the + // "db.client.connection.idle.min" semantic conventions. It represents the + // minimum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMinName = "db.client.connection.idle.min" + DBClientConnectionIdleMinUnit = "{connection}" + DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed" + + // DBClientConnectionMax is the metric conforming to the + // "db.client.connection.max" semantic conventions. It represents the maximum + // number of open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionMaxName = "db.client.connection.max" + DBClientConnectionMaxUnit = "{connection}" + DBClientConnectionMaxDescription = "The maximum number of open connections allowed" + + // DBClientConnectionPendingRequests is the metric conforming to the + // "db.client.connection.pending_requests" semantic conventions. It represents + // the number of pending requests for an open connection, cumulative for the + // entire pool. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests" + DBClientConnectionPendingRequestsUnit = "{request}" + DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" + + // DBClientConnectionTimeouts is the metric conforming to the + // "db.client.connection.timeouts" semantic conventions. It represents the + // number of connection timeouts that have occurred trying to obtain a + // connection from the pool. 
+ // Instrument: counter
+ // Unit: {timeout}
+ // Stability: Experimental
+ DBClientConnectionTimeoutsName = "db.client.connection.timeouts"
+ DBClientConnectionTimeoutsUnit = "{timeout}"
+ DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool"
+
+ // DBClientConnectionCreateTime is the metric conforming to the
+ // "db.client.connection.create_time" semantic conventions. It represents the
+ // time it took to create a new connection.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientConnectionCreateTimeName = "db.client.connection.create_time"
+ DBClientConnectionCreateTimeUnit = "s"
+ DBClientConnectionCreateTimeDescription = "The time it took to create a new connection"
+
+ // DBClientConnectionWaitTime is the metric conforming to the
+ // "db.client.connection.wait_time" semantic conventions. It represents the
+ // time it took to obtain an open connection from the pool.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientConnectionWaitTimeName = "db.client.connection.wait_time"
+ DBClientConnectionWaitTimeUnit = "s"
+ DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool"
+
+ // DBClientConnectionUseTime is the metric conforming to the
+ // "db.client.connection.use_time" semantic conventions. It represents the time
+ // between borrowing a connection and returning it to the pool.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientConnectionUseTimeName = "db.client.connection.use_time"
+ DBClientConnectionUseTimeUnit = "s"
+ DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool"
+
+ // DBClientConnectionsUsage is the metric conforming to the
+ // "db.client.connections.usage" semantic conventions. It is
+ // deprecated; use `db.client.connection.count` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsUsageName = "db.client.connections.usage"
+ DBClientConnectionsUsageUnit = "{connection}"
+ DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead."
+
+ // DBClientConnectionsIdleMax is the metric conforming to the
+ // "db.client.connections.idle.max" semantic conventions. It is
+ // deprecated; use `db.client.connection.idle.max` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsIdleMaxName = "db.client.connections.idle.max"
+ DBClientConnectionsIdleMaxUnit = "{connection}"
+ DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead."
+
+ // DBClientConnectionsIdleMin is the metric conforming to the
+ // "db.client.connections.idle.min" semantic conventions. It is
+ // deprecated; use `db.client.connection.idle.min` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsIdleMinName = "db.client.connections.idle.min"
+ DBClientConnectionsIdleMinUnit = "{connection}"
+ DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead."
+
+ // DBClientConnectionsMax is the metric conforming to the
+ // "db.client.connections.max" semantic conventions. It is
+ // deprecated; use `db.client.connection.max` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsMaxName = "db.client.connections.max"
+ DBClientConnectionsMaxUnit = "{connection}"
+ DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead."
+
+ // DBClientConnectionsPendingRequests is the metric conforming to the
+ // "db.client.connections.pending_requests" semantic conventions. It is
+ // deprecated; use `db.client.connection.pending_requests` instead.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests"
+ DBClientConnectionsPendingRequestsUnit = "{request}"
+ DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead."
+
+ // DBClientConnectionsTimeouts is the metric conforming to the
+ // "db.client.connections.timeouts" semantic conventions. It is
+ // deprecated; use `db.client.connection.timeouts` instead.
+ // Instrument: counter
+ // Unit: {timeout}
+ // Stability: Experimental
+ DBClientConnectionsTimeoutsName = "db.client.connections.timeouts"
+ DBClientConnectionsTimeoutsUnit = "{timeout}"
+ DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead."
+
+ // DBClientConnectionsCreateTime is the metric conforming to the
+ // "db.client.connections.create_time" semantic conventions. It is
+ // deprecated; use `db.client.connection.create_time` instead. Note: the unit
+ // also changed from `ms` to `s`.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsCreateTimeName = "db.client.connections.create_time"
+ DBClientConnectionsCreateTimeUnit = "ms"
+ DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`."
+
+ // DBClientConnectionsWaitTime is the metric conforming to the
+ // "db.client.connections.wait_time" semantic conventions. It is
+ // deprecated; use `db.client.connection.wait_time` instead. Note: the unit
+ // also changed from `ms` to `s`.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsWaitTimeName = "db.client.connections.wait_time"
+ DBClientConnectionsWaitTimeUnit = "ms"
+ DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`."
+
+ // DBClientConnectionsUseTime is the metric conforming to the
+ // "db.client.connections.use_time" semantic conventions. It is
+ // deprecated; use `db.client.connection.use_time` instead. Note: the unit also
+ // changed from `ms` to `s`.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsUseTimeName = "db.client.connections.use_time"
+ DBClientConnectionsUseTimeUnit = "ms"
+ DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`."
+
+ // DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
+ // semantic conventions. It measures the time taken to perform a
+ // DNS lookup.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DNSLookupDurationName = "dns.lookup.duration"
+ DNSLookupDurationUnit = "s"
+ DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
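Each metric here ships a Name/Unit/Description triple so instrument metadata
can be created directly from the convention instead of being retyped. A sketch
for the db.client.connection.count updowncounter, assuming a global
MeterProvider is configured elsewhere; the meter name, the `state` values, and
the literal attribute key are illustrative:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// reportConnectionCount moves the db.client.connection.count updowncounter by
// delta for the given pool state (e.g. "idle" or "used").
func reportConnectionCount(ctx context.Context, delta int64, state string) error {
	counter, err := otel.Meter("example").Int64UpDownCounter(
		semconv.DBClientConnectionCountName,
		metric.WithUnit(semconv.DBClientConnectionCountUnit),
		metric.WithDescription(semconv.DBClientConnectionCountDescription),
	)
	if err != nil {
		return err
	}
	counter.Add(ctx, delta,
		metric.WithAttributes(attribute.String("db.client.connection.state", state)))
	return nil
}
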
+ + // AspnetcoreRoutingMatchAttempts is the metric conforming to the + // "aspnetcore.routing.match_attempts" semantic conventions. It represents the + // number of requests that were attempted to be matched to an endpoint. + // Instrument: counter + // Unit: {match_attempt} + // Stability: Stable + AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" + AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" + AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." + + // AspnetcoreDiagnosticsExceptions is the metric conforming to the + // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the + // number of exceptions caught by exception handling middleware. + // Instrument: counter + // Unit: {exception} + // Stability: Stable + AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" + AspnetcoreDiagnosticsExceptionsUnit = "{exception}" + AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." + + // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the + // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It + // represents the number of requests that are currently active on the server + // that hold a rate limiting lease. + // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" + AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" + AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." + + // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the + // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It + // represents the duration of rate limiting lease held by requests on the + // server. + // Instrument: histogram + // Unit: s + // Stability: Stable + AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" + AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" + AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." + + // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the + // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It + // represents the time the request spent in a queue waiting to acquire a rate + // limiting lease. + // Instrument: histogram + // Unit: s + // Stability: Stable + AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" + AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" + AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the + // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It + // represents the number of requests that are currently queued, waiting to + // acquire a rate limiting lease. 
+ // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests" + AspnetcoreRateLimitingQueuedRequestsUnit = "{request}" + AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingRequests is the metric conforming to the + // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the + // number of requests that tried to acquire a rate limiting lease. + // Instrument: counter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests" + AspnetcoreRateLimitingRequestsUnit = "{request}" + AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease." + + // KestrelActiveConnections is the metric conforming to the + // "kestrel.active_connections" semantic conventions. It represents the number + // of connections that are currently active on the server. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelActiveConnectionsName = "kestrel.active_connections" + KestrelActiveConnectionsUnit = "{connection}" + KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." + + // KestrelConnectionDuration is the metric conforming to the + // "kestrel.connection.duration" semantic conventions. It represents the + // duration of connections on the server. + // Instrument: histogram + // Unit: s + // Stability: Stable + KestrelConnectionDurationName = "kestrel.connection.duration" + KestrelConnectionDurationUnit = "s" + KestrelConnectionDurationDescription = "The duration of connections on the server." + + // KestrelRejectedConnections is the metric conforming to the + // "kestrel.rejected_connections" semantic conventions. It represents the + // number of connections rejected by the server. + // Instrument: counter + // Unit: {connection} + // Stability: Stable + KestrelRejectedConnectionsName = "kestrel.rejected_connections" + KestrelRejectedConnectionsUnit = "{connection}" + KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." + + // KestrelQueuedConnections is the metric conforming to the + // "kestrel.queued_connections" semantic conventions. It represents the number + // of connections that are currently queued and are waiting to start. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelQueuedConnectionsName = "kestrel.queued_connections" + KestrelQueuedConnectionsUnit = "{connection}" + KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start." + + // KestrelQueuedRequests is the metric conforming to the + // "kestrel.queued_requests" semantic conventions. It represents the number of + // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are + // currently queued and are waiting to start. + // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + KestrelQueuedRequestsName = "kestrel.queued_requests" + KestrelQueuedRequestsUnit = "{request}" + KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start." + + // KestrelUpgradedConnections is the metric conforming to the + // "kestrel.upgraded_connections" semantic conventions. 
It represents the
+ // number of connections that are currently upgraded (WebSockets).
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
+ KestrelUpgradedConnectionsUnit = "{connection}"
+ KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets)."
+
+ // KestrelTLSHandshakeDuration is the metric conforming to the
+ // "kestrel.tls_handshake.duration" semantic conventions. It represents the
+ // duration of TLS handshakes on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
+ KestrelTLSHandshakeDurationUnit = "s"
+ KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
+
+ // KestrelActiveTLSHandshakes is the metric conforming to the
+ // "kestrel.active_tls_handshakes" semantic conventions. It represents the
+ // number of TLS handshakes that are currently in progress on the server.
+ // Instrument: updowncounter
+ // Unit: {handshake}
+ // Stability: Stable
+ KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
+ KestrelActiveTLSHandshakesUnit = "{handshake}"
+ KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
+
+ // SignalrServerConnectionDuration is the metric conforming to the
+ // "signalr.server.connection.duration" semantic conventions. It represents the
+ // duration of connections on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ SignalrServerConnectionDurationName = "signalr.server.connection.duration"
+ SignalrServerConnectionDurationUnit = "s"
+ SignalrServerConnectionDurationDescription = "The duration of connections on the server."
+
+ // SignalrServerActiveConnections is the metric conforming to the
+ // "signalr.server.active_connections" semantic conventions. It represents the
+ // number of connections that are currently active on the server.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ SignalrServerActiveConnectionsName = "signalr.server.active_connections"
+ SignalrServerActiveConnectionsUnit = "{connection}"
+ SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+ // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
+ // semantic conventions. It measures the duration of the
+ // function's logic execution.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSInvokeDurationName = "faas.invoke_duration"
+ FaaSInvokeDurationUnit = "s"
+ FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
+
+ // FaaSInitDuration is the metric conforming to the "faas.init_duration"
+ // semantic conventions. It measures the duration of the
+ // function's initialization, such as a cold start.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSInitDurationName = "faas.init_duration"
+ FaaSInitDurationUnit = "s"
+ FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
+
+ // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
+ // conventions. It represents the number of invocation cold starts.
+ // Instrument: counter + // Unit: {coldstart} + // Stability: Experimental + FaaSColdstartsName = "faas.coldstarts" + FaaSColdstartsUnit = "{coldstart}" + FaaSColdstartsDescription = "Number of invocation cold starts" + + // FaaSErrors is the metric conforming to the "faas.errors" semantic + // conventions. It represents the number of invocation errors. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + FaaSErrorsName = "faas.errors" + FaaSErrorsUnit = "{error}" + FaaSErrorsDescription = "Number of invocation errors" + + // FaaSInvocations is the metric conforming to the "faas.invocations" semantic + // conventions. It represents the number of successful invocations. + // Instrument: counter + // Unit: {invocation} + // Stability: Experimental + FaaSInvocationsName = "faas.invocations" + FaaSInvocationsUnit = "{invocation}" + FaaSInvocationsDescription = "Number of successful invocations" + + // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic + // conventions. It represents the number of invocation timeouts. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + FaaSTimeoutsName = "faas.timeouts" + FaaSTimeoutsUnit = "{timeout}" + FaaSTimeoutsDescription = "Number of invocation timeouts" + + // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic + // conventions. It represents the distribution of max memory usage per + // invocation. + // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSMemUsageName = "faas.mem_usage" + FaaSMemUsageUnit = "By" + FaaSMemUsageDescription = "Distribution of max memory usage per invocation" + + // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic + // conventions. It represents the distribution of CPU usage per invocation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSCPUUsageName = "faas.cpu_usage" + FaaSCPUUsageUnit = "s" + FaaSCPUUsageDescription = "Distribution of CPU usage per invocation" + + // FaaSNetIo is the metric conforming to the "faas.net_io" semantic + // conventions. It represents the distribution of net I/O usage per invocation. + // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSNetIoName = "faas.net_io" + FaaSNetIoUnit = "By" + FaaSNetIoDescription = "Distribution of net I/O usage per invocation" + + // HTTPServerRequestDuration is the metric conforming to the + // "http.server.request.duration" semantic conventions. It represents the + // duration of HTTP server requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPServerRequestDurationName = "http.server.request.duration" + HTTPServerRequestDurationUnit = "s" + HTTPServerRequestDurationDescription = "Duration of HTTP server requests." + + // HTTPServerActiveRequests is the metric conforming to the + // "http.server.active_requests" semantic conventions. It represents the number + // of active HTTP server requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPServerActiveRequestsName = "http.server.active_requests" + HTTPServerActiveRequestsUnit = "{request}" + HTTPServerActiveRequestsDescription = "Number of active HTTP server requests." + + // HTTPServerRequestBodySize is the metric conforming to the + // "http.server.request.body.size" semantic conventions. It represents the size + // of HTTP server request bodies. 
+ // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerRequestBodySizeName = "http.server.request.body.size" + HTTPServerRequestBodySizeUnit = "By" + HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies." + + // HTTPServerResponseBodySize is the metric conforming to the + // "http.server.response.body.size" semantic conventions. It represents the + // size of HTTP server response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerResponseBodySizeName = "http.server.response.body.size" + HTTPServerResponseBodySizeUnit = "By" + HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies." + + // HTTPClientRequestDuration is the metric conforming to the + // "http.client.request.duration" semantic conventions. It represents the + // duration of HTTP client requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPClientRequestDurationName = "http.client.request.duration" + HTTPClientRequestDurationUnit = "s" + HTTPClientRequestDurationDescription = "Duration of HTTP client requests." + + // HTTPClientRequestBodySize is the metric conforming to the + // "http.client.request.body.size" semantic conventions. It represents the size + // of HTTP client request bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientRequestBodySizeName = "http.client.request.body.size" + HTTPClientRequestBodySizeUnit = "By" + HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies." + + // HTTPClientResponseBodySize is the metric conforming to the + // "http.client.response.body.size" semantic conventions. It represents the + // size of HTTP client response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientResponseBodySizeName = "http.client.response.body.size" + HTTPClientResponseBodySizeUnit = "By" + HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." + + // HTTPClientOpenConnections is the metric conforming to the + // "http.client.open_connections" semantic conventions. It represents the + // number of outbound HTTP connections that are currently active or idle on the + // client. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + HTTPClientOpenConnectionsName = "http.client.open_connections" + HTTPClientOpenConnectionsUnit = "{connection}" + HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." + + // HTTPClientConnectionDuration is the metric conforming to the + // "http.client.connection.duration" semantic conventions. It represents the + // duration of the successfully established outbound HTTP connections. + // Instrument: histogram + // Unit: s + // Stability: Experimental + HTTPClientConnectionDurationName = "http.client.connection.duration" + HTTPClientConnectionDurationUnit = "s" + HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." + + // HTTPClientActiveRequests is the metric conforming to the + // "http.client.active_requests" semantic conventions. It represents the number + // of active HTTP requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPClientActiveRequestsName = "http.client.active_requests" + HTTPClientActiveRequestsUnit = "{request}" + HTTPClientActiveRequestsDescription = "Number of active HTTP requests." 
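The HTTP client constants above plug into the histogram constructors the same
way. A sketch for the stable http.client.request.duration metric, again
assuming the global MeterProvider is set up elsewhere and using an
illustrative meter name:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// recordRequestDuration records one HTTP client request duration, in seconds
// (the convention's unit is "s"), on the request-duration histogram.
func recordRequestDuration(ctx context.Context, seconds float64) error {
	hist, err := otel.Meter("example").Float64Histogram(
		semconv.HTTPClientRequestDurationName,
		metric.WithUnit(semconv.HTTPClientRequestDurationUnit),
		metric.WithDescription(semconv.HTTPClientRequestDurationDescription),
	)
	if err != nil {
		return err
	}
	hist.Record(ctx, seconds)
	return nil
}
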
+ + // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic + // conventions. It represents the measure of initial memory requested. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmMemoryInitName = "jvm.memory.init" + JvmMemoryInitUnit = "By" + JvmMemoryInitDescription = "Measure of initial memory requested." + + // JvmSystemCPUUtilization is the metric conforming to the + // "jvm.system.cpu.utilization" semantic conventions. It represents the recent + // CPU utilization for the whole system as reported by the JVM. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization" + JvmSystemCPUUtilizationUnit = "1" + JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM." + + // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m" + // semantic conventions. It represents the average CPU load of the whole system + // for the last minute as reported by the JVM. + // Instrument: gauge + // Unit: {run_queue_item} + // Stability: Experimental + JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m" + JvmSystemCPULoad1mUnit = "{run_queue_item}" + JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM." + + // JvmBufferMemoryUsage is the metric conforming to the + // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of + // memory used by buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryUsageName = "jvm.buffer.memory.usage" + JvmBufferMemoryUsageUnit = "By" + JvmBufferMemoryUsageDescription = "Measure of memory used by buffers." + + // JvmBufferMemoryLimit is the metric conforming to the + // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of + // total memory capacity of buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryLimitName = "jvm.buffer.memory.limit" + JvmBufferMemoryLimitUnit = "By" + JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers." + + // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic + // conventions. It represents the number of buffers in the pool. + // Instrument: updowncounter + // Unit: {buffer} + // Stability: Experimental + JvmBufferCountName = "jvm.buffer.count" + JvmBufferCountUnit = "{buffer}" + JvmBufferCountDescription = "Number of buffers in the pool." + + // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic + // conventions. It represents the measure of memory used. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryUsedName = "jvm.memory.used" + JvmMemoryUsedUnit = "By" + JvmMemoryUsedDescription = "Measure of memory used." + + // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed" + // semantic conventions. It represents the measure of memory committed. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryCommittedName = "jvm.memory.committed" + JvmMemoryCommittedUnit = "By" + JvmMemoryCommittedDescription = "Measure of memory committed." + + // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic + // conventions. It represents the measure of max obtainable memory. 
+ // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryLimitName = "jvm.memory.limit" + JvmMemoryLimitUnit = "By" + JvmMemoryLimitDescription = "Measure of max obtainable memory." + + // JvmMemoryUsedAfterLastGc is the metric conforming to the + // "jvm.memory.used_after_last_gc" semantic conventions. It represents the + // measure of memory used, as measured after the most recent garbage collection + // event on this pool. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc" + JvmMemoryUsedAfterLastGcUnit = "By" + JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool." + + // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic + // conventions. It represents the duration of JVM garbage collection actions. + // Instrument: histogram + // Unit: s + // Stability: Stable + JvmGcDurationName = "jvm.gc.duration" + JvmGcDurationUnit = "s" + JvmGcDurationDescription = "Duration of JVM garbage collection actions." + + // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic + // conventions. It represents the number of executing platform threads. + // Instrument: updowncounter + // Unit: {thread} + // Stability: Stable + JvmThreadCountName = "jvm.thread.count" + JvmThreadCountUnit = "{thread}" + JvmThreadCountDescription = "Number of executing platform threads." + + // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic + // conventions. It represents the number of classes loaded since JVM start. + // Instrument: counter + // Unit: {class} + // Stability: Stable + JvmClassLoadedName = "jvm.class.loaded" + JvmClassLoadedUnit = "{class}" + JvmClassLoadedDescription = "Number of classes loaded since JVM start." + + // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded" + // semantic conventions. It represents the number of classes unloaded since JVM + // start. + // Instrument: counter + // Unit: {class} + // Stability: Stable + JvmClassUnloadedName = "jvm.class.unloaded" + JvmClassUnloadedUnit = "{class}" + JvmClassUnloadedDescription = "Number of classes unloaded since JVM start." + + // JvmClassCount is the metric conforming to the "jvm.class.count" semantic + // conventions. It represents the number of classes currently loaded. + // Instrument: updowncounter + // Unit: {class} + // Stability: Stable + JvmClassCountName = "jvm.class.count" + JvmClassCountUnit = "{class}" + JvmClassCountDescription = "Number of classes currently loaded." + + // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic + // conventions. It represents the number of processors available to the Java + // virtual machine. + // Instrument: updowncounter + // Unit: {cpu} + // Stability: Stable + JvmCPUCountName = "jvm.cpu.count" + JvmCPUCountUnit = "{cpu}" + JvmCPUCountDescription = "Number of processors available to the Java virtual machine." + + // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic + // conventions. It represents the CPU time used by the process as reported by + // the JVM. + // Instrument: counter + // Unit: s + // Stability: Stable + JvmCPUTimeName = "jvm.cpu.time" + JvmCPUTimeUnit = "s" + JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM." + + // JvmCPURecentUtilization is the metric conforming to the + // "jvm.cpu.recent_utilization" semantic conventions.
It represents the recent + // CPU utilization for the process as reported by the JVM. + // Instrument: gauge + // Unit: 1 + // Stability: Stable + JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization" + JvmCPURecentUtilizationUnit = "1" + JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM." + + // MessagingPublishDuration is the metric conforming to the + // "messaging.publish.duration" semantic conventions. It represents the + // duration of the publish operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingPublishDurationName = "messaging.publish.duration" + MessagingPublishDurationUnit = "s" + MessagingPublishDurationDescription = "Measures the duration of publish operation." + + // MessagingReceiveDuration is the metric conforming to the + // "messaging.receive.duration" semantic conventions. It represents the + // duration of the receive operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingReceiveDurationName = "messaging.receive.duration" + MessagingReceiveDurationUnit = "s" + MessagingReceiveDurationDescription = "Measures the duration of receive operation." + + // MessagingProcessDuration is the metric conforming to the + // "messaging.process.duration" semantic conventions. It represents the + // duration of the process operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingProcessDurationName = "messaging.process.duration" + MessagingProcessDurationUnit = "s" + MessagingProcessDurationDescription = "Measures the duration of process operation." + + // MessagingPublishMessages is the metric conforming to the + // "messaging.publish.messages" semantic conventions. It represents the + // number of published messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingPublishMessagesName = "messaging.publish.messages" + MessagingPublishMessagesUnit = "{message}" + MessagingPublishMessagesDescription = "Measures the number of published messages." + + // MessagingReceiveMessages is the metric conforming to the + // "messaging.receive.messages" semantic conventions. It represents the + // number of received messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingReceiveMessagesName = "messaging.receive.messages" + MessagingReceiveMessagesUnit = "{message}" + MessagingReceiveMessagesDescription = "Measures the number of received messages." + + // MessagingProcessMessages is the metric conforming to the + // "messaging.process.messages" semantic conventions. It represents the + // number of processed messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingProcessMessagesName = "messaging.process.messages" + MessagingProcessMessagesUnit = "{message}" + MessagingProcessMessagesDescription = "Measures the number of processed messages." + + // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic + // conventions. It represents the total CPU seconds broken down by different + // states. + // Instrument: counter + // Unit: s + // Stability: Experimental + ProcessCPUTimeName = "process.cpu.time" + ProcessCPUTimeUnit = "s" + ProcessCPUTimeDescription = "Total CPU seconds broken down by different states." + + // ProcessCPUUtilization is the metric conforming to the + // "process.cpu.utilization" semantic conventions.
It represents the difference + // in process.cpu.time since the last measurement, divided by the elapsed time + // and number of CPUs available to the process. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + ProcessCPUUtilizationName = "process.cpu.utilization" + ProcessCPUUtilizationUnit = "1" + ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process." + + // ProcessMemoryUsage is the metric conforming to the "process.memory.usage" + // semantic conventions. It represents the amount of physical memory in use. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryUsageName = "process.memory.usage" + ProcessMemoryUsageUnit = "By" + ProcessMemoryUsageDescription = "The amount of physical memory in use." + + // ProcessMemoryVirtual is the metric conforming to the + // "process.memory.virtual" semantic conventions. It represents the amount of + // committed virtual memory. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryVirtualName = "process.memory.virtual" + ProcessMemoryVirtualUnit = "By" + ProcessMemoryVirtualDescription = "The amount of committed virtual memory." + + // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic + // conventions. It represents the disk bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessDiskIoName = "process.disk.io" + ProcessDiskIoUnit = "By" + ProcessDiskIoDescription = "Disk bytes transferred." + + // ProcessNetworkIo is the metric conforming to the "process.network.io" + // semantic conventions. It represents the network bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessNetworkIoName = "process.network.io" + ProcessNetworkIoUnit = "By" + ProcessNetworkIoDescription = "Network bytes transferred." + + // ProcessThreadCount is the metric conforming to the "process.thread.count" + // semantic conventions. It represents the process threads count. + // Instrument: updowncounter + // Unit: {thread} + // Stability: Experimental + ProcessThreadCountName = "process.thread.count" + ProcessThreadCountUnit = "{thread}" + ProcessThreadCountDescription = "Process threads count." + + // ProcessOpenFileDescriptorCount is the metric conforming to the + // "process.open_file_descriptor.count" semantic conventions. It represents the + // number of file descriptors in use by the process. + // Instrument: updowncounter + // Unit: {count} + // Stability: Experimental + ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count" + ProcessOpenFileDescriptorCountUnit = "{count}" + ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process." + + // ProcessContextSwitches is the metric conforming to the + // "process.context_switches" semantic conventions. It represents the number of + // times the process has been context switched. + // Instrument: counter + // Unit: {count} + // Stability: Experimental + ProcessContextSwitchesName = "process.context_switches" + ProcessContextSwitchesUnit = "{count}" + ProcessContextSwitchesDescription = "Number of times the process has been context switched." + + // ProcessPagingFaults is the metric conforming to the "process.paging.faults" + // semantic conventions. It represents the number of page faults the process + // has made. 
+ // Instrument: counter + // Unit: {fault} + // Stability: Experimental + ProcessPagingFaultsName = "process.paging.faults" + ProcessPagingFaultsUnit = "{fault}" + ProcessPagingFaultsDescription = "Number of page faults the process has made." + + // RPCServerDuration is the metric conforming to the "rpc.server.duration" + // semantic conventions. It represents the duration of inbound + // RPC. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + RPCServerDurationName = "rpc.server.duration" + RPCServerDurationUnit = "ms" + RPCServerDurationDescription = "Measures the duration of inbound RPC." + + // RPCServerRequestSize is the metric conforming to the + // "rpc.server.request.size" semantic conventions. It represents + // the size of RPC request messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCServerRequestSizeName = "rpc.server.request.size" + RPCServerRequestSizeUnit = "By" + RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." + + // RPCServerResponseSize is the metric conforming to the + // "rpc.server.response.size" semantic conventions. It represents + // the size of RPC response messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCServerResponseSizeName = "rpc.server.response.size" + RPCServerResponseSizeUnit = "By" + RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." + + // RPCServerRequestsPerRPC is the metric conforming to the + // "rpc.server.requests_per_rpc" semantic conventions. It represents the + // number of messages received per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc" + RPCServerRequestsPerRPCUnit = "{count}" + RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC." + + // RPCServerResponsesPerRPC is the metric conforming to the + // "rpc.server.responses_per_rpc" semantic conventions. It represents the + // number of messages sent per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc" + RPCServerResponsesPerRPCUnit = "{count}" + RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC." + + // RPCClientDuration is the metric conforming to the "rpc.client.duration" + // semantic conventions. It represents the duration of outbound + // RPC. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + RPCClientDurationName = "rpc.client.duration" + RPCClientDurationUnit = "ms" + RPCClientDurationDescription = "Measures the duration of outbound RPC." + + // RPCClientRequestSize is the metric conforming to the + // "rpc.client.request.size" semantic conventions. It represents + // the size of RPC request messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCClientRequestSizeName = "rpc.client.request.size" + RPCClientRequestSizeUnit = "By" + RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." + + // RPCClientResponseSize is the metric conforming to the + // "rpc.client.response.size" semantic conventions. It represents + // the size of RPC response messages (uncompressed).
+ // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCClientResponseSizeName = "rpc.client.response.size" + RPCClientResponseSizeUnit = "By" + RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." + + // RPCClientRequestsPerRPC is the metric conforming to the + // "rpc.client.requests_per_rpc" semantic conventions. It represents the + // number of messages received per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc" + RPCClientRequestsPerRPCUnit = "{count}" + RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC." + + // RPCClientResponsesPerRPC is the metric conforming to the + // "rpc.client.responses_per_rpc" semantic conventions. It represents the + // number of messages sent per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc" + RPCClientResponsesPerRPCUnit = "{count}" + RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC." + + // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic + // conventions. It represents the seconds each logical CPU spent on each mode. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemCPUTimeName = "system.cpu.time" + SystemCPUTimeUnit = "s" + SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode" + + // SystemCPUUtilization is the metric conforming to the + // "system.cpu.utilization" semantic conventions. It represents the difference + // in system.cpu.time since the last measurement, divided by the elapsed time + // and number of logical CPUs. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + SystemCPUUtilizationName = "system.cpu.utilization" + SystemCPUUtilizationUnit = "1" + SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs" + + // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency" + // semantic conventions. It reports the current frequency of the + // CPU in Hz. + // Instrument: gauge + // Unit: {Hz} + // Stability: Experimental + SystemCPUFrequencyName = "system.cpu.frequency" + SystemCPUFrequencyUnit = "{Hz}" + SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz" + + // SystemCPUPhysicalCount is the metric conforming to the + // "system.cpu.physical.count" semantic conventions. It reports + // the number of actual physical processor cores on the hardware. + // Instrument: updowncounter + // Unit: {cpu} + // Stability: Experimental + SystemCPUPhysicalCountName = "system.cpu.physical.count" + SystemCPUPhysicalCountUnit = "{cpu}" + SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware" + + // SystemCPULogicalCount is the metric conforming to the + // "system.cpu.logical.count" semantic conventions. It reports + // the number of logical (virtual) processor cores created by the operating + // system to manage multitasking.
+ // Instrument: updowncounter + // Unit: {cpu} + // Stability: Experimental + SystemCPULogicalCountName = "system.cpu.logical.count" + SystemCPULogicalCountUnit = "{cpu}" + SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking" + + // SystemMemoryUsage is the metric conforming to the "system.memory.usage" + // semantic conventions. It reports memory in use by state. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemoryUsageName = "system.memory.usage" + SystemMemoryUsageUnit = "By" + SystemMemoryUsageDescription = "Reports memory in use by state." + + // SystemMemoryLimit is the metric conforming to the "system.memory.limit" + // semantic conventions. It represents the total memory available in the + // system. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemoryLimitName = "system.memory.limit" + SystemMemoryLimitUnit = "By" + SystemMemoryLimitDescription = "Total memory available in the system." + + // SystemMemoryShared is the metric conforming to the "system.memory.shared" + // semantic conventions. It represents the shared memory used (mostly by + // tmpfs). + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemorySharedName = "system.memory.shared" + SystemMemorySharedUnit = "By" + SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)." + + // SystemMemoryUtilization is the metric conforming to the + // "system.memory.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemMemoryUtilizationName = "system.memory.utilization" + SystemMemoryUtilizationUnit = "1" + + // SystemPagingUsage is the metric conforming to the "system.paging.usage" + // semantic conventions. It represents the Unix swap or Windows pagefile usage. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemPagingUsageName = "system.paging.usage" + SystemPagingUsageUnit = "By" + SystemPagingUsageDescription = "Unix swap or windows pagefile usage" + + // SystemPagingUtilization is the metric conforming to the + // "system.paging.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingUtilizationName = "system.paging.utilization" + SystemPagingUtilizationUnit = "1" + + // SystemPagingFaults is the metric conforming to the "system.paging.faults" + // semantic conventions. + // Instrument: counter + // Unit: {fault} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingFaultsName = "system.paging.faults" + SystemPagingFaultsUnit = "{fault}" + + // SystemPagingOperations is the metric conforming to the + // "system.paging.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingOperationsName = "system.paging.operations" + SystemPagingOperationsUnit = "{operation}" + + // SystemDiskIo is the metric conforming to the "system.disk.io" semantic + // conventions.
+ // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskIoName = "system.disk.io" + SystemDiskIoUnit = "By" + + // SystemDiskOperations is the metric conforming to the + // "system.disk.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskOperationsName = "system.disk.operations" + SystemDiskOperationsUnit = "{operation}" + + // SystemDiskIoTime is the metric conforming to the "system.disk.io_time" + // semantic conventions. It represents the time disk spent activated. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskIoTimeName = "system.disk.io_time" + SystemDiskIoTimeUnit = "s" + SystemDiskIoTimeDescription = "Time disk spent activated" + + // SystemDiskOperationTime is the metric conforming to the + // "system.disk.operation_time" semantic conventions. It represents the sum of + // the time each operation took to complete. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskOperationTimeName = "system.disk.operation_time" + SystemDiskOperationTimeUnit = "s" + SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" + + // SystemDiskMerged is the metric conforming to the "system.disk.merged" + // semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskMergedName = "system.disk.merged" + SystemDiskMergedUnit = "{operation}" + + // SystemFilesystemUsage is the metric conforming to the + // "system.filesystem.usage" semantic conventions. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUsageName = "system.filesystem.usage" + SystemFilesystemUsageUnit = "By" + + // SystemFilesystemUtilization is the metric conforming to the + // "system.filesystem.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUtilizationName = "system.filesystem.utilization" + SystemFilesystemUtilizationUnit = "1" + + // SystemNetworkDropped is the metric conforming to the + // "system.network.dropped" semantic conventions. It represents the count of + // packets that are dropped or discarded even though there was no error. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + SystemNetworkDroppedName = "system.network.dropped" + SystemNetworkDroppedUnit = "{packet}" + SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" + + // SystemNetworkPackets is the metric conforming to the + // "system.network.packets" semantic conventions. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
+ SystemNetworkPacketsName = "system.network.packets" + SystemNetworkPacketsUnit = "{packet}" + + // SystemNetworkErrors is the metric conforming to the "system.network.errors" + // semantic conventions. It represents the count of network errors detected. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + SystemNetworkErrorsName = "system.network.errors" + SystemNetworkErrorsUnit = "{error}" + SystemNetworkErrorsDescription = "Count of network errors detected" + + // SystemNetworkIo is the metric conforming to the "system.network.io" semantic + // conventions. + // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkIoName = "system.network.io" + SystemNetworkIoUnit = "By" + + // SystemNetworkConnections is the metric conforming to the + // "system.network.connections" semantic conventions. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkConnectionsName = "system.network.connections" + SystemNetworkConnectionsUnit = "{connection}" + + // SystemProcessCount is the metric conforming to the "system.process.count" + // semantic conventions. It represents the total number of processes in each + // state. + // Instrument: updowncounter + // Unit: {process} + // Stability: Experimental + SystemProcessCountName = "system.process.count" + SystemProcessCountUnit = "{process}" + SystemProcessCountDescription = "Total number of processes in each state" + + // SystemProcessCreated is the metric conforming to the + // "system.process.created" semantic conventions. It represents the total + // number of processes created over uptime of the host. + // Instrument: counter + // Unit: {process} + // Stability: Experimental + SystemProcessCreatedName = "system.process.created" + SystemProcessCreatedUnit = "{process}" + SystemProcessCreatedDescription = "Total number of processes created over uptime of the host" + + // SystemLinuxMemoryAvailable is the metric conforming to the + // "system.linux.memory.available" semantic conventions. It represents an + // estimate of how much memory is available for starting new applications, + // without causing swapping. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemLinuxMemoryAvailableName = "system.linux.memory.available" + SystemLinuxMemoryAvailableUnit = "By" + SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping" +) diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go new file mode 100644 index 00000000000..4c87c7adcc7 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. 
Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.26.0" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace.go index caf7249de85..6836c65478b 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/README.md b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/README.md new file mode 100644 index 00000000000..58ccaba69b1 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/README.md @@ -0,0 +1,3 @@ +# Trace API + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/trace) diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/config.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/config.go index 3aadc66cf7a..273d58e0014 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/config.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/context.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/context.go index 76f9a083c40..5650a174b4a 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/context.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" @@ -47,12 +36,12 @@ func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) conte // performs no operations is returned. func SpanFromContext(ctx context.Context) Span { if ctx == nil { - return noopSpan{} + return noopSpanInstance } if span, ok := ctx.Value(currentSpanKey).(Span); ok { return span } - return noopSpan{} + return noopSpanInstance } // SpanContextFromContext returns the current Span's SpanContext. diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/doc.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/doc.go index 440f3d7565a..d661c5d100f 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/doc.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package trace provides an implementation of the tracing part of the diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/embedded/README.md b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/embedded/README.md new file mode 100644 index 00000000000..7754a239ee6 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/embedded/README.md @@ -0,0 +1,3 @@ +# Trace Embedded + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/embedded) diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go index 898db5a7546..3e359a00bf4 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package embedded provides interfaces embedded within the [OpenTelemetry // trace API]. diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/nonrecording.go index 88fcb81611f..c00221e7be9 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/nonrecording.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/nonrecording.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/noop.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/noop.go index c125491caeb..ca20e9997ab 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/noop.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" @@ -52,7 +41,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption span := SpanFromContext(ctx) if _, ok := span.(nonRecordingSpan); !ok { // span is likely already a noopSpan, but let's be sure - span = noopSpan{} + span = noopSpanInstance } return ContextWithSpan(ctx, span), span } @@ -60,7 +49,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption // noopSpan is an implementation of Span that performs no operations. type noopSpan struct{ embedded.Span } -var _ Span = noopSpan{} +var noopSpanInstance Span = noopSpan{} // SpanContext returns an empty span context. func (noopSpan) SpanContext() SpanContext { return SpanContext{} } @@ -86,6 +75,9 @@ func (noopSpan) RecordError(error, ...EventOption) {} // AddEvent does nothing. func (noopSpan) AddEvent(string, ...EventOption) {} +// AddLink does nothing. +func (noopSpan) AddLink(Link) {} + // SetName does nothing. 
func (noopSpan) SetName(string) {} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/provider.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/provider.go new file mode 100644 index 00000000000..ef85cb70c6d --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/provider.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. +// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides, it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Tracers to instrument code. +// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type TracerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.TracerProvider + + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. + // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. + // + // This method is safe to call concurrently. 
+ Tracer(name string, options ...TracerOption) Tracer +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/span.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/span.go new file mode 100644 index 00000000000..d3aa476ee12 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/span.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" +) + +// Span is the individual component of a trace. It represents a single named +// and timed operation of a workflow that is traced. A Tracer is used to +// create a Span and it is then up to the operation the Span represents to +// properly end the Span when the operation itself ends. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Span interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Span + + // End completes the Span. The Span is considered complete and ready to be + // delivered through the rest of the telemetry pipeline after this method + // is called. Therefore, updates to the Span are not allowed after this + // method has been called. + End(options ...SpanEndOption) + + // AddEvent adds an event with the provided name and options. + AddEvent(name string, options ...EventOption) + + // AddLink adds a link. + // Adding links at span creation using WithLinks is preferred to calling AddLink + // later, for contexts that are available during span creation, because head + // sampling decisions can only consider information present during span creation. + AddLink(link Link) + + // IsRecording returns the recording state of the Span. It will return + // true if the Span is active and events can be recorded. + IsRecording() bool + + // RecordError will record err as an exception span event for this span. An + // additional call to SetStatus is required if the Status of the Span should + // be set to Error, as this method does not change the Span status. If this + // span is not being recorded or err is nil then this method does nothing. + RecordError(err error, options ...EventOption) + + // SpanContext returns the SpanContext of the Span. The returned SpanContext + // is usable even after the End method has been called for the Span. + SpanContext() SpanContext + + // SetStatus sets the status of the Span in the form of a code and a + // description, provided the status hasn't already been set to a higher + // value before (OK > Error > Unset). The description is only included in a + // status when the code is for an error. + SetStatus(code codes.Code, description string) + + // SetName sets the Span name. + SetName(name string) + + // SetAttributes sets kv as attributes of the Span. If a key from kv + // already exists for an attribute of the Span it will be overwritten with + // the value contained in kv. 
+ SetAttributes(kv ...attribute.KeyValue) + + // TracerProvider returns a TracerProvider that can be used to generate + // additional Spans on the same telemetry pipeline as the current Span. + TracerProvider() TracerProvider +} + +// Link is the relationship between two Spans. The relationship can be within +// the same Trace or across different Traces. +// +// For example, a Link is used in the following situations: +// +// 1. Batch Processing: A batch of operations may contain operations +// associated with one or more traces/spans. Since there can only be one +// parent SpanContext, a Link is used to keep reference to the +// SpanContext of all operations in the batch. +// 2. Public Endpoint: A SpanContext for an incoming client request on a +// public endpoint should be considered untrusted. In such a case, a new +// trace with its own identity and sampling decision needs to be created, +// but this new trace needs to be related to the original trace in some +// form. A Link is used to keep reference to the original SpanContext and +// track the relationship. +type Link struct { + // SpanContext of the linked Span. + SpanContext SpanContext + + // Attributes describe the aspects of the link. + Attributes []attribute.KeyValue +} + +// LinkFromContext returns a link encapsulating the SpanContext in the provided +// ctx. +func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { + return Link{ + SpanContext: SpanContextFromContext(ctx), + Attributes: attrs, + } +} + +// SpanKind is the role a Span plays in a Trace. +type SpanKind int + +// As a convenience, these match the proto definition, see +// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 +// +// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` +// to coerce a span kind to a valid value. +const ( + // SpanKindUnspecified is an unspecified SpanKind and is not a valid + // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal + // if it is received. + SpanKindUnspecified SpanKind = 0 + // SpanKindInternal is a SpanKind for a Span that represents an internal + // operation within an application. + SpanKindInternal SpanKind = 1 + // SpanKindServer is a SpanKind for a Span that represents the operation + // of handling a request from a client. + SpanKindServer SpanKind = 2 + // SpanKindClient is a SpanKind for a Span that represents the operation + // of a client making a request to a server. + SpanKindClient SpanKind = 3 + // SpanKindProducer is a SpanKind for a Span that represents the operation + // of a producer sending a message to a message broker. Unlike + // SpanKindClient and SpanKindServer, there is often no direct + // relationship between this kind of Span and a SpanKindConsumer kind. A + // SpanKindProducer Span will end once the message is accepted by the + // message broker which might not overlap with the processing of that + // message. + SpanKindProducer SpanKind = 4 + // SpanKindConsumer is a SpanKind for a Span that represents the operation + // of a consumer receiving a message from a message broker. Like + // SpanKindProducer Spans, there is often no direct relationship between + // this Span and the Span that produced the message. + SpanKindConsumer SpanKind = 5 +) + +// ValidateSpanKind returns a valid span kind value. This will coerce +// invalid values into the default value, SpanKindInternal.
+func ValidateSpanKind(spanKind SpanKind) SpanKind { + switch spanKind { + case SpanKindInternal, + SpanKindServer, + SpanKindClient, + SpanKindProducer, + SpanKindConsumer: + // valid + return spanKind + default: + return SpanKindInternal + } +} + +// String returns the specified name of the SpanKind in lower-case. +func (sk SpanKind) String() string { + switch sk { + case SpanKindInternal: + return "internal" + case SpanKindServer: + return "server" + case SpanKindClient: + return "client" + case SpanKindProducer: + return "producer" + case SpanKindConsumer: + return "consumer" + default: + return "unspecified" + } +} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/trace.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/trace.go index 26a4b2260ec..d49adf671b9 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -1,28 +1,12 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" import ( "bytes" - "context" "encoding/hex" "encoding/json" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace/embedded" ) const ( @@ -337,241 +321,3 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) { Remote: sc.remote, }) } - -// Span is the individual component of a trace. It represents a single named -// and timed operation of a workflow that is traced. A Tracer is used to -// create a Span and it is then up to the operation the Span represents to -// properly end the Span when the operation itself ends. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Span interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Span - - // End completes the Span. The Span is considered complete and ready to be - // delivered through the rest of the telemetry pipeline after this method - // is called. Therefore, updates to the Span are not allowed after this - // method has been called. - End(options ...SpanEndOption) - - // AddEvent adds an event with the provided name and options. - AddEvent(name string, options ...EventOption) - - // IsRecording returns the recording state of the Span. It will return - // true if the Span is active and events can be recorded. - IsRecording() bool - - // RecordError will record err as an exception span event for this span. An - // additional call to SetStatus is required if the Status of the Span should - // be set to Error, as this method does not change the Span status. 
If this - // span is not being recorded or err is nil then this method does nothing. - RecordError(err error, options ...EventOption) - - // SpanContext returns the SpanContext of the Span. The returned SpanContext - // is usable even after the End method has been called for the Span. - SpanContext() SpanContext - - // SetStatus sets the status of the Span in the form of a code and a - // description, provided the status hasn't already been set to a higher - // value before (OK > Error > Unset). The description is only included in a - // status when the code is for an error. - SetStatus(code codes.Code, description string) - - // SetName sets the Span name. - SetName(name string) - - // SetAttributes sets kv as attributes of the Span. If a key from kv - // already exists for an attribute of the Span it will be overwritten with - // the value contained in kv. - SetAttributes(kv ...attribute.KeyValue) - - // TracerProvider returns a TracerProvider that can be used to generate - // additional Spans on the same telemetry pipeline as the current Span. - TracerProvider() TracerProvider -} - -// Link is the relationship between two Spans. The relationship can be within -// the same Trace or across different Traces. -// -// For example, a Link is used in the following situations: -// -// 1. Batch Processing: A batch of operations may contain operations -// associated with one or more traces/spans. Since there can only be one -// parent SpanContext, a Link is used to keep reference to the -// SpanContext of all operations in the batch. -// 2. Public Endpoint: A SpanContext for an in incoming client request on a -// public endpoint should be considered untrusted. In such a case, a new -// trace with its own identity and sampling decision needs to be created, -// but this new trace needs to be related to the original trace in some -// form. A Link is used to keep reference to the original SpanContext and -// track the relationship. -type Link struct { - // SpanContext of the linked Span. - SpanContext SpanContext - - // Attributes describe the aspects of the link. - Attributes []attribute.KeyValue -} - -// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx. -func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { - return Link{ - SpanContext: SpanContextFromContext(ctx), - Attributes: attrs, - } -} - -// SpanKind is the role a Span plays in a Trace. -type SpanKind int - -// As a convenience, these match the proto definition, see -// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 -// -// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` -// to coerce a span kind to a valid value. -const ( - // SpanKindUnspecified is an unspecified SpanKind and is not a valid - // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal - // if it is received. - SpanKindUnspecified SpanKind = 0 - // SpanKindInternal is a SpanKind for a Span that represents an internal - // operation within an application. - SpanKindInternal SpanKind = 1 - // SpanKindServer is a SpanKind for a Span that represents the operation - // of handling a request from a client. - SpanKindServer SpanKind = 2 - // SpanKindClient is a SpanKind for a Span that represents the operation - // of client making a request to a server. 
- SpanKindClient SpanKind = 3 - // SpanKindProducer is a SpanKind for a Span that represents the operation - // of a producer sending a message to a message broker. Unlike - // SpanKindClient and SpanKindServer, there is often no direct - // relationship between this kind of Span and a SpanKindConsumer kind. A - // SpanKindProducer Span will end once the message is accepted by the - // message broker which might not overlap with the processing of that - // message. - SpanKindProducer SpanKind = 4 - // SpanKindConsumer is a SpanKind for a Span that represents the operation - // of a consumer receiving a message from a message broker. Like - // SpanKindProducer Spans, there is often no direct relationship between - // this Span and the Span that produced the message. - SpanKindConsumer SpanKind = 5 -) - -// ValidateSpanKind returns a valid span kind value. This will coerce -// invalid values into the default value, SpanKindInternal. -func ValidateSpanKind(spanKind SpanKind) SpanKind { - switch spanKind { - case SpanKindInternal, - SpanKindServer, - SpanKindClient, - SpanKindProducer, - SpanKindConsumer: - // valid - return spanKind - default: - return SpanKindInternal - } -} - -// String returns the specified name of the SpanKind in lower-case. -func (sk SpanKind) String() string { - switch sk { - case SpanKindInternal: - return "internal" - case SpanKindServer: - return "server" - case SpanKindClient: - return "client" - case SpanKindProducer: - return "producer" - case SpanKindConsumer: - return "consumer" - default: - return "unspecified" - } -} - -// Tracer is the creator of Spans. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Tracer interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Tracer - - // Start creates a span and a context.Context containing the newly-created span. - // - // If the context.Context provided in `ctx` contains a Span then the newly-created - // Span will be a child of that span, otherwise it will be a root span. This behavior - // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the - // newly-created Span to be a root span even if `ctx` contains a Span. - // - // When creating a Span it is recommended to provide all known span attributes using - // the `WithAttributes()` SpanOption as samplers will only have access to the - // attributes provided when a Span is created. - // - // Any Span that is created MUST also be ended. This is the responsibility of the user. - // Implementations of this API may leak memory or other resources if Spans are not ended. - Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) -} - -// TracerProvider provides Tracers that are used by instrumentation code to -// trace computational workflows. -// -// A TracerProvider is the collection destination of all Spans from Tracers it -// provides, it represents a unique telemetry collection pipeline. How that -// pipeline is defined, meaning how those Spans are collected, processed, and -// where they are exported, depends on its implementation. Instrumentation -// authors do not need to define this implementation, rather just use the -// provided Tracers to instrument code. 
-// -// Commonly, instrumentation code will accept a TracerProvider implementation -// at runtime from its users or it can simply use the globally registered one -// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type TracerProvider interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.TracerProvider - - // Tracer returns a unique Tracer scoped to be used by instrumentation code - // to trace computational workflows. The scope and identity of that - // instrumentation code is uniquely defined by the name and options passed. - // - // The passed name needs to uniquely identify instrumentation code. - // Therefore, it is recommended that name is the Go package name of the - // library providing instrumentation (note: not the code being - // instrumented). Instrumentation libraries can have multiple versions, - // therefore, the WithInstrumentationVersion option should be used to - // distinguish these different codebases. Additionally, instrumentation - // libraries may sometimes use traces to communicate different domains of - // workflow data (i.e. using spans to communicate workflow events only). If - // this is the case, the WithScopeAttributes option should be used to - // uniquely identify Tracers that handle the different domains of workflow - // data. - // - // If the same name and options are passed multiple times, the same Tracer - // will be returned (it is up to the implementation if this will be the - // same underlying instance of that Tracer or not). It is not necessary to - // call this multiple times with the same name and options to get an - // up-to-date Tracer. All implementations will ensure any TracerProvider - // configuration changes are propagated to all provided Tracers. - // - // If name is empty, then an implementation defined default name will be - // used instead. - // - // This method is safe to call concurrently. - Tracer(name string, options ...TracerOption) Tracer -} diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/tracer.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/tracer.go new file mode 100644 index 00000000000..77952d2a0b3 --- /dev/null +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/tracer.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/trace/embedded" +) + +// Tracer is the creator of Spans. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Tracer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Tracer + + // Start creates a span and a context.Context containing the newly-created span. 
+	//
+	// If the context.Context provided in `ctx` contains a Span then the newly-created
+	// Span will be a child of that span, otherwise it will be a root span. This behavior
+	// can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
+	// newly-created Span to be a root span even if `ctx` contains a Span.
+	//
+	// When creating a Span it is recommended to provide all known span attributes using
+	// the `WithAttributes()` SpanOption as samplers will only have access to the
+	// attributes provided when a Span is created.
+	//
+	// Any Span that is created MUST also be ended. This is the responsibility of the user.
+	// Implementations of this API may leak memory or other resources if Spans are not ended.
+	Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
+}
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/tracestate.go
index db936ba5b73..dc5e34cad0d 100644
--- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/tracestate.go
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/trace/tracestate.go
@@ -1,16 +1,5 @@
 // Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

 package trace // import "go.opentelemetry.io/otel/trace"

@@ -271,6 +260,16 @@ func (ts TraceState) Get(key string) string {
 	return ""
 }

+// Walk walks all key value pairs in the TraceState by calling f.
+// Iteration stops if f returns false.
+func (ts TraceState) Walk(f func(key, value string) bool) {
+	for _, m := range ts.list {
+		if !f(m.Key, m.Value) {
+			break
+		}
+	}
+}
+
 // Insert adds a new list-member defined by the key/value pair to the
 // TraceState. If a list-member already exists for the given key, that
 // list-member's value is updated. The new or updated list-member is always
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/verify_examples.sh b/terraform/providers/google/vendor/go.opentelemetry.io/otel/verify_examples.sh
index dbb61a42279..e57bf57fce8 100644
--- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/verify_examples.sh
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/verify_examples.sh
@@ -1,18 +1,7 @@
 #!/bin/bash

 # Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# SPDX-License-Identifier: Apache-2.0

 set -euo pipefail
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/verify_readmes.sh b/terraform/providers/google/vendor/go.opentelemetry.io/otel/verify_readmes.sh
new file mode 100644
index 00000000000..1e87855eeaa
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/verify_readmes.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+dirs=$(find . -type d -not -path "*/internal*" -not -path "*/test*" -not -path "*/example*" -not -path "*/.*" | sort)
+
+missingReadme=false
+for dir in $dirs; do
+    if [ ! -f "$dir/README.md" ]; then
+        echo "couldn't find README.md for $dir"
+        missingReadme=true
+    fi
+done
+
+if [ "$missingReadme" = true ] ; then
+    echo "Error: some READMEs couldn't be found."
+    exit 1
+fi
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/terraform/providers/google/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
new file mode 100644
index 00000000000..c9b7cdbbfef
--- /dev/null
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+TARGET="${1:?Must provide target ref}"
+
+FILE="CHANGELOG.md"
+TEMP_DIR=$(mktemp -d)
+echo "Temp folder: $TEMP_DIR"
+
+# Only the latest commit of the feature branch is available
+# automatically. To diff with the base branch, we need to
+# fetch that too (and we only need its latest commit).
+git fetch origin "${TARGET}" --depth=1
+
+# Checkout the previous version on the base branch of the changelog to tmpfolder
+git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE
+
+PREVIOUS_FILE="$TEMP_DIR/$FILE"
+CURRENT_FILE="$FILE"
+PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md"
+CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md"
+
+# Extract released sections from the previous version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE"
+
+# Extract released sections from the current version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE"
+
+# Compare the released sections
+if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then
+    echo "Error: The released sections of the changelog file have been modified."
+    diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"
+    rm -rf "$TEMP_DIR"
+    false
+fi
+
+rm -rf "$TEMP_DIR"
+echo "The released sections remain unchanged."
diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/version.go b/terraform/providers/google/vendor/go.opentelemetry.io/otel/version.go
index 7b2993a1fef..f67039ed1f9 100644
--- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/version.go
+++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/version.go
@@ -1,20 +1,9 @@
 // Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.24.0" + return "1.29.0" } diff --git a/terraform/providers/google/vendor/go.opentelemetry.io/otel/versions.yaml b/terraform/providers/google/vendor/go.opentelemetry.io/otel/versions.yaml index 1b556e6782b..3ba611d7136 100644 --- a/terraform/providers/google/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/terraform/providers/google/vendor/go.opentelemetry.io/otel/versions.yaml @@ -1,20 +1,9 @@ # Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# SPDX-License-Identifier: Apache-2.0 module-sets: stable-v1: - version: v1.24.0 + version: v1.29.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -40,16 +29,20 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.46.0 + version: v0.51.0 modules: - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.0.1-alpha + version: v0.5.0 modules: - go.opentelemetry.io/otel/log + - go.opentelemetry.io/otel/sdk/log + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp + - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.7 + version: v0.0.8 modules: - go.opentelemetry.io/otel/schema excluded-modules: diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/LICENSE b/terraform/providers/google/vendor/golang.org/x/crypto/LICENSE index 6a66aea5eaf..2a7cf70da6e 100644 --- a/terraform/providers/google/vendor/golang.org/x/crypto/LICENSE +++ b/terraform/providers/google/vendor/golang.org/x/crypto/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
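[Editor's note: illustrative example, not part of the patch] The OpenTelemetry hunks above vendor the v1.29.0 trace API (Span, Link, SpanKind, Tracer, TracerProvider) and add the new TraceState.Walk iterator. A minimal sketch of how instrumented code exercises that API follows; the package name "example" and the helper doWork are hypothetical, everything else is the vendored API as documented in the hunks above.

	package example

	import (
		"context"
		"errors"

		"go.opentelemetry.io/otel"
		"go.opentelemetry.io/otel/attribute"
		"go.opentelemetry.io/otel/codes"
		"go.opentelemetry.io/otel/trace"
	)

	// doWork (hypothetical) walks the Span lifecycle the vendored docs
	// describe: Start, attributes at creation time, RecordError,
	// SetStatus, and the mandatory End.
	func doWork(ctx context.Context) {
		// Scope name per the TracerProvider.Tracer docs: the Go package
		// of the instrumentation library, not of the instrumented code.
		tracer := otel.Tracer("example/instrumentation")
		ctx, span := tracer.Start(ctx, "do-work",
			trace.WithSpanKind(trace.SpanKindClient),
			// Samplers only see attributes provided at creation.
			trace.WithAttributes(attribute.String("peer.service", "backend")),
			// LinkFromContext covers the batch and public-endpoint cases
			// where a parent-child relationship is not appropriate.
			trace.WithLinks(trace.LinkFromContext(ctx)),
		)
		defer span.End() // any Span that is created MUST also be ended

		if err := errors.New("simulated failure"); err != nil { // stand-in for real work
			span.RecordError(err)
			span.SetStatus(codes.Error, "operation failed")
		}

		// TraceState.Walk, added in the tracestate.go hunk above, visits
		// list-members in order; returning false stops iteration early.
		span.SpanContext().TraceState().Walk(func(key, value string) bool {
			_, _ = key, value
			return true
		})
	}

Note that Start both returns the Span and threads it through the returned context.Context, which is how child spans later find their parent.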
diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/terraform/providers/google/vendor/golang.org/x/crypto/argon2/blamka_amd64.s index 6713accac09..c3895478ed0 100644 --- a/terraform/providers/google/vendor/golang.org/x/crypto/argon2/blamka_amd64.s +++ b/terraform/providers/google/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -1,243 +1,2791 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by command: go run blamka_amd64.go -out ../blamka_amd64.s -pkg argon2. DO NOT EDIT. //go:build amd64 && gc && !purego #include "textflag.h" -DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 - -#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v6, t1; \ - PUNPCKLQDQ v6, t2; \ - PUNPCKHQDQ v7, v6; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ v7, t2; \ - MOVO t1, v7; \ - MOVO v2, t1; \ - PUNPCKHQDQ t2, v7; \ - PUNPCKLQDQ v3, t2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v3 - -#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v2, t1; \ - PUNPCKLQDQ v2, t2; \ - PUNPCKHQDQ v3, v2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ v3, t2; \ - MOVO t1, v3; \ - MOVO v6, t1; \ - PUNPCKHQDQ t2, v3; \ - PUNPCKLQDQ v7, t2; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v7 - -#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \ - MOVO v0, t0; \ - PMULULQ v2, t0; \ - PADDQ v2, v0; \ - PADDQ t0, v0; \ - PADDQ t0, v0; \ - PXOR v0, v6; \ - PSHUFD $0xB1, v6, v6; \ - MOVO v4, t0; \ - PMULULQ v6, t0; \ - PADDQ v6, v4; \ - PADDQ t0, v4; \ - PADDQ t0, v4; \ - PXOR v4, v2; \ - PSHUFB c40, v2; \ - MOVO v0, t0; \ - PMULULQ v2, t0; \ - PADDQ v2, v0; \ - PADDQ t0, v0; \ - PADDQ t0, v0; \ - PXOR v0, v6; \ - PSHUFB c48, v6; \ - MOVO v4, t0; \ - PMULULQ v6, t0; \ - PADDQ v6, v4; \ - PADDQ t0, v4; \ - PADDQ t0, v4; \ - PXOR v4, v2; \ - MOVO v2, t0; \ - PADDQ v2, t0; \ - PSRLQ $63, v2; \ - PXOR t0, v2; \ - MOVO v1, t0; \ - PMULULQ v3, t0; \ - PADDQ v3, v1; \ - PADDQ t0, v1; \ - PADDQ t0, v1; \ - PXOR v1, v7; \ - PSHUFD $0xB1, v7, v7; \ - MOVO v5, t0; \ - PMULULQ v7, t0; \ - PADDQ v7, v5; \ - PADDQ t0, v5; \ - PADDQ t0, v5; \ - PXOR v5, v3; \ - PSHUFB c40, v3; \ - MOVO v1, t0; \ - PMULULQ v3, t0; \ - PADDQ v3, v1; \ - PADDQ t0, v1; \ - PADDQ t0, v1; \ - PXOR v1, v7; \ - PSHUFB c48, v7; \ - MOVO v5, t0; \ - PMULULQ v7, t0; \ - PADDQ v7, v5; \ - PADDQ t0, v5; \ - PADDQ t0, v5; \ - PXOR v5, v3; \ - MOVO v3, t0; \ - PADDQ v3, t0; \ - PSRLQ $63, v3; \ - PXOR t0, v3 - -#define LOAD_MSG_0(block, off) \ - MOVOU 8*(off+0)(block), X0; \ - MOVOU 8*(off+2)(block), X1; \ - MOVOU 8*(off+4)(block), X2; \ - MOVOU 8*(off+6)(block), X3; \ - MOVOU 8*(off+8)(block), X4; \ - MOVOU 8*(off+10)(block), X5; \ - MOVOU 8*(off+12)(block), X6; \ - MOVOU 8*(off+14)(block), X7 - -#define STORE_MSG_0(block, off) \ - MOVOU X0, 8*(off+0)(block); \ - MOVOU X1, 8*(off+2)(block); \ - MOVOU X2, 8*(off+4)(block); \ - MOVOU X3, 8*(off+6)(block); \ - MOVOU X4, 8*(off+8)(block); \ - MOVOU X5, 8*(off+10)(block); \ - MOVOU X6, 8*(off+12)(block); \ - MOVOU X7, 8*(off+14)(block) - -#define LOAD_MSG_1(block, off) \ - MOVOU 8*off+0*8(block), X0; \ - 
MOVOU 8*off+16*8(block), X1; \ - MOVOU 8*off+32*8(block), X2; \ - MOVOU 8*off+48*8(block), X3; \ - MOVOU 8*off+64*8(block), X4; \ - MOVOU 8*off+80*8(block), X5; \ - MOVOU 8*off+96*8(block), X6; \ - MOVOU 8*off+112*8(block), X7 - -#define STORE_MSG_1(block, off) \ - MOVOU X0, 8*off+0*8(block); \ - MOVOU X1, 8*off+16*8(block); \ - MOVOU X2, 8*off+32*8(block); \ - MOVOU X3, 8*off+48*8(block); \ - MOVOU X4, 8*off+64*8(block); \ - MOVOU X5, 8*off+80*8(block); \ - MOVOU X6, 8*off+96*8(block); \ - MOVOU X7, 8*off+112*8(block) - -#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \ - LOAD_MSG_0(block, off); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ - STORE_MSG_0(block, off) - -#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \ - LOAD_MSG_1(block, off); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ - STORE_MSG_1(block, off) - // func blamkaSSE4(b *block) -TEXT ·blamkaSSE4(SB), 4, $0-8 - MOVQ b+0(FP), AX - - MOVOU ·c40<>(SB), X10 - MOVOU ·c48<>(SB), X11 +// Requires: SSE2, SSSE3 +TEXT ·blamkaSSE4(SB), NOSPLIT, $0-8 + MOVQ b+0(FP), AX + MOVOU ·c40<>+0(SB), X10 + MOVOU ·c48<>+0(SB), X11 + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU 64(AX), X4 + MOVOU 80(AX), X5 + MOVOU 96(AX), X6 + MOVOU 112(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + 
PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, 32(AX) + MOVOU X3, 48(AX) + MOVOU X4, 64(AX) + MOVOU X5, 80(AX) + MOVOU X6, 96(AX) + MOVOU X7, 112(AX) + MOVOU 128(AX), X0 + MOVOU 144(AX), X1 + MOVOU 160(AX), X2 + MOVOU 176(AX), X3 + MOVOU 192(AX), X4 + MOVOU 208(AX), X5 + MOVOU 224(AX), X6 + MOVOU 240(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 128(AX) + MOVOU X1, 144(AX) + MOVOU X2, 160(AX) + MOVOU X3, 176(AX) + MOVOU X4, 192(AX) + MOVOU X5, 208(AX) + MOVOU X6, 224(AX) + MOVOU X7, 240(AX) + MOVOU 256(AX), X0 + MOVOU 272(AX), X1 + MOVOU 288(AX), X2 + MOVOU 304(AX), X3 + MOVOU 320(AX), X4 + MOVOU 336(AX), X5 + MOVOU 352(AX), X6 + MOVOU 
368(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 256(AX) + MOVOU X1, 272(AX) + MOVOU X2, 288(AX) + MOVOU X3, 304(AX) + MOVOU X4, 320(AX) + MOVOU X5, 336(AX) + MOVOU X6, 352(AX) + MOVOU X7, 368(AX) + MOVOU 384(AX), X0 + MOVOU 400(AX), X1 + MOVOU 416(AX), X2 + MOVOU 432(AX), X3 + MOVOU 448(AX), X4 + MOVOU 464(AX), X5 + MOVOU 480(AX), X6 + MOVOU 496(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR 
X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 384(AX) + MOVOU X1, 400(AX) + MOVOU X2, 416(AX) + MOVOU X3, 432(AX) + MOVOU X4, 448(AX) + MOVOU X5, 464(AX) + MOVOU X6, 480(AX) + MOVOU X7, 496(AX) + MOVOU 512(AX), X0 + MOVOU 528(AX), X1 + MOVOU 544(AX), X2 + MOVOU 560(AX), X3 + MOVOU 576(AX), X4 + MOVOU 592(AX), X5 + MOVOU 608(AX), X6 + MOVOU 624(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, 
X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 512(AX) + MOVOU X1, 528(AX) + MOVOU X2, 544(AX) + MOVOU X3, 560(AX) + MOVOU X4, 576(AX) + MOVOU X5, 592(AX) + MOVOU X6, 608(AX) + MOVOU X7, 624(AX) + MOVOU 640(AX), X0 + MOVOU 656(AX), X1 + MOVOU 672(AX), X2 + MOVOU 688(AX), X3 + MOVOU 704(AX), X4 + MOVOU 720(AX), X5 + MOVOU 736(AX), X6 + MOVOU 752(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + 
PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 640(AX) + MOVOU X1, 656(AX) + MOVOU X2, 672(AX) + MOVOU X3, 688(AX) + MOVOU X4, 704(AX) + MOVOU X5, 720(AX) + MOVOU X6, 736(AX) + MOVOU X7, 752(AX) + MOVOU 768(AX), X0 + MOVOU 784(AX), X1 + MOVOU 800(AX), X2 + MOVOU 816(AX), X3 + MOVOU 832(AX), X4 + MOVOU 848(AX), X5 + MOVOU 864(AX), X6 + MOVOU 880(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 768(AX) + MOVOU X1, 784(AX) + MOVOU X2, 800(AX) + MOVOU X3, 816(AX) + MOVOU X4, 832(AX) + MOVOU X5, 848(AX) + MOVOU X6, 864(AX) + MOVOU X7, 880(AX) + MOVOU 896(AX), X0 + MOVOU 912(AX), X1 + MOVOU 928(AX), X2 + MOVOU 944(AX), X3 + MOVOU 960(AX), X4 + MOVOU 976(AX), X5 + MOVOU 992(AX), X6 + MOVOU 1008(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ 
X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 896(AX) + MOVOU X1, 912(AX) + MOVOU X2, 928(AX) + MOVOU X3, 944(AX) + MOVOU X4, 960(AX) + MOVOU X5, 976(AX) + MOVOU X6, 992(AX) + MOVOU X7, 1008(AX) + MOVOU (AX), X0 + MOVOU 128(AX), X1 + MOVOU 256(AX), X2 + MOVOU 384(AX), X3 + MOVOU 512(AX), X4 + MOVOU 640(AX), X5 + MOVOU 768(AX), X6 + MOVOU 896(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ 
X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, (AX) + MOVOU X1, 128(AX) + MOVOU X2, 256(AX) + MOVOU X3, 384(AX) + MOVOU X4, 512(AX) + MOVOU X5, 640(AX) + MOVOU X6, 768(AX) + MOVOU X7, 896(AX) + MOVOU 16(AX), X0 + MOVOU 144(AX), X1 + MOVOU 272(AX), X2 + MOVOU 400(AX), X3 + MOVOU 528(AX), X4 + MOVOU 656(AX), X5 + MOVOU 784(AX), X6 + MOVOU 912(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, 
X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 16(AX) + MOVOU X1, 144(AX) + MOVOU X2, 272(AX) + MOVOU X3, 400(AX) + MOVOU X4, 528(AX) + MOVOU X5, 656(AX) + MOVOU X6, 784(AX) + MOVOU X7, 912(AX) + MOVOU 32(AX), X0 + MOVOU 160(AX), X1 + MOVOU 288(AX), X2 + MOVOU 416(AX), X3 + MOVOU 544(AX), X4 + MOVOU 672(AX), X5 + MOVOU 800(AX), X6 + MOVOU 928(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 32(AX) + MOVOU X1, 160(AX) + MOVOU X2, 288(AX) + MOVOU X3, 416(AX) + MOVOU X4, 544(AX) + MOVOU X5, 672(AX) + MOVOU X6, 800(AX) + MOVOU X7, 928(AX) + MOVOU 48(AX), X0 + MOVOU 176(AX), X1 + MOVOU 304(AX), X2 + MOVOU 432(AX), X3 + MOVOU 560(AX), X4 + MOVOU 688(AX), X5 + MOVOU 816(AX), X6 + MOVOU 944(AX), X7 + MOVO X0, X8 + PMULULQ X2, 
X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 48(AX) + MOVOU X1, 176(AX) + MOVOU X2, 304(AX) + MOVOU X3, 432(AX) + MOVOU X4, 560(AX) + MOVOU X5, 688(AX) + MOVOU X6, 816(AX) + MOVOU X7, 944(AX) + MOVOU 64(AX), X0 + MOVOU 192(AX), X1 + MOVOU 320(AX), X2 + MOVOU 448(AX), X3 + MOVOU 576(AX), X4 + MOVOU 704(AX), X5 + MOVOU 832(AX), X6 + MOVOU 960(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + 
PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 64(AX) + MOVOU X1, 192(AX) + MOVOU X2, 320(AX) + MOVOU X3, 448(AX) + MOVOU X4, 576(AX) + MOVOU X5, 704(AX) + MOVOU X6, 832(AX) + MOVOU X7, 960(AX) + MOVOU 80(AX), X0 + MOVOU 208(AX), X1 + MOVOU 336(AX), X2 + MOVOU 464(AX), X3 + MOVOU 592(AX), X4 + MOVOU 720(AX), X5 + MOVOU 848(AX), X6 + MOVOU 976(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ 
X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 80(AX) + MOVOU X1, 208(AX) + MOVOU X2, 336(AX) + MOVOU X3, 464(AX) + MOVOU X4, 592(AX) + MOVOU X5, 720(AX) + MOVOU X6, 848(AX) + MOVOU X7, 976(AX) + MOVOU 96(AX), X0 + MOVOU 224(AX), X1 + MOVOU 352(AX), X2 + MOVOU 480(AX), X3 + MOVOU 608(AX), X4 + MOVOU 736(AX), X5 + MOVOU 864(AX), X6 + MOVOU 992(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ 
X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 96(AX) + MOVOU X1, 224(AX) + MOVOU X2, 352(AX) + MOVOU X3, 480(AX) + MOVOU X4, 608(AX) + MOVOU X5, 736(AX) + MOVOU X6, 864(AX) + MOVOU X7, 992(AX) + MOVOU 112(AX), X0 + MOVOU 240(AX), X1 + MOVOU 368(AX), X2 + MOVOU 496(AX), X3 + MOVOU 624(AX), X4 + MOVOU 752(AX), X5 + MOVOU 880(AX), X6 + MOVOU 1008(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 112(AX) + MOVOU X1, 240(AX) + MOVOU X2, 368(AX) + MOVOU X3, 496(AX) + MOVOU X4, 624(AX) + MOVOU X5, 752(AX) + MOVOU X6, 880(AX) + MOVOU X7, 1008(AX) + RET - BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11) +DATA ·c40<>+0(SB)/8, $0x0201000706050403 +DATA ·c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), RODATA|NOPTR, $16 - BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11) - 
BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11) - RET +DATA ·c48<>+0(SB)/8, $0x0100070605040302 +DATA ·c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), RODATA|NOPTR, $16 -// func mixBlocksSSE2(out, a, b, c *block) -TEXT ·mixBlocksSSE2(SB), 4, $0-32 +// func mixBlocksSSE2(out *block, a *block, b *block, c *block) +// Requires: SSE2 +TEXT ·mixBlocksSSE2(SB), NOSPLIT, $0-32 MOVQ out+0(FP), DX MOVQ a+8(FP), AX MOVQ b+16(FP), BX MOVQ c+24(FP), CX - MOVQ $128, DI + MOVQ $0x00000080, DI loop: - MOVOU 0(AX), X0 - MOVOU 0(BX), X1 - MOVOU 0(CX), X2 + MOVOU (AX), X0 + MOVOU (BX), X1 + MOVOU (CX), X2 PXOR X1, X0 PXOR X2, X0 - MOVOU X0, 0(DX) - ADDQ $16, AX - ADDQ $16, BX - ADDQ $16, CX - ADDQ $16, DX - SUBQ $2, DI + MOVOU X0, (DX) + ADDQ $0x10, AX + ADDQ $0x10, BX + ADDQ $0x10, CX + ADDQ $0x10, DX + SUBQ $0x02, DI JA loop RET -// func xorBlocksSSE2(out, a, b, c *block) -TEXT ·xorBlocksSSE2(SB), 4, $0-32 +// func xorBlocksSSE2(out *block, a *block, b *block, c *block) +// Requires: SSE2 +TEXT ·xorBlocksSSE2(SB), NOSPLIT, $0-32 MOVQ out+0(FP), DX MOVQ a+8(FP), AX MOVQ b+16(FP), BX MOVQ c+24(FP), CX - MOVQ $128, DI + MOVQ $0x00000080, DI loop: - MOVOU 0(AX), X0 - MOVOU 0(BX), X1 - MOVOU 0(CX), X2 - MOVOU 0(DX), X3 + MOVOU (AX), X0 + MOVOU (BX), X1 + MOVOU (CX), X2 + MOVOU (DX), X3 PXOR X1, X0 PXOR X2, X0 PXOR X3, X0 - MOVOU X0, 0(DX) - ADDQ $16, AX - ADDQ $16, BX - ADDQ $16, CX - ADDQ $16, DX - SUBQ $2, DI + MOVOU X0, (DX) + ADDQ $0x10, AX + ADDQ $0x10, BX + ADDQ $0x10, CX + ADDQ $0x10, DX + SUBQ $0x02, DI JA loop RET diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/terraform/providers/google/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s index 9ae8206c201..f75162e039c 100644 --- a/terraform/providers/google/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s +++ b/terraform/providers/google/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -1,722 +1,4517 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by command: go run blake2bAVX2_amd64_asm.go -out ../../blake2bAVX2_amd64.s -pkg blake2b. DO NOT EDIT. 
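Review note: both rewritten files (argon2/blamka_amd64.s above and blake2bAVX2_amd64.s below) swap hand-maintained macro assembly for avo-generated output, so every BLAMKA_ROUND_*/ROUND_AVX2/HALF_ROUND_AVX invocation is now expanded inline; the instruction stream itself is unchanged. For reference while reviewing, a minimal Go sketch of the scalar operations the vectorized code computes — the package and helper names here are illustrative only, not the vendored packages' actual identifiers:

package sketch // illustrative names, not part of the vendored packages

import "math/bits"

// Argon2 works on 1 KiB blocks of 128 uint64 words; the $0x00000080 loop
// counters in mixBlocksSSE2/xorBlocksSSE2 above count these words two at
// a time (one 16-byte XMM load/store per iteration).
type block [128]uint64

// fBlaMka is the multiplication-hardened add at the heart of BLAMKA: each
// MOVO/PMULULQ/PADDQ/PADDQ/PADDQ group in the generated rounds above
// computes, per 64-bit lane, x + y + 2*lo32(x)*lo32(y).
func fBlaMka(x, y uint64) uint64 {
	m := uint64(uint32(x)) * uint64(uint32(y))
	return x + y + 2*m
}

// Scalar equivalents of mixBlocksSSE2 (out = a^b^c) and
// xorBlocksSSE2 (out ^= a^b^c).
func mixBlocks(out, a, b, c *block) {
	for i := range out {
		out[i] = a[i] ^ b[i] ^ c[i]
	}
}

func xorBlocks(out, a, b, c *block) {
	for i := range out {
		out[i] ^= a[i] ^ b[i] ^ c[i]
	}
}

// g is the BLAKE2b mixing function; each VPADDQ/VPXOR/VPSHUFD/VPSHUFB/
// VPSRLQ group in hashBlocksAVX2 below runs four of these columns at once
// in one set of YMM registers. The final two rounds replay the message
// vectors cached at 32(DX)..256(DX) during the first two rounds, since
// BLAKE2b's sigma[10] and sigma[11] equal sigma[0] and sigma[1].
func g(v *[16]uint64, a, b, c, d int, mx, my uint64) {
	v[a] += v[b] + mx
	v[d] = bits.RotateLeft64(v[d]^v[a], -32) // VPSHUFD $-79
	v[c] += v[d]
	v[b] = bits.RotateLeft64(v[b]^v[c], -24) // VPSHUFB ·AVX2_c40
	v[a] += v[b] + my
	v[d] = bits.RotateLeft64(v[d]^v[a], -16) // VPSHUFB ·AVX2_c48
	v[c] += v[d]
	v[b] = bits.RotateLeft64(v[b]^v[c], -63) // VPADDQ + VPSRLQ $0x3f + VPXOR
}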
//go:build amd64 && gc && !purego #include "textflag.h" -DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b -DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b -DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 -GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 -DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 -DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b -DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b -DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 -GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 - -#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 -#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 -#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e -#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 -#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 - -#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ - VPADDQ m0, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFD $-79, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPSHUFB c40, Y1, Y1; \ - VPADDQ m1, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFB c48, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPADDQ Y1, Y1, t; \ - VPSRLQ $63, Y1, Y1; \ - VPXOR t, Y1, Y1; \ - VPERMQ_0x39_Y1_Y1; \ - VPERMQ_0x4E_Y2_Y2; \ - VPERMQ_0x93_Y3_Y3; \ - VPADDQ m2, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFD $-79, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPSHUFB c40, Y1, Y1; \ - VPADDQ m3, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFB c48, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPADDQ Y1, Y1, t; \ - VPSRLQ $63, Y1, Y1; \ - VPXOR t, Y1, Y1; \ - VPERMQ_0x39_Y3_Y3; \ - VPERMQ_0x4E_Y2_Y2; \ - VPERMQ_0x93_Y1_Y1 - -#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E -#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26 -#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E -#define VMOVQ_SI_X14_0 
BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36 -#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E - -#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n -#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n -#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n -#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n -#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n - -#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01 -#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01 -#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01 -#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01 -#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01 - -#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01 - -#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8 -#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01 - -// load msg: Y12 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \ - VMOVQ_SI_X12(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X12(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y12, Y12 - -// load msg: Y13 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \ - VMOVQ_SI_X13(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X13(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y13, Y13 - -// load msg: Y14 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \ - VMOVQ_SI_X14(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X14(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y14, Y14 - -// load msg: Y15 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \ - VMOVQ_SI_X15(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X15(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \ - VMOVQ_SI_X12_0; \ - VMOVQ_SI_X11(4*8); \ - VPINSRQ_1_SI_X12(2*8); \ - VPINSRQ_1_SI_X11(6*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \ - LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \ - LOAD_MSG_AVX2_Y15(9, 11, 13, 15) - -#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \ - LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \ - LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \ - VMOVQ_SI_X11(11*8); \ - VPSHUFD $0x4E, 0*8(SI), X14; \ - VPINSRQ_1_SI_X11(5*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - LOAD_MSG_AVX2_Y15(12, 2, 7, 3) - -#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \ - VMOVQ_SI_X11(5*8); \ - VMOVDQU 11*8(SI), X12; \ - VPINSRQ_1_SI_X11(15*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - 
VMOVQ_SI_X13(8*8); \ - VMOVQ_SI_X11(2*8); \ - VPINSRQ_1_SI_X13_0; \ - VPINSRQ_1_SI_X11(13*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \ - LOAD_MSG_AVX2_Y15(14, 6, 1, 4) - -#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \ - LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \ - LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \ - LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \ - VMOVQ_SI_X15(6*8); \ - VMOVQ_SI_X11_0; \ - VPINSRQ_1_SI_X15(10*8); \ - VPINSRQ_1_SI_X11(8*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \ - LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \ - VMOVQ_SI_X13_0; \ - VMOVQ_SI_X11(4*8); \ - VPINSRQ_1_SI_X13(7*8); \ - VPINSRQ_1_SI_X11(15*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \ - LOAD_MSG_AVX2_Y15(1, 12, 8, 13) - -#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X11_0; \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X11(8*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \ - LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \ - LOAD_MSG_AVX2_Y15(13, 5, 14, 9) - -#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \ - LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \ - LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \ - VMOVQ_SI_X14_0; \ - VPSHUFD $0x4E, 8*8(SI), X11; \ - VPINSRQ_1_SI_X14(6*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - LOAD_MSG_AVX2_Y15(7, 3, 2, 11) - -#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \ - LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \ - LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \ - LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \ - VMOVQ_SI_X15_0; \ - VMOVQ_SI_X11(6*8); \ - VPINSRQ_1_SI_X15(4*8); \ - VPINSRQ_1_SI_X11(10*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \ - VMOVQ_SI_X12(6*8); \ - VMOVQ_SI_X11(11*8); \ - VPINSRQ_1_SI_X12(14*8); \ - VPINSRQ_1_SI_X11_0; \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \ - VMOVQ_SI_X11(1*8); \ - VMOVDQU 12*8(SI), X14; \ - VPINSRQ_1_SI_X11(10*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - VMOVQ_SI_X15(2*8); \ - VMOVDQU 4*8(SI), X11; \ - VPINSRQ_1_SI_X15(7*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \ - LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \ - VMOVQ_SI_X13(2*8); \ - VPSHUFD $0x4E, 5*8(SI), X11; \ - VPINSRQ_1_SI_X13(4*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \ - VMOVQ_SI_X15(11*8); \ - VMOVQ_SI_X11(12*8); \ - VPINSRQ_1_SI_X15(14*8); \ - VPINSRQ_1_SI_X11_0; \ - VINSERTI128 $1, X11, Y15, Y15 - // func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ blocks_len+32(FP), DI - - MOVQ SP, DX - ADDQ $31, DX - ANDQ $~31, DX - - MOVQ CX, 16(DX) - XORQ CX, CX - MOVQ CX, 24(DX) - - VMOVDQU ·AVX2_c40<>(SB), Y4 - VMOVDQU ·AVX2_c48<>(SB), Y5 - - VMOVDQU 0(AX), Y8 +// Requires: AVX, AVX2 +TEXT ·hashBlocksAVX2(SB), NOSPLIT, $320-48 + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + MOVQ SP, DX + ADDQ $+31, DX + ANDQ $-32, DX + MOVQ CX, 16(DX) + XORQ CX, CX + MOVQ CX, 24(DX) + VMOVDQU ·AVX2_c40<>+0(SB), Y4 + VMOVDQU ·AVX2_c48<>+0(SB), Y5 + VMOVDQU (AX), Y8 VMOVDQU 32(AX), Y9 - VMOVDQU ·AVX2_iv0<>(SB), Y6 - VMOVDQU ·AVX2_iv1<>(SB), Y7 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 - MOVQ R9, 8(DX) + VMOVDQU 
·AVX2_iv0<>+0(SB), Y6 + VMOVDQU ·AVX2_iv1<>+0(SB), Y7 + MOVQ (BX), R8 + MOVQ 8(BX), R9 + MOVQ R9, 8(DX) loop: - ADDQ $128, R8 - MOVQ R8, 0(DX) - CMPQ R8, $128 + ADDQ $0x80, R8 + MOVQ R8, (DX) + CMPQ R8, $0x80 JGE noinc INCQ R9 MOVQ R9, 8(DX) noinc: - VMOVDQA Y8, Y0 - VMOVDQA Y9, Y1 - VMOVDQA Y6, Y2 - VPXOR 0(DX), Y7, Y3 - - LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() - VMOVDQA Y12, 32(DX) - VMOVDQA Y13, 64(DX) - VMOVDQA Y14, 96(DX) - VMOVDQA Y15, 128(DX) - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() - VMOVDQA Y12, 160(DX) - VMOVDQA Y13, 192(DX) - VMOVDQA Y14, 224(DX) - VMOVDQA Y15, 256(DX) - - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - - ROUND_AVX2(32(DX), 64(DX), 96(DX), 128(DX), Y10, Y4, Y5) - ROUND_AVX2(160(DX), 192(DX), 224(DX), 256(DX), Y10, Y4, Y5) - - VPXOR Y0, Y8, Y8 - VPXOR Y1, Y9, Y9 - VPXOR Y2, Y8, Y8 - VPXOR Y3, Y9, Y9 - - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop - - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) - - VMOVDQU Y8, 0(AX) - VMOVDQU Y9, 32(AX) + VMOVDQA Y8, Y0 + VMOVDQA Y9, Y1 + VMOVDQA Y6, Y2 + VPXOR (DX), Y7, Y3 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x26 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x30 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x28 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x38 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x70 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VMOVDQA Y12, 32(DX) + VMOVDQA Y13, 64(DX) + VMOVDQA Y14, 96(DX) + VMOVDQA Y15, 128(DX) + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 
+ VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x48 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x78 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x30 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x58 + VPSHUFD $0x4e, (SI), X14 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x28 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VMOVDQA Y12, 160(DX) + VMOVDQA Y13, 192(DX) + VMOVDQA Y14, 224(DX) + VMOVDQA Y15, 256(DX) + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x28 + VMOVDQU 88(SI), X12 + BYTE $0xc4 + BYTE $0x63 + BYTE 
$0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x2e + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x48 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x20 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x58 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x70 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x1e + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE 
$0x5e + BYTE $0x40 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x2e + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x30 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x40 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x60 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE 
$0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x1e + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x40 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x58 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x78 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x08 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x70 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x48 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x70 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x20 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x36 + VPSHUFD $0x4e, 64(SI), X11 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x30 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE 
$0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x58 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x48 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x40 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x10 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x3e + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x30 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + 
VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x58 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x1e + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x78 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x18 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x40 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x08 + VMOVDQU 96(SI), X14 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x10 + VMOVDQU 32(SI), X11 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x38 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x08 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x10 + VPSHUFD $0x4e, 40(SI), X11 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x20 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x78 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x18 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + 
BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x1e + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + VPADDQ 32(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 64(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ 96(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 128(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + VPADDQ 160(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 192(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ 224(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 256(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE 
$0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + VPXOR Y0, Y8, Y8 + VPXOR Y1, Y9, Y9 + VPXOR Y2, Y8, Y8 + VPXOR Y3, Y9, Y9 + LEAQ 128(SI), SI + SUBQ $0x80, DI + JNE loop + MOVQ R8, (BX) + MOVQ R9, 8(BX) + VMOVDQU Y8, (AX) + VMOVDQU Y9, 32(AX) VZEROUPPER - RET -#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA -#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB -#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF -#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD -#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE - -#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7 -#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF -#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7 -#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF -#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7 -#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7 -#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF -#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF - -#define SHUFFLE_AVX() \ - VMOVDQA X6, X13; \ - VMOVDQA X2, X14; \ - VMOVDQA X4, X6; \ - VPUNPCKLQDQ_X13_X13_X15; \ - VMOVDQA X5, X4; \ - VMOVDQA X6, X5; \ - VPUNPCKHQDQ_X15_X7_X6; \ - VPUNPCKLQDQ_X7_X7_X15; \ - VPUNPCKHQDQ_X15_X13_X7; \ - VPUNPCKLQDQ_X3_X3_X15; \ - VPUNPCKHQDQ_X15_X2_X2; \ - VPUNPCKLQDQ_X14_X14_X15; \ - VPUNPCKHQDQ_X15_X3_X3; \ - -#define SHUFFLE_AVX_INV() \ - VMOVDQA X2, X13; \ - VMOVDQA X4, X14; \ - VPUNPCKLQDQ_X2_X2_X15; \ - VMOVDQA X5, X4; \ - VPUNPCKHQDQ_X15_X3_X2; \ - VMOVDQA X14, X5; \ - VPUNPCKLQDQ_X3_X3_X15; \ - VMOVDQA X6, X14; \ - VPUNPCKHQDQ_X15_X13_X3; \ - VPUNPCKLQDQ_X7_X7_X15; \ - VPUNPCKHQDQ_X15_X6_X6; \ - VPUNPCKLQDQ_X14_X14_X15; \ - VPUNPCKHQDQ_X15_X7_X7; \ - -#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ - VPADDQ m0, v0, v0; \ - VPADDQ v2, v0, v0; \ - VPADDQ m1, v1, v1; \ - VPADDQ v3, v1, v1; \ - VPXOR v0, v6, v6; \ - VPXOR v1, v7, v7; \ - VPSHUFD $-79, v6, v6; \ - VPSHUFD $-79, v7, v7; \ - VPADDQ v6, v4, v4; \ - VPADDQ v7, v5, v5; \ - VPXOR v4, v2, v2; \ - VPXOR v5, v3, v3; \ - VPSHUFB c40, v2, v2; \ - VPSHUFB c40, v3, v3; \ - VPADDQ m2, v0, v0; \ - VPADDQ v2, v0, v0; \ - VPADDQ m3, v1, v1; \ - VPADDQ v3, v1, v1; \ - VPXOR v0, v6, v6; \ - VPXOR v1, v7, v7; \ - VPSHUFB c48, v6, v6; \ - VPSHUFB c48, v7, v7; \ - VPADDQ v6, v4, v4; \ - VPADDQ v7, v5, v5; \ - VPXOR v4, v2, v2; \ - VPXOR v5, v3, v3; \ - VPADDQ v2, v2, t0; \ - VPSRLQ $63, v2, v2; \ - VPXOR t0, v2, v2; \ - VPADDQ v3, v3, t0; \ - VPSRLQ $63, v3, v3; \ - VPXOR t0, v3, v3 - -// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7) -// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0 -#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \ - VMOVQ_SI_X12(i0*8); \ - VMOVQ_SI_X13(i2*8); \ - VMOVQ_SI_X14(i4*8); \ - VMOVQ_SI_X15(i6*8); \ - VPINSRQ_1_SI_X12(i1*8); \ - VPINSRQ_1_SI_X13(i3*8); \ - VPINSRQ_1_SI_X14(i5*8); \ - VPINSRQ_1_SI_X15(i7*8) - -// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7) -#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \ - 
VMOVQ_SI_X12_0; \ - VMOVQ_SI_X13(4*8); \ - VMOVQ_SI_X14(1*8); \ - VMOVQ_SI_X15(5*8); \ - VPINSRQ_1_SI_X12(2*8); \ - VPINSRQ_1_SI_X13(6*8); \ - VPINSRQ_1_SI_X14(3*8); \ - VPINSRQ_1_SI_X15(7*8) - -// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3) -#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \ - VPSHUFD $0x4E, 0*8(SI), X12; \ - VMOVQ_SI_X13(11*8); \ - VMOVQ_SI_X14(12*8); \ - VMOVQ_SI_X15(7*8); \ - VPINSRQ_1_SI_X13(5*8); \ - VPINSRQ_1_SI_X14(2*8); \ - VPINSRQ_1_SI_X15(3*8) - -// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13) -#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \ - VMOVDQU 11*8(SI), X12; \ - VMOVQ_SI_X13(5*8); \ - VMOVQ_SI_X14(8*8); \ - VMOVQ_SI_X15(2*8); \ - VPINSRQ_1_SI_X13(15*8); \ - VPINSRQ_1_SI_X14_0; \ - VPINSRQ_1_SI_X15(13*8) - -// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8) -#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X13(4*8); \ - VMOVQ_SI_X14(6*8); \ - VMOVQ_SI_X15_0; \ - VPINSRQ_1_SI_X12(5*8); \ - VPINSRQ_1_SI_X13(15*8); \ - VPINSRQ_1_SI_X14(10*8); \ - VPINSRQ_1_SI_X15(8*8) +DATA ·AVX2_c40<>+0(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +DATA ·AVX2_c40<>+16(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+24(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX2_c40<>(SB), RODATA|NOPTR, $32 -// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15) -#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \ - VMOVQ_SI_X12(9*8); \ - VMOVQ_SI_X13(2*8); \ - VMOVQ_SI_X14_0; \ - VMOVQ_SI_X15(4*8); \ - VPINSRQ_1_SI_X12(5*8); \ - VPINSRQ_1_SI_X13(10*8); \ - VPINSRQ_1_SI_X14(7*8); \ - VPINSRQ_1_SI_X15(15*8) +DATA ·AVX2_c48<>+0(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +DATA ·AVX2_c48<>+16(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+24(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX2_c48<>(SB), RODATA|NOPTR, $32 -// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3) -#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X13_0; \ - VMOVQ_SI_X14(12*8); \ - VMOVQ_SI_X15(11*8); \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X13(8*8); \ - VPINSRQ_1_SI_X14(10*8); \ - VPINSRQ_1_SI_X15(3*8) +DATA ·AVX2_iv0<>+0(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX2_iv0<>+8(SB)/8, $0xbb67ae8584caa73b +DATA ·AVX2_iv0<>+16(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX2_iv0<>+24(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX2_iv0<>(SB), RODATA|NOPTR, $32 -// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11) -#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \ - MOVQ 0*8(SI), X12; \ - VPSHUFD $0x4E, 8*8(SI), X13; \ - MOVQ 7*8(SI), X14; \ - MOVQ 2*8(SI), X15; \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X14(3*8); \ - VPINSRQ_1_SI_X15(11*8) - -// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8) -#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \ - MOVQ 6*8(SI), X12; \ - MOVQ 11*8(SI), X13; \ - MOVQ 15*8(SI), X14; \ - MOVQ 3*8(SI), X15; \ - VPINSRQ_1_SI_X12(14*8); \ - VPINSRQ_1_SI_X13_0; \ - VPINSRQ_1_SI_X14(9*8); \ - VPINSRQ_1_SI_X15(8*8) - -// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10) -#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \ - MOVQ 5*8(SI), X12; \ - MOVQ 8*8(SI), X13; \ - MOVQ 0*8(SI), X14; \ - MOVQ 6*8(SI), X15; \ - VPINSRQ_1_SI_X12(15*8); \ - VPINSRQ_1_SI_X13(2*8); \ - VPINSRQ_1_SI_X14(4*8); \ - VPINSRQ_1_SI_X15(10*8) - -// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5) -#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \ - VMOVDQU 12*8(SI), X12; \ - MOVQ 1*8(SI), X13; \ - MOVQ 2*8(SI), X14; \ - 
VPINSRQ_1_SI_X13(10*8); \ - VPINSRQ_1_SI_X14(7*8); \ - VMOVDQU 4*8(SI), X15 - -// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0) -#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \ - MOVQ 15*8(SI), X12; \ - MOVQ 3*8(SI), X13; \ - MOVQ 11*8(SI), X14; \ - MOVQ 12*8(SI), X15; \ - VPINSRQ_1_SI_X12(9*8); \ - VPINSRQ_1_SI_X13(13*8); \ - VPINSRQ_1_SI_X14(14*8); \ - VPINSRQ_1_SI_X15_0 +DATA ·AVX2_iv1<>+0(SB)/8, $0x510e527fade682d1 +DATA ·AVX2_iv1<>+8(SB)/8, $0x9b05688c2b3e6c1f +DATA ·AVX2_iv1<>+16(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX2_iv1<>+24(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX2_iv1<>(SB), RODATA|NOPTR, $32 // func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ blocks_len+32(FP), DI - - MOVQ SP, R10 - ADDQ $15, R10 - ANDQ $~15, R10 - - VMOVDQU ·AVX_c40<>(SB), X0 - VMOVDQU ·AVX_c48<>(SB), X1 +// Requires: AVX, SSE2 +TEXT ·hashBlocksAVX(SB), NOSPLIT, $288-48 + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + MOVQ SP, R10 + ADDQ $0x0f, R10 + ANDQ $-16, R10 + VMOVDQU ·AVX_c40<>+0(SB), X0 + VMOVDQU ·AVX_c48<>+0(SB), X1 VMOVDQA X0, X8 VMOVDQA X1, X9 - - VMOVDQU ·AVX_iv3<>(SB), X0 - VMOVDQA X0, 0(R10) - XORQ CX, 0(R10) // 0(R10) = ·AVX_iv3 ^ (CX || 0) - - VMOVDQU 0(AX), X10 + VMOVDQU ·AVX_iv3<>+0(SB), X0 + VMOVDQA X0, (R10) + XORQ CX, (R10) + VMOVDQU (AX), X10 VMOVDQU 16(AX), X11 VMOVDQU 32(AX), X2 VMOVDQU 48(AX), X3 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 + MOVQ (BX), R8 + MOVQ 8(BX), R9 loop: - ADDQ $128, R8 - CMPQ R8, $128 + ADDQ $0x80, R8 + CMPQ R8, $0x80 JGE noinc INCQ R9 noinc: - VMOVQ_R8_X15 - VPINSRQ_1_R9_X15 - + BYTE $0xc4 + BYTE $0x41 + BYTE $0xf9 + BYTE $0x6e + BYTE $0xf8 + BYTE $0xc4 + BYTE $0x43 + BYTE $0x81 + BYTE $0x22 + BYTE $0xf9 + BYTE $0x01 VMOVDQA X10, X0 VMOVDQA X11, X1 - VMOVDQU ·AVX_iv0<>(SB), X4 - VMOVDQU ·AVX_iv1<>(SB), X5 - VMOVDQU ·AVX_iv2<>(SB), X6 - + VMOVDQU ·AVX_iv0<>+0(SB), X4 + VMOVDQU ·AVX_iv1<>+0(SB), X5 + VMOVDQU ·AVX_iv2<>+0(SB), X6 VPXOR X15, X6, X6 - VMOVDQA 0(R10), X7 - - LOAD_MSG_AVX_0_2_4_6_1_3_5_7() + VMOVDQA (R10), X7 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x26 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x28 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x38 + BYTE $0x01 VMOVDQA X12, 16(R10) VMOVDQA X13, 32(R10) VMOVDQA X14, 48(R10) VMOVDQA X15, 64(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 
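+	// Note: the raw BYTE runs in these generated functions are VEX
+	// encodings for instruction forms the Go assembler presumably could
+	// not express when the original was written; the removed hand-written
+	// macros carried the identical bytes, so the emitted machine code is
+	// unchanged. For example, C5 7A 7E 26 is the encoding the removed
+	// VMOVQ_SI_X12_0 macro named (VMOVQ (SI), X12), and C5 69 6C FA
+	// matches the removed VPUNPCKLQDQ_X2_X2_X15.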
+ VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x78 + BYTE $0x01 VMOVDQA X12, 80(R10) VMOVDQA X13, 96(R10) VMOVDQA X14, 112(R10) VMOVDQA X15, 128(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x78 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x68 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x30 + BYTE $0x01 VMOVDQA X12, 144(R10) VMOVDQA X13, 160(R10) VMOVDQA X14, 
176(R10) VMOVDQA X15, 192(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_1_0_11_5_12_2_7_3() + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VPSHUFD $0x4e, (SI), X12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x18 + BYTE $0x01 VMOVDQA X12, 208(R10) VMOVDQA X13, 224(R10) VMOVDQA X14, 240(R10) VMOVDQA X15, 256(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_11_12_5_15_8_0_2_13() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_2_5_4_15_6_10_0_8() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_9_5_2_10_0_7_4_15() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_2_6_0_8_12_10_11_3() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_0_6_9_8_7_3_2_11() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9) - HALF_ROUND_AVX(X0, X1, 
X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_5_15_8_2_0_4_6_10() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_6_14_11_0_15_9_3_8() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_12_13_1_10_2_7_4_5() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_15_9_3_13_11_14_12_0() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X15, X8, X9) - SHUFFLE_AVX() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X15, X8, X9) - SHUFFLE_AVX_INV() - - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X15, X8, X9) - SHUFFLE_AVX() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X15, X8, X9) - SHUFFLE_AVX_INV() - + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + VMOVDQU 88(SI), X12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x36 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x68 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, 
X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x20 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x70 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + 
VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x3e + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x40 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x36 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x78 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, 
X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x40 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x60 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x68 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x2e + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x58 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x18 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, 
X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x78 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x70 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x48 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x50 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ 
X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + MOVQ (SI), X12 + VPSHUFD $0x4e, 64(SI), X13 + MOVQ 56(SI), X14 + MOVQ 16(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x58 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x48 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + 
VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + MOVQ 40(SI), X12 + MOVQ 64(SI), X13 + MOVQ (SI), X14 + MOVQ 48(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x50 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + MOVQ 48(SI), X12 + MOVQ 88(SI), X13 + MOVQ 120(SI), X14 + MOVQ 24(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x2e + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x40 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + 
VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VMOVDQU 96(SI), X12 + MOVQ 8(SI), X13 + MOVQ 16(SI), X14 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x38 + BYTE $0x01 + VMOVDQU 32(SI), X15 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x30 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x28 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR 
X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + MOVQ 120(SI), X12 + MOVQ 24(SI), X13 + MOVQ 88(SI), X14 + MOVQ 96(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x68 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x3e + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + VPADDQ 16(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 32(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 48(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 64(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE 
$0xdf + VPADDQ 80(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 96(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 112(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 128(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + VPADDQ 144(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 160(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 176(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 192(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VPADDQ 208(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 224(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 240(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 256(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + 
BYTE $0xc4
+	BYTE $0xc1
+	BYTE $0x49
+	BYTE $0x6d
+	BYTE $0xf7
+	BYTE $0xc4
+	BYTE $0x41
+	BYTE $0x09
+	BYTE $0x6c
+	BYTE $0xfe
+	BYTE $0xc4
+	BYTE $0xc1
+	BYTE $0x41
+	BYTE $0x6d
+	BYTE $0xff
 	VMOVDQU 32(AX), X14
 	VMOVDQU 48(AX), X15
 	VPXOR X0, X10, X10
@@ -729,16 +4524,36 @@ noinc:
 	VPXOR X7, X15, X3
 	VMOVDQU X2, 32(AX)
 	VMOVDQU X3, 48(AX)
+	LEAQ 128(SI), SI
+	SUBQ $0x80, DI
+	JNE  loop
+	VMOVDQU X10, (AX)
+	VMOVDQU X11, 16(AX)
+	MOVQ R8, (BX)
+	MOVQ R9, 8(BX)
+	VZEROUPPER
+	RET
-	LEAQ 128(SI), SI
-	SUBQ $128, DI
-	JNE  loop
+DATA ·AVX_c40<>+0(SB)/8, $0x0201000706050403
+DATA ·AVX_c40<>+8(SB)/8, $0x0a09080f0e0d0c0b
+GLOBL ·AVX_c40<>(SB), RODATA|NOPTR, $16
-	VMOVDQU X10, 0(AX)
-	VMOVDQU X11, 16(AX)
+DATA ·AVX_c48<>+0(SB)/8, $0x0100070605040302
+DATA ·AVX_c48<>+8(SB)/8, $0x09080f0e0d0c0b0a
+GLOBL ·AVX_c48<>(SB), RODATA|NOPTR, $16
-	MOVQ R8, 0(BX)
-	MOVQ R9, 8(BX)
-	VZEROUPPER
+DATA ·AVX_iv3<>+0(SB)/8, $0x1f83d9abfb41bd6b
+DATA ·AVX_iv3<>+8(SB)/8, $0x5be0cd19137e2179
+GLOBL ·AVX_iv3<>(SB), RODATA|NOPTR, $16
-	RET
+DATA ·AVX_iv0<>+0(SB)/8, $0x6a09e667f3bcc908
+DATA ·AVX_iv0<>+8(SB)/8, $0xbb67ae8584caa73b
+GLOBL ·AVX_iv0<>(SB), RODATA|NOPTR, $16
+
+DATA ·AVX_iv1<>+0(SB)/8, $0x3c6ef372fe94f82b
+DATA ·AVX_iv1<>+8(SB)/8, $0xa54ff53a5f1d36f1
+GLOBL ·AVX_iv1<>(SB), RODATA|NOPTR, $16
+
+DATA ·AVX_iv2<>+0(SB)/8, $0x510e527fade682d1
+DATA ·AVX_iv2<>+8(SB)/8, $0x9b05688c2b3e6c1f
+GLOBL ·AVX_iv2<>(SB), RODATA|NOPTR, $16
diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/terraform/providers/google/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
index adfac00c15c..9a0ce212446 100644
--- a/terraform/providers/google/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
+++ b/terraform/providers/google/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
@@ -1,278 +1,1441 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+// Code generated by command: go run blake2b_amd64_asm.go -out ../../blake2b_amd64.s -pkg blake2b. DO NOT EDIT.
//go:build amd64 && gc && !purego

#include "textflag.h"

-DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
-DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
-GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16
-
-DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
-DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
-GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16
-
-DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1
-DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
-GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16
-
-DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
-DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
-GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16
-
-DATA ·c40<>+0x00(SB)/8, $0x0201000706050403
-DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
-GLOBL ·c40<>(SB), (NOPTR+RODATA), $16
-
-DATA ·c48<>+0x00(SB)/8, $0x0100070605040302
-DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
-GLOBL ·c48<>(SB), (NOPTR+RODATA), $16
-
-#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \
-	MOVO v4, t1; \
-	MOVO v5, v4; \
-	MOVO t1, v5; \
-	MOVO v6, t1; \
-	PUNPCKLQDQ v6, t2; \
-	PUNPCKHQDQ v7, v6; \
-	PUNPCKHQDQ t2, v6; \
-	PUNPCKLQDQ v7, t2; \
-	MOVO t1, v7; \
-	MOVO v2, t1; \
-	PUNPCKHQDQ t2, v7; \
-	PUNPCKLQDQ v3, t2; \
-	PUNPCKHQDQ t2, v2; \
-	PUNPCKLQDQ t1, t2; \
-	PUNPCKHQDQ t2, v3
-
-#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \
-	MOVO v4, t1; \
-	MOVO v5, v4; \
-	MOVO t1, v5; \
-	MOVO v2, t1; \
-	PUNPCKLQDQ v2, t2; \
-	PUNPCKHQDQ v3, v2; \
-	PUNPCKHQDQ t2, v2; \
-	PUNPCKLQDQ v3, t2; \
-	MOVO t1, v3; \
-	MOVO v6, t1; \
-	PUNPCKHQDQ t2, v3; \
-	PUNPCKLQDQ v7, t2; \
-	PUNPCKHQDQ t2, v6; \
-	PUNPCKLQDQ t1, t2; \
-	PUNPCKHQDQ t2, v7
-
-#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
-	PADDQ m0, v0; \
-	PADDQ m1, v1; \
-	PADDQ v2, v0; \
-	PADDQ v3, v1; \
-	PXOR v0, v6; \
-	PXOR v1, v7; \
-	PSHUFD $0xB1, v6, v6; \
-	PSHUFD $0xB1, v7, v7; \
-	PADDQ v6, v4; \
-	PADDQ v7, v5; \
-	PXOR v4, v2; \
-	PXOR v5, v3; \
-	PSHUFB c40, v2; \
-	PSHUFB c40, v3; \
-	PADDQ m2, v0; \
-	PADDQ m3, v1; \
-	PADDQ v2, v0; \
-	PADDQ v3, v1; \
-	PXOR v0, v6; \
-	PXOR v1, v7; \
-	PSHUFB c48, v6; \
-	PSHUFB c48, v7; \
-	PADDQ v6, v4; \
-	PADDQ v7, v5; \
-	PXOR v4, v2; \
-	PXOR v5, v3; \
-	MOVOU v2, t0; \
-	PADDQ v2, t0; \
-	PSRLQ $63, v2; \
-	PXOR t0, v2; \
-	MOVOU v3, t0; \
-	PADDQ v3, t0; \
-	PSRLQ $63, v3; \
-	PXOR t0, v3
-
-#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \
-	MOVQ i0*8(src), m0; \
-	PINSRQ $1, i1*8(src), m0; \
-	MOVQ i2*8(src), m1; \
-	PINSRQ $1, i3*8(src), m1; \
-	MOVQ i4*8(src), m2; \
-	PINSRQ $1, i5*8(src), m2; \
-	MOVQ i6*8(src), m3; \
-	PINSRQ $1, i7*8(src), m3
-
 // func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
-TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
-	MOVQ h+0(FP), AX
-	MOVQ c+8(FP), BX
-	MOVQ flag+16(FP), CX
-	MOVQ blocks_base+24(FP), SI
-	MOVQ blocks_len+32(FP), DI
-
-	MOVQ SP, R10
-	ADDQ $15, R10
-	ANDQ $~15, R10
-
-	MOVOU ·iv3<>(SB), X0
-	MOVO X0, 0(R10)
-	XORQ CX, 0(R10) // 0(R10) = ·iv3 ^ (CX || 0)
-
-	MOVOU ·c40<>(SB), X13
-	MOVOU ·c48<>(SB), X14
-
-	MOVOU 0(AX), X12
+// Requires: SSE2, SSE4.1, SSSE3
+TEXT ·hashBlocksSSE4(SB), NOSPLIT, $288-48
+	MOVQ h+0(FP), AX
+	MOVQ c+8(FP), BX
+	MOVQ flag+16(FP), CX
+	MOVQ blocks_base+24(FP), SI
+	MOVQ blocks_len+32(FP), DI
+	MOVQ SP, R10
+	ADDQ $0x0f, R10
+	ANDQ $-16, R10
+	MOVOU ·iv3<>+0(SB), X0
+	MOVO X0, (R10)
+	XORQ CX, (R10)
+	MOVOU ·c40<>+0(SB), X13
+	MOVOU ·c48<>+0(SB), X14
+	MOVOU (AX), X12
 	MOVOU 16(AX), X15
-
-	MOVQ 0(BX), R8
-	MOVQ 8(BX), R9
+	MOVQ (BX), R8
+	MOVQ 8(BX), R9

 loop:
-	ADDQ $128, R8
-	CMPQ R8, $128
+	ADDQ $0x80, R8
+	CMPQ R8, $0x80
 	JGE  noinc
 	INCQ R9

 noinc:
-	MOVQ R8, X8
-	PINSRQ $1, R9, X8
-
-	MOVO X12, X0
-	MOVO X15, X1
-	MOVOU 32(AX), X2
-	MOVOU 48(AX), X3
-	MOVOU ·iv0<>(SB), X4
-	MOVOU ·iv1<>(SB), X5
-	MOVOU ·iv2<>(SB), X6
-
-	PXOR X8, X6
-	MOVO 0(R10), X7
-
-	LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7)
-	MOVO X8, 16(R10)
-	MOVO X9, 32(R10)
-	MOVO X10, 48(R10)
-	MOVO X11, 64(R10)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
-	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
-	LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15)
-	MOVO X8, 80(R10)
-	MOVO X9, 96(R10)
-	MOVO X10, 112(R10)
-	MOVO X11, 128(R10)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
-	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
-	LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6)
-	MOVO X8, 144(R10)
-	MOVO X9, 160(R10)
-	MOVO X10, 176(R10)
-	MOVO X11, 192(R10)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
-	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
-	LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3)
-	MOVO X8, 208(R10)
-	MOVO X9, 224(R10)
-	MOVO X10, 240(R10)
-	MOVO X11, 256(R10)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
-	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
-	LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
-	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
-	LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
-	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
-	LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
-	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
-	LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
-	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
-	LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
-	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
-	LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
-	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
-	LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
-	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
-	LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
-	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
-	LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
-	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
-	LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
-	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
-	LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
-	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
-	LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
-	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
-	LOAD_MSG(X8, X9, X10, X11, SI, 6,
14, 11, 0, 15, 9, 3, 8) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + MOVQ R8, X8 + PINSRQ $0x01, R9, X8 + MOVO X12, X0 + MOVO X15, X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU ·iv0<>+0(SB), X4 + MOVOU ·iv1<>+0(SB), X5 + MOVOU ·iv2<>+0(SB), X6 + PXOR X8, X6 + MOVO (R10), X7 + MOVQ (SI), X8 + PINSRQ $0x01, 16(SI), X8 + MOVQ 32(SI), X9 + PINSRQ $0x01, 48(SI), X9 + MOVQ 8(SI), X10 + PINSRQ $0x01, 24(SI), X10 + MOVQ 40(SI), X11 + PINSRQ $0x01, 56(SI), X11 + MOVO X8, 16(R10) + MOVO X9, 32(R10) + MOVO X10, 48(R10) + MOVO X11, 64(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 64(SI), X8 + PINSRQ $0x01, 80(SI), X8 + MOVQ 96(SI), X9 + PINSRQ $0x01, 112(SI), X9 + MOVQ 72(SI), X10 + PINSRQ $0x01, 88(SI), X10 + MOVQ 104(SI), X11 + PINSRQ $0x01, 120(SI), X11 + MOVO X8, 80(R10) + MOVO X9, 96(R10) + MOVO X10, 112(R10) + MOVO X11, 128(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 112(SI), X8 + PINSRQ $0x01, 32(SI), X8 + MOVQ 72(SI), X9 + PINSRQ $0x01, 104(SI), X9 + MOVQ 80(SI), X10 + PINSRQ $0x01, 64(SI), X10 + MOVQ 120(SI), X11 + PINSRQ $0x01, 48(SI), X11 + MOVO X8, 144(R10) + MOVO X9, 160(R10) + MOVO 
X10, 176(R10) + MOVO X11, 192(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 8(SI), X8 + PINSRQ $0x01, (SI), X8 + MOVQ 88(SI), X9 + PINSRQ $0x01, 40(SI), X9 + MOVQ 96(SI), X10 + PINSRQ $0x01, 16(SI), X10 + MOVQ 56(SI), X11 + PINSRQ $0x01, 24(SI), X11 + MOVO X8, 208(R10) + MOVO X9, 224(R10) + MOVO X10, 240(R10) + MOVO X11, 256(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 88(SI), X8 + PINSRQ $0x01, 96(SI), X8 + MOVQ 40(SI), X9 + PINSRQ $0x01, 120(SI), X9 + MOVQ 64(SI), X10 + PINSRQ $0x01, (SI), X10 + MOVQ 16(SI), X11 + PINSRQ $0x01, 104(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 80(SI), X8 + PINSRQ $0x01, 24(SI), X8 + MOVQ 56(SI), X9 + PINSRQ $0x01, 72(SI), X9 + MOVQ 112(SI), X10 + PINSRQ $0x01, 48(SI), X10 + MOVQ 8(SI), X11 + PINSRQ $0x01, 32(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU 
X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 56(SI), X8 + PINSRQ $0x01, 24(SI), X8 + MOVQ 104(SI), X9 + PINSRQ $0x01, 88(SI), X9 + MOVQ 72(SI), X10 + PINSRQ $0x01, 8(SI), X10 + MOVQ 96(SI), X11 + PINSRQ $0x01, 112(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 16(SI), X8 + PINSRQ $0x01, 40(SI), X8 + MOVQ 32(SI), X9 + PINSRQ $0x01, 120(SI), X9 + MOVQ 48(SI), X10 + PINSRQ $0x01, 80(SI), X10 + MOVQ (SI), X11 + PINSRQ $0x01, 64(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 72(SI), X8 + PINSRQ $0x01, 40(SI), X8 + MOVQ 16(SI), X9 + PINSRQ $0x01, 80(SI), X9 + MOVQ (SI), X10 + PINSRQ $0x01, 56(SI), X10 + MOVQ 32(SI), X11 + PINSRQ $0x01, 120(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 112(SI), X8 + PINSRQ $0x01, 88(SI), X8 + MOVQ 48(SI), X9 + PINSRQ $0x01, 24(SI), X9 + MOVQ 8(SI), X10 + PINSRQ $0x01, 96(SI), X10 + MOVQ 64(SI), X11 + PINSRQ $0x01, 104(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + 
PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 16(SI), X8 + PINSRQ $0x01, 48(SI), X8 + MOVQ (SI), X9 + PINSRQ $0x01, 64(SI), X9 + MOVQ 96(SI), X10 + PINSRQ $0x01, 80(SI), X10 + MOVQ 88(SI), X11 + PINSRQ $0x01, 24(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 32(SI), X8 + PINSRQ $0x01, 56(SI), X8 + MOVQ 120(SI), X9 + PINSRQ $0x01, 8(SI), X9 + MOVQ 104(SI), X10 + PINSRQ $0x01, 40(SI), X10 + MOVQ 112(SI), X11 + PINSRQ $0x01, 72(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 96(SI), X8 + PINSRQ $0x01, 8(SI), X8 + MOVQ 112(SI), X9 + PINSRQ $0x01, 32(SI), X9 + MOVQ 40(SI), X10 + PINSRQ $0x01, 120(SI), X10 + MOVQ 104(SI), X11 + PINSRQ $0x01, 80(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, 
X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ (SI), X8 + PINSRQ $0x01, 48(SI), X8 + MOVQ 72(SI), X9 + PINSRQ $0x01, 64(SI), X9 + MOVQ 56(SI), X10 + PINSRQ $0x01, 24(SI), X10 + MOVQ 16(SI), X11 + PINSRQ $0x01, 88(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 104(SI), X8 + PINSRQ $0x01, 56(SI), X8 + MOVQ 96(SI), X9 + PINSRQ $0x01, 24(SI), X9 + MOVQ 88(SI), X10 + PINSRQ $0x01, 112(SI), X10 + MOVQ 8(SI), X11 + PINSRQ $0x01, 72(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 40(SI), X8 + PINSRQ $0x01, 120(SI), X8 + MOVQ 64(SI), X9 + PINSRQ $0x01, 16(SI), X9 + MOVQ (SI), X10 + PINSRQ $0x01, 32(SI), X10 + MOVQ 48(SI), X11 + PINSRQ $0x01, 80(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 48(SI), X8 + PINSRQ $0x01, 112(SI), X8 + MOVQ 88(SI), X9 + PINSRQ $0x01, (SI), X9 + MOVQ 120(SI), X10 + PINSRQ $0x01, 72(SI), X10 + MOVQ 24(SI), X11 + PINSRQ $0x01, 64(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR 
X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 96(SI), X8 + PINSRQ $0x01, 104(SI), X8 + MOVQ 8(SI), X9 + PINSRQ $0x01, 80(SI), X9 + MOVQ 16(SI), X10 + PINSRQ $0x01, 56(SI), X10 + MOVQ 32(SI), X11 + PINSRQ $0x01, 40(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 80(SI), X8 + PINSRQ $0x01, 64(SI), X8 + MOVQ 56(SI), X9 + PINSRQ $0x01, 8(SI), X9 + MOVQ 16(SI), X10 + PINSRQ $0x01, 32(SI), X10 + MOVQ 48(SI), X11 + PINSRQ $0x01, 40(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 120(SI), X8 + PINSRQ $0x01, 72(SI), X8 + MOVQ 24(SI), X9 + PINSRQ $0x01, 104(SI), X9 + MOVQ 88(SI), X10 + PINSRQ $0x01, 112(SI), X10 + MOVQ 96(SI), X11 + PINSRQ $0x01, (SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + PADDQ 16(R10), X0 + PADDQ 32(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + 
PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 48(R10), X0 + PADDQ 64(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + PADDQ 80(R10), X0 + PADDQ 96(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 112(R10), X0 + PADDQ 128(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + PADDQ 144(R10), X0 + PADDQ 160(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 176(R10), X0 + PADDQ 192(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + PADDQ 208(R10), X0 + PADDQ 224(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 240(R10), X0 + PADDQ 256(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU 32(AX), X10 + MOVOU 48(AX), X11 + PXOR X0, X12 + PXOR X1, X15 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X15 + PXOR X6, X10 + PXOR X7, X11 + MOVOU X10, 32(AX) + MOVOU X11, 48(AX) + LEAQ 128(SI), SI + SUBQ $0x80, DI + JNE loop + MOVOU X12, (AX) + MOVOU X15, 16(AX) + MOVQ R8, (BX) + MOVQ R9, 8(BX) + RET - HALF_ROUND(X0, X1, X2, 
X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) +DATA ·iv3<>+0(SB)/8, $0x1f83d9abfb41bd6b +DATA ·iv3<>+8(SB)/8, $0x5be0cd19137e2179 +GLOBL ·iv3<>(SB), RODATA|NOPTR, $16 - MOVOU 32(AX), X10 - MOVOU 48(AX), X11 - PXOR X0, X12 - PXOR X1, X15 - PXOR X2, X10 - PXOR X3, X11 - PXOR X4, X12 - PXOR X5, X15 - PXOR X6, X10 - PXOR X7, X11 - MOVOU X10, 32(AX) - MOVOU X11, 48(AX) +DATA ·c40<>+0(SB)/8, $0x0201000706050403 +DATA ·c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), RODATA|NOPTR, $16 - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop +DATA ·c48<>+0(SB)/8, $0x0100070605040302 +DATA ·c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), RODATA|NOPTR, $16 - MOVOU X12, 0(AX) - MOVOU X15, 16(AX) +DATA ·iv0<>+0(SB)/8, $0x6a09e667f3bcc908 +DATA ·iv0<>+8(SB)/8, $0xbb67ae8584caa73b +GLOBL ·iv0<>(SB), RODATA|NOPTR, $16 - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) +DATA ·iv1<>+0(SB)/8, $0x3c6ef372fe94f82b +DATA ·iv1<>+8(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·iv1<>(SB), RODATA|NOPTR, $16 - RET +DATA ·iv2<>+0(SB)/8, $0x510e527fade682d1 +DATA ·iv2<>+8(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·iv2<>(SB), RODATA|NOPTR, $16 diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/cast5/cast5.go b/terraform/providers/google/vendor/golang.org/x/crypto/cast5/cast5.go index 425e8eecb06..016e90215cd 100644 --- a/terraform/providers/google/vendor/golang.org/x/crypto/cast5/cast5.go +++ b/terraform/providers/google/vendor/golang.org/x/crypto/cast5/cast5.go @@ -11,7 +11,7 @@ // Deprecated: any new system should use AES (from crypto/aes, if necessary in // an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from // golang.org/x/crypto/chacha20poly1305). -package cast5 // import "golang.org/x/crypto/cast5" +package cast5 import ( "errors" diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go b/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go index 93da7322bc4..8cf5d8112e4 100644 --- a/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go +++ b/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go @@ -5,7 +5,7 @@ // Package chacha20poly1305 implements the ChaCha20-Poly1305 AEAD and its // extended nonce variant XChaCha20-Poly1305, as specified in RFC 8439 and // draft-irtf-cfrg-xchacha-01. -package chacha20poly1305 // import "golang.org/x/crypto/chacha20poly1305" +package chacha20poly1305 import ( "crypto/cipher" diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s b/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s index 731d2ac6dbc..fd5ee845f9f 100644 --- a/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s +++ b/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s @@ -1,2715 +1,9762 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file was originally from https://golang.org/cl/24717 by Vlad Krasnov of CloudFlare. +// Code generated by command: go run chacha20poly1305_amd64_asm.go -out ../chacha20poly1305_amd64.s -pkg chacha20poly1305. 
DO NOT EDIT. //go:build gc && !purego #include "textflag.h" -// General register allocation -#define oup DI -#define inp SI -#define inl BX -#define adp CX // free to reuse, after we hash the additional data -#define keyp R8 // free to reuse, when we copy the key to stack -#define itr2 R9 // general iterator -#define itr1 CX // general iterator -#define acc0 R10 -#define acc1 R11 -#define acc2 R12 -#define t0 R13 -#define t1 R14 -#define t2 R15 -#define t3 R8 -// Register and stack allocation for the SSE code -#define rStore (0*16)(BP) -#define sStore (1*16)(BP) -#define state1Store (2*16)(BP) -#define state2Store (3*16)(BP) -#define tmpStore (4*16)(BP) -#define ctr0Store (5*16)(BP) -#define ctr1Store (6*16)(BP) -#define ctr2Store (7*16)(BP) -#define ctr3Store (8*16)(BP) -#define A0 X0 -#define A1 X1 -#define A2 X2 -#define B0 X3 -#define B1 X4 -#define B2 X5 -#define C0 X6 -#define C1 X7 -#define C2 X8 -#define D0 X9 -#define D1 X10 -#define D2 X11 -#define T0 X12 -#define T1 X13 -#define T2 X14 -#define T3 X15 -#define A3 T0 -#define B3 T1 -#define C3 T2 -#define D3 T3 -// Register and stack allocation for the AVX2 code -#define rsStoreAVX2 (0*32)(BP) -#define state1StoreAVX2 (1*32)(BP) -#define state2StoreAVX2 (2*32)(BP) -#define ctr0StoreAVX2 (3*32)(BP) -#define ctr1StoreAVX2 (4*32)(BP) -#define ctr2StoreAVX2 (5*32)(BP) -#define ctr3StoreAVX2 (6*32)(BP) -#define tmpStoreAVX2 (7*32)(BP) // 256 bytes on stack -#define AA0 Y0 -#define AA1 Y5 -#define AA2 Y6 -#define AA3 Y7 -#define BB0 Y14 -#define BB1 Y9 -#define BB2 Y10 -#define BB3 Y11 -#define CC0 Y12 -#define CC1 Y13 -#define CC2 Y8 -#define CC3 Y15 -#define DD0 Y4 -#define DD1 Y1 -#define DD2 Y2 -#define DD3 Y3 -#define TT0 DD3 -#define TT1 AA3 -#define TT2 BB3 -#define TT3 CC3 -// ChaCha20 constants -DATA ·chacha20Constants<>+0x00(SB)/4, $0x61707865 -DATA ·chacha20Constants<>+0x04(SB)/4, $0x3320646e -DATA ·chacha20Constants<>+0x08(SB)/4, $0x79622d32 -DATA ·chacha20Constants<>+0x0c(SB)/4, $0x6b206574 -DATA ·chacha20Constants<>+0x10(SB)/4, $0x61707865 -DATA ·chacha20Constants<>+0x14(SB)/4, $0x3320646e -DATA ·chacha20Constants<>+0x18(SB)/4, $0x79622d32 -DATA ·chacha20Constants<>+0x1c(SB)/4, $0x6b206574 -// <<< 16 with PSHUFB -DATA ·rol16<>+0x00(SB)/8, $0x0504070601000302 -DATA ·rol16<>+0x08(SB)/8, $0x0D0C0F0E09080B0A -DATA ·rol16<>+0x10(SB)/8, $0x0504070601000302 -DATA ·rol16<>+0x18(SB)/8, $0x0D0C0F0E09080B0A -// <<< 8 with PSHUFB -DATA ·rol8<>+0x00(SB)/8, $0x0605040702010003 -DATA ·rol8<>+0x08(SB)/8, $0x0E0D0C0F0A09080B -DATA ·rol8<>+0x10(SB)/8, $0x0605040702010003 -DATA ·rol8<>+0x18(SB)/8, $0x0E0D0C0F0A09080B - -DATA ·avx2InitMask<>+0x00(SB)/8, $0x0 -DATA ·avx2InitMask<>+0x08(SB)/8, $0x0 -DATA ·avx2InitMask<>+0x10(SB)/8, $0x1 -DATA ·avx2InitMask<>+0x18(SB)/8, $0x0 - -DATA ·avx2IncMask<>+0x00(SB)/8, $0x2 -DATA ·avx2IncMask<>+0x08(SB)/8, $0x0 -DATA ·avx2IncMask<>+0x10(SB)/8, $0x2 -DATA ·avx2IncMask<>+0x18(SB)/8, $0x0 -// Poly1305 key clamp -DATA ·polyClampMask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF -DATA ·polyClampMask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC -DATA ·polyClampMask<>+0x10(SB)/8, $0xFFFFFFFFFFFFFFFF -DATA ·polyClampMask<>+0x18(SB)/8, $0xFFFFFFFFFFFFFFFF - -DATA ·sseIncMask<>+0x00(SB)/8, $0x1 -DATA ·sseIncMask<>+0x08(SB)/8, $0x0 -// To load/store the last < 16 bytes in a buffer -DATA ·andMask<>+0x00(SB)/8, $0x00000000000000ff -DATA ·andMask<>+0x08(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x10(SB)/8, $0x000000000000ffff -DATA ·andMask<>+0x18(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x20(SB)/8, $0x0000000000ffffff -DATA 
·andMask<>+0x28(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x30(SB)/8, $0x00000000ffffffff -DATA ·andMask<>+0x38(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x40(SB)/8, $0x000000ffffffffff -DATA ·andMask<>+0x48(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x50(SB)/8, $0x0000ffffffffffff -DATA ·andMask<>+0x58(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x60(SB)/8, $0x00ffffffffffffff -DATA ·andMask<>+0x68(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x70(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0x78(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x80(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0x88(SB)/8, $0x00000000000000ff -DATA ·andMask<>+0x90(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0x98(SB)/8, $0x000000000000ffff -DATA ·andMask<>+0xa0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xa8(SB)/8, $0x0000000000ffffff -DATA ·andMask<>+0xb0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xb8(SB)/8, $0x00000000ffffffff -DATA ·andMask<>+0xc0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xc8(SB)/8, $0x000000ffffffffff -DATA ·andMask<>+0xd0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xd8(SB)/8, $0x0000ffffffffffff -DATA ·andMask<>+0xe0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xe8(SB)/8, $0x00ffffffffffffff - -GLOBL ·chacha20Constants<>(SB), (NOPTR+RODATA), $32 -GLOBL ·rol16<>(SB), (NOPTR+RODATA), $32 -GLOBL ·rol8<>(SB), (NOPTR+RODATA), $32 -GLOBL ·sseIncMask<>(SB), (NOPTR+RODATA), $16 -GLOBL ·avx2IncMask<>(SB), (NOPTR+RODATA), $32 -GLOBL ·avx2InitMask<>(SB), (NOPTR+RODATA), $32 -GLOBL ·polyClampMask<>(SB), (NOPTR+RODATA), $32 -GLOBL ·andMask<>(SB), (NOPTR+RODATA), $240 -// No PALIGNR in Go ASM yet (but VPALIGNR is present). -#define shiftB0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X3, X3 -#define shiftB1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x04 // PALIGNR $4, X4, X4 -#define shiftB2Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X5, X5 -#define shiftB3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X13, X13 -#define shiftC0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X6, X6 -#define shiftC1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x08 // PALIGNR $8, X7, X7 -#define shiftC2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc0; BYTE $0x08 // PALIGNR $8, X8, X8 -#define shiftC3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X14, X14 -#define shiftD0Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x0c // PALIGNR $12, X9, X9 -#define shiftD1Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x0c // PALIGNR $12, X10, X10 -#define shiftD2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X11, X11 -#define shiftD3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x0c // PALIGNR $12, X15, X15 -#define shiftB0Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X3, X3 -#define shiftB1Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x0c // PALIGNR $12, X4, X4 -#define shiftB2Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR $12, X5, X5 -#define shiftB3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; 
BYTE $0x0c // PALIGNR $12, X13, X13 -#define shiftC0Right shiftC0Left -#define shiftC1Right shiftC1Left -#define shiftC2Right shiftC2Left -#define shiftC3Right shiftC3Left -#define shiftD0Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x04 // PALIGNR $4, X9, X9 -#define shiftD1Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x04 // PALIGNR $4, X10, X10 -#define shiftD2Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X11, X11 -#define shiftD3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x04 // PALIGNR $4, X15, X15 - -// Some macros - -// ROL rotates the uint32s in register R left by N bits, using temporary T. -#define ROL(N, R, T) \ - MOVO R, T; PSLLL $(N), T; PSRLL $(32-(N)), R; PXOR T, R - -// ROL16 rotates the uint32s in register R left by 16, using temporary T if needed. -#ifdef GOAMD64_v2 -#define ROL16(R, T) PSHUFB ·rol16<>(SB), R -#else -#define ROL16(R, T) ROL(16, R, T) -#endif - -// ROL8 rotates the uint32s in register R left by 8, using temporary T if needed. -#ifdef GOAMD64_v2 -#define ROL8(R, T) PSHUFB ·rol8<>(SB), R -#else -#define ROL8(R, T) ROL(8, R, T) -#endif - -#define chachaQR(A, B, C, D, T) \ - PADDD B, A; PXOR A, D; ROL16(D, T) \ - PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $12, T; PSRLL $20, B; PXOR T, B \ - PADDD B, A; PXOR A, D; ROL8(D, T) \ - PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $7, T; PSRLL $25, B; PXOR T, B - -#define chachaQR_AVX2(A, B, C, D, T) \ - VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol16<>(SB), D, D \ - VPADDD D, C, C; VPXOR C, B, B; VPSLLD $12, B, T; VPSRLD $20, B, B; VPXOR T, B, B \ - VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol8<>(SB), D, D \ - VPADDD D, C, C; VPXOR C, B, B; VPSLLD $7, B, T; VPSRLD $25, B, B; VPXOR T, B, B - -#define polyAdd(S) ADDQ S, acc0; ADCQ 8+S, acc1; ADCQ $1, acc2 -#define polyMulStage1 MOVQ (0*8)(BP), AX; MOVQ AX, t2; MULQ acc0; MOVQ AX, t0; MOVQ DX, t1; MOVQ (0*8)(BP), AX; MULQ acc1; IMULQ acc2, t2; ADDQ AX, t1; ADCQ DX, t2 -#define polyMulStage2 MOVQ (1*8)(BP), AX; MOVQ AX, t3; MULQ acc0; ADDQ AX, t1; ADCQ $0, DX; MOVQ DX, acc0; MOVQ (1*8)(BP), AX; MULQ acc1; ADDQ AX, t2; ADCQ $0, DX -#define polyMulStage3 IMULQ acc2, t3; ADDQ acc0, t2; ADCQ DX, t3 -#define polyMulReduceStage MOVQ t0, acc0; MOVQ t1, acc1; MOVQ t2, acc2; ANDQ $3, acc2; MOVQ t2, t0; ANDQ $-4, t0; MOVQ t3, t1; SHRQ $2, t3, t2; SHRQ $2, t3; ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $0, acc2; ADDQ t2, acc0; ADCQ t3, acc1; ADCQ $0, acc2 - -#define polyMulStage1_AVX2 MOVQ (0*8)(BP), DX; MOVQ DX, t2; MULXQ acc0, t0, t1; IMULQ acc2, t2; MULXQ acc1, AX, DX; ADDQ AX, t1; ADCQ DX, t2 -#define polyMulStage2_AVX2 MOVQ (1*8)(BP), DX; MULXQ acc0, acc0, AX; ADDQ acc0, t1; MULXQ acc1, acc1, t3; ADCQ acc1, t2; ADCQ $0, t3 -#define polyMulStage3_AVX2 IMULQ acc2, DX; ADDQ AX, t2; ADCQ DX, t3 - -#define polyMul polyMulStage1; polyMulStage2; polyMulStage3; polyMulReduceStage -#define polyMulAVX2 polyMulStage1_AVX2; polyMulStage2_AVX2; polyMulStage3_AVX2; polyMulReduceStage -// ---------------------------------------------------------------------------- + +// func polyHashADInternal<>() TEXT polyHashADInternal<>(SB), NOSPLIT, $0 - // adp points to beginning of additional data - // itr2 holds ad length - XORQ acc0, acc0 - XORQ acc1, acc1 - XORQ acc2, acc2 - CMPQ itr2, $13 - JNE hashADLoop - -openFastTLSAD: - // Special treatment for the TLS case of 13 bytes - MOVQ (adp), acc0 - MOVQ 5(adp), acc1 - SHRQ $24, acc1 - MOVQ $1, acc2 
- polyMul + // Hack: Must declare #define macros inside of a function due to Avo constraints + // ROL rotates the uint32s in register R left by N bits, using temporary T. + #define ROL(N, R, T) \ + MOVO R, T; \ + PSLLL $(N), T; \ + PSRLL $(32-(N)), R; \ + PXOR T, R + + // ROL8 rotates the uint32s in register R left by 8, using temporary T if needed. + #ifdef GOAMD64_v2 + #define ROL8(R, T) PSHUFB ·rol8<>(SB), R + #else + #define ROL8(R, T) ROL(8, R, T) + #endif + + // ROL16 rotates the uint32s in register R left by 16, using temporary T if needed. + #ifdef GOAMD64_v2 + #define ROL16(R, T) PSHUFB ·rol16<>(SB), R + #else + #define ROL16(R, T) ROL(16, R, T) + #endif + XORQ R10, R10 + XORQ R11, R11 + XORQ R12, R12 + CMPQ R9, $0x0d + JNE hashADLoop + MOVQ (CX), R10 + MOVQ 5(CX), R11 + SHRQ $0x18, R11 + MOVQ $0x00000001, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 RET hashADLoop: // Hash in 16 byte chunks - CMPQ itr2, $16 - JB hashADTail - polyAdd(0(adp)) - LEAQ (1*16)(adp), adp - SUBQ $16, itr2 - polyMul - JMP hashADLoop + CMPQ R9, $0x10 + JB hashADTail + ADDQ (CX), R10 + ADCQ 8(CX), R11 + ADCQ $0x01, R12 + LEAQ 16(CX), CX + SUBQ $0x10, R9 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + JMP hashADLoop hashADTail: - CMPQ itr2, $0 + CMPQ R9, $0x00 JE hashADDone // Hash last < 16 byte tail - XORQ t0, t0 - XORQ t1, t1 - XORQ t2, t2 - ADDQ itr2, adp + XORQ R13, R13 + XORQ R14, R14 + XORQ R15, R15 + ADDQ R9, CX hashADTailLoop: - SHLQ $8, t0, t1 - SHLQ $8, t0 - MOVB -1(adp), t2 - XORQ t2, t0 - DECQ adp - DECQ itr2 - JNE hashADTailLoop - -hashADTailFinish: - ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 - polyMul - - // Finished AD + SHLQ $0x08, R13, R14 + SHLQ $0x08, R13 + MOVB -1(CX), R15 + XORQ R15, R13 + DECQ CX + DECQ R9 + JNE hashADTailLoop + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + hashADDone: RET -// 
---------------------------------------------------------------------------- -// func chacha20Poly1305Open(dst, key, src, ad []byte) bool -TEXT ·chacha20Poly1305Open(SB), 0, $288-97 +// func chacha20Poly1305Open(dst []byte, key []uint32, src []byte, ad []byte) bool +// Requires: AVX, AVX2, BMI2, CMOV, SSE2 +TEXT ·chacha20Poly1305Open(SB), $288-97 // For aligned stack access MOVQ SP, BP - ADDQ $32, BP + ADDQ $0x20, BP ANDQ $-32, BP - MOVQ dst+0(FP), oup - MOVQ key+24(FP), keyp - MOVQ src+48(FP), inp - MOVQ src_len+56(FP), inl - MOVQ ad+72(FP), adp + MOVQ dst_base+0(FP), DI + MOVQ key_base+24(FP), R8 + MOVQ src_base+48(FP), SI + MOVQ src_len+56(FP), BX + MOVQ ad_base+72(FP), CX // Check for AVX2 support - CMPB ·useAVX2(SB), $1 + CMPB ·useAVX2+0(SB), $0x01 JE chacha20Poly1305Open_AVX2 // Special optimization, for very short buffers - CMPQ inl, $128 - JBE openSSE128 // About 16% faster + CMPQ BX, $0x80 + JBE openSSE128 // For long buffers, prepare the poly key first - MOVOU ·chacha20Constants<>(SB), A0 - MOVOU (1*16)(keyp), B0 - MOVOU (2*16)(keyp), C0 - MOVOU (3*16)(keyp), D0 - MOVO D0, T1 + MOVOU ·chacha20Constants<>+0(SB), X0 + MOVOU 16(R8), X3 + MOVOU 32(R8), X6 + MOVOU 48(R8), X9 + MOVO X9, X13 // Store state on stack for future use - MOVO B0, state1Store - MOVO C0, state2Store - MOVO D0, ctr3Store - MOVQ $10, itr2 + MOVO X3, 32(BP) + MOVO X6, 48(BP) + MOVO X9, 128(BP) + MOVQ $0x0000000a, R9 openSSEPreparePolyKey: - chachaQR(A0, B0, C0, D0, T0) - shiftB0Left; shiftC0Left; shiftD0Left - chachaQR(A0, B0, C0, D0, T0) - shiftB0Right; shiftC0Right; shiftD0Right - DECQ itr2 - JNE openSSEPreparePolyKey + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + DECQ R9 + JNE openSSEPreparePolyKey // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded - PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0 + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL 32(BP), X3 // Clamp and store the key - PAND ·polyClampMask<>(SB), A0 - MOVO A0, rStore; MOVO B0, sStore + PAND ·polyClampMask<>+0(SB), X0 + MOVO X0, (BP) + MOVO X3, 16(BP) // Hash AAD - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) openSSEMainLoop: - CMPQ inl, $256 + CMPQ BX, $0x00000100 JB openSSEMainLoopDone // Load state, increment counter blocks - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL 
·sseIncMask<>(SB), D2 - MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X2, X12 + MOVO X5, X13 + MOVO X8, X14 + MOVO X11, X15 + PADDL ·sseIncMask<>+0(SB), X15 // Store counters - MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + MOVO X9, 80(BP) + MOVO X10, 96(BP) + MOVO X11, 112(BP) + MOVO X15, 128(BP) - // There are 10 ChaCha20 iterations of 2QR each, so for 6 iterations we hash 2 blocks, and for the remaining 4 only 1 block - for a total of 16 - MOVQ $4, itr1 - MOVQ inp, itr2 + // There are 10 ChaCha20 iterations of 2QR each, so for 6 iterations we hash + // 2 blocks, and for the remaining 4 only 1 block - for a total of 16 + MOVQ $0x00000004, CX + MOVQ SI, R9 openSSEInternalLoop: - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyAdd(0(itr2)) - shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left - shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left - shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left - polyMulStage1 - polyMulStage2 - LEAQ (2*8)(itr2), itr2 - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - polyMulStage3 - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyMulReduceStage - shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right - shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right - shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right - DECQ itr1 - JGE openSSEInternalLoop - - polyAdd(0(itr2)) - polyMul - LEAQ (2*8)(itr2), itr2 - - CMPQ itr1, $-6 - JG openSSEInternalLoop + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE 
$0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x0c + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + LEAQ 16(R9), R9 + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + 
BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x04 + DECQ CX + JGE openSSEInternalLoop + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 + CMPQ CX, $-6 + JG openSSEInternalLoop // Add in the state - PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 - PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 - PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 - PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + PADDD ·chacha20Constants<>+0(SB), X0 + PADDD ·chacha20Constants<>+0(SB), X1 + PADDD ·chacha20Constants<>+0(SB), X2 + PADDD ·chacha20Constants<>+0(SB), X12 + PADDD 32(BP), X3 + PADDD 32(BP), X4 + PADDD 32(BP), X5 + PADDD 32(BP), X13 + PADDD 48(BP), X6 + PADDD 48(BP), X7 + PADDD 48(BP), X8 + PADDD 48(BP), X14 + PADDD 80(BP), X9 + PADDD 96(BP), X10 + PADDD 112(BP), X11 + PADDD 128(BP), X15 // Load - xor - store - MOVO D3, tmpStore - MOVOU (0*16)(inp), D3; PXOR D3, A0; MOVOU A0, (0*16)(oup) - MOVOU (1*16)(inp), D3; PXOR D3, B0; MOVOU B0, (1*16)(oup) - MOVOU (2*16)(inp), D3; PXOR D3, C0; MOVOU C0, (2*16)(oup) - MOVOU (3*16)(inp), D3; PXOR D3, D0; MOVOU D0, (3*16)(oup) - MOVOU (4*16)(inp), D0; PXOR D0, A1; MOVOU A1, (4*16)(oup) - MOVOU (5*16)(inp), D0; PXOR D0, B1; MOVOU B1, (5*16)(oup) - MOVOU (6*16)(inp), D0; PXOR D0, C1; MOVOU C1, (6*16)(oup) - MOVOU (7*16)(inp), D0; PXOR D0, D1; MOVOU D1, (7*16)(oup) - MOVOU (8*16)(inp), D0; PXOR D0, A2; MOVOU A2, (8*16)(oup) - MOVOU (9*16)(inp), D0; PXOR D0, B2; MOVOU B2, (9*16)(oup) - MOVOU (10*16)(inp), D0; PXOR D0, C2; MOVOU C2, (10*16)(oup) - MOVOU (11*16)(inp), D0; PXOR D0, D2; MOVOU D2, (11*16)(oup) - MOVOU (12*16)(inp), D0; PXOR D0, A3; MOVOU A3, (12*16)(oup) - MOVOU (13*16)(inp), D0; PXOR D0, B3; MOVOU B3, (13*16)(oup) - MOVOU (14*16)(inp), D0; PXOR D0, C3; MOVOU C3, (14*16)(oup) - MOVOU (15*16)(inp), D0; PXOR tmpStore, D0; MOVOU D0, (15*16)(oup) - LEAQ 256(inp), inp - LEAQ 256(oup), oup - SUBQ $256, inl + MOVO X15, 64(BP) + MOVOU (SI), X15 + PXOR X15, X0 + MOVOU X0, (DI) + MOVOU 16(SI), X15 + PXOR X15, X3 + MOVOU X3, 16(DI) + MOVOU 32(SI), X15 + PXOR X15, X6 + MOVOU X6, 32(DI) + MOVOU 48(SI), X15 + PXOR X15, X9 + MOVOU X9, 48(DI) + MOVOU 64(SI), X9 + PXOR X9, X1 + MOVOU X1, 64(DI) + MOVOU 80(SI), X9 + PXOR X9, X4 + MOVOU X4, 80(DI) + MOVOU 96(SI), X9 + PXOR X9, X7 + MOVOU X7, 96(DI) + MOVOU 112(SI), X9 + PXOR X9, X10 + MOVOU X10, 112(DI) + MOVOU 128(SI), X9 + PXOR X9, X2 + MOVOU X2, 128(DI) + MOVOU 144(SI), X9 + PXOR X9, X5 + MOVOU X5, 144(DI) + MOVOU 160(SI), X9 + PXOR X9, X8 + MOVOU X8, 160(DI) + MOVOU 176(SI), X9 + PXOR X9, X11 + MOVOU X11, 176(DI) + MOVOU 192(SI), X9 + PXOR X9, X12 + MOVOU X12, 192(DI) + MOVOU 208(SI), X9 + PXOR X9, X13 + MOVOU X13, 208(DI) + MOVOU 
224(SI), X9 + PXOR X9, X14 + MOVOU X14, 224(DI) + MOVOU 240(SI), X9 + PXOR 64(BP), X9 + MOVOU X9, 240(DI) + LEAQ 256(SI), SI + LEAQ 256(DI), DI + SUBQ $0x00000100, BX JMP openSSEMainLoop openSSEMainLoopDone: // Handle the various tail sizes efficiently - TESTQ inl, inl + TESTQ BX, BX JE openSSEFinalize - CMPQ inl, $64 + CMPQ BX, $0x40 JBE openSSETail64 - CMPQ inl, $128 + CMPQ BX, $0x80 JBE openSSETail128 - CMPQ inl, $192 + CMPQ BX, $0xc0 JBE openSSETail192 JMP openSSETail256 openSSEFinalize: // Hash in the PT, AAD lengths - ADDQ ad_len+80(FP), acc0; ADCQ src_len+56(FP), acc1; ADCQ $1, acc2 - polyMul + ADDQ ad_len+80(FP), R10 + ADCQ src_len+56(FP), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 // Final reduce - MOVQ acc0, t0 - MOVQ acc1, t1 - MOVQ acc2, t2 - SUBQ $-5, acc0 - SBBQ $-1, acc1 - SBBQ $3, acc2 - CMOVQCS t0, acc0 - CMOVQCS t1, acc1 - CMOVQCS t2, acc2 + MOVQ R10, R13 + MOVQ R11, R14 + MOVQ R12, R15 + SUBQ $-5, R10 + SBBQ $-1, R11 + SBBQ $0x03, R12 + CMOVQCS R13, R10 + CMOVQCS R14, R11 + CMOVQCS R15, R12 // Add in the "s" part of the key - ADDQ 0+sStore, acc0 - ADCQ 8+sStore, acc1 + ADDQ 16(BP), R10 + ADCQ 24(BP), R11 // Finally, constant time compare to the tag at the end of the message XORQ AX, AX - MOVQ $1, DX - XORQ (0*8)(inp), acc0 - XORQ (1*8)(inp), acc1 - ORQ acc1, acc0 + MOVQ $0x00000001, DX + XORQ (SI), R10 + XORQ 8(SI), R11 + ORQ R11, R10 CMOVQEQ DX, AX // Return true iff tags are equal MOVB AX, ret+96(FP) RET -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 129 bytes openSSE128: - // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks - MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 - MOVQ $10, itr2 + MOVOU ·chacha20Constants<>+0(SB), X0 + MOVOU 16(R8), X3 + MOVOU 32(R8), X6 + MOVOU 48(R8), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X3, X13 + MOVO X6, X14 + MOVO X10, X15 + MOVQ $0x0000000a, R9 openSSE128InnerCipherLoop: - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Left; shiftB1Left; shiftB2Left - shiftC0Left; shiftC1Left; shiftC2Left - shiftD0Left; shiftD1Left; shiftD2Left - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Right; shiftB1Right; shiftB2Right - shiftC0Right; shiftC1Right; shiftC2Right - shiftD0Right; shiftD1Right; shiftD2Right - DECQ itr2 - JNE openSSE128InnerCipherLoop + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL 
$0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + DECQ R9 + JNE openSSE128InnerCipherLoop // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 - PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 - PADDL T2, C1; PADDL T2, C2 - PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 
+ PADDL ·chacha20Constants<>+0(SB), X2 + PADDL X13, X3 + PADDL X13, X4 + PADDL X13, X5 + PADDL X14, X7 + PADDL X14, X8 + PADDL X15, X10 + PADDL ·sseIncMask<>+0(SB), X15 + PADDL X15, X11 // Clamp and store the key - PAND ·polyClampMask<>(SB), A0 - MOVOU A0, rStore; MOVOU B0, sStore + PAND ·polyClampMask<>+0(SB), X0 + MOVOU X0, (BP) + MOVOU X3, 16(BP) // Hash - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) openSSE128Open: - CMPQ inl, $16 + CMPQ BX, $0x10 JB openSSETail16 - SUBQ $16, inl + SUBQ $0x10, BX // Load for hashing - polyAdd(0(inp)) + ADDQ (SI), R10 + ADCQ 8(SI), R11 + ADCQ $0x01, R12 // Load for decryption - MOVOU (inp), T0; PXOR T0, A1; MOVOU A1, (oup) - LEAQ (1*16)(inp), inp - LEAQ (1*16)(oup), oup - polyMul + MOVOU (SI), X12 + PXOR X12, X1 + MOVOU X1, (DI) + LEAQ 16(SI), SI + LEAQ 16(DI), DI + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 // Shift the stream "left" - MOVO B1, A1 - MOVO C1, B1 - MOVO D1, C1 - MOVO A2, D1 - MOVO B2, A2 - MOVO C2, B2 - MOVO D2, C2 + MOVO X4, X1 + MOVO X7, X4 + MOVO X10, X7 + MOVO X2, X10 + MOVO X5, X2 + MOVO X8, X5 + MOVO X11, X8 JMP openSSE128Open openSSETail16: - TESTQ inl, inl + TESTQ BX, BX JE openSSEFinalize // We can safely load the CT from the end, because it is padded with the MAC - MOVQ inl, itr2 - SHLQ $4, itr2 - LEAQ ·andMask<>(SB), t0 - MOVOU (inp), T0 - ADDQ inl, inp - PAND -16(t0)(itr2*1), T0 - MOVO T0, 0+tmpStore - MOVQ T0, t0 - MOVQ 8+tmpStore, t1 - PXOR A1, T0 + MOVQ BX, R9 + SHLQ $0x04, R9 + LEAQ ·andMask<>+0(SB), R13 + MOVOU (SI), X12 + ADDQ BX, SI + PAND -16(R13)(R9*1), X12 + MOVO X12, 64(BP) + MOVQ X12, R13 + MOVQ 72(BP), R14 + PXOR X1, X12 // We can only store one byte at a time, since plaintext can be shorter than 16 bytes openSSETail16Store: - MOVQ T0, t3 - MOVB t3, (oup) - PSRLDQ $1, T0 - INCQ oup - DECQ inl + MOVQ X12, R8 + MOVB R8, (DI) + PSRLDQ $0x01, X12 + INCQ DI + DECQ BX JNE openSSETail16Store - ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 - polyMul + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 JMP openSSEFinalize -// ---------------------------------------------------------------------------- -// Special optimization for the last 64 bytes of ciphertext openSSETail64: - // Need to decrypt up to 64 bytes - prepare single block - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, 
ctr0Store - XORQ itr2, itr2 - MOVQ inl, itr1 - CMPQ itr1, $16 - JB openSSETail64LoopB + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 80(BP) + XORQ R9, R9 + MOVQ BX, CX + CMPQ CX, $0x10 + JB openSSETail64LoopB openSSETail64LoopA: - // Perform ChaCha rounds, while hashing the remaining input - polyAdd(0(inp)(itr2*1)) - polyMul - SUBQ $16, itr1 + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + SUBQ $0x10, CX openSSETail64LoopB: - ADDQ $16, itr2 - chachaQR(A0, B0, C0, D0, T0) - shiftB0Left; shiftC0Left; shiftD0Left - chachaQR(A0, B0, C0, D0, T0) - shiftB0Right; shiftC0Right; shiftD0Right - - CMPQ itr1, $16 - JAE openSSETail64LoopA - - CMPQ itr2, $160 - JNE openSSETail64LoopB - - PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0; PADDL state2Store, C0; PADDL ctr0Store, D0 + ADDQ $0x10, R9 + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + CMPQ CX, $0x10 + JAE openSSETail64LoopA + CMPQ R9, $0xa0 + JNE openSSETail64LoopB + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL 32(BP), X3 + PADDL 48(BP), X6 + PADDL 80(BP), X9 openSSETail64DecLoop: - CMPQ inl, $16 + CMPQ BX, $0x10 JB openSSETail64DecLoopDone - SUBQ $16, inl - MOVOU (inp), T0 - PXOR T0, A0 - MOVOU A0, (oup) - LEAQ 16(inp), inp - LEAQ 16(oup), oup - MOVO B0, A0 - MOVO C0, B0 - MOVO D0, C0 + SUBQ $0x10, BX + MOVOU (SI), X12 + PXOR X12, X0 + MOVOU X0, (DI) + LEAQ 16(SI), SI + LEAQ 16(DI), DI + MOVO X3, X0 + MOVO X6, X3 + MOVO X9, X6 JMP openSSETail64DecLoop openSSETail64DecLoopDone: - MOVO A0, A1 + MOVO X0, X1 JMP openSSETail16 -// ---------------------------------------------------------------------------- -// Special optimization for the last 128 bytes of ciphertext openSSETail128: - // Need to decrypt up to 128 bytes - prepare two blocks - MOVO ·chacha20Constants<>(SB), A1; MOVO state1Store, B1; MOVO state2Store, C1; MOVO ctr3Store, D1; PADDL 
·sseIncMask<>(SB), D1; MOVO D1, ctr0Store - MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr1Store - XORQ itr2, itr2 - MOVQ inl, itr1 - ANDQ $-16, itr1 + MOVO ·chacha20Constants<>+0(SB), X1 + MOVO 32(BP), X4 + MOVO 48(BP), X7 + MOVO 128(BP), X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X10, 80(BP) + MOVO X1, X0 + MOVO X4, X3 + MOVO X7, X6 + MOVO X10, X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 96(BP) + XORQ R9, R9 + MOVQ BX, CX + ANDQ $-16, CX openSSETail128LoopA: - // Perform ChaCha rounds, while hashing the remaining input - polyAdd(0(inp)(itr2*1)) - polyMul + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 openSSETail128LoopB: - ADDQ $16, itr2 - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) - shiftB0Left; shiftC0Left; shiftD0Left - shiftB1Left; shiftC1Left; shiftD1Left - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) - shiftB0Right; shiftC0Right; shiftD0Right - shiftB1Right; shiftC1Right; shiftD1Right - - CMPQ itr2, itr1 - JB openSSETail128LoopA - - CMPQ itr2, $160 - JNE openSSETail128LoopB - - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 - PADDL state1Store, B0; PADDL state1Store, B1 - PADDL state2Store, C0; PADDL state2Store, C1 - PADDL ctr1Store, D0; PADDL ctr0Store, D1 - - MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 - PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 - MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) - - SUBQ $64, inl - LEAQ 64(inp), inp - LEAQ 64(oup), oup - JMP openSSETail64DecLoop - -// ---------------------------------------------------------------------------- -// Special optimization for the last 192 bytes of ciphertext + ADDQ $0x10, R9 + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL 
$0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + CMPQ R9, CX + JB openSSETail128LoopA + CMPQ R9, $0xa0 + JNE openSSETail128LoopB + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL 32(BP), X3 + PADDL 32(BP), X4 + PADDL 48(BP), X6 + PADDL 48(BP), X7 + PADDL 96(BP), X9 + PADDL 80(BP), X10 + MOVOU (SI), X12 + MOVOU 16(SI), X13 + MOVOU 32(SI), X14 + MOVOU 48(SI), X15 + PXOR X12, X1 + PXOR X13, X4 + PXOR X14, X7 + PXOR X15, X10 + MOVOU X1, (DI) + MOVOU X4, 16(DI) + MOVOU X7, 32(DI) + MOVOU X10, 48(DI) + SUBQ $0x40, BX + LEAQ 64(SI), SI + LEAQ 64(DI), DI + JMP openSSETail64DecLoop + openSSETail192: - // Need to decrypt up to 192 bytes - prepare three blocks - MOVO ·chacha20Constants<>(SB), A2; MOVO state1Store, B2; MOVO state2Store, C2; MOVO ctr3Store, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr0Store - MOVO A2, A1; MOVO B2, B1; MOVO C2, C1; MOVO D2, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store - MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr2Store - - MOVQ inl, itr1 - MOVQ $160, itr2 - CMPQ itr1, $160 - CMOVQGT itr2, itr1 - ANDQ $-16, itr1 - XORQ itr2, itr2 + MOVO ·chacha20Constants<>+0(SB), X2 + MOVO 32(BP), X5 + MOVO 48(BP), X8 + MOVO 128(BP), X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X11, 80(BP) + MOVO X2, X1 + MOVO X5, X4 + MOVO X8, X7 + MOVO X11, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X10, 96(BP) + MOVO X1, X0 + MOVO X4, X3 + MOVO X7, X6 + MOVO X10, X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 112(BP) + MOVQ BX, CX + MOVQ $0x000000a0, R9 + CMPQ CX, $0xa0 + CMOVQGT R9, CX + ANDQ $-16, CX + XORQ R9, R9 openSSLTail192LoopA: - // Perform ChaCha rounds, while hashing the remaining input - polyAdd(0(inp)(itr2*1)) - polyMul + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 openSSLTail192LoopB: - ADDQ $16, itr2 - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Left; shiftC0Left; shiftD0Left - shiftB1Left; shiftC1Left; shiftD1Left - shiftB2Left; 
shiftC2Left; shiftD2Left - - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Right; shiftC0Right; shiftD0Right - shiftB1Right; shiftC1Right; shiftD1Right - shiftB2Right; shiftC2Right; shiftD2Right - - CMPQ itr2, itr1 - JB openSSLTail192LoopA - - CMPQ itr2, $160 - JNE openSSLTail192LoopB - - CMPQ inl, $176 - JB openSSLTail192Store - - polyAdd(160(inp)) - polyMul - - CMPQ inl, $192 - JB openSSLTail192Store - - polyAdd(176(inp)) - polyMul + ADDQ $0x10, R9 + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c 
+ BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + CMPQ R9, CX + JB openSSLTail192LoopA + CMPQ R9, $0xa0 + JNE openSSLTail192LoopB + CMPQ BX, $0xb0 + JB openSSLTail192Store + ADDQ 160(SI), R10 + ADCQ 168(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + CMPQ BX, $0xc0 + JB openSSLTail192Store + ADDQ 176(SI), R10 + ADCQ 184(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 openSSLTail192Store: - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 - PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 - PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 - PADDL ctr2Store, D0; PADDL ctr1Store, D1; PADDL ctr0Store, D2 - - MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 - PXOR T0, A2; PXOR T1, B2; PXOR T2, C2; PXOR T3, D2 - MOVOU A2, (0*16)(oup); MOVOU B2, (1*16)(oup); MOVOU C2, (2*16)(oup); MOVOU D2, (3*16)(oup) - - MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 - PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 - MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) - - SUBQ $128, inl - LEAQ 128(inp), inp - LEAQ 128(oup), oup - JMP openSSETail64DecLoop - -// ---------------------------------------------------------------------------- -// Special optimization for the last 256 bytes of ciphertext + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL ·chacha20Constants<>+0(SB), X2 + PADDL 32(BP), X3 + PADDL 32(BP), X4 + PADDL 32(BP), X5 + PADDL 48(BP), X6 + PADDL 48(BP), X7 + PADDL 48(BP), X8 + PADDL 112(BP), X9 + PADDL 96(BP), X10 + PADDL 80(BP), X11 + MOVOU (SI), X12 + MOVOU 16(SI), X13 + MOVOU 32(SI), X14 + MOVOU 48(SI), X15 + PXOR X12, X2 + PXOR X13, X5 + PXOR X14, X8 + PXOR X15, X11 + MOVOU X2, (DI) + MOVOU X5, 16(DI) + MOVOU X8, 32(DI) + MOVOU X11, 48(DI) + MOVOU 64(SI), X12 + MOVOU 80(SI), X13 + MOVOU 96(SI), X14 + MOVOU 112(SI), X15 + PXOR X12, X1 + PXOR X13, X4 + PXOR X14, X7 + PXOR X15, X10 + MOVOU X1, 64(DI) + MOVOU X4, 80(DI) + MOVOU X7, 96(DI) + MOVOU X10, 112(DI) + SUBQ $0x80, BX + LEAQ 128(SI), SI + LEAQ 128(DI), DI + JMP openSSETail64DecLoop + openSSETail256: - // Need to decrypt up to 256 
bytes - prepare four blocks - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X2, X12 + MOVO X5, X13 + MOVO X8, X14 + MOVO X11, X15 + PADDL ·sseIncMask<>+0(SB), X15 // Store counters - MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store - XORQ itr2, itr2 + MOVO X9, 80(BP) + MOVO X10, 96(BP) + MOVO X11, 112(BP) + MOVO X15, 128(BP) + XORQ R9, R9 openSSETail256Loop: - // This loop inteleaves 8 ChaCha quarter rounds with 1 poly multiplication - polyAdd(0(inp)(itr2*1)) - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left - shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left - shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left - polyMulStage1 - polyMulStage2 - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyMulStage3 - polyMulReduceStage - shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right - shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right - shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right - ADDQ $2*8, itr2 - CMPQ itr2, $160 - JB openSSETail256Loop - MOVQ inl, itr1 - ANDQ $-16, itr1 + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + 
BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x0c + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + 
BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x04 + ADDQ $0x10, R9 + CMPQ R9, $0xa0 + JB openSSETail256Loop + MOVQ BX, CX + ANDQ $-16, CX openSSETail256HashLoop: - polyAdd(0(inp)(itr2*1)) - polyMul - ADDQ $2*8, itr2 - CMPQ itr2, itr1 - JB openSSETail256HashLoop + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ $0x10, R9 + CMPQ R9, CX + JB openSSETail256HashLoop // Add in the state - PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 - PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 - PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 - PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 - MOVO D3, tmpStore + PADDD ·chacha20Constants<>+0(SB), X0 + PADDD ·chacha20Constants<>+0(SB), X1 + PADDD ·chacha20Constants<>+0(SB), X2 + PADDD ·chacha20Constants<>+0(SB), X12 + PADDD 32(BP), X3 + PADDD 32(BP), X4 + PADDD 32(BP), X5 + PADDD 32(BP), X13 + PADDD 48(BP), X6 + PADDD 48(BP), X7 + PADDD 48(BP), X8 + PADDD 48(BP), X14 + PADDD 80(BP), X9 + PADDD 96(BP), X10 + PADDD 112(BP), X11 + PADDD 128(BP), X15 + MOVO X15, 64(BP) // Load - xor - store - MOVOU (0*16)(inp), D3; PXOR D3, A0 - MOVOU (1*16)(inp), D3; PXOR D3, B0 - MOVOU (2*16)(inp), D3; PXOR D3, C0 - MOVOU (3*16)(inp), D3; PXOR D3, D0 - MOVOU A0, (0*16)(oup) - MOVOU B0, (1*16)(oup) - MOVOU C0, (2*16)(oup) - MOVOU D0, (3*16)(oup) - MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 - PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 - MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) - MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 - PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 - MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup) - LEAQ 192(inp), inp - LEAQ 192(oup), oup - SUBQ $192, inl - MOVO A3, A0 - MOVO B3, B0 - MOVO C3, C0 - MOVO tmpStore, D0 - - JMP openSSETail64DecLoop - -// ---------------------------------------------------------------------------- -// ------------------------- AVX2 Code ---------------------------------------- + MOVOU (SI), X15 + PXOR X15, X0 + MOVOU 16(SI), X15 + PXOR X15, X3 + MOVOU 32(SI), X15 + PXOR X15, X6 + MOVOU 48(SI), X15 + PXOR X15, X9 + MOVOU X0, (DI) + MOVOU X3, 16(DI) + MOVOU X6, 32(DI) + MOVOU X9, 48(DI) + MOVOU 64(SI), X0 + MOVOU 80(SI), X3 + MOVOU 96(SI), X6 + MOVOU 112(SI), X9 + PXOR X0, X1 + PXOR X3, X4 + PXOR X6, X7 + PXOR X9, X10 + MOVOU X1, 64(DI) + MOVOU X4, 80(DI) + MOVOU X7, 96(DI) + MOVOU X10, 112(DI) + MOVOU 128(SI), X0 + MOVOU 144(SI), X3 + MOVOU 160(SI), X6 + MOVOU 176(SI), X9 + PXOR X0, X2 + PXOR X3, X5 + PXOR X6, X8 + PXOR X9, X11 + MOVOU X2, 128(DI) + MOVOU X5, 
144(DI) + MOVOU X8, 160(DI) + MOVOU X11, 176(DI) + LEAQ 192(SI), SI + LEAQ 192(DI), DI + SUBQ $0xc0, BX + MOVO X12, X0 + MOVO X13, X3 + MOVO X14, X6 + MOVO 64(BP), X9 + JMP openSSETail64DecLoop + chacha20Poly1305Open_AVX2: VZEROUPPER - VMOVDQU ·chacha20Constants<>(SB), AA0 - BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 - BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 - BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4 - VPADDD ·avx2InitMask<>(SB), DD0, DD0 + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + BYTE $0xc4 + BYTE $0x42 + BYTE $0x7d + BYTE $0x5a + BYTE $0x70 + BYTE $0x10 + BYTE $0xc4 + BYTE $0x42 + BYTE $0x7d + BYTE $0x5a + BYTE $0x60 + BYTE $0x20 + BYTE $0xc4 + BYTE $0xc2 + BYTE $0x7d + BYTE $0x5a + BYTE $0x60 + BYTE $0x30 + VPADDD ·avx2InitMask<>+0(SB), Y4, Y4 // Special optimization, for very short buffers - CMPQ inl, $192 + CMPQ BX, $0xc0 JBE openAVX2192 - CMPQ inl, $320 + CMPQ BX, $0x00000140 JBE openAVX2320 // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream - VMOVDQA BB0, state1StoreAVX2 - VMOVDQA CC0, state2StoreAVX2 - VMOVDQA DD0, ctr3StoreAVX2 - MOVQ $10, itr2 + VMOVDQA Y14, 32(BP) + VMOVDQA Y12, 64(BP) + VMOVDQA Y4, 192(BP) + MOVQ $0x0000000a, R9 openAVX2PreparePolyKey: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 - DECQ itr2 - JNE openAVX2PreparePolyKey - - VPADDD ·chacha20Constants<>(SB), AA0, AA0 - VPADDD state1StoreAVX2, BB0, BB0 - VPADDD state2StoreAVX2, CC0, CC0 - VPADDD ctr3StoreAVX2, DD0, DD0 - - VPERM2I128 $0x02, AA0, BB0, TT0 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x04, Y4, Y4, Y4 + DECQ R9 + JNE openAVX2PreparePolyKey + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD 32(BP), Y14, Y14 + VPADDD 64(BP), Y12, Y12 + VPADDD 192(BP), Y4, Y4 + VPERM2I128 $0x02, Y0, Y14, Y3 // Clamp and store poly key - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) // Stream for the first 64 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 // Hash AD + first 64 bytes - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) - XORQ itr1, itr1 + XORQ CX, CX openAVX2InitialHash64: - polyAdd(0(inp)(itr1*1)) - polyMulAVX2 - ADDQ $16, itr1 - 
CMPQ itr1, $64 - JNE openAVX2InitialHash64 + ADDQ (SI)(CX*1), R10 + ADCQ 8(SI)(CX*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ $0x10, CX + CMPQ CX, $0x40 + JNE openAVX2InitialHash64 // Decrypt the first 64 bytes - VPXOR (0*32)(inp), AA0, AA0 - VPXOR (1*32)(inp), BB0, BB0 - VMOVDQU AA0, (0*32)(oup) - VMOVDQU BB0, (1*32)(oup) - LEAQ (2*32)(inp), inp - LEAQ (2*32)(oup), oup - SUBQ $64, inl + VPXOR (SI), Y0, Y0 + VPXOR 32(SI), Y14, Y14 + VMOVDQU Y0, (DI) + VMOVDQU Y14, 32(DI) + LEAQ 64(SI), SI + LEAQ 64(DI), DI + SUBQ $0x40, BX openAVX2MainLoop: - CMPQ inl, $512 + CMPQ BX, $0x00000200 JB openAVX2MainLoopDone // Load state, increment counter blocks, store the incremented counters - VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 - VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 - XORQ itr1, itr1 + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + XORQ CX, CX openAVX2InternalLoop: - // Lets just say this spaghetti loop interleaves 2 quarter rounds with 3 poly multiplications - // Effectively per 512 bytes of stream we hash 480 bytes of ciphertext - polyAdd(0*8(inp)(itr1*1)) - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - polyMulStage1_AVX2 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - polyMulStage2_AVX2 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyMulStage3_AVX2 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulReduceStage - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR 
AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - polyAdd(2*8(inp)(itr1*1)) - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - polyMulStage1_AVX2 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulStage2_AVX2 - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - polyMulStage3_AVX2 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - polyMulReduceStage - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyAdd(4*8(inp)(itr1*1)) - LEAQ (6*8)(itr1), itr1 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulStage1_AVX2 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - polyMulStage2_AVX2 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - polyMulStage3_AVX2 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulReduceStage - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 - CMPQ itr1, $480 + ADDQ (SI)(CX*1), R10 + ADCQ 8(SI)(CX*1), R11 + ADCQ $0x01, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB 
·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + ADDQ 16(SI)(CX*1), R10 + ADCQ 24(SI)(CX*1), R11 + ADCQ $0x01, R12 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x0c, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + ADDQ 32(SI)(CX*1), R10 + ADCQ 40(SI)(CX*1), R11 + ADCQ $0x01, R12 + LEAQ 48(CX), CX + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD 
$0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x04, Y3, Y3, Y3 + CMPQ CX, $0x000001e0 JNE openAVX2InternalLoop - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - VMOVDQA CC3, tmpStoreAVX2 + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD ·chacha20Constants<>+0(SB), Y7, Y7 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 32(BP), Y11, Y11 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 64(BP), Y15, Y15 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPADDD 192(BP), Y3, Y3 + VMOVDQA Y15, 224(BP) // We only hashed 480 of the 512 bytes available - hash the remaining 32 here - polyAdd(480(inp)) - polyMulAVX2 - VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 - VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 - VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; 
VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 - VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + ADDQ 480(SI), R10 + ADCQ 488(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPERM2I128 $0x02, Y0, Y14, Y15 + VPERM2I128 $0x13, Y0, Y14, Y14 + VPERM2I128 $0x02, Y12, Y4, Y0 + VPERM2I128 $0x13, Y12, Y4, Y12 + VPXOR (SI), Y15, Y15 + VPXOR 32(SI), Y0, Y0 + VPXOR 64(SI), Y14, Y14 + VPXOR 96(SI), Y12, Y12 + VMOVDQU Y15, (DI) + VMOVDQU Y0, 32(DI) + VMOVDQU Y14, 64(DI) + VMOVDQU Y12, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR 128(SI), Y0, Y0 + VPXOR 160(SI), Y14, Y14 + VPXOR 192(SI), Y12, Y12 + VPXOR 224(SI), Y4, Y4 + VMOVDQU Y0, 128(DI) + VMOVDQU Y14, 160(DI) + VMOVDQU Y12, 192(DI) + VMOVDQU Y4, 224(DI) // and here - polyAdd(496(inp)) - polyMulAVX2 - VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 - VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) - VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 - VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 - VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) - LEAQ (32*16)(inp), inp - LEAQ (32*16)(oup), oup - SUBQ $(32*16), inl + ADDQ 496(SI), R10 + ADCQ 504(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + VPXOR 256(SI), Y0, Y0 + VPXOR 288(SI), Y14, Y14 + VPXOR 320(SI), Y12, Y12 + VPXOR 352(SI), Y4, Y4 + VMOVDQU Y0, 256(DI) + VMOVDQU Y14, 288(DI) + VMOVDQU Y12, 320(DI) + VMOVDQU Y4, 352(DI) + VPERM2I128 $0x02, Y7, Y11, Y0 + VPERM2I128 $0x02, 224(BP), Y3, Y14 + VPERM2I128 $0x13, Y7, Y11, Y12 + VPERM2I128 $0x13, 224(BP), Y3, Y4 + VPXOR 384(SI), Y0, Y0 + VPXOR 416(SI), Y14, Y14 + VPXOR 448(SI), Y12, Y12 + VPXOR 480(SI), Y4, Y4 + VMOVDQU Y0, 384(DI) + VMOVDQU Y14, 416(DI) + VMOVDQU Y12, 448(DI) + VMOVDQU Y4, 480(DI) + LEAQ 512(SI), SI + LEAQ 512(DI), DI + SUBQ $0x00000200, BX JMP openAVX2MainLoop 
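Note on this hunk: the updated vendored assembly replaces the old macro-based source (chachaQR, polyAdd/polyMul, and register aliases such as inp/oup/itr2) with fully expanded instructions over raw registers (SI/DI/BX, X0-X15, Y0-Y15), consistent with upstream x/crypto's move to generator-emitted assembly. The recurring PADDD/PXOR runs with the rol16<>/rol8<> shuffles and the 12/20 and 7/25 shift pairs are four-lane (SSE) or eight-lane (AVX2) renderings of the scalar ChaCha20 quarter round, and the long MULQ/MULXQ + ADDQ/ADCQ sequences are the expanded Poly1305 multiply-and-reduce, h = (h + m) * r mod 2^130 - 5, that the polyMul macro previously hid. The following scalar sketch is illustrative only and not part of the patch; the function name quarterRound is ours:

    package main

    import (
    	"fmt"
    	"math/bits"
    )

    // quarterRound is the scalar ChaCha20 quarter round. Each
    // PADDD/PXOR/rotate run in the assembly above applies this same
    // update to 4 (SSE) or 8 (AVX2) state columns in parallel.
    func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
    	a += b
    	d ^= a
    	d = bits.RotateLeft32(d, 16) // PSHUFB ·rol16<>
    	c += d
    	b ^= c
    	b = bits.RotateLeft32(b, 12) // PSLLD $0x0c / PSRLD $0x14
    	a += b
    	d ^= a
    	d = bits.RotateLeft32(d, 8) // PSHUFB ·rol8<>
    	c += d
    	b ^= c
    	b = bits.RotateLeft32(b, 7) // PSLLD $0x07 / PSRLD $0x19
    	return a, b, c, d
    }

    func main() {
    	// Input words taken from the RFC 8439 section 2.1.1 quarter-round example.
    	a, b, c, d := quarterRound(0x11111111, 0x01020304, 0x9b8d6f43, 0x01234567)
    	fmt.Printf("%08x %08x %08x %08x\n", a, b, c, d)
    }

The rotation constants (16, 12, 8, 7) line up exactly with the shuffle tables and shift pairs in the expanded code, which gives a quick way to audit that the regenerated assembly computes the same stream as before; the exported chacha20poly1305 API and its behavior are unchanged by this hunk.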
openAVX2MainLoopDone: // Handle the various tail sizes efficiently - TESTQ inl, inl + TESTQ BX, BX JE openSSEFinalize - CMPQ inl, $128 + CMPQ BX, $0x80 JBE openAVX2Tail128 - CMPQ inl, $256 + CMPQ BX, $0x00000100 JBE openAVX2Tail256 - CMPQ inl, $384 + CMPQ BX, $0x00000180 JBE openAVX2Tail384 JMP openAVX2Tail512 -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 193 bytes openAVX2192: - // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks - VMOVDQA AA0, AA1 - VMOVDQA BB0, BB1 - VMOVDQA CC0, CC1 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA AA0, AA2 - VMOVDQA BB0, BB2 - VMOVDQA CC0, CC2 - VMOVDQA DD0, DD2 - VMOVDQA DD1, TT3 - MOVQ $10, itr2 + VMOVDQA Y0, Y5 + VMOVDQA Y14, Y9 + VMOVDQA Y12, Y13 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y0, Y6 + VMOVDQA Y14, Y10 + VMOVDQA Y12, Y8 + VMOVDQA Y4, Y2 + VMOVDQA Y1, Y15 + MOVQ $0x0000000a, R9 openAVX2192InnerCipherLoop: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 - DECQ itr2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + DECQ R9 JNE openAVX2192InnerCipherLoop - VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 - VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 - VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 - VPADDD DD2, DD0, DD0; VPADDD 
TT3, DD1, DD1 - VPERM2I128 $0x02, AA0, BB0, TT0 + VPADDD Y6, Y0, Y0 + VPADDD Y6, Y5, Y5 + VPADDD Y10, Y14, Y14 + VPADDD Y10, Y9, Y9 + VPADDD Y8, Y12, Y12 + VPADDD Y8, Y13, Y13 + VPADDD Y2, Y4, Y4 + VPADDD Y15, Y1, Y1 + VPERM2I128 $0x02, Y0, Y14, Y3 // Clamp and store poly key - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) // Stream for up to 192 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - VPERM2I128 $0x13, CC0, DD0, BB0 - VPERM2I128 $0x02, AA1, BB1, CC0 - VPERM2I128 $0x02, CC1, DD1, DD0 - VPERM2I128 $0x13, AA1, BB1, AA1 - VPERM2I128 $0x13, CC1, DD1, BB1 + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 + VPERM2I128 $0x02, Y5, Y9, Y12 + VPERM2I128 $0x02, Y13, Y1, Y4 + VPERM2I128 $0x13, Y5, Y9, Y5 + VPERM2I128 $0x13, Y13, Y1, Y9 openAVX2ShortOpen: // Hash - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) openAVX2ShortOpenLoop: - CMPQ inl, $32 + CMPQ BX, $0x20 JB openAVX2ShortTail32 - SUBQ $32, inl + SUBQ $0x20, BX // Load for hashing - polyAdd(0*8(inp)) - polyMulAVX2 - polyAdd(2*8(inp)) - polyMulAVX2 + ADDQ (SI), R10 + ADCQ 8(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ 16(SI), R10 + ADCQ 24(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 // Load for decryption - VPXOR (inp), AA0, AA0 - VMOVDQU AA0, (oup) - LEAQ (1*32)(inp), inp - LEAQ (1*32)(oup), oup + VPXOR (SI), Y0, Y0 + VMOVDQU Y0, (DI) + LEAQ 32(SI), SI + LEAQ 32(DI), DI // Shift stream left - VMOVDQA BB0, AA0 - VMOVDQA CC0, BB0 - VMOVDQA DD0, CC0 - VMOVDQA AA1, DD0 - VMOVDQA BB1, AA1 - VMOVDQA CC1, BB1 - VMOVDQA DD1, CC1 - VMOVDQA AA2, DD1 - VMOVDQA BB2, AA2 + VMOVDQA Y14, Y0 + VMOVDQA Y12, Y14 + VMOVDQA Y4, Y12 + VMOVDQA Y5, Y4 + VMOVDQA Y9, Y5 + VMOVDQA Y13, Y9 + VMOVDQA Y1, Y13 + VMOVDQA Y6, Y1 + VMOVDQA Y10, Y6 JMP openAVX2ShortOpenLoop openAVX2ShortTail32: - CMPQ inl, $16 - VMOVDQA A0, A1 + CMPQ BX, $0x10 + VMOVDQA X0, X1 JB openAVX2ShortDone - - SUBQ $16, inl + SUBQ $0x10, BX // Load for hashing - polyAdd(0*8(inp)) - polyMulAVX2 + ADDQ (SI), R10 + ADCQ 8(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 
+ ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 // Load for decryption - VPXOR (inp), A0, T0 - VMOVDQU T0, (oup) - LEAQ (1*16)(inp), inp - LEAQ (1*16)(oup), oup - VPERM2I128 $0x11, AA0, AA0, AA0 - VMOVDQA A0, A1 + VPXOR (SI), X0, X12 + VMOVDQU X12, (DI) + LEAQ 16(SI), SI + LEAQ 16(DI), DI + VPERM2I128 $0x11, Y0, Y0, Y0 + VMOVDQA X0, X1 openAVX2ShortDone: VZEROUPPER JMP openSSETail16 -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 321 bytes openAVX2320: - // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks - VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 - VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 - MOVQ $10, itr2 + VMOVDQA Y0, Y5 + VMOVDQA Y14, Y9 + VMOVDQA Y12, Y13 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y0, Y6 + VMOVDQA Y14, Y10 + VMOVDQA Y12, Y8 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y14, Y7 + VMOVDQA Y12, Y11 + VMOVDQA Y4, Y15 + MOVQ $0x0000000a, R9 openAVX2320InnerCipherLoop: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 - DECQ itr2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, 
Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + DECQ R9 JNE openAVX2320InnerCipherLoop - - VMOVDQA ·chacha20Constants<>(SB), TT0 - VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 - VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 - VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 - VMOVDQA ·avx2IncMask<>(SB), TT0 - VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 - VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 - VPADDD TT3, DD2, DD2 + VMOVDQA ·chacha20Constants<>+0(SB), Y3 + VPADDD Y3, Y0, Y0 + VPADDD Y3, Y5, Y5 + VPADDD Y3, Y6, Y6 + VPADDD Y7, Y14, Y14 + VPADDD Y7, Y9, Y9 + VPADDD Y7, Y10, Y10 + VPADDD Y11, Y12, Y12 + VPADDD Y11, Y13, Y13 + VPADDD Y11, Y8, Y8 + VMOVDQA ·avx2IncMask<>+0(SB), Y3 + VPADDD Y15, Y4, Y4 + VPADDD Y3, Y15, Y15 + VPADDD Y15, Y1, Y1 + VPADDD Y3, Y15, Y15 + VPADDD Y15, Y2, Y2 // Clamp and store poly key - VPERM2I128 $0x02, AA0, BB0, TT0 - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 + VPERM2I128 $0x02, Y0, Y14, Y3 + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) // Stream for up to 320 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - VPERM2I128 $0x13, CC0, DD0, BB0 - VPERM2I128 $0x02, AA1, BB1, CC0 - VPERM2I128 $0x02, CC1, DD1, DD0 - VPERM2I128 $0x13, AA1, BB1, AA1 - VPERM2I128 $0x13, CC1, DD1, BB1 - VPERM2I128 $0x02, AA2, BB2, CC1 - VPERM2I128 $0x02, CC2, DD2, DD1 - VPERM2I128 $0x13, AA2, BB2, AA2 - VPERM2I128 $0x13, CC2, DD2, BB2 + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 + VPERM2I128 $0x02, Y5, Y9, Y12 + VPERM2I128 $0x02, Y13, Y1, Y4 + VPERM2I128 $0x13, Y5, Y9, Y5 + VPERM2I128 $0x13, Y13, Y1, Y9 + VPERM2I128 $0x02, Y6, Y10, Y13 + VPERM2I128 $0x02, Y8, Y2, Y1 + VPERM2I128 $0x13, Y6, Y10, Y6 + VPERM2I128 $0x13, Y8, Y2, Y10 JMP openAVX2ShortOpen -// ---------------------------------------------------------------------------- -// Special optimization for the last 128 bytes of ciphertext openAVX2Tail128: // Need to decrypt up to 128 bytes - prepare two blocks - VMOVDQA ·chacha20Constants<>(SB), AA1 - VMOVDQA state1StoreAVX2, BB1 - VMOVDQA state2StoreAVX2, CC1 - VMOVDQA ctr3StoreAVX2, DD1 - VPADDD ·avx2IncMask<>(SB), DD1, DD1 - VMOVDQA DD1, DD0 - - XORQ itr2, itr2 - MOVQ inl, itr1 - ANDQ $-16, itr1 - TESTQ itr1, itr1 - JE openAVX2Tail128LoopB + VMOVDQA ·chacha20Constants<>+0(SB), Y5 + VMOVDQA 32(BP), Y9 + VMOVDQA 64(BP), Y13 + VMOVDQA 192(BP), Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y1 + 
VMOVDQA Y1, Y4 + XORQ R9, R9 + MOVQ BX, CX + ANDQ $-16, CX + TESTQ CX, CX + JE openAVX2Tail128LoopB openAVX2Tail128LoopA: - // Perform ChaCha rounds, while hashing the remaining input - polyAdd(0(inp)(itr2*1)) - polyMulAVX2 + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 openAVX2Tail128LoopB: - ADDQ $16, itr2 - chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD1, DD1, DD1 - chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD1, DD1, DD1 - CMPQ itr2, itr1 - JB openAVX2Tail128LoopA - CMPQ itr2, $160 - JNE openAVX2Tail128LoopB - - VPADDD ·chacha20Constants<>(SB), AA1, AA1 - VPADDD state1StoreAVX2, BB1, BB1 - VPADDD state2StoreAVX2, CC1, CC1 - VPADDD DD0, DD1, DD1 - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + ADDQ $0x10, R9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y1, Y1, Y1 + CMPQ R9, CX + JB openAVX2Tail128LoopA + CMPQ R9, $0xa0 + JNE openAVX2Tail128LoopB + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD 32(BP), Y9, Y9 + VPADDD 64(BP), Y13, Y13 + VPADDD Y4, Y1, Y1 + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 openAVX2TailLoop: - CMPQ inl, $32 + CMPQ BX, $0x20 JB openAVX2Tail - SUBQ $32, inl + SUBQ $0x20, BX // Load for decryption - VPXOR (inp), AA0, AA0 - VMOVDQU AA0, (oup) - LEAQ (1*32)(inp), inp - LEAQ (1*32)(oup), oup - VMOVDQA BB0, AA0 - VMOVDQA CC0, BB0 - VMOVDQA DD0, CC0 + VPXOR (SI), Y0, Y0 + VMOVDQU Y0, (DI) + LEAQ 32(SI), SI + LEAQ 32(DI), DI + VMOVDQA Y14, Y0 + VMOVDQA Y12, Y14 + VMOVDQA Y4, Y12 JMP openAVX2TailLoop openAVX2Tail: - CMPQ inl, $16 - VMOVDQA A0, A1 + CMPQ BX, $0x10 + VMOVDQA X0, X1 JB openAVX2TailDone - SUBQ $16, inl + SUBQ $0x10, BX // Load for decryption - VPXOR (inp), A0, T0 - VMOVDQU T0, (oup) - LEAQ (1*16)(inp), inp - LEAQ (1*16)(oup), oup - VPERM2I128 $0x11, AA0, AA0, AA0 - VMOVDQA A0, A1 + VPXOR (SI), X0, X12 + VMOVDQU X12, (DI) + LEAQ 16(SI), SI + LEAQ 16(DI), DI + VPERM2I128 $0x11, Y0, Y0, Y0 + VMOVDQA X0, 
X1 openAVX2TailDone: VZEROUPPER JMP openSSETail16 -// ---------------------------------------------------------------------------- -// Special optimization for the last 256 bytes of ciphertext openAVX2Tail256: - // Need to decrypt up to 256 bytes - prepare four blocks - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA DD0, TT1 - VMOVDQA DD1, TT2 + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y4, Y7 + VMOVDQA Y1, Y11 // Compute the number of iterations that will hash data - MOVQ inl, tmpStoreAVX2 - MOVQ inl, itr1 - SUBQ $128, itr1 - SHRQ $4, itr1 - MOVQ $10, itr2 - CMPQ itr1, $10 - CMOVQGT itr2, itr1 - MOVQ inp, inl - XORQ itr2, itr2 + MOVQ BX, 224(BP) + MOVQ BX, CX + SUBQ $0x80, CX + SHRQ $0x04, CX + MOVQ $0x0000000a, R9 + CMPQ CX, $0x0a + CMOVQGT R9, CX + MOVQ SI, BX + XORQ R9, R9 openAVX2Tail256LoopA: - polyAdd(0(inl)) - polyMulAVX2 - LEAQ 16(inl), inl + ADDQ (BX), R10 + ADCQ 8(BX), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(BX), BX - // Perform ChaCha rounds, while hashing the remaining input openAVX2Tail256LoopB: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 - INCQ itr2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 - CMPQ itr2, itr1 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + INCQ R9 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD 
$0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + CMPQ R9, CX JB openAVX2Tail256LoopA + CMPQ R9, $0x0a + JNE openAVX2Tail256LoopB + MOVQ BX, R9 + SUBQ SI, BX + MOVQ BX, CX + MOVQ 224(BP), BX - CMPQ itr2, $10 - JNE openAVX2Tail256LoopB - - MOVQ inl, itr2 - SUBQ inp, inl - MOVQ inl, itr1 - MOVQ tmpStoreAVX2, inl - - // Hash the remainder of data (if any) openAVX2Tail256Hash: - ADDQ $16, itr1 - CMPQ itr1, inl - JGT openAVX2Tail256HashEnd - polyAdd (0(itr2)) - polyMulAVX2 - LEAQ 16(itr2), itr2 - JMP openAVX2Tail256Hash - -// Store 128 bytes safely, then go to store loop + ADDQ $0x10, CX + CMPQ CX, BX + JGT openAVX2Tail256HashEnd + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 + JMP openAVX2Tail256Hash + openAVX2Tail256HashEnd: - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 - VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 - VPERM2I128 $0x02, AA0, BB0, AA2; VPERM2I128 $0x02, CC0, DD0, BB2; VPERM2I128 $0x13, AA0, BB0, CC2; VPERM2I128 $0x13, CC0, DD0, DD2 - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - - VPXOR (0*32)(inp), AA2, AA2; VPXOR (1*32)(inp), BB2, BB2; VPXOR (2*32)(inp), CC2, CC2; VPXOR (3*32)(inp), DD2, DD2 - VMOVDQU AA2, (0*32)(oup); VMOVDQU BB2, (1*32)(oup); VMOVDQU CC2, (2*32)(oup); VMOVDQU DD2, (3*32)(oup) - LEAQ (4*32)(inp), inp - LEAQ (4*32)(oup), oup - SUBQ $4*32, inl - - JMP openAVX2TailLoop - -// ---------------------------------------------------------------------------- -// Special optimization for the last 384 bytes of ciphertext + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD Y7, Y4, Y4 + VPADDD Y11, Y1, Y1 + VPERM2I128 $0x02, Y0, Y14, Y6 + VPERM2I128 $0x02, Y12, Y4, Y10 + VPERM2I128 $0x13, Y0, Y14, Y8 + VPERM2I128 $0x13, Y12, Y4, Y2 + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR (SI), Y6, Y6 + VPXOR 32(SI), Y10, Y10 + 
VPXOR 64(SI), Y8, Y8 + VPXOR 96(SI), Y2, Y2 + VMOVDQU Y6, (DI) + VMOVDQU Y10, 32(DI) + VMOVDQU Y8, 64(DI) + VMOVDQU Y2, 96(DI) + LEAQ 128(SI), SI + LEAQ 128(DI), DI + SUBQ $0x80, BX + JMP openAVX2TailLoop + openAVX2Tail384: // Need to decrypt up to 384 bytes - prepare six blocks - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VPADDD ·avx2IncMask<>(SB), DD1, DD2 - VMOVDQA DD0, ctr0StoreAVX2 - VMOVDQA DD1, ctr1StoreAVX2 - VMOVDQA DD2, ctr2StoreAVX2 + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) // Compute the number of iterations that will hash two blocks of data - MOVQ inl, tmpStoreAVX2 - MOVQ inl, itr1 - SUBQ $256, itr1 - SHRQ $4, itr1 - ADDQ $6, itr1 - MOVQ $10, itr2 - CMPQ itr1, $10 - CMOVQGT itr2, itr1 - MOVQ inp, inl - XORQ itr2, itr2 - - // Perform ChaCha rounds, while hashing the remaining input + MOVQ BX, 224(BP) + MOVQ BX, CX + SUBQ $0x00000100, CX + SHRQ $0x04, CX + ADDQ $0x06, CX + MOVQ $0x0000000a, R9 + CMPQ CX, $0x0a + CMOVQGT R9, CX + MOVQ SI, BX + XORQ R9, R9 + openAVX2Tail384LoopB: - polyAdd(0(inl)) - polyMulAVX2 - LEAQ 16(inl), inl + ADDQ (BX), R10 + ADCQ 8(BX), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(BX), BX openAVX2Tail384LoopA: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 - polyAdd(0(inl)) - polyMulAVX2 - LEAQ 16(inl), inl - INCQ itr2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 - - CMPQ itr2, itr1 - JB openAVX2Tail384LoopB - - CMPQ itr2, $10 - JNE openAVX2Tail384LoopA - - MOVQ inl, itr2 - SUBQ inp, inl - MOVQ inl, itr1 - MOVQ tmpStoreAVX2, inl + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + 
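[Editor's note] The old chachaQR/chachaQR_AVX2 macros are gone in the avo output; each quarter-round is now spelled out as a VPADDD/VPXOR/VPSHUFB (or VPSLLD+VPSRLD+VPXOR) run, with 0x0c/0x14 and 0x07/0x19 being the 12- and 7-bit rotates written as a shift pair (12+20 = 7+25 = 32). A plain-Go sketch of the quarter-round those runs compute on each lane (ours, not part of this file):

	import "math/bits"

	// quarterRound is the scalar ChaCha20 quarter-round; the vector code
	// above performs it on 4 (SSE) or 8 (AVX2) lanes at once.
	func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
		a += b
		d ^= a
		d = bits.RotateLeft32(d, 16)
		c += d
		b ^= c
		b = bits.RotateLeft32(b, 12)
		a += b
		d ^= a
		d = bits.RotateLeft32(d, 8)
		c += d
		b ^= c
		b = bits.RotateLeft32(b, 7)
		return a, b, c, d
	}
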
VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + ADDQ (BX), R10 + ADCQ 8(BX), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(BX), BX + INCQ R9 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + CMPQ R9, CX + JB openAVX2Tail384LoopB + CMPQ R9, $0x0a + JNE openAVX2Tail384LoopA + MOVQ BX, R9 + SUBQ SI, BX + MOVQ BX, CX + MOVQ 224(BP), BX openAVX2Tail384Hash: - ADDQ $16, itr1 - CMPQ itr1, inl - JGT openAVX2Tail384HashEnd - polyAdd(0(itr2)) - polyMulAVX2 - LEAQ 16(itr2), itr2 - JMP openAVX2Tail384Hash - -// Store 256 bytes safely, then go to store loop + ADDQ $0x10, CX + CMPQ CX, BX + JGT openAVX2Tail384HashEnd + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, 
R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 + JMP openAVX2Tail384Hash + openAVX2Tail384HashEnd: - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2 - VPERM2I128 $0x02, AA0, BB0, TT0; VPERM2I128 $0x02, CC0, DD0, TT1; VPERM2I128 $0x13, AA0, BB0, TT2; VPERM2I128 $0x13, CC0, DD0, TT3 - VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 - VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, TT0; VPERM2I128 $0x02, CC1, DD1, TT1; VPERM2I128 $0x13, AA1, BB1, TT2; VPERM2I128 $0x13, CC1, DD1, TT3 - VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 - VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) - VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - LEAQ (8*32)(inp), inp - LEAQ (8*32)(oup), oup - SUBQ $8*32, inl + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPERM2I128 $0x02, Y0, Y14, Y3 + VPERM2I128 $0x02, Y12, Y4, Y7 + VPERM2I128 $0x13, Y0, Y14, Y11 + VPERM2I128 $0x13, Y12, Y4, Y15 + VPXOR (SI), Y3, Y3 + VPXOR 32(SI), Y7, Y7 + VPXOR 64(SI), Y11, Y11 + VPXOR 96(SI), Y15, Y15 + VMOVDQU Y3, (DI) + VMOVDQU Y7, 32(DI) + VMOVDQU Y11, 64(DI) + VMOVDQU Y15, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y3 + VPERM2I128 $0x02, Y13, Y1, Y7 + VPERM2I128 $0x13, Y5, Y9, Y11 + VPERM2I128 $0x13, Y13, Y1, Y15 + VPXOR 128(SI), Y3, Y3 + VPXOR 160(SI), Y7, Y7 + VPXOR 192(SI), Y11, Y11 + VPXOR 224(SI), Y15, Y15 + VMOVDQU Y3, 128(DI) + VMOVDQU Y7, 160(DI) + VMOVDQU Y11, 192(DI) + VMOVDQU Y15, 224(DI) + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + LEAQ 256(SI), SI + LEAQ 256(DI), DI + SUBQ $0x00000100, BX JMP openAVX2TailLoop -// ---------------------------------------------------------------------------- -// Special optimization for the last 512 bytes of ciphertext openAVX2Tail512: - VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, 
CC3 - VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 - XORQ itr1, itr1 - MOVQ inp, itr2 + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + XORQ CX, CX + MOVQ SI, R9 openAVX2Tail512LoopB: - polyAdd(0(itr2)) - polyMulAVX2 - LEAQ (2*8)(itr2), itr2 + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 openAVX2Tail512LoopA: - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyAdd(0*8(itr2)) - polyMulAVX2 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; 
VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyAdd(2*8(itr2)) - polyMulAVX2 - LEAQ (4*8)(itr2), itr2 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 - INCQ itr1 - CMPQ itr1, $4 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + 
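[Editor's note] The inlined MULXQ/IMULQ/ADCQ blocks interleaved with the rounds above are the expanded polyAdd/polyMulAVX2 macros. Each block computes one Poly1305 update: absorb a 16-byte ciphertext block m with its 2^128 pad bit (the ADCQ $0x01 into the third limb), multiply by the clamped key half r, and reduce mod p = 2^130 - 5; the ANDQ $0x03 / SHRQ $0x02 pair is the fast reduction using 2^130 ≡ 5 (mod p). A math/big sketch, clarity over speed, names ours:

	import "math/big"

	// p1305 = 2^130 - 5, the Poly1305 prime.
	var p1305 = new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))

	// polyStep: h = (h + m) * r mod p, where m already carries the pad bit.
	func polyStep(h, r, m *big.Int) *big.Int {
		h.Add(h, m)
		h.Mul(h, r)
		return h.Mod(h, p1305)
	}
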
VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x0c, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + ADDQ 16(R9), R10 + ADCQ 24(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(R9), R9 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x04, Y3, Y3, Y3 + INCQ CX + CMPQ CX, $0x04 JLT openAVX2Tail512LoopB 
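[Editor's note] Why the 16- and 8-bit rotates use VPSHUFB against the ·rol16<>/·rol8<> tables while the 12- and 7-bit rotates need a shift pair: a rotation by a multiple of 8 bits is a pure byte permutation, so a single shuffle replaces the three-instruction shift/shift/xor pattern. Sketch of the equivalence for one 32-bit lane (function name ours; the index pattern 2,3,0,1 is what ·rol16<> repeats per 4-byte group):

	// rotl16ViaBytes shows that rotating a little-endian uint32 left by 16
	// is the byte shuffle the ·rol16<> table encodes.
	func rotl16ViaBytes(x uint32) uint32 {
		b := [4]byte{byte(x), byte(x >> 8), byte(x >> 16), byte(x >> 24)}
		return uint32(b[2]) | uint32(b[3])<<8 | uint32(b[0])<<16 | uint32(b[1])<<24
	}
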
- - CMPQ itr1, $10 - JNE openAVX2Tail512LoopA - - MOVQ inl, itr1 - SUBQ $384, itr1 - ANDQ $-16, itr1 + CMPQ CX, $0x0a + JNE openAVX2Tail512LoopA + MOVQ BX, CX + SUBQ $0x00000180, CX + ANDQ $-16, CX openAVX2Tail512HashLoop: - TESTQ itr1, itr1 + TESTQ CX, CX JE openAVX2Tail512HashEnd - polyAdd(0(itr2)) - polyMulAVX2 - LEAQ 16(itr2), itr2 - SUBQ $16, itr1 + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 + SUBQ $0x10, CX JMP openAVX2Tail512HashLoop openAVX2Tail512HashEnd: - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - VMOVDQA CC3, tmpStoreAVX2 - VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 - VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 - VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 - VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) - VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 - VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) - VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 - - LEAQ (12*32)(inp), inp - LEAQ (12*32)(oup), oup - SUBQ $12*32, inl - - JMP openAVX2TailLoop - -// ---------------------------------------------------------------------------- -// ---------------------------------------------------------------------------- -// func chacha20Poly1305Seal(dst, key, src, ad []byte) -TEXT ·chacha20Poly1305Seal(SB), 0, $288-96 - // For aligned stack access + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD ·chacha20Constants<>+0(SB), Y7, Y7 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 32(BP), Y11, Y11 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), 
Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 64(BP), Y15, Y15 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPADDD 192(BP), Y3, Y3 + VMOVDQA Y15, 224(BP) + VPERM2I128 $0x02, Y0, Y14, Y15 + VPERM2I128 $0x13, Y0, Y14, Y14 + VPERM2I128 $0x02, Y12, Y4, Y0 + VPERM2I128 $0x13, Y12, Y4, Y12 + VPXOR (SI), Y15, Y15 + VPXOR 32(SI), Y0, Y0 + VPXOR 64(SI), Y14, Y14 + VPXOR 96(SI), Y12, Y12 + VMOVDQU Y15, (DI) + VMOVDQU Y0, 32(DI) + VMOVDQU Y14, 64(DI) + VMOVDQU Y12, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR 128(SI), Y0, Y0 + VPXOR 160(SI), Y14, Y14 + VPXOR 192(SI), Y12, Y12 + VPXOR 224(SI), Y4, Y4 + VMOVDQU Y0, 128(DI) + VMOVDQU Y14, 160(DI) + VMOVDQU Y12, 192(DI) + VMOVDQU Y4, 224(DI) + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + VPXOR 256(SI), Y0, Y0 + VPXOR 288(SI), Y14, Y14 + VPXOR 320(SI), Y12, Y12 + VPXOR 352(SI), Y4, Y4 + VMOVDQU Y0, 256(DI) + VMOVDQU Y14, 288(DI) + VMOVDQU Y12, 320(DI) + VMOVDQU Y4, 352(DI) + VPERM2I128 $0x02, Y7, Y11, Y0 + VPERM2I128 $0x02, 224(BP), Y3, Y14 + VPERM2I128 $0x13, Y7, Y11, Y12 + VPERM2I128 $0x13, 224(BP), Y3, Y4 + LEAQ 384(SI), SI + LEAQ 384(DI), DI + SUBQ $0x00000180, BX + JMP openAVX2TailLoop + +DATA ·chacha20Constants<>+0(SB)/4, $0x61707865 +DATA ·chacha20Constants<>+4(SB)/4, $0x3320646e +DATA ·chacha20Constants<>+8(SB)/4, $0x79622d32 +DATA ·chacha20Constants<>+12(SB)/4, $0x6b206574 +DATA ·chacha20Constants<>+16(SB)/4, $0x61707865 +DATA ·chacha20Constants<>+20(SB)/4, $0x3320646e +DATA ·chacha20Constants<>+24(SB)/4, $0x79622d32 +DATA ·chacha20Constants<>+28(SB)/4, $0x6b206574 +GLOBL ·chacha20Constants<>(SB), RODATA|NOPTR, $32 + +DATA ·polyClampMask<>+0(SB)/8, $0x0ffffffc0fffffff +DATA ·polyClampMask<>+8(SB)/8, $0x0ffffffc0ffffffc +DATA ·polyClampMask<>+16(SB)/8, $0xffffffffffffffff +DATA ·polyClampMask<>+24(SB)/8, $0xffffffffffffffff +GLOBL ·polyClampMask<>(SB), RODATA|NOPTR, $32 + +DATA ·sseIncMask<>+0(SB)/8, $0x0000000000000001 +DATA ·sseIncMask<>+8(SB)/8, $0x0000000000000000 +GLOBL ·sseIncMask<>(SB), RODATA|NOPTR, $16 + +DATA ·andMask<>+0(SB)/8, $0x00000000000000ff +DATA ·andMask<>+8(SB)/8, $0x0000000000000000 +DATA ·andMask<>+16(SB)/8, $0x000000000000ffff +DATA ·andMask<>+24(SB)/8, $0x0000000000000000 +DATA ·andMask<>+32(SB)/8, $0x0000000000ffffff +DATA ·andMask<>+40(SB)/8, $0x0000000000000000 +DATA ·andMask<>+48(SB)/8, $0x00000000ffffffff +DATA ·andMask<>+56(SB)/8, $0x0000000000000000 +DATA ·andMask<>+64(SB)/8, $0x000000ffffffffff +DATA ·andMask<>+72(SB)/8, $0x0000000000000000 +DATA ·andMask<>+80(SB)/8, $0x0000ffffffffffff +DATA ·andMask<>+88(SB)/8, $0x0000000000000000 +DATA ·andMask<>+96(SB)/8, $0x00ffffffffffffff +DATA ·andMask<>+104(SB)/8, $0x0000000000000000 +DATA ·andMask<>+112(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+120(SB)/8, $0x0000000000000000 +DATA ·andMask<>+128(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+136(SB)/8, $0x00000000000000ff +DATA ·andMask<>+144(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+152(SB)/8, $0x000000000000ffff +DATA ·andMask<>+160(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+168(SB)/8, $0x0000000000ffffff +DATA ·andMask<>+176(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+184(SB)/8, $0x00000000ffffffff +DATA ·andMask<>+192(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+200(SB)/8, $0x000000ffffffffff +DATA ·andMask<>+208(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+216(SB)/8, $0x0000ffffffffffff +DATA ·andMask<>+224(SB)/8, 
$0xffffffffffffffff +DATA ·andMask<>+232(SB)/8, $0x00ffffffffffffff +GLOBL ·andMask<>(SB), RODATA|NOPTR, $240 + +DATA ·avx2InitMask<>+0(SB)/8, $0x0000000000000000 +DATA ·avx2InitMask<>+8(SB)/8, $0x0000000000000000 +DATA ·avx2InitMask<>+16(SB)/8, $0x0000000000000001 +DATA ·avx2InitMask<>+24(SB)/8, $0x0000000000000000 +GLOBL ·avx2InitMask<>(SB), RODATA|NOPTR, $32 + +DATA ·rol16<>+0(SB)/8, $0x0504070601000302 +DATA ·rol16<>+8(SB)/8, $0x0d0c0f0e09080b0a +DATA ·rol16<>+16(SB)/8, $0x0504070601000302 +DATA ·rol16<>+24(SB)/8, $0x0d0c0f0e09080b0a +GLOBL ·rol16<>(SB), RODATA|NOPTR, $32 + +DATA ·rol8<>+0(SB)/8, $0x0605040702010003 +DATA ·rol8<>+8(SB)/8, $0x0e0d0c0f0a09080b +DATA ·rol8<>+16(SB)/8, $0x0605040702010003 +DATA ·rol8<>+24(SB)/8, $0x0e0d0c0f0a09080b +GLOBL ·rol8<>(SB), RODATA|NOPTR, $32 + +DATA ·avx2IncMask<>+0(SB)/8, $0x0000000000000002 +DATA ·avx2IncMask<>+8(SB)/8, $0x0000000000000000 +DATA ·avx2IncMask<>+16(SB)/8, $0x0000000000000002 +DATA ·avx2IncMask<>+24(SB)/8, $0x0000000000000000 +GLOBL ·avx2IncMask<>(SB), RODATA|NOPTR, $32 + +// func chacha20Poly1305Seal(dst []byte, key []uint32, src []byte, ad []byte) +// Requires: AVX, AVX2, BMI2, CMOV, SSE2 +TEXT ·chacha20Poly1305Seal(SB), $288-96 MOVQ SP, BP - ADDQ $32, BP + ADDQ $0x20, BP ANDQ $-32, BP - MOVQ dst+0(FP), oup - MOVQ key+24(FP), keyp - MOVQ src+48(FP), inp - MOVQ src_len+56(FP), inl - MOVQ ad+72(FP), adp - - CMPB ·useAVX2(SB), $1 + MOVQ dst_base+0(FP), DI + MOVQ key_base+24(FP), R8 + MOVQ src_base+48(FP), SI + MOVQ src_len+56(FP), BX + MOVQ ad_base+72(FP), CX + CMPB ·useAVX2+0(SB), $0x01 JE chacha20Poly1305Seal_AVX2 // Special optimization, for very short buffers - CMPQ inl, $128 - JBE sealSSE128 // About 15% faster + CMPQ BX, $0x80 + JBE sealSSE128 // In the seal case - prepare the poly key + 3 blocks of stream in the first iteration - MOVOU ·chacha20Constants<>(SB), A0 - MOVOU (1*16)(keyp), B0 - MOVOU (2*16)(keyp), C0 - MOVOU (3*16)(keyp), D0 + MOVOU ·chacha20Constants<>+0(SB), X0 + MOVOU 16(R8), X3 + MOVOU 32(R8), X6 + MOVOU 48(R8), X9 // Store state on stack for future use - MOVO B0, state1Store - MOVO C0, state2Store + MOVO X3, 32(BP) + MOVO X6, 48(BP) // Load state, increment counter blocks - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X2, X12 + MOVO X5, X13 + MOVO X8, X14 + MOVO X11, X15 + PADDL ·sseIncMask<>+0(SB), X15 // Store counters - MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store - MOVQ $10, itr2 + MOVO X9, 80(BP) + MOVO X10, 96(BP) + MOVO X11, 112(BP) + MOVO X15, 128(BP) + MOVQ $0x0000000a, R9 sealSSEIntroLoop: - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left - shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left - shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left - - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - shiftB0Right; shiftB1Right; 
shiftB2Right; shiftB3Right - shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right - shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right - DECQ itr2 - JNE sealSSEIntroLoop + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x0c + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + 
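[Editor's note] The long BYTE $0x66, $0x0f, $0x3a, $0x0f, ... runs in this hunk are hand-encoded PALIGNR xmm,xmm,imm8 instructions — what the removed shiftB/C/D Left/Right macros expanded to. Between the column and diagonal half-rounds, rows b, c, d of each 4x4 ChaCha state are rotated by 1, 2, and 3 lanes so the diagonals line up. Sketch on a flat [16]uint32 state (ours, for illustration only):

	// diagonalize rotates rows 1..3 left by 1, 2, 3 lanes, turning the
	// column round's quarter-round pattern into the diagonal one.
	func diagonalize(s *[16]uint32) {
		s[4], s[5], s[6], s[7] = s[5], s[6], s[7], s[4]         // row b, by 1
		s[8], s[9], s[10], s[11] = s[10], s[11], s[8], s[9]     // row c, by 2
		s[12], s[13], s[14], s[15] = s[15], s[12], s[13], s[14] // row d, by 3
	}
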
PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x04 + DECQ R9 + JNE sealSSEIntroLoop // Add in the state - PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 - PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 - PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 - PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + PADDD ·chacha20Constants<>+0(SB), X0 + PADDD ·chacha20Constants<>+0(SB), X1 + PADDD ·chacha20Constants<>+0(SB), X2 + PADDD ·chacha20Constants<>+0(SB), X12 + PADDD 32(BP), X3 + PADDD 32(BP), X4 + PADDD 32(BP), X5 + PADDD 32(BP), X13 + PADDD 48(BP), X7 + PADDD 48(BP), X8 + PADDD 48(BP), X14 + PADDD 96(BP), X10 + PADDD 112(BP), X11 + PADDD 128(BP), X15 // Clamp and store the key - PAND ·polyClampMask<>(SB), A0 - MOVO A0, rStore - MOVO B0, sStore + PAND ·polyClampMask<>+0(SB), X0 + MOVO X0, (BP) + MOVO X3, 16(BP) // Hash AAD - MOVQ ad_len+80(FP), itr2 - CALL polyHashADInternal<>(SB) - - MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 - PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 - MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) - MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 - PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 - MOVOU A2, (4*16)(oup); MOVOU B2, (5*16)(oup); MOVOU C2, (6*16)(oup); MOVOU D2, (7*16)(oup) - - MOVQ $128, itr1 - SUBQ $128, inl - LEAQ 128(inp), inp - - MOVO A3, A1; MOVO B3, B1; MOVO C3, C1; MOVO D3, D1 - - CMPQ inl, $64 - JBE sealSSE128SealHash - - MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 - PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 - MOVOU A3, (8*16)(oup); MOVOU B3, (9*16)(oup); MOVOU C3, (10*16)(oup); MOVOU D3, (11*16)(oup) - - ADDQ $64, itr1 - SUBQ $64, inl - LEAQ 64(inp), inp - - MOVQ $2, itr1 - MOVQ $8, itr2 - - CMPQ inl, $64 - JBE sealSSETail64 - CMPQ inl, $128 - JBE sealSSETail128 - CMPQ inl, $192 - JBE sealSSETail192 + MOVQ ad_len+80(FP), R9 + CALL polyHashADInternal<>(SB) + MOVOU (SI), X0 + MOVOU 16(SI), X3 + MOVOU 32(SI), X6 + MOVOU 48(SI), X9 + PXOR X0, X1 + PXOR X3, X4 + PXOR X6, X7 + PXOR X9, X10 + MOVOU X1, (DI) + MOVOU X4, 16(DI) + MOVOU X7, 32(DI) + MOVOU X10, 48(DI) + MOVOU 64(SI), X0 + MOVOU 80(SI), X3 + MOVOU 96(SI), 
X6 + MOVOU 112(SI), X9 + PXOR X0, X2 + PXOR X3, X5 + PXOR X6, X8 + PXOR X9, X11 + MOVOU X2, 64(DI) + MOVOU X5, 80(DI) + MOVOU X8, 96(DI) + MOVOU X11, 112(DI) + MOVQ $0x00000080, CX + SUBQ $0x80, BX + LEAQ 128(SI), SI + MOVO X12, X1 + MOVO X13, X4 + MOVO X14, X7 + MOVO X15, X10 + CMPQ BX, $0x40 + JBE sealSSE128SealHash + MOVOU (SI), X0 + MOVOU 16(SI), X3 + MOVOU 32(SI), X6 + MOVOU 48(SI), X9 + PXOR X0, X12 + PXOR X3, X13 + PXOR X6, X14 + PXOR X9, X15 + MOVOU X12, 128(DI) + MOVOU X13, 144(DI) + MOVOU X14, 160(DI) + MOVOU X15, 176(DI) + ADDQ $0x40, CX + SUBQ $0x40, BX + LEAQ 64(SI), SI + MOVQ $0x00000002, CX + MOVQ $0x00000008, R9 + CMPQ BX, $0x40 + JBE sealSSETail64 + CMPQ BX, $0x80 + JBE sealSSETail128 + CMPQ BX, $0xc0 + JBE sealSSETail192 sealSSEMainLoop: // Load state, increment counter blocks - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X2, X12 + MOVO X5, X13 + MOVO X8, X14 + MOVO X11, X15 + PADDL ·sseIncMask<>+0(SB), X15 // Store counters - MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + MOVO X9, 80(BP) + MOVO X10, 96(BP) + MOVO X11, 112(BP) + MOVO X15, 128(BP) sealSSEInnerLoop: - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyAdd(0(oup)) - shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left - shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left - shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left - polyMulStage1 - polyMulStage2 - LEAQ (2*8)(oup), oup - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - polyMulStage3 - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyMulReduceStage - shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right - shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right - shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right - DECQ itr2 - JGE sealSSEInnerLoop - polyAdd(0(oup)) - polyMul - LEAQ (2*8)(oup), oup - DECQ itr1 - JG sealSSEInnerLoop + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + 
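[Editor's note] The PADDL ·sseIncMask<>+0(SB) chains above fan one saved counter row out into four consecutive block counters: ·sseIncMask is {1,0,0,0}, so each PADDL bumps the counter lane by one (the AVX2 ·avx2IncMask<> adds 2 because each 256-bit register holds two blocks). A rough scalar equivalent, names ours:

	// fanOutCounters derives four independent ChaCha20 block counters,
	// one apart, from the last counter used in the previous iteration.
	func fanOutCounters(ctr uint32) [4]uint32 {
		return [4]uint32{ctr + 1, ctr + 2, ctr + 3, ctr + 4}
	}
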
MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x0c + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + LEAQ 16(DI), DI + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + 
BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x04 + DECQ R9 + JGE sealSSEInnerLoop + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + DECQ CX + JG sealSSEInnerLoop // Add in the state - PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 - PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 - PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 - PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 - MOVO D3, tmpStore + PADDD ·chacha20Constants<>+0(SB), X0 + PADDD ·chacha20Constants<>+0(SB), X1 + PADDD ·chacha20Constants<>+0(SB), X2 + PADDD ·chacha20Constants<>+0(SB), X12 + PADDD 32(BP), X3 + PADDD 32(BP), X4 + PADDD 32(BP), X5 + PADDD 32(BP), X13 + PADDD 48(BP), X6 + PADDD 48(BP), X7 + PADDD 48(BP), X8 + PADDD 48(BP), X14 + PADDD 80(BP), X9 + PADDD 96(BP), X10 + PADDD 112(BP), X11 + PADDD 128(BP), X15 + MOVO X15, 64(BP) // Load - xor - store - MOVOU (0*16)(inp), D3; PXOR D3, A0 - MOVOU (1*16)(inp), D3; PXOR D3, B0 - MOVOU (2*16)(inp), D3; PXOR D3, C0 - MOVOU (3*16)(inp), D3; PXOR D3, D0 - MOVOU A0, (0*16)(oup) - MOVOU B0, (1*16)(oup) - MOVOU C0, (2*16)(oup) - MOVOU D0, (3*16)(oup) - MOVO tmpStore, D3 - - MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 - PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 - MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) - MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 - PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 - MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup) - ADDQ $192, inp - MOVQ $192, itr1 - SUBQ $192, inl - MOVO A3, A1 - MOVO B3, B1 - MOVO C3, C1 - MOVO D3, D1 - CMPQ inl, $64 + MOVOU (SI), X15 + PXOR X15, X0 + MOVOU 16(SI), X15 + PXOR X15, X3 + MOVOU 32(SI), X15 + PXOR X15, X6 + MOVOU 48(SI), X15 + PXOR X15, X9 + MOVOU X0, (DI) + MOVOU X3, 16(DI) + MOVOU X6, 32(DI) + MOVOU X9, 48(DI) + MOVO 64(BP), X15 + MOVOU 64(SI), 
X0 + MOVOU 80(SI), X3 + MOVOU 96(SI), X6 + MOVOU 112(SI), X9 + PXOR X0, X1 + PXOR X3, X4 + PXOR X6, X7 + PXOR X9, X10 + MOVOU X1, 64(DI) + MOVOU X4, 80(DI) + MOVOU X7, 96(DI) + MOVOU X10, 112(DI) + MOVOU 128(SI), X0 + MOVOU 144(SI), X3 + MOVOU 160(SI), X6 + MOVOU 176(SI), X9 + PXOR X0, X2 + PXOR X3, X5 + PXOR X6, X8 + PXOR X9, X11 + MOVOU X2, 128(DI) + MOVOU X5, 144(DI) + MOVOU X8, 160(DI) + MOVOU X11, 176(DI) + ADDQ $0xc0, SI + MOVQ $0x000000c0, CX + SUBQ $0xc0, BX + MOVO X12, X1 + MOVO X13, X4 + MOVO X14, X7 + MOVO X15, X10 + CMPQ BX, $0x40 JBE sealSSE128SealHash - MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 - PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 - MOVOU A3, (12*16)(oup); MOVOU B3, (13*16)(oup); MOVOU C3, (14*16)(oup); MOVOU D3, (15*16)(oup) - LEAQ 64(inp), inp - SUBQ $64, inl - MOVQ $6, itr1 - MOVQ $4, itr2 - CMPQ inl, $192 + MOVOU (SI), X0 + MOVOU 16(SI), X3 + MOVOU 32(SI), X6 + MOVOU 48(SI), X9 + PXOR X0, X12 + PXOR X3, X13 + PXOR X6, X14 + PXOR X9, X15 + MOVOU X12, 192(DI) + MOVOU X13, 208(DI) + MOVOU X14, 224(DI) + MOVOU X15, 240(DI) + LEAQ 64(SI), SI + SUBQ $0x40, BX + MOVQ $0x00000006, CX + MOVQ $0x00000004, R9 + CMPQ BX, $0xc0 JG sealSSEMainLoop - - MOVQ inl, itr1 - TESTQ inl, inl + MOVQ BX, CX + TESTQ BX, BX JE sealSSE128SealHash - MOVQ $6, itr1 - CMPQ inl, $64 + MOVQ $0x00000006, CX + CMPQ BX, $0x40 JBE sealSSETail64 - CMPQ inl, $128 + CMPQ BX, $0x80 JBE sealSSETail128 JMP sealSSETail192 -// ---------------------------------------------------------------------------- -// Special optimization for the last 64 bytes of plaintext sealSSETail64: - // Need to encrypt up to 64 bytes - prepare single block, hash 192 or 256 bytes - MOVO ·chacha20Constants<>(SB), A1 - MOVO state1Store, B1 - MOVO state2Store, C1 - MOVO ctr3Store, D1 - PADDL ·sseIncMask<>(SB), D1 - MOVO D1, ctr0Store + MOVO ·chacha20Constants<>+0(SB), X1 + MOVO 32(BP), X4 + MOVO 48(BP), X7 + MOVO 128(BP), X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X10, 80(BP) sealSSETail64LoopA: - // Perform ChaCha rounds, while hashing the previously encrypted ciphertext - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealSSETail64LoopB: - chachaQR(A1, B1, C1, D1, T1) - shiftB1Left; shiftC1Left; shiftD1Left - chachaQR(A1, B1, C1, D1, T1) - shiftB1Right; shiftC1Right; shiftD1Right - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - - DECQ itr1 - JG sealSSETail64LoopA - - DECQ itr2 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X13) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X13 + PSLLL $0x0c, X13 + PSRLL $0x14, X4 + PXOR X13, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X13) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X13 + PSLLL $0x07, X13 + PSRLL $0x19, X4 + PXOR X13, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 
+ BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X13) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X13 + PSLLL $0x0c, X13 + PSRLL $0x14, X4 + PXOR X13, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X13) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X13 + PSLLL $0x07, X13 + PSRLL $0x19, X4 + PXOR X13, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + DECQ CX + JG sealSSETail64LoopA + DECQ R9 JGE sealSSETail64LoopB - PADDL ·chacha20Constants<>(SB), A1 - PADDL state1Store, B1 - PADDL state2Store, C1 - PADDL ctr0Store, D1 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL 32(BP), X4 + PADDL 48(BP), X7 + PADDL 80(BP), X10 + JMP sealSSE128Seal - JMP sealSSE128Seal - -// ---------------------------------------------------------------------------- -// Special optimization for the last 128 bytes of plaintext sealSSETail128: - // Need to encrypt up to 128 bytes - prepare two blocks, hash 192 or 256 bytes - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 80(BP) + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X10, 96(BP) sealSSETail128LoopA: - // Perform ChaCha rounds, while hashing the previously encrypted ciphertext - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealSSETail128LoopB: - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) - shiftB0Left; shiftC0Left; shiftD0Left - shiftB1Left; shiftC1Left; shiftD1Left - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) - shiftB0Right; shiftC0Right; shiftD0Right - shiftB1Right; shiftC1Right; shiftD1Right - - DECQ itr1 - JG sealSSETail128LoopA - - DECQ itr2 
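[Reviewer note, not part of the vendored file: the repeated PADDD/PXOR/ROL16/ROL8 and PSLLL/PSRLL/PXOR runs in these seal tail loops are the SSE expansion of the scalar ChaCha20 quarter round, interleaved with Poly1305 hashing of the already-sealed ciphertext. A minimal plain-Go sketch of that quarter round, for orientation only (the function name is illustrative):

package sketch

import "math/bits"

// quarterRound mirrors what each PADDD/PXOR/rotate group above computes:
// the 16- and 8-bit rotations are done with ROL16/ROL8 byte shuffles,
// and each PSLLL/PSRLL/PXOR triple implements a 12- or 7-bit rotation.
func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
	a += b
	d = bits.RotateLeft32(d^a, 16)
	c += d
	b = bits.RotateLeft32(b^c, 12)
	a += b
	d = bits.RotateLeft32(d^a, 8)
	c += d
	b = bits.RotateLeft32(b^c, 7)
	return a, b, c, d
}
]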
- JGE sealSSETail128LoopB - - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 - PADDL state1Store, B0; PADDL state1Store, B1 - PADDL state2Store, C0; PADDL state2Store, C1 - PADDL ctr0Store, D0; PADDL ctr1Store, D1 - - MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 - PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 - MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) - - MOVQ $64, itr1 - LEAQ 64(inp), inp - SUBQ $64, inl - - JMP sealSSE128SealHash - -// ---------------------------------------------------------------------------- -// Special optimization for the last 192 bytes of plaintext + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + DECQ CX + JG sealSSETail128LoopA + DECQ R9 + JGE 
sealSSETail128LoopB + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL 32(BP), X3 + PADDL 32(BP), X4 + PADDL 48(BP), X6 + PADDL 48(BP), X7 + PADDL 80(BP), X9 + PADDL 96(BP), X10 + MOVOU (SI), X12 + MOVOU 16(SI), X13 + MOVOU 32(SI), X14 + MOVOU 48(SI), X15 + PXOR X12, X0 + PXOR X13, X3 + PXOR X14, X6 + PXOR X15, X9 + MOVOU X0, (DI) + MOVOU X3, 16(DI) + MOVOU X6, 32(DI) + MOVOU X9, 48(DI) + MOVQ $0x00000040, CX + LEAQ 64(SI), SI + SUBQ $0x40, BX + JMP sealSSE128SealHash + sealSSETail192: - // Need to encrypt up to 192 bytes - prepare three blocks, hash 192 or 256 bytes - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr2Store + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 80(BP) + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X10, 96(BP) + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X11, 112(BP) sealSSETail192LoopA: - // Perform ChaCha rounds, while hashing the previously encrypted ciphertext - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealSSETail192LoopB: - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Left; shiftC0Left; shiftD0Left - shiftB1Left; shiftC1Left; shiftD1Left - shiftB2Left; shiftC2Left; shiftD2Left - - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Right; shiftC0Right; shiftD0Right - shiftB1Right; shiftC1Right; shiftD1Right - shiftB2Right; shiftC2Right; shiftD2Right - - DECQ itr1 - JG sealSSETail192LoopA - - DECQ itr2 - JGE sealSSETail192LoopB - - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 - PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 - PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 - PADDL ctr0Store, D0; PADDL ctr1Store, D1; PADDL ctr2Store, D2 - - MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 - PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 - MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) - MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 - PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 - MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) - - MOVO A2, A1 - MOVO B2, B1 - MOVO C2, 
C1 - MOVO D2, D1 - MOVQ $128, itr1 - LEAQ 128(inp), inp - SUBQ $128, inl - - JMP sealSSE128SealHash - -// ---------------------------------------------------------------------------- -// Special seal optimization for buffers smaller than 129 bytes + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE 
$0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + DECQ CX + JG sealSSETail192LoopA + DECQ R9 + JGE sealSSETail192LoopB + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL ·chacha20Constants<>+0(SB), X2 + PADDL 32(BP), X3 + PADDL 32(BP), X4 + PADDL 32(BP), X5 + PADDL 48(BP), X6 + PADDL 48(BP), X7 + PADDL 48(BP), X8 + PADDL 80(BP), X9 + PADDL 96(BP), X10 + PADDL 112(BP), X11 + MOVOU (SI), X12 + MOVOU 16(SI), X13 + MOVOU 32(SI), X14 + MOVOU 48(SI), X15 + PXOR X12, X0 + PXOR X13, X3 + PXOR X14, X6 + PXOR X15, X9 + MOVOU X0, (DI) + MOVOU X3, 16(DI) + MOVOU X6, 32(DI) + MOVOU X9, 48(DI) + MOVOU 64(SI), X12 + MOVOU 80(SI), X13 + MOVOU 96(SI), X14 + MOVOU 112(SI), X15 + PXOR X12, X1 + PXOR X13, X4 + PXOR X14, X7 + PXOR X15, X10 + MOVOU X1, 64(DI) + MOVOU X4, 80(DI) + MOVOU X7, 96(DI) + MOVOU X10, 112(DI) + MOVO X2, X1 + MOVO X5, X4 + MOVO X8, X7 + MOVO X11, X10 + MOVQ $0x00000080, CX + LEAQ 128(SI), SI + SUBQ $0x80, BX + JMP sealSSE128SealHash + sealSSE128: - // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks - MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 - MOVQ $10, itr2 + MOVOU ·chacha20Constants<>+0(SB), X0 + MOVOU 16(R8), X3 + MOVOU 32(R8), X6 + MOVOU 48(R8), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X3, X13 + MOVO X6, X14 + MOVO X10, X15 + MOVQ $0x0000000a, R9 sealSSE128InnerCipherLoop: - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Left; shiftB1Left; shiftB2Left - shiftC0Left; shiftC1Left; shiftC2Left - shiftD0Left; shiftD1Left; shiftD2Left - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Right; shiftB1Right; shiftB2Right - shiftC0Right; shiftC1Right; shiftC2Right - shiftD0Right; shiftD1Right; shiftD2Right - DECQ itr2 - JNE sealSSE128InnerCipherLoop + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, 
X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + DECQ R9 + JNE sealSSE128InnerCipherLoop // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 - PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 - PADDL T2, C1; PADDL T2, C2 - PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 - PAND ·polyClampMask<>(SB), A0 - MOVOU A0, rStore - MOVOU B0, sStore + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL ·chacha20Constants<>+0(SB), X2 + PADDL X13, X3 + PADDL X13, X4 + PADDL X13, X5 + PADDL X14, X7 + PADDL X14, X8 + PADDL X15, X10 + PADDL ·sseIncMask<>+0(SB), X15 + PADDL X15, X11 + PAND ·polyClampMask<>+0(SB), X0 + MOVOU X0, (BP) + MOVOU X3, 16(BP) // Hash - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) - XORQ itr1, itr1 + XORQ CX, CX sealSSE128SealHash: - // itr1 holds the number of bytes encrypted but not yet hashed - CMPQ itr1, $16 - JB sealSSE128Seal - polyAdd(0(oup)) - polyMul - - SUBQ $16, itr1 - ADDQ $16, 
oup - - JMP sealSSE128SealHash + CMPQ CX, $0x10 + JB sealSSE128Seal + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + SUBQ $0x10, CX + ADDQ $0x10, DI + JMP sealSSE128SealHash sealSSE128Seal: - CMPQ inl, $16 + CMPQ BX, $0x10 JB sealSSETail - SUBQ $16, inl + SUBQ $0x10, BX // Load for decryption - MOVOU (inp), T0 - PXOR T0, A1 - MOVOU A1, (oup) - LEAQ (1*16)(inp), inp - LEAQ (1*16)(oup), oup + MOVOU (SI), X12 + PXOR X12, X1 + MOVOU X1, (DI) + LEAQ 16(SI), SI + LEAQ 16(DI), DI // Extract for hashing - MOVQ A1, t0 - PSRLDQ $8, A1 - MOVQ A1, t1 - ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 - polyMul + MOVQ X1, R13 + PSRLDQ $0x08, X1 + MOVQ X1, R14 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 // Shift the stream "left" - MOVO B1, A1 - MOVO C1, B1 - MOVO D1, C1 - MOVO A2, D1 - MOVO B2, A2 - MOVO C2, B2 - MOVO D2, C2 + MOVO X4, X1 + MOVO X7, X4 + MOVO X10, X7 + MOVO X2, X10 + MOVO X5, X2 + MOVO X8, X5 + MOVO X11, X8 JMP sealSSE128Seal sealSSETail: - TESTQ inl, inl + TESTQ BX, BX JE sealSSEFinalize // We can only load the PT one byte at a time to avoid read after end of buffer - MOVQ inl, itr2 - SHLQ $4, itr2 - LEAQ ·andMask<>(SB), t0 - MOVQ inl, itr1 - LEAQ -1(inp)(inl*1), inp - XORQ t2, t2 - XORQ t3, t3 + MOVQ BX, R9 + SHLQ $0x04, R9 + LEAQ ·andMask<>+0(SB), R13 + MOVQ BX, CX + LEAQ -1(SI)(BX*1), SI + XORQ R15, R15 + XORQ R8, R8 XORQ AX, AX sealSSETailLoadLoop: - SHLQ $8, t2, t3 - SHLQ $8, t2 - MOVB (inp), AX - XORQ AX, t2 - LEAQ -1(inp), inp - DECQ itr1 + SHLQ $0x08, R15, R8 + SHLQ $0x08, R15 + MOVB (SI), AX + XORQ AX, R15 + LEAQ -1(SI), SI + DECQ CX JNE sealSSETailLoadLoop - MOVQ t2, 0+tmpStore - MOVQ t3, 8+tmpStore - PXOR 0+tmpStore, A1 - MOVOU A1, (oup) - MOVOU -16(t0)(itr2*1), T0 - PAND T0, A1 - MOVQ A1, t0 - PSRLDQ $8, A1 - MOVQ A1, t1 - ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 - polyMul - - ADDQ inl, oup + MOVQ R15, 64(BP) + MOVQ R8, 72(BP) + PXOR 64(BP), X1 + MOVOU X1, (DI) + MOVOU -16(R13)(R9*1), X12 + PAND X12, X1 + MOVQ X1, R13 + PSRLDQ $0x08, X1 + MOVQ X1, R14 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, 
R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ BX, DI sealSSEFinalize: // Hash in the buffer lengths - ADDQ ad_len+80(FP), acc0 - ADCQ src_len+56(FP), acc1 - ADCQ $1, acc2 - polyMul + ADDQ ad_len+80(FP), R10 + ADCQ src_len+56(FP), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 // Final reduce - MOVQ acc0, t0 - MOVQ acc1, t1 - MOVQ acc2, t2 - SUBQ $-5, acc0 - SBBQ $-1, acc1 - SBBQ $3, acc2 - CMOVQCS t0, acc0 - CMOVQCS t1, acc1 - CMOVQCS t2, acc2 + MOVQ R10, R13 + MOVQ R11, R14 + MOVQ R12, R15 + SUBQ $-5, R10 + SBBQ $-1, R11 + SBBQ $0x03, R12 + CMOVQCS R13, R10 + CMOVQCS R14, R11 + CMOVQCS R15, R12 // Add in the "s" part of the key - ADDQ 0+sStore, acc0 - ADCQ 8+sStore, acc1 + ADDQ 16(BP), R10 + ADCQ 24(BP), R11 // Finally store the tag at the end of the message - MOVQ acc0, (0*8)(oup) - MOVQ acc1, (1*8)(oup) + MOVQ R10, (DI) + MOVQ R11, 8(DI) RET -// ---------------------------------------------------------------------------- -// ------------------------- AVX2 Code ---------------------------------------- chacha20Poly1305Seal_AVX2: VZEROUPPER - VMOVDQU ·chacha20Constants<>(SB), AA0 - BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 - BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 - BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4 - VPADDD ·avx2InitMask<>(SB), DD0, DD0 + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + BYTE $0xc4 + BYTE $0x42 + BYTE $0x7d + BYTE $0x5a + BYTE $0x70 + BYTE $0x10 + BYTE $0xc4 + BYTE $0x42 + BYTE $0x7d + BYTE $0x5a + BYTE $0x60 + BYTE $0x20 + BYTE $0xc4 + BYTE $0xc2 + BYTE $0x7d + BYTE $0x5a + BYTE $0x60 + BYTE $0x30 + VPADDD ·avx2InitMask<>+0(SB), Y4, Y4 // Special optimizations, for very short buffers - CMPQ inl, $192 - JBE seal192AVX2 // 33% faster - CMPQ inl, $320 - JBE seal320AVX2 // 17% faster + CMPQ BX, $0x000000c0 + JBE seal192AVX2 + CMPQ BX, $0x00000140 + JBE seal320AVX2 // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream - VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3; VMOVDQA BB0, state1StoreAVX2 - VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3; VMOVDQA CC0, state2StoreAVX2 - VPADDD ·avx2IncMask<>(SB), DD0, DD1; VMOVDQA DD0, ctr0StoreAVX2 - VPADDD ·avx2IncMask<>(SB), DD1, DD2; VMOVDQA DD1, ctr1StoreAVX2 - VPADDD ·avx2IncMask<>(SB), DD2, DD3; VMOVDQA DD2, ctr2StoreAVX2 - VMOVDQA DD3, ctr3StoreAVX2 - MOVQ $10, itr2 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA Y14, 32(BP) + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + 
VMOVDQA Y12, 64(BP) + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y4, 96(BP) + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y1, 128(BP) + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + MOVQ $0x0000000a, R9 sealAVX2IntroLoop: - VMOVDQA CC3, tmpStoreAVX2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) - VMOVDQA tmpStoreAVX2, CC3 - VMOVDQA CC1, tmpStoreAVX2 - chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) - VMOVDQA tmpStoreAVX2, CC1 - - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 - VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 - VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 - VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 - - VMOVDQA CC3, tmpStoreAVX2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) - VMOVDQA tmpStoreAVX2, CC3 - VMOVDQA CC1, tmpStoreAVX2 - chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) - VMOVDQA tmpStoreAVX2, CC1 - - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 - VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 - VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 - VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 - DECQ itr2 - JNE sealAVX2IntroLoop - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - - VPERM2I128 $0x13, CC0, DD0, CC0 // Stream bytes 96 - 127 - VPERM2I128 $0x02, AA0, BB0, DD0 // The Poly1305 key - VPERM2I128 $0x13, AA0, BB0, AA0 // Stream bytes 64 - 95 + VMOVDQA Y15, 224(BP) + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VMOVDQA 224(BP), Y15 + VMOVDQA Y13, 224(BP) + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD 
Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x0c, Y11, Y13 + VPSRLD $0x14, Y11, Y11 + VPXOR Y13, Y11, Y11 + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x07, Y11, Y13 + VPSRLD $0x19, Y11, Y11 + VPXOR Y13, Y11, Y11 + VMOVDQA 224(BP), Y13 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y3, Y3, Y3 + VMOVDQA Y15, 224(BP) + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VMOVDQA 224(BP), Y15 + VMOVDQA Y13, 224(BP) + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x0c, Y11, Y13 + VPSRLD $0x14, Y11, Y11 + VPXOR Y13, Y11, Y11 + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x07, Y11, Y13 + VPSRLD $0x19, Y11, Y11 + VPXOR Y13, Y11, Y11 + VMOVDQA 224(BP), Y13 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y3, Y3, Y3 + DECQ R9 + JNE sealAVX2IntroLoop + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD ·chacha20Constants<>+0(SB), Y7, Y7 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 32(BP), Y11, Y11 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 64(BP), Y15, Y15 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPADDD 192(BP), Y3, Y3 + VPERM2I128 $0x13, Y12, Y4, Y12 + VPERM2I128 $0x02, Y0, Y14, Y4 + VPERM2I128 $0x13, Y0, Y14, Y0 // Clamp and store poly key - VPAND ·polyClampMask<>(SB), DD0, DD0 - VMOVDQA DD0, rsStoreAVX2 + VPAND ·polyClampMask<>+0(SB), Y4, Y4 + VMOVDQA Y4, (BP) // Hash AD - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) // Can store at least 320 bytes - VPXOR (0*32)(inp), AA0, AA0 - VPXOR 
(1*32)(inp), CC0, CC0 - VMOVDQU AA0, (0*32)(oup) - VMOVDQU CC0, (1*32)(oup) - - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (2*32)(inp), AA0, AA0; VPXOR (3*32)(inp), BB0, BB0; VPXOR (4*32)(inp), CC0, CC0; VPXOR (5*32)(inp), DD0, DD0 - VMOVDQU AA0, (2*32)(oup); VMOVDQU BB0, (3*32)(oup); VMOVDQU CC0, (4*32)(oup); VMOVDQU DD0, (5*32)(oup) - VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (6*32)(inp), AA0, AA0; VPXOR (7*32)(inp), BB0, BB0; VPXOR (8*32)(inp), CC0, CC0; VPXOR (9*32)(inp), DD0, DD0 - VMOVDQU AA0, (6*32)(oup); VMOVDQU BB0, (7*32)(oup); VMOVDQU CC0, (8*32)(oup); VMOVDQU DD0, (9*32)(oup) - - MOVQ $320, itr1 - SUBQ $320, inl - LEAQ 320(inp), inp - - VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, CC3, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, CC3, DD3, DD0 - CMPQ inl, $128 + VPXOR (SI), Y0, Y0 + VPXOR 32(SI), Y12, Y12 + VMOVDQU Y0, (DI) + VMOVDQU Y12, 32(DI) + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR 64(SI), Y0, Y0 + VPXOR 96(SI), Y14, Y14 + VPXOR 128(SI), Y12, Y12 + VPXOR 160(SI), Y4, Y4 + VMOVDQU Y0, 64(DI) + VMOVDQU Y14, 96(DI) + VMOVDQU Y12, 128(DI) + VMOVDQU Y4, 160(DI) + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + VPXOR 192(SI), Y0, Y0 + VPXOR 224(SI), Y14, Y14 + VPXOR 256(SI), Y12, Y12 + VPXOR 288(SI), Y4, Y4 + VMOVDQU Y0, 192(DI) + VMOVDQU Y14, 224(DI) + VMOVDQU Y12, 256(DI) + VMOVDQU Y4, 288(DI) + MOVQ $0x00000140, CX + SUBQ $0x00000140, BX + LEAQ 320(SI), SI + VPERM2I128 $0x02, Y7, Y11, Y0 + VPERM2I128 $0x02, Y15, Y3, Y14 + VPERM2I128 $0x13, Y7, Y11, Y12 + VPERM2I128 $0x13, Y15, Y3, Y4 + CMPQ BX, $0x80 JBE sealAVX2SealHash - - VPXOR (0*32)(inp), AA0, AA0; VPXOR (1*32)(inp), BB0, BB0; VPXOR (2*32)(inp), CC0, CC0; VPXOR (3*32)(inp), DD0, DD0 - VMOVDQU AA0, (10*32)(oup); VMOVDQU BB0, (11*32)(oup); VMOVDQU CC0, (12*32)(oup); VMOVDQU DD0, (13*32)(oup) - SUBQ $128, inl - LEAQ 128(inp), inp - - MOVQ $8, itr1 - MOVQ $2, itr2 - - CMPQ inl, $128 - JBE sealAVX2Tail128 - CMPQ inl, $256 - JBE sealAVX2Tail256 - CMPQ inl, $384 - JBE sealAVX2Tail384 - CMPQ inl, $512 - JBE sealAVX2Tail512 + VPXOR (SI), Y0, Y0 + VPXOR 32(SI), Y14, Y14 + VPXOR 64(SI), Y12, Y12 + VPXOR 96(SI), Y4, Y4 + VMOVDQU Y0, 320(DI) + VMOVDQU Y14, 352(DI) + VMOVDQU Y12, 384(DI) + VMOVDQU Y4, 416(DI) + SUBQ $0x80, BX + LEAQ 128(SI), SI + MOVQ $0x00000008, CX + MOVQ $0x00000002, R9 + CMPQ BX, $0x80 + JBE sealAVX2Tail128 + CMPQ BX, $0x00000100 + JBE sealAVX2Tail256 + CMPQ BX, $0x00000180 + JBE sealAVX2Tail384 + CMPQ BX, $0x00000200 + JBE sealAVX2Tail512 // We have 448 bytes to hash, but main loop hashes 512 bytes at a time - perform some rounds, before the main loop - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 - - VMOVDQA CC3, tmpStoreAVX2 - 
chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) - VMOVDQA tmpStoreAVX2, CC3 - VMOVDQA CC1, tmpStoreAVX2 - chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) - VMOVDQA tmpStoreAVX2, CC1 - - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 - VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 - VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 - VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 - - VMOVDQA CC3, tmpStoreAVX2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) - VMOVDQA tmpStoreAVX2, CC3 - VMOVDQA CC1, tmpStoreAVX2 - chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) - VMOVDQA tmpStoreAVX2, CC1 - - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 - VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 - VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 - VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - - SUBQ $16, oup // Adjust the pointer - MOVQ $9, itr1 - JMP sealAVX2InternalLoopStart + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + VMOVDQA Y15, 224(BP) + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + 
VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VMOVDQA 224(BP), Y15 + VMOVDQA Y13, 224(BP) + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x0c, Y11, Y13 + VPSRLD $0x14, Y11, Y11 + VPXOR Y13, Y11, Y11 + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x07, Y11, Y13 + VPSRLD $0x19, Y11, Y11 + VPXOR Y13, Y11, Y11 + VMOVDQA 224(BP), Y13 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y3, Y3, Y3 + VMOVDQA Y15, 224(BP) + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VMOVDQA 224(BP), Y15 + VMOVDQA Y13, 224(BP) + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x0c, Y11, Y13 + VPSRLD $0x14, Y11, Y11 + VPXOR Y13, Y11, Y11 + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x07, Y11, Y13 + VPSRLD $0x19, Y11, Y11 + VPXOR Y13, Y11, Y11 + VMOVDQA 224(BP), Y13 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR 
Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + SUBQ $0x10, DI + MOVQ $0x00000009, CX + JMP sealAVX2InternalLoopStart sealAVX2MainLoop: - // Load state, increment counter blocks, store the incremented counters - VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 - VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 - MOVQ $10, itr1 + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + MOVQ $0x0000000a, CX sealAVX2InternalLoop: - polyAdd(0*8(oup)) - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - polyMulStage1_AVX2 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - polyMulStage2_AVX2 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyMulStage3_AVX2 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulReduceStage + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + 
MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 sealAVX2InternalLoopStart: - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - polyAdd(2*8(oup)) - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - polyMulStage1_AVX2 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulStage2_AVX2 - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - polyMulStage3_AVX2 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - polyMulReduceStage - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyAdd(4*8(oup)) - LEAQ (6*8)(oup), oup - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulStage1_AVX2 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - polyMulStage2_AVX2 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - polyMulStage3_AVX2 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulReduceStage - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 - DECQ itr1 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 
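[Reviewer note, not part of the vendored file: the MOVQ/ANDQ $0x03/ANDQ $-4/SHRQ $0x02 block that follows each MULXQ multiply in this AVX2 loop is the standard Poly1305 partial reduction modulo 2^130 - 5: the bits of the h*r product at and above 2^130 are folded back in multiplied by 5, using x*5 = x + x>>2 on a value whose low two bits are already clear. A minimal Go sketch of the same fold, for orientation only (limb names are illustrative):

package sketch

import "math/bits"

// polyReduce folds the high bits of the four-limb h*r product d0..d3
// back into a 130-bit accumulator, using
// h mod (2^130 - 5) == (h mod 2^130) + 5*(h >> 130).
func polyReduce(d0, d1, d2, d3 uint64) (h0, h1, h2 uint64) {
	h0, h1 = d0, d1
	h2 = d2 & 3 // bits 128..129 stay in the accumulator

	// c = 4*(h >> 130): the discarded high bits with the low two bits clear.
	c0 := d2 &^ 3
	c1 := d3

	// h += c, i.e. add 4*(h >> 130).
	var carry uint64
	h0, carry = bits.Add64(h0, c0, 0)
	h1, carry = bits.Add64(h1, c1, carry)
	h2 += carry

	// h += c>>2, i.e. add 1*(h >> 130), completing the multiply by 5.
	h0, carry = bits.Add64(h0, c0>>2|c1<<62, 0)
	h1, carry = bits.Add64(h1, c1>>2, carry)
	h2 += carry
	return
}
]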
+ VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x0c, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + ADDQ 32(DI), R10 + ADCQ 40(DI), R11 + ADCQ $0x01, R12 + LEAQ 48(DI), DI + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + 
VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x04, Y3, Y3, Y3 + DECQ CX JNE sealAVX2InternalLoop - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - VMOVDQA CC3, tmpStoreAVX2 + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD ·chacha20Constants<>+0(SB), Y7, Y7 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 32(BP), Y11, Y11 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 64(BP), Y15, Y15 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPADDD 192(BP), Y3, Y3 + VMOVDQA Y15, 224(BP) // We only hashed 480 of the 512 bytes available - hash the remaining 32 here - polyAdd(0*8(oup)) - polyMulAVX2 - LEAQ (4*8)(oup), oup - VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 - VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 - VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 - VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VPERM2I128 $0x02, Y0, Y14, Y15 + VPERM2I128 $0x13, Y0, Y14, Y14 + VPERM2I128 $0x02, Y12, Y4, Y0 + VPERM2I128 $0x13, Y12, Y4, Y12 + VPXOR (SI), Y15, Y15 + VPXOR 32(SI), Y0, Y0 + VPXOR 64(SI), Y14, Y14 + VPXOR 96(SI), Y12, Y12 + VMOVDQU Y15, (DI) + VMOVDQU Y0, 32(DI) + VMOVDQU Y14, 
64(DI) + VMOVDQU Y12, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR 128(SI), Y0, Y0 + VPXOR 160(SI), Y14, Y14 + VPXOR 192(SI), Y12, Y12 + VPXOR 224(SI), Y4, Y4 + VMOVDQU Y0, 128(DI) + VMOVDQU Y14, 160(DI) + VMOVDQU Y12, 192(DI) + VMOVDQU Y4, 224(DI) // and here - polyAdd(-2*8(oup)) - polyMulAVX2 - VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 - VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) - VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 - VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 - VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) - LEAQ (32*16)(inp), inp - SUBQ $(32*16), inl - CMPQ inl, $512 + ADDQ -16(DI), R10 + ADCQ -8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + VPXOR 256(SI), Y0, Y0 + VPXOR 288(SI), Y14, Y14 + VPXOR 320(SI), Y12, Y12 + VPXOR 352(SI), Y4, Y4 + VMOVDQU Y0, 256(DI) + VMOVDQU Y14, 288(DI) + VMOVDQU Y12, 320(DI) + VMOVDQU Y4, 352(DI) + VPERM2I128 $0x02, Y7, Y11, Y0 + VPERM2I128 $0x02, 224(BP), Y3, Y14 + VPERM2I128 $0x13, Y7, Y11, Y12 + VPERM2I128 $0x13, 224(BP), Y3, Y4 + VPXOR 384(SI), Y0, Y0 + VPXOR 416(SI), Y14, Y14 + VPXOR 448(SI), Y12, Y12 + VPXOR 480(SI), Y4, Y4 + VMOVDQU Y0, 384(DI) + VMOVDQU Y14, 416(DI) + VMOVDQU Y12, 448(DI) + VMOVDQU Y4, 480(DI) + LEAQ 512(SI), SI + SUBQ $0x00000200, BX + CMPQ BX, $0x00000200 JG sealAVX2MainLoop // Tail can only hash 480 bytes - polyAdd(0*8(oup)) - polyMulAVX2 - polyAdd(2*8(oup)) - polyMulAVX2 - LEAQ 32(oup), oup - - MOVQ $10, itr1 - MOVQ $0, itr2 - CMPQ inl, $128 - JBE sealAVX2Tail128 - CMPQ inl, $256 - JBE sealAVX2Tail256 - CMPQ inl, $384 - JBE sealAVX2Tail384 - JMP sealAVX2Tail512 - -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 193 bytes + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ 16(DI), R10 + ADCQ 
24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + MOVQ $0x0000000a, CX + MOVQ $0x00000000, R9 + CMPQ BX, $0x80 + JBE sealAVX2Tail128 + CMPQ BX, $0x00000100 + JBE sealAVX2Tail256 + CMPQ BX, $0x00000180 + JBE sealAVX2Tail384 + JMP sealAVX2Tail512 + seal192AVX2: - // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks - VMOVDQA AA0, AA1 - VMOVDQA BB0, BB1 - VMOVDQA CC0, CC1 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA AA0, AA2 - VMOVDQA BB0, BB2 - VMOVDQA CC0, CC2 - VMOVDQA DD0, DD2 - VMOVDQA DD1, TT3 - MOVQ $10, itr2 + VMOVDQA Y0, Y5 + VMOVDQA Y14, Y9 + VMOVDQA Y12, Y13 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y0, Y6 + VMOVDQA Y14, Y10 + VMOVDQA Y12, Y8 + VMOVDQA Y4, Y2 + VMOVDQA Y1, Y15 + MOVQ $0x0000000a, R9 sealAVX2192InnerCipherLoop: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 - DECQ itr2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR 
$0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + DECQ R9 JNE sealAVX2192InnerCipherLoop - VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 - VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 - VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 - VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1 - VPERM2I128 $0x02, AA0, BB0, TT0 + VPADDD Y6, Y0, Y0 + VPADDD Y6, Y5, Y5 + VPADDD Y10, Y14, Y14 + VPADDD Y10, Y9, Y9 + VPADDD Y8, Y12, Y12 + VPADDD Y8, Y13, Y13 + VPADDD Y2, Y4, Y4 + VPADDD Y15, Y1, Y1 + VPERM2I128 $0x02, Y0, Y14, Y3 // Clamp and store poly key - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) // Stream for up to 192 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - VPERM2I128 $0x13, CC0, DD0, BB0 - VPERM2I128 $0x02, AA1, BB1, CC0 - VPERM2I128 $0x02, CC1, DD1, DD0 - VPERM2I128 $0x13, AA1, BB1, AA1 - VPERM2I128 $0x13, CC1, DD1, BB1 + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 + VPERM2I128 $0x02, Y5, Y9, Y12 + VPERM2I128 $0x02, Y13, Y1, Y4 + VPERM2I128 $0x13, Y5, Y9, Y5 + VPERM2I128 $0x13, Y13, Y1, Y9 sealAVX2ShortSeal: // Hash aad - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) - XORQ itr1, itr1 + XORQ CX, CX sealAVX2SealHash: // itr1 holds the number of bytes encrypted but not yet hashed - CMPQ itr1, $16 - JB sealAVX2ShortSealLoop - polyAdd(0(oup)) - polyMul - SUBQ $16, itr1 - ADDQ $16, oup - JMP sealAVX2SealHash + CMPQ CX, $0x10 + JB sealAVX2ShortSealLoop + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + SUBQ $0x10, CX + ADDQ $0x10, DI + JMP sealAVX2SealHash sealAVX2ShortSealLoop: - CMPQ inl, $32 + CMPQ BX, $0x20 JB sealAVX2ShortTail32 - SUBQ $32, inl + SUBQ $0x20, BX // Load for encryption - VPXOR (inp), AA0, AA0 - VMOVDQU AA0, (oup) - LEAQ (1*32)(inp), inp + VPXOR (SI), Y0, Y0 + VMOVDQU Y0, (DI) + LEAQ 32(SI), SI // Now can hash - polyAdd(0*8(oup)) - polyMulAVX2 - polyAdd(2*8(oup)) - polyMulAVX2 - LEAQ (1*32)(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + 
MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI // Shift stream left - VMOVDQA BB0, AA0 - VMOVDQA CC0, BB0 - VMOVDQA DD0, CC0 - VMOVDQA AA1, DD0 - VMOVDQA BB1, AA1 - VMOVDQA CC1, BB1 - VMOVDQA DD1, CC1 - VMOVDQA AA2, DD1 - VMOVDQA BB2, AA2 + VMOVDQA Y14, Y0 + VMOVDQA Y12, Y14 + VMOVDQA Y4, Y12 + VMOVDQA Y5, Y4 + VMOVDQA Y9, Y5 + VMOVDQA Y13, Y9 + VMOVDQA Y1, Y13 + VMOVDQA Y6, Y1 + VMOVDQA Y10, Y6 JMP sealAVX2ShortSealLoop sealAVX2ShortTail32: - CMPQ inl, $16 - VMOVDQA A0, A1 + CMPQ BX, $0x10 + VMOVDQA X0, X1 JB sealAVX2ShortDone - - SUBQ $16, inl + SUBQ $0x10, BX // Load for encryption - VPXOR (inp), A0, T0 - VMOVDQU T0, (oup) - LEAQ (1*16)(inp), inp + VPXOR (SI), X0, X12 + VMOVDQU X12, (DI) + LEAQ 16(SI), SI // Hash - polyAdd(0*8(oup)) - polyMulAVX2 - LEAQ (1*16)(oup), oup - VPERM2I128 $0x11, AA0, AA0, AA0 - VMOVDQA A0, A1 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + VPERM2I128 $0x11, Y0, Y0, Y0 + VMOVDQA X0, X1 sealAVX2ShortDone: VZEROUPPER JMP sealSSETail -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 321 bytes seal320AVX2: - // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks - VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 - VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 - MOVQ $10, itr2 + VMOVDQA Y0, Y5 + VMOVDQA Y14, Y9 + VMOVDQA Y12, Y13 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y0, Y6 + VMOVDQA Y14, Y10 + VMOVDQA Y12, Y8 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y14, Y7 + VMOVDQA Y12, Y11 + VMOVDQA Y4, Y15 + MOVQ $0x0000000a, R9 sealAVX2320InnerCipherLoop: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 - DECQ itr2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 
+ VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + DECQ R9 JNE sealAVX2320InnerCipherLoop - - VMOVDQA ·chacha20Constants<>(SB), TT0 - VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 - VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 - VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 - VMOVDQA ·avx2IncMask<>(SB), TT0 - VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 - VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 - VPADDD TT3, DD2, DD2 + VMOVDQA ·chacha20Constants<>+0(SB), Y3 + VPADDD Y3, Y0, Y0 + VPADDD Y3, Y5, Y5 + VPADDD Y3, Y6, Y6 + VPADDD Y7, Y14, Y14 + VPADDD Y7, Y9, Y9 + VPADDD Y7, Y10, Y10 + VPADDD Y11, Y12, Y12 + VPADDD Y11, Y13, Y13 + VPADDD Y11, Y8, Y8 + VMOVDQA ·avx2IncMask<>+0(SB), Y3 + VPADDD Y15, Y4, Y4 + VPADDD Y3, Y15, Y15 + VPADDD Y15, Y1, Y1 + VPADDD Y3, Y15, Y15 + VPADDD Y15, Y2, Y2 // Clamp and store poly key - VPERM2I128 $0x02, AA0, BB0, TT0 - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 + VPERM2I128 $0x02, Y0, Y14, Y3 + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) // Stream for up to 320 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - 
VPERM2I128 $0x13, CC0, DD0, BB0 - VPERM2I128 $0x02, AA1, BB1, CC0 - VPERM2I128 $0x02, CC1, DD1, DD0 - VPERM2I128 $0x13, AA1, BB1, AA1 - VPERM2I128 $0x13, CC1, DD1, BB1 - VPERM2I128 $0x02, AA2, BB2, CC1 - VPERM2I128 $0x02, CC2, DD2, DD1 - VPERM2I128 $0x13, AA2, BB2, AA2 - VPERM2I128 $0x13, CC2, DD2, BB2 + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 + VPERM2I128 $0x02, Y5, Y9, Y12 + VPERM2I128 $0x02, Y13, Y1, Y4 + VPERM2I128 $0x13, Y5, Y9, Y5 + VPERM2I128 $0x13, Y13, Y1, Y9 + VPERM2I128 $0x02, Y6, Y10, Y13 + VPERM2I128 $0x02, Y8, Y2, Y1 + VPERM2I128 $0x13, Y6, Y10, Y6 + VPERM2I128 $0x13, Y8, Y2, Y10 JMP sealAVX2ShortSeal -// ---------------------------------------------------------------------------- -// Special optimization for the last 128 bytes of ciphertext sealAVX2Tail128: - // Need to decrypt up to 128 bytes - prepare two blocks - // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed - // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed - VMOVDQA ·chacha20Constants<>(SB), AA0 - VMOVDQA state1StoreAVX2, BB0 - VMOVDQA state2StoreAVX2, CC0 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0 - VMOVDQA DD0, DD1 + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA 32(BP), Y14 + VMOVDQA 64(BP), Y12 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VMOVDQA Y4, Y1 sealAVX2Tail128LoopA: - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealAVX2Tail128LoopB: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) - polyAdd(0(oup)) - polyMul - VPALIGNR $4, BB0, BB0, BB0 - VPALIGNR $8, CC0, CC0, CC0 - VPALIGNR $12, DD0, DD0, DD0 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) - polyAdd(16(oup)) - polyMul - LEAQ 32(oup), oup - VPALIGNR $12, BB0, BB0, BB0 - VPALIGNR $8, CC0, CC0, CC0 - VPALIGNR $4, DD0, DD0, DD0 - DECQ itr1 - JG sealAVX2Tail128LoopA - DECQ itr2 - JGE sealAVX2Tail128LoopB - - VPADDD ·chacha20Constants<>(SB), AA0, AA1 - VPADDD state1StoreAVX2, BB0, BB1 - VPADDD state2StoreAVX2, CC0, CC1 - VPADDD DD1, DD0, DD1 - - VPERM2I128 $0x02, AA1, BB1, AA0 - VPERM2I128 $0x02, CC1, DD1, BB0 - VPERM2I128 $0x13, AA1, BB1, CC0 - VPERM2I128 $0x13, CC1, DD1, DD0 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + 
ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x04, Y4, Y4, Y4 + DECQ CX + JG sealAVX2Tail128LoopA + DECQ R9 + JGE sealAVX2Tail128LoopB + VPADDD ·chacha20Constants<>+0(SB), Y0, Y5 + VPADDD 32(BP), Y14, Y9 + VPADDD 64(BP), Y12, Y13 + VPADDD Y1, Y4, Y1 + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 JMP sealAVX2ShortSealLoop -// ---------------------------------------------------------------------------- -// Special optimization for the last 256 bytes of ciphertext sealAVX2Tail256: - // Need to decrypt up to 256 bytes - prepare two blocks - // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed - // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA ·chacha20Constants<>(SB), AA1 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA state1StoreAVX2, BB1 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA state2StoreAVX2, CC1 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA DD0, TT1 - VMOVDQA DD1, TT2 + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA ·chacha20Constants<>+0(SB), Y5 + VMOVDQA 32(BP), Y14 + VMOVDQA 32(BP), Y9 + VMOVDQA 64(BP), Y12 + VMOVDQA 64(BP), Y13 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y4, Y7 + VMOVDQA Y1, Y11 sealAVX2Tail256LoopA: - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ 
R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealAVX2Tail256LoopB: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - polyAdd(0(oup)) - polyMul - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - polyAdd(16(oup)) - polyMul - LEAQ 32(oup), oup - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 - DECQ itr1 - JG sealAVX2Tail256LoopA - DECQ itr2 - JGE sealAVX2Tail256LoopB - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 - VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 - VPERM2I128 $0x02, AA0, BB0, TT0 - VPERM2I128 $0x02, CC0, DD0, TT1 - VPERM2I128 $0x13, AA0, BB0, TT2 - VPERM2I128 $0x13, CC0, DD0, TT3 - VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 - VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) - MOVQ $128, itr1 - LEAQ 128(inp), inp - SUBQ $128, inl - VPERM2I128 $0x02, AA1, BB1, AA0 - VPERM2I128 $0x02, CC1, DD1, BB0 - VPERM2I128 $0x13, AA1, BB1, CC0 - VPERM2I128 $0x13, CC1, DD1, DD0 - - JMP sealAVX2SealHash - -// ---------------------------------------------------------------------------- -// Special optimization for the last 384 bytes of ciphertext + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + 
VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + DECQ CX + JG sealAVX2Tail256LoopA + DECQ R9 + JGE sealAVX2Tail256LoopB + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD Y7, Y4, Y4 + VPADDD Y11, Y1, Y1 + VPERM2I128 $0x02, Y0, Y14, Y3 + VPERM2I128 $0x02, Y12, Y4, Y7 + VPERM2I128 $0x13, Y0, Y14, Y11 + VPERM2I128 $0x13, Y12, Y4, Y15 + VPXOR (SI), Y3, Y3 + VPXOR 32(SI), Y7, Y7 + VPXOR 64(SI), Y11, Y11 + VPXOR 96(SI), Y15, Y15 + VMOVDQU Y3, (DI) + VMOVDQU Y7, 32(DI) + VMOVDQU Y11, 64(DI) + VMOVDQU Y15, 96(DI) + MOVQ $0x00000080, CX + LEAQ 128(SI), SI + SUBQ $0x80, BX + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + JMP sealAVX2SealHash + sealAVX2Tail384: - // Need to decrypt up to 384 bytes - prepare two blocks - // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed - // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2 - VMOVDQA DD0, TT1; VMOVDQA DD1, TT2; VMOVDQA DD2, TT3 + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y4, Y7 + VMOVDQA Y1, Y11 + VMOVDQA Y2, Y15 sealAVX2Tail384LoopA: - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 
+ ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealAVX2Tail384LoopB: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - polyAdd(0(oup)) - polyMul - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - polyAdd(16(oup)) - polyMul - LEAQ 32(oup), oup - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 - DECQ itr1 - JG sealAVX2Tail384LoopA - DECQ itr2 - JGE sealAVX2Tail384LoopB - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 - VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1; VPADDD TT3, DD2, DD2 - VPERM2I128 $0x02, AA0, BB0, TT0 - VPERM2I128 $0x02, CC0, DD0, TT1 - VPERM2I128 $0x13, AA0, BB0, TT2 - VPERM2I128 $0x13, CC0, DD0, TT3 - VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 - VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, TT0 - VPERM2I128 $0x02, CC1, DD1, TT1 - VPERM2I128 $0x13, AA1, BB1, TT2 - VPERM2I128 $0x13, CC1, DD1, TT3 - VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 - VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) - MOVQ $256, itr1 - LEAQ 256(inp), inp - SUBQ $256, inl - VPERM2I128 $0x02, AA2, BB2, AA0 - VPERM2I128 $0x02, CC2, DD2, BB0 - VPERM2I128 $0x13, AA2, BB2, CC0 - VPERM2I128 $0x13, CC2, DD2, DD0 - - JMP sealAVX2SealHash - -// ---------------------------------------------------------------------------- -// Special optimization for the last 512 bytes of ciphertext + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + 
VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + DECQ CX + JG sealAVX2Tail384LoopA + DECQ R9 + JGE sealAVX2Tail384LoopB + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD 
32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD Y7, Y4, Y4 + VPADDD Y11, Y1, Y1 + VPADDD Y15, Y2, Y2 + VPERM2I128 $0x02, Y0, Y14, Y3 + VPERM2I128 $0x02, Y12, Y4, Y7 + VPERM2I128 $0x13, Y0, Y14, Y11 + VPERM2I128 $0x13, Y12, Y4, Y15 + VPXOR (SI), Y3, Y3 + VPXOR 32(SI), Y7, Y7 + VPXOR 64(SI), Y11, Y11 + VPXOR 96(SI), Y15, Y15 + VMOVDQU Y3, (DI) + VMOVDQU Y7, 32(DI) + VMOVDQU Y11, 64(DI) + VMOVDQU Y15, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y3 + VPERM2I128 $0x02, Y13, Y1, Y7 + VPERM2I128 $0x13, Y5, Y9, Y11 + VPERM2I128 $0x13, Y13, Y1, Y15 + VPXOR 128(SI), Y3, Y3 + VPXOR 160(SI), Y7, Y7 + VPXOR 192(SI), Y11, Y11 + VPXOR 224(SI), Y15, Y15 + VMOVDQU Y3, 128(DI) + VMOVDQU Y7, 160(DI) + VMOVDQU Y11, 192(DI) + VMOVDQU Y15, 224(DI) + MOVQ $0x00000100, CX + LEAQ 256(SI), SI + SUBQ $0x00000100, BX + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + JMP sealAVX2SealHash + sealAVX2Tail512: - // Need to decrypt up to 512 bytes - prepare two blocks - // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed - // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) sealAVX2Tail512LoopA: - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealAVX2Tail512LoopB: - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR 
CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyAdd(0*8(oup)) - polyMulAVX2 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyAdd(2*8(oup)) - polyMulAVX2 - LEAQ (4*8)(oup), oup - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 - - DECQ itr1 - JG 
sealAVX2Tail512LoopA - DECQ itr2 - JGE sealAVX2Tail512LoopB - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - VMOVDQA CC3, tmpStoreAVX2 - VPERM2I128 $0x02, AA0, BB0, CC3 - VPXOR (0*32)(inp), CC3, CC3 - VMOVDQU CC3, (0*32)(oup) - VPERM2I128 $0x02, CC0, DD0, CC3 - VPXOR (1*32)(inp), CC3, CC3 - VMOVDQU CC3, (1*32)(oup) - VPERM2I128 $0x13, AA0, BB0, CC3 - VPXOR (2*32)(inp), CC3, CC3 - VMOVDQU CC3, (2*32)(oup) - VPERM2I128 $0x13, CC0, DD0, CC3 - VPXOR (3*32)(inp), CC3, CC3 - VMOVDQU CC3, (3*32)(oup) - - VPERM2I128 $0x02, AA1, BB1, AA0 - VPERM2I128 $0x02, CC1, DD1, BB0 - VPERM2I128 $0x13, AA1, BB1, CC0 - VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 - VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) - - VPERM2I128 $0x02, AA2, BB2, AA0 - VPERM2I128 $0x02, CC2, DD2, BB0 - VPERM2I128 $0x13, AA2, BB2, CC0 - VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 - VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) - - MOVQ $384, itr1 - LEAQ 384(inp), inp - SUBQ $384, inl - VPERM2I128 $0x02, AA3, BB3, AA0 - VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0 - VPERM2I128 $0x13, AA3, BB3, CC0 - VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 - - JMP sealAVX2SealHash + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + 
VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x0c, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x04, Y3, Y3, Y3 + DECQ CX + JG sealAVX2Tail512LoopA + DECQ R9 + JGE 
sealAVX2Tail512LoopB
+	VPADDD     ·chacha20Constants<>+0(SB), Y0, Y0
+	VPADDD     ·chacha20Constants<>+0(SB), Y5, Y5
+	VPADDD     ·chacha20Constants<>+0(SB), Y6, Y6
+	VPADDD     ·chacha20Constants<>+0(SB), Y7, Y7
+	VPADDD     32(BP), Y14, Y14
+	VPADDD     32(BP), Y9, Y9
+	VPADDD     32(BP), Y10, Y10
+	VPADDD     32(BP), Y11, Y11
+	VPADDD     64(BP), Y12, Y12
+	VPADDD     64(BP), Y13, Y13
+	VPADDD     64(BP), Y8, Y8
+	VPADDD     64(BP), Y15, Y15
+	VPADDD     96(BP), Y4, Y4
+	VPADDD     128(BP), Y1, Y1
+	VPADDD     160(BP), Y2, Y2
+	VPADDD     192(BP), Y3, Y3
+	VMOVDQA    Y15, 224(BP)
+	VPERM2I128 $0x02, Y0, Y14, Y15
+	VPXOR      (SI), Y15, Y15
+	VMOVDQU    Y15, (DI)
+	VPERM2I128 $0x02, Y12, Y4, Y15
+	VPXOR      32(SI), Y15, Y15
+	VMOVDQU    Y15, 32(DI)
+	VPERM2I128 $0x13, Y0, Y14, Y15
+	VPXOR      64(SI), Y15, Y15
+	VMOVDQU    Y15, 64(DI)
+	VPERM2I128 $0x13, Y12, Y4, Y15
+	VPXOR      96(SI), Y15, Y15
+	VMOVDQU    Y15, 96(DI)
+	VPERM2I128 $0x02, Y5, Y9, Y0
+	VPERM2I128 $0x02, Y13, Y1, Y14
+	VPERM2I128 $0x13, Y5, Y9, Y12
+	VPERM2I128 $0x13, Y13, Y1, Y4
+	VPXOR      128(SI), Y0, Y0
+	VPXOR      160(SI), Y14, Y14
+	VPXOR      192(SI), Y12, Y12
+	VPXOR      224(SI), Y4, Y4
+	VMOVDQU    Y0, 128(DI)
+	VMOVDQU    Y14, 160(DI)
+	VMOVDQU    Y12, 192(DI)
+	VMOVDQU    Y4, 224(DI)
+	VPERM2I128 $0x02, Y6, Y10, Y0
+	VPERM2I128 $0x02, Y8, Y2, Y14
+	VPERM2I128 $0x13, Y6, Y10, Y12
+	VPERM2I128 $0x13, Y8, Y2, Y4
+	VPXOR      256(SI), Y0, Y0
+	VPXOR      288(SI), Y14, Y14
+	VPXOR      320(SI), Y12, Y12
+	VPXOR      352(SI), Y4, Y4
+	VMOVDQU    Y0, 256(DI)
+	VMOVDQU    Y14, 288(DI)
+	VMOVDQU    Y12, 320(DI)
+	VMOVDQU    Y4, 352(DI)
+	MOVQ       $0x00000180, CX
+	LEAQ       384(SI), SI
+	SUBQ       $0x00000180, BX
+	VPERM2I128 $0x02, Y7, Y11, Y0
+	VPERM2I128 $0x02, 224(BP), Y3, Y14
+	VPERM2I128 $0x13, Y7, Y11, Y12
+	VPERM2I128 $0x13, 224(BP), Y3, Y4
+	JMP        sealAVX2SealHash
diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/terraform/providers/google/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
index cda8e3edfd5..90ef6a241de 100644
--- a/terraform/providers/google/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
+++ b/terraform/providers/google/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
@@ -4,7 +4,7 @@
 
 // Package asn1 contains supporting types for parsing and building ASN.1
 // messages with the cryptobyte package.
-package asn1 // import "golang.org/x/crypto/cryptobyte/asn1"
+package asn1
 
 // Tag represents an ASN.1 identifier octet, consisting of a tag number
 // (indicating a type) and class (such as context-specific or constructed).
diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/cryptobyte/string.go b/terraform/providers/google/vendor/golang.org/x/crypto/cryptobyte/string.go
index 10692a8a315..4b0f8097f9e 100644
--- a/terraform/providers/google/vendor/golang.org/x/crypto/cryptobyte/string.go
+++ b/terraform/providers/google/vendor/golang.org/x/crypto/cryptobyte/string.go
@@ -15,7 +15,7 @@
 //
 // See the documentation and examples for the Builder and String types to get
 // started.
-package cryptobyte // import "golang.org/x/crypto/cryptobyte"
+package cryptobyte
 
 // String represents a string of bytes. It provides methods for parsing
 // fixed-length and length-prefixed values from it.
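Both the chacha20poly1305 hunk above and the poly1305 sum_amd64.s hunk below replace the old hand-written polyAdd/polyMul and POLY1305_ADD/POLY1305_MUL macros with generated code that inlines the same arithmetic, which is why the ANDQ $0x03 / ANDQ $-4 / SHRQ $0x02 / ADDQ / ADCQ cluster now recurs so often. That cluster is the Poly1305 reduction modulo p = 2^130 - 5: the bits of the raw product above 2^130, call them c, are folded back in as 4c + c = 5c, since 2^130 ≡ 5 (mod p). A rough Go sketch of that one step, with illustrative names not taken from the vendored sources:

package poly1305sketch

import "math/bits"

// reduce folds t = t3·2^192 + t2·2^128 + t1·2^64 + t0 (the raw product of the
// accumulator and r) back into a three-limb value mod p = 2^130 - 5.
func reduce(t0, t1, t2, t3 uint64) (h0, h1, h2 uint64) {
	h0, h1 = t0, t1
	h2 = t2 & 3 // the two bits of t that lie below 2^130 (ANDQ $0x03)

	// h += 4c: the limb pair (t2 &^ 3, t3) is exactly c << 2 (ANDQ $-4).
	var carry uint64
	h0, carry = bits.Add64(h0, t2&^3, 0)
	h1, carry = bits.Add64(h1, t3, carry)
	h2 += carry

	// h += c: (t3:t2) >> 2, matching SHRQ $0x02, R8, R15 / SHRQ $0x02, R8.
	h0, carry = bits.Add64(h0, t2>>2|t3<<62, 0)
	h1, carry = bits.Add64(h1, t3>>2, carry)
	h2 += carry
	return
}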
diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/hkdf/hkdf.go b/terraform/providers/google/vendor/golang.org/x/crypto/hkdf/hkdf.go index f4ded5fee2f..3bee66294ec 100644 --- a/terraform/providers/google/vendor/golang.org/x/crypto/hkdf/hkdf.go +++ b/terraform/providers/google/vendor/golang.org/x/crypto/hkdf/hkdf.go @@ -8,7 +8,7 @@ // HKDF is a cryptographic key derivation function (KDF) with the goal of // expanding limited input keying material into one or more cryptographically // strong secret keys. -package hkdf // import "golang.org/x/crypto/hkdf" +package hkdf import ( "crypto/hmac" diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s b/terraform/providers/google/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s index e0d3c647566..133757384b7 100644 --- a/terraform/providers/google/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s +++ b/terraform/providers/google/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s @@ -1,108 +1,93 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by command: go run sum_amd64_asm.go -out ../sum_amd64.s -pkg poly1305. DO NOT EDIT. //go:build gc && !purego -#include "textflag.h" - -#define POLY1305_ADD(msg, h0, h1, h2) \ - ADDQ 0(msg), h0; \ - ADCQ 8(msg), h1; \ - ADCQ $1, h2; \ - LEAQ 16(msg), msg - -#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \ - MOVQ r0, AX; \ - MULQ h0; \ - MOVQ AX, t0; \ - MOVQ DX, t1; \ - MOVQ r0, AX; \ - MULQ h1; \ - ADDQ AX, t1; \ - ADCQ $0, DX; \ - MOVQ r0, t2; \ - IMULQ h2, t2; \ - ADDQ DX, t2; \ - \ - MOVQ r1, AX; \ - MULQ h0; \ - ADDQ AX, t1; \ - ADCQ $0, DX; \ - MOVQ DX, h0; \ - MOVQ r1, t3; \ - IMULQ h2, t3; \ - MOVQ r1, AX; \ - MULQ h1; \ - ADDQ AX, t2; \ - ADCQ DX, t3; \ - ADDQ h0, t2; \ - ADCQ $0, t3; \ - \ - MOVQ t0, h0; \ - MOVQ t1, h1; \ - MOVQ t2, h2; \ - ANDQ $3, h2; \ - MOVQ t2, t0; \ - ANDQ $0xFFFFFFFFFFFFFFFC, t0; \ - ADDQ t0, h0; \ - ADCQ t3, h1; \ - ADCQ $0, h2; \ - SHRQ $2, t3, t2; \ - SHRQ $2, t3; \ - ADDQ t2, h0; \ - ADCQ t3, h1; \ - ADCQ $0, h2 - -// func update(state *[7]uint64, msg []byte) +// func update(state *macState, msg []byte) TEXT ·update(SB), $0-32 MOVQ state+0(FP), DI MOVQ msg_base+8(FP), SI MOVQ msg_len+16(FP), R15 - - MOVQ 0(DI), R8 // h0 - MOVQ 8(DI), R9 // h1 - MOVQ 16(DI), R10 // h2 - MOVQ 24(DI), R11 // r0 - MOVQ 32(DI), R12 // r1 - - CMPQ R15, $16 + MOVQ (DI), R8 + MOVQ 8(DI), R9 + MOVQ 16(DI), R10 + MOVQ 24(DI), R11 + MOVQ 32(DI), R12 + CMPQ R15, $0x10 JB bytes_between_0_and_15 loop: - POLY1305_ADD(SI, R8, R9, R10) + ADDQ (SI), R8 + ADCQ 8(SI), R9 + ADCQ $0x01, R10 + LEAQ 16(SI), SI multiply: - POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14) - SUBQ $16, R15 - CMPQ R15, $16 - JAE loop + MOVQ R11, AX + MULQ R8 + MOVQ AX, BX + MOVQ DX, CX + MOVQ R11, AX + MULQ R9 + ADDQ AX, CX + ADCQ $0x00, DX + MOVQ R11, R13 + IMULQ R10, R13 + ADDQ DX, R13 + MOVQ R12, AX + MULQ R8 + ADDQ AX, CX + ADCQ $0x00, DX + MOVQ DX, R8 + MOVQ R12, R14 + IMULQ R10, R14 + MOVQ R12, AX + MULQ R9 + ADDQ AX, R13 + ADCQ DX, R14 + ADDQ R8, R13 + ADCQ $0x00, R14 + MOVQ BX, R8 + MOVQ CX, R9 + MOVQ R13, R10 + ANDQ $0x03, R10 + MOVQ R13, BX + ANDQ $-4, BX + ADDQ BX, R8 + ADCQ R14, R9 + ADCQ $0x00, R10 + SHRQ $0x02, R14, R13 + SHRQ $0x02, R14 + ADDQ R13, R8 + ADCQ R14, R9 + ADCQ $0x00, R10 + SUBQ $0x10, R15 + CMPQ R15, $0x10 + JAE loop bytes_between_0_and_15: TESTQ R15, R15 JZ done - MOVQ $1, BX + 
MOVQ $0x00000001, BX XORQ CX, CX XORQ R13, R13 ADDQ R15, SI flush_buffer: - SHLQ $8, BX, CX - SHLQ $8, BX + SHLQ $0x08, BX, CX + SHLQ $0x08, BX MOVB -1(SI), R13 XORQ R13, BX DECQ SI DECQ R15 JNZ flush_buffer - ADDQ BX, R8 ADCQ CX, R9 - ADCQ $0, R10 - MOVQ $16, R15 + ADCQ $0x00, R10 + MOVQ $0x00000010, R15 JMP multiply done: - MOVQ R8, 0(DI) + MOVQ R8, (DI) MOVQ R9, 8(DI) MOVQ R10, 16(DI) RET diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/sha3/doc.go b/terraform/providers/google/vendor/golang.org/x/crypto/sha3/doc.go index decd8cf9bf7..7e023090707 100644 --- a/terraform/providers/google/vendor/golang.org/x/crypto/sha3/doc.go +++ b/terraform/providers/google/vendor/golang.org/x/crypto/sha3/doc.go @@ -59,4 +59,4 @@ // They produce output of the same length, with the same security strengths // against all attacks. This means, in particular, that SHA3-256 only has // 128-bit collision resistance, because its output length is 32 bytes. -package sha3 // import "golang.org/x/crypto/sha3" +package sha3 diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/sha3/hashes.go b/terraform/providers/google/vendor/golang.org/x/crypto/sha3/hashes.go index 5eae6cb922f..c544b29e5f2 100644 --- a/terraform/providers/google/vendor/golang.org/x/crypto/sha3/hashes.go +++ b/terraform/providers/google/vendor/golang.org/x/crypto/sha3/hashes.go @@ -9,6 +9,7 @@ package sha3 // bytes. import ( + "crypto" "hash" ) @@ -40,6 +41,13 @@ func New512() hash.Hash { return new512() } +func init() { + crypto.RegisterHash(crypto.SHA3_224, New224) + crypto.RegisterHash(crypto.SHA3_256, New256) + crypto.RegisterHash(crypto.SHA3_384, New384) + crypto.RegisterHash(crypto.SHA3_512, New512) +} + func new224Generic() *state { return &state{rate: 144, outputLen: 28, dsbyte: 0x06} } diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/terraform/providers/google/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s index 1f539388619..99e2f16e971 100644 --- a/terraform/providers/google/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s +++ b/terraform/providers/google/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s @@ -1,390 +1,5419 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by command: go run keccakf_amd64_asm.go -out ../keccakf_amd64.s -pkg sha3. DO NOT EDIT. 
//go:build amd64 && !purego && gc -// This code was translated into a form compatible with 6a from the public -// domain sources at https://github.com/gvanas/KeccakCodePackage - -// Offsets in state -#define _ba (0*8) -#define _be (1*8) -#define _bi (2*8) -#define _bo (3*8) -#define _bu (4*8) -#define _ga (5*8) -#define _ge (6*8) -#define _gi (7*8) -#define _go (8*8) -#define _gu (9*8) -#define _ka (10*8) -#define _ke (11*8) -#define _ki (12*8) -#define _ko (13*8) -#define _ku (14*8) -#define _ma (15*8) -#define _me (16*8) -#define _mi (17*8) -#define _mo (18*8) -#define _mu (19*8) -#define _sa (20*8) -#define _se (21*8) -#define _si (22*8) -#define _so (23*8) -#define _su (24*8) - -// Temporary registers -#define rT1 AX - -// Round vars -#define rpState DI -#define rpStack SP - -#define rDa BX -#define rDe CX -#define rDi DX -#define rDo R8 -#define rDu R9 - -#define rBa R10 -#define rBe R11 -#define rBi R12 -#define rBo R13 -#define rBu R14 - -#define rCa SI -#define rCe BP -#define rCi rBi -#define rCo rBo -#define rCu R15 - -#define MOVQ_RBI_RCE MOVQ rBi, rCe -#define XORQ_RT1_RCA XORQ rT1, rCa -#define XORQ_RT1_RCE XORQ rT1, rCe -#define XORQ_RBA_RCU XORQ rBa, rCu -#define XORQ_RBE_RCU XORQ rBe, rCu -#define XORQ_RDU_RCU XORQ rDu, rCu -#define XORQ_RDA_RCA XORQ rDa, rCa -#define XORQ_RDE_RCE XORQ rDe, rCe - -#define mKeccakRound(iState, oState, rc, B_RBI_RCE, G_RT1_RCA, G_RT1_RCE, G_RBA_RCU, K_RT1_RCA, K_RT1_RCE, K_RBA_RCU, M_RT1_RCA, M_RT1_RCE, M_RBE_RCU, S_RDU_RCU, S_RDA_RCA, S_RDE_RCE) \ - /* Prepare round */ \ - MOVQ rCe, rDa; \ - ROLQ $1, rDa; \ - \ - MOVQ _bi(iState), rCi; \ - XORQ _gi(iState), rDi; \ - XORQ rCu, rDa; \ - XORQ _ki(iState), rCi; \ - XORQ _mi(iState), rDi; \ - XORQ rDi, rCi; \ - \ - MOVQ rCi, rDe; \ - ROLQ $1, rDe; \ - \ - MOVQ _bo(iState), rCo; \ - XORQ _go(iState), rDo; \ - XORQ rCa, rDe; \ - XORQ _ko(iState), rCo; \ - XORQ _mo(iState), rDo; \ - XORQ rDo, rCo; \ - \ - MOVQ rCo, rDi; \ - ROLQ $1, rDi; \ - \ - MOVQ rCu, rDo; \ - XORQ rCe, rDi; \ - ROLQ $1, rDo; \ - \ - MOVQ rCa, rDu; \ - XORQ rCi, rDo; \ - ROLQ $1, rDu; \ - \ - /* Result b */ \ - MOVQ _ba(iState), rBa; \ - MOVQ _ge(iState), rBe; \ - XORQ rCo, rDu; \ - MOVQ _ki(iState), rBi; \ - MOVQ _mo(iState), rBo; \ - MOVQ _su(iState), rBu; \ - XORQ rDe, rBe; \ - ROLQ $44, rBe; \ - XORQ rDi, rBi; \ - XORQ rDa, rBa; \ - ROLQ $43, rBi; \ - \ - MOVQ rBe, rCa; \ - MOVQ rc, rT1; \ - ORQ rBi, rCa; \ - XORQ rBa, rT1; \ - XORQ rT1, rCa; \ - MOVQ rCa, _ba(oState); \ - \ - XORQ rDu, rBu; \ - ROLQ $14, rBu; \ - MOVQ rBa, rCu; \ - ANDQ rBe, rCu; \ - XORQ rBu, rCu; \ - MOVQ rCu, _bu(oState); \ - \ - XORQ rDo, rBo; \ - ROLQ $21, rBo; \ - MOVQ rBo, rT1; \ - ANDQ rBu, rT1; \ - XORQ rBi, rT1; \ - MOVQ rT1, _bi(oState); \ - \ - NOTQ rBi; \ - ORQ rBa, rBu; \ - ORQ rBo, rBi; \ - XORQ rBo, rBu; \ - XORQ rBe, rBi; \ - MOVQ rBu, _bo(oState); \ - MOVQ rBi, _be(oState); \ - B_RBI_RCE; \ - \ - /* Result g */ \ - MOVQ _gu(iState), rBe; \ - XORQ rDu, rBe; \ - MOVQ _ka(iState), rBi; \ - ROLQ $20, rBe; \ - XORQ rDa, rBi; \ - ROLQ $3, rBi; \ - MOVQ _bo(iState), rBa; \ - MOVQ rBe, rT1; \ - ORQ rBi, rT1; \ - XORQ rDo, rBa; \ - MOVQ _me(iState), rBo; \ - MOVQ _si(iState), rBu; \ - ROLQ $28, rBa; \ - XORQ rBa, rT1; \ - MOVQ rT1, _ga(oState); \ - G_RT1_RCA; \ - \ - XORQ rDe, rBo; \ - ROLQ $45, rBo; \ - MOVQ rBi, rT1; \ - ANDQ rBo, rT1; \ - XORQ rBe, rT1; \ - MOVQ rT1, _ge(oState); \ - G_RT1_RCE; \ - \ - XORQ rDi, rBu; \ - ROLQ $61, rBu; \ - MOVQ rBu, rT1; \ - ORQ rBa, rT1; \ - XORQ rBo, rT1; \ - MOVQ rT1, _go(oState); \ - \ - ANDQ rBe, rBa; \ - 
XORQ rBu, rBa; \ - MOVQ rBa, _gu(oState); \ - NOTQ rBu; \ - G_RBA_RCU; \ - \ - ORQ rBu, rBo; \ - XORQ rBi, rBo; \ - MOVQ rBo, _gi(oState); \ - \ - /* Result k */ \ - MOVQ _be(iState), rBa; \ - MOVQ _gi(iState), rBe; \ - MOVQ _ko(iState), rBi; \ - MOVQ _mu(iState), rBo; \ - MOVQ _sa(iState), rBu; \ - XORQ rDi, rBe; \ - ROLQ $6, rBe; \ - XORQ rDo, rBi; \ - ROLQ $25, rBi; \ - MOVQ rBe, rT1; \ - ORQ rBi, rT1; \ - XORQ rDe, rBa; \ - ROLQ $1, rBa; \ - XORQ rBa, rT1; \ - MOVQ rT1, _ka(oState); \ - K_RT1_RCA; \ - \ - XORQ rDu, rBo; \ - ROLQ $8, rBo; \ - MOVQ rBi, rT1; \ - ANDQ rBo, rT1; \ - XORQ rBe, rT1; \ - MOVQ rT1, _ke(oState); \ - K_RT1_RCE; \ - \ - XORQ rDa, rBu; \ - ROLQ $18, rBu; \ - NOTQ rBo; \ - MOVQ rBo, rT1; \ - ANDQ rBu, rT1; \ - XORQ rBi, rT1; \ - MOVQ rT1, _ki(oState); \ - \ - MOVQ rBu, rT1; \ - ORQ rBa, rT1; \ - XORQ rBo, rT1; \ - MOVQ rT1, _ko(oState); \ - \ - ANDQ rBe, rBa; \ - XORQ rBu, rBa; \ - MOVQ rBa, _ku(oState); \ - K_RBA_RCU; \ - \ - /* Result m */ \ - MOVQ _ga(iState), rBe; \ - XORQ rDa, rBe; \ - MOVQ _ke(iState), rBi; \ - ROLQ $36, rBe; \ - XORQ rDe, rBi; \ - MOVQ _bu(iState), rBa; \ - ROLQ $10, rBi; \ - MOVQ rBe, rT1; \ - MOVQ _mi(iState), rBo; \ - ANDQ rBi, rT1; \ - XORQ rDu, rBa; \ - MOVQ _so(iState), rBu; \ - ROLQ $27, rBa; \ - XORQ rBa, rT1; \ - MOVQ rT1, _ma(oState); \ - M_RT1_RCA; \ - \ - XORQ rDi, rBo; \ - ROLQ $15, rBo; \ - MOVQ rBi, rT1; \ - ORQ rBo, rT1; \ - XORQ rBe, rT1; \ - MOVQ rT1, _me(oState); \ - M_RT1_RCE; \ - \ - XORQ rDo, rBu; \ - ROLQ $56, rBu; \ - NOTQ rBo; \ - MOVQ rBo, rT1; \ - ORQ rBu, rT1; \ - XORQ rBi, rT1; \ - MOVQ rT1, _mi(oState); \ - \ - ORQ rBa, rBe; \ - XORQ rBu, rBe; \ - MOVQ rBe, _mu(oState); \ - \ - ANDQ rBa, rBu; \ - XORQ rBo, rBu; \ - MOVQ rBu, _mo(oState); \ - M_RBE_RCU; \ - \ - /* Result s */ \ - MOVQ _bi(iState), rBa; \ - MOVQ _go(iState), rBe; \ - MOVQ _ku(iState), rBi; \ - XORQ rDi, rBa; \ - MOVQ _ma(iState), rBo; \ - ROLQ $62, rBa; \ - XORQ rDo, rBe; \ - MOVQ _se(iState), rBu; \ - ROLQ $55, rBe; \ - \ - XORQ rDu, rBi; \ - MOVQ rBa, rDu; \ - XORQ rDe, rBu; \ - ROLQ $2, rBu; \ - ANDQ rBe, rDu; \ - XORQ rBu, rDu; \ - MOVQ rDu, _su(oState); \ - \ - ROLQ $39, rBi; \ - S_RDU_RCU; \ - NOTQ rBe; \ - XORQ rDa, rBo; \ - MOVQ rBe, rDa; \ - ANDQ rBi, rDa; \ - XORQ rBa, rDa; \ - MOVQ rDa, _sa(oState); \ - S_RDA_RCA; \ - \ - ROLQ $41, rBo; \ - MOVQ rBi, rDe; \ - ORQ rBo, rDe; \ - XORQ rBe, rDe; \ - MOVQ rDe, _se(oState); \ - S_RDE_RCE; \ - \ - MOVQ rBo, rDi; \ - MOVQ rBu, rDo; \ - ANDQ rBu, rDi; \ - ORQ rBa, rDo; \ - XORQ rBi, rDi; \ - XORQ rBo, rDo; \ - MOVQ rDi, _si(oState); \ - MOVQ rDo, _so(oState) \ - // func keccakF1600(a *[25]uint64) -TEXT ·keccakF1600(SB), 0, $200-8 - MOVQ a+0(FP), rpState +TEXT ·keccakF1600(SB), $200-8 + MOVQ a+0(FP), DI // Convert the user state into an internal state - NOTQ _be(rpState) - NOTQ _bi(rpState) - NOTQ _go(rpState) - NOTQ _ki(rpState) - NOTQ _mi(rpState) - NOTQ _sa(rpState) + NOTQ 8(DI) + NOTQ 16(DI) + NOTQ 64(DI) + NOTQ 96(DI) + NOTQ 136(DI) + NOTQ 160(DI) // Execute the KeccakF permutation - MOVQ _ba(rpState), rCa - MOVQ _be(rpState), rCe - MOVQ _bu(rpState), rCu - - XORQ _ga(rpState), rCa - XORQ _ge(rpState), rCe - XORQ _gu(rpState), rCu - - XORQ _ka(rpState), rCa - XORQ _ke(rpState), rCe - XORQ _ku(rpState), rCu - - XORQ _ma(rpState), rCa - XORQ _me(rpState), rCe - XORQ _mu(rpState), rCu - - XORQ _sa(rpState), rCa - XORQ _se(rpState), rCe - MOVQ _si(rpState), rDi - MOVQ _so(rpState), rDo - XORQ _su(rpState), rCu - - mKeccakRound(rpState, rpStack, $0x0000000000000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, 
XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x0000000000008082, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x800000000000808a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x8000000080008000, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x000000000000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x8000000000008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x000000000000008a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x0000000000000088, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x0000000080008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x000000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x000000008000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x800000000000008b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x8000000000008089, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x8000000000008003, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, 
XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x8000000000008002, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x8000000000000080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x000000000000800a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x800000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x8000000000008080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x8000000080008008, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP) + MOVQ (DI), SI + MOVQ 8(DI), BP + MOVQ 32(DI), R15 + XORQ 40(DI), SI + XORQ 48(DI), BP + XORQ 72(DI), R15 + XORQ 80(DI), SI + XORQ 88(DI), BP + XORQ 112(DI), R15 + XORQ 120(DI), SI + XORQ 128(DI), BP + XORQ 152(DI), R15 + XORQ 160(DI), SI + XORQ 168(DI), BP + MOVQ 176(DI), DX + MOVQ 184(DI), R8 + XORQ 192(DI), R15 - // Revert the internal state to the user state - NOTQ _be(rpState) - NOTQ _bi(rpState) - NOTQ _go(rpState) - NOTQ _ki(rpState) - NOTQ _mi(rpState) - NOTQ _sa(rpState) + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000000000001, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ 
$0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000000008082, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ 
$0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x800000000000808a, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, 
R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000080008000, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g 
+ MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x000000000000808b, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) 
+ MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000080000001, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ 
R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000080008081, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ 
R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000000008009, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ 
R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x000000000000008a, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + 
MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000000000088, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + 
ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000080008009, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + 
ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x000000008000000a, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + 
MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x000000008000808b, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + 
ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x800000000000008b, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + 
ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000000008089, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + 
MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000000008003, AX + ORQ R12, SI 
+ XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ 
$0x8000000000008002, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ 
$0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000000000080, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ 
DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x000000000000800a, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ 
CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x800000008000000a, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), 
R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000080008081, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + 
MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000000008080, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + 
MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000080000001, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + 
// Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000080008008, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + NOP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + NOP + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + NOP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + NOP + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + NOP + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + NOP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + NOP + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + NOP + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + NOP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + NOP + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + NOP + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + NOP + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + NOP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Revert the internal state to the user state + NOTQ 8(DI) + NOTQ 16(DI) + NOTQ 64(DI) + NOTQ 96(DI) + NOTQ 136(DI) + NOTQ 160(DI) RET
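For orientation, the unrolled assembly above is one full keccakF1600 permutation over the 25-lane SHA-3 state. Each `// Prepare round` block computes the theta column parities and the rotated D values; each `// Result b/g/k/m/s` block applies rho, pi and chi for one row, with the round constant (the `MOVQ $0x...` immediates) folded into lane 0 as the iota step; rounds ping-pong the state between the caller's buffer (DI) and stack scratch (SP), and the trailing NOTQs undo the six lanes the kernel keeps bit-complemented so that chi needs fewer NOT instructions. The following is a compact pure-Go reference of the same permutation, for comparison only; the tables are the standard Keccak-f[1600] constants, not taken from this file:

package sketch

import "math/bits"

// Standard Keccak-f[1600] round constants.
var rc = [24]uint64{
	0x0000000000000001, 0x0000000000008082, 0x800000000000808a, 0x8000000080008000,
	0x000000000000808b, 0x0000000080000001, 0x8000000080008081, 0x8000000000008009,
	0x000000000000008a, 0x0000000000000088, 0x0000000080008009, 0x000000008000000a,
	0x000000008000808b, 0x800000000000008b, 0x8000000000008089, 0x8000000000008003,
	0x8000000000008002, 0x8000000000000080, 0x000000000000800a, 0x800000008000000a,
	0x8000000080008081, 0x8000000000008080, 0x0000000080000001, 0x8000000080008008,
}

// Rotation offsets (rho) and destination lanes (pi), indexed by x + 5y.
var rho = [25]int{0, 1, 62, 28, 27, 36, 44, 6, 55, 20, 3, 10, 43, 25, 39, 41, 45, 15, 21, 8, 18, 2, 61, 56, 14}
var pi = [25]int{0, 10, 20, 5, 15, 16, 1, 11, 21, 6, 7, 17, 2, 12, 22, 23, 8, 18, 3, 13, 14, 24, 9, 19, 4}

func keccakF1600(a *[25]uint64) {
	for r := 0; r < 24; r++ {
		// theta: column parities and D values ("Prepare round" above).
		var c [5]uint64
		for x := 0; x < 5; x++ {
			c[x] = a[x] ^ a[x+5] ^ a[x+10] ^ a[x+15] ^ a[x+20]
		}
		for x := 0; x < 5; x++ {
			d := c[(x+4)%5] ^ bits.RotateLeft64(c[(x+1)%5], 1)
			for y := 0; y < 25; y += 5 {
				a[x+y] ^= d
			}
		}
		// rho and pi: rotate each lane and move it to its new position.
		var b [25]uint64
		for i := 0; i < 25; i++ {
			b[pi[i]] = bits.RotateLeft64(a[i], rho[i])
		}
		// chi: the only non-linear step (the "Result b/g/k/m/s" rows above).
		for y := 0; y < 25; y += 5 {
			for x := 0; x < 5; x++ {
				a[y+x] = b[y+x] ^ (^b[y+(x+1)%5] & b[y+(x+2)%5])
			}
		}
		// iota: fold in the round constant.
		a[0] ^= rc[r]
	}
}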
diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/sha3/register.go b/terraform/providers/google/vendor/golang.org/x/crypto/sha3/register.go deleted file mode 100644 index addfd5049bb..00000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/sha3/register.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.4 - -package sha3 - -import ( - "crypto" -) - -func init() { - crypto.RegisterHash(crypto.SHA3_224, New224) - crypto.RegisterHash(crypto.SHA3_256, New256) - crypto.RegisterHash(crypto.SHA3_384, New384) - crypto.RegisterHash(crypto.SHA3_512, New512) -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/sha3/shake.go b/terraform/providers/google/vendor/golang.org/x/crypto/sha3/shake.go index 1ea9275b8b7..a01ef43577d 100644 --- a/terraform/providers/google/vendor/golang.org/x/crypto/sha3/shake.go +++ b/terraform/providers/google/vendor/golang.org/x/crypto/sha3/shake.go @@ -85,9 +85,9 @@ func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash { // leftEncode returns max 9 bytes c.initBlock = make([]byte, 0, 9*2+len(N)+len(S)) - c.initBlock = append(c.initBlock, leftEncode(uint64(len(N)*8))...) + c.initBlock = append(c.initBlock, leftEncode(uint64(len(N))*8)...) c.initBlock = append(c.initBlock, N...) - c.initBlock = append(c.initBlock, leftEncode(uint64(len(S)*8))...) + c.initBlock = append(c.initBlock, leftEncode(uint64(len(S))*8)...) c.initBlock = append(c.initBlock, S...) c.Write(bytepad(c.initBlock, c.rate)) return &c diff --git a/terraform/providers/google/vendor/golang.org/x/exp/constraints/constraints.go b/terraform/providers/google/vendor/golang.org/x/exp/constraints/constraints.go new file mode 100644 index 00000000000..2c033dff47e --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/exp/constraints/constraints.go @@ -0,0 +1,50 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package constraints defines a set of useful constraints to be used +// with type parameters. +package constraints + +// Signed is a constraint that permits any signed integer type. +// If future releases of Go add new predeclared signed integer types, +// this constraint will be modified to include them. +type Signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 +} + +// Unsigned is a constraint that permits any unsigned integer type. +// If future releases of Go add new predeclared unsigned integer types, +// this constraint will be modified to include them. +type Unsigned interface { + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +// Integer is a constraint that permits any integer type. +// If future releases of Go add new predeclared integer types, +// this constraint will be modified to include them. +type Integer interface { + Signed | Unsigned +} + +// Float is a constraint that permits any floating-point type. +// If future releases of Go add new predeclared floating-point types, +// this constraint will be modified to include them. +type Float interface { + ~float32 | ~float64 +} + +// Complex is a constraint that permits any complex numeric type. +// If future releases of Go add new predeclared complex numeric types, +// this constraint will be modified to include them. +type Complex interface { + ~complex64 | ~complex128 +} + +// Ordered is a constraint that permits any ordered type: any type +// that supports the operators < <= >= >. +// If future releases of Go add new ordered types, +// this constraint will be modified to include them. +type Ordered interface { + Integer | Float | ~string +}
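The constraints package vendored above is small but easy to misread: the tildes mean each constraint also admits defined types whose underlying type matches. A minimal usage sketch (the Max helper below is illustrative only, not part of the vendored code):

package sketch

import "golang.org/x/exp/constraints"

// Max works for any type in constraints.Ordered: built-in integers,
// floats and strings, plus defined types such as `type Celsius float64`.
func Max[T constraints.Ordered](a, b T) T {
	if a > b {
		return a
	}
	return b
}

// Max(2, 3) == 3; Max("a", "b") == "b"; Max(Celsius(1.5), Celsius(2.5)) == 2.5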
diff --git a/terraform/providers/google/vendor/golang.org/x/net/LICENSE b/terraform/providers/google/vendor/golang.org/x/net/LICENSE index 6a66aea5eaf..2a7cf70da6e 100644 --- a/terraform/providers/google/vendor/golang.org/x/net/LICENSE +++ b/terraform/providers/google/vendor/golang.org/x/net/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/terraform/providers/google/vendor/golang.org/x/net/http2/config.go b/terraform/providers/google/vendor/golang.org/x/net/http2/config.go new file mode 100644 index 00000000000..de58dfb8dc4 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/net/http2/config.go @@ -0,0 +1,122 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "math" + "net/http" + "time" +) + +// http2Config is a package-internal version of net/http.HTTP2Config. +// +// http.HTTP2Config was added in Go 1.24. +// When running with a version of net/http that includes HTTP2Config, +// we merge the configuration with the fields in Transport or Server +// to produce an http2Config. +// +// Zero valued fields in http2Config are interpreted as in the +// net/http.HTTP2Config documentation. +// +// Precedence order for reconciling configurations is: +// +// - Use the net/http.{Server,Transport}.HTTP2Config value, when non-zero. +// - Otherwise use the http2.{Server,Transport} value. +// - If the resulting value is zero or out of range, use a default. +type http2Config struct { + MaxConcurrentStreams uint32 + MaxDecoderHeaderTableSize uint32 + MaxEncoderHeaderTableSize uint32 + MaxReadFrameSize uint32 + MaxUploadBufferPerConnection int32 + MaxUploadBufferPerStream int32 + SendPingTimeout time.Duration + PingTimeout time.Duration + WriteByteTimeout time.Duration + PermitProhibitedCipherSuites bool + CountError func(errType string) +} +
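The precedence comment above compresses to a three-step merge that every field goes through. A distilled model of that order (the merge helper is hypothetical; the real code does this per field via fillNetHTTPServerConfig/fillNetHTTPTransportConfig and setConfigDefaults):

// Hypothetical distillation of the precedence order for one uint32 field.
func merge(netHTTPVal, http2Val, defval uint32) uint32 {
	v := http2Val // start from the http2.{Server,Transport} field
	if netHTTPVal != 0 {
		v = netHTTPVal // a non-zero net/http HTTP2Config value wins
	}
	if v == 0 { // zero (or out-of-range) falls back to the default
		v = defval
	}
	return v
}

// merge(500, 250, 250) == 500; merge(0, 0, 250) == 250 (defaultMaxStreams)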
+// configFromServer merges configuration settings from +// net/http.Server.HTTP2Config and http2.Server. +func configFromServer(h1 *http.Server, h2 *Server) http2Config { + conf := http2Config{ + MaxConcurrentStreams: h2.MaxConcurrentStreams, + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + MaxUploadBufferPerConnection: h2.MaxUploadBufferPerConnection, + MaxUploadBufferPerStream: h2.MaxUploadBufferPerStream, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, + PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites, + CountError: h2.CountError, + } + fillNetHTTPServerConfig(&conf, h1) + setConfigDefaults(&conf, true) + return conf +} + +// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2 +// (the net/http Transport). +func configFromTransport(h2 *Transport) http2Config { + conf := http2Config{ + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, + } + + // Unlike most config fields, where out-of-range values revert to the default, + // Transport.MaxReadFrameSize clips. + if conf.MaxReadFrameSize < minMaxFrameSize { + conf.MaxReadFrameSize = minMaxFrameSize + } else if conf.MaxReadFrameSize > maxFrameSize { + conf.MaxReadFrameSize = maxFrameSize + } + + if h2.t1 != nil { + fillNetHTTPTransportConfig(&conf, h2.t1) + } + setConfigDefaults(&conf, false) + return conf +} + +func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) { + if *v < minval || *v > maxval { + *v = defval + } +} + +func setConfigDefaults(conf *http2Config, server bool) { + setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams) + setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + if server { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow) + } + if server { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow) + } + setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize) + setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second) +} + +// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header +// to an HTTP/2 MAX_HEADER_LIST_SIZE value. +func adjustHTTP1MaxHeaderSize(n int64) int64 { + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. + const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return n + typicalHeaders*perFieldOverhead +}
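The padding in adjustHTTP1MaxHeaderSize is easy to sanity-check by hand. Assuming the caller passes net/http's default header limit of 1 << 20 bytes (http.DefaultMaxHeaderBytes):

// adjustHTTP1MaxHeaderSize(1 << 20)
//   == 1048576 + typicalHeaders*perFieldOverhead
//   == 1048576 + 10*32
//   == 1048896
//
// i.e. the HTTP/2 MAX_HEADER_LIST_SIZE ends up slightly above the HTTP/1
// byte budget, leaving room for the 32-byte per-entry accounting overhead.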
diff --git a/terraform/providers/google/vendor/golang.org/x/net/http2/config_go124.go b/terraform/providers/google/vendor/golang.org/x/net/http2/config_go124.go new file mode 100644 index 00000000000..e3784123c81 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/net/http2/config_go124.go @@ -0,0 +1,61 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.24 + +package http2 + +import "net/http" + +// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2. +func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { + fillNetHTTPConfig(conf, srv.HTTP2) +} + +// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2. +func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { + fillNetHTTPConfig(conf, tr.HTTP2) +} + +func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { + if h2 == nil { + return + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxEncoderHeaderTableSize != 0 { + conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) + } + if h2.MaxDecoderHeaderTableSize != 0 { + conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxReadFrameSize != 0 { + conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) + } + if h2.MaxReceiveBufferPerConnection != 0 { + conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) + } + if h2.MaxReceiveBufferPerStream != 0 { + conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) + } + if h2.SendPingTimeout != 0 { + conf.SendPingTimeout = h2.SendPingTimeout + } + if h2.PingTimeout != 0 { + conf.PingTimeout = h2.PingTimeout + } + if h2.WriteByteTimeout != 0 { + conf.WriteByteTimeout = h2.WriteByteTimeout + } + if h2.PermitProhibitedCipherSuites { + conf.PermitProhibitedCipherSuites = true + } + if h2.CountError != nil { + conf.CountError = h2.CountError + } +} diff --git a/terraform/providers/google/vendor/golang.org/x/net/http2/config_pre_go124.go b/terraform/providers/google/vendor/golang.org/x/net/http2/config_pre_go124.go new file mode 100644 index 00000000000..060fd6c64c6 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/net/http2/config_pre_go124.go @@ -0,0 +1,16 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.24 + +package http2 + +import "net/http" + +// Pre-Go 1.24 fallback. +// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24. + +func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {} + +func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {} diff --git a/terraform/providers/google/vendor/golang.org/x/net/http2/http2.go b/terraform/providers/google/vendor/golang.org/x/net/http2/http2.go index 003e649f30c..7688c356b7c 100644 --- a/terraform/providers/google/vendor/golang.org/x/net/http2/http2.go +++ b/terraform/providers/google/vendor/golang.org/x/net/http2/http2.go @@ -19,8 +19,9 @@ import ( "bufio" "context" "crypto/tls" + "errors" "fmt" - "io" + "net" "net/http" "os" "sort" @@ -237,13 +238,19 @@ func (cw closeWaiter) Wait() { // Its buffered writer is lazily allocated as needed, to minimize // idle memory usage with many connections.
type bufferedWriter struct { - _ incomparable - w io.Writer // immutable - bw *bufio.Writer // non-nil when data is buffered + _ incomparable + group synctestGroupInterface // immutable + conn net.Conn // immutable + bw *bufio.Writer // non-nil when data is buffered + byteTimeout time.Duration // immutable, WriteByteTimeout } -func newBufferedWriter(w io.Writer) *bufferedWriter { - return &bufferedWriter{w: w} +func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter { + return &bufferedWriter{ + group: group, + conn: conn, + byteTimeout: timeout, + } } // bufWriterPoolBufferSize is the size of bufio.Writer's @@ -270,7 +277,7 @@ func (w *bufferedWriter) Available() int { func (w *bufferedWriter) Write(p []byte) (n int, err error) { if w.bw == nil { bw := bufWriterPool.Get().(*bufio.Writer) - bw.Reset(w.w) + bw.Reset((*bufferedWriterTimeoutWriter)(w)) w.bw = bw } return w.bw.Write(p) @@ -288,6 +295,38 @@ func (w *bufferedWriter) Flush() error { return err } +type bufferedWriterTimeoutWriter bufferedWriter + +func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) { + return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p) +} + +// writeWithByteTimeout writes to conn. +// If more than timeout passes without any bytes being written to the connection, +// the write fails. +func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { + if timeout <= 0 { + return conn.Write(p) + } + for { + var now time.Time + if group == nil { + now = time.Now() + } else { + now = group.Now() + } + conn.SetWriteDeadline(now.Add(timeout)) + nn, err := conn.Write(p[n:]) + n += nn + if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) { + // Either we finished the write, made no progress, or hit the deadline. + // Whichever it is, we're done now. + conn.SetWriteDeadline(time.Time{}) + return n, err + } + } +} + func mustUint31(v int32) uint32 { if v < 0 || v > 2147483647 { panic("out of range") diff --git a/terraform/providers/google/vendor/golang.org/x/net/http2/server.go b/terraform/providers/google/vendor/golang.org/x/net/http2/server.go index 6c349f3ec64..617b4a47623 100644 --- a/terraform/providers/google/vendor/golang.org/x/net/http2/server.go +++ b/terraform/providers/google/vendor/golang.org/x/net/http2/server.go @@ -29,6 +29,7 @@ import ( "bufio" "bytes" "context" + "crypto/rand" "crypto/tls" "errors" "fmt" @@ -52,10 +53,14 @@ import ( ) const ( - prefaceTimeout = 10 * time.Second - firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway - handlerChunkWriteSize = 4 << 10 - defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + + // maxQueuedControlFrames is the maximum number of control frames like + // SETTINGS, PING and RST_STREAM that will be queued for writing before + // the connection is closed to prevent memory exhaustion attacks. maxQueuedControlFrames = 10000 ) @@ -127,6 +132,22 @@ type Server struct { // If zero or negative, there is no timeout. IdleTimeout time.Duration + // ReadIdleTimeout is the timeout after which a health check using a ping + // frame will be carried out if no frame is received on the connection. + // If zero, no health check is performed. 
+ ReadIdleTimeout time.Duration + + // PingTimeout is the timeout after which the connection will be closed + // if a response to a ping is not received. + // If zero, a default of 15 seconds is used. + PingTimeout time.Duration + + // WriteByteTimeout is the timeout after which a connection will be + // closed if no data can be written to it. The timeout begins when data is + // available to write, and is extended whenever any bytes are written. + // If zero or negative, there is no timeout. + WriteByteTimeout time.Duration + // MaxUploadBufferPerConnection is the size of the initial flow // control window for each connections. The HTTP/2 spec does not // allow this to be smaller than 65535 or larger than 2^32-1. @@ -189,57 +210,6 @@ func (s *Server) afterFunc(d time.Duration, f func()) timer { return timeTimer{time.AfterFunc(d, f)} } -func (s *Server) initialConnRecvWindowSize() int32 { - if s.MaxUploadBufferPerConnection >= initialWindowSize { - return s.MaxUploadBufferPerConnection - } - return 1 << 20 -} - -func (s *Server) initialStreamRecvWindowSize() int32 { - if s.MaxUploadBufferPerStream > 0 { - return s.MaxUploadBufferPerStream - } - return 1 << 20 -} - -func (s *Server) maxReadFrameSize() uint32 { - if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { - return v - } - return defaultMaxReadFrameSize -} - -func (s *Server) maxConcurrentStreams() uint32 { - if v := s.MaxConcurrentStreams; v > 0 { - return v - } - return defaultMaxStreams -} - -func (s *Server) maxDecoderHeaderTableSize() uint32 { - if v := s.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (s *Server) maxEncoderHeaderTableSize() uint32 { - if v := s.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -// maxQueuedControlFrames is the maximum number of control frames like -// SETTINGS, PING and RST_STREAM that will be queued for writing before -// the connection is closed to prevent memory exhaustion attacks. -func (s *Server) maxQueuedControlFrames() int { - // TODO: if anybody asks, add a Server field, and remember to define the - // behavior of negative values. 
- return maxQueuedControlFrames -} - type serverInternalState struct { mu sync.Mutex activeConns map[*serverConn]struct{} @@ -440,13 +410,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon baseCtx, cancel := serverConnBaseContext(c, opts) defer cancel() + http1srv := opts.baseConfig() + conf := configFromServer(http1srv, s) sc := &serverConn{ srv: s, - hs: opts.baseConfig(), + hs: http1srv, conn: c, baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), + bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout), handler: opts.handler(), streams: make(map[uint32]*stream), readFrameCh: make(chan readFrameResult), @@ -456,9 +428,12 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way doneServing: make(chan struct{}), clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" - advMaxStreams: s.maxConcurrentStreams(), + advMaxStreams: conf.MaxConcurrentStreams, initialStreamSendWindowSize: initialWindowSize, + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, maxFrameSize: initialMaxFrameSize, + pingTimeout: conf.PingTimeout, + countErrorFunc: conf.CountError, serveG: newGoroutineLock(), pushEnabled: true, sawClientPreface: opts.SawClientPreface, @@ -491,15 +466,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon sc.flow.add(initialWindowSize) sc.inflow.init(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize()) + sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) fr := NewFramer(sc.bw, c) - if s.CountError != nil { - fr.countError = s.CountError + if conf.CountError != nil { + fr.countError = conf.CountError } - fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil) + fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil) fr.MaxHeaderListSize = sc.maxHeaderListSize() - fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) sc.framer = fr if tc, ok := c.(connectionStater); ok { @@ -532,7 +507,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon // So for now, do nothing here again. } - if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { // "Endpoints MAY choose to generate a connection error // (Section 5.4.1) of type INADEQUATE_SECURITY if one of // the prohibited cipher suites are negotiated." 
@@ -569,7 +544,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon opts.UpgradeRequest = nil } - sc.serve() + sc.serve(conf) } func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { @@ -609,6 +584,7 @@ type serverConn struct { tlsState *tls.ConnectionState // shared by all handlers, like net/http remoteAddrStr string writeSched WriteScheduler + countErrorFunc func(errType string) // Everything following is owned by the serve loop; use serveG.check(): serveG goroutineLock // used to verify funcs are on serve() @@ -628,6 +604,7 @@ type serverConn struct { streams map[uint32]*stream unstartedHandlers []unstartedHandler initialStreamSendWindowSize int32 + initialStreamRecvWindowSize int32 maxFrameSize int32 peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case @@ -638,9 +615,14 @@ type serverConn struct { inGoAway bool // we've started to or sent GOAWAY inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop needToSendGoAway bool // we need to schedule a GOAWAY frame write + pingSent bool + sentPingData [8]byte goAwayCode ErrCode shutdownTimer timer // nil until used idleTimer timer // nil if unused + readIdleTimeout time.Duration + pingTimeout time.Duration + readIdleTimer timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -655,11 +637,7 @@ func (sc *serverConn) maxHeaderListSize() uint32 { if n <= 0 { n = http.DefaultMaxHeaderBytes } - // http2's count is in a slightly different unit and includes 32 bytes per pair. - // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. - const perFieldOverhead = 32 // per http2 spec - const typicalHeaders = 10 // conservative - return uint32(n + typicalHeaders*perFieldOverhead) + return uint32(adjustHTTP1MaxHeaderSize(int64(n))) } func (sc *serverConn) curOpenStreams() uint32 { @@ -923,7 +901,7 @@ func (sc *serverConn) notePanic() { } } -func (sc *serverConn) serve() { +func (sc *serverConn) serve(conf http2Config) { sc.serveG.check() defer sc.notePanic() defer sc.conn.Close() @@ -937,18 +915,18 @@ func (sc *serverConn) serve() { sc.writeFrame(FrameWriteRequest{ write: writeSettings{ - {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, + {SettingMaxFrameSize, conf.MaxReadFrameSize}, {SettingMaxConcurrentStreams, sc.advMaxStreams}, {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()}, - {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, }, }) sc.unackedSettings++ // Each connection starts with initialWindowSize inflow tokens. // If a higher value is configured, we add more tokens. 
- if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 { sc.sendWindowUpdate(nil, int(diff)) } @@ -968,11 +946,18 @@ func (sc *serverConn) serve() { defer sc.idleTimer.Stop() } + if conf.SendPingTimeout > 0 { + sc.readIdleTimeout = conf.SendPingTimeout + sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) + defer sc.readIdleTimer.Stop() + } + go sc.readFrames() // closed by defer sc.conn.Close above settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() + lastFrameTime := sc.srv.now() loopNum := 0 for { loopNum++ @@ -986,6 +971,7 @@ func (sc *serverConn) serve() { case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: + lastFrameTime = sc.srv.now() // Process any written frames before reading new frames from the client since a // written frame could have triggered a new stream to be started. if sc.writingFrameAsync { @@ -1017,6 +1003,8 @@ func (sc *serverConn) serve() { case idleTimerMsg: sc.vlogf("connection is idle") sc.goAway(ErrCodeNo) + case readIdleTimerMsg: + sc.handlePingTimer(lastFrameTime) case shutdownTimerMsg: sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) return @@ -1039,7 +1027,7 @@ func (sc *serverConn) serve() { // If the peer is causing us to generate a lot of control frames, // but not reading them from us, assume they are trying to make us // run out of memory. - if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() { + if sc.queuedControlFrames > maxQueuedControlFrames { sc.vlogf("http2: too many control frames in send queue, closing connection") return } @@ -1055,12 +1043,39 @@ func (sc *serverConn) serve() { } } +func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { + if sc.pingSent { + sc.vlogf("timeout waiting for PING response") + sc.conn.Close() + return + } + + pingAt := lastFrameReadTime.Add(sc.readIdleTimeout) + now := sc.srv.now() + if pingAt.After(now) { + // We received frames since arming the ping timer. + // Reset it for the next possible timeout. + sc.readIdleTimer.Reset(pingAt.Sub(now)) + return + } + + sc.pingSent = true + // Ignore crypto/rand.Read errors: It generally can't fail, and worst case if it does + // is we send a PING frame containing 0s. + _, _ = rand.Read(sc.sentPingData[:]) + sc.writeFrame(FrameWriteRequest{ + write: &writePing{data: sc.sentPingData}, + }) + sc.readIdleTimer.Reset(sc.pingTimeout) +} + type serverMessage int // Message values sent to serveMsgCh.
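A usage sketch of the server keep-alive path added above, against the patched module: ReadIdleTimeout becomes conf.SendPingTimeout, and handlePingTimer closes the connection if the PING ACK does not arrive within PingTimeout. Values, address, and certificate paths here are illustrative:

    package main

    import (
        "log"
        "net/http"
        "time"

        "golang.org/x/net/http2"
    )

    func main() {
        srv := &http.Server{Addr: ":8443"} // placeholder address
        if err := http2.ConfigureServer(srv, &http2.Server{
            ReadIdleTimeout: 30 * time.Second, // send a PING after 30s without frames
            PingTimeout:     15 * time.Second, // close the conn if the ACK is late
        }); err != nil {
            log.Fatal(err)
        }
        log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem")) // placeholder paths
    }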
var ( settingsTimerMsg = new(serverMessage) idleTimerMsg = new(serverMessage) + readIdleTimerMsg = new(serverMessage) shutdownTimerMsg = new(serverMessage) gracefulShutdownMsg = new(serverMessage) handlerDoneMsg = new(serverMessage) @@ -1068,6 +1083,7 @@ var ( func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) } func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } func (sc *serverConn) sendServeMsg(msg interface{}) { @@ -1320,6 +1336,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { sc.writingFrame = false sc.writingFrameAsync = false + if res.err != nil { + sc.conn.Close() + } + wr := res.wr if writeEndsStream(wr.write) { @@ -1594,6 +1614,11 @@ func (sc *serverConn) processFrame(f Frame) error { func (sc *serverConn) processPing(f *PingFrame) error { sc.serveG.check() if f.IsAck() { + if sc.pingSent && sc.sentPingData == f.Data { + // This is a response to a PING we sent. + sc.pingSent = false + sc.readIdleTimer.Reset(sc.readIdleTimeout) + } // 6.7 PING: " An endpoint MUST NOT respond to PING frames // containing this flag." return nil @@ -2160,7 +2185,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.cw.Init() st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) - st.inflow.init(sc.srv.initialStreamRecvWindowSize()) + st.inflow.init(sc.initialStreamRecvWindowSize) if sc.hs.WriteTimeout > 0 { st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } @@ -3301,7 +3326,7 @@ func (sc *serverConn) countError(name string, err error) error { if sc == nil || sc.srv == nil { return err } - f := sc.srv.CountError + f := sc.countErrorFunc if f == nil { return err } diff --git a/terraform/providers/google/vendor/golang.org/x/net/http2/transport.go b/terraform/providers/google/vendor/golang.org/x/net/http2/transport.go index 98a49c6b6ee..0c5f64aa8be 100644 --- a/terraform/providers/google/vendor/golang.org/x/net/http2/transport.go +++ b/terraform/providers/google/vendor/golang.org/x/net/http2/transport.go @@ -25,7 +25,6 @@ import ( "net/http" "net/http/httptrace" "net/textproto" - "os" "sort" "strconv" "strings" @@ -227,40 +226,26 @@ func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (co } func (t *Transport) maxHeaderListSize() uint32 { - if t.MaxHeaderListSize == 0 { + n := int64(t.MaxHeaderListSize) + if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 { + n = t.t1.MaxResponseHeaderBytes + if n > 0 { + n = adjustHTTP1MaxHeaderSize(n) + } + } + if n <= 0 { return 10 << 20 } - if t.MaxHeaderListSize == 0xffffffff { + if n >= 0xffffffff { return 0 } - return t.MaxHeaderListSize -} - -func (t *Transport) maxFrameReadSize() uint32 { - if t.MaxReadFrameSize == 0 { - return 0 // use the default provided by the peer - } - if t.MaxReadFrameSize < minMaxFrameSize { - return minMaxFrameSize - } - if t.MaxReadFrameSize > maxFrameSize { - return maxFrameSize - } - return t.MaxReadFrameSize + return uint32(n) } func (t *Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } -func (t *Transport) pingTimeout() time.Duration { - if t.PingTimeout == 0 { - return 15 * time.Second - } - return t.PingTimeout - -} - // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. 
// It returns an error if t1 has already been HTTP/2-enabled. // @@ -370,11 +355,14 @@ type ClientConn struct { lastActive time.Time lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - peerMaxHeaderListSize uint64 - peerMaxHeaderTableSize uint32 - initialWindowSize uint32 + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + peerMaxHeaderTableSize uint32 + initialWindowSize uint32 + initialStreamRecvWindowSize int32 + readIdleTimeout time.Duration + pingTimeout time.Duration // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. @@ -499,6 +487,7 @@ func (cs *clientStream) closeReqBodyLocked() { } type stickyErrWriter struct { + group synctestGroupInterface conn net.Conn timeout time.Duration err *error @@ -508,22 +497,9 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } - for { - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout)) - } - nn, err := sew.conn.Write(p[n:]) - n += nn - if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) { - // Keep extending the deadline so long as we're making progress. - continue - } - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Time{}) - } - *sew.err = err - return n, err - } + n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p) + *sew.err = err + return n, err } // noCachedConnError is the concrete type of ErrNoCachedConn, which @@ -758,44 +734,36 @@ func (t *Transport) expectContinueTimeout() time.Duration { return t.t1.ExpectContinueTimeout } -func (t *Transport) maxDecoderHeaderTableSize() uint32 { - if v := t.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (t *Transport) maxEncoderHeaderTableSize() uint32 { - if v := t.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { return t.newClientConn(c, t.disableKeepAlives()) } func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + conf := configFromTransport(t) cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. - streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - pings: make(map[[8]byte]chan struct{}), - reqHeaderMu: make(chan struct{}, 1), - } + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, + maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. 
+ streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + readIdleTimeout: conf.SendPingTimeout, + pingTimeout: conf.PingTimeout, + pings: make(map[[8]byte]chan struct{}), + reqHeaderMu: make(chan struct{}, 1), + } + var group synctestGroupInterface if t.transportTestHooks != nil { t.markNewGoroutine() t.transportTestHooks.newclientconn(cc) c = cc.tconn + group = t.group } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -807,30 +775,25 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // TODO: adjust this writer size to account for frame size + // MTU + crypto/tls record padding. cc.bw = bufio.NewWriter(stickyErrWriter{ + group: group, conn: c, - timeout: t.WriteByteTimeout, + timeout: conf.WriteByteTimeout, err: &cc.werr, }) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) - if t.maxFrameReadSize() != 0 { - cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize()) - } + cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) if t.CountError != nil { cc.fr.countError = t.CountError } - maxHeaderTableSize := t.maxDecoderHeaderTableSize() + maxHeaderTableSize := conf.MaxDecoderHeaderTableSize cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil) cc.fr.MaxHeaderListSize = t.maxHeaderListSize() cc.henc = hpack.NewEncoder(&cc.hbuf) - cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) + cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) cc.peerMaxHeaderTableSize = initialHeaderTableSize - if t.AllowHTTP { - cc.nextStreamID = 3 - } - if cs, ok := c.(connectionStater); ok { state := cs.ConnectionState() cc.tlsState = &state @@ -838,11 +801,9 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro initialSettings := []Setting{ {ID: SettingEnablePush, Val: 0}, - {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, - } - if max := t.maxFrameReadSize(); max != 0 { - initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max}) + {ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)}, } + initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize}) if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) } @@ -852,8 +813,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) - cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.init(transportDefaultConnFlow + initialWindowSize) + cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection)) + cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize) cc.bw.Flush() if cc.werr != nil { cc.Close() @@ -871,7 +832,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro } func (cc *ClientConn) healthCheck() { - pingTimeout := cc.t.pingTimeout() + pingTimeout := cc.pingTimeout // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. 
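The client side is symmetric: the Transport's ReadIdleTimeout and PingTimeout now flow through configFromTransport into the ClientConn fields used by healthCheck below. A hedged wiring sketch with illustrative values and a placeholder URL:

    package main

    import (
        "net/http"
        "time"

        "golang.org/x/net/http2"
    )

    func main() {
        t2 := &http2.Transport{
            ReadIdleTimeout: 30 * time.Second, // health-check PING after 30s of silence
            PingTimeout:     15 * time.Second, // fail the conn if the PING times out
        }
        client := &http.Client{Transport: t2}
        resp, err := client.Get("https://example.com/") // placeholder URL
        if err == nil {
            resp.Body.Close()
        }
    }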
ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) @@ -2203,7 +2164,7 @@ type resAndError struct { func (cc *ClientConn) addStreamLocked(cs *clientStream) { cs.flow.add(int32(cc.initialWindowSize)) cs.flow.setConnFlow(&cc.flow) - cs.inflow.init(transportDefaultStreamFlow) + cs.inflow.init(cc.initialStreamRecvWindowSize) cs.ID = cc.nextStreamID cc.nextStreamID += 2 cc.streams[cs.ID] = cs @@ -2349,7 +2310,7 @@ func (cc *ClientConn) countReadFrameError(err error) { func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false - readIdleTimeout := cc.t.ReadIdleTimeout + readIdleTimeout := cc.readIdleTimeout var t timer if readIdleTimeout != 0 { t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) diff --git a/terraform/providers/google/vendor/golang.org/x/net/http2/write.go b/terraform/providers/google/vendor/golang.org/x/net/http2/write.go index 33f61398a12..6ff6bee7e95 100644 --- a/terraform/providers/google/vendor/golang.org/x/net/http2/write.go +++ b/terraform/providers/google/vendor/golang.org/x/net/http2/write.go @@ -131,6 +131,16 @@ func (se StreamError) writeFrame(ctx writeContext) error { func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } +type writePing struct { + data [8]byte +} + +func (w writePing) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(false, w.data) +} + +func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max } + type writePingAck struct{ pf *PingFrame } func (w writePingAck) writeFrame(ctx writeContext) error { diff --git a/terraform/providers/google/vendor/golang.org/x/oauth2/LICENSE b/terraform/providers/google/vendor/golang.org/x/oauth2/LICENSE index 6a66aea5eaf..2a7cf70da6e 100644 --- a/terraform/providers/google/vendor/golang.org/x/oauth2/LICENSE +++ b/terraform/providers/google/vendor/golang.org/x/oauth2/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/terraform/providers/google/vendor/golang.org/x/oauth2/token.go b/terraform/providers/google/vendor/golang.org/x/oauth2/token.go index 5bbb3321748..109997d77ce 100644 --- a/terraform/providers/google/vendor/golang.org/x/oauth2/token.go +++ b/terraform/providers/google/vendor/golang.org/x/oauth2/token.go @@ -49,6 +49,13 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // raw optionally contains extra metadata from the server // when updating a token. 
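The new ExpiresIn field is wire data only; per its comment, callers derive Expiry themselves. A hedged sketch of that conversion, where fillExpiry is a hypothetical helper (not part of the package) and a local clock reading stands in for the unknown time base:

    package main

    import (
        "time"

        "golang.org/x/oauth2"
    )

    // fillExpiry is hypothetical; "now" approximates the token's time base.
    func fillExpiry(tok *oauth2.Token, now time.Time) {
        if tok.Expiry.IsZero() && tok.ExpiresIn > 0 {
            tok.Expiry = now.Add(time.Duration(tok.ExpiresIn) * time.Second)
        }
    }

    func main() {
        tok := &oauth2.Token{AccessToken: "example", ExpiresIn: 3600}
        fillExpiry(tok, time.Now())
    }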
raw interface{} diff --git a/terraform/providers/google/vendor/golang.org/x/sync/LICENSE b/terraform/providers/google/vendor/golang.org/x/sync/LICENSE index 6a66aea5eaf..2a7cf70da6e 100644 --- a/terraform/providers/google/vendor/golang.org/x/sync/LICENSE +++ b/terraform/providers/google/vendor/golang.org/x/sync/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/terraform/providers/google/vendor/golang.org/x/sync/errgroup/errgroup.go b/terraform/providers/google/vendor/golang.org/x/sync/errgroup/errgroup.go new file mode 100644 index 00000000000..948a3ee63d4 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -0,0 +1,135 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errgroup provides synchronization, error propagation, and Context +// cancelation for groups of goroutines working on subtasks of a common task. +// +// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks +// returning errors. +package errgroup + +import ( + "context" + "fmt" + "sync" +) + +type token struct{} + +// A Group is a collection of goroutines working on subtasks that are part of +// the same overall task. +// +// A zero Group is valid, has no limit on the number of active goroutines, +// and does not cancel on error. +type Group struct { + cancel func(error) + + wg sync.WaitGroup + + sem chan token + + errOnce sync.Once + err error +} + +func (g *Group) done() { + if g.sem != nil { + <-g.sem + } + g.wg.Done() +} + +// WithContext returns a new Group and an associated Context derived from ctx. +// +// The derived Context is canceled the first time a function passed to Go +// returns a non-nil error or the first time Wait returns, whichever occurs +// first. +func WithContext(ctx context.Context) (*Group, context.Context) { + ctx, cancel := withCancelCause(ctx) + return &Group{cancel: cancel}, ctx +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. +func (g *Group) Wait() error { + g.wg.Wait() + if g.cancel != nil { + g.cancel(g.err) + } + return g.err +} + +// Go calls the given function in a new goroutine. +// It blocks until the new goroutine can be added without the number of +// active goroutines in the group exceeding the configured limit. +// +// The first call to return a non-nil error cancels the group's context, if the +// group was created by calling WithContext. The error will be returned by Wait. 
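Since errgroup is newly vendored here, a short self-contained usage sketch may help: the first non-nil error returned by any subtask cancels ctx and is reported by Wait. The URLs are placeholders:

    package main

    import (
        "context"
        "fmt"
        "net/http"

        "golang.org/x/sync/errgroup"
    )

    func main() {
        urls := []string{"https://example.com/", "https://example.org/"} // placeholders
        g, ctx := errgroup.WithContext(context.Background())
        for _, url := range urls {
            url := url // per-iteration copy for pre-Go 1.22 toolchains
            g.Go(func() error {
                req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
                if err != nil {
                    return err
                }
                resp, err := http.DefaultClient.Do(req)
                if err != nil {
                    return err
                }
                return resp.Body.Close()
            })
        }
        if err := g.Wait(); err != nil { // first non-nil error, if any
            fmt.Println("fetch failed:", err)
        }
    }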
+func (g *Group) Go(f func() error) { + if g.sem != nil { + g.sem <- token{} + } + + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel(g.err) + } + }) + } + }() +} + +// TryGo calls the given function in a new goroutine only if the number of +// active goroutines in the group is currently below the configured limit. +// +// The return value reports whether the goroutine was started. +func (g *Group) TryGo(f func() error) bool { + if g.sem != nil { + select { + case g.sem <- token{}: + // Note: this allows barging iff channels in general allow barging. + default: + return false + } + } + + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel(g.err) + } + }) + } + }() + return true +} + +// SetLimit limits the number of active goroutines in this group to at most n. +// A negative value indicates no limit. +// +// Any subsequent call to the Go method will block until it can add an active +// goroutine without exceeding the configured limit. +// +// The limit must not be modified while any goroutines in the group are active. +func (g *Group) SetLimit(n int) { + if n < 0 { + g.sem = nil + return + } + if len(g.sem) != 0 { + panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem))) + } + g.sem = make(chan token, n) +} diff --git a/terraform/providers/google/vendor/golang.org/x/sync/errgroup/go120.go b/terraform/providers/google/vendor/golang.org/x/sync/errgroup/go120.go new file mode 100644 index 00000000000..f93c740b638 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/sync/errgroup/go120.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.20 + +package errgroup + +import "context" + +func withCancelCause(parent context.Context) (context.Context, func(error)) { + return context.WithCancelCause(parent) +} diff --git a/terraform/providers/google/vendor/golang.org/x/sync/errgroup/pre_go120.go b/terraform/providers/google/vendor/golang.org/x/sync/errgroup/pre_go120.go new file mode 100644 index 00000000000..88ce33434e2 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/sync/errgroup/pre_go120.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.20 + +package errgroup + +import "context" + +func withCancelCause(parent context.Context) (context.Context, func(error)) { + ctx, cancel := context.WithCancel(parent) + return ctx, func(error) { cancel() } +} diff --git a/terraform/providers/google/vendor/golang.org/x/sys/LICENSE b/terraform/providers/google/vendor/golang.org/x/sys/LICENSE index 6a66aea5eaf..2a7cf70da6e 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/LICENSE +++ b/terraform/providers/google/vendor/golang.org/x/sys/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. 
copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/terraform/providers/google/vendor/golang.org/x/sys/cpu/cpu.go b/terraform/providers/google/vendor/golang.org/x/sys/cpu/cpu.go index 8fa707aa4ba..02609d5b21d 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/cpu/cpu.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/cpu/cpu.go @@ -105,6 +105,8 @@ var ARM64 struct { HasSVE bool // Scalable Vector Extensions HasSVE2 bool // Scalable Vector Extensions 2 HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 + HasDIT bool // Data Independent Timing support + HasI8MM bool // Advanced SIMD Int8 matrix multiplication instructions _ CacheLinePad } @@ -199,6 +201,25 @@ var S390X struct { _ CacheLinePad } +// RISCV64 contains the supported CPU features and performance characteristics for riscv64 +// platforms. The booleans in RISCV64, with the exception of HasFastMisaligned, indicate +// the presence of RISC-V extensions. +// +// It is safe to assume that all the RV64G extensions are supported and so they are omitted from +// this structure. As riscv64 Go programs require at least RV64G, the code that populates +// this structure cannot run successfully if some of the RV64G extensions are missing. +// The struct is padded to avoid false sharing. +var RISCV64 struct { + _ CacheLinePad + HasFastMisaligned bool // Fast misaligned accesses + HasC bool // Compressed instruction-set extension + HasV bool // Vector extension compatible with RVV 1.0 + HasZba bool // Address generation instructions extension + HasZbb bool // Basic bit-manipulation extension + HasZbs bool // Single-bit instructions extension + _ CacheLinePad +} + func init() { archInit() initOptions() diff --git a/terraform/providers/google/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/terraform/providers/google/vendor/golang.org/x/sys/cpu/cpu_arm64.go index 0e27a21e1f8..af2aa99f9f0 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -38,6 +38,8 @@ func initOptions() { {Name: "dcpop", Feature: &ARM64.HasDCPOP}, {Name: "asimddp", Feature: &ARM64.HasASIMDDP}, {Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM}, + {Name: "dit", Feature: &ARM64.HasDIT}, + {Name: "i8mm", Feature: &ARM64.HasI8MM}, } } @@ -145,6 +147,11 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { ARM64.HasLRCPC = true } + switch extractBits(isar1, 52, 55) { + case 1: + ARM64.HasI8MM = true + } + // ID_AA64PFR0_EL1 switch extractBits(pfr0, 16, 19) { case 0: @@ -168,6 +175,11 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { parseARM64SVERegister(getzfr0()) } + + switch extractBits(pfr0, 48, 51) { + case 1: + ARM64.HasDIT = true + } } func parseARM64SVERegister(zfr0 uint64) { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/terraform/providers/google/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go index 3d386d0fc21..08f35ea1773 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -35,8 +35,10 @@ const ( hwcap_SHA512 = 1 << 21 
hwcap_SVE = 1 << 22 hwcap_ASIMDFHM = 1 << 23 + hwcap_DIT = 1 << 24 hwcap2_SVE2 = 1 << 1 + hwcap2_I8MM = 1 << 13 ) // linuxKernelCanEmulateCPUID reports whether we're running @@ -106,9 +108,12 @@ func doinit() { ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) ARM64.HasSVE = isSet(hwCap, hwcap_SVE) ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) + ARM64.HasDIT = isSet(hwCap, hwcap_DIT) + // HWCAP2 feature bits ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2) + ARM64.HasI8MM = isSet(hwCap2, hwcap2_I8MM) } func isSet(hwc uint, value uint) bool { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/terraform/providers/google/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go index cd63e733557..7d902b6847b 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x +//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64 package cpu diff --git a/terraform/providers/google/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go b/terraform/providers/google/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go new file mode 100644 index 00000000000..cb4a0c57280 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go @@ -0,0 +1,137 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// RISC-V extension discovery code for Linux. The approach here is to first try the riscv_hwprobe +// syscall falling back to HWCAP to check for the C extension if riscv_hwprobe is not available. +// +// A note on detection of the Vector extension using HWCAP. +// +// Support for the Vector extension version 1.0 was added to the Linux kernel in release 6.5. +// Support for the riscv_hwprobe syscall was added in 6.4. It follows that if the riscv_hwprobe +// syscall is not available then neither is the Vector extension (which needs kernel support). +// The riscv_hwprobe syscall should then be all we need to detect the Vector extension. +// However, some RISC-V board manufacturers ship boards with an older kernel on top of which +// they have back-ported various versions of the Vector extension patches but not the riscv_hwprobe +// patches. These kernels advertise support for the Vector extension using HWCAP. Falling +// back to HWCAP to detect the Vector extension, if riscv_hwprobe is not available, or simply not +// bothering with riscv_hwprobe at all and just using HWCAP may then seem like an attractive option. +// +// Unfortunately, simply checking the 'V' bit in AT_HWCAP will not work as this bit is used by +// RISC-V board and cloud instance providers to mean different things. The Lichee Pi 4A board +// and the Scaleway RV1 cloud instances use the 'V' bit to advertise their support for the unratified +// 0.7.1 version of the Vector Specification. The Banana Pi BPI-F3 and the CanMV-K230 board use +// it to advertise support for 1.0 of the Vector extension. Versions 0.7.1 and 1.0 of the Vector +// extension are binary incompatible. 
HWCAP can then not be used in isolation to populate the +// HasV field as this field indicates that the underlying CPU is compatible with RVV 1.0. +// +// There is a way at runtime to distinguish between versions 0.7.1 and 1.0 of the Vector +// specification by issuing a RVV 1.0 vsetvli instruction and checking the vill bit of the vtype +// register. This check would allow us to safely detect version 1.0 of the Vector extension +// with HWCAP, if riscv_hwprobe were not available. However, the check cannot +// be added until the assembler supports the Vector instructions. +// +// Note the riscv_hwprobe syscall does not suffer from these ambiguities by design as all of the +// extensions it advertises support for are explicitly versioned. It's also worth noting that +// the riscv_hwprobe syscall is the only way to detect multi-letter RISC-V extensions, e.g., Zba. +// These cannot be detected using HWCAP and so riscv_hwprobe must be used to detect the majority +// of RISC-V extensions. +// +// Please see https://docs.kernel.org/arch/riscv/hwprobe.html for more information. + +// golang.org/x/sys/cpu is not allowed to depend on golang.org/x/sys/unix so we must +// reproduce the constants, types and functions needed to make the riscv_hwprobe syscall +// here. + +const ( + // Copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. + riscv_HWPROBE_KEY_IMA_EXT_0 = 0x4 + riscv_HWPROBE_IMA_C = 0x2 + riscv_HWPROBE_IMA_V = 0x4 + riscv_HWPROBE_EXT_ZBA = 0x8 + riscv_HWPROBE_EXT_ZBB = 0x10 + riscv_HWPROBE_EXT_ZBS = 0x20 + riscv_HWPROBE_KEY_CPUPERF_0 = 0x5 + riscv_HWPROBE_MISALIGNED_FAST = 0x3 + riscv_HWPROBE_MISALIGNED_MASK = 0x7 +) + +const ( + // sys_RISCV_HWPROBE is copied from golang.org/x/sys/unix/zsysnum_linux_riscv64.go. + sys_RISCV_HWPROBE = 258 +) + +// riscvHWProbePairs is copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. +type riscvHWProbePairs struct { + key int64 + value uint64 +} + +const ( + // CPU features + hwcap_RISCV_ISA_C = 1 << ('C' - 'A') +) + +func doinit() { + // A slice of key/value pair structures is passed to the RISCVHWProbe syscall. The key + // field should be initialised with one of the key constants defined above, e.g., + // RISCV_HWPROBE_KEY_IMA_EXT_0. The syscall will set the value field to the appropriate value. + // If the kernel does not recognise a key it will set the key field to -1 and the value field to 0. + + pairs := []riscvHWProbePairs{ + {riscv_HWPROBE_KEY_IMA_EXT_0, 0}, + {riscv_HWPROBE_KEY_CPUPERF_0, 0}, + } + + // This call only indicates that extensions are supported if they are implemented on all cores. + if riscvHWProbe(pairs, 0) { + if pairs[0].key != -1 { + v := uint(pairs[0].value) + RISCV64.HasC = isSet(v, riscv_HWPROBE_IMA_C) + RISCV64.HasV = isSet(v, riscv_HWPROBE_IMA_V) + RISCV64.HasZba = isSet(v, riscv_HWPROBE_EXT_ZBA) + RISCV64.HasZbb = isSet(v, riscv_HWPROBE_EXT_ZBB) + RISCV64.HasZbs = isSet(v, riscv_HWPROBE_EXT_ZBS) + } + if pairs[1].key != -1 { + v := pairs[1].value & riscv_HWPROBE_MISALIGNED_MASK + RISCV64.HasFastMisaligned = v == riscv_HWPROBE_MISALIGNED_FAST + } + } + + // Let's double check with HWCAP if the C extension does not appear to be supported. + // This may happen if we're running on a kernel older than 6.4. + + if !RISCV64.HasC { + RISCV64.HasC = isSet(hwCap, hwcap_RISCV_ISA_C) + } +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} + +// riscvHWProbe is a simplified version of the generated wrapper function found in +// golang.org/x/sys/unix/zsyscall_linux_riscv64.go. 
We simplify it by removing the +// cpuCount and cpus parameters which we do not need. We always want to pass 0 for +// these parameters here so the kernel only reports the extensions that are present +// on all cores. +func riscvHWProbe(pairs []riscvHWProbePairs, flags uint) bool { + var _zero uintptr + var p0 unsafe.Pointer + if len(pairs) > 0 { + p0 = unsafe.Pointer(&pairs[0]) + } else { + p0 = unsafe.Pointer(&_zero) + } + + _, _, e1 := syscall.Syscall6(sys_RISCV_HWPROBE, uintptr(p0), uintptr(len(pairs)), uintptr(0), uintptr(0), uintptr(flags), 0) + return e1 == 0 +} diff --git a/terraform/providers/google/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/terraform/providers/google/vendor/golang.org/x/sys/cpu/cpu_riscv64.go index 7f0c79c004b..aca3199c911 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/cpu/cpu_riscv64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -8,4 +8,13 @@ package cpu const cacheLineSize = 64 -func initOptions() {} +func initOptions() { + options = []option{ + {Name: "fastmisaligned", Feature: &RISCV64.HasFastMisaligned}, + {Name: "c", Feature: &RISCV64.HasC}, + {Name: "v", Feature: &RISCV64.HasV}, + {Name: "zba", Feature: &RISCV64.HasZba}, + {Name: "zbb", Feature: &RISCV64.HasZbb}, + {Name: "zbs", Feature: &RISCV64.HasZbs}, + } +} diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/README.md b/terraform/providers/google/vendor/golang.org/x/sys/unix/README.md index 7d3c060e122..6e08a76a716 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/README.md +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/README.md @@ -156,7 +156,7 @@ from the generated architecture-specific files listed below, and merge these into a common file for each OS. The merge is performed in the following steps: -1. Construct the set of common code that is idential in all architecture-specific files. +1. Construct the set of common code that is identical in all architecture-specific files. 2. Write this common code to the merged file. 3. Remove the common code from all architecture-specific files. 
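A consumer-side sketch of the new RISC-V capability flags exposed above; they are populated only on riscv64 Linux and read as false elsewhere:

    package main

    import (
        "fmt"

        "golang.org/x/sys/cpu"
    )

    func main() {
        // Filled in by doinit via riscv_hwprobe, or via HWCAP for the C extension.
        if cpu.RISCV64.HasV && cpu.RISCV64.HasZbb {
            fmt.Println("vector + bit-manipulation fast path available")
        }
        fmt.Println("fast misaligned access:", cpu.RISCV64.HasFastMisaligned)
    }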
diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/mkerrors.sh b/terraform/providers/google/vendor/golang.org/x/sys/unix/mkerrors.sh index 4ed2e488b61..ac54ecaba0a 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -58,6 +58,7 @@ includes_Darwin=' #define _DARWIN_USE_64_BIT_INODE #define __APPLE_USE_RFC_3542 #include +#include #include #include #include @@ -551,6 +552,7 @@ ccflags="$@" $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || + $2 ~ /^(CONNECT|SAE)_/ || $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || @@ -654,7 +656,7 @@ errors=$( signals=$( echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort ) @@ -664,7 +666,7 @@ echo '#include ' | $CC -x c - -E -dM $ccflags | sort >_error.grep echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort >_signal.grep echo '// mkerrors.sh' "$@" diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/mremap.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/mremap.go index fd45fe529da..3a5e776f895 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/mremap.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/mremap.go @@ -50,3 +50,8 @@ func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data [ func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { return mapper.Mremap(oldData, newLength, flags) } + +func MremapPtr(oldAddr unsafe.Pointer, oldSize uintptr, newAddr unsafe.Pointer, newSize uintptr, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mremap(uintptr(oldAddr), oldSize, newSize, flags, uintptr(newAddr)) + return unsafe.Pointer(xaddr), err +} diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_aix.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_aix.go index 67ce6cef2d5..6f15ba1eaff 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -360,7 +360,7 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, var status _C_int var r Pid_t err = ERESTART - // AIX wait4 may return with ERESTART errno, while the processus is still + // AIX wait4 may return with ERESTART errno, while the process is still // active. 
for err == ERESTART { r, err = wait4(Pid_t(pid), &status, options, rusage) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_darwin.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_darwin.go index 59542a897d2..099867deede 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -402,6 +402,18 @@ func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error { return ioctlPtr(fd, SIOCSIFMTU, unsafe.Pointer(ifreq)) } +//sys renamexNp(from string, to string, flag uint32) (err error) + +func RenamexNp(from string, to string, flag uint32) (err error) { + return renamexNp(from, to, flag) +} + +//sys renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) + +func RenameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { + return renameatxNp(fromfd, from, tofd, to, flag) +} + //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL func Uname(uname *Utsname) error { @@ -542,6 +554,55 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { } } +//sys pthread_chdir_np(path string) (err error) + +func PthreadChdir(path string) (err error) { + return pthread_chdir_np(path) +} + +//sys pthread_fchdir_np(fd int) (err error) + +func PthreadFchdir(fd int) (err error) { + return pthread_fchdir_np(fd) +} + +// Connectx calls connectx(2) to initiate a connection on a socket. +// +// srcIf, srcAddr, and dstAddr are filled into a [SaEndpoints] struct and passed as the endpoints argument. +// +// - srcIf is the optional source interface index. 0 means unspecified. +// - srcAddr is the optional source address. nil means unspecified. +// - dstAddr is the destination address. +// +// On success, Connectx returns the number of bytes enqueued for transmission. 
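A hypothetical call sketch for the new darwin Connectx wrapper below; the socket setup and destination are placeholders, and the SAE_* and CONNECT_* constants come from the zerrors_darwin files later in this patch:

    package main

    import "golang.org/x/sys/unix"

    // dialIdempotent is a hypothetical darwin-only helper (illustration only).
    func dialIdempotent(fd int) (uintptr, error) {
        dst := &unix.SockaddrInet4{Port: 443, Addr: [4]byte{192, 0, 2, 1}} // TEST-NET-1
        // 0 source interface and nil source address mean "unspecified".
        return unix.Connectx(fd, 0, nil, dst,
            unix.SAE_ASSOCID_ANY, unix.CONNECT_DATA_IDEMPOTENT, nil, nil)
    }

    func main() {
        fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
        if err != nil {
            return
        }
        defer unix.Close(fd)
        _, _ = dialIdempotent(fd)
    }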
+func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocID, flags uint32, iov []Iovec, connid *SaeConnID) (n uintptr, err error) { + endpoints := SaEndpoints{ + Srcif: srcIf, + } + + if srcAddr != nil { + addrp, addrlen, err := srcAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Srcaddr = (*RawSockaddr)(addrp) + endpoints.Srcaddrlen = uint32(addrlen) + } + + if dstAddr != nil { + addrp, addrlen, err := dstAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Dstaddr = (*RawSockaddr)(addrp) + endpoints.Dstaddrlen = uint32(addrlen) + } + + err = connectx(fd, &endpoints, associd, flags, iov, &n, connid) + return +} + +//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_hurd.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_hurd.go index ba46651f8e3..a6a2d2fc2b9 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -11,6 +11,7 @@ package unix int ioctl(int, unsigned long int, uintptr_t); */ import "C" +import "unsafe" func ioctl(fd int, req uint, arg uintptr) (err error) { r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg)) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux.go index 5682e2628ad..f08abd434ff 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1295,6 +1295,48 @@ func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) { return &value, err } +// GetsockoptTCPCCVegasInfo returns algorithm specific congestion control information for a socket using the "vegas" +// algorithm. +// +// The socket's congestion control algorithm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCVegasInfo(fd, level, opt int) (*TCPVegasInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPVegasInfo)(unsafe.Pointer(&value[0])) + return out, err +} + +// GetsockoptTCPCCDCTCPInfo returns algorithm specific congestion control information for a socket using the "dctcp" +// algorithm. +// +// The socket's congestion control algorithm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCDCTCPInfo(fd, level, opt int) (*TCPDCTCPInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPDCTCPInfo)(unsafe.Pointer(&value[0])) + return out, err +} + +// GetsockoptTCPCCBBRInfo returns algorithm specific congestion control information for a socket using the "bbr" +// algorithm.
+// +// The socket's congestion control algorithm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCBBRInfo(fd, level, opt int) (*TCPBBRInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPBBRInfo)(unsafe.Pointer(&value[0])) + return out, err +} + // GetsockoptString returns the string value of the socket option opt for the // socket associated with fd at the given socket level. func GetsockoptString(fd, level, opt int) (string, error) { @@ -1959,7 +2001,26 @@ func Getpgrp() (pid int) { //sysnb Getpid() (pid int) //sysnb Getppid() (ppid int) //sys Getpriority(which int, who int) (prio int, err error) -//sys Getrandom(buf []byte, flags int) (n int, err error) + +func Getrandom(buf []byte, flags int) (n int, err error) { + vdsoRet, supported := vgetrandom(buf, uint32(flags)) + if supported { + if vdsoRet < 0 { + return 0, errnoErr(syscall.Errno(-vdsoRet)) + } + return vdsoRet, nil + } + var p *byte + if len(buf) > 0 { + p = &buf[0] + } + r, _, e := Syscall(SYS_GETRANDOM, uintptr(unsafe.Pointer(p)), uintptr(len(buf)), uintptr(flags)) + if e != 0 { + return 0, errnoErr(e) + } + return int(r), nil +} + //sysnb Getrusage(who int, rusage *Rusage) (err error) //sysnb Getsid(pid int) (sid int, err error) //sysnb Gettid() (tid int) @@ -2592,3 +2653,4 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { } //sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) +//sys Mseal(b []byte, flags uint) (err error) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index cf2ee6c75ef..745e5c7e6c0 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -182,3 +182,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index 3d0e98451f8..dd2262a4079 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -214,3 +214,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 6f5a288944d..8cf3670bda6 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -187,3 +187,5 @@ func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error } return riscvHWProbe(pairs, setSize, set, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git
a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_openbsd.go index b25343c71a4..b86ded549c6 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -293,6 +293,7 @@ func Uname(uname *Utsname) error { //sys Mkfifoat(dirfd int, path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) //sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) +//sys Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_unix.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_unix.go index 77081de8c7d..4e92e5aa406 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -154,6 +154,15 @@ func Munmap(b []byte) (err error) { return mapper.Munmap(b) } +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset) + return unsafe.Pointer(xaddr), err +} + +func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) { + return mapper.munmap(uintptr(addr), length) +} + func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/vgetrandom_linux.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/vgetrandom_linux.go new file mode 100644 index 00000000000..07ac8e09d1b --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/vgetrandom_linux.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && go1.24 + +package unix + +import _ "unsafe" + +//go:linkname vgetrandom runtime.vgetrandom +//go:noescape +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go new file mode 100644 index 00000000000..297e97bce92 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go @@ -0,0 +1,11 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
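Functionally, the two vgetrandom files just vendored are the other half of the Getrandom rewrite a few hunks up: on linux builds with Go >= 1.24 the vDSO fast path is linked in, and everywhere else the stub reports unsupported so the raw SYS_GETRANDOM syscall is kept. Callers need no changes; a runnable sketch, illustrative only and not part of the diff:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	key := make([]byte, 32)
	// Getrandom now tries the vDSO first and silently falls back to the
	// syscall; flags=0 gives urandom-pool semantics, and requests of this
	// size are assumed to complete in one call.
	n, err := unix.Getrandom(key, 0)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d random bytes: %x\n", n, key[:n])
}
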
+ +//go:build !linux || !go1.24 + +package unix + +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) { + return -1, false +} diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index e40fa85245f..d73c4652e6c 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1169,6 +1172,11 @@ const ( PT_WRITE_D = 0x5 PT_WRITE_I = 0x4 PT_WRITE_U = 0x6 + RENAME_EXCL = 0x4 + RENAME_NOFOLLOW_ANY = 0x10 + RENAME_RESERVED1 = 0x8 + RENAME_SECLUDE = 0x1 + RENAME_SWAP = 0x2 RLIMIT_AS = 0x5 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1260,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index bb02aa6c056..4a55a400588 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1169,6 +1172,11 @@ const ( PT_WRITE_D = 0x5 PT_WRITE_I = 0x4 PT_WRITE_U = 0x6 + RENAME_EXCL = 0x4 + RENAME_NOFOLLOW_ANY = 0x10 + RENAME_RESERVED1 = 0x8 + RENAME_SECLUDE = 0x1 + RENAME_SWAP = 0x2 RLIMIT_AS = 0x5 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1260,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux.go index 877a62b479a..de3b462489c 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -457,6 +457,7 @@ const ( B600 = 0x8 B75 = 0x2 B9600 = 0xd + BCACHEFS_SUPER_MAGIC = 0xca451a4e BDEVFS_MAGIC = 0x62646576 BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d @@ -494,6 +495,7 @@ const ( BPF_F_TEST_REG_INVARIANTS = 0x80 BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 + BPF_F_TEST_SKB_CHECKSUM_COMPLETE = 0x4 BPF_F_TEST_STATE_FREQ = 0x8 BPF_F_TEST_XDP_LIVE_FRAMES = 0x2 BPF_F_XDP_DEV_BOUND_ONLY = 0x40 @@ -928,6 +930,7 @@ const ( EPOLL_CTL_ADD = 0x1 EPOLL_CTL_DEL = 0x2 EPOLL_CTL_MOD = 0x3 + EPOLL_IOC_TYPE = 0x8a EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2 ESP_V4_FLOW = 0xa ESP_V6_FLOW = 0xc @@ -941,9 +944,6 @@ const ( ETHTOOL_FEC_OFF = 0x4 ETHTOOL_FEC_RS = 0x8 ETHTOOL_FLAG_ALL = 0x7 - ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 - ETHTOOL_FLAG_OMIT_REPLY = 0x2 - 
ETHTOOL_FLAG_STATS = 0x4 ETHTOOL_FLASHDEV = 0x33 ETHTOOL_FLASH_MAX_FILENAME = 0x80 ETHTOOL_FWVERS_LEN = 0x20 @@ -1705,6 +1705,7 @@ const ( KEXEC_ARCH_S390 = 0x160000 KEXEC_ARCH_SH = 0x2a0000 KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_CRASH_HOTPLUG_SUPPORT = 0x8 KEXEC_FILE_DEBUG = 0x8 KEXEC_FILE_NO_INITRAMFS = 0x4 KEXEC_FILE_ON_CRASH = 0x2 @@ -1780,6 +1781,7 @@ const ( KEY_SPEC_USER_KEYRING = -0x4 KEY_SPEC_USER_SESSION_KEYRING = -0x5 LANDLOCK_ACCESS_FS_EXECUTE = 0x1 + LANDLOCK_ACCESS_FS_IOCTL_DEV = 0x8000 LANDLOCK_ACCESS_FS_MAKE_BLOCK = 0x800 LANDLOCK_ACCESS_FS_MAKE_CHAR = 0x40 LANDLOCK_ACCESS_FS_MAKE_DIR = 0x80 @@ -1861,6 +1863,19 @@ const ( MAP_FILE = 0x0 MAP_FIXED = 0x10 MAP_FIXED_NOREPLACE = 0x100000 + MAP_HUGE_16GB = 0x88000000 + MAP_HUGE_16KB = 0x38000000 + MAP_HUGE_16MB = 0x60000000 + MAP_HUGE_1GB = 0x78000000 + MAP_HUGE_1MB = 0x50000000 + MAP_HUGE_256MB = 0x70000000 + MAP_HUGE_2GB = 0x7c000000 + MAP_HUGE_2MB = 0x54000000 + MAP_HUGE_32MB = 0x64000000 + MAP_HUGE_512KB = 0x4c000000 + MAP_HUGE_512MB = 0x74000000 + MAP_HUGE_64KB = 0x40000000 + MAP_HUGE_8MB = 0x5c000000 MAP_HUGE_MASK = 0x3f MAP_HUGE_SHIFT = 0x1a MAP_PRIVATE = 0x2 @@ -1908,6 +1923,7 @@ const ( MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 MNT_ID_REQ_SIZE_VER0 = 0x18 + MNT_ID_REQ_SIZE_VER1 = 0x20 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2173,7 +2189,7 @@ const ( NFT_REG_SIZE = 0x10 NFT_REJECT_ICMPX_MAX = 0x3 NFT_RT_MAX = 0x4 - NFT_SECMARK_CTX_MAXLEN = 0x100 + NFT_SECMARK_CTX_MAXLEN = 0x1000 NFT_SET_MAXNAMELEN = 0x100 NFT_SOCKET_MAX = 0x3 NFT_TABLE_F_MASK = 0x7 @@ -2342,9 +2358,11 @@ const ( PERF_MEM_LVLNUM_IO = 0xa PERF_MEM_LVLNUM_L1 = 0x1 PERF_MEM_LVLNUM_L2 = 0x2 + PERF_MEM_LVLNUM_L2_MHB = 0x5 PERF_MEM_LVLNUM_L3 = 0x3 PERF_MEM_LVLNUM_L4 = 0x4 PERF_MEM_LVLNUM_LFB = 0xc + PERF_MEM_LVLNUM_MSC = 0x6 PERF_MEM_LVLNUM_NA = 0xf PERF_MEM_LVLNUM_PMEM = 0xe PERF_MEM_LVLNUM_RAM = 0xd @@ -2417,6 +2435,7 @@ const ( PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROCFS_IOCTL_MAGIC = 'f' PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 @@ -2498,6 +2517,23 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_PPC_DEXCR_CTRL_CLEAR = 0x4 + PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10 + PR_PPC_DEXCR_CTRL_EDITABLE = 0x1 + PR_PPC_DEXCR_CTRL_MASK = 0x1f + PR_PPC_DEXCR_CTRL_SET = 0x2 + PR_PPC_DEXCR_CTRL_SET_ONEXEC = 0x8 + PR_PPC_DEXCR_IBRTPD = 0x1 + PR_PPC_DEXCR_NPHIE = 0x3 + PR_PPC_DEXCR_SBHE = 0x0 + PR_PPC_DEXCR_SRAPD = 0x2 + PR_PPC_GET_DEXCR = 0x48 + PR_PPC_SET_DEXCR = 0x49 + PR_RISCV_CTX_SW_FENCEI_OFF = 0x1 + PR_RISCV_CTX_SW_FENCEI_ON = 0x0 + PR_RISCV_SCOPE_PER_PROCESS = 0x0 + PR_RISCV_SCOPE_PER_THREAD = 0x1 + PR_RISCV_SET_ICACHE_FLUSH_CTX = 0x47 PR_RISCV_V_GET_CONTROL = 0x46 PR_RISCV_V_SET_CONTROL = 0x45 PR_RISCV_V_VSTATE_CTRL_CUR_MASK = 0x3 @@ -2902,11 +2938,12 @@ const ( RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 RWF_APPEND = 0x10 + RWF_ATOMIC = 0x40 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x3f + RWF_SUPPORTED = 0x7f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 @@ -3179,6 +3216,7 @@ const ( STATX_ATTR_MOUNT_ROOT = 0x2000 STATX_ATTR_NODUMP = 0x40 STATX_ATTR_VERITY = 0x100000 + STATX_ATTR_WRITE_ATOMIC = 0x400000 STATX_BASIC_STATS = 0x7ff STATX_BLOCKS = 0x400 STATX_BTIME = 0x800 @@ -3192,8 +3230,10 @@ const ( STATX_MTIME = 0x40 STATX_NLINK = 0x4 STATX_SIZE = 0x200 + STATX_SUBVOL = 0x8000 STATX_TYPE = 0x1 STATX_UID = 0x8 + STATX_WRITE_ATOMIC = 0x10000 STATX__RESERVED = 
0x80000000 SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 @@ -3592,6 +3632,7 @@ const ( XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 XDP_UMEM_PGOFF_FILL_RING = 0x100000000 XDP_UMEM_REG = 0x4 + XDP_UMEM_TX_METADATA_LEN = 0x4 XDP_UMEM_TX_SW_CSUM = 0x2 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index e4bc0bd57c7..8aa6d77c018 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -151,9 +153,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 689317afdbf..da428f42533 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -151,9 +153,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 5cca668ac30..bf45bfec78a 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -148,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 14270508b04..71c67162b73 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -78,6 +78,8 @@ 
const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 ESR_MAGIC = 0x45535201 EXTPROC = 0x10000 @@ -152,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 28e39afdcb4..9476628fa02 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -152,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index cd66e92cb42..b9e85f3cf0c 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -148,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index c1595eba78e..a48b68a7647 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -148,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go 
b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ee9456b0da7..ea00e8522a1 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -148,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 8cfca81e1b5..91c64687176 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -148,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 60b0deb3af7..8cbf38d6390 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 @@ -150,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index f90aa7281bf..a2df7341917 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 @@ -150,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT 
= 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index ba9e0150338..24791379233 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 @@ -150,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 07cdfd6e9fd..d265f146ee0 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -148,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 2f1dd214a74..3f2d6443964 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -148,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index f40519d9018..5d8b727a1c8 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -82,6 +82,8 @@ const ( EFD_CLOEXEC = 0x400000 EFD_NONBLOCK = 0x4000 EMT_TAGOVF = 
0x1 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x400000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -153,9 +155,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index da08b2ab3d9..1ec2b1407b1 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -581,6 +581,8 @@ const ( AT_EMPTY_PATH = 0x1000 AT_REMOVEDIR = 0x200 RENAME_NOREPLACE = 1 << 0 + ST_RDONLY = 1 + ST_NOSUID = 2 ) const ( diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index ccb02f240a4..24b346e1a35 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -740,6 +740,54 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func renamexNp(from string, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_renamex_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renamex_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renamex_np renamex_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_renameatx_np_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), uintptr(flag), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renameatx_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameatx_np renameatx_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -760,6 +808,59 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np 
"/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 8b8bb284028..ebd213100b3 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -223,11 +223,36 @@ TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) +TEXT libc_renamex_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renamex_np(SB) +GLOBL ·libc_renamex_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renamex_np_trampoline_addr(SB)/8, $libc_renamex_np_trampoline<>(SB) + +TEXT libc_renameatx_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameatx_np(SB) +GLOBL ·libc_renameatx_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameatx_np_trampoline_addr(SB)/8, $libc_renameatx_np_trampoline<>(SB) + TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL 
·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 1b40b997b52..824b9c2d5e0 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -740,6 +740,54 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func renamexNp(from string, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_renamex_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renamex_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renamex_np renamex_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_renameatx_np_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), uintptr(flag), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renameatx_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameatx_np renameatx_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -760,6 +808,59 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), 
uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 08362c1ab74..4f178a22934 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -223,11 +223,36 @@ TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) +TEXT libc_renamex_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renamex_np(SB) +GLOBL ·libc_renamex_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renamex_np_trampoline_addr(SB)/8, $libc_renamex_np_trampoline<>(SB) + +TEXT libc_renameatx_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameatx_np(SB) +GLOBL ·libc_renameatx_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameatx_np_trampoline_addr(SB)/8, $libc_renameatx_np_trampoline<>(SB) + TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 87d8612a1dc..af30da55780 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -971,23 +971,6 @@ func Getpriority(which int, who int) (prio int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getrusage(who int, rusage *Rusage) 
(err error) { _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { @@ -2229,3 +2212,19 @@ func Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mseal(b []byte, flags uint) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSEAL, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 9dc42410b78..1851df14e87 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 41b5617316c..0b43c693656 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mount_trampoline_addr(SB)/4, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 0d3a0751cd4..e1ec0dbe4ec 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + 
_p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 4019a656f6d..880c6d6e316 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index c39f7776db3..7c8452a63e9 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index ac4af24f908..b8ef95b0fa1 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL 
·libc_mount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mount_trampoline_addr(SB)/4, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 57571d072fe..2ffdf861f75 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index f77d532121b..2af3b5c762f 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index e62963e67e2..1da08d52675 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + 
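Every OpenBSD arch in this bump gets the same generated Mount wrapper and libc trampoline seen here. A caller-side sketch, illustrative only and not part of the diff (the remountNoatime name, mount point, and flag choice are assumptions):

package mountsketch

import "golang.org/x/sys/unix"

// remountNoatime updates an existing ffs mount in place. The data argument
// may point to fs-specific arguments; nil is assumed acceptable when none
// are needed.
func remountNoatime() error {
	return unix.Mount("ffs", "/mnt/data", unix.MNT_UPDATE|unix.MNT_NOATIME, nil)
}
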
+//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index fae140b62c9..b7a251353b0 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 00831354c82..6e85b0aac95 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index 9d1e0ff06d0..f15dadf0552 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -555,6 +555,12 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mount(SB) + RET +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_nanosleep(SB) RET diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go 
b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 79029ed5848..28b487df251 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index da115f9a4b6..1e7f321e436 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 53aef5dc58d..524b0820cbc 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -457,4 +457,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 71d524763d3..f485dbf4565 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -341,6 +341,7 @@ const ( SYS_STATX = 332 SYS_IO_PGETEVENTS = 333 SYS_RSEQ = 334 + SYS_URETPROBE = 335 SYS_PIDFD_SEND_SIGNAL = 424 SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 @@ -379,4 +380,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 
c747706131c..70b35bf3b09 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -421,4 +421,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index f96e214f6d4..1893e2fe884 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -85,7 +85,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 @@ -324,4 +324,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 28425346cf1..16a4017da0a 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -84,6 +84,8 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 + SYS_NEWFSTATAT = 79 + SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 SYS_FDATASYNC = 83 @@ -318,4 +320,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index d0953018dae..7e567f1efff 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -441,4 +441,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 4459 SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 + SYS_MSEAL = 4462 ) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 295c7f4b818..38ae55e5ef8 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -371,4 +371,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 5459 SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 + SYS_MSEAL = 5462 ) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index d1a9eaca7a4..55e92e60a82 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -371,4 +371,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 5459 SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 + SYS_MSEAL = 5462 ) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index bec157c39fd..60658d6a021 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ 
b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -441,4 +441,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 4459 SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 + SYS_MSEAL = 4462 ) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 7ee7bdc435c..e203e8a7ed4 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -448,4 +448,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index fad1f25b449..5944b97d546 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -420,4 +420,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 7d3e16357d6..c66d416dad1 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -420,4 +420,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 0ed53ad9f7e..a5459e766f5 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -84,7 +84,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 @@ -325,4 +325,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 2fba04ad500..01d86825bb9 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -386,4 +386,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 621d00d741b..7b703e77cda 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -399,4 +399,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git 
a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 091d107f3a5..d003c3d4378 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 28ff4ef74d0..0d45a941aae 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 6cbd094a3aa..51e13eb055f 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -625,6 +625,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 7c03b6ee77f..d002d8ef3cc 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -630,6 +630,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 422107ee8b1..3f863d898dd 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -616,6 +616,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 505a12acfd9..61c72931066 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -610,6 +610,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go 
b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index cc986c79006..b5d17414f03 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -612,6 +612,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux.go index 4740b834854..3a69e454962 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -87,30 +87,35 @@ type StatxTimestamp struct { } type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - Mnt_id uint64 - Dio_mem_align uint32 - Dio_offset_align uint32 - _ [12]uint64 + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + Mnt_id uint64 + Dio_mem_align uint32 + Dio_offset_align uint32 + Subvol uint64 + Atomic_write_unit_min uint32 + Atomic_write_unit_max uint32 + Atomic_write_segments_max uint32 + _ [1]uint32 + _ [9]uint64 } type Fsid struct { @@ -515,6 +520,29 @@ type TCPInfo struct { Total_rto_time uint32 } +type TCPVegasInfo struct { + Enabled uint32 + Rttcnt uint32 + Rtt uint32 + Minrtt uint32 +} + +type TCPDCTCPInfo struct { + Enabled uint16 + Ce_state uint16 + Alpha uint32 + Ab_ecn uint32 + Ab_tot uint32 +} + +type TCPBBRInfo struct { + Bw_lo uint32 + Bw_hi uint32 + Min_rtt uint32 + Pacing_gain uint32 + Cwnd_gain uint32 +} + type CanFilter struct { Id uint32 Mask uint32 @@ -556,6 +584,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0xf8 + SizeofTCPCCInfo = 0x14 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -2485,7 +2514,7 @@ type XDPMmapOffsets struct { type XDPUmemReg struct { Addr uint64 Len uint64 - Chunk_size uint32 + Size uint32 Headroom uint32 Flags uint32 Tx_metadata_len uint32 @@ -3473,7 +3502,7 @@ const ( DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 DEVLINK_PORT_FN_ATTR_CAPS = 0x4 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x6 ) type FsverityDigest struct { @@ -3765,7 +3794,7 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2b + ETHTOOL_MSG_USER_MAX = 0x2c ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3805,7 +3834,10 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2b + ETHTOOL_MSG_KERNEL_MAX = 0x2c + ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 + ETHTOOL_FLAG_OMIT_REPLY = 0x2 + ETHTOOL_FLAG_STATS = 0x4 ETHTOOL_A_HEADER_UNSPEC = 0x0 ETHTOOL_A_HEADER_DEV_INDEX = 0x1 
ETHTOOL_A_HEADER_DEV_NAME = 0x2 @@ -3947,7 +3979,7 @@ const ( ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17 ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18 ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19 - ETHTOOL_A_COALESCE_MAX = 0x1c + ETHTOOL_A_COALESCE_MAX = 0x1e ETHTOOL_A_PAUSE_UNSPEC = 0x0 ETHTOOL_A_PAUSE_HEADER = 0x1 ETHTOOL_A_PAUSE_AUTONEG = 0x2 @@ -3975,7 +4007,7 @@ const ( ETHTOOL_A_TSINFO_TX_TYPES = 0x3 ETHTOOL_A_TSINFO_RX_FILTERS = 0x4 ETHTOOL_A_TSINFO_PHC_INDEX = 0x5 - ETHTOOL_A_TSINFO_MAX = 0x5 + ETHTOOL_A_TSINFO_MAX = 0x6 ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_HEADER = 0x1 ETHTOOL_A_CABLE_TEST_MAX = 0x1 @@ -4605,7 +4637,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14a + NL80211_ATTR_MAX = 0x14c NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5209,7 +5241,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x20 + NL80211_FREQUENCY_ATTR_MAX = 0x21 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 15adc04142f..ad05b51a603 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -727,6 +727,37 @@ const ( RISCV_HWPROBE_EXT_ZBA = 0x8 RISCV_HWPROBE_EXT_ZBB = 0x10 RISCV_HWPROBE_EXT_ZBS = 0x20 + RISCV_HWPROBE_EXT_ZICBOZ = 0x40 + RISCV_HWPROBE_EXT_ZBC = 0x80 + RISCV_HWPROBE_EXT_ZBKB = 0x100 + RISCV_HWPROBE_EXT_ZBKC = 0x200 + RISCV_HWPROBE_EXT_ZBKX = 0x400 + RISCV_HWPROBE_EXT_ZKND = 0x800 + RISCV_HWPROBE_EXT_ZKNE = 0x1000 + RISCV_HWPROBE_EXT_ZKNH = 0x2000 + RISCV_HWPROBE_EXT_ZKSED = 0x4000 + RISCV_HWPROBE_EXT_ZKSH = 0x8000 + RISCV_HWPROBE_EXT_ZKT = 0x10000 + RISCV_HWPROBE_EXT_ZVBB = 0x20000 + RISCV_HWPROBE_EXT_ZVBC = 0x40000 + RISCV_HWPROBE_EXT_ZVKB = 0x80000 + RISCV_HWPROBE_EXT_ZVKG = 0x100000 + RISCV_HWPROBE_EXT_ZVKNED = 0x200000 + RISCV_HWPROBE_EXT_ZVKNHA = 0x400000 + RISCV_HWPROBE_EXT_ZVKNHB = 0x800000 + RISCV_HWPROBE_EXT_ZVKSED = 0x1000000 + RISCV_HWPROBE_EXT_ZVKSH = 0x2000000 + RISCV_HWPROBE_EXT_ZVKT = 0x4000000 + RISCV_HWPROBE_EXT_ZFH = 0x8000000 + RISCV_HWPROBE_EXT_ZFHMIN = 0x10000000 + RISCV_HWPROBE_EXT_ZIHINTNTL = 0x20000000 + RISCV_HWPROBE_EXT_ZVFH = 0x40000000 + RISCV_HWPROBE_EXT_ZVFHMIN = 0x80000000 + RISCV_HWPROBE_EXT_ZFA = 0x100000000 + RISCV_HWPROBE_EXT_ZTSO = 0x200000000 + RISCV_HWPROBE_EXT_ZACAS = 0x400000000 + RISCV_HWPROBE_EXT_ZICOND = 0x800000000 + RISCV_HWPROBE_EXT_ZIHINTPAUSE = 0x1000000000 RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1 @@ -734,4 +765,6 @@ const ( RISCV_HWPROBE_MISALIGNED_FAST = 0x3 RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4 RISCV_HWPROBE_MISALIGNED_MASK = 0x7 + RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE = 0x6 + RISCV_HWPROBE_WHICH_CPUS = 0x1 ) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/windows/dll_windows.go b/terraform/providers/google/vendor/golang.org/x/sys/windows/dll_windows.go index 115341fba66..4e613cf6335 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/windows/dll_windows.go +++ 
b/terraform/providers/google/vendor/golang.org/x/sys/windows/dll_windows.go @@ -65,7 +65,7 @@ func LoadDLL(name string) (dll *DLL, err error) { return d, nil } -// MustLoadDLL is like LoadDLL but panics if load operation failes. +// MustLoadDLL is like LoadDLL but panics if load operation fails. func MustLoadDLL(name string) *DLL { d, e := LoadDLL(name) if e != nil { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/windows/registry/key.go b/terraform/providers/google/vendor/golang.org/x/sys/windows/registry/key.go new file mode 100644 index 00000000000..fd8632444ec --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/sys/windows/registry/key.go @@ -0,0 +1,205 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +// Package registry provides access to the Windows registry. +// +// Here is a simple example, opening a registry key and reading a string value from it. +// +// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) +// if err != nil { +// log.Fatal(err) +// } +// defer k.Close() +// +// s, _, err := k.GetStringValue("SystemRoot") +// if err != nil { +// log.Fatal(err) +// } +// fmt.Printf("Windows system root is %q\n", s) +package registry + +import ( + "io" + "runtime" + "syscall" + "time" +) + +const ( + // Registry key security and access rights. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx + // for details. + ALL_ACCESS = 0xf003f + CREATE_LINK = 0x00020 + CREATE_SUB_KEY = 0x00004 + ENUMERATE_SUB_KEYS = 0x00008 + EXECUTE = 0x20019 + NOTIFY = 0x00010 + QUERY_VALUE = 0x00001 + READ = 0x20019 + SET_VALUE = 0x00002 + WOW64_32KEY = 0x00200 + WOW64_64KEY = 0x00100 + WRITE = 0x20006 +) + +// Key is a handle to an open Windows registry key. +// Keys can be obtained by calling OpenKey; there are +// also some predefined root keys such as CURRENT_USER. +// Keys can be used directly in the Windows API. +type Key syscall.Handle + +const ( + // Windows defines some predefined root keys that are always open. + // An application can use these keys as entry points to the registry. + // Normally these keys are used in OpenKey to open new keys, + // but they can also be used anywhere a Key is required. + CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT) + CURRENT_USER = Key(syscall.HKEY_CURRENT_USER) + LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE) + USERS = Key(syscall.HKEY_USERS) + CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG) + PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA) +) + +// Close closes open key k. +func (k Key) Close() error { + return syscall.RegCloseKey(syscall.Handle(k)) +} + +// OpenKey opens a new key with path name relative to key k. +// It accepts any open key, including CURRENT_USER and others, +// and returns the new key and an error. +// The access parameter specifies desired access rights to the +// key to be opened. +func OpenKey(k Key, path string, access uint32) (Key, error) { + p, err := syscall.UTF16PtrFromString(path) + if err != nil { + return 0, err + } + var subkey syscall.Handle + err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey) + if err != nil { + return 0, err + } + return Key(subkey), nil +} + +// OpenRemoteKey opens a predefined registry key on another +// computer pcname. 
The key to be opened is specified by k, but +// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS. +// If pcname is "", OpenRemoteKey returns local computer key. +func OpenRemoteKey(pcname string, k Key) (Key, error) { + var err error + var p *uint16 + if pcname != "" { + p, err = syscall.UTF16PtrFromString(`\\` + pcname) + if err != nil { + return 0, err + } + } + var remoteKey syscall.Handle + err = regConnectRegistry(p, syscall.Handle(k), &remoteKey) + if err != nil { + return 0, err + } + return Key(remoteKey), nil +} + +// ReadSubKeyNames returns the names of subkeys of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadSubKeyNames(n int) ([]string, error) { + // RegEnumKeyEx must be called repeatedly and to completion. + // During this time, this goroutine cannot migrate away from + // its current thread. See https://golang.org/issue/49320 and + // https://golang.org/issue/49466. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + names := make([]string, 0) + // Registry key size limit is 255 bytes and described there: + // https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx + buf := make([]uint16, 256) //plus extra room for terminating zero byte +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} + +// CreateKey creates a key named path under open key k. +// CreateKey returns the new key and a boolean flag that reports +// whether the key already existed. +// The access parameter specifies the access rights for the key +// to be created. +func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) { + var h syscall.Handle + var d uint32 + err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path), + 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d) + if err != nil { + return 0, false, err + } + return Key(h), d == _REG_OPENED_EXISTING_KEY, nil +} + +// DeleteKey deletes the subkey path of key k and its values. +func DeleteKey(k Key, path string) error { + return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path)) +} + +// A KeyInfo describes the statistics of a key. It is returned by Stat. +type KeyInfo struct { + SubKeyCount uint32 + MaxSubKeyLen uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte + ValueCount uint32 + MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte + MaxValueLen uint32 // longest data component among the key's values, in bytes + lastWriteTime syscall.Filetime +} + +// ModTime returns the key's last write time. +func (ki *KeyInfo) ModTime() time.Time { + return time.Unix(0, ki.lastWriteTime.Nanoseconds()) +} + +// Stat retrieves information about the open key k. 
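The key-handling API introduced in this file (OpenKey, ReadSubKeyNames, Stat) composes in the usual open/inspect/enumerate pattern. A minimal sketch of how the pieces fit together — the key path and access mask below are illustrative, not taken from the patch:

//go:build windows

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows/registry"
)

func main() {
	// Illustrative key; any readable key works.
	k, err := registry.OpenKey(registry.LOCAL_MACHINE,
		`SOFTWARE\Microsoft`, registry.ENUMERATE_SUB_KEYS|registry.QUERY_VALUE)
	if err != nil {
		log.Fatal(err)
	}
	defer k.Close()

	ki, err := k.Stat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d subkeys, %d values, last write %v\n",
		ki.SubKeyCount, ki.ValueCount, ki.ModTime())

	// n <= 0 reads all names, mirroring os.File.Readdirnames.
	names, err := k.ReadSubKeyNames(-1)
	if err != nil {
		log.Fatal(err)
	}
	for _, name := range names {
		fmt.Println(name)
	}
}
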
+func (k Key) Stat() (*KeyInfo, error) { + var ki KeyInfo + err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil, + &ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount, + &ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime) + if err != nil { + return nil, err + } + return &ki, nil +} diff --git a/terraform/providers/google/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/terraform/providers/google/vendor/golang.org/x/sys/windows/registry/mksyscall.go new file mode 100644 index 00000000000..bbf86ccf0c0 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/sys/windows/registry/mksyscall.go @@ -0,0 +1,9 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build generate + +package registry + +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go syscall.go diff --git a/terraform/providers/google/vendor/golang.org/x/sys/windows/registry/syscall.go b/terraform/providers/google/vendor/golang.org/x/sys/windows/registry/syscall.go new file mode 100644 index 00000000000..f533091c19e --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/sys/windows/registry/syscall.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +package registry + +import "syscall" + +const ( + _REG_OPTION_NON_VOLATILE = 0 + + _REG_CREATED_NEW_KEY = 1 + _REG_OPENED_EXISTING_KEY = 2 + + _ERROR_NO_MORE_ITEMS syscall.Errno = 259 +) + +func LoadRegLoadMUIString() error { + return procRegLoadMUIStringW.Find() +} + +//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW +//sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW +//sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW +//sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW +//sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW +//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW +//sys regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW + +//sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW diff --git a/terraform/providers/google/vendor/golang.org/x/sys/windows/registry/value.go b/terraform/providers/google/vendor/golang.org/x/sys/windows/registry/value.go new file mode 100644 index 00000000000..74db26b94df --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/sys/windows/registry/value.go @@ -0,0 +1,386 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build windows + +package registry + +import ( + "errors" + "io" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + // Registry value types. + NONE = 0 + SZ = 1 + EXPAND_SZ = 2 + BINARY = 3 + DWORD = 4 + DWORD_BIG_ENDIAN = 5 + LINK = 6 + MULTI_SZ = 7 + RESOURCE_LIST = 8 + FULL_RESOURCE_DESCRIPTOR = 9 + RESOURCE_REQUIREMENTS_LIST = 10 + QWORD = 11 +) + +var ( + // ErrShortBuffer is returned when the buffer was too short for the operation. + ErrShortBuffer = syscall.ERROR_MORE_DATA + + // ErrNotExist is returned when a registry key or value does not exist. + ErrNotExist = syscall.ERROR_FILE_NOT_FOUND + + // ErrUnexpectedType is returned by Get*Value when the value's type was unexpected. + ErrUnexpectedType = errors.New("unexpected key value type") +) + +// GetValue retrieves the type and data for the specified value associated +// with an open key k. It fills up buffer buf and returns the retrieved +// byte count n. If buf is too small to fit the stored value it returns +// ErrShortBuffer error along with the required buffer size n. +// If no buffer is provided, it returns true and actual buffer size n. +// If no buffer is provided, GetValue returns the value's type only. +// If the value does not exist, the error returned is ErrNotExist. +// +// GetValue is a low level function. If value's type is known, use the appropriate +// Get*Value function instead. +func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return 0, 0, err + } + var pbuf *byte + if len(buf) > 0 { + pbuf = (*byte)(unsafe.Pointer(&buf[0])) + } + l := uint32(len(buf)) + err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l) + if err != nil { + return int(l), valtype, err + } + return int(l), valtype, nil +} + +func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return nil, 0, err + } + var t uint32 + n := uint32(len(buf)) + for { + err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n) + if err == nil { + return buf[:n], t, nil + } + if err != syscall.ERROR_MORE_DATA { + return nil, 0, err + } + if n <= uint32(len(buf)) { + return nil, 0, err + } + buf = make([]byte, n) + } +} + +// GetStringValue retrieves the string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringValue returns ErrNotExist. +// If value is not SZ or EXPAND_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return "", typ, err2 + } + switch typ { + case SZ, EXPAND_SZ: + default: + return "", typ, ErrUnexpectedType + } + if len(data) == 0 { + return "", typ, nil + } + u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + return syscall.UTF16ToString(u), typ, nil +} + +// GetMUIStringValue retrieves the localized string value for +// the specified value name associated with an open key k. +// If the value name doesn't exist or the localized string value +// can't be resolved, GetMUIStringValue returns ErrNotExist. 
+// GetMUIStringValue panics if the system doesn't support +// regLoadMUIString; use LoadRegLoadMUIString to check if +// regLoadMUIString is supported before calling this function. +func (k Key) GetMUIStringValue(name string) (string, error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return "", err + } + + buf := make([]uint16, 1024) + var buflen uint32 + var pdir *uint16 + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path + + // Try to resolve the string value using the system directory as + // a DLL search path; this assumes the string value is of the form + // @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320. + + // This approach works with tzres.dll but may have to be revised + // in the future to allow callers to provide custom search paths. + + var s string + s, err = ExpandString("%SystemRoot%\\system32\\") + if err != nil { + return "", err + } + pdir, err = syscall.UTF16PtrFromString(s) + if err != nil { + return "", err + } + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed + if buflen <= uint32(len(buf)) { + break // Buffer not growing, assume race; break + } + buf = make([]uint16, buflen) + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + if err != nil { + return "", err + } + + return syscall.UTF16ToString(buf), nil +} + +// ExpandString expands environment-variable strings and replaces +// them with the values defined for the current user. +// Use ExpandString to expand EXPAND_SZ strings. +func ExpandString(value string) (string, error) { + if value == "" { + return "", nil + } + p, err := syscall.UTF16PtrFromString(value) + if err != nil { + return "", err + } + r := make([]uint16, 100) + for { + n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r))) + if err != nil { + return "", err + } + if n <= uint32(len(r)) { + return syscall.UTF16ToString(r[:n]), nil + } + r = make([]uint16, n) + } +} + +// GetStringsValue retrieves the []string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringsValue returns ErrNotExist. +// If value is not MULTI_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != MULTI_SZ { + return nil, typ, ErrUnexpectedType + } + if len(data) == 0 { + return nil, typ, nil + } + p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + if len(p) == 0 { + return nil, typ, nil + } + if p[len(p)-1] == 0 { + p = p[:len(p)-1] // remove terminating null + } + val = make([]string, 0, 5) + from := 0 + for i, c := range p { + if c == 0 { + val = append(val, string(utf16.Decode(p[from:i]))) + from = i + 1 + } + } + return val, typ, nil +} + +// GetIntegerValue retrieves the integer value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetIntegerValue returns ErrNotExist. +// If value is not DWORD or QWORD, it will return the correct value +// type and ErrUnexpectedType. 
+func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 8)) + if err2 != nil { + return 0, typ, err2 + } + switch typ { + case DWORD: + if len(data) != 4 { + return 0, typ, errors.New("DWORD value is not 4 bytes long") + } + var val32 uint32 + copy((*[4]byte)(unsafe.Pointer(&val32))[:], data) + return uint64(val32), DWORD, nil + case QWORD: + if len(data) != 8 { + return 0, typ, errors.New("QWORD value is not 8 bytes long") + } + copy((*[8]byte)(unsafe.Pointer(&val))[:], data) + return val, QWORD, nil + default: + return 0, typ, ErrUnexpectedType + } +} + +// GetBinaryValue retrieves the binary value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetBinaryValue returns ErrNotExist. +// If value is not BINARY, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != BINARY { + return nil, typ, ErrUnexpectedType + } + return data, typ, nil +} + +func (k Key) setValue(name string, valtype uint32, data []byte) error { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return err + } + if len(data) == 0 { + return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0) + } + return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data))) +} + +// SetDWordValue sets the data and type of a name value +// under key k to value and DWORD. +func (k Key) SetDWordValue(name string, value uint32) error { + return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:]) +} + +// SetQWordValue sets the data and type of a name value +// under key k to value and QWORD. +func (k Key) SetQWordValue(name string, value uint64) error { + return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:]) +} + +func (k Key) setStringValue(name string, valtype uint32, value string) error { + v, err := syscall.UTF16FromString(value) + if err != nil { + return err + } + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, valtype, buf) +} + +// SetStringValue sets the data and type of a name value +// under key k to value and SZ. The value must not contain a zero byte. +func (k Key) SetStringValue(name, value string) error { + return k.setStringValue(name, SZ, value) +} + +// SetExpandStringValue sets the data and type of a name value +// under key k to value and EXPAND_SZ. The value must not contain a zero byte. +func (k Key) SetExpandStringValue(name, value string) error { + return k.setStringValue(name, EXPAND_SZ, value) +} + +// SetStringsValue sets the data and type of a name value +// under key k to value and MULTI_SZ. The value strings +// must not contain a zero byte. +func (k Key) SetStringsValue(name string, value []string) error { + ss := "" + for _, s := range value { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return errors.New("string cannot have 0 inside") + } + } + ss += s + "\x00" + } + v := utf16.Encode([]rune(ss + "\x00")) + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, MULTI_SZ, buf) +} + +// SetBinaryValue sets the data and type of a name value +// under key k to value and BINARY. 
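The typed getters and setters in this file pair up symmetrically: GetIntegerValue accepts both DWORD and QWORD data and reports ErrUnexpectedType for anything else. A round-trip sketch, using a hypothetical scratch key under CURRENT_USER:

//go:build windows

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows/registry"
)

func main() {
	const path = `SOFTWARE\ExampleScratch` // hypothetical scratch key
	k, _, err := registry.CreateKey(registry.CURRENT_USER, path, registry.ALL_ACCESS)
	if err != nil {
		log.Fatal(err)
	}
	defer registry.DeleteKey(registry.CURRENT_USER, path) // clean up after Close
	defer k.Close()

	if err := k.SetDWordValue("Retries", 3); err != nil {
		log.Fatal(err)
	}
	v, valtype, err := k.GetIntegerValue("Retries")
	if err != nil {
		log.Fatal(err) // would be ErrUnexpectedType for a non-DWORD/QWORD value
	}
	fmt.Println(v, valtype == registry.DWORD) // 3 true
}
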
+func (k Key) SetBinaryValue(name string, value []byte) error { + return k.setValue(name, BINARY, value) +} + +// DeleteValue removes a named value from the key k. +func (k Key) DeleteValue(name string) error { + return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name)) +} + +// ReadValueNames returns the value names of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadValueNames(n int) ([]string, error) { + ki, err := k.Stat() + if err != nil { + return nil, err + } + names := make([]string, 0, ki.ValueCount) + buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} diff --git a/terraform/providers/google/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/terraform/providers/google/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go new file mode 100644 index 00000000000..fc1835d8a23 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -0,0 +1,117 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package registry + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW") + procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") + procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW") + procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW") + procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW") + procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW") + procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW") + procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") +) + +func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := 
syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} diff --git a/terraform/providers/google/vendor/golang.org/x/sys/windows/security_windows.go b/terraform/providers/google/vendor/golang.org/x/sys/windows/security_windows.go index 6f7d2ac70a9..b6e1ab76f82 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/windows/security_windows.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/windows/security_windows.go @@ -894,7 +894,7 @@ type ACL struct { aclRevision byte sbz1 byte aclSize uint16 - aceCount uint16 + AceCount uint16 sbz2 uint16 } @@ -1087,6 +1087,27 @@ type EXPLICIT_ACCESS struct { Trustee TRUSTEE } +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header +type ACE_HEADER struct { + AceType uint8 + AceFlags uint8 + AceSize uint16 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-access_allowed_ace +type ACCESS_ALLOWED_ACE struct { + Header ACE_HEADER + Mask ACCESS_MASK + SidStart uint32 +} + +const ( + // Constants for AceType + // https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header + ACCESS_ALLOWED_ACE_TYPE = 0 + ACCESS_DENIED_ACE_TYPE = 1 +) + // This type is the union inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions. type TrusteeValue uintptr @@ -1158,6 +1179,7 @@ type OBJECTS_AND_NAME struct { //sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD //sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW +//sys GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) = advapi32.GetAce // Control returns the security descriptor control bits. 
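With AceCount now exported and GetAce bound to advapi32, a security descriptor's DACL can be walked entry by entry. A sketch — the file path is illustrative, and note that this binding declares GetAce against ACCESS_ALLOWED_ACE even though other ACE types share the same header:

//go:build windows

package main

import (
	"fmt"
	"log"
	"unsafe"

	"golang.org/x/sys/windows"
)

func main() {
	sd, err := windows.GetNamedSecurityInfo(`C:\Windows`,
		windows.SE_FILE_OBJECT, windows.DACL_SECURITY_INFORMATION)
	if err != nil {
		log.Fatal(err)
	}
	dacl, _, err := sd.DACL()
	if err != nil || dacl == nil { // a nil DACL grants everyone full access
		log.Fatal("no DACL to walk")
	}
	for i := uint32(0); i < uint32(dacl.AceCount); i++ {
		var ace *windows.ACCESS_ALLOWED_ACE
		if err := windows.GetAce(dacl, i, &ace); err != nil {
			log.Fatal(err)
		}
		// The trustee SID is embedded in the ACE starting at SidStart.
		sid := (*windows.SID)(unsafe.Pointer(&ace.SidStart))
		fmt.Printf("type=%d mask=%#x sid=%s\n", ace.Header.AceType, ace.Mask, sid)
	}
}
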
func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/windows/syscall_windows.go b/terraform/providers/google/vendor/golang.org/x/sys/windows/syscall_windows.go index 6525c62f3c2..5cee9a3143f 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -17,8 +17,10 @@ import ( "unsafe" ) -type Handle uintptr -type HWND uintptr +type ( + Handle uintptr + HWND uintptr +) const ( InvalidHandle = ^Handle(0) @@ -211,6 +213,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) //sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) [failretval<=32] = shell32.ShellExecuteW //sys GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) = user32.GetWindowThreadProcessId +//sys LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) [failretval==0] = user32.LoadKeyboardLayoutW +//sys UnloadKeyboardLayout(hkl Handle) (err error) = user32.UnloadKeyboardLayout +//sys GetKeyboardLayout(tid uint32) (hkl Handle) = user32.GetKeyboardLayout +//sys ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) = user32.ToUnicodeEx //sys GetShellWindow() (shellWindow HWND) = user32.GetShellWindow //sys MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW //sys ExitWindowsEx(flags uint32, reason uint32) (err error) = user32.ExitWindowsEx @@ -307,6 +313,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo //sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition +//sys GetConsoleCP() (cp uint32, err error) = kernel32.GetConsoleCP +//sys GetConsoleOutputCP() (cp uint32, err error) = kernel32.GetConsoleOutputCP +//sys SetConsoleCP(cp uint32) (err error) = kernel32.SetConsoleCP +//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW //sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole @@ -1368,9 +1378,11 @@ func SetsockoptLinger(fd Handle, level, opt int, l *Linger) (err error) { func SetsockoptInet4Addr(fd Handle, level, opt int, value [4]byte) (err error) { return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&value[0])), 4) } + func SetsockoptIPMreq(fd Handle, level, opt int, mreq *IPMreq) (err error) { return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(mreq)), int32(unsafe.Sizeof(*mreq))) } + func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { return syscall.EWINDOWS } diff --git a/terraform/providers/google/vendor/golang.org/x/sys/windows/types_windows.go 
b/terraform/providers/google/vendor/golang.org/x/sys/windows/types_windows.go index d8cb71db0a6..7b97a154c95 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/windows/types_windows.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/windows/types_windows.go @@ -1060,6 +1060,7 @@ const ( SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 + SIO_UDP_NETRESET = IOC_IN | IOC_VENDOR | 15 // cf. http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 @@ -2003,7 +2004,21 @@ const ( MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20 ) -const GAA_FLAG_INCLUDE_PREFIX = 0x00000010 +// Flags for GetAdaptersAddresses, see +// https://learn.microsoft.com/en-us/windows/win32/api/iphlpapi/nf-iphlpapi-getadaptersaddresses. +const ( + GAA_FLAG_SKIP_UNICAST = 0x1 + GAA_FLAG_SKIP_ANYCAST = 0x2 + GAA_FLAG_SKIP_MULTICAST = 0x4 + GAA_FLAG_SKIP_DNS_SERVER = 0x8 + GAA_FLAG_INCLUDE_PREFIX = 0x10 + GAA_FLAG_SKIP_FRIENDLY_NAME = 0x20 + GAA_FLAG_INCLUDE_WINS_INFO = 0x40 + GAA_FLAG_INCLUDE_GATEWAYS = 0x80 + GAA_FLAG_INCLUDE_ALL_INTERFACES = 0x100 + GAA_FLAG_INCLUDE_ALL_COMPARTMENTS = 0x200 + GAA_FLAG_INCLUDE_TUNNEL_BINDINGORDER = 0x400 +) const ( IF_TYPE_OTHER = 1 @@ -2017,6 +2032,50 @@ const ( IF_TYPE_IEEE1394 = 144 ) +// Enum NL_PREFIX_ORIGIN for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_prefix_origin +const ( + IpPrefixOriginOther = 0 + IpPrefixOriginManual = 1 + IpPrefixOriginWellKnown = 2 + IpPrefixOriginDhcp = 3 + IpPrefixOriginRouterAdvertisement = 4 + IpPrefixOriginUnchanged = 1 << 4 +) + +// Enum NL_SUFFIX_ORIGIN for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_suffix_origin +const ( + NlsoOther = 0 + NlsoManual = 1 + NlsoWellKnown = 2 + NlsoDhcp = 3 + NlsoLinkLayerAddress = 4 + NlsoRandom = 5 + IpSuffixOriginOther = 0 + IpSuffixOriginManual = 1 + IpSuffixOriginWellKnown = 2 + IpSuffixOriginDhcp = 3 + IpSuffixOriginLinkLayerAddress = 4 + IpSuffixOriginRandom = 5 + IpSuffixOriginUnchanged = 1 << 4 +) + +// Enum NL_DAD_STATE for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_dad_state +const ( + NldsInvalid = 0 + NldsTentative = 1 + NldsDuplicate = 2 + NldsDeprecated = 3 + NldsPreferred = 4 + IpDadStateInvalid = 0 + IpDadStateTentative = 1 + IpDadStateDuplicate = 2 + IpDadStateDeprecated = 3 + IpDadStatePreferred = 4 +) + type SocketAddress struct { Sockaddr *syscall.RawSockaddrAny SockaddrLength int32 @@ -3404,3 +3463,14 @@ type DCB struct { EvtChar byte wReserved1 uint16 } + +// Keyboard Layout Flags. 
+// See https://learn.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-loadkeyboardlayoutw +const ( + KLF_ACTIVATE = 0x00000001 + KLF_SUBSTITUTE_OK = 0x00000002 + KLF_REORDER = 0x00000008 + KLF_REPLACELANG = 0x00000010 + KLF_NOTELLSHELL = 0x00000080 + KLF_SETFORPROCESS = 0x00000100 +) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/terraform/providers/google/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 9f73df75b5f..4c2e1bdc01e 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -91,6 +91,7 @@ var ( procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") procEqualSid = modadvapi32.NewProc("EqualSid") procFreeSid = modadvapi32.NewProc("FreeSid") + procGetAce = modadvapi32.NewProc("GetAce") procGetLengthSid = modadvapi32.NewProc("GetLengthSid") procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl") @@ -246,7 +247,9 @@ var ( procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetConsoleCP = modkernel32.NewProc("GetConsoleCP") procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procGetConsoleOutputCP = modkernel32.NewProc("GetConsoleOutputCP") procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") @@ -346,8 +349,10 @@ var ( procSetCommMask = modkernel32.NewProc("SetCommMask") procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") + procSetConsoleCP = modkernel32.NewProc("SetConsoleCP") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procSetConsoleOutputCP = modkernel32.NewProc("SetConsoleOutputCP") procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") @@ -477,12 +482,16 @@ var ( procGetDesktopWindow = moduser32.NewProc("GetDesktopWindow") procGetForegroundWindow = moduser32.NewProc("GetForegroundWindow") procGetGUIThreadInfo = moduser32.NewProc("GetGUIThreadInfo") + procGetKeyboardLayout = moduser32.NewProc("GetKeyboardLayout") procGetShellWindow = moduser32.NewProc("GetShellWindow") procGetWindowThreadProcessId = moduser32.NewProc("GetWindowThreadProcessId") procIsWindow = moduser32.NewProc("IsWindow") procIsWindowUnicode = moduser32.NewProc("IsWindowUnicode") procIsWindowVisible = moduser32.NewProc("IsWindowVisible") + procLoadKeyboardLayoutW = moduser32.NewProc("LoadKeyboardLayoutW") procMessageBoxW = moduser32.NewProc("MessageBoxW") + procToUnicodeEx = moduser32.NewProc("ToUnicodeEx") + procUnloadKeyboardLayout = moduser32.NewProc("UnloadKeyboardLayout") procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") @@ -788,6 +797,14 @@ func FreeSid(sid *SID) (err error) { return } +func 
GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) { + r1, _, e1 := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetLengthSid(sid *SID) (len uint32) { r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) len = uint32(r0) @@ -2149,6 +2166,15 @@ func GetComputerName(buf *uint16, n *uint32) (err error) { return } +func GetConsoleCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleMode(console Handle, mode *uint32) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) if r1 == 0 { @@ -2157,6 +2183,15 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) { return } +func GetConsoleOutputCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) if r1 == 0 { @@ -3025,6 +3060,14 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { return } +func SetConsoleCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func setConsoleCursorPosition(console Handle, position uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) if r1 == 0 { @@ -3041,6 +3084,14 @@ func SetConsoleMode(console Handle, mode uint32) (err error) { return } +func SetConsoleOutputCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCurrentDirectory(path *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) if r1 == 0 { @@ -4073,6 +4124,12 @@ func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { return } +func GetKeyboardLayout(tid uint32) (hkl Handle) { + r0, _, _ := syscall.Syscall(procGetKeyboardLayout.Addr(), 1, uintptr(tid), 0, 0) + hkl = Handle(r0) + return +} + func GetShellWindow() (shellWindow HWND) { r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0) shellWindow = HWND(r0) @@ -4106,6 +4163,15 @@ func IsWindowVisible(hwnd HWND) (isVisible bool) { return } +func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadKeyboardLayoutW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(flags), 0) + hkl = Handle(r0) + if hkl == 0 { + err = errnoErr(e1) + } + return +} + func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) ret = int32(r0) @@ -4115,6 +4181,20 @@ func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret i return } +func ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff 
*uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) { + r0, _, _ := syscall.Syscall9(procToUnicodeEx.Addr(), 7, uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl), 0, 0) + ret = int32(r0) + return +} + +func UnloadKeyboardLayout(hkl Handle) (err error) { + r1, _, e1 := syscall.Syscall(procUnloadKeyboardLayout.Addr(), 1, uintptr(hkl), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) { var _p0 uint32 if inheritExisting { diff --git a/terraform/providers/google/vendor/golang.org/x/text/LICENSE b/terraform/providers/google/vendor/golang.org/x/text/LICENSE index 6a66aea5eaf..2a7cf70da6e 100644 --- a/terraform/providers/google/vendor/golang.org/x/text/LICENSE +++ b/terraform/providers/google/vendor/golang.org/x/text/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/terraform/providers/google/vendor/golang.org/x/time/LICENSE b/terraform/providers/google/vendor/golang.org/x/time/LICENSE index 6a66aea5eaf..2a7cf70da6e 100644 --- a/terraform/providers/google/vendor/golang.org/x/time/LICENSE +++ b/terraform/providers/google/vendor/golang.org/x/time/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/terraform/providers/google/vendor/golang.org/x/time/rate/rate.go b/terraform/providers/google/vendor/golang.org/x/time/rate/rate.go index 8f6c7f493f8..93a798ab637 100644 --- a/terraform/providers/google/vendor/golang.org/x/time/rate/rate.go +++ b/terraform/providers/google/vendor/golang.org/x/time/rate/rate.go @@ -99,8 +99,9 @@ func (lim *Limiter) Tokens() float64 { // bursts of at most b tokens. 
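The rate-limiter fix below seeds a new limiter's bucket with b tokens up front (and drops the special-cased zero-limit path), so a fresh limiter admits a burst of b events before throttling kicks in. A quick illustration:

package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// 1 event/sec steady rate, bursts of up to 3; the bucket starts full.
	lim := rate.NewLimiter(rate.Limit(1), 3)
	for i := 0; i < 4; i++ {
		fmt.Println(lim.Allow()) // true, true, true, then false
	}
}
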
func NewLimiter(r Limit, b int) *Limiter { return &Limiter{ - limit: r, - burst: b, + limit: r, + burst: b, + tokens: float64(b), } } @@ -344,18 +345,6 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) tokens: n, timeToAct: t, } - } else if lim.limit == 0 { - var ok bool - if lim.burst >= n { - ok = true - lim.burst -= n - } - return Reservation{ - ok: ok, - lim: lim, - tokens: lim.burst, - timeToAct: t, - } } t, tokens := lim.advance(t) diff --git a/terraform/providers/google/vendor/golang.org/x/tools/LICENSE b/terraform/providers/google/vendor/golang.org/x/tools/LICENSE new file mode 100644 index 00000000000..6a66aea5eaf --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/terraform/providers/google/vendor/golang.org/x/tools/PATENTS b/terraform/providers/google/vendor/golang.org/x/tools/PATENTS new file mode 100644 index 00000000000..733099041f8 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
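[Editorial aside on the golang.org/x/time/rate hunk above: NewLimiter now seeds the token bucket with a full burst (tokens: float64(b)), and reserveN drops the special case that permanently consumed burst capacity when limit == 0. A minimal sketch of the resulting behavior, using illustrative rate and burst values:

package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// With the patched NewLimiter, a fresh limiter starts with a
	// full bucket, so the first b calls to Allow succeed at once.
	lim := rate.NewLimiter(rate.Limit(1), 3) // 1 event/sec, burst 3
	for i := 0; i < 4; i++ {
		fmt.Printf("call %d allowed: %v\n", i, lim.Allow())
	}
	// Expected: calls 0..2 allowed, call 3 denied until a token
	// accrues (about a second later).
}

This is the provider-side symptom the vendor bump addresses: limiters no longer start empty or burn burst permanently at a zero rate.]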
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/terraform/providers/google/vendor/golang.org/x/tools/cmd/stringer/stringer.go b/terraform/providers/google/vendor/golang.org/x/tools/cmd/stringer/stringer.go new file mode 100644 index 00000000000..2b19c93e8ea --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/cmd/stringer/stringer.go @@ -0,0 +1,660 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Stringer is a tool to automate the creation of methods that satisfy the fmt.Stringer +// interface. Given the name of a (signed or unsigned) integer type T that has constants +// defined, stringer will create a new self-contained Go source file implementing +// +// func (t T) String() string +// +// The file is created in the same package and directory as the package that defines T. +// It has helpful defaults designed for use with go generate. +// +// Stringer works best with constants that are consecutive values such as created using iota, +// but creates good code regardless. In the future it might also provide custom support for +// constant sets that are bit patterns. +// +// For example, given this snippet, +// +// package painkiller +// +// type Pill int +// +// const ( +// Placebo Pill = iota +// Aspirin +// Ibuprofen +// Paracetamol +// Acetaminophen = Paracetamol +// ) +// +// running this command +// +// stringer -type=Pill +// +// in the same directory will create the file pill_string.go, in package painkiller, +// containing a definition of +// +// func (Pill) String() string +// +// That method will translate the value of a Pill constant to the string representation +// of the respective constant name, so that the call fmt.Print(painkiller.Aspirin) will +// print the string "Aspirin". +// +// Typically this process would be run using go generate, like this: +// +// //go:generate stringer -type=Pill +// +// If multiple constants have the same value, the lexically first matching name will +// be used (in the example, Acetaminophen will print as "Paracetamol"). +// +// With no arguments, it processes the package in the current directory. +// Otherwise, the arguments must name a single directory holding a Go package +// or a set of Go source files that represent a single Go package. +// +// The -type flag accepts a comma-separated list of types so a single run can +// generate methods for multiple types. The default output file is t_string.go, +// where t is the lower-cased name of the first type listed. It can be overridden +// with the -output flag. +// +// The -linecomment flag tells stringer to generate the text of any line comment, trimmed +// of leading spaces, instead of the constant name. For instance, if the constants above had a +// Pill prefix, one could write +// +// PillAspirin // Aspirin +// +// to suppress it in the output. 
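[For orientation before the implementation below: an abridged, hand-written approximation of what the tool emits for the Pill example above, not verbatim output. The real file also contains a compile-time guard that fails if the constant values change; the _Pill_name/_Pill_index layout follows the one-run template defined later in this file.

// Code generated by "stringer -type=Pill"; DO NOT EDIT.

package painkiller

import "strconv"

const _Pill_name = "PlaceboAspirinIbuprofenParacetamol"

var _Pill_index = [...]uint8{0, 7, 14, 23, 34}

func (i Pill) String() string {
	if i < 0 || i >= Pill(len(_Pill_index)-1) {
		return "Pill(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _Pill_name[_Pill_index[i]:_Pill_index[i+1]]
}

Note how the duplicate value (Acetaminophen = Paracetamol) collapses to a single name, per the deduplication described above.]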
+package main // import "golang.org/x/tools/cmd/stringer" + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/constant" + "go/format" + "go/token" + "go/types" + "log" + "os" + "path/filepath" + "sort" + "strings" + + "golang.org/x/tools/go/packages" +) + +var ( + typeNames = flag.String("type", "", "comma-separated list of type names; must be set") + output = flag.String("output", "", "output file name; default srcdir/_string.go") + trimprefix = flag.String("trimprefix", "", "trim the `prefix` from the generated constant names") + linecomment = flag.Bool("linecomment", false, "use line comment text as printed text when present") + buildTags = flag.String("tags", "", "comma-separated list of build tags to apply") +) + +// Usage is a replacement usage function for the flags package. +func Usage() { + fmt.Fprintf(os.Stderr, "Usage of stringer:\n") + fmt.Fprintf(os.Stderr, "\tstringer [flags] -type T [directory]\n") + fmt.Fprintf(os.Stderr, "\tstringer [flags] -type T files... # Must be a single package\n") + fmt.Fprintf(os.Stderr, "For more information, see:\n") + fmt.Fprintf(os.Stderr, "\thttps://pkg.go.dev/golang.org/x/tools/cmd/stringer\n") + fmt.Fprintf(os.Stderr, "Flags:\n") + flag.PrintDefaults() +} + +func main() { + log.SetFlags(0) + log.SetPrefix("stringer: ") + flag.Usage = Usage + flag.Parse() + if len(*typeNames) == 0 { + flag.Usage() + os.Exit(2) + } + types := strings.Split(*typeNames, ",") + var tags []string + if len(*buildTags) > 0 { + tags = strings.Split(*buildTags, ",") + } + + // We accept either one directory or a list of files. Which do we have? + args := flag.Args() + if len(args) == 0 { + // Default: process whole package in current directory. + args = []string{"."} + } + + // Parse the package once. + var dir string + g := Generator{ + trimPrefix: *trimprefix, + lineComment: *linecomment, + } + // TODO(suzmue): accept other patterns for packages (directories, list of files, import paths, etc). + if len(args) == 1 && isDirectory(args[0]) { + dir = args[0] + } else { + if len(tags) != 0 { + log.Fatal("-tags option applies only to directories, not when files are specified") + } + dir = filepath.Dir(args[0]) + } + + g.parsePackage(args, tags) + + // Print the header and package clause. + g.Printf("// Code generated by \"stringer %s\"; DO NOT EDIT.\n", strings.Join(os.Args[1:], " ")) + g.Printf("\n") + g.Printf("package %s", g.pkg.name) + g.Printf("\n") + g.Printf("import \"strconv\"\n") // Used by all methods. + + // Run generate for each type. + for _, typeName := range types { + g.generate(typeName) + } + + // Format the output. + src := g.format() + + // Write to file. + outputName := *output + if outputName == "" { + baseName := fmt.Sprintf("%s_string.go", types[0]) + outputName = filepath.Join(dir, strings.ToLower(baseName)) + } + err := os.WriteFile(outputName, src, 0644) + if err != nil { + log.Fatalf("writing output: %s", err) + } +} + +// isDirectory reports whether the named file is a directory. +func isDirectory(name string) bool { + info, err := os.Stat(name) + if err != nil { + log.Fatal(err) + } + return info.IsDir() +} + +// Generator holds the state of the analysis. Primarily used to buffer +// the output for format.Source. +type Generator struct { + buf bytes.Buffer // Accumulated output. + pkg *Package // Package we are scanning. 
+ + trimPrefix string + lineComment bool + + logf func(format string, args ...interface{}) // test logging hook; nil when not testing +} + +func (g *Generator) Printf(format string, args ...interface{}) { + fmt.Fprintf(&g.buf, format, args...) +} + +// File holds a single parsed file and associated data. +type File struct { + pkg *Package // Package to which this file belongs. + file *ast.File // Parsed AST. + // These fields are reset for each type being generated. + typeName string // Name of the constant type. + values []Value // Accumulator for constant values of that type. + + trimPrefix string + lineComment bool +} + +type Package struct { + name string + defs map[*ast.Ident]types.Object + files []*File +} + +// parsePackage analyzes the single package constructed from the patterns and tags. +// parsePackage exits if there is an error. +func (g *Generator) parsePackage(patterns []string, tags []string) { + cfg := &packages.Config{ + Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax, + // TODO: Need to think about constants in test files. Maybe write type_string_test.go + // in a separate pass? For later. + Tests: false, + BuildFlags: []string{fmt.Sprintf("-tags=%s", strings.Join(tags, " "))}, + Logf: g.logf, + } + pkgs, err := packages.Load(cfg, patterns...) + if err != nil { + log.Fatal(err) + } + if len(pkgs) != 1 { + log.Fatalf("error: %d packages matching %v", len(pkgs), strings.Join(patterns, " ")) + } + g.addPackage(pkgs[0]) +} + +// addPackage adds a type checked Package and its syntax files to the generator. +func (g *Generator) addPackage(pkg *packages.Package) { + g.pkg = &Package{ + name: pkg.Name, + defs: pkg.TypesInfo.Defs, + files: make([]*File, len(pkg.Syntax)), + } + + for i, file := range pkg.Syntax { + g.pkg.files[i] = &File{ + file: file, + pkg: g.pkg, + trimPrefix: g.trimPrefix, + lineComment: g.lineComment, + } + } +} + +// generate produces the String method for the named type. +func (g *Generator) generate(typeName string) { + values := make([]Value, 0, 100) + for _, file := range g.pkg.files { + // Set the state for this run of the walker. + file.typeName = typeName + file.values = nil + if file.file != nil { + ast.Inspect(file.file, file.genDecl) + values = append(values, file.values...) + } + } + + if len(values) == 0 { + log.Fatalf("no values defined for type %s", typeName) + } + // Generate code that will fail if the constants change value. + g.Printf("func _() {\n") + g.Printf("\t// An \"invalid array index\" compiler error signifies that the constant values have changed.\n") + g.Printf("\t// Re-run the stringer command to generate them again.\n") + g.Printf("\tvar x [1]struct{}\n") + for _, v := range values { + g.Printf("\t_ = x[%s - %s]\n", v.originalName, v.str) + } + g.Printf("}\n") + runs := splitIntoRuns(values) + // The decision of which pattern to use depends on the number of + // runs in the numbers. If there's only one, it's easy. For more than + // one, there's a tradeoff between complexity and size of the data + // and code vs. the simplicity of a map. A map takes more space, + // but so does the code. The decision here (crossover at 10) is + // arbitrary, but considers that for large numbers of runs the cost + // of the linear scan in the switch might become important, and + // rather than use yet another algorithm such as binary search, + // we punt and use a map. In any case, the likelihood of a map + // being necessary for any realistic example other than bitmasks + // is very low. 
And bitmasks probably deserve their own analysis, + // to be done some other day. + switch { + case len(runs) == 1: + g.buildOneRun(runs, typeName) + case len(runs) <= 10: + g.buildMultipleRuns(runs, typeName) + default: + g.buildMap(runs, typeName) + } +} + +// splitIntoRuns breaks the values into runs of contiguous sequences. +// For example, given 1,2,3,5,6,7 it returns {1,2,3},{5,6,7}. +// The input slice is known to be non-empty. +func splitIntoRuns(values []Value) [][]Value { + // We use stable sort so the lexically first name is chosen for equal elements. + sort.Stable(byValue(values)) + // Remove duplicates. Stable sort has put the one we want to print first, + // so use that one. The String method won't care about which named constant + // was the argument, so the first name for the given value is the only one to keep. + // We need to do this because identical values would cause the switch or map + // to fail to compile. + j := 1 + for i := 1; i < len(values); i++ { + if values[i].value != values[i-1].value { + values[j] = values[i] + j++ + } + } + values = values[:j] + runs := make([][]Value, 0, 10) + for len(values) > 0 { + // One contiguous sequence per outer loop. + i := 1 + for i < len(values) && values[i].value == values[i-1].value+1 { + i++ + } + runs = append(runs, values[:i]) + values = values[i:] + } + return runs +} + +// format returns the gofmt-ed contents of the Generator's buffer. +func (g *Generator) format() []byte { + src, err := format.Source(g.buf.Bytes()) + if err != nil { + // Should never happen, but can arise when developing this code. + // The user can compile the output to see the error. + log.Printf("warning: internal error: invalid Go generated: %s", err) + log.Printf("warning: compile the package to analyze the error") + return g.buf.Bytes() + } + return src +} + +// Value represents a declared constant. +type Value struct { + originalName string // The name of the constant. + name string // The name with trimmed prefix. + // The value is stored as a bit pattern alone. The boolean tells us + // whether to interpret it as an int64 or a uint64; the only place + // this matters is when sorting. + // Much of the time the str field is all we need; it is printed + // by Value.String. + value uint64 // Will be converted to int64 when needed. + signed bool // Whether the constant is a signed type. + str string // The string representation given by the "go/constant" package. +} + +func (v *Value) String() string { + return v.str +} + +// byValue lets us sort the constants into increasing order. +// We take care in the Less method to sort in signed or unsigned order, +// as appropriate. +type byValue []Value + +func (b byValue) Len() int { return len(b) } +func (b byValue) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byValue) Less(i, j int) bool { + if b[i].signed { + return int64(b[i].value) < int64(b[j].value) + } + return b[i].value < b[j].value +} + +// genDecl processes one declaration clause. +func (f *File) genDecl(node ast.Node) bool { + decl, ok := node.(*ast.GenDecl) + if !ok || decl.Tok != token.CONST { + // We only care about const declarations. + return true + } + // The name of the type of the constants we are declaring. + // Can change if this is a multi-element declaration. + typ := "" + // Loop over the elements of the declaration. Each element is a ValueSpec: + // a list of names possibly followed by a type, possibly followed by values. 
+ // If the type and value are both missing, we carry down the type (and value, + // but the "go/types" package takes care of that). + for _, spec := range decl.Specs { + vspec := spec.(*ast.ValueSpec) // Guaranteed to succeed as this is CONST. + if vspec.Type == nil && len(vspec.Values) > 0 { + // "X = 1". With no type but a value. If the constant is untyped, + // skip this vspec and reset the remembered type. + typ = "" + + // If this is a simple type conversion, remember the type. + // We don't mind if this is actually a call; a qualified call won't + // be matched (that will be SelectorExpr, not Ident), and only unusual + // situations will result in a function call that appears to be + // a type conversion. + ce, ok := vspec.Values[0].(*ast.CallExpr) + if !ok { + continue + } + id, ok := ce.Fun.(*ast.Ident) + if !ok { + continue + } + typ = id.Name + } + if vspec.Type != nil { + // "X T". We have a type. Remember it. + ident, ok := vspec.Type.(*ast.Ident) + if !ok { + continue + } + typ = ident.Name + } + if typ != f.typeName { + // This is not the type we're looking for. + continue + } + // We now have a list of names (from one line of source code) all being + // declared with the desired type. + // Grab their names and actual values and store them in f.values. + for _, name := range vspec.Names { + if name.Name == "_" { + continue + } + // This dance lets the type checker find the values for us. It's a + // bit tricky: look up the object declared by the name, find its + // types.Const, and extract its value. + obj, ok := f.pkg.defs[name] + if !ok { + log.Fatalf("no value for constant %s", name) + } + info := obj.Type().Underlying().(*types.Basic).Info() + if info&types.IsInteger == 0 { + log.Fatalf("can't handle non-integer constant type %s", typ) + } + value := obj.(*types.Const).Val() // Guaranteed to succeed as this is CONST. + if value.Kind() != constant.Int { + log.Fatalf("can't happen: constant is not an integer %s", name) + } + i64, isInt := constant.Int64Val(value) + u64, isUint := constant.Uint64Val(value) + if !isInt && !isUint { + log.Fatalf("internal error: value of %s is not an integer: %s", name, value.String()) + } + if !isInt { + u64 = uint64(i64) + } + v := Value{ + originalName: name.Name, + value: u64, + signed: info&types.IsUnsigned == 0, + str: value.String(), + } + if c := vspec.Comment; f.lineComment && c != nil && len(c.List) == 1 { + v.name = strings.TrimSpace(c.Text()) + } else { + v.name = strings.TrimPrefix(v.originalName, f.trimPrefix) + } + f.values = append(f.values, v) + } + } + return false +} + +// Helpers + +// usize returns the number of bits of the smallest unsigned integer +// type that will hold n. Used to create the smallest possible slice of +// integers to use as indexes into the concatenated strings. +func usize(n int) int { + switch { + case n < 1<<8: + return 8 + case n < 1<<16: + return 16 + default: + // 2^32 is enough constants for anyone. + return 32 + } +} + +// declareIndexAndNameVars declares the index slices and concatenated names +// strings representing the runs of values. 
+func (g *Generator) declareIndexAndNameVars(runs [][]Value, typeName string) { + var indexes, names []string + for i, run := range runs { + index, name := g.createIndexAndNameDecl(run, typeName, fmt.Sprintf("_%d", i)) + if len(run) != 1 { + indexes = append(indexes, index) + } + names = append(names, name) + } + g.Printf("const (\n") + for _, name := range names { + g.Printf("\t%s\n", name) + } + g.Printf(")\n\n") + + if len(indexes) > 0 { + g.Printf("var (") + for _, index := range indexes { + g.Printf("\t%s\n", index) + } + g.Printf(")\n\n") + } +} + +// declareIndexAndNameVar is the single-run version of declareIndexAndNameVars +func (g *Generator) declareIndexAndNameVar(run []Value, typeName string) { + index, name := g.createIndexAndNameDecl(run, typeName, "") + g.Printf("const %s\n", name) + g.Printf("var %s\n", index) +} + +// createIndexAndNameDecl returns the pair of declarations for the run. The caller will add "const" and "var". +func (g *Generator) createIndexAndNameDecl(run []Value, typeName string, suffix string) (string, string) { + b := new(bytes.Buffer) + indexes := make([]int, len(run)) + for i := range run { + b.WriteString(run[i].name) + indexes[i] = b.Len() + } + nameConst := fmt.Sprintf("_%s_name%s = %q", typeName, suffix, b.String()) + nameLen := b.Len() + b.Reset() + fmt.Fprintf(b, "_%s_index%s = [...]uint%d{0, ", typeName, suffix, usize(nameLen)) + for i, v := range indexes { + if i > 0 { + fmt.Fprintf(b, ", ") + } + fmt.Fprintf(b, "%d", v) + } + fmt.Fprintf(b, "}") + return b.String(), nameConst +} + +// declareNameVars declares the concatenated names string representing all the values in the runs. +func (g *Generator) declareNameVars(runs [][]Value, typeName string, suffix string) { + g.Printf("const _%s_name%s = \"", typeName, suffix) + for _, run := range runs { + for i := range run { + g.Printf("%s", run[i].name) + } + } + g.Printf("\"\n") +} + +// buildOneRun generates the variables and String method for a single run of contiguous values. +func (g *Generator) buildOneRun(runs [][]Value, typeName string) { + values := runs[0] + g.Printf("\n") + g.declareIndexAndNameVar(values, typeName) + // The generated code is simple enough to write as a Printf format. + lessThanZero := "" + if values[0].signed { + lessThanZero = "i < 0 || " + } + if values[0].value == 0 { // Signed or unsigned, 0 is still 0. + g.Printf(stringOneRun, typeName, usize(len(values)), lessThanZero) + } else { + g.Printf(stringOneRunWithOffset, typeName, values[0].String(), usize(len(values)), lessThanZero) + } +} + +// Arguments to format are: +// +// [1]: type name +// [2]: size of index element (8 for uint8 etc.) +// [3]: less than zero check (for signed types) +const stringOneRun = `func (i %[1]s) String() string { + if %[3]si >= %[1]s(len(_%[1]s_index)-1) { + return "%[1]s(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _%[1]s_name[_%[1]s_index[i]:_%[1]s_index[i+1]] +} +` + +// Arguments to format are: +// [1]: type name +// [2]: lowest defined value for type, as a string +// [3]: size of index element (8 for uint8 etc.) +// [4]: less than zero check (for signed types) +/* + */ +const stringOneRunWithOffset = `func (i %[1]s) String() string { + i -= %[2]s + if %[4]si >= %[1]s(len(_%[1]s_index)-1) { + return "%[1]s(" + strconv.FormatInt(int64(i + %[2]s), 10) + ")" + } + return _%[1]s_name[_%[1]s_index[i] : _%[1]s_index[i+1]] +} +` + +// buildMultipleRuns generates the variables and String method for multiple runs of contiguous values. 
+// For this pattern, a single Printf format won't do. +func (g *Generator) buildMultipleRuns(runs [][]Value, typeName string) { + g.Printf("\n") + g.declareIndexAndNameVars(runs, typeName) + g.Printf("func (i %s) String() string {\n", typeName) + g.Printf("\tswitch {\n") + for i, values := range runs { + if len(values) == 1 { + g.Printf("\tcase i == %s:\n", &values[0]) + g.Printf("\t\treturn _%s_name_%d\n", typeName, i) + continue + } + if values[0].value == 0 && !values[0].signed { + // For an unsigned lower bound of 0, "0 <= i" would be redundant. + g.Printf("\tcase i <= %s:\n", &values[len(values)-1]) + } else { + g.Printf("\tcase %s <= i && i <= %s:\n", &values[0], &values[len(values)-1]) + } + if values[0].value != 0 { + g.Printf("\t\ti -= %s\n", &values[0]) + } + g.Printf("\t\treturn _%s_name_%d[_%s_index_%d[i]:_%s_index_%d[i+1]]\n", + typeName, i, typeName, i, typeName, i) + } + g.Printf("\tdefault:\n") + g.Printf("\t\treturn \"%s(\" + strconv.FormatInt(int64(i), 10) + \")\"\n", typeName) + g.Printf("\t}\n") + g.Printf("}\n") +} + +// buildMap handles the case where the space is so sparse a map is a reasonable fallback. +// It's a rare situation but has simple code. +func (g *Generator) buildMap(runs [][]Value, typeName string) { + g.Printf("\n") + g.declareNameVars(runs, typeName, "") + g.Printf("\nvar _%s_map = map[%s]string{\n", typeName, typeName) + n := 0 + for _, values := range runs { + for _, value := range values { + g.Printf("\t%s: _%s_name[%d:%d],\n", &value, typeName, n, n+len(value.name)) + n += len(value.name) + } + } + g.Printf("}\n\n") + g.Printf(stringMap, typeName) +} + +// Argument to format is the type name. +const stringMap = `func (i %[1]s) String() string { + if str, ok := _%[1]s_map[i]; ok { + return str + } + return "%[1]s(" + strconv.FormatInt(int64(i), 10) + ")" +} +` diff --git a/terraform/providers/google/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/terraform/providers/google/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go new file mode 100644 index 00000000000..137cc8df1d8 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -0,0 +1,186 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gcexportdata provides functions for locating, reading, and +// writing export data files containing type information produced by the +// gc compiler. This package supports go1.7 export data format and all +// later versions. +// +// Although it might seem convenient for this package to live alongside +// go/types in the standard library, this would cause version skew +// problems for developer tools that use it, since they must be able to +// consume the outputs of the gc compiler both before and after a Go +// update such as from Go 1.7 to Go 1.8. Because this package lives in +// golang.org/x/tools, sites can update their version of this repo some +// time before the Go 1.8 release and rebuild and redeploy their +// developer tools, which will then be able to consume both Go 1.7 and +// Go 1.8 export data files, so they will work before and after the +// Go update. (See discussion at https://golang.org/issue/15651.) 
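[Before the package clause, a quick sketch of the read path these functions form: Find locates export data for an import path, NewReader unwraps it from the object or archive file, and Read decodes it into a *types.Package. Error handling is kept minimal and "fmt" is only an example import path:

package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	// Locate the export data for "fmt" via the go command.
	filename, path := gcexportdata.Find("fmt", ".")
	if filename == "" {
		log.Fatal("no export data found for fmt")
	}

	f, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Peel off the archive framing, then decode the type data.
	r, err := gcexportdata.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}
	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("loaded %s with %d package-level names\n",
		pkg.Path(), len(pkg.Scope().Names()))
}

As the Deprecated notices say, new code would normally reach for go/packages instead; this only shows how the three functions compose.]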
+package gcexportdata // import "golang.org/x/tools/go/gcexportdata" + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "go/token" + "go/types" + "io" + "os/exec" + + "golang.org/x/tools/internal/gcimporter" +) + +// Find returns the name of an object (.o) or archive (.a) file +// containing type information for the specified import path, +// using the go command. +// If no file was found, an empty filename is returned. +// +// A relative srcDir is interpreted relative to the current working directory. +// +// Find also returns the package's resolved (canonical) import path, +// reflecting the effects of srcDir and vendoring on importPath. +// +// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, +// which is more efficient. +func Find(importPath, srcDir string) (filename, path string) { + cmd := exec.Command("go", "list", "-json", "-export", "--", importPath) + cmd.Dir = srcDir + out, err := cmd.Output() + if err != nil { + return "", "" + } + var data struct { + ImportPath string + Export string + } + json.Unmarshal(out, &data) + return data.Export, data.ImportPath +} + +// NewReader returns a reader for the export data section of an object +// (.o) or archive (.a) file read from r. The new reader may provide +// additional trailing data beyond the end of the export data. +func NewReader(r io.Reader) (io.Reader, error) { + buf := bufio.NewReader(r) + _, size, err := gcimporter.FindExportData(buf) + if err != nil { + return nil, err + } + + if size >= 0 { + // We were given an archive and found the __.PKGDEF in it. + // This tells us the size of the export data, and we don't + // need to return the entire file. + return &io.LimitedReader{ + R: buf, + N: size, + }, nil + } else { + // We were given an object file. As such, we don't know how large + // the export data is and must return the entire file. + return buf, nil + } +} + +// readAll works the same way as io.ReadAll, but avoids allocations and copies +// by preallocating a byte slice of the necessary size if the size is known up +// front. This is always possible when the input is an archive. In that case, +// NewReader will return the known size using an io.LimitedReader. +func readAll(r io.Reader) ([]byte, error) { + if lr, ok := r.(*io.LimitedReader); ok { + data := make([]byte, lr.N) + _, err := io.ReadFull(lr, data) + return data, err + } + return io.ReadAll(r) +} + +// Read reads export data from in, decodes it, and returns type +// information for the package. +// +// The package path (effectively its linker symbol prefix) is +// specified by path, since unlike the package name, this information +// may not be recorded in the export data. +// +// File position information is added to fset. +// +// Read may inspect and add to the imports map to ensure that references +// within the export data to other packages are consistent. The caller +// must ensure that imports[path] does not exist, or exists but is +// incomplete (see types.Package.Complete), and Read inserts the +// resulting package into this map entry. +// +// On return, the state of the reader is undefined. 
+func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) { + data, err := readAll(in) + if err != nil { + return nil, fmt.Errorf("reading export data for %q: %v", path, err) + } + + if bytes.HasPrefix(data, []byte("!")) { + return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path) + } + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 { + switch data[0] { + case 'v', 'c', 'd': // binary, till go1.10 + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) + + case 'i': // indexed, till go1.19 + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) + return pkg, err + + case 'u': // unified, from go1.20 + _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) + return pkg, err + + default: + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path) + } + } + return nil, fmt.Errorf("empty export data for %s", path) +} + +// Write writes encoded type information for the specified package to out. +// The FileSet provides file position information for named objects. +func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { + if _, err := io.WriteString(out, "i"); err != nil { + return err + } + return gcimporter.IExportData(out, fset, pkg) +} + +// ReadBundle reads an export bundle from in, decodes it, and returns type +// information for the packages. +// File position information is added to fset. +// +// ReadBundle may inspect and add to the imports map to ensure that references +// within the export bundle to other packages are consistent. +// +// On return, the state of the reader is undefined. +// +// Experimental: This API is experimental and may change in the future. +func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) { + data, err := readAll(in) + if err != nil { + return nil, fmt.Errorf("reading export bundle: %v", err) + } + return gcimporter.IImportBundle(fset, imports, data) +} + +// WriteBundle writes encoded type information for the specified packages to out. +// The FileSet provides file position information for named objects. +// +// Experimental: This API is experimental and may change in the future. +func WriteBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { + return gcimporter.IExportBundle(out, fset, pkgs) +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/terraform/providers/google/vendor/golang.org/x/tools/go/gcexportdata/importer.go new file mode 100644 index 00000000000..37a7247e268 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/go/gcexportdata/importer.go @@ -0,0 +1,75 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcexportdata + +import ( + "fmt" + "go/token" + "go/types" + "os" +) + +// NewImporter returns a new instance of the types.Importer interface +// that reads type information from export data files written by gc. +// The Importer also satisfies types.ImporterFrom. 
+// +// Export data files are located using "go build" workspace conventions +// and the build.Default context. +// +// Use this importer instead of go/importer.For("gc", ...) to avoid the +// version-skew problems described in the documentation of this package, +// or to control the FileSet or access the imports map populated during +// package loading. +// +// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, +// which is more efficient. +func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom { + return importer{fset, imports} +} + +type importer struct { + fset *token.FileSet + imports map[string]*types.Package +} + +func (imp importer) Import(importPath string) (*types.Package, error) { + return imp.ImportFrom(importPath, "", 0) +} + +func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) { + filename, path := Find(importPath, srcDir) + if filename == "" { + if importPath == "unsafe" { + // Even for unsafe, call Find first in case + // the package was vendored. + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %s", importPath) + } + + if pkg, ok := imp.imports[path]; ok && pkg.Complete() { + return pkg, nil // cache hit + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + f.Close() + if err != nil { + // add file name to error + err = fmt.Errorf("reading export data: %s: %v", filename, err) + } + }() + + r, err := NewReader(f) + if err != nil { + return nil, err + } + + return Read(r, imp.fset, imp.imports, path) +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/terraform/providers/google/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go new file mode 100644 index 00000000000..333676b7cfc --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -0,0 +1,53 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packagesdriver fetches type sizes for go/packages and go/analysis. +package packagesdriver + +import ( + "context" + "fmt" + "strings" + + "golang.org/x/tools/internal/gocommand" +) + +func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { + inv.Verb = "list" + inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} + stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) + var goarch, compiler string + if rawErr != nil { + rawErrMsg := rawErr.Error() + if strings.Contains(rawErrMsg, "cannot find main module") || + strings.Contains(rawErrMsg, "go.mod file not found") { + // User's running outside of a module. + // All bets are off. Get GOARCH and guess compiler is gc. + // TODO(matloob): Is this a problem in practice? + inv.Verb = "env" + inv.Args = []string{"GOARCH"} + envout, enverr := gocmdRunner.Run(ctx, inv) + if enverr != nil { + return "", "", enverr + } + goarch = strings.TrimSpace(envout.String()) + compiler = "gc" + } else if friendlyErr != nil { + return "", "", friendlyErr + } else { + // This should be unreachable, but be defensive + // in case RunRaw's error results are inconsistent. 
+ return "", "", rawErr + } + } else { + fields := strings.Fields(stdout.String()) + if len(fields) < 2 { + return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", + stdout.String(), stderr.String()) + } + goarch = fields[0] + compiler = fields[1] + } + return compiler, goarch, nil +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/go/packages/doc.go b/terraform/providers/google/vendor/golang.org/x/tools/go/packages/doc.go new file mode 100644 index 00000000000..a8d7b06ac09 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/go/packages/doc.go @@ -0,0 +1,250 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package packages loads Go packages for inspection and analysis. + +The [Load] function takes as input a list of patterns and returns a +list of [Package] values describing individual packages matched by those +patterns. +A [Config] specifies configuration options, the most important of which is +the [LoadMode], which controls the amount of detail in the loaded packages. + +Load passes most patterns directly to the underlying build tool. +The default build tool is the go command. +Its supported patterns are described at +https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns. +Other build systems may be supported by providing a "driver"; +see [The driver protocol]. + +All patterns with the prefix "query=", where query is a +non-empty string of letters from [a-z], are reserved and may be +interpreted as query operators. + +Two query operators are currently supported: "file" and "pattern". + +The query "file=path/to/file.go" matches the package or packages enclosing +the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go" +might return the packages "fmt" and "fmt [fmt.test]". + +The query "pattern=string" causes "string" to be passed directly to +the underlying build tool. In most cases this is unnecessary, +but an application can use Load("pattern=" + x) as an escaping mechanism +to ensure that x is not interpreted as a query operator if it contains '='. + +All other query operators are reserved for future use and currently +cause Load to report an error. + +The Package struct provides basic information about the package, including + + - ID, a unique identifier for the package in the returned set; + - GoFiles, the names of the package's Go source files; + - Imports, a map from source import strings to the Packages they name; + - Types, the type information for the package's exported symbols; + - Syntax, the parsed syntax trees for the package's source code; and + - TypesInfo, the result of a complete type-check of the package syntax trees. + +(See the documentation for type Package for the complete list of fields +and more detailed descriptions.) + +For example, + + Load(nil, "bytes", "unicode...") + +returns four Package structs describing the standard library packages +bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern +can match multiple packages and that a package might be matched by +multiple patterns: in general it is not possible to determine which +packages correspond to which patterns. + +Note that the list returned by Load contains only the packages matched +by the patterns. Their dependencies can be found by walking the import +graph using the Imports fields. 
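[To make the preceding paragraphs concrete, a minimal sketch of a Load call followed by the import-graph walk just described. The pattern and mode are illustrative, and the exported packages.Visit helper could replace the manual recursion:

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		// NeedDeps extends NeedImports to the whole graph.
		Mode: packages.NeedName | packages.NeedImports | packages.NeedDeps,
	}
	pkgs, err := packages.Load(cfg, "bytes")
	if err != nil {
		log.Fatal(err)
	}

	// Load returns only the matched packages; dependencies are
	// reached by walking the Imports fields, as described above.
	seen := make(map[string]bool)
	var visit func(*packages.Package)
	visit = func(p *packages.Package) {
		if seen[p.ID] {
			return
		}
		seen[p.ID] = true
		fmt.Println(p.ID)
		for _, imp := range p.Imports {
			visit(imp)
		}
	}
	for _, p := range pkgs {
		visit(p)
	}
}
]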
+ +The Load function can be configured by passing a pointer to a Config as +the first argument. A nil Config is equivalent to the zero Config, which +causes Load to run in LoadFiles mode, collecting minimal information. +See the documentation for type Config for details. + +As noted earlier, the Config.Mode controls the amount of detail +reported about the loaded packages. See the documentation for type LoadMode +for details. + +Most tools should pass their command-line arguments (after any flags) +uninterpreted to [Load], so that it can interpret them +according to the conventions of the underlying build system. + +See the Example function for typical usage. + +# The driver protocol + +[Load] may be used to load Go packages even in Go projects that use +alternative build systems, by installing an appropriate "driver" +program for the build system and specifying its location in the +GOPACKAGESDRIVER environment variable. +For example, +https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration +explains how to use the driver for Bazel. + +The driver program is responsible for interpreting patterns in its +preferred notation and reporting information about the packages that +those patterns identify. Drivers must also support the special "file=" +and "pattern=" patterns described above. + +The patterns are provided as positional command-line arguments. A +JSON-encoded [DriverRequest] message providing additional information +is written to the driver's standard input. The driver must write a +JSON-encoded [DriverResponse] message to its standard output. (This +message differs from the JSON schema produced by 'go list'.) +*/ +package packages // import "golang.org/x/tools/go/packages" + +/* + +Motivation and design considerations + +The new package's design solves problems addressed by two existing +packages: go/build, which locates and describes packages, and +golang.org/x/tools/go/loader, which loads, parses and type-checks them. +The go/build.Package structure encodes too much of the 'go build' way +of organizing projects, leaving us in need of a data type that describes a +package of Go source code independent of the underlying build system. +We wanted something that works equally well with go build and vgo, and +also other build systems such as Bazel and Blaze, making it possible to +construct analysis tools that work in all these environments. +Tools such as errcheck and staticcheck were essentially unavailable to +the Go community at Google, and some of Google's internal tools for Go +are unavailable externally. +This new package provides a uniform way to obtain package metadata by +querying each of these build systems, optionally supporting their +preferred command-line notations for packages, so that tools integrate +neatly with users' build environments. The Metadata query function +executes an external query tool appropriate to the current workspace. + +Loading packages always returns the complete import graph "all the way down", +even if all you want is information about a single package, because the query +mechanisms of all the build systems we currently support ({go,vgo} list, and +blaze/bazel aspect-based query) cannot provide detailed information +about one package without visiting all its dependencies too, so there is +no additional asymptotic cost to providing transitive information. +(This property might not be true of a hypothetical 5th build system.) 
+ +In calls to TypeCheck, all initial packages, and any package that +transitively depends on one of them, must be loaded from source. +Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from +source; D may be loaded from export data, and E may not be loaded at all +(though it's possible that D's export data mentions it, so a +types.Package may be created for it and exposed.) + +The old loader had a feature to suppress type-checking of function +bodies on a per-package basis, primarily intended to reduce the work of +obtaining type information for imported packages. Now that imports are +satisfied by export data, the optimization no longer seems necessary. + +Despite some early attempts, the old loader did not exploit export data, +instead always using the equivalent of WholeProgram mode. This was due +to the complexity of mixing source and export data packages (now +resolved by the upward traversal mentioned above), and because export data +files were nearly always missing or stale. Now that 'go build' supports +caching, all the underlying build systems can guarantee to produce +export data in a reasonable (amortized) time. + +Test "main" packages synthesized by the build system are now reported as +first-class packages, avoiding the need for clients (such as go/ssa) to +reinvent this generation logic. + +One way in which go/packages is simpler than the old loader is in its +treatment of in-package tests. In-package tests are packages that +consist of all the files of the library under test, plus the test files. +The old loader constructed in-package tests by a two-phase process of +mutation called "augmentation": first it would construct and type check +all the ordinary library packages and type-check the packages that +depend on them; then it would add more (test) files to the package and +type-check again. This two-phase approach had four major problems: +1) in processing the tests, the loader modified the library package, + leaving no way for a client application to see both the test + package and the library package; one would mutate into the other. +2) because test files can declare additional methods on types defined in + the library portion of the package, the dispatch of method calls in + the library portion was affected by the presence of the test files. + This should have been a clue that the packages were logically + different. +3) this model of "augmentation" assumed at most one in-package test + per library package, which is true of projects using 'go build', + but not other build systems. +4) because of the two-phase nature of test processing, all packages that + import the library package had to be processed before augmentation, + forcing a "one-shot" API and preventing the client from calling Load + in several times in sequence as is now possible in WholeProgram mode. + (TypeCheck mode has a similar one-shot restriction for a different reason.) + +Early drafts of this package supported "multi-shot" operation. +Although it allowed clients to make a sequence of calls (or concurrent +calls) to Load, building up the graph of Packages incrementally, +it was of marginal value: it complicated the API +(since it allowed some options to vary across calls but not others), +it complicated the implementation, +it cannot be made to work in Types mode, as explained above, +and it was less efficient than making one combined call (when this is possible). 
+Among the clients we have inspected, none made multiple calls to load +but could not be easily and satisfactorily modified to make only a single call. +However, applications changes may be required. +For example, the ssadump command loads the user-specified packages +and in addition the runtime package. It is tempting to simply append +"runtime" to the user-provided list, but that does not work if the user +specified an ad-hoc package such as [a.go b.go]. +Instead, ssadump no longer requests the runtime package, +but seeks it among the dependencies of the user-specified packages, +and emits an error if it is not found. + +Overlays: The Overlay field in the Config allows providing alternate contents +for Go source files, by providing a mapping from file path to contents. +go/packages will pull in new imports added in overlay files when go/packages +is run in LoadImports mode or greater. +Overlay support for the go list driver isn't complete yet: if the file doesn't +exist on disk, it will only be recognized in an overlay if it is a non-test file +and the package would be reported even without the overlay. + +Questions & Tasks + +- Add GOARCH/GOOS? + They are not portable concepts, but could be made portable. + Our goal has been to allow users to express themselves using the conventions + of the underlying build system: if the build system honors GOARCH + during a build and during a metadata query, then so should + applications built atop that query mechanism. + Conversely, if the target architecture of the build is determined by + command-line flags, the application can pass the relevant + flags through to the build system using a command such as: + myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin" + However, this approach is low-level, unwieldy, and non-portable. + GOOS and GOARCH seem important enough to warrant a dedicated option. + +- How should we handle partial failures such as a mixture of good and + malformed patterns, existing and non-existent packages, successful and + failed builds, import failures, import cycles, and so on, in a call to + Load? + +- Support bazel, blaze, and go1.10 list, not just go1.11 list. + +- Handle (and test) various partial success cases, e.g. + a mixture of good packages and: + invalid patterns + nonexistent packages + empty packages + packages with malformed package or import declarations + unreadable files + import cycles + other parse errors + type errors + Make sure we record errors at the correct place in the graph. + +- Missing packages among initial arguments are not reported. + Return bogus packages for them, like golist does. + +- "undeclared name" errors (for example) are reported out of source file + order. I suspect this is due to the breadth-first resolution now used + by go/types. Is that a bug? Discuss with gri. + +*/ diff --git a/terraform/providers/google/vendor/golang.org/x/tools/go/packages/external.go b/terraform/providers/google/vendor/golang.org/x/tools/go/packages/external.go new file mode 100644 index 00000000000..4335c1eb14c --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/go/packages/external.go @@ -0,0 +1,140 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +// This file defines the protocol that enables an external "driver" +// tool to supply package metadata in place of 'go list'. 
+ +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "os/exec" + "strings" +) + +// DriverRequest defines the schema of a request for package metadata +// from an external driver program. The JSON-encoded DriverRequest +// message is provided to the driver program's standard input. The +// query patterns are provided as command-line arguments. +// +// See the package documentation for an overview. +type DriverRequest struct { + Mode LoadMode `json:"mode"` + + // Env specifies the environment the underlying build system should be run in. + Env []string `json:"env"` + + // BuildFlags are flags that should be passed to the underlying build system. + BuildFlags []string `json:"build_flags"` + + // Tests specifies whether the patterns should also return test packages. + Tests bool `json:"tests"` + + // Overlay maps file paths (relative to the driver's working directory) to the byte contents + // of overlay files. + Overlay map[string][]byte `json:"overlay"` +} + +// DriverResponse defines the schema of a response from an external +// driver program, providing the results of a query for package +// metadata. The driver program must write a JSON-encoded +// DriverResponse message to its standard output. +// +// See the package documentation for an overview. +type DriverResponse struct { + // NotHandled is returned if the request can't be handled by the current + // driver. If an external driver returns a response with NotHandled, the + // rest of the DriverResponse is ignored, and go/packages will fallback + // to the next driver. If go/packages is extended in the future to support + // lists of multiple drivers, go/packages will fall back to the next driver. + NotHandled bool + + // Compiler and Arch are the arguments pass of types.SizesFor + // to get a types.Sizes to use when type checking. + Compiler string + Arch string + + // Roots is the set of package IDs that make up the root packages. + // We have to encode this separately because when we encode a single package + // we cannot know if it is one of the roots as that requires knowledge of the + // graph it is part of. + Roots []string `json:",omitempty"` + + // Packages is the full set of packages in the graph. + // The packages are not connected into a graph. + // The Imports if populated will be stubs that only have their ID set. + // Imports will be connected and then type and syntax information added in a + // later pass (see refine). + Packages []*Package + + // GoVersion is the minor version number used by the driver + // (e.g. the go command on the PATH) when selecting .go files. + // Zero means unknown. + GoVersion int +} + +// driver is the type for functions that query the build system for the +// packages named by the patterns. +type driver func(cfg *Config, patterns ...string) (*DriverResponse, error) + +// findExternalDriver returns the file path of a tool that supplies +// the build system package structure, or "" if not found." +// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its +// value, otherwise it searches for a binary named gopackagesdriver on the PATH. 
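[Before the implementation of findExternalDriver, a sketch of the other side of this protocol: a driver binary that decodes the DriverRequest from stdin and simply answers NotHandled, so go/packages falls back to go list. The structs mirror only the fields this stub needs; a real driver would fill in Roots and Packages instead.

package main

import (
	"encoding/json"
	"log"
	"os"
)

// driverRequest mirrors the wire shape of packages.DriverRequest.
type driverRequest struct {
	Mode       uint64            `json:"mode"`
	Env        []string          `json:"env"`
	BuildFlags []string          `json:"build_flags"`
	Tests      bool              `json:"tests"`
	Overlay    map[string][]byte `json:"overlay"`
}

// driverResponse carries only the field this stub sets.
type driverResponse struct {
	NotHandled bool
}

func main() {
	_ = os.Args[1:] // the query patterns arrive as arguments

	var req driverRequest
	if err := json.NewDecoder(os.Stdin).Decode(&req); err != nil {
		log.Fatalf("decoding DriverRequest: %v", err)
	}

	// Decline; go/packages will fall back to the go list driver.
	resp := driverResponse{NotHandled: true}
	if err := json.NewEncoder(os.Stdout).Encode(resp); err != nil {
		log.Fatal(err)
	}
}

Pointing GOPACKAGESDRIVER at the built binary, or installing it as gopackagesdriver on the PATH, is what makes the lookup below find it.]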
+func findExternalDriver(cfg *Config) driver { + const toolPrefix = "GOPACKAGESDRIVER=" + tool := "" + for _, env := range cfg.Env { + if val := strings.TrimPrefix(env, toolPrefix); val != env { + tool = val + } + } + if tool != "" && tool == "off" { + return nil + } + if tool == "" { + var err error + tool, err = exec.LookPath("gopackagesdriver") + if err != nil { + return nil + } + } + return func(cfg *Config, words ...string) (*DriverResponse, error) { + req, err := json.Marshal(DriverRequest{ + Mode: cfg.Mode, + Env: cfg.Env, + BuildFlags: cfg.BuildFlags, + Tests: cfg.Tests, + Overlay: cfg.Overlay, + }) + if err != nil { + return nil, fmt.Errorf("failed to encode message to driver tool: %v", err) + } + + buf := new(bytes.Buffer) + stderr := new(bytes.Buffer) + cmd := exec.CommandContext(cfg.Context, tool, words...) + cmd.Dir = cfg.Dir + cmd.Env = cfg.Env + cmd.Stdin = bytes.NewReader(req) + cmd.Stdout = buf + cmd.Stderr = stderr + + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr) + } + if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" { + fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr) + } + + var response DriverResponse + if err := json.Unmarshal(buf.Bytes(), &response); err != nil { + return nil, err + } + return &response, nil + } +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/go/packages/golist.go b/terraform/providers/google/vendor/golang.org/x/tools/go/packages/golist.go new file mode 100644 index 00000000000..22305d9c90a --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/go/packages/golist.go @@ -0,0 +1,1106 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "log" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "unicode" + + "golang.org/x/tools/go/internal/packagesdriver" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/packagesinternal" +) + +// debug controls verbose logging. +var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG")) + +// A goTooOldError reports that the go command +// found by exec.LookPath is too old to use the new go list behavior. +type goTooOldError struct { + error +} + +// responseDeduper wraps a DriverResponse, deduplicating its contents. +type responseDeduper struct { + seenRoots map[string]bool + seenPackages map[string]*Package + dr *DriverResponse +} + +func newDeduper() *responseDeduper { + return &responseDeduper{ + dr: &DriverResponse{}, + seenRoots: map[string]bool{}, + seenPackages: map[string]*Package{}, + } +} + +// addAll fills in r with a DriverResponse. 
+func (r *responseDeduper) addAll(dr *DriverResponse) { + for _, pkg := range dr.Packages { + r.addPackage(pkg) + } + for _, root := range dr.Roots { + r.addRoot(root) + } + r.dr.GoVersion = dr.GoVersion +} + +func (r *responseDeduper) addPackage(p *Package) { + if r.seenPackages[p.ID] != nil { + return + } + r.seenPackages[p.ID] = p + r.dr.Packages = append(r.dr.Packages, p) +} + +func (r *responseDeduper) addRoot(id string) { + if r.seenRoots[id] { + return + } + r.seenRoots[id] = true + r.dr.Roots = append(r.dr.Roots, id) +} + +type golistState struct { + cfg *Config + ctx context.Context + + envOnce sync.Once + goEnvError error + goEnv map[string]string + + rootsOnce sync.Once + rootDirsError error + rootDirs map[string]string + + goVersionOnce sync.Once + goVersionError error + goVersion int // The X in Go 1.X. + + // vendorDirs caches the (non)existence of vendor directories. + vendorDirs map[string]bool +} + +// getEnv returns Go environment variables. Only specific variables are +// populated -- computing all of them is slow. +func (state *golistState) getEnv() (map[string]string, error) { + state.envOnce.Do(func() { + var b *bytes.Buffer + b, state.goEnvError = state.invokeGo("env", "-json", "GOMOD", "GOPATH") + if state.goEnvError != nil { + return + } + + state.goEnv = make(map[string]string) + decoder := json.NewDecoder(b) + if state.goEnvError = decoder.Decode(&state.goEnv); state.goEnvError != nil { + return + } + }) + return state.goEnv, state.goEnvError +} + +// mustGetEnv is a convenience function that can be used if getEnv has already succeeded. +func (state *golistState) mustGetEnv() map[string]string { + env, err := state.getEnv() + if err != nil { + panic(fmt.Sprintf("mustGetEnv: %v", err)) + } + return env +} + +// goListDriver uses the go list command to interpret the patterns and produce +// the build system package structure. +// See driver for more details. +func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) { + // Make sure that any asynchronous go commands are killed when we return. + parentCtx := cfg.Context + if parentCtx == nil { + parentCtx = context.Background() + } + ctx, cancel := context.WithCancel(parentCtx) + defer cancel() + + response := newDeduper() + + state := &golistState{ + cfg: cfg, + ctx: ctx, + vendorDirs: map[string]bool{}, + } + + // Fill in response.Sizes asynchronously if necessary. + if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { + errCh := make(chan error) + go func() { + compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) + response.dr.Compiler = compiler + response.dr.Arch = arch + errCh <- err + }() + defer func() { + if sizesErr := <-errCh; sizesErr != nil { + err = sizesErr + } + }() + } + + // Determine files requested in contains patterns + var containFiles []string + restPatterns := make([]string, 0, len(patterns)) + // Extract file= and other [querytype]= patterns. Report an error if querytype + // doesn't exist. 
+extractQueries: + for _, pattern := range patterns { + eqidx := strings.Index(pattern, "=") + if eqidx < 0 { + restPatterns = append(restPatterns, pattern) + } else { + query, value := pattern[:eqidx], pattern[eqidx+len("="):] + switch query { + case "file": + containFiles = append(containFiles, value) + case "pattern": + restPatterns = append(restPatterns, value) + case "": // not a reserved query + restPatterns = append(restPatterns, pattern) + default: + for _, rune := range query { + if rune < 'a' || rune > 'z' { // not a reserved query + restPatterns = append(restPatterns, pattern) + continue extractQueries + } + } + // Reject all other patterns containing "=" + return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern) + } + } + } + + // See if we have any patterns to pass through to go list. Zero initial + // patterns also requires a go list call, since it's the equivalent of + // ".". + if len(restPatterns) > 0 || len(patterns) == 0 { + dr, err := state.createDriverResponse(restPatterns...) + if err != nil { + return nil, err + } + response.addAll(dr) + } + + if len(containFiles) != 0 { + if err := state.runContainsQueries(response, containFiles); err != nil { + return nil, err + } + } + + // (We may yet return an error due to defer.) + return response.dr, nil +} + +func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error { + for _, query := range queries { + // TODO(matloob): Do only one query per directory. + fdir := filepath.Dir(query) + // Pass absolute path of directory to go list so that it knows to treat it as a directory, + // not a package path. + pattern, err := filepath.Abs(fdir) + if err != nil { + return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) + } + dirResponse, err := state.createDriverResponse(pattern) + + // If there was an error loading the package, or no packages are returned, + // or the package is returned with errors, try to load the file as an + // ad-hoc package. + // Usually the error will appear in a returned package, but may not if we're + // in module mode and the ad-hoc is located outside a module. + if err != nil || len(dirResponse.Packages) == 0 || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && + len(dirResponse.Packages[0].Errors) == 1 { + var queryErr error + if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil { + return err // return the original error + } + } + isRoot := make(map[string]bool, len(dirResponse.Roots)) + for _, root := range dirResponse.Roots { + isRoot[root] = true + } + for _, pkg := range dirResponse.Packages { + // Add any new packages to the main set + // We don't bother to filter packages that will be dropped by the changes of roots, + // that will happen anyway during graph construction outside this function. + // Over-reporting packages is not a problem. + response.addPackage(pkg) + // if the package was not a root one, it cannot have the file + if !isRoot[pkg.ID] { + continue + } + for _, pkgFile := range pkg.GoFiles { + if filepath.Base(query) == filepath.Base(pkgFile) { + response.addRoot(pkg.ID) + break + } + } + } + } + return nil +} + +// adhocPackage attempts to load or construct an ad-hoc package for a given +// query, if the original call to the driver produced inadequate results. 
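+//
+// Editor's note: for a hypothetical file= query such as /tmp/scratch/main.go
+// that lies outside any module, the fallback typically synthesizes:
+//
+//	&Package{
+//		ID:      "command-line-arguments",
+//		PkgPath: query,
+//		GoFiles: []string{query},
+//	}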
+func (state *golistState) adhocPackage(pattern, query string) (*DriverResponse, error) { + response, err := state.createDriverResponse(query) + if err != nil { + return nil, err + } + // If we get nothing back from `go list`, + // try to make this file into its own ad-hoc package. + // TODO(rstambler): Should this check against the original response? + if len(response.Packages) == 0 { + response.Packages = append(response.Packages, &Package{ + ID: "command-line-arguments", + PkgPath: query, + GoFiles: []string{query}, + CompiledGoFiles: []string{query}, + Imports: make(map[string]*Package), + }) + response.Roots = append(response.Roots, "command-line-arguments") + } + // Handle special cases. + if len(response.Packages) == 1 { + // golang/go#33482: If this is a file= query for ad-hoc packages where + // the file only exists on an overlay, and exists outside of a module, + // add the file to the package and remove the errors. + if response.Packages[0].ID == "command-line-arguments" || + filepath.ToSlash(response.Packages[0].PkgPath) == filepath.ToSlash(query) { + if len(response.Packages[0].GoFiles) == 0 { + filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath + // TODO(matloob): check if the file is outside of a root dir? + for path := range state.cfg.Overlay { + if path == filename { + response.Packages[0].Errors = nil + response.Packages[0].GoFiles = []string{path} + response.Packages[0].CompiledGoFiles = []string{path} + } + } + } + } + } + return response, nil +} + +// Fields must match go list; +// see $GOROOT/src/cmd/go/internal/load/pkg.go. +type jsonPackage struct { + ImportPath string + Dir string + Name string + Export string + GoFiles []string + CompiledGoFiles []string + IgnoredGoFiles []string + IgnoredOtherFiles []string + EmbedPatterns []string + EmbedFiles []string + CFiles []string + CgoFiles []string + CXXFiles []string + MFiles []string + HFiles []string + FFiles []string + SFiles []string + SwigFiles []string + SwigCXXFiles []string + SysoFiles []string + Imports []string + ImportMap map[string]string + Deps []string + Module *Module + TestGoFiles []string + TestImports []string + XTestGoFiles []string + XTestImports []string + ForTest string // q in a "p [q.test]" package, else "" + DepOnly bool + + Error *packagesinternal.PackageError + DepsErrors []*packagesinternal.PackageError +} + +type jsonPackageError struct { + ImportStack []string + Pos string + Err string +} + +func otherFiles(p *jsonPackage) [][]string { + return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} +} + +// createDriverResponse uses the "go list" command to expand the pattern +// words and return a response for the specified packages. +func (state *golistState) createDriverResponse(words ...string) (*DriverResponse, error) { + // go list uses the following identifiers in ImportPath and Imports: + // + // "p" -- importable package or main (command) + // "q.test" -- q's test executable + // "p [q.test]" -- variant of p as built for q's test executable + // "q_test [q.test]" -- q's external test package + // + // The packages p that are built differently for a test q.test + // are q itself, plus any helpers used by the external test q_test, + // typically including "testing" and all its dependencies. + + // Run "go list" for complete + // information on the specified packages. 
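+	// Editor's note: the resulting invocation has roughly this shape (the
+	// exact flags are computed by golistargs below):
+	//
+	//	go list -e -json=<fields> -compiled=<bool> -test=<bool> -export=<bool> -deps=<bool> -find=<bool> -- <patterns>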
+ goVersion, err := state.getGoVersion() + if err != nil { + return nil, err + } + buf, err := state.invokeGo("list", golistargs(state.cfg, words, goVersion)...) + if err != nil { + return nil, err + } + + seen := make(map[string]*jsonPackage) + pkgs := make(map[string]*Package) + additionalErrors := make(map[string][]Error) + // Decode the JSON and convert it to Package form. + response := &DriverResponse{ + GoVersion: goVersion, + } + for dec := json.NewDecoder(buf); dec.More(); { + p := new(jsonPackage) + if err := dec.Decode(p); err != nil { + return nil, fmt.Errorf("JSON decoding failed: %v", err) + } + + if p.ImportPath == "" { + // The documentation for go list says that “[e]rroneous packages will have + // a non-empty ImportPath”. If for some reason it comes back empty, we + // prefer to error out rather than silently discarding data or handing + // back a package without any way to refer to it. + if p.Error != nil { + return nil, Error{ + Pos: p.Error.Pos, + Msg: p.Error.Err, + } + } + return nil, fmt.Errorf("package missing import path: %+v", p) + } + + // Work around https://golang.org/issue/33157: + // go list -e, when given an absolute path, will find the package contained at + // that directory. But when no package exists there, it will return a fake package + // with an error and the ImportPath set to the absolute path provided to go list. + // Try to convert that absolute path to what its package path would be if it's + // contained in a known module or GOPATH entry. This will allow the package to be + // properly "reclaimed" when overlays are processed. + if filepath.IsAbs(p.ImportPath) && p.Error != nil { + pkgPath, ok, err := state.getPkgPath(p.ImportPath) + if err != nil { + return nil, err + } + if ok { + p.ImportPath = pkgPath + } + } + + if old, found := seen[p.ImportPath]; found { + // If one version of the package has an error, and the other doesn't, assume + // that this is a case where go list is reporting a fake dependency variant + // of the imported package: When a package tries to invalidly import another + // package, go list emits a variant of the imported package (with the same + // import path, but with an error on it, and the package will have a + // DepError set on it). An example of when this can happen is for imports of + // main packages: main packages can not be imported, but they may be + // separately matched and listed by another pattern. + // See golang.org/issue/36188 for more details. + + // The plan is that eventually, hopefully in Go 1.15, the error will be + // reported on the importing package rather than the duplicate "fake" + // version of the imported package. Once all supported versions of Go + // have the new behavior this logic can be deleted. + // TODO(matloob): delete the workaround logic once all supported versions of + // Go return the errors on the proper package. + + // There should be exactly one version of a package that doesn't have an + // error. + if old.Error == nil && p.Error == nil { + if !reflect.DeepEqual(p, old) { + return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) + } + continue + } + + // Determine if this package's error needs to be bubbled up. + // This is a hack, and we expect for go list to eventually set the error + // on the package. 
+ if old.Error != nil { + var errkind string + if strings.Contains(old.Error.Err, "not an importable package") { + errkind = "not an importable package" + } else if strings.Contains(old.Error.Err, "use of internal package") && strings.Contains(old.Error.Err, "not allowed") { + errkind = "use of internal package not allowed" + } + if errkind != "" { + if len(old.Error.ImportStack) < 1 { + return nil, fmt.Errorf(`internal error: go list gave a %q error with empty import stack`, errkind) + } + importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-1] + if importingPkg == old.ImportPath { + // Using an older version of Go which put this package itself on top of import + // stack, instead of the importer. Look for importer in second from top + // position. + if len(old.Error.ImportStack) < 2 { + return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack without importing package`, errkind) + } + importingPkg = old.Error.ImportStack[len(old.Error.ImportStack)-2] + } + additionalErrors[importingPkg] = append(additionalErrors[importingPkg], Error{ + Pos: old.Error.Pos, + Msg: old.Error.Err, + Kind: ListError, + }) + } + } + + // Make sure that if there's a version of the package without an error, + // that's the one reported to the user. + if old.Error == nil { + continue + } + + // This package will replace the old one at the end of the loop. + } + seen[p.ImportPath] = p + + pkg := &Package{ + Name: p.Name, + ID: p.ImportPath, + GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), + CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), + OtherFiles: absJoin(p.Dir, otherFiles(p)...), + EmbedFiles: absJoin(p.Dir, p.EmbedFiles), + EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns), + IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles), + forTest: p.ForTest, + depsErrors: p.DepsErrors, + Module: p.Module, + } + + if (state.cfg.Mode&typecheckCgo) != 0 && len(p.CgoFiles) != 0 { + if len(p.CompiledGoFiles) > len(p.GoFiles) { + // We need the cgo definitions, which are in the first + // CompiledGoFile after the non-cgo ones. This is a hack but there + // isn't currently a better way to find it. We also need the pure + // Go files and unprocessed cgo files, all of which are already + // in pkg.GoFiles. + cgoTypes := p.CompiledGoFiles[len(p.GoFiles)] + pkg.CompiledGoFiles = append([]string{cgoTypes}, pkg.GoFiles...) + } else { + // golang/go#38990: go list silently fails to do cgo processing + pkg.CompiledGoFiles = nil + pkg.Errors = append(pkg.Errors, Error{ + Msg: "go list failed to return CompiledGoFiles. This may indicate failure to perform cgo processing; try building at the command line. See https://golang.org/issue/38990.", + Kind: ListError, + }) + } + } + + // Work around https://golang.org/issue/28749: + // cmd/go puts assembly, C, and C++ files in CompiledGoFiles. + // Remove files from CompiledGoFiles that are non-go files + // (or are not files that look like they are from the cache). + if len(pkg.CompiledGoFiles) > 0 { + out := pkg.CompiledGoFiles[:0] + for _, f := range pkg.CompiledGoFiles { + if ext := filepath.Ext(f); ext != ".go" && ext != "" { // ext == "" means the file is from the cache, so probably cgo-processed file + continue + } + out = append(out, f) + } + pkg.CompiledGoFiles = out + } + + // Extract the PkgPath from the package's ID. 
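+		// Editor's example: ID "fmt [fmt.test]" yields PkgPath "fmt";
+		// an ID with no space, such as "fmt", is its own PkgPath.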
+		if i := strings.IndexByte(pkg.ID, ' '); i >= 0 {
+			pkg.PkgPath = pkg.ID[:i]
+		} else {
+			pkg.PkgPath = pkg.ID
+		}
+
+		if pkg.PkgPath == "unsafe" {
+			pkg.CompiledGoFiles = nil // ignore fake unsafe.go file (#59929)
+		} else if len(pkg.CompiledGoFiles) == 0 {
+			// Workaround for pre-Go 1.11 versions of go list.
+			// TODO(matloob): they should be handled by the fallback.
+			// Can we delete this?
+			pkg.CompiledGoFiles = pkg.GoFiles
+		}
+
+		// Assume go list emits only absolute paths for Dir.
+		if p.Dir != "" && !filepath.IsAbs(p.Dir) {
+			log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir)
+		}
+
+		if p.Export != "" && !filepath.IsAbs(p.Export) {
+			pkg.ExportFile = filepath.Join(p.Dir, p.Export)
+		} else {
+			pkg.ExportFile = p.Export
+		}
+
+		// imports
+		//
+		// Imports contains the IDs of all imported packages.
+		// ImportMap records (path, ID) only where they differ.
+		ids := make(map[string]bool)
+		for _, id := range p.Imports {
+			ids[id] = true
+		}
+		pkg.Imports = make(map[string]*Package)
+		for path, id := range p.ImportMap {
+			pkg.Imports[path] = &Package{ID: id} // non-identity import
+			delete(ids, id)
+		}
+		for id := range ids {
+			if id == "C" {
+				continue
+			}
+
+			pkg.Imports[id] = &Package{ID: id} // identity import
+		}
+		if !p.DepOnly {
+			response.Roots = append(response.Roots, pkg.ID)
+		}
+
+		// Temporary work-around for golang/go#39986. Parse filenames out of
+		// error messages. This happens if there are unrecoverable syntax
+		// errors in the source, so we can't match on a specific error message.
+		//
+		// TODO(rfindley): remove this heuristic, in favor of considering
+		// InvalidGoFiles from the list driver.
+		if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) {
+			addFilenameFromPos := func(pos string) bool {
+				split := strings.Split(pos, ":")
+				if len(split) < 1 {
+					return false
+				}
+				filename := strings.TrimSpace(split[0])
+				if filename == "" {
+					return false
+				}
+				if !filepath.IsAbs(filename) {
+					filename = filepath.Join(state.cfg.Dir, filename)
+				}
+				info, _ := os.Stat(filename)
+				if info == nil {
+					return false
+				}
+				pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename)
+				pkg.GoFiles = append(pkg.GoFiles, filename)
+				return true
+			}
+			found := addFilenameFromPos(err.Pos)
+			// In some cases, go list reports the error position only in the
+			// error text, not in the Pos field. One such case is when the
+			// file's package name is a keyword (see golang.org/issue/39763).
+			if !found {
+				addFilenameFromPos(err.Err)
+			}
+		}
+
+		if p.Error != nil {
+			msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363.
+			// Address golang.org/issue/35964 by appending import stack to error message.
+			if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 {
+				msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack)
+			}
+			pkg.Errors = append(pkg.Errors, Error{
+				Pos:  p.Error.Pos,
+				Msg:  msg,
+				Kind: ListError,
+			})
+		}
+
+		pkgs[pkg.ID] = pkg
+	}
+
+	for id, errs := range additionalErrors {
+		if p, ok := pkgs[id]; ok {
+			p.Errors = append(p.Errors, errs...)
+		}
+	}
+	for _, pkg := range pkgs {
+		response.Packages = append(response.Packages, pkg)
+	}
+	sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID })
+
+	return response, nil
+}
+
+func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool {
+	if len(p.GoFiles) > 0 || len(p.CompiledGoFiles) > 0 {
+		return false
+	}
+
+	goV, err := state.getGoVersion()
+	if err != nil {
+		return false
+	}
+
+	// On Go 1.14 and earlier, only add filenames from errors if the import stack is empty.
+	// The import stack behaves differently for these versions than newer Go versions.
+	if goV < 15 {
+		return len(p.Error.ImportStack) == 0
+	}
+
+	// On Go 1.15 and later, only parse filenames out of the error if there's no import stack,
+	// or the current package is at the top of the import stack. This is not guaranteed
+	// to work perfectly, but should avoid some cases where files in errors don't belong to this
+	// package.
+	return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath
+}
+
+// getGoVersion returns the effective minor version of the go command.
+func (state *golistState) getGoVersion() (int, error) {
+	state.goVersionOnce.Do(func() {
+		state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner)
+	})
+	return state.goVersion, state.goVersionError
+}
+
+// getPkgPath finds the package path of a directory if it's relative to a root
+// directory.
+func (state *golistState) getPkgPath(dir string) (string, bool, error) {
+	absDir, err := filepath.Abs(dir)
+	if err != nil {
+		return "", false, err
+	}
+	roots, err := state.determineRootDirs()
+	if err != nil {
+		return "", false, err
+	}
+
+	for rdir, rpath := range roots {
+		// Make sure that the directory is in the module,
+		// to avoid creating a path relative to another module.
+		if !strings.HasPrefix(absDir, rdir) {
+			continue
+		}
+		// TODO(matloob): This doesn't properly handle symlinks.
+		r, err := filepath.Rel(rdir, dir)
+		if err != nil {
+			continue
+		}
+		if rpath != "" {
+			// We choose only one root even though the directory can belong to
+			// multiple modules or GOPATH entries. This is okay because we only
+			// need to work with absolute dirs when a file is missing from disk,
+			// for instance when gopls calls go/packages in an overlay.
+			// Once the file is saved, gopls (or the next invocation of the tool)
+			// will get the correct result straight from go list.
+			// TODO(matloob): Implement module tiebreaking?
+			return path.Join(rpath, filepath.ToSlash(r)), true, nil
+		}
+		return filepath.ToSlash(r), true, nil
+	}
+	return "", false, nil
+}
+
+// absJoin absolutizes and flattens the lists of files.
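+//
+// Editor's example:
+//
+//	absJoin("/src/p", []string{"a.go"}, []string{"/tmp/b.go"})
+//	// -> ["/src/p/a.go", "/tmp/b.go"]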
+func absJoin(dir string, fileses ...[]string) (res []string) { + for _, files := range fileses { + for _, file := range files { + if !filepath.IsAbs(file) { + file = filepath.Join(dir, file) + } + res = append(res, file) + } + } + return res +} + +func jsonFlag(cfg *Config, goVersion int) string { + if goVersion < 19 { + return "-json" + } + var fields []string + added := make(map[string]bool) + addFields := func(fs ...string) { + for _, f := range fs { + if !added[f] { + added[f] = true + fields = append(fields, f) + } + } + } + addFields("Name", "ImportPath", "Error") // These fields are always needed + if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 { + addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles", + "CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles", + "SwigFiles", "SwigCXXFiles", "SysoFiles") + if cfg.Tests { + addFields("TestGoFiles", "XTestGoFiles") + } + } + if cfg.Mode&NeedTypes != 0 { + // CompiledGoFiles seems to be required for the test case TestCgoNoSyntax, + // even when -compiled isn't passed in. + // TODO(#52435): Should we make the test ask for -compiled, or automatically + // request CompiledGoFiles in certain circumstances? + addFields("Dir", "CompiledGoFiles") + } + if cfg.Mode&NeedCompiledGoFiles != 0 { + addFields("Dir", "CompiledGoFiles", "Export") + } + if cfg.Mode&NeedImports != 0 { + // When imports are requested, DepOnly is used to distinguish between packages + // explicitly requested and transitive imports of those packages. + addFields("DepOnly", "Imports", "ImportMap") + if cfg.Tests { + addFields("TestImports", "XTestImports") + } + } + if cfg.Mode&NeedDeps != 0 { + addFields("DepOnly") + } + if usesExportData(cfg) { + // Request Dir in the unlikely case Export is not absolute. + addFields("Dir", "Export") + } + if cfg.Mode&needInternalForTest != 0 { + addFields("ForTest") + } + if cfg.Mode&needInternalDepsErrors != 0 { + addFields("DepsErrors") + } + if cfg.Mode&NeedModule != 0 { + addFields("Module") + } + if cfg.Mode&NeedEmbedFiles != 0 { + addFields("EmbedFiles") + } + if cfg.Mode&NeedEmbedPatterns != 0 { + addFields("EmbedPatterns") + } + return "-json=" + strings.Join(fields, ",") +} + +func golistargs(cfg *Config, words []string, goVersion int) []string { + const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo + fullargs := []string{ + "-e", jsonFlag(cfg, goVersion), + fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0), + fmt.Sprintf("-test=%t", cfg.Tests), + fmt.Sprintf("-export=%t", usesExportData(cfg)), + fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0), + // go list doesn't let you pass -test and -find together, + // probably because you'd just get the TestMain. + fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)), + } + + // golang/go#60456: with go1.21 and later, go list serves pgo variants, which + // can be costly to compute and may result in redundant processing for the + // caller. Disable these variants. If someone wants to add e.g. a NeedPGO + // mode flag, that should be a separate proposal. + if goVersion >= 21 { + fullargs = append(fullargs, "-pgo=off") + } + + fullargs = append(fullargs, cfg.BuildFlags...) + fullargs = append(fullargs, "--") + fullargs = append(fullargs, words...) + return fullargs +} + +// cfgInvocation returns an Invocation that reflects cfg's settings. 
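+//
+// Editor's note: invokeGo builds on this; e.g. a module query is issued as
+//
+//	inv := state.cfgInvocation()
+//	inv.Verb, inv.Args = "list", []string{"-m", "-json", "all"}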
+func (state *golistState) cfgInvocation() gocommand.Invocation { + cfg := state.cfg + return gocommand.Invocation{ + BuildFlags: cfg.BuildFlags, + ModFile: cfg.modFile, + ModFlag: cfg.modFlag, + CleanEnv: cfg.Env != nil, + Env: cfg.Env, + Logf: cfg.Logf, + WorkingDir: cfg.Dir, + } +} + +// invokeGo returns the stdout of a go command invocation. +func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) { + cfg := state.cfg + + inv := state.cfgInvocation() + + // For Go versions 1.16 and above, `go list` accepts overlays directly via + // the -overlay flag. Set it, if it's available. + // + // The check for "list" is not necessarily required, but we should avoid + // getting the go version if possible. + if verb == "list" { + goVersion, err := state.getGoVersion() + if err != nil { + return nil, err + } + if goVersion >= 16 { + filename, cleanup, err := state.writeOverlays() + if err != nil { + return nil, err + } + defer cleanup() + inv.Overlay = filename + } + } + inv.Verb = verb + inv.Args = args + gocmdRunner := cfg.gocmdRunner + if gocmdRunner == nil { + gocmdRunner = &gocommand.Runner{} + } + stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv) + if err != nil { + // Check for 'go' executable not being found. + if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound) + } + + exitErr, ok := err.(*exec.ExitError) + if !ok { + // Catastrophic error: + // - context cancellation + return nil, fmt.Errorf("couldn't run 'go': %w", err) + } + + // Old go version? + if strings.Contains(stderr.String(), "flag provided but not defined") { + return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)} + } + + // Related to #24854 + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "unexpected directory layout") { + return nil, friendlyErr + } + + // Is there an error running the C compiler in cgo? This will be reported in the "Error" field + // and should be suppressed by go list -e. + // + // This condition is not perfect yet because the error message can include other error messages than runtime/cgo. + isPkgPathRune := func(r rune) bool { + // From https://golang.org/ref/spec#Import_declarations: + // Implementation restriction: A compiler may restrict ImportPaths to non-empty strings + // using only characters belonging to Unicode's L, M, N, P, and S general categories + // (the Graphic characters without spaces) and may also exclude the + // characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD. + return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) && + !strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r) + } + // golang/go#36770: Handle case where cmd/go prints module download messages before the error. + msg := stderr.String() + for strings.HasPrefix(msg, "go: downloading") { + msg = msg[strings.IndexRune(msg, '\n')+1:] + } + if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") { + msg := msg[len("# "):] + if strings.HasPrefix(strings.TrimLeftFunc(msg, isPkgPathRune), "\n") { + return stdout, nil + } + // Treat pkg-config errors as a special case (golang.org/issue/36770). + if strings.HasPrefix(msg, "pkg-config") { + return stdout, nil + } + } + + // This error only appears in stderr. 
See golang.org/cl/166398 for a fix in go list to show
+		// the error in the Err section of stdout in case -e option is provided.
+		// This fix is provided for backwards compatibility.
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") {
+			output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+				strings.Trim(stderr.String(), "\n"))
+			return bytes.NewBufferString(output), nil
+		}
+
+		// Similar to the previous error, but currently lacks a fix in Go.
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must all be in one directory") {
+			output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+				strings.Trim(stderr.String(), "\n"))
+			return bytes.NewBufferString(output), nil
+		}
+
+		// Backwards compatibility for Go 1.11 because 1.12 and 1.13 put the directory in the ImportPath.
+		// If the package doesn't exist, put the absolute path of the directory into the error message,
+		// as Go 1.13 list does.
+		const noSuchDirectory = "no such directory"
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), noSuchDirectory) {
+			errstr := stderr.String()
+			abspath := strings.TrimSpace(errstr[strings.Index(errstr, noSuchDirectory)+len(noSuchDirectory):])
+			output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+				abspath, strings.Trim(stderr.String(), "\n"))
+			return bytes.NewBufferString(output), nil
+		}
+
+		// Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist.
+		// Note that the error message we look for in this case is different from the one looked for above.
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") {
+			output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+				strings.Trim(stderr.String(), "\n"))
+			return bytes.NewBufferString(output), nil
+		}
+
+		// Workaround for #34273. go list -e with GO111MODULE=on has incorrect behavior when listing a
+		// directory outside any module.
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside available modules") {
+			output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+				// TODO(matloob): command-line-arguments isn't correct here.
+				"command-line-arguments", strings.Trim(stderr.String(), "\n"))
+			return bytes.NewBufferString(output), nil
+		}
+
+		// Another variation of the previous error.
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside module root") {
+			output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+				// TODO(matloob): command-line-arguments isn't correct here.
+				"command-line-arguments", strings.Trim(stderr.String(), "\n"))
+			return bytes.NewBufferString(output), nil
+		}
+
+		// Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit
+		// status if there's a dependency on a package that doesn't exist. But it should return
+		// a zero exit status and set an error on that package.
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") {
+			// Don't clobber stdout if `go list` actually returned something.
+ if len(stdout.String()) > 0 { + return stdout, nil + } + // try to extract package name from string + stderrStr := stderr.String() + var importPath string + colon := strings.Index(stderrStr, ":") + if colon > 0 && strings.HasPrefix(stderrStr, "go build ") { + importPath = stderrStr[len("go build "):colon] + } + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + importPath, strings.Trim(stderrStr, "\n")) + return bytes.NewBufferString(output), nil + } + + // Export mode entails a build. + // If that build fails, errors appear on stderr + // (despite the -e flag) and the Export field is blank. + // Do not fail in that case. + // The same is true if an ad-hoc package given to go list doesn't exist. + // TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when + // packages don't exist or a build fails. + if !usesExportData(cfg) && !containsGoFile(args) { + return nil, friendlyErr + } + } + return stdout, nil +} + +// OverlayJSON is the format overlay files are expected to be in. +// The Replace map maps from overlaid paths to replacement paths: +// the Go command will forward all reads trying to open +// each overlaid path to its replacement path, or consider the overlaid +// path not to exist if the replacement path is empty. +// +// From golang/go#39958. +type OverlayJSON struct { + Replace map[string]string `json:"replace,omitempty"` +} + +// writeOverlays writes out files for go list's -overlay flag, as described +// above. +func (state *golistState) writeOverlays() (filename string, cleanup func(), err error) { + // Do nothing if there are no overlays in the config. + if len(state.cfg.Overlay) == 0 { + return "", func() {}, nil + } + dir, err := os.MkdirTemp("", "gopackages-*") + if err != nil { + return "", nil, err + } + // The caller must clean up this directory, unless this function returns an + // error. + cleanup = func() { + os.RemoveAll(dir) + } + defer func() { + if err != nil { + cleanup() + } + }() + overlays := map[string]string{} + for k, v := range state.cfg.Overlay { + // Create a unique filename for the overlaid files, to avoid + // creating nested directories. + noSeparator := strings.Join(strings.Split(filepath.ToSlash(k), "/"), "") + f, err := os.CreateTemp(dir, fmt.Sprintf("*-%s", noSeparator)) + if err != nil { + return "", func() {}, err + } + if _, err := f.Write(v); err != nil { + return "", func() {}, err + } + if err := f.Close(); err != nil { + return "", func() {}, err + } + overlays[k] = f.Name() + } + b, err := json.Marshal(OverlayJSON{Replace: overlays}) + if err != nil { + return "", func() {}, err + } + // Write out the overlay file that contains the filepath mappings. 
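+	// Editor's example of its contents (paths are hypothetical):
+	//
+	//	{"replace": {"/src/p/a.go": "/tmp/gopackages-123/457-srcpa.go"}}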
+ filename = filepath.Join(dir, "overlay.json") + if err := os.WriteFile(filename, b, 0665); err != nil { + return "", func() {}, err + } + return filename, cleanup, nil +} + +func containsGoFile(s []string) bool { + for _, f := range s { + if strings.HasSuffix(f, ".go") { + return true + } + } + return false +} + +func cmdDebugStr(cmd *exec.Cmd) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.SplitN(kv, "=", 2) + k, v := split[0], split[1] + env[k] = v + } + + var args []string + for _, arg := range cmd.Args { + quoted := strconv.Quote(arg) + if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") { + args = append(args, quoted) + } else { + args = append(args, arg) + } + } + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/terraform/providers/google/vendor/golang.org/x/tools/go/packages/golist_overlay.go new file mode 100644 index 00000000000..d823c474ad3 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -0,0 +1,83 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "encoding/json" + "path/filepath" + + "golang.org/x/tools/internal/gocommand" +) + +// determineRootDirs returns a mapping from absolute directories that could +// contain code to their corresponding import path prefixes. +func (state *golistState) determineRootDirs() (map[string]string, error) { + env, err := state.getEnv() + if err != nil { + return nil, err + } + if env["GOMOD"] != "" { + state.rootsOnce.Do(func() { + state.rootDirs, state.rootDirsError = state.determineRootDirsModules() + }) + } else { + state.rootsOnce.Do(func() { + state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH() + }) + } + return state.rootDirs, state.rootDirsError +} + +func (state *golistState) determineRootDirsModules() (map[string]string, error) { + // List all of the modules--the first will be the directory for the main + // module. Any replaced modules will also need to be treated as roots. + // Editing files in the module cache isn't a great idea, so we don't + // plan to ever support that. + out, err := state.invokeGo("list", "-m", "-json", "all") + if err != nil { + // 'go list all' will fail if we're outside of a module and + // GO111MODULE=on. Try falling back without 'all'. + var innerErr error + out, innerErr = state.invokeGo("list", "-m", "-json") + if innerErr != nil { + return nil, err + } + } + roots := map[string]string{} + modules := map[string]string{} + var i int + for dec := json.NewDecoder(out); dec.More(); { + mod := new(gocommand.ModuleJSON) + if err := dec.Decode(mod); err != nil { + return nil, err + } + if mod.Dir != "" && mod.Path != "" { + // This is a valid module; add it to the map. + absDir, err := filepath.Abs(mod.Dir) + if err != nil { + return nil, err + } + modules[absDir] = mod.Path + // The first result is the main module. 
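+			// Editor's note: modules with a replace directive are roots too;
+			// e.g. a `replace example.com/old => ../local` directive maps
+			// ../local's absolute Dir to example.com/old.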
+ if i == 0 || mod.Replace != nil && mod.Replace.Path != "" { + roots[absDir] = mod.Path + } + } + i++ + } + return roots, nil +} + +func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) { + m := map[string]string{} + for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) { + absDir, err := filepath.Abs(dir) + if err != nil { + return nil, err + } + m[filepath.Join(absDir, "src")] = "" + } + return m, nil +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/terraform/providers/google/vendor/golang.org/x/tools/go/packages/loadmode_string.go new file mode 100644 index 00000000000..5c080d21b54 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "fmt" + "strings" +) + +var allModes = []LoadMode{ + NeedName, + NeedFiles, + NeedCompiledGoFiles, + NeedImports, + NeedDeps, + NeedExportFile, + NeedTypes, + NeedSyntax, + NeedTypesInfo, + NeedTypesSizes, +} + +var modeStrings = []string{ + "NeedName", + "NeedFiles", + "NeedCompiledGoFiles", + "NeedImports", + "NeedDeps", + "NeedExportFile", + "NeedTypes", + "NeedSyntax", + "NeedTypesInfo", + "NeedTypesSizes", +} + +func (mod LoadMode) String() string { + m := mod + if m == 0 { + return "LoadMode(0)" + } + var out []string + for i, x := range allModes { + if x > m { + break + } + if (m & x) != 0 { + out = append(out, modeStrings[i]) + m = m ^ x + } + } + if m != 0 { + out = append(out, "Unknown") + } + return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|")) +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/go/packages/packages.go b/terraform/providers/google/vendor/golang.org/x/tools/go/packages/packages.go new file mode 100644 index 00000000000..3ea1b3fa46d --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/go/packages/packages.go @@ -0,0 +1,1445 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +// See doc.go for package documentation and implementation notes. + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "go/ast" + "go/parser" + "go/scanner" + "go/token" + "go/types" + "io" + "log" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/sync/errgroup" + + "golang.org/x/tools/go/gcexportdata" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/packagesinternal" + "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/versions" +) + +// A LoadMode controls the amount of detail to return when loading. +// The bits below can be combined to specify which fields should be +// filled in the result packages. +// The zero value is a special case, equivalent to combining +// the NeedName, NeedFiles, and NeedCompiledGoFiles bits. +// ID and Errors (if present) will always be filled. +// Load may return more information than requested. +type LoadMode int + +const ( + // NeedName adds Name and PkgPath. + NeedName LoadMode = 1 << iota + + // NeedFiles adds GoFiles and OtherFiles. + NeedFiles + + // NeedCompiledGoFiles adds CompiledGoFiles. + NeedCompiledGoFiles + + // NeedImports adds Imports. 
If NeedDeps is not set, the Imports field will contain + // "placeholder" Packages with only the ID set. + NeedImports + + // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. + NeedDeps + + // NeedExportFile adds ExportFile. + NeedExportFile + + // NeedTypes adds Types, Fset, and IllTyped. + NeedTypes + + // NeedSyntax adds Syntax. + NeedSyntax + + // NeedTypesInfo adds TypesInfo. + NeedTypesInfo + + // NeedTypesSizes adds TypesSizes. + NeedTypesSizes + + // needInternalDepsErrors adds the internal deps errors field for use by gopls. + needInternalDepsErrors + + // needInternalForTest adds the internal forTest field. + // Tests must also be set on the context for this field to be populated. + needInternalForTest + + // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. + // Modifies CompiledGoFiles and Types, and has no effect on its own. + typecheckCgo + + // NeedModule adds Module. + NeedModule + + // NeedEmbedFiles adds EmbedFiles. + NeedEmbedFiles + + // NeedEmbedPatterns adds EmbedPatterns. + NeedEmbedPatterns +) + +const ( + // Deprecated: LoadFiles exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles + + // Deprecated: LoadImports exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadImports = LoadFiles | NeedImports + + // Deprecated: LoadTypes exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadTypes = LoadImports | NeedTypes | NeedTypesSizes + + // Deprecated: LoadSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo + + // Deprecated: LoadAllSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadAllSyntax = LoadSyntax | NeedDeps + + // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. + NeedExportsFile = NeedExportFile +) + +// A Config specifies details about how packages should be loaded. +// The zero value is a valid configuration. +// Calls to Load do not modify this struct. +type Config struct { + // Mode controls the level of information returned for each package. + Mode LoadMode + + // Context specifies the context for the load operation. + // Cancelling the context may cause [Load] to abort and + // return an error. + Context context.Context + + // Logf is the logger for the config. + // If the user provides a logger, debug logging is enabled. + // If the GOPACKAGESDEBUG environment variable is set to true, + // but the logger is nil, default to log.Printf. + Logf func(format string, args ...interface{}) + + // Dir is the directory in which to run the build system's query tool + // that provides information about the packages. + // If Dir is empty, the tool is run in the current directory. + Dir string + + // Env is the environment to use when invoking the build system's query tool. + // If Env is nil, the current environment is used. + // As in os/exec's Cmd, only the last value in the slice for + // each environment key is used. 
To specify the setting of only
+	// a few variables, append to the current environment, as in:
+	//
+	//	opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386")
+	//
+	Env []string
+
+	// gocmdRunner guards go command calls from concurrency errors.
+	gocmdRunner *gocommand.Runner
+
+	// BuildFlags is a list of command-line flags to be passed through to
+	// the build system's query tool.
+	BuildFlags []string
+
+	// modFile will be used for -modfile in go command invocations.
+	modFile string
+
+	// modFlag will be used for -modfile in go command invocations.
+	modFlag string
+
+	// Fset provides source position information for syntax trees and types.
+	// If Fset is nil, Load will use a new fileset.
+	Fset *token.FileSet
+
+	// ParseFile is called to read and parse each file
+	// when preparing a package's type-checked syntax tree.
+	// It must be safe to call ParseFile simultaneously from multiple goroutines.
+	// If ParseFile is nil, the loader will use parser.ParseFile.
+	//
+	// ParseFile should parse the source from src and use filename only for
+	// recording position information.
+	//
+	// An application may supply a custom implementation of ParseFile
+	// to change the effective file contents or the behavior of the parser,
+	// or to modify the syntax tree. For example, selectively eliminating
+	// unwanted function bodies can significantly accelerate type checking.
+	ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error)
+
+	// If Tests is set, the loader includes not just the packages
+	// matching a particular pattern but also any related test packages,
+	// including test-only variants of the package and the test executable.
+	//
+	// For example, when using the go command, loading "fmt" with Tests=true
+	// returns four packages, with IDs "fmt" (the standard package),
+	// "fmt [fmt.test]" (the package as compiled for the test),
+	// "fmt_test" (the test functions from source files in package fmt_test),
+	// and "fmt.test" (the test binary).
+	//
+	// In build systems with explicit names for tests,
+	// setting Tests may have no effect.
+	Tests bool
+
+	// Overlay provides a mapping of absolute file paths to file contents.
+	// If the file with the given path already exists, the parser will use the
+	// alternative file contents provided by the map.
+	//
+	// Overlays provide incomplete support for when a given file doesn't
+	// already exist on disk. See the package doc above for more details.
+	Overlay map[string][]byte
+}
+
+// Load loads and returns the Go packages named by the given patterns.
+//
+// Config specifies loading options;
+// nil behaves the same as an empty Config.
+//
+// If any of the patterns was invalid as defined by the
+// underlying build system, Load returns an error.
+// It may return an empty list of packages without an error,
+// for instance for an empty expansion of a valid wildcard.
+// Errors associated with a particular package are recorded in the
+// corresponding Package's Errors list, and do not cause Load to
+// return an error. Clients may need to handle such errors before
+// proceeding with further analysis. The PrintErrors function is
+// provided for convenient display of all errors.
+func Load(cfg *Config, patterns ...string) ([]*Package, error) {
+	ld := newLoader(cfg)
+	response, external, err := defaultDriver(&ld.Config, patterns...)
+	if err != nil {
+		return nil, err
+	}
+
+	ld.sizes = types.SizesFor(response.Compiler, response.Arch)
+	if ld.sizes == nil && ld.Config.Mode&(NeedTypes|NeedTypesSizes|NeedTypesInfo) != 0 {
+		// Type size information is needed but unavailable.
+		if external {
+			// An external driver may fail to populate the Compiler/GOARCH fields,
+			// especially since they are relatively new (see #63700).
+			// Provide a sensible fallback in this case.
+			ld.sizes = types.SizesFor("gc", runtime.GOARCH)
+			if ld.sizes == nil { // gccgo-only arch
+				ld.sizes = types.SizesFor("gc", "amd64")
+			}
+		} else {
+			// Go list should never fail to deliver accurate size information.
+			// Reject the whole Load since the error is the same for every package.
+			return nil, fmt.Errorf("can't determine type sizes for compiler %q on GOARCH %q",
+				response.Compiler, response.Arch)
+		}
+	}
+
+	return ld.refine(response)
+}
+
+// defaultDriver is a driver that implements go/packages' fallback behavior.
+// It will first try an external driver, if one exists. If there's no
+// external driver, or the driver returns a response with NotHandled set,
+// defaultDriver will fall back to the go list driver.
+// The boolean result indicates whether an external driver handled the request.
+func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, error) {
+	const (
+		// windowsArgMax specifies the maximum command line length for
+		// the Windows' CreateProcess function.
+		windowsArgMax = 32767
+		// maxEnvSize is a very rough estimation of the maximum environment
+		// size of a user.
+		maxEnvSize = 16384
+		// safeArgMax specifies the maximum safe command line length to use
+		// by the underlying driver excl. the environment. We choose the Windows'
+		// ARG_MAX as the starting point because it's one of the lowest ARG_MAX
+		// constants out of the different supported platforms,
+		// e.g., https://www.in-ulm.de/~mascheck/various/argmax/#results.
+		safeArgMax = windowsArgMax - maxEnvSize
+	)
+	chunks, err := splitIntoChunks(patterns, safeArgMax)
+	if err != nil {
+		return nil, false, err
+	}
+
+	if driver := findExternalDriver(cfg); driver != nil {
+		response, err := callDriverOnChunks(driver, cfg, chunks)
+		if err != nil {
+			return nil, false, err
+		} else if !response.NotHandled {
+			return response, true, nil
+		}
+		// (fall through)
+	}
+
+	response, err := callDriverOnChunks(goListDriver, cfg, chunks)
+	if err != nil {
+		return nil, false, err
+	}
+	return response, false, err
+}
+
+// splitIntoChunks chunks the slice so that the total number of characters
+// in a chunk is no longer than argMax.
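+//
+// Editor's example: splitIntoChunks([]string{"aa", "bb", "cc"}, 5)
+// returns [["aa"], ["bb", "cc"]]; each pattern is counted as len+1
+// to account for the separating space.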
+func splitIntoChunks(patterns []string, argMax int) ([][]string, error) { + if argMax <= 0 { + return nil, errors.New("failed to split patterns into chunks, negative safe argMax value") + } + var chunks [][]string + charsInChunk := 0 + nextChunkStart := 0 + for i, v := range patterns { + vChars := len(v) + if vChars > argMax { + // a single pattern is longer than the maximum safe ARG_MAX, hardly should happen + return nil, errors.New("failed to split patterns into chunks, a pattern is too long") + } + charsInChunk += vChars + 1 // +1 is for a whitespace between patterns that has to be counted too + if charsInChunk > argMax { + chunks = append(chunks, patterns[nextChunkStart:i]) + nextChunkStart = i + charsInChunk = vChars + } + } + // add the last chunk + if nextChunkStart < len(patterns) { + chunks = append(chunks, patterns[nextChunkStart:]) + } + return chunks, nil +} + +func callDriverOnChunks(driver driver, cfg *Config, chunks [][]string) (*DriverResponse, error) { + if len(chunks) == 0 { + return driver(cfg) + } + responses := make([]*DriverResponse, len(chunks)) + errNotHandled := errors.New("driver returned NotHandled") + var g errgroup.Group + for i, chunk := range chunks { + i := i + chunk := chunk + g.Go(func() (err error) { + responses[i], err = driver(cfg, chunk...) + if responses[i] != nil && responses[i].NotHandled { + err = errNotHandled + } + return err + }) + } + if err := g.Wait(); err != nil { + if errors.Is(err, errNotHandled) { + return &DriverResponse{NotHandled: true}, nil + } + return nil, err + } + return mergeResponses(responses...), nil +} + +func mergeResponses(responses ...*DriverResponse) *DriverResponse { + if len(responses) == 0 { + return nil + } + response := newDeduper() + response.dr.NotHandled = false + response.dr.Compiler = responses[0].Compiler + response.dr.Arch = responses[0].Arch + response.dr.GoVersion = responses[0].GoVersion + for _, v := range responses { + response.addAll(v) + } + return response.dr +} + +// A Package describes a loaded Go package. +type Package struct { + // ID is a unique identifier for a package, + // in a syntax provided by the underlying build system. + // + // Because the syntax varies based on the build system, + // clients should treat IDs as opaque and not attempt to + // interpret them. + ID string + + // Name is the package name as it appears in the package source code. + Name string + + // PkgPath is the package path as used by the go/types package. + PkgPath string + + // Errors contains any errors encountered querying the metadata + // of the package, or while parsing or type-checking its files. + Errors []Error + + // TypeErrors contains the subset of errors produced during type checking. + TypeErrors []types.Error + + // GoFiles lists the absolute file paths of the package's Go source files. + // It may include files that should not be compiled, for example because + // they contain non-matching build tags, are documentary pseudo-files such as + // unsafe/unsafe.go or builtin/builtin.go, or are subject to cgo preprocessing. + GoFiles []string + + // CompiledGoFiles lists the absolute file paths of the package's source + // files that are suitable for type checking. + // This may differ from GoFiles if files are processed before compilation. + CompiledGoFiles []string + + // OtherFiles lists the absolute file paths of the package's non-Go source files, + // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on. 
+ OtherFiles []string + + // EmbedFiles lists the absolute file paths of the package's files + // embedded with go:embed. + EmbedFiles []string + + // EmbedPatterns lists the absolute file patterns of the package's + // files embedded with go:embed. + EmbedPatterns []string + + // IgnoredFiles lists source files that are not part of the package + // using the current build configuration but that might be part of + // the package using other build configurations. + IgnoredFiles []string + + // ExportFile is the absolute path to a file containing type + // information for the package as provided by the build system. + ExportFile string + + // Imports maps import paths appearing in the package's Go source files + // to corresponding loaded Packages. + Imports map[string]*Package + + // Types provides type information for the package. + // The NeedTypes LoadMode bit sets this field for packages matching the + // patterns; type information for dependencies may be missing or incomplete, + // unless NeedDeps and NeedImports are also set. + // + // Each call to [Load] returns a consistent set of type + // symbols, as defined by the comment at [types.Identical]. + // Avoid mixing type information from two or more calls to [Load]. + Types *types.Package + + // Fset provides position information for Types, TypesInfo, and Syntax. + // It is set only when Types is set. + Fset *token.FileSet + + // IllTyped indicates whether the package or any dependency contains errors. + // It is set only when Types is set. + IllTyped bool + + // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles. + // + // The NeedSyntax LoadMode bit populates this field for packages matching the patterns. + // If NeedDeps and NeedImports are also set, this field will also be populated + // for dependencies. + // + // Syntax is kept in the same order as CompiledGoFiles, with the caveat that nils are + // removed. If parsing returned nil, Syntax may be shorter than CompiledGoFiles. + Syntax []*ast.File + + // TypesInfo provides type information about the package's syntax trees. + // It is set only when Syntax is set. + TypesInfo *types.Info + + // TypesSizes provides the effective size function for types in TypesInfo. + TypesSizes types.Sizes + + // forTest is the package under test, if any. + forTest string + + // depsErrors is the DepsErrors field from the go list response, if any. + depsErrors []*packagesinternal.PackageError + + // module is the module information for the package if it exists. + Module *Module +} + +// Module provides module information for a package. +type Module struct { + Path string // module path + Version string // module version + Replace *Module // replaced by this module + Time *time.Time // time version was created + Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? + Dir string // directory holding files for this module, if any + GoMod string // path to go.mod file used when loading this module, if any + GoVersion string // go version used in module + Error *ModuleError // error loading module +} + +// ModuleError holds errors loading a module. 
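+// Editor's note: Module and ModuleError mirror the "go list -m -json"
+// output, e.g. (hypothetical values):
+//
+//	{"Path": "golang.org/x/tools", "Version": "v0.13.0", "Main": false}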
+type ModuleError struct { + Err string // the error itself +} + +func init() { + packagesinternal.GetForTest = func(p interface{}) string { + return p.(*Package).forTest + } + packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { + return p.(*Package).depsErrors + } + packagesinternal.SetModFile = func(config interface{}, value string) { + config.(*Config).modFile = value + } + packagesinternal.SetModFlag = func(config interface{}, value string) { + config.(*Config).modFlag = value + } + packagesinternal.TypecheckCgo = int(typecheckCgo) + packagesinternal.DepsErrors = int(needInternalDepsErrors) + packagesinternal.ForTest = int(needInternalForTest) +} + +// An Error describes a problem with a package's metadata, syntax, or types. +type Error struct { + Pos string // "file:line:col" or "file:line" or "" or "-" + Msg string + Kind ErrorKind +} + +// ErrorKind describes the source of the error, allowing the user to +// differentiate between errors generated by the driver, the parser, or the +// type-checker. +type ErrorKind int + +const ( + UnknownError ErrorKind = iota + ListError + ParseError + TypeError +) + +func (err Error) Error() string { + pos := err.Pos + if pos == "" { + pos = "-" // like token.Position{}.String() + } + return pos + ": " + err.Msg +} + +// flatPackage is the JSON form of Package +// It drops all the type and syntax fields, and transforms the Imports +// +// TODO(adonovan): identify this struct with Package, effectively +// publishing the JSON protocol. +type flatPackage struct { + ID string + Name string `json:",omitempty"` + PkgPath string `json:",omitempty"` + Errors []Error `json:",omitempty"` + GoFiles []string `json:",omitempty"` + CompiledGoFiles []string `json:",omitempty"` + OtherFiles []string `json:",omitempty"` + EmbedFiles []string `json:",omitempty"` + EmbedPatterns []string `json:",omitempty"` + IgnoredFiles []string `json:",omitempty"` + ExportFile string `json:",omitempty"` + Imports map[string]string `json:",omitempty"` +} + +// MarshalJSON returns the Package in its JSON form. +// For the most part, the structure fields are written out unmodified, and +// the type and syntax fields are skipped. +// The imports are written out as just a map of path to package id. +// The errors are written using a custom type that tries to preserve the +// structure of error types we know about. +// +// This method exists to enable support for additional build systems. It is +// not intended for use by clients of the API and we may change the format. +func (p *Package) MarshalJSON() ([]byte, error) { + flat := &flatPackage{ + ID: p.ID, + Name: p.Name, + PkgPath: p.PkgPath, + Errors: p.Errors, + GoFiles: p.GoFiles, + CompiledGoFiles: p.CompiledGoFiles, + OtherFiles: p.OtherFiles, + EmbedFiles: p.EmbedFiles, + EmbedPatterns: p.EmbedPatterns, + IgnoredFiles: p.IgnoredFiles, + ExportFile: p.ExportFile, + } + if len(p.Imports) > 0 { + flat.Imports = make(map[string]string, len(p.Imports)) + for path, ipkg := range p.Imports { + flat.Imports[path] = ipkg.ID + } + } + return json.Marshal(flat) +} + +// UnmarshalJSON reads in a Package from its JSON format. +// See MarshalJSON for details about the format accepted. 
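+//
+// Editor's example of the flat form (hypothetical values):
+//
+//	{"ID": "fmt", "Name": "fmt", "PkgPath": "fmt", "Imports": {"errors": "errors"}}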
+func (p *Package) UnmarshalJSON(b []byte) error { + flat := &flatPackage{} + if err := json.Unmarshal(b, &flat); err != nil { + return err + } + *p = Package{ + ID: flat.ID, + Name: flat.Name, + PkgPath: flat.PkgPath, + Errors: flat.Errors, + GoFiles: flat.GoFiles, + CompiledGoFiles: flat.CompiledGoFiles, + OtherFiles: flat.OtherFiles, + EmbedFiles: flat.EmbedFiles, + EmbedPatterns: flat.EmbedPatterns, + ExportFile: flat.ExportFile, + } + if len(flat.Imports) > 0 { + p.Imports = make(map[string]*Package, len(flat.Imports)) + for path, id := range flat.Imports { + p.Imports[path] = &Package{ID: id} + } + } + return nil +} + +func (p *Package) String() string { return p.ID } + +// loaderPackage augments Package with state used during the loading phase +type loaderPackage struct { + *Package + importErrors map[string]error // maps each bad import to its error + loadOnce sync.Once + color uint8 // for cycle detection + needsrc bool // load from source (Mode >= LoadTypes) + needtypes bool // type information is either requested or depended on + initial bool // package was matched by a pattern + goVersion int // minor version number of go command on PATH +} + +// loader holds the working state of a single call to load. +type loader struct { + pkgs map[string]*loaderPackage + Config + sizes types.Sizes // non-nil if needed by mode + parseCache map[string]*parseValue + parseCacheMu sync.Mutex + exportMu sync.Mutex // enforces mutual exclusion of exportdata operations + + // Config.Mode contains the implied mode (see impliedLoadMode). + // Implied mode contains all the fields we need the data for. + // In requestedMode there are the actually requested fields. + // We'll zero them out before returning packages to the user. + // This makes it easier for us to get the conditions where + // we need certain modes right. + requestedMode LoadMode +} + +type parseValue struct { + f *ast.File + err error + ready chan struct{} +} + +func newLoader(cfg *Config) *loader { + ld := &loader{ + parseCache: map[string]*parseValue{}, + } + if cfg != nil { + ld.Config = *cfg + // If the user has provided a logger, use it. + ld.Config.Logf = cfg.Logf + } + if ld.Config.Logf == nil { + // If the GOPACKAGESDEBUG environment variable is set to true, + // but the user has not provided a logger, default to log.Printf. + if debug { + ld.Config.Logf = log.Printf + } else { + ld.Config.Logf = func(format string, args ...interface{}) {} + } + } + if ld.Config.Mode == 0 { + ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility. + } + if ld.Config.Env == nil { + ld.Config.Env = os.Environ() + } + if ld.Config.gocmdRunner == nil { + ld.Config.gocmdRunner = &gocommand.Runner{} + } + if ld.Context == nil { + ld.Context = context.Background() + } + if ld.Dir == "" { + if dir, err := os.Getwd(); err == nil { + ld.Dir = dir + } + } + + // Save the actually requested fields. We'll zero them out before returning packages to the user. + ld.requestedMode = ld.Mode + ld.Mode = impliedLoadMode(ld.Mode) + + if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + if ld.Fset == nil { + ld.Fset = token.NewFileSet() + } + + // ParseFile is required even in LoadTypes mode + // because we load source if export data is missing. 
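+		// An illustrative override (hypothetical; this package never sets it):
+		// a caller that does not need resolved objects could supply a cheaper
+		// parser via the Config hook, e.g.
+		//
+		//	cfg.ParseFile = func(fset *token.FileSet, name string, src []byte) (*ast.File, error) {
+		//		return parser.ParseFile(fset, name, src, parser.SkipObjectResolution)
+		//	}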
+	if ld.ParseFile == nil {
+		ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) {
+			const mode = parser.AllErrors | parser.ParseComments
+			return parser.ParseFile(fset, filename, src, mode)
+		}
+	}
+
+	return ld
+}
+
+// refine connects the supplied packages into a graph and then adds type
+// and syntax information as requested by the LoadMode.
+func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
+	roots := response.Roots
+	rootMap := make(map[string]int, len(roots))
+	for i, root := range roots {
+		rootMap[root] = i
+	}
+	ld.pkgs = make(map[string]*loaderPackage)
+	// first pass, fixup and build the map and roots
+	var initial = make([]*loaderPackage, len(roots))
+	for _, pkg := range response.Packages {
+		rootIndex := -1
+		if i, found := rootMap[pkg.ID]; found {
+			rootIndex = i
+		}
+
+		// Overlays can invalidate export data.
+		// TODO(matloob): make this check fine-grained based on dependencies on overlaid files
+		exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe"
+		// This package needs type information if the caller requested types and the package is
+		// either a root, or it's a non-root and the user requested dependencies ...
+		needtypes := (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
+		// This package needs source if the call requested source (or types info, which implies source)
+		// and the package is either a root, or it's a non-root and the user requested dependencies...
+		needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) ||
+			// ... or if we need types and the exportData is invalid. We fall back to (incompletely)
+			// typechecking packages from source if they fail to compile.
+			(ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe"
+		lpkg := &loaderPackage{
+			Package:   pkg,
+			needtypes: needtypes,
+			needsrc:   needsrc,
+			goVersion: response.GoVersion,
+		}
+		ld.pkgs[lpkg.ID] = lpkg
+		if rootIndex >= 0 {
+			initial[rootIndex] = lpkg
+			lpkg.initial = true
+		}
+	}
+	for i, root := range roots {
+		if initial[i] == nil {
+			return nil, fmt.Errorf("root package %v is missing", root)
+		}
+	}
+
+	if ld.Mode&NeedImports != 0 {
+		// Materialize the import graph.
+
+		const (
+			white = 0 // new
+			grey  = 1 // in progress
+			black = 2 // complete
+		)
+
+		// visit traverses the import graph, depth-first,
+		// and materializes the graph as Packages.Imports.
+		//
+		// Valid imports are saved in the Packages.Import map.
+		// Invalid imports (cycles and missing nodes) are saved in the importErrors map.
+		// Thus, even in the presence of both kinds of errors,
+		// the Import graph remains a DAG.
+		//
+		// visit returns whether the package needs src or has a transitive
+		// dependency on a package that does. These are the only packages
+		// for which we load source code.
+		var stack []*loaderPackage
+		var visit func(lpkg *loaderPackage) bool
+		visit = func(lpkg *loaderPackage) bool {
+			switch lpkg.color {
+			case black:
+				return lpkg.needsrc
+			case grey:
+				panic("internal error: grey node")
+			}
+			lpkg.color = grey
+			stack = append(stack, lpkg) // push
+			stubs := lpkg.Imports       // the structure form has only stubs with the ID in the Imports
+			lpkg.Imports = make(map[string]*Package, len(stubs))
+			for importPath, ipkg := range stubs {
+				var importErr error
+				imp := ld.pkgs[ipkg.ID]
+				if imp == nil {
+					// (includes package "C" when DisableCgo)
+					importErr = fmt.Errorf("missing package: %q", ipkg.ID)
+				} else if imp.color == grey {
+					importErr = fmt.Errorf("import cycle: %s", stack)
+				}
+				if importErr != nil {
+					if lpkg.importErrors == nil {
+						lpkg.importErrors = make(map[string]error)
+					}
+					lpkg.importErrors[importPath] = importErr
+					continue
+				}
+
+				if visit(imp) {
+					lpkg.needsrc = true
+				}
+				lpkg.Imports[importPath] = imp.Package
+			}
+
+			// Complete type information is required for the
+			// immediate dependencies of each source package.
+			if lpkg.needsrc && ld.Mode&NeedTypes != 0 {
+				for _, ipkg := range lpkg.Imports {
+					ld.pkgs[ipkg.ID].needtypes = true
+				}
+			}
+
+			// NeedTypesSizes causes TypesSizes to be set even
+			// on packages for which types aren't needed.
+			if ld.Mode&NeedTypesSizes != 0 {
+				lpkg.TypesSizes = ld.sizes
+			}
+			stack = stack[:len(stack)-1] // pop
+			lpkg.color = black
+
+			return lpkg.needsrc
+		}
+
+		// For each initial package, create its import DAG.
+		for _, lpkg := range initial {
+			visit(lpkg)
+		}
+
+	} else {
+		// !NeedImports: drop the stub (ID-only) import packages
+		// that we are not even going to try to resolve.
+		for _, lpkg := range initial {
+			lpkg.Imports = nil
+		}
+	}
+
+	// Load type data and syntax if needed, starting at
+	// the initial packages (roots of the import DAG).
+	if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
+		var wg sync.WaitGroup
+		for _, lpkg := range initial {
+			wg.Add(1)
+			go func(lpkg *loaderPackage) {
+				ld.loadRecursive(lpkg)
+				wg.Done()
+			}(lpkg)
+		}
+		wg.Wait()
+	}
+
+	// If the context is done, return its error and
+	// throw out [likely] incomplete packages.
+	if err := ld.Context.Err(); err != nil {
+		return nil, err
+	}
+
+	result := make([]*Package, len(initial))
+	for i, lpkg := range initial {
+		result[i] = lpkg.Package
+	}
+	for i := range ld.pkgs {
+		// Clear all unrequested fields,
+		// to catch programs that use more than they request.
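+		// For example (hypothetical): after Load with Mode == NeedName,
+		// pkg.GoFiles is nil here even if the driver reported file lists,
+		// so callers cannot come to rely on data they never requested.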
+ if ld.requestedMode&NeedName == 0 { + ld.pkgs[i].Name = "" + ld.pkgs[i].PkgPath = "" + } + if ld.requestedMode&NeedFiles == 0 { + ld.pkgs[i].GoFiles = nil + ld.pkgs[i].OtherFiles = nil + ld.pkgs[i].IgnoredFiles = nil + } + if ld.requestedMode&NeedEmbedFiles == 0 { + ld.pkgs[i].EmbedFiles = nil + } + if ld.requestedMode&NeedEmbedPatterns == 0 { + ld.pkgs[i].EmbedPatterns = nil + } + if ld.requestedMode&NeedCompiledGoFiles == 0 { + ld.pkgs[i].CompiledGoFiles = nil + } + if ld.requestedMode&NeedImports == 0 { + ld.pkgs[i].Imports = nil + } + if ld.requestedMode&NeedExportFile == 0 { + ld.pkgs[i].ExportFile = "" + } + if ld.requestedMode&NeedTypes == 0 { + ld.pkgs[i].Types = nil + ld.pkgs[i].Fset = nil + ld.pkgs[i].IllTyped = false + } + if ld.requestedMode&NeedSyntax == 0 { + ld.pkgs[i].Syntax = nil + } + if ld.requestedMode&NeedTypesInfo == 0 { + ld.pkgs[i].TypesInfo = nil + } + if ld.requestedMode&NeedTypesSizes == 0 { + ld.pkgs[i].TypesSizes = nil + } + if ld.requestedMode&NeedModule == 0 { + ld.pkgs[i].Module = nil + } + } + + return result, nil +} + +// loadRecursive loads the specified package and its dependencies, +// recursively, in parallel, in topological order. +// It is atomic and idempotent. +// Precondition: ld.Mode&NeedTypes. +func (ld *loader) loadRecursive(lpkg *loaderPackage) { + lpkg.loadOnce.Do(func() { + // Load the direct dependencies, in parallel. + var wg sync.WaitGroup + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + wg.Add(1) + go func(imp *loaderPackage) { + ld.loadRecursive(imp) + wg.Done() + }(imp) + } + wg.Wait() + ld.loadPackage(lpkg) + }) +} + +// loadPackage loads the specified package. +// It must be called only once per Package, +// after immediate dependencies are loaded. +// Precondition: ld.Mode & NeedTypes. +func (ld *loader) loadPackage(lpkg *loaderPackage) { + if lpkg.PkgPath == "unsafe" { + // Fill in the blanks to avoid surprises. + lpkg.Types = types.Unsafe + lpkg.Fset = ld.Fset + lpkg.Syntax = []*ast.File{} + lpkg.TypesInfo = new(types.Info) + lpkg.TypesSizes = ld.sizes + return + } + + // Call NewPackage directly with explicit name. + // This avoids skew between golist and go/types when the files' + // package declarations are inconsistent. + lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name) + lpkg.Fset = ld.Fset + + // Start shutting down if the context is done and do not load + // source or export data files. + // Packages that import this one will have ld.Context.Err() != nil. + // ld.Context.Err() will be returned later by refine. + if ld.Context.Err() != nil { + return + } + + // Subtle: we populate all Types fields with an empty Package + // before loading export data so that export data processing + // never has to create a types.Package for an indirect dependency, + // which would then require that such created packages be explicitly + // inserted back into the Import graph as a final step after export data loading. + // (Hence this return is after the Types assignment.) + // The Diamond test exercises this case. + if !lpkg.needtypes && !lpkg.needsrc { + return + } + if !lpkg.needsrc { + if err := ld.loadFromExportData(lpkg); err != nil { + lpkg.Errors = append(lpkg.Errors, Error{ + Pos: "-", + Msg: err.Error(), + Kind: UnknownError, // e.g. can't find/open/parse export data + }) + } + return // not a source package, don't get syntax trees + } + + appendError := func(err error) { + // Convert various error types into the one true Error. 
+ var errs []Error + switch err := err.(type) { + case Error: + // from driver + errs = append(errs, err) + + case *os.PathError: + // from parser + errs = append(errs, Error{ + Pos: err.Path + ":1", + Msg: err.Err.Error(), + Kind: ParseError, + }) + + case scanner.ErrorList: + // from parser + for _, err := range err { + errs = append(errs, Error{ + Pos: err.Pos.String(), + Msg: err.Msg, + Kind: ParseError, + }) + } + + case types.Error: + // from type checker + lpkg.TypeErrors = append(lpkg.TypeErrors, err) + errs = append(errs, Error{ + Pos: err.Fset.Position(err.Pos).String(), + Msg: err.Msg, + Kind: TypeError, + }) + + default: + // unexpected impoverished error from parser? + errs = append(errs, Error{ + Pos: "-", + Msg: err.Error(), + Kind: UnknownError, + }) + + // If you see this error message, please file a bug. + log.Printf("internal error: error %q (%T) without position", err, err) + } + + lpkg.Errors = append(lpkg.Errors, errs...) + } + + // If the go command on the PATH is newer than the runtime, + // then the go/{scanner,ast,parser,types} packages from the + // standard library may be unable to process the files + // selected by go list. + // + // There is currently no way to downgrade the effective + // version of the go command (see issue 52078), so we proceed + // with the newer go command but, in case of parse or type + // errors, we emit an additional diagnostic. + // + // See: + // - golang.org/issue/52078 (flag to set release tags) + // - golang.org/issue/50825 (gopls legacy version support) + // - golang.org/issue/55883 (go/packages confusing error) + // + // Should we assert a hard minimum of (currently) go1.16 here? + var runtimeVersion int + if _, err := fmt.Sscanf(runtime.Version(), "go1.%d", &runtimeVersion); err == nil && runtimeVersion < lpkg.goVersion { + defer func() { + if len(lpkg.Errors) > 0 { + appendError(Error{ + Pos: "-", + Msg: fmt.Sprintf("This application uses version go1.%d of the source-processing packages but runs version go1.%d of 'go list'. It may fail to process source files that rely on newer language features. If so, rebuild the application using a newer version of Go.", runtimeVersion, lpkg.goVersion), + Kind: UnknownError, + }) + } + }() + } + + if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" { + // The config requested loading sources and types, but sources are missing. + // Add an error to the package and fall back to loading from export data. + appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError}) + _ = ld.loadFromExportData(lpkg) // ignore any secondary errors + + return // can't get syntax trees for this package + } + + files, errs := ld.parseFiles(lpkg.CompiledGoFiles) + for _, err := range errs { + appendError(err) + } + + lpkg.Syntax = files + if ld.Config.Mode&NeedTypes == 0 { + return + } + + // Start shutting down if the context is done and do not type check. + // Packages that import this one will have ld.Context.Err() != nil. + // ld.Context.Err() will be returned later by refine. 
+	if ld.Context.Err() != nil {
+		return
+	}
+
+	lpkg.TypesInfo = &types.Info{
+		Types:      make(map[ast.Expr]types.TypeAndValue),
+		Defs:       make(map[*ast.Ident]types.Object),
+		Uses:       make(map[*ast.Ident]types.Object),
+		Implicits:  make(map[ast.Node]types.Object),
+		Instances:  make(map[*ast.Ident]types.Instance),
+		Scopes:     make(map[ast.Node]*types.Scope),
+		Selections: make(map[*ast.SelectorExpr]*types.Selection),
+	}
+	versions.InitFileVersions(lpkg.TypesInfo)
+	lpkg.TypesSizes = ld.sizes
+
+	importer := importerFunc(func(path string) (*types.Package, error) {
+		if path == "unsafe" {
+			return types.Unsafe, nil
+		}
+
+		// The imports map is keyed by import path.
+		ipkg := lpkg.Imports[path]
+		if ipkg == nil {
+			if err := lpkg.importErrors[path]; err != nil {
+				return nil, err
+			}
+			// There was skew between the metadata and the
+			// import declarations, likely due to an edit
+			// race, or because the ParseFile feature was
+			// used to supply alternative file contents.
+			return nil, fmt.Errorf("no metadata for %s", path)
+		}
+
+		if ipkg.Types != nil && ipkg.Types.Complete() {
+			return ipkg.Types, nil
+		}
+		log.Fatalf("internal error: package %q without types was imported from %q", path, lpkg)
+		panic("unreachable")
+	})
+
+	// type-check
+	tc := &types.Config{
+		Importer: importer,
+
+		// Type-check bodies of functions only in initial packages.
+		// Example: for import graph A->B->C and initial packages {A,C},
+		// we can ignore function bodies in B.
+		IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial,
+
+		Error: appendError,
+		Sizes: ld.sizes, // may be nil
+	}
+	if lpkg.Module != nil && lpkg.Module.GoVersion != "" {
+		tc.GoVersion = "go" + lpkg.Module.GoVersion
+	}
+	if (ld.Mode & typecheckCgo) != 0 {
+		if !typesinternal.SetUsesCgo(tc) {
+			appendError(Error{
+				Msg:  "typecheckCgo requires Go 1.15+",
+				Kind: ListError,
+			})
+			return
+		}
+	}
+
+	typErr := types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax)
+	lpkg.importErrors = nil // no longer needed
+
+	// In go/types go1.21 and go1.22, Checker.Files failed fast with a
+	// "too new" error, without calling tc.Error and without
+	// proceeding to type-check the package (#66525).
+	// We rely on the runtimeVersion error to give the suggested remedy.
+	if typErr != nil && len(lpkg.Errors) == 0 && len(lpkg.Syntax) > 0 {
+		if msg := typErr.Error(); strings.HasPrefix(msg, "package requires newer Go version") {
+			appendError(types.Error{
+				Fset: ld.Fset,
+				Pos:  lpkg.Syntax[0].Package,
+				Msg:  msg,
+			})
+		}
+	}
+
+	// If !Cgo, the type-checker uses FakeImportC mode, so
+	// it doesn't invoke the importer for import "C",
+	// nor report an error for the import,
+	// or for any undefined C.f reference.
+	// We must detect this explicitly and correctly
+	// mark the package as IllTyped (by reporting an error).
+	// TODO(adonovan): if these errors are annoying,
+	// we could just set IllTyped quietly.
+	if tc.FakeImportC {
+	outer:
+		for _, f := range lpkg.Syntax {
+			for _, imp := range f.Imports {
+				if imp.Path.Value == `"C"` {
+					err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`}
+					appendError(err)
+					break outer
+				}
+			}
+		}
+	}
+
+	// If types.Checker.Files had an error that was unreported,
+	// make sure to report the unknown error so the package is illTyped.
+	if typErr != nil && len(lpkg.Errors) == 0 {
+		appendError(typErr)
+	}
+
+	// Record accumulated errors.
+	illTyped := len(lpkg.Errors) > 0
+	if !illTyped {
+		for _, imp := range lpkg.Imports {
+			if imp.IllTyped {
+				illTyped = true
+				break
+			}
+		}
+	}
+	lpkg.IllTyped = illTyped
+}
+
+// An importerFunc is an implementation of the single-method
+// types.Importer interface based on a function value.
+type importerFunc func(path string) (*types.Package, error)
+
+func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
+
+// We use a counting semaphore to limit
+// the number of parallel I/O calls per process.
+var ioLimit = make(chan bool, 20)
+
+func (ld *loader) parseFile(filename string) (*ast.File, error) {
+	ld.parseCacheMu.Lock()
+	v, ok := ld.parseCache[filename]
+	if ok {
+		// cache hit
+		ld.parseCacheMu.Unlock()
+		<-v.ready
+	} else {
+		// cache miss
+		v = &parseValue{ready: make(chan struct{})}
+		ld.parseCache[filename] = v
+		ld.parseCacheMu.Unlock()
+
+		var src []byte
+		for f, contents := range ld.Config.Overlay {
+			if sameFile(f, filename) {
+				src = contents
+			}
+		}
+		var err error
+		if src == nil {
+			ioLimit <- true // wait
+			src, err = os.ReadFile(filename)
+			<-ioLimit // signal
+		}
+		if err != nil {
+			v.err = err
+		} else {
+			v.f, v.err = ld.ParseFile(ld.Fset, filename, src)
+		}
+
+		close(v.ready)
+	}
+	return v.f, v.err
+}
+
+// parseFiles reads and parses the Go source files and returns the ASTs
+// of the ones that could be at least partially parsed, along with a
+// list of I/O and parse errors encountered.
+//
+// Because files are scanned in parallel, the token.Pos
+// positions of the resulting ast.Files are not ordered.
+func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) {
+	var wg sync.WaitGroup
+	n := len(filenames)
+	parsed := make([]*ast.File, n)
+	errors := make([]error, n)
+	for i, file := range filenames {
+		wg.Add(1)
+		go func(i int, filename string) {
+			parsed[i], errors[i] = ld.parseFile(filename)
+			wg.Done()
+		}(i, file)
+	}
+	wg.Wait()
+
+	// Eliminate nils, preserving order.
+	var o int
+	for _, f := range parsed {
+		if f != nil {
+			parsed[o] = f
+			o++
+		}
+	}
+	parsed = parsed[:o]
+
+	o = 0
+	for _, err := range errors {
+		if err != nil {
+			errors[o] = err
+			o++
+		}
+	}
+	errors = errors[:o]
+
+	return parsed, errors
+}
+
+// sameFile returns true if x and y have the same basename and denote
+// the same file.
+func sameFile(x, y string) bool {
+	if x == y {
+		// It could be the case that y doesn't exist.
+		// For instance, it may be an overlay file that
+		// hasn't been written to disk. To handle that case
+		// let x == y through. (We added the exact absolute path
+		// string to the CompiledGoFiles list, so the unwritten
+		// overlay case implies x==y.)
+		return true
+	}
+	if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation)
+		if xi, err := os.Stat(x); err == nil {
+			if yi, err := os.Stat(y); err == nil {
+				return os.SameFile(xi, yi)
+			}
+		}
+	}
+	return false
+}
+
+// loadFromExportData ensures that type information is present for the specified
+// package, loading it from an export data file on the first request.
+// On success it sets lpkg.Types to a new Package.
+func (ld *loader) loadFromExportData(lpkg *loaderPackage) error {
+	if lpkg.PkgPath == "" {
+		log.Fatalf("internal error: Package %s has no PkgPath", lpkg)
+	}
+
+	// Because gcexportdata.Read has the potential to create or
+	// modify the types.Package for each node in the transitive
+	// closure of dependencies of lpkg, all exportdata operations
+	// must be sequential.
(Finer-grained locking would require + // changes to the gcexportdata API.) + // + // The exportMu lock guards the lpkg.Types field and the + // types.Package it points to, for each loaderPackage in the graph. + // + // Not all accesses to Package.Pkg need to be protected by exportMu: + // graph ordering ensures that direct dependencies of source + // packages are fully loaded before the importer reads their Pkg field. + ld.exportMu.Lock() + defer ld.exportMu.Unlock() + + if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() { + return nil // cache hit + } + + lpkg.IllTyped = true // fail safe + + if lpkg.ExportFile == "" { + // Errors while building export data will have been printed to stderr. + return fmt.Errorf("no export data file") + } + f, err := os.Open(lpkg.ExportFile) + if err != nil { + return err + } + defer f.Close() + + // Read gc export data. + // + // We don't currently support gccgo export data because all + // underlying workspaces use the gc toolchain. (Even build + // systems that support gccgo don't use it for workspace + // queries.) + r, err := gcexportdata.NewReader(f) + if err != nil { + return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + + // Build the view. + // + // The gcexportdata machinery has no concept of package ID. + // It identifies packages by their PkgPath, which although not + // globally unique is unique within the scope of one invocation + // of the linker, type-checker, or gcexportdata. + // + // So, we must build a PkgPath-keyed view of the global + // (conceptually ID-keyed) cache of packages and pass it to + // gcexportdata. The view must contain every existing + // package that might possibly be mentioned by the + // current package---its transitive closure. + // + // In loadPackage, we unconditionally create a types.Package for + // each dependency so that export data loading does not + // create new ones. + // + // TODO(adonovan): it would be simpler and more efficient + // if the export data machinery invoked a callback to + // get-or-create a package instead of a map. + // + view := make(map[string]*types.Package) // view seen by gcexportdata + seen := make(map[*loaderPackage]bool) // all visited packages + var visit func(pkgs map[string]*Package) + visit = func(pkgs map[string]*Package) { + for _, p := range pkgs { + lpkg := ld.pkgs[p.ID] + if !seen[lpkg] { + seen[lpkg] = true + view[lpkg.PkgPath] = lpkg.Types + visit(lpkg.Imports) + } + } + } + visit(lpkg.Imports) + + viewLen := len(view) + 1 // adding the self package + // Parse the export data. + // (May modify incomplete packages in view but not create new ones.) + tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath) + if err != nil { + return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + if _, ok := view["go.shape"]; ok { + // Account for the pseudopackage "go.shape" that gets + // created by generic code. + viewLen++ + } + if viewLen != len(view) { + log.Panicf("golang.org/x/tools/go/packages: unexpected new packages during load of %s", lpkg.PkgPath) + } + + lpkg.Types = tpkg + lpkg.IllTyped = false + return nil +} + +// impliedLoadMode returns loadMode with its dependencies. +func impliedLoadMode(loadMode LoadMode) LoadMode { + if loadMode&(NeedDeps|NeedTypes|NeedTypesInfo) != 0 { + // All these things require knowing the import graph. 
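+		// e.g. (illustrative) a request for NeedTypesInfo alone is widened
+		// to NeedTypesInfo|NeedImports, since type-checking needs the graph.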
+ loadMode |= NeedImports + } + + return loadMode +} + +func usesExportData(cfg *Config) bool { + return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 +} + +var _ interface{} = io.Discard // assert build toolchain is go1.16 or later diff --git a/terraform/providers/google/vendor/golang.org/x/tools/go/packages/visit.go b/terraform/providers/google/vendor/golang.org/x/tools/go/packages/visit.go new file mode 100644 index 00000000000..a1dcc40b727 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/go/packages/visit.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "fmt" + "os" + "sort" +) + +// Visit visits all the packages in the import graph whose roots are +// pkgs, calling the optional pre function the first time each package +// is encountered (preorder), and the optional post function after a +// package's dependencies have been visited (postorder). +// The boolean result of pre(pkg) determines whether +// the imports of package pkg are visited. +func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { + seen := make(map[*Package]bool) + var visit func(*Package) + visit = func(pkg *Package) { + if !seen[pkg] { + seen[pkg] = true + + if pre == nil || pre(pkg) { + paths := make([]string, 0, len(pkg.Imports)) + for path := range pkg.Imports { + paths = append(paths, path) + } + sort.Strings(paths) // Imports is a map, this makes visit stable + for _, path := range paths { + visit(pkg.Imports[path]) + } + } + + if post != nil { + post(pkg) + } + } + } + for _, pkg := range pkgs { + visit(pkg) + } +} + +// PrintErrors prints to os.Stderr the accumulated errors of all +// packages in the import graph rooted at pkgs, dependencies first. +// PrintErrors returns the number of errors printed. +func PrintErrors(pkgs []*Package) int { + var n int + Visit(pkgs, nil, func(pkg *Package) { + for _, err := range pkg.Errors { + fmt.Fprintln(os.Stderr, err) + n++ + } + }) + return n +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/terraform/providers/google/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go new file mode 100644 index 00000000000..a2386c347a2 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -0,0 +1,753 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package objectpath defines a naming scheme for types.Objects +// (that is, named entities in Go programs) relative to their enclosing +// package. +// +// Type-checker objects are canonical, so they are usually identified by +// their address in memory (a pointer), but a pointer has meaning only +// within one address space. By contrast, objectpath names allow the +// identity of an object to be sent from one program to another, +// establishing a correspondence between types.Object variables that are +// distinct but logically equivalent. +// +// A single object may have multiple paths. In this example, +// +// type A struct{ X int } +// type B A +// +// the field X has two paths due to its membership of both A and B. +// The For(obj) function always returns one of these paths, arbitrarily +// but consistently. 
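+//
+// A typical round-trip (an illustrative sketch): encode an object on the
+// producer side, then resolve it against the same package elsewhere:
+//
+//	path, err := objectpath.For(obj)
+//	// ... transmit path ...
+//	obj2, err := objectpath.Object(pkg, path)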
+package objectpath
+
+import (
+	"fmt"
+	"go/types"
+	"strconv"
+	"strings"
+
+	"golang.org/x/tools/internal/aliases"
+	"golang.org/x/tools/internal/typesinternal"
+)
+
+// TODO(adonovan): think about generic aliases.
+
+// A Path is an opaque name that identifies a types.Object
+// relative to its package. Conceptually, the name consists of a
+// sequence of destructuring operations applied to the package scope
+// to obtain the original object.
+// The name does not include the package itself.
+type Path string
+
+// Encoding
+//
+// An object path is a textual and (with training) human-readable encoding
+// of a sequence of destructuring operators, starting from a types.Package.
+// The sequences represent a path through the package/object/type graph.
+// We classify these operators by their type:
+//
+//	PO package->object	Package.Scope.Lookup
+//	OT  object->type	Object.Type
+//	TT      type->type	Type.{Elem,Key,Params,Results,Underlying,TypeParams,Constraint} [EKPRUTC]
+//	TO   type->object	Type.{At,Field,Method,Obj} [AFMO]
+//
+// All valid paths start with a package and end at an object
+// and thus may be defined by the regular language:
+//
+//	objectpath = PO (OT TT* TO)*
+//
+// The concrete encoding follows directly:
+//   - The only PO operator is Package.Scope.Lookup, which requires an identifier.
+//   - The only OT operator is Object.Type,
+//     which we encode as '.' because dot cannot appear in an identifier.
+//   - The TT operators are encoded as [EKPRUTC];
+//     one of these (TypeParam) requires an integer operand,
+//     which is encoded as a string of decimal digits.
+//   - The TO operators are encoded as [AFMO];
+//     three of these (At,Field,Method) require an integer operand,
+//     which is encoded as a string of decimal digits.
+//     These indices are stable across different representations
+//     of the same package, even source and export data.
+//     The indices used are implementation specific and may not correspond to
+//     the argument to the go/types function.
+//
+// In the example below,
+//
+//	package p
+//
+//	type T interface {
+//		f() (a string, b struct{ X int })
+//	}
+//
+// field X has the path "T.UM0.RA1.F0",
+// representing the following sequence of operations:
+//
+//	p.Lookup("T")				T
+//	.Type().Underlying().Method(0).		f
+//	.Type().Results().At(1)			b
+//	.Type().Field(0)			X
+//
+// The encoding is not maximally compact---every R or P is
+// followed by an A, for example---but this simplifies the
+// encoder and decoder.
+const (
+	// object->type operators
+	opType = '.' // .Type() (Object)
+
+	// type->type operators
+	opElem       = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map)
+	opKey        = 'K' // .Key() (Map)
+	opParams     = 'P' // .Params() (Signature)
+	opResults    = 'R' // .Results() (Signature)
+	opUnderlying = 'U' // .Underlying() (Named)
+	opTypeParam  = 'T' // .TypeParams.At(i) (Named, Signature)
+	opConstraint = 'C' // .Constraint() (TypeParam)
+
+	// type->object operators
+	opAt     = 'A' // .At(i) (Tuple)
+	opField  = 'F' // .Field(i) (Struct)
+	opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored)
+	opObj    = 'O' // .Obj() (Named, TypeParam)
+)
+
+// For is equivalent to new(Encoder).For(obj).
+//
+// It may be more efficient to reuse a single Encoder across several calls.
+func For(obj types.Object) (Path, error) {
+	return new(Encoder).For(obj)
+}
+
+// An Encoder amortizes the cost of encoding the paths of multiple objects.
+// The zero value of an Encoder is ready to use.
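+//
+// For example (illustrative; record is a hypothetical consumer):
+//
+//	var enc objectpath.Encoder
+//	for _, obj := range objs {
+//		if path, err := enc.For(obj); err == nil {
+//			record(path)
+//		}
+//	}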
+type Encoder struct { + scopeMemo map[*types.Scope][]types.Object // memoization of scopeObjects +} + +// For returns the path to an object relative to its package, +// or an error if the object is not accessible from the package's Scope. +// +// The For function guarantees to return a path only for the following objects: +// - package-level types +// - exported package-level non-types +// - methods +// - parameter and result variables +// - struct fields +// These objects are sufficient to define the API of their package. +// The objects described by a package's export data are drawn from this set. +// +// The set of objects accessible from a package's Scope depends on +// whether the package was produced by type-checking syntax, or +// reading export data; the latter may have a smaller Scope since +// export data trims objects that are not reachable from an exported +// declaration. For example, the For function will return a path for +// an exported method of an unexported type that is not reachable +// from any public declaration; this path will cause the Object +// function to fail if called on a package loaded from export data. +// TODO(adonovan): is this a bug or feature? Should this package +// compute accessibility in the same way? +// +// For does not return a path for predeclared names, imported package +// names, local names, and unexported package-level names (except +// types). +// +// Example: given this definition, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// For(X) would return a path that denotes the following sequence of operations: +// +// p.Scope().Lookup("T") (TypeName T) +// .Type().Underlying().Method(0). (method Func f) +// .Type().Results().At(1) (field Var b) +// .Type().Field(0) (field Var X) +// +// where p is the package (*types.Package) to which X belongs. +func (enc *Encoder) For(obj types.Object) (Path, error) { + pkg := obj.Pkg() + + // This table lists the cases of interest. + // + // Object Action + // ------ ------ + // nil reject + // builtin reject + // pkgname reject + // label reject + // var + // package-level accept + // func param/result accept + // local reject + // struct field accept + // const + // package-level accept + // local reject + // func + // package-level accept + // init functions reject + // concrete method accept + // interface method accept + // type + // package-level accept + // local reject + // + // The only accessible package-level objects are members of pkg itself. + // + // The cases are handled in four steps: + // + // 1. reject nil and builtin + // 2. accept package-level objects + // 3. reject obviously invalid objects + // 4. search the API for the path to the param/result/field/method. + + // 1. reference to nil or builtin? + if pkg == nil { + return "", fmt.Errorf("predeclared %s has no path", obj) + } + scope := pkg.Scope() + + // 2. package-level object? + if scope.Lookup(obj.Name()) == obj { + // Only exported objects (and non-exported types) have a path. + // Non-exported types may be referenced by other objects. + if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() { + return "", fmt.Errorf("no path for non-exported %v", obj) + } + return Path(obj.Name()), nil + } + + // 3. Not a package-level object. + // Reject obviously non-viable cases. + switch obj := obj.(type) { + case *types.TypeName: + if _, ok := aliases.Unalias(obj.Type()).(*types.TypeParam); !ok { + // With the exception of type parameters, only package-level type names + // have a path. 
+ return "", fmt.Errorf("no path for %v", obj) + } + case *types.Const, // Only package-level constants have a path. + *types.Label, // Labels are function-local. + *types.PkgName: // PkgNames are file-local. + return "", fmt.Errorf("no path for %v", obj) + + case *types.Var: + // Could be: + // - a field (obj.IsField()) + // - a func parameter or result + // - a local var. + // Sadly there is no way to distinguish + // a param/result from a local + // so we must proceed to the find. + + case *types.Func: + // A func, if not package-level, must be a method. + if recv := obj.Type().(*types.Signature).Recv(); recv == nil { + return "", fmt.Errorf("func is not a method: %v", obj) + } + + if path, ok := enc.concreteMethod(obj); ok { + // Fast path for concrete methods that avoids looping over scope. + return path, nil + } + + default: + panic(obj) + } + + // 4. Search the API for the path to the var (field/param/result) or method. + + // First inspect package-level named types. + // In the presence of path aliases, these give + // the best paths because non-types may + // refer to types, but not the reverse. + empty := make([]byte, 0, 48) // initial space + objs := enc.scopeObjects(scope) + for _, o := range objs { + tname, ok := o.(*types.TypeName) + if !ok { + continue // handle non-types in second pass + } + + path := append(empty, o.Name()...) + path = append(path, opType) + + T := o.Type() + + if tname.IsAlias() { + // type alias + if r := find(obj, T, path, nil); r != nil { + return Path(r), nil + } + } else { + if named, _ := T.(*types.Named); named != nil { + if r := findTypeParam(obj, named.TypeParams(), path, nil); r != nil { + // generic named type + return Path(r), nil + } + } + // defined (named) type + if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil { + return Path(r), nil + } + } + } + + // Then inspect everything else: + // non-types, and declared methods of defined types. + for _, o := range objs { + path := append(empty, o.Name()...) + if _, ok := o.(*types.TypeName); !ok { + if o.Exported() { + // exported non-type (const, var, func) + if r := find(obj, o.Type(), append(path, opType), nil); r != nil { + return Path(r), nil + } + } + continue + } + + // Inspect declared methods of defined types. + if T, ok := aliases.Unalias(o.Type()).(*types.Named); ok { + path = append(path, opType) + // The method index here is always with respect + // to the underlying go/types data structures, + // which ultimately derives from source order + // and must be preserved by export data. + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return Path(path2), nil // found declared method + } + if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { + return Path(r), nil + } + } + } + } + + return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path()) +} + +func appendOpArg(path []byte, op byte, arg int) []byte { + path = append(path, op) + path = strconv.AppendInt(path, int64(arg), 10) + return path +} + +// concreteMethod returns the path for meth, which must have a non-nil receiver. +// The second return value indicates success and may be false if the method is +// an interface method or if it is an instantiated method. +// +// This function is just an optimization that avoids the general scope walking +// approach. You are expected to fall back to the general approach if this +// function fails. 
+func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { + // Concrete methods can only be declared on package-scoped named types. For + // that reason we can skip the expensive walk over the package scope: the + // path will always be package -> named type -> method. We can trivially get + // the type name from the receiver, and only have to look over the type's + // methods to find the method index. + // + // Methods on generic types require special consideration, however. Consider + // the following package: + // + // L1: type S[T any] struct{} + // L2: func (recv S[A]) Foo() { recv.Bar() } + // L3: func (recv S[B]) Bar() { } + // L4: type Alias = S[int] + // L5: func _[T any]() { var s S[int]; s.Foo() } + // + // The receivers of methods on generic types are instantiations. L2 and L3 + // instantiate S with the type-parameters A and B, which are scoped to the + // respective methods. L4 and L5 each instantiate S with int. Each of these + // instantiations has its own method set, full of methods (and thus objects) + // with receivers whose types are the respective instantiations. In other + // words, we have + // + // S[A].Foo, S[A].Bar + // S[B].Foo, S[B].Bar + // S[int].Foo, S[int].Bar + // + // We may thus be trying to produce object paths for any of these objects. + // + // S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo + // and S.Bar, which are the paths that this function naturally produces. + // + // S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that + // don't correspond to the origin methods. For S[int], this is significant. + // The most precise object path for S[int].Foo, for example, is Alias.Foo, + // not S.Foo. Our function, however, would produce S.Foo, which would + // resolve to a different object. + // + // For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are + // still the correct paths, since only the origin methods have meaningful + // paths. But this is likely only true for trivial cases and has edge cases. + // Since this function is only an optimization, we err on the side of giving + // up, deferring to the slower but definitely correct algorithm. Most users + // of objectpath will only be giving us origin methods, anyway, as referring + // to instantiated methods is usually not useful. + + if meth.Origin() != meth { + return "", false + } + + _, named := typesinternal.ReceiverNamed(meth.Type().(*types.Signature).Recv()) + if named == nil { + return "", false + } + + if types.IsInterface(named) { + // Named interfaces don't have to be package-scoped + // + // TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface + // methods, too, I think. + return "", false + } + + // Preallocate space for the name, opType, opMethod, and some digits. + name := named.Obj().Name() + path := make([]byte, 0, len(name)+8) + path = append(path, name...) + path = append(path, opType) + + // Method indices are w.r.t. the go/types data structures, + // ultimately deriving from source order, + // which is preserved by export data. + for i := 0; i < named.NumMethods(); i++ { + if named.Method(i) == meth { + path = appendOpArg(path, opMethod, i) + return Path(path), true + } + } + + // Due to golang/go#59944, go/types fails to associate the receiver with + // certain methods on cgo types. + // + // TODO(rfindley): replace this panic once golang/go#59944 is fixed in all Go + // versions gopls supports. 
+ return "", false + // panic(fmt.Sprintf("couldn't find method %s on type %s; methods: %#v", meth, named, enc.namedMethods(named))) +} + +// find finds obj within type T, returning the path to it, or nil if not found. +// +// The seen map is used to short circuit cycles through type parameters. If +// nil, it will be allocated as necessary. +func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { + switch T := T.(type) { + case *aliases.Alias: + return find(obj, aliases.Unalias(T), path, seen) + case *types.Basic, *types.Named: + // Named types belonging to pkg were handled already, + // so T must belong to another package. No path. + return nil + case *types.Pointer: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Slice: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Array: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Chan: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Map: + if r := find(obj, T.Key(), append(path, opKey), seen); r != nil { + return r + } + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Signature: + if r := findTypeParam(obj, T.TypeParams(), path, seen); r != nil { + return r + } + if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { + return r + } + return find(obj, T.Results(), append(path, opResults), seen) + case *types.Struct: + for i := 0; i < T.NumFields(); i++ { + fld := T.Field(i) + path2 := appendOpArg(path, opField, i) + if fld == obj { + return path2 // found field var + } + if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *types.Tuple: + for i := 0; i < T.Len(); i++ { + v := T.At(i) + path2 := appendOpArg(path, opAt, i) + if v == obj { + return path2 // found param/result var + } + if r := find(obj, v.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *types.Interface: + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return path2 // found interface method + } + if r := find(obj, m.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *types.TypeParam: + name := T.Obj() + if name == obj { + return append(path, opObj) + } + if seen[name] { + return nil + } + if seen == nil { + seen = make(map[*types.TypeName]bool) + } + seen[name] = true + if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil { + return r + } + return nil + } + panic(T) +} + +func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { + for i := 0; i < list.Len(); i++ { + tparam := list.At(i) + path2 := appendOpArg(path, opTypeParam, i) + if r := find(obj, tparam, path2, seen); r != nil { + return r + } + } + return nil +} + +// Object returns the object denoted by path p within the package pkg. +func Object(pkg *types.Package, p Path) (types.Object, error) { + pathstr := string(p) + if pathstr == "" { + return nil, fmt.Errorf("empty path") + } + + var pkgobj, suffix string + if dot := strings.IndexByte(pathstr, opType); dot < 0 { + pkgobj = pathstr + } else { + pkgobj = pathstr[:dot] + suffix = pathstr[dot:] // suffix starts with "." 
+ } + + obj := pkg.Scope().Lookup(pkgobj) + if obj == nil { + return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj) + } + + // abstraction of *types.{Pointer,Slice,Array,Chan,Map} + type hasElem interface { + Elem() types.Type + } + // abstraction of *types.{Named,Signature} + type hasTypeParams interface { + TypeParams() *types.TypeParamList + } + // abstraction of *types.{Named,TypeParam} + type hasObj interface { + Obj() *types.TypeName + } + + // The loop state is the pair (t, obj), + // exactly one of which is non-nil, initially obj. + // All suffixes start with '.' (the only object->type operation), + // followed by optional type->type operations, + // then a type->object operation. + // The cycle then repeats. + var t types.Type + for suffix != "" { + code := suffix[0] + suffix = suffix[1:] + + // Codes [AFM] have an integer operand. + var index int + switch code { + case opAt, opField, opMethod, opTypeParam: + rest := strings.TrimLeft(suffix, "0123456789") + numerals := suffix[:len(suffix)-len(rest)] + suffix = rest + i, err := strconv.Atoi(numerals) + if err != nil { + return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code) + } + index = int(i) + case opObj: + // no operand + default: + // The suffix must end with a type->object operation. + if suffix == "" { + return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code) + } + } + + if code == opType { + if t != nil { + return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType) + } + t = obj.Type() + obj = nil + continue + } + + if t == nil { + return nil, fmt.Errorf("invalid path: code %q in object context", code) + } + + // Inv: t != nil, obj == nil + + t = aliases.Unalias(t) + switch code { + case opElem: + hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t) + } + t = hasElem.Elem() + + case opKey: + mapType, ok := t.(*types.Map) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t) + } + t = mapType.Key() + + case opParams: + sig, ok := t.(*types.Signature) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + t = sig.Params() + + case opResults: + sig, ok := t.(*types.Signature) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + t = sig.Results() + + case opUnderlying: + named, ok := t.(*types.Named) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t) + } + t = named.Underlying() + + case opTypeParam: + hasTypeParams, ok := t.(hasTypeParams) // Named, Signature + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t) + } + tparams := hasTypeParams.TypeParams() + if n := tparams.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + t = tparams.At(index) + + case opConstraint: + tparam, ok := t.(*types.TypeParam) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t) + } + t = tparam.Constraint() + + case opAt: + tuple, ok := t.(*types.Tuple) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t) + } + if n := tuple.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + obj = tuple.At(index) + t = nil + + 
case opField: + structType, ok := t.(*types.Struct) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t) + } + if n := structType.NumFields(); index >= n { + return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n) + } + obj = structType.Field(index) + t = nil + + case opMethod: + switch t := t.(type) { + case *types.Interface: + if index >= t.NumMethods() { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) + } + obj = t.Method(index) // Id-ordered + + case *types.Named: + if index >= t.NumMethods() { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) + } + obj = t.Method(index) + + default: + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t) + } + t = nil + + case opObj: + hasObj, ok := t.(hasObj) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t) + } + obj = hasObj.Obj() + t = nil + + default: + return nil, fmt.Errorf("invalid path: unknown code %q", code) + } + } + + if obj.Pkg() != pkg { + return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) + } + + return obj, nil // success +} + +// scopeObjects is a memoization of scope objects. +// Callers must not modify the result. +func (enc *Encoder) scopeObjects(scope *types.Scope) []types.Object { + m := enc.scopeMemo + if m == nil { + m = make(map[*types.Scope][]types.Object) + enc.scopeMemo = m + } + objs, ok := m[scope] + if !ok { + names := scope.Names() // allocates and sorts + objs = make([]types.Object, len(names)) + for i, name := range names { + objs[i] = scope.Lookup(name) + } + m[scope] = objs + } + return objs +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/aliases/aliases.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/aliases/aliases.go new file mode 100644 index 00000000000..c24c2eee457 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/aliases/aliases.go @@ -0,0 +1,32 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package aliases + +import ( + "go/token" + "go/types" +) + +// Package aliases defines backward compatible shims +// for the types.Alias type representation added in 1.22. +// This defines placeholders for x/tools until 1.26. + +// NewAlias creates a new TypeName in Package pkg that +// is an alias for the type rhs. +// +// The enabled parameter determines whether the resulting [TypeName]'s +// type is an [types.Alias]. Its value must be the result of a call to +// [Enabled], which computes the effective value of +// GODEBUG=gotypesalias=... by invoking the type checker. The Enabled +// function is expensive and should be called once per task (e.g. +// package import), not once per call to NewAlias. 
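+//
+// An illustrative call (a sketch; pkg and rhs are assumed to be in scope):
+//
+//	enabled := aliases.Enabled() // compute once per task
+//	tname := aliases.NewAlias(enabled, token.NoPos, pkg, "A", rhs)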
+func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName {
+	if enabled {
+		tname := types.NewTypeName(pos, pkg, name, nil)
+		newAlias(tname, rhs)
+		return tname
+	}
+	return types.NewTypeName(pos, pkg, name, rhs)
+}
diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go
new file mode 100644
index 00000000000..c027b9f315f
--- /dev/null
+++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go
@@ -0,0 +1,31 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.22
+// +build !go1.22
+
+package aliases
+
+import (
+	"go/types"
+)
+
+// Alias is a placeholder for a go/types.Alias for <=1.21.
+// It will never be created by go/types.
+type Alias struct{}
+
+func (*Alias) String() string         { panic("unreachable") }
+func (*Alias) Underlying() types.Type { panic("unreachable") }
+func (*Alias) Obj() *types.TypeName   { panic("unreachable") }
+func Rhs(alias *Alias) types.Type     { panic("unreachable") }
+
+// Unalias returns the type t for go <=1.21.
+func Unalias(t types.Type) types.Type { return t }
+
+func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") }
+
+// Enabled reports whether [NewAlias] should create [types.Alias] types.
+//
+// Before go1.22, this function always returns false.
+func Enabled() bool { return false }
diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go
new file mode 100644
index 00000000000..b3299548419
--- /dev/null
+++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go
@@ -0,0 +1,63 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.22
+// +build go1.22
+
+package aliases
+
+import (
+	"go/ast"
+	"go/parser"
+	"go/token"
+	"go/types"
+)
+
+// Alias is an alias of types.Alias.
+type Alias = types.Alias
+
+// Rhs returns the type on the right-hand side of the alias declaration.
+func Rhs(alias *Alias) types.Type {
+	if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok {
+		return alias.Rhs() // go1.23+
+	}
+
+	// go1.22's Alias didn't have the Rhs method,
+	// so Unalias is the best we can do.
+	return Unalias(alias)
+}
+
+// Unalias is a wrapper of types.Unalias.
+func Unalias(t types.Type) types.Type { return types.Unalias(t) }
+
+// newAlias is an internal alias around types.NewAlias.
+// Direct usage is discouraged at the moment.
+// Try to use NewAlias instead.
+func newAlias(tname *types.TypeName, rhs types.Type) *Alias {
+	a := types.NewAlias(tname, rhs)
+	// TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect.
+	Unalias(a)
+	return a
+}
+
+// Enabled reports whether [NewAlias] should create [types.Alias] types.
+//
+// This function is expensive! Call it sparingly.
+func Enabled() bool {
+	// The only reliable way to compute the answer is to invoke go/types.
+	// We don't parse the GODEBUG environment variable, because
+	// (a) it's tricky to do so in a manner that is consistent
+	// with the godebug package; in particular, a simple
+	// substring check is not good enough. The value is a
+	// rightmost-wins list of options. But more importantly:
+	// (b) it is impossible to detect changes to the effective
+	// setting caused by os.Setenv("GODEBUG"), as happens in
+	// many tests. Therefore any attempt to cache the result
+	// is just incorrect.
+	fset := token.NewFileSet()
+	f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0)
+	pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
+	_, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias)
+	return enabled
+}
diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/event/core/event.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/event/core/event.go
new file mode 100644
index 00000000000..a6cf0e64a4b
--- /dev/null
+++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/event/core/event.go
@@ -0,0 +1,85 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package core provides support for event based telemetry.
+package core
+
+import (
+	"fmt"
+	"time"
+
+	"golang.org/x/tools/internal/event/label"
+)
+
+// Event holds the information about an event of note that occurred.
+type Event struct {
+	at time.Time
+
+	// As events are often on the stack, storing the first few labels directly
+	// in the event can avoid an allocation at all for the very common cases of
+	// simple events.
+	// The length needs to be large enough to cope with the majority of events
+	// but not so large as to cause undue stack pressure.
+	// A log message with two values will use 3 labels (one for each value and
+	// one for the message itself).
+
+	static  [3]label.Label // inline storage for the first few labels
+	dynamic []label.Label  // dynamically sized storage for remaining labels
+}
+
+// eventLabelMap implements label.Map for the labels of an Event.
+type eventLabelMap struct {
+	event Event
+}
+
+func (ev Event) At() time.Time { return ev.at }
+
+func (ev Event) Format(f fmt.State, r rune) {
+	if !ev.at.IsZero() {
+		fmt.Fprint(f, ev.at.Format("2006/01/02 15:04:05 "))
+	}
+	for index := 0; ev.Valid(index); index++ {
+		if l := ev.Label(index); l.Valid() {
+			fmt.Fprintf(f, "\n\t%v", l)
+		}
+	}
+}
+
+func (ev Event) Valid(index int) bool {
+	return index >= 0 && index < len(ev.static)+len(ev.dynamic)
+}
+
+func (ev Event) Label(index int) label.Label {
+	if index < len(ev.static) {
+		return ev.static[index]
+	}
+	return ev.dynamic[index-len(ev.static)]
+}
+
+func (ev Event) Find(key label.Key) label.Label {
+	for _, l := range ev.static {
+		if l.Key() == key {
+			return l
+		}
+	}
+	for _, l := range ev.dynamic {
+		if l.Key() == key {
+			return l
+		}
+	}
+	return label.Label{}
+}
+
+func MakeEvent(static [3]label.Label, labels []label.Label) Event {
+	return Event{
+		static:  static,
+		dynamic: labels,
+	}
+}
+
+// CloneEvent returns a copy of the event with the time adjusted to at.
+func CloneEvent(ev Event, at time.Time) Event {
+	ev.at = at
+	return ev
+}
diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/event/core/export.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/event/core/export.go
new file mode 100644
index 00000000000..05f3a9a5791
--- /dev/null
+++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/event/core/export.go
@@ -0,0 +1,70 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package core
+
+import (
+	"context"
+	"sync/atomic"
+	"time"
+	"unsafe"
+
+	"golang.org/x/tools/internal/event/label"
+)
+
+// Exporter is a function that handles events.
+// It may return a modified context and event.
+type Exporter func(context.Context, Event, label.Map) context.Context
+
+var (
+	exporter unsafe.Pointer
+)
+
+// SetExporter sets the global exporter function that handles all events.
+// The exporter is called synchronously from the event call site, so it should
+// return quickly so as not to hold up user code.
+func SetExporter(e Exporter) {
+	p := unsafe.Pointer(&e)
+	if e == nil {
+		// &e is always valid, and so p is always valid, but for the early abort
+		// of ProcessEvent to be efficient it needs to make the nil check on the
+		// pointer without having to dereference it, so we make the nil function
+		// also a nil pointer
+		p = nil
+	}
+	atomic.StorePointer(&exporter, p)
+}
+
+// deliver is called to deliver an event to the supplied exporter.
+// It will fill in the time.
+func deliver(ctx context.Context, exporter Exporter, ev Event) context.Context {
+	// add the current time to the event
+	ev.at = time.Now()
+	// hand the event off to the current exporter
+	return exporter(ctx, ev, ev)
+}
+
+// Export is called to deliver an event to the global exporter if set.
+func Export(ctx context.Context, ev Event) context.Context {
+	// get the global exporter and abort early if there is not one
+	exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
+	if exporterPtr == nil {
+		return ctx
+	}
+	return deliver(ctx, *exporterPtr, ev)
+}
+
+// ExportPair is called to deliver a start event to the supplied exporter.
+// It also returns a function that will deliver the end event to the same
+// exporter.
+// It will fill in the time.
+func ExportPair(ctx context.Context, begin, end Event) (context.Context, func()) {
+	// get the global exporter and abort early if there is not one
+	exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
+	if exporterPtr == nil {
+		return ctx, func() {}
+	}
+	ctx = deliver(ctx, *exporterPtr, begin)
+	return ctx, func() { deliver(ctx, *exporterPtr, end) }
+}
diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/event/core/fast.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/event/core/fast.go
new file mode 100644
index 00000000000..06c1d4615e6
--- /dev/null
+++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/event/core/fast.go
@@ -0,0 +1,77 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package core
+
+import (
+	"context"
+
+	"golang.org/x/tools/internal/event/keys"
+	"golang.org/x/tools/internal/event/label"
+)
+
+// Log1 takes a message and one label and delivers a log event to the exporter.
+// It is a customized version of Print that is faster and does no allocation.
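+//
+// A minimal usage sketch (the "hits" key is illustrative, not part of this
+// package): build the label key once, then log on the hot path without
+// allocating a label slice:
+//
+//	var hits = keys.NewInt64("hits", "number of cache hits")
+//	core.Log1(ctx, "cache probe", hits.Of(n))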
+func Log1(ctx context.Context, message string, t1 label.Label) { + Export(ctx, MakeEvent([3]label.Label{ + keys.Msg.Of(message), + t1, + }, nil)) +} + +// Log2 takes a message and two labels and delivers a log event to the exporter. +// It is a customized version of Print that is faster and does no allocation. +func Log2(ctx context.Context, message string, t1 label.Label, t2 label.Label) { + Export(ctx, MakeEvent([3]label.Label{ + keys.Msg.Of(message), + t1, + t2, + }, nil)) +} + +// Metric1 sends a label event to the exporter with the supplied labels. +func Metric1(ctx context.Context, t1 label.Label) context.Context { + return Export(ctx, MakeEvent([3]label.Label{ + keys.Metric.New(), + t1, + }, nil)) +} + +// Metric2 sends a label event to the exporter with the supplied labels. +func Metric2(ctx context.Context, t1, t2 label.Label) context.Context { + return Export(ctx, MakeEvent([3]label.Label{ + keys.Metric.New(), + t1, + t2, + }, nil)) +} + +// Start1 sends a span start event with the supplied label list to the exporter. +// It also returns a function that will end the span, which should normally be +// deferred. +func Start1(ctx context.Context, name string, t1 label.Label) (context.Context, func()) { + return ExportPair(ctx, + MakeEvent([3]label.Label{ + keys.Start.Of(name), + t1, + }, nil), + MakeEvent([3]label.Label{ + keys.End.New(), + }, nil)) +} + +// Start2 sends a span start event with the supplied label list to the exporter. +// It also returns a function that will end the span, which should normally be +// deferred. +func Start2(ctx context.Context, name string, t1, t2 label.Label) (context.Context, func()) { + return ExportPair(ctx, + MakeEvent([3]label.Label{ + keys.Start.Of(name), + t1, + t2, + }, nil), + MakeEvent([3]label.Label{ + keys.End.New(), + }, nil)) +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/event/doc.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/event/doc.go new file mode 100644 index 00000000000..5dc6e6babed --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/event/doc.go @@ -0,0 +1,7 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package event provides a set of packages that cover the main +// concepts of telemetry in an implementation agnostic way. +package event diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/event/event.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/event/event.go new file mode 100644 index 00000000000..4d55e577d1a --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/event/event.go @@ -0,0 +1,127 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package event + +import ( + "context" + + "golang.org/x/tools/internal/event/core" + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" +) + +// Exporter is a function that handles events. +// It may return a modified context and event. +type Exporter func(context.Context, core.Event, label.Map) context.Context + +// SetExporter sets the global exporter function that handles all events. +// The exporter is called synchronously from the event call site, so it should +// return quickly so as not to hold up user code. 
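+//
+// A minimal sketch of an exporter (illustrative; assumes the caller imports
+// "fmt", "golang.org/x/tools/internal/event/core", and .../event/label):
+//
+//	event.SetExporter(func(ctx context.Context, ev core.Event, lm label.Map) context.Context {
+//		fmt.Println(ev) // core.Event implements fmt.Formatter
+//		return ctx
+//	})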
+func SetExporter(e Exporter) { + core.SetExporter(core.Exporter(e)) +} + +// Log takes a message and a label list and combines them into a single event +// before delivering them to the exporter. +func Log(ctx context.Context, message string, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Msg.Of(message), + }, labels)) +} + +// IsLog returns true if the event was built by the Log function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsLog(ev core.Event) bool { + return ev.Label(0).Key() == keys.Msg +} + +// Error takes a message and a label list and combines them into a single event +// before delivering them to the exporter. It captures the error in the +// delivered event. +func Error(ctx context.Context, message string, err error, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Msg.Of(message), + keys.Err.Of(err), + }, labels)) +} + +// IsError returns true if the event was built by the Error function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsError(ev core.Event) bool { + return ev.Label(0).Key() == keys.Msg && + ev.Label(1).Key() == keys.Err +} + +// Metric sends a label event to the exporter with the supplied labels. +func Metric(ctx context.Context, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Metric.New(), + }, labels)) +} + +// IsMetric returns true if the event was built by the Metric function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsMetric(ev core.Event) bool { + return ev.Label(0).Key() == keys.Metric +} + +// Label sends a label event to the exporter with the supplied labels. +func Label(ctx context.Context, labels ...label.Label) context.Context { + return core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Label.New(), + }, labels)) +} + +// IsLabel returns true if the event was built by the Label function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsLabel(ev core.Event) bool { + return ev.Label(0).Key() == keys.Label +} + +// Start sends a span start event with the supplied label list to the exporter. +// It also returns a function that will end the span, which should normally be +// deferred. +func Start(ctx context.Context, name string, labels ...label.Label) (context.Context, func()) { + return core.ExportPair(ctx, + core.MakeEvent([3]label.Label{ + keys.Start.Of(name), + }, labels), + core.MakeEvent([3]label.Label{ + keys.End.New(), + }, nil)) +} + +// IsStart returns true if the event was built by the Start function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsStart(ev core.Event) bool { + return ev.Label(0).Key() == keys.Start +} + +// IsEnd returns true if the event was built by the End function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsEnd(ev core.Event) bool { + return ev.Label(0).Key() == keys.End +} + +// Detach returns a context without an associated span. +// This allows the creation of spans that are not children of the current span. 
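+//
+// A hedged sketch (the span name is illustrative): start a root span from
+// within an existing span by detaching the context first:
+//
+//	ctx, done := event.Start(event.Detach(ctx), "background-sweep")
+//	defer done()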
+func Detach(ctx context.Context) context.Context { + return core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Detach.New(), + }, nil)) +} + +// IsDetach returns true if the event was built by the Detach function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsDetach(ev core.Event) bool { + return ev.Label(0).Key() == keys.Detach +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/event/keys/keys.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/event/keys/keys.go new file mode 100644 index 00000000000..a02206e3015 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/event/keys/keys.go @@ -0,0 +1,564 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package keys + +import ( + "fmt" + "io" + "math" + "strconv" + + "golang.org/x/tools/internal/event/label" +) + +// Value represents a key for untyped values. +type Value struct { + name string + description string +} + +// New creates a new Key for untyped values. +func New(name, description string) *Value { + return &Value{name: name, description: description} +} + +func (k *Value) Name() string { return k.name } +func (k *Value) Description() string { return k.description } + +func (k *Value) Format(w io.Writer, buf []byte, l label.Label) { + fmt.Fprint(w, k.From(l)) +} + +// Get can be used to get a label for the key from a label.Map. +func (k *Value) Get(lm label.Map) interface{} { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return nil +} + +// From can be used to get a value from a Label. +func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() } + +// Of creates a new Label with this key and the supplied value. +func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) } + +// Tag represents a key for tagging labels that have no value. +// These are used when the existence of the label is the entire information it +// carries, such as marking events to be of a specific kind, or from a specific +// package. +type Tag struct { + name string + description string +} + +// NewTag creates a new Key for tagging labels. +func NewTag(name, description string) *Tag { + return &Tag{name: name, description: description} +} + +func (k *Tag) Name() string { return k.name } +func (k *Tag) Description() string { return k.description } + +func (k *Tag) Format(w io.Writer, buf []byte, l label.Label) {} + +// New creates a new Label with this key. +func (k *Tag) New() label.Label { return label.OfValue(k, nil) } + +// Int represents a key +type Int struct { + name string + description string +} + +// NewInt creates a new Key for int values. +func NewInt(name, description string) *Int { + return &Int{name: name, description: description} +} + +func (k *Int) Name() string { return k.name } +func (k *Int) Description() string { return k.description } + +func (k *Int) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int) Of(v int) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. 
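+//
+// A small sketch (key name and label map are illustrative): the zero value
+// is returned when the map carries no label for this key:
+//
+//	var retries = NewInt("retries", "number of retries")
+//	n := retries.Get(lm) // 0 if lm has no "retries" label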
+func (k *Int) Get(lm label.Map) int { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int) From(t label.Label) int { return int(t.Unpack64()) } + +// Int8 represents a key +type Int8 struct { + name string + description string +} + +// NewInt8 creates a new Key for int8 values. +func NewInt8(name, description string) *Int8 { + return &Int8{name: name, description: description} +} + +func (k *Int8) Name() string { return k.name } +func (k *Int8) Description() string { return k.description } + +func (k *Int8) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int8) Of(v int8) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int8) Get(lm label.Map) int8 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int8) From(t label.Label) int8 { return int8(t.Unpack64()) } + +// Int16 represents a key +type Int16 struct { + name string + description string +} + +// NewInt16 creates a new Key for int16 values. +func NewInt16(name, description string) *Int16 { + return &Int16{name: name, description: description} +} + +func (k *Int16) Name() string { return k.name } +func (k *Int16) Description() string { return k.description } + +func (k *Int16) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int16) Of(v int16) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int16) Get(lm label.Map) int16 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int16) From(t label.Label) int16 { return int16(t.Unpack64()) } + +// Int32 represents a key +type Int32 struct { + name string + description string +} + +// NewInt32 creates a new Key for int32 values. +func NewInt32(name, description string) *Int32 { + return &Int32{name: name, description: description} +} + +func (k *Int32) Name() string { return k.name } +func (k *Int32) Description() string { return k.description } + +func (k *Int32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int32) Of(v int32) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int32) Get(lm label.Map) int32 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int32) From(t label.Label) int32 { return int32(t.Unpack64()) } + +// Int64 represents a key +type Int64 struct { + name string + description string +} + +// NewInt64 creates a new Key for int64 values. 
+func NewInt64(name, description string) *Int64 { + return &Int64{name: name, description: description} +} + +func (k *Int64) Name() string { return k.name } +func (k *Int64) Description() string { return k.description } + +func (k *Int64) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, k.From(l), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int64) Of(v int64) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int64) Get(lm label.Map) int64 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int64) From(t label.Label) int64 { return int64(t.Unpack64()) } + +// UInt represents a key +type UInt struct { + name string + description string +} + +// NewUInt creates a new Key for uint values. +func NewUInt(name, description string) *UInt { + return &UInt{name: name, description: description} +} + +func (k *UInt) Name() string { return k.name } +func (k *UInt) Description() string { return k.description } + +func (k *UInt) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt) Of(v uint) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt) Get(lm label.Map) uint { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt) From(t label.Label) uint { return uint(t.Unpack64()) } + +// UInt8 represents a key +type UInt8 struct { + name string + description string +} + +// NewUInt8 creates a new Key for uint8 values. +func NewUInt8(name, description string) *UInt8 { + return &UInt8{name: name, description: description} +} + +func (k *UInt8) Name() string { return k.name } +func (k *UInt8) Description() string { return k.description } + +func (k *UInt8) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt8) Of(v uint8) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt8) Get(lm label.Map) uint8 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt8) From(t label.Label) uint8 { return uint8(t.Unpack64()) } + +// UInt16 represents a key +type UInt16 struct { + name string + description string +} + +// NewUInt16 creates a new Key for uint16 values. +func NewUInt16(name, description string) *UInt16 { + return &UInt16{name: name, description: description} +} + +func (k *UInt16) Name() string { return k.name } +func (k *UInt16) Description() string { return k.description } + +func (k *UInt16) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt16) Of(v uint16) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt16) Get(lm label.Map) uint16 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. 
+func (k *UInt16) From(t label.Label) uint16 { return uint16(t.Unpack64()) }
+
+// UInt32 represents a key
+type UInt32 struct {
+	name        string
+	description string
+}
+
+// NewUInt32 creates a new Key for uint32 values.
+func NewUInt32(name, description string) *UInt32 {
+	return &UInt32{name: name, description: description}
+}
+
+func (k *UInt32) Name() string        { return k.name }
+func (k *UInt32) Description() string { return k.description }
+
+func (k *UInt32) Format(w io.Writer, buf []byte, l label.Label) {
+	w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *UInt32) Of(v uint32) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *UInt32) Get(lm label.Map) uint32 {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *UInt32) From(t label.Label) uint32 { return uint32(t.Unpack64()) }
+
+// UInt64 represents a key
+type UInt64 struct {
+	name        string
+	description string
+}
+
+// NewUInt64 creates a new Key for uint64 values.
+func NewUInt64(name, description string) *UInt64 {
+	return &UInt64{name: name, description: description}
+}
+
+func (k *UInt64) Name() string        { return k.name }
+func (k *UInt64) Description() string { return k.description }
+
+func (k *UInt64) Format(w io.Writer, buf []byte, l label.Label) {
+	w.Write(strconv.AppendUint(buf, k.From(l), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *UInt64) Of(v uint64) label.Label { return label.Of64(k, v) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *UInt64) Get(lm label.Map) uint64 {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *UInt64) From(t label.Label) uint64 { return t.Unpack64() }
+
+// Float32 represents a key
+type Float32 struct {
+	name        string
+	description string
+}
+
+// NewFloat32 creates a new Key for float32 values.
+func NewFloat32(name, description string) *Float32 {
+	return &Float32{name: name, description: description}
+}
+
+func (k *Float32) Name() string        { return k.name }
+func (k *Float32) Description() string { return k.description }
+
+func (k *Float32) Format(w io.Writer, buf []byte, l label.Label) {
+	w.Write(strconv.AppendFloat(buf, float64(k.From(l)), 'E', -1, 32))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Float32) Of(v float32) label.Label {
+	return label.Of64(k, uint64(math.Float32bits(v)))
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Float32) Get(lm label.Map) float32 {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Float32) From(t label.Label) float32 {
+	return math.Float32frombits(uint32(t.Unpack64()))
+}
+
+// Float64 represents a key
+type Float64 struct {
+	name        string
+	description string
+}
+
+// NewFloat64 creates a new Key for float64 values.
+func NewFloat64(name, description string) *Float64 {
+	return &Float64{name: name, description: description}
+}
+
+func (k *Float64) Name() string        { return k.name }
+func (k *Float64) Description() string { return k.description }
+
+func (k *Float64) Format(w io.Writer, buf []byte, l label.Label) {
+	w.Write(strconv.AppendFloat(buf, k.From(l), 'E', -1, 64))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Float64) Of(v float64) label.Label {
+	return label.Of64(k, math.Float64bits(v))
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Float64) Get(lm label.Map) float64 {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Float64) From(t label.Label) float64 {
+	return math.Float64frombits(t.Unpack64())
+}
+
+// String represents a key
+type String struct {
+	name        string
+	description string
+}
+
+// NewString creates a new Key for string values.
+func NewString(name, description string) *String {
+	return &String{name: name, description: description}
+}
+
+func (k *String) Name() string        { return k.name }
+func (k *String) Description() string { return k.description }
+
+func (k *String) Format(w io.Writer, buf []byte, l label.Label) {
+	w.Write(strconv.AppendQuote(buf, k.From(l)))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *String) Of(v string) label.Label { return label.OfString(k, v) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *String) Get(lm label.Map) string {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return ""
+}
+
+// From can be used to get a value from a Label.
+func (k *String) From(t label.Label) string { return t.UnpackString() }
+
+// Boolean represents a key
+type Boolean struct {
+	name        string
+	description string
+}
+
+// NewBoolean creates a new Key for bool values.
+func NewBoolean(name, description string) *Boolean {
+	return &Boolean{name: name, description: description}
+}
+
+func (k *Boolean) Name() string        { return k.name }
+func (k *Boolean) Description() string { return k.description }
+
+func (k *Boolean) Format(w io.Writer, buf []byte, l label.Label) {
+	w.Write(strconv.AppendBool(buf, k.From(l)))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Boolean) Of(v bool) label.Label {
+	if v {
+		return label.Of64(k, 1)
+	}
+	return label.Of64(k, 0)
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Boolean) Get(lm label.Map) bool {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return false
+}
+
+// From can be used to get a value from a Label.
+func (k *Boolean) From(t label.Label) bool { return t.Unpack64() > 0 }
+
+// Error represents a key
+type Error struct {
+	name        string
+	description string
+}
+
+// NewError creates a new Key for error values.
+func NewError(name, description string) *Error {
+	return &Error{name: name, description: description}
+}
+
+func (k *Error) Name() string        { return k.name }
+func (k *Error) Description() string { return k.description }
+
+func (k *Error) Format(w io.Writer, buf []byte, l label.Label) {
+	io.WriteString(w, k.From(l).Error())
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Error) Of(v error) label.Label { return label.OfValue(k, v) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Error) Get(lm label.Map) error {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return nil
+}
+
+// From can be used to get a value from a Label.
+func (k *Error) From(t label.Label) error {
+	err, _ := t.UnpackValue().(error)
+	return err
+}
diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/event/keys/standard.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/event/keys/standard.go
new file mode 100644
index 00000000000..7e958665921
--- /dev/null
+++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/event/keys/standard.go
@@ -0,0 +1,22 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package keys
+
+var (
+	// Msg is a key used to add message strings to label lists.
+	Msg = NewString("message", "a readable message")
+	// Label is a key used to indicate an event adds labels to the context.
+	Label = NewTag("label", "a label context marker")
+	// Start is used for things like traces that have a name.
+	Start = NewString("start", "span start")
+	// End is a key used to mark the end of a span.
+	End = NewTag("end", "a span end marker")
+	// Detach is a key used to mark an event that detaches a context from its span.
+	Detach = NewTag("detach", "a span detach marker")
+	// Err is a key used to add error values to label lists.
+	Err = NewError("error", "an error that occurred")
+	// Metric is a key used to indicate an event records metrics.
+	Metric = NewTag("metric", "a metric event marker")
+)
diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/event/keys/util.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/event/keys/util.go
new file mode 100644
index 00000000000..c0e8e731c90
--- /dev/null
+++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/event/keys/util.go
@@ -0,0 +1,21 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package keys
+
+import (
+	"sort"
+	"strings"
+)
+
+// Join returns a canonical join of the keys in S:
+// a sorted comma-separated string list.
+func Join[S ~[]T, T ~string](s S) string {
+	strs := make([]string, 0, len(s))
+	for _, v := range s {
+		strs = append(strs, string(v))
+	}
+	sort.Strings(strs)
+	return strings.Join(strs, ",")
+}
diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/event/label/label.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/event/label/label.go
new file mode 100644
index 00000000000..0f526e1f9ab
--- /dev/null
+++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/event/label/label.go
@@ -0,0 +1,215 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package label
+
+import (
+	"fmt"
+	"io"
+	"reflect"
+	"unsafe"
+)
+
+// Key is used as the identity of a Label.
+// Keys are intended to be compared by pointer only; the name should be unique
+// for communicating with external systems, but that is not required or enforced.
+type Key interface {
+	// Name returns the key name.
+	Name() string
+	// Description returns a string that can be used to describe the value.
+	Description() string
+
+	// Format is used in formatting to append the value of the label to the
+	// supplied buffer.
+ // The formatter may use the supplied buf as a scratch area to avoid + // allocations. + Format(w io.Writer, buf []byte, l Label) +} + +// Label holds a key and value pair. +// It is normally used when passing around lists of labels. +type Label struct { + key Key + packed uint64 + untyped interface{} +} + +// Map is the interface to a collection of Labels indexed by key. +type Map interface { + // Find returns the label that matches the supplied key. + Find(key Key) Label +} + +// List is the interface to something that provides an iterable +// list of labels. +// Iteration should start from 0 and continue until Valid returns false. +type List interface { + // Valid returns true if the index is within range for the list. + // It does not imply the label at that index will itself be valid. + Valid(index int) bool + // Label returns the label at the given index. + Label(index int) Label +} + +// list implements LabelList for a list of Labels. +type list struct { + labels []Label +} + +// filter wraps a LabelList filtering out specific labels. +type filter struct { + keys []Key + underlying List +} + +// listMap implements LabelMap for a simple list of labels. +type listMap struct { + labels []Label +} + +// mapChain implements LabelMap for a list of underlying LabelMap. +type mapChain struct { + maps []Map +} + +// OfValue creates a new label from the key and value. +// This method is for implementing new key types, label creation should +// normally be done with the Of method of the key. +func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} } + +// UnpackValue assumes the label was built using LabelOfValue and returns the value +// that was passed to that constructor. +// This method is for implementing new key types, for type safety normal +// access should be done with the From method of the key. +func (t Label) UnpackValue() interface{} { return t.untyped } + +// Of64 creates a new label from a key and a uint64. This is often +// used for non uint64 values that can be packed into a uint64. +// This method is for implementing new key types, label creation should +// normally be done with the Of method of the key. +func Of64(k Key, v uint64) Label { return Label{key: k, packed: v} } + +// Unpack64 assumes the label was built using LabelOf64 and returns the value that +// was passed to that constructor. +// This method is for implementing new key types, for type safety normal +// access should be done with the From method of the key. +func (t Label) Unpack64() uint64 { return t.packed } + +type stringptr unsafe.Pointer + +// OfString creates a new label from a key and a string. +// This method is for implementing new key types, label creation should +// normally be done with the Of method of the key. +func OfString(k Key, v string) Label { + hdr := (*reflect.StringHeader)(unsafe.Pointer(&v)) + return Label{ + key: k, + packed: uint64(hdr.Len), + untyped: stringptr(hdr.Data), + } +} + +// UnpackString assumes the label was built using LabelOfString and returns the +// value that was passed to that constructor. +// This method is for implementing new key types, for type safety normal +// access should be done with the From method of the key. +func (t Label) UnpackString() string { + var v string + hdr := (*reflect.StringHeader)(unsafe.Pointer(&v)) + hdr.Data = uintptr(t.untyped.(stringptr)) + hdr.Len = int(t.packed) + return v +} + +// Valid returns true if the Label is a valid one (it has a key). 
+func (t Label) Valid() bool { return t.key != nil } + +// Key returns the key of this Label. +func (t Label) Key() Key { return t.key } + +// Format is used for debug printing of labels. +func (t Label) Format(f fmt.State, r rune) { + if !t.Valid() { + io.WriteString(f, `nil`) + return + } + io.WriteString(f, t.Key().Name()) + io.WriteString(f, "=") + var buf [128]byte + t.Key().Format(f, buf[:0], t) +} + +func (l *list) Valid(index int) bool { + return index >= 0 && index < len(l.labels) +} + +func (l *list) Label(index int) Label { + return l.labels[index] +} + +func (f *filter) Valid(index int) bool { + return f.underlying.Valid(index) +} + +func (f *filter) Label(index int) Label { + l := f.underlying.Label(index) + for _, f := range f.keys { + if l.Key() == f { + return Label{} + } + } + return l +} + +func (lm listMap) Find(key Key) Label { + for _, l := range lm.labels { + if l.Key() == key { + return l + } + } + return Label{} +} + +func (c mapChain) Find(key Key) Label { + for _, src := range c.maps { + l := src.Find(key) + if l.Valid() { + return l + } + } + return Label{} +} + +var emptyList = &list{} + +func NewList(labels ...Label) List { + if len(labels) == 0 { + return emptyList + } + return &list{labels: labels} +} + +func Filter(l List, keys ...Key) List { + if len(keys) == 0 { + return l + } + return &filter{keys: keys, underlying: l} +} + +func NewMap(labels ...Label) Map { + return listMap{labels: labels} +} + +func MergeMaps(srcs ...Map) Map { + var nonNil []Map + for _, src := range srcs { + if src != nil { + nonNil = append(nonNil, src) + } + } + if len(nonNil) == 1 { + return nonNil[0] + } + return mapChain{maps: nonNil} +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/bimport.go new file mode 100644 index 00000000000..d98b0db2a9a --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -0,0 +1,150 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains the remaining vestiges of +// $GOROOT/src/go/internal/gcimporter/bimport.go. + +package gcimporter + +import ( + "fmt" + "go/token" + "go/types" + "sync" +) + +func errorf(format string, args ...interface{}) { + panic(fmt.Sprintf(format, args...)) +} + +const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go + +// Synthesize a token.Pos +type fakeFileSet struct { + fset *token.FileSet + files map[string]*fileInfo +} + +type fileInfo struct { + file *token.File + lastline int +} + +const maxlines = 64 * 1024 + +func (s *fakeFileSet) pos(file string, line, column int) token.Pos { + // TODO(mdempsky): Make use of column. + + // Since we don't know the set of needed file positions, we reserve maxlines + // positions per file. We delay calling token.File.SetLines until all + // positions have been calculated (by way of fakeFileSet.setLines), so that + // we can avoid setting unnecessary lines. See also golang/go#46586. + f := s.files[file] + if f == nil { + f = &fileInfo{file: s.fset.AddFile(file, -1, maxlines)} + s.files[file] = f + } + if line > maxlines { + line = 1 + } + if line > f.lastline { + f.lastline = line + } + + // Return a fake position assuming that f.file consists only of newlines. 
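+	// Under that assumption line N starts at byte offset N-1,
+	// so Base()+line-1 is the position of (line N, column 1).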
+ return token.Pos(f.file.Base() + line - 1) +} + +func (s *fakeFileSet) setLines() { + fakeLinesOnce.Do(func() { + fakeLines = make([]int, maxlines) + for i := range fakeLines { + fakeLines[i] = i + } + }) + for _, f := range s.files { + f.file.SetLines(fakeLines[:f.lastline]) + } +} + +var ( + fakeLines []int + fakeLinesOnce sync.Once +) + +func chanDir(d int) types.ChanDir { + // tag values must match the constants in cmd/compile/internal/gc/go.go + switch d { + case 1 /* Crecv */ : + return types.RecvOnly + case 2 /* Csend */ : + return types.SendOnly + case 3 /* Cboth */ : + return types.SendRecv + default: + errorf("unexpected channel dir %d", d) + return 0 + } +} + +var predeclOnce sync.Once +var predecl []types.Type // initialized lazily + +func predeclared() []types.Type { + predeclOnce.Do(func() { + // initialize lazily to be sure that all + // elements have been initialized before + predecl = []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + } + predecl = append(predecl, additionalPredeclared()...) + }) + return predecl +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go new file mode 100644 index 00000000000..f6437feb1cf --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go @@ -0,0 +1,99 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. + +// This file implements FindExportData. + +package gcimporter + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" +) + +func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) { + // See $GOROOT/include/ar.h. 
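+	// The entry header is a fixed 60-byte record whose field widths match
+	// the make call below (per ar.h: name 16, mtime 12, uid 6, gid 6,
+	// mode 8, size 10, and the 2-byte "`\n" terminator checked further down).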
+ hdr := make([]byte, 16+12+6+6+8+10+2) + _, err = io.ReadFull(r, hdr) + if err != nil { + return + } + // leave for debugging + if false { + fmt.Printf("header: %s", hdr) + } + s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) + length, err := strconv.Atoi(s) + size = int64(length) + if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { + err = fmt.Errorf("invalid archive header") + return + } + name = strings.TrimSpace(string(hdr[:16])) + return +} + +// FindExportData positions the reader r at the beginning of the +// export data section of an underlying GC-created object/archive +// file by reading from it. The reader must be positioned at the +// start of the file before calling this function. The hdr result +// is the string before the export data, either "$$" or "$$B". +// The size result is the length of the export data in bytes, or -1 if not known. +func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { + // Read first line to make sure this is an object file. + line, err := r.ReadSlice('\n') + if err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + + if string(line) == "!\n" { + // Archive file. Scan to __.PKGDEF. + var name string + if name, size, err = readGopackHeader(r); err != nil { + return + } + + // First entry should be __.PKGDEF. + if name != "__.PKGDEF" { + err = fmt.Errorf("go archive is missing __.PKGDEF") + return + } + + // Read first line of __.PKGDEF data, so that line + // is once again the first line of the input. + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + size -= int64(len(line)) + } + + // Now at __.PKGDEF in archive or still at beginning of file. + // Either way, line should begin with "go object ". + if !strings.HasPrefix(string(line), "go object ") { + err = fmt.Errorf("not a Go object file") + return + } + + // Skip over object header to export data. + // Begins after first line starting with $$. + for line[0] != '$' { + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + size -= int64(len(line)) + } + hdr = string(line) + if size < 0 { + size = -1 + } + + return +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go new file mode 100644 index 00000000000..39df91124a4 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -0,0 +1,266 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a reduced copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go. + +// Package gcimporter provides various functions for reading +// gc-generated object files that can be used to implement the +// Importer interface defined by the Go 1.5 standard library package. +// +// The encoding is deterministic: if the encoder is applied twice to +// the same types.Package data structure, both encodings are equal. +// This property may be important to avoid spurious changes in +// applications such as build systems. +// +// However, the encoder is not necessarily idempotent. 
Importing an +// exported package may yield a types.Package that, while it +// represents the same set of Go types as the original, may differ in +// the details of its internal representation. Because of these +// differences, re-encoding the imported package may yield a +// different, but equally valid, encoding of the package. +package gcimporter // import "golang.org/x/tools/internal/gcimporter" + +import ( + "bufio" + "bytes" + "fmt" + "go/build" + "go/token" + "go/types" + "io" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" +) + +const ( + // Enable debug during development: it adds some additional checks, and + // prevents errors from being recovered. + debug = false + + // If trace is set, debugging output is printed to std out. + trace = false +) + +var exportMap sync.Map // package dir → func() (string, bool) + +// lookupGorootExport returns the location of the export data +// (normally found in the build cache, but located in GOROOT/pkg +// in prior Go releases) for the package located in pkgDir. +// +// (We use the package's directory instead of its import path +// mainly to simplify handling of the packages in src/vendor +// and cmd/vendor.) +func lookupGorootExport(pkgDir string) (string, bool) { + f, ok := exportMap.Load(pkgDir) + if !ok { + var ( + listOnce sync.Once + exportPath string + ) + f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) { + listOnce.Do(func() { + cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir) + cmd.Dir = build.Default.GOROOT + var output []byte + output, err := cmd.Output() + if err != nil { + return + } + + exports := strings.Split(string(bytes.TrimSpace(output)), "\n") + if len(exports) != 1 { + return + } + + exportPath = exports[0] + }) + + return exportPath, exportPath != "" + }) + } + + return f.(func() (string, bool))() +} + +var pkgExts = [...]string{".a", ".o"} + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. +// If no file was found, an empty filename is returned. +func FindPkg(path, srcDir string) (filename, id string) { + if path == "" { + return + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present. 
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + var ok bool + if bp.Goroot && bp.Dir != "" { + filename, ok = lookupGorootExport(bp.Dir) + } + if !ok { + id = path // make sure we have an id to print in error message + return + } + } else { + noext = strings.TrimSuffix(bp.PkgObj, ".a") + id = bp.ImportPath + } + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + if filename != "" { + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + filename = "" // not found + return +} + +// Import imports a gc-generated package given its import path and srcDir, adds +// the corresponding package object to the packages map, and returns the object. +// The packages map must contain all packages already imported. +func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { + var rc io.ReadCloser + var filename, id string + if lookup != nil { + // With custom lookup specified, assume that caller has + // converted path to a canonical import path for use in the map. + if path == "unsafe" { + return types.Unsafe, nil + } + id = path + + // No need to re-import if the package was imported completely before. + if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + f, err := lookup(path) + if err != nil { + return nil, err + } + rc = f + } else { + filename, id = FindPkg(path, srcDir) + if filename == "" { + if path == "unsafe" { + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %q", id) + } + + // no need to re-import if the package was imported completely before + if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + // add file name to error + err = fmt.Errorf("%s: %v", filename, err) + } + }() + rc = f + } + defer rc.Close() + + var hdr string + var size int64 + buf := bufio.NewReader(rc) + if hdr, size, err = FindExportData(buf); err != nil { + return + } + + switch hdr { + case "$$B\n": + var data []byte + data, err = io.ReadAll(buf) + if err != nil { + break + } + + // TODO(gri): allow clients of go/importer to provide a FileSet. + // Or, define a new standard go/types/gcexportdata package. + fset := token.NewFileSet() + + // Select appropriate importer. 
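+		// The first byte of the export data selects the decoder, as the
+		// switch below spells out: 'v', 'c', and 'd' mark the old binary
+		// format (now rejected), 'i' the indexed format, and 'u' the
+		// unified format.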
+	if len(data) > 0 {
+		switch data[0] {
+		case 'v', 'c', 'd': // binary, till go1.10
+			return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
+
+		case 'i': // indexed, till go1.19
+			_, pkg, err := IImportData(fset, packages, data[1:], id)
+			return pkg, err
+
+		case 'u': // unified, from go1.20
+			_, pkg, err := UImportData(fset, packages, data[1:size], id)
+			return pkg, err
+
+		default:
+			l := len(data)
+			if l > 10 {
+				l = 10
+			}
+			return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id)
+		}
+	}
+
+	default:
+		err = fmt.Errorf("unknown export data header: %q", hdr)
+	}
+
+	return
+}
+
+type byPath []*types.Package
+
+func (a byPath) Len() int           { return len(a) }
+func (a byPath) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
new file mode 100644
index 00000000000..deeb67f315a
--- /dev/null
+++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
@@ -0,0 +1,1332 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed binary package export.
+// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
+// see that file for specification of the format.
+
+package gcimporter
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"go/constant"
+	"go/token"
+	"go/types"
+	"io"
+	"math/big"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+
+	"golang.org/x/tools/go/types/objectpath"
+	"golang.org/x/tools/internal/aliases"
+	"golang.org/x/tools/internal/tokeninternal"
+)
+
+// IExportShallow encodes "shallow" export data for the specified package.
+//
+// No promises are made about the encoding other than that it can be decoded by
+// the same version of IExportShallow. If you plan to save export data in the
+// file system, be sure to include a cryptographic digest of the executable in
+// the key to avoid version skew.
+//
+// If the provided reportf func is non-nil, it will be used for reporting bugs
+// encountered during export.
+// TODO(rfindley): remove reportf when we are confident enough in the new
+// objectpath encoding.
+func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) ([]byte, error) {
+	// In principle this operation can only fail if out.Write fails,
+	// but that's impossible for bytes.Buffer---and as a matter of
+	// fact iexportCommon doesn't even check for I/O errors.
+	// TODO(adonovan): handle I/O errors properly.
+	// TODO(adonovan): use byte slices throughout, avoiding copying.
+	const bundle, shallow = false, true
+	var out bytes.Buffer
+	err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg})
+	return out.Bytes(), err
+}
+
+// IImportShallow decodes "shallow" types.Package data encoded by
+// IExportShallow in the same executable. This function cannot import data from
+// cmd/compile or gcexportdata.Write.
+//
+// The importer calls getPackages to obtain package symbols for all
+// packages mentioned in the export data, including the one being
+// decoded.
+//
+// If the provided reportf func is non-nil, it will be used for reporting bugs
+// encountered during import.
+// TODO(rfindley): remove reportf when we are confident enough in the new +// objectpath encoding. +func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, path string, reportf ReportFunc) (*types.Package, error) { + const bundle = false + const shallow = true + pkgs, err := iimportCommon(fset, getPackages, data, bundle, path, shallow, reportf) + if err != nil { + return nil, err + } + return pkgs[0], nil +} + +// ReportFunc is the type of a function used to report formatted bugs. +type ReportFunc = func(string, ...interface{}) + +// Current bundled export format version. Increase with each format change. +// 0: initial implementation +const bundleVersion = 0 + +// IExportData writes indexed export data for pkg to out. +// +// If no file set is provided, position info will be missing. +// The package path of the top-level package will not be recorded, +// so that calls to IImportData can override with a provided package path. +func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error { + const bundle, shallow = false, false + return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}) +} + +// IExportBundle writes an indexed export bundle for pkgs to out. +func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { + const bundle, shallow = true, false + return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs) +} + +func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package) (err error) { + if !debug { + defer func() { + if e := recover(); e != nil { + if ierr, ok := e.(internalError); ok { + err = ierr + return + } + // Not an internal error; panic again. + panic(e) + } + }() + } + + p := iexporter{ + fset: fset, + version: version, + shallow: shallow, + allPkgs: map[*types.Package]bool{}, + stringIndex: map[string]uint64{}, + declIndex: map[types.Object]uint64{}, + tparamNames: map[types.Object]string{}, + typIndex: map[types.Type]uint64{}, + } + if !bundle { + p.localpkg = pkgs[0] + } + + for i, pt := range predeclared() { + p.typIndex[pt] = uint64(i) + } + if len(p.typIndex) > predeclReserved { + panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)) + } + + // Initialize work queue with exported declarations. + for _, pkg := range pkgs { + scope := pkg.Scope() + for _, name := range scope.Names() { + if token.IsExported(name) { + p.pushDecl(scope.Lookup(name)) + } + } + + if bundle { + // Ensure pkg and its imports are included in the index. + p.allPkgs[pkg] = true + for _, imp := range pkg.Imports() { + p.allPkgs[imp] = true + } + } + } + + // Loop until no more work. + for !p.declTodo.empty() { + p.doDecl(p.declTodo.popHead()) + } + + // Produce index of offset of each file record in files. + var files intWriter + var fileOffset []uint64 // fileOffset[i] is offset in files of file encoded as i + if p.shallow { + fileOffset = make([]uint64, len(p.fileInfos)) + for i, info := range p.fileInfos { + fileOffset[i] = uint64(files.Len()) + p.encodeFile(&files, info.file, info.needed) + } + } + + // Append indices to data0 section. + dataLen := uint64(p.data0.Len()) + w := p.newWriter() + w.writeIndex(p.declIndex) + + if bundle { + w.uint64(uint64(len(pkgs))) + for _, pkg := range pkgs { + w.pkg(pkg) + imps := pkg.Imports() + w.uint64(uint64(len(imps))) + for _, imp := range imps { + w.pkg(imp) + } + } + } + w.flush() + + // Assemble header. 
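+	// The header mirrors the writes below: an optional bundle version
+	// (bundle mode only), the format version, the string-table length,
+	// an optional file table (shallow mode only: its length, the file
+	// count, and one offset per file), and finally the data length.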
+	var hdr intWriter
+	if bundle {
+		hdr.uint64(bundleVersion)
+	}
+	hdr.uint64(uint64(p.version))
+	hdr.uint64(uint64(p.strings.Len()))
+	if p.shallow {
+		hdr.uint64(uint64(files.Len()))
+		hdr.uint64(uint64(len(fileOffset)))
+		for _, offset := range fileOffset {
+			hdr.uint64(offset)
+		}
+	}
+	hdr.uint64(dataLen)
+
+	// Flush output.
+	io.Copy(out, &hdr)
+	io.Copy(out, &p.strings)
+	if p.shallow {
+		io.Copy(out, &files)
+	}
+	io.Copy(out, &p.data0)
+
+	return nil
+}
+
+// encodeFile writes to w a representation of the file sufficient to
+// faithfully restore position information about all needed offsets.
+// Mutates the needed array.
+func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) {
+	_ = needed[0] // precondition: needed is non-empty
+
+	w.uint64(p.stringOff(file.Name()))
+
+	size := uint64(file.Size())
+	w.uint64(size)
+
+	// Sort the set of needed offsets. Duplicates are harmless.
+	sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] })
+
+	lines := tokeninternal.GetLines(file) // byte offset of each line start
+	w.uint64(uint64(len(lines)))
+
+	// Rather than record the entire array of line start offsets,
+	// we save only a sparse list of (index, offset) pairs for
+	// the start of each line that contains a needed position.
+	var sparse [][2]int // (index, offset) pairs
+outer:
+	for i, lineStart := range lines {
+		lineEnd := size
+		if i < len(lines)-1 {
+			lineEnd = uint64(lines[i+1])
+		}
+		// Does this line contain a needed offset?
+		if needed[0] < lineEnd {
+			sparse = append(sparse, [2]int{i, lineStart})
+			for needed[0] < lineEnd {
+				needed = needed[1:]
+				if len(needed) == 0 {
+					break outer
+				}
+			}
+		}
+	}
+
+	// Delta-encode the columns.
+	w.uint64(uint64(len(sparse)))
+	var prev [2]int
+	for _, pair := range sparse {
+		w.uint64(uint64(pair[0] - prev[0]))
+		w.uint64(uint64(pair[1] - prev[1]))
+		prev = pair
+	}
+}
+
+// writeIndex writes out an object index. mainIndex indicates whether
+// we're writing out the main index, which is also read by
+// non-compiler tools and includes a complete package description
+// (i.e., name and height).
+func (w *exportWriter) writeIndex(index map[types.Object]uint64) {
+	type pkgObj struct {
+		obj  types.Object
+		name string // qualified name; differs from obj.Name for type params
+	}
+	// Build a map from packages to objects from that package.
+	pkgObjs := map[*types.Package][]pkgObj{}
+
+	// For the main index, make sure to include every package that
+	// we reference, even if we're not exporting (or reexporting)
+	// any symbols from it.
+	if w.p.localpkg != nil {
+		pkgObjs[w.p.localpkg] = nil
+	}
+	for pkg := range w.p.allPkgs {
+		pkgObjs[pkg] = nil
+	}
+
+	for obj := range index {
+		name := w.p.exportName(obj)
+		pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], pkgObj{obj, name})
+	}
+
+	var pkgs []*types.Package
+	for pkg, objs := range pkgObjs {
+		pkgs = append(pkgs, pkg)
+
+		sort.Slice(objs, func(i, j int) bool {
+			return objs[i].name < objs[j].name
+		})
+	}
+
+	sort.Slice(pkgs, func(i, j int) bool {
+		return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j])
+	})
+
+	w.uint64(uint64(len(pkgs)))
+	for _, pkg := range pkgs {
+		w.string(w.exportPath(pkg))
+		w.string(pkg.Name())
+		w.uint64(uint64(0)) // package height is not needed for go/types
+
+		objs := pkgObjs[pkg]
+		w.uint64(uint64(len(objs)))
+		for _, obj := range objs {
+			w.string(obj.name)
+			w.uint64(index[obj.obj])
+		}
+	}
+}
+
+// exportName returns the 'exported' name of an object.
It differs from
+// obj.Name() only for type parameters (see tparamExportName for details).
+func (p *iexporter) exportName(obj types.Object) (res string) {
+	if name := p.tparamNames[obj]; name != "" {
+		return name
+	}
+	return obj.Name()
+}
+
+type iexporter struct {
+	fset    *token.FileSet
+	out     *bytes.Buffer
+	version int
+
+	shallow    bool                // don't put types from other packages in the index
+	objEncoder *objectpath.Encoder // encodes objects from other packages in shallow mode; lazily allocated
+	localpkg   *types.Package      // (nil in bundle mode)
+
+	// allPkgs tracks all packages that have been referenced by
+	// the export data, so we can ensure to include them in the
+	// main index.
+	allPkgs map[*types.Package]bool
+
+	declTodo objQueue
+
+	strings     intWriter
+	stringIndex map[string]uint64
+
+	// In shallow mode, object positions are encoded as (file, offset).
+	// Each file is recorded as a line-number table.
+	// Only the lines of needed positions are saved faithfully.
+	fileInfo  map[*token.File]uint64 // value is index in fileInfos
+	fileInfos []*filePositions
+
+	data0       intWriter
+	declIndex   map[types.Object]uint64
+	tparamNames map[types.Object]string // typeparam->exported name
+	typIndex    map[types.Type]uint64
+
+	indent int // for tracing support
+}
+
+type filePositions struct {
+	file   *token.File
+	needed []uint64 // unordered list of needed file offsets
+}
+
+func (p *iexporter) trace(format string, args ...interface{}) {
+	if !trace {
+		// Call sites should also be guarded, but having this check here allows
+		// easily enabling/disabling debug trace statements.
+		return
+	}
+	fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...)
+}
+
+// objectpathEncoder returns the lazily allocated objectpath.Encoder to use
+// when encoding objects in other packages during shallow export.
+//
+// Using a shared Encoder amortizes some of the cost of objectpath search.
+func (p *iexporter) objectpathEncoder() *objectpath.Encoder {
+	if p.objEncoder == nil {
+		p.objEncoder = new(objectpath.Encoder)
+	}
+	return p.objEncoder
+}
+
+// stringOff returns the offset of s within the string section.
+// If not already present, it's added to the end.
+func (p *iexporter) stringOff(s string) uint64 {
+	off, ok := p.stringIndex[s]
+	if !ok {
+		off = uint64(p.strings.Len())
+		p.stringIndex[s] = off
+
+		p.strings.uint64(uint64(len(s)))
+		p.strings.WriteString(s)
+	}
+	return off
+}
+
+// fileIndexAndOffset returns the index of the token.File and the byte offset of pos within it.
+func (p *iexporter) fileIndexAndOffset(file *token.File, pos token.Pos) (uint64, uint64) {
+	index, ok := p.fileInfo[file]
+	if !ok {
+		index = uint64(len(p.fileInfo))
+		p.fileInfos = append(p.fileInfos, &filePositions{file: file})
+		if p.fileInfo == nil {
+			p.fileInfo = make(map[*token.File]uint64)
+		}
+		p.fileInfo[file] = index
+	}
+	// Record each needed offset.
+	info := p.fileInfos[index]
+	offset := uint64(file.Offset(pos))
+	info.needed = append(info.needed, offset)
+
+	return index, offset
+}
+
+// pushDecl adds obj to the declaration work queue, if not already present.
+func (p *iexporter) pushDecl(obj types.Object) {
+	// Package unsafe is known to the compiler and predeclared.
+	// Caller should not ask us to export it.
+	if obj.Pkg() == types.Unsafe {
+		panic("cannot export package unsafe")
+	}
+
+	// Shallow export data: don't index decls from other packages.
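+	// (Such objects remain reachable in shallow mode: struct fields and
+	// interface methods from other packages are encoded via objectPath
+	// instead; see exportWriter.objectPath below.)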
+ if p.shallow && obj.Pkg() != p.localpkg { + return + } + + if _, ok := p.declIndex[obj]; ok { + return + } + + p.declIndex[obj] = ^uint64(0) // mark obj present in work queue + p.declTodo.pushTail(obj) +} + +// exportWriter handles writing out individual data section chunks. +type exportWriter struct { + p *iexporter + + data intWriter + prevFile string + prevLine int64 + prevColumn int64 +} + +func (w *exportWriter) exportPath(pkg *types.Package) string { + if pkg == w.p.localpkg { + return "" + } + return pkg.Path() +} + +func (p *iexporter) doDecl(obj types.Object) { + if trace { + p.trace("exporting decl %v (%T)", obj, obj) + p.indent++ + defer func() { + p.indent-- + p.trace("=> %s", obj) + }() + } + w := p.newWriter() + + switch obj := obj.(type) { + case *types.Var: + w.tag(varTag) + w.pos(obj.Pos()) + w.typ(obj.Type(), obj.Pkg()) + + case *types.Func: + sig, _ := obj.Type().(*types.Signature) + if sig.Recv() != nil { + // We shouldn't see methods in the package scope, + // but the type checker may repair "func () F() {}" + // to "func (Invalid) F()" and then treat it like "func F()", + // so allow that. See golang/go#57729. + if sig.Recv().Type() != types.Typ[types.Invalid] { + panic(internalErrorf("unexpected method: %v", sig)) + } + } + + // Function. + if sig.TypeParams().Len() == 0 { + w.tag(funcTag) + } else { + w.tag(genericFuncTag) + } + w.pos(obj.Pos()) + // The tparam list of the function type is the declaration of the type + // params. So, write out the type params right now. Then those type params + // will be referenced via their type offset (via typOff) in all other + // places in the signature and function where they are used. + // + // While importing the type parameters, tparamList computes and records + // their export name, so that it can be later used when writing the index. + if tparams := sig.TypeParams(); tparams.Len() > 0 { + w.tparamList(obj.Name(), tparams, obj.Pkg()) + } + w.signature(sig) + + case *types.Const: + w.tag(constTag) + w.pos(obj.Pos()) + w.value(obj.Type(), obj.Val()) + + case *types.TypeName: + t := obj.Type() + + if tparam, ok := aliases.Unalias(t).(*types.TypeParam); ok { + w.tag(typeParamTag) + w.pos(obj.Pos()) + constraint := tparam.Constraint() + if p.version >= iexportVersionGo1_18 { + implicit := false + if iface, _ := aliases.Unalias(constraint).(*types.Interface); iface != nil { + implicit = iface.IsImplicit() + } + w.bool(implicit) + } + w.typ(constraint, obj.Pkg()) + break + } + + if obj.IsAlias() { + w.tag(aliasTag) + w.pos(obj.Pos()) + if alias, ok := t.(*aliases.Alias); ok { + // Preserve materialized aliases, + // even of non-exported types. + t = aliases.Rhs(alias) + } + w.typ(t, obj.Pkg()) + break + } + + // Defined type. + named, ok := t.(*types.Named) + if !ok { + panic(internalErrorf("%s is not a defined type", t)) + } + + if named.TypeParams().Len() == 0 { + w.tag(typeTag) + } else { + w.tag(genericTypeTag) + } + w.pos(obj.Pos()) + + if named.TypeParams().Len() > 0 { + // While importing the type parameters, tparamList computes and records + // their export name, so that it can be later used when writing the index. 
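+		// (Hypothetical example for illustration: for a declaration
+		// "type List[K comparable, V any] ...", the type parameters are
+		// recorded under the export names "List.K" and "List.V"; a blank
+		// parameter "_" at index 0 would be recorded as "List.$0". See
+		// tparamExportName below.)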
+ w.tparamList(obj.Name(), named.TypeParams(), obj.Pkg()) + } + + underlying := named.Underlying() + w.typ(underlying, obj.Pkg()) + + if types.IsInterface(t) { + break + } + + n := named.NumMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := named.Method(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + + // Receiver type parameters are type arguments of the receiver type, so + // their name must be qualified before exporting recv. + if rparams := sig.RecvTypeParams(); rparams.Len() > 0 { + prefix := obj.Name() + "." + m.Name() + for i := 0; i < rparams.Len(); i++ { + rparam := rparams.At(i) + name := tparamExportName(prefix, rparam) + w.p.tparamNames[rparam.Obj()] = name + } + } + w.param(sig.Recv()) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected object: %v", obj)) + } + + p.declIndex[obj] = w.flush() +} + +func (w *exportWriter) tag(tag byte) { + w.data.WriteByte(tag) +} + +func (w *exportWriter) pos(pos token.Pos) { + if w.p.shallow { + w.posV2(pos) + } else if w.p.version >= iexportVersionPosCol { + w.posV1(pos) + } else { + w.posV0(pos) + } +} + +// posV2 encoding (used only in shallow mode) records positions as +// (file, offset), where file is the index in the token.File table +// (which records the file name and newline offsets) and offset is a +// byte offset. It effectively ignores //line directives. +func (w *exportWriter) posV2(pos token.Pos) { + if pos == token.NoPos { + w.uint64(0) + return + } + file := w.p.fset.File(pos) // fset must be non-nil + index, offset := w.p.fileIndexAndOffset(file, pos) + w.uint64(1 + index) + w.uint64(offset) +} + +func (w *exportWriter) posV1(pos token.Pos) { + if w.p.fset == nil { + w.int64(0) + return + } + + p := w.p.fset.Position(pos) + file := p.Filename + line := int64(p.Line) + column := int64(p.Column) + + deltaColumn := (column - w.prevColumn) << 1 + deltaLine := (line - w.prevLine) << 1 + + if file != w.prevFile { + deltaLine |= 1 + } + if deltaLine != 0 { + deltaColumn |= 1 + } + + w.int64(deltaColumn) + if deltaColumn&1 != 0 { + w.int64(deltaLine) + if deltaLine&1 != 0 { + w.string(file) + } + } + + w.prevFile = file + w.prevLine = line + w.prevColumn = column +} + +func (w *exportWriter) posV0(pos token.Pos) { + if w.p.fset == nil { + w.int64(0) + return + } + + p := w.p.fset.Position(pos) + file := p.Filename + line := int64(p.Line) + + // When file is the same as the last position (common case), + // we can save a few bytes by delta encoding just the line + // number. + // + // Note: Because data objects may be read out of order (or not + // at all), we can only apply delta encoding within a single + // object. This is handled implicitly by tracking prevFile and + // prevLine as fields of exportWriter. + + if file == w.prevFile { + delta := line - w.prevLine + w.int64(delta) + if delta == deltaNewFile { + w.int64(-1) + } + } else { + w.int64(deltaNewFile) + w.int64(line) // line >= 0 + w.string(file) + w.prevFile = file + } + w.prevLine = line +} + +func (w *exportWriter) pkg(pkg *types.Package) { + // Ensure any referenced packages are declared in the main index. + w.p.allPkgs[pkg] = true + + w.string(w.exportPath(pkg)) +} + +func (w *exportWriter) qualifiedType(obj *types.TypeName) { + name := w.p.exportName(obj) + + // Ensure any referenced declarations are written out too. + w.p.pushDecl(obj) + w.string(name) + w.pkg(obj.Pkg()) +} + +// TODO(rfindley): what does 'pkg' even mean here? 
It would be better to pass +// it in explicitly into signatures and structs that may use it for +// constructing fields. +func (w *exportWriter) typ(t types.Type, pkg *types.Package) { + w.data.uint64(w.p.typOff(t, pkg)) +} + +func (p *iexporter) newWriter() *exportWriter { + return &exportWriter{p: p} +} + +func (w *exportWriter) flush() uint64 { + off := uint64(w.p.data0.Len()) + io.Copy(&w.p.data0, &w.data) + return off +} + +func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 { + off, ok := p.typIndex[t] + if !ok { + w := p.newWriter() + w.doTyp(t, pkg) + off = predeclReserved + w.flush() + p.typIndex[t] = off + } + return off +} + +func (w *exportWriter) startType(k itag) { + w.data.uint64(uint64(k)) +} + +func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { + if trace { + w.p.trace("exporting type %s (%T)", t, t) + w.p.indent++ + defer func() { + w.p.indent-- + w.p.trace("=> %s", t) + }() + } + switch t := t.(type) { + case *aliases.Alias: + // TODO(adonovan): support parameterized aliases, following *types.Named. + w.startType(aliasType) + w.qualifiedType(t.Obj()) + + case *types.Named: + if targs := t.TypeArgs(); targs.Len() > 0 { + w.startType(instanceType) + // TODO(rfindley): investigate if this position is correct, and if it + // matters. + w.pos(t.Obj().Pos()) + w.typeList(targs, pkg) + w.typ(t.Origin(), pkg) + return + } + w.startType(definedType) + w.qualifiedType(t.Obj()) + + case *types.TypeParam: + w.startType(typeParamType) + w.qualifiedType(t.Obj()) + + case *types.Pointer: + w.startType(pointerType) + w.typ(t.Elem(), pkg) + + case *types.Slice: + w.startType(sliceType) + w.typ(t.Elem(), pkg) + + case *types.Array: + w.startType(arrayType) + w.uint64(uint64(t.Len())) + w.typ(t.Elem(), pkg) + + case *types.Chan: + w.startType(chanType) + // 1 RecvOnly; 2 SendOnly; 3 SendRecv + var dir uint64 + switch t.Dir() { + case types.RecvOnly: + dir = 1 + case types.SendOnly: + dir = 2 + case types.SendRecv: + dir = 3 + } + w.uint64(dir) + w.typ(t.Elem(), pkg) + + case *types.Map: + w.startType(mapType) + w.typ(t.Key(), pkg) + w.typ(t.Elem(), pkg) + + case *types.Signature: + w.startType(signatureType) + w.pkg(pkg) + w.signature(t) + + case *types.Struct: + w.startType(structType) + n := t.NumFields() + // Even for struct{} we must emit some qualifying package, because that's + // what the compiler does, and thus that's what the importer expects. + fieldPkg := pkg + if n > 0 { + fieldPkg = t.Field(0).Pkg() + } + if fieldPkg == nil { + // TODO(rfindley): improve this very hacky logic. + // + // The importer expects a package to be set for all struct types, even + // those with no fields. A better encoding might be to set NumFields + // before pkg. setPkg panics with a nil package, which may be possible + // to reach with invalid packages (and perhaps valid packages, too?), so + // (arbitrarily) set the localpkg if available. + // + // Alternatively, we may be able to simply guarantee that pkg != nil, by + // reconsidering the encoding of constant values. 
+			if w.p.shallow {
+				fieldPkg = w.p.localpkg
+			} else {
+				panic(internalErrorf("no package to set for empty struct"))
+			}
+		}
+		w.pkg(fieldPkg)
+		w.uint64(uint64(n))
+
+		for i := 0; i < n; i++ {
+			f := t.Field(i)
+			if w.p.shallow {
+				w.objectPath(f)
+			}
+			w.pos(f.Pos())
+			w.string(f.Name()) // unexported fields implicitly qualified by prior setPkg
+			w.typ(f.Type(), fieldPkg)
+			w.bool(f.Anonymous())
+			w.string(t.Tag(i)) // note (or tag)
+		}
+
+	case *types.Interface:
+		w.startType(interfaceType)
+		w.pkg(pkg)
+
+		n := t.NumEmbeddeds()
+		w.uint64(uint64(n))
+		for i := 0; i < n; i++ {
+			ft := t.EmbeddedType(i)
+			tPkg := pkg
+			if named, _ := aliases.Unalias(ft).(*types.Named); named != nil {
+				w.pos(named.Obj().Pos())
+			} else {
+				w.pos(token.NoPos)
+			}
+			w.typ(ft, tPkg)
+		}
+
+		// See comment for struct fields. In shallow mode we change the encoding
+		// for interface methods that are promoted from other packages.
+
+		n = t.NumExplicitMethods()
+		w.uint64(uint64(n))
+		for i := 0; i < n; i++ {
+			m := t.ExplicitMethod(i)
+			if w.p.shallow {
+				w.objectPath(m)
+			}
+			w.pos(m.Pos())
+			w.string(m.Name())
+			sig, _ := m.Type().(*types.Signature)
+			w.signature(sig)
+		}
+
+	case *types.Union:
+		w.startType(unionType)
+		nt := t.Len()
+		w.uint64(uint64(nt))
+		for i := 0; i < nt; i++ {
+			term := t.Term(i)
+			w.bool(term.Tilde())
+			w.typ(term.Type(), pkg)
+		}
+
+	default:
+		panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t)))
+	}
+}
+
+// objectPath writes the package and objectPath to use to look up obj in a
+// different package, when encoding in "shallow" mode.
+//
+// When doing a shallow import, the importer creates only the local package,
+// and requests package symbols for dependencies from the client.
+// However, certain types defined in the local package may hold objects defined
+// (perhaps deeply) within another package.
+//
+// For example, consider the following:
+//
+//	package a
+//	func F() chan * map[string] struct { X int }
+//
+//	package b
+//	import "a"
+//	var B = a.F()
+//
+// In this example, the type of b.B holds fields defined in package a.
+// In order to have the correct canonical objects for the field defined in the
+// type of B, they are encoded as objectPaths and later looked up in the
+// importer. The same problem applies to interface methods.
+func (w *exportWriter) objectPath(obj types.Object) {
+	if obj.Pkg() == nil || obj.Pkg() == w.p.localpkg {
+		// obj.Pkg() may be nil for the builtin error.Error.
+		// In this case, or if obj is declared in the local package, no need to
+		// encode.
+		w.string("")
+		return
+	}
+	objectPath, err := w.p.objectpathEncoder().For(obj)
+	if err != nil {
+		// Fall back to the empty string, which will cause the importer to create a
+		// new object, which matches earlier behavior. Creating a new object is
+		// sufficient for many purposes (such as type checking), but causes certain
+		// references algorithms to fail (golang/go#60819). However, we didn't
+		// notice this problem during months of gopls@v0.12.0 testing.
+		//
+		// TODO(golang/go#61674): this workaround is insufficient, as in the case
+		// where the field is forwarded from an instantiated type and may not
+		// appear in the export data of the original package:
+		//
+		//	// package a
+		//	type A[P any] struct{ F P }
+		//
+		//	// package b
+		//	type B a.A[int]
+		//
+		// We need to update references algorithms not to depend on this
+		// de-duplication, at which point we may want to simply remove the
+		// workaround here.
+ w.string("") + return + } + w.string(string(objectPath)) + w.pkg(obj.Pkg()) +} + +func (w *exportWriter) signature(sig *types.Signature) { + w.paramList(sig.Params()) + w.paramList(sig.Results()) + if sig.Params().Len() > 0 { + w.bool(sig.Variadic()) + } +} + +func (w *exportWriter) typeList(ts *types.TypeList, pkg *types.Package) { + w.uint64(uint64(ts.Len())) + for i := 0; i < ts.Len(); i++ { + w.typ(ts.At(i), pkg) + } +} + +func (w *exportWriter) tparamList(prefix string, list *types.TypeParamList, pkg *types.Package) { + ll := uint64(list.Len()) + w.uint64(ll) + for i := 0; i < list.Len(); i++ { + tparam := list.At(i) + // Set the type parameter exportName before exporting its type. + exportName := tparamExportName(prefix, tparam) + w.p.tparamNames[tparam.Obj()] = exportName + w.typ(list.At(i), pkg) + } +} + +const blankMarker = "$" + +// tparamExportName returns the 'exported' name of a type parameter, which +// differs from its actual object name: it is prefixed with a qualifier, and +// blank type parameter names are disambiguated by their index in the type +// parameter list. +func tparamExportName(prefix string, tparam *types.TypeParam) string { + assert(prefix != "") + name := tparam.Obj().Name() + if name == "_" { + name = blankMarker + strconv.Itoa(tparam.Index()) + } + return prefix + "." + name +} + +// tparamName returns the real name of a type parameter, after stripping its +// qualifying prefix and reverting blank-name encoding. See tparamExportName +// for details. +func tparamName(exportName string) string { + // Remove the "path" from the type param name that makes it unique. + ix := strings.LastIndex(exportName, ".") + if ix < 0 { + errorf("malformed type parameter export name %s: missing prefix", exportName) + } + name := exportName[ix+1:] + if strings.HasPrefix(name, blankMarker) { + return "_" + } + return name +} + +func (w *exportWriter) paramList(tup *types.Tuple) { + n := tup.Len() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + w.param(tup.At(i)) + } +} + +func (w *exportWriter) param(obj types.Object) { + w.pos(obj.Pos()) + w.localIdent(obj) + w.typ(obj.Type(), obj.Pkg()) +} + +func (w *exportWriter) value(typ types.Type, v constant.Value) { + w.typ(typ, nil) + if w.p.version >= iexportVersionGo1_18 { + w.int64(int64(v.Kind())) + } + + if v.Kind() == constant.Unknown { + // golang/go#60605: treat unknown constant values as if they have invalid type + // + // This loses some fidelity over the package type-checked from source, but that + // is acceptable. 
+		//
+		// TODO(rfindley): we should switch on the recorded constant kind rather
+		// than the constant type
+		return
+	}
+
+	switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
+	case types.IsBoolean:
+		w.bool(constant.BoolVal(v))
+	case types.IsInteger:
+		var i big.Int
+		if i64, exact := constant.Int64Val(v); exact {
+			i.SetInt64(i64)
+		} else if ui64, exact := constant.Uint64Val(v); exact {
+			i.SetUint64(ui64)
+		} else {
+			i.SetString(v.ExactString(), 10)
+		}
+		w.mpint(&i, typ)
+	case types.IsFloat:
+		f := constantToFloat(v)
+		w.mpfloat(f, typ)
+	case types.IsComplex:
+		w.mpfloat(constantToFloat(constant.Real(v)), typ)
+		w.mpfloat(constantToFloat(constant.Imag(v)), typ)
+	case types.IsString:
+		w.string(constant.StringVal(v))
+	default:
+		if b.Kind() == types.Invalid {
+			// package contains type errors
+			break
+		}
+		panic(internalErrorf("unexpected type %v (%v)", typ, typ.Underlying()))
+	}
+}
+
+// constantToFloat converts a constant.Value with kind constant.Float to a
+// big.Float.
+func constantToFloat(x constant.Value) *big.Float {
+	x = constant.ToFloat(x)
+	// Use the same floating-point precision (512) as cmd/compile
+	// (see Mpprec in cmd/compile/internal/gc/mpfloat.go).
+	const mpprec = 512
+	var f big.Float
+	f.SetPrec(mpprec)
+	if v, exact := constant.Float64Val(x); exact {
+		// float64
+		f.SetFloat64(v)
+	} else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
+		// TODO(gri): add big.Rat accessor to constant.Value.
+		n := valueToRat(num)
+		d := valueToRat(denom)
+		f.SetRat(n.Quo(n, d))
+	} else {
+		// Value too large to represent as a fraction => inaccessible.
+		// TODO(gri): add big.Float accessor to constant.Value.
+		_, ok := f.SetString(x.ExactString())
+		assert(ok)
+	}
+	return &f
+}
+
+func valueToRat(x constant.Value) *big.Rat {
+	// Convert little-endian to big-endian.
+	// I can't believe this is necessary.
+	bytes := constant.Bytes(x)
+	for i := 0; i < len(bytes)/2; i++ {
+		bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
+	}
+	return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
+}
+
+// mpint exports a multi-precision integer.
+//
+// For unsigned types, small values are written out as a single
+// byte. Larger values are written out as a length-prefixed big-endian
+// byte string, where the length prefix is encoded as its complement.
+// For example, bytes 0, 1, and 2 directly represent the integer
+// values 0, 1, and 2; while bytes 255, 254, and 253 indicate that a 1-,
+// 2-, or 3-byte big-endian string follows.
+//
+// Encoding for signed types uses the same general approach as for
+// unsigned types, except small values use zig-zag encoding and the
+// bottom bit of the length prefix byte for large values is reserved as a
+// sign bit.
+//
+// The exact boundary between small and large encodings varies
+// according to the maximum number of bytes needed to encode a value
+// of type typ. As a special case, 8-bit types are always encoded as a
+// single byte.
+//
+// TODO(mdempsky): Is this level of complexity really worthwhile?
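+//
+// Worked example (illustrative, not part of the upstream comment): for
+// int32, maxBytes is 4 and the encoding is signed, so maxSmall is
+// 256-2*4 = 248. The value 3 zig-zag encodes to the single byte 6 and -1
+// to the single byte 1, while a two-byte magnitude such as 300 is written
+// as the prefix byte 256-2*2 = 252 (253 if negative) followed by its
+// big-endian bytes.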
+func (w *exportWriter) mpint(x *big.Int, typ types.Type) { + basic, ok := typ.Underlying().(*types.Basic) + if !ok { + panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying())) + } + + signed, maxBytes := intSize(basic) + + negative := x.Sign() < 0 + if !signed && negative { + panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x)) + } + + b := x.Bytes() + if len(b) > 0 && b[0] == 0 { + panic(internalErrorf("leading zeros")) + } + if uint(len(b)) > maxBytes { + panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)) + } + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + // Check if x can use small value encoding. + if len(b) <= 1 { + var ux uint + if len(b) == 1 { + ux = uint(b[0]) + } + if signed { + ux <<= 1 + if negative { + ux-- + } + } + if ux < maxSmall { + w.data.WriteByte(byte(ux)) + return + } + } + + n := 256 - uint(len(b)) + if signed { + n = 256 - 2*uint(len(b)) + if negative { + n |= 1 + } + } + if n < maxSmall || n >= 256 { + panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)) + } + + w.data.WriteByte(byte(n)) + w.data.Write(b) +} + +// mpfloat exports a multi-precision floating point number. +// +// The number's value is decomposed into mantissa × 2**exponent, where +// mantissa is an integer. The value is written out as mantissa (as a +// multi-precision integer) and then the exponent, except exponent is +// omitted if mantissa is zero. +func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) { + if f.IsInf() { + panic("infinite constant") + } + + // Break into f = mant × 2**exp, with 0.5 <= mant < 1. + var mant big.Float + exp := int64(f.MantExp(&mant)) + + // Scale so that mant is an integer. + prec := mant.MinPrec() + mant.SetMantExp(&mant, int(prec)) + exp -= int64(prec) + + manti, acc := mant.Int(nil) + if acc != big.Exact { + panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc)) + } + w.mpint(manti, typ) + if manti.Sign() != 0 { + w.int64(exp) + } +} + +func (w *exportWriter) bool(b bool) bool { + var x uint64 + if b { + x = 1 + } + w.uint64(x) + return b +} + +func (w *exportWriter) int64(x int64) { w.data.int64(x) } +func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) } +func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) } + +func (w *exportWriter) localIdent(obj types.Object) { + // Anonymous parameters. + if obj == nil { + w.string("") + return + } + + name := obj.Name() + if name == "_" { + w.string("_") + return + } + + w.string(name) +} + +type intWriter struct { + bytes.Buffer +} + +func (w *intWriter) int64(x int64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutVarint(buf[:], x) + w.Write(buf[:n]) +} + +func (w *intWriter) uint64(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + w.Write(buf[:n]) +} + +func assert(cond bool) { + if !cond { + panic("internal error: assertion failed") + } +} + +// The below is copied from go/src/cmd/compile/internal/gc/syntax.go. + +// objQueue is a FIFO queue of types.Object. The zero value of objQueue is +// a ready-to-use empty queue. +type objQueue struct { + ring []types.Object + head, tail int +} + +// empty returns true if q contains no Nodes. +func (q *objQueue) empty() bool { + return q.head == q.tail +} + +// pushTail appends n to the tail of the queue. 
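+// The ring starts at 16 slots and is doubled when head+len(ring) == tail
+// (i.e., when it is full); the queued elements are then copied to the
+// front of the new ring and head is reset to zero.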
+func (q *objQueue) pushTail(obj types.Object) { + if len(q.ring) == 0 { + q.ring = make([]types.Object, 16) + } else if q.head+len(q.ring) == q.tail { + // Grow the ring. + nring := make([]types.Object, len(q.ring)*2) + // Copy the old elements. + part := q.ring[q.head%len(q.ring):] + if q.tail-q.head <= len(part) { + part = part[:q.tail-q.head] + copy(nring, part) + } else { + pos := copy(nring, part) + copy(nring[pos:], q.ring[:q.tail%len(q.ring)]) + } + q.ring, q.head, q.tail = nring, 0, q.tail-q.head + } + + q.ring[q.tail%len(q.ring)] = obj + q.tail++ +} + +// popHead pops a node from the head of the queue. It panics if q is empty. +func (q *objQueue) popHead() types.Object { + if q.empty() { + panic("dequeue empty") + } + obj := q.ring[q.head%len(q.ring)] + q.head++ + return obj +} + +// internalError represents an error generated inside this package. +type internalError string + +func (e internalError) Error() string { return "gcimporter: " + string(e) } + +// TODO(adonovan): make this call panic, so that it's symmetric with errorf. +// Otherwise it's easy to forget to do anything with the error. +// +// TODO(adonovan): also, consider switching the names "errorf" and +// "internalErrorf" as the former is used for bugs, whose cause is +// internal inconsistency, whereas the latter is used for ordinary +// situations like bad input, whose cause is external. +func internalErrorf(format string, args ...interface{}) error { + return internalError(fmt.Sprintf(format, args...)) +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/iimport.go new file mode 100644 index 00000000000..136aa03653c --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -0,0 +1,1100 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed package import. +// See cmd/compile/internal/gc/iexport.go for the export data format. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "io" + "math/big" + "sort" + "strings" + + "golang.org/x/tools/go/types/objectpath" + "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/typesinternal" +) + +type intReader struct { + *bytes.Reader + path string +} + +func (r *intReader) int64() int64 { + i, err := binary.ReadVarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +func (r *intReader) uint64() uint64 { + i, err := binary.ReadUvarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +// Keep this in sync with constants in iexport.go. 
+const (
+	iexportVersionGo1_11   = 0
+	iexportVersionPosCol   = 1
+	iexportVersionGo1_18   = 2
+	iexportVersionGenerics = 2
+
+	iexportVersionCurrent = 2
+)
+
+type ident struct {
+	pkg  *types.Package
+	name string
+}
+
+const predeclReserved = 32
+
+type itag uint64
+
+const (
+	// Types
+	definedType itag = iota
+	pointerType
+	sliceType
+	arrayType
+	chanType
+	mapType
+	signatureType
+	structType
+	interfaceType
+	typeParamType
+	instanceType
+	unionType
+	aliasType
+)
+
+// Object tags
+const (
+	varTag          = 'V'
+	funcTag         = 'F'
+	genericFuncTag  = 'G'
+	constTag        = 'C'
+	aliasTag        = 'A'
+	genericAliasTag = 'B'
+	typeParamTag    = 'P'
+	typeTag         = 'T'
+	genericTypeTag  = 'U'
+)
+
+// IImportData imports a package from the serialized package data
+// and returns 0 and a reference to the package.
+// If the export data version is not recognized or the format is otherwise
+// compromised, an error is returned.
+func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) {
+	pkgs, err := iimportCommon(fset, GetPackagesFromMap(imports), data, false, path, false, nil)
+	if err != nil {
+		return 0, nil, err
+	}
+	return 0, pkgs[0], nil
+}
+
+// IImportBundle imports a set of packages from the serialized package bundle.
+func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) {
+	return iimportCommon(fset, GetPackagesFromMap(imports), data, true, "", false, nil)
+}
+
+// A GetPackagesFunc obtains the non-nil symbols for a set of
+// packages, creating and recursively importing them as needed. An
+// implementation should store each package symbol in the Pkg
+// field of the items array.
+//
+// Any error causes importing to fail. This can be used to quickly read
+// the import manifest of an export data file without fully decoding it.
+type GetPackagesFunc = func(items []GetPackagesItem) error
+
+// A GetPackagesItem is a request from the importer for the package
+// symbol of the specified name and path.
+type GetPackagesItem struct {
+	Name, Path string
+	Pkg        *types.Package // to be filled in by GetPackagesFunc call
+
+	// private importer state
+	pathOffset uint64
+	nameIndex  map[string]uint64
+}
+
+// GetPackagesFromMap returns a GetPackagesFunc that retrieves
+// packages from the given map of package path to package.
+//
+// The returned function may mutate m: each requested package that is not
+// found is created with types.NewPackage and inserted into m.
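+//
+// (Illustrative call sketch: a client that tracks previously imported
+// packages in a map m can wire it up as
+//
+//	pkg, err := IImportShallow(fset, GetPackagesFromMap(m), data, path, nil)
+//
+// so that manifest entries are resolved from, and inserted into, m.)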
+func GetPackagesFromMap(m map[string]*types.Package) GetPackagesFunc { + return func(items []GetPackagesItem) error { + for i, item := range items { + pkg, ok := m[item.Path] + if !ok { + pkg = types.NewPackage(item.Path, item.Name) + m[item.Path] = pkg + } + items[i].Pkg = pkg + } + return nil + } +} + +func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, bundle bool, path string, shallow bool, reportf ReportFunc) (pkgs []*types.Package, err error) { + const currentVersion = iexportVersionCurrent + version := int64(-1) + if !debug { + defer func() { + if e := recover(); e != nil { + if bundle { + err = fmt.Errorf("%v", e) + } else if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("internal error while importing %q (%v); please report an issue", path, e) + } + } + }() + } + + r := &intReader{bytes.NewReader(data), path} + + if bundle { + if v := r.uint64(); v != bundleVersion { + errorf("unknown bundle format version %d", v) + } + } + + version = int64(r.uint64()) + switch version { + case iexportVersionGo1_18, iexportVersionPosCol, iexportVersionGo1_11: + default: + if version > iexportVersionGo1_18 { + errorf("unstable iexport format version %d, just rebuild compiler and std library", version) + } else { + errorf("unknown iexport format version %d", version) + } + } + + sLen := int64(r.uint64()) + var fLen int64 + var fileOffset []uint64 + if shallow { + // Shallow mode uses a different position encoding. + fLen = int64(r.uint64()) + fileOffset = make([]uint64, r.uint64()) + for i := range fileOffset { + fileOffset[i] = r.uint64() + } + } + dLen := int64(r.uint64()) + + whence, _ := r.Seek(0, io.SeekCurrent) + stringData := data[whence : whence+sLen] + fileData := data[whence+sLen : whence+sLen+fLen] + declData := data[whence+sLen+fLen : whence+sLen+fLen+dLen] + r.Seek(sLen+fLen+dLen, io.SeekCurrent) + + p := iimporter{ + version: int(version), + ipath: path, + aliases: aliases.Enabled(), + shallow: shallow, + reportf: reportf, + + stringData: stringData, + stringCache: make(map[uint64]string), + fileOffset: fileOffset, + fileData: fileData, + fileCache: make([]*token.File, len(fileOffset)), + pkgCache: make(map[uint64]*types.Package), + + declData: declData, + pkgIndex: make(map[*types.Package]map[string]uint64), + typCache: make(map[uint64]types.Type), + // Separate map for typeparams, keyed by their package and unique + // name. + tparamIndex: make(map[ident]types.Type), + + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*fileInfo), + }, + } + defer p.fake.setLines() // set lines for files in fset + + for i, pt := range predeclared() { + p.typCache[uint64(i)] = pt + } + + // Gather the relevant packages from the manifest. + items := make([]GetPackagesItem, r.uint64()) + uniquePkgPaths := make(map[string]bool) + for i := range items { + pkgPathOff := r.uint64() + pkgPath := p.stringAt(pkgPathOff) + pkgName := p.stringAt(r.uint64()) + _ = r.uint64() // package height; unused by go/types + + if pkgPath == "" { + pkgPath = path + } + items[i].Name = pkgName + items[i].Path = pkgPath + items[i].pathOffset = pkgPathOff + + // Read index for package. + nameIndex := make(map[string]uint64) + nSyms := r.uint64() + // In shallow mode, only the current package (i=0) has an index. 
+		assert(!(shallow && i > 0 && nSyms != 0))
+		for ; nSyms > 0; nSyms-- {
+			name := p.stringAt(r.uint64())
+			nameIndex[name] = r.uint64()
+		}
+
+		items[i].nameIndex = nameIndex
+
+		uniquePkgPaths[pkgPath] = true
+	}
+	// Debugging #63822; hypothesis: there are duplicate PkgPaths.
+	if len(uniquePkgPaths) != len(items) {
+		if reportf != nil { // reportf is optional; guard against a nil callback
+			reportf("found duplicate PkgPaths while reading export data manifest: %v", items)
+		}
+	}
+
+	// Request packages all at once from the client,
+	// enabling a parallel implementation.
+	if err := getPackages(items); err != nil {
+		return nil, err // don't wrap this error
+	}
+
+	// Check the results and complete the index.
+	pkgList := make([]*types.Package, len(items))
+	for i, item := range items {
+		pkg := item.Pkg
+		if pkg == nil {
+			errorf("internal error: getPackages returned nil package for %q", item.Path)
+		} else if pkg.Path() != item.Path {
+			errorf("internal error: getPackages returned wrong path %q, want %q", pkg.Path(), item.Path)
+		} else if pkg.Name() != item.Name {
+			errorf("internal error: getPackages returned wrong name %s for package %q, want %s", pkg.Name(), item.Path, item.Name)
+		}
+		p.pkgCache[item.pathOffset] = pkg
+		p.pkgIndex[pkg] = item.nameIndex
+		pkgList[i] = pkg
+	}
+
+	if bundle {
+		pkgs = make([]*types.Package, r.uint64())
+		for i := range pkgs {
+			pkg := p.pkgAt(r.uint64())
+			imps := make([]*types.Package, r.uint64())
+			for j := range imps {
+				imps[j] = p.pkgAt(r.uint64())
+			}
+			pkg.SetImports(imps)
+			pkgs[i] = pkg
+		}
+	} else {
+		if len(pkgList) == 0 {
+			errorf("no packages found for %s", path)
+			panic("unreachable")
+		}
+		pkgs = pkgList[:1]
+
+		// record all referenced packages as imports
+		list := append(([]*types.Package)(nil), pkgList[1:]...)
+		sort.Sort(byPath(list))
+		pkgs[0].SetImports(list)
+	}
+
+	for _, pkg := range pkgs {
+		if pkg.Complete() {
+			continue
+		}
+
+		names := make([]string, 0, len(p.pkgIndex[pkg]))
+		for name := range p.pkgIndex[pkg] {
+			names = append(names, name)
+		}
+		sort.Strings(names)
+		for _, name := range names {
+			p.doDecl(pkg, name)
+		}
+
+		// package was imported completely and without errors
+		pkg.MarkComplete()
+	}
+
+	// SetConstraint can't be called if the constraint type is not yet complete.
+	// When type params are created in the typeParamTag case of (*importReader).obj(),
+	// the associated constraint type may not be complete due to recursion.
+	// Therefore, we defer calling SetConstraint there, and call it here instead
+	// after all types are complete.
+	for _, d := range p.later {
+		d.t.SetConstraint(d.constraint)
+	}
+
+	for _, typ := range p.interfaceList {
+		typ.Complete()
+	}
+
+	// Workaround for golang/go#61561. See the doc for instanceList for details.
+ for _, typ := range p.instanceList { + if iface, _ := typ.Underlying().(*types.Interface); iface != nil { + iface.Complete() + } + } + + return pkgs, nil +} + +type setConstraintArgs struct { + t *types.TypeParam + constraint types.Type +} + +type iimporter struct { + version int + ipath string + + aliases bool + shallow bool + reportf ReportFunc // if non-nil, used to report bugs + + stringData []byte + stringCache map[uint64]string + fileOffset []uint64 // fileOffset[i] is offset in fileData for info about file encoded as i + fileData []byte + fileCache []*token.File // memoized decoding of file encoded as i + pkgCache map[uint64]*types.Package + + declData []byte + pkgIndex map[*types.Package]map[string]uint64 + typCache map[uint64]types.Type + tparamIndex map[ident]types.Type + + fake fakeFileSet + interfaceList []*types.Interface + + // Workaround for the go/types bug golang/go#61561: instances produced during + // instantiation may contain incomplete interfaces. Here we only complete the + // underlying type of the instance, which is the most common case but doesn't + // handle parameterized interface literals defined deeper in the type. + instanceList []types.Type // instances for later completion (see golang/go#61561) + + // Arguments for calls to SetConstraint that are deferred due to recursive types + later []setConstraintArgs + + indent int // for tracing support +} + +func (p *iimporter) trace(format string, args ...interface{}) { + if !trace { + // Call sites should also be guarded, but having this check here allows + // easily enabling/disabling debug trace statements. + return + } + fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...) +} + +func (p *iimporter) doDecl(pkg *types.Package, name string) { + if debug { + p.trace("import decl %s", name) + p.indent++ + defer func() { + p.indent-- + p.trace("=> %s", name) + }() + } + // See if we've already imported this declaration. + if obj := pkg.Scope().Lookup(name); obj != nil { + return + } + + off, ok := p.pkgIndex[pkg][name] + if !ok { + // In deep mode, the index should be complete. In shallow + // mode, we should have already recursively loaded necessary + // dependencies so the above Lookup succeeds. + errorf("%v.%v not in index", pkg, name) + } + + r := &importReader{p: p, currPkg: pkg} + r.declReader.Reset(p.declData[off:]) + + r.obj(name) +} + +func (p *iimporter) stringAt(off uint64) string { + if s, ok := p.stringCache[off]; ok { + return s + } + + slen, n := binary.Uvarint(p.stringData[off:]) + if n <= 0 { + errorf("varint failed") + } + spos := off + uint64(n) + s := string(p.stringData[spos : spos+slen]) + p.stringCache[off] = s + return s +} + +func (p *iimporter) fileAt(index uint64) *token.File { + file := p.fileCache[index] + if file == nil { + off := p.fileOffset[index] + file = p.decodeFile(intReader{bytes.NewReader(p.fileData[off:]), p.ipath}) + p.fileCache[index] = file + } + return file +} + +func (p *iimporter) decodeFile(rd intReader) *token.File { + filename := p.stringAt(rd.uint64()) + size := int(rd.uint64()) + file := p.fake.fset.AddFile(filename, -1, size) + + // SetLines requires a nondecreasing sequence. + // Because it is common for clients to derive the interval + // [start, start+len(name)] from a start position, and we + // want to ensure that the end offset is on the same line, + // we fill in the gaps of the sparse encoding with values + // that strictly increase by the largest possible amount. 
+ // This allows us to avoid having to record the actual end + // offset of each needed line. + + lines := make([]int, int(rd.uint64())) + var index, offset int + for i, n := 0, int(rd.uint64()); i < n; i++ { + index += int(rd.uint64()) + offset += int(rd.uint64()) + lines[index] = offset + + // Ensure monotonicity between points. + for j := index - 1; j > 0 && lines[j] == 0; j-- { + lines[j] = lines[j+1] - 1 + } + } + + // Ensure monotonicity after last point. + for j := len(lines) - 1; j > 0 && lines[j] == 0; j-- { + size-- + lines[j] = size + } + + if !file.SetLines(lines) { + errorf("SetLines failed: %d", lines) // can't happen + } + return file +} + +func (p *iimporter) pkgAt(off uint64) *types.Package { + if pkg, ok := p.pkgCache[off]; ok { + return pkg + } + path := p.stringAt(off) + errorf("missing package %q in %q", path, p.ipath) + return nil +} + +func (p *iimporter) typAt(off uint64, base *types.Named) types.Type { + if t, ok := p.typCache[off]; ok && canReuse(base, t) { + return t + } + + if off < predeclReserved { + errorf("predeclared type missing from cache: %v", off) + } + + r := &importReader{p: p} + r.declReader.Reset(p.declData[off-predeclReserved:]) + t := r.doType(base) + + if canReuse(base, t) { + p.typCache[off] = t + } + return t +} + +// canReuse reports whether the type rhs on the RHS of the declaration for def +// may be re-used. +// +// Specifically, if def is non-nil and rhs is an interface type with methods, it +// may not be re-used because we have a convention of setting the receiver type +// for interface methods to def. +func canReuse(def *types.Named, rhs types.Type) bool { + if def == nil { + return true + } + iface, _ := aliases.Unalias(rhs).(*types.Interface) + if iface == nil { + return true + } + // Don't use iface.Empty() here as iface may not be complete. + return iface.NumEmbeddeds() == 0 && iface.NumExplicitMethods() == 0 +} + +type importReader struct { + p *iimporter + declReader bytes.Reader + currPkg *types.Package + prevFile string + prevLine int64 + prevColumn int64 +} + +func (r *importReader) obj(name string) { + tag := r.byte() + pos := r.pos() + + switch tag { + case aliasTag: + typ := r.typ() + // TODO(adonovan): support generic aliases: + // if tag == genericAliasTag { + // tparams := r.tparamList() + // alias.SetTypeParams(tparams) + // } + r.declare(aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ)) + + case constTag: + typ, val := r.value() + + r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) + + case funcTag, genericFuncTag: + var tparams []*types.TypeParam + if tag == genericFuncTag { + tparams = r.tparamList() + } + sig := r.signature(nil, nil, tparams) + r.declare(types.NewFunc(pos, r.currPkg, name, sig)) + + case typeTag, genericTypeTag: + // Types can be recursive. We need to setup a stub + // declaration before recursing. + obj := types.NewTypeName(pos, r.currPkg, name, nil) + named := types.NewNamed(obj, nil, nil) + // Declare obj before calling r.tparamList, so the new type name is recognized + // if used in the constraint of one of its own typeparams (see #48280). 
+ r.declare(obj) + if tag == genericTypeTag { + tparams := r.tparamList() + named.SetTypeParams(tparams) + } + + underlying := r.p.typAt(r.uint64(), named).Underlying() + named.SetUnderlying(underlying) + + if !isInterface(underlying) { + for n := r.uint64(); n > 0; n-- { + mpos := r.pos() + mname := r.ident() + recv := r.param() + + // If the receiver has any targs, set those as the + // rparams of the method (since those are the + // typeparams being used in the method sig/body). + _, recvNamed := typesinternal.ReceiverNamed(recv) + targs := recvNamed.TypeArgs() + var rparams []*types.TypeParam + if targs.Len() > 0 { + rparams = make([]*types.TypeParam, targs.Len()) + for i := range rparams { + rparams[i] = aliases.Unalias(targs.At(i)).(*types.TypeParam) + } + } + msig := r.signature(recv, rparams, nil) + + named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig)) + } + } + + case typeParamTag: + // We need to "declare" a typeparam in order to have a name that + // can be referenced recursively (if needed) in the type param's + // bound. + if r.p.version < iexportVersionGenerics { + errorf("unexpected type param type") + } + name0 := tparamName(name) + tn := types.NewTypeName(pos, r.currPkg, name0, nil) + t := types.NewTypeParam(tn, nil) + + // To handle recursive references to the typeparam within its + // bound, save the partial type in tparamIndex before reading the bounds. + id := ident{r.currPkg, name} + r.p.tparamIndex[id] = t + var implicit bool + if r.p.version >= iexportVersionGo1_18 { + implicit = r.bool() + } + constraint := r.typ() + if implicit { + iface, _ := aliases.Unalias(constraint).(*types.Interface) + if iface == nil { + errorf("non-interface constraint marked implicit") + } + iface.MarkImplicit() + } + // The constraint type may not be complete, if we + // are in the middle of a type recursion involving type + // constraints. So, we defer SetConstraint until we have + // completely set up all types in ImportData. + r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint}) + + case varTag: + typ := r.typ() + + r.declare(types.NewVar(pos, r.currPkg, name, typ)) + + default: + errorf("unexpected tag: %v", tag) + } +} + +func (r *importReader) declare(obj types.Object) { + obj.Pkg().Scope().Insert(obj) +} + +func (r *importReader) value() (typ types.Type, val constant.Value) { + typ = r.typ() + if r.p.version >= iexportVersionGo1_18 { + // TODO: add support for using the kind. 
+ _ = constant.Kind(r.int64()) + } + + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { + case types.IsBoolean: + val = constant.MakeBool(r.bool()) + + case types.IsString: + val = constant.MakeString(r.string()) + + case types.IsInteger: + var x big.Int + r.mpint(&x, b) + val = constant.Make(&x) + + case types.IsFloat: + val = r.mpfloat(b) + + case types.IsComplex: + re := r.mpfloat(b) + im := r.mpfloat(b) + val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + + default: + if b.Kind() == types.Invalid { + val = constant.MakeUnknown() + return + } + errorf("unexpected type %v", typ) // panics + panic("unreachable") + } + + return +} + +func intSize(b *types.Basic) (signed bool, maxBytes uint) { + if (b.Info() & types.IsUntyped) != 0 { + return true, 64 + } + + switch b.Kind() { + case types.Float32, types.Complex64: + return true, 3 + case types.Float64, types.Complex128: + return true, 7 + } + + signed = (b.Info() & types.IsUnsigned) == 0 + switch b.Kind() { + case types.Int8, types.Uint8: + maxBytes = 1 + case types.Int16, types.Uint16: + maxBytes = 2 + case types.Int32, types.Uint32: + maxBytes = 4 + default: + maxBytes = 8 + } + + return +} + +func (r *importReader) mpint(x *big.Int, typ *types.Basic) { + signed, maxBytes := intSize(typ) + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + n, _ := r.declReader.ReadByte() + if uint(n) < maxSmall { + v := int64(n) + if signed { + v >>= 1 + if n&1 != 0 { + v = ^v + } + } + x.SetInt64(v) + return + } + + v := -n + if signed { + v = -(n &^ 1) >> 1 + } + if v < 1 || uint(v) > maxBytes { + errorf("weird decoding: %v, %v => %v", n, signed, v) + } + b := make([]byte, v) + io.ReadFull(&r.declReader, b) + x.SetBytes(b) + if signed && n&1 != 0 { + x.Neg(x) + } +} + +func (r *importReader) mpfloat(typ *types.Basic) constant.Value { + var mant big.Int + r.mpint(&mant, typ) + var f big.Float + f.SetInt(&mant) + if f.Sign() != 0 { + f.SetMantExp(&f, int(r.int64())) + } + return constant.Make(&f) +} + +func (r *importReader) ident() string { + return r.string() +} + +func (r *importReader) qualifiedIdent() (*types.Package, string) { + name := r.string() + pkg := r.pkg() + return pkg, name +} + +func (r *importReader) pos() token.Pos { + if r.p.shallow { + // precise offsets are encoded only in shallow mode + return r.posv2() + } + if r.p.version >= iexportVersionPosCol { + r.posv1() + } else { + r.posv0() + } + + if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 { + return token.NoPos + } + return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn)) +} + +func (r *importReader) posv0() { + delta := r.int64() + if delta != deltaNewFile { + r.prevLine += delta + } else if l := r.int64(); l == -1 { + r.prevLine += deltaNewFile + } else { + r.prevFile = r.string() + r.prevLine = l + } +} + +func (r *importReader) posv1() { + delta := r.int64() + r.prevColumn += delta >> 1 + if delta&1 != 0 { + delta = r.int64() + r.prevLine += delta >> 1 + if delta&1 != 0 { + r.prevFile = r.string() + } + } +} + +func (r *importReader) posv2() token.Pos { + file := r.uint64() + if file == 0 { + return token.NoPos + } + tf := r.p.fileAt(file - 1) + return tf.Pos(int(r.uint64())) +} + +func (r *importReader) typ() types.Type { + return r.p.typAt(r.uint64(), nil) +} + +func isInterface(t types.Type) bool { + _, ok := aliases.Unalias(t).(*types.Interface) + return ok +} + +func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) } 
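+// (Worked example of the v1 position encoding, for illustration: a position
+// one line below the previous one, in the same file and column, is written
+// by the exporter as deltaColumn = 0|1 = 1 followed by deltaLine = 1<<1 = 2;
+// posv1 above reads back 1, whose set low bit says a line delta follows,
+// then 2, whose clear low bit says no new file name follows, advancing
+// prevLine by one.)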
+func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } + +func (r *importReader) doType(base *types.Named) (res types.Type) { + k := r.kind() + if debug { + r.p.trace("importing type %d (base: %s)", k, base) + r.p.indent++ + defer func() { + r.p.indent-- + r.p.trace("=> %s", res) + }() + } + switch k { + default: + errorf("unexpected kind tag in %q: %v", r.p.ipath, k) + return nil + + case aliasType, definedType: + pkg, name := r.qualifiedIdent() + r.p.doDecl(pkg, name) + return pkg.Scope().Lookup(name).(*types.TypeName).Type() + case pointerType: + return types.NewPointer(r.typ()) + case sliceType: + return types.NewSlice(r.typ()) + case arrayType: + n := r.uint64() + return types.NewArray(r.typ(), int64(n)) + case chanType: + dir := chanDir(int(r.uint64())) + return types.NewChan(dir, r.typ()) + case mapType: + return types.NewMap(r.typ(), r.typ()) + case signatureType: + r.currPkg = r.pkg() + return r.signature(nil, nil, nil) + + case structType: + r.currPkg = r.pkg() + + fields := make([]*types.Var, r.uint64()) + tags := make([]string, len(fields)) + for i := range fields { + var field *types.Var + if r.p.shallow { + field, _ = r.objectPathObject().(*types.Var) + } + + fpos := r.pos() + fname := r.ident() + ftyp := r.typ() + emb := r.bool() + tag := r.string() + + // Either this is not a shallow import, the field is local, or the + // encoded objectPath failed to produce an object (a bug). + // + // Even in this last, buggy case, fall back on creating a new field. As + // discussed in iexport.go, this is not correct, but mostly works and is + // preferable to failing (for now at least). + if field == nil { + field = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + } + + fields[i] = field + tags[i] = tag + } + return types.NewStruct(fields, tags) + + case interfaceType: + r.currPkg = r.pkg() + + embeddeds := make([]types.Type, r.uint64()) + for i := range embeddeds { + _ = r.pos() + embeddeds[i] = r.typ() + } + + methods := make([]*types.Func, r.uint64()) + for i := range methods { + var method *types.Func + if r.p.shallow { + method, _ = r.objectPathObject().(*types.Func) + } + + mpos := r.pos() + mname := r.ident() + + // TODO(mdempsky): Matches bimport.go, but I + // don't agree with this. + var recv *types.Var + if base != nil { + recv = types.NewVar(token.NoPos, r.currPkg, "", base) + } + msig := r.signature(recv, nil, nil) + + if method == nil { + method = types.NewFunc(mpos, r.currPkg, mname, msig) + } + methods[i] = method + } + + typ := newInterface(methods, embeddeds) + r.p.interfaceList = append(r.p.interfaceList, typ) + return typ + + case typeParamType: + if r.p.version < iexportVersionGenerics { + errorf("unexpected type param type") + } + pkg, name := r.qualifiedIdent() + id := ident{pkg, name} + if t, ok := r.p.tparamIndex[id]; ok { + // We're already in the process of importing this typeparam. + return t + } + // Otherwise, import the definition of the typeparam now. + r.p.doDecl(pkg, name) + return r.p.tparamIndex[id] + + case instanceType: + if r.p.version < iexportVersionGenerics { + errorf("unexpected instantiation type") + } + // pos does not matter for instances: they are positioned on the original + // type. + _ = r.pos() + len := r.uint64() + targs := make([]types.Type, len) + for i := range targs { + targs[i] = r.typ() + } + baseType := r.typ() + // The imported instantiated type doesn't include any methods, so + // we must always use the methods of the base (orig) type. 
+		// TODO provide a non-nil *Environment
+		t, _ := types.Instantiate(nil, baseType, targs, false)
+
+		// Workaround for golang/go#61561. See the doc for instanceList for details.
+		r.p.instanceList = append(r.p.instanceList, t)
+		return t
+
+	case unionType:
+		if r.p.version < iexportVersionGenerics {
+			errorf("unexpected union type")
+		}
+		terms := make([]*types.Term, r.uint64())
+		for i := range terms {
+			terms[i] = types.NewTerm(r.bool(), r.typ())
+		}
+		return types.NewUnion(terms)
+	}
+}
+
+func (r *importReader) kind() itag {
+	return itag(r.uint64())
+}
+
+// objectPathObject is the inverse of exportWriter.objectPath.
+//
+// In shallow mode, certain fields and methods may need to be looked up in an
+// imported package. See the doc for exportWriter.objectPath for a full
+// explanation.
+func (r *importReader) objectPathObject() types.Object {
+	objPath := objectpath.Path(r.string())
+	if objPath == "" {
+		return nil
+	}
+	pkg := r.pkg()
+	obj, err := objectpath.Object(pkg, objPath)
+	if err != nil {
+		if r.p.reportf != nil {
+			r.p.reportf("failed to find object for objectPath %q: %v", objPath, err)
+		}
+	}
+	return obj
+}
+
+func (r *importReader) signature(recv *types.Var, rparams []*types.TypeParam, tparams []*types.TypeParam) *types.Signature {
+	params := r.paramList()
+	results := r.paramList()
+	variadic := params.Len() > 0 && r.bool()
+	return types.NewSignatureType(recv, rparams, tparams, params, results, variadic)
+}
+
+func (r *importReader) tparamList() []*types.TypeParam {
+	n := r.uint64()
+	if n == 0 {
+		return nil
+	}
+	xs := make([]*types.TypeParam, n)
+	for i := range xs {
+		// Note: the standard library importer is tolerant of nil types here,
+		// though would panic in SetTypeParams.
+		xs[i] = aliases.Unalias(r.typ()).(*types.TypeParam)
+	}
+	return xs
+}
+
+func (r *importReader) paramList() *types.Tuple {
+	xs := make([]*types.Var, r.uint64())
+	for i := range xs {
+		xs[i] = r.param()
+	}
+	return types.NewTuple(xs...)
+}
+
+func (r *importReader) param() *types.Var {
+	pos := r.pos()
+	name := r.ident()
+	typ := r.typ()
+	return types.NewParam(pos, r.currPkg, name, typ)
+}
+
+func (r *importReader) bool() bool {
+	return r.uint64() != 0
+}
+
+func (r *importReader) int64() int64 {
+	n, err := binary.ReadVarint(&r.declReader)
+	if err != nil {
+		errorf("readVarint: %v", err)
+	}
+	return n
+}
+
+func (r *importReader) uint64() uint64 {
+	n, err := binary.ReadUvarint(&r.declReader)
+	if err != nil {
+		errorf("readUvarint: %v", err)
+	}
+	return n
+}
+
+func (r *importReader) byte() byte {
+	x, err := r.declReader.ReadByte()
+	if err != nil {
+		errorf("declReader.ReadByte: %v", err)
+	}
+	return x
+}
diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go
new file mode 100644
index 00000000000..8b163e3d058
--- /dev/null
+++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go
@@ -0,0 +1,22 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.11
+// +build !go1.11
+
+package gcimporter
+
+import "go/types"
+
+func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
+	named := make([]*types.Named, len(embeddeds))
+	for i, e := range embeddeds {
+		var ok bool
+		named[i], ok = e.(*types.Named)
+		if !ok {
+			panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11")
+		}
+	}
+	return types.NewInterface(methods, named)
+}
diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go
new file mode 100644
index 00000000000..49984f40fd8
--- /dev/null
+++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go
@@ -0,0 +1,14 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.11
+// +build go1.11
+
+package gcimporter
+
+import "go/types"
+
+func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
+	return types.NewInterfaceType(methods, embeddeds)
+}
diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go
new file mode 100644
index 00000000000..0cd3b91b65a
--- /dev/null
+++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go
@@ -0,0 +1,34 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gcimporter
+
+import "go/types"
+
+const iexportVersion = iexportVersionGenerics
+
+// additionalPredeclared returns additional predeclared types in go1.18.
+func additionalPredeclared() []types.Type {
+	return []types.Type{
+		// comparable
+		types.Universe.Lookup("comparable").Type(),
+
+		// any
+		types.Universe.Lookup("any").Type(),
+	}
+}
+
+// See cmd/compile/internal/types.SplitVargenSuffix.
+func splitVargenSuffix(name string) (base, suffix string) {
+	i := len(name)
+	for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
+		i--
+	}
+	const dot = "·"
+	if i >= len(dot) && name[i-len(dot):i] == dot {
+		i -= len(dot)
+		return name[:i], name[i:]
+	}
+	return name, ""
+}
diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go
new file mode 100644
index 00000000000..38b624cadab
--- /dev/null
+++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go
@@ -0,0 +1,10 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !goexperiment.unified
+// +build !goexperiment.unified
+
+package gcimporter
+
+const unifiedIR = false
diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go
new file mode 100644
index 00000000000..b5118d0b3a5
--- /dev/null
+++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go
@@ -0,0 +1,10 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.unified +// +build goexperiment.unified + +package gcimporter + +const unifiedIR = true diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go new file mode 100644 index 00000000000..2c077068877 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -0,0 +1,728 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Derived from go/internal/gcimporter/ureader.go + +package gcimporter + +import ( + "fmt" + "go/token" + "go/types" + "sort" + "strings" + + "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/pkgbits" +) + +// A pkgReader holds the shared state for reading a unified IR package +// description. +type pkgReader struct { + pkgbits.PkgDecoder + + fake fakeFileSet + + ctxt *types.Context + imports map[string]*types.Package // previously imported packages, indexed by path + aliases bool // create types.Alias nodes + + // lazily initialized arrays corresponding to the unified IR + // PosBase, Pkg, and Type sections, respectively. + posBases []string // position bases (i.e., file names) + pkgs []*types.Package + typs []types.Type + + // laterFns holds functions that need to be invoked at the end of + // import reading. + laterFns []func() + // laterFors is used in case of 'type A B' to ensure that B is processed before A. + laterFors map[types.Type]int + + // ifaces holds a list of constructed Interfaces, which need to have + // Complete called after importing is done. + ifaces []*types.Interface +} + +// later adds a function to be invoked at the end of import reading. +func (pr *pkgReader) later(fn func()) { + pr.laterFns = append(pr.laterFns, fn) +} + +// See cmd/compile/internal/noder.derivedInfo. +type derivedInfo struct { + idx pkgbits.Index + needed bool +} + +// See cmd/compile/internal/noder.typeInfo. +type typeInfo struct { + idx pkgbits.Index + derived bool +} + +func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + if !debug { + defer func() { + if x := recover(); x != nil { + err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x) + } + }() + } + + s := string(data) + s = s[:strings.LastIndex(s, "\n$$\n")] + input := pkgbits.NewPkgDecoder(path, s) + pkg = readUnifiedPackage(fset, nil, imports, input) + return +} + +// laterFor adds a function to be invoked at the end of import reading, and records the type that function is finishing. +func (pr *pkgReader) laterFor(t types.Type, fn func()) { + if pr.laterFors == nil { + pr.laterFors = make(map[types.Type]int) + } + pr.laterFors[t] = len(pr.laterFns) + pr.laterFns = append(pr.laterFns, fn) +} + +// readUnifiedPackage reads a package description from the given +// unified IR export data decoder. 
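+//
+// Deferred work registered while decoding (via later/laterFor above) is
+// drained at the end of this function; a rough sketch of the pattern,
+// with hypothetical names for the registered work:
+//
+//	pr.later(func() { tparam.SetConstraint(bound) }) // registered mid-decode
+//	...
+//	for _, fn := range pr.laterFns { fn() } // run once decoding completes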
+func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package {
+	pr := pkgReader{
+		PkgDecoder: input,
+
+		fake: fakeFileSet{
+			fset:  fset,
+			files: make(map[string]*fileInfo),
+		},
+
+		ctxt:    ctxt,
+		imports: imports,
+		aliases: aliases.Enabled(),
+
+		posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)),
+		pkgs:     make([]*types.Package, input.NumElems(pkgbits.RelocPkg)),
+		typs:     make([]types.Type, input.NumElems(pkgbits.RelocType)),
+	}
+	defer pr.fake.setLines()
+
+	r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
+	pkg := r.pkg()
+	r.Bool() // has init
+
+	for i, n := 0, r.Len(); i < n; i++ {
+		// As if r.obj(), but avoiding the Scope.Lookup call,
+		// to avoid eager loading of imports.
+		r.Sync(pkgbits.SyncObject)
+		assert(!r.Bool())
+		r.p.objIdx(r.Reloc(pkgbits.RelocObj))
+		assert(r.Len() == 0)
+	}
+
+	r.Sync(pkgbits.SyncEOF)
+
+	for _, fn := range pr.laterFns {
+		fn()
+	}
+
+	for _, iface := range pr.ifaces {
+		iface.Complete()
+	}
+
+	// Imports() of pkg are all of the transitive packages that were loaded.
+	var imps []*types.Package
+	for _, imp := range pr.pkgs {
+		if imp != nil && imp != pkg {
+			imps = append(imps, imp)
+		}
+	}
+	sort.Sort(byPath(imps))
+	pkg.SetImports(imps)
+
+	pkg.MarkComplete()
+	return pkg
+}
+
+// A reader holds the state for reading a single unified IR element
+// within a package.
+type reader struct {
+	pkgbits.Decoder
+
+	p *pkgReader
+
+	dict *readerDict
+}
+
+// A readerDict holds the state for type parameters that parameterize
+// the current unified IR element.
+type readerDict struct {
+	// bounds is a slice of typeInfos corresponding to the underlying
+	// bounds of the element's type parameters.
+	bounds []typeInfo
+
+	// tparams is a slice of the constructed TypeParams for the element.
+	tparams []*types.TypeParam
+
+	// derived is a slice of types derived from tparams, which may be
+	// instantiated while reading the current element.
+	derived      []derivedInfo
+	derivedTypes []types.Type // lazily instantiated from derived
+}
+
+func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader {
+	return &reader{
+		Decoder: pr.NewDecoder(k, idx, marker),
+		p:       pr,
+	}
+}
+
+func (pr *pkgReader) tempReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader {
+	return &reader{
+		Decoder: pr.TempDecoder(k, idx, marker),
+		p:       pr,
+	}
+}
+
+func (pr *pkgReader) retireReader(r *reader) {
+	pr.RetireDecoder(&r.Decoder)
+}
+
+// @@@ Positions
+
+func (r *reader) pos() token.Pos {
+	r.Sync(pkgbits.SyncPos)
+	if !r.Bool() {
+		return token.NoPos
+	}
+
+	// TODO(mdempsky): Delta encoding.
+	posBase := r.posBase()
+	line := r.Uint()
+	col := r.Uint()
+	return r.p.fake.pos(posBase, int(line), int(col))
+}
+
+func (r *reader) posBase() string {
+	return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase))
+}
+
+func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string {
+	if b := pr.posBases[idx]; b != "" {
+		return b
+	}
+
+	var filename string
+	{
+		r := pr.tempReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase)
+
+		// Within types2, position bases have a lot more details (e.g.,
+		// keeping track of where //line directives appeared exactly).
+		//
+		// For go/types, we just track the file name.
+ + filename = r.String() + + if r.Bool() { // file base + // Was: "b = token.NewTrimmedFileBase(filename, true)" + } else { // line base + pos := r.pos() + line := r.Uint() + col := r.Uint() + + // Was: "b = token.NewLineBase(pos, filename, true, line, col)" + _, _, _ = pos, line, col + } + pr.retireReader(r) + } + b := filename + pr.posBases[idx] = b + return b +} + +// @@@ Packages + +func (r *reader) pkg() *types.Package { + r.Sync(pkgbits.SyncPkg) + return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg)) +} + +func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { + // TODO(mdempsky): Consider using some non-nil pointer to indicate + // the universe scope, so we don't need to keep re-reading it. + if pkg := pr.pkgs[idx]; pkg != nil { + return pkg + } + + pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg() + pr.pkgs[idx] = pkg + return pkg +} + +func (r *reader) doPkg() *types.Package { + path := r.String() + switch path { + case "": + path = r.p.PkgPath() + case "builtin": + return nil // universe + case "unsafe": + return types.Unsafe + } + + if pkg := r.p.imports[path]; pkg != nil { + return pkg + } + + name := r.String() + + pkg := types.NewPackage(path, name) + r.p.imports[path] = pkg + + return pkg +} + +// @@@ Types + +func (r *reader) typ() types.Type { + return r.p.typIdx(r.typInfo(), r.dict) +} + +func (r *reader) typInfo() typeInfo { + r.Sync(pkgbits.SyncType) + if r.Bool() { + return typeInfo{idx: pkgbits.Index(r.Len()), derived: true} + } + return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false} +} + +func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type { + idx := info.idx + var where *types.Type + if info.derived { + where = &dict.derivedTypes[idx] + idx = dict.derived[idx].idx + } else { + where = &pr.typs[idx] + } + + if typ := *where; typ != nil { + return typ + } + + var typ types.Type + { + r := pr.tempReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx) + r.dict = dict + + typ = r.doTyp() + assert(typ != nil) + pr.retireReader(r) + } + // See comment in pkgReader.typIdx explaining how this happens. 
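+	// Gloss, inferred from the code in this function: decoding typ above
+	// can re-enter typIdx for the same index (e.g. through a cyclic
+	// reference), filling *where first; preferring that earlier value,
+	// as below, keeps type identity stable.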
+ if prev := *where; prev != nil { + return prev + } + + *where = typ + return typ +} + +func (r *reader) doTyp() (res types.Type) { + switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag { + default: + errorf("unhandled type tag: %v", tag) + panic("unreachable") + + case pkgbits.TypeBasic: + return types.Typ[r.Len()] + + case pkgbits.TypeNamed: + obj, targs := r.obj() + name := obj.(*types.TypeName) + if len(targs) != 0 { + t, _ := types.Instantiate(r.p.ctxt, name.Type(), targs, false) + return t + } + return name.Type() + + case pkgbits.TypeTypeParam: + return r.dict.tparams[r.Len()] + + case pkgbits.TypeArray: + len := int64(r.Uint64()) + return types.NewArray(r.typ(), len) + case pkgbits.TypeChan: + dir := types.ChanDir(r.Len()) + return types.NewChan(dir, r.typ()) + case pkgbits.TypeMap: + return types.NewMap(r.typ(), r.typ()) + case pkgbits.TypePointer: + return types.NewPointer(r.typ()) + case pkgbits.TypeSignature: + return r.signature(nil, nil, nil) + case pkgbits.TypeSlice: + return types.NewSlice(r.typ()) + case pkgbits.TypeStruct: + return r.structType() + case pkgbits.TypeInterface: + return r.interfaceType() + case pkgbits.TypeUnion: + return r.unionType() + } +} + +func (r *reader) structType() *types.Struct { + fields := make([]*types.Var, r.Len()) + var tags []string + for i := range fields { + pos := r.pos() + pkg, name := r.selector() + ftyp := r.typ() + tag := r.String() + embedded := r.Bool() + + fields[i] = types.NewField(pos, pkg, name, ftyp, embedded) + if tag != "" { + for len(tags) < i { + tags = append(tags, "") + } + tags = append(tags, tag) + } + } + return types.NewStruct(fields, tags) +} + +func (r *reader) unionType() *types.Union { + terms := make([]*types.Term, r.Len()) + for i := range terms { + terms[i] = types.NewTerm(r.Bool(), r.typ()) + } + return types.NewUnion(terms) +} + +func (r *reader) interfaceType() *types.Interface { + methods := make([]*types.Func, r.Len()) + embeddeds := make([]types.Type, r.Len()) + implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool() + + for i := range methods { + pos := r.pos() + pkg, name := r.selector() + mtyp := r.signature(nil, nil, nil) + methods[i] = types.NewFunc(pos, pkg, name, mtyp) + } + + for i := range embeddeds { + embeddeds[i] = r.typ() + } + + iface := types.NewInterfaceType(methods, embeddeds) + if implicit { + iface.MarkImplicit() + } + + // We need to call iface.Complete(), but if there are any embedded + // defined types, then we may not have set their underlying + // interface type yet. So we need to defer calling Complete until + // after we've called SetUnderlying everywhere. + // + // TODO(mdempsky): After CL 424876 lands, it should be safe to call + // iface.Complete() immediately. + r.p.ifaces = append(r.p.ifaces, iface) + + return iface +} + +func (r *reader) signature(recv *types.Var, rtparams, tparams []*types.TypeParam) *types.Signature { + r.Sync(pkgbits.SyncSignature) + + params := r.params() + results := r.params() + variadic := r.Bool() + + return types.NewSignatureType(recv, rtparams, tparams, params, results, variadic) +} + +func (r *reader) params() *types.Tuple { + r.Sync(pkgbits.SyncParams) + + params := make([]*types.Var, r.Len()) + for i := range params { + params[i] = r.param() + } + + return types.NewTuple(params...) 
+} + +func (r *reader) param() *types.Var { + r.Sync(pkgbits.SyncParam) + + pos := r.pos() + pkg, name := r.localIdent() + typ := r.typ() + + return types.NewParam(pos, pkg, name, typ) +} + +// @@@ Objects + +func (r *reader) obj() (types.Object, []types.Type) { + r.Sync(pkgbits.SyncObject) + + assert(!r.Bool()) + + pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj)) + obj := pkgScope(pkg).Lookup(name) + + targs := make([]types.Type, r.Len()) + for i := range targs { + targs[i] = r.typ() + } + + return obj, targs +} + +func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { + + var objPkg *types.Package + var objName string + var tag pkgbits.CodeObj + { + rname := pr.tempReader(pkgbits.RelocName, idx, pkgbits.SyncObject1) + + objPkg, objName = rname.qualifiedIdent() + assert(objName != "") + + tag = pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj)) + pr.retireReader(rname) + } + + if tag == pkgbits.ObjStub { + assert(objPkg == nil || objPkg == types.Unsafe) + return objPkg, objName + } + + // Ignore local types promoted to global scope (#55110). + if _, suffix := splitVargenSuffix(objName); suffix != "" { + return objPkg, objName + } + + if objPkg.Scope().Lookup(objName) == nil { + dict := pr.objDictIdx(idx) + + r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1) + r.dict = dict + + declare := func(obj types.Object) { + objPkg.Scope().Insert(obj) + } + + switch tag { + default: + panic("weird") + + case pkgbits.ObjAlias: + pos := r.pos() + typ := r.typ() + declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ)) + + case pkgbits.ObjConst: + pos := r.pos() + typ := r.typ() + val := r.Value() + declare(types.NewConst(pos, objPkg, objName, typ, val)) + + case pkgbits.ObjFunc: + pos := r.pos() + tparams := r.typeParamNames() + sig := r.signature(nil, nil, tparams) + declare(types.NewFunc(pos, objPkg, objName, sig)) + + case pkgbits.ObjType: + pos := r.pos() + + obj := types.NewTypeName(pos, objPkg, objName, nil) + named := types.NewNamed(obj, nil, nil) + declare(obj) + + named.SetTypeParams(r.typeParamNames()) + + setUnderlying := func(underlying types.Type) { + // If the underlying type is an interface, we need to + // duplicate its methods so we can replace the receiver + // parameter's type (#49906). + if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 { + methods := make([]*types.Func, iface.NumExplicitMethods()) + for i := range methods { + fn := iface.ExplicitMethod(i) + sig := fn.Type().(*types.Signature) + + recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named) + methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic())) + } + + embeds := make([]types.Type, iface.NumEmbeddeds()) + for i := range embeds { + embeds[i] = iface.EmbeddedType(i) + } + + newIface := types.NewInterfaceType(methods, embeds) + r.p.ifaces = append(r.p.ifaces, newIface) + underlying = newIface + } + + named.SetUnderlying(underlying) + } + + // Since go.dev/cl/455279, we can assume rhs.Underlying() will + // always be non-nil. However, to temporarily support users of + // older snapshot releases, we continue to fallback to the old + // behavior for now. + // + // TODO(mdempsky): Remove fallback code and simplify after + // allowing time for snapshot users to upgrade. 
+			rhs := r.typ()
+			if underlying := rhs.Underlying(); underlying != nil {
+				setUnderlying(underlying)
+			} else {
+				pk := r.p
+				pk.laterFor(named, func() {
+					// First be sure that the rhs is initialized, if it needs to be initialized.
+					delete(pk.laterFors, named) // prevent cycles
+					if i, ok := pk.laterFors[rhs]; ok {
+						f := pk.laterFns[i]
+						pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op
+						f()                        // initialize RHS
+					}
+					setUnderlying(rhs.Underlying())
+				})
+			}
+
+			for i, n := 0, r.Len(); i < n; i++ {
+				named.AddMethod(r.method())
+			}
+
+		case pkgbits.ObjVar:
+			pos := r.pos()
+			typ := r.typ()
+			declare(types.NewVar(pos, objPkg, objName, typ))
+		}
+	}
+
+	return objPkg, objName
+}
+
+func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict {
+
+	var dict readerDict
+
+	{
+		r := pr.tempReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1)
+		if implicits := r.Len(); implicits != 0 {
+			errorf("unexpected object with %v implicit type parameter(s)", implicits)
+		}
+
+		dict.bounds = make([]typeInfo, r.Len())
+		for i := range dict.bounds {
+			dict.bounds[i] = r.typInfo()
+		}
+
+		dict.derived = make([]derivedInfo, r.Len())
+		dict.derivedTypes = make([]types.Type, len(dict.derived))
+		for i := range dict.derived {
+			dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()}
+		}
+
+		pr.retireReader(r)
+	}
+	// function references follow, but reader doesn't need those
+
+	return &dict
+}
+
+func (r *reader) typeParamNames() []*types.TypeParam {
+	r.Sync(pkgbits.SyncTypeParamNames)
+
+	// Note: This code assumes it only processes objects without
+	// implicit type parameters. This is currently fine, because
+	// reader is only used to read in exported declarations, which are
+	// always package scoped.
+
+	if len(r.dict.bounds) == 0 {
+		return nil
+	}
+
+	// Careful: Type parameter lists may have cycles. To allow for this,
+	// we construct the type parameter list in two passes: first we
+	// create all the TypeNames and TypeParams, then we construct and
+	// set the bound type.
+
+	r.dict.tparams = make([]*types.TypeParam, len(r.dict.bounds))
+	for i := range r.dict.bounds {
+		pos := r.pos()
+		pkg, name := r.localIdent()
+
+		tname := types.NewTypeName(pos, pkg, name, nil)
+		r.dict.tparams[i] = types.NewTypeParam(tname, nil)
+	}
+
+	typs := make([]types.Type, len(r.dict.bounds))
+	for i, bound := range r.dict.bounds {
+		typs[i] = r.p.typIdx(bound, r.dict)
+	}
+
+	// TODO(mdempsky): This is subtle, elaborate further.
+	//
+	// We have to save tparams outside of the closure, because
+	// typeParamNames() can be called multiple times with the same
+	// dictionary instance.
+	//
+	// Also, this needs to happen later to make sure SetUnderlying has
+	// been called.
+	//
+	// TODO(mdempsky): Is it safe to have a single "later" slice or do
+	// we need to have multiple passes? See comments on CL 386002 and
+	// go.dev/issue/52104.
+	tparams := r.dict.tparams
+	r.p.later(func() {
+		for i, typ := range typs {
+			tparams[i].SetConstraint(typ)
+		}
+	})
+
+	return r.dict.tparams
+}
+
+func (r *reader) method() *types.Func {
+	r.Sync(pkgbits.SyncMethod)
+	pos := r.pos()
+	pkg, name := r.selector()
+
+	rparams := r.typeParamNames()
+	sig := r.signature(r.param(), rparams, nil)
+
+	_ = r.pos() // TODO(mdempsky): Remove; this is a hack for linker.go.
+	return types.NewFunc(pos, pkg, name, sig)
+}
+
+func (r *reader) qualifiedIdent() (*types.Package, string) { return r.ident(pkgbits.SyncSym) }
+func (r *reader) localIdent() (*types.Package, string)     { return r.ident(pkgbits.SyncLocalIdent) }
+func (r *reader) selector() (*types.Package, string)       { return r.ident(pkgbits.SyncSelector) }
+
+func (r *reader) ident(marker pkgbits.SyncMarker) (*types.Package, string) {
+	r.Sync(marker)
+	return r.pkg(), r.String()
+}
+
+// pkgScope returns pkg.Scope().
+// If pkg is nil, it returns types.Universe instead.
+//
+// TODO(mdempsky): Remove after x/tools can depend on Go 1.19.
+func pkgScope(pkg *types.Package) *types.Scope {
+	if pkg != nil {
+		return pkg.Scope()
+	}
+	return types.Universe
+}
diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/gocommand/invoke.go
new file mode 100644
index 00000000000..eb7a8282f9e
--- /dev/null
+++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/gocommand/invoke.go
@@ -0,0 +1,470 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gocommand is a helper for calling the go command.
+package gocommand
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"os/exec"
+	"reflect"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/tools/internal/event"
+	"golang.org/x/tools/internal/event/keys"
+	"golang.org/x/tools/internal/event/label"
+)
+
+// A Runner will run go command invocations and serialize
+// them if it sees a concurrency error.
+type Runner struct {
+	// once guards the runner initialization.
+	once sync.Once
+
+	// inFlight tracks available workers.
+	inFlight chan struct{}
+
+	// serialized guards the ability to run a go command serially,
+	// to avoid deadlocks when claiming workers.
+	serialized chan struct{}
+}
+
+const maxInFlight = 10
+
+func (runner *Runner) initialize() {
+	runner.once.Do(func() {
+		runner.inFlight = make(chan struct{}, maxInFlight)
+		runner.serialized = make(chan struct{}, 1)
+	})
+}
+
+// 1.13: go: updates to go.mod needed, but contents have changed
+// 1.14: go: updating go.mod: existing contents have changed since last read
+var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`)
+
+// event keys for go command invocations
+var (
+	verb      = keys.NewString("verb", "go command verb")
+	directory = keys.NewString("directory", "")
+)
+
+func invLabels(inv Invocation) []label.Label {
+	return []label.Label{verb.Of(inv.Verb), directory.Of(inv.WorkingDir)}
+}
+
+// Run is a convenience wrapper around RunRaw.
+// It returns only stdout and a "friendly" error.
+func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) {
+	ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...)
+	defer done()
+
+	stdout, _, friendly, _ := runner.RunRaw(ctx, inv)
+	return stdout, friendly
+}
+
+// RunPiped runs the invocation serially, always waiting for any concurrent
+// invocations to complete first.
+func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error {
+	ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...)
+ defer done() + + _, err := runner.runPiped(ctx, inv, stdout, stderr) + return err +} + +// RunRaw runs the invocation, serializing requests only if they fight over +// go.mod changes. +// Postcondition: both error results have same nilness. +func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...) + defer done() + // Make sure the runner is always initialized. + runner.initialize() + + // First, try to run the go command concurrently. + stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv) + + // If we encounter a load concurrency error, we need to retry serially. + if friendlyErr != nil && modConcurrencyError.MatchString(friendlyErr.Error()) { + event.Error(ctx, "Load concurrency error, will retry serially", err) + + // Run serially by calling runPiped. + stdout.Reset() + stderr.Reset() + friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr) + } + + return stdout, stderr, friendlyErr, err +} + +// Postcondition: both error results have same nilness. +func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + // Wait for 1 worker to become available. + select { + case <-ctx.Done(): + return nil, nil, ctx.Err(), ctx.Err() + case runner.inFlight <- struct{}{}: + defer func() { <-runner.inFlight }() + } + + stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{} + friendlyErr, err := inv.runWithFriendlyError(ctx, stdout, stderr) + return stdout, stderr, friendlyErr, err +} + +// Postcondition: both error results have same nilness. +func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) { + // Make sure the runner is always initialized. + runner.initialize() + + // Acquire the serialization lock. This avoids deadlocks between two + // runPiped commands. + select { + case <-ctx.Done(): + return ctx.Err(), ctx.Err() + case runner.serialized <- struct{}{}: + defer func() { <-runner.serialized }() + } + + // Wait for all in-progress go commands to return before proceeding, + // to avoid load concurrency errors. + for i := 0; i < maxInFlight; i++ { + select { + case <-ctx.Done(): + return ctx.Err(), ctx.Err() + case runner.inFlight <- struct{}{}: + // Make sure we always "return" any workers we took. + defer func() { <-runner.inFlight }() + } + } + + return inv.runWithFriendlyError(ctx, stdout, stderr) +} + +// An Invocation represents a call to the go command. +type Invocation struct { + Verb string + Args []string + BuildFlags []string + + // If ModFlag is set, the go command is invoked with -mod=ModFlag. + // TODO(rfindley): remove, in favor of Args. + ModFlag string + + // If ModFile is set, the go command is invoked with -modfile=ModFile. + // TODO(rfindley): remove, in favor of Args. + ModFile string + + // If Overlay is set, the go command is invoked with -overlay=Overlay. + // TODO(rfindley): remove, in favor of Args. + Overlay string + + // If CleanEnv is set, the invocation will run only with the environment + // in Env, not starting with os.Environ. + CleanEnv bool + Env []string + WorkingDir string + Logf func(format string, args ...interface{}) +} + +// Postcondition: both error results have same nilness. 
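+// (Aside: a minimal usage sketch for Runner and Invocation, assuming a
+// caller that already has a ctx and a working directory dir:
+//
+//	var runner Runner
+//	inv := Invocation{Verb: "env", Args: []string{"GOMOD"}, WorkingDir: dir}
+//	stdout, err := runner.Run(ctx, inv)
+//
+// Run retries serially if the go command reports a go.mod concurrency
+// error, as described on RunRaw above.)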
+func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) {
+	rawError = i.run(ctx, stdout, stderr)
+	if rawError != nil {
+		friendlyError = rawError
+		// Check for 'go' executable not being found.
+		if ee, ok := rawError.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
+			friendlyError = fmt.Errorf("go command required, not found: %v", ee)
+		}
+		if ctx.Err() != nil {
+			friendlyError = ctx.Err()
+		}
+		friendlyError = fmt.Errorf("err: %v: stderr: %s", friendlyError, stderr)
+	}
+	return
+}
+
+func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
+	log := i.Logf
+	if log == nil {
+		log = func(string, ...interface{}) {}
+	}
+
+	goArgs := []string{i.Verb}
+
+	appendModFile := func() {
+		if i.ModFile != "" {
+			goArgs = append(goArgs, "-modfile="+i.ModFile)
+		}
+	}
+	appendModFlag := func() {
+		if i.ModFlag != "" {
+			goArgs = append(goArgs, "-mod="+i.ModFlag)
+		}
+	}
+	appendOverlayFlag := func() {
+		if i.Overlay != "" {
+			goArgs = append(goArgs, "-overlay="+i.Overlay)
+		}
+	}
+
+	switch i.Verb {
+	case "env", "version":
+		goArgs = append(goArgs, i.Args...)
+	case "mod":
+		// mod needs the sub-verb before flags.
+		goArgs = append(goArgs, i.Args[0])
+		appendModFile()
+		goArgs = append(goArgs, i.Args[1:]...)
+	case "get":
+		goArgs = append(goArgs, i.BuildFlags...)
+		appendModFile()
+		goArgs = append(goArgs, i.Args...)
+
+	default: // notably list and build.
+		goArgs = append(goArgs, i.BuildFlags...)
+		appendModFile()
+		appendModFlag()
+		appendOverlayFlag()
+		goArgs = append(goArgs, i.Args...)
+	}
+	cmd := exec.Command("go", goArgs...)
+	cmd.Stdout = stdout
+	cmd.Stderr = stderr
+
+	// cmd.WaitDelay was added only in go1.20 (see #50436).
+	if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() {
+		// https://go.dev/issue/59541: don't wait forever copying stderr
+		// after the command has exited.
+		// After CL 484741 we copy stdout manually, so we'll stop reading that as
+		// soon as ctx is done. However, we also don't want to wait around forever
+		// for stderr. Give a much-longer-than-reasonable delay and then assume that
+		// something has wedged in the kernel or runtime.
+		waitDelay.Set(reflect.ValueOf(30 * time.Second))
+	}
+
+	// On darwin the cwd gets resolved to the real path, which breaks anything that
+	// expects the working directory to keep the original path, including the
+	// go command when dealing with modules.
+	// The Go stdlib has a special feature where if the cwd and the PWD are the
+	// same node then it trusts the PWD, so by setting it in the env for the child
+	// process we fix up all the paths returned by the go command.
+	if !i.CleanEnv {
+		cmd.Env = os.Environ()
+	}
+	cmd.Env = append(cmd.Env, i.Env...)
+	if i.WorkingDir != "" {
+		cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir)
+		cmd.Dir = i.WorkingDir
+	}
+
+	defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
+
+	return runCmdContext(ctx, cmd)
+}
+
+// DebugHangingGoCommands may be set by tests to enable additional
+// instrumentation (including panics) for debugging hanging Go commands.
+//
+// See golang/go#54461 for details.
+var DebugHangingGoCommands = false
+
+// runCmdContext is like exec.CommandContext except it sends os.Interrupt
+// before os.Kill.
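+//
+// The cancellation sequence implemented below is, roughly:
+//
+//	ctx done -> Process.Signal(os.Interrupt) -> wait up to 5s -> Process.Kill()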
+func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
+	// If cmd.Stdout is not an *os.File, the exec package will create a pipe and
+	// copy it to the Writer in a goroutine until the process has finished and
+	// either the pipe reaches EOF or command's WaitDelay expires.
+	//
+	// However, the output from 'go list' can be quite large, and we don't want to
+	// keep reading (and allocating buffers) if we've already decided we don't
+	// care about the output. We don't want to wait for the process to finish, and
+	// we don't want to wait for the WaitDelay to expire either.
+	//
+	// Instead, if cmd.Stdout requires a copying goroutine we explicitly replace
+	// it with a pipe (which is an *os.File), which we can close in order to stop
+	// copying output as soon as we realize we don't care about it.
+	var stdoutW *os.File
+	if cmd.Stdout != nil {
+		if _, ok := cmd.Stdout.(*os.File); !ok {
+			var stdoutR *os.File
+			stdoutR, stdoutW, err = os.Pipe()
+			if err != nil {
+				return err
+			}
+			prevStdout := cmd.Stdout
+			cmd.Stdout = stdoutW
+
+			stdoutErr := make(chan error, 1)
+			go func() {
+				_, err := io.Copy(prevStdout, stdoutR)
+				if err != nil {
+					err = fmt.Errorf("copying stdout: %w", err)
+				}
+				stdoutErr <- err
+			}()
+			defer func() {
+				// We started a goroutine to copy a stdout pipe.
+				// Wait for it to finish, or terminate it if need be.
+				var err2 error
+				select {
+				case err2 = <-stdoutErr:
+					stdoutR.Close()
+				case <-ctx.Done():
+					stdoutR.Close()
+					// Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close
+					// should cause the Read call in io.Copy to unblock and return
+					// immediately, but we still need to receive from stdoutErr to confirm
+					// that it has happened.
+					<-stdoutErr
+					err2 = ctx.Err()
+				}
+				if err == nil {
+					err = err2
+				}
+			}()
+
+			// Per https://pkg.go.dev/os/exec#Cmd, “If Stdout and Stderr are the
+			// same writer, and have a type that can be compared with ==, at most
+			// one goroutine at a time will call Write.”
+			//
+			// Since we're starting a goroutine that writes to cmd.Stdout, we must
+			// also update cmd.Stderr so that it still holds.
+			func() {
+				defer func() { recover() }()
+				if cmd.Stderr == prevStdout {
+					cmd.Stderr = cmd.Stdout
+				}
+			}()
+		}
+	}
+
+	err = cmd.Start()
+	if stdoutW != nil {
+		// The child process has inherited the pipe file,
+		// so close the copy held in this process.
+		stdoutW.Close()
+		stdoutW = nil
+	}
+	if err != nil {
+		return err
+	}
+
+	resChan := make(chan error, 1)
+	go func() {
+		resChan <- cmd.Wait()
+	}()
+
+	// If we're interested in debugging hanging Go commands, stop waiting after a
+	// minute and panic with interesting information.
+	debug := DebugHangingGoCommands
+	if debug {
+		timer := time.NewTimer(1 * time.Minute)
+		defer timer.Stop()
+		select {
+		case err := <-resChan:
+			return err
+		case <-timer.C:
+			HandleHangingGoCommand(cmd.Process)
+		case <-ctx.Done():
+		}
+	} else {
+		select {
+		case err := <-resChan:
+			return err
+		case <-ctx.Done():
+		}
+	}
+
+	// Cancelled. Interrupt and see if it ends voluntarily.
+	if err := cmd.Process.Signal(os.Interrupt); err == nil {
+		// (We used to wait only 1s but this proved
+		// fragile on loaded builder machines.)
+		timer := time.NewTimer(5 * time.Second)
+		defer timer.Stop()
+		select {
+		case err := <-resChan:
+			return err
+		case <-timer.C:
+		}
+	}
+
+	// Didn't shut down in response to interrupt. Kill it hard.
+	// TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT
+	// on certain platforms, such as unix.
+ if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug { + log.Printf("error killing the Go command: %v", err) + } + + return <-resChan +} + +func HandleHangingGoCommand(proc *os.Process) { + switch runtime.GOOS { + case "linux", "darwin", "freebsd", "netbsd": + fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND + +The gopls test runner has detected a hanging go command. In order to debug +this, the output of ps and lsof/fstat is printed below. + +See golang/go#54461 for more details.`) + + fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:") + fmt.Fprintln(os.Stderr, "-------------------------") + psCmd := exec.Command("ps", "axo", "ppid,pid,command") + psCmd.Stdout = os.Stderr + psCmd.Stderr = os.Stderr + if err := psCmd.Run(); err != nil { + panic(fmt.Sprintf("running ps: %v", err)) + } + + listFiles := "lsof" + if runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" { + listFiles = "fstat" + } + + fmt.Fprintln(os.Stderr, "\n"+listFiles+":") + fmt.Fprintln(os.Stderr, "-----") + listFilesCmd := exec.Command(listFiles) + listFilesCmd.Stdout = os.Stderr + listFilesCmd.Stderr = os.Stderr + if err := listFilesCmd.Run(); err != nil { + panic(fmt.Sprintf("running %s: %v", listFiles, err)) + } + } + panic(fmt.Sprintf("detected hanging go command (pid %d): see golang/go#54461 for more details", proc.Pid)) +} + +func cmdDebugStr(cmd *exec.Cmd) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.SplitN(kv, "=", 2) + if len(split) == 2 { + k, v := split[0], split[1] + env[k] = v + } + } + + var args []string + for _, arg := range cmd.Args { + quoted := strconv.Quote(arg) + if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") { + args = append(args, quoted) + } else { + args = append(args, arg) + } + } + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/gocommand/vendor.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/gocommand/vendor.go new file mode 100644 index 00000000000..e38d1fb4888 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/gocommand/vendor.go @@ -0,0 +1,163 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocommand + +import ( + "bytes" + "context" + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + "time" + + "golang.org/x/mod/semver" +) + +// ModuleJSON holds information about a module. +type ModuleJSON struct { + Path string // module path + Version string // module version + Versions []string // available module versions (with -versions) + Replace *ModuleJSON // replaced by this module + Time *time.Time // time version was created + Update *ModuleJSON // available update, if any (with -u) + Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? + Dir string // directory holding files for this module, if any + GoMod string // path to go.mod file used when loading this module, if any + GoVersion string // go version used in module +} + +var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`) + +// VendorEnabled reports whether vendoring is enabled. It takes a *Runner to execute Go commands +// with the supplied context.Context and Invocation. 
The Invocation can contain pre-defined fields, +// of which only Verb and Args are modified to run the appropriate Go command. +// Inspired by setDefaultBuildMod in modload/init.go +func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, *ModuleJSON, error) { + mainMod, go114, err := getMainModuleAnd114(ctx, inv, r) + if err != nil { + return false, nil, err + } + + // We check the GOFLAGS to see if there is anything overridden or not. + inv.Verb = "env" + inv.Args = []string{"GOFLAGS"} + stdout, err := r.Run(ctx, inv) + if err != nil { + return false, nil, err + } + goflags := string(bytes.TrimSpace(stdout.Bytes())) + matches := modFlagRegexp.FindStringSubmatch(goflags) + var modFlag string + if len(matches) != 0 { + modFlag = matches[1] + } + // Don't override an explicit '-mod=' argument. + if modFlag == "vendor" { + return true, mainMod, nil + } else if modFlag != "" { + return false, nil, nil + } + if mainMod == nil || !go114 { + return false, nil, nil + } + // Check 1.14's automatic vendor mode. + if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() { + if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 { + // The Go version is at least 1.14, and a vendor directory exists. + // Set -mod=vendor by default. + return true, mainMod, nil + } + } + return false, nil, nil +} + +// getMainModuleAnd114 gets one of the main modules' information and whether the +// go command in use is 1.14+. This is the information needed to figure out +// if vendoring should be enabled. +func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) { + const format = `{{.Path}} +{{.Dir}} +{{.GoMod}} +{{.GoVersion}} +{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}} +` + inv.Verb = "list" + inv.Args = []string{"-m", "-f", format} + stdout, err := r.Run(ctx, inv) + if err != nil { + return nil, false, err + } + + lines := strings.Split(stdout.String(), "\n") + if len(lines) < 5 { + return nil, false, fmt.Errorf("unexpected stdout: %q", stdout.String()) + } + mod := &ModuleJSON{ + Path: lines[0], + Dir: lines[1], + GoMod: lines[2], + GoVersion: lines[3], + Main: true, + } + return mod, lines[4] == "go1.14", nil +} + +// WorkspaceVendorEnabled reports whether workspace vendoring is enabled. It takes a *Runner to execute Go commands +// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields, +// of which only Verb and Args are modified to run the appropriate Go command. +// Inspired by setDefaultBuildMod in modload/init.go +func WorkspaceVendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, []*ModuleJSON, error) { + inv.Verb = "env" + inv.Args = []string{"GOWORK"} + stdout, err := r.Run(ctx, inv) + if err != nil { + return false, nil, err + } + goWork := string(bytes.TrimSpace(stdout.Bytes())) + if fi, err := os.Stat(filepath.Join(filepath.Dir(goWork), "vendor")); err == nil && fi.IsDir() { + mainMods, err := getWorkspaceMainModules(ctx, inv, r) + if err != nil { + return false, nil, err + } + return true, mainMods, nil + } + return false, nil, nil +} + +// getWorkspaceMainModules gets the main modules' information. +// This is the information needed to figure out if vendoring should be enabled. 
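+//
+// Each module contributes four lines (Path, Dir, GoMod, GoVersion) to the
+// output of the -f template below; illustratively, with hypothetical paths:
+//
+//	example.com/mod
+//	/home/user/mod
+//	/home/user/mod/go.mod
+//	1.21
+//
+// and the loop below consumes the output in strides of four.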
+func getWorkspaceMainModules(ctx context.Context, inv Invocation, r *Runner) ([]*ModuleJSON, error) { + const format = `{{.Path}} +{{.Dir}} +{{.GoMod}} +{{.GoVersion}} +` + inv.Verb = "list" + inv.Args = []string{"-m", "-f", format} + stdout, err := r.Run(ctx, inv) + if err != nil { + return nil, err + } + + lines := strings.Split(strings.TrimSuffix(stdout.String(), "\n"), "\n") + if len(lines) < 4 { + return nil, fmt.Errorf("unexpected stdout: %q", stdout.String()) + } + mods := make([]*ModuleJSON, 0, len(lines)/4) + for i := 0; i < len(lines); i += 4 { + mods = append(mods, &ModuleJSON{ + Path: lines[i], + Dir: lines[i+1], + GoMod: lines[i+2], + GoVersion: lines[i+3], + Main: true, + }) + } + return mods, nil +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/gocommand/version.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/gocommand/version.go new file mode 100644 index 00000000000..446c5846a60 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/gocommand/version.go @@ -0,0 +1,71 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocommand + +import ( + "context" + "fmt" + "regexp" + "strings" +) + +// GoVersion reports the minor version number of the highest release +// tag built into the go command on the PATH. +// +// Note that this may be higher than the version of the go tool used +// to build this application, and thus the versions of the standard +// go/{scanner,parser,ast,types} packages that are linked into it. +// In that case, callers should either downgrade to the version of +// go used to build the application, or report an error that the +// application is too old to use the go command on the PATH. +func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { + inv.Verb = "list" + inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`} + inv.BuildFlags = nil // This is not a build command. + inv.ModFlag = "" + inv.ModFile = "" + inv.Env = append(inv.Env[:len(inv.Env):len(inv.Env)], "GO111MODULE=off") + + stdoutBytes, err := r.Run(ctx, inv) + if err != nil { + return 0, err + } + stdout := stdoutBytes.String() + if len(stdout) < 3 { + return 0, fmt.Errorf("bad ReleaseTags output: %q", stdout) + } + // Split up "[go1.1 go1.15]" and return highest go1.X value. + tags := strings.Fields(stdout[1 : len(stdout)-2]) + for i := len(tags) - 1; i >= 0; i-- { + var version int + if _, err := fmt.Sscanf(tags[i], "go1.%d", &version); err != nil { + continue + } + return version, nil + } + return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags) +} + +// GoVersionOutput returns the complete output of the go version command. +func GoVersionOutput(ctx context.Context, inv Invocation, r *Runner) (string, error) { + inv.Verb = "version" + goVersion, err := r.Run(ctx, inv) + if err != nil { + return "", err + } + return goVersion.String(), nil +} + +// ParseGoVersionOutput extracts the Go version string +// from the output of the "go version" command. +// Given an unrecognized form, it returns an empty string. 
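+//
+// For example (derived from the regexp below):
+//
+//	ParseGoVersionOutput("go version go1.21.1 linux/amd64") // "go1.21.1"
+//	ParseGoVersionOutput("unexpected text")                 // ""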
+func ParseGoVersionOutput(data string) string { + re := regexp.MustCompile(`^go version (go\S+|devel \S+)`) + m := re.FindStringSubmatch(data) + if len(m) != 2 { + return "" // unrecognized version + } + return m[1] +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/packagesinternal/packages.go new file mode 100644 index 00000000000..44719de173b --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -0,0 +1,22 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packagesinternal exposes internal-only fields from go/packages. +package packagesinternal + +var GetForTest = func(p interface{}) string { return "" } +var GetDepsErrors = func(p interface{}) []*PackageError { return nil } + +type PackageError struct { + ImportStack []string // shortest path from package named on command line to this one + Pos string // position of error (if present, file:line:col) + Err string // the error itself +} + +var TypecheckCgo int +var DepsErrors int // must be set as a LoadMode to call GetDepsErrors +var ForTest int // must be set as a LoadMode to call GetForTest + +var SetModFlag = func(config interface{}, value string) {} +var SetModFile = func(config interface{}, value string) {} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/codes.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/codes.go new file mode 100644 index 00000000000..f0cabde96eb --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/codes.go @@ -0,0 +1,77 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// A Code is an enum value that can be encoded into bitstreams. +// +// Code types are preferable for enum types, because they allow +// Decoder to detect desyncs. +type Code interface { + // Marker returns the SyncMarker for the Code's dynamic type. + Marker() SyncMarker + + // Value returns the Code's ordinal value. + Value() int +} + +// A CodeVal distinguishes among go/constant.Value encodings. +type CodeVal int + +func (c CodeVal) Marker() SyncMarker { return SyncVal } +func (c CodeVal) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + ValBool CodeVal = iota + ValString + ValInt64 + ValBigInt + ValBigRat + ValBigFloat +) + +// A CodeType distinguishes among go/types.Type encodings. +type CodeType int + +func (c CodeType) Marker() SyncMarker { return SyncType } +func (c CodeType) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + TypeBasic CodeType = iota + TypeNamed + TypePointer + TypeSlice + TypeArray + TypeChan + TypeMap + TypeSignature + TypeStruct + TypeInterface + TypeUnion + TypeTypeParam +) + +// A CodeObj distinguishes among go/types.Object encodings. +type CodeObj int + +func (c CodeObj) Marker() SyncMarker { return SyncCodeObj } +func (c CodeObj) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. 
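+// (Importers decode one of these ordinals with Decoder.Code, e.g.
+// "tag := CodeObj(r.Code(SyncCodeObj))", mirroring the call sites in the
+// gcimporter package above; shown here only as a reading aid.)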
+ +const ( + ObjAlias CodeObj = iota + ObjConst + ObjType + ObjFunc + ObjVar + ObjStub +) diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/decoder.go new file mode 100644 index 00000000000..2acd85851e3 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/decoder.go @@ -0,0 +1,521 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "encoding/binary" + "errors" + "fmt" + "go/constant" + "go/token" + "io" + "math/big" + "os" + "runtime" + "strings" +) + +// A PkgDecoder provides methods for decoding a package's Unified IR +// export data. +type PkgDecoder struct { + // version is the file format version. + version uint32 + + // aliases determines whether types.Aliases should be created + aliases bool + + // sync indicates whether the file uses sync markers. + sync bool + + // pkgPath is the package path for the package to be decoded. + // + // TODO(mdempsky): Remove; unneeded since CL 391014. + pkgPath string + + // elemData is the full data payload of the encoded package. + // Elements are densely and contiguously packed together. + // + // The last 8 bytes of elemData are the package fingerprint. + elemData string + + // elemEnds stores the byte-offset end positions of element + // bitstreams within elemData. + // + // For example, element I's bitstream data starts at elemEnds[I-1] + // (or 0, if I==0) and ends at elemEnds[I]. + // + // Note: elemEnds is indexed by absolute indices, not + // section-relative indices. + elemEnds []uint32 + + // elemEndsEnds stores the index-offset end positions of relocation + // sections within elemEnds. + // + // For example, section K's end positions start at elemEndsEnds[K-1] + // (or 0, if K==0) and end at elemEndsEnds[K]. + elemEndsEnds [numRelocs]uint32 + + scratchRelocEnt []RelocEnt +} + +// PkgPath returns the package path for the package +// +// TODO(mdempsky): Remove; unneeded since CL 391014. +func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath } + +// SyncMarkers reports whether pr uses sync markers. +func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } + +// NewPkgDecoder returns a PkgDecoder initialized to read the Unified +// IR export data from input. pkgPath is the package path for the +// compilation unit that produced the export data. +// +// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014. +func NewPkgDecoder(pkgPath, input string) PkgDecoder { + pr := PkgDecoder{ + pkgPath: pkgPath, + //aliases: aliases.Enabled(), + } + + // TODO(mdempsky): Implement direct indexing of input string to + // avoid copying the position information. 
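+	//
+	// The header decoded below is, in order (all little-endian):
+	//
+	//	version      uint32
+	//	flags        uint32             (version 1 and later only)
+	//	elemEndsEnds [numRelocs]uint32
+	//	elemEnds     []uint32
+	//	elemData     string             (ends with an 8-byte fingerprint)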
+ + r := strings.NewReader(input) + + assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil) + + switch pr.version { + default: + panic(fmt.Errorf("unsupported version: %v", pr.version)) + case 0: + // no flags + case 1: + var flags uint32 + assert(binary.Read(r, binary.LittleEndian, &flags) == nil) + pr.sync = flags&flagSyncMarkers != 0 + } + + assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil) + + pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1]) + assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil) + + pos, err := r.Seek(0, io.SeekCurrent) + assert(err == nil) + + pr.elemData = input[pos:] + assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1])) + + return pr +} + +// NumElems returns the number of elements in section k. +func (pr *PkgDecoder) NumElems(k RelocKind) int { + count := int(pr.elemEndsEnds[k]) + if k > 0 { + count -= int(pr.elemEndsEnds[k-1]) + } + return count +} + +// TotalElems returns the total number of elements across all sections. +func (pr *PkgDecoder) TotalElems() int { + return len(pr.elemEnds) +} + +// Fingerprint returns the package fingerprint. +func (pr *PkgDecoder) Fingerprint() [8]byte { + var fp [8]byte + copy(fp[:], pr.elemData[len(pr.elemData)-8:]) + return fp +} + +// AbsIdx returns the absolute index for the given (section, index) +// pair. +func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int { + absIdx := int(idx) + if k > 0 { + absIdx += int(pr.elemEndsEnds[k-1]) + } + if absIdx >= int(pr.elemEndsEnds[k]) { + errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) + } + return absIdx +} + +// DataIdx returns the raw element bitstream for the given (section, +// index) pair. +func (pr *PkgDecoder) DataIdx(k RelocKind, idx Index) string { + absIdx := pr.AbsIdx(k, idx) + + var start uint32 + if absIdx > 0 { + start = pr.elemEnds[absIdx-1] + } + end := pr.elemEnds[absIdx] + + return pr.elemData[start:end] +} + +// StringIdx returns the string value for the given string index. +func (pr *PkgDecoder) StringIdx(idx Index) string { + return pr.DataIdx(RelocString, idx) +} + +// NewDecoder returns a Decoder for the given (section, index) pair, +// and decodes the given SyncMarker from the element bitstream. +func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { + r := pr.NewDecoderRaw(k, idx) + r.Sync(marker) + return r +} + +// TempDecoder returns a Decoder for the given (section, index) pair, +// and decodes the given SyncMarker from the element bitstream. +// If possible the Decoder should be RetireDecoder'd when it is no longer +// needed, this will avoid heap allocations. +func (pr *PkgDecoder) TempDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { + r := pr.TempDecoderRaw(k, idx) + r.Sync(marker) + return r +} + +func (pr *PkgDecoder) RetireDecoder(d *Decoder) { + pr.scratchRelocEnt = d.Relocs + d.Relocs = nil +} + +// NewDecoderRaw returns a Decoder for the given (section, index) pair. +// +// Most callers should use NewDecoder instead. +func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder { + r := Decoder{ + common: pr, + k: k, + Idx: idx, + } + + // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved. 
+	r.Data = *strings.NewReader(pr.DataIdx(k, idx))
+
+	r.Sync(SyncRelocs)
+	r.Relocs = make([]RelocEnt, r.Len())
+	for i := range r.Relocs {
+		r.Sync(SyncReloc)
+		r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
+	}
+
+	return r
+}
+
+func (pr *PkgDecoder) TempDecoderRaw(k RelocKind, idx Index) Decoder {
+	r := Decoder{
+		common: pr,
+		k:      k,
+		Idx:    idx,
+	}
+
+	r.Data.Reset(pr.DataIdx(k, idx))
+	r.Sync(SyncRelocs)
+	l := r.Len()
+	if cap(pr.scratchRelocEnt) >= l {
+		r.Relocs = pr.scratchRelocEnt[:l]
+		pr.scratchRelocEnt = nil
+	} else {
+		r.Relocs = make([]RelocEnt, l)
+	}
+	for i := range r.Relocs {
+		r.Sync(SyncReloc)
+		r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
+	}
+
+	return r
+}
+
+// A Decoder provides methods for decoding an individual element's
+// bitstream data.
+type Decoder struct {
+	common *PkgDecoder
+
+	Relocs []RelocEnt
+	Data   strings.Reader
+
+	k   RelocKind
+	Idx Index
+}
+
+func (r *Decoder) checkErr(err error) {
+	if err != nil {
+		errorf("unexpected decoding error: %w", err)
+	}
+}
+
+func (r *Decoder) rawUvarint() uint64 {
+	x, err := readUvarint(&r.Data)
+	r.checkErr(err)
+	return x
+}
+
+// readUvarint is a type-specialized copy of encoding/binary.ReadUvarint.
+// This avoids the interface conversion and thus has better escape properties,
+// which flows up the stack.
+func readUvarint(r *strings.Reader) (uint64, error) {
+	var x uint64
+	var s uint
+	for i := 0; i < binary.MaxVarintLen64; i++ {
+		b, err := r.ReadByte()
+		if err != nil {
+			if i > 0 && err == io.EOF {
+				err = io.ErrUnexpectedEOF
+			}
+			return x, err
+		}
+		if b < 0x80 {
+			if i == binary.MaxVarintLen64-1 && b > 1 {
+				return x, overflow
+			}
+			return x | uint64(b)<<s, nil
+		}
+		x |= uint64(b&0x7f) << s
+		s += 7
+	}
+	return x, overflow
+}
+
+var overflow = errors.New("pkgbits: readUvarint overflows a 64-bit integer")
+
+func (r *Decoder) rawVarint() int64 {
+	ux := r.rawUvarint()
+
+	// Zig-zag decode.
+	x := int64(ux >> 1)
+	if ux&1 != 0 {
+		x = ^x
+	}
+	return x
+}
+
+func (r *Decoder) rawReloc(k RelocKind, idx int) Index {
+	e := r.Relocs[idx]
+	assert(e.Kind == k)
+	return e.Idx
+}
+
+// Sync decodes a sync marker from the element bitstream and asserts
+// that it matches the expected marker.
+//
+// If r.common.sync is false, then Sync is a no-op.
+func (r *Decoder) Sync(mWant SyncMarker) {
+	if !r.common.sync {
+		return
+	}
+
+	pos, _ := r.Data.Seek(0, io.SeekCurrent)
+	mHave := SyncMarker(r.rawUvarint())
+	writerPCs := make([]int, r.rawUvarint())
+	for i := range writerPCs {
+		writerPCs[i] = int(r.rawUvarint())
+	}
+
+	if mHave == mWant {
+		return
+	}
+
+	// There's some tension here between printing:
+	//
+	// (1) full file paths that tools can recognize (e.g., so emacs
+	// hyperlinks the "file:line" text for easy navigation), or
+	//
+	// (2) short file paths that are easier for humans to read (e.g., by
+	// omitting redundant or irrelevant details, so it's easier to
+	// focus on the useful bits that remain).
+	//
+	// The current formatting favors the former, as it seems more
+	// helpful in practice. But perhaps the formatting could be improved
+	// to better address both concerns. For example, use relative file
+	// paths if they would be shorter, or rewrite file paths to contain
+	// "$GOROOT" (like objabi.AbsFile does) if tools can be taught how
+	// to reliably expand that again.
+ + fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos) + + fmt.Printf("\nfound %v, written at:\n", mHave) + if len(writerPCs) == 0 { + fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath) + } + for _, pc := range writerPCs { + fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc))) + } + + fmt.Printf("\nexpected %v, reading at:\n", mWant) + var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size? + n := runtime.Callers(2, readerPCs[:]) + for _, pc := range fmtFrames(readerPCs[:n]...) { + fmt.Printf("\t%s\n", pc) + } + + // We already printed a stack trace for the reader, so now we can + // simply exit. Printing a second one with panic or base.Fatalf + // would just be noise. + os.Exit(1) +} + +// Bool decodes and returns a bool value from the element bitstream. +func (r *Decoder) Bool() bool { + r.Sync(SyncBool) + x, err := r.Data.ReadByte() + r.checkErr(err) + assert(x < 2) + return x != 0 +} + +// Int64 decodes and returns an int64 value from the element bitstream. +func (r *Decoder) Int64() int64 { + r.Sync(SyncInt64) + return r.rawVarint() +} + +// Uint64 decodes and returns a uint64 value from the element bitstream. +func (r *Decoder) Uint64() uint64 { + r.Sync(SyncUint64) + return r.rawUvarint() +} + +// Len decodes and returns a non-negative int value from the element bitstream. +func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v } + +// Int decodes and returns an int value from the element bitstream. +func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v } + +// Uint decodes and returns a uint value from the element bitstream. +func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v } + +// Code decodes a Code value from the element bitstream and returns +// its ordinal value. It's the caller's responsibility to convert the +// result to an appropriate Code type. +// +// TODO(mdempsky): Ideally this method would have signature "Code[T +// Code] T" instead, but we don't allow generic methods and the +// compiler can't depend on generics yet anyway. +func (r *Decoder) Code(mark SyncMarker) int { + r.Sync(mark) + return r.Len() +} + +// Reloc decodes a relocation of expected section k from the element +// bitstream and returns an index to the referenced element. +func (r *Decoder) Reloc(k RelocKind) Index { + r.Sync(SyncUseReloc) + return r.rawReloc(k, r.Len()) +} + +// String decodes and returns a string value from the element +// bitstream. +func (r *Decoder) String() string { + r.Sync(SyncString) + return r.common.StringIdx(r.Reloc(RelocString)) +} + +// Strings decodes and returns a variable-length slice of strings from +// the element bitstream. +func (r *Decoder) Strings() []string { + res := make([]string, r.Len()) + for i := range res { + res[i] = r.String() + } + return res +} + +// Value decodes and returns a constant.Value from the element +// bitstream. 
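+// Complex values are encoded as a real part followed by an imaginary
+// part; the reassembly below amounts to:
+//
+//	val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))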
+func (r *Decoder) Value() constant.Value { + r.Sync(SyncValue) + isComplex := r.Bool() + val := r.scalar() + if isComplex { + val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar())) + } + return val +} + +func (r *Decoder) scalar() constant.Value { + switch tag := CodeVal(r.Code(SyncVal)); tag { + default: + panic(fmt.Errorf("unexpected scalar tag: %v", tag)) + + case ValBool: + return constant.MakeBool(r.Bool()) + case ValString: + return constant.MakeString(r.String()) + case ValInt64: + return constant.MakeInt64(r.Int64()) + case ValBigInt: + return constant.Make(r.bigInt()) + case ValBigRat: + num := r.bigInt() + denom := r.bigInt() + return constant.Make(new(big.Rat).SetFrac(num, denom)) + case ValBigFloat: + return constant.Make(r.bigFloat()) + } +} + +func (r *Decoder) bigInt() *big.Int { + v := new(big.Int).SetBytes([]byte(r.String())) + if r.Bool() { + v.Neg(v) + } + return v +} + +func (r *Decoder) bigFloat() *big.Float { + v := new(big.Float).SetPrec(512) + assert(v.UnmarshalText([]byte(r.String())) == nil) + return v +} + +// @@@ Helpers + +// TODO(mdempsky): These should probably be removed. I think they're a +// smell that the export data format is not yet quite right. + +// PeekPkgPath returns the package path for the specified package +// index. +func (pr *PkgDecoder) PeekPkgPath(idx Index) string { + var path string + { + r := pr.TempDecoder(RelocPkg, idx, SyncPkgDef) + path = r.String() + pr.RetireDecoder(&r) + } + if path == "" { + path = pr.pkgPath + } + return path +} + +// PeekObj returns the package path, object name, and CodeObj for the +// specified object index. +func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) { + var ridx Index + var name string + var rcode int + { + r := pr.TempDecoder(RelocName, idx, SyncObject1) + r.Sync(SyncSym) + r.Sync(SyncPkg) + ridx = r.Reloc(RelocPkg) + name = r.String() + rcode = r.Code(SyncCodeObj) + pr.RetireDecoder(&r) + } + + path := pr.PeekPkgPath(ridx) + assert(name != "") + + tag := CodeObj(rcode) + + return path, name, tag +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/doc.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/doc.go new file mode 100644 index 00000000000..c8a2796b5e4 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/doc.go @@ -0,0 +1,32 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkgbits implements low-level coding abstractions for +// Unified IR's export data format. +// +// At a low-level, a package is a collection of bitstream elements. +// Each element has a "kind" and a dense, non-negative index. +// Elements can be randomly accessed given their kind and index. +// +// Individual elements are sequences of variable-length values (e.g., +// integers, booleans, strings, go/constant values, cross-references +// to other elements). Package pkgbits provides APIs for encoding and +// decoding these low-level values, but the details of mapping +// higher-level Go constructs into elements is left to higher-level +// abstractions. +// +// Elements may cross-reference each other with "relocations." For +// example, an element representing a pointer type has a relocation +// referring to the element type. +// +// Go constructs may be composed as a constellation of multiple +// elements. 
For example, a declared function may have one element to +// describe the object (e.g., its name, type, position), and a +// separate element to describe its function body. This allows readers +// some flexibility in efficiently seeking or re-reading data (e.g., +// inlining requires re-reading the function body for each inlined +// call, without needing to re-read the object-level details). +// +// This is a copy of internal/pkgbits in the Go implementation. +package pkgbits diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/encoder.go new file mode 100644 index 00000000000..6482617a4fc --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/encoder.go @@ -0,0 +1,383 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "bytes" + "crypto/md5" + "encoding/binary" + "go/constant" + "io" + "math/big" + "runtime" +) + +// currentVersion is the current version number. +// +// - v0: initial prototype +// +// - v1: adds the flags uint32 word +const currentVersion uint32 = 1 + +// A PkgEncoder provides methods for encoding a package's Unified IR +// export data. +type PkgEncoder struct { + // elems holds the bitstream for previously encoded elements. + elems [numRelocs][]string + + // stringsIdx maps previously encoded strings to their index within + // the RelocString section, to allow deduplication. That is, + // elems[RelocString][stringsIdx[s]] == s (if present). + stringsIdx map[string]Index + + // syncFrames is the number of frames to write at each sync + // marker. A negative value means sync markers are omitted. + syncFrames int +} + +// SyncMarkers reports whether pw uses sync markers. +func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 } + +// NewPkgEncoder returns an initialized PkgEncoder. +// +// syncFrames is the number of caller frames that should be serialized +// at Sync points. Serializing additional frames results in larger +// export data files, but can help diagnosing desync errors in +// higher-level Unified IR reader/writer code. If syncFrames is +// negative, then sync markers are omitted entirely. +func NewPkgEncoder(syncFrames int) PkgEncoder { + return PkgEncoder{ + stringsIdx: make(map[string]Index), + syncFrames: syncFrames, + } +} + +// DumpTo writes the package's encoded data to out0 and returns the +// package fingerprint. +func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) { + h := md5.New() + out := io.MultiWriter(out0, h) + + writeUint32 := func(x uint32) { + assert(binary.Write(out, binary.LittleEndian, x) == nil) + } + + writeUint32(currentVersion) + + var flags uint32 + if pw.SyncMarkers() { + flags |= flagSyncMarkers + } + writeUint32(flags) + + // Write elemEndsEnds. + var sum uint32 + for _, elems := range &pw.elems { + sum += uint32(len(elems)) + writeUint32(sum) + } + + // Write elemEnds. + sum = 0 + for _, elems := range &pw.elems { + for _, elem := range elems { + sum += uint32(len(elem)) + writeUint32(sum) + } + } + + // Write elemData. + for _, elems := range &pw.elems { + for _, elem := range elems { + _, err := io.WriteString(out, elem) + assert(err == nil) + } + } + + // Write fingerprint. 
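+	//
+	// Editorial aside (illustrative sketch, not part of the vendored
+	// file): at this point the stream written above is laid out as
+	//
+	//	version uint32
+	//	flags uint32
+	//	per-section running totals of element counts
+	//	per-element running totals of data lengths
+	//	concatenated element data
+	//
+	// and the 8-byte md5 fingerprint written below closes the file.
+	// The two running-total tables are what allow a decoder to seek
+	// directly to the data for a given (section, index) pair.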
+ copy(fingerprint[:], h.Sum(nil)) + _, err := out0.Write(fingerprint[:]) + assert(err == nil) + + return +} + +// StringIdx adds a string value to the strings section, if not +// already present, and returns its index. +func (pw *PkgEncoder) StringIdx(s string) Index { + if idx, ok := pw.stringsIdx[s]; ok { + assert(pw.elems[RelocString][idx] == s) + return idx + } + + idx := Index(len(pw.elems[RelocString])) + pw.elems[RelocString] = append(pw.elems[RelocString], s) + pw.stringsIdx[s] = idx + return idx +} + +// NewEncoder returns an Encoder for a new element within the given +// section, and encodes the given SyncMarker as the start of the +// element bitstream. +func (pw *PkgEncoder) NewEncoder(k RelocKind, marker SyncMarker) Encoder { + e := pw.NewEncoderRaw(k) + e.Sync(marker) + return e +} + +// NewEncoderRaw returns an Encoder for a new element within the given +// section. +// +// Most callers should use NewEncoder instead. +func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder { + idx := Index(len(pw.elems[k])) + pw.elems[k] = append(pw.elems[k], "") // placeholder + + return Encoder{ + p: pw, + k: k, + Idx: idx, + } +} + +// An Encoder provides methods for encoding an individual element's +// bitstream data. +type Encoder struct { + p *PkgEncoder + + Relocs []RelocEnt + RelocMap map[RelocEnt]uint32 + Data bytes.Buffer // accumulated element bitstream data + + encodingRelocHeader bool + + k RelocKind + Idx Index // index within relocation section +} + +// Flush finalizes the element's bitstream and returns its Index. +func (w *Encoder) Flush() Index { + var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved + + // Backup the data so we write the relocations at the front. + var tmp bytes.Buffer + io.Copy(&tmp, &w.Data) + + // TODO(mdempsky): Consider writing these out separately so they're + // easier to strip, along with function bodies, so that we can prune + // down to just the data that's relevant to go/types. + if w.encodingRelocHeader { + panic("encodingRelocHeader already true; recursive flush?") + } + w.encodingRelocHeader = true + w.Sync(SyncRelocs) + w.Len(len(w.Relocs)) + for _, rEnt := range w.Relocs { + w.Sync(SyncReloc) + w.Len(int(rEnt.Kind)) + w.Len(int(rEnt.Idx)) + } + + io.Copy(&sb, &w.Data) + io.Copy(&sb, &tmp) + w.p.elems[w.k][w.Idx] = sb.String() + + return w.Idx +} + +func (w *Encoder) checkErr(err error) { + if err != nil { + errorf("unexpected encoding error: %v", err) + } +} + +func (w *Encoder) rawUvarint(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + _, err := w.Data.Write(buf[:n]) + w.checkErr(err) +} + +func (w *Encoder) rawVarint(x int64) { + // Zig-zag encode. + ux := uint64(x) << 1 + if x < 0 { + ux = ^ux + } + + w.rawUvarint(ux) +} + +func (w *Encoder) rawReloc(r RelocKind, idx Index) int { + e := RelocEnt{r, idx} + if w.RelocMap != nil { + if i, ok := w.RelocMap[e]; ok { + return int(i) + } + } else { + w.RelocMap = make(map[RelocEnt]uint32) + } + + i := len(w.Relocs) + w.RelocMap[e] = uint32(i) + w.Relocs = append(w.Relocs, e) + return i +} + +func (w *Encoder) Sync(m SyncMarker) { + if !w.p.SyncMarkers() { + return + } + + // Writing out stack frame string references requires working + // relocations, but writing out the relocations themselves involves + // sync markers. To prevent infinite recursion, we simply trim the + // stack frame for sync markers within the relocation header. 
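+	//
+	// Editorial aside (illustrative sketch, not part of the vendored
+	// file): the wire format produced below for each sync marker is
+	//
+	//	marker uvarint | frame count uvarint | one string-reloc index per frame
+	//
+	// which is exactly what Decoder.Sync reads back before comparing
+	// the marker it found against the one the reader expects.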
+ var frames []string + if !w.encodingRelocHeader && w.p.syncFrames > 0 { + pcs := make([]uintptr, w.p.syncFrames) + n := runtime.Callers(2, pcs) + frames = fmtFrames(pcs[:n]...) + } + + // TODO(mdempsky): Save space by writing out stack frames as a + // linked list so we can share common stack frames. + w.rawUvarint(uint64(m)) + w.rawUvarint(uint64(len(frames))) + for _, frame := range frames { + w.rawUvarint(uint64(w.rawReloc(RelocString, w.p.StringIdx(frame)))) + } +} + +// Bool encodes and writes a bool value into the element bitstream, +// and then returns the bool value. +// +// For simple, 2-alternative encodings, the idiomatic way to call Bool +// is something like: +// +// if w.Bool(x != 0) { +// // alternative #1 +// } else { +// // alternative #2 +// } +// +// For multi-alternative encodings, use Code instead. +func (w *Encoder) Bool(b bool) bool { + w.Sync(SyncBool) + var x byte + if b { + x = 1 + } + err := w.Data.WriteByte(x) + w.checkErr(err) + return b +} + +// Int64 encodes and writes an int64 value into the element bitstream. +func (w *Encoder) Int64(x int64) { + w.Sync(SyncInt64) + w.rawVarint(x) +} + +// Uint64 encodes and writes a uint64 value into the element bitstream. +func (w *Encoder) Uint64(x uint64) { + w.Sync(SyncUint64) + w.rawUvarint(x) +} + +// Len encodes and writes a non-negative int value into the element bitstream. +func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) } + +// Int encodes and writes an int value into the element bitstream. +func (w *Encoder) Int(x int) { w.Int64(int64(x)) } + +// Uint encodes and writes a uint value into the element bitstream. +func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) } + +// Reloc encodes and writes a relocation for the given (section, +// index) pair into the element bitstream. +// +// Note: Only the index is formally written into the element +// bitstream, so bitstream decoders must know from context which +// section an encoded relocation refers to. +func (w *Encoder) Reloc(r RelocKind, idx Index) { + w.Sync(SyncUseReloc) + w.Len(w.rawReloc(r, idx)) +} + +// Code encodes and writes a Code value into the element bitstream. +func (w *Encoder) Code(c Code) { + w.Sync(c.Marker()) + w.Len(c.Value()) +} + +// String encodes and writes a string value into the element +// bitstream. +// +// Internally, strings are deduplicated by adding them to the strings +// section (if not already present), and then writing a relocation +// into the element bitstream. +func (w *Encoder) String(s string) { + w.Sync(SyncString) + w.Reloc(RelocString, w.p.StringIdx(s)) +} + +// Strings encodes and writes a variable-length slice of strings into +// the element bitstream. +func (w *Encoder) Strings(ss []string) { + w.Len(len(ss)) + for _, s := range ss { + w.String(s) + } +} + +// Value encodes and writes a constant.Value into the element +// bitstream. 
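+//
+// (Editorial aside, not part of the vendored file: a complex constant
+// such as 3+4i is written as Bool(true) followed by two scalars, the
+// real part and then the imaginary part; Decoder.Value mirrors this
+// exactly when reassembling the constant.)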
+func (w *Encoder) Value(val constant.Value) { + w.Sync(SyncValue) + if w.Bool(val.Kind() == constant.Complex) { + w.scalar(constant.Real(val)) + w.scalar(constant.Imag(val)) + } else { + w.scalar(val) + } +} + +func (w *Encoder) scalar(val constant.Value) { + switch v := constant.Val(val).(type) { + default: + errorf("unhandled %v (%v)", val, val.Kind()) + case bool: + w.Code(ValBool) + w.Bool(v) + case string: + w.Code(ValString) + w.String(v) + case int64: + w.Code(ValInt64) + w.Int64(v) + case *big.Int: + w.Code(ValBigInt) + w.bigInt(v) + case *big.Rat: + w.Code(ValBigRat) + w.bigInt(v.Num()) + w.bigInt(v.Denom()) + case *big.Float: + w.Code(ValBigFloat) + w.bigFloat(v) + } +} + +func (w *Encoder) bigInt(v *big.Int) { + b := v.Bytes() + w.String(string(b)) // TODO: More efficient encoding. + w.Bool(v.Sign() < 0) +} + +func (w *Encoder) bigFloat(v *big.Float) { + b := v.Append(nil, 'p', -1) + w.String(string(b)) // TODO: More efficient encoding. +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/flags.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/flags.go new file mode 100644 index 00000000000..654222745fa --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/flags.go @@ -0,0 +1,9 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +const ( + flagSyncMarkers = 1 << iota // file format contains sync markers +) diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go new file mode 100644 index 00000000000..5294f6a63ed --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go @@ -0,0 +1,21 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.7 +// +build !go1.7 + +// TODO(mdempsky): Remove after #44505 is resolved + +package pkgbits + +import "runtime" + +func walkFrames(pcs []uintptr, visit frameVisitor) { + for _, pc := range pcs { + fn := runtime.FuncForPC(pc) + file, line := fn.FileLine(pc) + + visit(file, line, fn.Name(), pc-fn.Entry()) + } +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go new file mode 100644 index 00000000000..2324ae7adfe --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go @@ -0,0 +1,28 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.7 +// +build go1.7 + +package pkgbits + +import "runtime" + +// walkFrames calls visit for each call frame represented by pcs. +// +// pcs should be a slice of PCs, as returned by runtime.Callers. 
+func walkFrames(pcs []uintptr, visit frameVisitor) { + if len(pcs) == 0 { + return + } + + frames := runtime.CallersFrames(pcs) + for { + frame, more := frames.Next() + visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) + if !more { + return + } + } +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/reloc.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/reloc.go new file mode 100644 index 00000000000..fcdfb97ca99 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/reloc.go @@ -0,0 +1,42 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// A RelocKind indicates a particular section within a unified IR export. +type RelocKind int32 + +// An Index represents a bitstream element index within a particular +// section. +type Index int32 + +// A relocEnt (relocation entry) is an entry in an element's local +// reference table. +// +// TODO(mdempsky): Rename this too. +type RelocEnt struct { + Kind RelocKind + Idx Index +} + +// Reserved indices within the meta relocation section. +const ( + PublicRootIdx Index = 0 + PrivateRootIdx Index = 1 +) + +const ( + RelocString RelocKind = iota + RelocMeta + RelocPosBase + RelocPkg + RelocName + RelocType + RelocObj + RelocObjExt + RelocObjDict + RelocBody + + numRelocs = iota +) diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/support.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/support.go new file mode 100644 index 00000000000..ad26d3b28ca --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/support.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import "fmt" + +func assert(b bool) { + if !b { + panic("assertion failed") + } +} + +func errorf(format string, args ...interface{}) { + panic(fmt.Errorf(format, args...)) +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/sync.go new file mode 100644 index 00000000000..5bd51ef7170 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/sync.go @@ -0,0 +1,113 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "fmt" + "strings" +) + +// fmtFrames formats a backtrace for reporting reader/writer desyncs. +func fmtFrames(pcs ...uintptr) []string { + res := make([]string, 0, len(pcs)) + walkFrames(pcs, func(file string, line int, name string, offset uintptr) { + // Trim package from function name. It's just redundant noise. + name = strings.TrimPrefix(name, "cmd/compile/internal/noder.") + + res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset)) + }) + return res +} + +type frameVisitor func(file string, line int, name string, offset uintptr) + +// SyncMarker is an enum type that represents markers that may be +// written to export data to ensure the reader and writer stay +// synchronized. 
+type SyncMarker int + +//go:generate stringer -type=SyncMarker -trimprefix=Sync + +const ( + _ SyncMarker = iota + + // Public markers (known to go/types importers). + + // Low-level coding markers. + SyncEOF + SyncBool + SyncInt64 + SyncUint64 + SyncString + SyncValue + SyncVal + SyncRelocs + SyncReloc + SyncUseReloc + + // Higher-level object and type markers. + SyncPublic + SyncPos + SyncPosBase + SyncObject + SyncObject1 + SyncPkg + SyncPkgDef + SyncMethod + SyncType + SyncTypeIdx + SyncTypeParamNames + SyncSignature + SyncParams + SyncParam + SyncCodeObj + SyncSym + SyncLocalIdent + SyncSelector + + // Private markers (only known to cmd/compile). + SyncPrivate + + SyncFuncExt + SyncVarExt + SyncTypeExt + SyncPragma + + SyncExprList + SyncExprs + SyncExpr + SyncExprType + SyncAssign + SyncOp + SyncFuncLit + SyncCompLit + + SyncDecl + SyncFuncBody + SyncOpenScope + SyncCloseScope + SyncCloseAnotherScope + SyncDeclNames + SyncDeclName + + SyncStmts + SyncBlockStmt + SyncIfStmt + SyncForStmt + SyncSwitchStmt + SyncRangeStmt + SyncCaseClause + SyncCommClause + SyncSelectStmt + SyncDecls + SyncLabeledStmt + SyncUseObjLocal + SyncAddLocal + SyncLinkname + SyncStmt1 + SyncStmtsEnd + SyncLabel + SyncOptLabel +) diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go new file mode 100644 index 00000000000..4a5b0ca5f2f --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go @@ -0,0 +1,89 @@ +// Code generated by "stringer -type=SyncMarker -trimprefix=Sync"; DO NOT EDIT. + +package pkgbits + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[SyncEOF-1] + _ = x[SyncBool-2] + _ = x[SyncInt64-3] + _ = x[SyncUint64-4] + _ = x[SyncString-5] + _ = x[SyncValue-6] + _ = x[SyncVal-7] + _ = x[SyncRelocs-8] + _ = x[SyncReloc-9] + _ = x[SyncUseReloc-10] + _ = x[SyncPublic-11] + _ = x[SyncPos-12] + _ = x[SyncPosBase-13] + _ = x[SyncObject-14] + _ = x[SyncObject1-15] + _ = x[SyncPkg-16] + _ = x[SyncPkgDef-17] + _ = x[SyncMethod-18] + _ = x[SyncType-19] + _ = x[SyncTypeIdx-20] + _ = x[SyncTypeParamNames-21] + _ = x[SyncSignature-22] + _ = x[SyncParams-23] + _ = x[SyncParam-24] + _ = x[SyncCodeObj-25] + _ = x[SyncSym-26] + _ = x[SyncLocalIdent-27] + _ = x[SyncSelector-28] + _ = x[SyncPrivate-29] + _ = x[SyncFuncExt-30] + _ = x[SyncVarExt-31] + _ = x[SyncTypeExt-32] + _ = x[SyncPragma-33] + _ = x[SyncExprList-34] + _ = x[SyncExprs-35] + _ = x[SyncExpr-36] + _ = x[SyncExprType-37] + _ = x[SyncAssign-38] + _ = x[SyncOp-39] + _ = x[SyncFuncLit-40] + _ = x[SyncCompLit-41] + _ = x[SyncDecl-42] + _ = x[SyncFuncBody-43] + _ = x[SyncOpenScope-44] + _ = x[SyncCloseScope-45] + _ = x[SyncCloseAnotherScope-46] + _ = x[SyncDeclNames-47] + _ = x[SyncDeclName-48] + _ = x[SyncStmts-49] + _ = x[SyncBlockStmt-50] + _ = x[SyncIfStmt-51] + _ = x[SyncForStmt-52] + _ = x[SyncSwitchStmt-53] + _ = x[SyncRangeStmt-54] + _ = x[SyncCaseClause-55] + _ = x[SyncCommClause-56] + _ = x[SyncSelectStmt-57] + _ = x[SyncDecls-58] + _ = x[SyncLabeledStmt-59] + _ = x[SyncUseObjLocal-60] + _ = x[SyncAddLocal-61] + _ = x[SyncLinkname-62] + _ = x[SyncStmt1-63] + _ = x[SyncStmtsEnd-64] + _ = x[SyncLabel-65] + _ = x[SyncOptLabel-66] +} + +const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel" + +var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458} + +func (i SyncMarker) String() string { + i -= 1 + if i < 0 || i >= SyncMarker(len(_SyncMarker_index)-1) { + return "SyncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _SyncMarker_name[_SyncMarker_index[i]:_SyncMarker_index[i+1]] +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/stdlib/manifest.go new file mode 100644 index 00000000000..fd6892075ee --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -0,0 +1,17320 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate.go. DO NOT EDIT. 
+ +package stdlib + +var PackageSymbols = map[string][]Symbol{ + "archive/tar": { + {"(*Header).FileInfo", Method, 1}, + {"(*Reader).Next", Method, 0}, + {"(*Reader).Read", Method, 0}, + {"(*Writer).AddFS", Method, 22}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Flush", Method, 0}, + {"(*Writer).Write", Method, 0}, + {"(*Writer).WriteHeader", Method, 0}, + {"(Format).String", Method, 10}, + {"ErrFieldTooLong", Var, 0}, + {"ErrHeader", Var, 0}, + {"ErrInsecurePath", Var, 20}, + {"ErrWriteAfterClose", Var, 0}, + {"ErrWriteTooLong", Var, 0}, + {"FileInfoHeader", Func, 1}, + {"Format", Type, 10}, + {"FormatGNU", Const, 10}, + {"FormatPAX", Const, 10}, + {"FormatUSTAR", Const, 10}, + {"FormatUnknown", Const, 10}, + {"Header", Type, 0}, + {"Header.AccessTime", Field, 0}, + {"Header.ChangeTime", Field, 0}, + {"Header.Devmajor", Field, 0}, + {"Header.Devminor", Field, 0}, + {"Header.Format", Field, 10}, + {"Header.Gid", Field, 0}, + {"Header.Gname", Field, 0}, + {"Header.Linkname", Field, 0}, + {"Header.ModTime", Field, 0}, + {"Header.Mode", Field, 0}, + {"Header.Name", Field, 0}, + {"Header.PAXRecords", Field, 10}, + {"Header.Size", Field, 0}, + {"Header.Typeflag", Field, 0}, + {"Header.Uid", Field, 0}, + {"Header.Uname", Field, 0}, + {"Header.Xattrs", Field, 3}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"Reader", Type, 0}, + {"TypeBlock", Const, 0}, + {"TypeChar", Const, 0}, + {"TypeCont", Const, 0}, + {"TypeDir", Const, 0}, + {"TypeFifo", Const, 0}, + {"TypeGNULongLink", Const, 1}, + {"TypeGNULongName", Const, 1}, + {"TypeGNUSparse", Const, 3}, + {"TypeLink", Const, 0}, + {"TypeReg", Const, 0}, + {"TypeRegA", Const, 0}, + {"TypeSymlink", Const, 0}, + {"TypeXGlobalHeader", Const, 0}, + {"TypeXHeader", Const, 0}, + {"Writer", Type, 0}, + }, + "archive/zip": { + {"(*File).DataOffset", Method, 2}, + {"(*File).FileInfo", Method, 0}, + {"(*File).ModTime", Method, 0}, + {"(*File).Mode", Method, 0}, + {"(*File).Open", Method, 0}, + {"(*File).OpenRaw", Method, 17}, + {"(*File).SetModTime", Method, 0}, + {"(*File).SetMode", Method, 0}, + {"(*FileHeader).FileInfo", Method, 0}, + {"(*FileHeader).ModTime", Method, 0}, + {"(*FileHeader).Mode", Method, 0}, + {"(*FileHeader).SetModTime", Method, 0}, + {"(*FileHeader).SetMode", Method, 0}, + {"(*ReadCloser).Close", Method, 0}, + {"(*ReadCloser).Open", Method, 16}, + {"(*ReadCloser).RegisterDecompressor", Method, 6}, + {"(*Reader).Open", Method, 16}, + {"(*Reader).RegisterDecompressor", Method, 6}, + {"(*Writer).AddFS", Method, 22}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Copy", Method, 17}, + {"(*Writer).Create", Method, 0}, + {"(*Writer).CreateHeader", Method, 0}, + {"(*Writer).CreateRaw", Method, 17}, + {"(*Writer).Flush", Method, 4}, + {"(*Writer).RegisterCompressor", Method, 6}, + {"(*Writer).SetComment", Method, 10}, + {"(*Writer).SetOffset", Method, 5}, + {"Compressor", Type, 2}, + {"Decompressor", Type, 2}, + {"Deflate", Const, 0}, + {"ErrAlgorithm", Var, 0}, + {"ErrChecksum", Var, 0}, + {"ErrFormat", Var, 0}, + {"ErrInsecurePath", Var, 20}, + {"File", Type, 0}, + {"File.FileHeader", Field, 0}, + {"FileHeader", Type, 0}, + {"FileHeader.CRC32", Field, 0}, + {"FileHeader.Comment", Field, 0}, + {"FileHeader.CompressedSize", Field, 0}, + {"FileHeader.CompressedSize64", Field, 1}, + {"FileHeader.CreatorVersion", Field, 0}, + {"FileHeader.ExternalAttrs", Field, 0}, + {"FileHeader.Extra", Field, 0}, + {"FileHeader.Flags", Field, 0}, + {"FileHeader.Method", Field, 0}, + {"FileHeader.Modified", Field, 10}, + {"FileHeader.ModifiedDate", 
Field, 0}, + {"FileHeader.ModifiedTime", Field, 0}, + {"FileHeader.Name", Field, 0}, + {"FileHeader.NonUTF8", Field, 10}, + {"FileHeader.ReaderVersion", Field, 0}, + {"FileHeader.UncompressedSize", Field, 0}, + {"FileHeader.UncompressedSize64", Field, 1}, + {"FileInfoHeader", Func, 0}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"OpenReader", Func, 0}, + {"ReadCloser", Type, 0}, + {"ReadCloser.Reader", Field, 0}, + {"Reader", Type, 0}, + {"Reader.Comment", Field, 0}, + {"Reader.File", Field, 0}, + {"RegisterCompressor", Func, 2}, + {"RegisterDecompressor", Func, 2}, + {"Store", Const, 0}, + {"Writer", Type, 0}, + }, + "bufio": { + {"(*Reader).Buffered", Method, 0}, + {"(*Reader).Discard", Method, 5}, + {"(*Reader).Peek", Method, 0}, + {"(*Reader).Read", Method, 0}, + {"(*Reader).ReadByte", Method, 0}, + {"(*Reader).ReadBytes", Method, 0}, + {"(*Reader).ReadLine", Method, 0}, + {"(*Reader).ReadRune", Method, 0}, + {"(*Reader).ReadSlice", Method, 0}, + {"(*Reader).ReadString", Method, 0}, + {"(*Reader).Reset", Method, 2}, + {"(*Reader).Size", Method, 10}, + {"(*Reader).UnreadByte", Method, 0}, + {"(*Reader).UnreadRune", Method, 0}, + {"(*Reader).WriteTo", Method, 1}, + {"(*Scanner).Buffer", Method, 6}, + {"(*Scanner).Bytes", Method, 1}, + {"(*Scanner).Err", Method, 1}, + {"(*Scanner).Scan", Method, 1}, + {"(*Scanner).Split", Method, 1}, + {"(*Scanner).Text", Method, 1}, + {"(*Writer).Available", Method, 0}, + {"(*Writer).AvailableBuffer", Method, 18}, + {"(*Writer).Buffered", Method, 0}, + {"(*Writer).Flush", Method, 0}, + {"(*Writer).ReadFrom", Method, 1}, + {"(*Writer).Reset", Method, 2}, + {"(*Writer).Size", Method, 10}, + {"(*Writer).Write", Method, 0}, + {"(*Writer).WriteByte", Method, 0}, + {"(*Writer).WriteRune", Method, 0}, + {"(*Writer).WriteString", Method, 0}, + {"(ReadWriter).Available", Method, 0}, + {"(ReadWriter).AvailableBuffer", Method, 18}, + {"(ReadWriter).Discard", Method, 5}, + {"(ReadWriter).Flush", Method, 0}, + {"(ReadWriter).Peek", Method, 0}, + {"(ReadWriter).Read", Method, 0}, + {"(ReadWriter).ReadByte", Method, 0}, + {"(ReadWriter).ReadBytes", Method, 0}, + {"(ReadWriter).ReadFrom", Method, 1}, + {"(ReadWriter).ReadLine", Method, 0}, + {"(ReadWriter).ReadRune", Method, 0}, + {"(ReadWriter).ReadSlice", Method, 0}, + {"(ReadWriter).ReadString", Method, 0}, + {"(ReadWriter).UnreadByte", Method, 0}, + {"(ReadWriter).UnreadRune", Method, 0}, + {"(ReadWriter).Write", Method, 0}, + {"(ReadWriter).WriteByte", Method, 0}, + {"(ReadWriter).WriteRune", Method, 0}, + {"(ReadWriter).WriteString", Method, 0}, + {"(ReadWriter).WriteTo", Method, 1}, + {"ErrAdvanceTooFar", Var, 1}, + {"ErrBadReadCount", Var, 15}, + {"ErrBufferFull", Var, 0}, + {"ErrFinalToken", Var, 6}, + {"ErrInvalidUnreadByte", Var, 0}, + {"ErrInvalidUnreadRune", Var, 0}, + {"ErrNegativeAdvance", Var, 1}, + {"ErrNegativeCount", Var, 0}, + {"ErrTooLong", Var, 1}, + {"MaxScanTokenSize", Const, 1}, + {"NewReadWriter", Func, 0}, + {"NewReader", Func, 0}, + {"NewReaderSize", Func, 0}, + {"NewScanner", Func, 1}, + {"NewWriter", Func, 0}, + {"NewWriterSize", Func, 0}, + {"ReadWriter", Type, 0}, + {"ReadWriter.Reader", Field, 0}, + {"ReadWriter.Writer", Field, 0}, + {"Reader", Type, 0}, + {"ScanBytes", Func, 1}, + {"ScanLines", Func, 1}, + {"ScanRunes", Func, 1}, + {"ScanWords", Func, 1}, + {"Scanner", Type, 1}, + {"SplitFunc", Type, 1}, + {"Writer", Type, 0}, + }, + "bytes": { + {"(*Buffer).Available", Method, 21}, + {"(*Buffer).AvailableBuffer", Method, 21}, + {"(*Buffer).Bytes", Method, 0}, + 
{"(*Buffer).Cap", Method, 5}, + {"(*Buffer).Grow", Method, 1}, + {"(*Buffer).Len", Method, 0}, + {"(*Buffer).Next", Method, 0}, + {"(*Buffer).Read", Method, 0}, + {"(*Buffer).ReadByte", Method, 0}, + {"(*Buffer).ReadBytes", Method, 0}, + {"(*Buffer).ReadFrom", Method, 0}, + {"(*Buffer).ReadRune", Method, 0}, + {"(*Buffer).ReadString", Method, 0}, + {"(*Buffer).Reset", Method, 0}, + {"(*Buffer).String", Method, 0}, + {"(*Buffer).Truncate", Method, 0}, + {"(*Buffer).UnreadByte", Method, 0}, + {"(*Buffer).UnreadRune", Method, 0}, + {"(*Buffer).Write", Method, 0}, + {"(*Buffer).WriteByte", Method, 0}, + {"(*Buffer).WriteRune", Method, 0}, + {"(*Buffer).WriteString", Method, 0}, + {"(*Buffer).WriteTo", Method, 0}, + {"(*Reader).Len", Method, 0}, + {"(*Reader).Read", Method, 0}, + {"(*Reader).ReadAt", Method, 0}, + {"(*Reader).ReadByte", Method, 0}, + {"(*Reader).ReadRune", Method, 0}, + {"(*Reader).Reset", Method, 7}, + {"(*Reader).Seek", Method, 0}, + {"(*Reader).Size", Method, 5}, + {"(*Reader).UnreadByte", Method, 0}, + {"(*Reader).UnreadRune", Method, 0}, + {"(*Reader).WriteTo", Method, 1}, + {"Buffer", Type, 0}, + {"Clone", Func, 20}, + {"Compare", Func, 0}, + {"Contains", Func, 0}, + {"ContainsAny", Func, 7}, + {"ContainsFunc", Func, 21}, + {"ContainsRune", Func, 7}, + {"Count", Func, 0}, + {"Cut", Func, 18}, + {"CutPrefix", Func, 20}, + {"CutSuffix", Func, 20}, + {"Equal", Func, 0}, + {"EqualFold", Func, 0}, + {"ErrTooLarge", Var, 0}, + {"Fields", Func, 0}, + {"FieldsFunc", Func, 0}, + {"HasPrefix", Func, 0}, + {"HasSuffix", Func, 0}, + {"Index", Func, 0}, + {"IndexAny", Func, 0}, + {"IndexByte", Func, 0}, + {"IndexFunc", Func, 0}, + {"IndexRune", Func, 0}, + {"Join", Func, 0}, + {"LastIndex", Func, 0}, + {"LastIndexAny", Func, 0}, + {"LastIndexByte", Func, 5}, + {"LastIndexFunc", Func, 0}, + {"Map", Func, 0}, + {"MinRead", Const, 0}, + {"NewBuffer", Func, 0}, + {"NewBufferString", Func, 0}, + {"NewReader", Func, 0}, + {"Reader", Type, 0}, + {"Repeat", Func, 0}, + {"Replace", Func, 0}, + {"ReplaceAll", Func, 12}, + {"Runes", Func, 0}, + {"Split", Func, 0}, + {"SplitAfter", Func, 0}, + {"SplitAfterN", Func, 0}, + {"SplitN", Func, 0}, + {"Title", Func, 0}, + {"ToLower", Func, 0}, + {"ToLowerSpecial", Func, 0}, + {"ToTitle", Func, 0}, + {"ToTitleSpecial", Func, 0}, + {"ToUpper", Func, 0}, + {"ToUpperSpecial", Func, 0}, + {"ToValidUTF8", Func, 13}, + {"Trim", Func, 0}, + {"TrimFunc", Func, 0}, + {"TrimLeft", Func, 0}, + {"TrimLeftFunc", Func, 0}, + {"TrimPrefix", Func, 1}, + {"TrimRight", Func, 0}, + {"TrimRightFunc", Func, 0}, + {"TrimSpace", Func, 0}, + {"TrimSuffix", Func, 1}, + }, + "cmp": { + {"Compare", Func, 21}, + {"Less", Func, 21}, + {"Or", Func, 22}, + {"Ordered", Type, 21}, + }, + "compress/bzip2": { + {"(StructuralError).Error", Method, 0}, + {"NewReader", Func, 0}, + {"StructuralError", Type, 0}, + }, + "compress/flate": { + {"(*ReadError).Error", Method, 0}, + {"(*WriteError).Error", Method, 0}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Flush", Method, 0}, + {"(*Writer).Reset", Method, 2}, + {"(*Writer).Write", Method, 0}, + {"(CorruptInputError).Error", Method, 0}, + {"(InternalError).Error", Method, 0}, + {"BestCompression", Const, 0}, + {"BestSpeed", Const, 0}, + {"CorruptInputError", Type, 0}, + {"DefaultCompression", Const, 0}, + {"HuffmanOnly", Const, 7}, + {"InternalError", Type, 0}, + {"NewReader", Func, 0}, + {"NewReaderDict", Func, 0}, + {"NewWriter", Func, 0}, + {"NewWriterDict", Func, 0}, + {"NoCompression", Const, 0}, + {"ReadError", Type, 0}, + 
{"ReadError.Err", Field, 0}, + {"ReadError.Offset", Field, 0}, + {"Reader", Type, 0}, + {"Resetter", Type, 4}, + {"WriteError", Type, 0}, + {"WriteError.Err", Field, 0}, + {"WriteError.Offset", Field, 0}, + {"Writer", Type, 0}, + }, + "compress/gzip": { + {"(*Reader).Close", Method, 0}, + {"(*Reader).Multistream", Method, 4}, + {"(*Reader).Read", Method, 0}, + {"(*Reader).Reset", Method, 3}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Flush", Method, 1}, + {"(*Writer).Reset", Method, 2}, + {"(*Writer).Write", Method, 0}, + {"BestCompression", Const, 0}, + {"BestSpeed", Const, 0}, + {"DefaultCompression", Const, 0}, + {"ErrChecksum", Var, 0}, + {"ErrHeader", Var, 0}, + {"Header", Type, 0}, + {"Header.Comment", Field, 0}, + {"Header.Extra", Field, 0}, + {"Header.ModTime", Field, 0}, + {"Header.Name", Field, 0}, + {"Header.OS", Field, 0}, + {"HuffmanOnly", Const, 8}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"NewWriterLevel", Func, 0}, + {"NoCompression", Const, 0}, + {"Reader", Type, 0}, + {"Reader.Header", Field, 0}, + {"Writer", Type, 0}, + {"Writer.Header", Field, 0}, + }, + "compress/lzw": { + {"(*Reader).Close", Method, 17}, + {"(*Reader).Read", Method, 17}, + {"(*Reader).Reset", Method, 17}, + {"(*Writer).Close", Method, 17}, + {"(*Writer).Reset", Method, 17}, + {"(*Writer).Write", Method, 17}, + {"LSB", Const, 0}, + {"MSB", Const, 0}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"Order", Type, 0}, + {"Reader", Type, 17}, + {"Writer", Type, 17}, + }, + "compress/zlib": { + {"(*Writer).Close", Method, 0}, + {"(*Writer).Flush", Method, 0}, + {"(*Writer).Reset", Method, 2}, + {"(*Writer).Write", Method, 0}, + {"BestCompression", Const, 0}, + {"BestSpeed", Const, 0}, + {"DefaultCompression", Const, 0}, + {"ErrChecksum", Var, 0}, + {"ErrDictionary", Var, 0}, + {"ErrHeader", Var, 0}, + {"HuffmanOnly", Const, 8}, + {"NewReader", Func, 0}, + {"NewReaderDict", Func, 0}, + {"NewWriter", Func, 0}, + {"NewWriterLevel", Func, 0}, + {"NewWriterLevelDict", Func, 0}, + {"NoCompression", Const, 0}, + {"Resetter", Type, 4}, + {"Writer", Type, 0}, + }, + "container/heap": { + {"Fix", Func, 2}, + {"Init", Func, 0}, + {"Interface", Type, 0}, + {"Pop", Func, 0}, + {"Push", Func, 0}, + {"Remove", Func, 0}, + }, + "container/list": { + {"(*Element).Next", Method, 0}, + {"(*Element).Prev", Method, 0}, + {"(*List).Back", Method, 0}, + {"(*List).Front", Method, 0}, + {"(*List).Init", Method, 0}, + {"(*List).InsertAfter", Method, 0}, + {"(*List).InsertBefore", Method, 0}, + {"(*List).Len", Method, 0}, + {"(*List).MoveAfter", Method, 2}, + {"(*List).MoveBefore", Method, 2}, + {"(*List).MoveToBack", Method, 0}, + {"(*List).MoveToFront", Method, 0}, + {"(*List).PushBack", Method, 0}, + {"(*List).PushBackList", Method, 0}, + {"(*List).PushFront", Method, 0}, + {"(*List).PushFrontList", Method, 0}, + {"(*List).Remove", Method, 0}, + {"Element", Type, 0}, + {"Element.Value", Field, 0}, + {"List", Type, 0}, + {"New", Func, 0}, + }, + "container/ring": { + {"(*Ring).Do", Method, 0}, + {"(*Ring).Len", Method, 0}, + {"(*Ring).Link", Method, 0}, + {"(*Ring).Move", Method, 0}, + {"(*Ring).Next", Method, 0}, + {"(*Ring).Prev", Method, 0}, + {"(*Ring).Unlink", Method, 0}, + {"New", Func, 0}, + {"Ring", Type, 0}, + {"Ring.Value", Field, 0}, + }, + "context": { + {"AfterFunc", Func, 21}, + {"Background", Func, 7}, + {"CancelCauseFunc", Type, 20}, + {"CancelFunc", Type, 7}, + {"Canceled", Var, 7}, + {"Cause", Func, 20}, + {"Context", Type, 7}, + {"DeadlineExceeded", Var, 7}, + {"TODO", Func, 7}, + 
{"WithCancel", Func, 7}, + {"WithCancelCause", Func, 20}, + {"WithDeadline", Func, 7}, + {"WithDeadlineCause", Func, 21}, + {"WithTimeout", Func, 7}, + {"WithTimeoutCause", Func, 21}, + {"WithValue", Func, 7}, + {"WithoutCancel", Func, 21}, + }, + "crypto": { + {"(Hash).Available", Method, 0}, + {"(Hash).HashFunc", Method, 4}, + {"(Hash).New", Method, 0}, + {"(Hash).Size", Method, 0}, + {"(Hash).String", Method, 15}, + {"BLAKE2b_256", Const, 9}, + {"BLAKE2b_384", Const, 9}, + {"BLAKE2b_512", Const, 9}, + {"BLAKE2s_256", Const, 9}, + {"Decrypter", Type, 5}, + {"DecrypterOpts", Type, 5}, + {"Hash", Type, 0}, + {"MD4", Const, 0}, + {"MD5", Const, 0}, + {"MD5SHA1", Const, 0}, + {"PrivateKey", Type, 0}, + {"PublicKey", Type, 2}, + {"RIPEMD160", Const, 0}, + {"RegisterHash", Func, 0}, + {"SHA1", Const, 0}, + {"SHA224", Const, 0}, + {"SHA256", Const, 0}, + {"SHA384", Const, 0}, + {"SHA3_224", Const, 4}, + {"SHA3_256", Const, 4}, + {"SHA3_384", Const, 4}, + {"SHA3_512", Const, 4}, + {"SHA512", Const, 0}, + {"SHA512_224", Const, 5}, + {"SHA512_256", Const, 5}, + {"Signer", Type, 4}, + {"SignerOpts", Type, 4}, + }, + "crypto/aes": { + {"(KeySizeError).Error", Method, 0}, + {"BlockSize", Const, 0}, + {"KeySizeError", Type, 0}, + {"NewCipher", Func, 0}, + }, + "crypto/cipher": { + {"(StreamReader).Read", Method, 0}, + {"(StreamWriter).Close", Method, 0}, + {"(StreamWriter).Write", Method, 0}, + {"AEAD", Type, 2}, + {"Block", Type, 0}, + {"BlockMode", Type, 0}, + {"NewCBCDecrypter", Func, 0}, + {"NewCBCEncrypter", Func, 0}, + {"NewCFBDecrypter", Func, 0}, + {"NewCFBEncrypter", Func, 0}, + {"NewCTR", Func, 0}, + {"NewGCM", Func, 2}, + {"NewGCMWithNonceSize", Func, 5}, + {"NewGCMWithTagSize", Func, 11}, + {"NewOFB", Func, 0}, + {"Stream", Type, 0}, + {"StreamReader", Type, 0}, + {"StreamReader.R", Field, 0}, + {"StreamReader.S", Field, 0}, + {"StreamWriter", Type, 0}, + {"StreamWriter.Err", Field, 0}, + {"StreamWriter.S", Field, 0}, + {"StreamWriter.W", Field, 0}, + }, + "crypto/des": { + {"(KeySizeError).Error", Method, 0}, + {"BlockSize", Const, 0}, + {"KeySizeError", Type, 0}, + {"NewCipher", Func, 0}, + {"NewTripleDESCipher", Func, 0}, + }, + "crypto/dsa": { + {"ErrInvalidPublicKey", Var, 0}, + {"GenerateKey", Func, 0}, + {"GenerateParameters", Func, 0}, + {"L1024N160", Const, 0}, + {"L2048N224", Const, 0}, + {"L2048N256", Const, 0}, + {"L3072N256", Const, 0}, + {"ParameterSizes", Type, 0}, + {"Parameters", Type, 0}, + {"Parameters.G", Field, 0}, + {"Parameters.P", Field, 0}, + {"Parameters.Q", Field, 0}, + {"PrivateKey", Type, 0}, + {"PrivateKey.PublicKey", Field, 0}, + {"PrivateKey.X", Field, 0}, + {"PublicKey", Type, 0}, + {"PublicKey.Parameters", Field, 0}, + {"PublicKey.Y", Field, 0}, + {"Sign", Func, 0}, + {"Verify", Func, 0}, + }, + "crypto/ecdh": { + {"(*PrivateKey).Bytes", Method, 20}, + {"(*PrivateKey).Curve", Method, 20}, + {"(*PrivateKey).ECDH", Method, 20}, + {"(*PrivateKey).Equal", Method, 20}, + {"(*PrivateKey).Public", Method, 20}, + {"(*PrivateKey).PublicKey", Method, 20}, + {"(*PublicKey).Bytes", Method, 20}, + {"(*PublicKey).Curve", Method, 20}, + {"(*PublicKey).Equal", Method, 20}, + {"Curve", Type, 20}, + {"P256", Func, 20}, + {"P384", Func, 20}, + {"P521", Func, 20}, + {"PrivateKey", Type, 20}, + {"PublicKey", Type, 20}, + {"X25519", Func, 20}, + }, + "crypto/ecdsa": { + {"(*PrivateKey).ECDH", Method, 20}, + {"(*PrivateKey).Equal", Method, 15}, + {"(*PrivateKey).Public", Method, 4}, + {"(*PrivateKey).Sign", Method, 4}, + {"(*PublicKey).ECDH", Method, 20}, + 
{"(*PublicKey).Equal", Method, 15}, + {"(PrivateKey).Add", Method, 0}, + {"(PrivateKey).Double", Method, 0}, + {"(PrivateKey).IsOnCurve", Method, 0}, + {"(PrivateKey).Params", Method, 0}, + {"(PrivateKey).ScalarBaseMult", Method, 0}, + {"(PrivateKey).ScalarMult", Method, 0}, + {"(PublicKey).Add", Method, 0}, + {"(PublicKey).Double", Method, 0}, + {"(PublicKey).IsOnCurve", Method, 0}, + {"(PublicKey).Params", Method, 0}, + {"(PublicKey).ScalarBaseMult", Method, 0}, + {"(PublicKey).ScalarMult", Method, 0}, + {"GenerateKey", Func, 0}, + {"PrivateKey", Type, 0}, + {"PrivateKey.D", Field, 0}, + {"PrivateKey.PublicKey", Field, 0}, + {"PublicKey", Type, 0}, + {"PublicKey.Curve", Field, 0}, + {"PublicKey.X", Field, 0}, + {"PublicKey.Y", Field, 0}, + {"Sign", Func, 0}, + {"SignASN1", Func, 15}, + {"Verify", Func, 0}, + {"VerifyASN1", Func, 15}, + }, + "crypto/ed25519": { + {"(*Options).HashFunc", Method, 20}, + {"(PrivateKey).Equal", Method, 15}, + {"(PrivateKey).Public", Method, 13}, + {"(PrivateKey).Seed", Method, 13}, + {"(PrivateKey).Sign", Method, 13}, + {"(PublicKey).Equal", Method, 15}, + {"GenerateKey", Func, 13}, + {"NewKeyFromSeed", Func, 13}, + {"Options", Type, 20}, + {"Options.Context", Field, 20}, + {"Options.Hash", Field, 20}, + {"PrivateKey", Type, 13}, + {"PrivateKeySize", Const, 13}, + {"PublicKey", Type, 13}, + {"PublicKeySize", Const, 13}, + {"SeedSize", Const, 13}, + {"Sign", Func, 13}, + {"SignatureSize", Const, 13}, + {"Verify", Func, 13}, + {"VerifyWithOptions", Func, 20}, + }, + "crypto/elliptic": { + {"(*CurveParams).Add", Method, 0}, + {"(*CurveParams).Double", Method, 0}, + {"(*CurveParams).IsOnCurve", Method, 0}, + {"(*CurveParams).Params", Method, 0}, + {"(*CurveParams).ScalarBaseMult", Method, 0}, + {"(*CurveParams).ScalarMult", Method, 0}, + {"Curve", Type, 0}, + {"CurveParams", Type, 0}, + {"CurveParams.B", Field, 0}, + {"CurveParams.BitSize", Field, 0}, + {"CurveParams.Gx", Field, 0}, + {"CurveParams.Gy", Field, 0}, + {"CurveParams.N", Field, 0}, + {"CurveParams.Name", Field, 5}, + {"CurveParams.P", Field, 0}, + {"GenerateKey", Func, 0}, + {"Marshal", Func, 0}, + {"MarshalCompressed", Func, 15}, + {"P224", Func, 0}, + {"P256", Func, 0}, + {"P384", Func, 0}, + {"P521", Func, 0}, + {"Unmarshal", Func, 0}, + {"UnmarshalCompressed", Func, 15}, + }, + "crypto/hmac": { + {"Equal", Func, 1}, + {"New", Func, 0}, + }, + "crypto/md5": { + {"BlockSize", Const, 0}, + {"New", Func, 0}, + {"Size", Const, 0}, + {"Sum", Func, 2}, + }, + "crypto/rand": { + {"Int", Func, 0}, + {"Prime", Func, 0}, + {"Read", Func, 0}, + {"Reader", Var, 0}, + }, + "crypto/rc4": { + {"(*Cipher).Reset", Method, 0}, + {"(*Cipher).XORKeyStream", Method, 0}, + {"(KeySizeError).Error", Method, 0}, + {"Cipher", Type, 0}, + {"KeySizeError", Type, 0}, + {"NewCipher", Func, 0}, + }, + "crypto/rsa": { + {"(*PSSOptions).HashFunc", Method, 4}, + {"(*PrivateKey).Decrypt", Method, 5}, + {"(*PrivateKey).Equal", Method, 15}, + {"(*PrivateKey).Precompute", Method, 0}, + {"(*PrivateKey).Public", Method, 4}, + {"(*PrivateKey).Sign", Method, 4}, + {"(*PrivateKey).Size", Method, 11}, + {"(*PrivateKey).Validate", Method, 0}, + {"(*PublicKey).Equal", Method, 15}, + {"(*PublicKey).Size", Method, 11}, + {"CRTValue", Type, 0}, + {"CRTValue.Coeff", Field, 0}, + {"CRTValue.Exp", Field, 0}, + {"CRTValue.R", Field, 0}, + {"DecryptOAEP", Func, 0}, + {"DecryptPKCS1v15", Func, 0}, + {"DecryptPKCS1v15SessionKey", Func, 0}, + {"EncryptOAEP", Func, 0}, + {"EncryptPKCS1v15", Func, 0}, + {"ErrDecryption", Var, 0}, + {"ErrMessageTooLong", 
Var, 0}, + {"ErrVerification", Var, 0}, + {"GenerateKey", Func, 0}, + {"GenerateMultiPrimeKey", Func, 0}, + {"OAEPOptions", Type, 5}, + {"OAEPOptions.Hash", Field, 5}, + {"OAEPOptions.Label", Field, 5}, + {"OAEPOptions.MGFHash", Field, 20}, + {"PKCS1v15DecryptOptions", Type, 5}, + {"PKCS1v15DecryptOptions.SessionKeyLen", Field, 5}, + {"PSSOptions", Type, 2}, + {"PSSOptions.Hash", Field, 4}, + {"PSSOptions.SaltLength", Field, 2}, + {"PSSSaltLengthAuto", Const, 2}, + {"PSSSaltLengthEqualsHash", Const, 2}, + {"PrecomputedValues", Type, 0}, + {"PrecomputedValues.CRTValues", Field, 0}, + {"PrecomputedValues.Dp", Field, 0}, + {"PrecomputedValues.Dq", Field, 0}, + {"PrecomputedValues.Qinv", Field, 0}, + {"PrivateKey", Type, 0}, + {"PrivateKey.D", Field, 0}, + {"PrivateKey.Precomputed", Field, 0}, + {"PrivateKey.Primes", Field, 0}, + {"PrivateKey.PublicKey", Field, 0}, + {"PublicKey", Type, 0}, + {"PublicKey.E", Field, 0}, + {"PublicKey.N", Field, 0}, + {"SignPKCS1v15", Func, 0}, + {"SignPSS", Func, 2}, + {"VerifyPKCS1v15", Func, 0}, + {"VerifyPSS", Func, 2}, + }, + "crypto/sha1": { + {"BlockSize", Const, 0}, + {"New", Func, 0}, + {"Size", Const, 0}, + {"Sum", Func, 2}, + }, + "crypto/sha256": { + {"BlockSize", Const, 0}, + {"New", Func, 0}, + {"New224", Func, 0}, + {"Size", Const, 0}, + {"Size224", Const, 0}, + {"Sum224", Func, 2}, + {"Sum256", Func, 2}, + }, + "crypto/sha512": { + {"BlockSize", Const, 0}, + {"New", Func, 0}, + {"New384", Func, 0}, + {"New512_224", Func, 5}, + {"New512_256", Func, 5}, + {"Size", Const, 0}, + {"Size224", Const, 5}, + {"Size256", Const, 5}, + {"Size384", Const, 0}, + {"Sum384", Func, 2}, + {"Sum512", Func, 2}, + {"Sum512_224", Func, 5}, + {"Sum512_256", Func, 5}, + }, + "crypto/subtle": { + {"ConstantTimeByteEq", Func, 0}, + {"ConstantTimeCompare", Func, 0}, + {"ConstantTimeCopy", Func, 0}, + {"ConstantTimeEq", Func, 0}, + {"ConstantTimeLessOrEq", Func, 2}, + {"ConstantTimeSelect", Func, 0}, + {"XORBytes", Func, 20}, + }, + "crypto/tls": { + {"(*CertificateRequestInfo).Context", Method, 17}, + {"(*CertificateRequestInfo).SupportsCertificate", Method, 14}, + {"(*CertificateVerificationError).Error", Method, 20}, + {"(*CertificateVerificationError).Unwrap", Method, 20}, + {"(*ClientHelloInfo).Context", Method, 17}, + {"(*ClientHelloInfo).SupportsCertificate", Method, 14}, + {"(*ClientSessionState).ResumptionState", Method, 21}, + {"(*Config).BuildNameToCertificate", Method, 0}, + {"(*Config).Clone", Method, 8}, + {"(*Config).DecryptTicket", Method, 21}, + {"(*Config).EncryptTicket", Method, 21}, + {"(*Config).SetSessionTicketKeys", Method, 5}, + {"(*Conn).Close", Method, 0}, + {"(*Conn).CloseWrite", Method, 8}, + {"(*Conn).ConnectionState", Method, 0}, + {"(*Conn).Handshake", Method, 0}, + {"(*Conn).HandshakeContext", Method, 17}, + {"(*Conn).LocalAddr", Method, 0}, + {"(*Conn).NetConn", Method, 18}, + {"(*Conn).OCSPResponse", Method, 0}, + {"(*Conn).Read", Method, 0}, + {"(*Conn).RemoteAddr", Method, 0}, + {"(*Conn).SetDeadline", Method, 0}, + {"(*Conn).SetReadDeadline", Method, 0}, + {"(*Conn).SetWriteDeadline", Method, 0}, + {"(*Conn).VerifyHostname", Method, 0}, + {"(*Conn).Write", Method, 0}, + {"(*ConnectionState).ExportKeyingMaterial", Method, 11}, + {"(*Dialer).Dial", Method, 15}, + {"(*Dialer).DialContext", Method, 15}, + {"(*QUICConn).Close", Method, 21}, + {"(*QUICConn).ConnectionState", Method, 21}, + {"(*QUICConn).HandleData", Method, 21}, + {"(*QUICConn).NextEvent", Method, 21}, + {"(*QUICConn).SendSessionTicket", Method, 21}, + 
{"(*QUICConn).SetTransportParameters", Method, 21}, + {"(*QUICConn).Start", Method, 21}, + {"(*SessionState).Bytes", Method, 21}, + {"(AlertError).Error", Method, 21}, + {"(ClientAuthType).String", Method, 15}, + {"(CurveID).String", Method, 15}, + {"(QUICEncryptionLevel).String", Method, 21}, + {"(RecordHeaderError).Error", Method, 6}, + {"(SignatureScheme).String", Method, 15}, + {"AlertError", Type, 21}, + {"Certificate", Type, 0}, + {"Certificate.Certificate", Field, 0}, + {"Certificate.Leaf", Field, 0}, + {"Certificate.OCSPStaple", Field, 0}, + {"Certificate.PrivateKey", Field, 0}, + {"Certificate.SignedCertificateTimestamps", Field, 5}, + {"Certificate.SupportedSignatureAlgorithms", Field, 14}, + {"CertificateRequestInfo", Type, 8}, + {"CertificateRequestInfo.AcceptableCAs", Field, 8}, + {"CertificateRequestInfo.SignatureSchemes", Field, 8}, + {"CertificateRequestInfo.Version", Field, 14}, + {"CertificateVerificationError", Type, 20}, + {"CertificateVerificationError.Err", Field, 20}, + {"CertificateVerificationError.UnverifiedCertificates", Field, 20}, + {"CipherSuite", Type, 14}, + {"CipherSuite.ID", Field, 14}, + {"CipherSuite.Insecure", Field, 14}, + {"CipherSuite.Name", Field, 14}, + {"CipherSuite.SupportedVersions", Field, 14}, + {"CipherSuiteName", Func, 14}, + {"CipherSuites", Func, 14}, + {"Client", Func, 0}, + {"ClientAuthType", Type, 0}, + {"ClientHelloInfo", Type, 4}, + {"ClientHelloInfo.CipherSuites", Field, 4}, + {"ClientHelloInfo.Conn", Field, 8}, + {"ClientHelloInfo.ServerName", Field, 4}, + {"ClientHelloInfo.SignatureSchemes", Field, 8}, + {"ClientHelloInfo.SupportedCurves", Field, 4}, + {"ClientHelloInfo.SupportedPoints", Field, 4}, + {"ClientHelloInfo.SupportedProtos", Field, 8}, + {"ClientHelloInfo.SupportedVersions", Field, 8}, + {"ClientSessionCache", Type, 3}, + {"ClientSessionState", Type, 3}, + {"Config", Type, 0}, + {"Config.Certificates", Field, 0}, + {"Config.CipherSuites", Field, 0}, + {"Config.ClientAuth", Field, 0}, + {"Config.ClientCAs", Field, 0}, + {"Config.ClientSessionCache", Field, 3}, + {"Config.CurvePreferences", Field, 3}, + {"Config.DynamicRecordSizingDisabled", Field, 7}, + {"Config.GetCertificate", Field, 4}, + {"Config.GetClientCertificate", Field, 8}, + {"Config.GetConfigForClient", Field, 8}, + {"Config.InsecureSkipVerify", Field, 0}, + {"Config.KeyLogWriter", Field, 8}, + {"Config.MaxVersion", Field, 2}, + {"Config.MinVersion", Field, 2}, + {"Config.NameToCertificate", Field, 0}, + {"Config.NextProtos", Field, 0}, + {"Config.PreferServerCipherSuites", Field, 1}, + {"Config.Rand", Field, 0}, + {"Config.Renegotiation", Field, 7}, + {"Config.RootCAs", Field, 0}, + {"Config.ServerName", Field, 0}, + {"Config.SessionTicketKey", Field, 1}, + {"Config.SessionTicketsDisabled", Field, 1}, + {"Config.Time", Field, 0}, + {"Config.UnwrapSession", Field, 21}, + {"Config.VerifyConnection", Field, 15}, + {"Config.VerifyPeerCertificate", Field, 8}, + {"Config.WrapSession", Field, 21}, + {"Conn", Type, 0}, + {"ConnectionState", Type, 0}, + {"ConnectionState.CipherSuite", Field, 0}, + {"ConnectionState.DidResume", Field, 1}, + {"ConnectionState.HandshakeComplete", Field, 0}, + {"ConnectionState.NegotiatedProtocol", Field, 0}, + {"ConnectionState.NegotiatedProtocolIsMutual", Field, 0}, + {"ConnectionState.OCSPResponse", Field, 5}, + {"ConnectionState.PeerCertificates", Field, 0}, + {"ConnectionState.ServerName", Field, 0}, + {"ConnectionState.SignedCertificateTimestamps", Field, 5}, + {"ConnectionState.TLSUnique", Field, 4}, + 
{"ConnectionState.VerifiedChains", Field, 0}, + {"ConnectionState.Version", Field, 3}, + {"CurveID", Type, 3}, + {"CurveP256", Const, 3}, + {"CurveP384", Const, 3}, + {"CurveP521", Const, 3}, + {"Dial", Func, 0}, + {"DialWithDialer", Func, 3}, + {"Dialer", Type, 15}, + {"Dialer.Config", Field, 15}, + {"Dialer.NetDialer", Field, 15}, + {"ECDSAWithP256AndSHA256", Const, 8}, + {"ECDSAWithP384AndSHA384", Const, 8}, + {"ECDSAWithP521AndSHA512", Const, 8}, + {"ECDSAWithSHA1", Const, 10}, + {"Ed25519", Const, 13}, + {"InsecureCipherSuites", Func, 14}, + {"Listen", Func, 0}, + {"LoadX509KeyPair", Func, 0}, + {"NewLRUClientSessionCache", Func, 3}, + {"NewListener", Func, 0}, + {"NewResumptionState", Func, 21}, + {"NoClientCert", Const, 0}, + {"PKCS1WithSHA1", Const, 8}, + {"PKCS1WithSHA256", Const, 8}, + {"PKCS1WithSHA384", Const, 8}, + {"PKCS1WithSHA512", Const, 8}, + {"PSSWithSHA256", Const, 8}, + {"PSSWithSHA384", Const, 8}, + {"PSSWithSHA512", Const, 8}, + {"ParseSessionState", Func, 21}, + {"QUICClient", Func, 21}, + {"QUICConfig", Type, 21}, + {"QUICConfig.TLSConfig", Field, 21}, + {"QUICConn", Type, 21}, + {"QUICEncryptionLevel", Type, 21}, + {"QUICEncryptionLevelApplication", Const, 21}, + {"QUICEncryptionLevelEarly", Const, 21}, + {"QUICEncryptionLevelHandshake", Const, 21}, + {"QUICEncryptionLevelInitial", Const, 21}, + {"QUICEvent", Type, 21}, + {"QUICEvent.Data", Field, 21}, + {"QUICEvent.Kind", Field, 21}, + {"QUICEvent.Level", Field, 21}, + {"QUICEvent.Suite", Field, 21}, + {"QUICEventKind", Type, 21}, + {"QUICHandshakeDone", Const, 21}, + {"QUICNoEvent", Const, 21}, + {"QUICRejectedEarlyData", Const, 21}, + {"QUICServer", Func, 21}, + {"QUICSessionTicketOptions", Type, 21}, + {"QUICSessionTicketOptions.EarlyData", Field, 21}, + {"QUICSetReadSecret", Const, 21}, + {"QUICSetWriteSecret", Const, 21}, + {"QUICTransportParameters", Const, 21}, + {"QUICTransportParametersRequired", Const, 21}, + {"QUICWriteData", Const, 21}, + {"RecordHeaderError", Type, 6}, + {"RecordHeaderError.Conn", Field, 12}, + {"RecordHeaderError.Msg", Field, 6}, + {"RecordHeaderError.RecordHeader", Field, 6}, + {"RenegotiateFreelyAsClient", Const, 7}, + {"RenegotiateNever", Const, 7}, + {"RenegotiateOnceAsClient", Const, 7}, + {"RenegotiationSupport", Type, 7}, + {"RequestClientCert", Const, 0}, + {"RequireAndVerifyClientCert", Const, 0}, + {"RequireAnyClientCert", Const, 0}, + {"Server", Func, 0}, + {"SessionState", Type, 21}, + {"SessionState.EarlyData", Field, 21}, + {"SessionState.Extra", Field, 21}, + {"SignatureScheme", Type, 8}, + {"TLS_AES_128_GCM_SHA256", Const, 12}, + {"TLS_AES_256_GCM_SHA384", Const, 12}, + {"TLS_CHACHA20_POLY1305_SHA256", Const, 12}, + {"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", Const, 2}, + {"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", Const, 8}, + {"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", Const, 2}, + {"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", Const, 2}, + {"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", Const, 5}, + {"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", Const, 8}, + {"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14}, + {"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", Const, 2}, + {"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0}, + {"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", Const, 0}, + {"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", Const, 8}, + {"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", Const, 2}, + {"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", Const, 1}, + {"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", Const, 5}, + {"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", Const, 8}, + 
{"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14}, + {"TLS_ECDHE_RSA_WITH_RC4_128_SHA", Const, 0}, + {"TLS_FALLBACK_SCSV", Const, 4}, + {"TLS_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0}, + {"TLS_RSA_WITH_AES_128_CBC_SHA", Const, 0}, + {"TLS_RSA_WITH_AES_128_CBC_SHA256", Const, 8}, + {"TLS_RSA_WITH_AES_128_GCM_SHA256", Const, 6}, + {"TLS_RSA_WITH_AES_256_CBC_SHA", Const, 1}, + {"TLS_RSA_WITH_AES_256_GCM_SHA384", Const, 6}, + {"TLS_RSA_WITH_RC4_128_SHA", Const, 0}, + {"VerifyClientCertIfGiven", Const, 0}, + {"VersionName", Func, 21}, + {"VersionSSL30", Const, 2}, + {"VersionTLS10", Const, 2}, + {"VersionTLS11", Const, 2}, + {"VersionTLS12", Const, 2}, + {"VersionTLS13", Const, 12}, + {"X25519", Const, 8}, + {"X509KeyPair", Func, 0}, + }, + "crypto/x509": { + {"(*CertPool).AddCert", Method, 0}, + {"(*CertPool).AddCertWithConstraint", Method, 22}, + {"(*CertPool).AppendCertsFromPEM", Method, 0}, + {"(*CertPool).Clone", Method, 19}, + {"(*CertPool).Equal", Method, 19}, + {"(*CertPool).Subjects", Method, 0}, + {"(*Certificate).CheckCRLSignature", Method, 0}, + {"(*Certificate).CheckSignature", Method, 0}, + {"(*Certificate).CheckSignatureFrom", Method, 0}, + {"(*Certificate).CreateCRL", Method, 0}, + {"(*Certificate).Equal", Method, 0}, + {"(*Certificate).Verify", Method, 0}, + {"(*Certificate).VerifyHostname", Method, 0}, + {"(*CertificateRequest).CheckSignature", Method, 5}, + {"(*RevocationList).CheckSignatureFrom", Method, 19}, + {"(CertificateInvalidError).Error", Method, 0}, + {"(ConstraintViolationError).Error", Method, 0}, + {"(HostnameError).Error", Method, 0}, + {"(InsecureAlgorithmError).Error", Method, 6}, + {"(OID).Equal", Method, 22}, + {"(OID).EqualASN1OID", Method, 22}, + {"(OID).String", Method, 22}, + {"(PublicKeyAlgorithm).String", Method, 10}, + {"(SignatureAlgorithm).String", Method, 6}, + {"(SystemRootsError).Error", Method, 1}, + {"(SystemRootsError).Unwrap", Method, 16}, + {"(UnhandledCriticalExtension).Error", Method, 0}, + {"(UnknownAuthorityError).Error", Method, 0}, + {"CANotAuthorizedForExtKeyUsage", Const, 10}, + {"CANotAuthorizedForThisName", Const, 0}, + {"CertPool", Type, 0}, + {"Certificate", Type, 0}, + {"Certificate.AuthorityKeyId", Field, 0}, + {"Certificate.BasicConstraintsValid", Field, 0}, + {"Certificate.CRLDistributionPoints", Field, 2}, + {"Certificate.DNSNames", Field, 0}, + {"Certificate.EmailAddresses", Field, 0}, + {"Certificate.ExcludedDNSDomains", Field, 9}, + {"Certificate.ExcludedEmailAddresses", Field, 10}, + {"Certificate.ExcludedIPRanges", Field, 10}, + {"Certificate.ExcludedURIDomains", Field, 10}, + {"Certificate.ExtKeyUsage", Field, 0}, + {"Certificate.Extensions", Field, 2}, + {"Certificate.ExtraExtensions", Field, 2}, + {"Certificate.IPAddresses", Field, 1}, + {"Certificate.IsCA", Field, 0}, + {"Certificate.Issuer", Field, 0}, + {"Certificate.IssuingCertificateURL", Field, 2}, + {"Certificate.KeyUsage", Field, 0}, + {"Certificate.MaxPathLen", Field, 0}, + {"Certificate.MaxPathLenZero", Field, 4}, + {"Certificate.NotAfter", Field, 0}, + {"Certificate.NotBefore", Field, 0}, + {"Certificate.OCSPServer", Field, 2}, + {"Certificate.PermittedDNSDomains", Field, 0}, + {"Certificate.PermittedDNSDomainsCritical", Field, 0}, + {"Certificate.PermittedEmailAddresses", Field, 10}, + {"Certificate.PermittedIPRanges", Field, 10}, + {"Certificate.PermittedURIDomains", Field, 10}, + {"Certificate.Policies", Field, 22}, + {"Certificate.PolicyIdentifiers", Field, 0}, + {"Certificate.PublicKey", Field, 0}, + {"Certificate.PublicKeyAlgorithm", Field, 0}, 
+ {"Certificate.Raw", Field, 0}, + {"Certificate.RawIssuer", Field, 0}, + {"Certificate.RawSubject", Field, 0}, + {"Certificate.RawSubjectPublicKeyInfo", Field, 0}, + {"Certificate.RawTBSCertificate", Field, 0}, + {"Certificate.SerialNumber", Field, 0}, + {"Certificate.Signature", Field, 0}, + {"Certificate.SignatureAlgorithm", Field, 0}, + {"Certificate.Subject", Field, 0}, + {"Certificate.SubjectKeyId", Field, 0}, + {"Certificate.URIs", Field, 10}, + {"Certificate.UnhandledCriticalExtensions", Field, 5}, + {"Certificate.UnknownExtKeyUsage", Field, 0}, + {"Certificate.Version", Field, 0}, + {"CertificateInvalidError", Type, 0}, + {"CertificateInvalidError.Cert", Field, 0}, + {"CertificateInvalidError.Detail", Field, 10}, + {"CertificateInvalidError.Reason", Field, 0}, + {"CertificateRequest", Type, 3}, + {"CertificateRequest.Attributes", Field, 3}, + {"CertificateRequest.DNSNames", Field, 3}, + {"CertificateRequest.EmailAddresses", Field, 3}, + {"CertificateRequest.Extensions", Field, 3}, + {"CertificateRequest.ExtraExtensions", Field, 3}, + {"CertificateRequest.IPAddresses", Field, 3}, + {"CertificateRequest.PublicKey", Field, 3}, + {"CertificateRequest.PublicKeyAlgorithm", Field, 3}, + {"CertificateRequest.Raw", Field, 3}, + {"CertificateRequest.RawSubject", Field, 3}, + {"CertificateRequest.RawSubjectPublicKeyInfo", Field, 3}, + {"CertificateRequest.RawTBSCertificateRequest", Field, 3}, + {"CertificateRequest.Signature", Field, 3}, + {"CertificateRequest.SignatureAlgorithm", Field, 3}, + {"CertificateRequest.Subject", Field, 3}, + {"CertificateRequest.URIs", Field, 10}, + {"CertificateRequest.Version", Field, 3}, + {"ConstraintViolationError", Type, 0}, + {"CreateCertificate", Func, 0}, + {"CreateCertificateRequest", Func, 3}, + {"CreateRevocationList", Func, 15}, + {"DSA", Const, 0}, + {"DSAWithSHA1", Const, 0}, + {"DSAWithSHA256", Const, 0}, + {"DecryptPEMBlock", Func, 1}, + {"ECDSA", Const, 1}, + {"ECDSAWithSHA1", Const, 1}, + {"ECDSAWithSHA256", Const, 1}, + {"ECDSAWithSHA384", Const, 1}, + {"ECDSAWithSHA512", Const, 1}, + {"Ed25519", Const, 13}, + {"EncryptPEMBlock", Func, 1}, + {"ErrUnsupportedAlgorithm", Var, 0}, + {"Expired", Const, 0}, + {"ExtKeyUsage", Type, 0}, + {"ExtKeyUsageAny", Const, 0}, + {"ExtKeyUsageClientAuth", Const, 0}, + {"ExtKeyUsageCodeSigning", Const, 0}, + {"ExtKeyUsageEmailProtection", Const, 0}, + {"ExtKeyUsageIPSECEndSystem", Const, 1}, + {"ExtKeyUsageIPSECTunnel", Const, 1}, + {"ExtKeyUsageIPSECUser", Const, 1}, + {"ExtKeyUsageMicrosoftCommercialCodeSigning", Const, 10}, + {"ExtKeyUsageMicrosoftKernelCodeSigning", Const, 10}, + {"ExtKeyUsageMicrosoftServerGatedCrypto", Const, 1}, + {"ExtKeyUsageNetscapeServerGatedCrypto", Const, 1}, + {"ExtKeyUsageOCSPSigning", Const, 0}, + {"ExtKeyUsageServerAuth", Const, 0}, + {"ExtKeyUsageTimeStamping", Const, 0}, + {"HostnameError", Type, 0}, + {"HostnameError.Certificate", Field, 0}, + {"HostnameError.Host", Field, 0}, + {"IncompatibleUsage", Const, 1}, + {"IncorrectPasswordError", Var, 1}, + {"InsecureAlgorithmError", Type, 6}, + {"InvalidReason", Type, 0}, + {"IsEncryptedPEMBlock", Func, 1}, + {"KeyUsage", Type, 0}, + {"KeyUsageCRLSign", Const, 0}, + {"KeyUsageCertSign", Const, 0}, + {"KeyUsageContentCommitment", Const, 0}, + {"KeyUsageDataEncipherment", Const, 0}, + {"KeyUsageDecipherOnly", Const, 0}, + {"KeyUsageDigitalSignature", Const, 0}, + {"KeyUsageEncipherOnly", Const, 0}, + {"KeyUsageKeyAgreement", Const, 0}, + {"KeyUsageKeyEncipherment", Const, 0}, + {"MD2WithRSA", Const, 0}, + {"MD5WithRSA", Const, 0}, + 
{"MarshalECPrivateKey", Func, 2}, + {"MarshalPKCS1PrivateKey", Func, 0}, + {"MarshalPKCS1PublicKey", Func, 10}, + {"MarshalPKCS8PrivateKey", Func, 10}, + {"MarshalPKIXPublicKey", Func, 0}, + {"NameConstraintsWithoutSANs", Const, 10}, + {"NameMismatch", Const, 8}, + {"NewCertPool", Func, 0}, + {"NotAuthorizedToSign", Const, 0}, + {"OID", Type, 22}, + {"OIDFromInts", Func, 22}, + {"PEMCipher", Type, 1}, + {"PEMCipher3DES", Const, 1}, + {"PEMCipherAES128", Const, 1}, + {"PEMCipherAES192", Const, 1}, + {"PEMCipherAES256", Const, 1}, + {"PEMCipherDES", Const, 1}, + {"ParseCRL", Func, 0}, + {"ParseCertificate", Func, 0}, + {"ParseCertificateRequest", Func, 3}, + {"ParseCertificates", Func, 0}, + {"ParseDERCRL", Func, 0}, + {"ParseECPrivateKey", Func, 1}, + {"ParsePKCS1PrivateKey", Func, 0}, + {"ParsePKCS1PublicKey", Func, 10}, + {"ParsePKCS8PrivateKey", Func, 0}, + {"ParsePKIXPublicKey", Func, 0}, + {"ParseRevocationList", Func, 19}, + {"PublicKeyAlgorithm", Type, 0}, + {"PureEd25519", Const, 13}, + {"RSA", Const, 0}, + {"RevocationList", Type, 15}, + {"RevocationList.AuthorityKeyId", Field, 19}, + {"RevocationList.Extensions", Field, 19}, + {"RevocationList.ExtraExtensions", Field, 15}, + {"RevocationList.Issuer", Field, 19}, + {"RevocationList.NextUpdate", Field, 15}, + {"RevocationList.Number", Field, 15}, + {"RevocationList.Raw", Field, 19}, + {"RevocationList.RawIssuer", Field, 19}, + {"RevocationList.RawTBSRevocationList", Field, 19}, + {"RevocationList.RevokedCertificateEntries", Field, 21}, + {"RevocationList.RevokedCertificates", Field, 15}, + {"RevocationList.Signature", Field, 19}, + {"RevocationList.SignatureAlgorithm", Field, 15}, + {"RevocationList.ThisUpdate", Field, 15}, + {"RevocationListEntry", Type, 21}, + {"RevocationListEntry.Extensions", Field, 21}, + {"RevocationListEntry.ExtraExtensions", Field, 21}, + {"RevocationListEntry.Raw", Field, 21}, + {"RevocationListEntry.ReasonCode", Field, 21}, + {"RevocationListEntry.RevocationTime", Field, 21}, + {"RevocationListEntry.SerialNumber", Field, 21}, + {"SHA1WithRSA", Const, 0}, + {"SHA256WithRSA", Const, 0}, + {"SHA256WithRSAPSS", Const, 8}, + {"SHA384WithRSA", Const, 0}, + {"SHA384WithRSAPSS", Const, 8}, + {"SHA512WithRSA", Const, 0}, + {"SHA512WithRSAPSS", Const, 8}, + {"SetFallbackRoots", Func, 20}, + {"SignatureAlgorithm", Type, 0}, + {"SystemCertPool", Func, 7}, + {"SystemRootsError", Type, 1}, + {"SystemRootsError.Err", Field, 7}, + {"TooManyConstraints", Const, 10}, + {"TooManyIntermediates", Const, 0}, + {"UnconstrainedName", Const, 10}, + {"UnhandledCriticalExtension", Type, 0}, + {"UnknownAuthorityError", Type, 0}, + {"UnknownAuthorityError.Cert", Field, 8}, + {"UnknownPublicKeyAlgorithm", Const, 0}, + {"UnknownSignatureAlgorithm", Const, 0}, + {"VerifyOptions", Type, 0}, + {"VerifyOptions.CurrentTime", Field, 0}, + {"VerifyOptions.DNSName", Field, 0}, + {"VerifyOptions.Intermediates", Field, 0}, + {"VerifyOptions.KeyUsages", Field, 1}, + {"VerifyOptions.MaxConstraintComparisions", Field, 10}, + {"VerifyOptions.Roots", Field, 0}, + }, + "crypto/x509/pkix": { + {"(*CertificateList).HasExpired", Method, 0}, + {"(*Name).FillFromRDNSequence", Method, 0}, + {"(Name).String", Method, 10}, + {"(Name).ToRDNSequence", Method, 0}, + {"(RDNSequence).String", Method, 10}, + {"AlgorithmIdentifier", Type, 0}, + {"AlgorithmIdentifier.Algorithm", Field, 0}, + {"AlgorithmIdentifier.Parameters", Field, 0}, + {"AttributeTypeAndValue", Type, 0}, + {"AttributeTypeAndValue.Type", Field, 0}, + {"AttributeTypeAndValue.Value", Field, 0}, + 
{"AttributeTypeAndValueSET", Type, 3}, + {"AttributeTypeAndValueSET.Type", Field, 3}, + {"AttributeTypeAndValueSET.Value", Field, 3}, + {"CertificateList", Type, 0}, + {"CertificateList.SignatureAlgorithm", Field, 0}, + {"CertificateList.SignatureValue", Field, 0}, + {"CertificateList.TBSCertList", Field, 0}, + {"Extension", Type, 0}, + {"Extension.Critical", Field, 0}, + {"Extension.Id", Field, 0}, + {"Extension.Value", Field, 0}, + {"Name", Type, 0}, + {"Name.CommonName", Field, 0}, + {"Name.Country", Field, 0}, + {"Name.ExtraNames", Field, 5}, + {"Name.Locality", Field, 0}, + {"Name.Names", Field, 0}, + {"Name.Organization", Field, 0}, + {"Name.OrganizationalUnit", Field, 0}, + {"Name.PostalCode", Field, 0}, + {"Name.Province", Field, 0}, + {"Name.SerialNumber", Field, 0}, + {"Name.StreetAddress", Field, 0}, + {"RDNSequence", Type, 0}, + {"RelativeDistinguishedNameSET", Type, 0}, + {"RevokedCertificate", Type, 0}, + {"RevokedCertificate.Extensions", Field, 0}, + {"RevokedCertificate.RevocationTime", Field, 0}, + {"RevokedCertificate.SerialNumber", Field, 0}, + {"TBSCertificateList", Type, 0}, + {"TBSCertificateList.Extensions", Field, 0}, + {"TBSCertificateList.Issuer", Field, 0}, + {"TBSCertificateList.NextUpdate", Field, 0}, + {"TBSCertificateList.Raw", Field, 0}, + {"TBSCertificateList.RevokedCertificates", Field, 0}, + {"TBSCertificateList.Signature", Field, 0}, + {"TBSCertificateList.ThisUpdate", Field, 0}, + {"TBSCertificateList.Version", Field, 0}, + }, + "database/sql": { + {"(*ColumnType).DatabaseTypeName", Method, 8}, + {"(*ColumnType).DecimalSize", Method, 8}, + {"(*ColumnType).Length", Method, 8}, + {"(*ColumnType).Name", Method, 8}, + {"(*ColumnType).Nullable", Method, 8}, + {"(*ColumnType).ScanType", Method, 8}, + {"(*Conn).BeginTx", Method, 9}, + {"(*Conn).Close", Method, 9}, + {"(*Conn).ExecContext", Method, 9}, + {"(*Conn).PingContext", Method, 9}, + {"(*Conn).PrepareContext", Method, 9}, + {"(*Conn).QueryContext", Method, 9}, + {"(*Conn).QueryRowContext", Method, 9}, + {"(*Conn).Raw", Method, 13}, + {"(*DB).Begin", Method, 0}, + {"(*DB).BeginTx", Method, 8}, + {"(*DB).Close", Method, 0}, + {"(*DB).Conn", Method, 9}, + {"(*DB).Driver", Method, 0}, + {"(*DB).Exec", Method, 0}, + {"(*DB).ExecContext", Method, 8}, + {"(*DB).Ping", Method, 1}, + {"(*DB).PingContext", Method, 8}, + {"(*DB).Prepare", Method, 0}, + {"(*DB).PrepareContext", Method, 8}, + {"(*DB).Query", Method, 0}, + {"(*DB).QueryContext", Method, 8}, + {"(*DB).QueryRow", Method, 0}, + {"(*DB).QueryRowContext", Method, 8}, + {"(*DB).SetConnMaxIdleTime", Method, 15}, + {"(*DB).SetConnMaxLifetime", Method, 6}, + {"(*DB).SetMaxIdleConns", Method, 1}, + {"(*DB).SetMaxOpenConns", Method, 2}, + {"(*DB).Stats", Method, 5}, + {"(*Null).Scan", Method, 22}, + {"(*NullBool).Scan", Method, 0}, + {"(*NullByte).Scan", Method, 17}, + {"(*NullFloat64).Scan", Method, 0}, + {"(*NullInt16).Scan", Method, 17}, + {"(*NullInt32).Scan", Method, 13}, + {"(*NullInt64).Scan", Method, 0}, + {"(*NullString).Scan", Method, 0}, + {"(*NullTime).Scan", Method, 13}, + {"(*Row).Err", Method, 15}, + {"(*Row).Scan", Method, 0}, + {"(*Rows).Close", Method, 0}, + {"(*Rows).ColumnTypes", Method, 8}, + {"(*Rows).Columns", Method, 0}, + {"(*Rows).Err", Method, 0}, + {"(*Rows).Next", Method, 0}, + {"(*Rows).NextResultSet", Method, 8}, + {"(*Rows).Scan", Method, 0}, + {"(*Stmt).Close", Method, 0}, + {"(*Stmt).Exec", Method, 0}, + {"(*Stmt).ExecContext", Method, 8}, + {"(*Stmt).Query", Method, 0}, + {"(*Stmt).QueryContext", Method, 8}, + 
{"(*Stmt).QueryRow", Method, 0}, + {"(*Stmt).QueryRowContext", Method, 8}, + {"(*Tx).Commit", Method, 0}, + {"(*Tx).Exec", Method, 0}, + {"(*Tx).ExecContext", Method, 8}, + {"(*Tx).Prepare", Method, 0}, + {"(*Tx).PrepareContext", Method, 8}, + {"(*Tx).Query", Method, 0}, + {"(*Tx).QueryContext", Method, 8}, + {"(*Tx).QueryRow", Method, 0}, + {"(*Tx).QueryRowContext", Method, 8}, + {"(*Tx).Rollback", Method, 0}, + {"(*Tx).Stmt", Method, 0}, + {"(*Tx).StmtContext", Method, 8}, + {"(IsolationLevel).String", Method, 11}, + {"(Null).Value", Method, 22}, + {"(NullBool).Value", Method, 0}, + {"(NullByte).Value", Method, 17}, + {"(NullFloat64).Value", Method, 0}, + {"(NullInt16).Value", Method, 17}, + {"(NullInt32).Value", Method, 13}, + {"(NullInt64).Value", Method, 0}, + {"(NullString).Value", Method, 0}, + {"(NullTime).Value", Method, 13}, + {"ColumnType", Type, 8}, + {"Conn", Type, 9}, + {"DB", Type, 0}, + {"DBStats", Type, 5}, + {"DBStats.Idle", Field, 11}, + {"DBStats.InUse", Field, 11}, + {"DBStats.MaxIdleClosed", Field, 11}, + {"DBStats.MaxIdleTimeClosed", Field, 15}, + {"DBStats.MaxLifetimeClosed", Field, 11}, + {"DBStats.MaxOpenConnections", Field, 11}, + {"DBStats.OpenConnections", Field, 5}, + {"DBStats.WaitCount", Field, 11}, + {"DBStats.WaitDuration", Field, 11}, + {"Drivers", Func, 4}, + {"ErrConnDone", Var, 9}, + {"ErrNoRows", Var, 0}, + {"ErrTxDone", Var, 0}, + {"IsolationLevel", Type, 8}, + {"LevelDefault", Const, 8}, + {"LevelLinearizable", Const, 8}, + {"LevelReadCommitted", Const, 8}, + {"LevelReadUncommitted", Const, 8}, + {"LevelRepeatableRead", Const, 8}, + {"LevelSerializable", Const, 8}, + {"LevelSnapshot", Const, 8}, + {"LevelWriteCommitted", Const, 8}, + {"Named", Func, 8}, + {"NamedArg", Type, 8}, + {"NamedArg.Name", Field, 8}, + {"NamedArg.Value", Field, 8}, + {"Null", Type, 22}, + {"Null.V", Field, 22}, + {"Null.Valid", Field, 22}, + {"NullBool", Type, 0}, + {"NullBool.Bool", Field, 0}, + {"NullBool.Valid", Field, 0}, + {"NullByte", Type, 17}, + {"NullByte.Byte", Field, 17}, + {"NullByte.Valid", Field, 17}, + {"NullFloat64", Type, 0}, + {"NullFloat64.Float64", Field, 0}, + {"NullFloat64.Valid", Field, 0}, + {"NullInt16", Type, 17}, + {"NullInt16.Int16", Field, 17}, + {"NullInt16.Valid", Field, 17}, + {"NullInt32", Type, 13}, + {"NullInt32.Int32", Field, 13}, + {"NullInt32.Valid", Field, 13}, + {"NullInt64", Type, 0}, + {"NullInt64.Int64", Field, 0}, + {"NullInt64.Valid", Field, 0}, + {"NullString", Type, 0}, + {"NullString.String", Field, 0}, + {"NullString.Valid", Field, 0}, + {"NullTime", Type, 13}, + {"NullTime.Time", Field, 13}, + {"NullTime.Valid", Field, 13}, + {"Open", Func, 0}, + {"OpenDB", Func, 10}, + {"Out", Type, 9}, + {"Out.Dest", Field, 9}, + {"Out.In", Field, 9}, + {"RawBytes", Type, 0}, + {"Register", Func, 0}, + {"Result", Type, 0}, + {"Row", Type, 0}, + {"Rows", Type, 0}, + {"Scanner", Type, 0}, + {"Stmt", Type, 0}, + {"Tx", Type, 0}, + {"TxOptions", Type, 8}, + {"TxOptions.Isolation", Field, 8}, + {"TxOptions.ReadOnly", Field, 8}, + }, + "database/sql/driver": { + {"(NotNull).ConvertValue", Method, 0}, + {"(Null).ConvertValue", Method, 0}, + {"(RowsAffected).LastInsertId", Method, 0}, + {"(RowsAffected).RowsAffected", Method, 0}, + {"Bool", Var, 0}, + {"ColumnConverter", Type, 0}, + {"Conn", Type, 0}, + {"ConnBeginTx", Type, 8}, + {"ConnPrepareContext", Type, 8}, + {"Connector", Type, 10}, + {"DefaultParameterConverter", Var, 0}, + {"Driver", Type, 0}, + {"DriverContext", Type, 10}, + {"ErrBadConn", Var, 0}, + {"ErrRemoveArgument", Var, 9}, + 
{"ErrSkip", Var, 0}, + {"Execer", Type, 0}, + {"ExecerContext", Type, 8}, + {"Int32", Var, 0}, + {"IsScanValue", Func, 0}, + {"IsValue", Func, 0}, + {"IsolationLevel", Type, 8}, + {"NamedValue", Type, 8}, + {"NamedValue.Name", Field, 8}, + {"NamedValue.Ordinal", Field, 8}, + {"NamedValue.Value", Field, 8}, + {"NamedValueChecker", Type, 9}, + {"NotNull", Type, 0}, + {"NotNull.Converter", Field, 0}, + {"Null", Type, 0}, + {"Null.Converter", Field, 0}, + {"Pinger", Type, 8}, + {"Queryer", Type, 1}, + {"QueryerContext", Type, 8}, + {"Result", Type, 0}, + {"ResultNoRows", Var, 0}, + {"Rows", Type, 0}, + {"RowsAffected", Type, 0}, + {"RowsColumnTypeDatabaseTypeName", Type, 8}, + {"RowsColumnTypeLength", Type, 8}, + {"RowsColumnTypeNullable", Type, 8}, + {"RowsColumnTypePrecisionScale", Type, 8}, + {"RowsColumnTypeScanType", Type, 8}, + {"RowsNextResultSet", Type, 8}, + {"SessionResetter", Type, 10}, + {"Stmt", Type, 0}, + {"StmtExecContext", Type, 8}, + {"StmtQueryContext", Type, 8}, + {"String", Var, 0}, + {"Tx", Type, 0}, + {"TxOptions", Type, 8}, + {"TxOptions.Isolation", Field, 8}, + {"TxOptions.ReadOnly", Field, 8}, + {"Validator", Type, 15}, + {"Value", Type, 0}, + {"ValueConverter", Type, 0}, + {"Valuer", Type, 0}, + }, + "debug/buildinfo": { + {"BuildInfo", Type, 18}, + {"Read", Func, 18}, + {"ReadFile", Func, 18}, + }, + "debug/dwarf": { + {"(*AddrType).Basic", Method, 0}, + {"(*AddrType).Common", Method, 0}, + {"(*AddrType).Size", Method, 0}, + {"(*AddrType).String", Method, 0}, + {"(*ArrayType).Common", Method, 0}, + {"(*ArrayType).Size", Method, 0}, + {"(*ArrayType).String", Method, 0}, + {"(*BasicType).Basic", Method, 0}, + {"(*BasicType).Common", Method, 0}, + {"(*BasicType).Size", Method, 0}, + {"(*BasicType).String", Method, 0}, + {"(*BoolType).Basic", Method, 0}, + {"(*BoolType).Common", Method, 0}, + {"(*BoolType).Size", Method, 0}, + {"(*BoolType).String", Method, 0}, + {"(*CharType).Basic", Method, 0}, + {"(*CharType).Common", Method, 0}, + {"(*CharType).Size", Method, 0}, + {"(*CharType).String", Method, 0}, + {"(*CommonType).Common", Method, 0}, + {"(*CommonType).Size", Method, 0}, + {"(*ComplexType).Basic", Method, 0}, + {"(*ComplexType).Common", Method, 0}, + {"(*ComplexType).Size", Method, 0}, + {"(*ComplexType).String", Method, 0}, + {"(*Data).AddSection", Method, 14}, + {"(*Data).AddTypes", Method, 3}, + {"(*Data).LineReader", Method, 5}, + {"(*Data).Ranges", Method, 7}, + {"(*Data).Reader", Method, 0}, + {"(*Data).Type", Method, 0}, + {"(*DotDotDotType).Common", Method, 0}, + {"(*DotDotDotType).Size", Method, 0}, + {"(*DotDotDotType).String", Method, 0}, + {"(*Entry).AttrField", Method, 5}, + {"(*Entry).Val", Method, 0}, + {"(*EnumType).Common", Method, 0}, + {"(*EnumType).Size", Method, 0}, + {"(*EnumType).String", Method, 0}, + {"(*FloatType).Basic", Method, 0}, + {"(*FloatType).Common", Method, 0}, + {"(*FloatType).Size", Method, 0}, + {"(*FloatType).String", Method, 0}, + {"(*FuncType).Common", Method, 0}, + {"(*FuncType).Size", Method, 0}, + {"(*FuncType).String", Method, 0}, + {"(*IntType).Basic", Method, 0}, + {"(*IntType).Common", Method, 0}, + {"(*IntType).Size", Method, 0}, + {"(*IntType).String", Method, 0}, + {"(*LineReader).Files", Method, 14}, + {"(*LineReader).Next", Method, 5}, + {"(*LineReader).Reset", Method, 5}, + {"(*LineReader).Seek", Method, 5}, + {"(*LineReader).SeekPC", Method, 5}, + {"(*LineReader).Tell", Method, 5}, + {"(*PtrType).Common", Method, 0}, + {"(*PtrType).Size", Method, 0}, + {"(*PtrType).String", Method, 0}, + 
{"(*QualType).Common", Method, 0}, + {"(*QualType).Size", Method, 0}, + {"(*QualType).String", Method, 0}, + {"(*Reader).AddressSize", Method, 5}, + {"(*Reader).ByteOrder", Method, 14}, + {"(*Reader).Next", Method, 0}, + {"(*Reader).Seek", Method, 0}, + {"(*Reader).SeekPC", Method, 7}, + {"(*Reader).SkipChildren", Method, 0}, + {"(*StructType).Common", Method, 0}, + {"(*StructType).Defn", Method, 0}, + {"(*StructType).Size", Method, 0}, + {"(*StructType).String", Method, 0}, + {"(*TypedefType).Common", Method, 0}, + {"(*TypedefType).Size", Method, 0}, + {"(*TypedefType).String", Method, 0}, + {"(*UcharType).Basic", Method, 0}, + {"(*UcharType).Common", Method, 0}, + {"(*UcharType).Size", Method, 0}, + {"(*UcharType).String", Method, 0}, + {"(*UintType).Basic", Method, 0}, + {"(*UintType).Common", Method, 0}, + {"(*UintType).Size", Method, 0}, + {"(*UintType).String", Method, 0}, + {"(*UnspecifiedType).Basic", Method, 4}, + {"(*UnspecifiedType).Common", Method, 4}, + {"(*UnspecifiedType).Size", Method, 4}, + {"(*UnspecifiedType).String", Method, 4}, + {"(*UnsupportedType).Common", Method, 13}, + {"(*UnsupportedType).Size", Method, 13}, + {"(*UnsupportedType).String", Method, 13}, + {"(*VoidType).Common", Method, 0}, + {"(*VoidType).Size", Method, 0}, + {"(*VoidType).String", Method, 0}, + {"(Attr).GoString", Method, 0}, + {"(Attr).String", Method, 0}, + {"(Class).GoString", Method, 5}, + {"(Class).String", Method, 5}, + {"(DecodeError).Error", Method, 0}, + {"(Tag).GoString", Method, 0}, + {"(Tag).String", Method, 0}, + {"AddrType", Type, 0}, + {"AddrType.BasicType", Field, 0}, + {"ArrayType", Type, 0}, + {"ArrayType.CommonType", Field, 0}, + {"ArrayType.Count", Field, 0}, + {"ArrayType.StrideBitSize", Field, 0}, + {"ArrayType.Type", Field, 0}, + {"Attr", Type, 0}, + {"AttrAbstractOrigin", Const, 0}, + {"AttrAccessibility", Const, 0}, + {"AttrAddrBase", Const, 14}, + {"AttrAddrClass", Const, 0}, + {"AttrAlignment", Const, 14}, + {"AttrAllocated", Const, 0}, + {"AttrArtificial", Const, 0}, + {"AttrAssociated", Const, 0}, + {"AttrBaseTypes", Const, 0}, + {"AttrBinaryScale", Const, 14}, + {"AttrBitOffset", Const, 0}, + {"AttrBitSize", Const, 0}, + {"AttrByteSize", Const, 0}, + {"AttrCallAllCalls", Const, 14}, + {"AttrCallAllSourceCalls", Const, 14}, + {"AttrCallAllTailCalls", Const, 14}, + {"AttrCallColumn", Const, 0}, + {"AttrCallDataLocation", Const, 14}, + {"AttrCallDataValue", Const, 14}, + {"AttrCallFile", Const, 0}, + {"AttrCallLine", Const, 0}, + {"AttrCallOrigin", Const, 14}, + {"AttrCallPC", Const, 14}, + {"AttrCallParameter", Const, 14}, + {"AttrCallReturnPC", Const, 14}, + {"AttrCallTailCall", Const, 14}, + {"AttrCallTarget", Const, 14}, + {"AttrCallTargetClobbered", Const, 14}, + {"AttrCallValue", Const, 14}, + {"AttrCalling", Const, 0}, + {"AttrCommonRef", Const, 0}, + {"AttrCompDir", Const, 0}, + {"AttrConstExpr", Const, 14}, + {"AttrConstValue", Const, 0}, + {"AttrContainingType", Const, 0}, + {"AttrCount", Const, 0}, + {"AttrDataBitOffset", Const, 14}, + {"AttrDataLocation", Const, 0}, + {"AttrDataMemberLoc", Const, 0}, + {"AttrDecimalScale", Const, 14}, + {"AttrDecimalSign", Const, 14}, + {"AttrDeclColumn", Const, 0}, + {"AttrDeclFile", Const, 0}, + {"AttrDeclLine", Const, 0}, + {"AttrDeclaration", Const, 0}, + {"AttrDefaultValue", Const, 0}, + {"AttrDefaulted", Const, 14}, + {"AttrDeleted", Const, 14}, + {"AttrDescription", Const, 0}, + {"AttrDigitCount", Const, 14}, + {"AttrDiscr", Const, 0}, + {"AttrDiscrList", Const, 0}, + {"AttrDiscrValue", Const, 0}, + {"AttrDwoName", 
Const, 14}, + {"AttrElemental", Const, 14}, + {"AttrEncoding", Const, 0}, + {"AttrEndianity", Const, 14}, + {"AttrEntrypc", Const, 0}, + {"AttrEnumClass", Const, 14}, + {"AttrExplicit", Const, 14}, + {"AttrExportSymbols", Const, 14}, + {"AttrExtension", Const, 0}, + {"AttrExternal", Const, 0}, + {"AttrFrameBase", Const, 0}, + {"AttrFriend", Const, 0}, + {"AttrHighpc", Const, 0}, + {"AttrIdentifierCase", Const, 0}, + {"AttrImport", Const, 0}, + {"AttrInline", Const, 0}, + {"AttrIsOptional", Const, 0}, + {"AttrLanguage", Const, 0}, + {"AttrLinkageName", Const, 14}, + {"AttrLocation", Const, 0}, + {"AttrLoclistsBase", Const, 14}, + {"AttrLowerBound", Const, 0}, + {"AttrLowpc", Const, 0}, + {"AttrMacroInfo", Const, 0}, + {"AttrMacros", Const, 14}, + {"AttrMainSubprogram", Const, 14}, + {"AttrMutable", Const, 14}, + {"AttrName", Const, 0}, + {"AttrNamelistItem", Const, 0}, + {"AttrNoreturn", Const, 14}, + {"AttrObjectPointer", Const, 14}, + {"AttrOrdering", Const, 0}, + {"AttrPictureString", Const, 14}, + {"AttrPriority", Const, 0}, + {"AttrProducer", Const, 0}, + {"AttrPrototyped", Const, 0}, + {"AttrPure", Const, 14}, + {"AttrRanges", Const, 0}, + {"AttrRank", Const, 14}, + {"AttrRecursive", Const, 14}, + {"AttrReference", Const, 14}, + {"AttrReturnAddr", Const, 0}, + {"AttrRnglistsBase", Const, 14}, + {"AttrRvalueReference", Const, 14}, + {"AttrSegment", Const, 0}, + {"AttrSibling", Const, 0}, + {"AttrSignature", Const, 14}, + {"AttrSmall", Const, 14}, + {"AttrSpecification", Const, 0}, + {"AttrStartScope", Const, 0}, + {"AttrStaticLink", Const, 0}, + {"AttrStmtList", Const, 0}, + {"AttrStrOffsetsBase", Const, 14}, + {"AttrStride", Const, 0}, + {"AttrStrideSize", Const, 0}, + {"AttrStringLength", Const, 0}, + {"AttrStringLengthBitSize", Const, 14}, + {"AttrStringLengthByteSize", Const, 14}, + {"AttrThreadsScaled", Const, 14}, + {"AttrTrampoline", Const, 0}, + {"AttrType", Const, 0}, + {"AttrUpperBound", Const, 0}, + {"AttrUseLocation", Const, 0}, + {"AttrUseUTF8", Const, 0}, + {"AttrVarParam", Const, 0}, + {"AttrVirtuality", Const, 0}, + {"AttrVisibility", Const, 0}, + {"AttrVtableElemLoc", Const, 0}, + {"BasicType", Type, 0}, + {"BasicType.BitOffset", Field, 0}, + {"BasicType.BitSize", Field, 0}, + {"BasicType.CommonType", Field, 0}, + {"BasicType.DataBitOffset", Field, 18}, + {"BoolType", Type, 0}, + {"BoolType.BasicType", Field, 0}, + {"CharType", Type, 0}, + {"CharType.BasicType", Field, 0}, + {"Class", Type, 5}, + {"ClassAddrPtr", Const, 14}, + {"ClassAddress", Const, 5}, + {"ClassBlock", Const, 5}, + {"ClassConstant", Const, 5}, + {"ClassExprLoc", Const, 5}, + {"ClassFlag", Const, 5}, + {"ClassLinePtr", Const, 5}, + {"ClassLocList", Const, 14}, + {"ClassLocListPtr", Const, 5}, + {"ClassMacPtr", Const, 5}, + {"ClassRangeListPtr", Const, 5}, + {"ClassReference", Const, 5}, + {"ClassReferenceAlt", Const, 5}, + {"ClassReferenceSig", Const, 5}, + {"ClassRngList", Const, 14}, + {"ClassRngListsPtr", Const, 14}, + {"ClassStrOffsetsPtr", Const, 14}, + {"ClassString", Const, 5}, + {"ClassStringAlt", Const, 5}, + {"ClassUnknown", Const, 6}, + {"CommonType", Type, 0}, + {"CommonType.ByteSize", Field, 0}, + {"CommonType.Name", Field, 0}, + {"ComplexType", Type, 0}, + {"ComplexType.BasicType", Field, 0}, + {"Data", Type, 0}, + {"DecodeError", Type, 0}, + {"DecodeError.Err", Field, 0}, + {"DecodeError.Name", Field, 0}, + {"DecodeError.Offset", Field, 0}, + {"DotDotDotType", Type, 0}, + {"DotDotDotType.CommonType", Field, 0}, + {"Entry", Type, 0}, + {"Entry.Children", Field, 0}, + {"Entry.Field", 
Field, 0}, + {"Entry.Offset", Field, 0}, + {"Entry.Tag", Field, 0}, + {"EnumType", Type, 0}, + {"EnumType.CommonType", Field, 0}, + {"EnumType.EnumName", Field, 0}, + {"EnumType.Val", Field, 0}, + {"EnumValue", Type, 0}, + {"EnumValue.Name", Field, 0}, + {"EnumValue.Val", Field, 0}, + {"ErrUnknownPC", Var, 5}, + {"Field", Type, 0}, + {"Field.Attr", Field, 0}, + {"Field.Class", Field, 5}, + {"Field.Val", Field, 0}, + {"FloatType", Type, 0}, + {"FloatType.BasicType", Field, 0}, + {"FuncType", Type, 0}, + {"FuncType.CommonType", Field, 0}, + {"FuncType.ParamType", Field, 0}, + {"FuncType.ReturnType", Field, 0}, + {"IntType", Type, 0}, + {"IntType.BasicType", Field, 0}, + {"LineEntry", Type, 5}, + {"LineEntry.Address", Field, 5}, + {"LineEntry.BasicBlock", Field, 5}, + {"LineEntry.Column", Field, 5}, + {"LineEntry.Discriminator", Field, 5}, + {"LineEntry.EndSequence", Field, 5}, + {"LineEntry.EpilogueBegin", Field, 5}, + {"LineEntry.File", Field, 5}, + {"LineEntry.ISA", Field, 5}, + {"LineEntry.IsStmt", Field, 5}, + {"LineEntry.Line", Field, 5}, + {"LineEntry.OpIndex", Field, 5}, + {"LineEntry.PrologueEnd", Field, 5}, + {"LineFile", Type, 5}, + {"LineFile.Length", Field, 5}, + {"LineFile.Mtime", Field, 5}, + {"LineFile.Name", Field, 5}, + {"LineReader", Type, 5}, + {"LineReaderPos", Type, 5}, + {"New", Func, 0}, + {"Offset", Type, 0}, + {"PtrType", Type, 0}, + {"PtrType.CommonType", Field, 0}, + {"PtrType.Type", Field, 0}, + {"QualType", Type, 0}, + {"QualType.CommonType", Field, 0}, + {"QualType.Qual", Field, 0}, + {"QualType.Type", Field, 0}, + {"Reader", Type, 0}, + {"StructField", Type, 0}, + {"StructField.BitOffset", Field, 0}, + {"StructField.BitSize", Field, 0}, + {"StructField.ByteOffset", Field, 0}, + {"StructField.ByteSize", Field, 0}, + {"StructField.DataBitOffset", Field, 18}, + {"StructField.Name", Field, 0}, + {"StructField.Type", Field, 0}, + {"StructType", Type, 0}, + {"StructType.CommonType", Field, 0}, + {"StructType.Field", Field, 0}, + {"StructType.Incomplete", Field, 0}, + {"StructType.Kind", Field, 0}, + {"StructType.StructName", Field, 0}, + {"Tag", Type, 0}, + {"TagAccessDeclaration", Const, 0}, + {"TagArrayType", Const, 0}, + {"TagAtomicType", Const, 14}, + {"TagBaseType", Const, 0}, + {"TagCallSite", Const, 14}, + {"TagCallSiteParameter", Const, 14}, + {"TagCatchDwarfBlock", Const, 0}, + {"TagClassType", Const, 0}, + {"TagCoarrayType", Const, 14}, + {"TagCommonDwarfBlock", Const, 0}, + {"TagCommonInclusion", Const, 0}, + {"TagCompileUnit", Const, 0}, + {"TagCondition", Const, 3}, + {"TagConstType", Const, 0}, + {"TagConstant", Const, 0}, + {"TagDwarfProcedure", Const, 0}, + {"TagDynamicType", Const, 14}, + {"TagEntryPoint", Const, 0}, + {"TagEnumerationType", Const, 0}, + {"TagEnumerator", Const, 0}, + {"TagFileType", Const, 0}, + {"TagFormalParameter", Const, 0}, + {"TagFriend", Const, 0}, + {"TagGenericSubrange", Const, 14}, + {"TagImmutableType", Const, 14}, + {"TagImportedDeclaration", Const, 0}, + {"TagImportedModule", Const, 0}, + {"TagImportedUnit", Const, 0}, + {"TagInheritance", Const, 0}, + {"TagInlinedSubroutine", Const, 0}, + {"TagInterfaceType", Const, 0}, + {"TagLabel", Const, 0}, + {"TagLexDwarfBlock", Const, 0}, + {"TagMember", Const, 0}, + {"TagModule", Const, 0}, + {"TagMutableType", Const, 0}, + {"TagNamelist", Const, 0}, + {"TagNamelistItem", Const, 0}, + {"TagNamespace", Const, 0}, + {"TagPackedType", Const, 0}, + {"TagPartialUnit", Const, 0}, + {"TagPointerType", Const, 0}, + {"TagPtrToMemberType", Const, 0}, + {"TagReferenceType", Const, 0}, + 
{"TagRestrictType", Const, 0}, + {"TagRvalueReferenceType", Const, 3}, + {"TagSetType", Const, 0}, + {"TagSharedType", Const, 3}, + {"TagSkeletonUnit", Const, 14}, + {"TagStringType", Const, 0}, + {"TagStructType", Const, 0}, + {"TagSubprogram", Const, 0}, + {"TagSubrangeType", Const, 0}, + {"TagSubroutineType", Const, 0}, + {"TagTemplateAlias", Const, 3}, + {"TagTemplateTypeParameter", Const, 0}, + {"TagTemplateValueParameter", Const, 0}, + {"TagThrownType", Const, 0}, + {"TagTryDwarfBlock", Const, 0}, + {"TagTypeUnit", Const, 3}, + {"TagTypedef", Const, 0}, + {"TagUnionType", Const, 0}, + {"TagUnspecifiedParameters", Const, 0}, + {"TagUnspecifiedType", Const, 0}, + {"TagVariable", Const, 0}, + {"TagVariant", Const, 0}, + {"TagVariantPart", Const, 0}, + {"TagVolatileType", Const, 0}, + {"TagWithStmt", Const, 0}, + {"Type", Type, 0}, + {"TypedefType", Type, 0}, + {"TypedefType.CommonType", Field, 0}, + {"TypedefType.Type", Field, 0}, + {"UcharType", Type, 0}, + {"UcharType.BasicType", Field, 0}, + {"UintType", Type, 0}, + {"UintType.BasicType", Field, 0}, + {"UnspecifiedType", Type, 4}, + {"UnspecifiedType.BasicType", Field, 4}, + {"UnsupportedType", Type, 13}, + {"UnsupportedType.CommonType", Field, 13}, + {"UnsupportedType.Tag", Field, 13}, + {"VoidType", Type, 0}, + {"VoidType.CommonType", Field, 0}, + }, + "debug/elf": { + {"(*File).Close", Method, 0}, + {"(*File).DWARF", Method, 0}, + {"(*File).DynString", Method, 1}, + {"(*File).DynValue", Method, 21}, + {"(*File).DynamicSymbols", Method, 4}, + {"(*File).ImportedLibraries", Method, 0}, + {"(*File).ImportedSymbols", Method, 0}, + {"(*File).Section", Method, 0}, + {"(*File).SectionByType", Method, 0}, + {"(*File).Symbols", Method, 0}, + {"(*FormatError).Error", Method, 0}, + {"(*Prog).Open", Method, 0}, + {"(*Section).Data", Method, 0}, + {"(*Section).Open", Method, 0}, + {"(Class).GoString", Method, 0}, + {"(Class).String", Method, 0}, + {"(CompressionType).GoString", Method, 6}, + {"(CompressionType).String", Method, 6}, + {"(Data).GoString", Method, 0}, + {"(Data).String", Method, 0}, + {"(DynFlag).GoString", Method, 0}, + {"(DynFlag).String", Method, 0}, + {"(DynFlag1).GoString", Method, 21}, + {"(DynFlag1).String", Method, 21}, + {"(DynTag).GoString", Method, 0}, + {"(DynTag).String", Method, 0}, + {"(Machine).GoString", Method, 0}, + {"(Machine).String", Method, 0}, + {"(NType).GoString", Method, 0}, + {"(NType).String", Method, 0}, + {"(OSABI).GoString", Method, 0}, + {"(OSABI).String", Method, 0}, + {"(Prog).ReadAt", Method, 0}, + {"(ProgFlag).GoString", Method, 0}, + {"(ProgFlag).String", Method, 0}, + {"(ProgType).GoString", Method, 0}, + {"(ProgType).String", Method, 0}, + {"(R_386).GoString", Method, 0}, + {"(R_386).String", Method, 0}, + {"(R_390).GoString", Method, 7}, + {"(R_390).String", Method, 7}, + {"(R_AARCH64).GoString", Method, 4}, + {"(R_AARCH64).String", Method, 4}, + {"(R_ALPHA).GoString", Method, 0}, + {"(R_ALPHA).String", Method, 0}, + {"(R_ARM).GoString", Method, 0}, + {"(R_ARM).String", Method, 0}, + {"(R_LARCH).GoString", Method, 19}, + {"(R_LARCH).String", Method, 19}, + {"(R_MIPS).GoString", Method, 6}, + {"(R_MIPS).String", Method, 6}, + {"(R_PPC).GoString", Method, 0}, + {"(R_PPC).String", Method, 0}, + {"(R_PPC64).GoString", Method, 5}, + {"(R_PPC64).String", Method, 5}, + {"(R_RISCV).GoString", Method, 11}, + {"(R_RISCV).String", Method, 11}, + {"(R_SPARC).GoString", Method, 0}, + {"(R_SPARC).String", Method, 0}, + {"(R_X86_64).GoString", Method, 0}, + {"(R_X86_64).String", Method, 0}, + 
{"(Section).ReadAt", Method, 0}, + {"(SectionFlag).GoString", Method, 0}, + {"(SectionFlag).String", Method, 0}, + {"(SectionIndex).GoString", Method, 0}, + {"(SectionIndex).String", Method, 0}, + {"(SectionType).GoString", Method, 0}, + {"(SectionType).String", Method, 0}, + {"(SymBind).GoString", Method, 0}, + {"(SymBind).String", Method, 0}, + {"(SymType).GoString", Method, 0}, + {"(SymType).String", Method, 0}, + {"(SymVis).GoString", Method, 0}, + {"(SymVis).String", Method, 0}, + {"(Type).GoString", Method, 0}, + {"(Type).String", Method, 0}, + {"(Version).GoString", Method, 0}, + {"(Version).String", Method, 0}, + {"ARM_MAGIC_TRAMP_NUMBER", Const, 0}, + {"COMPRESS_HIOS", Const, 6}, + {"COMPRESS_HIPROC", Const, 6}, + {"COMPRESS_LOOS", Const, 6}, + {"COMPRESS_LOPROC", Const, 6}, + {"COMPRESS_ZLIB", Const, 6}, + {"COMPRESS_ZSTD", Const, 21}, + {"Chdr32", Type, 6}, + {"Chdr32.Addralign", Field, 6}, + {"Chdr32.Size", Field, 6}, + {"Chdr32.Type", Field, 6}, + {"Chdr64", Type, 6}, + {"Chdr64.Addralign", Field, 6}, + {"Chdr64.Size", Field, 6}, + {"Chdr64.Type", Field, 6}, + {"Class", Type, 0}, + {"CompressionType", Type, 6}, + {"DF_1_CONFALT", Const, 21}, + {"DF_1_DIRECT", Const, 21}, + {"DF_1_DISPRELDNE", Const, 21}, + {"DF_1_DISPRELPND", Const, 21}, + {"DF_1_EDITED", Const, 21}, + {"DF_1_ENDFILTEE", Const, 21}, + {"DF_1_GLOBAL", Const, 21}, + {"DF_1_GLOBAUDIT", Const, 21}, + {"DF_1_GROUP", Const, 21}, + {"DF_1_IGNMULDEF", Const, 21}, + {"DF_1_INITFIRST", Const, 21}, + {"DF_1_INTERPOSE", Const, 21}, + {"DF_1_KMOD", Const, 21}, + {"DF_1_LOADFLTR", Const, 21}, + {"DF_1_NOCOMMON", Const, 21}, + {"DF_1_NODEFLIB", Const, 21}, + {"DF_1_NODELETE", Const, 21}, + {"DF_1_NODIRECT", Const, 21}, + {"DF_1_NODUMP", Const, 21}, + {"DF_1_NOHDR", Const, 21}, + {"DF_1_NOKSYMS", Const, 21}, + {"DF_1_NOOPEN", Const, 21}, + {"DF_1_NORELOC", Const, 21}, + {"DF_1_NOW", Const, 21}, + {"DF_1_ORIGIN", Const, 21}, + {"DF_1_PIE", Const, 21}, + {"DF_1_SINGLETON", Const, 21}, + {"DF_1_STUB", Const, 21}, + {"DF_1_SYMINTPOSE", Const, 21}, + {"DF_1_TRANS", Const, 21}, + {"DF_1_WEAKFILTER", Const, 21}, + {"DF_BIND_NOW", Const, 0}, + {"DF_ORIGIN", Const, 0}, + {"DF_STATIC_TLS", Const, 0}, + {"DF_SYMBOLIC", Const, 0}, + {"DF_TEXTREL", Const, 0}, + {"DT_ADDRRNGHI", Const, 16}, + {"DT_ADDRRNGLO", Const, 16}, + {"DT_AUDIT", Const, 16}, + {"DT_AUXILIARY", Const, 16}, + {"DT_BIND_NOW", Const, 0}, + {"DT_CHECKSUM", Const, 16}, + {"DT_CONFIG", Const, 16}, + {"DT_DEBUG", Const, 0}, + {"DT_DEPAUDIT", Const, 16}, + {"DT_ENCODING", Const, 0}, + {"DT_FEATURE", Const, 16}, + {"DT_FILTER", Const, 16}, + {"DT_FINI", Const, 0}, + {"DT_FINI_ARRAY", Const, 0}, + {"DT_FINI_ARRAYSZ", Const, 0}, + {"DT_FLAGS", Const, 0}, + {"DT_FLAGS_1", Const, 16}, + {"DT_GNU_CONFLICT", Const, 16}, + {"DT_GNU_CONFLICTSZ", Const, 16}, + {"DT_GNU_HASH", Const, 16}, + {"DT_GNU_LIBLIST", Const, 16}, + {"DT_GNU_LIBLISTSZ", Const, 16}, + {"DT_GNU_PRELINKED", Const, 16}, + {"DT_HASH", Const, 0}, + {"DT_HIOS", Const, 0}, + {"DT_HIPROC", Const, 0}, + {"DT_INIT", Const, 0}, + {"DT_INIT_ARRAY", Const, 0}, + {"DT_INIT_ARRAYSZ", Const, 0}, + {"DT_JMPREL", Const, 0}, + {"DT_LOOS", Const, 0}, + {"DT_LOPROC", Const, 0}, + {"DT_MIPS_AUX_DYNAMIC", Const, 16}, + {"DT_MIPS_BASE_ADDRESS", Const, 16}, + {"DT_MIPS_COMPACT_SIZE", Const, 16}, + {"DT_MIPS_CONFLICT", Const, 16}, + {"DT_MIPS_CONFLICTNO", Const, 16}, + {"DT_MIPS_CXX_FLAGS", Const, 16}, + {"DT_MIPS_DELTA_CLASS", Const, 16}, + {"DT_MIPS_DELTA_CLASSSYM", Const, 16}, + {"DT_MIPS_DELTA_CLASSSYM_NO", Const, 16}, + 
{"DT_MIPS_DELTA_CLASS_NO", Const, 16}, + {"DT_MIPS_DELTA_INSTANCE", Const, 16}, + {"DT_MIPS_DELTA_INSTANCE_NO", Const, 16}, + {"DT_MIPS_DELTA_RELOC", Const, 16}, + {"DT_MIPS_DELTA_RELOC_NO", Const, 16}, + {"DT_MIPS_DELTA_SYM", Const, 16}, + {"DT_MIPS_DELTA_SYM_NO", Const, 16}, + {"DT_MIPS_DYNSTR_ALIGN", Const, 16}, + {"DT_MIPS_FLAGS", Const, 16}, + {"DT_MIPS_GOTSYM", Const, 16}, + {"DT_MIPS_GP_VALUE", Const, 16}, + {"DT_MIPS_HIDDEN_GOTIDX", Const, 16}, + {"DT_MIPS_HIPAGENO", Const, 16}, + {"DT_MIPS_ICHECKSUM", Const, 16}, + {"DT_MIPS_INTERFACE", Const, 16}, + {"DT_MIPS_INTERFACE_SIZE", Const, 16}, + {"DT_MIPS_IVERSION", Const, 16}, + {"DT_MIPS_LIBLIST", Const, 16}, + {"DT_MIPS_LIBLISTNO", Const, 16}, + {"DT_MIPS_LOCALPAGE_GOTIDX", Const, 16}, + {"DT_MIPS_LOCAL_GOTIDX", Const, 16}, + {"DT_MIPS_LOCAL_GOTNO", Const, 16}, + {"DT_MIPS_MSYM", Const, 16}, + {"DT_MIPS_OPTIONS", Const, 16}, + {"DT_MIPS_PERF_SUFFIX", Const, 16}, + {"DT_MIPS_PIXIE_INIT", Const, 16}, + {"DT_MIPS_PLTGOT", Const, 16}, + {"DT_MIPS_PROTECTED_GOTIDX", Const, 16}, + {"DT_MIPS_RLD_MAP", Const, 16}, + {"DT_MIPS_RLD_MAP_REL", Const, 16}, + {"DT_MIPS_RLD_TEXT_RESOLVE_ADDR", Const, 16}, + {"DT_MIPS_RLD_VERSION", Const, 16}, + {"DT_MIPS_RWPLT", Const, 16}, + {"DT_MIPS_SYMBOL_LIB", Const, 16}, + {"DT_MIPS_SYMTABNO", Const, 16}, + {"DT_MIPS_TIME_STAMP", Const, 16}, + {"DT_MIPS_UNREFEXTNO", Const, 16}, + {"DT_MOVEENT", Const, 16}, + {"DT_MOVESZ", Const, 16}, + {"DT_MOVETAB", Const, 16}, + {"DT_NEEDED", Const, 0}, + {"DT_NULL", Const, 0}, + {"DT_PLTGOT", Const, 0}, + {"DT_PLTPAD", Const, 16}, + {"DT_PLTPADSZ", Const, 16}, + {"DT_PLTREL", Const, 0}, + {"DT_PLTRELSZ", Const, 0}, + {"DT_POSFLAG_1", Const, 16}, + {"DT_PPC64_GLINK", Const, 16}, + {"DT_PPC64_OPD", Const, 16}, + {"DT_PPC64_OPDSZ", Const, 16}, + {"DT_PPC64_OPT", Const, 16}, + {"DT_PPC_GOT", Const, 16}, + {"DT_PPC_OPT", Const, 16}, + {"DT_PREINIT_ARRAY", Const, 0}, + {"DT_PREINIT_ARRAYSZ", Const, 0}, + {"DT_REL", Const, 0}, + {"DT_RELA", Const, 0}, + {"DT_RELACOUNT", Const, 16}, + {"DT_RELAENT", Const, 0}, + {"DT_RELASZ", Const, 0}, + {"DT_RELCOUNT", Const, 16}, + {"DT_RELENT", Const, 0}, + {"DT_RELSZ", Const, 0}, + {"DT_RPATH", Const, 0}, + {"DT_RUNPATH", Const, 0}, + {"DT_SONAME", Const, 0}, + {"DT_SPARC_REGISTER", Const, 16}, + {"DT_STRSZ", Const, 0}, + {"DT_STRTAB", Const, 0}, + {"DT_SYMBOLIC", Const, 0}, + {"DT_SYMENT", Const, 0}, + {"DT_SYMINENT", Const, 16}, + {"DT_SYMINFO", Const, 16}, + {"DT_SYMINSZ", Const, 16}, + {"DT_SYMTAB", Const, 0}, + {"DT_SYMTAB_SHNDX", Const, 16}, + {"DT_TEXTREL", Const, 0}, + {"DT_TLSDESC_GOT", Const, 16}, + {"DT_TLSDESC_PLT", Const, 16}, + {"DT_USED", Const, 16}, + {"DT_VALRNGHI", Const, 16}, + {"DT_VALRNGLO", Const, 16}, + {"DT_VERDEF", Const, 16}, + {"DT_VERDEFNUM", Const, 16}, + {"DT_VERNEED", Const, 0}, + {"DT_VERNEEDNUM", Const, 0}, + {"DT_VERSYM", Const, 0}, + {"Data", Type, 0}, + {"Dyn32", Type, 0}, + {"Dyn32.Tag", Field, 0}, + {"Dyn32.Val", Field, 0}, + {"Dyn64", Type, 0}, + {"Dyn64.Tag", Field, 0}, + {"Dyn64.Val", Field, 0}, + {"DynFlag", Type, 0}, + {"DynFlag1", Type, 21}, + {"DynTag", Type, 0}, + {"EI_ABIVERSION", Const, 0}, + {"EI_CLASS", Const, 0}, + {"EI_DATA", Const, 0}, + {"EI_NIDENT", Const, 0}, + {"EI_OSABI", Const, 0}, + {"EI_PAD", Const, 0}, + {"EI_VERSION", Const, 0}, + {"ELFCLASS32", Const, 0}, + {"ELFCLASS64", Const, 0}, + {"ELFCLASSNONE", Const, 0}, + {"ELFDATA2LSB", Const, 0}, + {"ELFDATA2MSB", Const, 0}, + {"ELFDATANONE", Const, 0}, + {"ELFMAG", Const, 0}, + {"ELFOSABI_86OPEN", Const, 0}, + {"ELFOSABI_AIX", Const, 
0}, + {"ELFOSABI_ARM", Const, 0}, + {"ELFOSABI_AROS", Const, 11}, + {"ELFOSABI_CLOUDABI", Const, 11}, + {"ELFOSABI_FENIXOS", Const, 11}, + {"ELFOSABI_FREEBSD", Const, 0}, + {"ELFOSABI_HPUX", Const, 0}, + {"ELFOSABI_HURD", Const, 0}, + {"ELFOSABI_IRIX", Const, 0}, + {"ELFOSABI_LINUX", Const, 0}, + {"ELFOSABI_MODESTO", Const, 0}, + {"ELFOSABI_NETBSD", Const, 0}, + {"ELFOSABI_NONE", Const, 0}, + {"ELFOSABI_NSK", Const, 0}, + {"ELFOSABI_OPENBSD", Const, 0}, + {"ELFOSABI_OPENVMS", Const, 0}, + {"ELFOSABI_SOLARIS", Const, 0}, + {"ELFOSABI_STANDALONE", Const, 0}, + {"ELFOSABI_TRU64", Const, 0}, + {"EM_386", Const, 0}, + {"EM_486", Const, 0}, + {"EM_56800EX", Const, 11}, + {"EM_68HC05", Const, 11}, + {"EM_68HC08", Const, 11}, + {"EM_68HC11", Const, 11}, + {"EM_68HC12", Const, 0}, + {"EM_68HC16", Const, 11}, + {"EM_68K", Const, 0}, + {"EM_78KOR", Const, 11}, + {"EM_8051", Const, 11}, + {"EM_860", Const, 0}, + {"EM_88K", Const, 0}, + {"EM_960", Const, 0}, + {"EM_AARCH64", Const, 4}, + {"EM_ALPHA", Const, 0}, + {"EM_ALPHA_STD", Const, 0}, + {"EM_ALTERA_NIOS2", Const, 11}, + {"EM_AMDGPU", Const, 11}, + {"EM_ARC", Const, 0}, + {"EM_ARCA", Const, 11}, + {"EM_ARC_COMPACT", Const, 11}, + {"EM_ARC_COMPACT2", Const, 11}, + {"EM_ARM", Const, 0}, + {"EM_AVR", Const, 11}, + {"EM_AVR32", Const, 11}, + {"EM_BA1", Const, 11}, + {"EM_BA2", Const, 11}, + {"EM_BLACKFIN", Const, 11}, + {"EM_BPF", Const, 11}, + {"EM_C166", Const, 11}, + {"EM_CDP", Const, 11}, + {"EM_CE", Const, 11}, + {"EM_CLOUDSHIELD", Const, 11}, + {"EM_COGE", Const, 11}, + {"EM_COLDFIRE", Const, 0}, + {"EM_COOL", Const, 11}, + {"EM_COREA_1ST", Const, 11}, + {"EM_COREA_2ND", Const, 11}, + {"EM_CR", Const, 11}, + {"EM_CR16", Const, 11}, + {"EM_CRAYNV2", Const, 11}, + {"EM_CRIS", Const, 11}, + {"EM_CRX", Const, 11}, + {"EM_CSR_KALIMBA", Const, 11}, + {"EM_CUDA", Const, 11}, + {"EM_CYPRESS_M8C", Const, 11}, + {"EM_D10V", Const, 11}, + {"EM_D30V", Const, 11}, + {"EM_DSP24", Const, 11}, + {"EM_DSPIC30F", Const, 11}, + {"EM_DXP", Const, 11}, + {"EM_ECOG1", Const, 11}, + {"EM_ECOG16", Const, 11}, + {"EM_ECOG1X", Const, 11}, + {"EM_ECOG2", Const, 11}, + {"EM_ETPU", Const, 11}, + {"EM_EXCESS", Const, 11}, + {"EM_F2MC16", Const, 11}, + {"EM_FIREPATH", Const, 11}, + {"EM_FR20", Const, 0}, + {"EM_FR30", Const, 11}, + {"EM_FT32", Const, 11}, + {"EM_FX66", Const, 11}, + {"EM_H8S", Const, 0}, + {"EM_H8_300", Const, 0}, + {"EM_H8_300H", Const, 0}, + {"EM_H8_500", Const, 0}, + {"EM_HUANY", Const, 11}, + {"EM_IA_64", Const, 0}, + {"EM_INTEL205", Const, 11}, + {"EM_INTEL206", Const, 11}, + {"EM_INTEL207", Const, 11}, + {"EM_INTEL208", Const, 11}, + {"EM_INTEL209", Const, 11}, + {"EM_IP2K", Const, 11}, + {"EM_JAVELIN", Const, 11}, + {"EM_K10M", Const, 11}, + {"EM_KM32", Const, 11}, + {"EM_KMX16", Const, 11}, + {"EM_KMX32", Const, 11}, + {"EM_KMX8", Const, 11}, + {"EM_KVARC", Const, 11}, + {"EM_L10M", Const, 11}, + {"EM_LANAI", Const, 11}, + {"EM_LATTICEMICO32", Const, 11}, + {"EM_LOONGARCH", Const, 19}, + {"EM_M16C", Const, 11}, + {"EM_M32", Const, 0}, + {"EM_M32C", Const, 11}, + {"EM_M32R", Const, 11}, + {"EM_MANIK", Const, 11}, + {"EM_MAX", Const, 11}, + {"EM_MAXQ30", Const, 11}, + {"EM_MCHP_PIC", Const, 11}, + {"EM_MCST_ELBRUS", Const, 11}, + {"EM_ME16", Const, 0}, + {"EM_METAG", Const, 11}, + {"EM_MICROBLAZE", Const, 11}, + {"EM_MIPS", Const, 0}, + {"EM_MIPS_RS3_LE", Const, 0}, + {"EM_MIPS_RS4_BE", Const, 0}, + {"EM_MIPS_X", Const, 0}, + {"EM_MMA", Const, 0}, + {"EM_MMDSP_PLUS", Const, 11}, + {"EM_MMIX", Const, 11}, + {"EM_MN10200", Const, 11}, + {"EM_MN10300", 
Const, 11}, + {"EM_MOXIE", Const, 11}, + {"EM_MSP430", Const, 11}, + {"EM_NCPU", Const, 0}, + {"EM_NDR1", Const, 0}, + {"EM_NDS32", Const, 11}, + {"EM_NONE", Const, 0}, + {"EM_NORC", Const, 11}, + {"EM_NS32K", Const, 11}, + {"EM_OPEN8", Const, 11}, + {"EM_OPENRISC", Const, 11}, + {"EM_PARISC", Const, 0}, + {"EM_PCP", Const, 0}, + {"EM_PDP10", Const, 11}, + {"EM_PDP11", Const, 11}, + {"EM_PDSP", Const, 11}, + {"EM_PJ", Const, 11}, + {"EM_PPC", Const, 0}, + {"EM_PPC64", Const, 0}, + {"EM_PRISM", Const, 11}, + {"EM_QDSP6", Const, 11}, + {"EM_R32C", Const, 11}, + {"EM_RCE", Const, 0}, + {"EM_RH32", Const, 0}, + {"EM_RISCV", Const, 11}, + {"EM_RL78", Const, 11}, + {"EM_RS08", Const, 11}, + {"EM_RX", Const, 11}, + {"EM_S370", Const, 0}, + {"EM_S390", Const, 0}, + {"EM_SCORE7", Const, 11}, + {"EM_SEP", Const, 11}, + {"EM_SE_C17", Const, 11}, + {"EM_SE_C33", Const, 11}, + {"EM_SH", Const, 0}, + {"EM_SHARC", Const, 11}, + {"EM_SLE9X", Const, 11}, + {"EM_SNP1K", Const, 11}, + {"EM_SPARC", Const, 0}, + {"EM_SPARC32PLUS", Const, 0}, + {"EM_SPARCV9", Const, 0}, + {"EM_ST100", Const, 0}, + {"EM_ST19", Const, 11}, + {"EM_ST200", Const, 11}, + {"EM_ST7", Const, 11}, + {"EM_ST9PLUS", Const, 11}, + {"EM_STARCORE", Const, 0}, + {"EM_STM8", Const, 11}, + {"EM_STXP7X", Const, 11}, + {"EM_SVX", Const, 11}, + {"EM_TILE64", Const, 11}, + {"EM_TILEGX", Const, 11}, + {"EM_TILEPRO", Const, 11}, + {"EM_TINYJ", Const, 0}, + {"EM_TI_ARP32", Const, 11}, + {"EM_TI_C2000", Const, 11}, + {"EM_TI_C5500", Const, 11}, + {"EM_TI_C6000", Const, 11}, + {"EM_TI_PRU", Const, 11}, + {"EM_TMM_GPP", Const, 11}, + {"EM_TPC", Const, 11}, + {"EM_TRICORE", Const, 0}, + {"EM_TRIMEDIA", Const, 11}, + {"EM_TSK3000", Const, 11}, + {"EM_UNICORE", Const, 11}, + {"EM_V800", Const, 0}, + {"EM_V850", Const, 11}, + {"EM_VAX", Const, 11}, + {"EM_VIDEOCORE", Const, 11}, + {"EM_VIDEOCORE3", Const, 11}, + {"EM_VIDEOCORE5", Const, 11}, + {"EM_VISIUM", Const, 11}, + {"EM_VPP500", Const, 0}, + {"EM_X86_64", Const, 0}, + {"EM_XCORE", Const, 11}, + {"EM_XGATE", Const, 11}, + {"EM_XIMO16", Const, 11}, + {"EM_XTENSA", Const, 11}, + {"EM_Z80", Const, 11}, + {"EM_ZSP", Const, 11}, + {"ET_CORE", Const, 0}, + {"ET_DYN", Const, 0}, + {"ET_EXEC", Const, 0}, + {"ET_HIOS", Const, 0}, + {"ET_HIPROC", Const, 0}, + {"ET_LOOS", Const, 0}, + {"ET_LOPROC", Const, 0}, + {"ET_NONE", Const, 0}, + {"ET_REL", Const, 0}, + {"EV_CURRENT", Const, 0}, + {"EV_NONE", Const, 0}, + {"ErrNoSymbols", Var, 4}, + {"File", Type, 0}, + {"File.FileHeader", Field, 0}, + {"File.Progs", Field, 0}, + {"File.Sections", Field, 0}, + {"FileHeader", Type, 0}, + {"FileHeader.ABIVersion", Field, 0}, + {"FileHeader.ByteOrder", Field, 0}, + {"FileHeader.Class", Field, 0}, + {"FileHeader.Data", Field, 0}, + {"FileHeader.Entry", Field, 1}, + {"FileHeader.Machine", Field, 0}, + {"FileHeader.OSABI", Field, 0}, + {"FileHeader.Type", Field, 0}, + {"FileHeader.Version", Field, 0}, + {"FormatError", Type, 0}, + {"Header32", Type, 0}, + {"Header32.Ehsize", Field, 0}, + {"Header32.Entry", Field, 0}, + {"Header32.Flags", Field, 0}, + {"Header32.Ident", Field, 0}, + {"Header32.Machine", Field, 0}, + {"Header32.Phentsize", Field, 0}, + {"Header32.Phnum", Field, 0}, + {"Header32.Phoff", Field, 0}, + {"Header32.Shentsize", Field, 0}, + {"Header32.Shnum", Field, 0}, + {"Header32.Shoff", Field, 0}, + {"Header32.Shstrndx", Field, 0}, + {"Header32.Type", Field, 0}, + {"Header32.Version", Field, 0}, + {"Header64", Type, 0}, + {"Header64.Ehsize", Field, 0}, + {"Header64.Entry", Field, 0}, + {"Header64.Flags", Field, 0}, + 
{"Header64.Ident", Field, 0}, + {"Header64.Machine", Field, 0}, + {"Header64.Phentsize", Field, 0}, + {"Header64.Phnum", Field, 0}, + {"Header64.Phoff", Field, 0}, + {"Header64.Shentsize", Field, 0}, + {"Header64.Shnum", Field, 0}, + {"Header64.Shoff", Field, 0}, + {"Header64.Shstrndx", Field, 0}, + {"Header64.Type", Field, 0}, + {"Header64.Version", Field, 0}, + {"ImportedSymbol", Type, 0}, + {"ImportedSymbol.Library", Field, 0}, + {"ImportedSymbol.Name", Field, 0}, + {"ImportedSymbol.Version", Field, 0}, + {"Machine", Type, 0}, + {"NT_FPREGSET", Const, 0}, + {"NT_PRPSINFO", Const, 0}, + {"NT_PRSTATUS", Const, 0}, + {"NType", Type, 0}, + {"NewFile", Func, 0}, + {"OSABI", Type, 0}, + {"Open", Func, 0}, + {"PF_MASKOS", Const, 0}, + {"PF_MASKPROC", Const, 0}, + {"PF_R", Const, 0}, + {"PF_W", Const, 0}, + {"PF_X", Const, 0}, + {"PT_AARCH64_ARCHEXT", Const, 16}, + {"PT_AARCH64_UNWIND", Const, 16}, + {"PT_ARM_ARCHEXT", Const, 16}, + {"PT_ARM_EXIDX", Const, 16}, + {"PT_DYNAMIC", Const, 0}, + {"PT_GNU_EH_FRAME", Const, 16}, + {"PT_GNU_MBIND_HI", Const, 16}, + {"PT_GNU_MBIND_LO", Const, 16}, + {"PT_GNU_PROPERTY", Const, 16}, + {"PT_GNU_RELRO", Const, 16}, + {"PT_GNU_STACK", Const, 16}, + {"PT_HIOS", Const, 0}, + {"PT_HIPROC", Const, 0}, + {"PT_INTERP", Const, 0}, + {"PT_LOAD", Const, 0}, + {"PT_LOOS", Const, 0}, + {"PT_LOPROC", Const, 0}, + {"PT_MIPS_ABIFLAGS", Const, 16}, + {"PT_MIPS_OPTIONS", Const, 16}, + {"PT_MIPS_REGINFO", Const, 16}, + {"PT_MIPS_RTPROC", Const, 16}, + {"PT_NOTE", Const, 0}, + {"PT_NULL", Const, 0}, + {"PT_OPENBSD_BOOTDATA", Const, 16}, + {"PT_OPENBSD_RANDOMIZE", Const, 16}, + {"PT_OPENBSD_WXNEEDED", Const, 16}, + {"PT_PAX_FLAGS", Const, 16}, + {"PT_PHDR", Const, 0}, + {"PT_S390_PGSTE", Const, 16}, + {"PT_SHLIB", Const, 0}, + {"PT_SUNWSTACK", Const, 16}, + {"PT_SUNW_EH_FRAME", Const, 16}, + {"PT_TLS", Const, 0}, + {"Prog", Type, 0}, + {"Prog.ProgHeader", Field, 0}, + {"Prog.ReaderAt", Field, 0}, + {"Prog32", Type, 0}, + {"Prog32.Align", Field, 0}, + {"Prog32.Filesz", Field, 0}, + {"Prog32.Flags", Field, 0}, + {"Prog32.Memsz", Field, 0}, + {"Prog32.Off", Field, 0}, + {"Prog32.Paddr", Field, 0}, + {"Prog32.Type", Field, 0}, + {"Prog32.Vaddr", Field, 0}, + {"Prog64", Type, 0}, + {"Prog64.Align", Field, 0}, + {"Prog64.Filesz", Field, 0}, + {"Prog64.Flags", Field, 0}, + {"Prog64.Memsz", Field, 0}, + {"Prog64.Off", Field, 0}, + {"Prog64.Paddr", Field, 0}, + {"Prog64.Type", Field, 0}, + {"Prog64.Vaddr", Field, 0}, + {"ProgFlag", Type, 0}, + {"ProgHeader", Type, 0}, + {"ProgHeader.Align", Field, 0}, + {"ProgHeader.Filesz", Field, 0}, + {"ProgHeader.Flags", Field, 0}, + {"ProgHeader.Memsz", Field, 0}, + {"ProgHeader.Off", Field, 0}, + {"ProgHeader.Paddr", Field, 0}, + {"ProgHeader.Type", Field, 0}, + {"ProgHeader.Vaddr", Field, 0}, + {"ProgType", Type, 0}, + {"R_386", Type, 0}, + {"R_386_16", Const, 10}, + {"R_386_32", Const, 0}, + {"R_386_32PLT", Const, 10}, + {"R_386_8", Const, 10}, + {"R_386_COPY", Const, 0}, + {"R_386_GLOB_DAT", Const, 0}, + {"R_386_GOT32", Const, 0}, + {"R_386_GOT32X", Const, 10}, + {"R_386_GOTOFF", Const, 0}, + {"R_386_GOTPC", Const, 0}, + {"R_386_IRELATIVE", Const, 10}, + {"R_386_JMP_SLOT", Const, 0}, + {"R_386_NONE", Const, 0}, + {"R_386_PC16", Const, 10}, + {"R_386_PC32", Const, 0}, + {"R_386_PC8", Const, 10}, + {"R_386_PLT32", Const, 0}, + {"R_386_RELATIVE", Const, 0}, + {"R_386_SIZE32", Const, 10}, + {"R_386_TLS_DESC", Const, 10}, + {"R_386_TLS_DESC_CALL", Const, 10}, + {"R_386_TLS_DTPMOD32", Const, 0}, + {"R_386_TLS_DTPOFF32", Const, 0}, + {"R_386_TLS_GD", 
Const, 0}, + {"R_386_TLS_GD_32", Const, 0}, + {"R_386_TLS_GD_CALL", Const, 0}, + {"R_386_TLS_GD_POP", Const, 0}, + {"R_386_TLS_GD_PUSH", Const, 0}, + {"R_386_TLS_GOTDESC", Const, 10}, + {"R_386_TLS_GOTIE", Const, 0}, + {"R_386_TLS_IE", Const, 0}, + {"R_386_TLS_IE_32", Const, 0}, + {"R_386_TLS_LDM", Const, 0}, + {"R_386_TLS_LDM_32", Const, 0}, + {"R_386_TLS_LDM_CALL", Const, 0}, + {"R_386_TLS_LDM_POP", Const, 0}, + {"R_386_TLS_LDM_PUSH", Const, 0}, + {"R_386_TLS_LDO_32", Const, 0}, + {"R_386_TLS_LE", Const, 0}, + {"R_386_TLS_LE_32", Const, 0}, + {"R_386_TLS_TPOFF", Const, 0}, + {"R_386_TLS_TPOFF32", Const, 0}, + {"R_390", Type, 7}, + {"R_390_12", Const, 7}, + {"R_390_16", Const, 7}, + {"R_390_20", Const, 7}, + {"R_390_32", Const, 7}, + {"R_390_64", Const, 7}, + {"R_390_8", Const, 7}, + {"R_390_COPY", Const, 7}, + {"R_390_GLOB_DAT", Const, 7}, + {"R_390_GOT12", Const, 7}, + {"R_390_GOT16", Const, 7}, + {"R_390_GOT20", Const, 7}, + {"R_390_GOT32", Const, 7}, + {"R_390_GOT64", Const, 7}, + {"R_390_GOTENT", Const, 7}, + {"R_390_GOTOFF", Const, 7}, + {"R_390_GOTOFF16", Const, 7}, + {"R_390_GOTOFF64", Const, 7}, + {"R_390_GOTPC", Const, 7}, + {"R_390_GOTPCDBL", Const, 7}, + {"R_390_GOTPLT12", Const, 7}, + {"R_390_GOTPLT16", Const, 7}, + {"R_390_GOTPLT20", Const, 7}, + {"R_390_GOTPLT32", Const, 7}, + {"R_390_GOTPLT64", Const, 7}, + {"R_390_GOTPLTENT", Const, 7}, + {"R_390_GOTPLTOFF16", Const, 7}, + {"R_390_GOTPLTOFF32", Const, 7}, + {"R_390_GOTPLTOFF64", Const, 7}, + {"R_390_JMP_SLOT", Const, 7}, + {"R_390_NONE", Const, 7}, + {"R_390_PC16", Const, 7}, + {"R_390_PC16DBL", Const, 7}, + {"R_390_PC32", Const, 7}, + {"R_390_PC32DBL", Const, 7}, + {"R_390_PC64", Const, 7}, + {"R_390_PLT16DBL", Const, 7}, + {"R_390_PLT32", Const, 7}, + {"R_390_PLT32DBL", Const, 7}, + {"R_390_PLT64", Const, 7}, + {"R_390_RELATIVE", Const, 7}, + {"R_390_TLS_DTPMOD", Const, 7}, + {"R_390_TLS_DTPOFF", Const, 7}, + {"R_390_TLS_GD32", Const, 7}, + {"R_390_TLS_GD64", Const, 7}, + {"R_390_TLS_GDCALL", Const, 7}, + {"R_390_TLS_GOTIE12", Const, 7}, + {"R_390_TLS_GOTIE20", Const, 7}, + {"R_390_TLS_GOTIE32", Const, 7}, + {"R_390_TLS_GOTIE64", Const, 7}, + {"R_390_TLS_IE32", Const, 7}, + {"R_390_TLS_IE64", Const, 7}, + {"R_390_TLS_IEENT", Const, 7}, + {"R_390_TLS_LDCALL", Const, 7}, + {"R_390_TLS_LDM32", Const, 7}, + {"R_390_TLS_LDM64", Const, 7}, + {"R_390_TLS_LDO32", Const, 7}, + {"R_390_TLS_LDO64", Const, 7}, + {"R_390_TLS_LE32", Const, 7}, + {"R_390_TLS_LE64", Const, 7}, + {"R_390_TLS_LOAD", Const, 7}, + {"R_390_TLS_TPOFF", Const, 7}, + {"R_AARCH64", Type, 4}, + {"R_AARCH64_ABS16", Const, 4}, + {"R_AARCH64_ABS32", Const, 4}, + {"R_AARCH64_ABS64", Const, 4}, + {"R_AARCH64_ADD_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_ADR_GOT_PAGE", Const, 4}, + {"R_AARCH64_ADR_PREL_LO21", Const, 4}, + {"R_AARCH64_ADR_PREL_PG_HI21", Const, 4}, + {"R_AARCH64_ADR_PREL_PG_HI21_NC", Const, 4}, + {"R_AARCH64_CALL26", Const, 4}, + {"R_AARCH64_CONDBR19", Const, 4}, + {"R_AARCH64_COPY", Const, 4}, + {"R_AARCH64_GLOB_DAT", Const, 4}, + {"R_AARCH64_GOT_LD_PREL19", Const, 4}, + {"R_AARCH64_IRELATIVE", Const, 4}, + {"R_AARCH64_JUMP26", Const, 4}, + {"R_AARCH64_JUMP_SLOT", Const, 4}, + {"R_AARCH64_LD64_GOTOFF_LO15", Const, 10}, + {"R_AARCH64_LD64_GOTPAGE_LO15", Const, 10}, + {"R_AARCH64_LD64_GOT_LO12_NC", Const, 4}, + {"R_AARCH64_LDST128_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_LDST16_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_LDST32_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_LDST64_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_LDST8_ABS_LO12_NC", Const, 4}, + 
{"R_AARCH64_LD_PREL_LO19", Const, 4}, + {"R_AARCH64_MOVW_SABS_G0", Const, 4}, + {"R_AARCH64_MOVW_SABS_G1", Const, 4}, + {"R_AARCH64_MOVW_SABS_G2", Const, 4}, + {"R_AARCH64_MOVW_UABS_G0", Const, 4}, + {"R_AARCH64_MOVW_UABS_G0_NC", Const, 4}, + {"R_AARCH64_MOVW_UABS_G1", Const, 4}, + {"R_AARCH64_MOVW_UABS_G1_NC", Const, 4}, + {"R_AARCH64_MOVW_UABS_G2", Const, 4}, + {"R_AARCH64_MOVW_UABS_G2_NC", Const, 4}, + {"R_AARCH64_MOVW_UABS_G3", Const, 4}, + {"R_AARCH64_NONE", Const, 4}, + {"R_AARCH64_NULL", Const, 4}, + {"R_AARCH64_P32_ABS16", Const, 4}, + {"R_AARCH64_P32_ABS32", Const, 4}, + {"R_AARCH64_P32_ADD_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_ADR_GOT_PAGE", Const, 4}, + {"R_AARCH64_P32_ADR_PREL_LO21", Const, 4}, + {"R_AARCH64_P32_ADR_PREL_PG_HI21", Const, 4}, + {"R_AARCH64_P32_CALL26", Const, 4}, + {"R_AARCH64_P32_CONDBR19", Const, 4}, + {"R_AARCH64_P32_COPY", Const, 4}, + {"R_AARCH64_P32_GLOB_DAT", Const, 4}, + {"R_AARCH64_P32_GOT_LD_PREL19", Const, 4}, + {"R_AARCH64_P32_IRELATIVE", Const, 4}, + {"R_AARCH64_P32_JUMP26", Const, 4}, + {"R_AARCH64_P32_JUMP_SLOT", Const, 4}, + {"R_AARCH64_P32_LD32_GOT_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST128_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST16_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST32_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST64_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST8_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LD_PREL_LO19", Const, 4}, + {"R_AARCH64_P32_MOVW_SABS_G0", Const, 4}, + {"R_AARCH64_P32_MOVW_UABS_G0", Const, 4}, + {"R_AARCH64_P32_MOVW_UABS_G0_NC", Const, 4}, + {"R_AARCH64_P32_MOVW_UABS_G1", Const, 4}, + {"R_AARCH64_P32_PREL16", Const, 4}, + {"R_AARCH64_P32_PREL32", Const, 4}, + {"R_AARCH64_P32_RELATIVE", Const, 4}, + {"R_AARCH64_P32_TLSDESC", Const, 4}, + {"R_AARCH64_P32_TLSDESC_ADD_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSDESC_ADR_PAGE21", Const, 4}, + {"R_AARCH64_P32_TLSDESC_ADR_PREL21", Const, 4}, + {"R_AARCH64_P32_TLSDESC_CALL", Const, 4}, + {"R_AARCH64_P32_TLSDESC_LD32_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSDESC_LD_PREL19", Const, 4}, + {"R_AARCH64_P32_TLSGD_ADD_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSGD_ADR_PAGE21", Const, 4}, + {"R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4}, + {"R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19", Const, 4}, + {"R_AARCH64_P32_TLSLE_ADD_TPREL_HI12", Const, 4}, + {"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12", Const, 4}, + {"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0", Const, 4}, + {"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC", Const, 4}, + {"R_AARCH64_P32_TLSLE_MOVW_TPREL_G1", Const, 4}, + {"R_AARCH64_P32_TLS_DTPMOD", Const, 4}, + {"R_AARCH64_P32_TLS_DTPREL", Const, 4}, + {"R_AARCH64_P32_TLS_TPREL", Const, 4}, + {"R_AARCH64_P32_TSTBR14", Const, 4}, + {"R_AARCH64_PREL16", Const, 4}, + {"R_AARCH64_PREL32", Const, 4}, + {"R_AARCH64_PREL64", Const, 4}, + {"R_AARCH64_RELATIVE", Const, 4}, + {"R_AARCH64_TLSDESC", Const, 4}, + {"R_AARCH64_TLSDESC_ADD", Const, 4}, + {"R_AARCH64_TLSDESC_ADD_LO12_NC", Const, 4}, + {"R_AARCH64_TLSDESC_ADR_PAGE21", Const, 4}, + {"R_AARCH64_TLSDESC_ADR_PREL21", Const, 4}, + {"R_AARCH64_TLSDESC_CALL", Const, 4}, + {"R_AARCH64_TLSDESC_LD64_LO12_NC", Const, 4}, + {"R_AARCH64_TLSDESC_LDR", Const, 4}, + {"R_AARCH64_TLSDESC_LD_PREL19", Const, 4}, + {"R_AARCH64_TLSDESC_OFF_G0_NC", Const, 4}, + {"R_AARCH64_TLSDESC_OFF_G1", Const, 4}, + {"R_AARCH64_TLSGD_ADD_LO12_NC", Const, 4}, + {"R_AARCH64_TLSGD_ADR_PAGE21", Const, 4}, + {"R_AARCH64_TLSGD_ADR_PREL21", Const, 10}, + 
{"R_AARCH64_TLSGD_MOVW_G0_NC", Const, 10}, + {"R_AARCH64_TLSGD_MOVW_G1", Const, 10}, + {"R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4}, + {"R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", Const, 4}, + {"R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", Const, 4}, + {"R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", Const, 4}, + {"R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", Const, 4}, + {"R_AARCH64_TLSLD_ADR_PAGE21", Const, 10}, + {"R_AARCH64_TLSLD_ADR_PREL21", Const, 10}, + {"R_AARCH64_TLSLD_LDST128_DTPREL_LO12", Const, 10}, + {"R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC", Const, 10}, + {"R_AARCH64_TLSLE_ADD_TPREL_HI12", Const, 4}, + {"R_AARCH64_TLSLE_ADD_TPREL_LO12", Const, 4}, + {"R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", Const, 4}, + {"R_AARCH64_TLSLE_LDST128_TPREL_LO12", Const, 10}, + {"R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC", Const, 10}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G0", Const, 4}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", Const, 4}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G1", Const, 4}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", Const, 4}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G2", Const, 4}, + {"R_AARCH64_TLS_DTPMOD64", Const, 4}, + {"R_AARCH64_TLS_DTPREL64", Const, 4}, + {"R_AARCH64_TLS_TPREL64", Const, 4}, + {"R_AARCH64_TSTBR14", Const, 4}, + {"R_ALPHA", Type, 0}, + {"R_ALPHA_BRADDR", Const, 0}, + {"R_ALPHA_COPY", Const, 0}, + {"R_ALPHA_GLOB_DAT", Const, 0}, + {"R_ALPHA_GPDISP", Const, 0}, + {"R_ALPHA_GPREL32", Const, 0}, + {"R_ALPHA_GPRELHIGH", Const, 0}, + {"R_ALPHA_GPRELLOW", Const, 0}, + {"R_ALPHA_GPVALUE", Const, 0}, + {"R_ALPHA_HINT", Const, 0}, + {"R_ALPHA_IMMED_BR_HI32", Const, 0}, + {"R_ALPHA_IMMED_GP_16", Const, 0}, + {"R_ALPHA_IMMED_GP_HI32", Const, 0}, + {"R_ALPHA_IMMED_LO32", Const, 0}, + {"R_ALPHA_IMMED_SCN_HI32", Const, 0}, + {"R_ALPHA_JMP_SLOT", Const, 0}, + {"R_ALPHA_LITERAL", Const, 0}, + {"R_ALPHA_LITUSE", Const, 0}, + {"R_ALPHA_NONE", Const, 0}, + {"R_ALPHA_OP_PRSHIFT", Const, 0}, + {"R_ALPHA_OP_PSUB", Const, 0}, + {"R_ALPHA_OP_PUSH", Const, 0}, + {"R_ALPHA_OP_STORE", Const, 0}, + {"R_ALPHA_REFLONG", Const, 0}, + {"R_ALPHA_REFQUAD", Const, 0}, + {"R_ALPHA_RELATIVE", Const, 0}, + {"R_ALPHA_SREL16", Const, 0}, + {"R_ALPHA_SREL32", Const, 0}, + {"R_ALPHA_SREL64", Const, 0}, + {"R_ARM", Type, 0}, + {"R_ARM_ABS12", Const, 0}, + {"R_ARM_ABS16", Const, 0}, + {"R_ARM_ABS32", Const, 0}, + {"R_ARM_ABS32_NOI", Const, 10}, + {"R_ARM_ABS8", Const, 0}, + {"R_ARM_ALU_PCREL_15_8", Const, 10}, + {"R_ARM_ALU_PCREL_23_15", Const, 10}, + {"R_ARM_ALU_PCREL_7_0", Const, 10}, + {"R_ARM_ALU_PC_G0", Const, 10}, + {"R_ARM_ALU_PC_G0_NC", Const, 10}, + {"R_ARM_ALU_PC_G1", Const, 10}, + {"R_ARM_ALU_PC_G1_NC", Const, 10}, + {"R_ARM_ALU_PC_G2", Const, 10}, + {"R_ARM_ALU_SBREL_19_12_NC", Const, 10}, + {"R_ARM_ALU_SBREL_27_20_CK", Const, 10}, + {"R_ARM_ALU_SB_G0", Const, 10}, + {"R_ARM_ALU_SB_G0_NC", Const, 10}, + {"R_ARM_ALU_SB_G1", Const, 10}, + {"R_ARM_ALU_SB_G1_NC", Const, 10}, + {"R_ARM_ALU_SB_G2", Const, 10}, + {"R_ARM_AMP_VCALL9", Const, 0}, + {"R_ARM_BASE_ABS", Const, 10}, + {"R_ARM_CALL", Const, 10}, + {"R_ARM_COPY", Const, 0}, + {"R_ARM_GLOB_DAT", Const, 0}, + {"R_ARM_GNU_VTENTRY", Const, 0}, + {"R_ARM_GNU_VTINHERIT", Const, 0}, + {"R_ARM_GOT32", Const, 0}, + {"R_ARM_GOTOFF", Const, 0}, + {"R_ARM_GOTOFF12", Const, 10}, + {"R_ARM_GOTPC", Const, 0}, + {"R_ARM_GOTRELAX", Const, 10}, + {"R_ARM_GOT_ABS", Const, 10}, + {"R_ARM_GOT_BREL12", Const, 10}, + {"R_ARM_GOT_PREL", Const, 10}, + {"R_ARM_IRELATIVE", Const, 10}, + {"R_ARM_JUMP24", Const, 10}, + {"R_ARM_JUMP_SLOT", Const, 0}, + {"R_ARM_LDC_PC_G0", Const, 10}, + {"R_ARM_LDC_PC_G1", Const, 10}, + 
{"R_ARM_LDC_PC_G2", Const, 10}, + {"R_ARM_LDC_SB_G0", Const, 10}, + {"R_ARM_LDC_SB_G1", Const, 10}, + {"R_ARM_LDC_SB_G2", Const, 10}, + {"R_ARM_LDRS_PC_G0", Const, 10}, + {"R_ARM_LDRS_PC_G1", Const, 10}, + {"R_ARM_LDRS_PC_G2", Const, 10}, + {"R_ARM_LDRS_SB_G0", Const, 10}, + {"R_ARM_LDRS_SB_G1", Const, 10}, + {"R_ARM_LDRS_SB_G2", Const, 10}, + {"R_ARM_LDR_PC_G1", Const, 10}, + {"R_ARM_LDR_PC_G2", Const, 10}, + {"R_ARM_LDR_SBREL_11_10_NC", Const, 10}, + {"R_ARM_LDR_SB_G0", Const, 10}, + {"R_ARM_LDR_SB_G1", Const, 10}, + {"R_ARM_LDR_SB_G2", Const, 10}, + {"R_ARM_ME_TOO", Const, 10}, + {"R_ARM_MOVT_ABS", Const, 10}, + {"R_ARM_MOVT_BREL", Const, 10}, + {"R_ARM_MOVT_PREL", Const, 10}, + {"R_ARM_MOVW_ABS_NC", Const, 10}, + {"R_ARM_MOVW_BREL", Const, 10}, + {"R_ARM_MOVW_BREL_NC", Const, 10}, + {"R_ARM_MOVW_PREL_NC", Const, 10}, + {"R_ARM_NONE", Const, 0}, + {"R_ARM_PC13", Const, 0}, + {"R_ARM_PC24", Const, 0}, + {"R_ARM_PLT32", Const, 0}, + {"R_ARM_PLT32_ABS", Const, 10}, + {"R_ARM_PREL31", Const, 10}, + {"R_ARM_PRIVATE_0", Const, 10}, + {"R_ARM_PRIVATE_1", Const, 10}, + {"R_ARM_PRIVATE_10", Const, 10}, + {"R_ARM_PRIVATE_11", Const, 10}, + {"R_ARM_PRIVATE_12", Const, 10}, + {"R_ARM_PRIVATE_13", Const, 10}, + {"R_ARM_PRIVATE_14", Const, 10}, + {"R_ARM_PRIVATE_15", Const, 10}, + {"R_ARM_PRIVATE_2", Const, 10}, + {"R_ARM_PRIVATE_3", Const, 10}, + {"R_ARM_PRIVATE_4", Const, 10}, + {"R_ARM_PRIVATE_5", Const, 10}, + {"R_ARM_PRIVATE_6", Const, 10}, + {"R_ARM_PRIVATE_7", Const, 10}, + {"R_ARM_PRIVATE_8", Const, 10}, + {"R_ARM_PRIVATE_9", Const, 10}, + {"R_ARM_RABS32", Const, 0}, + {"R_ARM_RBASE", Const, 0}, + {"R_ARM_REL32", Const, 0}, + {"R_ARM_REL32_NOI", Const, 10}, + {"R_ARM_RELATIVE", Const, 0}, + {"R_ARM_RPC24", Const, 0}, + {"R_ARM_RREL32", Const, 0}, + {"R_ARM_RSBREL32", Const, 0}, + {"R_ARM_RXPC25", Const, 10}, + {"R_ARM_SBREL31", Const, 10}, + {"R_ARM_SBREL32", Const, 0}, + {"R_ARM_SWI24", Const, 0}, + {"R_ARM_TARGET1", Const, 10}, + {"R_ARM_TARGET2", Const, 10}, + {"R_ARM_THM_ABS5", Const, 0}, + {"R_ARM_THM_ALU_ABS_G0_NC", Const, 10}, + {"R_ARM_THM_ALU_ABS_G1_NC", Const, 10}, + {"R_ARM_THM_ALU_ABS_G2_NC", Const, 10}, + {"R_ARM_THM_ALU_ABS_G3", Const, 10}, + {"R_ARM_THM_ALU_PREL_11_0", Const, 10}, + {"R_ARM_THM_GOT_BREL12", Const, 10}, + {"R_ARM_THM_JUMP11", Const, 10}, + {"R_ARM_THM_JUMP19", Const, 10}, + {"R_ARM_THM_JUMP24", Const, 10}, + {"R_ARM_THM_JUMP6", Const, 10}, + {"R_ARM_THM_JUMP8", Const, 10}, + {"R_ARM_THM_MOVT_ABS", Const, 10}, + {"R_ARM_THM_MOVT_BREL", Const, 10}, + {"R_ARM_THM_MOVT_PREL", Const, 10}, + {"R_ARM_THM_MOVW_ABS_NC", Const, 10}, + {"R_ARM_THM_MOVW_BREL", Const, 10}, + {"R_ARM_THM_MOVW_BREL_NC", Const, 10}, + {"R_ARM_THM_MOVW_PREL_NC", Const, 10}, + {"R_ARM_THM_PC12", Const, 10}, + {"R_ARM_THM_PC22", Const, 0}, + {"R_ARM_THM_PC8", Const, 0}, + {"R_ARM_THM_RPC22", Const, 0}, + {"R_ARM_THM_SWI8", Const, 0}, + {"R_ARM_THM_TLS_CALL", Const, 10}, + {"R_ARM_THM_TLS_DESCSEQ16", Const, 10}, + {"R_ARM_THM_TLS_DESCSEQ32", Const, 10}, + {"R_ARM_THM_XPC22", Const, 0}, + {"R_ARM_TLS_CALL", Const, 10}, + {"R_ARM_TLS_DESCSEQ", Const, 10}, + {"R_ARM_TLS_DTPMOD32", Const, 10}, + {"R_ARM_TLS_DTPOFF32", Const, 10}, + {"R_ARM_TLS_GD32", Const, 10}, + {"R_ARM_TLS_GOTDESC", Const, 10}, + {"R_ARM_TLS_IE12GP", Const, 10}, + {"R_ARM_TLS_IE32", Const, 10}, + {"R_ARM_TLS_LDM32", Const, 10}, + {"R_ARM_TLS_LDO12", Const, 10}, + {"R_ARM_TLS_LDO32", Const, 10}, + {"R_ARM_TLS_LE12", Const, 10}, + {"R_ARM_TLS_LE32", Const, 10}, + {"R_ARM_TLS_TPOFF32", Const, 10}, + {"R_ARM_V4BX", Const, 10}, + 
{"R_ARM_XPC25", Const, 0}, + {"R_INFO", Func, 0}, + {"R_INFO32", Func, 0}, + {"R_LARCH", Type, 19}, + {"R_LARCH_32", Const, 19}, + {"R_LARCH_32_PCREL", Const, 20}, + {"R_LARCH_64", Const, 19}, + {"R_LARCH_64_PCREL", Const, 22}, + {"R_LARCH_ABS64_HI12", Const, 20}, + {"R_LARCH_ABS64_LO20", Const, 20}, + {"R_LARCH_ABS_HI20", Const, 20}, + {"R_LARCH_ABS_LO12", Const, 20}, + {"R_LARCH_ADD16", Const, 19}, + {"R_LARCH_ADD24", Const, 19}, + {"R_LARCH_ADD32", Const, 19}, + {"R_LARCH_ADD6", Const, 22}, + {"R_LARCH_ADD64", Const, 19}, + {"R_LARCH_ADD8", Const, 19}, + {"R_LARCH_ADD_ULEB128", Const, 22}, + {"R_LARCH_ALIGN", Const, 22}, + {"R_LARCH_B16", Const, 20}, + {"R_LARCH_B21", Const, 20}, + {"R_LARCH_B26", Const, 20}, + {"R_LARCH_CFA", Const, 22}, + {"R_LARCH_COPY", Const, 19}, + {"R_LARCH_DELETE", Const, 22}, + {"R_LARCH_GNU_VTENTRY", Const, 20}, + {"R_LARCH_GNU_VTINHERIT", Const, 20}, + {"R_LARCH_GOT64_HI12", Const, 20}, + {"R_LARCH_GOT64_LO20", Const, 20}, + {"R_LARCH_GOT64_PC_HI12", Const, 20}, + {"R_LARCH_GOT64_PC_LO20", Const, 20}, + {"R_LARCH_GOT_HI20", Const, 20}, + {"R_LARCH_GOT_LO12", Const, 20}, + {"R_LARCH_GOT_PC_HI20", Const, 20}, + {"R_LARCH_GOT_PC_LO12", Const, 20}, + {"R_LARCH_IRELATIVE", Const, 19}, + {"R_LARCH_JUMP_SLOT", Const, 19}, + {"R_LARCH_MARK_LA", Const, 19}, + {"R_LARCH_MARK_PCREL", Const, 19}, + {"R_LARCH_NONE", Const, 19}, + {"R_LARCH_PCALA64_HI12", Const, 20}, + {"R_LARCH_PCALA64_LO20", Const, 20}, + {"R_LARCH_PCALA_HI20", Const, 20}, + {"R_LARCH_PCALA_LO12", Const, 20}, + {"R_LARCH_PCREL20_S2", Const, 22}, + {"R_LARCH_RELATIVE", Const, 19}, + {"R_LARCH_RELAX", Const, 20}, + {"R_LARCH_SOP_ADD", Const, 19}, + {"R_LARCH_SOP_AND", Const, 19}, + {"R_LARCH_SOP_ASSERT", Const, 19}, + {"R_LARCH_SOP_IF_ELSE", Const, 19}, + {"R_LARCH_SOP_NOT", Const, 19}, + {"R_LARCH_SOP_POP_32_S_0_10_10_16_S2", Const, 19}, + {"R_LARCH_SOP_POP_32_S_0_5_10_16_S2", Const, 19}, + {"R_LARCH_SOP_POP_32_S_10_12", Const, 19}, + {"R_LARCH_SOP_POP_32_S_10_16", Const, 19}, + {"R_LARCH_SOP_POP_32_S_10_16_S2", Const, 19}, + {"R_LARCH_SOP_POP_32_S_10_5", Const, 19}, + {"R_LARCH_SOP_POP_32_S_5_20", Const, 19}, + {"R_LARCH_SOP_POP_32_U", Const, 19}, + {"R_LARCH_SOP_POP_32_U_10_12", Const, 19}, + {"R_LARCH_SOP_PUSH_ABSOLUTE", Const, 19}, + {"R_LARCH_SOP_PUSH_DUP", Const, 19}, + {"R_LARCH_SOP_PUSH_GPREL", Const, 19}, + {"R_LARCH_SOP_PUSH_PCREL", Const, 19}, + {"R_LARCH_SOP_PUSH_PLT_PCREL", Const, 19}, + {"R_LARCH_SOP_PUSH_TLS_GD", Const, 19}, + {"R_LARCH_SOP_PUSH_TLS_GOT", Const, 19}, + {"R_LARCH_SOP_PUSH_TLS_TPREL", Const, 19}, + {"R_LARCH_SOP_SL", Const, 19}, + {"R_LARCH_SOP_SR", Const, 19}, + {"R_LARCH_SOP_SUB", Const, 19}, + {"R_LARCH_SUB16", Const, 19}, + {"R_LARCH_SUB24", Const, 19}, + {"R_LARCH_SUB32", Const, 19}, + {"R_LARCH_SUB6", Const, 22}, + {"R_LARCH_SUB64", Const, 19}, + {"R_LARCH_SUB8", Const, 19}, + {"R_LARCH_SUB_ULEB128", Const, 22}, + {"R_LARCH_TLS_DTPMOD32", Const, 19}, + {"R_LARCH_TLS_DTPMOD64", Const, 19}, + {"R_LARCH_TLS_DTPREL32", Const, 19}, + {"R_LARCH_TLS_DTPREL64", Const, 19}, + {"R_LARCH_TLS_GD_HI20", Const, 20}, + {"R_LARCH_TLS_GD_PC_HI20", Const, 20}, + {"R_LARCH_TLS_IE64_HI12", Const, 20}, + {"R_LARCH_TLS_IE64_LO20", Const, 20}, + {"R_LARCH_TLS_IE64_PC_HI12", Const, 20}, + {"R_LARCH_TLS_IE64_PC_LO20", Const, 20}, + {"R_LARCH_TLS_IE_HI20", Const, 20}, + {"R_LARCH_TLS_IE_LO12", Const, 20}, + {"R_LARCH_TLS_IE_PC_HI20", Const, 20}, + {"R_LARCH_TLS_IE_PC_LO12", Const, 20}, + {"R_LARCH_TLS_LD_HI20", Const, 20}, + {"R_LARCH_TLS_LD_PC_HI20", Const, 20}, + {"R_LARCH_TLS_LE64_HI12", 
Const, 20}, + {"R_LARCH_TLS_LE64_LO20", Const, 20}, + {"R_LARCH_TLS_LE_HI20", Const, 20}, + {"R_LARCH_TLS_LE_LO12", Const, 20}, + {"R_LARCH_TLS_TPREL32", Const, 19}, + {"R_LARCH_TLS_TPREL64", Const, 19}, + {"R_MIPS", Type, 6}, + {"R_MIPS_16", Const, 6}, + {"R_MIPS_26", Const, 6}, + {"R_MIPS_32", Const, 6}, + {"R_MIPS_64", Const, 6}, + {"R_MIPS_ADD_IMMEDIATE", Const, 6}, + {"R_MIPS_CALL16", Const, 6}, + {"R_MIPS_CALL_HI16", Const, 6}, + {"R_MIPS_CALL_LO16", Const, 6}, + {"R_MIPS_DELETE", Const, 6}, + {"R_MIPS_GOT16", Const, 6}, + {"R_MIPS_GOT_DISP", Const, 6}, + {"R_MIPS_GOT_HI16", Const, 6}, + {"R_MIPS_GOT_LO16", Const, 6}, + {"R_MIPS_GOT_OFST", Const, 6}, + {"R_MIPS_GOT_PAGE", Const, 6}, + {"R_MIPS_GPREL16", Const, 6}, + {"R_MIPS_GPREL32", Const, 6}, + {"R_MIPS_HI16", Const, 6}, + {"R_MIPS_HIGHER", Const, 6}, + {"R_MIPS_HIGHEST", Const, 6}, + {"R_MIPS_INSERT_A", Const, 6}, + {"R_MIPS_INSERT_B", Const, 6}, + {"R_MIPS_JALR", Const, 6}, + {"R_MIPS_LITERAL", Const, 6}, + {"R_MIPS_LO16", Const, 6}, + {"R_MIPS_NONE", Const, 6}, + {"R_MIPS_PC16", Const, 6}, + {"R_MIPS_PC32", Const, 22}, + {"R_MIPS_PJUMP", Const, 6}, + {"R_MIPS_REL16", Const, 6}, + {"R_MIPS_REL32", Const, 6}, + {"R_MIPS_RELGOT", Const, 6}, + {"R_MIPS_SCN_DISP", Const, 6}, + {"R_MIPS_SHIFT5", Const, 6}, + {"R_MIPS_SHIFT6", Const, 6}, + {"R_MIPS_SUB", Const, 6}, + {"R_MIPS_TLS_DTPMOD32", Const, 6}, + {"R_MIPS_TLS_DTPMOD64", Const, 6}, + {"R_MIPS_TLS_DTPREL32", Const, 6}, + {"R_MIPS_TLS_DTPREL64", Const, 6}, + {"R_MIPS_TLS_DTPREL_HI16", Const, 6}, + {"R_MIPS_TLS_DTPREL_LO16", Const, 6}, + {"R_MIPS_TLS_GD", Const, 6}, + {"R_MIPS_TLS_GOTTPREL", Const, 6}, + {"R_MIPS_TLS_LDM", Const, 6}, + {"R_MIPS_TLS_TPREL32", Const, 6}, + {"R_MIPS_TLS_TPREL64", Const, 6}, + {"R_MIPS_TLS_TPREL_HI16", Const, 6}, + {"R_MIPS_TLS_TPREL_LO16", Const, 6}, + {"R_PPC", Type, 0}, + {"R_PPC64", Type, 5}, + {"R_PPC64_ADDR14", Const, 5}, + {"R_PPC64_ADDR14_BRNTAKEN", Const, 5}, + {"R_PPC64_ADDR14_BRTAKEN", Const, 5}, + {"R_PPC64_ADDR16", Const, 5}, + {"R_PPC64_ADDR16_DS", Const, 5}, + {"R_PPC64_ADDR16_HA", Const, 5}, + {"R_PPC64_ADDR16_HI", Const, 5}, + {"R_PPC64_ADDR16_HIGH", Const, 10}, + {"R_PPC64_ADDR16_HIGHA", Const, 10}, + {"R_PPC64_ADDR16_HIGHER", Const, 5}, + {"R_PPC64_ADDR16_HIGHER34", Const, 20}, + {"R_PPC64_ADDR16_HIGHERA", Const, 5}, + {"R_PPC64_ADDR16_HIGHERA34", Const, 20}, + {"R_PPC64_ADDR16_HIGHEST", Const, 5}, + {"R_PPC64_ADDR16_HIGHEST34", Const, 20}, + {"R_PPC64_ADDR16_HIGHESTA", Const, 5}, + {"R_PPC64_ADDR16_HIGHESTA34", Const, 20}, + {"R_PPC64_ADDR16_LO", Const, 5}, + {"R_PPC64_ADDR16_LO_DS", Const, 5}, + {"R_PPC64_ADDR24", Const, 5}, + {"R_PPC64_ADDR32", Const, 5}, + {"R_PPC64_ADDR64", Const, 5}, + {"R_PPC64_ADDR64_LOCAL", Const, 10}, + {"R_PPC64_COPY", Const, 20}, + {"R_PPC64_D28", Const, 20}, + {"R_PPC64_D34", Const, 20}, + {"R_PPC64_D34_HA30", Const, 20}, + {"R_PPC64_D34_HI30", Const, 20}, + {"R_PPC64_D34_LO", Const, 20}, + {"R_PPC64_DTPMOD64", Const, 5}, + {"R_PPC64_DTPREL16", Const, 5}, + {"R_PPC64_DTPREL16_DS", Const, 5}, + {"R_PPC64_DTPREL16_HA", Const, 5}, + {"R_PPC64_DTPREL16_HI", Const, 5}, + {"R_PPC64_DTPREL16_HIGH", Const, 10}, + {"R_PPC64_DTPREL16_HIGHA", Const, 10}, + {"R_PPC64_DTPREL16_HIGHER", Const, 5}, + {"R_PPC64_DTPREL16_HIGHERA", Const, 5}, + {"R_PPC64_DTPREL16_HIGHEST", Const, 5}, + {"R_PPC64_DTPREL16_HIGHESTA", Const, 5}, + {"R_PPC64_DTPREL16_LO", Const, 5}, + {"R_PPC64_DTPREL16_LO_DS", Const, 5}, + {"R_PPC64_DTPREL34", Const, 20}, + {"R_PPC64_DTPREL64", Const, 5}, + {"R_PPC64_ENTRY", Const, 10}, + 
{"R_PPC64_GLOB_DAT", Const, 20}, + {"R_PPC64_GNU_VTENTRY", Const, 20}, + {"R_PPC64_GNU_VTINHERIT", Const, 20}, + {"R_PPC64_GOT16", Const, 5}, + {"R_PPC64_GOT16_DS", Const, 5}, + {"R_PPC64_GOT16_HA", Const, 5}, + {"R_PPC64_GOT16_HI", Const, 5}, + {"R_PPC64_GOT16_LO", Const, 5}, + {"R_PPC64_GOT16_LO_DS", Const, 5}, + {"R_PPC64_GOT_DTPREL16_DS", Const, 5}, + {"R_PPC64_GOT_DTPREL16_HA", Const, 5}, + {"R_PPC64_GOT_DTPREL16_HI", Const, 5}, + {"R_PPC64_GOT_DTPREL16_LO_DS", Const, 5}, + {"R_PPC64_GOT_DTPREL_PCREL34", Const, 20}, + {"R_PPC64_GOT_PCREL34", Const, 20}, + {"R_PPC64_GOT_TLSGD16", Const, 5}, + {"R_PPC64_GOT_TLSGD16_HA", Const, 5}, + {"R_PPC64_GOT_TLSGD16_HI", Const, 5}, + {"R_PPC64_GOT_TLSGD16_LO", Const, 5}, + {"R_PPC64_GOT_TLSGD_PCREL34", Const, 20}, + {"R_PPC64_GOT_TLSLD16", Const, 5}, + {"R_PPC64_GOT_TLSLD16_HA", Const, 5}, + {"R_PPC64_GOT_TLSLD16_HI", Const, 5}, + {"R_PPC64_GOT_TLSLD16_LO", Const, 5}, + {"R_PPC64_GOT_TLSLD_PCREL34", Const, 20}, + {"R_PPC64_GOT_TPREL16_DS", Const, 5}, + {"R_PPC64_GOT_TPREL16_HA", Const, 5}, + {"R_PPC64_GOT_TPREL16_HI", Const, 5}, + {"R_PPC64_GOT_TPREL16_LO_DS", Const, 5}, + {"R_PPC64_GOT_TPREL_PCREL34", Const, 20}, + {"R_PPC64_IRELATIVE", Const, 10}, + {"R_PPC64_JMP_IREL", Const, 10}, + {"R_PPC64_JMP_SLOT", Const, 5}, + {"R_PPC64_NONE", Const, 5}, + {"R_PPC64_PCREL28", Const, 20}, + {"R_PPC64_PCREL34", Const, 20}, + {"R_PPC64_PCREL_OPT", Const, 20}, + {"R_PPC64_PLT16_HA", Const, 20}, + {"R_PPC64_PLT16_HI", Const, 20}, + {"R_PPC64_PLT16_LO", Const, 20}, + {"R_PPC64_PLT16_LO_DS", Const, 10}, + {"R_PPC64_PLT32", Const, 20}, + {"R_PPC64_PLT64", Const, 20}, + {"R_PPC64_PLTCALL", Const, 20}, + {"R_PPC64_PLTCALL_NOTOC", Const, 20}, + {"R_PPC64_PLTGOT16", Const, 10}, + {"R_PPC64_PLTGOT16_DS", Const, 10}, + {"R_PPC64_PLTGOT16_HA", Const, 10}, + {"R_PPC64_PLTGOT16_HI", Const, 10}, + {"R_PPC64_PLTGOT16_LO", Const, 10}, + {"R_PPC64_PLTGOT_LO_DS", Const, 10}, + {"R_PPC64_PLTREL32", Const, 20}, + {"R_PPC64_PLTREL64", Const, 20}, + {"R_PPC64_PLTSEQ", Const, 20}, + {"R_PPC64_PLTSEQ_NOTOC", Const, 20}, + {"R_PPC64_PLT_PCREL34", Const, 20}, + {"R_PPC64_PLT_PCREL34_NOTOC", Const, 20}, + {"R_PPC64_REL14", Const, 5}, + {"R_PPC64_REL14_BRNTAKEN", Const, 5}, + {"R_PPC64_REL14_BRTAKEN", Const, 5}, + {"R_PPC64_REL16", Const, 5}, + {"R_PPC64_REL16DX_HA", Const, 10}, + {"R_PPC64_REL16_HA", Const, 5}, + {"R_PPC64_REL16_HI", Const, 5}, + {"R_PPC64_REL16_HIGH", Const, 20}, + {"R_PPC64_REL16_HIGHA", Const, 20}, + {"R_PPC64_REL16_HIGHER", Const, 20}, + {"R_PPC64_REL16_HIGHER34", Const, 20}, + {"R_PPC64_REL16_HIGHERA", Const, 20}, + {"R_PPC64_REL16_HIGHERA34", Const, 20}, + {"R_PPC64_REL16_HIGHEST", Const, 20}, + {"R_PPC64_REL16_HIGHEST34", Const, 20}, + {"R_PPC64_REL16_HIGHESTA", Const, 20}, + {"R_PPC64_REL16_HIGHESTA34", Const, 20}, + {"R_PPC64_REL16_LO", Const, 5}, + {"R_PPC64_REL24", Const, 5}, + {"R_PPC64_REL24_NOTOC", Const, 10}, + {"R_PPC64_REL24_P9NOTOC", Const, 21}, + {"R_PPC64_REL30", Const, 20}, + {"R_PPC64_REL32", Const, 5}, + {"R_PPC64_REL64", Const, 5}, + {"R_PPC64_RELATIVE", Const, 18}, + {"R_PPC64_SECTOFF", Const, 20}, + {"R_PPC64_SECTOFF_DS", Const, 10}, + {"R_PPC64_SECTOFF_HA", Const, 20}, + {"R_PPC64_SECTOFF_HI", Const, 20}, + {"R_PPC64_SECTOFF_LO", Const, 20}, + {"R_PPC64_SECTOFF_LO_DS", Const, 10}, + {"R_PPC64_TLS", Const, 5}, + {"R_PPC64_TLSGD", Const, 5}, + {"R_PPC64_TLSLD", Const, 5}, + {"R_PPC64_TOC", Const, 5}, + {"R_PPC64_TOC16", Const, 5}, + {"R_PPC64_TOC16_DS", Const, 5}, + {"R_PPC64_TOC16_HA", Const, 5}, + {"R_PPC64_TOC16_HI", Const, 5}, + 
{"R_PPC64_TOC16_LO", Const, 5}, + {"R_PPC64_TOC16_LO_DS", Const, 5}, + {"R_PPC64_TOCSAVE", Const, 10}, + {"R_PPC64_TPREL16", Const, 5}, + {"R_PPC64_TPREL16_DS", Const, 5}, + {"R_PPC64_TPREL16_HA", Const, 5}, + {"R_PPC64_TPREL16_HI", Const, 5}, + {"R_PPC64_TPREL16_HIGH", Const, 10}, + {"R_PPC64_TPREL16_HIGHA", Const, 10}, + {"R_PPC64_TPREL16_HIGHER", Const, 5}, + {"R_PPC64_TPREL16_HIGHERA", Const, 5}, + {"R_PPC64_TPREL16_HIGHEST", Const, 5}, + {"R_PPC64_TPREL16_HIGHESTA", Const, 5}, + {"R_PPC64_TPREL16_LO", Const, 5}, + {"R_PPC64_TPREL16_LO_DS", Const, 5}, + {"R_PPC64_TPREL34", Const, 20}, + {"R_PPC64_TPREL64", Const, 5}, + {"R_PPC64_UADDR16", Const, 20}, + {"R_PPC64_UADDR32", Const, 20}, + {"R_PPC64_UADDR64", Const, 20}, + {"R_PPC_ADDR14", Const, 0}, + {"R_PPC_ADDR14_BRNTAKEN", Const, 0}, + {"R_PPC_ADDR14_BRTAKEN", Const, 0}, + {"R_PPC_ADDR16", Const, 0}, + {"R_PPC_ADDR16_HA", Const, 0}, + {"R_PPC_ADDR16_HI", Const, 0}, + {"R_PPC_ADDR16_LO", Const, 0}, + {"R_PPC_ADDR24", Const, 0}, + {"R_PPC_ADDR32", Const, 0}, + {"R_PPC_COPY", Const, 0}, + {"R_PPC_DTPMOD32", Const, 0}, + {"R_PPC_DTPREL16", Const, 0}, + {"R_PPC_DTPREL16_HA", Const, 0}, + {"R_PPC_DTPREL16_HI", Const, 0}, + {"R_PPC_DTPREL16_LO", Const, 0}, + {"R_PPC_DTPREL32", Const, 0}, + {"R_PPC_EMB_BIT_FLD", Const, 0}, + {"R_PPC_EMB_MRKREF", Const, 0}, + {"R_PPC_EMB_NADDR16", Const, 0}, + {"R_PPC_EMB_NADDR16_HA", Const, 0}, + {"R_PPC_EMB_NADDR16_HI", Const, 0}, + {"R_PPC_EMB_NADDR16_LO", Const, 0}, + {"R_PPC_EMB_NADDR32", Const, 0}, + {"R_PPC_EMB_RELSDA", Const, 0}, + {"R_PPC_EMB_RELSEC16", Const, 0}, + {"R_PPC_EMB_RELST_HA", Const, 0}, + {"R_PPC_EMB_RELST_HI", Const, 0}, + {"R_PPC_EMB_RELST_LO", Const, 0}, + {"R_PPC_EMB_SDA21", Const, 0}, + {"R_PPC_EMB_SDA2I16", Const, 0}, + {"R_PPC_EMB_SDA2REL", Const, 0}, + {"R_PPC_EMB_SDAI16", Const, 0}, + {"R_PPC_GLOB_DAT", Const, 0}, + {"R_PPC_GOT16", Const, 0}, + {"R_PPC_GOT16_HA", Const, 0}, + {"R_PPC_GOT16_HI", Const, 0}, + {"R_PPC_GOT16_LO", Const, 0}, + {"R_PPC_GOT_TLSGD16", Const, 0}, + {"R_PPC_GOT_TLSGD16_HA", Const, 0}, + {"R_PPC_GOT_TLSGD16_HI", Const, 0}, + {"R_PPC_GOT_TLSGD16_LO", Const, 0}, + {"R_PPC_GOT_TLSLD16", Const, 0}, + {"R_PPC_GOT_TLSLD16_HA", Const, 0}, + {"R_PPC_GOT_TLSLD16_HI", Const, 0}, + {"R_PPC_GOT_TLSLD16_LO", Const, 0}, + {"R_PPC_GOT_TPREL16", Const, 0}, + {"R_PPC_GOT_TPREL16_HA", Const, 0}, + {"R_PPC_GOT_TPREL16_HI", Const, 0}, + {"R_PPC_GOT_TPREL16_LO", Const, 0}, + {"R_PPC_JMP_SLOT", Const, 0}, + {"R_PPC_LOCAL24PC", Const, 0}, + {"R_PPC_NONE", Const, 0}, + {"R_PPC_PLT16_HA", Const, 0}, + {"R_PPC_PLT16_HI", Const, 0}, + {"R_PPC_PLT16_LO", Const, 0}, + {"R_PPC_PLT32", Const, 0}, + {"R_PPC_PLTREL24", Const, 0}, + {"R_PPC_PLTREL32", Const, 0}, + {"R_PPC_REL14", Const, 0}, + {"R_PPC_REL14_BRNTAKEN", Const, 0}, + {"R_PPC_REL14_BRTAKEN", Const, 0}, + {"R_PPC_REL24", Const, 0}, + {"R_PPC_REL32", Const, 0}, + {"R_PPC_RELATIVE", Const, 0}, + {"R_PPC_SDAREL16", Const, 0}, + {"R_PPC_SECTOFF", Const, 0}, + {"R_PPC_SECTOFF_HA", Const, 0}, + {"R_PPC_SECTOFF_HI", Const, 0}, + {"R_PPC_SECTOFF_LO", Const, 0}, + {"R_PPC_TLS", Const, 0}, + {"R_PPC_TPREL16", Const, 0}, + {"R_PPC_TPREL16_HA", Const, 0}, + {"R_PPC_TPREL16_HI", Const, 0}, + {"R_PPC_TPREL16_LO", Const, 0}, + {"R_PPC_TPREL32", Const, 0}, + {"R_PPC_UADDR16", Const, 0}, + {"R_PPC_UADDR32", Const, 0}, + {"R_RISCV", Type, 11}, + {"R_RISCV_32", Const, 11}, + {"R_RISCV_32_PCREL", Const, 12}, + {"R_RISCV_64", Const, 11}, + {"R_RISCV_ADD16", Const, 11}, + {"R_RISCV_ADD32", Const, 11}, + {"R_RISCV_ADD64", Const, 11}, + {"R_RISCV_ADD8", 
Const, 11}, + {"R_RISCV_ALIGN", Const, 11}, + {"R_RISCV_BRANCH", Const, 11}, + {"R_RISCV_CALL", Const, 11}, + {"R_RISCV_CALL_PLT", Const, 11}, + {"R_RISCV_COPY", Const, 11}, + {"R_RISCV_GNU_VTENTRY", Const, 11}, + {"R_RISCV_GNU_VTINHERIT", Const, 11}, + {"R_RISCV_GOT_HI20", Const, 11}, + {"R_RISCV_GPREL_I", Const, 11}, + {"R_RISCV_GPREL_S", Const, 11}, + {"R_RISCV_HI20", Const, 11}, + {"R_RISCV_JAL", Const, 11}, + {"R_RISCV_JUMP_SLOT", Const, 11}, + {"R_RISCV_LO12_I", Const, 11}, + {"R_RISCV_LO12_S", Const, 11}, + {"R_RISCV_NONE", Const, 11}, + {"R_RISCV_PCREL_HI20", Const, 11}, + {"R_RISCV_PCREL_LO12_I", Const, 11}, + {"R_RISCV_PCREL_LO12_S", Const, 11}, + {"R_RISCV_RELATIVE", Const, 11}, + {"R_RISCV_RELAX", Const, 11}, + {"R_RISCV_RVC_BRANCH", Const, 11}, + {"R_RISCV_RVC_JUMP", Const, 11}, + {"R_RISCV_RVC_LUI", Const, 11}, + {"R_RISCV_SET16", Const, 11}, + {"R_RISCV_SET32", Const, 11}, + {"R_RISCV_SET6", Const, 11}, + {"R_RISCV_SET8", Const, 11}, + {"R_RISCV_SUB16", Const, 11}, + {"R_RISCV_SUB32", Const, 11}, + {"R_RISCV_SUB6", Const, 11}, + {"R_RISCV_SUB64", Const, 11}, + {"R_RISCV_SUB8", Const, 11}, + {"R_RISCV_TLS_DTPMOD32", Const, 11}, + {"R_RISCV_TLS_DTPMOD64", Const, 11}, + {"R_RISCV_TLS_DTPREL32", Const, 11}, + {"R_RISCV_TLS_DTPREL64", Const, 11}, + {"R_RISCV_TLS_GD_HI20", Const, 11}, + {"R_RISCV_TLS_GOT_HI20", Const, 11}, + {"R_RISCV_TLS_TPREL32", Const, 11}, + {"R_RISCV_TLS_TPREL64", Const, 11}, + {"R_RISCV_TPREL_ADD", Const, 11}, + {"R_RISCV_TPREL_HI20", Const, 11}, + {"R_RISCV_TPREL_I", Const, 11}, + {"R_RISCV_TPREL_LO12_I", Const, 11}, + {"R_RISCV_TPREL_LO12_S", Const, 11}, + {"R_RISCV_TPREL_S", Const, 11}, + {"R_SPARC", Type, 0}, + {"R_SPARC_10", Const, 0}, + {"R_SPARC_11", Const, 0}, + {"R_SPARC_13", Const, 0}, + {"R_SPARC_16", Const, 0}, + {"R_SPARC_22", Const, 0}, + {"R_SPARC_32", Const, 0}, + {"R_SPARC_5", Const, 0}, + {"R_SPARC_6", Const, 0}, + {"R_SPARC_64", Const, 0}, + {"R_SPARC_7", Const, 0}, + {"R_SPARC_8", Const, 0}, + {"R_SPARC_COPY", Const, 0}, + {"R_SPARC_DISP16", Const, 0}, + {"R_SPARC_DISP32", Const, 0}, + {"R_SPARC_DISP64", Const, 0}, + {"R_SPARC_DISP8", Const, 0}, + {"R_SPARC_GLOB_DAT", Const, 0}, + {"R_SPARC_GLOB_JMP", Const, 0}, + {"R_SPARC_GOT10", Const, 0}, + {"R_SPARC_GOT13", Const, 0}, + {"R_SPARC_GOT22", Const, 0}, + {"R_SPARC_H44", Const, 0}, + {"R_SPARC_HH22", Const, 0}, + {"R_SPARC_HI22", Const, 0}, + {"R_SPARC_HIPLT22", Const, 0}, + {"R_SPARC_HIX22", Const, 0}, + {"R_SPARC_HM10", Const, 0}, + {"R_SPARC_JMP_SLOT", Const, 0}, + {"R_SPARC_L44", Const, 0}, + {"R_SPARC_LM22", Const, 0}, + {"R_SPARC_LO10", Const, 0}, + {"R_SPARC_LOPLT10", Const, 0}, + {"R_SPARC_LOX10", Const, 0}, + {"R_SPARC_M44", Const, 0}, + {"R_SPARC_NONE", Const, 0}, + {"R_SPARC_OLO10", Const, 0}, + {"R_SPARC_PC10", Const, 0}, + {"R_SPARC_PC22", Const, 0}, + {"R_SPARC_PCPLT10", Const, 0}, + {"R_SPARC_PCPLT22", Const, 0}, + {"R_SPARC_PCPLT32", Const, 0}, + {"R_SPARC_PC_HH22", Const, 0}, + {"R_SPARC_PC_HM10", Const, 0}, + {"R_SPARC_PC_LM22", Const, 0}, + {"R_SPARC_PLT32", Const, 0}, + {"R_SPARC_PLT64", Const, 0}, + {"R_SPARC_REGISTER", Const, 0}, + {"R_SPARC_RELATIVE", Const, 0}, + {"R_SPARC_UA16", Const, 0}, + {"R_SPARC_UA32", Const, 0}, + {"R_SPARC_UA64", Const, 0}, + {"R_SPARC_WDISP16", Const, 0}, + {"R_SPARC_WDISP19", Const, 0}, + {"R_SPARC_WDISP22", Const, 0}, + {"R_SPARC_WDISP30", Const, 0}, + {"R_SPARC_WPLT30", Const, 0}, + {"R_SYM32", Func, 0}, + {"R_SYM64", Func, 0}, + {"R_TYPE32", Func, 0}, + {"R_TYPE64", Func, 0}, + {"R_X86_64", Type, 0}, + {"R_X86_64_16", Const, 0}, + 
{"R_X86_64_32", Const, 0}, + {"R_X86_64_32S", Const, 0}, + {"R_X86_64_64", Const, 0}, + {"R_X86_64_8", Const, 0}, + {"R_X86_64_COPY", Const, 0}, + {"R_X86_64_DTPMOD64", Const, 0}, + {"R_X86_64_DTPOFF32", Const, 0}, + {"R_X86_64_DTPOFF64", Const, 0}, + {"R_X86_64_GLOB_DAT", Const, 0}, + {"R_X86_64_GOT32", Const, 0}, + {"R_X86_64_GOT64", Const, 10}, + {"R_X86_64_GOTOFF64", Const, 10}, + {"R_X86_64_GOTPC32", Const, 10}, + {"R_X86_64_GOTPC32_TLSDESC", Const, 10}, + {"R_X86_64_GOTPC64", Const, 10}, + {"R_X86_64_GOTPCREL", Const, 0}, + {"R_X86_64_GOTPCREL64", Const, 10}, + {"R_X86_64_GOTPCRELX", Const, 10}, + {"R_X86_64_GOTPLT64", Const, 10}, + {"R_X86_64_GOTTPOFF", Const, 0}, + {"R_X86_64_IRELATIVE", Const, 10}, + {"R_X86_64_JMP_SLOT", Const, 0}, + {"R_X86_64_NONE", Const, 0}, + {"R_X86_64_PC16", Const, 0}, + {"R_X86_64_PC32", Const, 0}, + {"R_X86_64_PC32_BND", Const, 10}, + {"R_X86_64_PC64", Const, 10}, + {"R_X86_64_PC8", Const, 0}, + {"R_X86_64_PLT32", Const, 0}, + {"R_X86_64_PLT32_BND", Const, 10}, + {"R_X86_64_PLTOFF64", Const, 10}, + {"R_X86_64_RELATIVE", Const, 0}, + {"R_X86_64_RELATIVE64", Const, 10}, + {"R_X86_64_REX_GOTPCRELX", Const, 10}, + {"R_X86_64_SIZE32", Const, 10}, + {"R_X86_64_SIZE64", Const, 10}, + {"R_X86_64_TLSDESC", Const, 10}, + {"R_X86_64_TLSDESC_CALL", Const, 10}, + {"R_X86_64_TLSGD", Const, 0}, + {"R_X86_64_TLSLD", Const, 0}, + {"R_X86_64_TPOFF32", Const, 0}, + {"R_X86_64_TPOFF64", Const, 0}, + {"Rel32", Type, 0}, + {"Rel32.Info", Field, 0}, + {"Rel32.Off", Field, 0}, + {"Rel64", Type, 0}, + {"Rel64.Info", Field, 0}, + {"Rel64.Off", Field, 0}, + {"Rela32", Type, 0}, + {"Rela32.Addend", Field, 0}, + {"Rela32.Info", Field, 0}, + {"Rela32.Off", Field, 0}, + {"Rela64", Type, 0}, + {"Rela64.Addend", Field, 0}, + {"Rela64.Info", Field, 0}, + {"Rela64.Off", Field, 0}, + {"SHF_ALLOC", Const, 0}, + {"SHF_COMPRESSED", Const, 6}, + {"SHF_EXECINSTR", Const, 0}, + {"SHF_GROUP", Const, 0}, + {"SHF_INFO_LINK", Const, 0}, + {"SHF_LINK_ORDER", Const, 0}, + {"SHF_MASKOS", Const, 0}, + {"SHF_MASKPROC", Const, 0}, + {"SHF_MERGE", Const, 0}, + {"SHF_OS_NONCONFORMING", Const, 0}, + {"SHF_STRINGS", Const, 0}, + {"SHF_TLS", Const, 0}, + {"SHF_WRITE", Const, 0}, + {"SHN_ABS", Const, 0}, + {"SHN_COMMON", Const, 0}, + {"SHN_HIOS", Const, 0}, + {"SHN_HIPROC", Const, 0}, + {"SHN_HIRESERVE", Const, 0}, + {"SHN_LOOS", Const, 0}, + {"SHN_LOPROC", Const, 0}, + {"SHN_LORESERVE", Const, 0}, + {"SHN_UNDEF", Const, 0}, + {"SHN_XINDEX", Const, 0}, + {"SHT_DYNAMIC", Const, 0}, + {"SHT_DYNSYM", Const, 0}, + {"SHT_FINI_ARRAY", Const, 0}, + {"SHT_GNU_ATTRIBUTES", Const, 0}, + {"SHT_GNU_HASH", Const, 0}, + {"SHT_GNU_LIBLIST", Const, 0}, + {"SHT_GNU_VERDEF", Const, 0}, + {"SHT_GNU_VERNEED", Const, 0}, + {"SHT_GNU_VERSYM", Const, 0}, + {"SHT_GROUP", Const, 0}, + {"SHT_HASH", Const, 0}, + {"SHT_HIOS", Const, 0}, + {"SHT_HIPROC", Const, 0}, + {"SHT_HIUSER", Const, 0}, + {"SHT_INIT_ARRAY", Const, 0}, + {"SHT_LOOS", Const, 0}, + {"SHT_LOPROC", Const, 0}, + {"SHT_LOUSER", Const, 0}, + {"SHT_MIPS_ABIFLAGS", Const, 17}, + {"SHT_NOBITS", Const, 0}, + {"SHT_NOTE", Const, 0}, + {"SHT_NULL", Const, 0}, + {"SHT_PREINIT_ARRAY", Const, 0}, + {"SHT_PROGBITS", Const, 0}, + {"SHT_REL", Const, 0}, + {"SHT_RELA", Const, 0}, + {"SHT_SHLIB", Const, 0}, + {"SHT_STRTAB", Const, 0}, + {"SHT_SYMTAB", Const, 0}, + {"SHT_SYMTAB_SHNDX", Const, 0}, + {"STB_GLOBAL", Const, 0}, + {"STB_HIOS", Const, 0}, + {"STB_HIPROC", Const, 0}, + {"STB_LOCAL", Const, 0}, + {"STB_LOOS", Const, 0}, + {"STB_LOPROC", Const, 0}, + {"STB_WEAK", Const, 0}, + 
{"STT_COMMON", Const, 0}, + {"STT_FILE", Const, 0}, + {"STT_FUNC", Const, 0}, + {"STT_HIOS", Const, 0}, + {"STT_HIPROC", Const, 0}, + {"STT_LOOS", Const, 0}, + {"STT_LOPROC", Const, 0}, + {"STT_NOTYPE", Const, 0}, + {"STT_OBJECT", Const, 0}, + {"STT_SECTION", Const, 0}, + {"STT_TLS", Const, 0}, + {"STV_DEFAULT", Const, 0}, + {"STV_HIDDEN", Const, 0}, + {"STV_INTERNAL", Const, 0}, + {"STV_PROTECTED", Const, 0}, + {"ST_BIND", Func, 0}, + {"ST_INFO", Func, 0}, + {"ST_TYPE", Func, 0}, + {"ST_VISIBILITY", Func, 0}, + {"Section", Type, 0}, + {"Section.ReaderAt", Field, 0}, + {"Section.SectionHeader", Field, 0}, + {"Section32", Type, 0}, + {"Section32.Addr", Field, 0}, + {"Section32.Addralign", Field, 0}, + {"Section32.Entsize", Field, 0}, + {"Section32.Flags", Field, 0}, + {"Section32.Info", Field, 0}, + {"Section32.Link", Field, 0}, + {"Section32.Name", Field, 0}, + {"Section32.Off", Field, 0}, + {"Section32.Size", Field, 0}, + {"Section32.Type", Field, 0}, + {"Section64", Type, 0}, + {"Section64.Addr", Field, 0}, + {"Section64.Addralign", Field, 0}, + {"Section64.Entsize", Field, 0}, + {"Section64.Flags", Field, 0}, + {"Section64.Info", Field, 0}, + {"Section64.Link", Field, 0}, + {"Section64.Name", Field, 0}, + {"Section64.Off", Field, 0}, + {"Section64.Size", Field, 0}, + {"Section64.Type", Field, 0}, + {"SectionFlag", Type, 0}, + {"SectionHeader", Type, 0}, + {"SectionHeader.Addr", Field, 0}, + {"SectionHeader.Addralign", Field, 0}, + {"SectionHeader.Entsize", Field, 0}, + {"SectionHeader.FileSize", Field, 6}, + {"SectionHeader.Flags", Field, 0}, + {"SectionHeader.Info", Field, 0}, + {"SectionHeader.Link", Field, 0}, + {"SectionHeader.Name", Field, 0}, + {"SectionHeader.Offset", Field, 0}, + {"SectionHeader.Size", Field, 0}, + {"SectionHeader.Type", Field, 0}, + {"SectionIndex", Type, 0}, + {"SectionType", Type, 0}, + {"Sym32", Type, 0}, + {"Sym32.Info", Field, 0}, + {"Sym32.Name", Field, 0}, + {"Sym32.Other", Field, 0}, + {"Sym32.Shndx", Field, 0}, + {"Sym32.Size", Field, 0}, + {"Sym32.Value", Field, 0}, + {"Sym32Size", Const, 0}, + {"Sym64", Type, 0}, + {"Sym64.Info", Field, 0}, + {"Sym64.Name", Field, 0}, + {"Sym64.Other", Field, 0}, + {"Sym64.Shndx", Field, 0}, + {"Sym64.Size", Field, 0}, + {"Sym64.Value", Field, 0}, + {"Sym64Size", Const, 0}, + {"SymBind", Type, 0}, + {"SymType", Type, 0}, + {"SymVis", Type, 0}, + {"Symbol", Type, 0}, + {"Symbol.Info", Field, 0}, + {"Symbol.Library", Field, 13}, + {"Symbol.Name", Field, 0}, + {"Symbol.Other", Field, 0}, + {"Symbol.Section", Field, 0}, + {"Symbol.Size", Field, 0}, + {"Symbol.Value", Field, 0}, + {"Symbol.Version", Field, 13}, + {"Type", Type, 0}, + {"Version", Type, 0}, + }, + "debug/gosym": { + {"(*DecodingError).Error", Method, 0}, + {"(*LineTable).LineToPC", Method, 0}, + {"(*LineTable).PCToLine", Method, 0}, + {"(*Sym).BaseName", Method, 0}, + {"(*Sym).PackageName", Method, 0}, + {"(*Sym).ReceiverName", Method, 0}, + {"(*Sym).Static", Method, 0}, + {"(*Table).LineToPC", Method, 0}, + {"(*Table).LookupFunc", Method, 0}, + {"(*Table).LookupSym", Method, 0}, + {"(*Table).PCToFunc", Method, 0}, + {"(*Table).PCToLine", Method, 0}, + {"(*Table).SymByAddr", Method, 0}, + {"(*UnknownLineError).Error", Method, 0}, + {"(Func).BaseName", Method, 0}, + {"(Func).PackageName", Method, 0}, + {"(Func).ReceiverName", Method, 0}, + {"(Func).Static", Method, 0}, + {"(UnknownFileError).Error", Method, 0}, + {"DecodingError", Type, 0}, + {"Func", Type, 0}, + {"Func.End", Field, 0}, + {"Func.Entry", Field, 0}, + {"Func.FrameSize", Field, 0}, + 
{"Func.LineTable", Field, 0}, + {"Func.Locals", Field, 0}, + {"Func.Obj", Field, 0}, + {"Func.Params", Field, 0}, + {"Func.Sym", Field, 0}, + {"LineTable", Type, 0}, + {"LineTable.Data", Field, 0}, + {"LineTable.Line", Field, 0}, + {"LineTable.PC", Field, 0}, + {"NewLineTable", Func, 0}, + {"NewTable", Func, 0}, + {"Obj", Type, 0}, + {"Obj.Funcs", Field, 0}, + {"Obj.Paths", Field, 0}, + {"Sym", Type, 0}, + {"Sym.Func", Field, 0}, + {"Sym.GoType", Field, 0}, + {"Sym.Name", Field, 0}, + {"Sym.Type", Field, 0}, + {"Sym.Value", Field, 0}, + {"Table", Type, 0}, + {"Table.Files", Field, 0}, + {"Table.Funcs", Field, 0}, + {"Table.Objs", Field, 0}, + {"Table.Syms", Field, 0}, + {"UnknownFileError", Type, 0}, + {"UnknownLineError", Type, 0}, + {"UnknownLineError.File", Field, 0}, + {"UnknownLineError.Line", Field, 0}, + }, + "debug/macho": { + {"(*FatFile).Close", Method, 3}, + {"(*File).Close", Method, 0}, + {"(*File).DWARF", Method, 0}, + {"(*File).ImportedLibraries", Method, 0}, + {"(*File).ImportedSymbols", Method, 0}, + {"(*File).Section", Method, 0}, + {"(*File).Segment", Method, 0}, + {"(*FormatError).Error", Method, 0}, + {"(*Section).Data", Method, 0}, + {"(*Section).Open", Method, 0}, + {"(*Segment).Data", Method, 0}, + {"(*Segment).Open", Method, 0}, + {"(Cpu).GoString", Method, 0}, + {"(Cpu).String", Method, 0}, + {"(Dylib).Raw", Method, 0}, + {"(Dysymtab).Raw", Method, 0}, + {"(FatArch).Close", Method, 3}, + {"(FatArch).DWARF", Method, 3}, + {"(FatArch).ImportedLibraries", Method, 3}, + {"(FatArch).ImportedSymbols", Method, 3}, + {"(FatArch).Section", Method, 3}, + {"(FatArch).Segment", Method, 3}, + {"(LoadBytes).Raw", Method, 0}, + {"(LoadCmd).GoString", Method, 0}, + {"(LoadCmd).String", Method, 0}, + {"(RelocTypeARM).GoString", Method, 10}, + {"(RelocTypeARM).String", Method, 10}, + {"(RelocTypeARM64).GoString", Method, 10}, + {"(RelocTypeARM64).String", Method, 10}, + {"(RelocTypeGeneric).GoString", Method, 10}, + {"(RelocTypeGeneric).String", Method, 10}, + {"(RelocTypeX86_64).GoString", Method, 10}, + {"(RelocTypeX86_64).String", Method, 10}, + {"(Rpath).Raw", Method, 10}, + {"(Section).ReadAt", Method, 0}, + {"(Segment).Raw", Method, 0}, + {"(Segment).ReadAt", Method, 0}, + {"(Symtab).Raw", Method, 0}, + {"(Type).GoString", Method, 10}, + {"(Type).String", Method, 10}, + {"ARM64_RELOC_ADDEND", Const, 10}, + {"ARM64_RELOC_BRANCH26", Const, 10}, + {"ARM64_RELOC_GOT_LOAD_PAGE21", Const, 10}, + {"ARM64_RELOC_GOT_LOAD_PAGEOFF12", Const, 10}, + {"ARM64_RELOC_PAGE21", Const, 10}, + {"ARM64_RELOC_PAGEOFF12", Const, 10}, + {"ARM64_RELOC_POINTER_TO_GOT", Const, 10}, + {"ARM64_RELOC_SUBTRACTOR", Const, 10}, + {"ARM64_RELOC_TLVP_LOAD_PAGE21", Const, 10}, + {"ARM64_RELOC_TLVP_LOAD_PAGEOFF12", Const, 10}, + {"ARM64_RELOC_UNSIGNED", Const, 10}, + {"ARM_RELOC_BR24", Const, 10}, + {"ARM_RELOC_HALF", Const, 10}, + {"ARM_RELOC_HALF_SECTDIFF", Const, 10}, + {"ARM_RELOC_LOCAL_SECTDIFF", Const, 10}, + {"ARM_RELOC_PAIR", Const, 10}, + {"ARM_RELOC_PB_LA_PTR", Const, 10}, + {"ARM_RELOC_SECTDIFF", Const, 10}, + {"ARM_RELOC_VANILLA", Const, 10}, + {"ARM_THUMB_32BIT_BRANCH", Const, 10}, + {"ARM_THUMB_RELOC_BR22", Const, 10}, + {"Cpu", Type, 0}, + {"Cpu386", Const, 0}, + {"CpuAmd64", Const, 0}, + {"CpuArm", Const, 3}, + {"CpuArm64", Const, 11}, + {"CpuPpc", Const, 3}, + {"CpuPpc64", Const, 3}, + {"Dylib", Type, 0}, + {"Dylib.CompatVersion", Field, 0}, + {"Dylib.CurrentVersion", Field, 0}, + {"Dylib.LoadBytes", Field, 0}, + {"Dylib.Name", Field, 0}, + {"Dylib.Time", Field, 0}, + {"DylibCmd", Type, 0}, + 
{"DylibCmd.Cmd", Field, 0}, + {"DylibCmd.CompatVersion", Field, 0}, + {"DylibCmd.CurrentVersion", Field, 0}, + {"DylibCmd.Len", Field, 0}, + {"DylibCmd.Name", Field, 0}, + {"DylibCmd.Time", Field, 0}, + {"Dysymtab", Type, 0}, + {"Dysymtab.DysymtabCmd", Field, 0}, + {"Dysymtab.IndirectSyms", Field, 0}, + {"Dysymtab.LoadBytes", Field, 0}, + {"DysymtabCmd", Type, 0}, + {"DysymtabCmd.Cmd", Field, 0}, + {"DysymtabCmd.Extrefsymoff", Field, 0}, + {"DysymtabCmd.Extreloff", Field, 0}, + {"DysymtabCmd.Iextdefsym", Field, 0}, + {"DysymtabCmd.Ilocalsym", Field, 0}, + {"DysymtabCmd.Indirectsymoff", Field, 0}, + {"DysymtabCmd.Iundefsym", Field, 0}, + {"DysymtabCmd.Len", Field, 0}, + {"DysymtabCmd.Locreloff", Field, 0}, + {"DysymtabCmd.Modtaboff", Field, 0}, + {"DysymtabCmd.Nextdefsym", Field, 0}, + {"DysymtabCmd.Nextrefsyms", Field, 0}, + {"DysymtabCmd.Nextrel", Field, 0}, + {"DysymtabCmd.Nindirectsyms", Field, 0}, + {"DysymtabCmd.Nlocalsym", Field, 0}, + {"DysymtabCmd.Nlocrel", Field, 0}, + {"DysymtabCmd.Nmodtab", Field, 0}, + {"DysymtabCmd.Ntoc", Field, 0}, + {"DysymtabCmd.Nundefsym", Field, 0}, + {"DysymtabCmd.Tocoffset", Field, 0}, + {"ErrNotFat", Var, 3}, + {"FatArch", Type, 3}, + {"FatArch.FatArchHeader", Field, 3}, + {"FatArch.File", Field, 3}, + {"FatArchHeader", Type, 3}, + {"FatArchHeader.Align", Field, 3}, + {"FatArchHeader.Cpu", Field, 3}, + {"FatArchHeader.Offset", Field, 3}, + {"FatArchHeader.Size", Field, 3}, + {"FatArchHeader.SubCpu", Field, 3}, + {"FatFile", Type, 3}, + {"FatFile.Arches", Field, 3}, + {"FatFile.Magic", Field, 3}, + {"File", Type, 0}, + {"File.ByteOrder", Field, 0}, + {"File.Dysymtab", Field, 0}, + {"File.FileHeader", Field, 0}, + {"File.Loads", Field, 0}, + {"File.Sections", Field, 0}, + {"File.Symtab", Field, 0}, + {"FileHeader", Type, 0}, + {"FileHeader.Cmdsz", Field, 0}, + {"FileHeader.Cpu", Field, 0}, + {"FileHeader.Flags", Field, 0}, + {"FileHeader.Magic", Field, 0}, + {"FileHeader.Ncmd", Field, 0}, + {"FileHeader.SubCpu", Field, 0}, + {"FileHeader.Type", Field, 0}, + {"FlagAllModsBound", Const, 10}, + {"FlagAllowStackExecution", Const, 10}, + {"FlagAppExtensionSafe", Const, 10}, + {"FlagBindAtLoad", Const, 10}, + {"FlagBindsToWeak", Const, 10}, + {"FlagCanonical", Const, 10}, + {"FlagDeadStrippableDylib", Const, 10}, + {"FlagDyldLink", Const, 10}, + {"FlagForceFlat", Const, 10}, + {"FlagHasTLVDescriptors", Const, 10}, + {"FlagIncrLink", Const, 10}, + {"FlagLazyInit", Const, 10}, + {"FlagNoFixPrebinding", Const, 10}, + {"FlagNoHeapExecution", Const, 10}, + {"FlagNoMultiDefs", Const, 10}, + {"FlagNoReexportedDylibs", Const, 10}, + {"FlagNoUndefs", Const, 10}, + {"FlagPIE", Const, 10}, + {"FlagPrebindable", Const, 10}, + {"FlagPrebound", Const, 10}, + {"FlagRootSafe", Const, 10}, + {"FlagSetuidSafe", Const, 10}, + {"FlagSplitSegs", Const, 10}, + {"FlagSubsectionsViaSymbols", Const, 10}, + {"FlagTwoLevel", Const, 10}, + {"FlagWeakDefines", Const, 10}, + {"FormatError", Type, 0}, + {"GENERIC_RELOC_LOCAL_SECTDIFF", Const, 10}, + {"GENERIC_RELOC_PAIR", Const, 10}, + {"GENERIC_RELOC_PB_LA_PTR", Const, 10}, + {"GENERIC_RELOC_SECTDIFF", Const, 10}, + {"GENERIC_RELOC_TLV", Const, 10}, + {"GENERIC_RELOC_VANILLA", Const, 10}, + {"Load", Type, 0}, + {"LoadBytes", Type, 0}, + {"LoadCmd", Type, 0}, + {"LoadCmdDylib", Const, 0}, + {"LoadCmdDylinker", Const, 0}, + {"LoadCmdDysymtab", Const, 0}, + {"LoadCmdRpath", Const, 10}, + {"LoadCmdSegment", Const, 0}, + {"LoadCmdSegment64", Const, 0}, + {"LoadCmdSymtab", Const, 0}, + {"LoadCmdThread", Const, 0}, + {"LoadCmdUnixThread", Const, 
0}, + {"Magic32", Const, 0}, + {"Magic64", Const, 0}, + {"MagicFat", Const, 3}, + {"NewFatFile", Func, 3}, + {"NewFile", Func, 0}, + {"Nlist32", Type, 0}, + {"Nlist32.Desc", Field, 0}, + {"Nlist32.Name", Field, 0}, + {"Nlist32.Sect", Field, 0}, + {"Nlist32.Type", Field, 0}, + {"Nlist32.Value", Field, 0}, + {"Nlist64", Type, 0}, + {"Nlist64.Desc", Field, 0}, + {"Nlist64.Name", Field, 0}, + {"Nlist64.Sect", Field, 0}, + {"Nlist64.Type", Field, 0}, + {"Nlist64.Value", Field, 0}, + {"Open", Func, 0}, + {"OpenFat", Func, 3}, + {"Regs386", Type, 0}, + {"Regs386.AX", Field, 0}, + {"Regs386.BP", Field, 0}, + {"Regs386.BX", Field, 0}, + {"Regs386.CS", Field, 0}, + {"Regs386.CX", Field, 0}, + {"Regs386.DI", Field, 0}, + {"Regs386.DS", Field, 0}, + {"Regs386.DX", Field, 0}, + {"Regs386.ES", Field, 0}, + {"Regs386.FLAGS", Field, 0}, + {"Regs386.FS", Field, 0}, + {"Regs386.GS", Field, 0}, + {"Regs386.IP", Field, 0}, + {"Regs386.SI", Field, 0}, + {"Regs386.SP", Field, 0}, + {"Regs386.SS", Field, 0}, + {"RegsAMD64", Type, 0}, + {"RegsAMD64.AX", Field, 0}, + {"RegsAMD64.BP", Field, 0}, + {"RegsAMD64.BX", Field, 0}, + {"RegsAMD64.CS", Field, 0}, + {"RegsAMD64.CX", Field, 0}, + {"RegsAMD64.DI", Field, 0}, + {"RegsAMD64.DX", Field, 0}, + {"RegsAMD64.FLAGS", Field, 0}, + {"RegsAMD64.FS", Field, 0}, + {"RegsAMD64.GS", Field, 0}, + {"RegsAMD64.IP", Field, 0}, + {"RegsAMD64.R10", Field, 0}, + {"RegsAMD64.R11", Field, 0}, + {"RegsAMD64.R12", Field, 0}, + {"RegsAMD64.R13", Field, 0}, + {"RegsAMD64.R14", Field, 0}, + {"RegsAMD64.R15", Field, 0}, + {"RegsAMD64.R8", Field, 0}, + {"RegsAMD64.R9", Field, 0}, + {"RegsAMD64.SI", Field, 0}, + {"RegsAMD64.SP", Field, 0}, + {"Reloc", Type, 10}, + {"Reloc.Addr", Field, 10}, + {"Reloc.Extern", Field, 10}, + {"Reloc.Len", Field, 10}, + {"Reloc.Pcrel", Field, 10}, + {"Reloc.Scattered", Field, 10}, + {"Reloc.Type", Field, 10}, + {"Reloc.Value", Field, 10}, + {"RelocTypeARM", Type, 10}, + {"RelocTypeARM64", Type, 10}, + {"RelocTypeGeneric", Type, 10}, + {"RelocTypeX86_64", Type, 10}, + {"Rpath", Type, 10}, + {"Rpath.LoadBytes", Field, 10}, + {"Rpath.Path", Field, 10}, + {"RpathCmd", Type, 10}, + {"RpathCmd.Cmd", Field, 10}, + {"RpathCmd.Len", Field, 10}, + {"RpathCmd.Path", Field, 10}, + {"Section", Type, 0}, + {"Section.ReaderAt", Field, 0}, + {"Section.Relocs", Field, 10}, + {"Section.SectionHeader", Field, 0}, + {"Section32", Type, 0}, + {"Section32.Addr", Field, 0}, + {"Section32.Align", Field, 0}, + {"Section32.Flags", Field, 0}, + {"Section32.Name", Field, 0}, + {"Section32.Nreloc", Field, 0}, + {"Section32.Offset", Field, 0}, + {"Section32.Reloff", Field, 0}, + {"Section32.Reserve1", Field, 0}, + {"Section32.Reserve2", Field, 0}, + {"Section32.Seg", Field, 0}, + {"Section32.Size", Field, 0}, + {"Section64", Type, 0}, + {"Section64.Addr", Field, 0}, + {"Section64.Align", Field, 0}, + {"Section64.Flags", Field, 0}, + {"Section64.Name", Field, 0}, + {"Section64.Nreloc", Field, 0}, + {"Section64.Offset", Field, 0}, + {"Section64.Reloff", Field, 0}, + {"Section64.Reserve1", Field, 0}, + {"Section64.Reserve2", Field, 0}, + {"Section64.Reserve3", Field, 0}, + {"Section64.Seg", Field, 0}, + {"Section64.Size", Field, 0}, + {"SectionHeader", Type, 0}, + {"SectionHeader.Addr", Field, 0}, + {"SectionHeader.Align", Field, 0}, + {"SectionHeader.Flags", Field, 0}, + {"SectionHeader.Name", Field, 0}, + {"SectionHeader.Nreloc", Field, 0}, + {"SectionHeader.Offset", Field, 0}, + {"SectionHeader.Reloff", Field, 0}, + {"SectionHeader.Seg", Field, 0}, + {"SectionHeader.Size", Field, 0}, + 
{"Segment", Type, 0}, + {"Segment.LoadBytes", Field, 0}, + {"Segment.ReaderAt", Field, 0}, + {"Segment.SegmentHeader", Field, 0}, + {"Segment32", Type, 0}, + {"Segment32.Addr", Field, 0}, + {"Segment32.Cmd", Field, 0}, + {"Segment32.Filesz", Field, 0}, + {"Segment32.Flag", Field, 0}, + {"Segment32.Len", Field, 0}, + {"Segment32.Maxprot", Field, 0}, + {"Segment32.Memsz", Field, 0}, + {"Segment32.Name", Field, 0}, + {"Segment32.Nsect", Field, 0}, + {"Segment32.Offset", Field, 0}, + {"Segment32.Prot", Field, 0}, + {"Segment64", Type, 0}, + {"Segment64.Addr", Field, 0}, + {"Segment64.Cmd", Field, 0}, + {"Segment64.Filesz", Field, 0}, + {"Segment64.Flag", Field, 0}, + {"Segment64.Len", Field, 0}, + {"Segment64.Maxprot", Field, 0}, + {"Segment64.Memsz", Field, 0}, + {"Segment64.Name", Field, 0}, + {"Segment64.Nsect", Field, 0}, + {"Segment64.Offset", Field, 0}, + {"Segment64.Prot", Field, 0}, + {"SegmentHeader", Type, 0}, + {"SegmentHeader.Addr", Field, 0}, + {"SegmentHeader.Cmd", Field, 0}, + {"SegmentHeader.Filesz", Field, 0}, + {"SegmentHeader.Flag", Field, 0}, + {"SegmentHeader.Len", Field, 0}, + {"SegmentHeader.Maxprot", Field, 0}, + {"SegmentHeader.Memsz", Field, 0}, + {"SegmentHeader.Name", Field, 0}, + {"SegmentHeader.Nsect", Field, 0}, + {"SegmentHeader.Offset", Field, 0}, + {"SegmentHeader.Prot", Field, 0}, + {"Symbol", Type, 0}, + {"Symbol.Desc", Field, 0}, + {"Symbol.Name", Field, 0}, + {"Symbol.Sect", Field, 0}, + {"Symbol.Type", Field, 0}, + {"Symbol.Value", Field, 0}, + {"Symtab", Type, 0}, + {"Symtab.LoadBytes", Field, 0}, + {"Symtab.Syms", Field, 0}, + {"Symtab.SymtabCmd", Field, 0}, + {"SymtabCmd", Type, 0}, + {"SymtabCmd.Cmd", Field, 0}, + {"SymtabCmd.Len", Field, 0}, + {"SymtabCmd.Nsyms", Field, 0}, + {"SymtabCmd.Stroff", Field, 0}, + {"SymtabCmd.Strsize", Field, 0}, + {"SymtabCmd.Symoff", Field, 0}, + {"Thread", Type, 0}, + {"Thread.Cmd", Field, 0}, + {"Thread.Data", Field, 0}, + {"Thread.Len", Field, 0}, + {"Thread.Type", Field, 0}, + {"Type", Type, 0}, + {"TypeBundle", Const, 3}, + {"TypeDylib", Const, 3}, + {"TypeExec", Const, 0}, + {"TypeObj", Const, 0}, + {"X86_64_RELOC_BRANCH", Const, 10}, + {"X86_64_RELOC_GOT", Const, 10}, + {"X86_64_RELOC_GOT_LOAD", Const, 10}, + {"X86_64_RELOC_SIGNED", Const, 10}, + {"X86_64_RELOC_SIGNED_1", Const, 10}, + {"X86_64_RELOC_SIGNED_2", Const, 10}, + {"X86_64_RELOC_SIGNED_4", Const, 10}, + {"X86_64_RELOC_SUBTRACTOR", Const, 10}, + {"X86_64_RELOC_TLV", Const, 10}, + {"X86_64_RELOC_UNSIGNED", Const, 10}, + }, + "debug/pe": { + {"(*COFFSymbol).FullName", Method, 8}, + {"(*File).COFFSymbolReadSectionDefAux", Method, 19}, + {"(*File).Close", Method, 0}, + {"(*File).DWARF", Method, 0}, + {"(*File).ImportedLibraries", Method, 0}, + {"(*File).ImportedSymbols", Method, 0}, + {"(*File).Section", Method, 0}, + {"(*FormatError).Error", Method, 0}, + {"(*Section).Data", Method, 0}, + {"(*Section).Open", Method, 0}, + {"(Section).ReadAt", Method, 0}, + {"(StringTable).String", Method, 8}, + {"COFFSymbol", Type, 1}, + {"COFFSymbol.Name", Field, 1}, + {"COFFSymbol.NumberOfAuxSymbols", Field, 1}, + {"COFFSymbol.SectionNumber", Field, 1}, + {"COFFSymbol.StorageClass", Field, 1}, + {"COFFSymbol.Type", Field, 1}, + {"COFFSymbol.Value", Field, 1}, + {"COFFSymbolAuxFormat5", Type, 19}, + {"COFFSymbolAuxFormat5.Checksum", Field, 19}, + {"COFFSymbolAuxFormat5.NumLineNumbers", Field, 19}, + {"COFFSymbolAuxFormat5.NumRelocs", Field, 19}, + {"COFFSymbolAuxFormat5.SecNum", Field, 19}, + {"COFFSymbolAuxFormat5.Selection", Field, 19}, + {"COFFSymbolAuxFormat5.Size", 
Field, 19}, + {"COFFSymbolSize", Const, 1}, + {"DataDirectory", Type, 3}, + {"DataDirectory.Size", Field, 3}, + {"DataDirectory.VirtualAddress", Field, 3}, + {"File", Type, 0}, + {"File.COFFSymbols", Field, 8}, + {"File.FileHeader", Field, 0}, + {"File.OptionalHeader", Field, 3}, + {"File.Sections", Field, 0}, + {"File.StringTable", Field, 8}, + {"File.Symbols", Field, 1}, + {"FileHeader", Type, 0}, + {"FileHeader.Characteristics", Field, 0}, + {"FileHeader.Machine", Field, 0}, + {"FileHeader.NumberOfSections", Field, 0}, + {"FileHeader.NumberOfSymbols", Field, 0}, + {"FileHeader.PointerToSymbolTable", Field, 0}, + {"FileHeader.SizeOfOptionalHeader", Field, 0}, + {"FileHeader.TimeDateStamp", Field, 0}, + {"FormatError", Type, 0}, + {"IMAGE_COMDAT_SELECT_ANY", Const, 19}, + {"IMAGE_COMDAT_SELECT_ASSOCIATIVE", Const, 19}, + {"IMAGE_COMDAT_SELECT_EXACT_MATCH", Const, 19}, + {"IMAGE_COMDAT_SELECT_LARGEST", Const, 19}, + {"IMAGE_COMDAT_SELECT_NODUPLICATES", Const, 19}, + {"IMAGE_COMDAT_SELECT_SAME_SIZE", Const, 19}, + {"IMAGE_DIRECTORY_ENTRY_ARCHITECTURE", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_BASERELOC", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_DEBUG", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_EXCEPTION", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_EXPORT", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_GLOBALPTR", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_IAT", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_IMPORT", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_RESOURCE", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_SECURITY", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_TLS", Const, 11}, + {"IMAGE_DLLCHARACTERISTICS_APPCONTAINER", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_GUARD_CF", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_NO_BIND", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_NO_ISOLATION", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_NO_SEH", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_NX_COMPAT", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_WDM_DRIVER", Const, 15}, + {"IMAGE_FILE_32BIT_MACHINE", Const, 15}, + {"IMAGE_FILE_AGGRESIVE_WS_TRIM", Const, 15}, + {"IMAGE_FILE_BYTES_REVERSED_HI", Const, 15}, + {"IMAGE_FILE_BYTES_REVERSED_LO", Const, 15}, + {"IMAGE_FILE_DEBUG_STRIPPED", Const, 15}, + {"IMAGE_FILE_DLL", Const, 15}, + {"IMAGE_FILE_EXECUTABLE_IMAGE", Const, 15}, + {"IMAGE_FILE_LARGE_ADDRESS_AWARE", Const, 15}, + {"IMAGE_FILE_LINE_NUMS_STRIPPED", Const, 15}, + {"IMAGE_FILE_LOCAL_SYMS_STRIPPED", Const, 15}, + {"IMAGE_FILE_MACHINE_AM33", Const, 0}, + {"IMAGE_FILE_MACHINE_AMD64", Const, 0}, + {"IMAGE_FILE_MACHINE_ARM", Const, 0}, + {"IMAGE_FILE_MACHINE_ARM64", Const, 11}, + {"IMAGE_FILE_MACHINE_ARMNT", Const, 12}, + {"IMAGE_FILE_MACHINE_EBC", Const, 0}, + {"IMAGE_FILE_MACHINE_I386", Const, 0}, + {"IMAGE_FILE_MACHINE_IA64", Const, 0}, + {"IMAGE_FILE_MACHINE_LOONGARCH32", Const, 19}, + {"IMAGE_FILE_MACHINE_LOONGARCH64", Const, 19}, + {"IMAGE_FILE_MACHINE_M32R", Const, 0}, + {"IMAGE_FILE_MACHINE_MIPS16", Const, 0}, + {"IMAGE_FILE_MACHINE_MIPSFPU", Const, 0}, + {"IMAGE_FILE_MACHINE_MIPSFPU16", Const, 0}, + {"IMAGE_FILE_MACHINE_POWERPC", Const, 0}, + {"IMAGE_FILE_MACHINE_POWERPCFP", Const, 0}, + 
{"IMAGE_FILE_MACHINE_R4000", Const, 0}, + {"IMAGE_FILE_MACHINE_RISCV128", Const, 20}, + {"IMAGE_FILE_MACHINE_RISCV32", Const, 20}, + {"IMAGE_FILE_MACHINE_RISCV64", Const, 20}, + {"IMAGE_FILE_MACHINE_SH3", Const, 0}, + {"IMAGE_FILE_MACHINE_SH3DSP", Const, 0}, + {"IMAGE_FILE_MACHINE_SH4", Const, 0}, + {"IMAGE_FILE_MACHINE_SH5", Const, 0}, + {"IMAGE_FILE_MACHINE_THUMB", Const, 0}, + {"IMAGE_FILE_MACHINE_UNKNOWN", Const, 0}, + {"IMAGE_FILE_MACHINE_WCEMIPSV2", Const, 0}, + {"IMAGE_FILE_NET_RUN_FROM_SWAP", Const, 15}, + {"IMAGE_FILE_RELOCS_STRIPPED", Const, 15}, + {"IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP", Const, 15}, + {"IMAGE_FILE_SYSTEM", Const, 15}, + {"IMAGE_FILE_UP_SYSTEM_ONLY", Const, 15}, + {"IMAGE_SCN_CNT_CODE", Const, 19}, + {"IMAGE_SCN_CNT_INITIALIZED_DATA", Const, 19}, + {"IMAGE_SCN_CNT_UNINITIALIZED_DATA", Const, 19}, + {"IMAGE_SCN_LNK_COMDAT", Const, 19}, + {"IMAGE_SCN_MEM_DISCARDABLE", Const, 19}, + {"IMAGE_SCN_MEM_EXECUTE", Const, 19}, + {"IMAGE_SCN_MEM_READ", Const, 19}, + {"IMAGE_SCN_MEM_WRITE", Const, 19}, + {"IMAGE_SUBSYSTEM_EFI_APPLICATION", Const, 15}, + {"IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", Const, 15}, + {"IMAGE_SUBSYSTEM_EFI_ROM", Const, 15}, + {"IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER", Const, 15}, + {"IMAGE_SUBSYSTEM_NATIVE", Const, 15}, + {"IMAGE_SUBSYSTEM_NATIVE_WINDOWS", Const, 15}, + {"IMAGE_SUBSYSTEM_OS2_CUI", Const, 15}, + {"IMAGE_SUBSYSTEM_POSIX_CUI", Const, 15}, + {"IMAGE_SUBSYSTEM_UNKNOWN", Const, 15}, + {"IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION", Const, 15}, + {"IMAGE_SUBSYSTEM_WINDOWS_CE_GUI", Const, 15}, + {"IMAGE_SUBSYSTEM_WINDOWS_CUI", Const, 15}, + {"IMAGE_SUBSYSTEM_WINDOWS_GUI", Const, 15}, + {"IMAGE_SUBSYSTEM_XBOX", Const, 15}, + {"ImportDirectory", Type, 0}, + {"ImportDirectory.FirstThunk", Field, 0}, + {"ImportDirectory.ForwarderChain", Field, 0}, + {"ImportDirectory.Name", Field, 0}, + {"ImportDirectory.OriginalFirstThunk", Field, 0}, + {"ImportDirectory.TimeDateStamp", Field, 0}, + {"NewFile", Func, 0}, + {"Open", Func, 0}, + {"OptionalHeader32", Type, 3}, + {"OptionalHeader32.AddressOfEntryPoint", Field, 3}, + {"OptionalHeader32.BaseOfCode", Field, 3}, + {"OptionalHeader32.BaseOfData", Field, 3}, + {"OptionalHeader32.CheckSum", Field, 3}, + {"OptionalHeader32.DataDirectory", Field, 3}, + {"OptionalHeader32.DllCharacteristics", Field, 3}, + {"OptionalHeader32.FileAlignment", Field, 3}, + {"OptionalHeader32.ImageBase", Field, 3}, + {"OptionalHeader32.LoaderFlags", Field, 3}, + {"OptionalHeader32.Magic", Field, 3}, + {"OptionalHeader32.MajorImageVersion", Field, 3}, + {"OptionalHeader32.MajorLinkerVersion", Field, 3}, + {"OptionalHeader32.MajorOperatingSystemVersion", Field, 3}, + {"OptionalHeader32.MajorSubsystemVersion", Field, 3}, + {"OptionalHeader32.MinorImageVersion", Field, 3}, + {"OptionalHeader32.MinorLinkerVersion", Field, 3}, + {"OptionalHeader32.MinorOperatingSystemVersion", Field, 3}, + {"OptionalHeader32.MinorSubsystemVersion", Field, 3}, + {"OptionalHeader32.NumberOfRvaAndSizes", Field, 3}, + {"OptionalHeader32.SectionAlignment", Field, 3}, + {"OptionalHeader32.SizeOfCode", Field, 3}, + {"OptionalHeader32.SizeOfHeaders", Field, 3}, + {"OptionalHeader32.SizeOfHeapCommit", Field, 3}, + {"OptionalHeader32.SizeOfHeapReserve", Field, 3}, + {"OptionalHeader32.SizeOfImage", Field, 3}, + {"OptionalHeader32.SizeOfInitializedData", Field, 3}, + {"OptionalHeader32.SizeOfStackCommit", Field, 3}, + {"OptionalHeader32.SizeOfStackReserve", Field, 3}, + {"OptionalHeader32.SizeOfUninitializedData", Field, 3}, + {"OptionalHeader32.Subsystem", Field, 3}, + 
{"OptionalHeader32.Win32VersionValue", Field, 3}, + {"OptionalHeader64", Type, 3}, + {"OptionalHeader64.AddressOfEntryPoint", Field, 3}, + {"OptionalHeader64.BaseOfCode", Field, 3}, + {"OptionalHeader64.CheckSum", Field, 3}, + {"OptionalHeader64.DataDirectory", Field, 3}, + {"OptionalHeader64.DllCharacteristics", Field, 3}, + {"OptionalHeader64.FileAlignment", Field, 3}, + {"OptionalHeader64.ImageBase", Field, 3}, + {"OptionalHeader64.LoaderFlags", Field, 3}, + {"OptionalHeader64.Magic", Field, 3}, + {"OptionalHeader64.MajorImageVersion", Field, 3}, + {"OptionalHeader64.MajorLinkerVersion", Field, 3}, + {"OptionalHeader64.MajorOperatingSystemVersion", Field, 3}, + {"OptionalHeader64.MajorSubsystemVersion", Field, 3}, + {"OptionalHeader64.MinorImageVersion", Field, 3}, + {"OptionalHeader64.MinorLinkerVersion", Field, 3}, + {"OptionalHeader64.MinorOperatingSystemVersion", Field, 3}, + {"OptionalHeader64.MinorSubsystemVersion", Field, 3}, + {"OptionalHeader64.NumberOfRvaAndSizes", Field, 3}, + {"OptionalHeader64.SectionAlignment", Field, 3}, + {"OptionalHeader64.SizeOfCode", Field, 3}, + {"OptionalHeader64.SizeOfHeaders", Field, 3}, + {"OptionalHeader64.SizeOfHeapCommit", Field, 3}, + {"OptionalHeader64.SizeOfHeapReserve", Field, 3}, + {"OptionalHeader64.SizeOfImage", Field, 3}, + {"OptionalHeader64.SizeOfInitializedData", Field, 3}, + {"OptionalHeader64.SizeOfStackCommit", Field, 3}, + {"OptionalHeader64.SizeOfStackReserve", Field, 3}, + {"OptionalHeader64.SizeOfUninitializedData", Field, 3}, + {"OptionalHeader64.Subsystem", Field, 3}, + {"OptionalHeader64.Win32VersionValue", Field, 3}, + {"Reloc", Type, 8}, + {"Reloc.SymbolTableIndex", Field, 8}, + {"Reloc.Type", Field, 8}, + {"Reloc.VirtualAddress", Field, 8}, + {"Section", Type, 0}, + {"Section.ReaderAt", Field, 0}, + {"Section.Relocs", Field, 8}, + {"Section.SectionHeader", Field, 0}, + {"SectionHeader", Type, 0}, + {"SectionHeader.Characteristics", Field, 0}, + {"SectionHeader.Name", Field, 0}, + {"SectionHeader.NumberOfLineNumbers", Field, 0}, + {"SectionHeader.NumberOfRelocations", Field, 0}, + {"SectionHeader.Offset", Field, 0}, + {"SectionHeader.PointerToLineNumbers", Field, 0}, + {"SectionHeader.PointerToRelocations", Field, 0}, + {"SectionHeader.Size", Field, 0}, + {"SectionHeader.VirtualAddress", Field, 0}, + {"SectionHeader.VirtualSize", Field, 0}, + {"SectionHeader32", Type, 0}, + {"SectionHeader32.Characteristics", Field, 0}, + {"SectionHeader32.Name", Field, 0}, + {"SectionHeader32.NumberOfLineNumbers", Field, 0}, + {"SectionHeader32.NumberOfRelocations", Field, 0}, + {"SectionHeader32.PointerToLineNumbers", Field, 0}, + {"SectionHeader32.PointerToRawData", Field, 0}, + {"SectionHeader32.PointerToRelocations", Field, 0}, + {"SectionHeader32.SizeOfRawData", Field, 0}, + {"SectionHeader32.VirtualAddress", Field, 0}, + {"SectionHeader32.VirtualSize", Field, 0}, + {"StringTable", Type, 8}, + {"Symbol", Type, 1}, + {"Symbol.Name", Field, 1}, + {"Symbol.SectionNumber", Field, 1}, + {"Symbol.StorageClass", Field, 1}, + {"Symbol.Type", Field, 1}, + {"Symbol.Value", Field, 1}, + }, + "debug/plan9obj": { + {"(*File).Close", Method, 3}, + {"(*File).Section", Method, 3}, + {"(*File).Symbols", Method, 3}, + {"(*Section).Data", Method, 3}, + {"(*Section).Open", Method, 3}, + {"(Section).ReadAt", Method, 3}, + {"ErrNoSymbols", Var, 18}, + {"File", Type, 3}, + {"File.FileHeader", Field, 3}, + {"File.Sections", Field, 3}, + {"FileHeader", Type, 3}, + {"FileHeader.Bss", Field, 3}, + {"FileHeader.Entry", Field, 3}, + {"FileHeader.HdrSize", 
Field, 4}, + {"FileHeader.LoadAddress", Field, 4}, + {"FileHeader.Magic", Field, 3}, + {"FileHeader.PtrSize", Field, 3}, + {"Magic386", Const, 3}, + {"Magic64", Const, 3}, + {"MagicAMD64", Const, 3}, + {"MagicARM", Const, 3}, + {"NewFile", Func, 3}, + {"Open", Func, 3}, + {"Section", Type, 3}, + {"Section.ReaderAt", Field, 3}, + {"Section.SectionHeader", Field, 3}, + {"SectionHeader", Type, 3}, + {"SectionHeader.Name", Field, 3}, + {"SectionHeader.Offset", Field, 3}, + {"SectionHeader.Size", Field, 3}, + {"Sym", Type, 3}, + {"Sym.Name", Field, 3}, + {"Sym.Type", Field, 3}, + {"Sym.Value", Field, 3}, + }, + "embed": { + {"(FS).Open", Method, 16}, + {"(FS).ReadDir", Method, 16}, + {"(FS).ReadFile", Method, 16}, + {"FS", Type, 16}, + }, + "encoding": { + {"BinaryMarshaler", Type, 2}, + {"BinaryUnmarshaler", Type, 2}, + {"TextMarshaler", Type, 2}, + {"TextUnmarshaler", Type, 2}, + }, + "encoding/ascii85": { + {"(CorruptInputError).Error", Method, 0}, + {"CorruptInputError", Type, 0}, + {"Decode", Func, 0}, + {"Encode", Func, 0}, + {"MaxEncodedLen", Func, 0}, + {"NewDecoder", Func, 0}, + {"NewEncoder", Func, 0}, + }, + "encoding/asn1": { + {"(BitString).At", Method, 0}, + {"(BitString).RightAlign", Method, 0}, + {"(ObjectIdentifier).Equal", Method, 0}, + {"(ObjectIdentifier).String", Method, 3}, + {"(StructuralError).Error", Method, 0}, + {"(SyntaxError).Error", Method, 0}, + {"BitString", Type, 0}, + {"BitString.BitLength", Field, 0}, + {"BitString.Bytes", Field, 0}, + {"ClassApplication", Const, 6}, + {"ClassContextSpecific", Const, 6}, + {"ClassPrivate", Const, 6}, + {"ClassUniversal", Const, 6}, + {"Enumerated", Type, 0}, + {"Flag", Type, 0}, + {"Marshal", Func, 0}, + {"MarshalWithParams", Func, 10}, + {"NullBytes", Var, 9}, + {"NullRawValue", Var, 9}, + {"ObjectIdentifier", Type, 0}, + {"RawContent", Type, 0}, + {"RawValue", Type, 0}, + {"RawValue.Bytes", Field, 0}, + {"RawValue.Class", Field, 0}, + {"RawValue.FullBytes", Field, 0}, + {"RawValue.IsCompound", Field, 0}, + {"RawValue.Tag", Field, 0}, + {"StructuralError", Type, 0}, + {"StructuralError.Msg", Field, 0}, + {"SyntaxError", Type, 0}, + {"SyntaxError.Msg", Field, 0}, + {"TagBMPString", Const, 14}, + {"TagBitString", Const, 6}, + {"TagBoolean", Const, 6}, + {"TagEnum", Const, 6}, + {"TagGeneralString", Const, 6}, + {"TagGeneralizedTime", Const, 6}, + {"TagIA5String", Const, 6}, + {"TagInteger", Const, 6}, + {"TagNull", Const, 9}, + {"TagNumericString", Const, 10}, + {"TagOID", Const, 6}, + {"TagOctetString", Const, 6}, + {"TagPrintableString", Const, 6}, + {"TagSequence", Const, 6}, + {"TagSet", Const, 6}, + {"TagT61String", Const, 6}, + {"TagUTCTime", Const, 6}, + {"TagUTF8String", Const, 6}, + {"Unmarshal", Func, 0}, + {"UnmarshalWithParams", Func, 0}, + }, + "encoding/base32": { + {"(*Encoding).AppendDecode", Method, 22}, + {"(*Encoding).AppendEncode", Method, 22}, + {"(*Encoding).Decode", Method, 0}, + {"(*Encoding).DecodeString", Method, 0}, + {"(*Encoding).DecodedLen", Method, 0}, + {"(*Encoding).Encode", Method, 0}, + {"(*Encoding).EncodeToString", Method, 0}, + {"(*Encoding).EncodedLen", Method, 0}, + {"(CorruptInputError).Error", Method, 0}, + {"(Encoding).WithPadding", Method, 9}, + {"CorruptInputError", Type, 0}, + {"Encoding", Type, 0}, + {"HexEncoding", Var, 0}, + {"NewDecoder", Func, 0}, + {"NewEncoder", Func, 0}, + {"NewEncoding", Func, 0}, + {"NoPadding", Const, 9}, + {"StdEncoding", Var, 0}, + {"StdPadding", Const, 9}, + }, + "encoding/base64": { + {"(*Encoding).AppendDecode", Method, 22}, + 
{"(*Encoding).AppendEncode", Method, 22}, + {"(*Encoding).Decode", Method, 0}, + {"(*Encoding).DecodeString", Method, 0}, + {"(*Encoding).DecodedLen", Method, 0}, + {"(*Encoding).Encode", Method, 0}, + {"(*Encoding).EncodeToString", Method, 0}, + {"(*Encoding).EncodedLen", Method, 0}, + {"(CorruptInputError).Error", Method, 0}, + {"(Encoding).Strict", Method, 8}, + {"(Encoding).WithPadding", Method, 5}, + {"CorruptInputError", Type, 0}, + {"Encoding", Type, 0}, + {"NewDecoder", Func, 0}, + {"NewEncoder", Func, 0}, + {"NewEncoding", Func, 0}, + {"NoPadding", Const, 5}, + {"RawStdEncoding", Var, 5}, + {"RawURLEncoding", Var, 5}, + {"StdEncoding", Var, 0}, + {"StdPadding", Const, 5}, + {"URLEncoding", Var, 0}, + }, + "encoding/binary": { + {"AppendByteOrder", Type, 19}, + {"AppendUvarint", Func, 19}, + {"AppendVarint", Func, 19}, + {"BigEndian", Var, 0}, + {"ByteOrder", Type, 0}, + {"LittleEndian", Var, 0}, + {"MaxVarintLen16", Const, 0}, + {"MaxVarintLen32", Const, 0}, + {"MaxVarintLen64", Const, 0}, + {"NativeEndian", Var, 21}, + {"PutUvarint", Func, 0}, + {"PutVarint", Func, 0}, + {"Read", Func, 0}, + {"ReadUvarint", Func, 0}, + {"ReadVarint", Func, 0}, + {"Size", Func, 0}, + {"Uvarint", Func, 0}, + {"Varint", Func, 0}, + {"Write", Func, 0}, + }, + "encoding/csv": { + {"(*ParseError).Error", Method, 0}, + {"(*ParseError).Unwrap", Method, 13}, + {"(*Reader).FieldPos", Method, 17}, + {"(*Reader).InputOffset", Method, 19}, + {"(*Reader).Read", Method, 0}, + {"(*Reader).ReadAll", Method, 0}, + {"(*Writer).Error", Method, 1}, + {"(*Writer).Flush", Method, 0}, + {"(*Writer).Write", Method, 0}, + {"(*Writer).WriteAll", Method, 0}, + {"ErrBareQuote", Var, 0}, + {"ErrFieldCount", Var, 0}, + {"ErrQuote", Var, 0}, + {"ErrTrailingComma", Var, 0}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"ParseError", Type, 0}, + {"ParseError.Column", Field, 0}, + {"ParseError.Err", Field, 0}, + {"ParseError.Line", Field, 0}, + {"ParseError.StartLine", Field, 10}, + {"Reader", Type, 0}, + {"Reader.Comma", Field, 0}, + {"Reader.Comment", Field, 0}, + {"Reader.FieldsPerRecord", Field, 0}, + {"Reader.LazyQuotes", Field, 0}, + {"Reader.ReuseRecord", Field, 9}, + {"Reader.TrailingComma", Field, 0}, + {"Reader.TrimLeadingSpace", Field, 0}, + {"Writer", Type, 0}, + {"Writer.Comma", Field, 0}, + {"Writer.UseCRLF", Field, 0}, + }, + "encoding/gob": { + {"(*Decoder).Decode", Method, 0}, + {"(*Decoder).DecodeValue", Method, 0}, + {"(*Encoder).Encode", Method, 0}, + {"(*Encoder).EncodeValue", Method, 0}, + {"CommonType", Type, 0}, + {"CommonType.Id", Field, 0}, + {"CommonType.Name", Field, 0}, + {"Decoder", Type, 0}, + {"Encoder", Type, 0}, + {"GobDecoder", Type, 0}, + {"GobEncoder", Type, 0}, + {"NewDecoder", Func, 0}, + {"NewEncoder", Func, 0}, + {"Register", Func, 0}, + {"RegisterName", Func, 0}, + }, + "encoding/hex": { + {"(InvalidByteError).Error", Method, 0}, + {"AppendDecode", Func, 22}, + {"AppendEncode", Func, 22}, + {"Decode", Func, 0}, + {"DecodeString", Func, 0}, + {"DecodedLen", Func, 0}, + {"Dump", Func, 0}, + {"Dumper", Func, 0}, + {"Encode", Func, 0}, + {"EncodeToString", Func, 0}, + {"EncodedLen", Func, 0}, + {"ErrLength", Var, 0}, + {"InvalidByteError", Type, 0}, + {"NewDecoder", Func, 10}, + {"NewEncoder", Func, 10}, + }, + "encoding/json": { + {"(*Decoder).Buffered", Method, 1}, + {"(*Decoder).Decode", Method, 0}, + {"(*Decoder).DisallowUnknownFields", Method, 10}, + {"(*Decoder).InputOffset", Method, 14}, + {"(*Decoder).More", Method, 5}, + {"(*Decoder).Token", Method, 5}, + 
{"(*Decoder).UseNumber", Method, 1}, + {"(*Encoder).Encode", Method, 0}, + {"(*Encoder).SetEscapeHTML", Method, 7}, + {"(*Encoder).SetIndent", Method, 7}, + {"(*InvalidUTF8Error).Error", Method, 0}, + {"(*InvalidUnmarshalError).Error", Method, 0}, + {"(*MarshalerError).Error", Method, 0}, + {"(*MarshalerError).Unwrap", Method, 13}, + {"(*RawMessage).MarshalJSON", Method, 0}, + {"(*RawMessage).UnmarshalJSON", Method, 0}, + {"(*SyntaxError).Error", Method, 0}, + {"(*UnmarshalFieldError).Error", Method, 0}, + {"(*UnmarshalTypeError).Error", Method, 0}, + {"(*UnsupportedTypeError).Error", Method, 0}, + {"(*UnsupportedValueError).Error", Method, 0}, + {"(Delim).String", Method, 5}, + {"(Number).Float64", Method, 1}, + {"(Number).Int64", Method, 1}, + {"(Number).String", Method, 1}, + {"(RawMessage).MarshalJSON", Method, 8}, + {"Compact", Func, 0}, + {"Decoder", Type, 0}, + {"Delim", Type, 5}, + {"Encoder", Type, 0}, + {"HTMLEscape", Func, 0}, + {"Indent", Func, 0}, + {"InvalidUTF8Error", Type, 0}, + {"InvalidUTF8Error.S", Field, 0}, + {"InvalidUnmarshalError", Type, 0}, + {"InvalidUnmarshalError.Type", Field, 0}, + {"Marshal", Func, 0}, + {"MarshalIndent", Func, 0}, + {"Marshaler", Type, 0}, + {"MarshalerError", Type, 0}, + {"MarshalerError.Err", Field, 0}, + {"MarshalerError.Type", Field, 0}, + {"NewDecoder", Func, 0}, + {"NewEncoder", Func, 0}, + {"Number", Type, 1}, + {"RawMessage", Type, 0}, + {"SyntaxError", Type, 0}, + {"SyntaxError.Offset", Field, 0}, + {"Token", Type, 5}, + {"Unmarshal", Func, 0}, + {"UnmarshalFieldError", Type, 0}, + {"UnmarshalFieldError.Field", Field, 0}, + {"UnmarshalFieldError.Key", Field, 0}, + {"UnmarshalFieldError.Type", Field, 0}, + {"UnmarshalTypeError", Type, 0}, + {"UnmarshalTypeError.Field", Field, 8}, + {"UnmarshalTypeError.Offset", Field, 5}, + {"UnmarshalTypeError.Struct", Field, 8}, + {"UnmarshalTypeError.Type", Field, 0}, + {"UnmarshalTypeError.Value", Field, 0}, + {"Unmarshaler", Type, 0}, + {"UnsupportedTypeError", Type, 0}, + {"UnsupportedTypeError.Type", Field, 0}, + {"UnsupportedValueError", Type, 0}, + {"UnsupportedValueError.Str", Field, 0}, + {"UnsupportedValueError.Value", Field, 0}, + {"Valid", Func, 9}, + }, + "encoding/pem": { + {"Block", Type, 0}, + {"Block.Bytes", Field, 0}, + {"Block.Headers", Field, 0}, + {"Block.Type", Field, 0}, + {"Decode", Func, 0}, + {"Encode", Func, 0}, + {"EncodeToMemory", Func, 0}, + }, + "encoding/xml": { + {"(*Decoder).Decode", Method, 0}, + {"(*Decoder).DecodeElement", Method, 0}, + {"(*Decoder).InputOffset", Method, 4}, + {"(*Decoder).InputPos", Method, 19}, + {"(*Decoder).RawToken", Method, 0}, + {"(*Decoder).Skip", Method, 0}, + {"(*Decoder).Token", Method, 0}, + {"(*Encoder).Close", Method, 20}, + {"(*Encoder).Encode", Method, 0}, + {"(*Encoder).EncodeElement", Method, 2}, + {"(*Encoder).EncodeToken", Method, 2}, + {"(*Encoder).Flush", Method, 2}, + {"(*Encoder).Indent", Method, 1}, + {"(*SyntaxError).Error", Method, 0}, + {"(*TagPathError).Error", Method, 0}, + {"(*UnsupportedTypeError).Error", Method, 0}, + {"(CharData).Copy", Method, 0}, + {"(Comment).Copy", Method, 0}, + {"(Directive).Copy", Method, 0}, + {"(ProcInst).Copy", Method, 0}, + {"(StartElement).Copy", Method, 0}, + {"(StartElement).End", Method, 2}, + {"(UnmarshalError).Error", Method, 0}, + {"Attr", Type, 0}, + {"Attr.Name", Field, 0}, + {"Attr.Value", Field, 0}, + {"CharData", Type, 0}, + {"Comment", Type, 0}, + {"CopyToken", Func, 0}, + {"Decoder", Type, 0}, + {"Decoder.AutoClose", Field, 0}, + {"Decoder.CharsetReader", Field, 0}, + 
{"Decoder.DefaultSpace", Field, 1}, + {"Decoder.Entity", Field, 0}, + {"Decoder.Strict", Field, 0}, + {"Directive", Type, 0}, + {"Encoder", Type, 0}, + {"EndElement", Type, 0}, + {"EndElement.Name", Field, 0}, + {"Escape", Func, 0}, + {"EscapeText", Func, 1}, + {"HTMLAutoClose", Var, 0}, + {"HTMLEntity", Var, 0}, + {"Header", Const, 0}, + {"Marshal", Func, 0}, + {"MarshalIndent", Func, 0}, + {"Marshaler", Type, 2}, + {"MarshalerAttr", Type, 2}, + {"Name", Type, 0}, + {"Name.Local", Field, 0}, + {"Name.Space", Field, 0}, + {"NewDecoder", Func, 0}, + {"NewEncoder", Func, 0}, + {"NewTokenDecoder", Func, 10}, + {"ProcInst", Type, 0}, + {"ProcInst.Inst", Field, 0}, + {"ProcInst.Target", Field, 0}, + {"StartElement", Type, 0}, + {"StartElement.Attr", Field, 0}, + {"StartElement.Name", Field, 0}, + {"SyntaxError", Type, 0}, + {"SyntaxError.Line", Field, 0}, + {"SyntaxError.Msg", Field, 0}, + {"TagPathError", Type, 0}, + {"TagPathError.Field1", Field, 0}, + {"TagPathError.Field2", Field, 0}, + {"TagPathError.Struct", Field, 0}, + {"TagPathError.Tag1", Field, 0}, + {"TagPathError.Tag2", Field, 0}, + {"Token", Type, 0}, + {"TokenReader", Type, 10}, + {"Unmarshal", Func, 0}, + {"UnmarshalError", Type, 0}, + {"Unmarshaler", Type, 2}, + {"UnmarshalerAttr", Type, 2}, + {"UnsupportedTypeError", Type, 0}, + {"UnsupportedTypeError.Type", Field, 0}, + }, + "errors": { + {"As", Func, 13}, + {"ErrUnsupported", Var, 21}, + {"Is", Func, 13}, + {"Join", Func, 20}, + {"New", Func, 0}, + {"Unwrap", Func, 13}, + }, + "expvar": { + {"(*Float).Add", Method, 0}, + {"(*Float).Set", Method, 0}, + {"(*Float).String", Method, 0}, + {"(*Float).Value", Method, 8}, + {"(*Int).Add", Method, 0}, + {"(*Int).Set", Method, 0}, + {"(*Int).String", Method, 0}, + {"(*Int).Value", Method, 8}, + {"(*Map).Add", Method, 0}, + {"(*Map).AddFloat", Method, 0}, + {"(*Map).Delete", Method, 12}, + {"(*Map).Do", Method, 0}, + {"(*Map).Get", Method, 0}, + {"(*Map).Init", Method, 0}, + {"(*Map).Set", Method, 0}, + {"(*Map).String", Method, 0}, + {"(*String).Set", Method, 0}, + {"(*String).String", Method, 0}, + {"(*String).Value", Method, 8}, + {"(Func).String", Method, 0}, + {"(Func).Value", Method, 8}, + {"Do", Func, 0}, + {"Float", Type, 0}, + {"Func", Type, 0}, + {"Get", Func, 0}, + {"Handler", Func, 8}, + {"Int", Type, 0}, + {"KeyValue", Type, 0}, + {"KeyValue.Key", Field, 0}, + {"KeyValue.Value", Field, 0}, + {"Map", Type, 0}, + {"NewFloat", Func, 0}, + {"NewInt", Func, 0}, + {"NewMap", Func, 0}, + {"NewString", Func, 0}, + {"Publish", Func, 0}, + {"String", Type, 0}, + {"Var", Type, 0}, + }, + "flag": { + {"(*FlagSet).Arg", Method, 0}, + {"(*FlagSet).Args", Method, 0}, + {"(*FlagSet).Bool", Method, 0}, + {"(*FlagSet).BoolFunc", Method, 21}, + {"(*FlagSet).BoolVar", Method, 0}, + {"(*FlagSet).Duration", Method, 0}, + {"(*FlagSet).DurationVar", Method, 0}, + {"(*FlagSet).ErrorHandling", Method, 10}, + {"(*FlagSet).Float64", Method, 0}, + {"(*FlagSet).Float64Var", Method, 0}, + {"(*FlagSet).Func", Method, 16}, + {"(*FlagSet).Init", Method, 0}, + {"(*FlagSet).Int", Method, 0}, + {"(*FlagSet).Int64", Method, 0}, + {"(*FlagSet).Int64Var", Method, 0}, + {"(*FlagSet).IntVar", Method, 0}, + {"(*FlagSet).Lookup", Method, 0}, + {"(*FlagSet).NArg", Method, 0}, + {"(*FlagSet).NFlag", Method, 0}, + {"(*FlagSet).Name", Method, 10}, + {"(*FlagSet).Output", Method, 10}, + {"(*FlagSet).Parse", Method, 0}, + {"(*FlagSet).Parsed", Method, 0}, + {"(*FlagSet).PrintDefaults", Method, 0}, + {"(*FlagSet).Set", Method, 0}, + {"(*FlagSet).SetOutput", Method, 0}, + 
{"(*FlagSet).String", Method, 0}, + {"(*FlagSet).StringVar", Method, 0}, + {"(*FlagSet).TextVar", Method, 19}, + {"(*FlagSet).Uint", Method, 0}, + {"(*FlagSet).Uint64", Method, 0}, + {"(*FlagSet).Uint64Var", Method, 0}, + {"(*FlagSet).UintVar", Method, 0}, + {"(*FlagSet).Var", Method, 0}, + {"(*FlagSet).Visit", Method, 0}, + {"(*FlagSet).VisitAll", Method, 0}, + {"Arg", Func, 0}, + {"Args", Func, 0}, + {"Bool", Func, 0}, + {"BoolFunc", Func, 21}, + {"BoolVar", Func, 0}, + {"CommandLine", Var, 2}, + {"ContinueOnError", Const, 0}, + {"Duration", Func, 0}, + {"DurationVar", Func, 0}, + {"ErrHelp", Var, 0}, + {"ErrorHandling", Type, 0}, + {"ExitOnError", Const, 0}, + {"Flag", Type, 0}, + {"Flag.DefValue", Field, 0}, + {"Flag.Name", Field, 0}, + {"Flag.Usage", Field, 0}, + {"Flag.Value", Field, 0}, + {"FlagSet", Type, 0}, + {"FlagSet.Usage", Field, 0}, + {"Float64", Func, 0}, + {"Float64Var", Func, 0}, + {"Func", Func, 16}, + {"Getter", Type, 2}, + {"Int", Func, 0}, + {"Int64", Func, 0}, + {"Int64Var", Func, 0}, + {"IntVar", Func, 0}, + {"Lookup", Func, 0}, + {"NArg", Func, 0}, + {"NFlag", Func, 0}, + {"NewFlagSet", Func, 0}, + {"PanicOnError", Const, 0}, + {"Parse", Func, 0}, + {"Parsed", Func, 0}, + {"PrintDefaults", Func, 0}, + {"Set", Func, 0}, + {"String", Func, 0}, + {"StringVar", Func, 0}, + {"TextVar", Func, 19}, + {"Uint", Func, 0}, + {"Uint64", Func, 0}, + {"Uint64Var", Func, 0}, + {"UintVar", Func, 0}, + {"UnquoteUsage", Func, 5}, + {"Usage", Var, 0}, + {"Value", Type, 0}, + {"Var", Func, 0}, + {"Visit", Func, 0}, + {"VisitAll", Func, 0}, + }, + "fmt": { + {"Append", Func, 19}, + {"Appendf", Func, 19}, + {"Appendln", Func, 19}, + {"Errorf", Func, 0}, + {"FormatString", Func, 20}, + {"Formatter", Type, 0}, + {"Fprint", Func, 0}, + {"Fprintf", Func, 0}, + {"Fprintln", Func, 0}, + {"Fscan", Func, 0}, + {"Fscanf", Func, 0}, + {"Fscanln", Func, 0}, + {"GoStringer", Type, 0}, + {"Print", Func, 0}, + {"Printf", Func, 0}, + {"Println", Func, 0}, + {"Scan", Func, 0}, + {"ScanState", Type, 0}, + {"Scanf", Func, 0}, + {"Scanln", Func, 0}, + {"Scanner", Type, 0}, + {"Sprint", Func, 0}, + {"Sprintf", Func, 0}, + {"Sprintln", Func, 0}, + {"Sscan", Func, 0}, + {"Sscanf", Func, 0}, + {"Sscanln", Func, 0}, + {"State", Type, 0}, + {"Stringer", Type, 0}, + }, + "go/ast": { + {"(*ArrayType).End", Method, 0}, + {"(*ArrayType).Pos", Method, 0}, + {"(*AssignStmt).End", Method, 0}, + {"(*AssignStmt).Pos", Method, 0}, + {"(*BadDecl).End", Method, 0}, + {"(*BadDecl).Pos", Method, 0}, + {"(*BadExpr).End", Method, 0}, + {"(*BadExpr).Pos", Method, 0}, + {"(*BadStmt).End", Method, 0}, + {"(*BadStmt).Pos", Method, 0}, + {"(*BasicLit).End", Method, 0}, + {"(*BasicLit).Pos", Method, 0}, + {"(*BinaryExpr).End", Method, 0}, + {"(*BinaryExpr).Pos", Method, 0}, + {"(*BlockStmt).End", Method, 0}, + {"(*BlockStmt).Pos", Method, 0}, + {"(*BranchStmt).End", Method, 0}, + {"(*BranchStmt).Pos", Method, 0}, + {"(*CallExpr).End", Method, 0}, + {"(*CallExpr).Pos", Method, 0}, + {"(*CaseClause).End", Method, 0}, + {"(*CaseClause).Pos", Method, 0}, + {"(*ChanType).End", Method, 0}, + {"(*ChanType).Pos", Method, 0}, + {"(*CommClause).End", Method, 0}, + {"(*CommClause).Pos", Method, 0}, + {"(*Comment).End", Method, 0}, + {"(*Comment).Pos", Method, 0}, + {"(*CommentGroup).End", Method, 0}, + {"(*CommentGroup).Pos", Method, 0}, + {"(*CommentGroup).Text", Method, 0}, + {"(*CompositeLit).End", Method, 0}, + {"(*CompositeLit).Pos", Method, 0}, + {"(*DeclStmt).End", Method, 0}, + {"(*DeclStmt).Pos", Method, 0}, + {"(*DeferStmt).End", 
Method, 0}, + {"(*DeferStmt).Pos", Method, 0}, + {"(*Ellipsis).End", Method, 0}, + {"(*Ellipsis).Pos", Method, 0}, + {"(*EmptyStmt).End", Method, 0}, + {"(*EmptyStmt).Pos", Method, 0}, + {"(*ExprStmt).End", Method, 0}, + {"(*ExprStmt).Pos", Method, 0}, + {"(*Field).End", Method, 0}, + {"(*Field).Pos", Method, 0}, + {"(*FieldList).End", Method, 0}, + {"(*FieldList).NumFields", Method, 0}, + {"(*FieldList).Pos", Method, 0}, + {"(*File).End", Method, 0}, + {"(*File).Pos", Method, 0}, + {"(*ForStmt).End", Method, 0}, + {"(*ForStmt).Pos", Method, 0}, + {"(*FuncDecl).End", Method, 0}, + {"(*FuncDecl).Pos", Method, 0}, + {"(*FuncLit).End", Method, 0}, + {"(*FuncLit).Pos", Method, 0}, + {"(*FuncType).End", Method, 0}, + {"(*FuncType).Pos", Method, 0}, + {"(*GenDecl).End", Method, 0}, + {"(*GenDecl).Pos", Method, 0}, + {"(*GoStmt).End", Method, 0}, + {"(*GoStmt).Pos", Method, 0}, + {"(*Ident).End", Method, 0}, + {"(*Ident).IsExported", Method, 0}, + {"(*Ident).Pos", Method, 0}, + {"(*Ident).String", Method, 0}, + {"(*IfStmt).End", Method, 0}, + {"(*IfStmt).Pos", Method, 0}, + {"(*ImportSpec).End", Method, 0}, + {"(*ImportSpec).Pos", Method, 0}, + {"(*IncDecStmt).End", Method, 0}, + {"(*IncDecStmt).Pos", Method, 0}, + {"(*IndexExpr).End", Method, 0}, + {"(*IndexExpr).Pos", Method, 0}, + {"(*IndexListExpr).End", Method, 18}, + {"(*IndexListExpr).Pos", Method, 18}, + {"(*InterfaceType).End", Method, 0}, + {"(*InterfaceType).Pos", Method, 0}, + {"(*KeyValueExpr).End", Method, 0}, + {"(*KeyValueExpr).Pos", Method, 0}, + {"(*LabeledStmt).End", Method, 0}, + {"(*LabeledStmt).Pos", Method, 0}, + {"(*MapType).End", Method, 0}, + {"(*MapType).Pos", Method, 0}, + {"(*Object).Pos", Method, 0}, + {"(*Package).End", Method, 0}, + {"(*Package).Pos", Method, 0}, + {"(*ParenExpr).End", Method, 0}, + {"(*ParenExpr).Pos", Method, 0}, + {"(*RangeStmt).End", Method, 0}, + {"(*RangeStmt).Pos", Method, 0}, + {"(*ReturnStmt).End", Method, 0}, + {"(*ReturnStmt).Pos", Method, 0}, + {"(*Scope).Insert", Method, 0}, + {"(*Scope).Lookup", Method, 0}, + {"(*Scope).String", Method, 0}, + {"(*SelectStmt).End", Method, 0}, + {"(*SelectStmt).Pos", Method, 0}, + {"(*SelectorExpr).End", Method, 0}, + {"(*SelectorExpr).Pos", Method, 0}, + {"(*SendStmt).End", Method, 0}, + {"(*SendStmt).Pos", Method, 0}, + {"(*SliceExpr).End", Method, 0}, + {"(*SliceExpr).Pos", Method, 0}, + {"(*StarExpr).End", Method, 0}, + {"(*StarExpr).Pos", Method, 0}, + {"(*StructType).End", Method, 0}, + {"(*StructType).Pos", Method, 0}, + {"(*SwitchStmt).End", Method, 0}, + {"(*SwitchStmt).Pos", Method, 0}, + {"(*TypeAssertExpr).End", Method, 0}, + {"(*TypeAssertExpr).Pos", Method, 0}, + {"(*TypeSpec).End", Method, 0}, + {"(*TypeSpec).Pos", Method, 0}, + {"(*TypeSwitchStmt).End", Method, 0}, + {"(*TypeSwitchStmt).Pos", Method, 0}, + {"(*UnaryExpr).End", Method, 0}, + {"(*UnaryExpr).Pos", Method, 0}, + {"(*ValueSpec).End", Method, 0}, + {"(*ValueSpec).Pos", Method, 0}, + {"(CommentMap).Comments", Method, 1}, + {"(CommentMap).Filter", Method, 1}, + {"(CommentMap).String", Method, 1}, + {"(CommentMap).Update", Method, 1}, + {"(ObjKind).String", Method, 0}, + {"ArrayType", Type, 0}, + {"ArrayType.Elt", Field, 0}, + {"ArrayType.Lbrack", Field, 0}, + {"ArrayType.Len", Field, 0}, + {"AssignStmt", Type, 0}, + {"AssignStmt.Lhs", Field, 0}, + {"AssignStmt.Rhs", Field, 0}, + {"AssignStmt.Tok", Field, 0}, + {"AssignStmt.TokPos", Field, 0}, + {"Bad", Const, 0}, + {"BadDecl", Type, 0}, + {"BadDecl.From", Field, 0}, + {"BadDecl.To", Field, 0}, + {"BadExpr", Type, 0}, + 
{"BadExpr.From", Field, 0}, + {"BadExpr.To", Field, 0}, + {"BadStmt", Type, 0}, + {"BadStmt.From", Field, 0}, + {"BadStmt.To", Field, 0}, + {"BasicLit", Type, 0}, + {"BasicLit.Kind", Field, 0}, + {"BasicLit.Value", Field, 0}, + {"BasicLit.ValuePos", Field, 0}, + {"BinaryExpr", Type, 0}, + {"BinaryExpr.Op", Field, 0}, + {"BinaryExpr.OpPos", Field, 0}, + {"BinaryExpr.X", Field, 0}, + {"BinaryExpr.Y", Field, 0}, + {"BlockStmt", Type, 0}, + {"BlockStmt.Lbrace", Field, 0}, + {"BlockStmt.List", Field, 0}, + {"BlockStmt.Rbrace", Field, 0}, + {"BranchStmt", Type, 0}, + {"BranchStmt.Label", Field, 0}, + {"BranchStmt.Tok", Field, 0}, + {"BranchStmt.TokPos", Field, 0}, + {"CallExpr", Type, 0}, + {"CallExpr.Args", Field, 0}, + {"CallExpr.Ellipsis", Field, 0}, + {"CallExpr.Fun", Field, 0}, + {"CallExpr.Lparen", Field, 0}, + {"CallExpr.Rparen", Field, 0}, + {"CaseClause", Type, 0}, + {"CaseClause.Body", Field, 0}, + {"CaseClause.Case", Field, 0}, + {"CaseClause.Colon", Field, 0}, + {"CaseClause.List", Field, 0}, + {"ChanDir", Type, 0}, + {"ChanType", Type, 0}, + {"ChanType.Arrow", Field, 1}, + {"ChanType.Begin", Field, 0}, + {"ChanType.Dir", Field, 0}, + {"ChanType.Value", Field, 0}, + {"CommClause", Type, 0}, + {"CommClause.Body", Field, 0}, + {"CommClause.Case", Field, 0}, + {"CommClause.Colon", Field, 0}, + {"CommClause.Comm", Field, 0}, + {"Comment", Type, 0}, + {"Comment.Slash", Field, 0}, + {"Comment.Text", Field, 0}, + {"CommentGroup", Type, 0}, + {"CommentGroup.List", Field, 0}, + {"CommentMap", Type, 1}, + {"CompositeLit", Type, 0}, + {"CompositeLit.Elts", Field, 0}, + {"CompositeLit.Incomplete", Field, 11}, + {"CompositeLit.Lbrace", Field, 0}, + {"CompositeLit.Rbrace", Field, 0}, + {"CompositeLit.Type", Field, 0}, + {"Con", Const, 0}, + {"Decl", Type, 0}, + {"DeclStmt", Type, 0}, + {"DeclStmt.Decl", Field, 0}, + {"DeferStmt", Type, 0}, + {"DeferStmt.Call", Field, 0}, + {"DeferStmt.Defer", Field, 0}, + {"Ellipsis", Type, 0}, + {"Ellipsis.Ellipsis", Field, 0}, + {"Ellipsis.Elt", Field, 0}, + {"EmptyStmt", Type, 0}, + {"EmptyStmt.Implicit", Field, 5}, + {"EmptyStmt.Semicolon", Field, 0}, + {"Expr", Type, 0}, + {"ExprStmt", Type, 0}, + {"ExprStmt.X", Field, 0}, + {"Field", Type, 0}, + {"Field.Comment", Field, 0}, + {"Field.Doc", Field, 0}, + {"Field.Names", Field, 0}, + {"Field.Tag", Field, 0}, + {"Field.Type", Field, 0}, + {"FieldFilter", Type, 0}, + {"FieldList", Type, 0}, + {"FieldList.Closing", Field, 0}, + {"FieldList.List", Field, 0}, + {"FieldList.Opening", Field, 0}, + {"File", Type, 0}, + {"File.Comments", Field, 0}, + {"File.Decls", Field, 0}, + {"File.Doc", Field, 0}, + {"File.FileEnd", Field, 20}, + {"File.FileStart", Field, 20}, + {"File.GoVersion", Field, 21}, + {"File.Imports", Field, 0}, + {"File.Name", Field, 0}, + {"File.Package", Field, 0}, + {"File.Scope", Field, 0}, + {"File.Unresolved", Field, 0}, + {"FileExports", Func, 0}, + {"Filter", Type, 0}, + {"FilterDecl", Func, 0}, + {"FilterFile", Func, 0}, + {"FilterFuncDuplicates", Const, 0}, + {"FilterImportDuplicates", Const, 0}, + {"FilterPackage", Func, 0}, + {"FilterUnassociatedComments", Const, 0}, + {"ForStmt", Type, 0}, + {"ForStmt.Body", Field, 0}, + {"ForStmt.Cond", Field, 0}, + {"ForStmt.For", Field, 0}, + {"ForStmt.Init", Field, 0}, + {"ForStmt.Post", Field, 0}, + {"Fprint", Func, 0}, + {"Fun", Const, 0}, + {"FuncDecl", Type, 0}, + {"FuncDecl.Body", Field, 0}, + {"FuncDecl.Doc", Field, 0}, + {"FuncDecl.Name", Field, 0}, + {"FuncDecl.Recv", Field, 0}, + {"FuncDecl.Type", Field, 0}, + {"FuncLit", Type, 0}, + 
{"FuncLit.Body", Field, 0}, + {"FuncLit.Type", Field, 0}, + {"FuncType", Type, 0}, + {"FuncType.Func", Field, 0}, + {"FuncType.Params", Field, 0}, + {"FuncType.Results", Field, 0}, + {"FuncType.TypeParams", Field, 18}, + {"GenDecl", Type, 0}, + {"GenDecl.Doc", Field, 0}, + {"GenDecl.Lparen", Field, 0}, + {"GenDecl.Rparen", Field, 0}, + {"GenDecl.Specs", Field, 0}, + {"GenDecl.Tok", Field, 0}, + {"GenDecl.TokPos", Field, 0}, + {"GoStmt", Type, 0}, + {"GoStmt.Call", Field, 0}, + {"GoStmt.Go", Field, 0}, + {"Ident", Type, 0}, + {"Ident.Name", Field, 0}, + {"Ident.NamePos", Field, 0}, + {"Ident.Obj", Field, 0}, + {"IfStmt", Type, 0}, + {"IfStmt.Body", Field, 0}, + {"IfStmt.Cond", Field, 0}, + {"IfStmt.Else", Field, 0}, + {"IfStmt.If", Field, 0}, + {"IfStmt.Init", Field, 0}, + {"ImportSpec", Type, 0}, + {"ImportSpec.Comment", Field, 0}, + {"ImportSpec.Doc", Field, 0}, + {"ImportSpec.EndPos", Field, 0}, + {"ImportSpec.Name", Field, 0}, + {"ImportSpec.Path", Field, 0}, + {"Importer", Type, 0}, + {"IncDecStmt", Type, 0}, + {"IncDecStmt.Tok", Field, 0}, + {"IncDecStmt.TokPos", Field, 0}, + {"IncDecStmt.X", Field, 0}, + {"IndexExpr", Type, 0}, + {"IndexExpr.Index", Field, 0}, + {"IndexExpr.Lbrack", Field, 0}, + {"IndexExpr.Rbrack", Field, 0}, + {"IndexExpr.X", Field, 0}, + {"IndexListExpr", Type, 18}, + {"IndexListExpr.Indices", Field, 18}, + {"IndexListExpr.Lbrack", Field, 18}, + {"IndexListExpr.Rbrack", Field, 18}, + {"IndexListExpr.X", Field, 18}, + {"Inspect", Func, 0}, + {"InterfaceType", Type, 0}, + {"InterfaceType.Incomplete", Field, 0}, + {"InterfaceType.Interface", Field, 0}, + {"InterfaceType.Methods", Field, 0}, + {"IsExported", Func, 0}, + {"IsGenerated", Func, 21}, + {"KeyValueExpr", Type, 0}, + {"KeyValueExpr.Colon", Field, 0}, + {"KeyValueExpr.Key", Field, 0}, + {"KeyValueExpr.Value", Field, 0}, + {"LabeledStmt", Type, 0}, + {"LabeledStmt.Colon", Field, 0}, + {"LabeledStmt.Label", Field, 0}, + {"LabeledStmt.Stmt", Field, 0}, + {"Lbl", Const, 0}, + {"MapType", Type, 0}, + {"MapType.Key", Field, 0}, + {"MapType.Map", Field, 0}, + {"MapType.Value", Field, 0}, + {"MergeMode", Type, 0}, + {"MergePackageFiles", Func, 0}, + {"NewCommentMap", Func, 1}, + {"NewIdent", Func, 0}, + {"NewObj", Func, 0}, + {"NewPackage", Func, 0}, + {"NewScope", Func, 0}, + {"Node", Type, 0}, + {"NotNilFilter", Func, 0}, + {"ObjKind", Type, 0}, + {"Object", Type, 0}, + {"Object.Data", Field, 0}, + {"Object.Decl", Field, 0}, + {"Object.Kind", Field, 0}, + {"Object.Name", Field, 0}, + {"Object.Type", Field, 0}, + {"Package", Type, 0}, + {"Package.Files", Field, 0}, + {"Package.Imports", Field, 0}, + {"Package.Name", Field, 0}, + {"Package.Scope", Field, 0}, + {"PackageExports", Func, 0}, + {"ParenExpr", Type, 0}, + {"ParenExpr.Lparen", Field, 0}, + {"ParenExpr.Rparen", Field, 0}, + {"ParenExpr.X", Field, 0}, + {"Pkg", Const, 0}, + {"Print", Func, 0}, + {"RECV", Const, 0}, + {"RangeStmt", Type, 0}, + {"RangeStmt.Body", Field, 0}, + {"RangeStmt.For", Field, 0}, + {"RangeStmt.Key", Field, 0}, + {"RangeStmt.Range", Field, 20}, + {"RangeStmt.Tok", Field, 0}, + {"RangeStmt.TokPos", Field, 0}, + {"RangeStmt.Value", Field, 0}, + {"RangeStmt.X", Field, 0}, + {"ReturnStmt", Type, 0}, + {"ReturnStmt.Results", Field, 0}, + {"ReturnStmt.Return", Field, 0}, + {"SEND", Const, 0}, + {"Scope", Type, 0}, + {"Scope.Objects", Field, 0}, + {"Scope.Outer", Field, 0}, + {"SelectStmt", Type, 0}, + {"SelectStmt.Body", Field, 0}, + {"SelectStmt.Select", Field, 0}, + {"SelectorExpr", Type, 0}, + {"SelectorExpr.Sel", Field, 0}, + 
{"SelectorExpr.X", Field, 0}, + {"SendStmt", Type, 0}, + {"SendStmt.Arrow", Field, 0}, + {"SendStmt.Chan", Field, 0}, + {"SendStmt.Value", Field, 0}, + {"SliceExpr", Type, 0}, + {"SliceExpr.High", Field, 0}, + {"SliceExpr.Lbrack", Field, 0}, + {"SliceExpr.Low", Field, 0}, + {"SliceExpr.Max", Field, 2}, + {"SliceExpr.Rbrack", Field, 0}, + {"SliceExpr.Slice3", Field, 2}, + {"SliceExpr.X", Field, 0}, + {"SortImports", Func, 0}, + {"Spec", Type, 0}, + {"StarExpr", Type, 0}, + {"StarExpr.Star", Field, 0}, + {"StarExpr.X", Field, 0}, + {"Stmt", Type, 0}, + {"StructType", Type, 0}, + {"StructType.Fields", Field, 0}, + {"StructType.Incomplete", Field, 0}, + {"StructType.Struct", Field, 0}, + {"SwitchStmt", Type, 0}, + {"SwitchStmt.Body", Field, 0}, + {"SwitchStmt.Init", Field, 0}, + {"SwitchStmt.Switch", Field, 0}, + {"SwitchStmt.Tag", Field, 0}, + {"Typ", Const, 0}, + {"TypeAssertExpr", Type, 0}, + {"TypeAssertExpr.Lparen", Field, 2}, + {"TypeAssertExpr.Rparen", Field, 2}, + {"TypeAssertExpr.Type", Field, 0}, + {"TypeAssertExpr.X", Field, 0}, + {"TypeSpec", Type, 0}, + {"TypeSpec.Assign", Field, 9}, + {"TypeSpec.Comment", Field, 0}, + {"TypeSpec.Doc", Field, 0}, + {"TypeSpec.Name", Field, 0}, + {"TypeSpec.Type", Field, 0}, + {"TypeSpec.TypeParams", Field, 18}, + {"TypeSwitchStmt", Type, 0}, + {"TypeSwitchStmt.Assign", Field, 0}, + {"TypeSwitchStmt.Body", Field, 0}, + {"TypeSwitchStmt.Init", Field, 0}, + {"TypeSwitchStmt.Switch", Field, 0}, + {"UnaryExpr", Type, 0}, + {"UnaryExpr.Op", Field, 0}, + {"UnaryExpr.OpPos", Field, 0}, + {"UnaryExpr.X", Field, 0}, + {"Unparen", Func, 22}, + {"ValueSpec", Type, 0}, + {"ValueSpec.Comment", Field, 0}, + {"ValueSpec.Doc", Field, 0}, + {"ValueSpec.Names", Field, 0}, + {"ValueSpec.Type", Field, 0}, + {"ValueSpec.Values", Field, 0}, + {"Var", Const, 0}, + {"Visitor", Type, 0}, + {"Walk", Func, 0}, + }, + "go/build": { + {"(*Context).Import", Method, 0}, + {"(*Context).ImportDir", Method, 0}, + {"(*Context).MatchFile", Method, 2}, + {"(*Context).SrcDirs", Method, 0}, + {"(*MultiplePackageError).Error", Method, 4}, + {"(*NoGoError).Error", Method, 0}, + {"(*Package).IsCommand", Method, 0}, + {"AllowBinary", Const, 0}, + {"ArchChar", Func, 0}, + {"Context", Type, 0}, + {"Context.BuildTags", Field, 0}, + {"Context.CgoEnabled", Field, 0}, + {"Context.Compiler", Field, 0}, + {"Context.Dir", Field, 14}, + {"Context.GOARCH", Field, 0}, + {"Context.GOOS", Field, 0}, + {"Context.GOPATH", Field, 0}, + {"Context.GOROOT", Field, 0}, + {"Context.HasSubdir", Field, 0}, + {"Context.InstallSuffix", Field, 1}, + {"Context.IsAbsPath", Field, 0}, + {"Context.IsDir", Field, 0}, + {"Context.JoinPath", Field, 0}, + {"Context.OpenFile", Field, 0}, + {"Context.ReadDir", Field, 0}, + {"Context.ReleaseTags", Field, 1}, + {"Context.SplitPathList", Field, 0}, + {"Context.ToolTags", Field, 17}, + {"Context.UseAllFiles", Field, 0}, + {"Default", Var, 0}, + {"Directive", Type, 21}, + {"Directive.Pos", Field, 21}, + {"Directive.Text", Field, 21}, + {"FindOnly", Const, 0}, + {"IgnoreVendor", Const, 6}, + {"Import", Func, 0}, + {"ImportComment", Const, 4}, + {"ImportDir", Func, 0}, + {"ImportMode", Type, 0}, + {"IsLocalImport", Func, 0}, + {"MultiplePackageError", Type, 4}, + {"MultiplePackageError.Dir", Field, 4}, + {"MultiplePackageError.Files", Field, 4}, + {"MultiplePackageError.Packages", Field, 4}, + {"NoGoError", Type, 0}, + {"NoGoError.Dir", Field, 0}, + {"Package", Type, 0}, + {"Package.AllTags", Field, 2}, + {"Package.BinDir", Field, 0}, + {"Package.BinaryOnly", Field, 7}, + 
{"Package.CFiles", Field, 0}, + {"Package.CXXFiles", Field, 2}, + {"Package.CgoCFLAGS", Field, 0}, + {"Package.CgoCPPFLAGS", Field, 2}, + {"Package.CgoCXXFLAGS", Field, 2}, + {"Package.CgoFFLAGS", Field, 7}, + {"Package.CgoFiles", Field, 0}, + {"Package.CgoLDFLAGS", Field, 0}, + {"Package.CgoPkgConfig", Field, 0}, + {"Package.ConflictDir", Field, 2}, + {"Package.Dir", Field, 0}, + {"Package.Directives", Field, 21}, + {"Package.Doc", Field, 0}, + {"Package.EmbedPatternPos", Field, 16}, + {"Package.EmbedPatterns", Field, 16}, + {"Package.FFiles", Field, 7}, + {"Package.GoFiles", Field, 0}, + {"Package.Goroot", Field, 0}, + {"Package.HFiles", Field, 0}, + {"Package.IgnoredGoFiles", Field, 1}, + {"Package.IgnoredOtherFiles", Field, 16}, + {"Package.ImportComment", Field, 4}, + {"Package.ImportPath", Field, 0}, + {"Package.ImportPos", Field, 0}, + {"Package.Imports", Field, 0}, + {"Package.InvalidGoFiles", Field, 6}, + {"Package.MFiles", Field, 3}, + {"Package.Name", Field, 0}, + {"Package.PkgObj", Field, 0}, + {"Package.PkgRoot", Field, 0}, + {"Package.PkgTargetRoot", Field, 5}, + {"Package.Root", Field, 0}, + {"Package.SFiles", Field, 0}, + {"Package.SrcRoot", Field, 0}, + {"Package.SwigCXXFiles", Field, 1}, + {"Package.SwigFiles", Field, 1}, + {"Package.SysoFiles", Field, 0}, + {"Package.TestDirectives", Field, 21}, + {"Package.TestEmbedPatternPos", Field, 16}, + {"Package.TestEmbedPatterns", Field, 16}, + {"Package.TestGoFiles", Field, 0}, + {"Package.TestImportPos", Field, 0}, + {"Package.TestImports", Field, 0}, + {"Package.XTestDirectives", Field, 21}, + {"Package.XTestEmbedPatternPos", Field, 16}, + {"Package.XTestEmbedPatterns", Field, 16}, + {"Package.XTestGoFiles", Field, 0}, + {"Package.XTestImportPos", Field, 0}, + {"Package.XTestImports", Field, 0}, + {"ToolDir", Var, 0}, + }, + "go/build/constraint": { + {"(*AndExpr).Eval", Method, 16}, + {"(*AndExpr).String", Method, 16}, + {"(*NotExpr).Eval", Method, 16}, + {"(*NotExpr).String", Method, 16}, + {"(*OrExpr).Eval", Method, 16}, + {"(*OrExpr).String", Method, 16}, + {"(*SyntaxError).Error", Method, 16}, + {"(*TagExpr).Eval", Method, 16}, + {"(*TagExpr).String", Method, 16}, + {"AndExpr", Type, 16}, + {"AndExpr.X", Field, 16}, + {"AndExpr.Y", Field, 16}, + {"Expr", Type, 16}, + {"GoVersion", Func, 21}, + {"IsGoBuild", Func, 16}, + {"IsPlusBuild", Func, 16}, + {"NotExpr", Type, 16}, + {"NotExpr.X", Field, 16}, + {"OrExpr", Type, 16}, + {"OrExpr.X", Field, 16}, + {"OrExpr.Y", Field, 16}, + {"Parse", Func, 16}, + {"PlusBuildLines", Func, 16}, + {"SyntaxError", Type, 16}, + {"SyntaxError.Err", Field, 16}, + {"SyntaxError.Offset", Field, 16}, + {"TagExpr", Type, 16}, + {"TagExpr.Tag", Field, 16}, + }, + "go/constant": { + {"(Kind).String", Method, 18}, + {"BinaryOp", Func, 5}, + {"BitLen", Func, 5}, + {"Bool", Const, 5}, + {"BoolVal", Func, 5}, + {"Bytes", Func, 5}, + {"Compare", Func, 5}, + {"Complex", Const, 5}, + {"Denom", Func, 5}, + {"Float", Const, 5}, + {"Float32Val", Func, 5}, + {"Float64Val", Func, 5}, + {"Imag", Func, 5}, + {"Int", Const, 5}, + {"Int64Val", Func, 5}, + {"Kind", Type, 5}, + {"Make", Func, 13}, + {"MakeBool", Func, 5}, + {"MakeFloat64", Func, 5}, + {"MakeFromBytes", Func, 5}, + {"MakeFromLiteral", Func, 5}, + {"MakeImag", Func, 5}, + {"MakeInt64", Func, 5}, + {"MakeString", Func, 5}, + {"MakeUint64", Func, 5}, + {"MakeUnknown", Func, 5}, + {"Num", Func, 5}, + {"Real", Func, 5}, + {"Shift", Func, 5}, + {"Sign", Func, 5}, + {"String", Const, 5}, + {"StringVal", Func, 5}, + {"ToComplex", Func, 6}, + {"ToFloat", 
Func, 6}, + {"ToInt", Func, 6}, + {"Uint64Val", Func, 5}, + {"UnaryOp", Func, 5}, + {"Unknown", Const, 5}, + {"Val", Func, 13}, + {"Value", Type, 5}, + }, + "go/doc": { + {"(*Package).Filter", Method, 0}, + {"(*Package).HTML", Method, 19}, + {"(*Package).Markdown", Method, 19}, + {"(*Package).Parser", Method, 19}, + {"(*Package).Printer", Method, 19}, + {"(*Package).Synopsis", Method, 19}, + {"(*Package).Text", Method, 19}, + {"AllDecls", Const, 0}, + {"AllMethods", Const, 0}, + {"Example", Type, 0}, + {"Example.Code", Field, 0}, + {"Example.Comments", Field, 0}, + {"Example.Doc", Field, 0}, + {"Example.EmptyOutput", Field, 1}, + {"Example.Name", Field, 0}, + {"Example.Order", Field, 1}, + {"Example.Output", Field, 0}, + {"Example.Play", Field, 1}, + {"Example.Suffix", Field, 14}, + {"Example.Unordered", Field, 7}, + {"Examples", Func, 0}, + {"Filter", Type, 0}, + {"Func", Type, 0}, + {"Func.Decl", Field, 0}, + {"Func.Doc", Field, 0}, + {"Func.Examples", Field, 14}, + {"Func.Level", Field, 0}, + {"Func.Name", Field, 0}, + {"Func.Orig", Field, 0}, + {"Func.Recv", Field, 0}, + {"IllegalPrefixes", Var, 1}, + {"IsPredeclared", Func, 8}, + {"Mode", Type, 0}, + {"New", Func, 0}, + {"NewFromFiles", Func, 14}, + {"Note", Type, 1}, + {"Note.Body", Field, 1}, + {"Note.End", Field, 1}, + {"Note.Pos", Field, 1}, + {"Note.UID", Field, 1}, + {"Package", Type, 0}, + {"Package.Bugs", Field, 0}, + {"Package.Consts", Field, 0}, + {"Package.Doc", Field, 0}, + {"Package.Examples", Field, 14}, + {"Package.Filenames", Field, 0}, + {"Package.Funcs", Field, 0}, + {"Package.ImportPath", Field, 0}, + {"Package.Imports", Field, 0}, + {"Package.Name", Field, 0}, + {"Package.Notes", Field, 1}, + {"Package.Types", Field, 0}, + {"Package.Vars", Field, 0}, + {"PreserveAST", Const, 12}, + {"Synopsis", Func, 0}, + {"ToHTML", Func, 0}, + {"ToText", Func, 0}, + {"Type", Type, 0}, + {"Type.Consts", Field, 0}, + {"Type.Decl", Field, 0}, + {"Type.Doc", Field, 0}, + {"Type.Examples", Field, 14}, + {"Type.Funcs", Field, 0}, + {"Type.Methods", Field, 0}, + {"Type.Name", Field, 0}, + {"Type.Vars", Field, 0}, + {"Value", Type, 0}, + {"Value.Decl", Field, 0}, + {"Value.Doc", Field, 0}, + {"Value.Names", Field, 0}, + }, + "go/doc/comment": { + {"(*DocLink).DefaultURL", Method, 19}, + {"(*Heading).DefaultID", Method, 19}, + {"(*List).BlankBefore", Method, 19}, + {"(*List).BlankBetween", Method, 19}, + {"(*Parser).Parse", Method, 19}, + {"(*Printer).Comment", Method, 19}, + {"(*Printer).HTML", Method, 19}, + {"(*Printer).Markdown", Method, 19}, + {"(*Printer).Text", Method, 19}, + {"Block", Type, 19}, + {"Code", Type, 19}, + {"Code.Text", Field, 19}, + {"DefaultLookupPackage", Func, 19}, + {"Doc", Type, 19}, + {"Doc.Content", Field, 19}, + {"Doc.Links", Field, 19}, + {"DocLink", Type, 19}, + {"DocLink.ImportPath", Field, 19}, + {"DocLink.Name", Field, 19}, + {"DocLink.Recv", Field, 19}, + {"DocLink.Text", Field, 19}, + {"Heading", Type, 19}, + {"Heading.Text", Field, 19}, + {"Italic", Type, 19}, + {"Link", Type, 19}, + {"Link.Auto", Field, 19}, + {"Link.Text", Field, 19}, + {"Link.URL", Field, 19}, + {"LinkDef", Type, 19}, + {"LinkDef.Text", Field, 19}, + {"LinkDef.URL", Field, 19}, + {"LinkDef.Used", Field, 19}, + {"List", Type, 19}, + {"List.ForceBlankBefore", Field, 19}, + {"List.ForceBlankBetween", Field, 19}, + {"List.Items", Field, 19}, + {"ListItem", Type, 19}, + {"ListItem.Content", Field, 19}, + {"ListItem.Number", Field, 19}, + {"Paragraph", Type, 19}, + {"Paragraph.Text", Field, 19}, + {"Parser", Type, 19}, + 
{"Parser.LookupPackage", Field, 19}, + {"Parser.LookupSym", Field, 19}, + {"Parser.Words", Field, 19}, + {"Plain", Type, 19}, + {"Printer", Type, 19}, + {"Printer.DocLinkBaseURL", Field, 19}, + {"Printer.DocLinkURL", Field, 19}, + {"Printer.HeadingID", Field, 19}, + {"Printer.HeadingLevel", Field, 19}, + {"Printer.TextCodePrefix", Field, 19}, + {"Printer.TextPrefix", Field, 19}, + {"Printer.TextWidth", Field, 19}, + {"Text", Type, 19}, + }, + "go/format": { + {"Node", Func, 1}, + {"Source", Func, 1}, + }, + "go/importer": { + {"Default", Func, 5}, + {"For", Func, 5}, + {"ForCompiler", Func, 12}, + {"Lookup", Type, 5}, + }, + "go/parser": { + {"AllErrors", Const, 1}, + {"DeclarationErrors", Const, 0}, + {"ImportsOnly", Const, 0}, + {"Mode", Type, 0}, + {"PackageClauseOnly", Const, 0}, + {"ParseComments", Const, 0}, + {"ParseDir", Func, 0}, + {"ParseExpr", Func, 0}, + {"ParseExprFrom", Func, 5}, + {"ParseFile", Func, 0}, + {"SkipObjectResolution", Const, 17}, + {"SpuriousErrors", Const, 0}, + {"Trace", Const, 0}, + }, + "go/printer": { + {"(*Config).Fprint", Method, 0}, + {"CommentedNode", Type, 0}, + {"CommentedNode.Comments", Field, 0}, + {"CommentedNode.Node", Field, 0}, + {"Config", Type, 0}, + {"Config.Indent", Field, 1}, + {"Config.Mode", Field, 0}, + {"Config.Tabwidth", Field, 0}, + {"Fprint", Func, 0}, + {"Mode", Type, 0}, + {"RawFormat", Const, 0}, + {"SourcePos", Const, 0}, + {"TabIndent", Const, 0}, + {"UseSpaces", Const, 0}, + }, + "go/scanner": { + {"(*ErrorList).Add", Method, 0}, + {"(*ErrorList).RemoveMultiples", Method, 0}, + {"(*ErrorList).Reset", Method, 0}, + {"(*Scanner).Init", Method, 0}, + {"(*Scanner).Scan", Method, 0}, + {"(Error).Error", Method, 0}, + {"(ErrorList).Err", Method, 0}, + {"(ErrorList).Error", Method, 0}, + {"(ErrorList).Len", Method, 0}, + {"(ErrorList).Less", Method, 0}, + {"(ErrorList).Sort", Method, 0}, + {"(ErrorList).Swap", Method, 0}, + {"Error", Type, 0}, + {"Error.Msg", Field, 0}, + {"Error.Pos", Field, 0}, + {"ErrorHandler", Type, 0}, + {"ErrorList", Type, 0}, + {"Mode", Type, 0}, + {"PrintError", Func, 0}, + {"ScanComments", Const, 0}, + {"Scanner", Type, 0}, + {"Scanner.ErrorCount", Field, 0}, + }, + "go/token": { + {"(*File).AddLine", Method, 0}, + {"(*File).AddLineColumnInfo", Method, 11}, + {"(*File).AddLineInfo", Method, 0}, + {"(*File).Base", Method, 0}, + {"(*File).Line", Method, 0}, + {"(*File).LineCount", Method, 0}, + {"(*File).LineStart", Method, 12}, + {"(*File).Lines", Method, 21}, + {"(*File).MergeLine", Method, 2}, + {"(*File).Name", Method, 0}, + {"(*File).Offset", Method, 0}, + {"(*File).Pos", Method, 0}, + {"(*File).Position", Method, 0}, + {"(*File).PositionFor", Method, 4}, + {"(*File).SetLines", Method, 0}, + {"(*File).SetLinesForContent", Method, 0}, + {"(*File).Size", Method, 0}, + {"(*FileSet).AddFile", Method, 0}, + {"(*FileSet).Base", Method, 0}, + {"(*FileSet).File", Method, 0}, + {"(*FileSet).Iterate", Method, 0}, + {"(*FileSet).Position", Method, 0}, + {"(*FileSet).PositionFor", Method, 4}, + {"(*FileSet).Read", Method, 0}, + {"(*FileSet).RemoveFile", Method, 20}, + {"(*FileSet).Write", Method, 0}, + {"(*Position).IsValid", Method, 0}, + {"(Pos).IsValid", Method, 0}, + {"(Position).String", Method, 0}, + {"(Token).IsKeyword", Method, 0}, + {"(Token).IsLiteral", Method, 0}, + {"(Token).IsOperator", Method, 0}, + {"(Token).Precedence", Method, 0}, + {"(Token).String", Method, 0}, + {"ADD", Const, 0}, + {"ADD_ASSIGN", Const, 0}, + {"AND", Const, 0}, + {"AND_ASSIGN", Const, 0}, + {"AND_NOT", Const, 0}, + 
{"AND_NOT_ASSIGN", Const, 0}, + {"ARROW", Const, 0}, + {"ASSIGN", Const, 0}, + {"BREAK", Const, 0}, + {"CASE", Const, 0}, + {"CHAN", Const, 0}, + {"CHAR", Const, 0}, + {"COLON", Const, 0}, + {"COMMA", Const, 0}, + {"COMMENT", Const, 0}, + {"CONST", Const, 0}, + {"CONTINUE", Const, 0}, + {"DEC", Const, 0}, + {"DEFAULT", Const, 0}, + {"DEFER", Const, 0}, + {"DEFINE", Const, 0}, + {"ELLIPSIS", Const, 0}, + {"ELSE", Const, 0}, + {"EOF", Const, 0}, + {"EQL", Const, 0}, + {"FALLTHROUGH", Const, 0}, + {"FLOAT", Const, 0}, + {"FOR", Const, 0}, + {"FUNC", Const, 0}, + {"File", Type, 0}, + {"FileSet", Type, 0}, + {"GEQ", Const, 0}, + {"GO", Const, 0}, + {"GOTO", Const, 0}, + {"GTR", Const, 0}, + {"HighestPrec", Const, 0}, + {"IDENT", Const, 0}, + {"IF", Const, 0}, + {"ILLEGAL", Const, 0}, + {"IMAG", Const, 0}, + {"IMPORT", Const, 0}, + {"INC", Const, 0}, + {"INT", Const, 0}, + {"INTERFACE", Const, 0}, + {"IsExported", Func, 13}, + {"IsIdentifier", Func, 13}, + {"IsKeyword", Func, 13}, + {"LAND", Const, 0}, + {"LBRACE", Const, 0}, + {"LBRACK", Const, 0}, + {"LEQ", Const, 0}, + {"LOR", Const, 0}, + {"LPAREN", Const, 0}, + {"LSS", Const, 0}, + {"Lookup", Func, 0}, + {"LowestPrec", Const, 0}, + {"MAP", Const, 0}, + {"MUL", Const, 0}, + {"MUL_ASSIGN", Const, 0}, + {"NEQ", Const, 0}, + {"NOT", Const, 0}, + {"NewFileSet", Func, 0}, + {"NoPos", Const, 0}, + {"OR", Const, 0}, + {"OR_ASSIGN", Const, 0}, + {"PACKAGE", Const, 0}, + {"PERIOD", Const, 0}, + {"Pos", Type, 0}, + {"Position", Type, 0}, + {"Position.Column", Field, 0}, + {"Position.Filename", Field, 0}, + {"Position.Line", Field, 0}, + {"Position.Offset", Field, 0}, + {"QUO", Const, 0}, + {"QUO_ASSIGN", Const, 0}, + {"RANGE", Const, 0}, + {"RBRACE", Const, 0}, + {"RBRACK", Const, 0}, + {"REM", Const, 0}, + {"REM_ASSIGN", Const, 0}, + {"RETURN", Const, 0}, + {"RPAREN", Const, 0}, + {"SELECT", Const, 0}, + {"SEMICOLON", Const, 0}, + {"SHL", Const, 0}, + {"SHL_ASSIGN", Const, 0}, + {"SHR", Const, 0}, + {"SHR_ASSIGN", Const, 0}, + {"STRING", Const, 0}, + {"STRUCT", Const, 0}, + {"SUB", Const, 0}, + {"SUB_ASSIGN", Const, 0}, + {"SWITCH", Const, 0}, + {"TILDE", Const, 18}, + {"TYPE", Const, 0}, + {"Token", Type, 0}, + {"UnaryPrec", Const, 0}, + {"VAR", Const, 0}, + {"XOR", Const, 0}, + {"XOR_ASSIGN", Const, 0}, + }, + "go/types": { + {"(*Alias).Obj", Method, 22}, + {"(*Alias).String", Method, 22}, + {"(*Alias).Underlying", Method, 22}, + {"(*ArgumentError).Error", Method, 18}, + {"(*ArgumentError).Unwrap", Method, 18}, + {"(*Array).Elem", Method, 5}, + {"(*Array).Len", Method, 5}, + {"(*Array).String", Method, 5}, + {"(*Array).Underlying", Method, 5}, + {"(*Basic).Info", Method, 5}, + {"(*Basic).Kind", Method, 5}, + {"(*Basic).Name", Method, 5}, + {"(*Basic).String", Method, 5}, + {"(*Basic).Underlying", Method, 5}, + {"(*Builtin).Exported", Method, 5}, + {"(*Builtin).Id", Method, 5}, + {"(*Builtin).Name", Method, 5}, + {"(*Builtin).Parent", Method, 5}, + {"(*Builtin).Pkg", Method, 5}, + {"(*Builtin).Pos", Method, 5}, + {"(*Builtin).String", Method, 5}, + {"(*Builtin).Type", Method, 5}, + {"(*Chan).Dir", Method, 5}, + {"(*Chan).Elem", Method, 5}, + {"(*Chan).String", Method, 5}, + {"(*Chan).Underlying", Method, 5}, + {"(*Checker).Files", Method, 5}, + {"(*Config).Check", Method, 5}, + {"(*Const).Exported", Method, 5}, + {"(*Const).Id", Method, 5}, + {"(*Const).Name", Method, 5}, + {"(*Const).Parent", Method, 5}, + {"(*Const).Pkg", Method, 5}, + {"(*Const).Pos", Method, 5}, + {"(*Const).String", Method, 5}, + {"(*Const).Type", Method, 5}, + {"(*Const).Val", 
Method, 5}, + {"(*Func).Exported", Method, 5}, + {"(*Func).FullName", Method, 5}, + {"(*Func).Id", Method, 5}, + {"(*Func).Name", Method, 5}, + {"(*Func).Origin", Method, 19}, + {"(*Func).Parent", Method, 5}, + {"(*Func).Pkg", Method, 5}, + {"(*Func).Pos", Method, 5}, + {"(*Func).Scope", Method, 5}, + {"(*Func).String", Method, 5}, + {"(*Func).Type", Method, 5}, + {"(*Info).ObjectOf", Method, 5}, + {"(*Info).PkgNameOf", Method, 22}, + {"(*Info).TypeOf", Method, 5}, + {"(*Initializer).String", Method, 5}, + {"(*Interface).Complete", Method, 5}, + {"(*Interface).Embedded", Method, 5}, + {"(*Interface).EmbeddedType", Method, 11}, + {"(*Interface).Empty", Method, 5}, + {"(*Interface).ExplicitMethod", Method, 5}, + {"(*Interface).IsComparable", Method, 18}, + {"(*Interface).IsImplicit", Method, 18}, + {"(*Interface).IsMethodSet", Method, 18}, + {"(*Interface).MarkImplicit", Method, 18}, + {"(*Interface).Method", Method, 5}, + {"(*Interface).NumEmbeddeds", Method, 5}, + {"(*Interface).NumExplicitMethods", Method, 5}, + {"(*Interface).NumMethods", Method, 5}, + {"(*Interface).String", Method, 5}, + {"(*Interface).Underlying", Method, 5}, + {"(*Label).Exported", Method, 5}, + {"(*Label).Id", Method, 5}, + {"(*Label).Name", Method, 5}, + {"(*Label).Parent", Method, 5}, + {"(*Label).Pkg", Method, 5}, + {"(*Label).Pos", Method, 5}, + {"(*Label).String", Method, 5}, + {"(*Label).Type", Method, 5}, + {"(*Map).Elem", Method, 5}, + {"(*Map).Key", Method, 5}, + {"(*Map).String", Method, 5}, + {"(*Map).Underlying", Method, 5}, + {"(*MethodSet).At", Method, 5}, + {"(*MethodSet).Len", Method, 5}, + {"(*MethodSet).Lookup", Method, 5}, + {"(*MethodSet).String", Method, 5}, + {"(*Named).AddMethod", Method, 5}, + {"(*Named).Method", Method, 5}, + {"(*Named).NumMethods", Method, 5}, + {"(*Named).Obj", Method, 5}, + {"(*Named).Origin", Method, 18}, + {"(*Named).SetTypeParams", Method, 18}, + {"(*Named).SetUnderlying", Method, 5}, + {"(*Named).String", Method, 5}, + {"(*Named).TypeArgs", Method, 18}, + {"(*Named).TypeParams", Method, 18}, + {"(*Named).Underlying", Method, 5}, + {"(*Nil).Exported", Method, 5}, + {"(*Nil).Id", Method, 5}, + {"(*Nil).Name", Method, 5}, + {"(*Nil).Parent", Method, 5}, + {"(*Nil).Pkg", Method, 5}, + {"(*Nil).Pos", Method, 5}, + {"(*Nil).String", Method, 5}, + {"(*Nil).Type", Method, 5}, + {"(*Package).Complete", Method, 5}, + {"(*Package).GoVersion", Method, 21}, + {"(*Package).Imports", Method, 5}, + {"(*Package).MarkComplete", Method, 5}, + {"(*Package).Name", Method, 5}, + {"(*Package).Path", Method, 5}, + {"(*Package).Scope", Method, 5}, + {"(*Package).SetImports", Method, 5}, + {"(*Package).SetName", Method, 6}, + {"(*Package).String", Method, 5}, + {"(*PkgName).Exported", Method, 5}, + {"(*PkgName).Id", Method, 5}, + {"(*PkgName).Imported", Method, 5}, + {"(*PkgName).Name", Method, 5}, + {"(*PkgName).Parent", Method, 5}, + {"(*PkgName).Pkg", Method, 5}, + {"(*PkgName).Pos", Method, 5}, + {"(*PkgName).String", Method, 5}, + {"(*PkgName).Type", Method, 5}, + {"(*Pointer).Elem", Method, 5}, + {"(*Pointer).String", Method, 5}, + {"(*Pointer).Underlying", Method, 5}, + {"(*Scope).Child", Method, 5}, + {"(*Scope).Contains", Method, 5}, + {"(*Scope).End", Method, 5}, + {"(*Scope).Innermost", Method, 5}, + {"(*Scope).Insert", Method, 5}, + {"(*Scope).Len", Method, 5}, + {"(*Scope).Lookup", Method, 5}, + {"(*Scope).LookupParent", Method, 5}, + {"(*Scope).Names", Method, 5}, + {"(*Scope).NumChildren", Method, 5}, + {"(*Scope).Parent", Method, 5}, + {"(*Scope).Pos", Method, 5}, + 
{"(*Scope).String", Method, 5}, + {"(*Scope).WriteTo", Method, 5}, + {"(*Selection).Index", Method, 5}, + {"(*Selection).Indirect", Method, 5}, + {"(*Selection).Kind", Method, 5}, + {"(*Selection).Obj", Method, 5}, + {"(*Selection).Recv", Method, 5}, + {"(*Selection).String", Method, 5}, + {"(*Selection).Type", Method, 5}, + {"(*Signature).Params", Method, 5}, + {"(*Signature).Recv", Method, 5}, + {"(*Signature).RecvTypeParams", Method, 18}, + {"(*Signature).Results", Method, 5}, + {"(*Signature).String", Method, 5}, + {"(*Signature).TypeParams", Method, 18}, + {"(*Signature).Underlying", Method, 5}, + {"(*Signature).Variadic", Method, 5}, + {"(*Slice).Elem", Method, 5}, + {"(*Slice).String", Method, 5}, + {"(*Slice).Underlying", Method, 5}, + {"(*StdSizes).Alignof", Method, 5}, + {"(*StdSizes).Offsetsof", Method, 5}, + {"(*StdSizes).Sizeof", Method, 5}, + {"(*Struct).Field", Method, 5}, + {"(*Struct).NumFields", Method, 5}, + {"(*Struct).String", Method, 5}, + {"(*Struct).Tag", Method, 5}, + {"(*Struct).Underlying", Method, 5}, + {"(*Term).String", Method, 18}, + {"(*Term).Tilde", Method, 18}, + {"(*Term).Type", Method, 18}, + {"(*Tuple).At", Method, 5}, + {"(*Tuple).Len", Method, 5}, + {"(*Tuple).String", Method, 5}, + {"(*Tuple).Underlying", Method, 5}, + {"(*TypeList).At", Method, 18}, + {"(*TypeList).Len", Method, 18}, + {"(*TypeName).Exported", Method, 5}, + {"(*TypeName).Id", Method, 5}, + {"(*TypeName).IsAlias", Method, 9}, + {"(*TypeName).Name", Method, 5}, + {"(*TypeName).Parent", Method, 5}, + {"(*TypeName).Pkg", Method, 5}, + {"(*TypeName).Pos", Method, 5}, + {"(*TypeName).String", Method, 5}, + {"(*TypeName).Type", Method, 5}, + {"(*TypeParam).Constraint", Method, 18}, + {"(*TypeParam).Index", Method, 18}, + {"(*TypeParam).Obj", Method, 18}, + {"(*TypeParam).SetConstraint", Method, 18}, + {"(*TypeParam).String", Method, 18}, + {"(*TypeParam).Underlying", Method, 18}, + {"(*TypeParamList).At", Method, 18}, + {"(*TypeParamList).Len", Method, 18}, + {"(*Union).Len", Method, 18}, + {"(*Union).String", Method, 18}, + {"(*Union).Term", Method, 18}, + {"(*Union).Underlying", Method, 18}, + {"(*Var).Anonymous", Method, 5}, + {"(*Var).Embedded", Method, 11}, + {"(*Var).Exported", Method, 5}, + {"(*Var).Id", Method, 5}, + {"(*Var).IsField", Method, 5}, + {"(*Var).Name", Method, 5}, + {"(*Var).Origin", Method, 19}, + {"(*Var).Parent", Method, 5}, + {"(*Var).Pkg", Method, 5}, + {"(*Var).Pos", Method, 5}, + {"(*Var).String", Method, 5}, + {"(*Var).Type", Method, 5}, + {"(Checker).ObjectOf", Method, 5}, + {"(Checker).PkgNameOf", Method, 22}, + {"(Checker).TypeOf", Method, 5}, + {"(Error).Error", Method, 5}, + {"(TypeAndValue).Addressable", Method, 5}, + {"(TypeAndValue).Assignable", Method, 5}, + {"(TypeAndValue).HasOk", Method, 5}, + {"(TypeAndValue).IsBuiltin", Method, 5}, + {"(TypeAndValue).IsNil", Method, 5}, + {"(TypeAndValue).IsType", Method, 5}, + {"(TypeAndValue).IsValue", Method, 5}, + {"(TypeAndValue).IsVoid", Method, 5}, + {"Alias", Type, 22}, + {"ArgumentError", Type, 18}, + {"ArgumentError.Err", Field, 18}, + {"ArgumentError.Index", Field, 18}, + {"Array", Type, 5}, + {"AssertableTo", Func, 5}, + {"AssignableTo", Func, 5}, + {"Basic", Type, 5}, + {"BasicInfo", Type, 5}, + {"BasicKind", Type, 5}, + {"Bool", Const, 5}, + {"Builtin", Type, 5}, + {"Byte", Const, 5}, + {"Chan", Type, 5}, + {"ChanDir", Type, 5}, + {"CheckExpr", Func, 13}, + {"Checker", Type, 5}, + {"Checker.Info", Field, 5}, + {"Comparable", Func, 5}, + {"Complex128", Const, 5}, + {"Complex64", Const, 5}, + 
{"Config", Type, 5}, + {"Config.Context", Field, 18}, + {"Config.DisableUnusedImportCheck", Field, 5}, + {"Config.Error", Field, 5}, + {"Config.FakeImportC", Field, 5}, + {"Config.GoVersion", Field, 18}, + {"Config.IgnoreFuncBodies", Field, 5}, + {"Config.Importer", Field, 5}, + {"Config.Sizes", Field, 5}, + {"Const", Type, 5}, + {"Context", Type, 18}, + {"ConvertibleTo", Func, 5}, + {"DefPredeclaredTestFuncs", Func, 5}, + {"Default", Func, 8}, + {"Error", Type, 5}, + {"Error.Fset", Field, 5}, + {"Error.Msg", Field, 5}, + {"Error.Pos", Field, 5}, + {"Error.Soft", Field, 5}, + {"Eval", Func, 5}, + {"ExprString", Func, 5}, + {"FieldVal", Const, 5}, + {"Float32", Const, 5}, + {"Float64", Const, 5}, + {"Func", Type, 5}, + {"Id", Func, 5}, + {"Identical", Func, 5}, + {"IdenticalIgnoreTags", Func, 8}, + {"Implements", Func, 5}, + {"ImportMode", Type, 6}, + {"Importer", Type, 5}, + {"ImporterFrom", Type, 6}, + {"Info", Type, 5}, + {"Info.Defs", Field, 5}, + {"Info.FileVersions", Field, 22}, + {"Info.Implicits", Field, 5}, + {"Info.InitOrder", Field, 5}, + {"Info.Instances", Field, 18}, + {"Info.Scopes", Field, 5}, + {"Info.Selections", Field, 5}, + {"Info.Types", Field, 5}, + {"Info.Uses", Field, 5}, + {"Initializer", Type, 5}, + {"Initializer.Lhs", Field, 5}, + {"Initializer.Rhs", Field, 5}, + {"Instance", Type, 18}, + {"Instance.Type", Field, 18}, + {"Instance.TypeArgs", Field, 18}, + {"Instantiate", Func, 18}, + {"Int", Const, 5}, + {"Int16", Const, 5}, + {"Int32", Const, 5}, + {"Int64", Const, 5}, + {"Int8", Const, 5}, + {"Interface", Type, 5}, + {"Invalid", Const, 5}, + {"IsBoolean", Const, 5}, + {"IsComplex", Const, 5}, + {"IsConstType", Const, 5}, + {"IsFloat", Const, 5}, + {"IsInteger", Const, 5}, + {"IsInterface", Func, 5}, + {"IsNumeric", Const, 5}, + {"IsOrdered", Const, 5}, + {"IsString", Const, 5}, + {"IsUnsigned", Const, 5}, + {"IsUntyped", Const, 5}, + {"Label", Type, 5}, + {"LookupFieldOrMethod", Func, 5}, + {"Map", Type, 5}, + {"MethodExpr", Const, 5}, + {"MethodSet", Type, 5}, + {"MethodVal", Const, 5}, + {"MissingMethod", Func, 5}, + {"Named", Type, 5}, + {"NewAlias", Func, 22}, + {"NewArray", Func, 5}, + {"NewChan", Func, 5}, + {"NewChecker", Func, 5}, + {"NewConst", Func, 5}, + {"NewContext", Func, 18}, + {"NewField", Func, 5}, + {"NewFunc", Func, 5}, + {"NewInterface", Func, 5}, + {"NewInterfaceType", Func, 11}, + {"NewLabel", Func, 5}, + {"NewMap", Func, 5}, + {"NewMethodSet", Func, 5}, + {"NewNamed", Func, 5}, + {"NewPackage", Func, 5}, + {"NewParam", Func, 5}, + {"NewPkgName", Func, 5}, + {"NewPointer", Func, 5}, + {"NewScope", Func, 5}, + {"NewSignature", Func, 5}, + {"NewSignatureType", Func, 18}, + {"NewSlice", Func, 5}, + {"NewStruct", Func, 5}, + {"NewTerm", Func, 18}, + {"NewTuple", Func, 5}, + {"NewTypeName", Func, 5}, + {"NewTypeParam", Func, 18}, + {"NewUnion", Func, 18}, + {"NewVar", Func, 5}, + {"Nil", Type, 5}, + {"Object", Type, 5}, + {"ObjectString", Func, 5}, + {"Package", Type, 5}, + {"PkgName", Type, 5}, + {"Pointer", Type, 5}, + {"Qualifier", Type, 5}, + {"RecvOnly", Const, 5}, + {"RelativeTo", Func, 5}, + {"Rune", Const, 5}, + {"Satisfies", Func, 20}, + {"Scope", Type, 5}, + {"Selection", Type, 5}, + {"SelectionKind", Type, 5}, + {"SelectionString", Func, 5}, + {"SendOnly", Const, 5}, + {"SendRecv", Const, 5}, + {"Signature", Type, 5}, + {"Sizes", Type, 5}, + {"SizesFor", Func, 9}, + {"Slice", Type, 5}, + {"StdSizes", Type, 5}, + {"StdSizes.MaxAlign", Field, 5}, + {"StdSizes.WordSize", Field, 5}, + {"String", Const, 5}, + {"Struct", Type, 5}, + {"Term", 
Type, 18}, + {"Tuple", Type, 5}, + {"Typ", Var, 5}, + {"Type", Type, 5}, + {"TypeAndValue", Type, 5}, + {"TypeAndValue.Type", Field, 5}, + {"TypeAndValue.Value", Field, 5}, + {"TypeList", Type, 18}, + {"TypeName", Type, 5}, + {"TypeParam", Type, 18}, + {"TypeParamList", Type, 18}, + {"TypeString", Func, 5}, + {"Uint", Const, 5}, + {"Uint16", Const, 5}, + {"Uint32", Const, 5}, + {"Uint64", Const, 5}, + {"Uint8", Const, 5}, + {"Uintptr", Const, 5}, + {"Unalias", Func, 22}, + {"Union", Type, 18}, + {"Universe", Var, 5}, + {"Unsafe", Var, 5}, + {"UnsafePointer", Const, 5}, + {"UntypedBool", Const, 5}, + {"UntypedComplex", Const, 5}, + {"UntypedFloat", Const, 5}, + {"UntypedInt", Const, 5}, + {"UntypedNil", Const, 5}, + {"UntypedRune", Const, 5}, + {"UntypedString", Const, 5}, + {"Var", Type, 5}, + {"WriteExpr", Func, 5}, + {"WriteSignature", Func, 5}, + {"WriteType", Func, 5}, + }, + "go/version": { + {"Compare", Func, 22}, + {"IsValid", Func, 22}, + {"Lang", Func, 22}, + }, + "hash": { + {"Hash", Type, 0}, + {"Hash32", Type, 0}, + {"Hash64", Type, 0}, + }, + "hash/adler32": { + {"Checksum", Func, 0}, + {"New", Func, 0}, + {"Size", Const, 0}, + }, + "hash/crc32": { + {"Castagnoli", Const, 0}, + {"Checksum", Func, 0}, + {"ChecksumIEEE", Func, 0}, + {"IEEE", Const, 0}, + {"IEEETable", Var, 0}, + {"Koopman", Const, 0}, + {"MakeTable", Func, 0}, + {"New", Func, 0}, + {"NewIEEE", Func, 0}, + {"Size", Const, 0}, + {"Table", Type, 0}, + {"Update", Func, 0}, + }, + "hash/crc64": { + {"Checksum", Func, 0}, + {"ECMA", Const, 0}, + {"ISO", Const, 0}, + {"MakeTable", Func, 0}, + {"New", Func, 0}, + {"Size", Const, 0}, + {"Table", Type, 0}, + {"Update", Func, 0}, + }, + "hash/fnv": { + {"New128", Func, 9}, + {"New128a", Func, 9}, + {"New32", Func, 0}, + {"New32a", Func, 0}, + {"New64", Func, 0}, + {"New64a", Func, 0}, + }, + "hash/maphash": { + {"(*Hash).BlockSize", Method, 14}, + {"(*Hash).Reset", Method, 14}, + {"(*Hash).Seed", Method, 14}, + {"(*Hash).SetSeed", Method, 14}, + {"(*Hash).Size", Method, 14}, + {"(*Hash).Sum", Method, 14}, + {"(*Hash).Sum64", Method, 14}, + {"(*Hash).Write", Method, 14}, + {"(*Hash).WriteByte", Method, 14}, + {"(*Hash).WriteString", Method, 14}, + {"Bytes", Func, 19}, + {"Hash", Type, 14}, + {"MakeSeed", Func, 14}, + {"Seed", Type, 14}, + {"String", Func, 19}, + }, + "html": { + {"EscapeString", Func, 0}, + {"UnescapeString", Func, 0}, + }, + "html/template": { + {"(*Error).Error", Method, 0}, + {"(*Template).AddParseTree", Method, 0}, + {"(*Template).Clone", Method, 0}, + {"(*Template).DefinedTemplates", Method, 6}, + {"(*Template).Delims", Method, 0}, + {"(*Template).Execute", Method, 0}, + {"(*Template).ExecuteTemplate", Method, 0}, + {"(*Template).Funcs", Method, 0}, + {"(*Template).Lookup", Method, 0}, + {"(*Template).Name", Method, 0}, + {"(*Template).New", Method, 0}, + {"(*Template).Option", Method, 5}, + {"(*Template).Parse", Method, 0}, + {"(*Template).ParseFS", Method, 16}, + {"(*Template).ParseFiles", Method, 0}, + {"(*Template).ParseGlob", Method, 0}, + {"(*Template).Templates", Method, 0}, + {"CSS", Type, 0}, + {"ErrAmbigContext", Const, 0}, + {"ErrBadHTML", Const, 0}, + {"ErrBranchEnd", Const, 0}, + {"ErrEndContext", Const, 0}, + {"ErrJSTemplate", Const, 21}, + {"ErrNoSuchTemplate", Const, 0}, + {"ErrOutputContext", Const, 0}, + {"ErrPartialCharset", Const, 0}, + {"ErrPartialEscape", Const, 0}, + {"ErrPredefinedEscaper", Const, 9}, + {"ErrRangeLoopReentry", Const, 0}, + {"ErrSlashAmbig", Const, 0}, + {"Error", Type, 0}, + {"Error.Description", Field, 0}, + 
{"Error.ErrorCode", Field, 0}, + {"Error.Line", Field, 0}, + {"Error.Name", Field, 0}, + {"Error.Node", Field, 4}, + {"ErrorCode", Type, 0}, + {"FuncMap", Type, 0}, + {"HTML", Type, 0}, + {"HTMLAttr", Type, 0}, + {"HTMLEscape", Func, 0}, + {"HTMLEscapeString", Func, 0}, + {"HTMLEscaper", Func, 0}, + {"IsTrue", Func, 6}, + {"JS", Type, 0}, + {"JSEscape", Func, 0}, + {"JSEscapeString", Func, 0}, + {"JSEscaper", Func, 0}, + {"JSStr", Type, 0}, + {"Must", Func, 0}, + {"New", Func, 0}, + {"OK", Const, 0}, + {"ParseFS", Func, 16}, + {"ParseFiles", Func, 0}, + {"ParseGlob", Func, 0}, + {"Srcset", Type, 10}, + {"Template", Type, 0}, + {"Template.Tree", Field, 2}, + {"URL", Type, 0}, + {"URLQueryEscaper", Func, 0}, + }, + "image": { + {"(*Alpha).AlphaAt", Method, 4}, + {"(*Alpha).At", Method, 0}, + {"(*Alpha).Bounds", Method, 0}, + {"(*Alpha).ColorModel", Method, 0}, + {"(*Alpha).Opaque", Method, 0}, + {"(*Alpha).PixOffset", Method, 0}, + {"(*Alpha).RGBA64At", Method, 17}, + {"(*Alpha).Set", Method, 0}, + {"(*Alpha).SetAlpha", Method, 0}, + {"(*Alpha).SetRGBA64", Method, 17}, + {"(*Alpha).SubImage", Method, 0}, + {"(*Alpha16).Alpha16At", Method, 4}, + {"(*Alpha16).At", Method, 0}, + {"(*Alpha16).Bounds", Method, 0}, + {"(*Alpha16).ColorModel", Method, 0}, + {"(*Alpha16).Opaque", Method, 0}, + {"(*Alpha16).PixOffset", Method, 0}, + {"(*Alpha16).RGBA64At", Method, 17}, + {"(*Alpha16).Set", Method, 0}, + {"(*Alpha16).SetAlpha16", Method, 0}, + {"(*Alpha16).SetRGBA64", Method, 17}, + {"(*Alpha16).SubImage", Method, 0}, + {"(*CMYK).At", Method, 5}, + {"(*CMYK).Bounds", Method, 5}, + {"(*CMYK).CMYKAt", Method, 5}, + {"(*CMYK).ColorModel", Method, 5}, + {"(*CMYK).Opaque", Method, 5}, + {"(*CMYK).PixOffset", Method, 5}, + {"(*CMYK).RGBA64At", Method, 17}, + {"(*CMYK).Set", Method, 5}, + {"(*CMYK).SetCMYK", Method, 5}, + {"(*CMYK).SetRGBA64", Method, 17}, + {"(*CMYK).SubImage", Method, 5}, + {"(*Gray).At", Method, 0}, + {"(*Gray).Bounds", Method, 0}, + {"(*Gray).ColorModel", Method, 0}, + {"(*Gray).GrayAt", Method, 4}, + {"(*Gray).Opaque", Method, 0}, + {"(*Gray).PixOffset", Method, 0}, + {"(*Gray).RGBA64At", Method, 17}, + {"(*Gray).Set", Method, 0}, + {"(*Gray).SetGray", Method, 0}, + {"(*Gray).SetRGBA64", Method, 17}, + {"(*Gray).SubImage", Method, 0}, + {"(*Gray16).At", Method, 0}, + {"(*Gray16).Bounds", Method, 0}, + {"(*Gray16).ColorModel", Method, 0}, + {"(*Gray16).Gray16At", Method, 4}, + {"(*Gray16).Opaque", Method, 0}, + {"(*Gray16).PixOffset", Method, 0}, + {"(*Gray16).RGBA64At", Method, 17}, + {"(*Gray16).Set", Method, 0}, + {"(*Gray16).SetGray16", Method, 0}, + {"(*Gray16).SetRGBA64", Method, 17}, + {"(*Gray16).SubImage", Method, 0}, + {"(*NRGBA).At", Method, 0}, + {"(*NRGBA).Bounds", Method, 0}, + {"(*NRGBA).ColorModel", Method, 0}, + {"(*NRGBA).NRGBAAt", Method, 4}, + {"(*NRGBA).Opaque", Method, 0}, + {"(*NRGBA).PixOffset", Method, 0}, + {"(*NRGBA).RGBA64At", Method, 17}, + {"(*NRGBA).Set", Method, 0}, + {"(*NRGBA).SetNRGBA", Method, 0}, + {"(*NRGBA).SetRGBA64", Method, 17}, + {"(*NRGBA).SubImage", Method, 0}, + {"(*NRGBA64).At", Method, 0}, + {"(*NRGBA64).Bounds", Method, 0}, + {"(*NRGBA64).ColorModel", Method, 0}, + {"(*NRGBA64).NRGBA64At", Method, 4}, + {"(*NRGBA64).Opaque", Method, 0}, + {"(*NRGBA64).PixOffset", Method, 0}, + {"(*NRGBA64).RGBA64At", Method, 17}, + {"(*NRGBA64).Set", Method, 0}, + {"(*NRGBA64).SetNRGBA64", Method, 0}, + {"(*NRGBA64).SetRGBA64", Method, 17}, + {"(*NRGBA64).SubImage", Method, 0}, + {"(*NYCbCrA).AOffset", Method, 6}, + {"(*NYCbCrA).At", Method, 6}, + 
{"(*NYCbCrA).Bounds", Method, 6}, + {"(*NYCbCrA).COffset", Method, 6}, + {"(*NYCbCrA).ColorModel", Method, 6}, + {"(*NYCbCrA).NYCbCrAAt", Method, 6}, + {"(*NYCbCrA).Opaque", Method, 6}, + {"(*NYCbCrA).RGBA64At", Method, 17}, + {"(*NYCbCrA).SubImage", Method, 6}, + {"(*NYCbCrA).YCbCrAt", Method, 6}, + {"(*NYCbCrA).YOffset", Method, 6}, + {"(*Paletted).At", Method, 0}, + {"(*Paletted).Bounds", Method, 0}, + {"(*Paletted).ColorIndexAt", Method, 0}, + {"(*Paletted).ColorModel", Method, 0}, + {"(*Paletted).Opaque", Method, 0}, + {"(*Paletted).PixOffset", Method, 0}, + {"(*Paletted).RGBA64At", Method, 17}, + {"(*Paletted).Set", Method, 0}, + {"(*Paletted).SetColorIndex", Method, 0}, + {"(*Paletted).SetRGBA64", Method, 17}, + {"(*Paletted).SubImage", Method, 0}, + {"(*RGBA).At", Method, 0}, + {"(*RGBA).Bounds", Method, 0}, + {"(*RGBA).ColorModel", Method, 0}, + {"(*RGBA).Opaque", Method, 0}, + {"(*RGBA).PixOffset", Method, 0}, + {"(*RGBA).RGBA64At", Method, 17}, + {"(*RGBA).RGBAAt", Method, 4}, + {"(*RGBA).Set", Method, 0}, + {"(*RGBA).SetRGBA", Method, 0}, + {"(*RGBA).SetRGBA64", Method, 17}, + {"(*RGBA).SubImage", Method, 0}, + {"(*RGBA64).At", Method, 0}, + {"(*RGBA64).Bounds", Method, 0}, + {"(*RGBA64).ColorModel", Method, 0}, + {"(*RGBA64).Opaque", Method, 0}, + {"(*RGBA64).PixOffset", Method, 0}, + {"(*RGBA64).RGBA64At", Method, 4}, + {"(*RGBA64).Set", Method, 0}, + {"(*RGBA64).SetRGBA64", Method, 0}, + {"(*RGBA64).SubImage", Method, 0}, + {"(*Uniform).At", Method, 0}, + {"(*Uniform).Bounds", Method, 0}, + {"(*Uniform).ColorModel", Method, 0}, + {"(*Uniform).Convert", Method, 0}, + {"(*Uniform).Opaque", Method, 0}, + {"(*Uniform).RGBA", Method, 0}, + {"(*Uniform).RGBA64At", Method, 17}, + {"(*YCbCr).At", Method, 0}, + {"(*YCbCr).Bounds", Method, 0}, + {"(*YCbCr).COffset", Method, 0}, + {"(*YCbCr).ColorModel", Method, 0}, + {"(*YCbCr).Opaque", Method, 0}, + {"(*YCbCr).RGBA64At", Method, 17}, + {"(*YCbCr).SubImage", Method, 0}, + {"(*YCbCr).YCbCrAt", Method, 4}, + {"(*YCbCr).YOffset", Method, 0}, + {"(Point).Add", Method, 0}, + {"(Point).Div", Method, 0}, + {"(Point).Eq", Method, 0}, + {"(Point).In", Method, 0}, + {"(Point).Mod", Method, 0}, + {"(Point).Mul", Method, 0}, + {"(Point).String", Method, 0}, + {"(Point).Sub", Method, 0}, + {"(Rectangle).Add", Method, 0}, + {"(Rectangle).At", Method, 5}, + {"(Rectangle).Bounds", Method, 5}, + {"(Rectangle).Canon", Method, 0}, + {"(Rectangle).ColorModel", Method, 5}, + {"(Rectangle).Dx", Method, 0}, + {"(Rectangle).Dy", Method, 0}, + {"(Rectangle).Empty", Method, 0}, + {"(Rectangle).Eq", Method, 0}, + {"(Rectangle).In", Method, 0}, + {"(Rectangle).Inset", Method, 0}, + {"(Rectangle).Intersect", Method, 0}, + {"(Rectangle).Overlaps", Method, 0}, + {"(Rectangle).RGBA64At", Method, 17}, + {"(Rectangle).Size", Method, 0}, + {"(Rectangle).String", Method, 0}, + {"(Rectangle).Sub", Method, 0}, + {"(Rectangle).Union", Method, 0}, + {"(YCbCrSubsampleRatio).String", Method, 0}, + {"Alpha", Type, 0}, + {"Alpha.Pix", Field, 0}, + {"Alpha.Rect", Field, 0}, + {"Alpha.Stride", Field, 0}, + {"Alpha16", Type, 0}, + {"Alpha16.Pix", Field, 0}, + {"Alpha16.Rect", Field, 0}, + {"Alpha16.Stride", Field, 0}, + {"Black", Var, 0}, + {"CMYK", Type, 5}, + {"CMYK.Pix", Field, 5}, + {"CMYK.Rect", Field, 5}, + {"CMYK.Stride", Field, 5}, + {"Config", Type, 0}, + {"Config.ColorModel", Field, 0}, + {"Config.Height", Field, 0}, + {"Config.Width", Field, 0}, + {"Decode", Func, 0}, + {"DecodeConfig", Func, 0}, + {"ErrFormat", Var, 0}, + {"Gray", Type, 0}, + {"Gray.Pix", Field, 0}, + 
{"Gray.Rect", Field, 0}, + {"Gray.Stride", Field, 0}, + {"Gray16", Type, 0}, + {"Gray16.Pix", Field, 0}, + {"Gray16.Rect", Field, 0}, + {"Gray16.Stride", Field, 0}, + {"Image", Type, 0}, + {"NRGBA", Type, 0}, + {"NRGBA.Pix", Field, 0}, + {"NRGBA.Rect", Field, 0}, + {"NRGBA.Stride", Field, 0}, + {"NRGBA64", Type, 0}, + {"NRGBA64.Pix", Field, 0}, + {"NRGBA64.Rect", Field, 0}, + {"NRGBA64.Stride", Field, 0}, + {"NYCbCrA", Type, 6}, + {"NYCbCrA.A", Field, 6}, + {"NYCbCrA.AStride", Field, 6}, + {"NYCbCrA.YCbCr", Field, 6}, + {"NewAlpha", Func, 0}, + {"NewAlpha16", Func, 0}, + {"NewCMYK", Func, 5}, + {"NewGray", Func, 0}, + {"NewGray16", Func, 0}, + {"NewNRGBA", Func, 0}, + {"NewNRGBA64", Func, 0}, + {"NewNYCbCrA", Func, 6}, + {"NewPaletted", Func, 0}, + {"NewRGBA", Func, 0}, + {"NewRGBA64", Func, 0}, + {"NewUniform", Func, 0}, + {"NewYCbCr", Func, 0}, + {"Opaque", Var, 0}, + {"Paletted", Type, 0}, + {"Paletted.Palette", Field, 0}, + {"Paletted.Pix", Field, 0}, + {"Paletted.Rect", Field, 0}, + {"Paletted.Stride", Field, 0}, + {"PalettedImage", Type, 0}, + {"Point", Type, 0}, + {"Point.X", Field, 0}, + {"Point.Y", Field, 0}, + {"Pt", Func, 0}, + {"RGBA", Type, 0}, + {"RGBA.Pix", Field, 0}, + {"RGBA.Rect", Field, 0}, + {"RGBA.Stride", Field, 0}, + {"RGBA64", Type, 0}, + {"RGBA64.Pix", Field, 0}, + {"RGBA64.Rect", Field, 0}, + {"RGBA64.Stride", Field, 0}, + {"RGBA64Image", Type, 17}, + {"Rect", Func, 0}, + {"Rectangle", Type, 0}, + {"Rectangle.Max", Field, 0}, + {"Rectangle.Min", Field, 0}, + {"RegisterFormat", Func, 0}, + {"Transparent", Var, 0}, + {"Uniform", Type, 0}, + {"Uniform.C", Field, 0}, + {"White", Var, 0}, + {"YCbCr", Type, 0}, + {"YCbCr.CStride", Field, 0}, + {"YCbCr.Cb", Field, 0}, + {"YCbCr.Cr", Field, 0}, + {"YCbCr.Rect", Field, 0}, + {"YCbCr.SubsampleRatio", Field, 0}, + {"YCbCr.Y", Field, 0}, + {"YCbCr.YStride", Field, 0}, + {"YCbCrSubsampleRatio", Type, 0}, + {"YCbCrSubsampleRatio410", Const, 5}, + {"YCbCrSubsampleRatio411", Const, 5}, + {"YCbCrSubsampleRatio420", Const, 0}, + {"YCbCrSubsampleRatio422", Const, 0}, + {"YCbCrSubsampleRatio440", Const, 1}, + {"YCbCrSubsampleRatio444", Const, 0}, + {"ZP", Var, 0}, + {"ZR", Var, 0}, + }, + "image/color": { + {"(Alpha).RGBA", Method, 0}, + {"(Alpha16).RGBA", Method, 0}, + {"(CMYK).RGBA", Method, 5}, + {"(Gray).RGBA", Method, 0}, + {"(Gray16).RGBA", Method, 0}, + {"(NRGBA).RGBA", Method, 0}, + {"(NRGBA64).RGBA", Method, 0}, + {"(NYCbCrA).RGBA", Method, 6}, + {"(Palette).Convert", Method, 0}, + {"(Palette).Index", Method, 0}, + {"(RGBA).RGBA", Method, 0}, + {"(RGBA64).RGBA", Method, 0}, + {"(YCbCr).RGBA", Method, 0}, + {"Alpha", Type, 0}, + {"Alpha.A", Field, 0}, + {"Alpha16", Type, 0}, + {"Alpha16.A", Field, 0}, + {"Alpha16Model", Var, 0}, + {"AlphaModel", Var, 0}, + {"Black", Var, 0}, + {"CMYK", Type, 5}, + {"CMYK.C", Field, 5}, + {"CMYK.K", Field, 5}, + {"CMYK.M", Field, 5}, + {"CMYK.Y", Field, 5}, + {"CMYKModel", Var, 5}, + {"CMYKToRGB", Func, 5}, + {"Color", Type, 0}, + {"Gray", Type, 0}, + {"Gray.Y", Field, 0}, + {"Gray16", Type, 0}, + {"Gray16.Y", Field, 0}, + {"Gray16Model", Var, 0}, + {"GrayModel", Var, 0}, + {"Model", Type, 0}, + {"ModelFunc", Func, 0}, + {"NRGBA", Type, 0}, + {"NRGBA.A", Field, 0}, + {"NRGBA.B", Field, 0}, + {"NRGBA.G", Field, 0}, + {"NRGBA.R", Field, 0}, + {"NRGBA64", Type, 0}, + {"NRGBA64.A", Field, 0}, + {"NRGBA64.B", Field, 0}, + {"NRGBA64.G", Field, 0}, + {"NRGBA64.R", Field, 0}, + {"NRGBA64Model", Var, 0}, + {"NRGBAModel", Var, 0}, + {"NYCbCrA", Type, 6}, + {"NYCbCrA.A", Field, 6}, + {"NYCbCrA.YCbCr", 
Field, 6}, + {"NYCbCrAModel", Var, 6}, + {"Opaque", Var, 0}, + {"Palette", Type, 0}, + {"RGBA", Type, 0}, + {"RGBA.A", Field, 0}, + {"RGBA.B", Field, 0}, + {"RGBA.G", Field, 0}, + {"RGBA.R", Field, 0}, + {"RGBA64", Type, 0}, + {"RGBA64.A", Field, 0}, + {"RGBA64.B", Field, 0}, + {"RGBA64.G", Field, 0}, + {"RGBA64.R", Field, 0}, + {"RGBA64Model", Var, 0}, + {"RGBAModel", Var, 0}, + {"RGBToCMYK", Func, 5}, + {"RGBToYCbCr", Func, 0}, + {"Transparent", Var, 0}, + {"White", Var, 0}, + {"YCbCr", Type, 0}, + {"YCbCr.Cb", Field, 0}, + {"YCbCr.Cr", Field, 0}, + {"YCbCr.Y", Field, 0}, + {"YCbCrModel", Var, 0}, + {"YCbCrToRGB", Func, 0}, + }, + "image/color/palette": { + {"Plan9", Var, 2}, + {"WebSafe", Var, 2}, + }, + "image/draw": { + {"(Op).Draw", Method, 2}, + {"Draw", Func, 0}, + {"DrawMask", Func, 0}, + {"Drawer", Type, 2}, + {"FloydSteinberg", Var, 2}, + {"Image", Type, 0}, + {"Op", Type, 0}, + {"Over", Const, 0}, + {"Quantizer", Type, 2}, + {"RGBA64Image", Type, 17}, + {"Src", Const, 0}, + }, + "image/gif": { + {"Decode", Func, 0}, + {"DecodeAll", Func, 0}, + {"DecodeConfig", Func, 0}, + {"DisposalBackground", Const, 5}, + {"DisposalNone", Const, 5}, + {"DisposalPrevious", Const, 5}, + {"Encode", Func, 2}, + {"EncodeAll", Func, 2}, + {"GIF", Type, 0}, + {"GIF.BackgroundIndex", Field, 5}, + {"GIF.Config", Field, 5}, + {"GIF.Delay", Field, 0}, + {"GIF.Disposal", Field, 5}, + {"GIF.Image", Field, 0}, + {"GIF.LoopCount", Field, 0}, + {"Options", Type, 2}, + {"Options.Drawer", Field, 2}, + {"Options.NumColors", Field, 2}, + {"Options.Quantizer", Field, 2}, + }, + "image/jpeg": { + {"(FormatError).Error", Method, 0}, + {"(UnsupportedError).Error", Method, 0}, + {"Decode", Func, 0}, + {"DecodeConfig", Func, 0}, + {"DefaultQuality", Const, 0}, + {"Encode", Func, 0}, + {"FormatError", Type, 0}, + {"Options", Type, 0}, + {"Options.Quality", Field, 0}, + {"Reader", Type, 0}, + {"UnsupportedError", Type, 0}, + }, + "image/png": { + {"(*Encoder).Encode", Method, 4}, + {"(FormatError).Error", Method, 0}, + {"(UnsupportedError).Error", Method, 0}, + {"BestCompression", Const, 4}, + {"BestSpeed", Const, 4}, + {"CompressionLevel", Type, 4}, + {"Decode", Func, 0}, + {"DecodeConfig", Func, 0}, + {"DefaultCompression", Const, 4}, + {"Encode", Func, 0}, + {"Encoder", Type, 4}, + {"Encoder.BufferPool", Field, 9}, + {"Encoder.CompressionLevel", Field, 4}, + {"EncoderBuffer", Type, 9}, + {"EncoderBufferPool", Type, 9}, + {"FormatError", Type, 0}, + {"NoCompression", Const, 4}, + {"UnsupportedError", Type, 0}, + }, + "index/suffixarray": { + {"(*Index).Bytes", Method, 0}, + {"(*Index).FindAllIndex", Method, 0}, + {"(*Index).Lookup", Method, 0}, + {"(*Index).Read", Method, 0}, + {"(*Index).Write", Method, 0}, + {"Index", Type, 0}, + {"New", Func, 0}, + }, + "io": { + {"(*LimitedReader).Read", Method, 0}, + {"(*OffsetWriter).Seek", Method, 20}, + {"(*OffsetWriter).Write", Method, 20}, + {"(*OffsetWriter).WriteAt", Method, 20}, + {"(*PipeReader).Close", Method, 0}, + {"(*PipeReader).CloseWithError", Method, 0}, + {"(*PipeReader).Read", Method, 0}, + {"(*PipeWriter).Close", Method, 0}, + {"(*PipeWriter).CloseWithError", Method, 0}, + {"(*PipeWriter).Write", Method, 0}, + {"(*SectionReader).Outer", Method, 22}, + {"(*SectionReader).Read", Method, 0}, + {"(*SectionReader).ReadAt", Method, 0}, + {"(*SectionReader).Seek", Method, 0}, + {"(*SectionReader).Size", Method, 0}, + {"ByteReader", Type, 0}, + {"ByteScanner", Type, 0}, + {"ByteWriter", Type, 1}, + {"Closer", Type, 0}, + {"Copy", Func, 0}, + {"CopyBuffer", Func, 5}, + 
{"CopyN", Func, 0}, + {"Discard", Var, 16}, + {"EOF", Var, 0}, + {"ErrClosedPipe", Var, 0}, + {"ErrNoProgress", Var, 1}, + {"ErrShortBuffer", Var, 0}, + {"ErrShortWrite", Var, 0}, + {"ErrUnexpectedEOF", Var, 0}, + {"LimitReader", Func, 0}, + {"LimitedReader", Type, 0}, + {"LimitedReader.N", Field, 0}, + {"LimitedReader.R", Field, 0}, + {"MultiReader", Func, 0}, + {"MultiWriter", Func, 0}, + {"NewOffsetWriter", Func, 20}, + {"NewSectionReader", Func, 0}, + {"NopCloser", Func, 16}, + {"OffsetWriter", Type, 20}, + {"Pipe", Func, 0}, + {"PipeReader", Type, 0}, + {"PipeWriter", Type, 0}, + {"ReadAll", Func, 16}, + {"ReadAtLeast", Func, 0}, + {"ReadCloser", Type, 0}, + {"ReadFull", Func, 0}, + {"ReadSeekCloser", Type, 16}, + {"ReadSeeker", Type, 0}, + {"ReadWriteCloser", Type, 0}, + {"ReadWriteSeeker", Type, 0}, + {"ReadWriter", Type, 0}, + {"Reader", Type, 0}, + {"ReaderAt", Type, 0}, + {"ReaderFrom", Type, 0}, + {"RuneReader", Type, 0}, + {"RuneScanner", Type, 0}, + {"SectionReader", Type, 0}, + {"SeekCurrent", Const, 7}, + {"SeekEnd", Const, 7}, + {"SeekStart", Const, 7}, + {"Seeker", Type, 0}, + {"StringWriter", Type, 12}, + {"TeeReader", Func, 0}, + {"WriteCloser", Type, 0}, + {"WriteSeeker", Type, 0}, + {"WriteString", Func, 0}, + {"Writer", Type, 0}, + {"WriterAt", Type, 0}, + {"WriterTo", Type, 0}, + }, + "io/fs": { + {"(*PathError).Error", Method, 16}, + {"(*PathError).Timeout", Method, 16}, + {"(*PathError).Unwrap", Method, 16}, + {"(FileMode).IsDir", Method, 16}, + {"(FileMode).IsRegular", Method, 16}, + {"(FileMode).Perm", Method, 16}, + {"(FileMode).String", Method, 16}, + {"(FileMode).Type", Method, 16}, + {"DirEntry", Type, 16}, + {"ErrClosed", Var, 16}, + {"ErrExist", Var, 16}, + {"ErrInvalid", Var, 16}, + {"ErrNotExist", Var, 16}, + {"ErrPermission", Var, 16}, + {"FS", Type, 16}, + {"File", Type, 16}, + {"FileInfo", Type, 16}, + {"FileInfoToDirEntry", Func, 17}, + {"FileMode", Type, 16}, + {"FormatDirEntry", Func, 21}, + {"FormatFileInfo", Func, 21}, + {"Glob", Func, 16}, + {"GlobFS", Type, 16}, + {"ModeAppend", Const, 16}, + {"ModeCharDevice", Const, 16}, + {"ModeDevice", Const, 16}, + {"ModeDir", Const, 16}, + {"ModeExclusive", Const, 16}, + {"ModeIrregular", Const, 16}, + {"ModeNamedPipe", Const, 16}, + {"ModePerm", Const, 16}, + {"ModeSetgid", Const, 16}, + {"ModeSetuid", Const, 16}, + {"ModeSocket", Const, 16}, + {"ModeSticky", Const, 16}, + {"ModeSymlink", Const, 16}, + {"ModeTemporary", Const, 16}, + {"ModeType", Const, 16}, + {"PathError", Type, 16}, + {"PathError.Err", Field, 16}, + {"PathError.Op", Field, 16}, + {"PathError.Path", Field, 16}, + {"ReadDir", Func, 16}, + {"ReadDirFS", Type, 16}, + {"ReadDirFile", Type, 16}, + {"ReadFile", Func, 16}, + {"ReadFileFS", Type, 16}, + {"SkipAll", Var, 20}, + {"SkipDir", Var, 16}, + {"Stat", Func, 16}, + {"StatFS", Type, 16}, + {"Sub", Func, 16}, + {"SubFS", Type, 16}, + {"ValidPath", Func, 16}, + {"WalkDir", Func, 16}, + {"WalkDirFunc", Type, 16}, + }, + "io/ioutil": { + {"Discard", Var, 0}, + {"NopCloser", Func, 0}, + {"ReadAll", Func, 0}, + {"ReadDir", Func, 0}, + {"ReadFile", Func, 0}, + {"TempDir", Func, 0}, + {"TempFile", Func, 0}, + {"WriteFile", Func, 0}, + }, + "log": { + {"(*Logger).Fatal", Method, 0}, + {"(*Logger).Fatalf", Method, 0}, + {"(*Logger).Fatalln", Method, 0}, + {"(*Logger).Flags", Method, 0}, + {"(*Logger).Output", Method, 0}, + {"(*Logger).Panic", Method, 0}, + {"(*Logger).Panicf", Method, 0}, + {"(*Logger).Panicln", Method, 0}, + {"(*Logger).Prefix", Method, 0}, + {"(*Logger).Print", Method, 0}, + 
{"(*Logger).Printf", Method, 0}, + {"(*Logger).Println", Method, 0}, + {"(*Logger).SetFlags", Method, 0}, + {"(*Logger).SetOutput", Method, 5}, + {"(*Logger).SetPrefix", Method, 0}, + {"(*Logger).Writer", Method, 12}, + {"Default", Func, 16}, + {"Fatal", Func, 0}, + {"Fatalf", Func, 0}, + {"Fatalln", Func, 0}, + {"Flags", Func, 0}, + {"LUTC", Const, 5}, + {"Ldate", Const, 0}, + {"Llongfile", Const, 0}, + {"Lmicroseconds", Const, 0}, + {"Lmsgprefix", Const, 14}, + {"Logger", Type, 0}, + {"Lshortfile", Const, 0}, + {"LstdFlags", Const, 0}, + {"Ltime", Const, 0}, + {"New", Func, 0}, + {"Output", Func, 5}, + {"Panic", Func, 0}, + {"Panicf", Func, 0}, + {"Panicln", Func, 0}, + {"Prefix", Func, 0}, + {"Print", Func, 0}, + {"Printf", Func, 0}, + {"Println", Func, 0}, + {"SetFlags", Func, 0}, + {"SetOutput", Func, 0}, + {"SetPrefix", Func, 0}, + {"Writer", Func, 13}, + }, + "log/slog": { + {"(*JSONHandler).Enabled", Method, 21}, + {"(*JSONHandler).Handle", Method, 21}, + {"(*JSONHandler).WithAttrs", Method, 21}, + {"(*JSONHandler).WithGroup", Method, 21}, + {"(*Level).UnmarshalJSON", Method, 21}, + {"(*Level).UnmarshalText", Method, 21}, + {"(*LevelVar).Level", Method, 21}, + {"(*LevelVar).MarshalText", Method, 21}, + {"(*LevelVar).Set", Method, 21}, + {"(*LevelVar).String", Method, 21}, + {"(*LevelVar).UnmarshalText", Method, 21}, + {"(*Logger).Debug", Method, 21}, + {"(*Logger).DebugContext", Method, 21}, + {"(*Logger).Enabled", Method, 21}, + {"(*Logger).Error", Method, 21}, + {"(*Logger).ErrorContext", Method, 21}, + {"(*Logger).Handler", Method, 21}, + {"(*Logger).Info", Method, 21}, + {"(*Logger).InfoContext", Method, 21}, + {"(*Logger).Log", Method, 21}, + {"(*Logger).LogAttrs", Method, 21}, + {"(*Logger).Warn", Method, 21}, + {"(*Logger).WarnContext", Method, 21}, + {"(*Logger).With", Method, 21}, + {"(*Logger).WithGroup", Method, 21}, + {"(*Record).Add", Method, 21}, + {"(*Record).AddAttrs", Method, 21}, + {"(*TextHandler).Enabled", Method, 21}, + {"(*TextHandler).Handle", Method, 21}, + {"(*TextHandler).WithAttrs", Method, 21}, + {"(*TextHandler).WithGroup", Method, 21}, + {"(Attr).Equal", Method, 21}, + {"(Attr).String", Method, 21}, + {"(Kind).String", Method, 21}, + {"(Level).Level", Method, 21}, + {"(Level).MarshalJSON", Method, 21}, + {"(Level).MarshalText", Method, 21}, + {"(Level).String", Method, 21}, + {"(Record).Attrs", Method, 21}, + {"(Record).Clone", Method, 21}, + {"(Record).NumAttrs", Method, 21}, + {"(Value).Any", Method, 21}, + {"(Value).Bool", Method, 21}, + {"(Value).Duration", Method, 21}, + {"(Value).Equal", Method, 21}, + {"(Value).Float64", Method, 21}, + {"(Value).Group", Method, 21}, + {"(Value).Int64", Method, 21}, + {"(Value).Kind", Method, 21}, + {"(Value).LogValuer", Method, 21}, + {"(Value).Resolve", Method, 21}, + {"(Value).String", Method, 21}, + {"(Value).Time", Method, 21}, + {"(Value).Uint64", Method, 21}, + {"Any", Func, 21}, + {"AnyValue", Func, 21}, + {"Attr", Type, 21}, + {"Attr.Key", Field, 21}, + {"Attr.Value", Field, 21}, + {"Bool", Func, 21}, + {"BoolValue", Func, 21}, + {"Debug", Func, 21}, + {"DebugContext", Func, 21}, + {"Default", Func, 21}, + {"Duration", Func, 21}, + {"DurationValue", Func, 21}, + {"Error", Func, 21}, + {"ErrorContext", Func, 21}, + {"Float64", Func, 21}, + {"Float64Value", Func, 21}, + {"Group", Func, 21}, + {"GroupValue", Func, 21}, + {"Handler", Type, 21}, + {"HandlerOptions", Type, 21}, + {"HandlerOptions.AddSource", Field, 21}, + {"HandlerOptions.Level", Field, 21}, + {"HandlerOptions.ReplaceAttr", Field, 21}, + 
{"Info", Func, 21}, + {"InfoContext", Func, 21}, + {"Int", Func, 21}, + {"Int64", Func, 21}, + {"Int64Value", Func, 21}, + {"IntValue", Func, 21}, + {"JSONHandler", Type, 21}, + {"Kind", Type, 21}, + {"KindAny", Const, 21}, + {"KindBool", Const, 21}, + {"KindDuration", Const, 21}, + {"KindFloat64", Const, 21}, + {"KindGroup", Const, 21}, + {"KindInt64", Const, 21}, + {"KindLogValuer", Const, 21}, + {"KindString", Const, 21}, + {"KindTime", Const, 21}, + {"KindUint64", Const, 21}, + {"Level", Type, 21}, + {"LevelDebug", Const, 21}, + {"LevelError", Const, 21}, + {"LevelInfo", Const, 21}, + {"LevelKey", Const, 21}, + {"LevelVar", Type, 21}, + {"LevelWarn", Const, 21}, + {"Leveler", Type, 21}, + {"Log", Func, 21}, + {"LogAttrs", Func, 21}, + {"LogValuer", Type, 21}, + {"Logger", Type, 21}, + {"MessageKey", Const, 21}, + {"New", Func, 21}, + {"NewJSONHandler", Func, 21}, + {"NewLogLogger", Func, 21}, + {"NewRecord", Func, 21}, + {"NewTextHandler", Func, 21}, + {"Record", Type, 21}, + {"Record.Level", Field, 21}, + {"Record.Message", Field, 21}, + {"Record.PC", Field, 21}, + {"Record.Time", Field, 21}, + {"SetDefault", Func, 21}, + {"SetLogLoggerLevel", Func, 22}, + {"Source", Type, 21}, + {"Source.File", Field, 21}, + {"Source.Function", Field, 21}, + {"Source.Line", Field, 21}, + {"SourceKey", Const, 21}, + {"String", Func, 21}, + {"StringValue", Func, 21}, + {"TextHandler", Type, 21}, + {"Time", Func, 21}, + {"TimeKey", Const, 21}, + {"TimeValue", Func, 21}, + {"Uint64", Func, 21}, + {"Uint64Value", Func, 21}, + {"Value", Type, 21}, + {"Warn", Func, 21}, + {"WarnContext", Func, 21}, + {"With", Func, 21}, + }, + "log/syslog": { + {"(*Writer).Alert", Method, 0}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Crit", Method, 0}, + {"(*Writer).Debug", Method, 0}, + {"(*Writer).Emerg", Method, 0}, + {"(*Writer).Err", Method, 0}, + {"(*Writer).Info", Method, 0}, + {"(*Writer).Notice", Method, 0}, + {"(*Writer).Warning", Method, 0}, + {"(*Writer).Write", Method, 0}, + {"Dial", Func, 0}, + {"LOG_ALERT", Const, 0}, + {"LOG_AUTH", Const, 1}, + {"LOG_AUTHPRIV", Const, 1}, + {"LOG_CRIT", Const, 0}, + {"LOG_CRON", Const, 1}, + {"LOG_DAEMON", Const, 1}, + {"LOG_DEBUG", Const, 0}, + {"LOG_EMERG", Const, 0}, + {"LOG_ERR", Const, 0}, + {"LOG_FTP", Const, 1}, + {"LOG_INFO", Const, 0}, + {"LOG_KERN", Const, 1}, + {"LOG_LOCAL0", Const, 1}, + {"LOG_LOCAL1", Const, 1}, + {"LOG_LOCAL2", Const, 1}, + {"LOG_LOCAL3", Const, 1}, + {"LOG_LOCAL4", Const, 1}, + {"LOG_LOCAL5", Const, 1}, + {"LOG_LOCAL6", Const, 1}, + {"LOG_LOCAL7", Const, 1}, + {"LOG_LPR", Const, 1}, + {"LOG_MAIL", Const, 1}, + {"LOG_NEWS", Const, 1}, + {"LOG_NOTICE", Const, 0}, + {"LOG_SYSLOG", Const, 1}, + {"LOG_USER", Const, 1}, + {"LOG_UUCP", Const, 1}, + {"LOG_WARNING", Const, 0}, + {"New", Func, 0}, + {"NewLogger", Func, 0}, + {"Priority", Type, 0}, + {"Writer", Type, 0}, + }, + "maps": { + {"Clone", Func, 21}, + {"Copy", Func, 21}, + {"DeleteFunc", Func, 21}, + {"Equal", Func, 21}, + {"EqualFunc", Func, 21}, + }, + "math": { + {"Abs", Func, 0}, + {"Acos", Func, 0}, + {"Acosh", Func, 0}, + {"Asin", Func, 0}, + {"Asinh", Func, 0}, + {"Atan", Func, 0}, + {"Atan2", Func, 0}, + {"Atanh", Func, 0}, + {"Cbrt", Func, 0}, + {"Ceil", Func, 0}, + {"Copysign", Func, 0}, + {"Cos", Func, 0}, + {"Cosh", Func, 0}, + {"Dim", Func, 0}, + {"E", Const, 0}, + {"Erf", Func, 0}, + {"Erfc", Func, 0}, + {"Erfcinv", Func, 10}, + {"Erfinv", Func, 10}, + {"Exp", Func, 0}, + {"Exp2", Func, 0}, + {"Expm1", Func, 0}, + {"FMA", Func, 14}, + {"Float32bits", Func, 0}, + 
{"Float32frombits", Func, 0}, + {"Float64bits", Func, 0}, + {"Float64frombits", Func, 0}, + {"Floor", Func, 0}, + {"Frexp", Func, 0}, + {"Gamma", Func, 0}, + {"Hypot", Func, 0}, + {"Ilogb", Func, 0}, + {"Inf", Func, 0}, + {"IsInf", Func, 0}, + {"IsNaN", Func, 0}, + {"J0", Func, 0}, + {"J1", Func, 0}, + {"Jn", Func, 0}, + {"Ldexp", Func, 0}, + {"Lgamma", Func, 0}, + {"Ln10", Const, 0}, + {"Ln2", Const, 0}, + {"Log", Func, 0}, + {"Log10", Func, 0}, + {"Log10E", Const, 0}, + {"Log1p", Func, 0}, + {"Log2", Func, 0}, + {"Log2E", Const, 0}, + {"Logb", Func, 0}, + {"Max", Func, 0}, + {"MaxFloat32", Const, 0}, + {"MaxFloat64", Const, 0}, + {"MaxInt", Const, 17}, + {"MaxInt16", Const, 0}, + {"MaxInt32", Const, 0}, + {"MaxInt64", Const, 0}, + {"MaxInt8", Const, 0}, + {"MaxUint", Const, 17}, + {"MaxUint16", Const, 0}, + {"MaxUint32", Const, 0}, + {"MaxUint64", Const, 0}, + {"MaxUint8", Const, 0}, + {"Min", Func, 0}, + {"MinInt", Const, 17}, + {"MinInt16", Const, 0}, + {"MinInt32", Const, 0}, + {"MinInt64", Const, 0}, + {"MinInt8", Const, 0}, + {"Mod", Func, 0}, + {"Modf", Func, 0}, + {"NaN", Func, 0}, + {"Nextafter", Func, 0}, + {"Nextafter32", Func, 4}, + {"Phi", Const, 0}, + {"Pi", Const, 0}, + {"Pow", Func, 0}, + {"Pow10", Func, 0}, + {"Remainder", Func, 0}, + {"Round", Func, 10}, + {"RoundToEven", Func, 10}, + {"Signbit", Func, 0}, + {"Sin", Func, 0}, + {"Sincos", Func, 0}, + {"Sinh", Func, 0}, + {"SmallestNonzeroFloat32", Const, 0}, + {"SmallestNonzeroFloat64", Const, 0}, + {"Sqrt", Func, 0}, + {"Sqrt2", Const, 0}, + {"SqrtE", Const, 0}, + {"SqrtPhi", Const, 0}, + {"SqrtPi", Const, 0}, + {"Tan", Func, 0}, + {"Tanh", Func, 0}, + {"Trunc", Func, 0}, + {"Y0", Func, 0}, + {"Y1", Func, 0}, + {"Yn", Func, 0}, + }, + "math/big": { + {"(*Float).Abs", Method, 5}, + {"(*Float).Acc", Method, 5}, + {"(*Float).Add", Method, 5}, + {"(*Float).Append", Method, 5}, + {"(*Float).Cmp", Method, 5}, + {"(*Float).Copy", Method, 5}, + {"(*Float).Float32", Method, 5}, + {"(*Float).Float64", Method, 5}, + {"(*Float).Format", Method, 5}, + {"(*Float).GobDecode", Method, 7}, + {"(*Float).GobEncode", Method, 7}, + {"(*Float).Int", Method, 5}, + {"(*Float).Int64", Method, 5}, + {"(*Float).IsInf", Method, 5}, + {"(*Float).IsInt", Method, 5}, + {"(*Float).MantExp", Method, 5}, + {"(*Float).MarshalText", Method, 6}, + {"(*Float).MinPrec", Method, 5}, + {"(*Float).Mode", Method, 5}, + {"(*Float).Mul", Method, 5}, + {"(*Float).Neg", Method, 5}, + {"(*Float).Parse", Method, 5}, + {"(*Float).Prec", Method, 5}, + {"(*Float).Quo", Method, 5}, + {"(*Float).Rat", Method, 5}, + {"(*Float).Scan", Method, 8}, + {"(*Float).Set", Method, 5}, + {"(*Float).SetFloat64", Method, 5}, + {"(*Float).SetInf", Method, 5}, + {"(*Float).SetInt", Method, 5}, + {"(*Float).SetInt64", Method, 5}, + {"(*Float).SetMantExp", Method, 5}, + {"(*Float).SetMode", Method, 5}, + {"(*Float).SetPrec", Method, 5}, + {"(*Float).SetRat", Method, 5}, + {"(*Float).SetString", Method, 5}, + {"(*Float).SetUint64", Method, 5}, + {"(*Float).Sign", Method, 5}, + {"(*Float).Signbit", Method, 5}, + {"(*Float).Sqrt", Method, 10}, + {"(*Float).String", Method, 5}, + {"(*Float).Sub", Method, 5}, + {"(*Float).Text", Method, 5}, + {"(*Float).Uint64", Method, 5}, + {"(*Float).UnmarshalText", Method, 6}, + {"(*Int).Abs", Method, 0}, + {"(*Int).Add", Method, 0}, + {"(*Int).And", Method, 0}, + {"(*Int).AndNot", Method, 0}, + {"(*Int).Append", Method, 6}, + {"(*Int).Binomial", Method, 0}, + {"(*Int).Bit", Method, 0}, + {"(*Int).BitLen", Method, 0}, + {"(*Int).Bits", Method, 0}, + 
{"(*Int).Bytes", Method, 0}, + {"(*Int).Cmp", Method, 0}, + {"(*Int).CmpAbs", Method, 10}, + {"(*Int).Div", Method, 0}, + {"(*Int).DivMod", Method, 0}, + {"(*Int).Exp", Method, 0}, + {"(*Int).FillBytes", Method, 15}, + {"(*Int).Float64", Method, 21}, + {"(*Int).Format", Method, 0}, + {"(*Int).GCD", Method, 0}, + {"(*Int).GobDecode", Method, 0}, + {"(*Int).GobEncode", Method, 0}, + {"(*Int).Int64", Method, 0}, + {"(*Int).IsInt64", Method, 9}, + {"(*Int).IsUint64", Method, 9}, + {"(*Int).Lsh", Method, 0}, + {"(*Int).MarshalJSON", Method, 1}, + {"(*Int).MarshalText", Method, 3}, + {"(*Int).Mod", Method, 0}, + {"(*Int).ModInverse", Method, 0}, + {"(*Int).ModSqrt", Method, 5}, + {"(*Int).Mul", Method, 0}, + {"(*Int).MulRange", Method, 0}, + {"(*Int).Neg", Method, 0}, + {"(*Int).Not", Method, 0}, + {"(*Int).Or", Method, 0}, + {"(*Int).ProbablyPrime", Method, 0}, + {"(*Int).Quo", Method, 0}, + {"(*Int).QuoRem", Method, 0}, + {"(*Int).Rand", Method, 0}, + {"(*Int).Rem", Method, 0}, + {"(*Int).Rsh", Method, 0}, + {"(*Int).Scan", Method, 0}, + {"(*Int).Set", Method, 0}, + {"(*Int).SetBit", Method, 0}, + {"(*Int).SetBits", Method, 0}, + {"(*Int).SetBytes", Method, 0}, + {"(*Int).SetInt64", Method, 0}, + {"(*Int).SetString", Method, 0}, + {"(*Int).SetUint64", Method, 1}, + {"(*Int).Sign", Method, 0}, + {"(*Int).Sqrt", Method, 8}, + {"(*Int).String", Method, 0}, + {"(*Int).Sub", Method, 0}, + {"(*Int).Text", Method, 6}, + {"(*Int).TrailingZeroBits", Method, 13}, + {"(*Int).Uint64", Method, 1}, + {"(*Int).UnmarshalJSON", Method, 1}, + {"(*Int).UnmarshalText", Method, 3}, + {"(*Int).Xor", Method, 0}, + {"(*Rat).Abs", Method, 0}, + {"(*Rat).Add", Method, 0}, + {"(*Rat).Cmp", Method, 0}, + {"(*Rat).Denom", Method, 0}, + {"(*Rat).Float32", Method, 4}, + {"(*Rat).Float64", Method, 1}, + {"(*Rat).FloatPrec", Method, 22}, + {"(*Rat).FloatString", Method, 0}, + {"(*Rat).GobDecode", Method, 0}, + {"(*Rat).GobEncode", Method, 0}, + {"(*Rat).Inv", Method, 0}, + {"(*Rat).IsInt", Method, 0}, + {"(*Rat).MarshalText", Method, 3}, + {"(*Rat).Mul", Method, 0}, + {"(*Rat).Neg", Method, 0}, + {"(*Rat).Num", Method, 0}, + {"(*Rat).Quo", Method, 0}, + {"(*Rat).RatString", Method, 0}, + {"(*Rat).Scan", Method, 0}, + {"(*Rat).Set", Method, 0}, + {"(*Rat).SetFloat64", Method, 1}, + {"(*Rat).SetFrac", Method, 0}, + {"(*Rat).SetFrac64", Method, 0}, + {"(*Rat).SetInt", Method, 0}, + {"(*Rat).SetInt64", Method, 0}, + {"(*Rat).SetString", Method, 0}, + {"(*Rat).SetUint64", Method, 13}, + {"(*Rat).Sign", Method, 0}, + {"(*Rat).String", Method, 0}, + {"(*Rat).Sub", Method, 0}, + {"(*Rat).UnmarshalText", Method, 3}, + {"(Accuracy).String", Method, 5}, + {"(ErrNaN).Error", Method, 5}, + {"(RoundingMode).String", Method, 5}, + {"Above", Const, 5}, + {"Accuracy", Type, 5}, + {"AwayFromZero", Const, 5}, + {"Below", Const, 5}, + {"ErrNaN", Type, 5}, + {"Exact", Const, 5}, + {"Float", Type, 5}, + {"Int", Type, 0}, + {"Jacobi", Func, 5}, + {"MaxBase", Const, 0}, + {"MaxExp", Const, 5}, + {"MaxPrec", Const, 5}, + {"MinExp", Const, 5}, + {"NewFloat", Func, 5}, + {"NewInt", Func, 0}, + {"NewRat", Func, 0}, + {"ParseFloat", Func, 5}, + {"Rat", Type, 0}, + {"RoundingMode", Type, 5}, + {"ToNearestAway", Const, 5}, + {"ToNearestEven", Const, 5}, + {"ToNegativeInf", Const, 5}, + {"ToPositiveInf", Const, 5}, + {"ToZero", Const, 5}, + {"Word", Type, 0}, + }, + "math/bits": { + {"Add", Func, 12}, + {"Add32", Func, 12}, + {"Add64", Func, 12}, + {"Div", Func, 12}, + {"Div32", Func, 12}, + {"Div64", Func, 12}, + {"LeadingZeros", Func, 9}, + 
{"LeadingZeros16", Func, 9}, + {"LeadingZeros32", Func, 9}, + {"LeadingZeros64", Func, 9}, + {"LeadingZeros8", Func, 9}, + {"Len", Func, 9}, + {"Len16", Func, 9}, + {"Len32", Func, 9}, + {"Len64", Func, 9}, + {"Len8", Func, 9}, + {"Mul", Func, 12}, + {"Mul32", Func, 12}, + {"Mul64", Func, 12}, + {"OnesCount", Func, 9}, + {"OnesCount16", Func, 9}, + {"OnesCount32", Func, 9}, + {"OnesCount64", Func, 9}, + {"OnesCount8", Func, 9}, + {"Rem", Func, 14}, + {"Rem32", Func, 14}, + {"Rem64", Func, 14}, + {"Reverse", Func, 9}, + {"Reverse16", Func, 9}, + {"Reverse32", Func, 9}, + {"Reverse64", Func, 9}, + {"Reverse8", Func, 9}, + {"ReverseBytes", Func, 9}, + {"ReverseBytes16", Func, 9}, + {"ReverseBytes32", Func, 9}, + {"ReverseBytes64", Func, 9}, + {"RotateLeft", Func, 9}, + {"RotateLeft16", Func, 9}, + {"RotateLeft32", Func, 9}, + {"RotateLeft64", Func, 9}, + {"RotateLeft8", Func, 9}, + {"Sub", Func, 12}, + {"Sub32", Func, 12}, + {"Sub64", Func, 12}, + {"TrailingZeros", Func, 9}, + {"TrailingZeros16", Func, 9}, + {"TrailingZeros32", Func, 9}, + {"TrailingZeros64", Func, 9}, + {"TrailingZeros8", Func, 9}, + {"UintSize", Const, 9}, + }, + "math/cmplx": { + {"Abs", Func, 0}, + {"Acos", Func, 0}, + {"Acosh", Func, 0}, + {"Asin", Func, 0}, + {"Asinh", Func, 0}, + {"Atan", Func, 0}, + {"Atanh", Func, 0}, + {"Conj", Func, 0}, + {"Cos", Func, 0}, + {"Cosh", Func, 0}, + {"Cot", Func, 0}, + {"Exp", Func, 0}, + {"Inf", Func, 0}, + {"IsInf", Func, 0}, + {"IsNaN", Func, 0}, + {"Log", Func, 0}, + {"Log10", Func, 0}, + {"NaN", Func, 0}, + {"Phase", Func, 0}, + {"Polar", Func, 0}, + {"Pow", Func, 0}, + {"Rect", Func, 0}, + {"Sin", Func, 0}, + {"Sinh", Func, 0}, + {"Sqrt", Func, 0}, + {"Tan", Func, 0}, + {"Tanh", Func, 0}, + }, + "math/rand": { + {"(*Rand).ExpFloat64", Method, 0}, + {"(*Rand).Float32", Method, 0}, + {"(*Rand).Float64", Method, 0}, + {"(*Rand).Int", Method, 0}, + {"(*Rand).Int31", Method, 0}, + {"(*Rand).Int31n", Method, 0}, + {"(*Rand).Int63", Method, 0}, + {"(*Rand).Int63n", Method, 0}, + {"(*Rand).Intn", Method, 0}, + {"(*Rand).NormFloat64", Method, 0}, + {"(*Rand).Perm", Method, 0}, + {"(*Rand).Read", Method, 6}, + {"(*Rand).Seed", Method, 0}, + {"(*Rand).Shuffle", Method, 10}, + {"(*Rand).Uint32", Method, 0}, + {"(*Rand).Uint64", Method, 8}, + {"(*Zipf).Uint64", Method, 0}, + {"ExpFloat64", Func, 0}, + {"Float32", Func, 0}, + {"Float64", Func, 0}, + {"Int", Func, 0}, + {"Int31", Func, 0}, + {"Int31n", Func, 0}, + {"Int63", Func, 0}, + {"Int63n", Func, 0}, + {"Intn", Func, 0}, + {"New", Func, 0}, + {"NewSource", Func, 0}, + {"NewZipf", Func, 0}, + {"NormFloat64", Func, 0}, + {"Perm", Func, 0}, + {"Rand", Type, 0}, + {"Read", Func, 6}, + {"Seed", Func, 0}, + {"Shuffle", Func, 10}, + {"Source", Type, 0}, + {"Source64", Type, 8}, + {"Uint32", Func, 0}, + {"Uint64", Func, 8}, + {"Zipf", Type, 0}, + }, + "math/rand/v2": { + {"(*ChaCha8).MarshalBinary", Method, 22}, + {"(*ChaCha8).Seed", Method, 22}, + {"(*ChaCha8).Uint64", Method, 22}, + {"(*ChaCha8).UnmarshalBinary", Method, 22}, + {"(*PCG).MarshalBinary", Method, 22}, + {"(*PCG).Seed", Method, 22}, + {"(*PCG).Uint64", Method, 22}, + {"(*PCG).UnmarshalBinary", Method, 22}, + {"(*Rand).ExpFloat64", Method, 22}, + {"(*Rand).Float32", Method, 22}, + {"(*Rand).Float64", Method, 22}, + {"(*Rand).Int", Method, 22}, + {"(*Rand).Int32", Method, 22}, + {"(*Rand).Int32N", Method, 22}, + {"(*Rand).Int64", Method, 22}, + {"(*Rand).Int64N", Method, 22}, + {"(*Rand).IntN", Method, 22}, + {"(*Rand).NormFloat64", Method, 22}, + {"(*Rand).Perm", Method, 22}, + 
{"(*Rand).Shuffle", Method, 22}, + {"(*Rand).Uint32", Method, 22}, + {"(*Rand).Uint32N", Method, 22}, + {"(*Rand).Uint64", Method, 22}, + {"(*Rand).Uint64N", Method, 22}, + {"(*Rand).UintN", Method, 22}, + {"(*Zipf).Uint64", Method, 22}, + {"ChaCha8", Type, 22}, + {"ExpFloat64", Func, 22}, + {"Float32", Func, 22}, + {"Float64", Func, 22}, + {"Int", Func, 22}, + {"Int32", Func, 22}, + {"Int32N", Func, 22}, + {"Int64", Func, 22}, + {"Int64N", Func, 22}, + {"IntN", Func, 22}, + {"N", Func, 22}, + {"New", Func, 22}, + {"NewChaCha8", Func, 22}, + {"NewPCG", Func, 22}, + {"NewZipf", Func, 22}, + {"NormFloat64", Func, 22}, + {"PCG", Type, 22}, + {"Perm", Func, 22}, + {"Rand", Type, 22}, + {"Shuffle", Func, 22}, + {"Source", Type, 22}, + {"Uint32", Func, 22}, + {"Uint32N", Func, 22}, + {"Uint64", Func, 22}, + {"Uint64N", Func, 22}, + {"UintN", Func, 22}, + {"Zipf", Type, 22}, + }, + "mime": { + {"(*WordDecoder).Decode", Method, 5}, + {"(*WordDecoder).DecodeHeader", Method, 5}, + {"(WordEncoder).Encode", Method, 5}, + {"AddExtensionType", Func, 0}, + {"BEncoding", Const, 5}, + {"ErrInvalidMediaParameter", Var, 9}, + {"ExtensionsByType", Func, 5}, + {"FormatMediaType", Func, 0}, + {"ParseMediaType", Func, 0}, + {"QEncoding", Const, 5}, + {"TypeByExtension", Func, 0}, + {"WordDecoder", Type, 5}, + {"WordDecoder.CharsetReader", Field, 5}, + {"WordEncoder", Type, 5}, + }, + "mime/multipart": { + {"(*FileHeader).Open", Method, 0}, + {"(*Form).RemoveAll", Method, 0}, + {"(*Part).Close", Method, 0}, + {"(*Part).FileName", Method, 0}, + {"(*Part).FormName", Method, 0}, + {"(*Part).Read", Method, 0}, + {"(*Reader).NextPart", Method, 0}, + {"(*Reader).NextRawPart", Method, 14}, + {"(*Reader).ReadForm", Method, 0}, + {"(*Writer).Boundary", Method, 0}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).CreateFormField", Method, 0}, + {"(*Writer).CreateFormFile", Method, 0}, + {"(*Writer).CreatePart", Method, 0}, + {"(*Writer).FormDataContentType", Method, 0}, + {"(*Writer).SetBoundary", Method, 1}, + {"(*Writer).WriteField", Method, 0}, + {"ErrMessageTooLarge", Var, 9}, + {"File", Type, 0}, + {"FileHeader", Type, 0}, + {"FileHeader.Filename", Field, 0}, + {"FileHeader.Header", Field, 0}, + {"FileHeader.Size", Field, 9}, + {"Form", Type, 0}, + {"Form.File", Field, 0}, + {"Form.Value", Field, 0}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"Part", Type, 0}, + {"Part.Header", Field, 0}, + {"Reader", Type, 0}, + {"Writer", Type, 0}, + }, + "mime/quotedprintable": { + {"(*Reader).Read", Method, 5}, + {"(*Writer).Close", Method, 5}, + {"(*Writer).Write", Method, 5}, + {"NewReader", Func, 5}, + {"NewWriter", Func, 5}, + {"Reader", Type, 5}, + {"Writer", Type, 5}, + {"Writer.Binary", Field, 5}, + }, + "net": { + {"(*AddrError).Error", Method, 0}, + {"(*AddrError).Temporary", Method, 0}, + {"(*AddrError).Timeout", Method, 0}, + {"(*Buffers).Read", Method, 8}, + {"(*Buffers).WriteTo", Method, 8}, + {"(*DNSConfigError).Error", Method, 0}, + {"(*DNSConfigError).Temporary", Method, 0}, + {"(*DNSConfigError).Timeout", Method, 0}, + {"(*DNSConfigError).Unwrap", Method, 13}, + {"(*DNSError).Error", Method, 0}, + {"(*DNSError).Temporary", Method, 0}, + {"(*DNSError).Timeout", Method, 0}, + {"(*Dialer).Dial", Method, 1}, + {"(*Dialer).DialContext", Method, 7}, + {"(*Dialer).MultipathTCP", Method, 21}, + {"(*Dialer).SetMultipathTCP", Method, 21}, + {"(*IP).UnmarshalText", Method, 2}, + {"(*IPAddr).Network", Method, 0}, + {"(*IPAddr).String", Method, 0}, + {"(*IPConn).Close", Method, 0}, + {"(*IPConn).File", Method, 0}, + 
{"(*IPConn).LocalAddr", Method, 0}, + {"(*IPConn).Read", Method, 0}, + {"(*IPConn).ReadFrom", Method, 0}, + {"(*IPConn).ReadFromIP", Method, 0}, + {"(*IPConn).ReadMsgIP", Method, 1}, + {"(*IPConn).RemoteAddr", Method, 0}, + {"(*IPConn).SetDeadline", Method, 0}, + {"(*IPConn).SetReadBuffer", Method, 0}, + {"(*IPConn).SetReadDeadline", Method, 0}, + {"(*IPConn).SetWriteBuffer", Method, 0}, + {"(*IPConn).SetWriteDeadline", Method, 0}, + {"(*IPConn).SyscallConn", Method, 9}, + {"(*IPConn).Write", Method, 0}, + {"(*IPConn).WriteMsgIP", Method, 1}, + {"(*IPConn).WriteTo", Method, 0}, + {"(*IPConn).WriteToIP", Method, 0}, + {"(*IPNet).Contains", Method, 0}, + {"(*IPNet).Network", Method, 0}, + {"(*IPNet).String", Method, 0}, + {"(*Interface).Addrs", Method, 0}, + {"(*Interface).MulticastAddrs", Method, 0}, + {"(*ListenConfig).Listen", Method, 11}, + {"(*ListenConfig).ListenPacket", Method, 11}, + {"(*ListenConfig).MultipathTCP", Method, 21}, + {"(*ListenConfig).SetMultipathTCP", Method, 21}, + {"(*OpError).Error", Method, 0}, + {"(*OpError).Temporary", Method, 0}, + {"(*OpError).Timeout", Method, 0}, + {"(*OpError).Unwrap", Method, 13}, + {"(*ParseError).Error", Method, 0}, + {"(*ParseError).Temporary", Method, 17}, + {"(*ParseError).Timeout", Method, 17}, + {"(*Resolver).LookupAddr", Method, 8}, + {"(*Resolver).LookupCNAME", Method, 8}, + {"(*Resolver).LookupHost", Method, 8}, + {"(*Resolver).LookupIP", Method, 15}, + {"(*Resolver).LookupIPAddr", Method, 8}, + {"(*Resolver).LookupMX", Method, 8}, + {"(*Resolver).LookupNS", Method, 8}, + {"(*Resolver).LookupNetIP", Method, 18}, + {"(*Resolver).LookupPort", Method, 8}, + {"(*Resolver).LookupSRV", Method, 8}, + {"(*Resolver).LookupTXT", Method, 8}, + {"(*TCPAddr).AddrPort", Method, 18}, + {"(*TCPAddr).Network", Method, 0}, + {"(*TCPAddr).String", Method, 0}, + {"(*TCPConn).Close", Method, 0}, + {"(*TCPConn).CloseRead", Method, 0}, + {"(*TCPConn).CloseWrite", Method, 0}, + {"(*TCPConn).File", Method, 0}, + {"(*TCPConn).LocalAddr", Method, 0}, + {"(*TCPConn).MultipathTCP", Method, 21}, + {"(*TCPConn).Read", Method, 0}, + {"(*TCPConn).ReadFrom", Method, 0}, + {"(*TCPConn).RemoteAddr", Method, 0}, + {"(*TCPConn).SetDeadline", Method, 0}, + {"(*TCPConn).SetKeepAlive", Method, 0}, + {"(*TCPConn).SetKeepAlivePeriod", Method, 2}, + {"(*TCPConn).SetLinger", Method, 0}, + {"(*TCPConn).SetNoDelay", Method, 0}, + {"(*TCPConn).SetReadBuffer", Method, 0}, + {"(*TCPConn).SetReadDeadline", Method, 0}, + {"(*TCPConn).SetWriteBuffer", Method, 0}, + {"(*TCPConn).SetWriteDeadline", Method, 0}, + {"(*TCPConn).SyscallConn", Method, 9}, + {"(*TCPConn).Write", Method, 0}, + {"(*TCPConn).WriteTo", Method, 22}, + {"(*TCPListener).Accept", Method, 0}, + {"(*TCPListener).AcceptTCP", Method, 0}, + {"(*TCPListener).Addr", Method, 0}, + {"(*TCPListener).Close", Method, 0}, + {"(*TCPListener).File", Method, 0}, + {"(*TCPListener).SetDeadline", Method, 0}, + {"(*TCPListener).SyscallConn", Method, 10}, + {"(*UDPAddr).AddrPort", Method, 18}, + {"(*UDPAddr).Network", Method, 0}, + {"(*UDPAddr).String", Method, 0}, + {"(*UDPConn).Close", Method, 0}, + {"(*UDPConn).File", Method, 0}, + {"(*UDPConn).LocalAddr", Method, 0}, + {"(*UDPConn).Read", Method, 0}, + {"(*UDPConn).ReadFrom", Method, 0}, + {"(*UDPConn).ReadFromUDP", Method, 0}, + {"(*UDPConn).ReadFromUDPAddrPort", Method, 18}, + {"(*UDPConn).ReadMsgUDP", Method, 1}, + {"(*UDPConn).ReadMsgUDPAddrPort", Method, 18}, + {"(*UDPConn).RemoteAddr", Method, 0}, + {"(*UDPConn).SetDeadline", Method, 0}, + {"(*UDPConn).SetReadBuffer", 
Method, 0}, + {"(*UDPConn).SetReadDeadline", Method, 0}, + {"(*UDPConn).SetWriteBuffer", Method, 0}, + {"(*UDPConn).SetWriteDeadline", Method, 0}, + {"(*UDPConn).SyscallConn", Method, 9}, + {"(*UDPConn).Write", Method, 0}, + {"(*UDPConn).WriteMsgUDP", Method, 1}, + {"(*UDPConn).WriteMsgUDPAddrPort", Method, 18}, + {"(*UDPConn).WriteTo", Method, 0}, + {"(*UDPConn).WriteToUDP", Method, 0}, + {"(*UDPConn).WriteToUDPAddrPort", Method, 18}, + {"(*UnixAddr).Network", Method, 0}, + {"(*UnixAddr).String", Method, 0}, + {"(*UnixConn).Close", Method, 0}, + {"(*UnixConn).CloseRead", Method, 1}, + {"(*UnixConn).CloseWrite", Method, 1}, + {"(*UnixConn).File", Method, 0}, + {"(*UnixConn).LocalAddr", Method, 0}, + {"(*UnixConn).Read", Method, 0}, + {"(*UnixConn).ReadFrom", Method, 0}, + {"(*UnixConn).ReadFromUnix", Method, 0}, + {"(*UnixConn).ReadMsgUnix", Method, 0}, + {"(*UnixConn).RemoteAddr", Method, 0}, + {"(*UnixConn).SetDeadline", Method, 0}, + {"(*UnixConn).SetReadBuffer", Method, 0}, + {"(*UnixConn).SetReadDeadline", Method, 0}, + {"(*UnixConn).SetWriteBuffer", Method, 0}, + {"(*UnixConn).SetWriteDeadline", Method, 0}, + {"(*UnixConn).SyscallConn", Method, 9}, + {"(*UnixConn).Write", Method, 0}, + {"(*UnixConn).WriteMsgUnix", Method, 0}, + {"(*UnixConn).WriteTo", Method, 0}, + {"(*UnixConn).WriteToUnix", Method, 0}, + {"(*UnixListener).Accept", Method, 0}, + {"(*UnixListener).AcceptUnix", Method, 0}, + {"(*UnixListener).Addr", Method, 0}, + {"(*UnixListener).Close", Method, 0}, + {"(*UnixListener).File", Method, 0}, + {"(*UnixListener).SetDeadline", Method, 0}, + {"(*UnixListener).SetUnlinkOnClose", Method, 8}, + {"(*UnixListener).SyscallConn", Method, 10}, + {"(Flags).String", Method, 0}, + {"(HardwareAddr).String", Method, 0}, + {"(IP).DefaultMask", Method, 0}, + {"(IP).Equal", Method, 0}, + {"(IP).IsGlobalUnicast", Method, 0}, + {"(IP).IsInterfaceLocalMulticast", Method, 0}, + {"(IP).IsLinkLocalMulticast", Method, 0}, + {"(IP).IsLinkLocalUnicast", Method, 0}, + {"(IP).IsLoopback", Method, 0}, + {"(IP).IsMulticast", Method, 0}, + {"(IP).IsPrivate", Method, 17}, + {"(IP).IsUnspecified", Method, 0}, + {"(IP).MarshalText", Method, 2}, + {"(IP).Mask", Method, 0}, + {"(IP).String", Method, 0}, + {"(IP).To16", Method, 0}, + {"(IP).To4", Method, 0}, + {"(IPMask).Size", Method, 0}, + {"(IPMask).String", Method, 0}, + {"(InvalidAddrError).Error", Method, 0}, + {"(InvalidAddrError).Temporary", Method, 0}, + {"(InvalidAddrError).Timeout", Method, 0}, + {"(UnknownNetworkError).Error", Method, 0}, + {"(UnknownNetworkError).Temporary", Method, 0}, + {"(UnknownNetworkError).Timeout", Method, 0}, + {"Addr", Type, 0}, + {"AddrError", Type, 0}, + {"AddrError.Addr", Field, 0}, + {"AddrError.Err", Field, 0}, + {"Buffers", Type, 8}, + {"CIDRMask", Func, 0}, + {"Conn", Type, 0}, + {"DNSConfigError", Type, 0}, + {"DNSConfigError.Err", Field, 0}, + {"DNSError", Type, 0}, + {"DNSError.Err", Field, 0}, + {"DNSError.IsNotFound", Field, 13}, + {"DNSError.IsTemporary", Field, 6}, + {"DNSError.IsTimeout", Field, 0}, + {"DNSError.Name", Field, 0}, + {"DNSError.Server", Field, 0}, + {"DefaultResolver", Var, 8}, + {"Dial", Func, 0}, + {"DialIP", Func, 0}, + {"DialTCP", Func, 0}, + {"DialTimeout", Func, 0}, + {"DialUDP", Func, 0}, + {"DialUnix", Func, 0}, + {"Dialer", Type, 1}, + {"Dialer.Cancel", Field, 6}, + {"Dialer.Control", Field, 11}, + {"Dialer.ControlContext", Field, 20}, + {"Dialer.Deadline", Field, 1}, + {"Dialer.DualStack", Field, 2}, + {"Dialer.FallbackDelay", Field, 5}, + {"Dialer.KeepAlive", Field, 3}, + 
{"Dialer.LocalAddr", Field, 1}, + {"Dialer.Resolver", Field, 8}, + {"Dialer.Timeout", Field, 1}, + {"ErrClosed", Var, 16}, + {"ErrWriteToConnected", Var, 0}, + {"Error", Type, 0}, + {"FileConn", Func, 0}, + {"FileListener", Func, 0}, + {"FilePacketConn", Func, 0}, + {"FlagBroadcast", Const, 0}, + {"FlagLoopback", Const, 0}, + {"FlagMulticast", Const, 0}, + {"FlagPointToPoint", Const, 0}, + {"FlagRunning", Const, 20}, + {"FlagUp", Const, 0}, + {"Flags", Type, 0}, + {"HardwareAddr", Type, 0}, + {"IP", Type, 0}, + {"IPAddr", Type, 0}, + {"IPAddr.IP", Field, 0}, + {"IPAddr.Zone", Field, 1}, + {"IPConn", Type, 0}, + {"IPMask", Type, 0}, + {"IPNet", Type, 0}, + {"IPNet.IP", Field, 0}, + {"IPNet.Mask", Field, 0}, + {"IPv4", Func, 0}, + {"IPv4Mask", Func, 0}, + {"IPv4allrouter", Var, 0}, + {"IPv4allsys", Var, 0}, + {"IPv4bcast", Var, 0}, + {"IPv4len", Const, 0}, + {"IPv4zero", Var, 0}, + {"IPv6interfacelocalallnodes", Var, 0}, + {"IPv6len", Const, 0}, + {"IPv6linklocalallnodes", Var, 0}, + {"IPv6linklocalallrouters", Var, 0}, + {"IPv6loopback", Var, 0}, + {"IPv6unspecified", Var, 0}, + {"IPv6zero", Var, 0}, + {"Interface", Type, 0}, + {"Interface.Flags", Field, 0}, + {"Interface.HardwareAddr", Field, 0}, + {"Interface.Index", Field, 0}, + {"Interface.MTU", Field, 0}, + {"Interface.Name", Field, 0}, + {"InterfaceAddrs", Func, 0}, + {"InterfaceByIndex", Func, 0}, + {"InterfaceByName", Func, 0}, + {"Interfaces", Func, 0}, + {"InvalidAddrError", Type, 0}, + {"JoinHostPort", Func, 0}, + {"Listen", Func, 0}, + {"ListenConfig", Type, 11}, + {"ListenConfig.Control", Field, 11}, + {"ListenConfig.KeepAlive", Field, 13}, + {"ListenIP", Func, 0}, + {"ListenMulticastUDP", Func, 0}, + {"ListenPacket", Func, 0}, + {"ListenTCP", Func, 0}, + {"ListenUDP", Func, 0}, + {"ListenUnix", Func, 0}, + {"ListenUnixgram", Func, 0}, + {"Listener", Type, 0}, + {"LookupAddr", Func, 0}, + {"LookupCNAME", Func, 0}, + {"LookupHost", Func, 0}, + {"LookupIP", Func, 0}, + {"LookupMX", Func, 0}, + {"LookupNS", Func, 1}, + {"LookupPort", Func, 0}, + {"LookupSRV", Func, 0}, + {"LookupTXT", Func, 0}, + {"MX", Type, 0}, + {"MX.Host", Field, 0}, + {"MX.Pref", Field, 0}, + {"NS", Type, 1}, + {"NS.Host", Field, 1}, + {"OpError", Type, 0}, + {"OpError.Addr", Field, 0}, + {"OpError.Err", Field, 0}, + {"OpError.Net", Field, 0}, + {"OpError.Op", Field, 0}, + {"OpError.Source", Field, 5}, + {"PacketConn", Type, 0}, + {"ParseCIDR", Func, 0}, + {"ParseError", Type, 0}, + {"ParseError.Text", Field, 0}, + {"ParseError.Type", Field, 0}, + {"ParseIP", Func, 0}, + {"ParseMAC", Func, 0}, + {"Pipe", Func, 0}, + {"ResolveIPAddr", Func, 0}, + {"ResolveTCPAddr", Func, 0}, + {"ResolveUDPAddr", Func, 0}, + {"ResolveUnixAddr", Func, 0}, + {"Resolver", Type, 8}, + {"Resolver.Dial", Field, 9}, + {"Resolver.PreferGo", Field, 8}, + {"Resolver.StrictErrors", Field, 9}, + {"SRV", Type, 0}, + {"SRV.Port", Field, 0}, + {"SRV.Priority", Field, 0}, + {"SRV.Target", Field, 0}, + {"SRV.Weight", Field, 0}, + {"SplitHostPort", Func, 0}, + {"TCPAddr", Type, 0}, + {"TCPAddr.IP", Field, 0}, + {"TCPAddr.Port", Field, 0}, + {"TCPAddr.Zone", Field, 1}, + {"TCPAddrFromAddrPort", Func, 18}, + {"TCPConn", Type, 0}, + {"TCPListener", Type, 0}, + {"UDPAddr", Type, 0}, + {"UDPAddr.IP", Field, 0}, + {"UDPAddr.Port", Field, 0}, + {"UDPAddr.Zone", Field, 1}, + {"UDPAddrFromAddrPort", Func, 18}, + {"UDPConn", Type, 0}, + {"UnixAddr", Type, 0}, + {"UnixAddr.Name", Field, 0}, + {"UnixAddr.Net", Field, 0}, + {"UnixConn", Type, 0}, + {"UnixListener", Type, 0}, + {"UnknownNetworkError", Type, 
0}, + }, + "net/http": { + {"(*Client).CloseIdleConnections", Method, 12}, + {"(*Client).Do", Method, 0}, + {"(*Client).Get", Method, 0}, + {"(*Client).Head", Method, 0}, + {"(*Client).Post", Method, 0}, + {"(*Client).PostForm", Method, 0}, + {"(*Cookie).String", Method, 0}, + {"(*Cookie).Valid", Method, 18}, + {"(*MaxBytesError).Error", Method, 19}, + {"(*ProtocolError).Error", Method, 0}, + {"(*ProtocolError).Is", Method, 21}, + {"(*Request).AddCookie", Method, 0}, + {"(*Request).BasicAuth", Method, 4}, + {"(*Request).Clone", Method, 13}, + {"(*Request).Context", Method, 7}, + {"(*Request).Cookie", Method, 0}, + {"(*Request).Cookies", Method, 0}, + {"(*Request).FormFile", Method, 0}, + {"(*Request).FormValue", Method, 0}, + {"(*Request).MultipartReader", Method, 0}, + {"(*Request).ParseForm", Method, 0}, + {"(*Request).ParseMultipartForm", Method, 0}, + {"(*Request).PathValue", Method, 22}, + {"(*Request).PostFormValue", Method, 1}, + {"(*Request).ProtoAtLeast", Method, 0}, + {"(*Request).Referer", Method, 0}, + {"(*Request).SetBasicAuth", Method, 0}, + {"(*Request).SetPathValue", Method, 22}, + {"(*Request).UserAgent", Method, 0}, + {"(*Request).WithContext", Method, 7}, + {"(*Request).Write", Method, 0}, + {"(*Request).WriteProxy", Method, 0}, + {"(*Response).Cookies", Method, 0}, + {"(*Response).Location", Method, 0}, + {"(*Response).ProtoAtLeast", Method, 0}, + {"(*Response).Write", Method, 0}, + {"(*ResponseController).EnableFullDuplex", Method, 21}, + {"(*ResponseController).Flush", Method, 20}, + {"(*ResponseController).Hijack", Method, 20}, + {"(*ResponseController).SetReadDeadline", Method, 20}, + {"(*ResponseController).SetWriteDeadline", Method, 20}, + {"(*ServeMux).Handle", Method, 0}, + {"(*ServeMux).HandleFunc", Method, 0}, + {"(*ServeMux).Handler", Method, 1}, + {"(*ServeMux).ServeHTTP", Method, 0}, + {"(*Server).Close", Method, 8}, + {"(*Server).ListenAndServe", Method, 0}, + {"(*Server).ListenAndServeTLS", Method, 0}, + {"(*Server).RegisterOnShutdown", Method, 9}, + {"(*Server).Serve", Method, 0}, + {"(*Server).ServeTLS", Method, 9}, + {"(*Server).SetKeepAlivesEnabled", Method, 3}, + {"(*Server).Shutdown", Method, 8}, + {"(*Transport).CancelRequest", Method, 1}, + {"(*Transport).Clone", Method, 13}, + {"(*Transport).CloseIdleConnections", Method, 0}, + {"(*Transport).RegisterProtocol", Method, 0}, + {"(*Transport).RoundTrip", Method, 0}, + {"(ConnState).String", Method, 3}, + {"(Dir).Open", Method, 0}, + {"(HandlerFunc).ServeHTTP", Method, 0}, + {"(Header).Add", Method, 0}, + {"(Header).Clone", Method, 13}, + {"(Header).Del", Method, 0}, + {"(Header).Get", Method, 0}, + {"(Header).Set", Method, 0}, + {"(Header).Values", Method, 14}, + {"(Header).Write", Method, 0}, + {"(Header).WriteSubset", Method, 0}, + {"AllowQuerySemicolons", Func, 17}, + {"CanonicalHeaderKey", Func, 0}, + {"Client", Type, 0}, + {"Client.CheckRedirect", Field, 0}, + {"Client.Jar", Field, 0}, + {"Client.Timeout", Field, 3}, + {"Client.Transport", Field, 0}, + {"CloseNotifier", Type, 1}, + {"ConnState", Type, 3}, + {"Cookie", Type, 0}, + {"Cookie.Domain", Field, 0}, + {"Cookie.Expires", Field, 0}, + {"Cookie.HttpOnly", Field, 0}, + {"Cookie.MaxAge", Field, 0}, + {"Cookie.Name", Field, 0}, + {"Cookie.Path", Field, 0}, + {"Cookie.Raw", Field, 0}, + {"Cookie.RawExpires", Field, 0}, + {"Cookie.SameSite", Field, 11}, + {"Cookie.Secure", Field, 0}, + {"Cookie.Unparsed", Field, 0}, + {"Cookie.Value", Field, 0}, + {"CookieJar", Type, 0}, + {"DefaultClient", Var, 0}, + {"DefaultMaxHeaderBytes", Const, 0}, + 
{"DefaultMaxIdleConnsPerHost", Const, 0}, + {"DefaultServeMux", Var, 0}, + {"DefaultTransport", Var, 0}, + {"DetectContentType", Func, 0}, + {"Dir", Type, 0}, + {"ErrAbortHandler", Var, 8}, + {"ErrBodyNotAllowed", Var, 0}, + {"ErrBodyReadAfterClose", Var, 0}, + {"ErrContentLength", Var, 0}, + {"ErrHandlerTimeout", Var, 0}, + {"ErrHeaderTooLong", Var, 0}, + {"ErrHijacked", Var, 0}, + {"ErrLineTooLong", Var, 0}, + {"ErrMissingBoundary", Var, 0}, + {"ErrMissingContentLength", Var, 0}, + {"ErrMissingFile", Var, 0}, + {"ErrNoCookie", Var, 0}, + {"ErrNoLocation", Var, 0}, + {"ErrNotMultipart", Var, 0}, + {"ErrNotSupported", Var, 0}, + {"ErrSchemeMismatch", Var, 21}, + {"ErrServerClosed", Var, 8}, + {"ErrShortBody", Var, 0}, + {"ErrSkipAltProtocol", Var, 6}, + {"ErrUnexpectedTrailer", Var, 0}, + {"ErrUseLastResponse", Var, 7}, + {"ErrWriteAfterFlush", Var, 0}, + {"Error", Func, 0}, + {"FS", Func, 16}, + {"File", Type, 0}, + {"FileServer", Func, 0}, + {"FileServerFS", Func, 22}, + {"FileSystem", Type, 0}, + {"Flusher", Type, 0}, + {"Get", Func, 0}, + {"Handle", Func, 0}, + {"HandleFunc", Func, 0}, + {"Handler", Type, 0}, + {"HandlerFunc", Type, 0}, + {"Head", Func, 0}, + {"Header", Type, 0}, + {"Hijacker", Type, 0}, + {"ListenAndServe", Func, 0}, + {"ListenAndServeTLS", Func, 0}, + {"LocalAddrContextKey", Var, 7}, + {"MaxBytesError", Type, 19}, + {"MaxBytesError.Limit", Field, 19}, + {"MaxBytesHandler", Func, 18}, + {"MaxBytesReader", Func, 0}, + {"MethodConnect", Const, 6}, + {"MethodDelete", Const, 6}, + {"MethodGet", Const, 6}, + {"MethodHead", Const, 6}, + {"MethodOptions", Const, 6}, + {"MethodPatch", Const, 6}, + {"MethodPost", Const, 6}, + {"MethodPut", Const, 6}, + {"MethodTrace", Const, 6}, + {"NewFileTransport", Func, 0}, + {"NewFileTransportFS", Func, 22}, + {"NewRequest", Func, 0}, + {"NewRequestWithContext", Func, 13}, + {"NewResponseController", Func, 20}, + {"NewServeMux", Func, 0}, + {"NoBody", Var, 8}, + {"NotFound", Func, 0}, + {"NotFoundHandler", Func, 0}, + {"ParseHTTPVersion", Func, 0}, + {"ParseTime", Func, 1}, + {"Post", Func, 0}, + {"PostForm", Func, 0}, + {"ProtocolError", Type, 0}, + {"ProtocolError.ErrorString", Field, 0}, + {"ProxyFromEnvironment", Func, 0}, + {"ProxyURL", Func, 0}, + {"PushOptions", Type, 8}, + {"PushOptions.Header", Field, 8}, + {"PushOptions.Method", Field, 8}, + {"Pusher", Type, 8}, + {"ReadRequest", Func, 0}, + {"ReadResponse", Func, 0}, + {"Redirect", Func, 0}, + {"RedirectHandler", Func, 0}, + {"Request", Type, 0}, + {"Request.Body", Field, 0}, + {"Request.Cancel", Field, 5}, + {"Request.Close", Field, 0}, + {"Request.ContentLength", Field, 0}, + {"Request.Form", Field, 0}, + {"Request.GetBody", Field, 8}, + {"Request.Header", Field, 0}, + {"Request.Host", Field, 0}, + {"Request.Method", Field, 0}, + {"Request.MultipartForm", Field, 0}, + {"Request.PostForm", Field, 1}, + {"Request.Proto", Field, 0}, + {"Request.ProtoMajor", Field, 0}, + {"Request.ProtoMinor", Field, 0}, + {"Request.RemoteAddr", Field, 0}, + {"Request.RequestURI", Field, 0}, + {"Request.Response", Field, 7}, + {"Request.TLS", Field, 0}, + {"Request.Trailer", Field, 0}, + {"Request.TransferEncoding", Field, 0}, + {"Request.URL", Field, 0}, + {"Response", Type, 0}, + {"Response.Body", Field, 0}, + {"Response.Close", Field, 0}, + {"Response.ContentLength", Field, 0}, + {"Response.Header", Field, 0}, + {"Response.Proto", Field, 0}, + {"Response.ProtoMajor", Field, 0}, + {"Response.ProtoMinor", Field, 0}, + {"Response.Request", Field, 0}, + {"Response.Status", Field, 0}, + 
{"Response.StatusCode", Field, 0}, + {"Response.TLS", Field, 3}, + {"Response.Trailer", Field, 0}, + {"Response.TransferEncoding", Field, 0}, + {"Response.Uncompressed", Field, 7}, + {"ResponseController", Type, 20}, + {"ResponseWriter", Type, 0}, + {"RoundTripper", Type, 0}, + {"SameSite", Type, 11}, + {"SameSiteDefaultMode", Const, 11}, + {"SameSiteLaxMode", Const, 11}, + {"SameSiteNoneMode", Const, 13}, + {"SameSiteStrictMode", Const, 11}, + {"Serve", Func, 0}, + {"ServeContent", Func, 0}, + {"ServeFile", Func, 0}, + {"ServeFileFS", Func, 22}, + {"ServeMux", Type, 0}, + {"ServeTLS", Func, 9}, + {"Server", Type, 0}, + {"Server.Addr", Field, 0}, + {"Server.BaseContext", Field, 13}, + {"Server.ConnContext", Field, 13}, + {"Server.ConnState", Field, 3}, + {"Server.DisableGeneralOptionsHandler", Field, 20}, + {"Server.ErrorLog", Field, 3}, + {"Server.Handler", Field, 0}, + {"Server.IdleTimeout", Field, 8}, + {"Server.MaxHeaderBytes", Field, 0}, + {"Server.ReadHeaderTimeout", Field, 8}, + {"Server.ReadTimeout", Field, 0}, + {"Server.TLSConfig", Field, 0}, + {"Server.TLSNextProto", Field, 1}, + {"Server.WriteTimeout", Field, 0}, + {"ServerContextKey", Var, 7}, + {"SetCookie", Func, 0}, + {"StateActive", Const, 3}, + {"StateClosed", Const, 3}, + {"StateHijacked", Const, 3}, + {"StateIdle", Const, 3}, + {"StateNew", Const, 3}, + {"StatusAccepted", Const, 0}, + {"StatusAlreadyReported", Const, 7}, + {"StatusBadGateway", Const, 0}, + {"StatusBadRequest", Const, 0}, + {"StatusConflict", Const, 0}, + {"StatusContinue", Const, 0}, + {"StatusCreated", Const, 0}, + {"StatusEarlyHints", Const, 13}, + {"StatusExpectationFailed", Const, 0}, + {"StatusFailedDependency", Const, 7}, + {"StatusForbidden", Const, 0}, + {"StatusFound", Const, 0}, + {"StatusGatewayTimeout", Const, 0}, + {"StatusGone", Const, 0}, + {"StatusHTTPVersionNotSupported", Const, 0}, + {"StatusIMUsed", Const, 7}, + {"StatusInsufficientStorage", Const, 7}, + {"StatusInternalServerError", Const, 0}, + {"StatusLengthRequired", Const, 0}, + {"StatusLocked", Const, 7}, + {"StatusLoopDetected", Const, 7}, + {"StatusMethodNotAllowed", Const, 0}, + {"StatusMisdirectedRequest", Const, 11}, + {"StatusMovedPermanently", Const, 0}, + {"StatusMultiStatus", Const, 7}, + {"StatusMultipleChoices", Const, 0}, + {"StatusNetworkAuthenticationRequired", Const, 6}, + {"StatusNoContent", Const, 0}, + {"StatusNonAuthoritativeInfo", Const, 0}, + {"StatusNotAcceptable", Const, 0}, + {"StatusNotExtended", Const, 7}, + {"StatusNotFound", Const, 0}, + {"StatusNotImplemented", Const, 0}, + {"StatusNotModified", Const, 0}, + {"StatusOK", Const, 0}, + {"StatusPartialContent", Const, 0}, + {"StatusPaymentRequired", Const, 0}, + {"StatusPermanentRedirect", Const, 7}, + {"StatusPreconditionFailed", Const, 0}, + {"StatusPreconditionRequired", Const, 6}, + {"StatusProcessing", Const, 7}, + {"StatusProxyAuthRequired", Const, 0}, + {"StatusRequestEntityTooLarge", Const, 0}, + {"StatusRequestHeaderFieldsTooLarge", Const, 6}, + {"StatusRequestTimeout", Const, 0}, + {"StatusRequestURITooLong", Const, 0}, + {"StatusRequestedRangeNotSatisfiable", Const, 0}, + {"StatusResetContent", Const, 0}, + {"StatusSeeOther", Const, 0}, + {"StatusServiceUnavailable", Const, 0}, + {"StatusSwitchingProtocols", Const, 0}, + {"StatusTeapot", Const, 0}, + {"StatusTemporaryRedirect", Const, 0}, + {"StatusText", Func, 0}, + {"StatusTooEarly", Const, 12}, + {"StatusTooManyRequests", Const, 6}, + {"StatusUnauthorized", Const, 0}, + {"StatusUnavailableForLegalReasons", Const, 6}, + 
{"StatusUnprocessableEntity", Const, 7}, + {"StatusUnsupportedMediaType", Const, 0}, + {"StatusUpgradeRequired", Const, 7}, + {"StatusUseProxy", Const, 0}, + {"StatusVariantAlsoNegotiates", Const, 7}, + {"StripPrefix", Func, 0}, + {"TimeFormat", Const, 0}, + {"TimeoutHandler", Func, 0}, + {"TrailerPrefix", Const, 8}, + {"Transport", Type, 0}, + {"Transport.Dial", Field, 0}, + {"Transport.DialContext", Field, 7}, + {"Transport.DialTLS", Field, 4}, + {"Transport.DialTLSContext", Field, 14}, + {"Transport.DisableCompression", Field, 0}, + {"Transport.DisableKeepAlives", Field, 0}, + {"Transport.ExpectContinueTimeout", Field, 6}, + {"Transport.ForceAttemptHTTP2", Field, 13}, + {"Transport.GetProxyConnectHeader", Field, 16}, + {"Transport.IdleConnTimeout", Field, 7}, + {"Transport.MaxConnsPerHost", Field, 11}, + {"Transport.MaxIdleConns", Field, 7}, + {"Transport.MaxIdleConnsPerHost", Field, 0}, + {"Transport.MaxResponseHeaderBytes", Field, 7}, + {"Transport.OnProxyConnectResponse", Field, 20}, + {"Transport.Proxy", Field, 0}, + {"Transport.ProxyConnectHeader", Field, 8}, + {"Transport.ReadBufferSize", Field, 13}, + {"Transport.ResponseHeaderTimeout", Field, 1}, + {"Transport.TLSClientConfig", Field, 0}, + {"Transport.TLSHandshakeTimeout", Field, 3}, + {"Transport.TLSNextProto", Field, 6}, + {"Transport.WriteBufferSize", Field, 13}, + }, + "net/http/cgi": { + {"(*Handler).ServeHTTP", Method, 0}, + {"Handler", Type, 0}, + {"Handler.Args", Field, 0}, + {"Handler.Dir", Field, 0}, + {"Handler.Env", Field, 0}, + {"Handler.InheritEnv", Field, 0}, + {"Handler.Logger", Field, 0}, + {"Handler.Path", Field, 0}, + {"Handler.PathLocationHandler", Field, 0}, + {"Handler.Root", Field, 0}, + {"Handler.Stderr", Field, 7}, + {"Request", Func, 0}, + {"RequestFromMap", Func, 0}, + {"Serve", Func, 0}, + }, + "net/http/cookiejar": { + {"(*Jar).Cookies", Method, 1}, + {"(*Jar).SetCookies", Method, 1}, + {"Jar", Type, 1}, + {"New", Func, 1}, + {"Options", Type, 1}, + {"Options.PublicSuffixList", Field, 1}, + {"PublicSuffixList", Type, 1}, + }, + "net/http/fcgi": { + {"ErrConnClosed", Var, 5}, + {"ErrRequestAborted", Var, 5}, + {"ProcessEnv", Func, 9}, + {"Serve", Func, 0}, + }, + "net/http/httptest": { + {"(*ResponseRecorder).Flush", Method, 0}, + {"(*ResponseRecorder).Header", Method, 0}, + {"(*ResponseRecorder).Result", Method, 7}, + {"(*ResponseRecorder).Write", Method, 0}, + {"(*ResponseRecorder).WriteHeader", Method, 0}, + {"(*ResponseRecorder).WriteString", Method, 6}, + {"(*Server).Certificate", Method, 9}, + {"(*Server).Client", Method, 9}, + {"(*Server).Close", Method, 0}, + {"(*Server).CloseClientConnections", Method, 0}, + {"(*Server).Start", Method, 0}, + {"(*Server).StartTLS", Method, 0}, + {"DefaultRemoteAddr", Const, 0}, + {"NewRecorder", Func, 0}, + {"NewRequest", Func, 7}, + {"NewServer", Func, 0}, + {"NewTLSServer", Func, 0}, + {"NewUnstartedServer", Func, 0}, + {"ResponseRecorder", Type, 0}, + {"ResponseRecorder.Body", Field, 0}, + {"ResponseRecorder.Code", Field, 0}, + {"ResponseRecorder.Flushed", Field, 0}, + {"ResponseRecorder.HeaderMap", Field, 0}, + {"Server", Type, 0}, + {"Server.Config", Field, 0}, + {"Server.EnableHTTP2", Field, 14}, + {"Server.Listener", Field, 0}, + {"Server.TLS", Field, 0}, + {"Server.URL", Field, 0}, + }, + "net/http/httptrace": { + {"ClientTrace", Type, 7}, + {"ClientTrace.ConnectDone", Field, 7}, + {"ClientTrace.ConnectStart", Field, 7}, + {"ClientTrace.DNSDone", Field, 7}, + {"ClientTrace.DNSStart", Field, 7}, + {"ClientTrace.GetConn", Field, 7}, + 
{"ClientTrace.Got100Continue", Field, 7}, + {"ClientTrace.Got1xxResponse", Field, 11}, + {"ClientTrace.GotConn", Field, 7}, + {"ClientTrace.GotFirstResponseByte", Field, 7}, + {"ClientTrace.PutIdleConn", Field, 7}, + {"ClientTrace.TLSHandshakeDone", Field, 8}, + {"ClientTrace.TLSHandshakeStart", Field, 8}, + {"ClientTrace.Wait100Continue", Field, 7}, + {"ClientTrace.WroteHeaderField", Field, 11}, + {"ClientTrace.WroteHeaders", Field, 7}, + {"ClientTrace.WroteRequest", Field, 7}, + {"ContextClientTrace", Func, 7}, + {"DNSDoneInfo", Type, 7}, + {"DNSDoneInfo.Addrs", Field, 7}, + {"DNSDoneInfo.Coalesced", Field, 7}, + {"DNSDoneInfo.Err", Field, 7}, + {"DNSStartInfo", Type, 7}, + {"DNSStartInfo.Host", Field, 7}, + {"GotConnInfo", Type, 7}, + {"GotConnInfo.Conn", Field, 7}, + {"GotConnInfo.IdleTime", Field, 7}, + {"GotConnInfo.Reused", Field, 7}, + {"GotConnInfo.WasIdle", Field, 7}, + {"WithClientTrace", Func, 7}, + {"WroteRequestInfo", Type, 7}, + {"WroteRequestInfo.Err", Field, 7}, + }, + "net/http/httputil": { + {"(*ClientConn).Close", Method, 0}, + {"(*ClientConn).Do", Method, 0}, + {"(*ClientConn).Hijack", Method, 0}, + {"(*ClientConn).Pending", Method, 0}, + {"(*ClientConn).Read", Method, 0}, + {"(*ClientConn).Write", Method, 0}, + {"(*ProxyRequest).SetURL", Method, 20}, + {"(*ProxyRequest).SetXForwarded", Method, 20}, + {"(*ReverseProxy).ServeHTTP", Method, 0}, + {"(*ServerConn).Close", Method, 0}, + {"(*ServerConn).Hijack", Method, 0}, + {"(*ServerConn).Pending", Method, 0}, + {"(*ServerConn).Read", Method, 0}, + {"(*ServerConn).Write", Method, 0}, + {"BufferPool", Type, 6}, + {"ClientConn", Type, 0}, + {"DumpRequest", Func, 0}, + {"DumpRequestOut", Func, 0}, + {"DumpResponse", Func, 0}, + {"ErrClosed", Var, 0}, + {"ErrLineTooLong", Var, 0}, + {"ErrPersistEOF", Var, 0}, + {"ErrPipeline", Var, 0}, + {"NewChunkedReader", Func, 0}, + {"NewChunkedWriter", Func, 0}, + {"NewClientConn", Func, 0}, + {"NewProxyClientConn", Func, 0}, + {"NewServerConn", Func, 0}, + {"NewSingleHostReverseProxy", Func, 0}, + {"ProxyRequest", Type, 20}, + {"ProxyRequest.In", Field, 20}, + {"ProxyRequest.Out", Field, 20}, + {"ReverseProxy", Type, 0}, + {"ReverseProxy.BufferPool", Field, 6}, + {"ReverseProxy.Director", Field, 0}, + {"ReverseProxy.ErrorHandler", Field, 11}, + {"ReverseProxy.ErrorLog", Field, 4}, + {"ReverseProxy.FlushInterval", Field, 0}, + {"ReverseProxy.ModifyResponse", Field, 8}, + {"ReverseProxy.Rewrite", Field, 20}, + {"ReverseProxy.Transport", Field, 0}, + {"ServerConn", Type, 0}, + }, + "net/http/pprof": { + {"Cmdline", Func, 0}, + {"Handler", Func, 0}, + {"Index", Func, 0}, + {"Profile", Func, 0}, + {"Symbol", Func, 0}, + {"Trace", Func, 5}, + }, + "net/mail": { + {"(*Address).String", Method, 0}, + {"(*AddressParser).Parse", Method, 5}, + {"(*AddressParser).ParseList", Method, 5}, + {"(Header).AddressList", Method, 0}, + {"(Header).Date", Method, 0}, + {"(Header).Get", Method, 0}, + {"Address", Type, 0}, + {"Address.Address", Field, 0}, + {"Address.Name", Field, 0}, + {"AddressParser", Type, 5}, + {"AddressParser.WordDecoder", Field, 5}, + {"ErrHeaderNotPresent", Var, 0}, + {"Header", Type, 0}, + {"Message", Type, 0}, + {"Message.Body", Field, 0}, + {"Message.Header", Field, 0}, + {"ParseAddress", Func, 1}, + {"ParseAddressList", Func, 1}, + {"ParseDate", Func, 8}, + {"ReadMessage", Func, 0}, + }, + "net/netip": { + {"(*Addr).UnmarshalBinary", Method, 18}, + {"(*Addr).UnmarshalText", Method, 18}, + {"(*AddrPort).UnmarshalBinary", Method, 18}, + {"(*AddrPort).UnmarshalText", Method, 18}, + 
{"(*Prefix).UnmarshalBinary", Method, 18}, + {"(*Prefix).UnmarshalText", Method, 18}, + {"(Addr).AppendTo", Method, 18}, + {"(Addr).As16", Method, 18}, + {"(Addr).As4", Method, 18}, + {"(Addr).AsSlice", Method, 18}, + {"(Addr).BitLen", Method, 18}, + {"(Addr).Compare", Method, 18}, + {"(Addr).Is4", Method, 18}, + {"(Addr).Is4In6", Method, 18}, + {"(Addr).Is6", Method, 18}, + {"(Addr).IsGlobalUnicast", Method, 18}, + {"(Addr).IsInterfaceLocalMulticast", Method, 18}, + {"(Addr).IsLinkLocalMulticast", Method, 18}, + {"(Addr).IsLinkLocalUnicast", Method, 18}, + {"(Addr).IsLoopback", Method, 18}, + {"(Addr).IsMulticast", Method, 18}, + {"(Addr).IsPrivate", Method, 18}, + {"(Addr).IsUnspecified", Method, 18}, + {"(Addr).IsValid", Method, 18}, + {"(Addr).Less", Method, 18}, + {"(Addr).MarshalBinary", Method, 18}, + {"(Addr).MarshalText", Method, 18}, + {"(Addr).Next", Method, 18}, + {"(Addr).Prefix", Method, 18}, + {"(Addr).Prev", Method, 18}, + {"(Addr).String", Method, 18}, + {"(Addr).StringExpanded", Method, 18}, + {"(Addr).Unmap", Method, 18}, + {"(Addr).WithZone", Method, 18}, + {"(Addr).Zone", Method, 18}, + {"(AddrPort).Addr", Method, 18}, + {"(AddrPort).AppendTo", Method, 18}, + {"(AddrPort).Compare", Method, 22}, + {"(AddrPort).IsValid", Method, 18}, + {"(AddrPort).MarshalBinary", Method, 18}, + {"(AddrPort).MarshalText", Method, 18}, + {"(AddrPort).Port", Method, 18}, + {"(AddrPort).String", Method, 18}, + {"(Prefix).Addr", Method, 18}, + {"(Prefix).AppendTo", Method, 18}, + {"(Prefix).Bits", Method, 18}, + {"(Prefix).Contains", Method, 18}, + {"(Prefix).IsSingleIP", Method, 18}, + {"(Prefix).IsValid", Method, 18}, + {"(Prefix).MarshalBinary", Method, 18}, + {"(Prefix).MarshalText", Method, 18}, + {"(Prefix).Masked", Method, 18}, + {"(Prefix).Overlaps", Method, 18}, + {"(Prefix).String", Method, 18}, + {"Addr", Type, 18}, + {"AddrFrom16", Func, 18}, + {"AddrFrom4", Func, 18}, + {"AddrFromSlice", Func, 18}, + {"AddrPort", Type, 18}, + {"AddrPortFrom", Func, 18}, + {"IPv4Unspecified", Func, 18}, + {"IPv6LinkLocalAllNodes", Func, 18}, + {"IPv6LinkLocalAllRouters", Func, 20}, + {"IPv6Loopback", Func, 20}, + {"IPv6Unspecified", Func, 18}, + {"MustParseAddr", Func, 18}, + {"MustParseAddrPort", Func, 18}, + {"MustParsePrefix", Func, 18}, + {"ParseAddr", Func, 18}, + {"ParseAddrPort", Func, 18}, + {"ParsePrefix", Func, 18}, + {"Prefix", Type, 18}, + {"PrefixFrom", Func, 18}, + }, + "net/rpc": { + {"(*Client).Call", Method, 0}, + {"(*Client).Close", Method, 0}, + {"(*Client).Go", Method, 0}, + {"(*Server).Accept", Method, 0}, + {"(*Server).HandleHTTP", Method, 0}, + {"(*Server).Register", Method, 0}, + {"(*Server).RegisterName", Method, 0}, + {"(*Server).ServeCodec", Method, 0}, + {"(*Server).ServeConn", Method, 0}, + {"(*Server).ServeHTTP", Method, 0}, + {"(*Server).ServeRequest", Method, 0}, + {"(ServerError).Error", Method, 0}, + {"Accept", Func, 0}, + {"Call", Type, 0}, + {"Call.Args", Field, 0}, + {"Call.Done", Field, 0}, + {"Call.Error", Field, 0}, + {"Call.Reply", Field, 0}, + {"Call.ServiceMethod", Field, 0}, + {"Client", Type, 0}, + {"ClientCodec", Type, 0}, + {"DefaultDebugPath", Const, 0}, + {"DefaultRPCPath", Const, 0}, + {"DefaultServer", Var, 0}, + {"Dial", Func, 0}, + {"DialHTTP", Func, 0}, + {"DialHTTPPath", Func, 0}, + {"ErrShutdown", Var, 0}, + {"HandleHTTP", Func, 0}, + {"NewClient", Func, 0}, + {"NewClientWithCodec", Func, 0}, + {"NewServer", Func, 0}, + {"Register", Func, 0}, + {"RegisterName", Func, 0}, + {"Request", Type, 0}, + {"Request.Seq", Field, 0}, + 
{"Request.ServiceMethod", Field, 0}, + {"Response", Type, 0}, + {"Response.Error", Field, 0}, + {"Response.Seq", Field, 0}, + {"Response.ServiceMethod", Field, 0}, + {"ServeCodec", Func, 0}, + {"ServeConn", Func, 0}, + {"ServeRequest", Func, 0}, + {"Server", Type, 0}, + {"ServerCodec", Type, 0}, + {"ServerError", Type, 0}, + }, + "net/rpc/jsonrpc": { + {"Dial", Func, 0}, + {"NewClient", Func, 0}, + {"NewClientCodec", Func, 0}, + {"NewServerCodec", Func, 0}, + {"ServeConn", Func, 0}, + }, + "net/smtp": { + {"(*Client).Auth", Method, 0}, + {"(*Client).Close", Method, 2}, + {"(*Client).Data", Method, 0}, + {"(*Client).Extension", Method, 0}, + {"(*Client).Hello", Method, 1}, + {"(*Client).Mail", Method, 0}, + {"(*Client).Noop", Method, 10}, + {"(*Client).Quit", Method, 0}, + {"(*Client).Rcpt", Method, 0}, + {"(*Client).Reset", Method, 0}, + {"(*Client).StartTLS", Method, 0}, + {"(*Client).TLSConnectionState", Method, 5}, + {"(*Client).Verify", Method, 0}, + {"Auth", Type, 0}, + {"CRAMMD5Auth", Func, 0}, + {"Client", Type, 0}, + {"Client.Text", Field, 0}, + {"Dial", Func, 0}, + {"NewClient", Func, 0}, + {"PlainAuth", Func, 0}, + {"SendMail", Func, 0}, + {"ServerInfo", Type, 0}, + {"ServerInfo.Auth", Field, 0}, + {"ServerInfo.Name", Field, 0}, + {"ServerInfo.TLS", Field, 0}, + }, + "net/textproto": { + {"(*Conn).Close", Method, 0}, + {"(*Conn).Cmd", Method, 0}, + {"(*Conn).DotReader", Method, 0}, + {"(*Conn).DotWriter", Method, 0}, + {"(*Conn).EndRequest", Method, 0}, + {"(*Conn).EndResponse", Method, 0}, + {"(*Conn).Next", Method, 0}, + {"(*Conn).PrintfLine", Method, 0}, + {"(*Conn).ReadCodeLine", Method, 0}, + {"(*Conn).ReadContinuedLine", Method, 0}, + {"(*Conn).ReadContinuedLineBytes", Method, 0}, + {"(*Conn).ReadDotBytes", Method, 0}, + {"(*Conn).ReadDotLines", Method, 0}, + {"(*Conn).ReadLine", Method, 0}, + {"(*Conn).ReadLineBytes", Method, 0}, + {"(*Conn).ReadMIMEHeader", Method, 0}, + {"(*Conn).ReadResponse", Method, 0}, + {"(*Conn).StartRequest", Method, 0}, + {"(*Conn).StartResponse", Method, 0}, + {"(*Error).Error", Method, 0}, + {"(*Pipeline).EndRequest", Method, 0}, + {"(*Pipeline).EndResponse", Method, 0}, + {"(*Pipeline).Next", Method, 0}, + {"(*Pipeline).StartRequest", Method, 0}, + {"(*Pipeline).StartResponse", Method, 0}, + {"(*Reader).DotReader", Method, 0}, + {"(*Reader).ReadCodeLine", Method, 0}, + {"(*Reader).ReadContinuedLine", Method, 0}, + {"(*Reader).ReadContinuedLineBytes", Method, 0}, + {"(*Reader).ReadDotBytes", Method, 0}, + {"(*Reader).ReadDotLines", Method, 0}, + {"(*Reader).ReadLine", Method, 0}, + {"(*Reader).ReadLineBytes", Method, 0}, + {"(*Reader).ReadMIMEHeader", Method, 0}, + {"(*Reader).ReadResponse", Method, 0}, + {"(*Writer).DotWriter", Method, 0}, + {"(*Writer).PrintfLine", Method, 0}, + {"(MIMEHeader).Add", Method, 0}, + {"(MIMEHeader).Del", Method, 0}, + {"(MIMEHeader).Get", Method, 0}, + {"(MIMEHeader).Set", Method, 0}, + {"(MIMEHeader).Values", Method, 14}, + {"(ProtocolError).Error", Method, 0}, + {"CanonicalMIMEHeaderKey", Func, 0}, + {"Conn", Type, 0}, + {"Conn.Pipeline", Field, 0}, + {"Conn.Reader", Field, 0}, + {"Conn.Writer", Field, 0}, + {"Dial", Func, 0}, + {"Error", Type, 0}, + {"Error.Code", Field, 0}, + {"Error.Msg", Field, 0}, + {"MIMEHeader", Type, 0}, + {"NewConn", Func, 0}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"Pipeline", Type, 0}, + {"ProtocolError", Type, 0}, + {"Reader", Type, 0}, + {"Reader.R", Field, 0}, + {"TrimBytes", Func, 1}, + {"TrimString", Func, 1}, + {"Writer", Type, 0}, + {"Writer.W", Field, 0}, + }, 
+ "net/url": { + {"(*Error).Error", Method, 0}, + {"(*Error).Temporary", Method, 6}, + {"(*Error).Timeout", Method, 6}, + {"(*Error).Unwrap", Method, 13}, + {"(*URL).EscapedFragment", Method, 15}, + {"(*URL).EscapedPath", Method, 5}, + {"(*URL).Hostname", Method, 8}, + {"(*URL).IsAbs", Method, 0}, + {"(*URL).JoinPath", Method, 19}, + {"(*URL).MarshalBinary", Method, 8}, + {"(*URL).Parse", Method, 0}, + {"(*URL).Port", Method, 8}, + {"(*URL).Query", Method, 0}, + {"(*URL).Redacted", Method, 15}, + {"(*URL).RequestURI", Method, 0}, + {"(*URL).ResolveReference", Method, 0}, + {"(*URL).String", Method, 0}, + {"(*URL).UnmarshalBinary", Method, 8}, + {"(*Userinfo).Password", Method, 0}, + {"(*Userinfo).String", Method, 0}, + {"(*Userinfo).Username", Method, 0}, + {"(EscapeError).Error", Method, 0}, + {"(InvalidHostError).Error", Method, 6}, + {"(Values).Add", Method, 0}, + {"(Values).Del", Method, 0}, + {"(Values).Encode", Method, 0}, + {"(Values).Get", Method, 0}, + {"(Values).Has", Method, 17}, + {"(Values).Set", Method, 0}, + {"Error", Type, 0}, + {"Error.Err", Field, 0}, + {"Error.Op", Field, 0}, + {"Error.URL", Field, 0}, + {"EscapeError", Type, 0}, + {"InvalidHostError", Type, 6}, + {"JoinPath", Func, 19}, + {"Parse", Func, 0}, + {"ParseQuery", Func, 0}, + {"ParseRequestURI", Func, 0}, + {"PathEscape", Func, 8}, + {"PathUnescape", Func, 8}, + {"QueryEscape", Func, 0}, + {"QueryUnescape", Func, 0}, + {"URL", Type, 0}, + {"URL.ForceQuery", Field, 7}, + {"URL.Fragment", Field, 0}, + {"URL.Host", Field, 0}, + {"URL.OmitHost", Field, 19}, + {"URL.Opaque", Field, 0}, + {"URL.Path", Field, 0}, + {"URL.RawFragment", Field, 15}, + {"URL.RawPath", Field, 5}, + {"URL.RawQuery", Field, 0}, + {"URL.Scheme", Field, 0}, + {"URL.User", Field, 0}, + {"User", Func, 0}, + {"UserPassword", Func, 0}, + {"Userinfo", Type, 0}, + {"Values", Type, 0}, + }, + "os": { + {"(*File).Chdir", Method, 0}, + {"(*File).Chmod", Method, 0}, + {"(*File).Chown", Method, 0}, + {"(*File).Close", Method, 0}, + {"(*File).Fd", Method, 0}, + {"(*File).Name", Method, 0}, + {"(*File).Read", Method, 0}, + {"(*File).ReadAt", Method, 0}, + {"(*File).ReadDir", Method, 16}, + {"(*File).ReadFrom", Method, 15}, + {"(*File).Readdir", Method, 0}, + {"(*File).Readdirnames", Method, 0}, + {"(*File).Seek", Method, 0}, + {"(*File).SetDeadline", Method, 10}, + {"(*File).SetReadDeadline", Method, 10}, + {"(*File).SetWriteDeadline", Method, 10}, + {"(*File).Stat", Method, 0}, + {"(*File).Sync", Method, 0}, + {"(*File).SyscallConn", Method, 12}, + {"(*File).Truncate", Method, 0}, + {"(*File).Write", Method, 0}, + {"(*File).WriteAt", Method, 0}, + {"(*File).WriteString", Method, 0}, + {"(*File).WriteTo", Method, 22}, + {"(*LinkError).Error", Method, 0}, + {"(*LinkError).Unwrap", Method, 13}, + {"(*PathError).Error", Method, 0}, + {"(*PathError).Timeout", Method, 10}, + {"(*PathError).Unwrap", Method, 13}, + {"(*Process).Kill", Method, 0}, + {"(*Process).Release", Method, 0}, + {"(*Process).Signal", Method, 0}, + {"(*Process).Wait", Method, 0}, + {"(*ProcessState).ExitCode", Method, 12}, + {"(*ProcessState).Exited", Method, 0}, + {"(*ProcessState).Pid", Method, 0}, + {"(*ProcessState).String", Method, 0}, + {"(*ProcessState).Success", Method, 0}, + {"(*ProcessState).Sys", Method, 0}, + {"(*ProcessState).SysUsage", Method, 0}, + {"(*ProcessState).SystemTime", Method, 0}, + {"(*ProcessState).UserTime", Method, 0}, + {"(*SyscallError).Error", Method, 0}, + {"(*SyscallError).Timeout", Method, 10}, + {"(*SyscallError).Unwrap", Method, 13}, + 
{"(FileMode).IsDir", Method, 0}, + {"(FileMode).IsRegular", Method, 1}, + {"(FileMode).Perm", Method, 0}, + {"(FileMode).String", Method, 0}, + {"Args", Var, 0}, + {"Chdir", Func, 0}, + {"Chmod", Func, 0}, + {"Chown", Func, 0}, + {"Chtimes", Func, 0}, + {"Clearenv", Func, 0}, + {"Create", Func, 0}, + {"CreateTemp", Func, 16}, + {"DevNull", Const, 0}, + {"DirEntry", Type, 16}, + {"DirFS", Func, 16}, + {"Environ", Func, 0}, + {"ErrClosed", Var, 8}, + {"ErrDeadlineExceeded", Var, 15}, + {"ErrExist", Var, 0}, + {"ErrInvalid", Var, 0}, + {"ErrNoDeadline", Var, 10}, + {"ErrNotExist", Var, 0}, + {"ErrPermission", Var, 0}, + {"ErrProcessDone", Var, 16}, + {"Executable", Func, 8}, + {"Exit", Func, 0}, + {"Expand", Func, 0}, + {"ExpandEnv", Func, 0}, + {"File", Type, 0}, + {"FileInfo", Type, 0}, + {"FileMode", Type, 0}, + {"FindProcess", Func, 0}, + {"Getegid", Func, 0}, + {"Getenv", Func, 0}, + {"Geteuid", Func, 0}, + {"Getgid", Func, 0}, + {"Getgroups", Func, 0}, + {"Getpagesize", Func, 0}, + {"Getpid", Func, 0}, + {"Getppid", Func, 0}, + {"Getuid", Func, 0}, + {"Getwd", Func, 0}, + {"Hostname", Func, 0}, + {"Interrupt", Var, 0}, + {"IsExist", Func, 0}, + {"IsNotExist", Func, 0}, + {"IsPathSeparator", Func, 0}, + {"IsPermission", Func, 0}, + {"IsTimeout", Func, 10}, + {"Kill", Var, 0}, + {"Lchown", Func, 0}, + {"Link", Func, 0}, + {"LinkError", Type, 0}, + {"LinkError.Err", Field, 0}, + {"LinkError.New", Field, 0}, + {"LinkError.Old", Field, 0}, + {"LinkError.Op", Field, 0}, + {"LookupEnv", Func, 5}, + {"Lstat", Func, 0}, + {"Mkdir", Func, 0}, + {"MkdirAll", Func, 0}, + {"MkdirTemp", Func, 16}, + {"ModeAppend", Const, 0}, + {"ModeCharDevice", Const, 0}, + {"ModeDevice", Const, 0}, + {"ModeDir", Const, 0}, + {"ModeExclusive", Const, 0}, + {"ModeIrregular", Const, 11}, + {"ModeNamedPipe", Const, 0}, + {"ModePerm", Const, 0}, + {"ModeSetgid", Const, 0}, + {"ModeSetuid", Const, 0}, + {"ModeSocket", Const, 0}, + {"ModeSticky", Const, 0}, + {"ModeSymlink", Const, 0}, + {"ModeTemporary", Const, 0}, + {"ModeType", Const, 0}, + {"NewFile", Func, 0}, + {"NewSyscallError", Func, 0}, + {"O_APPEND", Const, 0}, + {"O_CREATE", Const, 0}, + {"O_EXCL", Const, 0}, + {"O_RDONLY", Const, 0}, + {"O_RDWR", Const, 0}, + {"O_SYNC", Const, 0}, + {"O_TRUNC", Const, 0}, + {"O_WRONLY", Const, 0}, + {"Open", Func, 0}, + {"OpenFile", Func, 0}, + {"PathError", Type, 0}, + {"PathError.Err", Field, 0}, + {"PathError.Op", Field, 0}, + {"PathError.Path", Field, 0}, + {"PathListSeparator", Const, 0}, + {"PathSeparator", Const, 0}, + {"Pipe", Func, 0}, + {"ProcAttr", Type, 0}, + {"ProcAttr.Dir", Field, 0}, + {"ProcAttr.Env", Field, 0}, + {"ProcAttr.Files", Field, 0}, + {"ProcAttr.Sys", Field, 0}, + {"Process", Type, 0}, + {"Process.Pid", Field, 0}, + {"ProcessState", Type, 0}, + {"ReadDir", Func, 16}, + {"ReadFile", Func, 16}, + {"Readlink", Func, 0}, + {"Remove", Func, 0}, + {"RemoveAll", Func, 0}, + {"Rename", Func, 0}, + {"SEEK_CUR", Const, 0}, + {"SEEK_END", Const, 0}, + {"SEEK_SET", Const, 0}, + {"SameFile", Func, 0}, + {"Setenv", Func, 0}, + {"Signal", Type, 0}, + {"StartProcess", Func, 0}, + {"Stat", Func, 0}, + {"Stderr", Var, 0}, + {"Stdin", Var, 0}, + {"Stdout", Var, 0}, + {"Symlink", Func, 0}, + {"SyscallError", Type, 0}, + {"SyscallError.Err", Field, 0}, + {"SyscallError.Syscall", Field, 0}, + {"TempDir", Func, 0}, + {"Truncate", Func, 0}, + {"Unsetenv", Func, 4}, + {"UserCacheDir", Func, 11}, + {"UserConfigDir", Func, 13}, + {"UserHomeDir", Func, 12}, + {"WriteFile", Func, 16}, + }, + "os/exec": { + 
{"(*Cmd).CombinedOutput", Method, 0}, + {"(*Cmd).Environ", Method, 19}, + {"(*Cmd).Output", Method, 0}, + {"(*Cmd).Run", Method, 0}, + {"(*Cmd).Start", Method, 0}, + {"(*Cmd).StderrPipe", Method, 0}, + {"(*Cmd).StdinPipe", Method, 0}, + {"(*Cmd).StdoutPipe", Method, 0}, + {"(*Cmd).String", Method, 13}, + {"(*Cmd).Wait", Method, 0}, + {"(*Error).Error", Method, 0}, + {"(*Error).Unwrap", Method, 13}, + {"(*ExitError).Error", Method, 0}, + {"(ExitError).ExitCode", Method, 12}, + {"(ExitError).Exited", Method, 0}, + {"(ExitError).Pid", Method, 0}, + {"(ExitError).String", Method, 0}, + {"(ExitError).Success", Method, 0}, + {"(ExitError).Sys", Method, 0}, + {"(ExitError).SysUsage", Method, 0}, + {"(ExitError).SystemTime", Method, 0}, + {"(ExitError).UserTime", Method, 0}, + {"Cmd", Type, 0}, + {"Cmd.Args", Field, 0}, + {"Cmd.Cancel", Field, 20}, + {"Cmd.Dir", Field, 0}, + {"Cmd.Env", Field, 0}, + {"Cmd.Err", Field, 19}, + {"Cmd.ExtraFiles", Field, 0}, + {"Cmd.Path", Field, 0}, + {"Cmd.Process", Field, 0}, + {"Cmd.ProcessState", Field, 0}, + {"Cmd.Stderr", Field, 0}, + {"Cmd.Stdin", Field, 0}, + {"Cmd.Stdout", Field, 0}, + {"Cmd.SysProcAttr", Field, 0}, + {"Cmd.WaitDelay", Field, 20}, + {"Command", Func, 0}, + {"CommandContext", Func, 7}, + {"ErrDot", Var, 19}, + {"ErrNotFound", Var, 0}, + {"ErrWaitDelay", Var, 20}, + {"Error", Type, 0}, + {"Error.Err", Field, 0}, + {"Error.Name", Field, 0}, + {"ExitError", Type, 0}, + {"ExitError.ProcessState", Field, 0}, + {"ExitError.Stderr", Field, 6}, + {"LookPath", Func, 0}, + }, + "os/signal": { + {"Ignore", Func, 5}, + {"Ignored", Func, 11}, + {"Notify", Func, 0}, + {"NotifyContext", Func, 16}, + {"Reset", Func, 5}, + {"Stop", Func, 1}, + }, + "os/user": { + {"(*User).GroupIds", Method, 7}, + {"(UnknownGroupError).Error", Method, 7}, + {"(UnknownGroupIdError).Error", Method, 7}, + {"(UnknownUserError).Error", Method, 0}, + {"(UnknownUserIdError).Error", Method, 0}, + {"Current", Func, 0}, + {"Group", Type, 7}, + {"Group.Gid", Field, 7}, + {"Group.Name", Field, 7}, + {"Lookup", Func, 0}, + {"LookupGroup", Func, 7}, + {"LookupGroupId", Func, 7}, + {"LookupId", Func, 0}, + {"UnknownGroupError", Type, 7}, + {"UnknownGroupIdError", Type, 7}, + {"UnknownUserError", Type, 0}, + {"UnknownUserIdError", Type, 0}, + {"User", Type, 0}, + {"User.Gid", Field, 0}, + {"User.HomeDir", Field, 0}, + {"User.Name", Field, 0}, + {"User.Uid", Field, 0}, + {"User.Username", Field, 0}, + }, + "path": { + {"Base", Func, 0}, + {"Clean", Func, 0}, + {"Dir", Func, 0}, + {"ErrBadPattern", Var, 0}, + {"Ext", Func, 0}, + {"IsAbs", Func, 0}, + {"Join", Func, 0}, + {"Match", Func, 0}, + {"Split", Func, 0}, + }, + "path/filepath": { + {"Abs", Func, 0}, + {"Base", Func, 0}, + {"Clean", Func, 0}, + {"Dir", Func, 0}, + {"ErrBadPattern", Var, 0}, + {"EvalSymlinks", Func, 0}, + {"Ext", Func, 0}, + {"FromSlash", Func, 0}, + {"Glob", Func, 0}, + {"HasPrefix", Func, 0}, + {"IsAbs", Func, 0}, + {"IsLocal", Func, 20}, + {"Join", Func, 0}, + {"ListSeparator", Const, 0}, + {"Match", Func, 0}, + {"Rel", Func, 0}, + {"Separator", Const, 0}, + {"SkipAll", Var, 20}, + {"SkipDir", Var, 0}, + {"Split", Func, 0}, + {"SplitList", Func, 0}, + {"ToSlash", Func, 0}, + {"VolumeName", Func, 0}, + {"Walk", Func, 0}, + {"WalkDir", Func, 16}, + {"WalkFunc", Type, 0}, + }, + "plugin": { + {"(*Plugin).Lookup", Method, 8}, + {"Open", Func, 8}, + {"Plugin", Type, 8}, + {"Symbol", Type, 8}, + }, + "reflect": { + {"(*MapIter).Key", Method, 12}, + {"(*MapIter).Next", Method, 12}, + {"(*MapIter).Reset", Method, 18}, + 
{"(*MapIter).Value", Method, 12}, + {"(*ValueError).Error", Method, 0}, + {"(ChanDir).String", Method, 0}, + {"(Kind).String", Method, 0}, + {"(Method).IsExported", Method, 17}, + {"(StructField).IsExported", Method, 17}, + {"(StructTag).Get", Method, 0}, + {"(StructTag).Lookup", Method, 7}, + {"(Value).Addr", Method, 0}, + {"(Value).Bool", Method, 0}, + {"(Value).Bytes", Method, 0}, + {"(Value).Call", Method, 0}, + {"(Value).CallSlice", Method, 0}, + {"(Value).CanAddr", Method, 0}, + {"(Value).CanComplex", Method, 18}, + {"(Value).CanConvert", Method, 17}, + {"(Value).CanFloat", Method, 18}, + {"(Value).CanInt", Method, 18}, + {"(Value).CanInterface", Method, 0}, + {"(Value).CanSet", Method, 0}, + {"(Value).CanUint", Method, 18}, + {"(Value).Cap", Method, 0}, + {"(Value).Clear", Method, 21}, + {"(Value).Close", Method, 0}, + {"(Value).Comparable", Method, 20}, + {"(Value).Complex", Method, 0}, + {"(Value).Convert", Method, 1}, + {"(Value).Elem", Method, 0}, + {"(Value).Equal", Method, 20}, + {"(Value).Field", Method, 0}, + {"(Value).FieldByIndex", Method, 0}, + {"(Value).FieldByIndexErr", Method, 18}, + {"(Value).FieldByName", Method, 0}, + {"(Value).FieldByNameFunc", Method, 0}, + {"(Value).Float", Method, 0}, + {"(Value).Grow", Method, 20}, + {"(Value).Index", Method, 0}, + {"(Value).Int", Method, 0}, + {"(Value).Interface", Method, 0}, + {"(Value).InterfaceData", Method, 0}, + {"(Value).IsNil", Method, 0}, + {"(Value).IsValid", Method, 0}, + {"(Value).IsZero", Method, 13}, + {"(Value).Kind", Method, 0}, + {"(Value).Len", Method, 0}, + {"(Value).MapIndex", Method, 0}, + {"(Value).MapKeys", Method, 0}, + {"(Value).MapRange", Method, 12}, + {"(Value).Method", Method, 0}, + {"(Value).MethodByName", Method, 0}, + {"(Value).NumField", Method, 0}, + {"(Value).NumMethod", Method, 0}, + {"(Value).OverflowComplex", Method, 0}, + {"(Value).OverflowFloat", Method, 0}, + {"(Value).OverflowInt", Method, 0}, + {"(Value).OverflowUint", Method, 0}, + {"(Value).Pointer", Method, 0}, + {"(Value).Recv", Method, 0}, + {"(Value).Send", Method, 0}, + {"(Value).Set", Method, 0}, + {"(Value).SetBool", Method, 0}, + {"(Value).SetBytes", Method, 0}, + {"(Value).SetCap", Method, 2}, + {"(Value).SetComplex", Method, 0}, + {"(Value).SetFloat", Method, 0}, + {"(Value).SetInt", Method, 0}, + {"(Value).SetIterKey", Method, 18}, + {"(Value).SetIterValue", Method, 18}, + {"(Value).SetLen", Method, 0}, + {"(Value).SetMapIndex", Method, 0}, + {"(Value).SetPointer", Method, 0}, + {"(Value).SetString", Method, 0}, + {"(Value).SetUint", Method, 0}, + {"(Value).SetZero", Method, 20}, + {"(Value).Slice", Method, 0}, + {"(Value).Slice3", Method, 2}, + {"(Value).String", Method, 0}, + {"(Value).TryRecv", Method, 0}, + {"(Value).TrySend", Method, 0}, + {"(Value).Type", Method, 0}, + {"(Value).Uint", Method, 0}, + {"(Value).UnsafeAddr", Method, 0}, + {"(Value).UnsafePointer", Method, 18}, + {"Append", Func, 0}, + {"AppendSlice", Func, 0}, + {"Array", Const, 0}, + {"ArrayOf", Func, 5}, + {"Bool", Const, 0}, + {"BothDir", Const, 0}, + {"Chan", Const, 0}, + {"ChanDir", Type, 0}, + {"ChanOf", Func, 1}, + {"Complex128", Const, 0}, + {"Complex64", Const, 0}, + {"Copy", Func, 0}, + {"DeepEqual", Func, 0}, + {"Float32", Const, 0}, + {"Float64", Const, 0}, + {"Func", Const, 0}, + {"FuncOf", Func, 5}, + {"Indirect", Func, 0}, + {"Int", Const, 0}, + {"Int16", Const, 0}, + {"Int32", Const, 0}, + {"Int64", Const, 0}, + {"Int8", Const, 0}, + {"Interface", Const, 0}, + {"Invalid", Const, 0}, + {"Kind", Type, 0}, + {"MakeChan", Func, 0}, + 
{"MakeFunc", Func, 1}, + {"MakeMap", Func, 0}, + {"MakeMapWithSize", Func, 9}, + {"MakeSlice", Func, 0}, + {"Map", Const, 0}, + {"MapIter", Type, 12}, + {"MapOf", Func, 1}, + {"Method", Type, 0}, + {"Method.Func", Field, 0}, + {"Method.Index", Field, 0}, + {"Method.Name", Field, 0}, + {"Method.PkgPath", Field, 0}, + {"Method.Type", Field, 0}, + {"New", Func, 0}, + {"NewAt", Func, 0}, + {"Pointer", Const, 18}, + {"PointerTo", Func, 18}, + {"Ptr", Const, 0}, + {"PtrTo", Func, 0}, + {"RecvDir", Const, 0}, + {"Select", Func, 1}, + {"SelectCase", Type, 1}, + {"SelectCase.Chan", Field, 1}, + {"SelectCase.Dir", Field, 1}, + {"SelectCase.Send", Field, 1}, + {"SelectDefault", Const, 1}, + {"SelectDir", Type, 1}, + {"SelectRecv", Const, 1}, + {"SelectSend", Const, 1}, + {"SendDir", Const, 0}, + {"Slice", Const, 0}, + {"SliceHeader", Type, 0}, + {"SliceHeader.Cap", Field, 0}, + {"SliceHeader.Data", Field, 0}, + {"SliceHeader.Len", Field, 0}, + {"SliceOf", Func, 1}, + {"String", Const, 0}, + {"StringHeader", Type, 0}, + {"StringHeader.Data", Field, 0}, + {"StringHeader.Len", Field, 0}, + {"Struct", Const, 0}, + {"StructField", Type, 0}, + {"StructField.Anonymous", Field, 0}, + {"StructField.Index", Field, 0}, + {"StructField.Name", Field, 0}, + {"StructField.Offset", Field, 0}, + {"StructField.PkgPath", Field, 0}, + {"StructField.Tag", Field, 0}, + {"StructField.Type", Field, 0}, + {"StructOf", Func, 7}, + {"StructTag", Type, 0}, + {"Swapper", Func, 8}, + {"Type", Type, 0}, + {"TypeFor", Func, 22}, + {"TypeOf", Func, 0}, + {"Uint", Const, 0}, + {"Uint16", Const, 0}, + {"Uint32", Const, 0}, + {"Uint64", Const, 0}, + {"Uint8", Const, 0}, + {"Uintptr", Const, 0}, + {"UnsafePointer", Const, 0}, + {"Value", Type, 0}, + {"ValueError", Type, 0}, + {"ValueError.Kind", Field, 0}, + {"ValueError.Method", Field, 0}, + {"ValueOf", Func, 0}, + {"VisibleFields", Func, 17}, + {"Zero", Func, 0}, + }, + "regexp": { + {"(*Regexp).Copy", Method, 6}, + {"(*Regexp).Expand", Method, 0}, + {"(*Regexp).ExpandString", Method, 0}, + {"(*Regexp).Find", Method, 0}, + {"(*Regexp).FindAll", Method, 0}, + {"(*Regexp).FindAllIndex", Method, 0}, + {"(*Regexp).FindAllString", Method, 0}, + {"(*Regexp).FindAllStringIndex", Method, 0}, + {"(*Regexp).FindAllStringSubmatch", Method, 0}, + {"(*Regexp).FindAllStringSubmatchIndex", Method, 0}, + {"(*Regexp).FindAllSubmatch", Method, 0}, + {"(*Regexp).FindAllSubmatchIndex", Method, 0}, + {"(*Regexp).FindIndex", Method, 0}, + {"(*Regexp).FindReaderIndex", Method, 0}, + {"(*Regexp).FindReaderSubmatchIndex", Method, 0}, + {"(*Regexp).FindString", Method, 0}, + {"(*Regexp).FindStringIndex", Method, 0}, + {"(*Regexp).FindStringSubmatch", Method, 0}, + {"(*Regexp).FindStringSubmatchIndex", Method, 0}, + {"(*Regexp).FindSubmatch", Method, 0}, + {"(*Regexp).FindSubmatchIndex", Method, 0}, + {"(*Regexp).LiteralPrefix", Method, 0}, + {"(*Regexp).Longest", Method, 1}, + {"(*Regexp).MarshalText", Method, 21}, + {"(*Regexp).Match", Method, 0}, + {"(*Regexp).MatchReader", Method, 0}, + {"(*Regexp).MatchString", Method, 0}, + {"(*Regexp).NumSubexp", Method, 0}, + {"(*Regexp).ReplaceAll", Method, 0}, + {"(*Regexp).ReplaceAllFunc", Method, 0}, + {"(*Regexp).ReplaceAllLiteral", Method, 0}, + {"(*Regexp).ReplaceAllLiteralString", Method, 0}, + {"(*Regexp).ReplaceAllString", Method, 0}, + {"(*Regexp).ReplaceAllStringFunc", Method, 0}, + {"(*Regexp).Split", Method, 1}, + {"(*Regexp).String", Method, 0}, + {"(*Regexp).SubexpIndex", Method, 15}, + {"(*Regexp).SubexpNames", Method, 0}, + {"(*Regexp).UnmarshalText", 
Method, 21}, + {"Compile", Func, 0}, + {"CompilePOSIX", Func, 0}, + {"Match", Func, 0}, + {"MatchReader", Func, 0}, + {"MatchString", Func, 0}, + {"MustCompile", Func, 0}, + {"MustCompilePOSIX", Func, 0}, + {"QuoteMeta", Func, 0}, + {"Regexp", Type, 0}, + }, + "regexp/syntax": { + {"(*Error).Error", Method, 0}, + {"(*Inst).MatchEmptyWidth", Method, 0}, + {"(*Inst).MatchRune", Method, 0}, + {"(*Inst).MatchRunePos", Method, 3}, + {"(*Inst).String", Method, 0}, + {"(*Prog).Prefix", Method, 0}, + {"(*Prog).StartCond", Method, 0}, + {"(*Prog).String", Method, 0}, + {"(*Regexp).CapNames", Method, 0}, + {"(*Regexp).Equal", Method, 0}, + {"(*Regexp).MaxCap", Method, 0}, + {"(*Regexp).Simplify", Method, 0}, + {"(*Regexp).String", Method, 0}, + {"(ErrorCode).String", Method, 0}, + {"(InstOp).String", Method, 3}, + {"(Op).String", Method, 11}, + {"ClassNL", Const, 0}, + {"Compile", Func, 0}, + {"DotNL", Const, 0}, + {"EmptyBeginLine", Const, 0}, + {"EmptyBeginText", Const, 0}, + {"EmptyEndLine", Const, 0}, + {"EmptyEndText", Const, 0}, + {"EmptyNoWordBoundary", Const, 0}, + {"EmptyOp", Type, 0}, + {"EmptyOpContext", Func, 0}, + {"EmptyWordBoundary", Const, 0}, + {"ErrInternalError", Const, 0}, + {"ErrInvalidCharClass", Const, 0}, + {"ErrInvalidCharRange", Const, 0}, + {"ErrInvalidEscape", Const, 0}, + {"ErrInvalidNamedCapture", Const, 0}, + {"ErrInvalidPerlOp", Const, 0}, + {"ErrInvalidRepeatOp", Const, 0}, + {"ErrInvalidRepeatSize", Const, 0}, + {"ErrInvalidUTF8", Const, 0}, + {"ErrLarge", Const, 20}, + {"ErrMissingBracket", Const, 0}, + {"ErrMissingParen", Const, 0}, + {"ErrMissingRepeatArgument", Const, 0}, + {"ErrNestingDepth", Const, 19}, + {"ErrTrailingBackslash", Const, 0}, + {"ErrUnexpectedParen", Const, 1}, + {"Error", Type, 0}, + {"Error.Code", Field, 0}, + {"Error.Expr", Field, 0}, + {"ErrorCode", Type, 0}, + {"Flags", Type, 0}, + {"FoldCase", Const, 0}, + {"Inst", Type, 0}, + {"Inst.Arg", Field, 0}, + {"Inst.Op", Field, 0}, + {"Inst.Out", Field, 0}, + {"Inst.Rune", Field, 0}, + {"InstAlt", Const, 0}, + {"InstAltMatch", Const, 0}, + {"InstCapture", Const, 0}, + {"InstEmptyWidth", Const, 0}, + {"InstFail", Const, 0}, + {"InstMatch", Const, 0}, + {"InstNop", Const, 0}, + {"InstOp", Type, 0}, + {"InstRune", Const, 0}, + {"InstRune1", Const, 0}, + {"InstRuneAny", Const, 0}, + {"InstRuneAnyNotNL", Const, 0}, + {"IsWordChar", Func, 0}, + {"Literal", Const, 0}, + {"MatchNL", Const, 0}, + {"NonGreedy", Const, 0}, + {"OneLine", Const, 0}, + {"Op", Type, 0}, + {"OpAlternate", Const, 0}, + {"OpAnyChar", Const, 0}, + {"OpAnyCharNotNL", Const, 0}, + {"OpBeginLine", Const, 0}, + {"OpBeginText", Const, 0}, + {"OpCapture", Const, 0}, + {"OpCharClass", Const, 0}, + {"OpConcat", Const, 0}, + {"OpEmptyMatch", Const, 0}, + {"OpEndLine", Const, 0}, + {"OpEndText", Const, 0}, + {"OpLiteral", Const, 0}, + {"OpNoMatch", Const, 0}, + {"OpNoWordBoundary", Const, 0}, + {"OpPlus", Const, 0}, + {"OpQuest", Const, 0}, + {"OpRepeat", Const, 0}, + {"OpStar", Const, 0}, + {"OpWordBoundary", Const, 0}, + {"POSIX", Const, 0}, + {"Parse", Func, 0}, + {"Perl", Const, 0}, + {"PerlX", Const, 0}, + {"Prog", Type, 0}, + {"Prog.Inst", Field, 0}, + {"Prog.NumCap", Field, 0}, + {"Prog.Start", Field, 0}, + {"Regexp", Type, 0}, + {"Regexp.Cap", Field, 0}, + {"Regexp.Flags", Field, 0}, + {"Regexp.Max", Field, 0}, + {"Regexp.Min", Field, 0}, + {"Regexp.Name", Field, 0}, + {"Regexp.Op", Field, 0}, + {"Regexp.Rune", Field, 0}, + {"Regexp.Rune0", Field, 0}, + {"Regexp.Sub", Field, 0}, + {"Regexp.Sub0", Field, 0}, + {"Simple", Const, 0}, + 
{"UnicodeGroups", Const, 0}, + {"WasDollar", Const, 0}, + }, + "runtime": { + {"(*BlockProfileRecord).Stack", Method, 1}, + {"(*Frames).Next", Method, 7}, + {"(*Func).Entry", Method, 0}, + {"(*Func).FileLine", Method, 0}, + {"(*Func).Name", Method, 0}, + {"(*MemProfileRecord).InUseBytes", Method, 0}, + {"(*MemProfileRecord).InUseObjects", Method, 0}, + {"(*MemProfileRecord).Stack", Method, 0}, + {"(*PanicNilError).Error", Method, 21}, + {"(*PanicNilError).RuntimeError", Method, 21}, + {"(*Pinner).Pin", Method, 21}, + {"(*Pinner).Unpin", Method, 21}, + {"(*StackRecord).Stack", Method, 0}, + {"(*TypeAssertionError).Error", Method, 0}, + {"(*TypeAssertionError).RuntimeError", Method, 0}, + {"BlockProfile", Func, 1}, + {"BlockProfileRecord", Type, 1}, + {"BlockProfileRecord.Count", Field, 1}, + {"BlockProfileRecord.Cycles", Field, 1}, + {"BlockProfileRecord.StackRecord", Field, 1}, + {"Breakpoint", Func, 0}, + {"CPUProfile", Func, 0}, + {"Caller", Func, 0}, + {"Callers", Func, 0}, + {"CallersFrames", Func, 7}, + {"Compiler", Const, 0}, + {"Error", Type, 0}, + {"Frame", Type, 7}, + {"Frame.Entry", Field, 7}, + {"Frame.File", Field, 7}, + {"Frame.Func", Field, 7}, + {"Frame.Function", Field, 7}, + {"Frame.Line", Field, 7}, + {"Frame.PC", Field, 7}, + {"Frames", Type, 7}, + {"Func", Type, 0}, + {"FuncForPC", Func, 0}, + {"GC", Func, 0}, + {"GOARCH", Const, 0}, + {"GOMAXPROCS", Func, 0}, + {"GOOS", Const, 0}, + {"GOROOT", Func, 0}, + {"Goexit", Func, 0}, + {"GoroutineProfile", Func, 0}, + {"Gosched", Func, 0}, + {"KeepAlive", Func, 7}, + {"LockOSThread", Func, 0}, + {"MemProfile", Func, 0}, + {"MemProfileRate", Var, 0}, + {"MemProfileRecord", Type, 0}, + {"MemProfileRecord.AllocBytes", Field, 0}, + {"MemProfileRecord.AllocObjects", Field, 0}, + {"MemProfileRecord.FreeBytes", Field, 0}, + {"MemProfileRecord.FreeObjects", Field, 0}, + {"MemProfileRecord.Stack0", Field, 0}, + {"MemStats", Type, 0}, + {"MemStats.Alloc", Field, 0}, + {"MemStats.BuckHashSys", Field, 0}, + {"MemStats.BySize", Field, 0}, + {"MemStats.DebugGC", Field, 0}, + {"MemStats.EnableGC", Field, 0}, + {"MemStats.Frees", Field, 0}, + {"MemStats.GCCPUFraction", Field, 5}, + {"MemStats.GCSys", Field, 2}, + {"MemStats.HeapAlloc", Field, 0}, + {"MemStats.HeapIdle", Field, 0}, + {"MemStats.HeapInuse", Field, 0}, + {"MemStats.HeapObjects", Field, 0}, + {"MemStats.HeapReleased", Field, 0}, + {"MemStats.HeapSys", Field, 0}, + {"MemStats.LastGC", Field, 0}, + {"MemStats.Lookups", Field, 0}, + {"MemStats.MCacheInuse", Field, 0}, + {"MemStats.MCacheSys", Field, 0}, + {"MemStats.MSpanInuse", Field, 0}, + {"MemStats.MSpanSys", Field, 0}, + {"MemStats.Mallocs", Field, 0}, + {"MemStats.NextGC", Field, 0}, + {"MemStats.NumForcedGC", Field, 8}, + {"MemStats.NumGC", Field, 0}, + {"MemStats.OtherSys", Field, 2}, + {"MemStats.PauseEnd", Field, 4}, + {"MemStats.PauseNs", Field, 0}, + {"MemStats.PauseTotalNs", Field, 0}, + {"MemStats.StackInuse", Field, 0}, + {"MemStats.StackSys", Field, 0}, + {"MemStats.Sys", Field, 0}, + {"MemStats.TotalAlloc", Field, 0}, + {"MutexProfile", Func, 8}, + {"NumCPU", Func, 0}, + {"NumCgoCall", Func, 0}, + {"NumGoroutine", Func, 0}, + {"PanicNilError", Type, 21}, + {"Pinner", Type, 21}, + {"ReadMemStats", Func, 0}, + {"ReadTrace", Func, 5}, + {"SetBlockProfileRate", Func, 1}, + {"SetCPUProfileRate", Func, 0}, + {"SetCgoTraceback", Func, 7}, + {"SetFinalizer", Func, 0}, + {"SetMutexProfileFraction", Func, 8}, + {"Stack", Func, 0}, + {"StackRecord", Type, 0}, + {"StackRecord.Stack0", Field, 0}, + {"StartTrace", Func, 5}, + 
{"StopTrace", Func, 5}, + {"ThreadCreateProfile", Func, 0}, + {"TypeAssertionError", Type, 0}, + {"UnlockOSThread", Func, 0}, + {"Version", Func, 0}, + }, + "runtime/cgo": { + {"(Handle).Delete", Method, 17}, + {"(Handle).Value", Method, 17}, + {"Handle", Type, 17}, + {"Incomplete", Type, 20}, + {"NewHandle", Func, 17}, + }, + "runtime/coverage": { + {"ClearCounters", Func, 20}, + {"WriteCounters", Func, 20}, + {"WriteCountersDir", Func, 20}, + {"WriteMeta", Func, 20}, + {"WriteMetaDir", Func, 20}, + }, + "runtime/debug": { + {"(*BuildInfo).String", Method, 18}, + {"BuildInfo", Type, 12}, + {"BuildInfo.Deps", Field, 12}, + {"BuildInfo.GoVersion", Field, 18}, + {"BuildInfo.Main", Field, 12}, + {"BuildInfo.Path", Field, 12}, + {"BuildInfo.Settings", Field, 18}, + {"BuildSetting", Type, 18}, + {"BuildSetting.Key", Field, 18}, + {"BuildSetting.Value", Field, 18}, + {"FreeOSMemory", Func, 1}, + {"GCStats", Type, 1}, + {"GCStats.LastGC", Field, 1}, + {"GCStats.NumGC", Field, 1}, + {"GCStats.Pause", Field, 1}, + {"GCStats.PauseEnd", Field, 4}, + {"GCStats.PauseQuantiles", Field, 1}, + {"GCStats.PauseTotal", Field, 1}, + {"Module", Type, 12}, + {"Module.Path", Field, 12}, + {"Module.Replace", Field, 12}, + {"Module.Sum", Field, 12}, + {"Module.Version", Field, 12}, + {"ParseBuildInfo", Func, 18}, + {"PrintStack", Func, 0}, + {"ReadBuildInfo", Func, 12}, + {"ReadGCStats", Func, 1}, + {"SetGCPercent", Func, 1}, + {"SetMaxStack", Func, 2}, + {"SetMaxThreads", Func, 2}, + {"SetMemoryLimit", Func, 19}, + {"SetPanicOnFault", Func, 3}, + {"SetTraceback", Func, 6}, + {"Stack", Func, 0}, + {"WriteHeapDump", Func, 3}, + }, + "runtime/metrics": { + {"(Value).Float64", Method, 16}, + {"(Value).Float64Histogram", Method, 16}, + {"(Value).Kind", Method, 16}, + {"(Value).Uint64", Method, 16}, + {"All", Func, 16}, + {"Description", Type, 16}, + {"Description.Cumulative", Field, 16}, + {"Description.Description", Field, 16}, + {"Description.Kind", Field, 16}, + {"Description.Name", Field, 16}, + {"Float64Histogram", Type, 16}, + {"Float64Histogram.Buckets", Field, 16}, + {"Float64Histogram.Counts", Field, 16}, + {"KindBad", Const, 16}, + {"KindFloat64", Const, 16}, + {"KindFloat64Histogram", Const, 16}, + {"KindUint64", Const, 16}, + {"Read", Func, 16}, + {"Sample", Type, 16}, + {"Sample.Name", Field, 16}, + {"Sample.Value", Field, 16}, + {"Value", Type, 16}, + {"ValueKind", Type, 16}, + }, + "runtime/pprof": { + {"(*Profile).Add", Method, 0}, + {"(*Profile).Count", Method, 0}, + {"(*Profile).Name", Method, 0}, + {"(*Profile).Remove", Method, 0}, + {"(*Profile).WriteTo", Method, 0}, + {"Do", Func, 9}, + {"ForLabels", Func, 9}, + {"Label", Func, 9}, + {"LabelSet", Type, 9}, + {"Labels", Func, 9}, + {"Lookup", Func, 0}, + {"NewProfile", Func, 0}, + {"Profile", Type, 0}, + {"Profiles", Func, 0}, + {"SetGoroutineLabels", Func, 9}, + {"StartCPUProfile", Func, 0}, + {"StopCPUProfile", Func, 0}, + {"WithLabels", Func, 9}, + {"WriteHeapProfile", Func, 0}, + }, + "runtime/trace": { + {"(*Region).End", Method, 11}, + {"(*Task).End", Method, 11}, + {"IsEnabled", Func, 11}, + {"Log", Func, 11}, + {"Logf", Func, 11}, + {"NewTask", Func, 11}, + {"Region", Type, 11}, + {"Start", Func, 5}, + {"StartRegion", Func, 11}, + {"Stop", Func, 5}, + {"Task", Type, 11}, + {"WithRegion", Func, 11}, + }, + "slices": { + {"BinarySearch", Func, 21}, + {"BinarySearchFunc", Func, 21}, + {"Clip", Func, 21}, + {"Clone", Func, 21}, + {"Compact", Func, 21}, + {"CompactFunc", Func, 21}, + {"Compare", Func, 21}, + {"CompareFunc", Func, 21}, + 
{"Concat", Func, 22}, + {"Contains", Func, 21}, + {"ContainsFunc", Func, 21}, + {"Delete", Func, 21}, + {"DeleteFunc", Func, 21}, + {"Equal", Func, 21}, + {"EqualFunc", Func, 21}, + {"Grow", Func, 21}, + {"Index", Func, 21}, + {"IndexFunc", Func, 21}, + {"Insert", Func, 21}, + {"IsSorted", Func, 21}, + {"IsSortedFunc", Func, 21}, + {"Max", Func, 21}, + {"MaxFunc", Func, 21}, + {"Min", Func, 21}, + {"MinFunc", Func, 21}, + {"Replace", Func, 21}, + {"Reverse", Func, 21}, + {"Sort", Func, 21}, + {"SortFunc", Func, 21}, + {"SortStableFunc", Func, 21}, + }, + "sort": { + {"(Float64Slice).Len", Method, 0}, + {"(Float64Slice).Less", Method, 0}, + {"(Float64Slice).Search", Method, 0}, + {"(Float64Slice).Sort", Method, 0}, + {"(Float64Slice).Swap", Method, 0}, + {"(IntSlice).Len", Method, 0}, + {"(IntSlice).Less", Method, 0}, + {"(IntSlice).Search", Method, 0}, + {"(IntSlice).Sort", Method, 0}, + {"(IntSlice).Swap", Method, 0}, + {"(StringSlice).Len", Method, 0}, + {"(StringSlice).Less", Method, 0}, + {"(StringSlice).Search", Method, 0}, + {"(StringSlice).Sort", Method, 0}, + {"(StringSlice).Swap", Method, 0}, + {"Find", Func, 19}, + {"Float64Slice", Type, 0}, + {"Float64s", Func, 0}, + {"Float64sAreSorted", Func, 0}, + {"IntSlice", Type, 0}, + {"Interface", Type, 0}, + {"Ints", Func, 0}, + {"IntsAreSorted", Func, 0}, + {"IsSorted", Func, 0}, + {"Reverse", Func, 1}, + {"Search", Func, 0}, + {"SearchFloat64s", Func, 0}, + {"SearchInts", Func, 0}, + {"SearchStrings", Func, 0}, + {"Slice", Func, 8}, + {"SliceIsSorted", Func, 8}, + {"SliceStable", Func, 8}, + {"Sort", Func, 0}, + {"Stable", Func, 2}, + {"StringSlice", Type, 0}, + {"Strings", Func, 0}, + {"StringsAreSorted", Func, 0}, + }, + "strconv": { + {"(*NumError).Error", Method, 0}, + {"(*NumError).Unwrap", Method, 14}, + {"AppendBool", Func, 0}, + {"AppendFloat", Func, 0}, + {"AppendInt", Func, 0}, + {"AppendQuote", Func, 0}, + {"AppendQuoteRune", Func, 0}, + {"AppendQuoteRuneToASCII", Func, 0}, + {"AppendQuoteRuneToGraphic", Func, 6}, + {"AppendQuoteToASCII", Func, 0}, + {"AppendQuoteToGraphic", Func, 6}, + {"AppendUint", Func, 0}, + {"Atoi", Func, 0}, + {"CanBackquote", Func, 0}, + {"ErrRange", Var, 0}, + {"ErrSyntax", Var, 0}, + {"FormatBool", Func, 0}, + {"FormatComplex", Func, 15}, + {"FormatFloat", Func, 0}, + {"FormatInt", Func, 0}, + {"FormatUint", Func, 0}, + {"IntSize", Const, 0}, + {"IsGraphic", Func, 6}, + {"IsPrint", Func, 0}, + {"Itoa", Func, 0}, + {"NumError", Type, 0}, + {"NumError.Err", Field, 0}, + {"NumError.Func", Field, 0}, + {"NumError.Num", Field, 0}, + {"ParseBool", Func, 0}, + {"ParseComplex", Func, 15}, + {"ParseFloat", Func, 0}, + {"ParseInt", Func, 0}, + {"ParseUint", Func, 0}, + {"Quote", Func, 0}, + {"QuoteRune", Func, 0}, + {"QuoteRuneToASCII", Func, 0}, + {"QuoteRuneToGraphic", Func, 6}, + {"QuoteToASCII", Func, 0}, + {"QuoteToGraphic", Func, 6}, + {"QuotedPrefix", Func, 17}, + {"Unquote", Func, 0}, + {"UnquoteChar", Func, 0}, + }, + "strings": { + {"(*Builder).Cap", Method, 12}, + {"(*Builder).Grow", Method, 10}, + {"(*Builder).Len", Method, 10}, + {"(*Builder).Reset", Method, 10}, + {"(*Builder).String", Method, 10}, + {"(*Builder).Write", Method, 10}, + {"(*Builder).WriteByte", Method, 10}, + {"(*Builder).WriteRune", Method, 10}, + {"(*Builder).WriteString", Method, 10}, + {"(*Reader).Len", Method, 0}, + {"(*Reader).Read", Method, 0}, + {"(*Reader).ReadAt", Method, 0}, + {"(*Reader).ReadByte", Method, 0}, + {"(*Reader).ReadRune", Method, 0}, + {"(*Reader).Reset", Method, 7}, + {"(*Reader).Seek", Method, 0}, + 
{"(*Reader).Size", Method, 5}, + {"(*Reader).UnreadByte", Method, 0}, + {"(*Reader).UnreadRune", Method, 0}, + {"(*Reader).WriteTo", Method, 1}, + {"(*Replacer).Replace", Method, 0}, + {"(*Replacer).WriteString", Method, 0}, + {"Builder", Type, 10}, + {"Clone", Func, 18}, + {"Compare", Func, 5}, + {"Contains", Func, 0}, + {"ContainsAny", Func, 0}, + {"ContainsFunc", Func, 21}, + {"ContainsRune", Func, 0}, + {"Count", Func, 0}, + {"Cut", Func, 18}, + {"CutPrefix", Func, 20}, + {"CutSuffix", Func, 20}, + {"EqualFold", Func, 0}, + {"Fields", Func, 0}, + {"FieldsFunc", Func, 0}, + {"HasPrefix", Func, 0}, + {"HasSuffix", Func, 0}, + {"Index", Func, 0}, + {"IndexAny", Func, 0}, + {"IndexByte", Func, 2}, + {"IndexFunc", Func, 0}, + {"IndexRune", Func, 0}, + {"Join", Func, 0}, + {"LastIndex", Func, 0}, + {"LastIndexAny", Func, 0}, + {"LastIndexByte", Func, 5}, + {"LastIndexFunc", Func, 0}, + {"Map", Func, 0}, + {"NewReader", Func, 0}, + {"NewReplacer", Func, 0}, + {"Reader", Type, 0}, + {"Repeat", Func, 0}, + {"Replace", Func, 0}, + {"ReplaceAll", Func, 12}, + {"Replacer", Type, 0}, + {"Split", Func, 0}, + {"SplitAfter", Func, 0}, + {"SplitAfterN", Func, 0}, + {"SplitN", Func, 0}, + {"Title", Func, 0}, + {"ToLower", Func, 0}, + {"ToLowerSpecial", Func, 0}, + {"ToTitle", Func, 0}, + {"ToTitleSpecial", Func, 0}, + {"ToUpper", Func, 0}, + {"ToUpperSpecial", Func, 0}, + {"ToValidUTF8", Func, 13}, + {"Trim", Func, 0}, + {"TrimFunc", Func, 0}, + {"TrimLeft", Func, 0}, + {"TrimLeftFunc", Func, 0}, + {"TrimPrefix", Func, 1}, + {"TrimRight", Func, 0}, + {"TrimRightFunc", Func, 0}, + {"TrimSpace", Func, 0}, + {"TrimSuffix", Func, 1}, + }, + "sync": { + {"(*Cond).Broadcast", Method, 0}, + {"(*Cond).Signal", Method, 0}, + {"(*Cond).Wait", Method, 0}, + {"(*Map).CompareAndDelete", Method, 20}, + {"(*Map).CompareAndSwap", Method, 20}, + {"(*Map).Delete", Method, 9}, + {"(*Map).Load", Method, 9}, + {"(*Map).LoadAndDelete", Method, 15}, + {"(*Map).LoadOrStore", Method, 9}, + {"(*Map).Range", Method, 9}, + {"(*Map).Store", Method, 9}, + {"(*Map).Swap", Method, 20}, + {"(*Mutex).Lock", Method, 0}, + {"(*Mutex).TryLock", Method, 18}, + {"(*Mutex).Unlock", Method, 0}, + {"(*Once).Do", Method, 0}, + {"(*Pool).Get", Method, 3}, + {"(*Pool).Put", Method, 3}, + {"(*RWMutex).Lock", Method, 0}, + {"(*RWMutex).RLock", Method, 0}, + {"(*RWMutex).RLocker", Method, 0}, + {"(*RWMutex).RUnlock", Method, 0}, + {"(*RWMutex).TryLock", Method, 18}, + {"(*RWMutex).TryRLock", Method, 18}, + {"(*RWMutex).Unlock", Method, 0}, + {"(*WaitGroup).Add", Method, 0}, + {"(*WaitGroup).Done", Method, 0}, + {"(*WaitGroup).Wait", Method, 0}, + {"Cond", Type, 0}, + {"Cond.L", Field, 0}, + {"Locker", Type, 0}, + {"Map", Type, 9}, + {"Mutex", Type, 0}, + {"NewCond", Func, 0}, + {"Once", Type, 0}, + {"OnceFunc", Func, 21}, + {"OnceValue", Func, 21}, + {"OnceValues", Func, 21}, + {"Pool", Type, 3}, + {"Pool.New", Field, 3}, + {"RWMutex", Type, 0}, + {"WaitGroup", Type, 0}, + }, + "sync/atomic": { + {"(*Bool).CompareAndSwap", Method, 19}, + {"(*Bool).Load", Method, 19}, + {"(*Bool).Store", Method, 19}, + {"(*Bool).Swap", Method, 19}, + {"(*Int32).Add", Method, 19}, + {"(*Int32).CompareAndSwap", Method, 19}, + {"(*Int32).Load", Method, 19}, + {"(*Int32).Store", Method, 19}, + {"(*Int32).Swap", Method, 19}, + {"(*Int64).Add", Method, 19}, + {"(*Int64).CompareAndSwap", Method, 19}, + {"(*Int64).Load", Method, 19}, + {"(*Int64).Store", Method, 19}, + {"(*Int64).Swap", Method, 19}, + {"(*Pointer).CompareAndSwap", Method, 19}, + {"(*Pointer).Load", Method, 
19}, + {"(*Pointer).Store", Method, 19}, + {"(*Pointer).Swap", Method, 19}, + {"(*Uint32).Add", Method, 19}, + {"(*Uint32).CompareAndSwap", Method, 19}, + {"(*Uint32).Load", Method, 19}, + {"(*Uint32).Store", Method, 19}, + {"(*Uint32).Swap", Method, 19}, + {"(*Uint64).Add", Method, 19}, + {"(*Uint64).CompareAndSwap", Method, 19}, + {"(*Uint64).Load", Method, 19}, + {"(*Uint64).Store", Method, 19}, + {"(*Uint64).Swap", Method, 19}, + {"(*Uintptr).Add", Method, 19}, + {"(*Uintptr).CompareAndSwap", Method, 19}, + {"(*Uintptr).Load", Method, 19}, + {"(*Uintptr).Store", Method, 19}, + {"(*Uintptr).Swap", Method, 19}, + {"(*Value).CompareAndSwap", Method, 17}, + {"(*Value).Load", Method, 4}, + {"(*Value).Store", Method, 4}, + {"(*Value).Swap", Method, 17}, + {"AddInt32", Func, 0}, + {"AddInt64", Func, 0}, + {"AddUint32", Func, 0}, + {"AddUint64", Func, 0}, + {"AddUintptr", Func, 0}, + {"Bool", Type, 19}, + {"CompareAndSwapInt32", Func, 0}, + {"CompareAndSwapInt64", Func, 0}, + {"CompareAndSwapPointer", Func, 0}, + {"CompareAndSwapUint32", Func, 0}, + {"CompareAndSwapUint64", Func, 0}, + {"CompareAndSwapUintptr", Func, 0}, + {"Int32", Type, 19}, + {"Int64", Type, 19}, + {"LoadInt32", Func, 0}, + {"LoadInt64", Func, 0}, + {"LoadPointer", Func, 0}, + {"LoadUint32", Func, 0}, + {"LoadUint64", Func, 0}, + {"LoadUintptr", Func, 0}, + {"Pointer", Type, 19}, + {"StoreInt32", Func, 0}, + {"StoreInt64", Func, 0}, + {"StorePointer", Func, 0}, + {"StoreUint32", Func, 0}, + {"StoreUint64", Func, 0}, + {"StoreUintptr", Func, 0}, + {"SwapInt32", Func, 2}, + {"SwapInt64", Func, 2}, + {"SwapPointer", Func, 2}, + {"SwapUint32", Func, 2}, + {"SwapUint64", Func, 2}, + {"SwapUintptr", Func, 2}, + {"Uint32", Type, 19}, + {"Uint64", Type, 19}, + {"Uintptr", Type, 19}, + {"Value", Type, 4}, + }, + "syscall": { + {"(*Cmsghdr).SetLen", Method, 0}, + {"(*DLL).FindProc", Method, 0}, + {"(*DLL).MustFindProc", Method, 0}, + {"(*DLL).Release", Method, 0}, + {"(*DLLError).Error", Method, 0}, + {"(*DLLError).Unwrap", Method, 16}, + {"(*Filetime).Nanoseconds", Method, 0}, + {"(*Iovec).SetLen", Method, 0}, + {"(*LazyDLL).Handle", Method, 0}, + {"(*LazyDLL).Load", Method, 0}, + {"(*LazyDLL).NewProc", Method, 0}, + {"(*LazyProc).Addr", Method, 0}, + {"(*LazyProc).Call", Method, 0}, + {"(*LazyProc).Find", Method, 0}, + {"(*Msghdr).SetControllen", Method, 0}, + {"(*Proc).Addr", Method, 0}, + {"(*Proc).Call", Method, 0}, + {"(*PtraceRegs).PC", Method, 0}, + {"(*PtraceRegs).SetPC", Method, 0}, + {"(*RawSockaddrAny).Sockaddr", Method, 0}, + {"(*SID).Copy", Method, 0}, + {"(*SID).Len", Method, 0}, + {"(*SID).LookupAccount", Method, 0}, + {"(*SID).String", Method, 0}, + {"(*Timespec).Nano", Method, 0}, + {"(*Timespec).Unix", Method, 0}, + {"(*Timeval).Nano", Method, 0}, + {"(*Timeval).Nanoseconds", Method, 0}, + {"(*Timeval).Unix", Method, 0}, + {"(Errno).Error", Method, 0}, + {"(Errno).Is", Method, 13}, + {"(Errno).Temporary", Method, 0}, + {"(Errno).Timeout", Method, 0}, + {"(Signal).Signal", Method, 0}, + {"(Signal).String", Method, 0}, + {"(Token).Close", Method, 0}, + {"(Token).GetTokenPrimaryGroup", Method, 0}, + {"(Token).GetTokenUser", Method, 0}, + {"(Token).GetUserProfileDirectory", Method, 0}, + {"(WaitStatus).Continued", Method, 0}, + {"(WaitStatus).CoreDump", Method, 0}, + {"(WaitStatus).ExitStatus", Method, 0}, + {"(WaitStatus).Exited", Method, 0}, + {"(WaitStatus).Signal", Method, 0}, + {"(WaitStatus).Signaled", Method, 0}, + {"(WaitStatus).StopSignal", Method, 0}, + {"(WaitStatus).Stopped", Method, 0}, + 
{"(WaitStatus).TrapCause", Method, 0}, + {"AF_ALG", Const, 0}, + {"AF_APPLETALK", Const, 0}, + {"AF_ARP", Const, 0}, + {"AF_ASH", Const, 0}, + {"AF_ATM", Const, 0}, + {"AF_ATMPVC", Const, 0}, + {"AF_ATMSVC", Const, 0}, + {"AF_AX25", Const, 0}, + {"AF_BLUETOOTH", Const, 0}, + {"AF_BRIDGE", Const, 0}, + {"AF_CAIF", Const, 0}, + {"AF_CAN", Const, 0}, + {"AF_CCITT", Const, 0}, + {"AF_CHAOS", Const, 0}, + {"AF_CNT", Const, 0}, + {"AF_COIP", Const, 0}, + {"AF_DATAKIT", Const, 0}, + {"AF_DECnet", Const, 0}, + {"AF_DLI", Const, 0}, + {"AF_E164", Const, 0}, + {"AF_ECMA", Const, 0}, + {"AF_ECONET", Const, 0}, + {"AF_ENCAP", Const, 1}, + {"AF_FILE", Const, 0}, + {"AF_HYLINK", Const, 0}, + {"AF_IEEE80211", Const, 0}, + {"AF_IEEE802154", Const, 0}, + {"AF_IMPLINK", Const, 0}, + {"AF_INET", Const, 0}, + {"AF_INET6", Const, 0}, + {"AF_INET6_SDP", Const, 3}, + {"AF_INET_SDP", Const, 3}, + {"AF_IPX", Const, 0}, + {"AF_IRDA", Const, 0}, + {"AF_ISDN", Const, 0}, + {"AF_ISO", Const, 0}, + {"AF_IUCV", Const, 0}, + {"AF_KEY", Const, 0}, + {"AF_LAT", Const, 0}, + {"AF_LINK", Const, 0}, + {"AF_LLC", Const, 0}, + {"AF_LOCAL", Const, 0}, + {"AF_MAX", Const, 0}, + {"AF_MPLS", Const, 1}, + {"AF_NATM", Const, 0}, + {"AF_NDRV", Const, 0}, + {"AF_NETBEUI", Const, 0}, + {"AF_NETBIOS", Const, 0}, + {"AF_NETGRAPH", Const, 0}, + {"AF_NETLINK", Const, 0}, + {"AF_NETROM", Const, 0}, + {"AF_NS", Const, 0}, + {"AF_OROUTE", Const, 1}, + {"AF_OSI", Const, 0}, + {"AF_PACKET", Const, 0}, + {"AF_PHONET", Const, 0}, + {"AF_PPP", Const, 0}, + {"AF_PPPOX", Const, 0}, + {"AF_PUP", Const, 0}, + {"AF_RDS", Const, 0}, + {"AF_RESERVED_36", Const, 0}, + {"AF_ROSE", Const, 0}, + {"AF_ROUTE", Const, 0}, + {"AF_RXRPC", Const, 0}, + {"AF_SCLUSTER", Const, 0}, + {"AF_SECURITY", Const, 0}, + {"AF_SIP", Const, 0}, + {"AF_SLOW", Const, 0}, + {"AF_SNA", Const, 0}, + {"AF_SYSTEM", Const, 0}, + {"AF_TIPC", Const, 0}, + {"AF_UNIX", Const, 0}, + {"AF_UNSPEC", Const, 0}, + {"AF_UTUN", Const, 16}, + {"AF_VENDOR00", Const, 0}, + {"AF_VENDOR01", Const, 0}, + {"AF_VENDOR02", Const, 0}, + {"AF_VENDOR03", Const, 0}, + {"AF_VENDOR04", Const, 0}, + {"AF_VENDOR05", Const, 0}, + {"AF_VENDOR06", Const, 0}, + {"AF_VENDOR07", Const, 0}, + {"AF_VENDOR08", Const, 0}, + {"AF_VENDOR09", Const, 0}, + {"AF_VENDOR10", Const, 0}, + {"AF_VENDOR11", Const, 0}, + {"AF_VENDOR12", Const, 0}, + {"AF_VENDOR13", Const, 0}, + {"AF_VENDOR14", Const, 0}, + {"AF_VENDOR15", Const, 0}, + {"AF_VENDOR16", Const, 0}, + {"AF_VENDOR17", Const, 0}, + {"AF_VENDOR18", Const, 0}, + {"AF_VENDOR19", Const, 0}, + {"AF_VENDOR20", Const, 0}, + {"AF_VENDOR21", Const, 0}, + {"AF_VENDOR22", Const, 0}, + {"AF_VENDOR23", Const, 0}, + {"AF_VENDOR24", Const, 0}, + {"AF_VENDOR25", Const, 0}, + {"AF_VENDOR26", Const, 0}, + {"AF_VENDOR27", Const, 0}, + {"AF_VENDOR28", Const, 0}, + {"AF_VENDOR29", Const, 0}, + {"AF_VENDOR30", Const, 0}, + {"AF_VENDOR31", Const, 0}, + {"AF_VENDOR32", Const, 0}, + {"AF_VENDOR33", Const, 0}, + {"AF_VENDOR34", Const, 0}, + {"AF_VENDOR35", Const, 0}, + {"AF_VENDOR36", Const, 0}, + {"AF_VENDOR37", Const, 0}, + {"AF_VENDOR38", Const, 0}, + {"AF_VENDOR39", Const, 0}, + {"AF_VENDOR40", Const, 0}, + {"AF_VENDOR41", Const, 0}, + {"AF_VENDOR42", Const, 0}, + {"AF_VENDOR43", Const, 0}, + {"AF_VENDOR44", Const, 0}, + {"AF_VENDOR45", Const, 0}, + {"AF_VENDOR46", Const, 0}, + {"AF_VENDOR47", Const, 0}, + {"AF_WANPIPE", Const, 0}, + {"AF_X25", Const, 0}, + {"AI_CANONNAME", Const, 1}, + {"AI_NUMERICHOST", Const, 1}, + {"AI_PASSIVE", Const, 1}, + {"APPLICATION_ERROR", Const, 0}, + {"ARPHRD_ADAPT", 
Const, 0}, + {"ARPHRD_APPLETLK", Const, 0}, + {"ARPHRD_ARCNET", Const, 0}, + {"ARPHRD_ASH", Const, 0}, + {"ARPHRD_ATM", Const, 0}, + {"ARPHRD_AX25", Const, 0}, + {"ARPHRD_BIF", Const, 0}, + {"ARPHRD_CHAOS", Const, 0}, + {"ARPHRD_CISCO", Const, 0}, + {"ARPHRD_CSLIP", Const, 0}, + {"ARPHRD_CSLIP6", Const, 0}, + {"ARPHRD_DDCMP", Const, 0}, + {"ARPHRD_DLCI", Const, 0}, + {"ARPHRD_ECONET", Const, 0}, + {"ARPHRD_EETHER", Const, 0}, + {"ARPHRD_ETHER", Const, 0}, + {"ARPHRD_EUI64", Const, 0}, + {"ARPHRD_FCAL", Const, 0}, + {"ARPHRD_FCFABRIC", Const, 0}, + {"ARPHRD_FCPL", Const, 0}, + {"ARPHRD_FCPP", Const, 0}, + {"ARPHRD_FDDI", Const, 0}, + {"ARPHRD_FRAD", Const, 0}, + {"ARPHRD_FRELAY", Const, 1}, + {"ARPHRD_HDLC", Const, 0}, + {"ARPHRD_HIPPI", Const, 0}, + {"ARPHRD_HWX25", Const, 0}, + {"ARPHRD_IEEE1394", Const, 0}, + {"ARPHRD_IEEE802", Const, 0}, + {"ARPHRD_IEEE80211", Const, 0}, + {"ARPHRD_IEEE80211_PRISM", Const, 0}, + {"ARPHRD_IEEE80211_RADIOTAP", Const, 0}, + {"ARPHRD_IEEE802154", Const, 0}, + {"ARPHRD_IEEE802154_PHY", Const, 0}, + {"ARPHRD_IEEE802_TR", Const, 0}, + {"ARPHRD_INFINIBAND", Const, 0}, + {"ARPHRD_IPDDP", Const, 0}, + {"ARPHRD_IPGRE", Const, 0}, + {"ARPHRD_IRDA", Const, 0}, + {"ARPHRD_LAPB", Const, 0}, + {"ARPHRD_LOCALTLK", Const, 0}, + {"ARPHRD_LOOPBACK", Const, 0}, + {"ARPHRD_METRICOM", Const, 0}, + {"ARPHRD_NETROM", Const, 0}, + {"ARPHRD_NONE", Const, 0}, + {"ARPHRD_PIMREG", Const, 0}, + {"ARPHRD_PPP", Const, 0}, + {"ARPHRD_PRONET", Const, 0}, + {"ARPHRD_RAWHDLC", Const, 0}, + {"ARPHRD_ROSE", Const, 0}, + {"ARPHRD_RSRVD", Const, 0}, + {"ARPHRD_SIT", Const, 0}, + {"ARPHRD_SKIP", Const, 0}, + {"ARPHRD_SLIP", Const, 0}, + {"ARPHRD_SLIP6", Const, 0}, + {"ARPHRD_STRIP", Const, 1}, + {"ARPHRD_TUNNEL", Const, 0}, + {"ARPHRD_TUNNEL6", Const, 0}, + {"ARPHRD_VOID", Const, 0}, + {"ARPHRD_X25", Const, 0}, + {"AUTHTYPE_CLIENT", Const, 0}, + {"AUTHTYPE_SERVER", Const, 0}, + {"Accept", Func, 0}, + {"Accept4", Func, 1}, + {"AcceptEx", Func, 0}, + {"Access", Func, 0}, + {"Acct", Func, 0}, + {"AddrinfoW", Type, 1}, + {"AddrinfoW.Addr", Field, 1}, + {"AddrinfoW.Addrlen", Field, 1}, + {"AddrinfoW.Canonname", Field, 1}, + {"AddrinfoW.Family", Field, 1}, + {"AddrinfoW.Flags", Field, 1}, + {"AddrinfoW.Next", Field, 1}, + {"AddrinfoW.Protocol", Field, 1}, + {"AddrinfoW.Socktype", Field, 1}, + {"Adjtime", Func, 0}, + {"Adjtimex", Func, 0}, + {"AllThreadsSyscall", Func, 16}, + {"AllThreadsSyscall6", Func, 16}, + {"AttachLsf", Func, 0}, + {"B0", Const, 0}, + {"B1000000", Const, 0}, + {"B110", Const, 0}, + {"B115200", Const, 0}, + {"B1152000", Const, 0}, + {"B1200", Const, 0}, + {"B134", Const, 0}, + {"B14400", Const, 1}, + {"B150", Const, 0}, + {"B1500000", Const, 0}, + {"B1800", Const, 0}, + {"B19200", Const, 0}, + {"B200", Const, 0}, + {"B2000000", Const, 0}, + {"B230400", Const, 0}, + {"B2400", Const, 0}, + {"B2500000", Const, 0}, + {"B28800", Const, 1}, + {"B300", Const, 0}, + {"B3000000", Const, 0}, + {"B3500000", Const, 0}, + {"B38400", Const, 0}, + {"B4000000", Const, 0}, + {"B460800", Const, 0}, + {"B4800", Const, 0}, + {"B50", Const, 0}, + {"B500000", Const, 0}, + {"B57600", Const, 0}, + {"B576000", Const, 0}, + {"B600", Const, 0}, + {"B7200", Const, 1}, + {"B75", Const, 0}, + {"B76800", Const, 1}, + {"B921600", Const, 0}, + {"B9600", Const, 0}, + {"BASE_PROTOCOL", Const, 2}, + {"BIOCFEEDBACK", Const, 0}, + {"BIOCFLUSH", Const, 0}, + {"BIOCGBLEN", Const, 0}, + {"BIOCGDIRECTION", Const, 0}, + {"BIOCGDIRFILT", Const, 1}, + {"BIOCGDLT", Const, 0}, + {"BIOCGDLTLIST", Const, 0}, + {"BIOCGETBUFMODE", 
Const, 0}, + {"BIOCGETIF", Const, 0}, + {"BIOCGETZMAX", Const, 0}, + {"BIOCGFEEDBACK", Const, 1}, + {"BIOCGFILDROP", Const, 1}, + {"BIOCGHDRCMPLT", Const, 0}, + {"BIOCGRSIG", Const, 0}, + {"BIOCGRTIMEOUT", Const, 0}, + {"BIOCGSEESENT", Const, 0}, + {"BIOCGSTATS", Const, 0}, + {"BIOCGSTATSOLD", Const, 1}, + {"BIOCGTSTAMP", Const, 1}, + {"BIOCIMMEDIATE", Const, 0}, + {"BIOCLOCK", Const, 0}, + {"BIOCPROMISC", Const, 0}, + {"BIOCROTZBUF", Const, 0}, + {"BIOCSBLEN", Const, 0}, + {"BIOCSDIRECTION", Const, 0}, + {"BIOCSDIRFILT", Const, 1}, + {"BIOCSDLT", Const, 0}, + {"BIOCSETBUFMODE", Const, 0}, + {"BIOCSETF", Const, 0}, + {"BIOCSETFNR", Const, 0}, + {"BIOCSETIF", Const, 0}, + {"BIOCSETWF", Const, 0}, + {"BIOCSETZBUF", Const, 0}, + {"BIOCSFEEDBACK", Const, 1}, + {"BIOCSFILDROP", Const, 1}, + {"BIOCSHDRCMPLT", Const, 0}, + {"BIOCSRSIG", Const, 0}, + {"BIOCSRTIMEOUT", Const, 0}, + {"BIOCSSEESENT", Const, 0}, + {"BIOCSTCPF", Const, 1}, + {"BIOCSTSTAMP", Const, 1}, + {"BIOCSUDPF", Const, 1}, + {"BIOCVERSION", Const, 0}, + {"BPF_A", Const, 0}, + {"BPF_ABS", Const, 0}, + {"BPF_ADD", Const, 0}, + {"BPF_ALIGNMENT", Const, 0}, + {"BPF_ALIGNMENT32", Const, 1}, + {"BPF_ALU", Const, 0}, + {"BPF_AND", Const, 0}, + {"BPF_B", Const, 0}, + {"BPF_BUFMODE_BUFFER", Const, 0}, + {"BPF_BUFMODE_ZBUF", Const, 0}, + {"BPF_DFLTBUFSIZE", Const, 1}, + {"BPF_DIRECTION_IN", Const, 1}, + {"BPF_DIRECTION_OUT", Const, 1}, + {"BPF_DIV", Const, 0}, + {"BPF_H", Const, 0}, + {"BPF_IMM", Const, 0}, + {"BPF_IND", Const, 0}, + {"BPF_JA", Const, 0}, + {"BPF_JEQ", Const, 0}, + {"BPF_JGE", Const, 0}, + {"BPF_JGT", Const, 0}, + {"BPF_JMP", Const, 0}, + {"BPF_JSET", Const, 0}, + {"BPF_K", Const, 0}, + {"BPF_LD", Const, 0}, + {"BPF_LDX", Const, 0}, + {"BPF_LEN", Const, 0}, + {"BPF_LSH", Const, 0}, + {"BPF_MAJOR_VERSION", Const, 0}, + {"BPF_MAXBUFSIZE", Const, 0}, + {"BPF_MAXINSNS", Const, 0}, + {"BPF_MEM", Const, 0}, + {"BPF_MEMWORDS", Const, 0}, + {"BPF_MINBUFSIZE", Const, 0}, + {"BPF_MINOR_VERSION", Const, 0}, + {"BPF_MISC", Const, 0}, + {"BPF_MSH", Const, 0}, + {"BPF_MUL", Const, 0}, + {"BPF_NEG", Const, 0}, + {"BPF_OR", Const, 0}, + {"BPF_RELEASE", Const, 0}, + {"BPF_RET", Const, 0}, + {"BPF_RSH", Const, 0}, + {"BPF_ST", Const, 0}, + {"BPF_STX", Const, 0}, + {"BPF_SUB", Const, 0}, + {"BPF_TAX", Const, 0}, + {"BPF_TXA", Const, 0}, + {"BPF_T_BINTIME", Const, 1}, + {"BPF_T_BINTIME_FAST", Const, 1}, + {"BPF_T_BINTIME_MONOTONIC", Const, 1}, + {"BPF_T_BINTIME_MONOTONIC_FAST", Const, 1}, + {"BPF_T_FAST", Const, 1}, + {"BPF_T_FLAG_MASK", Const, 1}, + {"BPF_T_FORMAT_MASK", Const, 1}, + {"BPF_T_MICROTIME", Const, 1}, + {"BPF_T_MICROTIME_FAST", Const, 1}, + {"BPF_T_MICROTIME_MONOTONIC", Const, 1}, + {"BPF_T_MICROTIME_MONOTONIC_FAST", Const, 1}, + {"BPF_T_MONOTONIC", Const, 1}, + {"BPF_T_MONOTONIC_FAST", Const, 1}, + {"BPF_T_NANOTIME", Const, 1}, + {"BPF_T_NANOTIME_FAST", Const, 1}, + {"BPF_T_NANOTIME_MONOTONIC", Const, 1}, + {"BPF_T_NANOTIME_MONOTONIC_FAST", Const, 1}, + {"BPF_T_NONE", Const, 1}, + {"BPF_T_NORMAL", Const, 1}, + {"BPF_W", Const, 0}, + {"BPF_X", Const, 0}, + {"BRKINT", Const, 0}, + {"Bind", Func, 0}, + {"BindToDevice", Func, 0}, + {"BpfBuflen", Func, 0}, + {"BpfDatalink", Func, 0}, + {"BpfHdr", Type, 0}, + {"BpfHdr.Caplen", Field, 0}, + {"BpfHdr.Datalen", Field, 0}, + {"BpfHdr.Hdrlen", Field, 0}, + {"BpfHdr.Pad_cgo_0", Field, 0}, + {"BpfHdr.Tstamp", Field, 0}, + {"BpfHeadercmpl", Func, 0}, + {"BpfInsn", Type, 0}, + {"BpfInsn.Code", Field, 0}, + {"BpfInsn.Jf", Field, 0}, + {"BpfInsn.Jt", Field, 0}, + {"BpfInsn.K", Field, 0}, + 
{"BpfInterface", Func, 0}, + {"BpfJump", Func, 0}, + {"BpfProgram", Type, 0}, + {"BpfProgram.Insns", Field, 0}, + {"BpfProgram.Len", Field, 0}, + {"BpfProgram.Pad_cgo_0", Field, 0}, + {"BpfStat", Type, 0}, + {"BpfStat.Capt", Field, 2}, + {"BpfStat.Drop", Field, 0}, + {"BpfStat.Padding", Field, 2}, + {"BpfStat.Recv", Field, 0}, + {"BpfStats", Func, 0}, + {"BpfStmt", Func, 0}, + {"BpfTimeout", Func, 0}, + {"BpfTimeval", Type, 2}, + {"BpfTimeval.Sec", Field, 2}, + {"BpfTimeval.Usec", Field, 2}, + {"BpfVersion", Type, 0}, + {"BpfVersion.Major", Field, 0}, + {"BpfVersion.Minor", Field, 0}, + {"BpfZbuf", Type, 0}, + {"BpfZbuf.Bufa", Field, 0}, + {"BpfZbuf.Bufb", Field, 0}, + {"BpfZbuf.Buflen", Field, 0}, + {"BpfZbufHeader", Type, 0}, + {"BpfZbufHeader.Kernel_gen", Field, 0}, + {"BpfZbufHeader.Kernel_len", Field, 0}, + {"BpfZbufHeader.User_gen", Field, 0}, + {"BpfZbufHeader.X_bzh_pad", Field, 0}, + {"ByHandleFileInformation", Type, 0}, + {"ByHandleFileInformation.CreationTime", Field, 0}, + {"ByHandleFileInformation.FileAttributes", Field, 0}, + {"ByHandleFileInformation.FileIndexHigh", Field, 0}, + {"ByHandleFileInformation.FileIndexLow", Field, 0}, + {"ByHandleFileInformation.FileSizeHigh", Field, 0}, + {"ByHandleFileInformation.FileSizeLow", Field, 0}, + {"ByHandleFileInformation.LastAccessTime", Field, 0}, + {"ByHandleFileInformation.LastWriteTime", Field, 0}, + {"ByHandleFileInformation.NumberOfLinks", Field, 0}, + {"ByHandleFileInformation.VolumeSerialNumber", Field, 0}, + {"BytePtrFromString", Func, 1}, + {"ByteSliceFromString", Func, 1}, + {"CCR0_FLUSH", Const, 1}, + {"CERT_CHAIN_POLICY_AUTHENTICODE", Const, 0}, + {"CERT_CHAIN_POLICY_AUTHENTICODE_TS", Const, 0}, + {"CERT_CHAIN_POLICY_BASE", Const, 0}, + {"CERT_CHAIN_POLICY_BASIC_CONSTRAINTS", Const, 0}, + {"CERT_CHAIN_POLICY_EV", Const, 0}, + {"CERT_CHAIN_POLICY_MICROSOFT_ROOT", Const, 0}, + {"CERT_CHAIN_POLICY_NT_AUTH", Const, 0}, + {"CERT_CHAIN_POLICY_SSL", Const, 0}, + {"CERT_E_CN_NO_MATCH", Const, 0}, + {"CERT_E_EXPIRED", Const, 0}, + {"CERT_E_PURPOSE", Const, 0}, + {"CERT_E_ROLE", Const, 0}, + {"CERT_E_UNTRUSTEDROOT", Const, 0}, + {"CERT_STORE_ADD_ALWAYS", Const, 0}, + {"CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG", Const, 0}, + {"CERT_STORE_PROV_MEMORY", Const, 0}, + {"CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT", Const, 0}, + {"CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT", Const, 0}, + {"CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT", Const, 0}, + {"CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT", Const, 0}, + {"CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT", Const, 0}, + {"CERT_TRUST_INVALID_BASIC_CONSTRAINTS", Const, 0}, + {"CERT_TRUST_INVALID_EXTENSION", Const, 0}, + {"CERT_TRUST_INVALID_NAME_CONSTRAINTS", Const, 0}, + {"CERT_TRUST_INVALID_POLICY_CONSTRAINTS", Const, 0}, + {"CERT_TRUST_IS_CYCLIC", Const, 0}, + {"CERT_TRUST_IS_EXPLICIT_DISTRUST", Const, 0}, + {"CERT_TRUST_IS_NOT_SIGNATURE_VALID", Const, 0}, + {"CERT_TRUST_IS_NOT_TIME_VALID", Const, 0}, + {"CERT_TRUST_IS_NOT_VALID_FOR_USAGE", Const, 0}, + {"CERT_TRUST_IS_OFFLINE_REVOCATION", Const, 0}, + {"CERT_TRUST_IS_REVOKED", Const, 0}, + {"CERT_TRUST_IS_UNTRUSTED_ROOT", Const, 0}, + {"CERT_TRUST_NO_ERROR", Const, 0}, + {"CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY", Const, 0}, + {"CERT_TRUST_REVOCATION_STATUS_UNKNOWN", Const, 0}, + {"CFLUSH", Const, 1}, + {"CLOCAL", Const, 0}, + {"CLONE_CHILD_CLEARTID", Const, 2}, + {"CLONE_CHILD_SETTID", Const, 2}, + {"CLONE_CLEAR_SIGHAND", Const, 20}, + {"CLONE_CSIGNAL", Const, 3}, + {"CLONE_DETACHED", Const, 2}, + {"CLONE_FILES", Const, 2}, + {"CLONE_FS", 
Const, 2}, + {"CLONE_INTO_CGROUP", Const, 20}, + {"CLONE_IO", Const, 2}, + {"CLONE_NEWCGROUP", Const, 20}, + {"CLONE_NEWIPC", Const, 2}, + {"CLONE_NEWNET", Const, 2}, + {"CLONE_NEWNS", Const, 2}, + {"CLONE_NEWPID", Const, 2}, + {"CLONE_NEWTIME", Const, 20}, + {"CLONE_NEWUSER", Const, 2}, + {"CLONE_NEWUTS", Const, 2}, + {"CLONE_PARENT", Const, 2}, + {"CLONE_PARENT_SETTID", Const, 2}, + {"CLONE_PID", Const, 3}, + {"CLONE_PIDFD", Const, 20}, + {"CLONE_PTRACE", Const, 2}, + {"CLONE_SETTLS", Const, 2}, + {"CLONE_SIGHAND", Const, 2}, + {"CLONE_SYSVSEM", Const, 2}, + {"CLONE_THREAD", Const, 2}, + {"CLONE_UNTRACED", Const, 2}, + {"CLONE_VFORK", Const, 2}, + {"CLONE_VM", Const, 2}, + {"CPUID_CFLUSH", Const, 1}, + {"CREAD", Const, 0}, + {"CREATE_ALWAYS", Const, 0}, + {"CREATE_NEW", Const, 0}, + {"CREATE_NEW_PROCESS_GROUP", Const, 1}, + {"CREATE_UNICODE_ENVIRONMENT", Const, 0}, + {"CRYPT_DEFAULT_CONTAINER_OPTIONAL", Const, 0}, + {"CRYPT_DELETEKEYSET", Const, 0}, + {"CRYPT_MACHINE_KEYSET", Const, 0}, + {"CRYPT_NEWKEYSET", Const, 0}, + {"CRYPT_SILENT", Const, 0}, + {"CRYPT_VERIFYCONTEXT", Const, 0}, + {"CS5", Const, 0}, + {"CS6", Const, 0}, + {"CS7", Const, 0}, + {"CS8", Const, 0}, + {"CSIZE", Const, 0}, + {"CSTART", Const, 1}, + {"CSTATUS", Const, 1}, + {"CSTOP", Const, 1}, + {"CSTOPB", Const, 0}, + {"CSUSP", Const, 1}, + {"CTL_MAXNAME", Const, 0}, + {"CTL_NET", Const, 0}, + {"CTL_QUERY", Const, 1}, + {"CTRL_BREAK_EVENT", Const, 1}, + {"CTRL_CLOSE_EVENT", Const, 14}, + {"CTRL_C_EVENT", Const, 1}, + {"CTRL_LOGOFF_EVENT", Const, 14}, + {"CTRL_SHUTDOWN_EVENT", Const, 14}, + {"CancelIo", Func, 0}, + {"CancelIoEx", Func, 1}, + {"CertAddCertificateContextToStore", Func, 0}, + {"CertChainContext", Type, 0}, + {"CertChainContext.ChainCount", Field, 0}, + {"CertChainContext.Chains", Field, 0}, + {"CertChainContext.HasRevocationFreshnessTime", Field, 0}, + {"CertChainContext.LowerQualityChainCount", Field, 0}, + {"CertChainContext.LowerQualityChains", Field, 0}, + {"CertChainContext.RevocationFreshnessTime", Field, 0}, + {"CertChainContext.Size", Field, 0}, + {"CertChainContext.TrustStatus", Field, 0}, + {"CertChainElement", Type, 0}, + {"CertChainElement.ApplicationUsage", Field, 0}, + {"CertChainElement.CertContext", Field, 0}, + {"CertChainElement.ExtendedErrorInfo", Field, 0}, + {"CertChainElement.IssuanceUsage", Field, 0}, + {"CertChainElement.RevocationInfo", Field, 0}, + {"CertChainElement.Size", Field, 0}, + {"CertChainElement.TrustStatus", Field, 0}, + {"CertChainPara", Type, 0}, + {"CertChainPara.CacheResync", Field, 0}, + {"CertChainPara.CheckRevocationFreshnessTime", Field, 0}, + {"CertChainPara.RequestedUsage", Field, 0}, + {"CertChainPara.RequstedIssuancePolicy", Field, 0}, + {"CertChainPara.RevocationFreshnessTime", Field, 0}, + {"CertChainPara.Size", Field, 0}, + {"CertChainPara.URLRetrievalTimeout", Field, 0}, + {"CertChainPolicyPara", Type, 0}, + {"CertChainPolicyPara.ExtraPolicyPara", Field, 0}, + {"CertChainPolicyPara.Flags", Field, 0}, + {"CertChainPolicyPara.Size", Field, 0}, + {"CertChainPolicyStatus", Type, 0}, + {"CertChainPolicyStatus.ChainIndex", Field, 0}, + {"CertChainPolicyStatus.ElementIndex", Field, 0}, + {"CertChainPolicyStatus.Error", Field, 0}, + {"CertChainPolicyStatus.ExtraPolicyStatus", Field, 0}, + {"CertChainPolicyStatus.Size", Field, 0}, + {"CertCloseStore", Func, 0}, + {"CertContext", Type, 0}, + {"CertContext.CertInfo", Field, 0}, + {"CertContext.EncodedCert", Field, 0}, + {"CertContext.EncodingType", Field, 0}, + {"CertContext.Length", Field, 0}, + 
{"CertContext.Store", Field, 0}, + {"CertCreateCertificateContext", Func, 0}, + {"CertEnhKeyUsage", Type, 0}, + {"CertEnhKeyUsage.Length", Field, 0}, + {"CertEnhKeyUsage.UsageIdentifiers", Field, 0}, + {"CertEnumCertificatesInStore", Func, 0}, + {"CertFreeCertificateChain", Func, 0}, + {"CertFreeCertificateContext", Func, 0}, + {"CertGetCertificateChain", Func, 0}, + {"CertInfo", Type, 11}, + {"CertOpenStore", Func, 0}, + {"CertOpenSystemStore", Func, 0}, + {"CertRevocationCrlInfo", Type, 11}, + {"CertRevocationInfo", Type, 0}, + {"CertRevocationInfo.CrlInfo", Field, 0}, + {"CertRevocationInfo.FreshnessTime", Field, 0}, + {"CertRevocationInfo.HasFreshnessTime", Field, 0}, + {"CertRevocationInfo.OidSpecificInfo", Field, 0}, + {"CertRevocationInfo.RevocationOid", Field, 0}, + {"CertRevocationInfo.RevocationResult", Field, 0}, + {"CertRevocationInfo.Size", Field, 0}, + {"CertSimpleChain", Type, 0}, + {"CertSimpleChain.Elements", Field, 0}, + {"CertSimpleChain.HasRevocationFreshnessTime", Field, 0}, + {"CertSimpleChain.NumElements", Field, 0}, + {"CertSimpleChain.RevocationFreshnessTime", Field, 0}, + {"CertSimpleChain.Size", Field, 0}, + {"CertSimpleChain.TrustListInfo", Field, 0}, + {"CertSimpleChain.TrustStatus", Field, 0}, + {"CertTrustListInfo", Type, 11}, + {"CertTrustStatus", Type, 0}, + {"CertTrustStatus.ErrorStatus", Field, 0}, + {"CertTrustStatus.InfoStatus", Field, 0}, + {"CertUsageMatch", Type, 0}, + {"CertUsageMatch.Type", Field, 0}, + {"CertUsageMatch.Usage", Field, 0}, + {"CertVerifyCertificateChainPolicy", Func, 0}, + {"Chdir", Func, 0}, + {"CheckBpfVersion", Func, 0}, + {"Chflags", Func, 0}, + {"Chmod", Func, 0}, + {"Chown", Func, 0}, + {"Chroot", Func, 0}, + {"Clearenv", Func, 0}, + {"Close", Func, 0}, + {"CloseHandle", Func, 0}, + {"CloseOnExec", Func, 0}, + {"Closesocket", Func, 0}, + {"CmsgLen", Func, 0}, + {"CmsgSpace", Func, 0}, + {"Cmsghdr", Type, 0}, + {"Cmsghdr.Len", Field, 0}, + {"Cmsghdr.Level", Field, 0}, + {"Cmsghdr.Type", Field, 0}, + {"Cmsghdr.X__cmsg_data", Field, 0}, + {"CommandLineToArgv", Func, 0}, + {"ComputerName", Func, 0}, + {"Conn", Type, 9}, + {"Connect", Func, 0}, + {"ConnectEx", Func, 1}, + {"ConvertSidToStringSid", Func, 0}, + {"ConvertStringSidToSid", Func, 0}, + {"CopySid", Func, 0}, + {"Creat", Func, 0}, + {"CreateDirectory", Func, 0}, + {"CreateFile", Func, 0}, + {"CreateFileMapping", Func, 0}, + {"CreateHardLink", Func, 4}, + {"CreateIoCompletionPort", Func, 0}, + {"CreatePipe", Func, 0}, + {"CreateProcess", Func, 0}, + {"CreateProcessAsUser", Func, 10}, + {"CreateSymbolicLink", Func, 4}, + {"CreateToolhelp32Snapshot", Func, 4}, + {"Credential", Type, 0}, + {"Credential.Gid", Field, 0}, + {"Credential.Groups", Field, 0}, + {"Credential.NoSetGroups", Field, 9}, + {"Credential.Uid", Field, 0}, + {"CryptAcquireContext", Func, 0}, + {"CryptGenRandom", Func, 0}, + {"CryptReleaseContext", Func, 0}, + {"DIOCBSFLUSH", Const, 1}, + {"DIOCOSFPFLUSH", Const, 1}, + {"DLL", Type, 0}, + {"DLL.Handle", Field, 0}, + {"DLL.Name", Field, 0}, + {"DLLError", Type, 0}, + {"DLLError.Err", Field, 0}, + {"DLLError.Msg", Field, 0}, + {"DLLError.ObjName", Field, 0}, + {"DLT_A429", Const, 0}, + {"DLT_A653_ICM", Const, 0}, + {"DLT_AIRONET_HEADER", Const, 0}, + {"DLT_AOS", Const, 1}, + {"DLT_APPLE_IP_OVER_IEEE1394", Const, 0}, + {"DLT_ARCNET", Const, 0}, + {"DLT_ARCNET_LINUX", Const, 0}, + {"DLT_ATM_CLIP", Const, 0}, + {"DLT_ATM_RFC1483", Const, 0}, + {"DLT_AURORA", Const, 0}, + {"DLT_AX25", Const, 0}, + {"DLT_AX25_KISS", Const, 0}, + {"DLT_BACNET_MS_TP", Const, 0}, + 
{"DLT_BLUETOOTH_HCI_H4", Const, 0}, + {"DLT_BLUETOOTH_HCI_H4_WITH_PHDR", Const, 0}, + {"DLT_CAN20B", Const, 0}, + {"DLT_CAN_SOCKETCAN", Const, 1}, + {"DLT_CHAOS", Const, 0}, + {"DLT_CHDLC", Const, 0}, + {"DLT_CISCO_IOS", Const, 0}, + {"DLT_C_HDLC", Const, 0}, + {"DLT_C_HDLC_WITH_DIR", Const, 0}, + {"DLT_DBUS", Const, 1}, + {"DLT_DECT", Const, 1}, + {"DLT_DOCSIS", Const, 0}, + {"DLT_DVB_CI", Const, 1}, + {"DLT_ECONET", Const, 0}, + {"DLT_EN10MB", Const, 0}, + {"DLT_EN3MB", Const, 0}, + {"DLT_ENC", Const, 0}, + {"DLT_ERF", Const, 0}, + {"DLT_ERF_ETH", Const, 0}, + {"DLT_ERF_POS", Const, 0}, + {"DLT_FC_2", Const, 1}, + {"DLT_FC_2_WITH_FRAME_DELIMS", Const, 1}, + {"DLT_FDDI", Const, 0}, + {"DLT_FLEXRAY", Const, 0}, + {"DLT_FRELAY", Const, 0}, + {"DLT_FRELAY_WITH_DIR", Const, 0}, + {"DLT_GCOM_SERIAL", Const, 0}, + {"DLT_GCOM_T1E1", Const, 0}, + {"DLT_GPF_F", Const, 0}, + {"DLT_GPF_T", Const, 0}, + {"DLT_GPRS_LLC", Const, 0}, + {"DLT_GSMTAP_ABIS", Const, 1}, + {"DLT_GSMTAP_UM", Const, 1}, + {"DLT_HDLC", Const, 1}, + {"DLT_HHDLC", Const, 0}, + {"DLT_HIPPI", Const, 1}, + {"DLT_IBM_SN", Const, 0}, + {"DLT_IBM_SP", Const, 0}, + {"DLT_IEEE802", Const, 0}, + {"DLT_IEEE802_11", Const, 0}, + {"DLT_IEEE802_11_RADIO", Const, 0}, + {"DLT_IEEE802_11_RADIO_AVS", Const, 0}, + {"DLT_IEEE802_15_4", Const, 0}, + {"DLT_IEEE802_15_4_LINUX", Const, 0}, + {"DLT_IEEE802_15_4_NOFCS", Const, 1}, + {"DLT_IEEE802_15_4_NONASK_PHY", Const, 0}, + {"DLT_IEEE802_16_MAC_CPS", Const, 0}, + {"DLT_IEEE802_16_MAC_CPS_RADIO", Const, 0}, + {"DLT_IPFILTER", Const, 0}, + {"DLT_IPMB", Const, 0}, + {"DLT_IPMB_LINUX", Const, 0}, + {"DLT_IPNET", Const, 1}, + {"DLT_IPOIB", Const, 1}, + {"DLT_IPV4", Const, 1}, + {"DLT_IPV6", Const, 1}, + {"DLT_IP_OVER_FC", Const, 0}, + {"DLT_JUNIPER_ATM1", Const, 0}, + {"DLT_JUNIPER_ATM2", Const, 0}, + {"DLT_JUNIPER_ATM_CEMIC", Const, 1}, + {"DLT_JUNIPER_CHDLC", Const, 0}, + {"DLT_JUNIPER_ES", Const, 0}, + {"DLT_JUNIPER_ETHER", Const, 0}, + {"DLT_JUNIPER_FIBRECHANNEL", Const, 1}, + {"DLT_JUNIPER_FRELAY", Const, 0}, + {"DLT_JUNIPER_GGSN", Const, 0}, + {"DLT_JUNIPER_ISM", Const, 0}, + {"DLT_JUNIPER_MFR", Const, 0}, + {"DLT_JUNIPER_MLFR", Const, 0}, + {"DLT_JUNIPER_MLPPP", Const, 0}, + {"DLT_JUNIPER_MONITOR", Const, 0}, + {"DLT_JUNIPER_PIC_PEER", Const, 0}, + {"DLT_JUNIPER_PPP", Const, 0}, + {"DLT_JUNIPER_PPPOE", Const, 0}, + {"DLT_JUNIPER_PPPOE_ATM", Const, 0}, + {"DLT_JUNIPER_SERVICES", Const, 0}, + {"DLT_JUNIPER_SRX_E2E", Const, 1}, + {"DLT_JUNIPER_ST", Const, 0}, + {"DLT_JUNIPER_VP", Const, 0}, + {"DLT_JUNIPER_VS", Const, 1}, + {"DLT_LAPB_WITH_DIR", Const, 0}, + {"DLT_LAPD", Const, 0}, + {"DLT_LIN", Const, 0}, + {"DLT_LINUX_EVDEV", Const, 1}, + {"DLT_LINUX_IRDA", Const, 0}, + {"DLT_LINUX_LAPD", Const, 0}, + {"DLT_LINUX_PPP_WITHDIRECTION", Const, 0}, + {"DLT_LINUX_SLL", Const, 0}, + {"DLT_LOOP", Const, 0}, + {"DLT_LTALK", Const, 0}, + {"DLT_MATCHING_MAX", Const, 1}, + {"DLT_MATCHING_MIN", Const, 1}, + {"DLT_MFR", Const, 0}, + {"DLT_MOST", Const, 0}, + {"DLT_MPEG_2_TS", Const, 1}, + {"DLT_MPLS", Const, 1}, + {"DLT_MTP2", Const, 0}, + {"DLT_MTP2_WITH_PHDR", Const, 0}, + {"DLT_MTP3", Const, 0}, + {"DLT_MUX27010", Const, 1}, + {"DLT_NETANALYZER", Const, 1}, + {"DLT_NETANALYZER_TRANSPARENT", Const, 1}, + {"DLT_NFC_LLCP", Const, 1}, + {"DLT_NFLOG", Const, 1}, + {"DLT_NG40", Const, 1}, + {"DLT_NULL", Const, 0}, + {"DLT_PCI_EXP", Const, 0}, + {"DLT_PFLOG", Const, 0}, + {"DLT_PFSYNC", Const, 0}, + {"DLT_PPI", Const, 0}, + {"DLT_PPP", Const, 0}, + {"DLT_PPP_BSDOS", Const, 0}, + {"DLT_PPP_ETHER", Const, 0}, + 
{"DLT_PPP_PPPD", Const, 0}, + {"DLT_PPP_SERIAL", Const, 0}, + {"DLT_PPP_WITH_DIR", Const, 0}, + {"DLT_PPP_WITH_DIRECTION", Const, 0}, + {"DLT_PRISM_HEADER", Const, 0}, + {"DLT_PRONET", Const, 0}, + {"DLT_RAIF1", Const, 0}, + {"DLT_RAW", Const, 0}, + {"DLT_RAWAF_MASK", Const, 1}, + {"DLT_RIO", Const, 0}, + {"DLT_SCCP", Const, 0}, + {"DLT_SITA", Const, 0}, + {"DLT_SLIP", Const, 0}, + {"DLT_SLIP_BSDOS", Const, 0}, + {"DLT_STANAG_5066_D_PDU", Const, 1}, + {"DLT_SUNATM", Const, 0}, + {"DLT_SYMANTEC_FIREWALL", Const, 0}, + {"DLT_TZSP", Const, 0}, + {"DLT_USB", Const, 0}, + {"DLT_USB_LINUX", Const, 0}, + {"DLT_USB_LINUX_MMAPPED", Const, 1}, + {"DLT_USER0", Const, 0}, + {"DLT_USER1", Const, 0}, + {"DLT_USER10", Const, 0}, + {"DLT_USER11", Const, 0}, + {"DLT_USER12", Const, 0}, + {"DLT_USER13", Const, 0}, + {"DLT_USER14", Const, 0}, + {"DLT_USER15", Const, 0}, + {"DLT_USER2", Const, 0}, + {"DLT_USER3", Const, 0}, + {"DLT_USER4", Const, 0}, + {"DLT_USER5", Const, 0}, + {"DLT_USER6", Const, 0}, + {"DLT_USER7", Const, 0}, + {"DLT_USER8", Const, 0}, + {"DLT_USER9", Const, 0}, + {"DLT_WIHART", Const, 1}, + {"DLT_X2E_SERIAL", Const, 0}, + {"DLT_X2E_XORAYA", Const, 0}, + {"DNSMXData", Type, 0}, + {"DNSMXData.NameExchange", Field, 0}, + {"DNSMXData.Pad", Field, 0}, + {"DNSMXData.Preference", Field, 0}, + {"DNSPTRData", Type, 0}, + {"DNSPTRData.Host", Field, 0}, + {"DNSRecord", Type, 0}, + {"DNSRecord.Data", Field, 0}, + {"DNSRecord.Dw", Field, 0}, + {"DNSRecord.Length", Field, 0}, + {"DNSRecord.Name", Field, 0}, + {"DNSRecord.Next", Field, 0}, + {"DNSRecord.Reserved", Field, 0}, + {"DNSRecord.Ttl", Field, 0}, + {"DNSRecord.Type", Field, 0}, + {"DNSSRVData", Type, 0}, + {"DNSSRVData.Pad", Field, 0}, + {"DNSSRVData.Port", Field, 0}, + {"DNSSRVData.Priority", Field, 0}, + {"DNSSRVData.Target", Field, 0}, + {"DNSSRVData.Weight", Field, 0}, + {"DNSTXTData", Type, 0}, + {"DNSTXTData.StringArray", Field, 0}, + {"DNSTXTData.StringCount", Field, 0}, + {"DNS_INFO_NO_RECORDS", Const, 4}, + {"DNS_TYPE_A", Const, 0}, + {"DNS_TYPE_A6", Const, 0}, + {"DNS_TYPE_AAAA", Const, 0}, + {"DNS_TYPE_ADDRS", Const, 0}, + {"DNS_TYPE_AFSDB", Const, 0}, + {"DNS_TYPE_ALL", Const, 0}, + {"DNS_TYPE_ANY", Const, 0}, + {"DNS_TYPE_ATMA", Const, 0}, + {"DNS_TYPE_AXFR", Const, 0}, + {"DNS_TYPE_CERT", Const, 0}, + {"DNS_TYPE_CNAME", Const, 0}, + {"DNS_TYPE_DHCID", Const, 0}, + {"DNS_TYPE_DNAME", Const, 0}, + {"DNS_TYPE_DNSKEY", Const, 0}, + {"DNS_TYPE_DS", Const, 0}, + {"DNS_TYPE_EID", Const, 0}, + {"DNS_TYPE_GID", Const, 0}, + {"DNS_TYPE_GPOS", Const, 0}, + {"DNS_TYPE_HINFO", Const, 0}, + {"DNS_TYPE_ISDN", Const, 0}, + {"DNS_TYPE_IXFR", Const, 0}, + {"DNS_TYPE_KEY", Const, 0}, + {"DNS_TYPE_KX", Const, 0}, + {"DNS_TYPE_LOC", Const, 0}, + {"DNS_TYPE_MAILA", Const, 0}, + {"DNS_TYPE_MAILB", Const, 0}, + {"DNS_TYPE_MB", Const, 0}, + {"DNS_TYPE_MD", Const, 0}, + {"DNS_TYPE_MF", Const, 0}, + {"DNS_TYPE_MG", Const, 0}, + {"DNS_TYPE_MINFO", Const, 0}, + {"DNS_TYPE_MR", Const, 0}, + {"DNS_TYPE_MX", Const, 0}, + {"DNS_TYPE_NAPTR", Const, 0}, + {"DNS_TYPE_NBSTAT", Const, 0}, + {"DNS_TYPE_NIMLOC", Const, 0}, + {"DNS_TYPE_NS", Const, 0}, + {"DNS_TYPE_NSAP", Const, 0}, + {"DNS_TYPE_NSAPPTR", Const, 0}, + {"DNS_TYPE_NSEC", Const, 0}, + {"DNS_TYPE_NULL", Const, 0}, + {"DNS_TYPE_NXT", Const, 0}, + {"DNS_TYPE_OPT", Const, 0}, + {"DNS_TYPE_PTR", Const, 0}, + {"DNS_TYPE_PX", Const, 0}, + {"DNS_TYPE_RP", Const, 0}, + {"DNS_TYPE_RRSIG", Const, 0}, + {"DNS_TYPE_RT", Const, 0}, + {"DNS_TYPE_SIG", Const, 0}, + {"DNS_TYPE_SINK", Const, 0}, + {"DNS_TYPE_SOA", Const, 
0}, + {"DNS_TYPE_SRV", Const, 0}, + {"DNS_TYPE_TEXT", Const, 0}, + {"DNS_TYPE_TKEY", Const, 0}, + {"DNS_TYPE_TSIG", Const, 0}, + {"DNS_TYPE_UID", Const, 0}, + {"DNS_TYPE_UINFO", Const, 0}, + {"DNS_TYPE_UNSPEC", Const, 0}, + {"DNS_TYPE_WINS", Const, 0}, + {"DNS_TYPE_WINSR", Const, 0}, + {"DNS_TYPE_WKS", Const, 0}, + {"DNS_TYPE_X25", Const, 0}, + {"DT_BLK", Const, 0}, + {"DT_CHR", Const, 0}, + {"DT_DIR", Const, 0}, + {"DT_FIFO", Const, 0}, + {"DT_LNK", Const, 0}, + {"DT_REG", Const, 0}, + {"DT_SOCK", Const, 0}, + {"DT_UNKNOWN", Const, 0}, + {"DT_WHT", Const, 0}, + {"DUPLICATE_CLOSE_SOURCE", Const, 0}, + {"DUPLICATE_SAME_ACCESS", Const, 0}, + {"DeleteFile", Func, 0}, + {"DetachLsf", Func, 0}, + {"DeviceIoControl", Func, 4}, + {"Dirent", Type, 0}, + {"Dirent.Fileno", Field, 0}, + {"Dirent.Ino", Field, 0}, + {"Dirent.Name", Field, 0}, + {"Dirent.Namlen", Field, 0}, + {"Dirent.Off", Field, 0}, + {"Dirent.Pad0", Field, 12}, + {"Dirent.Pad1", Field, 12}, + {"Dirent.Pad_cgo_0", Field, 0}, + {"Dirent.Reclen", Field, 0}, + {"Dirent.Seekoff", Field, 0}, + {"Dirent.Type", Field, 0}, + {"Dirent.X__d_padding", Field, 3}, + {"DnsNameCompare", Func, 4}, + {"DnsQuery", Func, 0}, + {"DnsRecordListFree", Func, 0}, + {"DnsSectionAdditional", Const, 4}, + {"DnsSectionAnswer", Const, 4}, + {"DnsSectionAuthority", Const, 4}, + {"DnsSectionQuestion", Const, 4}, + {"Dup", Func, 0}, + {"Dup2", Func, 0}, + {"Dup3", Func, 2}, + {"DuplicateHandle", Func, 0}, + {"E2BIG", Const, 0}, + {"EACCES", Const, 0}, + {"EADDRINUSE", Const, 0}, + {"EADDRNOTAVAIL", Const, 0}, + {"EADV", Const, 0}, + {"EAFNOSUPPORT", Const, 0}, + {"EAGAIN", Const, 0}, + {"EALREADY", Const, 0}, + {"EAUTH", Const, 0}, + {"EBADARCH", Const, 0}, + {"EBADE", Const, 0}, + {"EBADEXEC", Const, 0}, + {"EBADF", Const, 0}, + {"EBADFD", Const, 0}, + {"EBADMACHO", Const, 0}, + {"EBADMSG", Const, 0}, + {"EBADR", Const, 0}, + {"EBADRPC", Const, 0}, + {"EBADRQC", Const, 0}, + {"EBADSLT", Const, 0}, + {"EBFONT", Const, 0}, + {"EBUSY", Const, 0}, + {"ECANCELED", Const, 0}, + {"ECAPMODE", Const, 1}, + {"ECHILD", Const, 0}, + {"ECHO", Const, 0}, + {"ECHOCTL", Const, 0}, + {"ECHOE", Const, 0}, + {"ECHOK", Const, 0}, + {"ECHOKE", Const, 0}, + {"ECHONL", Const, 0}, + {"ECHOPRT", Const, 0}, + {"ECHRNG", Const, 0}, + {"ECOMM", Const, 0}, + {"ECONNABORTED", Const, 0}, + {"ECONNREFUSED", Const, 0}, + {"ECONNRESET", Const, 0}, + {"EDEADLK", Const, 0}, + {"EDEADLOCK", Const, 0}, + {"EDESTADDRREQ", Const, 0}, + {"EDEVERR", Const, 0}, + {"EDOM", Const, 0}, + {"EDOOFUS", Const, 0}, + {"EDOTDOT", Const, 0}, + {"EDQUOT", Const, 0}, + {"EEXIST", Const, 0}, + {"EFAULT", Const, 0}, + {"EFBIG", Const, 0}, + {"EFER_LMA", Const, 1}, + {"EFER_LME", Const, 1}, + {"EFER_NXE", Const, 1}, + {"EFER_SCE", Const, 1}, + {"EFTYPE", Const, 0}, + {"EHOSTDOWN", Const, 0}, + {"EHOSTUNREACH", Const, 0}, + {"EHWPOISON", Const, 0}, + {"EIDRM", Const, 0}, + {"EILSEQ", Const, 0}, + {"EINPROGRESS", Const, 0}, + {"EINTR", Const, 0}, + {"EINVAL", Const, 0}, + {"EIO", Const, 0}, + {"EIPSEC", Const, 1}, + {"EISCONN", Const, 0}, + {"EISDIR", Const, 0}, + {"EISNAM", Const, 0}, + {"EKEYEXPIRED", Const, 0}, + {"EKEYREJECTED", Const, 0}, + {"EKEYREVOKED", Const, 0}, + {"EL2HLT", Const, 0}, + {"EL2NSYNC", Const, 0}, + {"EL3HLT", Const, 0}, + {"EL3RST", Const, 0}, + {"ELAST", Const, 0}, + {"ELF_NGREG", Const, 0}, + {"ELF_PRARGSZ", Const, 0}, + {"ELIBACC", Const, 0}, + {"ELIBBAD", Const, 0}, + {"ELIBEXEC", Const, 0}, + {"ELIBMAX", Const, 0}, + {"ELIBSCN", Const, 0}, + {"ELNRNG", Const, 0}, + {"ELOOP", Const, 0}, + 
{"EMEDIUMTYPE", Const, 0}, + {"EMFILE", Const, 0}, + {"EMLINK", Const, 0}, + {"EMSGSIZE", Const, 0}, + {"EMT_TAGOVF", Const, 1}, + {"EMULTIHOP", Const, 0}, + {"EMUL_ENABLED", Const, 1}, + {"EMUL_LINUX", Const, 1}, + {"EMUL_LINUX32", Const, 1}, + {"EMUL_MAXID", Const, 1}, + {"EMUL_NATIVE", Const, 1}, + {"ENAMETOOLONG", Const, 0}, + {"ENAVAIL", Const, 0}, + {"ENDRUNDISC", Const, 1}, + {"ENEEDAUTH", Const, 0}, + {"ENETDOWN", Const, 0}, + {"ENETRESET", Const, 0}, + {"ENETUNREACH", Const, 0}, + {"ENFILE", Const, 0}, + {"ENOANO", Const, 0}, + {"ENOATTR", Const, 0}, + {"ENOBUFS", Const, 0}, + {"ENOCSI", Const, 0}, + {"ENODATA", Const, 0}, + {"ENODEV", Const, 0}, + {"ENOENT", Const, 0}, + {"ENOEXEC", Const, 0}, + {"ENOKEY", Const, 0}, + {"ENOLCK", Const, 0}, + {"ENOLINK", Const, 0}, + {"ENOMEDIUM", Const, 0}, + {"ENOMEM", Const, 0}, + {"ENOMSG", Const, 0}, + {"ENONET", Const, 0}, + {"ENOPKG", Const, 0}, + {"ENOPOLICY", Const, 0}, + {"ENOPROTOOPT", Const, 0}, + {"ENOSPC", Const, 0}, + {"ENOSR", Const, 0}, + {"ENOSTR", Const, 0}, + {"ENOSYS", Const, 0}, + {"ENOTBLK", Const, 0}, + {"ENOTCAPABLE", Const, 0}, + {"ENOTCONN", Const, 0}, + {"ENOTDIR", Const, 0}, + {"ENOTEMPTY", Const, 0}, + {"ENOTNAM", Const, 0}, + {"ENOTRECOVERABLE", Const, 0}, + {"ENOTSOCK", Const, 0}, + {"ENOTSUP", Const, 0}, + {"ENOTTY", Const, 0}, + {"ENOTUNIQ", Const, 0}, + {"ENXIO", Const, 0}, + {"EN_SW_CTL_INF", Const, 1}, + {"EN_SW_CTL_PREC", Const, 1}, + {"EN_SW_CTL_ROUND", Const, 1}, + {"EN_SW_DATACHAIN", Const, 1}, + {"EN_SW_DENORM", Const, 1}, + {"EN_SW_INVOP", Const, 1}, + {"EN_SW_OVERFLOW", Const, 1}, + {"EN_SW_PRECLOSS", Const, 1}, + {"EN_SW_UNDERFLOW", Const, 1}, + {"EN_SW_ZERODIV", Const, 1}, + {"EOPNOTSUPP", Const, 0}, + {"EOVERFLOW", Const, 0}, + {"EOWNERDEAD", Const, 0}, + {"EPERM", Const, 0}, + {"EPFNOSUPPORT", Const, 0}, + {"EPIPE", Const, 0}, + {"EPOLLERR", Const, 0}, + {"EPOLLET", Const, 0}, + {"EPOLLHUP", Const, 0}, + {"EPOLLIN", Const, 0}, + {"EPOLLMSG", Const, 0}, + {"EPOLLONESHOT", Const, 0}, + {"EPOLLOUT", Const, 0}, + {"EPOLLPRI", Const, 0}, + {"EPOLLRDBAND", Const, 0}, + {"EPOLLRDHUP", Const, 0}, + {"EPOLLRDNORM", Const, 0}, + {"EPOLLWRBAND", Const, 0}, + {"EPOLLWRNORM", Const, 0}, + {"EPOLL_CLOEXEC", Const, 0}, + {"EPOLL_CTL_ADD", Const, 0}, + {"EPOLL_CTL_DEL", Const, 0}, + {"EPOLL_CTL_MOD", Const, 0}, + {"EPOLL_NONBLOCK", Const, 0}, + {"EPROCLIM", Const, 0}, + {"EPROCUNAVAIL", Const, 0}, + {"EPROGMISMATCH", Const, 0}, + {"EPROGUNAVAIL", Const, 0}, + {"EPROTO", Const, 0}, + {"EPROTONOSUPPORT", Const, 0}, + {"EPROTOTYPE", Const, 0}, + {"EPWROFF", Const, 0}, + {"EQFULL", Const, 16}, + {"ERANGE", Const, 0}, + {"EREMCHG", Const, 0}, + {"EREMOTE", Const, 0}, + {"EREMOTEIO", Const, 0}, + {"ERESTART", Const, 0}, + {"ERFKILL", Const, 0}, + {"EROFS", Const, 0}, + {"ERPCMISMATCH", Const, 0}, + {"ERROR_ACCESS_DENIED", Const, 0}, + {"ERROR_ALREADY_EXISTS", Const, 0}, + {"ERROR_BROKEN_PIPE", Const, 0}, + {"ERROR_BUFFER_OVERFLOW", Const, 0}, + {"ERROR_DIR_NOT_EMPTY", Const, 8}, + {"ERROR_ENVVAR_NOT_FOUND", Const, 0}, + {"ERROR_FILE_EXISTS", Const, 0}, + {"ERROR_FILE_NOT_FOUND", Const, 0}, + {"ERROR_HANDLE_EOF", Const, 2}, + {"ERROR_INSUFFICIENT_BUFFER", Const, 0}, + {"ERROR_IO_PENDING", Const, 0}, + {"ERROR_MOD_NOT_FOUND", Const, 0}, + {"ERROR_MORE_DATA", Const, 3}, + {"ERROR_NETNAME_DELETED", Const, 3}, + {"ERROR_NOT_FOUND", Const, 1}, + {"ERROR_NO_MORE_FILES", Const, 0}, + {"ERROR_OPERATION_ABORTED", Const, 0}, + {"ERROR_PATH_NOT_FOUND", Const, 0}, + {"ERROR_PRIVILEGE_NOT_HELD", Const, 4}, + {"ERROR_PROC_NOT_FOUND", 
Const, 0}, + {"ESHLIBVERS", Const, 0}, + {"ESHUTDOWN", Const, 0}, + {"ESOCKTNOSUPPORT", Const, 0}, + {"ESPIPE", Const, 0}, + {"ESRCH", Const, 0}, + {"ESRMNT", Const, 0}, + {"ESTALE", Const, 0}, + {"ESTRPIPE", Const, 0}, + {"ETHERCAP_JUMBO_MTU", Const, 1}, + {"ETHERCAP_VLAN_HWTAGGING", Const, 1}, + {"ETHERCAP_VLAN_MTU", Const, 1}, + {"ETHERMIN", Const, 1}, + {"ETHERMTU", Const, 1}, + {"ETHERMTU_JUMBO", Const, 1}, + {"ETHERTYPE_8023", Const, 1}, + {"ETHERTYPE_AARP", Const, 1}, + {"ETHERTYPE_ACCTON", Const, 1}, + {"ETHERTYPE_AEONIC", Const, 1}, + {"ETHERTYPE_ALPHA", Const, 1}, + {"ETHERTYPE_AMBER", Const, 1}, + {"ETHERTYPE_AMOEBA", Const, 1}, + {"ETHERTYPE_AOE", Const, 1}, + {"ETHERTYPE_APOLLO", Const, 1}, + {"ETHERTYPE_APOLLODOMAIN", Const, 1}, + {"ETHERTYPE_APPLETALK", Const, 1}, + {"ETHERTYPE_APPLITEK", Const, 1}, + {"ETHERTYPE_ARGONAUT", Const, 1}, + {"ETHERTYPE_ARP", Const, 1}, + {"ETHERTYPE_AT", Const, 1}, + {"ETHERTYPE_ATALK", Const, 1}, + {"ETHERTYPE_ATOMIC", Const, 1}, + {"ETHERTYPE_ATT", Const, 1}, + {"ETHERTYPE_ATTSTANFORD", Const, 1}, + {"ETHERTYPE_AUTOPHON", Const, 1}, + {"ETHERTYPE_AXIS", Const, 1}, + {"ETHERTYPE_BCLOOP", Const, 1}, + {"ETHERTYPE_BOFL", Const, 1}, + {"ETHERTYPE_CABLETRON", Const, 1}, + {"ETHERTYPE_CHAOS", Const, 1}, + {"ETHERTYPE_COMDESIGN", Const, 1}, + {"ETHERTYPE_COMPUGRAPHIC", Const, 1}, + {"ETHERTYPE_COUNTERPOINT", Const, 1}, + {"ETHERTYPE_CRONUS", Const, 1}, + {"ETHERTYPE_CRONUSVLN", Const, 1}, + {"ETHERTYPE_DCA", Const, 1}, + {"ETHERTYPE_DDE", Const, 1}, + {"ETHERTYPE_DEBNI", Const, 1}, + {"ETHERTYPE_DECAM", Const, 1}, + {"ETHERTYPE_DECCUST", Const, 1}, + {"ETHERTYPE_DECDIAG", Const, 1}, + {"ETHERTYPE_DECDNS", Const, 1}, + {"ETHERTYPE_DECDTS", Const, 1}, + {"ETHERTYPE_DECEXPER", Const, 1}, + {"ETHERTYPE_DECLAST", Const, 1}, + {"ETHERTYPE_DECLTM", Const, 1}, + {"ETHERTYPE_DECMUMPS", Const, 1}, + {"ETHERTYPE_DECNETBIOS", Const, 1}, + {"ETHERTYPE_DELTACON", Const, 1}, + {"ETHERTYPE_DIDDLE", Const, 1}, + {"ETHERTYPE_DLOG1", Const, 1}, + {"ETHERTYPE_DLOG2", Const, 1}, + {"ETHERTYPE_DN", Const, 1}, + {"ETHERTYPE_DOGFIGHT", Const, 1}, + {"ETHERTYPE_DSMD", Const, 1}, + {"ETHERTYPE_ECMA", Const, 1}, + {"ETHERTYPE_ENCRYPT", Const, 1}, + {"ETHERTYPE_ES", Const, 1}, + {"ETHERTYPE_EXCELAN", Const, 1}, + {"ETHERTYPE_EXPERDATA", Const, 1}, + {"ETHERTYPE_FLIP", Const, 1}, + {"ETHERTYPE_FLOWCONTROL", Const, 1}, + {"ETHERTYPE_FRARP", Const, 1}, + {"ETHERTYPE_GENDYN", Const, 1}, + {"ETHERTYPE_HAYES", Const, 1}, + {"ETHERTYPE_HIPPI_FP", Const, 1}, + {"ETHERTYPE_HITACHI", Const, 1}, + {"ETHERTYPE_HP", Const, 1}, + {"ETHERTYPE_IEEEPUP", Const, 1}, + {"ETHERTYPE_IEEEPUPAT", Const, 1}, + {"ETHERTYPE_IMLBL", Const, 1}, + {"ETHERTYPE_IMLBLDIAG", Const, 1}, + {"ETHERTYPE_IP", Const, 1}, + {"ETHERTYPE_IPAS", Const, 1}, + {"ETHERTYPE_IPV6", Const, 1}, + {"ETHERTYPE_IPX", Const, 1}, + {"ETHERTYPE_IPXNEW", Const, 1}, + {"ETHERTYPE_KALPANA", Const, 1}, + {"ETHERTYPE_LANBRIDGE", Const, 1}, + {"ETHERTYPE_LANPROBE", Const, 1}, + {"ETHERTYPE_LAT", Const, 1}, + {"ETHERTYPE_LBACK", Const, 1}, + {"ETHERTYPE_LITTLE", Const, 1}, + {"ETHERTYPE_LLDP", Const, 1}, + {"ETHERTYPE_LOGICRAFT", Const, 1}, + {"ETHERTYPE_LOOPBACK", Const, 1}, + {"ETHERTYPE_MATRA", Const, 1}, + {"ETHERTYPE_MAX", Const, 1}, + {"ETHERTYPE_MERIT", Const, 1}, + {"ETHERTYPE_MICP", Const, 1}, + {"ETHERTYPE_MOPDL", Const, 1}, + {"ETHERTYPE_MOPRC", Const, 1}, + {"ETHERTYPE_MOTOROLA", Const, 1}, + {"ETHERTYPE_MPLS", Const, 1}, + {"ETHERTYPE_MPLS_MCAST", Const, 1}, + {"ETHERTYPE_MUMPS", Const, 1}, + {"ETHERTYPE_NBPCC", Const, 1}, + 
{"ETHERTYPE_NBPCLAIM", Const, 1}, + {"ETHERTYPE_NBPCLREQ", Const, 1}, + {"ETHERTYPE_NBPCLRSP", Const, 1}, + {"ETHERTYPE_NBPCREQ", Const, 1}, + {"ETHERTYPE_NBPCRSP", Const, 1}, + {"ETHERTYPE_NBPDG", Const, 1}, + {"ETHERTYPE_NBPDGB", Const, 1}, + {"ETHERTYPE_NBPDLTE", Const, 1}, + {"ETHERTYPE_NBPRAR", Const, 1}, + {"ETHERTYPE_NBPRAS", Const, 1}, + {"ETHERTYPE_NBPRST", Const, 1}, + {"ETHERTYPE_NBPSCD", Const, 1}, + {"ETHERTYPE_NBPVCD", Const, 1}, + {"ETHERTYPE_NBS", Const, 1}, + {"ETHERTYPE_NCD", Const, 1}, + {"ETHERTYPE_NESTAR", Const, 1}, + {"ETHERTYPE_NETBEUI", Const, 1}, + {"ETHERTYPE_NOVELL", Const, 1}, + {"ETHERTYPE_NS", Const, 1}, + {"ETHERTYPE_NSAT", Const, 1}, + {"ETHERTYPE_NSCOMPAT", Const, 1}, + {"ETHERTYPE_NTRAILER", Const, 1}, + {"ETHERTYPE_OS9", Const, 1}, + {"ETHERTYPE_OS9NET", Const, 1}, + {"ETHERTYPE_PACER", Const, 1}, + {"ETHERTYPE_PAE", Const, 1}, + {"ETHERTYPE_PCS", Const, 1}, + {"ETHERTYPE_PLANNING", Const, 1}, + {"ETHERTYPE_PPP", Const, 1}, + {"ETHERTYPE_PPPOE", Const, 1}, + {"ETHERTYPE_PPPOEDISC", Const, 1}, + {"ETHERTYPE_PRIMENTS", Const, 1}, + {"ETHERTYPE_PUP", Const, 1}, + {"ETHERTYPE_PUPAT", Const, 1}, + {"ETHERTYPE_QINQ", Const, 1}, + {"ETHERTYPE_RACAL", Const, 1}, + {"ETHERTYPE_RATIONAL", Const, 1}, + {"ETHERTYPE_RAWFR", Const, 1}, + {"ETHERTYPE_RCL", Const, 1}, + {"ETHERTYPE_RDP", Const, 1}, + {"ETHERTYPE_RETIX", Const, 1}, + {"ETHERTYPE_REVARP", Const, 1}, + {"ETHERTYPE_SCA", Const, 1}, + {"ETHERTYPE_SECTRA", Const, 1}, + {"ETHERTYPE_SECUREDATA", Const, 1}, + {"ETHERTYPE_SGITW", Const, 1}, + {"ETHERTYPE_SG_BOUNCE", Const, 1}, + {"ETHERTYPE_SG_DIAG", Const, 1}, + {"ETHERTYPE_SG_NETGAMES", Const, 1}, + {"ETHERTYPE_SG_RESV", Const, 1}, + {"ETHERTYPE_SIMNET", Const, 1}, + {"ETHERTYPE_SLOW", Const, 1}, + {"ETHERTYPE_SLOWPROTOCOLS", Const, 1}, + {"ETHERTYPE_SNA", Const, 1}, + {"ETHERTYPE_SNMP", Const, 1}, + {"ETHERTYPE_SONIX", Const, 1}, + {"ETHERTYPE_SPIDER", Const, 1}, + {"ETHERTYPE_SPRITE", Const, 1}, + {"ETHERTYPE_STP", Const, 1}, + {"ETHERTYPE_TALARIS", Const, 1}, + {"ETHERTYPE_TALARISMC", Const, 1}, + {"ETHERTYPE_TCPCOMP", Const, 1}, + {"ETHERTYPE_TCPSM", Const, 1}, + {"ETHERTYPE_TEC", Const, 1}, + {"ETHERTYPE_TIGAN", Const, 1}, + {"ETHERTYPE_TRAIL", Const, 1}, + {"ETHERTYPE_TRANSETHER", Const, 1}, + {"ETHERTYPE_TYMSHARE", Const, 1}, + {"ETHERTYPE_UBBST", Const, 1}, + {"ETHERTYPE_UBDEBUG", Const, 1}, + {"ETHERTYPE_UBDIAGLOOP", Const, 1}, + {"ETHERTYPE_UBDL", Const, 1}, + {"ETHERTYPE_UBNIU", Const, 1}, + {"ETHERTYPE_UBNMC", Const, 1}, + {"ETHERTYPE_VALID", Const, 1}, + {"ETHERTYPE_VARIAN", Const, 1}, + {"ETHERTYPE_VAXELN", Const, 1}, + {"ETHERTYPE_VEECO", Const, 1}, + {"ETHERTYPE_VEXP", Const, 1}, + {"ETHERTYPE_VGLAB", Const, 1}, + {"ETHERTYPE_VINES", Const, 1}, + {"ETHERTYPE_VINESECHO", Const, 1}, + {"ETHERTYPE_VINESLOOP", Const, 1}, + {"ETHERTYPE_VITAL", Const, 1}, + {"ETHERTYPE_VLAN", Const, 1}, + {"ETHERTYPE_VLTLMAN", Const, 1}, + {"ETHERTYPE_VPROD", Const, 1}, + {"ETHERTYPE_VURESERVED", Const, 1}, + {"ETHERTYPE_WATERLOO", Const, 1}, + {"ETHERTYPE_WELLFLEET", Const, 1}, + {"ETHERTYPE_X25", Const, 1}, + {"ETHERTYPE_X75", Const, 1}, + {"ETHERTYPE_XNSSM", Const, 1}, + {"ETHERTYPE_XTP", Const, 1}, + {"ETHER_ADDR_LEN", Const, 1}, + {"ETHER_ALIGN", Const, 1}, + {"ETHER_CRC_LEN", Const, 1}, + {"ETHER_CRC_POLY_BE", Const, 1}, + {"ETHER_CRC_POLY_LE", Const, 1}, + {"ETHER_HDR_LEN", Const, 1}, + {"ETHER_MAX_DIX_LEN", Const, 1}, + {"ETHER_MAX_LEN", Const, 1}, + {"ETHER_MAX_LEN_JUMBO", Const, 1}, + {"ETHER_MIN_LEN", Const, 1}, + {"ETHER_PPPOE_ENCAP_LEN", Const, 1}, + 
{"ETHER_TYPE_LEN", Const, 1}, + {"ETHER_VLAN_ENCAP_LEN", Const, 1}, + {"ETH_P_1588", Const, 0}, + {"ETH_P_8021Q", Const, 0}, + {"ETH_P_802_2", Const, 0}, + {"ETH_P_802_3", Const, 0}, + {"ETH_P_AARP", Const, 0}, + {"ETH_P_ALL", Const, 0}, + {"ETH_P_AOE", Const, 0}, + {"ETH_P_ARCNET", Const, 0}, + {"ETH_P_ARP", Const, 0}, + {"ETH_P_ATALK", Const, 0}, + {"ETH_P_ATMFATE", Const, 0}, + {"ETH_P_ATMMPOA", Const, 0}, + {"ETH_P_AX25", Const, 0}, + {"ETH_P_BPQ", Const, 0}, + {"ETH_P_CAIF", Const, 0}, + {"ETH_P_CAN", Const, 0}, + {"ETH_P_CONTROL", Const, 0}, + {"ETH_P_CUST", Const, 0}, + {"ETH_P_DDCMP", Const, 0}, + {"ETH_P_DEC", Const, 0}, + {"ETH_P_DIAG", Const, 0}, + {"ETH_P_DNA_DL", Const, 0}, + {"ETH_P_DNA_RC", Const, 0}, + {"ETH_P_DNA_RT", Const, 0}, + {"ETH_P_DSA", Const, 0}, + {"ETH_P_ECONET", Const, 0}, + {"ETH_P_EDSA", Const, 0}, + {"ETH_P_FCOE", Const, 0}, + {"ETH_P_FIP", Const, 0}, + {"ETH_P_HDLC", Const, 0}, + {"ETH_P_IEEE802154", Const, 0}, + {"ETH_P_IEEEPUP", Const, 0}, + {"ETH_P_IEEEPUPAT", Const, 0}, + {"ETH_P_IP", Const, 0}, + {"ETH_P_IPV6", Const, 0}, + {"ETH_P_IPX", Const, 0}, + {"ETH_P_IRDA", Const, 0}, + {"ETH_P_LAT", Const, 0}, + {"ETH_P_LINK_CTL", Const, 0}, + {"ETH_P_LOCALTALK", Const, 0}, + {"ETH_P_LOOP", Const, 0}, + {"ETH_P_MOBITEX", Const, 0}, + {"ETH_P_MPLS_MC", Const, 0}, + {"ETH_P_MPLS_UC", Const, 0}, + {"ETH_P_PAE", Const, 0}, + {"ETH_P_PAUSE", Const, 0}, + {"ETH_P_PHONET", Const, 0}, + {"ETH_P_PPPTALK", Const, 0}, + {"ETH_P_PPP_DISC", Const, 0}, + {"ETH_P_PPP_MP", Const, 0}, + {"ETH_P_PPP_SES", Const, 0}, + {"ETH_P_PUP", Const, 0}, + {"ETH_P_PUPAT", Const, 0}, + {"ETH_P_RARP", Const, 0}, + {"ETH_P_SCA", Const, 0}, + {"ETH_P_SLOW", Const, 0}, + {"ETH_P_SNAP", Const, 0}, + {"ETH_P_TEB", Const, 0}, + {"ETH_P_TIPC", Const, 0}, + {"ETH_P_TRAILER", Const, 0}, + {"ETH_P_TR_802_2", Const, 0}, + {"ETH_P_WAN_PPP", Const, 0}, + {"ETH_P_WCCP", Const, 0}, + {"ETH_P_X25", Const, 0}, + {"ETIME", Const, 0}, + {"ETIMEDOUT", Const, 0}, + {"ETOOMANYREFS", Const, 0}, + {"ETXTBSY", Const, 0}, + {"EUCLEAN", Const, 0}, + {"EUNATCH", Const, 0}, + {"EUSERS", Const, 0}, + {"EVFILT_AIO", Const, 0}, + {"EVFILT_FS", Const, 0}, + {"EVFILT_LIO", Const, 0}, + {"EVFILT_MACHPORT", Const, 0}, + {"EVFILT_PROC", Const, 0}, + {"EVFILT_READ", Const, 0}, + {"EVFILT_SIGNAL", Const, 0}, + {"EVFILT_SYSCOUNT", Const, 0}, + {"EVFILT_THREADMARKER", Const, 0}, + {"EVFILT_TIMER", Const, 0}, + {"EVFILT_USER", Const, 0}, + {"EVFILT_VM", Const, 0}, + {"EVFILT_VNODE", Const, 0}, + {"EVFILT_WRITE", Const, 0}, + {"EV_ADD", Const, 0}, + {"EV_CLEAR", Const, 0}, + {"EV_DELETE", Const, 0}, + {"EV_DISABLE", Const, 0}, + {"EV_DISPATCH", Const, 0}, + {"EV_DROP", Const, 3}, + {"EV_ENABLE", Const, 0}, + {"EV_EOF", Const, 0}, + {"EV_ERROR", Const, 0}, + {"EV_FLAG0", Const, 0}, + {"EV_FLAG1", Const, 0}, + {"EV_ONESHOT", Const, 0}, + {"EV_OOBAND", Const, 0}, + {"EV_POLL", Const, 0}, + {"EV_RECEIPT", Const, 0}, + {"EV_SYSFLAGS", Const, 0}, + {"EWINDOWS", Const, 0}, + {"EWOULDBLOCK", Const, 0}, + {"EXDEV", Const, 0}, + {"EXFULL", Const, 0}, + {"EXTA", Const, 0}, + {"EXTB", Const, 0}, + {"EXTPROC", Const, 0}, + {"Environ", Func, 0}, + {"EpollCreate", Func, 0}, + {"EpollCreate1", Func, 0}, + {"EpollCtl", Func, 0}, + {"EpollEvent", Type, 0}, + {"EpollEvent.Events", Field, 0}, + {"EpollEvent.Fd", Field, 0}, + {"EpollEvent.Pad", Field, 0}, + {"EpollEvent.PadFd", Field, 0}, + {"EpollWait", Func, 0}, + {"Errno", Type, 0}, + {"EscapeArg", Func, 0}, + {"Exchangedata", Func, 0}, + {"Exec", Func, 0}, + {"Exit", Func, 0}, + {"ExitProcess", Func, 
0}, + {"FD_CLOEXEC", Const, 0}, + {"FD_SETSIZE", Const, 0}, + {"FILE_ACTION_ADDED", Const, 0}, + {"FILE_ACTION_MODIFIED", Const, 0}, + {"FILE_ACTION_REMOVED", Const, 0}, + {"FILE_ACTION_RENAMED_NEW_NAME", Const, 0}, + {"FILE_ACTION_RENAMED_OLD_NAME", Const, 0}, + {"FILE_APPEND_DATA", Const, 0}, + {"FILE_ATTRIBUTE_ARCHIVE", Const, 0}, + {"FILE_ATTRIBUTE_DIRECTORY", Const, 0}, + {"FILE_ATTRIBUTE_HIDDEN", Const, 0}, + {"FILE_ATTRIBUTE_NORMAL", Const, 0}, + {"FILE_ATTRIBUTE_READONLY", Const, 0}, + {"FILE_ATTRIBUTE_REPARSE_POINT", Const, 4}, + {"FILE_ATTRIBUTE_SYSTEM", Const, 0}, + {"FILE_BEGIN", Const, 0}, + {"FILE_CURRENT", Const, 0}, + {"FILE_END", Const, 0}, + {"FILE_FLAG_BACKUP_SEMANTICS", Const, 0}, + {"FILE_FLAG_OPEN_REPARSE_POINT", Const, 4}, + {"FILE_FLAG_OVERLAPPED", Const, 0}, + {"FILE_LIST_DIRECTORY", Const, 0}, + {"FILE_MAP_COPY", Const, 0}, + {"FILE_MAP_EXECUTE", Const, 0}, + {"FILE_MAP_READ", Const, 0}, + {"FILE_MAP_WRITE", Const, 0}, + {"FILE_NOTIFY_CHANGE_ATTRIBUTES", Const, 0}, + {"FILE_NOTIFY_CHANGE_CREATION", Const, 0}, + {"FILE_NOTIFY_CHANGE_DIR_NAME", Const, 0}, + {"FILE_NOTIFY_CHANGE_FILE_NAME", Const, 0}, + {"FILE_NOTIFY_CHANGE_LAST_ACCESS", Const, 0}, + {"FILE_NOTIFY_CHANGE_LAST_WRITE", Const, 0}, + {"FILE_NOTIFY_CHANGE_SIZE", Const, 0}, + {"FILE_SHARE_DELETE", Const, 0}, + {"FILE_SHARE_READ", Const, 0}, + {"FILE_SHARE_WRITE", Const, 0}, + {"FILE_SKIP_COMPLETION_PORT_ON_SUCCESS", Const, 2}, + {"FILE_SKIP_SET_EVENT_ON_HANDLE", Const, 2}, + {"FILE_TYPE_CHAR", Const, 0}, + {"FILE_TYPE_DISK", Const, 0}, + {"FILE_TYPE_PIPE", Const, 0}, + {"FILE_TYPE_REMOTE", Const, 0}, + {"FILE_TYPE_UNKNOWN", Const, 0}, + {"FILE_WRITE_ATTRIBUTES", Const, 0}, + {"FLUSHO", Const, 0}, + {"FORMAT_MESSAGE_ALLOCATE_BUFFER", Const, 0}, + {"FORMAT_MESSAGE_ARGUMENT_ARRAY", Const, 0}, + {"FORMAT_MESSAGE_FROM_HMODULE", Const, 0}, + {"FORMAT_MESSAGE_FROM_STRING", Const, 0}, + {"FORMAT_MESSAGE_FROM_SYSTEM", Const, 0}, + {"FORMAT_MESSAGE_IGNORE_INSERTS", Const, 0}, + {"FORMAT_MESSAGE_MAX_WIDTH_MASK", Const, 0}, + {"FSCTL_GET_REPARSE_POINT", Const, 4}, + {"F_ADDFILESIGS", Const, 0}, + {"F_ADDSIGS", Const, 0}, + {"F_ALLOCATEALL", Const, 0}, + {"F_ALLOCATECONTIG", Const, 0}, + {"F_CANCEL", Const, 0}, + {"F_CHKCLEAN", Const, 0}, + {"F_CLOSEM", Const, 1}, + {"F_DUP2FD", Const, 0}, + {"F_DUP2FD_CLOEXEC", Const, 1}, + {"F_DUPFD", Const, 0}, + {"F_DUPFD_CLOEXEC", Const, 0}, + {"F_EXLCK", Const, 0}, + {"F_FINDSIGS", Const, 16}, + {"F_FLUSH_DATA", Const, 0}, + {"F_FREEZE_FS", Const, 0}, + {"F_FSCTL", Const, 1}, + {"F_FSDIRMASK", Const, 1}, + {"F_FSIN", Const, 1}, + {"F_FSINOUT", Const, 1}, + {"F_FSOUT", Const, 1}, + {"F_FSPRIV", Const, 1}, + {"F_FSVOID", Const, 1}, + {"F_FULLFSYNC", Const, 0}, + {"F_GETCODEDIR", Const, 16}, + {"F_GETFD", Const, 0}, + {"F_GETFL", Const, 0}, + {"F_GETLEASE", Const, 0}, + {"F_GETLK", Const, 0}, + {"F_GETLK64", Const, 0}, + {"F_GETLKPID", Const, 0}, + {"F_GETNOSIGPIPE", Const, 0}, + {"F_GETOWN", Const, 0}, + {"F_GETOWN_EX", Const, 0}, + {"F_GETPATH", Const, 0}, + {"F_GETPATH_MTMINFO", Const, 0}, + {"F_GETPIPE_SZ", Const, 0}, + {"F_GETPROTECTIONCLASS", Const, 0}, + {"F_GETPROTECTIONLEVEL", Const, 16}, + {"F_GETSIG", Const, 0}, + {"F_GLOBAL_NOCACHE", Const, 0}, + {"F_LOCK", Const, 0}, + {"F_LOG2PHYS", Const, 0}, + {"F_LOG2PHYS_EXT", Const, 0}, + {"F_MARKDEPENDENCY", Const, 0}, + {"F_MAXFD", Const, 1}, + {"F_NOCACHE", Const, 0}, + {"F_NODIRECT", Const, 0}, + {"F_NOTIFY", Const, 0}, + {"F_OGETLK", Const, 0}, + {"F_OK", Const, 0}, + {"F_OSETLK", Const, 0}, + {"F_OSETLKW", Const, 0}, + 
{"F_PARAM_MASK", Const, 1}, + {"F_PARAM_MAX", Const, 1}, + {"F_PATHPKG_CHECK", Const, 0}, + {"F_PEOFPOSMODE", Const, 0}, + {"F_PREALLOCATE", Const, 0}, + {"F_RDADVISE", Const, 0}, + {"F_RDAHEAD", Const, 0}, + {"F_RDLCK", Const, 0}, + {"F_READAHEAD", Const, 0}, + {"F_READBOOTSTRAP", Const, 0}, + {"F_SETBACKINGSTORE", Const, 0}, + {"F_SETFD", Const, 0}, + {"F_SETFL", Const, 0}, + {"F_SETLEASE", Const, 0}, + {"F_SETLK", Const, 0}, + {"F_SETLK64", Const, 0}, + {"F_SETLKW", Const, 0}, + {"F_SETLKW64", Const, 0}, + {"F_SETLKWTIMEOUT", Const, 16}, + {"F_SETLK_REMOTE", Const, 0}, + {"F_SETNOSIGPIPE", Const, 0}, + {"F_SETOWN", Const, 0}, + {"F_SETOWN_EX", Const, 0}, + {"F_SETPIPE_SZ", Const, 0}, + {"F_SETPROTECTIONCLASS", Const, 0}, + {"F_SETSIG", Const, 0}, + {"F_SETSIZE", Const, 0}, + {"F_SHLCK", Const, 0}, + {"F_SINGLE_WRITER", Const, 16}, + {"F_TEST", Const, 0}, + {"F_THAW_FS", Const, 0}, + {"F_TLOCK", Const, 0}, + {"F_TRANSCODEKEY", Const, 16}, + {"F_ULOCK", Const, 0}, + {"F_UNLCK", Const, 0}, + {"F_UNLCKSYS", Const, 0}, + {"F_VOLPOSMODE", Const, 0}, + {"F_WRITEBOOTSTRAP", Const, 0}, + {"F_WRLCK", Const, 0}, + {"Faccessat", Func, 0}, + {"Fallocate", Func, 0}, + {"Fbootstraptransfer_t", Type, 0}, + {"Fbootstraptransfer_t.Buffer", Field, 0}, + {"Fbootstraptransfer_t.Length", Field, 0}, + {"Fbootstraptransfer_t.Offset", Field, 0}, + {"Fchdir", Func, 0}, + {"Fchflags", Func, 0}, + {"Fchmod", Func, 0}, + {"Fchmodat", Func, 0}, + {"Fchown", Func, 0}, + {"Fchownat", Func, 0}, + {"FcntlFlock", Func, 3}, + {"FdSet", Type, 0}, + {"FdSet.Bits", Field, 0}, + {"FdSet.X__fds_bits", Field, 0}, + {"Fdatasync", Func, 0}, + {"FileNotifyInformation", Type, 0}, + {"FileNotifyInformation.Action", Field, 0}, + {"FileNotifyInformation.FileName", Field, 0}, + {"FileNotifyInformation.FileNameLength", Field, 0}, + {"FileNotifyInformation.NextEntryOffset", Field, 0}, + {"Filetime", Type, 0}, + {"Filetime.HighDateTime", Field, 0}, + {"Filetime.LowDateTime", Field, 0}, + {"FindClose", Func, 0}, + {"FindFirstFile", Func, 0}, + {"FindNextFile", Func, 0}, + {"Flock", Func, 0}, + {"Flock_t", Type, 0}, + {"Flock_t.Len", Field, 0}, + {"Flock_t.Pad_cgo_0", Field, 0}, + {"Flock_t.Pad_cgo_1", Field, 3}, + {"Flock_t.Pid", Field, 0}, + {"Flock_t.Start", Field, 0}, + {"Flock_t.Sysid", Field, 0}, + {"Flock_t.Type", Field, 0}, + {"Flock_t.Whence", Field, 0}, + {"FlushBpf", Func, 0}, + {"FlushFileBuffers", Func, 0}, + {"FlushViewOfFile", Func, 0}, + {"ForkExec", Func, 0}, + {"ForkLock", Var, 0}, + {"FormatMessage", Func, 0}, + {"Fpathconf", Func, 0}, + {"FreeAddrInfoW", Func, 1}, + {"FreeEnvironmentStrings", Func, 0}, + {"FreeLibrary", Func, 0}, + {"Fsid", Type, 0}, + {"Fsid.Val", Field, 0}, + {"Fsid.X__fsid_val", Field, 2}, + {"Fsid.X__val", Field, 0}, + {"Fstat", Func, 0}, + {"Fstatat", Func, 12}, + {"Fstatfs", Func, 0}, + {"Fstore_t", Type, 0}, + {"Fstore_t.Bytesalloc", Field, 0}, + {"Fstore_t.Flags", Field, 0}, + {"Fstore_t.Length", Field, 0}, + {"Fstore_t.Offset", Field, 0}, + {"Fstore_t.Posmode", Field, 0}, + {"Fsync", Func, 0}, + {"Ftruncate", Func, 0}, + {"FullPath", Func, 4}, + {"Futimes", Func, 0}, + {"Futimesat", Func, 0}, + {"GENERIC_ALL", Const, 0}, + {"GENERIC_EXECUTE", Const, 0}, + {"GENERIC_READ", Const, 0}, + {"GENERIC_WRITE", Const, 0}, + {"GUID", Type, 1}, + {"GUID.Data1", Field, 1}, + {"GUID.Data2", Field, 1}, + {"GUID.Data3", Field, 1}, + {"GUID.Data4", Field, 1}, + {"GetAcceptExSockaddrs", Func, 0}, + {"GetAdaptersInfo", Func, 0}, + {"GetAddrInfoW", Func, 1}, + {"GetCommandLine", Func, 0}, + {"GetComputerName", 
Func, 0}, + {"GetConsoleMode", Func, 1}, + {"GetCurrentDirectory", Func, 0}, + {"GetCurrentProcess", Func, 0}, + {"GetEnvironmentStrings", Func, 0}, + {"GetEnvironmentVariable", Func, 0}, + {"GetExitCodeProcess", Func, 0}, + {"GetFileAttributes", Func, 0}, + {"GetFileAttributesEx", Func, 0}, + {"GetFileExInfoStandard", Const, 0}, + {"GetFileExMaxInfoLevel", Const, 0}, + {"GetFileInformationByHandle", Func, 0}, + {"GetFileType", Func, 0}, + {"GetFullPathName", Func, 0}, + {"GetHostByName", Func, 0}, + {"GetIfEntry", Func, 0}, + {"GetLastError", Func, 0}, + {"GetLengthSid", Func, 0}, + {"GetLongPathName", Func, 0}, + {"GetProcAddress", Func, 0}, + {"GetProcessTimes", Func, 0}, + {"GetProtoByName", Func, 0}, + {"GetQueuedCompletionStatus", Func, 0}, + {"GetServByName", Func, 0}, + {"GetShortPathName", Func, 0}, + {"GetStartupInfo", Func, 0}, + {"GetStdHandle", Func, 0}, + {"GetSystemTimeAsFileTime", Func, 0}, + {"GetTempPath", Func, 0}, + {"GetTimeZoneInformation", Func, 0}, + {"GetTokenInformation", Func, 0}, + {"GetUserNameEx", Func, 0}, + {"GetUserProfileDirectory", Func, 0}, + {"GetVersion", Func, 0}, + {"Getcwd", Func, 0}, + {"Getdents", Func, 0}, + {"Getdirentries", Func, 0}, + {"Getdtablesize", Func, 0}, + {"Getegid", Func, 0}, + {"Getenv", Func, 0}, + {"Geteuid", Func, 0}, + {"Getfsstat", Func, 0}, + {"Getgid", Func, 0}, + {"Getgroups", Func, 0}, + {"Getpagesize", Func, 0}, + {"Getpeername", Func, 0}, + {"Getpgid", Func, 0}, + {"Getpgrp", Func, 0}, + {"Getpid", Func, 0}, + {"Getppid", Func, 0}, + {"Getpriority", Func, 0}, + {"Getrlimit", Func, 0}, + {"Getrusage", Func, 0}, + {"Getsid", Func, 0}, + {"Getsockname", Func, 0}, + {"Getsockopt", Func, 1}, + {"GetsockoptByte", Func, 0}, + {"GetsockoptICMPv6Filter", Func, 2}, + {"GetsockoptIPMreq", Func, 0}, + {"GetsockoptIPMreqn", Func, 0}, + {"GetsockoptIPv6MTUInfo", Func, 2}, + {"GetsockoptIPv6Mreq", Func, 0}, + {"GetsockoptInet4Addr", Func, 0}, + {"GetsockoptInt", Func, 0}, + {"GetsockoptUcred", Func, 1}, + {"Gettid", Func, 0}, + {"Gettimeofday", Func, 0}, + {"Getuid", Func, 0}, + {"Getwd", Func, 0}, + {"Getxattr", Func, 1}, + {"HANDLE_FLAG_INHERIT", Const, 0}, + {"HKEY_CLASSES_ROOT", Const, 0}, + {"HKEY_CURRENT_CONFIG", Const, 0}, + {"HKEY_CURRENT_USER", Const, 0}, + {"HKEY_DYN_DATA", Const, 0}, + {"HKEY_LOCAL_MACHINE", Const, 0}, + {"HKEY_PERFORMANCE_DATA", Const, 0}, + {"HKEY_USERS", Const, 0}, + {"HUPCL", Const, 0}, + {"Handle", Type, 0}, + {"Hostent", Type, 0}, + {"Hostent.AddrList", Field, 0}, + {"Hostent.AddrType", Field, 0}, + {"Hostent.Aliases", Field, 0}, + {"Hostent.Length", Field, 0}, + {"Hostent.Name", Field, 0}, + {"ICANON", Const, 0}, + {"ICMP6_FILTER", Const, 2}, + {"ICMPV6_FILTER", Const, 2}, + {"ICMPv6Filter", Type, 2}, + {"ICMPv6Filter.Data", Field, 2}, + {"ICMPv6Filter.Filt", Field, 2}, + {"ICRNL", Const, 0}, + {"IEXTEN", Const, 0}, + {"IFAN_ARRIVAL", Const, 1}, + {"IFAN_DEPARTURE", Const, 1}, + {"IFA_ADDRESS", Const, 0}, + {"IFA_ANYCAST", Const, 0}, + {"IFA_BROADCAST", Const, 0}, + {"IFA_CACHEINFO", Const, 0}, + {"IFA_F_DADFAILED", Const, 0}, + {"IFA_F_DEPRECATED", Const, 0}, + {"IFA_F_HOMEADDRESS", Const, 0}, + {"IFA_F_NODAD", Const, 0}, + {"IFA_F_OPTIMISTIC", Const, 0}, + {"IFA_F_PERMANENT", Const, 0}, + {"IFA_F_SECONDARY", Const, 0}, + {"IFA_F_TEMPORARY", Const, 0}, + {"IFA_F_TENTATIVE", Const, 0}, + {"IFA_LABEL", Const, 0}, + {"IFA_LOCAL", Const, 0}, + {"IFA_MAX", Const, 0}, + {"IFA_MULTICAST", Const, 0}, + {"IFA_ROUTE", Const, 1}, + {"IFA_UNSPEC", Const, 0}, + {"IFF_ALLMULTI", Const, 0}, + {"IFF_ALTPHYS", Const, 
0}, + {"IFF_AUTOMEDIA", Const, 0}, + {"IFF_BROADCAST", Const, 0}, + {"IFF_CANTCHANGE", Const, 0}, + {"IFF_CANTCONFIG", Const, 1}, + {"IFF_DEBUG", Const, 0}, + {"IFF_DRV_OACTIVE", Const, 0}, + {"IFF_DRV_RUNNING", Const, 0}, + {"IFF_DYING", Const, 0}, + {"IFF_DYNAMIC", Const, 0}, + {"IFF_LINK0", Const, 0}, + {"IFF_LINK1", Const, 0}, + {"IFF_LINK2", Const, 0}, + {"IFF_LOOPBACK", Const, 0}, + {"IFF_MASTER", Const, 0}, + {"IFF_MONITOR", Const, 0}, + {"IFF_MULTICAST", Const, 0}, + {"IFF_NOARP", Const, 0}, + {"IFF_NOTRAILERS", Const, 0}, + {"IFF_NO_PI", Const, 0}, + {"IFF_OACTIVE", Const, 0}, + {"IFF_ONE_QUEUE", Const, 0}, + {"IFF_POINTOPOINT", Const, 0}, + {"IFF_POINTTOPOINT", Const, 0}, + {"IFF_PORTSEL", Const, 0}, + {"IFF_PPROMISC", Const, 0}, + {"IFF_PROMISC", Const, 0}, + {"IFF_RENAMING", Const, 0}, + {"IFF_RUNNING", Const, 0}, + {"IFF_SIMPLEX", Const, 0}, + {"IFF_SLAVE", Const, 0}, + {"IFF_SMART", Const, 0}, + {"IFF_STATICARP", Const, 0}, + {"IFF_TAP", Const, 0}, + {"IFF_TUN", Const, 0}, + {"IFF_TUN_EXCL", Const, 0}, + {"IFF_UP", Const, 0}, + {"IFF_VNET_HDR", Const, 0}, + {"IFLA_ADDRESS", Const, 0}, + {"IFLA_BROADCAST", Const, 0}, + {"IFLA_COST", Const, 0}, + {"IFLA_IFALIAS", Const, 0}, + {"IFLA_IFNAME", Const, 0}, + {"IFLA_LINK", Const, 0}, + {"IFLA_LINKINFO", Const, 0}, + {"IFLA_LINKMODE", Const, 0}, + {"IFLA_MAP", Const, 0}, + {"IFLA_MASTER", Const, 0}, + {"IFLA_MAX", Const, 0}, + {"IFLA_MTU", Const, 0}, + {"IFLA_NET_NS_PID", Const, 0}, + {"IFLA_OPERSTATE", Const, 0}, + {"IFLA_PRIORITY", Const, 0}, + {"IFLA_PROTINFO", Const, 0}, + {"IFLA_QDISC", Const, 0}, + {"IFLA_STATS", Const, 0}, + {"IFLA_TXQLEN", Const, 0}, + {"IFLA_UNSPEC", Const, 0}, + {"IFLA_WEIGHT", Const, 0}, + {"IFLA_WIRELESS", Const, 0}, + {"IFNAMSIZ", Const, 0}, + {"IFT_1822", Const, 0}, + {"IFT_A12MPPSWITCH", Const, 0}, + {"IFT_AAL2", Const, 0}, + {"IFT_AAL5", Const, 0}, + {"IFT_ADSL", Const, 0}, + {"IFT_AFLANE8023", Const, 0}, + {"IFT_AFLANE8025", Const, 0}, + {"IFT_ARAP", Const, 0}, + {"IFT_ARCNET", Const, 0}, + {"IFT_ARCNETPLUS", Const, 0}, + {"IFT_ASYNC", Const, 0}, + {"IFT_ATM", Const, 0}, + {"IFT_ATMDXI", Const, 0}, + {"IFT_ATMFUNI", Const, 0}, + {"IFT_ATMIMA", Const, 0}, + {"IFT_ATMLOGICAL", Const, 0}, + {"IFT_ATMRADIO", Const, 0}, + {"IFT_ATMSUBINTERFACE", Const, 0}, + {"IFT_ATMVCIENDPT", Const, 0}, + {"IFT_ATMVIRTUAL", Const, 0}, + {"IFT_BGPPOLICYACCOUNTING", Const, 0}, + {"IFT_BLUETOOTH", Const, 1}, + {"IFT_BRIDGE", Const, 0}, + {"IFT_BSC", Const, 0}, + {"IFT_CARP", Const, 0}, + {"IFT_CCTEMUL", Const, 0}, + {"IFT_CELLULAR", Const, 0}, + {"IFT_CEPT", Const, 0}, + {"IFT_CES", Const, 0}, + {"IFT_CHANNEL", Const, 0}, + {"IFT_CNR", Const, 0}, + {"IFT_COFFEE", Const, 0}, + {"IFT_COMPOSITELINK", Const, 0}, + {"IFT_DCN", Const, 0}, + {"IFT_DIGITALPOWERLINE", Const, 0}, + {"IFT_DIGITALWRAPPEROVERHEADCHANNEL", Const, 0}, + {"IFT_DLSW", Const, 0}, + {"IFT_DOCSCABLEDOWNSTREAM", Const, 0}, + {"IFT_DOCSCABLEMACLAYER", Const, 0}, + {"IFT_DOCSCABLEUPSTREAM", Const, 0}, + {"IFT_DOCSCABLEUPSTREAMCHANNEL", Const, 1}, + {"IFT_DS0", Const, 0}, + {"IFT_DS0BUNDLE", Const, 0}, + {"IFT_DS1FDL", Const, 0}, + {"IFT_DS3", Const, 0}, + {"IFT_DTM", Const, 0}, + {"IFT_DUMMY", Const, 1}, + {"IFT_DVBASILN", Const, 0}, + {"IFT_DVBASIOUT", Const, 0}, + {"IFT_DVBRCCDOWNSTREAM", Const, 0}, + {"IFT_DVBRCCMACLAYER", Const, 0}, + {"IFT_DVBRCCUPSTREAM", Const, 0}, + {"IFT_ECONET", Const, 1}, + {"IFT_ENC", Const, 0}, + {"IFT_EON", Const, 0}, + {"IFT_EPLRS", Const, 0}, + {"IFT_ESCON", Const, 0}, + {"IFT_ETHER", Const, 0}, + {"IFT_FAITH", Const, 0}, + 
{"IFT_FAST", Const, 0}, + {"IFT_FASTETHER", Const, 0}, + {"IFT_FASTETHERFX", Const, 0}, + {"IFT_FDDI", Const, 0}, + {"IFT_FIBRECHANNEL", Const, 0}, + {"IFT_FRAMERELAYINTERCONNECT", Const, 0}, + {"IFT_FRAMERELAYMPI", Const, 0}, + {"IFT_FRDLCIENDPT", Const, 0}, + {"IFT_FRELAY", Const, 0}, + {"IFT_FRELAYDCE", Const, 0}, + {"IFT_FRF16MFRBUNDLE", Const, 0}, + {"IFT_FRFORWARD", Const, 0}, + {"IFT_G703AT2MB", Const, 0}, + {"IFT_G703AT64K", Const, 0}, + {"IFT_GIF", Const, 0}, + {"IFT_GIGABITETHERNET", Const, 0}, + {"IFT_GR303IDT", Const, 0}, + {"IFT_GR303RDT", Const, 0}, + {"IFT_H323GATEKEEPER", Const, 0}, + {"IFT_H323PROXY", Const, 0}, + {"IFT_HDH1822", Const, 0}, + {"IFT_HDLC", Const, 0}, + {"IFT_HDSL2", Const, 0}, + {"IFT_HIPERLAN2", Const, 0}, + {"IFT_HIPPI", Const, 0}, + {"IFT_HIPPIINTERFACE", Const, 0}, + {"IFT_HOSTPAD", Const, 0}, + {"IFT_HSSI", Const, 0}, + {"IFT_HY", Const, 0}, + {"IFT_IBM370PARCHAN", Const, 0}, + {"IFT_IDSL", Const, 0}, + {"IFT_IEEE1394", Const, 0}, + {"IFT_IEEE80211", Const, 0}, + {"IFT_IEEE80212", Const, 0}, + {"IFT_IEEE8023ADLAG", Const, 0}, + {"IFT_IFGSN", Const, 0}, + {"IFT_IMT", Const, 0}, + {"IFT_INFINIBAND", Const, 1}, + {"IFT_INTERLEAVE", Const, 0}, + {"IFT_IP", Const, 0}, + {"IFT_IPFORWARD", Const, 0}, + {"IFT_IPOVERATM", Const, 0}, + {"IFT_IPOVERCDLC", Const, 0}, + {"IFT_IPOVERCLAW", Const, 0}, + {"IFT_IPSWITCH", Const, 0}, + {"IFT_IPXIP", Const, 0}, + {"IFT_ISDN", Const, 0}, + {"IFT_ISDNBASIC", Const, 0}, + {"IFT_ISDNPRIMARY", Const, 0}, + {"IFT_ISDNS", Const, 0}, + {"IFT_ISDNU", Const, 0}, + {"IFT_ISO88022LLC", Const, 0}, + {"IFT_ISO88023", Const, 0}, + {"IFT_ISO88024", Const, 0}, + {"IFT_ISO88025", Const, 0}, + {"IFT_ISO88025CRFPINT", Const, 0}, + {"IFT_ISO88025DTR", Const, 0}, + {"IFT_ISO88025FIBER", Const, 0}, + {"IFT_ISO88026", Const, 0}, + {"IFT_ISUP", Const, 0}, + {"IFT_L2VLAN", Const, 0}, + {"IFT_L3IPVLAN", Const, 0}, + {"IFT_L3IPXVLAN", Const, 0}, + {"IFT_LAPB", Const, 0}, + {"IFT_LAPD", Const, 0}, + {"IFT_LAPF", Const, 0}, + {"IFT_LINEGROUP", Const, 1}, + {"IFT_LOCALTALK", Const, 0}, + {"IFT_LOOP", Const, 0}, + {"IFT_MEDIAMAILOVERIP", Const, 0}, + {"IFT_MFSIGLINK", Const, 0}, + {"IFT_MIOX25", Const, 0}, + {"IFT_MODEM", Const, 0}, + {"IFT_MPC", Const, 0}, + {"IFT_MPLS", Const, 0}, + {"IFT_MPLSTUNNEL", Const, 0}, + {"IFT_MSDSL", Const, 0}, + {"IFT_MVL", Const, 0}, + {"IFT_MYRINET", Const, 0}, + {"IFT_NFAS", Const, 0}, + {"IFT_NSIP", Const, 0}, + {"IFT_OPTICALCHANNEL", Const, 0}, + {"IFT_OPTICALTRANSPORT", Const, 0}, + {"IFT_OTHER", Const, 0}, + {"IFT_P10", Const, 0}, + {"IFT_P80", Const, 0}, + {"IFT_PARA", Const, 0}, + {"IFT_PDP", Const, 0}, + {"IFT_PFLOG", Const, 0}, + {"IFT_PFLOW", Const, 1}, + {"IFT_PFSYNC", Const, 0}, + {"IFT_PLC", Const, 0}, + {"IFT_PON155", Const, 1}, + {"IFT_PON622", Const, 1}, + {"IFT_POS", Const, 0}, + {"IFT_PPP", Const, 0}, + {"IFT_PPPMULTILINKBUNDLE", Const, 0}, + {"IFT_PROPATM", Const, 1}, + {"IFT_PROPBWAP2MP", Const, 0}, + {"IFT_PROPCNLS", Const, 0}, + {"IFT_PROPDOCSWIRELESSDOWNSTREAM", Const, 0}, + {"IFT_PROPDOCSWIRELESSMACLAYER", Const, 0}, + {"IFT_PROPDOCSWIRELESSUPSTREAM", Const, 0}, + {"IFT_PROPMUX", Const, 0}, + {"IFT_PROPVIRTUAL", Const, 0}, + {"IFT_PROPWIRELESSP2P", Const, 0}, + {"IFT_PTPSERIAL", Const, 0}, + {"IFT_PVC", Const, 0}, + {"IFT_Q2931", Const, 1}, + {"IFT_QLLC", Const, 0}, + {"IFT_RADIOMAC", Const, 0}, + {"IFT_RADSL", Const, 0}, + {"IFT_REACHDSL", Const, 0}, + {"IFT_RFC1483", Const, 0}, + {"IFT_RS232", Const, 0}, + {"IFT_RSRB", Const, 0}, + {"IFT_SDLC", Const, 0}, + {"IFT_SDSL", Const, 0}, + 
{"IFT_SHDSL", Const, 0}, + {"IFT_SIP", Const, 0}, + {"IFT_SIPSIG", Const, 1}, + {"IFT_SIPTG", Const, 1}, + {"IFT_SLIP", Const, 0}, + {"IFT_SMDSDXI", Const, 0}, + {"IFT_SMDSICIP", Const, 0}, + {"IFT_SONET", Const, 0}, + {"IFT_SONETOVERHEADCHANNEL", Const, 0}, + {"IFT_SONETPATH", Const, 0}, + {"IFT_SONETVT", Const, 0}, + {"IFT_SRP", Const, 0}, + {"IFT_SS7SIGLINK", Const, 0}, + {"IFT_STACKTOSTACK", Const, 0}, + {"IFT_STARLAN", Const, 0}, + {"IFT_STF", Const, 0}, + {"IFT_T1", Const, 0}, + {"IFT_TDLC", Const, 0}, + {"IFT_TELINK", Const, 1}, + {"IFT_TERMPAD", Const, 0}, + {"IFT_TR008", Const, 0}, + {"IFT_TRANSPHDLC", Const, 0}, + {"IFT_TUNNEL", Const, 0}, + {"IFT_ULTRA", Const, 0}, + {"IFT_USB", Const, 0}, + {"IFT_V11", Const, 0}, + {"IFT_V35", Const, 0}, + {"IFT_V36", Const, 0}, + {"IFT_V37", Const, 0}, + {"IFT_VDSL", Const, 0}, + {"IFT_VIRTUALIPADDRESS", Const, 0}, + {"IFT_VIRTUALTG", Const, 1}, + {"IFT_VOICEDID", Const, 1}, + {"IFT_VOICEEM", Const, 0}, + {"IFT_VOICEEMFGD", Const, 1}, + {"IFT_VOICEENCAP", Const, 0}, + {"IFT_VOICEFGDEANA", Const, 1}, + {"IFT_VOICEFXO", Const, 0}, + {"IFT_VOICEFXS", Const, 0}, + {"IFT_VOICEOVERATM", Const, 0}, + {"IFT_VOICEOVERCABLE", Const, 1}, + {"IFT_VOICEOVERFRAMERELAY", Const, 0}, + {"IFT_VOICEOVERIP", Const, 0}, + {"IFT_X213", Const, 0}, + {"IFT_X25", Const, 0}, + {"IFT_X25DDN", Const, 0}, + {"IFT_X25HUNTGROUP", Const, 0}, + {"IFT_X25MLP", Const, 0}, + {"IFT_X25PLE", Const, 0}, + {"IFT_XETHER", Const, 0}, + {"IGNBRK", Const, 0}, + {"IGNCR", Const, 0}, + {"IGNORE", Const, 0}, + {"IGNPAR", Const, 0}, + {"IMAXBEL", Const, 0}, + {"INFINITE", Const, 0}, + {"INLCR", Const, 0}, + {"INPCK", Const, 0}, + {"INVALID_FILE_ATTRIBUTES", Const, 0}, + {"IN_ACCESS", Const, 0}, + {"IN_ALL_EVENTS", Const, 0}, + {"IN_ATTRIB", Const, 0}, + {"IN_CLASSA_HOST", Const, 0}, + {"IN_CLASSA_MAX", Const, 0}, + {"IN_CLASSA_NET", Const, 0}, + {"IN_CLASSA_NSHIFT", Const, 0}, + {"IN_CLASSB_HOST", Const, 0}, + {"IN_CLASSB_MAX", Const, 0}, + {"IN_CLASSB_NET", Const, 0}, + {"IN_CLASSB_NSHIFT", Const, 0}, + {"IN_CLASSC_HOST", Const, 0}, + {"IN_CLASSC_NET", Const, 0}, + {"IN_CLASSC_NSHIFT", Const, 0}, + {"IN_CLASSD_HOST", Const, 0}, + {"IN_CLASSD_NET", Const, 0}, + {"IN_CLASSD_NSHIFT", Const, 0}, + {"IN_CLOEXEC", Const, 0}, + {"IN_CLOSE", Const, 0}, + {"IN_CLOSE_NOWRITE", Const, 0}, + {"IN_CLOSE_WRITE", Const, 0}, + {"IN_CREATE", Const, 0}, + {"IN_DELETE", Const, 0}, + {"IN_DELETE_SELF", Const, 0}, + {"IN_DONT_FOLLOW", Const, 0}, + {"IN_EXCL_UNLINK", Const, 0}, + {"IN_IGNORED", Const, 0}, + {"IN_ISDIR", Const, 0}, + {"IN_LINKLOCALNETNUM", Const, 0}, + {"IN_LOOPBACKNET", Const, 0}, + {"IN_MASK_ADD", Const, 0}, + {"IN_MODIFY", Const, 0}, + {"IN_MOVE", Const, 0}, + {"IN_MOVED_FROM", Const, 0}, + {"IN_MOVED_TO", Const, 0}, + {"IN_MOVE_SELF", Const, 0}, + {"IN_NONBLOCK", Const, 0}, + {"IN_ONESHOT", Const, 0}, + {"IN_ONLYDIR", Const, 0}, + {"IN_OPEN", Const, 0}, + {"IN_Q_OVERFLOW", Const, 0}, + {"IN_RFC3021_HOST", Const, 1}, + {"IN_RFC3021_MASK", Const, 1}, + {"IN_RFC3021_NET", Const, 1}, + {"IN_RFC3021_NSHIFT", Const, 1}, + {"IN_UNMOUNT", Const, 0}, + {"IOC_IN", Const, 1}, + {"IOC_INOUT", Const, 1}, + {"IOC_OUT", Const, 1}, + {"IOC_VENDOR", Const, 3}, + {"IOC_WS2", Const, 1}, + {"IO_REPARSE_TAG_SYMLINK", Const, 4}, + {"IPMreq", Type, 0}, + {"IPMreq.Interface", Field, 0}, + {"IPMreq.Multiaddr", Field, 0}, + {"IPMreqn", Type, 0}, + {"IPMreqn.Address", Field, 0}, + {"IPMreqn.Ifindex", Field, 0}, + {"IPMreqn.Multiaddr", Field, 0}, + {"IPPROTO_3PC", Const, 0}, + {"IPPROTO_ADFS", Const, 0}, + 
{"IPPROTO_AH", Const, 0}, + {"IPPROTO_AHIP", Const, 0}, + {"IPPROTO_APES", Const, 0}, + {"IPPROTO_ARGUS", Const, 0}, + {"IPPROTO_AX25", Const, 0}, + {"IPPROTO_BHA", Const, 0}, + {"IPPROTO_BLT", Const, 0}, + {"IPPROTO_BRSATMON", Const, 0}, + {"IPPROTO_CARP", Const, 0}, + {"IPPROTO_CFTP", Const, 0}, + {"IPPROTO_CHAOS", Const, 0}, + {"IPPROTO_CMTP", Const, 0}, + {"IPPROTO_COMP", Const, 0}, + {"IPPROTO_CPHB", Const, 0}, + {"IPPROTO_CPNX", Const, 0}, + {"IPPROTO_DCCP", Const, 0}, + {"IPPROTO_DDP", Const, 0}, + {"IPPROTO_DGP", Const, 0}, + {"IPPROTO_DIVERT", Const, 0}, + {"IPPROTO_DIVERT_INIT", Const, 3}, + {"IPPROTO_DIVERT_RESP", Const, 3}, + {"IPPROTO_DONE", Const, 0}, + {"IPPROTO_DSTOPTS", Const, 0}, + {"IPPROTO_EGP", Const, 0}, + {"IPPROTO_EMCON", Const, 0}, + {"IPPROTO_ENCAP", Const, 0}, + {"IPPROTO_EON", Const, 0}, + {"IPPROTO_ESP", Const, 0}, + {"IPPROTO_ETHERIP", Const, 0}, + {"IPPROTO_FRAGMENT", Const, 0}, + {"IPPROTO_GGP", Const, 0}, + {"IPPROTO_GMTP", Const, 0}, + {"IPPROTO_GRE", Const, 0}, + {"IPPROTO_HELLO", Const, 0}, + {"IPPROTO_HMP", Const, 0}, + {"IPPROTO_HOPOPTS", Const, 0}, + {"IPPROTO_ICMP", Const, 0}, + {"IPPROTO_ICMPV6", Const, 0}, + {"IPPROTO_IDP", Const, 0}, + {"IPPROTO_IDPR", Const, 0}, + {"IPPROTO_IDRP", Const, 0}, + {"IPPROTO_IGMP", Const, 0}, + {"IPPROTO_IGP", Const, 0}, + {"IPPROTO_IGRP", Const, 0}, + {"IPPROTO_IL", Const, 0}, + {"IPPROTO_INLSP", Const, 0}, + {"IPPROTO_INP", Const, 0}, + {"IPPROTO_IP", Const, 0}, + {"IPPROTO_IPCOMP", Const, 0}, + {"IPPROTO_IPCV", Const, 0}, + {"IPPROTO_IPEIP", Const, 0}, + {"IPPROTO_IPIP", Const, 0}, + {"IPPROTO_IPPC", Const, 0}, + {"IPPROTO_IPV4", Const, 0}, + {"IPPROTO_IPV6", Const, 0}, + {"IPPROTO_IPV6_ICMP", Const, 1}, + {"IPPROTO_IRTP", Const, 0}, + {"IPPROTO_KRYPTOLAN", Const, 0}, + {"IPPROTO_LARP", Const, 0}, + {"IPPROTO_LEAF1", Const, 0}, + {"IPPROTO_LEAF2", Const, 0}, + {"IPPROTO_MAX", Const, 0}, + {"IPPROTO_MAXID", Const, 0}, + {"IPPROTO_MEAS", Const, 0}, + {"IPPROTO_MH", Const, 1}, + {"IPPROTO_MHRP", Const, 0}, + {"IPPROTO_MICP", Const, 0}, + {"IPPROTO_MOBILE", Const, 0}, + {"IPPROTO_MPLS", Const, 1}, + {"IPPROTO_MTP", Const, 0}, + {"IPPROTO_MUX", Const, 0}, + {"IPPROTO_ND", Const, 0}, + {"IPPROTO_NHRP", Const, 0}, + {"IPPROTO_NONE", Const, 0}, + {"IPPROTO_NSP", Const, 0}, + {"IPPROTO_NVPII", Const, 0}, + {"IPPROTO_OLD_DIVERT", Const, 0}, + {"IPPROTO_OSPFIGP", Const, 0}, + {"IPPROTO_PFSYNC", Const, 0}, + {"IPPROTO_PGM", Const, 0}, + {"IPPROTO_PIGP", Const, 0}, + {"IPPROTO_PIM", Const, 0}, + {"IPPROTO_PRM", Const, 0}, + {"IPPROTO_PUP", Const, 0}, + {"IPPROTO_PVP", Const, 0}, + {"IPPROTO_RAW", Const, 0}, + {"IPPROTO_RCCMON", Const, 0}, + {"IPPROTO_RDP", Const, 0}, + {"IPPROTO_ROUTING", Const, 0}, + {"IPPROTO_RSVP", Const, 0}, + {"IPPROTO_RVD", Const, 0}, + {"IPPROTO_SATEXPAK", Const, 0}, + {"IPPROTO_SATMON", Const, 0}, + {"IPPROTO_SCCSP", Const, 0}, + {"IPPROTO_SCTP", Const, 0}, + {"IPPROTO_SDRP", Const, 0}, + {"IPPROTO_SEND", Const, 1}, + {"IPPROTO_SEP", Const, 0}, + {"IPPROTO_SKIP", Const, 0}, + {"IPPROTO_SPACER", Const, 0}, + {"IPPROTO_SRPC", Const, 0}, + {"IPPROTO_ST", Const, 0}, + {"IPPROTO_SVMTP", Const, 0}, + {"IPPROTO_SWIPE", Const, 0}, + {"IPPROTO_TCF", Const, 0}, + {"IPPROTO_TCP", Const, 0}, + {"IPPROTO_TLSP", Const, 0}, + {"IPPROTO_TP", Const, 0}, + {"IPPROTO_TPXX", Const, 0}, + {"IPPROTO_TRUNK1", Const, 0}, + {"IPPROTO_TRUNK2", Const, 0}, + {"IPPROTO_TTP", Const, 0}, + {"IPPROTO_UDP", Const, 0}, + {"IPPROTO_UDPLITE", Const, 0}, + {"IPPROTO_VINES", Const, 0}, + {"IPPROTO_VISA", Const, 0}, + {"IPPROTO_VMTP", Const, 
0}, + {"IPPROTO_VRRP", Const, 1}, + {"IPPROTO_WBEXPAK", Const, 0}, + {"IPPROTO_WBMON", Const, 0}, + {"IPPROTO_WSN", Const, 0}, + {"IPPROTO_XNET", Const, 0}, + {"IPPROTO_XTP", Const, 0}, + {"IPV6_2292DSTOPTS", Const, 0}, + {"IPV6_2292HOPLIMIT", Const, 0}, + {"IPV6_2292HOPOPTS", Const, 0}, + {"IPV6_2292NEXTHOP", Const, 0}, + {"IPV6_2292PKTINFO", Const, 0}, + {"IPV6_2292PKTOPTIONS", Const, 0}, + {"IPV6_2292RTHDR", Const, 0}, + {"IPV6_ADDRFORM", Const, 0}, + {"IPV6_ADD_MEMBERSHIP", Const, 0}, + {"IPV6_AUTHHDR", Const, 0}, + {"IPV6_AUTH_LEVEL", Const, 1}, + {"IPV6_AUTOFLOWLABEL", Const, 0}, + {"IPV6_BINDANY", Const, 0}, + {"IPV6_BINDV6ONLY", Const, 0}, + {"IPV6_BOUND_IF", Const, 0}, + {"IPV6_CHECKSUM", Const, 0}, + {"IPV6_DEFAULT_MULTICAST_HOPS", Const, 0}, + {"IPV6_DEFAULT_MULTICAST_LOOP", Const, 0}, + {"IPV6_DEFHLIM", Const, 0}, + {"IPV6_DONTFRAG", Const, 0}, + {"IPV6_DROP_MEMBERSHIP", Const, 0}, + {"IPV6_DSTOPTS", Const, 0}, + {"IPV6_ESP_NETWORK_LEVEL", Const, 1}, + {"IPV6_ESP_TRANS_LEVEL", Const, 1}, + {"IPV6_FAITH", Const, 0}, + {"IPV6_FLOWINFO_MASK", Const, 0}, + {"IPV6_FLOWLABEL_MASK", Const, 0}, + {"IPV6_FRAGTTL", Const, 0}, + {"IPV6_FW_ADD", Const, 0}, + {"IPV6_FW_DEL", Const, 0}, + {"IPV6_FW_FLUSH", Const, 0}, + {"IPV6_FW_GET", Const, 0}, + {"IPV6_FW_ZERO", Const, 0}, + {"IPV6_HLIMDEC", Const, 0}, + {"IPV6_HOPLIMIT", Const, 0}, + {"IPV6_HOPOPTS", Const, 0}, + {"IPV6_IPCOMP_LEVEL", Const, 1}, + {"IPV6_IPSEC_POLICY", Const, 0}, + {"IPV6_JOIN_ANYCAST", Const, 0}, + {"IPV6_JOIN_GROUP", Const, 0}, + {"IPV6_LEAVE_ANYCAST", Const, 0}, + {"IPV6_LEAVE_GROUP", Const, 0}, + {"IPV6_MAXHLIM", Const, 0}, + {"IPV6_MAXOPTHDR", Const, 0}, + {"IPV6_MAXPACKET", Const, 0}, + {"IPV6_MAX_GROUP_SRC_FILTER", Const, 0}, + {"IPV6_MAX_MEMBERSHIPS", Const, 0}, + {"IPV6_MAX_SOCK_SRC_FILTER", Const, 0}, + {"IPV6_MIN_MEMBERSHIPS", Const, 0}, + {"IPV6_MMTU", Const, 0}, + {"IPV6_MSFILTER", Const, 0}, + {"IPV6_MTU", Const, 0}, + {"IPV6_MTU_DISCOVER", Const, 0}, + {"IPV6_MULTICAST_HOPS", Const, 0}, + {"IPV6_MULTICAST_IF", Const, 0}, + {"IPV6_MULTICAST_LOOP", Const, 0}, + {"IPV6_NEXTHOP", Const, 0}, + {"IPV6_OPTIONS", Const, 1}, + {"IPV6_PATHMTU", Const, 0}, + {"IPV6_PIPEX", Const, 1}, + {"IPV6_PKTINFO", Const, 0}, + {"IPV6_PMTUDISC_DO", Const, 0}, + {"IPV6_PMTUDISC_DONT", Const, 0}, + {"IPV6_PMTUDISC_PROBE", Const, 0}, + {"IPV6_PMTUDISC_WANT", Const, 0}, + {"IPV6_PORTRANGE", Const, 0}, + {"IPV6_PORTRANGE_DEFAULT", Const, 0}, + {"IPV6_PORTRANGE_HIGH", Const, 0}, + {"IPV6_PORTRANGE_LOW", Const, 0}, + {"IPV6_PREFER_TEMPADDR", Const, 0}, + {"IPV6_RECVDSTOPTS", Const, 0}, + {"IPV6_RECVDSTPORT", Const, 3}, + {"IPV6_RECVERR", Const, 0}, + {"IPV6_RECVHOPLIMIT", Const, 0}, + {"IPV6_RECVHOPOPTS", Const, 0}, + {"IPV6_RECVPATHMTU", Const, 0}, + {"IPV6_RECVPKTINFO", Const, 0}, + {"IPV6_RECVRTHDR", Const, 0}, + {"IPV6_RECVTCLASS", Const, 0}, + {"IPV6_ROUTER_ALERT", Const, 0}, + {"IPV6_RTABLE", Const, 1}, + {"IPV6_RTHDR", Const, 0}, + {"IPV6_RTHDRDSTOPTS", Const, 0}, + {"IPV6_RTHDR_LOOSE", Const, 0}, + {"IPV6_RTHDR_STRICT", Const, 0}, + {"IPV6_RTHDR_TYPE_0", Const, 0}, + {"IPV6_RXDSTOPTS", Const, 0}, + {"IPV6_RXHOPOPTS", Const, 0}, + {"IPV6_SOCKOPT_RESERVED1", Const, 0}, + {"IPV6_TCLASS", Const, 0}, + {"IPV6_UNICAST_HOPS", Const, 0}, + {"IPV6_USE_MIN_MTU", Const, 0}, + {"IPV6_V6ONLY", Const, 0}, + {"IPV6_VERSION", Const, 0}, + {"IPV6_VERSION_MASK", Const, 0}, + {"IPV6_XFRM_POLICY", Const, 0}, + {"IP_ADD_MEMBERSHIP", Const, 0}, + {"IP_ADD_SOURCE_MEMBERSHIP", Const, 0}, + {"IP_AUTH_LEVEL", Const, 1}, + {"IP_BINDANY", Const, 0}, + 
{"IP_BLOCK_SOURCE", Const, 0}, + {"IP_BOUND_IF", Const, 0}, + {"IP_DEFAULT_MULTICAST_LOOP", Const, 0}, + {"IP_DEFAULT_MULTICAST_TTL", Const, 0}, + {"IP_DF", Const, 0}, + {"IP_DIVERTFL", Const, 3}, + {"IP_DONTFRAG", Const, 0}, + {"IP_DROP_MEMBERSHIP", Const, 0}, + {"IP_DROP_SOURCE_MEMBERSHIP", Const, 0}, + {"IP_DUMMYNET3", Const, 0}, + {"IP_DUMMYNET_CONFIGURE", Const, 0}, + {"IP_DUMMYNET_DEL", Const, 0}, + {"IP_DUMMYNET_FLUSH", Const, 0}, + {"IP_DUMMYNET_GET", Const, 0}, + {"IP_EF", Const, 1}, + {"IP_ERRORMTU", Const, 1}, + {"IP_ESP_NETWORK_LEVEL", Const, 1}, + {"IP_ESP_TRANS_LEVEL", Const, 1}, + {"IP_FAITH", Const, 0}, + {"IP_FREEBIND", Const, 0}, + {"IP_FW3", Const, 0}, + {"IP_FW_ADD", Const, 0}, + {"IP_FW_DEL", Const, 0}, + {"IP_FW_FLUSH", Const, 0}, + {"IP_FW_GET", Const, 0}, + {"IP_FW_NAT_CFG", Const, 0}, + {"IP_FW_NAT_DEL", Const, 0}, + {"IP_FW_NAT_GET_CONFIG", Const, 0}, + {"IP_FW_NAT_GET_LOG", Const, 0}, + {"IP_FW_RESETLOG", Const, 0}, + {"IP_FW_TABLE_ADD", Const, 0}, + {"IP_FW_TABLE_DEL", Const, 0}, + {"IP_FW_TABLE_FLUSH", Const, 0}, + {"IP_FW_TABLE_GETSIZE", Const, 0}, + {"IP_FW_TABLE_LIST", Const, 0}, + {"IP_FW_ZERO", Const, 0}, + {"IP_HDRINCL", Const, 0}, + {"IP_IPCOMP_LEVEL", Const, 1}, + {"IP_IPSECFLOWINFO", Const, 1}, + {"IP_IPSEC_LOCAL_AUTH", Const, 1}, + {"IP_IPSEC_LOCAL_CRED", Const, 1}, + {"IP_IPSEC_LOCAL_ID", Const, 1}, + {"IP_IPSEC_POLICY", Const, 0}, + {"IP_IPSEC_REMOTE_AUTH", Const, 1}, + {"IP_IPSEC_REMOTE_CRED", Const, 1}, + {"IP_IPSEC_REMOTE_ID", Const, 1}, + {"IP_MAXPACKET", Const, 0}, + {"IP_MAX_GROUP_SRC_FILTER", Const, 0}, + {"IP_MAX_MEMBERSHIPS", Const, 0}, + {"IP_MAX_SOCK_MUTE_FILTER", Const, 0}, + {"IP_MAX_SOCK_SRC_FILTER", Const, 0}, + {"IP_MAX_SOURCE_FILTER", Const, 0}, + {"IP_MF", Const, 0}, + {"IP_MINFRAGSIZE", Const, 1}, + {"IP_MINTTL", Const, 0}, + {"IP_MIN_MEMBERSHIPS", Const, 0}, + {"IP_MSFILTER", Const, 0}, + {"IP_MSS", Const, 0}, + {"IP_MTU", Const, 0}, + {"IP_MTU_DISCOVER", Const, 0}, + {"IP_MULTICAST_IF", Const, 0}, + {"IP_MULTICAST_IFINDEX", Const, 0}, + {"IP_MULTICAST_LOOP", Const, 0}, + {"IP_MULTICAST_TTL", Const, 0}, + {"IP_MULTICAST_VIF", Const, 0}, + {"IP_NAT__XXX", Const, 0}, + {"IP_OFFMASK", Const, 0}, + {"IP_OLD_FW_ADD", Const, 0}, + {"IP_OLD_FW_DEL", Const, 0}, + {"IP_OLD_FW_FLUSH", Const, 0}, + {"IP_OLD_FW_GET", Const, 0}, + {"IP_OLD_FW_RESETLOG", Const, 0}, + {"IP_OLD_FW_ZERO", Const, 0}, + {"IP_ONESBCAST", Const, 0}, + {"IP_OPTIONS", Const, 0}, + {"IP_ORIGDSTADDR", Const, 0}, + {"IP_PASSSEC", Const, 0}, + {"IP_PIPEX", Const, 1}, + {"IP_PKTINFO", Const, 0}, + {"IP_PKTOPTIONS", Const, 0}, + {"IP_PMTUDISC", Const, 0}, + {"IP_PMTUDISC_DO", Const, 0}, + {"IP_PMTUDISC_DONT", Const, 0}, + {"IP_PMTUDISC_PROBE", Const, 0}, + {"IP_PMTUDISC_WANT", Const, 0}, + {"IP_PORTRANGE", Const, 0}, + {"IP_PORTRANGE_DEFAULT", Const, 0}, + {"IP_PORTRANGE_HIGH", Const, 0}, + {"IP_PORTRANGE_LOW", Const, 0}, + {"IP_RECVDSTADDR", Const, 0}, + {"IP_RECVDSTPORT", Const, 1}, + {"IP_RECVERR", Const, 0}, + {"IP_RECVIF", Const, 0}, + {"IP_RECVOPTS", Const, 0}, + {"IP_RECVORIGDSTADDR", Const, 0}, + {"IP_RECVPKTINFO", Const, 0}, + {"IP_RECVRETOPTS", Const, 0}, + {"IP_RECVRTABLE", Const, 1}, + {"IP_RECVTOS", Const, 0}, + {"IP_RECVTTL", Const, 0}, + {"IP_RETOPTS", Const, 0}, + {"IP_RF", Const, 0}, + {"IP_ROUTER_ALERT", Const, 0}, + {"IP_RSVP_OFF", Const, 0}, + {"IP_RSVP_ON", Const, 0}, + {"IP_RSVP_VIF_OFF", Const, 0}, + {"IP_RSVP_VIF_ON", Const, 0}, + {"IP_RTABLE", Const, 1}, + {"IP_SENDSRCADDR", Const, 0}, + {"IP_STRIPHDR", Const, 0}, + {"IP_TOS", Const, 0}, + 
{"IP_TRAFFIC_MGT_BACKGROUND", Const, 0}, + {"IP_TRANSPARENT", Const, 0}, + {"IP_TTL", Const, 0}, + {"IP_UNBLOCK_SOURCE", Const, 0}, + {"IP_XFRM_POLICY", Const, 0}, + {"IPv6MTUInfo", Type, 2}, + {"IPv6MTUInfo.Addr", Field, 2}, + {"IPv6MTUInfo.Mtu", Field, 2}, + {"IPv6Mreq", Type, 0}, + {"IPv6Mreq.Interface", Field, 0}, + {"IPv6Mreq.Multiaddr", Field, 0}, + {"ISIG", Const, 0}, + {"ISTRIP", Const, 0}, + {"IUCLC", Const, 0}, + {"IUTF8", Const, 0}, + {"IXANY", Const, 0}, + {"IXOFF", Const, 0}, + {"IXON", Const, 0}, + {"IfAddrmsg", Type, 0}, + {"IfAddrmsg.Family", Field, 0}, + {"IfAddrmsg.Flags", Field, 0}, + {"IfAddrmsg.Index", Field, 0}, + {"IfAddrmsg.Prefixlen", Field, 0}, + {"IfAddrmsg.Scope", Field, 0}, + {"IfAnnounceMsghdr", Type, 1}, + {"IfAnnounceMsghdr.Hdrlen", Field, 2}, + {"IfAnnounceMsghdr.Index", Field, 1}, + {"IfAnnounceMsghdr.Msglen", Field, 1}, + {"IfAnnounceMsghdr.Name", Field, 1}, + {"IfAnnounceMsghdr.Type", Field, 1}, + {"IfAnnounceMsghdr.Version", Field, 1}, + {"IfAnnounceMsghdr.What", Field, 1}, + {"IfData", Type, 0}, + {"IfData.Addrlen", Field, 0}, + {"IfData.Baudrate", Field, 0}, + {"IfData.Capabilities", Field, 2}, + {"IfData.Collisions", Field, 0}, + {"IfData.Datalen", Field, 0}, + {"IfData.Epoch", Field, 0}, + {"IfData.Hdrlen", Field, 0}, + {"IfData.Hwassist", Field, 0}, + {"IfData.Ibytes", Field, 0}, + {"IfData.Ierrors", Field, 0}, + {"IfData.Imcasts", Field, 0}, + {"IfData.Ipackets", Field, 0}, + {"IfData.Iqdrops", Field, 0}, + {"IfData.Lastchange", Field, 0}, + {"IfData.Link_state", Field, 0}, + {"IfData.Mclpool", Field, 2}, + {"IfData.Metric", Field, 0}, + {"IfData.Mtu", Field, 0}, + {"IfData.Noproto", Field, 0}, + {"IfData.Obytes", Field, 0}, + {"IfData.Oerrors", Field, 0}, + {"IfData.Omcasts", Field, 0}, + {"IfData.Opackets", Field, 0}, + {"IfData.Pad", Field, 2}, + {"IfData.Pad_cgo_0", Field, 2}, + {"IfData.Pad_cgo_1", Field, 2}, + {"IfData.Physical", Field, 0}, + {"IfData.Recvquota", Field, 0}, + {"IfData.Recvtiming", Field, 0}, + {"IfData.Reserved1", Field, 0}, + {"IfData.Reserved2", Field, 0}, + {"IfData.Spare_char1", Field, 0}, + {"IfData.Spare_char2", Field, 0}, + {"IfData.Type", Field, 0}, + {"IfData.Typelen", Field, 0}, + {"IfData.Unused1", Field, 0}, + {"IfData.Unused2", Field, 0}, + {"IfData.Xmitquota", Field, 0}, + {"IfData.Xmittiming", Field, 0}, + {"IfInfomsg", Type, 0}, + {"IfInfomsg.Change", Field, 0}, + {"IfInfomsg.Family", Field, 0}, + {"IfInfomsg.Flags", Field, 0}, + {"IfInfomsg.Index", Field, 0}, + {"IfInfomsg.Type", Field, 0}, + {"IfInfomsg.X__ifi_pad", Field, 0}, + {"IfMsghdr", Type, 0}, + {"IfMsghdr.Addrs", Field, 0}, + {"IfMsghdr.Data", Field, 0}, + {"IfMsghdr.Flags", Field, 0}, + {"IfMsghdr.Hdrlen", Field, 2}, + {"IfMsghdr.Index", Field, 0}, + {"IfMsghdr.Msglen", Field, 0}, + {"IfMsghdr.Pad1", Field, 2}, + {"IfMsghdr.Pad2", Field, 2}, + {"IfMsghdr.Pad_cgo_0", Field, 0}, + {"IfMsghdr.Pad_cgo_1", Field, 2}, + {"IfMsghdr.Tableid", Field, 2}, + {"IfMsghdr.Type", Field, 0}, + {"IfMsghdr.Version", Field, 0}, + {"IfMsghdr.Xflags", Field, 2}, + {"IfaMsghdr", Type, 0}, + {"IfaMsghdr.Addrs", Field, 0}, + {"IfaMsghdr.Flags", Field, 0}, + {"IfaMsghdr.Hdrlen", Field, 2}, + {"IfaMsghdr.Index", Field, 0}, + {"IfaMsghdr.Metric", Field, 0}, + {"IfaMsghdr.Msglen", Field, 0}, + {"IfaMsghdr.Pad1", Field, 2}, + {"IfaMsghdr.Pad2", Field, 2}, + {"IfaMsghdr.Pad_cgo_0", Field, 0}, + {"IfaMsghdr.Tableid", Field, 2}, + {"IfaMsghdr.Type", Field, 0}, + {"IfaMsghdr.Version", Field, 0}, + {"IfmaMsghdr", Type, 0}, + {"IfmaMsghdr.Addrs", Field, 0}, + {"IfmaMsghdr.Flags", 
Field, 0}, + {"IfmaMsghdr.Index", Field, 0}, + {"IfmaMsghdr.Msglen", Field, 0}, + {"IfmaMsghdr.Pad_cgo_0", Field, 0}, + {"IfmaMsghdr.Type", Field, 0}, + {"IfmaMsghdr.Version", Field, 0}, + {"IfmaMsghdr2", Type, 0}, + {"IfmaMsghdr2.Addrs", Field, 0}, + {"IfmaMsghdr2.Flags", Field, 0}, + {"IfmaMsghdr2.Index", Field, 0}, + {"IfmaMsghdr2.Msglen", Field, 0}, + {"IfmaMsghdr2.Pad_cgo_0", Field, 0}, + {"IfmaMsghdr2.Refcount", Field, 0}, + {"IfmaMsghdr2.Type", Field, 0}, + {"IfmaMsghdr2.Version", Field, 0}, + {"ImplementsGetwd", Const, 0}, + {"Inet4Pktinfo", Type, 0}, + {"Inet4Pktinfo.Addr", Field, 0}, + {"Inet4Pktinfo.Ifindex", Field, 0}, + {"Inet4Pktinfo.Spec_dst", Field, 0}, + {"Inet6Pktinfo", Type, 0}, + {"Inet6Pktinfo.Addr", Field, 0}, + {"Inet6Pktinfo.Ifindex", Field, 0}, + {"InotifyAddWatch", Func, 0}, + {"InotifyEvent", Type, 0}, + {"InotifyEvent.Cookie", Field, 0}, + {"InotifyEvent.Len", Field, 0}, + {"InotifyEvent.Mask", Field, 0}, + {"InotifyEvent.Name", Field, 0}, + {"InotifyEvent.Wd", Field, 0}, + {"InotifyInit", Func, 0}, + {"InotifyInit1", Func, 0}, + {"InotifyRmWatch", Func, 0}, + {"InterfaceAddrMessage", Type, 0}, + {"InterfaceAddrMessage.Data", Field, 0}, + {"InterfaceAddrMessage.Header", Field, 0}, + {"InterfaceAnnounceMessage", Type, 1}, + {"InterfaceAnnounceMessage.Header", Field, 1}, + {"InterfaceInfo", Type, 0}, + {"InterfaceInfo.Address", Field, 0}, + {"InterfaceInfo.BroadcastAddress", Field, 0}, + {"InterfaceInfo.Flags", Field, 0}, + {"InterfaceInfo.Netmask", Field, 0}, + {"InterfaceMessage", Type, 0}, + {"InterfaceMessage.Data", Field, 0}, + {"InterfaceMessage.Header", Field, 0}, + {"InterfaceMulticastAddrMessage", Type, 0}, + {"InterfaceMulticastAddrMessage.Data", Field, 0}, + {"InterfaceMulticastAddrMessage.Header", Field, 0}, + {"InvalidHandle", Const, 0}, + {"Ioperm", Func, 0}, + {"Iopl", Func, 0}, + {"Iovec", Type, 0}, + {"Iovec.Base", Field, 0}, + {"Iovec.Len", Field, 0}, + {"IpAdapterInfo", Type, 0}, + {"IpAdapterInfo.AdapterName", Field, 0}, + {"IpAdapterInfo.Address", Field, 0}, + {"IpAdapterInfo.AddressLength", Field, 0}, + {"IpAdapterInfo.ComboIndex", Field, 0}, + {"IpAdapterInfo.CurrentIpAddress", Field, 0}, + {"IpAdapterInfo.Description", Field, 0}, + {"IpAdapterInfo.DhcpEnabled", Field, 0}, + {"IpAdapterInfo.DhcpServer", Field, 0}, + {"IpAdapterInfo.GatewayList", Field, 0}, + {"IpAdapterInfo.HaveWins", Field, 0}, + {"IpAdapterInfo.Index", Field, 0}, + {"IpAdapterInfo.IpAddressList", Field, 0}, + {"IpAdapterInfo.LeaseExpires", Field, 0}, + {"IpAdapterInfo.LeaseObtained", Field, 0}, + {"IpAdapterInfo.Next", Field, 0}, + {"IpAdapterInfo.PrimaryWinsServer", Field, 0}, + {"IpAdapterInfo.SecondaryWinsServer", Field, 0}, + {"IpAdapterInfo.Type", Field, 0}, + {"IpAddrString", Type, 0}, + {"IpAddrString.Context", Field, 0}, + {"IpAddrString.IpAddress", Field, 0}, + {"IpAddrString.IpMask", Field, 0}, + {"IpAddrString.Next", Field, 0}, + {"IpAddressString", Type, 0}, + {"IpAddressString.String", Field, 0}, + {"IpMaskString", Type, 0}, + {"IpMaskString.String", Field, 2}, + {"Issetugid", Func, 0}, + {"KEY_ALL_ACCESS", Const, 0}, + {"KEY_CREATE_LINK", Const, 0}, + {"KEY_CREATE_SUB_KEY", Const, 0}, + {"KEY_ENUMERATE_SUB_KEYS", Const, 0}, + {"KEY_EXECUTE", Const, 0}, + {"KEY_NOTIFY", Const, 0}, + {"KEY_QUERY_VALUE", Const, 0}, + {"KEY_READ", Const, 0}, + {"KEY_SET_VALUE", Const, 0}, + {"KEY_WOW64_32KEY", Const, 0}, + {"KEY_WOW64_64KEY", Const, 0}, + {"KEY_WRITE", Const, 0}, + {"Kevent", Func, 0}, + {"Kevent_t", Type, 0}, + {"Kevent_t.Data", Field, 0}, + {"Kevent_t.Fflags", 
Field, 0}, + {"Kevent_t.Filter", Field, 0}, + {"Kevent_t.Flags", Field, 0}, + {"Kevent_t.Ident", Field, 0}, + {"Kevent_t.Pad_cgo_0", Field, 2}, + {"Kevent_t.Udata", Field, 0}, + {"Kill", Func, 0}, + {"Klogctl", Func, 0}, + {"Kqueue", Func, 0}, + {"LANG_ENGLISH", Const, 0}, + {"LAYERED_PROTOCOL", Const, 2}, + {"LCNT_OVERLOAD_FLUSH", Const, 1}, + {"LINUX_REBOOT_CMD_CAD_OFF", Const, 0}, + {"LINUX_REBOOT_CMD_CAD_ON", Const, 0}, + {"LINUX_REBOOT_CMD_HALT", Const, 0}, + {"LINUX_REBOOT_CMD_KEXEC", Const, 0}, + {"LINUX_REBOOT_CMD_POWER_OFF", Const, 0}, + {"LINUX_REBOOT_CMD_RESTART", Const, 0}, + {"LINUX_REBOOT_CMD_RESTART2", Const, 0}, + {"LINUX_REBOOT_CMD_SW_SUSPEND", Const, 0}, + {"LINUX_REBOOT_MAGIC1", Const, 0}, + {"LINUX_REBOOT_MAGIC2", Const, 0}, + {"LOCK_EX", Const, 0}, + {"LOCK_NB", Const, 0}, + {"LOCK_SH", Const, 0}, + {"LOCK_UN", Const, 0}, + {"LazyDLL", Type, 0}, + {"LazyDLL.Name", Field, 0}, + {"LazyProc", Type, 0}, + {"LazyProc.Name", Field, 0}, + {"Lchown", Func, 0}, + {"Linger", Type, 0}, + {"Linger.Linger", Field, 0}, + {"Linger.Onoff", Field, 0}, + {"Link", Func, 0}, + {"Listen", Func, 0}, + {"Listxattr", Func, 1}, + {"LoadCancelIoEx", Func, 1}, + {"LoadConnectEx", Func, 1}, + {"LoadCreateSymbolicLink", Func, 4}, + {"LoadDLL", Func, 0}, + {"LoadGetAddrInfo", Func, 1}, + {"LoadLibrary", Func, 0}, + {"LoadSetFileCompletionNotificationModes", Func, 2}, + {"LocalFree", Func, 0}, + {"Log2phys_t", Type, 0}, + {"Log2phys_t.Contigbytes", Field, 0}, + {"Log2phys_t.Devoffset", Field, 0}, + {"Log2phys_t.Flags", Field, 0}, + {"LookupAccountName", Func, 0}, + {"LookupAccountSid", Func, 0}, + {"LookupSID", Func, 0}, + {"LsfJump", Func, 0}, + {"LsfSocket", Func, 0}, + {"LsfStmt", Func, 0}, + {"Lstat", Func, 0}, + {"MADV_AUTOSYNC", Const, 1}, + {"MADV_CAN_REUSE", Const, 0}, + {"MADV_CORE", Const, 1}, + {"MADV_DOFORK", Const, 0}, + {"MADV_DONTFORK", Const, 0}, + {"MADV_DONTNEED", Const, 0}, + {"MADV_FREE", Const, 0}, + {"MADV_FREE_REUSABLE", Const, 0}, + {"MADV_FREE_REUSE", Const, 0}, + {"MADV_HUGEPAGE", Const, 0}, + {"MADV_HWPOISON", Const, 0}, + {"MADV_MERGEABLE", Const, 0}, + {"MADV_NOCORE", Const, 1}, + {"MADV_NOHUGEPAGE", Const, 0}, + {"MADV_NORMAL", Const, 0}, + {"MADV_NOSYNC", Const, 1}, + {"MADV_PROTECT", Const, 1}, + {"MADV_RANDOM", Const, 0}, + {"MADV_REMOVE", Const, 0}, + {"MADV_SEQUENTIAL", Const, 0}, + {"MADV_SPACEAVAIL", Const, 3}, + {"MADV_UNMERGEABLE", Const, 0}, + {"MADV_WILLNEED", Const, 0}, + {"MADV_ZERO_WIRED_PAGES", Const, 0}, + {"MAP_32BIT", Const, 0}, + {"MAP_ALIGNED_SUPER", Const, 3}, + {"MAP_ALIGNMENT_16MB", Const, 3}, + {"MAP_ALIGNMENT_1TB", Const, 3}, + {"MAP_ALIGNMENT_256TB", Const, 3}, + {"MAP_ALIGNMENT_4GB", Const, 3}, + {"MAP_ALIGNMENT_64KB", Const, 3}, + {"MAP_ALIGNMENT_64PB", Const, 3}, + {"MAP_ALIGNMENT_MASK", Const, 3}, + {"MAP_ALIGNMENT_SHIFT", Const, 3}, + {"MAP_ANON", Const, 0}, + {"MAP_ANONYMOUS", Const, 0}, + {"MAP_COPY", Const, 0}, + {"MAP_DENYWRITE", Const, 0}, + {"MAP_EXECUTABLE", Const, 0}, + {"MAP_FILE", Const, 0}, + {"MAP_FIXED", Const, 0}, + {"MAP_FLAGMASK", Const, 3}, + {"MAP_GROWSDOWN", Const, 0}, + {"MAP_HASSEMAPHORE", Const, 0}, + {"MAP_HUGETLB", Const, 0}, + {"MAP_INHERIT", Const, 3}, + {"MAP_INHERIT_COPY", Const, 3}, + {"MAP_INHERIT_DEFAULT", Const, 3}, + {"MAP_INHERIT_DONATE_COPY", Const, 3}, + {"MAP_INHERIT_NONE", Const, 3}, + {"MAP_INHERIT_SHARE", Const, 3}, + {"MAP_JIT", Const, 0}, + {"MAP_LOCKED", Const, 0}, + {"MAP_NOCACHE", Const, 0}, + {"MAP_NOCORE", Const, 1}, + {"MAP_NOEXTEND", Const, 0}, + {"MAP_NONBLOCK", Const, 0}, + 
{"MAP_NORESERVE", Const, 0}, + {"MAP_NOSYNC", Const, 1}, + {"MAP_POPULATE", Const, 0}, + {"MAP_PREFAULT_READ", Const, 1}, + {"MAP_PRIVATE", Const, 0}, + {"MAP_RENAME", Const, 0}, + {"MAP_RESERVED0080", Const, 0}, + {"MAP_RESERVED0100", Const, 1}, + {"MAP_SHARED", Const, 0}, + {"MAP_STACK", Const, 0}, + {"MAP_TRYFIXED", Const, 3}, + {"MAP_TYPE", Const, 0}, + {"MAP_WIRED", Const, 3}, + {"MAXIMUM_REPARSE_DATA_BUFFER_SIZE", Const, 4}, + {"MAXLEN_IFDESCR", Const, 0}, + {"MAXLEN_PHYSADDR", Const, 0}, + {"MAX_ADAPTER_ADDRESS_LENGTH", Const, 0}, + {"MAX_ADAPTER_DESCRIPTION_LENGTH", Const, 0}, + {"MAX_ADAPTER_NAME_LENGTH", Const, 0}, + {"MAX_COMPUTERNAME_LENGTH", Const, 0}, + {"MAX_INTERFACE_NAME_LEN", Const, 0}, + {"MAX_LONG_PATH", Const, 0}, + {"MAX_PATH", Const, 0}, + {"MAX_PROTOCOL_CHAIN", Const, 2}, + {"MCL_CURRENT", Const, 0}, + {"MCL_FUTURE", Const, 0}, + {"MNT_DETACH", Const, 0}, + {"MNT_EXPIRE", Const, 0}, + {"MNT_FORCE", Const, 0}, + {"MSG_BCAST", Const, 1}, + {"MSG_CMSG_CLOEXEC", Const, 0}, + {"MSG_COMPAT", Const, 0}, + {"MSG_CONFIRM", Const, 0}, + {"MSG_CONTROLMBUF", Const, 1}, + {"MSG_CTRUNC", Const, 0}, + {"MSG_DONTROUTE", Const, 0}, + {"MSG_DONTWAIT", Const, 0}, + {"MSG_EOF", Const, 0}, + {"MSG_EOR", Const, 0}, + {"MSG_ERRQUEUE", Const, 0}, + {"MSG_FASTOPEN", Const, 1}, + {"MSG_FIN", Const, 0}, + {"MSG_FLUSH", Const, 0}, + {"MSG_HAVEMORE", Const, 0}, + {"MSG_HOLD", Const, 0}, + {"MSG_IOVUSRSPACE", Const, 1}, + {"MSG_LENUSRSPACE", Const, 1}, + {"MSG_MCAST", Const, 1}, + {"MSG_MORE", Const, 0}, + {"MSG_NAMEMBUF", Const, 1}, + {"MSG_NBIO", Const, 0}, + {"MSG_NEEDSA", Const, 0}, + {"MSG_NOSIGNAL", Const, 0}, + {"MSG_NOTIFICATION", Const, 0}, + {"MSG_OOB", Const, 0}, + {"MSG_PEEK", Const, 0}, + {"MSG_PROXY", Const, 0}, + {"MSG_RCVMORE", Const, 0}, + {"MSG_RST", Const, 0}, + {"MSG_SEND", Const, 0}, + {"MSG_SYN", Const, 0}, + {"MSG_TRUNC", Const, 0}, + {"MSG_TRYHARD", Const, 0}, + {"MSG_USERFLAGS", Const, 1}, + {"MSG_WAITALL", Const, 0}, + {"MSG_WAITFORONE", Const, 0}, + {"MSG_WAITSTREAM", Const, 0}, + {"MS_ACTIVE", Const, 0}, + {"MS_ASYNC", Const, 0}, + {"MS_BIND", Const, 0}, + {"MS_DEACTIVATE", Const, 0}, + {"MS_DIRSYNC", Const, 0}, + {"MS_INVALIDATE", Const, 0}, + {"MS_I_VERSION", Const, 0}, + {"MS_KERNMOUNT", Const, 0}, + {"MS_KILLPAGES", Const, 0}, + {"MS_MANDLOCK", Const, 0}, + {"MS_MGC_MSK", Const, 0}, + {"MS_MGC_VAL", Const, 0}, + {"MS_MOVE", Const, 0}, + {"MS_NOATIME", Const, 0}, + {"MS_NODEV", Const, 0}, + {"MS_NODIRATIME", Const, 0}, + {"MS_NOEXEC", Const, 0}, + {"MS_NOSUID", Const, 0}, + {"MS_NOUSER", Const, 0}, + {"MS_POSIXACL", Const, 0}, + {"MS_PRIVATE", Const, 0}, + {"MS_RDONLY", Const, 0}, + {"MS_REC", Const, 0}, + {"MS_RELATIME", Const, 0}, + {"MS_REMOUNT", Const, 0}, + {"MS_RMT_MASK", Const, 0}, + {"MS_SHARED", Const, 0}, + {"MS_SILENT", Const, 0}, + {"MS_SLAVE", Const, 0}, + {"MS_STRICTATIME", Const, 0}, + {"MS_SYNC", Const, 0}, + {"MS_SYNCHRONOUS", Const, 0}, + {"MS_UNBINDABLE", Const, 0}, + {"Madvise", Func, 0}, + {"MapViewOfFile", Func, 0}, + {"MaxTokenInfoClass", Const, 0}, + {"Mclpool", Type, 2}, + {"Mclpool.Alive", Field, 2}, + {"Mclpool.Cwm", Field, 2}, + {"Mclpool.Grown", Field, 2}, + {"Mclpool.Hwm", Field, 2}, + {"Mclpool.Lwm", Field, 2}, + {"MibIfRow", Type, 0}, + {"MibIfRow.AdminStatus", Field, 0}, + {"MibIfRow.Descr", Field, 0}, + {"MibIfRow.DescrLen", Field, 0}, + {"MibIfRow.InDiscards", Field, 0}, + {"MibIfRow.InErrors", Field, 0}, + {"MibIfRow.InNUcastPkts", Field, 0}, + {"MibIfRow.InOctets", Field, 0}, + {"MibIfRow.InUcastPkts", Field, 0}, + 
{"MibIfRow.InUnknownProtos", Field, 0}, + {"MibIfRow.Index", Field, 0}, + {"MibIfRow.LastChange", Field, 0}, + {"MibIfRow.Mtu", Field, 0}, + {"MibIfRow.Name", Field, 0}, + {"MibIfRow.OperStatus", Field, 0}, + {"MibIfRow.OutDiscards", Field, 0}, + {"MibIfRow.OutErrors", Field, 0}, + {"MibIfRow.OutNUcastPkts", Field, 0}, + {"MibIfRow.OutOctets", Field, 0}, + {"MibIfRow.OutQLen", Field, 0}, + {"MibIfRow.OutUcastPkts", Field, 0}, + {"MibIfRow.PhysAddr", Field, 0}, + {"MibIfRow.PhysAddrLen", Field, 0}, + {"MibIfRow.Speed", Field, 0}, + {"MibIfRow.Type", Field, 0}, + {"Mkdir", Func, 0}, + {"Mkdirat", Func, 0}, + {"Mkfifo", Func, 0}, + {"Mknod", Func, 0}, + {"Mknodat", Func, 0}, + {"Mlock", Func, 0}, + {"Mlockall", Func, 0}, + {"Mmap", Func, 0}, + {"Mount", Func, 0}, + {"MoveFile", Func, 0}, + {"Mprotect", Func, 0}, + {"Msghdr", Type, 0}, + {"Msghdr.Control", Field, 0}, + {"Msghdr.Controllen", Field, 0}, + {"Msghdr.Flags", Field, 0}, + {"Msghdr.Iov", Field, 0}, + {"Msghdr.Iovlen", Field, 0}, + {"Msghdr.Name", Field, 0}, + {"Msghdr.Namelen", Field, 0}, + {"Msghdr.Pad_cgo_0", Field, 0}, + {"Msghdr.Pad_cgo_1", Field, 0}, + {"Munlock", Func, 0}, + {"Munlockall", Func, 0}, + {"Munmap", Func, 0}, + {"MustLoadDLL", Func, 0}, + {"NAME_MAX", Const, 0}, + {"NETLINK_ADD_MEMBERSHIP", Const, 0}, + {"NETLINK_AUDIT", Const, 0}, + {"NETLINK_BROADCAST_ERROR", Const, 0}, + {"NETLINK_CONNECTOR", Const, 0}, + {"NETLINK_DNRTMSG", Const, 0}, + {"NETLINK_DROP_MEMBERSHIP", Const, 0}, + {"NETLINK_ECRYPTFS", Const, 0}, + {"NETLINK_FIB_LOOKUP", Const, 0}, + {"NETLINK_FIREWALL", Const, 0}, + {"NETLINK_GENERIC", Const, 0}, + {"NETLINK_INET_DIAG", Const, 0}, + {"NETLINK_IP6_FW", Const, 0}, + {"NETLINK_ISCSI", Const, 0}, + {"NETLINK_KOBJECT_UEVENT", Const, 0}, + {"NETLINK_NETFILTER", Const, 0}, + {"NETLINK_NFLOG", Const, 0}, + {"NETLINK_NO_ENOBUFS", Const, 0}, + {"NETLINK_PKTINFO", Const, 0}, + {"NETLINK_RDMA", Const, 0}, + {"NETLINK_ROUTE", Const, 0}, + {"NETLINK_SCSITRANSPORT", Const, 0}, + {"NETLINK_SELINUX", Const, 0}, + {"NETLINK_UNUSED", Const, 0}, + {"NETLINK_USERSOCK", Const, 0}, + {"NETLINK_XFRM", Const, 0}, + {"NET_RT_DUMP", Const, 0}, + {"NET_RT_DUMP2", Const, 0}, + {"NET_RT_FLAGS", Const, 0}, + {"NET_RT_IFLIST", Const, 0}, + {"NET_RT_IFLIST2", Const, 0}, + {"NET_RT_IFLISTL", Const, 1}, + {"NET_RT_IFMALIST", Const, 0}, + {"NET_RT_MAXID", Const, 0}, + {"NET_RT_OIFLIST", Const, 1}, + {"NET_RT_OOIFLIST", Const, 1}, + {"NET_RT_STAT", Const, 0}, + {"NET_RT_STATS", Const, 1}, + {"NET_RT_TABLE", Const, 1}, + {"NET_RT_TRASH", Const, 0}, + {"NLA_ALIGNTO", Const, 0}, + {"NLA_F_NESTED", Const, 0}, + {"NLA_F_NET_BYTEORDER", Const, 0}, + {"NLA_HDRLEN", Const, 0}, + {"NLMSG_ALIGNTO", Const, 0}, + {"NLMSG_DONE", Const, 0}, + {"NLMSG_ERROR", Const, 0}, + {"NLMSG_HDRLEN", Const, 0}, + {"NLMSG_MIN_TYPE", Const, 0}, + {"NLMSG_NOOP", Const, 0}, + {"NLMSG_OVERRUN", Const, 0}, + {"NLM_F_ACK", Const, 0}, + {"NLM_F_APPEND", Const, 0}, + {"NLM_F_ATOMIC", Const, 0}, + {"NLM_F_CREATE", Const, 0}, + {"NLM_F_DUMP", Const, 0}, + {"NLM_F_ECHO", Const, 0}, + {"NLM_F_EXCL", Const, 0}, + {"NLM_F_MATCH", Const, 0}, + {"NLM_F_MULTI", Const, 0}, + {"NLM_F_REPLACE", Const, 0}, + {"NLM_F_REQUEST", Const, 0}, + {"NLM_F_ROOT", Const, 0}, + {"NOFLSH", Const, 0}, + {"NOTE_ABSOLUTE", Const, 0}, + {"NOTE_ATTRIB", Const, 0}, + {"NOTE_BACKGROUND", Const, 16}, + {"NOTE_CHILD", Const, 0}, + {"NOTE_CRITICAL", Const, 16}, + {"NOTE_DELETE", Const, 0}, + {"NOTE_EOF", Const, 1}, + {"NOTE_EXEC", Const, 0}, + {"NOTE_EXIT", Const, 0}, + {"NOTE_EXITSTATUS", Const, 0}, + 
{"NOTE_EXIT_CSERROR", Const, 16}, + {"NOTE_EXIT_DECRYPTFAIL", Const, 16}, + {"NOTE_EXIT_DETAIL", Const, 16}, + {"NOTE_EXIT_DETAIL_MASK", Const, 16}, + {"NOTE_EXIT_MEMORY", Const, 16}, + {"NOTE_EXIT_REPARENTED", Const, 16}, + {"NOTE_EXTEND", Const, 0}, + {"NOTE_FFAND", Const, 0}, + {"NOTE_FFCOPY", Const, 0}, + {"NOTE_FFCTRLMASK", Const, 0}, + {"NOTE_FFLAGSMASK", Const, 0}, + {"NOTE_FFNOP", Const, 0}, + {"NOTE_FFOR", Const, 0}, + {"NOTE_FORK", Const, 0}, + {"NOTE_LEEWAY", Const, 16}, + {"NOTE_LINK", Const, 0}, + {"NOTE_LOWAT", Const, 0}, + {"NOTE_NONE", Const, 0}, + {"NOTE_NSECONDS", Const, 0}, + {"NOTE_PCTRLMASK", Const, 0}, + {"NOTE_PDATAMASK", Const, 0}, + {"NOTE_REAP", Const, 0}, + {"NOTE_RENAME", Const, 0}, + {"NOTE_RESOURCEEND", Const, 0}, + {"NOTE_REVOKE", Const, 0}, + {"NOTE_SECONDS", Const, 0}, + {"NOTE_SIGNAL", Const, 0}, + {"NOTE_TRACK", Const, 0}, + {"NOTE_TRACKERR", Const, 0}, + {"NOTE_TRIGGER", Const, 0}, + {"NOTE_TRUNCATE", Const, 1}, + {"NOTE_USECONDS", Const, 0}, + {"NOTE_VM_ERROR", Const, 0}, + {"NOTE_VM_PRESSURE", Const, 0}, + {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", Const, 0}, + {"NOTE_VM_PRESSURE_TERMINATE", Const, 0}, + {"NOTE_WRITE", Const, 0}, + {"NameCanonical", Const, 0}, + {"NameCanonicalEx", Const, 0}, + {"NameDisplay", Const, 0}, + {"NameDnsDomain", Const, 0}, + {"NameFullyQualifiedDN", Const, 0}, + {"NameSamCompatible", Const, 0}, + {"NameServicePrincipal", Const, 0}, + {"NameUniqueId", Const, 0}, + {"NameUnknown", Const, 0}, + {"NameUserPrincipal", Const, 0}, + {"Nanosleep", Func, 0}, + {"NetApiBufferFree", Func, 0}, + {"NetGetJoinInformation", Func, 2}, + {"NetSetupDomainName", Const, 2}, + {"NetSetupUnjoined", Const, 2}, + {"NetSetupUnknownStatus", Const, 2}, + {"NetSetupWorkgroupName", Const, 2}, + {"NetUserGetInfo", Func, 0}, + {"NetlinkMessage", Type, 0}, + {"NetlinkMessage.Data", Field, 0}, + {"NetlinkMessage.Header", Field, 0}, + {"NetlinkRIB", Func, 0}, + {"NetlinkRouteAttr", Type, 0}, + {"NetlinkRouteAttr.Attr", Field, 0}, + {"NetlinkRouteAttr.Value", Field, 0}, + {"NetlinkRouteRequest", Type, 0}, + {"NetlinkRouteRequest.Data", Field, 0}, + {"NetlinkRouteRequest.Header", Field, 0}, + {"NewCallback", Func, 0}, + {"NewCallbackCDecl", Func, 3}, + {"NewLazyDLL", Func, 0}, + {"NlAttr", Type, 0}, + {"NlAttr.Len", Field, 0}, + {"NlAttr.Type", Field, 0}, + {"NlMsgerr", Type, 0}, + {"NlMsgerr.Error", Field, 0}, + {"NlMsgerr.Msg", Field, 0}, + {"NlMsghdr", Type, 0}, + {"NlMsghdr.Flags", Field, 0}, + {"NlMsghdr.Len", Field, 0}, + {"NlMsghdr.Pid", Field, 0}, + {"NlMsghdr.Seq", Field, 0}, + {"NlMsghdr.Type", Field, 0}, + {"NsecToFiletime", Func, 0}, + {"NsecToTimespec", Func, 0}, + {"NsecToTimeval", Func, 0}, + {"Ntohs", Func, 0}, + {"OCRNL", Const, 0}, + {"OFDEL", Const, 0}, + {"OFILL", Const, 0}, + {"OFIOGETBMAP", Const, 1}, + {"OID_PKIX_KP_SERVER_AUTH", Var, 0}, + {"OID_SERVER_GATED_CRYPTO", Var, 0}, + {"OID_SGC_NETSCAPE", Var, 0}, + {"OLCUC", Const, 0}, + {"ONLCR", Const, 0}, + {"ONLRET", Const, 0}, + {"ONOCR", Const, 0}, + {"ONOEOT", Const, 1}, + {"OPEN_ALWAYS", Const, 0}, + {"OPEN_EXISTING", Const, 0}, + {"OPOST", Const, 0}, + {"O_ACCMODE", Const, 0}, + {"O_ALERT", Const, 0}, + {"O_ALT_IO", Const, 1}, + {"O_APPEND", Const, 0}, + {"O_ASYNC", Const, 0}, + {"O_CLOEXEC", Const, 0}, + {"O_CREAT", Const, 0}, + {"O_DIRECT", Const, 0}, + {"O_DIRECTORY", Const, 0}, + {"O_DP_GETRAWENCRYPTED", Const, 16}, + {"O_DSYNC", Const, 0}, + {"O_EVTONLY", Const, 0}, + {"O_EXCL", Const, 0}, + {"O_EXEC", Const, 0}, + {"O_EXLOCK", Const, 0}, + {"O_FSYNC", Const, 0}, + {"O_LARGEFILE", 
Const, 0}, + {"O_NDELAY", Const, 0}, + {"O_NOATIME", Const, 0}, + {"O_NOCTTY", Const, 0}, + {"O_NOFOLLOW", Const, 0}, + {"O_NONBLOCK", Const, 0}, + {"O_NOSIGPIPE", Const, 1}, + {"O_POPUP", Const, 0}, + {"O_RDONLY", Const, 0}, + {"O_RDWR", Const, 0}, + {"O_RSYNC", Const, 0}, + {"O_SHLOCK", Const, 0}, + {"O_SYMLINK", Const, 0}, + {"O_SYNC", Const, 0}, + {"O_TRUNC", Const, 0}, + {"O_TTY_INIT", Const, 0}, + {"O_WRONLY", Const, 0}, + {"Open", Func, 0}, + {"OpenCurrentProcessToken", Func, 0}, + {"OpenProcess", Func, 0}, + {"OpenProcessToken", Func, 0}, + {"Openat", Func, 0}, + {"Overlapped", Type, 0}, + {"Overlapped.HEvent", Field, 0}, + {"Overlapped.Internal", Field, 0}, + {"Overlapped.InternalHigh", Field, 0}, + {"Overlapped.Offset", Field, 0}, + {"Overlapped.OffsetHigh", Field, 0}, + {"PACKET_ADD_MEMBERSHIP", Const, 0}, + {"PACKET_BROADCAST", Const, 0}, + {"PACKET_DROP_MEMBERSHIP", Const, 0}, + {"PACKET_FASTROUTE", Const, 0}, + {"PACKET_HOST", Const, 0}, + {"PACKET_LOOPBACK", Const, 0}, + {"PACKET_MR_ALLMULTI", Const, 0}, + {"PACKET_MR_MULTICAST", Const, 0}, + {"PACKET_MR_PROMISC", Const, 0}, + {"PACKET_MULTICAST", Const, 0}, + {"PACKET_OTHERHOST", Const, 0}, + {"PACKET_OUTGOING", Const, 0}, + {"PACKET_RECV_OUTPUT", Const, 0}, + {"PACKET_RX_RING", Const, 0}, + {"PACKET_STATISTICS", Const, 0}, + {"PAGE_EXECUTE_READ", Const, 0}, + {"PAGE_EXECUTE_READWRITE", Const, 0}, + {"PAGE_EXECUTE_WRITECOPY", Const, 0}, + {"PAGE_READONLY", Const, 0}, + {"PAGE_READWRITE", Const, 0}, + {"PAGE_WRITECOPY", Const, 0}, + {"PARENB", Const, 0}, + {"PARMRK", Const, 0}, + {"PARODD", Const, 0}, + {"PENDIN", Const, 0}, + {"PFL_HIDDEN", Const, 2}, + {"PFL_MATCHES_PROTOCOL_ZERO", Const, 2}, + {"PFL_MULTIPLE_PROTO_ENTRIES", Const, 2}, + {"PFL_NETWORKDIRECT_PROVIDER", Const, 2}, + {"PFL_RECOMMENDED_PROTO_ENTRY", Const, 2}, + {"PF_FLUSH", Const, 1}, + {"PKCS_7_ASN_ENCODING", Const, 0}, + {"PMC5_PIPELINE_FLUSH", Const, 1}, + {"PRIO_PGRP", Const, 2}, + {"PRIO_PROCESS", Const, 2}, + {"PRIO_USER", Const, 2}, + {"PRI_IOFLUSH", Const, 1}, + {"PROCESS_QUERY_INFORMATION", Const, 0}, + {"PROCESS_TERMINATE", Const, 2}, + {"PROT_EXEC", Const, 0}, + {"PROT_GROWSDOWN", Const, 0}, + {"PROT_GROWSUP", Const, 0}, + {"PROT_NONE", Const, 0}, + {"PROT_READ", Const, 0}, + {"PROT_WRITE", Const, 0}, + {"PROV_DH_SCHANNEL", Const, 0}, + {"PROV_DSS", Const, 0}, + {"PROV_DSS_DH", Const, 0}, + {"PROV_EC_ECDSA_FULL", Const, 0}, + {"PROV_EC_ECDSA_SIG", Const, 0}, + {"PROV_EC_ECNRA_FULL", Const, 0}, + {"PROV_EC_ECNRA_SIG", Const, 0}, + {"PROV_FORTEZZA", Const, 0}, + {"PROV_INTEL_SEC", Const, 0}, + {"PROV_MS_EXCHANGE", Const, 0}, + {"PROV_REPLACE_OWF", Const, 0}, + {"PROV_RNG", Const, 0}, + {"PROV_RSA_AES", Const, 0}, + {"PROV_RSA_FULL", Const, 0}, + {"PROV_RSA_SCHANNEL", Const, 0}, + {"PROV_RSA_SIG", Const, 0}, + {"PROV_SPYRUS_LYNKS", Const, 0}, + {"PROV_SSL", Const, 0}, + {"PR_CAPBSET_DROP", Const, 0}, + {"PR_CAPBSET_READ", Const, 0}, + {"PR_CLEAR_SECCOMP_FILTER", Const, 0}, + {"PR_ENDIAN_BIG", Const, 0}, + {"PR_ENDIAN_LITTLE", Const, 0}, + {"PR_ENDIAN_PPC_LITTLE", Const, 0}, + {"PR_FPEMU_NOPRINT", Const, 0}, + {"PR_FPEMU_SIGFPE", Const, 0}, + {"PR_FP_EXC_ASYNC", Const, 0}, + {"PR_FP_EXC_DISABLED", Const, 0}, + {"PR_FP_EXC_DIV", Const, 0}, + {"PR_FP_EXC_INV", Const, 0}, + {"PR_FP_EXC_NONRECOV", Const, 0}, + {"PR_FP_EXC_OVF", Const, 0}, + {"PR_FP_EXC_PRECISE", Const, 0}, + {"PR_FP_EXC_RES", Const, 0}, + {"PR_FP_EXC_SW_ENABLE", Const, 0}, + {"PR_FP_EXC_UND", Const, 0}, + {"PR_GET_DUMPABLE", Const, 0}, + {"PR_GET_ENDIAN", Const, 0}, + {"PR_GET_FPEMU", 
Const, 0}, + {"PR_GET_FPEXC", Const, 0}, + {"PR_GET_KEEPCAPS", Const, 0}, + {"PR_GET_NAME", Const, 0}, + {"PR_GET_PDEATHSIG", Const, 0}, + {"PR_GET_SECCOMP", Const, 0}, + {"PR_GET_SECCOMP_FILTER", Const, 0}, + {"PR_GET_SECUREBITS", Const, 0}, + {"PR_GET_TIMERSLACK", Const, 0}, + {"PR_GET_TIMING", Const, 0}, + {"PR_GET_TSC", Const, 0}, + {"PR_GET_UNALIGN", Const, 0}, + {"PR_MCE_KILL", Const, 0}, + {"PR_MCE_KILL_CLEAR", Const, 0}, + {"PR_MCE_KILL_DEFAULT", Const, 0}, + {"PR_MCE_KILL_EARLY", Const, 0}, + {"PR_MCE_KILL_GET", Const, 0}, + {"PR_MCE_KILL_LATE", Const, 0}, + {"PR_MCE_KILL_SET", Const, 0}, + {"PR_SECCOMP_FILTER_EVENT", Const, 0}, + {"PR_SECCOMP_FILTER_SYSCALL", Const, 0}, + {"PR_SET_DUMPABLE", Const, 0}, + {"PR_SET_ENDIAN", Const, 0}, + {"PR_SET_FPEMU", Const, 0}, + {"PR_SET_FPEXC", Const, 0}, + {"PR_SET_KEEPCAPS", Const, 0}, + {"PR_SET_NAME", Const, 0}, + {"PR_SET_PDEATHSIG", Const, 0}, + {"PR_SET_PTRACER", Const, 0}, + {"PR_SET_SECCOMP", Const, 0}, + {"PR_SET_SECCOMP_FILTER", Const, 0}, + {"PR_SET_SECUREBITS", Const, 0}, + {"PR_SET_TIMERSLACK", Const, 0}, + {"PR_SET_TIMING", Const, 0}, + {"PR_SET_TSC", Const, 0}, + {"PR_SET_UNALIGN", Const, 0}, + {"PR_TASK_PERF_EVENTS_DISABLE", Const, 0}, + {"PR_TASK_PERF_EVENTS_ENABLE", Const, 0}, + {"PR_TIMING_STATISTICAL", Const, 0}, + {"PR_TIMING_TIMESTAMP", Const, 0}, + {"PR_TSC_ENABLE", Const, 0}, + {"PR_TSC_SIGSEGV", Const, 0}, + {"PR_UNALIGN_NOPRINT", Const, 0}, + {"PR_UNALIGN_SIGBUS", Const, 0}, + {"PTRACE_ARCH_PRCTL", Const, 0}, + {"PTRACE_ATTACH", Const, 0}, + {"PTRACE_CONT", Const, 0}, + {"PTRACE_DETACH", Const, 0}, + {"PTRACE_EVENT_CLONE", Const, 0}, + {"PTRACE_EVENT_EXEC", Const, 0}, + {"PTRACE_EVENT_EXIT", Const, 0}, + {"PTRACE_EVENT_FORK", Const, 0}, + {"PTRACE_EVENT_VFORK", Const, 0}, + {"PTRACE_EVENT_VFORK_DONE", Const, 0}, + {"PTRACE_GETCRUNCHREGS", Const, 0}, + {"PTRACE_GETEVENTMSG", Const, 0}, + {"PTRACE_GETFPREGS", Const, 0}, + {"PTRACE_GETFPXREGS", Const, 0}, + {"PTRACE_GETHBPREGS", Const, 0}, + {"PTRACE_GETREGS", Const, 0}, + {"PTRACE_GETREGSET", Const, 0}, + {"PTRACE_GETSIGINFO", Const, 0}, + {"PTRACE_GETVFPREGS", Const, 0}, + {"PTRACE_GETWMMXREGS", Const, 0}, + {"PTRACE_GET_THREAD_AREA", Const, 0}, + {"PTRACE_KILL", Const, 0}, + {"PTRACE_OLDSETOPTIONS", Const, 0}, + {"PTRACE_O_MASK", Const, 0}, + {"PTRACE_O_TRACECLONE", Const, 0}, + {"PTRACE_O_TRACEEXEC", Const, 0}, + {"PTRACE_O_TRACEEXIT", Const, 0}, + {"PTRACE_O_TRACEFORK", Const, 0}, + {"PTRACE_O_TRACESYSGOOD", Const, 0}, + {"PTRACE_O_TRACEVFORK", Const, 0}, + {"PTRACE_O_TRACEVFORKDONE", Const, 0}, + {"PTRACE_PEEKDATA", Const, 0}, + {"PTRACE_PEEKTEXT", Const, 0}, + {"PTRACE_PEEKUSR", Const, 0}, + {"PTRACE_POKEDATA", Const, 0}, + {"PTRACE_POKETEXT", Const, 0}, + {"PTRACE_POKEUSR", Const, 0}, + {"PTRACE_SETCRUNCHREGS", Const, 0}, + {"PTRACE_SETFPREGS", Const, 0}, + {"PTRACE_SETFPXREGS", Const, 0}, + {"PTRACE_SETHBPREGS", Const, 0}, + {"PTRACE_SETOPTIONS", Const, 0}, + {"PTRACE_SETREGS", Const, 0}, + {"PTRACE_SETREGSET", Const, 0}, + {"PTRACE_SETSIGINFO", Const, 0}, + {"PTRACE_SETVFPREGS", Const, 0}, + {"PTRACE_SETWMMXREGS", Const, 0}, + {"PTRACE_SET_SYSCALL", Const, 0}, + {"PTRACE_SET_THREAD_AREA", Const, 0}, + {"PTRACE_SINGLEBLOCK", Const, 0}, + {"PTRACE_SINGLESTEP", Const, 0}, + {"PTRACE_SYSCALL", Const, 0}, + {"PTRACE_SYSEMU", Const, 0}, + {"PTRACE_SYSEMU_SINGLESTEP", Const, 0}, + {"PTRACE_TRACEME", Const, 0}, + {"PT_ATTACH", Const, 0}, + {"PT_ATTACHEXC", Const, 0}, + {"PT_CONTINUE", Const, 0}, + {"PT_DATA_ADDR", Const, 0}, + {"PT_DENY_ATTACH", Const, 0}, + 
{"PT_DETACH", Const, 0}, + {"PT_FIRSTMACH", Const, 0}, + {"PT_FORCEQUOTA", Const, 0}, + {"PT_KILL", Const, 0}, + {"PT_MASK", Const, 1}, + {"PT_READ_D", Const, 0}, + {"PT_READ_I", Const, 0}, + {"PT_READ_U", Const, 0}, + {"PT_SIGEXC", Const, 0}, + {"PT_STEP", Const, 0}, + {"PT_TEXT_ADDR", Const, 0}, + {"PT_TEXT_END_ADDR", Const, 0}, + {"PT_THUPDATE", Const, 0}, + {"PT_TRACE_ME", Const, 0}, + {"PT_WRITE_D", Const, 0}, + {"PT_WRITE_I", Const, 0}, + {"PT_WRITE_U", Const, 0}, + {"ParseDirent", Func, 0}, + {"ParseNetlinkMessage", Func, 0}, + {"ParseNetlinkRouteAttr", Func, 0}, + {"ParseRoutingMessage", Func, 0}, + {"ParseRoutingSockaddr", Func, 0}, + {"ParseSocketControlMessage", Func, 0}, + {"ParseUnixCredentials", Func, 0}, + {"ParseUnixRights", Func, 0}, + {"PathMax", Const, 0}, + {"Pathconf", Func, 0}, + {"Pause", Func, 0}, + {"Pipe", Func, 0}, + {"Pipe2", Func, 1}, + {"PivotRoot", Func, 0}, + {"Pointer", Type, 11}, + {"PostQueuedCompletionStatus", Func, 0}, + {"Pread", Func, 0}, + {"Proc", Type, 0}, + {"Proc.Dll", Field, 0}, + {"Proc.Name", Field, 0}, + {"ProcAttr", Type, 0}, + {"ProcAttr.Dir", Field, 0}, + {"ProcAttr.Env", Field, 0}, + {"ProcAttr.Files", Field, 0}, + {"ProcAttr.Sys", Field, 0}, + {"Process32First", Func, 4}, + {"Process32Next", Func, 4}, + {"ProcessEntry32", Type, 4}, + {"ProcessEntry32.DefaultHeapID", Field, 4}, + {"ProcessEntry32.ExeFile", Field, 4}, + {"ProcessEntry32.Flags", Field, 4}, + {"ProcessEntry32.ModuleID", Field, 4}, + {"ProcessEntry32.ParentProcessID", Field, 4}, + {"ProcessEntry32.PriClassBase", Field, 4}, + {"ProcessEntry32.ProcessID", Field, 4}, + {"ProcessEntry32.Size", Field, 4}, + {"ProcessEntry32.Threads", Field, 4}, + {"ProcessEntry32.Usage", Field, 4}, + {"ProcessInformation", Type, 0}, + {"ProcessInformation.Process", Field, 0}, + {"ProcessInformation.ProcessId", Field, 0}, + {"ProcessInformation.Thread", Field, 0}, + {"ProcessInformation.ThreadId", Field, 0}, + {"Protoent", Type, 0}, + {"Protoent.Aliases", Field, 0}, + {"Protoent.Name", Field, 0}, + {"Protoent.Proto", Field, 0}, + {"PtraceAttach", Func, 0}, + {"PtraceCont", Func, 0}, + {"PtraceDetach", Func, 0}, + {"PtraceGetEventMsg", Func, 0}, + {"PtraceGetRegs", Func, 0}, + {"PtracePeekData", Func, 0}, + {"PtracePeekText", Func, 0}, + {"PtracePokeData", Func, 0}, + {"PtracePokeText", Func, 0}, + {"PtraceRegs", Type, 0}, + {"PtraceRegs.Cs", Field, 0}, + {"PtraceRegs.Ds", Field, 0}, + {"PtraceRegs.Eax", Field, 0}, + {"PtraceRegs.Ebp", Field, 0}, + {"PtraceRegs.Ebx", Field, 0}, + {"PtraceRegs.Ecx", Field, 0}, + {"PtraceRegs.Edi", Field, 0}, + {"PtraceRegs.Edx", Field, 0}, + {"PtraceRegs.Eflags", Field, 0}, + {"PtraceRegs.Eip", Field, 0}, + {"PtraceRegs.Es", Field, 0}, + {"PtraceRegs.Esi", Field, 0}, + {"PtraceRegs.Esp", Field, 0}, + {"PtraceRegs.Fs", Field, 0}, + {"PtraceRegs.Fs_base", Field, 0}, + {"PtraceRegs.Gs", Field, 0}, + {"PtraceRegs.Gs_base", Field, 0}, + {"PtraceRegs.Orig_eax", Field, 0}, + {"PtraceRegs.Orig_rax", Field, 0}, + {"PtraceRegs.R10", Field, 0}, + {"PtraceRegs.R11", Field, 0}, + {"PtraceRegs.R12", Field, 0}, + {"PtraceRegs.R13", Field, 0}, + {"PtraceRegs.R14", Field, 0}, + {"PtraceRegs.R15", Field, 0}, + {"PtraceRegs.R8", Field, 0}, + {"PtraceRegs.R9", Field, 0}, + {"PtraceRegs.Rax", Field, 0}, + {"PtraceRegs.Rbp", Field, 0}, + {"PtraceRegs.Rbx", Field, 0}, + {"PtraceRegs.Rcx", Field, 0}, + {"PtraceRegs.Rdi", Field, 0}, + {"PtraceRegs.Rdx", Field, 0}, + {"PtraceRegs.Rip", Field, 0}, + {"PtraceRegs.Rsi", Field, 0}, + {"PtraceRegs.Rsp", Field, 0}, + {"PtraceRegs.Ss", Field, 0}, + 
{"PtraceRegs.Uregs", Field, 0}, + {"PtraceRegs.Xcs", Field, 0}, + {"PtraceRegs.Xds", Field, 0}, + {"PtraceRegs.Xes", Field, 0}, + {"PtraceRegs.Xfs", Field, 0}, + {"PtraceRegs.Xgs", Field, 0}, + {"PtraceRegs.Xss", Field, 0}, + {"PtraceSetOptions", Func, 0}, + {"PtraceSetRegs", Func, 0}, + {"PtraceSingleStep", Func, 0}, + {"PtraceSyscall", Func, 1}, + {"Pwrite", Func, 0}, + {"REG_BINARY", Const, 0}, + {"REG_DWORD", Const, 0}, + {"REG_DWORD_BIG_ENDIAN", Const, 0}, + {"REG_DWORD_LITTLE_ENDIAN", Const, 0}, + {"REG_EXPAND_SZ", Const, 0}, + {"REG_FULL_RESOURCE_DESCRIPTOR", Const, 0}, + {"REG_LINK", Const, 0}, + {"REG_MULTI_SZ", Const, 0}, + {"REG_NONE", Const, 0}, + {"REG_QWORD", Const, 0}, + {"REG_QWORD_LITTLE_ENDIAN", Const, 0}, + {"REG_RESOURCE_LIST", Const, 0}, + {"REG_RESOURCE_REQUIREMENTS_LIST", Const, 0}, + {"REG_SZ", Const, 0}, + {"RLIMIT_AS", Const, 0}, + {"RLIMIT_CORE", Const, 0}, + {"RLIMIT_CPU", Const, 0}, + {"RLIMIT_CPU_USAGE_MONITOR", Const, 16}, + {"RLIMIT_DATA", Const, 0}, + {"RLIMIT_FSIZE", Const, 0}, + {"RLIMIT_NOFILE", Const, 0}, + {"RLIMIT_STACK", Const, 0}, + {"RLIM_INFINITY", Const, 0}, + {"RTAX_ADVMSS", Const, 0}, + {"RTAX_AUTHOR", Const, 0}, + {"RTAX_BRD", Const, 0}, + {"RTAX_CWND", Const, 0}, + {"RTAX_DST", Const, 0}, + {"RTAX_FEATURES", Const, 0}, + {"RTAX_FEATURE_ALLFRAG", Const, 0}, + {"RTAX_FEATURE_ECN", Const, 0}, + {"RTAX_FEATURE_SACK", Const, 0}, + {"RTAX_FEATURE_TIMESTAMP", Const, 0}, + {"RTAX_GATEWAY", Const, 0}, + {"RTAX_GENMASK", Const, 0}, + {"RTAX_HOPLIMIT", Const, 0}, + {"RTAX_IFA", Const, 0}, + {"RTAX_IFP", Const, 0}, + {"RTAX_INITCWND", Const, 0}, + {"RTAX_INITRWND", Const, 0}, + {"RTAX_LABEL", Const, 1}, + {"RTAX_LOCK", Const, 0}, + {"RTAX_MAX", Const, 0}, + {"RTAX_MTU", Const, 0}, + {"RTAX_NETMASK", Const, 0}, + {"RTAX_REORDERING", Const, 0}, + {"RTAX_RTO_MIN", Const, 0}, + {"RTAX_RTT", Const, 0}, + {"RTAX_RTTVAR", Const, 0}, + {"RTAX_SRC", Const, 1}, + {"RTAX_SRCMASK", Const, 1}, + {"RTAX_SSTHRESH", Const, 0}, + {"RTAX_TAG", Const, 1}, + {"RTAX_UNSPEC", Const, 0}, + {"RTAX_WINDOW", Const, 0}, + {"RTA_ALIGNTO", Const, 0}, + {"RTA_AUTHOR", Const, 0}, + {"RTA_BRD", Const, 0}, + {"RTA_CACHEINFO", Const, 0}, + {"RTA_DST", Const, 0}, + {"RTA_FLOW", Const, 0}, + {"RTA_GATEWAY", Const, 0}, + {"RTA_GENMASK", Const, 0}, + {"RTA_IFA", Const, 0}, + {"RTA_IFP", Const, 0}, + {"RTA_IIF", Const, 0}, + {"RTA_LABEL", Const, 1}, + {"RTA_MAX", Const, 0}, + {"RTA_METRICS", Const, 0}, + {"RTA_MULTIPATH", Const, 0}, + {"RTA_NETMASK", Const, 0}, + {"RTA_OIF", Const, 0}, + {"RTA_PREFSRC", Const, 0}, + {"RTA_PRIORITY", Const, 0}, + {"RTA_SRC", Const, 0}, + {"RTA_SRCMASK", Const, 1}, + {"RTA_TABLE", Const, 0}, + {"RTA_TAG", Const, 1}, + {"RTA_UNSPEC", Const, 0}, + {"RTCF_DIRECTSRC", Const, 0}, + {"RTCF_DOREDIRECT", Const, 0}, + {"RTCF_LOG", Const, 0}, + {"RTCF_MASQ", Const, 0}, + {"RTCF_NAT", Const, 0}, + {"RTCF_VALVE", Const, 0}, + {"RTF_ADDRCLASSMASK", Const, 0}, + {"RTF_ADDRCONF", Const, 0}, + {"RTF_ALLONLINK", Const, 0}, + {"RTF_ANNOUNCE", Const, 1}, + {"RTF_BLACKHOLE", Const, 0}, + {"RTF_BROADCAST", Const, 0}, + {"RTF_CACHE", Const, 0}, + {"RTF_CLONED", Const, 1}, + {"RTF_CLONING", Const, 0}, + {"RTF_CONDEMNED", Const, 0}, + {"RTF_DEFAULT", Const, 0}, + {"RTF_DELCLONE", Const, 0}, + {"RTF_DONE", Const, 0}, + {"RTF_DYNAMIC", Const, 0}, + {"RTF_FLOW", Const, 0}, + {"RTF_FMASK", Const, 0}, + {"RTF_GATEWAY", Const, 0}, + {"RTF_GWFLAG_COMPAT", Const, 3}, + {"RTF_HOST", Const, 0}, + {"RTF_IFREF", Const, 0}, + {"RTF_IFSCOPE", Const, 0}, + {"RTF_INTERFACE", Const, 0}, + {"RTF_IRTT", 
Const, 0}, + {"RTF_LINKRT", Const, 0}, + {"RTF_LLDATA", Const, 0}, + {"RTF_LLINFO", Const, 0}, + {"RTF_LOCAL", Const, 0}, + {"RTF_MASK", Const, 1}, + {"RTF_MODIFIED", Const, 0}, + {"RTF_MPATH", Const, 1}, + {"RTF_MPLS", Const, 1}, + {"RTF_MSS", Const, 0}, + {"RTF_MTU", Const, 0}, + {"RTF_MULTICAST", Const, 0}, + {"RTF_NAT", Const, 0}, + {"RTF_NOFORWARD", Const, 0}, + {"RTF_NONEXTHOP", Const, 0}, + {"RTF_NOPMTUDISC", Const, 0}, + {"RTF_PERMANENT_ARP", Const, 1}, + {"RTF_PINNED", Const, 0}, + {"RTF_POLICY", Const, 0}, + {"RTF_PRCLONING", Const, 0}, + {"RTF_PROTO1", Const, 0}, + {"RTF_PROTO2", Const, 0}, + {"RTF_PROTO3", Const, 0}, + {"RTF_PROXY", Const, 16}, + {"RTF_REINSTATE", Const, 0}, + {"RTF_REJECT", Const, 0}, + {"RTF_RNH_LOCKED", Const, 0}, + {"RTF_ROUTER", Const, 16}, + {"RTF_SOURCE", Const, 1}, + {"RTF_SRC", Const, 1}, + {"RTF_STATIC", Const, 0}, + {"RTF_STICKY", Const, 0}, + {"RTF_THROW", Const, 0}, + {"RTF_TUNNEL", Const, 1}, + {"RTF_UP", Const, 0}, + {"RTF_USETRAILERS", Const, 1}, + {"RTF_WASCLONED", Const, 0}, + {"RTF_WINDOW", Const, 0}, + {"RTF_XRESOLVE", Const, 0}, + {"RTM_ADD", Const, 0}, + {"RTM_BASE", Const, 0}, + {"RTM_CHANGE", Const, 0}, + {"RTM_CHGADDR", Const, 1}, + {"RTM_DELACTION", Const, 0}, + {"RTM_DELADDR", Const, 0}, + {"RTM_DELADDRLABEL", Const, 0}, + {"RTM_DELETE", Const, 0}, + {"RTM_DELLINK", Const, 0}, + {"RTM_DELMADDR", Const, 0}, + {"RTM_DELNEIGH", Const, 0}, + {"RTM_DELQDISC", Const, 0}, + {"RTM_DELROUTE", Const, 0}, + {"RTM_DELRULE", Const, 0}, + {"RTM_DELTCLASS", Const, 0}, + {"RTM_DELTFILTER", Const, 0}, + {"RTM_DESYNC", Const, 1}, + {"RTM_F_CLONED", Const, 0}, + {"RTM_F_EQUALIZE", Const, 0}, + {"RTM_F_NOTIFY", Const, 0}, + {"RTM_F_PREFIX", Const, 0}, + {"RTM_GET", Const, 0}, + {"RTM_GET2", Const, 0}, + {"RTM_GETACTION", Const, 0}, + {"RTM_GETADDR", Const, 0}, + {"RTM_GETADDRLABEL", Const, 0}, + {"RTM_GETANYCAST", Const, 0}, + {"RTM_GETDCB", Const, 0}, + {"RTM_GETLINK", Const, 0}, + {"RTM_GETMULTICAST", Const, 0}, + {"RTM_GETNEIGH", Const, 0}, + {"RTM_GETNEIGHTBL", Const, 0}, + {"RTM_GETQDISC", Const, 0}, + {"RTM_GETROUTE", Const, 0}, + {"RTM_GETRULE", Const, 0}, + {"RTM_GETTCLASS", Const, 0}, + {"RTM_GETTFILTER", Const, 0}, + {"RTM_IEEE80211", Const, 0}, + {"RTM_IFANNOUNCE", Const, 0}, + {"RTM_IFINFO", Const, 0}, + {"RTM_IFINFO2", Const, 0}, + {"RTM_LLINFO_UPD", Const, 1}, + {"RTM_LOCK", Const, 0}, + {"RTM_LOSING", Const, 0}, + {"RTM_MAX", Const, 0}, + {"RTM_MAXSIZE", Const, 1}, + {"RTM_MISS", Const, 0}, + {"RTM_NEWACTION", Const, 0}, + {"RTM_NEWADDR", Const, 0}, + {"RTM_NEWADDRLABEL", Const, 0}, + {"RTM_NEWLINK", Const, 0}, + {"RTM_NEWMADDR", Const, 0}, + {"RTM_NEWMADDR2", Const, 0}, + {"RTM_NEWNDUSEROPT", Const, 0}, + {"RTM_NEWNEIGH", Const, 0}, + {"RTM_NEWNEIGHTBL", Const, 0}, + {"RTM_NEWPREFIX", Const, 0}, + {"RTM_NEWQDISC", Const, 0}, + {"RTM_NEWROUTE", Const, 0}, + {"RTM_NEWRULE", Const, 0}, + {"RTM_NEWTCLASS", Const, 0}, + {"RTM_NEWTFILTER", Const, 0}, + {"RTM_NR_FAMILIES", Const, 0}, + {"RTM_NR_MSGTYPES", Const, 0}, + {"RTM_OIFINFO", Const, 1}, + {"RTM_OLDADD", Const, 0}, + {"RTM_OLDDEL", Const, 0}, + {"RTM_OOIFINFO", Const, 1}, + {"RTM_REDIRECT", Const, 0}, + {"RTM_RESOLVE", Const, 0}, + {"RTM_RTTUNIT", Const, 0}, + {"RTM_SETDCB", Const, 0}, + {"RTM_SETGATE", Const, 1}, + {"RTM_SETLINK", Const, 0}, + {"RTM_SETNEIGHTBL", Const, 0}, + {"RTM_VERSION", Const, 0}, + {"RTNH_ALIGNTO", Const, 0}, + {"RTNH_F_DEAD", Const, 0}, + {"RTNH_F_ONLINK", Const, 0}, + {"RTNH_F_PERVASIVE", Const, 0}, + {"RTNLGRP_IPV4_IFADDR", Const, 1}, + {"RTNLGRP_IPV4_MROUTE", 
Const, 1}, + {"RTNLGRP_IPV4_ROUTE", Const, 1}, + {"RTNLGRP_IPV4_RULE", Const, 1}, + {"RTNLGRP_IPV6_IFADDR", Const, 1}, + {"RTNLGRP_IPV6_IFINFO", Const, 1}, + {"RTNLGRP_IPV6_MROUTE", Const, 1}, + {"RTNLGRP_IPV6_PREFIX", Const, 1}, + {"RTNLGRP_IPV6_ROUTE", Const, 1}, + {"RTNLGRP_IPV6_RULE", Const, 1}, + {"RTNLGRP_LINK", Const, 1}, + {"RTNLGRP_ND_USEROPT", Const, 1}, + {"RTNLGRP_NEIGH", Const, 1}, + {"RTNLGRP_NONE", Const, 1}, + {"RTNLGRP_NOTIFY", Const, 1}, + {"RTNLGRP_TC", Const, 1}, + {"RTN_ANYCAST", Const, 0}, + {"RTN_BLACKHOLE", Const, 0}, + {"RTN_BROADCAST", Const, 0}, + {"RTN_LOCAL", Const, 0}, + {"RTN_MAX", Const, 0}, + {"RTN_MULTICAST", Const, 0}, + {"RTN_NAT", Const, 0}, + {"RTN_PROHIBIT", Const, 0}, + {"RTN_THROW", Const, 0}, + {"RTN_UNICAST", Const, 0}, + {"RTN_UNREACHABLE", Const, 0}, + {"RTN_UNSPEC", Const, 0}, + {"RTN_XRESOLVE", Const, 0}, + {"RTPROT_BIRD", Const, 0}, + {"RTPROT_BOOT", Const, 0}, + {"RTPROT_DHCP", Const, 0}, + {"RTPROT_DNROUTED", Const, 0}, + {"RTPROT_GATED", Const, 0}, + {"RTPROT_KERNEL", Const, 0}, + {"RTPROT_MRT", Const, 0}, + {"RTPROT_NTK", Const, 0}, + {"RTPROT_RA", Const, 0}, + {"RTPROT_REDIRECT", Const, 0}, + {"RTPROT_STATIC", Const, 0}, + {"RTPROT_UNSPEC", Const, 0}, + {"RTPROT_XORP", Const, 0}, + {"RTPROT_ZEBRA", Const, 0}, + {"RTV_EXPIRE", Const, 0}, + {"RTV_HOPCOUNT", Const, 0}, + {"RTV_MTU", Const, 0}, + {"RTV_RPIPE", Const, 0}, + {"RTV_RTT", Const, 0}, + {"RTV_RTTVAR", Const, 0}, + {"RTV_SPIPE", Const, 0}, + {"RTV_SSTHRESH", Const, 0}, + {"RTV_WEIGHT", Const, 0}, + {"RT_CACHING_CONTEXT", Const, 1}, + {"RT_CLASS_DEFAULT", Const, 0}, + {"RT_CLASS_LOCAL", Const, 0}, + {"RT_CLASS_MAIN", Const, 0}, + {"RT_CLASS_MAX", Const, 0}, + {"RT_CLASS_UNSPEC", Const, 0}, + {"RT_DEFAULT_FIB", Const, 1}, + {"RT_NORTREF", Const, 1}, + {"RT_SCOPE_HOST", Const, 0}, + {"RT_SCOPE_LINK", Const, 0}, + {"RT_SCOPE_NOWHERE", Const, 0}, + {"RT_SCOPE_SITE", Const, 0}, + {"RT_SCOPE_UNIVERSE", Const, 0}, + {"RT_TABLEID_MAX", Const, 1}, + {"RT_TABLE_COMPAT", Const, 0}, + {"RT_TABLE_DEFAULT", Const, 0}, + {"RT_TABLE_LOCAL", Const, 0}, + {"RT_TABLE_MAIN", Const, 0}, + {"RT_TABLE_MAX", Const, 0}, + {"RT_TABLE_UNSPEC", Const, 0}, + {"RUSAGE_CHILDREN", Const, 0}, + {"RUSAGE_SELF", Const, 0}, + {"RUSAGE_THREAD", Const, 0}, + {"Radvisory_t", Type, 0}, + {"Radvisory_t.Count", Field, 0}, + {"Radvisory_t.Offset", Field, 0}, + {"Radvisory_t.Pad_cgo_0", Field, 0}, + {"RawConn", Type, 9}, + {"RawSockaddr", Type, 0}, + {"RawSockaddr.Data", Field, 0}, + {"RawSockaddr.Family", Field, 0}, + {"RawSockaddr.Len", Field, 0}, + {"RawSockaddrAny", Type, 0}, + {"RawSockaddrAny.Addr", Field, 0}, + {"RawSockaddrAny.Pad", Field, 0}, + {"RawSockaddrDatalink", Type, 0}, + {"RawSockaddrDatalink.Alen", Field, 0}, + {"RawSockaddrDatalink.Data", Field, 0}, + {"RawSockaddrDatalink.Family", Field, 0}, + {"RawSockaddrDatalink.Index", Field, 0}, + {"RawSockaddrDatalink.Len", Field, 0}, + {"RawSockaddrDatalink.Nlen", Field, 0}, + {"RawSockaddrDatalink.Pad_cgo_0", Field, 2}, + {"RawSockaddrDatalink.Slen", Field, 0}, + {"RawSockaddrDatalink.Type", Field, 0}, + {"RawSockaddrInet4", Type, 0}, + {"RawSockaddrInet4.Addr", Field, 0}, + {"RawSockaddrInet4.Family", Field, 0}, + {"RawSockaddrInet4.Len", Field, 0}, + {"RawSockaddrInet4.Port", Field, 0}, + {"RawSockaddrInet4.Zero", Field, 0}, + {"RawSockaddrInet6", Type, 0}, + {"RawSockaddrInet6.Addr", Field, 0}, + {"RawSockaddrInet6.Family", Field, 0}, + {"RawSockaddrInet6.Flowinfo", Field, 0}, + {"RawSockaddrInet6.Len", Field, 0}, + {"RawSockaddrInet6.Port", Field, 0}, + 
{"RawSockaddrInet6.Scope_id", Field, 0}, + {"RawSockaddrLinklayer", Type, 0}, + {"RawSockaddrLinklayer.Addr", Field, 0}, + {"RawSockaddrLinklayer.Family", Field, 0}, + {"RawSockaddrLinklayer.Halen", Field, 0}, + {"RawSockaddrLinklayer.Hatype", Field, 0}, + {"RawSockaddrLinklayer.Ifindex", Field, 0}, + {"RawSockaddrLinklayer.Pkttype", Field, 0}, + {"RawSockaddrLinklayer.Protocol", Field, 0}, + {"RawSockaddrNetlink", Type, 0}, + {"RawSockaddrNetlink.Family", Field, 0}, + {"RawSockaddrNetlink.Groups", Field, 0}, + {"RawSockaddrNetlink.Pad", Field, 0}, + {"RawSockaddrNetlink.Pid", Field, 0}, + {"RawSockaddrUnix", Type, 0}, + {"RawSockaddrUnix.Family", Field, 0}, + {"RawSockaddrUnix.Len", Field, 0}, + {"RawSockaddrUnix.Pad_cgo_0", Field, 2}, + {"RawSockaddrUnix.Path", Field, 0}, + {"RawSyscall", Func, 0}, + {"RawSyscall6", Func, 0}, + {"Read", Func, 0}, + {"ReadConsole", Func, 1}, + {"ReadDirectoryChanges", Func, 0}, + {"ReadDirent", Func, 0}, + {"ReadFile", Func, 0}, + {"Readlink", Func, 0}, + {"Reboot", Func, 0}, + {"Recvfrom", Func, 0}, + {"Recvmsg", Func, 0}, + {"RegCloseKey", Func, 0}, + {"RegEnumKeyEx", Func, 0}, + {"RegOpenKeyEx", Func, 0}, + {"RegQueryInfoKey", Func, 0}, + {"RegQueryValueEx", Func, 0}, + {"RemoveDirectory", Func, 0}, + {"Removexattr", Func, 1}, + {"Rename", Func, 0}, + {"Renameat", Func, 0}, + {"Revoke", Func, 0}, + {"Rlimit", Type, 0}, + {"Rlimit.Cur", Field, 0}, + {"Rlimit.Max", Field, 0}, + {"Rmdir", Func, 0}, + {"RouteMessage", Type, 0}, + {"RouteMessage.Data", Field, 0}, + {"RouteMessage.Header", Field, 0}, + {"RouteRIB", Func, 0}, + {"RoutingMessage", Type, 0}, + {"RtAttr", Type, 0}, + {"RtAttr.Len", Field, 0}, + {"RtAttr.Type", Field, 0}, + {"RtGenmsg", Type, 0}, + {"RtGenmsg.Family", Field, 0}, + {"RtMetrics", Type, 0}, + {"RtMetrics.Expire", Field, 0}, + {"RtMetrics.Filler", Field, 0}, + {"RtMetrics.Hopcount", Field, 0}, + {"RtMetrics.Locks", Field, 0}, + {"RtMetrics.Mtu", Field, 0}, + {"RtMetrics.Pad", Field, 3}, + {"RtMetrics.Pksent", Field, 0}, + {"RtMetrics.Recvpipe", Field, 0}, + {"RtMetrics.Refcnt", Field, 2}, + {"RtMetrics.Rtt", Field, 0}, + {"RtMetrics.Rttvar", Field, 0}, + {"RtMetrics.Sendpipe", Field, 0}, + {"RtMetrics.Ssthresh", Field, 0}, + {"RtMetrics.Weight", Field, 0}, + {"RtMsg", Type, 0}, + {"RtMsg.Dst_len", Field, 0}, + {"RtMsg.Family", Field, 0}, + {"RtMsg.Flags", Field, 0}, + {"RtMsg.Protocol", Field, 0}, + {"RtMsg.Scope", Field, 0}, + {"RtMsg.Src_len", Field, 0}, + {"RtMsg.Table", Field, 0}, + {"RtMsg.Tos", Field, 0}, + {"RtMsg.Type", Field, 0}, + {"RtMsghdr", Type, 0}, + {"RtMsghdr.Addrs", Field, 0}, + {"RtMsghdr.Errno", Field, 0}, + {"RtMsghdr.Flags", Field, 0}, + {"RtMsghdr.Fmask", Field, 0}, + {"RtMsghdr.Hdrlen", Field, 2}, + {"RtMsghdr.Index", Field, 0}, + {"RtMsghdr.Inits", Field, 0}, + {"RtMsghdr.Mpls", Field, 2}, + {"RtMsghdr.Msglen", Field, 0}, + {"RtMsghdr.Pad_cgo_0", Field, 0}, + {"RtMsghdr.Pad_cgo_1", Field, 2}, + {"RtMsghdr.Pid", Field, 0}, + {"RtMsghdr.Priority", Field, 2}, + {"RtMsghdr.Rmx", Field, 0}, + {"RtMsghdr.Seq", Field, 0}, + {"RtMsghdr.Tableid", Field, 2}, + {"RtMsghdr.Type", Field, 0}, + {"RtMsghdr.Use", Field, 0}, + {"RtMsghdr.Version", Field, 0}, + {"RtNexthop", Type, 0}, + {"RtNexthop.Flags", Field, 0}, + {"RtNexthop.Hops", Field, 0}, + {"RtNexthop.Ifindex", Field, 0}, + {"RtNexthop.Len", Field, 0}, + {"Rusage", Type, 0}, + {"Rusage.CreationTime", Field, 0}, + {"Rusage.ExitTime", Field, 0}, + {"Rusage.Idrss", Field, 0}, + {"Rusage.Inblock", Field, 0}, + {"Rusage.Isrss", Field, 0}, + {"Rusage.Ixrss", Field, 0}, + 
{"Rusage.KernelTime", Field, 0}, + {"Rusage.Majflt", Field, 0}, + {"Rusage.Maxrss", Field, 0}, + {"Rusage.Minflt", Field, 0}, + {"Rusage.Msgrcv", Field, 0}, + {"Rusage.Msgsnd", Field, 0}, + {"Rusage.Nivcsw", Field, 0}, + {"Rusage.Nsignals", Field, 0}, + {"Rusage.Nswap", Field, 0}, + {"Rusage.Nvcsw", Field, 0}, + {"Rusage.Oublock", Field, 0}, + {"Rusage.Stime", Field, 0}, + {"Rusage.UserTime", Field, 0}, + {"Rusage.Utime", Field, 0}, + {"SCM_BINTIME", Const, 0}, + {"SCM_CREDENTIALS", Const, 0}, + {"SCM_CREDS", Const, 0}, + {"SCM_RIGHTS", Const, 0}, + {"SCM_TIMESTAMP", Const, 0}, + {"SCM_TIMESTAMPING", Const, 0}, + {"SCM_TIMESTAMPNS", Const, 0}, + {"SCM_TIMESTAMP_MONOTONIC", Const, 0}, + {"SHUT_RD", Const, 0}, + {"SHUT_RDWR", Const, 0}, + {"SHUT_WR", Const, 0}, + {"SID", Type, 0}, + {"SIDAndAttributes", Type, 0}, + {"SIDAndAttributes.Attributes", Field, 0}, + {"SIDAndAttributes.Sid", Field, 0}, + {"SIGABRT", Const, 0}, + {"SIGALRM", Const, 0}, + {"SIGBUS", Const, 0}, + {"SIGCHLD", Const, 0}, + {"SIGCLD", Const, 0}, + {"SIGCONT", Const, 0}, + {"SIGEMT", Const, 0}, + {"SIGFPE", Const, 0}, + {"SIGHUP", Const, 0}, + {"SIGILL", Const, 0}, + {"SIGINFO", Const, 0}, + {"SIGINT", Const, 0}, + {"SIGIO", Const, 0}, + {"SIGIOT", Const, 0}, + {"SIGKILL", Const, 0}, + {"SIGLIBRT", Const, 1}, + {"SIGLWP", Const, 0}, + {"SIGPIPE", Const, 0}, + {"SIGPOLL", Const, 0}, + {"SIGPROF", Const, 0}, + {"SIGPWR", Const, 0}, + {"SIGQUIT", Const, 0}, + {"SIGSEGV", Const, 0}, + {"SIGSTKFLT", Const, 0}, + {"SIGSTOP", Const, 0}, + {"SIGSYS", Const, 0}, + {"SIGTERM", Const, 0}, + {"SIGTHR", Const, 0}, + {"SIGTRAP", Const, 0}, + {"SIGTSTP", Const, 0}, + {"SIGTTIN", Const, 0}, + {"SIGTTOU", Const, 0}, + {"SIGUNUSED", Const, 0}, + {"SIGURG", Const, 0}, + {"SIGUSR1", Const, 0}, + {"SIGUSR2", Const, 0}, + {"SIGVTALRM", Const, 0}, + {"SIGWINCH", Const, 0}, + {"SIGXCPU", Const, 0}, + {"SIGXFSZ", Const, 0}, + {"SIOCADDDLCI", Const, 0}, + {"SIOCADDMULTI", Const, 0}, + {"SIOCADDRT", Const, 0}, + {"SIOCAIFADDR", Const, 0}, + {"SIOCAIFGROUP", Const, 0}, + {"SIOCALIFADDR", Const, 0}, + {"SIOCARPIPLL", Const, 0}, + {"SIOCATMARK", Const, 0}, + {"SIOCAUTOADDR", Const, 0}, + {"SIOCAUTONETMASK", Const, 0}, + {"SIOCBRDGADD", Const, 1}, + {"SIOCBRDGADDS", Const, 1}, + {"SIOCBRDGARL", Const, 1}, + {"SIOCBRDGDADDR", Const, 1}, + {"SIOCBRDGDEL", Const, 1}, + {"SIOCBRDGDELS", Const, 1}, + {"SIOCBRDGFLUSH", Const, 1}, + {"SIOCBRDGFRL", Const, 1}, + {"SIOCBRDGGCACHE", Const, 1}, + {"SIOCBRDGGFD", Const, 1}, + {"SIOCBRDGGHT", Const, 1}, + {"SIOCBRDGGIFFLGS", Const, 1}, + {"SIOCBRDGGMA", Const, 1}, + {"SIOCBRDGGPARAM", Const, 1}, + {"SIOCBRDGGPRI", Const, 1}, + {"SIOCBRDGGRL", Const, 1}, + {"SIOCBRDGGSIFS", Const, 1}, + {"SIOCBRDGGTO", Const, 1}, + {"SIOCBRDGIFS", Const, 1}, + {"SIOCBRDGRTS", Const, 1}, + {"SIOCBRDGSADDR", Const, 1}, + {"SIOCBRDGSCACHE", Const, 1}, + {"SIOCBRDGSFD", Const, 1}, + {"SIOCBRDGSHT", Const, 1}, + {"SIOCBRDGSIFCOST", Const, 1}, + {"SIOCBRDGSIFFLGS", Const, 1}, + {"SIOCBRDGSIFPRIO", Const, 1}, + {"SIOCBRDGSMA", Const, 1}, + {"SIOCBRDGSPRI", Const, 1}, + {"SIOCBRDGSPROTO", Const, 1}, + {"SIOCBRDGSTO", Const, 1}, + {"SIOCBRDGSTXHC", Const, 1}, + {"SIOCDARP", Const, 0}, + {"SIOCDELDLCI", Const, 0}, + {"SIOCDELMULTI", Const, 0}, + {"SIOCDELRT", Const, 0}, + {"SIOCDEVPRIVATE", Const, 0}, + {"SIOCDIFADDR", Const, 0}, + {"SIOCDIFGROUP", Const, 0}, + {"SIOCDIFPHYADDR", Const, 0}, + {"SIOCDLIFADDR", Const, 0}, + {"SIOCDRARP", Const, 0}, + {"SIOCGARP", Const, 0}, + {"SIOCGDRVSPEC", Const, 0}, + {"SIOCGETKALIVE", Const, 1}, + 
{"SIOCGETLABEL", Const, 1}, + {"SIOCGETPFLOW", Const, 1}, + {"SIOCGETPFSYNC", Const, 1}, + {"SIOCGETSGCNT", Const, 0}, + {"SIOCGETVIFCNT", Const, 0}, + {"SIOCGETVLAN", Const, 0}, + {"SIOCGHIWAT", Const, 0}, + {"SIOCGIFADDR", Const, 0}, + {"SIOCGIFADDRPREF", Const, 1}, + {"SIOCGIFALIAS", Const, 1}, + {"SIOCGIFALTMTU", Const, 0}, + {"SIOCGIFASYNCMAP", Const, 0}, + {"SIOCGIFBOND", Const, 0}, + {"SIOCGIFBR", Const, 0}, + {"SIOCGIFBRDADDR", Const, 0}, + {"SIOCGIFCAP", Const, 0}, + {"SIOCGIFCONF", Const, 0}, + {"SIOCGIFCOUNT", Const, 0}, + {"SIOCGIFDATA", Const, 1}, + {"SIOCGIFDESCR", Const, 0}, + {"SIOCGIFDEVMTU", Const, 0}, + {"SIOCGIFDLT", Const, 1}, + {"SIOCGIFDSTADDR", Const, 0}, + {"SIOCGIFENCAP", Const, 0}, + {"SIOCGIFFIB", Const, 1}, + {"SIOCGIFFLAGS", Const, 0}, + {"SIOCGIFGATTR", Const, 1}, + {"SIOCGIFGENERIC", Const, 0}, + {"SIOCGIFGMEMB", Const, 0}, + {"SIOCGIFGROUP", Const, 0}, + {"SIOCGIFHARDMTU", Const, 3}, + {"SIOCGIFHWADDR", Const, 0}, + {"SIOCGIFINDEX", Const, 0}, + {"SIOCGIFKPI", Const, 0}, + {"SIOCGIFMAC", Const, 0}, + {"SIOCGIFMAP", Const, 0}, + {"SIOCGIFMEDIA", Const, 0}, + {"SIOCGIFMEM", Const, 0}, + {"SIOCGIFMETRIC", Const, 0}, + {"SIOCGIFMTU", Const, 0}, + {"SIOCGIFNAME", Const, 0}, + {"SIOCGIFNETMASK", Const, 0}, + {"SIOCGIFPDSTADDR", Const, 0}, + {"SIOCGIFPFLAGS", Const, 0}, + {"SIOCGIFPHYS", Const, 0}, + {"SIOCGIFPRIORITY", Const, 1}, + {"SIOCGIFPSRCADDR", Const, 0}, + {"SIOCGIFRDOMAIN", Const, 1}, + {"SIOCGIFRTLABEL", Const, 1}, + {"SIOCGIFSLAVE", Const, 0}, + {"SIOCGIFSTATUS", Const, 0}, + {"SIOCGIFTIMESLOT", Const, 1}, + {"SIOCGIFTXQLEN", Const, 0}, + {"SIOCGIFVLAN", Const, 0}, + {"SIOCGIFWAKEFLAGS", Const, 0}, + {"SIOCGIFXFLAGS", Const, 1}, + {"SIOCGLIFADDR", Const, 0}, + {"SIOCGLIFPHYADDR", Const, 0}, + {"SIOCGLIFPHYRTABLE", Const, 1}, + {"SIOCGLIFPHYTTL", Const, 3}, + {"SIOCGLINKSTR", Const, 1}, + {"SIOCGLOWAT", Const, 0}, + {"SIOCGPGRP", Const, 0}, + {"SIOCGPRIVATE_0", Const, 0}, + {"SIOCGPRIVATE_1", Const, 0}, + {"SIOCGRARP", Const, 0}, + {"SIOCGSPPPPARAMS", Const, 3}, + {"SIOCGSTAMP", Const, 0}, + {"SIOCGSTAMPNS", Const, 0}, + {"SIOCGVH", Const, 1}, + {"SIOCGVNETID", Const, 3}, + {"SIOCIFCREATE", Const, 0}, + {"SIOCIFCREATE2", Const, 0}, + {"SIOCIFDESTROY", Const, 0}, + {"SIOCIFGCLONERS", Const, 0}, + {"SIOCINITIFADDR", Const, 1}, + {"SIOCPROTOPRIVATE", Const, 0}, + {"SIOCRSLVMULTI", Const, 0}, + {"SIOCRTMSG", Const, 0}, + {"SIOCSARP", Const, 0}, + {"SIOCSDRVSPEC", Const, 0}, + {"SIOCSETKALIVE", Const, 1}, + {"SIOCSETLABEL", Const, 1}, + {"SIOCSETPFLOW", Const, 1}, + {"SIOCSETPFSYNC", Const, 1}, + {"SIOCSETVLAN", Const, 0}, + {"SIOCSHIWAT", Const, 0}, + {"SIOCSIFADDR", Const, 0}, + {"SIOCSIFADDRPREF", Const, 1}, + {"SIOCSIFALTMTU", Const, 0}, + {"SIOCSIFASYNCMAP", Const, 0}, + {"SIOCSIFBOND", Const, 0}, + {"SIOCSIFBR", Const, 0}, + {"SIOCSIFBRDADDR", Const, 0}, + {"SIOCSIFCAP", Const, 0}, + {"SIOCSIFDESCR", Const, 0}, + {"SIOCSIFDSTADDR", Const, 0}, + {"SIOCSIFENCAP", Const, 0}, + {"SIOCSIFFIB", Const, 1}, + {"SIOCSIFFLAGS", Const, 0}, + {"SIOCSIFGATTR", Const, 1}, + {"SIOCSIFGENERIC", Const, 0}, + {"SIOCSIFHWADDR", Const, 0}, + {"SIOCSIFHWBROADCAST", Const, 0}, + {"SIOCSIFKPI", Const, 0}, + {"SIOCSIFLINK", Const, 0}, + {"SIOCSIFLLADDR", Const, 0}, + {"SIOCSIFMAC", Const, 0}, + {"SIOCSIFMAP", Const, 0}, + {"SIOCSIFMEDIA", Const, 0}, + {"SIOCSIFMEM", Const, 0}, + {"SIOCSIFMETRIC", Const, 0}, + {"SIOCSIFMTU", Const, 0}, + {"SIOCSIFNAME", Const, 0}, + {"SIOCSIFNETMASK", Const, 0}, + {"SIOCSIFPFLAGS", Const, 0}, + {"SIOCSIFPHYADDR", Const, 0}, + {"SIOCSIFPHYS", 
Const, 0}, + {"SIOCSIFPRIORITY", Const, 1}, + {"SIOCSIFRDOMAIN", Const, 1}, + {"SIOCSIFRTLABEL", Const, 1}, + {"SIOCSIFRVNET", Const, 0}, + {"SIOCSIFSLAVE", Const, 0}, + {"SIOCSIFTIMESLOT", Const, 1}, + {"SIOCSIFTXQLEN", Const, 0}, + {"SIOCSIFVLAN", Const, 0}, + {"SIOCSIFVNET", Const, 0}, + {"SIOCSIFXFLAGS", Const, 1}, + {"SIOCSLIFPHYADDR", Const, 0}, + {"SIOCSLIFPHYRTABLE", Const, 1}, + {"SIOCSLIFPHYTTL", Const, 3}, + {"SIOCSLINKSTR", Const, 1}, + {"SIOCSLOWAT", Const, 0}, + {"SIOCSPGRP", Const, 0}, + {"SIOCSRARP", Const, 0}, + {"SIOCSSPPPPARAMS", Const, 3}, + {"SIOCSVH", Const, 1}, + {"SIOCSVNETID", Const, 3}, + {"SIOCZIFDATA", Const, 1}, + {"SIO_GET_EXTENSION_FUNCTION_POINTER", Const, 1}, + {"SIO_GET_INTERFACE_LIST", Const, 0}, + {"SIO_KEEPALIVE_VALS", Const, 3}, + {"SIO_UDP_CONNRESET", Const, 4}, + {"SOCK_CLOEXEC", Const, 0}, + {"SOCK_DCCP", Const, 0}, + {"SOCK_DGRAM", Const, 0}, + {"SOCK_FLAGS_MASK", Const, 1}, + {"SOCK_MAXADDRLEN", Const, 0}, + {"SOCK_NONBLOCK", Const, 0}, + {"SOCK_NOSIGPIPE", Const, 1}, + {"SOCK_PACKET", Const, 0}, + {"SOCK_RAW", Const, 0}, + {"SOCK_RDM", Const, 0}, + {"SOCK_SEQPACKET", Const, 0}, + {"SOCK_STREAM", Const, 0}, + {"SOL_AAL", Const, 0}, + {"SOL_ATM", Const, 0}, + {"SOL_DECNET", Const, 0}, + {"SOL_ICMPV6", Const, 0}, + {"SOL_IP", Const, 0}, + {"SOL_IPV6", Const, 0}, + {"SOL_IRDA", Const, 0}, + {"SOL_PACKET", Const, 0}, + {"SOL_RAW", Const, 0}, + {"SOL_SOCKET", Const, 0}, + {"SOL_TCP", Const, 0}, + {"SOL_X25", Const, 0}, + {"SOMAXCONN", Const, 0}, + {"SO_ACCEPTCONN", Const, 0}, + {"SO_ACCEPTFILTER", Const, 0}, + {"SO_ATTACH_FILTER", Const, 0}, + {"SO_BINDANY", Const, 1}, + {"SO_BINDTODEVICE", Const, 0}, + {"SO_BINTIME", Const, 0}, + {"SO_BROADCAST", Const, 0}, + {"SO_BSDCOMPAT", Const, 0}, + {"SO_DEBUG", Const, 0}, + {"SO_DETACH_FILTER", Const, 0}, + {"SO_DOMAIN", Const, 0}, + {"SO_DONTROUTE", Const, 0}, + {"SO_DONTTRUNC", Const, 0}, + {"SO_ERROR", Const, 0}, + {"SO_KEEPALIVE", Const, 0}, + {"SO_LABEL", Const, 0}, + {"SO_LINGER", Const, 0}, + {"SO_LINGER_SEC", Const, 0}, + {"SO_LISTENINCQLEN", Const, 0}, + {"SO_LISTENQLEN", Const, 0}, + {"SO_LISTENQLIMIT", Const, 0}, + {"SO_MARK", Const, 0}, + {"SO_NETPROC", Const, 1}, + {"SO_NKE", Const, 0}, + {"SO_NOADDRERR", Const, 0}, + {"SO_NOHEADER", Const, 1}, + {"SO_NOSIGPIPE", Const, 0}, + {"SO_NOTIFYCONFLICT", Const, 0}, + {"SO_NO_CHECK", Const, 0}, + {"SO_NO_DDP", Const, 0}, + {"SO_NO_OFFLOAD", Const, 0}, + {"SO_NP_EXTENSIONS", Const, 0}, + {"SO_NREAD", Const, 0}, + {"SO_NUMRCVPKT", Const, 16}, + {"SO_NWRITE", Const, 0}, + {"SO_OOBINLINE", Const, 0}, + {"SO_OVERFLOWED", Const, 1}, + {"SO_PASSCRED", Const, 0}, + {"SO_PASSSEC", Const, 0}, + {"SO_PEERCRED", Const, 0}, + {"SO_PEERLABEL", Const, 0}, + {"SO_PEERNAME", Const, 0}, + {"SO_PEERSEC", Const, 0}, + {"SO_PRIORITY", Const, 0}, + {"SO_PROTOCOL", Const, 0}, + {"SO_PROTOTYPE", Const, 1}, + {"SO_RANDOMPORT", Const, 0}, + {"SO_RCVBUF", Const, 0}, + {"SO_RCVBUFFORCE", Const, 0}, + {"SO_RCVLOWAT", Const, 0}, + {"SO_RCVTIMEO", Const, 0}, + {"SO_RESTRICTIONS", Const, 0}, + {"SO_RESTRICT_DENYIN", Const, 0}, + {"SO_RESTRICT_DENYOUT", Const, 0}, + {"SO_RESTRICT_DENYSET", Const, 0}, + {"SO_REUSEADDR", Const, 0}, + {"SO_REUSEPORT", Const, 0}, + {"SO_REUSESHAREUID", Const, 0}, + {"SO_RTABLE", Const, 1}, + {"SO_RXQ_OVFL", Const, 0}, + {"SO_SECURITY_AUTHENTICATION", Const, 0}, + {"SO_SECURITY_ENCRYPTION_NETWORK", Const, 0}, + {"SO_SECURITY_ENCRYPTION_TRANSPORT", Const, 0}, + {"SO_SETFIB", Const, 0}, + {"SO_SNDBUF", Const, 0}, + {"SO_SNDBUFFORCE", Const, 0}, + {"SO_SNDLOWAT", 
Const, 0}, + {"SO_SNDTIMEO", Const, 0}, + {"SO_SPLICE", Const, 1}, + {"SO_TIMESTAMP", Const, 0}, + {"SO_TIMESTAMPING", Const, 0}, + {"SO_TIMESTAMPNS", Const, 0}, + {"SO_TIMESTAMP_MONOTONIC", Const, 0}, + {"SO_TYPE", Const, 0}, + {"SO_UPCALLCLOSEWAIT", Const, 0}, + {"SO_UPDATE_ACCEPT_CONTEXT", Const, 0}, + {"SO_UPDATE_CONNECT_CONTEXT", Const, 1}, + {"SO_USELOOPBACK", Const, 0}, + {"SO_USER_COOKIE", Const, 1}, + {"SO_VENDOR", Const, 3}, + {"SO_WANTMORE", Const, 0}, + {"SO_WANTOOBFLAG", Const, 0}, + {"SSLExtraCertChainPolicyPara", Type, 0}, + {"SSLExtraCertChainPolicyPara.AuthType", Field, 0}, + {"SSLExtraCertChainPolicyPara.Checks", Field, 0}, + {"SSLExtraCertChainPolicyPara.ServerName", Field, 0}, + {"SSLExtraCertChainPolicyPara.Size", Field, 0}, + {"STANDARD_RIGHTS_ALL", Const, 0}, + {"STANDARD_RIGHTS_EXECUTE", Const, 0}, + {"STANDARD_RIGHTS_READ", Const, 0}, + {"STANDARD_RIGHTS_REQUIRED", Const, 0}, + {"STANDARD_RIGHTS_WRITE", Const, 0}, + {"STARTF_USESHOWWINDOW", Const, 0}, + {"STARTF_USESTDHANDLES", Const, 0}, + {"STD_ERROR_HANDLE", Const, 0}, + {"STD_INPUT_HANDLE", Const, 0}, + {"STD_OUTPUT_HANDLE", Const, 0}, + {"SUBLANG_ENGLISH_US", Const, 0}, + {"SW_FORCEMINIMIZE", Const, 0}, + {"SW_HIDE", Const, 0}, + {"SW_MAXIMIZE", Const, 0}, + {"SW_MINIMIZE", Const, 0}, + {"SW_NORMAL", Const, 0}, + {"SW_RESTORE", Const, 0}, + {"SW_SHOW", Const, 0}, + {"SW_SHOWDEFAULT", Const, 0}, + {"SW_SHOWMAXIMIZED", Const, 0}, + {"SW_SHOWMINIMIZED", Const, 0}, + {"SW_SHOWMINNOACTIVE", Const, 0}, + {"SW_SHOWNA", Const, 0}, + {"SW_SHOWNOACTIVATE", Const, 0}, + {"SW_SHOWNORMAL", Const, 0}, + {"SYMBOLIC_LINK_FLAG_DIRECTORY", Const, 4}, + {"SYNCHRONIZE", Const, 0}, + {"SYSCTL_VERSION", Const, 1}, + {"SYSCTL_VERS_0", Const, 1}, + {"SYSCTL_VERS_1", Const, 1}, + {"SYSCTL_VERS_MASK", Const, 1}, + {"SYS_ABORT2", Const, 0}, + {"SYS_ACCEPT", Const, 0}, + {"SYS_ACCEPT4", Const, 0}, + {"SYS_ACCEPT_NOCANCEL", Const, 0}, + {"SYS_ACCESS", Const, 0}, + {"SYS_ACCESS_EXTENDED", Const, 0}, + {"SYS_ACCT", Const, 0}, + {"SYS_ADD_KEY", Const, 0}, + {"SYS_ADD_PROFIL", Const, 0}, + {"SYS_ADJFREQ", Const, 1}, + {"SYS_ADJTIME", Const, 0}, + {"SYS_ADJTIMEX", Const, 0}, + {"SYS_AFS_SYSCALL", Const, 0}, + {"SYS_AIO_CANCEL", Const, 0}, + {"SYS_AIO_ERROR", Const, 0}, + {"SYS_AIO_FSYNC", Const, 0}, + {"SYS_AIO_MLOCK", Const, 14}, + {"SYS_AIO_READ", Const, 0}, + {"SYS_AIO_RETURN", Const, 0}, + {"SYS_AIO_SUSPEND", Const, 0}, + {"SYS_AIO_SUSPEND_NOCANCEL", Const, 0}, + {"SYS_AIO_WAITCOMPLETE", Const, 14}, + {"SYS_AIO_WRITE", Const, 0}, + {"SYS_ALARM", Const, 0}, + {"SYS_ARCH_PRCTL", Const, 0}, + {"SYS_ARM_FADVISE64_64", Const, 0}, + {"SYS_ARM_SYNC_FILE_RANGE", Const, 0}, + {"SYS_ATGETMSG", Const, 0}, + {"SYS_ATPGETREQ", Const, 0}, + {"SYS_ATPGETRSP", Const, 0}, + {"SYS_ATPSNDREQ", Const, 0}, + {"SYS_ATPSNDRSP", Const, 0}, + {"SYS_ATPUTMSG", Const, 0}, + {"SYS_ATSOCKET", Const, 0}, + {"SYS_AUDIT", Const, 0}, + {"SYS_AUDITCTL", Const, 0}, + {"SYS_AUDITON", Const, 0}, + {"SYS_AUDIT_SESSION_JOIN", Const, 0}, + {"SYS_AUDIT_SESSION_PORT", Const, 0}, + {"SYS_AUDIT_SESSION_SELF", Const, 0}, + {"SYS_BDFLUSH", Const, 0}, + {"SYS_BIND", Const, 0}, + {"SYS_BINDAT", Const, 3}, + {"SYS_BREAK", Const, 0}, + {"SYS_BRK", Const, 0}, + {"SYS_BSDTHREAD_CREATE", Const, 0}, + {"SYS_BSDTHREAD_REGISTER", Const, 0}, + {"SYS_BSDTHREAD_TERMINATE", Const, 0}, + {"SYS_CAPGET", Const, 0}, + {"SYS_CAPSET", Const, 0}, + {"SYS_CAP_ENTER", Const, 0}, + {"SYS_CAP_FCNTLS_GET", Const, 1}, + {"SYS_CAP_FCNTLS_LIMIT", Const, 1}, + {"SYS_CAP_GETMODE", Const, 0}, + {"SYS_CAP_GETRIGHTS", 
Const, 0}, + {"SYS_CAP_IOCTLS_GET", Const, 1}, + {"SYS_CAP_IOCTLS_LIMIT", Const, 1}, + {"SYS_CAP_NEW", Const, 0}, + {"SYS_CAP_RIGHTS_GET", Const, 1}, + {"SYS_CAP_RIGHTS_LIMIT", Const, 1}, + {"SYS_CHDIR", Const, 0}, + {"SYS_CHFLAGS", Const, 0}, + {"SYS_CHFLAGSAT", Const, 3}, + {"SYS_CHMOD", Const, 0}, + {"SYS_CHMOD_EXTENDED", Const, 0}, + {"SYS_CHOWN", Const, 0}, + {"SYS_CHOWN32", Const, 0}, + {"SYS_CHROOT", Const, 0}, + {"SYS_CHUD", Const, 0}, + {"SYS_CLOCK_ADJTIME", Const, 0}, + {"SYS_CLOCK_GETCPUCLOCKID2", Const, 1}, + {"SYS_CLOCK_GETRES", Const, 0}, + {"SYS_CLOCK_GETTIME", Const, 0}, + {"SYS_CLOCK_NANOSLEEP", Const, 0}, + {"SYS_CLOCK_SETTIME", Const, 0}, + {"SYS_CLONE", Const, 0}, + {"SYS_CLOSE", Const, 0}, + {"SYS_CLOSEFROM", Const, 0}, + {"SYS_CLOSE_NOCANCEL", Const, 0}, + {"SYS_CONNECT", Const, 0}, + {"SYS_CONNECTAT", Const, 3}, + {"SYS_CONNECT_NOCANCEL", Const, 0}, + {"SYS_COPYFILE", Const, 0}, + {"SYS_CPUSET", Const, 0}, + {"SYS_CPUSET_GETAFFINITY", Const, 0}, + {"SYS_CPUSET_GETID", Const, 0}, + {"SYS_CPUSET_SETAFFINITY", Const, 0}, + {"SYS_CPUSET_SETID", Const, 0}, + {"SYS_CREAT", Const, 0}, + {"SYS_CREATE_MODULE", Const, 0}, + {"SYS_CSOPS", Const, 0}, + {"SYS_CSOPS_AUDITTOKEN", Const, 16}, + {"SYS_DELETE", Const, 0}, + {"SYS_DELETE_MODULE", Const, 0}, + {"SYS_DUP", Const, 0}, + {"SYS_DUP2", Const, 0}, + {"SYS_DUP3", Const, 0}, + {"SYS_EACCESS", Const, 0}, + {"SYS_EPOLL_CREATE", Const, 0}, + {"SYS_EPOLL_CREATE1", Const, 0}, + {"SYS_EPOLL_CTL", Const, 0}, + {"SYS_EPOLL_CTL_OLD", Const, 0}, + {"SYS_EPOLL_PWAIT", Const, 0}, + {"SYS_EPOLL_WAIT", Const, 0}, + {"SYS_EPOLL_WAIT_OLD", Const, 0}, + {"SYS_EVENTFD", Const, 0}, + {"SYS_EVENTFD2", Const, 0}, + {"SYS_EXCHANGEDATA", Const, 0}, + {"SYS_EXECVE", Const, 0}, + {"SYS_EXIT", Const, 0}, + {"SYS_EXIT_GROUP", Const, 0}, + {"SYS_EXTATTRCTL", Const, 0}, + {"SYS_EXTATTR_DELETE_FD", Const, 0}, + {"SYS_EXTATTR_DELETE_FILE", Const, 0}, + {"SYS_EXTATTR_DELETE_LINK", Const, 0}, + {"SYS_EXTATTR_GET_FD", Const, 0}, + {"SYS_EXTATTR_GET_FILE", Const, 0}, + {"SYS_EXTATTR_GET_LINK", Const, 0}, + {"SYS_EXTATTR_LIST_FD", Const, 0}, + {"SYS_EXTATTR_LIST_FILE", Const, 0}, + {"SYS_EXTATTR_LIST_LINK", Const, 0}, + {"SYS_EXTATTR_SET_FD", Const, 0}, + {"SYS_EXTATTR_SET_FILE", Const, 0}, + {"SYS_EXTATTR_SET_LINK", Const, 0}, + {"SYS_FACCESSAT", Const, 0}, + {"SYS_FADVISE64", Const, 0}, + {"SYS_FADVISE64_64", Const, 0}, + {"SYS_FALLOCATE", Const, 0}, + {"SYS_FANOTIFY_INIT", Const, 0}, + {"SYS_FANOTIFY_MARK", Const, 0}, + {"SYS_FCHDIR", Const, 0}, + {"SYS_FCHFLAGS", Const, 0}, + {"SYS_FCHMOD", Const, 0}, + {"SYS_FCHMODAT", Const, 0}, + {"SYS_FCHMOD_EXTENDED", Const, 0}, + {"SYS_FCHOWN", Const, 0}, + {"SYS_FCHOWN32", Const, 0}, + {"SYS_FCHOWNAT", Const, 0}, + {"SYS_FCHROOT", Const, 1}, + {"SYS_FCNTL", Const, 0}, + {"SYS_FCNTL64", Const, 0}, + {"SYS_FCNTL_NOCANCEL", Const, 0}, + {"SYS_FDATASYNC", Const, 0}, + {"SYS_FEXECVE", Const, 0}, + {"SYS_FFCLOCK_GETCOUNTER", Const, 0}, + {"SYS_FFCLOCK_GETESTIMATE", Const, 0}, + {"SYS_FFCLOCK_SETESTIMATE", Const, 0}, + {"SYS_FFSCTL", Const, 0}, + {"SYS_FGETATTRLIST", Const, 0}, + {"SYS_FGETXATTR", Const, 0}, + {"SYS_FHOPEN", Const, 0}, + {"SYS_FHSTAT", Const, 0}, + {"SYS_FHSTATFS", Const, 0}, + {"SYS_FILEPORT_MAKEFD", Const, 0}, + {"SYS_FILEPORT_MAKEPORT", Const, 0}, + {"SYS_FKTRACE", Const, 1}, + {"SYS_FLISTXATTR", Const, 0}, + {"SYS_FLOCK", Const, 0}, + {"SYS_FORK", Const, 0}, + {"SYS_FPATHCONF", Const, 0}, + {"SYS_FREEBSD6_FTRUNCATE", Const, 0}, + {"SYS_FREEBSD6_LSEEK", Const, 0}, + {"SYS_FREEBSD6_MMAP", Const, 0}, + 
{"SYS_FREEBSD6_PREAD", Const, 0}, + {"SYS_FREEBSD6_PWRITE", Const, 0}, + {"SYS_FREEBSD6_TRUNCATE", Const, 0}, + {"SYS_FREMOVEXATTR", Const, 0}, + {"SYS_FSCTL", Const, 0}, + {"SYS_FSETATTRLIST", Const, 0}, + {"SYS_FSETXATTR", Const, 0}, + {"SYS_FSGETPATH", Const, 0}, + {"SYS_FSTAT", Const, 0}, + {"SYS_FSTAT64", Const, 0}, + {"SYS_FSTAT64_EXTENDED", Const, 0}, + {"SYS_FSTATAT", Const, 0}, + {"SYS_FSTATAT64", Const, 0}, + {"SYS_FSTATFS", Const, 0}, + {"SYS_FSTATFS64", Const, 0}, + {"SYS_FSTATV", Const, 0}, + {"SYS_FSTATVFS1", Const, 1}, + {"SYS_FSTAT_EXTENDED", Const, 0}, + {"SYS_FSYNC", Const, 0}, + {"SYS_FSYNC_NOCANCEL", Const, 0}, + {"SYS_FSYNC_RANGE", Const, 1}, + {"SYS_FTIME", Const, 0}, + {"SYS_FTRUNCATE", Const, 0}, + {"SYS_FTRUNCATE64", Const, 0}, + {"SYS_FUTEX", Const, 0}, + {"SYS_FUTIMENS", Const, 1}, + {"SYS_FUTIMES", Const, 0}, + {"SYS_FUTIMESAT", Const, 0}, + {"SYS_GETATTRLIST", Const, 0}, + {"SYS_GETAUDIT", Const, 0}, + {"SYS_GETAUDIT_ADDR", Const, 0}, + {"SYS_GETAUID", Const, 0}, + {"SYS_GETCONTEXT", Const, 0}, + {"SYS_GETCPU", Const, 0}, + {"SYS_GETCWD", Const, 0}, + {"SYS_GETDENTS", Const, 0}, + {"SYS_GETDENTS64", Const, 0}, + {"SYS_GETDIRENTRIES", Const, 0}, + {"SYS_GETDIRENTRIES64", Const, 0}, + {"SYS_GETDIRENTRIESATTR", Const, 0}, + {"SYS_GETDTABLECOUNT", Const, 1}, + {"SYS_GETDTABLESIZE", Const, 0}, + {"SYS_GETEGID", Const, 0}, + {"SYS_GETEGID32", Const, 0}, + {"SYS_GETEUID", Const, 0}, + {"SYS_GETEUID32", Const, 0}, + {"SYS_GETFH", Const, 0}, + {"SYS_GETFSSTAT", Const, 0}, + {"SYS_GETFSSTAT64", Const, 0}, + {"SYS_GETGID", Const, 0}, + {"SYS_GETGID32", Const, 0}, + {"SYS_GETGROUPS", Const, 0}, + {"SYS_GETGROUPS32", Const, 0}, + {"SYS_GETHOSTUUID", Const, 0}, + {"SYS_GETITIMER", Const, 0}, + {"SYS_GETLCID", Const, 0}, + {"SYS_GETLOGIN", Const, 0}, + {"SYS_GETLOGINCLASS", Const, 0}, + {"SYS_GETPEERNAME", Const, 0}, + {"SYS_GETPGID", Const, 0}, + {"SYS_GETPGRP", Const, 0}, + {"SYS_GETPID", Const, 0}, + {"SYS_GETPMSG", Const, 0}, + {"SYS_GETPPID", Const, 0}, + {"SYS_GETPRIORITY", Const, 0}, + {"SYS_GETRESGID", Const, 0}, + {"SYS_GETRESGID32", Const, 0}, + {"SYS_GETRESUID", Const, 0}, + {"SYS_GETRESUID32", Const, 0}, + {"SYS_GETRLIMIT", Const, 0}, + {"SYS_GETRTABLE", Const, 1}, + {"SYS_GETRUSAGE", Const, 0}, + {"SYS_GETSGROUPS", Const, 0}, + {"SYS_GETSID", Const, 0}, + {"SYS_GETSOCKNAME", Const, 0}, + {"SYS_GETSOCKOPT", Const, 0}, + {"SYS_GETTHRID", Const, 1}, + {"SYS_GETTID", Const, 0}, + {"SYS_GETTIMEOFDAY", Const, 0}, + {"SYS_GETUID", Const, 0}, + {"SYS_GETUID32", Const, 0}, + {"SYS_GETVFSSTAT", Const, 1}, + {"SYS_GETWGROUPS", Const, 0}, + {"SYS_GETXATTR", Const, 0}, + {"SYS_GET_KERNEL_SYMS", Const, 0}, + {"SYS_GET_MEMPOLICY", Const, 0}, + {"SYS_GET_ROBUST_LIST", Const, 0}, + {"SYS_GET_THREAD_AREA", Const, 0}, + {"SYS_GSSD_SYSCALL", Const, 14}, + {"SYS_GTTY", Const, 0}, + {"SYS_IDENTITYSVC", Const, 0}, + {"SYS_IDLE", Const, 0}, + {"SYS_INITGROUPS", Const, 0}, + {"SYS_INIT_MODULE", Const, 0}, + {"SYS_INOTIFY_ADD_WATCH", Const, 0}, + {"SYS_INOTIFY_INIT", Const, 0}, + {"SYS_INOTIFY_INIT1", Const, 0}, + {"SYS_INOTIFY_RM_WATCH", Const, 0}, + {"SYS_IOCTL", Const, 0}, + {"SYS_IOPERM", Const, 0}, + {"SYS_IOPL", Const, 0}, + {"SYS_IOPOLICYSYS", Const, 0}, + {"SYS_IOPRIO_GET", Const, 0}, + {"SYS_IOPRIO_SET", Const, 0}, + {"SYS_IO_CANCEL", Const, 0}, + {"SYS_IO_DESTROY", Const, 0}, + {"SYS_IO_GETEVENTS", Const, 0}, + {"SYS_IO_SETUP", Const, 0}, + {"SYS_IO_SUBMIT", Const, 0}, + {"SYS_IPC", Const, 0}, + {"SYS_ISSETUGID", Const, 0}, + {"SYS_JAIL", Const, 0}, + {"SYS_JAIL_ATTACH", Const, 
0}, + {"SYS_JAIL_GET", Const, 0}, + {"SYS_JAIL_REMOVE", Const, 0}, + {"SYS_JAIL_SET", Const, 0}, + {"SYS_KAS_INFO", Const, 16}, + {"SYS_KDEBUG_TRACE", Const, 0}, + {"SYS_KENV", Const, 0}, + {"SYS_KEVENT", Const, 0}, + {"SYS_KEVENT64", Const, 0}, + {"SYS_KEXEC_LOAD", Const, 0}, + {"SYS_KEYCTL", Const, 0}, + {"SYS_KILL", Const, 0}, + {"SYS_KLDFIND", Const, 0}, + {"SYS_KLDFIRSTMOD", Const, 0}, + {"SYS_KLDLOAD", Const, 0}, + {"SYS_KLDNEXT", Const, 0}, + {"SYS_KLDSTAT", Const, 0}, + {"SYS_KLDSYM", Const, 0}, + {"SYS_KLDUNLOAD", Const, 0}, + {"SYS_KLDUNLOADF", Const, 0}, + {"SYS_KMQ_NOTIFY", Const, 14}, + {"SYS_KMQ_OPEN", Const, 14}, + {"SYS_KMQ_SETATTR", Const, 14}, + {"SYS_KMQ_TIMEDRECEIVE", Const, 14}, + {"SYS_KMQ_TIMEDSEND", Const, 14}, + {"SYS_KMQ_UNLINK", Const, 14}, + {"SYS_KQUEUE", Const, 0}, + {"SYS_KQUEUE1", Const, 1}, + {"SYS_KSEM_CLOSE", Const, 14}, + {"SYS_KSEM_DESTROY", Const, 14}, + {"SYS_KSEM_GETVALUE", Const, 14}, + {"SYS_KSEM_INIT", Const, 14}, + {"SYS_KSEM_OPEN", Const, 14}, + {"SYS_KSEM_POST", Const, 14}, + {"SYS_KSEM_TIMEDWAIT", Const, 14}, + {"SYS_KSEM_TRYWAIT", Const, 14}, + {"SYS_KSEM_UNLINK", Const, 14}, + {"SYS_KSEM_WAIT", Const, 14}, + {"SYS_KTIMER_CREATE", Const, 0}, + {"SYS_KTIMER_DELETE", Const, 0}, + {"SYS_KTIMER_GETOVERRUN", Const, 0}, + {"SYS_KTIMER_GETTIME", Const, 0}, + {"SYS_KTIMER_SETTIME", Const, 0}, + {"SYS_KTRACE", Const, 0}, + {"SYS_LCHFLAGS", Const, 0}, + {"SYS_LCHMOD", Const, 0}, + {"SYS_LCHOWN", Const, 0}, + {"SYS_LCHOWN32", Const, 0}, + {"SYS_LEDGER", Const, 16}, + {"SYS_LGETFH", Const, 0}, + {"SYS_LGETXATTR", Const, 0}, + {"SYS_LINK", Const, 0}, + {"SYS_LINKAT", Const, 0}, + {"SYS_LIO_LISTIO", Const, 0}, + {"SYS_LISTEN", Const, 0}, + {"SYS_LISTXATTR", Const, 0}, + {"SYS_LLISTXATTR", Const, 0}, + {"SYS_LOCK", Const, 0}, + {"SYS_LOOKUP_DCOOKIE", Const, 0}, + {"SYS_LPATHCONF", Const, 0}, + {"SYS_LREMOVEXATTR", Const, 0}, + {"SYS_LSEEK", Const, 0}, + {"SYS_LSETXATTR", Const, 0}, + {"SYS_LSTAT", Const, 0}, + {"SYS_LSTAT64", Const, 0}, + {"SYS_LSTAT64_EXTENDED", Const, 0}, + {"SYS_LSTATV", Const, 0}, + {"SYS_LSTAT_EXTENDED", Const, 0}, + {"SYS_LUTIMES", Const, 0}, + {"SYS_MAC_SYSCALL", Const, 0}, + {"SYS_MADVISE", Const, 0}, + {"SYS_MADVISE1", Const, 0}, + {"SYS_MAXSYSCALL", Const, 0}, + {"SYS_MBIND", Const, 0}, + {"SYS_MIGRATE_PAGES", Const, 0}, + {"SYS_MINCORE", Const, 0}, + {"SYS_MINHERIT", Const, 0}, + {"SYS_MKCOMPLEX", Const, 0}, + {"SYS_MKDIR", Const, 0}, + {"SYS_MKDIRAT", Const, 0}, + {"SYS_MKDIR_EXTENDED", Const, 0}, + {"SYS_MKFIFO", Const, 0}, + {"SYS_MKFIFOAT", Const, 0}, + {"SYS_MKFIFO_EXTENDED", Const, 0}, + {"SYS_MKNOD", Const, 0}, + {"SYS_MKNODAT", Const, 0}, + {"SYS_MLOCK", Const, 0}, + {"SYS_MLOCKALL", Const, 0}, + {"SYS_MMAP", Const, 0}, + {"SYS_MMAP2", Const, 0}, + {"SYS_MODCTL", Const, 1}, + {"SYS_MODFIND", Const, 0}, + {"SYS_MODFNEXT", Const, 0}, + {"SYS_MODIFY_LDT", Const, 0}, + {"SYS_MODNEXT", Const, 0}, + {"SYS_MODSTAT", Const, 0}, + {"SYS_MODWATCH", Const, 0}, + {"SYS_MOUNT", Const, 0}, + {"SYS_MOVE_PAGES", Const, 0}, + {"SYS_MPROTECT", Const, 0}, + {"SYS_MPX", Const, 0}, + {"SYS_MQUERY", Const, 1}, + {"SYS_MQ_GETSETATTR", Const, 0}, + {"SYS_MQ_NOTIFY", Const, 0}, + {"SYS_MQ_OPEN", Const, 0}, + {"SYS_MQ_TIMEDRECEIVE", Const, 0}, + {"SYS_MQ_TIMEDSEND", Const, 0}, + {"SYS_MQ_UNLINK", Const, 0}, + {"SYS_MREMAP", Const, 0}, + {"SYS_MSGCTL", Const, 0}, + {"SYS_MSGGET", Const, 0}, + {"SYS_MSGRCV", Const, 0}, + {"SYS_MSGRCV_NOCANCEL", Const, 0}, + {"SYS_MSGSND", Const, 0}, + {"SYS_MSGSND_NOCANCEL", Const, 0}, + {"SYS_MSGSYS", Const, 0}, + 
{"SYS_MSYNC", Const, 0}, + {"SYS_MSYNC_NOCANCEL", Const, 0}, + {"SYS_MUNLOCK", Const, 0}, + {"SYS_MUNLOCKALL", Const, 0}, + {"SYS_MUNMAP", Const, 0}, + {"SYS_NAME_TO_HANDLE_AT", Const, 0}, + {"SYS_NANOSLEEP", Const, 0}, + {"SYS_NEWFSTATAT", Const, 0}, + {"SYS_NFSCLNT", Const, 0}, + {"SYS_NFSSERVCTL", Const, 0}, + {"SYS_NFSSVC", Const, 0}, + {"SYS_NFSTAT", Const, 0}, + {"SYS_NICE", Const, 0}, + {"SYS_NLM_SYSCALL", Const, 14}, + {"SYS_NLSTAT", Const, 0}, + {"SYS_NMOUNT", Const, 0}, + {"SYS_NSTAT", Const, 0}, + {"SYS_NTP_ADJTIME", Const, 0}, + {"SYS_NTP_GETTIME", Const, 0}, + {"SYS_NUMA_GETAFFINITY", Const, 14}, + {"SYS_NUMA_SETAFFINITY", Const, 14}, + {"SYS_OABI_SYSCALL_BASE", Const, 0}, + {"SYS_OBREAK", Const, 0}, + {"SYS_OLDFSTAT", Const, 0}, + {"SYS_OLDLSTAT", Const, 0}, + {"SYS_OLDOLDUNAME", Const, 0}, + {"SYS_OLDSTAT", Const, 0}, + {"SYS_OLDUNAME", Const, 0}, + {"SYS_OPEN", Const, 0}, + {"SYS_OPENAT", Const, 0}, + {"SYS_OPENBSD_POLL", Const, 0}, + {"SYS_OPEN_BY_HANDLE_AT", Const, 0}, + {"SYS_OPEN_DPROTECTED_NP", Const, 16}, + {"SYS_OPEN_EXTENDED", Const, 0}, + {"SYS_OPEN_NOCANCEL", Const, 0}, + {"SYS_OVADVISE", Const, 0}, + {"SYS_PACCEPT", Const, 1}, + {"SYS_PATHCONF", Const, 0}, + {"SYS_PAUSE", Const, 0}, + {"SYS_PCICONFIG_IOBASE", Const, 0}, + {"SYS_PCICONFIG_READ", Const, 0}, + {"SYS_PCICONFIG_WRITE", Const, 0}, + {"SYS_PDFORK", Const, 0}, + {"SYS_PDGETPID", Const, 0}, + {"SYS_PDKILL", Const, 0}, + {"SYS_PERF_EVENT_OPEN", Const, 0}, + {"SYS_PERSONALITY", Const, 0}, + {"SYS_PID_HIBERNATE", Const, 0}, + {"SYS_PID_RESUME", Const, 0}, + {"SYS_PID_SHUTDOWN_SOCKETS", Const, 0}, + {"SYS_PID_SUSPEND", Const, 0}, + {"SYS_PIPE", Const, 0}, + {"SYS_PIPE2", Const, 0}, + {"SYS_PIVOT_ROOT", Const, 0}, + {"SYS_PMC_CONTROL", Const, 1}, + {"SYS_PMC_GET_INFO", Const, 1}, + {"SYS_POLL", Const, 0}, + {"SYS_POLLTS", Const, 1}, + {"SYS_POLL_NOCANCEL", Const, 0}, + {"SYS_POSIX_FADVISE", Const, 0}, + {"SYS_POSIX_FALLOCATE", Const, 0}, + {"SYS_POSIX_OPENPT", Const, 0}, + {"SYS_POSIX_SPAWN", Const, 0}, + {"SYS_PPOLL", Const, 0}, + {"SYS_PRCTL", Const, 0}, + {"SYS_PREAD", Const, 0}, + {"SYS_PREAD64", Const, 0}, + {"SYS_PREADV", Const, 0}, + {"SYS_PREAD_NOCANCEL", Const, 0}, + {"SYS_PRLIMIT64", Const, 0}, + {"SYS_PROCCTL", Const, 3}, + {"SYS_PROCESS_POLICY", Const, 0}, + {"SYS_PROCESS_VM_READV", Const, 0}, + {"SYS_PROCESS_VM_WRITEV", Const, 0}, + {"SYS_PROC_INFO", Const, 0}, + {"SYS_PROF", Const, 0}, + {"SYS_PROFIL", Const, 0}, + {"SYS_PSELECT", Const, 0}, + {"SYS_PSELECT6", Const, 0}, + {"SYS_PSET_ASSIGN", Const, 1}, + {"SYS_PSET_CREATE", Const, 1}, + {"SYS_PSET_DESTROY", Const, 1}, + {"SYS_PSYNCH_CVBROAD", Const, 0}, + {"SYS_PSYNCH_CVCLRPREPOST", Const, 0}, + {"SYS_PSYNCH_CVSIGNAL", Const, 0}, + {"SYS_PSYNCH_CVWAIT", Const, 0}, + {"SYS_PSYNCH_MUTEXDROP", Const, 0}, + {"SYS_PSYNCH_MUTEXWAIT", Const, 0}, + {"SYS_PSYNCH_RW_DOWNGRADE", Const, 0}, + {"SYS_PSYNCH_RW_LONGRDLOCK", Const, 0}, + {"SYS_PSYNCH_RW_RDLOCK", Const, 0}, + {"SYS_PSYNCH_RW_UNLOCK", Const, 0}, + {"SYS_PSYNCH_RW_UNLOCK2", Const, 0}, + {"SYS_PSYNCH_RW_UPGRADE", Const, 0}, + {"SYS_PSYNCH_RW_WRLOCK", Const, 0}, + {"SYS_PSYNCH_RW_YIELDWRLOCK", Const, 0}, + {"SYS_PTRACE", Const, 0}, + {"SYS_PUTPMSG", Const, 0}, + {"SYS_PWRITE", Const, 0}, + {"SYS_PWRITE64", Const, 0}, + {"SYS_PWRITEV", Const, 0}, + {"SYS_PWRITE_NOCANCEL", Const, 0}, + {"SYS_QUERY_MODULE", Const, 0}, + {"SYS_QUOTACTL", Const, 0}, + {"SYS_RASCTL", Const, 1}, + {"SYS_RCTL_ADD_RULE", Const, 0}, + {"SYS_RCTL_GET_LIMITS", Const, 0}, + {"SYS_RCTL_GET_RACCT", Const, 0}, + 
{"SYS_RCTL_GET_RULES", Const, 0}, + {"SYS_RCTL_REMOVE_RULE", Const, 0}, + {"SYS_READ", Const, 0}, + {"SYS_READAHEAD", Const, 0}, + {"SYS_READDIR", Const, 0}, + {"SYS_READLINK", Const, 0}, + {"SYS_READLINKAT", Const, 0}, + {"SYS_READV", Const, 0}, + {"SYS_READV_NOCANCEL", Const, 0}, + {"SYS_READ_NOCANCEL", Const, 0}, + {"SYS_REBOOT", Const, 0}, + {"SYS_RECV", Const, 0}, + {"SYS_RECVFROM", Const, 0}, + {"SYS_RECVFROM_NOCANCEL", Const, 0}, + {"SYS_RECVMMSG", Const, 0}, + {"SYS_RECVMSG", Const, 0}, + {"SYS_RECVMSG_NOCANCEL", Const, 0}, + {"SYS_REMAP_FILE_PAGES", Const, 0}, + {"SYS_REMOVEXATTR", Const, 0}, + {"SYS_RENAME", Const, 0}, + {"SYS_RENAMEAT", Const, 0}, + {"SYS_REQUEST_KEY", Const, 0}, + {"SYS_RESTART_SYSCALL", Const, 0}, + {"SYS_REVOKE", Const, 0}, + {"SYS_RFORK", Const, 0}, + {"SYS_RMDIR", Const, 0}, + {"SYS_RTPRIO", Const, 0}, + {"SYS_RTPRIO_THREAD", Const, 0}, + {"SYS_RT_SIGACTION", Const, 0}, + {"SYS_RT_SIGPENDING", Const, 0}, + {"SYS_RT_SIGPROCMASK", Const, 0}, + {"SYS_RT_SIGQUEUEINFO", Const, 0}, + {"SYS_RT_SIGRETURN", Const, 0}, + {"SYS_RT_SIGSUSPEND", Const, 0}, + {"SYS_RT_SIGTIMEDWAIT", Const, 0}, + {"SYS_RT_TGSIGQUEUEINFO", Const, 0}, + {"SYS_SBRK", Const, 0}, + {"SYS_SCHED_GETAFFINITY", Const, 0}, + {"SYS_SCHED_GETPARAM", Const, 0}, + {"SYS_SCHED_GETSCHEDULER", Const, 0}, + {"SYS_SCHED_GET_PRIORITY_MAX", Const, 0}, + {"SYS_SCHED_GET_PRIORITY_MIN", Const, 0}, + {"SYS_SCHED_RR_GET_INTERVAL", Const, 0}, + {"SYS_SCHED_SETAFFINITY", Const, 0}, + {"SYS_SCHED_SETPARAM", Const, 0}, + {"SYS_SCHED_SETSCHEDULER", Const, 0}, + {"SYS_SCHED_YIELD", Const, 0}, + {"SYS_SCTP_GENERIC_RECVMSG", Const, 0}, + {"SYS_SCTP_GENERIC_SENDMSG", Const, 0}, + {"SYS_SCTP_GENERIC_SENDMSG_IOV", Const, 0}, + {"SYS_SCTP_PEELOFF", Const, 0}, + {"SYS_SEARCHFS", Const, 0}, + {"SYS_SECURITY", Const, 0}, + {"SYS_SELECT", Const, 0}, + {"SYS_SELECT_NOCANCEL", Const, 0}, + {"SYS_SEMCONFIG", Const, 1}, + {"SYS_SEMCTL", Const, 0}, + {"SYS_SEMGET", Const, 0}, + {"SYS_SEMOP", Const, 0}, + {"SYS_SEMSYS", Const, 0}, + {"SYS_SEMTIMEDOP", Const, 0}, + {"SYS_SEM_CLOSE", Const, 0}, + {"SYS_SEM_DESTROY", Const, 0}, + {"SYS_SEM_GETVALUE", Const, 0}, + {"SYS_SEM_INIT", Const, 0}, + {"SYS_SEM_OPEN", Const, 0}, + {"SYS_SEM_POST", Const, 0}, + {"SYS_SEM_TRYWAIT", Const, 0}, + {"SYS_SEM_UNLINK", Const, 0}, + {"SYS_SEM_WAIT", Const, 0}, + {"SYS_SEM_WAIT_NOCANCEL", Const, 0}, + {"SYS_SEND", Const, 0}, + {"SYS_SENDFILE", Const, 0}, + {"SYS_SENDFILE64", Const, 0}, + {"SYS_SENDMMSG", Const, 0}, + {"SYS_SENDMSG", Const, 0}, + {"SYS_SENDMSG_NOCANCEL", Const, 0}, + {"SYS_SENDTO", Const, 0}, + {"SYS_SENDTO_NOCANCEL", Const, 0}, + {"SYS_SETATTRLIST", Const, 0}, + {"SYS_SETAUDIT", Const, 0}, + {"SYS_SETAUDIT_ADDR", Const, 0}, + {"SYS_SETAUID", Const, 0}, + {"SYS_SETCONTEXT", Const, 0}, + {"SYS_SETDOMAINNAME", Const, 0}, + {"SYS_SETEGID", Const, 0}, + {"SYS_SETEUID", Const, 0}, + {"SYS_SETFIB", Const, 0}, + {"SYS_SETFSGID", Const, 0}, + {"SYS_SETFSGID32", Const, 0}, + {"SYS_SETFSUID", Const, 0}, + {"SYS_SETFSUID32", Const, 0}, + {"SYS_SETGID", Const, 0}, + {"SYS_SETGID32", Const, 0}, + {"SYS_SETGROUPS", Const, 0}, + {"SYS_SETGROUPS32", Const, 0}, + {"SYS_SETHOSTNAME", Const, 0}, + {"SYS_SETITIMER", Const, 0}, + {"SYS_SETLCID", Const, 0}, + {"SYS_SETLOGIN", Const, 0}, + {"SYS_SETLOGINCLASS", Const, 0}, + {"SYS_SETNS", Const, 0}, + {"SYS_SETPGID", Const, 0}, + {"SYS_SETPRIORITY", Const, 0}, + {"SYS_SETPRIVEXEC", Const, 0}, + {"SYS_SETREGID", Const, 0}, + {"SYS_SETREGID32", Const, 0}, + {"SYS_SETRESGID", Const, 0}, + {"SYS_SETRESGID32", Const, 
0}, + {"SYS_SETRESUID", Const, 0}, + {"SYS_SETRESUID32", Const, 0}, + {"SYS_SETREUID", Const, 0}, + {"SYS_SETREUID32", Const, 0}, + {"SYS_SETRLIMIT", Const, 0}, + {"SYS_SETRTABLE", Const, 1}, + {"SYS_SETSGROUPS", Const, 0}, + {"SYS_SETSID", Const, 0}, + {"SYS_SETSOCKOPT", Const, 0}, + {"SYS_SETTID", Const, 0}, + {"SYS_SETTID_WITH_PID", Const, 0}, + {"SYS_SETTIMEOFDAY", Const, 0}, + {"SYS_SETUID", Const, 0}, + {"SYS_SETUID32", Const, 0}, + {"SYS_SETWGROUPS", Const, 0}, + {"SYS_SETXATTR", Const, 0}, + {"SYS_SET_MEMPOLICY", Const, 0}, + {"SYS_SET_ROBUST_LIST", Const, 0}, + {"SYS_SET_THREAD_AREA", Const, 0}, + {"SYS_SET_TID_ADDRESS", Const, 0}, + {"SYS_SGETMASK", Const, 0}, + {"SYS_SHARED_REGION_CHECK_NP", Const, 0}, + {"SYS_SHARED_REGION_MAP_AND_SLIDE_NP", Const, 0}, + {"SYS_SHMAT", Const, 0}, + {"SYS_SHMCTL", Const, 0}, + {"SYS_SHMDT", Const, 0}, + {"SYS_SHMGET", Const, 0}, + {"SYS_SHMSYS", Const, 0}, + {"SYS_SHM_OPEN", Const, 0}, + {"SYS_SHM_UNLINK", Const, 0}, + {"SYS_SHUTDOWN", Const, 0}, + {"SYS_SIGACTION", Const, 0}, + {"SYS_SIGALTSTACK", Const, 0}, + {"SYS_SIGNAL", Const, 0}, + {"SYS_SIGNALFD", Const, 0}, + {"SYS_SIGNALFD4", Const, 0}, + {"SYS_SIGPENDING", Const, 0}, + {"SYS_SIGPROCMASK", Const, 0}, + {"SYS_SIGQUEUE", Const, 0}, + {"SYS_SIGQUEUEINFO", Const, 1}, + {"SYS_SIGRETURN", Const, 0}, + {"SYS_SIGSUSPEND", Const, 0}, + {"SYS_SIGSUSPEND_NOCANCEL", Const, 0}, + {"SYS_SIGTIMEDWAIT", Const, 0}, + {"SYS_SIGWAIT", Const, 0}, + {"SYS_SIGWAITINFO", Const, 0}, + {"SYS_SOCKET", Const, 0}, + {"SYS_SOCKETCALL", Const, 0}, + {"SYS_SOCKETPAIR", Const, 0}, + {"SYS_SPLICE", Const, 0}, + {"SYS_SSETMASK", Const, 0}, + {"SYS_SSTK", Const, 0}, + {"SYS_STACK_SNAPSHOT", Const, 0}, + {"SYS_STAT", Const, 0}, + {"SYS_STAT64", Const, 0}, + {"SYS_STAT64_EXTENDED", Const, 0}, + {"SYS_STATFS", Const, 0}, + {"SYS_STATFS64", Const, 0}, + {"SYS_STATV", Const, 0}, + {"SYS_STATVFS1", Const, 1}, + {"SYS_STAT_EXTENDED", Const, 0}, + {"SYS_STIME", Const, 0}, + {"SYS_STTY", Const, 0}, + {"SYS_SWAPCONTEXT", Const, 0}, + {"SYS_SWAPCTL", Const, 1}, + {"SYS_SWAPOFF", Const, 0}, + {"SYS_SWAPON", Const, 0}, + {"SYS_SYMLINK", Const, 0}, + {"SYS_SYMLINKAT", Const, 0}, + {"SYS_SYNC", Const, 0}, + {"SYS_SYNCFS", Const, 0}, + {"SYS_SYNC_FILE_RANGE", Const, 0}, + {"SYS_SYSARCH", Const, 0}, + {"SYS_SYSCALL", Const, 0}, + {"SYS_SYSCALL_BASE", Const, 0}, + {"SYS_SYSFS", Const, 0}, + {"SYS_SYSINFO", Const, 0}, + {"SYS_SYSLOG", Const, 0}, + {"SYS_TEE", Const, 0}, + {"SYS_TGKILL", Const, 0}, + {"SYS_THREAD_SELFID", Const, 0}, + {"SYS_THR_CREATE", Const, 0}, + {"SYS_THR_EXIT", Const, 0}, + {"SYS_THR_KILL", Const, 0}, + {"SYS_THR_KILL2", Const, 0}, + {"SYS_THR_NEW", Const, 0}, + {"SYS_THR_SELF", Const, 0}, + {"SYS_THR_SET_NAME", Const, 0}, + {"SYS_THR_SUSPEND", Const, 0}, + {"SYS_THR_WAKE", Const, 0}, + {"SYS_TIME", Const, 0}, + {"SYS_TIMERFD_CREATE", Const, 0}, + {"SYS_TIMERFD_GETTIME", Const, 0}, + {"SYS_TIMERFD_SETTIME", Const, 0}, + {"SYS_TIMER_CREATE", Const, 0}, + {"SYS_TIMER_DELETE", Const, 0}, + {"SYS_TIMER_GETOVERRUN", Const, 0}, + {"SYS_TIMER_GETTIME", Const, 0}, + {"SYS_TIMER_SETTIME", Const, 0}, + {"SYS_TIMES", Const, 0}, + {"SYS_TKILL", Const, 0}, + {"SYS_TRUNCATE", Const, 0}, + {"SYS_TRUNCATE64", Const, 0}, + {"SYS_TUXCALL", Const, 0}, + {"SYS_UGETRLIMIT", Const, 0}, + {"SYS_ULIMIT", Const, 0}, + {"SYS_UMASK", Const, 0}, + {"SYS_UMASK_EXTENDED", Const, 0}, + {"SYS_UMOUNT", Const, 0}, + {"SYS_UMOUNT2", Const, 0}, + {"SYS_UNAME", Const, 0}, + {"SYS_UNDELETE", Const, 0}, + {"SYS_UNLINK", Const, 0}, + {"SYS_UNLINKAT", Const, 
0}, + {"SYS_UNMOUNT", Const, 0}, + {"SYS_UNSHARE", Const, 0}, + {"SYS_USELIB", Const, 0}, + {"SYS_USTAT", Const, 0}, + {"SYS_UTIME", Const, 0}, + {"SYS_UTIMENSAT", Const, 0}, + {"SYS_UTIMES", Const, 0}, + {"SYS_UTRACE", Const, 0}, + {"SYS_UUIDGEN", Const, 0}, + {"SYS_VADVISE", Const, 1}, + {"SYS_VFORK", Const, 0}, + {"SYS_VHANGUP", Const, 0}, + {"SYS_VM86", Const, 0}, + {"SYS_VM86OLD", Const, 0}, + {"SYS_VMSPLICE", Const, 0}, + {"SYS_VM_PRESSURE_MONITOR", Const, 0}, + {"SYS_VSERVER", Const, 0}, + {"SYS_WAIT4", Const, 0}, + {"SYS_WAIT4_NOCANCEL", Const, 0}, + {"SYS_WAIT6", Const, 1}, + {"SYS_WAITEVENT", Const, 0}, + {"SYS_WAITID", Const, 0}, + {"SYS_WAITID_NOCANCEL", Const, 0}, + {"SYS_WAITPID", Const, 0}, + {"SYS_WATCHEVENT", Const, 0}, + {"SYS_WORKQ_KERNRETURN", Const, 0}, + {"SYS_WORKQ_OPEN", Const, 0}, + {"SYS_WRITE", Const, 0}, + {"SYS_WRITEV", Const, 0}, + {"SYS_WRITEV_NOCANCEL", Const, 0}, + {"SYS_WRITE_NOCANCEL", Const, 0}, + {"SYS_YIELD", Const, 0}, + {"SYS__LLSEEK", Const, 0}, + {"SYS__LWP_CONTINUE", Const, 1}, + {"SYS__LWP_CREATE", Const, 1}, + {"SYS__LWP_CTL", Const, 1}, + {"SYS__LWP_DETACH", Const, 1}, + {"SYS__LWP_EXIT", Const, 1}, + {"SYS__LWP_GETNAME", Const, 1}, + {"SYS__LWP_GETPRIVATE", Const, 1}, + {"SYS__LWP_KILL", Const, 1}, + {"SYS__LWP_PARK", Const, 1}, + {"SYS__LWP_SELF", Const, 1}, + {"SYS__LWP_SETNAME", Const, 1}, + {"SYS__LWP_SETPRIVATE", Const, 1}, + {"SYS__LWP_SUSPEND", Const, 1}, + {"SYS__LWP_UNPARK", Const, 1}, + {"SYS__LWP_UNPARK_ALL", Const, 1}, + {"SYS__LWP_WAIT", Const, 1}, + {"SYS__LWP_WAKEUP", Const, 1}, + {"SYS__NEWSELECT", Const, 0}, + {"SYS__PSET_BIND", Const, 1}, + {"SYS__SCHED_GETAFFINITY", Const, 1}, + {"SYS__SCHED_GETPARAM", Const, 1}, + {"SYS__SCHED_SETAFFINITY", Const, 1}, + {"SYS__SCHED_SETPARAM", Const, 1}, + {"SYS__SYSCTL", Const, 0}, + {"SYS__UMTX_LOCK", Const, 0}, + {"SYS__UMTX_OP", Const, 0}, + {"SYS__UMTX_UNLOCK", Const, 0}, + {"SYS___ACL_ACLCHECK_FD", Const, 0}, + {"SYS___ACL_ACLCHECK_FILE", Const, 0}, + {"SYS___ACL_ACLCHECK_LINK", Const, 0}, + {"SYS___ACL_DELETE_FD", Const, 0}, + {"SYS___ACL_DELETE_FILE", Const, 0}, + {"SYS___ACL_DELETE_LINK", Const, 0}, + {"SYS___ACL_GET_FD", Const, 0}, + {"SYS___ACL_GET_FILE", Const, 0}, + {"SYS___ACL_GET_LINK", Const, 0}, + {"SYS___ACL_SET_FD", Const, 0}, + {"SYS___ACL_SET_FILE", Const, 0}, + {"SYS___ACL_SET_LINK", Const, 0}, + {"SYS___CAP_RIGHTS_GET", Const, 14}, + {"SYS___CLONE", Const, 1}, + {"SYS___DISABLE_THREADSIGNAL", Const, 0}, + {"SYS___GETCWD", Const, 0}, + {"SYS___GETLOGIN", Const, 1}, + {"SYS___GET_TCB", Const, 1}, + {"SYS___MAC_EXECVE", Const, 0}, + {"SYS___MAC_GETFSSTAT", Const, 0}, + {"SYS___MAC_GET_FD", Const, 0}, + {"SYS___MAC_GET_FILE", Const, 0}, + {"SYS___MAC_GET_LCID", Const, 0}, + {"SYS___MAC_GET_LCTX", Const, 0}, + {"SYS___MAC_GET_LINK", Const, 0}, + {"SYS___MAC_GET_MOUNT", Const, 0}, + {"SYS___MAC_GET_PID", Const, 0}, + {"SYS___MAC_GET_PROC", Const, 0}, + {"SYS___MAC_MOUNT", Const, 0}, + {"SYS___MAC_SET_FD", Const, 0}, + {"SYS___MAC_SET_FILE", Const, 0}, + {"SYS___MAC_SET_LCTX", Const, 0}, + {"SYS___MAC_SET_LINK", Const, 0}, + {"SYS___MAC_SET_PROC", Const, 0}, + {"SYS___MAC_SYSCALL", Const, 0}, + {"SYS___OLD_SEMWAIT_SIGNAL", Const, 0}, + {"SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL", Const, 0}, + {"SYS___POSIX_CHOWN", Const, 1}, + {"SYS___POSIX_FCHOWN", Const, 1}, + {"SYS___POSIX_LCHOWN", Const, 1}, + {"SYS___POSIX_RENAME", Const, 1}, + {"SYS___PTHREAD_CANCELED", Const, 0}, + {"SYS___PTHREAD_CHDIR", Const, 0}, + {"SYS___PTHREAD_FCHDIR", Const, 0}, + {"SYS___PTHREAD_KILL", Const, 0}, + 
{"SYS___PTHREAD_MARKCANCEL", Const, 0}, + {"SYS___PTHREAD_SIGMASK", Const, 0}, + {"SYS___QUOTACTL", Const, 1}, + {"SYS___SEMCTL", Const, 1}, + {"SYS___SEMWAIT_SIGNAL", Const, 0}, + {"SYS___SEMWAIT_SIGNAL_NOCANCEL", Const, 0}, + {"SYS___SETLOGIN", Const, 1}, + {"SYS___SETUGID", Const, 0}, + {"SYS___SET_TCB", Const, 1}, + {"SYS___SIGACTION_SIGTRAMP", Const, 1}, + {"SYS___SIGTIMEDWAIT", Const, 1}, + {"SYS___SIGWAIT", Const, 0}, + {"SYS___SIGWAIT_NOCANCEL", Const, 0}, + {"SYS___SYSCTL", Const, 0}, + {"SYS___TFORK", Const, 1}, + {"SYS___THREXIT", Const, 1}, + {"SYS___THRSIGDIVERT", Const, 1}, + {"SYS___THRSLEEP", Const, 1}, + {"SYS___THRWAKEUP", Const, 1}, + {"S_ARCH1", Const, 1}, + {"S_ARCH2", Const, 1}, + {"S_BLKSIZE", Const, 0}, + {"S_IEXEC", Const, 0}, + {"S_IFBLK", Const, 0}, + {"S_IFCHR", Const, 0}, + {"S_IFDIR", Const, 0}, + {"S_IFIFO", Const, 0}, + {"S_IFLNK", Const, 0}, + {"S_IFMT", Const, 0}, + {"S_IFREG", Const, 0}, + {"S_IFSOCK", Const, 0}, + {"S_IFWHT", Const, 0}, + {"S_IREAD", Const, 0}, + {"S_IRGRP", Const, 0}, + {"S_IROTH", Const, 0}, + {"S_IRUSR", Const, 0}, + {"S_IRWXG", Const, 0}, + {"S_IRWXO", Const, 0}, + {"S_IRWXU", Const, 0}, + {"S_ISGID", Const, 0}, + {"S_ISTXT", Const, 0}, + {"S_ISUID", Const, 0}, + {"S_ISVTX", Const, 0}, + {"S_IWGRP", Const, 0}, + {"S_IWOTH", Const, 0}, + {"S_IWRITE", Const, 0}, + {"S_IWUSR", Const, 0}, + {"S_IXGRP", Const, 0}, + {"S_IXOTH", Const, 0}, + {"S_IXUSR", Const, 0}, + {"S_LOGIN_SET", Const, 1}, + {"SecurityAttributes", Type, 0}, + {"SecurityAttributes.InheritHandle", Field, 0}, + {"SecurityAttributes.Length", Field, 0}, + {"SecurityAttributes.SecurityDescriptor", Field, 0}, + {"Seek", Func, 0}, + {"Select", Func, 0}, + {"Sendfile", Func, 0}, + {"Sendmsg", Func, 0}, + {"SendmsgN", Func, 3}, + {"Sendto", Func, 0}, + {"Servent", Type, 0}, + {"Servent.Aliases", Field, 0}, + {"Servent.Name", Field, 0}, + {"Servent.Port", Field, 0}, + {"Servent.Proto", Field, 0}, + {"SetBpf", Func, 0}, + {"SetBpfBuflen", Func, 0}, + {"SetBpfDatalink", Func, 0}, + {"SetBpfHeadercmpl", Func, 0}, + {"SetBpfImmediate", Func, 0}, + {"SetBpfInterface", Func, 0}, + {"SetBpfPromisc", Func, 0}, + {"SetBpfTimeout", Func, 0}, + {"SetCurrentDirectory", Func, 0}, + {"SetEndOfFile", Func, 0}, + {"SetEnvironmentVariable", Func, 0}, + {"SetFileAttributes", Func, 0}, + {"SetFileCompletionNotificationModes", Func, 2}, + {"SetFilePointer", Func, 0}, + {"SetFileTime", Func, 0}, + {"SetHandleInformation", Func, 0}, + {"SetKevent", Func, 0}, + {"SetLsfPromisc", Func, 0}, + {"SetNonblock", Func, 0}, + {"Setdomainname", Func, 0}, + {"Setegid", Func, 0}, + {"Setenv", Func, 0}, + {"Seteuid", Func, 0}, + {"Setfsgid", Func, 0}, + {"Setfsuid", Func, 0}, + {"Setgid", Func, 0}, + {"Setgroups", Func, 0}, + {"Sethostname", Func, 0}, + {"Setlogin", Func, 0}, + {"Setpgid", Func, 0}, + {"Setpriority", Func, 0}, + {"Setprivexec", Func, 0}, + {"Setregid", Func, 0}, + {"Setresgid", Func, 0}, + {"Setresuid", Func, 0}, + {"Setreuid", Func, 0}, + {"Setrlimit", Func, 0}, + {"Setsid", Func, 0}, + {"Setsockopt", Func, 0}, + {"SetsockoptByte", Func, 0}, + {"SetsockoptICMPv6Filter", Func, 2}, + {"SetsockoptIPMreq", Func, 0}, + {"SetsockoptIPMreqn", Func, 0}, + {"SetsockoptIPv6Mreq", Func, 0}, + {"SetsockoptInet4Addr", Func, 0}, + {"SetsockoptInt", Func, 0}, + {"SetsockoptLinger", Func, 0}, + {"SetsockoptString", Func, 0}, + {"SetsockoptTimeval", Func, 0}, + {"Settimeofday", Func, 0}, + {"Setuid", Func, 0}, + {"Setxattr", Func, 1}, + {"Shutdown", Func, 0}, + {"SidTypeAlias", Const, 0}, + {"SidTypeComputer", 
Const, 0}, + {"SidTypeDeletedAccount", Const, 0}, + {"SidTypeDomain", Const, 0}, + {"SidTypeGroup", Const, 0}, + {"SidTypeInvalid", Const, 0}, + {"SidTypeLabel", Const, 0}, + {"SidTypeUnknown", Const, 0}, + {"SidTypeUser", Const, 0}, + {"SidTypeWellKnownGroup", Const, 0}, + {"Signal", Type, 0}, + {"SizeofBpfHdr", Const, 0}, + {"SizeofBpfInsn", Const, 0}, + {"SizeofBpfProgram", Const, 0}, + {"SizeofBpfStat", Const, 0}, + {"SizeofBpfVersion", Const, 0}, + {"SizeofBpfZbuf", Const, 0}, + {"SizeofBpfZbufHeader", Const, 0}, + {"SizeofCmsghdr", Const, 0}, + {"SizeofICMPv6Filter", Const, 2}, + {"SizeofIPMreq", Const, 0}, + {"SizeofIPMreqn", Const, 0}, + {"SizeofIPv6MTUInfo", Const, 2}, + {"SizeofIPv6Mreq", Const, 0}, + {"SizeofIfAddrmsg", Const, 0}, + {"SizeofIfAnnounceMsghdr", Const, 1}, + {"SizeofIfData", Const, 0}, + {"SizeofIfInfomsg", Const, 0}, + {"SizeofIfMsghdr", Const, 0}, + {"SizeofIfaMsghdr", Const, 0}, + {"SizeofIfmaMsghdr", Const, 0}, + {"SizeofIfmaMsghdr2", Const, 0}, + {"SizeofInet4Pktinfo", Const, 0}, + {"SizeofInet6Pktinfo", Const, 0}, + {"SizeofInotifyEvent", Const, 0}, + {"SizeofLinger", Const, 0}, + {"SizeofMsghdr", Const, 0}, + {"SizeofNlAttr", Const, 0}, + {"SizeofNlMsgerr", Const, 0}, + {"SizeofNlMsghdr", Const, 0}, + {"SizeofRtAttr", Const, 0}, + {"SizeofRtGenmsg", Const, 0}, + {"SizeofRtMetrics", Const, 0}, + {"SizeofRtMsg", Const, 0}, + {"SizeofRtMsghdr", Const, 0}, + {"SizeofRtNexthop", Const, 0}, + {"SizeofSockFilter", Const, 0}, + {"SizeofSockFprog", Const, 0}, + {"SizeofSockaddrAny", Const, 0}, + {"SizeofSockaddrDatalink", Const, 0}, + {"SizeofSockaddrInet4", Const, 0}, + {"SizeofSockaddrInet6", Const, 0}, + {"SizeofSockaddrLinklayer", Const, 0}, + {"SizeofSockaddrNetlink", Const, 0}, + {"SizeofSockaddrUnix", Const, 0}, + {"SizeofTCPInfo", Const, 1}, + {"SizeofUcred", Const, 0}, + {"SlicePtrFromStrings", Func, 1}, + {"SockFilter", Type, 0}, + {"SockFilter.Code", Field, 0}, + {"SockFilter.Jf", Field, 0}, + {"SockFilter.Jt", Field, 0}, + {"SockFilter.K", Field, 0}, + {"SockFprog", Type, 0}, + {"SockFprog.Filter", Field, 0}, + {"SockFprog.Len", Field, 0}, + {"SockFprog.Pad_cgo_0", Field, 0}, + {"Sockaddr", Type, 0}, + {"SockaddrDatalink", Type, 0}, + {"SockaddrDatalink.Alen", Field, 0}, + {"SockaddrDatalink.Data", Field, 0}, + {"SockaddrDatalink.Family", Field, 0}, + {"SockaddrDatalink.Index", Field, 0}, + {"SockaddrDatalink.Len", Field, 0}, + {"SockaddrDatalink.Nlen", Field, 0}, + {"SockaddrDatalink.Slen", Field, 0}, + {"SockaddrDatalink.Type", Field, 0}, + {"SockaddrGen", Type, 0}, + {"SockaddrInet4", Type, 0}, + {"SockaddrInet4.Addr", Field, 0}, + {"SockaddrInet4.Port", Field, 0}, + {"SockaddrInet6", Type, 0}, + {"SockaddrInet6.Addr", Field, 0}, + {"SockaddrInet6.Port", Field, 0}, + {"SockaddrInet6.ZoneId", Field, 0}, + {"SockaddrLinklayer", Type, 0}, + {"SockaddrLinklayer.Addr", Field, 0}, + {"SockaddrLinklayer.Halen", Field, 0}, + {"SockaddrLinklayer.Hatype", Field, 0}, + {"SockaddrLinklayer.Ifindex", Field, 0}, + {"SockaddrLinklayer.Pkttype", Field, 0}, + {"SockaddrLinklayer.Protocol", Field, 0}, + {"SockaddrNetlink", Type, 0}, + {"SockaddrNetlink.Family", Field, 0}, + {"SockaddrNetlink.Groups", Field, 0}, + {"SockaddrNetlink.Pad", Field, 0}, + {"SockaddrNetlink.Pid", Field, 0}, + {"SockaddrUnix", Type, 0}, + {"SockaddrUnix.Name", Field, 0}, + {"Socket", Func, 0}, + {"SocketControlMessage", Type, 0}, + {"SocketControlMessage.Data", Field, 0}, + {"SocketControlMessage.Header", Field, 0}, + {"SocketDisableIPv6", Var, 0}, + {"Socketpair", Func, 0}, + {"Splice", Func, 
0}, + {"StartProcess", Func, 0}, + {"StartupInfo", Type, 0}, + {"StartupInfo.Cb", Field, 0}, + {"StartupInfo.Desktop", Field, 0}, + {"StartupInfo.FillAttribute", Field, 0}, + {"StartupInfo.Flags", Field, 0}, + {"StartupInfo.ShowWindow", Field, 0}, + {"StartupInfo.StdErr", Field, 0}, + {"StartupInfo.StdInput", Field, 0}, + {"StartupInfo.StdOutput", Field, 0}, + {"StartupInfo.Title", Field, 0}, + {"StartupInfo.X", Field, 0}, + {"StartupInfo.XCountChars", Field, 0}, + {"StartupInfo.XSize", Field, 0}, + {"StartupInfo.Y", Field, 0}, + {"StartupInfo.YCountChars", Field, 0}, + {"StartupInfo.YSize", Field, 0}, + {"Stat", Func, 0}, + {"Stat_t", Type, 0}, + {"Stat_t.Atim", Field, 0}, + {"Stat_t.Atim_ext", Field, 12}, + {"Stat_t.Atimespec", Field, 0}, + {"Stat_t.Birthtimespec", Field, 0}, + {"Stat_t.Blksize", Field, 0}, + {"Stat_t.Blocks", Field, 0}, + {"Stat_t.Btim_ext", Field, 12}, + {"Stat_t.Ctim", Field, 0}, + {"Stat_t.Ctim_ext", Field, 12}, + {"Stat_t.Ctimespec", Field, 0}, + {"Stat_t.Dev", Field, 0}, + {"Stat_t.Flags", Field, 0}, + {"Stat_t.Gen", Field, 0}, + {"Stat_t.Gid", Field, 0}, + {"Stat_t.Ino", Field, 0}, + {"Stat_t.Lspare", Field, 0}, + {"Stat_t.Lspare0", Field, 2}, + {"Stat_t.Lspare1", Field, 2}, + {"Stat_t.Mode", Field, 0}, + {"Stat_t.Mtim", Field, 0}, + {"Stat_t.Mtim_ext", Field, 12}, + {"Stat_t.Mtimespec", Field, 0}, + {"Stat_t.Nlink", Field, 0}, + {"Stat_t.Pad_cgo_0", Field, 0}, + {"Stat_t.Pad_cgo_1", Field, 0}, + {"Stat_t.Pad_cgo_2", Field, 0}, + {"Stat_t.Padding0", Field, 12}, + {"Stat_t.Padding1", Field, 12}, + {"Stat_t.Qspare", Field, 0}, + {"Stat_t.Rdev", Field, 0}, + {"Stat_t.Size", Field, 0}, + {"Stat_t.Spare", Field, 2}, + {"Stat_t.Uid", Field, 0}, + {"Stat_t.X__pad0", Field, 0}, + {"Stat_t.X__pad1", Field, 0}, + {"Stat_t.X__pad2", Field, 0}, + {"Stat_t.X__st_birthtim", Field, 2}, + {"Stat_t.X__st_ino", Field, 0}, + {"Stat_t.X__unused", Field, 0}, + {"Statfs", Func, 0}, + {"Statfs_t", Type, 0}, + {"Statfs_t.Asyncreads", Field, 0}, + {"Statfs_t.Asyncwrites", Field, 0}, + {"Statfs_t.Bavail", Field, 0}, + {"Statfs_t.Bfree", Field, 0}, + {"Statfs_t.Blocks", Field, 0}, + {"Statfs_t.Bsize", Field, 0}, + {"Statfs_t.Charspare", Field, 0}, + {"Statfs_t.F_asyncreads", Field, 2}, + {"Statfs_t.F_asyncwrites", Field, 2}, + {"Statfs_t.F_bavail", Field, 2}, + {"Statfs_t.F_bfree", Field, 2}, + {"Statfs_t.F_blocks", Field, 2}, + {"Statfs_t.F_bsize", Field, 2}, + {"Statfs_t.F_ctime", Field, 2}, + {"Statfs_t.F_favail", Field, 2}, + {"Statfs_t.F_ffree", Field, 2}, + {"Statfs_t.F_files", Field, 2}, + {"Statfs_t.F_flags", Field, 2}, + {"Statfs_t.F_fsid", Field, 2}, + {"Statfs_t.F_fstypename", Field, 2}, + {"Statfs_t.F_iosize", Field, 2}, + {"Statfs_t.F_mntfromname", Field, 2}, + {"Statfs_t.F_mntfromspec", Field, 3}, + {"Statfs_t.F_mntonname", Field, 2}, + {"Statfs_t.F_namemax", Field, 2}, + {"Statfs_t.F_owner", Field, 2}, + {"Statfs_t.F_spare", Field, 2}, + {"Statfs_t.F_syncreads", Field, 2}, + {"Statfs_t.F_syncwrites", Field, 2}, + {"Statfs_t.Ffree", Field, 0}, + {"Statfs_t.Files", Field, 0}, + {"Statfs_t.Flags", Field, 0}, + {"Statfs_t.Frsize", Field, 0}, + {"Statfs_t.Fsid", Field, 0}, + {"Statfs_t.Fssubtype", Field, 0}, + {"Statfs_t.Fstypename", Field, 0}, + {"Statfs_t.Iosize", Field, 0}, + {"Statfs_t.Mntfromname", Field, 0}, + {"Statfs_t.Mntonname", Field, 0}, + {"Statfs_t.Mount_info", Field, 2}, + {"Statfs_t.Namelen", Field, 0}, + {"Statfs_t.Namemax", Field, 0}, + {"Statfs_t.Owner", Field, 0}, + {"Statfs_t.Pad_cgo_0", Field, 0}, + {"Statfs_t.Pad_cgo_1", Field, 2}, + {"Statfs_t.Reserved", 
Field, 0}, + {"Statfs_t.Spare", Field, 0}, + {"Statfs_t.Syncreads", Field, 0}, + {"Statfs_t.Syncwrites", Field, 0}, + {"Statfs_t.Type", Field, 0}, + {"Statfs_t.Version", Field, 0}, + {"Stderr", Var, 0}, + {"Stdin", Var, 0}, + {"Stdout", Var, 0}, + {"StringBytePtr", Func, 0}, + {"StringByteSlice", Func, 0}, + {"StringSlicePtr", Func, 0}, + {"StringToSid", Func, 0}, + {"StringToUTF16", Func, 0}, + {"StringToUTF16Ptr", Func, 0}, + {"Symlink", Func, 0}, + {"Sync", Func, 0}, + {"SyncFileRange", Func, 0}, + {"SysProcAttr", Type, 0}, + {"SysProcAttr.AdditionalInheritedHandles", Field, 17}, + {"SysProcAttr.AmbientCaps", Field, 9}, + {"SysProcAttr.CgroupFD", Field, 20}, + {"SysProcAttr.Chroot", Field, 0}, + {"SysProcAttr.Cloneflags", Field, 2}, + {"SysProcAttr.CmdLine", Field, 0}, + {"SysProcAttr.CreationFlags", Field, 1}, + {"SysProcAttr.Credential", Field, 0}, + {"SysProcAttr.Ctty", Field, 1}, + {"SysProcAttr.Foreground", Field, 5}, + {"SysProcAttr.GidMappings", Field, 4}, + {"SysProcAttr.GidMappingsEnableSetgroups", Field, 5}, + {"SysProcAttr.HideWindow", Field, 0}, + {"SysProcAttr.Jail", Field, 21}, + {"SysProcAttr.NoInheritHandles", Field, 16}, + {"SysProcAttr.Noctty", Field, 0}, + {"SysProcAttr.ParentProcess", Field, 17}, + {"SysProcAttr.Pdeathsig", Field, 0}, + {"SysProcAttr.Pgid", Field, 5}, + {"SysProcAttr.PidFD", Field, 22}, + {"SysProcAttr.ProcessAttributes", Field, 13}, + {"SysProcAttr.Ptrace", Field, 0}, + {"SysProcAttr.Setctty", Field, 0}, + {"SysProcAttr.Setpgid", Field, 0}, + {"SysProcAttr.Setsid", Field, 0}, + {"SysProcAttr.ThreadAttributes", Field, 13}, + {"SysProcAttr.Token", Field, 10}, + {"SysProcAttr.UidMappings", Field, 4}, + {"SysProcAttr.Unshareflags", Field, 7}, + {"SysProcAttr.UseCgroupFD", Field, 20}, + {"SysProcIDMap", Type, 4}, + {"SysProcIDMap.ContainerID", Field, 4}, + {"SysProcIDMap.HostID", Field, 4}, + {"SysProcIDMap.Size", Field, 4}, + {"Syscall", Func, 0}, + {"Syscall12", Func, 0}, + {"Syscall15", Func, 0}, + {"Syscall18", Func, 12}, + {"Syscall6", Func, 0}, + {"Syscall9", Func, 0}, + {"SyscallN", Func, 18}, + {"Sysctl", Func, 0}, + {"SysctlUint32", Func, 0}, + {"Sysctlnode", Type, 2}, + {"Sysctlnode.Flags", Field, 2}, + {"Sysctlnode.Name", Field, 2}, + {"Sysctlnode.Num", Field, 2}, + {"Sysctlnode.Un", Field, 2}, + {"Sysctlnode.Ver", Field, 2}, + {"Sysctlnode.X__rsvd", Field, 2}, + {"Sysctlnode.X_sysctl_desc", Field, 2}, + {"Sysctlnode.X_sysctl_func", Field, 2}, + {"Sysctlnode.X_sysctl_parent", Field, 2}, + {"Sysctlnode.X_sysctl_size", Field, 2}, + {"Sysinfo", Func, 0}, + {"Sysinfo_t", Type, 0}, + {"Sysinfo_t.Bufferram", Field, 0}, + {"Sysinfo_t.Freehigh", Field, 0}, + {"Sysinfo_t.Freeram", Field, 0}, + {"Sysinfo_t.Freeswap", Field, 0}, + {"Sysinfo_t.Loads", Field, 0}, + {"Sysinfo_t.Pad", Field, 0}, + {"Sysinfo_t.Pad_cgo_0", Field, 0}, + {"Sysinfo_t.Pad_cgo_1", Field, 0}, + {"Sysinfo_t.Procs", Field, 0}, + {"Sysinfo_t.Sharedram", Field, 0}, + {"Sysinfo_t.Totalhigh", Field, 0}, + {"Sysinfo_t.Totalram", Field, 0}, + {"Sysinfo_t.Totalswap", Field, 0}, + {"Sysinfo_t.Unit", Field, 0}, + {"Sysinfo_t.Uptime", Field, 0}, + {"Sysinfo_t.X_f", Field, 0}, + {"Systemtime", Type, 0}, + {"Systemtime.Day", Field, 0}, + {"Systemtime.DayOfWeek", Field, 0}, + {"Systemtime.Hour", Field, 0}, + {"Systemtime.Milliseconds", Field, 0}, + {"Systemtime.Minute", Field, 0}, + {"Systemtime.Month", Field, 0}, + {"Systemtime.Second", Field, 0}, + {"Systemtime.Year", Field, 0}, + {"TCGETS", Const, 0}, + {"TCIFLUSH", Const, 1}, + {"TCIOFLUSH", Const, 1}, + {"TCOFLUSH", Const, 1}, + {"TCPInfo", 
Type, 1}, + {"TCPInfo.Advmss", Field, 1}, + {"TCPInfo.Ato", Field, 1}, + {"TCPInfo.Backoff", Field, 1}, + {"TCPInfo.Ca_state", Field, 1}, + {"TCPInfo.Fackets", Field, 1}, + {"TCPInfo.Last_ack_recv", Field, 1}, + {"TCPInfo.Last_ack_sent", Field, 1}, + {"TCPInfo.Last_data_recv", Field, 1}, + {"TCPInfo.Last_data_sent", Field, 1}, + {"TCPInfo.Lost", Field, 1}, + {"TCPInfo.Options", Field, 1}, + {"TCPInfo.Pad_cgo_0", Field, 1}, + {"TCPInfo.Pmtu", Field, 1}, + {"TCPInfo.Probes", Field, 1}, + {"TCPInfo.Rcv_mss", Field, 1}, + {"TCPInfo.Rcv_rtt", Field, 1}, + {"TCPInfo.Rcv_space", Field, 1}, + {"TCPInfo.Rcv_ssthresh", Field, 1}, + {"TCPInfo.Reordering", Field, 1}, + {"TCPInfo.Retrans", Field, 1}, + {"TCPInfo.Retransmits", Field, 1}, + {"TCPInfo.Rto", Field, 1}, + {"TCPInfo.Rtt", Field, 1}, + {"TCPInfo.Rttvar", Field, 1}, + {"TCPInfo.Sacked", Field, 1}, + {"TCPInfo.Snd_cwnd", Field, 1}, + {"TCPInfo.Snd_mss", Field, 1}, + {"TCPInfo.Snd_ssthresh", Field, 1}, + {"TCPInfo.State", Field, 1}, + {"TCPInfo.Total_retrans", Field, 1}, + {"TCPInfo.Unacked", Field, 1}, + {"TCPKeepalive", Type, 3}, + {"TCPKeepalive.Interval", Field, 3}, + {"TCPKeepalive.OnOff", Field, 3}, + {"TCPKeepalive.Time", Field, 3}, + {"TCP_CA_NAME_MAX", Const, 0}, + {"TCP_CONGCTL", Const, 1}, + {"TCP_CONGESTION", Const, 0}, + {"TCP_CONNECTIONTIMEOUT", Const, 0}, + {"TCP_CORK", Const, 0}, + {"TCP_DEFER_ACCEPT", Const, 0}, + {"TCP_ENABLE_ECN", Const, 16}, + {"TCP_INFO", Const, 0}, + {"TCP_KEEPALIVE", Const, 0}, + {"TCP_KEEPCNT", Const, 0}, + {"TCP_KEEPIDLE", Const, 0}, + {"TCP_KEEPINIT", Const, 1}, + {"TCP_KEEPINTVL", Const, 0}, + {"TCP_LINGER2", Const, 0}, + {"TCP_MAXBURST", Const, 0}, + {"TCP_MAXHLEN", Const, 0}, + {"TCP_MAXOLEN", Const, 0}, + {"TCP_MAXSEG", Const, 0}, + {"TCP_MAXWIN", Const, 0}, + {"TCP_MAX_SACK", Const, 0}, + {"TCP_MAX_WINSHIFT", Const, 0}, + {"TCP_MD5SIG", Const, 0}, + {"TCP_MD5SIG_MAXKEYLEN", Const, 0}, + {"TCP_MINMSS", Const, 0}, + {"TCP_MINMSSOVERLOAD", Const, 0}, + {"TCP_MSS", Const, 0}, + {"TCP_NODELAY", Const, 0}, + {"TCP_NOOPT", Const, 0}, + {"TCP_NOPUSH", Const, 0}, + {"TCP_NOTSENT_LOWAT", Const, 16}, + {"TCP_NSTATES", Const, 1}, + {"TCP_QUICKACK", Const, 0}, + {"TCP_RXT_CONNDROPTIME", Const, 0}, + {"TCP_RXT_FINDROP", Const, 0}, + {"TCP_SACK_ENABLE", Const, 1}, + {"TCP_SENDMOREACKS", Const, 16}, + {"TCP_SYNCNT", Const, 0}, + {"TCP_VENDOR", Const, 3}, + {"TCP_WINDOW_CLAMP", Const, 0}, + {"TCSAFLUSH", Const, 1}, + {"TCSETS", Const, 0}, + {"TF_DISCONNECT", Const, 0}, + {"TF_REUSE_SOCKET", Const, 0}, + {"TF_USE_DEFAULT_WORKER", Const, 0}, + {"TF_USE_KERNEL_APC", Const, 0}, + {"TF_USE_SYSTEM_THREAD", Const, 0}, + {"TF_WRITE_BEHIND", Const, 0}, + {"TH32CS_INHERIT", Const, 4}, + {"TH32CS_SNAPALL", Const, 4}, + {"TH32CS_SNAPHEAPLIST", Const, 4}, + {"TH32CS_SNAPMODULE", Const, 4}, + {"TH32CS_SNAPMODULE32", Const, 4}, + {"TH32CS_SNAPPROCESS", Const, 4}, + {"TH32CS_SNAPTHREAD", Const, 4}, + {"TIME_ZONE_ID_DAYLIGHT", Const, 0}, + {"TIME_ZONE_ID_STANDARD", Const, 0}, + {"TIME_ZONE_ID_UNKNOWN", Const, 0}, + {"TIOCCBRK", Const, 0}, + {"TIOCCDTR", Const, 0}, + {"TIOCCONS", Const, 0}, + {"TIOCDCDTIMESTAMP", Const, 0}, + {"TIOCDRAIN", Const, 0}, + {"TIOCDSIMICROCODE", Const, 0}, + {"TIOCEXCL", Const, 0}, + {"TIOCEXT", Const, 0}, + {"TIOCFLAG_CDTRCTS", Const, 1}, + {"TIOCFLAG_CLOCAL", Const, 1}, + {"TIOCFLAG_CRTSCTS", Const, 1}, + {"TIOCFLAG_MDMBUF", Const, 1}, + {"TIOCFLAG_PPS", Const, 1}, + {"TIOCFLAG_SOFTCAR", Const, 1}, + {"TIOCFLUSH", Const, 0}, + {"TIOCGDEV", Const, 0}, + {"TIOCGDRAINWAIT", Const, 0}, + {"TIOCGETA", Const, 
0}, + {"TIOCGETD", Const, 0}, + {"TIOCGFLAGS", Const, 1}, + {"TIOCGICOUNT", Const, 0}, + {"TIOCGLCKTRMIOS", Const, 0}, + {"TIOCGLINED", Const, 1}, + {"TIOCGPGRP", Const, 0}, + {"TIOCGPTN", Const, 0}, + {"TIOCGQSIZE", Const, 1}, + {"TIOCGRANTPT", Const, 1}, + {"TIOCGRS485", Const, 0}, + {"TIOCGSERIAL", Const, 0}, + {"TIOCGSID", Const, 0}, + {"TIOCGSIZE", Const, 1}, + {"TIOCGSOFTCAR", Const, 0}, + {"TIOCGTSTAMP", Const, 1}, + {"TIOCGWINSZ", Const, 0}, + {"TIOCINQ", Const, 0}, + {"TIOCIXOFF", Const, 0}, + {"TIOCIXON", Const, 0}, + {"TIOCLINUX", Const, 0}, + {"TIOCMBIC", Const, 0}, + {"TIOCMBIS", Const, 0}, + {"TIOCMGDTRWAIT", Const, 0}, + {"TIOCMGET", Const, 0}, + {"TIOCMIWAIT", Const, 0}, + {"TIOCMODG", Const, 0}, + {"TIOCMODS", Const, 0}, + {"TIOCMSDTRWAIT", Const, 0}, + {"TIOCMSET", Const, 0}, + {"TIOCM_CAR", Const, 0}, + {"TIOCM_CD", Const, 0}, + {"TIOCM_CTS", Const, 0}, + {"TIOCM_DCD", Const, 0}, + {"TIOCM_DSR", Const, 0}, + {"TIOCM_DTR", Const, 0}, + {"TIOCM_LE", Const, 0}, + {"TIOCM_RI", Const, 0}, + {"TIOCM_RNG", Const, 0}, + {"TIOCM_RTS", Const, 0}, + {"TIOCM_SR", Const, 0}, + {"TIOCM_ST", Const, 0}, + {"TIOCNOTTY", Const, 0}, + {"TIOCNXCL", Const, 0}, + {"TIOCOUTQ", Const, 0}, + {"TIOCPKT", Const, 0}, + {"TIOCPKT_DATA", Const, 0}, + {"TIOCPKT_DOSTOP", Const, 0}, + {"TIOCPKT_FLUSHREAD", Const, 0}, + {"TIOCPKT_FLUSHWRITE", Const, 0}, + {"TIOCPKT_IOCTL", Const, 0}, + {"TIOCPKT_NOSTOP", Const, 0}, + {"TIOCPKT_START", Const, 0}, + {"TIOCPKT_STOP", Const, 0}, + {"TIOCPTMASTER", Const, 0}, + {"TIOCPTMGET", Const, 1}, + {"TIOCPTSNAME", Const, 1}, + {"TIOCPTYGNAME", Const, 0}, + {"TIOCPTYGRANT", Const, 0}, + {"TIOCPTYUNLK", Const, 0}, + {"TIOCRCVFRAME", Const, 1}, + {"TIOCREMOTE", Const, 0}, + {"TIOCSBRK", Const, 0}, + {"TIOCSCONS", Const, 0}, + {"TIOCSCTTY", Const, 0}, + {"TIOCSDRAINWAIT", Const, 0}, + {"TIOCSDTR", Const, 0}, + {"TIOCSERCONFIG", Const, 0}, + {"TIOCSERGETLSR", Const, 0}, + {"TIOCSERGETMULTI", Const, 0}, + {"TIOCSERGSTRUCT", Const, 0}, + {"TIOCSERGWILD", Const, 0}, + {"TIOCSERSETMULTI", Const, 0}, + {"TIOCSERSWILD", Const, 0}, + {"TIOCSER_TEMT", Const, 0}, + {"TIOCSETA", Const, 0}, + {"TIOCSETAF", Const, 0}, + {"TIOCSETAW", Const, 0}, + {"TIOCSETD", Const, 0}, + {"TIOCSFLAGS", Const, 1}, + {"TIOCSIG", Const, 0}, + {"TIOCSLCKTRMIOS", Const, 0}, + {"TIOCSLINED", Const, 1}, + {"TIOCSPGRP", Const, 0}, + {"TIOCSPTLCK", Const, 0}, + {"TIOCSQSIZE", Const, 1}, + {"TIOCSRS485", Const, 0}, + {"TIOCSSERIAL", Const, 0}, + {"TIOCSSIZE", Const, 1}, + {"TIOCSSOFTCAR", Const, 0}, + {"TIOCSTART", Const, 0}, + {"TIOCSTAT", Const, 0}, + {"TIOCSTI", Const, 0}, + {"TIOCSTOP", Const, 0}, + {"TIOCSTSTAMP", Const, 1}, + {"TIOCSWINSZ", Const, 0}, + {"TIOCTIMESTAMP", Const, 0}, + {"TIOCUCNTL", Const, 0}, + {"TIOCVHANGUP", Const, 0}, + {"TIOCXMTFRAME", Const, 1}, + {"TOKEN_ADJUST_DEFAULT", Const, 0}, + {"TOKEN_ADJUST_GROUPS", Const, 0}, + {"TOKEN_ADJUST_PRIVILEGES", Const, 0}, + {"TOKEN_ADJUST_SESSIONID", Const, 11}, + {"TOKEN_ALL_ACCESS", Const, 0}, + {"TOKEN_ASSIGN_PRIMARY", Const, 0}, + {"TOKEN_DUPLICATE", Const, 0}, + {"TOKEN_EXECUTE", Const, 0}, + {"TOKEN_IMPERSONATE", Const, 0}, + {"TOKEN_QUERY", Const, 0}, + {"TOKEN_QUERY_SOURCE", Const, 0}, + {"TOKEN_READ", Const, 0}, + {"TOKEN_WRITE", Const, 0}, + {"TOSTOP", Const, 0}, + {"TRUNCATE_EXISTING", Const, 0}, + {"TUNATTACHFILTER", Const, 0}, + {"TUNDETACHFILTER", Const, 0}, + {"TUNGETFEATURES", Const, 0}, + {"TUNGETIFF", Const, 0}, + {"TUNGETSNDBUF", Const, 0}, + {"TUNGETVNETHDRSZ", Const, 0}, + {"TUNSETDEBUG", Const, 0}, + {"TUNSETGROUP", Const, 
0}, + {"TUNSETIFF", Const, 0}, + {"TUNSETLINK", Const, 0}, + {"TUNSETNOCSUM", Const, 0}, + {"TUNSETOFFLOAD", Const, 0}, + {"TUNSETOWNER", Const, 0}, + {"TUNSETPERSIST", Const, 0}, + {"TUNSETSNDBUF", Const, 0}, + {"TUNSETTXFILTER", Const, 0}, + {"TUNSETVNETHDRSZ", Const, 0}, + {"Tee", Func, 0}, + {"TerminateProcess", Func, 0}, + {"Termios", Type, 0}, + {"Termios.Cc", Field, 0}, + {"Termios.Cflag", Field, 0}, + {"Termios.Iflag", Field, 0}, + {"Termios.Ispeed", Field, 0}, + {"Termios.Lflag", Field, 0}, + {"Termios.Line", Field, 0}, + {"Termios.Oflag", Field, 0}, + {"Termios.Ospeed", Field, 0}, + {"Termios.Pad_cgo_0", Field, 0}, + {"Tgkill", Func, 0}, + {"Time", Func, 0}, + {"Time_t", Type, 0}, + {"Times", Func, 0}, + {"Timespec", Type, 0}, + {"Timespec.Nsec", Field, 0}, + {"Timespec.Pad_cgo_0", Field, 2}, + {"Timespec.Sec", Field, 0}, + {"TimespecToNsec", Func, 0}, + {"Timeval", Type, 0}, + {"Timeval.Pad_cgo_0", Field, 0}, + {"Timeval.Sec", Field, 0}, + {"Timeval.Usec", Field, 0}, + {"Timeval32", Type, 0}, + {"Timeval32.Sec", Field, 0}, + {"Timeval32.Usec", Field, 0}, + {"TimevalToNsec", Func, 0}, + {"Timex", Type, 0}, + {"Timex.Calcnt", Field, 0}, + {"Timex.Constant", Field, 0}, + {"Timex.Errcnt", Field, 0}, + {"Timex.Esterror", Field, 0}, + {"Timex.Freq", Field, 0}, + {"Timex.Jitcnt", Field, 0}, + {"Timex.Jitter", Field, 0}, + {"Timex.Maxerror", Field, 0}, + {"Timex.Modes", Field, 0}, + {"Timex.Offset", Field, 0}, + {"Timex.Pad_cgo_0", Field, 0}, + {"Timex.Pad_cgo_1", Field, 0}, + {"Timex.Pad_cgo_2", Field, 0}, + {"Timex.Pad_cgo_3", Field, 0}, + {"Timex.Ppsfreq", Field, 0}, + {"Timex.Precision", Field, 0}, + {"Timex.Shift", Field, 0}, + {"Timex.Stabil", Field, 0}, + {"Timex.Status", Field, 0}, + {"Timex.Stbcnt", Field, 0}, + {"Timex.Tai", Field, 0}, + {"Timex.Tick", Field, 0}, + {"Timex.Time", Field, 0}, + {"Timex.Tolerance", Field, 0}, + {"Timezoneinformation", Type, 0}, + {"Timezoneinformation.Bias", Field, 0}, + {"Timezoneinformation.DaylightBias", Field, 0}, + {"Timezoneinformation.DaylightDate", Field, 0}, + {"Timezoneinformation.DaylightName", Field, 0}, + {"Timezoneinformation.StandardBias", Field, 0}, + {"Timezoneinformation.StandardDate", Field, 0}, + {"Timezoneinformation.StandardName", Field, 0}, + {"Tms", Type, 0}, + {"Tms.Cstime", Field, 0}, + {"Tms.Cutime", Field, 0}, + {"Tms.Stime", Field, 0}, + {"Tms.Utime", Field, 0}, + {"Token", Type, 0}, + {"TokenAccessInformation", Const, 0}, + {"TokenAuditPolicy", Const, 0}, + {"TokenDefaultDacl", Const, 0}, + {"TokenElevation", Const, 0}, + {"TokenElevationType", Const, 0}, + {"TokenGroups", Const, 0}, + {"TokenGroupsAndPrivileges", Const, 0}, + {"TokenHasRestrictions", Const, 0}, + {"TokenImpersonationLevel", Const, 0}, + {"TokenIntegrityLevel", Const, 0}, + {"TokenLinkedToken", Const, 0}, + {"TokenLogonSid", Const, 0}, + {"TokenMandatoryPolicy", Const, 0}, + {"TokenOrigin", Const, 0}, + {"TokenOwner", Const, 0}, + {"TokenPrimaryGroup", Const, 0}, + {"TokenPrivileges", Const, 0}, + {"TokenRestrictedSids", Const, 0}, + {"TokenSandBoxInert", Const, 0}, + {"TokenSessionId", Const, 0}, + {"TokenSessionReference", Const, 0}, + {"TokenSource", Const, 0}, + {"TokenStatistics", Const, 0}, + {"TokenType", Const, 0}, + {"TokenUIAccess", Const, 0}, + {"TokenUser", Const, 0}, + {"TokenVirtualizationAllowed", Const, 0}, + {"TokenVirtualizationEnabled", Const, 0}, + {"Tokenprimarygroup", Type, 0}, + {"Tokenprimarygroup.PrimaryGroup", Field, 0}, + {"Tokenuser", Type, 0}, + {"Tokenuser.User", Field, 0}, + {"TranslateAccountName", Func, 0}, + 
{"TranslateName", Func, 0}, + {"TransmitFile", Func, 0}, + {"TransmitFileBuffers", Type, 0}, + {"TransmitFileBuffers.Head", Field, 0}, + {"TransmitFileBuffers.HeadLength", Field, 0}, + {"TransmitFileBuffers.Tail", Field, 0}, + {"TransmitFileBuffers.TailLength", Field, 0}, + {"Truncate", Func, 0}, + {"UNIX_PATH_MAX", Const, 12}, + {"USAGE_MATCH_TYPE_AND", Const, 0}, + {"USAGE_MATCH_TYPE_OR", Const, 0}, + {"UTF16FromString", Func, 1}, + {"UTF16PtrFromString", Func, 1}, + {"UTF16ToString", Func, 0}, + {"Ucred", Type, 0}, + {"Ucred.Gid", Field, 0}, + {"Ucred.Pid", Field, 0}, + {"Ucred.Uid", Field, 0}, + {"Umask", Func, 0}, + {"Uname", Func, 0}, + {"Undelete", Func, 0}, + {"UnixCredentials", Func, 0}, + {"UnixRights", Func, 0}, + {"Unlink", Func, 0}, + {"Unlinkat", Func, 0}, + {"UnmapViewOfFile", Func, 0}, + {"Unmount", Func, 0}, + {"Unsetenv", Func, 4}, + {"Unshare", Func, 0}, + {"UserInfo10", Type, 0}, + {"UserInfo10.Comment", Field, 0}, + {"UserInfo10.FullName", Field, 0}, + {"UserInfo10.Name", Field, 0}, + {"UserInfo10.UsrComment", Field, 0}, + {"Ustat", Func, 0}, + {"Ustat_t", Type, 0}, + {"Ustat_t.Fname", Field, 0}, + {"Ustat_t.Fpack", Field, 0}, + {"Ustat_t.Pad_cgo_0", Field, 0}, + {"Ustat_t.Pad_cgo_1", Field, 0}, + {"Ustat_t.Tfree", Field, 0}, + {"Ustat_t.Tinode", Field, 0}, + {"Utimbuf", Type, 0}, + {"Utimbuf.Actime", Field, 0}, + {"Utimbuf.Modtime", Field, 0}, + {"Utime", Func, 0}, + {"Utimes", Func, 0}, + {"UtimesNano", Func, 1}, + {"Utsname", Type, 0}, + {"Utsname.Domainname", Field, 0}, + {"Utsname.Machine", Field, 0}, + {"Utsname.Nodename", Field, 0}, + {"Utsname.Release", Field, 0}, + {"Utsname.Sysname", Field, 0}, + {"Utsname.Version", Field, 0}, + {"VDISCARD", Const, 0}, + {"VDSUSP", Const, 1}, + {"VEOF", Const, 0}, + {"VEOL", Const, 0}, + {"VEOL2", Const, 0}, + {"VERASE", Const, 0}, + {"VERASE2", Const, 1}, + {"VINTR", Const, 0}, + {"VKILL", Const, 0}, + {"VLNEXT", Const, 0}, + {"VMIN", Const, 0}, + {"VQUIT", Const, 0}, + {"VREPRINT", Const, 0}, + {"VSTART", Const, 0}, + {"VSTATUS", Const, 1}, + {"VSTOP", Const, 0}, + {"VSUSP", Const, 0}, + {"VSWTC", Const, 0}, + {"VT0", Const, 1}, + {"VT1", Const, 1}, + {"VTDLY", Const, 1}, + {"VTIME", Const, 0}, + {"VWERASE", Const, 0}, + {"VirtualLock", Func, 0}, + {"VirtualUnlock", Func, 0}, + {"WAIT_ABANDONED", Const, 0}, + {"WAIT_FAILED", Const, 0}, + {"WAIT_OBJECT_0", Const, 0}, + {"WAIT_TIMEOUT", Const, 0}, + {"WALL", Const, 0}, + {"WALLSIG", Const, 1}, + {"WALTSIG", Const, 1}, + {"WCLONE", Const, 0}, + {"WCONTINUED", Const, 0}, + {"WCOREFLAG", Const, 0}, + {"WEXITED", Const, 0}, + {"WLINUXCLONE", Const, 0}, + {"WNOHANG", Const, 0}, + {"WNOTHREAD", Const, 0}, + {"WNOWAIT", Const, 0}, + {"WNOZOMBIE", Const, 1}, + {"WOPTSCHECKED", Const, 1}, + {"WORDSIZE", Const, 0}, + {"WSABuf", Type, 0}, + {"WSABuf.Buf", Field, 0}, + {"WSABuf.Len", Field, 0}, + {"WSACleanup", Func, 0}, + {"WSADESCRIPTION_LEN", Const, 0}, + {"WSAData", Type, 0}, + {"WSAData.Description", Field, 0}, + {"WSAData.HighVersion", Field, 0}, + {"WSAData.MaxSockets", Field, 0}, + {"WSAData.MaxUdpDg", Field, 0}, + {"WSAData.SystemStatus", Field, 0}, + {"WSAData.VendorInfo", Field, 0}, + {"WSAData.Version", Field, 0}, + {"WSAEACCES", Const, 2}, + {"WSAECONNABORTED", Const, 9}, + {"WSAECONNRESET", Const, 3}, + {"WSAEnumProtocols", Func, 2}, + {"WSAID_CONNECTEX", Var, 1}, + {"WSAIoctl", Func, 0}, + {"WSAPROTOCOL_LEN", Const, 2}, + {"WSAProtocolChain", Type, 2}, + {"WSAProtocolChain.ChainEntries", Field, 2}, + {"WSAProtocolChain.ChainLen", Field, 2}, + {"WSAProtocolInfo", Type, 2}, 
+ {"WSAProtocolInfo.AddressFamily", Field, 2}, + {"WSAProtocolInfo.CatalogEntryId", Field, 2}, + {"WSAProtocolInfo.MaxSockAddr", Field, 2}, + {"WSAProtocolInfo.MessageSize", Field, 2}, + {"WSAProtocolInfo.MinSockAddr", Field, 2}, + {"WSAProtocolInfo.NetworkByteOrder", Field, 2}, + {"WSAProtocolInfo.Protocol", Field, 2}, + {"WSAProtocolInfo.ProtocolChain", Field, 2}, + {"WSAProtocolInfo.ProtocolMaxOffset", Field, 2}, + {"WSAProtocolInfo.ProtocolName", Field, 2}, + {"WSAProtocolInfo.ProviderFlags", Field, 2}, + {"WSAProtocolInfo.ProviderId", Field, 2}, + {"WSAProtocolInfo.ProviderReserved", Field, 2}, + {"WSAProtocolInfo.SecurityScheme", Field, 2}, + {"WSAProtocolInfo.ServiceFlags1", Field, 2}, + {"WSAProtocolInfo.ServiceFlags2", Field, 2}, + {"WSAProtocolInfo.ServiceFlags3", Field, 2}, + {"WSAProtocolInfo.ServiceFlags4", Field, 2}, + {"WSAProtocolInfo.SocketType", Field, 2}, + {"WSAProtocolInfo.Version", Field, 2}, + {"WSARecv", Func, 0}, + {"WSARecvFrom", Func, 0}, + {"WSASYS_STATUS_LEN", Const, 0}, + {"WSASend", Func, 0}, + {"WSASendTo", Func, 0}, + {"WSASendto", Func, 0}, + {"WSAStartup", Func, 0}, + {"WSTOPPED", Const, 0}, + {"WTRAPPED", Const, 1}, + {"WUNTRACED", Const, 0}, + {"Wait4", Func, 0}, + {"WaitForSingleObject", Func, 0}, + {"WaitStatus", Type, 0}, + {"WaitStatus.ExitCode", Field, 0}, + {"Win32FileAttributeData", Type, 0}, + {"Win32FileAttributeData.CreationTime", Field, 0}, + {"Win32FileAttributeData.FileAttributes", Field, 0}, + {"Win32FileAttributeData.FileSizeHigh", Field, 0}, + {"Win32FileAttributeData.FileSizeLow", Field, 0}, + {"Win32FileAttributeData.LastAccessTime", Field, 0}, + {"Win32FileAttributeData.LastWriteTime", Field, 0}, + {"Win32finddata", Type, 0}, + {"Win32finddata.AlternateFileName", Field, 0}, + {"Win32finddata.CreationTime", Field, 0}, + {"Win32finddata.FileAttributes", Field, 0}, + {"Win32finddata.FileName", Field, 0}, + {"Win32finddata.FileSizeHigh", Field, 0}, + {"Win32finddata.FileSizeLow", Field, 0}, + {"Win32finddata.LastAccessTime", Field, 0}, + {"Win32finddata.LastWriteTime", Field, 0}, + {"Win32finddata.Reserved0", Field, 0}, + {"Win32finddata.Reserved1", Field, 0}, + {"Write", Func, 0}, + {"WriteConsole", Func, 1}, + {"WriteFile", Func, 0}, + {"X509_ASN_ENCODING", Const, 0}, + {"XCASE", Const, 0}, + {"XP1_CONNECTIONLESS", Const, 2}, + {"XP1_CONNECT_DATA", Const, 2}, + {"XP1_DISCONNECT_DATA", Const, 2}, + {"XP1_EXPEDITED_DATA", Const, 2}, + {"XP1_GRACEFUL_CLOSE", Const, 2}, + {"XP1_GUARANTEED_DELIVERY", Const, 2}, + {"XP1_GUARANTEED_ORDER", Const, 2}, + {"XP1_IFS_HANDLES", Const, 2}, + {"XP1_MESSAGE_ORIENTED", Const, 2}, + {"XP1_MULTIPOINT_CONTROL_PLANE", Const, 2}, + {"XP1_MULTIPOINT_DATA_PLANE", Const, 2}, + {"XP1_PARTIAL_MESSAGE", Const, 2}, + {"XP1_PSEUDO_STREAM", Const, 2}, + {"XP1_QOS_SUPPORTED", Const, 2}, + {"XP1_SAN_SUPPORT_SDP", Const, 2}, + {"XP1_SUPPORT_BROADCAST", Const, 2}, + {"XP1_SUPPORT_MULTIPOINT", Const, 2}, + {"XP1_UNI_RECV", Const, 2}, + {"XP1_UNI_SEND", Const, 2}, + }, + "syscall/js": { + {"CopyBytesToGo", Func, 0}, + {"CopyBytesToJS", Func, 0}, + {"Error", Type, 0}, + {"Func", Type, 0}, + {"FuncOf", Func, 0}, + {"Global", Func, 0}, + {"Null", Func, 0}, + {"Type", Type, 0}, + {"TypeBoolean", Const, 0}, + {"TypeFunction", Const, 0}, + {"TypeNull", Const, 0}, + {"TypeNumber", Const, 0}, + {"TypeObject", Const, 0}, + {"TypeString", Const, 0}, + {"TypeSymbol", Const, 0}, + {"TypeUndefined", Const, 0}, + {"Undefined", Func, 0}, + {"Value", Type, 0}, + {"ValueError", Type, 0}, + {"ValueOf", Func, 0}, + }, + "testing": { + 
{"(*B).Cleanup", Method, 14}, + {"(*B).Elapsed", Method, 20}, + {"(*B).Error", Method, 0}, + {"(*B).Errorf", Method, 0}, + {"(*B).Fail", Method, 0}, + {"(*B).FailNow", Method, 0}, + {"(*B).Failed", Method, 0}, + {"(*B).Fatal", Method, 0}, + {"(*B).Fatalf", Method, 0}, + {"(*B).Helper", Method, 9}, + {"(*B).Log", Method, 0}, + {"(*B).Logf", Method, 0}, + {"(*B).Name", Method, 8}, + {"(*B).ReportAllocs", Method, 1}, + {"(*B).ReportMetric", Method, 13}, + {"(*B).ResetTimer", Method, 0}, + {"(*B).Run", Method, 7}, + {"(*B).RunParallel", Method, 3}, + {"(*B).SetBytes", Method, 0}, + {"(*B).SetParallelism", Method, 3}, + {"(*B).Setenv", Method, 17}, + {"(*B).Skip", Method, 1}, + {"(*B).SkipNow", Method, 1}, + {"(*B).Skipf", Method, 1}, + {"(*B).Skipped", Method, 1}, + {"(*B).StartTimer", Method, 0}, + {"(*B).StopTimer", Method, 0}, + {"(*B).TempDir", Method, 15}, + {"(*F).Add", Method, 18}, + {"(*F).Cleanup", Method, 18}, + {"(*F).Error", Method, 18}, + {"(*F).Errorf", Method, 18}, + {"(*F).Fail", Method, 18}, + {"(*F).FailNow", Method, 18}, + {"(*F).Failed", Method, 18}, + {"(*F).Fatal", Method, 18}, + {"(*F).Fatalf", Method, 18}, + {"(*F).Fuzz", Method, 18}, + {"(*F).Helper", Method, 18}, + {"(*F).Log", Method, 18}, + {"(*F).Logf", Method, 18}, + {"(*F).Name", Method, 18}, + {"(*F).Setenv", Method, 18}, + {"(*F).Skip", Method, 18}, + {"(*F).SkipNow", Method, 18}, + {"(*F).Skipf", Method, 18}, + {"(*F).Skipped", Method, 18}, + {"(*F).TempDir", Method, 18}, + {"(*M).Run", Method, 4}, + {"(*PB).Next", Method, 3}, + {"(*T).Cleanup", Method, 14}, + {"(*T).Deadline", Method, 15}, + {"(*T).Error", Method, 0}, + {"(*T).Errorf", Method, 0}, + {"(*T).Fail", Method, 0}, + {"(*T).FailNow", Method, 0}, + {"(*T).Failed", Method, 0}, + {"(*T).Fatal", Method, 0}, + {"(*T).Fatalf", Method, 0}, + {"(*T).Helper", Method, 9}, + {"(*T).Log", Method, 0}, + {"(*T).Logf", Method, 0}, + {"(*T).Name", Method, 8}, + {"(*T).Parallel", Method, 0}, + {"(*T).Run", Method, 7}, + {"(*T).Setenv", Method, 17}, + {"(*T).Skip", Method, 1}, + {"(*T).SkipNow", Method, 1}, + {"(*T).Skipf", Method, 1}, + {"(*T).Skipped", Method, 1}, + {"(*T).TempDir", Method, 15}, + {"(BenchmarkResult).AllocedBytesPerOp", Method, 1}, + {"(BenchmarkResult).AllocsPerOp", Method, 1}, + {"(BenchmarkResult).MemString", Method, 1}, + {"(BenchmarkResult).NsPerOp", Method, 0}, + {"(BenchmarkResult).String", Method, 0}, + {"AllocsPerRun", Func, 1}, + {"B", Type, 0}, + {"B.N", Field, 0}, + {"Benchmark", Func, 0}, + {"BenchmarkResult", Type, 0}, + {"BenchmarkResult.Bytes", Field, 0}, + {"BenchmarkResult.Extra", Field, 13}, + {"BenchmarkResult.MemAllocs", Field, 1}, + {"BenchmarkResult.MemBytes", Field, 1}, + {"BenchmarkResult.N", Field, 0}, + {"BenchmarkResult.T", Field, 0}, + {"Cover", Type, 2}, + {"Cover.Blocks", Field, 2}, + {"Cover.Counters", Field, 2}, + {"Cover.CoveredPackages", Field, 2}, + {"Cover.Mode", Field, 2}, + {"CoverBlock", Type, 2}, + {"CoverBlock.Col0", Field, 2}, + {"CoverBlock.Col1", Field, 2}, + {"CoverBlock.Line0", Field, 2}, + {"CoverBlock.Line1", Field, 2}, + {"CoverBlock.Stmts", Field, 2}, + {"CoverMode", Func, 8}, + {"Coverage", Func, 4}, + {"F", Type, 18}, + {"Init", Func, 13}, + {"InternalBenchmark", Type, 0}, + {"InternalBenchmark.F", Field, 0}, + {"InternalBenchmark.Name", Field, 0}, + {"InternalExample", Type, 0}, + {"InternalExample.F", Field, 0}, + {"InternalExample.Name", Field, 0}, + {"InternalExample.Output", Field, 0}, + {"InternalExample.Unordered", Field, 7}, + {"InternalFuzzTarget", Type, 18}, + {"InternalFuzzTarget.Fn", 
Field, 18}, + {"InternalFuzzTarget.Name", Field, 18}, + {"InternalTest", Type, 0}, + {"InternalTest.F", Field, 0}, + {"InternalTest.Name", Field, 0}, + {"M", Type, 4}, + {"Main", Func, 0}, + {"MainStart", Func, 4}, + {"PB", Type, 3}, + {"RegisterCover", Func, 2}, + {"RunBenchmarks", Func, 0}, + {"RunExamples", Func, 0}, + {"RunTests", Func, 0}, + {"Short", Func, 0}, + {"T", Type, 0}, + {"TB", Type, 2}, + {"Testing", Func, 21}, + {"Verbose", Func, 1}, + }, + "testing/fstest": { + {"(MapFS).Glob", Method, 16}, + {"(MapFS).Open", Method, 16}, + {"(MapFS).ReadDir", Method, 16}, + {"(MapFS).ReadFile", Method, 16}, + {"(MapFS).Stat", Method, 16}, + {"(MapFS).Sub", Method, 16}, + {"MapFS", Type, 16}, + {"MapFile", Type, 16}, + {"MapFile.Data", Field, 16}, + {"MapFile.ModTime", Field, 16}, + {"MapFile.Mode", Field, 16}, + {"MapFile.Sys", Field, 16}, + {"TestFS", Func, 16}, + }, + "testing/iotest": { + {"DataErrReader", Func, 0}, + {"ErrReader", Func, 16}, + {"ErrTimeout", Var, 0}, + {"HalfReader", Func, 0}, + {"NewReadLogger", Func, 0}, + {"NewWriteLogger", Func, 0}, + {"OneByteReader", Func, 0}, + {"TestReader", Func, 16}, + {"TimeoutReader", Func, 0}, + {"TruncateWriter", Func, 0}, + }, + "testing/quick": { + {"(*CheckEqualError).Error", Method, 0}, + {"(*CheckError).Error", Method, 0}, + {"(SetupError).Error", Method, 0}, + {"Check", Func, 0}, + {"CheckEqual", Func, 0}, + {"CheckEqualError", Type, 0}, + {"CheckEqualError.CheckError", Field, 0}, + {"CheckEqualError.Out1", Field, 0}, + {"CheckEqualError.Out2", Field, 0}, + {"CheckError", Type, 0}, + {"CheckError.Count", Field, 0}, + {"CheckError.In", Field, 0}, + {"Config", Type, 0}, + {"Config.MaxCount", Field, 0}, + {"Config.MaxCountScale", Field, 0}, + {"Config.Rand", Field, 0}, + {"Config.Values", Field, 0}, + {"Generator", Type, 0}, + {"SetupError", Type, 0}, + {"Value", Func, 0}, + }, + "testing/slogtest": { + {"Run", Func, 22}, + {"TestHandler", Func, 21}, + }, + "text/scanner": { + {"(*Position).IsValid", Method, 0}, + {"(*Scanner).Init", Method, 0}, + {"(*Scanner).IsValid", Method, 0}, + {"(*Scanner).Next", Method, 0}, + {"(*Scanner).Peek", Method, 0}, + {"(*Scanner).Pos", Method, 0}, + {"(*Scanner).Scan", Method, 0}, + {"(*Scanner).TokenText", Method, 0}, + {"(Position).String", Method, 0}, + {"(Scanner).String", Method, 0}, + {"Char", Const, 0}, + {"Comment", Const, 0}, + {"EOF", Const, 0}, + {"Float", Const, 0}, + {"GoTokens", Const, 0}, + {"GoWhitespace", Const, 0}, + {"Ident", Const, 0}, + {"Int", Const, 0}, + {"Position", Type, 0}, + {"Position.Column", Field, 0}, + {"Position.Filename", Field, 0}, + {"Position.Line", Field, 0}, + {"Position.Offset", Field, 0}, + {"RawString", Const, 0}, + {"ScanChars", Const, 0}, + {"ScanComments", Const, 0}, + {"ScanFloats", Const, 0}, + {"ScanIdents", Const, 0}, + {"ScanInts", Const, 0}, + {"ScanRawStrings", Const, 0}, + {"ScanStrings", Const, 0}, + {"Scanner", Type, 0}, + {"Scanner.Error", Field, 0}, + {"Scanner.ErrorCount", Field, 0}, + {"Scanner.IsIdentRune", Field, 4}, + {"Scanner.Mode", Field, 0}, + {"Scanner.Position", Field, 0}, + {"Scanner.Whitespace", Field, 0}, + {"SkipComments", Const, 0}, + {"String", Const, 0}, + {"TokenString", Func, 0}, + }, + "text/tabwriter": { + {"(*Writer).Flush", Method, 0}, + {"(*Writer).Init", Method, 0}, + {"(*Writer).Write", Method, 0}, + {"AlignRight", Const, 0}, + {"Debug", Const, 0}, + {"DiscardEmptyColumns", Const, 0}, + {"Escape", Const, 0}, + {"FilterHTML", Const, 0}, + {"NewWriter", Func, 0}, + {"StripEscape", Const, 0}, + {"TabIndent", Const, 0}, 
+ {"Writer", Type, 0}, + }, + "text/template": { + {"(*Template).AddParseTree", Method, 0}, + {"(*Template).Clone", Method, 0}, + {"(*Template).DefinedTemplates", Method, 5}, + {"(*Template).Delims", Method, 0}, + {"(*Template).Execute", Method, 0}, + {"(*Template).ExecuteTemplate", Method, 0}, + {"(*Template).Funcs", Method, 0}, + {"(*Template).Lookup", Method, 0}, + {"(*Template).Name", Method, 0}, + {"(*Template).New", Method, 0}, + {"(*Template).Option", Method, 5}, + {"(*Template).Parse", Method, 0}, + {"(*Template).ParseFS", Method, 16}, + {"(*Template).ParseFiles", Method, 0}, + {"(*Template).ParseGlob", Method, 0}, + {"(*Template).Templates", Method, 0}, + {"(ExecError).Error", Method, 6}, + {"(ExecError).Unwrap", Method, 13}, + {"(Template).Copy", Method, 2}, + {"(Template).ErrorContext", Method, 1}, + {"ExecError", Type, 6}, + {"ExecError.Err", Field, 6}, + {"ExecError.Name", Field, 6}, + {"FuncMap", Type, 0}, + {"HTMLEscape", Func, 0}, + {"HTMLEscapeString", Func, 0}, + {"HTMLEscaper", Func, 0}, + {"IsTrue", Func, 6}, + {"JSEscape", Func, 0}, + {"JSEscapeString", Func, 0}, + {"JSEscaper", Func, 0}, + {"Must", Func, 0}, + {"New", Func, 0}, + {"ParseFS", Func, 16}, + {"ParseFiles", Func, 0}, + {"ParseGlob", Func, 0}, + {"Template", Type, 0}, + {"Template.Tree", Field, 0}, + {"URLQueryEscaper", Func, 0}, + }, + "text/template/parse": { + {"(*ActionNode).Copy", Method, 0}, + {"(*ActionNode).String", Method, 0}, + {"(*BoolNode).Copy", Method, 0}, + {"(*BoolNode).String", Method, 0}, + {"(*BranchNode).Copy", Method, 4}, + {"(*BranchNode).String", Method, 0}, + {"(*BreakNode).Copy", Method, 18}, + {"(*BreakNode).String", Method, 18}, + {"(*ChainNode).Add", Method, 1}, + {"(*ChainNode).Copy", Method, 1}, + {"(*ChainNode).String", Method, 1}, + {"(*CommandNode).Copy", Method, 0}, + {"(*CommandNode).String", Method, 0}, + {"(*CommentNode).Copy", Method, 16}, + {"(*CommentNode).String", Method, 16}, + {"(*ContinueNode).Copy", Method, 18}, + {"(*ContinueNode).String", Method, 18}, + {"(*DotNode).Copy", Method, 0}, + {"(*DotNode).String", Method, 0}, + {"(*DotNode).Type", Method, 0}, + {"(*FieldNode).Copy", Method, 0}, + {"(*FieldNode).String", Method, 0}, + {"(*IdentifierNode).Copy", Method, 0}, + {"(*IdentifierNode).SetPos", Method, 1}, + {"(*IdentifierNode).SetTree", Method, 4}, + {"(*IdentifierNode).String", Method, 0}, + {"(*IfNode).Copy", Method, 0}, + {"(*IfNode).String", Method, 0}, + {"(*ListNode).Copy", Method, 0}, + {"(*ListNode).CopyList", Method, 0}, + {"(*ListNode).String", Method, 0}, + {"(*NilNode).Copy", Method, 1}, + {"(*NilNode).String", Method, 1}, + {"(*NilNode).Type", Method, 1}, + {"(*NumberNode).Copy", Method, 0}, + {"(*NumberNode).String", Method, 0}, + {"(*PipeNode).Copy", Method, 0}, + {"(*PipeNode).CopyPipe", Method, 0}, + {"(*PipeNode).String", Method, 0}, + {"(*RangeNode).Copy", Method, 0}, + {"(*RangeNode).String", Method, 0}, + {"(*StringNode).Copy", Method, 0}, + {"(*StringNode).String", Method, 0}, + {"(*TemplateNode).Copy", Method, 0}, + {"(*TemplateNode).String", Method, 0}, + {"(*TextNode).Copy", Method, 0}, + {"(*TextNode).String", Method, 0}, + {"(*Tree).Copy", Method, 2}, + {"(*Tree).ErrorContext", Method, 1}, + {"(*Tree).Parse", Method, 0}, + {"(*VariableNode).Copy", Method, 0}, + {"(*VariableNode).String", Method, 0}, + {"(*WithNode).Copy", Method, 0}, + {"(*WithNode).String", Method, 0}, + {"(ActionNode).Position", Method, 1}, + {"(ActionNode).Type", Method, 0}, + {"(BoolNode).Position", Method, 1}, + {"(BoolNode).Type", Method, 0}, + 
{"(BranchNode).Position", Method, 1}, + {"(BranchNode).Type", Method, 0}, + {"(BreakNode).Position", Method, 18}, + {"(BreakNode).Type", Method, 18}, + {"(ChainNode).Position", Method, 1}, + {"(ChainNode).Type", Method, 1}, + {"(CommandNode).Position", Method, 1}, + {"(CommandNode).Type", Method, 0}, + {"(CommentNode).Position", Method, 16}, + {"(CommentNode).Type", Method, 16}, + {"(ContinueNode).Position", Method, 18}, + {"(ContinueNode).Type", Method, 18}, + {"(DotNode).Position", Method, 1}, + {"(FieldNode).Position", Method, 1}, + {"(FieldNode).Type", Method, 0}, + {"(IdentifierNode).Position", Method, 1}, + {"(IdentifierNode).Type", Method, 0}, + {"(IfNode).Position", Method, 1}, + {"(IfNode).Type", Method, 0}, + {"(ListNode).Position", Method, 1}, + {"(ListNode).Type", Method, 0}, + {"(NilNode).Position", Method, 1}, + {"(NodeType).Type", Method, 0}, + {"(NumberNode).Position", Method, 1}, + {"(NumberNode).Type", Method, 0}, + {"(PipeNode).Position", Method, 1}, + {"(PipeNode).Type", Method, 0}, + {"(Pos).Position", Method, 1}, + {"(RangeNode).Position", Method, 1}, + {"(RangeNode).Type", Method, 0}, + {"(StringNode).Position", Method, 1}, + {"(StringNode).Type", Method, 0}, + {"(TemplateNode).Position", Method, 1}, + {"(TemplateNode).Type", Method, 0}, + {"(TextNode).Position", Method, 1}, + {"(TextNode).Type", Method, 0}, + {"(VariableNode).Position", Method, 1}, + {"(VariableNode).Type", Method, 0}, + {"(WithNode).Position", Method, 1}, + {"(WithNode).Type", Method, 0}, + {"ActionNode", Type, 0}, + {"ActionNode.Line", Field, 0}, + {"ActionNode.NodeType", Field, 0}, + {"ActionNode.Pipe", Field, 0}, + {"ActionNode.Pos", Field, 1}, + {"BoolNode", Type, 0}, + {"BoolNode.NodeType", Field, 0}, + {"BoolNode.Pos", Field, 1}, + {"BoolNode.True", Field, 0}, + {"BranchNode", Type, 0}, + {"BranchNode.ElseList", Field, 0}, + {"BranchNode.Line", Field, 0}, + {"BranchNode.List", Field, 0}, + {"BranchNode.NodeType", Field, 0}, + {"BranchNode.Pipe", Field, 0}, + {"BranchNode.Pos", Field, 1}, + {"BreakNode", Type, 18}, + {"BreakNode.Line", Field, 18}, + {"BreakNode.NodeType", Field, 18}, + {"BreakNode.Pos", Field, 18}, + {"ChainNode", Type, 1}, + {"ChainNode.Field", Field, 1}, + {"ChainNode.Node", Field, 1}, + {"ChainNode.NodeType", Field, 1}, + {"ChainNode.Pos", Field, 1}, + {"CommandNode", Type, 0}, + {"CommandNode.Args", Field, 0}, + {"CommandNode.NodeType", Field, 0}, + {"CommandNode.Pos", Field, 1}, + {"CommentNode", Type, 16}, + {"CommentNode.NodeType", Field, 16}, + {"CommentNode.Pos", Field, 16}, + {"CommentNode.Text", Field, 16}, + {"ContinueNode", Type, 18}, + {"ContinueNode.Line", Field, 18}, + {"ContinueNode.NodeType", Field, 18}, + {"ContinueNode.Pos", Field, 18}, + {"DotNode", Type, 0}, + {"DotNode.NodeType", Field, 4}, + {"DotNode.Pos", Field, 1}, + {"FieldNode", Type, 0}, + {"FieldNode.Ident", Field, 0}, + {"FieldNode.NodeType", Field, 0}, + {"FieldNode.Pos", Field, 1}, + {"IdentifierNode", Type, 0}, + {"IdentifierNode.Ident", Field, 0}, + {"IdentifierNode.NodeType", Field, 0}, + {"IdentifierNode.Pos", Field, 1}, + {"IfNode", Type, 0}, + {"IfNode.BranchNode", Field, 0}, + {"IsEmptyTree", Func, 0}, + {"ListNode", Type, 0}, + {"ListNode.NodeType", Field, 0}, + {"ListNode.Nodes", Field, 0}, + {"ListNode.Pos", Field, 1}, + {"Mode", Type, 16}, + {"New", Func, 0}, + {"NewIdentifier", Func, 0}, + {"NilNode", Type, 1}, + {"NilNode.NodeType", Field, 4}, + {"NilNode.Pos", Field, 1}, + {"Node", Type, 0}, + {"NodeAction", Const, 0}, + {"NodeBool", Const, 0}, + {"NodeBreak", Const, 18}, + 
{"NodeChain", Const, 1}, + {"NodeCommand", Const, 0}, + {"NodeComment", Const, 16}, + {"NodeContinue", Const, 18}, + {"NodeDot", Const, 0}, + {"NodeField", Const, 0}, + {"NodeIdentifier", Const, 0}, + {"NodeIf", Const, 0}, + {"NodeList", Const, 0}, + {"NodeNil", Const, 1}, + {"NodeNumber", Const, 0}, + {"NodePipe", Const, 0}, + {"NodeRange", Const, 0}, + {"NodeString", Const, 0}, + {"NodeTemplate", Const, 0}, + {"NodeText", Const, 0}, + {"NodeType", Type, 0}, + {"NodeVariable", Const, 0}, + {"NodeWith", Const, 0}, + {"NumberNode", Type, 0}, + {"NumberNode.Complex128", Field, 0}, + {"NumberNode.Float64", Field, 0}, + {"NumberNode.Int64", Field, 0}, + {"NumberNode.IsComplex", Field, 0}, + {"NumberNode.IsFloat", Field, 0}, + {"NumberNode.IsInt", Field, 0}, + {"NumberNode.IsUint", Field, 0}, + {"NumberNode.NodeType", Field, 0}, + {"NumberNode.Pos", Field, 1}, + {"NumberNode.Text", Field, 0}, + {"NumberNode.Uint64", Field, 0}, + {"Parse", Func, 0}, + {"ParseComments", Const, 16}, + {"PipeNode", Type, 0}, + {"PipeNode.Cmds", Field, 0}, + {"PipeNode.Decl", Field, 0}, + {"PipeNode.IsAssign", Field, 11}, + {"PipeNode.Line", Field, 0}, + {"PipeNode.NodeType", Field, 0}, + {"PipeNode.Pos", Field, 1}, + {"Pos", Type, 1}, + {"RangeNode", Type, 0}, + {"RangeNode.BranchNode", Field, 0}, + {"SkipFuncCheck", Const, 17}, + {"StringNode", Type, 0}, + {"StringNode.NodeType", Field, 0}, + {"StringNode.Pos", Field, 1}, + {"StringNode.Quoted", Field, 0}, + {"StringNode.Text", Field, 0}, + {"TemplateNode", Type, 0}, + {"TemplateNode.Line", Field, 0}, + {"TemplateNode.Name", Field, 0}, + {"TemplateNode.NodeType", Field, 0}, + {"TemplateNode.Pipe", Field, 0}, + {"TemplateNode.Pos", Field, 1}, + {"TextNode", Type, 0}, + {"TextNode.NodeType", Field, 0}, + {"TextNode.Pos", Field, 1}, + {"TextNode.Text", Field, 0}, + {"Tree", Type, 0}, + {"Tree.Mode", Field, 16}, + {"Tree.Name", Field, 0}, + {"Tree.ParseName", Field, 1}, + {"Tree.Root", Field, 0}, + {"VariableNode", Type, 0}, + {"VariableNode.Ident", Field, 0}, + {"VariableNode.NodeType", Field, 0}, + {"VariableNode.Pos", Field, 1}, + {"WithNode", Type, 0}, + {"WithNode.BranchNode", Field, 0}, + }, + "time": { + {"(*Location).String", Method, 0}, + {"(*ParseError).Error", Method, 0}, + {"(*Ticker).Reset", Method, 15}, + {"(*Ticker).Stop", Method, 0}, + {"(*Time).GobDecode", Method, 0}, + {"(*Time).UnmarshalBinary", Method, 2}, + {"(*Time).UnmarshalJSON", Method, 0}, + {"(*Time).UnmarshalText", Method, 2}, + {"(*Timer).Reset", Method, 1}, + {"(*Timer).Stop", Method, 0}, + {"(Duration).Abs", Method, 19}, + {"(Duration).Hours", Method, 0}, + {"(Duration).Microseconds", Method, 13}, + {"(Duration).Milliseconds", Method, 13}, + {"(Duration).Minutes", Method, 0}, + {"(Duration).Nanoseconds", Method, 0}, + {"(Duration).Round", Method, 9}, + {"(Duration).Seconds", Method, 0}, + {"(Duration).String", Method, 0}, + {"(Duration).Truncate", Method, 9}, + {"(Month).String", Method, 0}, + {"(Time).Add", Method, 0}, + {"(Time).AddDate", Method, 0}, + {"(Time).After", Method, 0}, + {"(Time).AppendFormat", Method, 5}, + {"(Time).Before", Method, 0}, + {"(Time).Clock", Method, 0}, + {"(Time).Compare", Method, 20}, + {"(Time).Date", Method, 0}, + {"(Time).Day", Method, 0}, + {"(Time).Equal", Method, 0}, + {"(Time).Format", Method, 0}, + {"(Time).GoString", Method, 17}, + {"(Time).GobEncode", Method, 0}, + {"(Time).Hour", Method, 0}, + {"(Time).ISOWeek", Method, 0}, + {"(Time).In", Method, 0}, + {"(Time).IsDST", Method, 17}, + {"(Time).IsZero", Method, 0}, + {"(Time).Local", Method, 0}, + 
{"(Time).Location", Method, 0}, + {"(Time).MarshalBinary", Method, 2}, + {"(Time).MarshalJSON", Method, 0}, + {"(Time).MarshalText", Method, 2}, + {"(Time).Minute", Method, 0}, + {"(Time).Month", Method, 0}, + {"(Time).Nanosecond", Method, 0}, + {"(Time).Round", Method, 1}, + {"(Time).Second", Method, 0}, + {"(Time).String", Method, 0}, + {"(Time).Sub", Method, 0}, + {"(Time).Truncate", Method, 1}, + {"(Time).UTC", Method, 0}, + {"(Time).Unix", Method, 0}, + {"(Time).UnixMicro", Method, 17}, + {"(Time).UnixMilli", Method, 17}, + {"(Time).UnixNano", Method, 0}, + {"(Time).Weekday", Method, 0}, + {"(Time).Year", Method, 0}, + {"(Time).YearDay", Method, 1}, + {"(Time).Zone", Method, 0}, + {"(Time).ZoneBounds", Method, 19}, + {"(Weekday).String", Method, 0}, + {"ANSIC", Const, 0}, + {"After", Func, 0}, + {"AfterFunc", Func, 0}, + {"April", Const, 0}, + {"August", Const, 0}, + {"Date", Func, 0}, + {"DateOnly", Const, 20}, + {"DateTime", Const, 20}, + {"December", Const, 0}, + {"Duration", Type, 0}, + {"February", Const, 0}, + {"FixedZone", Func, 0}, + {"Friday", Const, 0}, + {"Hour", Const, 0}, + {"January", Const, 0}, + {"July", Const, 0}, + {"June", Const, 0}, + {"Kitchen", Const, 0}, + {"Layout", Const, 17}, + {"LoadLocation", Func, 0}, + {"LoadLocationFromTZData", Func, 10}, + {"Local", Var, 0}, + {"Location", Type, 0}, + {"March", Const, 0}, + {"May", Const, 0}, + {"Microsecond", Const, 0}, + {"Millisecond", Const, 0}, + {"Minute", Const, 0}, + {"Monday", Const, 0}, + {"Month", Type, 0}, + {"Nanosecond", Const, 0}, + {"NewTicker", Func, 0}, + {"NewTimer", Func, 0}, + {"November", Const, 0}, + {"Now", Func, 0}, + {"October", Const, 0}, + {"Parse", Func, 0}, + {"ParseDuration", Func, 0}, + {"ParseError", Type, 0}, + {"ParseError.Layout", Field, 0}, + {"ParseError.LayoutElem", Field, 0}, + {"ParseError.Message", Field, 0}, + {"ParseError.Value", Field, 0}, + {"ParseError.ValueElem", Field, 0}, + {"ParseInLocation", Func, 1}, + {"RFC1123", Const, 0}, + {"RFC1123Z", Const, 0}, + {"RFC3339", Const, 0}, + {"RFC3339Nano", Const, 0}, + {"RFC822", Const, 0}, + {"RFC822Z", Const, 0}, + {"RFC850", Const, 0}, + {"RubyDate", Const, 0}, + {"Saturday", Const, 0}, + {"Second", Const, 0}, + {"September", Const, 0}, + {"Since", Func, 0}, + {"Sleep", Func, 0}, + {"Stamp", Const, 0}, + {"StampMicro", Const, 0}, + {"StampMilli", Const, 0}, + {"StampNano", Const, 0}, + {"Sunday", Const, 0}, + {"Thursday", Const, 0}, + {"Tick", Func, 0}, + {"Ticker", Type, 0}, + {"Ticker.C", Field, 0}, + {"Time", Type, 0}, + {"TimeOnly", Const, 20}, + {"Timer", Type, 0}, + {"Timer.C", Field, 0}, + {"Tuesday", Const, 0}, + {"UTC", Var, 0}, + {"Unix", Func, 0}, + {"UnixDate", Const, 0}, + {"UnixMicro", Func, 17}, + {"UnixMilli", Func, 17}, + {"Until", Func, 8}, + {"Wednesday", Const, 0}, + {"Weekday", Type, 0}, + }, + "unicode": { + {"(SpecialCase).ToLower", Method, 0}, + {"(SpecialCase).ToTitle", Method, 0}, + {"(SpecialCase).ToUpper", Method, 0}, + {"ASCII_Hex_Digit", Var, 0}, + {"Adlam", Var, 7}, + {"Ahom", Var, 5}, + {"Anatolian_Hieroglyphs", Var, 5}, + {"Arabic", Var, 0}, + {"Armenian", Var, 0}, + {"Avestan", Var, 0}, + {"AzeriCase", Var, 0}, + {"Balinese", Var, 0}, + {"Bamum", Var, 0}, + {"Bassa_Vah", Var, 4}, + {"Batak", Var, 0}, + {"Bengali", Var, 0}, + {"Bhaiksuki", Var, 7}, + {"Bidi_Control", Var, 0}, + {"Bopomofo", Var, 0}, + {"Brahmi", Var, 0}, + {"Braille", Var, 0}, + {"Buginese", Var, 0}, + {"Buhid", Var, 0}, + {"C", Var, 0}, + {"Canadian_Aboriginal", Var, 0}, + {"Carian", Var, 0}, + {"CaseRange", Type, 0}, + 
{"CaseRange.Delta", Field, 0}, + {"CaseRange.Hi", Field, 0}, + {"CaseRange.Lo", Field, 0}, + {"CaseRanges", Var, 0}, + {"Categories", Var, 0}, + {"Caucasian_Albanian", Var, 4}, + {"Cc", Var, 0}, + {"Cf", Var, 0}, + {"Chakma", Var, 1}, + {"Cham", Var, 0}, + {"Cherokee", Var, 0}, + {"Chorasmian", Var, 16}, + {"Co", Var, 0}, + {"Common", Var, 0}, + {"Coptic", Var, 0}, + {"Cs", Var, 0}, + {"Cuneiform", Var, 0}, + {"Cypriot", Var, 0}, + {"Cypro_Minoan", Var, 21}, + {"Cyrillic", Var, 0}, + {"Dash", Var, 0}, + {"Deprecated", Var, 0}, + {"Deseret", Var, 0}, + {"Devanagari", Var, 0}, + {"Diacritic", Var, 0}, + {"Digit", Var, 0}, + {"Dives_Akuru", Var, 16}, + {"Dogra", Var, 13}, + {"Duployan", Var, 4}, + {"Egyptian_Hieroglyphs", Var, 0}, + {"Elbasan", Var, 4}, + {"Elymaic", Var, 14}, + {"Ethiopic", Var, 0}, + {"Extender", Var, 0}, + {"FoldCategory", Var, 0}, + {"FoldScript", Var, 0}, + {"Georgian", Var, 0}, + {"Glagolitic", Var, 0}, + {"Gothic", Var, 0}, + {"Grantha", Var, 4}, + {"GraphicRanges", Var, 0}, + {"Greek", Var, 0}, + {"Gujarati", Var, 0}, + {"Gunjala_Gondi", Var, 13}, + {"Gurmukhi", Var, 0}, + {"Han", Var, 0}, + {"Hangul", Var, 0}, + {"Hanifi_Rohingya", Var, 13}, + {"Hanunoo", Var, 0}, + {"Hatran", Var, 5}, + {"Hebrew", Var, 0}, + {"Hex_Digit", Var, 0}, + {"Hiragana", Var, 0}, + {"Hyphen", Var, 0}, + {"IDS_Binary_Operator", Var, 0}, + {"IDS_Trinary_Operator", Var, 0}, + {"Ideographic", Var, 0}, + {"Imperial_Aramaic", Var, 0}, + {"In", Func, 2}, + {"Inherited", Var, 0}, + {"Inscriptional_Pahlavi", Var, 0}, + {"Inscriptional_Parthian", Var, 0}, + {"Is", Func, 0}, + {"IsControl", Func, 0}, + {"IsDigit", Func, 0}, + {"IsGraphic", Func, 0}, + {"IsLetter", Func, 0}, + {"IsLower", Func, 0}, + {"IsMark", Func, 0}, + {"IsNumber", Func, 0}, + {"IsOneOf", Func, 0}, + {"IsPrint", Func, 0}, + {"IsPunct", Func, 0}, + {"IsSpace", Func, 0}, + {"IsSymbol", Func, 0}, + {"IsTitle", Func, 0}, + {"IsUpper", Func, 0}, + {"Javanese", Var, 0}, + {"Join_Control", Var, 0}, + {"Kaithi", Var, 0}, + {"Kannada", Var, 0}, + {"Katakana", Var, 0}, + {"Kawi", Var, 21}, + {"Kayah_Li", Var, 0}, + {"Kharoshthi", Var, 0}, + {"Khitan_Small_Script", Var, 16}, + {"Khmer", Var, 0}, + {"Khojki", Var, 4}, + {"Khudawadi", Var, 4}, + {"L", Var, 0}, + {"Lao", Var, 0}, + {"Latin", Var, 0}, + {"Lepcha", Var, 0}, + {"Letter", Var, 0}, + {"Limbu", Var, 0}, + {"Linear_A", Var, 4}, + {"Linear_B", Var, 0}, + {"Lisu", Var, 0}, + {"Ll", Var, 0}, + {"Lm", Var, 0}, + {"Lo", Var, 0}, + {"Logical_Order_Exception", Var, 0}, + {"Lower", Var, 0}, + {"LowerCase", Const, 0}, + {"Lt", Var, 0}, + {"Lu", Var, 0}, + {"Lycian", Var, 0}, + {"Lydian", Var, 0}, + {"M", Var, 0}, + {"Mahajani", Var, 4}, + {"Makasar", Var, 13}, + {"Malayalam", Var, 0}, + {"Mandaic", Var, 0}, + {"Manichaean", Var, 4}, + {"Marchen", Var, 7}, + {"Mark", Var, 0}, + {"Masaram_Gondi", Var, 10}, + {"MaxASCII", Const, 0}, + {"MaxCase", Const, 0}, + {"MaxLatin1", Const, 0}, + {"MaxRune", Const, 0}, + {"Mc", Var, 0}, + {"Me", Var, 0}, + {"Medefaidrin", Var, 13}, + {"Meetei_Mayek", Var, 0}, + {"Mende_Kikakui", Var, 4}, + {"Meroitic_Cursive", Var, 1}, + {"Meroitic_Hieroglyphs", Var, 1}, + {"Miao", Var, 1}, + {"Mn", Var, 0}, + {"Modi", Var, 4}, + {"Mongolian", Var, 0}, + {"Mro", Var, 4}, + {"Multani", Var, 5}, + {"Myanmar", Var, 0}, + {"N", Var, 0}, + {"Nabataean", Var, 4}, + {"Nag_Mundari", Var, 21}, + {"Nandinagari", Var, 14}, + {"Nd", Var, 0}, + {"New_Tai_Lue", Var, 0}, + {"Newa", Var, 7}, + {"Nko", Var, 0}, + {"Nl", Var, 0}, + {"No", Var, 0}, + {"Noncharacter_Code_Point", Var, 0}, + 
{"Number", Var, 0}, + {"Nushu", Var, 10}, + {"Nyiakeng_Puachue_Hmong", Var, 14}, + {"Ogham", Var, 0}, + {"Ol_Chiki", Var, 0}, + {"Old_Hungarian", Var, 5}, + {"Old_Italic", Var, 0}, + {"Old_North_Arabian", Var, 4}, + {"Old_Permic", Var, 4}, + {"Old_Persian", Var, 0}, + {"Old_Sogdian", Var, 13}, + {"Old_South_Arabian", Var, 0}, + {"Old_Turkic", Var, 0}, + {"Old_Uyghur", Var, 21}, + {"Oriya", Var, 0}, + {"Osage", Var, 7}, + {"Osmanya", Var, 0}, + {"Other", Var, 0}, + {"Other_Alphabetic", Var, 0}, + {"Other_Default_Ignorable_Code_Point", Var, 0}, + {"Other_Grapheme_Extend", Var, 0}, + {"Other_ID_Continue", Var, 0}, + {"Other_ID_Start", Var, 0}, + {"Other_Lowercase", Var, 0}, + {"Other_Math", Var, 0}, + {"Other_Uppercase", Var, 0}, + {"P", Var, 0}, + {"Pahawh_Hmong", Var, 4}, + {"Palmyrene", Var, 4}, + {"Pattern_Syntax", Var, 0}, + {"Pattern_White_Space", Var, 0}, + {"Pau_Cin_Hau", Var, 4}, + {"Pc", Var, 0}, + {"Pd", Var, 0}, + {"Pe", Var, 0}, + {"Pf", Var, 0}, + {"Phags_Pa", Var, 0}, + {"Phoenician", Var, 0}, + {"Pi", Var, 0}, + {"Po", Var, 0}, + {"Prepended_Concatenation_Mark", Var, 7}, + {"PrintRanges", Var, 0}, + {"Properties", Var, 0}, + {"Ps", Var, 0}, + {"Psalter_Pahlavi", Var, 4}, + {"Punct", Var, 0}, + {"Quotation_Mark", Var, 0}, + {"Radical", Var, 0}, + {"Range16", Type, 0}, + {"Range16.Hi", Field, 0}, + {"Range16.Lo", Field, 0}, + {"Range16.Stride", Field, 0}, + {"Range32", Type, 0}, + {"Range32.Hi", Field, 0}, + {"Range32.Lo", Field, 0}, + {"Range32.Stride", Field, 0}, + {"RangeTable", Type, 0}, + {"RangeTable.LatinOffset", Field, 1}, + {"RangeTable.R16", Field, 0}, + {"RangeTable.R32", Field, 0}, + {"Regional_Indicator", Var, 10}, + {"Rejang", Var, 0}, + {"ReplacementChar", Const, 0}, + {"Runic", Var, 0}, + {"S", Var, 0}, + {"STerm", Var, 0}, + {"Samaritan", Var, 0}, + {"Saurashtra", Var, 0}, + {"Sc", Var, 0}, + {"Scripts", Var, 0}, + {"Sentence_Terminal", Var, 7}, + {"Sharada", Var, 1}, + {"Shavian", Var, 0}, + {"Siddham", Var, 4}, + {"SignWriting", Var, 5}, + {"SimpleFold", Func, 0}, + {"Sinhala", Var, 0}, + {"Sk", Var, 0}, + {"Sm", Var, 0}, + {"So", Var, 0}, + {"Soft_Dotted", Var, 0}, + {"Sogdian", Var, 13}, + {"Sora_Sompeng", Var, 1}, + {"Soyombo", Var, 10}, + {"Space", Var, 0}, + {"SpecialCase", Type, 0}, + {"Sundanese", Var, 0}, + {"Syloti_Nagri", Var, 0}, + {"Symbol", Var, 0}, + {"Syriac", Var, 0}, + {"Tagalog", Var, 0}, + {"Tagbanwa", Var, 0}, + {"Tai_Le", Var, 0}, + {"Tai_Tham", Var, 0}, + {"Tai_Viet", Var, 0}, + {"Takri", Var, 1}, + {"Tamil", Var, 0}, + {"Tangsa", Var, 21}, + {"Tangut", Var, 7}, + {"Telugu", Var, 0}, + {"Terminal_Punctuation", Var, 0}, + {"Thaana", Var, 0}, + {"Thai", Var, 0}, + {"Tibetan", Var, 0}, + {"Tifinagh", Var, 0}, + {"Tirhuta", Var, 4}, + {"Title", Var, 0}, + {"TitleCase", Const, 0}, + {"To", Func, 0}, + {"ToLower", Func, 0}, + {"ToTitle", Func, 0}, + {"ToUpper", Func, 0}, + {"Toto", Var, 21}, + {"TurkishCase", Var, 0}, + {"Ugaritic", Var, 0}, + {"Unified_Ideograph", Var, 0}, + {"Upper", Var, 0}, + {"UpperCase", Const, 0}, + {"UpperLower", Const, 0}, + {"Vai", Var, 0}, + {"Variation_Selector", Var, 0}, + {"Version", Const, 0}, + {"Vithkuqi", Var, 21}, + {"Wancho", Var, 14}, + {"Warang_Citi", Var, 4}, + {"White_Space", Var, 0}, + {"Yezidi", Var, 16}, + {"Yi", Var, 0}, + {"Z", Var, 0}, + {"Zanabazar_Square", Var, 10}, + {"Zl", Var, 0}, + {"Zp", Var, 0}, + {"Zs", Var, 0}, + }, + "unicode/utf16": { + {"AppendRune", Func, 20}, + {"Decode", Func, 0}, + {"DecodeRune", Func, 0}, + {"Encode", Func, 0}, + {"EncodeRune", Func, 0}, + {"IsSurrogate", Func, 
0}, + }, + "unicode/utf8": { + {"AppendRune", Func, 18}, + {"DecodeLastRune", Func, 0}, + {"DecodeLastRuneInString", Func, 0}, + {"DecodeRune", Func, 0}, + {"DecodeRuneInString", Func, 0}, + {"EncodeRune", Func, 0}, + {"FullRune", Func, 0}, + {"FullRuneInString", Func, 0}, + {"MaxRune", Const, 0}, + {"RuneCount", Func, 0}, + {"RuneCountInString", Func, 0}, + {"RuneError", Const, 0}, + {"RuneLen", Func, 0}, + {"RuneSelf", Const, 0}, + {"RuneStart", Func, 0}, + {"UTFMax", Const, 0}, + {"Valid", Func, 0}, + {"ValidRune", Func, 1}, + {"ValidString", Func, 0}, + }, + "unsafe": { + {"Add", Func, 0}, + {"Alignof", Func, 0}, + {"Offsetof", Func, 0}, + {"Pointer", Type, 0}, + {"Sizeof", Func, 0}, + {"Slice", Func, 0}, + {"SliceData", Func, 0}, + {"String", Func, 0}, + {"StringData", Func, 0}, + }, +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/stdlib/stdlib.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/stdlib/stdlib.go new file mode 100644 index 00000000000..98904017f2c --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/stdlib/stdlib.go @@ -0,0 +1,97 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run generate.go + +// Package stdlib provides a table of all exported symbols in the +// standard library, along with the version at which they first +// appeared. +package stdlib + +import ( + "fmt" + "strings" +) + +type Symbol struct { + Name string + Kind Kind + Version Version // Go version that first included the symbol +} + +// A Kind indicates the kind of a symbol: +// function, variable, constant, type, and so on. +type Kind int8 + +const ( + Invalid Kind = iota // Example name: + Type // "Buffer" + Func // "Println" + Var // "EOF" + Const // "Pi" + Field // "Point.X" + Method // "(*Buffer).Grow" +) + +func (kind Kind) String() string { + return [...]string{ + Invalid: "invalid", + Type: "type", + Func: "func", + Var: "var", + Const: "const", + Field: "field", + Method: "method", + }[kind] +} + +// A Version represents a version of Go of the form "go1.%d". +type Version int8 + +// String returns a version string of the form "go1.23", without allocating. +func (v Version) String() string { return versions[v] } + +var versions [30]string // (increase constant as needed) + +func init() { + for i := range versions { + versions[i] = fmt.Sprintf("go1.%d", i) + } +} + +// HasPackage reports whether the specified package path is part of +// the standard library's public API. +func HasPackage(path string) bool { + _, ok := PackageSymbols[path] + return ok +} + +// SplitField splits the field symbol name into type and field +// components. It must be called only on Field symbols. +// +// Example: "File.Package" -> ("File", "Package") +func (sym *Symbol) SplitField() (typename, name string) { + if sym.Kind != Field { + panic("not a field") + } + typename, name, _ = strings.Cut(sym.Name, ".") + return +} + +// SplitMethod splits the method symbol name into pointer, receiver, +// and method components. It must be called only on Method symbols. 
+// +// Example: "(*Buffer).Grow" -> (true, "Buffer", "Grow") +func (sym *Symbol) SplitMethod() (ptr bool, recv, name string) { + if sym.Kind != Method { + panic("not a method") + } + recv, name, _ = strings.Cut(sym.Name, ".") + recv = recv[len("(") : len(recv)-len(")")] + ptr = recv[0] == '*' + if ptr { + recv = recv[len("*"):] + } + return +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go new file mode 100644 index 00000000000..ff9437a36cd --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go @@ -0,0 +1,137 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// package tokeninternal provides access to some internal features of the token +// package. +package tokeninternal + +import ( + "fmt" + "go/token" + "sort" + "sync" + "unsafe" +) + +// GetLines returns the table of line-start offsets from a token.File. +func GetLines(file *token.File) []int { + // token.File has a Lines method on Go 1.21 and later. + if file, ok := (interface{})(file).(interface{ Lines() []int }); ok { + return file.Lines() + } + + // This declaration must match that of token.File. + // This creates a risk of dependency skew. + // For now we check that the size of the two + // declarations is the same, on the (fragile) assumption + // that future changes would add fields. + type tokenFile119 struct { + _ string + _ int + _ int + mu sync.Mutex // we're not complete monsters + lines []int + _ []struct{} + } + + if unsafe.Sizeof(*file) != unsafe.Sizeof(tokenFile119{}) { + panic("unexpected token.File size") + } + var ptr *tokenFile119 + type uP = unsafe.Pointer + *(*uP)(uP(&ptr)) = uP(file) + ptr.mu.Lock() + defer ptr.mu.Unlock() + return ptr.lines +} + +// AddExistingFiles adds the specified files to the FileSet if they +// are not already present. It panics if any pair of files in the +// resulting FileSet would overlap. +func AddExistingFiles(fset *token.FileSet, files []*token.File) { + // Punch through the FileSet encapsulation. + type tokenFileSet struct { + // This type remained essentially consistent from go1.16 to go1.21. + mutex sync.RWMutex + base int + files []*token.File + _ *token.File // changed to atomic.Pointer[token.File] in go1.19 + } + + // If the size of token.FileSet changes, this will fail to compile. + const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{})) + var _ [-delta * delta]int + + type uP = unsafe.Pointer + var ptr *tokenFileSet + *(*uP)(uP(&ptr)) = uP(fset) + ptr.mutex.Lock() + defer ptr.mutex.Unlock() + + // Merge and sort. + newFiles := append(ptr.files, files...) + sort.Slice(newFiles, func(i, j int) bool { + return newFiles[i].Base() < newFiles[j].Base() + }) + + // Reject overlapping files. + // Discard adjacent identical files. + out := newFiles[:0] + for i, file := range newFiles { + if i > 0 { + prev := newFiles[i-1] + if file == prev { + continue + } + if prev.Base()+prev.Size()+1 > file.Base() { + panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)", + prev.Name(), prev.Base(), prev.Base()+prev.Size(), + file.Name(), file.Base(), file.Base()+file.Size())) + } + } + out = append(out, file) + } + newFiles = out + + ptr.files = newFiles + + // Advance FileSet.Base(). 
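+	// A token.File with base b and size s occupies offsets [b, b+s], so the
+	// next safe base is one past the end of the last (highest-based) file;
+	// keeping fset's base beyond that point means files added to fset later
+	// cannot overlap the files merged above.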
+ if len(newFiles) > 0 { + last := newFiles[len(newFiles)-1] + newBase := last.Base() + last.Size() + 1 + if ptr.base < newBase { + ptr.base = newBase + } + } +} + +// FileSetFor returns a new FileSet containing a sequence of new Files with +// the same base, size, and line as the input files, for use in APIs that +// require a FileSet. +// +// Precondition: the input files must be non-overlapping, and sorted in order +// of their Base. +func FileSetFor(files ...*token.File) *token.FileSet { + fset := token.NewFileSet() + for _, f := range files { + f2 := fset.AddFile(f.Name(), f.Base(), f.Size()) + lines := GetLines(f) + f2.SetLines(lines) + } + return fset +} + +// CloneFileSet creates a new FileSet holding all files in fset. It does not +// create copies of the token.Files in fset: they are added to the resulting +// FileSet unmodified. +func CloneFileSet(fset *token.FileSet) *token.FileSet { + var files []*token.File + fset.Iterate(func(f *token.File) bool { + files = append(files, f) + return true + }) + newFileSet := token.NewFileSet() + AddExistingFiles(newFileSet, files) + return newFileSet +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go new file mode 100644 index 00000000000..834e05381ce --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go @@ -0,0 +1,1560 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +//go:generate stringer -type=ErrorCode + +type ErrorCode int + +// This file defines the error codes that can be produced during type-checking. +// Collectively, these codes provide an identifier that may be used to +// implement special handling for certain types of errors. +// +// Error codes should be fine-grained enough that the exact nature of the error +// can be easily determined, but coarse enough that they are not an +// implementation detail of the type checking algorithm. As a rule-of-thumb, +// errors should be considered equivalent if there is a theoretical refactoring +// of the type checker in which they are emitted in exactly one place. For +// example, the type checker emits different error messages for "too many +// arguments" and "too few arguments", but one can imagine an alternative type +// checker where this check instead just emits a single "wrong number of +// arguments", so these errors should have the same code. +// +// Error code names should be as brief as possible while retaining accuracy and +// distinctiveness. In most cases names should start with an adjective +// describing the nature of the error (e.g. "invalid", "unused", "misplaced"), +// and end with a noun identifying the relevant language object. For example, +// "DuplicateDecl" or "InvalidSliceExpr". For brevity, naming follows the +// convention that "bad" implies a problem with syntax, and "invalid" implies a +// problem with types. + +const ( + // InvalidSyntaxTree occurs if an invalid syntax tree is provided + // to the type checker. It should never happen. + InvalidSyntaxTree ErrorCode = -1 +) + +const ( + _ ErrorCode = iota + + // Test is reserved for errors that only apply while in self-test mode. + Test + + /* package names */ + + // BlankPkgName occurs when a package name is the blank identifier "_". 
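+	//
+	// Example:
+	//  package _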
+ // + // Per the spec: + // "The PackageName must not be the blank identifier." + BlankPkgName + + // MismatchedPkgName occurs when a file's package name doesn't match the + // package name already established by other files. + MismatchedPkgName + + // InvalidPkgUse occurs when a package identifier is used outside of a + // selector expression. + // + // Example: + // import "fmt" + // + // var _ = fmt + InvalidPkgUse + + /* imports */ + + // BadImportPath occurs when an import path is not valid. + BadImportPath + + // BrokenImport occurs when importing a package fails. + // + // Example: + // import "amissingpackage" + BrokenImport + + // ImportCRenamed occurs when the special import "C" is renamed. "C" is a + // pseudo-package, and must not be renamed. + // + // Example: + // import _ "C" + ImportCRenamed + + // UnusedImport occurs when an import is unused. + // + // Example: + // import "fmt" + // + // func main() {} + UnusedImport + + /* initialization */ + + // InvalidInitCycle occurs when an invalid cycle is detected within the + // initialization graph. + // + // Example: + // var x int = f() + // + // func f() int { return x } + InvalidInitCycle + + /* decls */ + + // DuplicateDecl occurs when an identifier is declared multiple times. + // + // Example: + // var x = 1 + // var x = 2 + DuplicateDecl + + // InvalidDeclCycle occurs when a declaration cycle is not valid. + // + // Example: + // import "unsafe" + // + // type T struct { + // a [n]int + // } + // + // var n = unsafe.Sizeof(T{}) + InvalidDeclCycle + + // InvalidTypeCycle occurs when a cycle in type definitions results in a + // type that is not well-defined. + // + // Example: + // import "unsafe" + // + // type T [unsafe.Sizeof(T{})]int + InvalidTypeCycle + + /* decls > const */ + + // InvalidConstInit occurs when a const declaration has a non-constant + // initializer. + // + // Example: + // var x int + // const _ = x + InvalidConstInit + + // InvalidConstVal occurs when a const value cannot be converted to its + // target type. + // + // TODO(findleyr): this error code and example are not very clear. Consider + // removing it. + // + // Example: + // const _ = 1 << "hello" + InvalidConstVal + + // InvalidConstType occurs when the underlying type in a const declaration + // is not a valid constant type. + // + // Example: + // const c *int = 4 + InvalidConstType + + /* decls > var (+ other variable assignment codes) */ + + // UntypedNilUse occurs when the predeclared (untyped) value nil is used to + // initialize a variable declared without an explicit type. + // + // Example: + // var x = nil + UntypedNilUse + + // WrongAssignCount occurs when the number of values on the right-hand side + // of an assignment or initialization expression does not match the number + // of variables on the left-hand side. + // + // Example: + // var x = 1, 2 + WrongAssignCount + + // UnassignableOperand occurs when the left-hand side of an assignment is + // not assignable. + // + // Example: + // func f() { + // const c = 1 + // c = 2 + // } + UnassignableOperand + + // NoNewVar occurs when a short variable declaration (':=') does not declare + // new variables. + // + // Example: + // func f() { + // x := 1 + // x := 2 + // } + NoNewVar + + // MultiValAssignOp occurs when an assignment operation (+=, *=, etc) does + // not have single-valued left-hand or right-hand side. 
+	//
+	// Per the spec:
+	//  "In assignment operations, both the left- and right-hand expression lists
+	//  must contain exactly one single-valued expression"
+	//
+	// Example:
+	//  func f() int {
+	//  	x, y := 1, 2
+	//  	x, y += 1
+	//  	return x + y
+	//  }
+	MultiValAssignOp
+
+	// InvalidIfaceAssign occurs when a value of type T is used as an
+	// interface, but T does not implement a method of the expected interface.
+	//
+	// Example:
+	//  type I interface {
+	//  	f()
+	//  }
+	//
+	//  type T int
+	//
+	//  var x I = T(1)
+	InvalidIfaceAssign
+
+	// InvalidChanAssign occurs when a chan assignment is invalid.
+	//
+	// Per the spec, a value x is assignable to a channel type T if:
+	//  "x is a bidirectional channel value, T is a channel type, x's type V and
+	//  T have identical element types, and at least one of V or T is not a
+	//  defined type."
+	//
+	// Example:
+	//  type T1 chan int
+	//  type T2 chan int
+	//
+	//  var x T1
+	//  // Invalid assignment because both types are named
+	//  var _ T2 = x
+	InvalidChanAssign
+
+	// IncompatibleAssign occurs when the type of the right-hand side expression
+	// in an assignment cannot be assigned to the type of the variable being
+	// assigned.
+	//
+	// Example:
+	//  var x []int
+	//  var _ int = x
+	IncompatibleAssign
+
+	// UnaddressableFieldAssign occurs when trying to assign to a struct field
+	// in a map value.
+	//
+	// Example:
+	//  func f() {
+	//  	m := make(map[string]struct{i int})
+	//  	m["foo"].i = 42
+	//  }
+	UnaddressableFieldAssign
+
+	/* decls > type (+ other type expression codes) */
+
+	// NotAType occurs when the identifier used as the underlying type in a type
+	// declaration or the right-hand side of a type alias does not denote a type.
+	//
+	// Example:
+	//  var S = 2
+	//
+	//  type T S
+	NotAType
+
+	// InvalidArrayLen occurs when an array length is not a constant value.
+	//
+	// Example:
+	//  var n = 3
+	//  var _ = [n]int{}
+	InvalidArrayLen
+
+	// BlankIfaceMethod occurs when a method name is '_'.
+	//
+	// Per the spec:
+	//  "The name of each explicitly specified method must be unique and not
+	//  blank."
+	//
+	// Example:
+	//  type T interface {
+	//  	_(int)
+	//  }
+	BlankIfaceMethod
+
+	// IncomparableMapKey occurs when a map key type does not support the == and
+	// != operators.
+	//
+	// Per the spec:
+	//  "The comparison operators == and != must be fully defined for operands of
+	//  the key type; thus the key type must not be a function, map, or slice."
+	//
+	// Example:
+	//  var x map[T]int
+	//
+	//  type T []int
+	IncomparableMapKey
+
+	// InvalidIfaceEmbed occurs when a non-interface type is embedded in an
+	// interface.
+	//
+	// Example:
+	//  type T struct {}
+	//
+	//  func (T) m()
+	//
+	//  type I interface {
+	//  	T
+	//  }
+	InvalidIfaceEmbed
+
+	// InvalidPtrEmbed occurs when an embedded field is of the pointer form *T,
+	// and T is itself a pointer, an unsafe.Pointer, or an interface.
+	//
+	// Per the spec:
+	//  "An embedded field must be specified as a type name T or as a pointer to
+	//  a non-interface type name *T, and T itself may not be a pointer type."
+	//
+	// Example:
+	//  type T *int
+	//
+	//  type S struct {
+	//  	*T
+	//  }
+	InvalidPtrEmbed
+
+	/* decls > func and method */
+
+	// BadRecv occurs when a method declaration does not have exactly one
+	// receiver parameter.
+	//
+	// Example:
+	//  func () _() {}
+	BadRecv
+
+	// InvalidRecv occurs when a receiver type expression is not of the form T
+	// or *T, or T is a pointer type.
+ // + // Example: + // type T struct {} + // + // func (**T) m() {} + InvalidRecv + + // DuplicateFieldAndMethod occurs when an identifier appears as both a field + // and method name. + // + // Example: + // type T struct { + // m int + // } + // + // func (T) m() {} + DuplicateFieldAndMethod + + // DuplicateMethod occurs when two methods on the same receiver type have + // the same name. + // + // Example: + // type T struct {} + // func (T) m() {} + // func (T) m(i int) int { return i } + DuplicateMethod + + /* decls > special */ + + // InvalidBlank occurs when a blank identifier is used as a value or type. + // + // Per the spec: + // "The blank identifier may appear as an operand only on the left-hand side + // of an assignment." + // + // Example: + // var x = _ + InvalidBlank + + // InvalidIota occurs when the predeclared identifier iota is used outside + // of a constant declaration. + // + // Example: + // var x = iota + InvalidIota + + // MissingInitBody occurs when an init function is missing its body. + // + // Example: + // func init() + MissingInitBody + + // InvalidInitSig occurs when an init function declares parameters or + // results. + // + // Example: + // func init() int { return 1 } + InvalidInitSig + + // InvalidInitDecl occurs when init is declared as anything other than a + // function. + // + // Example: + // var init = 1 + InvalidInitDecl + + // InvalidMainDecl occurs when main is declared as anything other than a + // function, in a main package. + InvalidMainDecl + + /* exprs */ + + // TooManyValues occurs when a function returns too many values for the + // expression context in which it is used. + // + // Example: + // func ReturnTwo() (int, int) { + // return 1, 2 + // } + // + // var x = ReturnTwo() + TooManyValues + + // NotAnExpr occurs when a type expression is used where a value expression + // is expected. + // + // Example: + // type T struct {} + // + // func f() { + // T + // } + NotAnExpr + + /* exprs > const */ + + // TruncatedFloat occurs when a float constant is truncated to an integer + // value. + // + // Example: + // var _ int = 98.6 + TruncatedFloat + + // NumericOverflow occurs when a numeric constant overflows its target type. + // + // Example: + // var x int8 = 1000 + NumericOverflow + + /* exprs > operation */ + + // UndefinedOp occurs when an operator is not defined for the type(s) used + // in an operation. + // + // Example: + // var c = "a" - "b" + UndefinedOp + + // MismatchedTypes occurs when operand types are incompatible in a binary + // operation. + // + // Example: + // var a = "hello" + // var b = 1 + // var c = a - b + MismatchedTypes + + // DivByZero occurs when a division operation is provable at compile + // time to be a division by zero. + // + // Example: + // const divisor = 0 + // var x int = 1/divisor + DivByZero + + // NonNumericIncDec occurs when an increment or decrement operator is + // applied to a non-numeric value. + // + // Example: + // func f() { + // var c = "c" + // c++ + // } + NonNumericIncDec + + /* exprs > ptr */ + + // UnaddressableOperand occurs when the & operator is applied to an + // unaddressable expression. + // + // Example: + // var x = &1 + UnaddressableOperand + + // InvalidIndirection occurs when a non-pointer value is indirected via the + // '*' operator. + // + // Example: + // var x int + // var y = *x + InvalidIndirection + + /* exprs > [] */ + + // NonIndexableOperand occurs when an index operation is applied to a value + // that cannot be indexed. 
+ // + // Example: + // var x = 1 + // var y = x[1] + NonIndexableOperand + + // InvalidIndex occurs when an index argument is not of integer type, + // negative, or out-of-bounds. + // + // Example: + // var s = [...]int{1,2,3} + // var x = s[5] + // + // Example: + // var s = []int{1,2,3} + // var _ = s[-1] + // + // Example: + // var s = []int{1,2,3} + // var i string + // var _ = s[i] + InvalidIndex + + // SwappedSliceIndices occurs when constant indices in a slice expression + // are decreasing in value. + // + // Example: + // var _ = []int{1,2,3}[2:1] + SwappedSliceIndices + + /* operators > slice */ + + // NonSliceableOperand occurs when a slice operation is applied to a value + // whose type is not sliceable, or is unaddressable. + // + // Example: + // var x = [...]int{1, 2, 3}[:1] + // + // Example: + // var x = 1 + // var y = 1[:1] + NonSliceableOperand + + // InvalidSliceExpr occurs when a three-index slice expression (a[x:y:z]) is + // applied to a string. + // + // Example: + // var s = "hello" + // var x = s[1:2:3] + InvalidSliceExpr + + /* exprs > shift */ + + // InvalidShiftCount occurs when the right-hand side of a shift operation is + // either non-integer, negative, or too large. + // + // Example: + // var ( + // x string + // y int = 1 << x + // ) + InvalidShiftCount + + // InvalidShiftOperand occurs when the shifted operand is not an integer. + // + // Example: + // var s = "hello" + // var x = s << 2 + InvalidShiftOperand + + /* exprs > chan */ + + // InvalidReceive occurs when there is a channel receive from a value that + // is either not a channel, or is a send-only channel. + // + // Example: + // func f() { + // var x = 1 + // <-x + // } + InvalidReceive + + // InvalidSend occurs when there is a channel send to a value that is not a + // channel, or is a receive-only channel. + // + // Example: + // func f() { + // var x = 1 + // x <- "hello!" + // } + InvalidSend + + /* exprs > literal */ + + // DuplicateLitKey occurs when an index is duplicated in a slice, array, or + // map literal. + // + // Example: + // var _ = []int{0:1, 0:2} + // + // Example: + // var _ = map[string]int{"a": 1, "a": 2} + DuplicateLitKey + + // MissingLitKey occurs when a map literal is missing a key expression. + // + // Example: + // var _ = map[string]int{1} + MissingLitKey + + // InvalidLitIndex occurs when the key in a key-value element of a slice or + // array literal is not an integer constant. + // + // Example: + // var i = 0 + // var x = []string{i: "world"} + InvalidLitIndex + + // OversizeArrayLit occurs when an array literal exceeds its length. + // + // Example: + // var _ = [2]int{1,2,3} + OversizeArrayLit + + // MixedStructLit occurs when a struct literal contains a mix of positional + // and named elements. + // + // Example: + // var _ = struct{i, j int}{i: 1, 2} + MixedStructLit + + // InvalidStructLit occurs when a positional struct literal has an incorrect + // number of values. + // + // Example: + // var _ = struct{i, j int}{1,2,3} + InvalidStructLit + + // MissingLitField occurs when a struct literal refers to a field that does + // not exist on the struct type. + // + // Example: + // var _ = struct{i int}{j: 2} + MissingLitField + + // DuplicateLitField occurs when a struct literal contains duplicated + // fields. + // + // Example: + // var _ = struct{i int}{i: 1, i: 2} + DuplicateLitField + + // UnexportedLitField occurs when a positional struct literal implicitly + // assigns an unexported field of an imported type. 
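+	//
+	// Example (assuming time.Time still has exactly three unexported fields):
+	//  import "time"
+	//
+	//  var _ = time.Time{0, 0, nil}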
+ UnexportedLitField + + // InvalidLitField occurs when a field name is not a valid identifier. + // + // Example: + // var _ = struct{i int}{1: 1} + InvalidLitField + + // UntypedLit occurs when a composite literal omits a required type + // identifier. + // + // Example: + // type outer struct{ + // inner struct { i int } + // } + // + // var _ = outer{inner: {1}} + UntypedLit + + // InvalidLit occurs when a composite literal expression does not match its + // type. + // + // Example: + // type P *struct{ + // x int + // } + // var _ = P {} + InvalidLit + + /* exprs > selector */ + + // AmbiguousSelector occurs when a selector is ambiguous. + // + // Example: + // type E1 struct { i int } + // type E2 struct { i int } + // type T struct { E1; E2 } + // + // var x T + // var _ = x.i + AmbiguousSelector + + // UndeclaredImportedName occurs when a package-qualified identifier is + // undeclared by the imported package. + // + // Example: + // import "go/types" + // + // var _ = types.NotAnActualIdentifier + UndeclaredImportedName + + // UnexportedName occurs when a selector refers to an unexported identifier + // of an imported package. + // + // Example: + // import "reflect" + // + // type _ reflect.flag + UnexportedName + + // UndeclaredName occurs when an identifier is not declared in the current + // scope. + // + // Example: + // var x T + UndeclaredName + + // MissingFieldOrMethod occurs when a selector references a field or method + // that does not exist. + // + // Example: + // type T struct {} + // + // var x = T{}.f + MissingFieldOrMethod + + /* exprs > ... */ + + // BadDotDotDotSyntax occurs when a "..." occurs in a context where it is + // not valid. + // + // Example: + // var _ = map[int][...]int{0: {}} + BadDotDotDotSyntax + + // NonVariadicDotDotDot occurs when a "..." is used on the final argument to + // a non-variadic function. + // + // Example: + // func printArgs(s []string) { + // for _, a := range s { + // println(a) + // } + // } + // + // func f() { + // s := []string{"a", "b", "c"} + // printArgs(s...) + // } + NonVariadicDotDotDot + + // MisplacedDotDotDot occurs when a "..." is used somewhere other than the + // final argument to a function call. + // + // Example: + // func printArgs(args ...int) { + // for _, a := range args { + // println(a) + // } + // } + // + // func f() { + // a := []int{1,2,3} + // printArgs(0, a...) + // } + MisplacedDotDotDot + + // InvalidDotDotDotOperand occurs when a "..." operator is applied to a + // single-valued operand. + // + // Example: + // func printArgs(args ...int) { + // for _, a := range args { + // println(a) + // } + // } + // + // func f() { + // a := 1 + // printArgs(a...) + // } + // + // Example: + // func args() (int, int) { + // return 1, 2 + // } + // + // func printArgs(args ...int) { + // for _, a := range args { + // println(a) + // } + // } + // + // func g() { + // printArgs(args()...) + // } + InvalidDotDotDotOperand + + // InvalidDotDotDot occurs when a "..." is used in a non-variadic built-in + // function. + // + // Example: + // var s = []int{1, 2, 3} + // var l = len(s...) + InvalidDotDotDot + + /* exprs > built-in */ + + // UncalledBuiltin occurs when a built-in function is used as a + // function-valued expression, instead of being called. + // + // Per the spec: + // "The built-in functions do not have standard Go types, so they can only + // appear in call expressions; they cannot be used as function values." 
+	//
+	// Example:
+	//  var _ = copy
+	UncalledBuiltin
+
+	// InvalidAppend occurs when append is called with a first argument that is
+	// not a slice.
+	//
+	// Example:
+	//  var _ = append(1, 2)
+	InvalidAppend
+
+	// InvalidCap occurs when an argument to the cap built-in function is not of
+	// supported type.
+	//
+	// See https://golang.org/ref/spec#Length_and_capacity for information on
+	// which underlying types are supported as arguments to cap and len.
+	//
+	// Example:
+	//  var s = 2
+	//  var x = cap(s)
+	InvalidCap
+
+	// InvalidClose occurs when close(...) is called with an argument that is
+	// not of channel type, or that is a receive-only channel.
+	//
+	// Example:
+	//  func f() {
+	//  	var x int
+	//  	close(x)
+	//  }
+	InvalidClose
+
+	// InvalidCopy occurs when the arguments are not of slice type or do not
+	// have compatible type.
+	//
+	// See https://golang.org/ref/spec#Appending_and_copying_slices for more
+	// information on the type requirements for the copy built-in.
+	//
+	// Example:
+	//  func f() {
+	//  	var x []int
+	//  	y := []int64{1,2,3}
+	//  	copy(x, y)
+	//  }
+	InvalidCopy
+
+	// InvalidComplex occurs when the complex built-in function is called with
+	// arguments with incompatible types.
+	//
+	// Example:
+	//  var _ = complex(float32(1), float64(2))
+	InvalidComplex
+
+	// InvalidDelete occurs when the delete built-in function is called with a
+	// first argument that is not a map.
+	//
+	// Example:
+	//  func f() {
+	//  	m := "hello"
+	//  	delete(m, "e")
+	//  }
+	InvalidDelete
+
+	// InvalidImag occurs when the imag built-in function is called with an
+	// argument that does not have complex type.
+	//
+	// Example:
+	//  var _ = imag(int(1))
+	InvalidImag
+
+	// InvalidLen occurs when an argument to the len built-in function is not of
+	// supported type.
+	//
+	// See https://golang.org/ref/spec#Length_and_capacity for information on
+	// which underlying types are supported as arguments to cap and len.
+	//
+	// Example:
+	//  var s = 2
+	//  var x = len(s)
+	InvalidLen
+
+	// SwappedMakeArgs occurs when make is called with three arguments, and its
+	// length argument is larger than its capacity argument.
+	//
+	// Example:
+	//  var x = make([]int, 3, 2)
+	SwappedMakeArgs
+
+	// InvalidMake occurs when make is called with an unsupported type argument.
+	//
+	// See https://golang.org/ref/spec#Making_slices_maps_and_channels for
+	// information on the types that may be created using make.
+	//
+	// Example:
+	//  var x = make(int)
+	InvalidMake
+
+	// InvalidReal occurs when the real built-in function is called with an
+	// argument that does not have complex type.
+	//
+	// Example:
+	//  var _ = real(int(1))
+	InvalidReal
+
+	/* exprs > assertion */
+
+	// InvalidAssert occurs when a type assertion is applied to a
+	// value that is not of interface type.
+	//
+	// Example:
+	//  var x = 1
+	//  var _ = x.(float64)
+	InvalidAssert
+
+	// ImpossibleAssert occurs for a type assertion x.(T) when the value x of
+	// interface cannot have dynamic type T, due to a missing or mismatching
+	// method on T.
+	//
+	// Example:
+	//  type T int
+	//
+	//  func (t *T) m() int { return int(*t) }
+	//
+	//  type I interface { m() int }
+	//
+	//  var x I
+	//  var _ = x.(T)
+	ImpossibleAssert
+
+	/* exprs > conversion */
+
+	// InvalidConversion occurs when the argument type cannot be converted to the
+	// target.
+	//
+	// See https://golang.org/ref/spec#Conversions for the rules of
+	// convertibility.
+ // + // Example: + // var x float64 + // var _ = string(x) + InvalidConversion + + // InvalidUntypedConversion occurs when an there is no valid implicit + // conversion from an untyped value satisfying the type constraints of the + // context in which it is used. + // + // Example: + // var _ = 1 + "" + InvalidUntypedConversion + + /* offsetof */ + + // BadOffsetofSyntax occurs when unsafe.Offsetof is called with an argument + // that is not a selector expression. + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.Offsetof(x) + BadOffsetofSyntax + + // InvalidOffsetof occurs when unsafe.Offsetof is called with a method + // selector, rather than a field selector, or when the field is embedded via + // a pointer. + // + // Per the spec: + // + // "If f is an embedded field, it must be reachable without pointer + // indirections through fields of the struct. " + // + // Example: + // import "unsafe" + // + // type T struct { f int } + // type S struct { *T } + // var s S + // var _ = unsafe.Offsetof(s.f) + // + // Example: + // import "unsafe" + // + // type S struct{} + // + // func (S) m() {} + // + // var s S + // var _ = unsafe.Offsetof(s.m) + InvalidOffsetof + + /* control flow > scope */ + + // UnusedExpr occurs when a side-effect free expression is used as a + // statement. Such a statement has no effect. + // + // Example: + // func f(i int) { + // i*i + // } + UnusedExpr + + // UnusedVar occurs when a variable is declared but unused. + // + // Example: + // func f() { + // x := 1 + // } + UnusedVar + + // MissingReturn occurs when a function with results is missing a return + // statement. + // + // Example: + // func f() int {} + MissingReturn + + // WrongResultCount occurs when a return statement returns an incorrect + // number of values. + // + // Example: + // func ReturnOne() int { + // return 1, 2 + // } + WrongResultCount + + // OutOfScopeResult occurs when the name of a value implicitly returned by + // an empty return statement is shadowed in a nested scope. + // + // Example: + // func factor(n int) (i int) { + // for i := 2; i < n; i++ { + // if n%i == 0 { + // return + // } + // } + // return 0 + // } + OutOfScopeResult + + /* control flow > if */ + + // InvalidCond occurs when an if condition is not a boolean expression. + // + // Example: + // func checkReturn(i int) { + // if i { + // panic("non-zero return") + // } + // } + InvalidCond + + /* control flow > for */ + + // InvalidPostDecl occurs when there is a declaration in a for-loop post + // statement. + // + // Example: + // func f() { + // for i := 0; i < 10; j := 0 {} + // } + InvalidPostDecl + + // InvalidChanRange occurs when a send-only channel used in a range + // expression. + // + // Example: + // func sum(c chan<- int) { + // s := 0 + // for i := range c { + // s += i + // } + // } + InvalidChanRange + + // InvalidIterVar occurs when two iteration variables are used while ranging + // over a channel. + // + // Example: + // func f(c chan int) { + // for k, v := range c { + // println(k, v) + // } + // } + InvalidIterVar + + // InvalidRangeExpr occurs when the type of a range expression is not array, + // slice, string, map, or channel. + // + // Example: + // func f(i int) { + // for j := range i { + // println(j) + // } + // } + InvalidRangeExpr + + /* control flow > switch */ + + // MisplacedBreak occurs when a break statement is not within a for, switch, + // or select statement of the innermost function definition. 
+ // + // Example: + // func f() { + // break + // } + MisplacedBreak + + // MisplacedContinue occurs when a continue statement is not within a for + // loop of the innermost function definition. + // + // Example: + // func sumeven(n int) int { + // proceed := func() { + // continue + // } + // sum := 0 + // for i := 1; i <= n; i++ { + // if i % 2 != 0 { + // proceed() + // } + // sum += i + // } + // return sum + // } + MisplacedContinue + + // MisplacedFallthrough occurs when a fallthrough statement is not within an + // expression switch. + // + // Example: + // func typename(i interface{}) string { + // switch i.(type) { + // case int64: + // fallthrough + // case int: + // return "int" + // } + // return "unsupported" + // } + MisplacedFallthrough + + // DuplicateCase occurs when a type or expression switch has duplicate + // cases. + // + // Example: + // func printInt(i int) { + // switch i { + // case 1: + // println("one") + // case 1: + // println("One") + // } + // } + DuplicateCase + + // DuplicateDefault occurs when a type or expression switch has multiple + // default clauses. + // + // Example: + // func printInt(i int) { + // switch i { + // case 1: + // println("one") + // default: + // println("One") + // default: + // println("1") + // } + // } + DuplicateDefault + + // BadTypeKeyword occurs when a .(type) expression is used anywhere other + // than a type switch. + // + // Example: + // type I interface { + // m() + // } + // var t I + // var _ = t.(type) + BadTypeKeyword + + // InvalidTypeSwitch occurs when .(type) is used on an expression that is + // not of interface type. + // + // Example: + // func f(i int) { + // switch x := i.(type) {} + // } + InvalidTypeSwitch + + // InvalidExprSwitch occurs when a switch expression is not comparable. + // + // Example: + // func _() { + // var a struct{ _ func() } + // switch a /* ERROR cannot switch on a */ { + // } + // } + InvalidExprSwitch + + /* control flow > select */ + + // InvalidSelectCase occurs when a select case is not a channel send or + // receive. + // + // Example: + // func checkChan(c <-chan int) bool { + // select { + // case c: + // return true + // default: + // return false + // } + // } + InvalidSelectCase + + /* control flow > labels and jumps */ + + // UndeclaredLabel occurs when an undeclared label is jumped to. + // + // Example: + // func f() { + // goto L + // } + UndeclaredLabel + + // DuplicateLabel occurs when a label is declared more than once. + // + // Example: + // func f() int { + // L: + // L: + // return 1 + // } + DuplicateLabel + + // MisplacedLabel occurs when a break or continue label is not on a for, + // switch, or select statement. + // + // Example: + // func f() { + // L: + // a := []int{1,2,3} + // for _, e := range a { + // if e > 10 { + // break L + // } + // println(a) + // } + // } + MisplacedLabel + + // UnusedLabel occurs when a label is declared but not used. + // + // Example: + // func f() { + // L: + // } + UnusedLabel + + // JumpOverDecl occurs when a label jumps over a variable declaration. + // + // Example: + // func f() int { + // goto L + // x := 2 + // L: + // x++ + // return x + // } + JumpOverDecl + + // JumpIntoBlock occurs when a forward jump goes to a label inside a nested + // block. + // + // Example: + // func f(x int) { + // goto L + // if x > 0 { + // L: + // print("inside block") + // } + // } + JumpIntoBlock + + /* control flow > calls */ + + // InvalidMethodExpr occurs when a pointer method is called but the argument + // is not addressable. 
+ // + // Example: + // type T struct {} + // + // func (*T) m() int { return 1 } + // + // var _ = T.m(T{}) + InvalidMethodExpr + + // WrongArgCount occurs when too few or too many arguments are passed by a + // function call. + // + // Example: + // func f(i int) {} + // var x = f() + WrongArgCount + + // InvalidCall occurs when an expression is called that is not of function + // type. + // + // Example: + // var x = "x" + // var y = x() + InvalidCall + + /* control flow > suspended */ + + // UnusedResults occurs when a restricted expression-only built-in function + // is suspended via go or defer. Such a suspension discards the results of + // these side-effect free built-in functions, and therefore is ineffectual. + // + // Example: + // func f(a []int) int { + // defer len(a) + // return i + // } + UnusedResults + + // InvalidDefer occurs when a deferred expression is not a function call, + // for example if the expression is a type conversion. + // + // Example: + // func f(i int) int { + // defer int32(i) + // return i + // } + InvalidDefer + + // InvalidGo occurs when a go expression is not a function call, for example + // if the expression is a type conversion. + // + // Example: + // func f(i int) int { + // go int32(i) + // return i + // } + InvalidGo + + // All codes below were added in Go 1.17. + + /* decl */ + + // BadDecl occurs when a declaration has invalid syntax. + BadDecl + + // RepeatedDecl occurs when an identifier occurs more than once on the left + // hand side of a short variable declaration. + // + // Example: + // func _() { + // x, y, y := 1, 2, 3 + // } + RepeatedDecl + + /* unsafe */ + + // InvalidUnsafeAdd occurs when unsafe.Add is called with a + // length argument that is not of integer type. + // + // Example: + // import "unsafe" + // + // var p unsafe.Pointer + // var _ = unsafe.Add(p, float64(1)) + InvalidUnsafeAdd + + // InvalidUnsafeSlice occurs when unsafe.Slice is called with a + // pointer argument that is not of pointer type or a length argument + // that is not of integer type, negative, or out of bounds. + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.Slice(x, 1) + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.Slice(&x, float64(1)) + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.Slice(&x, -1) + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.Slice(&x, uint64(1) << 63) + InvalidUnsafeSlice + + // All codes below were added in Go 1.18. + + /* features */ + + // UnsupportedFeature occurs when a language feature is used that is not + // supported at this Go version. + UnsupportedFeature + + /* type params */ + + // NotAGenericType occurs when a non-generic type is used where a generic + // type is expected: in type or function instantiation. + // + // Example: + // type T int + // + // var _ T[int] + NotAGenericType + + // WrongTypeArgCount occurs when a type or function is instantiated with an + // incorrect number of type arguments, including when a generic type or + // function is used without instantiation. + // + // Errors involving failed type inference are assigned other error codes. + // + // Example: + // type T[p any] int + // + // var _ T[int, string] + // + // Example: + // func f[T any]() {} + // + // var x = f + WrongTypeArgCount + + // CannotInferTypeArgs occurs when type or function type argument inference + // fails to infer all type arguments. 
+ // + // Example: + // func f[T any]() {} + // + // func _() { + // f() + // } + // + // Example: + // type N[P, Q any] struct{} + // + // var _ N[int] + CannotInferTypeArgs + + // InvalidTypeArg occurs when a type argument does not satisfy its + // corresponding type parameter constraints. + // + // Example: + // type T[P ~int] struct{} + // + // var _ T[string] + InvalidTypeArg // arguments? InferenceFailed + + // InvalidInstanceCycle occurs when an invalid cycle is detected + // within the instantiation graph. + // + // Example: + // func f[T any]() { f[*T]() } + InvalidInstanceCycle + + // InvalidUnion occurs when an embedded union or approximation element is + // not valid. + // + // Example: + // type _ interface { + // ~int | interface{ m() } + // } + InvalidUnion + + // MisplacedConstraintIface occurs when a constraint-type interface is used + // outside of constraint position. + // + // Example: + // type I interface { ~int } + // + // var _ I + MisplacedConstraintIface + + // InvalidMethodTypeParams occurs when methods have type parameters. + // + // It cannot be encountered with an AST parsed using go/parser. + InvalidMethodTypeParams + + // MisplacedTypeParam occurs when a type parameter is used in a place where + // it is not permitted. + // + // Example: + // type T[P any] P + // + // Example: + // type T[P any] struct{ *P } + MisplacedTypeParam + + // InvalidUnsafeSliceData occurs when unsafe.SliceData is called with + // an argument that is not of slice type. It also occurs if it is used + // in a package compiled for a language version before go1.20. + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.SliceData(x) + InvalidUnsafeSliceData + + // InvalidUnsafeString occurs when unsafe.String is called with + // a length argument that is not of integer type, negative, or + // out of bounds. It also occurs if it is used in a package + // compiled for a language version before go1.20. + // + // Example: + // import "unsafe" + // + // var b [10]byte + // var _ = unsafe.String(&b[0], -1) + InvalidUnsafeString + + // InvalidUnsafeStringData occurs if it is used in a package + // compiled for a language version before go1.20. + _ // not used anymore + +) diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go new file mode 100644 index 00000000000..15ecf7c5ded --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go @@ -0,0 +1,179 @@ +// Code generated by "stringer -type=ErrorCode"; DO NOT EDIT. + +package typesinternal + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[InvalidSyntaxTree - -1] + _ = x[Test-1] + _ = x[BlankPkgName-2] + _ = x[MismatchedPkgName-3] + _ = x[InvalidPkgUse-4] + _ = x[BadImportPath-5] + _ = x[BrokenImport-6] + _ = x[ImportCRenamed-7] + _ = x[UnusedImport-8] + _ = x[InvalidInitCycle-9] + _ = x[DuplicateDecl-10] + _ = x[InvalidDeclCycle-11] + _ = x[InvalidTypeCycle-12] + _ = x[InvalidConstInit-13] + _ = x[InvalidConstVal-14] + _ = x[InvalidConstType-15] + _ = x[UntypedNilUse-16] + _ = x[WrongAssignCount-17] + _ = x[UnassignableOperand-18] + _ = x[NoNewVar-19] + _ = x[MultiValAssignOp-20] + _ = x[InvalidIfaceAssign-21] + _ = x[InvalidChanAssign-22] + _ = x[IncompatibleAssign-23] + _ = x[UnaddressableFieldAssign-24] + _ = x[NotAType-25] + _ = x[InvalidArrayLen-26] + _ = x[BlankIfaceMethod-27] + _ = x[IncomparableMapKey-28] + _ = x[InvalidIfaceEmbed-29] + _ = x[InvalidPtrEmbed-30] + _ = x[BadRecv-31] + _ = x[InvalidRecv-32] + _ = x[DuplicateFieldAndMethod-33] + _ = x[DuplicateMethod-34] + _ = x[InvalidBlank-35] + _ = x[InvalidIota-36] + _ = x[MissingInitBody-37] + _ = x[InvalidInitSig-38] + _ = x[InvalidInitDecl-39] + _ = x[InvalidMainDecl-40] + _ = x[TooManyValues-41] + _ = x[NotAnExpr-42] + _ = x[TruncatedFloat-43] + _ = x[NumericOverflow-44] + _ = x[UndefinedOp-45] + _ = x[MismatchedTypes-46] + _ = x[DivByZero-47] + _ = x[NonNumericIncDec-48] + _ = x[UnaddressableOperand-49] + _ = x[InvalidIndirection-50] + _ = x[NonIndexableOperand-51] + _ = x[InvalidIndex-52] + _ = x[SwappedSliceIndices-53] + _ = x[NonSliceableOperand-54] + _ = x[InvalidSliceExpr-55] + _ = x[InvalidShiftCount-56] + _ = x[InvalidShiftOperand-57] + _ = x[InvalidReceive-58] + _ = x[InvalidSend-59] + _ = x[DuplicateLitKey-60] + _ = x[MissingLitKey-61] + _ = x[InvalidLitIndex-62] + _ = x[OversizeArrayLit-63] + _ = x[MixedStructLit-64] + _ = x[InvalidStructLit-65] + _ = x[MissingLitField-66] + _ = x[DuplicateLitField-67] + _ = x[UnexportedLitField-68] + _ = x[InvalidLitField-69] + _ = x[UntypedLit-70] + _ = x[InvalidLit-71] + _ = x[AmbiguousSelector-72] + _ = x[UndeclaredImportedName-73] + _ = x[UnexportedName-74] + _ = x[UndeclaredName-75] + _ = x[MissingFieldOrMethod-76] + _ = x[BadDotDotDotSyntax-77] + _ = x[NonVariadicDotDotDot-78] + _ = x[MisplacedDotDotDot-79] + _ = x[InvalidDotDotDotOperand-80] + _ = x[InvalidDotDotDot-81] + _ = x[UncalledBuiltin-82] + _ = x[InvalidAppend-83] + _ = x[InvalidCap-84] + _ = x[InvalidClose-85] + _ = x[InvalidCopy-86] + _ = x[InvalidComplex-87] + _ = x[InvalidDelete-88] + _ = x[InvalidImag-89] + _ = x[InvalidLen-90] + _ = x[SwappedMakeArgs-91] + _ = x[InvalidMake-92] + _ = x[InvalidReal-93] + _ = x[InvalidAssert-94] + _ = x[ImpossibleAssert-95] + _ = x[InvalidConversion-96] + _ = x[InvalidUntypedConversion-97] + _ = x[BadOffsetofSyntax-98] + _ = x[InvalidOffsetof-99] + _ = x[UnusedExpr-100] + _ = x[UnusedVar-101] + _ = x[MissingReturn-102] + _ = x[WrongResultCount-103] + _ = x[OutOfScopeResult-104] + _ = x[InvalidCond-105] + _ = x[InvalidPostDecl-106] + _ = x[InvalidChanRange-107] + _ = x[InvalidIterVar-108] + _ = x[InvalidRangeExpr-109] + _ = x[MisplacedBreak-110] + _ = x[MisplacedContinue-111] + _ = x[MisplacedFallthrough-112] + _ = x[DuplicateCase-113] + _ = x[DuplicateDefault-114] + _ = x[BadTypeKeyword-115] + _ = x[InvalidTypeSwitch-116] + _ = x[InvalidExprSwitch-117] + _ = x[InvalidSelectCase-118] + _ = x[UndeclaredLabel-119] + _ = x[DuplicateLabel-120] + _ = x[MisplacedLabel-121] + _ = x[UnusedLabel-122] + _ = x[JumpOverDecl-123] + _ = x[JumpIntoBlock-124] + _ = x[InvalidMethodExpr-125] + _ = 
x[WrongArgCount-126] + _ = x[InvalidCall-127] + _ = x[UnusedResults-128] + _ = x[InvalidDefer-129] + _ = x[InvalidGo-130] + _ = x[BadDecl-131] + _ = x[RepeatedDecl-132] + _ = x[InvalidUnsafeAdd-133] + _ = x[InvalidUnsafeSlice-134] + _ = x[UnsupportedFeature-135] + _ = x[NotAGenericType-136] + _ = x[WrongTypeArgCount-137] + _ = x[CannotInferTypeArgs-138] + _ = x[InvalidTypeArg-139] + _ = x[InvalidInstanceCycle-140] + _ = x[InvalidUnion-141] + _ = x[MisplacedConstraintIface-142] + _ = x[InvalidMethodTypeParams-143] + _ = x[MisplacedTypeParam-144] + _ = x[InvalidUnsafeSliceData-145] + _ = x[InvalidUnsafeString-146] +} + +const ( + _ErrorCode_name_0 = "InvalidSyntaxTree" + _ErrorCode_name_1 = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilUseWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeatureNotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParamInvalidUnsafeSliceDataInvalidUnsafeString" +) + +var ( + _ErrorCode_index_1 = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 218, 234, 253, 261, 277, 295, 312, 330, 354, 362, 377, 393, 411, 428, 443, 450, 461, 484, 499, 511, 522, 537, 551, 566, 581, 594, 603, 617, 632, 643, 658, 667, 683, 703, 721, 740, 752, 771, 790, 806, 823, 842, 856, 867, 882, 895, 910, 926, 940, 956, 971, 988, 1006, 1021, 1031, 1041, 1058, 1080, 1094, 1108, 1128, 1146, 1166, 1184, 1207, 1223, 1238, 1251, 1261, 1273, 1284, 1298, 1311, 1322, 1332, 1347, 1358, 1369, 1382, 1398, 1415, 1439, 1456, 1471, 1481, 1490, 1503, 1519, 1535, 1546, 1561, 1577, 1591, 1607, 1621, 1638, 1658, 1671, 1687, 1701, 1718, 
1735, 1752, 1767, 1781, 1795, 1806, 1818, 1831, 1848, 1861, 1872, 1885, 1897, 1906, 1913, 1925, 1941, 1959, 1977, 1992, 2009, 2028, 2042, 2062, 2074, 2098, 2121, 2139, 2161, 2180} +) + +func (i ErrorCode) String() string { + switch { + case i == -1: + return _ErrorCode_name_0 + case 1 <= i && i <= 146: + i -= 1 + return _ErrorCode_name_1[_ErrorCode_index_1[i]:_ErrorCode_index_1[i+1]] + default: + return "ErrorCode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/typesinternal/recv.go new file mode 100644 index 00000000000..fea7c8b75e8 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/typesinternal/recv.go @@ -0,0 +1,43 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "go/types" + + "golang.org/x/tools/internal/aliases" +) + +// ReceiverNamed returns the named type (if any) associated with the +// type of recv, which may be of the form N or *N, or aliases thereof. +// It also reports whether a Pointer was present. +func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { + t := recv.Type() + if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + isPtr = true + t = ptr.Elem() + } + named, _ = aliases.Unalias(t).(*types.Named) + return +} + +// Unpointer returns T given *T or an alias thereof. +// For all other types it is the identity function. +// It does not look at underlying types. +// The result may be an alias. +// +// Use this function to strip off the optional pointer on a receiver +// in a field or method selection, without losing the named type +// (which is needed to compute the method set). +// +// See also [typeparams.MustDeref], which removes one level of +// indirection from the type, regardless of named types (analogous to +// a LOAD instruction). +func Unpointer(t types.Type) types.Type { + if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + return ptr.Elem() + } + return t +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/typesinternal/toonew.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/typesinternal/toonew.go new file mode 100644 index 00000000000..cc86487eaa0 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/typesinternal/toonew.go @@ -0,0 +1,89 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "go/types" + + "golang.org/x/tools/internal/stdlib" + "golang.org/x/tools/internal/versions" +) + +// TooNewStdSymbols computes the set of package-level symbols +// exported by pkg that are not available at the specified version. +// The result maps each symbol to its minimum version. +// +// The pkg is allowed to contain type errors. +func TooNewStdSymbols(pkg *types.Package, version string) map[types.Object]string { + disallowed := make(map[types.Object]string) + + // Pass 1: package-level symbols. 
+ symbols := stdlib.PackageSymbols[pkg.Path()] + for _, sym := range symbols { + symver := sym.Version.String() + if versions.Before(version, symver) { + switch sym.Kind { + case stdlib.Func, stdlib.Var, stdlib.Const, stdlib.Type: + disallowed[pkg.Scope().Lookup(sym.Name)] = symver + } + } + } + + // Pass 2: fields and methods. + // + // We allow fields and methods if their associated type is + // disallowed, as otherwise we would report false positives + // for compatibility shims. Consider: + // + // //go:build go1.22 + // type T struct { F std.Real } // correct new API + // + // //go:build !go1.22 + // type T struct { F fake } // shim + // type fake struct { ... } + // func (fake) M () {} + // + // These alternative declarations of T use either the std.Real + // type, introduced in go1.22, or a fake type, for the field + // F. (The fakery could be arbitrarily deep, involving more + // nested fields and methods than are shown here.) Clients + // that use the compatibility shim T will compile with any + // version of go, whether older or newer than go1.22, but only + // the newer version will use the std.Real implementation. + // + // Now consider a reference to method M in new(T).F.M() in a + // module that requires a minimum of go1.21. The analysis may + // occur using a version of Go higher than 1.21, selecting the + // first version of T, so the method M is Real.M. This would + // spuriously cause the analyzer to report a reference to a + // too-new symbol even though this expression compiles just + // fine (with the fake implementation) using go1.21. + for _, sym := range symbols { + symVersion := sym.Version.String() + if !versions.Before(version, symVersion) { + continue // allowed + } + + var obj types.Object + switch sym.Kind { + case stdlib.Field: + typename, name := sym.SplitField() + if t := pkg.Scope().Lookup(typename); t != nil && disallowed[t] == "" { + obj, _, _ = types.LookupFieldOrMethod(t.Type(), false, pkg, name) + } + + case stdlib.Method: + ptr, recvname, name := sym.SplitMethod() + if t := pkg.Scope().Lookup(recvname); t != nil && disallowed[t] == "" { + obj, _, _ = types.LookupFieldOrMethod(t.Type(), ptr, pkg, name) + } + } + if obj != nil { + disallowed[obj] = symVersion + } + } + + return disallowed +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/typesinternal/types.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/typesinternal/types.go new file mode 100644 index 00000000000..7c77c2fbc03 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -0,0 +1,50 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typesinternal provides access to internal go/types APIs that are not +// yet exported. +package typesinternal + +import ( + "go/token" + "go/types" + "reflect" + "unsafe" +) + +func SetUsesCgo(conf *types.Config) bool { + v := reflect.ValueOf(conf).Elem() + + f := v.FieldByName("go115UsesCgo") + if !f.IsValid() { + f = v.FieldByName("UsesCgo") + if !f.IsValid() { + return false + } + } + + addr := unsafe.Pointer(f.UnsafeAddr()) + *(*bool)(addr) = true + + return true +} + +// ReadGo116ErrorData extracts additional information from types.Error values +// generated by Go version 1.16 and later: the error code, start position, and +// end position. If all positions are valid, start <= err.Pos <= end. 
+// +// If the data could not be read, the final result parameter will be false. +func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) { + var data [3]int + // By coincidence all of these fields are ints, which simplifies things. + v := reflect.ValueOf(err) + for i, name := range []string{"go116code", "go116start", "go116end"} { + f := v.FieldByName(name) + if !f.IsValid() { + return 0, 0, 0, false + } + data[i] = int(f.Int()) + } + return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/features.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/features.go new file mode 100644 index 00000000000..b53f1786161 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/features.go @@ -0,0 +1,43 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package versions + +// This file contains predicates for working with file versions to +// decide when a tool should consider a language feature enabled. + +// GoVersions that features in x/tools can be gated to. +const ( + Go1_18 = "go1.18" + Go1_19 = "go1.19" + Go1_20 = "go1.20" + Go1_21 = "go1.21" + Go1_22 = "go1.22" +) + +// Future is an invalid unknown Go version sometime in the future. +// Do not use directly with Compare. +const Future = "" + +// AtLeast reports whether the file version v comes after a Go release. +// +// Use this predicate to enable a behavior once a certain Go release +// has happened (and stays enabled in the future). +func AtLeast(v, release string) bool { + if v == Future { + return true // an unknown future version is always after y. + } + return Compare(Lang(v), Lang(release)) >= 0 +} + +// Before reports whether the file version v is strictly before a Go release. +// +// Use this predicate to disable a behavior once a certain Go release +// has happened (and stays enabled in the future). +func Before(v, release string) bool { + if v == Future { + return false // an unknown future version happens after y. + } + return Compare(Lang(v), Lang(release)) < 0 +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/gover.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/gover.go new file mode 100644 index 00000000000..bbabcd22e94 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/gover.go @@ -0,0 +1,172 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This is a fork of internal/gover for use by x/tools until +// go1.21 and earlier are no longer supported by x/tools. + +package versions + +import "strings" + +// A gover is a parsed Go gover: major[.Minor[.Patch]][kind[pre]] +// The numbers are the original decimal strings to avoid integer overflows +// and since there is very little actual math. (Probably overflow doesn't matter in practice, +// but at the time this code was written, there was an existing test that used +// go1.99999999999, which does not fit in an int on 32-bit platforms. +// The "big decimal" representation avoids the problem entirely.) 
+type gover struct { + major string // decimal + minor string // decimal or "" + patch string // decimal or "" + kind string // "", "alpha", "beta", "rc" + pre string // decimal or "" +} + +// compare returns -1, 0, or +1 depending on whether +// x < y, x == y, or x > y, interpreted as toolchain versions. +// The versions x and y must not begin with a "go" prefix: just "1.21" not "go1.21". +// Malformed versions compare less than well-formed versions and equal to each other. +// The language version "1.21" compares less than the release candidate and eventual releases "1.21rc1" and "1.21.0". +func compare(x, y string) int { + vx := parse(x) + vy := parse(y) + + if c := cmpInt(vx.major, vy.major); c != 0 { + return c + } + if c := cmpInt(vx.minor, vy.minor); c != 0 { + return c + } + if c := cmpInt(vx.patch, vy.patch); c != 0 { + return c + } + if c := strings.Compare(vx.kind, vy.kind); c != 0 { // "" < alpha < beta < rc + return c + } + if c := cmpInt(vx.pre, vy.pre); c != 0 { + return c + } + return 0 +} + +// lang returns the Go language version. For example, lang("1.2.3") == "1.2". +func lang(x string) string { + v := parse(x) + if v.minor == "" || v.major == "1" && v.minor == "0" { + return v.major + } + return v.major + "." + v.minor +} + +// isValid reports whether the version x is valid. +func isValid(x string) bool { + return parse(x) != gover{} +} + +// parse parses the Go version string x into a version. +// It returns the zero version if x is malformed. +func parse(x string) gover { + var v gover + + // Parse major version. + var ok bool + v.major, x, ok = cutInt(x) + if !ok { + return gover{} + } + if x == "" { + // Interpret "1" as "1.0.0". + v.minor = "0" + v.patch = "0" + return v + } + + // Parse . before minor version. + if x[0] != '.' { + return gover{} + } + + // Parse minor version. + v.minor, x, ok = cutInt(x[1:]) + if !ok { + return gover{} + } + if x == "" { + // Patch missing is same as "0" for older versions. + // Starting in Go 1.21, patch missing is different from explicit .0. + if cmpInt(v.minor, "21") < 0 { + v.patch = "0" + } + return v + } + + // Parse patch if present. + if x[0] == '.' { + v.patch, x, ok = cutInt(x[1:]) + if !ok || x != "" { + // Note that we are disallowing prereleases (alpha, beta, rc) for patch releases here (x != ""). + // Allowing them would be a bit confusing because we already have: + // 1.21 < 1.21rc1 + // But a prerelease of a patch would have the opposite effect: + // 1.21.3rc1 < 1.21.3 + // We've never needed them before, so let's not start now. + return gover{} + } + return v + } + + // Parse prerelease. + i := 0 + for i < len(x) && (x[i] < '0' || '9' < x[i]) { + if x[i] < 'a' || 'z' < x[i] { + return gover{} + } + i++ + } + if i == 0 { + return gover{} + } + v.kind, x = x[:i], x[i:] + if x == "" { + return v + } + v.pre, x, ok = cutInt(x) + if !ok || x != "" { + return gover{} + } + + return v +} + +// cutInt scans the leading decimal number at the start of x to an integer +// and returns that value and the rest of the string. +func cutInt(x string) (n, rest string, ok bool) { + i := 0 + for i < len(x) && '0' <= x[i] && x[i] <= '9' { + i++ + } + if i == 0 || x[0] == '0' && i != 1 { // no digits or unnecessary leading zero + return "", "", false + } + return x[:i], x[i:], true +} + +// cmpInt returns cmp.Compare(x, y) interpreting x and y as decimal numbers. +// (Copied from golang.org/x/mod/semver's compareInt.) 
+func cmpInt(x, y string) int { + if x == y { + return 0 + } + if len(x) < len(y) { + return -1 + } + if len(x) > len(y) { + return +1 + } + if x < y { + return -1 + } else { + return +1 + } +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/toolchain.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/toolchain.go new file mode 100644 index 00000000000..377bf7a53b4 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/toolchain.go @@ -0,0 +1,14 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package versions + +// toolchain is maximum version (<1.22) that the go toolchain used +// to build the current tool is known to support. +// +// When a tool is built with >=1.22, the value of toolchain is unused. +// +// x/tools does not support building with go <1.18. So we take this +// as the minimum possible maximum. +var toolchain string = Go1_18 diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go new file mode 100644 index 00000000000..f65beed9d83 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go @@ -0,0 +1,14 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 +// +build go1.19 + +package versions + +func init() { + if Compare(toolchain, Go1_19) < 0 { + toolchain = Go1_19 + } +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go new file mode 100644 index 00000000000..1a9efa126cd --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go @@ -0,0 +1,14 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.20 +// +build go1.20 + +package versions + +func init() { + if Compare(toolchain, Go1_20) < 0 { + toolchain = Go1_20 + } +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go new file mode 100644 index 00000000000..b7ef216dfec --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go @@ -0,0 +1,14 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 +// +build go1.21 + +package versions + +func init() { + if Compare(toolchain, Go1_21) < 0 { + toolchain = Go1_21 + } +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/types.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/types.go new file mode 100644 index 00000000000..562eef21fa2 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/types.go @@ -0,0 +1,19 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package versions + +import ( + "go/types" +) + +// GoVersion returns the Go version of the type package. +// It returns zero if no version can be determined. +func GoVersion(pkg *types.Package) string { + // TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25. + if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok { + return pkg.GoVersion() + } + return "" +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/types_go121.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/types_go121.go new file mode 100644 index 00000000000..b4345d3349e --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/types_go121.go @@ -0,0 +1,30 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.22 +// +build !go1.22 + +package versions + +import ( + "go/ast" + "go/types" +) + +// FileVersion returns a language version (<=1.21) derived from runtime.Version() +// or an unknown future version. +func FileVersion(info *types.Info, file *ast.File) string { + // In x/tools built with Go <= 1.21, we do not have Info.FileVersions + // available. We use a go version derived from the toolchain used to + // compile the tool by default. + // This will be <= go1.21. We take this as the maximum version that + // this tool can support. + // + // There are no features currently in x/tools that need to tell fine grained + // differences for versions <1.22. + return toolchain +} + +// InitFileVersions is a noop when compiled with this Go version. +func InitFileVersions(*types.Info) {} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/types_go122.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/types_go122.go new file mode 100644 index 00000000000..e8180632a52 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/types_go122.go @@ -0,0 +1,41 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.22 +// +build go1.22 + +package versions + +import ( + "go/ast" + "go/types" +) + +// FileVersions returns a file's Go version. +// The reported version is an unknown Future version if a +// version cannot be determined. +func FileVersion(info *types.Info, file *ast.File) string { + // In tools built with Go >= 1.22, the Go version of a file + // follow a cascades of sources: + // 1) types.Info.FileVersion, which follows the cascade: + // 1.a) file version (ast.File.GoVersion), + // 1.b) the package version (types.Config.GoVersion), or + // 2) is some unknown Future version. + // + // File versions require a valid package version to be provided to types + // in Config.GoVersion. Config.GoVersion is either from the package's module + // or the toolchain (go run). This value should be provided by go/packages + // or unitchecker.Config.GoVersion. + if v := info.FileVersions[file]; IsValid(v) { + return v + } + // Note: we could instead return runtime.Version() [if valid]. + // This would act as a max version on what a tool can support. + return Future +} + +// InitFileVersions initializes info to record Go versions for Go files. 
+func InitFileVersions(info *types.Info) { + info.FileVersions = make(map[*ast.File]string) +} diff --git a/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/versions.go b/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/versions.go new file mode 100644 index 00000000000..8d1f7453dbf --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/tools/internal/versions/versions.go @@ -0,0 +1,57 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package versions + +import ( + "strings" +) + +// Note: If we use build tags to use go/versions when go >=1.22, +// we run into go.dev/issue/53737. Under some operations users would see an +// import of "go/versions" even if they would not compile the file. +// For example, during `go get -u ./...` (go.dev/issue/64490) we do not try to include +// For this reason, this library just a clone of go/versions for the moment. + +// Lang returns the Go language version for version x. +// If x is not a valid version, Lang returns the empty string. +// For example: +// +// Lang("go1.21rc2") = "go1.21" +// Lang("go1.21.2") = "go1.21" +// Lang("go1.21") = "go1.21" +// Lang("go1") = "go1" +// Lang("bad") = "" +// Lang("1.21") = "" +func Lang(x string) string { + v := lang(stripGo(x)) + if v == "" { + return "" + } + return x[:2+len(v)] // "go"+v without allocation +} + +// Compare returns -1, 0, or +1 depending on whether +// x < y, x == y, or x > y, interpreted as Go versions. +// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21". +// Invalid versions, including the empty string, compare less than +// valid versions and equal to each other. +// The language version "go1.21" compares less than the +// release candidate and eventual releases "go1.21rc1" and "go1.21.0". +// Custom toolchain suffixes are ignored during comparison: +// "go1.21.0" and "go1.21.0-bigcorp" are equal. +func Compare(x, y string) int { return compare(stripGo(x), stripGo(y)) } + +// IsValid reports whether the version x is valid. +func IsValid(x string) bool { return isValid(stripGo(x)) } + +// stripGo converts from a "go1.21" version to a "1.21" version. +// If v does not start with "go", stripGo returns the empty string (a known invalid version). +func stripGo(v string) string { + v, _, _ = strings.Cut(v, "-") // strip -bigcorp suffix. + if len(v) < 2 || v[:2] != "go" { + return "" + } + return v[2:] +} diff --git a/terraform/providers/google/vendor/google.golang.org/api/appengine/v1/appengine-api.json b/terraform/providers/google/vendor/google.golang.org/api/appengine/v1/appengine-api.json index 6c0782b8935..e1f3dff1215 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/appengine/v1/appengine-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/appengine/v1/appengine-api.json @@ -1710,6 +1710,66 @@ ] } } + }, + "services": { + "resources": { + "versions": { + "methods": { + "delete": { + "description": "Deletes an existing Version resource.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/applications/{applicationsId}/services/{servicesId}/versions/{versionsId}", + "httpMethod": "DELETE", + "id": "appengine.projects.locations.applications.services.versions.delete", + "parameterOrder": [ + "projectsId", + "locationsId", + "applicationsId", + "servicesId", + "versionsId" + ], + "parameters": { + "applicationsId": { + "description": "Part of `name`. 
See documentation of `projectsId`.", + "location": "path", + "required": true, + "type": "string" + }, + "locationsId": { + "description": "Part of `name`. See documentation of `projectsId`.", + "location": "path", + "required": true, + "type": "string" + }, + "projectsId": { + "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default/versions/v1.", + "location": "path", + "required": true, + "type": "string" + }, + "servicesId": { + "description": "Part of `name`. See documentation of `projectsId`.", + "location": "path", + "required": true, + "type": "string" + }, + "versionsId": { + "description": "Part of `name`. See documentation of `projectsId`.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "v1/projects/{projectsId}/locations/{locationsId}/applications/{applicationsId}/services/{servicesId}/versions/{versionsId}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } } } } @@ -1718,7 +1778,7 @@ } } }, - "revision": "20240415", + "revision": "20241007", "rootUrl": "https://appengine.googleapis.com/", "schemas": { "ApiConfigHandler": { @@ -2480,6 +2540,24 @@ }, "type": "object" }, + "GceTag": { + "description": "For use only by GCE. GceTag is a wrapper around the GCE administrative tag with parent info.", + "id": "GceTag", + "properties": { + "parent": { + "description": "The parents(s) of the tag. Eg. projects/123, folders/456 It usually contains only one parent. But, in some corner cases, it can contain multiple parents. Currently, organizations are not supported.", + "items": { + "type": "string" + }, + "type": "array" + }, + "tag": { + "description": "The administrative_tag name.", + "type": "string" + } + }, + "type": "object" + }, "GoogleAppengineV1betaLocationMetadata": { "description": "Metadata for the given google.cloud.location.Location.", "id": "GoogleAppengineV1betaLocationMetadata", @@ -3343,6 +3421,13 @@ ], "type": "string" }, + "gceTag": { + "description": "The GCE tags associated with the consumer project and those inherited due to their ancestry, if any. Not supported by CCFE.", + "items": { + "$ref": "GceTag" + }, + "type": "array" + }, "p4ServiceAccount": { "description": "The service account authorized to operate on the consumer project. Note: CCFE only propagates P4SA with default tag to CLH.", "type": "string" @@ -3699,7 +3784,8 @@ "type": "object" }, "id": { - "description": "Relative name of the service within the application. Example: default.@OutputOnly", + "description": "Output only. Relative name of the service within the application. Example: default.@OutputOnly", + "readOnly": true, "type": "string" }, "labels": { @@ -3710,7 +3796,8 @@ "type": "object" }, "name": { - "description": "Full path to the Service resource in the API. Example: apps/myapp/services/default.@OutputOnly", + "description": "Output only. Full path to the Service resource in the API. Example: apps/myapp/services/default.@OutputOnly", + "readOnly": true, "type": "string" }, "networkSettings": { @@ -4026,7 +4113,8 @@ "type": "string" }, "createdBy": { - "description": "Email address of the user who created this version.@OutputOnly", + "description": "Output only. Email address of the user who created this version.@OutputOnly", + "readOnly": true, "type": "string" }, "defaultExpiration": { @@ -4039,8 +4127,9 @@ "description": "Code and application artifacts that make up this version.Only returned in GET requests if view=FULL is set." 
}, "diskUsageBytes": { - "description": "Total size in bytes of all the files that are included in this version and currently hosted on the App Engine disk.@OutputOnly", + "description": "Output only. Total size in bytes of all the files that are included in this version and currently hosted on the App Engine disk.@OutputOnly", "format": "int64", + "readOnly": true, "type": "string" }, "endpointsApiService": { @@ -4145,7 +4234,8 @@ "description": "A service with manual scaling runs continuously, allowing you to perform complex initialization and rely on the state of its memory over time. Manually scaled versions are sometimes referred to as \"backends\"." }, "name": { - "description": "Full path to the Version resource in the API. Example: apps/myapp/services/default/versions/v1.@OutputOnly", + "description": "Output only. Full path to the Version resource in the API. Example: apps/myapp/services/default/versions/v1.@OutputOnly", + "readOnly": true, "type": "string" }, "network": { @@ -4203,7 +4293,8 @@ "type": "boolean" }, "versionUrl": { - "description": "Serving URL for this version. Example: \"https://myversion-dot-myservice-dot-myapp.appspot.com\"@OutputOnly", + "description": "Output only. Serving URL for this version. Example: \"https://myversion-dot-myservice-dot-myapp.appspot.com\"@OutputOnly", + "readOnly": true, "type": "string" }, "vm": { diff --git a/terraform/providers/google/vendor/google.golang.org/api/appengine/v1/appengine-gen.go b/terraform/providers/google/vendor/google.golang.org/api/appengine/v1/appengine-gen.go index 26e5fbbadd6..515eb6ec586 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/appengine/v1/appengine-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/appengine/v1/appengine-gen.go @@ -327,6 +327,7 @@ type ProjectsLocationsService struct { func NewProjectsLocationsApplicationsService(s *APIService) *ProjectsLocationsApplicationsService { rs := &ProjectsLocationsApplicationsService{s: s} rs.AuthorizedDomains = NewProjectsLocationsApplicationsAuthorizedDomainsService(s) + rs.Services = NewProjectsLocationsApplicationsServicesService(s) return rs } @@ -334,6 +335,8 @@ type ProjectsLocationsApplicationsService struct { s *APIService AuthorizedDomains *ProjectsLocationsApplicationsAuthorizedDomainsService + + Services *ProjectsLocationsApplicationsServicesService } func NewProjectsLocationsApplicationsAuthorizedDomainsService(s *APIService) *ProjectsLocationsApplicationsAuthorizedDomainsService { @@ -345,6 +348,27 @@ type ProjectsLocationsApplicationsAuthorizedDomainsService struct { s *APIService } +func NewProjectsLocationsApplicationsServicesService(s *APIService) *ProjectsLocationsApplicationsServicesService { + rs := &ProjectsLocationsApplicationsServicesService{s: s} + rs.Versions = NewProjectsLocationsApplicationsServicesVersionsService(s) + return rs +} + +type ProjectsLocationsApplicationsServicesService struct { + s *APIService + + Versions *ProjectsLocationsApplicationsServicesVersionsService +} + +func NewProjectsLocationsApplicationsServicesVersionsService(s *APIService) *ProjectsLocationsApplicationsServicesVersionsService { + rs := &ProjectsLocationsApplicationsServicesVersionsService{s: s} + return rs +} + +type ProjectsLocationsApplicationsServicesVersionsService struct { + s *APIService +} + // ApiConfigHandler: Google Cloud Endpoints // (https://cloud.google.com/endpoints) configuration for API handlers. 
type ApiConfigHandler struct { @@ -406,9 +430,9 @@ type ApiConfigHandler struct { NullFields []string `json:"-"` } -func (s *ApiConfigHandler) MarshalJSON() ([]byte, error) { +func (s ApiConfigHandler) MarshalJSON() ([]byte, error) { type NoMethod ApiConfigHandler - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ApiEndpointHandler: Uses Google Cloud Endpoints to handle requests. @@ -428,9 +452,9 @@ type ApiEndpointHandler struct { NullFields []string `json:"-"` } -func (s *ApiEndpointHandler) MarshalJSON() ([]byte, error) { +func (s ApiEndpointHandler) MarshalJSON() ([]byte, error) { type NoMethod ApiEndpointHandler - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Application: An Application resource contains the top-level configuration of @@ -517,9 +541,9 @@ type Application struct { NullFields []string `json:"-"` } -func (s *Application) MarshalJSON() ([]byte, error) { +func (s Application) MarshalJSON() ([]byte, error) { type NoMethod Application - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuthorizedCertificate: An SSL certificate that a user has been authorized to @@ -584,9 +608,9 @@ type AuthorizedCertificate struct { NullFields []string `json:"-"` } -func (s *AuthorizedCertificate) MarshalJSON() ([]byte, error) { +func (s AuthorizedCertificate) MarshalJSON() ([]byte, error) { type NoMethod AuthorizedCertificate - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuthorizedDomain: A domain that a user has been authorized to administer. 
To @@ -612,9 +636,9 @@ type AuthorizedDomain struct { NullFields []string `json:"-"` } -func (s *AuthorizedDomain) MarshalJSON() ([]byte, error) { +func (s AuthorizedDomain) MarshalJSON() ([]byte, error) { type NoMethod AuthorizedDomain - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AutomaticScaling: Automatic scaling is based on request rate, response @@ -672,9 +696,9 @@ type AutomaticScaling struct { NullFields []string `json:"-"` } -func (s *AutomaticScaling) MarshalJSON() ([]byte, error) { +func (s AutomaticScaling) MarshalJSON() ([]byte, error) { type NoMethod AutomaticScaling - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BasicScaling: A service with basic scaling will create an instance when the @@ -700,9 +724,9 @@ type BasicScaling struct { NullFields []string `json:"-"` } -func (s *BasicScaling) MarshalJSON() ([]byte, error) { +func (s BasicScaling) MarshalJSON() ([]byte, error) { type NoMethod BasicScaling - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BatchUpdateIngressRulesRequest: Request message for @@ -723,9 +747,9 @@ type BatchUpdateIngressRulesRequest struct { NullFields []string `json:"-"` } -func (s *BatchUpdateIngressRulesRequest) MarshalJSON() ([]byte, error) { +func (s BatchUpdateIngressRulesRequest) MarshalJSON() ([]byte, error) { type NoMethod BatchUpdateIngressRulesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BatchUpdateIngressRulesResponse: Response message for @@ -749,9 +773,9 @@ type BatchUpdateIngressRulesResponse struct { NullFields []string `json:"-"` } -func (s *BatchUpdateIngressRulesResponse) MarshalJSON() ([]byte, error) { +func (s BatchUpdateIngressRulesResponse) MarshalJSON() ([]byte, error) { type NoMethod BatchUpdateIngressRulesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CertificateRawData: An SSL certificate obtained from a certificate @@ -779,9 +803,9 @@ type CertificateRawData struct { NullFields []string `json:"-"` } -func (s *CertificateRawData) MarshalJSON() ([]byte, error) { +func (s CertificateRawData) MarshalJSON() ([]byte, error) { type NoMethod CertificateRawData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CloudBuildOptions: Options for the build operations performed as a part of @@ -809,9 +833,9 @@ type CloudBuildOptions struct { NullFields []string `json:"-"` } -func (s *CloudBuildOptions) MarshalJSON() ([]byte, error) { +func (s CloudBuildOptions) MarshalJSON() ([]byte, error) { type NoMethod CloudBuildOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ContainerInfo: Docker image that is used to create a container and start a @@ -835,9 +859,9 @@ type ContainerInfo struct { NullFields []string `json:"-"` } -func (s *ContainerInfo) MarshalJSON() ([]byte, error) { +func (s ContainerInfo) MarshalJSON() ([]byte, error) { 
type NoMethod ContainerInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ContainerState: ContainerState contains the externally-visible container @@ -894,9 +918,9 @@ type ContainerState struct { NullFields []string `json:"-"` } -func (s *ContainerState) MarshalJSON() ([]byte, error) { +func (s ContainerState) MarshalJSON() ([]byte, error) { type NoMethod ContainerState - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CpuUtilization: Target scaling by CPU usage. @@ -920,9 +944,9 @@ type CpuUtilization struct { NullFields []string `json:"-"` } -func (s *CpuUtilization) MarshalJSON() ([]byte, error) { +func (s CpuUtilization) MarshalJSON() ([]byte, error) { type NoMethod CpuUtilization - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *CpuUtilization) UnmarshalJSON(data []byte) error { @@ -958,9 +982,9 @@ type CreateVersionMetadataV1 struct { NullFields []string `json:"-"` } -func (s *CreateVersionMetadataV1) MarshalJSON() ([]byte, error) { +func (s CreateVersionMetadataV1) MarshalJSON() ([]byte, error) { type NoMethod CreateVersionMetadataV1 - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateVersionMetadataV1Alpha: Metadata for the given @@ -983,9 +1007,9 @@ type CreateVersionMetadataV1Alpha struct { NullFields []string `json:"-"` } -func (s *CreateVersionMetadataV1Alpha) MarshalJSON() ([]byte, error) { +func (s CreateVersionMetadataV1Alpha) MarshalJSON() ([]byte, error) { type NoMethod CreateVersionMetadataV1Alpha - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateVersionMetadataV1Beta: Metadata for the given @@ -1008,9 +1032,9 @@ type CreateVersionMetadataV1Beta struct { NullFields []string `json:"-"` } -func (s *CreateVersionMetadataV1Beta) MarshalJSON() ([]byte, error) { +func (s CreateVersionMetadataV1Beta) MarshalJSON() ([]byte, error) { type NoMethod CreateVersionMetadataV1Beta - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Date: Represents a whole or partial calendar date, such as a birthday. The @@ -1045,9 +1069,9 @@ type Date struct { NullFields []string `json:"-"` } -func (s *Date) MarshalJSON() ([]byte, error) { +func (s Date) MarshalJSON() ([]byte, error) { type NoMethod Date - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DebugInstanceRequest: Request message for Instances.DebugInstance. 
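[Editor's note] The hunks above and below uniformly change the generated MarshalJSON methods from pointer receivers (`func (s *T) MarshalJSON()`) to value receivers (`func (s T) MarshalJSON()`). The patch does not state the motivation, but a plausible one is Go's method-set rule: a marshaler with a pointer receiver is invisible to encoding/json when the struct is passed by value, so the ForceSendFields/NullFields handling would silently be skipped for non-pointer values. A minimal sketch, independent of the generated code:

package main

import (
	"encoding/json"
	"fmt"
)

type PtrRecv struct{ Path string }

// Pointer receiver: only *PtrRecv implements json.Marshaler.
func (p *PtrRecv) MarshalJSON() ([]byte, error) { return []byte(`"custom"`), nil }

type ValRecv struct{ Path string }

// Value receiver: both ValRecv and *ValRecv implement json.Marshaler.
func (v ValRecv) MarshalJSON() ([]byte, error) { return []byte(`"custom"`), nil }

func main() {
	a, _ := json.Marshal(PtrRecv{Path: "/"}) // value is not addressable: default encoding
	b, _ := json.Marshal(ValRecv{Path: "/"}) // custom marshaler runs
	fmt.Println(string(a), string(b))        // {"Path":"/"} "custom"
}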
@@ -1071,9 +1095,9 @@ type DebugInstanceRequest struct { NullFields []string `json:"-"` } -func (s *DebugInstanceRequest) MarshalJSON() ([]byte, error) { +func (s DebugInstanceRequest) MarshalJSON() ([]byte, error) { type NoMethod DebugInstanceRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Deployment: Code and application artifacts used to deploy a version to App @@ -1106,9 +1130,9 @@ type Deployment struct { NullFields []string `json:"-"` } -func (s *Deployment) MarshalJSON() ([]byte, error) { +func (s Deployment) MarshalJSON() ([]byte, error) { type NoMethod Deployment - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DiskUtilization: Target scaling by disk usage. Only applicable in the App @@ -1135,9 +1159,9 @@ type DiskUtilization struct { NullFields []string `json:"-"` } -func (s *DiskUtilization) MarshalJSON() ([]byte, error) { +func (s DiskUtilization) MarshalJSON() ([]byte, error) { type NoMethod DiskUtilization - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DomainMapping: A domain serving an App Engine application. @@ -1171,9 +1195,9 @@ type DomainMapping struct { NullFields []string `json:"-"` } -func (s *DomainMapping) MarshalJSON() ([]byte, error) { +func (s DomainMapping) MarshalJSON() ([]byte, error) { type NoMethod DomainMapping - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Empty: A generic empty message that you can re-use to avoid defining @@ -1233,9 +1257,9 @@ type EndpointsApiService struct { NullFields []string `json:"-"` } -func (s *EndpointsApiService) MarshalJSON() ([]byte, error) { +func (s EndpointsApiService) MarshalJSON() ([]byte, error) { type NoMethod EndpointsApiService - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Entrypoint: The entrypoint for the application. @@ -1255,9 +1279,9 @@ type Entrypoint struct { NullFields []string `json:"-"` } -func (s *Entrypoint) MarshalJSON() ([]byte, error) { +func (s Entrypoint) MarshalJSON() ([]byte, error) { type NoMethod Entrypoint - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ErrorHandler: Custom static error page to be served when an error occurs. 
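[Editor's note] Every marshaler in these hunks declares a local `type NoMethod X` alias before delegating to gensupport.MarshalJSON. The alias sheds the MarshalJSON method itself; without it, marshaling the receiver would re-enter the custom marshaler and recurse forever. A self-contained sketch of the pattern using plain encoding/json (no gensupport):

package main

import (
	"encoding/json"
	"fmt"
)

type Handler struct {
	Path string `json:"path,omitempty"`
}

// MarshalJSON shows the NoMethod pattern used throughout the generated
// code: converting to a method-less defined type prevents json.Marshal
// from calling this method again.
func (h Handler) MarshalJSON() ([]byte, error) {
	type NoMethod Handler // same fields, no methods
	return json.Marshal(NoMethod(h))
}

func main() {
	b, _ := json.Marshal(Handler{Path: "/api"})
	fmt.Println(string(b)) // {"path":"/api"}
}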
@@ -1289,9 +1313,9 @@ type ErrorHandler struct { NullFields []string `json:"-"` } -func (s *ErrorHandler) MarshalJSON() ([]byte, error) { +func (s ErrorHandler) MarshalJSON() ([]byte, error) { type NoMethod ErrorHandler - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FeatureSettings: The feature specific settings to be used in the @@ -1320,9 +1344,9 @@ type FeatureSettings struct { NullFields []string `json:"-"` } -func (s *FeatureSettings) MarshalJSON() ([]byte, error) { +func (s FeatureSettings) MarshalJSON() ([]byte, error) { type NoMethod FeatureSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FileInfo: Single source file that is part of the version to be deployed. @@ -1349,9 +1373,9 @@ type FileInfo struct { NullFields []string `json:"-"` } -func (s *FileInfo) MarshalJSON() ([]byte, error) { +func (s FileInfo) MarshalJSON() ([]byte, error) { type NoMethod FileInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FirewallRule: A single firewall rule that is evaluated against incoming @@ -1398,9 +1422,9 @@ type FirewallRule struct { NullFields []string `json:"-"` } -func (s *FirewallRule) MarshalJSON() ([]byte, error) { +func (s FirewallRule) MarshalJSON() ([]byte, error) { type NoMethod FirewallRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FlexibleRuntimeSettings: Runtime settings for the App Engine flexible @@ -1423,9 +1447,36 @@ type FlexibleRuntimeSettings struct { NullFields []string `json:"-"` } -func (s *FlexibleRuntimeSettings) MarshalJSON() ([]byte, error) { +func (s FlexibleRuntimeSettings) MarshalJSON() ([]byte, error) { type NoMethod FlexibleRuntimeSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// GceTag: For use only by GCE. GceTag is a wrapper around the GCE +// administrative tag with parent info. +type GceTag struct { + // Parent: The parents(s) of the tag. Eg. projects/123, folders/456 It usually + // contains only one parent. But, in some corner cases, it can contain multiple + // parents. Currently, organizations are not supported. + Parent []string `json:"parent,omitempty"` + // Tag: The administrative_tag name. + Tag string `json:"tag,omitempty"` + // ForceSendFields is a list of field names (e.g. "Parent") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Parent") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s GceTag) MarshalJSON() ([]byte, error) { + type NoMethod GceTag + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppengineV1betaLocationMetadata: Metadata for the given @@ -1454,9 +1505,9 @@ type GoogleAppengineV1betaLocationMetadata struct { NullFields []string `json:"-"` } -func (s *GoogleAppengineV1betaLocationMetadata) MarshalJSON() ([]byte, error) { +func (s GoogleAppengineV1betaLocationMetadata) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppengineV1betaLocationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HealthCheck: Health checking configuration for VM instances. Unhealthy @@ -1495,9 +1546,9 @@ type HealthCheck struct { NullFields []string `json:"-"` } -func (s *HealthCheck) MarshalJSON() ([]byte, error) { +func (s HealthCheck) MarshalJSON() ([]byte, error) { type NoMethod HealthCheck - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // IdentityAwareProxy: Identity-Aware Proxy @@ -1529,9 +1580,9 @@ type IdentityAwareProxy struct { NullFields []string `json:"-"` } -func (s *IdentityAwareProxy) MarshalJSON() ([]byte, error) { +func (s IdentityAwareProxy) MarshalJSON() ([]byte, error) { type NoMethod IdentityAwareProxy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Instance: An Instance resource is the computing unit that App Engine uses to @@ -1618,9 +1669,9 @@ type Instance struct { NullFields []string `json:"-"` } -func (s *Instance) MarshalJSON() ([]byte, error) { +func (s Instance) MarshalJSON() ([]byte, error) { type NoMethod Instance - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *Instance) UnmarshalJSON(data []byte) error { @@ -1657,9 +1708,9 @@ type Library struct { NullFields []string `json:"-"` } -func (s *Library) MarshalJSON() ([]byte, error) { +func (s Library) MarshalJSON() ([]byte, error) { type NoMethod Library - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListAuthorizedCertificatesResponse: Response message for @@ -1685,9 +1736,9 @@ type ListAuthorizedCertificatesResponse struct { NullFields []string `json:"-"` } -func (s *ListAuthorizedCertificatesResponse) MarshalJSON() ([]byte, error) { +func (s ListAuthorizedCertificatesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListAuthorizedCertificatesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListAuthorizedDomainsResponse: Response message for @@ -1713,9 +1764,9 @@ type ListAuthorizedDomainsResponse struct { NullFields []string `json:"-"` } -func (s *ListAuthorizedDomainsResponse) MarshalJSON() ([]byte, error) { +func (s ListAuthorizedDomainsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListAuthorizedDomainsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListDomainMappingsResponse: Response 
message for @@ -1741,9 +1792,9 @@ type ListDomainMappingsResponse struct { NullFields []string `json:"-"` } -func (s *ListDomainMappingsResponse) MarshalJSON() ([]byte, error) { +func (s ListDomainMappingsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListDomainMappingsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListIngressRulesResponse: Response message for Firewall.ListIngressRules. @@ -1768,9 +1819,9 @@ type ListIngressRulesResponse struct { NullFields []string `json:"-"` } -func (s *ListIngressRulesResponse) MarshalJSON() ([]byte, error) { +func (s ListIngressRulesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListIngressRulesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListInstancesResponse: Response message for Instances.ListInstances. @@ -1795,9 +1846,9 @@ type ListInstancesResponse struct { NullFields []string `json:"-"` } -func (s *ListInstancesResponse) MarshalJSON() ([]byte, error) { +func (s ListInstancesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListInstancesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListLocationsResponse: The response message for Locations.ListLocations. @@ -1823,9 +1874,9 @@ type ListLocationsResponse struct { NullFields []string `json:"-"` } -func (s *ListLocationsResponse) MarshalJSON() ([]byte, error) { +func (s ListLocationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListLocationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListOperationsResponse: The response message for Operations.ListOperations. @@ -1851,9 +1902,9 @@ type ListOperationsResponse struct { NullFields []string `json:"-"` } -func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { +func (s ListOperationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListOperationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListRuntimesResponse: Response message for Applications.ListRuntimes. @@ -1878,9 +1929,9 @@ type ListRuntimesResponse struct { NullFields []string `json:"-"` } -func (s *ListRuntimesResponse) MarshalJSON() ([]byte, error) { +func (s ListRuntimesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListRuntimesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListServicesResponse: Response message for Services.ListServices. @@ -1905,9 +1956,9 @@ type ListServicesResponse struct { NullFields []string `json:"-"` } -func (s *ListServicesResponse) MarshalJSON() ([]byte, error) { +func (s ListServicesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListServicesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListVersionsResponse: Response message for Versions.ListVersions. 
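[Editor's note] Every struct in this file carries the ForceSendFields/NullFields pair that these marshalers forward to gensupport.MarshalJSON. Per the doc comments above, forced fields are serialized even when zero-valued and null fields are sent as explicit JSON nulls. The following is a simplified model of that behavior, not the real gensupport implementation, and the `scaling`/`Instances` names are illustrative:

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strings"
)

// marshal drops zero-valued fields unless named in force, and emits
// fields named in null as explicit JSON nulls.
func marshal(v interface{}, force, null []string) ([]byte, error) {
	out := map[string]interface{}{}
	rv := reflect.ValueOf(v)
	rt := rv.Type()
	for i := 0; i < rt.NumField(); i++ {
		f := rt.Field(i)
		name := strings.Split(f.Tag.Get("json"), ",")[0]
		if name == "" || name == "-" {
			continue
		}
		switch {
		case has(null, f.Name):
			out[name] = nil // explicit null
		case has(force, f.Name) || !rv.Field(i).IsZero():
			out[name] = rv.Field(i).Interface()
		}
	}
	return json.Marshal(out)
}

func has(list []string, s string) bool {
	for _, x := range list {
		if x == s {
			return true
		}
	}
	return false
}

type scaling struct {
	Instances int64 `json:"instances,omitempty"`
}

func main() {
	b, _ := marshal(scaling{}, []string{"Instances"}, nil)
	fmt.Println(string(b)) // {"instances":0} — the zero value is sent anyway
}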
@@ -1932,9 +1983,9 @@ type ListVersionsResponse struct { NullFields []string `json:"-"` } -func (s *ListVersionsResponse) MarshalJSON() ([]byte, error) { +func (s ListVersionsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListVersionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LivenessCheck: Health checking configuration for VM instances. Unhealthy @@ -1970,9 +2021,9 @@ type LivenessCheck struct { NullFields []string `json:"-"` } -func (s *LivenessCheck) MarshalJSON() ([]byte, error) { +func (s LivenessCheck) MarshalJSON() ([]byte, error) { type NoMethod LivenessCheck - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Location: A resource that represents a Google Cloud location. @@ -2007,9 +2058,9 @@ type Location struct { NullFields []string `json:"-"` } -func (s *Location) MarshalJSON() ([]byte, error) { +func (s Location) MarshalJSON() ([]byte, error) { type NoMethod Location - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LocationMetadata: Metadata for the given google.cloud.location.Location. @@ -2037,9 +2088,9 @@ type LocationMetadata struct { NullFields []string `json:"-"` } -func (s *LocationMetadata) MarshalJSON() ([]byte, error) { +func (s LocationMetadata) MarshalJSON() ([]byte, error) { type NoMethod LocationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ManagedCertificate: A certificate managed by App Engine. @@ -2088,9 +2139,9 @@ type ManagedCertificate struct { NullFields []string `json:"-"` } -func (s *ManagedCertificate) MarshalJSON() ([]byte, error) { +func (s ManagedCertificate) MarshalJSON() ([]byte, error) { type NoMethod ManagedCertificate - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ManualScaling: A service with manual scaling runs continuously, allowing you @@ -2115,9 +2166,9 @@ type ManualScaling struct { NullFields []string `json:"-"` } -func (s *ManualScaling) MarshalJSON() ([]byte, error) { +func (s ManualScaling) MarshalJSON() ([]byte, error) { type NoMethod ManualScaling - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Network: Extra network settings. 
Only applicable in the App Engine flexible @@ -2172,9 +2223,9 @@ type Network struct { NullFields []string `json:"-"` } -func (s *Network) MarshalJSON() ([]byte, error) { +func (s Network) MarshalJSON() ([]byte, error) { type NoMethod Network - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkSettings: A NetworkSettings resource is a container for ingress @@ -2204,9 +2255,9 @@ type NetworkSettings struct { NullFields []string `json:"-"` } -func (s *NetworkSettings) MarshalJSON() ([]byte, error) { +func (s NetworkSettings) MarshalJSON() ([]byte, error) { type NoMethod NetworkSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkUtilization: Target scaling by network usage. Only applicable in the @@ -2233,9 +2284,9 @@ type NetworkUtilization struct { NullFields []string `json:"-"` } -func (s *NetworkUtilization) MarshalJSON() ([]byte, error) { +func (s NetworkUtilization) MarshalJSON() ([]byte, error) { type NoMethod NetworkUtilization - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Operation: This resource represents a long-running operation that is the @@ -2279,9 +2330,9 @@ type Operation struct { NullFields []string `json:"-"` } -func (s *Operation) MarshalJSON() ([]byte, error) { +func (s Operation) MarshalJSON() ([]byte, error) { type NoMethod Operation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationMetadataV1: Metadata for the given google.longrunning.Operation. @@ -2317,9 +2368,9 @@ type OperationMetadataV1 struct { NullFields []string `json:"-"` } -func (s *OperationMetadataV1) MarshalJSON() ([]byte, error) { +func (s OperationMetadataV1) MarshalJSON() ([]byte, error) { type NoMethod OperationMetadataV1 - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationMetadataV1Alpha: Metadata for the given @@ -2356,9 +2407,9 @@ type OperationMetadataV1Alpha struct { NullFields []string `json:"-"` } -func (s *OperationMetadataV1Alpha) MarshalJSON() ([]byte, error) { +func (s OperationMetadataV1Alpha) MarshalJSON() ([]byte, error) { type NoMethod OperationMetadataV1Alpha - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationMetadataV1Beta: Metadata for the given @@ -2395,9 +2446,9 @@ type OperationMetadataV1Beta struct { NullFields []string `json:"-"` } -func (s *OperationMetadataV1Beta) MarshalJSON() ([]byte, error) { +func (s OperationMetadataV1Beta) MarshalJSON() ([]byte, error) { type NoMethod OperationMetadataV1Beta - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ProjectEvent: The request sent to CLHs during project events. 
@@ -2431,9 +2482,9 @@ type ProjectEvent struct { NullFields []string `json:"-"` } -func (s *ProjectEvent) MarshalJSON() ([]byte, error) { +func (s ProjectEvent) MarshalJSON() ([]byte, error) { type NoMethod ProjectEvent - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ProjectsMetadata: ProjectsMetadata is the metadata CCFE stores about the all @@ -2469,6 +2520,9 @@ type ProjectsMetadata struct { // completely removed. This is often due to a data governance purge request and // therefore resources should be deleted when this state is reached. ConsumerProjectState string `json:"consumerProjectState,omitempty"` + // GceTag: The GCE tags associated with the consumer project and those + // inherited due to their ancestry, if any. Not supported by CCFE. + GceTag []*GceTag `json:"gceTag,omitempty"` // P4ServiceAccount: The service account authorized to operate on the consumer // project. Note: CCFE only propagates P4SA with default tag to CLH. P4ServiceAccount string `json:"p4ServiceAccount,omitempty"` @@ -2493,9 +2547,9 @@ type ProjectsMetadata struct { NullFields []string `json:"-"` } -func (s *ProjectsMetadata) MarshalJSON() ([]byte, error) { +func (s ProjectsMetadata) MarshalJSON() ([]byte, error) { type NoMethod ProjectsMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ReadinessCheck: Readiness checking configuration for VM instances. Unhealthy @@ -2533,9 +2587,9 @@ type ReadinessCheck struct { NullFields []string `json:"-"` } -func (s *ReadinessCheck) MarshalJSON() ([]byte, error) { +func (s ReadinessCheck) MarshalJSON() ([]byte, error) { type NoMethod ReadinessCheck - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Reasons: Containers transition between and within states based on reasons @@ -2646,9 +2700,9 @@ type Reasons struct { NullFields []string `json:"-"` } -func (s *Reasons) MarshalJSON() ([]byte, error) { +func (s Reasons) MarshalJSON() ([]byte, error) { type NoMethod Reasons - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RepairApplicationRequest: Request message for @@ -2676,9 +2730,9 @@ type RequestUtilization struct { NullFields []string `json:"-"` } -func (s *RequestUtilization) MarshalJSON() ([]byte, error) { +func (s RequestUtilization) MarshalJSON() ([]byte, error) { type NoMethod RequestUtilization - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourceRecord: A DNS resource record. @@ -2710,9 +2764,9 @@ type ResourceRecord struct { NullFields []string `json:"-"` } -func (s *ResourceRecord) MarshalJSON() ([]byte, error) { +func (s ResourceRecord) MarshalJSON() ([]byte, error) { type NoMethod ResourceRecord - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Resources: Machine resources for a version. 
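[Editor's note] The only schema addition in this file is GceTag, together with the matching `GceTag []*GceTag` field on ProjectsMetadata in the hunk above. A hedged construction sketch — the import path assumes the usual vendored package layout for the file being regenerated here, and the parent/tag values are invented:

package main

import (
	"encoding/json"
	"fmt"

	appengine "google.golang.org/api/appengine/v1" // path assumed
)

func main() {
	meta := &appengine.ProjectsMetadata{
		GceTag: []*appengine.GceTag{{
			Parent: []string{"projects/123"}, // per the field docs, usually a single parent
			Tag:    "environment",            // illustrative administrative_tag name
		}},
	}
	b, _ := json.Marshal(meta) // value-receiver MarshalJSON applies to *T as well
	fmt.Println(string(b))
}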
@@ -2741,9 +2795,9 @@ type Resources struct { NullFields []string `json:"-"` } -func (s *Resources) MarshalJSON() ([]byte, error) { +func (s Resources) MarshalJSON() ([]byte, error) { type NoMethod Resources - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *Resources) UnmarshalJSON(data []byte) error { @@ -2813,9 +2867,9 @@ type Runtime struct { NullFields []string `json:"-"` } -func (s *Runtime) MarshalJSON() ([]byte, error) { +func (s Runtime) MarshalJSON() ([]byte, error) { type NoMethod Runtime - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ScriptHandler: Executes a script to handle the request that matches the URL @@ -2836,9 +2890,9 @@ type ScriptHandler struct { NullFields []string `json:"-"` } -func (s *ScriptHandler) MarshalJSON() ([]byte, error) { +func (s ScriptHandler) MarshalJSON() ([]byte, error) { type NoMethod ScriptHandler - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Service: A Service resource is a logical component of an application that @@ -2853,8 +2907,8 @@ type Service struct { // this field won't be provided by default and can be requested by setting the // IncludeExtraData field in GetServiceRequest GeneratedCustomerMetadata googleapi.RawMessage `json:"generatedCustomerMetadata,omitempty"` - // Id: Relative name of the service within the application. Example: - // default.@OutputOnly + // Id: Output only. Relative name of the service within the application. + // Example: default.@OutputOnly Id string `json:"id,omitempty"` // Labels: A set of labels to apply to this service. Labels are key/value pairs // that describe the service and all resources that belong to it (e.g., @@ -2867,7 +2921,7 @@ type Service struct { // must start with a lowercase letter or an international character. Each // service can have at most 32 labels. Labels map[string]string `json:"labels,omitempty"` - // Name: Full path to the Service resource in the API. Example: + // Name: Output only. Full path to the Service resource in the API. Example: // apps/myapp/services/default.@OutputOnly Name string `json:"name,omitempty"` // NetworkSettings: Ingress settings for this service. Will apply to all @@ -2892,9 +2946,9 @@ type Service struct { NullFields []string `json:"-"` } -func (s *Service) MarshalJSON() ([]byte, error) { +func (s Service) MarshalJSON() ([]byte, error) { type NoMethod Service - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SslSettings: SSL configuration for a DomainMapping resource. @@ -2941,9 +2995,9 @@ type SslSettings struct { NullFields []string `json:"-"` } -func (s *SslSettings) MarshalJSON() ([]byte, error) { +func (s SslSettings) MarshalJSON() ([]byte, error) { type NoMethod SslSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StandardSchedulerSettings: Scheduler settings for standard environment. 
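[Editor's note] Note the asymmetry in these hunks: MarshalJSON moved to value receivers, but the custom UnmarshalJSON methods (on Resources just above, and on CpuUtilization, StandardSchedulerSettings, and Volume elsewhere in this file) keep pointer receivers, since unmarshaling must write through the receiver. Those unmarshalers exist to accept float fields encoded either as JSON numbers or as strings. A simplified stand-in for that helper behavior (not the real gensupport type):

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// flexFloat accepts a JSON number or a string-encoded number.
type flexFloat float64

func (f *flexFloat) UnmarshalJSON(b []byte) error {
	if len(b) > 0 && b[0] == '"' {
		var s string
		if err := json.Unmarshal(b, &s); err != nil {
			return err
		}
		v, err := strconv.ParseFloat(s, 64)
		if err != nil {
			return err
		}
		*f = flexFloat(v)
		return nil
	}
	var v float64
	if err := json.Unmarshal(b, &v); err != nil {
		return err
	}
	*f = flexFloat(v)
	return nil
}

func main() {
	var a, b flexFloat
	json.Unmarshal([]byte(`1.5`), &a)
	json.Unmarshal([]byte(`"1.5"`), &b)
	fmt.Println(a, b) // 1.5 1.5
}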
@@ -2972,9 +3026,9 @@ type StandardSchedulerSettings struct { NullFields []string `json:"-"` } -func (s *StandardSchedulerSettings) MarshalJSON() ([]byte, error) { +func (s StandardSchedulerSettings) MarshalJSON() ([]byte, error) { type NoMethod StandardSchedulerSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *StandardSchedulerSettings) UnmarshalJSON(data []byte) error { @@ -3036,9 +3090,9 @@ type StaticFilesHandler struct { NullFields []string `json:"-"` } -func (s *StaticFilesHandler) MarshalJSON() ([]byte, error) { +func (s StaticFilesHandler) MarshalJSON() ([]byte, error) { type NoMethod StaticFilesHandler - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Status: The Status type defines a logical error model that is suitable for @@ -3070,9 +3124,9 @@ type Status struct { NullFields []string `json:"-"` } -func (s *Status) MarshalJSON() ([]byte, error) { +func (s Status) MarshalJSON() ([]byte, error) { type NoMethod Status - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TrafficSplit: Traffic routing configuration for versions within a single @@ -3115,9 +3169,9 @@ type TrafficSplit struct { NullFields []string `json:"-"` } -func (s *TrafficSplit) MarshalJSON() ([]byte, error) { +func (s TrafficSplit) MarshalJSON() ([]byte, error) { type NoMethod TrafficSplit - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UrlDispatchRule: Rules to match an HTTP request and dispatch that request to @@ -3146,9 +3200,9 @@ type UrlDispatchRule struct { NullFields []string `json:"-"` } -func (s *UrlDispatchRule) MarshalJSON() ([]byte, error) { +func (s UrlDispatchRule) MarshalJSON() ([]byte, error) { type NoMethod UrlDispatchRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UrlMap: URL pattern and description of how the URL should be handled. App @@ -3233,9 +3287,9 @@ type UrlMap struct { NullFields []string `json:"-"` } -func (s *UrlMap) MarshalJSON() ([]byte, error) { +func (s UrlMap) MarshalJSON() ([]byte, error) { type NoMethod UrlMap - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Version: A Version resource is a specific set of source code and @@ -3265,7 +3319,8 @@ type Version struct { BuildEnvVariables map[string]string `json:"buildEnvVariables,omitempty"` // CreateTime: Time that this version was created.@OutputOnly CreateTime string `json:"createTime,omitempty"` - // CreatedBy: Email address of the user who created this version.@OutputOnly + // CreatedBy: Output only. Email address of the user who created this + // version.@OutputOnly CreatedBy string `json:"createdBy,omitempty"` // DefaultExpiration: Duration that static files should be cached by web // proxies and browsers. Only applicable if the corresponding @@ -3277,8 +3332,9 @@ type Version struct { // Deployment: Code and application artifacts that make up this version.Only // returned in GET requests if view=FULL is set. 
Deployment *Deployment `json:"deployment,omitempty"` - // DiskUsageBytes: Total size in bytes of all the files that are included in - // this version and currently hosted on the App Engine disk.@OutputOnly + // DiskUsageBytes: Output only. Total size in bytes of all the files that are + // included in this version and currently hosted on the App Engine + // disk.@OutputOnly DiskUsageBytes int64 `json:"diskUsageBytes,omitempty,string"` // EndpointsApiService: Cloud Endpoints configuration.If endpoints_api_service // is set, the Cloud Endpoints Extensible Service Proxy will be provided to @@ -3349,7 +3405,7 @@ type Version struct { // to perform complex initialization and rely on the state of its memory over // time. Manually scaled versions are sometimes referred to as "backends". ManualScaling *ManualScaling `json:"manualScaling,omitempty"` - // Name: Full path to the Version resource in the API. Example: + // Name: Output only. Full path to the Version resource in the API. Example: // apps/myapp/services/default/versions/v1.@OutputOnly Name string `json:"name,omitempty"` // Network: Extra network settings. Only applicable in the App Engine flexible @@ -3394,7 +3450,7 @@ type Version struct { // Threadsafe: Whether multiple requests can be dispatched to this version at // once. Threadsafe bool `json:"threadsafe,omitempty"` - // VersionUrl: Serving URL for this version. Example: + // VersionUrl: Output only. Serving URL for this version. Example: // "https://myversion-dot-myservice-dot-myapp.appspot.com"@OutputOnly VersionUrl string `json:"versionUrl,omitempty"` // Vm: Whether to deploy this version in a container on a virtual machine. @@ -3420,9 +3476,9 @@ type Version struct { NullFields []string `json:"-"` } -func (s *Version) MarshalJSON() ([]byte, error) { +func (s Version) MarshalJSON() ([]byte, error) { type NoMethod Version - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Volume: Volumes mounted within the app container. Only applicable in the App @@ -3447,9 +3503,9 @@ type Volume struct { NullFields []string `json:"-"` } -func (s *Volume) MarshalJSON() ([]byte, error) { +func (s Volume) MarshalJSON() ([]byte, error) { type NoMethod Volume - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *Volume) UnmarshalJSON(data []byte) error { @@ -3494,9 +3550,9 @@ type VpcAccessConnector struct { NullFields []string `json:"-"` } -func (s *VpcAccessConnector) MarshalJSON() ([]byte, error) { +func (s VpcAccessConnector) MarshalJSON() ([]byte, error) { type NoMethod VpcAccessConnector - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ZipInfo: The zip file information for a zip deployment. 
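[Editor's note] After the ZipInfo hunk below, the patch adds a brand-new ProjectsLocationsApplicationsServicesVersionsDeleteCall, wiring Versions.Delete into the projects/locations/applications hierarchy and returning a long-running Operation. A hedged usage sketch: the resource IDs are placeholders, and the accessor chain assumes the generator's usual one-service-per-path-segment layout.

package main

import (
	"context"
	"fmt"
	"log"

	appengine "google.golang.org/api/appengine/v1" // path assumed
)

func main() {
	ctx := context.Background()
	svc, err := appengine.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Deletes one Version; returns an Operation to poll for completion.
	op, err := svc.Projects.Locations.Applications.Services.Versions.
		Delete("my-project", "us-central1", "my-app", "default", "v1").
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("started operation:", op.Name)
}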
@@ -3522,9 +3578,9 @@ type ZipInfo struct { NullFields []string `json:"-"` } -func (s *ZipInfo) MarshalJSON() ([]byte, error) { +func (s ZipInfo) MarshalJSON() ([]byte, error) { type NoMethod ZipInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type AppsCreateCall struct { @@ -8479,3 +8535,115 @@ func (c *ProjectsLocationsApplicationsAuthorizedDomainsListCall) Pages(ctx conte c.PageToken(x.NextPageToken) } } + +type ProjectsLocationsApplicationsServicesVersionsDeleteCall struct { + s *APIService + projectsId string + locationsId string + applicationsId string + servicesId string + versionsId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes an existing Version resource. +// +// - applicationsId: Part of `name`. See documentation of `projectsId`. +// - locationsId: Part of `name`. See documentation of `projectsId`. +// - projectsId: Part of `name`. Name of the resource requested. Example: +// apps/myapp/services/default/versions/v1. +// - servicesId: Part of `name`. See documentation of `projectsId`. +// - versionsId: Part of `name`. See documentation of `projectsId`. +func (r *ProjectsLocationsApplicationsServicesVersionsService) Delete(projectsId string, locationsId string, applicationsId string, servicesId string, versionsId string) *ProjectsLocationsApplicationsServicesVersionsDeleteCall { + c := &ProjectsLocationsApplicationsServicesVersionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectsId = projectsId + c.locationsId = locationsId + c.applicationsId = applicationsId + c.servicesId = servicesId + c.versionsId = versionsId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsApplicationsServicesVersionsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsApplicationsServicesVersionsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsApplicationsServicesVersionsDeleteCall) Context(ctx context.Context) *ProjectsLocationsApplicationsServicesVersionsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsApplicationsServicesVersionsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsApplicationsServicesVersionsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectsId}/locations/{locationsId}/applications/{applicationsId}/services/{servicesId}/versions/{versionsId}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectsId": c.projectsId, + "locationsId": c.locationsId, + "applicationsId": c.applicationsId, + "servicesId": c.servicesId, + "versionsId": c.versionsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.projects.locations.applications.services.versions.delete" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsApplicationsServicesVersionsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} diff --git a/terraform/providers/google/vendor/google.golang.org/api/bigquery/v2/bigquery-api.json b/terraform/providers/google/vendor/google.golang.org/api/bigquery/v2/bigquery-api.json index e1baac25f8d..ec4ff938239 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/bigquery/v2/bigquery-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/bigquery/v2/bigquery-api.json @@ -36,33 +36,83 @@ "endpoints": [ { "description": "Regional Endpoint", - "endpointUrl": "https://bigquery.me-central2.rep.googleapis.com/", - "location": "me-central2" + "endpointUrl": "https://bigquery.europe-west3.rep.googleapis.com/", + "location": "europe-west3" }, { "description": "Regional Endpoint", - "endpointUrl": "https://bigquery.europe-west3.rep.googleapis.com/", - "location": "europe-west3" + "endpointUrl": "https://bigquery.europe-west8.rep.googleapis.com/", + "location": "europe-west8" }, { "description": "Regional Endpoint", "endpointUrl": "https://bigquery.europe-west9.rep.googleapis.com/", "location": "europe-west9" }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://bigquery.me-central2.rep.googleapis.com/", + "location": "me-central2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://bigquery.us-central1.rep.googleapis.com/", + "location": "us-central1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://bigquery.us-central2.rep.googleapis.com/", + "location": "us-central2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://bigquery.us-east1.rep.googleapis.com/", + "location": "us-east1" + }, { "description": "Regional Endpoint", "endpointUrl": "https://bigquery.us-east4.rep.googleapis.com/", "location": "us-east4" }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://bigquery.us-east5.rep.googleapis.com/", + "location": "us-east5" + }, 
{ "description": "Regional Endpoint", "endpointUrl": "https://bigquery.us-east7.rep.googleapis.com/", "location": "us-east7" }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://bigquery.us-south1.rep.googleapis.com/", + "location": "us-south1" + }, { "description": "Regional Endpoint", "endpointUrl": "https://bigquery.us-west1.rep.googleapis.com/", "location": "us-west1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://bigquery.us-west2.rep.googleapis.com/", + "location": "us-west2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://bigquery.us-west3.rep.googleapis.com/", + "location": "us-west3" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://bigquery.us-west4.rep.googleapis.com/", + "location": "us-west4" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://bigquery.us-west8.rep.googleapis.com/", + "location": "us-west8" } ], "fullyEncodeReservedExpansion": true, @@ -289,7 +339,7 @@ "type": "boolean" }, "filter": { - "description": "An expression for filtering the results of the request by label. The syntax is \\\"labels.\u003cname\u003e[:\u003cvalue\u003e]\\\". Multiple filters can be ANDed together by connecting with a space. Example: \\\"labels.department:receiving labels.active\\\". See [Filtering datasets using labels](/bigquery/docs/filtering-labels#filtering_datasets_using_labels) for details.", + "description": "An expression for filtering the results of the request by label. The syntax is `labels.[:]`. Multiple filters can be ANDed together by connecting with a space. Example: `labels.department:receiving labels.active`. See [Filtering datasets using labels](https://cloud.google.com/bigquery/docs/filtering-labels#filtering_datasets_using_labels) for details.", "location": "query", "type": "string" }, @@ -455,7 +505,7 @@ "type": "string" }, "location": { - "description": "The geographic location of the job. You must specify the location to run the job for the following scenarios: - If the location to run a job is not in the `us` or the `eu` multi-regional location - If the job's location is in a single region (for example, `us-central1`) For more information, see https://cloud.google.com/bigquery/docs/locations#specifying_your_location.", + "description": "The geographic location of the job. You must specify the location to run the job for the following scenarios: * If the location to run a job is not in the `us` or the `eu` multi-regional location * If the job's location is in a single region (for example, `us-central1`) For more information, see https://cloud.google.com/bigquery/docs/locations#specifying_your_location.", "location": "query", "type": "string" }, @@ -530,7 +580,7 @@ "type": "string" }, "location": { - "description": "The geographic location of the job. You must specify the location to run the job for the following scenarios: - If the location to run a job is not in the `us` or the `eu` multi-regional location - If the job's location is in a single region (for example, `us-central1`) For more information, see https://cloud.google.com/bigquery/docs/locations#specifying_your_location.", + "description": "The geographic location of the job. 
You must specify the location to run the job for the following scenarios: * If the location to run a job is not in the `us` or the `eu` multi-regional location * If the job's location is in a single region (for example, `us-central1`) For more information, see https://cloud.google.com/bigquery/docs/locations#specifying_your_location.", "location": "query", "type": "string" }, @@ -575,7 +625,7 @@ "type": "string" }, "location": { - "description": "The geographic location of the job. You must specify the location to run the job for the following scenarios: - If the location to run a job is not in the `us` or the `eu` multi-regional location - If the job's location is in a single region (for example, `us-central1`) For more information, see https://cloud.google.com/bigquery/docs/locations#specifying_your_location.", + "description": "The geographic location of the job. You must specify the location to run the job for the following scenarios: * If the location to run a job is not in the `us` or the `eu` multi-regional location * If the job's location is in a single region (for example, `us-central1`) For more information, see https://cloud.google.com/bigquery/docs/locations#specifying_your_location.", "location": "query", "type": "string" }, @@ -1935,7 +1985,7 @@ } } }, - "revision": "20240526", + "revision": "20240919", "rootUrl": "https://bigquery.googleapis.com/", "schemas": { "AggregateClassificationMetrics": { @@ -2518,7 +2568,7 @@ "id": "BigLakeConfiguration", "properties": { "connectionId": { - "description": "Required. The connection specifying the credentials to be used to read and write to external storage, such as Cloud Storage. The connection_id can have the form \"\u003cproject\\_id\u003e.\u003clocation\\_id\u003e.\u003cconnection\\_id\u003e\" or \"projects/\u003cproject\\_id\u003e/locations/\u003clocation\\_id\u003e/connections/\u003cconnection\\_id\u003e\".", + "description": "Required. The connection specifying the credentials to be used to read and write to external storage, such as Cloud Storage. The connection_id can have the form `{project}.{location}.{connection_id}` or `projects/{project}/locations/{location}/connections/{connection_id}\".", "type": "string" }, "fileFormat": { @@ -2534,7 +2584,7 @@ "type": "string" }, "storageUri": { - "description": "Required. The fully qualified location prefix of the external folder where table data is stored. The '*' wildcard character is not allowed. The URI should be in the format \"gs://bucket/path_to_table/\"", + "description": "Required. The fully qualified location prefix of the external folder where table data is stored. The '*' wildcard character is not allowed. The URI should be in the format `gs://bucket/path_to_table/`", "type": "string" }, "tableFormat": { @@ -2585,7 +2635,7 @@ "type": "boolean" }, "qualifierEncoded": { - "description": "[Required] Qualifier of the column. Columns in the parent column family that has this exact qualifier are exposed as . field. If the qualifier is valid UTF-8 string, it can be specified in the qualifier_string field. Otherwise, a base-64 encoded value must be set to qualifier_encoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier i.e. does not match a-zA-Z*, a valid identifier must be provided as field_name.", + "description": "[Required] Qualifier of the column. Columns in the parent column family that has this exact qualifier are exposed as `.` field. 
If the qualifier is valid UTF-8 string, it can be specified in the qualifier_string field. Otherwise, a base-64 encoded value must be set to qualifier_encoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier i.e. does not match a-zA-Z*, a valid identifier must be provided as field_name.", "format": "byte", "type": "string" }, @@ -2605,7 +2655,7 @@ "id": "BigtableColumnFamily", "properties": { "columns": { - "description": "Optional. Lists of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as .. Other columns can be accessed as a list through .Column field.", + "description": "Optional. Lists of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as `.`. Other columns can be accessed as a list through the `.Column` field.", "items": { "$ref": "BigtableColumn" }, @@ -2998,7 +3048,7 @@ "type": "object" }, "ConnectionProperty": { - "description": "A connection-level property to customize query behavior. Under JDBC, these correspond directly to connection properties passed to the DriverManager. Under ODBC, these correspond to properties in the connection string. Currently supported connection properties: * **dataset_project_id**: represents the default project for datasets that are used in the query. Setting the system variable `@@dataset_project_id` achieves the same behavior. For more information about system variables, see: https://cloud.google.com/bigquery/docs/reference/system-variables * **time_zone**: represents the default timezone used to run the query. * **session_id**: associates the query with a given session. * **query_label**: associates the query with a given job label. If set, all subsequent queries in a script or session will have this label. For the format in which a you can specify a query label, see labels in the JobConfiguration resource type: https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfiguration Additional properties are allowed, but ignored. Specifying multiple connection properties with the same key returns an error.", + "description": "A connection-level property to customize query behavior. Under JDBC, these correspond directly to connection properties passed to the DriverManager. Under ODBC, these correspond to properties in the connection string. Currently supported connection properties: * **dataset_project_id**: represents the default project for datasets that are used in the query. Setting the system variable `@@dataset_project_id` achieves the same behavior. For more information about system variables, see: https://cloud.google.com/bigquery/docs/reference/system-variables * **time_zone**: represents the default timezone used to run the query. * **session_id**: associates the query with a given session. * **query_label**: associates the query with a given job label. If set, all subsequent queries in a script or session will have this label. For the format in which a you can specify a query label, see labels in the JobConfiguration resource type: https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfiguration * **service_account**: indicates the service account to use to run a continuous query. If set, the query job uses the service account to access Google Cloud resources. 
Service account access is bounded by the IAM permissions that you have granted to the service account. Additional properties are allowed, but ignored. Specifying multiple connection properties with the same key returns an error.", "id": "ConnectionProperty", "properties": { "key": { @@ -3076,6 +3126,17 @@ }, "type": "object" }, + "DataPolicyOption": { + "description": "Data policy option proto, it currently supports name only, will support precedence later.", + "id": "DataPolicyOption", + "properties": { + "name": { + "description": "Data policy resource name in the form of projects/project_id/locations/location_id/dataPolicies/data_policy_id.", + "type": "string" + } + }, + "type": "object" + }, "DataSplitResult": { "description": "Data split result. This contains references to the training and evaluation data tables that were used to train the model.", "id": "DataSplitResult", @@ -3096,10 +3157,11 @@ "type": "object" }, "Dataset": { + "description": "Represents a BigQuery dataset.", "id": "Dataset", "properties": { "access": { - "description": "Optional. An array of objects that define dataset access for one or more entities. You can set this property when inserting or updating a dataset in order to control who is allowed to access the data. If unspecified at dataset creation time, BigQuery adds default dataset access for the following entities: access.specialGroup: projectReaders; access.role: READER; access.specialGroup: projectWriters; access.role: WRITER; access.specialGroup: projectOwners; access.role: OWNER; access.userByEmail: [dataset creator email]; access.role: OWNER;", + "description": "Optional. An array of objects that define dataset access for one or more entities. You can set this property when inserting or updating a dataset in order to control who is allowed to access the data. If unspecified at dataset creation time, BigQuery adds default dataset access for the following entities: access.specialGroup: projectReaders; access.role: READER; access.specialGroup: projectWriters; access.role: WRITER; access.specialGroup: projectOwners; access.role: OWNER; access.userByEmail: [dataset creator email]; access.role: OWNER; If you patch a dataset, then this field is overwritten by the patched dataset's access field. To add entities, you must supply the entire existing access array in addition to any new entities that you want to add.", "items": { "description": "An object that defines dataset access for an entity.", "properties": { @@ -3120,7 +3182,7 @@ "type": "string" }, "role": { - "description": "An IAM role ID that should be granted to the user, group, or domain specified in this access entry. The following legacy mappings will be applied: OWNER \u003c=\u003e roles/bigquery.dataOwner WRITER \u003c=\u003e roles/bigquery.dataEditor READER \u003c=\u003e roles/bigquery.dataViewer This field will accept any of the above formats, but will return only the legacy format. For example, if you set this field to \"roles/bigquery.dataOwner\", it will be returned back as \"OWNER\".", + "description": "An IAM role ID that should be granted to the user, group, or domain specified in this access entry. The following legacy mappings will be applied: * `OWNER`: `roles/bigquery.dataOwner` * `WRITER`: `roles/bigquery.dataEditor` * `READER`: `roles/bigquery.dataViewer` This field will accept any of the above formats, but will return only the legacy format. 
For example, if you set this field to \"roles/bigquery.dataOwner\", it will be returned back as \"OWNER\".", "type": "string" }, "routine": { @@ -3128,7 +3190,7 @@ "description": "[Pick one] A routine from a different dataset to grant access to. Queries executed against that routine will have read access to views/tables/routines in this dataset. Only UDF is supported for now. The role field is not required when this field is set. If that routine is updated by any user, access to the routine needs to be granted again via an update operation." }, "specialGroup": { - "description": "[Pick one] A special group to grant access to. Possible values include: projectOwners: Owners of the enclosing project. projectReaders: Readers of the enclosing project. projectWriters: Writers of the enclosing project. allAuthenticatedUsers: All authenticated BigQuery users. Maps to similarly-named IAM members.", + "description": "[Pick one] A special group to grant access to. Possible values include: * projectOwners: Owners of the enclosing project. * projectReaders: Readers of the enclosing project. * projectWriters: Writers of the enclosing project. * allAuthenticatedUsers: All authenticated BigQuery users. Maps to similarly-named IAM members.", "type": "string" }, "userByEmail": { @@ -3226,7 +3288,7 @@ "additionalProperties": { "type": "string" }, - "description": "The labels associated with this dataset. You can use these to organize and group your datasets. You can set this property when inserting or updating a dataset. See Creating and Updating Dataset Labels for more information.", + "description": "The labels associated with this dataset. You can use these to organize and group your datasets. You can set this property when inserting or updating a dataset. See [Creating and Updating Dataset Labels](https://cloud.google.com/bigquery/docs/creating-managing-labels#creating_and_updating_dataset_labels) for more information.", "type": "object" }, "lastModifiedTime": { @@ -3257,12 +3319,12 @@ "additionalProperties": { "type": "string" }, - "description": "Optional. The [tags](/bigquery/docs/tags) attached to this dataset. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example \"123456789012/environment\" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example \"Production\". See [Tag definitions](/iam/docs/tags-access-control#definitions) for more details.", + "description": "Optional. The [tags](https://cloud.google.com/bigquery/docs/tags) attached to this dataset. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example \"123456789012/environment\" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example \"Production\". See [Tag definitions](https://cloud.google.com/iam/docs/tags-access-control#definitions) for more details.", "type": "object" }, "restrictions": { "$ref": "RestrictionConfig", - "description": "Optional. Output only. Restriction config for all tables and dataset. If set, restrict certain accesses on the dataset and all its tables based on the config. See [Data egress](/bigquery/docs/analytics-hub-introduction#data_egress) for more details.", + "description": "Optional. Output only. Restriction config for all tables and dataset. If set, restrict certain accesses on the dataset and all its tables based on the config. 
See [Data egress](https://cloud.google.com/bigquery/docs/analytics-hub-introduction#data_egress) for more details.", "readOnly": true }, "satisfiesPzi": { @@ -3295,7 +3357,8 @@ "type": "string" }, "tags": { - "description": "Output only. Tags for the Dataset.", + "deprecated": true, + "description": "Output only. Tags for the dataset. To provide tags as inputs, use the `resourceTags` field.", "items": { "description": "A global tag managed by Resource Manager. https://cloud.google.com/iam/docs/tags-access-control#definitions", "properties": { @@ -3416,6 +3479,7 @@ "type": "object" }, "DatasetReference": { + "description": "Identifier for a dataset.", "id": "DatasetReference", "properties": { "datasetId": { @@ -3589,6 +3653,7 @@ "type": "object" }, "EncryptionConfiguration": { + "description": "Configuration for Cloud KMS encryption settings.", "id": "EncryptionConfiguration", "properties": { "kmsKeyName": { @@ -3981,7 +4046,7 @@ "type": "string" }, "connectionId": { - "description": "Optional. The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connection_id can have the form \"\u003cproject\\_id\u003e.\u003clocation\\_id\u003e.\u003cconnection\\_id\u003e\" or \"projects/\u003cproject\\_id\u003e/locations/\u003clocation\\_id\u003e/connections/\u003cconnection\\_id\u003e\".", + "description": "Optional. The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connection_id can have the form `{project_id}.{location_id};{connection_id}` or `projects/{project_id}/locations/{location_id}/connections/{connection_id}`.", "type": "string" }, "csvOptions": { @@ -4501,7 +4566,7 @@ "type": "object" }, "HparamTuningTrial": { - "description": "Training info of a trial in [hyperparameter tuning](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models.", + "description": "Training info of a trial in [hyperparameter tuning](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models.", "id": "HparamTuningTrial", "properties": { "endTimeMs": { @@ -4599,6 +4664,7 @@ "NOT_SUPPORTED_IN_STANDARD_EDITION", "INDEX_SUPPRESSED_BY_FUNCTION_OPTION", "QUERY_CACHE_HIT", + "STALE_INDEX", "INTERNAL_ERROR", "OTHER_REASON" ], @@ -4621,6 +4687,7 @@ "Indicates that search indexes can not be used for search query with STANDARD edition.", "Indicates that an option in the search function that cannot make use of the index has been selected.", "Indicates that the query was cached, and thus the search index was not used.", + "The index cannot be used in the search query because it is stale.", "Indicates an internal error that causes the search index to be unused.", "Indicates that the reason search indexes cannot be used in the query is not covered by any of the other IndexUnusedReason options." ], @@ -4795,7 +4862,7 @@ }, "jobCreationReason": { "$ref": "JobCreationReason", - "description": "Output only. If set, it provides the reason why a Job was created. If not set, it should be treated as the default: REQUESTED. This feature is not yet available. Jobs will always be created.", + "description": "Output only. The reason why a Job was created. 
[Preview](https://cloud.google.com/products/#product-launch-stages)", "readOnly": true }, "jobReference": { @@ -5355,7 +5422,7 @@ "type": "object" }, "JobCreationReason": { - "description": "Reason about why a Job was created from a [`jobs.query`](https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query) method when used with `JOB_CREATION_OPTIONAL` Job creation mode. For [`jobs.insert`](https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/insert) method calls it will always be `REQUESTED`. This feature is not yet available. Jobs will always be created.", + "description": "Reason about why a Job was created from a [`jobs.query`](https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query) method when used with `JOB_CREATION_OPTIONAL` Job creation mode. For [`jobs.insert`](https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/insert) method calls it will always be `REQUESTED`. [Preview](https://cloud.google.com/products/#product-launch-stages)", "id": "JobCreationReason", "properties": { "code": { @@ -5503,6 +5570,23 @@ "description": "Output only. Statistics for data-masking. Present only for query and extract jobs.", "readOnly": true }, + "edition": { + "description": "Output only. Name of edition corresponding to the reservation for this job at the time of this update.", + "enum": [ + "RESERVATION_EDITION_UNSPECIFIED", + "STANDARD", + "ENTERPRISE", + "ENTERPRISE_PLUS" + ], + "enumDescriptions": [ + "Default value, which will be treated as ENTERPRISE.", + "Standard edition.", + "Enterprise edition.", + "Enterprise plus edition." + ], + "readOnly": true, + "type": "string" + }, "endTime": { "description": "Output only. End time of this job, in milliseconds since the epoch. This field will be present whenever a job is in the DONE state.", "format": "int64", @@ -5822,7 +5906,7 @@ "readOnly": true }, "statementType": { - "description": "Output only. The type of query statement, if valid. Possible values: * `SELECT`: [`SELECT`](/bigquery/docs/reference/standard-sql/query-syntax#select_list) statement. * `ASSERT`: [`ASSERT`](/bigquery/docs/reference/standard-sql/debugging-statements#assert) statement. * `INSERT`: [`INSERT`](/bigquery/docs/reference/standard-sql/dml-syntax#insert_statement) statement. * `UPDATE`: [`UPDATE`](/bigquery/docs/reference/standard-sql/query-syntax#update_statement) statement. * `DELETE`: [`DELETE`](/bigquery/docs/reference/standard-sql/data-manipulation-language) statement. * `MERGE`: [`MERGE`](/bigquery/docs/reference/standard-sql/data-manipulation-language) statement. * `CREATE_TABLE`: [`CREATE TABLE`](/bigquery/docs/reference/standard-sql/data-definition-language#create_table_statement) statement, without `AS SELECT`. * `CREATE_TABLE_AS_SELECT`: [`CREATE TABLE AS SELECT`](/bigquery/docs/reference/standard-sql/data-definition-language#query_statement) statement. * `CREATE_VIEW`: [`CREATE VIEW`](/bigquery/docs/reference/standard-sql/data-definition-language#create_view_statement) statement. * `CREATE_MODEL`: [`CREATE MODEL`](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create#create_model_statement) statement. * `CREATE_MATERIALIZED_VIEW`: [`CREATE MATERIALIZED VIEW`](/bigquery/docs/reference/standard-sql/data-definition-language#create_materialized_view_statement) statement. * `CREATE_FUNCTION`: [`CREATE FUNCTION`](/bigquery/docs/reference/standard-sql/data-definition-language#create_function_statement) statement. 
* `CREATE_TABLE_FUNCTION`: [`CREATE TABLE FUNCTION`](/bigquery/docs/reference/standard-sql/data-definition-language#create_table_function_statement) statement. * `CREATE_PROCEDURE`: [`CREATE PROCEDURE`](/bigquery/docs/reference/standard-sql/data-definition-language#create_procedure) statement. * `CREATE_ROW_ACCESS_POLICY`: [`CREATE ROW ACCESS POLICY`](/bigquery/docs/reference/standard-sql/data-definition-language#create_row_access_policy_statement) statement. * `CREATE_SCHEMA`: [`CREATE SCHEMA`](/bigquery/docs/reference/standard-sql/data-definition-language#create_schema_statement) statement. * `CREATE_SNAPSHOT_TABLE`: [`CREATE SNAPSHOT TABLE`](/bigquery/docs/reference/standard-sql/data-definition-language#create_snapshot_table_statement) statement. * `CREATE_SEARCH_INDEX`: [`CREATE SEARCH INDEX`](/bigquery/docs/reference/standard-sql/data-definition-language#create_search_index_statement) statement. * `DROP_TABLE`: [`DROP TABLE`](/bigquery/docs/reference/standard-sql/data-definition-language#drop_table_statement) statement. * `DROP_EXTERNAL_TABLE`: [`DROP EXTERNAL TABLE`](/bigquery/docs/reference/standard-sql/data-definition-language#drop_external_table_statement) statement. * `DROP_VIEW`: [`DROP VIEW`](/bigquery/docs/reference/standard-sql/data-definition-language#drop_view_statement) statement. * `DROP_MODEL`: [`DROP MODEL`](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-drop-model) statement. * `DROP_MATERIALIZED_VIEW`: [`DROP MATERIALIZED VIEW`](/bigquery/docs/reference/standard-sql/data-definition-language#drop_materialized_view_statement) statement. * `DROP_FUNCTION` : [`DROP FUNCTION`](/bigquery/docs/reference/standard-sql/data-definition-language#drop_function_statement) statement. * `DROP_TABLE_FUNCTION` : [`DROP TABLE FUNCTION`](/bigquery/docs/reference/standard-sql/data-definition-language#drop_table_function) statement. * `DROP_PROCEDURE`: [`DROP PROCEDURE`](/bigquery/docs/reference/standard-sql/data-definition-language#drop_procedure_statement) statement. * `DROP_SEARCH_INDEX`: [`DROP SEARCH INDEX`](/bigquery/docs/reference/standard-sql/data-definition-language#drop_search_index) statement. * `DROP_SCHEMA`: [`DROP SCHEMA`](/bigquery/docs/reference/standard-sql/data-definition-language#drop_schema_statement) statement. * `DROP_SNAPSHOT_TABLE`: [`DROP SNAPSHOT TABLE`](/bigquery/docs/reference/standard-sql/data-definition-language#drop_snapshot_table_statement) statement. * `DROP_ROW_ACCESS_POLICY`: [`DROP [ALL] ROW ACCESS POLICY|POLICIES`](/bigquery/docs/reference/standard-sql/data-definition-language#drop_row_access_policy_statement) statement. * `ALTER_TABLE`: [`ALTER TABLE`](/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_set_options_statement) statement. * `ALTER_VIEW`: [`ALTER VIEW`](/bigquery/docs/reference/standard-sql/data-definition-language#alter_view_set_options_statement) statement. * `ALTER_MATERIALIZED_VIEW`: [`ALTER MATERIALIZED VIEW`](/bigquery/docs/reference/standard-sql/data-definition-language#alter_materialized_view_set_options_statement) statement. * `ALTER_SCHEMA`: [`ALTER SCHEMA`](/bigquery/docs/reference/standard-sql/data-definition-language#aalter_schema_set_options_statement) statement. * `SCRIPT`: [`SCRIPT`](/bigquery/docs/reference/standard-sql/procedural-language). * `TRUNCATE_TABLE`: [`TRUNCATE TABLE`](/bigquery/docs/reference/standard-sql/dml-syntax#truncate_table_statement) statement. 
* `CREATE_EXTERNAL_TABLE`: [`CREATE EXTERNAL TABLE`](/bigquery/docs/reference/standard-sql/data-definition-language#create_external_table_statement) statement. * `EXPORT_DATA`: [`EXPORT DATA`](/bigquery/docs/reference/standard-sql/other-statements#export_data_statement) statement. * `EXPORT_MODEL`: [`EXPORT MODEL`](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-export-model) statement. * `LOAD_DATA`: [`LOAD DATA`](/bigquery/docs/reference/standard-sql/other-statements#load_data_statement) statement. * `CALL`: [`CALL`](/bigquery/docs/reference/standard-sql/procedural-language#call) statement.", + "description": "Output only. The type of query statement, if valid. Possible values: * `SELECT`: [`SELECT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#select_list) statement. * `ASSERT`: [`ASSERT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/debugging-statements#assert) statement. * `INSERT`: [`INSERT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax#insert_statement) statement. * `UPDATE`: [`UPDATE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#update_statement) statement. * `DELETE`: [`DELETE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language) statement. * `MERGE`: [`MERGE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language) statement. * `CREATE_TABLE`: [`CREATE TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_statement) statement, without `AS SELECT`. * `CREATE_TABLE_AS_SELECT`: [`CREATE TABLE AS SELECT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#query_statement) statement. * `CREATE_VIEW`: [`CREATE VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_view_statement) statement. * `CREATE_MODEL`: [`CREATE MODEL`](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create#create_model_statement) statement. * `CREATE_MATERIALIZED_VIEW`: [`CREATE MATERIALIZED VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_materialized_view_statement) statement. * `CREATE_FUNCTION`: [`CREATE FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_function_statement) statement. * `CREATE_TABLE_FUNCTION`: [`CREATE TABLE FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_function_statement) statement. * `CREATE_PROCEDURE`: [`CREATE PROCEDURE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_procedure) statement. * `CREATE_ROW_ACCESS_POLICY`: [`CREATE ROW ACCESS POLICY`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_row_access_policy_statement) statement. * `CREATE_SCHEMA`: [`CREATE SCHEMA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_schema_statement) statement. * `CREATE_SNAPSHOT_TABLE`: [`CREATE SNAPSHOT TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_snapshot_table_statement) statement. * `CREATE_SEARCH_INDEX`: [`CREATE SEARCH INDEX`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_search_index_statement) statement. 
* `DROP_TABLE`: [`DROP TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_table_statement) statement. * `DROP_EXTERNAL_TABLE`: [`DROP EXTERNAL TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_external_table_statement) statement. * `DROP_VIEW`: [`DROP VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_view_statement) statement. * `DROP_MODEL`: [`DROP MODEL`](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-drop-model) statement. * `DROP_MATERIALIZED_VIEW`: [`DROP MATERIALIZED VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_materialized_view_statement) statement. * `DROP_FUNCTION` : [`DROP FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_function_statement) statement. * `DROP_TABLE_FUNCTION` : [`DROP TABLE FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_table_function) statement. * `DROP_PROCEDURE`: [`DROP PROCEDURE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_procedure_statement) statement. * `DROP_SEARCH_INDEX`: [`DROP SEARCH INDEX`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_search_index) statement. * `DROP_SCHEMA`: [`DROP SCHEMA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_schema_statement) statement. * `DROP_SNAPSHOT_TABLE`: [`DROP SNAPSHOT TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_snapshot_table_statement) statement. * `DROP_ROW_ACCESS_POLICY`: [`DROP [ALL] ROW ACCESS POLICY|POLICIES`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_row_access_policy_statement) statement. * `ALTER_TABLE`: [`ALTER TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_set_options_statement) statement. * `ALTER_VIEW`: [`ALTER VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_view_set_options_statement) statement. * `ALTER_MATERIALIZED_VIEW`: [`ALTER MATERIALIZED VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_materialized_view_set_options_statement) statement. * `ALTER_SCHEMA`: [`ALTER SCHEMA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#aalter_schema_set_options_statement) statement. * `SCRIPT`: [`SCRIPT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language). * `TRUNCATE_TABLE`: [`TRUNCATE TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax#truncate_table_statement) statement. * `CREATE_EXTERNAL_TABLE`: [`CREATE EXTERNAL TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_external_table_statement) statement. * `EXPORT_DATA`: [`EXPORT DATA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/other-statements#export_data_statement) statement. * `EXPORT_MODEL`: [`EXPORT MODEL`](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-export-model) statement. 
* `LOAD_DATA`: [`LOAD DATA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/other-statements#load_data_statement) statement. * `CALL`: [`CALL`](https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#call) statement.", "readOnly": true, "type": "string" }, @@ -6338,7 +6422,7 @@ "id": "MlStatistics", "properties": { "hparamTrials": { - "description": "Output only. Trials of a [hyperparameter tuning job](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) sorted by trial_id.", + "description": "Output only. Trials of a [hyperparameter tuning job](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) sorted by trial_id.", "items": { "$ref": "HparamTuningTrial" }, @@ -6346,7 +6430,7 @@ "type": "array" }, "iterationResults": { - "description": "Results for all completed iterations. Empty for [hyperparameter tuning jobs](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview).", + "description": "Results for all completed iterations. Empty for [hyperparameter tuning jobs](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview).", "items": { "$ref": "IterationResult" }, @@ -6385,7 +6469,8 @@ "RANDOM_FOREST_CLASSIFIER", "TENSORFLOW_LITE", "ONNX", - "TRANSFORM_ONLY" + "TRANSFORM_ONLY", + "CONTRIBUTION_ANALYSIS" ], "enumDescriptions": [ "Default value.", @@ -6412,7 +6497,8 @@ "Random forest classifier model.", "An imported TensorFlow Lite model.", "An imported ONNX model.", - "Model to capture the manual preprocessing logic in the transform clause." + "Model to capture the columns and logic in the TRANSFORM clause along with statistics useful for ML analytic functions.", + "The contribution analysis model." ], "readOnly": true, "type": "string" @@ -6427,7 +6513,7 @@ "enumDescriptions": [ "Unspecified training type.", "Single training with fixed parameter space.", - "[Hyperparameter tuning training](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview)." + "[Hyperparameter tuning training](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview)." ], "readOnly": true, "type": "string" @@ -6451,7 +6537,7 @@ "type": "string" }, "defaultTrialId": { - "description": "Output only. The default trial_id to use in TVFs when the trial_id is not passed in. For single-objective [hyperparameter tuning](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models, this is the best trial ID. For multi-objective [hyperparameter tuning](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models, this is the smallest trial ID among all Pareto optimal trials.", + "description": "Output only. The default trial_id to use in TVFs when the trial_id is not passed in. For single-objective [hyperparameter tuning](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models, this is the best trial ID. For multi-objective [hyperparameter tuning](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models, this is the smallest trial ID among all Pareto optimal trials.", "format": "int64", "readOnly": true, "type": "string" @@ -6492,7 +6578,7 @@ "readOnly": true }, "hparamTrials": { - "description": "Output only. 
Trials of a [hyperparameter tuning](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) model sorted by trial_id.", + "description": "Output only. Trials of a [hyperparameter tuning](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) model sorted by trial_id.", "items": { "$ref": "HparamTuningTrial" }, @@ -6556,7 +6642,8 @@ "RANDOM_FOREST_CLASSIFIER", "TENSORFLOW_LITE", "ONNX", - "TRANSFORM_ONLY" + "TRANSFORM_ONLY", + "CONTRIBUTION_ANALYSIS" ], "enumDescriptions": [ "Default value.", @@ -6583,13 +6670,14 @@ "Random forest classifier model.", "An imported TensorFlow Lite model.", "An imported ONNX model.", - "Model to capture the manual preprocessing logic in the transform clause." + "Model to capture the columns and logic in the TRANSFORM clause along with statistics useful for ML analytic functions.", + "The contribution analysis model." ], "readOnly": true, "type": "string" }, "optimalTrialIds": { - "description": "Output only. For single-objective [hyperparameter tuning](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models, it only contains the best trial. For multi-objective [hyperparameter tuning](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models, it contains all Pareto optimal trials sorted by trial_id.", + "description": "Output only. For single-objective [hyperparameter tuning](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models, it only contains the best trial. For multi-objective [hyperparameter tuning](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models, it contains all Pareto optimal trials sorted by trial_id.", "items": { "format": "int64", "type": "string" @@ -6656,7 +6744,7 @@ "id": "ModelExtractOptions", "properties": { "trialId": { - "description": "The 1-based ID of the trial to be exported from a hyperparameter tuning model. If not specified, the trial with id = [Model](/bigquery/docs/reference/rest/v2/models#resource:-model).defaultTrialId is exported. This field is ignored for models not trained with hyperparameter tuning.", + "description": "The 1-based ID of the trial to be exported from a hyperparameter tuning model. If not specified, the trial with id = [Model](https://cloud.google.com/bigquery/docs/reference/rest/v2/models#resource:-model).defaultTrialId is exported. This field is ignored for models not trained with hyperparameter tuning.", "format": "int64", "type": "string" } @@ -6747,23 +6835,21 @@ "id": "PartitionedColumn", "properties": { "field": { - "description": "Output only. The name of the partition column.", - "readOnly": true, + "description": "Required. The name of the partition column.", "type": "string" } }, "type": "object" }, "PartitioningDefinition": { - "description": "The partitioning information, which includes managed table and external table partition information.", + "description": "The partitioning information, which includes managed table, external table and metastore partitioned table partition information.", "id": "PartitioningDefinition", "properties": { "partitionedColumn": { - "description": "Output only. Details about each partitioning column. BigQuery native tables only support 1 partitioning column. Other table types may support 0, 1 or more partitioning columns.", + "description": "Optional. Details about each partitioning column. 
This field is output only for all partitioning types other than metastore partitioned tables. BigQuery native tables only support 1 partitioning column. Other table types may support 0, 1 or more partitioning columns. For metastore partitioned tables, the order must match the definition order in the Hive Metastore, where it must match the physical layout of the table. For example, CREATE TABLE a_table(id BIGINT, name STRING) PARTITIONED BY (city STRING, state STRING). In this case the values must be ['city', 'state'] in that order.", "items": { "$ref": "PartitionedColumn" }, - "readOnly": true, "type": "array" } }, @@ -7079,7 +7165,7 @@ "description": "Optional. Output format adjustments." }, "jobCreationMode": { - "description": "Optional. If not set, jobs are always required. If set, the query request will follow the behavior described JobCreationMode. This feature is not yet available. Jobs will always be created.", + "description": "Optional. If not set, jobs are always required. If set, the query request will follow the behavior described JobCreationMode. [Preview](https://cloud.google.com/products/#product-launch-stages)", "enum": [ "JOB_CREATION_MODE_UNSPECIFIED", "JOB_CREATION_REQUIRED", @@ -7186,11 +7272,11 @@ }, "jobCreationReason": { "$ref": "JobCreationReason", - "description": "Optional. Only relevant when a job_reference is present in the response. If job_reference is not present it will always be unset. When job_reference is present, this field should be interpreted as follows: If set, it will provide the reason of why a Job was created. If not set, it should be treated as the default: REQUESTED. This feature is not yet available. Jobs will always be created." + "description": "Optional. The reason why a Job was created. Only relevant when a job_reference is present in the response. If job_reference is not present it will always be unset. [Preview](https://cloud.google.com/products/#product-launch-stages)" }, "jobReference": { "$ref": "JobReference", - "description": "Reference to the Job that was created to run the query. This field will be present even if the original request timed out, in which case GetQueryResults can be used to read the results once the query has completed. Since this API only returns the first page of results, subsequent pages can be fetched via the same mechanism (GetQueryResults)." + "description": "Reference to the Job that was created to run the query. This field will be present even if the original request timed out, in which case GetQueryResults can be used to read the results once the query has completed. Since this API only returns the first page of results, subsequent pages can be fetched via the same mechanism (GetQueryResults). If job_creation_mode was set to `JOB_CREATION_OPTIONAL` and the query completes without creating a job, this field will be empty." }, "kind": { "default": "bigquery#queryResponse", @@ -7208,7 +7294,7 @@ "type": "string" }, "queryId": { - "description": "Query ID for the completed query. This ID will be auto-generated. This field is not yet available and it is currently not guaranteed to be populated.", + "description": "Auto-generated ID for the query. [Preview](https://cloud.google.com/products/#product-launch-stages)", "type": "string" }, "rows": { @@ -7281,7 +7367,7 @@ "id": "RangePartitioning", "properties": { "field": { - "description": "Required. [Experimental] The table is partitioned by this field. The field must be a top-level NULLABLE/REQUIRED field. 
The only supported type is INTEGER/INT64.", + "description": "Required. The name of the column to partition the table on. It must be a top-level, INT64 column whose mode is NULLABLE or REQUIRED.", "type": "string" }, "range": { @@ -7472,7 +7558,7 @@ ], "enumDescriptions": [ "Should never be used.", - "Restrict data egress. See [Data egress](/bigquery/docs/analytics-hub-introduction#data_egress) for more details." + "Restrict data egress. See [Data egress](https://cloud.google.com/bigquery/docs/analytics-hub-introduction#data_egress) for more details." ], "readOnly": true, "type": "string" @@ -8122,7 +8208,7 @@ "type": "object" }, "StandardSqlDataType": { - "description": "The data type of a variable such as a function argument. Examples include: * INT64: `{\"typeKind\": \"INT64\"}` * ARRAY: { \"typeKind\": \"ARRAY\", \"arrayElementType\": {\"typeKind\": \"STRING\"} } * STRUCT\u003e: { \"typeKind\": \"STRUCT\", \"structType\": { \"fields\": [ { \"name\": \"x\", \"type\": {\"typeKind\": \"STRING\"} }, { \"name\": \"y\", \"type\": { \"typeKind\": \"ARRAY\", \"arrayElementType\": {\"typeKind\": \"DATE\"} } } ] } }", + "description": "The data type of a variable such as a function argument. Examples include: * INT64: `{\"typeKind\": \"INT64\"}` * ARRAY: { \"typeKind\": \"ARRAY\", \"arrayElementType\": {\"typeKind\": \"STRING\"} } * STRUCT\u003e: { \"typeKind\": \"STRUCT\", \"structType\": { \"fields\": [ { \"name\": \"x\", \"type\": {\"typeKind\": \"STRING\"} }, { \"name\": \"y\", \"type\": { \"typeKind\": \"ARRAY\", \"arrayElementType\": {\"typeKind\": \"DATE\"} } } ] } } * RANGE: { \"typeKind\": \"RANGE\", \"rangeElementType\": {\"typeKind\": \"DATE\"} }", "id": "StandardSqlDataType", "properties": { "arrayElementType": { @@ -8236,7 +8322,7 @@ "type": "string" }, "locationUri": { - "description": "Optional. The physical location of the table (e.g. 'gs://spark-dataproc-data/pangea-data/case_sensitive/' or 'gs://spark-dataproc-data/pangea-data/*'). The maximum length is 2056 bytes.", + "description": "Optional. The physical location of the table (e.g. `gs://spark-dataproc-data/pangea-data/case_sensitive/` or `gs://spark-dataproc-data/pangea-data/*`). The maximum length is 2056 bytes.", "type": "string" }, "outputFormat": { @@ -8507,8 +8593,7 @@ }, "partitionDefinition": { "$ref": "PartitioningDefinition", - "description": "Output only. The partition information for all table formats, including managed partitioned tables, hive partitioned tables, and iceberg partitioned tables.", - "readOnly": true + "description": "Optional. The partition information for all table formats, including managed partitioned tables, hive partitioned tables, iceberg partitioned, and metastore partitioned tables. This field is only populated for metastore partitioned tables. For other table formats, this is an output only field." }, "rangePartitioning": { "$ref": "RangePartitioning", @@ -8536,7 +8621,7 @@ }, "restrictions": { "$ref": "RestrictionConfig", - "description": "Optional. Output only. Restriction config for table. If set, restrict certain accesses on the table based on the config. See [Data egress](/bigquery/docs/analytics-hub-introduction#data_egress) for more details.", + "description": "Optional. Output only. Restriction config for table. If set, restrict certain accesses on the table based on the config. 
See [Data egress](https://cloud.google.com/bigquery/docs/analytics-hub-introduction#data_egress) for more details.", "readOnly": true }, "schema": { @@ -8575,7 +8660,7 @@ "description": "If specified, configures time-based partitioning for this table." }, "type": { - "description": "Output only. Describes the table type. The following values are supported: * `TABLE`: A normal BigQuery table. * `VIEW`: A virtual table defined by a SQL query. * `EXTERNAL`: A table that references data stored in an external storage system, such as Google Cloud Storage. * `MATERIALIZED_VIEW`: A precomputed view defined by a SQL query. * `SNAPSHOT`: An immutable BigQuery table that preserves the contents of a base table at a particular time. See additional information on [table snapshots](/bigquery/docs/table-snapshots-intro). The default value is `TABLE`.", + "description": "Output only. Describes the table type. The following values are supported: * `TABLE`: A normal BigQuery table. * `VIEW`: A virtual table defined by a SQL query. * `EXTERNAL`: A table that references data stored in an external storage system, such as Google Cloud Storage. * `MATERIALIZED_VIEW`: A precomputed view defined by a SQL query. * `SNAPSHOT`: An immutable BigQuery table that preserves the contents of a base table at a particular time. See additional information on [table snapshots](https://cloud.google.com/bigquery/docs/table-snapshots-intro). The default value is `TABLE`.", "readOnly": true, "type": "string" }, @@ -8792,6 +8877,13 @@ "description": "Optional. Field collation can be set only when the type of field is STRING. The following values are supported: * 'und:ci': undetermined locale, case insensitive. * '': empty string. Default to case-sensitive behavior.", "type": "string" }, + "dataPolicies": { + "description": "Optional. Data policy options, will replace the data_policies.", + "items": { + "$ref": "DataPolicyOption" + }, + "type": "array" + }, "defaultValueExpression": { "description": "Optional. A SQL expression to specify the [default value] (https://cloud.google.com/bigquery/docs/default-values) for this field.", "type": "string" @@ -8872,7 +8964,7 @@ "type": "string" }, "type": { - "description": "Required. The field data type. Possible values include: * STRING * BYTES * INTEGER (or INT64) * FLOAT (or FLOAT64) * BOOLEAN (or BOOL) * TIMESTAMP * DATE * TIME * DATETIME * GEOGRAPHY * NUMERIC * BIGNUMERIC * JSON * RECORD (or STRUCT) * RANGE ([Preview](/products/#product-launch-stages)) Use of RECORD/STRUCT indicates that the field contains a nested schema.", + "description": "Required. The field data type. Possible values include: * STRING * BYTES * INTEGER (or INT64) * FLOAT (or FLOAT64) * BOOLEAN (or BOOL) * TIMESTAMP * DATE * TIME * DATETIME * GEOGRAPHY * NUMERIC * BIGNUMERIC * JSON * RECORD (or STRUCT) * RANGE Use of RECORD/STRUCT indicates that the field contains a nested schema.", "type": "string" } }, @@ -8989,12 +9081,17 @@ "description": "Free form human-readable reason metadata caching was unused for the job.", "type": "string" }, + "staleness": { + "description": "Duration since last refresh as of this job for managed tables (indicates metadata cache staleness as seen by this job).", + "format": "google-duration", + "type": "string" + }, "tableReference": { "$ref": "TableReference", "description": "Metadata caching eligible table referenced in the query." 
}, "tableType": { - "description": "[Table type](/bigquery/docs/reference/rest/v2/tables#Table.FIELDS.type).", + "description": "[Table type](https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table.FIELDS.type).", "type": "string" }, "unusedReason": { @@ -9281,6 +9378,10 @@ "format": "double", "type": "number" }, + "contributionMetric": { + "description": "The contribution metric. Applies to contribution analysis models. Allowed formats supported are for summable and summable ratio contribution metrics. These include expressions such as \"SUM(x)\" or \"SUM(x)/SUM(y)\", where x and y are column names from the base table.", + "type": "string" + }, "dartNormalizeType": { "description": "Type of normalization algorithm for boosted tree models using dart booster.", "enum": [ @@ -9354,6 +9455,13 @@ "description": "If true, perform decompose time series and save the results.", "type": "boolean" }, + "dimensionIdColumns": { + "description": "Optional. Names of the columns to slice on. Applies to contribution analysis models.", + "items": { + "type": "string" + }, + "type": "array" + }, "distanceType": { "description": "Distance type for clustering models.", "enum": [ @@ -9777,6 +9885,10 @@ "format": "int64", "type": "string" }, + "isTestColumn": { + "description": "Name of the column used to determine the rows corresponding to control and test. Applies to contribution analysis models.", + "type": "string" + }, "itemColumn": { "description": "Item column specified for matrix factorization models.", "type": "string" @@ -9877,6 +9989,11 @@ "format": "int64", "type": "string" }, + "minAprioriSupport": { + "description": "The apriori support minimum. Applies to contribution analysis models.", + "format": "double", + "type": "number" + }, "minRelativeProgress": { "description": "When early_stop is true, stops training when accuracy improvement is less than 'min_relative_progress'. Used only for iterative training algorithms.", "format": "double", diff --git a/terraform/providers/google/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go b/terraform/providers/google/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go index 5e1d7bd25b3..2772173d314 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go @@ -145,6 +145,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) opts = append(opts, internaloption.WithDefaultEndpointTemplate(basePathTemplate)) opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) + opts = append(opts, internaloption.EnableNewAuthLibrary()) client, endpoint, err := htransport.NewClient(ctx, opts...) 
if err != nil { return nil, err @@ -323,9 +324,9 @@ type AggregateClassificationMetrics struct { NullFields []string `json:"-"` } -func (s *AggregateClassificationMetrics) MarshalJSON() ([]byte, error) { +func (s AggregateClassificationMetrics) MarshalJSON() ([]byte, error) { type NoMethod AggregateClassificationMetrics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *AggregateClassificationMetrics) UnmarshalJSON(data []byte) error { @@ -379,9 +380,9 @@ type AggregationThresholdPolicy struct { NullFields []string `json:"-"` } -func (s *AggregationThresholdPolicy) MarshalJSON() ([]byte, error) { +func (s AggregationThresholdPolicy) MarshalJSON() ([]byte, error) { type NoMethod AggregationThresholdPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Argument: Input/output argument of a function or a stored procedure. @@ -428,9 +429,9 @@ type Argument struct { NullFields []string `json:"-"` } -func (s *Argument) MarshalJSON() ([]byte, error) { +func (s Argument) MarshalJSON() ([]byte, error) { type NoMethod Argument - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ArimaCoefficients: Arima coefficients. @@ -455,9 +456,9 @@ type ArimaCoefficients struct { NullFields []string `json:"-"` } -func (s *ArimaCoefficients) MarshalJSON() ([]byte, error) { +func (s ArimaCoefficients) MarshalJSON() ([]byte, error) { type NoMethod ArimaCoefficients - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *ArimaCoefficients) UnmarshalJSON(data []byte) error { @@ -505,9 +506,9 @@ type ArimaFittingMetrics struct { NullFields []string `json:"-"` } -func (s *ArimaFittingMetrics) MarshalJSON() ([]byte, error) { +func (s ArimaFittingMetrics) MarshalJSON() ([]byte, error) { type NoMethod ArimaFittingMetrics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *ArimaFittingMetrics) UnmarshalJSON(data []byte) error { @@ -569,9 +570,9 @@ type ArimaForecastingMetrics struct { NullFields []string `json:"-"` } -func (s *ArimaForecastingMetrics) MarshalJSON() ([]byte, error) { +func (s ArimaForecastingMetrics) MarshalJSON() ([]byte, error) { type NoMethod ArimaForecastingMetrics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ArimaModelInfo: Arima model information. 
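The sweep from pointer receivers to value receivers on every MarshalJSON above keeps the NoMethod indirection intact. A minimal, self-contained sketch of that pattern, assuming an illustrative Example type and using plain json.Marshal in place of gensupport.MarshalJSON (which additionally applies ForceSendFields and NullFields):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // Example stands in for any generated struct in this file.
    type Example struct {
    	Name  string `json:"name,omitempty"`
    	Count int64  `json:"count,omitempty"`
    }

    // Value receiver, matching the updated generator output. encoding/json
    // promotes this method to *Example as well, so values and pointers both
    // marshal through it.
    func (s Example) MarshalJSON() ([]byte, error) {
    	// NoMethod has the same fields but an empty method set, so the inner
    	// json.Marshal call cannot recurse back into this MarshalJSON.
    	type NoMethod Example
    	return json.Marshal(NoMethod(s))
    }

    func main() {
    	b, err := json.Marshal(&Example{Name: "q"})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(b)) // {"name":"q"}
    }

gensupport.MarshalJSON follows the same shape, consulting ForceSendFields and NullFields before emitting the final bytes.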
@@ -630,9 +631,9 @@ type ArimaModelInfo struct { NullFields []string `json:"-"` } -func (s *ArimaModelInfo) MarshalJSON() ([]byte, error) { +func (s ArimaModelInfo) MarshalJSON() ([]byte, error) { type NoMethod ArimaModelInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ArimaOrder: Arima order, can be used for both non-seasonal and seasonal @@ -657,9 +658,9 @@ type ArimaOrder struct { NullFields []string `json:"-"` } -func (s *ArimaOrder) MarshalJSON() ([]byte, error) { +func (s ArimaOrder) MarshalJSON() ([]byte, error) { type NoMethod ArimaOrder - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ArimaResult: (Auto-)arima fitting result. Wrap everything in ArimaResult for @@ -693,9 +694,9 @@ type ArimaResult struct { NullFields []string `json:"-"` } -func (s *ArimaResult) MarshalJSON() ([]byte, error) { +func (s ArimaResult) MarshalJSON() ([]byte, error) { type NoMethod ArimaResult - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ArimaSingleModelForecastingMetrics: Model evaluation metrics for a single @@ -753,9 +754,9 @@ type ArimaSingleModelForecastingMetrics struct { NullFields []string `json:"-"` } -func (s *ArimaSingleModelForecastingMetrics) MarshalJSON() ([]byte, error) { +func (s ArimaSingleModelForecastingMetrics) MarshalJSON() ([]byte, error) { type NoMethod ArimaSingleModelForecastingMetrics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuditConfig: Specifies the audit configuration for a service. The @@ -794,9 +795,9 @@ type AuditConfig struct { NullFields []string `json:"-"` } -func (s *AuditConfig) MarshalJSON() ([]byte, error) { +func (s AuditConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuditLogConfig: Provides the configuration for logging a type of @@ -829,9 +830,9 @@ type AuditLogConfig struct { NullFields []string `json:"-"` } -func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { +func (s AuditLogConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditLogConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AvroOptions: Options for external data sources. 
@@ -854,9 +855,9 @@ type AvroOptions struct { NullFields []string `json:"-"` } -func (s *AvroOptions) MarshalJSON() ([]byte, error) { +func (s AvroOptions) MarshalJSON() ([]byte, error) { type NoMethod AvroOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BiEngineReason: Reason why BI Engine didn't accelerate the query (or @@ -894,9 +895,9 @@ type BiEngineReason struct { NullFields []string `json:"-"` } -func (s *BiEngineReason) MarshalJSON() ([]byte, error) { +func (s BiEngineReason) MarshalJSON() ([]byte, error) { type NoMethod BiEngineReason - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BiEngineStatistics: Statistics for a BI Engine specific query. Populated as @@ -943,19 +944,17 @@ type BiEngineStatistics struct { NullFields []string `json:"-"` } -func (s *BiEngineStatistics) MarshalJSON() ([]byte, error) { +func (s BiEngineStatistics) MarshalJSON() ([]byte, error) { type NoMethod BiEngineStatistics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BigLakeConfiguration: Configuration for BigLake managed tables. type BigLakeConfiguration struct { // ConnectionId: Required. The connection specifying the credentials to be used // to read and write to external storage, such as Cloud Storage. The - // connection_id can have the form - // ".." or - // "projects//locations//connections/ - // ". + // connection_id can have the form `{project}.{location}.{connection_id}` or + // `projects/{project}/locations/{location}/connections/{connection_id}". ConnectionId string `json:"connectionId,omitempty"` // FileFormat: Required. The file format the table data is stored in. // @@ -965,7 +964,7 @@ type BigLakeConfiguration struct { FileFormat string `json:"fileFormat,omitempty"` // StorageUri: Required. The fully qualified location prefix of the external // folder where table data is stored. The '*' wildcard character is not - // allowed. The URI should be in the format "gs://bucket/path_to_table/" + // allowed. The URI should be in the format `gs://bucket/path_to_table/` StorageUri string `json:"storageUri,omitempty"` // TableFormat: Required. The table format the metadata only snapshots are // stored in. @@ -987,9 +986,9 @@ type BigLakeConfiguration struct { NullFields []string `json:"-"` } -func (s *BigLakeConfiguration) MarshalJSON() ([]byte, error) { +func (s BigLakeConfiguration) MarshalJSON() ([]byte, error) { type NoMethod BigLakeConfiguration - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type BigQueryModelTraining struct { @@ -1010,9 +1009,9 @@ type BigQueryModelTraining struct { NullFields []string `json:"-"` } -func (s *BigQueryModelTraining) MarshalJSON() ([]byte, error) { +func (s BigQueryModelTraining) MarshalJSON() ([]byte, error) { type NoMethod BigQueryModelTraining - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BigtableColumn: Information related to a Bigtable column. @@ -1034,7 +1033,7 @@ type BigtableColumn struct { // 'onlyReadLatest' is set at both levels. 
OnlyReadLatest bool `json:"onlyReadLatest,omitempty"` // QualifierEncoded: [Required] Qualifier of the column. Columns in the parent - // column family that has this exact qualifier are exposed as . field. If the + // column family that has this exact qualifier are exposed as `.` field. If the // qualifier is valid UTF-8 string, it can be specified in the qualifier_string // field. Otherwise, a base-64 encoded value must be set to qualifier_encoded. // The column field name is the same as the column qualifier. However, if the @@ -1064,17 +1063,17 @@ type BigtableColumn struct { NullFields []string `json:"-"` } -func (s *BigtableColumn) MarshalJSON() ([]byte, error) { +func (s BigtableColumn) MarshalJSON() ([]byte, error) { type NoMethod BigtableColumn - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BigtableColumnFamily: Information related to a Bigtable column family. type BigtableColumnFamily struct { // Columns: Optional. Lists of columns that should be exposed as individual // fields as opposed to a list of (column name, value) pairs. All columns whose - // qualifier matches a qualifier in this list can be accessed as .. Other - // columns can be accessed as a list through .Column field. + // qualifier matches a qualifier in this list can be accessed as `.`. Other + // columns can be accessed as a list through the `.Column` field. Columns []*BigtableColumn `json:"columns,omitempty"` // Encoding: Optional. The encoding of the values when the type is not STRING. // Acceptable encoding values are: TEXT - indicates values are alphanumeric @@ -1110,9 +1109,9 @@ type BigtableColumnFamily struct { NullFields []string `json:"-"` } -func (s *BigtableColumnFamily) MarshalJSON() ([]byte, error) { +func (s BigtableColumnFamily) MarshalJSON() ([]byte, error) { type NoMethod BigtableColumnFamily - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BigtableOptions: Options specific to Google Cloud Bigtable data sources. @@ -1153,9 +1152,9 @@ type BigtableOptions struct { NullFields []string `json:"-"` } -func (s *BigtableOptions) MarshalJSON() ([]byte, error) { +func (s BigtableOptions) MarshalJSON() ([]byte, error) { type NoMethod BigtableOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BinaryClassificationMetrics: Evaluation metrics for binary @@ -1183,9 +1182,9 @@ type BinaryClassificationMetrics struct { NullFields []string `json:"-"` } -func (s *BinaryClassificationMetrics) MarshalJSON() ([]byte, error) { +func (s BinaryClassificationMetrics) MarshalJSON() ([]byte, error) { type NoMethod BinaryClassificationMetrics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BinaryConfusionMatrix: Confusion matrix for binary classification models. 
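The BigLakeConfiguration hunk above only re-delimits the doc templates, but the two accepted connection_id spellings are easy to get wrong. A sketch of a managed-table definition under those documented forms; the project, location, connection, and bucket names are placeholders, and the PARQUET/ICEBERG enum strings are my assumption since the possible-values lists fall outside these hunks:

    package main

    import bigquery "google.golang.org/api/bigquery/v2"

    // newManagedTable uses the short `{project}.{location}.{connection_id}`
    // connection form; the long
    // `projects/{project}/locations/{location}/connections/{connection_id}`
    // form is documented as equally valid.
    func newManagedTable() *bigquery.Table {
    	return &bigquery.Table{
    		TableReference: &bigquery.TableReference{
    			ProjectId: "my-project",
    			DatasetId: "my_dataset",
    			TableId:   "managed_tbl",
    		},
    		BiglakeConfiguration: &bigquery.BigLakeConfiguration{
    			ConnectionId: "my-project.us.my-connection",
    			StorageUri:   "gs://my-bucket/path_to_table/", // no '*' wildcard allowed
    			FileFormat:   "PARQUET",
    			TableFormat:  "ICEBERG",
    		},
    	}
    }

    func main() { _ = newManagedTable() }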
@@ -1224,9 +1223,9 @@ type BinaryConfusionMatrix struct { NullFields []string `json:"-"` } -func (s *BinaryConfusionMatrix) MarshalJSON() ([]byte, error) { +func (s BinaryConfusionMatrix) MarshalJSON() ([]byte, error) { type NoMethod BinaryConfusionMatrix - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *BinaryConfusionMatrix) UnmarshalJSON(data []byte) error { @@ -1345,9 +1344,9 @@ type Binding struct { NullFields []string `json:"-"` } -func (s *Binding) MarshalJSON() ([]byte, error) { +func (s Binding) MarshalJSON() ([]byte, error) { type NoMethod Binding - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type BqmlIterationResult struct { @@ -1374,9 +1373,9 @@ type BqmlIterationResult struct { NullFields []string `json:"-"` } -func (s *BqmlIterationResult) MarshalJSON() ([]byte, error) { +func (s BqmlIterationResult) MarshalJSON() ([]byte, error) { type NoMethod BqmlIterationResult - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *BqmlIterationResult) UnmarshalJSON(data []byte) error { @@ -1419,9 +1418,9 @@ type BqmlTrainingRun struct { NullFields []string `json:"-"` } -func (s *BqmlTrainingRun) MarshalJSON() ([]byte, error) { +func (s BqmlTrainingRun) MarshalJSON() ([]byte, error) { type NoMethod BqmlTrainingRun - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BqmlTrainingRunTrainingOptions: Deprecated. @@ -1448,9 +1447,9 @@ type BqmlTrainingRunTrainingOptions struct { NullFields []string `json:"-"` } -func (s *BqmlTrainingRunTrainingOptions) MarshalJSON() ([]byte, error) { +func (s BqmlTrainingRunTrainingOptions) MarshalJSON() ([]byte, error) { type NoMethod BqmlTrainingRunTrainingOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *BqmlTrainingRunTrainingOptions) UnmarshalJSON(data []byte) error { @@ -1495,9 +1494,9 @@ type CategoricalValue struct { NullFields []string `json:"-"` } -func (s *CategoricalValue) MarshalJSON() ([]byte, error) { +func (s CategoricalValue) MarshalJSON() ([]byte, error) { type NoMethod CategoricalValue - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CategoryCount: Represents the count of a single category within the cluster. 
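A few hunks below, the ConnectionProperty doc-comment gains a `service_account` key for continuous queries. A sketch of how a caller would attach it to a query job, assuming placeholder project, dataset, query, and service-account values; whether the backend honors the key still depends on the continuous-query feature being available:

    package main

    import bigquery "google.golang.org/api/bigquery/v2"

    // continuousQueryJob builds a query job configuration that asks BigQuery
    // to run the query as the given service account, via the new
    // `service_account` connection property documented below.
    func continuousQueryJob() *bigquery.Job {
    	return &bigquery.Job{
    		Configuration: &bigquery.JobConfiguration{
    			Query: &bigquery.JobConfigurationQuery{
    				Query: "SELECT * FROM `my-project.my_dataset.events`",
    				ConnectionProperties: []*bigquery.ConnectionProperty{
    					{Key: "service_account", Value: "sa@my-project.iam.gserviceaccount.com"},
    				},
    			},
    		},
    	}
    }

    func main() { _ = continuousQueryJob() }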
@@ -1520,9 +1519,9 @@ type CategoryCount struct { NullFields []string `json:"-"` } -func (s *CategoryCount) MarshalJSON() ([]byte, error) { +func (s CategoryCount) MarshalJSON() ([]byte, error) { type NoMethod CategoryCount - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CloneDefinition: Information about base table and clone time of a table @@ -1547,9 +1546,9 @@ type CloneDefinition struct { NullFields []string `json:"-"` } -func (s *CloneDefinition) MarshalJSON() ([]byte, error) { +func (s CloneDefinition) MarshalJSON() ([]byte, error) { type NoMethod CloneDefinition - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Cluster: Message containing the information about one cluster. @@ -1573,9 +1572,9 @@ type Cluster struct { NullFields []string `json:"-"` } -func (s *Cluster) MarshalJSON() ([]byte, error) { +func (s Cluster) MarshalJSON() ([]byte, error) { type NoMethod Cluster - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ClusterInfo: Information about a single cluster for clustering model. @@ -1601,9 +1600,9 @@ type ClusterInfo struct { NullFields []string `json:"-"` } -func (s *ClusterInfo) MarshalJSON() ([]byte, error) { +func (s ClusterInfo) MarshalJSON() ([]byte, error) { type NoMethod ClusterInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *ClusterInfo) UnmarshalJSON(data []byte) error { @@ -1641,9 +1640,9 @@ type Clustering struct { NullFields []string `json:"-"` } -func (s *Clustering) MarshalJSON() ([]byte, error) { +func (s Clustering) MarshalJSON() ([]byte, error) { type NoMethod Clustering - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ClusteringMetrics: Evaluation metrics for clustering models. @@ -1668,9 +1667,9 @@ type ClusteringMetrics struct { NullFields []string `json:"-"` } -func (s *ClusteringMetrics) MarshalJSON() ([]byte, error) { +func (s ClusteringMetrics) MarshalJSON() ([]byte, error) { type NoMethod ClusteringMetrics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *ClusteringMetrics) UnmarshalJSON(data []byte) error { @@ -1709,9 +1708,9 @@ type ConfusionMatrix struct { NullFields []string `json:"-"` } -func (s *ConfusionMatrix) MarshalJSON() ([]byte, error) { +func (s ConfusionMatrix) MarshalJSON() ([]byte, error) { type NoMethod ConfusionMatrix - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *ConfusionMatrix) UnmarshalJSON(data []byte) error { @@ -1743,8 +1742,12 @@ func (s *ConfusionMatrix) UnmarshalJSON(data []byte) error { // format in which a you can specify a query label, see labels in the // JobConfiguration resource type: // https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfiguration -// Additional properties are allowed, but ignored. Specifying multiple -// connection properties with the same key returns an error. 
+// * **service_account**: indicates the service account to use to run a +// continuous query. If set, the query job uses the service account to access +// Google Cloud resources. Service account access is bounded by the IAM +// permissions that you have granted to the service account. Additional +// properties are allowed, but ignored. Specifying multiple connection +// properties with the same key returns an error. type ConnectionProperty struct { // Key: The key of the property to set. Key string `json:"key,omitempty"` @@ -1763,9 +1766,9 @@ type ConnectionProperty struct { NullFields []string `json:"-"` } -func (s *ConnectionProperty) MarshalJSON() ([]byte, error) { +func (s ConnectionProperty) MarshalJSON() ([]byte, error) { type NoMethod ConnectionProperty - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CsvOptions: Information related to a CSV data source. @@ -1846,9 +1849,9 @@ type CsvOptions struct { NullFields []string `json:"-"` } -func (s *CsvOptions) MarshalJSON() ([]byte, error) { +func (s CsvOptions) MarshalJSON() ([]byte, error) { type NoMethod CsvOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DataFormatOptions: Options for data format adjustments. @@ -1869,9 +1872,9 @@ type DataFormatOptions struct { NullFields []string `json:"-"` } -func (s *DataFormatOptions) MarshalJSON() ([]byte, error) { +func (s DataFormatOptions) MarshalJSON() ([]byte, error) { type NoMethod DataFormatOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DataMaskingStatistics: Statistics for data-masking. @@ -1892,9 +1895,33 @@ type DataMaskingStatistics struct { NullFields []string `json:"-"` } -func (s *DataMaskingStatistics) MarshalJSON() ([]byte, error) { +func (s DataMaskingStatistics) MarshalJSON() ([]byte, error) { type NoMethod DataMaskingStatistics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// DataPolicyOption: Data policy option proto, it currently supports name only, +// will support precedence later. +type DataPolicyOption struct { + // Name: Data policy resource name in the form of + // projects/project_id/locations/location_id/dataPolicies/data_policy_id. + Name string `json:"name,omitempty"` + // ForceSendFields is a list of field names (e.g. "Name") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Name") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s DataPolicyOption) MarshalJSON() ([]byte, error) { + type NoMethod DataPolicyOption + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DataSplitResult: Data split result. 
This contains references to the training @@ -1919,11 +1946,12 @@ type DataSplitResult struct { NullFields []string `json:"-"` } -func (s *DataSplitResult) MarshalJSON() ([]byte, error) { +func (s DataSplitResult) MarshalJSON() ([]byte, error) { type NoMethod DataSplitResult - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } +// Dataset: Represents a BigQuery dataset. type Dataset struct { // Access: Optional. An array of objects that define dataset access for one or // more entities. You can set this property when inserting or updating a @@ -1932,7 +1960,10 @@ type Dataset struct { // for the following entities: access.specialGroup: projectReaders; // access.role: READER; access.specialGroup: projectWriters; access.role: // WRITER; access.specialGroup: projectOwners; access.role: OWNER; - // access.userByEmail: [dataset creator email]; access.role: OWNER; + // access.userByEmail: [dataset creator email]; access.role: OWNER; If you + // patch a dataset, then this field is overwritten by the patched dataset's + // access field. To add entities, you must supply the entire existing access + // array in addition to any new entities that you want to add. Access []*DatasetAccess `json:"access,omitempty"` // CreationTime: Output only. The time when this dataset was created, in // milliseconds since the epoch. @@ -2025,8 +2056,9 @@ type Dataset struct { Kind string `json:"kind,omitempty"` // Labels: The labels associated with this dataset. You can use these to // organize and group your datasets. You can set this property when inserting - // or updating a dataset. See Creating and Updating Dataset Labels for more - // information. + // or updating a dataset. See Creating and Updating Dataset Labels + // (https://cloud.google.com/bigquery/docs/creating-managing-labels#creating_and_updating_dataset_labels) + // for more information. Labels map[string]string `json:"labels,omitempty"` // LastModifiedTime: Output only. The date when this dataset was last modified, // in milliseconds since the epoch. @@ -2046,18 +2078,21 @@ type Dataset struct { // value can be from 48 to 168 hours (2 to 7 days). The default value is 168 // hours if this is not set. MaxTimeTravelHours int64 `json:"maxTimeTravelHours,omitempty,string"` - // ResourceTags: Optional. The tags (/bigquery/docs/tags) attached to this - // dataset. Tag keys are globally unique. Tag key is expected to be in the - // namespaced format, for example "123456789012/environment" where 123456789012 - // is the ID of the parent organization or project resource for this tag key. - // Tag value is expected to be the short name, for example "Production". See - // Tag definitions (/iam/docs/tags-access-control#definitions) for more + // ResourceTags: Optional. The tags + // (https://cloud.google.com/bigquery/docs/tags) attached to this dataset. Tag + // keys are globally unique. Tag key is expected to be in the namespaced + // format, for example "123456789012/environment" where 123456789012 is the ID + // of the parent organization or project resource for this tag key. Tag value + // is expected to be the short name, for example "Production". See Tag + // definitions + // (https://cloud.google.com/iam/docs/tags-access-control#definitions) for more // details. ResourceTags map[string]string `json:"resourceTags,omitempty"` // Restrictions: Optional. Output only. Restriction config for all tables and // dataset. 
If set, restrict certain accesses on the dataset and all its tables // based on the config. See Data egress - // (/bigquery/docs/analytics-hub-introduction#data_egress) for more details. + // (https://cloud.google.com/bigquery/docs/analytics-hub-introduction#data_egress) + // for more details. Restrictions *RestrictionConfig `json:"restrictions,omitempty"` // SatisfiesPzi: Output only. Reserved for future use. SatisfiesPzi bool `json:"satisfiesPzi,omitempty"` @@ -2074,7 +2109,8 @@ type Dataset struct { // "LOGICAL" - Billing for logical bytes. // "PHYSICAL" - Billing for physical bytes. StorageBillingModel string `json:"storageBillingModel,omitempty"` - // Tags: Output only. Tags for the Dataset. + // Tags: Output only. Tags for the dataset. To provide tags as inputs, use the + // `resourceTags` field. Tags []*DatasetTags `json:"tags,omitempty"` // Type: Output only. Same as `type` in `ListFormatDataset`. The type of the // dataset, one of: * DEFAULT - only accessible by owner and authorized @@ -2097,9 +2133,9 @@ type Dataset struct { NullFields []string `json:"-"` } -func (s *Dataset) MarshalJSON() ([]byte, error) { +func (s Dataset) MarshalJSON() ([]byte, error) { type NoMethod Dataset - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DatasetAccess: An object that defines dataset access for an entity. @@ -2122,9 +2158,9 @@ type DatasetAccess struct { IamMember string `json:"iamMember,omitempty"` // Role: An IAM role ID that should be granted to the user, group, or domain // specified in this access entry. The following legacy mappings will be - // applied: OWNER <=> roles/bigquery.dataOwner WRITER <=> - // roles/bigquery.dataEditor READER <=> roles/bigquery.dataViewer This field - // will accept any of the above formats, but will return only the legacy + // applied: * `OWNER`: `roles/bigquery.dataOwner` * `WRITER`: + // `roles/bigquery.dataEditor` * `READER`: `roles/bigquery.dataViewer` This + // field will accept any of the above formats, but will return only the legacy // format. For example, if you set this field to "roles/bigquery.dataOwner", it // will be returned back as "OWNER". Role string `json:"role,omitempty"` @@ -2136,9 +2172,9 @@ type DatasetAccess struct { // update operation. Routine *RoutineReference `json:"routine,omitempty"` // SpecialGroup: [Pick one] A special group to grant access to. Possible values - // include: projectOwners: Owners of the enclosing project. projectReaders: - // Readers of the enclosing project. projectWriters: Writers of the enclosing - // project. allAuthenticatedUsers: All authenticated BigQuery users. Maps to + // include: * projectOwners: Owners of the enclosing project. * projectReaders: + // Readers of the enclosing project. * projectWriters: Writers of the enclosing + // project. * allAuthenticatedUsers: All authenticated BigQuery users. Maps to // similarly-named IAM members. SpecialGroup string `json:"specialGroup,omitempty"` // UserByEmail: [Pick one] An email address of a user to grant access to. 
For @@ -2164,9 +2200,9 @@ type DatasetAccess struct { NullFields []string `json:"-"` } -func (s *DatasetAccess) MarshalJSON() ([]byte, error) { +func (s DatasetAccess) MarshalJSON() ([]byte, error) { type NoMethod DatasetAccess - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DatasetTags: A global tag managed by Resource Manager. @@ -2191,9 +2227,9 @@ type DatasetTags struct { NullFields []string `json:"-"` } -func (s *DatasetTags) MarshalJSON() ([]byte, error) { +func (s DatasetTags) MarshalJSON() ([]byte, error) { type NoMethod DatasetTags - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DatasetAccessEntry: Grants all resources of particular types in a particular @@ -2227,9 +2263,9 @@ type DatasetAccessEntry struct { NullFields []string `json:"-"` } -func (s *DatasetAccessEntry) MarshalJSON() ([]byte, error) { +func (s DatasetAccessEntry) MarshalJSON() ([]byte, error) { type NoMethod DatasetAccessEntry - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DatasetList: Response format for a page of results when listing datasets. @@ -2268,9 +2304,9 @@ type DatasetList struct { NullFields []string `json:"-"` } -func (s *DatasetList) MarshalJSON() ([]byte, error) { +func (s DatasetList) MarshalJSON() ([]byte, error) { type NoMethod DatasetList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DatasetListDatasets: A dataset resource with only a subset of fields, to be @@ -2305,11 +2341,12 @@ type DatasetListDatasets struct { NullFields []string `json:"-"` } -func (s *DatasetListDatasets) MarshalJSON() ([]byte, error) { +func (s DatasetListDatasets) MarshalJSON() ([]byte, error) { type NoMethod DatasetListDatasets - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } +// DatasetReference: Identifier for a dataset. type DatasetReference struct { // DatasetId: Required. A unique ID for this dataset, without the project name. // The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores @@ -2330,9 +2367,9 @@ type DatasetReference struct { NullFields []string `json:"-"` } -func (s *DatasetReference) MarshalJSON() ([]byte, error) { +func (s DatasetReference) MarshalJSON() ([]byte, error) { type NoMethod DatasetReference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DestinationTableProperties: Properties for the destination table. 
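The Access documentation added above changes patch semantics: a patched dataset's access field replaces the stored one wholesale, so sending only the new entry would drop every existing grant. A hedged sketch of the read-modify-write flow it calls for, using this package's Datasets.Get and Datasets.Patch calls:

package example

import (
	"context"

	bigquery "google.golang.org/api/bigquery/v2"
)

// grantReader appends a READER entry while preserving the existing access
// array, since Datasets.Patch overwrites the whole field.
func grantReader(ctx context.Context, project, dataset, email string) error {
	svc, err := bigquery.NewService(ctx)
	if err != nil {
		return err
	}
	ds, err := svc.Datasets.Get(project, dataset).Do()
	if err != nil {
		return err
	}
	ds.Access = append(ds.Access, &bigquery.DatasetAccess{
		Role:        "READER",
		UserByEmail: email,
	})
	_, err = svc.Datasets.Patch(project, dataset, &bigquery.Dataset{Access: ds.Access}).Do()
	return err
}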
@@ -2365,9 +2402,9 @@ type DestinationTableProperties struct { NullFields []string `json:"-"` } -func (s *DestinationTableProperties) MarshalJSON() ([]byte, error) { +func (s DestinationTableProperties) MarshalJSON() ([]byte, error) { type NoMethod DestinationTableProperties - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DifferentialPrivacyPolicy: Represents privacy policy associated with @@ -2444,9 +2481,9 @@ type DifferentialPrivacyPolicy struct { NullFields []string `json:"-"` } -func (s *DifferentialPrivacyPolicy) MarshalJSON() ([]byte, error) { +func (s DifferentialPrivacyPolicy) MarshalJSON() ([]byte, error) { type NoMethod DifferentialPrivacyPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *DifferentialPrivacyPolicy) UnmarshalJSON(data []byte) error { @@ -2492,9 +2529,9 @@ type DimensionalityReductionMetrics struct { NullFields []string `json:"-"` } -func (s *DimensionalityReductionMetrics) MarshalJSON() ([]byte, error) { +func (s DimensionalityReductionMetrics) MarshalJSON() ([]byte, error) { type NoMethod DimensionalityReductionMetrics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *DimensionalityReductionMetrics) UnmarshalJSON(data []byte) error { @@ -2535,9 +2572,9 @@ type DmlStatistics struct { NullFields []string `json:"-"` } -func (s *DmlStatistics) MarshalJSON() ([]byte, error) { +func (s DmlStatistics) MarshalJSON() ([]byte, error) { type NoMethod DmlStatistics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DoubleCandidates: Discrete candidates of a double hyperparameter. @@ -2557,9 +2594,9 @@ type DoubleCandidates struct { NullFields []string `json:"-"` } -func (s *DoubleCandidates) MarshalJSON() ([]byte, error) { +func (s DoubleCandidates) MarshalJSON() ([]byte, error) { type NoMethod DoubleCandidates - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *DoubleCandidates) UnmarshalJSON(data []byte) error { @@ -2598,9 +2635,9 @@ type DoubleHparamSearchSpace struct { NullFields []string `json:"-"` } -func (s *DoubleHparamSearchSpace) MarshalJSON() ([]byte, error) { +func (s DoubleHparamSearchSpace) MarshalJSON() ([]byte, error) { type NoMethod DoubleHparamSearchSpace - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DoubleRange: Range of a double hyperparameter. @@ -2622,9 +2659,9 @@ type DoubleRange struct { NullFields []string `json:"-"` } -func (s *DoubleRange) MarshalJSON() ([]byte, error) { +func (s DoubleRange) MarshalJSON() ([]byte, error) { type NoMethod DoubleRange - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *DoubleRange) UnmarshalJSON(data []byte) error { @@ -2643,6 +2680,7 @@ func (s *DoubleRange) UnmarshalJSON(data []byte) error { return nil } +// EncryptionConfiguration: Configuration for Cloud KMS encryption settings. 
type EncryptionConfiguration struct { // KmsKeyName: Optional. Describes the Cloud KMS encryption key that will be // used to protect destination BigQuery table. The BigQuery Service Account @@ -2661,9 +2699,9 @@ type EncryptionConfiguration struct { NullFields []string `json:"-"` } -func (s *EncryptionConfiguration) MarshalJSON() ([]byte, error) { +func (s EncryptionConfiguration) MarshalJSON() ([]byte, error) { type NoMethod EncryptionConfiguration - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Entry: A single entry in the confusion matrix. @@ -2687,9 +2725,9 @@ type Entry struct { NullFields []string `json:"-"` } -func (s *Entry) MarshalJSON() ([]byte, error) { +func (s Entry) MarshalJSON() ([]byte, error) { type NoMethod Entry - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ErrorProto: Error details. @@ -2716,9 +2754,9 @@ type ErrorProto struct { NullFields []string `json:"-"` } -func (s *ErrorProto) MarshalJSON() ([]byte, error) { +func (s ErrorProto) MarshalJSON() ([]byte, error) { type NoMethod ErrorProto - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // EvaluationMetrics: Evaluation metrics of a model. These are either computed @@ -2757,9 +2795,9 @@ type EvaluationMetrics struct { NullFields []string `json:"-"` } -func (s *EvaluationMetrics) MarshalJSON() ([]byte, error) { +func (s EvaluationMetrics) MarshalJSON() ([]byte, error) { type NoMethod EvaluationMetrics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExplainQueryStage: A single stage of query execution. @@ -2852,9 +2890,9 @@ type ExplainQueryStage struct { NullFields []string `json:"-"` } -func (s *ExplainQueryStage) MarshalJSON() ([]byte, error) { +func (s ExplainQueryStage) MarshalJSON() ([]byte, error) { type NoMethod ExplainQueryStage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *ExplainQueryStage) UnmarshalJSON(data []byte) error { @@ -2904,9 +2942,9 @@ type ExplainQueryStep struct { NullFields []string `json:"-"` } -func (s *ExplainQueryStep) MarshalJSON() ([]byte, error) { +func (s ExplainQueryStep) MarshalJSON() ([]byte, error) { type NoMethod ExplainQueryStep - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Explanation: Explanation for a single feature. 
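The dominant change in these hunks is mechanical: every generated MarshalJSON moves from a pointer receiver to a value receiver. A minimal sketch of the pattern, substituting plain encoding/json for gensupport.MarshalJSON and using a hypothetical Example struct:

package main

import (
	"encoding/json"
	"fmt"
)

// Example mirrors the generated-struct convention used throughout this file.
type Example struct {
	Name            string   `json:"name,omitempty"`
	ForceSendFields []string `json:"-"`
	NullFields      []string `json:"-"`
}

// With a value receiver, both Example and *Example satisfy json.Marshaler,
// so marshalling a struct value no longer bypasses the custom logic. The
// NoMethod alias drops the MarshalJSON method, preventing infinite
// recursion when the aliased value is re-encoded.
func (s Example) MarshalJSON() ([]byte, error) {
	type NoMethod Example
	return json.Marshal(NoMethod(s))
}

func main() {
	b, _ := json.Marshal(Example{Name: "demo"})
	fmt.Println(string(b)) // {"name":"demo"}
}

The UnmarshalJSON helpers keep their pointer receivers, since decoding has to mutate the receiver in place.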
@@ -2930,9 +2968,9 @@ type Explanation struct { NullFields []string `json:"-"` } -func (s *Explanation) MarshalJSON() ([]byte, error) { +func (s Explanation) MarshalJSON() ([]byte, error) { type NoMethod Explanation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *Explanation) UnmarshalJSON(data []byte) error { @@ -2971,9 +3009,9 @@ type ExportDataStatistics struct { NullFields []string `json:"-"` } -func (s *ExportDataStatistics) MarshalJSON() ([]byte, error) { +func (s ExportDataStatistics) MarshalJSON() ([]byte, error) { type NoMethod ExportDataStatistics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Expr: Represents a textual expression in the Common Expression Language @@ -3019,9 +3057,9 @@ type Expr struct { NullFields []string `json:"-"` } -func (s *Expr) MarshalJSON() ([]byte, error) { +func (s Expr) MarshalJSON() ([]byte, error) { type NoMethod Expr - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExternalCatalogDatasetOptions: Options defining open source compatible @@ -3048,9 +3086,9 @@ type ExternalCatalogDatasetOptions struct { NullFields []string `json:"-"` } -func (s *ExternalCatalogDatasetOptions) MarshalJSON() ([]byte, error) { +func (s ExternalCatalogDatasetOptions) MarshalJSON() ([]byte, error) { type NoMethod ExternalCatalogDatasetOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExternalCatalogTableOptions: Metadata about open source compatible table. @@ -3082,9 +3120,9 @@ type ExternalCatalogTableOptions struct { NullFields []string `json:"-"` } -func (s *ExternalCatalogTableOptions) MarshalJSON() ([]byte, error) { +func (s ExternalCatalogTableOptions) MarshalJSON() ([]byte, error) { type NoMethod ExternalCatalogTableOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ExternalDataConfiguration struct { @@ -3104,10 +3142,9 @@ type ExternalDataConfiguration struct { Compression string `json:"compression,omitempty"` // ConnectionId: Optional. The connection specifying the credentials to be used // to read external storage, such as Azure Blob, Cloud Storage, or S3. The - // connection_id can have the form - // ".." or - // "projects//locations//connections/ - // ". + // connection_id can have the form `{project_id}.{location_id};{connection_id}` + // or + // `projects/{project_id}/locations/{location_id}/connections/{connection_id}`. ConnectionId string `json:"connectionId,omitempty"` // CsvOptions: Optional. Additional properties to set if sourceFormat is set to // CSV. 
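The ConnectionId comment above now spells out both accepted forms with concrete placeholders. A small sketch of an external table definition using the fully qualified form, with hypothetical bucket, project, and connection names:

package example

import bigquery "google.golang.org/api/bigquery/v2"

// parquetOnGCS points an external table at Cloud Storage, reading through
// a BigQuery connection named in the long form documented above.
func parquetOnGCS() *bigquery.ExternalDataConfiguration {
	return &bigquery.ExternalDataConfiguration{
		SourceFormat: "PARQUET",
		SourceUris:   []string{"gs://my-bucket/data/*.parquet"},
		ConnectionId: "projects/my-project/locations/us/connections/my-conn",
	}
}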
@@ -3253,9 +3290,9 @@ type ExternalDataConfiguration struct { NullFields []string `json:"-"` } -func (s *ExternalDataConfiguration) MarshalJSON() ([]byte, error) { +func (s ExternalDataConfiguration) MarshalJSON() ([]byte, error) { type NoMethod ExternalDataConfiguration - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExternalDatasetReference: Configures the access a dataset defined in an @@ -3280,9 +3317,9 @@ type ExternalDatasetReference struct { NullFields []string `json:"-"` } -func (s *ExternalDatasetReference) MarshalJSON() ([]byte, error) { +func (s ExternalDatasetReference) MarshalJSON() ([]byte, error) { type NoMethod ExternalDatasetReference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExternalServiceCost: The external service cost is a portion of the total @@ -3321,9 +3358,9 @@ type ExternalServiceCost struct { NullFields []string `json:"-"` } -func (s *ExternalServiceCost) MarshalJSON() ([]byte, error) { +func (s ExternalServiceCost) MarshalJSON() ([]byte, error) { type NoMethod ExternalServiceCost - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FeatureValue: Representative value of a single feature within the cluster. @@ -3348,9 +3385,9 @@ type FeatureValue struct { NullFields []string `json:"-"` } -func (s *FeatureValue) MarshalJSON() ([]byte, error) { +func (s FeatureValue) MarshalJSON() ([]byte, error) { type NoMethod FeatureValue - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *FeatureValue) UnmarshalJSON(data []byte) error { @@ -3390,9 +3427,9 @@ type ForeignTypeInfo struct { NullFields []string `json:"-"` } -func (s *ForeignTypeInfo) MarshalJSON() ([]byte, error) { +func (s ForeignTypeInfo) MarshalJSON() ([]byte, error) { type NoMethod ForeignTypeInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ForeignViewDefinition: A view can be represented in multiple ways. Each @@ -3416,9 +3453,9 @@ type ForeignViewDefinition struct { NullFields []string `json:"-"` } -func (s *ForeignViewDefinition) MarshalJSON() ([]byte, error) { +func (s ForeignViewDefinition) MarshalJSON() ([]byte, error) { type NoMethod ForeignViewDefinition - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetIamPolicyRequest: Request message for `GetIamPolicy` method. @@ -3439,9 +3476,9 @@ type GetIamPolicyRequest struct { NullFields []string `json:"-"` } -func (s *GetIamPolicyRequest) MarshalJSON() ([]byte, error) { +func (s GetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod GetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetPolicyOptions: Encapsulates settings provided to GetIamPolicy. 
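GetIamPolicyRequest and GetPolicyOptions only change receivers here, but their interplay is easy to miss: the options carry the requested policy version. A hedged sketch of a table-level getIamPolicy call, assuming this package's Tables.GetIamPolicy method:

package example

import (
	"context"
	"fmt"

	bigquery "google.golang.org/api/bigquery/v2"
)

// tablePolicy fetches the IAM policy of a table, asking for version 3 so
// conditional bindings come back intact.
func tablePolicy(ctx context.Context, project, dataset, table string) (*bigquery.Policy, error) {
	svc, err := bigquery.NewService(ctx)
	if err != nil {
		return nil, err
	}
	resource := fmt.Sprintf("projects/%s/datasets/%s/tables/%s", project, dataset, table)
	return svc.Tables.GetIamPolicy(resource, &bigquery.GetIamPolicyRequest{
		Options: &bigquery.GetPolicyOptions{RequestedPolicyVersion: 3},
	}).Do()
}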
@@ -3471,9 +3508,9 @@ type GetPolicyOptions struct { NullFields []string `json:"-"` } -func (s *GetPolicyOptions) MarshalJSON() ([]byte, error) { +func (s GetPolicyOptions) MarshalJSON() ([]byte, error) { type NoMethod GetPolicyOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetQueryResultsResponse: Response object of GetQueryResults. @@ -3539,9 +3576,9 @@ type GetQueryResultsResponse struct { NullFields []string `json:"-"` } -func (s *GetQueryResultsResponse) MarshalJSON() ([]byte, error) { +func (s GetQueryResultsResponse) MarshalJSON() ([]byte, error) { type NoMethod GetQueryResultsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetServiceAccountResponse: Response object of GetServiceAccount @@ -3566,9 +3603,9 @@ type GetServiceAccountResponse struct { NullFields []string `json:"-"` } -func (s *GetServiceAccountResponse) MarshalJSON() ([]byte, error) { +func (s GetServiceAccountResponse) MarshalJSON() ([]byte, error) { type NoMethod GetServiceAccountResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GlobalExplanation: Global explanations containing the top most important @@ -3594,9 +3631,9 @@ type GlobalExplanation struct { NullFields []string `json:"-"` } -func (s *GlobalExplanation) MarshalJSON() ([]byte, error) { +func (s GlobalExplanation) MarshalJSON() ([]byte, error) { type NoMethod GlobalExplanation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleSheetsOptions: Options specific to Google Sheets data sources. @@ -3630,9 +3667,9 @@ type GoogleSheetsOptions struct { NullFields []string `json:"-"` } -func (s *GoogleSheetsOptions) MarshalJSON() ([]byte, error) { +func (s GoogleSheetsOptions) MarshalJSON() ([]byte, error) { type NoMethod GoogleSheetsOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HighCardinalityJoin: High cardinality join detailed information. @@ -3659,9 +3696,9 @@ type HighCardinalityJoin struct { NullFields []string `json:"-"` } -func (s *HighCardinalityJoin) MarshalJSON() ([]byte, error) { +func (s HighCardinalityJoin) MarshalJSON() ([]byte, error) { type NoMethod HighCardinalityJoin - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HivePartitioningOptions: Options for configuring hive partitioning detect. @@ -3717,9 +3754,9 @@ type HivePartitioningOptions struct { NullFields []string `json:"-"` } -func (s *HivePartitioningOptions) MarshalJSON() ([]byte, error) { +func (s HivePartitioningOptions) MarshalJSON() ([]byte, error) { type NoMethod HivePartitioningOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HparamSearchSpaces: Hyperparameter search spaces. 
These should be a subset @@ -3789,14 +3826,14 @@ type HparamSearchSpaces struct { NullFields []string `json:"-"` } -func (s *HparamSearchSpaces) MarshalJSON() ([]byte, error) { +func (s HparamSearchSpaces) MarshalJSON() ([]byte, error) { type NoMethod HparamSearchSpaces - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HparamTuningTrial: Training info of a trial in hyperparameter tuning -// (/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overvie -// w) models. +// (https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) +// models. type HparamTuningTrial struct { // EndTimeMs: Ending time of the trial. EndTimeMs int64 `json:"endTimeMs,omitempty,string"` @@ -3843,9 +3880,9 @@ type HparamTuningTrial struct { NullFields []string `json:"-"` } -func (s *HparamTuningTrial) MarshalJSON() ([]byte, error) { +func (s HparamTuningTrial) MarshalJSON() ([]byte, error) { type NoMethod HparamTuningTrial - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *HparamTuningTrial) UnmarshalJSON(data []byte) error { @@ -3915,6 +3952,8 @@ type IndexUnusedReason struct { // search function that cannot make use of the index has been selected. // "QUERY_CACHE_HIT" - Indicates that the query was cached, and thus the // search index was not used. + // "STALE_INDEX" - The index cannot be used in the search query because it is + // stale. // "INTERNAL_ERROR" - Indicates an internal error that causes the search // index to be unused. // "OTHER_REASON" - Indicates that the reason search indexes cannot be used @@ -3938,9 +3977,9 @@ type IndexUnusedReason struct { NullFields []string `json:"-"` } -func (s *IndexUnusedReason) MarshalJSON() ([]byte, error) { +func (s IndexUnusedReason) MarshalJSON() ([]byte, error) { type NoMethod IndexUnusedReason - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InputDataChange: Details about the input data change insight. @@ -3961,9 +4000,9 @@ type InputDataChange struct { NullFields []string `json:"-"` } -func (s *InputDataChange) MarshalJSON() ([]byte, error) { +func (s InputDataChange) MarshalJSON() ([]byte, error) { type NoMethod InputDataChange - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *InputDataChange) UnmarshalJSON(data []byte) error { @@ -3997,9 +4036,9 @@ type IntArray struct { NullFields []string `json:"-"` } -func (s *IntArray) MarshalJSON() ([]byte, error) { +func (s IntArray) MarshalJSON() ([]byte, error) { type NoMethod IntArray - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // IntArrayHparamSearchSpace: Search space for int array. 
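IndexUnusedReason gains a STALE_INDEX code above. A hedged sketch of handling it alongside the existing codes, assuming the Code, IndexName, and Message fields of the generated struct:

package example

import (
	"log"

	bigquery "google.golang.org/api/bigquery/v2"
)

// logIndexUnused reports why a search index was skipped, including the
// newly documented STALE_INDEX reason.
func logIndexUnused(reasons []*bigquery.IndexUnusedReason) {
	for _, r := range reasons {
		switch r.Code {
		case "STALE_INDEX":
			log.Printf("index %s is stale: %s", r.IndexName, r.Message)
		case "QUERY_CACHE_HIT":
			log.Println("result served from cache; index not consulted")
		default:
			log.Printf("index unused (%s): %s", r.Code, r.Message)
		}
	}
}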
@@ -4019,9 +4058,9 @@ type IntArrayHparamSearchSpace struct { NullFields []string `json:"-"` } -func (s *IntArrayHparamSearchSpace) MarshalJSON() ([]byte, error) { +func (s IntArrayHparamSearchSpace) MarshalJSON() ([]byte, error) { type NoMethod IntArrayHparamSearchSpace - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // IntCandidates: Discrete candidates of an int hyperparameter. @@ -4041,9 +4080,9 @@ type IntCandidates struct { NullFields []string `json:"-"` } -func (s *IntCandidates) MarshalJSON() ([]byte, error) { +func (s IntCandidates) MarshalJSON() ([]byte, error) { type NoMethod IntCandidates - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // IntHparamSearchSpace: Search space for an int hyperparameter. @@ -4065,9 +4104,9 @@ type IntHparamSearchSpace struct { NullFields []string `json:"-"` } -func (s *IntHparamSearchSpace) MarshalJSON() ([]byte, error) { +func (s IntHparamSearchSpace) MarshalJSON() ([]byte, error) { type NoMethod IntHparamSearchSpace - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // IntRange: Range of an int hyperparameter. @@ -4089,9 +4128,9 @@ type IntRange struct { NullFields []string `json:"-"` } -func (s *IntRange) MarshalJSON() ([]byte, error) { +func (s IntRange) MarshalJSON() ([]byte, error) { type NoMethod IntRange - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // IterationResult: Information about a single iteration of the training run. @@ -4125,9 +4164,9 @@ type IterationResult struct { NullFields []string `json:"-"` } -func (s *IterationResult) MarshalJSON() ([]byte, error) { +func (s IterationResult) MarshalJSON() ([]byte, error) { type NoMethod IterationResult - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *IterationResult) UnmarshalJSON(data []byte) error { @@ -4155,9 +4194,8 @@ type Job struct { Etag string `json:"etag,omitempty"` // Id: Output only. Opaque ID field of the job. Id string `json:"id,omitempty"` - // JobCreationReason: Output only. If set, it provides the reason why a Job was - // created. If not set, it should be treated as the default: REQUESTED. This - // feature is not yet available. Jobs will always be created. + // JobCreationReason: Output only. The reason why a Job was created. Preview + // (https://cloud.google.com/products/#product-launch-stages) JobCreationReason *JobCreationReason `json:"jobCreationReason,omitempty"` // JobReference: Optional. Reference describing the unique-per-user name of the // job. @@ -4194,9 +4232,9 @@ type Job struct { NullFields []string `json:"-"` } -func (s *Job) MarshalJSON() ([]byte, error) { +func (s Job) MarshalJSON() ([]byte, error) { type NoMethod Job - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JobCancelResponse: Describes format of a jobs cancellation response. 
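The Job.JobCreationReason comment above moves from "not yet available" to Preview. A sketch of the optional-creation flow it belongs to, assuming QueryRequest carries a JobCreationMode field and QueryResponse echoes a JobCreationReason in this revision:

package example

import (
	"context"
	"log"

	bigquery "google.golang.org/api/bigquery/v2"
)

// queryWithoutJob asks the service to skip job creation when it can; the
// response's JobCreationReason says whether a job was made anyway.
func queryWithoutJob(ctx context.Context, project string) error {
	svc, err := bigquery.NewService(ctx)
	if err != nil {
		return err
	}
	resp, err := svc.Jobs.Query(project, &bigquery.QueryRequest{
		Query:           "SELECT 1",
		JobCreationMode: "JOB_CREATION_OPTIONAL",
	}).Do()
	if err != nil {
		return err
	}
	if resp.JobCreationReason != nil {
		log.Printf("job created: %s", resp.JobCreationReason.Code)
	}
	return nil
}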
@@ -4221,9 +4259,9 @@ type JobCancelResponse struct { NullFields []string `json:"-"` } -func (s *JobCancelResponse) MarshalJSON() ([]byte, error) { +func (s JobCancelResponse) MarshalJSON() ([]byte, error) { type NoMethod JobCancelResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type JobConfiguration struct { @@ -4269,9 +4307,9 @@ type JobConfiguration struct { NullFields []string `json:"-"` } -func (s *JobConfiguration) MarshalJSON() ([]byte, error) { +func (s JobConfiguration) MarshalJSON() ([]byte, error) { type NoMethod JobConfiguration - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JobConfigurationExtract: JobConfigurationExtract configures a job that @@ -4328,9 +4366,9 @@ type JobConfigurationExtract struct { NullFields []string `json:"-"` } -func (s *JobConfigurationExtract) MarshalJSON() ([]byte, error) { +func (s JobConfigurationExtract) MarshalJSON() ([]byte, error) { type NoMethod JobConfigurationExtract - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JobConfigurationLoad: JobConfigurationLoad contains the configuration @@ -4621,9 +4659,9 @@ type JobConfigurationLoad struct { NullFields []string `json:"-"` } -func (s *JobConfigurationLoad) MarshalJSON() ([]byte, error) { +func (s JobConfigurationLoad) MarshalJSON() ([]byte, error) { type NoMethod JobConfigurationLoad - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JobConfigurationQuery: JobConfigurationQuery configures a BigQuery query @@ -4786,9 +4824,9 @@ type JobConfigurationQuery struct { NullFields []string `json:"-"` } -func (s *JobConfigurationQuery) MarshalJSON() ([]byte, error) { +func (s JobConfigurationQuery) MarshalJSON() ([]byte, error) { type NoMethod JobConfigurationQuery - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JobConfigurationTableCopy: JobConfigurationTableCopy configures a job that @@ -4851,17 +4889,17 @@ type JobConfigurationTableCopy struct { NullFields []string `json:"-"` } -func (s *JobConfigurationTableCopy) MarshalJSON() ([]byte, error) { +func (s JobConfigurationTableCopy) MarshalJSON() ([]byte, error) { type NoMethod JobConfigurationTableCopy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JobCreationReason: Reason about why a Job was created from a `jobs.query` // (https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query) method // when used with `JOB_CREATION_OPTIONAL` Job creation mode. For `jobs.insert` // (https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/insert) -// method calls it will always be `REQUESTED`. This feature is not yet -// available. Jobs will always be created. +// method calls it will always be `REQUESTED`. Preview +// (https://cloud.google.com/products/#product-launch-stages) type JobCreationReason struct { // Code: Output only. Specifies the high level reason why a Job was created. 
// @@ -4890,9 +4928,9 @@ type JobCreationReason struct { NullFields []string `json:"-"` } -func (s *JobCreationReason) MarshalJSON() ([]byte, error) { +func (s JobCreationReason) MarshalJSON() ([]byte, error) { type NoMethod JobCreationReason - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JobList: JobList is the response format for a jobs.list call. @@ -4925,9 +4963,9 @@ type JobList struct { NullFields []string `json:"-"` } -func (s *JobList) MarshalJSON() ([]byte, error) { +func (s JobList) MarshalJSON() ([]byte, error) { type NoMethod JobList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JobListJobs: ListFormatJob is a partial projection of job information @@ -4971,9 +5009,9 @@ type JobListJobs struct { NullFields []string `json:"-"` } -func (s *JobListJobs) MarshalJSON() ([]byte, error) { +func (s JobListJobs) MarshalJSON() ([]byte, error) { type NoMethod JobListJobs - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JobReference: A job reference is a fully qualified identifier for referring @@ -5002,9 +5040,9 @@ type JobReference struct { NullFields []string `json:"-"` } -func (s *JobReference) MarshalJSON() ([]byte, error) { +func (s JobReference) MarshalJSON() ([]byte, error) { type NoMethod JobReference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JobStatistics: Statistics for a single job execution. @@ -5020,6 +5058,16 @@ type JobStatistics struct { // DataMaskingStatistics: Output only. Statistics for data-masking. Present // only for query and extract jobs. DataMaskingStatistics *DataMaskingStatistics `json:"dataMaskingStatistics,omitempty"` + // Edition: Output only. Name of edition corresponding to the reservation for + // this job at the time of this update. + // + // Possible values: + // "RESERVATION_EDITION_UNSPECIFIED" - Default value, which will be treated + // as ENTERPRISE. + // "STANDARD" - Standard edition. + // "ENTERPRISE" - Enterprise edition. + // "ENTERPRISE_PLUS" - Enterprise plus edition. + Edition string `json:"edition,omitempty"` // EndTime: Output only. End time of this job, in milliseconds since the epoch. // This field will be present whenever a job is in the DONE state. 
EndTime int64 `json:"endTime,omitempty,string"` @@ -5083,9 +5131,9 @@ type JobStatistics struct { NullFields []string `json:"-"` } -func (s *JobStatistics) MarshalJSON() ([]byte, error) { +func (s JobStatistics) MarshalJSON() ([]byte, error) { type NoMethod JobStatistics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *JobStatistics) UnmarshalJSON(data []byte) error { @@ -5123,9 +5171,9 @@ type JobStatisticsReservationUsage struct { NullFields []string `json:"-"` } -func (s *JobStatisticsReservationUsage) MarshalJSON() ([]byte, error) { +func (s JobStatisticsReservationUsage) MarshalJSON() ([]byte, error) { type NoMethod JobStatisticsReservationUsage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JobStatistics2: Statistics for a query job. @@ -5224,93 +5272,90 @@ type JobStatistics2 struct { SparkStatistics *SparkStatistics `json:"sparkStatistics,omitempty"` // StatementType: Output only. The type of query statement, if valid. Possible // values: * `SELECT`: `SELECT` - // (/bigquery/docs/reference/standard-sql/query-syntax#select_list) statement. - // * `ASSERT`: `ASSERT` - // (/bigquery/docs/reference/standard-sql/debugging-statements#assert) + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#select_list) + // statement. * `ASSERT`: `ASSERT` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/debugging-statements#assert) // statement. * `INSERT`: `INSERT` - // (/bigquery/docs/reference/standard-sql/dml-syntax#insert_statement) + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax#insert_statement) // statement. * `UPDATE`: `UPDATE` - // (/bigquery/docs/reference/standard-sql/query-syntax#update_statement) + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#update_statement) // statement. * `DELETE`: `DELETE` - // (/bigquery/docs/reference/standard-sql/data-manipulation-language) + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language) // statement. * `MERGE`: `MERGE` - // (/bigquery/docs/reference/standard-sql/data-manipulation-language) + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language) // statement. * `CREATE_TABLE`: `CREATE TABLE` - // (/bigquery/docs/reference/standard-sql/data-definition-language#create_table_ - // statement) statement, without `AS SELECT`. * `CREATE_TABLE_AS_SELECT`: - // `CREATE TABLE AS SELECT` - // (/bigquery/docs/reference/standard-sql/data-definition-language#query_stateme - // nt) statement. * `CREATE_VIEW`: `CREATE VIEW` - // (/bigquery/docs/reference/standard-sql/data-definition-language#create_view_s - // tatement) statement. * `CREATE_MODEL`: `CREATE MODEL` - // (/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create#create_mod - // el_statement) statement. * `CREATE_MATERIALIZED_VIEW`: `CREATE MATERIALIZED - // VIEW` - // (/bigquery/docs/reference/standard-sql/data-definition-language#create_materi - // alized_view_statement) statement. * `CREATE_FUNCTION`: `CREATE FUNCTION` - // (/bigquery/docs/reference/standard-sql/data-definition-language#create_functi - // on_statement) statement. 
* `CREATE_TABLE_FUNCTION`: `CREATE TABLE FUNCTION` - // (/bigquery/docs/reference/standard-sql/data-definition-language#create_table_ - // function_statement) statement. * `CREATE_PROCEDURE`: `CREATE PROCEDURE` - // (/bigquery/docs/reference/standard-sql/data-definition-language#create_proced - // ure) statement. * `CREATE_ROW_ACCESS_POLICY`: `CREATE ROW ACCESS POLICY` - // (/bigquery/docs/reference/standard-sql/data-definition-language#create_row_ac - // cess_policy_statement) statement. * `CREATE_SCHEMA`: `CREATE SCHEMA` - // (/bigquery/docs/reference/standard-sql/data-definition-language#create_schema - // _statement) statement. * `CREATE_SNAPSHOT_TABLE`: `CREATE SNAPSHOT TABLE` - // (/bigquery/docs/reference/standard-sql/data-definition-language#create_snapsh - // ot_table_statement) statement. * `CREATE_SEARCH_INDEX`: `CREATE SEARCH - // INDEX` - // (/bigquery/docs/reference/standard-sql/data-definition-language#create_search - // _index_statement) statement. * `DROP_TABLE`: `DROP TABLE` - // (/bigquery/docs/reference/standard-sql/data-definition-language#drop_table_st - // atement) statement. * `DROP_EXTERNAL_TABLE`: `DROP EXTERNAL TABLE` - // (/bigquery/docs/reference/standard-sql/data-definition-language#drop_external - // _table_statement) statement. * `DROP_VIEW`: `DROP VIEW` - // (/bigquery/docs/reference/standard-sql/data-definition-language#drop_view_sta - // tement) statement. * `DROP_MODEL`: `DROP MODEL` - // (/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-drop-model) + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_statement) + // statement, without `AS SELECT`. * `CREATE_TABLE_AS_SELECT`: `CREATE TABLE AS + // SELECT` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#query_statement) + // statement. * `CREATE_VIEW`: `CREATE VIEW` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_view_statement) + // statement. * `CREATE_MODEL`: `CREATE MODEL` + // (https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create#create_model_statement) + // statement. * `CREATE_MATERIALIZED_VIEW`: `CREATE MATERIALIZED VIEW` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_materialized_view_statement) + // statement. * `CREATE_FUNCTION`: `CREATE FUNCTION` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_function_statement) + // statement. * `CREATE_TABLE_FUNCTION`: `CREATE TABLE FUNCTION` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_function_statement) + // statement. * `CREATE_PROCEDURE`: `CREATE PROCEDURE` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_procedure) + // statement. * `CREATE_ROW_ACCESS_POLICY`: `CREATE ROW ACCESS POLICY` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_row_access_policy_statement) + // statement. * `CREATE_SCHEMA`: `CREATE SCHEMA` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_schema_statement) + // statement. * `CREATE_SNAPSHOT_TABLE`: `CREATE SNAPSHOT TABLE` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_snapshot_table_statement) + // statement. 
* `CREATE_SEARCH_INDEX`: `CREATE SEARCH INDEX` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_search_index_statement) + // statement. * `DROP_TABLE`: `DROP TABLE` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_table_statement) + // statement. * `DROP_EXTERNAL_TABLE`: `DROP EXTERNAL TABLE` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_external_table_statement) + // statement. * `DROP_VIEW`: `DROP VIEW` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_view_statement) + // statement. * `DROP_MODEL`: `DROP MODEL` + // (https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-drop-model) // statement. * `DROP_MATERIALIZED_VIEW`: `DROP MATERIALIZED VIEW` - // (/bigquery/docs/reference/standard-sql/data-definition-language#drop_material - // ized_view_statement) statement. * `DROP_FUNCTION` : `DROP FUNCTION` - // (/bigquery/docs/reference/standard-sql/data-definition-language#drop_function - // _statement) statement. * `DROP_TABLE_FUNCTION` : `DROP TABLE FUNCTION` - // (/bigquery/docs/reference/standard-sql/data-definition-language#drop_table_fu - // nction) statement. * `DROP_PROCEDURE`: `DROP PROCEDURE` - // (/bigquery/docs/reference/standard-sql/data-definition-language#drop_procedur - // e_statement) statement. * `DROP_SEARCH_INDEX`: `DROP SEARCH INDEX` - // (/bigquery/docs/reference/standard-sql/data-definition-language#drop_search_i - // ndex) statement. * `DROP_SCHEMA`: `DROP SCHEMA` - // (/bigquery/docs/reference/standard-sql/data-definition-language#drop_schema_s - // tatement) statement. * `DROP_SNAPSHOT_TABLE`: `DROP SNAPSHOT TABLE` - // (/bigquery/docs/reference/standard-sql/data-definition-language#drop_snapshot - // _table_statement) statement. * `DROP_ROW_ACCESS_POLICY`: [`DROP ALL] ROW - // ACCESS POLICY|POLICIES` - // (/bigquery/docs/reference/standard-sql/data-definition-language#drop_row_acce - // ss_policy_statement) statement. * `ALTER_TABLE`: `ALTER TABLE` - // (/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_s - // et_options_statement) statement. * `ALTER_VIEW`: `ALTER VIEW` - // (/bigquery/docs/reference/standard-sql/data-definition-language#alter_view_se - // t_options_statement) statement. * `ALTER_MATERIALIZED_VIEW`: `ALTER - // MATERIALIZED VIEW` - // (/bigquery/docs/reference/standard-sql/data-definition-language#alter_materia - // lized_view_set_options_statement) statement. * `ALTER_SCHEMA`: `ALTER - // SCHEMA` - // (/bigquery/docs/reference/standard-sql/data-definition-language#aalter_schema - // _set_options_statement) statement. * `SCRIPT`: `SCRIPT` - // (/bigquery/docs/reference/standard-sql/procedural-language). * - // `TRUNCATE_TABLE`: `TRUNCATE TABLE` - // (/bigquery/docs/reference/standard-sql/dml-syntax#truncate_table_statement) + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_materialized_view_statement) + // statement. * `DROP_FUNCTION` : `DROP FUNCTION` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_function_statement) + // statement. * `DROP_TABLE_FUNCTION` : `DROP TABLE FUNCTION` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_table_function) + // statement. 
* `DROP_PROCEDURE`: `DROP PROCEDURE` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_procedure_statement) + // statement. * `DROP_SEARCH_INDEX`: `DROP SEARCH INDEX` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_search_index) + // statement. * `DROP_SCHEMA`: `DROP SCHEMA` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_schema_statement) + // statement. * `DROP_SNAPSHOT_TABLE`: `DROP SNAPSHOT TABLE` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_snapshot_table_statement) + // statement. * `DROP_ROW_ACCESS_POLICY`: [`DROP ALL] ROW ACCESS + // POLICY|POLICIES` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_row_access_policy_statement) + // statement. * `ALTER_TABLE`: `ALTER TABLE` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_set_options_statement) + // statement. * `ALTER_VIEW`: `ALTER VIEW` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_view_set_options_statement) + // statement. * `ALTER_MATERIALIZED_VIEW`: `ALTER MATERIALIZED VIEW` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_materialized_view_set_options_statement) + // statement. * `ALTER_SCHEMA`: `ALTER SCHEMA` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#aalter_schema_set_options_statement) + // statement. * `SCRIPT`: `SCRIPT` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language). + // * `TRUNCATE_TABLE`: `TRUNCATE TABLE` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax#truncate_table_statement) // statement. * `CREATE_EXTERNAL_TABLE`: `CREATE EXTERNAL TABLE` - // (/bigquery/docs/reference/standard-sql/data-definition-language#create_extern - // al_table_statement) statement. * `EXPORT_DATA`: `EXPORT DATA` - // (/bigquery/docs/reference/standard-sql/other-statements#export_data_statement - // ) statement. * `EXPORT_MODEL`: `EXPORT MODEL` - // (/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-export-model) + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_external_table_statement) + // statement. * `EXPORT_DATA`: `EXPORT DATA` + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/other-statements#export_data_statement) + // statement. * `EXPORT_MODEL`: `EXPORT MODEL` + // (https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-export-model) // statement. * `LOAD_DATA`: `LOAD DATA` - // (/bigquery/docs/reference/standard-sql/other-statements#load_data_statement) + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/other-statements#load_data_statement) // statement. * `CALL`: `CALL` - // (/bigquery/docs/reference/standard-sql/procedural-language#call) statement. + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#call) + // statement. StatementType string `json:"statementType,omitempty"` // Timeline: Output only. Describes a timeline of job execution. 
Timeline []*QueryTimelineSample `json:"timeline,omitempty"` @@ -5355,9 +5400,9 @@ type JobStatistics2 struct { NullFields []string `json:"-"` } -func (s *JobStatistics2) MarshalJSON() ([]byte, error) { +func (s JobStatistics2) MarshalJSON() ([]byte, error) { type NoMethod JobStatistics2 - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JobStatistics2ReservationUsage: Job resource usage breakdown by reservation. @@ -5381,9 +5426,9 @@ type JobStatistics2ReservationUsage struct { NullFields []string `json:"-"` } -func (s *JobStatistics2ReservationUsage) MarshalJSON() ([]byte, error) { +func (s JobStatistics2ReservationUsage) MarshalJSON() ([]byte, error) { type NoMethod JobStatistics2ReservationUsage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JobStatistics3: Statistics for a load job. @@ -5418,9 +5463,9 @@ type JobStatistics3 struct { NullFields []string `json:"-"` } -func (s *JobStatistics3) MarshalJSON() ([]byte, error) { +func (s JobStatistics3) MarshalJSON() ([]byte, error) { type NoMethod JobStatistics3 - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JobStatistics4: Statistics for an extract job. @@ -5449,9 +5494,9 @@ type JobStatistics4 struct { NullFields []string `json:"-"` } -func (s *JobStatistics4) MarshalJSON() ([]byte, error) { +func (s JobStatistics4) MarshalJSON() ([]byte, error) { type NoMethod JobStatistics4 - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JobStatistics5: Statistics for a copy job. @@ -5474,9 +5519,9 @@ type JobStatistics5 struct { NullFields []string `json:"-"` } -func (s *JobStatistics5) MarshalJSON() ([]byte, error) { +func (s JobStatistics5) MarshalJSON() ([]byte, error) { type NoMethod JobStatistics5 - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type JobStatus struct { @@ -5504,9 +5549,9 @@ type JobStatus struct { NullFields []string `json:"-"` } -func (s *JobStatus) MarshalJSON() ([]byte, error) { +func (s JobStatus) MarshalJSON() ([]byte, error) { type NoMethod JobStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JoinRestrictionPolicy: Represents privacy policy associated with "join @@ -5543,9 +5588,9 @@ type JoinRestrictionPolicy struct { NullFields []string `json:"-"` } -func (s *JoinRestrictionPolicy) MarshalJSON() ([]byte, error) { +func (s JoinRestrictionPolicy) MarshalJSON() ([]byte, error) { type NoMethod JoinRestrictionPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JsonOptions: Json Options for load and make external tables. 
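Returning to the ConnectionProperty documentation expanded near the top of this file: the new service_account key names the identity a continuous query runs as, bounded by that account's IAM grants. A hedged sketch of attaching it to a query configuration, assuming the ConnectionProperties field on JobConfigurationQuery and a hypothetical service account:

package example

import bigquery "google.golang.org/api/bigquery/v2"

// continuousQueryConfig runs the query under the given service account,
// whose IAM permissions bound what the job can reach.
func continuousQueryConfig(query, serviceAccount string) *bigquery.JobConfiguration {
	return &bigquery.JobConfiguration{
		Query: &bigquery.JobConfigurationQuery{
			Query: query,
			ConnectionProperties: []*bigquery.ConnectionProperty{
				{Key: "service_account", Value: serviceAccount},
			},
		},
	}
}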
@@ -5567,9 +5612,9 @@ type JsonOptions struct { NullFields []string `json:"-"` } -func (s *JsonOptions) MarshalJSON() ([]byte, error) { +func (s JsonOptions) MarshalJSON() ([]byte, error) { type NoMethod JsonOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type JsonValue interface{} @@ -5599,9 +5644,9 @@ type LinkedDatasetMetadata struct { NullFields []string `json:"-"` } -func (s *LinkedDatasetMetadata) MarshalJSON() ([]byte, error) { +func (s LinkedDatasetMetadata) MarshalJSON() ([]byte, error) { type NoMethod LinkedDatasetMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LinkedDatasetSource: A dataset source type which refers to another BigQuery @@ -5623,9 +5668,9 @@ type LinkedDatasetSource struct { NullFields []string `json:"-"` } -func (s *LinkedDatasetSource) MarshalJSON() ([]byte, error) { +func (s LinkedDatasetSource) MarshalJSON() ([]byte, error) { type NoMethod LinkedDatasetSource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListModelsResponse: Response format for a single page when listing BigQuery @@ -5653,9 +5698,9 @@ type ListModelsResponse struct { NullFields []string `json:"-"` } -func (s *ListModelsResponse) MarshalJSON() ([]byte, error) { +func (s ListModelsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListModelsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListRoutinesResponse: Describes the format of a single result page when @@ -5684,9 +5729,9 @@ type ListRoutinesResponse struct { NullFields []string `json:"-"` } -func (s *ListRoutinesResponse) MarshalJSON() ([]byte, error) { +func (s ListRoutinesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListRoutinesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListRowAccessPoliciesResponse: Response message for the @@ -5712,9 +5757,9 @@ type ListRowAccessPoliciesResponse struct { NullFields []string `json:"-"` } -func (s *ListRowAccessPoliciesResponse) MarshalJSON() ([]byte, error) { +func (s ListRowAccessPoliciesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListRowAccessPoliciesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LoadQueryStatistics: Statistics for a LOAD query. @@ -5753,9 +5798,9 @@ type LoadQueryStatistics struct { NullFields []string `json:"-"` } -func (s *LoadQueryStatistics) MarshalJSON() ([]byte, error) { +func (s LoadQueryStatistics) MarshalJSON() ([]byte, error) { type NoMethod LoadQueryStatistics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LocationMetadata: BigQuery-specific metadata about a location. 
This will be @@ -5779,9 +5824,9 @@ type LocationMetadata struct { NullFields []string `json:"-"` } -func (s *LocationMetadata) MarshalJSON() ([]byte, error) { +func (s LocationMetadata) MarshalJSON() ([]byte, error) { type NoMethod LocationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MaterializedView: A materialized view considered for a query job. @@ -5841,9 +5886,9 @@ type MaterializedView struct { NullFields []string `json:"-"` } -func (s *MaterializedView) MarshalJSON() ([]byte, error) { +func (s MaterializedView) MarshalJSON() ([]byte, error) { type NoMethod MaterializedView - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MaterializedViewDefinition: Definition and configuration of a materialized @@ -5880,9 +5925,9 @@ type MaterializedViewDefinition struct { NullFields []string `json:"-"` } -func (s *MaterializedViewDefinition) MarshalJSON() ([]byte, error) { +func (s MaterializedViewDefinition) MarshalJSON() ([]byte, error) { type NoMethod MaterializedViewDefinition - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MaterializedViewStatistics: Statistics of materialized views considered in a @@ -5906,9 +5951,9 @@ type MaterializedViewStatistics struct { NullFields []string `json:"-"` } -func (s *MaterializedViewStatistics) MarshalJSON() ([]byte, error) { +func (s MaterializedViewStatistics) MarshalJSON() ([]byte, error) { type NoMethod MaterializedViewStatistics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MaterializedViewStatus: Status of a materialized view. The last refresh @@ -5935,9 +5980,9 @@ type MaterializedViewStatus struct { NullFields []string `json:"-"` } -func (s *MaterializedViewStatus) MarshalJSON() ([]byte, error) { +func (s MaterializedViewStatus) MarshalJSON() ([]byte, error) { type NoMethod MaterializedViewStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MetadataCacheStatistics: Statistics for metadata caching in BigLake tables. @@ -5958,21 +6003,20 @@ type MetadataCacheStatistics struct { NullFields []string `json:"-"` } -func (s *MetadataCacheStatistics) MarshalJSON() ([]byte, error) { +func (s MetadataCacheStatistics) MarshalJSON() ([]byte, error) { type NoMethod MetadataCacheStatistics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MlStatistics: Job statistics specific to a BigQuery ML training job. type MlStatistics struct { // HparamTrials: Output only. Trials of a hyperparameter tuning job - // (/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overvie - // w) sorted by trial_id. + // (https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) + // sorted by trial_id. HparamTrials []*HparamTuningTrial `json:"hparamTrials,omitempty"` // IterationResults: Results for all completed iterations. 
Empty for // hyperparameter tuning jobs - // (/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overvie - // w). + // (https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview). IterationResults []*IterationResult `json:"iterationResults,omitempty"` // MaxIterations: Output only. Maximum number of iterations specified as // max_iterations in the 'CREATE MODEL' query. The actual number of iterations @@ -6005,8 +6049,9 @@ type MlStatistics struct { // "RANDOM_FOREST_CLASSIFIER" - Random forest classifier model. // "TENSORFLOW_LITE" - An imported TensorFlow Lite model. // "ONNX" - An imported ONNX model. - // "TRANSFORM_ONLY" - Model to capture the manual preprocessing logic in the - // transform clause. + // "TRANSFORM_ONLY" - Model to capture the columns and logic in the TRANSFORM + // clause along with statistics useful for ML analytic functions. + // "CONTRIBUTION_ANALYSIS" - The contribution analysis model. ModelType string `json:"modelType,omitempty"` // TrainingType: Output only. Training type of the job. // @@ -6014,8 +6059,8 @@ type MlStatistics struct { // "TRAINING_TYPE_UNSPECIFIED" - Unspecified training type. // "SINGLE_TRAINING" - Single training with fixed parameter space. // "HPARAM_TUNING" - [Hyperparameter tuning - // training](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tunin - // g-overview). + // training](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bi + // gqueryml-syntax-hp-tuning-overview). TrainingType string `json:"trainingType,omitempty"` // ForceSendFields is a list of field names (e.g. "HparamTrials") to // unconditionally include in API requests. By default, fields with empty or @@ -6030,9 +6075,9 @@ type MlStatistics struct { NullFields []string `json:"-"` } -func (s *MlStatistics) MarshalJSON() ([]byte, error) { +func (s MlStatistics) MarshalJSON() ([]byte, error) { type NoMethod MlStatistics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type Model struct { @@ -6043,11 +6088,10 @@ type Model struct { CreationTime int64 `json:"creationTime,omitempty,string"` // DefaultTrialId: Output only. The default trial_id to use in TVFs when the // trial_id is not passed in. For single-objective hyperparameter tuning - // (/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overvie - // w) models, this is the best trial ID. For multi-objective hyperparameter - // tuning - // (/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overvie - // w) models, this is the smallest trial ID among all Pareto optimal trials. + // (https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) + // models, this is the best trial ID. For multi-objective hyperparameter tuning + // (https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) + // models, this is the smallest trial ID among all Pareto optimal trials. DefaultTrialId int64 `json:"defaultTrialId,omitempty,string"` // Description: Optional. A user-friendly description of this model. Description string `json:"description,omitempty"` @@ -6074,8 +6118,8 @@ type Model struct { // model. HparamSearchSpaces *HparamSearchSpaces `json:"hparamSearchSpaces,omitempty"` // HparamTrials: Output only. 
Trials of a hyperparameter tuning - // (/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overvie - // w) model sorted by trial_id. + // (https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) + // model sorted by trial_id. HparamTrials []*HparamTuningTrial `json:"hparamTrials,omitempty"` // LabelColumns: Output only. Label columns that were used to train this model. // The output of the model will have a "predicted_" prefix to these columns. @@ -6122,15 +6166,16 @@ type Model struct { // "RANDOM_FOREST_CLASSIFIER" - Random forest classifier model. // "TENSORFLOW_LITE" - An imported TensorFlow Lite model. // "ONNX" - An imported ONNX model. - // "TRANSFORM_ONLY" - Model to capture the manual preprocessing logic in the - // transform clause. + // "TRANSFORM_ONLY" - Model to capture the columns and logic in the TRANSFORM + // clause along with statistics useful for ML analytic functions. + // "CONTRIBUTION_ANALYSIS" - The contribution analysis model. ModelType string `json:"modelType,omitempty"` // OptimalTrialIds: Output only. For single-objective hyperparameter tuning - // (/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overvie - // w) models, it only contains the best trial. For multi-objective - // hyperparameter tuning - // (/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overvie - // w) models, it contains all Pareto optimal trials sorted by trial_id. + // (https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) + // models, it only contains the best trial. For multi-objective hyperparameter + // tuning + // (https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) + // models, it contains all Pareto optimal trials sorted by trial_id. OptimalTrialIds googleapi.Int64s `json:"optimalTrialIds,omitempty"` // RemoteModelInfo: Output only. Remote model info RemoteModelInfo *RemoteModelInfo `json:"remoteModelInfo,omitempty"` @@ -6158,9 +6203,9 @@ type Model struct { NullFields []string `json:"-"` } -func (s *Model) MarshalJSON() ([]byte, error) { +func (s Model) MarshalJSON() ([]byte, error) { type NoMethod Model - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ModelDefinition struct { @@ -6181,9 +6226,9 @@ type ModelDefinition struct { NullFields []string `json:"-"` } -func (s *ModelDefinition) MarshalJSON() ([]byte, error) { +func (s ModelDefinition) MarshalJSON() ([]byte, error) { type NoMethod ModelDefinition - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ModelDefinitionModelOptions: Deprecated. @@ -6204,18 +6249,18 @@ type ModelDefinitionModelOptions struct { NullFields []string `json:"-"` } -func (s *ModelDefinitionModelOptions) MarshalJSON() ([]byte, error) { +func (s ModelDefinitionModelOptions) MarshalJSON() ([]byte, error) { type NoMethod ModelDefinitionModelOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ModelExtractOptions: Options related to model extraction. type ModelExtractOptions struct { // TrialId: The 1-based ID of the trial to be exported from a hyperparameter // tuning model. 
If not specified, the trial with id = Model - // (/bigquery/docs/reference/rest/v2/models#resource:-model).defaultTrialId is - // exported. This field is ignored for models not trained with hyperparameter - // tuning. + // (https://cloud.google.com/bigquery/docs/reference/rest/v2/models#resource:-model).defaultTrialId + // is exported. This field is ignored for models not trained with + // hyperparameter tuning. TrialId int64 `json:"trialId,omitempty,string"` // ForceSendFields is a list of field names (e.g. "TrialId") to unconditionally // include in API requests. By default, fields with empty or default values are @@ -6230,9 +6275,9 @@ type ModelExtractOptions struct { NullFields []string `json:"-"` } -func (s *ModelExtractOptions) MarshalJSON() ([]byte, error) { +func (s ModelExtractOptions) MarshalJSON() ([]byte, error) { type NoMethod ModelExtractOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ModelReference: Id path of a model. @@ -6258,9 +6303,9 @@ type ModelReference struct { NullFields []string `json:"-"` } -func (s *ModelReference) MarshalJSON() ([]byte, error) { +func (s ModelReference) MarshalJSON() ([]byte, error) { type NoMethod ModelReference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MultiClassClassificationMetrics: Evaluation metrics for multi-class @@ -6284,9 +6329,9 @@ type MultiClassClassificationMetrics struct { NullFields []string `json:"-"` } -func (s *MultiClassClassificationMetrics) MarshalJSON() ([]byte, error) { +func (s MultiClassClassificationMetrics) MarshalJSON() ([]byte, error) { type NoMethod MultiClassClassificationMetrics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ParquetOptions: Parquet Options for load and make external tables. @@ -6320,9 +6365,9 @@ type ParquetOptions struct { NullFields []string `json:"-"` } -func (s *ParquetOptions) MarshalJSON() ([]byte, error) { +func (s ParquetOptions) MarshalJSON() ([]byte, error) { type NoMethod ParquetOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PartitionSkew: Partition skew detailed information. @@ -6342,14 +6387,14 @@ type PartitionSkew struct { NullFields []string `json:"-"` } -func (s *PartitionSkew) MarshalJSON() ([]byte, error) { +func (s PartitionSkew) MarshalJSON() ([]byte, error) { type NoMethod PartitionSkew - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PartitionedColumn: The partitioning column information. type PartitionedColumn struct { - // Field: Output only. The name of the partition column. + // Field: Required. The name of the partition column. Field string `json:"field,omitempty"` // ForceSendFields is a list of field names (e.g. "Field") to unconditionally // include in API requests. 
By default, fields with empty or default values are @@ -6364,17 +6409,23 @@ type PartitionedColumn struct { NullFields []string `json:"-"` } -func (s *PartitionedColumn) MarshalJSON() ([]byte, error) { +func (s PartitionedColumn) MarshalJSON() ([]byte, error) { type NoMethod PartitionedColumn - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PartitioningDefinition: The partitioning information, which includes managed -// table and external table partition information. +// table, external table and metastore partitioned table partition information. type PartitioningDefinition struct { - // PartitionedColumn: Output only. Details about each partitioning column. - // BigQuery native tables only support 1 partitioning column. Other table types - // may support 0, 1 or more partitioning columns. + // PartitionedColumn: Optional. Details about each partitioning column. This + // field is output only for all partitioning types other than metastore + // partitioned tables. BigQuery native tables only support 1 partitioning + // column. Other table types may support 0, 1 or more partitioning columns. For + // metastore partitioned tables, the order must match the definition order in + // the Hive Metastore, where it must match the physical layout of the table. + // For example, CREATE TABLE a_table(id BIGINT, name STRING) PARTITIONED BY + // (city STRING, state STRING). In this case the values must be ['city', + // 'state'] in that order. PartitionedColumn []*PartitionedColumn `json:"partitionedColumn,omitempty"` // ForceSendFields is a list of field names (e.g. "PartitionedColumn") to // unconditionally include in API requests. By default, fields with empty or @@ -6389,9 +6440,9 @@ type PartitioningDefinition struct { NullFields []string `json:"-"` } -func (s *PartitioningDefinition) MarshalJSON() ([]byte, error) { +func (s PartitioningDefinition) MarshalJSON() ([]byte, error) { type NoMethod PartitioningDefinition - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PerformanceInsights: Performance insights for the job. 
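The ordering requirement just documented for metastore partitioned tables is easy to get wrong. A short sketch of the Hive example quoted above, `PARTITIONED BY (city STRING, state STRING)`, assuming the vendored `google.golang.org/api/bigquery/v2` package this file belongs to:

```go
package main

import (
	"encoding/json"
	"fmt"

	bigquery "google.golang.org/api/bigquery/v2"
)

func main() {
	// The column order must match the Hive Metastore definition order,
	// which in turn must match the table's physical layout.
	pd := &bigquery.PartitioningDefinition{
		PartitionedColumn: []*bigquery.PartitionedColumn{
			{Field: "city"},
			{Field: "state"},
		},
	}
	b, _ := json.Marshal(pd)
	fmt.Println(string(b)) // {"partitionedColumn":[{"field":"city"},{"field":"state"}]}
}
```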
@@ -6420,9 +6471,9 @@ type PerformanceInsights struct { NullFields []string `json:"-"` } -func (s *PerformanceInsights) MarshalJSON() ([]byte, error) { +func (s PerformanceInsights) MarshalJSON() ([]byte, error) { type NoMethod PerformanceInsights - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Policy: An Identity and Access Management (IAM) policy, which specifies @@ -6512,9 +6563,9 @@ type Policy struct { NullFields []string `json:"-"` } -func (s *Policy) MarshalJSON() ([]byte, error) { +func (s Policy) MarshalJSON() ([]byte, error) { type NoMethod Policy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PrincipalComponentInfo: Principal component infos, used only for eigen @@ -6547,9 +6598,9 @@ type PrincipalComponentInfo struct { NullFields []string `json:"-"` } -func (s *PrincipalComponentInfo) MarshalJSON() ([]byte, error) { +func (s PrincipalComponentInfo) MarshalJSON() ([]byte, error) { type NoMethod PrincipalComponentInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *PrincipalComponentInfo) UnmarshalJSON(data []byte) error { @@ -6597,9 +6648,9 @@ type PrivacyPolicy struct { NullFields []string `json:"-"` } -func (s *PrivacyPolicy) MarshalJSON() ([]byte, error) { +func (s PrivacyPolicy) MarshalJSON() ([]byte, error) { type NoMethod PrivacyPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ProjectList: Response object of ListProjects @@ -6631,9 +6682,9 @@ type ProjectList struct { NullFields []string `json:"-"` } -func (s *ProjectList) MarshalJSON() ([]byte, error) { +func (s ProjectList) MarshalJSON() ([]byte, error) { type NoMethod ProjectList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ProjectListProjects: Information about a single project. @@ -6662,9 +6713,9 @@ type ProjectListProjects struct { NullFields []string `json:"-"` } -func (s *ProjectListProjects) MarshalJSON() ([]byte, error) { +func (s ProjectListProjects) MarshalJSON() ([]byte, error) { type NoMethod ProjectListProjects - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ProjectReference: A unique reference to a project. @@ -6685,9 +6736,9 @@ type ProjectReference struct { NullFields []string `json:"-"` } -func (s *ProjectReference) MarshalJSON() ([]byte, error) { +func (s ProjectReference) MarshalJSON() ([]byte, error) { type NoMethod ProjectReference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // QueryInfo: Query optimization information for a QUERY job. 
@@ -6707,9 +6758,9 @@ type QueryInfo struct { NullFields []string `json:"-"` } -func (s *QueryInfo) MarshalJSON() ([]byte, error) { +func (s QueryInfo) MarshalJSON() ([]byte, error) { type NoMethod QueryInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // QueryParameter: A parameter given to a query. @@ -6734,9 +6785,9 @@ type QueryParameter struct { NullFields []string `json:"-"` } -func (s *QueryParameter) MarshalJSON() ([]byte, error) { +func (s QueryParameter) MarshalJSON() ([]byte, error) { type NoMethod QueryParameter - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // QueryParameterType: The type of a query parameter. @@ -6764,9 +6815,9 @@ type QueryParameterType struct { NullFields []string `json:"-"` } -func (s *QueryParameterType) MarshalJSON() ([]byte, error) { +func (s QueryParameterType) MarshalJSON() ([]byte, error) { type NoMethod QueryParameterType - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // QueryParameterTypeStructTypes: The type of a struct parameter. @@ -6790,9 +6841,9 @@ type QueryParameterTypeStructTypes struct { NullFields []string `json:"-"` } -func (s *QueryParameterTypeStructTypes) MarshalJSON() ([]byte, error) { +func (s QueryParameterTypeStructTypes) MarshalJSON() ([]byte, error) { type NoMethod QueryParameterTypeStructTypes - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // QueryParameterValue: The value of a query parameter. @@ -6818,9 +6869,9 @@ type QueryParameterValue struct { NullFields []string `json:"-"` } -func (s *QueryParameterValue) MarshalJSON() ([]byte, error) { +func (s QueryParameterValue) MarshalJSON() ([]byte, error) { type NoMethod QueryParameterValue - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // QueryRequest: Describes the format of the jobs.query request. @@ -6850,8 +6901,8 @@ type QueryRequest struct { // FormatOptions: Optional. Output format adjustments. FormatOptions *DataFormatOptions `json:"formatOptions,omitempty"` // JobCreationMode: Optional. If not set, jobs are always required. If set, the - // query request will follow the behavior described JobCreationMode. This - // feature is not yet available. Jobs will always be created. + // query request will follow the behavior described JobCreationMode. Preview + // (https://cloud.google.com/products/#product-launch-stages) // // Possible values: // "JOB_CREATION_MODE_UNSPECIFIED" - If unspecified JOB_CREATION_REQUIRED is @@ -6956,9 +7007,9 @@ type QueryRequest struct { NullFields []string `json:"-"` } -func (s *QueryRequest) MarshalJSON() ([]byte, error) { +func (s QueryRequest) MarshalJSON() ([]byte, error) { type NoMethod QueryRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type QueryResponse struct { @@ -6978,18 +7029,18 @@ type QueryResponse struct { // are present, this will always be true. If this is false, totalRows will not // be available. 
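Taken together, the `JobCreationMode` change above and the `JobCreationReason`/`JobReference`/`QueryId` changes in the `QueryResponse` hunks that follow document the preview short-query flow: a caller may let BigQuery skip job creation, and must then handle a response that carries a `QueryId` but no `JobReference`. A hedged sketch of that branch (the project ID is a placeholder; field and enum names come from these hunks):

```go
package main

import (
	"context"
	"fmt"
	"log"

	bigquery "google.golang.org/api/bigquery/v2"
)

func main() {
	ctx := context.Background()
	svc, err := bigquery.NewService(ctx) // Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	resp, err := svc.Jobs.Query("my-project", &bigquery.QueryRequest{
		Query:           "SELECT 1",
		JobCreationMode: "JOB_CREATION_OPTIONAL", // preview: job creation may be skipped
	}).Do()
	if err != nil {
		log.Fatal(err)
	}
	if resp.JobReference == nil {
		// No job was created: the query is identified only by its QueryId.
		fmt.Println("jobless query:", resp.QueryId)
	} else {
		// A job was created; JobCreationReason (when present) says why.
		fmt.Println("job:", resp.JobReference.JobId)
	}
}
```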
JobComplete bool `json:"jobComplete,omitempty"` - // JobCreationReason: Optional. Only relevant when a job_reference is present - // in the response. If job_reference is not present it will always be unset. - // When job_reference is present, this field should be interpreted as follows: - // If set, it will provide the reason of why a Job was created. If not set, it - // should be treated as the default: REQUESTED. This feature is not yet - // available. Jobs will always be created. + // JobCreationReason: Optional. The reason why a Job was created. Only relevant + // when a job_reference is present in the response. If job_reference is not + // present it will always be unset. Preview + // (https://cloud.google.com/products/#product-launch-stages) JobCreationReason *JobCreationReason `json:"jobCreationReason,omitempty"` // JobReference: Reference to the Job that was created to run the query. This // field will be present even if the original request timed out, in which case // GetQueryResults can be used to read the results once the query has // completed. Since this API only returns the first page of results, subsequent - // pages can be fetched via the same mechanism (GetQueryResults). + // pages can be fetched via the same mechanism (GetQueryResults). If + // job_creation_mode was set to `JOB_CREATION_OPTIONAL` and the query completes + // without creating a job, this field will be empty. JobReference *JobReference `json:"jobReference,omitempty"` // Kind: The resource type. Kind string `json:"kind,omitempty"` @@ -7003,9 +7054,8 @@ type QueryResponse struct { // method. For more information, see Paging through table data // (https://cloud.google.com/bigquery/docs/paging-results). PageToken string `json:"pageToken,omitempty"` - // QueryId: Query ID for the completed query. This ID will be auto-generated. - // This field is not yet available and it is currently not guaranteed to be - // populated. + // QueryId: Auto-generated ID for the query. Preview + // (https://cloud.google.com/products/#product-launch-stages) QueryId string `json:"queryId,omitempty"` // Rows: An object with as many results as can be contained within the maximum // permitted reply size. To get any additional rows, you can call @@ -7040,9 +7090,9 @@ type QueryResponse struct { NullFields []string `json:"-"` } -func (s *QueryResponse) MarshalJSON() ([]byte, error) { +func (s QueryResponse) MarshalJSON() ([]byte, error) { type NoMethod QueryResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // QueryTimelineSample: Summary of the state of query execution at a given @@ -7078,15 +7128,14 @@ type QueryTimelineSample struct { NullFields []string `json:"-"` } -func (s *QueryTimelineSample) MarshalJSON() ([]byte, error) { +func (s QueryTimelineSample) MarshalJSON() ([]byte, error) { type NoMethod QueryTimelineSample - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RangePartitioning struct { - // Field: Required. [Experimental] The table is partitioned by this field. The - // field must be a top-level NULLABLE/REQUIRED field. The only supported type - // is INTEGER/INT64. + // Field: Required. The name of the column to partition the table on. It must + // be a top-level, INT64 column whose mode is NULLABLE or REQUIRED. 
Field string `json:"field,omitempty"` // Range: [Experimental] Defines the ranges for range partitioning. Range *RangePartitioningRange `json:"range,omitempty"` @@ -7103,9 +7152,9 @@ type RangePartitioning struct { NullFields []string `json:"-"` } -func (s *RangePartitioning) MarshalJSON() ([]byte, error) { +func (s RangePartitioning) MarshalJSON() ([]byte, error) { type NoMethod RangePartitioning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RangePartitioningRange: [Experimental] Defines the ranges for range @@ -7130,9 +7179,9 @@ type RangePartitioningRange struct { NullFields []string `json:"-"` } -func (s *RangePartitioningRange) MarshalJSON() ([]byte, error) { +func (s RangePartitioningRange) MarshalJSON() ([]byte, error) { type NoMethod RangePartitioningRange - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RangeValue: Represents the value of a range. @@ -7156,9 +7205,9 @@ type RangeValue struct { NullFields []string `json:"-"` } -func (s *RangeValue) MarshalJSON() ([]byte, error) { +func (s RangeValue) MarshalJSON() ([]byte, error) { type NoMethod RangeValue - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RankingMetrics: Evaluation metrics used by weighted-ALS models specified by @@ -7193,9 +7242,9 @@ type RankingMetrics struct { NullFields []string `json:"-"` } -func (s *RankingMetrics) MarshalJSON() ([]byte, error) { +func (s RankingMetrics) MarshalJSON() ([]byte, error) { type NoMethod RankingMetrics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *RankingMetrics) UnmarshalJSON(data []byte) error { @@ -7244,9 +7293,9 @@ type RegressionMetrics struct { NullFields []string `json:"-"` } -func (s *RegressionMetrics) MarshalJSON() ([]byte, error) { +func (s RegressionMetrics) MarshalJSON() ([]byte, error) { type NoMethod RegressionMetrics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *RegressionMetrics) UnmarshalJSON(data []byte) error { @@ -7304,9 +7353,9 @@ type RemoteFunctionOptions struct { NullFields []string `json:"-"` } -func (s *RemoteFunctionOptions) MarshalJSON() ([]byte, error) { +func (s RemoteFunctionOptions) MarshalJSON() ([]byte, error) { type NoMethod RemoteFunctionOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RemoteModelInfo: Remote Model Info @@ -7362,9 +7411,9 @@ type RemoteModelInfo struct { NullFields []string `json:"-"` } -func (s *RemoteModelInfo) MarshalJSON() ([]byte, error) { +func (s RemoteModelInfo) MarshalJSON() ([]byte, error) { type NoMethod RemoteModelInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RestrictionConfig struct { @@ -7373,8 +7422,8 @@ type RestrictionConfig struct { // Possible values: // "RESTRICTION_TYPE_UNSPECIFIED" - Should never be used. // "RESTRICTED_DATA_EGRESS" - Restrict data egress. 
See [Data - // egress](/bigquery/docs/analytics-hub-introduction#data_egress) for more - // details. + // egress](https://cloud.google.com/bigquery/docs/analytics-hub-introduction#dat + // a_egress) for more details. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "Type") to unconditionally // include in API requests. By default, fields with empty or default values are @@ -7389,9 +7438,9 @@ type RestrictionConfig struct { NullFields []string `json:"-"` } -func (s *RestrictionConfig) MarshalJSON() ([]byte, error) { +func (s RestrictionConfig) MarshalJSON() ([]byte, error) { type NoMethod RestrictionConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Routine: A user-defined function or a stored procedure. @@ -7529,9 +7578,9 @@ type Routine struct { NullFields []string `json:"-"` } -func (s *Routine) MarshalJSON() ([]byte, error) { +func (s Routine) MarshalJSON() ([]byte, error) { type NoMethod Routine - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RoutineReference: Id path of a routine. @@ -7557,9 +7606,9 @@ type RoutineReference struct { NullFields []string `json:"-"` } -func (s *RoutineReference) MarshalJSON() ([]byte, error) { +func (s RoutineReference) MarshalJSON() ([]byte, error) { type NoMethod RoutineReference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Row: A single row in the confusion matrix. @@ -7581,9 +7630,9 @@ type Row struct { NullFields []string `json:"-"` } -func (s *Row) MarshalJSON() ([]byte, error) { +func (s Row) MarshalJSON() ([]byte, error) { type NoMethod Row - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RowAccessPolicy: Represents access on a subset of rows on the specified @@ -7621,9 +7670,9 @@ type RowAccessPolicy struct { NullFields []string `json:"-"` } -func (s *RowAccessPolicy) MarshalJSON() ([]byte, error) { +func (s RowAccessPolicy) MarshalJSON() ([]byte, error) { type NoMethod RowAccessPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RowAccessPolicyReference: Id path of a row access policy. @@ -7653,9 +7702,9 @@ type RowAccessPolicyReference struct { NullFields []string `json:"-"` } -func (s *RowAccessPolicyReference) MarshalJSON() ([]byte, error) { +func (s RowAccessPolicyReference) MarshalJSON() ([]byte, error) { type NoMethod RowAccessPolicyReference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RowLevelSecurityStatistics: Statistics for row-level security. 
@@ -7676,9 +7725,9 @@ type RowLevelSecurityStatistics struct { NullFields []string `json:"-"` } -func (s *RowLevelSecurityStatistics) MarshalJSON() ([]byte, error) { +func (s RowLevelSecurityStatistics) MarshalJSON() ([]byte, error) { type NoMethod RowLevelSecurityStatistics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ScriptOptions: Options related to script execution. @@ -7710,9 +7759,9 @@ type ScriptOptions struct { NullFields []string `json:"-"` } -func (s *ScriptOptions) MarshalJSON() ([]byte, error) { +func (s ScriptOptions) MarshalJSON() ([]byte, error) { type NoMethod ScriptOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ScriptStackFrame: Represents the location of the statement/expression being @@ -7755,9 +7804,9 @@ type ScriptStackFrame struct { NullFields []string `json:"-"` } -func (s *ScriptStackFrame) MarshalJSON() ([]byte, error) { +func (s ScriptStackFrame) MarshalJSON() ([]byte, error) { type NoMethod ScriptStackFrame - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ScriptStatistics: Job statistics specific to the child job of a script. @@ -7787,9 +7836,9 @@ type ScriptStatistics struct { NullFields []string `json:"-"` } -func (s *ScriptStatistics) MarshalJSON() ([]byte, error) { +func (s ScriptStatistics) MarshalJSON() ([]byte, error) { type NoMethod ScriptStatistics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SearchStatistics: Statistics for a search query. Populated as part of @@ -7826,9 +7875,9 @@ type SearchStatistics struct { NullFields []string `json:"-"` } -func (s *SearchStatistics) MarshalJSON() ([]byte, error) { +func (s SearchStatistics) MarshalJSON() ([]byte, error) { type NoMethod SearchStatistics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SerDeInfo: Serializer and deserializer information. @@ -7856,9 +7905,9 @@ type SerDeInfo struct { NullFields []string `json:"-"` } -func (s *SerDeInfo) MarshalJSON() ([]byte, error) { +func (s SerDeInfo) MarshalJSON() ([]byte, error) { type NoMethod SerDeInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SessionInfo: [Preview] Information related to sessions. @@ -7878,9 +7927,9 @@ type SessionInfo struct { NullFields []string `json:"-"` } -func (s *SessionInfo) MarshalJSON() ([]byte, error) { +func (s SessionInfo) MarshalJSON() ([]byte, error) { type NoMethod SessionInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SetIamPolicyRequest: Request message for `SetIamPolicy` method. 
@@ -7907,9 +7956,9 @@ type SetIamPolicyRequest struct { NullFields []string `json:"-"` } -func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { +func (s SetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod SetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SkewSource: Details about source stages which produce skewed data. @@ -7929,9 +7978,9 @@ type SkewSource struct { NullFields []string `json:"-"` } -func (s *SkewSource) MarshalJSON() ([]byte, error) { +func (s SkewSource) MarshalJSON() ([]byte, error) { type NoMethod SkewSource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SnapshotDefinition: Information about base table and snapshot time of the @@ -7956,9 +8005,9 @@ type SnapshotDefinition struct { NullFields []string `json:"-"` } -func (s *SnapshotDefinition) MarshalJSON() ([]byte, error) { +func (s SnapshotDefinition) MarshalJSON() ([]byte, error) { type NoMethod SnapshotDefinition - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SparkLoggingInfo: Spark job logs can be filtered by these fields in Cloud @@ -7981,9 +8030,9 @@ type SparkLoggingInfo struct { NullFields []string `json:"-"` } -func (s *SparkLoggingInfo) MarshalJSON() ([]byte, error) { +func (s SparkLoggingInfo) MarshalJSON() ([]byte, error) { type NoMethod SparkLoggingInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SparkOptions: Options for a user-defined Spark routine. @@ -8043,9 +8092,9 @@ type SparkOptions struct { NullFields []string `json:"-"` } -func (s *SparkOptions) MarshalJSON() ([]byte, error) { +func (s SparkOptions) MarshalJSON() ([]byte, error) { type NoMethod SparkOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SparkStatistics: Statistics for a BigSpark query. 
Populated as part of @@ -8097,9 +8146,9 @@ type SparkStatistics struct { NullFields []string `json:"-"` } -func (s *SparkStatistics) MarshalJSON() ([]byte, error) { +func (s SparkStatistics) MarshalJSON() ([]byte, error) { type NoMethod SparkStatistics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StagePerformanceChangeInsight: Performance insights compared to the previous @@ -8122,9 +8171,9 @@ type StagePerformanceChangeInsight struct { NullFields []string `json:"-"` } -func (s *StagePerformanceChangeInsight) MarshalJSON() ([]byte, error) { +func (s StagePerformanceChangeInsight) MarshalJSON() ([]byte, error) { type NoMethod StagePerformanceChangeInsight - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StagePerformanceStandaloneInsight: Standalone performance insights for a @@ -8157,9 +8206,9 @@ type StagePerformanceStandaloneInsight struct { NullFields []string `json:"-"` } -func (s *StagePerformanceStandaloneInsight) MarshalJSON() ([]byte, error) { +func (s StagePerformanceStandaloneInsight) MarshalJSON() ([]byte, error) { type NoMethod StagePerformanceStandaloneInsight - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StandardSqlDataType: The data type of a variable such as a function @@ -8167,7 +8216,8 @@ func (s *StagePerformanceStandaloneInsight) MarshalJSON() ([]byte, error) { // "typeKind": "ARRAY", "arrayElementType": {"typeKind": "STRING"} } * STRUCT>: // { "typeKind": "STRUCT", "structType": { "fields": [ { "name": "x", "type": // {"typeKind": "STRING"} }, { "name": "y", "type": { "typeKind": "ARRAY", -// "arrayElementType": {"typeKind": "DATE"} } } ] } } +// "arrayElementType": {"typeKind": "DATE"} } } ] } } * RANGE: { "typeKind": +// "RANGE", "rangeElementType": {"typeKind": "DATE"} } type StandardSqlDataType struct { // ArrayElementType: The type of the array's elements, if type_kind = "ARRAY". ArrayElementType *StandardSqlDataType `json:"arrayElementType,omitempty"` @@ -8216,9 +8266,9 @@ type StandardSqlDataType struct { NullFields []string `json:"-"` } -func (s *StandardSqlDataType) MarshalJSON() ([]byte, error) { +func (s StandardSqlDataType) MarshalJSON() ([]byte, error) { type NoMethod StandardSqlDataType - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StandardSqlField: A field or a column. @@ -8242,9 +8292,9 @@ type StandardSqlField struct { NullFields []string `json:"-"` } -func (s *StandardSqlField) MarshalJSON() ([]byte, error) { +func (s StandardSqlField) MarshalJSON() ([]byte, error) { type NoMethod StandardSqlField - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StandardSqlStructType: The representation of a SQL STRUCT type. 
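The RANGE example newly added to the `StandardSqlDataType` documentation maps onto the generated struct as follows. A sketch; it assumes the `RangeElementType` field mirroring the documented `rangeElementType` key is present in this version of the type:

```go
package main

import (
	"encoding/json"
	"fmt"

	bigquery "google.golang.org/api/bigquery/v2"
)

func main() {
	// RANGE<DATE>, as in the doc comment's example.
	t := &bigquery.StandardSqlDataType{
		TypeKind:         "RANGE",
		RangeElementType: &bigquery.StandardSqlDataType{TypeKind: "DATE"},
	}
	b, _ := json.Marshal(t)
	// Emits the documented shape, e.g.
	// {"rangeElementType":{"typeKind":"DATE"},"typeKind":"RANGE"}
	fmt.Println(string(b))
}
```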
@@ -8264,9 +8314,9 @@ type StandardSqlStructType struct { NullFields []string `json:"-"` } -func (s *StandardSqlStructType) MarshalJSON() ([]byte, error) { +func (s StandardSqlStructType) MarshalJSON() ([]byte, error) { type NoMethod StandardSqlStructType - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StandardSqlTableType: A table type @@ -8286,9 +8336,9 @@ type StandardSqlTableType struct { NullFields []string `json:"-"` } -func (s *StandardSqlTableType) MarshalJSON() ([]byte, error) { +func (s StandardSqlTableType) MarshalJSON() ([]byte, error) { type NoMethod StandardSqlTableType - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StorageDescriptor: Contains information about how a table's data is stored @@ -8299,8 +8349,8 @@ type StorageDescriptor struct { // maximum length is 128 characters. InputFormat string `json:"inputFormat,omitempty"` // LocationUri: Optional. The physical location of the table (e.g. - // 'gs://spark-dataproc-data/pangea-data/case_sensitive/' or - // 'gs://spark-dataproc-data/pangea-data/*'). The maximum length is 2056 bytes. + // `gs://spark-dataproc-data/pangea-data/case_sensitive/` or + // `gs://spark-dataproc-data/pangea-data/*`). The maximum length is 2056 bytes. LocationUri string `json:"locationUri,omitempty"` // OutputFormat: Optional. Specifies the fully qualified class name of the // OutputFormat (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat"). The @@ -8321,9 +8371,9 @@ type StorageDescriptor struct { NullFields []string `json:"-"` } -func (s *StorageDescriptor) MarshalJSON() ([]byte, error) { +func (s StorageDescriptor) MarshalJSON() ([]byte, error) { type NoMethod StorageDescriptor - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type Streamingbuffer struct { @@ -8350,9 +8400,9 @@ type Streamingbuffer struct { NullFields []string `json:"-"` } -func (s *Streamingbuffer) MarshalJSON() ([]byte, error) { +func (s Streamingbuffer) MarshalJSON() ([]byte, error) { type NoMethod Streamingbuffer - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StringHparamSearchSpace: Search space for string and enum. @@ -8372,9 +8422,9 @@ type StringHparamSearchSpace struct { NullFields []string `json:"-"` } -func (s *StringHparamSearchSpace) MarshalJSON() ([]byte, error) { +func (s StringHparamSearchSpace) MarshalJSON() ([]byte, error) { type NoMethod StringHparamSearchSpace - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SystemVariables: System variables given to a query. @@ -8396,9 +8446,9 @@ type SystemVariables struct { NullFields []string `json:"-"` } -func (s *SystemVariables) MarshalJSON() ([]byte, error) { +func (s SystemVariables) MarshalJSON() ([]byte, error) { type NoMethod SystemVariables - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type Table struct { @@ -8537,9 +8587,11 @@ type Table struct { // bytes. This also includes storage used for time travel. 
This data is not // kept in real time, and might be delayed by a few seconds to a few minutes. NumTotalPhysicalBytes int64 `json:"numTotalPhysicalBytes,omitempty,string"` - // PartitionDefinition: Output only. The partition information for all table - // formats, including managed partitioned tables, hive partitioned tables, and - // iceberg partitioned tables. + // PartitionDefinition: Optional. The partition information for all table + // formats, including managed partitioned tables, hive partitioned tables, + // iceberg partitioned, and metastore partitioned tables. This field is only + // populated for metastore partitioned tables. For other table formats, this is + // an output only field. PartitionDefinition *PartitioningDefinition `json:"partitionDefinition,omitempty"` // RangePartitioning: If specified, configures range partitioning for this // table. @@ -8561,7 +8613,8 @@ type Table struct { ResourceTags map[string]string `json:"resourceTags,omitempty"` // Restrictions: Optional. Output only. Restriction config for table. If set, // restrict certain accesses on the table based on the config. See Data egress - // (/bigquery/docs/analytics-hub-introduction#data_egress) for more details. + // (https://cloud.google.com/bigquery/docs/analytics-hub-introduction#data_egress) + // for more details. Restrictions *RestrictionConfig `json:"restrictions,omitempty"` // Schema: Optional. Describes the schema of this table. Schema *TableSchema `json:"schema,omitempty"` @@ -8591,8 +8644,8 @@ type Table struct { // `MATERIALIZED_VIEW`: A precomputed view defined by a SQL query. * // `SNAPSHOT`: An immutable BigQuery table that preserves the contents of a // base table at a particular time. See additional information on table - // snapshots (/bigquery/docs/table-snapshots-intro). The default value is - // `TABLE`. + // snapshots (https://cloud.google.com/bigquery/docs/table-snapshots-intro). + // The default value is `TABLE`. Type string `json:"type,omitempty"` // View: Optional. The view definition. 
View *ViewDefinition `json:"view,omitempty"` @@ -8612,9 +8665,9 @@ type Table struct { NullFields []string `json:"-"` } -func (s *Table) MarshalJSON() ([]byte, error) { +func (s Table) MarshalJSON() ([]byte, error) { type NoMethod Table - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TableCell struct { @@ -8632,9 +8685,9 @@ type TableCell struct { NullFields []string `json:"-"` } -func (s *TableCell) MarshalJSON() ([]byte, error) { +func (s TableCell) MarshalJSON() ([]byte, error) { type NoMethod TableCell - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TableConstraints: The TableConstraints defines the primary key and foreign @@ -8658,9 +8711,9 @@ type TableConstraints struct { NullFields []string `json:"-"` } -func (s *TableConstraints) MarshalJSON() ([]byte, error) { +func (s TableConstraints) MarshalJSON() ([]byte, error) { type NoMethod TableConstraints - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TableConstraintsForeignKeys: Represents a foreign key constraint on a @@ -8684,9 +8737,9 @@ type TableConstraintsForeignKeys struct { NullFields []string `json:"-"` } -func (s *TableConstraintsForeignKeys) MarshalJSON() ([]byte, error) { +func (s TableConstraintsForeignKeys) MarshalJSON() ([]byte, error) { type NoMethod TableConstraintsForeignKeys - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TableConstraintsForeignKeysColumnReferences: The pair of the foreign key @@ -8710,9 +8763,9 @@ type TableConstraintsForeignKeysColumnReferences struct { NullFields []string `json:"-"` } -func (s *TableConstraintsForeignKeysColumnReferences) MarshalJSON() ([]byte, error) { +func (s TableConstraintsForeignKeysColumnReferences) MarshalJSON() ([]byte, error) { type NoMethod TableConstraintsForeignKeysColumnReferences - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TableConstraintsForeignKeysReferencedTable struct { @@ -8732,9 +8785,9 @@ type TableConstraintsForeignKeysReferencedTable struct { NullFields []string `json:"-"` } -func (s *TableConstraintsForeignKeysReferencedTable) MarshalJSON() ([]byte, error) { +func (s TableConstraintsForeignKeysReferencedTable) MarshalJSON() ([]byte, error) { type NoMethod TableConstraintsForeignKeysReferencedTable - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TableConstraintsPrimaryKey: Represents the primary key constraint on a @@ -8756,9 +8809,9 @@ type TableConstraintsPrimaryKey struct { NullFields []string `json:"-"` } -func (s *TableConstraintsPrimaryKey) MarshalJSON() ([]byte, error) { +func (s TableConstraintsPrimaryKey) MarshalJSON() ([]byte, error) { type NoMethod TableConstraintsPrimaryKey - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TableDataInsertAllRequest: Request for sending a single streaming insert. 
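The streaming-insert request defined in the hunks just below is driven through `Tabledata.InsertAll`. A minimal, hedged sketch (project, dataset, and table IDs are placeholders; `JsonValue` is the `interface{}` alias declared earlier in this file, and per-row failures surface via `InsertErrors`):

```go
package main

import (
	"context"
	"log"

	bigquery "google.golang.org/api/bigquery/v2"
)

func main() {
	ctx := context.Background()
	svc, err := bigquery.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	req := &bigquery.TableDataInsertAllRequest{
		Rows: []*bigquery.TableDataInsertAllRequestRows{{
			InsertId: "row-1", // optional best-effort de-duplication key
			Json:     map[string]bigquery.JsonValue{"name": "alice", "score": 42},
		}},
	}
	resp, err := svc.Tabledata.InsertAll("my-project", "my_dataset", "my_table", req).Do()
	if err != nil {
		log.Fatal(err)
	}
	// Row-level errors come back per index; see
	// TableDataInsertAllResponseInsertErrors below.
	for _, e := range resp.InsertErrors {
		log.Printf("row %d failed: %v", e.Index, e.Errors)
	}
}
```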
@@ -8800,9 +8853,9 @@ type TableDataInsertAllRequest struct { NullFields []string `json:"-"` } -func (s *TableDataInsertAllRequest) MarshalJSON() ([]byte, error) { +func (s TableDataInsertAllRequest) MarshalJSON() ([]byte, error) { type NoMethod TableDataInsertAllRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TableDataInsertAllRequestRows: Data for a single insertion row. @@ -8826,9 +8879,9 @@ type TableDataInsertAllRequestRows struct { NullFields []string `json:"-"` } -func (s *TableDataInsertAllRequestRows) MarshalJSON() ([]byte, error) { +func (s TableDataInsertAllRequestRows) MarshalJSON() ([]byte, error) { type NoMethod TableDataInsertAllRequestRows - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TableDataInsertAllResponse: Describes the format of a streaming insert @@ -8855,9 +8908,9 @@ type TableDataInsertAllResponse struct { NullFields []string `json:"-"` } -func (s *TableDataInsertAllResponse) MarshalJSON() ([]byte, error) { +func (s TableDataInsertAllResponse) MarshalJSON() ([]byte, error) { type NoMethod TableDataInsertAllResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TableDataInsertAllResponseInsertErrors: Error details about a single row's @@ -8880,9 +8933,9 @@ type TableDataInsertAllResponseInsertErrors struct { NullFields []string `json:"-"` } -func (s *TableDataInsertAllResponseInsertErrors) MarshalJSON() ([]byte, error) { +func (s TableDataInsertAllResponseInsertErrors) MarshalJSON() ([]byte, error) { type NoMethod TableDataInsertAllResponseInsertErrors - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TableDataList struct { @@ -8915,9 +8968,9 @@ type TableDataList struct { NullFields []string `json:"-"` } -func (s *TableDataList) MarshalJSON() ([]byte, error) { +func (s TableDataList) MarshalJSON() ([]byte, error) { type NoMethod TableDataList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TableFieldSchema: A field in TableSchema @@ -8929,6 +8982,8 @@ type TableFieldSchema struct { // locale, case insensitive. * '': empty string. Default to case-sensitive // behavior. Collation string `json:"collation,omitempty"` + // DataPolicies: Optional. Data policy options, will replace the data_policies. + DataPolicies []*DataPolicyOption `json:"dataPolicies,omitempty"` // DefaultValueExpression: Optional. A SQL expression to specify the [default // value] (https://cloud.google.com/bigquery/docs/default-values) for this // field. @@ -9000,8 +9055,8 @@ type TableFieldSchema struct { // Type: Required. The field data type. Possible values include: * STRING * // BYTES * INTEGER (or INT64) * FLOAT (or FLOAT64) * BOOLEAN (or BOOL) * // TIMESTAMP * DATE * TIME * DATETIME * GEOGRAPHY * NUMERIC * BIGNUMERIC * JSON - // * RECORD (or STRUCT) * RANGE (Preview (/products/#product-launch-stages)) - // Use of RECORD/STRUCT indicates that the field contains a nested schema. + // * RECORD (or STRUCT) * RANGE Use of RECORD/STRUCT indicates that the field + // contains a nested schema. 
Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "Categories") to // unconditionally include in API requests. By default, fields with empty or @@ -9016,9 +9071,9 @@ type TableFieldSchema struct { NullFields []string `json:"-"` } -func (s *TableFieldSchema) MarshalJSON() ([]byte, error) { +func (s TableFieldSchema) MarshalJSON() ([]byte, error) { type NoMethod TableFieldSchema - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TableFieldSchemaCategories: Deprecated. @@ -9038,9 +9093,9 @@ type TableFieldSchemaCategories struct { NullFields []string `json:"-"` } -func (s *TableFieldSchemaCategories) MarshalJSON() ([]byte, error) { +func (s TableFieldSchemaCategories) MarshalJSON() ([]byte, error) { type NoMethod TableFieldSchemaCategories - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TableFieldSchemaPolicyTags: Optional. The policy tags attached to this @@ -9064,9 +9119,9 @@ type TableFieldSchemaPolicyTags struct { NullFields []string `json:"-"` } -func (s *TableFieldSchemaPolicyTags) MarshalJSON() ([]byte, error) { +func (s TableFieldSchemaPolicyTags) MarshalJSON() ([]byte, error) { type NoMethod TableFieldSchemaPolicyTags - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TableFieldSchemaRangeElementType: Represents the type of a field element. @@ -9087,9 +9142,9 @@ type TableFieldSchemaRangeElementType struct { NullFields []string `json:"-"` } -func (s *TableFieldSchemaRangeElementType) MarshalJSON() ([]byte, error) { +func (s TableFieldSchemaRangeElementType) MarshalJSON() ([]byte, error) { type NoMethod TableFieldSchemaRangeElementType - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TableList: Partial projection of the metadata for a given table in a list @@ -9121,9 +9176,9 @@ type TableList struct { NullFields []string `json:"-"` } -func (s *TableList) MarshalJSON() ([]byte, error) { +func (s TableList) MarshalJSON() ([]byte, error) { type NoMethod TableList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TableListTables struct { @@ -9172,9 +9227,9 @@ type TableListTables struct { NullFields []string `json:"-"` } -func (s *TableListTables) MarshalJSON() ([]byte, error) { +func (s TableListTables) MarshalJSON() ([]byte, error) { type NoMethod TableListTables - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TableListTablesView: Information about a logical view. 
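With the preview marker dropped from RANGE in the `Type` list above, a RANGE column is declared through the `TableFieldSchemaRangeElementType` wrapper defined in the same hunk. A sketch, assuming the `RangeElementType` field on `TableFieldSchema` that pairs with that wrapper; the column name is a placeholder:

```go
package main

import (
	"encoding/json"
	"fmt"

	bigquery "google.golang.org/api/bigquery/v2"
)

func main() {
	// A RANGE<DATE> column in a table schema.
	schema := &bigquery.TableSchema{
		Fields: []*bigquery.TableFieldSchema{{
			Name: "stay_dates", // placeholder column name
			Type: "RANGE",
			RangeElementType: &bigquery.TableFieldSchemaRangeElementType{
				Type: "DATE", // the element type of the range
			},
		}},
	}
	b, _ := json.Marshal(schema)
	fmt.Println(string(b))
}
```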
@@ -9197,9 +9252,9 @@ type TableListTablesView struct { NullFields []string `json:"-"` } -func (s *TableListTablesView) MarshalJSON() ([]byte, error) { +func (s TableListTablesView) MarshalJSON() ([]byte, error) { type NoMethod TableListTablesView - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TableMetadataCacheUsage: Table level detail on the usage of metadata @@ -9209,10 +9264,13 @@ type TableMetadataCacheUsage struct { // Explanation: Free form human-readable reason metadata caching was unused for // the job. Explanation string `json:"explanation,omitempty"` + // Staleness: Duration since last refresh as of this job for managed tables + // (indicates metadata cache staleness as seen by this job). + Staleness string `json:"staleness,omitempty"` // TableReference: Metadata caching eligible table referenced in the query. TableReference *TableReference `json:"tableReference,omitempty"` // TableType: Table type - // (/bigquery/docs/reference/rest/v2/tables#Table.FIELDS.type). + // (https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table.FIELDS.type). TableType string `json:"tableType,omitempty"` // UnusedReason: Reason for not using metadata caching for the table. // @@ -9239,9 +9297,9 @@ type TableMetadataCacheUsage struct { NullFields []string `json:"-"` } -func (s *TableMetadataCacheUsage) MarshalJSON() ([]byte, error) { +func (s TableMetadataCacheUsage) MarshalJSON() ([]byte, error) { type NoMethod TableMetadataCacheUsage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TableReference struct { @@ -9270,9 +9328,9 @@ type TableReference struct { NullFields []string `json:"-"` } -func (s *TableReference) MarshalJSON() ([]byte, error) { +func (s TableReference) MarshalJSON() ([]byte, error) { type NoMethod TableReference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TableReplicationInfo: Replication info of a table created using `AS REPLICA` @@ -9315,9 +9373,9 @@ type TableReplicationInfo struct { NullFields []string `json:"-"` } -func (s *TableReplicationInfo) MarshalJSON() ([]byte, error) { +func (s TableReplicationInfo) MarshalJSON() ([]byte, error) { type NoMethod TableReplicationInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TableRow struct { @@ -9337,9 +9395,9 @@ type TableRow struct { NullFields []string `json:"-"` } -func (s *TableRow) MarshalJSON() ([]byte, error) { +func (s TableRow) MarshalJSON() ([]byte, error) { type NoMethod TableRow - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TableSchema: Schema of a table @@ -9362,9 +9420,9 @@ type TableSchema struct { NullFields []string `json:"-"` } -func (s *TableSchema) MarshalJSON() ([]byte, error) { +func (s TableSchema) MarshalJSON() ([]byte, error) { type NoMethod TableSchema - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsRequest: Request message for `TestIamPermissions` method. 
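The new `Staleness` field gives each job a per-table view of metadata cache age. A hedged sketch of reading it; this assumes `MetadataCacheStatistics` exposes the usage list as `TableMetadataCacheUsage`, consistent with this file's naming pattern, and the statistics value would come from a completed job:

```go
package main

import (
	"log"

	bigquery "google.golang.org/api/bigquery/v2"
)

// reportCacheUsage logs the Staleness duration, added in this revision, for
// each table a job considered for metadata caching.
func reportCacheUsage(stats *bigquery.MetadataCacheStatistics) {
	for _, u := range stats.TableMetadataCacheUsage {
		table := "unknown"
		if u.TableReference != nil {
			table = u.TableReference.TableId
		}
		log.Printf("table=%s type=%s staleness=%s unused_reason=%s",
			table, u.TableType, u.Staleness, u.UnusedReason)
	}
}

func main() {} // placeholder entry point; stats would come from a finished job
```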
@@ -9387,9 +9445,9 @@ type TestIamPermissionsRequest struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsResponse: Response message for `TestIamPermissions` @@ -9414,9 +9472,9 @@ type TestIamPermissionsResponse struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TimePartitioning struct { @@ -9451,9 +9509,9 @@ type TimePartitioning struct { NullFields []string `json:"-"` } -func (s *TimePartitioning) MarshalJSON() ([]byte, error) { +func (s TimePartitioning) MarshalJSON() ([]byte, error) { type NoMethod TimePartitioning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TrainingOptions: Options used in model training. @@ -9520,6 +9578,11 @@ type TrainingOptions struct { // ColsampleBytree: Subsample ratio of columns when constructing each tree for // boosted tree models. ColsampleBytree float64 `json:"colsampleBytree,omitempty"` + // ContributionMetric: The contribution metric. Applies to contribution + // analysis models. Allowed formats supported are for summable and summable + // ratio contribution metrics. These include expressions such as "SUM(x)" or + // "SUM(x)/SUM(y)", where x and y are column names from the base table. + ContributionMetric string `json:"contributionMetric,omitempty"` // DartNormalizeType: Type of normalization algorithm for boosted tree models // using dart booster. // @@ -9569,6 +9632,9 @@ type TrainingOptions struct { // DecomposeTimeSeries: If true, perform decompose time series and save the // results. DecomposeTimeSeries bool `json:"decomposeTimeSeries,omitempty"` + // DimensionIdColumns: Optional. Names of the columns to slice on. Applies to + // contribution analysis models. + DimensionIdColumns []string `json:"dimensionIdColumns,omitempty"` // DistanceType: Distance type for clustering models. // // Possible values: @@ -9799,6 +9865,9 @@ type TrainingOptions struct { // IntegratedGradientsNumSteps: Number of integral steps for the integrated // gradients explain method. IntegratedGradientsNumSteps int64 `json:"integratedGradientsNumSteps,omitempty,string"` + // IsTestColumn: Name of the column used to determine the rows corresponding to + // control and test. Applies to contribution analysis models. + IsTestColumn string `json:"isTestColumn,omitempty"` // ItemColumn: Item column specified for matrix factorization models. ItemColumn string `json:"itemColumn,omitempty"` // KmeansInitializationColumn: The column used to provide the initial centroids @@ -9854,6 +9923,9 @@ type TrainingOptions struct { MaxTimeSeriesLength int64 `json:"maxTimeSeriesLength,omitempty,string"` // MaxTreeDepth: Maximum depth of a tree for boosted tree models. MaxTreeDepth int64 `json:"maxTreeDepth,omitempty,string"` + // MinAprioriSupport: The apriori support minimum. 
Applies to contribution + // analysis models. + MinAprioriSupport float64 `json:"minAprioriSupport,omitempty"` // MinRelativeProgress: When early_stop is true, stops training when accuracy // improvement is less than 'min_relative_progress'. Used only for iterative // training algorithms. @@ -9998,9 +10070,9 @@ type TrainingOptions struct { NullFields []string `json:"-"` } -func (s *TrainingOptions) MarshalJSON() ([]byte, error) { +func (s TrainingOptions) MarshalJSON() ([]byte, error) { type NoMethod TrainingOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *TrainingOptions) UnmarshalJSON(data []byte) error { @@ -10017,6 +10089,7 @@ func (s *TrainingOptions) UnmarshalJSON(data []byte) error { L1Regularization gensupport.JSONFloat64 `json:"l1Regularization"` L2Regularization gensupport.JSONFloat64 `json:"l2Regularization"` LearnRate gensupport.JSONFloat64 `json:"learnRate"` + MinAprioriSupport gensupport.JSONFloat64 `json:"minAprioriSupport"` MinRelativeProgress gensupport.JSONFloat64 `json:"minRelativeProgress"` MinSplitLoss gensupport.JSONFloat64 `json:"minSplitLoss"` PcaExplainedVarianceRatio gensupport.JSONFloat64 `json:"pcaExplainedVarianceRatio"` @@ -10040,6 +10113,7 @@ func (s *TrainingOptions) UnmarshalJSON(data []byte) error { s.L1Regularization = float64(s1.L1Regularization) s.L2Regularization = float64(s1.L2Regularization) s.LearnRate = float64(s1.LearnRate) + s.MinAprioriSupport = float64(s1.MinAprioriSupport) s.MinRelativeProgress = float64(s1.MinRelativeProgress) s.MinSplitLoss = float64(s1.MinSplitLoss) s.PcaExplainedVarianceRatio = float64(s1.PcaExplainedVarianceRatio) @@ -10098,9 +10172,9 @@ type TrainingRun struct { NullFields []string `json:"-"` } -func (s *TrainingRun) MarshalJSON() ([]byte, error) { +func (s TrainingRun) MarshalJSON() ([]byte, error) { type NoMethod TrainingRun - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TransactionInfo: [Alpha] Information of a multi-statement transaction. @@ -10120,9 +10194,9 @@ type TransactionInfo struct { NullFields []string `json:"-"` } -func (s *TransactionInfo) MarshalJSON() ([]byte, error) { +func (s TransactionInfo) MarshalJSON() ([]byte, error) { type NoMethod TransactionInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TransformColumn: Information about a single transform column. @@ -10146,9 +10220,9 @@ type TransformColumn struct { NullFields []string `json:"-"` } -func (s *TransformColumn) MarshalJSON() ([]byte, error) { +func (s TransformColumn) MarshalJSON() ([]byte, error) { type NoMethod TransformColumn - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UndeleteDatasetRequest: Request format for undeleting a dataset. 
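The MinAprioriSupport plumbing added to TrainingOptions.UnmarshalJSON above exists because BigQuery may serialize some floating-point fields as quoted strings; gensupport.JSONFloat64 accepts either spelling. A short check against the regenerated package (assuming this vendored version):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	bigquery "google.golang.org/api/bigquery/v2"
)

func main() {
	// Both spellings decode to the same value, because the generated
	// UnmarshalJSON routes the field through gensupport.JSONFloat64.
	var a, b bigquery.TrainingOptions
	if err := json.Unmarshal([]byte(`{"minAprioriSupport": 0.05}`), &a); err != nil {
		log.Fatal(err)
	}
	if err := json.Unmarshal([]byte(`{"minAprioriSupport": "0.05"}`), &b); err != nil {
		log.Fatal(err)
	}
	fmt.Println(a.MinAprioriSupport, b.MinAprioriSupport) // 0.05 0.05
}
```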
@@ -10170,9 +10244,9 @@ type UndeleteDatasetRequest struct { NullFields []string `json:"-"` } -func (s *UndeleteDatasetRequest) MarshalJSON() ([]byte, error) { +func (s UndeleteDatasetRequest) MarshalJSON() ([]byte, error) { type NoMethod UndeleteDatasetRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UserDefinedFunctionResource: This is used for defining User Defined @@ -10202,9 +10276,9 @@ type UserDefinedFunctionResource struct { NullFields []string `json:"-"` } -func (s *UserDefinedFunctionResource) MarshalJSON() ([]byte, error) { +func (s UserDefinedFunctionResource) MarshalJSON() ([]byte, error) { type NoMethod UserDefinedFunctionResource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VectorSearchStatistics: Statistics for a vector search query. Populated as @@ -10242,9 +10316,9 @@ type VectorSearchStatistics struct { NullFields []string `json:"-"` } -func (s *VectorSearchStatistics) MarshalJSON() ([]byte, error) { +func (s VectorSearchStatistics) MarshalJSON() ([]byte, error) { type NoMethod VectorSearchStatistics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ViewDefinition: Describes the definition of a logical view. @@ -10281,9 +10355,9 @@ type ViewDefinition struct { NullFields []string `json:"-"` } -func (s *ViewDefinition) MarshalJSON() ([]byte, error) { +func (s ViewDefinition) MarshalJSON() ([]byte, error) { type NoMethod ViewDefinition - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type DatasetsDeleteCall struct { @@ -10633,11 +10707,12 @@ func (c *DatasetsListCall) All(all bool) *DatasetsListCall { } // Filter sets the optional parameter "filter": An expression for filtering the -// results of the request by label. The syntax is \"labels.[:]\". -// Multiple filters can be ANDed together by connecting with a space. Example: -// \"labels.department:receiving labels.active\". See Filtering datasets using -// labels (/bigquery/docs/filtering-labels#filtering_datasets_using_labels) for -// details. +// results of the request by label. The syntax is `labels.[:]`. Multiple +// filters can be ANDed together by connecting with a space. Example: +// `labels.department:receiving labels.active`. See Filtering datasets using +// labels +// (https://cloud.google.com/bigquery/docs/filtering-labels#filtering_datasets_using_labels) +// for details. func (c *DatasetsListCall) Filter(filter string) *DatasetsListCall { c.urlParams_.Set("filter", filter) return c @@ -11114,8 +11189,8 @@ func (r *JobsService) Cancel(projectId string, jobId string) *JobsCancelCall { // Location sets the optional parameter "location": The geographic location of // the job. 
You must specify the location to run the job for the following -// scenarios: - If the location to run a job is not in the `us` or the `eu` -// multi-regional location - If the job's location is in a single region (for +// scenarios: * If the location to run a job is not in the `us` or the `eu` +// multi-regional location * If the job's location is in a single region (for // example, `us-central1`) For more information, see // https://cloud.google.com/bigquery/docs/locations#specifying_your_location. func (c *JobsCancelCall) Location(location string) *JobsCancelCall { @@ -11316,8 +11391,8 @@ func (r *JobsService) Get(projectId string, jobId string) *JobsGetCall { // Location sets the optional parameter "location": The geographic location of // the job. You must specify the location to run the job for the following -// scenarios: - If the location to run a job is not in the `us` or the `eu` -// multi-regional location - If the job's location is in a single region (for +// scenarios: * If the location to run a job is not in the `us` or the `eu` +// multi-regional location * If the job's location is in a single region (for // example, `us-central1`) For more information, see // https://cloud.google.com/bigquery/docs/locations#specifying_your_location. func (c *JobsGetCall) Location(location string) *JobsGetCall { @@ -11446,8 +11521,8 @@ func (c *JobsGetQueryResultsCall) FormatOptionsUseInt64Timestamp(formatOptionsUs // Location sets the optional parameter "location": The geographic location of // the job. You must specify the location to run the job for the following -// scenarios: - If the location to run a job is not in the `us` or the `eu` -// multi-regional location - If the job's location is in a single region (for +// scenarios: * If the location to run a job is not in the `us` or the `eu` +// multi-regional location * If the job's location is in a single region (for // example, `us-central1`) For more information, see // https://cloud.google.com/bigquery/docs/locations#specifying_your_location. func (c *JobsGetQueryResultsCall) Location(location string) *JobsGetQueryResultsCall { diff --git a/terraform/providers/google/vendor/google.golang.org/api/bigtableadmin/v2/bigtableadmin-api.json b/terraform/providers/google/vendor/google.golang.org/api/bigtableadmin/v2/bigtableadmin-api.json index 077a894686d..6496fb7e863 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/bigtableadmin/v2/bigtableadmin-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/bigtableadmin/v2/bigtableadmin-api.json @@ -949,7 +949,7 @@ ], "parameters": { "parent": { - "description": "Required. The name of the destination cluster that will contain the backup copy. The cluster must already exists. Values are of the form: `projects/{project}/instances/{instance}/clusters/{cluster}`.", + "description": "Required. The name of the destination cluster that will contain the backup copy. The cluster must already exist. 
Values are of the form: `projects/{project}/instances/{instance}/clusters/{cluster}`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/clusters/[^/]+$", "required": true, @@ -2194,7 +2194,7 @@ } } }, - "revision": "20240522", + "revision": "20240918", "rootUrl": "https://bigtableadmin.googleapis.com/", "schemas": { "AppProfile": { @@ -2375,6 +2375,20 @@ "description": "A backup of a Cloud Bigtable table.", "id": "Backup", "properties": { + "backupType": { + "description": "Indicates the backup type of the backup.", + "enum": [ + "BACKUP_TYPE_UNSPECIFIED", + "STANDARD", + "HOT" + ], + "enumDescriptions": [ + "Not specified.", + "The default type for Cloud Bigtable managed backups. Supported for backups created in both HDD and SSD instances. Requires optimization when restored to a table in an SSD instance.", + "A backup type with faster restore to SSD performance. Only supported for backups created in SSD instances. A new SSD table restored from a hot backup reaches production performance more quickly than a standard backup." + ], + "type": "string" + }, "encryptionInfo": { "$ref": "EncryptionInfo", "description": "Output only. The encryption information for the backup.", @@ -2387,7 +2401,12 @@ "type": "string" }, "expireTime": { - "description": "Required. The expiration time of the backup, with microseconds granularity that must be at least 6 hours and at most 90 days from the time the request is received. Once the `expire_time` has passed, Cloud Bigtable will delete the backup and free the resources used by the backup.", + "description": "Required. The expiration time of the backup. When creating a backup or updating its `expire_time`, the value must be greater than the backup creation time by: - At least 6 hours - At most 90 days Once the `expire_time` has passed, Cloud Bigtable will delete the backup.", + "format": "google-datetime", + "type": "string" + }, + "hotToStandardTime": { + "description": "The time at which the hot backup will be converted to a standard backup. Once the `hot_to_standard_time` has passed, Cloud Bigtable will convert the hot backup to a standard backup. This value must be greater than the backup creation time by: - At least 24 hours This field only applies for hot backups. When creating or updating a standard backup, attempting to set this field will fail the request.", "format": "google-datetime", "type": "string" }, @@ -2566,6 +2585,20 @@ "description": "The unique name of the cluster. Values are of the form `projects/{project}/instances/{instance}/clusters/a-z*`.", "type": "string" }, + "nodeScalingFactor": { + "description": "Immutable. The node scaling factor of this cluster.", + "enum": [ + "NODE_SCALING_FACTOR_UNSPECIFIED", + "NODE_SCALING_FACTOR_1X", + "NODE_SCALING_FACTOR_2X" + ], + "enumDescriptions": [ + "No node scaling specified. Defaults to NODE_SCALING_FACTOR_1X.", + "The cluster is running with a scaling factor of 1.", + "The cluster is running with a scaling factor of 2. All node count values must be in increments of 2 with this scaling factor enabled, otherwise an INVALID_ARGUMENT error will be returned." + ], + "type": "string" + }, "serveNodes": { "description": "The number of nodes in the cluster. If no value is set, Cloud Bigtable automatically allocates nodes based on your data footprint and optimized for 50% storage utilization.", "format": "int32", @@ -3144,10 +3177,22 @@ "description": "A value that combines incremental updates into a summarized value. Data is never directly written or read using type `Aggregate`. 
Writes will provide either the `input_type` or `state_type`, and reads will always return the `state_type` .", "id": "GoogleBigtableAdminV2TypeAggregate", "properties": { + "hllppUniqueCount": { + "$ref": "GoogleBigtableAdminV2TypeAggregateHyperLogLogPlusPlusUniqueCount", + "description": "HyperLogLogPlusPlusUniqueCount aggregator." + }, "inputType": { "$ref": "Type", "description": "Type of the inputs that are accumulated by this `Aggregate`, which must specify a full encoding. Use `AddInput` mutations to accumulate new inputs." }, + "max": { + "$ref": "GoogleBigtableAdminV2TypeAggregateMax", + "description": "Max aggregator." + }, + "min": { + "$ref": "GoogleBigtableAdminV2TypeAggregateMin", + "description": "Min aggregator." + }, "stateType": { "$ref": "Type", "description": "Output only. Type that holds the internal accumulator state for the `Aggregate`. This is a function of the `input_type` and `aggregator` chosen, and will always specify a full encoding.", @@ -3160,12 +3205,47 @@ }, "type": "object" }, + "GoogleBigtableAdminV2TypeAggregateHyperLogLogPlusPlusUniqueCount": { + "description": "Computes an approximate unique count over the input values. When using raw data as input, be careful to use a consistent encoding. Otherwise the same value encoded differently could count more than once, or two distinct values could count as identical. Input: Any, or omit for Raw State: TBD Special state conversions: `Int64` (the unique count estimate)", + "id": "GoogleBigtableAdminV2TypeAggregateHyperLogLogPlusPlusUniqueCount", + "properties": {}, + "type": "object" + }, + "GoogleBigtableAdminV2TypeAggregateMax": { + "description": "Computes the max of the input values. Allowed input: `Int64` State: same as input", + "id": "GoogleBigtableAdminV2TypeAggregateMax", + "properties": {}, + "type": "object" + }, + "GoogleBigtableAdminV2TypeAggregateMin": { + "description": "Computes the min of the input values. Allowed input: `Int64` State: same as input", + "id": "GoogleBigtableAdminV2TypeAggregateMin", + "properties": {}, + "type": "object" + }, "GoogleBigtableAdminV2TypeAggregateSum": { "description": "Computes the sum of the input values. Allowed input: `Int64` State: same as input", "id": "GoogleBigtableAdminV2TypeAggregateSum", "properties": {}, "type": "object" }, + "GoogleBigtableAdminV2TypeArray": { + "description": "An ordered list of elements of a given type. Values of type `Array` are stored in `Value.array_value`.", + "id": "GoogleBigtableAdminV2TypeArray", + "properties": { + "elementType": { + "$ref": "Type", + "description": "The type of the elements in the array. This must not be `Array`." + } + }, + "type": "object" + }, + "GoogleBigtableAdminV2TypeBool": { + "description": "bool Values of type `Bool` are stored in `Value.bool_value`.", + "id": "GoogleBigtableAdminV2TypeBool", + "properties": {}, + "type": "object" + }, "GoogleBigtableAdminV2TypeBytes": { "description": "Bytes Values of type `Bytes` are stored in `Value.bytes_value`.", "id": "GoogleBigtableAdminV2TypeBytes", @@ -3189,11 +3269,29 @@ "type": "object" }, "GoogleBigtableAdminV2TypeBytesEncodingRaw": { - "description": "Leaves the value \"as-is\" * Natural sort? Yes * Self-delimiting? No * Compatibility? N/A", + "description": "Leaves the value \"as-is\" * Order-preserving? Yes * Self-delimiting? No * Compatibility? 
N/A", "id": "GoogleBigtableAdminV2TypeBytesEncodingRaw", "properties": {}, "type": "object" }, + "GoogleBigtableAdminV2TypeDate": { + "description": "Date Values of type `Date` are stored in `Value.date_value`.", + "id": "GoogleBigtableAdminV2TypeDate", + "properties": {}, + "type": "object" + }, + "GoogleBigtableAdminV2TypeFloat32": { + "description": "Float32 Values of type `Float32` are stored in `Value.float_value`.", + "id": "GoogleBigtableAdminV2TypeFloat32", + "properties": {}, + "type": "object" + }, + "GoogleBigtableAdminV2TypeFloat64": { + "description": "Float64 Values of type `Float64` are stored in `Value.float_value`.", + "id": "GoogleBigtableAdminV2TypeFloat64", + "properties": {}, + "type": "object" + }, "GoogleBigtableAdminV2TypeInt64": { "description": "Int64 Values of type `Int64` are stored in `Value.int_value`.", "id": "GoogleBigtableAdminV2TypeInt64", @@ -3217,16 +3315,107 @@ "type": "object" }, "GoogleBigtableAdminV2TypeInt64EncodingBigEndianBytes": { - "description": "Encodes the value as an 8-byte big endian twos complement `Bytes` value. * Natural sort? No (positive values only) * Self-delimiting? Yes * Compatibility? - BigQuery Federation `BINARY` encoding - HBase `Bytes.toBytes` - Java `ByteBuffer.putLong()` with `ByteOrder.BIG_ENDIAN`", + "description": "Encodes the value as an 8-byte big endian twos complement `Bytes` value. * Order-preserving? No (positive values only) * Self-delimiting? Yes * Compatibility? - BigQuery Federation `BINARY` encoding - HBase `Bytes.toBytes` - Java `ByteBuffer.putLong()` with `ByteOrder.BIG_ENDIAN`", "id": "GoogleBigtableAdminV2TypeInt64EncodingBigEndianBytes", "properties": { "bytesType": { "$ref": "GoogleBigtableAdminV2TypeBytes", - "description": "The underlying `Bytes` type, which may be able to encode further." + "deprecated": true, + "description": "Deprecated: ignored if set." + } + }, + "type": "object" + }, + "GoogleBigtableAdminV2TypeMap": { + "description": "A mapping of keys to values of a given type. Values of type `Map` are stored in a `Value.array_value` where each entry is another `Value.array_value` with two elements (the key and the value, in that order). Normally encoded Map values won't have repeated keys, however, clients are expected to handle the case in which they do. If the same key appears multiple times, the _last_ value takes precedence.", + "id": "GoogleBigtableAdminV2TypeMap", + "properties": { + "keyType": { + "$ref": "Type", + "description": "The type of a map key. Only `Bytes`, `String`, and `Int64` are allowed as key types." + }, + "valueType": { + "$ref": "Type", + "description": "The type of the values in a map." + } + }, + "type": "object" + }, + "GoogleBigtableAdminV2TypeString": { + "description": "String Values of type `String` are stored in `Value.string_value`.", + "id": "GoogleBigtableAdminV2TypeString", + "properties": { + "encoding": { + "$ref": "GoogleBigtableAdminV2TypeStringEncoding", + "description": "The encoding to use when converting to/from lower level types." + } + }, + "type": "object" + }, + "GoogleBigtableAdminV2TypeStringEncoding": { + "description": "Rules used to convert to/from lower level types.", + "id": "GoogleBigtableAdminV2TypeStringEncoding", + "properties": { + "utf8Bytes": { + "$ref": "GoogleBigtableAdminV2TypeStringEncodingUtf8Bytes", + "description": "Use `Utf8Bytes` encoding." + }, + "utf8Raw": { + "$ref": "GoogleBigtableAdminV2TypeStringEncodingUtf8Raw", + "deprecated": true, + "description": "Deprecated: if set, converts to an empty `utf8_bytes`." 
} }, "type": "object" }, + "GoogleBigtableAdminV2TypeStringEncodingUtf8Bytes": { + "description": "UTF-8 encoding * Order-preserving? Yes (code point order) * Self-delimiting? No * Compatibility? - BigQuery Federation `TEXT` encoding - HBase `Bytes.toBytes` - Java `String#getBytes(StandardCharsets.UTF_8)`", + "id": "GoogleBigtableAdminV2TypeStringEncodingUtf8Bytes", + "properties": {}, + "type": "object" + }, + "GoogleBigtableAdminV2TypeStringEncodingUtf8Raw": { + "deprecated": true, + "description": "Deprecated: prefer the equivalent `Utf8Bytes`.", + "id": "GoogleBigtableAdminV2TypeStringEncodingUtf8Raw", + "properties": {}, + "type": "object" + }, + "GoogleBigtableAdminV2TypeStruct": { + "description": "A structured data value, consisting of fields which map to dynamically typed values. Values of type `Struct` are stored in `Value.array_value` where entries are in the same order and number as `field_types`.", + "id": "GoogleBigtableAdminV2TypeStruct", + "properties": { + "fields": { + "description": "The names and types of the fields in this struct.", + "items": { + "$ref": "GoogleBigtableAdminV2TypeStructField" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleBigtableAdminV2TypeStructField": { + "description": "A struct field and its type.", + "id": "GoogleBigtableAdminV2TypeStructField", + "properties": { + "fieldName": { + "description": "The field name (optional). Fields without a `field_name` are considered anonymous and cannot be referenced by name.", + "type": "string" + }, + "type": { + "$ref": "Type", + "description": "The type of values in this field." + } + }, + "type": "object" + }, + "GoogleBigtableAdminV2TypeTimestamp": { + "description": "Timestamp Values of type `Timestamp` are stored in `Value.timestamp_value`.", + "id": "GoogleBigtableAdminV2TypeTimestamp", + "properties": {}, + "type": "object" + }, "HotTablet": { "description": "A tablet is a defined by a start and end key and is explained in https://cloud.google.com/bigtable/docs/overview#architecture and https://cloud.google.com/bigtable/docs/performance#optimization. A Hot tablet is a tablet that exhibits high average cpu usage during the time interval from start time to end time.", "id": "HotTablet", @@ -3622,6 +3811,10 @@ "type": "string" }, "type": "array" + }, + "rowAffinity": { + "$ref": "RowAffinity", + "description": "Row affinity sticky routing based on the row key of the request. Requests that span multiple rows are routed non-deterministically." } }, "type": "object" @@ -3854,6 +4047,12 @@ }, "type": "object" }, + "RowAffinity": { + "description": "If enabled, Bigtable will route the request based on the row key of the request, rather than randomly. Instead, each row key will be assigned to a cluster, and will stick to that cluster. If clusters are added or removed, then this may affect which row keys stick to which clusters. To avoid this, users can use a cluster group to specify which clusters are to be used. In this case, new clusters that are not a part of the cluster group will not be routed to, and routing will be unaffected by the new cluster. 
Moreover, clusters specified in the cluster group cannot be deleted unless removed from the cluster group.", + "id": "RowAffinity", + "properties": {}, + "type": "object" + }, "SetIamPolicyRequest": { "description": "Request message for `SetIamPolicy` method.", "id": "SetIamPolicyRequest", @@ -4103,20 +4302,56 @@ "type": "object" }, "Type": { - "description": "`Type` represents the type of data that is written to, read from, or stored in Bigtable. It is heavily based on the GoogleSQL standard to help maintain familiarity and consistency across products and features. For compatibility with Bigtable's existing untyped APIs, each `Type` includes an `Encoding` which describes how to convert to/from the underlying data. This might involve composing a series of steps into an \"encoding chain,\" for example to convert from INT64 -\u003e STRING -\u003e raw bytes. In most cases, a \"link\" in the encoding chain will be based an on existing GoogleSQL conversion function like `CAST`. Each link in the encoding chain also defines the following properties: * Natural sort: Does the encoded value sort consistently with the original typed value? Note that Bigtable will always sort data based on the raw encoded value, *not* the decoded type. - Example: BYTES values sort in the same order as their raw encodings. - Counterexample: Encoding INT64 to a fixed-width STRING does *not* preserve sort order when dealing with negative numbers. INT64(1) \u003e INT64(-1), but STRING(\"-00001\") \u003e STRING(\"00001). - The overall encoding chain has this property if *every* link does. * Self-delimiting: If we concatenate two encoded values, can we always tell where the first one ends and the second one begins? - Example: If we encode INT64s to fixed-width STRINGs, the first value will always contain exactly N digits, possibly preceded by a sign. - Counterexample: If we concatenate two UTF-8 encoded STRINGs, we have no way to tell where the first one ends. - The overall encoding chain has this property if *any* link does. * Compatibility: Which other systems have matching encoding schemes? For example, does this encoding have a GoogleSQL equivalent? HBase? Java?", + "description": "`Type` represents the type of data that is written to, read from, or stored in Bigtable. It is heavily based on the GoogleSQL standard to help maintain familiarity and consistency across products and features. For compatibility with Bigtable's existing untyped APIs, each `Type` includes an `Encoding` which describes how to convert to/from the underlying data. Each encoding also defines the following properties: * Order-preserving: Does the encoded value sort consistently with the original typed value? Note that Bigtable will always sort data based on the raw encoded value, *not* the decoded type. - Example: BYTES values sort in the same order as their raw encodings. - Counterexample: Encoding INT64 as a fixed-width decimal string does *not* preserve sort order when dealing with negative numbers. `INT64(1) \u003e INT64(-1)`, but `STRING(\"-00001\") \u003e STRING(\"00001)`. * Self-delimiting: If we concatenate two encoded values, can we always tell where the first one ends and the second one begins? - Example: If we encode INT64s to fixed-width STRINGs, the first value will always contain exactly N digits, possibly preceded by a sign. - Counterexample: If we concatenate two UTF-8 encoded STRINGs, we have no way to tell where the first one ends. * Compatibility: Which other systems have matching encoding schemes? 
For example, does this encoding have a GoogleSQL equivalent? HBase? Java?", "id": "Type", "properties": { "aggregateType": { "$ref": "GoogleBigtableAdminV2TypeAggregate", "description": "Aggregate" }, + "arrayType": { + "$ref": "GoogleBigtableAdminV2TypeArray", + "description": "Array" + }, + "boolType": { + "$ref": "GoogleBigtableAdminV2TypeBool", + "description": "Bool" + }, "bytesType": { "$ref": "GoogleBigtableAdminV2TypeBytes", "description": "Bytes" }, + "dateType": { + "$ref": "GoogleBigtableAdminV2TypeDate", + "description": "Date" + }, + "float32Type": { + "$ref": "GoogleBigtableAdminV2TypeFloat32", + "description": "Float32" + }, + "float64Type": { + "$ref": "GoogleBigtableAdminV2TypeFloat64", + "description": "Float64" + }, "int64Type": { "$ref": "GoogleBigtableAdminV2TypeInt64", "description": "Int64" + }, + "mapType": { + "$ref": "GoogleBigtableAdminV2TypeMap", + "description": "Map" + }, + "stringType": { + "$ref": "GoogleBigtableAdminV2TypeString", + "description": "String" + }, + "structType": { + "$ref": "GoogleBigtableAdminV2TypeStruct", + "description": "Struct" + }, + "timestampType": { + "$ref": "GoogleBigtableAdminV2TypeTimestamp", + "description": "Timestamp" } }, "type": "object" diff --git a/terraform/providers/google/vendor/google.golang.org/api/bigtableadmin/v2/bigtableadmin-gen.go b/terraform/providers/google/vendor/google.golang.org/api/bigtableadmin/v2/bigtableadmin-gen.go index 0f8a68d37b1..63a13c755b9 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/bigtableadmin/v2/bigtableadmin-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/bigtableadmin/v2/bigtableadmin-gen.go @@ -386,9 +386,9 @@ type AppProfile struct { NullFields []string `json:"-"` } -func (s *AppProfile) MarshalJSON() ([]byte, error) { +func (s AppProfile) MarshalJSON() ([]byte, error) { type NoMethod AppProfile - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuditConfig: Specifies the audit configuration for a service. The @@ -427,9 +427,9 @@ type AuditConfig struct { NullFields []string `json:"-"` } -func (s *AuditConfig) MarshalJSON() ([]byte, error) { +func (s AuditConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuditLogConfig: Provides the configuration for logging a type of @@ -462,9 +462,9 @@ type AuditLogConfig struct { NullFields []string `json:"-"` } -func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { +func (s AuditLogConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditLogConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuthorizedView: An Authorized View of a Cloud Bigtable Table. 
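The rename from "Natural sort?" to "Order-preserving?" across the Type and encoding descriptions above is easy to verify for the BigEndianBytes case: two's-complement big-endian bytes compare like the integers they encode only when both values are non-negative. A self-contained check:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// enc mimics the BigEndianBytes encoding: an 8-byte big-endian
// two's-complement representation of an int64.
func enc(v int64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, uint64(v))
	return b
}

func main() {
	// Byte-wise, enc(-1) = ff..ff sorts after enc(1) = 00..01 even though
	// -1 < 1: the encoding is order-preserving only for non-negative values.
	fmt.Println(bytes.Compare(enc(-1), enc(1)) > 0) // true
}
```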
@@ -500,9 +500,9 @@ type AuthorizedView struct { NullFields []string `json:"-"` } -func (s *AuthorizedView) MarshalJSON() ([]byte, error) { +func (s AuthorizedView) MarshalJSON() ([]byte, error) { type NoMethod AuthorizedView - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AutomatedBackupPolicy: Defines an automated backup policy for a table @@ -526,9 +526,9 @@ type AutomatedBackupPolicy struct { NullFields []string `json:"-"` } -func (s *AutomatedBackupPolicy) MarshalJSON() ([]byte, error) { +func (s AutomatedBackupPolicy) MarshalJSON() ([]byte, error) { type NoMethod AutomatedBackupPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AutoscalingLimits: Limits for the number of nodes a Cluster can autoscale @@ -551,9 +551,9 @@ type AutoscalingLimits struct { NullFields []string `json:"-"` } -func (s *AutoscalingLimits) MarshalJSON() ([]byte, error) { +func (s AutoscalingLimits) MarshalJSON() ([]byte, error) { type NoMethod AutoscalingLimits - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AutoscalingTargets: The Autoscaling targets for a Cluster. These determine @@ -584,23 +584,42 @@ type AutoscalingTargets struct { NullFields []string `json:"-"` } -func (s *AutoscalingTargets) MarshalJSON() ([]byte, error) { +func (s AutoscalingTargets) MarshalJSON() ([]byte, error) { type NoMethod AutoscalingTargets - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Backup: A backup of a Cloud Bigtable table. type Backup struct { + // BackupType: Indicates the backup type of the backup. + // + // Possible values: + // "BACKUP_TYPE_UNSPECIFIED" - Not specified. + // "STANDARD" - The default type for Cloud Bigtable managed backups. + // Supported for backups created in both HDD and SSD instances. Requires + // optimization when restored to a table in an SSD instance. + // "HOT" - A backup type with faster restore to SSD performance. Only + // supported for backups created in SSD instances. A new SSD table restored + // from a hot backup reaches production performance more quickly than a + // standard backup. + BackupType string `json:"backupType,omitempty"` // EncryptionInfo: Output only. The encryption information for the backup. EncryptionInfo *EncryptionInfo `json:"encryptionInfo,omitempty"` // EndTime: Output only. `end_time` is the time that the backup was finished. // The row data in the backup will be no newer than this timestamp. EndTime string `json:"endTime,omitempty"` - // ExpireTime: Required. The expiration time of the backup, with microseconds - // granularity that must be at least 6 hours and at most 90 days from the time - // the request is received. Once the `expire_time` has passed, Cloud Bigtable - // will delete the backup and free the resources used by the backup. + // ExpireTime: Required. The expiration time of the backup. When creating a + // backup or updating its `expire_time`, the value must be greater than the + // backup creation time by: - At least 6 hours - At most 90 days Once the + // `expire_time` has passed, Cloud Bigtable will delete the backup. 
ExpireTime string `json:"expireTime,omitempty"` + // HotToStandardTime: The time at which the hot backup will be converted to a + // standard backup. Once the `hot_to_standard_time` has passed, Cloud Bigtable + // will convert the hot backup to a standard backup. This value must be greater + // than the backup creation time by: - At least 24 hours This field only + // applies for hot backups. When creating or updating a standard backup, + // attempting to set this field will fail the request. + HotToStandardTime string `json:"hotToStandardTime,omitempty"` // Name: A globally unique identifier for the backup which cannot be changed. // Values are of the form // `projects/{project}/instances/{instance}/clusters/{cluster}/ @@ -634,22 +653,22 @@ type Backup struct { // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "EncryptionInfo") to + // ForceSendFields is a list of field names (e.g. "BackupType") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "EncryptionInfo") to include in - // API requests with the JSON null value. By default, fields with empty values - // are omitted from API requests. See + // NullFields is a list of field names (e.g. "BackupType") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *Backup) MarshalJSON() ([]byte, error) { +func (s Backup) MarshalJSON() ([]byte, error) { type NoMethod Backup - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackupInfo: Information about a backup. @@ -681,9 +700,9 @@ type BackupInfo struct { NullFields []string `json:"-"` } -func (s *BackupInfo) MarshalJSON() ([]byte, error) { +func (s BackupInfo) MarshalJSON() ([]byte, error) { type NoMethod BackupInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Binding: Associates `members`, or principals, with a `role`. @@ -780,9 +799,9 @@ type Binding struct { NullFields []string `json:"-"` } -func (s *Binding) MarshalJSON() ([]byte, error) { +func (s Binding) MarshalJSON() ([]byte, error) { type NoMethod Binding - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ChangeStreamConfig: Change stream configuration. 
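The Backup struct above gains BackupType and HotToStandardTime, whose timing rules compose as documented: expire_time must land between 6 hours and 90 days after creation, and hot-to-standard conversion no earlier than 24 hours after creation. A hedged sketch with the regenerated bigtableadmin/v2 client; all resource names are placeholders, and the Create/BackupId call shape is an assumption mirroring the REST backupId query parameter:

```go
package main

import (
	"context"
	"log"
	"time"

	bigtableadmin "google.golang.org/api/bigtableadmin/v2"
)

func main() {
	ctx := context.Background()
	svc, err := bigtableadmin.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	now := time.Now()
	backup := &bigtableadmin.Backup{
		SourceTable:       "projects/p/instances/i/tables/t",
		BackupType:        "HOT",
		ExpireTime:        now.Add(7 * 24 * time.Hour).Format(time.RFC3339), // within [6h, 90d]
		HotToStandardTime: now.Add(48 * time.Hour).Format(time.RFC3339),     // >= creation + 24h
	}
	op, err := svc.Projects.Instances.Clusters.Backups.
		Create("projects/p/instances/i/clusters/c", backup).
		BackupId("my-hot-backup").
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("started long-running operation: %s", op.Name)
}
```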
@@ -805,9 +824,9 @@ type ChangeStreamConfig struct { NullFields []string `json:"-"` } -func (s *ChangeStreamConfig) MarshalJSON() ([]byte, error) { +func (s ChangeStreamConfig) MarshalJSON() ([]byte, error) { type NoMethod ChangeStreamConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CheckConsistencyRequest: Request message for @@ -837,9 +856,9 @@ type CheckConsistencyRequest struct { NullFields []string `json:"-"` } -func (s *CheckConsistencyRequest) MarshalJSON() ([]byte, error) { +func (s CheckConsistencyRequest) MarshalJSON() ([]byte, error) { type NoMethod CheckConsistencyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CheckConsistencyResponse: Response message for @@ -864,9 +883,9 @@ type CheckConsistencyResponse struct { NullFields []string `json:"-"` } -func (s *CheckConsistencyResponse) MarshalJSON() ([]byte, error) { +func (s CheckConsistencyResponse) MarshalJSON() ([]byte, error) { type NoMethod CheckConsistencyResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Cluster: A resizable group of nodes in a particular cloud location, capable @@ -893,6 +912,17 @@ type Cluster struct { // Name: The unique name of the cluster. Values are of the form // `projects/{project}/instances/{instance}/clusters/a-z*`. Name string `json:"name,omitempty"` + // NodeScalingFactor: Immutable. The node scaling factor of this cluster. + // + // Possible values: + // "NODE_SCALING_FACTOR_UNSPECIFIED" - No node scaling specified. Defaults to + // NODE_SCALING_FACTOR_1X. + // "NODE_SCALING_FACTOR_1X" - The cluster is running with a scaling factor of + // 1. + // "NODE_SCALING_FACTOR_2X" - The cluster is running with a scaling factor of + // 2. All node count values must be in increments of 2 with this scaling factor + // enabled, otherwise an INVALID_ARGUMENT error will be returned. + NodeScalingFactor string `json:"nodeScalingFactor,omitempty"` // ServeNodes: The number of nodes in the cluster. If no value is set, Cloud // Bigtable automatically allocates nodes based on your data footprint and // optimized for 50% storage utilization. @@ -930,9 +960,9 @@ type Cluster struct { NullFields []string `json:"-"` } -func (s *Cluster) MarshalJSON() ([]byte, error) { +func (s Cluster) MarshalJSON() ([]byte, error) { type NoMethod Cluster - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ClusterAutoscalingConfig: Autoscaling config for a cluster. @@ -954,9 +984,9 @@ type ClusterAutoscalingConfig struct { NullFields []string `json:"-"` } -func (s *ClusterAutoscalingConfig) MarshalJSON() ([]byte, error) { +func (s ClusterAutoscalingConfig) MarshalJSON() ([]byte, error) { type NoMethod ClusterAutoscalingConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ClusterConfig: Configuration for a cluster. 
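The new NodeScalingFactor on Cluster interacts with ServeNodes as the enum text above describes: under NODE_SCALING_FACTOR_2X, node counts must come in increments of 2 or the API returns INVALID_ARGUMENT. A hedged construction sketch; resource names are placeholders, and the struct fields are taken from this regenerated package:

```go
package main

import (
	"fmt"
	"log"

	bigtableadmin "google.golang.org/api/bigtableadmin/v2"
)

func main() {
	// ServeNodes is a multiple of 2, as required by the 2X scaling factor.
	c := &bigtableadmin.Cluster{
		Location:           "projects/p/locations/us-central1-a",
		ServeNodes:         4,
		NodeScalingFactor:  "NODE_SCALING_FACTOR_2X",
		DefaultStorageType: "SSD",
	}
	b, err := c.MarshalJSON()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
}
```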
@@ -976,9 +1006,9 @@ type ClusterConfig struct { NullFields []string `json:"-"` } -func (s *ClusterConfig) MarshalJSON() ([]byte, error) { +func (s ClusterConfig) MarshalJSON() ([]byte, error) { type NoMethod ClusterConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ClusterState: The state of a table's data in a particular cluster. @@ -1022,9 +1052,9 @@ type ClusterState struct { NullFields []string `json:"-"` } -func (s *ClusterState) MarshalJSON() ([]byte, error) { +func (s ClusterState) MarshalJSON() ([]byte, error) { type NoMethod ClusterState - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ColumnFamily: A set of columns within a table which share a common @@ -1059,9 +1089,9 @@ type ColumnFamily struct { NullFields []string `json:"-"` } -func (s *ColumnFamily) MarshalJSON() ([]byte, error) { +func (s ColumnFamily) MarshalJSON() ([]byte, error) { type NoMethod ColumnFamily - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ColumnFamilyStats: Approximate statistics related to a single column family @@ -1104,9 +1134,9 @@ type ColumnFamilyStats struct { NullFields []string `json:"-"` } -func (s *ColumnFamilyStats) MarshalJSON() ([]byte, error) { +func (s ColumnFamilyStats) MarshalJSON() ([]byte, error) { type NoMethod ColumnFamilyStats - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *ColumnFamilyStats) UnmarshalJSON(data []byte) error { @@ -1149,9 +1179,9 @@ type CopyBackupMetadata struct { NullFields []string `json:"-"` } -func (s *CopyBackupMetadata) MarshalJSON() ([]byte, error) { +func (s CopyBackupMetadata) MarshalJSON() ([]byte, error) { type NoMethod CopyBackupMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CopyBackupRequest: The request for CopyBackup. 
@@ -1188,9 +1218,9 @@ type CopyBackupRequest struct { NullFields []string `json:"-"` } -func (s *CopyBackupRequest) MarshalJSON() ([]byte, error) { +func (s CopyBackupRequest) MarshalJSON() ([]byte, error) { type NoMethod CopyBackupRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateAuthorizedViewMetadata: The metadata for the Operation returned by @@ -1217,9 +1247,9 @@ type CreateAuthorizedViewMetadata struct { NullFields []string `json:"-"` } -func (s *CreateAuthorizedViewMetadata) MarshalJSON() ([]byte, error) { +func (s CreateAuthorizedViewMetadata) MarshalJSON() ([]byte, error) { type NoMethod CreateAuthorizedViewMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateAuthorizedViewRequest: The request for CreateAuthorizedView @@ -1249,9 +1279,9 @@ type CreateAuthorizedViewRequest struct { NullFields []string `json:"-"` } -func (s *CreateAuthorizedViewRequest) MarshalJSON() ([]byte, error) { +func (s CreateAuthorizedViewRequest) MarshalJSON() ([]byte, error) { type NoMethod CreateAuthorizedViewRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateBackupMetadata: Metadata type for the operation returned by @@ -1278,9 +1308,9 @@ type CreateBackupMetadata struct { NullFields []string `json:"-"` } -func (s *CreateBackupMetadata) MarshalJSON() ([]byte, error) { +func (s CreateBackupMetadata) MarshalJSON() ([]byte, error) { type NoMethod CreateBackupMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateClusterMetadata: The metadata for the Operation returned by @@ -1313,9 +1343,9 @@ type CreateClusterMetadata struct { NullFields []string `json:"-"` } -func (s *CreateClusterMetadata) MarshalJSON() ([]byte, error) { +func (s CreateClusterMetadata) MarshalJSON() ([]byte, error) { type NoMethod CreateClusterMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateClusterRequest: Request message for @@ -1344,9 +1374,9 @@ type CreateClusterRequest struct { NullFields []string `json:"-"` } -func (s *CreateClusterRequest) MarshalJSON() ([]byte, error) { +func (s CreateClusterRequest) MarshalJSON() ([]byte, error) { type NoMethod CreateClusterRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateInstanceMetadata: The metadata for the Operation returned by @@ -1373,9 +1403,9 @@ type CreateInstanceMetadata struct { NullFields []string `json:"-"` } -func (s *CreateInstanceMetadata) MarshalJSON() ([]byte, error) { +func (s CreateInstanceMetadata) MarshalJSON() ([]byte, error) { type NoMethod CreateInstanceMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateInstanceRequest: Request message for @@ -1409,9 +1439,9 @@ type CreateInstanceRequest struct { NullFields []string `json:"-"` } -func (s *CreateInstanceRequest) MarshalJSON() ([]byte, error) { +func (s 
CreateInstanceRequest) MarshalJSON() ([]byte, error) { type NoMethod CreateInstanceRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateTableRequest: Request message for @@ -1447,9 +1477,9 @@ type CreateTableRequest struct { NullFields []string `json:"-"` } -func (s *CreateTableRequest) MarshalJSON() ([]byte, error) { +func (s CreateTableRequest) MarshalJSON() ([]byte, error) { type NoMethod CreateTableRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DataBoostIsolationReadOnly: Data Boost is a serverless compute capability @@ -1479,9 +1509,9 @@ type DataBoostIsolationReadOnly struct { NullFields []string `json:"-"` } -func (s *DataBoostIsolationReadOnly) MarshalJSON() ([]byte, error) { +func (s DataBoostIsolationReadOnly) MarshalJSON() ([]byte, error) { type NoMethod DataBoostIsolationReadOnly - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DataBoostReadLocalWrites: Checks that all writes before the consistency @@ -1511,9 +1541,9 @@ type DropRowRangeRequest struct { NullFields []string `json:"-"` } -func (s *DropRowRangeRequest) MarshalJSON() ([]byte, error) { +func (s DropRowRangeRequest) MarshalJSON() ([]byte, error) { type NoMethod DropRowRangeRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Empty: A generic empty message that you can re-use to avoid defining @@ -1550,9 +1580,9 @@ type EncryptionConfig struct { NullFields []string `json:"-"` } -func (s *EncryptionConfig) MarshalJSON() ([]byte, error) { +func (s EncryptionConfig) MarshalJSON() ([]byte, error) { type NoMethod EncryptionConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // EncryptionInfo: Encryption information for a given resource. 
If this @@ -1596,9 +1626,9 @@ type EncryptionInfo struct { NullFields []string `json:"-"` } -func (s *EncryptionInfo) MarshalJSON() ([]byte, error) { +func (s EncryptionInfo) MarshalJSON() ([]byte, error) { type NoMethod EncryptionInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Expr: Represents a textual expression in the Common Expression Language @@ -1644,9 +1674,9 @@ type Expr struct { NullFields []string `json:"-"` } -func (s *Expr) MarshalJSON() ([]byte, error) { +func (s Expr) MarshalJSON() ([]byte, error) { type NoMethod Expr - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GcRule: Rule for determining which cells to delete during garbage @@ -1674,9 +1704,9 @@ type GcRule struct { NullFields []string `json:"-"` } -func (s *GcRule) MarshalJSON() ([]byte, error) { +func (s GcRule) MarshalJSON() ([]byte, error) { type NoMethod GcRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GenerateConsistencyTokenRequest: Request message for @@ -1705,9 +1735,9 @@ type GenerateConsistencyTokenResponse struct { NullFields []string `json:"-"` } -func (s *GenerateConsistencyTokenResponse) MarshalJSON() ([]byte, error) { +func (s GenerateConsistencyTokenResponse) MarshalJSON() ([]byte, error) { type NoMethod GenerateConsistencyTokenResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetIamPolicyRequest: Request message for `GetIamPolicy` method. @@ -1728,9 +1758,9 @@ type GetIamPolicyRequest struct { NullFields []string `json:"-"` } -func (s *GetIamPolicyRequest) MarshalJSON() ([]byte, error) { +func (s GetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod GetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetPolicyOptions: Encapsulates settings provided to GetIamPolicy. 
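The hunks just below add Min, Max, and HllppUniqueCount aggregators beside the existing Sum; an aggregate column-family type sets exactly one of them on GoogleBigtableAdminV2TypeAggregate. A hedged construction sketch against the regenerated package; the printed JSON shape is an assumption about gensupport's output, and real requests must give the input type a full encoding, which this sketch leaves out:

```go
package main

import (
	"fmt"
	"log"

	bigtableadmin "google.golang.org/api/bigtableadmin/v2"
)

func main() {
	// An Int64 MIN aggregate: the aggregator is chosen by setting
	// exactly one of Sum, Min, Max, or HllppUniqueCount.
	t := &bigtableadmin.Type{
		AggregateType: &bigtableadmin.GoogleBigtableAdminV2TypeAggregate{
			InputType: &bigtableadmin.Type{Int64Type: &bigtableadmin.GoogleBigtableAdminV2TypeInt64{}},
			Min:       &bigtableadmin.GoogleBigtableAdminV2TypeAggregateMin{},
		},
	}
	b, err := t.MarshalJSON()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // roughly {"aggregateType":{"inputType":{"int64Type":{}},"min":{}}}
}
```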
@@ -1760,9 +1790,9 @@ type GetPolicyOptions struct { NullFields []string `json:"-"` } -func (s *GetPolicyOptions) MarshalJSON() ([]byte, error) { +func (s GetPolicyOptions) MarshalJSON() ([]byte, error) { type NoMethod GetPolicyOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleBigtableAdminV2AuthorizedViewFamilySubsets: Subsets of a column family @@ -1789,9 +1819,9 @@ type GoogleBigtableAdminV2AuthorizedViewFamilySubsets struct { NullFields []string `json:"-"` } -func (s *GoogleBigtableAdminV2AuthorizedViewFamilySubsets) MarshalJSON() ([]byte, error) { +func (s GoogleBigtableAdminV2AuthorizedViewFamilySubsets) MarshalJSON() ([]byte, error) { type NoMethod GoogleBigtableAdminV2AuthorizedViewFamilySubsets - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleBigtableAdminV2AuthorizedViewSubsetView: Defines a simple @@ -1816,9 +1846,9 @@ type GoogleBigtableAdminV2AuthorizedViewSubsetView struct { NullFields []string `json:"-"` } -func (s *GoogleBigtableAdminV2AuthorizedViewSubsetView) MarshalJSON() ([]byte, error) { +func (s GoogleBigtableAdminV2AuthorizedViewSubsetView) MarshalJSON() ([]byte, error) { type NoMethod GoogleBigtableAdminV2AuthorizedViewSubsetView - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleBigtableAdminV2TypeAggregate: A value that combines incremental @@ -1826,32 +1856,57 @@ func (s *GoogleBigtableAdminV2AuthorizedViewSubsetView) MarshalJSON() ([]byte, e // using type `Aggregate`. Writes will provide either the `input_type` or // `state_type`, and reads will always return the `state_type` . type GoogleBigtableAdminV2TypeAggregate struct { + // HllppUniqueCount: HyperLogLogPlusPlusUniqueCount aggregator. + HllppUniqueCount *GoogleBigtableAdminV2TypeAggregateHyperLogLogPlusPlusUniqueCount `json:"hllppUniqueCount,omitempty"` // InputType: Type of the inputs that are accumulated by this `Aggregate`, // which must specify a full encoding. Use `AddInput` mutations to accumulate // new inputs. InputType *Type `json:"inputType,omitempty"` + // Max: Max aggregator. + Max *GoogleBigtableAdminV2TypeAggregateMax `json:"max,omitempty"` + // Min: Min aggregator. + Min *GoogleBigtableAdminV2TypeAggregateMin `json:"min,omitempty"` // StateType: Output only. Type that holds the internal accumulator state for // the `Aggregate`. This is a function of the `input_type` and `aggregator` // chosen, and will always specify a full encoding. StateType *Type `json:"stateType,omitempty"` // Sum: Sum aggregator. Sum *GoogleBigtableAdminV2TypeAggregateSum `json:"sum,omitempty"` - // ForceSendFields is a list of field names (e.g. "InputType") to + // ForceSendFields is a list of field names (e.g. "HllppUniqueCount") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "InputType") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. 
"HllppUniqueCount") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *GoogleBigtableAdminV2TypeAggregate) MarshalJSON() ([]byte, error) { +func (s GoogleBigtableAdminV2TypeAggregate) MarshalJSON() ([]byte, error) { type NoMethod GoogleBigtableAdminV2TypeAggregate - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// GoogleBigtableAdminV2TypeAggregateHyperLogLogPlusPlusUniqueCount: Computes +// an approximate unique count over the input values. When using raw data as +// input, be careful to use a consistent encoding. Otherwise the same value +// encoded differently could count more than once, or two distinct values could +// count as identical. Input: Any, or omit for Raw State: TBD Special state +// conversions: `Int64` (the unique count estimate) +type GoogleBigtableAdminV2TypeAggregateHyperLogLogPlusPlusUniqueCount struct { +} + +// GoogleBigtableAdminV2TypeAggregateMax: Computes the max of the input values. +// Allowed input: `Int64` State: same as input +type GoogleBigtableAdminV2TypeAggregateMax struct { +} + +// GoogleBigtableAdminV2TypeAggregateMin: Computes the min of the input values. +// Allowed input: `Int64` State: same as input +type GoogleBigtableAdminV2TypeAggregateMin struct { } // GoogleBigtableAdminV2TypeAggregateSum: Computes the sum of the input values. @@ -1859,6 +1914,35 @@ func (s *GoogleBigtableAdminV2TypeAggregate) MarshalJSON() ([]byte, error) { type GoogleBigtableAdminV2TypeAggregateSum struct { } +// GoogleBigtableAdminV2TypeArray: An ordered list of elements of a given type. +// Values of type `Array` are stored in `Value.array_value`. +type GoogleBigtableAdminV2TypeArray struct { + // ElementType: The type of the elements in the array. This must not be + // `Array`. + ElementType *Type `json:"elementType,omitempty"` + // ForceSendFields is a list of field names (e.g. "ElementType") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "ElementType") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleBigtableAdminV2TypeArray) MarshalJSON() ([]byte, error) { + type NoMethod GoogleBigtableAdminV2TypeArray + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// GoogleBigtableAdminV2TypeBool: bool Values of type `Bool` are stored in +// `Value.bool_value`. +type GoogleBigtableAdminV2TypeBool struct { +} + // GoogleBigtableAdminV2TypeBytes: Bytes Values of type `Bytes` are stored in // `Value.bytes_value`. 
type GoogleBigtableAdminV2TypeBytes struct { @@ -1877,9 +1961,9 @@ type GoogleBigtableAdminV2TypeBytes struct { NullFields []string `json:"-"` } -func (s *GoogleBigtableAdminV2TypeBytes) MarshalJSON() ([]byte, error) { +func (s GoogleBigtableAdminV2TypeBytes) MarshalJSON() ([]byte, error) { type NoMethod GoogleBigtableAdminV2TypeBytes - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleBigtableAdminV2TypeBytesEncoding: Rules used to convert to/from lower @@ -1900,16 +1984,31 @@ type GoogleBigtableAdminV2TypeBytesEncoding struct { NullFields []string `json:"-"` } -func (s *GoogleBigtableAdminV2TypeBytesEncoding) MarshalJSON() ([]byte, error) { +func (s GoogleBigtableAdminV2TypeBytesEncoding) MarshalJSON() ([]byte, error) { type NoMethod GoogleBigtableAdminV2TypeBytesEncoding - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleBigtableAdminV2TypeBytesEncodingRaw: Leaves the value "as-is" * -// Natural sort? Yes * Self-delimiting? No * Compatibility? N/A +// Order-preserving? Yes * Self-delimiting? No * Compatibility? N/A type GoogleBigtableAdminV2TypeBytesEncodingRaw struct { } +// GoogleBigtableAdminV2TypeDate: Date Values of type `Date` are stored in +// `Value.date_value`. +type GoogleBigtableAdminV2TypeDate struct { +} + +// GoogleBigtableAdminV2TypeFloat32: Float32 Values of type `Float32` are +// stored in `Value.float_value`. +type GoogleBigtableAdminV2TypeFloat32 struct { +} + +// GoogleBigtableAdminV2TypeFloat64: Float64 Values of type `Float64` are +// stored in `Value.float_value`. +type GoogleBigtableAdminV2TypeFloat64 struct { +} + // GoogleBigtableAdminV2TypeInt64: Int64 Values of type `Int64` are stored in // `Value.int_value`. type GoogleBigtableAdminV2TypeInt64 struct { @@ -1928,9 +2027,9 @@ type GoogleBigtableAdminV2TypeInt64 struct { NullFields []string `json:"-"` } -func (s *GoogleBigtableAdminV2TypeInt64) MarshalJSON() ([]byte, error) { +func (s GoogleBigtableAdminV2TypeInt64) MarshalJSON() ([]byte, error) { type NoMethod GoogleBigtableAdminV2TypeInt64 - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleBigtableAdminV2TypeInt64Encoding: Rules used to convert to/from lower @@ -1951,18 +2050,18 @@ type GoogleBigtableAdminV2TypeInt64Encoding struct { NullFields []string `json:"-"` } -func (s *GoogleBigtableAdminV2TypeInt64Encoding) MarshalJSON() ([]byte, error) { +func (s GoogleBigtableAdminV2TypeInt64Encoding) MarshalJSON() ([]byte, error) { type NoMethod GoogleBigtableAdminV2TypeInt64Encoding - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleBigtableAdminV2TypeInt64EncodingBigEndianBytes: Encodes the value as -// an 8-byte big endian twos complement `Bytes` value. * Natural sort? No +// an 8-byte big endian twos complement `Bytes` value. * Order-preserving? No // (positive values only) * Self-delimiting? Yes * Compatibility? - BigQuery // Federation `BINARY` encoding - HBase `Bytes.toBytes` - Java // `ByteBuffer.putLong()` with `ByteOrder.BIG_ENDIAN` type GoogleBigtableAdminV2TypeInt64EncodingBigEndianBytes struct { - // BytesType: The underlying `Bytes` type, which may be able to encode further. 
+ // BytesType: Deprecated: ignored if set. BytesType *GoogleBigtableAdminV2TypeBytes `json:"bytesType,omitempty"` // ForceSendFields is a list of field names (e.g. "BytesType") to // unconditionally include in API requests. By default, fields with empty or @@ -1977,9 +2076,154 @@ type GoogleBigtableAdminV2TypeInt64EncodingBigEndianBytes struct { NullFields []string `json:"-"` } -func (s *GoogleBigtableAdminV2TypeInt64EncodingBigEndianBytes) MarshalJSON() ([]byte, error) { +func (s GoogleBigtableAdminV2TypeInt64EncodingBigEndianBytes) MarshalJSON() ([]byte, error) { type NoMethod GoogleBigtableAdminV2TypeInt64EncodingBigEndianBytes - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// GoogleBigtableAdminV2TypeMap: A mapping of keys to values of a given type. +// Values of type `Map` are stored in a `Value.array_value` where each entry is +// another `Value.array_value` with two elements (the key and the value, in +// that order). Normally encoded Map values won't have repeated keys, however, +// clients are expected to handle the case in which they do. If the same key +// appears multiple times, the _last_ value takes precedence. +type GoogleBigtableAdminV2TypeMap struct { + // KeyType: The type of a map key. Only `Bytes`, `String`, and `Int64` are + // allowed as key types. + KeyType *Type `json:"keyType,omitempty"` + // ValueType: The type of the values in a map. + ValueType *Type `json:"valueType,omitempty"` + // ForceSendFields is a list of field names (e.g. "KeyType") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "KeyType") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleBigtableAdminV2TypeMap) MarshalJSON() ([]byte, error) { + type NoMethod GoogleBigtableAdminV2TypeMap + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// GoogleBigtableAdminV2TypeString: String Values of type `String` are stored +// in `Value.string_value`. +type GoogleBigtableAdminV2TypeString struct { + // Encoding: The encoding to use when converting to/from lower level types. + Encoding *GoogleBigtableAdminV2TypeStringEncoding `json:"encoding,omitempty"` + // ForceSendFields is a list of field names (e.g. "Encoding") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Encoding") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s GoogleBigtableAdminV2TypeString) MarshalJSON() ([]byte, error) { + type NoMethod GoogleBigtableAdminV2TypeString + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// GoogleBigtableAdminV2TypeStringEncoding: Rules used to convert to/from lower +// level types. +type GoogleBigtableAdminV2TypeStringEncoding struct { + // Utf8Bytes: Use `Utf8Bytes` encoding. + Utf8Bytes *GoogleBigtableAdminV2TypeStringEncodingUtf8Bytes `json:"utf8Bytes,omitempty"` + // Utf8Raw: Deprecated: if set, converts to an empty `utf8_bytes`. + Utf8Raw *GoogleBigtableAdminV2TypeStringEncodingUtf8Raw `json:"utf8Raw,omitempty"` + // ForceSendFields is a list of field names (e.g. "Utf8Bytes") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Utf8Bytes") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleBigtableAdminV2TypeStringEncoding) MarshalJSON() ([]byte, error) { + type NoMethod GoogleBigtableAdminV2TypeStringEncoding + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// GoogleBigtableAdminV2TypeStringEncodingUtf8Bytes: UTF-8 encoding * +// Order-preserving? Yes (code point order) * Self-delimiting? No * +// Compatibility? - BigQuery Federation `TEXT` encoding - HBase `Bytes.toBytes` +// - Java `String#getBytes(StandardCharsets.UTF_8)` +type GoogleBigtableAdminV2TypeStringEncodingUtf8Bytes struct { +} + +// GoogleBigtableAdminV2TypeStringEncodingUtf8Raw: Deprecated: prefer the +// equivalent `Utf8Bytes`. +type GoogleBigtableAdminV2TypeStringEncodingUtf8Raw struct { +} + +// GoogleBigtableAdminV2TypeStruct: A structured data value, consisting of +// fields which map to dynamically typed values. Values of type `Struct` are +// stored in `Value.array_value` where entries are in the same order and number +// as `field_types`. +type GoogleBigtableAdminV2TypeStruct struct { + // Fields: The names and types of the fields in this struct. + Fields []*GoogleBigtableAdminV2TypeStructField `json:"fields,omitempty"` + // ForceSendFields is a list of field names (e.g. "Fields") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Fields") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleBigtableAdminV2TypeStruct) MarshalJSON() ([]byte, error) { + type NoMethod GoogleBigtableAdminV2TypeStruct + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// GoogleBigtableAdminV2TypeStructField: A struct field and its type. +type GoogleBigtableAdminV2TypeStructField struct { + // FieldName: The field name (optional). 
Fields without a `field_name` are + // considered anonymous and cannot be referenced by name. + FieldName string `json:"fieldName,omitempty"` + // Type: The type of values in this field. + Type *Type `json:"type,omitempty"` + // ForceSendFields is a list of field names (e.g. "FieldName") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "FieldName") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleBigtableAdminV2TypeStructField) MarshalJSON() ([]byte, error) { + type NoMethod GoogleBigtableAdminV2TypeStructField + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// GoogleBigtableAdminV2TypeTimestamp: Timestamp Values of type `Timestamp` are +// stored in `Value.timestamp_value`. +type GoogleBigtableAdminV2TypeTimestamp struct { } // HotTablet: A tablet is a defined by a start and end key and is explained in @@ -2022,9 +2266,9 @@ type HotTablet struct { NullFields []string `json:"-"` } -func (s *HotTablet) MarshalJSON() ([]byte, error) { +func (s HotTablet) MarshalJSON() ([]byte, error) { type NoMethod HotTablet - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *HotTablet) UnmarshalJSON(data []byte) error { @@ -2105,9 +2349,9 @@ type Instance struct { NullFields []string `json:"-"` } -func (s *Instance) MarshalJSON() ([]byte, error) { +func (s Instance) MarshalJSON() ([]byte, error) { type NoMethod Instance - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Intersection: A GcRule which deletes cells matching all of the given rules. 
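Every hunk above follows the same mechanical change: the generated MarshalJSON methods move from pointer receivers to value receivers while keeping the local `type NoMethod X` alias that stops marshaling from recursing into itself. Below is a minimal sketch of that pattern, with plain encoding/json standing in for the package's gensupport.MarshalJSON helper (the real helper additionally honors ForceSendFields and NullFields; the simplified struct here is a stand-in, not the generated type):

package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-in for the generated struct of the same name.
type GetPolicyOptions struct {
	RequestedPolicyVersion int64 `json:"requestedPolicyVersion,omitempty"`
}

func (s GetPolicyOptions) MarshalJSON() ([]byte, error) {
	// NoMethod has GetPolicyOptions' fields but none of its methods,
	// so marshaling it cannot re-enter this MarshalJSON.
	type NoMethod GetPolicyOptions
	return json.Marshal(NoMethod(s))
}

func main() {
	b, _ := json.Marshal(GetPolicyOptions{RequestedPolicyVersion: 3})
	fmt.Println(string(b)) // {"requestedPolicyVersion":3}
}

The value receiver also puts MarshalJSON in the method set of the plain struct value, so the custom marshaler runs whether callers hold an X or a *X.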
@@ -2127,9 +2371,9 @@ type Intersection struct { NullFields []string `json:"-"` } -func (s *Intersection) MarshalJSON() ([]byte, error) { +func (s Intersection) MarshalJSON() ([]byte, error) { type NoMethod Intersection - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListAppProfilesResponse: Response message for @@ -2162,9 +2406,9 @@ type ListAppProfilesResponse struct { NullFields []string `json:"-"` } -func (s *ListAppProfilesResponse) MarshalJSON() ([]byte, error) { +func (s ListAppProfilesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListAppProfilesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListAuthorizedViewsResponse: Response message for @@ -2192,9 +2436,9 @@ type ListAuthorizedViewsResponse struct { NullFields []string `json:"-"` } -func (s *ListAuthorizedViewsResponse) MarshalJSON() ([]byte, error) { +func (s ListAuthorizedViewsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListAuthorizedViewsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListBackupsResponse: The response for ListBackups. @@ -2220,9 +2464,9 @@ type ListBackupsResponse struct { NullFields []string `json:"-"` } -func (s *ListBackupsResponse) MarshalJSON() ([]byte, error) { +func (s ListBackupsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListBackupsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListClustersResponse: Response message for @@ -2253,9 +2497,9 @@ type ListClustersResponse struct { NullFields []string `json:"-"` } -func (s *ListClustersResponse) MarshalJSON() ([]byte, error) { +func (s ListClustersResponse) MarshalJSON() ([]byte, error) { type NoMethod ListClustersResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListHotTabletsResponse: Response message for @@ -2287,9 +2531,9 @@ type ListHotTabletsResponse struct { NullFields []string `json:"-"` } -func (s *ListHotTabletsResponse) MarshalJSON() ([]byte, error) { +func (s ListHotTabletsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListHotTabletsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListInstancesResponse: Response message for @@ -2322,9 +2566,9 @@ type ListInstancesResponse struct { NullFields []string `json:"-"` } -func (s *ListInstancesResponse) MarshalJSON() ([]byte, error) { +func (s ListInstancesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListInstancesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListLocationsResponse: The response message for Locations.ListLocations. 
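The List*Response types in the hunks above all share one pagination contract: an empty NextPageToken means the listing is complete. A sketch of draining such an endpoint follows; the fetch callback and the trimmed-down response type are placeholders rather than APIs from this package (the generated call objects also ship a Pages helper that wraps this same loop):

package main

import "fmt"

// Simplified stand-in for the generated response types; the real ones
// hold *Table (etc.) values plus the usual ForceSendFields plumbing.
type ListTablesResponse struct {
	Tables        []string
	NextPageToken string
}

// listAll follows NextPageToken until it comes back empty, collecting
// every page's results along the way.
func listAll(fetch func(pageToken string) (*ListTablesResponse, error)) ([]string, error) {
	var all []string
	token := ""
	for {
		resp, err := fetch(token)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.Tables...)
		if resp.NextPageToken == "" {
			return all, nil
		}
		token = resp.NextPageToken
	}
}

func main() {
	// Fake two-page listing to exercise the loop.
	pages := []*ListTablesResponse{
		{Tables: []string{"t1", "t2"}, NextPageToken: "page-2"},
		{Tables: []string{"t3"}},
	}
	i := 0
	all, _ := listAll(func(string) (*ListTablesResponse, error) {
		resp := pages[i]
		i++
		return resp, nil
	})
	fmt.Println(all) // [t1 t2 t3]
}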
@@ -2350,9 +2594,9 @@ type ListLocationsResponse struct { NullFields []string `json:"-"` } -func (s *ListLocationsResponse) MarshalJSON() ([]byte, error) { +func (s ListLocationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListLocationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListOperationsResponse: The response message for Operations.ListOperations. @@ -2378,9 +2622,9 @@ type ListOperationsResponse struct { NullFields []string `json:"-"` } -func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { +func (s ListOperationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListOperationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListTablesResponse: Response message for @@ -2408,9 +2652,9 @@ type ListTablesResponse struct { NullFields []string `json:"-"` } -func (s *ListTablesResponse) MarshalJSON() ([]byte, error) { +func (s ListTablesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListTablesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Location: A resource that represents a Google Cloud location. @@ -2443,9 +2687,9 @@ type Location struct { NullFields []string `json:"-"` } -func (s *Location) MarshalJSON() ([]byte, error) { +func (s Location) MarshalJSON() ([]byte, error) { type NoMethod Location - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Modification: A create, update, or delete of a particular column family. @@ -2478,9 +2722,9 @@ type Modification struct { NullFields []string `json:"-"` } -func (s *Modification) MarshalJSON() ([]byte, error) { +func (s Modification) MarshalJSON() ([]byte, error) { type NoMethod Modification - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ModifyColumnFamiliesRequest: Request message for @@ -2507,9 +2751,9 @@ type ModifyColumnFamiliesRequest struct { NullFields []string `json:"-"` } -func (s *ModifyColumnFamiliesRequest) MarshalJSON() ([]byte, error) { +func (s ModifyColumnFamiliesRequest) MarshalJSON() ([]byte, error) { type NoMethod ModifyColumnFamiliesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MultiClusterRoutingUseAny: Read/write requests are routed to the nearest @@ -2522,6 +2766,9 @@ type MultiClusterRoutingUseAny struct { // will be tried in order of distance. If left empty, all clusters are // eligible. ClusterIds []string `json:"clusterIds,omitempty"` + // RowAffinity: Row affinity sticky routing based on the row key of the + // request. Requests that span multiple rows are routed non-deterministically. + RowAffinity *RowAffinity `json:"rowAffinity,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterIds") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. 
See @@ -2535,9 +2782,9 @@ type MultiClusterRoutingUseAny struct { NullFields []string `json:"-"` } -func (s *MultiClusterRoutingUseAny) MarshalJSON() ([]byte, error) { +func (s MultiClusterRoutingUseAny) MarshalJSON() ([]byte, error) { type NoMethod MultiClusterRoutingUseAny - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Operation: This resource represents a long-running operation that is the @@ -2582,9 +2829,9 @@ type Operation struct { NullFields []string `json:"-"` } -func (s *Operation) MarshalJSON() ([]byte, error) { +func (s Operation) MarshalJSON() ([]byte, error) { type NoMethod Operation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationProgress: Encapsulates progress related information for a Cloud @@ -2611,9 +2858,9 @@ type OperationProgress struct { NullFields []string `json:"-"` } -func (s *OperationProgress) MarshalJSON() ([]byte, error) { +func (s OperationProgress) MarshalJSON() ([]byte, error) { type NoMethod OperationProgress - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OptimizeRestoredTableMetadata: Metadata type for the long-running operation @@ -2638,9 +2885,9 @@ type OptimizeRestoredTableMetadata struct { NullFields []string `json:"-"` } -func (s *OptimizeRestoredTableMetadata) MarshalJSON() ([]byte, error) { +func (s OptimizeRestoredTableMetadata) MarshalJSON() ([]byte, error) { type NoMethod OptimizeRestoredTableMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PartialUpdateClusterMetadata: The metadata for the Operation returned by @@ -2666,9 +2913,9 @@ type PartialUpdateClusterMetadata struct { NullFields []string `json:"-"` } -func (s *PartialUpdateClusterMetadata) MarshalJSON() ([]byte, error) { +func (s PartialUpdateClusterMetadata) MarshalJSON() ([]byte, error) { type NoMethod PartialUpdateClusterMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PartialUpdateClusterRequest: Request message for @@ -2692,9 +2939,9 @@ type PartialUpdateClusterRequest struct { NullFields []string `json:"-"` } -func (s *PartialUpdateClusterRequest) MarshalJSON() ([]byte, error) { +func (s PartialUpdateClusterRequest) MarshalJSON() ([]byte, error) { type NoMethod PartialUpdateClusterRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PartialUpdateInstanceRequest: Request message for @@ -2719,9 +2966,9 @@ type PartialUpdateInstanceRequest struct { NullFields []string `json:"-"` } -func (s *PartialUpdateInstanceRequest) MarshalJSON() ([]byte, error) { +func (s PartialUpdateInstanceRequest) MarshalJSON() ([]byte, error) { type NoMethod PartialUpdateInstanceRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Policy: An Identity and Access Management (IAM) policy, which specifies @@ -2811,9 +3058,9 @@ type Policy struct { NullFields []string `json:"-"` } -func (s *Policy) 
MarshalJSON() ([]byte, error) { +func (s Policy) MarshalJSON() ([]byte, error) { type NoMethod Policy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RestoreInfo: Information about a table restore. @@ -2840,9 +3087,9 @@ type RestoreInfo struct { NullFields []string `json:"-"` } -func (s *RestoreInfo) MarshalJSON() ([]byte, error) { +func (s RestoreInfo) MarshalJSON() ([]byte, error) { type NoMethod RestoreInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RestoreTableMetadata: Metadata type for the long-running operation returned @@ -2881,9 +3128,9 @@ type RestoreTableMetadata struct { NullFields []string `json:"-"` } -func (s *RestoreTableMetadata) MarshalJSON() ([]byte, error) { +func (s RestoreTableMetadata) MarshalJSON() ([]byte, error) { type NoMethod RestoreTableMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RestoreTableRequest: The request for RestoreTable. @@ -2908,9 +3155,21 @@ type RestoreTableRequest struct { NullFields []string `json:"-"` } -func (s *RestoreTableRequest) MarshalJSON() ([]byte, error) { +func (s RestoreTableRequest) MarshalJSON() ([]byte, error) { type NoMethod RestoreTableRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// RowAffinity: If enabled, Bigtable will route the request based on the row +// key of the request, rather than randomly. Instead, each row key will be +// assigned to a cluster, and will stick to that cluster. If clusters are added +// or removed, then this may affect which row keys stick to which clusters. To +// avoid this, users can use a cluster group to specify which clusters are to +// be used. In this case, new clusters that are not a part of the cluster group +// will not be routed to, and routing will be unaffected by the new cluster. +// Moreover, clusters specified in the cluster group cannot be deleted unless +// removed from the cluster group. +type RowAffinity struct { } // SetIamPolicyRequest: Request message for `SetIamPolicy` method. @@ -2937,9 +3196,9 @@ type SetIamPolicyRequest struct { NullFields []string `json:"-"` } -func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { +func (s SetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod SetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SingleClusterRouting: Unconditionally routes all read/write requests to a @@ -2965,9 +3224,9 @@ type SingleClusterRouting struct { NullFields []string `json:"-"` } -func (s *SingleClusterRouting) MarshalJSON() ([]byte, error) { +func (s SingleClusterRouting) MarshalJSON() ([]byte, error) { type NoMethod SingleClusterRouting - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Split: An initial split point for a newly created table. 
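The RowAffinity type added above is a marker struct: merely setting it on MultiClusterRoutingUseAny opts an app profile into sticky per-row-key routing. A sketch, assuming this file is the vendored google.golang.org/api/bigtableadmin/v2 client (the import path is inferred from the type names, not shown in these hunks):

package main

import (
	"encoding/json"
	"fmt"

	bigtableadmin "google.golang.org/api/bigtableadmin/v2"
)

func main() {
	// ClusterIds optionally narrows the eligible clusters; RowAffinity
	// pins each row key to one of them instead of routing randomly.
	routing := &bigtableadmin.MultiClusterRoutingUseAny{
		ClusterIds:  []string{"cluster-a", "cluster-b"},
		RowAffinity: &bigtableadmin.RowAffinity{},
	}
	b, _ := json.Marshal(routing)
	fmt.Println(string(b)) // {"clusterIds":["cluster-a","cluster-b"],"rowAffinity":{}}
}

Per the doc comment above, listing the clusters explicitly (or using a cluster group) keeps row-to-cluster assignments stable when clusters are later added or removed.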
@@ -2987,9 +3246,9 @@ type Split struct { NullFields []string `json:"-"` } -func (s *Split) MarshalJSON() ([]byte, error) { +func (s Split) MarshalJSON() ([]byte, error) { type NoMethod Split - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StandardIsolation: Standard options for isolating this app profile's traffic @@ -3017,9 +3276,9 @@ type StandardIsolation struct { NullFields []string `json:"-"` } -func (s *StandardIsolation) MarshalJSON() ([]byte, error) { +func (s StandardIsolation) MarshalJSON() ([]byte, error) { type NoMethod StandardIsolation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StandardReadRemoteWrites: Checks that all writes before the consistency @@ -3056,9 +3315,9 @@ type Status struct { NullFields []string `json:"-"` } -func (s *Status) MarshalJSON() ([]byte, error) { +func (s Status) MarshalJSON() ([]byte, error) { type NoMethod Status - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Table: A collection of user data indexed by row, column, and timestamp. Each @@ -3126,9 +3385,9 @@ type Table struct { NullFields []string `json:"-"` } -func (s *Table) MarshalJSON() ([]byte, error) { +func (s Table) MarshalJSON() ([]byte, error) { type NoMethod Table - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TableProgress: Progress info for copying a table's data to the new cluster. @@ -3161,9 +3420,9 @@ type TableProgress struct { NullFields []string `json:"-"` } -func (s *TableProgress) MarshalJSON() ([]byte, error) { +func (s TableProgress) MarshalJSON() ([]byte, error) { type NoMethod TableProgress - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TableStats: Approximate statistics related to a table. 
These statistics are @@ -3205,9 +3464,9 @@ type TableStats struct { NullFields []string `json:"-"` } -func (s *TableStats) MarshalJSON() ([]byte, error) { +func (s TableStats) MarshalJSON() ([]byte, error) { type NoMethod TableStats - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *TableStats) UnmarshalJSON(data []byte) error { @@ -3246,9 +3505,9 @@ type TestIamPermissionsRequest struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsResponse: Response message for `TestIamPermissions` @@ -3273,43 +3532,55 @@ type TestIamPermissionsResponse struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Type: `Type` represents the type of data that is written to, read from, or // stored in Bigtable. It is heavily based on the GoogleSQL standard to help // maintain familiarity and consistency across products and features. For // compatibility with Bigtable's existing untyped APIs, each `Type` includes an -// `Encoding` which describes how to convert to/from the underlying data. This -// might involve composing a series of steps into an "encoding chain," for -// example to convert from INT64 -> STRING -> raw bytes. In most cases, a -// "link" in the encoding chain will be based an on existing GoogleSQL -// conversion function like `CAST`. Each link in the encoding chain also -// defines the following properties: * Natural sort: Does the encoded value -// sort consistently with the original typed value? Note that Bigtable will -// always sort data based on the raw encoded value, *not* the decoded type. - -// Example: BYTES values sort in the same order as their raw encodings. - -// Counterexample: Encoding INT64 to a fixed-width STRING does *not* preserve -// sort order when dealing with negative numbers. INT64(1) > INT64(-1), but -// STRING("-00001") > STRING("00001). - The overall encoding chain has this -// property if *every* link does. * Self-delimiting: If we concatenate two -// encoded values, can we always tell where the first one ends and the second -// one begins? - Example: If we encode INT64s to fixed-width STRINGs, the first -// value will always contain exactly N digits, possibly preceded by a sign. - -// Counterexample: If we concatenate two UTF-8 encoded STRINGs, we have no way -// to tell where the first one ends. - The overall encoding chain has this -// property if *any* link does. * Compatibility: Which other systems have -// matching encoding schemes? For example, does this encoding have a GoogleSQL -// equivalent? HBase? Java? +// `Encoding` which describes how to convert to/from the underlying data. Each +// encoding also defines the following properties: * Order-preserving: Does the +// encoded value sort consistently with the original typed value? 
Note that +// Bigtable will always sort data based on the raw encoded value, *not* the +// decoded type. - Example: BYTES values sort in the same order as their raw +// encodings. - Counterexample: Encoding INT64 as a fixed-width decimal string +// does *not* preserve sort order when dealing with negative numbers. `INT64(1) +// > INT64(-1)`, but `STRING("-00001") > STRING("00001)`. * Self-delimiting: If +// we concatenate two encoded values, can we always tell where the first one +// ends and the second one begins? - Example: If we encode INT64s to +// fixed-width STRINGs, the first value will always contain exactly N digits, +// possibly preceded by a sign. - Counterexample: If we concatenate two UTF-8 +// encoded STRINGs, we have no way to tell where the first one ends. * +// Compatibility: Which other systems have matching encoding schemes? For +// example, does this encoding have a GoogleSQL equivalent? HBase? Java? type Type struct { // AggregateType: Aggregate AggregateType *GoogleBigtableAdminV2TypeAggregate `json:"aggregateType,omitempty"` + // ArrayType: Array + ArrayType *GoogleBigtableAdminV2TypeArray `json:"arrayType,omitempty"` + // BoolType: Bool + BoolType *GoogleBigtableAdminV2TypeBool `json:"boolType,omitempty"` // BytesType: Bytes BytesType *GoogleBigtableAdminV2TypeBytes `json:"bytesType,omitempty"` + // DateType: Date + DateType *GoogleBigtableAdminV2TypeDate `json:"dateType,omitempty"` + // Float32Type: Float32 + Float32Type *GoogleBigtableAdminV2TypeFloat32 `json:"float32Type,omitempty"` + // Float64Type: Float64 + Float64Type *GoogleBigtableAdminV2TypeFloat64 `json:"float64Type,omitempty"` // Int64Type: Int64 Int64Type *GoogleBigtableAdminV2TypeInt64 `json:"int64Type,omitempty"` + // MapType: Map + MapType *GoogleBigtableAdminV2TypeMap `json:"mapType,omitempty"` + // StringType: String + StringType *GoogleBigtableAdminV2TypeString `json:"stringType,omitempty"` + // StructType: Struct + StructType *GoogleBigtableAdminV2TypeStruct `json:"structType,omitempty"` + // TimestampType: Timestamp + TimestampType *GoogleBigtableAdminV2TypeTimestamp `json:"timestampType,omitempty"` // ForceSendFields is a list of field names (e.g. "AggregateType") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. 
See @@ -3323,9 +3594,9 @@ type Type struct { NullFields []string `json:"-"` } -func (s *Type) MarshalJSON() ([]byte, error) { +func (s Type) MarshalJSON() ([]byte, error) { type NoMethod Type - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UndeleteTableMetadata: Metadata type for the operation returned by @@ -3350,9 +3621,9 @@ type UndeleteTableMetadata struct { NullFields []string `json:"-"` } -func (s *UndeleteTableMetadata) MarshalJSON() ([]byte, error) { +func (s UndeleteTableMetadata) MarshalJSON() ([]byte, error) { type NoMethod UndeleteTableMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UndeleteTableRequest: Request message for @@ -3377,9 +3648,9 @@ type Union struct { NullFields []string `json:"-"` } -func (s *Union) MarshalJSON() ([]byte, error) { +func (s Union) MarshalJSON() ([]byte, error) { type NoMethod Union - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateAppProfileMetadata: The metadata for the Operation returned by @@ -3411,9 +3682,9 @@ type UpdateAuthorizedViewMetadata struct { NullFields []string `json:"-"` } -func (s *UpdateAuthorizedViewMetadata) MarshalJSON() ([]byte, error) { +func (s UpdateAuthorizedViewMetadata) MarshalJSON() ([]byte, error) { type NoMethod UpdateAuthorizedViewMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateAuthorizedViewRequest: The request for UpdateAuthorizedView. 
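The rewritten `Type` comment above classifies every encoding by two properties, order-preserving and self-delimiting. Tying that to the new String support introduced in this patch, the sketch below declares a String type with the Utf8Bytes encoding, which the comments above describe as order-preserving (code point order) but not self-delimiting; the vendored import path is assumed as in the previous sketch:

package main

import (
	"encoding/json"
	"fmt"

	bigtableadmin "google.golang.org/api/bigtableadmin/v2"
)

func main() {
	// A typed String column using UTF-8 encoding. The nested field and
	// JSON tag names come from the structs added in this patch.
	t := &bigtableadmin.Type{
		StringType: &bigtableadmin.GoogleBigtableAdminV2TypeString{
			Encoding: &bigtableadmin.GoogleBigtableAdminV2TypeStringEncoding{
				Utf8Bytes: &bigtableadmin.GoogleBigtableAdminV2TypeStringEncodingUtf8Bytes{},
			},
		},
	}
	b, _ := json.Marshal(t)
	fmt.Println(string(b)) // {"stringType":{"encoding":{"utf8Bytes":{}}}}
}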
@@ -3447,9 +3718,9 @@ type UpdateAuthorizedViewRequest struct { NullFields []string `json:"-"` } -func (s *UpdateAuthorizedViewRequest) MarshalJSON() ([]byte, error) { +func (s UpdateAuthorizedViewRequest) MarshalJSON() ([]byte, error) { type NoMethod UpdateAuthorizedViewRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateClusterMetadata: The metadata for the Operation returned by @@ -3476,9 +3747,9 @@ type UpdateClusterMetadata struct { NullFields []string `json:"-"` } -func (s *UpdateClusterMetadata) MarshalJSON() ([]byte, error) { +func (s UpdateClusterMetadata) MarshalJSON() ([]byte, error) { type NoMethod UpdateClusterMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateInstanceMetadata: The metadata for the Operation returned by @@ -3505,9 +3776,9 @@ type UpdateInstanceMetadata struct { NullFields []string `json:"-"` } -func (s *UpdateInstanceMetadata) MarshalJSON() ([]byte, error) { +func (s UpdateInstanceMetadata) MarshalJSON() ([]byte, error) { type NoMethod UpdateInstanceMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateTableMetadata: Metadata type for the operation returned by @@ -3532,9 +3803,9 @@ type UpdateTableMetadata struct { NullFields []string `json:"-"` } -func (s *UpdateTableMetadata) MarshalJSON() ([]byte, error) { +func (s UpdateTableMetadata) MarshalJSON() ([]byte, error) { type NoMethod UpdateTableMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type OperationsGetCall struct { @@ -6064,7 +6335,7 @@ type ProjectsInstancesClustersBackupsCopyCall struct { // cluster located in the destination instance and project. // // - parent: The name of the destination cluster that will contain the backup -// copy. The cluster must already exists. Values are of the form: +// copy. The cluster must already exist. Values are of the form: // `projects/{project}/instances/{instance}/clusters/{cluster}`. func (r *ProjectsInstancesClustersBackupsService) Copy(parent string, copybackuprequest *CopyBackupRequest) *ProjectsInstancesClustersBackupsCopyCall { c := &ProjectsInstancesClustersBackupsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} diff --git a/terraform/providers/google/vendor/google.golang.org/api/certificatemanager/v1/certificatemanager-api.json b/terraform/providers/google/vendor/google.golang.org/api/certificatemanager/v1/certificatemanager-api.json new file mode 100644 index 00000000000..ead5a53b0dc --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/api/certificatemanager/v1/certificatemanager-api.json @@ -0,0 +1,2308 @@ +{ + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "See, edit, configure, and delete your Google Cloud data and see the email address for your Google Account." 
+ } + } + } + }, + "basePath": "", + "baseUrl": "https://certificatemanager.googleapis.com/", + "batchPath": "batch", + "canonicalName": "Certificate Manager", + "description": "", + "discoveryVersion": "v1", + "documentationLink": "https://cloud.google.com/certificate-manager", + "fullyEncodeReservedExpansion": true, + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "id": "certificatemanager:v1", + "kind": "discovery#restDescription", + "mtlsRootUrl": "https://certificatemanager.mtls.googleapis.com/", + "name": "certificatemanager", + "ownerDomain": "google.com", + "ownerName": "Google", + "parameters": { + "$.xgafv": { + "description": "V1 error format.", + "enum": [ + "1", + "2" + ], + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "type": "string" + }, + "access_token": { + "description": "OAuth access token.", + "location": "query", + "type": "string" + }, + "alt": { + "default": "json", + "description": "Data format for response.", + "enum": [ + "json", + "media", + "proto" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "type": "string" + }, + "callback": { + "description": "JSONP", + "location": "query", + "type": "string" + }, + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "location": "query", + "type": "string" + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query", + "type": "string" + }, + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "location": "query", + "type": "string" + }, + "prettyPrint": { + "default": "true", + "description": "Returns response with indentations and line breaks.", + "location": "query", + "type": "boolean" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "location": "query", + "type": "string" + }, + "uploadType": { + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "location": "query", + "type": "string" + }, + "upload_protocol": { + "description": "Upload protocol for media (e.g. 
\"raw\", \"multipart\").", + "location": "query", + "type": "string" + } + }, + "protocol": "rest", + "resources": { + "projects": { + "resources": { + "locations": { + "methods": { + "get": { + "description": "Gets information about a location.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}", + "httpMethod": "GET", + "id": "certificatemanager.projects.locations.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Resource name for the location.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Location" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists information about the supported locations for this service.", + "flatPath": "v1/projects/{projectsId}/locations", + "httpMethod": "GET", + "id": "certificatemanager.projects.locations.list", + "parameterOrder": [ + "name" + ], + "parameters": { + "filter": { + "description": "A filter to narrow down results to a preferred subset. The filtering language accepts strings like `\"displayName=tokyo\"`, and is documented in more detail in [AIP-160](https://google.aip.dev/160).", + "location": "query", + "type": "string" + }, + "name": { + "description": "The resource that owns the locations collection, if applicable.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "The maximum number of results to return. If not set, the service selects a default.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "A page token received from the `next_page_token` field in the response. Send that page token to receive the subsequent page.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}/locations", + "response": { + "$ref": "ListLocationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + }, + "resources": { + "certificateIssuanceConfigs": { + "methods": { + "create": { + "description": "Creates a new CertificateIssuanceConfig in a given project and location.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/certificateIssuanceConfigs", + "httpMethod": "POST", + "id": "certificatemanager.projects.locations.certificateIssuanceConfigs.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "certificateIssuanceConfigId": { + "description": "Required. A user-provided name of the certificate config.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The parent resource of the certificate issuance config. 
Must be in the format `projects/*/locations/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/certificateIssuanceConfigs", + "request": { + "$ref": "CertificateIssuanceConfig" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes a single CertificateIssuanceConfig.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/certificateIssuanceConfigs/{certificateIssuanceConfigsId}", + "httpMethod": "DELETE", + "id": "certificatemanager.projects.locations.certificateIssuanceConfigs.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. A name of the certificate issuance config to delete. Must be in the format `projects/*/locations/*/certificateIssuanceConfigs/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/certificateIssuanceConfigs/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets details of a single CertificateIssuanceConfig.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/certificateIssuanceConfigs/{certificateIssuanceConfigsId}", + "httpMethod": "GET", + "id": "certificatemanager.projects.locations.certificateIssuanceConfigs.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. A name of the certificate issuance config to describe. Must be in the format `projects/*/locations/*/certificateIssuanceConfigs/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/certificateIssuanceConfigs/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "CertificateIssuanceConfig" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists CertificateIssuanceConfigs in a given project and location.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/certificateIssuanceConfigs", + "httpMethod": "GET", + "id": "certificatemanager.projects.locations.certificateIssuanceConfigs.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "filter": { + "description": "Optional. Filter expression to restrict the Certificates Configs returned.", + "location": "query", + "type": "string" + }, + "orderBy": { + "description": "Optional. A list of Certificate Config field names used to specify the order of the returned results. The default sorting order is ascending. To specify descending order for a field, add a suffix `\" desc\"`.", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "Optional. Maximum number of certificate configs to return per call.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. The value returned by the last `ListCertificateIssuanceConfigsResponse`. Indicates that this is a continuation of a prior `ListCertificateIssuanceConfigs` call, and that the system should return the next page of data.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. 
The project and location from which the certificate should be listed, specified in the format `projects/*/locations/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/certificateIssuanceConfigs", + "response": { + "$ref": "ListCertificateIssuanceConfigsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates a CertificateIssuanceConfig.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/certificateIssuanceConfigs/{certificateIssuanceConfigsId}", + "httpMethod": "PATCH", + "id": "certificatemanager.projects.locations.certificateIssuanceConfigs.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Identifier. A user-defined name of the certificate issuance config. CertificateIssuanceConfig names must be unique globally and match pattern `projects/*/locations/*/certificateIssuanceConfigs/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/certificateIssuanceConfigs/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Required. The update mask applies to the resource. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}", + "request": { + "$ref": "CertificateIssuanceConfig" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "certificateMaps": { + "methods": { + "create": { + "description": "Creates a new CertificateMap in a given project and location.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/certificateMaps", + "httpMethod": "POST", + "id": "certificatemanager.projects.locations.certificateMaps.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "certificateMapId": { + "description": "Required. A user-provided name of the certificate map.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The parent resource of the certificate map. Must be in the format `projects/*/locations/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/certificateMaps", + "request": { + "$ref": "CertificateMap" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes a single CertificateMap. A Certificate Map can't be deleted if it contains Certificate Map Entries. Remove all the entries from the map before calling this method.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/certificateMaps/{certificateMapsId}", + "httpMethod": "DELETE", + "id": "certificatemanager.projects.locations.certificateMaps.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. A name of the certificate map to delete. 
Must be in the format `projects/*/locations/*/certificateMaps/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/certificateMaps/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets details of a single CertificateMap.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/certificateMaps/{certificateMapsId}", + "httpMethod": "GET", + "id": "certificatemanager.projects.locations.certificateMaps.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. A name of the certificate map to describe. Must be in the format `projects/*/locations/*/certificateMaps/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/certificateMaps/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "CertificateMap" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists CertificateMaps in a given project and location.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/certificateMaps", + "httpMethod": "GET", + "id": "certificatemanager.projects.locations.certificateMaps.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "filter": { + "description": "Optional. Filter expression to restrict the Certificates Maps returned.", + "location": "query", + "type": "string" + }, + "orderBy": { + "description": "Optional. A list of Certificate Map field names used to specify the order of the returned results. The default sorting order is ascending. To specify descending order for a field, add a suffix `\" desc\"`.", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "Optional. Maximum number of certificate maps to return per call.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. The value returned by the last `ListCertificateMapsResponse`. Indicates that this is a continuation of a prior `ListCertificateMaps` call, and that the system should return the next page of data.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The project and location from which the certificate maps should be listed, specified in the format `projects/*/locations/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/certificateMaps", + "response": { + "$ref": "ListCertificateMapsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates a CertificateMap.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/certificateMaps/{certificateMapsId}", + "httpMethod": "PATCH", + "id": "certificatemanager.projects.locations.certificateMaps.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Identifier. A user-defined name of the Certificate Map. Certificate Map names must be unique globally and match pattern `projects/*/locations/*/certificateMaps/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/certificateMaps/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Required. The update mask applies to the resource. 
For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}", + "request": { + "$ref": "CertificateMap" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + }, + "resources": { + "certificateMapEntries": { + "methods": { + "create": { + "description": "Creates a new CertificateMapEntry in a given project and location.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/certificateMaps/{certificateMapsId}/certificateMapEntries", + "httpMethod": "POST", + "id": "certificatemanager.projects.locations.certificateMaps.certificateMapEntries.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "certificateMapEntryId": { + "description": "Required. A user-provided name of the certificate map entry.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The parent resource of the certificate map entry. Must be in the format `projects/*/locations/*/certificateMaps/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/certificateMaps/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/certificateMapEntries", + "request": { + "$ref": "CertificateMapEntry" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes a single CertificateMapEntry.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/certificateMaps/{certificateMapsId}/certificateMapEntries/{certificateMapEntriesId}", + "httpMethod": "DELETE", + "id": "certificatemanager.projects.locations.certificateMaps.certificateMapEntries.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. A name of the certificate map entry to delete. Must be in the format `projects/*/locations/*/certificateMaps/*/certificateMapEntries/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/certificateMaps/[^/]+/certificateMapEntries/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets details of a single CertificateMapEntry.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/certificateMaps/{certificateMapsId}/certificateMapEntries/{certificateMapEntriesId}", + "httpMethod": "GET", + "id": "certificatemanager.projects.locations.certificateMaps.certificateMapEntries.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. A name of the certificate map entry to describe. 
Must be in the format `projects/*/locations/*/certificateMaps/*/certificateMapEntries/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/certificateMaps/[^/]+/certificateMapEntries/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "CertificateMapEntry" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists CertificateMapEntries in a given project and location.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/certificateMaps/{certificateMapsId}/certificateMapEntries", + "httpMethod": "GET", + "id": "certificatemanager.projects.locations.certificateMaps.certificateMapEntries.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "filter": { + "description": "Optional. Filter expression to restrict the returned Certificate Map Entries.", + "location": "query", + "type": "string" + }, + "orderBy": { + "description": "Optional. A list of Certificate Map Entry field names used to specify the order of the returned results. The default sorting order is ascending. To specify descending order for a field, add a suffix `\" desc\"`.", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "Optional. Maximum number of certificate map entries to return. The service may return fewer than this value. If unspecified, at most 50 certificate map entries will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. The value returned by the last `ListCertificateMapEntriesResponse`. Indicates that this is a continuation of a prior `ListCertificateMapEntries` call, and that the system should return the next page of data.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The project, location and certificate map from which the certificate map entries should be listed, specified in the format `projects/*/locations/*/certificateMaps/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/certificateMaps/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/certificateMapEntries", + "response": { + "$ref": "ListCertificateMapEntriesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates a CertificateMapEntry.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/certificateMaps/{certificateMapsId}/certificateMapEntries/{certificateMapEntriesId}", + "httpMethod": "PATCH", + "id": "certificatemanager.projects.locations.certificateMaps.certificateMapEntries.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Identifier. A user-defined name of the Certificate Map Entry. Certificate Map Entry names must be unique globally and match pattern `projects/*/locations/*/certificateMaps/*/certificateMapEntries/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/certificateMaps/[^/]+/certificateMapEntries/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Required. The update mask applies to the resource. 
For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}", + "request": { + "$ref": "CertificateMapEntry" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + }, + "certificates": { + "methods": { + "create": { + "description": "Creates a new Certificate in a given project and location.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/certificates", + "httpMethod": "POST", + "id": "certificatemanager.projects.locations.certificates.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "certificateId": { + "description": "Required. A user-provided name of the certificate.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The parent resource of the certificate. Must be in the format `projects/*/locations/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/certificates", + "request": { + "$ref": "Certificate" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes a single Certificate.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/certificates/{certificatesId}", + "httpMethod": "DELETE", + "id": "certificatemanager.projects.locations.certificates.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. A name of the certificate to delete. Must be in the format `projects/*/locations/*/certificates/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/certificates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets details of a single Certificate.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/certificates/{certificatesId}", + "httpMethod": "GET", + "id": "certificatemanager.projects.locations.certificates.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. A name of the certificate to describe. Must be in the format `projects/*/locations/*/certificates/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/certificates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Certificate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists Certificates in a given project and location.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/certificates", + "httpMethod": "GET", + "id": "certificatemanager.projects.locations.certificates.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "filter": { + "description": "Optional. Filter expression to restrict the Certificates returned.", + "location": "query", + "type": "string" + }, + "orderBy": { + "description": "Optional. A list of Certificate field names used to specify the order of the returned results. The default sorting order is ascending. 
To specify descending order for a field, add a suffix `\" desc\"`.", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "Optional. Maximum number of certificates to return per call.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. The value returned by the last `ListCertificatesResponse`. Indicates that this is a continuation of a prior `ListCertificates` call, and that the system should return the next page of data.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The project and location from which the certificates should be listed, specified in the format `projects/*/locations/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/certificates", + "response": { + "$ref": "ListCertificatesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates a Certificate.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/certificates/{certificatesId}", + "httpMethod": "PATCH", + "id": "certificatemanager.projects.locations.certificates.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Identifier. A user-defined name of the certificate. Certificate names must be unique globally and match pattern `projects/*/locations/*/certificates/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/certificates/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Required. The update mask applies to the resource. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}", + "request": { + "$ref": "Certificate" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "dnsAuthorizations": { + "methods": { + "create": { + "description": "Creates a new DnsAuthorization in a given project and location.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/dnsAuthorizations", + "httpMethod": "POST", + "id": "certificatemanager.projects.locations.dnsAuthorizations.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "dnsAuthorizationId": { + "description": "Required. A user-provided name of the dns authorization.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The parent resource of the dns authorization. Must be in the format `projects/*/locations/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/dnsAuthorizations", + "request": { + "$ref": "DnsAuthorization" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes a single DnsAuthorization.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/dnsAuthorizations/{dnsAuthorizationsId}", + "httpMethod": "DELETE", + "id": "certificatemanager.projects.locations.dnsAuthorizations.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. A name of the dns authorization to delete. 
Must be in the format `projects/*/locations/*/dnsAuthorizations/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/dnsAuthorizations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets details of a single DnsAuthorization.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/dnsAuthorizations/{dnsAuthorizationsId}", + "httpMethod": "GET", + "id": "certificatemanager.projects.locations.dnsAuthorizations.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. A name of the dns authorization to describe. Must be in the format `projects/*/locations/*/dnsAuthorizations/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/dnsAuthorizations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "DnsAuthorization" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists DnsAuthorizations in a given project and location.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/dnsAuthorizations", + "httpMethod": "GET", + "id": "certificatemanager.projects.locations.dnsAuthorizations.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "filter": { + "description": "Optional. Filter expression to restrict the Dns Authorizations returned.", + "location": "query", + "type": "string" + }, + "orderBy": { + "description": "Optional. A list of Dns Authorization field names used to specify the order of the returned results. The default sorting order is ascending. To specify descending order for a field, add a suffix `\" desc\"`.", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "Optional. Maximum number of dns authorizations to return per call.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. The value returned by the last `ListDnsAuthorizationsResponse`. Indicates that this is a continuation of a prior `ListDnsAuthorizations` call, and that the system should return the next page of data.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The project and location from which the dns authorizations should be listed, specified in the format `projects/*/locations/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/dnsAuthorizations", + "response": { + "$ref": "ListDnsAuthorizationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates a DnsAuthorization.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/dnsAuthorizations/{dnsAuthorizationsId}", + "httpMethod": "PATCH", + "id": "certificatemanager.projects.locations.dnsAuthorizations.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Identifier. A user-defined name of the dns authorization. DnsAuthorization names must be unique globally and match pattern `projects/*/locations/*/dnsAuthorizations/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/dnsAuthorizations/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Required. 
The update mask applies to the resource. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}", + "request": { + "$ref": "DnsAuthorization" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "operations": { + "methods": { + "cancel": { + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:cancel", + "httpMethod": "POST", + "id": "certificatemanager.projects.locations.operations.cancel", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource to be cancelled.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:cancel", + "request": { + "$ref": "CancelOperationRequest" + }, + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", + "httpMethod": "DELETE", + "id": "certificatemanager.projects.locations.operations.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource to be deleted.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", + "httpMethod": "GET", + "id": "certificatemanager.projects.locations.operations.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns `UNIMPLEMENTED`.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations", + "httpMethod": "GET", + "id": "certificatemanager.projects.locations.operations.list", + "parameterOrder": [ + "name" + ], + "parameters": { + "filter": { + "description": "The standard list filter.", + "location": "query", + "type": "string" + }, + "name": { + "description": "The name of the operation's parent resource.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "The standard list page size.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The standard list page token.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}/operations", + "response": { + "$ref": "ListOperationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "trustConfigs": { + "methods": { + "create": { + "description": "Creates a new TrustConfig in a given project and location.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/trustConfigs", + "httpMethod": "POST", + "id": "certificatemanager.projects.locations.trustConfigs.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The parent resource of the TrustConfig. Must be in the format `projects/*/locations/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + }, + "trustConfigId": { + "description": "Required. A user-provided name of the TrustConfig. Must match the regexp `[a-z0-9-]{1,63}`.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+parent}/trustConfigs", + "request": { + "$ref": "TrustConfig" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes a single TrustConfig.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/trustConfigs/{trustConfigsId}", + "httpMethod": "DELETE", + "id": "certificatemanager.projects.locations.trustConfigs.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "etag": { + "description": "Optional. The current etag of the TrustConfig. If an etag is provided and does not match the current etag of the resource, deletion will be blocked and an ABORTED error will be returned.", + "location": "query", + "type": "string" + }, + "name": { + "description": "Required. A name of the TrustConfig to delete. Must be in the format `projects/*/locations/*/trustConfigs/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/trustConfigs/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets details of a single TrustConfig.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/trustConfigs/{trustConfigsId}", + "httpMethod": "GET", + "id": "certificatemanager.projects.locations.trustConfigs.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. A name of the TrustConfig to describe. 
Must be in the format `projects/*/locations/*/trustConfigs/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/trustConfigs/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "TrustConfig" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists TrustConfigs in a given project and location.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/trustConfigs", + "httpMethod": "GET", + "id": "certificatemanager.projects.locations.trustConfigs.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "filter": { + "description": "Optional. Filter expression to restrict the TrustConfigs returned.", + "location": "query", + "type": "string" + }, + "orderBy": { + "description": "Optional. A list of TrustConfig field names used to specify the order of the returned results. The default sorting order is ascending. To specify descending order for a field, add a suffix `\" desc\"`.", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "Optional. Maximum number of TrustConfigs to return per call.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. The value returned by the last `ListTrustConfigsResponse`. Indicates that this is a continuation of a prior `ListTrustConfigs` call, and that the system should return the next page of data.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The project and location from which the TrustConfigs should be listed, specified in the format `projects/*/locations/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/trustConfigs", + "response": { + "$ref": "ListTrustConfigsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates a TrustConfig.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/trustConfigs/{trustConfigsId}", + "httpMethod": "PATCH", + "id": "certificatemanager.projects.locations.trustConfigs.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Identifier. A user-defined name of the trust config. TrustConfig names must be unique globally and match pattern `projects/*/locations/*/trustConfigs/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/trustConfigs/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Required. The update mask applies to the resource. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}", + "request": { + "$ref": "TrustConfig" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + } + } + } + }, + "revision": "20240729", + "rootUrl": "https://certificatemanager.googleapis.com/", + "schemas": { + "AllowlistedCertificate": { + "description": "Defines an allowlisted certificate.", + "id": "AllowlistedCertificate", + "properties": { + "pemCertificate": { + "description": "Required. PEM certificate that is allowlisted. 
The certificate can be up to 5k bytes, and must be a parseable X.509 certificate.", + "type": "string" + } + }, + "type": "object" + }, + "AuthorizationAttemptInfo": { + "description": "State of the latest attempt to authorize a domain for certificate issuance.", + "id": "AuthorizationAttemptInfo", + "properties": { + "details": { + "description": "Output only. Human readable explanation for reaching the state. Provided to help address the configuration issues. Not guaranteed to be stable. For programmatic access use FailureReason enum.", + "readOnly": true, + "type": "string" + }, + "domain": { + "description": "Output only. Domain name of the authorization attempt.", + "readOnly": true, + "type": "string" + }, + "failureReason": { + "description": "Output only. Reason for failure of the authorization attempt for the domain.", + "enum": [ + "FAILURE_REASON_UNSPECIFIED", + "CONFIG", + "CAA", + "RATE_LIMITED" + ], + "enumDescriptions": [ + "FailureReason is unspecified.", + "There was a problem with the user's DNS or load balancer configuration for this domain.", + "Certificate issuance forbidden by an explicit CAA record for the domain or a failure to check CAA records for the domain.", + "Reached a CA or internal rate-limit for the domain, e.g. for certificates per top-level private domain." + ], + "readOnly": true, + "type": "string" + }, + "state": { + "description": "Output only. State of the domain for managed certificate issuance.", + "enum": [ + "STATE_UNSPECIFIED", + "AUTHORIZING", + "AUTHORIZED", + "FAILED" + ], + "enumDescriptions": [ + "State is unspecified.", + "Certificate provisioning for this domain is under way. Google Cloud will attempt to authorize the domain.", + "A managed certificate can be provisioned, no issues for this domain.", + "Attempt to authorize the domain failed. This prevents the Managed Certificate from being issued. See `failure_reason` and `details` fields for more information." + ], + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "CancelOperationRequest": { + "description": "The request message for Operations.CancelOperation.", + "id": "CancelOperationRequest", + "properties": {}, + "type": "object" + }, + "Certificate": { + "description": "Defines TLS certificate.", + "id": "Certificate", + "properties": { + "createTime": { + "description": "Output only. The creation timestamp of a Certificate.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "description": { + "description": "Optional. One or more paragraphs of text description of a certificate.", + "type": "string" + }, + "expireTime": { + "description": "Output only. The expiry timestamp of a Certificate.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. Set of labels associated with a Certificate.", + "type": "object" + }, + "managed": { + "$ref": "ManagedCertificate", + "description": "If set, contains configuration and state of a managed certificate." + }, + "name": { + "description": "Identifier. A user-defined name of the certificate. Certificate names must be unique globally and match pattern `projects/*/locations/*/certificates/*`.", + "type": "string" + }, + "pemCertificate": { + "description": "Output only. The PEM-encoded certificate chain.", + "readOnly": true, + "type": "string" + }, + "sanDnsnames": { + "description": "Output only. 
The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6). Managed certificates that haven't been provisioned yet have this field populated with a value of the managed.domains field.", + "items": { + "type": "string" + }, + "readOnly": true, + "type": "array" + }, + "scope": { + "description": "Optional. Immutable. The scope of the certificate.", + "enum": [ + "DEFAULT", + "EDGE_CACHE", + "ALL_REGIONS" + ], + "enumDescriptions": [ + "Certificates with default scope are served from core Google data centers. If unsure, choose this option.", + "Certificates with scope EDGE_CACHE are special-purposed certificates, served from Edge Points of Presence. See https://cloud.google.com/vpc/docs/edge-locations.", + "Certificates with ALL_REGIONS scope are served from all Google Cloud regions. See https://cloud.google.com/compute/docs/regions-zones." + ], + "type": "string" + }, + "selfManaged": { + "$ref": "SelfManagedCertificate", + "description": "If set, defines data of a self-managed certificate." + }, + "updateTime": { + "description": "Output only. The last update timestamp of a Certificate.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "CertificateAuthorityConfig": { + "description": "The CA that issues the workload certificate. It includes CA address, type, authentication to CA service, etc.", + "id": "CertificateAuthorityConfig", + "properties": { + "certificateAuthorityServiceConfig": { + "$ref": "CertificateAuthorityServiceConfig", + "description": "Defines a CertificateAuthorityServiceConfig." + } + }, + "type": "object" + }, + "CertificateAuthorityServiceConfig": { + "description": "Contains information required to contact CA service.", + "id": "CertificateAuthorityServiceConfig", + "properties": { + "caPool": { + "description": "Required. A CA pool resource used to issue a certificate. The CA pool string has a relative resource path following the form \"projects/{project}/locations/{location}/caPools/{ca_pool}\".", + "type": "string" + } + }, + "type": "object" + }, + "CertificateIssuanceConfig": { + "description": "CertificateIssuanceConfig specifies how to issue and manage a certificate.", + "id": "CertificateIssuanceConfig", + "properties": { + "certificateAuthorityConfig": { + "$ref": "CertificateAuthorityConfig", + "description": "Required. The CA that issues the workload certificate. It includes the CA address, type, authentication to CA service, etc." + }, + "createTime": { + "description": "Output only. The creation timestamp of a CertificateIssuanceConfig.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "description": { + "description": "Optional. One or more paragraphs of text description of a CertificateIssuanceConfig.", + "type": "string" + }, + "keyAlgorithm": { + "description": "Required. The key algorithm to use when generating the private key.", + "enum": [ + "KEY_ALGORITHM_UNSPECIFIED", + "RSA_2048", + "ECDSA_P256" + ], + "enumDescriptions": [ + "Unspecified key algorithm.", + "Specifies RSA with a 2048-bit modulus.", + "Specifies ECDSA with curve P256." + ], + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. Set of labels associated with a CertificateIssuanceConfig.", + "type": "object" + }, + "lifetime": { + "description": "Required. 
Workload certificate lifetime requested.", + "format": "google-duration", + "type": "string" + }, + "name": { + "description": "Identifier. A user-defined name of the certificate issuance config. CertificateIssuanceConfig names must be unique globally and match pattern `projects/*/locations/*/certificateIssuanceConfigs/*`.", + "type": "string" + }, + "rotationWindowPercentage": { + "description": "Required. Specifies the percentage of elapsed time of the certificate lifetime to wait before renewing the certificate. Must be a number between 1-99, inclusive.", + "format": "int32", + "type": "integer" + }, + "updateTime": { + "description": "Output only. The last update timestamp of a CertificateIssuanceConfig.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "CertificateMap": { + "description": "Defines a collection of certificate configurations.", + "id": "CertificateMap", + "properties": { + "createTime": { + "description": "Output only. The creation timestamp of a Certificate Map.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "description": { + "description": "Optional. One or more paragraphs of text description of a certificate map.", + "type": "string" + }, + "gclbTargets": { + "description": "Output only. A list of GCLB targets that use this Certificate Map. A Target Proxy is only present on this list if it's attached to a Forwarding Rule.", + "items": { + "$ref": "GclbTarget" + }, + "readOnly": true, + "type": "array" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. Set of labels associated with a Certificate Map.", + "type": "object" + }, + "name": { + "description": "Identifier. A user-defined name of the Certificate Map. Certificate Map names must be unique globally and match pattern `projects/*/locations/*/certificateMaps/*`.", + "type": "string" + }, + "updateTime": { + "description": "Output only. The update timestamp of a Certificate Map.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "CertificateMapEntry": { + "description": "Defines a certificate map entry.", + "id": "CertificateMapEntry", + "properties": { + "certificates": { + "description": "Optional. A set of Certificates defined for the given `hostname`. There can be defined up to four certificates in each Certificate Map Entry. Each certificate must match pattern `projects/*/locations/*/certificates/*`.", + "items": { + "type": "string" + }, + "type": "array" + }, + "createTime": { + "description": "Output only. The creation timestamp of a Certificate Map Entry.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "description": { + "description": "Optional. One or more paragraphs of text description of a certificate map entry.", + "type": "string" + }, + "hostname": { + "description": "A Hostname (FQDN, e.g. `example.com`) or a wildcard hostname expression (`*.example.com`) for a set of hostnames with common suffix. Used as Server Name Indication (SNI) for selecting a proper certificate.", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. 
Set of labels associated with a Certificate Map Entry.", + "type": "object" + }, + "matcher": { + "description": "A predefined matcher for particular cases, other than SNI selection.", + "enum": [ + "MATCHER_UNSPECIFIED", + "PRIMARY" + ], + "enumDescriptions": [ + "A matcher hasn't been recognized.", + "A primary certificate that is served when SNI wasn't specified in the request or SNI couldn't be found in the map." + ], + "type": "string" + }, + "name": { + "description": "Identifier. A user-defined name of the Certificate Map Entry. Certificate Map Entry names must be unique globally and match pattern `projects/*/locations/*/certificateMaps/*/certificateMapEntries/*`.", + "type": "string" + }, + "state": { + "description": "Output only. A serving state of this Certificate Map Entry.", + "enum": [ + "SERVING_STATE_UNSPECIFIED", + "ACTIVE", + "PENDING" + ], + "enumDescriptions": [ + "The status is undefined.", + "The configuration is serving.", + "Update is in progress. Some frontends may serve this configuration." + ], + "readOnly": true, + "type": "string" + }, + "updateTime": { + "description": "Output only. The update timestamp of a Certificate Map Entry.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "DnsAuthorization": { + "description": "A DnsAuthorization resource describes a way to perform domain authorization for certificate issuance.", + "id": "DnsAuthorization", + "properties": { + "createTime": { + "description": "Output only. The creation timestamp of a DnsAuthorization.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "description": { + "description": "Optional. One or more paragraphs of text description of a DnsAuthorization.", + "type": "string" + }, + "dnsResourceRecord": { + "$ref": "DnsResourceRecord", + "description": "Output only. DNS Resource Record that needs to be added to DNS configuration.", + "readOnly": true + }, + "domain": { + "description": "Required. Immutable. A domain that is being authorized. A DnsAuthorization resource covers a single domain and its wildcard, e.g. authorization for `example.com` can be used to issue certificates for `example.com` and `*.example.com`.", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. Set of labels associated with a DnsAuthorization.", + "type": "object" + }, + "name": { + "description": "Identifier. A user-defined name of the dns authorization. DnsAuthorization names must be unique globally and match pattern `projects/*/locations/*/dnsAuthorizations/*`.", + "type": "string" + }, + "type": { + "description": "Optional. Immutable. Type of DnsAuthorization. If unset during resource creation the following default will be used: - in location `global`: FIXED_RECORD, - in other locations: PER_PROJECT_RECORD.", + "enum": [ + "TYPE_UNSPECIFIED", + "FIXED_RECORD", + "PER_PROJECT_RECORD" + ], + "enumDescriptions": [ + "Type is unspecified.", + "FIXED_RECORD DNS authorization uses DNS-01 validation method.", + "PER_PROJECT_RECORD DNS authorization allows for independent management of Google-managed certificates with DNS authorization across multiple projects." + ], + "type": "string" + }, + "updateTime": { + "description": "Output only. 
The last update timestamp of a DnsAuthorization.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "DnsResourceRecord": { + "description": "The structure describing the DNS Resource Record that needs to be added to DNS configuration for the authorization to be usable by certificate.", + "id": "DnsResourceRecord", + "properties": { + "data": { + "description": "Output only. Data of the DNS Resource Record.", + "readOnly": true, + "type": "string" + }, + "name": { + "description": "Output only. Fully qualified name of the DNS Resource Record. e.g. `_acme-challenge.example.com`", + "readOnly": true, + "type": "string" + }, + "type": { + "description": "Output only. Type of the DNS Resource Record. Currently always set to \"CNAME\".", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }", + "id": "Empty", + "properties": {}, + "type": "object" + }, + "GclbTarget": { + "description": "Describes a Target Proxy that uses this Certificate Map.", + "id": "GclbTarget", + "properties": { + "ipConfigs": { + "description": "Output only. IP configurations for this Target Proxy where the Certificate Map is serving.", + "items": { + "$ref": "IpConfig" + }, + "readOnly": true, + "type": "array" + }, + "targetHttpsProxy": { + "description": "Output only. This field returns the resource name in the following format: `//compute.googleapis.com/projects/*/global/targetHttpsProxies/*`.", + "readOnly": true, + "type": "string" + }, + "targetSslProxy": { + "description": "Output only. This field returns the resource name in the following format: `//compute.googleapis.com/projects/*/global/targetSslProxies/*`.", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "IntermediateCA": { + "description": "Defines an intermediate CA.", + "id": "IntermediateCA", + "properties": { + "pemCertificate": { + "description": "PEM intermediate certificate used for building up paths for validation. Each certificate provided in PEM format may occupy up to 5kB.", + "type": "string" + } + }, + "type": "object" + }, + "IpConfig": { + "description": "Defines IP configuration where this Certificate Map is serving.", + "id": "IpConfig", + "properties": { + "ipAddress": { + "description": "Output only. An external IP address.", + "readOnly": true, + "type": "string" + }, + "ports": { + "description": "Output only. Ports.", + "items": { + "format": "uint32", + "type": "integer" + }, + "readOnly": true, + "type": "array" + } + }, + "type": "object" + }, + "ListCertificateIssuanceConfigsResponse": { + "description": "Response for the `ListCertificateIssuanceConfigs` method.", + "id": "ListCertificateIssuanceConfigsResponse", + "properties": { + "certificateIssuanceConfigs": { + "description": "A list of certificate configs for the parent resource.", + "items": { + "$ref": "CertificateIssuanceConfig" + }, + "type": "array" + }, + "nextPageToken": { + "description": "If there might be more results than those appearing in this response, then `next_page_token` is included. 
To get the next set of results, call this method again using the value of `next_page_token` as `page_token`.", + "type": "string" + }, + "unreachable": { + "description": "Locations that could not be reached.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "ListCertificateMapEntriesResponse": { + "description": "Response for the `ListCertificateMapEntries` method.", + "id": "ListCertificateMapEntriesResponse", + "properties": { + "certificateMapEntries": { + "description": "A list of certificate map entries for the parent resource.", + "items": { + "$ref": "CertificateMapEntry" + }, + "type": "array" + }, + "nextPageToken": { + "description": "If there might be more results than those appearing in this response, then `next_page_token` is included. To get the next set of results, call this method again using the value of `next_page_token` as `page_token`.", + "type": "string" + }, + "unreachable": { + "description": "Locations that could not be reached.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "ListCertificateMapsResponse": { + "description": "Response for the `ListCertificateMaps` method.", + "id": "ListCertificateMapsResponse", + "properties": { + "certificateMaps": { + "description": "A list of certificate maps for the parent resource.", + "items": { + "$ref": "CertificateMap" + }, + "type": "array" + }, + "nextPageToken": { + "description": "If there might be more results than those appearing in this response, then `next_page_token` is included. To get the next set of results, call this method again using the value of `next_page_token` as `page_token`.", + "type": "string" + }, + "unreachable": { + "description": "Locations that could not be reached.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "ListCertificatesResponse": { + "description": "Response for the `ListCertificates` method.", + "id": "ListCertificatesResponse", + "properties": { + "certificates": { + "description": "A list of certificates for the parent resource.", + "items": { + "$ref": "Certificate" + }, + "type": "array" + }, + "nextPageToken": { + "description": "If there might be more results than those appearing in this response, then `next_page_token` is included. To get the next set of results, call this method again using the value of `next_page_token` as `page_token`.", + "type": "string" + }, + "unreachable": { + "description": "A list of locations that could not be reached.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "ListDnsAuthorizationsResponse": { + "description": "Response for the `ListDnsAuthorizations` method.", + "id": "ListDnsAuthorizationsResponse", + "properties": { + "dnsAuthorizations": { + "description": "A list of dns authorizations for the parent resource.", + "items": { + "$ref": "DnsAuthorization" + }, + "type": "array" + }, + "nextPageToken": { + "description": "If there might be more results than those appearing in this response, then `next_page_token` is included. 
To get the next set of results, call this method again using the value of `next_page_token` as `page_token`.", + "type": "string" + }, + "unreachable": { + "description": "Locations that could not be reached.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "ListLocationsResponse": { + "description": "The response message for Locations.ListLocations.", + "id": "ListLocationsResponse", + "properties": { + "locations": { + "description": "A list of locations that matches the specified filter in the request.", + "items": { + "$ref": "Location" + }, + "type": "array" + }, + "nextPageToken": { + "description": "The standard List next-page token.", + "type": "string" + } + }, + "type": "object" + }, + "ListOperationsResponse": { + "description": "The response message for Operations.ListOperations.", + "id": "ListOperationsResponse", + "properties": { + "nextPageToken": { + "description": "The standard List next-page token.", + "type": "string" + }, + "operations": { + "description": "A list of operations that matches the specified filter in the request.", + "items": { + "$ref": "Operation" + }, + "type": "array" + } + }, + "type": "object" + }, + "ListTrustConfigsResponse": { + "description": "Response for the `ListTrustConfigs` method.", + "id": "ListTrustConfigsResponse", + "properties": { + "nextPageToken": { + "description": "If there might be more results than those appearing in this response, then `next_page_token` is included. To get the next set of results, call this method again using the value of `next_page_token` as `page_token`.", + "type": "string" + }, + "trustConfigs": { + "description": "A list of TrustConfigs for the parent resource.", + "items": { + "$ref": "TrustConfig" + }, + "type": "array" + }, + "unreachable": { + "description": "Locations that could not be reached.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "Location": { + "description": "A resource that represents a Google Cloud location.", + "id": "Location", + "properties": { + "displayName": { + "description": "The friendly name for this location, typically a nearby city name. For example, \"Tokyo\".", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Cross-service attributes for the location. For example {\"cloud.googleapis.com/region\": \"us-east1\"}", + "type": "object" + }, + "locationId": { + "description": "The canonical id for this location. For example: `\"us-east1\"`.", + "type": "string" + }, + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata. For example the available capacity at the given location.", + "type": "object" + }, + "name": { + "description": "Resource name for the location, which may vary between implementations. For example: `\"projects/example-project/locations/us-east1\"`", + "type": "string" + } + }, + "type": "object" + }, + "ManagedCertificate": { + "description": "Configuration and state of a Managed Certificate. Certificate Manager provisions and renews Managed Certificates automatically, for as long as it's authorized to do so.", + "id": "ManagedCertificate", + "properties": { + "authorizationAttemptInfo": { + "description": "Output only. 
Detailed state of the latest authorization attempt for each domain specified for managed certificate resource.", + "items": { + "$ref": "AuthorizationAttemptInfo" + }, + "readOnly": true, + "type": "array" + }, + "dnsAuthorizations": { + "description": "Optional. Immutable. Authorizations that will be used for performing domain authorization.", + "items": { + "type": "string" + }, + "type": "array" + }, + "domains": { + "description": "Optional. Immutable. The domains for which a managed SSL certificate will be generated. Wildcard domains are only supported with DNS challenge resolution.", + "items": { + "type": "string" + }, + "type": "array" + }, + "issuanceConfig": { + "description": "Optional. Immutable. The resource name for a CertificateIssuanceConfig used to configure private PKI certificates in the format `projects/*/locations/*/certificateIssuanceConfigs/*`. If this field is not set, the certificates will instead be publicly signed as documented at https://cloud.google.com/load-balancing/docs/ssl-certificates/google-managed-certs#caa.", + "type": "string" + }, + "provisioningIssue": { + "$ref": "ProvisioningIssue", + "description": "Output only. Information about issues with provisioning a Managed Certificate.", + "readOnly": true + }, + "state": { + "description": "Output only. State of the managed certificate resource.", + "enum": [ + "STATE_UNSPECIFIED", + "PROVISIONING", + "FAILED", + "ACTIVE" + ], + "enumDescriptions": [ + "State is unspecified.", + "Certificate Manager attempts to provision or renew the certificate. If the process takes longer than expected, consult the `provisioning_issue` field.", + "Multiple certificate provisioning attempts failed and Certificate Manager gave up. To try again, delete and create a new managed Certificate resource. For details see the `provisioning_issue` field.", + "The certificate management is working, and a certificate has been provisioned." + ], + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "Operation": { + "description": "This resource represents a long-running operation that is the result of a network API call.", + "id": "Operation", + "properties": { + "done": { + "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", + "type": "boolean" + }, + "error": { + "$ref": "Status", + "description": "The error result of the operation in case of failure or cancellation." + }, + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", + "type": "object" + }, + "name": { + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.", + "type": "string" + }, + "response": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "The normal, successful response of the operation. 
If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", + "type": "object" + } + }, + "type": "object" + }, + "OperationMetadata": { + "description": "Represents the metadata of the long-running operation. Output only.", + "id": "OperationMetadata", + "properties": { + "apiVersion": { + "description": "API version used to start the operation.", + "type": "string" + }, + "createTime": { + "description": "The time the operation was created.", + "format": "google-datetime", + "type": "string" + }, + "endTime": { + "description": "The time the operation finished running.", + "format": "google-datetime", + "type": "string" + }, + "requestedCancellation": { + "description": "Identifies whether the user has requested cancellation of the operation. Operations that have successfully been cancelled have Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", + "type": "boolean" + }, + "statusMessage": { + "description": "Human-readable status of the operation, if any.", + "type": "string" + }, + "target": { + "description": "Server-defined resource path for the target of the operation.", + "type": "string" + }, + "verb": { + "description": "Name of the verb executed by the operation.", + "type": "string" + } + }, + "type": "object" + }, + "ProvisioningIssue": { + "description": "Information about issues with provisioning a Managed Certificate.", + "id": "ProvisioningIssue", + "properties": { + "details": { + "description": "Output only. Human readable explanation about the issue. Provided to help address the configuration issues. Not guaranteed to be stable. For programmatic access use Reason enum.", + "readOnly": true, + "type": "string" + }, + "reason": { + "description": "Output only. Reason for provisioning failures.", + "enum": [ + "REASON_UNSPECIFIED", + "AUTHORIZATION_ISSUE", + "RATE_LIMITED" + ], + "enumDescriptions": [ + "Reason is unspecified.", + "Certificate provisioning failed due to an issue with one or more of the domains on the certificate. For details of which domains failed, consult the `authorization_attempt_info` field.", + "Exceeded Certificate Authority quotas or internal rate limits of the system. Provisioning may take longer to complete." + ], + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "SelfManagedCertificate": { + "description": "Certificate data for a SelfManaged Certificate. SelfManaged Certificates are uploaded by the user. Updating such certificates before they expire remains the user's responsibility.", + "id": "SelfManagedCertificate", + "properties": { + "pemCertificate": { + "description": "Optional. Input only. The PEM-encoded certificate chain. Leaf certificate comes first, followed by intermediate ones if any.", + "type": "string" + }, + "pemPrivateKey": { + "description": "Optional. Input only. The PEM-encoded private key of the leaf certificate.", + "type": "string" + } + }, + "type": "object" + }, + "Status": { + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). 
Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", + "id": "Status", + "properties": { + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + }, + "details": { + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", + "items": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", + "type": "string" + } + }, + "type": "object" + }, + "TrustAnchor": { + "description": "Defines a trust anchor.", + "id": "TrustAnchor", + "properties": { + "pemCertificate": { + "description": "PEM root certificate of the PKI used for validation. Each certificate provided in PEM format may occupy up to 5kB.", + "type": "string" + } + }, + "type": "object" + }, + "TrustConfig": { + "description": "Defines a trust config.", + "id": "TrustConfig", + "properties": { + "allowlistedCertificates": { + "description": "Optional. A certificate matching an allowlisted certificate is always considered valid as long as the certificate is parseable, proof of private key possession is established, and constraints on the certificate's SAN field are met.", + "items": { + "$ref": "AllowlistedCertificate" + }, + "type": "array" + }, + "createTime": { + "description": "Output only. The creation timestamp of a TrustConfig.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "description": { + "description": "Optional. One or more paragraphs of text description of a TrustConfig.", + "type": "string" + }, + "etag": { + "description": "This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. Set of labels associated with a TrustConfig.", + "type": "object" + }, + "name": { + "description": "Identifier. A user-defined name of the trust config. TrustConfig names must be unique globally and match pattern `projects/*/locations/*/trustConfigs/*`.", + "type": "string" + }, + "trustStores": { + "description": "Optional. Set of trust stores to perform validation against. This field is supported when TrustConfig is configured with Load Balancers, currently not supported for SPIFFE certificate validation. Only one TrustStore specified is currently allowed.", + "items": { + "$ref": "TrustStore" + }, + "type": "array" + }, + "updateTime": { + "description": "Output only. The last update timestamp of a TrustConfig.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "TrustStore": { + "description": "Defines a trust store.", + "id": "TrustStore", + "properties": { + "intermediateCas": { + "description": "Optional. Set of intermediate CA certificates used for the path building phase of chain validation. 
The field is currently not supported if TrustConfig is used for the workload certificate feature.", + "items": { + "$ref": "IntermediateCA" + }, + "type": "array" + }, + "trustAnchors": { + "description": "Optional. List of Trust Anchors to be used while performing validation against a given TrustStore.", + "items": { + "$ref": "TrustAnchor" + }, + "type": "array" + } + }, + "type": "object" + } + }, + "servicePath": "", + "title": "Certificate Manager API", + "version": "v1", + "version_module": true +} \ No newline at end of file diff --git a/terraform/providers/google/vendor/google.golang.org/api/certificatemanager/v1/certificatemanager-gen.go b/terraform/providers/google/vendor/google.golang.org/api/certificatemanager/v1/certificatemanager-gen.go new file mode 100644 index 00000000000..991f20e2622 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/api/certificatemanager/v1/certificatemanager-gen.go @@ -0,0 +1,5674 @@ +// Copyright 2024 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated file. DO NOT EDIT. + +// Package certificatemanager provides access to the Certificate Manager API. +// +// For product documentation, see: https://cloud.google.com/certificate-manager +// +// # Library status +// +// These client libraries are officially supported by Google. However, this +// library is considered complete and is in maintenance mode. This means +// that we will address critical bugs and security issues but will not add +// any new features. +// +// When possible, we recommend using our newer +// [Cloud Client Libraries for Go](https://pkg.go.dev/cloud.google.com/go) +// that are still actively being worked and iterated on. +// +// # Creating a client +// +// Usage example: +// +// import "google.golang.org/api/certificatemanager/v1" +// ... +// ctx := context.Background() +// certificatemanagerService, err := certificatemanager.NewService(ctx) +// +// In this example, Google Application Default Credentials are used for +// authentication. For information on how to create and obtain Application +// Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// +// # Other authentication options +// +// To use an API key for authentication (note: some APIs do not support API +// keys), use [google.golang.org/api/option.WithAPIKey]: +// +// certificatemanagerService, err := certificatemanager.NewService(ctx, option.WithAPIKey("AIza...")) +// +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth +// flow), use [google.golang.org/api/option.WithTokenSource]: +// +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// certificatemanagerService, err := certificatemanager.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// +// See [google.golang.org/api/option.ClientOption] for details on options.
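+//
+// # Iterating over paged results
+//
+// List methods in this package return a single page of results together with
+// a `nextPageToken`. As a minimal illustrative sketch (reusing the ctx and
+// certificatemanagerService created above; the parent value is a placeholder
+// resource name), the generated Pages helper walks every page:
+//
+//	parent := "projects/my-project/locations/global"
+//	err := certificatemanagerService.Projects.Locations.Certificates.List(parent).
+//		Pages(ctx, func(page *certificatemanager.ListCertificatesResponse) error {
+//			for _, cert := range page.Certificates {
+//				fmt.Println(cert.Name)
+//			}
+//			return nil
+//		})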
+package certificatemanager // import "google.golang.org/api/certificatemanager/v1" + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + googleapi "google.golang.org/api/googleapi" + internal "google.golang.org/api/internal" + gensupport "google.golang.org/api/internal/gensupport" + option "google.golang.org/api/option" + internaloption "google.golang.org/api/option/internaloption" + htransport "google.golang.org/api/transport/http" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version + +const apiId = "certificatemanager:v1" +const apiName = "certificatemanager" +const apiVersion = "v1" +const basePath = "https://certificatemanager.googleapis.com/" +const basePathTemplate = "https://certificatemanager.UNIVERSE_DOMAIN/" +const mtlsBasePath = "https://certificatemanager.mtls.googleapis.com/" + +// OAuth2 scopes used by this API. +const ( + // See, edit, configure, and delete your Google Cloud data and see the email + // address for your Google Account. + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" +) + +// NewService creates a new Service. +func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { + scopesOption := internaloption.WithDefaultScopes( + "https://www.googleapis.com/auth/cloud-platform", + ) + // NOTE: prepend, so we don't override user-specified scopes. + opts = append([]option.ClientOption{scopesOption}, opts...) + opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultEndpointTemplate(basePathTemplate)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) + opts = append(opts, internaloption.EnableNewAuthLibrary()) + client, endpoint, err := htransport.NewClient(ctx, opts...) + if err != nil { + return nil, err + } + s, err := New(client) + if err != nil { + return nil, err + } + if endpoint != "" { + s.BasePath = endpoint + } + return s, nil +} + +// New creates a new Service. It uses the provided http.Client for requests. +// +// Deprecated: please use NewService instead. +// To provide a custom HTTP client, use option.WithHTTPClient. +// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead. 
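+//
+// For example, a minimal sketch of the recommended replacement (assuming an
+// existing *http.Client named httpClient):
+//
+//	svc, err := certificatemanager.NewService(ctx, option.WithHTTPClient(httpClient))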
+func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.Projects = NewProjectsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + Projects *ProjectsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewProjectsService(s *Service) *ProjectsService { + rs := &ProjectsService{s: s} + rs.Locations = NewProjectsLocationsService(s) + return rs +} + +type ProjectsService struct { + s *Service + + Locations *ProjectsLocationsService +} + +func NewProjectsLocationsService(s *Service) *ProjectsLocationsService { + rs := &ProjectsLocationsService{s: s} + rs.CertificateIssuanceConfigs = NewProjectsLocationsCertificateIssuanceConfigsService(s) + rs.CertificateMaps = NewProjectsLocationsCertificateMapsService(s) + rs.Certificates = NewProjectsLocationsCertificatesService(s) + rs.DnsAuthorizations = NewProjectsLocationsDnsAuthorizationsService(s) + rs.Operations = NewProjectsLocationsOperationsService(s) + rs.TrustConfigs = NewProjectsLocationsTrustConfigsService(s) + return rs +} + +type ProjectsLocationsService struct { + s *Service + + CertificateIssuanceConfigs *ProjectsLocationsCertificateIssuanceConfigsService + + CertificateMaps *ProjectsLocationsCertificateMapsService + + Certificates *ProjectsLocationsCertificatesService + + DnsAuthorizations *ProjectsLocationsDnsAuthorizationsService + + Operations *ProjectsLocationsOperationsService + + TrustConfigs *ProjectsLocationsTrustConfigsService +} + +func NewProjectsLocationsCertificateIssuanceConfigsService(s *Service) *ProjectsLocationsCertificateIssuanceConfigsService { + rs := &ProjectsLocationsCertificateIssuanceConfigsService{s: s} + return rs +} + +type ProjectsLocationsCertificateIssuanceConfigsService struct { + s *Service +} + +func NewProjectsLocationsCertificateMapsService(s *Service) *ProjectsLocationsCertificateMapsService { + rs := &ProjectsLocationsCertificateMapsService{s: s} + rs.CertificateMapEntries = NewProjectsLocationsCertificateMapsCertificateMapEntriesService(s) + return rs +} + +type ProjectsLocationsCertificateMapsService struct { + s *Service + + CertificateMapEntries *ProjectsLocationsCertificateMapsCertificateMapEntriesService +} + +func NewProjectsLocationsCertificateMapsCertificateMapEntriesService(s *Service) *ProjectsLocationsCertificateMapsCertificateMapEntriesService { + rs := &ProjectsLocationsCertificateMapsCertificateMapEntriesService{s: s} + return rs +} + +type ProjectsLocationsCertificateMapsCertificateMapEntriesService struct { + s *Service +} + +func NewProjectsLocationsCertificatesService(s *Service) *ProjectsLocationsCertificatesService { + rs := &ProjectsLocationsCertificatesService{s: s} + return rs +} + +type ProjectsLocationsCertificatesService struct { + s *Service +} + +func NewProjectsLocationsDnsAuthorizationsService(s *Service) *ProjectsLocationsDnsAuthorizationsService { + rs := &ProjectsLocationsDnsAuthorizationsService{s: s} + return rs +} + +type ProjectsLocationsDnsAuthorizationsService struct { + s *Service +} + +func NewProjectsLocationsOperationsService(s *Service) *ProjectsLocationsOperationsService { + rs := &ProjectsLocationsOperationsService{s: s} + return rs +} + +type ProjectsLocationsOperationsService struct { + s 
*Service +} + +func NewProjectsLocationsTrustConfigsService(s *Service) *ProjectsLocationsTrustConfigsService { + rs := &ProjectsLocationsTrustConfigsService{s: s} + return rs +} + +type ProjectsLocationsTrustConfigsService struct { + s *Service +} + +// AllowlistedCertificate: Defines an allowlisted certificate. +type AllowlistedCertificate struct { + // PemCertificate: Required. PEM certificate that is allowlisted. The + // certificate can be up to 5k bytes, and must be a parseable X.509 + // certificate. + PemCertificate string `json:"pemCertificate,omitempty"` + // ForceSendFields is a list of field names (e.g. "PemCertificate") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "PemCertificate") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s AllowlistedCertificate) MarshalJSON() ([]byte, error) { + type NoMethod AllowlistedCertificate + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// AuthorizationAttemptInfo: State of the latest attempt to authorize a domain +// for certificate issuance. +type AuthorizationAttemptInfo struct { + // Details: Output only. Human readable explanation for reaching the state. + // Provided to help address the configuration issues. Not guaranteed to be + // stable. For programmatic access use FailureReason enum. + Details string `json:"details,omitempty"` + // Domain: Output only. Domain name of the authorization attempt. + Domain string `json:"domain,omitempty"` + // FailureReason: Output only. Reason for failure of the authorization attempt + // for the domain. + // + // Possible values: + // "FAILURE_REASON_UNSPECIFIED" - FailureReason is unspecified. + // "CONFIG" - There was a problem with the user's DNS or load balancer + // configuration for this domain. + // "CAA" - Certificate issuance forbidden by an explicit CAA record for the + // domain or a failure to check CAA records for the domain. + // "RATE_LIMITED" - Reached a CA or internal rate-limit for the domain, e.g. + // for certificates per top-level private domain. + FailureReason string `json:"failureReason,omitempty"` + // State: Output only. State of the domain for managed certificate issuance. + // + // Possible values: + // "STATE_UNSPECIFIED" - State is unspecified. + // "AUTHORIZING" - Certificate provisioning for this domain is under way. + // Google Cloud will attempt to authorize the domain. + // "AUTHORIZED" - A managed certificate can be provisioned, no issues for + // this domain. + // "FAILED" - Attempt to authorize the domain failed. This prevents the + // Managed Certificate from being issued. See `failure_reason` and `details` + // fields for more information. + State string `json:"state,omitempty"` + // ForceSendFields is a list of field names (e.g. "Details") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. 
"Details") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s AuthorizationAttemptInfo) MarshalJSON() ([]byte, error) { + type NoMethod AuthorizationAttemptInfo + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// CancelOperationRequest: The request message for Operations.CancelOperation. +type CancelOperationRequest struct { +} + +// Certificate: Defines TLS certificate. +type Certificate struct { + // CreateTime: Output only. The creation timestamp of a Certificate. + CreateTime string `json:"createTime,omitempty"` + // Description: Optional. One or more paragraphs of text description of a + // certificate. + Description string `json:"description,omitempty"` + // ExpireTime: Output only. The expiry timestamp of a Certificate. + ExpireTime string `json:"expireTime,omitempty"` + // Labels: Optional. Set of labels associated with a Certificate. + Labels map[string]string `json:"labels,omitempty"` + // Managed: If set, contains configuration and state of a managed certificate. + Managed *ManagedCertificate `json:"managed,omitempty"` + // Name: Identifier. A user-defined name of the certificate. Certificate names + // must be unique globally and match pattern + // `projects/*/locations/*/certificates/*`. + Name string `json:"name,omitempty"` + // PemCertificate: Output only. The PEM-encoded certificate chain. + PemCertificate string `json:"pemCertificate,omitempty"` + // SanDnsnames: Output only. The list of Subject Alternative Names of dnsName + // type defined in the certificate (see RFC 5280 4.2.1.6). Managed certificates + // that haven't been provisioned yet have this field populated with a value of + // the managed.domains field. + SanDnsnames []string `json:"sanDnsnames,omitempty"` + // Scope: Optional. Immutable. The scope of the certificate. + // + // Possible values: + // "DEFAULT" - Certificates with default scope are served from core Google + // data centers. If unsure, choose this option. + // "EDGE_CACHE" - Certificates with scope EDGE_CACHE are special-purposed + // certificates, served from Edge Points of Presence. See + // https://cloud.google.com/vpc/docs/edge-locations. + // "ALL_REGIONS" - Certificates with ALL_REGIONS scope are served from all + // Google Cloud regions. See + // https://cloud.google.com/compute/docs/regions-zones. + Scope string `json:"scope,omitempty"` + // SelfManaged: If set, defines data of a self-managed certificate. + SelfManaged *SelfManagedCertificate `json:"selfManaged,omitempty"` + // UpdateTime: Output only. The last update timestamp of a Certificate. + UpdateTime string `json:"updateTime,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "CreateTime") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. 
See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s Certificate) MarshalJSON() ([]byte, error) { + type NoMethod Certificate + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// CertificateAuthorityConfig: The CA that issues the workload certificate. It +// includes CA address, type, authentication to CA service, etc. +type CertificateAuthorityConfig struct { + // CertificateAuthorityServiceConfig: Defines a + // CertificateAuthorityServiceConfig. + CertificateAuthorityServiceConfig *CertificateAuthorityServiceConfig `json:"certificateAuthorityServiceConfig,omitempty"` + // ForceSendFields is a list of field names (e.g. + // "CertificateAuthorityServiceConfig") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted from + // API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. + // "CertificateAuthorityServiceConfig") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted from API + // requests. See https://pkg.go.dev/google.golang.org/api#hdr-NullFields for + // more details. + NullFields []string `json:"-"` +} + +func (s CertificateAuthorityConfig) MarshalJSON() ([]byte, error) { + type NoMethod CertificateAuthorityConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// CertificateAuthorityServiceConfig: Contains information required to contact +// CA service. +type CertificateAuthorityServiceConfig struct { + // CaPool: Required. A CA pool resource used to issue a certificate. The CA + // pool string has a relative resource path following the form + // "projects/{project}/locations/{location}/caPools/{ca_pool}". + CaPool string `json:"caPool,omitempty"` + // ForceSendFields is a list of field names (e.g. "CaPool") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "CaPool") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s CertificateAuthorityServiceConfig) MarshalJSON() ([]byte, error) { + type NoMethod CertificateAuthorityServiceConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// CertificateIssuanceConfig: CertificateIssuanceConfig specifies how to issue +// and manage a certificate. +type CertificateIssuanceConfig struct { + // CertificateAuthorityConfig: Required. The CA that issues the workload + // certificate. It includes the CA address, type, authentication to CA service, + // etc. + CertificateAuthorityConfig *CertificateAuthorityConfig `json:"certificateAuthorityConfig,omitempty"` + // CreateTime: Output only. The creation timestamp of a + // CertificateIssuanceConfig. + CreateTime string `json:"createTime,omitempty"` + // Description: Optional. One or more paragraphs of text description of a + // CertificateIssuanceConfig. 
+ Description string `json:"description,omitempty"` + // KeyAlgorithm: Required. The key algorithm to use when generating the private + // key. + // + // Possible values: + // "KEY_ALGORITHM_UNSPECIFIED" - Unspecified key algorithm. + // "RSA_2048" - Specifies RSA with a 2048-bit modulus. + // "ECDSA_P256" - Specifies ECDSA with curve P256. + KeyAlgorithm string `json:"keyAlgorithm,omitempty"` + // Labels: Optional. Set of labels associated with a CertificateIssuanceConfig. + Labels map[string]string `json:"labels,omitempty"` + // Lifetime: Required. Workload certificate lifetime requested. + Lifetime string `json:"lifetime,omitempty"` + // Name: Identifier. A user-defined name of the certificate issuance config. + // CertificateIssuanceConfig names must be unique globally and match pattern + // `projects/*/locations/*/certificateIssuanceConfigs/*`. + Name string `json:"name,omitempty"` + // RotationWindowPercentage: Required. Specifies the percentage of elapsed time + // of the certificate lifetime to wait before renewing the certificate. Must be + // a number between 1-99, inclusive. + RotationWindowPercentage int64 `json:"rotationWindowPercentage,omitempty"` + // UpdateTime: Output only. The last update timestamp of a + // CertificateIssuanceConfig. + UpdateTime string `json:"updateTime,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "CertificateAuthorityConfig") + // to unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "CertificateAuthorityConfig") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s CertificateIssuanceConfig) MarshalJSON() ([]byte, error) { + type NoMethod CertificateIssuanceConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// CertificateMap: Defines a collection of certificate configurations. +type CertificateMap struct { + // CreateTime: Output only. The creation timestamp of a Certificate Map. + CreateTime string `json:"createTime,omitempty"` + // Description: Optional. One or more paragraphs of text description of a + // certificate map. + Description string `json:"description,omitempty"` + // GclbTargets: Output only. A list of GCLB targets that use this Certificate + // Map. A Target Proxy is only present on this list if it's attached to a + // Forwarding Rule. + GclbTargets []*GclbTarget `json:"gclbTargets,omitempty"` + // Labels: Optional. Set of labels associated with a Certificate Map. + Labels map[string]string `json:"labels,omitempty"` + // Name: Identifier. A user-defined name of the Certificate Map. Certificate + // Map names must be unique globally and match pattern + // `projects/*/locations/*/certificateMaps/*`. + Name string `json:"name,omitempty"` + // UpdateTime: Output only. The update timestamp of a Certificate Map. + UpdateTime string `json:"updateTime,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. 
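+	// As an illustrative sketch (svc and name are placeholder values), the
+	// embedded response metadata can be inspected after a read:
+	//
+	//	m, err := svc.Projects.Locations.CertificateMaps.Get(name).Do()
+	//	if err == nil && m.HTTPStatusCode == 200 {
+	//		// the CertificateMap in m was fetched successfully
+	//	}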
+ googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "CreateTime") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s CertificateMap) MarshalJSON() ([]byte, error) { + type NoMethod CertificateMap + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// CertificateMapEntry: Defines a certificate map entry. +type CertificateMapEntry struct { + // Certificates: Optional. A set of Certificates defined for the given + // `hostname`. Up to four certificates can be defined in each Certificate + // Map Entry. Each certificate must match pattern + // `projects/*/locations/*/certificates/*`. + Certificates []string `json:"certificates,omitempty"` + // CreateTime: Output only. The creation timestamp of a Certificate Map Entry. + CreateTime string `json:"createTime,omitempty"` + // Description: Optional. One or more paragraphs of text description of a + // certificate map entry. + Description string `json:"description,omitempty"` + // Hostname: A Hostname (FQDN, e.g. `example.com`) or a wildcard hostname + // expression (`*.example.com`) for a set of hostnames with a common suffix. Used + // as Server Name Indication (SNI) for selecting a proper certificate. + Hostname string `json:"hostname,omitempty"` + // Labels: Optional. Set of labels associated with a Certificate Map Entry. + Labels map[string]string `json:"labels,omitempty"` + // Matcher: A predefined matcher for particular cases, other than SNI + // selection. + // + // Possible values: + // "MATCHER_UNSPECIFIED" - A matcher hasn't been recognized. + // "PRIMARY" - A primary certificate that is served when SNI wasn't specified + // in the request or SNI couldn't be found in the map. + Matcher string `json:"matcher,omitempty"` + // Name: Identifier. A user-defined name of the Certificate Map Entry. + // Certificate Map Entry names must be unique globally and match pattern + // `projects/*/locations/*/certificateMaps/*/certificateMapEntries/*`. + Name string `json:"name,omitempty"` + // State: Output only. A serving state of this Certificate Map Entry. + // + // Possible values: + // "SERVING_STATE_UNSPECIFIED" - The status is undefined. + // "ACTIVE" - The configuration is serving. + // "PENDING" - Update is in progress. Some frontends may serve this + // configuration. + State string `json:"state,omitempty"` + // UpdateTime: Output only. The update timestamp of a Certificate Map Entry. + UpdateTime string `json:"updateTime,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Certificates") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g.
"Certificates") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s CertificateMapEntry) MarshalJSON() ([]byte, error) { + type NoMethod CertificateMapEntry + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// DnsAuthorization: A DnsAuthorization resource describes a way to perform +// domain authorization for certificate issuance. +type DnsAuthorization struct { + // CreateTime: Output only. The creation timestamp of a DnsAuthorization. + CreateTime string `json:"createTime,omitempty"` + // Description: Optional. One or more paragraphs of text description of a + // DnsAuthorization. + Description string `json:"description,omitempty"` + // DnsResourceRecord: Output only. DNS Resource Record that needs to be added + // to DNS configuration. + DnsResourceRecord *DnsResourceRecord `json:"dnsResourceRecord,omitempty"` + // Domain: Required. Immutable. A domain that is being authorized. A + // DnsAuthorization resource covers a single domain and its wildcard, e.g. + // authorization for `example.com` can be used to issue certificates for + // `example.com` and `*.example.com`. + Domain string `json:"domain,omitempty"` + // Labels: Optional. Set of labels associated with a DnsAuthorization. + Labels map[string]string `json:"labels,omitempty"` + // Name: Identifier. A user-defined name of the dns authorization. + // DnsAuthorization names must be unique globally and match pattern + // `projects/*/locations/*/dnsAuthorizations/*`. + Name string `json:"name,omitempty"` + // Type: Optional. Immutable. Type of DnsAuthorization. If unset during + // resource creation the following default will be used: - in location + // `global`: FIXED_RECORD, - in other locations: PER_PROJECT_RECORD. + // + // Possible values: + // "TYPE_UNSPECIFIED" - Type is unspecified. + // "FIXED_RECORD" - FIXED_RECORD DNS authorization uses DNS-01 validation + // method. + // "PER_PROJECT_RECORD" - PER_PROJECT_RECORD DNS authorization allows for + // independent management of Google-managed certificates with DNS authorization + // across multiple projects. + Type string `json:"type,omitempty"` + // UpdateTime: Output only. The last update timestamp of a DnsAuthorization. + UpdateTime string `json:"updateTime,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "CreateTime") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s DnsAuthorization) MarshalJSON() ([]byte, error) { + type NoMethod DnsAuthorization + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// DnsResourceRecord: The structure describing the DNS Resource Record that +// needs to be added to DNS configuration for the authorization to be usable by +// a certificate. +type DnsResourceRecord struct { + // Data: Output only. Data of the DNS Resource Record. + Data string `json:"data,omitempty"` + // Name: Output only. Fully qualified name of the DNS Resource Record. e.g. + // `_acme-challenge.example.com` + Name string `json:"name,omitempty"` + // Type: Output only. Type of the DNS Resource Record. Currently always set to + // "CNAME". + Type string `json:"type,omitempty"` + // ForceSendFields is a list of field names (e.g. "Data") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Data") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s DnsResourceRecord) MarshalJSON() ([]byte, error) { + type NoMethod DnsResourceRecord + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// Empty: A generic empty message that you can re-use to avoid defining +// duplicated empty messages in your APIs. A typical example is to use it as +// the request or the response type of an API method. For instance: service Foo +// { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } +type Empty struct { + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` +} + +// GclbTarget: Describes a Target Proxy that uses this Certificate Map. +type GclbTarget struct { + // IpConfigs: Output only. IP configurations for this Target Proxy where the + // Certificate Map is serving. + IpConfigs []*IpConfig `json:"ipConfigs,omitempty"` + // TargetHttpsProxy: Output only. This field returns the resource name in the + // following format: + // `//compute.googleapis.com/projects/*/global/targetHttpsProxies/*`. + TargetHttpsProxy string `json:"targetHttpsProxy,omitempty"` + // TargetSslProxy: Output only. This field returns the resource name in the + // following format: + // `//compute.googleapis.com/projects/*/global/targetSslProxies/*`. + TargetSslProxy string `json:"targetSslProxy,omitempty"` + // ForceSendFields is a list of field names (e.g. "IpConfigs") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "IpConfigs") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
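+	// As an illustrative sketch, GclbTarget values come back on CertificateMap
+	// reads and can be walked like this (certMap is a placeholder
+	// *CertificateMap):
+	//
+	//	for _, t := range certMap.GclbTargets {
+	//		for _, ip := range t.IpConfigs {
+	//			fmt.Println(ip.IpAddress, ip.Ports)
+	//		}
+	//	}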
+ NullFields []string `json:"-"` +} + +func (s GclbTarget) MarshalJSON() ([]byte, error) { + type NoMethod GclbTarget + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// IntermediateCA: Defines an intermediate CA. +type IntermediateCA struct { + // PemCertificate: PEM intermediate certificate used for building up paths for + // validation. Each certificate provided in PEM format may occupy up to 5kB. + PemCertificate string `json:"pemCertificate,omitempty"` + // ForceSendFields is a list of field names (e.g. "PemCertificate") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "PemCertificate") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s IntermediateCA) MarshalJSON() ([]byte, error) { + type NoMethod IntermediateCA + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// IpConfig: Defines IP configuration where this Certificate Map is serving. +type IpConfig struct { + // IpAddress: Output only. An external IP address. + IpAddress string `json:"ipAddress,omitempty"` + // Ports: Output only. Ports. + Ports []int64 `json:"ports,omitempty"` + // ForceSendFields is a list of field names (e.g. "IpAddress") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "IpAddress") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s IpConfig) MarshalJSON() ([]byte, error) { + type NoMethod IpConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ListCertificateIssuanceConfigsResponse: Response for the +// `ListCertificateIssuanceConfigs` method. +type ListCertificateIssuanceConfigsResponse struct { + // CertificateIssuanceConfigs: A list of certificate configs for the parent + // resource. + CertificateIssuanceConfigs []*CertificateIssuanceConfig `json:"certificateIssuanceConfigs,omitempty"` + // NextPageToken: If there might be more results than those appearing in this + // response, then `next_page_token` is included. To get the next set of + // results, call this method again using the value of `next_page_token` as + // `page_token`. + NextPageToken string `json:"nextPageToken,omitempty"` + // Unreachable: Locations that could not be reached. + Unreachable []string `json:"unreachable,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "CertificateIssuanceConfigs") + // to unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. 
See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "CertificateIssuanceConfigs") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ListCertificateIssuanceConfigsResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListCertificateIssuanceConfigsResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ListCertificateMapEntriesResponse: Response for the +// `ListCertificateMapEntries` method. +type ListCertificateMapEntriesResponse struct { + // CertificateMapEntries: A list of certificate map entries for the parent + // resource. + CertificateMapEntries []*CertificateMapEntry `json:"certificateMapEntries,omitempty"` + // NextPageToken: If there might be more results than those appearing in this + // response, then `next_page_token` is included. To get the next set of + // results, call this method again using the value of `next_page_token` as + // `page_token`. + NextPageToken string `json:"nextPageToken,omitempty"` + // Unreachable: Locations that could not be reached. + Unreachable []string `json:"unreachable,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "CertificateMapEntries") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "CertificateMapEntries") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ListCertificateMapEntriesResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListCertificateMapEntriesResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ListCertificateMapsResponse: Response for the `ListCertificateMaps` method. +type ListCertificateMapsResponse struct { + // CertificateMaps: A list of certificate maps for the parent resource. + CertificateMaps []*CertificateMap `json:"certificateMaps,omitempty"` + // NextPageToken: If there might be more results than those appearing in this + // response, then `next_page_token` is included. To get the next set of + // results, call this method again using the value of `next_page_token` as + // `page_token`. + NextPageToken string `json:"nextPageToken,omitempty"` + // Unreachable: Locations that could not be reached. + Unreachable []string `json:"unreachable,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "CertificateMaps") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. 
+ ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "CertificateMaps") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ListCertificateMapsResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListCertificateMapsResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ListCertificatesResponse: Response for the `ListCertificates` method. +type ListCertificatesResponse struct { + // Certificates: A list of certificates for the parent resource. + Certificates []*Certificate `json:"certificates,omitempty"` + // NextPageToken: If there might be more results than those appearing in this + // response, then `next_page_token` is included. To get the next set of + // results, call this method again using the value of `next_page_token` as + // `page_token`. + NextPageToken string `json:"nextPageToken,omitempty"` + // Unreachable: A list of locations that could not be reached. + Unreachable []string `json:"unreachable,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Certificates") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Certificates") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ListCertificatesResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListCertificatesResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ListDnsAuthorizationsResponse: Response for the `ListDnsAuthorizations` +// method. +type ListDnsAuthorizationsResponse struct { + // DnsAuthorizations: A list of dns authorizations for the parent resource. + DnsAuthorizations []*DnsAuthorization `json:"dnsAuthorizations,omitempty"` + // NextPageToken: If there might be more results than those appearing in this + // response, then `next_page_token` is included. To get the next set of + // results, call this method again using the value of `next_page_token` as + // `page_token`. + NextPageToken string `json:"nextPageToken,omitempty"` + // Unreachable: Locations that could not be reached. + Unreachable []string `json:"unreachable,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "DnsAuthorizations") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "DnsAuthorizations") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. 
See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ListDnsAuthorizationsResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListDnsAuthorizationsResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ListLocationsResponse: The response message for Locations.ListLocations. +type ListLocationsResponse struct { + // Locations: A list of locations that matches the specified filter in the + // request. + Locations []*Location `json:"locations,omitempty"` + // NextPageToken: The standard List next-page token. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Locations") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Locations") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ListLocationsResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListLocationsResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ListOperationsResponse: The response message for Operations.ListOperations. +type ListOperationsResponse struct { + // NextPageToken: The standard List next-page token. + NextPageToken string `json:"nextPageToken,omitempty"` + // Operations: A list of operations that matches the specified filter in the + // request. + Operations []*Operation `json:"operations,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "NextPageToken") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ListOperationsResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListOperationsResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ListTrustConfigsResponse: Response for the `ListTrustConfigs` method. +type ListTrustConfigsResponse struct { + // NextPageToken: If there might be more results than those appearing in this + // response, then `next_page_token` is included. To get the next set of + // results, call this method again using the value of `next_page_token` as + // `page_token`. + NextPageToken string `json:"nextPageToken,omitempty"` + // TrustConfigs: A list of TrustConfigs for the parent resource. 
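+	// A minimal manual pagination sketch (svc and parent are placeholder
+	// values):
+	//
+	//	token := ""
+	//	for {
+	//		resp, err := svc.Projects.Locations.TrustConfigs.List(parent).PageToken(token).Do()
+	//		if err != nil {
+	//			return err
+	//		}
+	//		// ... consume resp.TrustConfigs ...
+	//		if resp.NextPageToken == "" {
+	//			break
+	//		}
+	//		token = resp.NextPageToken
+	//	}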
+ TrustConfigs []*TrustConfig `json:"trustConfigs,omitempty"` + // Unreachable: Locations that could not be reached. + Unreachable []string `json:"unreachable,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "NextPageToken") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ListTrustConfigsResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListTrustConfigsResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// Location: A resource that represents a Google Cloud location. +type Location struct { + // DisplayName: The friendly name for this location, typically a nearby city + // name. For example, "Tokyo". + DisplayName string `json:"displayName,omitempty"` + // Labels: Cross-service attributes for the location. For example + // {"cloud.googleapis.com/region": "us-east1"} + Labels map[string]string `json:"labels,omitempty"` + // LocationId: The canonical id for this location. For example: "us-east1". + LocationId string `json:"locationId,omitempty"` + // Metadata: Service-specific metadata. For example the available capacity at + // the given location. + Metadata googleapi.RawMessage `json:"metadata,omitempty"` + // Name: Resource name for the location, which may vary between + // implementations. For example: + // "projects/example-project/locations/us-east1" + Name string `json:"name,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "DisplayName") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "DisplayName") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s Location) MarshalJSON() ([]byte, error) { + type NoMethod Location + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ManagedCertificate: Configuration and state of a Managed Certificate. +// Certificate Manager provisions and renews Managed Certificates +// automatically, for as long as it's authorized to do so. +type ManagedCertificate struct { + // AuthorizationAttemptInfo: Output only. Detailed state of the latest + // authorization attempt for each domain specified for managed certificate + // resource. + AuthorizationAttemptInfo []*AuthorizationAttemptInfo `json:"authorizationAttemptInfo,omitempty"` + // DnsAuthorizations: Optional. Immutable. 
Authorizations that will be used for + // performing domain authorization. + DnsAuthorizations []string `json:"dnsAuthorizations,omitempty"` + // Domains: Optional. Immutable. The domains for which a managed SSL + // certificate will be generated. Wildcard domains are only supported with DNS + // challenge resolution. + Domains []string `json:"domains,omitempty"` + // IssuanceConfig: Optional. Immutable. The resource name for a + // CertificateIssuanceConfig used to configure private PKI certificates in the + // format `projects/*/locations/*/certificateIssuanceConfigs/*`. If this field + // is not set, the certificates will instead be publicly signed as documented + // at + // https://cloud.google.com/load-balancing/docs/ssl-certificates/google-managed-certs#caa. + IssuanceConfig string `json:"issuanceConfig,omitempty"` + // ProvisioningIssue: Output only. Information about issues with provisioning a + // Managed Certificate. + ProvisioningIssue *ProvisioningIssue `json:"provisioningIssue,omitempty"` + // State: Output only. State of the managed certificate resource. + // + // Possible values: + // "STATE_UNSPECIFIED" - State is unspecified. + // "PROVISIONING" - Certificate Manager attempts to provision or renew the + // certificate. If the process takes longer than expected, consult the + // `provisioning_issue` field. + // "FAILED" - Multiple certificate provisioning attempts failed and + // Certificate Manager gave up. To try again, delete and create a new managed + // Certificate resource. For details see the `provisioning_issue` field. + // "ACTIVE" - The certificate management is working, and a certificate has + // been provisioned. + State string `json:"state,omitempty"` + // ForceSendFields is a list of field names (e.g. "AuthorizationAttemptInfo") + // to unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AuthorizationAttemptInfo") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ManagedCertificate) MarshalJSON() ([]byte, error) { + type NoMethod ManagedCertificate + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// Operation: This resource represents a long-running operation that is the +// result of a network API call. +type Operation struct { + // Done: If the value is `false`, it means the operation is still in progress. + // If `true`, the operation is completed, and either `error` or `response` is + // available. + Done bool `json:"done,omitempty"` + // Error: The error result of the operation in case of failure or cancellation. + Error *Status `json:"error,omitempty"` + // Metadata: Service-specific metadata associated with the operation. It + // typically contains progress information and common metadata such as create + // time. Some services might not provide such metadata. Any method that returns + // a long-running operation should document the metadata type, if any. + Metadata googleapi.RawMessage `json:"metadata,omitempty"` + // Name: The server-assigned name, which is only unique within the same service + // that originally returns it. 
If you use the default HTTP mapping, the `name` + // should be a resource name ending with `operations/{unique_id}`. + Name string `json:"name,omitempty"` + // Response: The normal, successful response of the operation. If the original + // method returns no data on success, such as `Delete`, the response is + // `google.protobuf.Empty`. If the original method is standard + // `Get`/`Create`/`Update`, the response should be the resource. For other + // methods, the response should have the type `XxxResponse`, where `Xxx` is the + // original method name. For example, if the original method name is + // `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`. + Response googleapi.RawMessage `json:"response,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Done") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Done") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s Operation) MarshalJSON() ([]byte, error) { + type NoMethod Operation + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// OperationMetadata: Represents the metadata of the long-running operation. +// Output only. +type OperationMetadata struct { + // ApiVersion: API version used to start the operation. + ApiVersion string `json:"apiVersion,omitempty"` + // CreateTime: The time the operation was created. + CreateTime string `json:"createTime,omitempty"` + // EndTime: The time the operation finished running. + EndTime string `json:"endTime,omitempty"` + // RequestedCancellation: Identifies whether the user has requested + // cancellation of the operation. Operations that have successfully been + // cancelled have Operation.error value with a google.rpc.Status.code of 1, + // corresponding to `Code.CANCELLED`. + RequestedCancellation bool `json:"requestedCancellation,omitempty"` + // StatusMessage: Human-readable status of the operation, if any. + StatusMessage string `json:"statusMessage,omitempty"` + // Target: Server-defined resource path for the target of the operation. + Target string `json:"target,omitempty"` + // Verb: Name of the verb executed by the operation. + Verb string `json:"verb,omitempty"` + // ForceSendFields is a list of field names (e.g. "ApiVersion") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "ApiVersion") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
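+	// For example, an illustrative sketch of decoding this metadata while
+	// polling an operation (opName is a placeholder resource name ending in
+	// operations/{unique_id}):
+	//
+	//	op, err := svc.Projects.Locations.Operations.Get(opName).Do()
+	//	if err == nil && op.Metadata != nil {
+	//		var md certificatemanager.OperationMetadata
+	//		if err := json.Unmarshal(op.Metadata, &md); err == nil {
+	//			fmt.Println(md.Verb, md.Target, op.Done)
+	//		}
+	//	}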
+ NullFields []string `json:"-"` +} + +func (s OperationMetadata) MarshalJSON() ([]byte, error) { + type NoMethod OperationMetadata + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ProvisioningIssue: Information about issues with provisioning a Managed +// Certificate. +type ProvisioningIssue struct { + // Details: Output only. Human readable explanation about the issue. Provided + // to help address the configuration issues. Not guaranteed to be stable. For + // programmatic access use Reason enum. + Details string `json:"details,omitempty"` + // Reason: Output only. Reason for provisioning failures. + // + // Possible values: + // "REASON_UNSPECIFIED" - Reason is unspecified. + // "AUTHORIZATION_ISSUE" - Certificate provisioning failed due to an issue + // with one or more of the domains on the certificate. For details of which + // domains failed, consult the `authorization_attempt_info` field. + // "RATE_LIMITED" - Exceeded Certificate Authority quotas or internal rate + // limits of the system. Provisioning may take longer to complete. + Reason string `json:"reason,omitempty"` + // ForceSendFields is a list of field names (e.g. "Details") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Details") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ProvisioningIssue) MarshalJSON() ([]byte, error) { + type NoMethod ProvisioningIssue + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SelfManagedCertificate: Certificate data for a SelfManaged Certificate. +// SelfManaged Certificates are uploaded by the user. Updating such +// certificates before they expire remains the user's responsibility. +type SelfManagedCertificate struct { + // PemCertificate: Optional. Input only. The PEM-encoded certificate chain. + // Leaf certificate comes first, followed by intermediate ones if any. + PemCertificate string `json:"pemCertificate,omitempty"` + // PemPrivateKey: Optional. Input only. The PEM-encoded private key of the leaf + // certificate. + PemPrivateKey string `json:"pemPrivateKey,omitempty"` + // ForceSendFields is a list of field names (e.g. "PemCertificate") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "PemCertificate") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
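+ // For example (illustrative): setting NullFields to []string{"PemPrivateKey"} + // causes `"pemPrivateKey": null` to be sent in the request body.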
+ NullFields []string `json:"-"` +} + +func (s SelfManagedCertificate) MarshalJSON() ([]byte, error) { + type NoMethod SelfManagedCertificate + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// Status: The `Status` type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by gRPC (https://github.com/grpc). Each `Status` message contains three +// pieces of data: error code, error message, and error details. You can find +// out more about this error model and how to work with it in the API Design +// Guide (https://cloud.google.com/apis/design/errors). +type Status struct { + // Code: The status code, which should be an enum value of google.rpc.Code. + Code int64 `json:"code,omitempty"` + // Details: A list of messages that carry the error details. There is a common + // set of message types for APIs to use. + Details []googleapi.RawMessage `json:"details,omitempty"` + // Message: A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // google.rpc.Status.details field, or localized by the client. + Message string `json:"message,omitempty"` + // ForceSendFields is a list of field names (e.g. "Code") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Code") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s Status) MarshalJSON() ([]byte, error) { + type NoMethod Status + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// TrustAnchor: Defines a trust anchor. +type TrustAnchor struct { + // PemCertificate: PEM root certificate of the PKI used for validation. Each + // certificate provided in PEM format may occupy up to 5kB. + PemCertificate string `json:"pemCertificate,omitempty"` + // ForceSendFields is a list of field names (e.g. "PemCertificate") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "PemCertificate") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s TrustAnchor) MarshalJSON() ([]byte, error) { + type NoMethod TrustAnchor + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// TrustConfig: Defines a trust config. +type TrustConfig struct { + // AllowlistedCertificates: Optional. A certificate matching an allowlisted + // certificate is always considered valid as long as the certificate is + // parseable, proof of private key possession is established, and constraints + // on the certificate's SAN field are met. 
+ AllowlistedCertificates []*AllowlistedCertificate `json:"allowlistedCertificates,omitempty"` + // CreateTime: Output only. The creation timestamp of a TrustConfig. + CreateTime string `json:"createTime,omitempty"` + // Description: Optional. One or more paragraphs of text description of a + // TrustConfig. + Description string `json:"description,omitempty"` + // Etag: This checksum is computed by the server based on the value of other + // fields, and may be sent on update and delete requests to ensure the client + // has an up-to-date value before proceeding. + Etag string `json:"etag,omitempty"` + // Labels: Optional. Set of labels associated with a TrustConfig. + Labels map[string]string `json:"labels,omitempty"` + // Name: Identifier. A user-defined name of the trust config. TrustConfig names + // must be unique globally and match pattern + // `projects/*/locations/*/trustConfigs/*`. + Name string `json:"name,omitempty"` + // TrustStores: Optional. Set of trust stores to perform validation against. + // This field is supported when TrustConfig is configured with Load Balancers, + // currently not supported for SPIFFE certificate validation. Only one + // TrustStore specified is currently allowed. + TrustStores []*TrustStore `json:"trustStores,omitempty"` + // UpdateTime: Output only. The last update timestamp of a TrustConfig. + UpdateTime string `json:"updateTime,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "AllowlistedCertificates") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AllowlistedCertificates") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s TrustConfig) MarshalJSON() ([]byte, error) { + type NoMethod TrustConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// TrustStore: Defines a trust store. +type TrustStore struct { + // IntermediateCas: Optional. Set of intermediate CA certificates used for the + // path building phase of chain validation. The field is currently not + // supported if TrustConfig is used for the workload certificate feature. + IntermediateCas []*IntermediateCA `json:"intermediateCas,omitempty"` + // TrustAnchors: Optional. List of Trust Anchors to be used while performing + // validation against a given TrustStore. + TrustAnchors []*TrustAnchor `json:"trustAnchors,omitempty"` + // ForceSendFields is a list of field names (e.g. "IntermediateCas") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "IntermediateCas") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s TrustStore) MarshalJSON() ([]byte, error) { + type NoMethod TrustStore + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type ProjectsLocationsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets information about a location. +// +// - name: Resource name for the location. +func (r *ProjectsLocationsService) Get(name string) *ProjectsLocationsGetCall { + c := &ProjectsLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsGetCall) Context(ctx context.Context) *ProjectsLocationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *Location.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsGetCall) Do(opts ...googleapi.CallOption) (*Location, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
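+ // A 304 Not Modified response (possible only when the caller set + // IfNoneMatch) is wrapped below in a *googleapi.Error so that callers can + // detect it with googleapi.IsNotModified.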
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Location{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsListCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists information about the supported locations for this service. +// +// - name: The resource that owns the locations collection, if applicable. +func (r *ProjectsLocationsService) List(name string) *ProjectsLocationsListCall { + c := &ProjectsLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Filter sets the optional parameter "filter": A filter to narrow down results +// to a preferred subset. The filtering language accepts strings like +// "displayName=tokyo", and is documented in more detail in AIP-160 +// (https://google.aip.dev/160). +func (c *ProjectsLocationsListCall) Filter(filter string) *ProjectsLocationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number of +// results to return. If not set, the service selects a default. +func (c *ProjectsLocationsListCall) PageSize(pageSize int64) *ProjectsLocationsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A page token received +// from the `next_page_token` field in the response. Send that page token to +// receive the subsequent page. +func (c *ProjectsLocationsListCall) PageToken(pageToken string) *ProjectsLocationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsListCall) Context(ctx context.Context) *ProjectsLocationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
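+// For example, to bill quota usage to a specific project (an illustrative +// sketch; "my-quota-project" is a placeholder): +// +//	c.Header().Set("X-Goog-User-Project", "my-quota-project")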
+func (c *ProjectsLocationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}/locations") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *ListLocationsResponse.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsLocationsListCall) Do(opts ...googleapi.CallOption) (*ListLocationsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ListLocationsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsListCall) Pages(ctx context.Context, f func(*ListLocationsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsLocationsCertificateIssuanceConfigsCreateCall struct { + s *Service + parent string + certificateissuanceconfig *CertificateIssuanceConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a new CertificateIssuanceConfig in a given project and +// location. +// +// - parent: The parent resource of the certificate issuance config. Must be in +// the format `projects/*/locations/*`. 
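+// +// Illustrative usage sketch (assumes svc is a *Service from NewService and cfg +// is a populated *CertificateIssuanceConfig; the returned Operation is +// long-running and should be polled to completion): +// +//	op, err := svc.Projects.Locations.CertificateIssuanceConfigs. +//		Create("projects/my-project/locations/global", cfg). +//		CertificateIssuanceConfigId("my-config"). +//		Do()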
+func (r *ProjectsLocationsCertificateIssuanceConfigsService) Create(parent string, certificateissuanceconfig *CertificateIssuanceConfig) *ProjectsLocationsCertificateIssuanceConfigsCreateCall { + c := &ProjectsLocationsCertificateIssuanceConfigsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.certificateissuanceconfig = certificateissuanceconfig + return c +} + +// CertificateIssuanceConfigId sets the optional parameter +// "certificateIssuanceConfigId": Required. A user-provided name of the +// certificate config. +func (c *ProjectsLocationsCertificateIssuanceConfigsCreateCall) CertificateIssuanceConfigId(certificateIssuanceConfigId string) *ProjectsLocationsCertificateIssuanceConfigsCreateCall { + c.urlParams_.Set("certificateIssuanceConfigId", certificateIssuanceConfigId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsCertificateIssuanceConfigsCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsCertificateIssuanceConfigsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsCertificateIssuanceConfigsCreateCall) Context(ctx context.Context) *ProjectsLocationsCertificateIssuanceConfigsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsCertificateIssuanceConfigsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsCertificateIssuanceConfigsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.certificateissuanceconfig) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/certificateIssuanceConfigs") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.certificateIssuanceConfigs.create" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsCertificateIssuanceConfigsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsCertificateIssuanceConfigsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a single CertificateIssuanceConfig. +// +// - name: A name of the certificate issuance config to delete. Must be in the +// format `projects/*/locations/*/certificateIssuanceConfigs/*`. +func (r *ProjectsLocationsCertificateIssuanceConfigsService) Delete(name string) *ProjectsLocationsCertificateIssuanceConfigsDeleteCall { + c := &ProjectsLocationsCertificateIssuanceConfigsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsCertificateIssuanceConfigsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsCertificateIssuanceConfigsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsCertificateIssuanceConfigsDeleteCall) Context(ctx context.Context) *ProjectsLocationsCertificateIssuanceConfigsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsCertificateIssuanceConfigsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsCertificateIssuanceConfigsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.certificateIssuanceConfigs.delete" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsCertificateIssuanceConfigsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
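+ // Deletion is asynchronous: on success the returned long-running Operation + // completes with a google.protobuf.Empty response (see the Operation type + // above).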
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsCertificateIssuanceConfigsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets details of a single CertificateIssuanceConfig. +// +// - name: A name of the certificate issuance config to describe. Must be in +// the format `projects/*/locations/*/certificateIssuanceConfigs/*`. +func (r *ProjectsLocationsCertificateIssuanceConfigsService) Get(name string) *ProjectsLocationsCertificateIssuanceConfigsGetCall { + c := &ProjectsLocationsCertificateIssuanceConfigsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsCertificateIssuanceConfigsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsCertificateIssuanceConfigsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsCertificateIssuanceConfigsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsCertificateIssuanceConfigsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsCertificateIssuanceConfigsGetCall) Context(ctx context.Context) *ProjectsLocationsCertificateIssuanceConfigsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsCertificateIssuanceConfigsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsCertificateIssuanceConfigsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.certificateIssuanceConfigs.get" call. +// Any non-2xx status code is an error. 
Response headers are in either +// *CertificateIssuanceConfig.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsCertificateIssuanceConfigsGetCall) Do(opts ...googleapi.CallOption) (*CertificateIssuanceConfig, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &CertificateIssuanceConfig{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsCertificateIssuanceConfigsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists CertificateIssuanceConfigs in a given project and location. +// +// - parent: The project and location from which the certificate issuance +// configs should be listed, specified in the format `projects/*/locations/*`. +func (r *ProjectsLocationsCertificateIssuanceConfigsService) List(parent string) *ProjectsLocationsCertificateIssuanceConfigsListCall { + c := &ProjectsLocationsCertificateIssuanceConfigsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// Filter sets the optional parameter "filter": Filter expression to restrict +// the Certificate Issuance Configs returned. +func (c *ProjectsLocationsCertificateIssuanceConfigsListCall) Filter(filter string) *ProjectsLocationsCertificateIssuanceConfigsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// OrderBy sets the optional parameter "orderBy": A list of Certificate Config +// field names used to specify the order of the returned results. The default +// sorting order is ascending. To specify descending order for a field, add a +// suffix " desc". +func (c *ProjectsLocationsCertificateIssuanceConfigsListCall) OrderBy(orderBy string) *ProjectsLocationsCertificateIssuanceConfigsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of +// certificate configs to return per call. +func (c *ProjectsLocationsCertificateIssuanceConfigsListCall) PageSize(pageSize int64) *ProjectsLocationsCertificateIssuanceConfigsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The value returned by the +// last `ListCertificateIssuanceConfigsResponse`. Indicates that this is a +// continuation of a prior `ListCertificateIssuanceConfigs` call, and that the +// system should return the next page of data. +func (c *ProjectsLocationsCertificateIssuanceConfigsListCall) PageToken(pageToken string) *ProjectsLocationsCertificateIssuanceConfigsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved.
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsCertificateIssuanceConfigsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsCertificateIssuanceConfigsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsCertificateIssuanceConfigsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsCertificateIssuanceConfigsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsCertificateIssuanceConfigsListCall) Context(ctx context.Context) *ProjectsLocationsCertificateIssuanceConfigsListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsCertificateIssuanceConfigsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsCertificateIssuanceConfigsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/certificateIssuanceConfigs") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.certificateIssuanceConfigs.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *ListCertificateIssuanceConfigsResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsCertificateIssuanceConfigsListCall) Do(opts ...googleapi.CallOption) (*ListCertificateIssuanceConfigsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ListCertificateIssuanceConfigsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
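+// +// Illustrative usage sketch (assumes ctx is a context.Context, svc is a +// *Service, and parent names a `projects/*/locations/*` resource; the +// CertificateIssuanceConfigs response field follows the generated naming): +// +//	call := svc.Projects.Locations.CertificateIssuanceConfigs.List(parent) +//	err := call.Pages(ctx, func(page *ListCertificateIssuanceConfigsResponse) error { +//		for _, cfg := range page.CertificateIssuanceConfigs { +//			log.Println(cfg.Name) +//		} +//		return nil // a non-nil error halts the iteration +//	})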
+func (c *ProjectsLocationsCertificateIssuanceConfigsListCall) Pages(ctx context.Context, f func(*ListCertificateIssuanceConfigsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsLocationsCertificateIssuanceConfigsPatchCall struct { + s *Service + name string + certificateissuanceconfig *CertificateIssuanceConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a CertificateIssuanceConfig. +// +// - name: Identifier. A user-defined name of the certificate issuance config. +// CertificateIssuanceConfig names must be unique globally and match pattern +// `projects/*/locations/*/certificateIssuanceConfigs/*`. +func (r *ProjectsLocationsCertificateIssuanceConfigsService) Patch(name string, certificateissuanceconfig *CertificateIssuanceConfig) *ProjectsLocationsCertificateIssuanceConfigsPatchCall { + c := &ProjectsLocationsCertificateIssuanceConfigsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.certificateissuanceconfig = certificateissuanceconfig + return c +} + +// UpdateMask sets the optional parameter "updateMask": Required. The update +// mask applies to the resource. For the `FieldMask` definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask. +func (c *ProjectsLocationsCertificateIssuanceConfigsPatchCall) UpdateMask(updateMask string) *ProjectsLocationsCertificateIssuanceConfigsPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsCertificateIssuanceConfigsPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsCertificateIssuanceConfigsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsCertificateIssuanceConfigsPatchCall) Context(ctx context.Context) *ProjectsLocationsCertificateIssuanceConfigsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsCertificateIssuanceConfigsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsCertificateIssuanceConfigsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.certificateissuanceconfig) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.certificateIssuanceConfigs.patch" call. 
+// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsCertificateIssuanceConfigsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsCertificateMapsCreateCall struct { + s *Service + parent string + certificatemap *CertificateMap + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a new CertificateMap in a given project and location. +// +// - parent: The parent resource of the certificate map. Must be in the format +// `projects/*/locations/*`. +func (r *ProjectsLocationsCertificateMapsService) Create(parent string, certificatemap *CertificateMap) *ProjectsLocationsCertificateMapsCreateCall { + c := &ProjectsLocationsCertificateMapsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.certificatemap = certificatemap + return c +} + +// CertificateMapId sets the optional parameter "certificateMapId": Required. A +// user-provided name of the certificate map. +func (c *ProjectsLocationsCertificateMapsCreateCall) CertificateMapId(certificateMapId string) *ProjectsLocationsCertificateMapsCreateCall { + c.urlParams_.Set("certificateMapId", certificateMapId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsCertificateMapsCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsCertificateMapsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsCertificateMapsCreateCall) Context(ctx context.Context) *ProjectsLocationsCertificateMapsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsCertificateMapsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsCertificateMapsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.certificatemap) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/certificateMaps") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.certificateMaps.create" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsCertificateMapsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsCertificateMapsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a single CertificateMap. A Certificate Map can't be deleted +// if it contains Certificate Map Entries. Remove all the entries from the map +// before calling this method. +// +// - name: A name of the certificate map to delete. Must be in the format +// `projects/*/locations/*/certificateMaps/*`. +func (r *ProjectsLocationsCertificateMapsService) Delete(name string) *ProjectsLocationsCertificateMapsDeleteCall { + c := &ProjectsLocationsCertificateMapsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsCertificateMapsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsCertificateMapsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsCertificateMapsDeleteCall) Context(ctx context.Context) *ProjectsLocationsCertificateMapsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsCertificateMapsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsCertificateMapsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.certificateMaps.delete" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsCertificateMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsCertificateMapsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets details of a single CertificateMap. +// +// - name: A name of the certificate map to describe. Must be in the format +// `projects/*/locations/*/certificateMaps/*`. +func (r *ProjectsLocationsCertificateMapsService) Get(name string) *ProjectsLocationsCertificateMapsGetCall { + c := &ProjectsLocationsCertificateMapsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsCertificateMapsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsCertificateMapsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsCertificateMapsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsCertificateMapsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsCertificateMapsGetCall) Context(ctx context.Context) *ProjectsLocationsCertificateMapsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *ProjectsLocationsCertificateMapsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsCertificateMapsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.certificateMaps.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *CertificateMap.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsCertificateMapsGetCall) Do(opts ...googleapi.CallOption) (*CertificateMap, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &CertificateMap{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsCertificateMapsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists CertificateMaps in a given project and location. +// +// - parent: The project and location from which the certificate maps should be +// listed, specified in the format `projects/*/locations/*`. +func (r *ProjectsLocationsCertificateMapsService) List(parent string) *ProjectsLocationsCertificateMapsListCall { + c := &ProjectsLocationsCertificateMapsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// Filter sets the optional parameter "filter": Filter expression to restrict +// the Certificate Maps returned. +func (c *ProjectsLocationsCertificateMapsListCall) Filter(filter string) *ProjectsLocationsCertificateMapsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// OrderBy sets the optional parameter "orderBy": A list of Certificate Map +// field names used to specify the order of the returned results. The default +// sorting order is ascending. To specify descending order for a field, add a +// suffix " desc".
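+// For example (illustrative), OrderBy("name desc") sorts the results by name +// in descending order.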
+func (c *ProjectsLocationsCertificateMapsListCall) OrderBy(orderBy string) *ProjectsLocationsCertificateMapsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of +// certificate maps to return per call. +func (c *ProjectsLocationsCertificateMapsListCall) PageSize(pageSize int64) *ProjectsLocationsCertificateMapsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The value returned by the +// last `ListCertificateMapsResponse`. Indicates that this is a continuation of +// a prior `ListCertificateMaps` call, and that the system should return the +// next page of data. +func (c *ProjectsLocationsCertificateMapsListCall) PageToken(pageToken string) *ProjectsLocationsCertificateMapsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsCertificateMapsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsCertificateMapsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsCertificateMapsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsCertificateMapsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsCertificateMapsListCall) Context(ctx context.Context) *ProjectsLocationsCertificateMapsListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsCertificateMapsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsCertificateMapsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/certificateMaps") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.certificateMaps.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *ListCertificateMapsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsCertificateMapsListCall) Do(opts ...googleapi.CallOption) (*ListCertificateMapsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
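+ // Do returns a single page of results; use the Pages method below to + // iterate over all pages automatically via NextPageToken.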
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ListCertificateMapsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsCertificateMapsListCall) Pages(ctx context.Context, f func(*ListCertificateMapsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsLocationsCertificateMapsPatchCall struct { + s *Service + name string + certificatemap *CertificateMap + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a CertificateMap. +// +// - name: Identifier. A user-defined name of the Certificate Map. Certificate +// Map names must be unique globally and match pattern +// `projects/*/locations/*/certificateMaps/*`. +func (r *ProjectsLocationsCertificateMapsService) Patch(name string, certificatemap *CertificateMap) *ProjectsLocationsCertificateMapsPatchCall { + c := &ProjectsLocationsCertificateMapsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.certificatemap = certificatemap + return c +} + +// UpdateMask sets the optional parameter "updateMask": Required. The update +// mask applies to the resource. For the `FieldMask` definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask. +func (c *ProjectsLocationsCertificateMapsPatchCall) UpdateMask(updateMask string) *ProjectsLocationsCertificateMapsPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsCertificateMapsPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsCertificateMapsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsCertificateMapsPatchCall) Context(ctx context.Context) *ProjectsLocationsCertificateMapsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
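+// For example (an illustrative sketch; the header name is an assumption, not
+// one this API requires):
+//
+//	c.Header().Set("X-Example-Trace", "debug-1")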
+func (c *ProjectsLocationsCertificateMapsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsCertificateMapsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.certificatemap) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.certificateMaps.patch" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsCertificateMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsCertificateMapsCertificateMapEntriesCreateCall struct { + s *Service + parent string + certificatemapentry *CertificateMapEntry + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a new CertificateMapEntry in a given project and location. +// +// - parent: The parent resource of the certificate map entry. Must be in the +// format `projects/*/locations/*/certificateMaps/*`. +func (r *ProjectsLocationsCertificateMapsCertificateMapEntriesService) Create(parent string, certificatemapentry *CertificateMapEntry) *ProjectsLocationsCertificateMapsCertificateMapEntriesCreateCall { + c := &ProjectsLocationsCertificateMapsCertificateMapEntriesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.certificatemapentry = certificatemapentry + return c +} + +// CertificateMapEntryId sets the optional parameter "certificateMapEntryId": +// Required. A user-provided name of the certificate map entry. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesCreateCall) CertificateMapEntryId(certificateMapEntryId string) *ProjectsLocationsCertificateMapsCertificateMapEntriesCreateCall { + c.urlParams_.Set("certificateMapEntryId", certificateMapEntryId) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsCertificateMapsCertificateMapEntriesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesCreateCall) Context(ctx context.Context) *ProjectsLocationsCertificateMapsCertificateMapEntriesCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.certificatemapentry) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/certificateMapEntries") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.certificateMaps.certificateMapEntries.create" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsCertificateMapsCertificateMapEntriesDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a single CertificateMapEntry. +// +// - name: A name of the certificate map entry to delete. Must be in the format +// `projects/*/locations/*/certificateMaps/*/certificateMapEntries/*`. 
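+// For example (illustrative):
+// `projects/my-project/locations/global/certificateMaps/my-map/certificateMapEntries/my-entry`.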
+func (r *ProjectsLocationsCertificateMapsCertificateMapEntriesService) Delete(name string) *ProjectsLocationsCertificateMapsCertificateMapEntriesDeleteCall { + c := &ProjectsLocationsCertificateMapsCertificateMapEntriesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsCertificateMapsCertificateMapEntriesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesDeleteCall) Context(ctx context.Context) *ProjectsLocationsCertificateMapsCertificateMapEntriesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.certificateMaps.certificateMapEntries.delete" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsCertificateMapsCertificateMapEntriesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets details of a single CertificateMapEntry. +// +// - name: A name of the certificate map entry to describe. 
Must be in the +// format `projects/*/locations/*/certificateMaps/*/certificateMapEntries/*`. +func (r *ProjectsLocationsCertificateMapsCertificateMapEntriesService) Get(name string) *ProjectsLocationsCertificateMapsCertificateMapEntriesGetCall { + c := &ProjectsLocationsCertificateMapsCertificateMapEntriesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsCertificateMapsCertificateMapEntriesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsCertificateMapsCertificateMapEntriesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesGetCall) Context(ctx context.Context) *ProjectsLocationsCertificateMapsCertificateMapEntriesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.certificateMaps.certificateMapEntries.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *CertificateMapEntry.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesGetCall) Do(opts ...googleapi.CallOption) (*CertificateMapEntry, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &CertificateMapEntry{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsCertificateMapsCertificateMapEntriesListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists CertificateMapEntries in a given project and location. +// +// - parent: The project, location and certificate map from which the +// certificate map entries should be listed, specified in the format +// `projects/*/locations/*/certificateMaps/*`. +func (r *ProjectsLocationsCertificateMapsCertificateMapEntriesService) List(parent string) *ProjectsLocationsCertificateMapsCertificateMapEntriesListCall { + c := &ProjectsLocationsCertificateMapsCertificateMapEntriesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// Filter sets the optional parameter "filter": Filter expression to restrict +// the returned Certificate Map Entries. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesListCall) Filter(filter string) *ProjectsLocationsCertificateMapsCertificateMapEntriesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// OrderBy sets the optional parameter "orderBy": A list of Certificate Map +// Entry field names used to specify the order of the returned results. The +// default sorting order is ascending. To specify descending order for a field, +// add a suffix " desc". +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesListCall) OrderBy(orderBy string) *ProjectsLocationsCertificateMapsCertificateMapEntriesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of +// certificate map entries to return. The service may return fewer than this +// value. If unspecified, at most 50 certificate map entries will be returned. +// The maximum value is 1000; values above 1000 will be coerced to 1000. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesListCall) PageSize(pageSize int64) *ProjectsLocationsCertificateMapsCertificateMapEntriesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The value returned by the +// last `ListCertificateMapEntriesResponse`. Indicates that this is a +// continuation of a prior `ListCertificateMapEntries` call, and that the +// system should return the next page of data. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesListCall) PageToken(pageToken string) *ProjectsLocationsCertificateMapsCertificateMapEntriesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. 
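+// For example (an illustrative mask, assuming the standard partial-response
+// syntax): Fields("nextPageToken", "certificateMapEntries(name)").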
+func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesListCall) Fields(s ...googleapi.Field) *ProjectsLocationsCertificateMapsCertificateMapEntriesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesListCall) IfNoneMatch(entityTag string) *ProjectsLocationsCertificateMapsCertificateMapEntriesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesListCall) Context(ctx context.Context) *ProjectsLocationsCertificateMapsCertificateMapEntriesListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/certificateMapEntries") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.certificateMaps.certificateMapEntries.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *ListCertificateMapEntriesResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesListCall) Do(opts ...googleapi.CallOption) (*ListCertificateMapEntriesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ListCertificateMapEntriesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
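+// A minimal usage sketch (illustrative; the svc variable and resource names
+// are assumptions, not part of this file):
+//
+//	err := svc.Projects.Locations.CertificateMaps.CertificateMapEntries.
+//		List("projects/my-project/locations/global/certificateMaps/my-map").
+//		Pages(ctx, func(page *ListCertificateMapEntriesResponse) error {
+//			for _, e := range page.CertificateMapEntries {
+//				fmt.Println(e.Name)
+//			}
+//			return nil // a non-nil error here stops the iteration
+//		})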
+func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesListCall) Pages(ctx context.Context, f func(*ListCertificateMapEntriesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsLocationsCertificateMapsCertificateMapEntriesPatchCall struct { + s *Service + name string + certificatemapentry *CertificateMapEntry + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a CertificateMapEntry. +// +// - name: Identifier. A user-defined name of the Certificate Map Entry. +// Certificate Map Entry names must be unique globally and match pattern +// `projects/*/locations/*/certificateMaps/*/certificateMapEntries/*`. +func (r *ProjectsLocationsCertificateMapsCertificateMapEntriesService) Patch(name string, certificatemapentry *CertificateMapEntry) *ProjectsLocationsCertificateMapsCertificateMapEntriesPatchCall { + c := &ProjectsLocationsCertificateMapsCertificateMapEntriesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.certificatemapentry = certificatemapentry + return c +} + +// UpdateMask sets the optional parameter "updateMask": Required. The update +// mask applies to the resource. For the `FieldMask` definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesPatchCall) UpdateMask(updateMask string) *ProjectsLocationsCertificateMapsCertificateMapEntriesPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsCertificateMapsCertificateMapEntriesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesPatchCall) Context(ctx context.Context) *ProjectsLocationsCertificateMapsCertificateMapEntriesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.certificatemapentry) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.certificateMaps.certificateMapEntries.patch" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsCertificateMapsCertificateMapEntriesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsCertificatesCreateCall struct { + s *Service + parent string + certificate *Certificate + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a new Certificate in a given project and location. +// +// - parent: The parent resource of the certificate. Must be in the format +// `projects/*/locations/*`. +func (r *ProjectsLocationsCertificatesService) Create(parent string, certificate *Certificate) *ProjectsLocationsCertificatesCreateCall { + c := &ProjectsLocationsCertificatesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.certificate = certificate + return c +} + +// CertificateId sets the optional parameter "certificateId": Required. A +// user-provided name of the certificate. +func (c *ProjectsLocationsCertificatesCreateCall) CertificateId(certificateId string) *ProjectsLocationsCertificatesCreateCall { + c.urlParams_.Set("certificateId", certificateId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsCertificatesCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsCertificatesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsCertificatesCreateCall) Context(ctx context.Context) *ProjectsLocationsCertificatesCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *ProjectsLocationsCertificatesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsCertificatesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.certificate) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/certificates") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.certificates.create" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsCertificatesCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsCertificatesDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a single Certificate. +// +// - name: A name of the certificate to delete. Must be in the format +// `projects/*/locations/*/certificates/*`. +func (r *ProjectsLocationsCertificatesService) Delete(name string) *ProjectsLocationsCertificatesDeleteCall { + c := &ProjectsLocationsCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsCertificatesDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsCertificatesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsCertificatesDeleteCall) Context(ctx context.Context) *ProjectsLocationsCertificatesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *ProjectsLocationsCertificatesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.certificates.delete" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsCertificatesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets details of a single Certificate. +// +// - name: A name of the certificate to describe. Must be in the format +// `projects/*/locations/*/certificates/*`. +func (r *ProjectsLocationsCertificatesService) Get(name string) *ProjectsLocationsCertificatesGetCall { + c := &ProjectsLocationsCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsCertificatesGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsCertificatesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsCertificatesGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsCertificatesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. 
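+// For example (an illustrative sketch; "call" stands for a previously
+// constructed Get call):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	cert, err := call.Context(ctx).Do()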
+func (c *ProjectsLocationsCertificatesGetCall) Context(ctx context.Context) *ProjectsLocationsCertificatesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsCertificatesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsCertificatesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.certificates.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *Certificate.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsCertificatesGetCall) Do(opts ...googleapi.CallOption) (*Certificate, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Certificate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsCertificatesListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists Certificates in a given project and location. +// +// - parent: The project and location from which the certificate should be +// listed, specified in the format `projects/*/locations/*`. +func (r *ProjectsLocationsCertificatesService) List(parent string) *ProjectsLocationsCertificatesListCall { + c := &ProjectsLocationsCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// Filter sets the optional parameter "filter": Filter expression to restrict +// the Certificates returned. +func (c *ProjectsLocationsCertificatesListCall) Filter(filter string) *ProjectsLocationsCertificatesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// OrderBy sets the optional parameter "orderBy": A list of Certificate field +// names used to specify the order of the returned results. The default sorting +// order is ascending. To specify descending order for a field, add a suffix " +// desc". 
+func (c *ProjectsLocationsCertificatesListCall) OrderBy(orderBy string) *ProjectsLocationsCertificatesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of +// certificates to return per call. +func (c *ProjectsLocationsCertificatesListCall) PageSize(pageSize int64) *ProjectsLocationsCertificatesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The value returned by the +// last `ListCertificatesResponse`. Indicates that this is a continuation of a +// prior `ListCertificates` call, and that the system should return the next +// page of data. +func (c *ProjectsLocationsCertificatesListCall) PageToken(pageToken string) *ProjectsLocationsCertificatesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsCertificatesListCall) Fields(s ...googleapi.Field) *ProjectsLocationsCertificatesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsCertificatesListCall) IfNoneMatch(entityTag string) *ProjectsLocationsCertificatesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsCertificatesListCall) Context(ctx context.Context) *ProjectsLocationsCertificatesListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsCertificatesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsCertificatesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/certificates") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.certificates.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *ListCertificatesResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsCertificatesListCall) Do(opts ...googleapi.CallOption) (*ListCertificatesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ListCertificatesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsCertificatesListCall) Pages(ctx context.Context, f func(*ListCertificatesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsLocationsCertificatesPatchCall struct { + s *Service + name string + certificate *Certificate + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a Certificate. +// +// - name: Identifier. A user-defined name of the certificate. Certificate +// names must be unique globally and match pattern +// `projects/*/locations/*/certificates/*`. +func (r *ProjectsLocationsCertificatesService) Patch(name string, certificate *Certificate) *ProjectsLocationsCertificatesPatchCall { + c := &ProjectsLocationsCertificatesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.certificate = certificate + return c +} + +// UpdateMask sets the optional parameter "updateMask": Required. The update +// mask applies to the resource. For the `FieldMask` definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask. +func (c *ProjectsLocationsCertificatesPatchCall) UpdateMask(updateMask string) *ProjectsLocationsCertificatesPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsCertificatesPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsCertificatesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsCertificatesPatchCall) Context(ctx context.Context) *ProjectsLocationsCertificatesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *ProjectsLocationsCertificatesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsCertificatesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.certificate) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.certificates.patch" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsCertificatesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsDnsAuthorizationsCreateCall struct { + s *Service + parent string + dnsauthorization *DnsAuthorization + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a new DnsAuthorization in a given project and location. +// +// - parent: The parent resource of the dns authorization. Must be in the +// format `projects/*/locations/*`. +func (r *ProjectsLocationsDnsAuthorizationsService) Create(parent string, dnsauthorization *DnsAuthorization) *ProjectsLocationsDnsAuthorizationsCreateCall { + c := &ProjectsLocationsDnsAuthorizationsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.dnsauthorization = dnsauthorization + return c +} + +// DnsAuthorizationId sets the optional parameter "dnsAuthorizationId": +// Required. A user-provided name of the dns authorization. +func (c *ProjectsLocationsDnsAuthorizationsCreateCall) DnsAuthorizationId(dnsAuthorizationId string) *ProjectsLocationsDnsAuthorizationsCreateCall { + c.urlParams_.Set("dnsAuthorizationId", dnsAuthorizationId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. 
+func (c *ProjectsLocationsDnsAuthorizationsCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsDnsAuthorizationsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsDnsAuthorizationsCreateCall) Context(ctx context.Context) *ProjectsLocationsDnsAuthorizationsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsDnsAuthorizationsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsDnsAuthorizationsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.dnsauthorization) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/dnsAuthorizations") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.dnsAuthorizations.create" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsDnsAuthorizationsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsDnsAuthorizationsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a single DnsAuthorization. +// +// - name: A name of the dns authorization to delete. Must be in the format +// `projects/*/locations/*/dnsAuthorizations/*`. +func (r *ProjectsLocationsDnsAuthorizationsService) Delete(name string) *ProjectsLocationsDnsAuthorizationsDeleteCall { + c := &ProjectsLocationsDnsAuthorizationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. 
+func (c *ProjectsLocationsDnsAuthorizationsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsDnsAuthorizationsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsDnsAuthorizationsDeleteCall) Context(ctx context.Context) *ProjectsLocationsDnsAuthorizationsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsDnsAuthorizationsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsDnsAuthorizationsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.dnsAuthorizations.delete" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsDnsAuthorizationsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsDnsAuthorizationsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets details of a single DnsAuthorization. +// +// - name: A name of the dns authorization to describe. Must be in the format +// `projects/*/locations/*/dnsAuthorizations/*`. +func (r *ProjectsLocationsDnsAuthorizationsService) Get(name string) *ProjectsLocationsDnsAuthorizationsGetCall { + c := &ProjectsLocationsDnsAuthorizationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. 
+func (c *ProjectsLocationsDnsAuthorizationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsDnsAuthorizationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsDnsAuthorizationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsDnsAuthorizationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsDnsAuthorizationsGetCall) Context(ctx context.Context) *ProjectsLocationsDnsAuthorizationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsDnsAuthorizationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsDnsAuthorizationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.dnsAuthorizations.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *DnsAuthorization.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsLocationsDnsAuthorizationsGetCall) Do(opts ...googleapi.CallOption) (*DnsAuthorization, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &DnsAuthorization{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsDnsAuthorizationsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists DnsAuthorizations in a given project and location. +// +// - parent: The project and location from which the dns authorizations should +// be listed, specified in the format `projects/*/locations/*`. 
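+// For example (illustrative): `projects/my-project/locations/global`.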
+func (r *ProjectsLocationsDnsAuthorizationsService) List(parent string) *ProjectsLocationsDnsAuthorizationsListCall { + c := &ProjectsLocationsDnsAuthorizationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// Filter sets the optional parameter "filter": Filter expression to restrict +// the Dns Authorizations returned. +func (c *ProjectsLocationsDnsAuthorizationsListCall) Filter(filter string) *ProjectsLocationsDnsAuthorizationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// OrderBy sets the optional parameter "orderBy": A list of Dns Authorization +// field names used to specify the order of the returned results. The default +// sorting order is ascending. To specify descending order for a field, add a +// suffix " desc". +func (c *ProjectsLocationsDnsAuthorizationsListCall) OrderBy(orderBy string) *ProjectsLocationsDnsAuthorizationsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of dns +// authorizations to return per call. +func (c *ProjectsLocationsDnsAuthorizationsListCall) PageSize(pageSize int64) *ProjectsLocationsDnsAuthorizationsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The value returned by the +// last `ListDnsAuthorizationsResponse`. Indicates that this is a continuation +// of a prior `ListDnsAuthorizations` call, and that the system should return +// the next page of data. +func (c *ProjectsLocationsDnsAuthorizationsListCall) PageToken(pageToken string) *ProjectsLocationsDnsAuthorizationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsDnsAuthorizationsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsDnsAuthorizationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsDnsAuthorizationsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsDnsAuthorizationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsDnsAuthorizationsListCall) Context(ctx context.Context) *ProjectsLocationsDnsAuthorizationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsDnsAuthorizationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsDnsAuthorizationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/dnsAuthorizations") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.dnsAuthorizations.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *ListDnsAuthorizationsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsDnsAuthorizationsListCall) Do(opts ...googleapi.CallOption) (*ListDnsAuthorizationsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ListDnsAuthorizationsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsDnsAuthorizationsListCall) Pages(ctx context.Context, f func(*ListDnsAuthorizationsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsLocationsDnsAuthorizationsPatchCall struct { + s *Service + name string + dnsauthorization *DnsAuthorization + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a DnsAuthorization. +// +// - name: Identifier. A user-defined name of the dns authorization. +// DnsAuthorization names must be unique globally and match pattern +// `projects/*/locations/*/dnsAuthorizations/*`. +func (r *ProjectsLocationsDnsAuthorizationsService) Patch(name string, dnsauthorization *DnsAuthorization) *ProjectsLocationsDnsAuthorizationsPatchCall { + c := &ProjectsLocationsDnsAuthorizationsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.dnsauthorization = dnsauthorization + return c +} + +// UpdateMask sets the optional parameter "updateMask": Required. The update +// mask applies to the resource. For the `FieldMask` definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask. +func (c *ProjectsLocationsDnsAuthorizationsPatchCall) UpdateMask(updateMask string) *ProjectsLocationsDnsAuthorizationsPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. 
+func (c *ProjectsLocationsDnsAuthorizationsPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsDnsAuthorizationsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsDnsAuthorizationsPatchCall) Context(ctx context.Context) *ProjectsLocationsDnsAuthorizationsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsDnsAuthorizationsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsDnsAuthorizationsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.dnsauthorization) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.dnsAuthorizations.patch" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsDnsAuthorizationsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsOperationsCancelCall struct { + s *Service + name string + canceloperationrequest *CancelOperationRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Cancel: Starts asynchronous cancellation on a long-running operation. The +// server makes a best effort to cancel the operation, but success is not +// guaranteed. If the server doesn't support this method, it returns +// `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or +// other methods to check whether the cancellation succeeded or whether the +// operation completed despite cancellation. On successful cancellation, the +// operation is not deleted; instead, it becomes an operation with an +// Operation.error value with a google.rpc.Status.code of 1, corresponding to +// `Code.CANCELLED`. 
+// +// - name: The name of the operation resource to be cancelled. +func (r *ProjectsLocationsOperationsService) Cancel(name string, canceloperationrequest *CancelOperationRequest) *ProjectsLocationsOperationsCancelCall { + c := &ProjectsLocationsOperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.canceloperationrequest = canceloperationrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsOperationsCancelCall) Fields(s ...googleapi.Field) *ProjectsLocationsOperationsCancelCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsOperationsCancelCall) Context(ctx context.Context) *ProjectsLocationsOperationsCancelCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsOperationsCancelCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsOperationsCancelCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.canceloperationrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:cancel") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.operations.cancel" call. +// Any non-2xx status code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsOperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsOperationsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a long-running operation. This method indicates that the +// client is no longer interested in the operation result. It does not cancel +// the operation. 
If the server doesn't support this method, it returns +// `google.rpc.Code.UNIMPLEMENTED`. +// +// - name: The name of the operation resource to be deleted. +func (r *ProjectsLocationsOperationsService) Delete(name string) *ProjectsLocationsOperationsDeleteCall { + c := &ProjectsLocationsOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsOperationsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsOperationsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsOperationsDeleteCall) Context(ctx context.Context) *ProjectsLocationsOperationsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsOperationsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.operations.delete" call. +// Any non-2xx status code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsOperationsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsOperationsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the latest state of a long-running operation. Clients can use this +// method to poll the operation result at intervals as recommended by the API +// service. +// +// - name: The name of the operation resource. 
+func (r *ProjectsLocationsOperationsService) Get(name string) *ProjectsLocationsOperationsGetCall { + c := &ProjectsLocationsOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsOperationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsOperationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsOperationsGetCall) Context(ctx context.Context) *ProjectsLocationsOperationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsOperationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsOperationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.operations.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsOperationsListCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists operations that match the specified filter in the request. If +// the server doesn't support this method, it returns `UNIMPLEMENTED`. +// +// - name: The name of the operation's parent resource. +func (r *ProjectsLocationsOperationsService) List(name string) *ProjectsLocationsOperationsListCall { + c := &ProjectsLocationsOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Filter sets the optional parameter "filter": The standard list filter. +func (c *ProjectsLocationsOperationsListCall) Filter(filter string) *ProjectsLocationsOperationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// PageSize sets the optional parameter "pageSize": The standard list page +// size. +func (c *ProjectsLocationsOperationsListCall) PageSize(pageSize int64) *ProjectsLocationsOperationsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The standard list page +// token. +func (c *ProjectsLocationsOperationsListCall) PageToken(pageToken string) *ProjectsLocationsOperationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsOperationsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsOperationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsOperationsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsOperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsOperationsListCall) Context(ctx context.Context) *ProjectsLocationsOperationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *ProjectsLocationsOperationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsOperationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}/operations") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.operations.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *ListOperationsResponse.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsLocationsOperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ListOperationsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsOperationsListCall) Pages(ctx context.Context, f func(*ListOperationsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsLocationsTrustConfigsCreateCall struct { + s *Service + parent string + trustconfig *TrustConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a new TrustConfig in a given project and location. +// +// - parent: The parent resource of the TrustConfig. Must be in the format +// `projects/*/locations/*`. +func (r *ProjectsLocationsTrustConfigsService) Create(parent string, trustconfig *TrustConfig) *ProjectsLocationsTrustConfigsCreateCall { + c := &ProjectsLocationsTrustConfigsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.trustconfig = trustconfig + return c +} + +// TrustConfigId sets the optional parameter "trustConfigId": Required. A +// user-provided name of the TrustConfig. Must match the regexp +// `[a-z0-9-]{1,63}`. 
+func (c *ProjectsLocationsTrustConfigsCreateCall) TrustConfigId(trustConfigId string) *ProjectsLocationsTrustConfigsCreateCall { + c.urlParams_.Set("trustConfigId", trustConfigId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsTrustConfigsCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsTrustConfigsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsTrustConfigsCreateCall) Context(ctx context.Context) *ProjectsLocationsTrustConfigsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsTrustConfigsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsTrustConfigsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.trustconfig) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/trustConfigs") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.trustConfigs.create" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsTrustConfigsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsTrustConfigsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a single TrustConfig. +// +// - name: A name of the TrustConfig to delete. Must be in the format +// `projects/*/locations/*/trustConfigs/*`. 
+func (r *ProjectsLocationsTrustConfigsService) Delete(name string) *ProjectsLocationsTrustConfigsDeleteCall { + c := &ProjectsLocationsTrustConfigsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Etag sets the optional parameter "etag": The current etag of the +// TrustConfig. If an etag is provided and does not match the current etag of +// the resource, deletion will be blocked and an ABORTED error will be +// returned. +func (c *ProjectsLocationsTrustConfigsDeleteCall) Etag(etag string) *ProjectsLocationsTrustConfigsDeleteCall { + c.urlParams_.Set("etag", etag) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsTrustConfigsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsTrustConfigsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsTrustConfigsDeleteCall) Context(ctx context.Context) *ProjectsLocationsTrustConfigsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsTrustConfigsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsTrustConfigsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.trustConfigs.delete" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsTrustConfigsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsTrustConfigsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets details of a single TrustConfig. 
+// +// - name: A name of the TrustConfig to describe. Must be in the format +// `projects/*/locations/*/trustConfigs/*`. +func (r *ProjectsLocationsTrustConfigsService) Get(name string) *ProjectsLocationsTrustConfigsGetCall { + c := &ProjectsLocationsTrustConfigsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsTrustConfigsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsTrustConfigsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsTrustConfigsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsTrustConfigsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsTrustConfigsGetCall) Context(ctx context.Context) *ProjectsLocationsTrustConfigsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsTrustConfigsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsTrustConfigsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.trustConfigs.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *TrustConfig.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsTrustConfigsGetCall) Do(opts ...googleapi.CallOption) (*TrustConfig, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TrustConfig{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsTrustConfigsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists TrustConfigs in a given project and location. +// +// - parent: The project and location from which the TrustConfigs should be +// listed, specified in the format `projects/*/locations/*`. +func (r *ProjectsLocationsTrustConfigsService) List(parent string) *ProjectsLocationsTrustConfigsListCall { + c := &ProjectsLocationsTrustConfigsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// Filter sets the optional parameter "filter": Filter expression to restrict +// the TrustConfigs returned. +func (c *ProjectsLocationsTrustConfigsListCall) Filter(filter string) *ProjectsLocationsTrustConfigsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// OrderBy sets the optional parameter "orderBy": A list of TrustConfig field +// names used to specify the order of the returned results. The default sorting +// order is ascending. To specify descending order for a field, add a suffix " +// desc". +func (c *ProjectsLocationsTrustConfigsListCall) OrderBy(orderBy string) *ProjectsLocationsTrustConfigsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of +// TrustConfigs to return per call. +func (c *ProjectsLocationsTrustConfigsListCall) PageSize(pageSize int64) *ProjectsLocationsTrustConfigsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The value returned by the +// last `ListTrustConfigsResponse`. Indicates that this is a continuation of a +// prior `ListTrustConfigs` call, and that the system should return the next +// page of data. +func (c *ProjectsLocationsTrustConfigsListCall) PageToken(pageToken string) *ProjectsLocationsTrustConfigsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsTrustConfigsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsTrustConfigsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsTrustConfigsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsTrustConfigsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. 
+func (c *ProjectsLocationsTrustConfigsListCall) Context(ctx context.Context) *ProjectsLocationsTrustConfigsListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsTrustConfigsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsTrustConfigsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/trustConfigs") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.trustConfigs.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *ListTrustConfigsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsTrustConfigsListCall) Do(opts ...googleapi.CallOption) (*ListTrustConfigsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ListTrustConfigsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsTrustConfigsListCall) Pages(ctx context.Context, f func(*ListTrustConfigsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsLocationsTrustConfigsPatchCall struct { + s *Service + name string + trustconfig *TrustConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a TrustConfig. +// +// - name: Identifier. A user-defined name of the trust config. TrustConfig +// names must be unique globally and match pattern +// `projects/*/locations/*/trustConfigs/*`. 
+func (r *ProjectsLocationsTrustConfigsService) Patch(name string, trustconfig *TrustConfig) *ProjectsLocationsTrustConfigsPatchCall { + c := &ProjectsLocationsTrustConfigsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.trustconfig = trustconfig + return c +} + +// UpdateMask sets the optional parameter "updateMask": Required. The update +// mask applies to the resource. For the `FieldMask` definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask. +func (c *ProjectsLocationsTrustConfigsPatchCall) UpdateMask(updateMask string) *ProjectsLocationsTrustConfigsPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsTrustConfigsPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsTrustConfigsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsTrustConfigsPatchCall) Context(ctx context.Context) *ProjectsLocationsTrustConfigsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsTrustConfigsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsTrustConfigsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.trustconfig) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "certificatemanager.projects.locations.trustConfigs.patch" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsTrustConfigsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} diff --git a/terraform/providers/google/vendor/google.golang.org/api/cloudbilling/v1/cloudbilling-gen.go b/terraform/providers/google/vendor/google.golang.org/api/cloudbilling/v1/cloudbilling-gen.go index 3a32a5716f4..b594ebef2b5 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudbilling/v1/cloudbilling-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudbilling/v1/cloudbilling-gen.go @@ -290,9 +290,9 @@ type AggregationInfo struct { NullFields []string `json:"-"` } -func (s *AggregationInfo) MarshalJSON() ([]byte, error) { +func (s AggregationInfo) MarshalJSON() ([]byte, error) { type NoMethod AggregationInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuditConfig: Specifies the audit configuration for a service. The @@ -331,9 +331,9 @@ type AuditConfig struct { NullFields []string `json:"-"` } -func (s *AuditConfig) MarshalJSON() ([]byte, error) { +func (s AuditConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuditLogConfig: Provides the configuration for logging a type of @@ -366,9 +366,9 @@ type AuditLogConfig struct { NullFields []string `json:"-"` } -func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { +func (s AuditLogConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditLogConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BillingAccount: A billing account in the Google Cloud Console @@ -415,9 +415,9 @@ type BillingAccount struct { NullFields []string `json:"-"` } -func (s *BillingAccount) MarshalJSON() ([]byte, error) { +func (s BillingAccount) MarshalJSON() ([]byte, error) { type NoMethod BillingAccount - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Binding: Associates `members`, or principals, with a `role`. @@ -514,9 +514,9 @@ type Binding struct { NullFields []string `json:"-"` } -func (s *Binding) MarshalJSON() ([]byte, error) { +func (s Binding) MarshalJSON() ([]byte, error) { type NoMethod Binding - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Category: Represents the category hierarchy of a SKU. 
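The certificatemanager surface added above closes with this hunk. Its generated List calls expose pagination two ways: manually, via PageToken and NextPageToken, or through the Pages helper, which owns that loop and restores the caller's original pageToken on exit through the deferred call shown earlier. A minimal usage sketch, assuming the vendored package imports as google.golang.org/api/certificatemanager/v1, that Application Default Credentials are available, and the conventional generated field names (DnsAuthorizations, Name) on the response; the parent string is hypothetical:

package main

import (
	"context"
	"fmt"
	"log"

	certificatemanager "google.golang.org/api/certificatemanager/v1"
)

func main() {
	ctx := context.Background()
	// NewService picks up Application Default Credentials.
	svc, err := certificatemanager.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Hypothetical parent; substitute a real project and location.
	parent := "projects/my-project/locations/global"
	call := svc.Projects.Locations.DnsAuthorizations.List(parent).PageSize(50)
	// Pages drives the pageToken/NextPageToken loop in the generated code;
	// a non-nil error from the callback halts the iteration.
	err = call.Pages(ctx, func(page *certificatemanager.ListDnsAuthorizationsResponse) error {
		for _, auth := range page.DnsAuthorizations {
			fmt.Println(auth.Name)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}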
@@ -545,9 +545,9 @@ type Category struct { NullFields []string `json:"-"` } -func (s *Category) MarshalJSON() ([]byte, error) { +func (s Category) MarshalJSON() ([]byte, error) { type NoMethod Category - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Expr: Represents a textual expression in the Common Expression Language @@ -593,9 +593,9 @@ type Expr struct { NullFields []string `json:"-"` } -func (s *Expr) MarshalJSON() ([]byte, error) { +func (s Expr) MarshalJSON() ([]byte, error) { type NoMethod Expr - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GeoTaxonomy: Encapsulates the geographic taxonomy data for a sku. @@ -626,9 +626,9 @@ type GeoTaxonomy struct { NullFields []string `json:"-"` } -func (s *GeoTaxonomy) MarshalJSON() ([]byte, error) { +func (s GeoTaxonomy) MarshalJSON() ([]byte, error) { type NoMethod GeoTaxonomy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListBillingAccountsResponse: Response message for `ListBillingAccounts`. @@ -655,9 +655,9 @@ type ListBillingAccountsResponse struct { NullFields []string `json:"-"` } -func (s *ListBillingAccountsResponse) MarshalJSON() ([]byte, error) { +func (s ListBillingAccountsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListBillingAccountsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListProjectBillingInfoResponse: Request message for @@ -687,9 +687,9 @@ type ListProjectBillingInfoResponse struct { NullFields []string `json:"-"` } -func (s *ListProjectBillingInfoResponse) MarshalJSON() ([]byte, error) { +func (s ListProjectBillingInfoResponse) MarshalJSON() ([]byte, error) { type NoMethod ListProjectBillingInfoResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListServicesResponse: Response message for `ListServices`. @@ -716,9 +716,9 @@ type ListServicesResponse struct { NullFields []string `json:"-"` } -func (s *ListServicesResponse) MarshalJSON() ([]byte, error) { +func (s ListServicesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListServicesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListSkusResponse: Response message for `ListSkus`. @@ -745,9 +745,9 @@ type ListSkusResponse struct { NullFields []string `json:"-"` } -func (s *ListSkusResponse) MarshalJSON() ([]byte, error) { +func (s ListSkusResponse) MarshalJSON() ([]byte, error) { type NoMethod ListSkusResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Money: Represents an amount of money with its currency type. 
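From here on, the cloudbilling hunks all make the same mechanical change: MarshalJSON moves from a pointer receiver to a value receiver. The practical difference is Go's method-set rule: a value-receiver method belongs to the method sets of both T and *T, so encoding/json finds the custom marshaler even when it encounters a non-addressable T (a map value, or a struct copied out of an interface), whereas a pointer-receiver method is silently skipped in those cases and the ForceSendFields/NullFields handling is lost. A stand-alone analogue of the pattern, using a hypothetical item type rather than the library's internal gensupport code:

package main

import (
	"encoding/json"
	"fmt"
)

type item struct {
	Name string `json:"name,omitempty"`
}

// Value receiver: in the method set of both item and *item, so json.Marshal
// uses it even for non-addressable values such as map entries.
func (s item) MarshalJSON() ([]byte, error) {
	type noMethod item // same fields, no methods: avoids infinite recursion
	return json.Marshal(noMethod(s))
}

func main() {
	byKey := map[string]item{"a": {Name: "x"}} // map values are not addressable
	b, err := json.Marshal(byKey)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // the custom marshaler ran for the plain value
}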
@@ -776,9 +776,9 @@ type Money struct { NullFields []string `json:"-"` } -func (s *Money) MarshalJSON() ([]byte, error) { +func (s Money) MarshalJSON() ([]byte, error) { type NoMethod Money - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MoveBillingAccountRequest: Request message for `MoveBillingAccount` RPC. @@ -800,9 +800,9 @@ type MoveBillingAccountRequest struct { NullFields []string `json:"-"` } -func (s *MoveBillingAccountRequest) MarshalJSON() ([]byte, error) { +func (s MoveBillingAccountRequest) MarshalJSON() ([]byte, error) { type NoMethod MoveBillingAccountRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Policy: An Identity and Access Management (IAM) policy, which specifies @@ -892,9 +892,9 @@ type Policy struct { NullFields []string `json:"-"` } -func (s *Policy) MarshalJSON() ([]byte, error) { +func (s Policy) MarshalJSON() ([]byte, error) { type NoMethod Policy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PricingExpression: Expresses a mathematical pricing formula. For Example:- @@ -945,9 +945,9 @@ type PricingExpression struct { NullFields []string `json:"-"` } -func (s *PricingExpression) MarshalJSON() ([]byte, error) { +func (s PricingExpression) MarshalJSON() ([]byte, error) { type NoMethod PricingExpression - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *PricingExpression) UnmarshalJSON(data []byte) error { @@ -1004,9 +1004,9 @@ type PricingInfo struct { NullFields []string `json:"-"` } -func (s *PricingInfo) MarshalJSON() ([]byte, error) { +func (s PricingInfo) MarshalJSON() ([]byte, error) { type NoMethod PricingInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *PricingInfo) UnmarshalJSON(data []byte) error { @@ -1060,9 +1060,9 @@ type ProjectBillingInfo struct { NullFields []string `json:"-"` } -func (s *ProjectBillingInfo) MarshalJSON() ([]byte, error) { +func (s ProjectBillingInfo) MarshalJSON() ([]byte, error) { type NoMethod ProjectBillingInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Service: Encapsulates a single service in Google Cloud Platform. @@ -1089,9 +1089,9 @@ type Service struct { NullFields []string `json:"-"` } -func (s *Service) MarshalJSON() ([]byte, error) { +func (s Service) MarshalJSON() ([]byte, error) { type NoMethod Service - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SetIamPolicyRequest: Request message for `SetIamPolicy` method. 
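Note that the UnmarshalJSON methods in these same hunks (PricingExpression, PricingInfo, TierRate) keep their pointer receivers, and that asymmetry is sound: unmarshaling must write through to the caller's value, and json.Unmarshal is always handed a pointer, so the *T method set is exactly where the method is found. Continuing the hypothetical item type from the sketch above:

// UnmarshalJSON stays on the pointer receiver because it mutates the
// receiver; json.Unmarshal always receives a pointer, so *item suffices.
func (s *item) UnmarshalJSON(data []byte) error {
	type noMethod item
	var tmp noMethod
	if err := json.Unmarshal(data, &tmp); err != nil {
		return err
	}
	*s = item(tmp)
	return nil
}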
@@ -1118,9 +1118,9 @@ type SetIamPolicyRequest struct { NullFields []string `json:"-"` } -func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { +func (s SetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod SetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Sku: Encapsulates a single SKU in Google Cloud @@ -1160,9 +1160,9 @@ type Sku struct { NullFields []string `json:"-"` } -func (s *Sku) MarshalJSON() ([]byte, error) { +func (s Sku) MarshalJSON() ([]byte, error) { type NoMethod Sku - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsRequest: Request message for `TestIamPermissions` method. @@ -1185,9 +1185,9 @@ type TestIamPermissionsRequest struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsResponse: Response message for `TestIamPermissions` @@ -1212,9 +1212,9 @@ type TestIamPermissionsResponse struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TierRate: The price rate indicating starting usage and its corresponding @@ -1240,9 +1240,9 @@ type TierRate struct { NullFields []string `json:"-"` } -func (s *TierRate) MarshalJSON() ([]byte, error) { +func (s TierRate) MarshalJSON() ([]byte, error) { type NoMethod TierRate - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *TierRate) UnmarshalJSON(data []byte) error { diff --git a/terraform/providers/google/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-api.json b/terraform/providers/google/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-api.json index b48f0071934..46b1e8beb5c 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-api.json @@ -453,7 +453,7 @@ "type": "string" }, "parent": { - "description": "Required. Name of the parent project. For example: projects/{$project_number} or projects/{$project_id}", + "description": "Name of the parent project. For example: projects/{$project_number} or projects/{$project_id}", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -1384,7 +1384,7 @@ "type": "string" }, "parent": { - "description": "Required. Name of the parent project. For example: projects/{$project_number} or projects/{$project_id}", + "description": "Name of the parent project. 
For example: projects/{$project_number} or projects/{$project_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -2346,7 +2346,7 @@ } } }, - "revision": "20240609", + "revision": "20240923", "rootUrl": "https://cloudbuild.googleapis.com/", "schemas": { "ApprovalConfig": { @@ -3017,11 +3017,13 @@ "description": "Optional. Option to specify how default logs buckets are setup.", "enum": [ "DEFAULT_LOGS_BUCKET_BEHAVIOR_UNSPECIFIED", - "REGIONAL_USER_OWNED_BUCKET" + "REGIONAL_USER_OWNED_BUCKET", + "LEGACY_BUCKET" ], "enumDescriptions": [ "Unspecified.", - "Bucket is located in user-owned project in the same region as the build. The builder service account must have access to create and write to Cloud Storage buckets in the build project." + "Bucket is located in user-owned project in the same region as the build. The builder service account must have access to create and write to Cloud Storage buckets in the build project.", + "Bucket is located in a Google-owned project and is not regionalized." ], "type": "string" }, @@ -3428,7 +3430,7 @@ "type": "string" }, "serviceAccount": { - "description": "The service account used for all user-controlled operations including UpdateBuildTrigger, RunBuildTrigger, CreateBuild, and CancelBuild. If no service account is set, then the standard Cloud Build service account ([PROJECT_NUM]@system.gserviceaccount.com) will be used instead. Format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT_ID_OR_EMAIL}`", + "description": "The service account used for all user-controlled operations including UpdateBuildTrigger, RunBuildTrigger, CreateBuild, and CancelBuild. If no service account is set and the legacy Cloud Build service account ([PROJECT_NUM]@cloudbuild.gserviceaccount.com) is the default for the project then it will be used instead. Format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT_ID_OR_EMAIL}`", "type": "string" }, "sourceToBuild": { @@ -3810,26 +3812,6 @@ }, "type": "object" }, - "GCSLocation": { - "description": "Represents a storage location in Cloud Storage", - "id": "GCSLocation", - "properties": { - "bucket": { - "description": "Cloud Storage bucket. See https://cloud.google.com/storage/docs/naming#requirements", - "type": "string" - }, - "generation": { - "description": "Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used.", - "format": "int64", - "type": "string" - }, - "object": { - "description": "Cloud Storage object. See https://cloud.google.com/storage/docs/naming#objectnames", - "type": "string" - } - }, - "type": "object" - }, "GitConfig": { "description": "GitConfig is a configuration for git operations.", "id": "GitConfig", @@ -4315,12 +4297,8 @@ "id": "HttpConfig", "properties": { "proxySecretVersionName": { - "description": "SecretVersion resource of the HTTP proxy URL. The proxy URL should be in format protocol://@]proxyhost[:port].", + "description": "SecretVersion resource of the HTTP proxy URL. The Service Account used in the build (either the default Service Account or user-specified Service Account) should have `secretmanager.versions.access` permissions on this secret. The proxy URL should be in format `protocol://@]proxyhost[:port]`.", "type": "string" - }, - "proxySslCaInfo": { - "$ref": "GCSLocation", - "description": "Optional. Cloud Storage object storing the certificate to use with the HTTP proxy." 
} }, "type": "object" diff --git a/terraform/providers/google/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-gen.go b/terraform/providers/google/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-gen.go index c55960959d7..845c0e05df2 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-gen.go @@ -411,9 +411,9 @@ type ApprovalConfig struct { NullFields []string `json:"-"` } -func (s *ApprovalConfig) MarshalJSON() ([]byte, error) { +func (s ApprovalConfig) MarshalJSON() ([]byte, error) { type NoMethod ApprovalConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ApprovalResult: ApprovalResult describes the decision and associated @@ -451,9 +451,9 @@ type ApprovalResult struct { NullFields []string `json:"-"` } -func (s *ApprovalResult) MarshalJSON() ([]byte, error) { +func (s ApprovalResult) MarshalJSON() ([]byte, error) { type NoMethod ApprovalResult - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ApproveBuildRequest: Request to approve or reject a pending build. @@ -473,9 +473,9 @@ type ApproveBuildRequest struct { NullFields []string `json:"-"` } -func (s *ApproveBuildRequest) MarshalJSON() ([]byte, error) { +func (s ApproveBuildRequest) MarshalJSON() ([]byte, error) { type NoMethod ApproveBuildRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ArtifactObjects: Files in the workspace to upload to Cloud Storage upon @@ -505,9 +505,9 @@ type ArtifactObjects struct { NullFields []string `json:"-"` } -func (s *ArtifactObjects) MarshalJSON() ([]byte, error) { +func (s ArtifactObjects) MarshalJSON() ([]byte, error) { type NoMethod ArtifactObjects - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ArtifactResult: An artifact that was uploaded during a build. 
This is a @@ -532,9 +532,9 @@ type ArtifactResult struct { NullFields []string `json:"-"` } -func (s *ArtifactResult) MarshalJSON() ([]byte, error) { +func (s ArtifactResult) MarshalJSON() ([]byte, error) { type NoMethod ArtifactResult - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Artifacts: Artifacts produced by a build that should be uploaded upon @@ -584,9 +584,9 @@ type Artifacts struct { NullFields []string `json:"-"` } -func (s *Artifacts) MarshalJSON() ([]byte, error) { +func (s Artifacts) MarshalJSON() ([]byte, error) { type NoMethod Artifacts - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BatchCreateBitbucketServerConnectedRepositoriesRequest: RPC request object @@ -607,9 +607,9 @@ type BatchCreateBitbucketServerConnectedRepositoriesRequest struct { NullFields []string `json:"-"` } -func (s *BatchCreateBitbucketServerConnectedRepositoriesRequest) MarshalJSON() ([]byte, error) { +func (s BatchCreateBitbucketServerConnectedRepositoriesRequest) MarshalJSON() ([]byte, error) { type NoMethod BatchCreateBitbucketServerConnectedRepositoriesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BatchCreateBitbucketServerConnectedRepositoriesResponse: Response of @@ -634,9 +634,9 @@ type BatchCreateBitbucketServerConnectedRepositoriesResponse struct { NullFields []string `json:"-"` } -func (s *BatchCreateBitbucketServerConnectedRepositoriesResponse) MarshalJSON() ([]byte, error) { +func (s BatchCreateBitbucketServerConnectedRepositoriesResponse) MarshalJSON() ([]byte, error) { type NoMethod BatchCreateBitbucketServerConnectedRepositoriesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BatchCreateBitbucketServerConnectedRepositoriesResponseMetadata: Metadata @@ -663,9 +663,9 @@ type BatchCreateBitbucketServerConnectedRepositoriesResponseMetadata struct { NullFields []string `json:"-"` } -func (s *BatchCreateBitbucketServerConnectedRepositoriesResponseMetadata) MarshalJSON() ([]byte, error) { +func (s BatchCreateBitbucketServerConnectedRepositoriesResponseMetadata) MarshalJSON() ([]byte, error) { type NoMethod BatchCreateBitbucketServerConnectedRepositoriesResponseMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BatchCreateGitLabConnectedRepositoriesRequest: RPC request object accepted @@ -686,9 +686,9 @@ type BatchCreateGitLabConnectedRepositoriesRequest struct { NullFields []string `json:"-"` } -func (s *BatchCreateGitLabConnectedRepositoriesRequest) MarshalJSON() ([]byte, error) { +func (s BatchCreateGitLabConnectedRepositoriesRequest) MarshalJSON() ([]byte, error) { type NoMethod BatchCreateGitLabConnectedRepositoriesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BatchCreateGitLabConnectedRepositoriesResponse: Response of @@ -710,9 +710,9 @@ type BatchCreateGitLabConnectedRepositoriesResponse struct { NullFields []string `json:"-"` } -func (s 
*BatchCreateGitLabConnectedRepositoriesResponse) MarshalJSON() ([]byte, error) { +func (s BatchCreateGitLabConnectedRepositoriesResponse) MarshalJSON() ([]byte, error) { type NoMethod BatchCreateGitLabConnectedRepositoriesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BatchCreateGitLabConnectedRepositoriesResponseMetadata: Metadata for @@ -738,9 +738,9 @@ type BatchCreateGitLabConnectedRepositoriesResponseMetadata struct { NullFields []string `json:"-"` } -func (s *BatchCreateGitLabConnectedRepositoriesResponseMetadata) MarshalJSON() ([]byte, error) { +func (s BatchCreateGitLabConnectedRepositoriesResponseMetadata) MarshalJSON() ([]byte, error) { type NoMethod BatchCreateGitLabConnectedRepositoriesResponseMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BitbucketServerConfig: BitbucketServerConfig represents the configuration @@ -806,9 +806,9 @@ type BitbucketServerConfig struct { NullFields []string `json:"-"` } -func (s *BitbucketServerConfig) MarshalJSON() ([]byte, error) { +func (s BitbucketServerConfig) MarshalJSON() ([]byte, error) { type NoMethod BitbucketServerConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BitbucketServerConnectedRepository: / BitbucketServerConnectedRepository @@ -835,9 +835,9 @@ type BitbucketServerConnectedRepository struct { NullFields []string `json:"-"` } -func (s *BitbucketServerConnectedRepository) MarshalJSON() ([]byte, error) { +func (s BitbucketServerConnectedRepository) MarshalJSON() ([]byte, error) { type NoMethod BitbucketServerConnectedRepository - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BitbucketServerRepository: BitbucketServerRepository represents a repository @@ -866,9 +866,9 @@ type BitbucketServerRepository struct { NullFields []string `json:"-"` } -func (s *BitbucketServerRepository) MarshalJSON() ([]byte, error) { +func (s BitbucketServerRepository) MarshalJSON() ([]byte, error) { type NoMethod BitbucketServerRepository - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BitbucketServerRepositoryId: BitbucketServerRepositoryId identifies a @@ -895,9 +895,9 @@ type BitbucketServerRepositoryId struct { NullFields []string `json:"-"` } -func (s *BitbucketServerRepositoryId) MarshalJSON() ([]byte, error) { +func (s BitbucketServerRepositoryId) MarshalJSON() ([]byte, error) { type NoMethod BitbucketServerRepositoryId - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BitbucketServerSecrets: BitbucketServerSecrets represents the secrets in @@ -927,9 +927,9 @@ type BitbucketServerSecrets struct { NullFields []string `json:"-"` } -func (s *BitbucketServerSecrets) MarshalJSON() ([]byte, error) { +func (s BitbucketServerSecrets) MarshalJSON() ([]byte, error) { type NoMethod BitbucketServerSecrets - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // 
BitbucketServerTriggerConfig: BitbucketServerTriggerConfig describes the @@ -969,9 +969,9 @@ type BitbucketServerTriggerConfig struct { NullFields []string `json:"-"` } -func (s *BitbucketServerTriggerConfig) MarshalJSON() ([]byte, error) { +func (s BitbucketServerTriggerConfig) MarshalJSON() ([]byte, error) { type NoMethod BitbucketServerTriggerConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Build: A build resource in the Cloud Build API. At a high level, a `Build` @@ -1111,9 +1111,9 @@ type Build struct { NullFields []string `json:"-"` } -func (s *Build) MarshalJSON() ([]byte, error) { +func (s Build) MarshalJSON() ([]byte, error) { type NoMethod Build - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BuildApproval: BuildApproval describes a build's approval configuration, @@ -1145,9 +1145,9 @@ type BuildApproval struct { NullFields []string `json:"-"` } -func (s *BuildApproval) MarshalJSON() ([]byte, error) { +func (s BuildApproval) MarshalJSON() ([]byte, error) { type NoMethod BuildApproval - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BuildOperationMetadata: Metadata for build operations. @@ -1167,9 +1167,9 @@ type BuildOperationMetadata struct { NullFields []string `json:"-"` } -func (s *BuildOperationMetadata) MarshalJSON() ([]byte, error) { +func (s BuildOperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod BuildOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BuildOptions: Optional arguments to enable specific features of builds. @@ -1185,6 +1185,8 @@ type BuildOptions struct { // "REGIONAL_USER_OWNED_BUCKET" - Bucket is located in user-owned project in // the same region as the build. The builder service account must have access // to create and write to Cloud Storage buckets in the build project. + // "LEGACY_BUCKET" - Bucket is located in a Google-owned project and is not + // regionalized. DefaultLogsBucketBehavior string `json:"defaultLogsBucketBehavior,omitempty"` // DiskSizeGb: Requested disk size for the VM that runs the build. Note that // this is *NOT* "disk free"; some of the space will be used by the operating @@ -1293,9 +1295,9 @@ type BuildOptions struct { NullFields []string `json:"-"` } -func (s *BuildOptions) MarshalJSON() ([]byte, error) { +func (s BuildOptions) MarshalJSON() ([]byte, error) { type NoMethod BuildOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BuildStep: A step in the build pipeline. 
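Besides the receiver change, the BuildOptions hunk above adds the LEGACY_BUCKET value to DefaultLogsBucketBehavior. It is an ordinary enum-style string field; a sketch of opting into it, using the import path as vendored in this tree:

	import cloudbuild "google.golang.org/api/cloudbuild/v1"

	opts := &cloudbuild.BuildOptions{
		// Logs go to a Google-owned, non-regionalized bucket.
		DefaultLogsBucketBehavior: "LEGACY_BUCKET",
	}
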
@@ -1413,9 +1415,9 @@ type BuildStep struct { NullFields []string `json:"-"` } -func (s *BuildStep) MarshalJSON() ([]byte, error) { +func (s BuildStep) MarshalJSON() ([]byte, error) { type NoMethod BuildStep - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BuildTrigger: Configuration for an automated build in response to source @@ -1510,8 +1512,9 @@ type BuildTrigger struct { ResourceName string `json:"resourceName,omitempty"` // ServiceAccount: The service account used for all user-controlled operations // including UpdateBuildTrigger, RunBuildTrigger, CreateBuild, and CancelBuild. - // If no service account is set, then the standard Cloud Build service account - // ([PROJECT_NUM]@system.gserviceaccount.com) will be used instead. Format: + // If no service account is set and the legacy Cloud Build service account + // ([PROJECT_NUM]@cloudbuild.gserviceaccount.com) is the default for the + // project then it will be used instead. Format: // `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT_ID_OR_EMAIL}` ServiceAccount string `json:"serviceAccount,omitempty"` // SourceToBuild: The repo and ref of the repository from which to build. This @@ -1549,9 +1552,9 @@ type BuildTrigger struct { NullFields []string `json:"-"` } -func (s *BuildTrigger) MarshalJSON() ([]byte, error) { +func (s BuildTrigger) MarshalJSON() ([]byte, error) { type NoMethod BuildTrigger - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BuiltImage: An image built by the pipeline. @@ -1577,9 +1580,9 @@ type BuiltImage struct { NullFields []string `json:"-"` } -func (s *BuiltImage) MarshalJSON() ([]byte, error) { +func (s BuiltImage) MarshalJSON() ([]byte, error) { type NoMethod BuiltImage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CancelBuildRequest: Request to cancel an ongoing build. @@ -1604,9 +1607,9 @@ type CancelBuildRequest struct { NullFields []string `json:"-"` } -func (s *CancelBuildRequest) MarshalJSON() ([]byte, error) { +func (s CancelBuildRequest) MarshalJSON() ([]byte, error) { type NoMethod CancelBuildRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CancelOperationRequest: The request message for Operations.CancelOperation. 
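The BuildTrigger.ServiceAccount doc change above is the behavioral piece of this bump: a trigger with no explicit service account now falls back to the legacy [PROJECT_NUM]@cloudbuild.gserviceaccount.com account only when that account is still the project default, rather than unconditionally. Pinning an explicit account avoids depending on that fallback; a sketch, continuing with the vendored cloudbuild import from the earlier snippet (trigger name and email are hypothetical):

	trigger := &cloudbuild.BuildTrigger{
		Name: "deploy-on-push",
		ServiceAccount: "projects/my-project/serviceAccounts/" +
			"builder@my-project.iam.gserviceaccount.com",
	}
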
@@ -1638,9 +1641,9 @@ type ConnectedRepository struct { NullFields []string `json:"-"` } -func (s *ConnectedRepository) MarshalJSON() ([]byte, error) { +func (s ConnectedRepository) MarshalJSON() ([]byte, error) { type NoMethod ConnectedRepository - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateBitbucketServerConfigOperationMetadata: Metadata for @@ -1667,9 +1670,9 @@ type CreateBitbucketServerConfigOperationMetadata struct { NullFields []string `json:"-"` } -func (s *CreateBitbucketServerConfigOperationMetadata) MarshalJSON() ([]byte, error) { +func (s CreateBitbucketServerConfigOperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod CreateBitbucketServerConfigOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateBitbucketServerConnectedRepositoryRequest: Request to connect a @@ -1697,9 +1700,9 @@ type CreateBitbucketServerConnectedRepositoryRequest struct { NullFields []string `json:"-"` } -func (s *CreateBitbucketServerConnectedRepositoryRequest) MarshalJSON() ([]byte, error) { +func (s CreateBitbucketServerConnectedRepositoryRequest) MarshalJSON() ([]byte, error) { type NoMethod CreateBitbucketServerConnectedRepositoryRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateGitHubEnterpriseConfigOperationMetadata: Metadata for @@ -1726,9 +1729,9 @@ type CreateGitHubEnterpriseConfigOperationMetadata struct { NullFields []string `json:"-"` } -func (s *CreateGitHubEnterpriseConfigOperationMetadata) MarshalJSON() ([]byte, error) { +func (s CreateGitHubEnterpriseConfigOperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod CreateGitHubEnterpriseConfigOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateGitLabConfigOperationMetadata: Metadata for `CreateGitLabConfig` @@ -1754,9 +1757,9 @@ type CreateGitLabConfigOperationMetadata struct { NullFields []string `json:"-"` } -func (s *CreateGitLabConfigOperationMetadata) MarshalJSON() ([]byte, error) { +func (s CreateGitLabConfigOperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod CreateGitLabConfigOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateGitLabConnectedRepositoryRequest: Request to connect a repository from @@ -1781,9 +1784,9 @@ type CreateGitLabConnectedRepositoryRequest struct { NullFields []string `json:"-"` } -func (s *CreateGitLabConnectedRepositoryRequest) MarshalJSON() ([]byte, error) { +func (s CreateGitLabConnectedRepositoryRequest) MarshalJSON() ([]byte, error) { type NoMethod CreateGitLabConnectedRepositoryRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateWorkerPoolOperationMetadata: Metadata for the `CreateWorkerPool` @@ -1809,9 +1812,9 @@ type CreateWorkerPoolOperationMetadata struct { NullFields []string `json:"-"` } -func (s *CreateWorkerPoolOperationMetadata) MarshalJSON() ([]byte, error) { +func (s CreateWorkerPoolOperationMetadata) 
MarshalJSON() ([]byte, error) { type NoMethod CreateWorkerPoolOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DefaultServiceAccount: The default service account used for `Builds`. @@ -1843,9 +1846,9 @@ type DefaultServiceAccount struct { NullFields []string `json:"-"` } -func (s *DefaultServiceAccount) MarshalJSON() ([]byte, error) { +func (s DefaultServiceAccount) MarshalJSON() ([]byte, error) { type NoMethod DefaultServiceAccount - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DeleteBitbucketServerConfigOperationMetadata: Metadata for @@ -1872,9 +1875,9 @@ type DeleteBitbucketServerConfigOperationMetadata struct { NullFields []string `json:"-"` } -func (s *DeleteBitbucketServerConfigOperationMetadata) MarshalJSON() ([]byte, error) { +func (s DeleteBitbucketServerConfigOperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod DeleteBitbucketServerConfigOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DeleteGitHubEnterpriseConfigOperationMetadata: Metadata for @@ -1901,9 +1904,9 @@ type DeleteGitHubEnterpriseConfigOperationMetadata struct { NullFields []string `json:"-"` } -func (s *DeleteGitHubEnterpriseConfigOperationMetadata) MarshalJSON() ([]byte, error) { +func (s DeleteGitHubEnterpriseConfigOperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod DeleteGitHubEnterpriseConfigOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DeleteGitLabConfigOperationMetadata: Metadata for `DeleteGitLabConfig` @@ -1929,9 +1932,9 @@ type DeleteGitLabConfigOperationMetadata struct { NullFields []string `json:"-"` } -func (s *DeleteGitLabConfigOperationMetadata) MarshalJSON() ([]byte, error) { +func (s DeleteGitLabConfigOperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod DeleteGitLabConfigOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DeleteWorkerPoolOperationMetadata: Metadata for the `DeleteWorkerPool` @@ -1957,9 +1960,9 @@ type DeleteWorkerPoolOperationMetadata struct { NullFields []string `json:"-"` } -func (s *DeleteWorkerPoolOperationMetadata) MarshalJSON() ([]byte, error) { +func (s DeleteWorkerPoolOperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod DeleteWorkerPoolOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DeveloperConnectConfig: This config defines the location of a source through @@ -1987,9 +1990,9 @@ type DeveloperConnectConfig struct { NullFields []string `json:"-"` } -func (s *DeveloperConnectConfig) MarshalJSON() ([]byte, error) { +func (s DeveloperConnectConfig) MarshalJSON() ([]byte, error) { type NoMethod DeveloperConnectConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Empty: A generic empty message that you can re-use to avoid defining @@ -2029,9 +2032,9 @@ type 
FailureInfo struct { NullFields []string `json:"-"` } -func (s *FailureInfo) MarshalJSON() ([]byte, error) { +func (s FailureInfo) MarshalJSON() ([]byte, error) { type NoMethod FailureInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FileHashes: Container message for hashes of byte content of files, used in @@ -2052,38 +2055,9 @@ type FileHashes struct { NullFields []string `json:"-"` } -func (s *FileHashes) MarshalJSON() ([]byte, error) { +func (s FileHashes) MarshalJSON() ([]byte, error) { type NoMethod FileHashes - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// GCSLocation: Represents a storage location in Cloud Storage -type GCSLocation struct { - // Bucket: Cloud Storage bucket. See - // https://cloud.google.com/storage/docs/naming#requirements - Bucket string `json:"bucket,omitempty"` - // Generation: Cloud Storage generation for the object. If the generation is - // omitted, the latest generation will be used. - Generation int64 `json:"generation,omitempty,string"` - // Object: Cloud Storage object. See - // https://cloud.google.com/storage/docs/naming#objectnames - Object string `json:"object,omitempty"` - // ForceSendFields is a list of field names (e.g. "Bucket") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more - // details. - ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Bucket") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. - NullFields []string `json:"-"` -} - -func (s *GCSLocation) MarshalJSON() ([]byte, error) { - type NoMethod GCSLocation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GitConfig: GitConfig is a configuration for git operations. 
@@ -2103,9 +2077,9 @@ type GitConfig struct { NullFields []string `json:"-"` } -func (s *GitConfig) MarshalJSON() ([]byte, error) { +func (s GitConfig) MarshalJSON() ([]byte, error) { type NoMethod GitConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GitFileSource: GitFileSource describes a file within a (possibly remote) @@ -2163,9 +2137,9 @@ type GitFileSource struct { NullFields []string `json:"-"` } -func (s *GitFileSource) MarshalJSON() ([]byte, error) { +func (s GitFileSource) MarshalJSON() ([]byte, error) { type NoMethod GitFileSource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GitHubEnterpriseConfig: GitHubEnterpriseConfig represents a configuration @@ -2217,9 +2191,9 @@ type GitHubEnterpriseConfig struct { NullFields []string `json:"-"` } -func (s *GitHubEnterpriseConfig) MarshalJSON() ([]byte, error) { +func (s GitHubEnterpriseConfig) MarshalJSON() ([]byte, error) { type NoMethod GitHubEnterpriseConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GitHubEnterpriseSecrets: GitHubEnterpriseSecrets represents the names of all @@ -2260,9 +2234,9 @@ type GitHubEnterpriseSecrets struct { NullFields []string `json:"-"` } -func (s *GitHubEnterpriseSecrets) MarshalJSON() ([]byte, error) { +func (s GitHubEnterpriseSecrets) MarshalJSON() ([]byte, error) { type NoMethod GitHubEnterpriseSecrets - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GitHubEventsConfig: GitHubEventsConfig describes the configuration of a @@ -2299,9 +2273,9 @@ type GitHubEventsConfig struct { NullFields []string `json:"-"` } -func (s *GitHubEventsConfig) MarshalJSON() ([]byte, error) { +func (s GitHubEventsConfig) MarshalJSON() ([]byte, error) { type NoMethod GitHubEventsConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GitLabConfig: GitLabConfig represents the configuration for a GitLab @@ -2340,9 +2314,9 @@ type GitLabConfig struct { NullFields []string `json:"-"` } -func (s *GitLabConfig) MarshalJSON() ([]byte, error) { +func (s GitLabConfig) MarshalJSON() ([]byte, error) { type NoMethod GitLabConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GitLabConnectedRepository: GitLabConnectedRepository represents a GitLab @@ -2368,9 +2342,9 @@ type GitLabConnectedRepository struct { NullFields []string `json:"-"` } -func (s *GitLabConnectedRepository) MarshalJSON() ([]byte, error) { +func (s GitLabConnectedRepository) MarshalJSON() ([]byte, error) { type NoMethod GitLabConnectedRepository - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GitLabEnterpriseConfig: GitLabEnterpriseConfig represents the configuration @@ -2397,9 +2371,9 @@ type GitLabEnterpriseConfig struct { NullFields []string `json:"-"` } -func (s *GitLabEnterpriseConfig) MarshalJSON() ([]byte, error) { +func (s GitLabEnterpriseConfig) MarshalJSON() ([]byte, 
error) { type NoMethod GitLabEnterpriseConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GitLabEventsConfig: GitLabEventsConfig describes the configuration of a @@ -2430,9 +2404,9 @@ type GitLabEventsConfig struct { NullFields []string `json:"-"` } -func (s *GitLabEventsConfig) MarshalJSON() ([]byte, error) { +func (s GitLabEventsConfig) MarshalJSON() ([]byte, error) { type NoMethod GitLabEventsConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GitLabRepository: Proto Representing a GitLabRepository @@ -2460,9 +2434,9 @@ type GitLabRepository struct { NullFields []string `json:"-"` } -func (s *GitLabRepository) MarshalJSON() ([]byte, error) { +func (s GitLabRepository) MarshalJSON() ([]byte, error) { type NoMethod GitLabRepository - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GitLabRepositoryId: GitLabRepositoryId identifies a specific repository @@ -2488,9 +2462,9 @@ type GitLabRepositoryId struct { NullFields []string `json:"-"` } -func (s *GitLabRepositoryId) MarshalJSON() ([]byte, error) { +func (s GitLabRepositoryId) MarshalJSON() ([]byte, error) { type NoMethod GitLabRepositoryId - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GitLabSecrets: GitLabSecrets represents the secrets in Secret Manager for a @@ -2522,9 +2496,9 @@ type GitLabSecrets struct { NullFields []string `json:"-"` } -func (s *GitLabSecrets) MarshalJSON() ([]byte, error) { +func (s GitLabSecrets) MarshalJSON() ([]byte, error) { type NoMethod GitLabSecrets - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GitRepoSource: GitRepoSource describes a repo and ref of a code repository. @@ -2573,9 +2547,9 @@ type GitRepoSource struct { NullFields []string `json:"-"` } -func (s *GitRepoSource) MarshalJSON() ([]byte, error) { +func (s GitRepoSource) MarshalJSON() ([]byte, error) { type NoMethod GitRepoSource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GitSource: Location of the source in any accessible Git repository. @@ -2608,9 +2582,9 @@ type GitSource struct { NullFields []string `json:"-"` } -func (s *GitSource) MarshalJSON() ([]byte, error) { +func (s GitSource) MarshalJSON() ([]byte, error) { type NoMethod GitSource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Hash: Container message for hash values. @@ -2638,9 +2612,9 @@ type Hash struct { NullFields []string `json:"-"` } -func (s *Hash) MarshalJSON() ([]byte, error) { +func (s Hash) MarshalJSON() ([]byte, error) { type NoMethod Hash - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpBody: Message that represents an arbitrary HTTP body. 
It should only be @@ -2682,19 +2656,19 @@ type HttpBody struct { NullFields []string `json:"-"` } -func (s *HttpBody) MarshalJSON() ([]byte, error) { +func (s HttpBody) MarshalJSON() ([]byte, error) { type NoMethod HttpBody - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpConfig: HttpConfig is a configuration for HTTP related git operations. type HttpConfig struct { // ProxySecretVersionName: SecretVersion resource of the HTTP proxy URL. The - // proxy URL should be in format protocol://@]proxyhost[:port]. + // Service Account used in the build (either the default Service Account or + // user-specified Service Account) should have `secretmanager.versions.access` + // permissions on this secret. The proxy URL should be in format + // `protocol://@]proxyhost[:port]`. ProxySecretVersionName string `json:"proxySecretVersionName,omitempty"` - // ProxySslCaInfo: Optional. Cloud Storage object storing the certificate to - // use with the HTTP proxy. - ProxySslCaInfo *GCSLocation `json:"proxySslCaInfo,omitempty"` // ForceSendFields is a list of field names (e.g. "ProxySecretVersionName") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See @@ -2708,9 +2682,9 @@ type HttpConfig struct { NullFields []string `json:"-"` } -func (s *HttpConfig) MarshalJSON() ([]byte, error) { +func (s HttpConfig) MarshalJSON() ([]byte, error) { type NoMethod HttpConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InlineSecret: Pairs a set of secret environment variables mapped to @@ -2738,9 +2712,9 @@ type InlineSecret struct { NullFields []string `json:"-"` } -func (s *InlineSecret) MarshalJSON() ([]byte, error) { +func (s InlineSecret) MarshalJSON() ([]byte, error) { type NoMethod InlineSecret - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListBitbucketServerConfigsResponse: RPC response object returned by @@ -2767,9 +2741,9 @@ type ListBitbucketServerConfigsResponse struct { NullFields []string `json:"-"` } -func (s *ListBitbucketServerConfigsResponse) MarshalJSON() ([]byte, error) { +func (s ListBitbucketServerConfigsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListBitbucketServerConfigsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListBitbucketServerRepositoriesResponse: RPC response object returned by the @@ -2796,9 +2770,9 @@ type ListBitbucketServerRepositoriesResponse struct { NullFields []string `json:"-"` } -func (s *ListBitbucketServerRepositoriesResponse) MarshalJSON() ([]byte, error) { +func (s ListBitbucketServerRepositoriesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListBitbucketServerRepositoriesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListBuildTriggersResponse: Response containing existing `BuildTriggers`. 
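This revision also deletes the GCSLocation schema and the HttpConfig.ProxySslCaInfo field that referenced it, so code that configured a proxy CA certificate through this surface no longer compiles against the vendored copy. HttpConfig is reduced to the proxy-URL secret, and per the new description the build's service account needs secretmanager.versions.access on it. A sketch of the remaining field (secret name hypothetical):

	httpCfg := &cloudbuild.HttpConfig{
		ProxySecretVersionName: "projects/my-project/secrets/proxy-url/versions/latest",
	}
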
@@ -2824,9 +2798,9 @@ type ListBuildTriggersResponse struct { NullFields []string `json:"-"` } -func (s *ListBuildTriggersResponse) MarshalJSON() ([]byte, error) { +func (s ListBuildTriggersResponse) MarshalJSON() ([]byte, error) { type NoMethod ListBuildTriggersResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListBuildsResponse: Response including listed builds. @@ -2852,9 +2826,9 @@ type ListBuildsResponse struct { NullFields []string `json:"-"` } -func (s *ListBuildsResponse) MarshalJSON() ([]byte, error) { +func (s ListBuildsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListBuildsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListGitLabConfigsResponse: RPC response object returned by ListGitLabConfigs @@ -2881,9 +2855,9 @@ type ListGitLabConfigsResponse struct { NullFields []string `json:"-"` } -func (s *ListGitLabConfigsResponse) MarshalJSON() ([]byte, error) { +func (s ListGitLabConfigsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListGitLabConfigsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListGitLabRepositoriesResponse: RPC response object returned by the @@ -2910,9 +2884,9 @@ type ListGitLabRepositoriesResponse struct { NullFields []string `json:"-"` } -func (s *ListGitLabRepositoriesResponse) MarshalJSON() ([]byte, error) { +func (s ListGitLabRepositoriesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListGitLabRepositoriesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListGithubEnterpriseConfigsResponse: RPC response object returned by @@ -2936,9 +2910,9 @@ type ListGithubEnterpriseConfigsResponse struct { NullFields []string `json:"-"` } -func (s *ListGithubEnterpriseConfigsResponse) MarshalJSON() ([]byte, error) { +func (s ListGithubEnterpriseConfigsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListGithubEnterpriseConfigsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListWorkerPoolsResponse: Response containing existing `WorkerPools`. @@ -2965,9 +2939,9 @@ type ListWorkerPoolsResponse struct { NullFields []string `json:"-"` } -func (s *ListWorkerPoolsResponse) MarshalJSON() ([]byte, error) { +func (s ListWorkerPoolsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListWorkerPoolsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MavenArtifact: A Maven artifact to upload to Artifact Registry upon @@ -3005,9 +2979,9 @@ type MavenArtifact struct { NullFields []string `json:"-"` } -func (s *MavenArtifact) MarshalJSON() ([]byte, error) { +func (s MavenArtifact) MarshalJSON() ([]byte, error) { type NoMethod MavenArtifact - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkConfig: Defines the network configuration for the pool. 
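All of these generated structs share the convention that gensupport.MarshalJSON implements: a field named in ForceSendFields is emitted even when it holds its zero value (which omitempty would otherwise drop), and a field named in NullFields is emitted as an explicit JSON null. A sketch against BuildOptions, reusing the cloudbuild import above (values hypothetical):

	opts := &cloudbuild.BuildOptions{
		DiskSizeGb: 0,
		// omitempty would normally drop a zero DiskSizeGb; forcing it
		// emits "diskSizeGb":"0" (int64 fields are string-encoded).
		ForceSendFields: []string{"DiskSizeGb"},
		// Emitted as "defaultLogsBucketBehavior":null.
		NullFields: []string{"DefaultLogsBucketBehavior"},
	}
	b, err := opts.MarshalJSON()
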
@@ -3051,9 +3025,9 @@ type NetworkConfig struct { NullFields []string `json:"-"` } -func (s *NetworkConfig) MarshalJSON() ([]byte, error) { +func (s NetworkConfig) MarshalJSON() ([]byte, error) { type NoMethod NetworkConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NpmPackage: Npm package to upload to Artifact Registry upon successful @@ -3079,9 +3053,9 @@ type NpmPackage struct { NullFields []string `json:"-"` } -func (s *NpmPackage) MarshalJSON() ([]byte, error) { +func (s NpmPackage) MarshalJSON() ([]byte, error) { type NoMethod NpmPackage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Operation: This resource represents a long-running operation that is the @@ -3126,9 +3100,9 @@ type Operation struct { NullFields []string `json:"-"` } -func (s *Operation) MarshalJSON() ([]byte, error) { +func (s Operation) MarshalJSON() ([]byte, error) { type NoMethod Operation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationMetadata: Represents the metadata of the long-running operation. @@ -3164,9 +3138,9 @@ type OperationMetadata struct { NullFields []string `json:"-"` } -func (s *OperationMetadata) MarshalJSON() ([]byte, error) { +func (s OperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod OperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PoolOption: Details about how a build should be executed on a `WorkerPool`. @@ -3191,9 +3165,9 @@ type PoolOption struct { NullFields []string `json:"-"` } -func (s *PoolOption) MarshalJSON() ([]byte, error) { +func (s PoolOption) MarshalJSON() ([]byte, error) { type NoMethod PoolOption - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PrivatePoolV1Config: Configuration for a V1 `PrivatePool`. 
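Operation here is the standard long-running-operation envelope used across these RPCs, so success and failure surface the same way regardless of which call produced it. A sketch of inspecting one (op assumed returned by a prior service call; the Done/Error fields are the standard LRO shape, and fmt is assumed imported):

	func checkDone(op *cloudbuild.Operation) error {
		if !op.Done {
			return nil // still running; poll again later
		}
		if op.Error != nil {
			return fmt.Errorf("operation failed: %s (code %d)",
				op.Error.Message, op.Error.Code)
		}
		// op.Response holds the finished resource as raw JSON.
		return nil
	}
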
@@ -3215,9 +3189,9 @@ type PrivatePoolV1Config struct { NullFields []string `json:"-"` } -func (s *PrivatePoolV1Config) MarshalJSON() ([]byte, error) { +func (s PrivatePoolV1Config) MarshalJSON() ([]byte, error) { type NoMethod PrivatePoolV1Config - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ProcessAppManifestCallbackOperationMetadata: Metadata for @@ -3244,9 +3218,9 @@ type ProcessAppManifestCallbackOperationMetadata struct { NullFields []string `json:"-"` } -func (s *ProcessAppManifestCallbackOperationMetadata) MarshalJSON() ([]byte, error) { +func (s ProcessAppManifestCallbackOperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod ProcessAppManifestCallbackOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PubsubConfig: PubsubConfig describes the configuration of a trigger that @@ -3284,9 +3258,9 @@ type PubsubConfig struct { NullFields []string `json:"-"` } -func (s *PubsubConfig) MarshalJSON() ([]byte, error) { +func (s PubsubConfig) MarshalJSON() ([]byte, error) { type NoMethod PubsubConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PullRequestFilter: PullRequestFilter contains filter properties for matching @@ -3335,9 +3309,9 @@ type PullRequestFilter struct { NullFields []string `json:"-"` } -func (s *PullRequestFilter) MarshalJSON() ([]byte, error) { +func (s PullRequestFilter) MarshalJSON() ([]byte, error) { type NoMethod PullRequestFilter - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PushFilter: Push contains filter properties for matching GitHub git pushes. 
@@ -3366,9 +3340,9 @@ type PushFilter struct { NullFields []string `json:"-"` } -func (s *PushFilter) MarshalJSON() ([]byte, error) { +func (s PushFilter) MarshalJSON() ([]byte, error) { type NoMethod PushFilter - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PythonPackage: Python package to upload to Artifact Registry upon successful @@ -3396,9 +3370,9 @@ type PythonPackage struct { NullFields []string `json:"-"` } -func (s *PythonPackage) MarshalJSON() ([]byte, error) { +func (s PythonPackage) MarshalJSON() ([]byte, error) { type NoMethod PythonPackage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ReceiveTriggerWebhookResponse: ReceiveTriggerWebhookResponse [Experimental] @@ -3426,9 +3400,9 @@ type RemoveBitbucketServerConnectedRepositoryRequest struct { NullFields []string `json:"-"` } -func (s *RemoveBitbucketServerConnectedRepositoryRequest) MarshalJSON() ([]byte, error) { +func (s RemoveBitbucketServerConnectedRepositoryRequest) MarshalJSON() ([]byte, error) { type NoMethod RemoveBitbucketServerConnectedRepositoryRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RemoveGitLabConnectedRepositoryRequest: RPC request object accepted by @@ -3449,9 +3423,9 @@ type RemoveGitLabConnectedRepositoryRequest struct { NullFields []string `json:"-"` } -func (s *RemoveGitLabConnectedRepositoryRequest) MarshalJSON() ([]byte, error) { +func (s RemoveGitLabConnectedRepositoryRequest) MarshalJSON() ([]byte, error) { type NoMethod RemoveGitLabConnectedRepositoryRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RepoSource: Location of the source in a Google Cloud Source Repository. @@ -3494,9 +3468,9 @@ type RepoSource struct { NullFields []string `json:"-"` } -func (s *RepoSource) MarshalJSON() ([]byte, error) { +func (s RepoSource) MarshalJSON() ([]byte, error) { type NoMethod RepoSource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RepositoryEventConfig: The configuration of a trigger that creates a build @@ -3533,9 +3507,9 @@ type RepositoryEventConfig struct { NullFields []string `json:"-"` } -func (s *RepositoryEventConfig) MarshalJSON() ([]byte, error) { +func (s RepositoryEventConfig) MarshalJSON() ([]byte, error) { type NoMethod RepositoryEventConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Results: Artifacts created by the build pipeline. @@ -3583,9 +3557,9 @@ type Results struct { NullFields []string `json:"-"` } -func (s *Results) MarshalJSON() ([]byte, error) { +func (s Results) MarshalJSON() ([]byte, error) { type NoMethod Results - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RetryBuildRequest: Specifies a build to retry. 
@@ -3610,9 +3584,9 @@ type RetryBuildRequest struct { NullFields []string `json:"-"` } -func (s *RetryBuildRequest) MarshalJSON() ([]byte, error) { +func (s RetryBuildRequest) MarshalJSON() ([]byte, error) { type NoMethod RetryBuildRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RunBuildTriggerRequest: Specifies a build trigger to run and the source to @@ -3638,9 +3612,9 @@ type RunBuildTriggerRequest struct { NullFields []string `json:"-"` } -func (s *RunBuildTriggerRequest) MarshalJSON() ([]byte, error) { +func (s RunBuildTriggerRequest) MarshalJSON() ([]byte, error) { type NoMethod RunBuildTriggerRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Secret: Pairs a set of secret environment variables containing encrypted @@ -3670,9 +3644,9 @@ type Secret struct { NullFields []string `json:"-"` } -func (s *Secret) MarshalJSON() ([]byte, error) { +func (s Secret) MarshalJSON() ([]byte, error) { type NoMethod Secret - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SecretManagerSecret: Pairs a secret environment variable with a @@ -3698,9 +3672,9 @@ type SecretManagerSecret struct { NullFields []string `json:"-"` } -func (s *SecretManagerSecret) MarshalJSON() ([]byte, error) { +func (s SecretManagerSecret) MarshalJSON() ([]byte, error) { type NoMethod SecretManagerSecret - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Secrets: Secrets and secret environment variables. @@ -3724,9 +3698,9 @@ type Secrets struct { NullFields []string `json:"-"` } -func (s *Secrets) MarshalJSON() ([]byte, error) { +func (s Secrets) MarshalJSON() ([]byte, error) { type NoMethod Secrets - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ServiceDirectoryConfig: ServiceDirectoryConfig represents Service Directory @@ -3749,9 +3723,9 @@ type ServiceDirectoryConfig struct { NullFields []string `json:"-"` } -func (s *ServiceDirectoryConfig) MarshalJSON() ([]byte, error) { +func (s ServiceDirectoryConfig) MarshalJSON() ([]byte, error) { type NoMethod ServiceDirectoryConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Source: Location of the source in a supported storage service. @@ -3787,9 +3761,9 @@ type Source struct { NullFields []string `json:"-"` } -func (s *Source) MarshalJSON() ([]byte, error) { +func (s Source) MarshalJSON() ([]byte, error) { type NoMethod Source - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SourceProvenance: Provenance of the source. 
Ways to find the original @@ -3832,9 +3806,9 @@ type SourceProvenance struct { NullFields []string `json:"-"` } -func (s *SourceProvenance) MarshalJSON() ([]byte, error) { +func (s SourceProvenance) MarshalJSON() ([]byte, error) { type NoMethod SourceProvenance - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Status: The `Status` type defines a logical error model that is suitable for @@ -3866,9 +3840,9 @@ type Status struct { NullFields []string `json:"-"` } -func (s *Status) MarshalJSON() ([]byte, error) { +func (s Status) MarshalJSON() ([]byte, error) { type NoMethod Status - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StorageSource: Location of the source in an archive file in Cloud Storage. @@ -3906,9 +3880,9 @@ type StorageSource struct { NullFields []string `json:"-"` } -func (s *StorageSource) MarshalJSON() ([]byte, error) { +func (s StorageSource) MarshalJSON() ([]byte, error) { type NoMethod StorageSource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StorageSourceManifest: Location of the source manifest in Cloud Storage. @@ -3938,9 +3912,9 @@ type StorageSourceManifest struct { NullFields []string `json:"-"` } -func (s *StorageSourceManifest) MarshalJSON() ([]byte, error) { +func (s StorageSourceManifest) MarshalJSON() ([]byte, error) { type NoMethod StorageSourceManifest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TimeSpan: Start and end times for a build execution phase. 
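StorageSource and StorageSourceManifest are two of the Source variants touched above; a sketch of pointing a build at an archived source in Cloud Storage, again with the cloudbuild import from earlier (bucket and object names hypothetical):

	src := &cloudbuild.Source{
		StorageSource: &cloudbuild.StorageSource{
			Bucket: "my-build-sources",
			Object: "app/source.tar.gz",
			// Generation omitted: the latest object generation is used.
		},
	}
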
@@ -3962,9 +3936,9 @@ type TimeSpan struct { NullFields []string `json:"-"` } -func (s *TimeSpan) MarshalJSON() ([]byte, error) { +func (s TimeSpan) MarshalJSON() ([]byte, error) { type NoMethod TimeSpan - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateBitbucketServerConfigOperationMetadata: Metadata for @@ -3991,9 +3965,9 @@ type UpdateBitbucketServerConfigOperationMetadata struct { NullFields []string `json:"-"` } -func (s *UpdateBitbucketServerConfigOperationMetadata) MarshalJSON() ([]byte, error) { +func (s UpdateBitbucketServerConfigOperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod UpdateBitbucketServerConfigOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateGitHubEnterpriseConfigOperationMetadata: Metadata for @@ -4020,9 +3994,9 @@ type UpdateGitHubEnterpriseConfigOperationMetadata struct { NullFields []string `json:"-"` } -func (s *UpdateGitHubEnterpriseConfigOperationMetadata) MarshalJSON() ([]byte, error) { +func (s UpdateGitHubEnterpriseConfigOperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod UpdateGitHubEnterpriseConfigOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateGitLabConfigOperationMetadata: Metadata for `UpdateGitLabConfig` @@ -4048,9 +4022,9 @@ type UpdateGitLabConfigOperationMetadata struct { NullFields []string `json:"-"` } -func (s *UpdateGitLabConfigOperationMetadata) MarshalJSON() ([]byte, error) { +func (s UpdateGitLabConfigOperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod UpdateGitLabConfigOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateWorkerPoolOperationMetadata: Metadata for the `UpdateWorkerPool` @@ -4076,9 +4050,9 @@ type UpdateWorkerPoolOperationMetadata struct { NullFields []string `json:"-"` } -func (s *UpdateWorkerPoolOperationMetadata) MarshalJSON() ([]byte, error) { +func (s UpdateWorkerPoolOperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod UpdateWorkerPoolOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UploadedMavenArtifact: A Maven artifact uploaded using the MavenArtifact @@ -4104,9 +4078,9 @@ type UploadedMavenArtifact struct { NullFields []string `json:"-"` } -func (s *UploadedMavenArtifact) MarshalJSON() ([]byte, error) { +func (s UploadedMavenArtifact) MarshalJSON() ([]byte, error) { type NoMethod UploadedMavenArtifact - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UploadedNpmPackage: An npm package uploaded to Artifact Registry using the @@ -4132,9 +4106,9 @@ type UploadedNpmPackage struct { NullFields []string `json:"-"` } -func (s *UploadedNpmPackage) MarshalJSON() ([]byte, error) { +func (s UploadedNpmPackage) MarshalJSON() ([]byte, error) { type NoMethod UploadedNpmPackage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, 
s.NullFields) } // UploadedPythonPackage: Artifact uploaded using the PythonPackage directive. @@ -4159,9 +4133,9 @@ type UploadedPythonPackage struct { NullFields []string `json:"-"` } -func (s *UploadedPythonPackage) MarshalJSON() ([]byte, error) { +func (s UploadedPythonPackage) MarshalJSON() ([]byte, error) { type NoMethod UploadedPythonPackage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Volume: Volume describes a Docker container volume which is mounted into @@ -4188,9 +4162,9 @@ type Volume struct { NullFields []string `json:"-"` } -func (s *Volume) MarshalJSON() ([]byte, error) { +func (s Volume) MarshalJSON() ([]byte, error) { type NoMethod Volume - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Warning: A non-fatal problem encountered during the execution of the build. @@ -4218,9 +4192,9 @@ type Warning struct { NullFields []string `json:"-"` } -func (s *Warning) MarshalJSON() ([]byte, error) { +func (s Warning) MarshalJSON() ([]byte, error) { type NoMethod Warning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WebhookConfig: WebhookConfig describes the configuration of a trigger that @@ -4249,9 +4223,9 @@ type WebhookConfig struct { NullFields []string `json:"-"` } -func (s *WebhookConfig) MarshalJSON() ([]byte, error) { +func (s WebhookConfig) MarshalJSON() ([]byte, error) { type NoMethod WebhookConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkerConfig: Defines the configuration to be used for creating workers in @@ -4281,9 +4255,9 @@ type WorkerConfig struct { NullFields []string `json:"-"` } -func (s *WorkerConfig) MarshalJSON() ([]byte, error) { +func (s WorkerConfig) MarshalJSON() ([]byte, error) { type NoMethod WorkerConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkerPool: Configuration for a `WorkerPool`. Cloud Build owns and maintains @@ -4353,9 +4327,9 @@ type WorkerPool struct { NullFields []string `json:"-"` } -func (s *WorkerPool) MarshalJSON() ([]byte, error) { +func (s WorkerPool) MarshalJSON() ([]byte, error) { type NoMethod WorkerPool - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type GithubDotComWebhookReceiveCall struct { diff --git a/terraform/providers/google/vendor/google.golang.org/api/cloudfunctions/v1/cloudfunctions-api.json b/terraform/providers/google/vendor/google.golang.org/api/cloudfunctions/v1/cloudfunctions-api.json index 773024a1318..29d91e9f814 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudfunctions/v1/cloudfunctions-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudfunctions/v1/cloudfunctions-api.json @@ -552,7 +552,7 @@ } } }, - "revision": "20240523", + "revision": "20240905", "rootUrl": "https://cloudfunctions.googleapis.com/", "schemas": { "AuditConfig": { @@ -691,7 +691,7 @@ "type": "string" }, "buildServiceAccount": { - "description": "Optional. 
A service account the user provides for use with Cloud Build.", + "description": "A service account the user provides for use with Cloud Build. The format of this field is `projects/{projectId}/serviceAccounts/{serviceAccountEmail}`.", "type": "string" }, "buildWorkerPool": { @@ -987,609 +987,6 @@ }, "type": "object" }, - "GoogleCloudFunctionsV2LocationMetadata": { - "description": "Extra GCF specific location information.", - "id": "GoogleCloudFunctionsV2LocationMetadata", - "properties": { - "environments": { - "description": "The Cloud Function environments this location supports.", - "items": { - "enum": [ - "ENVIRONMENT_UNSPECIFIED", - "GEN_1", - "GEN_2" - ], - "enumDescriptions": [ - "Unspecified", - "Gen 1", - "Gen 2" - ], - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudFunctionsV2OperationMetadata": { - "description": "Represents the metadata of the long-running operation.", - "id": "GoogleCloudFunctionsV2OperationMetadata", - "properties": { - "apiVersion": { - "description": "API version used to start the operation.", - "type": "string" - }, - "cancelRequested": { - "description": "Identifies whether the user has requested cancellation of the operation. Operations that have successfully been cancelled have google.longrunning.Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", - "type": "boolean" - }, - "createTime": { - "description": "The time the operation was created.", - "format": "google-datetime", - "type": "string" - }, - "endTime": { - "description": "The time the operation finished running.", - "format": "google-datetime", - "type": "string" - }, - "operationType": { - "description": "The operation type.", - "enum": [ - "OPERATIONTYPE_UNSPECIFIED", - "CREATE_FUNCTION", - "UPDATE_FUNCTION", - "DELETE_FUNCTION", - "REDIRECT_FUNCTION_UPGRADE_TRAFFIC", - "ROLLBACK_FUNCTION_UPGRADE_TRAFFIC", - "SETUP_FUNCTION_UPGRADE_CONFIG", - "ABORT_FUNCTION_UPGRADE", - "COMMIT_FUNCTION_UPGRADE" - ], - "enumDescriptions": [ - "Unspecified", - "CreateFunction", - "UpdateFunction", - "DeleteFunction", - "RedirectFunctionUpgradeTraffic", - "RollbackFunctionUpgradeTraffic", - "SetupFunctionUpgradeConfig", - "AbortFunctionUpgrade", - "CommitFunctionUpgrade" - ], - "type": "string" - }, - "requestResource": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "description": "The original request that started the operation.", - "type": "object" - }, - "sourceToken": { - "description": "An identifier for Firebase function sources. Disclaimer: This field is only supported for Firebase function deployments.", - "type": "string" - }, - "stages": { - "description": "Mechanism for reporting in-progress stages", - "items": { - "$ref": "GoogleCloudFunctionsV2Stage" - }, - "type": "array" - }, - "statusDetail": { - "description": "Human-readable status of the operation, if any.", - "type": "string" - }, - "target": { - "description": "Server-defined resource path for the target of the operation.", - "type": "string" - }, - "verb": { - "description": "Name of the verb executed by the operation.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudFunctionsV2Stage": { - "description": "Each Stage of the deployment process", - "id": "GoogleCloudFunctionsV2Stage", - "properties": { - "message": { - "description": "Message describing the Stage", - "type": "string" - }, - "name": { - "description": "Name of the Stage. 
This will be unique for each Stage.", - "enum": [ - "NAME_UNSPECIFIED", - "ARTIFACT_REGISTRY", - "BUILD", - "SERVICE", - "TRIGGER", - "SERVICE_ROLLBACK", - "TRIGGER_ROLLBACK" - ], - "enumDescriptions": [ - "Not specified. Invalid name.", - "Artifact Regsitry Stage", - "Build Stage", - "Service Stage", - "Trigger Stage", - "Service Rollback Stage", - "Trigger Rollback Stage" - ], - "type": "string" - }, - "resource": { - "description": "Resource of the Stage", - "type": "string" - }, - "resourceUri": { - "description": "Link to the current Stage resource", - "type": "string" - }, - "state": { - "description": "Current state of the Stage", - "enum": [ - "STATE_UNSPECIFIED", - "NOT_STARTED", - "IN_PROGRESS", - "COMPLETE" - ], - "enumDescriptions": [ - "Not specified. Invalid state.", - "Stage has not started.", - "Stage is in progress.", - "Stage has completed." - ], - "type": "string" - }, - "stateMessages": { - "description": "State messages from the current Stage.", - "items": { - "$ref": "GoogleCloudFunctionsV2StateMessage" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudFunctionsV2StateMessage": { - "description": "Informational messages about the state of the Cloud Function or Operation.", - "id": "GoogleCloudFunctionsV2StateMessage", - "properties": { - "message": { - "description": "The message.", - "type": "string" - }, - "severity": { - "description": "Severity of the state message.", - "enum": [ - "SEVERITY_UNSPECIFIED", - "ERROR", - "WARNING", - "INFO" - ], - "enumDescriptions": [ - "Not specified. Invalid severity.", - "ERROR-level severity.", - "WARNING-level severity.", - "INFO-level severity." - ], - "type": "string" - }, - "type": { - "description": "One-word CamelCase type of the state message.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudFunctionsV2alphaLocationMetadata": { - "description": "Extra GCF specific location information.", - "id": "GoogleCloudFunctionsV2alphaLocationMetadata", - "properties": { - "environments": { - "description": "The Cloud Function environments this location supports.", - "items": { - "enum": [ - "ENVIRONMENT_UNSPECIFIED", - "GEN_1", - "GEN_2" - ], - "enumDescriptions": [ - "Unspecified", - "Gen 1", - "Gen 2" - ], - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudFunctionsV2alphaOperationMetadata": { - "description": "Represents the metadata of the long-running operation.", - "id": "GoogleCloudFunctionsV2alphaOperationMetadata", - "properties": { - "apiVersion": { - "description": "API version used to start the operation.", - "type": "string" - }, - "cancelRequested": { - "description": "Identifies whether the user has requested cancellation of the operation. 
Operations that have successfully been cancelled have google.longrunning.Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", - "type": "boolean" - }, - "createTime": { - "description": "The time the operation was created.", - "format": "google-datetime", - "type": "string" - }, - "endTime": { - "description": "The time the operation finished running.", - "format": "google-datetime", - "type": "string" - }, - "operationType": { - "description": "The operation type.", - "enum": [ - "OPERATIONTYPE_UNSPECIFIED", - "CREATE_FUNCTION", - "UPDATE_FUNCTION", - "DELETE_FUNCTION", - "REDIRECT_FUNCTION_UPGRADE_TRAFFIC", - "ROLLBACK_FUNCTION_UPGRADE_TRAFFIC", - "SETUP_FUNCTION_UPGRADE_CONFIG", - "ABORT_FUNCTION_UPGRADE", - "COMMIT_FUNCTION_UPGRADE" - ], - "enumDescriptions": [ - "Unspecified", - "CreateFunction", - "UpdateFunction", - "DeleteFunction", - "RedirectFunctionUpgradeTraffic", - "RollbackFunctionUpgradeTraffic", - "SetupFunctionUpgradeConfig", - "AbortFunctionUpgrade", - "CommitFunctionUpgrade" - ], - "type": "string" - }, - "requestResource": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "description": "The original request that started the operation.", - "type": "object" - }, - "sourceToken": { - "description": "An identifier for Firebase function sources. Disclaimer: This field is only supported for Firebase function deployments.", - "type": "string" - }, - "stages": { - "description": "Mechanism for reporting in-progress stages", - "items": { - "$ref": "GoogleCloudFunctionsV2alphaStage" - }, - "type": "array" - }, - "statusDetail": { - "description": "Human-readable status of the operation, if any.", - "type": "string" - }, - "target": { - "description": "Server-defined resource path for the target of the operation.", - "type": "string" - }, - "verb": { - "description": "Name of the verb executed by the operation.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudFunctionsV2alphaStage": { - "description": "Each Stage of the deployment process", - "id": "GoogleCloudFunctionsV2alphaStage", - "properties": { - "message": { - "description": "Message describing the Stage", - "type": "string" - }, - "name": { - "description": "Name of the Stage. This will be unique for each Stage.", - "enum": [ - "NAME_UNSPECIFIED", - "ARTIFACT_REGISTRY", - "BUILD", - "SERVICE", - "TRIGGER", - "SERVICE_ROLLBACK", - "TRIGGER_ROLLBACK" - ], - "enumDescriptions": [ - "Not specified. Invalid name.", - "Artifact Regsitry Stage", - "Build Stage", - "Service Stage", - "Trigger Stage", - "Service Rollback Stage", - "Trigger Rollback Stage" - ], - "type": "string" - }, - "resource": { - "description": "Resource of the Stage", - "type": "string" - }, - "resourceUri": { - "description": "Link to the current Stage resource", - "type": "string" - }, - "state": { - "description": "Current state of the Stage", - "enum": [ - "STATE_UNSPECIFIED", - "NOT_STARTED", - "IN_PROGRESS", - "COMPLETE" - ], - "enumDescriptions": [ - "Not specified. Invalid state.", - "Stage has not started.", - "Stage is in progress.", - "Stage has completed." 
- ], - "type": "string" - }, - "stateMessages": { - "description": "State messages from the current Stage.", - "items": { - "$ref": "GoogleCloudFunctionsV2alphaStateMessage" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudFunctionsV2alphaStateMessage": { - "description": "Informational messages about the state of the Cloud Function or Operation.", - "id": "GoogleCloudFunctionsV2alphaStateMessage", - "properties": { - "message": { - "description": "The message.", - "type": "string" - }, - "severity": { - "description": "Severity of the state message.", - "enum": [ - "SEVERITY_UNSPECIFIED", - "ERROR", - "WARNING", - "INFO" - ], - "enumDescriptions": [ - "Not specified. Invalid severity.", - "ERROR-level severity.", - "WARNING-level severity.", - "INFO-level severity." - ], - "type": "string" - }, - "type": { - "description": "One-word CamelCase type of the state message.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudFunctionsV2betaLocationMetadata": { - "description": "Extra GCF specific location information.", - "id": "GoogleCloudFunctionsV2betaLocationMetadata", - "properties": { - "environments": { - "description": "The Cloud Function environments this location supports.", - "items": { - "enum": [ - "ENVIRONMENT_UNSPECIFIED", - "GEN_1", - "GEN_2" - ], - "enumDescriptions": [ - "Unspecified", - "Gen 1", - "Gen 2" - ], - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudFunctionsV2betaOperationMetadata": { - "description": "Represents the metadata of the long-running operation.", - "id": "GoogleCloudFunctionsV2betaOperationMetadata", - "properties": { - "apiVersion": { - "description": "API version used to start the operation.", - "type": "string" - }, - "cancelRequested": { - "description": "Identifies whether the user has requested cancellation of the operation. Operations that have successfully been cancelled have google.longrunning.Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", - "type": "boolean" - }, - "createTime": { - "description": "The time the operation was created.", - "format": "google-datetime", - "type": "string" - }, - "endTime": { - "description": "The time the operation finished running.", - "format": "google-datetime", - "type": "string" - }, - "operationType": { - "description": "The operation type.", - "enum": [ - "OPERATIONTYPE_UNSPECIFIED", - "CREATE_FUNCTION", - "UPDATE_FUNCTION", - "DELETE_FUNCTION", - "REDIRECT_FUNCTION_UPGRADE_TRAFFIC", - "ROLLBACK_FUNCTION_UPGRADE_TRAFFIC", - "SETUP_FUNCTION_UPGRADE_CONFIG", - "ABORT_FUNCTION_UPGRADE", - "COMMIT_FUNCTION_UPGRADE" - ], - "enumDescriptions": [ - "Unspecified", - "CreateFunction", - "UpdateFunction", - "DeleteFunction", - "RedirectFunctionUpgradeTraffic", - "RollbackFunctionUpgradeTraffic", - "SetupFunctionUpgradeConfig", - "AbortFunctionUpgrade", - "CommitFunctionUpgrade" - ], - "type": "string" - }, - "requestResource": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "description": "The original request that started the operation.", - "type": "object" - }, - "sourceToken": { - "description": "An identifier for Firebase function sources. 
Disclaimer: This field is only supported for Firebase function deployments.", - "type": "string" - }, - "stages": { - "description": "Mechanism for reporting in-progress stages", - "items": { - "$ref": "GoogleCloudFunctionsV2betaStage" - }, - "type": "array" - }, - "statusDetail": { - "description": "Human-readable status of the operation, if any.", - "type": "string" - }, - "target": { - "description": "Server-defined resource path for the target of the operation.", - "type": "string" - }, - "verb": { - "description": "Name of the verb executed by the operation.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudFunctionsV2betaStage": { - "description": "Each Stage of the deployment process", - "id": "GoogleCloudFunctionsV2betaStage", - "properties": { - "message": { - "description": "Message describing the Stage", - "type": "string" - }, - "name": { - "description": "Name of the Stage. This will be unique for each Stage.", - "enum": [ - "NAME_UNSPECIFIED", - "ARTIFACT_REGISTRY", - "BUILD", - "SERVICE", - "TRIGGER", - "SERVICE_ROLLBACK", - "TRIGGER_ROLLBACK" - ], - "enumDescriptions": [ - "Not specified. Invalid name.", - "Artifact Regsitry Stage", - "Build Stage", - "Service Stage", - "Trigger Stage", - "Service Rollback Stage", - "Trigger Rollback Stage" - ], - "type": "string" - }, - "resource": { - "description": "Resource of the Stage", - "type": "string" - }, - "resourceUri": { - "description": "Link to the current Stage resource", - "type": "string" - }, - "state": { - "description": "Current state of the Stage", - "enum": [ - "STATE_UNSPECIFIED", - "NOT_STARTED", - "IN_PROGRESS", - "COMPLETE" - ], - "enumDescriptions": [ - "Not specified. Invalid state.", - "Stage has not started.", - "Stage is in progress.", - "Stage has completed." - ], - "type": "string" - }, - "stateMessages": { - "description": "State messages from the current Stage.", - "items": { - "$ref": "GoogleCloudFunctionsV2betaStateMessage" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudFunctionsV2betaStateMessage": { - "description": "Informational messages about the state of the Cloud Function or Operation.", - "id": "GoogleCloudFunctionsV2betaStateMessage", - "properties": { - "message": { - "description": "The message.", - "type": "string" - }, - "severity": { - "description": "Severity of the state message.", - "enum": [ - "SEVERITY_UNSPECIFIED", - "ERROR", - "WARNING", - "INFO" - ], - "enumDescriptions": [ - "Not specified. Invalid severity.", - "ERROR-level severity.", - "WARNING-level severity.", - "INFO-level severity." - ], - "type": "string" - }, - "type": { - "description": "One-word CamelCase type of the state message.", - "type": "string" - } - }, - "type": "object" - }, "HttpsTrigger": { "description": "Describes HttpsTrigger, could be used to connect web hooks to function.", "id": "HttpsTrigger", @@ -1942,7 +1339,7 @@ "type": "string" }, "url": { - "description": "The URL pointing to the hosted repository where the function is defined. There are supported Cloud Source Repository URLs in the following formats: To refer to a specific commit: `https://source.developers.google.com/projects/*/repos/*/revisions/*/paths/*` To refer to a moveable alias (branch): `https://source.developers.google.com/projects/*/repos/*/moveable-aliases/*/paths/*` In particular, to refer to HEAD use `master` moveable alias. 
To refer to a specific fixed alias (tag): `https://source.developers.google.com/projects/*/repos/*/fixed-aliases/*/paths/*` You may omit `paths/*` if you want to use the main directory.", + "description": "The URL pointing to the hosted repository where the function is defined. There are supported Cloud Source Repository URLs in the following formats: To refer to a specific commit: `https://source.developers.google.com/projects/*/repos/*/revisions/*/paths/*` To refer to a moveable alias (branch): `https://source.developers.google.com/projects/*/repos/*/moveable-aliases/*/paths/*` In particular, to refer to HEAD use `master` moveable alias. To refer to a specific fixed alias (tag): `https://source.developers.google.com/projects/*/repos/*/fixed-aliases/*/paths/*` You may omit `paths/*` if you want to use the main directory. The function response may add an empty `/paths/` to the URL.", "type": "string" } }, diff --git a/terraform/providers/google/vendor/google.golang.org/api/cloudfunctions/v1/cloudfunctions-gen.go b/terraform/providers/google/vendor/google.golang.org/api/cloudfunctions/v1/cloudfunctions-gen.go index 19fad779aea..1cc377c3abc 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudfunctions/v1/cloudfunctions-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudfunctions/v1/cloudfunctions-gen.go @@ -235,9 +235,9 @@ type AuditConfig struct { NullFields []string `json:"-"` } -func (s *AuditConfig) MarshalJSON() ([]byte, error) { +func (s AuditConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuditLogConfig: Provides the configuration for logging a type of @@ -270,9 +270,9 @@ type AuditLogConfig struct { NullFields []string `json:"-"` } -func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { +func (s AuditLogConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditLogConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AutomaticUpdatePolicy: Security patches are applied automatically to the @@ -374,9 +374,9 @@ type Binding struct { NullFields []string `json:"-"` } -func (s *Binding) MarshalJSON() ([]byte, error) { +func (s Binding) MarshalJSON() ([]byte, error) { type NoMethod Binding - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CallFunctionRequest: Request for the `CallFunction` method. @@ -396,9 +396,9 @@ type CallFunctionRequest struct { NullFields []string `json:"-"` } -func (s *CallFunctionRequest) MarshalJSON() ([]byte, error) { +func (s CallFunctionRequest) MarshalJSON() ([]byte, error) { type NoMethod CallFunctionRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CallFunctionResponse: Response of `CallFunction` method. 
@@ -427,9 +427,9 @@ type CallFunctionResponse struct { NullFields []string `json:"-"` } -func (s *CallFunctionResponse) MarshalJSON() ([]byte, error) { +func (s CallFunctionResponse) MarshalJSON() ([]byte, error) { type NoMethod CallFunctionResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CloudFunction: Describes a Cloud Function that contains user computation @@ -449,8 +449,9 @@ type CloudFunction struct { // BuildName: Output only. The Cloud Build Name of the function deployment. // `projects//locations//builds/`. BuildName string `json:"buildName,omitempty"` - // BuildServiceAccount: Optional. A service account the user provides for use - // with Cloud Build. + // BuildServiceAccount: A service account the user provides for use with Cloud + // Build. The format of this field is + // `projects/{projectId}/serviceAccounts/{serviceAccountEmail}`. BuildServiceAccount string `json:"buildServiceAccount,omitempty"` // BuildWorkerPool: Name of the Cloud Build Custom Worker Pool that should be // used to build the function. The format of this field is @@ -636,9 +637,9 @@ type CloudFunction struct { NullFields []string `json:"-"` } -func (s *CloudFunction) MarshalJSON() ([]byte, error) { +func (s CloudFunction) MarshalJSON() ([]byte, error) { type NoMethod CloudFunction - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // EventTrigger: Describes EventTrigger, used to request events be sent from @@ -687,9 +688,9 @@ type EventTrigger struct { NullFields []string `json:"-"` } -func (s *EventTrigger) MarshalJSON() ([]byte, error) { +func (s EventTrigger) MarshalJSON() ([]byte, error) { type NoMethod EventTrigger - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Expr: Represents a textual expression in the Common Expression Language @@ -735,9 +736,9 @@ type Expr struct { NullFields []string `json:"-"` } -func (s *Expr) MarshalJSON() ([]byte, error) { +func (s Expr) MarshalJSON() ([]byte, error) { type NoMethod Expr - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FailurePolicy: Describes the policy in case of function's execution failure. @@ -758,9 +759,9 @@ type FailurePolicy struct { NullFields []string `json:"-"` } -func (s *FailurePolicy) MarshalJSON() ([]byte, error) { +func (s FailurePolicy) MarshalJSON() ([]byte, error) { type NoMethod FailurePolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GenerateDownloadUrlRequest: Request of `GenerateDownloadUrl` method. @@ -781,9 +782,9 @@ type GenerateDownloadUrlRequest struct { NullFields []string `json:"-"` } -func (s *GenerateDownloadUrlRequest) MarshalJSON() ([]byte, error) { +func (s GenerateDownloadUrlRequest) MarshalJSON() ([]byte, error) { type NoMethod GenerateDownloadUrlRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GenerateDownloadUrlResponse: Response of `GenerateDownloadUrl` method. 
@@ -807,9 +808,9 @@ type GenerateDownloadUrlResponse struct { NullFields []string `json:"-"` } -func (s *GenerateDownloadUrlResponse) MarshalJSON() ([]byte, error) { +func (s GenerateDownloadUrlResponse) MarshalJSON() ([]byte, error) { type NoMethod GenerateDownloadUrlResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GenerateUploadUrlRequest: Request of `GenerateSourceUploadUrl` method. @@ -841,9 +842,9 @@ type GenerateUploadUrlRequest struct { NullFields []string `json:"-"` } -func (s *GenerateUploadUrlRequest) MarshalJSON() ([]byte, error) { +func (s GenerateUploadUrlRequest) MarshalJSON() ([]byte, error) { type NoMethod GenerateUploadUrlRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GenerateUploadUrlResponse: Response of `GenerateSourceUploadUrl` method. @@ -868,507 +869,9 @@ type GenerateUploadUrlResponse struct { NullFields []string `json:"-"` } -func (s *GenerateUploadUrlResponse) MarshalJSON() ([]byte, error) { +func (s GenerateUploadUrlResponse) MarshalJSON() ([]byte, error) { type NoMethod GenerateUploadUrlResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// GoogleCloudFunctionsV2LocationMetadata: Extra GCF specific location -// information. -type GoogleCloudFunctionsV2LocationMetadata struct { - // Environments: The Cloud Function environments this location supports. - // - // Possible values: - // "ENVIRONMENT_UNSPECIFIED" - Unspecified - // "GEN_1" - Gen 1 - // "GEN_2" - Gen 2 - Environments []string `json:"environments,omitempty"` - // ForceSendFields is a list of field names (e.g. "Environments") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more - // details. - ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Environments") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. - NullFields []string `json:"-"` -} - -func (s *GoogleCloudFunctionsV2LocationMetadata) MarshalJSON() ([]byte, error) { - type NoMethod GoogleCloudFunctionsV2LocationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// GoogleCloudFunctionsV2OperationMetadata: Represents the metadata of the -// long-running operation. -type GoogleCloudFunctionsV2OperationMetadata struct { - // ApiVersion: API version used to start the operation. - ApiVersion string `json:"apiVersion,omitempty"` - // CancelRequested: Identifies whether the user has requested cancellation of - // the operation. Operations that have successfully been cancelled have - // google.longrunning.Operation.error value with a google.rpc.Status.code of 1, - // corresponding to `Code.CANCELLED`. - CancelRequested bool `json:"cancelRequested,omitempty"` - // CreateTime: The time the operation was created. - CreateTime string `json:"createTime,omitempty"` - // EndTime: The time the operation finished running. - EndTime string `json:"endTime,omitempty"` - // OperationType: The operation type. 
- // - // Possible values: - // "OPERATIONTYPE_UNSPECIFIED" - Unspecified - // "CREATE_FUNCTION" - CreateFunction - // "UPDATE_FUNCTION" - UpdateFunction - // "DELETE_FUNCTION" - DeleteFunction - // "REDIRECT_FUNCTION_UPGRADE_TRAFFIC" - RedirectFunctionUpgradeTraffic - // "ROLLBACK_FUNCTION_UPGRADE_TRAFFIC" - RollbackFunctionUpgradeTraffic - // "SETUP_FUNCTION_UPGRADE_CONFIG" - SetupFunctionUpgradeConfig - // "ABORT_FUNCTION_UPGRADE" - AbortFunctionUpgrade - // "COMMIT_FUNCTION_UPGRADE" - CommitFunctionUpgrade - OperationType string `json:"operationType,omitempty"` - // RequestResource: The original request that started the operation. - RequestResource googleapi.RawMessage `json:"requestResource,omitempty"` - // SourceToken: An identifier for Firebase function sources. Disclaimer: This - // field is only supported for Firebase function deployments. - SourceToken string `json:"sourceToken,omitempty"` - // Stages: Mechanism for reporting in-progress stages - Stages []*GoogleCloudFunctionsV2Stage `json:"stages,omitempty"` - // StatusDetail: Human-readable status of the operation, if any. - StatusDetail string `json:"statusDetail,omitempty"` - // Target: Server-defined resource path for the target of the operation. - Target string `json:"target,omitempty"` - // Verb: Name of the verb executed by the operation. - Verb string `json:"verb,omitempty"` - // ForceSendFields is a list of field names (e.g. "ApiVersion") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more - // details. - ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ApiVersion") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. - NullFields []string `json:"-"` -} - -func (s *GoogleCloudFunctionsV2OperationMetadata) MarshalJSON() ([]byte, error) { - type NoMethod GoogleCloudFunctionsV2OperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// GoogleCloudFunctionsV2Stage: Each Stage of the deployment process -type GoogleCloudFunctionsV2Stage struct { - // Message: Message describing the Stage - Message string `json:"message,omitempty"` - // Name: Name of the Stage. This will be unique for each Stage. - // - // Possible values: - // "NAME_UNSPECIFIED" - Not specified. Invalid name. - // "ARTIFACT_REGISTRY" - Artifact Regsitry Stage - // "BUILD" - Build Stage - // "SERVICE" - Service Stage - // "TRIGGER" - Trigger Stage - // "SERVICE_ROLLBACK" - Service Rollback Stage - // "TRIGGER_ROLLBACK" - Trigger Rollback Stage - Name string `json:"name,omitempty"` - // Resource: Resource of the Stage - Resource string `json:"resource,omitempty"` - // ResourceUri: Link to the current Stage resource - ResourceUri string `json:"resourceUri,omitempty"` - // State: Current state of the Stage - // - // Possible values: - // "STATE_UNSPECIFIED" - Not specified. Invalid state. - // "NOT_STARTED" - Stage has not started. - // "IN_PROGRESS" - Stage is in progress. - // "COMPLETE" - Stage has completed. - State string `json:"state,omitempty"` - // StateMessages: State messages from the current Stage. - StateMessages []*GoogleCloudFunctionsV2StateMessage `json:"stateMessages,omitempty"` - // ForceSendFields is a list of field names (e.g. 
"Message") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more - // details. - ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Message") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. - NullFields []string `json:"-"` -} - -func (s *GoogleCloudFunctionsV2Stage) MarshalJSON() ([]byte, error) { - type NoMethod GoogleCloudFunctionsV2Stage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// GoogleCloudFunctionsV2StateMessage: Informational messages about the state -// of the Cloud Function or Operation. -type GoogleCloudFunctionsV2StateMessage struct { - // Message: The message. - Message string `json:"message,omitempty"` - // Severity: Severity of the state message. - // - // Possible values: - // "SEVERITY_UNSPECIFIED" - Not specified. Invalid severity. - // "ERROR" - ERROR-level severity. - // "WARNING" - WARNING-level severity. - // "INFO" - INFO-level severity. - Severity string `json:"severity,omitempty"` - // Type: One-word CamelCase type of the state message. - Type string `json:"type,omitempty"` - // ForceSendFields is a list of field names (e.g. "Message") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more - // details. - ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Message") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. - NullFields []string `json:"-"` -} - -func (s *GoogleCloudFunctionsV2StateMessage) MarshalJSON() ([]byte, error) { - type NoMethod GoogleCloudFunctionsV2StateMessage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// GoogleCloudFunctionsV2alphaLocationMetadata: Extra GCF specific location -// information. -type GoogleCloudFunctionsV2alphaLocationMetadata struct { - // Environments: The Cloud Function environments this location supports. - // - // Possible values: - // "ENVIRONMENT_UNSPECIFIED" - Unspecified - // "GEN_1" - Gen 1 - // "GEN_2" - Gen 2 - Environments []string `json:"environments,omitempty"` - // ForceSendFields is a list of field names (e.g. "Environments") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more - // details. - ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Environments") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
- NullFields []string `json:"-"` -} - -func (s *GoogleCloudFunctionsV2alphaLocationMetadata) MarshalJSON() ([]byte, error) { - type NoMethod GoogleCloudFunctionsV2alphaLocationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// GoogleCloudFunctionsV2alphaOperationMetadata: Represents the metadata of the -// long-running operation. -type GoogleCloudFunctionsV2alphaOperationMetadata struct { - // ApiVersion: API version used to start the operation. - ApiVersion string `json:"apiVersion,omitempty"` - // CancelRequested: Identifies whether the user has requested cancellation of - // the operation. Operations that have successfully been cancelled have - // google.longrunning.Operation.error value with a google.rpc.Status.code of 1, - // corresponding to `Code.CANCELLED`. - CancelRequested bool `json:"cancelRequested,omitempty"` - // CreateTime: The time the operation was created. - CreateTime string `json:"createTime,omitempty"` - // EndTime: The time the operation finished running. - EndTime string `json:"endTime,omitempty"` - // OperationType: The operation type. - // - // Possible values: - // "OPERATIONTYPE_UNSPECIFIED" - Unspecified - // "CREATE_FUNCTION" - CreateFunction - // "UPDATE_FUNCTION" - UpdateFunction - // "DELETE_FUNCTION" - DeleteFunction - // "REDIRECT_FUNCTION_UPGRADE_TRAFFIC" - RedirectFunctionUpgradeTraffic - // "ROLLBACK_FUNCTION_UPGRADE_TRAFFIC" - RollbackFunctionUpgradeTraffic - // "SETUP_FUNCTION_UPGRADE_CONFIG" - SetupFunctionUpgradeConfig - // "ABORT_FUNCTION_UPGRADE" - AbortFunctionUpgrade - // "COMMIT_FUNCTION_UPGRADE" - CommitFunctionUpgrade - OperationType string `json:"operationType,omitempty"` - // RequestResource: The original request that started the operation. - RequestResource googleapi.RawMessage `json:"requestResource,omitempty"` - // SourceToken: An identifier for Firebase function sources. Disclaimer: This - // field is only supported for Firebase function deployments. - SourceToken string `json:"sourceToken,omitempty"` - // Stages: Mechanism for reporting in-progress stages - Stages []*GoogleCloudFunctionsV2alphaStage `json:"stages,omitempty"` - // StatusDetail: Human-readable status of the operation, if any. - StatusDetail string `json:"statusDetail,omitempty"` - // Target: Server-defined resource path for the target of the operation. - Target string `json:"target,omitempty"` - // Verb: Name of the verb executed by the operation. - Verb string `json:"verb,omitempty"` - // ForceSendFields is a list of field names (e.g. "ApiVersion") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more - // details. - ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ApiVersion") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
- NullFields []string `json:"-"` -} - -func (s *GoogleCloudFunctionsV2alphaOperationMetadata) MarshalJSON() ([]byte, error) { - type NoMethod GoogleCloudFunctionsV2alphaOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// GoogleCloudFunctionsV2alphaStage: Each Stage of the deployment process -type GoogleCloudFunctionsV2alphaStage struct { - // Message: Message describing the Stage - Message string `json:"message,omitempty"` - // Name: Name of the Stage. This will be unique for each Stage. - // - // Possible values: - // "NAME_UNSPECIFIED" - Not specified. Invalid name. - // "ARTIFACT_REGISTRY" - Artifact Regsitry Stage - // "BUILD" - Build Stage - // "SERVICE" - Service Stage - // "TRIGGER" - Trigger Stage - // "SERVICE_ROLLBACK" - Service Rollback Stage - // "TRIGGER_ROLLBACK" - Trigger Rollback Stage - Name string `json:"name,omitempty"` - // Resource: Resource of the Stage - Resource string `json:"resource,omitempty"` - // ResourceUri: Link to the current Stage resource - ResourceUri string `json:"resourceUri,omitempty"` - // State: Current state of the Stage - // - // Possible values: - // "STATE_UNSPECIFIED" - Not specified. Invalid state. - // "NOT_STARTED" - Stage has not started. - // "IN_PROGRESS" - Stage is in progress. - // "COMPLETE" - Stage has completed. - State string `json:"state,omitempty"` - // StateMessages: State messages from the current Stage. - StateMessages []*GoogleCloudFunctionsV2alphaStateMessage `json:"stateMessages,omitempty"` - // ForceSendFields is a list of field names (e.g. "Message") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more - // details. - ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Message") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. - NullFields []string `json:"-"` -} - -func (s *GoogleCloudFunctionsV2alphaStage) MarshalJSON() ([]byte, error) { - type NoMethod GoogleCloudFunctionsV2alphaStage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// GoogleCloudFunctionsV2alphaStateMessage: Informational messages about the -// state of the Cloud Function or Operation. -type GoogleCloudFunctionsV2alphaStateMessage struct { - // Message: The message. - Message string `json:"message,omitempty"` - // Severity: Severity of the state message. - // - // Possible values: - // "SEVERITY_UNSPECIFIED" - Not specified. Invalid severity. - // "ERROR" - ERROR-level severity. - // "WARNING" - WARNING-level severity. - // "INFO" - INFO-level severity. - Severity string `json:"severity,omitempty"` - // Type: One-word CamelCase type of the state message. - Type string `json:"type,omitempty"` - // ForceSendFields is a list of field names (e.g. "Message") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more - // details. - ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Message") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. 
See - // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. - NullFields []string `json:"-"` -} - -func (s *GoogleCloudFunctionsV2alphaStateMessage) MarshalJSON() ([]byte, error) { - type NoMethod GoogleCloudFunctionsV2alphaStateMessage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// GoogleCloudFunctionsV2betaLocationMetadata: Extra GCF specific location -// information. -type GoogleCloudFunctionsV2betaLocationMetadata struct { - // Environments: The Cloud Function environments this location supports. - // - // Possible values: - // "ENVIRONMENT_UNSPECIFIED" - Unspecified - // "GEN_1" - Gen 1 - // "GEN_2" - Gen 2 - Environments []string `json:"environments,omitempty"` - // ForceSendFields is a list of field names (e.g. "Environments") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more - // details. - ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Environments") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. - NullFields []string `json:"-"` -} - -func (s *GoogleCloudFunctionsV2betaLocationMetadata) MarshalJSON() ([]byte, error) { - type NoMethod GoogleCloudFunctionsV2betaLocationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// GoogleCloudFunctionsV2betaOperationMetadata: Represents the metadata of the -// long-running operation. -type GoogleCloudFunctionsV2betaOperationMetadata struct { - // ApiVersion: API version used to start the operation. - ApiVersion string `json:"apiVersion,omitempty"` - // CancelRequested: Identifies whether the user has requested cancellation of - // the operation. Operations that have successfully been cancelled have - // google.longrunning.Operation.error value with a google.rpc.Status.code of 1, - // corresponding to `Code.CANCELLED`. - CancelRequested bool `json:"cancelRequested,omitempty"` - // CreateTime: The time the operation was created. - CreateTime string `json:"createTime,omitempty"` - // EndTime: The time the operation finished running. - EndTime string `json:"endTime,omitempty"` - // OperationType: The operation type. - // - // Possible values: - // "OPERATIONTYPE_UNSPECIFIED" - Unspecified - // "CREATE_FUNCTION" - CreateFunction - // "UPDATE_FUNCTION" - UpdateFunction - // "DELETE_FUNCTION" - DeleteFunction - // "REDIRECT_FUNCTION_UPGRADE_TRAFFIC" - RedirectFunctionUpgradeTraffic - // "ROLLBACK_FUNCTION_UPGRADE_TRAFFIC" - RollbackFunctionUpgradeTraffic - // "SETUP_FUNCTION_UPGRADE_CONFIG" - SetupFunctionUpgradeConfig - // "ABORT_FUNCTION_UPGRADE" - AbortFunctionUpgrade - // "COMMIT_FUNCTION_UPGRADE" - CommitFunctionUpgrade - OperationType string `json:"operationType,omitempty"` - // RequestResource: The original request that started the operation. - RequestResource googleapi.RawMessage `json:"requestResource,omitempty"` - // SourceToken: An identifier for Firebase function sources. Disclaimer: This - // field is only supported for Firebase function deployments. 
- SourceToken string `json:"sourceToken,omitempty"` - // Stages: Mechanism for reporting in-progress stages - Stages []*GoogleCloudFunctionsV2betaStage `json:"stages,omitempty"` - // StatusDetail: Human-readable status of the operation, if any. - StatusDetail string `json:"statusDetail,omitempty"` - // Target: Server-defined resource path for the target of the operation. - Target string `json:"target,omitempty"` - // Verb: Name of the verb executed by the operation. - Verb string `json:"verb,omitempty"` - // ForceSendFields is a list of field names (e.g. "ApiVersion") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more - // details. - ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ApiVersion") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. - NullFields []string `json:"-"` -} - -func (s *GoogleCloudFunctionsV2betaOperationMetadata) MarshalJSON() ([]byte, error) { - type NoMethod GoogleCloudFunctionsV2betaOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// GoogleCloudFunctionsV2betaStage: Each Stage of the deployment process -type GoogleCloudFunctionsV2betaStage struct { - // Message: Message describing the Stage - Message string `json:"message,omitempty"` - // Name: Name of the Stage. This will be unique for each Stage. - // - // Possible values: - // "NAME_UNSPECIFIED" - Not specified. Invalid name. - // "ARTIFACT_REGISTRY" - Artifact Regsitry Stage - // "BUILD" - Build Stage - // "SERVICE" - Service Stage - // "TRIGGER" - Trigger Stage - // "SERVICE_ROLLBACK" - Service Rollback Stage - // "TRIGGER_ROLLBACK" - Trigger Rollback Stage - Name string `json:"name,omitempty"` - // Resource: Resource of the Stage - Resource string `json:"resource,omitempty"` - // ResourceUri: Link to the current Stage resource - ResourceUri string `json:"resourceUri,omitempty"` - // State: Current state of the Stage - // - // Possible values: - // "STATE_UNSPECIFIED" - Not specified. Invalid state. - // "NOT_STARTED" - Stage has not started. - // "IN_PROGRESS" - Stage is in progress. - // "COMPLETE" - Stage has completed. - State string `json:"state,omitempty"` - // StateMessages: State messages from the current Stage. - StateMessages []*GoogleCloudFunctionsV2betaStateMessage `json:"stateMessages,omitempty"` - // ForceSendFields is a list of field names (e.g. "Message") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more - // details. - ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Message") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
- NullFields []string `json:"-"` -} - -func (s *GoogleCloudFunctionsV2betaStage) MarshalJSON() ([]byte, error) { - type NoMethod GoogleCloudFunctionsV2betaStage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// GoogleCloudFunctionsV2betaStateMessage: Informational messages about the -// state of the Cloud Function or Operation. -type GoogleCloudFunctionsV2betaStateMessage struct { - // Message: The message. - Message string `json:"message,omitempty"` - // Severity: Severity of the state message. - // - // Possible values: - // "SEVERITY_UNSPECIFIED" - Not specified. Invalid severity. - // "ERROR" - ERROR-level severity. - // "WARNING" - WARNING-level severity. - // "INFO" - INFO-level severity. - Severity string `json:"severity,omitempty"` - // Type: One-word CamelCase type of the state message. - Type string `json:"type,omitempty"` - // ForceSendFields is a list of field names (e.g. "Message") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more - // details. - ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Message") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. - NullFields []string `json:"-"` -} - -func (s *GoogleCloudFunctionsV2betaStateMessage) MarshalJSON() ([]byte, error) { - type NoMethod GoogleCloudFunctionsV2betaStateMessage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpsTrigger: Describes HttpsTrigger, could be used to connect web hooks to @@ -1400,9 +903,9 @@ type HttpsTrigger struct { NullFields []string `json:"-"` } -func (s *HttpsTrigger) MarshalJSON() ([]byte, error) { +func (s HttpsTrigger) MarshalJSON() ([]byte, error) { type NoMethod HttpsTrigger - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListFunctionsResponse: Response for the `ListFunctions` method. @@ -1432,9 +935,9 @@ type ListFunctionsResponse struct { NullFields []string `json:"-"` } -func (s *ListFunctionsResponse) MarshalJSON() ([]byte, error) { +func (s ListFunctionsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListFunctionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListLocationsResponse: The response message for Locations.ListLocations. @@ -1460,9 +963,9 @@ type ListLocationsResponse struct { NullFields []string `json:"-"` } -func (s *ListLocationsResponse) MarshalJSON() ([]byte, error) { +func (s ListLocationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListLocationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListOperationsResponse: The response message for Operations.ListOperations. 
@@ -1488,9 +991,9 @@ type ListOperationsResponse struct { NullFields []string `json:"-"` } -func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { +func (s ListOperationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListOperationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Location: A resource that represents a Google Cloud location. @@ -1523,9 +1026,9 @@ type Location struct { NullFields []string `json:"-"` } -func (s *Location) MarshalJSON() ([]byte, error) { +func (s Location) MarshalJSON() ([]byte, error) { type NoMethod Location - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OnDeployUpdatePolicy: Security patches are only applied when a function is @@ -1547,9 +1050,9 @@ type OnDeployUpdatePolicy struct { NullFields []string `json:"-"` } -func (s *OnDeployUpdatePolicy) MarshalJSON() ([]byte, error) { +func (s OnDeployUpdatePolicy) MarshalJSON() ([]byte, error) { type NoMethod OnDeployUpdatePolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Operation: This resource represents a long-running operation that is the @@ -1594,9 +1097,9 @@ type Operation struct { NullFields []string `json:"-"` } -func (s *Operation) MarshalJSON() ([]byte, error) { +func (s Operation) MarshalJSON() ([]byte, error) { type NoMethod Operation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationMetadataV1: Metadata describing an Operation @@ -1642,9 +1145,9 @@ type OperationMetadataV1 struct { NullFields []string `json:"-"` } -func (s *OperationMetadataV1) MarshalJSON() ([]byte, error) { +func (s OperationMetadataV1) MarshalJSON() ([]byte, error) { type NoMethod OperationMetadataV1 - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Policy: An Identity and Access Management (IAM) policy, which specifies @@ -1734,9 +1237,9 @@ type Policy struct { NullFields []string `json:"-"` } -func (s *Policy) MarshalJSON() ([]byte, error) { +func (s Policy) MarshalJSON() ([]byte, error) { type NoMethod Policy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Retry: Describes the retry policy in case of function's execution failure. A @@ -1776,9 +1279,9 @@ type SecretEnvVar struct { NullFields []string `json:"-"` } -func (s *SecretEnvVar) MarshalJSON() ([]byte, error) { +func (s SecretEnvVar) MarshalJSON() ([]byte, error) { type NoMethod SecretEnvVar - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SecretVersion: Configuration for a single version. 
@@ -1805,9 +1308,9 @@ type SecretVersion struct { NullFields []string `json:"-"` } -func (s *SecretVersion) MarshalJSON() ([]byte, error) { +func (s SecretVersion) MarshalJSON() ([]byte, error) { type NoMethod SecretVersion - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SecretVolume: Configuration for a secret volume. It has the information @@ -1848,9 +1351,9 @@ type SecretVolume struct { NullFields []string `json:"-"` } -func (s *SecretVolume) MarshalJSON() ([]byte, error) { +func (s SecretVolume) MarshalJSON() ([]byte, error) { type NoMethod SecretVolume - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SetIamPolicyRequest: Request message for `SetIamPolicy` method. @@ -1877,9 +1380,9 @@ type SetIamPolicyRequest struct { NullFields []string `json:"-"` } -func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { +func (s SetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod SetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SourceRepository: Describes SourceRepository, used to represent parameters @@ -1898,7 +1401,8 @@ type SourceRepository struct { // aths/*` In particular, to refer to HEAD use `master` moveable alias. To // refer to a specific fixed alias (tag): // `https://source.developers.google.com/projects/*/repos/*/fixed-aliases/*/path - // s/*` You may omit `paths/*` if you want to use the main directory. + // s/*` You may omit `paths/*` if you want to use the main directory. The + // function response may add an empty `/paths/` to the URL. Url string `json:"url,omitempty"` // ForceSendFields is a list of field names (e.g. "DeployedUrl") to // unconditionally include in API requests. By default, fields with empty or @@ -1913,9 +1417,9 @@ type SourceRepository struct { NullFields []string `json:"-"` } -func (s *SourceRepository) MarshalJSON() ([]byte, error) { +func (s SourceRepository) MarshalJSON() ([]byte, error) { type NoMethod SourceRepository - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Status: The `Status` type defines a logical error model that is suitable for @@ -1947,9 +1451,9 @@ type Status struct { NullFields []string `json:"-"` } -func (s *Status) MarshalJSON() ([]byte, error) { +func (s Status) MarshalJSON() ([]byte, error) { type NoMethod Status - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsRequest: Request message for `TestIamPermissions` method. 
@@ -1972,9 +1476,9 @@ type TestIamPermissionsRequest struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsResponse: Response message for `TestIamPermissions` @@ -1999,9 +1503,9 @@ type TestIamPermissionsResponse struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type OperationsGetCall struct { diff --git a/terraform/providers/google/vendor/google.golang.org/api/cloudidentity/v1/cloudidentity-api.json b/terraform/providers/google/vendor/google.golang.org/api/cloudidentity/v1/cloudidentity-api.json index 248632d04ed..f5ed5ca6fec 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudidentity/v1/cloudidentity-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudidentity/v1/cloudidentity-api.json @@ -422,7 +422,7 @@ ], "enumDescriptions": [ "Default value. The value is unused.", - "This view contains all devices imported by the company admin. Each device in the response contains all information specified by the company admin when importing the device (i.e. asset tags). This includes devices that may be unaassigned or assigned to users.", + "This view contains all devices imported by the company admin. Each device in the response contains all information specified by the company admin when importing the device (i.e. asset tags). This includes devices that may be unassigned or assigned to users.", "This view contains all devices with at least one user registered on the device. Each device in the response contains all device information, except for asset tags." ], "location": "query", @@ -1990,7 +1990,7 @@ } } }, - "revision": "20240611", + "revision": "20240924", "rootUrl": "https://cloudidentity.googleapis.com/", "schemas": { "AddIdpCredentialOperationMetadata": { @@ -2347,7 +2347,7 @@ "type": "string" }, "lastProfileSyncTime": { - "description": "Timestamp in milliseconds since Epoch when the profile/gcm id was last synced.", + "description": "Timestamp in milliseconds since the Unix epoch when the profile/gcm id was last synced.", "format": "google-datetime", "type": "string" } @@ -2355,7 +2355,7 @@ "type": "object" }, "GoogleAppsCloudidentityDevicesV1BrowserInfo": { - "description": "Browser-specific fields reported by the [Endpoint Verification extension](https://chromewebstore.google.com/detail/endpoint-verification/callobklhcbilhphinckomhgkigmfocg?pli=1). LINT.IfChange", + "description": "Browser-specific fields reported by the [Endpoint Verification extension](https://chromewebstore.google.com/detail/endpoint-verification/callobklhcbilhphinckomhgkigmfocg?pli=1).", "id": "GoogleAppsCloudidentityDevicesV1BrowserInfo", "properties": { "browserManagementState": { @@ -2378,7 +2378,7 @@ "type": "string" }, "browserVersion": { - "description": "Version of the request initiating browser.", + "description": "Version of the request initiating browser. E.g. 
`91.0.4442.4`.", "type": "string" }, "isBuiltInDnsClientEnabled": { @@ -3083,7 +3083,7 @@ "description": "Properties of the object.", "type": "any" }, - "description": "Additional signals reported by Endpoint Verification. It includes the following attributes: 1. Non-configurable attributes: hotfixes, av_installed, av_enabled, windows_domain_name, is_os_native_firewall_enabled, and is_secure_boot_enabled. 2. [Configurable attributes](https://cloud.google.com/endpoint-verification/docs/collect-config-attributes): file, folder, and binary attributes; registry entries; and properties in a plist.", + "description": "[Additional signals](https://cloud.google.com/endpoint-verification/docs/device-information) reported by Endpoint Verification. It includes the following attributes: * Non-configurable attributes: hotfixes, av_installed, av_enabled, windows_domain_name, is_os_native_firewall_enabled, and is_secure_boot_enabled. * [Configurable attributes](https://cloud.google.com/endpoint-verification/docs/collect-config-attributes): file, folder, and binary attributes; registry entries; and properties in a plist.", "type": "object" }, "browserAttributes": { @@ -3732,6 +3732,7 @@ "SERVICE_ACCOUNT", "GROUP", "SHARED_DRIVE", + "CBCM_BROWSER", "OTHER" ], "enumDescriptions": [ @@ -3740,6 +3741,7 @@ "Represents service account type.", "Represents group type.", "Represents Shared drive.", + "Represents a CBCM-managed Chrome Browser type.", "Represents other type." ], "readOnly": true, diff --git a/terraform/providers/google/vendor/google.golang.org/api/cloudidentity/v1/cloudidentity-gen.go b/terraform/providers/google/vendor/google.golang.org/api/cloudidentity/v1/cloudidentity-gen.go index 79b0376202c..1b88815924d 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudidentity/v1/cloudidentity-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudidentity/v1/cloudidentity-gen.go @@ -320,9 +320,9 @@ type AddIdpCredentialOperationMetadata struct { NullFields []string `json:"-"` } -func (s *AddIdpCredentialOperationMetadata) MarshalJSON() ([]byte, error) { +func (s AddIdpCredentialOperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod AddIdpCredentialOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AddIdpCredentialRequest: The request for creating an IdpCredential with its @@ -344,9 +344,9 @@ type AddIdpCredentialRequest struct { NullFields []string `json:"-"` } -func (s *AddIdpCredentialRequest) MarshalJSON() ([]byte, error) { +func (s AddIdpCredentialRequest) MarshalJSON() ([]byte, error) { type NoMethod AddIdpCredentialRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CancelUserInvitationRequest: Request to cancel sent invitation for target @@ -378,9 +378,9 @@ type CheckTransitiveMembershipResponse struct { NullFields []string `json:"-"` } -func (s *CheckTransitiveMembershipResponse) MarshalJSON() ([]byte, error) { +func (s CheckTransitiveMembershipResponse) MarshalJSON() ([]byte, error) { type NoMethod CheckTransitiveMembershipResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateGroupMetadata: Metadata for CreateGroup LRO. 
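The ForceSendFields/NullFields doc comments repeated through these hunks describe the caller-facing contract: `omitempty` would normally drop zero values from the request body, and these two lists override that per request. A hedged usage sketch; Group and its Description field are real types from this package, but the clearing scenario is illustrative:

package main

import (
	"fmt"
	"log"

	cloudidentity "google.golang.org/api/cloudidentity/v1"
)

func main() {
	// Clearing a group's description: "" is the zero value, so omitempty
	// would drop it; ForceSendFields opts it back into the wire body.
	g := &cloudidentity.Group{
		Description:     "",
		ForceSendFields: []string{"Description"},
		// NullFields: []string{"Description"}, // or send an explicit null
	}
	b, err := g.MarshalJSON()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // {"description":""}
}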
@@ -408,9 +408,9 @@ type CreateInboundSamlSsoProfileOperationMetadata struct { NullFields []string `json:"-"` } -func (s *CreateInboundSamlSsoProfileOperationMetadata) MarshalJSON() ([]byte, error) { +func (s CreateInboundSamlSsoProfileOperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod CreateInboundSamlSsoProfileOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateInboundSsoAssignmentOperationMetadata: LRO response metadata for @@ -462,9 +462,9 @@ type DsaPublicKeyInfo struct { NullFields []string `json:"-"` } -func (s *DsaPublicKeyInfo) MarshalJSON() ([]byte, error) { +func (s DsaPublicKeyInfo) MarshalJSON() ([]byte, error) { type NoMethod DsaPublicKeyInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DynamicGroupMetadata: Dynamic group metadata like queries and status. @@ -488,9 +488,9 @@ type DynamicGroupMetadata struct { NullFields []string `json:"-"` } -func (s *DynamicGroupMetadata) MarshalJSON() ([]byte, error) { +func (s DynamicGroupMetadata) MarshalJSON() ([]byte, error) { type NoMethod DynamicGroupMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DynamicGroupQuery: Defines a query on a resource. @@ -523,9 +523,9 @@ type DynamicGroupQuery struct { NullFields []string `json:"-"` } -func (s *DynamicGroupQuery) MarshalJSON() ([]byte, error) { +func (s DynamicGroupQuery) MarshalJSON() ([]byte, error) { type NoMethod DynamicGroupQuery - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DynamicGroupStatus: The current status of a dynamic group along with @@ -559,9 +559,9 @@ type DynamicGroupStatus struct { NullFields []string `json:"-"` } -func (s *DynamicGroupStatus) MarshalJSON() ([]byte, error) { +func (s DynamicGroupStatus) MarshalJSON() ([]byte, error) { type NoMethod DynamicGroupStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // EntityKey: A unique identifier for an entity in the Cloud Identity Groups @@ -596,9 +596,9 @@ type EntityKey struct { NullFields []string `json:"-"` } -func (s *EntityKey) MarshalJSON() ([]byte, error) { +func (s EntityKey) MarshalJSON() ([]byte, error) { type NoMethod EntityKey - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExpiryDetail: The `MembershipRole` expiry details. @@ -618,9 +618,9 @@ type ExpiryDetail struct { NullFields []string `json:"-"` } -func (s *ExpiryDetail) MarshalJSON() ([]byte, error) { +func (s ExpiryDetail) MarshalJSON() ([]byte, error) { type NoMethod ExpiryDetail - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetMembershipGraphMetadata: Metadata of GetMembershipGraphResponse LRO. 
This @@ -651,9 +651,9 @@ type GetMembershipGraphResponse struct { NullFields []string `json:"-"` } -func (s *GetMembershipGraphResponse) MarshalJSON() ([]byte, error) { +func (s GetMembershipGraphResponse) MarshalJSON() ([]byte, error) { type NoMethod GetMembershipGraphResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppsCloudidentityDevicesV1AndroidAttributes: Resource representing the @@ -702,9 +702,9 @@ type GoogleAppsCloudidentityDevicesV1AndroidAttributes struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1AndroidAttributes) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1AndroidAttributes) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1AndroidAttributes - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppsCloudidentityDevicesV1ApproveDeviceUserMetadata: Metadata for @@ -735,9 +735,9 @@ type GoogleAppsCloudidentityDevicesV1ApproveDeviceUserRequest struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1ApproveDeviceUserRequest) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1ApproveDeviceUserRequest) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1ApproveDeviceUserRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppsCloudidentityDevicesV1ApproveDeviceUserResponse: Response message @@ -758,9 +758,9 @@ type GoogleAppsCloudidentityDevicesV1ApproveDeviceUserResponse struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1ApproveDeviceUserResponse) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1ApproveDeviceUserResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1ApproveDeviceUserResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppsCloudidentityDevicesV1BlockDeviceUserMetadata: Metadata for @@ -791,9 +791,9 @@ type GoogleAppsCloudidentityDevicesV1BlockDeviceUserRequest struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1BlockDeviceUserRequest) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1BlockDeviceUserRequest) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1BlockDeviceUserRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppsCloudidentityDevicesV1BlockDeviceUserResponse: Response message @@ -814,9 +814,9 @@ type GoogleAppsCloudidentityDevicesV1BlockDeviceUserResponse struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1BlockDeviceUserResponse) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1BlockDeviceUserResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1BlockDeviceUserResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // 
GoogleAppsCloudidentityDevicesV1BrowserAttributes: Contains information @@ -832,7 +832,7 @@ type GoogleAppsCloudidentityDevicesV1BrowserAttributes struct { // ChromeProfileId: Chrome profile ID that is exposed by the Chrome API. It is // unique for each device. ChromeProfileId string `json:"chromeProfileId,omitempty"` - // LastProfileSyncTime: Timestamp in milliseconds since Epoch when the + // LastProfileSyncTime: Timestamp in milliseconds since the Unix epoch when the // profile/gcm id was last synced. LastProfileSyncTime string `json:"lastProfileSyncTime,omitempty"` // ForceSendFields is a list of field names (e.g. "ChromeBrowserInfo") to @@ -848,15 +848,14 @@ type GoogleAppsCloudidentityDevicesV1BrowserAttributes struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1BrowserAttributes) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1BrowserAttributes) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1BrowserAttributes - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppsCloudidentityDevicesV1BrowserInfo: Browser-specific fields // reported by the Endpoint Verification extension // (https://chromewebstore.google.com/detail/endpoint-verification/callobklhcbilhphinckomhgkigmfocg?pli=1). -// LINT.IfChange type GoogleAppsCloudidentityDevicesV1BrowserInfo struct { // BrowserManagementState: Output only. Browser's management state. // @@ -868,7 +867,8 @@ type GoogleAppsCloudidentityDevicesV1BrowserInfo struct { // "PROFILE_MANAGED" - Profile is managed by customer. // "BROWSER_MANAGED" - Browser is managed by customer. BrowserManagementState string `json:"browserManagementState,omitempty"` - // BrowserVersion: Version of the request initiating browser. + // BrowserVersion: Version of the request initiating browser. E.g. + // `91.0.4442.4`. BrowserVersion string `json:"browserVersion,omitempty"` // IsBuiltInDnsClientEnabled: Current state of built-in DNS client // (https://chromeenterprise.google/policies/#BuiltInDnsClientEnabled). 
@@ -944,9 +944,9 @@ type GoogleAppsCloudidentityDevicesV1BrowserInfo struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1BrowserInfo) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1BrowserInfo) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1BrowserInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppsCloudidentityDevicesV1CancelWipeDeviceMetadata: Metadata for @@ -977,9 +977,9 @@ type GoogleAppsCloudidentityDevicesV1CancelWipeDeviceRequest struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1CancelWipeDeviceRequest) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1CancelWipeDeviceRequest) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1CancelWipeDeviceRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppsCloudidentityDevicesV1CancelWipeDeviceResponse: Response message @@ -1001,9 +1001,9 @@ type GoogleAppsCloudidentityDevicesV1CancelWipeDeviceResponse struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1CancelWipeDeviceResponse) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1CancelWipeDeviceResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1CancelWipeDeviceResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppsCloudidentityDevicesV1CancelWipeDeviceUserMetadata: Metadata for @@ -1034,9 +1034,9 @@ type GoogleAppsCloudidentityDevicesV1CancelWipeDeviceUserRequest struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1CancelWipeDeviceUserRequest) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1CancelWipeDeviceUserRequest) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1CancelWipeDeviceUserRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppsCloudidentityDevicesV1CancelWipeDeviceUserResponse: Response @@ -1057,9 +1057,9 @@ type GoogleAppsCloudidentityDevicesV1CancelWipeDeviceUserResponse struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1CancelWipeDeviceUserResponse) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1CancelWipeDeviceUserResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1CancelWipeDeviceUserResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppsCloudidentityDevicesV1CertificateAttributes: Stores information @@ -1101,9 +1101,9 @@ type GoogleAppsCloudidentityDevicesV1CertificateAttributes struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1CertificateAttributes) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1CertificateAttributes) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1CertificateAttributes - return gensupport.MarshalJSON(NoMethod(*s), 
s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppsCloudidentityDevicesV1CertificateTemplate: CertificateTemplate (v3 @@ -1130,9 +1130,9 @@ type GoogleAppsCloudidentityDevicesV1CertificateTemplate struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1CertificateTemplate) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1CertificateTemplate) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1CertificateTemplate - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppsCloudidentityDevicesV1ClientState: Represents the state associated @@ -1229,9 +1229,9 @@ type GoogleAppsCloudidentityDevicesV1ClientState struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1ClientState) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1ClientState) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1ClientState - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppsCloudidentityDevicesV1CreateDeviceMetadata: Metadata for @@ -1261,9 +1261,9 @@ type GoogleAppsCloudidentityDevicesV1CustomAttributeValue struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1CustomAttributeValue) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1CustomAttributeValue) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1CustomAttributeValue - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *GoogleAppsCloudidentityDevicesV1CustomAttributeValue) UnmarshalJSON(data []byte) error { @@ -1427,9 +1427,9 @@ type GoogleAppsCloudidentityDevicesV1Device struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1Device) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1Device) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1Device - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppsCloudidentityDevicesV1DeviceUser: Represents a user's use of a @@ -1498,9 +1498,9 @@ type GoogleAppsCloudidentityDevicesV1DeviceUser struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1DeviceUser) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1DeviceUser) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1DeviceUser - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppsCloudidentityDevicesV1EndpointVerificationSpecificAttributes: @@ -1508,11 +1508,12 @@ func (s *GoogleAppsCloudidentityDevicesV1DeviceUser) MarshalJSON() ([]byte, erro // (https://cloud.google.com/endpoint-verification/docs/device-information) of // a device. type GoogleAppsCloudidentityDevicesV1EndpointVerificationSpecificAttributes struct { - // AdditionalSignals: Additional signals reported by Endpoint Verification. 
It - // includes the following attributes: 1. Non-configurable attributes: hotfixes, - // av_installed, av_enabled, windows_domain_name, - // is_os_native_firewall_enabled, and is_secure_boot_enabled. 2. Configurable - // attributes + // AdditionalSignals: Additional signals + // (https://cloud.google.com/endpoint-verification/docs/device-information) + // reported by Endpoint Verification. It includes the following attributes: * + // Non-configurable attributes: hotfixes, av_installed, av_enabled, + // windows_domain_name, is_os_native_firewall_enabled, and + // is_secure_boot_enabled. * Configurable attributes // (https://cloud.google.com/endpoint-verification/docs/collect-config-attributes): // file, folder, and binary attributes; registry entries; and properties in a // plist. @@ -1535,9 +1536,9 @@ type GoogleAppsCloudidentityDevicesV1EndpointVerificationSpecificAttributes stru NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1EndpointVerificationSpecificAttributes) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1EndpointVerificationSpecificAttributes) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1EndpointVerificationSpecificAttributes - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppsCloudidentityDevicesV1ListClientStatesResponse: Response message @@ -1564,9 +1565,9 @@ type GoogleAppsCloudidentityDevicesV1ListClientStatesResponse struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1ListClientStatesResponse) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1ListClientStatesResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1ListClientStatesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppsCloudidentityDevicesV1ListDeviceUsersResponse: Response message @@ -1593,9 +1594,9 @@ type GoogleAppsCloudidentityDevicesV1ListDeviceUsersResponse struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1ListDeviceUsersResponse) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1ListDeviceUsersResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1ListDeviceUsersResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppsCloudidentityDevicesV1ListDevicesResponse: Response message that @@ -1622,9 +1623,9 @@ type GoogleAppsCloudidentityDevicesV1ListDevicesResponse struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1ListDevicesResponse) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1ListDevicesResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1ListDevicesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppsCloudidentityDevicesV1ListEndpointAppsMetadata: Metadata for @@ -1663,9 +1664,9 @@ type GoogleAppsCloudidentityDevicesV1LookupSelfDeviceUsersResponse struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1LookupSelfDeviceUsersResponse) 
MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1LookupSelfDeviceUsersResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1LookupSelfDeviceUsersResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppsCloudidentityDevicesV1SignoutDeviceUserMetadata: Metadata for @@ -1717,9 +1718,9 @@ type GoogleAppsCloudidentityDevicesV1WipeDeviceRequest struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1WipeDeviceRequest) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1WipeDeviceRequest) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1WipeDeviceRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppsCloudidentityDevicesV1WipeDeviceResponse: Response message for @@ -1741,9 +1742,9 @@ type GoogleAppsCloudidentityDevicesV1WipeDeviceResponse struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1WipeDeviceResponse) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1WipeDeviceResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1WipeDeviceResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppsCloudidentityDevicesV1WipeDeviceUserMetadata: Metadata for @@ -1774,9 +1775,9 @@ type GoogleAppsCloudidentityDevicesV1WipeDeviceUserRequest struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1WipeDeviceUserRequest) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1WipeDeviceUserRequest) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1WipeDeviceUserRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleAppsCloudidentityDevicesV1WipeDeviceUserResponse: Response message for @@ -1797,9 +1798,9 @@ type GoogleAppsCloudidentityDevicesV1WipeDeviceUserResponse struct { NullFields []string `json:"-"` } -func (s *GoogleAppsCloudidentityDevicesV1WipeDeviceUserResponse) MarshalJSON() ([]byte, error) { +func (s GoogleAppsCloudidentityDevicesV1WipeDeviceUserResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleAppsCloudidentityDevicesV1WipeDeviceUserResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Group: A group within the Cloud Identity Groups API. A `Group` is a @@ -1863,9 +1864,9 @@ type Group struct { NullFields []string `json:"-"` } -func (s *Group) MarshalJSON() ([]byte, error) { +func (s Group) MarshalJSON() ([]byte, error) { type NoMethod Group - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GroupRelation: Message representing a transitive group of a user or a group. 
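EntityKey (an id plus optional namespace) is how the Groups API names entities, and the Lookup* responses in the surrounding hunks resolve such keys to resource names. A hedged sketch of resolving a group email to its `groups/{group_id}` name, assuming Application Default Credentials are available; the email is hypothetical:

package main

import (
	"context"
	"fmt"
	"log"

	cloudidentity "google.golang.org/api/cloudidentity/v1"
)

func main() {
	ctx := context.Background()
	svc, err := cloudidentity.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	// GroupKeyId carries the EntityKey id; for Google Groups this is the
	// group's email address. "eng-team@example.com" is a placeholder.
	resp, err := svc.Groups.Lookup().GroupKeyId("eng-team@example.com").Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Name) // e.g. groups/0123abc...
}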
@@ -1905,9 +1906,9 @@ type GroupRelation struct { NullFields []string `json:"-"` } -func (s *GroupRelation) MarshalJSON() ([]byte, error) { +func (s GroupRelation) MarshalJSON() ([]byte, error) { type NoMethod GroupRelation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // IdpCredential: Credential for verifying signatures produced by the Identity @@ -1938,9 +1939,9 @@ type IdpCredential struct { NullFields []string `json:"-"` } -func (s *IdpCredential) MarshalJSON() ([]byte, error) { +func (s IdpCredential) MarshalJSON() ([]byte, error) { type NoMethod IdpCredential - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InboundSamlSsoProfile: A SAML 2.0 @@ -1977,9 +1978,9 @@ type InboundSamlSsoProfile struct { NullFields []string `json:"-"` } -func (s *InboundSamlSsoProfile) MarshalJSON() ([]byte, error) { +func (s InboundSamlSsoProfile) MarshalJSON() ([]byte, error) { type NoMethod InboundSamlSsoProfile - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InboundSsoAssignment: Targets with "set" SSO assignments and their @@ -2036,9 +2037,9 @@ type InboundSsoAssignment struct { NullFields []string `json:"-"` } -func (s *InboundSsoAssignment) MarshalJSON() ([]byte, error) { +func (s InboundSsoAssignment) MarshalJSON() ([]byte, error) { type NoMethod InboundSsoAssignment - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // IsInvitableUserResponse: Response for IsInvitableUser RPC. @@ -2061,9 +2062,9 @@ type IsInvitableUserResponse struct { NullFields []string `json:"-"` } -func (s *IsInvitableUserResponse) MarshalJSON() ([]byte, error) { +func (s IsInvitableUserResponse) MarshalJSON() ([]byte, error) { type NoMethod IsInvitableUserResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListGroupsResponse: Response message for ListGroups operation. 
@@ -2090,9 +2091,9 @@ type ListGroupsResponse struct { NullFields []string `json:"-"` } -func (s *ListGroupsResponse) MarshalJSON() ([]byte, error) { +func (s ListGroupsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListGroupsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListIdpCredentialsResponse: Response of the @@ -2119,9 +2120,9 @@ type ListIdpCredentialsResponse struct { NullFields []string `json:"-"` } -func (s *ListIdpCredentialsResponse) MarshalJSON() ([]byte, error) { +func (s ListIdpCredentialsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListIdpCredentialsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListInboundSamlSsoProfilesResponse: Response of the @@ -2148,9 +2149,9 @@ type ListInboundSamlSsoProfilesResponse struct { NullFields []string `json:"-"` } -func (s *ListInboundSamlSsoProfilesResponse) MarshalJSON() ([]byte, error) { +func (s ListInboundSamlSsoProfilesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListInboundSamlSsoProfilesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListInboundSsoAssignmentsResponse: Response of the @@ -2177,9 +2178,9 @@ type ListInboundSsoAssignmentsResponse struct { NullFields []string `json:"-"` } -func (s *ListInboundSsoAssignmentsResponse) MarshalJSON() ([]byte, error) { +func (s ListInboundSsoAssignmentsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListInboundSsoAssignmentsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListMembershipsResponse: The response message for @@ -2206,9 +2207,9 @@ type ListMembershipsResponse struct { NullFields []string `json:"-"` } -func (s *ListMembershipsResponse) MarshalJSON() ([]byte, error) { +func (s ListMembershipsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListMembershipsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListUserInvitationsResponse: Response message for UserInvitation listing @@ -2237,9 +2238,9 @@ type ListUserInvitationsResponse struct { NullFields []string `json:"-"` } -func (s *ListUserInvitationsResponse) MarshalJSON() ([]byte, error) { +func (s ListUserInvitationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListUserInvitationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LookupGroupNameResponse: The response message for @@ -2265,9 +2266,9 @@ type LookupGroupNameResponse struct { NullFields []string `json:"-"` } -func (s *LookupGroupNameResponse) MarshalJSON() ([]byte, error) { +func (s LookupGroupNameResponse) MarshalJSON() ([]byte, error) { type NoMethod LookupGroupNameResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LookupMembershipNameResponse: The response message for @@ -2293,9 +2294,9 @@ type LookupMembershipNameResponse struct { NullFields []string `json:"-"` } -func 
(s *LookupMembershipNameResponse) MarshalJSON() ([]byte, error) { +func (s LookupMembershipNameResponse) MarshalJSON() ([]byte, error) { type NoMethod LookupMembershipNameResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MemberRelation: Message representing a transitive membership of a group. @@ -2331,9 +2332,9 @@ type MemberRelation struct { NullFields []string `json:"-"` } -func (s *MemberRelation) MarshalJSON() ([]byte, error) { +func (s MemberRelation) MarshalJSON() ([]byte, error) { type NoMethod MemberRelation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MemberRestriction: The definition of MemberRestriction @@ -2364,9 +2365,9 @@ type MemberRestriction struct { NullFields []string `json:"-"` } -func (s *MemberRestriction) MarshalJSON() ([]byte, error) { +func (s MemberRestriction) MarshalJSON() ([]byte, error) { type NoMethod MemberRestriction - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Membership: A membership within the Cloud Identity Groups API. A @@ -2404,6 +2405,7 @@ type Membership struct { // "SERVICE_ACCOUNT" - Represents service account type. // "GROUP" - Represents group type. // "SHARED_DRIVE" - Represents Shared drive. + // "CBCM_BROWSER" - Represents a CBCM-managed Chrome Browser type. // "OTHER" - Represents other type. Type string `json:"type,omitempty"` // UpdateTime: Output only. The time when the `Membership` was last updated. @@ -2424,9 +2426,9 @@ type Membership struct { NullFields []string `json:"-"` } -func (s *Membership) MarshalJSON() ([]byte, error) { +func (s Membership) MarshalJSON() ([]byte, error) { type NoMethod Membership - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MembershipAdjacencyList: Membership graph's path information as an adjacency @@ -2452,9 +2454,9 @@ type MembershipAdjacencyList struct { NullFields []string `json:"-"` } -func (s *MembershipAdjacencyList) MarshalJSON() ([]byte, error) { +func (s MembershipAdjacencyList) MarshalJSON() ([]byte, error) { type NoMethod MembershipAdjacencyList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MembershipRelation: Message containing membership relation. @@ -2492,9 +2494,9 @@ type MembershipRelation struct { NullFields []string `json:"-"` } -func (s *MembershipRelation) MarshalJSON() ([]byte, error) { +func (s MembershipRelation) MarshalJSON() ([]byte, error) { type NoMethod MembershipRelation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MembershipRole: A membership role within the Cloud Identity Groups API. 
A @@ -2523,9 +2525,9 @@ type MembershipRole struct { NullFields []string `json:"-"` } -func (s *MembershipRole) MarshalJSON() ([]byte, error) { +func (s MembershipRole) MarshalJSON() ([]byte, error) { type NoMethod MembershipRole - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MembershipRoleRestrictionEvaluation: The evaluated state of this @@ -2556,9 +2558,9 @@ type MembershipRoleRestrictionEvaluation struct { NullFields []string `json:"-"` } -func (s *MembershipRoleRestrictionEvaluation) MarshalJSON() ([]byte, error) { +func (s MembershipRoleRestrictionEvaluation) MarshalJSON() ([]byte, error) { type NoMethod MembershipRoleRestrictionEvaluation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ModifyMembershipRolesRequest: The request message for @@ -2591,9 +2593,9 @@ type ModifyMembershipRolesRequest struct { NullFields []string `json:"-"` } -func (s *ModifyMembershipRolesRequest) MarshalJSON() ([]byte, error) { +func (s ModifyMembershipRolesRequest) MarshalJSON() ([]byte, error) { type NoMethod ModifyMembershipRolesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ModifyMembershipRolesResponse: The response message for @@ -2617,9 +2619,9 @@ type ModifyMembershipRolesResponse struct { NullFields []string `json:"-"` } -func (s *ModifyMembershipRolesResponse) MarshalJSON() ([]byte, error) { +func (s ModifyMembershipRolesResponse) MarshalJSON() ([]byte, error) { type NoMethod ModifyMembershipRolesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Operation: This resource represents a long-running operation that is the @@ -2664,9 +2666,9 @@ type Operation struct { NullFields []string `json:"-"` } -func (s *Operation) MarshalJSON() ([]byte, error) { +func (s Operation) MarshalJSON() ([]byte, error) { type NoMethod Operation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RestrictionEvaluation: The evaluated state of this restriction. @@ -2696,9 +2698,9 @@ type RestrictionEvaluation struct { NullFields []string `json:"-"` } -func (s *RestrictionEvaluation) MarshalJSON() ([]byte, error) { +func (s RestrictionEvaluation) MarshalJSON() ([]byte, error) { type NoMethod RestrictionEvaluation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RestrictionEvaluations: Evaluations of restrictions applied to parent group @@ -2721,9 +2723,9 @@ type RestrictionEvaluations struct { NullFields []string `json:"-"` } -func (s *RestrictionEvaluations) MarshalJSON() ([]byte, error) { +func (s RestrictionEvaluations) MarshalJSON() ([]byte, error) { type NoMethod RestrictionEvaluations - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RsaPublicKeyInfo: Information of a RSA public key. 
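Beyond the receiver change, this file picks up one real API-surface change: the Membership type enum gains CBCM_BROWSER in revision 20240924. Callers switching on Membership.Type should treat the enum as forward-compatible; a small sketch:

package membertype

import cloudidentity "google.golang.org/api/cloudidentity/v1"

// describeMember classifies a membership, handling the CBCM_BROWSER
// value added in revision 20240924 while staying forward-compatible
// with values future revisions may add.
func describeMember(m *cloudidentity.Membership) string {
	switch m.Type {
	case "USER", "SERVICE_ACCOUNT", "GROUP", "SHARED_DRIVE":
		return "pre-existing member type: " + m.Type
	case "CBCM_BROWSER":
		return "CBCM-managed Chrome Browser"
	default:
		return "OTHER or a type from a newer revision: " + m.Type
	}
}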
@@ -2743,9 +2745,9 @@ type RsaPublicKeyInfo struct { NullFields []string `json:"-"` } -func (s *RsaPublicKeyInfo) MarshalJSON() ([]byte, error) { +func (s RsaPublicKeyInfo) MarshalJSON() ([]byte, error) { type NoMethod RsaPublicKeyInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SamlIdpConfig: SAML IDP (identity provider) configuration. @@ -2781,9 +2783,9 @@ type SamlIdpConfig struct { NullFields []string `json:"-"` } -func (s *SamlIdpConfig) MarshalJSON() ([]byte, error) { +func (s SamlIdpConfig) MarshalJSON() ([]byte, error) { type NoMethod SamlIdpConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SamlSpConfig: SAML SP (service provider) configuration. @@ -2807,9 +2809,9 @@ type SamlSpConfig struct { NullFields []string `json:"-"` } -func (s *SamlSpConfig) MarshalJSON() ([]byte, error) { +func (s SamlSpConfig) MarshalJSON() ([]byte, error) { type NoMethod SamlSpConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SamlSsoInfo: Details that are applicable when `sso_mode` == `SAML_SSO`. @@ -2830,9 +2832,9 @@ type SamlSsoInfo struct { NullFields []string `json:"-"` } -func (s *SamlSsoInfo) MarshalJSON() ([]byte, error) { +func (s SamlSsoInfo) MarshalJSON() ([]byte, error) { type NoMethod SamlSsoInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SearchDirectGroupsResponse: The response message for @@ -2859,9 +2861,9 @@ type SearchDirectGroupsResponse struct { NullFields []string `json:"-"` } -func (s *SearchDirectGroupsResponse) MarshalJSON() ([]byte, error) { +func (s SearchDirectGroupsResponse) MarshalJSON() ([]byte, error) { type NoMethod SearchDirectGroupsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SearchGroupsResponse: The response message for GroupsService.SearchGroups. 
@@ -2887,9 +2889,9 @@ type SearchGroupsResponse struct { NullFields []string `json:"-"` } -func (s *SearchGroupsResponse) MarshalJSON() ([]byte, error) { +func (s SearchGroupsResponse) MarshalJSON() ([]byte, error) { type NoMethod SearchGroupsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SearchTransitiveGroupsResponse: The response message for @@ -2916,9 +2918,9 @@ type SearchTransitiveGroupsResponse struct { NullFields []string `json:"-"` } -func (s *SearchTransitiveGroupsResponse) MarshalJSON() ([]byte, error) { +func (s SearchTransitiveGroupsResponse) MarshalJSON() ([]byte, error) { type NoMethod SearchTransitiveGroupsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SearchTransitiveMembershipsResponse: The response message for @@ -2945,9 +2947,9 @@ type SearchTransitiveMembershipsResponse struct { NullFields []string `json:"-"` } -func (s *SearchTransitiveMembershipsResponse) MarshalJSON() ([]byte, error) { +func (s SearchTransitiveMembershipsResponse) MarshalJSON() ([]byte, error) { type NoMethod SearchTransitiveMembershipsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SecuritySettings: The definition of security settings. @@ -2973,9 +2975,9 @@ type SecuritySettings struct { NullFields []string `json:"-"` } -func (s *SecuritySettings) MarshalJSON() ([]byte, error) { +func (s SecuritySettings) MarshalJSON() ([]byte, error) { type NoMethod SecuritySettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SendUserInvitationRequest: A request to send email for inviting target user @@ -3008,9 +3010,9 @@ type SignInBehavior struct { NullFields []string `json:"-"` } -func (s *SignInBehavior) MarshalJSON() ([]byte, error) { +func (s SignInBehavior) MarshalJSON() ([]byte, error) { type NoMethod SignInBehavior - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Status: The `Status` type defines a logical error model that is suitable for @@ -3042,9 +3044,9 @@ type Status struct { NullFields []string `json:"-"` } -func (s *Status) MarshalJSON() ([]byte, error) { +func (s Status) MarshalJSON() ([]byte, error) { type NoMethod Status - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TransitiveMembershipRole: Message representing the role of a @@ -3066,9 +3068,9 @@ type TransitiveMembershipRole struct { NullFields []string `json:"-"` } -func (s *TransitiveMembershipRole) MarshalJSON() ([]byte, error) { +func (s TransitiveMembershipRole) MarshalJSON() ([]byte, error) { type NoMethod TransitiveMembershipRole - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateGroupMetadata: Metadata for UpdateGroup LRO. 
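Status here is the standard gRPC-style error model, and it reaches callers mainly through Operation.Error on the LRO types in these hunks. A hedged sketch of the usual terminal-state check; checkOperation is a hypothetical helper and op is any generated *Operation from this package:

package lrocheck

import (
	"fmt"

	cloudidentity "google.golang.org/api/cloudidentity/v1"
)

// checkOperation inspects an LRO result: not done, failed with a Status,
// or succeeded with a service-specific payload in op.Response.
func checkOperation(op *cloudidentity.Operation) error {
	if !op.Done {
		return fmt.Errorf("operation %s still running", op.Name)
	}
	if op.Error != nil {
		// Status carries a gRPC code, a developer-facing message, and
		// optional detail messages.
		return fmt.Errorf("operation %s failed: code %d: %s",
			op.Name, op.Error.Code, op.Error.Message)
	}
	return nil // op.Response holds the normal result payload
}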
@@ -3096,9 +3098,9 @@ type UpdateInboundSamlSsoProfileOperationMetadata struct { NullFields []string `json:"-"` } -func (s *UpdateInboundSamlSsoProfileOperationMetadata) MarshalJSON() ([]byte, error) { +func (s UpdateInboundSamlSsoProfileOperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod UpdateInboundSamlSsoProfileOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateInboundSsoAssignmentOperationMetadata: LRO response metadata for @@ -3131,9 +3133,9 @@ type UpdateMembershipRolesParams struct { NullFields []string `json:"-"` } -func (s *UpdateMembershipRolesParams) MarshalJSON() ([]byte, error) { +func (s UpdateMembershipRolesParams) MarshalJSON() ([]byte, error) { type NoMethod UpdateMembershipRolesParams - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UserInvitation: The `UserInvitation` resource represents an email that can @@ -3178,9 +3180,9 @@ type UserInvitation struct { NullFields []string `json:"-"` } -func (s *UserInvitation) MarshalJSON() ([]byte, error) { +func (s UserInvitation) MarshalJSON() ([]byte, error) { type NoMethod UserInvitation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type CustomersUserinvitationsCancelCall struct { @@ -4309,7 +4311,7 @@ func (c *DevicesListCall) PageToken(pageToken string) *DevicesListCall { // // company admin. Each device in the response contains all information // specified by the company admin when importing the device (i.e. asset tags). -// This includes devices that may be unaassigned or assigned to users. +// This includes devices that may be unassigned or assigned to users. 
// // "USER_ASSIGNED_DEVICES" - This view contains all devices with at least one // diff --git a/terraform/providers/google/vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json b/terraform/providers/google/vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json index d14b423139f..564d18634c3 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json @@ -24,6 +24,11 @@ "endpointUrl": "https://cloudkms.europe-west3.rep.googleapis.com/", "location": "europe-west3" }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://cloudkms.europe-west8.rep.googleapis.com/", + "location": "europe-west8" + }, { "description": "Regional Endpoint", "endpointUrl": "https://cloudkms.europe-west9.rep.googleapis.com/", @@ -33,6 +38,66 @@ "description": "Regional Endpoint", "endpointUrl": "https://cloudkms.me-central2.rep.googleapis.com/", "location": "me-central2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://cloudkms.us-east1.rep.googleapis.com/", + "location": "us-east1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://cloudkms.us-east4.rep.googleapis.com/", + "location": "us-east4" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://cloudkms.us-west2.rep.googleapis.com/", + "location": "us-west2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://cloudkms.us-west1.rep.googleapis.com/", + "location": "us-west1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://cloudkms.us-east7.rep.googleapis.com/", + "location": "us-east7" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://cloudkms.us-central1.rep.googleapis.com/", + "location": "us-central1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://cloudkms.us-west3.rep.googleapis.com/", + "location": "us-west3" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://cloudkms.us-central2.rep.googleapis.com/", + "location": "us-central2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://cloudkms.us-west4.rep.googleapis.com/", + "location": "us-west4" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://cloudkms.us-west8.rep.googleapis.com/", + "location": "us-west8" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://cloudkms.us-east5.rep.googleapis.com/", + "location": "us-east5" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://cloudkms.us-south1.rep.googleapis.com/", + "location": "us-south1" } ], "fullyEncodeReservedExpansion": true, @@ -814,6 +879,17 @@ "location": "query", "type": "string" }, + "pageSize": { + "description": "Optional. Optional limit on the number of KeyHandles to include in the response. The service may return fewer than this value. Further KeyHandles can subsequently be obtained by including the ListKeyHandlesResponse.next_page_token in a subsequent request. If unspecified, at most 100 KeyHandles will be returned.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. Optional pagination token, returned earlier via ListKeyHandlesResponse.next_page_token.", + "location": "query", + "type": "string" + }, "parent": { "description": "Required. Name of the resource project and location from which to list KeyHandles, e.g. 
`projects/{PROJECT_ID}/locations/{LOCATION}`.", "location": "path", @@ -2056,7 +2132,7 @@ } } }, - "revision": "20240513", + "revision": "20240926", "rootUrl": "https://cloudkms.googleapis.com/", "schemas": { "AsymmetricDecryptRequest": { @@ -2247,6 +2323,23 @@ "name": { "description": "Identifier. Name of the AutokeyConfig resource, e.g. `folders/{FOLDER_NUMBER}/autokeyConfig`.", "type": "string" + }, + "state": { + "description": "Output only. The state for the AutokeyConfig.", + "enum": [ + "STATE_UNSPECIFIED", + "ACTIVE", + "KEY_PROJECT_DELETED", + "UNINITIALIZED" + ], + "enumDescriptions": [ + "The state of the AutokeyConfig is unspecified.", + "The AutokeyConfig is currently active.", + "A previously configured key project has been deleted and the current AutokeyConfig is unusable.", + "The AutokeyConfig is not yet initialized or has been reset to its default uninitialized state." + ], + "readOnly": true, + "type": "string" } }, "type": "object" @@ -2373,7 +2466,7 @@ "type": "string" }, "destroyScheduledDuration": { - "description": "Immutable. The period of time that versions of this key spend in the DESTROY_SCHEDULED state before transitioning to DESTROYED. If not specified at creation time, the default duration is 24 hours.", + "description": "Immutable. The period of time that versions of this key spend in the DESTROY_SCHEDULED state before transitioning to DESTROYED. If not specified at creation time, the default duration is 30 days.", "format": "google-duration", "type": "string" }, @@ -2381,6 +2474,10 @@ "description": "Immutable. Whether this key may contain imported versions only.", "type": "boolean" }, + "keyAccessJustificationsPolicy": { + "$ref": "KeyAccessJustificationsPolicy", + "description": "Optional. The policy used for Key Access Justifications Policy Enforcement. If this field is present and this key is enrolled in Key Access Justifications Policy Enforcement, the policy will be evaluated in encrypt, decrypt, and sign operations, and the operation will fail if rejected by the policy. The policy is defined by specifying zero or more allowed justification codes. https://cloud.google.com/assured-workloads/key-access-justifications/docs/justification-codes By default, this field is absent, and all justification codes are allowed." + }, "labels": { "additionalProperties": { "type": "string" @@ -2894,7 +2991,7 @@ "type": "string" }, "serviceResolvers": { - "description": "A list of ServiceResolvers where the EKM can be reached. There should be one ServiceResolver per EKM replica. Currently, only a single ServiceResolver is supported.", + "description": "Optional. A list of ServiceResolvers where the EKM can be reached. There should be one ServiceResolver per EKM replica. Currently, only a single ServiceResolver is supported.", "items": { "$ref": "ServiceResolver" }, @@ -3270,6 +3367,48 @@ }, "type": "object" }, + "KeyAccessJustificationsPolicy": { + "description": "A KeyAccessJustificationsPolicy specifies zero or more allowed AccessReason values for encrypt, decrypt, and sign operations on a CryptoKey.", + "id": "KeyAccessJustificationsPolicy", + "properties": { + "allowedAccessReasons": { + "description": "The list of allowed reasons for access to a CryptoKey. 
Zero allowed access reasons means all encrypt, decrypt, and sign operations for the CryptoKey associated with this policy will fail.", + "items": { + "enum": [ + "REASON_UNSPECIFIED", + "CUSTOMER_INITIATED_SUPPORT", + "GOOGLE_INITIATED_SERVICE", + "THIRD_PARTY_DATA_REQUEST", + "GOOGLE_INITIATED_REVIEW", + "CUSTOMER_INITIATED_ACCESS", + "GOOGLE_INITIATED_SYSTEM_OPERATION", + "REASON_NOT_EXPECTED", + "MODIFIED_CUSTOMER_INITIATED_ACCESS", + "MODIFIED_GOOGLE_INITIATED_SYSTEM_OPERATION", + "GOOGLE_RESPONSE_TO_PRODUCTION_ALERT", + "CUSTOMER_AUTHORIZED_WORKFLOW_SERVICING" + ], + "enumDescriptions": [ + "Unspecified access reason.", + "Customer-initiated support.", + "Google-initiated access for system management and troubleshooting.", + "Google-initiated access in response to a legal request or legal process.", + "Google-initiated access for security, fraud, abuse, or compliance purposes.", + "Customer uses their account to perform any access to their own data which their IAM policy authorizes.", + "Google systems access customer data to help optimize the structure of the data or quality for future uses by the customer.", + "No reason is expected for this key request.", + "Customer uses their account to perform any access to their own data which their IAM policy authorizes, and one of the following is true: * A Google administrator has reset the root-access account associated with the user's organization within the past 7 days. * A Google-initiated emergency access operation has interacted with a resource in the same project or folder as the currently accessed resource within the past 7 days.", + "Google systems access customer data to help optimize the structure of the data or quality for future uses by the customer, and one of the following is true: * A Google administrator has reset the root-access account associated with the user's organization within the past 7 days. * A Google-initiated emergency access operation has interacted with a resource in the same project or folder as the currently accessed resource within the past 7 days.", + "Google-initiated access to maintain system reliability.", + "One of the following operations is being executed while simultaneously encountering an internal technical issue which prevented a more precise justification code from being generated: * Your account has been used to perform any access to your own data which your IAM policy authorizes. * An automated Google system operates on encrypted customer data which your IAM policy authorizes. * Customer-initiated Google support access. * Google-initiated support access to protect system reliability." + ], + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "KeyHandle": { "description": "Resource-oriented representation of a request to Cloud KMS Autokey and the resulting provisioning of a CryptoKey.", "id": "KeyHandle", @@ -3443,6 +3582,10 @@ "$ref": "KeyHandle" }, "type": "array" + }, + "nextPageToken": { + "description": "A token to retrieve next page of results. 
Pass this value in ListKeyHandlesRequest.page_token to retrieve the next page of results.", + "type": "string" } }, "type": "object" diff --git a/terraform/providers/google/vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go b/terraform/providers/google/vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go index a6fcb994898..a12cc2e5d4d 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go @@ -326,9 +326,9 @@ type AsymmetricDecryptRequest struct { NullFields []string `json:"-"` } -func (s *AsymmetricDecryptRequest) MarshalJSON() ([]byte, error) { +func (s AsymmetricDecryptRequest) MarshalJSON() ([]byte, error) { type NoMethod AsymmetricDecryptRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AsymmetricDecryptResponse: Response message for @@ -384,9 +384,9 @@ type AsymmetricDecryptResponse struct { NullFields []string `json:"-"` } -func (s *AsymmetricDecryptResponse) MarshalJSON() ([]byte, error) { +func (s AsymmetricDecryptResponse) MarshalJSON() ([]byte, error) { type NoMethod AsymmetricDecryptResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AsymmetricSignRequest: Request message for @@ -439,9 +439,9 @@ type AsymmetricSignRequest struct { NullFields []string `json:"-"` } -func (s *AsymmetricSignRequest) MarshalJSON() ([]byte, error) { +func (s AsymmetricSignRequest) MarshalJSON() ([]byte, error) { type NoMethod AsymmetricSignRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AsymmetricSignResponse: Response message for @@ -507,9 +507,9 @@ type AsymmetricSignResponse struct { NullFields []string `json:"-"` } -func (s *AsymmetricSignResponse) MarshalJSON() ([]byte, error) { +func (s AsymmetricSignResponse) MarshalJSON() ([]byte, error) { type NoMethod AsymmetricSignResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuditConfig: Specifies the audit configuration for a service. The @@ -548,9 +548,9 @@ type AuditConfig struct { NullFields []string `json:"-"` } -func (s *AuditConfig) MarshalJSON() ([]byte, error) { +func (s AuditConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuditLogConfig: Provides the configuration for logging a type of @@ -583,9 +583,9 @@ type AuditLogConfig struct { NullFields []string `json:"-"` } -func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { +func (s AuditLogConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditLogConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AutokeyConfig: Cloud KMS Autokey configuration for a folder. @@ -603,6 +603,16 @@ type AutokeyConfig struct { // Name: Identifier. Name of the AutokeyConfig resource, e.g. // `folders/{FOLDER_NUMBER}/autokeyConfig`. Name string `json:"name,omitempty"` + // State: Output only. 
The state for the AutokeyConfig. + // + // Possible values: + // "STATE_UNSPECIFIED" - The state of the AutokeyConfig is unspecified. + // "ACTIVE" - The AutokeyConfig is currently active. + // "KEY_PROJECT_DELETED" - A previously configured key project has been + // deleted and the current AutokeyConfig is unusable. + // "UNINITIALIZED" - The AutokeyConfig is not yet initialized or has been + // reset to its default uninitialized state. + State string `json:"state,omitempty"` // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` @@ -619,9 +629,9 @@ type AutokeyConfig struct { NullFields []string `json:"-"` } -func (s *AutokeyConfig) MarshalJSON() ([]byte, error) { +func (s AutokeyConfig) MarshalJSON() ([]byte, error) { type NoMethod AutokeyConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Binding: Associates `members`, or principals, with a `role`. @@ -718,9 +728,9 @@ type Binding struct { NullFields []string `json:"-"` } -func (s *Binding) MarshalJSON() ([]byte, error) { +func (s Binding) MarshalJSON() ([]byte, error) { type NoMethod Binding - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Certificate: A Certificate represents an X.509 certificate used to @@ -764,9 +774,9 @@ type Certificate struct { NullFields []string `json:"-"` } -func (s *Certificate) MarshalJSON() ([]byte, error) { +func (s Certificate) MarshalJSON() ([]byte, error) { type NoMethod Certificate - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CertificateChains: Certificate chains needed to verify the attestation. @@ -794,9 +804,9 @@ type CertificateChains struct { NullFields []string `json:"-"` } -func (s *CertificateChains) MarshalJSON() ([]byte, error) { +func (s CertificateChains) MarshalJSON() ([]byte, error) { type NoMethod CertificateChains - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CryptoKey: A CryptoKey represents a logical key that can be used for @@ -815,11 +825,20 @@ type CryptoKey struct { CryptoKeyBackend string `json:"cryptoKeyBackend,omitempty"` // DestroyScheduledDuration: Immutable. The period of time that versions of // this key spend in the DESTROY_SCHEDULED state before transitioning to - // DESTROYED. If not specified at creation time, the default duration is 24 - // hours. + // DESTROYED. If not specified at creation time, the default duration is 30 + // days. DestroyScheduledDuration string `json:"destroyScheduledDuration,omitempty"` // ImportOnly: Immutable. Whether this key may contain imported versions only. ImportOnly bool `json:"importOnly,omitempty"` + // KeyAccessJustificationsPolicy: Optional. The policy used for Key Access + // Justifications Policy Enforcement. If this field is present and this key is + // enrolled in Key Access Justifications Policy Enforcement, the policy will be + // evaluated in encrypt, decrypt, and sign operations, and the operation will + // fail if rejected by the policy. The policy is defined by specifying zero or + // more allowed justification codes. 
+ // https://cloud.google.com/assured-workloads/key-access-justifications/docs/justification-codes + // By default, this field is absent, and all justification codes are allowed. + KeyAccessJustificationsPolicy *KeyAccessJustificationsPolicy `json:"keyAccessJustificationsPolicy,omitempty"` // Labels: Labels with user-defined metadata. For more information, see // Labeling Keys (https://cloud.google.com/kms/docs/labeling-keys). Labels map[string]string `json:"labels,omitempty"` @@ -882,9 +901,9 @@ type CryptoKey struct { NullFields []string `json:"-"` } -func (s *CryptoKey) MarshalJSON() ([]byte, error) { +func (s CryptoKey) MarshalJSON() ([]byte, error) { type NoMethod CryptoKey - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CryptoKeyVersion: A CryptoKeyVersion represents an individual cryptographic @@ -1064,9 +1083,9 @@ type CryptoKeyVersion struct { NullFields []string `json:"-"` } -func (s *CryptoKeyVersion) MarshalJSON() ([]byte, error) { +func (s CryptoKeyVersion) MarshalJSON() ([]byte, error) { type NoMethod CryptoKeyVersion - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CryptoKeyVersionTemplate: A CryptoKeyVersionTemplate specifies the @@ -1160,9 +1179,9 @@ type CryptoKeyVersionTemplate struct { NullFields []string `json:"-"` } -func (s *CryptoKeyVersionTemplate) MarshalJSON() ([]byte, error) { +func (s CryptoKeyVersionTemplate) MarshalJSON() ([]byte, error) { type NoMethod CryptoKeyVersionTemplate - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DecryptRequest: Request message for KeyManagementService.Decrypt. @@ -1213,9 +1232,9 @@ type DecryptRequest struct { NullFields []string `json:"-"` } -func (s *DecryptRequest) MarshalJSON() ([]byte, error) { +func (s DecryptRequest) MarshalJSON() ([]byte, error) { type NoMethod DecryptRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DecryptResponse: Response message for KeyManagementService.Decrypt. 
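The two CryptoKey changes above, the 30-day destroy-scheduled default and the new optional KeyAccessJustificationsPolicy field, surface as plain struct fields on cloudkms.CryptoKey. A minimal sketch of setting both at creation time through this generated client; the project, key ring, and key IDs are placeholders, and CUSTOMER_INITIATED_ACCESS is just one of the enum values listed above:

package main

import (
	"context"
	"log"

	cloudkms "google.golang.org/api/cloudkms/v1"
)

func main() {
	ctx := context.Background()
	svc, err := cloudkms.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	key := &cloudkms.CryptoKey{
		Purpose: "ENCRYPT_DECRYPT",
		// Explicit 24-hour window instead of the new 30-day default.
		DestroyScheduledDuration: "86400s",
		// Allow only customer-initiated access; per the docs above, an
		// empty list would make every encrypt/decrypt/sign call fail.
		KeyAccessJustificationsPolicy: &cloudkms.KeyAccessJustificationsPolicy{
			AllowedAccessReasons: []string{"CUSTOMER_INITIATED_ACCESS"},
		},
	}
	parent := "projects/my-project/locations/global/keyRings/my-ring" // placeholder
	created, err := svc.Projects.Locations.KeyRings.CryptoKeys.
		Create(parent, key).CryptoKeyId("my-key").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created %s", created.Name)
}
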
@@ -1266,9 +1285,9 @@ type DecryptResponse struct { NullFields []string `json:"-"` } -func (s *DecryptResponse) MarshalJSON() ([]byte, error) { +func (s DecryptResponse) MarshalJSON() ([]byte, error) { type NoMethod DecryptResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DestroyCryptoKeyVersionRequest: Request message for @@ -1297,9 +1316,9 @@ type Digest struct { NullFields []string `json:"-"` } -func (s *Digest) MarshalJSON() ([]byte, error) { +func (s Digest) MarshalJSON() ([]byte, error) { type NoMethod Digest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // EkmConfig: An EkmConfig is a singleton resource that represents @@ -1328,9 +1347,9 @@ type EkmConfig struct { NullFields []string `json:"-"` } -func (s *EkmConfig) MarshalJSON() ([]byte, error) { +func (s EkmConfig) MarshalJSON() ([]byte, error) { type NoMethod EkmConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // EkmConnection: An EkmConnection represents an individual EKM connection. It @@ -1371,9 +1390,9 @@ type EkmConnection struct { // Name: Output only. The resource name for the EkmConnection in the format // `projects/*/locations/*/ekmConnections/*`. Name string `json:"name,omitempty"` - // ServiceResolvers: A list of ServiceResolvers where the EKM can be reached. - // There should be one ServiceResolver per EKM replica. Currently, only a - // single ServiceResolver is supported. + // ServiceResolvers: Optional. A list of ServiceResolvers where the EKM can be + // reached. There should be one ServiceResolver per EKM replica. Currently, + // only a single ServiceResolver is supported. ServiceResolvers []*ServiceResolver `json:"serviceResolvers,omitempty"` // ServerResponse contains the HTTP response code and headers from the server. @@ -1391,9 +1410,9 @@ type EkmConnection struct { NullFields []string `json:"-"` } -func (s *EkmConnection) MarshalJSON() ([]byte, error) { +func (s EkmConnection) MarshalJSON() ([]byte, error) { type NoMethod EkmConnection - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // EncryptRequest: Request message for KeyManagementService.Encrypt. @@ -1452,9 +1471,9 @@ type EncryptRequest struct { NullFields []string `json:"-"` } -func (s *EncryptRequest) MarshalJSON() ([]byte, error) { +func (s EncryptRequest) MarshalJSON() ([]byte, error) { type NoMethod EncryptRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // EncryptResponse: Response message for KeyManagementService.Encrypt. 
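EncryptRequest and EncryptResponse, whose marshalers the hunks above rewrite, carry byte payloads as base64 strings in this client. A usage sketch under that assumption; the key resource name passed in is a placeholder supplied by the caller:

import (
	"context"
	"encoding/base64"

	cloudkms "google.golang.org/api/cloudkms/v1"
)

// encryptWithKMS encrypts plaintext with the named CryptoKey and returns
// the raw ciphertext bytes.
func encryptWithKMS(ctx context.Context, svc *cloudkms.Service, keyName string, plaintext []byte) ([]byte, error) {
	resp, err := svc.Projects.Locations.KeyRings.CryptoKeys.
		Encrypt(keyName, &cloudkms.EncryptRequest{
			Plaintext: base64.StdEncoding.EncodeToString(plaintext),
		}).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	// The ciphertext in the response is base64-encoded as well.
	return base64.StdEncoding.DecodeString(resp.Ciphertext)
}
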
@@ -1519,9 +1538,9 @@ type EncryptResponse struct { NullFields []string `json:"-"` } -func (s *EncryptResponse) MarshalJSON() ([]byte, error) { +func (s EncryptResponse) MarshalJSON() ([]byte, error) { type NoMethod EncryptResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Expr: Represents a textual expression in the Common Expression Language @@ -1567,9 +1586,9 @@ type Expr struct { NullFields []string `json:"-"` } -func (s *Expr) MarshalJSON() ([]byte, error) { +func (s Expr) MarshalJSON() ([]byte, error) { type NoMethod Expr - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExternalProtectionLevelOptions: ExternalProtectionLevelOptions stores a @@ -1597,9 +1616,9 @@ type ExternalProtectionLevelOptions struct { NullFields []string `json:"-"` } -func (s *ExternalProtectionLevelOptions) MarshalJSON() ([]byte, error) { +func (s ExternalProtectionLevelOptions) MarshalJSON() ([]byte, error) { type NoMethod ExternalProtectionLevelOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GenerateRandomBytesRequest: Request message for @@ -1632,9 +1651,9 @@ type GenerateRandomBytesRequest struct { NullFields []string `json:"-"` } -func (s *GenerateRandomBytesRequest) MarshalJSON() ([]byte, error) { +func (s GenerateRandomBytesRequest) MarshalJSON() ([]byte, error) { type NoMethod GenerateRandomBytesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GenerateRandomBytesResponse: Response message for @@ -1669,9 +1688,9 @@ type GenerateRandomBytesResponse struct { NullFields []string `json:"-"` } -func (s *GenerateRandomBytesResponse) MarshalJSON() ([]byte, error) { +func (s GenerateRandomBytesResponse) MarshalJSON() ([]byte, error) { type NoMethod GenerateRandomBytesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ImportCryptoKeyVersionRequest: Request message for @@ -1787,9 +1806,9 @@ type ImportCryptoKeyVersionRequest struct { NullFields []string `json:"-"` } -func (s *ImportCryptoKeyVersionRequest) MarshalJSON() ([]byte, error) { +func (s ImportCryptoKeyVersionRequest) MarshalJSON() ([]byte, error) { type NoMethod ImportCryptoKeyVersionRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ImportJob: An ImportJob can be used to create CryptoKeys and @@ -1917,9 +1936,75 @@ type ImportJob struct { NullFields []string `json:"-"` } -func (s *ImportJob) MarshalJSON() ([]byte, error) { +func (s ImportJob) MarshalJSON() ([]byte, error) { type NoMethod ImportJob - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// KeyAccessJustificationsPolicy: A KeyAccessJustificationsPolicy specifies +// zero or more allowed AccessReason values for encrypt, decrypt, and sign +// operations on a CryptoKey. 
+type KeyAccessJustificationsPolicy struct { + // AllowedAccessReasons: The list of allowed reasons for access to a CryptoKey. + // Zero allowed access reasons means all encrypt, decrypt, and sign operations + // for the CryptoKey associated with this policy will fail. + // + // Possible values: + // "REASON_UNSPECIFIED" - Unspecified access reason. + // "CUSTOMER_INITIATED_SUPPORT" - Customer-initiated support. + // "GOOGLE_INITIATED_SERVICE" - Google-initiated access for system management + // and troubleshooting. + // "THIRD_PARTY_DATA_REQUEST" - Google-initiated access in response to a + // legal request or legal process. + // "GOOGLE_INITIATED_REVIEW" - Google-initiated access for security, fraud, + // abuse, or compliance purposes. + // "CUSTOMER_INITIATED_ACCESS" - Customer uses their account to perform any + // access to their own data which their IAM policy authorizes. + // "GOOGLE_INITIATED_SYSTEM_OPERATION" - Google systems access customer data + // to help optimize the structure of the data or quality for future uses by the + // customer. + // "REASON_NOT_EXPECTED" - No reason is expected for this key request. + // "MODIFIED_CUSTOMER_INITIATED_ACCESS" - Customer uses their account to + // perform any access to their own data which their IAM policy authorizes, and + // one of the following is true: * A Google administrator has reset the + // root-access account associated with the user's organization within the past + // 7 days. * A Google-initiated emergency access operation has interacted with + // a resource in the same project or folder as the currently accessed resource + // within the past 7 days. + // "MODIFIED_GOOGLE_INITIATED_SYSTEM_OPERATION" - Google systems access + // customer data to help optimize the structure of the data or quality for + // future uses by the customer, and one of the following is true: * A Google + // administrator has reset the root-access account associated with the user's + // organization within the past 7 days. * A Google-initiated emergency access + // operation has interacted with a resource in the same project or folder as + // the currently accessed resource within the past 7 days. + // "GOOGLE_RESPONSE_TO_PRODUCTION_ALERT" - Google-initiated access to + // maintain system reliability. + // "CUSTOMER_AUTHORIZED_WORKFLOW_SERVICING" - One of the following operations + // is being executed while simultaneously encountering an internal technical + // issue which prevented a more precise justification code from being + // generated: * Your account has been used to perform any access to your own + // data which your IAM policy authorizes. * An automated Google system operates + // on encrypted customer data which your IAM policy authorizes. * + // Customer-initiated Google support access. * Google-initiated support access + // to protect system reliability. + AllowedAccessReasons []string `json:"allowedAccessReasons,omitempty"` + // ForceSendFields is a list of field names (e.g. "AllowedAccessReasons") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AllowedAccessReasons") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s KeyAccessJustificationsPolicy) MarshalJSON() ([]byte, error) { + type NoMethod KeyAccessJustificationsPolicy + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // KeyHandle: Resource-oriented representation of a request to Cloud KMS @@ -1957,9 +2042,9 @@ type KeyHandle struct { NullFields []string `json:"-"` } -func (s *KeyHandle) MarshalJSON() ([]byte, error) { +func (s KeyHandle) MarshalJSON() ([]byte, error) { type NoMethod KeyHandle - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // KeyOperationAttestation: Contains an HSM-generated attestation about a key @@ -1995,9 +2080,9 @@ type KeyOperationAttestation struct { NullFields []string `json:"-"` } -func (s *KeyOperationAttestation) MarshalJSON() ([]byte, error) { +func (s KeyOperationAttestation) MarshalJSON() ([]byte, error) { type NoMethod KeyOperationAttestation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // KeyRing: A KeyRing is a toplevel logical grouping of CryptoKeys. @@ -2023,9 +2108,9 @@ type KeyRing struct { NullFields []string `json:"-"` } -func (s *KeyRing) MarshalJSON() ([]byte, error) { +func (s KeyRing) MarshalJSON() ([]byte, error) { type NoMethod KeyRing - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListCryptoKeyVersionsResponse: Response message for @@ -2055,9 +2140,9 @@ type ListCryptoKeyVersionsResponse struct { NullFields []string `json:"-"` } -func (s *ListCryptoKeyVersionsResponse) MarshalJSON() ([]byte, error) { +func (s ListCryptoKeyVersionsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListCryptoKeyVersionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListCryptoKeysResponse: Response message for @@ -2086,9 +2171,9 @@ type ListCryptoKeysResponse struct { NullFields []string `json:"-"` } -func (s *ListCryptoKeysResponse) MarshalJSON() ([]byte, error) { +func (s ListCryptoKeysResponse) MarshalJSON() ([]byte, error) { type NoMethod ListCryptoKeysResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListEkmConnectionsResponse: Response message for @@ -2117,9 +2202,9 @@ type ListEkmConnectionsResponse struct { NullFields []string `json:"-"` } -func (s *ListEkmConnectionsResponse) MarshalJSON() ([]byte, error) { +func (s ListEkmConnectionsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListEkmConnectionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListImportJobsResponse: Response message for @@ -2148,15 +2233,18 @@ type ListImportJobsResponse struct { NullFields []string `json:"-"` } -func (s *ListImportJobsResponse) MarshalJSON() ([]byte, error) { +func (s ListImportJobsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListImportJobsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // 
ListKeyHandlesResponse: Response message for Autokey.ListKeyHandles. type ListKeyHandlesResponse struct { // KeyHandles: Resulting KeyHandles. KeyHandles []*KeyHandle `json:"keyHandles,omitempty"` + // NextPageToken: A token to retrieve next page of results. Pass this value in + // ListKeyHandlesRequest.page_token to retrieve the next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` @@ -2173,9 +2261,9 @@ type ListKeyHandlesResponse struct { NullFields []string `json:"-"` } -func (s *ListKeyHandlesResponse) MarshalJSON() ([]byte, error) { +func (s ListKeyHandlesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListKeyHandlesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListKeyRingsResponse: Response message for @@ -2204,9 +2292,9 @@ type ListKeyRingsResponse struct { NullFields []string `json:"-"` } -func (s *ListKeyRingsResponse) MarshalJSON() ([]byte, error) { +func (s ListKeyRingsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListKeyRingsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListLocationsResponse: The response message for Locations.ListLocations. @@ -2232,9 +2320,9 @@ type ListLocationsResponse struct { NullFields []string `json:"-"` } -func (s *ListLocationsResponse) MarshalJSON() ([]byte, error) { +func (s ListLocationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListLocationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Location: A resource that represents a Google Cloud location. @@ -2270,9 +2358,9 @@ type Location struct { NullFields []string `json:"-"` } -func (s *Location) MarshalJSON() ([]byte, error) { +func (s Location) MarshalJSON() ([]byte, error) { type NoMethod Location - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LocationMetadata: Cloud KMS metadata for the given @@ -2297,9 +2385,9 @@ type LocationMetadata struct { NullFields []string `json:"-"` } -func (s *LocationMetadata) MarshalJSON() ([]byte, error) { +func (s LocationMetadata) MarshalJSON() ([]byte, error) { type NoMethod LocationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MacSignRequest: Request message for KeyManagementService.MacSign. @@ -2332,9 +2420,9 @@ type MacSignRequest struct { NullFields []string `json:"-"` } -func (s *MacSignRequest) MarshalJSON() ([]byte, error) { +func (s MacSignRequest) MarshalJSON() ([]byte, error) { type NoMethod MacSignRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MacSignResponse: Response message for KeyManagementService.MacSign. 
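The NextPageToken field added to ListKeyHandlesResponse here pairs with the PageSize and PageToken setters and the Pages helper added to ProjectsLocationsKeyHandlesListCall later in this file. A sketch of both iteration styles, with parent as a placeholder such as "projects/my-project/locations/us-east1":

import (
	"context"
	"log"

	cloudkms "google.golang.org/api/cloudkms/v1"
)

func listAllKeyHandles(ctx context.Context, svc *cloudkms.Service, parent string) error {
	// Style 1: drive the loop by hand with NextPageToken.
	call := svc.Projects.Locations.KeyHandles.List(parent).PageSize(100)
	for {
		page, err := call.Context(ctx).Do()
		if err != nil {
			return err
		}
		for _, kh := range page.KeyHandles {
			log.Println(kh.Name)
		}
		if page.NextPageToken == "" {
			break
		}
		call.PageToken(page.NextPageToken)
	}

	// Style 2: let the generated Pages helper run the same loop.
	return svc.Projects.Locations.KeyHandles.List(parent).
		Pages(ctx, func(page *cloudkms.ListKeyHandlesResponse) error {
			for _, kh := range page.KeyHandles {
				log.Println(kh.Name)
			}
			return nil
		})
}
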
@@ -2390,9 +2478,9 @@ type MacSignResponse struct { NullFields []string `json:"-"` } -func (s *MacSignResponse) MarshalJSON() ([]byte, error) { +func (s MacSignResponse) MarshalJSON() ([]byte, error) { type NoMethod MacSignResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MacVerifyRequest: Request message for KeyManagementService.MacVerify. @@ -2440,9 +2528,9 @@ type MacVerifyRequest struct { NullFields []string `json:"-"` } -func (s *MacVerifyRequest) MarshalJSON() ([]byte, error) { +func (s MacVerifyRequest) MarshalJSON() ([]byte, error) { type NoMethod MacVerifyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MacVerifyResponse: Response message for KeyManagementService.MacVerify. @@ -2501,9 +2589,9 @@ type MacVerifyResponse struct { NullFields []string `json:"-"` } -func (s *MacVerifyResponse) MarshalJSON() ([]byte, error) { +func (s MacVerifyResponse) MarshalJSON() ([]byte, error) { type NoMethod MacVerifyResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Operation: This resource represents a long-running operation that is the @@ -2548,9 +2636,9 @@ type Operation struct { NullFields []string `json:"-"` } -func (s *Operation) MarshalJSON() ([]byte, error) { +func (s Operation) MarshalJSON() ([]byte, error) { type NoMethod Operation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Policy: An Identity and Access Management (IAM) policy, which specifies @@ -2640,9 +2728,9 @@ type Policy struct { NullFields []string `json:"-"` } -func (s *Policy) MarshalJSON() ([]byte, error) { +func (s Policy) MarshalJSON() ([]byte, error) { type NoMethod Policy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PublicKey: The public keys for a given CryptoKeyVersion. Obtained via @@ -2754,9 +2842,9 @@ type PublicKey struct { NullFields []string `json:"-"` } -func (s *PublicKey) MarshalJSON() ([]byte, error) { +func (s PublicKey) MarshalJSON() ([]byte, error) { type NoMethod PublicKey - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RawDecryptRequest: Request message for KeyManagementService.RawDecrypt. @@ -2827,9 +2915,9 @@ type RawDecryptRequest struct { NullFields []string `json:"-"` } -func (s *RawDecryptRequest) MarshalJSON() ([]byte, error) { +func (s RawDecryptRequest) MarshalJSON() ([]byte, error) { type NoMethod RawDecryptRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RawDecryptResponse: Response message for KeyManagementService.RawDecrypt. 
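MacSignRequest and MacVerifyRequest, covered by the receiver changes above, follow the same base64 convention for their Data and Mac fields. A round-trip sketch; versionName is a placeholder CryptoKeyVersion resource name:

import (
	"context"
	"encoding/base64"

	cloudkms "google.golang.org/api/cloudkms/v1"
)

// macRoundTrip signs data with the named CryptoKeyVersion and then
// verifies the returned MAC against the same data.
func macRoundTrip(ctx context.Context, svc *cloudkms.Service, versionName string, data []byte) (bool, error) {
	encoded := base64.StdEncoding.EncodeToString(data)
	versions := svc.Projects.Locations.KeyRings.CryptoKeys.CryptoKeyVersions
	signed, err := versions.MacSign(versionName,
		&cloudkms.MacSignRequest{Data: encoded}).Context(ctx).Do()
	if err != nil {
		return false, err
	}
	verified, err := versions.MacVerify(versionName,
		&cloudkms.MacVerifyRequest{Data: encoded, Mac: signed.Mac}).
		Context(ctx).Do()
	if err != nil {
		return false, err
	}
	return verified.Success, nil
}
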
@@ -2903,9 +2991,9 @@ type RawDecryptResponse struct { NullFields []string `json:"-"` } -func (s *RawDecryptResponse) MarshalJSON() ([]byte, error) { +func (s RawDecryptResponse) MarshalJSON() ([]byte, error) { type NoMethod RawDecryptResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RawEncryptRequest: Request message for KeyManagementService.RawEncrypt. @@ -2982,9 +3070,9 @@ type RawEncryptRequest struct { NullFields []string `json:"-"` } -func (s *RawEncryptRequest) MarshalJSON() ([]byte, error) { +func (s RawEncryptRequest) MarshalJSON() ([]byte, error) { type NoMethod RawEncryptRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RawEncryptResponse: Response message for KeyManagementService.RawEncrypt. @@ -3079,9 +3167,9 @@ type RawEncryptResponse struct { NullFields []string `json:"-"` } -func (s *RawEncryptResponse) MarshalJSON() ([]byte, error) { +func (s RawEncryptResponse) MarshalJSON() ([]byte, error) { type NoMethod RawEncryptResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RestoreCryptoKeyVersionRequest: Request message for @@ -3122,9 +3210,9 @@ type ServiceResolver struct { NullFields []string `json:"-"` } -func (s *ServiceResolver) MarshalJSON() ([]byte, error) { +func (s ServiceResolver) MarshalJSON() ([]byte, error) { type NoMethod ServiceResolver - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SetIamPolicyRequest: Request message for `SetIamPolicy` method. @@ -3151,9 +3239,9 @@ type SetIamPolicyRequest struct { NullFields []string `json:"-"` } -func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { +func (s SetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod SetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ShowEffectiveAutokeyConfigResponse: Response message for @@ -3178,9 +3266,9 @@ type ShowEffectiveAutokeyConfigResponse struct { NullFields []string `json:"-"` } -func (s *ShowEffectiveAutokeyConfigResponse) MarshalJSON() ([]byte, error) { +func (s ShowEffectiveAutokeyConfigResponse) MarshalJSON() ([]byte, error) { type NoMethod ShowEffectiveAutokeyConfigResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Status: The `Status` type defines a logical error model that is suitable for @@ -3212,9 +3300,9 @@ type Status struct { NullFields []string `json:"-"` } -func (s *Status) MarshalJSON() ([]byte, error) { +func (s Status) MarshalJSON() ([]byte, error) { type NoMethod Status - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsRequest: Request message for `TestIamPermissions` method. 
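Every hunk in this file swaps MarshalJSON from a pointer to a value receiver, but the ForceSendFields/NullFields contract the struct comments describe is unchanged: forced fields are serialized even at their zero value, and null fields are serialized as explicit JSON null. An illustrative sketch (the field choice is arbitrary):

package main

import (
	"fmt"
	"log"

	cloudkms "google.golang.org/api/cloudkms/v1"
)

func main() {
	key := &cloudkms.CryptoKey{
		// ImportOnly is false, the zero value, so it would normally be
		// omitted; forcing it keeps "importOnly": false in the body.
		ForceSendFields: []string{"ImportOnly"},
		// Labels is empty and would also be omitted; listing it here
		// sends an explicit "labels": null instead.
		NullFields: []string{"Labels"},
	}
	b, err := key.MarshalJSON()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
}
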
@@ -3237,9 +3325,9 @@ type TestIamPermissionsRequest struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsResponse: Response message for `TestIamPermissions` @@ -3264,9 +3352,9 @@ type TestIamPermissionsResponse struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateCryptoKeyPrimaryVersionRequest: Request message for @@ -3288,9 +3376,9 @@ type UpdateCryptoKeyPrimaryVersionRequest struct { NullFields []string `json:"-"` } -func (s *UpdateCryptoKeyPrimaryVersionRequest) MarshalJSON() ([]byte, error) { +func (s UpdateCryptoKeyPrimaryVersionRequest) MarshalJSON() ([]byte, error) { type NoMethod UpdateCryptoKeyPrimaryVersionRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VerifyConnectivityResponse: Response message for @@ -3322,9 +3410,9 @@ type WrappingPublicKey struct { NullFields []string `json:"-"` } -func (s *WrappingPublicKey) MarshalJSON() ([]byte, error) { +func (s WrappingPublicKey) MarshalJSON() ([]byte, error) { type NoMethod WrappingPublicKey - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type FoldersGetAutokeyConfigCall struct { @@ -5774,6 +5862,23 @@ func (c *ProjectsLocationsKeyHandlesListCall) Filter(filter string) *ProjectsLoc return c } +// PageSize sets the optional parameter "pageSize": Optional limit on the +// number of KeyHandles to include in the response. The service may return +// fewer than this value. Further KeyHandles can subsequently be obtained by +// including the ListKeyHandlesResponse.next_page_token in a subsequent +// request. If unspecified, at most 100 KeyHandles will be returned. +func (c *ProjectsLocationsKeyHandlesListCall) PageSize(pageSize int64) *ProjectsLocationsKeyHandlesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": Optional pagination +// token, returned earlier via ListKeyHandlesResponse.next_page_token. +func (c *ProjectsLocationsKeyHandlesListCall) PageToken(pageToken string) *ProjectsLocationsKeyHandlesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. @@ -5864,6 +5969,27 @@ func (c *ProjectsLocationsKeyHandlesListCall) Do(opts ...googleapi.CallOption) ( return ret, nil } +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *ProjectsLocationsKeyHandlesListCall) Pages(ctx context.Context, f func(*ListKeyHandlesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + type ProjectsLocationsKeyRingsCreateCall struct { s *Service parent string diff --git a/terraform/providers/google/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go b/terraform/providers/google/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go index d4cd7a1ba9c..86e108d6cd8 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go @@ -238,9 +238,9 @@ type Ancestor struct { NullFields []string `json:"-"` } -func (s *Ancestor) MarshalJSON() ([]byte, error) { +func (s Ancestor) MarshalJSON() ([]byte, error) { type NoMethod Ancestor - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuditConfig: Specifies the audit configuration for a service. The @@ -279,9 +279,9 @@ type AuditConfig struct { NullFields []string `json:"-"` } -func (s *AuditConfig) MarshalJSON() ([]byte, error) { +func (s AuditConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuditLogConfig: Provides the configuration for logging a type of @@ -314,9 +314,9 @@ type AuditLogConfig struct { NullFields []string `json:"-"` } -func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { +func (s AuditLogConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditLogConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Binding: Associates `members`, or principals, with a `role`. @@ -413,9 +413,9 @@ type Binding struct { NullFields []string `json:"-"` } -func (s *Binding) MarshalJSON() ([]byte, error) { +func (s Binding) MarshalJSON() ([]byte, error) { type NoMethod Binding - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BooleanConstraint: A `Constraint` that is either enforced or not. For @@ -467,9 +467,9 @@ type BooleanPolicy struct { NullFields []string `json:"-"` } -func (s *BooleanPolicy) MarshalJSON() ([]byte, error) { +func (s BooleanPolicy) MarshalJSON() ([]byte, error) { type NoMethod BooleanPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ClearOrgPolicyRequest: The request sent to the ClearOrgPolicy method. 
@@ -492,9 +492,9 @@ type ClearOrgPolicyRequest struct { NullFields []string `json:"-"` } -func (s *ClearOrgPolicyRequest) MarshalJSON() ([]byte, error) { +func (s ClearOrgPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod ClearOrgPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CloudresourcemanagerGoogleCloudResourcemanagerV2alpha1FolderOperation: @@ -528,9 +528,9 @@ type CloudresourcemanagerGoogleCloudResourcemanagerV2alpha1FolderOperation struc NullFields []string `json:"-"` } -func (s *CloudresourcemanagerGoogleCloudResourcemanagerV2alpha1FolderOperation) MarshalJSON() ([]byte, error) { +func (s CloudresourcemanagerGoogleCloudResourcemanagerV2alpha1FolderOperation) MarshalJSON() ([]byte, error) { type NoMethod CloudresourcemanagerGoogleCloudResourcemanagerV2alpha1FolderOperation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CloudresourcemanagerGoogleCloudResourcemanagerV2beta1FolderOperation: @@ -564,9 +564,9 @@ type CloudresourcemanagerGoogleCloudResourcemanagerV2beta1FolderOperation struct NullFields []string `json:"-"` } -func (s *CloudresourcemanagerGoogleCloudResourcemanagerV2beta1FolderOperation) MarshalJSON() ([]byte, error) { +func (s CloudresourcemanagerGoogleCloudResourcemanagerV2beta1FolderOperation) MarshalJSON() ([]byte, error) { type NoMethod CloudresourcemanagerGoogleCloudResourcemanagerV2beta1FolderOperation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Constraint: A `Constraint` describes a way in which a resource's @@ -621,9 +621,9 @@ type Constraint struct { NullFields []string `json:"-"` } -func (s *Constraint) MarshalJSON() ([]byte, error) { +func (s Constraint) MarshalJSON() ([]byte, error) { type NoMethod Constraint - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateFolderMetadata: Metadata pertaining to the Folder creation process. 
@@ -646,9 +646,9 @@ type CreateFolderMetadata struct { NullFields []string `json:"-"` } -func (s *CreateFolderMetadata) MarshalJSON() ([]byte, error) { +func (s CreateFolderMetadata) MarshalJSON() ([]byte, error) { type NoMethod CreateFolderMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateProjectMetadata: A status object which is used as the `metadata` field @@ -676,9 +676,9 @@ type CreateProjectMetadata struct { NullFields []string `json:"-"` } -func (s *CreateProjectMetadata) MarshalJSON() ([]byte, error) { +func (s CreateProjectMetadata) MarshalJSON() ([]byte, error) { type NoMethod CreateProjectMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateTagBindingMetadata: Runtime operation information for creating a @@ -776,9 +776,9 @@ type Expr struct { NullFields []string `json:"-"` } -func (s *Expr) MarshalJSON() ([]byte, error) { +func (s Expr) MarshalJSON() ([]byte, error) { type NoMethod Expr - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FolderOperation: Metadata describing a long running folder operation @@ -811,9 +811,9 @@ type FolderOperation struct { NullFields []string `json:"-"` } -func (s *FolderOperation) MarshalJSON() ([]byte, error) { +func (s FolderOperation) MarshalJSON() ([]byte, error) { type NoMethod FolderOperation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FolderOperationError: A classification of the Folder Operation error. @@ -853,9 +853,9 @@ type FolderOperationError struct { NullFields []string `json:"-"` } -func (s *FolderOperationError) MarshalJSON() ([]byte, error) { +func (s FolderOperationError) MarshalJSON() ([]byte, error) { type NoMethod FolderOperationError - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetAncestryRequest: The request sent to the GetAncestry method. @@ -884,9 +884,9 @@ type GetAncestryResponse struct { NullFields []string `json:"-"` } -func (s *GetAncestryResponse) MarshalJSON() ([]byte, error) { +func (s GetAncestryResponse) MarshalJSON() ([]byte, error) { type NoMethod GetAncestryResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetEffectiveOrgPolicyRequest: The request sent to the GetEffectiveOrgPolicy @@ -907,9 +907,9 @@ type GetEffectiveOrgPolicyRequest struct { NullFields []string `json:"-"` } -func (s *GetEffectiveOrgPolicyRequest) MarshalJSON() ([]byte, error) { +func (s GetEffectiveOrgPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod GetEffectiveOrgPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetIamPolicyRequest: Request message for `GetIamPolicy` method. 
@@ -930,9 +930,9 @@ type GetIamPolicyRequest struct { NullFields []string `json:"-"` } -func (s *GetIamPolicyRequest) MarshalJSON() ([]byte, error) { +func (s GetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod GetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetOrgPolicyRequest: The request sent to the GetOrgPolicy method. @@ -952,9 +952,9 @@ type GetOrgPolicyRequest struct { NullFields []string `json:"-"` } -func (s *GetOrgPolicyRequest) MarshalJSON() ([]byte, error) { +func (s GetOrgPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod GetOrgPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetPolicyOptions: Encapsulates settings provided to GetIamPolicy. @@ -984,9 +984,9 @@ type GetPolicyOptions struct { NullFields []string `json:"-"` } -func (s *GetPolicyOptions) MarshalJSON() ([]byte, error) { +func (s GetPolicyOptions) MarshalJSON() ([]byte, error) { type NoMethod GetPolicyOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Lien: A Lien represents an encumbrance on the actions that can be performed @@ -1031,9 +1031,9 @@ type Lien struct { NullFields []string `json:"-"` } -func (s *Lien) MarshalJSON() ([]byte, error) { +func (s Lien) MarshalJSON() ([]byte, error) { type NoMethod Lien - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListAvailableOrgPolicyConstraintsRequest: The request sent to the @@ -1061,9 +1061,9 @@ type ListAvailableOrgPolicyConstraintsRequest struct { NullFields []string `json:"-"` } -func (s *ListAvailableOrgPolicyConstraintsRequest) MarshalJSON() ([]byte, error) { +func (s ListAvailableOrgPolicyConstraintsRequest) MarshalJSON() ([]byte, error) { type NoMethod ListAvailableOrgPolicyConstraintsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListAvailableOrgPolicyConstraintsResponse: The response returned from the @@ -1093,9 +1093,9 @@ type ListAvailableOrgPolicyConstraintsResponse struct { NullFields []string `json:"-"` } -func (s *ListAvailableOrgPolicyConstraintsResponse) MarshalJSON() ([]byte, error) { +func (s ListAvailableOrgPolicyConstraintsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListAvailableOrgPolicyConstraintsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListConstraint: A `Constraint` that allows or disallows a list of string @@ -1123,9 +1123,9 @@ type ListConstraint struct { NullFields []string `json:"-"` } -func (s *ListConstraint) MarshalJSON() ([]byte, error) { +func (s ListConstraint) MarshalJSON() ([]byte, error) { type NoMethod ListConstraint - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListLiensResponse: The response message for Liens.ListLiens. 
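GetPolicyOptions, documented in the hunk above, is how callers of this v1 client opt into version-3 IAM policies. A small sketch with a placeholder project ID supplied by the caller:

import (
	"context"

	crm "google.golang.org/api/cloudresourcemanager/v1"
)

func projectPolicy(ctx context.Context, projectID string) (*crm.Policy, error) {
	svc, err := crm.NewService(ctx)
	if err != nil {
		return nil, err
	}
	// Request version 3 so conditional role bindings, if any, come back
	// intact instead of being down-converted.
	return svc.Projects.GetIamPolicy(projectID, &crm.GetIamPolicyRequest{
		Options: &crm.GetPolicyOptions{RequestedPolicyVersion: 3},
	}).Context(ctx).Do()
}
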
@@ -1151,9 +1151,9 @@ type ListLiensResponse struct { NullFields []string `json:"-"` } -func (s *ListLiensResponse) MarshalJSON() ([]byte, error) { +func (s ListLiensResponse) MarshalJSON() ([]byte, error) { type NoMethod ListLiensResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListOrgPoliciesRequest: The request sent to the ListOrgPolicies method. @@ -1179,9 +1179,9 @@ type ListOrgPoliciesRequest struct { NullFields []string `json:"-"` } -func (s *ListOrgPoliciesRequest) MarshalJSON() ([]byte, error) { +func (s ListOrgPoliciesRequest) MarshalJSON() ([]byte, error) { type NoMethod ListOrgPoliciesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListOrgPoliciesResponse: The response returned from the `ListOrgPolicies` @@ -1209,9 +1209,9 @@ type ListOrgPoliciesResponse struct { NullFields []string `json:"-"` } -func (s *ListOrgPoliciesResponse) MarshalJSON() ([]byte, error) { +func (s ListOrgPoliciesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListOrgPoliciesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListPolicy: Used in `policy_type` to specify how `list_policy` behaves at @@ -1324,9 +1324,9 @@ type ListPolicy struct { NullFields []string `json:"-"` } -func (s *ListPolicy) MarshalJSON() ([]byte, error) { +func (s ListPolicy) MarshalJSON() ([]byte, error) { type NoMethod ListPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListProjectsResponse: A page of the response received from the ListProjects @@ -1361,9 +1361,9 @@ type ListProjectsResponse struct { NullFields []string `json:"-"` } -func (s *ListProjectsResponse) MarshalJSON() ([]byte, error) { +func (s ListProjectsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListProjectsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MoveFolderMetadata: Metadata pertaining to the folder move process. 
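ListPolicy, whose behaviors the hunk above documents at length, takes effect when wrapped in an OrgPolicy (defined further down in this file) and sent via SetOrgPolicy. An illustrative sketch; the constraint name is only an example of a list constraint:

import (
	"context"

	crm "google.golang.org/api/cloudresourcemanager/v1"
)

// denyAllExternalIPs sets a list-style org policy on a project.
func denyAllExternalIPs(ctx context.Context, svc *crm.Service, projectID string) error {
	_, err := svc.Projects.SetOrgPolicy("projects/"+projectID, &crm.SetOrgPolicyRequest{
		Policy: &crm.OrgPolicy{
			Constraint: "constraints/compute.vmExternalIpAccess",
			// AllValues DENY disallows every value, one of the ListPolicy
			// behaviors described above.
			ListPolicy: &crm.ListPolicy{AllValues: "DENY"},
		},
	}).Context(ctx).Do()
	return err
}
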
@@ -1388,9 +1388,9 @@ type MoveFolderMetadata struct { NullFields []string `json:"-"` } -func (s *MoveFolderMetadata) MarshalJSON() ([]byte, error) { +func (s MoveFolderMetadata) MarshalJSON() ([]byte, error) { type NoMethod MoveFolderMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MoveProjectMetadata: A status object which is used as the `metadata` field @@ -1440,9 +1440,9 @@ type Operation struct { NullFields []string `json:"-"` } -func (s *Operation) MarshalJSON() ([]byte, error) { +func (s Operation) MarshalJSON() ([]byte, error) { type NoMethod Operation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OrgPolicy: Defines a Cloud Organization `Policy` which is used to specify @@ -1495,9 +1495,9 @@ type OrgPolicy struct { NullFields []string `json:"-"` } -func (s *OrgPolicy) MarshalJSON() ([]byte, error) { +func (s OrgPolicy) MarshalJSON() ([]byte, error) { type NoMethod OrgPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Organization: The root node in the resource hierarchy to which a particular @@ -1544,9 +1544,9 @@ type Organization struct { NullFields []string `json:"-"` } -func (s *Organization) MarshalJSON() ([]byte, error) { +func (s Organization) MarshalJSON() ([]byte, error) { type NoMethod Organization - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OrganizationOwner: The entity that owns an Organization. The lifetime of the @@ -1569,9 +1569,9 @@ type OrganizationOwner struct { NullFields []string `json:"-"` } -func (s *OrganizationOwner) MarshalJSON() ([]byte, error) { +func (s OrganizationOwner) MarshalJSON() ([]byte, error) { type NoMethod OrganizationOwner - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Policy: An Identity and Access Management (IAM) policy, which specifies @@ -1661,9 +1661,9 @@ type Policy struct { NullFields []string `json:"-"` } -func (s *Policy) MarshalJSON() ([]byte, error) { +func (s Policy) MarshalJSON() ([]byte, error) { type NoMethod Policy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Project: A Project is a high-level Google Cloud Platform entity. 
It is a @@ -1732,9 +1732,9 @@ type Project struct { NullFields []string `json:"-"` } -func (s *Project) MarshalJSON() ([]byte, error) { +func (s Project) MarshalJSON() ([]byte, error) { type NoMethod Project - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ProjectCreationStatus: A status object which is used as the `metadata` field @@ -1762,9 +1762,9 @@ type ProjectCreationStatus struct { NullFields []string `json:"-"` } -func (s *ProjectCreationStatus) MarshalJSON() ([]byte, error) { +func (s ProjectCreationStatus) MarshalJSON() ([]byte, error) { type NoMethod ProjectCreationStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourceId: A container to reference an id for any resource type. A @@ -1792,9 +1792,9 @@ type ResourceId struct { NullFields []string `json:"-"` } -func (s *ResourceId) MarshalJSON() ([]byte, error) { +func (s ResourceId) MarshalJSON() ([]byte, error) { type NoMethod ResourceId - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RestoreDefault: Ignores policies set above this resource and restores the @@ -1843,9 +1843,9 @@ type SearchOrganizationsRequest struct { NullFields []string `json:"-"` } -func (s *SearchOrganizationsRequest) MarshalJSON() ([]byte, error) { +func (s SearchOrganizationsRequest) MarshalJSON() ([]byte, error) { type NoMethod SearchOrganizationsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SearchOrganizationsResponse: The response returned from the @@ -1876,9 +1876,9 @@ type SearchOrganizationsResponse struct { NullFields []string `json:"-"` } -func (s *SearchOrganizationsResponse) MarshalJSON() ([]byte, error) { +func (s SearchOrganizationsResponse) MarshalJSON() ([]byte, error) { type NoMethod SearchOrganizationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SetIamPolicyRequest: Request message for `SetIamPolicy` method. @@ -1905,9 +1905,9 @@ type SetIamPolicyRequest struct { NullFields []string `json:"-"` } -func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { +func (s SetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod SetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SetOrgPolicyRequest: The request sent to the SetOrgPolicyRequest method. 
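Policy, Binding, and SetIamPolicyRequest from the hunks above compose into the usual read-modify-write flow, with the etag from the read guarding against concurrent writers and an optional Expr condition on the binding. A sketch with placeholder identifiers:

import (
	"context"

	crm "google.golang.org/api/cloudresourcemanager/v1"
)

// grantViewer appends a conditional viewer binding; carrying the etag
// from the read makes a racing writer fail this SetIamPolicy instead of
// being silently overwritten.
func grantViewer(ctx context.Context, svc *crm.Service, projectID, member string) error {
	policy, err := svc.Projects.GetIamPolicy(projectID, &crm.GetIamPolicyRequest{
		Options: &crm.GetPolicyOptions{RequestedPolicyVersion: 3},
	}).Context(ctx).Do()
	if err != nil {
		return err
	}
	policy.Version = 3 // required once a binding carries a condition
	policy.Bindings = append(policy.Bindings, &crm.Binding{
		Role:    "roles/viewer",
		Members: []string{member}, // e.g. "user:alice@example.com"
		Condition: &crm.Expr{
			Title:      "expires-2027",
			Expression: `request.time < timestamp("2027-01-01T00:00:00Z")`,
		},
	})
	_, err = svc.Projects.SetIamPolicy(projectID, &crm.SetIamPolicyRequest{
		Policy: policy,
	}).Context(ctx).Do()
	return err
}
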
@@ -1927,9 +1927,9 @@ type SetOrgPolicyRequest struct { NullFields []string `json:"-"` } -func (s *SetOrgPolicyRequest) MarshalJSON() ([]byte, error) { +func (s SetOrgPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod SetOrgPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Status: The `Status` type defines a logical error model that is suitable for @@ -1961,9 +1961,9 @@ type Status struct { NullFields []string `json:"-"` } -func (s *Status) MarshalJSON() ([]byte, error) { +func (s Status) MarshalJSON() ([]byte, error) { type NoMethod Status - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsRequest: Request message for `TestIamPermissions` method. @@ -1986,9 +1986,9 @@ type TestIamPermissionsRequest struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsResponse: Response message for `TestIamPermissions` @@ -2013,9 +2013,9 @@ type TestIamPermissionsResponse struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UndeleteFolderMetadata: A status object which is used as the `metadata` diff --git a/terraform/providers/google/vendor/google.golang.org/api/cloudresourcemanager/v3/cloudresourcemanager-gen.go b/terraform/providers/google/vendor/google.golang.org/api/cloudresourcemanager/v3/cloudresourcemanager-gen.go index c23059b83dc..3b5c6ff5c1a 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudresourcemanager/v3/cloudresourcemanager-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudresourcemanager/v3/cloudresourcemanager-gen.go @@ -317,9 +317,9 @@ type AuditConfig struct { NullFields []string `json:"-"` } -func (s *AuditConfig) MarshalJSON() ([]byte, error) { +func (s AuditConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuditLogConfig: Provides the configuration for logging a type of @@ -352,9 +352,9 @@ type AuditLogConfig struct { NullFields []string `json:"-"` } -func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { +func (s AuditLogConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditLogConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Binding: Associates `members`, or principals, with a `role`. 
@@ -451,9 +451,9 @@ type Binding struct { NullFields []string `json:"-"` } -func (s *Binding) MarshalJSON() ([]byte, error) { +func (s Binding) MarshalJSON() ([]byte, error) { type NoMethod Binding - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CloudresourcemanagerGoogleCloudResourcemanagerV2alpha1FolderOperation: @@ -487,9 +487,9 @@ type CloudresourcemanagerGoogleCloudResourcemanagerV2alpha1FolderOperation struc NullFields []string `json:"-"` } -func (s *CloudresourcemanagerGoogleCloudResourcemanagerV2alpha1FolderOperation) MarshalJSON() ([]byte, error) { +func (s CloudresourcemanagerGoogleCloudResourcemanagerV2alpha1FolderOperation) MarshalJSON() ([]byte, error) { type NoMethod CloudresourcemanagerGoogleCloudResourcemanagerV2alpha1FolderOperation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CloudresourcemanagerGoogleCloudResourcemanagerV2beta1FolderOperation: @@ -523,9 +523,9 @@ type CloudresourcemanagerGoogleCloudResourcemanagerV2beta1FolderOperation struct NullFields []string `json:"-"` } -func (s *CloudresourcemanagerGoogleCloudResourcemanagerV2beta1FolderOperation) MarshalJSON() ([]byte, error) { +func (s CloudresourcemanagerGoogleCloudResourcemanagerV2beta1FolderOperation) MarshalJSON() ([]byte, error) { type NoMethod CloudresourcemanagerGoogleCloudResourcemanagerV2beta1FolderOperation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateFolderMetadata: Metadata pertaining to the Folder creation process. 
@@ -548,9 +548,9 @@ type CreateFolderMetadata struct { NullFields []string `json:"-"` } -func (s *CreateFolderMetadata) MarshalJSON() ([]byte, error) { +func (s CreateFolderMetadata) MarshalJSON() ([]byte, error) { type NoMethod CreateFolderMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateProjectMetadata: A status object which is used as the `metadata` field @@ -578,9 +578,9 @@ type CreateProjectMetadata struct { NullFields []string `json:"-"` } -func (s *CreateProjectMetadata) MarshalJSON() ([]byte, error) { +func (s CreateProjectMetadata) MarshalJSON() ([]byte, error) { type NoMethod CreateProjectMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateTagBindingMetadata: Runtime operation information for creating a @@ -668,9 +668,9 @@ type EffectiveTag struct { NullFields []string `json:"-"` } -func (s *EffectiveTag) MarshalJSON() ([]byte, error) { +func (s EffectiveTag) MarshalJSON() ([]byte, error) { type NoMethod EffectiveTag - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Empty: A generic empty message that you can re-use to avoid defining @@ -725,9 +725,9 @@ type Expr struct { NullFields []string `json:"-"` } -func (s *Expr) MarshalJSON() ([]byte, error) { +func (s Expr) MarshalJSON() ([]byte, error) { type NoMethod Expr - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Folder: A folder in an organization's resource hierarchy, used to organize @@ -786,9 +786,9 @@ type Folder struct { NullFields []string `json:"-"` } -func (s *Folder) MarshalJSON() ([]byte, error) { +func (s Folder) MarshalJSON() ([]byte, error) { type NoMethod Folder - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FolderOperation: Metadata describing a long running folder operation @@ -821,9 +821,9 @@ type FolderOperation struct { NullFields []string `json:"-"` } -func (s *FolderOperation) MarshalJSON() ([]byte, error) { +func (s FolderOperation) MarshalJSON() ([]byte, error) { type NoMethod FolderOperation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FolderOperationError: A classification of the Folder Operation error. @@ -863,9 +863,9 @@ type FolderOperationError struct { NullFields []string `json:"-"` } -func (s *FolderOperationError) MarshalJSON() ([]byte, error) { +func (s FolderOperationError) MarshalJSON() ([]byte, error) { type NoMethod FolderOperationError - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetIamPolicyRequest: Request message for `GetIamPolicy` method. 
@@ -886,9 +886,9 @@ type GetIamPolicyRequest struct { NullFields []string `json:"-"` } -func (s *GetIamPolicyRequest) MarshalJSON() ([]byte, error) { +func (s GetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod GetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetPolicyOptions: Encapsulates settings provided to GetIamPolicy. @@ -918,9 +918,9 @@ type GetPolicyOptions struct { NullFields []string `json:"-"` } -func (s *GetPolicyOptions) MarshalJSON() ([]byte, error) { +func (s GetPolicyOptions) MarshalJSON() ([]byte, error) { type NoMethod GetPolicyOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Lien: A Lien represents an encumbrance on the actions that can be performed @@ -965,9 +965,9 @@ type Lien struct { NullFields []string `json:"-"` } -func (s *Lien) MarshalJSON() ([]byte, error) { +func (s Lien) MarshalJSON() ([]byte, error) { type NoMethod Lien - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListEffectiveTagsResponse: The response of ListEffectiveTags. @@ -999,9 +999,9 @@ type ListEffectiveTagsResponse struct { NullFields []string `json:"-"` } -func (s *ListEffectiveTagsResponse) MarshalJSON() ([]byte, error) { +func (s ListEffectiveTagsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListEffectiveTagsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListFoldersResponse: The ListFolders response message. @@ -1028,9 +1028,9 @@ type ListFoldersResponse struct { NullFields []string `json:"-"` } -func (s *ListFoldersResponse) MarshalJSON() ([]byte, error) { +func (s ListFoldersResponse) MarshalJSON() ([]byte, error) { type NoMethod ListFoldersResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListLiensResponse: The response message for Liens.ListLiens. @@ -1056,9 +1056,9 @@ type ListLiensResponse struct { NullFields []string `json:"-"` } -func (s *ListLiensResponse) MarshalJSON() ([]byte, error) { +func (s ListLiensResponse) MarshalJSON() ([]byte, error) { type NoMethod ListLiensResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListProjectsResponse: A page of the response received from the ListProjects @@ -1093,9 +1093,9 @@ type ListProjectsResponse struct { NullFields []string `json:"-"` } -func (s *ListProjectsResponse) MarshalJSON() ([]byte, error) { +func (s ListProjectsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListProjectsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListTagBindingsResponse: The ListTagBindings response. 
@@ -1127,9 +1127,9 @@ type ListTagBindingsResponse struct { NullFields []string `json:"-"` } -func (s *ListTagBindingsResponse) MarshalJSON() ([]byte, error) { +func (s ListTagBindingsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListTagBindingsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListTagHoldsResponse: The ListTagHolds response. @@ -1160,9 +1160,9 @@ type ListTagHoldsResponse struct { NullFields []string `json:"-"` } -func (s *ListTagHoldsResponse) MarshalJSON() ([]byte, error) { +func (s ListTagHoldsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListTagHoldsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListTagKeysResponse: The ListTagKeys response message. @@ -1189,9 +1189,9 @@ type ListTagKeysResponse struct { NullFields []string `json:"-"` } -func (s *ListTagKeysResponse) MarshalJSON() ([]byte, error) { +func (s ListTagKeysResponse) MarshalJSON() ([]byte, error) { type NoMethod ListTagKeysResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListTagValuesResponse: The ListTagValues response. @@ -1220,9 +1220,9 @@ type ListTagValuesResponse struct { NullFields []string `json:"-"` } -func (s *ListTagValuesResponse) MarshalJSON() ([]byte, error) { +func (s ListTagValuesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListTagValuesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MoveFolderMetadata: Metadata pertaining to the folder move process. @@ -1247,9 +1247,9 @@ type MoveFolderMetadata struct { NullFields []string `json:"-"` } -func (s *MoveFolderMetadata) MarshalJSON() ([]byte, error) { +func (s MoveFolderMetadata) MarshalJSON() ([]byte, error) { type NoMethod MoveFolderMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MoveFolderRequest: The MoveFolder request message. 
@@ -1271,9 +1271,9 @@ type MoveFolderRequest struct { NullFields []string `json:"-"` } -func (s *MoveFolderRequest) MarshalJSON() ([]byte, error) { +func (s MoveFolderRequest) MarshalJSON() ([]byte, error) { type NoMethod MoveFolderRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MoveProjectMetadata: A status object which is used as the `metadata` field @@ -1298,9 +1298,9 @@ type MoveProjectRequest struct { NullFields []string `json:"-"` } -func (s *MoveProjectRequest) MarshalJSON() ([]byte, error) { +func (s MoveProjectRequest) MarshalJSON() ([]byte, error) { type NoMethod MoveProjectRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Operation: This resource represents a long-running operation that is the @@ -1345,9 +1345,9 @@ type Operation struct { NullFields []string `json:"-"` } -func (s *Operation) MarshalJSON() ([]byte, error) { +func (s Operation) MarshalJSON() ([]byte, error) { type NoMethod Operation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Organization: The root node in the resource hierarchy to which a particular @@ -1402,9 +1402,9 @@ type Organization struct { NullFields []string `json:"-"` } -func (s *Organization) MarshalJSON() ([]byte, error) { +func (s Organization) MarshalJSON() ([]byte, error) { type NoMethod Organization - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Policy: An Identity and Access Management (IAM) policy, which specifies @@ -1494,9 +1494,9 @@ type Policy struct { NullFields []string `json:"-"` } -func (s *Policy) MarshalJSON() ([]byte, error) { +func (s Policy) MarshalJSON() ([]byte, error) { type NoMethod Policy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Project: A project is a high-level Google Cloud entity. It is a container @@ -1569,9 +1569,9 @@ type Project struct { NullFields []string `json:"-"` } -func (s *Project) MarshalJSON() ([]byte, error) { +func (s Project) MarshalJSON() ([]byte, error) { type NoMethod Project - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ProjectCreationStatus: A status object which is used as the `metadata` field @@ -1599,9 +1599,9 @@ type ProjectCreationStatus struct { NullFields []string `json:"-"` } -func (s *ProjectCreationStatus) MarshalJSON() ([]byte, error) { +func (s ProjectCreationStatus) MarshalJSON() ([]byte, error) { type NoMethod ProjectCreationStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SearchFoldersResponse: The response message for searching folders. 
@@ -1628,9 +1628,9 @@ type SearchFoldersResponse struct { NullFields []string `json:"-"` } -func (s *SearchFoldersResponse) MarshalJSON() ([]byte, error) { +func (s SearchFoldersResponse) MarshalJSON() ([]byte, error) { type NoMethod SearchFoldersResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SearchOrganizationsResponse: The response returned from the @@ -1661,9 +1661,9 @@ type SearchOrganizationsResponse struct { NullFields []string `json:"-"` } -func (s *SearchOrganizationsResponse) MarshalJSON() ([]byte, error) { +func (s SearchOrganizationsResponse) MarshalJSON() ([]byte, error) { type NoMethod SearchOrganizationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SearchProjectsResponse: A page of the response received from the @@ -1698,9 +1698,9 @@ type SearchProjectsResponse struct { NullFields []string `json:"-"` } -func (s *SearchProjectsResponse) MarshalJSON() ([]byte, error) { +func (s SearchProjectsResponse) MarshalJSON() ([]byte, error) { type NoMethod SearchProjectsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SetIamPolicyRequest: Request message for `SetIamPolicy` method. @@ -1727,9 +1727,9 @@ type SetIamPolicyRequest struct { NullFields []string `json:"-"` } -func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { +func (s SetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod SetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Status: The `Status` type defines a logical error model that is suitable for @@ -1761,9 +1761,9 @@ type Status struct { NullFields []string `json:"-"` } -func (s *Status) MarshalJSON() ([]byte, error) { +func (s Status) MarshalJSON() ([]byte, error) { type NoMethod Status - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TagBinding: A TagBinding represents a connection between a TagValue and a @@ -1800,9 +1800,9 @@ type TagBinding struct { NullFields []string `json:"-"` } -func (s *TagBinding) MarshalJSON() ([]byte, error) { +func (s TagBinding) MarshalJSON() ([]byte, error) { type NoMethod TagBinding - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TagHold: A TagHold represents the use of a TagValue that is not captured by @@ -1845,9 +1845,9 @@ type TagHold struct { NullFields []string `json:"-"` } -func (s *TagHold) MarshalJSON() ([]byte, error) { +func (s TagHold) MarshalJSON() ([]byte, error) { type NoMethod TagHold - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TagKey: A TagKey, used to group a set of TagValues. 
@@ -1922,9 +1922,9 @@ type TagKey struct { NullFields []string `json:"-"` } -func (s *TagKey) MarshalJSON() ([]byte, error) { +func (s TagKey) MarshalJSON() ([]byte, error) { type NoMethod TagKey - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TagValue: A TagValue is a child of a particular TagKey. This is used to @@ -1973,9 +1973,9 @@ type TagValue struct { NullFields []string `json:"-"` } -func (s *TagValue) MarshalJSON() ([]byte, error) { +func (s TagValue) MarshalJSON() ([]byte, error) { type NoMethod TagValue - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsRequest: Request message for `TestIamPermissions` method. @@ -1998,9 +1998,9 @@ type TestIamPermissionsRequest struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsResponse: Response message for `TestIamPermissions` @@ -2025,9 +2025,9 @@ type TestIamPermissionsResponse struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UndeleteFolderMetadata: A status object which is used as the `metadata` diff --git a/terraform/providers/google/vendor/google.golang.org/api/composer/v1/composer-api.json b/terraform/providers/google/vendor/google.golang.org/api/composer/v1/composer-api.json index 4eebf7439c3..4ca5f590bbe 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/composer/v1/composer-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/composer/v1/composer-api.json @@ -15,6 +15,73 @@ "description": "Manages Apache Airflow environments on Google Cloud Platform.", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/composer/", + "endpoints": [ + { + "description": "Regional Endpoint", + "endpointUrl": "https://composer.us-south1.rep.googleapis.com/", + "location": "us-south1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://composer.europe-west3.rep.googleapis.com/", + "location": "europe-west3" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://composer.europe-west8.rep.googleapis.com/", + "location": "europe-west8" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://composer.europe-west9.rep.googleapis.com/", + "location": "europe-west9" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://composer.me-central2.rep.googleapis.com/", + "location": "me-central2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://composer.us-central1.rep.googleapis.com/", + "location": "us-central1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://composer.us-east1.rep.googleapis.com/", + "location": "us-east1" + }, + { + "description": "Regional Endpoint", + 
"endpointUrl": "https://composer.us-east4.rep.googleapis.com/", + "location": "us-east4" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://composer.us-east5.rep.googleapis.com/", + "location": "us-east5" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://composer.us-west1.rep.googleapis.com/", + "location": "us-west1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://composer.us-west2.rep.googleapis.com/", + "location": "us-west2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://composer.us-west3.rep.googleapis.com/", + "location": "us-west3" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://composer.us-west4.rep.googleapis.com/", + "location": "us-west4" + } + ], "fullyEncodeReservedExpansion": true, "icons": { "x16": "http://www.google.com/images/icons/product/search-16.gif", @@ -966,7 +1033,7 @@ } } }, - "revision": "20240520", + "revision": "20241007", "rootUrl": "https://composer.googleapis.com/", "schemas": { "AirflowMetadataRetentionPolicyConfig": { @@ -1304,6 +1371,11 @@ "description": "The resource name of the environment, in the form: \"projects/{projectId}/locations/{locationId}/environments/{environmentId}\" EnvironmentId must start with a lowercase letter followed by up to 63 lowercase letters, numbers, or hyphens, and cannot end with a hyphen.", "type": "string" }, + "satisfiesPzi": { + "description": "Output only. Reserved for future use.", + "readOnly": true, + "type": "boolean" + }, "satisfiesPzs": { "description": "Output only. Reserved for future use.", "readOnly": true, diff --git a/terraform/providers/google/vendor/google.golang.org/api/composer/v1/composer-gen.go b/terraform/providers/google/vendor/google.golang.org/api/composer/v1/composer-gen.go index 33976a0f389..2f355b53686 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/composer/v1/composer-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/composer/v1/composer-gen.go @@ -273,9 +273,9 @@ type AirflowMetadataRetentionPolicyConfig struct { NullFields []string `json:"-"` } -func (s *AirflowMetadataRetentionPolicyConfig) MarshalJSON() ([]byte, error) { +func (s AirflowMetadataRetentionPolicyConfig) MarshalJSON() ([]byte, error) { type NoMethod AirflowMetadataRetentionPolicyConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AllowedIpRange: Allowed IP range with user-provided description. @@ -303,9 +303,9 @@ type AllowedIpRange struct { NullFields []string `json:"-"` } -func (s *AllowedIpRange) MarshalJSON() ([]byte, error) { +func (s AllowedIpRange) MarshalJSON() ([]byte, error) { type NoMethod AllowedIpRange - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CheckUpgradeRequest: Request to check whether image upgrade will succeed. 
@@ -343,9 +343,9 @@ type CheckUpgradeRequest struct { NullFields []string `json:"-"` } -func (s *CheckUpgradeRequest) MarshalJSON() ([]byte, error) { +func (s CheckUpgradeRequest) MarshalJSON() ([]byte, error) { type NoMethod CheckUpgradeRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CheckUpgradeResponse: Message containing information about the result of an @@ -383,9 +383,9 @@ type CheckUpgradeResponse struct { NullFields []string `json:"-"` } -func (s *CheckUpgradeResponse) MarshalJSON() ([]byte, error) { +func (s CheckUpgradeResponse) MarshalJSON() ([]byte, error) { type NoMethod CheckUpgradeResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CidrBlock: CIDR block with an optional name. @@ -407,9 +407,9 @@ type CidrBlock struct { NullFields []string `json:"-"` } -func (s *CidrBlock) MarshalJSON() ([]byte, error) { +func (s CidrBlock) MarshalJSON() ([]byte, error) { type NoMethod CidrBlock - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CloudDataLineageIntegration: Configuration for Cloud Data Lineage @@ -430,9 +430,9 @@ type CloudDataLineageIntegration struct { NullFields []string `json:"-"` } -func (s *CloudDataLineageIntegration) MarshalJSON() ([]byte, error) { +func (s CloudDataLineageIntegration) MarshalJSON() ([]byte, error) { type NoMethod CloudDataLineageIntegration - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ComposerWorkload: Information about a single workload. @@ -468,9 +468,9 @@ type ComposerWorkload struct { NullFields []string `json:"-"` } -func (s *ComposerWorkload) MarshalJSON() ([]byte, error) { +func (s ComposerWorkload) MarshalJSON() ([]byte, error) { type NoMethod ComposerWorkload - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ComposerWorkloadStatus: Workload status. 
@@ -504,9 +504,9 @@ type ComposerWorkloadStatus struct { NullFields []string `json:"-"` } -func (s *ComposerWorkloadStatus) MarshalJSON() ([]byte, error) { +func (s ComposerWorkloadStatus) MarshalJSON() ([]byte, error) { type NoMethod ComposerWorkloadStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DagProcessorResource: Configuration for resources used by Airflow DAG @@ -538,9 +538,9 @@ type DagProcessorResource struct { NullFields []string `json:"-"` } -func (s *DagProcessorResource) MarshalJSON() ([]byte, error) { +func (s DagProcessorResource) MarshalJSON() ([]byte, error) { type NoMethod DagProcessorResource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *DagProcessorResource) UnmarshalJSON(data []byte) error { @@ -584,9 +584,9 @@ type DataRetentionConfig struct { NullFields []string `json:"-"` } -func (s *DataRetentionConfig) MarshalJSON() ([]byte, error) { +func (s DataRetentionConfig) MarshalJSON() ([]byte, error) { type NoMethod DataRetentionConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DatabaseConfig: The configuration of Cloud SQL instance that is used by the @@ -617,9 +617,9 @@ type DatabaseConfig struct { NullFields []string `json:"-"` } -func (s *DatabaseConfig) MarshalJSON() ([]byte, error) { +func (s DatabaseConfig) MarshalJSON() ([]byte, error) { type NoMethod DatabaseConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DatabaseFailoverRequest: Request to trigger database failover (only for @@ -664,9 +664,9 @@ type Date struct { NullFields []string `json:"-"` } -func (s *Date) MarshalJSON() ([]byte, error) { +func (s Date) MarshalJSON() ([]byte, error) { type NoMethod Date - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Empty: A generic empty message that you can re-use to avoid defining @@ -699,9 +699,9 @@ type EncryptionConfig struct { NullFields []string `json:"-"` } -func (s *EncryptionConfig) MarshalJSON() ([]byte, error) { +func (s EncryptionConfig) MarshalJSON() ([]byte, error) { type NoMethod EncryptionConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Environment: An environment for running orchestration tasks. @@ -722,6 +722,8 @@ type Environment struct { // EnvironmentId must start with a lowercase letter followed by up to 63 // lowercase letters, numbers, or hyphens, and cannot end with a hyphen. Name string `json:"name,omitempty"` + // SatisfiesPzi: Output only. Reserved for future use. + SatisfiesPzi bool `json:"satisfiesPzi,omitempty"` // SatisfiesPzs: Output only. Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` // State: The current state of the environment. 
@@ -760,9 +762,9 @@ type Environment struct { NullFields []string `json:"-"` } -func (s *Environment) MarshalJSON() ([]byte, error) { +func (s Environment) MarshalJSON() ([]byte, error) { type NoMethod Environment - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // EnvironmentConfig: Configuration information for an environment. @@ -874,9 +876,9 @@ type EnvironmentConfig struct { NullFields []string `json:"-"` } -func (s *EnvironmentConfig) MarshalJSON() ([]byte, error) { +func (s EnvironmentConfig) MarshalJSON() ([]byte, error) { type NoMethod EnvironmentConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExecuteAirflowCommandRequest: Execute Airflow Command request. @@ -903,9 +905,9 @@ type ExecuteAirflowCommandRequest struct { NullFields []string `json:"-"` } -func (s *ExecuteAirflowCommandRequest) MarshalJSON() ([]byte, error) { +func (s ExecuteAirflowCommandRequest) MarshalJSON() ([]byte, error) { type NoMethod ExecuteAirflowCommandRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExecuteAirflowCommandResponse: Response to ExecuteAirflowCommandRequest. @@ -934,9 +936,9 @@ type ExecuteAirflowCommandResponse struct { NullFields []string `json:"-"` } -func (s *ExecuteAirflowCommandResponse) MarshalJSON() ([]byte, error) { +func (s ExecuteAirflowCommandResponse) MarshalJSON() ([]byte, error) { type NoMethod ExecuteAirflowCommandResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExitInfo: Information about how a command ended. 
@@ -958,9 +960,9 @@ type ExitInfo struct { NullFields []string `json:"-"` } -func (s *ExitInfo) MarshalJSON() ([]byte, error) { +func (s ExitInfo) MarshalJSON() ([]byte, error) { type NoMethod ExitInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FetchDatabasePropertiesResponse: Response for @@ -993,9 +995,9 @@ type FetchDatabasePropertiesResponse struct { NullFields []string `json:"-"` } -func (s *FetchDatabasePropertiesResponse) MarshalJSON() ([]byte, error) { +func (s FetchDatabasePropertiesResponse) MarshalJSON() ([]byte, error) { type NoMethod FetchDatabasePropertiesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // IPAllocationPolicy: Configuration for controlling how IPs are allocated in @@ -1050,9 +1052,9 @@ type IPAllocationPolicy struct { NullFields []string `json:"-"` } -func (s *IPAllocationPolicy) MarshalJSON() ([]byte, error) { +func (s IPAllocationPolicy) MarshalJSON() ([]byte, error) { type NoMethod IPAllocationPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ImageVersion: ImageVersion information @@ -1086,9 +1088,9 @@ type ImageVersion struct { NullFields []string `json:"-"` } -func (s *ImageVersion) MarshalJSON() ([]byte, error) { +func (s ImageVersion) MarshalJSON() ([]byte, error) { type NoMethod ImageVersion - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Line: Contains information about a single line from logs. @@ -1110,9 +1112,9 @@ type Line struct { NullFields []string `json:"-"` } -func (s *Line) MarshalJSON() ([]byte, error) { +func (s Line) MarshalJSON() ([]byte, error) { type NoMethod Line - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListEnvironmentsResponse: The environments in a project and location. @@ -1138,9 +1140,9 @@ type ListEnvironmentsResponse struct { NullFields []string `json:"-"` } -func (s *ListEnvironmentsResponse) MarshalJSON() ([]byte, error) { +func (s ListEnvironmentsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListEnvironmentsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListImageVersionsResponse: The ImageVersions in a project and location. @@ -1165,9 +1167,9 @@ type ListImageVersionsResponse struct { NullFields []string `json:"-"` } -func (s *ListImageVersionsResponse) MarshalJSON() ([]byte, error) { +func (s ListImageVersionsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListImageVersionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListOperationsResponse: The response message for Operations.ListOperations. 
@@ -1193,9 +1195,9 @@ type ListOperationsResponse struct { NullFields []string `json:"-"` } -func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { +func (s ListOperationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListOperationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListUserWorkloadsConfigMapsResponse: The user workloads ConfigMaps for a @@ -1222,9 +1224,9 @@ type ListUserWorkloadsConfigMapsResponse struct { NullFields []string `json:"-"` } -func (s *ListUserWorkloadsConfigMapsResponse) MarshalJSON() ([]byte, error) { +func (s ListUserWorkloadsConfigMapsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListUserWorkloadsConfigMapsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListUserWorkloadsSecretsResponse: The user workloads Secrets for a given @@ -1251,9 +1253,9 @@ type ListUserWorkloadsSecretsResponse struct { NullFields []string `json:"-"` } -func (s *ListUserWorkloadsSecretsResponse) MarshalJSON() ([]byte, error) { +func (s ListUserWorkloadsSecretsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListUserWorkloadsSecretsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListWorkloadsResponse: Response to ListWorkloadsRequest. @@ -1278,9 +1280,9 @@ type ListWorkloadsResponse struct { NullFields []string `json:"-"` } -func (s *ListWorkloadsResponse) MarshalJSON() ([]byte, error) { +func (s ListWorkloadsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListWorkloadsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LoadSnapshotRequest: Request to load a snapshot into a Cloud Composer @@ -1314,9 +1316,9 @@ type LoadSnapshotRequest struct { NullFields []string `json:"-"` } -func (s *LoadSnapshotRequest) MarshalJSON() ([]byte, error) { +func (s LoadSnapshotRequest) MarshalJSON() ([]byte, error) { type NoMethod LoadSnapshotRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LoadSnapshotResponse: Response to LoadSnapshotRequest. 
@@ -1354,9 +1356,9 @@ type MaintenanceWindow struct { NullFields []string `json:"-"` } -func (s *MaintenanceWindow) MarshalJSON() ([]byte, error) { +func (s MaintenanceWindow) MarshalJSON() ([]byte, error) { type NoMethod MaintenanceWindow - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MasterAuthorizedNetworksConfig: Configuration options for the master @@ -1383,9 +1385,9 @@ type MasterAuthorizedNetworksConfig struct { NullFields []string `json:"-"` } -func (s *MasterAuthorizedNetworksConfig) MarshalJSON() ([]byte, error) { +func (s MasterAuthorizedNetworksConfig) MarshalJSON() ([]byte, error) { type NoMethod MasterAuthorizedNetworksConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkingConfig: Configuration options for networking connections in the @@ -1417,9 +1419,9 @@ type NetworkingConfig struct { NullFields []string `json:"-"` } -func (s *NetworkingConfig) MarshalJSON() ([]byte, error) { +func (s NetworkingConfig) MarshalJSON() ([]byte, error) { type NoMethod NetworkingConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeConfig: The configuration information for the Kubernetes Engine nodes @@ -1535,9 +1537,9 @@ type NodeConfig struct { NullFields []string `json:"-"` } -func (s *NodeConfig) MarshalJSON() ([]byte, error) { +func (s NodeConfig) MarshalJSON() ([]byte, error) { type NoMethod NodeConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Operation: This resource represents a long-running operation that is the @@ -1582,9 +1584,9 @@ type Operation struct { NullFields []string `json:"-"` } -func (s *Operation) MarshalJSON() ([]byte, error) { +func (s Operation) MarshalJSON() ([]byte, error) { type NoMethod Operation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationMetadata: Metadata describing an operation. @@ -1635,9 +1637,9 @@ type OperationMetadata struct { NullFields []string `json:"-"` } -func (s *OperationMetadata) MarshalJSON() ([]byte, error) { +func (s OperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod OperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PollAirflowCommandRequest: Poll Airflow Command request. @@ -1663,9 +1665,9 @@ type PollAirflowCommandRequest struct { NullFields []string `json:"-"` } -func (s *PollAirflowCommandRequest) MarshalJSON() ([]byte, error) { +func (s PollAirflowCommandRequest) MarshalJSON() ([]byte, error) { type NoMethod PollAirflowCommandRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PollAirflowCommandResponse: Response to PollAirflowCommandRequest. 
@@ -1694,9 +1696,9 @@ type PollAirflowCommandResponse struct { NullFields []string `json:"-"` } -func (s *PollAirflowCommandResponse) MarshalJSON() ([]byte, error) { +func (s PollAirflowCommandResponse) MarshalJSON() ([]byte, error) { type NoMethod PollAirflowCommandResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PrivateClusterConfig: Configuration options for the private GKE cluster in a @@ -1728,9 +1730,9 @@ type PrivateClusterConfig struct { NullFields []string `json:"-"` } -func (s *PrivateClusterConfig) MarshalJSON() ([]byte, error) { +func (s PrivateClusterConfig) MarshalJSON() ([]byte, error) { type NoMethod PrivateClusterConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PrivateEnvironmentConfig: The configuration information for configuring a @@ -1806,9 +1808,9 @@ type PrivateEnvironmentConfig struct { NullFields []string `json:"-"` } -func (s *PrivateEnvironmentConfig) MarshalJSON() ([]byte, error) { +func (s PrivateEnvironmentConfig) MarshalJSON() ([]byte, error) { type NoMethod PrivateEnvironmentConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RecoveryConfig: The Recovery settings of an environment. @@ -1829,9 +1831,9 @@ type RecoveryConfig struct { NullFields []string `json:"-"` } -func (s *RecoveryConfig) MarshalJSON() ([]byte, error) { +func (s RecoveryConfig) MarshalJSON() ([]byte, error) { type NoMethod RecoveryConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SaveSnapshotRequest: Request to create a snapshot of a Cloud Composer @@ -1853,9 +1855,9 @@ type SaveSnapshotRequest struct { NullFields []string `json:"-"` } -func (s *SaveSnapshotRequest) MarshalJSON() ([]byte, error) { +func (s SaveSnapshotRequest) MarshalJSON() ([]byte, error) { type NoMethod SaveSnapshotRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SaveSnapshotResponse: Response to SaveSnapshotRequest. @@ -1877,9 +1879,9 @@ type SaveSnapshotResponse struct { NullFields []string `json:"-"` } -func (s *SaveSnapshotResponse) MarshalJSON() ([]byte, error) { +func (s SaveSnapshotResponse) MarshalJSON() ([]byte, error) { type NoMethod SaveSnapshotResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ScheduledSnapshotsConfig: The configuration for scheduled snapshot creation @@ -1910,9 +1912,9 @@ type ScheduledSnapshotsConfig struct { NullFields []string `json:"-"` } -func (s *ScheduledSnapshotsConfig) MarshalJSON() ([]byte, error) { +func (s ScheduledSnapshotsConfig) MarshalJSON() ([]byte, error) { type NoMethod ScheduledSnapshotsConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SchedulerResource: Configuration for resources used by Airflow schedulers. 
@@ -1940,9 +1942,9 @@ type SchedulerResource struct { NullFields []string `json:"-"` } -func (s *SchedulerResource) MarshalJSON() ([]byte, error) { +func (s SchedulerResource) MarshalJSON() ([]byte, error) { type NoMethod SchedulerResource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *SchedulerResource) UnmarshalJSON(data []byte) error { @@ -2051,9 +2053,9 @@ type SoftwareConfig struct { NullFields []string `json:"-"` } -func (s *SoftwareConfig) MarshalJSON() ([]byte, error) { +func (s SoftwareConfig) MarshalJSON() ([]byte, error) { type NoMethod SoftwareConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Status: The `Status` type defines a logical error model that is suitable for @@ -2085,9 +2087,9 @@ type Status struct { NullFields []string `json:"-"` } -func (s *Status) MarshalJSON() ([]byte, error) { +func (s Status) MarshalJSON() ([]byte, error) { type NoMethod Status - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StopAirflowCommandRequest: Stop Airflow Command request. @@ -2114,9 +2116,9 @@ type StopAirflowCommandRequest struct { NullFields []string `json:"-"` } -func (s *StopAirflowCommandRequest) MarshalJSON() ([]byte, error) { +func (s StopAirflowCommandRequest) MarshalJSON() ([]byte, error) { type NoMethod StopAirflowCommandRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StopAirflowCommandResponse: Response to StopAirflowCommandRequest. @@ -2141,9 +2143,9 @@ type StopAirflowCommandResponse struct { NullFields []string `json:"-"` } -func (s *StopAirflowCommandResponse) MarshalJSON() ([]byte, error) { +func (s StopAirflowCommandResponse) MarshalJSON() ([]byte, error) { type NoMethod StopAirflowCommandResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StorageConfig: The configuration for data storage in the environment. @@ -2164,9 +2166,9 @@ type StorageConfig struct { NullFields []string `json:"-"` } -func (s *StorageConfig) MarshalJSON() ([]byte, error) { +func (s StorageConfig) MarshalJSON() ([]byte, error) { type NoMethod StorageConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TaskLogsRetentionConfig: The configuration setting for Task Logs. @@ -2193,9 +2195,9 @@ type TaskLogsRetentionConfig struct { NullFields []string `json:"-"` } -func (s *TaskLogsRetentionConfig) MarshalJSON() ([]byte, error) { +func (s TaskLogsRetentionConfig) MarshalJSON() ([]byte, error) { type NoMethod TaskLogsRetentionConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TriggererResource: Configuration for resources used by Airflow triggerers. 
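Note that only MarshalJSON changes receiver kind in these hunks: the generated UnmarshalJSON methods (on DagProcessorResource and SchedulerResource above, and on TriggererResource, WebServerResource, and WorkerResource below) keep the pointer receiver, since unmarshaling has to write through the receiver. A minimal sketch of why the asymmetry is correct, again with a hypothetical Resource type rather than the generated code:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type Resource struct{ Cpu float64 }

    // Marshal only reads the struct, so a value receiver is enough.
    func (r Resource) MarshalJSON() ([]byte, error) {
        type noMethod Resource // drop methods to avoid infinite recursion
        return json.Marshal(noMethod(r))
    }

    // Unmarshal must mutate the struct, so it keeps a pointer receiver,
    // mirroring the generated methods left unchanged in these hunks.
    func (r *Resource) UnmarshalJSON(data []byte) error {
        type noMethod Resource
        var aux noMethod
        if err := json.Unmarshal(data, &aux); err != nil {
            return err
        }
        *r = Resource(aux)
        return nil
    }

    func main() {
        var r Resource
        _ = json.Unmarshal([]byte(`{"Cpu":0.5}`), &r)
        b, _ := json.Marshal(r)
        fmt.Println(string(b)) // {"Cpu":0.5}
    }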
@@ -2220,9 +2222,9 @@ type TriggererResource struct { NullFields []string `json:"-"` } -func (s *TriggererResource) MarshalJSON() ([]byte, error) { +func (s TriggererResource) MarshalJSON() ([]byte, error) { type NoMethod TriggererResource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *TriggererResource) UnmarshalJSON(data []byte) error { @@ -2268,9 +2270,9 @@ type UserWorkloadsConfigMap struct { NullFields []string `json:"-"` } -func (s *UserWorkloadsConfigMap) MarshalJSON() ([]byte, error) { +func (s UserWorkloadsConfigMap) MarshalJSON() ([]byte, error) { type NoMethod UserWorkloadsConfigMap - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UserWorkloadsSecret: User workloads Secret used by Airflow tasks that run @@ -2301,9 +2303,9 @@ type UserWorkloadsSecret struct { NullFields []string `json:"-"` } -func (s *UserWorkloadsSecret) MarshalJSON() ([]byte, error) { +func (s UserWorkloadsSecret) MarshalJSON() ([]byte, error) { type NoMethod UserWorkloadsSecret - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WebServerConfig: The configuration settings for the Airflow web server App @@ -2329,9 +2331,9 @@ type WebServerConfig struct { NullFields []string `json:"-"` } -func (s *WebServerConfig) MarshalJSON() ([]byte, error) { +func (s WebServerConfig) MarshalJSON() ([]byte, error) { type NoMethod WebServerConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WebServerNetworkAccessControl: Network-level access control policy for the @@ -2352,9 +2354,9 @@ type WebServerNetworkAccessControl struct { NullFields []string `json:"-"` } -func (s *WebServerNetworkAccessControl) MarshalJSON() ([]byte, error) { +func (s WebServerNetworkAccessControl) MarshalJSON() ([]byte, error) { type NoMethod WebServerNetworkAccessControl - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WebServerResource: Configuration for resources used by Airflow web server. 
@@ -2378,9 +2380,9 @@ type WebServerResource struct { NullFields []string `json:"-"` } -func (s *WebServerResource) MarshalJSON() ([]byte, error) { +func (s WebServerResource) MarshalJSON() ([]byte, error) { type NoMethod WebServerResource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *WebServerResource) UnmarshalJSON(data []byte) error { @@ -2428,9 +2430,9 @@ type WorkerResource struct { NullFields []string `json:"-"` } -func (s *WorkerResource) MarshalJSON() ([]byte, error) { +func (s WorkerResource) MarshalJSON() ([]byte, error) { type NoMethod WorkerResource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *WorkerResource) UnmarshalJSON(data []byte) error { @@ -2480,9 +2482,9 @@ type WorkloadsConfig struct { NullFields []string `json:"-"` } -func (s *WorkloadsConfig) MarshalJSON() ([]byte, error) { +func (s WorkloadsConfig) MarshalJSON() ([]byte, error) { type NoMethod WorkloadsConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ProjectsLocationsEnvironmentsCheckUpgradeCall struct { diff --git a/terraform/providers/google/vendor/google.golang.org/api/compute/v1/compute-api.json b/terraform/providers/google/vendor/google.golang.org/api/compute/v1/compute-api.json index 61ccfc4f168..7ce5ff86ac8 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -1915,7 +1915,7 @@ ] }, "listUsable": { - "description": "Retrieves an aggregated list of all usable backend services in the specified project.", + "description": "Retrieves a list of all usable backend services in the specified project.", "flatPath": "projects/{project}/global/backendServices/listUsable", "httpMethod": "GET", "id": "compute.backendServices.listUsable", @@ -5736,7 +5736,7 @@ ], "parameters": { "operation": { - "description": "Name of the Operations resource to delete.", + "description": "Name of the Operations resource to delete, or its unique numeric identifier.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -5767,7 +5767,7 @@ ], "parameters": { "operation": { - "description": "Name of the Operations resource to return.", + "description": "Name of the Operations resource to return, or its unique numeric identifier.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -5857,7 +5857,7 @@ ], "parameters": { "operation": { - "description": "Name of the Operations resource to return.", + "description": "Name of the Operations resource to return, or its unique numeric identifier.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -5895,7 +5895,7 @@ ], "parameters": { "operation": { - "description": "Name of the Operations resource to delete.", + "description": "Name of the Operations resource to delete, or its unique numeric identifier.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -5923,7 +5923,7 @@ ], "parameters": { "operation": { - "description": "Name of the Operations resource to return.", + "description": "Name 
of the Operations resource to return, or its unique numeric identifier.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -20280,7 +20280,7 @@ ] }, "listUsable": { - "description": "Retrieves an aggregated list of all usable backend services in the specified project in the given region.", + "description": "Retrieves a list of all usable backend services in the specified project in the given region.", "flatPath": "projects/{project}/regions/{region}/backendServices/listUsable", "httpMethod": "GET", "id": "compute.regionBackendServices.listUsable", @@ -25614,7 +25614,7 @@ ], "parameters": { "operation": { - "description": "Name of the Operations resource to delete.", + "description": "Name of the Operations resource to delete, or its unique numeric identifier.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -25653,7 +25653,7 @@ ], "parameters": { "operation": { - "description": "Name of the Operations resource to return.", + "description": "Name of the Operations resource to return, or its unique numeric identifier.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -25759,7 +25759,7 @@ ], "parameters": { "operation": { - "description": "Name of the Operations resource to return.", + "description": "Name of the Operations resource to return, or its unique numeric identifier.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -27980,7 +27980,7 @@ "regions": { "methods": { "get": { - "description": "Returns the specified Region resource. To decrease latency for this method, you can optionally omit any unneeded information from the response by using a field mask. This practice is especially recommended for unused quota information (the `quotas` field). To exclude one or more fields, set your request's `fields` query parameter to only include the fields you need. For example, to only include the `id` and `selfLink` fields, add the query parameter `?fields=id,selfLink` to your request.", + "description": "Returns the specified Region resource. To decrease latency for this method, you can optionally omit any unneeded information from the response by using a field mask. This practice is especially recommended for unused quota information (the `quotas` field). To exclude one or more fields, set your request's `fields` query parameter to only include the fields you need. For example, to only include the `id` and `selfLink` fields, add the query parameter `?fields=id,selfLink` to your request. This method fails if the quota information is unavailable for the region and if the organization policy constraint compute.requireBasicQuotaInResponse is enforced. This constraint, when enforced, disables the fail-open behaviour when quota information (the `items.quotas` field) is unavailable for the region. It is recommended to use the default setting for the constraint unless your application requires the fail-closed behaviour for this method.", "flatPath": "projects/{project}/regions/{region}", "httpMethod": "GET", "id": "compute.regions.get", @@ -32401,7 +32401,7 @@ ] }, "update": { - "description": "Updates the specified storagePool with the data included in the request. The update is performed only on selected fields included as part of update-mask. 
Only the following fields can be modified: size_tb and provisioned_iops.", + "description": "Updates the specified storagePool with the data included in the request. The update is performed only on selected fields included as part of update-mask. Only the following fields can be modified: pool_provisioned_capacity_gb, pool_provisioned_iops and pool_provisioned_throughput.", "flatPath": "projects/{project}/zones/{zone}/storagePools/{storagePool}", "httpMethod": "PATCH", "id": "compute.storagePools.update", @@ -37148,7 +37148,7 @@ ], "parameters": { "operation": { - "description": "Name of the Operations resource to delete.", + "description": "Name of the Operations resource to delete, or its unique numeric identifier.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -37187,7 +37187,7 @@ ], "parameters": { "operation": { - "description": "Name of the Operations resource to return.", + "description": "Name of the Operations resource to return, or its unique numeric identifier.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -37293,7 +37293,7 @@ ], "parameters": { "operation": { - "description": "Name of the Operations resource to return.", + "description": "Name of the Operations resource to return, or its unique numeric identifier.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -37421,7 +37421,7 @@ } } }, - "revision": "20240604", + "revision": "20241001", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -38675,6 +38675,10 @@ "format": "int32", "type": "integer" }, + "turboMode": { + "description": "Turbo frequency mode to use for the instance. Supported modes include: * ALL_CORE_MAX Using empty string or not setting this field will use the platform-specific default turbo mode.", + "type": "string" + }, "visibleCoreCount": { "description": "The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance's nominal CPU count and the underlying platform's SMT width.", "format": "int32", @@ -38719,11 +38723,15 @@ "vmFamily": { "description": "The VM family that all instances scheduled against this reservation must belong to.", "enum": [ + "VM_FAMILY_CLOUD_TPU_DEVICE_CT3", "VM_FAMILY_CLOUD_TPU_LITE_DEVICE_CT5L", "VM_FAMILY_CLOUD_TPU_LITE_POD_SLICE_CT5LP", + "VM_FAMILY_CLOUD_TPU_POD_SLICE_CT3P", "VM_FAMILY_CLOUD_TPU_POD_SLICE_CT4P" ], "enumDescriptions": [ + "", + "", "", "", "" @@ -39142,13 +39150,6 @@ }, "type": "array" }, - "exemptedMembers": { - "description": "This is deprecated and has no effect. Do not use.", - "items": { - "type": "string" - }, - "type": "array" - }, "service": { "description": "Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.", "type": "string" @@ -39167,10 +39168,6 @@ }, "type": "array" }, - "ignoreChildExemptions": { - "description": "This is deprecated and has no effect. 
Do not use.", - "type": "boolean" - }, "logType": { "description": "The log type that this config enables.", "enum": [ @@ -40122,6 +40119,13 @@ "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" + }, + "usedBy": { + "description": "[Output Only] List of resources referencing that backend bucket.", + "items": { + "$ref": "BackendBucketUsedBy" + }, + "type": "array" } }, "type": "object" @@ -40408,6 +40412,16 @@ }, "type": "object" }, + "BackendBucketUsedBy": { + "id": "BackendBucketUsedBy", + "properties": { + "reference": { + "description": "[Output Only] Server-defined URL for UrlMaps referencing that BackendBucket.", + "type": "string" + } + }, + "type": "object" + }, "BackendService": { "description": "Represents a Backend Service resource. A backend service defines how Google Cloud load balancers distribute traffic. The backend service configuration contains a set of values, such as the protocol used to connect to backends, various distribution and session settings, health checks, and timeouts. These settings provide fine-grained control over how your load balancer behaves. Most of the settings have default values that allow for easy configuration if you need to get started quickly. Backend services in Google Compute Engine can be either regionally or globally scoped. * [Global](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices) * [Regional](https://cloud.google.com/compute/docs/reference/rest/v1/regionBackendServices) For more information, see Backend Services.", "id": "BackendService", @@ -40509,6 +40523,22 @@ "format": "uint64", "type": "string" }, + "ipAddressSelectionPolicy": { + "description": "Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced global external Application Load Balancer (load balancing scheme EXTERNAL_MANAGED), - Regional external Application Load Balancer, - Internal proxy Network Load Balancer (load balancing scheme INTERNAL_MANAGED), - Regional internal Application Load Balancer (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). ", + "enum": [ + "IPV4_ONLY", + "IPV6_ONLY", + "IP_ADDRESS_SELECTION_POLICY_UNSPECIFIED", + "PREFER_IPV6" + ], + "enumDescriptions": [ + "Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. 
This is the default setting.", "Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends.", "Unspecified IP address selection policy.", "Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address)." ], "type": "string" }, "kind": { "default": "compute#backendService", "description": "[Output Only] Type of resource. Always compute#backendService for backend services.", "type": "string" }, @@ -40542,7 +40572,7 @@ "type": "array" }, "localityLbPolicy": { - "description": "The load balancing algorithm used within the scope of the locality. The possible values are: - ROUND_ROBIN: This is a simple policy in which each healthy backend is selected in round robin order. This is the default. - LEAST_REQUEST: An O(1) algorithm which selects two random healthy hosts and picks the host which has fewer active requests. - RING_HASH: The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests. - RANDOM: The load balancer selects a random healthy host. - ORIGINAL_DESTINATION: Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer. - MAGLEV: used as a drop in replacement for the ring hash load balancer. Maglev is not as stable as ring hash but has faster table lookup build times and host selection times. For more information about Maglev, see https://ai.google/research/pubs/pub44824 This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED, INTERNAL_MANAGED, or EXTERNAL_MANAGED. If sessionAffinity is not NONE, and this field is not set to MAGLEV or RING_HASH, session affinity settings will not take effect. Only ROUND_ROBIN and RING_HASH are supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", + "description": "The load balancing algorithm used within the scope of the locality. The possible values are: - ROUND_ROBIN: This is a simple policy in which each healthy backend is selected in round robin order. This is the default. - LEAST_REQUEST: An O(1) algorithm which selects two random healthy hosts and picks the host which has fewer active requests. - RING_HASH: The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests. - RANDOM: The load balancer selects a random healthy host. - ORIGINAL_DESTINATION: Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer. - MAGLEV: used as a drop in replacement for the ring hash load balancer. Maglev is not as stable as ring hash but has faster table lookup build times and host selection times. 
For more information about Maglev, see https://ai.google/research/pubs/pub44824 This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED, INTERNAL_MANAGED, or EXTERNAL_MANAGED. If sessionAffinity is not configured—that is, if session affinity remains at the default value of NONE—then the default value for localityLbPolicy is ROUND_ROBIN. If session affinity is set to a value other than NONE, then the default value for localityLbPolicy is MAGLEV. Only ROUND_ROBIN and RING_HASH are supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "enum": [ "INVALID_LB_POLICY", "LEAST_REQUEST", @@ -40687,6 +40717,7 @@ "type": "integer" }, "usedBy": { + "description": "[Output Only] List of resources referencing given backend service.", "items": { "$ref": "BackendServiceUsedBy" }, @@ -41489,6 +41520,7 @@ "id": "BackendServiceUsedBy", "properties": { "reference": { + "description": "[Output Only] Server-defined URL for resources referencing given BackendService like UrlMaps, TargetTcpProxies, TargetSslProxies and ForwardingRule.", "type": "string" } }, @@ -41923,7 +41955,7 @@ }, "locationPolicy": { "$ref": "LocationPolicy", - "description": "Policy for chosing target zone. For more information, see Create VMs in bulk ." + "description": "Policy for choosing target zone. For more information, see Create VMs in bulk." }, "minCount": { "description": "The minimum number of instances to create. If no min_count is specified then count is used as the default value. If min_count instances cannot be created, then no instances will be created and instances already created will be deleted.", @@ -42123,6 +42155,10 @@ "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, + "customEndTimestamp": { + "description": "[Input Only] Optional, specifies the CUD end time requested by the customer in RFC3339 text format. Needed when the customer wants CUD's end date is later than the start date + term duration.", + "type": "string" + }, "description": { "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" @@ -42189,6 +42225,10 @@ }, "type": "array" }, + "resourceStatus": { + "$ref": "CommitmentResourceStatus", + "description": "[Output Only] Status information for Commitment resource." + }, "resources": { "description": "A list of commitment amounts for particular resources. Note that VCPU and MEMORY resource commitments must occur together.", "items": { @@ -42242,6 +42282,7 @@ "COMPUTE_OPTIMIZED_C3D", "COMPUTE_OPTIMIZED_H3", "GENERAL_PURPOSE", + "GENERAL_PURPOSE_C4", "GENERAL_PURPOSE_E2", "GENERAL_PURPOSE_N2", "GENERAL_PURPOSE_N2D", @@ -42272,6 +42313,7 @@ "", "", "", + "", "" ], "type": "string" @@ -42590,6 +42632,17 @@ }, "type": "object" }, + "CommitmentResourceStatus": { + "description": "[Output Only] Contains output only fields.", + "id": "CommitmentResourceStatus", + "properties": { + "customTermEligibilityEndTimestamp": { + "description": "[Output Only] Indicates the end time of customer's eligibility to send custom term requests in RFC3339 text format. 
Term extension requests received after this time (not the end time specified in the request) will be rejected.", "type": "string" } }, "type": "object" }, "CommitmentsScopedList": { "id": "CommitmentsScopedList", "properties": { @@ -42813,12 +42866,14 @@ "enum": [ "CONFIDENTIAL_INSTANCE_TYPE_UNSPECIFIED", "SEV", - "SEV_SNP" + "SEV_SNP", + "TDX" ], "enumDescriptions": [ "No type specified. Do not use this value.", "AMD Secure Encrypted Virtualization.", - "AMD Secure Encrypted Virtualization - Secure Nested Paging." + "AMD Secure Encrypted Virtualization - Secure Nested Paging.", + "Intel Trust Domain eXtension." ], "type": "string" }, @@ -42935,6 +42990,47 @@ }, "type": "object" }, + "CustomErrorResponsePolicy": { + "description": "Specifies the custom error response policy that must be applied when the backend service or backend bucket responds with an error.", + "id": "CustomErrorResponsePolicy", + "properties": { + "errorResponseRules": { + "description": "Specifies rules for returning error responses. In a given policy, if you specify rules for both a range of error codes as well as rules for specific error codes then rules with specific error codes have a higher priority. For example, assume that you configure a rule for 401 (Unauthorized) code, and another for all 4 series error codes (4XX). If the backend service returns a 401, then the rule for 401 will be applied. However, if the backend service returns a 403, the rule for 4xx takes effect.", + "items": { + "$ref": "CustomErrorResponsePolicyCustomErrorResponseRule" + }, + "type": "array" + }, + "errorService": { + "description": "The full or partial URL to the BackendBucket resource that contains the custom error content. Examples are: - https://www.googleapis.com/compute/v1/projects/project/global/backendBuckets/myBackendBucket - compute/v1/projects/project/global/backendBuckets/myBackendBucket - global/backendBuckets/myBackendBucket If errorService is not specified at lower levels like pathMatcher, pathRule and routeRule, an errorService specified at a higher level in the UrlMap will be used. If UrlMap.defaultCustomErrorResponsePolicy contains one or more errorResponseRules[], it must specify errorService. If load balancer cannot reach the backendBucket, a simple Not Found Error will be returned, with the original response code (or overrideResponseCode if configured). errorService is not supported for internal or regional HTTP/HTTPS load balancers.", + "type": "string" + } + }, + "type": "object" + }, + "CustomErrorResponsePolicyCustomErrorResponseRule": { + "description": "Specifies the mapping between the response code that will be returned along with the custom error content and the response code returned by the backend service.", + "id": "CustomErrorResponsePolicyCustomErrorResponseRule", + "properties": { + "matchResponseCodes": { + "description": "Valid values include: - A number between 400 and 599: For example 401 or 503, in which case the load balancer applies the policy if the error code exactly matches this value. - 5xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 500 to 599. - 4xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 400 to 499. 
Values must be unique within matchResponseCodes and across all errorResponseRules of CustomErrorResponsePolicy.", + "items": { + "type": "string" + }, + "type": "array" + }, + "overrideResponseCode": { + "description": "The HTTP status code returned with the response containing the custom error content. If overrideResponseCode is not supplied, the same response code returned by the original backend bucket or backend service is returned to the client.", + "format": "int32", + "type": "integer" + }, + "path": { + "description": "The full path to a file within backendBucket . For example: /errors/defaultError.html path must start with a leading slash. path cannot have trailing slashes. If the file is not available in backendBucket or the load balancer cannot reach the BackendBucket, a simple Not Found Error is returned to the client. The value must be from 1 to 1024 characters", + "type": "string" + } + }, + "type": "object" + }, "CustomerEncryptionKey": { "id": "CustomerEncryptionKey", "properties": { @@ -45620,7 +45716,7 @@ "id": "FirewallPolicyRule", "properties": { "action": { - "description": "The Action to perform when the client connection triggers the rule. Valid actions are \"allow\", \"deny\" and \"goto_next\".", + "description": "The Action to perform when the client connection triggers the rule. Valid actions for firewall rules are: \"allow\", \"deny\", \"apply_security_profile_group\" and \"goto_next\". Valid actions for packet mirroring rules are: \"mirror\", \"do_not_mirror\" and \"goto_next\".", "type": "string" }, "description": { @@ -45649,7 +45745,7 @@ }, "kind": { "default": "compute#firewallPolicyRule", - "description": "[Output only] Type of the resource. Always compute#firewallPolicyRule for firewall policy rules", + "description": "[Output only] Type of the resource. Returns compute#firewallPolicyRule for firewall rules and compute#packetMirroringRule for packet mirroring rules.", "type": "string" }, "match": { @@ -45657,7 +45753,7 @@ "description": "A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced." }, "priority": { - "description": "An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority.", + "description": "An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority.", "format": "int32", "type": "integer" }, @@ -45671,7 +45767,7 @@ "type": "integer" }, "securityProfileGroup": { - "description": "A fully-qualified URL of a SecurityProfile resource instance. Example: https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group Must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions.", + "description": "A fully-qualified URL of a SecurityProfile resource instance. Example: https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group Must be specified if action is one of 'apply_security_profile_group' or 'mirror'. 
Cannot be specified for other actions.", "type": "string" }, "targetResources": { @@ -46783,6 +46879,7 @@ "SEV_LIVE_MIGRATABLE", "SEV_LIVE_MIGRATABLE_V2", "SEV_SNP_CAPABLE", + "TDX_CAPABLE", "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", "WINDOWS" ], @@ -46799,6 +46896,7 @@ "", "", "", + "", "" ], "type": "string" @@ -47026,6 +47124,13 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "sourceRegions": { + "description": "The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of Google Cloud regions. This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. - The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. ", + "items": { + "type": "string" + }, + "type": "array" + }, "sslHealthCheck": { "$ref": "SSLHealthCheck" }, @@ -47808,6 +47913,21 @@ "description": "For target pool based Network Load Balancing, it indicates the forwarding rule's IP address assigned to this instance. For other types of load balancing, the field indicates VM internal ip.", "type": "string" }, + "ipv6Address": { + "type": "string" + }, + "ipv6HealthState": { + "description": "Health state of the IPv6 address of the instance.", + "enum": [ "HEALTHY", "UNHEALTHY" ], "enumDescriptions": [ "", "" ], "type": "string" + }, "port": { "description": "The named port of the instance group, not necessarily the port that is health-checked.", "format": "int32", @@ -48419,6 +48539,10 @@ "description": "The HttpRouteRule setting specifies how to match an HTTP request and the corresponding routing action that load balancing proxies perform.", "id": "HttpRouteRule", "properties": { + "customErrorResponsePolicy": { + "$ref": "CustomErrorResponsePolicy", + "description": "customErrorResponsePolicy specifies how the Load Balancer returns error responses when BackendService or BackendBucket responds with an error. If a policy for an error code is not configured for the RouteRule, a policy for the error code configured in pathMatcher.defaultCustomErrorResponsePolicy is applied. If one is not specified in pathMatcher.defaultCustomErrorResponsePolicy, the policy configured in UrlMap.defaultCustomErrorResponsePolicy takes effect. For example, consider a UrlMap with the following configuration: - UrlMap.defaultCustomErrorResponsePolicy are configured with policies for 5xx and 4xx errors - A RouteRule for /coming_soon/ is configured for the error code 404. If the request is for www.myotherdomain.com and a 404 is encountered, the policy under UrlMap.defaultCustomErrorResponsePolicy takes effect. If a 404 response is encountered for the request www.example.com/current_events/, the pathMatcher's policy takes effect. If however, the request for www.example.com/coming_soon/ encounters a 404, the policy in RouteRule.customErrorResponsePolicy takes effect. If any of the requests in this example encounter a 500 error code, the policy at UrlMap.defaultCustomErrorResponsePolicy takes effect. When used in conjunction with routeRules.routeAction.retryPolicy, retries take precedence. 
Only once all retries are exhausted, the customErrorResponsePolicy is applied. While attempting a retry, if load balancer is successful in reaching the service, the customErrorResponsePolicy is ignored and the response from the service is returned to the client. customErrorResponsePolicy is supported only for global external Application Load Balancers." + }, "description": { "description": "The short description conveying the intent of this routeRule. The description can have a maximum length of 1024 characters.", "type": "string" @@ -49994,8 +50118,7 @@ "type": "array" }, "baseInstanceName": { - "description": "The base instance name to use for instances in this group. The value must be 1-58 characters long. Instances are named by appending a hyphen and a random four-character string to the base instance name. The base instance name must comply with RFC1035.", - "pattern": "[a-z][-a-z0-9]{0,57}", + "description": "The base instance name is a prefix that you want to attach to the names of all VMs in a MIG. The maximum character length is 58 and the name must comply with RFC1035 format. When a VM is created in the group, the MIG appends a hyphen and a random four-character string to the base instance name. If you want the MIG to assign sequential numbers instead of a random string, then end the base instance name with a hyphen followed by one or more hash symbols. The hash symbols indicate the number of digits. For example, a base instance name of \"vm-###\" results in \"vm-001\" as a VM name. @pattern [a-z](([-a-z0-9]{0,57})|([-a-z0-9]{0,51}-#{1,10}(\\\\[[0-9]{1,10}\\\\])?))", "type": "string" }, "creationTimestamp": { @@ -50605,7 +50728,7 @@ "description": "Requested run duration for instances that will be created by this request. At the end of the run duration instance will be deleted." }, "resizeBy": { - "description": "The number of instances to be created by this resize request. The group's target size will be increased by this number.", + "description": "The number of instances to be created by this resize request. The group's target size will be increased by this number. This field cannot be used together with 'instances'.", "format": "int32", "type": "integer" }, @@ -52388,7 +52511,7 @@ "compute.instanceTemplates.insert" ] }, - "description": "The machine type to use for instances that are created from these properties. This field only accept machine types name. e.g. n2-standard-4 and does not accept machine type full or partial url. e.g. projects/my-l7ilb-project/zones/us-central1-a/machineTypes/n2-standard-4 will throw INTERNAL_ERROR.", + "description": "The machine type to use for instances that are created from these properties. This field only accepts a machine type name, for example `n2-standard-4`. If you use the machine type full or partial URL, for example `projects/my-l7ilb-project/zones/us-central1-a/machineTypes/n2-standard-4`, the request will result in an `INTERNAL_ERROR`.", "type": "string" }, "metadata": { @@ -53135,6 +53258,11 @@ "description": "[Output Only] The name of the firewall policy.", "type": "string" }, + "priority": { + "description": "[Output only] Priority of firewall policy association. 
Not applicable for type=HIERARCHY.", + "format": "int32", + "type": "integer" + }, "rules": { "description": "The rules that apply to the network.", "items": { @@ -53152,9 +53280,13 @@ "HIERARCHY", "NETWORK", "NETWORK_REGIONAL", + "SYSTEM_GLOBAL", + "SYSTEM_REGIONAL", "UNSPECIFIED" ], "enumDescriptions": [ + "", + "", "", "", "", @@ -54021,7 +54153,7 @@ "type": "boolean" }, "availableFeatures": { - "description": "[Output only] List of features available for this Interconnect connection, which can take one of the following values: - MACSEC If present then the Interconnect connection is provisioned on MACsec capable hardware ports. If not present then the Interconnect connection is provisioned on non-MACsec capable ports and MACsec isn't supported and enabling MACsec fails.", + "description": "[Output only] List of features available for this Interconnect connection, which can take one of the following values: - IF_MACSEC If present then the Interconnect connection is provisioned on MACsec capable hardware ports. If not present then the Interconnect connection is provisioned on non-MACsec capable ports and MACsec isn't supported and enabling MACsec fails.", "items": { "enum": [ "IF_MACSEC" @@ -54174,7 +54306,7 @@ "type": "string" }, "requestedFeatures": { - "description": "Optional. List of features requested for this Interconnect connection, which can take one of the following values: - MACSEC If specified then the connection is created on MACsec capable hardware ports. If not specified, the default value is false, which allocates non-MACsec capable ports first if available. This parameter can be provided only with Interconnect INSERT. It isn't valid for Interconnect PATCH.", + "description": "Optional. List of features requested for this Interconnect connection, which can take one of the following values: - IF_MACSEC If specified then the connection is created on MACsec capable hardware ports. If not specified, the default value is false, which allocates non-MACsec capable ports first if available. This parameter can be provided only with Interconnect INSERT. It isn't valid for Interconnect PATCH.", "items": { "enum": [ "IF_MACSEC" @@ -56989,6 +57121,20 @@ }, "type": "array" }, + "architecture": { + "description": "[Output Only] The architecture of the machine type.", + "enum": [ + "ARCHITECTURE_UNSPECIFIED", + "ARM64", + "X86_64" + ], + "enumDescriptions": [ + "Default value indicating Architecture is not set.", + "Machines with architecture ARM64", + "Machines with architecture X86_64" + ], + "type": "string" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -58939,6 +59085,11 @@ "description": "Metadata defined as annotations on the network endpoint.", "type": "object" }, + "clientDestinationPort": { + "description": "Represents the port number to which PSC consumer sends packets. Only valid for network endpoint groups created with GCE_VM_IP_PORTMAP endpoint type.", + "format": "int32", + "type": "integer" + }, "fqdn": { "description": "Optional fully qualified domain name of network endpoint. 
This can only be specified when NetworkEndpointGroup.network_endpoint_type is NON_GCP_FQDN_PORT.", "type": "string" @@ -59018,6 +59169,7 @@ "enum": [ "GCE_VM_IP", "GCE_VM_IP_PORT", + "GCE_VM_IP_PORTMAP", "INTERNET_FQDN_PORT", "INTERNET_IP_PORT", "NON_GCP_PRIVATE_IP_PORT", @@ -59027,6 +59179,7 @@ "enumDescriptions": [ "The network endpoint is represented by an IP address.", "The network endpoint is represented by IP address and port pair.", + "The network endpoint is represented by an IP, Port and Client Destination Port.", "The network endpoint is represented by fully qualified domain name and port.", "The network endpoint is represented by an internet IP address and port.", "The network endpoint is represented by an IP address and port. The endpoint belongs to a VM or pod running in a customer's on-premises.", @@ -59437,6 +59590,11 @@ "description": "[Output Only] Address allocated from given subnetwork for PSC. This IP address acts as a VIP for a PSC NEG, allowing it to act as an endpoint in L7 PSC-XLB.", "type": "string" }, + "producerPort": { + "description": "The psc producer port is used to connect PSC NEG with specific port on the PSC Producer side; should only be used for the PRIVATE_SERVICE_CONNECT NEG type", + "format": "int32", + "type": "integer" + }, "pscConnectionId": { "description": "[Output Only] The PSC connection id of the PSC Network Endpoint Group Consumer.", "format": "uint64", @@ -59904,11 +60062,13 @@ "description": "The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations.", "enum": [ "IPV4_IPV6", - "IPV4_ONLY" + "IPV4_ONLY", + "IPV6_ONLY" ], "enumDescriptions": [ "The network interface can have both IPv4 and IPv6 addresses.", - "The network interface will be assigned IPv4 address." + "The network interface will only be assigned IPv4 addresses.", + "The network interface will only be assigned IPv6 addresses." ], "type": "string" }, @@ -60238,6 +60398,11 @@ "description": "[Output Only] The name of the firewall policy.", "type": "string" }, + "priority": { + "description": "[Output only] Priority of firewall policy association. Not applicable for type=HIERARCHY.", + "format": "int32", + "type": "integer" + }, "rules": { "description": "The rules that apply to the network.", "items": { @@ -60254,9 +60419,11 @@ "enum": [ "HIERARCHY", "NETWORK", + "SYSTEM", "UNSPECIFIED" ], "enumDescriptions": [ + "", "", "", "" @@ -62637,7 +62804,7 @@ "type": "string" }, "targetLink": { - "description": "[Output Only] The URL of the resource that the operation modifies. For operations related to creating a snapshot, this points to the persistent disk that the snapshot was created from.", + "description": "[Output Only] The URL of the resource that the operation modifies. For operations related to creating a snapshot, this points to the disk that the snapshot was created from.", "type": "string" }, "user": { @@ -63987,6 +64154,10 @@ "description": "A matcher for the path portion of the URL. The BackendService from the longest-matched rule will serve the URL. 
If no rule was matched, the default service is used.", "id": "PathMatcher", "properties": { + "defaultCustomErrorResponsePolicy": { + "$ref": "CustomErrorResponsePolicy", + "description": "defaultCustomErrorResponsePolicy specifies how the Load Balancer returns error responses when BackendService or BackendBucket responds with an error. This policy takes effect at the PathMatcher level and applies only when no policy has been defined for the error code at lower levels like RouteRule and PathRule within this PathMatcher. If an error code does not have a policy defined in defaultCustomErrorResponsePolicy, then a policy defined for the error code in UrlMap.defaultCustomErrorResponsePolicy takes effect. For example, consider a UrlMap with the following configuration: - UrlMap.defaultCustomErrorResponsePolicy is configured with policies for 5xx and 4xx errors - A RouteRule for /coming_soon/ is configured for the error code 404. If the request is for www.myotherdomain.com and a 404 is encountered, the policy under UrlMap.defaultCustomErrorResponsePolicy takes effect. If a 404 response is encountered for the request www.example.com/current_events/, the pathMatcher's policy takes effect. If however, the request for www.example.com/coming_soon/ encounters a 404, the policy in RouteRule.customErrorResponsePolicy takes effect. If any of the requests in this example encounter a 500 error code, the policy at UrlMap.defaultCustomErrorResponsePolicy takes effect. When used in conjunction with pathMatcher.defaultRouteAction.retryPolicy, retries take precedence. Only once all retries are exhausted, the defaultCustomErrorResponsePolicy is applied. While attempting a retry, if load balancer is successful in reaching the service, the defaultCustomErrorResponsePolicy is ignored and the response from the service is returned to the client. defaultCustomErrorResponsePolicy is supported only for global external Application Load Balancers." + }, "defaultRouteAction": { "$ref": "HttpRouteAction", "description": "defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. If defaultRouteAction is specified, don't set defaultUrlRedirect. If defaultRouteAction.weightedBackendServices is specified, don't set defaultService. URL maps for classic Application Load Balancers only support the urlRewrite action within a path matcher's defaultRouteAction." @@ -64032,6 +64203,10 @@ "description": "A path-matching rule for a URL. If matched, will use the specified BackendService to handle the traffic arriving at this URL.", "id": "PathRule", "properties": { + "customErrorResponsePolicy": { + "$ref": "CustomErrorResponsePolicy", + "description": "customErrorResponsePolicy specifies how the Load Balancer returns error responses when BackendService or BackendBucket responds with an error. If a policy for an error code is not configured for the PathRule, a policy for the error code configured in pathMatcher.defaultCustomErrorResponsePolicy is applied. If one is not specified in pathMatcher.defaultCustomErrorResponsePolicy, the policy configured in UrlMap.defaultCustomErrorResponsePolicy takes effect. 
For example, consider a UrlMap with the following configuration: - UrlMap.defaultCustomErrorResponsePolicy are configured with policies for 5xx and 4xx errors - A PathRule for /coming_soon/ is configured for the error code 404. If the request is for www.myotherdomain.com and a 404 is encountered, the policy under UrlMap.defaultCustomErrorResponsePolicy takes effect. If a 404 response is encountered for the request www.example.com/current_events/, the pathMatcher's policy takes effect. If however, the request for www.example.com/coming_soon/ encounters a 404, the policy in PathRule.customErrorResponsePolicy takes effect. If any of the requests in this example encounter a 500 error code, the policy at UrlMap.defaultCustomErrorResponsePolicy takes effect. customErrorResponsePolicy is supported only for global external Application Load Balancers." + }, "paths": { "description": "The list of path patterns to match. Each must start with / and the only place a * is allowed is at the end following a /. The string fed to the path matcher does not include any text after the first ? or #, and those chars are not allowed here.", "items": { @@ -68952,12 +69127,26 @@ "description": "[Output Only] An opaque ID of the host on which the VM is running.", "type": "string" }, + "scheduling": { + "$ref": "ResourceStatusScheduling" + }, "upcomingMaintenance": { "$ref": "UpcomingMaintenance" } }, "type": "object" }, + "ResourceStatusScheduling": { + "id": "ResourceStatusScheduling", + "properties": { + "availabilityDomain": { + "description": "Specifies the availability domain to place the instance in. The value must be a number between 1 and the number of availability domains specified in the spread placement policy attached to the instance.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "Route": { "description": "Represents a Route resource. A route defines a path from VM instances in the VPC network to a specific destination. This destination can be inside or outside the VPC network. For more information, read the Routes overview.", "id": "Route", @@ -69024,7 +69213,7 @@ "type": "string" }, "nextHopIlb": { - "description": "The URL to a forwarding rule of type loadBalancingScheme=INTERNAL that should handle matching packets or the IP address of the forwarding Rule. For example, the following are all valid URLs: - 10.128.0.56 - https://www.googleapis.com/compute/v1/projects/project/regions/region /forwardingRules/forwardingRule - regions/region/forwardingRules/forwardingRule ", + "description": "The URL to a forwarding rule of type loadBalancingScheme=INTERNAL that should handle matching packets or the IP address of the forwarding Rule. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /forwardingRules/forwardingRule - regions/region/forwardingRules/forwardingRule If an IP address is provided, must specify an IPv4 address in dot-decimal notation or an IPv6 address in RFC 4291 format. For example, the following are all valid IP addresses: - 10.128.0.56 - 2001:db8::2d9:51:0:0 - 2001:db8:0:0:2d9:51:0:0 IPv6 addresses will be displayed using RFC 5952 compressed format (e.g. 2001:db8::2d9:51:0:0). Should never be an IPv4-mapped IPv6 address.", "type": "string" }, "nextHopInstance": { @@ -70372,7 +70561,7 @@ "type": "string" }, "match": { - "description": "CEL expression that specifies the match condition that egress traffic from a VM is evaluated against. If it evaluates to true, the corresponding `action` is enforced. 
The following examples are valid match expressions for public NAT: \"inIpRange(destination.ip, '1.1.0.0/16') || inIpRange(destination.ip, '2.2.0.0/16')\" \"destination.ip == '1.1.0.1' || destination.ip == '8.8.8.8'\" The following example is a valid match expression for private NAT: \"nexthop.hub == '//networkconnectivity.googleapis.com/projects/my-project/locations/global/hubs/hub-1'\"", + "description": "CEL expression that specifies the match condition that egress traffic from a VM is evaluated against. If it evaluates to true, the corresponding `action` is enforced. The following examples are valid match expressions for public NAT: `inIpRange(destination.ip, '1.1.0.0/16') || inIpRange(destination.ip, '2.2.0.0/16')` `destination.ip == '1.1.0.1' || destination.ip == '8.8.8.8'` The following example is a valid match expression for private NAT: `nexthop.hub == '//networkconnectivity.googleapis.com/projects/my-project/locations/global/hubs/hub-1'`", "type": "string" }, "ruleNumber": { @@ -70456,14 +70645,14 @@ "id": "RouterStatus", "properties": { "bestRoutes": { - "description": "Best routes for this router's network.", + "description": "A list of the best dynamic routes for this Cloud Router's Virtual Private Cloud (VPC) network in the same region as this Cloud Router. Lists all of the best routes per prefix that are programmed into this region's VPC data plane. When global dynamic routing mode is turned on in the VPC network, this list can include cross-region dynamic routes from Cloud Routers in other regions.", "items": { "$ref": "Route" }, "type": "array" }, "bestRoutesForRouter": { - "description": "Best routes learned by this router.", + "description": "A list of the best BGP routes learned by this Cloud Router. It is possible that routes listed might not be programmed into the data plane, if the Google Cloud control plane finds a more optimal route for a prefix than a route learned by this Cloud Router.", "items": { "$ref": "Route" }, @@ -71169,6 +71358,11 @@ "description": "Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted. By default, this is set to true so an instance is automatically restarted if it is terminated by Compute Engine.", "type": "boolean" }, + "availabilityDomain": { + "description": "Specifies the availability domain to place the instance in. The value must be a number between 1 and the number of availability domains specified in the spread placement policy attached to the instance.", + "format": "int32", + "type": "integer" + }, "instanceTerminationAction": { "description": "Specifies the termination action for the instance.", "enum": [ @@ -71773,10 +71967,58 @@ "format": "float", "type": "number" }, + "detectionAbsoluteQps": { + "format": "float", + "type": "number" + }, + "detectionLoadThreshold": { + "format": "float", + "type": "number" + }, + "detectionRelativeToBaselineQps": { + "format": "float", + "type": "number" + }, "name": { "description": "The name must be 1-63 characters long, and comply with RFC1035. 
The name must be unique within the security policy.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" + }, + "trafficGranularityConfigs": { + "description": "Configuration options for enabling Adaptive Protection to operate on specified granular traffic units.", + "items": { + "$ref": "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig" + }, + "type": "array" + } } }, "type": "object" }, + "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig": { + "description": "Configurations for specific granular traffic units processed by Adaptive Protection.", + "id": "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig", + "properties": { + "enableEachUniqueValue": { + "description": "If enabled, traffic matching each unique value for the specified type constitutes a separate traffic unit. It can only be set to true if `value` is empty.", + "type": "boolean" + }, + "type": { + "description": "Type of this configuration.", + "enum": [ + "HTTP_HEADER_HOST", + "HTTP_PATH", + "UNSPECIFIED_TYPE" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "value": { + "description": "Requests that match this value constitute a granular traffic unit.", + "type": "string" + } } }, "type": "object" }, @@ -72707,6 +72949,11 @@ "description": "The URL of a forwarding rule with loadBalancingScheme INTERNAL* that is serving the endpoint identified by this service attachment.", "type": "string" }, + "propagatedConnectionLimit": { + "description": "The number of consumer spokes that connected Private Service Connect endpoints can be propagated to through Network Connectivity Center. This limit lets the service producer limit how many propagated Private Service Connect connections can be established to this service attachment from a single consumer. If the connection preference of the service attachment is ACCEPT_MANUAL, the limit applies to each project or network that is listed in the consumer accept list. If the connection preference of the service attachment is ACCEPT_AUTOMATIC, the limit applies to each project that contains a connected endpoint. If unspecified, the default propagated connection limit is 250.", + "format": "uint32", + "type": "integer" + }, "pscServiceAttachmentId": { "$ref": "Uint128", "description": "[Output Only] An 128-bit global unique ID of the PSC service attachment." }, @@ -72902,6 +73149,11 @@ "description": "The url of a connected endpoint.", "type": "string" }, + "propagatedConnectionCount": { + "description": "The number of consumer Network Connectivity Center spokes that the connected Private Service Connect endpoint has propagated to.", + "format": "uint32", + "type": "integer" + }, "pscConnectionId": { "description": "The PSC connection id of the connected endpoint.", "format": "uint64", @@ -75394,7 +75646,12 @@ "type": "string" }, "poolProvisionedCapacityGb": { - "description": "Size, in GiB, of the storage pool.", + "annotations": { + "required": [ + "compute.storagePools.insert" + ] + }, + "description": "Size, in GiB, of the storage pool. 
For more information about the size limits, see https://cloud.google.com/compute/docs/disks/storage-pools.", "format": "int64", "type": "string" }, @@ -76036,7 +76293,7 @@ "type": "string" }, "poolUsedIops": { - "description": "Sum of all the disks' provisioned IOPS, minus some amount that is allowed per disk that is not counted towards pool's IOPS capacity.", + "description": "[Output Only] Sum of all the disks' provisioned IOPS, minus some amount that is allowed per disk that is not counted towards pool's IOPS capacity. For more information, see https://cloud.google.com/compute/docs/disks/storage-pools.", "format": "int64", "type": "string" }, @@ -76762,7 +77019,7 @@ "type": "string" }, "internalIpv6Prefix": { - "description": "[Output Only] The internal IPv6 address range that is assigned to this subnetwork.", + "description": "The internal IPv6 address range that is owned by this subnetwork.", "type": "string" }, "ipCidrRange": { @@ -76878,11 +77135,13 @@ "description": "The stack type for the subnet. If set to IPV4_ONLY, new VMs in the subnet are assigned IPv4 addresses only. If set to IPV4_IPV6, new VMs in the subnet can be assigned both IPv4 and IPv6 addresses. If not specified, IPV4_ONLY is used. This field can be both set at resource creation time and updated using patch.", "enum": [ "IPV4_IPV6", - "IPV4_ONLY" + "IPV4_ONLY", + "IPV6_ONLY" ], "enumDescriptions": [ "New VMs in this subnet can have both IPv4 and IPv6 addresses.", - "New VMs in this subnet will only be assigned IPv4 addresses." + "New VMs in this subnet will only be assigned IPv4 addresses.", + "New VMs in this subnet will only be assigned IPv6 addresses." ], "type": "string" }, @@ -78305,7 +78564,7 @@ "type": "string" }, "certificateMap": { - "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for Global external Application Load Balancer or Classic Application Load Balancer. For other products use Certificate Manager Certificates instead. If set, sslCertificates will be ignored. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", "type": "string" }, "creationTimestamp": { @@ -78372,7 +78631,7 @@ "type": "string" }, "sslCertificates": { - "description": "URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. At least one SSL certificate must be specified. Currently, you may specify up to 15 SSL certificates. sslCertificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED.", + "description": "URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. At least one SSL certificate must be specified. SslCertificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED. The URLs should refer to an SSL Certificate resource or Certificate Manager Certificate resource. Mixing Classic Certificates and Certificate Manager Certificates is not allowed. Certificate Manager Certificates must include the certificatemanager API. 
Certificate Manager Certificates are not supported by Global external Application Load Balancer or Classic Application Load Balancer, use certificate_map instead. Currently, you may specify up to 15 Classic SSL Certificates. Certificate Manager Certificates accepted formats are: - //certificatemanager.googleapis.com/projects/{project}/locations/{ location}/certificates/{resourceName}. - https://certificatemanager.googleapis.com/v1alpha1/projects/{project }/locations/{location}/certificates/{resourceName}. ", "items": { "type": "string" }, @@ -81307,19 +81566,23 @@ "type": "object" }, "UrlMap": { - "description": "Represents a URL Map resource. Compute Engine has two URL Map resources: * [Global](/compute/docs/reference/rest/v1/urlMaps) * [Regional](/compute/docs/reference/rest/v1/regionUrlMaps) A URL map resource is a component of certain types of cloud load balancers and Traffic Director: * urlMaps are used by global external Application Load Balancers, classic Application Load Balancers, and cross-region internal Application Load Balancers. * regionUrlMaps are used by internal Application Load Balancers, regional external Application Load Balancers and regional internal Application Load Balancers. For a list of supported URL map features by the load balancer type, see the Load balancing features: Routing and traffic management table. For a list of supported URL map features for Traffic Director, see the Traffic Director features: Routing and traffic management table. This resource defines mappings from hostnames and URL paths to either a backend service or a backend bucket. To use the global urlMaps resource, the backend service must have a loadBalancingScheme of either EXTERNAL or INTERNAL_SELF_MANAGED. To use the regionUrlMaps resource, the backend service must have a loadBalancingScheme of INTERNAL_MANAGED. For more information, read URL Map Concepts.", + "description": "Represents a URL Map resource. Compute Engine has two URL Map resources: * [Global](/compute/docs/reference/rest/v1/urlMaps) * [Regional](/compute/docs/reference/rest/v1/regionUrlMaps) A URL map resource is a component of certain types of cloud load balancers and Traffic Director: * urlMaps are used by global external Application Load Balancers, classic Application Load Balancers, and cross-region internal Application Load Balancers. * regionUrlMaps are used by internal Application Load Balancers, regional external Application Load Balancers and regional internal Application Load Balancers. For a list of supported URL map features by the load balancer type, see the Load balancing features: Routing and traffic management table. For a list of supported URL map features for Traffic Director, see the Traffic Director features: Routing and traffic management table. This resource defines mappings from hostnames and URL paths to either a backend service or a backend bucket. To use the global urlMaps resource, the backend service must have a loadBalancingScheme of either EXTERNAL, EXTERNAL_MANAGED, or INTERNAL_SELF_MANAGED. To use the regionUrlMaps resource, the backend service must have a loadBalancingScheme of INTERNAL_MANAGED. 
For more information, read URL Map Concepts.", "id": "UrlMap", "properties": { "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, + "defaultCustomErrorResponsePolicy": { + "$ref": "CustomErrorResponsePolicy", + "description": "defaultCustomErrorResponsePolicy specifies how the Load Balancer returns error responses when BackendService or BackendBucket responds with an error. This policy takes effect at the load balancer level and applies only when no policy has been defined for the error code at lower levels like PathMatcher, RouteRule and PathRule within this UrlMap. For example, consider a UrlMap with the following configuration: - defaultCustomErrorResponsePolicy containing policies for responding to 5xx and 4xx errors - A PathMatcher configured for *.example.com has defaultCustomErrorResponsePolicy for 4xx. If a request for http://www.example.com/ encounters a 404, the policy in pathMatcher.defaultCustomErrorResponsePolicy will be enforced. When the request for http://www.example.com/ encounters a 502, the policy in UrlMap.defaultCustomErrorResponsePolicy will be enforced. When a request that does not match any host in *.example.com such as http://www.myotherexample.com/, encounters a 404, UrlMap.defaultCustomErrorResponsePolicy takes effect. When used in conjunction with defaultRouteAction.retryPolicy, retries take precedence. Only once all retries are exhausted, the defaultCustomErrorResponsePolicy is applied. While attempting a retry, if load balancer is successful in reaching the service, the defaultCustomErrorResponsePolicy is ignored and the response from the service is returned to the client. defaultCustomErrorResponsePolicy is supported only for global external Application Load Balancers." + }, "defaultRouteAction": { "$ref": "HttpRouteAction", "description": "defaultRouteAction takes effect when none of the hostRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. Only one of defaultRouteAction or defaultUrlRedirect must be set. URL maps for classic Application Load Balancers only support the urlRewrite action within defaultRouteAction. defaultRouteAction has no effect when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true." }, "defaultService": { - "description": "The full or partial URL of the defaultService resource to which traffic is directed if none of the hostRules match. If defaultRouteAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. If defaultService is specified, then set either defaultUrlRedirect , or defaultRouteAction.weightedBackendService Don't set both. defaultService has no effect when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true.", + "description": "The full or partial URL of the defaultService resource to which traffic is directed if none of the hostRules match. 
If defaultRouteAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any defaultRouteAction.weightedBackendServices. Conversely, if defaultRouteAction specifies any defaultRouteAction.weightedBackendServices, defaultService must not be specified. If defaultService is specified, then set either defaultUrlRedirect or defaultRouteAction.weightedBackendService. Don't set both. defaultService has no effect when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true.", "type": "string" }, "defaultUrlRedirect": { @@ -82053,11 +82316,13 @@ "description": "The stack type for the subnet. If set to IPV4_ONLY, new VMs in the subnet are assigned IPv4 addresses only. If set to IPV4_IPV6, new VMs in the subnet can be assigned both IPv4 and IPv6 addresses. If not specified, IPV4_ONLY is used. This field can be both set at resource creation time and updated using patch.", "enum": [ "IPV4_IPV6", - "IPV4_ONLY" + "IPV4_ONLY", + "IPV6_ONLY" ], "enumDescriptions": [ "New VMs in this subnet can have both IPv4 and IPv6 addresses.", - "New VMs in this subnet will only be assigned IPv4 addresses." + "New VMs in this subnet will only be assigned IPv4 addresses.", + "New VMs in this subnet will only be assigned IPv6 addresses." ], "type": "string" }, @@ -82574,7 +82839,7 @@ "type": "string" }, "stackType": { - "description": "The stack type for this VPN gateway to identify the IP protocols that are enabled. Possible values are: IPV4_ONLY, IPV4_IPV6. If not specified, IPV4_ONLY will be used.", + "description": "The stack type for this VPN gateway to identify the IP protocols that are enabled. Possible values are: IPV4_ONLY, IPV4_IPV6, IPV6_ONLY. If not specified, IPV4_ONLY is used if the gateway IP version is IPV4, or IPV4_IPV6 if the gateway IP version is IPV6.", "enum": [ "IPV4_IPV6", "IPV4_ONLY", "IPV6_ONLY" ], @@ -83208,7 +83473,7 @@ "type": "object" }, "localTrafficSelector": { - "description": "Local traffic selector to use when establishing the VPN tunnel with the peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges must be disjoint. Only IPv4 is supported.", + "description": "Local traffic selector to use when establishing the VPN tunnel with the peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges must be disjoint. Only IPv4 is supported for Classic VPN tunnels. This field is output only for HA VPN tunnels.", "items": { "type": "string" }, @@ -83238,7 +83503,7 @@ "type": "string" }, "peerIp": { - "description": "IP address of the peer VPN gateway. Only IPv4 is supported.", + "description": "IP address of the peer VPN gateway. Only IPv4 is supported. This field can be set only for Classic VPN tunnels.", "type": "string" }, "region": { "type": "string" }, "remoteTrafficSelector": { - "description": "Remote traffic selectors to use when establishing the VPN tunnel with the peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges should be disjoint. Only IPv4 is supported.", + "description": "Remote traffic selectors to use when establishing the VPN tunnel with the peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges should be disjoint. Only IPv4 is supported for Classic VPN tunnels. 
This field is output only for HA VPN tunnels.", "items": { "type": "string" }, @@ -83303,7 +83568,7 @@ "type": "string" }, "targetVpnGateway": { - "description": "URL of the Target VPN gateway with which this VPN tunnel is associated. Provided by the client when the VPN tunnel is created.", + "description": "URL of the Target VPN gateway with which this VPN tunnel is associated. Provided by the client when the VPN tunnel is created. This field can be set only for Classic VPN tunnels.", "type": "string" }, "vpnGateway": { diff --git a/terraform/providers/google/vendor/google.golang.org/api/compute/v1/compute-gen.go b/terraform/providers/google/vendor/google.golang.org/api/compute/v1/compute-gen.go index 3998ee08ee4..245c3c68714 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -137,6 +137,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) opts = append(opts, internaloption.WithDefaultEndpointTemplate(basePathTemplate)) opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) + opts = append(opts, internaloption.EnableNewAuthLibrary()) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -1410,9 +1411,9 @@ type AWSV4Signature struct { NullFields []string `json:"-"` } -func (s *AWSV4Signature) MarshalJSON() ([]byte, error) { +func (s AWSV4Signature) MarshalJSON() ([]byte, error) { type NoMethod AWSV4Signature - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AcceleratorConfig: A specification of the type and number of accelerator @@ -1440,9 +1441,9 @@ type AcceleratorConfig struct { NullFields []string `json:"-"` } -func (s *AcceleratorConfig) MarshalJSON() ([]byte, error) { +func (s AcceleratorConfig) MarshalJSON() ([]byte, error) { type NoMethod AcceleratorConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AcceleratorType: Represents an Accelerator Type resource. 
Google Cloud @@ -1491,9 +1492,9 @@ type AcceleratorType struct { NullFields []string `json:"-"` } -func (s *AcceleratorType) MarshalJSON() ([]byte, error) { +func (s AcceleratorType) MarshalJSON() ([]byte, error) { type NoMethod AcceleratorType - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type AcceleratorTypeAggregatedList struct { @@ -1533,9 +1534,9 @@ type AcceleratorTypeAggregatedList struct { NullFields []string `json:"-"` } -func (s *AcceleratorTypeAggregatedList) MarshalJSON() ([]byte, error) { +func (s AcceleratorTypeAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod AcceleratorTypeAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AcceleratorTypeAggregatedListWarning: [Output Only] Informational warning @@ -1619,9 +1620,9 @@ type AcceleratorTypeAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *AcceleratorTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s AcceleratorTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod AcceleratorTypeAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type AcceleratorTypeAggregatedListWarningData struct { @@ -1648,9 +1649,9 @@ type AcceleratorTypeAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *AcceleratorTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s AcceleratorTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod AcceleratorTypeAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AcceleratorTypeList: Contains a list of accelerator types. @@ -1688,9 +1689,9 @@ type AcceleratorTypeList struct { NullFields []string `json:"-"` } -func (s *AcceleratorTypeList) MarshalJSON() ([]byte, error) { +func (s AcceleratorTypeList) MarshalJSON() ([]byte, error) { type NoMethod AcceleratorTypeList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AcceleratorTypeListWarning: [Output Only] Informational warning message. 
@@ -1773,9 +1774,9 @@ type AcceleratorTypeListWarning struct { NullFields []string `json:"-"` } -func (s *AcceleratorTypeListWarning) MarshalJSON() ([]byte, error) { +func (s AcceleratorTypeListWarning) MarshalJSON() ([]byte, error) { type NoMethod AcceleratorTypeListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type AcceleratorTypeListWarningData struct { @@ -1802,9 +1803,9 @@ type AcceleratorTypeListWarningData struct { NullFields []string `json:"-"` } -func (s *AcceleratorTypeListWarningData) MarshalJSON() ([]byte, error) { +func (s AcceleratorTypeListWarningData) MarshalJSON() ([]byte, error) { type NoMethod AcceleratorTypeListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type AcceleratorTypesScopedList struct { @@ -1827,9 +1828,9 @@ type AcceleratorTypesScopedList struct { NullFields []string `json:"-"` } -func (s *AcceleratorTypesScopedList) MarshalJSON() ([]byte, error) { +func (s AcceleratorTypesScopedList) MarshalJSON() ([]byte, error) { type NoMethod AcceleratorTypesScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AcceleratorTypesScopedListWarning: [Output Only] An informational warning @@ -1913,9 +1914,9 @@ type AcceleratorTypesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *AcceleratorTypesScopedListWarning) MarshalJSON() ([]byte, error) { +func (s AcceleratorTypesScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod AcceleratorTypesScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type AcceleratorTypesScopedListWarningData struct { @@ -1942,9 +1943,9 @@ type AcceleratorTypesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *AcceleratorTypesScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s AcceleratorTypesScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod AcceleratorTypesScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AccessConfig: An access configuration attached to an instance's network @@ -2024,9 +2025,9 @@ type AccessConfig struct { NullFields []string `json:"-"` } -func (s *AccessConfig) MarshalJSON() ([]byte, error) { +func (s AccessConfig) MarshalJSON() ([]byte, error) { type NoMethod AccessConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Address: Represents an IP Address resource. 
Google Compute Engine has two IP @@ -2192,9 +2193,9 @@ type Address struct { NullFields []string `json:"-"` } -func (s *Address) MarshalJSON() ([]byte, error) { +func (s Address) MarshalJSON() ([]byte, error) { type NoMethod Address - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type AddressAggregatedList struct { @@ -2233,9 +2234,9 @@ type AddressAggregatedList struct { NullFields []string `json:"-"` } -func (s *AddressAggregatedList) MarshalJSON() ([]byte, error) { +func (s AddressAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod AddressAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AddressAggregatedListWarning: [Output Only] Informational warning message. @@ -2318,9 +2319,9 @@ type AddressAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *AddressAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s AddressAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod AddressAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type AddressAggregatedListWarningData struct { @@ -2347,9 +2348,9 @@ type AddressAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *AddressAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s AddressAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod AddressAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AddressList: Contains a list of addresses. @@ -2387,9 +2388,9 @@ type AddressList struct { NullFields []string `json:"-"` } -func (s *AddressList) MarshalJSON() ([]byte, error) { +func (s AddressList) MarshalJSON() ([]byte, error) { type NoMethod AddressList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AddressListWarning: [Output Only] Informational warning message. 
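The mechanical change repeated throughout compute-gen.go above and below is the switch of every generated MarshalJSON from a pointer receiver to a value receiver, with NoMethod(*s) becoming NoMethod(s). The following is a minimal, self-contained sketch of that pattern, not the generated code itself; it substitutes the standard library's encoding/json for the internal gensupport.MarshalJSON helper, which additionally honors ForceSendFields and NullFields:

package main

import (
	"encoding/json"
	"fmt"
)

// AccessConfig here is a two-field stand-in for any generated struct.
type AccessConfig struct {
	Name  string `json:"name,omitempty"`
	NatIP string `json:"natIP,omitempty"`
}

// The local type NoMethod has AccessConfig's fields but none of its methods,
// so marshaling it cannot re-enter MarshalJSON and recurse forever.
func (s AccessConfig) MarshalJSON() ([]byte, error) {
	type NoMethod AccessConfig
	return json.Marshal(NoMethod(s))
}

func main() {
	b, _ := json.Marshal(AccessConfig{Name: "External NAT"})
	fmt.Println(string(b)) // {"name":"External NAT"}
}

The receiver switch does not change behavior: the method only ever reads s, and a value receiver additionally puts MarshalJSON in the method set of plain (non-pointer) values.
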
@@ -2472,9 +2473,9 @@ type AddressListWarning struct { NullFields []string `json:"-"` } -func (s *AddressListWarning) MarshalJSON() ([]byte, error) { +func (s AddressListWarning) MarshalJSON() ([]byte, error) { type NoMethod AddressListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type AddressListWarningData struct { @@ -2501,9 +2502,9 @@ type AddressListWarningData struct { NullFields []string `json:"-"` } -func (s *AddressListWarningData) MarshalJSON() ([]byte, error) { +func (s AddressListWarningData) MarshalJSON() ([]byte, error) { type NoMethod AddressListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type AddressesScopedList struct { @@ -2525,9 +2526,9 @@ type AddressesScopedList struct { NullFields []string `json:"-"` } -func (s *AddressesScopedList) MarshalJSON() ([]byte, error) { +func (s AddressesScopedList) MarshalJSON() ([]byte, error) { type NoMethod AddressesScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AddressesScopedListWarning: [Output Only] Informational warning which @@ -2611,9 +2612,9 @@ type AddressesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *AddressesScopedListWarning) MarshalJSON() ([]byte, error) { +func (s AddressesScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod AddressesScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type AddressesScopedListWarningData struct { @@ -2640,9 +2641,9 @@ type AddressesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *AddressesScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s AddressesScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod AddressesScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AdvancedMachineFeatures: Specifies options for controlling advanced machine @@ -2670,6 +2671,10 @@ type AdvancedMachineFeatures struct { // simultaneous multithreading (SMT) set this to 1. If unset, the maximum // number of threads supported per core by the underlying processor is assumed. ThreadsPerCore int64 `json:"threadsPerCore,omitempty"` + // TurboMode: Turbo frequency mode to use for the instance. Supported modes + // include: * ALL_CORE_MAX Using empty string or not setting this field will + // use the platform-specific default turbo mode. + TurboMode string `json:"turboMode,omitempty"` // VisibleCoreCount: The number of physical cores to expose to an instance. // Multiply by the number of threads per core to compute the total number of // virtual CPUs to expose to the instance. 
If unset, the number of cores is @@ -2689,9 +2694,9 @@ type AdvancedMachineFeatures struct { NullFields []string `json:"-"` } -func (s *AdvancedMachineFeatures) MarshalJSON() ([]byte, error) { +func (s AdvancedMachineFeatures) MarshalJSON() ([]byte, error) { type NoMethod AdvancedMachineFeatures - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AliasIpRange: An alias IP range attached to an instance's network interface. @@ -2719,9 +2724,9 @@ type AliasIpRange struct { NullFields []string `json:"-"` } -func (s *AliasIpRange) MarshalJSON() ([]byte, error) { +func (s AliasIpRange) MarshalJSON() ([]byte, error) { type NoMethod AliasIpRange - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AllocationAggregateReservation: This reservation type is specified by total @@ -2737,8 +2742,10 @@ type AllocationAggregateReservation struct { // reservation must belong to. // // Possible values: + // "VM_FAMILY_CLOUD_TPU_DEVICE_CT3" // "VM_FAMILY_CLOUD_TPU_LITE_DEVICE_CT5L" // "VM_FAMILY_CLOUD_TPU_LITE_POD_SLICE_CT5LP" + // "VM_FAMILY_CLOUD_TPU_POD_SLICE_CT3P" // "VM_FAMILY_CLOUD_TPU_POD_SLICE_CT4P" VmFamily string `json:"vmFamily,omitempty"` // WorkloadType: The workload type of the instances that will target this @@ -2764,9 +2771,9 @@ type AllocationAggregateReservation struct { NullFields []string `json:"-"` } -func (s *AllocationAggregateReservation) MarshalJSON() ([]byte, error) { +func (s AllocationAggregateReservation) MarshalJSON() ([]byte, error) { type NoMethod AllocationAggregateReservation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type AllocationAggregateReservationReservedResourceInfo struct { @@ -2785,9 +2792,9 @@ type AllocationAggregateReservationReservedResourceInfo struct { NullFields []string `json:"-"` } -func (s *AllocationAggregateReservationReservedResourceInfo) MarshalJSON() ([]byte, error) { +func (s AllocationAggregateReservationReservedResourceInfo) MarshalJSON() ([]byte, error) { type NoMethod AllocationAggregateReservationReservedResourceInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type AllocationAggregateReservationReservedResourceInfoAccelerator struct { @@ -2809,9 +2816,9 @@ type AllocationAggregateReservationReservedResourceInfoAccelerator struct { NullFields []string `json:"-"` } -func (s *AllocationAggregateReservationReservedResourceInfoAccelerator) MarshalJSON() ([]byte, error) { +func (s AllocationAggregateReservationReservedResourceInfoAccelerator) MarshalJSON() ([]byte, error) { type NoMethod AllocationAggregateReservationReservedResourceInfoAccelerator - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AllocationResourceStatus: [Output Only] Contains output only fields. 
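The AdvancedMachineFeatures hunk above introduces TurboMode, with ALL_CORE_MAX as the only documented mode and the empty string meaning the platform-specific default. A hedged sketch of populating the field through this vendored compute/v1 package; the zone and machine type are placeholder values:

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// Placeholder machine type and zone; the TurboMode field and its
	// ALL_CORE_MAX value come from the hunk above, and leaving the field
	// empty would keep the platform-specific default turbo mode.
	instance := &compute.Instance{
		Name:        "turbo-demo",
		MachineType: "zones/us-central1-a/machineTypes/c4-standard-8",
		AdvancedMachineFeatures: &compute.AdvancedMachineFeatures{
			TurboMode: "ALL_CORE_MAX",
		},
	}
	fmt.Println(instance.AdvancedMachineFeatures.TurboMode)
}
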
@@ -2831,9 +2838,9 @@ type AllocationResourceStatus struct { NullFields []string `json:"-"` } -func (s *AllocationResourceStatus) MarshalJSON() ([]byte, error) { +func (s AllocationResourceStatus) MarshalJSON() ([]byte, error) { type NoMethod AllocationResourceStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AllocationResourceStatusSpecificSKUAllocation: Contains Properties set for @@ -2855,9 +2862,9 @@ type AllocationResourceStatusSpecificSKUAllocation struct { NullFields []string `json:"-"` } -func (s *AllocationResourceStatusSpecificSKUAllocation) MarshalJSON() ([]byte, error) { +func (s AllocationResourceStatusSpecificSKUAllocation) MarshalJSON() ([]byte, error) { type NoMethod AllocationResourceStatusSpecificSKUAllocation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk struct { @@ -2884,9 +2891,9 @@ type AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk stru NullFields []string `json:"-"` } -func (s *AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk) MarshalJSON() ([]byte, error) { +func (s AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk) MarshalJSON() ([]byte, error) { type NoMethod AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AllocationSpecificSKUAllocationReservedInstanceProperties: Properties of the @@ -2920,9 +2927,9 @@ type AllocationSpecificSKUAllocationReservedInstanceProperties struct { NullFields []string `json:"-"` } -func (s *AllocationSpecificSKUAllocationReservedInstanceProperties) MarshalJSON() ([]byte, error) { +func (s AllocationSpecificSKUAllocationReservedInstanceProperties) MarshalJSON() ([]byte, error) { type NoMethod AllocationSpecificSKUAllocationReservedInstanceProperties - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AllocationSpecificSKUReservation: This reservation type allows to pre @@ -2959,9 +2966,9 @@ type AllocationSpecificSKUReservation struct { NullFields []string `json:"-"` } -func (s *AllocationSpecificSKUReservation) MarshalJSON() ([]byte, error) { +func (s AllocationSpecificSKUReservation) MarshalJSON() ([]byte, error) { type NoMethod AllocationSpecificSKUReservation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AttachedDisk: An instance-attached disk resource. 
@@ -3089,9 +3096,9 @@ type AttachedDisk struct { NullFields []string `json:"-"` } -func (s *AttachedDisk) MarshalJSON() ([]byte, error) { +func (s AttachedDisk) MarshalJSON() ([]byte, error) { type NoMethod AttachedDisk - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AttachedDiskInitializeParams: [Input Only] Specifies the parameters for a @@ -3229,9 +3236,9 @@ type AttachedDiskInitializeParams struct { NullFields []string `json:"-"` } -func (s *AttachedDiskInitializeParams) MarshalJSON() ([]byte, error) { +func (s AttachedDiskInitializeParams) MarshalJSON() ([]byte, error) { type NoMethod AttachedDiskInitializeParams - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuditConfig: Specifies the audit configuration for a service. The @@ -3253,8 +3260,6 @@ func (s *AttachedDiskInitializeParams) MarshalJSON() ([]byte, error) { type AuditConfig struct { // AuditLogConfigs: The configuration for logging of each type of permission. AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"` - // ExemptedMembers: This is deprecated and has no effect. Do not use. - ExemptedMembers []string `json:"exemptedMembers,omitempty"` // Service: Specifies a service that will be enabled for audit logging. For // example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` // is a special value that covers all services. @@ -3272,9 +3277,9 @@ type AuditConfig struct { NullFields []string `json:"-"` } -func (s *AuditConfig) MarshalJSON() ([]byte, error) { +func (s AuditConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuditLogConfig: Provides the configuration for logging a type of @@ -3286,8 +3291,6 @@ type AuditLogConfig struct { // ExemptedMembers: Specifies the identities that do not cause logging for this // type of permission. Follows the same format of Binding.members. ExemptedMembers []string `json:"exemptedMembers,omitempty"` - // IgnoreChildExemptions: This is deprecated and has no effect. Do not use. - IgnoreChildExemptions bool `json:"ignoreChildExemptions,omitempty"` // LogType: The log type that this config enables. // // Possible values: @@ -3309,9 +3312,9 @@ type AuditLogConfig struct { NullFields []string `json:"-"` } -func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { +func (s AuditLogConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditLogConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Autoscaler: Represents an Autoscaler resource. 
Google Compute Engine has two @@ -3402,9 +3405,9 @@ type Autoscaler struct { NullFields []string `json:"-"` } -func (s *Autoscaler) MarshalJSON() ([]byte, error) { +func (s Autoscaler) MarshalJSON() ([]byte, error) { type NoMethod Autoscaler - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type AutoscalerAggregatedList struct { @@ -3444,9 +3447,9 @@ type AutoscalerAggregatedList struct { NullFields []string `json:"-"` } -func (s *AutoscalerAggregatedList) MarshalJSON() ([]byte, error) { +func (s AutoscalerAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod AutoscalerAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AutoscalerAggregatedListWarning: [Output Only] Informational warning @@ -3530,9 +3533,9 @@ type AutoscalerAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *AutoscalerAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s AutoscalerAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod AutoscalerAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type AutoscalerAggregatedListWarningData struct { @@ -3559,9 +3562,9 @@ type AutoscalerAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *AutoscalerAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s AutoscalerAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod AutoscalerAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AutoscalerList: Contains a list of Autoscaler resources. @@ -3599,9 +3602,9 @@ type AutoscalerList struct { NullFields []string `json:"-"` } -func (s *AutoscalerList) MarshalJSON() ([]byte, error) { +func (s AutoscalerList) MarshalJSON() ([]byte, error) { type NoMethod AutoscalerList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AutoscalerListWarning: [Output Only] Informational warning message. 
@@ -3684,9 +3687,9 @@ type AutoscalerListWarning struct { NullFields []string `json:"-"` } -func (s *AutoscalerListWarning) MarshalJSON() ([]byte, error) { +func (s AutoscalerListWarning) MarshalJSON() ([]byte, error) { type NoMethod AutoscalerListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type AutoscalerListWarningData struct { @@ -3713,9 +3716,9 @@ type AutoscalerListWarningData struct { NullFields []string `json:"-"` } -func (s *AutoscalerListWarningData) MarshalJSON() ([]byte, error) { +func (s AutoscalerListWarningData) MarshalJSON() ([]byte, error) { type NoMethod AutoscalerListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type AutoscalerStatusDetails struct { @@ -3814,9 +3817,9 @@ type AutoscalerStatusDetails struct { NullFields []string `json:"-"` } -func (s *AutoscalerStatusDetails) MarshalJSON() ([]byte, error) { +func (s AutoscalerStatusDetails) MarshalJSON() ([]byte, error) { type NoMethod AutoscalerStatusDetails - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type AutoscalersScopedList struct { @@ -3838,9 +3841,9 @@ type AutoscalersScopedList struct { NullFields []string `json:"-"` } -func (s *AutoscalersScopedList) MarshalJSON() ([]byte, error) { +func (s AutoscalersScopedList) MarshalJSON() ([]byte, error) { type NoMethod AutoscalersScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AutoscalersScopedListWarning: [Output Only] Informational warning which @@ -3924,9 +3927,9 @@ type AutoscalersScopedListWarning struct { NullFields []string `json:"-"` } -func (s *AutoscalersScopedListWarning) MarshalJSON() ([]byte, error) { +func (s AutoscalersScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod AutoscalersScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type AutoscalersScopedListWarningData struct { @@ -3953,9 +3956,9 @@ type AutoscalersScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *AutoscalersScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s AutoscalersScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod AutoscalersScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AutoscalingPolicy: Cloud Autoscaler policy. @@ -4025,9 +4028,9 @@ type AutoscalingPolicy struct { NullFields []string `json:"-"` } -func (s *AutoscalingPolicy) MarshalJSON() ([]byte, error) { +func (s AutoscalingPolicy) MarshalJSON() ([]byte, error) { type NoMethod AutoscalingPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AutoscalingPolicyCpuUtilization: CPU utilization policy. 
@@ -4068,9 +4071,9 @@ type AutoscalingPolicyCpuUtilization struct { NullFields []string `json:"-"` } -func (s *AutoscalingPolicyCpuUtilization) MarshalJSON() ([]byte, error) { +func (s AutoscalingPolicyCpuUtilization) MarshalJSON() ([]byte, error) { type NoMethod AutoscalingPolicyCpuUtilization - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *AutoscalingPolicyCpuUtilization) UnmarshalJSON(data []byte) error { @@ -4162,9 +4165,9 @@ type AutoscalingPolicyCustomMetricUtilization struct { NullFields []string `json:"-"` } -func (s *AutoscalingPolicyCustomMetricUtilization) MarshalJSON() ([]byte, error) { +func (s AutoscalingPolicyCustomMetricUtilization) MarshalJSON() ([]byte, error) { type NoMethod AutoscalingPolicyCustomMetricUtilization - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *AutoscalingPolicyCustomMetricUtilization) UnmarshalJSON(data []byte) error { @@ -4203,9 +4206,9 @@ type AutoscalingPolicyLoadBalancingUtilization struct { NullFields []string `json:"-"` } -func (s *AutoscalingPolicyLoadBalancingUtilization) MarshalJSON() ([]byte, error) { +func (s AutoscalingPolicyLoadBalancingUtilization) MarshalJSON() ([]byte, error) { type NoMethod AutoscalingPolicyLoadBalancingUtilization - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *AutoscalingPolicyLoadBalancingUtilization) UnmarshalJSON(data []byte) error { @@ -4247,9 +4250,9 @@ type AutoscalingPolicyScaleInControl struct { NullFields []string `json:"-"` } -func (s *AutoscalingPolicyScaleInControl) MarshalJSON() ([]byte, error) { +func (s AutoscalingPolicyScaleInControl) MarshalJSON() ([]byte, error) { type NoMethod AutoscalingPolicyScaleInControl - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AutoscalingPolicyScalingSchedule: Scaling based on user-defined schedule. @@ -4299,9 +4302,9 @@ type AutoscalingPolicyScalingSchedule struct { NullFields []string `json:"-"` } -func (s *AutoscalingPolicyScalingSchedule) MarshalJSON() ([]byte, error) { +func (s AutoscalingPolicyScalingSchedule) MarshalJSON() ([]byte, error) { type NoMethod AutoscalingPolicyScalingSchedule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Backend: Message containing information of one individual backend. @@ -4404,9 +4407,9 @@ type Backend struct { NullFields []string `json:"-"` } -func (s *Backend) MarshalJSON() ([]byte, error) { +func (s Backend) MarshalJSON() ([]byte, error) { type NoMethod Backend - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *Backend) UnmarshalJSON(data []byte) error { @@ -4473,6 +4476,8 @@ type BackendBucket struct { Name string `json:"name,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // UsedBy: [Output Only] List of resources referencing that backend bucket. 
+ UsedBy []*BackendBucketUsedBy `json:"usedBy,omitempty"` // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` @@ -4489,9 +4494,9 @@ type BackendBucket struct { NullFields []string `json:"-"` } -func (s *BackendBucket) MarshalJSON() ([]byte, error) { +func (s BackendBucket) MarshalJSON() ([]byte, error) { type NoMethod BackendBucket - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendBucketCdnPolicy: Message containing Cloud CDN configuration for a @@ -4620,9 +4625,9 @@ type BackendBucketCdnPolicy struct { NullFields []string `json:"-"` } -func (s *BackendBucketCdnPolicy) MarshalJSON() ([]byte, error) { +func (s BackendBucketCdnPolicy) MarshalJSON() ([]byte, error) { type NoMethod BackendBucketCdnPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendBucketCdnPolicyBypassCacheOnRequestHeader: Bypass the cache when the @@ -4646,9 +4651,9 @@ type BackendBucketCdnPolicyBypassCacheOnRequestHeader struct { NullFields []string `json:"-"` } -func (s *BackendBucketCdnPolicyBypassCacheOnRequestHeader) MarshalJSON() ([]byte, error) { +func (s BackendBucketCdnPolicyBypassCacheOnRequestHeader) MarshalJSON() ([]byte, error) { type NoMethod BackendBucketCdnPolicyBypassCacheOnRequestHeader - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendBucketCdnPolicyCacheKeyPolicy: Message containing what to include in @@ -4674,9 +4679,9 @@ type BackendBucketCdnPolicyCacheKeyPolicy struct { NullFields []string `json:"-"` } -func (s *BackendBucketCdnPolicyCacheKeyPolicy) MarshalJSON() ([]byte, error) { +func (s BackendBucketCdnPolicyCacheKeyPolicy) MarshalJSON() ([]byte, error) { type NoMethod BackendBucketCdnPolicyCacheKeyPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendBucketCdnPolicyNegativeCachingPolicy: Specify CDN TTLs for response @@ -4704,9 +4709,9 @@ type BackendBucketCdnPolicyNegativeCachingPolicy struct { NullFields []string `json:"-"` } -func (s *BackendBucketCdnPolicyNegativeCachingPolicy) MarshalJSON() ([]byte, error) { +func (s BackendBucketCdnPolicyNegativeCachingPolicy) MarshalJSON() ([]byte, error) { type NoMethod BackendBucketCdnPolicyNegativeCachingPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendBucketList: Contains a list of BackendBucket resources. @@ -4743,9 +4748,9 @@ type BackendBucketList struct { NullFields []string `json:"-"` } -func (s *BackendBucketList) MarshalJSON() ([]byte, error) { +func (s BackendBucketList) MarshalJSON() ([]byte, error) { type NoMethod BackendBucketList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendBucketListWarning: [Output Only] Informational warning message. 
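The UsedBy field added to BackendBucket above, together with the BackendBucketUsedBy type added just below, is [Output Only], so it is only meaningful on reads. A sketch under the assumption of Application Default Credentials and placeholder project and bucket names:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder identifiers; UsedBy is populated by the server.
	bb, err := svc.BackendBuckets.Get("my-project", "my-backend-bucket").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, u := range bb.UsedBy {
		fmt.Println("referenced by:", u.Reference) // URL of a referencing UrlMap
	}
}
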
@@ -4828,9 +4833,9 @@ type BackendBucketListWarning struct { NullFields []string `json:"-"` } -func (s *BackendBucketListWarning) MarshalJSON() ([]byte, error) { +func (s BackendBucketListWarning) MarshalJSON() ([]byte, error) { type NoMethod BackendBucketListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type BackendBucketListWarningData struct { @@ -4857,9 +4862,31 @@ type BackendBucketListWarningData struct { NullFields []string `json:"-"` } -func (s *BackendBucketListWarningData) MarshalJSON() ([]byte, error) { +func (s BackendBucketListWarningData) MarshalJSON() ([]byte, error) { type NoMethod BackendBucketListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type BackendBucketUsedBy struct { + // Reference: [Output Only] Server-defined URL for UrlMaps referencing that + // BackendBucket. + Reference string `json:"reference,omitempty"` + // ForceSendFields is a list of field names (e.g. "Reference") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Reference") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BackendBucketUsedBy) MarshalJSON() ([]byte, error) { + type NoMethod BackendBucketUsedBy + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendService: Represents a Backend Service resource. A backend service @@ -4965,6 +4992,39 @@ type BackendService struct { // Id: [Output Only] The unique identifier for the resource. This identifier is // defined by the server. Id uint64 `json:"id,omitempty,string"` + // IpAddressSelectionPolicy: Specifies a preference for traffic sent from the + // proxy to the backend (or from the client to the backend for proxyless gRPC). + // The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends + // of the backend service (Instance Group, Managed Instance Group, Network + // Endpoint Group), regardless of traffic from the client to the proxy. Only + // IPv4 health checks are used to check the health of the backends. This is the + // default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's + // IPv6 address over its IPv4 address (provided there is a healthy IPv6 + // address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend + // service (Instance Group, Managed Instance Group, Network Endpoint Group), + // regardless of traffic from the client to the proxy. Only IPv6 health checks + // are used to check the health of the backends. 
This field is applicable to + // either: - Advanced global external Application Load Balancer (load balancing + // scheme EXTERNAL_MANAGED), - Regional external Application Load Balancer, - + // Internal proxy Network Load Balancer (load balancing scheme + // INTERNAL_MANAGED), - Regional internal Application Load Balancer (load + // balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies + // and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + // + // Possible values: + // "IPV4_ONLY" - Only send IPv4 traffic to the backends of the Backend + // Service (Instance Group, Managed Instance Group, Network Endpoint Group) + // regardless of traffic from the client to the proxy. Only IPv4 health-checks + // are used to check the health of the backends. This is the default setting. + // "IPV6_ONLY" - Only send IPv6 traffic to the backends of the Backend + // Service (Instance Group, Managed Instance Group, Network Endpoint Group) + // regardless of traffic from the client to the proxy. Only IPv6 health-checks + // are used to check the health of the backends. + // "IP_ADDRESS_SELECTION_POLICY_UNSPECIFIED" - Unspecified IP address + // selection policy. + // "PREFER_IPV6" - Prioritize the connection to the endpoint's IPv6 address + // over its IPv4 address (provided there is a healthy IPv6 address). + IpAddressSelectionPolicy string `json:"ipAddressSelectionPolicy,omitempty"` // Kind: [Output Only] Type of resource. Always compute#backendService for // backend services. Kind string `json:"kind,omitempty"` @@ -5015,11 +5075,13 @@ // - A regional backend service with the service_protocol set to HTTP, HTTPS, // or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global // backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED, - // INTERNAL_MANAGED, or EXTERNAL_MANAGED. If sessionAffinity is not NONE, and - // this field is not set to MAGLEV or RING_HASH, session affinity settings will - // not take effect. Only ROUND_ROBIN and RING_HASH are supported when the - // backend service is referenced by a URL map that is bound to target gRPC - // proxy that has validateForProxyless field set to true. + // INTERNAL_MANAGED, or EXTERNAL_MANAGED. If sessionAffinity is not + // configured—that is, if session affinity remains at the default value of + // NONE—then the default value for localityLbPolicy is ROUND_ROBIN. If + // session affinity is set to a value other than NONE, then the default value + // for localityLbPolicy is MAGLEV. Only ROUND_ROBIN and RING_HASH are supported + // when the backend service is referenced by a URL map that is bound to target + // gRPC proxy that has validateForProxyless field set to true. // // Possible values: // "INVALID_LB_POLICY" @@ -5200,8 +5262,9 @@ // this backend service. Not supported when the backend service is referenced // by a URL map that is bound to target gRPC proxy that has // validateForProxyless field set to true. Instead, use maxStreamDuration. - TimeoutSec int64 `json:"timeoutSec,omitempty"` - UsedBy []*BackendServiceUsedBy `json:"usedBy,omitempty"` + TimeoutSec int64 `json:"timeoutSec,omitempty"` + // UsedBy: [Output Only] List of resources referencing the given backend service. + UsedBy []*BackendServiceUsedBy `json:"usedBy,omitempty"` // ServerResponse contains the HTTP response code and headers from the server. 
googleapi.ServerResponse `json:"-"` @@ -5218,9 +5281,9 @@ type BackendService struct { NullFields []string `json:"-"` } -func (s *BackendService) MarshalJSON() ([]byte, error) { +func (s BackendService) MarshalJSON() ([]byte, error) { type NoMethod BackendService - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendServiceAggregatedList: Contains a list of BackendServicesScopedList. @@ -5259,9 +5322,9 @@ type BackendServiceAggregatedList struct { NullFields []string `json:"-"` } -func (s *BackendServiceAggregatedList) MarshalJSON() ([]byte, error) { +func (s BackendServiceAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod BackendServiceAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendServiceAggregatedListWarning: [Output Only] Informational warning @@ -5345,9 +5408,9 @@ type BackendServiceAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *BackendServiceAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s BackendServiceAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod BackendServiceAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type BackendServiceAggregatedListWarningData struct { @@ -5374,9 +5437,9 @@ type BackendServiceAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *BackendServiceAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s BackendServiceAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod BackendServiceAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendServiceCdnPolicy: Message containing Cloud CDN configuration for a @@ -5505,9 +5568,9 @@ type BackendServiceCdnPolicy struct { NullFields []string `json:"-"` } -func (s *BackendServiceCdnPolicy) MarshalJSON() ([]byte, error) { +func (s BackendServiceCdnPolicy) MarshalJSON() ([]byte, error) { type NoMethod BackendServiceCdnPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendServiceCdnPolicyBypassCacheOnRequestHeader: Bypass the cache when the @@ -5531,9 +5594,9 @@ type BackendServiceCdnPolicyBypassCacheOnRequestHeader struct { NullFields []string `json:"-"` } -func (s *BackendServiceCdnPolicyBypassCacheOnRequestHeader) MarshalJSON() ([]byte, error) { +func (s BackendServiceCdnPolicyBypassCacheOnRequestHeader) MarshalJSON() ([]byte, error) { type NoMethod BackendServiceCdnPolicyBypassCacheOnRequestHeader - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendServiceCdnPolicyNegativeCachingPolicy: Specify CDN TTLs for response @@ -5561,9 +5624,9 @@ type BackendServiceCdnPolicyNegativeCachingPolicy struct { NullFields []string `json:"-"` } -func (s *BackendServiceCdnPolicyNegativeCachingPolicy) MarshalJSON() ([]byte, error) { +func (s BackendServiceCdnPolicyNegativeCachingPolicy) MarshalJSON() ([]byte, error) { type NoMethod 
BackendServiceCdnPolicyNegativeCachingPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendServiceConnectionTrackingPolicy: Connection Tracking configuration @@ -5633,9 +5696,9 @@ type BackendServiceConnectionTrackingPolicy struct { NullFields []string `json:"-"` } -func (s *BackendServiceConnectionTrackingPolicy) MarshalJSON() ([]byte, error) { +func (s BackendServiceConnectionTrackingPolicy) MarshalJSON() ([]byte, error) { type NoMethod BackendServiceConnectionTrackingPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendServiceFailoverPolicy: For load balancers that have configurable @@ -5689,9 +5752,9 @@ type BackendServiceFailoverPolicy struct { NullFields []string `json:"-"` } -func (s *BackendServiceFailoverPolicy) MarshalJSON() ([]byte, error) { +func (s BackendServiceFailoverPolicy) MarshalJSON() ([]byte, error) { type NoMethod BackendServiceFailoverPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *BackendServiceFailoverPolicy) UnmarshalJSON(data []byte) error { @@ -5734,9 +5797,9 @@ type BackendServiceGroupHealth struct { NullFields []string `json:"-"` } -func (s *BackendServiceGroupHealth) MarshalJSON() ([]byte, error) { +func (s BackendServiceGroupHealth) MarshalJSON() ([]byte, error) { type NoMethod BackendServiceGroupHealth - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendServiceIAP: Identity-Aware Proxy @@ -5767,9 +5830,9 @@ type BackendServiceIAP struct { NullFields []string `json:"-"` } -func (s *BackendServiceIAP) MarshalJSON() ([]byte, error) { +func (s BackendServiceIAP) MarshalJSON() ([]byte, error) { type NoMethod BackendServiceIAP - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendServiceList: Contains a list of BackendService resources. @@ -5807,9 +5870,9 @@ type BackendServiceList struct { NullFields []string `json:"-"` } -func (s *BackendServiceList) MarshalJSON() ([]byte, error) { +func (s BackendServiceList) MarshalJSON() ([]byte, error) { type NoMethod BackendServiceList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendServiceListWarning: [Output Only] Informational warning message. 
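A few hunks above, BackendService gains IpAddressSelectionPolicy, which selects the IP family used between the proxy (or proxyless gRPC client) and the backends. A sketch of setting it on an insert request; the project, names, and scheme are placeholder values, and the enum strings are the ones listed in the diff:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	bs := &compute.BackendService{
		Name:                "demo-backend-service",
		LoadBalancingScheme: "EXTERNAL_MANAGED",
		Protocol:            "HTTP",
		// Prefer each endpoint's IPv6 address when a healthy one exists,
		// per the PREFER_IPV6 value documented above.
		IpAddressSelectionPolicy: "PREFER_IPV6",
	}
	op, err := svc.BackendServices.Insert("my-project", bs).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("operation:", op.Name)
}
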
@@ -5892,9 +5955,9 @@ type BackendServiceListWarning struct { NullFields []string `json:"-"` } -func (s *BackendServiceListWarning) MarshalJSON() ([]byte, error) { +func (s BackendServiceListWarning) MarshalJSON() ([]byte, error) { type NoMethod BackendServiceListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type BackendServiceListWarningData struct { @@ -5921,9 +5984,9 @@ type BackendServiceListWarningData struct { NullFields []string `json:"-"` } -func (s *BackendServiceListWarningData) MarshalJSON() ([]byte, error) { +func (s BackendServiceListWarningData) MarshalJSON() ([]byte, error) { type NoMethod BackendServiceListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendServiceListUsable: Contains a list of usable BackendService @@ -5962,9 +6025,9 @@ type BackendServiceListUsable struct { NullFields []string `json:"-"` } -func (s *BackendServiceListUsable) MarshalJSON() ([]byte, error) { +func (s BackendServiceListUsable) MarshalJSON() ([]byte, error) { type NoMethod BackendServiceListUsable - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendServiceListUsableWarning: [Output Only] Informational warning @@ -6048,9 +6111,9 @@ type BackendServiceListUsableWarning struct { NullFields []string `json:"-"` } -func (s *BackendServiceListUsableWarning) MarshalJSON() ([]byte, error) { +func (s BackendServiceListUsableWarning) MarshalJSON() ([]byte, error) { type NoMethod BackendServiceListUsableWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type BackendServiceListUsableWarningData struct { @@ -6077,9 +6140,9 @@ type BackendServiceListUsableWarningData struct { NullFields []string `json:"-"` } -func (s *BackendServiceListUsableWarningData) MarshalJSON() ([]byte, error) { +func (s BackendServiceListUsableWarningData) MarshalJSON() ([]byte, error) { type NoMethod BackendServiceListUsableWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendServiceLocalityLoadBalancingPolicyConfig: Container for either a @@ -6101,9 +6164,9 @@ type BackendServiceLocalityLoadBalancingPolicyConfig struct { NullFields []string `json:"-"` } -func (s *BackendServiceLocalityLoadBalancingPolicyConfig) MarshalJSON() ([]byte, error) { +func (s BackendServiceLocalityLoadBalancingPolicyConfig) MarshalJSON() ([]byte, error) { type NoMethod BackendServiceLocalityLoadBalancingPolicyConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendServiceLocalityLoadBalancingPolicyConfigCustomPolicy: The @@ -6134,9 +6197,9 @@ type BackendServiceLocalityLoadBalancingPolicyConfigCustomPolicy struct { NullFields []string `json:"-"` } -func (s *BackendServiceLocalityLoadBalancingPolicyConfigCustomPolicy) MarshalJSON() ([]byte, error) { +func (s BackendServiceLocalityLoadBalancingPolicyConfigCustomPolicy) MarshalJSON() ([]byte, error) { type NoMethod BackendServiceLocalityLoadBalancingPolicyConfigCustomPolicy - return 
gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendServiceLocalityLoadBalancingPolicyConfigPolicy: The configuration for @@ -6191,9 +6254,9 @@ type BackendServiceLocalityLoadBalancingPolicyConfigPolicy struct { NullFields []string `json:"-"` } -func (s *BackendServiceLocalityLoadBalancingPolicyConfigPolicy) MarshalJSON() ([]byte, error) { +func (s BackendServiceLocalityLoadBalancingPolicyConfigPolicy) MarshalJSON() ([]byte, error) { type NoMethod BackendServiceLocalityLoadBalancingPolicyConfigPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendServiceLogConfig: The available logging options for the load balancer @@ -6237,9 +6300,9 @@ type BackendServiceLogConfig struct { NullFields []string `json:"-"` } -func (s *BackendServiceLogConfig) MarshalJSON() ([]byte, error) { +func (s BackendServiceLogConfig) MarshalJSON() ([]byte, error) { type NoMethod BackendServiceLogConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *BackendServiceLogConfig) UnmarshalJSON(data []byte) error { @@ -6271,12 +6334,15 @@ type BackendServiceReference struct { NullFields []string `json:"-"` } -func (s *BackendServiceReference) MarshalJSON() ([]byte, error) { +func (s BackendServiceReference) MarshalJSON() ([]byte, error) { type NoMethod BackendServiceReference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type BackendServiceUsedBy struct { + // Reference: [Output Only] Server-defined URL for resources referencing the + // given BackendService like UrlMaps, TargetTcpProxies, TargetSslProxies and + // ForwardingRule. Reference string `json:"reference,omitempty"` // ForceSendFields is a list of field names (e.g. "Reference") to // unconditionally include in API requests. 
By default, fields with empty or @@ -6291,9 +6357,9 @@ type BackendServiceUsedBy struct { NullFields []string `json:"-"` } -func (s *BackendServiceUsedBy) MarshalJSON() ([]byte, error) { +func (s BackendServiceUsedBy) MarshalJSON() ([]byte, error) { type NoMethod BackendServiceUsedBy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type BackendServicesScopedList struct { @@ -6315,9 +6381,9 @@ type BackendServicesScopedList struct { NullFields []string `json:"-"` } -func (s *BackendServicesScopedList) MarshalJSON() ([]byte, error) { +func (s BackendServicesScopedList) MarshalJSON() ([]byte, error) { type NoMethod BackendServicesScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendServicesScopedListWarning: Informational warning which replaces the @@ -6401,9 +6467,9 @@ type BackendServicesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *BackendServicesScopedListWarning) MarshalJSON() ([]byte, error) { +func (s BackendServicesScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod BackendServicesScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type BackendServicesScopedListWarningData struct { @@ -6430,9 +6496,9 @@ type BackendServicesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *BackendServicesScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s BackendServicesScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod BackendServicesScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type BfdPacket struct { @@ -6518,9 +6584,9 @@ type BfdPacket struct { NullFields []string `json:"-"` } -func (s *BfdPacket) MarshalJSON() ([]byte, error) { +func (s BfdPacket) MarshalJSON() ([]byte, error) { type NoMethod BfdPacket - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BfdStatus: Next free: 15 @@ -6594,9 +6660,9 @@ type BfdStatus struct { NullFields []string `json:"-"` } -func (s *BfdStatus) MarshalJSON() ([]byte, error) { +func (s BfdStatus) MarshalJSON() ([]byte, error) { type NoMethod BfdStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type BfdStatusPacketCounts struct { @@ -6625,9 +6691,9 @@ type BfdStatusPacketCounts struct { NullFields []string `json:"-"` } -func (s *BfdStatusPacketCounts) MarshalJSON() ([]byte, error) { +func (s BfdStatusPacketCounts) MarshalJSON() ([]byte, error) { type NoMethod BfdStatusPacketCounts - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Binding: Associates `members`, or principals, with a `role`. 
@@ -6726,9 +6792,9 @@ type Binding struct { NullFields []string `json:"-"` } -func (s *Binding) MarshalJSON() ([]byte, error) { +func (s Binding) MarshalJSON() ([]byte, error) { type NoMethod Binding - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BulkInsertDiskResource: A transient resource used in @@ -6755,9 +6821,9 @@ type BulkInsertDiskResource struct { NullFields []string `json:"-"` } -func (s *BulkInsertDiskResource) MarshalJSON() ([]byte, error) { +func (s BulkInsertDiskResource) MarshalJSON() ([]byte, error) { type NoMethod BulkInsertDiskResource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BulkInsertInstanceResource: A transient resource used in @@ -6770,8 +6836,8 @@ type BulkInsertInstanceResource struct { // InstanceProperties: The instance properties defining the VM instances to be // created. Required if sourceInstanceTemplate is not provided. InstanceProperties *InstanceProperties `json:"instanceProperties,omitempty"` - // LocationPolicy: Policy for chosing target zone. For more information, see - // Create VMs in bulk . + // LocationPolicy: Policy for choosing target zone. For more information, see + // Create VMs in bulk. LocationPolicy *LocationPolicy `json:"locationPolicy,omitempty"` // MinCount: The minimum number of instances to create. If no min_count is // specified then count is used as the default value. If min_count instances @@ -6817,9 +6883,9 @@ type BulkInsertInstanceResource struct { NullFields []string `json:"-"` } -func (s *BulkInsertInstanceResource) MarshalJSON() ([]byte, error) { +func (s BulkInsertInstanceResource) MarshalJSON() ([]byte, error) { type NoMethod BulkInsertInstanceResource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BulkInsertInstanceResourcePerInstanceProperties: Per-instance properties to @@ -6843,9 +6909,9 @@ type BulkInsertInstanceResourcePerInstanceProperties struct { NullFields []string `json:"-"` } -func (s *BulkInsertInstanceResourcePerInstanceProperties) MarshalJSON() ([]byte, error) { +func (s BulkInsertInstanceResourcePerInstanceProperties) MarshalJSON() ([]byte, error) { type NoMethod BulkInsertInstanceResourcePerInstanceProperties - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type BulkInsertOperationStatus struct { @@ -6880,9 +6946,9 @@ type BulkInsertOperationStatus struct { NullFields []string `json:"-"` } -func (s *BulkInsertOperationStatus) MarshalJSON() ([]byte, error) { +func (s BulkInsertOperationStatus) MarshalJSON() ([]byte, error) { type NoMethod BulkInsertOperationStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type CacheInvalidationRule struct { @@ -6903,9 +6969,9 @@ type CacheInvalidationRule struct { NullFields []string `json:"-"` } -func (s *CacheInvalidationRule) MarshalJSON() ([]byte, error) { +func (s CacheInvalidationRule) MarshalJSON() ([]byte, error) { type NoMethod CacheInvalidationRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, 
s.NullFields) } // CacheKeyPolicy: Message containing what to include in the cache key for a @@ -6949,9 +7015,9 @@ type CacheKeyPolicy struct { NullFields []string `json:"-"` } -func (s *CacheKeyPolicy) MarshalJSON() ([]byte, error) { +func (s CacheKeyPolicy) MarshalJSON() ([]byte, error) { type NoMethod CacheKeyPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CircuitBreakers: Settings controlling the volume of requests, connections @@ -6995,9 +7061,9 @@ type CircuitBreakers struct { NullFields []string `json:"-"` } -func (s *CircuitBreakers) MarshalJSON() ([]byte, error) { +func (s CircuitBreakers) MarshalJSON() ([]byte, error) { type NoMethod CircuitBreakers - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Commitment: Represents a regional Commitment resource. Creating a commitment @@ -7025,6 +7091,10 @@ type Commitment struct { Category string `json:"category,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text format. CreationTimestamp string `json:"creationTimestamp,omitempty"` + // CustomEndTimestamp: [Input Only] Optional, specifies the CUD end time + // requested by the customer in RFC3339 text format. Needed when the customer + // wants the CUD's end date to be later than the start date + term duration. + CustomEndTimestamp string `json:"customEndTimestamp,omitempty"` // Description: An optional description of this resource. Provide this property // when you create the resource. Description string `json:"description,omitempty"` @@ -7070,6 +7140,8 @@ type Commitment struct { Region string `json:"region,omitempty"` // Reservations: List of create-on-create reservations for this commitment. Reservations []*Reservation `json:"reservations,omitempty"` + // ResourceStatus: [Output Only] Status information for the Commitment resource. + ResourceStatus *CommitmentResourceStatus `json:"resourceStatus,omitempty"` // Resources: A list of commitment amounts for particular resources. Note that // VCPU and MEMORY resource commitments must occur together.
Resources []*ResourceCommitment `json:"resources,omitempty"` @@ -7110,6 +7182,7 @@ type Commitment struct { // "COMPUTE_OPTIMIZED_C3D" // "COMPUTE_OPTIMIZED_H3" // "GENERAL_PURPOSE" + // "GENERAL_PURPOSE_C4" // "GENERAL_PURPOSE_E2" // "GENERAL_PURPOSE_N2" // "GENERAL_PURPOSE_N2D" @@ -7137,9 +7210,9 @@ type Commitment struct { NullFields []string `json:"-"` } -func (s *Commitment) MarshalJSON() ([]byte, error) { +func (s Commitment) MarshalJSON() ([]byte, error) { type NoMethod Commitment - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type CommitmentAggregatedList struct { @@ -7178,9 +7251,9 @@ type CommitmentAggregatedList struct { NullFields []string `json:"-"` } -func (s *CommitmentAggregatedList) MarshalJSON() ([]byte, error) { +func (s CommitmentAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod CommitmentAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CommitmentAggregatedListWarning: [Output Only] Informational warning @@ -7264,9 +7337,9 @@ type CommitmentAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *CommitmentAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s CommitmentAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod CommitmentAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type CommitmentAggregatedListWarningData struct { @@ -7293,9 +7366,9 @@ type CommitmentAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *CommitmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s CommitmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod CommitmentAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CommitmentList: Contains a list of Commitment resources. @@ -7333,9 +7406,9 @@ type CommitmentList struct { NullFields []string `json:"-"` } -func (s *CommitmentList) MarshalJSON() ([]byte, error) { +func (s CommitmentList) MarshalJSON() ([]byte, error) { type NoMethod CommitmentList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CommitmentListWarning: [Output Only] Informational warning message. 
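The new Commitment.CustomEndTimestamp field above is input-only. A sketch of how a caller might set it, assuming the vendored google.golang.org/api/compute/v1 package; the Name and Plan fields are assumed from the wider package, and all values are illustrative:

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// Request a CUD whose end date is later than start date + term duration
	// (RFC3339 text format per the field docs; values are illustrative).
	c := &compute.Commitment{
		Name:               "my-commitment",
		Plan:               "TWELVE_MONTH", // assumed pre-existing enum value
		CustomEndTimestamp: "2027-06-30T00:00:00Z",
	}
	b, _ := c.MarshalJSON() // uses the value-receiver marshaler from this revision
	fmt.Println(string(b))
}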
@@ -7418,9 +7491,9 @@ type CommitmentListWarning struct { NullFields []string `json:"-"` } -func (s *CommitmentListWarning) MarshalJSON() ([]byte, error) { +func (s CommitmentListWarning) MarshalJSON() ([]byte, error) { type NoMethod CommitmentListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type CommitmentListWarningData struct { @@ -7447,9 +7520,36 @@ type CommitmentListWarningData struct { NullFields []string `json:"-"` } -func (s *CommitmentListWarningData) MarshalJSON() ([]byte, error) { +func (s CommitmentListWarningData) MarshalJSON() ([]byte, error) { type NoMethod CommitmentListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// CommitmentResourceStatus: [Output Only] Contains output only fields. +type CommitmentResourceStatus struct { + // CustomTermEligibilityEndTimestamp: [Output Only] Indicates the end time of + // the customer's eligibility to send custom term requests in RFC3339 text format. + // Term extension requests that arrive (not the end time in the request) after + // this time will be rejected. + CustomTermEligibilityEndTimestamp string `json:"customTermEligibilityEndTimestamp,omitempty"` + // ForceSendFields is a list of field names (e.g. + // "CustomTermEligibilityEndTimestamp") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted from + // API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. + // "CustomTermEligibilityEndTimestamp") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted from API + // requests. See https://pkg.go.dev/google.golang.org/api#hdr-NullFields for + // more details.
+ NullFields []string `json:"-"` +} + +func (s CommitmentResourceStatus) MarshalJSON() ([]byte, error) { + type NoMethod CommitmentResourceStatus + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type CommitmentsScopedList struct { @@ -7471,9 +7571,9 @@ type CommitmentsScopedList struct { NullFields []string `json:"-"` } -func (s *CommitmentsScopedList) MarshalJSON() ([]byte, error) { +func (s CommitmentsScopedList) MarshalJSON() ([]byte, error) { type NoMethod CommitmentsScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CommitmentsScopedListWarning: [Output Only] Informational warning which @@ -7557,9 +7657,9 @@ type CommitmentsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *CommitmentsScopedListWarning) MarshalJSON() ([]byte, error) { +func (s CommitmentsScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod CommitmentsScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type CommitmentsScopedListWarningData struct { @@ -7586,9 +7686,9 @@ type CommitmentsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *CommitmentsScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s CommitmentsScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod CommitmentsScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Condition: This is deprecated and has no effect. Do not use. @@ -7641,9 +7741,9 @@ type Condition struct { NullFields []string `json:"-"` } -func (s *Condition) MarshalJSON() ([]byte, error) { +func (s Condition) MarshalJSON() ([]byte, error) { type NoMethod Condition - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ConfidentialInstanceConfig: A set of Confidential Instance options. @@ -7656,6 +7756,7 @@ type ConfidentialInstanceConfig struct { // this value. // "SEV" - AMD Secure Encrypted Virtualization. // "SEV_SNP" - AMD Secure Encrypted Virtualization - Secure Nested Paging. + // "TDX" - Intel Trust Domain eXtension. ConfidentialInstanceType string `json:"confidentialInstanceType,omitempty"` // EnableConfidentialCompute: Defines whether the instance should have // confidential compute enabled. @@ -7673,9 +7774,9 @@ type ConfidentialInstanceConfig struct { NullFields []string `json:"-"` } -func (s *ConfidentialInstanceConfig) MarshalJSON() ([]byte, error) { +func (s ConfidentialInstanceConfig) MarshalJSON() ([]byte, error) { type NoMethod ConfidentialInstanceConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ConnectionDraining: Message containing connection draining configuration. 
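The ConfidentialInstanceConfig hunk above adds "TDX" as an accepted ConfidentialInstanceType. A sketch of the resulting config, assuming the vendored compute package; in real use it would be attached to an instance or instance template rather than marshaled standalone:

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	cfg := &compute.ConfidentialInstanceConfig{
		ConfidentialInstanceType:  "TDX", // Intel Trust Domain eXtension, new in this revision
		EnableConfidentialCompute: true,
	}
	b, _ := cfg.MarshalJSON()
	fmt.Println(string(b)) // {"confidentialInstanceType":"TDX","enableConfidentialCompute":true}
}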
@@ -7697,9 +7798,9 @@ type ConnectionDraining struct { NullFields []string `json:"-"` } -func (s *ConnectionDraining) MarshalJSON() ([]byte, error) { +func (s ConnectionDraining) MarshalJSON() ([]byte, error) { type NoMethod ConnectionDraining - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ConsistentHashLoadBalancerSettings: This message defines settings for a @@ -7733,9 +7834,9 @@ type ConsistentHashLoadBalancerSettings struct { NullFields []string `json:"-"` } -func (s *ConsistentHashLoadBalancerSettings) MarshalJSON() ([]byte, error) { +func (s ConsistentHashLoadBalancerSettings) MarshalJSON() ([]byte, error) { type NoMethod ConsistentHashLoadBalancerSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ConsistentHashLoadBalancerSettingsHttpCookie: The information about the HTTP @@ -7761,9 +7862,9 @@ type ConsistentHashLoadBalancerSettingsHttpCookie struct { NullFields []string `json:"-"` } -func (s *ConsistentHashLoadBalancerSettingsHttpCookie) MarshalJSON() ([]byte, error) { +func (s ConsistentHashLoadBalancerSettingsHttpCookie) MarshalJSON() ([]byte, error) { type NoMethod ConsistentHashLoadBalancerSettingsHttpCookie - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CorsPolicy: The specification for allowing client-side cross-origin @@ -7812,9 +7913,94 @@ type CorsPolicy struct { NullFields []string `json:"-"` } -func (s *CorsPolicy) MarshalJSON() ([]byte, error) { +func (s CorsPolicy) MarshalJSON() ([]byte, error) { type NoMethod CorsPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// CustomErrorResponsePolicy: Specifies the custom error response policy that +// must be applied when the backend service or backend bucket responds with an +// error. +type CustomErrorResponsePolicy struct { + // ErrorResponseRules: Specifies rules for returning error responses. In a + // given policy, if you specify rules for both a range of error codes as well + // as rules for specific error codes then rules with specific error codes have + // a higher priority. For example, assume that you configure a rule for the 401 + // (Unauthorized) code, and another for all 4xx series error codes (4XX). If the + // backend service returns a 401, then the rule for 401 will be applied. + // However, if the backend service returns a 403, the rule for 4xx takes effect. + ErrorResponseRules []*CustomErrorResponsePolicyCustomErrorResponseRule `json:"errorResponseRules,omitempty"` + // ErrorService: The full or partial URL to the BackendBucket resource that + // contains the custom error content. Examples are: - + // https://www.googleapis.com/compute/v1/projects/project/global/backendBuckets/myBackendBucket + // - compute/v1/projects/project/global/backendBuckets/myBackendBucket - + // global/backendBuckets/myBackendBucket If errorService is not specified at + // lower levels like pathMatcher, pathRule and routeRule, an errorService + // specified at a higher level in the UrlMap will be used. If + // UrlMap.defaultCustomErrorResponsePolicy contains one or more + // errorResponseRules[], it must specify errorService. If the load balancer cannot + // reach the backendBucket, a simple Not Found Error will be returned, with the + // original response code (or overrideResponseCode if configured). errorService + // is not supported for internal or regional HTTP/HTTPS load balancers. + ErrorService string `json:"errorService,omitempty"` + // ForceSendFields is a list of field names (e.g. "ErrorResponseRules") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "ErrorResponseRules") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s CustomErrorResponsePolicy) MarshalJSON() ([]byte, error) { + type NoMethod CustomErrorResponsePolicy + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// CustomErrorResponsePolicyCustomErrorResponseRule: Specifies the mapping +// between the response code that will be returned along with the custom error +// content and the response code returned by the backend service. +type CustomErrorResponsePolicyCustomErrorResponseRule struct { + // MatchResponseCodes: Valid values include: - A number between 400 and 599: + // For example, 401 or 503, in which case the load balancer applies the policy + // if the error code exactly matches this value. - 5xx: the load balancer will + // apply the policy if the backend service responds with any response code in + // the range of 500 to 599. - 4xx: the load balancer will apply the policy if the + // backend service responds with any response code in the range of 400 to 499. + // Values must be unique within matchResponseCodes and across all + // errorResponseRules of CustomErrorResponsePolicy. + MatchResponseCodes []string `json:"matchResponseCodes,omitempty"` + // OverrideResponseCode: The HTTP status code returned with the response + // containing the custom error content. If overrideResponseCode is not + // supplied, the same response code returned by the original backend bucket or + // backend service is returned to the client. + OverrideResponseCode int64 `json:"overrideResponseCode,omitempty"` + // Path: The full path to a file within backendBucket. For example: + // /errors/defaultError.html path must start with a leading slash. path cannot + // have trailing slashes. If the file is not available in backendBucket or the + // load balancer cannot reach the BackendBucket, a simple Not Found Error is + // returned to the client. The value must be from 1 to 1024 characters. + Path string `json:"path,omitempty"` + // ForceSendFields is a list of field names (e.g. "MatchResponseCodes") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "MatchResponseCodes") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"` +} + +func (s CustomErrorResponsePolicyCustomErrorResponseRule) MarshalJSON() ([]byte, error) { + type NoMethod CustomErrorResponsePolicyCustomErrorResponseRule + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type CustomerEncryptionKey struct { @@ -7865,9 +8051,9 @@ type CustomerEncryptionKey struct { NullFields []string `json:"-"` } -func (s *CustomerEncryptionKey) MarshalJSON() ([]byte, error) { +func (s CustomerEncryptionKey) MarshalJSON() ([]byte, error) { type NoMethod CustomerEncryptionKey - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type CustomerEncryptionKeyProtectedDisk struct { @@ -7891,9 +8077,9 @@ type CustomerEncryptionKeyProtectedDisk struct { NullFields []string `json:"-"` } -func (s *CustomerEncryptionKeyProtectedDisk) MarshalJSON() ([]byte, error) { +func (s CustomerEncryptionKeyProtectedDisk) MarshalJSON() ([]byte, error) { type NoMethod CustomerEncryptionKeyProtectedDisk - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DeprecationStatus: Deprecation status for a public resource. @@ -7942,9 +8128,9 @@ type DeprecationStatus struct { NullFields []string `json:"-"` } -func (s *DeprecationStatus) MarshalJSON() ([]byte, error) { +func (s DeprecationStatus) MarshalJSON() ([]byte, error) { type NoMethod DeprecationStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Disk: Represents a Persistent Disk resource. Google Compute Engine has two @@ -8236,9 +8422,9 @@ type Disk struct { NullFields []string `json:"-"` } -func (s *Disk) MarshalJSON() ([]byte, error) { +func (s Disk) MarshalJSON() ([]byte, error) { type NoMethod Disk - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type DiskAggregatedList struct { @@ -8277,9 +8463,9 @@ type DiskAggregatedList struct { NullFields []string `json:"-"` } -func (s *DiskAggregatedList) MarshalJSON() ([]byte, error) { +func (s DiskAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod DiskAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DiskAggregatedListWarning: [Output Only] Informational warning message. 
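The CustomErrorResponsePolicy types added above encode the precedence rule described in their doc comments: a rule for a specific code beats a rule for a code range. A sketch under that reading, assuming the vendored compute package; the bucket URL and paths are illustrative:

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	policy := &compute.CustomErrorResponsePolicy{
		// Partial-URL form taken from the ErrorService doc comment.
		ErrorService: "global/backendBuckets/myBackendBucket",
		ErrorResponseRules: []*compute.CustomErrorResponsePolicyCustomErrorResponseRule{
			{
				// Exact-match rule: wins over the 4xx range rule for 401s.
				MatchResponseCodes:   []string{"401"},
				OverrideResponseCode: 401,
				Path:                 "/errors/unauthorized.html",
			},
			{
				// Range rule: applies to any other 400-499 response.
				MatchResponseCodes: []string{"4xx"},
				Path:               "/errors/client-error.html",
			},
		},
	}
	b, _ := policy.MarshalJSON()
	fmt.Println(string(b))
}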
@@ -8362,9 +8548,9 @@ type DiskAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *DiskAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s DiskAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod DiskAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type DiskAggregatedListWarningData struct { @@ -8391,9 +8577,9 @@ type DiskAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *DiskAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s DiskAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod DiskAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type DiskAsyncReplication struct { @@ -8429,9 +8615,9 @@ type DiskAsyncReplication struct { NullFields []string `json:"-"` } -func (s *DiskAsyncReplication) MarshalJSON() ([]byte, error) { +func (s DiskAsyncReplication) MarshalJSON() ([]byte, error) { type NoMethod DiskAsyncReplication - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type DiskAsyncReplicationList struct { @@ -8449,9 +8635,9 @@ type DiskAsyncReplicationList struct { NullFields []string `json:"-"` } -func (s *DiskAsyncReplicationList) MarshalJSON() ([]byte, error) { +func (s DiskAsyncReplicationList) MarshalJSON() ([]byte, error) { type NoMethod DiskAsyncReplicationList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DiskInstantiationConfig: A specification of the desired way to instantiate a @@ -8514,9 +8700,9 @@ type DiskInstantiationConfig struct { NullFields []string `json:"-"` } -func (s *DiskInstantiationConfig) MarshalJSON() ([]byte, error) { +func (s DiskInstantiationConfig) MarshalJSON() ([]byte, error) { type NoMethod DiskInstantiationConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DiskList: A list of Disk resources. @@ -8554,9 +8740,9 @@ type DiskList struct { NullFields []string `json:"-"` } -func (s *DiskList) MarshalJSON() ([]byte, error) { +func (s DiskList) MarshalJSON() ([]byte, error) { type NoMethod DiskList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DiskListWarning: [Output Only] Informational warning message. 
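Every struct in this file carries the ForceSendFields/NullFields pair described in the comments above, and the value-receiver change is what guarantees they are honored for values as well as pointers. A sketch of the semantics using compute.Disk; the Name, SizeGb, and Description fields are assumed from the wider package:

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	d := &compute.Disk{
		Name:   "my-disk",
		SizeGb: 0, // zero value: omitted by default because of omitempty
		// ForceSendFields overrides omitempty for the named Go fields.
		ForceSendFields: []string{"SizeGb"},
		// NullFields emits an explicit JSON null for the named fields.
		NullFields: []string{"Description"},
	}
	b, _ := d.MarshalJSON()
	fmt.Println(string(b)) // includes a zero sizeGb and "description":null
}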
@@ -8639,9 +8825,9 @@ type DiskListWarning struct { NullFields []string `json:"-"` } -func (s *DiskListWarning) MarshalJSON() ([]byte, error) { +func (s DiskListWarning) MarshalJSON() ([]byte, error) { type NoMethod DiskListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type DiskListWarningData struct { @@ -8668,9 +8854,9 @@ type DiskListWarningData struct { NullFields []string `json:"-"` } -func (s *DiskListWarningData) MarshalJSON() ([]byte, error) { +func (s DiskListWarningData) MarshalJSON() ([]byte, error) { type NoMethod DiskListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type DiskMoveRequest struct { @@ -8697,9 +8883,9 @@ type DiskMoveRequest struct { NullFields []string `json:"-"` } -func (s *DiskMoveRequest) MarshalJSON() ([]byte, error) { +func (s DiskMoveRequest) MarshalJSON() ([]byte, error) { type NoMethod DiskMoveRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DiskParams: Additional disk params. @@ -8722,9 +8908,9 @@ type DiskParams struct { NullFields []string `json:"-"` } -func (s *DiskParams) MarshalJSON() ([]byte, error) { +func (s DiskParams) MarshalJSON() ([]byte, error) { type NoMethod DiskParams - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type DiskResourceStatus struct { @@ -8744,9 +8930,9 @@ type DiskResourceStatus struct { NullFields []string `json:"-"` } -func (s *DiskResourceStatus) MarshalJSON() ([]byte, error) { +func (s DiskResourceStatus) MarshalJSON() ([]byte, error) { type NoMethod DiskResourceStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type DiskResourceStatusAsyncReplicationStatus struct { @@ -8772,9 +8958,9 @@ type DiskResourceStatusAsyncReplicationStatus struct { NullFields []string `json:"-"` } -func (s *DiskResourceStatusAsyncReplicationStatus) MarshalJSON() ([]byte, error) { +func (s DiskResourceStatusAsyncReplicationStatus) MarshalJSON() ([]byte, error) { type NoMethod DiskResourceStatusAsyncReplicationStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DiskType: Represents a Disk Type resource. 
Google Compute Engine has two @@ -8833,9 +9019,9 @@ type DiskType struct { NullFields []string `json:"-"` } -func (s *DiskType) MarshalJSON() ([]byte, error) { +func (s DiskType) MarshalJSON() ([]byte, error) { type NoMethod DiskType - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type DiskTypeAggregatedList struct { @@ -8873,9 +9059,9 @@ type DiskTypeAggregatedList struct { NullFields []string `json:"-"` } -func (s *DiskTypeAggregatedList) MarshalJSON() ([]byte, error) { +func (s DiskTypeAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod DiskTypeAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DiskTypeAggregatedListWarning: [Output Only] Informational warning message. @@ -8958,9 +9144,9 @@ type DiskTypeAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *DiskTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s DiskTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod DiskTypeAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type DiskTypeAggregatedListWarningData struct { @@ -8987,9 +9173,9 @@ type DiskTypeAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *DiskTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s DiskTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod DiskTypeAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DiskTypeList: Contains a list of disk types. @@ -9027,9 +9213,9 @@ type DiskTypeList struct { NullFields []string `json:"-"` } -func (s *DiskTypeList) MarshalJSON() ([]byte, error) { +func (s DiskTypeList) MarshalJSON() ([]byte, error) { type NoMethod DiskTypeList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DiskTypeListWarning: [Output Only] Informational warning message. 
@@ -9112,9 +9298,9 @@ type DiskTypeListWarning struct { NullFields []string `json:"-"` } -func (s *DiskTypeListWarning) MarshalJSON() ([]byte, error) { +func (s DiskTypeListWarning) MarshalJSON() ([]byte, error) { type NoMethod DiskTypeListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type DiskTypeListWarningData struct { @@ -9141,9 +9327,9 @@ type DiskTypeListWarningData struct { NullFields []string `json:"-"` } -func (s *DiskTypeListWarningData) MarshalJSON() ([]byte, error) { +func (s DiskTypeListWarningData) MarshalJSON() ([]byte, error) { type NoMethod DiskTypeListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type DiskTypesScopedList struct { @@ -9165,9 +9351,9 @@ type DiskTypesScopedList struct { NullFields []string `json:"-"` } -func (s *DiskTypesScopedList) MarshalJSON() ([]byte, error) { +func (s DiskTypesScopedList) MarshalJSON() ([]byte, error) { type NoMethod DiskTypesScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DiskTypesScopedListWarning: [Output Only] Informational warning which @@ -9251,9 +9437,9 @@ type DiskTypesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *DiskTypesScopedListWarning) MarshalJSON() ([]byte, error) { +func (s DiskTypesScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod DiskTypesScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type DiskTypesScopedListWarningData struct { @@ -9280,9 +9466,9 @@ type DiskTypesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *DiskTypesScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s DiskTypesScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod DiskTypesScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type DisksAddResourcePoliciesRequest struct { @@ -9302,9 +9488,9 @@ type DisksAddResourcePoliciesRequest struct { NullFields []string `json:"-"` } -func (s *DisksAddResourcePoliciesRequest) MarshalJSON() ([]byte, error) { +func (s DisksAddResourcePoliciesRequest) MarshalJSON() ([]byte, error) { type NoMethod DisksAddResourcePoliciesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type DisksRemoveResourcePoliciesRequest struct { @@ -9323,9 +9509,9 @@ type DisksRemoveResourcePoliciesRequest struct { NullFields []string `json:"-"` } -func (s *DisksRemoveResourcePoliciesRequest) MarshalJSON() ([]byte, error) { +func (s DisksRemoveResourcePoliciesRequest) MarshalJSON() ([]byte, error) { type NoMethod DisksRemoveResourcePoliciesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type DisksResizeRequest struct { @@ -9344,9 +9530,9 @@ type DisksResizeRequest struct { NullFields []string `json:"-"` } -func (s *DisksResizeRequest) MarshalJSON() ([]byte, error) { +func (s DisksResizeRequest) 
MarshalJSON() ([]byte, error) { type NoMethod DisksResizeRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type DisksScopedList struct { @@ -9368,9 +9554,9 @@ type DisksScopedList struct { NullFields []string `json:"-"` } -func (s *DisksScopedList) MarshalJSON() ([]byte, error) { +func (s DisksScopedList) MarshalJSON() ([]byte, error) { type NoMethod DisksScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DisksScopedListWarning: [Output Only] Informational warning which replaces @@ -9454,9 +9640,9 @@ type DisksScopedListWarning struct { NullFields []string `json:"-"` } -func (s *DisksScopedListWarning) MarshalJSON() ([]byte, error) { +func (s DisksScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod DisksScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type DisksScopedListWarningData struct { @@ -9483,9 +9669,9 @@ type DisksScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *DisksScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s DisksScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod DisksScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type DisksStartAsyncReplicationRequest struct { @@ -9512,9 +9698,9 @@ type DisksStartAsyncReplicationRequest struct { NullFields []string `json:"-"` } -func (s *DisksStartAsyncReplicationRequest) MarshalJSON() ([]byte, error) { +func (s DisksStartAsyncReplicationRequest) MarshalJSON() ([]byte, error) { type NoMethod DisksStartAsyncReplicationRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DisksStopGroupAsyncReplicationResource: A transient resource used in @@ -9542,9 +9728,9 @@ type DisksStopGroupAsyncReplicationResource struct { NullFields []string `json:"-"` } -func (s *DisksStopGroupAsyncReplicationResource) MarshalJSON() ([]byte, error) { +func (s DisksStopGroupAsyncReplicationResource) MarshalJSON() ([]byte, error) { type NoMethod DisksStopGroupAsyncReplicationResource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DisplayDevice: A set of Display Device options @@ -9564,9 +9750,9 @@ type DisplayDevice struct { NullFields []string `json:"-"` } -func (s *DisplayDevice) MarshalJSON() ([]byte, error) { +func (s DisplayDevice) MarshalJSON() ([]byte, error) { type NoMethod DisplayDevice - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type DistributionPolicy struct { @@ -9609,9 +9795,9 @@ type DistributionPolicy struct { NullFields []string `json:"-"` } -func (s *DistributionPolicy) MarshalJSON() ([]byte, error) { +func (s DistributionPolicy) MarshalJSON() ([]byte, error) { type NoMethod DistributionPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } 
type DistributionPolicyZoneConfiguration struct { @@ -9631,9 +9817,9 @@ type DistributionPolicyZoneConfiguration struct { NullFields []string `json:"-"` } -func (s *DistributionPolicyZoneConfiguration) MarshalJSON() ([]byte, error) { +func (s DistributionPolicyZoneConfiguration) MarshalJSON() ([]byte, error) { type NoMethod DistributionPolicyZoneConfiguration - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Duration: A Duration represents a fixed-length span of time represented as a @@ -9662,9 +9848,9 @@ type Duration struct { NullFields []string `json:"-"` } -func (s *Duration) MarshalJSON() ([]byte, error) { +func (s Duration) MarshalJSON() ([]byte, error) { type NoMethod Duration - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ErrorInfo: Describes the cause of the error with structured details. Example @@ -9710,9 +9896,9 @@ type ErrorInfo struct { NullFields []string `json:"-"` } -func (s *ErrorInfo) MarshalJSON() ([]byte, error) { +func (s ErrorInfo) MarshalJSON() ([]byte, error) { type NoMethod ErrorInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ExchangedPeeringRoute struct { @@ -9749,9 +9935,9 @@ type ExchangedPeeringRoute struct { NullFields []string `json:"-"` } -func (s *ExchangedPeeringRoute) MarshalJSON() ([]byte, error) { +func (s ExchangedPeeringRoute) MarshalJSON() ([]byte, error) { type NoMethod ExchangedPeeringRoute - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ExchangedPeeringRoutesList struct { @@ -9788,9 +9974,9 @@ type ExchangedPeeringRoutesList struct { NullFields []string `json:"-"` } -func (s *ExchangedPeeringRoutesList) MarshalJSON() ([]byte, error) { +func (s ExchangedPeeringRoutesList) MarshalJSON() ([]byte, error) { type NoMethod ExchangedPeeringRoutesList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExchangedPeeringRoutesListWarning: [Output Only] Informational warning @@ -9874,9 +10060,9 @@ type ExchangedPeeringRoutesListWarning struct { NullFields []string `json:"-"` } -func (s *ExchangedPeeringRoutesListWarning) MarshalJSON() ([]byte, error) { +func (s ExchangedPeeringRoutesListWarning) MarshalJSON() ([]byte, error) { type NoMethod ExchangedPeeringRoutesListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ExchangedPeeringRoutesListWarningData struct { @@ -9903,9 +10089,9 @@ type ExchangedPeeringRoutesListWarningData struct { NullFields []string `json:"-"` } -func (s *ExchangedPeeringRoutesListWarningData) MarshalJSON() ([]byte, error) { +func (s ExchangedPeeringRoutesListWarningData) MarshalJSON() ([]byte, error) { type NoMethod ExchangedPeeringRoutesListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Expr: Represents a textual expression in the Common Expression Language @@ -9951,9 +10137,9 @@ type Expr struct { NullFields []string 
`json:"-"` } -func (s *Expr) MarshalJSON() ([]byte, error) { +func (s Expr) MarshalJSON() ([]byte, error) { type NoMethod Expr - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExternalVpnGateway: Represents an external VPN gateway. External VPN gateway @@ -10046,9 +10232,9 @@ type ExternalVpnGateway struct { NullFields []string `json:"-"` } -func (s *ExternalVpnGateway) MarshalJSON() ([]byte, error) { +func (s ExternalVpnGateway) MarshalJSON() ([]byte, error) { type NoMethod ExternalVpnGateway - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExternalVpnGatewayInterface: The interface for the external VPN gateway. @@ -10083,9 +10269,9 @@ type ExternalVpnGatewayInterface struct { NullFields []string `json:"-"` } -func (s *ExternalVpnGatewayInterface) MarshalJSON() ([]byte, error) { +func (s ExternalVpnGatewayInterface) MarshalJSON() ([]byte, error) { type NoMethod ExternalVpnGatewayInterface - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExternalVpnGatewayList: Response to the list request, and contains a list of @@ -10125,9 +10311,9 @@ type ExternalVpnGatewayList struct { NullFields []string `json:"-"` } -func (s *ExternalVpnGatewayList) MarshalJSON() ([]byte, error) { +func (s ExternalVpnGatewayList) MarshalJSON() ([]byte, error) { type NoMethod ExternalVpnGatewayList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExternalVpnGatewayListWarning: [Output Only] Informational warning message. @@ -10210,9 +10396,9 @@ type ExternalVpnGatewayListWarning struct { NullFields []string `json:"-"` } -func (s *ExternalVpnGatewayListWarning) MarshalJSON() ([]byte, error) { +func (s ExternalVpnGatewayListWarning) MarshalJSON() ([]byte, error) { type NoMethod ExternalVpnGatewayListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ExternalVpnGatewayListWarningData struct { @@ -10239,9 +10425,9 @@ type ExternalVpnGatewayListWarningData struct { NullFields []string `json:"-"` } -func (s *ExternalVpnGatewayListWarningData) MarshalJSON() ([]byte, error) { +func (s ExternalVpnGatewayListWarningData) MarshalJSON() ([]byte, error) { type NoMethod ExternalVpnGatewayListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type FileContentBuffer struct { @@ -10267,9 +10453,9 @@ type FileContentBuffer struct { NullFields []string `json:"-"` } -func (s *FileContentBuffer) MarshalJSON() ([]byte, error) { +func (s FileContentBuffer) MarshalJSON() ([]byte, error) { type NoMethod FileContentBuffer - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Firewall: Represents a Firewall Rule resource. 
Firewall rules allow or deny @@ -10403,9 +10589,9 @@ type Firewall struct { NullFields []string `json:"-"` } -func (s *Firewall) MarshalJSON() ([]byte, error) { +func (s Firewall) MarshalJSON() ([]byte, error) { type NoMethod Firewall - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type FirewallAllowed struct { @@ -10433,9 +10619,9 @@ type FirewallAllowed struct { NullFields []string `json:"-"` } -func (s *FirewallAllowed) MarshalJSON() ([]byte, error) { +func (s FirewallAllowed) MarshalJSON() ([]byte, error) { type NoMethod FirewallAllowed - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type FirewallDenied struct { @@ -10463,9 +10649,9 @@ type FirewallDenied struct { NullFields []string `json:"-"` } -func (s *FirewallDenied) MarshalJSON() ([]byte, error) { +func (s FirewallDenied) MarshalJSON() ([]byte, error) { type NoMethod FirewallDenied - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FirewallList: Contains a list of firewalls. @@ -10503,9 +10689,9 @@ type FirewallList struct { NullFields []string `json:"-"` } -func (s *FirewallList) MarshalJSON() ([]byte, error) { +func (s FirewallList) MarshalJSON() ([]byte, error) { type NoMethod FirewallList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FirewallListWarning: [Output Only] Informational warning message. @@ -10588,9 +10774,9 @@ type FirewallListWarning struct { NullFields []string `json:"-"` } -func (s *FirewallListWarning) MarshalJSON() ([]byte, error) { +func (s FirewallListWarning) MarshalJSON() ([]byte, error) { type NoMethod FirewallListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type FirewallListWarningData struct { @@ -10617,9 +10803,9 @@ type FirewallListWarningData struct { NullFields []string `json:"-"` } -func (s *FirewallListWarningData) MarshalJSON() ([]byte, error) { +func (s FirewallListWarningData) MarshalJSON() ([]byte, error) { type NoMethod FirewallListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FirewallLogConfig: The available logging options for a firewall rule. 
@@ -10648,9 +10834,9 @@ type FirewallLogConfig struct { NullFields []string `json:"-"` } -func (s *FirewallLogConfig) MarshalJSON() ([]byte, error) { +func (s FirewallLogConfig) MarshalJSON() ([]byte, error) { type NoMethod FirewallLogConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type FirewallPoliciesListAssociationsResponse struct { @@ -10676,9 +10862,9 @@ type FirewallPoliciesListAssociationsResponse struct { NullFields []string `json:"-"` } -func (s *FirewallPoliciesListAssociationsResponse) MarshalJSON() ([]byte, error) { +func (s FirewallPoliciesListAssociationsResponse) MarshalJSON() ([]byte, error) { type NoMethod FirewallPoliciesListAssociationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FirewallPolicy: Represents a Firewall Policy resource. @@ -10766,9 +10952,9 @@ type FirewallPolicy struct { NullFields []string `json:"-"` } -func (s *FirewallPolicy) MarshalJSON() ([]byte, error) { +func (s FirewallPolicy) MarshalJSON() ([]byte, error) { type NoMethod FirewallPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type FirewallPolicyAssociation struct { @@ -10800,9 +10986,9 @@ type FirewallPolicyAssociation struct { NullFields []string `json:"-"` } -func (s *FirewallPolicyAssociation) MarshalJSON() ([]byte, error) { +func (s FirewallPolicyAssociation) MarshalJSON() ([]byte, error) { type NoMethod FirewallPolicyAssociation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type FirewallPolicyList struct { @@ -10837,9 +11023,9 @@ type FirewallPolicyList struct { NullFields []string `json:"-"` } -func (s *FirewallPolicyList) MarshalJSON() ([]byte, error) { +func (s FirewallPolicyList) MarshalJSON() ([]byte, error) { type NoMethod FirewallPolicyList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FirewallPolicyListWarning: [Output Only] Informational warning message. @@ -10922,9 +11108,9 @@ type FirewallPolicyListWarning struct { NullFields []string `json:"-"` } -func (s *FirewallPolicyListWarning) MarshalJSON() ([]byte, error) { +func (s FirewallPolicyListWarning) MarshalJSON() ([]byte, error) { type NoMethod FirewallPolicyListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type FirewallPolicyListWarningData struct { @@ -10951,9 +11137,9 @@ type FirewallPolicyListWarningData struct { NullFields []string `json:"-"` } -func (s *FirewallPolicyListWarningData) MarshalJSON() ([]byte, error) { +func (s FirewallPolicyListWarningData) MarshalJSON() ([]byte, error) { type NoMethod FirewallPolicyListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FirewallPolicyRule: Represents a rule that describes one or more match @@ -10961,7 +11147,9 @@ func (s *FirewallPolicyListWarningData) MarshalJSON() ([]byte, error) { // condition (allow or deny). 
type FirewallPolicyRule struct { // Action: The Action to perform when the client connection triggers the rule. - // Valid actions are "allow", "deny" and "goto_next". + // Valid actions for firewall rules are: "allow", "deny", + // "apply_security_profile_group" and "goto_next". Valid actions for packet + // mirroring rules are: "mirror", "do_not_mirror" and "goto_next". Action string `json:"action,omitempty"` // Description: An optional description for this resource. Description string `json:"description,omitempty"` @@ -10981,8 +11169,9 @@ type FirewallPolicyRule struct { // destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. // Note: you cannot enable logging on "goto_next" rules. EnableLogging bool `json:"enableLogging,omitempty"` - // Kind: [Output only] Type of the resource. Always compute#firewallPolicyRule - // for firewall policy rules + // Kind: [Output only] Type of the resource. Returns compute#firewallPolicyRule + // for firewall rules and compute#packetMirroringRule for packet mirroring + // rules. Kind string `json:"kind,omitempty"` // Match: A match condition that incoming traffic is evaluated against. If it // evaluates to true, the corresponding 'action' is enforced. @@ -10990,7 +11179,7 @@ type FirewallPolicyRule struct { // Priority: An integer indicating the priority of a rule in the list. The // priority must be a positive value between 0 and 2147483647. Rules are // evaluated from highest to lowest priority where 0 is the highest priority - // and 2147483647 is the lowest prority. + // and 2147483647 is the lowest priority. Priority int64 `json:"priority,omitempty"` // RuleName: An optional name for the rule. This field is not a unique // identifier and can be updated. @@ -11001,8 +11190,8 @@ type FirewallPolicyRule struct { // SecurityProfileGroup: A fully-qualified URL of a SecurityProfile resource // instance. Example: // https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group - // Must be specified if action = 'apply_security_profile_group' and cannot be - // specified for other actions. + // Must be specified if action is one of 'apply_security_profile_group' or + // 'mirror'. Cannot be specified for other actions. SecurityProfileGroup string `json:"securityProfileGroup,omitempty"` // TargetResources: A list of network resource URLs to which this rule applies. // This field allows you to control which network's VMs get this rule. 
If this @@ -11040,9 +11229,9 @@ type FirewallPolicyRule struct { NullFields []string `json:"-"` } -func (s *FirewallPolicyRule) MarshalJSON() ([]byte, error) { +func (s FirewallPolicyRule) MarshalJSON() ([]byte, error) { type NoMethod FirewallPolicyRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FirewallPolicyRuleMatcher: Represents a match condition that incoming @@ -11103,9 +11292,9 @@ type FirewallPolicyRuleMatcher struct { NullFields []string `json:"-"` } -func (s *FirewallPolicyRuleMatcher) MarshalJSON() ([]byte, error) { +func (s FirewallPolicyRuleMatcher) MarshalJSON() ([]byte, error) { type NoMethod FirewallPolicyRuleMatcher - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type FirewallPolicyRuleMatcherLayer4Config struct { @@ -11133,9 +11322,9 @@ type FirewallPolicyRuleMatcherLayer4Config struct { NullFields []string `json:"-"` } -func (s *FirewallPolicyRuleMatcherLayer4Config) MarshalJSON() ([]byte, error) { +func (s FirewallPolicyRuleMatcherLayer4Config) MarshalJSON() ([]byte, error) { type NoMethod FirewallPolicyRuleMatcherLayer4Config - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type FirewallPolicyRuleSecureTag struct { @@ -11162,9 +11351,9 @@ type FirewallPolicyRuleSecureTag struct { NullFields []string `json:"-"` } -func (s *FirewallPolicyRuleSecureTag) MarshalJSON() ([]byte, error) { +func (s FirewallPolicyRuleSecureTag) MarshalJSON() ([]byte, error) { type NoMethod FirewallPolicyRuleSecureTag - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FixedOrPercent: Encapsulates numeric value that can be either absolute or @@ -11196,9 +11385,9 @@ type FixedOrPercent struct { NullFields []string `json:"-"` } -func (s *FixedOrPercent) MarshalJSON() ([]byte, error) { +func (s FixedOrPercent) MarshalJSON() ([]byte, error) { type NoMethod FixedOrPercent - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ForwardingRule: Represents a Forwarding Rule resource. 
Forwarding rule @@ -11513,9 +11702,9 @@ type ForwardingRule struct { NullFields []string `json:"-"` } -func (s *ForwardingRule) MarshalJSON() ([]byte, error) { +func (s ForwardingRule) MarshalJSON() ([]byte, error) { type NoMethod ForwardingRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ForwardingRuleAggregatedList struct { @@ -11554,9 +11743,9 @@ type ForwardingRuleAggregatedList struct { NullFields []string `json:"-"` } -func (s *ForwardingRuleAggregatedList) MarshalJSON() ([]byte, error) { +func (s ForwardingRuleAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod ForwardingRuleAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ForwardingRuleAggregatedListWarning: [Output Only] Informational warning @@ -11640,9 +11829,9 @@ type ForwardingRuleAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *ForwardingRuleAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s ForwardingRuleAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod ForwardingRuleAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ForwardingRuleAggregatedListWarningData struct { @@ -11669,9 +11858,9 @@ type ForwardingRuleAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *ForwardingRuleAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s ForwardingRuleAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod ForwardingRuleAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ForwardingRuleList: Contains a list of ForwardingRule resources. @@ -11708,9 +11897,9 @@ type ForwardingRuleList struct { NullFields []string `json:"-"` } -func (s *ForwardingRuleList) MarshalJSON() ([]byte, error) { +func (s ForwardingRuleList) MarshalJSON() ([]byte, error) { type NoMethod ForwardingRuleList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ForwardingRuleListWarning: [Output Only] Informational warning message. 
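A sketch of the packet-mirroring additions to FirewallPolicyRule a few hunks above: the "mirror" action together with the now-required SecurityProfileGroup. The URL shape is taken from the doc comment; the matcher field names are assumed from the wider package:

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	rule := &compute.FirewallPolicyRule{
		Action:   "mirror", // packet mirroring action added in this revision
		Priority: 1000,     // 0 is the highest priority, 2147483647 the lowest
		// Required when action is "mirror" or "apply_security_profile_group".
		SecurityProfileGroup: "https://networksecurity.googleapis.com/v1/projects/my-project/locations/global/securityProfileGroups/my-security-profile-group",
		Match: &compute.FirewallPolicyRuleMatcher{
			SrcIpRanges: []string{"10.0.0.0/8"},
			Layer4Configs: []*compute.FirewallPolicyRuleMatcherLayer4Config{
				{IpProtocol: "tcp"},
			},
		},
	}
	b, _ := rule.MarshalJSON()
	fmt.Println(string(b))
}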
@@ -11793,9 +11982,9 @@ type ForwardingRuleListWarning struct { NullFields []string `json:"-"` } -func (s *ForwardingRuleListWarning) MarshalJSON() ([]byte, error) { +func (s ForwardingRuleListWarning) MarshalJSON() ([]byte, error) { type NoMethod ForwardingRuleListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ForwardingRuleListWarningData struct { @@ -11822,9 +12011,9 @@ type ForwardingRuleListWarningData struct { NullFields []string `json:"-"` } -func (s *ForwardingRuleListWarningData) MarshalJSON() ([]byte, error) { +func (s ForwardingRuleListWarningData) MarshalJSON() ([]byte, error) { type NoMethod ForwardingRuleListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ForwardingRuleReference struct { @@ -11842,9 +12031,9 @@ type ForwardingRuleReference struct { NullFields []string `json:"-"` } -func (s *ForwardingRuleReference) MarshalJSON() ([]byte, error) { +func (s ForwardingRuleReference) MarshalJSON() ([]byte, error) { type NoMethod ForwardingRuleReference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ForwardingRuleServiceDirectoryRegistration: Describes the auto-registration @@ -11875,9 +12064,9 @@ type ForwardingRuleServiceDirectoryRegistration struct { NullFields []string `json:"-"` } -func (s *ForwardingRuleServiceDirectoryRegistration) MarshalJSON() ([]byte, error) { +func (s ForwardingRuleServiceDirectoryRegistration) MarshalJSON() ([]byte, error) { type NoMethod ForwardingRuleServiceDirectoryRegistration - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ForwardingRulesScopedList struct { @@ -11899,9 +12088,9 @@ type ForwardingRulesScopedList struct { NullFields []string `json:"-"` } -func (s *ForwardingRulesScopedList) MarshalJSON() ([]byte, error) { +func (s ForwardingRulesScopedList) MarshalJSON() ([]byte, error) { type NoMethod ForwardingRulesScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ForwardingRulesScopedListWarning: Informational warning which replaces the @@ -11985,9 +12174,9 @@ type ForwardingRulesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *ForwardingRulesScopedListWarning) MarshalJSON() ([]byte, error) { +func (s ForwardingRulesScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod ForwardingRulesScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ForwardingRulesScopedListWarningData struct { @@ -12014,9 +12203,9 @@ type ForwardingRulesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *ForwardingRulesScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s ForwardingRulesScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod ForwardingRulesScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type GRPCHealthCheck struct { @@ -12075,9 
+12264,9 @@ type GRPCHealthCheck struct { NullFields []string `json:"-"` } -func (s *GRPCHealthCheck) MarshalJSON() ([]byte, error) { +func (s GRPCHealthCheck) MarshalJSON() ([]byte, error) { type NoMethod GRPCHealthCheck - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type GlobalAddressesMoveRequest struct { @@ -12104,9 +12293,9 @@ type GlobalAddressesMoveRequest struct { NullFields []string `json:"-"` } -func (s *GlobalAddressesMoveRequest) MarshalJSON() ([]byte, error) { +func (s GlobalAddressesMoveRequest) MarshalJSON() ([]byte, error) { type NoMethod GlobalAddressesMoveRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type GlobalNetworkEndpointGroupsAttachEndpointsRequest struct { @@ -12125,9 +12314,9 @@ type GlobalNetworkEndpointGroupsAttachEndpointsRequest struct { NullFields []string `json:"-"` } -func (s *GlobalNetworkEndpointGroupsAttachEndpointsRequest) MarshalJSON() ([]byte, error) { +func (s GlobalNetworkEndpointGroupsAttachEndpointsRequest) MarshalJSON() ([]byte, error) { type NoMethod GlobalNetworkEndpointGroupsAttachEndpointsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type GlobalNetworkEndpointGroupsDetachEndpointsRequest struct { @@ -12146,9 +12335,9 @@ type GlobalNetworkEndpointGroupsDetachEndpointsRequest struct { NullFields []string `json:"-"` } -func (s *GlobalNetworkEndpointGroupsDetachEndpointsRequest) MarshalJSON() ([]byte, error) { +func (s GlobalNetworkEndpointGroupsDetachEndpointsRequest) MarshalJSON() ([]byte, error) { type NoMethod GlobalNetworkEndpointGroupsDetachEndpointsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type GlobalOrganizationSetPolicyRequest struct { @@ -12176,9 +12365,9 @@ type GlobalOrganizationSetPolicyRequest struct { NullFields []string `json:"-"` } -func (s *GlobalOrganizationSetPolicyRequest) MarshalJSON() ([]byte, error) { +func (s GlobalOrganizationSetPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod GlobalOrganizationSetPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type GlobalSetLabelsRequest struct { @@ -12207,9 +12396,9 @@ type GlobalSetLabelsRequest struct { NullFields []string `json:"-"` } -func (s *GlobalSetLabelsRequest) MarshalJSON() ([]byte, error) { +func (s GlobalSetLabelsRequest) MarshalJSON() ([]byte, error) { type NoMethod GlobalSetLabelsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type GlobalSetPolicyRequest struct { @@ -12237,9 +12426,9 @@ type GlobalSetPolicyRequest struct { NullFields []string `json:"-"` } -func (s *GlobalSetPolicyRequest) MarshalJSON() ([]byte, error) { +func (s GlobalSetPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod GlobalSetPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GuestAttributes: A guest attributes entry. 
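The ForceSendFields/NullFields pair carried by every struct here exists because omitempty makes zero values indistinguishable from "not set", which matters for requests like GlobalSetLabelsRequest where an empty map means "clear all labels". A self-contained sketch of the problem that gensupport.MarshalJSON works around; setLabels is a stand-in type and the fingerprint value is made up:

package main

import (
	"encoding/json"
	"fmt"
)

type setLabels struct {
	LabelFingerprint string            `json:"labelFingerprint,omitempty"`
	Labels           map[string]string `json:"labels,omitempty"`
}

func main() {
	// With plain omitempty the empty map vanishes from the request body,
	// so "delete all labels" cannot be expressed:
	b, _ := json.Marshal(setLabels{
		LabelFingerprint: "42WmSpB8rSM=", // made-up fingerprint
		Labels:           map[string]string{},
	})
	fmt.Println(string(b)) // {"labelFingerprint":"42WmSpB8rSM="}
	// Listing "Labels" in ForceSendFields tells gensupport.MarshalJSON to
	// emit `"labels": {}` anyway; NullFields does the same with JSON null.
}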
@@ -12274,9 +12463,9 @@ type GuestAttributes struct { NullFields []string `json:"-"` } -func (s *GuestAttributes) MarshalJSON() ([]byte, error) { +func (s GuestAttributes) MarshalJSON() ([]byte, error) { type NoMethod GuestAttributes - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GuestAttributesEntry: A guest attributes namespace/key/value entry. @@ -12300,9 +12489,9 @@ type GuestAttributesEntry struct { NullFields []string `json:"-"` } -func (s *GuestAttributesEntry) MarshalJSON() ([]byte, error) { +func (s GuestAttributesEntry) MarshalJSON() ([]byte, error) { type NoMethod GuestAttributesEntry - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GuestAttributesValue: Array of guest attribute namespace/key/value tuples. @@ -12321,9 +12510,9 @@ type GuestAttributesValue struct { NullFields []string `json:"-"` } -func (s *GuestAttributesValue) MarshalJSON() ([]byte, error) { +func (s GuestAttributesValue) MarshalJSON() ([]byte, error) { type NoMethod GuestAttributesValue - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GuestOsFeature: Guest OS features. @@ -12345,6 +12534,7 @@ type GuestOsFeature struct { // "SEV_LIVE_MIGRATABLE" // "SEV_LIVE_MIGRATABLE_V2" // "SEV_SNP_CAPABLE" + // "TDX_CAPABLE" // "UEFI_COMPATIBLE" // "VIRTIO_SCSI_MULTIQUEUE" // "WINDOWS" @@ -12362,9 +12552,9 @@ type GuestOsFeature struct { NullFields []string `json:"-"` } -func (s *GuestOsFeature) MarshalJSON() ([]byte, error) { +func (s GuestOsFeature) MarshalJSON() ([]byte, error) { type NoMethod GuestOsFeature - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type HTTP2HealthCheck struct { @@ -12438,9 +12628,9 @@ type HTTP2HealthCheck struct { NullFields []string `json:"-"` } -func (s *HTTP2HealthCheck) MarshalJSON() ([]byte, error) { +func (s HTTP2HealthCheck) MarshalJSON() ([]byte, error) { type NoMethod HTTP2HealthCheck - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type HTTPHealthCheck struct { @@ -12515,9 +12705,9 @@ type HTTPHealthCheck struct { NullFields []string `json:"-"` } -func (s *HTTPHealthCheck) MarshalJSON() ([]byte, error) { +func (s HTTPHealthCheck) MarshalJSON() ([]byte, error) { type NoMethod HTTPHealthCheck - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type HTTPSHealthCheck struct { @@ -12591,9 +12781,9 @@ type HTTPSHealthCheck struct { NullFields []string `json:"-"` } -func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { +func (s HTTPSHealthCheck) MarshalJSON() ([]byte, error) { type NoMethod HTTPSHealthCheck - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HealthCheck: Represents a health check resource. Google Compute Engine has @@ -12643,7 +12833,18 @@ type HealthCheck struct { // to global health checks. Region string `json:"region,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. 
- SelfLink string `json:"selfLink,omitempty"` + SelfLink string `json:"selfLink,omitempty"` + // SourceRegions: The list of cloud regions from which health checks are + // performed. If any regions are specified, then exactly 3 regions should be + // specified. The region names must be valid names of Google Cloud regions. + // This can only be set for global health check. If this list is non-empty, + // then there are restrictions on what other health check fields are supported + // and what other resources can use this health check: - SSL, HTTP2, and GRPC + // protocols are not supported. - The TCP request field is not supported. - The + // proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The + // checkIntervalSec field must be at least 30. - The health check cannot be + // used with BackendService nor with managed instance group auto-healing. + SourceRegions []string `json:"sourceRegions,omitempty"` SslHealthCheck *SSLHealthCheck `json:"sslHealthCheck,omitempty"` TcpHealthCheck *TCPHealthCheck `json:"tcpHealthCheck,omitempty"` // TimeoutSec: How long (in seconds) to wait before claiming failure. The @@ -12682,9 +12883,9 @@ type HealthCheck struct { NullFields []string `json:"-"` } -func (s *HealthCheck) MarshalJSON() ([]byte, error) { +func (s HealthCheck) MarshalJSON() ([]byte, error) { type NoMethod HealthCheck - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HealthCheckList: Contains a list of HealthCheck resources. @@ -12721,9 +12922,9 @@ type HealthCheckList struct { NullFields []string `json:"-"` } -func (s *HealthCheckList) MarshalJSON() ([]byte, error) { +func (s HealthCheckList) MarshalJSON() ([]byte, error) { type NoMethod HealthCheckList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HealthCheckListWarning: [Output Only] Informational warning message. @@ -12806,9 +13007,9 @@ type HealthCheckListWarning struct { NullFields []string `json:"-"` } -func (s *HealthCheckListWarning) MarshalJSON() ([]byte, error) { +func (s HealthCheckListWarning) MarshalJSON() ([]byte, error) { type NoMethod HealthCheckListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type HealthCheckListWarningData struct { @@ -12835,9 +13036,9 @@ type HealthCheckListWarningData struct { NullFields []string `json:"-"` } -func (s *HealthCheckListWarningData) MarshalJSON() ([]byte, error) { +func (s HealthCheckListWarningData) MarshalJSON() ([]byte, error) { type NoMethod HealthCheckListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HealthCheckLogConfig: Configuration of logging on a health check. If logging @@ -12859,9 +13060,9 @@ type HealthCheckLogConfig struct { NullFields []string `json:"-"` } -func (s *HealthCheckLogConfig) MarshalJSON() ([]byte, error) { +func (s HealthCheckLogConfig) MarshalJSON() ([]byte, error) { type NoMethod HealthCheckLogConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HealthCheckReference: A full or valid partial URL to a health check. 
For @@ -12884,9 +13085,9 @@ type HealthCheckReference struct { NullFields []string `json:"-"` } -func (s *HealthCheckReference) MarshalJSON() ([]byte, error) { +func (s HealthCheckReference) MarshalJSON() ([]byte, error) { type NoMethod HealthCheckReference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HealthCheckService: Represents a Health-Check as a Service resource. @@ -12981,9 +13182,9 @@ type HealthCheckService struct { NullFields []string `json:"-"` } -func (s *HealthCheckService) MarshalJSON() ([]byte, error) { +func (s HealthCheckService) MarshalJSON() ([]byte, error) { type NoMethod HealthCheckService - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HealthCheckServiceReference: A full or valid partial URL to a health check @@ -13007,9 +13208,9 @@ type HealthCheckServiceReference struct { NullFields []string `json:"-"` } -func (s *HealthCheckServiceReference) MarshalJSON() ([]byte, error) { +func (s HealthCheckServiceReference) MarshalJSON() ([]byte, error) { type NoMethod HealthCheckServiceReference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type HealthCheckServicesList struct { @@ -13046,9 +13247,9 @@ type HealthCheckServicesList struct { NullFields []string `json:"-"` } -func (s *HealthCheckServicesList) MarshalJSON() ([]byte, error) { +func (s HealthCheckServicesList) MarshalJSON() ([]byte, error) { type NoMethod HealthCheckServicesList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HealthCheckServicesListWarning: [Output Only] Informational warning message. 
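The SourceRegions field added to HealthCheck above carries unusually strict constraints in its doc comment: global health checks only, exactly three regions, no SSL/HTTP2/GRPC, no TCP request field or proxyHeader, and checkIntervalSec of at least 30. A sketch of a request shaped to satisfy them, assuming the surrounding google.golang.org/api/compute/v1 package; the name, regions, and path are illustrative:

package main

import compute "google.golang.org/api/compute/v1"

func main() {
	_ = &compute.HealthCheck{
		Name:             "hc-multiregion", // hypothetical
		Type:             "HTTP",           // SSL, HTTP2, and GRPC are disallowed with sourceRegions
		CheckIntervalSec: 30,               // must be at least 30
		SourceRegions:    []string{"us-central1", "us-east1", "europe-west1"}, // exactly 3
		HttpHealthCheck:  &compute.HTTPHealthCheck{Port: 80, RequestPath: "/healthz"},
	}
	// Per the comment, such a check also cannot be attached to a
	// BackendService or to managed instance group auto-healing.
}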
@@ -13131,9 +13332,9 @@ type HealthCheckServicesListWarning struct { NullFields []string `json:"-"` } -func (s *HealthCheckServicesListWarning) MarshalJSON() ([]byte, error) { +func (s HealthCheckServicesListWarning) MarshalJSON() ([]byte, error) { type NoMethod HealthCheckServicesListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type HealthCheckServicesListWarningData struct { @@ -13160,9 +13361,9 @@ type HealthCheckServicesListWarningData struct { NullFields []string `json:"-"` } -func (s *HealthCheckServicesListWarningData) MarshalJSON() ([]byte, error) { +func (s HealthCheckServicesListWarningData) MarshalJSON() ([]byte, error) { type NoMethod HealthCheckServicesListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type HealthChecksAggregatedList struct { @@ -13200,9 +13401,9 @@ type HealthChecksAggregatedList struct { NullFields []string `json:"-"` } -func (s *HealthChecksAggregatedList) MarshalJSON() ([]byte, error) { +func (s HealthChecksAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod HealthChecksAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HealthChecksAggregatedListWarning: [Output Only] Informational warning @@ -13286,9 +13487,9 @@ type HealthChecksAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *HealthChecksAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s HealthChecksAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod HealthChecksAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type HealthChecksAggregatedListWarningData struct { @@ -13315,9 +13516,9 @@ type HealthChecksAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *HealthChecksAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s HealthChecksAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod HealthChecksAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type HealthChecksScopedList struct { @@ -13339,9 +13540,9 @@ type HealthChecksScopedList struct { NullFields []string `json:"-"` } -func (s *HealthChecksScopedList) MarshalJSON() ([]byte, error) { +func (s HealthChecksScopedList) MarshalJSON() ([]byte, error) { type NoMethod HealthChecksScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HealthChecksScopedListWarning: Informational warning which replaces the list @@ -13425,9 +13626,9 @@ type HealthChecksScopedListWarning struct { NullFields []string `json:"-"` } -func (s *HealthChecksScopedListWarning) MarshalJSON() ([]byte, error) { +func (s HealthChecksScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod HealthChecksScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type HealthChecksScopedListWarningData struct 
{ @@ -13454,9 +13655,9 @@ type HealthChecksScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *HealthChecksScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s HealthChecksScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod HealthChecksScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type HealthStatus struct { @@ -13478,7 +13679,14 @@ type HealthStatus struct { // IpAddress: For target pool based Network Load Balancing, it indicates the // forwarding rule's IP address assigned to this instance. For other types of // load balancing, the field indicates VM internal ip. - IpAddress string `json:"ipAddress,omitempty"` + IpAddress string `json:"ipAddress,omitempty"` + Ipv6Address string `json:"ipv6Address,omitempty"` + // Ipv6HealthState: Health state of the IPv6 address of the instance. + // + // Possible values: + // "HEALTHY" + // "UNHEALTHY" + Ipv6HealthState string `json:"ipv6HealthState,omitempty"` // Port: The named port of the instance group, not necessarily the port that is // health-checked. Port int64 `json:"port,omitempty"` @@ -13514,9 +13722,9 @@ type HealthStatus struct { NullFields []string `json:"-"` } -func (s *HealthStatus) MarshalJSON() ([]byte, error) { +func (s HealthStatus) MarshalJSON() ([]byte, error) { type NoMethod HealthStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type HealthStatusForNetworkEndpoint struct { @@ -13554,9 +13762,9 @@ type HealthStatusForNetworkEndpoint struct { NullFields []string `json:"-"` } -func (s *HealthStatusForNetworkEndpoint) MarshalJSON() ([]byte, error) { +func (s HealthStatusForNetworkEndpoint) MarshalJSON() ([]byte, error) { type NoMethod HealthStatusForNetworkEndpoint - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Help: Provides links to documentation or for performing an out of band @@ -13581,9 +13789,9 @@ type Help struct { NullFields []string `json:"-"` } -func (s *Help) MarshalJSON() ([]byte, error) { +func (s Help) MarshalJSON() ([]byte, error) { type NoMethod Help - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HelpLink: Describes a URL link. @@ -13605,9 +13813,9 @@ type HelpLink struct { NullFields []string `json:"-"` } -func (s *HelpLink) MarshalJSON() ([]byte, error) { +func (s HelpLink) MarshalJSON() ([]byte, error) { type NoMethod HelpLink - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HostRule: UrlMaps A host-matching rule for a URL. 
If matched, will use the @@ -13639,9 +13847,9 @@ type HostRule struct { NullFields []string `json:"-"` } -func (s *HostRule) MarshalJSON() ([]byte, error) { +func (s HostRule) MarshalJSON() ([]byte, error) { type NoMethod HostRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpFaultAbort: Specification for how requests are aborted as part of fault @@ -13670,9 +13878,9 @@ type HttpFaultAbort struct { NullFields []string `json:"-"` } -func (s *HttpFaultAbort) MarshalJSON() ([]byte, error) { +func (s HttpFaultAbort) MarshalJSON() ([]byte, error) { type NoMethod HttpFaultAbort - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *HttpFaultAbort) UnmarshalJSON(data []byte) error { @@ -13711,9 +13919,9 @@ type HttpFaultDelay struct { NullFields []string `json:"-"` } -func (s *HttpFaultDelay) MarshalJSON() ([]byte, error) { +func (s HttpFaultDelay) MarshalJSON() ([]byte, error) { type NoMethod HttpFaultDelay - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *HttpFaultDelay) UnmarshalJSON(data []byte) error { @@ -13756,9 +13964,9 @@ type HttpFaultInjection struct { NullFields []string `json:"-"` } -func (s *HttpFaultInjection) MarshalJSON() ([]byte, error) { +func (s HttpFaultInjection) MarshalJSON() ([]byte, error) { type NoMethod HttpFaultInjection - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpHeaderAction: The request and response header transformations that take @@ -13790,9 +13998,9 @@ type HttpHeaderAction struct { NullFields []string `json:"-"` } -func (s *HttpHeaderAction) MarshalJSON() ([]byte, error) { +func (s HttpHeaderAction) MarshalJSON() ([]byte, error) { type NoMethod HttpHeaderAction - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpHeaderMatch: matchRule criteria for request header matches. @@ -13861,9 +14069,9 @@ type HttpHeaderMatch struct { NullFields []string `json:"-"` } -func (s *HttpHeaderMatch) MarshalJSON() ([]byte, error) { +func (s HttpHeaderMatch) MarshalJSON() ([]byte, error) { type NoMethod HttpHeaderMatch - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpHeaderOption: Specification determining how headers are added to @@ -13890,9 +14098,9 @@ type HttpHeaderOption struct { NullFields []string `json:"-"` } -func (s *HttpHeaderOption) MarshalJSON() ([]byte, error) { +func (s HttpHeaderOption) MarshalJSON() ([]byte, error) { type NoMethod HttpHeaderOption - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpHealthCheck: Represents a legacy HTTP Health Check resource. 
Legacy HTTP @@ -13963,9 +14171,9 @@ type HttpHealthCheck struct { NullFields []string `json:"-"` } -func (s *HttpHealthCheck) MarshalJSON() ([]byte, error) { +func (s HttpHealthCheck) MarshalJSON() ([]byte, error) { type NoMethod HttpHealthCheck - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpHealthCheckList: Contains a list of HttpHealthCheck resources. @@ -14002,9 +14210,9 @@ type HttpHealthCheckList struct { NullFields []string `json:"-"` } -func (s *HttpHealthCheckList) MarshalJSON() ([]byte, error) { +func (s HttpHealthCheckList) MarshalJSON() ([]byte, error) { type NoMethod HttpHealthCheckList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpHealthCheckListWarning: [Output Only] Informational warning message. @@ -14087,9 +14295,9 @@ type HttpHealthCheckListWarning struct { NullFields []string `json:"-"` } -func (s *HttpHealthCheckListWarning) MarshalJSON() ([]byte, error) { +func (s HttpHealthCheckListWarning) MarshalJSON() ([]byte, error) { type NoMethod HttpHealthCheckListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type HttpHealthCheckListWarningData struct { @@ -14116,9 +14324,9 @@ type HttpHealthCheckListWarningData struct { NullFields []string `json:"-"` } -func (s *HttpHealthCheckListWarningData) MarshalJSON() ([]byte, error) { +func (s HttpHealthCheckListWarningData) MarshalJSON() ([]byte, error) { type NoMethod HttpHealthCheckListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpQueryParameterMatch: HttpRouteRuleMatch criteria for a request's query @@ -14155,9 +14363,9 @@ type HttpQueryParameterMatch struct { NullFields []string `json:"-"` } -func (s *HttpQueryParameterMatch) MarshalJSON() ([]byte, error) { +func (s HttpQueryParameterMatch) MarshalJSON() ([]byte, error) { type NoMethod HttpQueryParameterMatch - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpRedirectAction: Specifies settings for an HTTP redirect. 
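HttpQueryParameterMatch above requires exactly one of presentMatch, exactMatch, or regexMatch to be set per matcher. A sketch of a matcher for requests carrying ?version=beta, assuming the surrounding compute/v1 package; the parameter name and value are hypothetical:

package main

import compute "google.golang.org/api/compute/v1"

func main() {
	_ = &compute.HttpQueryParameterMatch{
		Name:       "version", // hypothetical query parameter
		ExactMatch: "beta",    // exactly one of presentMatch/exactMatch/regexMatch may be set
	}
}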
@@ -14218,9 +14426,9 @@ type HttpRedirectAction struct { NullFields []string `json:"-"` } -func (s *HttpRedirectAction) MarshalJSON() ([]byte, error) { +func (s HttpRedirectAction) MarshalJSON() ([]byte, error) { type NoMethod HttpRedirectAction - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpRetryPolicy: The retry policy associates with HttpRouteRule @@ -14272,9 +14480,9 @@ type HttpRetryPolicy struct { NullFields []string `json:"-"` } -func (s *HttpRetryPolicy) MarshalJSON() ([]byte, error) { +func (s HttpRetryPolicy) MarshalJSON() ([]byte, error) { type NoMethod HttpRetryPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type HttpRouteAction struct { @@ -14352,15 +14560,41 @@ type HttpRouteAction struct { NullFields []string `json:"-"` } -func (s *HttpRouteAction) MarshalJSON() ([]byte, error) { +func (s HttpRouteAction) MarshalJSON() ([]byte, error) { type NoMethod HttpRouteAction - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpRouteRule: The HttpRouteRule setting specifies how to match an HTTP // request and the corresponding routing action that load balancing proxies // perform. type HttpRouteRule struct { + // CustomErrorResponsePolicy: customErrorResponsePolicy specifies how the Load + // Balancer returns error responses when BackendServiceor BackendBucket + // responds with an error. If a policy for an error code is not configured for + // the RouteRule, a policy for the error code configured in + // pathMatcher.defaultCustomErrorResponsePolicy is applied. If one is not + // specified in pathMatcher.defaultCustomErrorResponsePolicy, the policy + // configured in UrlMap.defaultCustomErrorResponsePolicy takes effect. For + // example, consider a UrlMap with the following configuration: - + // UrlMap.defaultCustomErrorResponsePolicy are configured with policies for 5xx + // and 4xx errors - A RouteRule for /coming_soon/ is configured for the error + // code 404. If the request is for www.myotherdomain.com and a 404 is + // encountered, the policy under UrlMap.defaultCustomErrorResponsePolicy takes + // effect. If a 404 response is encountered for the request + // www.example.com/current_events/, the pathMatcher's policy takes effect. If + // however, the request for www.example.com/coming_soon/ encounters a 404, the + // policy in RouteRule.customErrorResponsePolicy takes effect. If any of the + // requests in this example encounter a 500 error code, the policy at + // UrlMap.defaultCustomErrorResponsePolicy takes effect. When used in + // conjunction with routeRules.routeAction.retryPolicy, retries take + // precedence. Only once all retries are exhausted, the + // customErrorResponsePolicy is applied. While attempting a retry, if load + // balancer is successful in reaching the service, the + // customErrorResponsePolicy is ignored and the response from the service is + // returned to the client. customErrorResponsePolicy is supported only for + // global external Application Load Balancers. + CustomErrorResponsePolicy *CustomErrorResponsePolicy `json:"customErrorResponsePolicy,omitempty"` // Description: The short description conveying the intent of this routeRule. // The description can have a maximum length of 1024 characters. 
Description string `json:"description,omitempty"` @@ -14415,22 +14649,22 @@ type HttpRouteRule struct { // routeAction must not be set. Not supported when the URL map is bound to a // target gRPC proxy. UrlRedirect *HttpRedirectAction `json:"urlRedirect,omitempty"` - // ForceSendFields is a list of field names (e.g. "Description") to - // unconditionally include in API requests. By default, fields with empty or + // ForceSendFields is a list of field names (e.g. "CustomErrorResponsePolicy") + // to unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Description") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "CustomErrorResponsePolicy") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *HttpRouteRule) MarshalJSON() ([]byte, error) { +func (s HttpRouteRule) MarshalJSON() ([]byte, error) { type NoMethod HttpRouteRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpRouteRuleMatch: HttpRouteRuleMatch specifies a set of criteria for @@ -14504,9 +14738,9 @@ type HttpRouteRuleMatch struct { NullFields []string `json:"-"` } -func (s *HttpRouteRuleMatch) MarshalJSON() ([]byte, error) { +func (s HttpRouteRuleMatch) MarshalJSON() ([]byte, error) { type NoMethod HttpRouteRuleMatch - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpsHealthCheck: Represents a legacy HTTPS Health Check resource. Legacy @@ -14576,9 +14810,9 @@ type HttpsHealthCheck struct { NullFields []string `json:"-"` } -func (s *HttpsHealthCheck) MarshalJSON() ([]byte, error) { +func (s HttpsHealthCheck) MarshalJSON() ([]byte, error) { type NoMethod HttpsHealthCheck - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpsHealthCheckList: Contains a list of HttpsHealthCheck resources. @@ -14615,9 +14849,9 @@ type HttpsHealthCheckList struct { NullFields []string `json:"-"` } -func (s *HttpsHealthCheckList) MarshalJSON() ([]byte, error) { +func (s HttpsHealthCheckList) MarshalJSON() ([]byte, error) { type NoMethod HttpsHealthCheckList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpsHealthCheckListWarning: [Output Only] Informational warning message. 
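The long customErrorResponsePolicy comment added to HttpRouteRule above boils down to a fixed precedence: retries run first, and only once they are exhausted does the most specific configured policy apply, from RouteRule, then pathMatcher.defaultCustomErrorResponsePolicy, then UrlMap.defaultCustomErrorResponsePolicy. My reading of that order, reduced to an illustrative lookup helper (not library code; "has" stands for "a policy at that level matches the error code"):

package main

import "fmt"

func policySource(routeRuleHas, pathMatcherHas, urlMapHas bool) string {
	switch {
	case routeRuleHas:
		return "RouteRule.customErrorResponsePolicy"
	case pathMatcherHas:
		return "pathMatcher.defaultCustomErrorResponsePolicy"
	case urlMapHas:
		return "UrlMap.defaultCustomErrorResponsePolicy"
	default:
		return "pass the backend error through"
	}
}

func main() {
	// The comment's example: a 404 on /coming_soon/ is handled at the
	// RouteRule level even though broader policies also cover 4xx.
	fmt.Println(policySource(true, true, true))
}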
@@ -14700,9 +14934,9 @@ type HttpsHealthCheckListWarning struct { NullFields []string `json:"-"` } -func (s *HttpsHealthCheckListWarning) MarshalJSON() ([]byte, error) { +func (s HttpsHealthCheckListWarning) MarshalJSON() ([]byte, error) { type NoMethod HttpsHealthCheckListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type HttpsHealthCheckListWarningData struct { @@ -14729,9 +14963,9 @@ type HttpsHealthCheckListWarningData struct { NullFields []string `json:"-"` } -func (s *HttpsHealthCheckListWarningData) MarshalJSON() ([]byte, error) { +func (s HttpsHealthCheckListWarningData) MarshalJSON() ([]byte, error) { type NoMethod HttpsHealthCheckListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Image: Represents an Image resource. You can use images to create boot disks @@ -14906,9 +15140,9 @@ type Image struct { NullFields []string `json:"-"` } -func (s *Image) MarshalJSON() ([]byte, error) { +func (s Image) MarshalJSON() ([]byte, error) { type NoMethod Image - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ImageRawDisk: The parameters of the raw disk image. @@ -14945,9 +15179,9 @@ type ImageRawDisk struct { NullFields []string `json:"-"` } -func (s *ImageRawDisk) MarshalJSON() ([]byte, error) { +func (s ImageRawDisk) MarshalJSON() ([]byte, error) { type NoMethod ImageRawDisk - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ImageFamilyView struct { @@ -14970,9 +15204,9 @@ type ImageFamilyView struct { NullFields []string `json:"-"` } -func (s *ImageFamilyView) MarshalJSON() ([]byte, error) { +func (s ImageFamilyView) MarshalJSON() ([]byte, error) { type NoMethod ImageFamilyView - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ImageList: Contains a list of images. @@ -15009,9 +15243,9 @@ type ImageList struct { NullFields []string `json:"-"` } -func (s *ImageList) MarshalJSON() ([]byte, error) { +func (s ImageList) MarshalJSON() ([]byte, error) { type NoMethod ImageList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ImageListWarning: [Output Only] Informational warning message. 
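ImageList follows the same NextPageToken shape as every other *List and *AggregatedList type in this file, and the generated calls expose a Pages helper that follows the token automatically. A sketch of paging through images, assuming Application Default Credentials are available; the project ID is a placeholder:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	// Pages keeps requesting with the returned NextPageToken until the
	// ImageList pages run out, invoking the callback once per page.
	err = svc.Images.List("my-project").Pages(ctx, func(page *compute.ImageList) error {
		for _, img := range page.Items {
			fmt.Println(img.Name, img.Family)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}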
@@ -15094,9 +15328,9 @@ type ImageListWarning struct { NullFields []string `json:"-"` } -func (s *ImageListWarning) MarshalJSON() ([]byte, error) { +func (s ImageListWarning) MarshalJSON() ([]byte, error) { type NoMethod ImageListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ImageListWarningData struct { @@ -15123,9 +15357,9 @@ type ImageListWarningData struct { NullFields []string `json:"-"` } -func (s *ImageListWarningData) MarshalJSON() ([]byte, error) { +func (s ImageListWarningData) MarshalJSON() ([]byte, error) { type NoMethod ImageListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InitialStateConfig: Initial State for shielded instance, these are public @@ -15152,9 +15386,9 @@ type InitialStateConfig struct { NullFields []string `json:"-"` } -func (s *InitialStateConfig) MarshalJSON() ([]byte, error) { +func (s InitialStateConfig) MarshalJSON() ([]byte, error) { type NoMethod InitialStateConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Instance: Represents an Instance resource. An instance is a virtual machine @@ -15378,9 +15612,9 @@ type Instance struct { NullFields []string `json:"-"` } -func (s *Instance) MarshalJSON() ([]byte, error) { +func (s Instance) MarshalJSON() ([]byte, error) { type NoMethod Instance - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceAggregatedList struct { @@ -15419,9 +15653,9 @@ type InstanceAggregatedList struct { NullFields []string `json:"-"` } -func (s *InstanceAggregatedList) MarshalJSON() ([]byte, error) { +func (s InstanceAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod InstanceAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceAggregatedListWarning: [Output Only] Informational warning message. 
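Unlike the flat lists, InstanceAggregatedList keys its Items map by scope, so callers iterate zones and regions rather than a single slice. A sketch of walking one page of the aggregated response, with a placeholder project ID:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	agg, err := svc.Instances.AggregatedList("my-project").Do() // first page only
	if err != nil {
		log.Fatal(err)
	}
	for scope, scoped := range agg.Items { // keys look like "zones/us-central1-a"
		for _, inst := range scoped.Instances {
			fmt.Println(scope, inst.Name, inst.Status)
		}
	}
}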
@@ -15504,9 +15738,9 @@ type InstanceAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *InstanceAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s InstanceAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod InstanceAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceAggregatedListWarningData struct { @@ -15533,9 +15767,9 @@ type InstanceAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *InstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s InstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod InstanceAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceConsumptionData struct { @@ -15556,9 +15790,9 @@ type InstanceConsumptionData struct { NullFields []string `json:"-"` } -func (s *InstanceConsumptionData) MarshalJSON() ([]byte, error) { +func (s InstanceConsumptionData) MarshalJSON() ([]byte, error) { type NoMethod InstanceConsumptionData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceConsumptionInfo struct { @@ -15586,9 +15820,9 @@ type InstanceConsumptionInfo struct { NullFields []string `json:"-"` } -func (s *InstanceConsumptionInfo) MarshalJSON() ([]byte, error) { +func (s InstanceConsumptionInfo) MarshalJSON() ([]byte, error) { type NoMethod InstanceConsumptionInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceGroup: Represents an Instance Group resource. 
Instance Groups can be @@ -15663,9 +15897,9 @@ type InstanceGroup struct { NullFields []string `json:"-"` } -func (s *InstanceGroup) MarshalJSON() ([]byte, error) { +func (s InstanceGroup) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroup - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupAggregatedList struct { @@ -15704,9 +15938,9 @@ type InstanceGroupAggregatedList struct { NullFields []string `json:"-"` } -func (s *InstanceGroupAggregatedList) MarshalJSON() ([]byte, error) { +func (s InstanceGroupAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceGroupAggregatedListWarning: [Output Only] Informational warning @@ -15790,9 +16024,9 @@ type InstanceGroupAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *InstanceGroupAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s InstanceGroupAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupAggregatedListWarningData struct { @@ -15819,9 +16053,9 @@ type InstanceGroupAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *InstanceGroupAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s InstanceGroupAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceGroupList: A list of InstanceGroup resources. @@ -15859,9 +16093,9 @@ type InstanceGroupList struct { NullFields []string `json:"-"` } -func (s *InstanceGroupList) MarshalJSON() ([]byte, error) { +func (s InstanceGroupList) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceGroupListWarning: [Output Only] Informational warning message. @@ -15944,9 +16178,9 @@ type InstanceGroupListWarning struct { NullFields []string `json:"-"` } -func (s *InstanceGroupListWarning) MarshalJSON() ([]byte, error) { +func (s InstanceGroupListWarning) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupListWarningData struct { @@ -15973,9 +16207,9 @@ type InstanceGroupListWarningData struct { NullFields []string `json:"-"` } -func (s *InstanceGroupListWarningData) MarshalJSON() ([]byte, error) { +func (s InstanceGroupListWarningData) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceGroupManager: Represents a Managed Instance Group resource. 
An @@ -15990,10 +16224,16 @@ type InstanceGroupManager struct { // AutoHealingPolicies: The autohealing policy for this managed instance group. // You can specify only one value. AutoHealingPolicies []*InstanceGroupManagerAutoHealingPolicy `json:"autoHealingPolicies,omitempty"` - // BaseInstanceName: The base instance name to use for instances in this group. - // The value must be 1-58 characters long. Instances are named by appending a - // hyphen and a random four-character string to the base instance name. The - // base instance name must comply with RFC1035. + // BaseInstanceName: The base instance name is a prefix that you want to attach + // to the names of all VMs in a MIG. The maximum character length is 58 and the + // name must comply with RFC1035 format. When a VM is created in the group, the + // MIG appends a hyphen and a random four-character string to the base instance + // name. If you want the MIG to assign sequential numbers instead of a random + // string, then end the base instance name with a hyphen followed by one or + // more hash symbols. The hash symbols indicate the number of digits. For + // example, a base instance name of "vm-###" results in "vm-001" as a VM name. + // @pattern a-z + // (([-a-z0-9]{0,57})|([-a-z0-9]{0,51}-#{1,10}(\\[[0-9]{1,10}\\])?)) BaseInstanceName string `json:"baseInstanceName,omitempty"` // CreationTimestamp: [Output Only] The creation timestamp for this managed // instance group in RFC3339 text format. @@ -16098,9 +16338,9 @@ type InstanceGroupManager struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManager) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManager) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManager - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagerActionsSummary struct { @@ -16169,9 +16409,9 @@ type InstanceGroupManagerActionsSummary struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerActionsSummary) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerActionsSummary) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerActionsSummary - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagerAggregatedList struct { @@ -16211,9 +16451,9 @@ type InstanceGroupManagerAggregatedList struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerAggregatedList) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceGroupManagerAggregatedListWarning: [Output Only] Informational @@ -16297,9 +16537,9 @@ type InstanceGroupManagerAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagerAggregatedListWarningData 
struct { @@ -16326,9 +16566,9 @@ type InstanceGroupManagerAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagerAllInstancesConfig struct { @@ -16352,9 +16592,9 @@ type InstanceGroupManagerAllInstancesConfig struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerAllInstancesConfig) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerAllInstancesConfig) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerAllInstancesConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagerAutoHealingPolicy struct { @@ -16382,9 +16622,9 @@ type InstanceGroupManagerAutoHealingPolicy struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerAutoHealingPolicy) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerAutoHealingPolicy) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerAutoHealingPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagerInstanceLifecyclePolicy struct { @@ -16424,9 +16664,9 @@ type InstanceGroupManagerInstanceLifecyclePolicy struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerInstanceLifecyclePolicy) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerInstanceLifecyclePolicy) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerInstanceLifecyclePolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceGroupManagerList: [Output Only] A list of managed instance groups. 
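The rewritten BaseInstanceName comment above documents a naming feature the old text omitted: ending the base name with hash symbols asks the MIG for sequential numbering instead of a random four-character suffix, with the number of hashes setting the digit count. A sketch of a manager using it, assuming the surrounding compute/v1 package; the names are hypothetical:

package main

import compute "google.golang.org/api/compute/v1"

func main() {
	_ = &compute.InstanceGroupManager{
		Name:             "web-mig",  // hypothetical
		BaseInstanceName: "vm-###",   // trailing #s request sequential names: vm-001, vm-002, ...
		TargetSize:       3,
		InstanceTemplate: "global/instanceTemplates/web-template", // hypothetical
	}
}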
@@ -16464,9 +16704,9 @@ type InstanceGroupManagerList struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerList) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerList) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceGroupManagerListWarning: [Output Only] Informational warning @@ -16550,9 +16790,9 @@ type InstanceGroupManagerListWarning struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerListWarning) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerListWarning) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagerListWarningData struct { @@ -16579,9 +16819,9 @@ type InstanceGroupManagerListWarningData struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceGroupManagerResizeRequest: InstanceGroupManagerResizeRequest @@ -16609,7 +16849,8 @@ type InstanceGroupManagerResizeRequest struct { // deleted. RequestedRunDuration *Duration `json:"requestedRunDuration,omitempty"` // ResizeBy: The number of instances to be created by this resize request. The - // group's target size will be increased by this number. + // group's target size will be increased by this number. This field cannot be + // used together with 'instances'. ResizeBy int64 `json:"resizeBy,omitempty"` // SelfLink: [Output Only] The URL for this resize request. The server defines // this URL. 
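The clarified ResizeBy comment above makes the field relative (it raises the group's target size by that amount) and mutually exclusive with an explicit instance list. A sketch of a resize request using it together with RequestedRunDuration, assuming the surrounding compute/v1 package; the name and duration are illustrative:

package main

import compute "google.golang.org/api/compute/v1"

func main() {
	_ = &compute.InstanceGroupManagerResizeRequest{
		Name:     "rr-batch-1", // hypothetical
		ResizeBy: 10,           // raises the MIG target size by 10; cannot be combined with 'instances'
		RequestedRunDuration: &compute.Duration{
			Seconds: 3600, // per the doc, the created VMs are deleted once this elapses
		},
	}
}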
@@ -16651,9 +16892,9 @@ type InstanceGroupManagerResizeRequest struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerResizeRequest) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerResizeRequest) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerResizeRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagerResizeRequestStatus struct { @@ -16685,9 +16926,9 @@ type InstanceGroupManagerResizeRequestStatus struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerResizeRequestStatus) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerResizeRequestStatus) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerResizeRequestStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceGroupManagerResizeRequestStatusError: [Output only] Fatal errors @@ -16712,9 +16953,9 @@ type InstanceGroupManagerResizeRequestStatusError struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerResizeRequestStatusError) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerResizeRequestStatusError) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerResizeRequestStatusError - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagerResizeRequestStatusErrorErrors struct { @@ -16743,9 +16984,9 @@ type InstanceGroupManagerResizeRequestStatusErrorErrors struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerResizeRequestStatusErrorErrors) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerResizeRequestStatusErrorErrors) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerResizeRequestStatusErrorErrors - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagerResizeRequestStatusErrorErrorsErrorDetails struct { @@ -16766,9 +17007,9 @@ type InstanceGroupManagerResizeRequestStatusErrorErrorsErrorDetails struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerResizeRequestStatusErrorErrorsErrorDetails) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerResizeRequestStatusErrorErrorsErrorDetails) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerResizeRequestStatusErrorErrorsErrorDetails - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagerResizeRequestStatusLastAttempt struct { @@ -16787,9 +17028,9 @@ type InstanceGroupManagerResizeRequestStatusLastAttempt struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerResizeRequestStatusLastAttempt) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerResizeRequestStatusLastAttempt) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerResizeRequestStatusLastAttempt - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceGroupManagerResizeRequestStatusLastAttemptError: Errors that @@ -16811,9 +17052,9 @@ type 
InstanceGroupManagerResizeRequestStatusLastAttemptError struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerResizeRequestStatusLastAttemptError) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerResizeRequestStatusLastAttemptError) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerResizeRequestStatusLastAttemptError - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagerResizeRequestStatusLastAttemptErrorErrors struct { @@ -16842,9 +17083,9 @@ type InstanceGroupManagerResizeRequestStatusLastAttemptErrorErrors struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerResizeRequestStatusLastAttemptErrorErrors) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerResizeRequestStatusLastAttemptErrorErrors) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerResizeRequestStatusLastAttemptErrorErrors - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagerResizeRequestStatusLastAttemptErrorErrorsErrorDetails struct { @@ -16865,9 +17106,9 @@ type InstanceGroupManagerResizeRequestStatusLastAttemptErrorErrorsErrorDetails s NullFields []string `json:"-"` } -func (s *InstanceGroupManagerResizeRequestStatusLastAttemptErrorErrorsErrorDetails) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerResizeRequestStatusLastAttemptErrorErrorsErrorDetails) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerResizeRequestStatusLastAttemptErrorErrorsErrorDetails - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceGroupManagerResizeRequestsListResponse: [Output Only] A list of @@ -16906,9 +17147,9 @@ type InstanceGroupManagerResizeRequestsListResponse struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerResizeRequestsListResponse) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerResizeRequestsListResponse) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerResizeRequestsListResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceGroupManagerResizeRequestsListResponseWarning: [Output Only] @@ -16992,9 +17233,9 @@ type InstanceGroupManagerResizeRequestsListResponseWarning struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerResizeRequestsListResponseWarning) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerResizeRequestsListResponseWarning) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerResizeRequestsListResponseWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagerResizeRequestsListResponseWarningData struct { @@ -17021,9 +17262,9 @@ type InstanceGroupManagerResizeRequestsListResponseWarningData struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerResizeRequestsListResponseWarningData) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerResizeRequestsListResponseWarningData) MarshalJSON() ([]byte, error) { type NoMethod 
InstanceGroupManagerResizeRequestsListResponseWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagerStatus struct { @@ -17059,9 +17300,9 @@ type InstanceGroupManagerStatus struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerStatus) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerStatus) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagerStatusAllInstancesConfig struct { @@ -17084,9 +17325,9 @@ type InstanceGroupManagerStatusAllInstancesConfig struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerStatusAllInstancesConfig) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerStatusAllInstancesConfig) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerStatusAllInstancesConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagerStatusStateful struct { @@ -17113,9 +17354,9 @@ type InstanceGroupManagerStatusStateful struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerStatusStateful) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerStatusStateful) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerStatusStateful - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagerStatusStatefulPerInstanceConfigs struct { @@ -17136,9 +17377,9 @@ type InstanceGroupManagerStatusStatefulPerInstanceConfigs struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerStatusStatefulPerInstanceConfigs) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerStatusStatefulPerInstanceConfigs) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerStatusStatefulPerInstanceConfigs - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagerStatusVersionTarget struct { @@ -17160,9 +17401,9 @@ type InstanceGroupManagerStatusVersionTarget struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerStatusVersionTarget) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerStatusVersionTarget) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerStatusVersionTarget - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagerUpdatePolicy struct { @@ -17264,9 +17505,9 @@ type InstanceGroupManagerUpdatePolicy struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerUpdatePolicy) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerUpdatePolicy) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerUpdatePolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagerVersion struct { @@ -17304,9 +17545,9 @@ type InstanceGroupManagerVersion struct { 
NullFields []string `json:"-"` } -func (s *InstanceGroupManagerVersion) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagerVersion) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagerVersion - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagersAbandonInstancesRequest struct { @@ -17326,9 +17567,9 @@ type InstanceGroupManagersAbandonInstancesRequest struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagersAbandonInstancesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceGroupManagersApplyUpdatesRequest: @@ -17385,9 +17626,9 @@ type InstanceGroupManagersApplyUpdatesRequest struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagersApplyUpdatesRequest) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagersApplyUpdatesRequest) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagersApplyUpdatesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceGroupManagersCreateInstancesRequest: @@ -17408,9 +17649,9 @@ type InstanceGroupManagersCreateInstancesRequest struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagersCreateInstancesRequest) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagersCreateInstancesRequest) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagersCreateInstancesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagersDeleteInstancesRequest struct { @@ -17440,9 +17681,9 @@ type InstanceGroupManagersDeleteInstancesRequest struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagersDeleteInstancesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceGroupManagersDeletePerInstanceConfigsReq: @@ -17464,9 +17705,9 @@ type InstanceGroupManagersDeletePerInstanceConfigsReq struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagersDeletePerInstanceConfigsReq) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagersDeletePerInstanceConfigsReq) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagersDeletePerInstanceConfigsReq - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagersListErrorsResponse struct { @@ -17494,9 +17735,9 @@ type InstanceGroupManagersListErrorsResponse struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagersListErrorsResponse) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagersListErrorsResponse) MarshalJSON() ([]byte, error) { type NoMethod 
InstanceGroupManagersListErrorsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagersListManagedInstancesResponse struct { @@ -17525,9 +17766,9 @@ type InstanceGroupManagersListManagedInstancesResponse struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagersListManagedInstancesResponse) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagersListManagedInstancesResponse) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagersListManagedInstancesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagersListPerInstanceConfigsResp struct { @@ -17557,9 +17798,9 @@ type InstanceGroupManagersListPerInstanceConfigsResp struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagersListPerInstanceConfigsResp) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagersListPerInstanceConfigsResp) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagersListPerInstanceConfigsResp - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceGroupManagersListPerInstanceConfigsRespWarning: [Output Only] @@ -17643,9 +17884,9 @@ type InstanceGroupManagersListPerInstanceConfigsRespWarning struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagersListPerInstanceConfigsRespWarning) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagersListPerInstanceConfigsRespWarning) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagersListPerInstanceConfigsRespWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagersListPerInstanceConfigsRespWarningData struct { @@ -17672,9 +17913,9 @@ type InstanceGroupManagersListPerInstanceConfigsRespWarningData struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagersListPerInstanceConfigsRespWarningData) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagersListPerInstanceConfigsRespWarningData) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagersListPerInstanceConfigsRespWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceGroupManagersPatchPerInstanceConfigsReq: @@ -17696,9 +17937,9 @@ type InstanceGroupManagersPatchPerInstanceConfigsReq struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagersPatchPerInstanceConfigsReq) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagersPatchPerInstanceConfigsReq) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagersPatchPerInstanceConfigsReq - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagersRecreateInstancesRequest struct { @@ -17718,9 +17959,9 @@ type InstanceGroupManagersRecreateInstancesRequest struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagersRecreateInstancesRequest) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagersRecreateInstancesRequest) MarshalJSON() ([]byte, 
error) { type NoMethod InstanceGroupManagersRecreateInstancesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagersScopedList struct { @@ -17743,9 +17984,9 @@ type InstanceGroupManagersScopedList struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagersScopedList) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagersScopedList) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagersScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceGroupManagersScopedListWarning: [Output Only] The warning that @@ -17829,9 +18070,9 @@ type InstanceGroupManagersScopedListWarning struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagersScopedListWarning) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagersScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagersScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagersScopedListWarningData struct { @@ -17858,9 +18099,9 @@ type InstanceGroupManagersScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagersScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagersScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagersScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagersSetInstanceTemplateRequest struct { @@ -17883,9 +18124,9 @@ type InstanceGroupManagersSetInstanceTemplateRequest struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagersSetInstanceTemplateRequest) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagersSetInstanceTemplateRequest) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagersSetInstanceTemplateRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupManagersSetTargetPoolsRequest struct { @@ -17914,9 +18155,9 @@ type InstanceGroupManagersSetTargetPoolsRequest struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagersSetTargetPoolsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceGroupManagersUpdatePerInstanceConfigsReq: @@ -17938,9 +18179,9 @@ type InstanceGroupManagersUpdatePerInstanceConfigsReq struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagersUpdatePerInstanceConfigsReq) MarshalJSON() ([]byte, error) { +func (s InstanceGroupManagersUpdatePerInstanceConfigsReq) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupManagersUpdatePerInstanceConfigsReq - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, 
s.NullFields) } type InstanceGroupsAddInstancesRequest struct { @@ -17959,9 +18200,9 @@ type InstanceGroupsAddInstancesRequest struct { NullFields []string `json:"-"` } -func (s *InstanceGroupsAddInstancesRequest) MarshalJSON() ([]byte, error) { +func (s InstanceGroupsAddInstancesRequest) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupsAddInstancesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupsListInstances struct { @@ -17999,9 +18240,9 @@ type InstanceGroupsListInstances struct { NullFields []string `json:"-"` } -func (s *InstanceGroupsListInstances) MarshalJSON() ([]byte, error) { +func (s InstanceGroupsListInstances) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupsListInstances - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceGroupsListInstancesWarning: [Output Only] Informational warning @@ -18085,9 +18326,9 @@ type InstanceGroupsListInstancesWarning struct { NullFields []string `json:"-"` } -func (s *InstanceGroupsListInstancesWarning) MarshalJSON() ([]byte, error) { +func (s InstanceGroupsListInstancesWarning) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupsListInstancesWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupsListInstancesWarningData struct { @@ -18114,9 +18355,9 @@ type InstanceGroupsListInstancesWarningData struct { NullFields []string `json:"-"` } -func (s *InstanceGroupsListInstancesWarningData) MarshalJSON() ([]byte, error) { +func (s InstanceGroupsListInstancesWarningData) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupsListInstancesWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupsListInstancesRequest struct { @@ -18143,9 +18384,9 @@ type InstanceGroupsListInstancesRequest struct { NullFields []string `json:"-"` } -func (s *InstanceGroupsListInstancesRequest) MarshalJSON() ([]byte, error) { +func (s InstanceGroupsListInstancesRequest) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupsListInstancesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupsRemoveInstancesRequest struct { @@ -18164,9 +18405,9 @@ type InstanceGroupsRemoveInstancesRequest struct { NullFields []string `json:"-"` } -func (s *InstanceGroupsRemoveInstancesRequest) MarshalJSON() ([]byte, error) { +func (s InstanceGroupsRemoveInstancesRequest) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupsRemoveInstancesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupsScopedList struct { @@ -18189,9 +18430,9 @@ type InstanceGroupsScopedList struct { NullFields []string `json:"-"` } -func (s *InstanceGroupsScopedList) MarshalJSON() ([]byte, error) { +func (s InstanceGroupsScopedList) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupsScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return 
gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceGroupsScopedListWarning: [Output Only] An informational warning that @@ -18275,9 +18516,9 @@ type InstanceGroupsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *InstanceGroupsScopedListWarning) MarshalJSON() ([]byte, error) { +func (s InstanceGroupsScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupsScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupsScopedListWarningData struct { @@ -18304,9 +18545,9 @@ type InstanceGroupsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *InstanceGroupsScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s InstanceGroupsScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupsScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceGroupsSetNamedPortsRequest struct { @@ -18333,9 +18574,9 @@ type InstanceGroupsSetNamedPortsRequest struct { NullFields []string `json:"-"` } -func (s *InstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) { +func (s InstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) { type NoMethod InstanceGroupsSetNamedPortsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceList: Contains a list of instances. @@ -18373,9 +18614,9 @@ type InstanceList struct { NullFields []string `json:"-"` } -func (s *InstanceList) MarshalJSON() ([]byte, error) { +func (s InstanceList) MarshalJSON() ([]byte, error) { type NoMethod InstanceList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceListWarning: [Output Only] Informational warning message. @@ -18458,9 +18699,9 @@ type InstanceListWarning struct { NullFields []string `json:"-"` } -func (s *InstanceListWarning) MarshalJSON() ([]byte, error) { +func (s InstanceListWarning) MarshalJSON() ([]byte, error) { type NoMethod InstanceListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceListWarningData struct { @@ -18487,9 +18728,9 @@ type InstanceListWarningData struct { NullFields []string `json:"-"` } -func (s *InstanceListWarningData) MarshalJSON() ([]byte, error) { +func (s InstanceListWarningData) MarshalJSON() ([]byte, error) { type NoMethod InstanceListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceListReferrers: Contains a list of instance referrers. 
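Note on the pattern repeated through every hunk in this file: the generated MarshalJSON methods move from pointer receivers to value receivers, so the body passes NoMethod(s) instead of NoMethod(*s). With a value receiver the method belongs to the method set of both T and *T, so encoding/json picks up the custom marshaler even when a struct is marshaled by value, and ForceSendFields/NullFields are honored in both cases. The local `type NoMethod X` declaration creates a type with the same fields but none of the methods, which is what keeps MarshalJSON from recursing into itself. A minimal standard-library sketch of the idea (the real code delegates to gensupport.MarshalJSON, which additionally applies ForceSendFields and NullFields; this simplified version omits that):

package main

import (
	"encoding/json"
	"fmt"
)

type InstanceReference struct {
	Instance        string   `json:"instance,omitempty"`
	ForceSendFields []string `json:"-"`
	NullFields      []string `json:"-"`
}

// Value receiver: both InstanceReference and *InstanceReference now
// satisfy json.Marshaler, and no *s dereference is needed in the body.
func (s InstanceReference) MarshalJSON() ([]byte, error) {
	// NoMethod has identical fields but no methods, so the json.Marshal
	// call below cannot recurse back into this MarshalJSON.
	type NoMethod InstanceReference
	return json.Marshal(NoMethod(s))
}

func main() {
	b, _ := json.Marshal(InstanceReference{Instance: "zones/us-central1-a/instances/vm-1"})
	fmt.Println(string(b)) // {"instance":"zones/us-central1-a/instances/vm-1"}
}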
@@ -18527,9 +18768,9 @@ type InstanceListReferrers struct { NullFields []string `json:"-"` } -func (s *InstanceListReferrers) MarshalJSON() ([]byte, error) { +func (s InstanceListReferrers) MarshalJSON() ([]byte, error) { type NoMethod InstanceListReferrers - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceListReferrersWarning: [Output Only] Informational warning message. @@ -18612,9 +18853,9 @@ type InstanceListReferrersWarning struct { NullFields []string `json:"-"` } -func (s *InstanceListReferrersWarning) MarshalJSON() ([]byte, error) { +func (s InstanceListReferrersWarning) MarshalJSON() ([]byte, error) { type NoMethod InstanceListReferrersWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceListReferrersWarningData struct { @@ -18641,9 +18882,9 @@ type InstanceListReferrersWarningData struct { NullFields []string `json:"-"` } -func (s *InstanceListReferrersWarningData) MarshalJSON() ([]byte, error) { +func (s InstanceListReferrersWarningData) MarshalJSON() ([]byte, error) { type NoMethod InstanceListReferrersWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceManagedByIgmError struct { @@ -18669,9 +18910,9 @@ type InstanceManagedByIgmError struct { NullFields []string `json:"-"` } -func (s *InstanceManagedByIgmError) MarshalJSON() ([]byte, error) { +func (s InstanceManagedByIgmError) MarshalJSON() ([]byte, error) { type NoMethod InstanceManagedByIgmError - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceManagedByIgmErrorInstanceActionDetails struct { @@ -18730,9 +18971,9 @@ type InstanceManagedByIgmErrorInstanceActionDetails struct { NullFields []string `json:"-"` } -func (s *InstanceManagedByIgmErrorInstanceActionDetails) MarshalJSON() ([]byte, error) { +func (s InstanceManagedByIgmErrorInstanceActionDetails) MarshalJSON() ([]byte, error) { type NoMethod InstanceManagedByIgmErrorInstanceActionDetails - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceManagedByIgmErrorManagedInstanceError struct { @@ -18753,9 +18994,9 @@ type InstanceManagedByIgmErrorManagedInstanceError struct { NullFields []string `json:"-"` } -func (s *InstanceManagedByIgmErrorManagedInstanceError) MarshalJSON() ([]byte, error) { +func (s InstanceManagedByIgmErrorManagedInstanceError) MarshalJSON() ([]byte, error) { type NoMethod InstanceManagedByIgmErrorManagedInstanceError - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceMoveRequest struct { @@ -18785,9 +19026,9 @@ type InstanceMoveRequest struct { NullFields []string `json:"-"` } -func (s *InstanceMoveRequest) MarshalJSON() ([]byte, error) { +func (s InstanceMoveRequest) MarshalJSON() ([]byte, error) { type NoMethod InstanceMoveRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceParams: Additional instance 
params. @@ -18810,9 +19051,9 @@ type InstanceParams struct { NullFields []string `json:"-"` } -func (s *InstanceParams) MarshalJSON() ([]byte, error) { +func (s InstanceParams) MarshalJSON() ([]byte, error) { type NoMethod InstanceParams - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceProperties struct { @@ -18851,10 +19092,11 @@ type InstanceProperties struct { // Labels: Labels to apply to instances that are created from these properties. Labels map[string]string `json:"labels,omitempty"` // MachineType: The machine type to use for instances that are created from - // these properties. This field only accept machine types name. e.g. - // n2-standard-4 and does not accept machine type full or partial url. e.g. - // projects/my-l7ilb-project/zones/us-central1-a/machineTypes/n2-standard-4 - // will throw INTERNAL_ERROR. + // these properties. This field only accepts a machine type name, for example + // `n2-standard-4`. If you use the machine type full or partial URL, for + // example + // `projects/my-l7ilb-project/zones/us-central1-a/machineTypes/n2-standard-4`, + // the request will result in an `INTERNAL_ERROR`. MachineType string `json:"machineType,omitempty"` // Metadata: The metadata key/value pairs to assign to instances that are // created from these properties. These pairs can consist of custom metadata or @@ -18929,9 +19171,9 @@ type InstanceProperties struct { NullFields []string `json:"-"` } -func (s *InstanceProperties) MarshalJSON() ([]byte, error) { +func (s InstanceProperties) MarshalJSON() ([]byte, error) { type NoMethod InstanceProperties - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstancePropertiesPatch: Represents the change that you want to make to the @@ -18955,9 +19197,9 @@ type InstancePropertiesPatch struct { NullFields []string `json:"-"` } -func (s *InstancePropertiesPatch) MarshalJSON() ([]byte, error) { +func (s InstancePropertiesPatch) MarshalJSON() ([]byte, error) { type NoMethod InstancePropertiesPatch - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceReference struct { @@ -18977,9 +19219,9 @@ type InstanceReference struct { NullFields []string `json:"-"` } -func (s *InstanceReference) MarshalJSON() ([]byte, error) { +func (s InstanceReference) MarshalJSON() ([]byte, error) { type NoMethod InstanceReference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceSettings: Represents a Instance Settings resource. 
You can use @@ -19022,9 +19264,9 @@ type InstanceSettings struct { NullFields []string `json:"-"` } -func (s *InstanceSettings) MarshalJSON() ([]byte, error) { +func (s InstanceSettings) MarshalJSON() ([]byte, error) { type NoMethod InstanceSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceSettingsMetadata struct { @@ -19047,9 +19289,9 @@ type InstanceSettingsMetadata struct { NullFields []string `json:"-"` } -func (s *InstanceSettingsMetadata) MarshalJSON() ([]byte, error) { +func (s InstanceSettingsMetadata) MarshalJSON() ([]byte, error) { type NoMethod InstanceSettingsMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceTemplate: Represents an Instance Template resource. Google Compute @@ -19116,9 +19358,9 @@ type InstanceTemplate struct { NullFields []string `json:"-"` } -func (s *InstanceTemplate) MarshalJSON() ([]byte, error) { +func (s InstanceTemplate) MarshalJSON() ([]byte, error) { type NoMethod InstanceTemplate - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceTemplateAggregatedList: Contains a list of @@ -19156,9 +19398,9 @@ type InstanceTemplateAggregatedList struct { NullFields []string `json:"-"` } -func (s *InstanceTemplateAggregatedList) MarshalJSON() ([]byte, error) { +func (s InstanceTemplateAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod InstanceTemplateAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceTemplateAggregatedListWarning: [Output Only] Informational warning @@ -19242,9 +19484,9 @@ type InstanceTemplateAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *InstanceTemplateAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s InstanceTemplateAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod InstanceTemplateAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceTemplateAggregatedListWarningData struct { @@ -19271,9 +19513,9 @@ type InstanceTemplateAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *InstanceTemplateAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s InstanceTemplateAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod InstanceTemplateAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceTemplateList: A list of instance templates. @@ -19311,9 +19553,9 @@ type InstanceTemplateList struct { NullFields []string `json:"-"` } -func (s *InstanceTemplateList) MarshalJSON() ([]byte, error) { +func (s InstanceTemplateList) MarshalJSON() ([]byte, error) { type NoMethod InstanceTemplateList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceTemplateListWarning: [Output Only] Informational warning message. 
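The rewritten MachineType comment above pins down a real constraint: InstanceProperties.MachineType accepts only a bare machine type name, and a full or partial URL is rejected with INTERNAL_ERROR. A hedged usage sketch against the vendored compute/v1 package (the project path in the comment and the label values are made up; ForceSendFields follows the field docs repeated throughout this file):

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	props := &compute.InstanceProperties{
		// Correct: a bare machine type name, as the updated comment requires.
		MachineType: "n2-standard-4",
		// Wrong (per the comment above, the API returns INTERNAL_ERROR):
		//   "projects/my-l7ilb-project/zones/us-central1-a/machineTypes/n2-standard-4"
		Labels: map[string]string{"env": "dev"},
		// Listing CanIpForward here makes gensupport.MarshalJSON emit its
		// zero value (false) despite the omitempty tag on the field.
		ForceSendFields: []string{"CanIpForward"},
	}
	b, _ := json.Marshal(props)
	fmt.Println(string(b))
}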
@@ -19396,9 +19638,9 @@ type InstanceTemplateListWarning struct { NullFields []string `json:"-"` } -func (s *InstanceTemplateListWarning) MarshalJSON() ([]byte, error) { +func (s InstanceTemplateListWarning) MarshalJSON() ([]byte, error) { type NoMethod InstanceTemplateListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceTemplateListWarningData struct { @@ -19425,9 +19667,9 @@ type InstanceTemplateListWarningData struct { NullFields []string `json:"-"` } -func (s *InstanceTemplateListWarningData) MarshalJSON() ([]byte, error) { +func (s InstanceTemplateListWarningData) MarshalJSON() ([]byte, error) { type NoMethod InstanceTemplateListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceTemplatesScopedList struct { @@ -19450,9 +19692,9 @@ type InstanceTemplatesScopedList struct { NullFields []string `json:"-"` } -func (s *InstanceTemplatesScopedList) MarshalJSON() ([]byte, error) { +func (s InstanceTemplatesScopedList) MarshalJSON() ([]byte, error) { type NoMethod InstanceTemplatesScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceTemplatesScopedListWarning: [Output Only] An informational warning @@ -19536,9 +19778,9 @@ type InstanceTemplatesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *InstanceTemplatesScopedListWarning) MarshalJSON() ([]byte, error) { +func (s InstanceTemplatesScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod InstanceTemplatesScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceTemplatesScopedListWarningData struct { @@ -19565,9 +19807,9 @@ type InstanceTemplatesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *InstanceTemplatesScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s InstanceTemplatesScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod InstanceTemplatesScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstanceWithNamedPorts struct { @@ -19608,9 +19850,9 @@ type InstanceWithNamedPorts struct { NullFields []string `json:"-"` } -func (s *InstanceWithNamedPorts) MarshalJSON() ([]byte, error) { +func (s InstanceWithNamedPorts) MarshalJSON() ([]byte, error) { type NoMethod InstanceWithNamedPorts - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstancesAddResourcePoliciesRequest struct { @@ -19629,9 +19871,9 @@ type InstancesAddResourcePoliciesRequest struct { NullFields []string `json:"-"` } -func (s *InstancesAddResourcePoliciesRequest) MarshalJSON() ([]byte, error) { +func (s InstancesAddResourcePoliciesRequest) MarshalJSON() ([]byte, error) { type NoMethod InstancesAddResourcePoliciesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstancesBulkInsertOperationMetadata struct { @@ 
-19651,9 +19893,9 @@ type InstancesBulkInsertOperationMetadata struct { NullFields []string `json:"-"` } -func (s *InstancesBulkInsertOperationMetadata) MarshalJSON() ([]byte, error) { +func (s InstancesBulkInsertOperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod InstancesBulkInsertOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstancesGetEffectiveFirewallsResponse struct { @@ -19677,9 +19919,9 @@ type InstancesGetEffectiveFirewallsResponse struct { NullFields []string `json:"-"` } -func (s *InstancesGetEffectiveFirewallsResponse) MarshalJSON() ([]byte, error) { +func (s InstancesGetEffectiveFirewallsResponse) MarshalJSON() ([]byte, error) { type NoMethod InstancesGetEffectiveFirewallsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy struct { @@ -19688,6 +19930,9 @@ type InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy struct { DisplayName string `json:"displayName,omitempty"` // Name: [Output Only] The name of the firewall policy. Name string `json:"name,omitempty"` + // Priority: [Output only] Priority of firewall policy association. Not + // applicable for type=HIERARCHY. + Priority int64 `json:"priority,omitempty"` // Rules: The rules that apply to the network. Rules []*FirewallPolicyRule `json:"rules,omitempty"` // ShortName: [Output Only] The short name of the firewall policy. @@ -19699,6 +19944,8 @@ type InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy struct { // "HIERARCHY" // "NETWORK" // "NETWORK_REGIONAL" + // "SYSTEM_GLOBAL" + // "SYSTEM_REGIONAL" // "UNSPECIFIED" Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. 
"DisplayName") to @@ -19714,9 +19961,9 @@ type InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy struct { NullFields []string `json:"-"` } -func (s *InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy) MarshalJSON() ([]byte, error) { +func (s InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy) MarshalJSON() ([]byte, error) { type NoMethod InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstancesRemoveResourcePoliciesRequest struct { @@ -19735,9 +19982,9 @@ type InstancesRemoveResourcePoliciesRequest struct { NullFields []string `json:"-"` } -func (s *InstancesRemoveResourcePoliciesRequest) MarshalJSON() ([]byte, error) { +func (s InstancesRemoveResourcePoliciesRequest) MarshalJSON() ([]byte, error) { type NoMethod InstancesRemoveResourcePoliciesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstancesScopedList struct { @@ -19759,9 +20006,9 @@ type InstancesScopedList struct { NullFields []string `json:"-"` } -func (s *InstancesScopedList) MarshalJSON() ([]byte, error) { +func (s InstancesScopedList) MarshalJSON() ([]byte, error) { type NoMethod InstancesScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstancesScopedListWarning: [Output Only] Informational warning which @@ -19845,9 +20092,9 @@ type InstancesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *InstancesScopedListWarning) MarshalJSON() ([]byte, error) { +func (s InstancesScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod InstancesScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstancesScopedListWarningData struct { @@ -19874,9 +20121,9 @@ type InstancesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *InstancesScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s InstancesScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod InstancesScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstancesSetLabelsRequest struct { @@ -19898,9 +20145,9 @@ type InstancesSetLabelsRequest struct { NullFields []string `json:"-"` } -func (s *InstancesSetLabelsRequest) MarshalJSON() ([]byte, error) { +func (s InstancesSetLabelsRequest) MarshalJSON() ([]byte, error) { type NoMethod InstancesSetLabelsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstancesSetMachineResourcesRequest struct { @@ -19920,9 +20167,9 @@ type InstancesSetMachineResourcesRequest struct { NullFields []string `json:"-"` } -func (s *InstancesSetMachineResourcesRequest) MarshalJSON() ([]byte, error) { +func (s InstancesSetMachineResourcesRequest) MarshalJSON() ([]byte, error) { type NoMethod InstancesSetMachineResourcesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), 
s.ForceSendFields, s.NullFields) } type InstancesSetMachineTypeRequest struct { @@ -19943,9 +20190,9 @@ type InstancesSetMachineTypeRequest struct { NullFields []string `json:"-"` } -func (s *InstancesSetMachineTypeRequest) MarshalJSON() ([]byte, error) { +func (s InstancesSetMachineTypeRequest) MarshalJSON() ([]byte, error) { type NoMethod InstancesSetMachineTypeRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstancesSetMinCpuPlatformRequest struct { @@ -19964,9 +20211,9 @@ type InstancesSetMinCpuPlatformRequest struct { NullFields []string `json:"-"` } -func (s *InstancesSetMinCpuPlatformRequest) MarshalJSON() ([]byte, error) { +func (s InstancesSetMinCpuPlatformRequest) MarshalJSON() ([]byte, error) { type NoMethod InstancesSetMinCpuPlatformRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstancesSetNameRequest struct { @@ -19989,9 +20236,9 @@ type InstancesSetNameRequest struct { NullFields []string `json:"-"` } -func (s *InstancesSetNameRequest) MarshalJSON() ([]byte, error) { +func (s InstancesSetNameRequest) MarshalJSON() ([]byte, error) { type NoMethod InstancesSetNameRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstancesSetSecurityPolicyRequest struct { @@ -20016,9 +20263,9 @@ type InstancesSetSecurityPolicyRequest struct { NullFields []string `json:"-"` } -func (s *InstancesSetSecurityPolicyRequest) MarshalJSON() ([]byte, error) { +func (s InstancesSetSecurityPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod InstancesSetSecurityPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstancesSetServiceAccountRequest struct { @@ -20039,9 +20286,9 @@ type InstancesSetServiceAccountRequest struct { NullFields []string `json:"-"` } -func (s *InstancesSetServiceAccountRequest) MarshalJSON() ([]byte, error) { +func (s InstancesSetServiceAccountRequest) MarshalJSON() ([]byte, error) { type NoMethod InstancesSetServiceAccountRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstancesStartWithEncryptionKeyRequest struct { @@ -20063,9 +20310,9 @@ type InstancesStartWithEncryptionKeyRequest struct { NullFields []string `json:"-"` } -func (s *InstancesStartWithEncryptionKeyRequest) MarshalJSON() ([]byte, error) { +func (s InstancesStartWithEncryptionKeyRequest) MarshalJSON() ([]byte, error) { type NoMethod InstancesStartWithEncryptionKeyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstantSnapshot: Represents a InstantSnapshot resource. 
You can use instant @@ -20175,9 +20422,9 @@ type InstantSnapshot struct { NullFields []string `json:"-"` } -func (s *InstantSnapshot) MarshalJSON() ([]byte, error) { +func (s InstantSnapshot) MarshalJSON() ([]byte, error) { type NoMethod InstantSnapshot - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstantSnapshotAggregatedList struct { @@ -20217,9 +20464,9 @@ type InstantSnapshotAggregatedList struct { NullFields []string `json:"-"` } -func (s *InstantSnapshotAggregatedList) MarshalJSON() ([]byte, error) { +func (s InstantSnapshotAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod InstantSnapshotAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstantSnapshotAggregatedListWarning: [Output Only] Informational warning @@ -20303,9 +20550,9 @@ type InstantSnapshotAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *InstantSnapshotAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s InstantSnapshotAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod InstantSnapshotAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstantSnapshotAggregatedListWarningData struct { @@ -20332,9 +20579,9 @@ type InstantSnapshotAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *InstantSnapshotAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s InstantSnapshotAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod InstantSnapshotAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstantSnapshotList: Contains a list of InstantSnapshot resources. @@ -20371,9 +20618,9 @@ type InstantSnapshotList struct { NullFields []string `json:"-"` } -func (s *InstantSnapshotList) MarshalJSON() ([]byte, error) { +func (s InstantSnapshotList) MarshalJSON() ([]byte, error) { type NoMethod InstantSnapshotList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstantSnapshotListWarning: [Output Only] Informational warning message. 
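On the Priority field and the SYSTEM_GLOBAL / SYSTEM_REGIONAL policy types added above to InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy: a hedged consumer sketch. It assumes the response slice field is named FirewallPolicys, following the generator's naming in this file, and that the value comes from an Instances.GetEffectiveFirewalls call; per the new doc comment, Priority is not meaningful for type=HIERARCHY.

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func printEffectivePolicies(resp *compute.InstancesGetEffectiveFirewallsResponse) {
	for _, p := range resp.FirewallPolicys {
		switch p.Type {
		case "HIERARCHY":
			// Priority is documented above as not applicable for this type.
			fmt.Printf("%-16s name=%s\n", p.Type, p.Name)
		case "SYSTEM_GLOBAL", "SYSTEM_REGIONAL":
			// Types newly documented in this revision.
			fmt.Printf("%-16s prio=%d name=%s (system)\n", p.Type, p.Priority, p.Name)
		default:
			fmt.Printf("%-16s prio=%d name=%s\n", p.Type, p.Priority, p.Name)
		}
	}
}

func main() {
	printEffectivePolicies(&compute.InstancesGetEffectiveFirewallsResponse{})
}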
@@ -20456,9 +20703,9 @@ type InstantSnapshotListWarning struct { NullFields []string `json:"-"` } -func (s *InstantSnapshotListWarning) MarshalJSON() ([]byte, error) { +func (s InstantSnapshotListWarning) MarshalJSON() ([]byte, error) { type NoMethod InstantSnapshotListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstantSnapshotListWarningData struct { @@ -20485,9 +20732,9 @@ type InstantSnapshotListWarningData struct { NullFields []string `json:"-"` } -func (s *InstantSnapshotListWarningData) MarshalJSON() ([]byte, error) { +func (s InstantSnapshotListWarningData) MarshalJSON() ([]byte, error) { type NoMethod InstantSnapshotListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstantSnapshotResourceStatus struct { @@ -20506,9 +20753,9 @@ type InstantSnapshotResourceStatus struct { NullFields []string `json:"-"` } -func (s *InstantSnapshotResourceStatus) MarshalJSON() ([]byte, error) { +func (s InstantSnapshotResourceStatus) MarshalJSON() ([]byte, error) { type NoMethod InstantSnapshotResourceStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstantSnapshotsScopedList struct { @@ -20531,9 +20778,9 @@ type InstantSnapshotsScopedList struct { NullFields []string `json:"-"` } -func (s *InstantSnapshotsScopedList) MarshalJSON() ([]byte, error) { +func (s InstantSnapshotsScopedList) MarshalJSON() ([]byte, error) { type NoMethod InstantSnapshotsScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstantSnapshotsScopedListWarning: [Output Only] Informational warning which @@ -20617,9 +20864,9 @@ type InstantSnapshotsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *InstantSnapshotsScopedListWarning) MarshalJSON() ([]byte, error) { +func (s InstantSnapshotsScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod InstantSnapshotsScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InstantSnapshotsScopedListWarningData struct { @@ -20646,9 +20893,9 @@ type InstantSnapshotsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *InstantSnapshotsScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s InstantSnapshotsScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod InstantSnapshotsScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Int64RangeMatch: HttpRouteRuleMatch criteria for field values that must stay @@ -20672,9 +20919,9 @@ type Int64RangeMatch struct { NullFields []string `json:"-"` } -func (s *Int64RangeMatch) MarshalJSON() ([]byte, error) { +func (s Int64RangeMatch) MarshalJSON() ([]byte, error) { type NoMethod Int64RangeMatch - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Interconnect: Represents an Interconnect resource. 
An Interconnect resource @@ -20689,10 +20936,10 @@ type Interconnect struct { AdminEnabled bool `json:"adminEnabled,omitempty"` // AvailableFeatures: [Output only] List of features available for this // Interconnect connection, which can take one of the following values: - - // MACSEC If present then the Interconnect connection is provisioned on MACsec - // capable hardware ports. If not present then the Interconnect connection is - // provisioned on non-MACsec capable ports and MACsec isn't supported and - // enabling MACsec fails. + // IF_MACSEC If present then the Interconnect connection is provisioned on + // MACsec capable hardware ports. If not present then the Interconnect + // connection is provisioned on non-MACsec capable ports and MACsec isn't + // supported and enabling MACsec fails. // // Possible values: // "IF_MACSEC" - Media Access Control security (MACsec) @@ -20816,7 +21063,7 @@ type Interconnect struct { RemoteLocation string `json:"remoteLocation,omitempty"` // RequestedFeatures: Optional. List of features requested for this // Interconnect connection, which can take one of the following values: - - // MACSEC If specified then the connection is created on MACsec capable + // IF_MACSEC If specified then the connection is created on MACsec capable // hardware ports. If not specified, the default value is false, which // allocates non-MACsec capable ports first if available. This parameter can be // provided only with Interconnect INSERT. It isn't valid for Interconnect @@ -20862,9 +21109,9 @@ type Interconnect struct { NullFields []string `json:"-"` } -func (s *Interconnect) MarshalJSON() ([]byte, error) { +func (s Interconnect) MarshalJSON() ([]byte, error) { type NoMethod Interconnect - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectAttachment: Represents an Interconnect Attachment (VLAN) @@ -21161,9 +21408,9 @@ type InterconnectAttachment struct { NullFields []string `json:"-"` } -func (s *InterconnectAttachment) MarshalJSON() ([]byte, error) { +func (s InterconnectAttachment) MarshalJSON() ([]byte, error) { type NoMethod InterconnectAttachment - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InterconnectAttachmentAggregatedList struct { @@ -21203,9 +21450,9 @@ type InterconnectAttachmentAggregatedList struct { NullFields []string `json:"-"` } -func (s *InterconnectAttachmentAggregatedList) MarshalJSON() ([]byte, error) { +func (s InterconnectAttachmentAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod InterconnectAttachmentAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectAttachmentAggregatedListWarning: [Output Only] Informational @@ -21289,9 +21536,9 @@ type InterconnectAttachmentAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *InterconnectAttachmentAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s InterconnectAttachmentAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod InterconnectAttachmentAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InterconnectAttachmentAggregatedListWarningData struct 
{ @@ -21318,9 +21565,9 @@ type InterconnectAttachmentAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *InterconnectAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s InterconnectAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod InterconnectAttachmentAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InterconnectAttachmentConfigurationConstraints struct { @@ -21360,9 +21607,9 @@ type InterconnectAttachmentConfigurationConstraints struct { NullFields []string `json:"-"` } -func (s *InterconnectAttachmentConfigurationConstraints) MarshalJSON() ([]byte, error) { +func (s InterconnectAttachmentConfigurationConstraints) MarshalJSON() ([]byte, error) { type NoMethod InterconnectAttachmentConfigurationConstraints - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange struct { @@ -21381,9 +21628,9 @@ type InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange struct { NullFields []string `json:"-"` } -func (s *InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange) MarshalJSON() ([]byte, error) { +func (s InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange) MarshalJSON() ([]byte, error) { type NoMethod InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectAttachmentList: Response to the list request, and contains a @@ -21422,9 +21669,9 @@ type InterconnectAttachmentList struct { NullFields []string `json:"-"` } -func (s *InterconnectAttachmentList) MarshalJSON() ([]byte, error) { +func (s InterconnectAttachmentList) MarshalJSON() ([]byte, error) { type NoMethod InterconnectAttachmentList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectAttachmentListWarning: [Output Only] Informational warning @@ -21508,9 +21755,9 @@ type InterconnectAttachmentListWarning struct { NullFields []string `json:"-"` } -func (s *InterconnectAttachmentListWarning) MarshalJSON() ([]byte, error) { +func (s InterconnectAttachmentListWarning) MarshalJSON() ([]byte, error) { type NoMethod InterconnectAttachmentListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InterconnectAttachmentListWarningData struct { @@ -21537,9 +21784,9 @@ type InterconnectAttachmentListWarningData struct { NullFields []string `json:"-"` } -func (s *InterconnectAttachmentListWarningData) MarshalJSON() ([]byte, error) { +func (s InterconnectAttachmentListWarningData) MarshalJSON() ([]byte, error) { type NoMethod InterconnectAttachmentListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectAttachmentPartnerMetadata: Informational metadata about Partner @@ -21571,9 +21818,9 @@ type InterconnectAttachmentPartnerMetadata struct { NullFields []string `json:"-"` } -func (s 
*InterconnectAttachmentPartnerMetadata) MarshalJSON() ([]byte, error) { +func (s InterconnectAttachmentPartnerMetadata) MarshalJSON() ([]byte, error) { type NoMethod InterconnectAttachmentPartnerMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectAttachmentPrivateInfo: Information for an interconnect @@ -21595,9 +21842,9 @@ type InterconnectAttachmentPrivateInfo struct { NullFields []string `json:"-"` } -func (s *InterconnectAttachmentPrivateInfo) MarshalJSON() ([]byte, error) { +func (s InterconnectAttachmentPrivateInfo) MarshalJSON() ([]byte, error) { type NoMethod InterconnectAttachmentPrivateInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InterconnectAttachmentsScopedList struct { @@ -21620,9 +21867,9 @@ type InterconnectAttachmentsScopedList struct { NullFields []string `json:"-"` } -func (s *InterconnectAttachmentsScopedList) MarshalJSON() ([]byte, error) { +func (s InterconnectAttachmentsScopedList) MarshalJSON() ([]byte, error) { type NoMethod InterconnectAttachmentsScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectAttachmentsScopedListWarning: Informational warning which @@ -21706,9 +21953,9 @@ type InterconnectAttachmentsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *InterconnectAttachmentsScopedListWarning) MarshalJSON() ([]byte, error) { +func (s InterconnectAttachmentsScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod InterconnectAttachmentsScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InterconnectAttachmentsScopedListWarningData struct { @@ -21735,9 +21982,9 @@ type InterconnectAttachmentsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *InterconnectAttachmentsScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s InterconnectAttachmentsScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod InterconnectAttachmentsScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectCircuitInfo: Describes a single physical circuit between the @@ -21765,9 +22012,9 @@ type InterconnectCircuitInfo struct { NullFields []string `json:"-"` } -func (s *InterconnectCircuitInfo) MarshalJSON() ([]byte, error) { +func (s InterconnectCircuitInfo) MarshalJSON() ([]byte, error) { type NoMethod InterconnectCircuitInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectDiagnostics: Diagnostics information about the Interconnect @@ -21815,9 +22062,9 @@ type InterconnectDiagnostics struct { NullFields []string `json:"-"` } -func (s *InterconnectDiagnostics) MarshalJSON() ([]byte, error) { +func (s InterconnectDiagnostics) MarshalJSON() ([]byte, error) { type NoMethod InterconnectDiagnostics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // 
InterconnectDiagnosticsARPEntry: Describing the ARP neighbor entries seen on @@ -21840,9 +22087,9 @@ type InterconnectDiagnosticsARPEntry struct { NullFields []string `json:"-"` } -func (s *InterconnectDiagnosticsARPEntry) MarshalJSON() ([]byte, error) { +func (s InterconnectDiagnosticsARPEntry) MarshalJSON() ([]byte, error) { type NoMethod InterconnectDiagnosticsARPEntry - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InterconnectDiagnosticsLinkLACPStatus struct { @@ -21874,9 +22121,9 @@ type InterconnectDiagnosticsLinkLACPStatus struct { NullFields []string `json:"-"` } -func (s *InterconnectDiagnosticsLinkLACPStatus) MarshalJSON() ([]byte, error) { +func (s InterconnectDiagnosticsLinkLACPStatus) MarshalJSON() ([]byte, error) { type NoMethod InterconnectDiagnosticsLinkLACPStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InterconnectDiagnosticsLinkOpticalPower struct { @@ -21917,9 +22164,9 @@ type InterconnectDiagnosticsLinkOpticalPower struct { NullFields []string `json:"-"` } -func (s *InterconnectDiagnosticsLinkOpticalPower) MarshalJSON() ([]byte, error) { +func (s InterconnectDiagnosticsLinkOpticalPower) MarshalJSON() ([]byte, error) { type NoMethod InterconnectDiagnosticsLinkOpticalPower - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *InterconnectDiagnosticsLinkOpticalPower) UnmarshalJSON(data []byte) error { @@ -21976,9 +22223,9 @@ type InterconnectDiagnosticsLinkStatus struct { NullFields []string `json:"-"` } -func (s *InterconnectDiagnosticsLinkStatus) MarshalJSON() ([]byte, error) { +func (s InterconnectDiagnosticsLinkStatus) MarshalJSON() ([]byte, error) { type NoMethod InterconnectDiagnosticsLinkStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectDiagnosticsMacsecStatus: Describes the status of MACsec @@ -22002,9 +22249,9 @@ type InterconnectDiagnosticsMacsecStatus struct { NullFields []string `json:"-"` } -func (s *InterconnectDiagnosticsMacsecStatus) MarshalJSON() ([]byte, error) { +func (s InterconnectDiagnosticsMacsecStatus) MarshalJSON() ([]byte, error) { type NoMethod InterconnectDiagnosticsMacsecStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectList: Response to the list request, and contains a list of @@ -22043,9 +22290,9 @@ type InterconnectList struct { NullFields []string `json:"-"` } -func (s *InterconnectList) MarshalJSON() ([]byte, error) { +func (s InterconnectList) MarshalJSON() ([]byte, error) { type NoMethod InterconnectList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectListWarning: [Output Only] Informational warning message. 
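The Interconnect hunks above correct the feature name in the docs from MACSEC to IF_MACSEC, matching the actual enum literal carried in AvailableFeatures and RequestedFeatures. A hedged capability check built on that literal (the Interconnect value would normally come from an Interconnects.Get call):

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// macsecCapable reports whether the connection was provisioned on
// MACsec-capable hardware ports; if IF_MACSEC is absent, the corrected
// docs above say enabling MACsec on this Interconnect fails.
func macsecCapable(ic *compute.Interconnect) bool {
	for _, f := range ic.AvailableFeatures {
		if f == "IF_MACSEC" {
			return true
		}
	}
	return false
}

func main() {
	ic := &compute.Interconnect{AvailableFeatures: []string{"IF_MACSEC"}}
	fmt.Println(macsecCapable(ic)) // true
}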
@@ -22128,9 +22375,9 @@ type InterconnectListWarning struct { NullFields []string `json:"-"` } -func (s *InterconnectListWarning) MarshalJSON() ([]byte, error) { +func (s InterconnectListWarning) MarshalJSON() ([]byte, error) { type NoMethod InterconnectListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InterconnectListWarningData struct { @@ -22157,9 +22404,9 @@ type InterconnectListWarningData struct { NullFields []string `json:"-"` } -func (s *InterconnectListWarningData) MarshalJSON() ([]byte, error) { +func (s InterconnectListWarningData) MarshalJSON() ([]byte, error) { type NoMethod InterconnectListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectLocation: Represents an Interconnect Attachment (VLAN) Location @@ -22266,9 +22513,9 @@ type InterconnectLocation struct { NullFields []string `json:"-"` } -func (s *InterconnectLocation) MarshalJSON() ([]byte, error) { +func (s InterconnectLocation) MarshalJSON() ([]byte, error) { type NoMethod InterconnectLocation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectLocationList: Response to the list request, and contains a list @@ -22307,9 +22554,9 @@ type InterconnectLocationList struct { NullFields []string `json:"-"` } -func (s *InterconnectLocationList) MarshalJSON() ([]byte, error) { +func (s InterconnectLocationList) MarshalJSON() ([]byte, error) { type NoMethod InterconnectLocationList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectLocationListWarning: [Output Only] Informational warning @@ -22393,9 +22640,9 @@ type InterconnectLocationListWarning struct { NullFields []string `json:"-"` } -func (s *InterconnectLocationListWarning) MarshalJSON() ([]byte, error) { +func (s InterconnectLocationListWarning) MarshalJSON() ([]byte, error) { type NoMethod InterconnectLocationListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InterconnectLocationListWarningData struct { @@ -22422,9 +22669,9 @@ type InterconnectLocationListWarningData struct { NullFields []string `json:"-"` } -func (s *InterconnectLocationListWarningData) MarshalJSON() ([]byte, error) { +func (s InterconnectLocationListWarningData) MarshalJSON() ([]byte, error) { type NoMethod InterconnectLocationListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectLocationRegionInfo: Information about any potential @@ -22461,9 +22708,9 @@ type InterconnectLocationRegionInfo struct { NullFields []string `json:"-"` } -func (s *InterconnectLocationRegionInfo) MarshalJSON() ([]byte, error) { +func (s InterconnectLocationRegionInfo) MarshalJSON() ([]byte, error) { type NoMethod InterconnectLocationRegionInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectMacsec: Configuration information for enabling Media Access 
@@ -22496,9 +22743,9 @@ type InterconnectMacsec struct { NullFields []string `json:"-"` } -func (s *InterconnectMacsec) MarshalJSON() ([]byte, error) { +func (s InterconnectMacsec) MarshalJSON() ([]byte, error) { type NoMethod InterconnectMacsec - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectMacsecConfig: MACsec configuration information for the @@ -22523,9 +22770,9 @@ type InterconnectMacsecConfig struct { NullFields []string `json:"-"` } -func (s *InterconnectMacsecConfig) MarshalJSON() ([]byte, error) { +func (s InterconnectMacsecConfig) MarshalJSON() ([]byte, error) { type NoMethod InterconnectMacsecConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectMacsecConfigPreSharedKey: Describes a pre-shared key used to @@ -22552,9 +22799,9 @@ type InterconnectMacsecConfigPreSharedKey struct { NullFields []string `json:"-"` } -func (s *InterconnectMacsecConfigPreSharedKey) MarshalJSON() ([]byte, error) { +func (s InterconnectMacsecConfigPreSharedKey) MarshalJSON() ([]byte, error) { type NoMethod InterconnectMacsecConfigPreSharedKey - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectMacsecPreSharedKey: Describes a pre-shared key used to setup @@ -22586,9 +22833,9 @@ type InterconnectMacsecPreSharedKey struct { NullFields []string `json:"-"` } -func (s *InterconnectMacsecPreSharedKey) MarshalJSON() ([]byte, error) { +func (s InterconnectMacsecPreSharedKey) MarshalJSON() ([]byte, error) { type NoMethod InterconnectMacsecPreSharedKey - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectOutageNotification: Description of a planned outage on this @@ -22666,9 +22913,9 @@ type InterconnectOutageNotification struct { NullFields []string `json:"-"` } -func (s *InterconnectOutageNotification) MarshalJSON() ([]byte, error) { +func (s InterconnectOutageNotification) MarshalJSON() ([]byte, error) { type NoMethod InterconnectOutageNotification - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectRemoteLocation: Represents a Cross-Cloud Interconnect Remote @@ -22775,9 +23022,9 @@ type InterconnectRemoteLocation struct { NullFields []string `json:"-"` } -func (s *InterconnectRemoteLocation) MarshalJSON() ([]byte, error) { +func (s InterconnectRemoteLocation) MarshalJSON() ([]byte, error) { type NoMethod InterconnectRemoteLocation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InterconnectRemoteLocationConstraints struct { @@ -22836,9 +23083,9 @@ type InterconnectRemoteLocationConstraints struct { NullFields []string `json:"-"` } -func (s *InterconnectRemoteLocationConstraints) MarshalJSON() ([]byte, error) { +func (s InterconnectRemoteLocationConstraints) MarshalJSON() ([]byte, error) { type NoMethod InterconnectRemoteLocationConstraints - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, 
s.NullFields) } type InterconnectRemoteLocationConstraintsSubnetLengthRange struct { @@ -22857,9 +23104,9 @@ type InterconnectRemoteLocationConstraintsSubnetLengthRange struct { NullFields []string `json:"-"` } -func (s *InterconnectRemoteLocationConstraintsSubnetLengthRange) MarshalJSON() ([]byte, error) { +func (s InterconnectRemoteLocationConstraintsSubnetLengthRange) MarshalJSON() ([]byte, error) { type NoMethod InterconnectRemoteLocationConstraintsSubnetLengthRange - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectRemoteLocationList: Response to the list request, and contains a @@ -22899,9 +23146,9 @@ type InterconnectRemoteLocationList struct { NullFields []string `json:"-"` } -func (s *InterconnectRemoteLocationList) MarshalJSON() ([]byte, error) { +func (s InterconnectRemoteLocationList) MarshalJSON() ([]byte, error) { type NoMethod InterconnectRemoteLocationList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectRemoteLocationListWarning: [Output Only] Informational warning @@ -22985,9 +23232,9 @@ type InterconnectRemoteLocationListWarning struct { NullFields []string `json:"-"` } -func (s *InterconnectRemoteLocationListWarning) MarshalJSON() ([]byte, error) { +func (s InterconnectRemoteLocationListWarning) MarshalJSON() ([]byte, error) { type NoMethod InterconnectRemoteLocationListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InterconnectRemoteLocationListWarningData struct { @@ -23014,9 +23261,9 @@ type InterconnectRemoteLocationListWarningData struct { NullFields []string `json:"-"` } -func (s *InterconnectRemoteLocationListWarningData) MarshalJSON() ([]byte, error) { +func (s InterconnectRemoteLocationListWarningData) MarshalJSON() ([]byte, error) { type NoMethod InterconnectRemoteLocationListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type InterconnectRemoteLocationPermittedConnections struct { @@ -23036,9 +23283,9 @@ type InterconnectRemoteLocationPermittedConnections struct { NullFields []string `json:"-"` } -func (s *InterconnectRemoteLocationPermittedConnections) MarshalJSON() ([]byte, error) { +func (s InterconnectRemoteLocationPermittedConnections) MarshalJSON() ([]byte, error) { type NoMethod InterconnectRemoteLocationPermittedConnections - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectsGetDiagnosticsResponse: Response for the @@ -23061,9 +23308,9 @@ type InterconnectsGetDiagnosticsResponse struct { NullFields []string `json:"-"` } -func (s *InterconnectsGetDiagnosticsResponse) MarshalJSON() ([]byte, error) { +func (s InterconnectsGetDiagnosticsResponse) MarshalJSON() ([]byte, error) { type NoMethod InterconnectsGetDiagnosticsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InterconnectsGetMacsecConfigResponse: Response for the @@ -23088,9 +23335,9 @@ type InterconnectsGetMacsecConfigResponse struct { NullFields []string `json:"-"` } 
-func (s *InterconnectsGetMacsecConfigResponse) MarshalJSON() ([]byte, error) { +func (s InterconnectsGetMacsecConfigResponse) MarshalJSON() ([]byte, error) { type NoMethod InterconnectsGetMacsecConfigResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // License: Represents a License resource. A License represents billing and @@ -23140,9 +23387,9 @@ type License struct { NullFields []string `json:"-"` } -func (s *License) MarshalJSON() ([]byte, error) { +func (s License) MarshalJSON() ([]byte, error) { type NoMethod License - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LicenseCode: Represents a License Code resource. A License Code is a unique @@ -23200,9 +23447,9 @@ type LicenseCode struct { NullFields []string `json:"-"` } -func (s *LicenseCode) MarshalJSON() ([]byte, error) { +func (s LicenseCode) MarshalJSON() ([]byte, error) { type NoMethod LicenseCode - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type LicenseCodeLicenseAlias struct { @@ -23223,9 +23470,9 @@ type LicenseCodeLicenseAlias struct { NullFields []string `json:"-"` } -func (s *LicenseCodeLicenseAlias) MarshalJSON() ([]byte, error) { +func (s LicenseCodeLicenseAlias) MarshalJSON() ([]byte, error) { type NoMethod LicenseCodeLicenseAlias - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LicenseResourceCommitment: Commitment for a particular license resource. @@ -23250,9 +23497,9 @@ type LicenseResourceCommitment struct { NullFields []string `json:"-"` } -func (s *LicenseResourceCommitment) MarshalJSON() ([]byte, error) { +func (s LicenseResourceCommitment) MarshalJSON() ([]byte, error) { type NoMethod LicenseResourceCommitment - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type LicenseResourceRequirements struct { @@ -23275,9 +23522,9 @@ type LicenseResourceRequirements struct { NullFields []string `json:"-"` } -func (s *LicenseResourceRequirements) MarshalJSON() ([]byte, error) { +func (s LicenseResourceRequirements) MarshalJSON() ([]byte, error) { type NoMethod LicenseResourceRequirements - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type LicensesListResponse struct { @@ -23311,9 +23558,9 @@ type LicensesListResponse struct { NullFields []string `json:"-"` } -func (s *LicensesListResponse) MarshalJSON() ([]byte, error) { +func (s LicensesListResponse) MarshalJSON() ([]byte, error) { type NoMethod LicensesListResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LicensesListResponseWarning: [Output Only] Informational warning message. 
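Every one of these wrappers forwards ForceSendFields and NullFields to gensupport.MarshalJSON. As the generated comments describe, ForceSendFields names Go fields to emit even when they hold their zero value (overriding `omitempty`), and NullFields names fields to emit as an explicit JSON null. The sketch below is a simplified, hypothetical re-creation of that behavior for a struct value, not the library's actual implementation:

```go
package main

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strings"
)

type Example struct {
	Port            int64    `json:"port,omitempty"`
	Description     string   `json:"description,omitempty"`
	ForceSendFields []string `json:"-"`
	NullFields      []string `json:"-"`
}

// marshalJSON approximates what gensupport.MarshalJSON does with the two
// option slices: forceSend fields are emitted even when zero, and null
// fields are emitted as explicit JSON null. Assumes v is a struct value.
func marshalJSON(v interface{}, forceSend, null []string) ([]byte, error) {
	base, err := json.Marshal(v)
	if err != nil {
		return nil, err
	}
	m := map[string]json.RawMessage{}
	if err := json.Unmarshal(base, &m); err != nil {
		return nil, err
	}
	rv := reflect.ValueOf(v)
	rt := rv.Type()
	// tag maps a Go field name to its JSON key, skipping untagged or "-" fields.
	tag := func(goName string) (string, bool) {
		f, ok := rt.FieldByName(goName)
		if !ok {
			return "", false
		}
		name := strings.Split(f.Tag.Get("json"), ",")[0]
		return name, name != "" && name != "-"
	}
	for _, goName := range forceSend {
		if key, ok := tag(goName); ok {
			raw, err := json.Marshal(rv.FieldByName(goName).Interface())
			if err != nil {
				return nil, err
			}
			m[key] = raw
		}
	}
	for _, goName := range null {
		if key, ok := tag(goName); ok {
			m[key] = json.RawMessage("null")
		}
	}
	return json.Marshal(m)
}

func main() {
	e := Example{ForceSendFields: []string{"Port"}, NullFields: []string{"Description"}}
	b, _ := marshalJSON(e, e.ForceSendFields, e.NullFields)
	fmt.Println(string(b)) // {"description":null,"port":0}
}
```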
@@ -23396,9 +23643,9 @@ type LicensesListResponseWarning struct { NullFields []string `json:"-"` } -func (s *LicensesListResponseWarning) MarshalJSON() ([]byte, error) { +func (s LicensesListResponseWarning) MarshalJSON() ([]byte, error) { type NoMethod LicensesListResponseWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type LicensesListResponseWarningData struct { @@ -23425,9 +23672,9 @@ type LicensesListResponseWarningData struct { NullFields []string `json:"-"` } -func (s *LicensesListResponseWarningData) MarshalJSON() ([]byte, error) { +func (s LicensesListResponseWarningData) MarshalJSON() ([]byte, error) { type NoMethod LicensesListResponseWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type LocalDisk struct { @@ -23452,9 +23699,9 @@ type LocalDisk struct { NullFields []string `json:"-"` } -func (s *LocalDisk) MarshalJSON() ([]byte, error) { +func (s LocalDisk) MarshalJSON() ([]byte, error) { type NoMethod LocalDisk - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LocalizedMessage: Provides a localized error message that is safe to return @@ -23479,9 +23726,9 @@ type LocalizedMessage struct { NullFields []string `json:"-"` } -func (s *LocalizedMessage) MarshalJSON() ([]byte, error) { +func (s LocalizedMessage) MarshalJSON() ([]byte, error) { type NoMethod LocalizedMessage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LocationPolicy: Configuration for location policy among multiple possible @@ -23522,9 +23769,9 @@ type LocationPolicy struct { NullFields []string `json:"-"` } -func (s *LocationPolicy) MarshalJSON() ([]byte, error) { +func (s LocationPolicy) MarshalJSON() ([]byte, error) { type NoMethod LocationPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type LocationPolicyLocation struct { @@ -23551,9 +23798,9 @@ type LocationPolicyLocation struct { NullFields []string `json:"-"` } -func (s *LocationPolicyLocation) MarshalJSON() ([]byte, error) { +func (s LocationPolicyLocation) MarshalJSON() ([]byte, error) { type NoMethod LocationPolicyLocation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LocationPolicyLocationConstraints: Per-zone constraints on location policy @@ -23575,9 +23822,9 @@ type LocationPolicyLocationConstraints struct { NullFields []string `json:"-"` } -func (s *LocationPolicyLocationConstraints) MarshalJSON() ([]byte, error) { +func (s LocationPolicyLocationConstraints) MarshalJSON() ([]byte, error) { type NoMethod LocationPolicyLocationConstraints - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LogConfig: This is deprecated and has no effect. Do not use. 
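The receiver change is source-compatible for callers because Go places value-receiver methods in the method set of both T and *T, so json.Marshal finds MarshalJSON whether it is handed a value or a pointer. With the old pointer receiver, marshaling a bare struct value silently bypassed the custom method, which is plausibly the inconsistency this regeneration removes. A minimal demonstration with a toy type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type widget struct {
	Name string `json:"name,omitempty"`
}

// Value receiver, as on the "+" side of this diff.
func (w widget) MarshalJSON() ([]byte, error) {
	type noMethod widget
	return json.Marshal(noMethod(w))
}

func main() {
	w := widget{Name: "a"}
	v, _ := json.Marshal(w)  // value: MarshalJSON is in widget's method set
	p, _ := json.Marshal(&w) // pointer: *widget's method set includes it too
	fmt.Println(string(v), string(p)) // {"name":"a"} {"name":"a"}
}
```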
@@ -23601,9 +23848,9 @@ type LogConfig struct { NullFields []string `json:"-"` } -func (s *LogConfig) MarshalJSON() ([]byte, error) { +func (s LogConfig) MarshalJSON() ([]byte, error) { type NoMethod LogConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LogConfigCloudAuditOptions: This is deprecated and has no effect. Do not @@ -23629,9 +23876,9 @@ type LogConfigCloudAuditOptions struct { NullFields []string `json:"-"` } -func (s *LogConfigCloudAuditOptions) MarshalJSON() ([]byte, error) { +func (s LogConfigCloudAuditOptions) MarshalJSON() ([]byte, error) { type NoMethod LogConfigCloudAuditOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LogConfigCounterOptions: This is deprecated and has no effect. Do not use. @@ -23655,9 +23902,9 @@ type LogConfigCounterOptions struct { NullFields []string `json:"-"` } -func (s *LogConfigCounterOptions) MarshalJSON() ([]byte, error) { +func (s LogConfigCounterOptions) MarshalJSON() ([]byte, error) { type NoMethod LogConfigCounterOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LogConfigCounterOptionsCustomField: This is deprecated and has no effect. Do @@ -23680,9 +23927,9 @@ type LogConfigCounterOptionsCustomField struct { NullFields []string `json:"-"` } -func (s *LogConfigCounterOptionsCustomField) MarshalJSON() ([]byte, error) { +func (s LogConfigCounterOptionsCustomField) MarshalJSON() ([]byte, error) { type NoMethod LogConfigCounterOptionsCustomField - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LogConfigDataAccessOptions: This is deprecated and has no effect. Do not @@ -23707,9 +23954,9 @@ type LogConfigDataAccessOptions struct { NullFields []string `json:"-"` } -func (s *LogConfigDataAccessOptions) MarshalJSON() ([]byte, error) { +func (s LogConfigDataAccessOptions) MarshalJSON() ([]byte, error) { type NoMethod LogConfigDataAccessOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MachineImage: Represents a machine image resource. A machine image is a @@ -23809,9 +24056,9 @@ type MachineImage struct { NullFields []string `json:"-"` } -func (s *MachineImage) MarshalJSON() ([]byte, error) { +func (s MachineImage) MarshalJSON() ([]byte, error) { type NoMethod MachineImage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MachineImageList: A list of machine images. @@ -23849,9 +24096,9 @@ type MachineImageList struct { NullFields []string `json:"-"` } -func (s *MachineImageList) MarshalJSON() ([]byte, error) { +func (s MachineImageList) MarshalJSON() ([]byte, error) { type NoMethod MachineImageList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MachineImageListWarning: [Output Only] Informational warning message. 
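Note that the diff leaves the UnmarshalJSON methods, such as the one on InterconnectDiagnosticsLinkOpticalPower a few hunks above, on pointer receivers. That is necessary rather than an oversight: unmarshaling must write through the receiver, and a value receiver would populate a copy the caller never sees. A toy illustration of the same NoMethod pattern on the decode side:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type reading struct {
	Value float64 `json:"value,omitempty"`
}

// Pointer receiver: Unmarshal must mutate *r in place.
func (r *reading) UnmarshalJSON(data []byte) error {
	type noMethod reading // strips methods to avoid recursive UnmarshalJSON
	var nm noMethod
	if err := json.Unmarshal(data, &nm); err != nil {
		return err
	}
	*r = reading(nm)
	return nil
}

func main() {
	var r reading
	_ = json.Unmarshal([]byte(`{"value":1.5}`), &r)
	fmt.Println(r.Value) // 1.5
}
```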
@@ -23934,9 +24181,9 @@ type MachineImageListWarning struct { NullFields []string `json:"-"` } -func (s *MachineImageListWarning) MarshalJSON() ([]byte, error) { +func (s MachineImageListWarning) MarshalJSON() ([]byte, error) { type NoMethod MachineImageListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type MachineImageListWarningData struct { @@ -23963,9 +24210,9 @@ type MachineImageListWarningData struct { NullFields []string `json:"-"` } -func (s *MachineImageListWarningData) MarshalJSON() ([]byte, error) { +func (s MachineImageListWarningData) MarshalJSON() ([]byte, error) { type NoMethod MachineImageListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MachineType: Represents a Machine Type resource. You can use specific @@ -23975,6 +24222,14 @@ type MachineType struct { // Accelerators: [Output Only] A list of accelerator configurations assigned to // this machine type. Accelerators []*MachineTypeAccelerators `json:"accelerators,omitempty"` + // Architecture: [Output Only] The architecture of the machine type. + // + // Possible values: + // "ARCHITECTURE_UNSPECIFIED" - Default value indicating Architecture is not + // set. + // "ARM64" - Machines with architecture ARM64 + // "X86_64" - Machines with architecture X86_64 + Architecture string `json:"architecture,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text format. CreationTimestamp string `json:"creationTimestamp,omitempty"` // Deprecated -- [Output Only] The deprecation status associated with this @@ -24031,9 +24286,9 @@ type MachineType struct { NullFields []string `json:"-"` } -func (s *MachineType) MarshalJSON() ([]byte, error) { +func (s MachineType) MarshalJSON() ([]byte, error) { type NoMethod MachineType - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type MachineTypeAccelerators struct { @@ -24055,9 +24310,9 @@ type MachineTypeAccelerators struct { NullFields []string `json:"-"` } -func (s *MachineTypeAccelerators) MarshalJSON() ([]byte, error) { +func (s MachineTypeAccelerators) MarshalJSON() ([]byte, error) { type NoMethod MachineTypeAccelerators - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type MachineTypeScratchDisks struct { @@ -24076,9 +24331,9 @@ type MachineTypeScratchDisks struct { NullFields []string `json:"-"` } -func (s *MachineTypeScratchDisks) MarshalJSON() ([]byte, error) { +func (s MachineTypeScratchDisks) MarshalJSON() ([]byte, error) { type NoMethod MachineTypeScratchDisks - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type MachineTypeAggregatedList struct { @@ -24117,9 +24372,9 @@ type MachineTypeAggregatedList struct { NullFields []string `json:"-"` } -func (s *MachineTypeAggregatedList) MarshalJSON() ([]byte, error) { +func (s MachineTypeAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod MachineTypeAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // 
MachineTypeAggregatedListWarning: [Output Only] Informational warning @@ -24203,9 +24458,9 @@ type MachineTypeAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *MachineTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s MachineTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod MachineTypeAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type MachineTypeAggregatedListWarningData struct { @@ -24232,9 +24487,9 @@ type MachineTypeAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *MachineTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s MachineTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod MachineTypeAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MachineTypeList: Contains a list of machine types. @@ -24272,9 +24527,9 @@ type MachineTypeList struct { NullFields []string `json:"-"` } -func (s *MachineTypeList) MarshalJSON() ([]byte, error) { +func (s MachineTypeList) MarshalJSON() ([]byte, error) { type NoMethod MachineTypeList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MachineTypeListWarning: [Output Only] Informational warning message. @@ -24357,9 +24612,9 @@ type MachineTypeListWarning struct { NullFields []string `json:"-"` } -func (s *MachineTypeListWarning) MarshalJSON() ([]byte, error) { +func (s MachineTypeListWarning) MarshalJSON() ([]byte, error) { type NoMethod MachineTypeListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type MachineTypeListWarningData struct { @@ -24386,9 +24641,9 @@ type MachineTypeListWarningData struct { NullFields []string `json:"-"` } -func (s *MachineTypeListWarningData) MarshalJSON() ([]byte, error) { +func (s MachineTypeListWarningData) MarshalJSON() ([]byte, error) { type NoMethod MachineTypeListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type MachineTypesScopedList struct { @@ -24410,9 +24665,9 @@ type MachineTypesScopedList struct { NullFields []string `json:"-"` } -func (s *MachineTypesScopedList) MarshalJSON() ([]byte, error) { +func (s MachineTypesScopedList) MarshalJSON() ([]byte, error) { type NoMethod MachineTypesScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MachineTypesScopedListWarning: [Output Only] An informational warning that @@ -24496,9 +24751,9 @@ type MachineTypesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *MachineTypesScopedListWarning) MarshalJSON() ([]byte, error) { +func (s MachineTypesScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod MachineTypesScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type MachineTypesScopedListWarningData struct { @@ -24525,9 +24780,9 @@ type 
MachineTypesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *MachineTypesScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s MachineTypesScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod MachineTypesScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ManagedInstance: A Managed Instance resource. @@ -24637,9 +24892,9 @@ type ManagedInstance struct { NullFields []string `json:"-"` } -func (s *ManagedInstance) MarshalJSON() ([]byte, error) { +func (s ManagedInstance) MarshalJSON() ([]byte, error) { type NoMethod ManagedInstance - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ManagedInstanceInstanceHealth struct { @@ -24676,9 +24931,9 @@ type ManagedInstanceInstanceHealth struct { NullFields []string `json:"-"` } -func (s *ManagedInstanceInstanceHealth) MarshalJSON() ([]byte, error) { +func (s ManagedInstanceInstanceHealth) MarshalJSON() ([]byte, error) { type NoMethod ManagedInstanceInstanceHealth - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ManagedInstanceLastAttempt struct { @@ -24698,9 +24953,9 @@ type ManagedInstanceLastAttempt struct { NullFields []string `json:"-"` } -func (s *ManagedInstanceLastAttempt) MarshalJSON() ([]byte, error) { +func (s ManagedInstanceLastAttempt) MarshalJSON() ([]byte, error) { type NoMethod ManagedInstanceLastAttempt - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ManagedInstanceLastAttemptErrors: [Output Only] Encountered errors during @@ -24722,9 +24977,9 @@ type ManagedInstanceLastAttemptErrors struct { NullFields []string `json:"-"` } -func (s *ManagedInstanceLastAttemptErrors) MarshalJSON() ([]byte, error) { +func (s ManagedInstanceLastAttemptErrors) MarshalJSON() ([]byte, error) { type NoMethod ManagedInstanceLastAttemptErrors - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ManagedInstanceLastAttemptErrorsErrors struct { @@ -24753,9 +25008,9 @@ type ManagedInstanceLastAttemptErrorsErrors struct { NullFields []string `json:"-"` } -func (s *ManagedInstanceLastAttemptErrorsErrors) MarshalJSON() ([]byte, error) { +func (s ManagedInstanceLastAttemptErrorsErrors) MarshalJSON() ([]byte, error) { type NoMethod ManagedInstanceLastAttemptErrorsErrors - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ManagedInstanceLastAttemptErrorsErrorsErrorDetails struct { @@ -24776,9 +25031,9 @@ type ManagedInstanceLastAttemptErrorsErrorsErrorDetails struct { NullFields []string `json:"-"` } -func (s *ManagedInstanceLastAttemptErrorsErrorsErrorDetails) MarshalJSON() ([]byte, error) { +func (s ManagedInstanceLastAttemptErrorsErrorsErrorDetails) MarshalJSON() ([]byte, error) { type NoMethod ManagedInstanceLastAttemptErrorsErrorsErrorDetails - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ManagedInstanceVersion 
struct { @@ -24800,9 +25055,9 @@ type ManagedInstanceVersion struct { NullFields []string `json:"-"` } -func (s *ManagedInstanceVersion) MarshalJSON() ([]byte, error) { +func (s ManagedInstanceVersion) MarshalJSON() ([]byte, error) { type NoMethod ManagedInstanceVersion - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Metadata: A metadata key/value entry. @@ -24834,9 +25089,9 @@ type Metadata struct { NullFields []string `json:"-"` } -func (s *Metadata) MarshalJSON() ([]byte, error) { +func (s Metadata) MarshalJSON() ([]byte, error) { type NoMethod Metadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MetadataItems: Metadata @@ -24864,9 +25119,9 @@ type MetadataItems struct { NullFields []string `json:"-"` } -func (s *MetadataItems) MarshalJSON() ([]byte, error) { +func (s MetadataItems) MarshalJSON() ([]byte, error) { type NoMethod MetadataItems - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MetadataFilter: Opaque filter criteria used by load balancers to restrict @@ -24914,9 +25169,9 @@ type MetadataFilter struct { NullFields []string `json:"-"` } -func (s *MetadataFilter) MarshalJSON() ([]byte, error) { +func (s MetadataFilter) MarshalJSON() ([]byte, error) { type NoMethod MetadataFilter - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MetadataFilterLabelMatch: MetadataFilter label name value pairs that are @@ -24942,9 +25197,9 @@ type MetadataFilterLabelMatch struct { NullFields []string `json:"-"` } -func (s *MetadataFilterLabelMatch) MarshalJSON() ([]byte, error) { +func (s MetadataFilterLabelMatch) MarshalJSON() ([]byte, error) { type NoMethod MetadataFilterLabelMatch - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NamedPort: The named port. For example: <"http", 80>. @@ -24967,9 +25222,9 @@ type NamedPort struct { NullFields []string `json:"-"` } -func (s *NamedPort) MarshalJSON() ([]byte, error) { +func (s NamedPort) MarshalJSON() ([]byte, error) { type NoMethod NamedPort - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NatIpInfo: Contains NAT IP information of a NAT config (i.e. usage status, @@ -24992,9 +25247,9 @@ type NatIpInfo struct { NullFields []string `json:"-"` } -func (s *NatIpInfo) MarshalJSON() ([]byte, error) { +func (s NatIpInfo) MarshalJSON() ([]byte, error) { type NoMethod NatIpInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NatIpInfoNatIpInfoMapping: Contains information of a NAT IP. 
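Beyond the receiver change, this region also adds new API surface, such as the output-only MachineType.Architecture enum a few hunks above. A hypothetical consumer might branch on the documented string values like so (isARM and its treatment of the empty string are illustrative assumptions, not part of the generated client):

```go
package main

import "fmt"

// isARM shows one way a client could consume the new Architecture field;
// the enum strings come from the generated comments in the hunk above.
func isARM(arch string) bool {
	switch arch {
	case "ARM64":
		return true
	case "X86_64", "ARCHITECTURE_UNSPECIFIED", "":
		return false
	default:
		return false // unknown future values: treat conservatively
	}
}

func main() {
	fmt.Println(isARM("ARM64"), isARM("X86_64")) // true false
}
```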
@@ -25027,9 +25282,9 @@ type NatIpInfoNatIpInfoMapping struct { NullFields []string `json:"-"` } -func (s *NatIpInfoNatIpInfoMapping) MarshalJSON() ([]byte, error) { +func (s NatIpInfoNatIpInfoMapping) MarshalJSON() ([]byte, error) { type NoMethod NatIpInfoNatIpInfoMapping - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NatIpInfoResponse struct { @@ -25051,9 +25306,9 @@ type NatIpInfoResponse struct { NullFields []string `json:"-"` } -func (s *NatIpInfoResponse) MarshalJSON() ([]byte, error) { +func (s NatIpInfoResponse) MarshalJSON() ([]byte, error) { type NoMethod NatIpInfoResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Network: Represents a VPC Network resource. Networks connect resources to @@ -25152,9 +25407,9 @@ type Network struct { NullFields []string `json:"-"` } -func (s *Network) MarshalJSON() ([]byte, error) { +func (s Network) MarshalJSON() ([]byte, error) { type NoMethod Network - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkAttachment: NetworkAttachments A network attachment resource ... @@ -25231,9 +25486,9 @@ type NetworkAttachment struct { NullFields []string `json:"-"` } -func (s *NetworkAttachment) MarshalJSON() ([]byte, error) { +func (s NetworkAttachment) MarshalJSON() ([]byte, error) { type NoMethod NetworkAttachment - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkAttachmentAggregatedList: Contains a list of @@ -25270,9 +25525,9 @@ type NetworkAttachmentAggregatedList struct { NullFields []string `json:"-"` } -func (s *NetworkAttachmentAggregatedList) MarshalJSON() ([]byte, error) { +func (s NetworkAttachmentAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod NetworkAttachmentAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkAttachmentAggregatedListWarning: [Output Only] Informational warning @@ -25356,9 +25611,9 @@ type NetworkAttachmentAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *NetworkAttachmentAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s NetworkAttachmentAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod NetworkAttachmentAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworkAttachmentAggregatedListWarningData struct { @@ -25385,9 +25640,9 @@ type NetworkAttachmentAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *NetworkAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s NetworkAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod NetworkAttachmentAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkAttachmentConnectedEndpoint: [Output Only] A connection connected to @@ -25438,9 +25693,9 @@ type 
NetworkAttachmentConnectedEndpoint struct { NullFields []string `json:"-"` } -func (s *NetworkAttachmentConnectedEndpoint) MarshalJSON() ([]byte, error) { +func (s NetworkAttachmentConnectedEndpoint) MarshalJSON() ([]byte, error) { type NoMethod NetworkAttachmentConnectedEndpoint - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworkAttachmentList struct { @@ -25475,9 +25730,9 @@ type NetworkAttachmentList struct { NullFields []string `json:"-"` } -func (s *NetworkAttachmentList) MarshalJSON() ([]byte, error) { +func (s NetworkAttachmentList) MarshalJSON() ([]byte, error) { type NoMethod NetworkAttachmentList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkAttachmentListWarning: [Output Only] Informational warning message. @@ -25560,9 +25815,9 @@ type NetworkAttachmentListWarning struct { NullFields []string `json:"-"` } -func (s *NetworkAttachmentListWarning) MarshalJSON() ([]byte, error) { +func (s NetworkAttachmentListWarning) MarshalJSON() ([]byte, error) { type NoMethod NetworkAttachmentListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworkAttachmentListWarningData struct { @@ -25589,9 +25844,9 @@ type NetworkAttachmentListWarningData struct { NullFields []string `json:"-"` } -func (s *NetworkAttachmentListWarningData) MarshalJSON() ([]byte, error) { +func (s NetworkAttachmentListWarningData) MarshalJSON() ([]byte, error) { type NoMethod NetworkAttachmentListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworkAttachmentsScopedList struct { @@ -25613,9 +25868,9 @@ type NetworkAttachmentsScopedList struct { NullFields []string `json:"-"` } -func (s *NetworkAttachmentsScopedList) MarshalJSON() ([]byte, error) { +func (s NetworkAttachmentsScopedList) MarshalJSON() ([]byte, error) { type NoMethod NetworkAttachmentsScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkAttachmentsScopedListWarning: Informational warning which replaces @@ -25699,9 +25954,9 @@ type NetworkAttachmentsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *NetworkAttachmentsScopedListWarning) MarshalJSON() ([]byte, error) { +func (s NetworkAttachmentsScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod NetworkAttachmentsScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworkAttachmentsScopedListWarningData struct { @@ -25728,9 +25983,9 @@ type NetworkAttachmentsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *NetworkAttachmentsScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s NetworkAttachmentsScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod NetworkAttachmentsScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkEdgeSecurityService: Represents a 
Google Cloud Armor network edge @@ -25791,9 +26046,9 @@ type NetworkEdgeSecurityService struct { NullFields []string `json:"-"` } -func (s *NetworkEdgeSecurityService) MarshalJSON() ([]byte, error) { +func (s NetworkEdgeSecurityService) MarshalJSON() ([]byte, error) { type NoMethod NetworkEdgeSecurityService - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworkEdgeSecurityServiceAggregatedList struct { @@ -25834,9 +26089,9 @@ type NetworkEdgeSecurityServiceAggregatedList struct { NullFields []string `json:"-"` } -func (s *NetworkEdgeSecurityServiceAggregatedList) MarshalJSON() ([]byte, error) { +func (s NetworkEdgeSecurityServiceAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod NetworkEdgeSecurityServiceAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkEdgeSecurityServiceAggregatedListWarning: [Output Only] Informational @@ -25920,9 +26175,9 @@ type NetworkEdgeSecurityServiceAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *NetworkEdgeSecurityServiceAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s NetworkEdgeSecurityServiceAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod NetworkEdgeSecurityServiceAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworkEdgeSecurityServiceAggregatedListWarningData struct { @@ -25949,9 +26204,9 @@ type NetworkEdgeSecurityServiceAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *NetworkEdgeSecurityServiceAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s NetworkEdgeSecurityServiceAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod NetworkEdgeSecurityServiceAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworkEdgeSecurityServicesScopedList struct { @@ -25974,9 +26229,9 @@ type NetworkEdgeSecurityServicesScopedList struct { NullFields []string `json:"-"` } -func (s *NetworkEdgeSecurityServicesScopedList) MarshalJSON() ([]byte, error) { +func (s NetworkEdgeSecurityServicesScopedList) MarshalJSON() ([]byte, error) { type NoMethod NetworkEdgeSecurityServicesScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkEdgeSecurityServicesScopedListWarning: Informational warning which @@ -26060,9 +26315,9 @@ type NetworkEdgeSecurityServicesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *NetworkEdgeSecurityServicesScopedListWarning) MarshalJSON() ([]byte, error) { +func (s NetworkEdgeSecurityServicesScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod NetworkEdgeSecurityServicesScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworkEdgeSecurityServicesScopedListWarningData struct { @@ -26089,15 +26344,19 @@ type NetworkEdgeSecurityServicesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s 
*NetworkEdgeSecurityServicesScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s NetworkEdgeSecurityServicesScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod NetworkEdgeSecurityServicesScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkEndpoint: The network endpoint. type NetworkEndpoint struct { // Annotations: Metadata defined as annotations on the network endpoint. Annotations map[string]string `json:"annotations,omitempty"` + // ClientDestinationPort: Represents the port number to which PSC consumer + // sends packets. Only valid for network endpoint groups created with + // GCE_VM_IP_PORTMAP endpoint type. + ClientDestinationPort int64 `json:"clientDestinationPort,omitempty"` // Fqdn: Optional fully qualified domain name of network endpoint. This can // only be specified when NetworkEndpointGroup.network_endpoint_type is // NON_GCP_FQDN_PORT. @@ -26137,9 +26396,9 @@ type NetworkEndpoint struct { NullFields []string `json:"-"` } -func (s *NetworkEndpoint) MarshalJSON() ([]byte, error) { +func (s NetworkEndpoint) MarshalJSON() ([]byte, error) { type NoMethod NetworkEndpoint - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkEndpointGroup: Represents a collection of network endpoints. A @@ -26194,6 +26453,8 @@ type NetworkEndpointGroup struct { // "GCE_VM_IP" - The network endpoint is represented by an IP address. // "GCE_VM_IP_PORT" - The network endpoint is represented by IP address and // port pair. + // "GCE_VM_IP_PORTMAP" - The network endpoint is represented by an IP, Port + // and Client Destination Port. // "INTERNET_FQDN_PORT" - The network endpoint is represented by fully // qualified domain name and port. 
// "INTERNET_IP_PORT" - The network endpoint is represented by an internet IP @@ -26242,9 +26503,9 @@ type NetworkEndpointGroup struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroup) MarshalJSON() ([]byte, error) { +func (s NetworkEndpointGroup) MarshalJSON() ([]byte, error) { type NoMethod NetworkEndpointGroup - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworkEndpointGroupAggregatedList struct { @@ -26284,9 +26545,9 @@ type NetworkEndpointGroupAggregatedList struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupAggregatedList) MarshalJSON() ([]byte, error) { +func (s NetworkEndpointGroupAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod NetworkEndpointGroupAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkEndpointGroupAggregatedListWarning: [Output Only] Informational @@ -26370,9 +26631,9 @@ type NetworkEndpointGroupAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s NetworkEndpointGroupAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod NetworkEndpointGroupAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworkEndpointGroupAggregatedListWarningData struct { @@ -26399,9 +26660,9 @@ type NetworkEndpointGroupAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s NetworkEndpointGroupAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod NetworkEndpointGroupAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkEndpointGroupAppEngine: Configuration for an App Engine network @@ -26439,9 +26700,9 @@ type NetworkEndpointGroupAppEngine struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupAppEngine) MarshalJSON() ([]byte, error) { +func (s NetworkEndpointGroupAppEngine) MarshalJSON() ([]byte, error) { type NoMethod NetworkEndpointGroupAppEngine - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkEndpointGroupCloudFunction: Configuration for a Cloud Function @@ -26473,9 +26734,9 @@ type NetworkEndpointGroupCloudFunction struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupCloudFunction) MarshalJSON() ([]byte, error) { +func (s NetworkEndpointGroupCloudFunction) MarshalJSON() ([]byte, error) { type NoMethod NetworkEndpointGroupCloudFunction - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkEndpointGroupCloudRun: Configuration for a Cloud Run network endpoint @@ -26515,9 +26776,9 @@ type NetworkEndpointGroupCloudRun struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupCloudRun) MarshalJSON() ([]byte, error) { +func (s NetworkEndpointGroupCloudRun) MarshalJSON() ([]byte, error) { type NoMethod 
NetworkEndpointGroupCloudRun - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworkEndpointGroupList struct { @@ -26554,9 +26815,9 @@ type NetworkEndpointGroupList struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupList) MarshalJSON() ([]byte, error) { +func (s NetworkEndpointGroupList) MarshalJSON() ([]byte, error) { type NoMethod NetworkEndpointGroupList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkEndpointGroupListWarning: [Output Only] Informational warning @@ -26640,9 +26901,9 @@ type NetworkEndpointGroupListWarning struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupListWarning) MarshalJSON() ([]byte, error) { +func (s NetworkEndpointGroupListWarning) MarshalJSON() ([]byte, error) { type NoMethod NetworkEndpointGroupListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworkEndpointGroupListWarningData struct { @@ -26669,9 +26930,9 @@ type NetworkEndpointGroupListWarningData struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupListWarningData) MarshalJSON() ([]byte, error) { +func (s NetworkEndpointGroupListWarningData) MarshalJSON() ([]byte, error) { type NoMethod NetworkEndpointGroupListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkEndpointGroupPscData: All data that is specifically relevant to only @@ -26681,6 +26942,10 @@ type NetworkEndpointGroupPscData struct { // for PSC. This IP address acts as a VIP for a PSC NEG, allowing it to act as // an endpoint in L7 PSC-XLB. ConsumerPscAddress string `json:"consumerPscAddress,omitempty"` + // ProducerPort: The psc producer port is used to connect PSC NEG with specific + // port on the PSC Producer side; should only be used for the + // PRIVATE_SERVICE_CONNECT NEG type + ProducerPort int64 `json:"producerPort,omitempty"` // PscConnectionId: [Output Only] The PSC connection id of the PSC Network // Endpoint Group Consumer. 
PscConnectionId uint64 `json:"pscConnectionId,omitempty,string"` @@ -26711,9 +26976,9 @@ type NetworkEndpointGroupPscData struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupPscData) MarshalJSON() ([]byte, error) { +func (s NetworkEndpointGroupPscData) MarshalJSON() ([]byte, error) { type NoMethod NetworkEndpointGroupPscData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworkEndpointGroupsAttachEndpointsRequest struct { @@ -26732,9 +26997,9 @@ type NetworkEndpointGroupsAttachEndpointsRequest struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupsAttachEndpointsRequest) MarshalJSON() ([]byte, error) { +func (s NetworkEndpointGroupsAttachEndpointsRequest) MarshalJSON() ([]byte, error) { type NoMethod NetworkEndpointGroupsAttachEndpointsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworkEndpointGroupsDetachEndpointsRequest struct { @@ -26753,9 +27018,9 @@ type NetworkEndpointGroupsDetachEndpointsRequest struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupsDetachEndpointsRequest) MarshalJSON() ([]byte, error) { +func (s NetworkEndpointGroupsDetachEndpointsRequest) MarshalJSON() ([]byte, error) { type NoMethod NetworkEndpointGroupsDetachEndpointsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworkEndpointGroupsListEndpointsRequest struct { @@ -26781,9 +27046,9 @@ type NetworkEndpointGroupsListEndpointsRequest struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupsListEndpointsRequest) MarshalJSON() ([]byte, error) { +func (s NetworkEndpointGroupsListEndpointsRequest) MarshalJSON() ([]byte, error) { type NoMethod NetworkEndpointGroupsListEndpointsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworkEndpointGroupsListNetworkEndpoints struct { @@ -26819,9 +27084,9 @@ type NetworkEndpointGroupsListNetworkEndpoints struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupsListNetworkEndpoints) MarshalJSON() ([]byte, error) { +func (s NetworkEndpointGroupsListNetworkEndpoints) MarshalJSON() ([]byte, error) { type NoMethod NetworkEndpointGroupsListNetworkEndpoints - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkEndpointGroupsListNetworkEndpointsWarning: [Output Only] @@ -26905,9 +27170,9 @@ type NetworkEndpointGroupsListNetworkEndpointsWarning struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupsListNetworkEndpointsWarning) MarshalJSON() ([]byte, error) { +func (s NetworkEndpointGroupsListNetworkEndpointsWarning) MarshalJSON() ([]byte, error) { type NoMethod NetworkEndpointGroupsListNetworkEndpointsWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworkEndpointGroupsListNetworkEndpointsWarningData struct { @@ -26934,9 +27199,9 @@ type NetworkEndpointGroupsListNetworkEndpointsWarningData struct { NullFields []string `json:"-"` } -func (s 
*NetworkEndpointGroupsListNetworkEndpointsWarningData) MarshalJSON() ([]byte, error) { +func (s NetworkEndpointGroupsListNetworkEndpointsWarningData) MarshalJSON() ([]byte, error) { type NoMethod NetworkEndpointGroupsListNetworkEndpointsWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworkEndpointGroupsScopedList struct { @@ -26959,9 +27224,9 @@ type NetworkEndpointGroupsScopedList struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupsScopedList) MarshalJSON() ([]byte, error) { +func (s NetworkEndpointGroupsScopedList) MarshalJSON() ([]byte, error) { type NoMethod NetworkEndpointGroupsScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkEndpointGroupsScopedListWarning: [Output Only] An informational @@ -27046,9 +27311,9 @@ type NetworkEndpointGroupsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupsScopedListWarning) MarshalJSON() ([]byte, error) { +func (s NetworkEndpointGroupsScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod NetworkEndpointGroupsScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworkEndpointGroupsScopedListWarningData struct { @@ -27075,9 +27340,9 @@ type NetworkEndpointGroupsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupsScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s NetworkEndpointGroupsScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod NetworkEndpointGroupsScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworkEndpointWithHealthStatus struct { @@ -27098,9 +27363,9 @@ type NetworkEndpointWithHealthStatus struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointWithHealthStatus) MarshalJSON() ([]byte, error) { +func (s NetworkEndpointWithHealthStatus) MarshalJSON() ([]byte, error) { type NoMethod NetworkEndpointWithHealthStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkInterface: A network interface resource attached to an instance. @@ -27187,7 +27452,8 @@ type NetworkInterface struct { // // Possible values: // "IPV4_IPV6" - The network interface can have both IPv4 and IPv6 addresses. - // "IPV4_ONLY" - The network interface will be assigned IPv4 address. + // "IPV4_ONLY" - The network interface will only be assigned IPv4 addresses. + // "IPV6_ONLY" - The network interface will only be assigned IPv6 addresses. StackType string `json:"stackType,omitempty"` // Subnetwork: The URL of the Subnetwork resource for this instance. If the // network resource is in legacy mode, do not specify this field. 
If the @@ -27211,9 +27477,9 @@ type NetworkInterface struct { NullFields []string `json:"-"` } -func (s *NetworkInterface) MarshalJSON() ([]byte, error) { +func (s NetworkInterface) MarshalJSON() ([]byte, error) { type NoMethod NetworkInterface - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkList: Contains a list of networks. @@ -27251,9 +27517,9 @@ type NetworkList struct { NullFields []string `json:"-"` } -func (s *NetworkList) MarshalJSON() ([]byte, error) { +func (s NetworkList) MarshalJSON() ([]byte, error) { type NoMethod NetworkList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkListWarning: [Output Only] Informational warning message. @@ -27336,9 +27602,9 @@ type NetworkListWarning struct { NullFields []string `json:"-"` } -func (s *NetworkListWarning) MarshalJSON() ([]byte, error) { +func (s NetworkListWarning) MarshalJSON() ([]byte, error) { type NoMethod NetworkListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworkListWarningData struct { @@ -27365,9 +27631,9 @@ type NetworkListWarningData struct { NullFields []string `json:"-"` } -func (s *NetworkListWarningData) MarshalJSON() ([]byte, error) { +func (s NetworkListWarningData) MarshalJSON() ([]byte, error) { type NoMethod NetworkListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkPeering: A network peering attached to a network resource. 
The @@ -27451,9 +27717,9 @@ type NetworkPeering struct { NullFields []string `json:"-"` } -func (s *NetworkPeering) MarshalJSON() ([]byte, error) { +func (s NetworkPeering) MarshalJSON() ([]byte, error) { type NoMethod NetworkPeering - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworkPerformanceConfig struct { @@ -27474,9 +27740,9 @@ type NetworkPerformanceConfig struct { NullFields []string `json:"-"` } -func (s *NetworkPerformanceConfig) MarshalJSON() ([]byte, error) { +func (s NetworkPerformanceConfig) MarshalJSON() ([]byte, error) { type NoMethod NetworkPerformanceConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkRoutingConfig: A routing configuration attached to a network @@ -27507,9 +27773,9 @@ type NetworkRoutingConfig struct { NullFields []string `json:"-"` } -func (s *NetworkRoutingConfig) MarshalJSON() ([]byte, error) { +func (s NetworkRoutingConfig) MarshalJSON() ([]byte, error) { type NoMethod NetworkRoutingConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworksAddPeeringRequest struct { @@ -27546,9 +27812,9 @@ type NetworksAddPeeringRequest struct { NullFields []string `json:"-"` } -func (s *NetworksAddPeeringRequest) MarshalJSON() ([]byte, error) { +func (s NetworksAddPeeringRequest) MarshalJSON() ([]byte, error) { type NoMethod NetworksAddPeeringRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworksGetEffectiveFirewallsResponse struct { @@ -27572,9 +27838,9 @@ type NetworksGetEffectiveFirewallsResponse struct { NullFields []string `json:"-"` } -func (s *NetworksGetEffectiveFirewallsResponse) MarshalJSON() ([]byte, error) { +func (s NetworksGetEffectiveFirewallsResponse) MarshalJSON() ([]byte, error) { type NoMethod NetworksGetEffectiveFirewallsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy struct { @@ -27583,6 +27849,9 @@ type NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy struct { DisplayName string `json:"displayName,omitempty"` // Name: [Output Only] The name of the firewall policy. Name string `json:"name,omitempty"` + // Priority: [Output only] Priority of firewall policy association. Not + // applicable for type=HIERARCHY. + Priority int64 `json:"priority,omitempty"` // Rules: The rules that apply to the network. Rules []*FirewallPolicyRule `json:"rules,omitempty"` // ShortName: [Output Only] The short name of the firewall policy. @@ -27592,6 +27861,7 @@ type NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy struct { // Possible values: // "HIERARCHY" // "NETWORK" + // "SYSTEM" // "UNSPECIFIED" Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. 
"DisplayName") to @@ -27607,9 +27877,9 @@ type NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy struct { NullFields []string `json:"-"` } -func (s *NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy) MarshalJSON() ([]byte, error) { +func (s NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy) MarshalJSON() ([]byte, error) { type NoMethod NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworksRemovePeeringRequest struct { @@ -27628,9 +27898,9 @@ type NetworksRemovePeeringRequest struct { NullFields []string `json:"-"` } -func (s *NetworksRemovePeeringRequest) MarshalJSON() ([]byte, error) { +func (s NetworksRemovePeeringRequest) MarshalJSON() ([]byte, error) { type NoMethod NetworksRemovePeeringRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NetworksUpdatePeeringRequest struct { @@ -27648,9 +27918,9 @@ type NetworksUpdatePeeringRequest struct { NullFields []string `json:"-"` } -func (s *NetworksUpdatePeeringRequest) MarshalJSON() ([]byte, error) { +func (s NetworksUpdatePeeringRequest) MarshalJSON() ([]byte, error) { type NoMethod NetworksUpdatePeeringRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeGroup: Represents a sole-tenant Node Group resource. A sole-tenant node @@ -27754,9 +28024,9 @@ type NodeGroup struct { NullFields []string `json:"-"` } -func (s *NodeGroup) MarshalJSON() ([]byte, error) { +func (s NodeGroup) MarshalJSON() ([]byte, error) { type NoMethod NodeGroup - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeGroupAggregatedList struct { @@ -27795,9 +28065,9 @@ type NodeGroupAggregatedList struct { NullFields []string `json:"-"` } -func (s *NodeGroupAggregatedList) MarshalJSON() ([]byte, error) { +func (s NodeGroupAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod NodeGroupAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeGroupAggregatedListWarning: [Output Only] Informational warning message. 
@@ -27880,9 +28150,9 @@ type NodeGroupAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *NodeGroupAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s NodeGroupAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod NodeGroupAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeGroupAggregatedListWarningData struct { @@ -27909,9 +28179,9 @@ type NodeGroupAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeGroupAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s NodeGroupAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod NodeGroupAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeGroupAutoscalingPolicy struct { @@ -27943,9 +28213,9 @@ type NodeGroupAutoscalingPolicy struct { NullFields []string `json:"-"` } -func (s *NodeGroupAutoscalingPolicy) MarshalJSON() ([]byte, error) { +func (s NodeGroupAutoscalingPolicy) MarshalJSON() ([]byte, error) { type NoMethod NodeGroupAutoscalingPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeGroupList: Contains a list of nodeGroups. @@ -27983,9 +28253,9 @@ type NodeGroupList struct { NullFields []string `json:"-"` } -func (s *NodeGroupList) MarshalJSON() ([]byte, error) { +func (s NodeGroupList) MarshalJSON() ([]byte, error) { type NoMethod NodeGroupList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeGroupListWarning: [Output Only] Informational warning message. 
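Editor's note: the hunks above and below all apply the same mechanical change: MarshalJSON moves from a pointer receiver to a value receiver, and NoMethod(*s) becomes NoMethod(s). A self-contained sketch of why the NoMethod alias exists, with a toy struct standing in for the generated types and plain json.Marshal standing in for gensupport.MarshalJSON:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// removePeeringSketch is a toy stand-in for a generated request struct.
type removePeeringSketch struct {
	Name            string   `json:"name,omitempty"`
	ForceSendFields []string `json:"-"`
	NullFields      []string `json:"-"`
}

// Value receiver, matching the updated generated code. NoMethod has the same
// fields but none of the methods, so the json.Marshal call below does not
// re-enter this MarshalJSON and recurse forever.
func (s removePeeringSketch) MarshalJSON() ([]byte, error) {
	type NoMethod removePeeringSketch
	return json.Marshal(NoMethod(s)) // the real code calls gensupport.MarshalJSON here
}

func main() {
	b, err := json.Marshal(removePeeringSketch{Name: "peering-1"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"name":"peering-1"}
}
```

Because a defined type does not inherit the methods of its underlying type, NoMethod(s) carries no MarshalJSON; and with a value receiver the method set is identical for values and pointers, which is the form the generator switched to throughout this file.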
@@ -28068,9 +28338,9 @@ type NodeGroupListWarning struct { NullFields []string `json:"-"` } -func (s *NodeGroupListWarning) MarshalJSON() ([]byte, error) { +func (s NodeGroupListWarning) MarshalJSON() ([]byte, error) { type NoMethod NodeGroupListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeGroupListWarningData struct { @@ -28097,9 +28367,9 @@ type NodeGroupListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeGroupListWarningData) MarshalJSON() ([]byte, error) { +func (s NodeGroupListWarningData) MarshalJSON() ([]byte, error) { type NoMethod NodeGroupListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeGroupMaintenanceWindow: Time window specified for daily maintenance @@ -28125,9 +28395,9 @@ type NodeGroupMaintenanceWindow struct { NullFields []string `json:"-"` } -func (s *NodeGroupMaintenanceWindow) MarshalJSON() ([]byte, error) { +func (s NodeGroupMaintenanceWindow) MarshalJSON() ([]byte, error) { type NoMethod NodeGroupMaintenanceWindow - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeGroupNode struct { @@ -28184,9 +28454,9 @@ type NodeGroupNode struct { NullFields []string `json:"-"` } -func (s *NodeGroupNode) MarshalJSON() ([]byte, error) { +func (s NodeGroupNode) MarshalJSON() ([]byte, error) { type NoMethod NodeGroupNode - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeGroupsAddNodesRequest struct { @@ -28206,9 +28476,9 @@ type NodeGroupsAddNodesRequest struct { NullFields []string `json:"-"` } -func (s *NodeGroupsAddNodesRequest) MarshalJSON() ([]byte, error) { +func (s NodeGroupsAddNodesRequest) MarshalJSON() ([]byte, error) { type NoMethod NodeGroupsAddNodesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeGroupsDeleteNodesRequest struct { @@ -28227,9 +28497,9 @@ type NodeGroupsDeleteNodesRequest struct { NullFields []string `json:"-"` } -func (s *NodeGroupsDeleteNodesRequest) MarshalJSON() ([]byte, error) { +func (s NodeGroupsDeleteNodesRequest) MarshalJSON() ([]byte, error) { type NoMethod NodeGroupsDeleteNodesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeGroupsListNodes struct { @@ -28267,9 +28537,9 @@ type NodeGroupsListNodes struct { NullFields []string `json:"-"` } -func (s *NodeGroupsListNodes) MarshalJSON() ([]byte, error) { +func (s NodeGroupsListNodes) MarshalJSON() ([]byte, error) { type NoMethod NodeGroupsListNodes - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeGroupsListNodesWarning: [Output Only] Informational warning message. 
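Editor's note: every struct in this file carries ForceSendFields and NullFields with json:"-" tags, with the contract described in the comments above; the actual handling lives in gensupport.MarshalJSON. A simplified re-implementation with an illustrative struct, showing why omitempty alone is not enough when a caller needs to send an explicit zero or an explicit null:

```go
package main

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strings"
)

// autoscalingPolicySketch mirrors the shape of the generated structs; the
// field set is illustrative, not the real API surface.
type autoscalingPolicySketch struct {
	MaxNodes        int64    `json:"maxNodes,omitempty"`
	MinNodes        int64    `json:"minNodes,omitempty"`
	Mode            string   `json:"mode,omitempty"`
	ForceSendFields []string `json:"-"`
	NullFields      []string `json:"-"`
}

// jsonKey returns the JSON object key for a Go field name.
func jsonKey(t reflect.Type, goName string) (string, bool) {
	f, ok := t.FieldByName(goName)
	if !ok {
		return "", false
	}
	return strings.Split(f.Tag.Get("json"), ",")[0], true
}

// marshalSketch approximates what gensupport.MarshalJSON does for these
// structs: omitempty drops zero-valued fields, then ForceSendFields re-adds
// them and NullFields emits explicit JSON nulls. Expects a struct value.
func marshalSketch(v interface{}, force, nulls []string) ([]byte, error) {
	b, err := json.Marshal(v)
	if err != nil {
		return nil, err
	}
	var m map[string]interface{}
	if err := json.Unmarshal(b, &m); err != nil {
		return nil, err
	}
	rv := reflect.ValueOf(v)
	for _, name := range force {
		if key, ok := jsonKey(rv.Type(), name); ok {
			if _, present := m[key]; !present {
				m[key] = rv.FieldByName(name).Interface()
			}
		}
	}
	for _, name := range nulls {
		if key, ok := jsonKey(rv.Type(), name); ok {
			m[key] = nil
		}
	}
	return json.Marshal(m)
}

func main() {
	p := autoscalingPolicySketch{
		MaxNodes:        10,
		ForceSendFields: []string{"MinNodes"}, // send "minNodes":0 despite omitempty
		NullFields:      []string{"Mode"},     // send "mode":null to clear it
	}
	b, err := marshalSketch(p, p.ForceSendFields, p.NullFields)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"maxNodes":10,"minNodes":0,"mode":null}
}
```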
@@ -28352,9 +28622,9 @@ type NodeGroupsListNodesWarning struct { NullFields []string `json:"-"` } -func (s *NodeGroupsListNodesWarning) MarshalJSON() ([]byte, error) { +func (s NodeGroupsListNodesWarning) MarshalJSON() ([]byte, error) { type NoMethod NodeGroupsListNodesWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeGroupsListNodesWarningData struct { @@ -28381,9 +28651,9 @@ type NodeGroupsListNodesWarningData struct { NullFields []string `json:"-"` } -func (s *NodeGroupsListNodesWarningData) MarshalJSON() ([]byte, error) { +func (s NodeGroupsListNodesWarningData) MarshalJSON() ([]byte, error) { type NoMethod NodeGroupsListNodesWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeGroupsPerformMaintenanceRequest struct { @@ -28405,9 +28675,9 @@ type NodeGroupsPerformMaintenanceRequest struct { NullFields []string `json:"-"` } -func (s *NodeGroupsPerformMaintenanceRequest) MarshalJSON() ([]byte, error) { +func (s NodeGroupsPerformMaintenanceRequest) MarshalJSON() ([]byte, error) { type NoMethod NodeGroupsPerformMaintenanceRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeGroupsScopedList struct { @@ -28429,9 +28699,9 @@ type NodeGroupsScopedList struct { NullFields []string `json:"-"` } -func (s *NodeGroupsScopedList) MarshalJSON() ([]byte, error) { +func (s NodeGroupsScopedList) MarshalJSON() ([]byte, error) { type NoMethod NodeGroupsScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeGroupsScopedListWarning: [Output Only] An informational warning that @@ -28515,9 +28785,9 @@ type NodeGroupsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *NodeGroupsScopedListWarning) MarshalJSON() ([]byte, error) { +func (s NodeGroupsScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod NodeGroupsScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeGroupsScopedListWarningData struct { @@ -28544,9 +28814,9 @@ type NodeGroupsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeGroupsScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s NodeGroupsScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod NodeGroupsScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeGroupsSetNodeTemplateRequest struct { @@ -28566,9 +28836,9 @@ type NodeGroupsSetNodeTemplateRequest struct { NullFields []string `json:"-"` } -func (s *NodeGroupsSetNodeTemplateRequest) MarshalJSON() ([]byte, error) { +func (s NodeGroupsSetNodeTemplateRequest) MarshalJSON() ([]byte, error) { type NoMethod NodeGroupsSetNodeTemplateRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeGroupsSimulateMaintenanceEventRequest struct { @@ -28587,9 +28857,9 @@ type 
NodeGroupsSimulateMaintenanceEventRequest struct { NullFields []string `json:"-"` } -func (s *NodeGroupsSimulateMaintenanceEventRequest) MarshalJSON() ([]byte, error) { +func (s NodeGroupsSimulateMaintenanceEventRequest) MarshalJSON() ([]byte, error) { type NoMethod NodeGroupsSimulateMaintenanceEventRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeTemplate: Represent a sole-tenant Node Template resource. You can use a @@ -28671,9 +28941,9 @@ type NodeTemplate struct { NullFields []string `json:"-"` } -func (s *NodeTemplate) MarshalJSON() ([]byte, error) { +func (s NodeTemplate) MarshalJSON() ([]byte, error) { type NoMethod NodeTemplate - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeTemplateAggregatedList struct { @@ -28712,9 +28982,9 @@ type NodeTemplateAggregatedList struct { NullFields []string `json:"-"` } -func (s *NodeTemplateAggregatedList) MarshalJSON() ([]byte, error) { +func (s NodeTemplateAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod NodeTemplateAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeTemplateAggregatedListWarning: [Output Only] Informational warning @@ -28798,9 +29068,9 @@ type NodeTemplateAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *NodeTemplateAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s NodeTemplateAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod NodeTemplateAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeTemplateAggregatedListWarningData struct { @@ -28827,9 +29097,9 @@ type NodeTemplateAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeTemplateAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s NodeTemplateAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod NodeTemplateAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeTemplateList: Contains a list of node templates. @@ -28867,9 +29137,9 @@ type NodeTemplateList struct { NullFields []string `json:"-"` } -func (s *NodeTemplateList) MarshalJSON() ([]byte, error) { +func (s NodeTemplateList) MarshalJSON() ([]byte, error) { type NoMethod NodeTemplateList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeTemplateListWarning: [Output Only] Informational warning message. 
@@ -28952,9 +29222,9 @@ type NodeTemplateListWarning struct { NullFields []string `json:"-"` } -func (s *NodeTemplateListWarning) MarshalJSON() ([]byte, error) { +func (s NodeTemplateListWarning) MarshalJSON() ([]byte, error) { type NoMethod NodeTemplateListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeTemplateListWarningData struct { @@ -28981,9 +29251,9 @@ type NodeTemplateListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeTemplateListWarningData) MarshalJSON() ([]byte, error) { +func (s NodeTemplateListWarningData) MarshalJSON() ([]byte, error) { type NoMethod NodeTemplateListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeTemplateNodeTypeFlexibility struct { @@ -29003,9 +29273,9 @@ type NodeTemplateNodeTypeFlexibility struct { NullFields []string `json:"-"` } -func (s *NodeTemplateNodeTypeFlexibility) MarshalJSON() ([]byte, error) { +func (s NodeTemplateNodeTypeFlexibility) MarshalJSON() ([]byte, error) { type NoMethod NodeTemplateNodeTypeFlexibility - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeTemplatesScopedList struct { @@ -29028,9 +29298,9 @@ type NodeTemplatesScopedList struct { NullFields []string `json:"-"` } -func (s *NodeTemplatesScopedList) MarshalJSON() ([]byte, error) { +func (s NodeTemplatesScopedList) MarshalJSON() ([]byte, error) { type NoMethod NodeTemplatesScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeTemplatesScopedListWarning: [Output Only] An informational warning that @@ -29114,9 +29384,9 @@ type NodeTemplatesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *NodeTemplatesScopedListWarning) MarshalJSON() ([]byte, error) { +func (s NodeTemplatesScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod NodeTemplatesScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeTemplatesScopedListWarningData struct { @@ -29143,9 +29413,9 @@ type NodeTemplatesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeTemplatesScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s NodeTemplatesScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod NodeTemplatesScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeType: Represent a sole-tenant Node Type resource. 
Each node within a @@ -29201,9 +29471,9 @@ type NodeType struct { NullFields []string `json:"-"` } -func (s *NodeType) MarshalJSON() ([]byte, error) { +func (s NodeType) MarshalJSON() ([]byte, error) { type NoMethod NodeType - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeTypeAggregatedList struct { @@ -29242,9 +29512,9 @@ type NodeTypeAggregatedList struct { NullFields []string `json:"-"` } -func (s *NodeTypeAggregatedList) MarshalJSON() ([]byte, error) { +func (s NodeTypeAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod NodeTypeAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeTypeAggregatedListWarning: [Output Only] Informational warning message. @@ -29327,9 +29597,9 @@ type NodeTypeAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *NodeTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s NodeTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod NodeTypeAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeTypeAggregatedListWarningData struct { @@ -29356,9 +29626,9 @@ type NodeTypeAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s NodeTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod NodeTypeAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeTypeList: Contains a list of node types. @@ -29396,9 +29666,9 @@ type NodeTypeList struct { NullFields []string `json:"-"` } -func (s *NodeTypeList) MarshalJSON() ([]byte, error) { +func (s NodeTypeList) MarshalJSON() ([]byte, error) { type NoMethod NodeTypeList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeTypeListWarning: [Output Only] Informational warning message. 
@@ -29481,9 +29751,9 @@ type NodeTypeListWarning struct { NullFields []string `json:"-"` } -func (s *NodeTypeListWarning) MarshalJSON() ([]byte, error) { +func (s NodeTypeListWarning) MarshalJSON() ([]byte, error) { type NoMethod NodeTypeListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeTypeListWarningData struct { @@ -29510,9 +29780,9 @@ type NodeTypeListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeTypeListWarningData) MarshalJSON() ([]byte, error) { +func (s NodeTypeListWarningData) MarshalJSON() ([]byte, error) { type NoMethod NodeTypeListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeTypesScopedList struct { @@ -29534,9 +29804,9 @@ type NodeTypesScopedList struct { NullFields []string `json:"-"` } -func (s *NodeTypesScopedList) MarshalJSON() ([]byte, error) { +func (s NodeTypesScopedList) MarshalJSON() ([]byte, error) { type NoMethod NodeTypesScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeTypesScopedListWarning: [Output Only] An informational warning that @@ -29620,9 +29890,9 @@ type NodeTypesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *NodeTypesScopedListWarning) MarshalJSON() ([]byte, error) { +func (s NodeTypesScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod NodeTypesScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NodeTypesScopedListWarningData struct { @@ -29649,9 +29919,9 @@ type NodeTypesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeTypesScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s NodeTypesScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod NodeTypesScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NotificationEndpoint: Represents a notification endpoint. 
A notification @@ -29704,9 +29974,9 @@ type NotificationEndpoint struct { NullFields []string `json:"-"` } -func (s *NotificationEndpoint) MarshalJSON() ([]byte, error) { +func (s NotificationEndpoint) MarshalJSON() ([]byte, error) { type NoMethod NotificationEndpoint - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NotificationEndpointGrpcSettings: Represents a gRPC setting that describes @@ -29746,9 +30016,9 @@ type NotificationEndpointGrpcSettings struct { NullFields []string `json:"-"` } -func (s *NotificationEndpointGrpcSettings) MarshalJSON() ([]byte, error) { +func (s NotificationEndpointGrpcSettings) MarshalJSON() ([]byte, error) { type NoMethod NotificationEndpointGrpcSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NotificationEndpointList struct { @@ -29785,9 +30055,9 @@ type NotificationEndpointList struct { NullFields []string `json:"-"` } -func (s *NotificationEndpointList) MarshalJSON() ([]byte, error) { +func (s NotificationEndpointList) MarshalJSON() ([]byte, error) { type NoMethod NotificationEndpointList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NotificationEndpointListWarning: [Output Only] Informational warning @@ -29871,9 +30141,9 @@ type NotificationEndpointListWarning struct { NullFields []string `json:"-"` } -func (s *NotificationEndpointListWarning) MarshalJSON() ([]byte, error) { +func (s NotificationEndpointListWarning) MarshalJSON() ([]byte, error) { type NoMethod NotificationEndpointListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type NotificationEndpointListWarningData struct { @@ -29900,9 +30170,9 @@ type NotificationEndpointListWarningData struct { NullFields []string `json:"-"` } -func (s *NotificationEndpointListWarningData) MarshalJSON() ([]byte, error) { +func (s NotificationEndpointListWarningData) MarshalJSON() ([]byte, error) { type NoMethod NotificationEndpointListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Operation: Represents an Operation resource. Google Compute Engine has three @@ -29991,7 +30261,7 @@ type Operation struct { TargetId uint64 `json:"targetId,omitempty,string"` // TargetLink: [Output Only] The URL of the resource that the operation // modifies. For operations related to creating a snapshot, this points to the - // persistent disk that the snapshot was created from. + // disk that the snapshot was created from. 
TargetLink string `json:"targetLink,omitempty"` // User: [Output Only] User who requested the operation, for example: // `user@example.com` or `alice_smith_identifier @@ -30019,9 +30289,9 @@ type Operation struct { NullFields []string `json:"-"` } -func (s *Operation) MarshalJSON() ([]byte, error) { +func (s Operation) MarshalJSON() ([]byte, error) { type NoMethod Operation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationError: [Output Only] If errors are generated during processing of @@ -30043,9 +30313,9 @@ type OperationError struct { NullFields []string `json:"-"` } -func (s *OperationError) MarshalJSON() ([]byte, error) { +func (s OperationError) MarshalJSON() ([]byte, error) { type NoMethod OperationError - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type OperationErrorErrors struct { @@ -30074,9 +30344,9 @@ type OperationErrorErrors struct { NullFields []string `json:"-"` } -func (s *OperationErrorErrors) MarshalJSON() ([]byte, error) { +func (s OperationErrorErrors) MarshalJSON() ([]byte, error) { type NoMethod OperationErrorErrors - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type OperationErrorErrorsErrorDetails struct { @@ -30097,9 +30367,9 @@ type OperationErrorErrorsErrorDetails struct { NullFields []string `json:"-"` } -func (s *OperationErrorErrorsErrorDetails) MarshalJSON() ([]byte, error) { +func (s OperationErrorErrorsErrorDetails) MarshalJSON() ([]byte, error) { type NoMethod OperationErrorErrorsErrorDetails - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type OperationWarnings struct { @@ -30181,9 +30451,9 @@ type OperationWarnings struct { NullFields []string `json:"-"` } -func (s *OperationWarnings) MarshalJSON() ([]byte, error) { +func (s OperationWarnings) MarshalJSON() ([]byte, error) { type NoMethod OperationWarnings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type OperationWarningsData struct { @@ -30210,9 +30480,9 @@ type OperationWarningsData struct { NullFields []string `json:"-"` } -func (s *OperationWarningsData) MarshalJSON() ([]byte, error) { +func (s OperationWarningsData) MarshalJSON() ([]byte, error) { type NoMethod OperationWarningsData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type OperationAggregatedList struct { @@ -30252,9 +30522,9 @@ type OperationAggregatedList struct { NullFields []string `json:"-"` } -func (s *OperationAggregatedList) MarshalJSON() ([]byte, error) { +func (s OperationAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod OperationAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationAggregatedListWarning: [Output Only] Informational warning message. 
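Editor's note: Operation.TargetId just above, like PscConnectionId earlier in this file, is tagged `json:"targetId,omitempty,string"`. The ",string" option makes encoding/json carry the 64-bit ID as a quoted string, which protects it from consumers that decode JSON numbers into float64. A small round-trip demonstration with a stand-in struct:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// operationSketch stands in for the generated Operation type; only TargetId
// is kept. The ",string" option makes encoding/json read and write the
// uint64 as a quoted string.
type operationSketch struct {
	TargetId uint64 `json:"targetId,omitempty,string"`
}

func main() {
	// 2^63 + 3 is not representable as a float64, so a bare JSON number
	// would silently lose precision in decoders that use float64.
	op := operationSketch{TargetId: 9223372036854775811}
	b, err := json.Marshal(op)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"targetId":"9223372036854775811"}

	var back operationSketch
	if err := json.Unmarshal(b, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.TargetId == op.TargetId) // true
}
```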
@@ -30337,9 +30607,9 @@ type OperationAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *OperationAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s OperationAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod OperationAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type OperationAggregatedListWarningData struct { @@ -30366,9 +30636,9 @@ type OperationAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *OperationAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s OperationAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod OperationAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationList: Contains a list of Operation resources. @@ -30407,9 +30677,9 @@ type OperationList struct { NullFields []string `json:"-"` } -func (s *OperationList) MarshalJSON() ([]byte, error) { +func (s OperationList) MarshalJSON() ([]byte, error) { type NoMethod OperationList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationListWarning: [Output Only] Informational warning message. @@ -30492,9 +30762,9 @@ type OperationListWarning struct { NullFields []string `json:"-"` } -func (s *OperationListWarning) MarshalJSON() ([]byte, error) { +func (s OperationListWarning) MarshalJSON() ([]byte, error) { type NoMethod OperationListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type OperationListWarningData struct { @@ -30521,9 +30791,9 @@ type OperationListWarningData struct { NullFields []string `json:"-"` } -func (s *OperationListWarningData) MarshalJSON() ([]byte, error) { +func (s OperationListWarningData) MarshalJSON() ([]byte, error) { type NoMethod OperationListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type OperationsScopedList struct { @@ -30545,9 +30815,9 @@ type OperationsScopedList struct { NullFields []string `json:"-"` } -func (s *OperationsScopedList) MarshalJSON() ([]byte, error) { +func (s OperationsScopedList) MarshalJSON() ([]byte, error) { type NoMethod OperationsScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationsScopedListWarning: [Output Only] Informational warning which @@ -30631,9 +30901,9 @@ type OperationsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *OperationsScopedListWarning) MarshalJSON() ([]byte, error) { +func (s OperationsScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod OperationsScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type OperationsScopedListWarningData struct { @@ -30660,9 +30930,9 @@ type OperationsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *OperationsScopedListWarningData) MarshalJSON() ([]byte, error) { 
+func (s OperationsScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod OperationsScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OutlierDetection: Settings controlling the eviction of unhealthy hosts from @@ -30744,9 +31014,9 @@ type OutlierDetection struct { NullFields []string `json:"-"` } -func (s *OutlierDetection) MarshalJSON() ([]byte, error) { +func (s OutlierDetection) MarshalJSON() ([]byte, error) { type NoMethod OutlierDetection - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PacketIntervals: Next free: 7 @@ -30790,9 +31060,9 @@ type PacketIntervals struct { NullFields []string `json:"-"` } -func (s *PacketIntervals) MarshalJSON() ([]byte, error) { +func (s PacketIntervals) MarshalJSON() ([]byte, error) { type NoMethod PacketIntervals - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PacketMirroring: Represents a Packet Mirroring resource. Packet Mirroring @@ -30871,9 +31141,9 @@ type PacketMirroring struct { NullFields []string `json:"-"` } -func (s *PacketMirroring) MarshalJSON() ([]byte, error) { +func (s PacketMirroring) MarshalJSON() ([]byte, error) { type NoMethod PacketMirroring - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PacketMirroringAggregatedList: Contains a list of packetMirrorings. @@ -30912,9 +31182,9 @@ type PacketMirroringAggregatedList struct { NullFields []string `json:"-"` } -func (s *PacketMirroringAggregatedList) MarshalJSON() ([]byte, error) { +func (s PacketMirroringAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod PacketMirroringAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PacketMirroringAggregatedListWarning: [Output Only] Informational warning @@ -30998,9 +31268,9 @@ type PacketMirroringAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *PacketMirroringAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s PacketMirroringAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod PacketMirroringAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PacketMirroringAggregatedListWarningData struct { @@ -31027,9 +31297,9 @@ type PacketMirroringAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *PacketMirroringAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s PacketMirroringAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod PacketMirroringAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PacketMirroringFilter struct { @@ -31066,9 +31336,9 @@ type PacketMirroringFilter struct { NullFields []string `json:"-"` } -func (s *PacketMirroringFilter) MarshalJSON() ([]byte, error) { +func (s PacketMirroringFilter) MarshalJSON() ([]byte, error) { type NoMethod 
PacketMirroringFilter - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PacketMirroringForwardingRuleInfo struct { @@ -31091,9 +31361,9 @@ type PacketMirroringForwardingRuleInfo struct { NullFields []string `json:"-"` } -func (s *PacketMirroringForwardingRuleInfo) MarshalJSON() ([]byte, error) { +func (s PacketMirroringForwardingRuleInfo) MarshalJSON() ([]byte, error) { type NoMethod PacketMirroringForwardingRuleInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PacketMirroringList: Contains a list of PacketMirroring resources. @@ -31131,9 +31401,9 @@ type PacketMirroringList struct { NullFields []string `json:"-"` } -func (s *PacketMirroringList) MarshalJSON() ([]byte, error) { +func (s PacketMirroringList) MarshalJSON() ([]byte, error) { type NoMethod PacketMirroringList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PacketMirroringListWarning: [Output Only] Informational warning message. @@ -31216,9 +31486,9 @@ type PacketMirroringListWarning struct { NullFields []string `json:"-"` } -func (s *PacketMirroringListWarning) MarshalJSON() ([]byte, error) { +func (s PacketMirroringListWarning) MarshalJSON() ([]byte, error) { type NoMethod PacketMirroringListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PacketMirroringListWarningData struct { @@ -31245,9 +31515,9 @@ type PacketMirroringListWarningData struct { NullFields []string `json:"-"` } -func (s *PacketMirroringListWarningData) MarshalJSON() ([]byte, error) { +func (s PacketMirroringListWarningData) MarshalJSON() ([]byte, error) { type NoMethod PacketMirroringListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PacketMirroringMirroredResourceInfo struct { @@ -31277,9 +31547,9 @@ type PacketMirroringMirroredResourceInfo struct { NullFields []string `json:"-"` } -func (s *PacketMirroringMirroredResourceInfo) MarshalJSON() ([]byte, error) { +func (s PacketMirroringMirroredResourceInfo) MarshalJSON() ([]byte, error) { type NoMethod PacketMirroringMirroredResourceInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PacketMirroringMirroredResourceInfoInstanceInfo struct { @@ -31301,9 +31571,9 @@ type PacketMirroringMirroredResourceInfoInstanceInfo struct { NullFields []string `json:"-"` } -func (s *PacketMirroringMirroredResourceInfoInstanceInfo) MarshalJSON() ([]byte, error) { +func (s PacketMirroringMirroredResourceInfoInstanceInfo) MarshalJSON() ([]byte, error) { type NoMethod PacketMirroringMirroredResourceInfoInstanceInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PacketMirroringMirroredResourceInfoSubnetInfo struct { @@ -31326,9 +31596,9 @@ type PacketMirroringMirroredResourceInfoSubnetInfo struct { NullFields []string `json:"-"` } -func (s *PacketMirroringMirroredResourceInfoSubnetInfo) MarshalJSON() ([]byte, error) 
{ +func (s PacketMirroringMirroredResourceInfoSubnetInfo) MarshalJSON() ([]byte, error) { type NoMethod PacketMirroringMirroredResourceInfoSubnetInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PacketMirroringNetworkInfo struct { @@ -31350,9 +31620,9 @@ type PacketMirroringNetworkInfo struct { NullFields []string `json:"-"` } -func (s *PacketMirroringNetworkInfo) MarshalJSON() ([]byte, error) { +func (s PacketMirroringNetworkInfo) MarshalJSON() ([]byte, error) { type NoMethod PacketMirroringNetworkInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PacketMirroringsScopedList struct { @@ -31374,9 +31644,9 @@ type PacketMirroringsScopedList struct { NullFields []string `json:"-"` } -func (s *PacketMirroringsScopedList) MarshalJSON() ([]byte, error) { +func (s PacketMirroringsScopedList) MarshalJSON() ([]byte, error) { type NoMethod PacketMirroringsScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PacketMirroringsScopedListWarning: Informational warning which replaces the @@ -31460,9 +31730,9 @@ type PacketMirroringsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *PacketMirroringsScopedListWarning) MarshalJSON() ([]byte, error) { +func (s PacketMirroringsScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod PacketMirroringsScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PacketMirroringsScopedListWarningData struct { @@ -31489,15 +31759,42 @@ type PacketMirroringsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *PacketMirroringsScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s PacketMirroringsScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod PacketMirroringsScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PathMatcher: A matcher for the path portion of the URL. The BackendService // from the longest-matched rule will serve the URL. If no rule was matched, // the default service is used. type PathMatcher struct { + // DefaultCustomErrorResponsePolicy: defaultCustomErrorResponsePolicy specifies + // how the Load Balancer returns error responses when BackendServiceor + // BackendBucket responds with an error. This policy takes effect at the + // PathMatcher level and applies only when no policy has been defined for the + // error code at lower levels like RouteRule and PathRule within this + // PathMatcher. If an error code does not have a policy defined in + // defaultCustomErrorResponsePolicy, then a policy defined for the error code + // in UrlMap.defaultCustomErrorResponsePolicy takes effect. For example, + // consider a UrlMap with the following configuration: - + // UrlMap.defaultCustomErrorResponsePolicy is configured with policies for 5xx + // and 4xx errors - A RouteRule for /coming_soon/ is configured for the error + // code 404. If the request is for www.myotherdomain.com and a 404 is + // encountered, the policy under UrlMap.defaultCustomErrorResponsePolicy takes + // effect. 
If a 404 response is encountered for the request + // www.example.com/current_events/, the pathMatcher's policy takes effect. If + // however, the request for www.example.com/coming_soon/ encounters a 404, the + // policy in RouteRule.customErrorResponsePolicy takes effect. If any of the + // requests in this example encounter a 500 error code, the policy at + // UrlMap.defaultCustomErrorResponsePolicy takes effect. When used in + // conjunction with pathMatcher.defaultRouteAction.retryPolicy, retries take + // precedence. Only once all retries are exhausted, the + // defaultCustomErrorResponsePolicy is applied. While attempting a retry, if + // load balancer is successful in reaching the service, the + // defaultCustomErrorResponsePolicy is ignored and the response from the + // service is returned to the client. defaultCustomErrorResponsePolicy is + // supported only for global external Application Load Balancers. + DefaultCustomErrorResponsePolicy *CustomErrorResponsePolicy `json:"defaultCustomErrorResponsePolicy,omitempty"` // DefaultRouteAction: defaultRouteAction takes effect when none of the // pathRules or routeRules match. The load balancer performs advanced routing // actions, such as URL rewrites and header transformations, before forwarding @@ -31560,27 +31857,50 @@ type PathMatcher struct { // evaluated in order of priority, from the lowest to highest number. Within a // given pathMatcher, you can set only one of pathRules or routeRules. RouteRules []*HttpRouteRule `json:"routeRules,omitempty"` - // ForceSendFields is a list of field names (e.g. "DefaultRouteAction") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See + // ForceSendFields is a list of field names (e.g. + // "DefaultCustomErrorResponsePolicy") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted from + // API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DefaultRouteAction") to include - // in API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + // NullFields is a list of field names (e.g. + // "DefaultCustomErrorResponsePolicy") to include in API requests with the JSON + // null value. By default, fields with empty values are omitted from API + // requests. See https://pkg.go.dev/google.golang.org/api#hdr-NullFields for + // more details. NullFields []string `json:"-"` } -func (s *PathMatcher) MarshalJSON() ([]byte, error) { +func (s PathMatcher) MarshalJSON() ([]byte, error) { type NoMethod PathMatcher - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PathRule: A path-matching rule for a URL. If matched, will use the specified // BackendService to handle the traffic arriving at this URL. type PathRule struct { + // CustomErrorResponsePolicy: customErrorResponsePolicy specifies how the Load + // Balancer returns error responses when BackendServiceor BackendBucket + // responds with an error. If a policy for an error code is not configured for + // the PathRule, a policy for the error code configured in + // pathMatcher.defaultCustomErrorResponsePolicy is applied. 
If one is not + // specified in pathMatcher.defaultCustomErrorResponsePolicy, the policy + // configured in UrlMap.defaultCustomErrorResponsePolicy takes effect. For + // example, consider a UrlMap with the following configuration: - + // UrlMap.defaultCustomErrorResponsePolicy are configured with policies for 5xx + // and 4xx errors - A PathRule for /coming_soon/ is configured for the error + // code 404. If the request is for www.myotherdomain.com and a 404 is + // encountered, the policy under UrlMap.defaultCustomErrorResponsePolicy takes + // effect. If a 404 response is encountered for the request + // www.example.com/current_events/, the pathMatcher's policy takes effect. If + // however, the request for www.example.com/coming_soon/ encounters a 404, the + // policy in PathRule.customErrorResponsePolicy takes effect. If any of the + // requests in this example encounter a 500 error code, the policy at + // UrlMap.defaultCustomErrorResponsePolicy takes effect. + // customErrorResponsePolicy is supported only for global external Application + // Load Balancers. + CustomErrorResponsePolicy *CustomErrorResponsePolicy `json:"customErrorResponsePolicy,omitempty"` // Paths: The list of path patterns to match. Each must start with / and the // only place a * is allowed is at the end following a /. The string fed to the // path matcher does not include any text after the first ? or #, and those @@ -31609,22 +31929,22 @@ type PathRule struct { // routeAction must not be set. Not supported when the URL map is bound to a // target gRPC proxy. UrlRedirect *HttpRedirectAction `json:"urlRedirect,omitempty"` - // ForceSendFields is a list of field names (e.g. "Paths") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See + // ForceSendFields is a list of field names (e.g. "CustomErrorResponsePolicy") + // to unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Paths") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "CustomErrorResponsePolicy") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
NullFields []string `json:"-"` } -func (s *PathRule) MarshalJSON() ([]byte, error) { +func (s PathRule) MarshalJSON() ([]byte, error) { type NoMethod PathRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PerInstanceConfig struct { @@ -31676,9 +31996,9 @@ type PerInstanceConfig struct { NullFields []string `json:"-"` } -func (s *PerInstanceConfig) MarshalJSON() ([]byte, error) { +func (s PerInstanceConfig) MarshalJSON() ([]byte, error) { type NoMethod PerInstanceConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Policy: An Identity and Access Management (IAM) policy, which specifies @@ -31770,9 +32090,9 @@ type Policy struct { NullFields []string `json:"-"` } -func (s *Policy) MarshalJSON() ([]byte, error) { +func (s Policy) MarshalJSON() ([]byte, error) { type NoMethod Policy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PreconfiguredWafSet struct { @@ -31791,9 +32111,9 @@ type PreconfiguredWafSet struct { NullFields []string `json:"-"` } -func (s *PreconfiguredWafSet) MarshalJSON() ([]byte, error) { +func (s PreconfiguredWafSet) MarshalJSON() ([]byte, error) { type NoMethod PreconfiguredWafSet - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PreservedState: Preserved state for a given instance. @@ -31822,9 +32142,9 @@ type PreservedState struct { NullFields []string `json:"-"` } -func (s *PreservedState) MarshalJSON() ([]byte, error) { +func (s PreservedState) MarshalJSON() ([]byte, error) { type NoMethod PreservedState - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PreservedStatePreservedDisk struct { @@ -31863,9 +32183,9 @@ type PreservedStatePreservedDisk struct { NullFields []string `json:"-"` } -func (s *PreservedStatePreservedDisk) MarshalJSON() ([]byte, error) { +func (s PreservedStatePreservedDisk) MarshalJSON() ([]byte, error) { type NoMethod PreservedStatePreservedDisk - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PreservedStatePreservedNetworkIp struct { @@ -31893,9 +32213,9 @@ type PreservedStatePreservedNetworkIp struct { NullFields []string `json:"-"` } -func (s *PreservedStatePreservedNetworkIp) MarshalJSON() ([]byte, error) { +func (s PreservedStatePreservedNetworkIp) MarshalJSON() ([]byte, error) { type NoMethod PreservedStatePreservedNetworkIp - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PreservedStatePreservedNetworkIpIpAddress struct { @@ -31917,9 +32237,9 @@ type PreservedStatePreservedNetworkIpIpAddress struct { NullFields []string `json:"-"` } -func (s *PreservedStatePreservedNetworkIpIpAddress) MarshalJSON() ([]byte, error) { +func (s PreservedStatePreservedNetworkIpIpAddress) MarshalJSON() ([]byte, error) { type NoMethod PreservedStatePreservedNetworkIpIpAddress - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return 
gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Project: Represents a Project resource. A project is used to organize @@ -32010,9 +32330,9 @@ type Project struct { NullFields []string `json:"-"` } -func (s *Project) MarshalJSON() ([]byte, error) { +func (s Project) MarshalJSON() ([]byte, error) { type NoMethod Project - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ProjectsDisableXpnResourceRequest struct { @@ -32031,9 +32351,9 @@ type ProjectsDisableXpnResourceRequest struct { NullFields []string `json:"-"` } -func (s *ProjectsDisableXpnResourceRequest) MarshalJSON() ([]byte, error) { +func (s ProjectsDisableXpnResourceRequest) MarshalJSON() ([]byte, error) { type NoMethod ProjectsDisableXpnResourceRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ProjectsEnableXpnResourceRequest struct { @@ -32052,9 +32372,9 @@ type ProjectsEnableXpnResourceRequest struct { NullFields []string `json:"-"` } -func (s *ProjectsEnableXpnResourceRequest) MarshalJSON() ([]byte, error) { +func (s ProjectsEnableXpnResourceRequest) MarshalJSON() ([]byte, error) { type NoMethod ProjectsEnableXpnResourceRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ProjectsGetXpnResources struct { @@ -32086,9 +32406,9 @@ type ProjectsGetXpnResources struct { NullFields []string `json:"-"` } -func (s *ProjectsGetXpnResources) MarshalJSON() ([]byte, error) { +func (s ProjectsGetXpnResources) MarshalJSON() ([]byte, error) { type NoMethod ProjectsGetXpnResources - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ProjectsListXpnHostsRequest struct { @@ -32109,9 +32429,9 @@ type ProjectsListXpnHostsRequest struct { NullFields []string `json:"-"` } -func (s *ProjectsListXpnHostsRequest) MarshalJSON() ([]byte, error) { +func (s ProjectsListXpnHostsRequest) MarshalJSON() ([]byte, error) { type NoMethod ProjectsListXpnHostsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ProjectsSetCloudArmorTierRequest struct { @@ -32135,9 +32455,9 @@ type ProjectsSetCloudArmorTierRequest struct { NullFields []string `json:"-"` } -func (s *ProjectsSetCloudArmorTierRequest) MarshalJSON() ([]byte, error) { +func (s ProjectsSetCloudArmorTierRequest) MarshalJSON() ([]byte, error) { type NoMethod ProjectsSetCloudArmorTierRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ProjectsSetDefaultNetworkTierRequest struct { @@ -32165,9 +32485,9 @@ type ProjectsSetDefaultNetworkTierRequest struct { NullFields []string `json:"-"` } -func (s *ProjectsSetDefaultNetworkTierRequest) MarshalJSON() ([]byte, error) { +func (s ProjectsSetDefaultNetworkTierRequest) MarshalJSON() ([]byte, error) { type NoMethod ProjectsSetDefaultNetworkTierRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PublicAdvertisedPrefix: A public advertised 
prefix represents an aggregated @@ -32277,9 +32597,9 @@ type PublicAdvertisedPrefix struct { NullFields []string `json:"-"` } -func (s *PublicAdvertisedPrefix) MarshalJSON() ([]byte, error) { +func (s PublicAdvertisedPrefix) MarshalJSON() ([]byte, error) { type NoMethod PublicAdvertisedPrefix - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PublicAdvertisedPrefixList struct { @@ -32316,9 +32636,9 @@ type PublicAdvertisedPrefixList struct { NullFields []string `json:"-"` } -func (s *PublicAdvertisedPrefixList) MarshalJSON() ([]byte, error) { +func (s PublicAdvertisedPrefixList) MarshalJSON() ([]byte, error) { type NoMethod PublicAdvertisedPrefixList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PublicAdvertisedPrefixListWarning: [Output Only] Informational warning @@ -32402,9 +32722,9 @@ type PublicAdvertisedPrefixListWarning struct { NullFields []string `json:"-"` } -func (s *PublicAdvertisedPrefixListWarning) MarshalJSON() ([]byte, error) { +func (s PublicAdvertisedPrefixListWarning) MarshalJSON() ([]byte, error) { type NoMethod PublicAdvertisedPrefixListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PublicAdvertisedPrefixListWarningData struct { @@ -32431,9 +32751,9 @@ type PublicAdvertisedPrefixListWarningData struct { NullFields []string `json:"-"` } -func (s *PublicAdvertisedPrefixListWarningData) MarshalJSON() ([]byte, error) { +func (s PublicAdvertisedPrefixListWarningData) MarshalJSON() ([]byte, error) { type NoMethod PublicAdvertisedPrefixListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PublicAdvertisedPrefixPublicDelegatedPrefix: Represents a CIDR range which @@ -32465,9 +32785,9 @@ type PublicAdvertisedPrefixPublicDelegatedPrefix struct { NullFields []string `json:"-"` } -func (s *PublicAdvertisedPrefixPublicDelegatedPrefix) MarshalJSON() ([]byte, error) { +func (s PublicAdvertisedPrefixPublicDelegatedPrefix) MarshalJSON() ([]byte, error) { type NoMethod PublicAdvertisedPrefixPublicDelegatedPrefix - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PublicDelegatedPrefix: A PublicDelegatedPrefix resource represents an IP @@ -32578,9 +32898,9 @@ type PublicDelegatedPrefix struct { NullFields []string `json:"-"` } -func (s *PublicDelegatedPrefix) MarshalJSON() ([]byte, error) { +func (s PublicDelegatedPrefix) MarshalJSON() ([]byte, error) { type NoMethod PublicDelegatedPrefix - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PublicDelegatedPrefixAggregatedList struct { @@ -32620,9 +32940,9 @@ type PublicDelegatedPrefixAggregatedList struct { NullFields []string `json:"-"` } -func (s *PublicDelegatedPrefixAggregatedList) MarshalJSON() ([]byte, error) { +func (s PublicDelegatedPrefixAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod PublicDelegatedPrefixAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return 
gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PublicDelegatedPrefixAggregatedListWarning: [Output Only] Informational @@ -32706,9 +33026,9 @@ type PublicDelegatedPrefixAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *PublicDelegatedPrefixAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s PublicDelegatedPrefixAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod PublicDelegatedPrefixAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PublicDelegatedPrefixAggregatedListWarningData struct { @@ -32735,9 +33055,9 @@ type PublicDelegatedPrefixAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *PublicDelegatedPrefixAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s PublicDelegatedPrefixAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod PublicDelegatedPrefixAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PublicDelegatedPrefixList struct { @@ -32774,9 +33094,9 @@ type PublicDelegatedPrefixList struct { NullFields []string `json:"-"` } -func (s *PublicDelegatedPrefixList) MarshalJSON() ([]byte, error) { +func (s PublicDelegatedPrefixList) MarshalJSON() ([]byte, error) { type NoMethod PublicDelegatedPrefixList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PublicDelegatedPrefixListWarning: [Output Only] Informational warning @@ -32860,9 +33180,9 @@ type PublicDelegatedPrefixListWarning struct { NullFields []string `json:"-"` } -func (s *PublicDelegatedPrefixListWarning) MarshalJSON() ([]byte, error) { +func (s PublicDelegatedPrefixListWarning) MarshalJSON() ([]byte, error) { type NoMethod PublicDelegatedPrefixListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PublicDelegatedPrefixListWarningData struct { @@ -32889,9 +33209,9 @@ type PublicDelegatedPrefixListWarningData struct { NullFields []string `json:"-"` } -func (s *PublicDelegatedPrefixListWarningData) MarshalJSON() ([]byte, error) { +func (s PublicDelegatedPrefixListWarningData) MarshalJSON() ([]byte, error) { type NoMethod PublicDelegatedPrefixListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PublicDelegatedPrefixPublicDelegatedSubPrefix: Represents a sub @@ -32944,9 +33264,9 @@ type PublicDelegatedPrefixPublicDelegatedSubPrefix struct { NullFields []string `json:"-"` } -func (s *PublicDelegatedPrefixPublicDelegatedSubPrefix) MarshalJSON() ([]byte, error) { +func (s PublicDelegatedPrefixPublicDelegatedSubPrefix) MarshalJSON() ([]byte, error) { type NoMethod PublicDelegatedPrefixPublicDelegatedSubPrefix - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PublicDelegatedPrefixesScopedList struct { @@ -32969,9 +33289,9 @@ type PublicDelegatedPrefixesScopedList struct { NullFields []string `json:"-"` } -func (s *PublicDelegatedPrefixesScopedList) MarshalJSON() 
([]byte, error) { +func (s PublicDelegatedPrefixesScopedList) MarshalJSON() ([]byte, error) { type NoMethod PublicDelegatedPrefixesScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PublicDelegatedPrefixesScopedListWarning: [Output Only] Informational @@ -33056,9 +33376,9 @@ type PublicDelegatedPrefixesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *PublicDelegatedPrefixesScopedListWarning) MarshalJSON() ([]byte, error) { +func (s PublicDelegatedPrefixesScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod PublicDelegatedPrefixesScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PublicDelegatedPrefixesScopedListWarningData struct { @@ -33085,9 +33405,9 @@ type PublicDelegatedPrefixesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *PublicDelegatedPrefixesScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s PublicDelegatedPrefixesScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod PublicDelegatedPrefixesScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Quota: A quotas entry. @@ -33280,9 +33600,9 @@ type Quota struct { NullFields []string `json:"-"` } -func (s *Quota) MarshalJSON() ([]byte, error) { +func (s Quota) MarshalJSON() ([]byte, error) { type NoMethod Quota - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *Quota) UnmarshalJSON(data []byte) error { @@ -33337,9 +33657,9 @@ type QuotaExceededInfo struct { NullFields []string `json:"-"` } -func (s *QuotaExceededInfo) MarshalJSON() ([]byte, error) { +func (s QuotaExceededInfo) MarshalJSON() ([]byte, error) { type NoMethod QuotaExceededInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *QuotaExceededInfo) UnmarshalJSON(data []byte) error { @@ -33383,9 +33703,9 @@ type Reference struct { NullFields []string `json:"-"` } -func (s *Reference) MarshalJSON() ([]byte, error) { +func (s Reference) MarshalJSON() ([]byte, error) { type NoMethod Reference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Region: Represents a Region resource. 
A region is a geographical area where @@ -33440,9 +33760,9 @@ type Region struct { NullFields []string `json:"-"` } -func (s *Region) MarshalJSON() ([]byte, error) { +func (s Region) MarshalJSON() ([]byte, error) { type NoMethod Region - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RegionQuotaStatusWarning: [Output Only] Warning of fetching the `quotas` @@ -33527,9 +33847,9 @@ type RegionQuotaStatusWarning struct { NullFields []string `json:"-"` } -func (s *RegionQuotaStatusWarning) MarshalJSON() ([]byte, error) { +func (s RegionQuotaStatusWarning) MarshalJSON() ([]byte, error) { type NoMethod RegionQuotaStatusWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionQuotaStatusWarningData struct { @@ -33556,9 +33876,9 @@ type RegionQuotaStatusWarningData struct { NullFields []string `json:"-"` } -func (s *RegionQuotaStatusWarningData) MarshalJSON() ([]byte, error) { +func (s RegionQuotaStatusWarningData) MarshalJSON() ([]byte, error) { type NoMethod RegionQuotaStatusWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionAddressesMoveRequest struct { @@ -33586,9 +33906,9 @@ type RegionAddressesMoveRequest struct { NullFields []string `json:"-"` } -func (s *RegionAddressesMoveRequest) MarshalJSON() ([]byte, error) { +func (s RegionAddressesMoveRequest) MarshalJSON() ([]byte, error) { type NoMethod RegionAddressesMoveRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RegionAutoscalerList: Contains a list of autoscalers. @@ -33625,9 +33945,9 @@ type RegionAutoscalerList struct { NullFields []string `json:"-"` } -func (s *RegionAutoscalerList) MarshalJSON() ([]byte, error) { +func (s RegionAutoscalerList) MarshalJSON() ([]byte, error) { type NoMethod RegionAutoscalerList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RegionAutoscalerListWarning: [Output Only] Informational warning message. 
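Every hunk in this stretch makes the same two-line change: the generated MarshalJSON methods move from pointer receivers, func (s *T), to value receivers, func (s T), and the NoMethod conversion drops its dereference to match. The NoMethod alias is what keeps either form safe from infinite recursion. A self-contained sketch of the pattern, with an invented type standing in for the generated ones and plain json.Marshal standing in for gensupport.MarshalJSON (the real helper additionally applies ForceSendFields and NullFields):

package main

import (
	"encoding/json"
	"fmt"
)

// demo mirrors the generated shape: MarshalJSON converts to an alias
// type with an empty method set, so the inner json.Marshal call cannot
// recurse back into MarshalJSON.
type demo struct {
	Name string `json:"name,omitempty"`
}

func (s demo) MarshalJSON() ([]byte, error) {
	type NoMethod demo // alias drops the method set
	return json.Marshal(NoMethod(s))
}

func main() {
	// A value receiver puts MarshalJSON in the method sets of both
	// demo and *demo, so values and pointers marshal identically.
	v, _ := json.Marshal(demo{Name: "a"})
	p, _ := json.Marshal(&demo{Name: "a"})
	fmt.Println(string(v), string(p)) // {"name":"a"} {"name":"a"}
}

The switch strictly widens the method set: pointers kept the marshaler they already had, and plain values now satisfy json.Marshaler as well, so existing callers are unaffected.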
@@ -33710,9 +34030,9 @@ type RegionAutoscalerListWarning struct { NullFields []string `json:"-"` } -func (s *RegionAutoscalerListWarning) MarshalJSON() ([]byte, error) { +func (s RegionAutoscalerListWarning) MarshalJSON() ([]byte, error) { type NoMethod RegionAutoscalerListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionAutoscalerListWarningData struct { @@ -33739,9 +34059,9 @@ type RegionAutoscalerListWarningData struct { NullFields []string `json:"-"` } -func (s *RegionAutoscalerListWarningData) MarshalJSON() ([]byte, error) { +func (s RegionAutoscalerListWarningData) MarshalJSON() ([]byte, error) { type NoMethod RegionAutoscalerListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionDiskTypeList struct { @@ -33778,9 +34098,9 @@ type RegionDiskTypeList struct { NullFields []string `json:"-"` } -func (s *RegionDiskTypeList) MarshalJSON() ([]byte, error) { +func (s RegionDiskTypeList) MarshalJSON() ([]byte, error) { type NoMethod RegionDiskTypeList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RegionDiskTypeListWarning: [Output Only] Informational warning message. @@ -33863,9 +34183,9 @@ type RegionDiskTypeListWarning struct { NullFields []string `json:"-"` } -func (s *RegionDiskTypeListWarning) MarshalJSON() ([]byte, error) { +func (s RegionDiskTypeListWarning) MarshalJSON() ([]byte, error) { type NoMethod RegionDiskTypeListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionDiskTypeListWarningData struct { @@ -33892,9 +34212,9 @@ type RegionDiskTypeListWarningData struct { NullFields []string `json:"-"` } -func (s *RegionDiskTypeListWarningData) MarshalJSON() ([]byte, error) { +func (s RegionDiskTypeListWarningData) MarshalJSON() ([]byte, error) { type NoMethod RegionDiskTypeListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionDisksAddResourcePoliciesRequest struct { @@ -33913,9 +34233,9 @@ type RegionDisksAddResourcePoliciesRequest struct { NullFields []string `json:"-"` } -func (s *RegionDisksAddResourcePoliciesRequest) MarshalJSON() ([]byte, error) { +func (s RegionDisksAddResourcePoliciesRequest) MarshalJSON() ([]byte, error) { type NoMethod RegionDisksAddResourcePoliciesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionDisksRemoveResourcePoliciesRequest struct { @@ -33934,9 +34254,9 @@ type RegionDisksRemoveResourcePoliciesRequest struct { NullFields []string `json:"-"` } -func (s *RegionDisksRemoveResourcePoliciesRequest) MarshalJSON() ([]byte, error) { +func (s RegionDisksRemoveResourcePoliciesRequest) MarshalJSON() ([]byte, error) { type NoMethod RegionDisksRemoveResourcePoliciesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionDisksResizeRequest struct { @@ -33956,9 +34276,9 @@ type 
RegionDisksResizeRequest struct { NullFields []string `json:"-"` } -func (s *RegionDisksResizeRequest) MarshalJSON() ([]byte, error) { +func (s RegionDisksResizeRequest) MarshalJSON() ([]byte, error) { type NoMethod RegionDisksResizeRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionDisksStartAsyncReplicationRequest struct { @@ -33985,9 +34305,9 @@ type RegionDisksStartAsyncReplicationRequest struct { NullFields []string `json:"-"` } -func (s *RegionDisksStartAsyncReplicationRequest) MarshalJSON() ([]byte, error) { +func (s RegionDisksStartAsyncReplicationRequest) MarshalJSON() ([]byte, error) { type NoMethod RegionDisksStartAsyncReplicationRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RegionInstanceGroupList: Contains a list of InstanceGroup resources. @@ -34024,9 +34344,9 @@ type RegionInstanceGroupList struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupList) MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupList) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RegionInstanceGroupListWarning: [Output Only] Informational warning message. @@ -34109,9 +34429,9 @@ type RegionInstanceGroupListWarning struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupListWarning) MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupListWarning) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionInstanceGroupListWarningData struct { @@ -34138,9 +34458,9 @@ type RegionInstanceGroupListWarningData struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupListWarningData) MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupListWarningData) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RegionInstanceGroupManagerDeleteInstanceConfigReq: @@ -34162,9 +34482,9 @@ type RegionInstanceGroupManagerDeleteInstanceConfigReq struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagerDeleteInstanceConfigReq) MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupManagerDeleteInstanceConfigReq) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupManagerDeleteInstanceConfigReq - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RegionInstanceGroupManagerList: Contains a list of managed instance groups. 
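ForceSendFields is why these marshalers exist at all: requests such as RegionDisksResizeRequest above have fields whose zero value is meaningful, and omitempty would silently drop them. A usage sketch, assuming the google.golang.org/api/compute/v1 import path and the generated ,string tag on int64 fields; the zero size is contrived, chosen only to show the wire format:

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// Without ForceSendFields, a zero SizeGb would be omitted from the
	// request body entirely; listing it forces the field onto the wire.
	req := &compute.RegionDisksResizeRequest{
		SizeGb:          0,
		ForceSendFields: []string{"SizeGb"},
	}
	b, _ := json.Marshal(req)
	// Expected (int64 fields carry the ,string option in this package):
	// {"sizeGb":"0"}
	fmt.Println(string(b))
}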
@@ -34203,9 +34523,9 @@ type RegionInstanceGroupManagerList struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagerList) MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupManagerList) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupManagerList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RegionInstanceGroupManagerListWarning: [Output Only] Informational warning @@ -34289,9 +34609,9 @@ type RegionInstanceGroupManagerListWarning struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagerListWarning) MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupManagerListWarning) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupManagerListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionInstanceGroupManagerListWarningData struct { @@ -34318,9 +34638,9 @@ type RegionInstanceGroupManagerListWarningData struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupManagerListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RegionInstanceGroupManagerPatchInstanceConfigReq: @@ -34342,9 +34662,9 @@ type RegionInstanceGroupManagerPatchInstanceConfigReq struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagerPatchInstanceConfigReq) MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupManagerPatchInstanceConfigReq) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupManagerPatchInstanceConfigReq - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RegionInstanceGroupManagerUpdateInstanceConfigReq: @@ -34366,9 +34686,9 @@ type RegionInstanceGroupManagerUpdateInstanceConfigReq struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagerUpdateInstanceConfigReq) MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupManagerUpdateInstanceConfigReq) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupManagerUpdateInstanceConfigReq - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionInstanceGroupManagersAbandonInstancesRequest struct { @@ -34388,9 +34708,9 @@ type RegionInstanceGroupManagersAbandonInstancesRequest struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupManagersAbandonInstancesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RegionInstanceGroupManagersApplyUpdatesRequest: @@ -34447,9 +34767,9 @@ type RegionInstanceGroupManagersApplyUpdatesRequest struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersApplyUpdatesRequest) 
MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupManagersApplyUpdatesRequest) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupManagersApplyUpdatesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RegionInstanceGroupManagersCreateInstancesRequest: @@ -34470,9 +34790,9 @@ type RegionInstanceGroupManagersCreateInstancesRequest struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersCreateInstancesRequest) MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupManagersCreateInstancesRequest) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupManagersCreateInstancesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionInstanceGroupManagersDeleteInstancesRequest struct { @@ -34500,9 +34820,9 @@ type RegionInstanceGroupManagersDeleteInstancesRequest struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupManagersDeleteInstancesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionInstanceGroupManagersListErrorsResponse struct { @@ -34530,9 +34850,9 @@ type RegionInstanceGroupManagersListErrorsResponse struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersListErrorsResponse) MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupManagersListErrorsResponse) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupManagersListErrorsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionInstanceGroupManagersListInstanceConfigsResp struct { @@ -34562,9 +34882,9 @@ type RegionInstanceGroupManagersListInstanceConfigsResp struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersListInstanceConfigsResp) MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupManagersListInstanceConfigsResp) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupManagersListInstanceConfigsResp - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RegionInstanceGroupManagersListInstanceConfigsRespWarning: [Output Only] @@ -34648,9 +34968,9 @@ type RegionInstanceGroupManagersListInstanceConfigsRespWarning struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersListInstanceConfigsRespWarning) MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupManagersListInstanceConfigsRespWarning) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupManagersListInstanceConfigsRespWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionInstanceGroupManagersListInstanceConfigsRespWarningData struct { @@ -34677,9 +34997,9 @@ type RegionInstanceGroupManagersListInstanceConfigsRespWarningData struct { NullFields []string `json:"-"` } -func (s 
*RegionInstanceGroupManagersListInstanceConfigsRespWarningData) MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupManagersListInstanceConfigsRespWarningData) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupManagersListInstanceConfigsRespWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionInstanceGroupManagersListInstancesResponse struct { @@ -34707,9 +35027,9 @@ type RegionInstanceGroupManagersListInstancesResponse struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersListInstancesResponse) MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupManagersListInstancesResponse) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupManagersListInstancesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionInstanceGroupManagersRecreateRequest struct { @@ -34729,9 +35049,9 @@ type RegionInstanceGroupManagersRecreateRequest struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersRecreateRequest) MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupManagersRecreateRequest) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupManagersRecreateRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionInstanceGroupManagersSetTargetPoolsRequest struct { @@ -34756,9 +35076,9 @@ type RegionInstanceGroupManagersSetTargetPoolsRequest struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupManagersSetTargetPoolsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionInstanceGroupManagersSetTemplateRequest struct { @@ -34778,9 +35098,9 @@ type RegionInstanceGroupManagersSetTemplateRequest struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersSetTemplateRequest) MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupManagersSetTemplateRequest) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupManagersSetTemplateRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionInstanceGroupsListInstances struct { @@ -34816,9 +35136,9 @@ type RegionInstanceGroupsListInstances struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupsListInstances) MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupsListInstances) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupsListInstances - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RegionInstanceGroupsListInstancesWarning: [Output Only] Informational @@ -34902,9 +35222,9 @@ type RegionInstanceGroupsListInstancesWarning struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupsListInstancesWarning) MarshalJSON() ([]byte, error) { +func (s 
RegionInstanceGroupsListInstancesWarning) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupsListInstancesWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionInstanceGroupsListInstancesWarningData struct { @@ -34931,9 +35251,9 @@ type RegionInstanceGroupsListInstancesWarningData struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupsListInstancesWarningData) MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupsListInstancesWarningData) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupsListInstancesWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionInstanceGroupsListInstancesRequest struct { @@ -34962,9 +35282,9 @@ type RegionInstanceGroupsListInstancesRequest struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupsListInstancesRequest) MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupsListInstancesRequest) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupsListInstancesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionInstanceGroupsSetNamedPortsRequest struct { @@ -34990,9 +35310,9 @@ type RegionInstanceGroupsSetNamedPortsRequest struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) { +func (s RegionInstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) { type NoMethod RegionInstanceGroupsSetNamedPortsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RegionList: Contains a list of region resources. @@ -35030,9 +35350,9 @@ type RegionList struct { NullFields []string `json:"-"` } -func (s *RegionList) MarshalJSON() ([]byte, error) { +func (s RegionList) MarshalJSON() ([]byte, error) { type NoMethod RegionList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RegionListWarning: [Output Only] Informational warning message. 
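RegionList above follows the standard list-response shape: Items carries one page, NextPageToken links to the next, and Warning (the type continuing below) surfaces soft failures without failing the call. A paging sketch, assuming compute/v1, Application Default Credentials, and a placeholder project ID:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	// Pages follows NextPageToken until the listing is exhausted.
	err = svc.Regions.List("my-project").Pages(ctx, func(page *compute.RegionList) error {
		if page.Warning != nil {
			log.Printf("list warning %s: %s", page.Warning.Code, page.Warning.Message)
		}
		for _, r := range page.Items {
			fmt.Println(r.Name)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}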
@@ -35115,9 +35435,9 @@ type RegionListWarning struct { NullFields []string `json:"-"` } -func (s *RegionListWarning) MarshalJSON() ([]byte, error) { +func (s RegionListWarning) MarshalJSON() ([]byte, error) { type NoMethod RegionListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionListWarningData struct { @@ -35144,9 +35464,9 @@ type RegionListWarningData struct { NullFields []string `json:"-"` } -func (s *RegionListWarningData) MarshalJSON() ([]byte, error) { +func (s RegionListWarningData) MarshalJSON() ([]byte, error) { type NoMethod RegionListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionNetworkEndpointGroupsAttachEndpointsRequest struct { @@ -35165,9 +35485,9 @@ type RegionNetworkEndpointGroupsAttachEndpointsRequest struct { NullFields []string `json:"-"` } -func (s *RegionNetworkEndpointGroupsAttachEndpointsRequest) MarshalJSON() ([]byte, error) { +func (s RegionNetworkEndpointGroupsAttachEndpointsRequest) MarshalJSON() ([]byte, error) { type NoMethod RegionNetworkEndpointGroupsAttachEndpointsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionNetworkEndpointGroupsDetachEndpointsRequest struct { @@ -35186,9 +35506,9 @@ type RegionNetworkEndpointGroupsDetachEndpointsRequest struct { NullFields []string `json:"-"` } -func (s *RegionNetworkEndpointGroupsDetachEndpointsRequest) MarshalJSON() ([]byte, error) { +func (s RegionNetworkEndpointGroupsDetachEndpointsRequest) MarshalJSON() ([]byte, error) { type NoMethod RegionNetworkEndpointGroupsDetachEndpointsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponse struct { @@ -35212,9 +35532,9 @@ type RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponse struct { NullFields []string `json:"-"` } -func (s *RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponse) MarshalJSON() ([]byte, error) { +func (s RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponse) MarshalJSON() ([]byte, error) { type NoMethod RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponseEffectiveFirewallPolicy struct { @@ -35246,9 +35566,9 @@ type RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponseEffectiveFirewall NullFields []string `json:"-"` } -func (s *RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponseEffectiveFirewallPolicy) MarshalJSON() ([]byte, error) { +func (s RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponseEffectiveFirewallPolicy) MarshalJSON() ([]byte, error) { type NoMethod RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponseEffectiveFirewallPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionSetLabelsRequest struct { @@ -35274,9 +35594,9 @@ type RegionSetLabelsRequest struct { NullFields []string `json:"-"` } -func 
(s *RegionSetLabelsRequest) MarshalJSON() ([]byte, error) { +func (s RegionSetLabelsRequest) MarshalJSON() ([]byte, error) { type NoMethod RegionSetLabelsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionSetPolicyRequest struct { @@ -35304,9 +35624,9 @@ type RegionSetPolicyRequest struct { NullFields []string `json:"-"` } -func (s *RegionSetPolicyRequest) MarshalJSON() ([]byte, error) { +func (s RegionSetPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod RegionSetPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionTargetHttpsProxiesSetSslCertificatesRequest struct { @@ -35326,9 +35646,9 @@ type RegionTargetHttpsProxiesSetSslCertificatesRequest struct { NullFields []string `json:"-"` } -func (s *RegionTargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { +func (s RegionTargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { type NoMethod RegionTargetHttpsProxiesSetSslCertificatesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RegionUrlMapsValidateRequest struct { @@ -35347,9 +35667,9 @@ type RegionUrlMapsValidateRequest struct { NullFields []string `json:"-"` } -func (s *RegionUrlMapsValidateRequest) MarshalJSON() ([]byte, error) { +func (s RegionUrlMapsValidateRequest) MarshalJSON() ([]byte, error) { type NoMethod RegionUrlMapsValidateRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RequestMirrorPolicy: A policy that specifies how requests intended for the @@ -35377,9 +35697,9 @@ type RequestMirrorPolicy struct { NullFields []string `json:"-"` } -func (s *RequestMirrorPolicy) MarshalJSON() ([]byte, error) { +func (s RequestMirrorPolicy) MarshalJSON() ([]byte, error) { type NoMethod RequestMirrorPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Reservation: Represents a reservation resource. A reservation ensures that @@ -35462,9 +35782,9 @@ type Reservation struct { NullFields []string `json:"-"` } -func (s *Reservation) MarshalJSON() ([]byte, error) { +func (s Reservation) MarshalJSON() ([]byte, error) { type NoMethod Reservation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ReservationAffinity: Specifies the reservations that this instance can @@ -35504,9 +35824,9 @@ type ReservationAffinity struct { NullFields []string `json:"-"` } -func (s *ReservationAffinity) MarshalJSON() ([]byte, error) { +func (s ReservationAffinity) MarshalJSON() ([]byte, error) { type NoMethod ReservationAffinity - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ReservationAggregatedList: Contains a list of reservations. 
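NullFields, the other escape hatch in every struct here, emits an explicit JSON null rather than omitting the field; RegionSetLabelsRequest above is the natural illustration, where a marshaled null expresses "no labels" distinctly from "labels unchanged". A wire-format sketch (the fingerprint value is a placeholder, and whether the backend interprets null as a clear is API-specific; this only demonstrates the marshaling):

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	req := &compute.RegionSetLabelsRequest{
		LabelFingerprint: "42WmSpB8rSM=", // placeholder; use the live resource's fingerprint
		NullFields:       []string{"Labels"},
	}
	b, _ := json.Marshal(req)
	// Expected: {"labelFingerprint":"42WmSpB8rSM=","labels":null}
	fmt.Println(string(b))
}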
@@ -35545,9 +35865,9 @@ type ReservationAggregatedList struct { NullFields []string `json:"-"` } -func (s *ReservationAggregatedList) MarshalJSON() ([]byte, error) { +func (s ReservationAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod ReservationAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ReservationAggregatedListWarning: [Output Only] Informational warning @@ -35631,9 +35951,9 @@ type ReservationAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *ReservationAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s ReservationAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod ReservationAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ReservationAggregatedListWarningData struct { @@ -35660,9 +35980,9 @@ type ReservationAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *ReservationAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s ReservationAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod ReservationAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ReservationList struct { @@ -35700,9 +36020,9 @@ type ReservationList struct { NullFields []string `json:"-"` } -func (s *ReservationList) MarshalJSON() ([]byte, error) { +func (s ReservationList) MarshalJSON() ([]byte, error) { type NoMethod ReservationList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ReservationListWarning: [Output Only] Informational warning message. 
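ReservationAffinity above is the consumer side of the reservation lists that follow: an instance declares which reservations it may draw capacity from. A construction sketch; the enum value and key string come from the upstream API documentation, not from this hunk:

package sketch

import compute "google.golang.org/api/compute/v1"

// specificReservation pins an instance to one named reservation rather
// than letting it consume any matching one.
func specificReservation(name string) *compute.ReservationAffinity {
	return &compute.ReservationAffinity{
		ConsumeReservationType: "SPECIFIC_RESERVATION",
		Key:                    "compute.googleapis.com/reservation-name",
		Values:                 []string{name},
	}
}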
@@ -35785,9 +36105,9 @@ type ReservationListWarning struct { NullFields []string `json:"-"` } -func (s *ReservationListWarning) MarshalJSON() ([]byte, error) { +func (s ReservationListWarning) MarshalJSON() ([]byte, error) { type NoMethod ReservationListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ReservationListWarningData struct { @@ -35814,9 +36134,9 @@ type ReservationListWarningData struct { NullFields []string `json:"-"` } -func (s *ReservationListWarningData) MarshalJSON() ([]byte, error) { +func (s ReservationListWarningData) MarshalJSON() ([]byte, error) { type NoMethod ReservationListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ReservationsResizeRequest struct { @@ -35836,9 +36156,9 @@ type ReservationsResizeRequest struct { NullFields []string `json:"-"` } -func (s *ReservationsResizeRequest) MarshalJSON() ([]byte, error) { +func (s ReservationsResizeRequest) MarshalJSON() ([]byte, error) { type NoMethod ReservationsResizeRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ReservationsScopedList struct { @@ -35860,9 +36180,9 @@ type ReservationsScopedList struct { NullFields []string `json:"-"` } -func (s *ReservationsScopedList) MarshalJSON() ([]byte, error) { +func (s ReservationsScopedList) MarshalJSON() ([]byte, error) { type NoMethod ReservationsScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ReservationsScopedListWarning: Informational warning which replaces the list @@ -35946,9 +36266,9 @@ type ReservationsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *ReservationsScopedListWarning) MarshalJSON() ([]byte, error) { +func (s ReservationsScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod ReservationsScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ReservationsScopedListWarningData struct { @@ -35975,9 +36295,9 @@ type ReservationsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *ReservationsScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s ReservationsScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod ReservationsScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourceCommitment: Commitment for a particular resource (a Commitment is @@ -36014,9 +36334,9 @@ type ResourceCommitment struct { NullFields []string `json:"-"` } -func (s *ResourceCommitment) MarshalJSON() ([]byte, error) { +func (s ResourceCommitment) MarshalJSON() ([]byte, error) { type NoMethod ResourceCommitment - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ResourceGroupReference struct { @@ -36036,9 +36356,9 @@ type ResourceGroupReference struct { NullFields []string `json:"-"` } -func (s *ResourceGroupReference) MarshalJSON() ([]byte, error) { 
+func (s ResourceGroupReference) MarshalJSON() ([]byte, error) { type NoMethod ResourceGroupReference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ResourcePoliciesScopedList struct { @@ -36060,9 +36380,9 @@ type ResourcePoliciesScopedList struct { NullFields []string `json:"-"` } -func (s *ResourcePoliciesScopedList) MarshalJSON() ([]byte, error) { +func (s ResourcePoliciesScopedList) MarshalJSON() ([]byte, error) { type NoMethod ResourcePoliciesScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourcePoliciesScopedListWarning: Informational warning which replaces the @@ -36146,9 +36466,9 @@ type ResourcePoliciesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *ResourcePoliciesScopedListWarning) MarshalJSON() ([]byte, error) { +func (s ResourcePoliciesScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod ResourcePoliciesScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ResourcePoliciesScopedListWarningData struct { @@ -36175,9 +36495,9 @@ type ResourcePoliciesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *ResourcePoliciesScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s ResourcePoliciesScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod ResourcePoliciesScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourcePolicy: Represents a Resource Policy resource. You can use resource @@ -36242,9 +36562,9 @@ type ResourcePolicy struct { NullFields []string `json:"-"` } -func (s *ResourcePolicy) MarshalJSON() ([]byte, error) { +func (s ResourcePolicy) MarshalJSON() ([]byte, error) { type NoMethod ResourcePolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourcePolicyAggregatedList: Contains a list of resourcePolicies. 
@@ -36284,9 +36604,9 @@ type ResourcePolicyAggregatedList struct { NullFields []string `json:"-"` } -func (s *ResourcePolicyAggregatedList) MarshalJSON() ([]byte, error) { +func (s ResourcePolicyAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod ResourcePolicyAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourcePolicyAggregatedListWarning: [Output Only] Informational warning @@ -36370,9 +36690,9 @@ type ResourcePolicyAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *ResourcePolicyAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s ResourcePolicyAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod ResourcePolicyAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ResourcePolicyAggregatedListWarningData struct { @@ -36399,9 +36719,9 @@ type ResourcePolicyAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *ResourcePolicyAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s ResourcePolicyAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod ResourcePolicyAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourcePolicyDailyCycle: Time window specified for daily operations. @@ -36429,9 +36749,9 @@ type ResourcePolicyDailyCycle struct { NullFields []string `json:"-"` } -func (s *ResourcePolicyDailyCycle) MarshalJSON() ([]byte, error) { +func (s ResourcePolicyDailyCycle) MarshalJSON() ([]byte, error) { type NoMethod ResourcePolicyDailyCycle - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourcePolicyDiskConsistencyGroupPolicy: Resource policy for disk @@ -36469,9 +36789,9 @@ type ResourcePolicyGroupPlacementPolicy struct { NullFields []string `json:"-"` } -func (s *ResourcePolicyGroupPlacementPolicy) MarshalJSON() ([]byte, error) { +func (s ResourcePolicyGroupPlacementPolicy) MarshalJSON() ([]byte, error) { type NoMethod ResourcePolicyGroupPlacementPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourcePolicyHourlyCycle: Time window specified for hourly operations. 
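ResourcePolicyAggregatedList above differs from the plain lists in this file: Items is a map keyed by scope (for example "regions/us-central1"), each value a scoped list with its own optional warning. An iteration sketch, assuming compute/v1 and a placeholder project ID:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	err = svc.ResourcePolicies.AggregatedList("my-project").Pages(ctx,
		func(page *compute.ResourcePolicyAggregatedList) error {
			for scope, scoped := range page.Items {
				for _, p := range scoped.ResourcePolicies {
					fmt.Printf("%s: %s\n", scope, p.Name)
				}
			}
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}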
@@ -36498,9 +36818,9 @@ type ResourcePolicyHourlyCycle struct { NullFields []string `json:"-"` } -func (s *ResourcePolicyHourlyCycle) MarshalJSON() ([]byte, error) { +func (s ResourcePolicyHourlyCycle) MarshalJSON() ([]byte, error) { type NoMethod ResourcePolicyHourlyCycle - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourcePolicyInstanceSchedulePolicy: An InstanceSchedulePolicy specifies @@ -36533,9 +36853,9 @@ type ResourcePolicyInstanceSchedulePolicy struct { NullFields []string `json:"-"` } -func (s *ResourcePolicyInstanceSchedulePolicy) MarshalJSON() ([]byte, error) { +func (s ResourcePolicyInstanceSchedulePolicy) MarshalJSON() ([]byte, error) { type NoMethod ResourcePolicyInstanceSchedulePolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourcePolicyInstanceSchedulePolicySchedule: Schedule for an instance @@ -36557,9 +36877,9 @@ type ResourcePolicyInstanceSchedulePolicySchedule struct { NullFields []string `json:"-"` } -func (s *ResourcePolicyInstanceSchedulePolicySchedule) MarshalJSON() ([]byte, error) { +func (s ResourcePolicyInstanceSchedulePolicySchedule) MarshalJSON() ([]byte, error) { type NoMethod ResourcePolicyInstanceSchedulePolicySchedule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ResourcePolicyList struct { @@ -36598,9 +36918,9 @@ type ResourcePolicyList struct { NullFields []string `json:"-"` } -func (s *ResourcePolicyList) MarshalJSON() ([]byte, error) { +func (s ResourcePolicyList) MarshalJSON() ([]byte, error) { type NoMethod ResourcePolicyList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourcePolicyListWarning: [Output Only] Informational warning message. @@ -36683,9 +37003,9 @@ type ResourcePolicyListWarning struct { NullFields []string `json:"-"` } -func (s *ResourcePolicyListWarning) MarshalJSON() ([]byte, error) { +func (s ResourcePolicyListWarning) MarshalJSON() ([]byte, error) { type NoMethod ResourcePolicyListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ResourcePolicyListWarningData struct { @@ -36712,9 +37032,9 @@ type ResourcePolicyListWarningData struct { NullFields []string `json:"-"` } -func (s *ResourcePolicyListWarningData) MarshalJSON() ([]byte, error) { +func (s ResourcePolicyListWarningData) MarshalJSON() ([]byte, error) { type NoMethod ResourcePolicyListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourcePolicyResourceStatus: Contains output only fields. 
Use this @@ -36739,9 +37059,9 @@ type ResourcePolicyResourceStatus struct { NullFields []string `json:"-"` } -func (s *ResourcePolicyResourceStatus) MarshalJSON() ([]byte, error) { +func (s ResourcePolicyResourceStatus) MarshalJSON() ([]byte, error) { type NoMethod ResourcePolicyResourceStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ResourcePolicyResourceStatusInstanceSchedulePolicyStatus struct { @@ -36765,9 +37085,9 @@ type ResourcePolicyResourceStatusInstanceSchedulePolicyStatus struct { NullFields []string `json:"-"` } -func (s *ResourcePolicyResourceStatusInstanceSchedulePolicyStatus) MarshalJSON() ([]byte, error) { +func (s ResourcePolicyResourceStatusInstanceSchedulePolicyStatus) MarshalJSON() ([]byte, error) { type NoMethod ResourcePolicyResourceStatusInstanceSchedulePolicyStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourcePolicySnapshotSchedulePolicy: A snapshot schedule policy specifies @@ -36798,9 +37118,9 @@ type ResourcePolicySnapshotSchedulePolicy struct { NullFields []string `json:"-"` } -func (s *ResourcePolicySnapshotSchedulePolicy) MarshalJSON() ([]byte, error) { +func (s ResourcePolicySnapshotSchedulePolicy) MarshalJSON() ([]byte, error) { type NoMethod ResourcePolicySnapshotSchedulePolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourcePolicySnapshotSchedulePolicyRetentionPolicy: Policy for retention of @@ -36829,9 +37149,9 @@ type ResourcePolicySnapshotSchedulePolicyRetentionPolicy struct { NullFields []string `json:"-"` } -func (s *ResourcePolicySnapshotSchedulePolicyRetentionPolicy) MarshalJSON() ([]byte, error) { +func (s ResourcePolicySnapshotSchedulePolicyRetentionPolicy) MarshalJSON() ([]byte, error) { type NoMethod ResourcePolicySnapshotSchedulePolicyRetentionPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourcePolicySnapshotSchedulePolicySchedule: A schedule for disks where the @@ -36853,9 +37173,9 @@ type ResourcePolicySnapshotSchedulePolicySchedule struct { NullFields []string `json:"-"` } -func (s *ResourcePolicySnapshotSchedulePolicySchedule) MarshalJSON() ([]byte, error) { +func (s ResourcePolicySnapshotSchedulePolicySchedule) MarshalJSON() ([]byte, error) { type NoMethod ResourcePolicySnapshotSchedulePolicySchedule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourcePolicySnapshotSchedulePolicySnapshotProperties: Specified snapshot @@ -36884,9 +37204,9 @@ type ResourcePolicySnapshotSchedulePolicySnapshotProperties struct { NullFields []string `json:"-"` } -func (s *ResourcePolicySnapshotSchedulePolicySnapshotProperties) MarshalJSON() ([]byte, error) { +func (s ResourcePolicySnapshotSchedulePolicySnapshotProperties) MarshalJSON() ([]byte, error) { type NoMethod ResourcePolicySnapshotSchedulePolicySnapshotProperties - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourcePolicyWeeklyCycle: Time window specified for weekly operations. 
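The cycle types above (daily, hourly, weekly) plug into ResourcePolicySnapshotSchedulePolicy's Schedule, alongside the retention and snapshot-properties messages also touched in this stretch. A composition sketch; the DailySchedule field name and the exact field sets of Schedule and SnapshotProperties are not visible in this diff and are assumed from the upstream API:

package sketch

import compute "google.golang.org/api/compute/v1"

// dailySnapshots sketches a policy that snapshots attached disks every
// day at 04:00 UTC and keeps each snapshot for two weeks.
func dailySnapshots() *compute.ResourcePolicy {
	return &compute.ResourcePolicy{
		Name: "daily-snapshots",
		SnapshotSchedulePolicy: &compute.ResourcePolicySnapshotSchedulePolicy{
			Schedule: &compute.ResourcePolicySnapshotSchedulePolicySchedule{
				DailySchedule: &compute.ResourcePolicyDailyCycle{
					DaysInCycle: 1,
					StartTime:   "04:00",
				},
			},
			RetentionPolicy: &compute.ResourcePolicySnapshotSchedulePolicyRetentionPolicy{
				MaxRetentionDays: 14,
			},
			SnapshotProperties: &compute.ResourcePolicySnapshotSchedulePolicySnapshotProperties{
				StorageLocations: []string{"us-central1"},
			},
		},
	}
}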
@@ -36906,9 +37226,9 @@ type ResourcePolicyWeeklyCycle struct { NullFields []string `json:"-"` } -func (s *ResourcePolicyWeeklyCycle) MarshalJSON() ([]byte, error) { +func (s ResourcePolicyWeeklyCycle) MarshalJSON() ([]byte, error) { type NoMethod ResourcePolicyWeeklyCycle - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ResourcePolicyWeeklyCycleDayOfWeek struct { @@ -36945,9 +37265,9 @@ type ResourcePolicyWeeklyCycleDayOfWeek struct { NullFields []string `json:"-"` } -func (s *ResourcePolicyWeeklyCycleDayOfWeek) MarshalJSON() ([]byte, error) { +func (s ResourcePolicyWeeklyCycleDayOfWeek) MarshalJSON() ([]byte, error) { type NoMethod ResourcePolicyWeeklyCycleDayOfWeek - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourceStatus: Contains output only fields. Use this sub-message for actual @@ -36956,8 +37276,9 @@ func (s *ResourcePolicyWeeklyCycleDayOfWeek) MarshalJSON() ([]byte, error) { type ResourceStatus struct { // PhysicalHost: [Output Only] An opaque ID of the host on which the VM is // running. - PhysicalHost string `json:"physicalHost,omitempty"` - UpcomingMaintenance *UpcomingMaintenance `json:"upcomingMaintenance,omitempty"` + PhysicalHost string `json:"physicalHost,omitempty"` + Scheduling *ResourceStatusScheduling `json:"scheduling,omitempty"` + UpcomingMaintenance *UpcomingMaintenance `json:"upcomingMaintenance,omitempty"` // ForceSendFields is a list of field names (e.g. "PhysicalHost") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See @@ -36971,9 +37292,32 @@ type ResourceStatus struct { NullFields []string `json:"-"` } -func (s *ResourceStatus) MarshalJSON() ([]byte, error) { +func (s ResourceStatus) MarshalJSON() ([]byte, error) { type NoMethod ResourceStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type ResourceStatusScheduling struct { + // AvailabilityDomain: Specifies the availability domain to place the instance + // in. The value must be a number between 1 and the number of availability + // domains specified in the spread placement policy attached to the instance. + AvailabilityDomain int64 `json:"availabilityDomain,omitempty"` + // ForceSendFields is a list of field names (e.g. "AvailabilityDomain") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AvailabilityDomain") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ResourceStatusScheduling) MarshalJSON() ([]byte, error) { + type NoMethod ResourceStatusScheduling + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Route: Represents a Route resource. 
A route defines a path from VM instances @@ -37018,10 +37362,15 @@ type Route struct { // NextHopIlb: The URL to a forwarding rule of type // loadBalancingScheme=INTERNAL that should handle matching packets or the IP // address of the forwarding Rule. For example, the following are all valid - // URLs: - 10.128.0.56 - + // URLs: - // https://www.googleapis.com/compute/v1/projects/project/regions/region // /forwardingRules/forwardingRule - - // regions/region/forwardingRules/forwardingRule + // regions/region/forwardingRules/forwardingRule If an IP address is provided, + // must specify an IPv4 address in dot-decimal notation or an IPv6 address in + // RFC 4291 format. For example, the following are all valid IP addresses: - + // 10.128.0.56 - 2001:db8::2d9:51:0:0 - 2001:db8:0:0:2d9:51:0:0 IPv6 addresses + // will be displayed using RFC 5952 compressed format (e.g. + // 2001:db8::2d9:51:0:0). Should never be an IPv4-mapped IPv6 address. NextHopIlb string `json:"nextHopIlb,omitempty"` // NextHopInstance: The URL to an instance that should handle matching packets. // You can specify this as a full or partial URL. For example: @@ -37097,9 +37446,9 @@ type Route struct { NullFields []string `json:"-"` } -func (s *Route) MarshalJSON() ([]byte, error) { +func (s Route) MarshalJSON() ([]byte, error) { type NoMethod Route - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RouteWarnings struct { @@ -37181,9 +37530,9 @@ type RouteWarnings struct { NullFields []string `json:"-"` } -func (s *RouteWarnings) MarshalJSON() ([]byte, error) { +func (s RouteWarnings) MarshalJSON() ([]byte, error) { type NoMethod RouteWarnings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RouteWarningsData struct { @@ -37210,9 +37559,9 @@ type RouteWarningsData struct { NullFields []string `json:"-"` } -func (s *RouteWarningsData) MarshalJSON() ([]byte, error) { +func (s RouteWarningsData) MarshalJSON() ([]byte, error) { type NoMethod RouteWarningsData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RouteAsPath struct { @@ -37245,9 +37594,9 @@ type RouteAsPath struct { NullFields []string `json:"-"` } -func (s *RouteAsPath) MarshalJSON() ([]byte, error) { +func (s RouteAsPath) MarshalJSON() ([]byte, error) { type NoMethod RouteAsPath - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RouteList: Contains a list of Route resources. @@ -37284,9 +37633,9 @@ type RouteList struct { NullFields []string `json:"-"` } -func (s *RouteList) MarshalJSON() ([]byte, error) { +func (s RouteList) MarshalJSON() ([]byte, error) { type NoMethod RouteList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RouteListWarning: [Output Only] Informational warning message. 
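
Editor's note — the expanded NextHopIlb documentation in the Route hunk above spells out the accepted IP literal forms: IPv4 dot-decimal, IPv6 in RFC 4291 format (displayed RFC 5952 compressed), and never an IPv4-mapped IPv6 address. A small standalone illustration of those rules with the standard library (not part of the provider; net/netip is only used here to demonstrate the formats):

package main

import (
	"fmt"
	"net/netip"
)

func main() {
	// The two IPv6 spellings from the NextHopIlb docs compress to the
	// same RFC 5952 form when printed by netip.Addr.String.
	examples := []string{
		"10.128.0.56",
		"2001:db8::2d9:51:0:0",
		"2001:db8:0:0:2d9:51:0:0",
	}
	for _, s := range examples {
		a, err := netip.ParseAddr(s)
		if err != nil {
			fmt.Println(s, "invalid:", err)
			continue
		}
		// Is4In6 reports an IPv4-mapped IPv6 address (::ffff:a.b.c.d),
		// the one form the field documentation disallows.
		fmt.Printf("%-25s -> %s (IPv4-mapped: %v)\n", s, a, a.Is4In6())
	}
}
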
@@ -37369,9 +37718,9 @@ type RouteListWarning struct { NullFields []string `json:"-"` } -func (s *RouteListWarning) MarshalJSON() ([]byte, error) { +func (s RouteListWarning) MarshalJSON() ([]byte, error) { type NoMethod RouteListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RouteListWarningData struct { @@ -37398,9 +37747,9 @@ type RouteListWarningData struct { NullFields []string `json:"-"` } -func (s *RouteListWarningData) MarshalJSON() ([]byte, error) { +func (s RouteListWarningData) MarshalJSON() ([]byte, error) { type NoMethod RouteListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Router: Represents a Cloud Router resource. For more information about Cloud @@ -37467,9 +37816,9 @@ type Router struct { NullFields []string `json:"-"` } -func (s *Router) MarshalJSON() ([]byte, error) { +func (s Router) MarshalJSON() ([]byte, error) { type NoMethod Router - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RouterAdvertisedIpRange: Description-tagged IP ranges for the router to @@ -37492,9 +37841,9 @@ type RouterAdvertisedIpRange struct { NullFields []string `json:"-"` } -func (s *RouterAdvertisedIpRange) MarshalJSON() ([]byte, error) { +func (s RouterAdvertisedIpRange) MarshalJSON() ([]byte, error) { type NoMethod RouterAdvertisedIpRange - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RouterAggregatedList: Contains a list of routers. @@ -37533,9 +37882,9 @@ type RouterAggregatedList struct { NullFields []string `json:"-"` } -func (s *RouterAggregatedList) MarshalJSON() ([]byte, error) { +func (s RouterAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod RouterAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RouterAggregatedListWarning: [Output Only] Informational warning message. 
@@ -37618,9 +37967,9 @@ type RouterAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *RouterAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s RouterAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod RouterAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RouterAggregatedListWarningData struct { @@ -37647,9 +37996,9 @@ type RouterAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *RouterAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s RouterAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod RouterAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RouterBgp struct { @@ -37709,9 +38058,9 @@ type RouterBgp struct { NullFields []string `json:"-"` } -func (s *RouterBgp) MarshalJSON() ([]byte, error) { +func (s RouterBgp) MarshalJSON() ([]byte, error) { type NoMethod RouterBgp - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RouterBgpPeer struct { @@ -37851,9 +38200,9 @@ type RouterBgpPeer struct { NullFields []string `json:"-"` } -func (s *RouterBgpPeer) MarshalJSON() ([]byte, error) { +func (s RouterBgpPeer) MarshalJSON() ([]byte, error) { type NoMethod RouterBgpPeer - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RouterBgpPeerBfd struct { @@ -37897,9 +38246,9 @@ type RouterBgpPeerBfd struct { NullFields []string `json:"-"` } -func (s *RouterBgpPeerBfd) MarshalJSON() ([]byte, error) { +func (s RouterBgpPeerBfd) MarshalJSON() ([]byte, error) { type NoMethod RouterBgpPeerBfd - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RouterBgpPeerCustomLearnedIpRange struct { @@ -37921,9 +38270,9 @@ type RouterBgpPeerCustomLearnedIpRange struct { NullFields []string `json:"-"` } -func (s *RouterBgpPeerCustomLearnedIpRange) MarshalJSON() ([]byte, error) { +func (s RouterBgpPeerCustomLearnedIpRange) MarshalJSON() ([]byte, error) { type NoMethod RouterBgpPeerCustomLearnedIpRange - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RouterInterface struct { @@ -38011,9 +38360,9 @@ type RouterInterface struct { NullFields []string `json:"-"` } -func (s *RouterInterface) MarshalJSON() ([]byte, error) { +func (s RouterInterface) MarshalJSON() ([]byte, error) { type NoMethod RouterInterface - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RouterList: Contains a list of Router resources. 
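
Editor's note — every struct in these hunks carries the same ForceSendFields/NullFields convention that gensupport.MarshalJSON consumes: fields listed in ForceSendFields are sent even when zero-valued (which omitempty would otherwise drop), and fields listed in NullFields are sent as explicit JSON null. A rough, self-contained sketch of that behavior, assuming a struct value as input (the real implementation lives in the vendored google.golang.org/api/internal/gensupport package and handles more cases):

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strings"
)

// roughMarshal imitates the gensupport.MarshalJSON contract described in
// the field comments above. Simplified sketch, not the vendored code.
func roughMarshal(v any, forceSend, null []string) ([]byte, error) {
	base, err := json.Marshal(v) // omitempty drops zero values here
	if err != nil {
		return nil, err
	}
	m := map[string]any{}
	if err := json.Unmarshal(base, &m); err != nil {
		return nil, err
	}
	rv := reflect.ValueOf(v)
	rt := rv.Type()
	jsonName := func(goName string) (string, bool) {
		f, ok := rt.FieldByName(goName)
		if !ok {
			return "", false
		}
		tag := strings.Split(f.Tag.Get("json"), ",")[0]
		if tag == "" || tag == "-" {
			return "", false
		}
		return tag, true
	}
	for _, goName := range forceSend {
		if name, ok := jsonName(goName); ok {
			m[name] = rv.FieldByName(goName).Interface() // re-add the zero value
		}
	}
	for _, goName := range null {
		if name, ok := jsonName(goName); ok {
			m[name] = nil // explicit JSON null
		}
	}
	return json.Marshal(m)
}

// NatLog is a hypothetical mirror of a small generated struct.
type NatLog struct {
	Enable bool   `json:"enable,omitempty"`
	Filter string `json:"filter,omitempty"`
}

func main() {
	// enable=false would normally vanish under omitempty; ForceSendFields
	// keeps it, matching the field comments in the hunks above.
	out, _ := roughMarshal(NatLog{}, []string{"Enable"}, []string{"Filter"})
	fmt.Println(string(out)) // {"enable":false,"filter":null}
}
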
@@ -38050,9 +38399,9 @@ type RouterList struct { NullFields []string `json:"-"` } -func (s *RouterList) MarshalJSON() ([]byte, error) { +func (s RouterList) MarshalJSON() ([]byte, error) { type NoMethod RouterList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RouterListWarning: [Output Only] Informational warning message. @@ -38135,9 +38484,9 @@ type RouterListWarning struct { NullFields []string `json:"-"` } -func (s *RouterListWarning) MarshalJSON() ([]byte, error) { +func (s RouterListWarning) MarshalJSON() ([]byte, error) { type NoMethod RouterListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RouterListWarningData struct { @@ -38164,9 +38513,9 @@ type RouterListWarningData struct { NullFields []string `json:"-"` } -func (s *RouterListWarningData) MarshalJSON() ([]byte, error) { +func (s RouterListWarningData) MarshalJSON() ([]byte, error) { type NoMethod RouterListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RouterMd5AuthenticationKey struct { @@ -38191,9 +38540,9 @@ type RouterMd5AuthenticationKey struct { NullFields []string `json:"-"` } -func (s *RouterMd5AuthenticationKey) MarshalJSON() ([]byte, error) { +func (s RouterMd5AuthenticationKey) MarshalJSON() ([]byte, error) { type NoMethod RouterMd5AuthenticationKey - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RouterNat: Represents a Nat resource. It enables the VMs within the @@ -38332,9 +38681,9 @@ type RouterNat struct { NullFields []string `json:"-"` } -func (s *RouterNat) MarshalJSON() ([]byte, error) { +func (s RouterNat) MarshalJSON() ([]byte, error) { type NoMethod RouterNat - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RouterNatLogConfig: Configuration of logging on a NAT. @@ -38366,9 +38715,9 @@ type RouterNatLogConfig struct { NullFields []string `json:"-"` } -func (s *RouterNatLogConfig) MarshalJSON() ([]byte, error) { +func (s RouterNatLogConfig) MarshalJSON() ([]byte, error) { type NoMethod RouterNatLogConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RouterNatRule struct { @@ -38379,12 +38728,12 @@ type RouterNatRule struct { // Match: CEL expression that specifies the match condition that egress traffic // from a VM is evaluated against. If it evaluates to true, the corresponding // `action` is enforced. 
The following examples are valid match expressions for - // public NAT: "inIpRange(destination.ip, '1.1.0.0/16') || - // inIpRange(destination.ip, '2.2.0.0/16')" "destination.ip == '1.1.0.1' || - // destination.ip == '8.8.8.8'" The following example is a valid match - // expression for private NAT: "nexthop.hub == + // public NAT: `inIpRange(destination.ip, '1.1.0.0/16') || + // inIpRange(destination.ip, '2.2.0.0/16')` `destination.ip == '1.1.0.1' || + // destination.ip == '8.8.8.8'` The following example is a valid match + // expression for private NAT: `nexthop.hub == // '//networkconnectivity.googleapis.com/projects/my-project/locations/global/hu - // bs/hub-1'" + // bs/hub-1'` Match string `json:"match,omitempty"` // RuleNumber: An integer uniquely identifying a rule in the list. The rule // number must be a positive value between 0 and 65000, and must be unique @@ -38403,9 +38752,9 @@ type RouterNatRule struct { NullFields []string `json:"-"` } -func (s *RouterNatRule) MarshalJSON() ([]byte, error) { +func (s RouterNatRule) MarshalJSON() ([]byte, error) { type NoMethod RouterNatRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RouterNatRuleAction struct { @@ -38440,9 +38789,9 @@ type RouterNatRuleAction struct { NullFields []string `json:"-"` } -func (s *RouterNatRuleAction) MarshalJSON() ([]byte, error) { +func (s RouterNatRuleAction) MarshalJSON() ([]byte, error) { type NoMethod RouterNatRuleAction - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RouterNatSubnetworkToNat: Defines the IP ranges that want to use NAT for a @@ -38481,15 +38830,23 @@ type RouterNatSubnetworkToNat struct { NullFields []string `json:"-"` } -func (s *RouterNatSubnetworkToNat) MarshalJSON() ([]byte, error) { +func (s RouterNatSubnetworkToNat) MarshalJSON() ([]byte, error) { type NoMethod RouterNatSubnetworkToNat - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RouterStatus struct { - // BestRoutes: Best routes for this router's network. + // BestRoutes: A list of the best dynamic routes for this Cloud Router's + // Virtual Private Cloud (VPC) network in the same region as this Cloud Router. + // Lists all of the best routes per prefix that are programmed into this + // region's VPC data plane. When global dynamic routing mode is turned on in + // the VPC network, this list can include cross-region dynamic routes from + // Cloud Routers in other regions. BestRoutes []*Route `json:"bestRoutes,omitempty"` - // BestRoutesForRouter: Best routes learned by this router. + // BestRoutesForRouter: A list of the best BGP routes learned by this Cloud + // Router. It is possible that routes listed might not be programmed into the + // data plane, if the Google Cloud control plane finds a more optimal route for + // a prefix than a route learned by this Cloud Router. 
BestRoutesForRouter []*Route `json:"bestRoutesForRouter,omitempty"` BgpPeerStatus []*RouterStatusBgpPeerStatus `json:"bgpPeerStatus,omitempty"` NatStatus []*RouterStatusNatStatus `json:"natStatus,omitempty"` @@ -38508,9 +38865,9 @@ type RouterStatus struct { NullFields []string `json:"-"` } -func (s *RouterStatus) MarshalJSON() ([]byte, error) { +func (s RouterStatus) MarshalJSON() ([]byte, error) { type NoMethod RouterStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RouterStatusBgpPeerStatus struct { @@ -38589,9 +38946,9 @@ type RouterStatusBgpPeerStatus struct { NullFields []string `json:"-"` } -func (s *RouterStatusBgpPeerStatus) MarshalJSON() ([]byte, error) { +func (s RouterStatusBgpPeerStatus) MarshalJSON() ([]byte, error) { type NoMethod RouterStatusBgpPeerStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RouterStatusNatStatus: Status of a NAT contained in this router. @@ -38636,9 +38993,9 @@ type RouterStatusNatStatus struct { NullFields []string `json:"-"` } -func (s *RouterStatusNatStatus) MarshalJSON() ([]byte, error) { +func (s RouterStatusNatStatus) MarshalJSON() ([]byte, error) { type NoMethod RouterStatusNatStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RouterStatusNatStatusNatRuleStatus: Status of a NAT Rule contained in this @@ -38672,9 +39029,9 @@ type RouterStatusNatStatusNatRuleStatus struct { NullFields []string `json:"-"` } -func (s *RouterStatusNatStatusNatRuleStatus) MarshalJSON() ([]byte, error) { +func (s RouterStatusNatStatusNatRuleStatus) MarshalJSON() ([]byte, error) { type NoMethod RouterStatusNatStatusNatRuleStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RouterStatusResponse struct { @@ -38697,9 +39054,9 @@ type RouterStatusResponse struct { NullFields []string `json:"-"` } -func (s *RouterStatusResponse) MarshalJSON() ([]byte, error) { +func (s RouterStatusResponse) MarshalJSON() ([]byte, error) { type NoMethod RouterStatusResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RoutersPreviewResponse struct { @@ -38721,9 +39078,9 @@ type RoutersPreviewResponse struct { NullFields []string `json:"-"` } -func (s *RoutersPreviewResponse) MarshalJSON() ([]byte, error) { +func (s RoutersPreviewResponse) MarshalJSON() ([]byte, error) { type NoMethod RoutersPreviewResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RoutersScopedList struct { @@ -38745,9 +39102,9 @@ type RoutersScopedList struct { NullFields []string `json:"-"` } -func (s *RoutersScopedList) MarshalJSON() ([]byte, error) { +func (s RoutersScopedList) MarshalJSON() ([]byte, error) { type NoMethod RoutersScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RoutersScopedListWarning: Informational warning which replaces the list of @@ -38831,9 +39188,9 @@ type 
RoutersScopedListWarning struct { NullFields []string `json:"-"` } -func (s *RoutersScopedListWarning) MarshalJSON() ([]byte, error) { +func (s RoutersScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod RoutersScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RoutersScopedListWarningData struct { @@ -38860,9 +39217,9 @@ type RoutersScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *RoutersScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s RoutersScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod RoutersScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Rule: This is deprecated and has no effect. Do not use. @@ -38902,9 +39259,9 @@ type Rule struct { NullFields []string `json:"-"` } -func (s *Rule) MarshalJSON() ([]byte, error) { +func (s Rule) MarshalJSON() ([]byte, error) { type NoMethod Rule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SSLHealthCheck struct { @@ -38973,9 +39330,9 @@ type SSLHealthCheck struct { NullFields []string `json:"-"` } -func (s *SSLHealthCheck) MarshalJSON() ([]byte, error) { +func (s SSLHealthCheck) MarshalJSON() ([]byte, error) { type NoMethod SSLHealthCheck - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SavedAttachedDisk: DEPRECATED: Please use compute#savedDisk instead. An @@ -39058,9 +39415,9 @@ type SavedAttachedDisk struct { NullFields []string `json:"-"` } -func (s *SavedAttachedDisk) MarshalJSON() ([]byte, error) { +func (s SavedAttachedDisk) MarshalJSON() ([]byte, error) { type NoMethod SavedAttachedDisk - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SavedDisk: An instance-attached disk resource. @@ -39104,9 +39461,9 @@ type SavedDisk struct { NullFields []string `json:"-"` } -func (s *SavedDisk) MarshalJSON() ([]byte, error) { +func (s SavedDisk) MarshalJSON() ([]byte, error) { type NoMethod SavedDisk - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ScalingScheduleStatus struct { @@ -39142,9 +39499,9 @@ type ScalingScheduleStatus struct { NullFields []string `json:"-"` } -func (s *ScalingScheduleStatus) MarshalJSON() ([]byte, error) { +func (s ScalingScheduleStatus) MarshalJSON() ([]byte, error) { type NoMethod ScalingScheduleStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Scheduling: Sets the scheduling options for an Instance. @@ -39156,6 +39513,10 @@ type Scheduling struct { // set to true so an instance is automatically restarted if it is terminated by // Compute Engine. AutomaticRestart *bool `json:"automaticRestart,omitempty"` + // AvailabilityDomain: Specifies the availability domain to place the instance + // in. 
The value must be a number between 1 and the number of availability + // domains specified in the spread placement policy attached to the instance. + AvailabilityDomain int64 `json:"availabilityDomain,omitempty"` // InstanceTerminationAction: Specifies the termination action for the // instance. // @@ -39229,9 +39590,9 @@ type Scheduling struct { NullFields []string `json:"-"` } -func (s *Scheduling) MarshalJSON() ([]byte, error) { +func (s Scheduling) MarshalJSON() ([]byte, error) { type NoMethod Scheduling - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SchedulingNodeAffinity: Node Affinity: the configuration of desired nodes @@ -39262,9 +39623,9 @@ type SchedulingNodeAffinity struct { NullFields []string `json:"-"` } -func (s *SchedulingNodeAffinity) MarshalJSON() ([]byte, error) { +func (s SchedulingNodeAffinity) MarshalJSON() ([]byte, error) { type NoMethod SchedulingNodeAffinity - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SchedulingOnInstanceStopAction: Defines the behaviour for instances with the @@ -39287,9 +39648,9 @@ type SchedulingOnInstanceStopAction struct { NullFields []string `json:"-"` } -func (s *SchedulingOnInstanceStopAction) MarshalJSON() ([]byte, error) { +func (s SchedulingOnInstanceStopAction) MarshalJSON() ([]byte, error) { type NoMethod SchedulingOnInstanceStopAction - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Screenshot: An instance's screenshot. @@ -39315,9 +39676,9 @@ type Screenshot struct { NullFields []string `json:"-"` } -func (s *Screenshot) MarshalJSON() ([]byte, error) { +func (s Screenshot) MarshalJSON() ([]byte, error) { type NoMethod Screenshot - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPoliciesAggregatedList struct { @@ -39357,9 +39718,9 @@ type SecurityPoliciesAggregatedList struct { NullFields []string `json:"-"` } -func (s *SecurityPoliciesAggregatedList) MarshalJSON() ([]byte, error) { +func (s SecurityPoliciesAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod SecurityPoliciesAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SecurityPoliciesAggregatedListWarning: [Output Only] Informational warning @@ -39443,9 +39804,9 @@ type SecurityPoliciesAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *SecurityPoliciesAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s SecurityPoliciesAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod SecurityPoliciesAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPoliciesAggregatedListWarningData struct { @@ -39472,9 +39833,9 @@ type SecurityPoliciesAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *SecurityPoliciesAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s SecurityPoliciesAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod 
SecurityPoliciesAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPoliciesListPreconfiguredExpressionSetsResponse struct { @@ -39495,9 +39856,9 @@ type SecurityPoliciesListPreconfiguredExpressionSetsResponse struct { NullFields []string `json:"-"` } -func (s *SecurityPoliciesListPreconfiguredExpressionSetsResponse) MarshalJSON() ([]byte, error) { +func (s SecurityPoliciesListPreconfiguredExpressionSetsResponse) MarshalJSON() ([]byte, error) { type NoMethod SecurityPoliciesListPreconfiguredExpressionSetsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPoliciesScopedList struct { @@ -39519,9 +39880,9 @@ type SecurityPoliciesScopedList struct { NullFields []string `json:"-"` } -func (s *SecurityPoliciesScopedList) MarshalJSON() ([]byte, error) { +func (s SecurityPoliciesScopedList) MarshalJSON() ([]byte, error) { type NoMethod SecurityPoliciesScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SecurityPoliciesScopedListWarning: Informational warning which replaces the @@ -39605,9 +39966,9 @@ type SecurityPoliciesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *SecurityPoliciesScopedListWarning) MarshalJSON() ([]byte, error) { +func (s SecurityPoliciesScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod SecurityPoliciesScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPoliciesScopedListWarningData struct { @@ -39634,9 +39995,9 @@ type SecurityPoliciesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *SecurityPoliciesScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s SecurityPoliciesScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod SecurityPoliciesScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPoliciesWafConfig struct { @@ -39654,9 +40015,9 @@ type SecurityPoliciesWafConfig struct { NullFields []string `json:"-"` } -func (s *SecurityPoliciesWafConfig) MarshalJSON() ([]byte, error) { +func (s SecurityPoliciesWafConfig) MarshalJSON() ([]byte, error) { type NoMethod SecurityPoliciesWafConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SecurityPolicy: Represents a Google Cloud Armor security policy resource. 
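
Editor's note — among the new fields in this update, the Scheduling hunk later in this file adds AvailabilityDomain, documented as a number between 1 and the number of availability domains in the spread placement policy attached to the instance (with a matching output-only echo in ResourceStatusScheduling). A hypothetical client-side pre-flight check around that constraint; the Scheduling mirror below is illustrative, not the vendored type, and domainCount is an input because the placement policy lives server-side:

package main

import "fmt"

// Scheduling is an illustrative mirror of the new field only.
type Scheduling struct {
	AvailabilityDomain int64 `json:"availabilityDomain,omitempty"`
}

// validateAvailabilityDomain enforces the documented 1..domainCount range.
// Zero means unset, which omitempty drops from the request entirely.
func validateAvailabilityDomain(s Scheduling, domainCount int64) error {
	if s.AvailabilityDomain == 0 {
		return nil
	}
	if s.AvailabilityDomain < 1 || s.AvailabilityDomain > domainCount {
		return fmt.Errorf("availabilityDomain %d outside 1..%d",
			s.AvailabilityDomain, domainCount)
	}
	return nil
}

func main() {
	fmt.Println(validateAvailabilityDomain(Scheduling{AvailabilityDomain: 3}, 2)) // error
	fmt.Println(validateAvailabilityDomain(Scheduling{AvailabilityDomain: 2}, 2)) // nil
}
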
@@ -39763,9 +40124,9 @@ type SecurityPolicy struct { NullFields []string `json:"-"` } -func (s *SecurityPolicy) MarshalJSON() ([]byte, error) { +func (s SecurityPolicy) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SecurityPolicyAdaptiveProtectionConfig: Configuration options for Cloud @@ -39787,9 +40148,9 @@ type SecurityPolicyAdaptiveProtectionConfig struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyAdaptiveProtectionConfig) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyAdaptiveProtectionConfig) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyAdaptiveProtectionConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig: Configuration @@ -39823,9 +40184,9 @@ type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig struct { @@ -39833,9 +40194,15 @@ type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfi AutoDeployExpirationSec int64 `json:"autoDeployExpirationSec,omitempty"` AutoDeployImpactedBaselineThreshold float64 `json:"autoDeployImpactedBaselineThreshold,omitempty"` AutoDeployLoadThreshold float64 `json:"autoDeployLoadThreshold,omitempty"` + DetectionAbsoluteQps float64 `json:"detectionAbsoluteQps,omitempty"` + DetectionLoadThreshold float64 `json:"detectionLoadThreshold,omitempty"` + DetectionRelativeToBaselineQps float64 `json:"detectionRelativeToBaselineQps,omitempty"` // Name: The name must be 1-63 characters long, and comply with RFC1035. The // name must be unique within the security policy. Name string `json:"name,omitempty"` + // TrafficGranularityConfigs: Configuration options for enabling Adaptive + // Protection to operate on specified granular traffic units. + TrafficGranularityConfigs []*SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig `json:"trafficGranularityConfigs,omitempty"` // ForceSendFields is a list of field names (e.g. // "AutoDeployConfidenceThreshold") to unconditionally include in API requests. 
// By default, fields with empty or default values are omitted from API @@ -39849,9 +40216,9 @@ type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfi NullFields []string `json:"-"` } -func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig) UnmarshalJSON(data []byte) error { @@ -39860,6 +40227,9 @@ func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdC AutoDeployConfidenceThreshold gensupport.JSONFloat64 `json:"autoDeployConfidenceThreshold"` AutoDeployImpactedBaselineThreshold gensupport.JSONFloat64 `json:"autoDeployImpactedBaselineThreshold"` AutoDeployLoadThreshold gensupport.JSONFloat64 `json:"autoDeployLoadThreshold"` + DetectionAbsoluteQps gensupport.JSONFloat64 `json:"detectionAbsoluteQps"` + DetectionLoadThreshold gensupport.JSONFloat64 `json:"detectionLoadThreshold"` + DetectionRelativeToBaselineQps gensupport.JSONFloat64 `json:"detectionRelativeToBaselineQps"` *NoMethod } s1.NoMethod = (*NoMethod)(s) @@ -39869,9 +40239,47 @@ func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdC s.AutoDeployConfidenceThreshold = float64(s1.AutoDeployConfidenceThreshold) s.AutoDeployImpactedBaselineThreshold = float64(s1.AutoDeployImpactedBaselineThreshold) s.AutoDeployLoadThreshold = float64(s1.AutoDeployLoadThreshold) + s.DetectionAbsoluteQps = float64(s1.DetectionAbsoluteQps) + s.DetectionLoadThreshold = float64(s1.DetectionLoadThreshold) + s.DetectionRelativeToBaselineQps = float64(s1.DetectionRelativeToBaselineQps) return nil } +// SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigT +// rafficGranularityConfig: Configurations to specifc granular traffic units +// processed by Adaptive Protection. +type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig struct { + // EnableEachUniqueValue: If enabled, traffic matching each unique value for + // the specified type constitutes a separate traffic unit. It can only be set + // to true if `value` is empty. + EnableEachUniqueValue bool `json:"enableEachUniqueValue,omitempty"` + // Type: Type of this configuration. + // + // Possible values: + // "HTTP_HEADER_HOST" + // "HTTP_PATH" + // "UNSPECIFIED_TYPE" + Type string `json:"type,omitempty"` + // Value: Requests that match this value constitute a granular traffic unit. + Value string `json:"value,omitempty"` + // ForceSendFields is a list of field names (e.g. "EnableEachUniqueValue") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "EnableEachUniqueValue") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + type SecurityPolicyAdvancedOptionsConfig struct { // JsonCustomConfig: Custom configuration to apply the JSON parsing. Only // applicable when json_parsing is set to STANDARD. @@ -39901,9 +40309,9 @@ type SecurityPolicyAdvancedOptionsConfig struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyAdvancedOptionsConfig) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyAdvancedOptionsConfig) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyAdvancedOptionsConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPolicyAdvancedOptionsConfigJsonCustomConfig struct { @@ -39926,9 +40334,9 @@ type SecurityPolicyAdvancedOptionsConfigJsonCustomConfig struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyAdvancedOptionsConfigJsonCustomConfig) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyAdvancedOptionsConfigJsonCustomConfig) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyAdvancedOptionsConfigJsonCustomConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPolicyDdosProtectionConfig struct { @@ -39949,9 +40357,9 @@ type SecurityPolicyDdosProtectionConfig struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyDdosProtectionConfig) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyDdosProtectionConfig) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyDdosProtectionConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPolicyList struct { @@ -39986,9 +40394,9 @@ type SecurityPolicyList struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyList) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyList) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SecurityPolicyListWarning: [Output Only] Informational warning message. 
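
Editor's note — the UnmarshalJSON shim extended in the threshold-config hunk above routes the new Detection* fields through gensupport.JSONFloat64 via an embedded *NoMethod, the decode-side counterpart of the marshal pattern. The gist is tolerating floats that arrive either as JSON numbers or as the string spellings used for special values; a standalone sketch in that spirit (flexFloat is a hypothetical name, and the exact accepted strings are an assumption about the vendored type):

package main

import (
	"encoding/json"
	"fmt"
	"math"
)

// flexFloat decodes a float that may arrive as a JSON number or as one
// of the string spellings for special values. Sketch in the spirit of
// gensupport.JSONFloat64; not the vendored implementation.
type flexFloat float64

func (f *flexFloat) UnmarshalJSON(data []byte) error {
	var n float64
	if err := json.Unmarshal(data, &n); err == nil {
		*f = flexFloat(n)
		return nil
	}
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	switch s {
	case "NaN":
		*f = flexFloat(math.NaN())
	case "Infinity":
		*f = flexFloat(math.Inf(1))
	case "-Infinity":
		*f = flexFloat(math.Inf(-1))
	default:
		return fmt.Errorf("unsupported float string %q", s)
	}
	return nil
}

func main() {
	var cfg struct {
		DetectionAbsoluteQps flexFloat `json:"detectionAbsoluteQps"`
	}
	_ = json.Unmarshal([]byte(`{"detectionAbsoluteQps": 120.5}`), &cfg)
	fmt.Println(cfg.DetectionAbsoluteQps)
	_ = json.Unmarshal([]byte(`{"detectionAbsoluteQps": "Infinity"}`), &cfg)
	fmt.Println(cfg.DetectionAbsoluteQps)
}
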
@@ -40071,9 +40479,9 @@ type SecurityPolicyListWarning struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyListWarning) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyListWarning) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPolicyListWarningData struct { @@ -40100,9 +40508,9 @@ type SecurityPolicyListWarningData struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyListWarningData) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyListWarningData) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPolicyRecaptchaOptionsConfig struct { @@ -40127,9 +40535,9 @@ type SecurityPolicyRecaptchaOptionsConfig struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyRecaptchaOptionsConfig) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyRecaptchaOptionsConfig) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyRecaptchaOptionsConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPolicyReference struct { @@ -40147,9 +40555,9 @@ type SecurityPolicyReference struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyReference) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyReference) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyReference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SecurityPolicyRule: Represents a rule that describes one or more match @@ -40239,9 +40647,9 @@ type SecurityPolicyRule struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyRule) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyRule) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPolicyRuleHttpHeaderAction struct { @@ -40261,9 +40669,9 @@ type SecurityPolicyRuleHttpHeaderAction struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyRuleHttpHeaderAction) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyRuleHttpHeaderAction) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyRuleHttpHeaderAction - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPolicyRuleHttpHeaderActionHttpHeaderOption struct { @@ -40284,9 +40692,9 @@ type SecurityPolicyRuleHttpHeaderActionHttpHeaderOption struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyRuleHttpHeaderActionHttpHeaderOption) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyRuleHttpHeaderActionHttpHeaderOption) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyRuleHttpHeaderActionHttpHeaderOption - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SecurityPolicyRuleMatcher: Represents a 
match condition that incoming @@ -40329,9 +40737,9 @@ type SecurityPolicyRuleMatcher struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyRuleMatcher) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyRuleMatcher) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyRuleMatcher - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPolicyRuleMatcherConfig struct { @@ -40351,9 +40759,9 @@ type SecurityPolicyRuleMatcherConfig struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyRuleMatcherConfig) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyRuleMatcherConfig) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyRuleMatcherConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPolicyRuleMatcherExprOptions struct { @@ -40374,9 +40782,9 @@ type SecurityPolicyRuleMatcherExprOptions struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyRuleMatcherExprOptions) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyRuleMatcherExprOptions) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyRuleMatcherExprOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions struct { @@ -40401,9 +40809,9 @@ type SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SecurityPolicyRuleNetworkMatcher: Represents a match condition that incoming @@ -40447,9 +40855,9 @@ type SecurityPolicyRuleNetworkMatcher struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyRuleNetworkMatcher) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyRuleNetworkMatcher) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyRuleNetworkMatcher - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch struct { @@ -40472,9 +40880,9 @@ type SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPolicyRulePreconfiguredWafConfig struct { @@ -40494,9 +40902,9 @@ type SecurityPolicyRulePreconfiguredWafConfig struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyRulePreconfiguredWafConfig) MarshalJSON() ([]byte, error) { +func (s 
SecurityPolicyRulePreconfiguredWafConfig) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyRulePreconfiguredWafConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPolicyRulePreconfiguredWafConfigExclusion struct { @@ -40533,9 +40941,9 @@ type SecurityPolicyRulePreconfiguredWafConfigExclusion struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyRulePreconfiguredWafConfigExclusion) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyRulePreconfiguredWafConfigExclusion) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyRulePreconfiguredWafConfigExclusion - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams struct { @@ -40567,9 +40975,9 @@ type SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPolicyRuleRateLimitOptions struct { @@ -40663,9 +41071,9 @@ type SecurityPolicyRuleRateLimitOptions struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyRuleRateLimitOptions) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyRuleRateLimitOptions) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyRuleRateLimitOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig struct { @@ -40726,9 +41134,9 @@ type SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPolicyRuleRateLimitOptionsThreshold struct { @@ -40749,9 +41157,9 @@ type SecurityPolicyRuleRateLimitOptionsThreshold struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyRuleRateLimitOptionsThreshold) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyRuleRateLimitOptionsThreshold) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyRuleRateLimitOptionsThreshold - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPolicyRuleRedirectOptions struct { @@ -40777,9 +41185,9 @@ type SecurityPolicyRuleRedirectOptions struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyRuleRedirectOptions) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyRuleRedirectOptions) MarshalJSON() ([]byte, error) { type NoMethod 
SecurityPolicyRuleRedirectOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SecurityPolicyUserDefinedField struct { @@ -40822,9 +41230,9 @@ type SecurityPolicyUserDefinedField struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyUserDefinedField) MarshalJSON() ([]byte, error) { +func (s SecurityPolicyUserDefinedField) MarshalJSON() ([]byte, error) { type NoMethod SecurityPolicyUserDefinedField - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SecuritySettings: The authentication and authorization settings for a @@ -40867,9 +41275,9 @@ type SecuritySettings struct { NullFields []string `json:"-"` } -func (s *SecuritySettings) MarshalJSON() ([]byte, error) { +func (s SecuritySettings) MarshalJSON() ([]byte, error) { type NoMethod SecuritySettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SerialPortOutput: An instance serial console output. @@ -40908,9 +41316,9 @@ type SerialPortOutput struct { NullFields []string `json:"-"` } -func (s *SerialPortOutput) MarshalJSON() ([]byte, error) { +func (s SerialPortOutput) MarshalJSON() ([]byte, error) { type NoMethod SerialPortOutput - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ServerBinding struct { @@ -40934,9 +41342,9 @@ type ServerBinding struct { NullFields []string `json:"-"` } -func (s *ServerBinding) MarshalJSON() ([]byte, error) { +func (s ServerBinding) MarshalJSON() ([]byte, error) { type NoMethod ServerBinding - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ServiceAccount: A service account. @@ -40958,9 +41366,9 @@ type ServiceAccount struct { NullFields []string `json:"-"` } -func (s *ServiceAccount) MarshalJSON() ([]byte, error) { +func (s ServiceAccount) MarshalJSON() ([]byte, error) { type NoMethod ServiceAccount - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ServiceAttachment: Represents a ServiceAttachment resource. A service @@ -41038,6 +41446,17 @@ type ServiceAttachment struct { // loadBalancingScheme INTERNAL* that is serving the endpoint identified by // this service attachment. ProducerForwardingRule string `json:"producerForwardingRule,omitempty"` + // PropagatedConnectionLimit: The number of consumer spokes that connected + // Private Service Connect endpoints can be propagated to through Network + // Connectivity Center. This limit lets the service producer limit how many + // propagated Private Service Connect connections can be established to this + // service attachment from a single consumer. If the connection preference of + // the service attachment is ACCEPT_MANUAL, the limit applies to each project + // or network that is listed in the consumer accept list. If the connection + // preference of the service attachment is ACCEPT_AUTOMATIC, the limit applies + // to each project that contains a connected endpoint. If unspecified, the + // default propagated connection limit is 250. 
+ PropagatedConnectionLimit int64 `json:"propagatedConnectionLimit,omitempty"` // PscServiceAttachmentId: [Output Only] An 128-bit global unique ID of the PSC // service attachment. PscServiceAttachmentId *Uint128 `json:"pscServiceAttachmentId,omitempty"` @@ -41077,9 +41496,9 @@ type ServiceAttachment struct { NullFields []string `json:"-"` } -func (s *ServiceAttachment) MarshalJSON() ([]byte, error) { +func (s ServiceAttachment) MarshalJSON() ([]byte, error) { type NoMethod ServiceAttachment - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ServiceAttachmentAggregatedList: Contains a list of @@ -41119,9 +41538,9 @@ type ServiceAttachmentAggregatedList struct { NullFields []string `json:"-"` } -func (s *ServiceAttachmentAggregatedList) MarshalJSON() ([]byte, error) { +func (s ServiceAttachmentAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod ServiceAttachmentAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ServiceAttachmentAggregatedListWarning: [Output Only] Informational warning @@ -41205,9 +41624,9 @@ type ServiceAttachmentAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *ServiceAttachmentAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s ServiceAttachmentAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod ServiceAttachmentAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ServiceAttachmentAggregatedListWarningData struct { @@ -41234,9 +41653,9 @@ type ServiceAttachmentAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *ServiceAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s ServiceAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod ServiceAttachmentAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ServiceAttachmentConnectedEndpoint: [Output Only] A connection connected to @@ -41246,6 +41665,10 @@ type ServiceAttachmentConnectedEndpoint struct { ConsumerNetwork string `json:"consumerNetwork,omitempty"` // Endpoint: The url of a connected endpoint. Endpoint string `json:"endpoint,omitempty"` + // PropagatedConnectionCount: The number of consumer Network Connectivity + // Center spokes that the connected Private Service Connect endpoint has + // propagated to. + PropagatedConnectionCount int64 `json:"propagatedConnectionCount,omitempty"` // PscConnectionId: The PSC connection id of the connected endpoint. PscConnectionId uint64 `json:"pscConnectionId,omitempty,string"` // Status: The status of a connected endpoint to this service attachment. 
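
Editor's note — the ServiceAttachment hunk above introduces PropagatedConnectionLimit, with a documented server-side default of 250 when unspecified. Because the field is tagged omitempty, leaving it at zero omits it from the request entirely; a short illustration with a local mirror of the relevant fields (the struct below is a hypothetical excerpt, and sending an explicit zero through the generated code would additionally require listing the field in ForceSendFields, per the convention shown throughout this file):

package main

import (
	"encoding/json"
	"fmt"
)

// ServiceAttachment here mirrors only the fields needed for the demo;
// the vendored struct has many more.
type ServiceAttachment struct {
	Name                      string   `json:"name,omitempty"`
	PropagatedConnectionLimit int64    `json:"propagatedConnectionLimit,omitempty"`
	ForceSendFields           []string `json:"-"` // needed to force an explicit 0
}

func main() {
	// Unset: the field is omitted and the API applies its documented
	// default of 250 propagated connections.
	a, _ := json.Marshal(ServiceAttachment{Name: "sa-1"})
	fmt.Println(string(a)) // {"name":"sa-1"}

	// Explicit non-zero value: serialized as usual.
	b, _ := json.Marshal(ServiceAttachment{Name: "sa-1", PropagatedConnectionLimit: 10})
	fmt.Println(string(b)) // {"name":"sa-1","propagatedConnectionLimit":10}
}
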
@@ -41273,9 +41696,9 @@ type ServiceAttachmentConnectedEndpoint struct { NullFields []string `json:"-"` } -func (s *ServiceAttachmentConnectedEndpoint) MarshalJSON() ([]byte, error) { +func (s ServiceAttachmentConnectedEndpoint) MarshalJSON() ([]byte, error) { type NoMethod ServiceAttachmentConnectedEndpoint - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ServiceAttachmentConsumerProjectLimit struct { @@ -41299,9 +41722,9 @@ type ServiceAttachmentConsumerProjectLimit struct { NullFields []string `json:"-"` } -func (s *ServiceAttachmentConsumerProjectLimit) MarshalJSON() ([]byte, error) { +func (s ServiceAttachmentConsumerProjectLimit) MarshalJSON() ([]byte, error) { type NoMethod ServiceAttachmentConsumerProjectLimit - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ServiceAttachmentList struct { @@ -41338,9 +41761,9 @@ type ServiceAttachmentList struct { NullFields []string `json:"-"` } -func (s *ServiceAttachmentList) MarshalJSON() ([]byte, error) { +func (s ServiceAttachmentList) MarshalJSON() ([]byte, error) { type NoMethod ServiceAttachmentList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ServiceAttachmentListWarning: [Output Only] Informational warning message. @@ -41423,9 +41846,9 @@ type ServiceAttachmentListWarning struct { NullFields []string `json:"-"` } -func (s *ServiceAttachmentListWarning) MarshalJSON() ([]byte, error) { +func (s ServiceAttachmentListWarning) MarshalJSON() ([]byte, error) { type NoMethod ServiceAttachmentListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ServiceAttachmentListWarningData struct { @@ -41452,9 +41875,9 @@ type ServiceAttachmentListWarningData struct { NullFields []string `json:"-"` } -func (s *ServiceAttachmentListWarningData) MarshalJSON() ([]byte, error) { +func (s ServiceAttachmentListWarningData) MarshalJSON() ([]byte, error) { type NoMethod ServiceAttachmentListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ServiceAttachmentsScopedList struct { @@ -41476,9 +41899,9 @@ type ServiceAttachmentsScopedList struct { NullFields []string `json:"-"` } -func (s *ServiceAttachmentsScopedList) MarshalJSON() ([]byte, error) { +func (s ServiceAttachmentsScopedList) MarshalJSON() ([]byte, error) { type NoMethod ServiceAttachmentsScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ServiceAttachmentsScopedListWarning: Informational warning which replaces @@ -41562,9 +41985,9 @@ type ServiceAttachmentsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *ServiceAttachmentsScopedListWarning) MarshalJSON() ([]byte, error) { +func (s ServiceAttachmentsScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod ServiceAttachmentsScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type 
ServiceAttachmentsScopedListWarningData struct { @@ -41591,9 +42014,9 @@ type ServiceAttachmentsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *ServiceAttachmentsScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s ServiceAttachmentsScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod ServiceAttachmentsScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SetCommonInstanceMetadataOperationMetadata struct { @@ -41615,9 +42038,9 @@ type SetCommonInstanceMetadataOperationMetadata struct { NullFields []string `json:"-"` } -func (s *SetCommonInstanceMetadataOperationMetadata) MarshalJSON() ([]byte, error) { +func (s SetCommonInstanceMetadataOperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod SetCommonInstanceMetadataOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo struct { @@ -41650,9 +42073,9 @@ type SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo struct { NullFields []string `json:"-"` } -func (s *SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo) MarshalJSON() ([]byte, error) { +func (s SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo) MarshalJSON() ([]byte, error) { type NoMethod SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ShareSettings: The share setting for reservations and sole tenancy node @@ -41682,9 +42105,9 @@ type ShareSettings struct { NullFields []string `json:"-"` } -func (s *ShareSettings) MarshalJSON() ([]byte, error) { +func (s ShareSettings) MarshalJSON() ([]byte, error) { type NoMethod ShareSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ShareSettingsProjectConfig: Config for each project in the share settings. @@ -41705,9 +42128,9 @@ type ShareSettingsProjectConfig struct { NullFields []string `json:"-"` } -func (s *ShareSettingsProjectConfig) MarshalJSON() ([]byte, error) { +func (s ShareSettingsProjectConfig) MarshalJSON() ([]byte, error) { type NoMethod ShareSettingsProjectConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ShieldedInstanceConfig: A set of Shielded Instance options. @@ -41734,9 +42157,9 @@ type ShieldedInstanceConfig struct { NullFields []string `json:"-"` } -func (s *ShieldedInstanceConfig) MarshalJSON() ([]byte, error) { +func (s ShieldedInstanceConfig) MarshalJSON() ([]byte, error) { type NoMethod ShieldedInstanceConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ShieldedInstanceIdentity: A Shielded Instance Identity. 
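For context on the hunks above: ServiceAttachment gains PropagatedConnectionLimit and ServiceAttachmentConnectedEndpoint gains PropagatedConnectionCount, both for Private Service Connect propagation through Network Connectivity Center spokes. A minimal sketch of how a producer could set the new limit through this client; the project, region, and attachment names are placeholders, and the field only exists once this vendored version is in place:

    package main

    import (
        "context"
        "fmt"
        "log"

        compute "google.golang.org/api/compute/v1"
    )

    func main() {
        ctx := context.Background()
        svc, err := compute.NewService(ctx) // uses Application Default Credentials
        if err != nil {
            log.Fatal(err)
        }
        // Cap how many NCC consumer spokes each PSC connection can propagate to.
        patch := &compute.ServiceAttachment{PropagatedConnectionLimit: 50}
        op, err := svc.ServiceAttachments.Patch("my-project", "us-central1", "my-attachment", patch).Context(ctx).Do()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("operation:", op.Name)
    }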
@@ -41766,9 +42189,9 @@ type ShieldedInstanceIdentity struct { NullFields []string `json:"-"` } -func (s *ShieldedInstanceIdentity) MarshalJSON() ([]byte, error) { +func (s ShieldedInstanceIdentity) MarshalJSON() ([]byte, error) { type NoMethod ShieldedInstanceIdentity - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ShieldedInstanceIdentityEntry: A Shielded Instance Identity Entry. @@ -41790,9 +42213,9 @@ type ShieldedInstanceIdentityEntry struct { NullFields []string `json:"-"` } -func (s *ShieldedInstanceIdentityEntry) MarshalJSON() ([]byte, error) { +func (s ShieldedInstanceIdentityEntry) MarshalJSON() ([]byte, error) { type NoMethod ShieldedInstanceIdentityEntry - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ShieldedInstanceIntegrityPolicy: The policy describes the baseline against @@ -41814,9 +42237,9 @@ type ShieldedInstanceIntegrityPolicy struct { NullFields []string `json:"-"` } -func (s *ShieldedInstanceIntegrityPolicy) MarshalJSON() ([]byte, error) { +func (s ShieldedInstanceIntegrityPolicy) MarshalJSON() ([]byte, error) { type NoMethod ShieldedInstanceIntegrityPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SignedUrlKey: Represents a customer-supplied Signing Key used by Cloud CDN @@ -41845,9 +42268,9 @@ type SignedUrlKey struct { NullFields []string `json:"-"` } -func (s *SignedUrlKey) MarshalJSON() ([]byte, error) { +func (s SignedUrlKey) MarshalJSON() ([]byte, error) { type NoMethod SignedUrlKey - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Snapshot: Represents a Persistent Disk Snapshot resource. You can use @@ -42032,9 +42455,9 @@ type Snapshot struct { NullFields []string `json:"-"` } -func (s *Snapshot) MarshalJSON() ([]byte, error) { +func (s Snapshot) MarshalJSON() ([]byte, error) { type NoMethod Snapshot - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SnapshotList: Contains a list of Snapshot resources. @@ -42071,9 +42494,9 @@ type SnapshotList struct { NullFields []string `json:"-"` } -func (s *SnapshotList) MarshalJSON() ([]byte, error) { +func (s SnapshotList) MarshalJSON() ([]byte, error) { type NoMethod SnapshotList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SnapshotListWarning: [Output Only] Informational warning message. 
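Nearly every hunk in this file is the same mechanical change: MarshalJSON moves from a pointer receiver (func (s *T)) to a value receiver (func (s T)), and NoMethod(*s) becomes NoMethod(s). The shape of the pattern, distilled into a self-contained sketch; Example is a hypothetical stand-in for the generated structs, and the real code delegates to gensupport.MarshalJSON rather than encoding/json directly:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type Example struct {
        Name string `json:"name,omitempty"`

        ForceSendFields []string `json:"-"`
        NullFields      []string `json:"-"`
    }

    // Value receiver: both Example and *Example satisfy json.Marshaler, so
    // json.Marshal(e) and json.Marshal(&e) take the same custom path. With
    // the old pointer receiver, marshaling a bare Example value silently
    // bypassed the ForceSendFields/NullFields handling.
    func (s Example) MarshalJSON() ([]byte, error) {
        // NoMethod has the same fields but no methods, so marshaling it
        // cannot recurse back into this MarshalJSON.
        type NoMethod Example
        return json.Marshal(NoMethod(s))
    }

    func main() {
        e := Example{Name: "demo"}
        b, _ := json.Marshal(e) // hits the custom marshaler even for the value form
        fmt.Println(string(b))  // {"name":"demo"}
    }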
@@ -42156,9 +42579,9 @@ type SnapshotListWarning struct { NullFields []string `json:"-"` } -func (s *SnapshotListWarning) MarshalJSON() ([]byte, error) { +func (s SnapshotListWarning) MarshalJSON() ([]byte, error) { type NoMethod SnapshotListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SnapshotListWarningData struct { @@ -42185,9 +42608,9 @@ type SnapshotListWarningData struct { NullFields []string `json:"-"` } -func (s *SnapshotListWarningData) MarshalJSON() ([]byte, error) { +func (s SnapshotListWarningData) MarshalJSON() ([]byte, error) { type NoMethod SnapshotListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SnapshotSettings struct { @@ -42211,9 +42634,9 @@ type SnapshotSettings struct { NullFields []string `json:"-"` } -func (s *SnapshotSettings) MarshalJSON() ([]byte, error) { +func (s SnapshotSettings) MarshalJSON() ([]byte, error) { type NoMethod SnapshotSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SnapshotSettingsStorageLocationSettings struct { @@ -42247,9 +42670,9 @@ type SnapshotSettingsStorageLocationSettings struct { NullFields []string `json:"-"` } -func (s *SnapshotSettingsStorageLocationSettings) MarshalJSON() ([]byte, error) { +func (s SnapshotSettingsStorageLocationSettings) MarshalJSON() ([]byte, error) { type NoMethod SnapshotSettingsStorageLocationSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SnapshotSettingsStorageLocationSettingsStorageLocationPreference: A @@ -42271,9 +42694,9 @@ type SnapshotSettingsStorageLocationSettingsStorageLocationPreference struct { NullFields []string `json:"-"` } -func (s *SnapshotSettingsStorageLocationSettingsStorageLocationPreference) MarshalJSON() ([]byte, error) { +func (s SnapshotSettingsStorageLocationSettingsStorageLocationPreference) MarshalJSON() ([]byte, error) { type NoMethod SnapshotSettingsStorageLocationSettingsStorageLocationPreference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SourceDiskEncryptionKey struct { @@ -42299,9 +42722,9 @@ type SourceDiskEncryptionKey struct { NullFields []string `json:"-"` } -func (s *SourceDiskEncryptionKey) MarshalJSON() ([]byte, error) { +func (s SourceDiskEncryptionKey) MarshalJSON() ([]byte, error) { type NoMethod SourceDiskEncryptionKey - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SourceInstanceParams: A specification of the parameters to use when creating @@ -42325,9 +42748,9 @@ type SourceInstanceParams struct { NullFields []string `json:"-"` } -func (s *SourceInstanceParams) MarshalJSON() ([]byte, error) { +func (s SourceInstanceParams) MarshalJSON() ([]byte, error) { type NoMethod SourceInstanceParams - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SourceInstanceProperties: DEPRECATED: Please use compute#instanceProperties @@ -42407,9 
+42830,9 @@ type SourceInstanceProperties struct { NullFields []string `json:"-"` } -func (s *SourceInstanceProperties) MarshalJSON() ([]byte, error) { +func (s SourceInstanceProperties) MarshalJSON() ([]byte, error) { type NoMethod SourceInstanceProperties - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SslCertificate: Represents an SSL certificate resource. Google Compute @@ -42495,9 +42918,9 @@ type SslCertificate struct { NullFields []string `json:"-"` } -func (s *SslCertificate) MarshalJSON() ([]byte, error) { +func (s SslCertificate) MarshalJSON() ([]byte, error) { type NoMethod SslCertificate - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SslCertificateAggregatedList struct { @@ -42536,9 +42959,9 @@ type SslCertificateAggregatedList struct { NullFields []string `json:"-"` } -func (s *SslCertificateAggregatedList) MarshalJSON() ([]byte, error) { +func (s SslCertificateAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod SslCertificateAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SslCertificateAggregatedListWarning: [Output Only] Informational warning @@ -42622,9 +43045,9 @@ type SslCertificateAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *SslCertificateAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s SslCertificateAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod SslCertificateAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SslCertificateAggregatedListWarningData struct { @@ -42651,9 +43074,9 @@ type SslCertificateAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *SslCertificateAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s SslCertificateAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod SslCertificateAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SslCertificateList: Contains a list of SslCertificate resources. @@ -42690,9 +43113,9 @@ type SslCertificateList struct { NullFields []string `json:"-"` } -func (s *SslCertificateList) MarshalJSON() ([]byte, error) { +func (s SslCertificateList) MarshalJSON() ([]byte, error) { type NoMethod SslCertificateList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SslCertificateListWarning: [Output Only] Informational warning message. 
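Every generated MarshalJSON above hands off to gensupport.MarshalJSON together with the struct's ForceSendFields and NullFields slices. From outside the library the effect is simple: ForceSendFields keeps a zero value that omitempty would otherwise drop, and NullFields emits an explicit JSON null. A sketch against compute.Scheduling; any generated struct carrying those two slices behaves the same way:

    package main

    import (
        "encoding/json"
        "fmt"

        compute "google.golang.org/api/compute/v1"
    )

    func main() {
        s := &compute.Scheduling{
            Preemptible:     false, // zero value: omitted unless forced
            ForceSendFields: []string{"Preemptible"},
            NullFields:      []string{"NodeAffinities"},
        }
        b, err := json.Marshal(s)
        if err != nil {
            panic(err)
        }
        // Both fields now appear on the wire:
        // {"nodeAffinities":null,"preemptible":false}
        fmt.Println(string(b))
    }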
@@ -42775,9 +43198,9 @@ type SslCertificateListWarning struct { NullFields []string `json:"-"` } -func (s *SslCertificateListWarning) MarshalJSON() ([]byte, error) { +func (s SslCertificateListWarning) MarshalJSON() ([]byte, error) { type NoMethod SslCertificateListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SslCertificateListWarningData struct { @@ -42804,9 +43227,9 @@ type SslCertificateListWarningData struct { NullFields []string `json:"-"` } -func (s *SslCertificateListWarningData) MarshalJSON() ([]byte, error) { +func (s SslCertificateListWarningData) MarshalJSON() ([]byte, error) { type NoMethod SslCertificateListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SslCertificateManagedSslCertificate: Configuration and status of a managed @@ -42854,9 +43277,9 @@ type SslCertificateManagedSslCertificate struct { NullFields []string `json:"-"` } -func (s *SslCertificateManagedSslCertificate) MarshalJSON() ([]byte, error) { +func (s SslCertificateManagedSslCertificate) MarshalJSON() ([]byte, error) { type NoMethod SslCertificateManagedSslCertificate - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SslCertificateSelfManagedSslCertificate: Configuration and status of a @@ -42882,9 +43305,9 @@ type SslCertificateSelfManagedSslCertificate struct { NullFields []string `json:"-"` } -func (s *SslCertificateSelfManagedSslCertificate) MarshalJSON() ([]byte, error) { +func (s SslCertificateSelfManagedSslCertificate) MarshalJSON() ([]byte, error) { type NoMethod SslCertificateSelfManagedSslCertificate - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SslCertificatesScopedList struct { @@ -42906,9 +43329,9 @@ type SslCertificatesScopedList struct { NullFields []string `json:"-"` } -func (s *SslCertificatesScopedList) MarshalJSON() ([]byte, error) { +func (s SslCertificatesScopedList) MarshalJSON() ([]byte, error) { type NoMethod SslCertificatesScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SslCertificatesScopedListWarning: Informational warning which replaces the @@ -42992,9 +43415,9 @@ type SslCertificatesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *SslCertificatesScopedListWarning) MarshalJSON() ([]byte, error) { +func (s SslCertificatesScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod SslCertificatesScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SslCertificatesScopedListWarningData struct { @@ -43021,9 +43444,9 @@ type SslCertificatesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *SslCertificatesScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s SslCertificatesScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod SslCertificatesScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), 
s.ForceSendFields, s.NullFields) } type SslPoliciesAggregatedList struct { @@ -43063,9 +43486,9 @@ type SslPoliciesAggregatedList struct { NullFields []string `json:"-"` } -func (s *SslPoliciesAggregatedList) MarshalJSON() ([]byte, error) { +func (s SslPoliciesAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod SslPoliciesAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SslPoliciesAggregatedListWarning: [Output Only] Informational warning @@ -43149,9 +43572,9 @@ type SslPoliciesAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *SslPoliciesAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s SslPoliciesAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod SslPoliciesAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SslPoliciesAggregatedListWarningData struct { @@ -43178,9 +43601,9 @@ type SslPoliciesAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *SslPoliciesAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s SslPoliciesAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod SslPoliciesAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SslPoliciesList struct { @@ -43217,9 +43640,9 @@ type SslPoliciesList struct { NullFields []string `json:"-"` } -func (s *SslPoliciesList) MarshalJSON() ([]byte, error) { +func (s SslPoliciesList) MarshalJSON() ([]byte, error) { type NoMethod SslPoliciesList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SslPoliciesListWarning: [Output Only] Informational warning message. 
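The long run of *Warning / *WarningData types in these hunks all share one shape: a machine-readable Code, a human-readable Message, and a Data list of Key/Value pairs. A small helper showing how a caller would surface one, using the SslPoliciesList variant; the warning value is assumed to come from a prior List call:

    package main

    import (
        "fmt"

        compute "google.golang.org/api/compute/v1"
    )

    // printWarning surfaces an informational warning attached to a list
    // response; every *Warning type in this file has the same three fields.
    func printWarning(w *compute.SslPoliciesListWarning) {
        if w == nil {
            return // no warning on this response
        }
        fmt.Printf("warning %s: %s\n", w.Code, w.Message)
        for _, d := range w.Data {
            fmt.Printf("  %s=%s\n", d.Key, d.Value)
        }
    }

    func main() {
        printWarning(nil) // would normally receive list.Warning
    }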
@@ -43302,9 +43725,9 @@ type SslPoliciesListWarning struct { NullFields []string `json:"-"` } -func (s *SslPoliciesListWarning) MarshalJSON() ([]byte, error) { +func (s SslPoliciesListWarning) MarshalJSON() ([]byte, error) { type NoMethod SslPoliciesListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SslPoliciesListWarningData struct { @@ -43331,9 +43754,9 @@ type SslPoliciesListWarningData struct { NullFields []string `json:"-"` } -func (s *SslPoliciesListWarningData) MarshalJSON() ([]byte, error) { +func (s SslPoliciesListWarningData) MarshalJSON() ([]byte, error) { type NoMethod SslPoliciesListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SslPoliciesListAvailableFeaturesResponse struct { @@ -43354,9 +43777,9 @@ type SslPoliciesListAvailableFeaturesResponse struct { NullFields []string `json:"-"` } -func (s *SslPoliciesListAvailableFeaturesResponse) MarshalJSON() ([]byte, error) { +func (s SslPoliciesListAvailableFeaturesResponse) MarshalJSON() ([]byte, error) { type NoMethod SslPoliciesListAvailableFeaturesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SslPoliciesScopedList struct { @@ -43378,9 +43801,9 @@ type SslPoliciesScopedList struct { NullFields []string `json:"-"` } -func (s *SslPoliciesScopedList) MarshalJSON() ([]byte, error) { +func (s SslPoliciesScopedList) MarshalJSON() ([]byte, error) { type NoMethod SslPoliciesScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SslPoliciesScopedListWarning: Informational warning which replaces the list @@ -43464,9 +43887,9 @@ type SslPoliciesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *SslPoliciesScopedListWarning) MarshalJSON() ([]byte, error) { +func (s SslPoliciesScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod SslPoliciesScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SslPoliciesScopedListWarningData struct { @@ -43493,9 +43916,9 @@ type SslPoliciesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *SslPoliciesScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s SslPoliciesScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod SslPoliciesScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SslPolicy: Represents an SSL Policy resource. 
Use SSL policies to control @@ -43584,9 +44007,9 @@ type SslPolicy struct { NullFields []string `json:"-"` } -func (s *SslPolicy) MarshalJSON() ([]byte, error) { +func (s SslPolicy) MarshalJSON() ([]byte, error) { type NoMethod SslPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SslPolicyWarnings struct { @@ -43668,9 +44091,9 @@ type SslPolicyWarnings struct { NullFields []string `json:"-"` } -func (s *SslPolicyWarnings) MarshalJSON() ([]byte, error) { +func (s SslPolicyWarnings) MarshalJSON() ([]byte, error) { type NoMethod SslPolicyWarnings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SslPolicyWarningsData struct { @@ -43697,9 +44120,9 @@ type SslPolicyWarningsData struct { NullFields []string `json:"-"` } -func (s *SslPolicyWarningsData) MarshalJSON() ([]byte, error) { +func (s SslPolicyWarningsData) MarshalJSON() ([]byte, error) { type NoMethod SslPolicyWarningsData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SslPolicyReference struct { @@ -43719,9 +44142,9 @@ type SslPolicyReference struct { NullFields []string `json:"-"` } -func (s *SslPolicyReference) MarshalJSON() ([]byte, error) { +func (s SslPolicyReference) MarshalJSON() ([]byte, error) { type NoMethod SslPolicyReference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type StatefulPolicy struct { @@ -43739,9 +44162,9 @@ type StatefulPolicy struct { NullFields []string `json:"-"` } -func (s *StatefulPolicy) MarshalJSON() ([]byte, error) { +func (s StatefulPolicy) MarshalJSON() ([]byte, error) { type NoMethod StatefulPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StatefulPolicyPreservedState: Configuration of preserved resources. 
@@ -43770,9 +44193,9 @@ type StatefulPolicyPreservedState struct { NullFields []string `json:"-"` } -func (s *StatefulPolicyPreservedState) MarshalJSON() ([]byte, error) { +func (s StatefulPolicyPreservedState) MarshalJSON() ([]byte, error) { type NoMethod StatefulPolicyPreservedState - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type StatefulPolicyPreservedStateDiskDevice struct { @@ -43799,9 +44222,9 @@ type StatefulPolicyPreservedStateDiskDevice struct { NullFields []string `json:"-"` } -func (s *StatefulPolicyPreservedStateDiskDevice) MarshalJSON() ([]byte, error) { +func (s StatefulPolicyPreservedStateDiskDevice) MarshalJSON() ([]byte, error) { type NoMethod StatefulPolicyPreservedStateDiskDevice - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type StatefulPolicyPreservedStateNetworkIp struct { @@ -43827,9 +44250,9 @@ type StatefulPolicyPreservedStateNetworkIp struct { NullFields []string `json:"-"` } -func (s *StatefulPolicyPreservedStateNetworkIp) MarshalJSON() ([]byte, error) { +func (s StatefulPolicyPreservedStateNetworkIp) MarshalJSON() ([]byte, error) { type NoMethod StatefulPolicyPreservedStateNetworkIp - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Status: The `Status` type defines a logical error model that is suitable for @@ -43861,9 +44284,9 @@ type Status struct { NullFields []string `json:"-"` } -func (s *Status) MarshalJSON() ([]byte, error) { +func (s Status) MarshalJSON() ([]byte, error) { type NoMethod Status - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StoragePool: Represents a zonal storage pool resource. @@ -43918,7 +44341,9 @@ type StoragePool struct { // pool disks' exclusive use. // "UNSPECIFIED" PerformanceProvisioningType string `json:"performanceProvisioningType,omitempty"` - // PoolProvisionedCapacityGb: Size, in GiB, of the storage pool. + // PoolProvisionedCapacityGb: Size, in GiB, of the storage pool. For more + // information about the size limits, see + // https://cloud.google.com/compute/docs/disks/storage-pools. PoolProvisionedCapacityGb int64 `json:"poolProvisionedCapacityGb,omitempty,string"` // PoolProvisionedIops: Provisioned IOPS of the storage pool. Only relevant if // the storage pool type is hyperdisk-balanced. 
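The StoragePool hunk above touches PoolProvisionedCapacityGb, one of many int64 fields tagged json:"...,omitempty,string". The ,string option makes encoding/json write and read the number as a quoted decimal string, which is how this API carries 64-bit integers safely past JSON consumers that only have float64 numbers. A self-contained sketch with a hypothetical struct:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type pool struct {
        // ",string" quotes the value on the wire in both directions.
        PoolProvisionedCapacityGb int64 `json:"poolProvisionedCapacityGb,omitempty,string"`
    }

    func main() {
        b, _ := json.Marshal(pool{PoolProvisionedCapacityGb: 10240})
        fmt.Println(string(b)) // {"poolProvisionedCapacityGb":"10240"}

        var p pool
        _ = json.Unmarshal([]byte(`{"poolProvisionedCapacityGb":"20480"}`), &p)
        fmt.Println(p.PoolProvisionedCapacityGb) // 20480
    }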
@@ -43971,9 +44396,9 @@ type StoragePool struct { NullFields []string `json:"-"` } -func (s *StoragePool) MarshalJSON() ([]byte, error) { +func (s StoragePool) MarshalJSON() ([]byte, error) { type NoMethod StoragePool - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type StoragePoolAggregatedList struct { @@ -44013,9 +44438,9 @@ type StoragePoolAggregatedList struct { NullFields []string `json:"-"` } -func (s *StoragePoolAggregatedList) MarshalJSON() ([]byte, error) { +func (s StoragePoolAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod StoragePoolAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StoragePoolAggregatedListWarning: [Output Only] Informational warning @@ -44099,9 +44524,9 @@ type StoragePoolAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *StoragePoolAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s StoragePoolAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod StoragePoolAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type StoragePoolAggregatedListWarningData struct { @@ -44128,9 +44553,9 @@ type StoragePoolAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *StoragePoolAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s StoragePoolAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod StoragePoolAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type StoragePoolDisk struct { @@ -44180,9 +44605,9 @@ type StoragePoolDisk struct { NullFields []string `json:"-"` } -func (s *StoragePoolDisk) MarshalJSON() ([]byte, error) { +func (s StoragePoolDisk) MarshalJSON() ([]byte, error) { type NoMethod StoragePoolDisk - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StoragePoolList: A list of StoragePool resources. @@ -44224,9 +44649,9 @@ type StoragePoolList struct { NullFields []string `json:"-"` } -func (s *StoragePoolList) MarshalJSON() ([]byte, error) { +func (s StoragePoolList) MarshalJSON() ([]byte, error) { type NoMethod StoragePoolList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StoragePoolListWarning: [Output Only] Informational warning message. 
@@ -44309,9 +44734,9 @@ type StoragePoolListWarning struct { NullFields []string `json:"-"` } -func (s *StoragePoolListWarning) MarshalJSON() ([]byte, error) { +func (s StoragePoolListWarning) MarshalJSON() ([]byte, error) { type NoMethod StoragePoolListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type StoragePoolListWarningData struct { @@ -44338,9 +44763,9 @@ type StoragePoolListWarningData struct { NullFields []string `json:"-"` } -func (s *StoragePoolListWarningData) MarshalJSON() ([]byte, error) { +func (s StoragePoolListWarningData) MarshalJSON() ([]byte, error) { type NoMethod StoragePoolListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type StoragePoolListDisks struct { @@ -44381,9 +44806,9 @@ type StoragePoolListDisks struct { NullFields []string `json:"-"` } -func (s *StoragePoolListDisks) MarshalJSON() ([]byte, error) { +func (s StoragePoolListDisks) MarshalJSON() ([]byte, error) { type NoMethod StoragePoolListDisks - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StoragePoolListDisksWarning: [Output Only] Informational warning message. @@ -44466,9 +44891,9 @@ type StoragePoolListDisksWarning struct { NullFields []string `json:"-"` } -func (s *StoragePoolListDisksWarning) MarshalJSON() ([]byte, error) { +func (s StoragePoolListDisksWarning) MarshalJSON() ([]byte, error) { type NoMethod StoragePoolListDisksWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type StoragePoolListDisksWarningData struct { @@ -44495,9 +44920,9 @@ type StoragePoolListDisksWarningData struct { NullFields []string `json:"-"` } -func (s *StoragePoolListDisksWarningData) MarshalJSON() ([]byte, error) { +func (s StoragePoolListDisksWarningData) MarshalJSON() ([]byte, error) { type NoMethod StoragePoolListDisksWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StoragePoolResourceStatus: [Output Only] Contains output only fields. @@ -44515,8 +44940,10 @@ type StoragePoolResourceStatus struct { // bytes written to the disks in the pool, in contrast to the capacity of those // disks. PoolUsedCapacityBytes int64 `json:"poolUsedCapacityBytes,omitempty,string"` - // PoolUsedIops: Sum of all the disks' provisioned IOPS, minus some amount that - // is allowed per disk that is not counted towards pool's IOPS capacity. + // PoolUsedIops: [Output Only] Sum of all the disks' provisioned IOPS, minus + // some amount that is allowed per disk that is not counted towards pool's IOPS + // capacity. For more information, see + // https://cloud.google.com/compute/docs/disks/storage-pools. PoolUsedIops int64 `json:"poolUsedIops,omitempty,string"` // PoolUsedThroughput: [Output Only] Sum of all the disks' provisioned // throughput in MB/s. 
@@ -44548,9 +44975,9 @@ type StoragePoolResourceStatus struct { NullFields []string `json:"-"` } -func (s *StoragePoolResourceStatus) MarshalJSON() ([]byte, error) { +func (s StoragePoolResourceStatus) MarshalJSON() ([]byte, error) { type NoMethod StoragePoolResourceStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type StoragePoolType struct { @@ -44612,9 +45039,9 @@ type StoragePoolType struct { NullFields []string `json:"-"` } -func (s *StoragePoolType) MarshalJSON() ([]byte, error) { +func (s StoragePoolType) MarshalJSON() ([]byte, error) { type NoMethod StoragePoolType - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type StoragePoolTypeAggregatedList struct { @@ -44651,9 +45078,9 @@ type StoragePoolTypeAggregatedList struct { NullFields []string `json:"-"` } -func (s *StoragePoolTypeAggregatedList) MarshalJSON() ([]byte, error) { +func (s StoragePoolTypeAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod StoragePoolTypeAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StoragePoolTypeAggregatedListWarning: [Output Only] Informational warning @@ -44737,9 +45164,9 @@ type StoragePoolTypeAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *StoragePoolTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s StoragePoolTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod StoragePoolTypeAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type StoragePoolTypeAggregatedListWarningData struct { @@ -44766,9 +45193,9 @@ type StoragePoolTypeAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *StoragePoolTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s StoragePoolTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod StoragePoolTypeAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StoragePoolTypeList: Contains a list of storage pool types. @@ -44806,9 +45233,9 @@ type StoragePoolTypeList struct { NullFields []string `json:"-"` } -func (s *StoragePoolTypeList) MarshalJSON() ([]byte, error) { +func (s StoragePoolTypeList) MarshalJSON() ([]byte, error) { type NoMethod StoragePoolTypeList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StoragePoolTypeListWarning: [Output Only] Informational warning message. 
@@ -44891,9 +45318,9 @@ type StoragePoolTypeListWarning struct { NullFields []string `json:"-"` } -func (s *StoragePoolTypeListWarning) MarshalJSON() ([]byte, error) { +func (s StoragePoolTypeListWarning) MarshalJSON() ([]byte, error) { type NoMethod StoragePoolTypeListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type StoragePoolTypeListWarningData struct { @@ -44920,9 +45347,9 @@ type StoragePoolTypeListWarningData struct { NullFields []string `json:"-"` } -func (s *StoragePoolTypeListWarningData) MarshalJSON() ([]byte, error) { +func (s StoragePoolTypeListWarningData) MarshalJSON() ([]byte, error) { type NoMethod StoragePoolTypeListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type StoragePoolTypesScopedList struct { @@ -44945,9 +45372,9 @@ type StoragePoolTypesScopedList struct { NullFields []string `json:"-"` } -func (s *StoragePoolTypesScopedList) MarshalJSON() ([]byte, error) { +func (s StoragePoolTypesScopedList) MarshalJSON() ([]byte, error) { type NoMethod StoragePoolTypesScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StoragePoolTypesScopedListWarning: [Output Only] Informational warning which @@ -45031,9 +45458,9 @@ type StoragePoolTypesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *StoragePoolTypesScopedListWarning) MarshalJSON() ([]byte, error) { +func (s StoragePoolTypesScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod StoragePoolTypesScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type StoragePoolTypesScopedListWarningData struct { @@ -45060,9 +45487,9 @@ type StoragePoolTypesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *StoragePoolTypesScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s StoragePoolTypesScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod StoragePoolTypesScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type StoragePoolsScopedList struct { @@ -45084,9 +45511,9 @@ type StoragePoolsScopedList struct { NullFields []string `json:"-"` } -func (s *StoragePoolsScopedList) MarshalJSON() ([]byte, error) { +func (s StoragePoolsScopedList) MarshalJSON() ([]byte, error) { type NoMethod StoragePoolsScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StoragePoolsScopedListWarning: [Output Only] Informational warning which @@ -45170,9 +45597,9 @@ type StoragePoolsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *StoragePoolsScopedListWarning) MarshalJSON() ([]byte, error) { +func (s StoragePoolsScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod StoragePoolsScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type StoragePoolsScopedListWarningData struct { @@ -45199,9 +45626,9 @@ type 
StoragePoolsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *StoragePoolsScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s StoragePoolsScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod StoragePoolsScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Subnetwork: Represents a Subnetwork resource. A subnetwork (also known as a @@ -45237,8 +45664,8 @@ type Subnetwork struct { // Id: [Output Only] The unique identifier for the resource. This identifier is // defined by the server. Id uint64 `json:"id,omitempty,string"` - // InternalIpv6Prefix: [Output Only] The internal IPv6 address range that is - // assigned to this subnetwork. + // InternalIpv6Prefix: The internal IPv6 address range that is owned by this + // subnetwork. InternalIpv6Prefix string `json:"internalIpv6Prefix,omitempty"` // IpCidrRange: The range of internal addresses that are owned by this // subnetwork. Provide this property when you create the subnetwork. For @@ -45352,6 +45779,7 @@ type Subnetwork struct { // "IPV4_IPV6" - New VMs in this subnet can have both IPv4 and IPv6 // addresses. // "IPV4_ONLY" - New VMs in this subnet will only be assigned IPv4 addresses. + // "IPV6_ONLY" - New VMs in this subnet will only be assigned IPv6 addresses. StackType string `json:"stackType,omitempty"` // State: [Output Only] The state of the subnetwork, which can be one of the // following values: READY: Subnetwork is created and ready to use DRAINING: @@ -45380,9 +45808,9 @@ type Subnetwork struct { NullFields []string `json:"-"` } -func (s *Subnetwork) MarshalJSON() ([]byte, error) { +func (s Subnetwork) MarshalJSON() ([]byte, error) { type NoMethod Subnetwork - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SubnetworkAggregatedList struct { @@ -45421,9 +45849,9 @@ type SubnetworkAggregatedList struct { NullFields []string `json:"-"` } -func (s *SubnetworkAggregatedList) MarshalJSON() ([]byte, error) { +func (s SubnetworkAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod SubnetworkAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SubnetworkAggregatedListWarning: [Output Only] Informational warning @@ -45507,9 +45935,9 @@ type SubnetworkAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *SubnetworkAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s SubnetworkAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod SubnetworkAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SubnetworkAggregatedListWarningData struct { @@ -45536,9 +45964,9 @@ type SubnetworkAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *SubnetworkAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s SubnetworkAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod SubnetworkAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SubnetworkList: Contains a list of 
Subnetwork resources. @@ -45576,9 +46004,9 @@ type SubnetworkList struct { NullFields []string `json:"-"` } -func (s *SubnetworkList) MarshalJSON() ([]byte, error) { +func (s SubnetworkList) MarshalJSON() ([]byte, error) { type NoMethod SubnetworkList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SubnetworkListWarning: [Output Only] Informational warning message. @@ -45661,9 +46089,9 @@ type SubnetworkListWarning struct { NullFields []string `json:"-"` } -func (s *SubnetworkListWarning) MarshalJSON() ([]byte, error) { +func (s SubnetworkListWarning) MarshalJSON() ([]byte, error) { type NoMethod SubnetworkListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SubnetworkListWarningData struct { @@ -45690,9 +46118,9 @@ type SubnetworkListWarningData struct { NullFields []string `json:"-"` } -func (s *SubnetworkListWarningData) MarshalJSON() ([]byte, error) { +func (s SubnetworkListWarningData) MarshalJSON() ([]byte, error) { type NoMethod SubnetworkListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SubnetworkLogConfig: The available logging options for this subnetwork. @@ -45753,9 +46181,9 @@ type SubnetworkLogConfig struct { NullFields []string `json:"-"` } -func (s *SubnetworkLogConfig) MarshalJSON() ([]byte, error) { +func (s SubnetworkLogConfig) MarshalJSON() ([]byte, error) { type NoMethod SubnetworkLogConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *SubnetworkLogConfig) UnmarshalJSON(data []byte) error { @@ -45800,9 +46228,9 @@ type SubnetworkSecondaryRange struct { NullFields []string `json:"-"` } -func (s *SubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { +func (s SubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { type NoMethod SubnetworkSecondaryRange - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SubnetworksExpandIpCidrRangeRequest struct { @@ -45824,9 +46252,9 @@ type SubnetworksExpandIpCidrRangeRequest struct { NullFields []string `json:"-"` } -func (s *SubnetworksExpandIpCidrRangeRequest) MarshalJSON() ([]byte, error) { +func (s SubnetworksExpandIpCidrRangeRequest) MarshalJSON() ([]byte, error) { type NoMethod SubnetworksExpandIpCidrRangeRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SubnetworksScopedList struct { @@ -45848,9 +46276,9 @@ type SubnetworksScopedList struct { NullFields []string `json:"-"` } -func (s *SubnetworksScopedList) MarshalJSON() ([]byte, error) { +func (s SubnetworksScopedList) MarshalJSON() ([]byte, error) { type NoMethod SubnetworksScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SubnetworksScopedListWarning: An informational warning that appears when the @@ -45934,9 +46362,9 @@ type SubnetworksScopedListWarning struct { NullFields []string `json:"-"` } -func (s *SubnetworksScopedListWarning) 
MarshalJSON() ([]byte, error) { +func (s SubnetworksScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod SubnetworksScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SubnetworksScopedListWarningData struct { @@ -45963,9 +46391,9 @@ type SubnetworksScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *SubnetworksScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s SubnetworksScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod SubnetworksScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SubnetworksSetPrivateIpGoogleAccessRequest struct { @@ -45983,9 +46411,9 @@ type SubnetworksSetPrivateIpGoogleAccessRequest struct { NullFields []string `json:"-"` } -func (s *SubnetworksSetPrivateIpGoogleAccessRequest) MarshalJSON() ([]byte, error) { +func (s SubnetworksSetPrivateIpGoogleAccessRequest) MarshalJSON() ([]byte, error) { type NoMethod SubnetworksSetPrivateIpGoogleAccessRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Subsetting: Subsetting configuration for this BackendService. Currently this @@ -46019,9 +46447,9 @@ type Subsetting struct { NullFields []string `json:"-"` } -func (s *Subsetting) MarshalJSON() ([]byte, error) { +func (s Subsetting) MarshalJSON() ([]byte, error) { type NoMethod Subsetting - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TCPHealthCheck struct { @@ -46089,9 +46517,9 @@ type TCPHealthCheck struct { NullFields []string `json:"-"` } -func (s *TCPHealthCheck) MarshalJSON() ([]byte, error) { +func (s TCPHealthCheck) MarshalJSON() ([]byte, error) { type NoMethod TCPHealthCheck - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Tags: A set of instance tags. @@ -46119,9 +46547,9 @@ type Tags struct { NullFields []string `json:"-"` } -func (s *Tags) MarshalJSON() ([]byte, error) { +func (s Tags) MarshalJSON() ([]byte, error) { type NoMethod Tags - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetGrpcProxy: Represents a Target gRPC Proxy resource. 
A target gRPC @@ -46191,9 +46619,9 @@ type TargetGrpcProxy struct { NullFields []string `json:"-"` } -func (s *TargetGrpcProxy) MarshalJSON() ([]byte, error) { +func (s TargetGrpcProxy) MarshalJSON() ([]byte, error) { type NoMethod TargetGrpcProxy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetGrpcProxyList struct { @@ -46230,9 +46658,9 @@ type TargetGrpcProxyList struct { NullFields []string `json:"-"` } -func (s *TargetGrpcProxyList) MarshalJSON() ([]byte, error) { +func (s TargetGrpcProxyList) MarshalJSON() ([]byte, error) { type NoMethod TargetGrpcProxyList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetGrpcProxyListWarning: [Output Only] Informational warning message. @@ -46315,9 +46743,9 @@ type TargetGrpcProxyListWarning struct { NullFields []string `json:"-"` } -func (s *TargetGrpcProxyListWarning) MarshalJSON() ([]byte, error) { +func (s TargetGrpcProxyListWarning) MarshalJSON() ([]byte, error) { type NoMethod TargetGrpcProxyListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetGrpcProxyListWarningData struct { @@ -46344,9 +46772,9 @@ type TargetGrpcProxyListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetGrpcProxyListWarningData) MarshalJSON() ([]byte, error) { +func (s TargetGrpcProxyListWarningData) MarshalJSON() ([]byte, error) { type NoMethod TargetGrpcProxyListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetHttpProxiesScopedList struct { @@ -46368,9 +46796,9 @@ type TargetHttpProxiesScopedList struct { NullFields []string `json:"-"` } -func (s *TargetHttpProxiesScopedList) MarshalJSON() ([]byte, error) { +func (s TargetHttpProxiesScopedList) MarshalJSON() ([]byte, error) { type NoMethod TargetHttpProxiesScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetHttpProxiesScopedListWarning: Informational warning which replaces the @@ -46454,9 +46882,9 @@ type TargetHttpProxiesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetHttpProxiesScopedListWarning) MarshalJSON() ([]byte, error) { +func (s TargetHttpProxiesScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod TargetHttpProxiesScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetHttpProxiesScopedListWarningData struct { @@ -46483,9 +46911,9 @@ type TargetHttpProxiesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetHttpProxiesScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s TargetHttpProxiesScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod TargetHttpProxiesScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetHttpProxy: Represents a Target HTTP Proxy resource. 
Google Compute @@ -46567,9 +46995,9 @@ type TargetHttpProxy struct { NullFields []string `json:"-"` } -func (s *TargetHttpProxy) MarshalJSON() ([]byte, error) { +func (s TargetHttpProxy) MarshalJSON() ([]byte, error) { type NoMethod TargetHttpProxy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetHttpProxyAggregatedList struct { @@ -46606,9 +47034,9 @@ type TargetHttpProxyAggregatedList struct { NullFields []string `json:"-"` } -func (s *TargetHttpProxyAggregatedList) MarshalJSON() ([]byte, error) { +func (s TargetHttpProxyAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod TargetHttpProxyAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetHttpProxyList: A list of TargetHttpProxy resources. @@ -46646,9 +47074,9 @@ type TargetHttpProxyList struct { NullFields []string `json:"-"` } -func (s *TargetHttpProxyList) MarshalJSON() ([]byte, error) { +func (s TargetHttpProxyList) MarshalJSON() ([]byte, error) { type NoMethod TargetHttpProxyList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetHttpProxyListWarning: [Output Only] Informational warning message. @@ -46731,9 +47159,9 @@ type TargetHttpProxyListWarning struct { NullFields []string `json:"-"` } -func (s *TargetHttpProxyListWarning) MarshalJSON() ([]byte, error) { +func (s TargetHttpProxyListWarning) MarshalJSON() ([]byte, error) { type NoMethod TargetHttpProxyListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetHttpProxyListWarningData struct { @@ -46760,9 +47188,9 @@ type TargetHttpProxyListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetHttpProxyListWarningData) MarshalJSON() ([]byte, error) { +func (s TargetHttpProxyListWarningData) MarshalJSON() ([]byte, error) { type NoMethod TargetHttpProxyListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetHttpsProxiesScopedList struct { @@ -46784,9 +47212,9 @@ type TargetHttpsProxiesScopedList struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxiesScopedList) MarshalJSON() ([]byte, error) { +func (s TargetHttpsProxiesScopedList) MarshalJSON() ([]byte, error) { type NoMethod TargetHttpsProxiesScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetHttpsProxiesScopedListWarning: Informational warning which replaces @@ -46870,9 +47298,9 @@ type TargetHttpsProxiesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxiesScopedListWarning) MarshalJSON() ([]byte, error) { +func (s TargetHttpsProxiesScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod TargetHttpsProxiesScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetHttpsProxiesScopedListWarningData struct { @@ -46899,9 +47327,9 @@ type TargetHttpsProxiesScopedListWarningData struct { 
NullFields []string `json:"-"` } -func (s *TargetHttpsProxiesScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s TargetHttpsProxiesScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod TargetHttpsProxiesScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetHttpsProxiesSetCertificateMapRequest struct { @@ -46923,9 +47351,9 @@ type TargetHttpsProxiesSetCertificateMapRequest struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxiesSetCertificateMapRequest) MarshalJSON() ([]byte, error) { +func (s TargetHttpsProxiesSetCertificateMapRequest) MarshalJSON() ([]byte, error) { type NoMethod TargetHttpsProxiesSetCertificateMapRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetHttpsProxiesSetQuicOverrideRequest struct { @@ -46951,9 +47379,9 @@ type TargetHttpsProxiesSetQuicOverrideRequest struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxiesSetQuicOverrideRequest) MarshalJSON() ([]byte, error) { +func (s TargetHttpsProxiesSetQuicOverrideRequest) MarshalJSON() ([]byte, error) { type NoMethod TargetHttpsProxiesSetQuicOverrideRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetHttpsProxiesSetSslCertificatesRequest struct { @@ -46974,9 +47402,9 @@ type TargetHttpsProxiesSetSslCertificatesRequest struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { +func (s TargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { type NoMethod TargetHttpsProxiesSetSslCertificatesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetHttpsProxy: Represents a Target HTTPS Proxy resource. Google Compute @@ -47003,8 +47431,10 @@ type TargetHttpsProxy struct { AuthorizationPolicy string `json:"authorizationPolicy,omitempty"` // CertificateMap: URL of a certificate map that identifies a certificate map // associated with the given target proxy. This field can only be set for - // global target proxies. If set, sslCertificates will be ignored. Accepted - // format is //certificatemanager.googleapis.com/projects/{project + // Global external Application Load Balancer or Classic Application Load + // Balancer. For other products use Certificate Manager Certificates instead. + // If set, sslCertificates will be ignored. Accepted format is + // //certificatemanager.googleapis.com/projects/{project // }/locations/{location}/certificateMaps/{resourceName}. CertificateMap string `json:"certificateMap,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text format. @@ -47081,9 +47511,19 @@ type TargetHttpsProxy struct { ServerTlsPolicy string `json:"serverTlsPolicy,omitempty"` // SslCertificates: URLs to SslCertificate resources that are used to // authenticate connections between users and the load balancer. At least one - // SSL certificate must be specified. Currently, you may specify up to 15 SSL - // certificates. sslCertificates do not apply when the load balancing scheme is - // set to INTERNAL_SELF_MANAGED. + // SSL certificate must be specified. 
SslCertificates do not apply when the + // load balancing scheme is set to INTERNAL_SELF_MANAGED. The URLs should refer + // to a SSL Certificate resource or Certificate Manager Certificate resource. + // Mixing Classic Certificates and Certificate Manager Certificates is not + // allowed. Certificate Manager Certificates must include the + // certificatemanager API. Certificate Manager Certificates are not supported + // by Global external Application Load Balancer or Classic Application Load + // Balancer, use certificate_map instead. Currently, you may specify up to 15 + // Classic SSL Certificates. Certificate Manager Certificates accepted formats + // are: - //certificatemanager.googleapis.com/projects/{project}/locations/{ + // location}/certificates/{resourceName}. - + // https://certificatemanager.googleapis.com/v1alpha1/projects/{project + // }/locations/{location}/certificates/{resourceName}. SslCertificates []string `json:"sslCertificates,omitempty"` // SslPolicy: URL of SslPolicy resource that will be associated with the // TargetHttpsProxy resource. If not set, the TargetHttpsProxy resource has no @@ -47141,9 +47581,9 @@ type TargetHttpsProxy struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxy) MarshalJSON() ([]byte, error) { +func (s TargetHttpsProxy) MarshalJSON() ([]byte, error) { type NoMethod TargetHttpsProxy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetHttpsProxyAggregatedList struct { @@ -47182,9 +47622,9 @@ type TargetHttpsProxyAggregatedList struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxyAggregatedList) MarshalJSON() ([]byte, error) { +func (s TargetHttpsProxyAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod TargetHttpsProxyAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetHttpsProxyAggregatedListWarning: [Output Only] Informational warning @@ -47268,9 +47708,9 @@ type TargetHttpsProxyAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxyAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s TargetHttpsProxyAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod TargetHttpsProxyAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetHttpsProxyAggregatedListWarningData struct { @@ -47297,9 +47737,9 @@ type TargetHttpsProxyAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxyAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s TargetHttpsProxyAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod TargetHttpsProxyAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetHttpsProxyList: Contains a list of TargetHttpsProxy resources. 
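The updated TargetHttpsProxy comments above make certificateMap and the classic sslCertificates list mutually exclusive: when a certificate map is set, sslCertificates is ignored. A minimal sketch of the two configurations, assuming the compute/v1 client package and using hypothetical project, certificate, and map names:

    package main

    import compute "google.golang.org/api/compute/v1"

    func main() {
        // Classic configuration: up to 15 SslCertificate URLs.
        classic := &compute.TargetHttpsProxy{
            Name:            "example-proxy",
            SslCertificates: []string{"projects/my-proj/global/sslCertificates/my-cert"},
        }
        // Certificate Manager configuration: certificateMap is set in the
        // accepted resource-name format, and per the comment above any
        // sslCertificates entries would be ignored, so only one is populated.
        managed := &compute.TargetHttpsProxy{
            Name:           "example-proxy",
            CertificateMap: "//certificatemanager.googleapis.com/projects/my-proj/locations/global/certificateMaps/my-map",
        }
        _, _ = classic, managed
    }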
@@ -47337,9 +47777,9 @@ type TargetHttpsProxyList struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxyList) MarshalJSON() ([]byte, error) { +func (s TargetHttpsProxyList) MarshalJSON() ([]byte, error) { type NoMethod TargetHttpsProxyList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetHttpsProxyListWarning: [Output Only] Informational warning message. @@ -47422,9 +47862,9 @@ type TargetHttpsProxyListWarning struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxyListWarning) MarshalJSON() ([]byte, error) { +func (s TargetHttpsProxyListWarning) MarshalJSON() ([]byte, error) { type NoMethod TargetHttpsProxyListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetHttpsProxyListWarningData struct { @@ -47451,9 +47891,9 @@ type TargetHttpsProxyListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxyListWarningData) MarshalJSON() ([]byte, error) { +func (s TargetHttpsProxyListWarningData) MarshalJSON() ([]byte, error) { type NoMethod TargetHttpsProxyListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetInstance: Represents a Target Instance resource. You can use a target @@ -47524,9 +47964,9 @@ type TargetInstance struct { NullFields []string `json:"-"` } -func (s *TargetInstance) MarshalJSON() ([]byte, error) { +func (s TargetInstance) MarshalJSON() ([]byte, error) { type NoMethod TargetInstance - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetInstanceAggregatedList struct { @@ -47564,9 +48004,9 @@ type TargetInstanceAggregatedList struct { NullFields []string `json:"-"` } -func (s *TargetInstanceAggregatedList) MarshalJSON() ([]byte, error) { +func (s TargetInstanceAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod TargetInstanceAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetInstanceAggregatedListWarning: [Output Only] Informational warning @@ -47650,9 +48090,9 @@ type TargetInstanceAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetInstanceAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s TargetInstanceAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod TargetInstanceAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetInstanceAggregatedListWarningData struct { @@ -47679,9 +48119,9 @@ type TargetInstanceAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetInstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s TargetInstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod TargetInstanceAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetInstanceList: Contains a list of TargetInstance resources. 
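Every hunk in this file applies the same mechanical change: MarshalJSON moves from a pointer receiver to a value receiver while keeping the NoMethod alias. A self-contained sketch of why both parts work, with the struct re-declared here for illustration and plain encoding/json standing in for the vendored gensupport.MarshalJSON helper (which additionally consumes ForceSendFields and NullFields):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type TargetGrpcProxy struct {
        Name            string   `json:"name,omitempty"`
        ForceSendFields []string `json:"-"`
        NullFields      []string `json:"-"`
    }

    // Value receiver: the method is in the method set of both TargetGrpcProxy
    // and *TargetGrpcProxy, so json.Marshal finds it for values and pointers.
    func (s TargetGrpcProxy) MarshalJSON() ([]byte, error) {
        // NoMethod has the same fields and tags but no methods, so the
        // json.Marshal call below uses the default struct encoding instead of
        // recursing back into this MarshalJSON forever.
        type NoMethod TargetGrpcProxy
        return json.Marshal(NoMethod(s))
    }

    func main() {
        p := TargetGrpcProxy{Name: "proxy-1"}
        b, _ := json.Marshal(p)   // works for the value ...
        b2, _ := json.Marshal(&p) // ... and for the pointer
        fmt.Println(string(b), string(b2))
    }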
@@ -47718,9 +48158,9 @@ type TargetInstanceList struct { NullFields []string `json:"-"` } -func (s *TargetInstanceList) MarshalJSON() ([]byte, error) { +func (s TargetInstanceList) MarshalJSON() ([]byte, error) { type NoMethod TargetInstanceList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetInstanceListWarning: [Output Only] Informational warning message. @@ -47803,9 +48243,9 @@ type TargetInstanceListWarning struct { NullFields []string `json:"-"` } -func (s *TargetInstanceListWarning) MarshalJSON() ([]byte, error) { +func (s TargetInstanceListWarning) MarshalJSON() ([]byte, error) { type NoMethod TargetInstanceListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetInstanceListWarningData struct { @@ -47832,9 +48272,9 @@ type TargetInstanceListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetInstanceListWarningData) MarshalJSON() ([]byte, error) { +func (s TargetInstanceListWarningData) MarshalJSON() ([]byte, error) { type NoMethod TargetInstanceListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetInstancesScopedList struct { @@ -47856,9 +48296,9 @@ type TargetInstancesScopedList struct { NullFields []string `json:"-"` } -func (s *TargetInstancesScopedList) MarshalJSON() ([]byte, error) { +func (s TargetInstancesScopedList) MarshalJSON() ([]byte, error) { type NoMethod TargetInstancesScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetInstancesScopedListWarning: Informational warning which replaces the @@ -47942,9 +48382,9 @@ type TargetInstancesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetInstancesScopedListWarning) MarshalJSON() ([]byte, error) { +func (s TargetInstancesScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod TargetInstancesScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetInstancesScopedListWarningData struct { @@ -47971,9 +48411,9 @@ type TargetInstancesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod TargetInstancesScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetPool: Represents a Target Pool resource. 
Target pools are used with @@ -48090,9 +48530,9 @@ type TargetPool struct { NullFields []string `json:"-"` } -func (s *TargetPool) MarshalJSON() ([]byte, error) { +func (s TargetPool) MarshalJSON() ([]byte, error) { type NoMethod TargetPool - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *TargetPool) UnmarshalJSON(data []byte) error { @@ -48145,9 +48585,9 @@ type TargetPoolAggregatedList struct { NullFields []string `json:"-"` } -func (s *TargetPoolAggregatedList) MarshalJSON() ([]byte, error) { +func (s TargetPoolAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod TargetPoolAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetPoolAggregatedListWarning: [Output Only] Informational warning @@ -48231,9 +48671,9 @@ type TargetPoolAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetPoolAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s TargetPoolAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod TargetPoolAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetPoolAggregatedListWarningData struct { @@ -48260,9 +48700,9 @@ type TargetPoolAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetPoolAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s TargetPoolAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod TargetPoolAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetPoolInstanceHealth struct { @@ -48286,9 +48726,9 @@ type TargetPoolInstanceHealth struct { NullFields []string `json:"-"` } -func (s *TargetPoolInstanceHealth) MarshalJSON() ([]byte, error) { +func (s TargetPoolInstanceHealth) MarshalJSON() ([]byte, error) { type NoMethod TargetPoolInstanceHealth - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetPoolList: Contains a list of TargetPool resources. @@ -48326,9 +48766,9 @@ type TargetPoolList struct { NullFields []string `json:"-"` } -func (s *TargetPoolList) MarshalJSON() ([]byte, error) { +func (s TargetPoolList) MarshalJSON() ([]byte, error) { type NoMethod TargetPoolList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetPoolListWarning: [Output Only] Informational warning message. 
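Note the asymmetry visible in the TargetPool hunk above: MarshalJSON switches to a value receiver, but UnmarshalJSON keeps its pointer receiver, because an unmarshaler must write through the receiver. The vendored method exists to normalize the API's float encoding via gensupport; this sketch, with a hypothetical stand-in struct, keeps only the receiver mechanics:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // PoolLike is a hypothetical stand-in for a generated struct that needs a
    // custom unmarshaler.
    type PoolLike struct {
        FailoverRatio float64 `json:"failoverRatio,omitempty"`
    }

    // Pointer receiver is mandatory here: the decoded fields are written
    // through s. With a value receiver the method would fill a copy and the
    // caller's struct would stay zero.
    func (s *PoolLike) UnmarshalJSON(data []byte) error {
        type NoMethod PoolLike // same fields, no methods: avoids recursion
        var aux NoMethod
        if err := json.Unmarshal(data, &aux); err != nil {
            return err
        }
        *s = PoolLike(aux)
        return nil
    }

    func main() {
        var p PoolLike
        _ = json.Unmarshal([]byte(`{"failoverRatio":0.5}`), &p)
        fmt.Println(p.FailoverRatio) // 0.5
    }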
@@ -48411,9 +48851,9 @@ type TargetPoolListWarning struct { NullFields []string `json:"-"` } -func (s *TargetPoolListWarning) MarshalJSON() ([]byte, error) { +func (s TargetPoolListWarning) MarshalJSON() ([]byte, error) { type NoMethod TargetPoolListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetPoolListWarningData struct { @@ -48440,9 +48880,9 @@ type TargetPoolListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetPoolListWarningData) MarshalJSON() ([]byte, error) { +func (s TargetPoolListWarningData) MarshalJSON() ([]byte, error) { type NoMethod TargetPoolListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetPoolsAddHealthCheckRequest struct { @@ -48461,9 +48901,9 @@ type TargetPoolsAddHealthCheckRequest struct { NullFields []string `json:"-"` } -func (s *TargetPoolsAddHealthCheckRequest) MarshalJSON() ([]byte, error) { +func (s TargetPoolsAddHealthCheckRequest) MarshalJSON() ([]byte, error) { type NoMethod TargetPoolsAddHealthCheckRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetPoolsAddInstanceRequest struct { @@ -48487,9 +48927,9 @@ type TargetPoolsAddInstanceRequest struct { NullFields []string `json:"-"` } -func (s *TargetPoolsAddInstanceRequest) MarshalJSON() ([]byte, error) { +func (s TargetPoolsAddInstanceRequest) MarshalJSON() ([]byte, error) { type NoMethod TargetPoolsAddInstanceRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetPoolsRemoveHealthCheckRequest struct { @@ -48513,9 +48953,9 @@ type TargetPoolsRemoveHealthCheckRequest struct { NullFields []string `json:"-"` } -func (s *TargetPoolsRemoveHealthCheckRequest) MarshalJSON() ([]byte, error) { +func (s TargetPoolsRemoveHealthCheckRequest) MarshalJSON() ([]byte, error) { type NoMethod TargetPoolsRemoveHealthCheckRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetPoolsRemoveInstanceRequest struct { @@ -48534,9 +48974,9 @@ type TargetPoolsRemoveInstanceRequest struct { NullFields []string `json:"-"` } -func (s *TargetPoolsRemoveInstanceRequest) MarshalJSON() ([]byte, error) { +func (s TargetPoolsRemoveInstanceRequest) MarshalJSON() ([]byte, error) { type NoMethod TargetPoolsRemoveInstanceRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetPoolsScopedList struct { @@ -48558,9 +48998,9 @@ type TargetPoolsScopedList struct { NullFields []string `json:"-"` } -func (s *TargetPoolsScopedList) MarshalJSON() ([]byte, error) { +func (s TargetPoolsScopedList) MarshalJSON() ([]byte, error) { type NoMethod TargetPoolsScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetPoolsScopedListWarning: Informational warning which replaces the list @@ -48644,9 +49084,9 @@ type TargetPoolsScopedListWarning struct { NullFields []string `json:"-"` } 
-func (s *TargetPoolsScopedListWarning) MarshalJSON() ([]byte, error) { +func (s TargetPoolsScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod TargetPoolsScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetPoolsScopedListWarningData struct { @@ -48673,9 +49113,9 @@ type TargetPoolsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetPoolsScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s TargetPoolsScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod TargetPoolsScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetReference struct { @@ -48693,9 +49133,9 @@ type TargetReference struct { NullFields []string `json:"-"` } -func (s *TargetReference) MarshalJSON() ([]byte, error) { +func (s TargetReference) MarshalJSON() ([]byte, error) { type NoMethod TargetReference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetSslProxiesSetBackendServiceRequest struct { @@ -48714,9 +49154,9 @@ type TargetSslProxiesSetBackendServiceRequest struct { NullFields []string `json:"-"` } -func (s *TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { +func (s TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { type NoMethod TargetSslProxiesSetBackendServiceRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetSslProxiesSetCertificateMapRequest struct { @@ -48738,9 +49178,9 @@ type TargetSslProxiesSetCertificateMapRequest struct { NullFields []string `json:"-"` } -func (s *TargetSslProxiesSetCertificateMapRequest) MarshalJSON() ([]byte, error) { +func (s TargetSslProxiesSetCertificateMapRequest) MarshalJSON() ([]byte, error) { type NoMethod TargetSslProxiesSetCertificateMapRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetSslProxiesSetProxyHeaderRequest struct { @@ -48764,9 +49204,9 @@ type TargetSslProxiesSetProxyHeaderRequest struct { NullFields []string `json:"-"` } -func (s *TargetSslProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { +func (s TargetSslProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { type NoMethod TargetSslProxiesSetProxyHeaderRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetSslProxiesSetSslCertificatesRequest struct { @@ -48787,9 +49227,9 @@ type TargetSslProxiesSetSslCertificatesRequest struct { NullFields []string `json:"-"` } -func (s *TargetSslProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { +func (s TargetSslProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { type NoMethod TargetSslProxiesSetSslCertificatesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetSslProxy: Represents a Target SSL Proxy resource. 
A target SSL proxy @@ -48860,9 +49300,9 @@ type TargetSslProxy struct { NullFields []string `json:"-"` } -func (s *TargetSslProxy) MarshalJSON() ([]byte, error) { +func (s TargetSslProxy) MarshalJSON() ([]byte, error) { type NoMethod TargetSslProxy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetSslProxyList: Contains a list of TargetSslProxy resources. @@ -48899,9 +49339,9 @@ type TargetSslProxyList struct { NullFields []string `json:"-"` } -func (s *TargetSslProxyList) MarshalJSON() ([]byte, error) { +func (s TargetSslProxyList) MarshalJSON() ([]byte, error) { type NoMethod TargetSslProxyList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetSslProxyListWarning: [Output Only] Informational warning message. @@ -48984,9 +49424,9 @@ type TargetSslProxyListWarning struct { NullFields []string `json:"-"` } -func (s *TargetSslProxyListWarning) MarshalJSON() ([]byte, error) { +func (s TargetSslProxyListWarning) MarshalJSON() ([]byte, error) { type NoMethod TargetSslProxyListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetSslProxyListWarningData struct { @@ -49013,9 +49453,9 @@ type TargetSslProxyListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetSslProxyListWarningData) MarshalJSON() ([]byte, error) { +func (s TargetSslProxyListWarningData) MarshalJSON() ([]byte, error) { type NoMethod TargetSslProxyListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetTcpProxiesScopedList struct { @@ -49037,9 +49477,9 @@ type TargetTcpProxiesScopedList struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxiesScopedList) MarshalJSON() ([]byte, error) { +func (s TargetTcpProxiesScopedList) MarshalJSON() ([]byte, error) { type NoMethod TargetTcpProxiesScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetTcpProxiesScopedListWarning: Informational warning which replaces the @@ -49123,9 +49563,9 @@ type TargetTcpProxiesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxiesScopedListWarning) MarshalJSON() ([]byte, error) { +func (s TargetTcpProxiesScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod TargetTcpProxiesScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetTcpProxiesScopedListWarningData struct { @@ -49152,9 +49592,9 @@ type TargetTcpProxiesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxiesScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s TargetTcpProxiesScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod TargetTcpProxiesScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetTcpProxiesSetBackendServiceRequest struct { @@ -49173,9 +49613,9 @@ type 
TargetTcpProxiesSetBackendServiceRequest struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { +func (s TargetTcpProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { type NoMethod TargetTcpProxiesSetBackendServiceRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetTcpProxiesSetProxyHeaderRequest struct { @@ -49199,9 +49639,9 @@ type TargetTcpProxiesSetProxyHeaderRequest struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { +func (s TargetTcpProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { type NoMethod TargetTcpProxiesSetProxyHeaderRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetTcpProxy: Represents a Target TCP Proxy resource. A target TCP proxy @@ -49268,9 +49708,9 @@ type TargetTcpProxy struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxy) MarshalJSON() ([]byte, error) { +func (s TargetTcpProxy) MarshalJSON() ([]byte, error) { type NoMethod TargetTcpProxy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetTcpProxyAggregatedList struct { @@ -49309,9 +49749,9 @@ type TargetTcpProxyAggregatedList struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxyAggregatedList) MarshalJSON() ([]byte, error) { +func (s TargetTcpProxyAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod TargetTcpProxyAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetTcpProxyAggregatedListWarning: [Output Only] Informational warning @@ -49395,9 +49835,9 @@ type TargetTcpProxyAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxyAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s TargetTcpProxyAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod TargetTcpProxyAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetTcpProxyAggregatedListWarningData struct { @@ -49424,9 +49864,9 @@ type TargetTcpProxyAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxyAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s TargetTcpProxyAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod TargetTcpProxyAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetTcpProxyList: Contains a list of TargetTcpProxy resources. 
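All of these request types carry the same ForceSendFields/NullFields pair that gensupport.MarshalJSON consumes: ForceSendFields overrides the omitempty-style dropping of zero values, and NullFields emits an explicit JSON null. The contract, hand-rolled for a single field on a hypothetical struct to stay self-contained (a sketch, not the vendored implementation):

    package main

    import (
        "encoding/json"
        "fmt"
        "slices"
    )

    type SetProxyHeaderLike struct {
        ProxyHeader     string   `json:"proxyHeader,omitempty"`
        ForceSendFields []string `json:"-"`
        NullFields      []string `json:"-"`
    }

    func (s SetProxyHeaderLike) MarshalJSON() ([]byte, error) {
        m := map[string]any{}
        // omitempty-style default: drop the zero value ...
        if s.ProxyHeader != "" || slices.Contains(s.ForceSendFields, "ProxyHeader") {
            m["proxyHeader"] = s.ProxyHeader // ... unless force-sent
        }
        if slices.Contains(s.NullFields, "ProxyHeader") {
            m["proxyHeader"] = nil // explicit null to clear server-side state
        }
        return json.Marshal(m)
    }

    func main() {
        b, _ := json.Marshal(SetProxyHeaderLike{ForceSendFields: []string{"ProxyHeader"}})
        fmt.Println(string(b)) // {"proxyHeader":""}
    }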
@@ -49463,9 +49903,9 @@ type TargetTcpProxyList struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxyList) MarshalJSON() ([]byte, error) { +func (s TargetTcpProxyList) MarshalJSON() ([]byte, error) { type NoMethod TargetTcpProxyList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetTcpProxyListWarning: [Output Only] Informational warning message. @@ -49548,9 +49988,9 @@ type TargetTcpProxyListWarning struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxyListWarning) MarshalJSON() ([]byte, error) { +func (s TargetTcpProxyListWarning) MarshalJSON() ([]byte, error) { type NoMethod TargetTcpProxyListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetTcpProxyListWarningData struct { @@ -49577,9 +50017,9 @@ type TargetTcpProxyListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxyListWarningData) MarshalJSON() ([]byte, error) { +func (s TargetTcpProxyListWarningData) MarshalJSON() ([]byte, error) { type NoMethod TargetTcpProxyListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetVpnGateway: Represents a Target VPN Gateway resource. The target VPN @@ -49659,9 +50099,9 @@ type TargetVpnGateway struct { NullFields []string `json:"-"` } -func (s *TargetVpnGateway) MarshalJSON() ([]byte, error) { +func (s TargetVpnGateway) MarshalJSON() ([]byte, error) { type NoMethod TargetVpnGateway - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetVpnGatewayAggregatedList struct { @@ -49700,9 +50140,9 @@ type TargetVpnGatewayAggregatedList struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { +func (s TargetVpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod TargetVpnGatewayAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetVpnGatewayAggregatedListWarning: [Output Only] Informational warning @@ -49786,9 +50226,9 @@ type TargetVpnGatewayAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewayAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s TargetVpnGatewayAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod TargetVpnGatewayAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetVpnGatewayAggregatedListWarningData struct { @@ -49815,9 +50255,9 @@ type TargetVpnGatewayAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewayAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s TargetVpnGatewayAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod TargetVpnGatewayAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetVpnGatewayList: Contains a list of TargetVpnGateway 
resources. @@ -49855,9 +50295,9 @@ type TargetVpnGatewayList struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewayList) MarshalJSON() ([]byte, error) { +func (s TargetVpnGatewayList) MarshalJSON() ([]byte, error) { type NoMethod TargetVpnGatewayList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetVpnGatewayListWarning: [Output Only] Informational warning message. @@ -49940,9 +50380,9 @@ type TargetVpnGatewayListWarning struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewayListWarning) MarshalJSON() ([]byte, error) { +func (s TargetVpnGatewayListWarning) MarshalJSON() ([]byte, error) { type NoMethod TargetVpnGatewayListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetVpnGatewayListWarningData struct { @@ -49969,9 +50409,9 @@ type TargetVpnGatewayListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewayListWarningData) MarshalJSON() ([]byte, error) { +func (s TargetVpnGatewayListWarningData) MarshalJSON() ([]byte, error) { type NoMethod TargetVpnGatewayListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetVpnGatewaysScopedList struct { @@ -49994,9 +50434,9 @@ type TargetVpnGatewaysScopedList struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewaysScopedList) MarshalJSON() ([]byte, error) { +func (s TargetVpnGatewaysScopedList) MarshalJSON() ([]byte, error) { type NoMethod TargetVpnGatewaysScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TargetVpnGatewaysScopedListWarning: [Output Only] Informational warning @@ -50080,9 +50520,9 @@ type TargetVpnGatewaysScopedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { +func (s TargetVpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod TargetVpnGatewaysScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TargetVpnGatewaysScopedListWarningData struct { @@ -50109,9 +50549,9 @@ type TargetVpnGatewaysScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s TargetVpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod TargetVpnGatewaysScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TestFailure struct { @@ -50151,9 +50591,9 @@ type TestFailure struct { NullFields []string `json:"-"` } -func (s *TestFailure) MarshalJSON() ([]byte, error) { +func (s TestFailure) MarshalJSON() ([]byte, error) { type NoMethod TestFailure - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TestPermissionsRequest struct { @@ -50173,9 +50613,9 @@ type TestPermissionsRequest struct { NullFields []string `json:"-"` } -func (s 
*TestPermissionsRequest) MarshalJSON() ([]byte, error) { +func (s TestPermissionsRequest) MarshalJSON() ([]byte, error) { type NoMethod TestPermissionsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TestPermissionsResponse struct { @@ -50198,9 +50638,9 @@ type TestPermissionsResponse struct { NullFields []string `json:"-"` } -func (s *TestPermissionsResponse) MarshalJSON() ([]byte, error) { +func (s TestPermissionsResponse) MarshalJSON() ([]byte, error) { type NoMethod TestPermissionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type Uint128 struct { @@ -50219,9 +50659,9 @@ type Uint128 struct { NullFields []string `json:"-"` } -func (s *Uint128) MarshalJSON() ([]byte, error) { +func (s Uint128) MarshalJSON() ([]byte, error) { type NoMethod Uint128 - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpcomingMaintenance: Upcoming Maintenance notification information. @@ -50264,9 +50704,9 @@ type UpcomingMaintenance struct { NullFields []string `json:"-"` } -func (s *UpcomingMaintenance) MarshalJSON() ([]byte, error) { +func (s UpcomingMaintenance) MarshalJSON() ([]byte, error) { type NoMethod UpcomingMaintenance - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UrlMap: Represents a URL Map resource. Compute Engine has two URL Map @@ -50283,13 +50723,37 @@ func (s *UpcomingMaintenance) MarshalJSON() ([]byte, error) { // Director, see the Traffic Director features: Routing and traffic management // table. This resource defines mappings from hostnames and URL paths to either // a backend service or a backend bucket. To use the global urlMaps resource, -// the backend service must have a loadBalancingScheme of either EXTERNAL or -// INTERNAL_SELF_MANAGED. To use the regionUrlMaps resource, the backend -// service must have a loadBalancingScheme of INTERNAL_MANAGED. For more -// information, read URL Map Concepts. +// the backend service must have a loadBalancingScheme of either EXTERNAL, +// EXTERNAL_MANAGED, or INTERNAL_SELF_MANAGED. To use the regionUrlMaps +// resource, the backend service must have a loadBalancingScheme of +// INTERNAL_MANAGED. For more information, read URL Map Concepts. type UrlMap struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text format. CreationTimestamp string `json:"creationTimestamp,omitempty"` + // DefaultCustomErrorResponsePolicy: defaultCustomErrorResponsePolicy specifies + // how the Load Balancer returns error responses when BackendServiceor + // BackendBucket responds with an error. This policy takes effect at the load + // balancer level and applies only when no policy has been defined for the + // error code at lower levels like PathMatcher, RouteRule and PathRule within + // this UrlMap. For example, consider a UrlMap with the following + // configuration: - defaultCustomErrorResponsePolicy containing policies for + // responding to 5xx and 4xx errors - A PathMatcher configured for + // *.example.com has defaultCustomErrorResponsePolicy for 4xx. 
If a request for + // http://www.example.com/ encounters a 404, the policy in + // pathMatcher.defaultCustomErrorResponsePolicy will be enforced. When the + // request for http://www.example.com/ encounters a 502, the policy in + // UrlMap.defaultCustomErrorResponsePolicy will be enforced. When a request + // that does not match any host in *.example.com such as + // http://www.myotherexample.com/, encounters a 404, + // UrlMap.defaultCustomErrorResponsePolicy takes effect. When used in + // conjunction with defaultRouteAction.retryPolicy, retries take precedence. + // Only once all retries are exhausted, the defaultCustomErrorResponsePolicy is + // applied. While attempting a retry, if load balancer is successful in + // reaching the service, the defaultCustomErrorResponsePolicy is ignored and + // the response from the service is returned to the client. + // defaultCustomErrorResponsePolicy is supported only for global external + // Application Load Balancers. + DefaultCustomErrorResponsePolicy *CustomErrorResponsePolicy `json:"defaultCustomErrorResponsePolicy,omitempty"` // DefaultRouteAction: defaultRouteAction takes effect when none of the // hostRules match. The load balancer performs advanced routing actions, such // as URL rewrites and header transformations, before forwarding the request to @@ -50307,12 +50771,12 @@ type UrlMap struct { // defaultRouteAction is also specified, advanced routing actions, such as URL // rewrites, take effect before sending the request to the backend. However, if // defaultService is specified, defaultRouteAction cannot contain any - // weightedBackendServices. Conversely, if routeAction specifies any - // weightedBackendServices, service must not be specified. If defaultService is - // specified, then set either defaultUrlRedirect , or - // defaultRouteAction.weightedBackendService Don't set both. defaultService has - // no effect when the URL map is bound to a target gRPC proxy that has the - // validateForProxyless field set to true. + // defaultRouteAction.weightedBackendServices. Conversely, if + // defaultRouteAction specifies any defaultRouteAction.weightedBackendServices, + // defaultService must not be specified. If defaultService is specified, then + // set either defaultUrlRedirect , or defaultRouteAction.weightedBackendService + // Don't set both. defaultService has no effect when the URL map is bound to a + // target gRPC proxy that has the validateForProxyless field set to true. DefaultService string `json:"defaultService,omitempty"` // DefaultUrlRedirect: When none of the specified hostRules match, the request // is redirected to a URL specified by defaultUrlRedirect. If @@ -50382,9 +50846,9 @@ type UrlMap struct { NullFields []string `json:"-"` } -func (s *UrlMap) MarshalJSON() ([]byte, error) { +func (s UrlMap) MarshalJSON() ([]byte, error) { type NoMethod UrlMap - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UrlMapList: Contains a list of UrlMap resources. @@ -50421,9 +50885,9 @@ type UrlMapList struct { NullFields []string `json:"-"` } -func (s *UrlMapList) MarshalJSON() ([]byte, error) { +func (s UrlMapList) MarshalJSON() ([]byte, error) { type NoMethod UrlMapList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UrlMapListWarning: [Output Only] Informational warning message. 
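The new defaultCustomErrorResponsePolicy comment above describes a most-specific-wins lookup: a PathRule or RouteRule policy, then the PathMatcher policy, then the UrlMap-level default, applied only after any retries are exhausted. A hypothetical helper (not part of the API) that models just the lookup order, replayed against the 404/502 example from the comment:

    package main

    import "fmt"

    // pickErrorPolicy returns the first policy that defines the status code,
    // checking the most specific scope first, mirroring the documented order:
    // PathRule/RouteRule, then PathMatcher, then the UrlMap-level default.
    func pickErrorPolicy(code int, pathRule, pathMatcher, urlMapDefault map[int]string) (string, bool) {
        for _, scope := range []map[int]string{pathRule, pathMatcher, urlMapDefault} {
            if policy, ok := scope[code]; ok {
                return policy, true
            }
        }
        return "", false
    }

    func main() {
        pathMatcher := map[int]string{404: "pathMatcher-4xx-policy"}
        urlMapDefault := map[int]string{404: "urlMap-4xx-policy", 502: "urlMap-5xx-policy"}
        // 404 on a matched host uses the PathMatcher policy; 502 falls back
        // to the UrlMap default, as the comment above describes.
        fmt.Println(pickErrorPolicy(404, nil, pathMatcher, urlMapDefault))
        fmt.Println(pickErrorPolicy(502, nil, pathMatcher, urlMapDefault))
    }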
@@ -50506,9 +50970,9 @@ type UrlMapListWarning struct { NullFields []string `json:"-"` } -func (s *UrlMapListWarning) MarshalJSON() ([]byte, error) { +func (s UrlMapListWarning) MarshalJSON() ([]byte, error) { type NoMethod UrlMapListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type UrlMapListWarningData struct { @@ -50535,9 +50999,9 @@ type UrlMapListWarningData struct { NullFields []string `json:"-"` } -func (s *UrlMapListWarningData) MarshalJSON() ([]byte, error) { +func (s UrlMapListWarningData) MarshalJSON() ([]byte, error) { type NoMethod UrlMapListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type UrlMapReference struct { @@ -50555,9 +51019,9 @@ type UrlMapReference struct { NullFields []string `json:"-"` } -func (s *UrlMapReference) MarshalJSON() ([]byte, error) { +func (s UrlMapReference) MarshalJSON() ([]byte, error) { type NoMethod UrlMapReference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UrlMapTest: Message for the expected URL mappings. @@ -50608,9 +51072,9 @@ type UrlMapTest struct { NullFields []string `json:"-"` } -func (s *UrlMapTest) MarshalJSON() ([]byte, error) { +func (s UrlMapTest) MarshalJSON() ([]byte, error) { type NoMethod UrlMapTest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UrlMapTestHeader: HTTP headers used in UrlMapTests. @@ -50632,9 +51096,9 @@ type UrlMapTestHeader struct { NullFields []string `json:"-"` } -func (s *UrlMapTestHeader) MarshalJSON() ([]byte, error) { +func (s UrlMapTestHeader) MarshalJSON() ([]byte, error) { type NoMethod UrlMapTestHeader - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UrlMapValidationResult: Message representing the validation result for a @@ -50661,9 +51125,9 @@ type UrlMapValidationResult struct { NullFields []string `json:"-"` } -func (s *UrlMapValidationResult) MarshalJSON() ([]byte, error) { +func (s UrlMapValidationResult) MarshalJSON() ([]byte, error) { type NoMethod UrlMapValidationResult - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type UrlMapsAggregatedList struct { @@ -50701,9 +51165,9 @@ type UrlMapsAggregatedList struct { NullFields []string `json:"-"` } -func (s *UrlMapsAggregatedList) MarshalJSON() ([]byte, error) { +func (s UrlMapsAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod UrlMapsAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UrlMapsAggregatedListWarning: [Output Only] Informational warning message. 
@@ -50786,9 +51250,9 @@ type UrlMapsAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *UrlMapsAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s UrlMapsAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod UrlMapsAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type UrlMapsAggregatedListWarningData struct { @@ -50815,9 +51279,9 @@ type UrlMapsAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *UrlMapsAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s UrlMapsAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod UrlMapsAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type UrlMapsScopedList struct { @@ -50839,9 +51303,9 @@ type UrlMapsScopedList struct { NullFields []string `json:"-"` } -func (s *UrlMapsScopedList) MarshalJSON() ([]byte, error) { +func (s UrlMapsScopedList) MarshalJSON() ([]byte, error) { type NoMethod UrlMapsScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UrlMapsScopedListWarning: Informational warning which replaces the list of @@ -50925,9 +51389,9 @@ type UrlMapsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *UrlMapsScopedListWarning) MarshalJSON() ([]byte, error) { +func (s UrlMapsScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod UrlMapsScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type UrlMapsScopedListWarningData struct { @@ -50954,9 +51418,9 @@ type UrlMapsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *UrlMapsScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s UrlMapsScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod UrlMapsScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type UrlMapsValidateRequest struct { @@ -50998,9 +51462,9 @@ type UrlMapsValidateRequest struct { NullFields []string `json:"-"` } -func (s *UrlMapsValidateRequest) MarshalJSON() ([]byte, error) { +func (s UrlMapsValidateRequest) MarshalJSON() ([]byte, error) { type NoMethod UrlMapsValidateRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type UrlMapsValidateResponse struct { @@ -51021,9 +51485,9 @@ type UrlMapsValidateResponse struct { NullFields []string `json:"-"` } -func (s *UrlMapsValidateResponse) MarshalJSON() ([]byte, error) { +func (s UrlMapsValidateResponse) MarshalJSON() ([]byte, error) { type NoMethod UrlMapsValidateResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UrlRewrite: The spec for modifying the path before sending the request to @@ -51064,9 +51528,9 @@ type UrlRewrite struct { NullFields []string `json:"-"` } -func (s *UrlRewrite) MarshalJSON() ([]byte, error) { +func (s UrlRewrite) MarshalJSON() 
([]byte, error) { type NoMethod UrlRewrite - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UsableSubnetwork: Subnetwork which the current user has @@ -51141,6 +51605,7 @@ type UsableSubnetwork struct { // "IPV4_IPV6" - New VMs in this subnet can have both IPv4 and IPv6 // addresses. // "IPV4_ONLY" - New VMs in this subnet will only be assigned IPv4 addresses. + // "IPV6_ONLY" - New VMs in this subnet will only be assigned IPv6 addresses. StackType string `json:"stackType,omitempty"` // Subnetwork: Subnetwork URL. Subnetwork string `json:"subnetwork,omitempty"` @@ -51157,9 +51622,9 @@ type UsableSubnetwork struct { NullFields []string `json:"-"` } -func (s *UsableSubnetwork) MarshalJSON() ([]byte, error) { +func (s UsableSubnetwork) MarshalJSON() ([]byte, error) { type NoMethod UsableSubnetwork - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UsableSubnetworkSecondaryRange: Secondary IP range of a usable subnetwork. @@ -51185,9 +51650,9 @@ type UsableSubnetworkSecondaryRange struct { NullFields []string `json:"-"` } -func (s *UsableSubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { +func (s UsableSubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { type NoMethod UsableSubnetworkSecondaryRange - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type UsableSubnetworksAggregatedList struct { @@ -51228,9 +51693,9 @@ type UsableSubnetworksAggregatedList struct { NullFields []string `json:"-"` } -func (s *UsableSubnetworksAggregatedList) MarshalJSON() ([]byte, error) { +func (s UsableSubnetworksAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod UsableSubnetworksAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UsableSubnetworksAggregatedListWarning: [Output Only] Informational warning @@ -51314,9 +51779,9 @@ type UsableSubnetworksAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *UsableSubnetworksAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s UsableSubnetworksAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod UsableSubnetworksAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type UsableSubnetworksAggregatedListWarningData struct { @@ -51343,9 +51808,9 @@ type UsableSubnetworksAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *UsableSubnetworksAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s UsableSubnetworksAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod UsableSubnetworksAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UsageExportLocation: The location in Cloud Storage and naming method of the @@ -51376,9 +51841,9 @@ type UsageExportLocation struct { NullFields []string `json:"-"` } -func (s *UsageExportLocation) MarshalJSON() ([]byte, error) { +func (s UsageExportLocation) MarshalJSON() ([]byte, error) { type NoMethod 
UsageExportLocation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VmEndpointNatMappings: Contain information of Nat mapping for a VM endpoint @@ -51400,9 +51865,9 @@ type VmEndpointNatMappings struct { NullFields []string `json:"-"` } -func (s *VmEndpointNatMappings) MarshalJSON() ([]byte, error) { +func (s VmEndpointNatMappings) MarshalJSON() ([]byte, error) { type NoMethod VmEndpointNatMappings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VmEndpointNatMappingsInterfaceNatMappings: Contain information of Nat @@ -51446,9 +51911,9 @@ type VmEndpointNatMappingsInterfaceNatMappings struct { NullFields []string `json:"-"` } -func (s *VmEndpointNatMappingsInterfaceNatMappings) MarshalJSON() ([]byte, error) { +func (s VmEndpointNatMappingsInterfaceNatMappings) MarshalJSON() ([]byte, error) { type NoMethod VmEndpointNatMappingsInterfaceNatMappings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VmEndpointNatMappingsInterfaceNatMappingsNatRuleMappings: Contains @@ -51487,9 +51952,9 @@ type VmEndpointNatMappingsInterfaceNatMappingsNatRuleMappings struct { NullFields []string `json:"-"` } -func (s *VmEndpointNatMappingsInterfaceNatMappingsNatRuleMappings) MarshalJSON() ([]byte, error) { +func (s VmEndpointNatMappingsInterfaceNatMappingsNatRuleMappings) MarshalJSON() ([]byte, error) { type NoMethod VmEndpointNatMappingsInterfaceNatMappingsNatRuleMappings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VmEndpointNatMappingsList: Contains a list of VmEndpointNatMappings. @@ -51528,9 +51993,9 @@ type VmEndpointNatMappingsList struct { NullFields []string `json:"-"` } -func (s *VmEndpointNatMappingsList) MarshalJSON() ([]byte, error) { +func (s VmEndpointNatMappingsList) MarshalJSON() ([]byte, error) { type NoMethod VmEndpointNatMappingsList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VmEndpointNatMappingsListWarning: [Output Only] Informational warning @@ -51614,9 +52079,9 @@ type VmEndpointNatMappingsListWarning struct { NullFields []string `json:"-"` } -func (s *VmEndpointNatMappingsListWarning) MarshalJSON() ([]byte, error) { +func (s VmEndpointNatMappingsListWarning) MarshalJSON() ([]byte, error) { type NoMethod VmEndpointNatMappingsListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type VmEndpointNatMappingsListWarningData struct { @@ -51643,9 +52108,9 @@ type VmEndpointNatMappingsListWarningData struct { NullFields []string `json:"-"` } -func (s *VmEndpointNatMappingsListWarningData) MarshalJSON() ([]byte, error) { +func (s VmEndpointNatMappingsListWarningData) MarshalJSON() ([]byte, error) { type NoMethod VmEndpointNatMappingsListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VpnGateway: Represents a HA VPN gateway. 
HA VPN is a high-availability (HA) @@ -51702,8 +52167,9 @@ type VpnGateway struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` // StackType: The stack type for this VPN gateway to identify the IP protocols - // that are enabled. Possible values are: IPV4_ONLY, IPV4_IPV6. If not - // specified, IPV4_ONLY will be used. + // that are enabled. Possible values are: IPV4_ONLY, IPV4_IPV6, IPV6_ONLY. If + // not specified, IPV4_ONLY is used if the gateway IP version is IPV4, or + // IPV4_IPV6 if the gateway IP version is IPV6. // // Possible values: // "IPV4_IPV6" - Enable VPN gateway with both IPv4 and IPv6 protocols. @@ -51728,9 +52194,9 @@ type VpnGateway struct { NullFields []string `json:"-"` } -func (s *VpnGateway) MarshalJSON() ([]byte, error) { +func (s VpnGateway) MarshalJSON() ([]byte, error) { type NoMethod VpnGateway - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type VpnGatewayAggregatedList struct { @@ -51769,9 +52235,9 @@ type VpnGatewayAggregatedList struct { NullFields []string `json:"-"` } -func (s *VpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { +func (s VpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod VpnGatewayAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VpnGatewayAggregatedListWarning: [Output Only] Informational warning @@ -51855,9 +52321,9 @@ type VpnGatewayAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *VpnGatewayAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s VpnGatewayAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod VpnGatewayAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type VpnGatewayAggregatedListWarningData struct { @@ -51884,9 +52350,9 @@ type VpnGatewayAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *VpnGatewayAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s VpnGatewayAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod VpnGatewayAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VpnGatewayList: Contains a list of VpnGateway resources. @@ -51924,9 +52390,9 @@ type VpnGatewayList struct { NullFields []string `json:"-"` } -func (s *VpnGatewayList) MarshalJSON() ([]byte, error) { +func (s VpnGatewayList) MarshalJSON() ([]byte, error) { type NoMethod VpnGatewayList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VpnGatewayListWarning: [Output Only] Informational warning message. 
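The VpnGateway hunk above adds IPV6_ONLY to StackType and makes the unspecified default depend on the gateway's IP version (IPV4_ONLY for an IPV4 gateway, IPV4_IPV6 for an IPV6 gateway). A minimal sketch of pinning the stack type explicitly when building a gateway against the regenerated compute/v1 package; the project, network, and gateway names are placeholders, not values from this patch:

```go
package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	gw := &compute.VpnGateway{
		Name:      "ha-vpn-gw-1", // placeholder
		Network:   "projects/my-project/global/networks/my-network",
		StackType: "IPV4_IPV6", // explicit, rather than relying on the IP-version-dependent default
	}
	// json.Marshal dispatches to the generated value-receiver MarshalJSON.
	b, err := json.Marshal(gw)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```

Because the generated MarshalJSON honors ForceSendFields, a zero-valued field can also be forced into the request body when the API needs to see it explicitly.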
@@ -52009,9 +52475,9 @@ type VpnGatewayListWarning struct { NullFields []string `json:"-"` } -func (s *VpnGatewayListWarning) MarshalJSON() ([]byte, error) { +func (s VpnGatewayListWarning) MarshalJSON() ([]byte, error) { type NoMethod VpnGatewayListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type VpnGatewayListWarningData struct { @@ -52038,9 +52504,9 @@ type VpnGatewayListWarningData struct { NullFields []string `json:"-"` } -func (s *VpnGatewayListWarningData) MarshalJSON() ([]byte, error) { +func (s VpnGatewayListWarningData) MarshalJSON() ([]byte, error) { type NoMethod VpnGatewayListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type VpnGatewayStatus struct { @@ -52059,9 +52525,9 @@ type VpnGatewayStatus struct { NullFields []string `json:"-"` } -func (s *VpnGatewayStatus) MarshalJSON() ([]byte, error) { +func (s VpnGatewayStatus) MarshalJSON() ([]byte, error) { type NoMethod VpnGatewayStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VpnGatewayStatusHighAvailabilityRequirementState: Describes the high @@ -52102,9 +52568,9 @@ type VpnGatewayStatusHighAvailabilityRequirementState struct { NullFields []string `json:"-"` } -func (s *VpnGatewayStatusHighAvailabilityRequirementState) MarshalJSON() ([]byte, error) { +func (s VpnGatewayStatusHighAvailabilityRequirementState) MarshalJSON() ([]byte, error) { type NoMethod VpnGatewayStatusHighAvailabilityRequirementState - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VpnGatewayStatusTunnel: Contains some information about a VPN tunnel. @@ -52131,9 +52597,9 @@ type VpnGatewayStatusTunnel struct { NullFields []string `json:"-"` } -func (s *VpnGatewayStatusTunnel) MarshalJSON() ([]byte, error) { +func (s VpnGatewayStatusTunnel) MarshalJSON() ([]byte, error) { type NoMethod VpnGatewayStatusTunnel - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VpnGatewayStatusVpnConnection: A VPN connection contains all VPN tunnels @@ -52165,9 +52631,9 @@ type VpnGatewayStatusVpnConnection struct { NullFields []string `json:"-"` } -func (s *VpnGatewayStatusVpnConnection) MarshalJSON() ([]byte, error) { +func (s VpnGatewayStatusVpnConnection) MarshalJSON() ([]byte, error) { type NoMethod VpnGatewayStatusVpnConnection - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VpnGatewayVpnGatewayInterface: A VPN gateway interface. 
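The dominant change in these hunks is mechanical: every generated MarshalJSON moves from a pointer receiver to a value receiver, and the NoMethod conversion becomes NoMethod(s) instead of NoMethod(*s). The standalone toy below uses plain encoding/json (it is not the gensupport implementation, which additionally honors ForceSendFields and NullFields) to show why the pattern cannot recurse and why the receiver change stays source-compatible: a value-receiver method is in the method set of both the value type and its pointer type.

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Widget struct {
	Name  string `json:"name,omitempty"`
	Count int    `json:"count,omitempty"`
}

// Value receiver: both Widget and *Widget now satisfy json.Marshaler.
func (w Widget) MarshalJSON() ([]byte, error) {
	// NoMethod has Widget's fields but none of its methods, so the inner
	// json.Marshal call cannot recurse back into this method.
	type NoMethod Widget
	return json.Marshal(NoMethod(w))
}

func main() {
	byValue, _ := json.Marshal(Widget{Name: "w", Count: 2})
	byPointer, _ := json.Marshal(&Widget{Name: "w", Count: 2})
	fmt.Println(string(byValue), string(byPointer)) // identical output
}
```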
@@ -52209,9 +52675,9 @@ type VpnGatewayVpnGatewayInterface struct { NullFields []string `json:"-"` } -func (s *VpnGatewayVpnGatewayInterface) MarshalJSON() ([]byte, error) { +func (s VpnGatewayVpnGatewayInterface) MarshalJSON() ([]byte, error) { type NoMethod VpnGatewayVpnGatewayInterface - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type VpnGatewaysGetStatusResponse struct { @@ -52232,9 +52698,9 @@ type VpnGatewaysGetStatusResponse struct { NullFields []string `json:"-"` } -func (s *VpnGatewaysGetStatusResponse) MarshalJSON() ([]byte, error) { +func (s VpnGatewaysGetStatusResponse) MarshalJSON() ([]byte, error) { type NoMethod VpnGatewaysGetStatusResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type VpnGatewaysScopedList struct { @@ -52256,9 +52722,9 @@ type VpnGatewaysScopedList struct { NullFields []string `json:"-"` } -func (s *VpnGatewaysScopedList) MarshalJSON() ([]byte, error) { +func (s VpnGatewaysScopedList) MarshalJSON() ([]byte, error) { type NoMethod VpnGatewaysScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VpnGatewaysScopedListWarning: [Output Only] Informational warning which @@ -52342,9 +52808,9 @@ type VpnGatewaysScopedListWarning struct { NullFields []string `json:"-"` } -func (s *VpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { +func (s VpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod VpnGatewaysScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type VpnGatewaysScopedListWarningData struct { @@ -52371,9 +52837,9 @@ type VpnGatewaysScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *VpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s VpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod VpnGatewaysScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VpnTunnel: Represents a Cloud VPN Tunnel resource. For more information @@ -52411,7 +52877,8 @@ type VpnTunnel struct { // LocalTrafficSelector: Local traffic selector to use when establishing the // VPN tunnel with the peer VPN gateway. The value should be a CIDR formatted // string, for example: 192.168.0.0/16. The ranges must be disjoint. Only IPv4 - // is supported. + // is supported for Classic VPN tunnels. This field is output only for HA VPN + // tunnels. LocalTrafficSelector []string `json:"localTrafficSelector,omitempty"` // Name: Name of the resource. Provided by the client when the resource is // created. The name must be 1-63 characters long, and comply with RFC1035. @@ -52437,7 +52904,8 @@ type VpnTunnel struct { // provided, the VPN tunnel will automatically use the same vpnGatewayInterface // ID in the peer Google Cloud VPN gateway. PeerGcpGateway string `json:"peerGcpGateway,omitempty"` - // PeerIp: IP address of the peer VPN gateway. Only IPv4 is supported. + // PeerIp: IP address of the peer VPN gateway. Only IPv4 is supported. This + // field can be set only for Classic VPN tunnels. 
PeerIp string `json:"peerIp,omitempty"` // Region: [Output Only] URL of the region where the VPN tunnel resides. You // must specify this field as part of the HTTP request URL. It is not settable @@ -52446,7 +52914,8 @@ type VpnTunnel struct { // RemoteTrafficSelector: Remote traffic selectors to use when establishing the // VPN tunnel with the peer VPN gateway. The value should be a CIDR formatted // string, for example: 192.168.0.0/16. The ranges should be disjoint. Only - // IPv4 is supported. + // IPv4 is supported for Classic VPN tunnels. This field is output only for HA + // VPN tunnels. RemoteTrafficSelector []string `json:"remoteTrafficSelector,omitempty"` // Router: URL of the router resource to be used for dynamic routing. Router string `json:"router,omitempty"` @@ -52498,7 +52967,8 @@ type VpnTunnel struct { // resources are needed to setup VPN tunnel. Status string `json:"status,omitempty"` // TargetVpnGateway: URL of the Target VPN gateway with which this VPN tunnel - // is associated. Provided by the client when the VPN tunnel is created. + // is associated. Provided by the client when the VPN tunnel is created. This + // field can be set only for Classic VPN tunnels. TargetVpnGateway string `json:"targetVpnGateway,omitempty"` // VpnGateway: URL of the VPN gateway with which this VPN tunnel is associated. // Provided by the client when the VPN tunnel is created. This must be used @@ -52524,9 +52994,9 @@ type VpnTunnel struct { NullFields []string `json:"-"` } -func (s *VpnTunnel) MarshalJSON() ([]byte, error) { +func (s VpnTunnel) MarshalJSON() ([]byte, error) { type NoMethod VpnTunnel - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type VpnTunnelAggregatedList struct { @@ -52565,9 +53035,9 @@ type VpnTunnelAggregatedList struct { NullFields []string `json:"-"` } -func (s *VpnTunnelAggregatedList) MarshalJSON() ([]byte, error) { +func (s VpnTunnelAggregatedList) MarshalJSON() ([]byte, error) { type NoMethod VpnTunnelAggregatedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VpnTunnelAggregatedListWarning: [Output Only] Informational warning message. @@ -52650,9 +53120,9 @@ type VpnTunnelAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *VpnTunnelAggregatedListWarning) MarshalJSON() ([]byte, error) { +func (s VpnTunnelAggregatedListWarning) MarshalJSON() ([]byte, error) { type NoMethod VpnTunnelAggregatedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type VpnTunnelAggregatedListWarningData struct { @@ -52679,9 +53149,9 @@ type VpnTunnelAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *VpnTunnelAggregatedListWarningData) MarshalJSON() ([]byte, error) { +func (s VpnTunnelAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod VpnTunnelAggregatedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VpnTunnelList: Contains a list of VpnTunnel resources. 
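The VpnTunnel doc updates above scope LocalTrafficSelector, RemoteTrafficSelector, PeerIp, and TargetVpnGateway to Classic VPN tunnels; for HA VPN tunnels they are output only or unsettable. A sketch contrasting the two shapes; all resource URLs, addresses, and secrets are illustrative placeholders:

```go
package main

import compute "google.golang.org/api/compute/v1"

// Placeholder resource URLs; adjust for a real project.
const (
	routerURL    = "projects/my-project/regions/us-central1/routers/my-router"
	haGatewayURL = "projects/my-project/regions/us-central1/vpnGateways/ha-gw"
	classicGWURL = "projects/my-project/regions/us-central1/targetVpnGateways/classic-gw"
)

// classicTunnel sets the fields the updated docs reserve for Classic VPN:
// PeerIp, TargetVpnGateway, and the traffic selectors.
func classicTunnel() *compute.VpnTunnel {
	return &compute.VpnTunnel{
		Name:                  "classic-tunnel",
		PeerIp:                "203.0.113.10",
		TargetVpnGateway:      classicGWURL,
		LocalTrafficSelector:  []string{"10.0.0.0/16"},
		RemoteTrafficSelector: []string{"192.168.0.0/16"},
		SharedSecret:          "example-secret",
	}
}

// haTunnel leaves those fields unset; an HA tunnel is keyed off the HA
// gateway, its interface, and a Cloud Router for dynamic routing.
func haTunnel() *compute.VpnTunnel {
	return &compute.VpnTunnel{
		Name:                "ha-tunnel-0",
		VpnGateway:          haGatewayURL,
		VpnGatewayInterface: 0,
		PeerGcpGateway:      "projects/peer-project/regions/us-central1/vpnGateways/peer-gw",
		Router:              routerURL,
		IkeVersion:          2,
		SharedSecret:        "example-secret",
		// Interface 0 is the Go zero value, so force it into the request body.
		ForceSendFields: []string{"VpnGatewayInterface"},
	}
}

func main() { _, _ = classicTunnel(), haTunnel() }
```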
@@ -52719,9 +53189,9 @@ type VpnTunnelList struct { NullFields []string `json:"-"` } -func (s *VpnTunnelList) MarshalJSON() ([]byte, error) { +func (s VpnTunnelList) MarshalJSON() ([]byte, error) { type NoMethod VpnTunnelList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VpnTunnelListWarning: [Output Only] Informational warning message. @@ -52804,9 +53274,9 @@ type VpnTunnelListWarning struct { NullFields []string `json:"-"` } -func (s *VpnTunnelListWarning) MarshalJSON() ([]byte, error) { +func (s VpnTunnelListWarning) MarshalJSON() ([]byte, error) { type NoMethod VpnTunnelListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type VpnTunnelListWarningData struct { @@ -52833,9 +53303,9 @@ type VpnTunnelListWarningData struct { NullFields []string `json:"-"` } -func (s *VpnTunnelListWarningData) MarshalJSON() ([]byte, error) { +func (s VpnTunnelListWarningData) MarshalJSON() ([]byte, error) { type NoMethod VpnTunnelListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type VpnTunnelsScopedList struct { @@ -52857,9 +53327,9 @@ type VpnTunnelsScopedList struct { NullFields []string `json:"-"` } -func (s *VpnTunnelsScopedList) MarshalJSON() ([]byte, error) { +func (s VpnTunnelsScopedList) MarshalJSON() ([]byte, error) { type NoMethod VpnTunnelsScopedList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VpnTunnelsScopedListWarning: Informational warning which replaces the list @@ -52943,9 +53413,9 @@ type VpnTunnelsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *VpnTunnelsScopedListWarning) MarshalJSON() ([]byte, error) { +func (s VpnTunnelsScopedListWarning) MarshalJSON() ([]byte, error) { type NoMethod VpnTunnelsScopedListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type VpnTunnelsScopedListWarningData struct { @@ -52972,9 +53442,9 @@ type VpnTunnelsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *VpnTunnelsScopedListWarningData) MarshalJSON() ([]byte, error) { +func (s VpnTunnelsScopedListWarningData) MarshalJSON() ([]byte, error) { type NoMethod VpnTunnelsScopedListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type WafExpressionSet struct { @@ -53001,9 +53471,9 @@ type WafExpressionSet struct { NullFields []string `json:"-"` } -func (s *WafExpressionSet) MarshalJSON() ([]byte, error) { +func (s WafExpressionSet) MarshalJSON() ([]byte, error) { type NoMethod WafExpressionSet - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type WafExpressionSetExpression struct { @@ -53030,9 +53500,9 @@ type WafExpressionSetExpression struct { NullFields []string `json:"-"` } -func (s *WafExpressionSetExpression) MarshalJSON() ([]byte, error) { +func (s WafExpressionSetExpression) MarshalJSON() ([]byte, error) { type NoMethod WafExpressionSetExpression - 
return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WeightedBackendService: In contrast to a single BackendService in @@ -53076,9 +53546,9 @@ type WeightedBackendService struct { NullFields []string `json:"-"` } -func (s *WeightedBackendService) MarshalJSON() ([]byte, error) { +func (s WeightedBackendService) MarshalJSON() ([]byte, error) { type NoMethod WeightedBackendService - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type XpnHostList struct { @@ -53115,9 +53585,9 @@ type XpnHostList struct { NullFields []string `json:"-"` } -func (s *XpnHostList) MarshalJSON() ([]byte, error) { +func (s XpnHostList) MarshalJSON() ([]byte, error) { type NoMethod XpnHostList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // XpnHostListWarning: [Output Only] Informational warning message. @@ -53200,9 +53670,9 @@ type XpnHostListWarning struct { NullFields []string `json:"-"` } -func (s *XpnHostListWarning) MarshalJSON() ([]byte, error) { +func (s XpnHostListWarning) MarshalJSON() ([]byte, error) { type NoMethod XpnHostListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type XpnHostListWarningData struct { @@ -53229,9 +53699,9 @@ type XpnHostListWarningData struct { NullFields []string `json:"-"` } -func (s *XpnHostListWarningData) MarshalJSON() ([]byte, error) { +func (s XpnHostListWarningData) MarshalJSON() ([]byte, error) { type NoMethod XpnHostListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // XpnResourceId: Service resource (a.k.a service project) ID. @@ -53259,9 +53729,9 @@ type XpnResourceId struct { NullFields []string `json:"-"` } -func (s *XpnResourceId) MarshalJSON() ([]byte, error) { +func (s XpnResourceId) MarshalJSON() ([]byte, error) { type NoMethod XpnResourceId - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Zone: Represents a Zone resource. A zone is a deployment area. These @@ -53314,9 +53784,9 @@ type Zone struct { NullFields []string `json:"-"` } -func (s *Zone) MarshalJSON() ([]byte, error) { +func (s Zone) MarshalJSON() ([]byte, error) { type NoMethod Zone - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ZoneList: Contains a list of zone resources. @@ -53353,9 +53823,9 @@ type ZoneList struct { NullFields []string `json:"-"` } -func (s *ZoneList) MarshalJSON() ([]byte, error) { +func (s ZoneList) MarshalJSON() ([]byte, error) { type NoMethod ZoneList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ZoneListWarning: [Output Only] Informational warning message. 
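Zone and ZoneList above follow the generated list-resource shape, an Items slice plus a NextPageToken, which the Pages helper drains automatically. A sketch of listing zones with the regenerated client, assuming Application Default Credentials and a placeholder project ID:

```go
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// NewService picks up Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// "my-project" is a placeholder project ID. Pages follows
	// NextPageToken until every ZoneList page has been visited.
	err = svc.Zones.List("my-project").Pages(ctx, func(zl *compute.ZoneList) error {
		for _, z := range zl.Items {
			fmt.Println(z.Name, z.Status)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```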
@@ -53438,9 +53908,9 @@ type ZoneListWarning struct { NullFields []string `json:"-"` } -func (s *ZoneListWarning) MarshalJSON() ([]byte, error) { +func (s ZoneListWarning) MarshalJSON() ([]byte, error) { type NoMethod ZoneListWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ZoneListWarningData struct { @@ -53467,9 +53937,9 @@ type ZoneListWarningData struct { NullFields []string `json:"-"` } -func (s *ZoneListWarningData) MarshalJSON() ([]byte, error) { +func (s ZoneListWarningData) MarshalJSON() ([]byte, error) { type NoMethod ZoneListWarningData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ZoneSetLabelsRequest struct { @@ -53495,9 +53965,9 @@ type ZoneSetLabelsRequest struct { NullFields []string `json:"-"` } -func (s *ZoneSetLabelsRequest) MarshalJSON() ([]byte, error) { +func (s ZoneSetLabelsRequest) MarshalJSON() ([]byte, error) { type NoMethod ZoneSetLabelsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ZoneSetPolicyRequest struct { @@ -53525,7 +53995,7 @@ type ZoneSetPolicyRequest struct { NullFields []string `json:"-"` } -func (s *ZoneSetPolicyRequest) MarshalJSON() ([]byte, error) { +func (s ZoneSetPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod ZoneSetPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } diff --git a/terraform/providers/google/vendor/google.golang.org/api/compute/v1/compute2-gen.go b/terraform/providers/google/vendor/google.golang.org/api/compute/v1/compute2-gen.go index 424c7e15f91..f66a5b58a34 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/compute/v1/compute2-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/compute/v1/compute2-gen.go @@ -5413,8 +5413,8 @@ type BackendServicesListUsableCall struct { header_ http.Header } -// ListUsable: Retrieves an aggregated list of all usable backend services in -// the specified project. +// ListUsable: Retrieves a list of all usable backend services in the specified +// project. // // - project: Project ID for this request. func (r *BackendServicesService) ListUsable(project string) *BackendServicesListUsableCall { @@ -17076,22 +17076,23 @@ func (c *GlobalOperationsAggregatedListCall) Pages(ctx context.Context, f func(* } type GlobalOperationsDeleteCall struct { - s *Service - project string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header + s *Service + project string + operationid string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } // Delete: Deletes the specified Operations resource. // -// - operation: Name of the Operations resource to delete. -// - project: Project ID for this request. -func (r *GlobalOperationsService) Delete(project string, operation string) *GlobalOperationsDeleteCall { +// - operation: Name of the Operations resource to delete, or its unique +// numeric identifier. +// - project: Project ID for this request. 
+func (r *GlobalOperationsService) Delete(project string, operationid string) *GlobalOperationsDeleteCall { c := &GlobalOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.operation = operation + c.operationid = operationid return c } @@ -17132,7 +17133,7 @@ func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, erro req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "operation": c.operation, + "operation": c.operationid, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -17154,7 +17155,7 @@ func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { type GlobalOperationsGetCall struct { s *Service project string - operation string + operationid string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context @@ -17163,12 +17164,13 @@ type GlobalOperationsGetCall struct { // Get: Retrieves the specified Operations resource. // -// - operation: Name of the Operations resource to return. -// - project: Project ID for this request. -func (r *GlobalOperationsService) Get(project string, operation string) *GlobalOperationsGetCall { +// - operation: Name of the Operations resource to return, or its unique +// numeric identifier. +// - project: Project ID for this request. +func (r *GlobalOperationsService) Get(project string, operationid string) *GlobalOperationsGetCall { c := &GlobalOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.operation = operation + c.operationid = operationid return c } @@ -17220,7 +17222,7 @@ func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "operation": c.operation, + "operation": c.operationid, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -17470,12 +17472,12 @@ func (c *GlobalOperationsListCall) Pages(ctx context.Context, f func(*OperationL } type GlobalOperationsWaitCall struct { - s *Service - project string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header + s *Service + project string + operationid string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } // Wait: Waits for the specified Operation resource to return as `DONE` or for @@ -17490,12 +17492,13 @@ type GlobalOperationsWaitCall struct { // actually done when the method returns. Be prepared to retry if the operation // is not `DONE`. // -// - operation: Name of the Operations resource to return. -// - project: Project ID for this request. -func (r *GlobalOperationsService) Wait(project string, operation string) *GlobalOperationsWaitCall { +// - operation: Name of the Operations resource to return, or its unique +// numeric identifier. +// - project: Project ID for this request. 
+func (r *GlobalOperationsService) Wait(project string, operationid string) *GlobalOperationsWaitCall { c := &GlobalOperationsWaitCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.operation = operation + c.operationid = operationid return c } @@ -17536,7 +17539,7 @@ func (c *GlobalOperationsWaitCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "operation": c.operation, + "operation": c.operationid, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -17579,19 +17582,20 @@ func (c *GlobalOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, } type GlobalOrganizationOperationsDeleteCall struct { - s *Service - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header + s *Service + operationid string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } // Delete: Deletes the specified Operations resource. // -// - operation: Name of the Operations resource to delete. -func (r *GlobalOrganizationOperationsService) Delete(operation string) *GlobalOrganizationOperationsDeleteCall { +// - operation: Name of the Operations resource to delete, or its unique +// numeric identifier. +func (r *GlobalOrganizationOperationsService) Delete(operationid string) *GlobalOrganizationOperationsDeleteCall { c := &GlobalOrganizationOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.operation = operation + c.operationid = operationid return c } @@ -17637,7 +17641,7 @@ func (c *GlobalOrganizationOperationsDeleteCall) doRequest(alt string) (*http.Re } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "operation": c.operation, + "operation": c.operationid, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -17658,7 +17662,7 @@ func (c *GlobalOrganizationOperationsDeleteCall) Do(opts ...googleapi.CallOption type GlobalOrganizationOperationsGetCall struct { s *Service - operation string + operationid string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context @@ -17668,10 +17672,11 @@ type GlobalOrganizationOperationsGetCall struct { // Get: Retrieves the specified Operations resource. Gets a list of operations // by making a `list()` request. // -// - operation: Name of the Operations resource to return. -func (r *GlobalOrganizationOperationsService) Get(operation string) *GlobalOrganizationOperationsGetCall { +// - operation: Name of the Operations resource to return, or its unique +// numeric identifier. 
+func (r *GlobalOrganizationOperationsService) Get(operationid string) *GlobalOrganizationOperationsGetCall { c := &GlobalOrganizationOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.operation = operation + c.operationid = operationid return c } @@ -17728,7 +17733,7 @@ func (c *GlobalOrganizationOperationsGetCall) doRequest(alt string) (*http.Respo } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "operation": c.operation, + "operation": c.operationid, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } diff --git a/terraform/providers/google/vendor/google.golang.org/api/compute/v1/compute3-gen.go b/terraform/providers/google/vendor/google.golang.org/api/compute/v1/compute3-gen.go index 254547bb0a6..1e08b6980c5 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/compute/v1/compute3-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/compute/v1/compute3-gen.go @@ -828,8 +828,8 @@ type RegionBackendServicesListUsableCall struct { header_ http.Header } -// ListUsable: Retrieves an aggregated list of all usable backend services in -// the specified project in the given region. +// ListUsable: Retrieves a list of all usable backend services in the specified +// project in the given region. // // - project: Project ID for this request. // - region: Name of the region scoping this request. It must be a string that @@ -15367,25 +15367,26 @@ func (c *RegionNotificationEndpointsListCall) Pages(ctx context.Context, f func( } type RegionOperationsDeleteCall struct { - s *Service - project string - region string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header + s *Service + project string + region string + operationid string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } // Delete: Deletes the specified region-specific Operations resource. // -// - operation: Name of the Operations resource to delete. -// - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *RegionOperationsService) Delete(project string, region string, operation string) *RegionOperationsDeleteCall { +// - operation: Name of the Operations resource to delete, or its unique +// numeric identifier. +// - project: Project ID for this request. +// - region: Name of the region for this request. +func (r *RegionOperationsService) Delete(project string, region string, operationid string) *RegionOperationsDeleteCall { c := &RegionOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.operation = operation + c.operationid = operationid return c } @@ -15427,7 +15428,7 @@ func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, erro googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, - "operation": c.operation, + "operation": c.operationid, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -15450,7 +15451,7 @@ type RegionOperationsGetCall struct { s *Service project string region string - operation string + operationid string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context @@ -15459,14 +15460,15 @@ type RegionOperationsGetCall struct { // Get: Retrieves the specified region-specific Operations resource. // -// - operation: Name of the Operations resource to return. -// - project: Project ID for this request. -// - region: Name of the region for this request. 
-func (r *RegionOperationsService) Get(project string, region string, operation string) *RegionOperationsGetCall { +// - operation: Name of the Operations resource to return, or its unique +// numeric identifier. +// - project: Project ID for this request. +// - region: Name of the region for this request. +func (r *RegionOperationsService) Get(project string, region string, operationid string) *RegionOperationsGetCall { c := &RegionOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.operation = operation + c.operationid = operationid return c } @@ -15519,7 +15521,7 @@ func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, - "operation": c.operation, + "operation": c.operationid, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -15773,13 +15775,13 @@ func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationL } type RegionOperationsWaitCall struct { - s *Service - project string - region string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header + s *Service + project string + region string + operationid string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } // Wait: Waits for the specified Operation resource to return as `DONE` or for @@ -15794,14 +15796,15 @@ type RegionOperationsWaitCall struct { // actually done when the method returns. Be prepared to retry if the operation // is not `DONE`. // -// - operation: Name of the Operations resource to return. -// - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *RegionOperationsService) Wait(project string, region string, operation string) *RegionOperationsWaitCall { +// - operation: Name of the Operations resource to return, or its unique +// numeric identifier. +// - project: Project ID for this request. +// - region: Name of the region for this request. +func (r *RegionOperationsService) Wait(project string, region string, operationid string) *RegionOperationsWaitCall { c := &RegionOperationsWaitCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.operation = operation + c.operationid = operationid return c } @@ -15843,7 +15846,7 @@ func (c *RegionOperationsWaitCall) doRequest(alt string) (*http.Response, error) googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, - "operation": c.operation, + "operation": c.operationid, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -21851,7 +21854,13 @@ type RegionsGetCall struct { // quota information (the `quotas` field). To exclude one or more fields, set // your request's `fields` query parameter to only include the fields you need. // For example, to only include the `id` and `selfLink` fields, add the query -// parameter `?fields=id,selfLink` to your request. +// parameter `?fields=id,selfLink` to your request. This method fails if the +// quota information is unavailable for the region and if the organization +// policy constraint compute.requireBasicQuotaInResponse is enforced. This +// constraint, when enforced, disables the fail-open behaviour when quota +// information (the `items.quotas` field) is unavailable for the region. It is +// recommended to use the default setting for the constraint unless your +// application requires the fail-closed behaviour for this method. 
// // - project: Project ID for this request. // - region: Name of the region resource to return. @@ -34987,8 +34996,9 @@ type StoragePoolsUpdateCall struct { // Update: Updates the specified storagePool with the data included in the // request. The update is performed only on selected fields included as part of -// update-mask. Only the following fields can be modified: size_tb and -// provisioned_iops. +// update-mask. Only the following fields can be modified: +// pool_provisioned_capacity_gb, pool_provisioned_iops and +// pool_provisioned_throughput. // // - project: Project ID for this request. // - storagePool: The storagePool name for this request. @@ -49062,25 +49072,26 @@ func (c *VpnTunnelsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, } type ZoneOperationsDeleteCall struct { - s *Service - project string - zone string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header + s *Service + project string + zone string + operationid string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } // Delete: Deletes the specified zone-specific Operations resource. // -// - operation: Name of the Operations resource to delete. -// - project: Project ID for this request. -// - zone: Name of the zone for this request. -func (r *ZoneOperationsService) Delete(project string, zone string, operation string) *ZoneOperationsDeleteCall { +// - operation: Name of the Operations resource to delete, or its unique +// numeric identifier. +// - project: Project ID for this request. +// - zone: Name of the zone for this request. +func (r *ZoneOperationsService) Delete(project string, zone string, operationid string) *ZoneOperationsDeleteCall { c := &ZoneOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.operation = operation + c.operationid = operationid return c } @@ -49122,7 +49133,7 @@ func (c *ZoneOperationsDeleteCall) doRequest(alt string) (*http.Response, error) googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "operation": c.operation, + "operation": c.operationid, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -49145,7 +49156,7 @@ type ZoneOperationsGetCall struct { s *Service project string zone string - operation string + operationid string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context @@ -49154,14 +49165,15 @@ type ZoneOperationsGetCall struct { // Get: Retrieves the specified zone-specific Operations resource. // -// - operation: Name of the Operations resource to return. -// - project: Project ID for this request. -// - zone: Name of the zone for this request. -func (r *ZoneOperationsService) Get(project string, zone string, operation string) *ZoneOperationsGetCall { +// - operation: Name of the Operations resource to return, or its unique +// numeric identifier. +// - project: Project ID for this request. +// - zone: Name of the zone for this request. 
+func (r *ZoneOperationsService) Get(project string, zone string, operationid string) *ZoneOperationsGetCall { c := &ZoneOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.operation = operation + c.operationid = operationid return c } @@ -49214,7 +49226,7 @@ func (c *ZoneOperationsGetCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "operation": c.operation, + "operation": c.operationid, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -49468,13 +49480,13 @@ func (c *ZoneOperationsListCall) Pages(ctx context.Context, f func(*OperationLis } type ZoneOperationsWaitCall struct { - s *Service - project string - zone string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header + s *Service + project string + zone string + operationid string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } // Wait: Waits for the specified Operation resource to return as `DONE` or for @@ -49488,14 +49500,15 @@ type ZoneOperationsWaitCall struct { // guarantee that the operation is actually done when the method returns. Be // prepared to retry if the operation is not `DONE`. // -// - operation: Name of the Operations resource to return. -// - project: Project ID for this request. -// - zone: Name of the zone for this request. -func (r *ZoneOperationsService) Wait(project string, zone string, operation string) *ZoneOperationsWaitCall { +// - operation: Name of the Operations resource to return, or its unique +// numeric identifier. +// - project: Project ID for this request. +// - zone: Name of the zone for this request. +func (r *ZoneOperationsService) Wait(project string, zone string, operationid string) *ZoneOperationsWaitCall { c := &ZoneOperationsWaitCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.operation = operation + c.operationid = operationid return c } @@ -49537,7 +49550,7 @@ func (c *ZoneOperationsWaitCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "operation": c.operation, + "operation": c.operationid, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } diff --git a/terraform/providers/google/vendor/google.golang.org/api/container/v1/container-api.json b/terraform/providers/google/vendor/google.golang.org/api/container/v1/container-api.json index 79bf232105e..4d0b2c28d29 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/container/v1/container-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/container/v1/container-api.json @@ -14,7 +14,7 @@ "canonicalName": "Container", "description": "Builds and manages container-based applications, powered by the open source Kubernetes technology.", "discoveryVersion": "v1", - "documentationLink": "https://cloud.google.com/container-engine/", + "documentationLink": "https://cloud.google.com/kubernetes-engine/docs/", "fullyEncodeReservedExpansion": true, "icons": { "x16": "http://www.google.com/images/icons/product/search-16.gif", @@ -2540,7 +2540,7 @@ } } }, - "revision": "20240510", + "revision": "20240923", "rootUrl": "https://container.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -2592,14 +2592,18 @@ "properties": { "maxPodsPerNode": { "$ref": "MaxPodsConstraint", - "description": "The maximum number of pods per node which use this pod network" 
+ "description": "The maximum number of pods per node which use this pod network." + }, + "networkAttachment": { + "description": "The name of the network attachment for pods to communicate to; cannot be specified along with subnetwork or secondary_pod_range.", + "type": "string" }, "secondaryPodRange": { - "description": "The name of the secondary range on the subnet which provides IP address for this pod range", + "description": "The name of the secondary range on the subnet which provides IP address for this pod range.", "type": "string" }, "subnetwork": { - "description": "Name of the subnetwork where the additional pod network belongs", + "description": "Name of the subnetwork where the additional pod network belongs.", "type": "string" } }, @@ -2610,7 +2614,7 @@ "id": "AdditionalPodRangesConfig", "properties": { "podRangeInfo": { - "description": "Output only. [Output only] Information for additional pod range.", + "description": "Output only. Information for additional pod range.", "items": { "$ref": "RangeInfo" }, @@ -2676,6 +2680,10 @@ "$ref": "NetworkPolicyConfig", "description": "Configuration for NetworkPolicy. This only tracks whether the addon is enabled or not on the Master, it does not track whether network policy is enabled for the nodes." }, + "rayOperatorConfig": { + "$ref": "RayOperatorConfig", + "description": "Optional. Configuration for Ray Operator addon." + }, "statefulHaConfig": { "$ref": "StatefulHAConfig", "description": "Optional. Configuration for the StatefulHA add-on." @@ -2750,11 +2758,13 @@ "id": "AutoUpgradeOptions", "properties": { "autoUpgradeStartTime": { - "description": "[Output only] This field is set when upgrades are about to commence with the approximate start time for the upgrades, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "description": "Output only. This field is set when upgrades are about to commence with the approximate start time for the upgrades, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "readOnly": true, "type": "string" }, "description": { - "description": "[Output only] This field is set when upgrades are about to commence with the description of the upgrade.", + "description": "Output only. This field is set when upgrades are about to commence with the description of the upgrade.", + "readOnly": true, "type": "string" } }, @@ -3141,6 +3151,10 @@ "description": "The IP address range of the container pods in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`). Leave blank to have one automatically chosen or specify a `/14` block in `10.0.0.0/8`.", "type": "string" }, + "compliancePostureConfig": { + "$ref": "CompliancePostureConfig", + "description": "Enable/Disable Compliance Posture features for the cluster." + }, "conditions": { "description": "Which conditions caused the current cluster state.", "items": { @@ -3152,27 +3166,35 @@ "$ref": "ConfidentialNodes", "description": "Configuration of Confidential Nodes. All the nodes in the cluster will be Confidential VM once enabled." }, + "controlPlaneEndpointsConfig": { + "$ref": "ControlPlaneEndpointsConfig", + "description": "Configuration for all cluster's control plane endpoints." + }, "costManagementConfig": { "$ref": "CostManagementConfig", "description": "Configuration for the fine-grained cost management feature." 
}, "createTime": { - "description": "[Output only] The time the cluster was created, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "description": "Output only. The time the cluster was created, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "readOnly": true, "type": "string" }, "currentMasterVersion": { - "description": "[Output only] The current software version of the master endpoint.", + "description": "Output only. The current software version of the master endpoint.", + "readOnly": true, "type": "string" }, "currentNodeCount": { "deprecated": true, - "description": "[Output only] The number of nodes currently in the cluster. Deprecated. Call Kubernetes API directly to retrieve node information.", + "description": "Output only. The number of nodes currently in the cluster. Deprecated. Call Kubernetes API directly to retrieve node information.", "format": "int32", + "readOnly": true, "type": "integer" }, "currentNodeVersion": { "deprecated": true, - "description": "[Output only] Deprecated, use [NodePools.version](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters.nodePools) instead. The current version of the node software components. If they are currently at multiple versions because they're in the process of being upgraded, this reflects the minimum version of all nodes.", + "description": "Output only. Deprecated, use [NodePools.version](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters.nodePools) instead. The current version of the node software components. If they are currently at multiple versions because they're in the process of being upgraded, this reflects the minimum version of all nodes.", + "readOnly": true, "type": "string" }, "databaseEncryption": { @@ -3200,7 +3222,8 @@ "type": "boolean" }, "endpoint": { - "description": "[Output only] The IP address of this cluster's master endpoint. The endpoint can be accessed from the internet at `https://username:password@endpoint/`. See the `masterAuth` property of this resource for username and password information.", + "description": "Output only. The IP address of this cluster's master endpoint. The endpoint can be accessed from the internet at `https://username:password@endpoint/`. See the `masterAuth` property of this resource for username and password information.", + "readOnly": true, "type": "string" }, "enterpriseConfig": { @@ -3212,7 +3235,8 @@ "type": "string" }, "expireTime": { - "description": "[Output only] The time the cluster will be automatically deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "description": "Output only. The time the cluster will be automatically deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "readOnly": true, "type": "string" }, "fleet": { @@ -3240,10 +3264,11 @@ }, "instanceGroupUrls": { "deprecated": true, - "description": "Deprecated. Use node_pools.instance_group_urls.", + "description": "Output only. Deprecated. Use node_pools.instance_group_urls.", "items": { "type": "string" }, + "readOnly": true, "type": "array" }, "ipAllocationPolicy": { @@ -3259,7 +3284,8 @@ "description": "Configuration for the legacy ABAC authorization mode." 
}, "location": { - "description": "[Output only] The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) or [region](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) in which the cluster resides.", + "description": "Output only. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) or [region](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) in which the cluster resides.", + "readOnly": true, "type": "string" }, "locations": { @@ -3287,7 +3313,8 @@ }, "masterAuthorizedNetworksConfig": { "$ref": "MasterAuthorizedNetworksConfig", - "description": "The configuration options for master authorized networks feature." + "deprecated": true, + "description": "The configuration options for master authorized networks feature. Deprecated: Use ControlPlaneEndpointsConfig.IPEndpointsConfig.authorized_networks_config instead." }, "meshCertificates": { "$ref": "MeshCertificates", @@ -3323,8 +3350,9 @@ "description": "Parameters used in creating the cluster's nodes. For requests, this field should only be used in lieu of a \"node_pool\" object, since this configuration (along with the \"initial_node_count\") will be used to create a \"NodePool\" object with an auto-generated name. Do not use this and a node_pool at the same time. For responses, this field will be populated with the node configuration of the first node pool. (For configuration of each node pool, see `node_pool.config`) If unspecified, the defaults are used. This field is deprecated, use node_pool.config instead." }, "nodeIpv4CidrSize": { - "description": "[Output only] The size of the address space on each node for hosting containers. This is provisioned from within the `container_ipv4_cidr` range. This field will only be set when cluster is in route-based network mode.", + "description": "Output only. The size of the address space on each node for hosting containers. This is provisioned from within the `container_ipv4_cidr` range. This field will only be set when cluster is in route-based network mode.", "format": "int32", + "readOnly": true, "type": "integer" }, "nodePoolAutoConfig": { @@ -3354,6 +3382,10 @@ "$ref": "PrivateClusterConfig", "description": "Configuration for private cluster." }, + "rbacBindingConfig": { + "$ref": "RBACBindingConfig", + "description": "RBACBindingConfig allows users to restrict ClusterRoleBindings and RoleBindings that can be created." + }, "releaseChannel": { "$ref": "ReleaseChannel", "description": "Release channel configuration. If left unspecified on cluster creation and a version is specified, the cluster is enrolled in the most mature release channel where the version is available (first checking STABLE, then REGULAR, and finally RAPID). Otherwise, if no release channel configuration and no version is specified, the cluster is enrolled in the REGULAR channel with its default version." @@ -3379,16 +3411,22 @@ "readOnly": true, "type": "boolean" }, + "secretManagerConfig": { + "$ref": "SecretManagerConfig", + "description": "Secret CSI driver configuration." + }, "securityPostureConfig": { "$ref": "SecurityPostureConfig", "description": "Enable/Disable Security Posture API features for the cluster." }, "selfLink": { - "description": "[Output only] Server-defined URL for the resource.", + "description": "Output only.
Server-defined URL for the resource.", + "readOnly": true, "type": "string" }, "servicesIpv4Cidr": { - "description": "[Output only] The IP address range of the Kubernetes services in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`). Service addresses are typically put in the last `/16` from the container CIDR.", + "description": "Output only. The IP address range of the Kubernetes services in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`). Service addresses are typically put in the last `/16` from the container CIDR.", + "readOnly": true, "type": "string" }, "shieldedNodes": { @@ -3396,7 +3434,7 @@ "description": "Shielded Nodes configuration." }, "status": { - "description": "[Output only] The current status of this cluster.", + "description": "Output only. The current status of this cluster.", "enum": [ "STATUS_UNSPECIFIED", "PROVISIONING", @@ -3415,11 +3453,13 @@ "The ERROR state indicates the cluster is unusable. It will be automatically deleted. Details can be found in the `statusMessage` field.", "The DEGRADED state indicates the cluster requires user action to restore full functionality. Details can be found in the `statusMessage` field." ], + "readOnly": true, "type": "string" }, "statusMessage": { "deprecated": true, - "description": "[Output only] Deprecated. Use conditions instead. Additional information about the current status of this cluster, if available.", + "description": "Output only. Deprecated. Use conditions instead. Additional information about the current status of this cluster, if available.", + "readOnly": true, "type": "string" }, "subnetwork": { @@ -3427,9 +3467,14 @@ "type": "string" }, "tpuIpv4CidrBlock": { - "description": "[Output only] The IP address range of the Cloud TPUs in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`).", + "description": "Output only. The IP address range of the Cloud TPUs in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`).", + "readOnly": true, "type": "string" }, + "userManagedKeysConfig": { + "$ref": "UserManagedKeysConfig", + "description": "The Custom keys configuration for the cluster." + }, "verticalPodAutoscaling": { "$ref": "VerticalPodAutoscaling", "description": "Cluster-level Vertical Pod Autoscaling configuration." @@ -3440,7 +3485,8 @@ }, "zone": { "deprecated": true, - "description": "[Output only] The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field is deprecated, use location instead.", + "description": "Output only. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field is deprecated, use location instead.", + "readOnly": true, "type": "string" } }, @@ -3536,10 +3582,18 @@ "$ref": "ClusterAutoscaling", "description": "Cluster-level autoscaling configuration." }, + "desiredCompliancePostureConfig": { + "$ref": "CompliancePostureConfig", + "description": "Enable/Disable Compliance Posture features for the cluster." + }, "desiredContainerdConfig": { "$ref": "ContainerdConfig", "description": "The desired containerd config for the cluster." }, + "desiredControlPlaneEndpointsConfig": { + "$ref": "ControlPlaneEndpointsConfig", + "description": "Control plane endpoints configuration." 
+ },   "desiredCostManagementConfig": { "$ref": "CostManagementConfig", "description": "The desired configuration for the fine-grained cost management feature." @@ -3562,6 +3616,10 @@ ], "type": "string" }, + "desiredDefaultEnablePrivateNodes": { + "description": "Override the default setting of whether future created nodes have private IP addresses only, namely NetworkConfig.default_enable_private_nodes", + "type": "boolean" + }, "desiredDefaultSnatStatus": { "$ref": "DefaultSnatStatus", "description": "The desired status of whether to disable default sNAT for this cluster." @@ -3583,7 +3641,8 @@ "type": "boolean" }, "desiredEnablePrivateEndpoint": { - "description": "Enable/Disable private endpoint for the cluster's master.", + "deprecated": true, + "description": "Enable/Disable private endpoint for the cluster's master. Deprecated: Use desired_control_plane_endpoints_config.ip_endpoints_config.enable_public_endpoint instead. Note that the value of enable_public_endpoint is reversed: if enable_private_endpoint is false, then enable_public_endpoint will be true.", "type": "boolean" }, "desiredFleet": { @@ -3649,7 +3708,8 @@ }, "desiredMasterAuthorizedNetworksConfig": { "$ref": "MasterAuthorizedNetworksConfig", - "description": "The desired configuration options for master authorized networks feature." + "deprecated": true, + "description": "The desired configuration options for master authorized networks feature. Deprecated: Use desired_control_plane_endpoints_config.ip_endpoints_config.authorized_networks_config instead." }, "desiredMasterVersion": { "description": "The Kubernetes version to change the master to. Users may specify either explicit versions offered by Kubernetes Engine or version aliases, which have the following behavior: - \"latest\": picks the highest valid Kubernetes version - \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version - \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version - \"1.X.Y-gke.N\": picks an explicit Kubernetes version - \"-\": picks the default Kubernetes version", @@ -3713,7 +3773,8 @@ }, "desiredPrivateClusterConfig": { "$ref": "PrivateClusterConfig", - "description": "The desired private cluster configuration. master_global_access_config is the only field that can be changed via this field. See also ClusterUpdate.desired_enable_private_endpoint for modifying other fields within PrivateClusterConfig." + "deprecated": true, + "description": "The desired private cluster configuration. master_global_access_config is the only field that can be changed via this field. See also ClusterUpdate.desired_enable_private_endpoint for modifying other fields within PrivateClusterConfig. Deprecated: Use desired_control_plane_endpoints_config.ip_endpoints_config.global_access instead." }, "desiredPrivateIpv6GoogleAccess": { "description": "The desired state of IPv6 connectivity to Google Services.", @@ -3731,6 +3792,10 @@ ], "type": "string" }, + "desiredRbacBindingConfig": { + "$ref": "RBACBindingConfig", + "description": "RBACBindingConfig allows users to restrict ClusterRoleBindings and RoleBindings that can be created." + }, "desiredReleaseChannel": { "$ref": "ReleaseChannel", "description": "The desired release channel configuration." @@ -3739,6 +3804,10 @@ "$ref": "ResourceUsageExportConfig", "description": "The desired configuration for exporting resource usage." }, + "desiredSecretManagerConfig": { + "$ref": "SecretManagerConfig", + "description": "Enable/Disable Secret Manager Config."
+ }, "desiredSecurityPostureConfig": { "$ref": "SecurityPostureConfig", "description": "Enable/Disable Security Posture API features for the cluster." @@ -3784,6 +3853,10 @@ "removedAdditionalPodRangesConfig": { "$ref": "AdditionalPodRangesConfig", "description": "The additional pod ranges that are to be removed from the cluster. The pod ranges specified here must have been specified earlier in the 'additional_pod_ranges_config' argument." + }, + "userManagedKeysConfig": { + "$ref": "UserManagedKeysConfig", + "description": "The Custom keys configuration for the cluster." } }, "type": "object" @@ -3820,6 +3893,45 @@ "properties": {}, "type": "object" }, + "CompliancePostureConfig": { + "description": "CompliancePostureConfig defines the settings needed to enable/disable features for the Compliance Posture.", + "id": "CompliancePostureConfig", + "properties": { + "complianceStandards": { + "description": "List of enabled compliance standards.", + "items": { + "$ref": "ComplianceStandard" + }, + "type": "array" + }, + "mode": { + "description": "Defines the enablement mode for Compliance Posture.", + "enum": [ + "MODE_UNSPECIFIED", + "DISABLED", + "ENABLED" + ], + "enumDescriptions": [ + "Default value not specified.", + "Disables Compliance Posture features on the cluster.", + "Enables Compliance Posture features on the cluster." + ], + "type": "string" + } + }, + "type": "object" + }, + "ComplianceStandard": { + "description": "Defines the details of a compliance standard.", + "id": "ComplianceStandard", + "properties": { + "standard": { + "description": "Name of the compliance standard.", + "type": "string" + } + }, + "type": "object" + }, "ConfidentialNodes": { "description": "ConfidentialNodes is configuration for the confidential nodes feature, which makes nodes run on confidential VMs.", "id": "ConfidentialNodes", @@ -3864,6 +3976,21 @@ }, "type": "object" }, + "ControlPlaneEndpointsConfig": { + "description": "Configuration for all of the cluster's control plane endpoints.", + "id": "ControlPlaneEndpointsConfig", + "properties": { + "dnsEndpointConfig": { + "$ref": "DNSEndpointConfig", + "description": "DNS endpoint configuration." + }, + "ipEndpointsConfig": { + "$ref": "IPEndpointsConfig", + "description": "IP endpoints configuration." + } + }, + "type": "object" + }, "CostManagementConfig": { "description": "Configuration for fine-grained cost management feature.", "id": "CostManagementConfig", @@ -3975,12 +4102,29 @@ }, "type": "object" }, + "DNSEndpointConfig": { + "description": "Describes the configuration of a DNS endpoint.", + "id": "DNSEndpointConfig", + "properties": { + "allowExternalTraffic": { + "description": "Controls whether user traffic is allowed over this endpoint. Note that GCP-managed services may still use the endpoint even if this is false.", + "type": "boolean" + }, + "endpoint": { + "description": "Output only. The cluster's DNS endpoint configuration. A DNS format address. This is accessible from the public internet. Ex: uid.us-central1.gke.goog. Always present, but the behavior may change according to the value of DNSEndpointConfig.allow_external_traffic.", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, "DailyMaintenanceWindow": { "description": "Time window specified for daily maintenance operations.", "id": "DailyMaintenanceWindow", "properties": { "duration": { - "description": "[Output only] Duration of the time window, automatically chosen to be smallest possible in the given scenario. 
Duration will be in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) format \"PTnHnMnS\".", + "description": "Output only. Duration of the time window, automatically chosen to be smallest possible in the given scenario. Duration will be in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) format \"PTnHnMnS\".", + "readOnly": true, "type": "string" }, "startTime": { @@ -4087,7 +4231,7 @@ "id": "EnterpriseConfig", "properties": { "clusterTier": { - "description": "Output only. [Output only] cluster_tier specifies the premium tier of the cluster.", + "description": "Output only. cluster_tier indicates the effective tier of the cluster.", "enum": [ "CLUSTER_TIER_UNSPECIFIED", "STANDARD", @@ -4158,11 +4302,13 @@ "id": "Fleet", "properties": { "membership": { - "description": "[Output only] The full resource name of the registered fleet membership of the cluster, in the format `//gkehub.googleapis.com/projects/*/locations/*/memberships/*`.", + "description": "Output only. The full resource name of the registered fleet membership of the cluster, in the format `//gkehub.googleapis.com/projects/*/locations/*/memberships/*`.", + "readOnly": true, "type": "string" }, "preRegistered": { - "description": "[Output only] Whether the cluster has been registered through the fleet API.", + "description": "Output only. Whether the cluster has been registered through the fleet API.", + "readOnly": true, "type": "boolean" }, "project": { @@ -4244,10 +4390,16 @@ "CHANNEL_EXPERIMENTAL", "CHANNEL_STANDARD" ], + "enumDeprecated": [ + false, + false, + true, + false + ], "enumDescriptions": [ "Default value.", "Gateway API support is disabled", - "Gateway API support is enabled, experimental CRDs are installed", + "Deprecated: use CHANNEL_STANDARD instead. Gateway API support is enabled, experimental CRDs are installed", "Gateway API support is enabled, standard CRDs are installed" ], "type": "string" @@ -4305,7 +4457,7 @@ "properties": { "cacheHeader": { "$ref": "HttpCacheControlResponseHeader", - "description": "OnePlatform automatically extracts this field and uses it to set the HTTP Cache-Control header." + "description": "For HTTP requests, this field is automatically extracted into the Cache-Control HTTP header." }, "keys": { "description": "The public component of the keys used by the cluster to sign token requests.", @@ -4323,7 +4475,7 @@ "properties": { "cacheHeader": { "$ref": "HttpCacheControlResponseHeader", - "description": "OnePlatform automatically extracts this field and uses it to set the HTTP Cache-Control header." + "description": "For HTTP requests, this field is automatically extracted into the Cache-Control HTTP header." }, "claims_supported": { "description": "Supported claims.", @@ -4458,7 +4610,7 @@ "properties": { "additionalPodRangesConfig": { "$ref": "AdditionalPodRangesConfig", - "description": "Output only. [Output only] The additional pod ranges that are added to the cluster. These pod ranges can be used by new node pools to allocate pod IPs automatically. Once the range is removed it will not show up in IPAllocationPolicy.", + "description": "Output only. The additional pod ranges that are added to the cluster. These pod ranges can be used by new node pools to allocate pod IPs automatically. Once the range is removed it will not show up in IPAllocationPolicy.", "readOnly": true }, "clusterIpv4Cidr": { @@ -4479,7 +4631,7 @@ "type": "boolean" }, "defaultPodIpv4RangeUtilization": { - "description": "Output only. [Output only] The utilization of the cluster default IPv4 range for the pod. 
The ratio is Usage/[Total number of IPs in the secondary range], Usage=numNodes*numZones*podIPsPerNode.", + "description": "Output only. The utilization of the cluster default IPv4 range for the pod. The ratio is Usage/[Total number of IPs in the secondary range], Usage=numNodes*numZones*podIPsPerNode.", "format": "double", "readOnly": true, "type": "number" @@ -4521,7 +4673,7 @@ "type": "string" }, "servicesIpv6CidrBlock": { - "description": "Output only. [Output only] The services IPv6 CIDR block for the cluster.", + "description": "Output only. The services IPv6 CIDR block for the cluster.", "readOnly": true, "type": "string" }, @@ -4544,7 +4696,7 @@ "type": "string" }, "subnetIpv6CidrBlock": { - "description": "Output only. [Output only] The subnet's IPv6 CIDR block used by nodes and pods.", + "description": "Output only. The subnet's IPv6 CIDR block used by nodes and pods.", "readOnly": true, "type": "string" }, @@ -4567,6 +4719,43 @@ }, "type": "object" }, + "IPEndpointsConfig": { + "description": "IP endpoints configuration.", + "id": "IPEndpointsConfig", + "properties": { + "authorizedNetworksConfig": { + "$ref": "MasterAuthorizedNetworksConfig", + "description": "Configuration of authorized networks. If enabled, restricts access to the control plane based on source IP. It is invalid to specify both Cluster.masterAuthorizedNetworksConfig and this field at the same time." + }, + "enablePublicEndpoint": { + "description": "Controls whether the control plane allows access through a public IP. It is invalid to specify both PrivateClusterConfig.enablePrivateEndpoint and this field at the same time.", + "type": "boolean" + }, + "enabled": { + "description": "Controls whether to allow direct IP access.", + "type": "boolean" + }, + "globalAccess": { + "description": "Controls whether the control plane's private endpoint is accessible from sources in other regions. It is invalid to specify both PrivateClusterMasterGlobalAccessConfig.enabled and this field at the same time.", + "type": "boolean" + }, + "privateEndpoint": { + "description": "Output only. The internal IP address of this cluster's control plane. Only populated if enabled.", + "readOnly": true, + "type": "string" + }, + "privateEndpointSubnetwork": { + "description": "Subnet to provision the master's private endpoint during cluster creation. Specified in projects/*/regions/*/subnetworks/* format. It is invalid to specify both PrivateClusterConfig.privateEndpointSubnetwork and this field at the same time.", + "type": "string" + }, + "publicEndpoint": { + "description": "Output only. The external IP address of this cluster's control plane. Only populated if enabled.", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, "IdentityServiceConfig": { "description": "IdentityServiceConfig is configuration for Identity Service which allows customers to use external identity providers with the K8S API", "id": "IdentityServiceConfig", @@ -4799,7 +4988,9 @@ "WORKLOADS", "APISERVER", "SCHEDULER", - "CONTROLLER_MANAGER" + "CONTROLLER_MANAGER", + "KCP_SSHD", + "KCP_CONNECTION" ], "enumDescriptions": [ "Default value. 
This shouldn't be used.", @@ -4807,7 +4998,9 @@ "workloads", "kube-apiserver", "kube-scheduler", - "kube-controller-manager" + "kube-controller-manager", + "kcp-sshd", + "kcp connection logs" ], "type": "string" }, @@ -4922,7 +5115,8 @@ "id": "MasterAuth", "properties": { "clientCertificate": { - "description": "[Output only] Base64-encoded public certificate used by clients to authenticate to the cluster endpoint.", + "description": "Output only. Base64-encoded public certificate used by clients to authenticate to the cluster endpoint. Issued only if client_certificate_config is set.", + "readOnly": true, "type": "string" }, "clientCertificateConfig": { @@ -4930,11 +5124,13 @@ "description": "Configuration for client certificate authentication on the cluster. For clusters before v1.12, if no configuration is specified, a client certificate is issued." }, "clientKey": { - "description": "[Output only] Base64-encoded private key used by clients to authenticate to the cluster endpoint.", + "description": "Output only. Base64-encoded private key used by clients to authenticate to the cluster endpoint.", + "readOnly": true, "type": "string" }, "clusterCaCertificate": { - "description": "[Output only] Base64-encoded public certificate that is the root of trust for the cluster.", + "description": "Output only. Base64-encoded public certificate that is the root of trust for the cluster.", + "readOnly": true, "type": "string" }, "password": { @@ -4968,6 +5164,10 @@ "gcpPublicCidrsAccessEnabled": { "description": "Whether master is accessible via Google Compute Engine Public IP addresses.", "type": "boolean" + }, + "privateEndpointEnforcementEnabled": { + "description": "Whether master authorized networks is enforced on private endpoint or not.", + "type": "boolean" + } }, "type": "object" }, @@ -5040,7 +5240,8 @@ "DEPLOYMENT", "STATEFULSET", "CADVISOR", - "KUBELET" + "KUBELET", + "DCGM" ], "enumDescriptions": [ "Default value. This shouldn't be used.", "Deployment", "Statefulset", "CADVISOR", - "KUBELET" + "KUBELET", + "NVIDIA Data Center GPU Manager (DCGM)" ], "type": "string" }, @@ -5101,6 +5303,10 @@ ], "type": "string" }, + "defaultEnablePrivateNodes": { + "description": "Controls whether by default nodes have private IP addresses only. It is invalid to specify both PrivateClusterConfig.enablePrivateNodes and this field at the same time. To update the default setting, use ClusterUpdate.desired_default_enable_private_nodes", + "type": "boolean" + }, "defaultSnatStatus": { "$ref": "DefaultSnatStatus", "description": "Whether the cluster disables default in-node sNAT rules. In-node sNAT rules will be disabled when default_snat_status is disabled. When disabled is set to false, default IP masquerade rules will be applied to the nodes to prevent sNAT on cluster internal traffic." @@ -5149,6 +5355,7 @@ }, "network": { "description": "Output only. The relative name of the Google Compute Engine network(https://cloud.google.com/compute/docs/networks-and-firewalls#networks) to which the cluster is connected. Example: projects/my-project/global/networks/my-network", + "readOnly": true, "type": "string" }, "networkPerformanceConfig": { @@ -5177,6 +5384,7 @@ }, "subnetwork": { "description": "Output only. The relative name of the Google Compute Engine [subnetwork](https://cloud.google.com/compute/docs/vpc) to which the cluster is connected.
Example: projects/my-project/regions/us-central1/subnetworks/my-subnet", + "readOnly": true, "type": "string" } }, @@ -5317,6 +5525,21 @@ "description": "Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard'", "type": "string" }, + "effectiveCgroupMode": { + "description": "Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version.", + "enum": [ + "EFFECTIVE_CGROUP_MODE_UNSPECIFIED", + "EFFECTIVE_CGROUP_MODE_V1", + "EFFECTIVE_CGROUP_MODE_V2" + ], + "enumDescriptions": [ + "EFFECTIVE_CGROUP_MODE_UNSPECIFIED means the cgroup configuration for the node pool is unspecified, i.e. the node pool is a Windows node pool.", + "CGROUP_MODE_V1 means the node pool is configured to use cgroupv1 for the cgroup configuration.", + "CGROUP_MODE_V2 means the node pool is configured to use cgroupv2 for the cgroup configuration." + ], + "readOnly": true, + "type": "string" + }, "enableConfidentialStorage": { "description": "Optional. Reserved for future use.", "type": "boolean" @@ -5445,6 +5668,13 @@ "description": "Spot flag for enabling Spot VM, which is a rebrand of the existing preemptible flag.", "type": "boolean" }, + "storagePools": { + "description": "List of Storage Pools where boot disks are provisioned.", + "items": { + "type": "string" + }, + "type": "array" + }, "tags": { "description": "The list of instance tags applied to all nodes. Tags are used to identify valid sources or targets for network firewalls and are specified by the client during cluster or node pool creation. Each tag within the list must comply with RFC1035.", "items": { @@ -5577,7 +5807,7 @@ "type": "boolean" }, "enablePrivateNodes": { - "description": "Whether nodes have internal IP addresses only. If enable_private_nodes is not specified, then the value is derived from cluster.privateClusterConfig.enablePrivateNodes", + "description": "Whether nodes have internal IP addresses only. If enable_private_nodes is not specified, then the value is derived from Cluster.NetworkConfig.default_enable_private_nodes", "type": "boolean" }, "networkPerformanceConfig": { @@ -5593,7 +5823,7 @@ "type": "string" }, "podIpv4RangeUtilization": { - "description": "Output only. [Output only] The utilization of the IPv4 range for the pod. The ratio is Usage/[Total number of IPs in the secondary range], Usage=numNodes*numZones*podIPsPerNode.", + "description": "Output only. The utilization of the IPv4 range for the pod. The ratio is Usage/[Total number of IPs in the secondary range], Usage=numNodes*numZones*podIPsPerNode.", "format": "double", "readOnly": true, "type": "number" @@ -5638,10 +5868,11 @@ "type": "integer" }, "instanceGroupUrls": { - "description": "[Output only] The resource URLs of the [managed instance groups](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances) associated with this node pool. During the node pool blue-green upgrade operation, the URLs contain both blue and green resources.", + "description": "Output only. The resource URLs of the [managed instance groups](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances) associated with this node pool. 
During the node pool blue-green upgrade operation, the URLs contain both blue and green resources.", "items": { "type": "string" }, + "readOnly": true, "type": "array" }, "locations": { @@ -5672,8 +5903,9 @@ "description": "Specifies the node placement policy." }, "podIpv4CidrSize": { - "description": "[Output only] The pod CIDR block size per node in this node pool.", + "description": "Output only. The pod CIDR block size per node in this node pool.", "format": "int32", + "readOnly": true, "type": "integer" }, "queuedProvisioning": { @@ -5681,11 +5913,12 @@ "description": "Specifies the configuration of queued provisioning." }, "selfLink": { - "description": "[Output only] Server-defined URL for the resource.", + "description": "Output only. Server-defined URL for the resource.", + "readOnly": true, "type": "string" }, "status": { - "description": "[Output only] The status of the nodes in this pool instance.", + "description": "Output only. The status of the nodes in this pool instance.", "enum": [ "STATUS_UNSPECIFIED", "PROVISIONING", @@ -5704,16 +5937,18 @@ "The STOPPING state indicates the node pool is being deleted.", "The ERROR state indicates the node pool may be unusable. Details can be found in the `statusMessage` field." ], + "readOnly": true, "type": "string" }, "statusMessage": { "deprecated": true, - "description": "[Output only] Deprecated. Use conditions instead. Additional information about the current status of this node pool instance, if available.", + "description": "Output only. Deprecated. Use conditions instead. Additional information about the current status of this node pool instance, if available.", + "readOnly": true, "type": "string" }, "updateInfo": { "$ref": "UpdateInfo", - "description": "Output only. [Output only] Update info contains relevant information during a node pool update.", + "description": "Output only. Update info contains relevant information during a node pool update.", "readOnly": true }, "upgradeSettings": { @@ -5886,11 +6121,13 @@ "type": "array" }, "detail": { - "description": "Detailed operation progress, if available.", + "description": "Output only. Detailed operation progress, if available.", + "readOnly": true, "type": "string" }, "endTime": { - "description": "[Output only] The time the operation completed, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "description": "Output only. The time the operation completed, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "readOnly": true, "type": "string" }, "error": { @@ -5898,11 +6135,13 @@ "description": "The error result of the operation in case of failure." }, "location": { - "description": "[Output only] The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) or [region](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) in which the cluster resides.", + "description": "Output only. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) or [region](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) in which the cluster resides.", + "readOnly": true, "type": "string" }, "name": { - "description": "The server-assigned ID for the operation.", + "description": "Output only. 
The server-assigned ID for the operation.", + "readOnly": true, "type": "string" }, "nodepoolConditions": { @@ -5914,7 +6153,7 @@ "type": "array" }, "operationType": { - "description": "The operation type.", + "description": "Output only. The operation type.", "enum": [ "TYPE_UNSPECIFIED", "CREATE_CLUSTER", @@ -5978,23 +6217,26 @@ "The control plane is being resized. This operation type is initiated by GKE. These operations are often performed preemptively to ensure that the control plane has sufficient resources and is not typically an indication of issues. For more details, see [documentation on resizes](https://cloud.google.com/kubernetes-engine/docs/concepts/maintenance-windows-and-exclusions#repairs).", "Fleet features of GKE Enterprise are being upgraded. The cluster should be assumed to be blocked for other upgrades until the operation finishes." ], + "readOnly": true, "type": "string" }, "progress": { "$ref": "OperationProgress", - "description": "Output only. [Output only] Progress information for an operation.", + "description": "Output only. Progress information for an operation.", "readOnly": true }, "selfLink": { - "description": "Server-defined URI for the operation. Example: `https://container.googleapis.com/v1alpha1/projects/123/locations/us-central1/operations/operation-123`.", + "description": "Output only. Server-defined URI for the operation. Example: `https://container.googleapis.com/v1alpha1/projects/123/locations/us-central1/operations/operation-123`.", + "readOnly": true, "type": "string" }, "startTime": { - "description": "[Output only] The time the operation started, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "description": "Output only. The time the operation started, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "readOnly": true, "type": "string" }, "status": { - "description": "The current status of the operation.", + "description": "Output only. The current status of the operation.", "enum": [ "STATUS_UNSPECIFIED", "PENDING", @@ -6009,6 +6251,7 @@ "The operation is done, either cancelled or completed.", "The operation is aborting." ], + "readOnly": true, "type": "string" }, "statusMessage": { @@ -6018,12 +6261,14 @@ "type": "string" }, "targetLink": { - "description": "Server-defined URI for the target of the operation. The format of this is a URI to the resource being modified (such as a cluster, node pool, or node). For node pool repairs, there may be multiple nodes being repaired, but only one will be the target. Examples: - ## `https://container.googleapis.com/v1/projects/123/locations/us-central1/clusters/my-cluster` ## `https://container.googleapis.com/v1/projects/123/zones/us-central1-c/clusters/my-cluster/nodePools/my-np` `https://container.googleapis.com/v1/projects/123/zones/us-central1-c/clusters/my-cluster/nodePools/my-np/node/my-node`", + "description": "Output only. Server-defined URI for the target of the operation. The format of this is a URI to the resource being modified (such as a cluster, node pool, or node). For node pool repairs, there may be multiple nodes being repaired, but only one will be the target. 
Examples: - ## `https://container.googleapis.com/v1/projects/123/locations/us-central1/clusters/my-cluster` ## `https://container.googleapis.com/v1/projects/123/zones/us-central1-c/clusters/my-cluster/nodePools/my-np` `https://container.googleapis.com/v1/projects/123/zones/us-central1-c/clusters/my-cluster/nodePools/my-np/node/my-node`", + "readOnly": true, "type": "string" }, "zone": { "deprecated": true, - "description": "The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the operation is taking place. This field is deprecated, use location instead.", + "description": "Output only. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the operation is taking place. This field is deprecated, use location instead.", + "readOnly": true, "type": "string" } }, @@ -6153,16 +6398,19 @@ "id": "PrivateClusterConfig", "properties": { "enablePrivateEndpoint": { - "description": "Whether the master's internal IP address is used as the cluster endpoint.", + "deprecated": true, + "description": "Whether the master's internal IP address is used as the cluster endpoint. Deprecated: Use ControlPlaneEndpointsConfig.IPEndpointsConfig.enable_public_endpoint instead. Note that the value of enable_public_endpoint is reversed: if enable_private_endpoint is false, then enable_public_endpoint will be true.", "type": "boolean" }, "enablePrivateNodes": { - "description": "Whether nodes have internal IP addresses only. If enabled, all nodes are given only RFC 1918 private addresses and communicate with the master via private networking.", + "deprecated": true, + "description": "Whether nodes have internal IP addresses only. If enabled, all nodes are given only RFC 1918 private addresses and communicate with the master via private networking. Deprecated: Use NetworkConfig.default_enable_private_nodes instead.", "type": "boolean" }, "masterGlobalAccessConfig": { "$ref": "PrivateClusterMasterGlobalAccessConfig", - "description": "Controls master global access settings." + "deprecated": true, + "description": "Controls master global access settings. Deprecated: Use ControlPlaneEndpointsConfig.IPEndpointsConfig.enable_global_access instead." }, "masterIpv4CidrBlock": { "description": "The IP range in CIDR notation to use for the hosted master network. This range will be used for assigning internal IP addresses to the master or set of masters, as well as the ILB VIP. This range must not overlap with any other ranges in use within the cluster's network.", @@ -6170,18 +6418,24 @@ }, "peeringName": { "description": "Output only. The peering name in the customer VPC used by this cluster.", + "readOnly": true, "type": "string" }, "privateEndpoint": { - "description": "Output only. The internal IP address of this cluster's master endpoint.", + "deprecated": true, + "description": "Output only. The internal IP address of this cluster's master endpoint. Deprecated: Use ControlPlaneEndpointsConfig.IPEndpointsConfig.private_endpoint instead.", + "readOnly": true, "type": "string" }, "privateEndpointSubnetwork": { - "description": "Subnet to provision the master's private endpoint during cluster creation. Specified in projects/*/regions/*/subnetworks/* format.", + "deprecated": true, + "description": "Subnet to provision the master's private endpoint during cluster creation. Specified in projects/*/regions/*/subnetworks/* format. 
Deprecated: Use ControlPlaneEndpointsConfig.IPEndpointsConfig.private_endpoint_subnetwork instead.", "type": "string" }, "publicEndpoint": { - "description": "Output only. The external IP address of this cluster's master endpoint.", + "deprecated": true, + "description": "Output only. The external IP address of this cluster's master endpoint. Deprecated: Use ControlPlaneEndpointsConfig.IPEndpointsConfig.public_endpoint instead.", + "readOnly": true, "type": "string" } }, @@ -6246,17 +6500,32 @@ }, "type": "object" }, + "RBACBindingConfig": { + "description": "RBACBindingConfig allows users to restrict ClusterRoleBindings and RoleBindings that can be created.", + "id": "RBACBindingConfig", + "properties": { + "enableInsecureBindingSystemAuthenticated": { + "description": "Setting this to true will allow any ClusterRoleBinding and RoleBinding with subjects system:authenticated.", + "type": "boolean" + }, + "enableInsecureBindingSystemUnauthenticated": { + "description": "Setting this to true will allow any ClusterRoleBinding and RoleBinding with subjects system:anonymous or system:unauthenticated.", + "type": "boolean" + } + }, + "type": "object" + }, "RangeInfo": { "description": "RangeInfo contains the range name and the range utilization by this cluster.", "id": "RangeInfo", "properties": { "rangeName": { - "description": "Output only. [Output only] Name of a range.", + "description": "Output only. Name of a range.", "readOnly": true, "type": "string" }, "utilization": { - "description": "Output only. [Output only] The utilization of the range.", + "description": "Output only. The utilization of the range.", "format": "double", "readOnly": true, "type": "number" @@ -6264,6 +6533,47 @@ }, "type": "object" }, + "RayClusterLoggingConfig": { + "description": "RayClusterLoggingConfig specifies configuration of Ray logging.", + "id": "RayClusterLoggingConfig", + "properties": { + "enabled": { + "description": "Enable log collection for Ray clusters.", + "type": "boolean" + } + }, + "type": "object" + }, + "RayClusterMonitoringConfig": { + "description": "RayClusterMonitoringConfig specifies monitoring configuration for Ray clusters.", + "id": "RayClusterMonitoringConfig", + "properties": { + "enabled": { + "description": "Enable metrics collection for Ray clusters.", + "type": "boolean" + } + }, + "type": "object" + }, + "RayOperatorConfig": { + "description": "Configuration options for the Ray Operator add-on.", + "id": "RayOperatorConfig", + "properties": { + "enabled": { + "description": "Whether the Ray Operator addon is enabled for this cluster.", + "type": "boolean" + }, + "rayClusterLoggingConfig": { + "$ref": "RayClusterLoggingConfig", + "description": "Optional. Logging configuration for Ray clusters." + }, + "rayClusterMonitoringConfig": { + "$ref": "RayClusterMonitoringConfig", + "description": "Optional. Monitoring configuration for Ray clusters." + } + }, + "type": "object" + }, "RecurringTimeWindow": { "description": "Represents an arbitrary window of time that recurs.", "id": "RecurringTimeWindow", "properties": { @@ -6289,13 +6599,15 @@ "UNSPECIFIED", "RAPID", "REGULAR", - "STABLE" + "STABLE", + "EXTENDED" ], "enumDescriptions": [ "No channel specified.", "RAPID channel is offered on an early access basis for customers who want to test new releases. WARNING: Versions available in the RAPID Channel may be subject to unresolved issues with no known workaround and are not subject to any SLAs.", "Clusters subscribed to REGULAR receive versions that are considered GA quality.
REGULAR is intended for production users who want to take advantage of new features.", - "Clusters subscribed to STABLE receive versions that are known to be stable and reliable in production." + "Clusters subscribed to STABLE receive versions that are known to be stable and reliable in production.", + "Clusters subscribed to EXTENDED receive extended support and availability for versions which are known to be stable and reliable in production." ], "type": "string" } @@ -6312,13 +6624,15 @@ "UNSPECIFIED", "RAPID", "REGULAR", - "STABLE" + "STABLE", + "EXTENDED" ], "enumDescriptions": [ "No channel specified.", "RAPID channel is offered on an early access basis for customers who want to test new releases. WARNING: Versions available in the RAPID Channel may be subject to unresolved issues with no known workaround and are not subject to any SLAs.", "Clusters subscribed to REGULAR receive versions that are considered GA quality. REGULAR is intended for production users who want to take advantage of new features.", - "Clusters subscribed to STABLE receive versions that are known to be stable and reliable in production." + "Clusters subscribed to STABLE receive versions that are known to be stable and reliable in production.", + "Clusters subscribed to EXTENDED receive extended support and availability for versions which are known to be stable and reliable in production." ], "type": "string" }, @@ -6521,6 +6835,17 @@ "properties": {}, "type": "object" }, + "SecretManagerConfig": { + "description": "SecretManagerConfig is config for secret manager enablement.", + "id": "SecretManagerConfig", + "properties": { + "enabled": { + "description": "Enable/Disable Secret Manager Config.", + "type": "boolean" + } + }, + "type": "object" + }, "SecurityBulletinEvent": { "description": "SecurityBulletinEvent is a notification sent to customers when a security bulletin has been posted that they are vulnerable to.", "id": "SecurityBulletinEvent", @@ -7481,6 +7806,13 @@ "$ref": "ResourceManagerTags", "description": "Desired resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Existing tags will be replaced with new values." }, + "storagePools": { + "description": "List of Storage Pools where boot disks are provisioned. Existing Storage Pools will be replaced with storage-pools.", + "items": { + "type": "string" + }, + "type": "array" + }, "tags": { "$ref": "NetworkTags", "description": "The desired network tags to be applied to all nodes in the node pool. If this field is not present, the tags will not be changed. Otherwise, the existing network tags will be *replaced* with the provided tags." 
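The storagePools field added to UpdateNodePoolRequest above carries replace-rather-than-merge semantics: the list sent on an update displaces whatever Storage Pools the node pool already uses. A minimal sketch of issuing such an update through the regenerated v1 client; the project, location, cluster, and pool identifiers are placeholders, and Application Default Credentials are assumed:

package main

import (
	"context"
	"fmt"
	"log"

	container "google.golang.org/api/container/v1"
)

func main() {
	ctx := context.Background()
	svc, err := container.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// Fully qualified node pool name; every identifier here is a placeholder.
	name := "projects/my-project/locations/us-central1/clusters/my-cluster/nodePools/default-pool"

	// Per the schema above, this list *replaces* the pool's existing Storage Pools.
	req := &container.UpdateNodePoolRequest{
		StoragePools: []string{
			"projects/my-project/zones/us-central1-a/storagePools/my-storage-pool",
		},
	}

	op, err := svc.Projects.Locations.Clusters.NodePools.Update(name, req).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("started operation:", op.Name)
}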
@@ -7682,6 +8014,51 @@ }, "type": "object" }, + "UserManagedKeysConfig": { + "description": "UserManagedKeysConfig holds the resource addresses of the keys which are used for signing certs and tokens that are used for communication within the cluster.", + "id": "UserManagedKeysConfig", + "properties": { + "aggregationCa": { + "description": "The Certificate Authority Service caPool to use for the aggregation CA in this cluster.", + "type": "string" + }, + "clusterCa": { + "description": "The Certificate Authority Service caPool to use for the cluster CA in this cluster.", + "type": "string" + }, + "controlPlaneDiskEncryptionKey": { + "description": "The Cloud KMS cryptoKey to use for Confidential Hyperdisk on the control plane nodes.", + "type": "string" + }, + "etcdApiCa": { + "description": "Resource path of the Certificate Authority Service caPool to use for the etcd API CA in this cluster.", + "type": "string" + }, + "etcdPeerCa": { + "description": "Resource path of the Certificate Authority Service caPool to use for the etcd peer CA in this cluster.", + "type": "string" + }, + "gkeopsEtcdBackupEncryptionKey": { + "description": "Resource path of the Cloud KMS cryptoKey to use for encryption of internal etcd backups.", + "type": "string" + }, + "serviceAccountSigningKeys": { + "description": "The Cloud KMS cryptoKeyVersions to use for signing service account JWTs issued by this cluster. Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}`", + "items": { + "type": "string" + }, + "type": "array" + }, + "serviceAccountVerificationKeys": { + "description": "The Cloud KMS cryptoKeyVersions to use for verifying service account JWTs issued by this cluster. Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}`", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "VerticalPodAutoscaling": { "description": "VerticalPodAutoscaling contains global, per-cluster information required by Vertical Pod Autoscaler to automatically adjust the resources of pods controlled by it.", "id": "VerticalPodAutoscaling", diff --git a/terraform/providers/google/vendor/google.golang.org/api/container/v1/container-gen.go b/terraform/providers/google/vendor/google.golang.org/api/container/v1/container-gen.go index 9cc6427b9ad..bc8d5bbd66b 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/container/v1/container-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/container/v1/container-gen.go @@ -6,7 +7,7 @@ // Package container provides access to the Kubernetes Engine API.
// -// For product documentation, see: https://cloud.google.com/container-engine/ +// For product documentation, see: https://cloud.google.com/kubernetes-engine/docs/ // // # Library status // @@ -326,9 +326,9 @@ type AcceleratorConfig struct { NullFields []string `json:"-"` } -func (s *AcceleratorConfig) MarshalJSON() ([]byte, error) { +func (s AcceleratorConfig) MarshalJSON() ([]byte, error) { type NoMethod AcceleratorConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AdditionalNodeNetworkConfig: AdditionalNodeNetworkConfig is the @@ -352,21 +352,25 @@ type AdditionalNodeNetworkConfig struct { NullFields []string `json:"-"` } -func (s *AdditionalNodeNetworkConfig) MarshalJSON() ([]byte, error) { +func (s AdditionalNodeNetworkConfig) MarshalJSON() ([]byte, error) { type NoMethod AdditionalNodeNetworkConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AdditionalPodNetworkConfig: AdditionalPodNetworkConfig is the configuration // for additional pod networks within the NodeNetworkConfig message type AdditionalPodNetworkConfig struct { // MaxPodsPerNode: The maximum number of pods per node which use this pod - // network + // network. MaxPodsPerNode *MaxPodsConstraint `json:"maxPodsPerNode,omitempty"` + // NetworkAttachment: The name of the network attachment for pods to + // communicate to; cannot be specified along with subnetwork or + // secondary_pod_range. + NetworkAttachment string `json:"networkAttachment,omitempty"` // SecondaryPodRange: The name of the secondary range on the subnet which - // provides IP address for this pod range + // provides IP address for this pod range. SecondaryPodRange string `json:"secondaryPodRange,omitempty"` - // Subnetwork: Name of the subnetwork where the additional pod network belongs + // Subnetwork: Name of the subnetwork where the additional pod network belongs. Subnetwork string `json:"subnetwork,omitempty"` // ForceSendFields is a list of field names (e.g. "MaxPodsPerNode") to // unconditionally include in API requests. By default, fields with empty or @@ -381,16 +385,15 @@ type AdditionalPodNetworkConfig struct { NullFields []string `json:"-"` } -func (s *AdditionalPodNetworkConfig) MarshalJSON() ([]byte, error) { +func (s AdditionalPodNetworkConfig) MarshalJSON() ([]byte, error) { type NoMethod AdditionalPodNetworkConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AdditionalPodRangesConfig: AdditionalPodRangesConfig is the configuration // for additional pod secondary ranges supporting the ClusterUpdate message. type AdditionalPodRangesConfig struct { - // PodRangeInfo: Output only. [Output only] Information for additional pod - // range. + // PodRangeInfo: Output only. Information for additional pod range. PodRangeInfo []*RangeInfo `json:"podRangeInfo,omitempty"` // PodRangeNames: Name for pod secondary ipv4 range which has the actual range // defined ahead. 
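The receiver change from *AcceleratorConfig to AcceleratorConfig repeats mechanically through the rest of this file: marshalling now operates on a copy, so it also works for non-addressable values, while the NoMethod alias keeps the custom MarshalJSON from recursing into itself. A stripped-down illustration of the pattern using only the standard library; the struct here is a stand-in, not one of the vendored types:

package main

import (
	"encoding/json"
	"fmt"
)

// Example is a stand-in for a generated API struct, not a vendored type.
type Example struct {
	Count int64  `json:"count,omitempty"`
	Kind  string `json:"kind,omitempty"`
}

// MarshalJSON uses a value receiver, so it is in the method set of both
// Example and *Example, and it works on non-addressable values. The NoMethod
// alias has the same fields but no methods, so the inner json.Marshal call
// cannot re-enter this function.
func (s Example) MarshalJSON() ([]byte, error) {
	type NoMethod Example
	return json.Marshal(NoMethod(s))
}

func main() {
	b, err := json.Marshal(Example{Count: 2, Kind: "nvidia-tesla-t4"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"count":2,"kind":"nvidia-tesla-t4"}
}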
@@ -408,9 +411,9 @@ type AdditionalPodRangesConfig struct { NullFields []string `json:"-"` } -func (s *AdditionalPodRangesConfig) MarshalJSON() ([]byte, error) { +func (s AdditionalPodRangesConfig) MarshalJSON() ([]byte, error) { type NoMethod AdditionalPodRangesConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AddonsConfig: Configuration for the addons that can be automatically spun up @@ -453,6 +456,8 @@ type AddonsConfig struct { // whether the addon is enabled or not on the Master, it does not track whether // network policy is enabled for the nodes. NetworkPolicyConfig *NetworkPolicyConfig `json:"networkPolicyConfig,omitempty"` + // RayOperatorConfig: Optional. Configuration for Ray Operator addon. + RayOperatorConfig *RayOperatorConfig `json:"rayOperatorConfig,omitempty"` // StatefulHaConfig: Optional. Configuration for the StatefulHA add-on. StatefulHaConfig *StatefulHAConfig `json:"statefulHaConfig,omitempty"` // ForceSendFields is a list of field names (e.g. "CloudRunConfig") to @@ -468,9 +473,9 @@ type AddonsConfig struct { NullFields []string `json:"-"` } -func (s *AddonsConfig) MarshalJSON() ([]byte, error) { +func (s AddonsConfig) MarshalJSON() ([]byte, error) { type NoMethod AddonsConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AdvancedDatapathObservabilityConfig: AdvancedDatapathObservabilityConfig @@ -501,9 +506,9 @@ type AdvancedDatapathObservabilityConfig struct { NullFields []string `json:"-"` } -func (s *AdvancedDatapathObservabilityConfig) MarshalJSON() ([]byte, error) { +func (s AdvancedDatapathObservabilityConfig) MarshalJSON() ([]byte, error) { type NoMethod AdvancedDatapathObservabilityConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AdvancedMachineFeatures: Specifies options for controlling advanced machine @@ -529,9 +534,9 @@ type AdvancedMachineFeatures struct { NullFields []string `json:"-"` } -func (s *AdvancedMachineFeatures) MarshalJSON() ([]byte, error) { +func (s AdvancedMachineFeatures) MarshalJSON() ([]byte, error) { type NoMethod AdvancedMachineFeatures - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuthenticatorGroupsConfig: Configuration for returning group information @@ -556,19 +561,19 @@ type AuthenticatorGroupsConfig struct { NullFields []string `json:"-"` } -func (s *AuthenticatorGroupsConfig) MarshalJSON() ([]byte, error) { +func (s AuthenticatorGroupsConfig) MarshalJSON() ([]byte, error) { type NoMethod AuthenticatorGroupsConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AutoUpgradeOptions: AutoUpgradeOptions defines the set of options for the // user to control how the Auto Upgrades will proceed. type AutoUpgradeOptions struct { - // AutoUpgradeStartTime: [Output only] This field is set when upgrades are - // about to commence with the approximate start time for the upgrades, in - // RFC3339 (https://www.ietf.org/rfc/rfc3339.txt) text format. + // AutoUpgradeStartTime: Output only. 
This field is set when upgrades are about + // to commence with the approximate start time for the upgrades, in RFC3339 + // (https://www.ietf.org/rfc/rfc3339.txt) text format. AutoUpgradeStartTime string `json:"autoUpgradeStartTime,omitempty"` - // Description: [Output only] This field is set when upgrades are about to + // Description: Output only. This field is set when upgrades are about to // commence with the description of the upgrade. Description string `json:"description,omitempty"` // ForceSendFields is a list of field names (e.g. "AutoUpgradeStartTime") to @@ -584,9 +589,9 @@ type AutoUpgradeOptions struct { NullFields []string `json:"-"` } -func (s *AutoUpgradeOptions) MarshalJSON() ([]byte, error) { +func (s AutoUpgradeOptions) MarshalJSON() ([]byte, error) { type NoMethod AutoUpgradeOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Autopilot: Autopilot is the configuration for Autopilot settings on the @@ -609,9 +614,9 @@ type Autopilot struct { NullFields []string `json:"-"` } -func (s *Autopilot) MarshalJSON() ([]byte, error) { +func (s Autopilot) MarshalJSON() ([]byte, error) { type NoMethod Autopilot - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AutopilotCompatibilityIssue: AutopilotCompatibilityIssue contains @@ -653,9 +658,9 @@ type AutopilotCompatibilityIssue struct { NullFields []string `json:"-"` } -func (s *AutopilotCompatibilityIssue) MarshalJSON() ([]byte, error) { +func (s AutopilotCompatibilityIssue) MarshalJSON() ([]byte, error) { type NoMethod AutopilotCompatibilityIssue - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AutoprovisioningNodePoolDefaults: AutoprovisioningNodePoolDefaults contains @@ -718,9 +723,9 @@ type AutoprovisioningNodePoolDefaults struct { NullFields []string `json:"-"` } -func (s *AutoprovisioningNodePoolDefaults) MarshalJSON() ([]byte, error) { +func (s AutoprovisioningNodePoolDefaults) MarshalJSON() ([]byte, error) { type NoMethod AutoprovisioningNodePoolDefaults - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BestEffortProvisioning: Best effort provisioning. @@ -746,9 +751,9 @@ type BestEffortProvisioning struct { NullFields []string `json:"-"` } -func (s *BestEffortProvisioning) MarshalJSON() ([]byte, error) { +func (s BestEffortProvisioning) MarshalJSON() ([]byte, error) { type NoMethod BestEffortProvisioning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BigQueryDestination: Parameters for using BigQuery as the destination of @@ -769,9 +774,9 @@ type BigQueryDestination struct { NullFields []string `json:"-"` } -func (s *BigQueryDestination) MarshalJSON() ([]byte, error) { +func (s BigQueryDestination) MarshalJSON() ([]byte, error) { type NoMethod BigQueryDestination - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BinaryAuthorization: Configuration for Binary Authorization. 
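AddonsConfig gains a RayOperatorConfig knob in this revision, alongside the logging and monitoring sub-configs defined earlier in the discovery document. A hedged sketch of enabling the add-on through a cluster update with the regenerated v1 client; the cluster name is a placeholder and the nested field names assume the generator's usual conventions:

package main

import (
	"context"
	"fmt"
	"log"

	container "google.golang.org/api/container/v1"
)

func main() {
	ctx := context.Background()
	svc, err := container.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	name := "projects/my-project/locations/us-central1/clusters/my-cluster" // placeholder

	req := &container.UpdateClusterRequest{
		Update: &container.ClusterUpdate{
			DesiredAddonsConfig: &container.AddonsConfig{
				RayOperatorConfig: &container.RayOperatorConfig{
					Enabled: true,
					// Optional collection of Ray cluster logs and metrics.
					RayClusterLoggingConfig:    &container.RayClusterLoggingConfig{Enabled: true},
					RayClusterMonitoringConfig: &container.RayClusterMonitoringConfig{Enabled: true},
				},
			},
		},
	}

	op, err := svc.Projects.Locations.Clusters.Update(name, req).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("started operation:", op.Name)
}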
@@ -803,9 +808,9 @@ type BinaryAuthorization struct { NullFields []string `json:"-"` } -func (s *BinaryAuthorization) MarshalJSON() ([]byte, error) { +func (s BinaryAuthorization) MarshalJSON() ([]byte, error) { type NoMethod BinaryAuthorization - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BlueGreenInfo: Information relevant to blue-green upgrade. @@ -849,9 +854,9 @@ type BlueGreenInfo struct { NullFields []string `json:"-"` } -func (s *BlueGreenInfo) MarshalJSON() ([]byte, error) { +func (s BlueGreenInfo) MarshalJSON() ([]byte, error) { type NoMethod BlueGreenInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BlueGreenSettings: Settings for blue-green upgrade. @@ -874,9 +879,9 @@ type BlueGreenSettings struct { NullFields []string `json:"-"` } -func (s *BlueGreenSettings) MarshalJSON() ([]byte, error) { +func (s BlueGreenSettings) MarshalJSON() ([]byte, error) { type NoMethod BlueGreenSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CancelOperationRequest: CancelOperationRequest cancels a single operation. @@ -910,9 +915,9 @@ type CancelOperationRequest struct { NullFields []string `json:"-"` } -func (s *CancelOperationRequest) MarshalJSON() ([]byte, error) { +func (s CancelOperationRequest) MarshalJSON() ([]byte, error) { type NoMethod CancelOperationRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CertificateAuthorityDomainConfig: CertificateAuthorityDomainConfig @@ -939,9 +944,9 @@ type CertificateAuthorityDomainConfig struct { NullFields []string `json:"-"` } -func (s *CertificateAuthorityDomainConfig) MarshalJSON() ([]byte, error) { +func (s CertificateAuthorityDomainConfig) MarshalJSON() ([]byte, error) { type NoMethod CertificateAuthorityDomainConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CheckAutopilotCompatibilityResponse: CheckAutopilotCompatibilityResponse has @@ -967,9 +972,9 @@ type CheckAutopilotCompatibilityResponse struct { NullFields []string `json:"-"` } -func (s *CheckAutopilotCompatibilityResponse) MarshalJSON() ([]byte, error) { +func (s CheckAutopilotCompatibilityResponse) MarshalJSON() ([]byte, error) { type NoMethod CheckAutopilotCompatibilityResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CidrBlock: CidrBlock contains an optional name and one CIDR block. 
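Every generated struct in this file carries the same ForceSendFields/NullFields machinery, and it matters for exactly the kind of booleans touched in these hunks: with omitempty, a false value is dropped from the request body entirely, which the server reads as "no change" rather than "disable". A small self-contained demonstration that only marshals, so it runs without credentials; the field choice is illustrative:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	container "google.golang.org/api/container/v1"
)

func main() {
	// Without ForceSendFields, the false boolean below would be omitted from
	// the wire format entirely because of its omitempty tag.
	upd := &container.ClusterUpdate{
		DesiredEnablePrivateEndpoint: false,
		ForceSendFields:              []string{"DesiredEnablePrivateEndpoint"},
	}
	b, err := json.Marshal(upd)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // {"desiredEnablePrivateEndpoint":false}
}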
@@ -992,9 +997,9 @@ type CidrBlock struct { NullFields []string `json:"-"` } -func (s *CidrBlock) MarshalJSON() ([]byte, error) { +func (s CidrBlock) MarshalJSON() ([]byte, error) { type NoMethod CidrBlock - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ClientCertificateConfig: Configuration for client certificates on the @@ -1015,9 +1020,9 @@ type ClientCertificateConfig struct { NullFields []string `json:"-"` } -func (s *ClientCertificateConfig) MarshalJSON() ([]byte, error) { +func (s ClientCertificateConfig) MarshalJSON() ([]byte, error) { type NoMethod ClientCertificateConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CloudRunConfig: Configuration options for the Cloud Run feature. @@ -1047,9 +1052,9 @@ type CloudRunConfig struct { NullFields []string `json:"-"` } -func (s *CloudRunConfig) MarshalJSON() ([]byte, error) { +func (s CloudRunConfig) MarshalJSON() ([]byte, error) { type NoMethod CloudRunConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Cluster: A Google Kubernetes Engine cluster. @@ -1071,25 +1076,30 @@ type Cluster struct { // notation (e.g. `10.96.0.0/14`). Leave blank to have one automatically chosen // or specify a `/14` block in `10.0.0.0/8`. ClusterIpv4Cidr string `json:"clusterIpv4Cidr,omitempty"` + // CompliancePostureConfig: Enable/Disable Compliance Posture features for the + // cluster. + CompliancePostureConfig *CompliancePostureConfig `json:"compliancePostureConfig,omitempty"` // Conditions: Which conditions caused the current cluster state. Conditions []*StatusCondition `json:"conditions,omitempty"` // ConfidentialNodes: Configuration of Confidential Nodes. All the nodes in the // cluster will be Confidential VM once enabled. ConfidentialNodes *ConfidentialNodes `json:"confidentialNodes,omitempty"` + // ControlPlaneEndpointsConfig: Configuration for all cluster's control plane + // endpoints. + ControlPlaneEndpointsConfig *ControlPlaneEndpointsConfig `json:"controlPlaneEndpointsConfig,omitempty"` // CostManagementConfig: Configuration for the fine-grained cost management // feature. CostManagementConfig *CostManagementConfig `json:"costManagementConfig,omitempty"` - // CreateTime: [Output only] The time the cluster was created, in RFC3339 + // CreateTime: Output only. The time the cluster was created, in RFC3339 // (https://www.ietf.org/rfc/rfc3339.txt) text format. CreateTime string `json:"createTime,omitempty"` - // CurrentMasterVersion: [Output only] The current software version of the + // CurrentMasterVersion: Output only. The current software version of the // master endpoint. CurrentMasterVersion string `json:"currentMasterVersion,omitempty"` - // CurrentNodeCount: [Output only] The number of nodes currently in the - // cluster. Deprecated. Call Kubernetes API directly to retrieve node - // information. + // CurrentNodeCount: Output only. The number of nodes currently in the cluster. + // Deprecated. Call Kubernetes API directly to retrieve node information. CurrentNodeCount int64 `json:"currentNodeCount,omitempty"` - // CurrentNodeVersion: [Output only] Deprecated, use NodePools.version + // CurrentNodeVersion: Output only. 
Deprecated, use NodePools.version // (https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters.nodePools) // instead. The current version of the node software components. If they are // currently at multiple versions because they're in the process of being @@ -1114,8 +1124,8 @@ type Cluster struct { EnableKubernetesAlpha bool `json:"enableKubernetesAlpha,omitempty"` // EnableTpu: Enable the ability to use Cloud TPUs in this cluster. EnableTpu bool `json:"enableTpu,omitempty"` - // Endpoint: [Output only] The IP address of this cluster's master endpoint. - // The endpoint can be accessed from the internet at + // Endpoint: Output only. The IP address of this cluster's master endpoint. The + // endpoint can be accessed from the internet at // `https://username:password@endpoint/`. See the `masterAuth` property of this // resource for username and password information. Endpoint string `json:"endpoint,omitempty"` @@ -1125,7 +1135,7 @@ type Cluster struct { // fields, and may be sent on update requests to ensure the client has an // up-to-date value before proceeding. Etag string `json:"etag,omitempty"` - // ExpireTime: [Output only] The time the cluster will be automatically deleted + // ExpireTime: Output only. The time the cluster will be automatically deleted // in RFC3339 (https://www.ietf.org/rfc/rfc3339.txt) text format. ExpireTime string `json:"expireTime,omitempty"` // Fleet: Fleet information for the cluster. @@ -1155,7 +1165,8 @@ type Cluster struct { // a node_pool at the same time. This field is deprecated, use // node_pool.initial_node_count instead. InitialNodeCount int64 `json:"initialNodeCount,omitempty"` - // InstanceGroupUrls: Deprecated. Use node_pools.instance_group_urls. + // InstanceGroupUrls: Output only. Deprecated. Use + // node_pools.instance_group_urls. InstanceGroupUrls []string `json:"instanceGroupUrls,omitempty"` // IpAllocationPolicy: Configuration for cluster IP allocation. IpAllocationPolicy *IPAllocationPolicy `json:"ipAllocationPolicy,omitempty"` @@ -1163,7 +1174,7 @@ type Cluster struct { LabelFingerprint string `json:"labelFingerprint,omitempty"` // LegacyAbac: Configuration for the legacy ABAC authorization mode. LegacyAbac *LegacyAbac `json:"legacyAbac,omitempty"` - // Location: [Output only] The name of the Google Compute Engine zone + // Location: Output only. The name of the Google Compute Engine zone // (https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) // or region // (https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) @@ -1197,7 +1208,9 @@ type Cluster struct { // password will be generated, and a client certificate will be issued. MasterAuth *MasterAuth `json:"masterAuth,omitempty"` // MasterAuthorizedNetworksConfig: The configuration options for master - // authorized networks feature. + // authorized networks feature. Deprecated: Use + // ControlPlaneEndpointsConfig.IPEndpointsConfig.authorized_networks_config + // instead. MasterAuthorizedNetworksConfig *MasterAuthorizedNetworksConfig `json:"masterAuthorizedNetworksConfig,omitempty"` // MeshCertificates: Configuration for issuance of mTLS keys and certificates // to Kubernetes pods. @@ -1236,7 +1249,7 @@ type Cluster struct { // node pool, see `node_pool.config`) If unspecified, the defaults are used. // This field is deprecated, use node_pool.config instead. 
NodeConfig *NodeConfig `json:"nodeConfig,omitempty"` - // NodeIpv4CidrSize: [Output only] The size of the address space on each node + // NodeIpv4CidrSize: Output only. The size of the address space on each node // for hosting containers. This is provisioned from within the // `container_ipv4_cidr` range. This field will only be set when cluster is in // route-based network mode. @@ -1259,6 +1272,9 @@ ParentProductConfig *ParentProductConfig `json:"parentProductConfig,omitempty"` // PrivateClusterConfig: Configuration for private cluster. PrivateClusterConfig *PrivateClusterConfig `json:"privateClusterConfig,omitempty"` + // RbacBindingConfig: RBACBindingConfig allows users to restrict + // ClusterRoleBindings and RoleBindings that can be created. + RbacBindingConfig *RBACBindingConfig `json:"rbacBindingConfig,omitempty"` // ReleaseChannel: Release channel configuration. If left unspecified on // cluster creation and a version is specified, the cluster is enrolled in the // most mature release channel where the version is available (first checking @@ -1276,12 +1292,14 @@ SatisfiesPzi bool `json:"satisfiesPzi,omitempty"` // SatisfiesPzs: Output only. Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` + // SecretManagerConfig: Secret CSI driver configuration. + SecretManagerConfig *SecretManagerConfig `json:"secretManagerConfig,omitempty"` // SecurityPostureConfig: Enable/Disable Security Posture API features for the // cluster. SecurityPostureConfig *SecurityPostureConfig `json:"securityPostureConfig,omitempty"` - // SelfLink: [Output only] Server-defined URL for the resource. + // SelfLink: Output only. Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // ServicesIpv4Cidr: [Output only] The IP address range of the Kubernetes + // ServicesIpv4Cidr: Output only. The IP address range of the Kubernetes // services in this cluster, in CIDR // (http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. // `1.2.3.4/29`). Service addresses are typically put in the last `/16` from @@ -1289,7 +1307,7 @@ ServicesIpv4Cidr string `json:"servicesIpv4Cidr,omitempty"` // ShieldedNodes: Shielded Nodes configuration. ShieldedNodes *ShieldedNodes `json:"shieldedNodes,omitempty"` - // Status: [Output only] The current status of this cluster. + // Status: Output only. The current status of this cluster. // // Possible values: // "STATUS_UNSPECIFIED" - Not set. @@ -1307,25 +1325,27 @@ // to restore full functionality. Details can be found in the `statusMessage` // field. Status string `json:"status,omitempty"` - // StatusMessage: [Output only] Deprecated. Use conditions instead. Additional + // StatusMessage: Output only. Deprecated. Use conditions instead. Additional // information about the current status of this cluster, if available. StatusMessage string `json:"statusMessage,omitempty"` // Subnetwork: The name of the Google Compute Engine subnetwork // (https://cloud.google.com/compute/docs/subnetworks) to which the cluster is // connected. Subnetwork string `json:"subnetwork,omitempty"` - // TpuIpv4CidrBlock: [Output only] The IP address range of the Cloud TPUs in + // TpuIpv4CidrBlock: Output only. The IP address range of the Cloud TPUs in // this cluster, in CIDR // (http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. // `1.2.3.4/29`).
TpuIpv4CidrBlock string `json:"tpuIpv4CidrBlock,omitempty"` + // UserManagedKeysConfig: The Custom keys configuration for the cluster. + UserManagedKeysConfig *UserManagedKeysConfig `json:"userManagedKeysConfig,omitempty"` // VerticalPodAutoscaling: Cluster-level Vertical Pod Autoscaling // configuration. VerticalPodAutoscaling *VerticalPodAutoscaling `json:"verticalPodAutoscaling,omitempty"` // WorkloadIdentityConfig: Configuration for the use of Kubernetes Service // Accounts in GCP IAM policies. WorkloadIdentityConfig *WorkloadIdentityConfig `json:"workloadIdentityConfig,omitempty"` - // Zone: [Output only] The name of the Google Compute Engine zone + // Zone: Output only. The name of the Google Compute Engine zone // (https://cloud.google.com/compute/docs/zones#available) in which the cluster // resides. This field is deprecated, use location instead. Zone string `json:"zone,omitempty"` @@ -1345,9 +1365,9 @@ type Cluster struct { NullFields []string `json:"-"` } -func (s *Cluster) MarshalJSON() ([]byte, error) { +func (s Cluster) MarshalJSON() ([]byte, error) { type NoMethod Cluster - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ClusterAutoscaling: ClusterAutoscaling contains global, per-cluster @@ -1387,9 +1407,9 @@ type ClusterAutoscaling struct { NullFields []string `json:"-"` } -func (s *ClusterAutoscaling) MarshalJSON() ([]byte, error) { +func (s ClusterAutoscaling) MarshalJSON() ([]byte, error) { type NoMethod ClusterAutoscaling - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ClusterNetworkPerformanceConfig: Configuration of network bandwidth tiers @@ -1414,9 +1434,9 @@ type ClusterNetworkPerformanceConfig struct { NullFields []string `json:"-"` } -func (s *ClusterNetworkPerformanceConfig) MarshalJSON() ([]byte, error) { +func (s ClusterNetworkPerformanceConfig) MarshalJSON() ([]byte, error) { type NoMethod ClusterNetworkPerformanceConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ClusterUpdate: ClusterUpdate describes an update to the cluster. Exactly one @@ -1440,8 +1460,13 @@ type ClusterUpdate struct { DesiredBinaryAuthorization *BinaryAuthorization `json:"desiredBinaryAuthorization,omitempty"` // DesiredClusterAutoscaling: Cluster-level autoscaling configuration. DesiredClusterAutoscaling *ClusterAutoscaling `json:"desiredClusterAutoscaling,omitempty"` + // DesiredCompliancePostureConfig: Enable/Disable Compliance Posture features + // for the cluster. + DesiredCompliancePostureConfig *CompliancePostureConfig `json:"desiredCompliancePostureConfig,omitempty"` // DesiredContainerdConfig: The desired containerd config for the cluster. DesiredContainerdConfig *ContainerdConfig `json:"desiredContainerdConfig,omitempty"` + // DesiredControlPlaneEndpointsConfig: Control plane endpoints configuration. + DesiredControlPlaneEndpointsConfig *ControlPlaneEndpointsConfig `json:"desiredControlPlaneEndpointsConfig,omitempty"` // DesiredCostManagementConfig: The desired configuration for the fine-grained // cost management feature. 
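Reviewer note: the change repeated mechanically across these hunks is the move from pointer to value receivers on every generated MarshalJSON, so both Cluster and *Cluster now satisfy json.Marshaler. The NoMethod alias is what keeps the pattern from recursing: the alias copies the fields but not the method set. A self-contained sketch of the idiom (the real code routes through gensupport.MarshalJSON so ForceSendFields/NullFields are honored):

package main

import (
	"encoding/json"
	"fmt"
)

type Cluster struct {
	Name string `json:"name,omitempty"`
}

func (s Cluster) MarshalJSON() ([]byte, error) {
	// NoMethod has Cluster's fields but none of its methods, so the inner
	// json.Marshal cannot re-enter this MarshalJSON.
	type NoMethod Cluster
	return json.Marshal(NoMethod(s))
}

func main() {
	b, _ := json.Marshal(Cluster{Name: "demo"}) // works on a value, not just a pointer
	fmt.Println(string(b))                      // {"name":"demo"}
}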
DesiredCostManagementConfig *CostManagementConfig `json:"desiredCostManagementConfig,omitempty"` @@ -1457,6 +1482,10 @@ type ClusterUpdate struct { // documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/datapla // ne-v2) for more. DesiredDatapathProvider string `json:"desiredDatapathProvider,omitempty"` + // DesiredDefaultEnablePrivateNodes: Override the default setting of whether + // future created nodes have private IP addresses only, namely + // NetworkConfig.default_enable_private_nodes + DesiredDefaultEnablePrivateNodes bool `json:"desiredDefaultEnablePrivateNodes,omitempty"` // DesiredDefaultSnatStatus: The desired status of whether to disable default // sNAT for this cluster. DesiredDefaultSnatStatus *DefaultSnatStatus `json:"desiredDefaultSnatStatus,omitempty"` @@ -1472,7 +1501,10 @@ type ClusterUpdate struct { // cluster DesiredEnableMultiNetworking bool `json:"desiredEnableMultiNetworking,omitempty"` // DesiredEnablePrivateEndpoint: Enable/Disable private endpoint for the - // cluster's master. + // cluster's master. Deprecated: Use + // desired_control_plane_endpoints_config.ip_endpoints_config.enable_public_endp + // oint instead. Note that the value of enable_public_endpoint is reversed: if + // enable_private_endpoint is false, then enable_public_endpoint will be true. DesiredEnablePrivateEndpoint bool `json:"desiredEnablePrivateEndpoint,omitempty"` // DesiredFleet: The desired fleet configuration for the cluster. DesiredFleet *Fleet `json:"desiredFleet,omitempty"` @@ -1522,7 +1554,9 @@ type ClusterUpdate struct { // be used for GKE 1.14+ or `logging.googleapis.com` for earlier versions. DesiredLoggingService string `json:"desiredLoggingService,omitempty"` // DesiredMasterAuthorizedNetworksConfig: The desired configuration options for - // master authorized networks feature. + // master authorized networks feature. Deprecated: Use + // desired_control_plane_endpoints_config.ip_endpoints_config.authorized_network + // s_config instead. DesiredMasterAuthorizedNetworksConfig *MasterAuthorizedNetworksConfig `json:"desiredMasterAuthorizedNetworksConfig,omitempty"` // DesiredMasterVersion: The Kubernetes version to change the master to. Users // may specify either explicit versions offered by Kubernetes Engine or version @@ -1591,7 +1625,9 @@ type ClusterUpdate struct { // DesiredPrivateClusterConfig: The desired private cluster configuration. // master_global_access_config is the only field that can be changed via this // field. See also ClusterUpdate.desired_enable_private_endpoint for modifying - // other fields within PrivateClusterConfig. + // other fields within PrivateClusterConfig. Deprecated: Use + // desired_control_plane_endpoints_config.ip_endpoints_config.global_access + // instead. DesiredPrivateClusterConfig *PrivateClusterConfig `json:"desiredPrivateClusterConfig,omitempty"` // DesiredPrivateIpv6GoogleAccess: The desired state of IPv6 connectivity to // Google Services. @@ -1605,11 +1641,16 @@ type ClusterUpdate struct { // "PRIVATE_IPV6_GOOGLE_ACCESS_BIDIRECTIONAL" - Enables private IPv6 access // to and from Google Services DesiredPrivateIpv6GoogleAccess string `json:"desiredPrivateIpv6GoogleAccess,omitempty"` + // DesiredRbacBindingConfig: RBACBindingConfig allows user to restrict + // ClusterRoleBindings an RoleBindings that can be created. + DesiredRbacBindingConfig *RBACBindingConfig `json:"desiredRbacBindingConfig,omitempty"` // DesiredReleaseChannel: The desired release channel configuration. 
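Reviewer note: the deprecation note on DesiredEnablePrivateEndpoint is easy to trip over because the replacement flag has inverted polarity. A hypothetical migration helper using the IPEndpointsConfig type added later in this file; the ForceSendFields append matters because false is Go's zero value and would otherwise be dropped from the request:

import container "google.golang.org/api/container/v1"

// ipEndpointsFromLegacy maps the deprecated enable_private_endpoint flag onto
// the new config: enable_public_endpoint == !enable_private_endpoint.
func ipEndpointsFromLegacy(enablePrivateEndpoint bool) *container.IPEndpointsConfig {
	cfg := &container.IPEndpointsConfig{
		Enabled:              true,
		EnablePublicEndpoint: !enablePrivateEndpoint,
	}
	if !cfg.EnablePublicEndpoint {
		// Send the explicit false rather than omitting the field.
		cfg.ForceSendFields = append(cfg.ForceSendFields, "EnablePublicEndpoint")
	}
	return cfg
}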
DesiredReleaseChannel *ReleaseChannel `json:"desiredReleaseChannel,omitempty"` // DesiredResourceUsageExportConfig: The desired configuration for exporting // resource usage. DesiredResourceUsageExportConfig *ResourceUsageExportConfig `json:"desiredResourceUsageExportConfig,omitempty"` + // DesiredSecretManagerConfig: Enable/Disable Secret Manager Config. + DesiredSecretManagerConfig *SecretManagerConfig `json:"desiredSecretManagerConfig,omitempty"` // DesiredSecurityPostureConfig: Enable/Disable Security Posture API features // for the cluster. DesiredSecurityPostureConfig *SecurityPostureConfig `json:"desiredSecurityPostureConfig,omitempty"` @@ -1643,6 +1684,8 @@ type ClusterUpdate struct { // removed from the cluster. The pod ranges specified here must have been // specified earlier in the 'additional_pod_ranges_config' argument. RemovedAdditionalPodRangesConfig *AdditionalPodRangesConfig `json:"removedAdditionalPodRangesConfig,omitempty"` + // UserManagedKeysConfig: The Custom keys configuration for the cluster. + UserManagedKeysConfig *UserManagedKeysConfig `json:"userManagedKeysConfig,omitempty"` // ForceSendFields is a list of field names (e.g. "AdditionalPodRangesConfig") // to unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See @@ -1656,9 +1699,9 @@ type ClusterUpdate struct { NullFields []string `json:"-"` } -func (s *ClusterUpdate) MarshalJSON() ([]byte, error) { +func (s ClusterUpdate) MarshalJSON() ([]byte, error) { type NoMethod ClusterUpdate - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CompleteIPRotationRequest: CompleteIPRotationRequest moves the cluster @@ -1692,9 +1735,9 @@ type CompleteIPRotationRequest struct { NullFields []string `json:"-"` } -func (s *CompleteIPRotationRequest) MarshalJSON() ([]byte, error) { +func (s CompleteIPRotationRequest) MarshalJSON() ([]byte, error) { type NoMethod CompleteIPRotationRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CompleteNodePoolUpgradeRequest: CompleteNodePoolUpgradeRequest sets the name @@ -1702,6 +1745,58 @@ func (s *CompleteIPRotationRequest) MarshalJSON() ([]byte, error) { type CompleteNodePoolUpgradeRequest struct { } +// CompliancePostureConfig: CompliancePostureConfig defines the settings needed +// to enable/disable features for the Compliance Posture. +type CompliancePostureConfig struct { + // ComplianceStandards: List of enabled compliance standards. + ComplianceStandards []*ComplianceStandard `json:"complianceStandards,omitempty"` + // Mode: Defines the enablement mode for Compliance Posture. + // + // Possible values: + // "MODE_UNSPECIFIED" - Default value not specified. + // "DISABLED" - Disables Compliance Posture features on the cluster. + // "ENABLED" - Enables Compliance Posture features on the cluster. + Mode string `json:"mode,omitempty"` + // ForceSendFields is a list of field names (e.g. "ComplianceStandards") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "ComplianceStandards") to include + // in API requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s CompliancePostureConfig) MarshalJSON() ([]byte, error) { + type NoMethod CompliancePostureConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ComplianceStandard: Defines the details of a compliance standard. +type ComplianceStandard struct { + // Standard: Name of the compliance standard. + Standard string `json:"standard,omitempty"` + // ForceSendFields is a list of field names (e.g. "Standard") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Standard") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ComplianceStandard) MarshalJSON() ([]byte, error) { + type NoMethod ComplianceStandard + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + // ConfidentialNodes: ConfidentialNodes is configuration for the confidential // nodes feature, which makes nodes run on confidential VMs. type ConfidentialNodes struct { @@ -1720,9 +1815,9 @@ type ConfidentialNodes struct { NullFields []string `json:"-"` } -func (s *ConfidentialNodes) MarshalJSON() ([]byte, error) { +func (s ConfidentialNodes) MarshalJSON() ([]byte, error) { type NoMethod ConfidentialNodes - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ConfigConnectorConfig: Configuration options for the Config Connector @@ -1743,9 +1838,9 @@ type ConfigConnectorConfig struct { NullFields []string `json:"-"` } -func (s *ConfigConnectorConfig) MarshalJSON() ([]byte, error) { +func (s ConfigConnectorConfig) MarshalJSON() ([]byte, error) { type NoMethod ConfigConnectorConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ConsumptionMeteringConfig: Parameters for controlling consumption metering. @@ -1767,9 +1862,9 @@ type ConsumptionMeteringConfig struct { NullFields []string `json:"-"` } -func (s *ConsumptionMeteringConfig) MarshalJSON() ([]byte, error) { +func (s ConsumptionMeteringConfig) MarshalJSON() ([]byte, error) { type NoMethod ConsumptionMeteringConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ContainerdConfig: ContainerdConfig contains configuration to customize @@ -1791,9 +1886,34 @@ type ContainerdConfig struct { NullFields []string `json:"-"` } -func (s *ContainerdConfig) MarshalJSON() ([]byte, error) { +func (s ContainerdConfig) MarshalJSON() ([]byte, error) { type NoMethod ContainerdConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ControlPlaneEndpointsConfig: Configuration for all of the cluster's control +// plane endpoints. 
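Reviewer note: with CompliancePostureConfig and ComplianceStandard defined above, enabling the feature goes through the DesiredCompliancePostureConfig field added to ClusterUpdate earlier in this series. A sketch, assuming the usual UpdateClusterRequest wrapper; the standard name is purely illustrative:

import container "google.golang.org/api/container/v1"

func enableCompliancePosture(svc *container.Service, clusterName string) error {
	update := &container.ClusterUpdate{
		DesiredCompliancePostureConfig: &container.CompliancePostureConfig{
			Mode: "ENABLED",
			ComplianceStandards: []*container.ComplianceStandard{
				{Standard: "example-standard"}, // illustrative; real names come from the API
			},
		},
	}
	_, err := svc.Projects.Locations.Clusters.Update(clusterName,
		&container.UpdateClusterRequest{Update: update}).Do()
	return err
}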
+type ControlPlaneEndpointsConfig struct { + // DnsEndpointConfig: DNS endpoint configuration. + DnsEndpointConfig *DNSEndpointConfig `json:"dnsEndpointConfig,omitempty"` + // IpEndpointsConfig: IP endpoints configuration. + IpEndpointsConfig *IPEndpointsConfig `json:"ipEndpointsConfig,omitempty"` + // ForceSendFields is a list of field names (e.g. "DnsEndpointConfig") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "DnsEndpointConfig") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ControlPlaneEndpointsConfig) MarshalJSON() ([]byte, error) { + type NoMethod ControlPlaneEndpointsConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CostManagementConfig: Configuration for fine-grained cost management @@ -1814,9 +1934,9 @@ type CostManagementConfig struct { NullFields []string `json:"-"` } -func (s *CostManagementConfig) MarshalJSON() ([]byte, error) { +func (s CostManagementConfig) MarshalJSON() ([]byte, error) { type NoMethod CostManagementConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateClusterRequest: CreateClusterRequest creates a cluster. @@ -1849,9 +1969,9 @@ type CreateClusterRequest struct { NullFields []string `json:"-"` } -func (s *CreateClusterRequest) MarshalJSON() ([]byte, error) { +func (s CreateClusterRequest) MarshalJSON() ([]byte, error) { type NoMethod CreateClusterRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateNodePoolRequest: CreateNodePoolRequest creates a node pool for a @@ -1888,9 +2008,9 @@ type CreateNodePoolRequest struct { NullFields []string `json:"-"` } -func (s *CreateNodePoolRequest) MarshalJSON() ([]byte, error) { +func (s CreateNodePoolRequest) MarshalJSON() ([]byte, error) { type NoMethod CreateNodePoolRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DNSConfig: DNSConfig contains the desired set of options for configuring @@ -1933,15 +2053,44 @@ type DNSConfig struct { NullFields []string `json:"-"` } -func (s *DNSConfig) MarshalJSON() ([]byte, error) { +func (s DNSConfig) MarshalJSON() ([]byte, error) { type NoMethod DNSConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// DNSEndpointConfig: Describes the configuration of a DNS endpoint. +type DNSEndpointConfig struct { + // AllowExternalTraffic: Controls whether user traffic is allowed over this + // endpoint. Note that GCP-managed services may still use the endpoint even if + // this is false. + AllowExternalTraffic bool `json:"allowExternalTraffic,omitempty"` + // Endpoint: Output only. The cluster's DNS endpoint configuration. A DNS + // format address. This is accessible from the public internet. 
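Reviewer note: ControlPlaneEndpointsConfig is the umbrella for the DNS and IP endpoint settings that the deprecation notes earlier in this file point at. A sketch wiring both halves into the DesiredControlPlaneEndpointsConfig field on ClusterUpdate (all types are from this file):

import container "google.golang.org/api/container/v1"

func desiredEndpoints() *container.ClusterUpdate {
	return &container.ClusterUpdate{
		DesiredControlPlaneEndpointsConfig: &container.ControlPlaneEndpointsConfig{
			DnsEndpointConfig: &container.DNSEndpointConfig{
				AllowExternalTraffic: true,
			},
			IpEndpointsConfig: &container.IPEndpointsConfig{
				Enabled:      true,
				GlobalAccess: true,
				// EnablePublicEndpoint stays false here; add it to
				// ForceSendFields if the explicit false must reach the server.
			},
		},
	}
}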
Ex: + // uid.us-central1.gke.goog. Always present, but the behavior may change + // according to the value of DNSEndpointConfig.allow_external_traffic. + Endpoint string `json:"endpoint,omitempty"` + // ForceSendFields is a list of field names (e.g. "AllowExternalTraffic") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AllowExternalTraffic") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s DNSEndpointConfig) MarshalJSON() ([]byte, error) { + type NoMethod DNSEndpointConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DailyMaintenanceWindow: Time window specified for daily maintenance // operations. type DailyMaintenanceWindow struct { - // Duration: [Output only] Duration of the time window, automatically chosen to + // Duration: Output only. Duration of the time window, automatically chosen to // be smallest possible in the given scenario. Duration will be in RFC3339 // (https://www.ietf.org/rfc/rfc3339.txt) format "PTnHnMnS". Duration string `json:"duration,omitempty"` @@ -1963,9 +2112,9 @@ type DailyMaintenanceWindow struct { NullFields []string `json:"-"` } -func (s *DailyMaintenanceWindow) MarshalJSON() ([]byte, error) { +func (s DailyMaintenanceWindow) MarshalJSON() ([]byte, error) { type NoMethod DailyMaintenanceWindow - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DatabaseEncryption: Configuration of etcd encryption. @@ -2018,9 +2167,9 @@ type DatabaseEncryption struct { NullFields []string `json:"-"` } -func (s *DatabaseEncryption) MarshalJSON() ([]byte, error) { +func (s DatabaseEncryption) MarshalJSON() ([]byte, error) { type NoMethod DatabaseEncryption - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DefaultSnatStatus: DefaultSnatStatus contains the desired state of whether @@ -2041,9 +2190,9 @@ type DefaultSnatStatus struct { NullFields []string `json:"-"` } -func (s *DefaultSnatStatus) MarshalJSON() ([]byte, error) { +func (s DefaultSnatStatus) MarshalJSON() ([]byte, error) { type NoMethod DefaultSnatStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DnsCacheConfig: Configuration for NodeLocal DNSCache @@ -2063,9 +2212,9 @@ type DnsCacheConfig struct { NullFields []string `json:"-"` } -func (s *DnsCacheConfig) MarshalJSON() ([]byte, error) { +func (s DnsCacheConfig) MarshalJSON() ([]byte, error) { type NoMethod DnsCacheConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Empty: A generic empty message that you can re-use to avoid defining @@ -2079,8 +2228,8 @@ type Empty struct { // EnterpriseConfig: EnterpriseConfig is the cluster enterprise configuration. type EnterpriseConfig struct { - // ClusterTier: Output only. 
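Reviewer note: DailyMaintenanceWindow.Duration comes back in the ISO-8601 shape "PTnHnMnS", which Go's time.ParseDuration does not understand. A small best-effort parser for exactly that shape (integer components only, an intentional simplification):

import (
	"fmt"
	"regexp"
	"strconv"
	"time"
)

var iso8601Duration = regexp.MustCompile(`^PT(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?$`)

// parseAPIDuration converts e.g. "PT4H0M0S" into a time.Duration.
func parseAPIDuration(s string) (time.Duration, error) {
	m := iso8601Duration.FindStringSubmatch(s)
	if m == nil {
		return 0, fmt.Errorf("unsupported duration %q", s)
	}
	var d time.Duration
	for i, unit := range []time.Duration{time.Hour, time.Minute, time.Second} {
		if m[i+1] == "" {
			continue
		}
		n, err := strconv.Atoi(m[i+1])
		if err != nil {
			return 0, err
		}
		d += time.Duration(n) * unit
	}
	return d, nil
}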
[Output only] cluster_tier specifies the premium - // tier of the cluster. + // ClusterTier: Output only. cluster_tier indicates the effective tier of the + // cluster. // // Possible values: // "CLUSTER_TIER_UNSPECIFIED" - CLUSTER_TIER_UNSPECIFIED is when cluster_tier @@ -2101,9 +2250,9 @@ type EnterpriseConfig struct { NullFields []string `json:"-"` } -func (s *EnterpriseConfig) MarshalJSON() ([]byte, error) { +func (s EnterpriseConfig) MarshalJSON() ([]byte, error) { type NoMethod EnterpriseConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // EphemeralStorageLocalSsdConfig: EphemeralStorageLocalSsdConfig contains @@ -2137,9 +2286,9 @@ type EphemeralStorageLocalSsdConfig struct { NullFields []string `json:"-"` } -func (s *EphemeralStorageLocalSsdConfig) MarshalJSON() ([]byte, error) { +func (s EphemeralStorageLocalSsdConfig) MarshalJSON() ([]byte, error) { type NoMethod EphemeralStorageLocalSsdConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FastSocket: Configuration of Fast Socket feature. @@ -2159,9 +2308,9 @@ type FastSocket struct { NullFields []string `json:"-"` } -func (s *FastSocket) MarshalJSON() ([]byte, error) { +func (s FastSocket) MarshalJSON() ([]byte, error) { type NoMethod FastSocket - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Filter: Allows filtering to one or more specific event types. If event types @@ -2190,18 +2339,18 @@ type Filter struct { NullFields []string `json:"-"` } -func (s *Filter) MarshalJSON() ([]byte, error) { +func (s Filter) MarshalJSON() ([]byte, error) { type NoMethod Filter - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Fleet: Fleet is the fleet configuration for the cluster. type Fleet struct { - // Membership: [Output only] The full resource name of the registered fleet + // Membership: Output only. The full resource name of the registered fleet // membership of the cluster, in the format // `//gkehub.googleapis.com/projects/*/locations/*/memberships/*`. Membership string `json:"membership,omitempty"` - // PreRegistered: [Output only] Whether the cluster has been registered through + // PreRegistered: Output only. Whether the cluster has been registered through // the fleet API. 
PreRegistered bool `json:"preRegistered,omitempty"` // Project: The Fleet host project(project ID or project number) where this @@ -2221,9 +2370,9 @@ type Fleet struct { NullFields []string `json:"-"` } -func (s *Fleet) MarshalJSON() ([]byte, error) { +func (s Fleet) MarshalJSON() ([]byte, error) { type NoMethod Fleet - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GCPSecretManagerCertificateConfig: GCPSecretManagerCertificateConfig @@ -2247,9 +2396,9 @@ type GCPSecretManagerCertificateConfig struct { NullFields []string `json:"-"` } -func (s *GCPSecretManagerCertificateConfig) MarshalJSON() ([]byte, error) { +func (s GCPSecretManagerCertificateConfig) MarshalJSON() ([]byte, error) { type NoMethod GCPSecretManagerCertificateConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GPUDriverInstallationConfig: GPUDriverInstallationConfig specifies the @@ -2278,9 +2427,9 @@ type GPUDriverInstallationConfig struct { NullFields []string `json:"-"` } -func (s *GPUDriverInstallationConfig) MarshalJSON() ([]byte, error) { +func (s GPUDriverInstallationConfig) MarshalJSON() ([]byte, error) { type NoMethod GPUDriverInstallationConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GPUSharingConfig: GPUSharingConfig represents the GPU sharing configuration @@ -2310,9 +2459,9 @@ type GPUSharingConfig struct { NullFields []string `json:"-"` } -func (s *GPUSharingConfig) MarshalJSON() ([]byte, error) { +func (s GPUSharingConfig) MarshalJSON() ([]byte, error) { type NoMethod GPUSharingConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GatewayAPIConfig: GatewayAPIConfig contains the desired config of Gateway @@ -2323,8 +2472,8 @@ type GatewayAPIConfig struct { // Possible values: // "CHANNEL_UNSPECIFIED" - Default value. // "CHANNEL_DISABLED" - Gateway API support is disabled - // "CHANNEL_EXPERIMENTAL" - Gateway API support is enabled, experimental CRDs - // are installed + // "CHANNEL_EXPERIMENTAL" - Deprecated: use CHANNEL_STANDARD instead. 
Gateway + // API support is enabled, experimental CRDs are installed // "CHANNEL_STANDARD" - Gateway API support is enabled, standard CRDs are // installed Channel string `json:"channel,omitempty"` @@ -2341,9 +2490,9 @@ type GatewayAPIConfig struct { NullFields []string `json:"-"` } -func (s *GatewayAPIConfig) MarshalJSON() ([]byte, error) { +func (s GatewayAPIConfig) MarshalJSON() ([]byte, error) { type NoMethod GatewayAPIConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GcePersistentDiskCsiDriverConfig: Configuration for the Compute Engine PD @@ -2365,9 +2514,9 @@ type GcePersistentDiskCsiDriverConfig struct { NullFields []string `json:"-"` } -func (s *GcePersistentDiskCsiDriverConfig) MarshalJSON() ([]byte, error) { +func (s GcePersistentDiskCsiDriverConfig) MarshalJSON() ([]byte, error) { type NoMethod GcePersistentDiskCsiDriverConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GcfsConfig: GcfsConfig contains configurations of Google Container File @@ -2388,9 +2537,9 @@ type GcfsConfig struct { NullFields []string `json:"-"` } -func (s *GcfsConfig) MarshalJSON() ([]byte, error) { +func (s GcfsConfig) MarshalJSON() ([]byte, error) { type NoMethod GcfsConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GcpFilestoreCsiDriverConfig: Configuration for the GCP Filestore CSI driver. @@ -2410,9 +2559,9 @@ type GcpFilestoreCsiDriverConfig struct { NullFields []string `json:"-"` } -func (s *GcpFilestoreCsiDriverConfig) MarshalJSON() ([]byte, error) { +func (s GcpFilestoreCsiDriverConfig) MarshalJSON() ([]byte, error) { type NoMethod GcpFilestoreCsiDriverConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GcsFuseCsiDriverConfig: Configuration for the Cloud Storage Fuse CSI driver. @@ -2433,16 +2582,16 @@ type GcsFuseCsiDriverConfig struct { NullFields []string `json:"-"` } -func (s *GcsFuseCsiDriverConfig) MarshalJSON() ([]byte, error) { +func (s GcsFuseCsiDriverConfig) MarshalJSON() ([]byte, error) { type NoMethod GcsFuseCsiDriverConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetJSONWebKeysResponse: GetJSONWebKeysResponse is a valid JSON Web Key Set // as specififed in rfc 7517 type GetJSONWebKeysResponse struct { - // CacheHeader: OnePlatform automatically extracts this field and uses it to - // set the HTTP Cache-Control header. + // CacheHeader: For HTTP requests, this field is automatically extracted into + // the Cache-Control HTTP header. CacheHeader *HttpCacheControlResponseHeader `json:"cacheHeader,omitempty"` // Keys: The public component of the keys used by the cluster to sign token // requests. 
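Reviewer note: given the CHANNEL_EXPERIMENTAL deprecation above, new configurations should pin CHANNEL_STANDARD. A sketch; the Desired* field name follows the ClusterUpdate convention used throughout this file but should be checked against the generated source:

import container "google.golang.org/api/container/v1"

func standardGatewayUpdate() *container.ClusterUpdate {
	return &container.ClusterUpdate{
		// Field name assumed from the Desired* convention in this file.
		DesiredGatewayApiConfig: &container.GatewayAPIConfig{
			Channel: "CHANNEL_STANDARD", // CHANNEL_EXPERIMENTAL is deprecated
		},
	}
}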
@@ -2463,17 +2612,17 @@ type GetJSONWebKeysResponse struct { NullFields []string `json:"-"` } -func (s *GetJSONWebKeysResponse) MarshalJSON() ([]byte, error) { +func (s GetJSONWebKeysResponse) MarshalJSON() ([]byte, error) { type NoMethod GetJSONWebKeysResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetOpenIDConfigResponse: GetOpenIDConfigResponse is an OIDC discovery // document for the cluster. See the OpenID Connect Discovery 1.0 specification // for details. type GetOpenIDConfigResponse struct { - // CacheHeader: OnePlatform automatically extracts this field and uses it to - // set the HTTP Cache-Control header. + // CacheHeader: For HTTP requests, this field is automatically extracted into + // the Cache-Control HTTP header. CacheHeader *HttpCacheControlResponseHeader `json:"cacheHeader,omitempty"` // ClaimsSupported: Supported claims. ClaimsSupported []string `json:"claims_supported,omitempty"` @@ -2505,9 +2654,9 @@ type GetOpenIDConfigResponse struct { NullFields []string `json:"-"` } -func (s *GetOpenIDConfigResponse) MarshalJSON() ([]byte, error) { +func (s GetOpenIDConfigResponse) MarshalJSON() ([]byte, error) { type NoMethod GetOpenIDConfigResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GkeBackupAgentConfig: Configuration for the Backup for GKE Agent. @@ -2527,9 +2676,9 @@ type GkeBackupAgentConfig struct { NullFields []string `json:"-"` } -func (s *GkeBackupAgentConfig) MarshalJSON() ([]byte, error) { +func (s GkeBackupAgentConfig) MarshalJSON() ([]byte, error) { type NoMethod GkeBackupAgentConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HorizontalPodAutoscaling: Configuration options for the horizontal pod @@ -2554,9 +2703,9 @@ type HorizontalPodAutoscaling struct { NullFields []string `json:"-"` } -func (s *HorizontalPodAutoscaling) MarshalJSON() ([]byte, error) { +func (s HorizontalPodAutoscaling) MarshalJSON() ([]byte, error) { type NoMethod HorizontalPodAutoscaling - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpCacheControlResponseHeader: RFC-2616: cache control support @@ -2580,9 +2729,9 @@ type HttpCacheControlResponseHeader struct { NullFields []string `json:"-"` } -func (s *HttpCacheControlResponseHeader) MarshalJSON() ([]byte, error) { +func (s HttpCacheControlResponseHeader) MarshalJSON() ([]byte, error) { type NoMethod HttpCacheControlResponseHeader - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpLoadBalancing: Configuration options for the HTTP (L7) load balancing @@ -2606,9 +2755,9 @@ type HttpLoadBalancing struct { NullFields []string `json:"-"` } -func (s *HttpLoadBalancing) MarshalJSON() ([]byte, error) { +func (s HttpLoadBalancing) MarshalJSON() ([]byte, error) { type NoMethod HttpLoadBalancing - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HugepagesConfig: Hugepages amount in both 2m and 1g size @@ -2630,9 +2779,9 @@ type HugepagesConfig struct { 
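Reviewer note: GetJSONWebKeysResponse is served from the cluster's JWKS endpoint; together with the Jwk type defined below it is enough to rebuild the cluster's token-signing keys offline. A sketch, assuming the generated GetJwks call and the standard RFC 7517 base64url-encoded n/e members:

import (
	"crypto/rsa"
	"encoding/base64"
	"fmt"
	"math/big"

	container "google.golang.org/api/container/v1"
)

func clusterSigningKeys(svc *container.Service, clusterName string) ([]*rsa.PublicKey, error) {
	resp, err := svc.Projects.Locations.Clusters.GetJwks(clusterName).Do()
	if err != nil {
		return nil, err
	}
	var keys []*rsa.PublicKey
	for _, jwk := range resp.Keys {
		n, err := base64.RawURLEncoding.DecodeString(jwk.N)
		if err != nil {
			return nil, fmt.Errorf("bad modulus: %v", err)
		}
		e, err := base64.RawURLEncoding.DecodeString(jwk.E)
		if err != nil {
			return nil, fmt.Errorf("bad exponent: %v", err)
		}
		keys = append(keys, &rsa.PublicKey{
			N: new(big.Int).SetBytes(n),
			E: int(new(big.Int).SetBytes(e).Int64()),
		})
	}
	return keys, nil
}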
NullFields []string `json:"-"` } -func (s *HugepagesConfig) MarshalJSON() ([]byte, error) { +func (s HugepagesConfig) MarshalJSON() ([]byte, error) { type NoMethod HugepagesConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ILBSubsettingConfig: ILBSubsettingConfig contains the desired config of L4 @@ -2653,18 +2802,18 @@ type ILBSubsettingConfig struct { NullFields []string `json:"-"` } -func (s *ILBSubsettingConfig) MarshalJSON() ([]byte, error) { +func (s ILBSubsettingConfig) MarshalJSON() ([]byte, error) { type NoMethod ILBSubsettingConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // IPAllocationPolicy: Configuration for controlling how IPs are allocated in // the cluster. type IPAllocationPolicy struct { - // AdditionalPodRangesConfig: Output only. [Output only] The additional pod - // ranges that are added to the cluster. These pod ranges can be used by new - // node pools to allocate pod IPs automatically. Once the range is removed it - // will not show up in IPAllocationPolicy. + // AdditionalPodRangesConfig: Output only. The additional pod ranges that are + // added to the cluster. These pod ranges can be used by new node pools to + // allocate pod IPs automatically. Once the range is removed it will not show + // up in IPAllocationPolicy. AdditionalPodRangesConfig *AdditionalPodRangesConfig `json:"additionalPodRangesConfig,omitempty"` // ClusterIpv4Cidr: This field is deprecated, use cluster_ipv4_cidr_block. ClusterIpv4Cidr string `json:"clusterIpv4Cidr,omitempty"` @@ -2686,10 +2835,9 @@ type IPAllocationPolicy struct { // CreateSubnetwork: Whether a new subnetwork will be created automatically for // the cluster. This field is only applicable when `use_ip_aliases` is true. CreateSubnetwork bool `json:"createSubnetwork,omitempty"` - // DefaultPodIpv4RangeUtilization: Output only. [Output only] The utilization - // of the cluster default IPv4 range for the pod. The ratio is Usage/[Total - // number of IPs in the secondary range], - // Usage=numNodes*numZones*podIPsPerNode. + // DefaultPodIpv4RangeUtilization: Output only. The utilization of the cluster + // default IPv4 range for the pod. The ratio is Usage/[Total number of IPs in + // the secondary range], Usage=numNodes*numZones*podIPsPerNode. DefaultPodIpv4RangeUtilization float64 `json:"defaultPodIpv4RangeUtilization,omitempty"` // Ipv6AccessType: The ipv6 access type (internal or external) when // create_subnetwork is true @@ -2730,8 +2878,8 @@ type IPAllocationPolicy struct { // `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, // `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. ServicesIpv4CidrBlock string `json:"servicesIpv4CidrBlock,omitempty"` - // ServicesIpv6CidrBlock: Output only. [Output only] The services IPv6 CIDR - // block for the cluster. + // ServicesIpv6CidrBlock: Output only. The services IPv6 CIDR block for the + // cluster. ServicesIpv6CidrBlock string `json:"servicesIpv6CidrBlock,omitempty"` // ServicesSecondaryRangeName: The name of the secondary range to be used as // for the services CIDR block. 
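Reviewer note: the utilization fields in IPAllocationPolicy are documented as Usage/[total IPs in the secondary range] with Usage = numNodes * numZones * podIPsPerNode. Worked through in code:

package main

import (
	"fmt"
	"math"
)

// podRangeUtilization reproduces the documented ratio for an IPv4 secondary
// range with the given prefix length.
func podRangeUtilization(numNodes, numZones, podIPsPerNode, prefixLen int) float64 {
	usage := float64(numNodes * numZones * podIPsPerNode)
	total := math.Exp2(float64(32 - prefixLen)) // IPs in the secondary range
	return usage / total
}

func main() {
	// 10 nodes x 3 zones x 256 pod IPs per node over a /18 (16384 IPs).
	fmt.Println(podRangeUtilization(10, 3, 256, 18)) // 0.46875
}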
The secondary range will be used for service @@ -2746,8 +2894,8 @@ type IPAllocationPolicy struct { // "IPV4" - Cluster is IPV4 only // "IPV4_IPV6" - Cluster can use both IPv4 and IPv6 StackType string `json:"stackType,omitempty"` - // SubnetIpv6CidrBlock: Output only. [Output only] The subnet's IPv6 CIDR block - // used by nodes and pods. + // SubnetIpv6CidrBlock: Output only. The subnet's IPv6 CIDR block used by nodes + // and pods. SubnetIpv6CidrBlock string `json:"subnetIpv6CidrBlock,omitempty"` // SubnetworkName: A custom subnetwork name to be used if `create_subnetwork` // is true. If this field is empty, then an automatic name will be chosen for @@ -2785,9 +2933,9 @@ type IPAllocationPolicy struct { NullFields []string `json:"-"` } -func (s *IPAllocationPolicy) MarshalJSON() ([]byte, error) { +func (s IPAllocationPolicy) MarshalJSON() ([]byte, error) { type NoMethod IPAllocationPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *IPAllocationPolicy) UnmarshalJSON(data []byte) error { @@ -2804,6 +2952,54 @@ func (s *IPAllocationPolicy) UnmarshalJSON(data []byte) error { return nil } +// IPEndpointsConfig: IP endpoints configuration. +type IPEndpointsConfig struct { + // AuthorizedNetworksConfig: Configuration of authorized networks. If enabled, + // restricts access to the control plane based on source IP. It is invalid to + // specify both Cluster.masterAuthorizedNetworksConfig and this field at the + // same time. + AuthorizedNetworksConfig *MasterAuthorizedNetworksConfig `json:"authorizedNetworksConfig,omitempty"` + // EnablePublicEndpoint: Controls whether the control plane allows access + // through a public IP. It is invalid to specify both + // PrivateClusterConfig.enablePrivateEndpoint and this field at the same time. + EnablePublicEndpoint bool `json:"enablePublicEndpoint,omitempty"` + // Enabled: Controls whether to allow direct IP access. + Enabled bool `json:"enabled,omitempty"` + // GlobalAccess: Controls whether the control plane's private endpoint is + // accessible from sources in other regions. It is invalid to specify both + // PrivateClusterMasterGlobalAccessConfig.enabled and this field at the same + // time. + GlobalAccess bool `json:"globalAccess,omitempty"` + // PrivateEndpoint: Output only. The internal IP address of this cluster's + // control plane. Only populated if enabled. + PrivateEndpoint string `json:"privateEndpoint,omitempty"` + // PrivateEndpointSubnetwork: Subnet to provision the master's private endpoint + // during cluster creation. Specified in projects/*/regions/*/subnetworks/* + // format. It is invalid to specify both + // PrivateClusterConfig.privateEndpointSubnetwork and this field at the same + // time. + PrivateEndpointSubnetwork string `json:"privateEndpointSubnetwork,omitempty"` + // PublicEndpoint: Output only. The external IP address of this cluster's + // control plane. Only populated if enabled. + PublicEndpoint string `json:"publicEndpoint,omitempty"` + // ForceSendFields is a list of field names (e.g. "AuthorizedNetworksConfig") + // to unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. 
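Reviewer note: the hand-written UnmarshalJSON kept on IPAllocationPolicy exists because the API may serialize its float64 utilization fields as JSON strings (notably "NaN" and "Infinity", which have no JSON number form). A standalone sketch of what the gensupport.JSONFloat64 helper does, under that assumption:

import (
	"encoding/json"
	"strconv"
)

// jsonFloat64 accepts both 0.5 and "0.5" on the wire.
type jsonFloat64 float64

func (f *jsonFloat64) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err == nil {
		v, err := strconv.ParseFloat(s, 64) // also parses "NaN", "Infinity"
		if err != nil {
			return err
		}
		*f = jsonFloat64(v)
		return nil
	}
	var v float64
	if err := json.Unmarshal(b, &v); err != nil {
		return err
	}
	*f = jsonFloat64(v)
	return nil
}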
"AuthorizedNetworksConfig") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s IPEndpointsConfig) MarshalJSON() ([]byte, error) { + type NoMethod IPEndpointsConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + // IdentityServiceConfig: IdentityServiceConfig is configuration for Identity // Service which allows customers to use external identity providers with the // K8S API @@ -2823,9 +3019,9 @@ type IdentityServiceConfig struct { NullFields []string `json:"-"` } -func (s *IdentityServiceConfig) MarshalJSON() ([]byte, error) { +func (s IdentityServiceConfig) MarshalJSON() ([]byte, error) { type NoMethod IdentityServiceConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // IntraNodeVisibilityConfig: IntraNodeVisibilityConfig contains the desired @@ -2846,9 +3042,9 @@ type IntraNodeVisibilityConfig struct { NullFields []string `json:"-"` } -func (s *IntraNodeVisibilityConfig) MarshalJSON() ([]byte, error) { +func (s IntraNodeVisibilityConfig) MarshalJSON() ([]byte, error) { type NoMethod IntraNodeVisibilityConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Jwk: Jwk is a JSON Web Key as specified in RFC 7517 @@ -2884,9 +3080,9 @@ type Jwk struct { NullFields []string `json:"-"` } -func (s *Jwk) MarshalJSON() ([]byte, error) { +func (s Jwk) MarshalJSON() ([]byte, error) { type NoMethod Jwk - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // K8sBetaAPIConfig: K8sBetaAPIConfig , configuration for beta APIs @@ -2906,9 +3102,9 @@ type K8sBetaAPIConfig struct { NullFields []string `json:"-"` } -func (s *K8sBetaAPIConfig) MarshalJSON() ([]byte, error) { +func (s K8sBetaAPIConfig) MarshalJSON() ([]byte, error) { type NoMethod K8sBetaAPIConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // KubernetesDashboard: Configuration for the Kubernetes Dashboard. @@ -2928,9 +3124,9 @@ type KubernetesDashboard struct { NullFields []string `json:"-"` } -func (s *KubernetesDashboard) MarshalJSON() ([]byte, error) { +func (s KubernetesDashboard) MarshalJSON() ([]byte, error) { type NoMethod KubernetesDashboard - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LegacyAbac: Configuration for the legacy Attribute Based Access Control @@ -2954,9 +3150,9 @@ type LegacyAbac struct { NullFields []string `json:"-"` } -func (s *LegacyAbac) MarshalJSON() ([]byte, error) { +func (s LegacyAbac) MarshalJSON() ([]byte, error) { type NoMethod LegacyAbac - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LinuxNodeConfig: Parameters that can be configured on Linux nodes. 
@@ -2994,9 +3190,9 @@ type LinuxNodeConfig struct { NullFields []string `json:"-"` } -func (s *LinuxNodeConfig) MarshalJSON() ([]byte, error) { +func (s LinuxNodeConfig) MarshalJSON() ([]byte, error) { type NoMethod LinuxNodeConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListClustersResponse: ListClustersResponse is the result of @@ -3024,9 +3220,9 @@ type ListClustersResponse struct { NullFields []string `json:"-"` } -func (s *ListClustersResponse) MarshalJSON() ([]byte, error) { +func (s ListClustersResponse) MarshalJSON() ([]byte, error) { type NoMethod ListClustersResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListNodePoolsResponse: ListNodePoolsResponse is the result of @@ -3050,9 +3246,9 @@ type ListNodePoolsResponse struct { NullFields []string `json:"-"` } -func (s *ListNodePoolsResponse) MarshalJSON() ([]byte, error) { +func (s ListNodePoolsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListNodePoolsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListOperationsResponse: ListOperationsResponse is the result of @@ -3079,9 +3275,9 @@ type ListOperationsResponse struct { NullFields []string `json:"-"` } -func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { +func (s ListOperationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListOperationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListUsableSubnetworksResponse: ListUsableSubnetworksResponse is the response @@ -3110,9 +3306,9 @@ type ListUsableSubnetworksResponse struct { NullFields []string `json:"-"` } -func (s *ListUsableSubnetworksResponse) MarshalJSON() ([]byte, error) { +func (s ListUsableSubnetworksResponse) MarshalJSON() ([]byte, error) { type NoMethod ListUsableSubnetworksResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LocalNvmeSsdBlockConfig: LocalNvmeSsdBlockConfig contains configuration for @@ -3145,9 +3341,9 @@ type LocalNvmeSsdBlockConfig struct { NullFields []string `json:"-"` } -func (s *LocalNvmeSsdBlockConfig) MarshalJSON() ([]byte, error) { +func (s LocalNvmeSsdBlockConfig) MarshalJSON() ([]byte, error) { type NoMethod LocalNvmeSsdBlockConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LoggingComponentConfig: LoggingComponentConfig is cluster logging component @@ -3163,6 +3359,8 @@ type LoggingComponentConfig struct { // "APISERVER" - kube-apiserver // "SCHEDULER" - kube-scheduler // "CONTROLLER_MANAGER" - kube-controller-manager + // "KCP_SSHD" - kcp-sshd + // "KCP_CONNECTION" - kcp connection logs EnableComponents []string `json:"enableComponents,omitempty"` // ForceSendFields is a list of field names (e.g. "EnableComponents") to // unconditionally include in API requests. 
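Reviewer note: ListUsableSubnetworksResponse is one of the paginated responses here; the generated client exposes a Pages helper so callers do not have to thread NextPageToken by hand. A sketch (the parent is a placeholder):

import (
	"context"
	"fmt"

	container "google.golang.org/api/container/v1"
)

func printUsableSubnetworks(ctx context.Context, svc *container.Service) error {
	call := svc.Projects.Aggregated.UsableSubnetworks.List("projects/my-project")
	return call.Pages(ctx, func(page *container.ListUsableSubnetworksResponse) error {
		for _, sn := range page.Subnetworks {
			fmt.Println(sn.Subnetwork) // full resource path of the subnetwork
		}
		return nil // returning an error stops the iteration early
	})
}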
By default, fields with empty or @@ -3177,9 +3375,9 @@ type LoggingComponentConfig struct { NullFields []string `json:"-"` } -func (s *LoggingComponentConfig) MarshalJSON() ([]byte, error) { +func (s LoggingComponentConfig) MarshalJSON() ([]byte, error) { type NoMethod LoggingComponentConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LoggingConfig: LoggingConfig is cluster logging configuration. @@ -3199,9 +3397,9 @@ type LoggingConfig struct { NullFields []string `json:"-"` } -func (s *LoggingConfig) MarshalJSON() ([]byte, error) { +func (s LoggingConfig) MarshalJSON() ([]byte, error) { type NoMethod LoggingConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LoggingVariantConfig: LoggingVariantConfig specifies the behaviour of the @@ -3227,9 +3425,9 @@ type LoggingVariantConfig struct { NullFields []string `json:"-"` } -func (s *LoggingVariantConfig) MarshalJSON() ([]byte, error) { +func (s LoggingVariantConfig) MarshalJSON() ([]byte, error) { type NoMethod LoggingVariantConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MaintenanceExclusionOptions: Represents the Maintenance exclusion option. @@ -3260,9 +3458,9 @@ type MaintenanceExclusionOptions struct { NullFields []string `json:"-"` } -func (s *MaintenanceExclusionOptions) MarshalJSON() ([]byte, error) { +func (s MaintenanceExclusionOptions) MarshalJSON() ([]byte, error) { type NoMethod MaintenanceExclusionOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MaintenancePolicy: MaintenancePolicy defines the maintenance policy to be @@ -3290,9 +3488,9 @@ type MaintenancePolicy struct { NullFields []string `json:"-"` } -func (s *MaintenancePolicy) MarshalJSON() ([]byte, error) { +func (s MaintenancePolicy) MarshalJSON() ([]byte, error) { type NoMethod MaintenancePolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MaintenanceWindow: MaintenanceWindow defines the maintenance window to be @@ -3321,9 +3519,9 @@ type MaintenanceWindow struct { NullFields []string `json:"-"` } -func (s *MaintenanceWindow) MarshalJSON() ([]byte, error) { +func (s MaintenanceWindow) MarshalJSON() ([]byte, error) { type NoMethod MaintenanceWindow - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ManagedPrometheusConfig: ManagedPrometheusConfig defines the configuration @@ -3344,27 +3542,28 @@ type ManagedPrometheusConfig struct { NullFields []string `json:"-"` } -func (s *ManagedPrometheusConfig) MarshalJSON() ([]byte, error) { +func (s ManagedPrometheusConfig) MarshalJSON() ([]byte, error) { type NoMethod ManagedPrometheusConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MasterAuth: The authentication information for accessing the master // endpoint. Authentication can be done using HTTP basic auth or using client // certificates. 
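Reviewer note: the two KCP values added to LoggingComponentConfig extend control-plane logging; a full selection would enumerate them alongside the existing components (the ComponentConfig field name is taken from the generated LoggingConfig):

import container "google.golang.org/api/container/v1"

func controlPlaneLogging() *container.LoggingConfig {
	return &container.LoggingConfig{
		ComponentConfig: &container.LoggingComponentConfig{
			EnableComponents: []string{
				"SYSTEM_COMPONENTS",
				"APISERVER",
				"SCHEDULER",
				"CONTROLLER_MANAGER",
				"KCP_SSHD",       // new in this revision
				"KCP_CONNECTION", // new in this revision
			},
		},
	}
}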
type MasterAuth struct { - // ClientCertificate: [Output only] Base64-encoded public certificate used by - // clients to authenticate to the cluster endpoint. + // ClientCertificate: Output only. Base64-encoded public certificate used by + // clients to authenticate to the cluster endpoint. Issued only if + // client_certificate_config is set. ClientCertificate string `json:"clientCertificate,omitempty"` // ClientCertificateConfig: Configuration for client certificate authentication // on the cluster. For clusters before v1.12, if no configuration is specified, // a client certificate is issued. ClientCertificateConfig *ClientCertificateConfig `json:"clientCertificateConfig,omitempty"` - // ClientKey: [Output only] Base64-encoded private key used by clients to + // ClientKey: Output only. Base64-encoded private key used by clients to // authenticate to the cluster endpoint. ClientKey string `json:"clientKey,omitempty"` - // ClusterCaCertificate: [Output only] Base64-encoded public certificate that - // is the root of trust for the cluster. + // ClusterCaCertificate: Output only. Base64-encoded public certificate that is + // the root of trust for the cluster. ClusterCaCertificate string `json:"clusterCaCertificate,omitempty"` // Password: The password to use for HTTP basic authentication to the master // endpoint. Because the master endpoint is open to the Internet, you should @@ -3395,9 +3594,9 @@ type MasterAuth struct { NullFields []string `json:"-"` } -func (s *MasterAuth) MarshalJSON() ([]byte, error) { +func (s MasterAuth) MarshalJSON() ([]byte, error) { type NoMethod MasterAuth - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MasterAuthorizedNetworksConfig: Configuration options for the master @@ -3414,6 +3613,9 @@ type MasterAuthorizedNetworksConfig struct { // GcpPublicCidrsAccessEnabled: Whether master is accessbile via Google Compute // Engine Public IP addresses. GcpPublicCidrsAccessEnabled bool `json:"gcpPublicCidrsAccessEnabled,omitempty"` + // PrivateEndpointEnforcementEnabled: Whether master authorized networks is + // enforced on private endpoint or not. + PrivateEndpointEnforcementEnabled bool `json:"privateEndpointEnforcementEnabled,omitempty"` // ForceSendFields is a list of field names (e.g. "CidrBlocks") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See @@ -3427,9 +3629,9 @@ type MasterAuthorizedNetworksConfig struct { NullFields []string `json:"-"` } -func (s *MasterAuthorizedNetworksConfig) MarshalJSON() ([]byte, error) { +func (s MasterAuthorizedNetworksConfig) MarshalJSON() ([]byte, error) { type NoMethod MasterAuthorizedNetworksConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MaxPodsConstraint: Constraints applied to pods. 
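Reviewer note: MasterAuth.ClusterCaCertificate (like the client certificate pair) is base64-encoded PEM; turning it into a transport that trusts the cluster looks like this:

import (
	"crypto/tls"
	"crypto/x509"
	"encoding/base64"
	"fmt"
)

// tlsConfigForCluster builds a TLS config whose root of trust is the
// cluster CA from MasterAuth.
func tlsConfigForCluster(clusterCaCertificate string) (*tls.Config, error) {
	pemBytes, err := base64.StdEncoding.DecodeString(clusterCaCertificate)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(pemBytes) {
		return nil, fmt.Errorf("cluster CA contained no parsable certificates")
	}
	return &tls.Config{RootCAs: pool}, nil
}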
@@ -3449,9 +3651,9 @@ type MaxPodsConstraint struct { NullFields []string `json:"-"` } -func (s *MaxPodsConstraint) MarshalJSON() ([]byte, error) { +func (s MaxPodsConstraint) MarshalJSON() ([]byte, error) { type NoMethod MaxPodsConstraint - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MeshCertificates: Configuration for issuance of mTLS keys and certificates @@ -3476,9 +3678,9 @@ type MeshCertificates struct { NullFields []string `json:"-"` } -func (s *MeshCertificates) MarshalJSON() ([]byte, error) { +func (s MeshCertificates) MarshalJSON() ([]byte, error) { type NoMethod MeshCertificates - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Metric: Progress metric is (string, int|float|string) pair. @@ -3504,9 +3706,9 @@ type Metric struct { NullFields []string `json:"-"` } -func (s *Metric) MarshalJSON() ([]byte, error) { +func (s Metric) MarshalJSON() ([]byte, error) { type NoMethod Metric - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *Metric) UnmarshalJSON(data []byte) error { @@ -3543,6 +3745,7 @@ type MonitoringComponentConfig struct { // "STATEFULSET" - Statefulset // "CADVISOR" - CADVISOR // "KUBELET" - KUBELET + // "DCGM" - NVIDIA Data Center GPU Manager (DCGM) EnableComponents []string `json:"enableComponents,omitempty"` // ForceSendFields is a list of field names (e.g. "EnableComponents") to // unconditionally include in API requests. By default, fields with empty or @@ -3557,9 +3760,9 @@ type MonitoringComponentConfig struct { NullFields []string `json:"-"` } -func (s *MonitoringComponentConfig) MarshalJSON() ([]byte, error) { +func (s MonitoringComponentConfig) MarshalJSON() ([]byte, error) { type NoMethod MonitoringComponentConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MonitoringConfig: MonitoringConfig is cluster monitoring configuration. @@ -3587,9 +3790,9 @@ type MonitoringConfig struct { NullFields []string `json:"-"` } -func (s *MonitoringConfig) MarshalJSON() ([]byte, error) { +func (s MonitoringConfig) MarshalJSON() ([]byte, error) { type NoMethod MonitoringConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkConfig: NetworkConfig reports the relative names of network & @@ -3606,6 +3809,12 @@ type NetworkConfig struct { // documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/datapla // ne-v2) for more. DatapathProvider string `json:"datapathProvider,omitempty"` + // DefaultEnablePrivateNodes: Controls whether by default nodes have private IP + // addresses only. It is invalid to specify both + // PrivateClusterConfig.enablePrivateNodes and this field at the same time. To + // update the default setting, use + // ClusterUpdate.desired_default_enable_private_nodes + DefaultEnablePrivateNodes bool `json:"defaultEnablePrivateNodes,omitempty"` // DefaultSnatStatus: Whether the cluster disables default in-node sNAT rules. // In-node sNAT rules will be disabled when default_snat_status is disabled. 
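Reviewer note: the new DCGM value in MonitoringComponentConfig enables NVIDIA GPU metrics collection; selecting it follows the same component-list pattern as logging (MonitoringConfig.ComponentConfig per the generated client):

import container "google.golang.org/api/container/v1"

func gpuMonitoring() *container.MonitoringConfig {
	return &container.MonitoringConfig{
		ComponentConfig: &container.MonitoringComponentConfig{
			EnableComponents: []string{
				"SYSTEM_COMPONENTS",
				"DCGM", // new: NVIDIA Data Center GPU Manager metrics
			},
		},
	}
}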
// When disabled is set to false, default IP masquerade rules will be applied @@ -3680,9 +3889,9 @@ type NetworkConfig struct { NullFields []string `json:"-"` } -func (s *NetworkConfig) MarshalJSON() ([]byte, error) { +func (s NetworkConfig) MarshalJSON() ([]byte, error) { type NoMethod NetworkConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkPerformanceConfig: Configuration of all network bandwidth tiers @@ -3707,9 +3916,9 @@ type NetworkPerformanceConfig struct { NullFields []string `json:"-"` } -func (s *NetworkPerformanceConfig) MarshalJSON() ([]byte, error) { +func (s NetworkPerformanceConfig) MarshalJSON() ([]byte, error) { type NoMethod NetworkPerformanceConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkPolicy: Configuration options for the NetworkPolicy feature. @@ -3736,9 +3945,9 @@ type NetworkPolicy struct { NullFields []string `json:"-"` } -func (s *NetworkPolicy) MarshalJSON() ([]byte, error) { +func (s NetworkPolicy) MarshalJSON() ([]byte, error) { type NoMethod NetworkPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkPolicyConfig: Configuration for NetworkPolicy. This only tracks @@ -3760,9 +3969,9 @@ type NetworkPolicyConfig struct { NullFields []string `json:"-"` } -func (s *NetworkPolicyConfig) MarshalJSON() ([]byte, error) { +func (s NetworkPolicyConfig) MarshalJSON() ([]byte, error) { type NoMethod NetworkPolicyConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NetworkTags: Collection of Compute Engine network tags that can be applied @@ -3783,9 +3992,9 @@ type NetworkTags struct { NullFields []string `json:"-"` } -func (s *NetworkTags) MarshalJSON() ([]byte, error) { +func (s NetworkTags) MarshalJSON() ([]byte, error) { type NoMethod NetworkTags - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeAffinity: Specifies the NodeAffinity key, values, and affinity operator @@ -3816,9 +4025,9 @@ type NodeAffinity struct { NullFields []string `json:"-"` } -func (s *NodeAffinity) MarshalJSON() ([]byte, error) { +func (s NodeAffinity) MarshalJSON() ([]byte, error) { type NoMethod NodeAffinity - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeConfig: Parameters that describe the nodes in a cluster. GKE Autopilot @@ -3851,6 +4060,20 @@ type NodeConfig struct { // 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is // 'pd-standard' DiskType string `json:"diskType,omitempty"` + // EffectiveCgroupMode: Output only. effective_cgroup_mode is the cgroup mode + // actually used by the node pool. It is determined by the cgroup mode + // specified in the LinuxNodeConfig or the default cgroup mode based on the + // cluster creation version. + // + // Possible values: + // "EFFECTIVE_CGROUP_MODE_UNSPECIFIED" - EFFECTIVE_CGROUP_MODE_UNSPECIFIED + // means the cgroup configuration for the node pool is unspecified, i.e. the + // node pool is a Windows node pool. 
+ // "EFFECTIVE_CGROUP_MODE_V1" - CGROUP_MODE_V1 means the node pool is + // configured to use cgroupv1 for the cgroup configuration. + // "EFFECTIVE_CGROUP_MODE_V2" - CGROUP_MODE_V2 means the node pool is + // configured to use cgroupv2 for the cgroup configuration. + EffectiveCgroupMode string `json:"effectiveCgroupMode,omitempty"` // EnableConfidentialStorage: Optional. Reserved for future use. EnableConfidentialStorage bool `json:"enableConfidentialStorage,omitempty"` // EphemeralStorageLocalSsdConfig: Parameters for the node ephemeral storage @@ -3965,6 +4188,8 @@ type NodeConfig struct { // Spot: Spot flag for enabling Spot VM, which is a rebrand of the existing // preemptible flag. Spot bool `json:"spot,omitempty"` + // StoragePools: List of Storage Pools where boot disks are provisioned. + StoragePools []string `json:"storagePools,omitempty"` // Tags: The list of instance tags applied to all nodes. Tags are used to // identify valid sources or targets for network firewalls and are specified by // the client during cluster or node pool creation. Each tag within the list @@ -3991,9 +4216,9 @@ type NodeConfig struct { NullFields []string `json:"-"` } -func (s *NodeConfig) MarshalJSON() ([]byte, error) { +func (s NodeConfig) MarshalJSON() ([]byte, error) { type NoMethod NodeConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeConfigDefaults: Subset of NodeConfig message that has defaults. @@ -4022,9 +4247,9 @@ type NodeConfigDefaults struct { NullFields []string `json:"-"` } -func (s *NodeConfigDefaults) MarshalJSON() ([]byte, error) { +func (s NodeConfigDefaults) MarshalJSON() ([]byte, error) { type NoMethod NodeConfigDefaults - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeKubeletConfig: Node kubelet configs. @@ -4070,9 +4295,9 @@ type NodeKubeletConfig struct { NullFields []string `json:"-"` } -func (s *NodeKubeletConfig) MarshalJSON() ([]byte, error) { +func (s NodeKubeletConfig) MarshalJSON() ([]byte, error) { type NoMethod NodeKubeletConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeLabels: Collection of node-level Kubernetes labels @@ -4093,9 +4318,9 @@ type NodeLabels struct { NullFields []string `json:"-"` } -func (s *NodeLabels) MarshalJSON() ([]byte, error) { +func (s NodeLabels) MarshalJSON() ([]byte, error) { type NoMethod NodeLabels - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeManagement: NodeManagement defines the set of node management services @@ -4125,9 +4350,9 @@ type NodeManagement struct { NullFields []string `json:"-"` } -func (s *NodeManagement) MarshalJSON() ([]byte, error) { +func (s NodeManagement) MarshalJSON() ([]byte, error) { type NoMethod NodeManagement - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeNetworkConfig: Parameters for node pool-level network config. @@ -4150,7 +4375,7 @@ type NodeNetworkConfig struct { CreatePodRange bool `json:"createPodRange,omitempty"` // EnablePrivateNodes: Whether nodes have internal IP addresses only. 
If // enable_private_nodes is not specified, then the value is derived from - // cluster.privateClusterConfig.enablePrivateNodes + // Cluster.NetworkConfig.default_enable_private_nodes EnablePrivateNodes bool `json:"enablePrivateNodes,omitempty"` // NetworkPerformanceConfig: Network bandwidth tier configuration. NetworkPerformanceConfig *NetworkPerformanceConfig `json:"networkPerformanceConfig,omitempty"` @@ -4172,9 +4397,9 @@ type NodeNetworkConfig struct { // `ip_allocation_policy.use_ip_aliases` is true. This field cannot be changed // after the node pool has been created. PodIpv4CidrBlock string `json:"podIpv4CidrBlock,omitempty"` - // PodIpv4RangeUtilization: Output only. [Output only] The utilization of the - // IPv4 range for the pod. The ratio is Usage/[Total number of IPs in the - // secondary range], Usage=numNodes*numZones*podIPsPerNode. + // PodIpv4RangeUtilization: Output only. The utilization of the IPv4 range for + // the pod. The ratio is Usage/[Total number of IPs in the secondary range], + // Usage=numNodes*numZones*podIPsPerNode. PodIpv4RangeUtilization float64 `json:"podIpv4RangeUtilization,omitempty"` // PodRange: The ID of the secondary range for pod IPs. If `create_pod_range` // is true, this ID is used for the new range. If `create_pod_range` is false, @@ -4195,9 +4420,9 @@ type NodeNetworkConfig struct { NullFields []string `json:"-"` } -func (s *NodeNetworkConfig) MarshalJSON() ([]byte, error) { +func (s NodeNetworkConfig) MarshalJSON() ([]byte, error) { type NoMethod NodeNetworkConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *NodeNetworkConfig) UnmarshalJSON(data []byte) error { @@ -4239,7 +4464,7 @@ type NodePool struct { // is sufficient for this number of instances. You must also have available // firewall and routes quota. InitialNodeCount int64 `json:"initialNodeCount,omitempty"` - // InstanceGroupUrls: [Output only] The resource URLs of the managed instance + // InstanceGroupUrls: Output only. The resource URLs of the managed instance // groups // (https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances) // associated with this node pool. During the node pool blue-green upgrade @@ -4265,14 +4490,14 @@ type NodePool struct { NetworkConfig *NodeNetworkConfig `json:"networkConfig,omitempty"` // PlacementPolicy: Specifies the node placement policy. PlacementPolicy *PlacementPolicy `json:"placementPolicy,omitempty"` - // PodIpv4CidrSize: [Output only] The pod CIDR block size per node in this node + // PodIpv4CidrSize: Output only. The pod CIDR block size per node in this node // pool. PodIpv4CidrSize int64 `json:"podIpv4CidrSize,omitempty"` // QueuedProvisioning: Specifies the configuration of queued provisioning. QueuedProvisioning *QueuedProvisioning `json:"queuedProvisioning,omitempty"` - // SelfLink: [Output only] Server-defined URL for the resource. + // SelfLink: Output only. Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Status: [Output only] The status of the nodes in this pool instance. + // Status: Output only. The status of the nodes in this pool instance. // // Possible values: // "STATUS_UNSPECIFIED" - Not set. @@ -4291,12 +4516,12 @@ type NodePool struct { // "ERROR" - The ERROR state indicates the node pool may be unusable. Details // can be found in the `statusMessage` field. 
Status string `json:"status,omitempty"` - // StatusMessage: [Output only] Deprecated. Use conditions instead. Additional + // StatusMessage: Output only. Deprecated. Use conditions instead. Additional // information about the current status of this node pool instance, if // available. StatusMessage string `json:"statusMessage,omitempty"` - // UpdateInfo: Output only. [Output only] Update info contains relevant - // information during a node pool update. + // UpdateInfo: Output only. Update info contains relevant information during a + // node pool update. UpdateInfo *UpdateInfo `json:"updateInfo,omitempty"` // UpgradeSettings: Upgrade settings control disruption and speed of the // upgrade. @@ -4321,9 +4546,9 @@ type NodePool struct { NullFields []string `json:"-"` } -func (s *NodePool) MarshalJSON() ([]byte, error) { +func (s NodePool) MarshalJSON() ([]byte, error) { type NoMethod NodePool - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodePoolAutoConfig: Node pool configs that apply to all auto-provisioned @@ -4356,9 +4581,9 @@ type NodePoolAutoConfig struct { NullFields []string `json:"-"` } -func (s *NodePoolAutoConfig) MarshalJSON() ([]byte, error) { +func (s NodePoolAutoConfig) MarshalJSON() ([]byte, error) { type NoMethod NodePoolAutoConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodePoolAutoscaling: NodePoolAutoscaling contains information required by @@ -4405,9 +4630,9 @@ type NodePoolAutoscaling struct { NullFields []string `json:"-"` } -func (s *NodePoolAutoscaling) MarshalJSON() ([]byte, error) { +func (s NodePoolAutoscaling) MarshalJSON() ([]byte, error) { type NoMethod NodePoolAutoscaling - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodePoolDefaults: Subset of Nodepool message that has defaults. 
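The receiver change repeated throughout these hunks, MarshalJSON moving from *T to T, is the mechanical heart of this bump. A minimal sketch of the generated pattern using a stand-in struct (names here are illustrative, not the generated types): the NoMethod alias has the same fields but no methods, so the nested marshal cannot recurse, and the value receiver puts MarshalJSON in the method sets of both T and *T, so a struct passed or embedded by value no longer silently skips its custom marshaler.

package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in for any of the generated types; field names are illustrative only.
type example struct {
	Name string `json:"name,omitempty"`
}

// Value receiver: both example and *example now satisfy json.Marshaler.
func (s example) MarshalJSON() ([]byte, error) {
	// NoMethod shares example's fields but drops its methods, so the call
	// below uses the default struct encoding instead of recursing here.
	type NoMethod example
	return json.Marshal(NoMethod(s))
}

func main() {
	b, _ := json.Marshal(example{Name: "pool-1"}) // a value, not a pointer
	fmt.Println(string(b))                        // {"name":"pool-1"}
}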
@@ -4427,9 +4652,9 @@ type NodePoolDefaults struct { NullFields []string `json:"-"` } -func (s *NodePoolDefaults) MarshalJSON() ([]byte, error) { +func (s NodePoolDefaults) MarshalJSON() ([]byte, error) { type NoMethod NodePoolDefaults - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodePoolLoggingConfig: NodePoolLoggingConfig specifies logging configuration @@ -4450,9 +4675,9 @@ type NodePoolLoggingConfig struct { NullFields []string `json:"-"` } -func (s *NodePoolLoggingConfig) MarshalJSON() ([]byte, error) { +func (s NodePoolLoggingConfig) MarshalJSON() ([]byte, error) { type NoMethod NodePoolLoggingConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeTaint: Kubernetes taint is composed of three fields: key, value, and @@ -4486,9 +4711,9 @@ type NodeTaint struct { NullFields []string `json:"-"` } -func (s *NodeTaint) MarshalJSON() ([]byte, error) { +func (s NodeTaint) MarshalJSON() ([]byte, error) { type NoMethod NodeTaint - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeTaints: Collection of Kubernetes node taints @@ -4509,9 +4734,9 @@ type NodeTaints struct { NullFields []string `json:"-"` } -func (s *NodeTaints) MarshalJSON() ([]byte, error) { +func (s NodeTaints) MarshalJSON() ([]byte, error) { type NoMethod NodeTaints - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NotificationConfig: NotificationConfig is the configuration of @@ -4532,9 +4757,9 @@ type NotificationConfig struct { NullFields []string `json:"-"` } -func (s *NotificationConfig) MarshalJSON() ([]byte, error) { +func (s NotificationConfig) MarshalJSON() ([]byte, error) { type NoMethod NotificationConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Operation: This operation resource represents operations that may have @@ -4543,25 +4768,25 @@ type Operation struct { // ClusterConditions: Which conditions caused the current cluster state. // Deprecated. Use field error instead. ClusterConditions []*StatusCondition `json:"clusterConditions,omitempty"` - // Detail: Detailed operation progress, if available. + // Detail: Output only. Detailed operation progress, if available. Detail string `json:"detail,omitempty"` - // EndTime: [Output only] The time the operation completed, in RFC3339 + // EndTime: Output only. The time the operation completed, in RFC3339 // (https://www.ietf.org/rfc/rfc3339.txt) text format. EndTime string `json:"endTime,omitempty"` // Error: The error result of the operation in case of failure. Error *Status `json:"error,omitempty"` - // Location: [Output only] The name of the Google Compute Engine zone + // Location: Output only. The name of the Google Compute Engine zone // (https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) // or region // (https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) // in which the cluster resides. Location string `json:"location,omitempty"` - // Name: The server-assigned ID for the operation. + // Name: Output only. 
The server-assigned ID for the operation. Name string `json:"name,omitempty"` // NodepoolConditions: Which conditions caused the current node pool state. // Deprecated. Use field error instead. NodepoolConditions []*StatusCondition `json:"nodepoolConditions,omitempty"` - // OperationType: The operation type. + // OperationType: Output only. The operation type. // // Possible values: // "TYPE_UNSPECIFIED" - Not set. @@ -4642,16 +4867,16 @@ type Operation struct { // upgraded. The cluster should be assumed to be blocked for other upgrades // until the operation finishes. OperationType string `json:"operationType,omitempty"` - // Progress: Output only. [Output only] Progress information for an operation. + // Progress: Output only. Progress information for an operation. Progress *OperationProgress `json:"progress,omitempty"` - // SelfLink: Server-defined URI for the operation. Example: + // SelfLink: Output only. Server-defined URI for the operation. Example: // `https://container.googleapis.com/v1alpha1/projects/123/locations/us-central1 // /operations/operation-123`. SelfLink string `json:"selfLink,omitempty"` - // StartTime: [Output only] The time the operation started, in RFC3339 + // StartTime: Output only. The time the operation started, in RFC3339 // (https://www.ietf.org/rfc/rfc3339.txt) text format. StartTime string `json:"startTime,omitempty"` - // Status: The current status of the operation. + // Status: Output only. The current status of the operation. // // Possible values: // "STATUS_UNSPECIFIED" - Not set. @@ -4663,10 +4888,10 @@ type Operation struct { // StatusMessage: Output only. If an error has occurred, a textual description // of the error. Deprecated. Use the field error instead. StatusMessage string `json:"statusMessage,omitempty"` - // TargetLink: Server-defined URI for the target of the operation. The format - // of this is a URI to the resource being modified (such as a cluster, node - // pool, or node). For node pool repairs, there may be multiple nodes being - // repaired, but only one will be the target. Examples: - ## + // TargetLink: Output only. Server-defined URI for the target of the operation. + // The format of this is a URI to the resource being modified (such as a + // cluster, node pool, or node). For node pool repairs, there may be multiple + // nodes being repaired, but only one will be the target. Examples: - ## // `https://container.googleapis.com/v1/projects/123/locations/us-central1/clust // ers/my-cluster` ## // `https://container.googleapis.com/v1/projects/123/zones/us-central1-c/cluster @@ -4674,7 +4899,7 @@ type Operation struct { // `https://container.googleapis.com/v1/projects/123/zones/us-central1-c/cluster // s/my-cluster/nodePools/my-np/node/my-node` TargetLink string `json:"targetLink,omitempty"` - // Zone: The name of the Google Compute Engine zone + // Zone: Output only. The name of the Google Compute Engine zone // (https://cloud.google.com/compute/docs/zones#available) in which the // operation is taking place. This field is deprecated, use location instead. 
Zone string `json:"zone,omitempty"` @@ -4694,9 +4919,9 @@ type Operation struct { NullFields []string `json:"-"` } -func (s *Operation) MarshalJSON() ([]byte, error) { +func (s Operation) MarshalJSON() ([]byte, error) { type NoMethod Operation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationError: OperationError records errors seen from CloudKMS keys @@ -4721,9 +4946,9 @@ type OperationError struct { NullFields []string `json:"-"` } -func (s *OperationError) MarshalJSON() ([]byte, error) { +func (s OperationError) MarshalJSON() ([]byte, error) { type NoMethod OperationError - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationProgress: Information about operation (or operation stage) @@ -4761,9 +4986,9 @@ type OperationProgress struct { NullFields []string `json:"-"` } -func (s *OperationProgress) MarshalJSON() ([]byte, error) { +func (s OperationProgress) MarshalJSON() ([]byte, error) { type NoMethod OperationProgress - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ParentProductConfig: ParentProductConfig is the configuration of the parent @@ -4787,9 +5012,9 @@ type ParentProductConfig struct { NullFields []string `json:"-"` } -func (s *ParentProductConfig) MarshalJSON() ([]byte, error) { +func (s ParentProductConfig) MarshalJSON() ([]byte, error) { type NoMethod ParentProductConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PlacementPolicy: PlacementPolicy defines the placement policy used by the @@ -4823,9 +5048,9 @@ type PlacementPolicy struct { NullFields []string `json:"-"` } -func (s *PlacementPolicy) MarshalJSON() ([]byte, error) { +func (s PlacementPolicy) MarshalJSON() ([]byte, error) { type NoMethod PlacementPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PodCIDROverprovisionConfig: [PRIVATE FIELD] Config for pod CIDR size @@ -4847,21 +5072,27 @@ type PodCIDROverprovisionConfig struct { NullFields []string `json:"-"` } -func (s *PodCIDROverprovisionConfig) MarshalJSON() ([]byte, error) { +func (s PodCIDROverprovisionConfig) MarshalJSON() ([]byte, error) { type NoMethod PodCIDROverprovisionConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PrivateClusterConfig: Configuration options for private clusters. type PrivateClusterConfig struct { // EnablePrivateEndpoint: Whether the master's internal IP address is used as - // the cluster endpoint. + // the cluster endpoint. Deprecated: Use + // ControlPlaneEndpointsConfig.IPEndpointsConfig.enable_public_endpoint + // instead. Note that the value of enable_public_endpoint is reversed: if + // enable_private_endpoint is false, then enable_public_endpoint will be true. EnablePrivateEndpoint bool `json:"enablePrivateEndpoint,omitempty"` // EnablePrivateNodes: Whether nodes have internal IP addresses only. If // enabled, all nodes are given only RFC 1918 private addresses and communicate - // with the master via private networking. 
+ // with the master via private networking. Deprecated: Use + // NetworkConfig.default_enable_private_nodes instead. EnablePrivateNodes bool `json:"enablePrivateNodes,omitempty"` // MasterGlobalAccessConfig: Controls master global access settings. + // Deprecated: Use + // ControlPlaneEndpointsConfig.IPEndpointsConfig.enable_global_access instead. MasterGlobalAccessConfig *PrivateClusterMasterGlobalAccessConfig `json:"masterGlobalAccessConfig,omitempty"` // MasterIpv4CidrBlock: The IP range in CIDR notation to use for the hosted // master network. This range will be used for assigning internal IP addresses @@ -4872,14 +5103,18 @@ type PrivateClusterConfig struct { // cluster. PeeringName string `json:"peeringName,omitempty"` // PrivateEndpoint: Output only. The internal IP address of this cluster's - // master endpoint. + // master endpoint. Deprecated: Use + // ControlPlaneEndpointsConfig.IPEndpointsConfig.private_endpoint instead. PrivateEndpoint string `json:"privateEndpoint,omitempty"` // PrivateEndpointSubnetwork: Subnet to provision the master's private endpoint // during cluster creation. Specified in projects/*/regions/*/subnetworks/* - // format. + // format. Deprecated: Use + // ControlPlaneEndpointsConfig.IPEndpointsConfig.private_endpoint_subnetwork + // instead. PrivateEndpointSubnetwork string `json:"privateEndpointSubnetwork,omitempty"` // PublicEndpoint: Output only. The external IP address of this cluster's - // master endpoint. + // master endpoint. Deprecated: Use + // ControlPlaneEndpointsConfig.IPEndpointsConfig.public_endpoint instead. PublicEndpoint string `json:"publicEndpoint,omitempty"` // ForceSendFields is a list of field names (e.g. "EnablePrivateEndpoint") to // unconditionally include in API requests. By default, fields with empty or @@ -4894,9 +5129,9 @@ type PrivateClusterConfig struct { NullFields []string `json:"-"` } -func (s *PrivateClusterConfig) MarshalJSON() ([]byte, error) { +func (s PrivateClusterConfig) MarshalJSON() ([]byte, error) { type NoMethod PrivateClusterConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PrivateClusterMasterGlobalAccessConfig: Configuration for controlling master @@ -4917,9 +5152,9 @@ type PrivateClusterMasterGlobalAccessConfig struct { NullFields []string `json:"-"` } -func (s *PrivateClusterMasterGlobalAccessConfig) MarshalJSON() ([]byte, error) { +func (s PrivateClusterMasterGlobalAccessConfig) MarshalJSON() ([]byte, error) { type NoMethod PrivateClusterMasterGlobalAccessConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PrivateRegistryAccessConfig: PrivateRegistryAccessConfig contains access @@ -4944,9 +5179,9 @@ type PrivateRegistryAccessConfig struct { NullFields []string `json:"-"` } -func (s *PrivateRegistryAccessConfig) MarshalJSON() ([]byte, error) { +func (s PrivateRegistryAccessConfig) MarshalJSON() ([]byte, error) { type NoMethod PrivateRegistryAccessConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PubSub: Pub/Sub specific notification config.
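The deprecation notes on PrivateClusterConfig a few hunks above flip a polarity: the deprecated enable_private_endpoint flag is replaced by an enable_public_endpoint field on ControlPlaneEndpointsConfig (referenced in the comments but not part of this file), with the opposite meaning. A hypothetical migration helper, purely to pin down the inversion:

package main

import "fmt"

// enablePublicEndpoint maps the deprecated PrivateClusterConfig flag onto the
// replacement field's inverted polarity. Illustrative only; per the comments
// the real field lives on ControlPlaneEndpointsConfig.IPEndpointsConfig.
func enablePublicEndpoint(enablePrivateEndpoint bool) bool {
	return !enablePrivateEndpoint
}

func main() {
	fmt.Println(enablePublicEndpoint(true))  // false: private endpoint only
	fmt.Println(enablePublicEndpoint(false)) // true: public endpoint exposed
}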
@@ -4973,9 +5208,9 @@ type PubSub struct { NullFields []string `json:"-"` } -func (s *PubSub) MarshalJSON() ([]byte, error) { +func (s PubSub) MarshalJSON() ([]byte, error) { type NoMethod PubSub - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // QueuedProvisioning: QueuedProvisioning defines the queued provisioning used @@ -4998,17 +5233,47 @@ type QueuedProvisioning struct { NullFields []string `json:"-"` } -func (s *QueuedProvisioning) MarshalJSON() ([]byte, error) { +func (s QueuedProvisioning) MarshalJSON() ([]byte, error) { type NoMethod QueuedProvisioning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// RBACBindingConfig: RBACBindingConfig allows users to restrict +// ClusterRoleBindings and RoleBindings that can be created. +type RBACBindingConfig struct { + // EnableInsecureBindingSystemAuthenticated: Setting this to true will allow + // any ClusterRoleBinding and RoleBinding with subjects system:authenticated. + EnableInsecureBindingSystemAuthenticated bool `json:"enableInsecureBindingSystemAuthenticated,omitempty"` + // EnableInsecureBindingSystemUnauthenticated: Setting this to true will allow + // any ClusterRoleBinding and RoleBinding with subjects system:anonymous or + // system:unauthenticated. + EnableInsecureBindingSystemUnauthenticated bool `json:"enableInsecureBindingSystemUnauthenticated,omitempty"` + // ForceSendFields is a list of field names (e.g. + // "EnableInsecureBindingSystemAuthenticated") to unconditionally include in + // API requests. By default, fields with empty or default values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. + // "EnableInsecureBindingSystemAuthenticated") to include in API requests with + // the JSON null value. By default, fields with empty values are omitted from + // API requests. See https://pkg.go.dev/google.golang.org/api#hdr-NullFields + // for more details. + NullFields []string `json:"-"` +} + +func (s RBACBindingConfig) MarshalJSON() ([]byte, error) { + type NoMethod RBACBindingConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RangeInfo: RangeInfo contains the range name and the range utilization by // this cluster. type RangeInfo struct { - // RangeName: Output only. [Output only] Name of a range. + // RangeName: Output only. Name of a range. RangeName string `json:"rangeName,omitempty"` - // Utilization: Output only. [Output only] The utilization of the range. + // Utilization: Output only. The utilization of the range. Utilization float64 `json:"utilization,omitempty"` // ForceSendFields is a list of field names (e.g. "RangeName") to // unconditionally include in API requests.
By default, fields with empty or @@ -5023,9 +5288,9 @@ type RangeInfo struct { NullFields []string `json:"-"` } -func (s *RangeInfo) MarshalJSON() ([]byte, error) { +func (s RangeInfo) MarshalJSON() ([]byte, error) { type NoMethod RangeInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *RangeInfo) UnmarshalJSON(data []byte) error { @@ -5042,6 +5307,79 @@ func (s *RangeInfo) UnmarshalJSON(data []byte) error { return nil } +// RayClusterLoggingConfig: RayClusterLoggingConfig specifies configuration of +// Ray logging. +type RayClusterLoggingConfig struct { + // Enabled: Enable log collection for Ray clusters. + Enabled bool `json:"enabled,omitempty"` + // ForceSendFields is a list of field names (e.g. "Enabled") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Enabled") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s RayClusterLoggingConfig) MarshalJSON() ([]byte, error) { + type NoMethod RayClusterLoggingConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// RayClusterMonitoringConfig: RayClusterMonitoringConfig specifies monitoring +// configuration for Ray clusters. +type RayClusterMonitoringConfig struct { + // Enabled: Enable metrics collection for Ray clusters. + Enabled bool `json:"enabled,omitempty"` + // ForceSendFields is a list of field names (e.g. "Enabled") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Enabled") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s RayClusterMonitoringConfig) MarshalJSON() ([]byte, error) { + type NoMethod RayClusterMonitoringConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// RayOperatorConfig: Configuration options for the Ray Operator add-on. +type RayOperatorConfig struct { + // Enabled: Whether the Ray Operator addon is enabled for this cluster. + Enabled bool `json:"enabled,omitempty"` + // RayClusterLoggingConfig: Optional. Logging configuration for Ray clusters. + RayClusterLoggingConfig *RayClusterLoggingConfig `json:"rayClusterLoggingConfig,omitempty"` + // RayClusterMonitoringConfig: Optional. Monitoring configuration for Ray + // clusters. + RayClusterMonitoringConfig *RayClusterMonitoringConfig `json:"rayClusterMonitoringConfig,omitempty"` + // ForceSendFields is a list of field names (e.g. "Enabled") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. 
See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Enabled") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s RayOperatorConfig) MarshalJSON() ([]byte, error) { + type NoMethod RayOperatorConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + // RecurringTimeWindow: Represents an arbitrary window of time that recurs. type RecurringTimeWindow struct { // Recurrence: An RRULE (https://tools.ietf.org/html/rfc5545#section-3.8.5.3) @@ -5076,9 +5414,9 @@ type RecurringTimeWindow struct { NullFields []string `json:"-"` } -func (s *RecurringTimeWindow) MarshalJSON() ([]byte, error) { +func (s RecurringTimeWindow) MarshalJSON() ([]byte, error) { type NoMethod RecurringTimeWindow - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ReleaseChannel: ReleaseChannel indicates which release channel a cluster is @@ -5101,6 +5439,9 @@ type ReleaseChannel struct { // take advantage of new features. // "STABLE" - Clusters subscribed to STABLE receive versions that are known // to be stable and reliable in production. + // "EXTENDED" - Clusters subscribed to EXTENDED receive extended support and + // availability for versions which are known to be stable and reliable in + // production. Channel string `json:"channel,omitempty"` // ForceSendFields is a list of field names (e.g. "Channel") to unconditionally // include in API requests. By default, fields with empty or default values are @@ -5115,9 +5456,9 @@ type ReleaseChannel struct { NullFields []string `json:"-"` } -func (s *ReleaseChannel) MarshalJSON() ([]byte, error) { +func (s ReleaseChannel) MarshalJSON() ([]byte, error) { type NoMethod ReleaseChannel - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ReleaseChannelConfig: ReleaseChannelConfig exposes configuration for a @@ -5136,6 +5477,9 @@ type ReleaseChannelConfig struct { // take advantage of new features. // "STABLE" - Clusters subscribed to STABLE receive versions that are known // to be stable and reliable in production. + // "EXTENDED" - Clusters subscribed to EXTENDED receive extended support and + // availability for versions which are known to be stable and reliable in + // production. Channel string `json:"channel,omitempty"` // DefaultVersion: The default version for newly created clusters on the // channel. 
@@ -5155,9 +5499,9 @@ type ReleaseChannelConfig struct { NullFields []string `json:"-"` } -func (s *ReleaseChannelConfig) MarshalJSON() ([]byte, error) { +func (s ReleaseChannelConfig) MarshalJSON() ([]byte, error) { type NoMethod ReleaseChannelConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ReservationAffinity: ReservationAffinity @@ -5194,9 +5538,9 @@ type ReservationAffinity struct { NullFields []string `json:"-"` } -func (s *ReservationAffinity) MarshalJSON() ([]byte, error) { +func (s ReservationAffinity) MarshalJSON() ([]byte, error) { type NoMethod ReservationAffinity - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourceLabels: Collection of GCP labels @@ -5217,9 +5561,9 @@ type ResourceLabels struct { NullFields []string `json:"-"` } -func (s *ResourceLabels) MarshalJSON() ([]byte, error) { +func (s ResourceLabels) MarshalJSON() ([]byte, error) { type NoMethod ResourceLabels - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourceLimit: Contains information about amount of some resource in the @@ -5244,9 +5588,9 @@ type ResourceLimit struct { NullFields []string `json:"-"` } -func (s *ResourceLimit) MarshalJSON() ([]byte, error) { +func (s ResourceLimit) MarshalJSON() ([]byte, error) { type NoMethod ResourceLimit - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourceManagerTags: A map of resource manager tag keys and values to be @@ -5274,9 +5618,9 @@ type ResourceManagerTags struct { NullFields []string `json:"-"` } -func (s *ResourceManagerTags) MarshalJSON() ([]byte, error) { +func (s ResourceManagerTags) MarshalJSON() ([]byte, error) { type NoMethod ResourceManagerTags - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourceUsageExportConfig: Configuration for exporting cluster resource @@ -5305,9 +5649,9 @@ type ResourceUsageExportConfig struct { NullFields []string `json:"-"` } -func (s *ResourceUsageExportConfig) MarshalJSON() ([]byte, error) { +func (s ResourceUsageExportConfig) MarshalJSON() ([]byte, error) { type NoMethod ResourceUsageExportConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RollbackNodePoolUpgradeRequest: RollbackNodePoolUpgradeRequest rollbacks the @@ -5349,9 +5693,9 @@ type RollbackNodePoolUpgradeRequest struct { NullFields []string `json:"-"` } -func (s *RollbackNodePoolUpgradeRequest) MarshalJSON() ([]byte, error) { +func (s RollbackNodePoolUpgradeRequest) MarshalJSON() ([]byte, error) { type NoMethod RollbackNodePoolUpgradeRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SandboxConfig: SandboxConfig contains configurations of the sandbox to use @@ -5376,9 +5720,9 @@ type SandboxConfig struct { NullFields []string `json:"-"` } -func (s *SandboxConfig) MarshalJSON() ([]byte, error) { +func (s SandboxConfig) MarshalJSON() ([]byte, error) { type 
NoMethod SandboxConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SecondaryBootDisk: SecondaryBootDisk represents a persistent disk attached @@ -5406,9 +5750,9 @@ type SecondaryBootDisk struct { NullFields []string `json:"-"` } -func (s *SecondaryBootDisk) MarshalJSON() ([]byte, error) { +func (s SecondaryBootDisk) MarshalJSON() ([]byte, error) { type NoMethod SecondaryBootDisk - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SecondaryBootDiskUpdateStrategy: SecondaryBootDiskUpdateStrategy is a @@ -5417,6 +5761,29 @@ func (s *SecondaryBootDisk) MarshalJSON() ([]byte, error) { type SecondaryBootDiskUpdateStrategy struct { } +// SecretManagerConfig: SecretManagerConfig is config for secret manager +// enablement. +type SecretManagerConfig struct { + // Enabled: Enable/Disable Secret Manager Config. + Enabled bool `json:"enabled,omitempty"` + // ForceSendFields is a list of field names (e.g. "Enabled") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Enabled") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SecretManagerConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecretManagerConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + // SecurityBulletinEvent: SecurityBulletinEvent is a notification sent to // customers when a security bulletin has been posted that they are vulnerable // to. @@ -5465,9 +5832,9 @@ type SecurityBulletinEvent struct { NullFields []string `json:"-"` } -func (s *SecurityBulletinEvent) MarshalJSON() ([]byte, error) { +func (s SecurityBulletinEvent) MarshalJSON() ([]byte, error) { type NoMethod SecurityBulletinEvent - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SecurityPostureConfig: SecurityPostureConfig defines the flags needed to @@ -5505,9 +5872,9 @@ type SecurityPostureConfig struct { NullFields []string `json:"-"` } -func (s *SecurityPostureConfig) MarshalJSON() ([]byte, error) { +func (s SecurityPostureConfig) MarshalJSON() ([]byte, error) { type NoMethod SecurityPostureConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ServerConfig: Kubernetes Engine service configuration. @@ -5541,9 +5908,9 @@ type ServerConfig struct { NullFields []string `json:"-"` } -func (s *ServerConfig) MarshalJSON() ([]byte, error) { +func (s ServerConfig) MarshalJSON() ([]byte, error) { type NoMethod ServerConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ServiceExternalIPsConfig: Config to block services with externalIPs field. 
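The SecretManagerConfig type added above is a plain enabled/disabled wrapper, which makes it a convenient spot to illustrate the ForceSendFields mechanism every struct here documents: a false value is normally dropped by omitempty, so explicitly disabling the feature requires forcing the field onto the wire. A minimal sketch, assuming the updated google.golang.org/api/container/v1 package from this bump is on the module path:

package main

import (
	"encoding/json"
	"fmt"

	container "google.golang.org/api/container/v1"
)

func main() {
	cfg := &container.SecretManagerConfig{
		Enabled: false,
		// Zero values are omitted from request bodies by default; listing
		// the Go field name here forces it to be serialized as false.
		ForceSendFields: []string{"Enabled"},
	}
	b, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"enabled":false}
}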
@@ -5563,9 +5930,9 @@ type ServiceExternalIPsConfig struct { NullFields []string `json:"-"` } -func (s *ServiceExternalIPsConfig) MarshalJSON() ([]byte, error) { +func (s ServiceExternalIPsConfig) MarshalJSON() ([]byte, error) { type NoMethod ServiceExternalIPsConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SetAddonsConfigRequest: SetAddonsConfigRequest sets the addons associated @@ -5602,9 +5969,9 @@ type SetAddonsConfigRequest struct { NullFields []string `json:"-"` } -func (s *SetAddonsConfigRequest) MarshalJSON() ([]byte, error) { +func (s SetAddonsConfigRequest) MarshalJSON() ([]byte, error) { type NoMethod SetAddonsConfigRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SetLabelsRequest: SetLabelsRequest sets the Google Cloud Platform labels on @@ -5648,9 +6015,9 @@ type SetLabelsRequest struct { NullFields []string `json:"-"` } -func (s *SetLabelsRequest) MarshalJSON() ([]byte, error) { +func (s SetLabelsRequest) MarshalJSON() ([]byte, error) { type NoMethod SetLabelsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SetLegacyAbacRequest: SetLegacyAbacRequest enables or disables the ABAC @@ -5687,9 +6054,9 @@ type SetLegacyAbacRequest struct { NullFields []string `json:"-"` } -func (s *SetLegacyAbacRequest) MarshalJSON() ([]byte, error) { +func (s SetLegacyAbacRequest) MarshalJSON() ([]byte, error) { type NoMethod SetLegacyAbacRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SetLocationsRequest: SetLocationsRequest sets the locations of the cluster. 
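NullFields is the complement to ForceSendFields: it serializes a field as an explicit JSON null rather than omitting it, which is how a request such as SetLabelsRequest above can clear state on the server side. A sketch under the same package assumption, with a placeholder fingerprint value:

package main

import (
	"encoding/json"
	"fmt"

	container "google.golang.org/api/container/v1"
)

func main() {
	req := &container.SetLabelsRequest{
		LabelFingerprint: "42", // placeholder; normally taken from a prior Get
		// Emit "resourceLabels": null instead of dropping the empty map.
		NullFields: []string{"ResourceLabels"},
	}
	b, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"labelFingerprint":"42","resourceLabels":null}
}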
@@ -5729,9 +6096,9 @@ type SetLocationsRequest struct { NullFields []string `json:"-"` } -func (s *SetLocationsRequest) MarshalJSON() ([]byte, error) { +func (s SetLocationsRequest) MarshalJSON() ([]byte, error) { type NoMethod SetLocationsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SetLoggingServiceRequest: SetLoggingServiceRequest sets the logging service @@ -5774,9 +6141,9 @@ type SetLoggingServiceRequest struct { NullFields []string `json:"-"` } -func (s *SetLoggingServiceRequest) MarshalJSON() ([]byte, error) { +func (s SetLoggingServiceRequest) MarshalJSON() ([]byte, error) { type NoMethod SetLoggingServiceRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SetMaintenancePolicyRequest: SetMaintenancePolicyRequest sets the @@ -5812,9 +6179,9 @@ type SetMaintenancePolicyRequest struct { NullFields []string `json:"-"` } -func (s *SetMaintenancePolicyRequest) MarshalJSON() ([]byte, error) { +func (s SetMaintenancePolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod SetMaintenancePolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SetMasterAuthRequest: SetMasterAuthRequest updates the admin password of a @@ -5861,9 +6228,9 @@ type SetMasterAuthRequest struct { NullFields []string `json:"-"` } -func (s *SetMasterAuthRequest) MarshalJSON() ([]byte, error) { +func (s SetMasterAuthRequest) MarshalJSON() ([]byte, error) { type NoMethod SetMasterAuthRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SetMonitoringServiceRequest: SetMonitoringServiceRequest sets the monitoring @@ -5906,9 +6273,9 @@ type SetMonitoringServiceRequest struct { NullFields []string `json:"-"` } -func (s *SetMonitoringServiceRequest) MarshalJSON() ([]byte, error) { +func (s SetMonitoringServiceRequest) MarshalJSON() ([]byte, error) { type NoMethod SetMonitoringServiceRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SetNetworkPolicyRequest: SetNetworkPolicyRequest enables/disables network @@ -5946,9 +6313,9 @@ type SetNetworkPolicyRequest struct { NullFields []string `json:"-"` } -func (s *SetNetworkPolicyRequest) MarshalJSON() ([]byte, error) { +func (s SetNetworkPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod SetNetworkPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SetNodePoolAutoscalingRequest: SetNodePoolAutoscalingRequest sets the @@ -5988,9 +6355,9 @@ type SetNodePoolAutoscalingRequest struct { NullFields []string `json:"-"` } -func (s *SetNodePoolAutoscalingRequest) MarshalJSON() ([]byte, error) { +func (s SetNodePoolAutoscalingRequest) MarshalJSON() ([]byte, error) { type NoMethod SetNodePoolAutoscalingRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SetNodePoolManagementRequest: SetNodePoolManagementRequest sets the node @@ -6030,9 +6397,9 @@ type 
SetNodePoolManagementRequest struct { NullFields []string `json:"-"` } -func (s *SetNodePoolManagementRequest) MarshalJSON() ([]byte, error) { +func (s SetNodePoolManagementRequest) MarshalJSON() ([]byte, error) { type NoMethod SetNodePoolManagementRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SetNodePoolSizeRequest: SetNodePoolSizeRequest sets the size of a node pool. @@ -6071,9 +6438,9 @@ type SetNodePoolSizeRequest struct { NullFields []string `json:"-"` } -func (s *SetNodePoolSizeRequest) MarshalJSON() ([]byte, error) { +func (s SetNodePoolSizeRequest) MarshalJSON() ([]byte, error) { type NoMethod SetNodePoolSizeRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ShieldedInstanceConfig: A set of Shielded Instance options. @@ -6102,9 +6469,9 @@ type ShieldedInstanceConfig struct { NullFields []string `json:"-"` } -func (s *ShieldedInstanceConfig) MarshalJSON() ([]byte, error) { +func (s ShieldedInstanceConfig) MarshalJSON() ([]byte, error) { type NoMethod ShieldedInstanceConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ShieldedNodes: Configuration of Shielded Nodes feature. @@ -6125,9 +6492,9 @@ type ShieldedNodes struct { NullFields []string `json:"-"` } -func (s *ShieldedNodes) MarshalJSON() ([]byte, error) { +func (s ShieldedNodes) MarshalJSON() ([]byte, error) { type NoMethod ShieldedNodes - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SoleTenantConfig: SoleTenantConfig contains the NodeAffinities to specify @@ -6149,9 +6516,9 @@ type SoleTenantConfig struct { NullFields []string `json:"-"` } -func (s *SoleTenantConfig) MarshalJSON() ([]byte, error) { +func (s SoleTenantConfig) MarshalJSON() ([]byte, error) { type NoMethod SoleTenantConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StandardRolloutPolicy: Standard rollout policy is the default policy for @@ -6177,9 +6544,9 @@ type StandardRolloutPolicy struct { NullFields []string `json:"-"` } -func (s *StandardRolloutPolicy) MarshalJSON() ([]byte, error) { +func (s StandardRolloutPolicy) MarshalJSON() ([]byte, error) { type NoMethod StandardRolloutPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *StandardRolloutPolicy) UnmarshalJSON(data []byte) error { @@ -6230,9 +6597,9 @@ type StartIPRotationRequest struct { NullFields []string `json:"-"` } -func (s *StartIPRotationRequest) MarshalJSON() ([]byte, error) { +func (s StartIPRotationRequest) MarshalJSON() ([]byte, error) { type NoMethod StartIPRotationRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StatefulHAConfig: Configuration for the Stateful HA add-on. 
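Note the asymmetry the diff preserves: MarshalJSON moves to value receivers, but UnmarshalJSON methods such as StandardRolloutPolicy's above keep pointer receivers, since they must write into the struct; they also route float fields through a helper that tolerates numbers arriving as JSON strings. A self-contained sketch of that helper pattern (the generated code uses gensupport.JSONFloat64; this re-implementation is only illustrative):

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
)

// jsonFloat64 decodes both 0.25 and "0.25", mirroring the tolerant float
// handling in the generated UnmarshalJSON methods.
type jsonFloat64 float64

// Pointer receiver: unmarshaling has to mutate the receiver.
func (f *jsonFloat64) UnmarshalJSON(b []byte) error {
	v, err := strconv.ParseFloat(strings.Trim(string(b), `"`), 64)
	if err != nil {
		return err
	}
	*f = jsonFloat64(v)
	return nil
}

func main() {
	var a, b jsonFloat64
	_ = json.Unmarshal([]byte(`0.25`), &a)   // plain JSON number
	_ = json.Unmarshal([]byte(`"0.25"`), &b) // same value as a string
	fmt.Println(a, b)                        // 0.25 0.25
}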
@@ -6252,9 +6619,9 @@ type StatefulHAConfig struct { NullFields []string `json:"-"` } -func (s *StatefulHAConfig) MarshalJSON() ([]byte, error) { +func (s StatefulHAConfig) MarshalJSON() ([]byte, error) { type NoMethod StatefulHAConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Status: The `Status` type defines a logical error model that is suitable for @@ -6286,9 +6653,9 @@ type Status struct { NullFields []string `json:"-"` } -func (s *Status) MarshalJSON() ([]byte, error) { +func (s Status) MarshalJSON() ([]byte, error) { type NoMethod Status - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StatusCondition: StatusCondition describes why a cluster or a node pool has @@ -6407,9 +6774,9 @@ type StatusCondition struct { NullFields []string `json:"-"` } -func (s *StatusCondition) MarshalJSON() ([]byte, error) { +func (s StatusCondition) MarshalJSON() ([]byte, error) { type NoMethod StatusCondition - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TimeWindow: Represents an arbitrary window of time. @@ -6435,9 +6802,9 @@ type TimeWindow struct { NullFields []string `json:"-"` } -func (s *TimeWindow) MarshalJSON() ([]byte, error) { +func (s TimeWindow) MarshalJSON() ([]byte, error) { type NoMethod TimeWindow - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateClusterRequest: UpdateClusterRequest updates the settings of a @@ -6473,9 +6840,9 @@ type UpdateClusterRequest struct { NullFields []string `json:"-"` } -func (s *UpdateClusterRequest) MarshalJSON() ([]byte, error) { +func (s UpdateClusterRequest) MarshalJSON() ([]byte, error) { type NoMethod UpdateClusterRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateInfo: UpdateInfo contains resource (instance groups, etc), status and @@ -6496,9 +6863,9 @@ type UpdateInfo struct { NullFields []string `json:"-"` } -func (s *UpdateInfo) MarshalJSON() ([]byte, error) { +func (s UpdateInfo) MarshalJSON() ([]byte, error) { type NoMethod UpdateInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateMasterRequest: UpdateMasterRequest updates the master of the cluster. @@ -6539,9 +6906,9 @@ type UpdateMasterRequest struct { NullFields []string `json:"-"` } -func (s *UpdateMasterRequest) MarshalJSON() ([]byte, error) { +func (s UpdateMasterRequest) MarshalJSON() ([]byte, error) { type NoMethod UpdateMasterRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateNodePoolRequest: UpdateNodePoolRequests update a node pool's image @@ -6635,6 +7002,9 @@ type UpdateNodePoolRequest struct { // attached to the nodes for managing Compute Engine firewalls using Network // Firewall Policies. Existing tags will be replaced with new values. 
ResourceManagerTags *ResourceManagerTags `json:"resourceManagerTags,omitempty"` + // StoragePools: List of Storage Pools where boot disks are provisioned. + // Existing Storage Pools will be replaced with storage-pools. + StoragePools []string `json:"storagePools,omitempty"` // Tags: The desired network tags to be applied to all nodes in the node pool. // If this field is not present, the tags will not be changed. Otherwise, the // existing network tags will be *replaced* with the provided tags. @@ -6668,9 +7038,9 @@ type UpdateNodePoolRequest struct { NullFields []string `json:"-"` } -func (s *UpdateNodePoolRequest) MarshalJSON() ([]byte, error) { +func (s UpdateNodePoolRequest) MarshalJSON() ([]byte, error) { type NoMethod UpdateNodePoolRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpgradeAvailableEvent: UpgradeAvailableEvent is a notification sent to @@ -6705,9 +7075,9 @@ type UpgradeAvailableEvent struct { NullFields []string `json:"-"` } -func (s *UpgradeAvailableEvent) MarshalJSON() ([]byte, error) { +func (s UpgradeAvailableEvent) MarshalJSON() ([]byte, error) { type NoMethod UpgradeAvailableEvent - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpgradeEvent: UpgradeEvent is a notification sent to customers by the @@ -6745,9 +7115,9 @@ type UpgradeEvent struct { NullFields []string `json:"-"` } -func (s *UpgradeEvent) MarshalJSON() ([]byte, error) { +func (s UpgradeEvent) MarshalJSON() ([]byte, error) { type NoMethod UpgradeEvent - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpgradeSettings: These upgrade settings control the level of parallelism and @@ -6814,9 +7184,9 @@ type UpgradeSettings struct { NullFields []string `json:"-"` } -func (s *UpgradeSettings) MarshalJSON() ([]byte, error) { +func (s UpgradeSettings) MarshalJSON() ([]byte, error) { type NoMethod UpgradeSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UsableSubnetwork: UsableSubnetwork resource returns the subnetwork name, its @@ -6851,9 +7221,9 @@ type UsableSubnetwork struct { NullFields []string `json:"-"` } -func (s *UsableSubnetwork) MarshalJSON() ([]byte, error) { +func (s UsableSubnetwork) MarshalJSON() ([]byte, error) { type NoMethod UsableSubnetwork - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UsableSubnetworkSecondaryRange: Secondary IP range of a usable subnetwork. @@ -6893,9 +7263,59 @@ type UsableSubnetworkSecondaryRange struct { NullFields []string `json:"-"` } -func (s *UsableSubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { +func (s UsableSubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { type NoMethod UsableSubnetworkSecondaryRange - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// UserManagedKeysConfig: UserManagedKeysConfig holds the resource address to +// Keys which are used for signing certs and token that are used for +// communication within cluster. 
+type UserManagedKeysConfig struct { + // AggregationCa: The Certificate Authority Service caPool to use for the + // aggregation CA in this cluster. + AggregationCa string `json:"aggregationCa,omitempty"` + // ClusterCa: The Certificate Authority Service caPool to use for the cluster + // CA in this cluster. + ClusterCa string `json:"clusterCa,omitempty"` + // ControlPlaneDiskEncryptionKey: The Cloud KMS cryptoKey to use for + // Confidential Hyperdisk on the control plane nodes. + ControlPlaneDiskEncryptionKey string `json:"controlPlaneDiskEncryptionKey,omitempty"` + // EtcdApiCa: Resource path of the Certificate Authority Service caPool to use + // for the etcd API CA in this cluster. + EtcdApiCa string `json:"etcdApiCa,omitempty"` + // EtcdPeerCa: Resource path of the Certificate Authority Service caPool to use + // for the etcd peer CA in this cluster. + EtcdPeerCa string `json:"etcdPeerCa,omitempty"` + // GkeopsEtcdBackupEncryptionKey: Resource path of the Cloud KMS cryptoKey to + // use for encryption of internal etcd backups. + GkeopsEtcdBackupEncryptionKey string `json:"gkeopsEtcdBackupEncryptionKey,omitempty"` + // ServiceAccountSigningKeys: The Cloud KMS cryptoKeyVersions to use for + // signing service account JWTs issued by this cluster. Format: + // `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{crypt + // oKey}/cryptoKeyVersions/{cryptoKeyVersion}` + ServiceAccountSigningKeys []string `json:"serviceAccountSigningKeys,omitempty"` + // ServiceAccountVerificationKeys: The Cloud KMS cryptoKeyVersions to use for + // verifying service account JWTs issued by this cluster. Format: + // `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{crypt + // oKey}/cryptoKeyVersions/{cryptoKeyVersion}` + ServiceAccountVerificationKeys []string `json:"serviceAccountVerificationKeys,omitempty"` + // ForceSendFields is a list of field names (e.g. "AggregationCa") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AggregationCa") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s UserManagedKeysConfig) MarshalJSON() ([]byte, error) { + type NoMethod UserManagedKeysConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VerticalPodAutoscaling: VerticalPodAutoscaling contains global, per-cluster @@ -6917,9 +7337,9 @@ type VerticalPodAutoscaling struct { NullFields []string `json:"-"` } -func (s *VerticalPodAutoscaling) MarshalJSON() ([]byte, error) { +func (s VerticalPodAutoscaling) MarshalJSON() ([]byte, error) { type NoMethod VerticalPodAutoscaling - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VirtualNIC: Configuration of gVNIC feature. 
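The UserManagedKeysConfig type added just above is entirely resource references: CA Service caPools for the cluster CAs and Cloud KMS keys for signing and disk encryption. A construction sketch with placeholder resource names; only the path formats come from the field docs, the project, location, pool, and key names are invented:

package main

import (
	"encoding/json"
	"fmt"

	container "google.golang.org/api/container/v1"
)

func main() {
	cfg := &container.UserManagedKeysConfig{
		// Placeholder CA Service caPool; a real pool must already exist.
		ClusterCa: "projects/my-project/locations/us-central1/caPools/cluster-ca",
		// Placeholder Cloud KMS cryptoKeyVersion in the documented format.
		ServiceAccountSigningKeys: []string{
			"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/sa-signing/cryptoKeyVersions/1",
		},
	}
	b, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}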
@@ -6939,9 +7359,9 @@ type VirtualNIC struct { NullFields []string `json:"-"` } -func (s *VirtualNIC) MarshalJSON() ([]byte, error) { +func (s VirtualNIC) MarshalJSON() ([]byte, error) { type NoMethod VirtualNIC - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WindowsNodeConfig: Parameters that can be configured on Windows nodes. @@ -6971,9 +7391,9 @@ type WindowsNodeConfig struct { NullFields []string `json:"-"` } -func (s *WindowsNodeConfig) MarshalJSON() ([]byte, error) { +func (s WindowsNodeConfig) MarshalJSON() ([]byte, error) { type NoMethod WindowsNodeConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkloadIdentityConfig: Configuration for the use of Kubernetes Service @@ -6995,9 +7415,9 @@ type WorkloadIdentityConfig struct { NullFields []string `json:"-"` } -func (s *WorkloadIdentityConfig) MarshalJSON() ([]byte, error) { +func (s WorkloadIdentityConfig) MarshalJSON() ([]byte, error) { type NoMethod WorkloadIdentityConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkloadMetadataConfig: WorkloadMetadataConfig defines the metadata @@ -7028,9 +7448,9 @@ type WorkloadMetadataConfig struct { NullFields []string `json:"-"` } -func (s *WorkloadMetadataConfig) MarshalJSON() ([]byte, error) { +func (s WorkloadMetadataConfig) MarshalJSON() ([]byte, error) { type NoMethod WorkloadMetadataConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkloadPolicyConfig: WorkloadPolicyConfig is the configuration of workload @@ -7051,9 +7471,9 @@ type WorkloadPolicyConfig struct { NullFields []string `json:"-"` } -func (s *WorkloadPolicyConfig) MarshalJSON() ([]byte, error) { +func (s WorkloadPolicyConfig) MarshalJSON() ([]byte, error) { type NoMethod WorkloadPolicyConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ProjectsAggregatedUsableSubnetworksListCall struct { diff --git a/terraform/providers/google/vendor/google.golang.org/api/dataflow/v1b3/dataflow-api.json b/terraform/providers/google/vendor/google.golang.org/api/dataflow/v1b3/dataflow-api.json index 05e04494292..081758ab888 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/dataflow/v1b3/dataflow-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/dataflow/v1b3/dataflow-api.json @@ -7,9 +7,6 @@ }, "https://www.googleapis.com/auth/compute": { "description": "View and manage your Google Compute Engine resources" - }, - "https://www.googleapis.com/auth/compute.readonly": { - "description": "View your Google Compute Engine resources" } } } @@ -42,10 +39,65 @@ "endpointUrl": "https://dataflow.me-central2.rep.googleapis.com/", "location": "me-central2" }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://dataflow.us-central1.rep.googleapis.com/", + "location": "us-central1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://dataflow.us-central2.rep.googleapis.com/", + "location": "us-central2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": 
"https://dataflow.us-east1.rep.googleapis.com/", + "location": "us-east1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://dataflow.us-east4.rep.googleapis.com/", + "location": "us-east4" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://dataflow.us-east5.rep.googleapis.com/", + "location": "us-east5" + }, { "description": "Regional Endpoint", "endpointUrl": "https://dataflow.us-east7.rep.googleapis.com/", "location": "us-east7" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://dataflow.us-south1.rep.googleapis.com/", + "location": "us-south1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://dataflow.us-west1.rep.googleapis.com/", + "location": "us-west1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://dataflow.us-west2.rep.googleapis.com/", + "location": "us-west2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://dataflow.us-west3.rep.googleapis.com/", + "location": "us-west3" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://dataflow.us-west4.rep.googleapis.com/", + "location": "us-west4" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://dataflow.us-west8.rep.googleapis.com/", + "location": "us-west8" } ], "fullyEncodeReservedExpansion": true, @@ -172,8 +224,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "workerMessages": { @@ -201,8 +252,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] } }, @@ -287,8 +337,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "create": { @@ -343,8 +392,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "get": { @@ -398,8 +446,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "getMetrics": { @@ -442,8 +489,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "list": { @@ -524,8 +570,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "snapshot": { @@ -560,8 +605,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "update": { @@ -607,8 +651,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] } }, @@ -647,8 +690,7 
@@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "sendCapture": { @@ -683,8 +725,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] } } @@ -769,8 +810,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] } } @@ -809,8 +849,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "reportStatus": { @@ -845,8 +884,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] } } @@ -887,8 +925,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] } }, @@ -927,8 +964,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] } } @@ -989,8 +1025,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "get": { @@ -1046,8 +1081,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "getExecutionDetails": { @@ -1097,8 +1131,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "getMetrics": { @@ -1143,8 +1176,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "list": { @@ -1227,8 +1259,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "snapshot": { @@ -1270,8 +1301,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "update": { @@ -1319,8 +1349,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] } }, @@ -1366,8 +1395,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - 
"https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "sendCapture": { @@ -1409,8 +1437,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] } } @@ -1497,8 +1524,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] } } @@ -1541,8 +1567,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] } } @@ -1615,8 +1640,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] } } @@ -1662,8 +1686,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "reportStatus": { @@ -1705,8 +1728,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] } } @@ -1751,8 +1773,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "get": { @@ -1791,8 +1812,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "list": { @@ -1829,8 +1849,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] } } @@ -1838,7 +1857,7 @@ "templates": { "methods": { "create": { - "description": "Creates a Cloud Dataflow job from a template. Do not enter confidential information when you supply string values using the API.", + "description": "Creates a Cloud Dataflow job from a template. Do not enter confidential information when you supply string values using the API. To create a job, we recommend using `projects.locations.templates.create` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using `projects.templates.create` is not recommended, because your job will always start in `us-central1`.", "flatPath": "v1b3/projects/{projectId}/locations/{location}/templates", "httpMethod": "POST", "id": "dataflow.projects.locations.templates.create", @@ -1869,12 +1888,11 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "get": { - "description": "Get the template associated with a template.", + "description": "Get the template associated with a template. 
To get the template, we recommend using `projects.locations.templates.get` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using `projects.templates.get` is not recommended, because only templates that are running in `us-central1` are retrieved.", "flatPath": "v1b3/projects/{projectId}/locations/{location}/templates:get", "httpMethod": "GET", "id": "dataflow.projects.locations.templates.get", @@ -1918,12 +1936,11 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "launch": { - "description": "Launch a template.", + "description": "Launches a template. To launch a template, we recommend using `projects.locations.templates.launch` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using `projects.templates.launch` is not recommended, because jobs launched from the template will always start in `us-central1`.", "flatPath": "v1b3/projects/{projectId}/locations/{location}/templates:launch", "httpMethod": "POST", "id": "dataflow.projects.locations.templates.launch", @@ -1974,8 +1991,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] } } @@ -2018,8 +2034,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "list": { @@ -2054,8 +2069,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] } } @@ -2063,7 +2077,7 @@ "templates": { "methods": { "create": { - "description": "Creates a Cloud Dataflow job from a template. Do not enter confidential information when you supply string values using the API.", + "description": "Creates a Cloud Dataflow job from a template. Do not enter confidential information when you supply string values using the API. To create a job, we recommend using `projects.locations.templates.create` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using `projects.templates.create` is not recommended, because your job will always start in `us-central1`.", "flatPath": "v1b3/projects/{projectId}/templates", "httpMethod": "POST", "id": "dataflow.projects.templates.create", @@ -2087,12 +2101,11 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "get": { - "description": "Get the template associated with a template.", + "description": "Get the template associated with a template. To get the template, we recommend using `projects.locations.templates.get` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). 
Using `projects.templates.get` is not recommended, because only templates that are running in `us-central1` are retrieved.", "flatPath": "v1b3/projects/{projectId}/templates:get", "httpMethod": "GET", "id": "dataflow.projects.templates.get", @@ -2134,12 +2147,11 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "launch": { - "description": "Launch a template.", + "description": "Launches a template. To launch a template, we recommend using `projects.locations.templates.launch` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using `projects.templates.launch` is not recommended, because jobs launched from the template will always start in `us-central1`.", "flatPath": "v1b3/projects/{projectId}/templates:launch", "httpMethod": "POST", "id": "dataflow.projects.templates.launch", @@ -2188,8 +2200,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] } } @@ -2197,7 +2208,7 @@ } } }, - "revision": "20240603", + "revision": "20240817", "rootUrl": "https://dataflow.googleapis.com/", "schemas": { "ApproximateProgress": { @@ -2967,7 +2978,7 @@ "description": "Configuration options for sampling elements from a running pipeline." }, "enableHotKeyLogging": { - "description": "When true, enables the logging of the literal hot key to the user's Cloud Logging.", + "description": "Optional. When true, enables the logging of the literal hot key to the user's Cloud Logging.", "type": "boolean" } }, @@ -3141,12 +3152,12 @@ "type": "string" }, "dataset": { - "description": "The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}", + "description": "Optional. The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}", "type": "string" }, "debugOptions": { "$ref": "DebugOptions", - "description": "Any debugging options to be supplied to the job." + "description": "Optional. Any debugging options to be supplied to the job." }, "experiments": { "description": "The list of experiments to enable. This field should be used for SDK related experiments and not for service related experiments. The proper field for service related experiments is service_options.", @@ -3156,7 +3167,7 @@ "type": "array" }, "flexResourceSchedulingGoal": { - "description": "Which Flexible Resource Scheduling mode to run in.", + "description": "Optional. Which Flexible Resource Scheduling mode to run in.", "enum": [ "FLEXRS_UNSPECIFIED", "FLEXRS_SPEED_OPTIMIZED", @@ -3186,11 +3197,11 @@ "type": "object" }, "serviceAccountEmail": { - "description": "Identity to run virtual machines as. Defaults to the default account.", + "description": "Optional. Identity to run virtual machines as. Defaults to the default account.", "type": "string" }, "serviceKmsKeyName": { - "description": "If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY", + "description": "Optional. 
If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY", "type": "string" }, "serviceOptions": { @@ -3262,11 +3273,11 @@ "type": "array" }, "workerRegion": { - "description": "The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. \"us-west1\". Mutually exclusive with worker_zone. If neither worker_region nor worker_zone is specified, default to the control plane's region.", + "description": "Optional. The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. \"us-west1\". Mutually exclusive with worker_zone. If neither worker_region nor worker_zone is specified, default to the control plane's region.", "type": "string" }, "workerZone": { - "description": "The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. \"us-west1-a\". Mutually exclusive with worker_region. If neither worker_region nor worker_zone is specified, a zone in the control plane's region is chosen based on available capacity.", + "description": "Optional. The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. \"us-west1-a\". Mutually exclusive with worker_region. If neither worker_region nor worker_zone is specified, a zone in the control plane's region is chosen based on available capacity.", "type": "string" } }, @@ -3935,7 +3946,7 @@ "type": "object" }, "location": { - "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.", + "description": "Optional. The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.", "type": "string" }, "name": { @@ -4005,6 +4016,11 @@ "description": "Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests.", "type": "boolean" }, + "serviceResources": { + "$ref": "ServiceResources", + "description": "Output only. Resources used by the Dataflow Service to run the job.", + "readOnly": true + }, "stageStates": { "description": "This field may be mutated by the Cloud Dataflow service; callers cannot mutate it.", "items": { @@ -4039,7 +4055,7 @@ "additionalProperties": { "type": "string" }, - "description": "The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.", + "description": "Optional. The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.", "type": "object" }, "type": { @@ -4402,6 +4418,11 @@ "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the WorkItem's job.", "type": "string" }, + "projectNumber": { + "description": "Optional. 
The project number of the project this worker belongs to.", + "format": "int64", + "type": "string" + }, "requestedLeaseDuration": { "description": "The initial lease period.", "format": "google-duration", @@ -5364,6 +5385,11 @@ "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the WorkItem's job.", "type": "string" }, + "projectNumber": { + "description": "Optional. The project number of the project which owns the WorkItem's job.", + "format": "int64", + "type": "string" + }, "unifiedWorkerRequest": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -5881,6 +5907,21 @@ }, "type": "object" }, + "ServiceResources": { + "description": "Resources used by the Dataflow Service to run the job.", + "id": "ServiceResources", + "properties": { + "zones": { + "description": "Output only. List of Cloud Zones being used by the Dataflow Service for this job. Example: us-central1-c", + "items": { + "type": "string" + }, + "readOnly": true, + "type": "array" + } + }, + "type": "object" + }, "ShellTask": { "description": "A task which consists of a shell command for the worker to execute.", "id": "ShellTask", @@ -6720,6 +6761,16 @@ "description": "Map from user step names to state families.", "type": "object" }, + "userWorkerRunnerV1Settings": { + "description": "Binary encoded proto to control runtime behavior of the java runner v1 user worker.", + "format": "byte", + "type": "string" + }, + "userWorkerRunnerV2Settings": { + "description": "Binary encoded proto to control runtime behavior of the runner v2 user worker.", + "format": "byte", + "type": "string" + }, "windmillServiceEndpoint": { "description": "If present, the worker must use this endpoint to communicate with Windmill Service dispatchers, otherwise the worker must continue to use whatever endpoint it had been using.", "type": "string" diff --git a/terraform/providers/google/vendor/google.golang.org/api/dataflow/v1b3/dataflow-gen.go b/terraform/providers/google/vendor/google.golang.org/api/dataflow/v1b3/dataflow-gen.go index fccc7fd3d28..4026cccb544 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/dataflow/v1b3/dataflow-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/dataflow/v1b3/dataflow-gen.go @@ -37,7 +37,7 @@ // By default, all available scopes (see "Constants") are used to authenticate. // To restrict scopes, use [google.golang.org/api/option.WithScopes]: // -// dataflowService, err := dataflow.NewService(ctx, option.WithScopes(dataflow.ComputeReadonlyScope)) +// dataflowService, err := dataflow.NewService(ctx, option.WithScopes(dataflow.ComputeScope)) // // To use an API key for authentication (note: some APIs do not support API // keys), use [google.golang.org/api/option.WithAPIKey]: @@ -106,9 +106,6 @@ const ( // View and manage your Google Compute Engine resources ComputeScope = "https://www.googleapis.com/auth/compute" - - // View your Google Compute Engine resources - ComputeReadonlyScope = "https://www.googleapis.com/auth/compute.readonly" ) // NewService creates a new Service. @@ -116,7 +113,6 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err scopesOption := internaloption.WithDefaultScopes( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly", ) // NOTE: prepend, so we don't override user-specified scopes. 
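The dataflow/v1b3 surface drops the compute.readonly scope entirely: the ComputeReadonlyScope constant is removed and the default scope set shrinks to cloud-platform plus compute. Callers that previously pinned the readonly scope need to move to the remaining compute scope (or cloud-platform), as the updated package doc example shows:

package main

import (
	"context"
	"log"

	dataflow "google.golang.org/api/dataflow/v1b3"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	// dataflow.ComputeReadonlyScope no longer exists after this change;
	// restrict to the compute scope explicitly instead.
	svc, err := dataflow.NewService(ctx, option.WithScopes(dataflow.ComputeScope))
	if err != nil {
		log.Fatal(err)
	}
	_ = svc // use svc.Projects... as usual
}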
opts = append([]option.ClientOption{scopesOption}, opts...) @@ -390,9 +386,9 @@ type ApproximateProgress struct { NullFields []string `json:"-"` } -func (s *ApproximateProgress) MarshalJSON() ([]byte, error) { +func (s ApproximateProgress) MarshalJSON() ([]byte, error) { type NoMethod ApproximateProgress - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *ApproximateProgress) UnmarshalJSON(data []byte) error { @@ -458,9 +454,9 @@ type ApproximateReportedProgress struct { NullFields []string `json:"-"` } -func (s *ApproximateReportedProgress) MarshalJSON() ([]byte, error) { +func (s ApproximateReportedProgress) MarshalJSON() ([]byte, error) { type NoMethod ApproximateReportedProgress - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *ApproximateReportedProgress) UnmarshalJSON(data []byte) error { @@ -501,9 +497,9 @@ type ApproximateSplitRequest struct { NullFields []string `json:"-"` } -func (s *ApproximateSplitRequest) MarshalJSON() ([]byte, error) { +func (s ApproximateSplitRequest) MarshalJSON() ([]byte, error) { type NoMethod ApproximateSplitRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *ApproximateSplitRequest) UnmarshalJSON(data []byte) error { @@ -573,9 +569,9 @@ type AutoscalingEvent struct { NullFields []string `json:"-"` } -func (s *AutoscalingEvent) MarshalJSON() ([]byte, error) { +func (s AutoscalingEvent) MarshalJSON() ([]byte, error) { type NoMethod AutoscalingEvent - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AutoscalingSettings: Settings for WorkerPool autoscaling. @@ -604,9 +600,9 @@ type AutoscalingSettings struct { NullFields []string `json:"-"` } -func (s *AutoscalingSettings) MarshalJSON() ([]byte, error) { +func (s AutoscalingSettings) MarshalJSON() ([]byte, error) { type NoMethod AutoscalingSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Base2Exponent: Exponential buckets where the growth factor between buckets @@ -632,9 +628,9 @@ type Base2Exponent struct { NullFields []string `json:"-"` } -func (s *Base2Exponent) MarshalJSON() ([]byte, error) { +func (s Base2Exponent) MarshalJSON() ([]byte, error) { type NoMethod Base2Exponent - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BigQueryIODetails: Metadata for a BigQuery connector used by the job. @@ -660,9 +656,9 @@ type BigQueryIODetails struct { NullFields []string `json:"-"` } -func (s *BigQueryIODetails) MarshalJSON() ([]byte, error) { +func (s BigQueryIODetails) MarshalJSON() ([]byte, error) { type NoMethod BigQueryIODetails - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BigTableIODetails: Metadata for a Cloud Bigtable connector used by the job. 
@@ -686,9 +682,9 @@ type BigTableIODetails struct { NullFields []string `json:"-"` } -func (s *BigTableIODetails) MarshalJSON() ([]byte, error) { +func (s BigTableIODetails) MarshalJSON() ([]byte, error) { type NoMethod BigTableIODetails - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketOptions: `BucketOptions` describes the bucket boundaries used in the @@ -711,9 +707,9 @@ type BucketOptions struct { NullFields []string `json:"-"` } -func (s *BucketOptions) MarshalJSON() ([]byte, error) { +func (s BucketOptions) MarshalJSON() ([]byte, error) { type NoMethod BucketOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CPUTime: Modeled after information exposed by /proc/stat. @@ -739,9 +735,9 @@ type CPUTime struct { NullFields []string `json:"-"` } -func (s *CPUTime) MarshalJSON() ([]byte, error) { +func (s CPUTime) MarshalJSON() ([]byte, error) { type NoMethod CPUTime - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *CPUTime) UnmarshalJSON(data []byte) error { @@ -782,9 +778,9 @@ type ComponentSource struct { NullFields []string `json:"-"` } -func (s *ComponentSource) MarshalJSON() ([]byte, error) { +func (s ComponentSource) MarshalJSON() ([]byte, error) { type NoMethod ComponentSource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ComponentTransform: Description of a transform executed as part of an @@ -811,9 +807,9 @@ type ComponentTransform struct { NullFields []string `json:"-"` } -func (s *ComponentTransform) MarshalJSON() ([]byte, error) { +func (s ComponentTransform) MarshalJSON() ([]byte, error) { type NoMethod ComponentTransform - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ComputationTopology: All configuration data for a particular Computation. @@ -843,9 +839,9 @@ type ComputationTopology struct { NullFields []string `json:"-"` } -func (s *ComputationTopology) MarshalJSON() ([]byte, error) { +func (s ComputationTopology) MarshalJSON() ([]byte, error) { type NoMethod ComputationTopology - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ConcatPosition: A position that encapsulates an inner position and an index @@ -869,9 +865,9 @@ type ConcatPosition struct { NullFields []string `json:"-"` } -func (s *ConcatPosition) MarshalJSON() ([]byte, error) { +func (s ConcatPosition) MarshalJSON() ([]byte, error) { type NoMethod ConcatPosition - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ContainerSpec: Container Spec. 
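The ForceSendFields/NullFields comments repeated through these structs describe how callers override the default omission of empty values. A sketch on ContainerSpec (the Image and Metadata field names used here are assumptions about the generated struct, not shown in the hunks):

package main

import (
	"fmt"
	"log"

	dataflow "google.golang.org/api/dataflow/v1b3"
)

func main() {
	spec := &dataflow.ContainerSpec{
		Image: "", // empty, so it would normally be omitted from the JSON body
		// Force the empty image into the request anyway, and send an explicit
		// JSON null for Metadata (assumed field names, for illustration only):
		ForceSendFields: []string{"Image"},
		NullFields:      []string{"Metadata"},
	}
	b, err := spec.MarshalJSON()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // e.g. {"image":"","metadata":null}
}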
@@ -907,9 +903,9 @@ type ContainerSpec struct { NullFields []string `json:"-"` } -func (s *ContainerSpec) MarshalJSON() ([]byte, error) { +func (s ContainerSpec) MarshalJSON() ([]byte, error) { type NoMethod ContainerSpec - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CounterMetadata: CounterMetadata includes all static non-name non-value @@ -961,9 +957,9 @@ type CounterMetadata struct { NullFields []string `json:"-"` } -func (s *CounterMetadata) MarshalJSON() ([]byte, error) { +func (s CounterMetadata) MarshalJSON() ([]byte, error) { type NoMethod CounterMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CounterStructuredName: Identifies a counter within a per-job namespace. @@ -1022,9 +1018,9 @@ type CounterStructuredName struct { NullFields []string `json:"-"` } -func (s *CounterStructuredName) MarshalJSON() ([]byte, error) { +func (s CounterStructuredName) MarshalJSON() ([]byte, error) { type NoMethod CounterStructuredName - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CounterStructuredNameAndMetadata: A single message which encapsulates @@ -1047,9 +1043,9 @@ type CounterStructuredNameAndMetadata struct { NullFields []string `json:"-"` } -func (s *CounterStructuredNameAndMetadata) MarshalJSON() ([]byte, error) { +func (s CounterStructuredNameAndMetadata) MarshalJSON() ([]byte, error) { type NoMethod CounterStructuredNameAndMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CounterUpdate: An update to a Counter sent from a worker. @@ -1102,9 +1098,9 @@ type CounterUpdate struct { NullFields []string `json:"-"` } -func (s *CounterUpdate) MarshalJSON() ([]byte, error) { +func (s CounterUpdate) MarshalJSON() ([]byte, error) { type NoMethod CounterUpdate - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *CounterUpdate) UnmarshalJSON(data []byte) error { @@ -1150,9 +1146,9 @@ type CreateJobFromTemplateRequest struct { NullFields []string `json:"-"` } -func (s *CreateJobFromTemplateRequest) MarshalJSON() ([]byte, error) { +func (s CreateJobFromTemplateRequest) MarshalJSON() ([]byte, error) { type NoMethod CreateJobFromTemplateRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CustomSourceLocation: Identifies the location of a custom souce. @@ -1172,9 +1168,9 @@ type CustomSourceLocation struct { NullFields []string `json:"-"` } -func (s *CustomSourceLocation) MarshalJSON() ([]byte, error) { +func (s CustomSourceLocation) MarshalJSON() ([]byte, error) { type NoMethod CustomSourceLocation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DataDiskAssignment: Data disk assignment for a given VM instance. 
@@ -1200,9 +1196,9 @@ type DataDiskAssignment struct { NullFields []string `json:"-"` } -func (s *DataDiskAssignment) MarshalJSON() ([]byte, error) { +func (s DataDiskAssignment) MarshalJSON() ([]byte, error) { type NoMethod DataDiskAssignment - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DataSamplingConfig: Configuration options for sampling elements. @@ -1237,9 +1233,9 @@ type DataSamplingConfig struct { NullFields []string `json:"-"` } -func (s *DataSamplingConfig) MarshalJSON() ([]byte, error) { +func (s DataSamplingConfig) MarshalJSON() ([]byte, error) { type NoMethod DataSamplingConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DataSamplingReport: Contains per-worker telemetry about the data sampling @@ -1278,9 +1274,9 @@ type DataSamplingReport struct { NullFields []string `json:"-"` } -func (s *DataSamplingReport) MarshalJSON() ([]byte, error) { +func (s DataSamplingReport) MarshalJSON() ([]byte, error) { type NoMethod DataSamplingReport - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DataflowHistogramValue: Summary statistics for a population of values. @@ -1314,9 +1310,9 @@ type DataflowHistogramValue struct { NullFields []string `json:"-"` } -func (s *DataflowHistogramValue) MarshalJSON() ([]byte, error) { +func (s DataflowHistogramValue) MarshalJSON() ([]byte, error) { type NoMethod DataflowHistogramValue - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DatastoreIODetails: Metadata for a Datastore connector used by the job. @@ -1338,9 +1334,9 @@ type DatastoreIODetails struct { NullFields []string `json:"-"` } -func (s *DatastoreIODetails) MarshalJSON() ([]byte, error) { +func (s DatastoreIODetails) MarshalJSON() ([]byte, error) { type NoMethod DatastoreIODetails - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DebugOptions: Describes any options that have an effect on the debugging of @@ -1349,8 +1345,8 @@ type DebugOptions struct { // DataSampling: Configuration options for sampling elements from a running // pipeline. DataSampling *DataSamplingConfig `json:"dataSampling,omitempty"` - // EnableHotKeyLogging: When true, enables the logging of the literal hot key - // to the user's Cloud Logging. + // EnableHotKeyLogging: Optional. When true, enables the logging of the literal + // hot key to the user's Cloud Logging. EnableHotKeyLogging bool `json:"enableHotKeyLogging,omitempty"` // ForceSendFields is a list of field names (e.g. "DataSampling") to // unconditionally include in API requests. By default, fields with empty or @@ -1365,9 +1361,9 @@ type DebugOptions struct { NullFields []string `json:"-"` } -func (s *DebugOptions) MarshalJSON() ([]byte, error) { +func (s DebugOptions) MarshalJSON() ([]byte, error) { type NoMethod DebugOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DeleteSnapshotResponse: Response from deleting a snapshot. 
@@ -1408,9 +1404,9 @@ type DerivedSource struct { NullFields []string `json:"-"` } -func (s *DerivedSource) MarshalJSON() ([]byte, error) { +func (s DerivedSource) MarshalJSON() ([]byte, error) { type NoMethod DerivedSource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Disk: Describes the data disk used by a workflow job. @@ -1447,9 +1443,9 @@ type Disk struct { NullFields []string `json:"-"` } -func (s *Disk) MarshalJSON() ([]byte, error) { +func (s Disk) MarshalJSON() ([]byte, error) { type NoMethod Disk - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DisplayData: Data provided with a pipeline or transform to provide @@ -1500,9 +1496,9 @@ type DisplayData struct { NullFields []string `json:"-"` } -func (s *DisplayData) MarshalJSON() ([]byte, error) { +func (s DisplayData) MarshalJSON() ([]byte, error) { type NoMethod DisplayData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *DisplayData) UnmarshalJSON(data []byte) error { @@ -1548,9 +1544,9 @@ type DistributionUpdate struct { NullFields []string `json:"-"` } -func (s *DistributionUpdate) MarshalJSON() ([]byte, error) { +func (s DistributionUpdate) MarshalJSON() ([]byte, error) { type NoMethod DistributionUpdate - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *DistributionUpdate) UnmarshalJSON(data []byte) error { @@ -1590,9 +1586,9 @@ type DynamicSourceSplit struct { NullFields []string `json:"-"` } -func (s *DynamicSourceSplit) MarshalJSON() ([]byte, error) { +func (s DynamicSourceSplit) MarshalJSON() ([]byte, error) { type NoMethod DynamicSourceSplit - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Environment: Describes the environment in which a Dataflow Job runs. @@ -1602,18 +1598,18 @@ type Environment struct { // This should be in the form of the API service name, e.g. // "compute.googleapis.com". ClusterManagerApiService string `json:"clusterManagerApiService,omitempty"` - // Dataset: The dataset for the current project where various workflow related - // tables are stored. The supported resource type is: Google BigQuery: - // bigquery.googleapis.com/{dataset} + // Dataset: Optional. The dataset for the current project where various + // workflow related tables are stored. The supported resource type is: Google + // BigQuery: bigquery.googleapis.com/{dataset} Dataset string `json:"dataset,omitempty"` - // DebugOptions: Any debugging options to be supplied to the job. + // DebugOptions: Optional. Any debugging options to be supplied to the job. DebugOptions *DebugOptions `json:"debugOptions,omitempty"` // Experiments: The list of experiments to enable. This field should be used // for SDK related experiments and not for service related experiments. The // proper field for service related experiments is service_options. Experiments []string `json:"experiments,omitempty"` - // FlexResourceSchedulingGoal: Which Flexible Resource Scheduling mode to run - // in. + // FlexResourceSchedulingGoal: Optional. Which Flexible Resource Scheduling + // mode to run in. 
// // Possible values: // "FLEXRS_UNSPECIFIED" - Run in the default mode. @@ -1627,11 +1623,12 @@ type Environment struct { // the SDK pipeline options on the worker in a language agnostic and platform // independent way. SdkPipelineOptions googleapi.RawMessage `json:"sdkPipelineOptions,omitempty"` - // ServiceAccountEmail: Identity to run virtual machines as. Defaults to the - // default account. + // ServiceAccountEmail: Optional. Identity to run virtual machines as. Defaults + // to the default account. ServiceAccountEmail string `json:"serviceAccountEmail,omitempty"` - // ServiceKmsKeyName: If set, contains the Cloud KMS key identifier used to - // encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: + // ServiceKmsKeyName: Optional. If set, contains the Cloud KMS key identifier + // used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). + // Format: // projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY ServiceKmsKeyName string `json:"serviceKmsKeyName,omitempty"` // ServiceOptions: Optional. The list of service options to enable. This field @@ -1684,13 +1681,13 @@ type Environment struct { // WorkerPools: The worker pools. At least one "harness" worker pool must be // specified in order for the job to have workers. WorkerPools []*WorkerPool `json:"workerPools,omitempty"` - // WorkerRegion: The Compute Engine region + // WorkerRegion: Optional. The Compute Engine region // (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which // worker processing should occur, e.g. "us-west1". Mutually exclusive with // worker_zone. If neither worker_region nor worker_zone is specified, default // to the control plane's region. WorkerRegion string `json:"workerRegion,omitempty"` - // WorkerZone: The Compute Engine zone + // WorkerZone: Optional. The Compute Engine zone // (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which // worker processing should occur, e.g. "us-west1-a". Mutually exclusive with // worker_region. 
If neither worker_region nor worker_zone is specified, a zone @@ -1709,9 +1706,9 @@ type Environment struct { NullFields []string `json:"-"` } -func (s *Environment) MarshalJSON() ([]byte, error) { +func (s Environment) MarshalJSON() ([]byte, error) { type NoMethod Environment - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExecutionStageState: A message describing the state of a particular @@ -1786,9 +1783,9 @@ type ExecutionStageState struct { NullFields []string `json:"-"` } -func (s *ExecutionStageState) MarshalJSON() ([]byte, error) { +func (s ExecutionStageState) MarshalJSON() ([]byte, error) { type NoMethod ExecutionStageState - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExecutionStageSummary: Description of the composing transforms, names/ids, @@ -1840,9 +1837,9 @@ type ExecutionStageSummary struct { NullFields []string `json:"-"` } -func (s *ExecutionStageSummary) MarshalJSON() ([]byte, error) { +func (s ExecutionStageSummary) MarshalJSON() ([]byte, error) { type NoMethod ExecutionStageSummary - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FailedLocation: Indicates which [regional endpoint] @@ -1866,9 +1863,9 @@ type FailedLocation struct { NullFields []string `json:"-"` } -func (s *FailedLocation) MarshalJSON() ([]byte, error) { +func (s FailedLocation) MarshalJSON() ([]byte, error) { type NoMethod FailedLocation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FileIODetails: Metadata for a File connector used by the job. 
@@ -1888,9 +1885,9 @@ type FileIODetails struct { NullFields []string `json:"-"` } -func (s *FileIODetails) MarshalJSON() ([]byte, error) { +func (s FileIODetails) MarshalJSON() ([]byte, error) { type NoMethod FileIODetails - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FlattenInstruction: An instruction that copies its inputs (zero or more) to @@ -1911,9 +1908,9 @@ type FlattenInstruction struct { NullFields []string `json:"-"` } -func (s *FlattenInstruction) MarshalJSON() ([]byte, error) { +func (s FlattenInstruction) MarshalJSON() ([]byte, error) { type NoMethod FlattenInstruction - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FlexTemplateRuntimeEnvironment: The environment values to be set at runtime @@ -2058,9 +2055,9 @@ type FlexTemplateRuntimeEnvironment struct { NullFields []string `json:"-"` } -func (s *FlexTemplateRuntimeEnvironment) MarshalJSON() ([]byte, error) { +func (s FlexTemplateRuntimeEnvironment) MarshalJSON() ([]byte, error) { type NoMethod FlexTemplateRuntimeEnvironment - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FloatingPointList: A metric value representing a list of floating point @@ -2081,9 +2078,9 @@ type FloatingPointList struct { NullFields []string `json:"-"` } -func (s *FloatingPointList) MarshalJSON() ([]byte, error) { +func (s FloatingPointList) MarshalJSON() ([]byte, error) { type NoMethod FloatingPointList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *FloatingPointList) UnmarshalJSON(data []byte) error { @@ -2123,9 +2120,9 @@ type FloatingPointMean struct { NullFields []string `json:"-"` } -func (s *FloatingPointMean) MarshalJSON() ([]byte, error) { +func (s FloatingPointMean) MarshalJSON() ([]byte, error) { type NoMethod FloatingPointMean - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *FloatingPointMean) UnmarshalJSON(data []byte) error { @@ -2167,9 +2164,9 @@ type GetDebugConfigRequest struct { NullFields []string `json:"-"` } -func (s *GetDebugConfigRequest) MarshalJSON() ([]byte, error) { +func (s GetDebugConfigRequest) MarshalJSON() ([]byte, error) { type NoMethod GetDebugConfigRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetDebugConfigResponse: Response to a get debug configuration request. @@ -2192,9 +2189,9 @@ type GetDebugConfigResponse struct { NullFields []string `json:"-"` } -func (s *GetDebugConfigResponse) MarshalJSON() ([]byte, error) { +func (s GetDebugConfigResponse) MarshalJSON() ([]byte, error) { type NoMethod GetDebugConfigResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetTemplateResponse: The response to a GetTemplate request. 
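Several description strings in the discovery document above now steer callers to the projects.locations.templates methods so work lands in the requested region instead of defaulting to us-central1. A sketch of the recommended regional get, which returns the GetTemplateResponse updated below (project, location, and GCS path are placeholders):

package main

import (
	"context"
	"fmt"
	"log"

	dataflow "google.golang.org/api/dataflow/v1b3"
)

func main() {
	ctx := context.Background()
	svc, err := dataflow.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// projects.locations.templates.get with an explicit region, as the
	// updated descriptions recommend over projects.templates.get.
	resp, err := svc.Projects.Locations.Templates.Get("my-project", "us-central1").
		GcsPath("gs://my-bucket/templates/wordcount").Do()
	if err != nil {
		log.Fatal(err)
	}
	if resp.Metadata != nil {
		fmt.Println(resp.Metadata.Name) // TemplateMetadata from GetTemplateResponse
	}
}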
@@ -2231,9 +2228,9 @@ type GetTemplateResponse struct { NullFields []string `json:"-"` } -func (s *GetTemplateResponse) MarshalJSON() ([]byte, error) { +func (s GetTemplateResponse) MarshalJSON() ([]byte, error) { type NoMethod GetTemplateResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Histogram: Histogram of value counts for a distribution. Buckets have an @@ -2265,9 +2262,9 @@ type Histogram struct { NullFields []string `json:"-"` } -func (s *Histogram) MarshalJSON() ([]byte, error) { +func (s Histogram) MarshalJSON() ([]byte, error) { type NoMethod Histogram - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HotKeyDebuggingInfo: Information useful for debugging a hot key detection. @@ -2288,9 +2285,9 @@ type HotKeyDebuggingInfo struct { NullFields []string `json:"-"` } -func (s *HotKeyDebuggingInfo) MarshalJSON() ([]byte, error) { +func (s HotKeyDebuggingInfo) MarshalJSON() ([]byte, error) { type NoMethod HotKeyDebuggingInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HotKeyDetection: Proto describing a hot key detected on a given WorkItem. @@ -2315,9 +2312,9 @@ type HotKeyDetection struct { NullFields []string `json:"-"` } -func (s *HotKeyDetection) MarshalJSON() ([]byte, error) { +func (s HotKeyDetection) MarshalJSON() ([]byte, error) { type NoMethod HotKeyDetection - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HotKeyInfo: Information about a hot key. @@ -2345,9 +2342,9 @@ type HotKeyInfo struct { NullFields []string `json:"-"` } -func (s *HotKeyInfo) MarshalJSON() ([]byte, error) { +func (s HotKeyInfo) MarshalJSON() ([]byte, error) { type NoMethod HotKeyInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstructionInput: An input of an instruction, as a reference to an output of @@ -2373,9 +2370,9 @@ type InstructionInput struct { NullFields []string `json:"-"` } -func (s *InstructionInput) MarshalJSON() ([]byte, error) { +func (s InstructionInput) MarshalJSON() ([]byte, error) { type NoMethod InstructionInput - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstructionOutput: An output of an instruction. @@ -2409,9 +2406,9 @@ type InstructionOutput struct { NullFields []string `json:"-"` } -func (s *InstructionOutput) MarshalJSON() ([]byte, error) { +func (s InstructionOutput) MarshalJSON() ([]byte, error) { type NoMethod InstructionOutput - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // IntegerGauge: A metric value representing temporal values of a variable. 
@@ -2434,9 +2431,9 @@ type IntegerGauge struct { NullFields []string `json:"-"` } -func (s *IntegerGauge) MarshalJSON() ([]byte, error) { +func (s IntegerGauge) MarshalJSON() ([]byte, error) { type NoMethod IntegerGauge - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // IntegerList: A metric value representing a list of integers. @@ -2456,9 +2453,9 @@ type IntegerList struct { NullFields []string `json:"-"` } -func (s *IntegerList) MarshalJSON() ([]byte, error) { +func (s IntegerList) MarshalJSON() ([]byte, error) { type NoMethod IntegerList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // IntegerMean: A representation of an integer mean metric contribution. @@ -2480,9 +2477,9 @@ type IntegerMean struct { NullFields []string `json:"-"` } -func (s *IntegerMean) MarshalJSON() ([]byte, error) { +func (s IntegerMean) MarshalJSON() ([]byte, error) { type NoMethod IntegerMean - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Job: Defines a job to be run by the Cloud Dataflow service. Do not enter @@ -2577,7 +2574,7 @@ type Job struct { // [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally // constrained to be <= 128 bytes in size. Labels map[string]string `json:"labels,omitempty"` - // Location: The [regional endpoint] + // Location: Optional. The [regional endpoint] // (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that // contains this job. Location string `json:"location,omitempty"` @@ -2670,6 +2667,9 @@ type Job struct { // SatisfiesPzs: Reserved for future use. This field is set only in responses // from the server; it is ignored if it is set in any requests. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` + // ServiceResources: Output only. Resources used by the Dataflow Service to run + // the job. + ServiceResources *ServiceResources `json:"serviceResources,omitempty"` // StageStates: This field may be mutated by the Cloud Dataflow service; // callers cannot mutate it. StageStates []*ExecutionStageState `json:"stageStates,omitempty"` @@ -2692,8 +2692,8 @@ type Job struct { // files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} // bucket.storage.googleapis.com/{object} TempFiles []string `json:"tempFiles,omitempty"` - // TransformNameMapping: The map of transform name prefixes of the job to be - // replaced to the corresponding name prefixes of the new job. + // TransformNameMapping: Optional. The map of transform name prefixes of the + // job to be replaced to the corresponding name prefixes of the new job. TransformNameMapping map[string]string `json:"transformNameMapping,omitempty"` // Type: Optional. The type of Dataflow job. // @@ -2720,9 +2720,9 @@ type Job struct { NullFields []string `json:"-"` } -func (s *Job) MarshalJSON() ([]byte, error) { +func (s Job) MarshalJSON() ([]byte, error) { type NoMethod Job - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JobExecutionDetails: Information about the execution of a job. 
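Job gains the output-only ServiceResources field in this revision; per the schema above it is populated by the service and ignored on requests. A sketch of reading it back after a jobs.get call (the IDs are placeholders):

package main

import (
	"context"
	"fmt"
	"log"

	dataflow "google.golang.org/api/dataflow/v1b3"
)

func main() {
	ctx := context.Background()
	svc, err := dataflow.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	job, err := svc.Projects.Locations.Jobs.Get("my-project", "us-central1", "my-job-id").Do()
	if err != nil {
		log.Fatal(err)
	}
	if job.ServiceResources != nil {
		fmt.Println(job.ServiceResources.Zones) // e.g. [us-central1-c], per the schema above
	}
}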
@@ -2749,9 +2749,9 @@ type JobExecutionDetails struct { NullFields []string `json:"-"` } -func (s *JobExecutionDetails) MarshalJSON() ([]byte, error) { +func (s JobExecutionDetails) MarshalJSON() ([]byte, error) { type NoMethod JobExecutionDetails - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JobExecutionInfo: Additional information about how a Cloud Dataflow job will @@ -2772,9 +2772,9 @@ type JobExecutionInfo struct { NullFields []string `json:"-"` } -func (s *JobExecutionInfo) MarshalJSON() ([]byte, error) { +func (s JobExecutionInfo) MarshalJSON() ([]byte, error) { type NoMethod JobExecutionInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JobExecutionStageInfo: Contains information about how a particular @@ -2797,9 +2797,9 @@ type JobExecutionStageInfo struct { NullFields []string `json:"-"` } -func (s *JobExecutionStageInfo) MarshalJSON() ([]byte, error) { +func (s JobExecutionStageInfo) MarshalJSON() ([]byte, error) { type NoMethod JobExecutionStageInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JobMessage: A particular message pertaining to a Dataflow job. @@ -2850,9 +2850,9 @@ type JobMessage struct { NullFields []string `json:"-"` } -func (s *JobMessage) MarshalJSON() ([]byte, error) { +func (s JobMessage) MarshalJSON() ([]byte, error) { type NoMethod JobMessage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JobMetadata: Metadata available primarily for filtering jobs. 
Will be @@ -2890,9 +2890,9 @@ type JobMetadata struct { NullFields []string `json:"-"` } -func (s *JobMetadata) MarshalJSON() ([]byte, error) { +func (s JobMetadata) MarshalJSON() ([]byte, error) { type NoMethod JobMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JobMetrics: JobMetrics contains a collection of metrics describing the @@ -2924,9 +2924,9 @@ type JobMetrics struct { NullFields []string `json:"-"` } -func (s *JobMetrics) MarshalJSON() ([]byte, error) { +func (s JobMetrics) MarshalJSON() ([]byte, error) { type NoMethod JobMetrics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // KeyRangeDataDiskAssignment: Data disk assignment information for a specific @@ -2955,9 +2955,9 @@ type KeyRangeDataDiskAssignment struct { NullFields []string `json:"-"` } -func (s *KeyRangeDataDiskAssignment) MarshalJSON() ([]byte, error) { +func (s KeyRangeDataDiskAssignment) MarshalJSON() ([]byte, error) { type NoMethod KeyRangeDataDiskAssignment - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // KeyRangeLocation: Location information for a specific key-range of a sharded @@ -2993,9 +2993,9 @@ type KeyRangeLocation struct { NullFields []string `json:"-"` } -func (s *KeyRangeLocation) MarshalJSON() ([]byte, error) { +func (s KeyRangeLocation) MarshalJSON() ([]byte, error) { type NoMethod KeyRangeLocation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LaunchFlexTemplateParameter: Launch FlexTemplate Parameter. 
@@ -3035,9 +3035,9 @@ type LaunchFlexTemplateParameter struct { NullFields []string `json:"-"` } -func (s *LaunchFlexTemplateParameter) MarshalJSON() ([]byte, error) { +func (s LaunchFlexTemplateParameter) MarshalJSON() ([]byte, error) { type NoMethod LaunchFlexTemplateParameter - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LaunchFlexTemplateRequest: A request to launch a Cloud Dataflow job from a @@ -3061,9 +3061,9 @@ type LaunchFlexTemplateRequest struct { NullFields []string `json:"-"` } -func (s *LaunchFlexTemplateRequest) MarshalJSON() ([]byte, error) { +func (s LaunchFlexTemplateRequest) MarshalJSON() ([]byte, error) { type NoMethod LaunchFlexTemplateRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LaunchFlexTemplateResponse: Response to the request to launch a job from @@ -3088,9 +3088,9 @@ type LaunchFlexTemplateResponse struct { NullFields []string `json:"-"` } -func (s *LaunchFlexTemplateResponse) MarshalJSON() ([]byte, error) { +func (s LaunchFlexTemplateResponse) MarshalJSON() ([]byte, error) { type NoMethod LaunchFlexTemplateResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LaunchTemplateParameters: Parameters to provide to the template being @@ -3125,9 +3125,9 @@ type LaunchTemplateParameters struct { NullFields []string `json:"-"` } -func (s *LaunchTemplateParameters) MarshalJSON() ([]byte, error) { +func (s LaunchTemplateParameters) MarshalJSON() ([]byte, error) { type NoMethod LaunchTemplateParameters - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LaunchTemplateResponse: Response to the request to launch a template. @@ -3151,9 +3151,9 @@ type LaunchTemplateResponse struct { NullFields []string `json:"-"` } -func (s *LaunchTemplateResponse) MarshalJSON() ([]byte, error) { +func (s LaunchTemplateResponse) MarshalJSON() ([]byte, error) { type NoMethod LaunchTemplateResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LeaseWorkItemRequest: Request to lease WorkItems. @@ -3164,6 +3164,9 @@ type LeaseWorkItemRequest struct { // (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that // contains the WorkItem's job. Location string `json:"location,omitempty"` + // ProjectNumber: Optional. The project number of the project this worker + // belongs to. + ProjectNumber int64 `json:"projectNumber,omitempty,string"` // RequestedLeaseDuration: The initial lease period. RequestedLeaseDuration string `json:"requestedLeaseDuration,omitempty"` // UnifiedWorkerRequest: Untranslated bag-of-bytes WorkRequest from @@ -3190,9 +3193,9 @@ type LeaseWorkItemRequest struct { NullFields []string `json:"-"` } -func (s *LeaseWorkItemRequest) MarshalJSON() ([]byte, error) { +func (s LeaseWorkItemRequest) MarshalJSON() ([]byte, error) { type NoMethod LeaseWorkItemRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LeaseWorkItemResponse: Response to a request to lease WorkItems. 
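The ProjectNumber field added here carries the `,string` option in its JSON tag, the convention generated Google clients use for 64-bit integers: encoding/json serializes the int64 as a quoted decimal string so the value survives JavaScript's float64 number type intact. A runnable sketch with a local stand-in type:

package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in mirroring the tag on LeaseWorkItemRequest.ProjectNumber.
type leaseReq struct {
	ProjectNumber int64 `json:"projectNumber,omitempty,string"`
}

func main() {
	b, _ := json.Marshal(leaseReq{ProjectNumber: 123456789012345})
	fmt.Println(string(b)) // {"projectNumber":"123456789012345"}
}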
@@ -3218,9 +3221,9 @@ type LeaseWorkItemResponse struct { NullFields []string `json:"-"` } -func (s *LeaseWorkItemResponse) MarshalJSON() ([]byte, error) { +func (s LeaseWorkItemResponse) MarshalJSON() ([]byte, error) { type NoMethod LeaseWorkItemResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Linear: Linear buckets with the following boundaries for indices in 0 to @@ -3245,9 +3248,9 @@ type Linear struct { NullFields []string `json:"-"` } -func (s *Linear) MarshalJSON() ([]byte, error) { +func (s Linear) MarshalJSON() ([]byte, error) { type NoMethod Linear - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *Linear) UnmarshalJSON(data []byte) error { @@ -3291,9 +3294,9 @@ type ListJobMessagesResponse struct { NullFields []string `json:"-"` } -func (s *ListJobMessagesResponse) MarshalJSON() ([]byte, error) { +func (s ListJobMessagesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListJobMessagesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListJobsResponse: Response to a request to list Cloud Dataflow jobs in a @@ -3326,9 +3329,9 @@ type ListJobsResponse struct { NullFields []string `json:"-"` } -func (s *ListJobsResponse) MarshalJSON() ([]byte, error) { +func (s ListJobsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListJobsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListSnapshotsResponse: List of snapshots. 
@@ -3351,9 +3354,9 @@ type ListSnapshotsResponse struct { NullFields []string `json:"-"` } -func (s *ListSnapshotsResponse) MarshalJSON() ([]byte, error) { +func (s ListSnapshotsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListSnapshotsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MapTask: MapTask consists of an ordered set of instructions, each of which @@ -3384,9 +3387,9 @@ type MapTask struct { NullFields []string `json:"-"` } -func (s *MapTask) MarshalJSON() ([]byte, error) { +func (s MapTask) MarshalJSON() ([]byte, error) { type NoMethod MapTask - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MemInfo: Information about the memory usage of a worker or a container @@ -3416,9 +3419,9 @@ type MemInfo struct { NullFields []string `json:"-"` } -func (s *MemInfo) MarshalJSON() ([]byte, error) { +func (s MemInfo) MarshalJSON() ([]byte, error) { type NoMethod MemInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MetricShortId: The metric short id is returned to the user alongside an @@ -3442,9 +3445,9 @@ type MetricShortId struct { NullFields []string `json:"-"` } -func (s *MetricShortId) MarshalJSON() ([]byte, error) { +func (s MetricShortId) MarshalJSON() ([]byte, error) { type NoMethod MetricShortId - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MetricStructuredName: Identifies a metric, by describing the source which @@ -3475,9 +3478,9 @@ type MetricStructuredName struct { NullFields []string `json:"-"` } -func (s *MetricStructuredName) MarshalJSON() ([]byte, error) { +func (s MetricStructuredName) MarshalJSON() ([]byte, error) { type NoMethod MetricStructuredName - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MetricUpdate: Describes the state of a metric. @@ -3540,9 +3543,9 @@ type MetricUpdate struct { NullFields []string `json:"-"` } -func (s *MetricUpdate) MarshalJSON() ([]byte, error) { +func (s MetricUpdate) MarshalJSON() ([]byte, error) { type NoMethod MetricUpdate - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MetricValue: The value of a metric along with its name and labels. @@ -3568,9 +3571,9 @@ type MetricValue struct { NullFields []string `json:"-"` } -func (s *MetricValue) MarshalJSON() ([]byte, error) { +func (s MetricValue) MarshalJSON() ([]byte, error) { type NoMethod MetricValue - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MountedDataDisk: Describes mounted data disk. 
@@ -3592,9 +3595,9 @@ type MountedDataDisk struct { NullFields []string `json:"-"` } -func (s *MountedDataDisk) MarshalJSON() ([]byte, error) { +func (s MountedDataDisk) MarshalJSON() ([]byte, error) { type NoMethod MountedDataDisk - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MultiOutputInfo: Information about an output of a multi-output DoFn. @@ -3615,9 +3618,9 @@ type MultiOutputInfo struct { NullFields []string `json:"-"` } -func (s *MultiOutputInfo) MarshalJSON() ([]byte, error) { +func (s MultiOutputInfo) MarshalJSON() ([]byte, error) { type NoMethod MultiOutputInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NameAndKind: Basic metadata about a counter. @@ -3654,9 +3657,9 @@ type NameAndKind struct { NullFields []string `json:"-"` } -func (s *NameAndKind) MarshalJSON() ([]byte, error) { +func (s NameAndKind) MarshalJSON() ([]byte, error) { type NoMethod NameAndKind - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OutlierStats: Statistics for the underflow and overflow bucket. @@ -3684,9 +3687,9 @@ type OutlierStats struct { NullFields []string `json:"-"` } -func (s *OutlierStats) MarshalJSON() ([]byte, error) { +func (s OutlierStats) MarshalJSON() ([]byte, error) { type NoMethod OutlierStats - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *OutlierStats) UnmarshalJSON(data []byte) error { @@ -3732,9 +3735,9 @@ type Package struct { NullFields []string `json:"-"` } -func (s *Package) MarshalJSON() ([]byte, error) { +func (s Package) MarshalJSON() ([]byte, error) { type NoMethod Package - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ParDoInstruction: An instruction that does a ParDo operation. Takes one main @@ -3765,9 +3768,9 @@ type ParDoInstruction struct { NullFields []string `json:"-"` } -func (s *ParDoInstruction) MarshalJSON() ([]byte, error) { +func (s ParDoInstruction) MarshalJSON() ([]byte, error) { type NoMethod ParDoInstruction - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ParallelInstruction: Describes a particular operation comprising a MapTask. @@ -3806,9 +3809,9 @@ type ParallelInstruction struct { NullFields []string `json:"-"` } -func (s *ParallelInstruction) MarshalJSON() ([]byte, error) { +func (s ParallelInstruction) MarshalJSON() ([]byte, error) { type NoMethod ParallelInstruction - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Parameter: Structured data associated with this message. 
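The UnmarshalJSON methods in these hunks (Linear, OutlierStats, and the other float-bearing types) deliberately keep their pointer receivers even as the MarshalJSON methods move to value receivers: decoding has to write through the receiver, so only *T can usefully satisfy json.Unmarshaler. A sketch with an illustrative type:

package main

import (
	"encoding/json"
	"fmt"
)

type stats struct {
	Mean float64 `json:"mean"`
}

// Pointer receiver is required here: the method mutates *s.
func (s *stats) UnmarshalJSON(data []byte) error {
	type NoMethod stats
	var aux NoMethod
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}
	*s = stats(aux)
	return nil
}

func main() {
	var v stats
	_ = json.Unmarshal([]byte(`{"mean": 1.5}`), &v)
	fmt.Println(v.Mean)
}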
@@ -3830,9 +3833,9 @@ type Parameter struct { NullFields []string `json:"-"` } -func (s *Parameter) MarshalJSON() ([]byte, error) { +func (s Parameter) MarshalJSON() ([]byte, error) { type NoMethod Parameter - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ParameterMetadata: Metadata for a specific parameter. @@ -3927,9 +3930,9 @@ type ParameterMetadata struct { NullFields []string `json:"-"` } -func (s *ParameterMetadata) MarshalJSON() ([]byte, error) { +func (s ParameterMetadata) MarshalJSON() ([]byte, error) { type NoMethod ParameterMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ParameterMetadataEnumOption: ParameterMetadataEnumOption specifies the @@ -3954,9 +3957,9 @@ type ParameterMetadataEnumOption struct { NullFields []string `json:"-"` } -func (s *ParameterMetadataEnumOption) MarshalJSON() ([]byte, error) { +func (s ParameterMetadataEnumOption) MarshalJSON() ([]byte, error) { type NoMethod ParameterMetadataEnumOption - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PartialGroupByKeyInstruction: An instruction that does a partial @@ -3992,9 +3995,9 @@ type PartialGroupByKeyInstruction struct { NullFields []string `json:"-"` } -func (s *PartialGroupByKeyInstruction) MarshalJSON() ([]byte, error) { +func (s PartialGroupByKeyInstruction) MarshalJSON() ([]byte, error) { type NoMethod PartialGroupByKeyInstruction - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PerStepNamespaceMetrics: Metrics for a particular unfused step and @@ -4022,9 +4025,9 @@ type PerStepNamespaceMetrics struct { NullFields []string `json:"-"` } -func (s *PerStepNamespaceMetrics) MarshalJSON() ([]byte, error) { +func (s PerStepNamespaceMetrics) MarshalJSON() ([]byte, error) { type NoMethod PerStepNamespaceMetrics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PerWorkerMetrics: Per worker metrics. @@ -4045,9 +4048,9 @@ type PerWorkerMetrics struct { NullFields []string `json:"-"` } -func (s *PerWorkerMetrics) MarshalJSON() ([]byte, error) { +func (s PerWorkerMetrics) MarshalJSON() ([]byte, error) { type NoMethod PerWorkerMetrics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PipelineDescription: A descriptive representation of submitted pipeline as @@ -4078,9 +4081,9 @@ type PipelineDescription struct { NullFields []string `json:"-"` } -func (s *PipelineDescription) MarshalJSON() ([]byte, error) { +func (s PipelineDescription) MarshalJSON() ([]byte, error) { type NoMethod PipelineDescription - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Point: A point in the timeseries. 
@@ -4102,9 +4105,9 @@ type Point struct { NullFields []string `json:"-"` } -func (s *Point) MarshalJSON() ([]byte, error) { +func (s Point) MarshalJSON() ([]byte, error) { type NoMethod Point - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *Point) UnmarshalJSON(data []byte) error { @@ -4152,9 +4155,9 @@ type Position struct { NullFields []string `json:"-"` } -func (s *Position) MarshalJSON() ([]byte, error) { +func (s Position) MarshalJSON() ([]byte, error) { type NoMethod Position - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ProgressTimeseries: Information about the progress of some component of job @@ -4178,9 +4181,9 @@ type ProgressTimeseries struct { NullFields []string `json:"-"` } -func (s *ProgressTimeseries) MarshalJSON() ([]byte, error) { +func (s ProgressTimeseries) MarshalJSON() ([]byte, error) { type NoMethod ProgressTimeseries - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *ProgressTimeseries) UnmarshalJSON(data []byte) error { @@ -4216,9 +4219,9 @@ type PubSubIODetails struct { NullFields []string `json:"-"` } -func (s *PubSubIODetails) MarshalJSON() ([]byte, error) { +func (s PubSubIODetails) MarshalJSON() ([]byte, error) { type NoMethod PubSubIODetails - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PubsubLocation: Identifies a pubsub location to use for transferring data @@ -4258,9 +4261,9 @@ type PubsubLocation struct { NullFields []string `json:"-"` } -func (s *PubsubLocation) MarshalJSON() ([]byte, error) { +func (s PubsubLocation) MarshalJSON() ([]byte, error) { type NoMethod PubsubLocation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PubsubSnapshotMetadata: Represents a Pubsub snapshot. @@ -4284,9 +4287,9 @@ type PubsubSnapshotMetadata struct { NullFields []string `json:"-"` } -func (s *PubsubSnapshotMetadata) MarshalJSON() ([]byte, error) { +func (s PubsubSnapshotMetadata) MarshalJSON() ([]byte, error) { type NoMethod PubsubSnapshotMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ReadInstruction: An instruction that reads records. Takes no inputs, @@ -4307,9 +4310,9 @@ type ReadInstruction struct { NullFields []string `json:"-"` } -func (s *ReadInstruction) MarshalJSON() ([]byte, error) { +func (s ReadInstruction) MarshalJSON() ([]byte, error) { type NoMethod ReadInstruction - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ReportWorkItemStatusRequest: Request to report the status of WorkItems. @@ -4320,6 +4323,9 @@ type ReportWorkItemStatusRequest struct { // (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that // contains the WorkItem's job. Location string `json:"location,omitempty"` + // ProjectNumber: Optional. The project number of the project which owns the + // WorkItem's job. 
+ ProjectNumber int64 `json:"projectNumber,omitempty,string"` // UnifiedWorkerRequest: Untranslated bag-of-bytes WorkProgressUpdateRequest // from UnifiedWorker. UnifiedWorkerRequest googleapi.RawMessage `json:"unifiedWorkerRequest,omitempty"` @@ -4345,9 +4351,9 @@ type ReportWorkItemStatusRequest struct { NullFields []string `json:"-"` } -func (s *ReportWorkItemStatusRequest) MarshalJSON() ([]byte, error) { +func (s ReportWorkItemStatusRequest) MarshalJSON() ([]byte, error) { type NoMethod ReportWorkItemStatusRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ReportWorkItemStatusResponse: Response from a request to report the status @@ -4377,9 +4383,9 @@ type ReportWorkItemStatusResponse struct { NullFields []string `json:"-"` } -func (s *ReportWorkItemStatusResponse) MarshalJSON() ([]byte, error) { +func (s ReportWorkItemStatusResponse) MarshalJSON() ([]byte, error) { type NoMethod ReportWorkItemStatusResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ReportedParallelism: Represents the level of parallelism in a WorkItem's @@ -4406,9 +4412,9 @@ type ReportedParallelism struct { NullFields []string `json:"-"` } -func (s *ReportedParallelism) MarshalJSON() ([]byte, error) { +func (s ReportedParallelism) MarshalJSON() ([]byte, error) { type NoMethod ReportedParallelism - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *ReportedParallelism) UnmarshalJSON(data []byte) error { @@ -4448,9 +4454,9 @@ type ResourceUtilizationReport struct { NullFields []string `json:"-"` } -func (s *ResourceUtilizationReport) MarshalJSON() ([]byte, error) { +func (s ResourceUtilizationReport) MarshalJSON() ([]byte, error) { type NoMethod ResourceUtilizationReport - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourceUtilizationReportResponse: Service-side response to WorkerMessage @@ -4564,9 +4570,9 @@ type RuntimeEnvironment struct { NullFields []string `json:"-"` } -func (s *RuntimeEnvironment) MarshalJSON() ([]byte, error) { +func (s RuntimeEnvironment) MarshalJSON() ([]byte, error) { type NoMethod RuntimeEnvironment - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RuntimeMetadata: RuntimeMetadata describing a runtime environment. 
@@ -4588,9 +4594,9 @@ type RuntimeMetadata struct { NullFields []string `json:"-"` } -func (s *RuntimeMetadata) MarshalJSON() ([]byte, error) { +func (s RuntimeMetadata) MarshalJSON() ([]byte, error) { type NoMethod RuntimeMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RuntimeUpdatableParams: Additional job parameters that can only be updated @@ -4622,9 +4628,9 @@ type RuntimeUpdatableParams struct { NullFields []string `json:"-"` } -func (s *RuntimeUpdatableParams) MarshalJSON() ([]byte, error) { +func (s RuntimeUpdatableParams) MarshalJSON() ([]byte, error) { type NoMethod RuntimeUpdatableParams - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *RuntimeUpdatableParams) UnmarshalJSON(data []byte) error { @@ -4666,9 +4672,9 @@ type SDKInfo struct { NullFields []string `json:"-"` } -func (s *SDKInfo) MarshalJSON() ([]byte, error) { +func (s SDKInfo) MarshalJSON() ([]byte, error) { type NoMethod SDKInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SdkBug: A bug found in the Dataflow SDK. @@ -4709,9 +4715,9 @@ type SdkBug struct { NullFields []string `json:"-"` } -func (s *SdkBug) MarshalJSON() ([]byte, error) { +func (s SdkBug) MarshalJSON() ([]byte, error) { type NoMethod SdkBug - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SdkHarnessContainerImage: Defines an SDK harness container for executing @@ -4746,9 +4752,9 @@ type SdkHarnessContainerImage struct { NullFields []string `json:"-"` } -func (s *SdkHarnessContainerImage) MarshalJSON() ([]byte, error) { +func (s SdkHarnessContainerImage) MarshalJSON() ([]byte, error) { type NoMethod SdkHarnessContainerImage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SdkVersion: The version of the SDK used to run the job. @@ -4784,9 +4790,9 @@ type SdkVersion struct { NullFields []string `json:"-"` } -func (s *SdkVersion) MarshalJSON() ([]byte, error) { +func (s SdkVersion) MarshalJSON() ([]byte, error) { type NoMethod SdkVersion - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SendDebugCaptureRequest: Request to send encoded debug information. Next ID: @@ -4826,9 +4832,9 @@ type SendDebugCaptureRequest struct { NullFields []string `json:"-"` } -func (s *SendDebugCaptureRequest) MarshalJSON() ([]byte, error) { +func (s SendDebugCaptureRequest) MarshalJSON() ([]byte, error) { type NoMethod SendDebugCaptureRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SendDebugCaptureResponse: Response to a send capture request. 
nothing @@ -4859,9 +4865,9 @@ type SendWorkerMessagesRequest struct { NullFields []string `json:"-"` } -func (s *SendWorkerMessagesRequest) MarshalJSON() ([]byte, error) { +func (s SendWorkerMessagesRequest) MarshalJSON() ([]byte, error) { type NoMethod SendWorkerMessagesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SendWorkerMessagesResponse: The response to the worker messages. @@ -4884,9 +4890,9 @@ type SendWorkerMessagesResponse struct { NullFields []string `json:"-"` } -func (s *SendWorkerMessagesResponse) MarshalJSON() ([]byte, error) { +func (s SendWorkerMessagesResponse) MarshalJSON() ([]byte, error) { type NoMethod SendWorkerMessagesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SeqMapTask: Describes a particular function to invoke. @@ -4918,9 +4924,9 @@ type SeqMapTask struct { NullFields []string `json:"-"` } -func (s *SeqMapTask) MarshalJSON() ([]byte, error) { +func (s SeqMapTask) MarshalJSON() ([]byte, error) { type NoMethod SeqMapTask - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SeqMapTaskOutputInfo: Information about an output of a SeqMapTask. @@ -4942,9 +4948,32 @@ type SeqMapTaskOutputInfo struct { NullFields []string `json:"-"` } -func (s *SeqMapTaskOutputInfo) MarshalJSON() ([]byte, error) { +func (s SeqMapTaskOutputInfo) MarshalJSON() ([]byte, error) { type NoMethod SeqMapTaskOutputInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ServiceResources: Resources used by the Dataflow Service to run the job. +type ServiceResources struct { + // Zones: Output only. List of Cloud Zones being used by the Dataflow Service + // for this job. Example: us-central1-c + Zones []string `json:"zones,omitempty"` + // ForceSendFields is a list of field names (e.g. "Zones") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Zones") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s ServiceResources) MarshalJSON() ([]byte, error) { + type NoMethod ServiceResources + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ShellTask: A task which consists of a shell command for the worker to @@ -4967,9 +4996,9 @@ type ShellTask struct { NullFields []string `json:"-"` } -func (s *ShellTask) MarshalJSON() ([]byte, error) { +func (s ShellTask) MarshalJSON() ([]byte, error) { type NoMethod ShellTask - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SideInputInfo: Information about a side input of a DoFn or an input of a @@ -4998,9 +5027,9 @@ type SideInputInfo struct { NullFields []string `json:"-"` } -func (s *SideInputInfo) MarshalJSON() ([]byte, error) { +func (s SideInputInfo) MarshalJSON() ([]byte, error) { type NoMethod SideInputInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Sink: A sink that records can be encoded and written to. @@ -5022,9 +5051,9 @@ type Sink struct { NullFields []string `json:"-"` } -func (s *Sink) MarshalJSON() ([]byte, error) { +func (s Sink) MarshalJSON() ([]byte, error) { type NoMethod Sink - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Snapshot: Represents a snapshot of a job. @@ -5075,9 +5104,9 @@ type Snapshot struct { NullFields []string `json:"-"` } -func (s *Snapshot) MarshalJSON() ([]byte, error) { +func (s Snapshot) MarshalJSON() ([]byte, error) { type NoMethod Snapshot - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SnapshotJobRequest: Request to create a snapshot of a job. @@ -5103,9 +5132,9 @@ type SnapshotJobRequest struct { NullFields []string `json:"-"` } -func (s *SnapshotJobRequest) MarshalJSON() ([]byte, error) { +func (s SnapshotJobRequest) MarshalJSON() ([]byte, error) { type NoMethod SnapshotJobRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Source: A source that records can be read and decoded from. @@ -5152,9 +5181,9 @@ type Source struct { NullFields []string `json:"-"` } -func (s *Source) MarshalJSON() ([]byte, error) { +func (s Source) MarshalJSON() ([]byte, error) { type NoMethod Source - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SourceFork: DEPRECATED in favor of DynamicSourceSplit. 
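ServiceResources is marked output only, so callers never set it; they read it off a Job returned by the service, guarding against nil since the field is omitted until the service populates it. A hypothetical consumer-side sketch with local stand-ins for the generated types (field names match the patch, everything else is illustrative):

package main

import "fmt"

type ServiceResources struct {
	Zones []string `json:"zones,omitempty"`
}

type Job struct {
	ServiceResources *ServiceResources `json:"serviceResources,omitempty"`
}

func main() {
	job := Job{ServiceResources: &ServiceResources{Zones: []string{"us-central1-c"}}}
	// Output-only pointer field: nil until the service reports resources.
	if job.ServiceResources != nil {
		for _, z := range job.ServiceResources.Zones {
			fmt.Println("Dataflow service zone:", z)
		}
	}
}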
@@ -5180,9 +5209,9 @@ type SourceFork struct { NullFields []string `json:"-"` } -func (s *SourceFork) MarshalJSON() ([]byte, error) { +func (s SourceFork) MarshalJSON() ([]byte, error) { type NoMethod SourceFork - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SourceGetMetadataRequest: A request to compute the SourceMetadata of a @@ -5203,9 +5232,9 @@ type SourceGetMetadataRequest struct { NullFields []string `json:"-"` } -func (s *SourceGetMetadataRequest) MarshalJSON() ([]byte, error) { +func (s SourceGetMetadataRequest) MarshalJSON() ([]byte, error) { type NoMethod SourceGetMetadataRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SourceGetMetadataResponse: The result of a SourceGetMetadataOperation. @@ -5225,9 +5254,9 @@ type SourceGetMetadataResponse struct { NullFields []string `json:"-"` } -func (s *SourceGetMetadataResponse) MarshalJSON() ([]byte, error) { +func (s SourceGetMetadataResponse) MarshalJSON() ([]byte, error) { type NoMethod SourceGetMetadataResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SourceMetadata: Metadata about a Source useful for automatically optimizing @@ -5257,9 +5286,9 @@ type SourceMetadata struct { NullFields []string `json:"-"` } -func (s *SourceMetadata) MarshalJSON() ([]byte, error) { +func (s SourceMetadata) MarshalJSON() ([]byte, error) { type NoMethod SourceMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SourceOperationRequest: A work item that represents the different operations @@ -5293,9 +5322,9 @@ type SourceOperationRequest struct { NullFields []string `json:"-"` } -func (s *SourceOperationRequest) MarshalJSON() ([]byte, error) { +func (s SourceOperationRequest) MarshalJSON() ([]byte, error) { type NoMethod SourceOperationRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SourceOperationResponse: The result of a SourceOperationRequest, specified @@ -5319,9 +5348,9 @@ type SourceOperationResponse struct { NullFields []string `json:"-"` } -func (s *SourceOperationResponse) MarshalJSON() ([]byte, error) { +func (s SourceOperationResponse) MarshalJSON() ([]byte, error) { type NoMethod SourceOperationResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SourceSplitOptions: Hints for splitting a Source into bundles (parts for @@ -5345,9 +5374,9 @@ type SourceSplitOptions struct { NullFields []string `json:"-"` } -func (s *SourceSplitOptions) MarshalJSON() ([]byte, error) { +func (s SourceSplitOptions) MarshalJSON() ([]byte, error) { type NoMethod SourceSplitOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SourceSplitRequest: Represents the operation to split a high-level Source @@ -5379,9 +5408,9 @@ type SourceSplitRequest struct { NullFields []string `json:"-"` } -func (s *SourceSplitRequest) MarshalJSON() ([]byte, error) { +func 
(s SourceSplitRequest) MarshalJSON() ([]byte, error) { type NoMethod SourceSplitRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SourceSplitResponse: The response to a SourceSplitRequest. @@ -5419,9 +5448,9 @@ type SourceSplitResponse struct { NullFields []string `json:"-"` } -func (s *SourceSplitResponse) MarshalJSON() ([]byte, error) { +func (s SourceSplitResponse) MarshalJSON() ([]byte, error) { type NoMethod SourceSplitResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SourceSplitShard: DEPRECATED in favor of DerivedSource. @@ -5453,9 +5482,9 @@ type SourceSplitShard struct { NullFields []string `json:"-"` } -func (s *SourceSplitShard) MarshalJSON() ([]byte, error) { +func (s SourceSplitShard) MarshalJSON() ([]byte, error) { type NoMethod SourceSplitShard - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SpannerIODetails: Metadata for a Spanner connector used by the job. @@ -5479,9 +5508,9 @@ type SpannerIODetails struct { NullFields []string `json:"-"` } -func (s *SpannerIODetails) MarshalJSON() ([]byte, error) { +func (s SpannerIODetails) MarshalJSON() ([]byte, error) { type NoMethod SpannerIODetails - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SplitInt64: A representation of an int64, n, that is immune to precision @@ -5504,9 +5533,9 @@ type SplitInt64 struct { NullFields []string `json:"-"` } -func (s *SplitInt64) MarshalJSON() ([]byte, error) { +func (s SplitInt64) MarshalJSON() ([]byte, error) { type NoMethod SplitInt64 - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StageExecutionDetails: Information about the workers and work items within a @@ -5534,9 +5563,9 @@ type StageExecutionDetails struct { NullFields []string `json:"-"` } -func (s *StageExecutionDetails) MarshalJSON() ([]byte, error) { +func (s StageExecutionDetails) MarshalJSON() ([]byte, error) { type NoMethod StageExecutionDetails - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StageSource: Description of an input or output of an execution stage. @@ -5564,9 +5593,9 @@ type StageSource struct { NullFields []string `json:"-"` } -func (s *StageSource) MarshalJSON() ([]byte, error) { +func (s StageSource) MarshalJSON() ([]byte, error) { type NoMethod StageSource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StageSummary: Information about a particular execution stage of a job. @@ -5607,9 +5636,9 @@ type StageSummary struct { NullFields []string `json:"-"` } -func (s *StageSummary) MarshalJSON() ([]byte, error) { +func (s StageSummary) MarshalJSON() ([]byte, error) { type NoMethod StageSummary - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StateFamilyConfig: State family configuration. 
@@ -5631,9 +5660,9 @@ type StateFamilyConfig struct { NullFields []string `json:"-"` } -func (s *StateFamilyConfig) MarshalJSON() ([]byte, error) { +func (s StateFamilyConfig) MarshalJSON() ([]byte, error) { type NoMethod StateFamilyConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Status: The `Status` type defines a logical error model that is suitable for @@ -5665,9 +5694,9 @@ type Status struct { NullFields []string `json:"-"` } -func (s *Status) MarshalJSON() ([]byte, error) { +func (s Status) MarshalJSON() ([]byte, error) { type NoMethod Status - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Step: Defines a particular step within a Cloud Dataflow job. A job consists @@ -5706,9 +5735,9 @@ type Step struct { NullFields []string `json:"-"` } -func (s *Step) MarshalJSON() ([]byte, error) { +func (s Step) MarshalJSON() ([]byte, error) { type NoMethod Step - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Straggler: Information for a straggler. @@ -5731,9 +5760,9 @@ type Straggler struct { NullFields []string `json:"-"` } -func (s *Straggler) MarshalJSON() ([]byte, error) { +func (s Straggler) MarshalJSON() ([]byte, error) { type NoMethod Straggler - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StragglerDebuggingInfo: Information useful for debugging a straggler. Each @@ -5756,9 +5785,9 @@ type StragglerDebuggingInfo struct { NullFields []string `json:"-"` } -func (s *StragglerDebuggingInfo) MarshalJSON() ([]byte, error) { +func (s StragglerDebuggingInfo) MarshalJSON() ([]byte, error) { type NoMethod StragglerDebuggingInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StragglerInfo: Information useful for straggler identification and @@ -5783,9 +5812,9 @@ type StragglerInfo struct { NullFields []string `json:"-"` } -func (s *StragglerInfo) MarshalJSON() ([]byte, error) { +func (s StragglerInfo) MarshalJSON() ([]byte, error) { type NoMethod StragglerInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StragglerSummary: Summarized straggler identification details. 
@@ -5810,9 +5839,9 @@ type StragglerSummary struct { NullFields []string `json:"-"` } -func (s *StragglerSummary) MarshalJSON() ([]byte, error) { +func (s StragglerSummary) MarshalJSON() ([]byte, error) { type NoMethod StragglerSummary - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StreamLocation: Describes a stream of data, either as input to be processed @@ -5840,9 +5869,9 @@ type StreamLocation struct { NullFields []string `json:"-"` } -func (s *StreamLocation) MarshalJSON() ([]byte, error) { +func (s StreamLocation) MarshalJSON() ([]byte, error) { type NoMethod StreamLocation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StreamingApplianceSnapshotConfig: Streaming appliance snapshot @@ -5867,9 +5896,9 @@ type StreamingApplianceSnapshotConfig struct { NullFields []string `json:"-"` } -func (s *StreamingApplianceSnapshotConfig) MarshalJSON() ([]byte, error) { +func (s StreamingApplianceSnapshotConfig) MarshalJSON() ([]byte, error) { type NoMethod StreamingApplianceSnapshotConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StreamingComputationConfig: Configuration information for a single streaming @@ -5899,9 +5928,9 @@ type StreamingComputationConfig struct { NullFields []string `json:"-"` } -func (s *StreamingComputationConfig) MarshalJSON() ([]byte, error) { +func (s StreamingComputationConfig) MarshalJSON() ([]byte, error) { type NoMethod StreamingComputationConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StreamingComputationRanges: Describes full or partial data disk assignment @@ -5924,9 +5953,9 @@ type StreamingComputationRanges struct { NullFields []string `json:"-"` } -func (s *StreamingComputationRanges) MarshalJSON() ([]byte, error) { +func (s StreamingComputationRanges) MarshalJSON() ([]byte, error) { type NoMethod StreamingComputationRanges - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StreamingComputationTask: A task which describes what action should be @@ -5960,9 +5989,9 @@ type StreamingComputationTask struct { NullFields []string `json:"-"` } -func (s *StreamingComputationTask) MarshalJSON() ([]byte, error) { +func (s StreamingComputationTask) MarshalJSON() ([]byte, error) { type NoMethod StreamingComputationTask - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StreamingConfigTask: A task that carries configuration information for @@ -5984,6 +6013,12 @@ type StreamingConfigTask struct { StreamingComputationConfigs []*StreamingComputationConfig `json:"streamingComputationConfigs,omitempty"` // UserStepToStateFamilyNameMap: Map from user step names to state families. UserStepToStateFamilyNameMap map[string]string `json:"userStepToStateFamilyNameMap,omitempty"` + // UserWorkerRunnerV1Settings: Binary encoded proto to control runtime behavior + // of the java runner v1 user worker. 
+ UserWorkerRunnerV1Settings string `json:"userWorkerRunnerV1Settings,omitempty"` + // UserWorkerRunnerV2Settings: Binary encoded proto to control runtime behavior + // of the runner v2 user worker. + UserWorkerRunnerV2Settings string `json:"userWorkerRunnerV2Settings,omitempty"` // WindmillServiceEndpoint: If present, the worker must use this endpoint to // communicate with Windmill Service dispatchers, otherwise the worker must // continue to use whatever endpoint it had been using. @@ -6005,9 +6040,9 @@ type StreamingConfigTask struct { NullFields []string `json:"-"` } -func (s *StreamingConfigTask) MarshalJSON() ([]byte, error) { +func (s StreamingConfigTask) MarshalJSON() ([]byte, error) { type NoMethod StreamingConfigTask - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StreamingOperationalLimits: Operational limits imposed on streaming jobs by @@ -6043,9 +6078,9 @@ type StreamingOperationalLimits struct { NullFields []string `json:"-"` } -func (s *StreamingOperationalLimits) MarshalJSON() ([]byte, error) { +func (s StreamingOperationalLimits) MarshalJSON() ([]byte, error) { type NoMethod StreamingOperationalLimits - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StreamingScalingReport: Contains per-user worker telemetry used in streaming @@ -6079,9 +6114,9 @@ type StreamingScalingReport struct { NullFields []string `json:"-"` } -func (s *StreamingScalingReport) MarshalJSON() ([]byte, error) { +func (s StreamingScalingReport) MarshalJSON() ([]byte, error) { type NoMethod StreamingScalingReport - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StreamingScalingReportResponse: Contains per-user-worker streaming scaling @@ -6102,9 +6137,9 @@ type StreamingScalingReportResponse struct { NullFields []string `json:"-"` } -func (s *StreamingScalingReportResponse) MarshalJSON() ([]byte, error) { +func (s StreamingScalingReportResponse) MarshalJSON() ([]byte, error) { type NoMethod StreamingScalingReportResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StreamingSetupTask: A task which initializes part of a streaming Dataflow @@ -6136,9 +6171,9 @@ type StreamingSetupTask struct { NullFields []string `json:"-"` } -func (s *StreamingSetupTask) MarshalJSON() ([]byte, error) { +func (s StreamingSetupTask) MarshalJSON() ([]byte, error) { type NoMethod StreamingSetupTask - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StreamingSideInputLocation: Identifies the location of a streaming side @@ -6161,9 +6196,9 @@ type StreamingSideInputLocation struct { NullFields []string `json:"-"` } -func (s *StreamingSideInputLocation) MarshalJSON() ([]byte, error) { +func (s StreamingSideInputLocation) MarshalJSON() ([]byte, error) { type NoMethod StreamingSideInputLocation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StreamingStageLocation: Identifies the location of a streaming computation @@ -6185,9 +6220,9 @@ type 
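Both UserWorkerRunner*Settings fields are declared as string yet documented as binary encoded protos; assuming they follow the usual discovery-format "byte" convention, the bytes travel base64-encoded inside the JSON string, and the concrete proto schema is not part of this vendored surface. A hedged decoding sketch that stops at raw bytes:

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Stand-in payload; a real StreamingConfigTask would supply this value.
	// Assumption: standard base64, per the discovery "byte" format.
	settings := base64.StdEncoding.EncodeToString([]byte{0x08, 0x01})
	raw, err := base64.StdEncoding.DecodeString(settings)
	if err != nil {
		panic(err)
	}
	fmt.Printf("runner settings payload: % x\n", raw) // next step: proto.Unmarshal
}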
StreamingStageLocation struct { NullFields []string `json:"-"` } -func (s *StreamingStageLocation) MarshalJSON() ([]byte, error) { +func (s StreamingStageLocation) MarshalJSON() ([]byte, error) { type NoMethod StreamingStageLocation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StreamingStragglerInfo: Information useful for streaming straggler @@ -6218,9 +6253,9 @@ type StreamingStragglerInfo struct { NullFields []string `json:"-"` } -func (s *StreamingStragglerInfo) MarshalJSON() ([]byte, error) { +func (s StreamingStragglerInfo) MarshalJSON() ([]byte, error) { type NoMethod StreamingStragglerInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StringList: A metric value representing a list of strings. @@ -6240,9 +6275,9 @@ type StringList struct { NullFields []string `json:"-"` } -func (s *StringList) MarshalJSON() ([]byte, error) { +func (s StringList) MarshalJSON() ([]byte, error) { type NoMethod StringList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StructuredMessage: A rich message format, including a human readable string, @@ -6269,9 +6304,9 @@ type StructuredMessage struct { NullFields []string `json:"-"` } -func (s *StructuredMessage) MarshalJSON() ([]byte, error) { +func (s StructuredMessage) MarshalJSON() ([]byte, error) { type NoMethod StructuredMessage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TaskRunnerSettings: Taskrunner configuration settings. @@ -6342,9 +6377,9 @@ type TaskRunnerSettings struct { NullFields []string `json:"-"` } -func (s *TaskRunnerSettings) MarshalJSON() ([]byte, error) { +func (s TaskRunnerSettings) MarshalJSON() ([]byte, error) { type NoMethod TaskRunnerSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TemplateMetadata: Metadata describing a template. 
@@ -6381,9 +6416,9 @@ type TemplateMetadata struct { NullFields []string `json:"-"` } -func (s *TemplateMetadata) MarshalJSON() ([]byte, error) { +func (s TemplateMetadata) MarshalJSON() ([]byte, error) { type NoMethod TemplateMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TopologyConfig: Global topology of the streaming Dataflow job, including all @@ -6414,9 +6449,9 @@ type TopologyConfig struct { NullFields []string `json:"-"` } -func (s *TopologyConfig) MarshalJSON() ([]byte, error) { +func (s TopologyConfig) MarshalJSON() ([]byte, error) { type NoMethod TopologyConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TransformSummary: Description of the type, names/ids, and input/outputs for @@ -6461,9 +6496,9 @@ type TransformSummary struct { NullFields []string `json:"-"` } -func (s *TransformSummary) MarshalJSON() ([]byte, error) { +func (s TransformSummary) MarshalJSON() ([]byte, error) { type NoMethod TransformSummary - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkItem: WorkItem represents basic information about a WorkItem to be @@ -6516,9 +6551,9 @@ type WorkItem struct { NullFields []string `json:"-"` } -func (s *WorkItem) MarshalJSON() ([]byte, error) { +func (s WorkItem) MarshalJSON() ([]byte, error) { type NoMethod WorkItem - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkItemDetails: Information about an individual work item execution. 
@@ -6562,9 +6597,9 @@ type WorkItemDetails struct { NullFields []string `json:"-"` } -func (s *WorkItemDetails) MarshalJSON() ([]byte, error) { +func (s WorkItemDetails) MarshalJSON() ([]byte, error) { type NoMethod WorkItemDetails - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkItemServiceState: The Dataflow service's idea of the current state of a @@ -6617,9 +6652,9 @@ type WorkItemServiceState struct { NullFields []string `json:"-"` } -func (s *WorkItemServiceState) MarshalJSON() ([]byte, error) { +func (s WorkItemServiceState) MarshalJSON() ([]byte, error) { type NoMethod WorkItemServiceState - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkItemStatus: Conveys a worker's progress through the work described by a @@ -6703,9 +6738,9 @@ type WorkItemStatus struct { NullFields []string `json:"-"` } -func (s *WorkItemStatus) MarshalJSON() ([]byte, error) { +func (s WorkItemStatus) MarshalJSON() ([]byte, error) { type NoMethod WorkItemStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *WorkItemStatus) UnmarshalJSON(data []byte) error { @@ -6741,9 +6776,9 @@ type WorkerDetails struct { NullFields []string `json:"-"` } -func (s *WorkerDetails) MarshalJSON() ([]byte, error) { +func (s WorkerDetails) MarshalJSON() ([]byte, error) { type NoMethod WorkerDetails - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkerHealthReport: WorkerHealthReport contains information about the health @@ -6785,9 +6820,9 @@ type WorkerHealthReport struct { NullFields []string `json:"-"` } -func (s *WorkerHealthReport) MarshalJSON() ([]byte, error) { +func (s WorkerHealthReport) MarshalJSON() ([]byte, error) { type NoMethod WorkerHealthReport - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkerHealthReportResponse: WorkerHealthReportResponse contains information @@ -6810,9 +6845,9 @@ type WorkerHealthReportResponse struct { NullFields []string `json:"-"` } -func (s *WorkerHealthReportResponse) MarshalJSON() ([]byte, error) { +func (s WorkerHealthReportResponse) MarshalJSON() ([]byte, error) { type NoMethod WorkerHealthReportResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkerLifecycleEvent: A report of an event in a worker's lifecycle. 
The @@ -6855,9 +6890,9 @@ type WorkerLifecycleEvent struct { NullFields []string `json:"-"` } -func (s *WorkerLifecycleEvent) MarshalJSON() ([]byte, error) { +func (s WorkerLifecycleEvent) MarshalJSON() ([]byte, error) { type NoMethod WorkerLifecycleEvent - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkerMessage: WorkerMessage provides information to the backend about a @@ -6906,9 +6941,9 @@ type WorkerMessage struct { NullFields []string `json:"-"` } -func (s *WorkerMessage) MarshalJSON() ([]byte, error) { +func (s WorkerMessage) MarshalJSON() ([]byte, error) { type NoMethod WorkerMessage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkerMessageCode: A message code is used to report status and error @@ -6952,9 +6987,9 @@ type WorkerMessageCode struct { NullFields []string `json:"-"` } -func (s *WorkerMessageCode) MarshalJSON() ([]byte, error) { +func (s WorkerMessageCode) MarshalJSON() ([]byte, error) { type NoMethod WorkerMessageCode - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkerMessageResponse: A worker_message response allows the server to pass @@ -6989,9 +7024,9 @@ type WorkerMessageResponse struct { NullFields []string `json:"-"` } -func (s *WorkerMessageResponse) MarshalJSON() ([]byte, error) { +func (s WorkerMessageResponse) MarshalJSON() ([]byte, error) { type NoMethod WorkerMessageResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkerPool: Describes one particular pool of Cloud Dataflow workers to be @@ -7113,9 +7148,9 @@ type WorkerPool struct { NullFields []string `json:"-"` } -func (s *WorkerPool) MarshalJSON() ([]byte, error) { +func (s WorkerPool) MarshalJSON() ([]byte, error) { type NoMethod WorkerPool - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkerSettings: Provides data to pass through to the worker harness. @@ -7155,9 +7190,9 @@ type WorkerSettings struct { NullFields []string `json:"-"` } -func (s *WorkerSettings) MarshalJSON() ([]byte, error) { +func (s WorkerSettings) MarshalJSON() ([]byte, error) { type NoMethod WorkerSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkerShutdownNotice: Shutdown notification from workers. 
This is to be sent @@ -7181,9 +7216,9 @@ type WorkerShutdownNotice struct { NullFields []string `json:"-"` } -func (s *WorkerShutdownNotice) MarshalJSON() ([]byte, error) { +func (s WorkerShutdownNotice) MarshalJSON() ([]byte, error) { type NoMethod WorkerShutdownNotice - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkerShutdownNoticeResponse: Service-side response to WorkerMessage issuing @@ -7209,9 +7244,9 @@ type WorkerThreadScalingReport struct { NullFields []string `json:"-"` } -func (s *WorkerThreadScalingReport) MarshalJSON() ([]byte, error) { +func (s WorkerThreadScalingReport) MarshalJSON() ([]byte, error) { type NoMethod WorkerThreadScalingReport - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkerThreadScalingReportResponse: Contains the thread scaling @@ -7232,9 +7267,9 @@ type WorkerThreadScalingReportResponse struct { NullFields []string `json:"-"` } -func (s *WorkerThreadScalingReportResponse) MarshalJSON() ([]byte, error) { +func (s WorkerThreadScalingReportResponse) MarshalJSON() ([]byte, error) { type NoMethod WorkerThreadScalingReportResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WriteInstruction: An instruction that writes records. Takes one input, @@ -7257,9 +7292,9 @@ type WriteInstruction struct { NullFields []string `json:"-"` } -func (s *WriteInstruction) MarshalJSON() ([]byte, error) { +func (s WriteInstruction) MarshalJSON() ([]byte, error) { type NoMethod WriteInstruction - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ProjectsDeleteSnapshotsCall struct { @@ -11814,7 +11849,12 @@ type ProjectsLocationsTemplatesCreateCall struct { } // Create: Creates a Cloud Dataflow job from a template. Do not enter -// confidential information when you supply string values using the API. +// confidential information when you supply string values using the API. To +// create a job, we recommend using `projects.locations.templates.create` with +// a [regional endpoint] +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using +// `projects.templates.create` is not recommended, because your job will always +// start in `us-central1`. // // - location: The [regional endpoint] // (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to @@ -11921,7 +11961,12 @@ type ProjectsLocationsTemplatesGetCall struct { header_ http.Header } -// Get: Get the template associated with a template. +// Get: Get the template associated with a template. To get the template, we +// recommend using `projects.locations.templates.get` with a [regional +// endpoint] +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using +// `projects.templates.get` is not recommended, because only templates that are +// running in `us-central1` are retrieved. // // - location: The [regional endpoint] // (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to @@ -12056,7 +12101,11 @@ type ProjectsLocationsTemplatesLaunchCall struct { header_ http.Header } -// Launch: Launch a template. +// Launch: Launches a template. 
To launch a template, we recommend using +// `projects.locations.templates.launch` with a [regional endpoint] +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using +// `projects.templates.launch` is not recommended, because jobs launched from +// the template will always start in `us-central1`. // // - location: The [regional endpoint] // (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to @@ -12437,7 +12486,12 @@ type ProjectsTemplatesCreateCall struct { } // Create: Creates a Cloud Dataflow job from a template. Do not enter -// confidential information when you supply string values using the API. +// confidential information when you supply string values using the API. To +// create a job, we recommend using `projects.locations.templates.create` with +// a [regional endpoint] +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using +// `projects.templates.create` is not recommended, because your job will always +// start in `us-central1`. // // - projectId: The ID of the Cloud Platform project that the job belongs to. func (r *ProjectsTemplatesService) Create(projectId string, createjobfromtemplaterequest *CreateJobFromTemplateRequest) *ProjectsTemplatesCreateCall { @@ -12538,7 +12592,12 @@ type ProjectsTemplatesGetCall struct { header_ http.Header } -// Get: Get the template associated with a template. +// Get: Get the template associated with a template. To get the template, we +// recommend using `projects.locations.templates.get` with a [regional +// endpoint] +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using +// `projects.templates.get` is not recommended, because only templates that are +// running in `us-central1` are retrieved. // // - projectId: The ID of the Cloud Platform project that the job belongs to. func (r *ProjectsTemplatesService) Get(projectId string) *ProjectsTemplatesGetCall { @@ -12675,7 +12734,11 @@ type ProjectsTemplatesLaunchCall struct { header_ http.Header } -// Launch: Launch a template. +// Launch: Launches a template. To launch a template, we recommend using +// `projects.locations.templates.launch` with a [regional endpoint] +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using +// `projects.templates.launch` is not recommended, because jobs launched from +// the template will always start in `us-central1`. // // - projectId: The ID of the Cloud Platform project that the job belongs to. 
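The doc-comment hunks above consistently steer callers from the project-level template methods to the locations-scoped ones, since projects.templates.create/get/launch pin the job (or template lookup) to us-central1. A short usage sketch of the recommended path, illustrative only and assuming the standard generated surface of google.golang.org/api/dataflow/v1b3 (the project ID, region, bucket paths, and job name below are hypothetical):

package main

import (
	"context"
	"log"

	dataflow "google.golang.org/api/dataflow/v1b3"
)

func main() {
	ctx := context.Background()
	svc, err := dataflow.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	params := &dataflow.LaunchTemplateParameters{
		JobName:    "example-job",
		Parameters: map[string]string{"input": "gs://example-bucket/input"},
	}
	// projects.locations.templates.launch with an explicit regional endpoint,
	// rather than projects.templates.launch, which would always start the job
	// in us-central1.
	resp, err := svc.Projects.Locations.Templates.
		Launch("my-project", "europe-west1", params).
		GcsPath("gs://example-bucket/templates/example").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("launched job %s", resp.Job.Id)
}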
func (r *ProjectsTemplatesService) Launch(projectId string, launchtemplateparameters *LaunchTemplateParameters) *ProjectsTemplatesLaunchCall { diff --git a/terraform/providers/google/vendor/google.golang.org/api/dataproc/v1/dataproc-api.json b/terraform/providers/google/vendor/google.golang.org/api/dataproc/v1/dataproc-api.json index cd078d6316f..03af01be480 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/dataproc/v1/dataproc-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/dataproc/v1/dataproc-api.json @@ -26,6 +26,66 @@ "endpointUrl": "https://dataproc.europe-west9.rep.googleapis.com/", "location": "europe-west9" }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://dataproc.us-central1.rep.googleapis.com/", + "location": "us-central1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://dataproc.us-central2.rep.googleapis.com/", + "location": "us-central2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://dataproc.us-east1.rep.googleapis.com/", + "location": "us-east1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://dataproc.us-east4.rep.googleapis.com/", + "location": "us-east4" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://dataproc.us-east5.rep.googleapis.com/", + "location": "us-east5" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://dataproc.us-east7.rep.googleapis.com/", + "location": "us-east7" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://dataproc.us-south1.rep.googleapis.com/", + "location": "us-south1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://dataproc.us-west1.rep.googleapis.com/", + "location": "us-west1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://dataproc.us-west2.rep.googleapis.com/", + "location": "us-west2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://dataproc.us-west3.rep.googleapis.com/", + "location": "us-west3" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://dataproc.us-west8.rep.googleapis.com/", + "location": "us-west8" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://dataproc.europe-west8.rep.googleapis.com/", + "location": "europe-west8" + }, { "description": "Regional Endpoint", "endpointUrl": "https://dataproc.me-central2.rep.googleapis.com/", @@ -520,795 +580,1094 @@ "https://www.googleapis.com/auth/cloud-platform" ] } - } - }, - "operations": { - "methods": { - "cancel": { - "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. 
On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:cancel", - "httpMethod": "POST", - "id": "dataproc.projects.locations.operations.cancel", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "The name of the operation resource to be cancelled.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}:cancel", - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "delete": { - "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", - "httpMethod": "DELETE", - "id": "dataproc.projects.locations.operations.delete", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "The name of the operation resource to be deleted.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}", - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "get": { - "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", - "httpMethod": "GET", - "id": "dataproc.projects.locations.operations.get", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "The name of the operation resource.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "list": { - "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations", - "httpMethod": "GET", - "id": "dataproc.projects.locations.operations.list", - "parameterOrder": [ - "name" - ], - "parameters": { - "filter": { - "description": "The standard list filter.", - "location": "query", - "type": "string" + }, + "resources": { + "sparkApplications": { + "methods": { + "access": { + "description": "Obtain high level information corresponding to a single Spark Application.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:access", + "httpMethod": "GET", + "id": "dataproc.projects.locations.batches.sparkApplications.access", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. 
The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "parent": { + "description": "Required. Parent (Batch) resource reference.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:access", + "response": { + "$ref": "AccessSparkApplicationResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] }, - "name": { - "description": "The name of the operation's parent resource.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/operations$", - "required": true, - "type": "string" + "accessEnvironmentInfo": { + "description": "Obtain environment details for a Spark Application", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:accessEnvironmentInfo", + "httpMethod": "GET", + "id": "dataproc.projects.locations.batches.sparkApplications.accessEnvironmentInfo", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "parent": { + "description": "Required. Parent (Batch) resource reference.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:accessEnvironmentInfo", + "response": { + "$ref": "AccessSparkApplicationEnvironmentInfoResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] }, - "pageSize": { - "description": "The standard list page size.", - "format": "int32", - "location": "query", - "type": "integer" + "accessJob": { + "description": "Obtain data corresponding to a spark job for a Spark Application.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:accessJob", + "httpMethod": "GET", + "id": "dataproc.projects.locations.batches.sparkApplications.accessJob", + "parameterOrder": [ + "name" + ], + "parameters": { + "jobId": { + "description": "Required. Job ID to fetch data for.", + "format": "int64", + "location": "query", + "type": "string" + }, + "name": { + "description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "parent": { + "description": "Required. 
Parent (Batch) resource reference.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:accessJob", + "response": { + "$ref": "AccessSparkApplicationJobResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] }, - "pageToken": { - "description": "The standard list page token.", - "location": "query", - "type": "string" - } - }, - "path": "v1/{+name}", - "response": { - "$ref": "ListOperationsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - } - } - }, - "sessionTemplates": { - "methods": { - "create": { - "description": "Create a session template synchronously.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessionTemplates", - "httpMethod": "POST", - "id": "dataproc.projects.locations.sessionTemplates.create", - "parameterOrder": [ - "parent" - ], - "parameters": { - "parent": { - "description": "Required. The parent resource where this session template will be created.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+parent}/sessionTemplates", - "request": { - "$ref": "SessionTemplate" - }, - "response": { - "$ref": "SessionTemplate" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "delete": { - "description": "Deletes a session template.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessionTemplates/{sessionTemplatesId}", - "httpMethod": "DELETE", - "id": "dataproc.projects.locations.sessionTemplates.delete", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The name of the session template resource to delete.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/sessionTemplates/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}", - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "get": { - "description": "Gets the resource representation for a session template.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessionTemplates/{sessionTemplatesId}", - "httpMethod": "GET", - "id": "dataproc.projects.locations.sessionTemplates.get", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The name of the session template to retrieve.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/sessionTemplates/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}", - "response": { - "$ref": "SessionTemplate" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "list": { - "description": "Lists session templates.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessionTemplates", - "httpMethod": "GET", - "id": "dataproc.projects.locations.sessionTemplates.list", - "parameterOrder": [ - "parent" - ], - "parameters": { - "filter": { - "description": "Optional. A filter for the session templates to return in the response. Filters are case sensitive and have the following syntax:field = value AND field = value ...", - "location": "query", - "type": "string" - }, - "pageSize": { - "description": "Optional. The maximum number of sessions to return in each response. The service may return fewer than this value.", - "format": "int32", - "location": "query", - "type": "integer" + "accessSqlPlan": { + "description": "Obtain Spark Plan Graph for a Spark Application SQL execution. 
Limits the number of clusters returned as part of the graph to 10000.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:accessSqlPlan", + "httpMethod": "GET", + "id": "dataproc.projects.locations.batches.sparkApplications.accessSqlPlan", + "parameterOrder": [ + "name" + ], + "parameters": { + "executionId": { + "description": "Required. Execution ID", + "format": "int64", + "location": "query", + "type": "string" + }, + "name": { + "description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "parent": { + "description": "Required. Parent (Batch) resource reference.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:accessSqlPlan", + "response": { + "$ref": "AccessSparkApplicationSqlSparkPlanGraphResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] }, - "pageToken": { - "description": "Optional. A page token received from a previous ListSessions call. Provide this token to retrieve the subsequent page.", - "location": "query", - "type": "string" + "accessSqlQuery": { + "description": "Obtain data corresponding to a particular SQL Query for a Spark Application.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:accessSqlQuery", + "httpMethod": "GET", + "id": "dataproc.projects.locations.batches.sparkApplications.accessSqlQuery", + "parameterOrder": [ + "name" + ], + "parameters": { + "details": { + "description": "Optional. Lists/ hides details of Spark plan nodes. True is set to list and false to hide.", + "location": "query", + "type": "boolean" + }, + "executionId": { + "description": "Required. Execution ID", + "format": "int64", + "location": "query", + "type": "string" + }, + "name": { + "description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "parent": { + "description": "Required. Parent (Batch) resource reference.", + "location": "query", + "type": "string" + }, + "planDescription": { + "description": "Optional. Enables/ disables physical plan description on demand", + "location": "query", + "type": "boolean" + } + }, + "path": "v1/{+name}:accessSqlQuery", + "response": { + "$ref": "AccessSparkApplicationSqlQueryResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] }, - "parent": { - "description": "Required. 
The parent that owns this collection of session templates.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+parent}/sessionTemplates", - "response": { - "$ref": "ListSessionTemplatesResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "patch": { - "description": "Updates the session template synchronously.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessionTemplates/{sessionTemplatesId}", - "httpMethod": "PATCH", - "id": "dataproc.projects.locations.sessionTemplates.patch", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The resource name of the session template.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/sessionTemplates/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}", - "request": { - "$ref": "SessionTemplate" - }, - "response": { - "$ref": "SessionTemplate" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - } - } - }, - "sessions": { - "methods": { - "create": { - "description": "Create an interactive session asynchronously.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions", - "httpMethod": "POST", - "id": "dataproc.projects.locations.sessions.create", - "parameterOrder": [ - "parent" - ], - "parameters": { - "parent": { - "description": "Required. The parent resource where this session will be created.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+$", - "required": true, - "type": "string" + "accessStageAttempt": { + "description": "Obtain data corresponding to a spark stage attempt for a Spark Application.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:accessStageAttempt", + "httpMethod": "GET", + "id": "dataproc.projects.locations.batches.sparkApplications.accessStageAttempt", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "parent": { + "description": "Required. Parent (Batch) resource reference.", + "location": "query", + "type": "string" + }, + "stageAttemptId": { + "description": "Required. Stage Attempt ID", + "format": "int32", + "location": "query", + "type": "integer" + }, + "stageId": { + "description": "Required. Stage ID", + "format": "int64", + "location": "query", + "type": "string" + }, + "summaryMetricsMask": { + "description": "Optional. The list of summary metrics fields to include. Empty list will default to skip all summary metrics fields. Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:accessStageAttempt", + "response": { + "$ref": "AccessSparkApplicationStageAttemptResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] }, - "requestId": { - "description": "Optional. A unique ID used to identify the request. 
If the service receives two CreateSessionRequests (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateSessionRequest)s with the same ID, the second request is ignored, and the first Session is created and stored in the backend.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", - "location": "query", - "type": "string" + "accessStageRddGraph": { + "description": "Obtain RDD operation graph for a Spark Application Stage. Limits the number of clusters returned as part of the graph to 10000.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:accessStageRddGraph", + "httpMethod": "GET", + "id": "dataproc.projects.locations.batches.sparkApplications.accessStageRddGraph", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "parent": { + "description": "Required. Parent (Batch) resource reference.", + "location": "query", + "type": "string" + }, + "stageId": { + "description": "Required. Stage ID", + "format": "int64", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:accessStageRddGraph", + "response": { + "$ref": "AccessSparkApplicationStageRddOperationGraphResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] }, - "sessionId": { - "description": "Required. The ID to use for the session, which becomes the final component of the session's resource name.This value must be 4-63 characters. Valid characters are /a-z-/.", - "location": "query", - "type": "string" - } - }, - "path": "v1/{+parent}/sessions", - "request": { - "$ref": "Session" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "delete": { - "description": "Deletes the interactive session resource. If the session is not in terminal state, it is terminated, and then deleted.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}", - "httpMethod": "DELETE", - "id": "dataproc.projects.locations.sessions.delete", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The name of the session resource to delete.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+$", - "required": true, - "type": "string" + "search": { + "description": "Obtain high level information and list of Spark Applications corresponding to a batch", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications:search", + "httpMethod": "GET", + "id": "dataproc.projects.locations.batches.sparkApplications.search", + "parameterOrder": [ + "parent" + ], + "parameters": { + "applicationStatus": { + "description": "Optional. 
Search only applications in the chosen state.", + "enum": [ + "APPLICATION_STATUS_UNSPECIFIED", + "APPLICATION_STATUS_RUNNING", + "APPLICATION_STATUS_COMPLETED" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "location": "query", + "type": "string" + }, + "maxEndTime": { + "description": "Optional. Latest end timestamp to list.", + "format": "google-datetime", + "location": "query", + "type": "string" + }, + "maxTime": { + "description": "Optional. Latest start timestamp to list.", + "format": "google-datetime", + "location": "query", + "type": "string" + }, + "minEndTime": { + "description": "Optional. Earliest end timestamp to list.", + "format": "google-datetime", + "location": "query", + "type": "string" + }, + "minTime": { + "description": "Optional. Earliest start timestamp to list.", + "format": "google-datetime", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "Optional. Maximum number of applications to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. A page token received from a previous SearchSparkApplications call. Provide this token to retrieve the subsequent page.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/sparkApplications:search", + "response": { + "$ref": "SearchSparkApplicationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] }, - "requestId": { - "description": "Optional. A unique ID used to identify the request. If the service receives two DeleteSessionRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteSessionRequest)s with the same ID, the second request is ignored.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", - "location": "query", - "type": "string" - } - }, - "path": "v1/{+name}", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "get": { - "description": "Gets the resource representation for an interactive session.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}", - "httpMethod": "GET", - "id": "dataproc.projects.locations.sessions.get", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. 
The name of the session to retrieve.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}", - "response": { - "$ref": "Session" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "list": { - "description": "Lists interactive sessions.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions", - "httpMethod": "GET", - "id": "dataproc.projects.locations.sessions.list", - "parameterOrder": [ - "parent" - ], - "parameters": { - "filter": { - "description": "Optional. A filter for the sessions to return in the response.A filter is a logical expression constraining the values of various fields in each session resource. Filters are case sensitive, and may contain multiple clauses combined with logical operators (AND, OR). Supported fields are session_id, session_uuid, state, create_time, and labels.Example: state = ACTIVE and create_time \u003c \"2023-01-01T00:00:00Z\" is a filter for sessions in an ACTIVE state that were created before 2023-01-01. state = ACTIVE and labels.environment=production is a filter for sessions in an ACTIVE state that have a production environment label.See https://google.aip.dev/assets/misc/ebnf-filtering.txt for a detailed description of the filter syntax and a list of supported comparators.", - "location": "query", - "type": "string" + "searchExecutorStageSummary": { + "description": "Obtain executor summary with respect to a spark stage attempt.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:searchExecutorStageSummary", + "httpMethod": "GET", + "id": "dataproc.projects.locations.batches.sparkApplications.searchExecutorStageSummary", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "Optional. Maximum number of executors to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. A page token received from a previous AccessSparkApplicationExecutorsList call. Provide this token to retrieve the subsequent page.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. Parent (Batch) resource reference.", + "location": "query", + "type": "string" + }, + "stageAttemptId": { + "description": "Required. Stage Attempt ID", + "format": "int32", + "location": "query", + "type": "integer" + }, + "stageId": { + "description": "Required. Stage ID", + "format": "int64", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:searchExecutorStageSummary", + "response": { + "$ref": "SearchSparkApplicationExecutorStageSummaryResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] }, - "pageSize": { - "description": "Optional. The maximum number of sessions to return in each response. 
The service may return fewer than this value.", - "format": "int32", - "location": "query", - "type": "integer" + "searchExecutors": { + "description": "Obtain data corresponding to executors for a Spark Application.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:searchExecutors", + "httpMethod": "GET", + "id": "dataproc.projects.locations.batches.sparkApplications.searchExecutors", + "parameterOrder": [ + "name" + ], + "parameters": { + "executorStatus": { + "description": "Optional. Filter to select whether active/ dead or all executors should be selected.", + "enum": [ + "EXECUTOR_STATUS_UNSPECIFIED", + "EXECUTOR_STATUS_ACTIVE", + "EXECUTOR_STATUS_DEAD" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "location": "query", + "type": "string" + }, + "name": { + "description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "Optional. Maximum number of executors to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. A page token received from a previous AccessSparkApplicationExecutorsList call. Provide this token to retrieve the subsequent page.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. Parent (Batch) resource reference.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:searchExecutors", + "response": { + "$ref": "SearchSparkApplicationExecutorsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] }, - "pageToken": { - "description": "Optional. A page token received from a previous ListSessions call. Provide this token to retrieve the subsequent page.", - "location": "query", - "type": "string" + "searchJobs": { + "description": "Obtain list of spark jobs corresponding to a Spark Application.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:searchJobs", + "httpMethod": "GET", + "id": "dataproc.projects.locations.batches.sparkApplications.searchJobs", + "parameterOrder": [ + "name" + ], + "parameters": { + "jobStatus": { + "description": "Optional. List only jobs in the specific state.", + "enum": [ + "JOB_EXECUTION_STATUS_UNSPECIFIED", + "JOB_EXECUTION_STATUS_RUNNING", + "JOB_EXECUTION_STATUS_SUCCEEDED", + "JOB_EXECUTION_STATUS_FAILED", + "JOB_EXECUTION_STATUS_UNKNOWN" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ], + "location": "query", + "type": "string" + }, + "name": { + "description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "Optional. Maximum number of jobs to return in each response. The service may return fewer than this. 
The default page size is 10; the maximum page size is 100.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. A page token received from a previous SearchSparkApplicationJobs call. Provide this token to retrieve the subsequent page.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. Parent (Batch) resource reference.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:searchJobs", + "response": { + "$ref": "SearchSparkApplicationJobsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] }, - "parent": { - "description": "Required. The parent, which owns this collection of sessions.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+parent}/sessions", - "response": { - "$ref": "ListSessionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "terminate": { - "description": "Terminates the interactive session.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}:terminate", - "httpMethod": "POST", - "id": "dataproc.projects.locations.sessions.terminate", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The name of the session resource to terminate.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}:terminate", - "request": { - "$ref": "TerminateSessionRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - } - } - }, - "workflowTemplates": { - "methods": { - "create": { - "description": "Creates new workflow template.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates", - "httpMethod": "POST", - "id": "dataproc.projects.locations.workflowTemplates.create", - "parameterOrder": [ - "parent" - ], - "parameters": { - "parent": { - "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.create, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+parent}/workflowTemplates", - "request": { - "$ref": "WorkflowTemplate" - }, - "response": { - "$ref": "WorkflowTemplate" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "delete": { - "description": "Deletes a workflow template. It does not cancel in-progress workflows.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}", - "httpMethod": "DELETE", - "id": "dataproc.projects.locations.workflowTemplates.delete", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", - "required": true, - "type": "string" + "searchSqlQueries": { + "description": "Obtain data corresponding to SQL Queries for a Spark Application.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:searchSqlQueries", + "httpMethod": "GET", + "id": "dataproc.projects.locations.batches.sparkApplications.searchSqlQueries", + "parameterOrder": [ + "name" + ], + "parameters": { + "details": { + "description": "Optional. Lists/ hides details of Spark plan nodes. True is set to list and false to hide.", + "location": "query", + "type": "boolean" + }, + "name": { + "description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "Optional. Maximum number of queries to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. A page token received from a previous SearchSparkApplicationSqlQueries call. Provide this token to retrieve the subsequent page.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. Parent (Batch) resource reference.", + "location": "query", + "type": "string" + }, + "planDescription": { + "description": "Optional. Enables/ disables physical plan description on demand", + "location": "query", + "type": "boolean" + } + }, + "path": "v1/{+name}:searchSqlQueries", + "response": { + "$ref": "SearchSparkApplicationSqlQueriesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] }, - "version": { - "description": "Optional. The version of workflow template to delete. If specified, will only delete the template if the current server version matches specified version.", - "format": "int32", - "location": "query", - "type": "integer" - } - }, - "path": "v1/{+name}", - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "get": { - "description": "Retrieves the latest workflow template.Can retrieve previously instantiated template by specifying optional version parameter.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}", - "httpMethod": "GET", - "id": "dataproc.projects.locations.workflowTemplates.get", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", - "required": true, - "type": "string" + "searchStageAttemptTasks": { + "description": "Obtain data corresponding to tasks for a spark stage attempt for a Spark Application.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:searchStageAttemptTasks", + "httpMethod": "GET", + "id": "dataproc.projects.locations.batches.sparkApplications.searchStageAttemptTasks", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "Optional. Maximum number of tasks to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. A page token received from a previous ListSparkApplicationStageAttemptTasks call. Provide this token to retrieve the subsequent page.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. Parent (Batch) resource reference.", + "location": "query", + "type": "string" + }, + "sortRuntime": { + "description": "Optional. Sort the tasks by runtime.", + "location": "query", + "type": "boolean" + }, + "stageAttemptId": { + "description": "Optional. Stage Attempt ID", + "format": "int32", + "location": "query", + "type": "integer" + }, + "stageId": { + "description": "Optional. Stage ID", + "format": "int64", + "location": "query", + "type": "string" + }, + "taskStatus": { + "description": "Optional. List only tasks in the state.", + "enum": [ + "TASK_STATUS_UNSPECIFIED", + "TASK_STATUS_RUNNING", + "TASK_STATUS_SUCCESS", + "TASK_STATUS_FAILED", + "TASK_STATUS_KILLED", + "TASK_STATUS_PENDING" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "" + ], + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:searchStageAttemptTasks", + "response": { + "$ref": "SearchSparkApplicationStageAttemptTasksResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] }, - "version": { - "description": "Optional. The version of workflow template to retrieve. Only previously instantiated versions can be retrieved.If unspecified, retrieves the current version.", - "format": "int32", - "location": "query", - "type": "integer" - } - }, - "path": "v1/{+name}", - "response": { - "$ref": "WorkflowTemplate" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "getIamPolicy": { - "description": "Gets the access control policy for a resource. 
Returns an empty policy if the resource exists and does not have a policy set.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}:getIamPolicy", - "httpMethod": "POST", - "id": "dataproc.projects.locations.workflowTemplates.getIamPolicy", - "parameterOrder": [ - "resource" - ], - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+resource}:getIamPolicy", - "request": { - "$ref": "GetIamPolicyRequest" - }, - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "instantiate": { - "description": "Instantiates a template and begins execution.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). Also see Using WorkflowMetadata (https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).On successful completion, Operation.response will be Empty.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}:instantiate", - "httpMethod": "POST", - "id": "dataproc.projects.locations.workflowTemplates.instantiate", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}:instantiate", - "request": { - "$ref": "InstantiateWorkflowTemplateRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "instantiateInline": { - "description": "Instantiates a template and begins execution.This method is equivalent to executing the sequence CreateWorkflowTemplate, InstantiateWorkflowTemplate, DeleteWorkflowTemplate.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). 
Also see Using WorkflowMetadata (https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).On successful completion, Operation.response will be Empty.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates:instantiateInline", - "httpMethod": "POST", - "id": "dataproc.projects.locations.workflowTemplates.instantiateInline", - "parameterOrder": [ - "parent" - ], - "parameters": { - "parent": { - "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,instantiateinline, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.instantiateinline, the resource name of the location has the following format: projects/{project_id}/locations/{location}", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+$", - "required": true, - "type": "string" + "searchStageAttempts": { + "description": "Obtain data corresponding to a spark stage attempts for a Spark Application.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:searchStageAttempts", + "httpMethod": "GET", + "id": "dataproc.projects.locations.batches.sparkApplications.searchStageAttempts", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "Optional. Maximum number of stage attempts (paging based on stage_attempt_id) to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. A page token received from a previous SearchSparkApplicationStageAttempts call. Provide this token to retrieve the subsequent page.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. Parent (Batch) resource reference.", + "location": "query", + "type": "string" + }, + "stageId": { + "description": "Required. Stage ID for which attempts are to be fetched", + "format": "int64", + "location": "query", + "type": "string" + }, + "summaryMetricsMask": { + "description": "Optional. The list of summary metrics fields to include. Empty list will default to skip all summary metrics fields. Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:searchStageAttempts", + "response": { + "$ref": "SearchSparkApplicationStageAttemptsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] }, - "requestId": { - "description": "Optional. A tag that prevents multiple concurrent workflow instances with the same tag from running. 
This mitigates risk of concurrent instances started due to retries.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", - "location": "query", - "type": "string" - } - }, - "path": "v1/{+parent}/workflowTemplates:instantiateInline", - "request": { - "$ref": "WorkflowTemplate" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "list": { - "description": "Lists workflows that match the specified filter in the request.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates", - "httpMethod": "GET", - "id": "dataproc.projects.locations.workflowTemplates.list", + "searchStages": { + "description": "Obtain data corresponding to stages for a Spark Application.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:searchStages", + "httpMethod": "GET", + "id": "dataproc.projects.locations.batches.sparkApplications.searchStages", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "Optional. Maximum number of stages (paging based on stage_id) to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. A page token received from a previous FetchSparkApplicationStagesList call. Provide this token to retrieve the subsequent page.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. Parent (Batch) resource reference.", + "location": "query", + "type": "string" + }, + "stageStatus": { + "description": "Optional. List only stages in the given state.", + "enum": [ + "STAGE_STATUS_UNSPECIFIED", + "STAGE_STATUS_ACTIVE", + "STAGE_STATUS_COMPLETE", + "STAGE_STATUS_FAILED", + "STAGE_STATUS_PENDING", + "STAGE_STATUS_SKIPPED" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "" + ], + "location": "query", + "type": "string" + }, + "summaryMetricsMask": { + "description": "Optional. The list of summary metrics fields to include. Empty list will default to skip all summary metrics fields. 
Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:searchStages", + "response": { + "$ref": "SearchSparkApplicationStagesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "summarizeExecutors": { + "description": "Obtain summary of Executor Summary for a Spark Application", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:summarizeExecutors", + "httpMethod": "GET", + "id": "dataproc.projects.locations.batches.sparkApplications.summarizeExecutors", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "parent": { + "description": "Required. Parent (Batch) resource reference.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:summarizeExecutors", + "response": { + "$ref": "SummarizeSparkApplicationExecutorsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "summarizeJobs": { + "description": "Obtain summary of Jobs for a Spark Application", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:summarizeJobs", + "httpMethod": "GET", + "id": "dataproc.projects.locations.batches.sparkApplications.summarizeJobs", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "parent": { + "description": "Required. Parent (Batch) resource reference.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:summarizeJobs", + "response": { + "$ref": "SummarizeSparkApplicationJobsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "summarizeStageAttemptTasks": { + "description": "Obtain summary of Tasks for a Spark Application Stage Attempt", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:summarizeStageAttemptTasks", + "httpMethod": "GET", + "id": "dataproc.projects.locations.batches.sparkApplications.summarizeStageAttemptTasks", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "parent": { + "description": "Required. Parent (Batch) resource reference.", + "location": "query", + "type": "string" + }, + "stageAttemptId": { + "description": "Required. 
Stage Attempt ID", + "format": "int32", + "location": "query", + "type": "integer" + }, + "stageId": { + "description": "Required. Stage ID", + "format": "int64", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:summarizeStageAttemptTasks", + "response": { + "$ref": "SummarizeSparkApplicationStageAttemptTasksResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "summarizeStages": { + "description": "Obtain summary of Stages for a Spark Application", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:summarizeStages", + "httpMethod": "GET", + "id": "dataproc.projects.locations.batches.sparkApplications.summarizeStages", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "parent": { + "description": "Required. Parent (Batch) resource reference.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:summarizeStages", + "response": { + "$ref": "SummarizeSparkApplicationStagesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "write": { + "description": "Write wrapper objects from dataplane to spanner", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:write", + "httpMethod": "POST", + "id": "dataproc.projects.locations.batches.sparkApplications.write", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The fully qualified name of the spark application to write data about in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:write", + "request": { + "$ref": "WriteSparkApplicationContextRequest" + }, + "response": { + "$ref": "WriteSparkApplicationContextResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + }, + "operations": { + "methods": { + "cancel": { + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:cancel", + "httpMethod": "POST", + "id": "dataproc.projects.locations.operations.cancel", "parameterOrder": [ - "parent" + "name" ], "parameters": { - "pageSize": { - "description": "Optional. 
The maximum number of results to return in each response.", - "format": "int32", - "location": "query", - "type": "integer" - }, - "pageToken": { - "description": "Optional. The page token, returned by a previous call, to request the next page of results.", - "location": "query", - "type": "string" - }, - "parent": { - "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "name": { + "description": "The name of the operation resource to be cancelled.", "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+$", + "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", "required": true, "type": "string" } }, - "path": "v1/{+parent}/workflowTemplates", + "path": "v1/{+name}:cancel", "response": { - "$ref": "ListWorkflowTemplatesResponse" + "$ref": "Empty" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}:setIamPolicy", - "httpMethod": "POST", - "id": "dataproc.projects.locations.workflowTemplates.setIamPolicy", + "delete": { + "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", + "httpMethod": "DELETE", + "id": "dataproc.projects.locations.operations.delete", "parameterOrder": [ - "resource" + "name" ], "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being specified. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "name": { + "description": "The name of the operation resource to be deleted.", "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", + "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", "required": true, "type": "string" } }, - "path": "v1/{+resource}:setIamPolicy", - "request": { - "$ref": "SetIamPolicyRequest" - }, + "path": "v1/{+name}", "response": { - "$ref": "Policy" + "$ref": "Empty" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. 
This operation may \"fail open\" without warning.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}:testIamPermissions", - "httpMethod": "POST", - "id": "dataproc.projects.locations.workflowTemplates.testIamPermissions", + "get": { + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", + "httpMethod": "GET", + "id": "dataproc.projects.locations.operations.get", "parameterOrder": [ - "resource" + "name" ], "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "name": { + "description": "The name of the operation resource.", "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", + "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", "required": true, "type": "string" } }, - "path": "v1/{+resource}:testIamPermissions", - "request": { - "$ref": "TestIamPermissionsRequest" - }, + "path": "v1/{+name}", "response": { - "$ref": "TestIamPermissionsResponse" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, - "update": { - "description": "Updates (replaces) workflow template. The updated template must contain version that matches the current server version.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}", - "httpMethod": "PUT", - "id": "dataproc.projects.locations.workflowTemplates.update", + "list": { + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations", + "httpMethod": "GET", + "id": "dataproc.projects.locations.operations.list", "parameterOrder": [ "name" ], "parameters": { + "filter": { + "description": "The standard list filter.", + "location": "query", + "type": "string" + }, "name": { - "description": "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "description": "The name of the operation's parent resource.", "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", + "pattern": "^projects/[^/]+/locations/[^/]+/operations$", "required": true, "type": "string" - } - }, - "path": "v1/{+name}", - "request": { - "$ref": "WorkflowTemplate" - }, + }, + "pageSize": { + "description": "The standard list page size.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The standard list page token.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}", "response": { - "$ref": "WorkflowTemplate" + "$ref": "ListOperationsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] } } - } - } - }, - "regions": { - "resources": { - "autoscalingPolicies": { + }, + "sessionTemplates": { "methods": { "create": { - "description": "Creates new autoscaling policy.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies", + "description": "Create a session template synchronously.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessionTemplates", "httpMethod": "POST", - "id": "dataproc.projects.regions.autoscalingPolicies.create", + "id": "dataproc.projects.locations.sessionTemplates.create", "parameterOrder": [ "parent" ], "parameters": { "parent": { - "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.create, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.autoscalingPolicies.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "description": "Required. The parent resource where this session template will be created.", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+$", + "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, "type": "string" } }, - "path": "v1/{+parent}/autoscalingPolicies", + "path": "v1/{+parent}/sessionTemplates", "request": { - "$ref": "AutoscalingPolicy" + "$ref": "SessionTemplate" }, "response": { - "$ref": "AutoscalingPolicy" + "$ref": "SessionTemplate" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, "delete": { - "description": "Deletes an autoscaling policy. It is an error to delete an autoscaling policy that is in use by one or more clusters.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}", + "description": "Deletes a session template.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessionTemplates/{sessionTemplatesId}", "httpMethod": "DELETE", - "id": "dataproc.projects.regions.autoscalingPolicies.delete", + "id": "dataproc.projects.locations.sessionTemplates.delete", "parameterOrder": [ "name" ], "parameters": { "name": { - "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + "description": "Required. The name of the session template resource to delete.", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", + "pattern": "^projects/[^/]+/locations/[^/]+/sessionTemplates/[^/]+$", "required": true, "type": "string" } @@ -1322,323 +1681,257 @@ ] }, "get": { - "description": "Retrieves autoscaling policy.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}", + "description": "Gets the resource representation for a session template.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessionTemplates/{sessionTemplatesId}", "httpMethod": "GET", - "id": "dataproc.projects.regions.autoscalingPolicies.get", + "id": "dataproc.projects.locations.sessionTemplates.get", "parameterOrder": [ "name" ], "parameters": { "name": { - "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + "description": "Required. The name of the session template to retrieve.", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", + "pattern": "^projects/[^/]+/locations/[^/]+/sessionTemplates/[^/]+$", "required": true, "type": "string" } }, "path": "v1/{+name}", "response": { - "$ref": "AutoscalingPolicy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "getIamPolicy": { - "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}:getIamPolicy", - "httpMethod": "POST", - "id": "dataproc.projects.regions.autoscalingPolicies.getIamPolicy", - "parameterOrder": [ - "resource" - ], - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being requested. 
See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", - "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+resource}:getIamPolicy", - "request": { - "$ref": "GetIamPolicyRequest" - }, - "response": { - "$ref": "Policy" + "$ref": "SessionTemplate" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, "list": { - "description": "Lists autoscaling policies in the project.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies", + "description": "Lists session templates.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessionTemplates", "httpMethod": "GET", - "id": "dataproc.projects.regions.autoscalingPolicies.list", + "id": "dataproc.projects.locations.sessionTemplates.list", "parameterOrder": [ "parent" ], "parameters": { + "filter": { + "description": "Optional. A filter for the session templates to return in the response. Filters are case sensitive and have the following syntax:field = value AND field = value ...", + "location": "query", + "type": "string" + }, "pageSize": { - "description": "Optional. The maximum number of results to return in each response. Must be less than or equal to 1000. Defaults to 100.", + "description": "Optional. The maximum number of sessions to return in each response. The service may return fewer than this value.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Optional. The page token, returned by a previous call, to request the next page of results.", + "description": "Optional. A page token received from a previous ListSessions call. Provide this token to retrieve the subsequent page.", "location": "query", "type": "string" }, "parent": { - "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.autoscalingPolicies.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "description": "Required. The parent that owns this collection of session templates.", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+$", + "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, "type": "string" } }, - "path": "v1/{+parent}/autoscalingPolicies", + "path": "v1/{+parent}/sessionTemplates", "response": { - "$ref": "ListAutoscalingPoliciesResponse" + "$ref": "ListSessionTemplatesResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}:setIamPolicy", - "httpMethod": "POST", - "id": "dataproc.projects.regions.autoscalingPolicies.setIamPolicy", + "patch": { + "description": "Updates the session template synchronously.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessionTemplates/{sessionTemplatesId}", + "httpMethod": "PATCH", + "id": "dataproc.projects.locations.sessionTemplates.patch", "parameterOrder": [ - "resource" + "name" ], "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being specified. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "name": { + "description": "Required. The resource name of the session template.", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", + "pattern": "^projects/[^/]+/locations/[^/]+/sessionTemplates/[^/]+$", "required": true, "type": "string" } }, - "path": "v1/{+resource}:setIamPolicy", + "path": "v1/{+name}", "request": { - "$ref": "SetIamPolicyRequest" + "$ref": "SessionTemplate" }, "response": { - "$ref": "Policy" + "$ref": "SessionTemplate" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] - }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}:testIamPermissions", + } + } + }, + "sessions": { + "methods": { + "create": { + "description": "Create an interactive session asynchronously.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions", "httpMethod": "POST", - "id": "dataproc.projects.regions.autoscalingPolicies.testIamPermissions", + "id": "dataproc.projects.locations.sessions.create", "parameterOrder": [ - "resource" + "parent" ], "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "parent": { + "description": "Required. The parent resource where this session will be created.", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", + "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, "type": "string" + }, + "requestId": { + "description": "Optional. A unique ID used to identify the request. If the service receives two CreateSessionRequests (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateSessionRequest)s with the same ID, the second request is ignored, and the first Session is created and stored in the backend.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). 
The maximum length is 40 characters.", + "location": "query", + "type": "string" + }, + "sessionId": { + "description": "Required. The ID to use for the session, which becomes the final component of the session's resource name.This value must be 4-63 characters. Valid characters are /a-z-/.", + "location": "query", + "type": "string" } }, - "path": "v1/{+resource}:testIamPermissions", + "path": "v1/{+parent}/sessions", "request": { - "$ref": "TestIamPermissionsRequest" + "$ref": "Session" }, "response": { - "$ref": "TestIamPermissionsResponse" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, - "update": { - "description": "Updates (replaces) autoscaling policy.Disabled check for update_mask, because all updates will be full replacements.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}", - "httpMethod": "PUT", - "id": "dataproc.projects.regions.autoscalingPolicies.update", + "delete": { + "description": "Deletes the interactive session resource. If the session is not in terminal state, it is terminated, and then deleted.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}", + "httpMethod": "DELETE", + "id": "dataproc.projects.locations.sessions.delete", "parameterOrder": [ "name" ], "parameters": { "name": { - "description": "Output only. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + "description": "Required. The name of the session resource to delete.", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", + "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+$", "required": true, "type": "string" + }, + "requestId": { + "description": "Optional. A unique ID used to identify the request. If the service receives two DeleteSessionRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteSessionRequest)s with the same ID, the second request is ignored.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", + "location": "query", + "type": "string" } }, "path": "v1/{+name}", - "request": { - "$ref": "AutoscalingPolicy" - }, "response": { - "$ref": "AutoscalingPolicy" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] - } - } - }, - "clusters": { - "methods": { - "create": { - "description": "Creates a cluster in a project. 
The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).", - "flatPath": "v1/projects/{projectId}/regions/{region}/clusters", - "httpMethod": "POST", - "id": "dataproc.projects.regions.clusters.create", + }, + "get": { + "description": "Gets the resource representation for an interactive session.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}", + "httpMethod": "GET", + "id": "dataproc.projects.locations.sessions.get", "parameterOrder": [ - "projectId", - "region" + "name" ], "parameters": { - "actionOnFailedPrimaryWorkers": { - "description": "Optional. Failure action when primary worker creation fails.", - "enum": [ - "FAILURE_ACTION_UNSPECIFIED", - "NO_ACTION", - "DELETE" - ], - "enumDescriptions": [ - "When FailureAction is unspecified, failure action defaults to NO_ACTION.", - "Take no action on failure to create a cluster resource. NO_ACTION is the default.", - "Delete the failed cluster resource." - ], - "location": "query", - "type": "string" - }, - "projectId": { - "description": "Required. The ID of the Google Cloud Platform project that the cluster belongs to.", - "location": "path", - "required": true, - "type": "string" - }, - "region": { - "description": "Required. The Dataproc region in which to handle the request.", + "name": { + "description": "Required. The name of the session to retrieve.", "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+$", "required": true, "type": "string" - }, - "requestId": { - "description": "Optional. A unique ID used to identify the request. If the server receives two CreateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", - "location": "query", - "type": "string" } }, - "path": "v1/projects/{projectId}/regions/{region}/clusters", - "request": { - "$ref": "Cluster" - }, + "path": "v1/{+name}", "response": { - "$ref": "Operation" + "$ref": "Session" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, - "delete": { - "description": "Deletes a cluster in a project. The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).", - "flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", - "httpMethod": "DELETE", - "id": "dataproc.projects.regions.clusters.delete", + "list": { + "description": "Lists interactive sessions.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions", + "httpMethod": "GET", + "id": "dataproc.projects.locations.sessions.list", "parameterOrder": [ - "projectId", - "region", - "clusterName" + "parent" ], "parameters": { - "clusterName": { - "description": "Required. The cluster name.", - "location": "path", - "required": true, - "type": "string" - }, - "clusterUuid": { - "description": "Optional. 
Specifying the cluster_uuid means the RPC should fail (with error NOT_FOUND) if cluster with specified UUID does not exist.", + "filter": { + "description": "Optional. A filter for the sessions to return in the response.A filter is a logical expression constraining the values of various fields in each session resource. Filters are case sensitive, and may contain multiple clauses combined with logical operators (AND, OR). Supported fields are session_id, session_uuid, state, create_time, and labels.Example: state = ACTIVE and create_time \u003c \"2023-01-01T00:00:00Z\" is a filter for sessions in an ACTIVE state that were created before 2023-01-01. state = ACTIVE and labels.environment=production is a filter for sessions in an ACTIVE state that have a production environment label.See https://google.aip.dev/assets/misc/ebnf-filtering.txt for a detailed description of the filter syntax and a list of supported comparators.", "location": "query", "type": "string" }, - "gracefulTerminationTimeout": { - "description": "Optional. The graceful termination timeout for the deletion of the cluster. Indicate the time the request will wait to complete the running jobs on the cluster before its forceful deletion. Default value is 0 indicating that the user has not enabled the graceful termination. Value can be between 60 second and 6 Hours, in case the graceful termination is enabled. (There is no separate flag to check the enabling or disabling of graceful termination, it can be checked by the values in the field).", - "format": "google-duration", + "pageSize": { + "description": "Optional. The maximum number of sessions to return in each response. The service may return fewer than this value.", + "format": "int32", "location": "query", - "type": "string" + "type": "integer" }, - "projectId": { - "description": "Required. The ID of the Google Cloud Platform project that the cluster belongs to.", - "location": "path", - "required": true, + "pageToken": { + "description": "Optional. A page token received from a previous ListSessions call. Provide this token to retrieve the subsequent page.", + "location": "query", "type": "string" }, - "region": { - "description": "Required. The Dataproc region in which to handle the request.", + "parent": { + "description": "Required. The parent, which owns this collection of sessions.", "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, "type": "string" - }, - "requestId": { - "description": "Optional. A unique ID used to identify the request. If the server receives two DeleteClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", - "location": "query", - "type": "string" } }, - "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", + "path": "v1/{+parent}/sessions", "response": { - "$ref": "Operation" + "$ref": "ListSessionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, - "diagnose": { - "description": "Gets cluster diagnostic information. 
The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). After the operation completes, Operation.response contains DiagnoseClusterResults (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults).", - "flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:diagnose", + "terminate": { + "description": "Terminates the interactive session.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}:terminate", "httpMethod": "POST", - "id": "dataproc.projects.regions.clusters.diagnose", + "id": "dataproc.projects.locations.sessions.terminate", "parameterOrder": [ - "projectId", - "region", - "clusterName" + "name" ], "parameters": { - "clusterName": { - "description": "Required. The cluster name.", - "location": "path", - "required": true, - "type": "string" - }, - "projectId": { - "description": "Required. The ID of the Google Cloud Platform project that the cluster belongs to.", - "location": "path", - "required": true, - "type": "string" - }, - "region": { - "description": "Required. The Dataproc region in which to handle the request.", + "name": { + "description": "Required. The name of the session resource to terminate.", "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+$", "required": true, "type": "string" } }, - "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:diagnose", + "path": "v1/{+name}:terminate", "request": { - "$ref": "DiagnoseClusterRequest" + "$ref": "TerminateSessionRequest" }, "response": { "$ref": "Operation" @@ -1646,524 +1939,923 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] - }, - "get": { - "description": "Gets the resource representation for a cluster in a project.", - "flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", - "httpMethod": "GET", - "id": "dataproc.projects.regions.clusters.get", - "parameterOrder": [ - "projectId", - "region", - "clusterName" - ], - "parameters": { - "clusterName": { - "description": "Required. The cluster name.", - "location": "path", - "required": true, - "type": "string" - }, - "projectId": { - "description": "Required. The ID of the Google Cloud Platform project that the cluster belongs to.", - "location": "path", - "required": true, - "type": "string" + } + }, + "resources": { + "sparkApplications": { + "methods": { + "access": { + "description": "Obtain high level information corresponding to a single Spark Application.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:access", + "httpMethod": "GET", + "id": "dataproc.projects.locations.sessions.sparkApplications.access", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "parent": { + "description": "Required. 
Parent (Session) resource reference.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:access", + "response": { + "$ref": "AccessSessionSparkApplicationResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] }, - "region": { - "description": "Required. The Dataproc region in which to handle the request.", - "location": "path", - "required": true, - "type": "string" - } - }, - "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", - "response": { - "$ref": "Cluster" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "getIamPolicy": { - "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:getIamPolicy", - "httpMethod": "POST", - "id": "dataproc.projects.regions.clusters.getIamPolicy", - "parameterOrder": [ - "resource" - ], - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", - "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+resource}:getIamPolicy", - "request": { - "$ref": "GetIamPolicyRequest" - }, - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "injectCredentials": { - "description": "Inject encrypted credentials into all of the VMs in a cluster.The target cluster must be a personal auth cluster assigned to the user who is issuing the RPC.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:injectCredentials", - "httpMethod": "POST", - "id": "dataproc.projects.regions.clusters.injectCredentials", - "parameterOrder": [ - "project", - "region", - "cluster" - ], - "parameters": { - "cluster": { - "description": "Required. The cluster, in the form clusters/.", - "location": "path", - "pattern": "^clusters/[^/]+$", - "required": true, - "type": "string" + "accessEnvironmentInfo": { + "description": "Obtain environment details for a Spark Application", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:accessEnvironmentInfo", + "httpMethod": "GET", + "id": "dataproc.projects.locations.sessions.sparkApplications.accessEnvironmentInfo", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "parent": { + "description": "Required. Parent (Session) resource reference.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:accessEnvironmentInfo", + "response": { + "$ref": "AccessSessionSparkApplicationEnvironmentInfoResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] }, - "project": { - "description": "Required. 
The ID of the Google Cloud Platform project the cluster belongs to, of the form projects/.", - "location": "path", - "pattern": "^projects/[^/]+$", - "required": true, - "type": "string" + "accessJob": { + "description": "Obtain data corresponding to a spark job for a Spark Application.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:accessJob", + "httpMethod": "GET", + "id": "dataproc.projects.locations.sessions.sparkApplications.accessJob", + "parameterOrder": [ + "name" + ], + "parameters": { + "jobId": { + "description": "Required. Job ID to fetch data for.", + "format": "int64", + "location": "query", + "type": "string" + }, + "name": { + "description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "parent": { + "description": "Required. Parent (Session) resource reference.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:accessJob", + "response": { + "$ref": "AccessSessionSparkApplicationJobResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] }, - "region": { - "description": "Required. The region containing the cluster, of the form regions/.", - "location": "path", - "pattern": "^regions/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+project}/{+region}/{+cluster}:injectCredentials", - "request": { - "$ref": "InjectCredentialsRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "list": { - "description": "Lists all regions/{region}/clusters in a project alphabetically.", - "flatPath": "v1/projects/{projectId}/regions/{region}/clusters", - "httpMethod": "GET", - "id": "dataproc.projects.regions.clusters.list", - "parameterOrder": [ - "projectId", - "region" - ], - "parameters": { - "filter": { - "description": "Optional. A filter constraining the clusters to list. Filters are case-sensitive and have the following syntax:field = value AND field = value ...where field is one of status.state, clusterName, or labels.[KEY], and [KEY] is a label key. value can be * to match all values. status.state can be one of the following: ACTIVE, INACTIVE, CREATING, RUNNING, ERROR, DELETING, UPDATING, STOPPING, or STOPPED. ACTIVE contains the CREATING, UPDATING, and RUNNING states. INACTIVE contains the DELETING, ERROR, STOPPING, and STOPPED states. clusterName is the name of the cluster provided at creation time. Only the logical AND operator is supported; space-separated items are treated as having an implicit AND operator.Example filter:status.state = ACTIVE AND clusterName = mycluster AND labels.env = staging AND labels.starred = *", - "location": "query", - "type": "string" - }, - "pageSize": { - "description": "Optional. The standard List page size.", - "format": "int32", - "location": "query", - "type": "integer" - }, - "pageToken": { - "description": "Optional. The standard List page token.", - "location": "query", - "type": "string" - }, - "projectId": { - "description": "Required. 
The ID of the Google Cloud Platform project that the cluster belongs to.", - "location": "path", - "required": true, - "type": "string" + "accessSqlPlan": { + "description": "Obtain Spark Plan Graph for a Spark Application SQL execution. Limits the number of clusters returned as part of the graph to 10000.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:accessSqlPlan", + "httpMethod": "GET", + "id": "dataproc.projects.locations.sessions.sparkApplications.accessSqlPlan", + "parameterOrder": [ + "name" + ], + "parameters": { + "executionId": { + "description": "Required. Execution ID", + "format": "int64", + "location": "query", + "type": "string" + }, + "name": { + "description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "parent": { + "description": "Required. Parent (Session) resource reference.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:accessSqlPlan", + "response": { + "$ref": "AccessSessionSparkApplicationSqlSparkPlanGraphResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] }, - "region": { - "description": "Required. The Dataproc region in which to handle the request.", - "location": "path", - "required": true, - "type": "string" - } - }, - "path": "v1/projects/{projectId}/regions/{region}/clusters", - "response": { - "$ref": "ListClustersResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "patch": { - "description": "Updates a cluster in a project. The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). The cluster must be in a RUNNING state or an error is returned.", - "flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", - "httpMethod": "PATCH", - "id": "dataproc.projects.regions.clusters.patch", - "parameterOrder": [ - "projectId", - "region", - "clusterName" - ], - "parameters": { - "clusterName": { - "description": "Required. The cluster name.", - "location": "path", - "required": true, - "type": "string" + "accessSqlQuery": { + "description": "Obtain data corresponding to a particular SQL Query for a Spark Application.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:accessSqlQuery", + "httpMethod": "GET", + "id": "dataproc.projects.locations.sessions.sparkApplications.accessSqlQuery", + "parameterOrder": [ + "name" + ], + "parameters": { + "details": { + "description": "Optional. Lists/ hides details of Spark plan nodes. True is set to list and false to hide.", + "location": "query", + "type": "boolean" + }, + "executionId": { + "description": "Required. Execution ID", + "format": "int64", + "location": "query", + "type": "string" + }, + "name": { + "description": "Required. 
The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "parent": { + "description": "Required. Parent (Session) resource reference.", + "location": "query", + "type": "string" + }, + "planDescription": { + "description": "Optional. Enables/ disables physical plan description on demand", + "location": "query", + "type": "boolean" + } + }, + "path": "v1/{+name}:accessSqlQuery", + "response": { + "$ref": "AccessSessionSparkApplicationSqlQueryResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] }, - "gracefulDecommissionTimeout": { - "description": "Optional. Timeout for graceful YARN decommissioning. Graceful decommissioning allows removing nodes from the cluster without interrupting jobs in progress. Timeout specifies how long to wait for jobs in progress to finish before forcefully removing nodes (and potentially interrupting jobs). Default timeout is 0 (for forceful decommission), and the maximum allowed timeout is 1 day. (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).Only supported on Dataproc image versions 1.2 and higher.", - "format": "google-duration", - "location": "query", - "type": "string" + "accessStageAttempt": { + "description": "Obtain data corresponding to a spark stage attempt for a Spark Application.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:accessStageAttempt", + "httpMethod": "GET", + "id": "dataproc.projects.locations.sessions.sparkApplications.accessStageAttempt", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "parent": { + "description": "Required. Parent (Session) resource reference.", + "location": "query", + "type": "string" + }, + "stageAttemptId": { + "description": "Required. Stage Attempt ID", + "format": "int32", + "location": "query", + "type": "integer" + }, + "stageId": { + "description": "Required. Stage ID", + "format": "int64", + "location": "query", + "type": "string" + }, + "summaryMetricsMask": { + "description": "Optional. The list of summary metrics fields to include. Empty list will default to skip all summary metrics fields. Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:accessStageAttempt", + "response": { + "$ref": "AccessSessionSparkApplicationStageAttemptResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] }, - "projectId": { - "description": "Required. The ID of the Google Cloud Platform project the cluster belongs to.", - "location": "path", - "required": true, - "type": "string" + "accessStageRddGraph": { + "description": "Obtain RDD operation graph for a Spark Application Stage. 
Limits the number of clusters returned as part of the graph to 10000.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:accessStageRddGraph", + "httpMethod": "GET", + "id": "dataproc.projects.locations.sessions.sparkApplications.accessStageRddGraph", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "parent": { + "description": "Required. Parent (Session) resource reference.", + "location": "query", + "type": "string" + }, + "stageId": { + "description": "Required. Stage ID", + "format": "int64", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:accessStageRddGraph", + "response": { + "$ref": "AccessSessionSparkApplicationStageRddOperationGraphResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] }, - "region": { - "description": "Required. The Dataproc region in which to handle the request.", - "location": "path", - "required": true, - "type": "string" + "search": { + "description": "Obtain high level information and list of Spark Applications corresponding to a batch", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications:search", + "httpMethod": "GET", + "id": "dataproc.projects.locations.sessions.sparkApplications.search", + "parameterOrder": [ + "parent" + ], + "parameters": { + "applicationStatus": { + "description": "Optional. Search only applications in the chosen state.", + "enum": [ + "APPLICATION_STATUS_UNSPECIFIED", + "APPLICATION_STATUS_RUNNING", + "APPLICATION_STATUS_COMPLETED" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "location": "query", + "type": "string" + }, + "maxEndTime": { + "description": "Optional. Latest end timestamp to list.", + "format": "google-datetime", + "location": "query", + "type": "string" + }, + "maxTime": { + "description": "Optional. Latest start timestamp to list.", + "format": "google-datetime", + "location": "query", + "type": "string" + }, + "minEndTime": { + "description": "Optional. Earliest end timestamp to list.", + "format": "google-datetime", + "location": "query", + "type": "string" + }, + "minTime": { + "description": "Optional. Earliest start timestamp to list.", + "format": "google-datetime", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "Optional. Maximum number of applications to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. A page token received from a previous SearchSessionSparkApplications call. Provide this token to retrieve the subsequent page.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. 
The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/sparkApplications:search", + "response": { + "$ref": "SearchSessionSparkApplicationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] }, - "requestId": { - "description": "Optional. A unique ID used to identify the request. If the server receives two UpdateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", - "location": "query", - "type": "string" + "searchExecutorStageSummary": { + "description": "Obtain executor summary with respect to a spark stage attempt.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:searchExecutorStageSummary", + "httpMethod": "GET", + "id": "dataproc.projects.locations.sessions.sparkApplications.searchExecutorStageSummary", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "Optional. Maximum number of executors to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. A page token received from a previous SearchSessionSparkApplicationExecutorStageSummary call. Provide this token to retrieve the subsequent page.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. Parent (Session) resource reference.", + "location": "query", + "type": "string" + }, + "stageAttemptId": { + "description": "Required. Stage Attempt ID", + "format": "int32", + "location": "query", + "type": "integer" + }, + "stageId": { + "description": "Required. Stage ID", + "format": "int64", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:searchExecutorStageSummary", + "response": { + "$ref": "SearchSessionSparkApplicationExecutorStageSummaryResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] }, - "updateMask": { - "description": "Required. Specifies the path, relative to Cluster, of the field to update. 
For example, to change the number of workers in a cluster to 5, the update_mask parameter would be specified as config.worker_config.num_instances, and the PATCH request body would specify the new value, as follows: { \"config\":{ \"workerConfig\":{ \"numInstances\":\"5\" } } } Similarly, to change the number of preemptible workers in a cluster to 5, the update_mask parameter would be config.secondary_worker_config.num_instances, and the PATCH request body would be set as follows: { \"config\":{ \"secondaryWorkerConfig\":{ \"numInstances\":\"5\" } } } *Note:* Currently, only the following fields can be updated: *Mask* *Purpose* *labels* Update labels *config.worker_config.num_instances* Resize primary worker group *config.secondary_worker_config.num_instances* Resize secondary worker group config.autoscaling_config.policy_uri Use, stop using, or change autoscaling policies ", - "format": "google-fieldmask", - "location": "query", - "type": "string" - } - }, - "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", - "request": { - "$ref": "Cluster" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "repair": { - "description": "Repairs a cluster.", - "flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:repair", - "httpMethod": "POST", - "id": "dataproc.projects.regions.clusters.repair", - "parameterOrder": [ - "projectId", - "region", - "clusterName" - ], - "parameters": { - "clusterName": { - "description": "Required. The cluster name.", - "location": "path", - "required": true, - "type": "string" - }, - "projectId": { - "description": "Required. The ID of the Google Cloud Platform project the cluster belongs to.", - "location": "path", - "required": true, - "type": "string" - }, - "region": { - "description": "Required. The Dataproc region in which to handle the request.", - "location": "path", - "required": true, - "type": "string" - } - }, - "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:repair", - "request": { - "$ref": "RepairClusterRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:setIamPolicy", - "httpMethod": "POST", - "id": "dataproc.projects.regions.clusters.setIamPolicy", - "parameterOrder": [ - "resource" - ], - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being specified. 
See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", - "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+resource}:setIamPolicy", - "request": { - "$ref": "SetIamPolicyRequest" - }, - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "start": { - "description": "Starts a cluster in a project.", - "flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:start", - "httpMethod": "POST", - "id": "dataproc.projects.regions.clusters.start", - "parameterOrder": [ - "projectId", - "region", - "clusterName" - ], - "parameters": { - "clusterName": { - "description": "Required. The cluster name.", - "location": "path", - "required": true, - "type": "string" - }, - "projectId": { - "description": "Required. The ID of the Google Cloud Platform project the cluster belongs to.", - "location": "path", - "required": true, - "type": "string" - }, - "region": { - "description": "Required. The Dataproc region in which to handle the request.", - "location": "path", - "required": true, - "type": "string" - } - }, - "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:start", - "request": { - "$ref": "StartClusterRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "stop": { - "description": "Stops a cluster in a project.", - "flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:stop", - "httpMethod": "POST", - "id": "dataproc.projects.regions.clusters.stop", - "parameterOrder": [ - "projectId", - "region", - "clusterName" - ], - "parameters": { - "clusterName": { - "description": "Required. The cluster name.", - "location": "path", - "required": true, - "type": "string" - }, - "projectId": { - "description": "Required. The ID of the Google Cloud Platform project the cluster belongs to.", - "location": "path", - "required": true, - "type": "string" - }, - "region": { - "description": "Required. The Dataproc region in which to handle the request.", - "location": "path", - "required": true, - "type": "string" - } - }, - "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:stop", - "request": { - "$ref": "StopClusterRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:testIamPermissions", - "httpMethod": "POST", - "id": "dataproc.projects.regions.clusters.testIamPermissions", - "parameterOrder": [ - "resource" - ], - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested. 
See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", - "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+resource}:testIamPermissions", - "request": { - "$ref": "TestIamPermissionsRequest" - }, - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - } - }, - "resources": { - "nodeGroups": { - "methods": { - "create": { - "description": "Creates a node group in a cluster. The returned Operation.metadata is NodeGroupOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata).", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}/nodeGroups", - "httpMethod": "POST", - "id": "dataproc.projects.regions.clusters.nodeGroups.create", + "searchExecutors": { + "description": "Obtain data corresponding to executors for a Spark Application.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:searchExecutors", + "httpMethod": "GET", + "id": "dataproc.projects.locations.sessions.sparkApplications.searchExecutors", "parameterOrder": [ - "parent" + "name" ], "parameters": { - "nodeGroupId": { - "description": "Optional. An optional node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters.", + "executorStatus": { + "description": "Optional. Filter to select whether active/ dead or all executors should be selected.", + "enum": [ + "EXECUTOR_STATUS_UNSPECIFIED", + "EXECUTOR_STATUS_ACTIVE", + "EXECUTOR_STATUS_DEAD" + ], + "enumDescriptions": [ + "", + "", + "" + ], "location": "query", "type": "string" }, - "parent": { - "description": "Required. The parent resource where this node group will be created. Format: projects/{project}/regions/{region}/clusters/{cluster}", + "name": { + "description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+$", + "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" }, - "parentOperationId": { - "description": "Optional. operation id of the parent operation sending the create request", + "pageSize": { + "description": "Optional. Maximum number of executors to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. A page token received from a previous SearchSessionSparkApplicationExecutors call. Provide this token to retrieve the subsequent page.", "location": "query", "type": "string" }, - "requestId": { - "description": "Optional. A unique ID used to identify the request. 
If the server receives two CreateNodeGroupRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateNodeGroupRequest) with the same ID, the second request is ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", + "parent": { + "description": "Required. Parent (Session) resource reference.", "location": "query", "type": "string" } }, - "path": "v1/{+parent}/nodeGroups", - "request": { - "$ref": "NodeGroup" - }, + "path": "v1/{+name}:searchExecutors", "response": { - "$ref": "Operation" + "$ref": "SearchSessionSparkApplicationExecutorsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, - "get": { - "description": "Gets the resource representation for a node group in a cluster.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}/nodeGroups/{nodeGroupsId}", + "searchJobs": { + "description": "Obtain list of spark jobs corresponding to a Spark Application.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:searchJobs", "httpMethod": "GET", - "id": "dataproc.projects.regions.clusters.nodeGroups.get", + "id": "dataproc.projects.locations.sessions.sparkApplications.searchJobs", "parameterOrder": [ "name" ], "parameters": { + "jobStatus": { + "description": "Optional. List only jobs in the specific state.", + "enum": [ + "JOB_EXECUTION_STATUS_UNSPECIFIED", + "JOB_EXECUTION_STATUS_RUNNING", + "JOB_EXECUTION_STATUS_SUCCEEDED", + "JOB_EXECUTION_STATUS_FAILED", + "JOB_EXECUTION_STATUS_UNKNOWN" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ], + "location": "query", + "type": "string" + }, "name": { - "description": "Required. The name of the node group to retrieve. Format: projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}", + "description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+/nodeGroups/[^/]+$", + "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" + }, + "pageSize": { + "description": "Optional. Maximum number of jobs to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. A page token received from a previous SearchSessionSparkApplicationJobs call. Provide this token to retrieve the subsequent page.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. 
Parent (Session) resource reference.", + "location": "query", + "type": "string" } }, - "path": "v1/{+name}", + "path": "v1/{+name}:searchJobs", "response": { - "$ref": "NodeGroup" + "$ref": "SearchSessionSparkApplicationJobsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, - "repair": { - "description": "Repair nodes in a node group.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}/nodeGroups/{nodeGroupsId}:repair", - "httpMethod": "POST", - "id": "dataproc.projects.regions.clusters.nodeGroups.repair", + "searchSqlQueries": { + "description": "Obtain data corresponding to SQL Queries for a Spark Application.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:searchSqlQueries", + "httpMethod": "GET", + "id": "dataproc.projects.locations.sessions.sparkApplications.searchSqlQueries", "parameterOrder": [ "name" ], "parameters": { + "details": { + "description": "Optional. Lists/ hides details of Spark plan nodes. True is set to list and false to hide.", + "location": "query", + "type": "boolean" + }, "name": { - "description": "Required. The name of the node group to resize. Format: projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}", + "description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+/nodeGroups/[^/]+$", + "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" - } - }, - "path": "v1/{+name}:repair", - "request": { - "$ref": "RepairNodeGroupRequest" + }, + "pageSize": { + "description": "Optional. Maximum number of queries to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. A page token received from a previous SearchSessionSparkApplicationSqlQueries call. Provide this token to retrieve the subsequent page.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. Parent (Session) resource reference.", + "location": "query", + "type": "string" + }, + "planDescription": { + "description": "Optional. Enables/ disables physical plan description on demand", + "location": "query", + "type": "boolean" + } }, + "path": "v1/{+name}:searchSqlQueries", "response": { - "$ref": "Operation" + "$ref": "SearchSessionSparkApplicationSqlQueriesResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, - "resize": { - "description": "Resizes a node group in a cluster. 
The returned Operation.metadata is NodeGroupOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata).", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}/nodeGroups/{nodeGroupsId}:resize", + "searchStageAttemptTasks": { + "description": "Obtain data corresponding to tasks for a spark stage attempt for a Spark Application.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:searchStageAttemptTasks", + "httpMethod": "GET", + "id": "dataproc.projects.locations.sessions.sparkApplications.searchStageAttemptTasks", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "Optional. Maximum number of tasks to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. A page token received from a previous SearchSessionSparkApplicationStageAttemptTasks call. Provide this token to retrieve the subsequent page.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. Parent (Session) resource reference.", + "location": "query", + "type": "string" + }, + "sortRuntime": { + "description": "Optional. Sort the tasks by runtime.", + "location": "query", + "type": "boolean" + }, + "stageAttemptId": { + "description": "Optional. Stage Attempt ID", + "format": "int32", + "location": "query", + "type": "integer" + }, + "stageId": { + "description": "Optional. Stage ID", + "format": "int64", + "location": "query", + "type": "string" + }, + "taskStatus": { + "description": "Optional. List only tasks in the state.", + "enum": [ + "TASK_STATUS_UNSPECIFIED", + "TASK_STATUS_RUNNING", + "TASK_STATUS_SUCCESS", + "TASK_STATUS_FAILED", + "TASK_STATUS_KILLED", + "TASK_STATUS_PENDING" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "" + ], + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:searchStageAttemptTasks", + "response": { + "$ref": "SearchSessionSparkApplicationStageAttemptTasksResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "searchStageAttempts": { + "description": "Obtain data corresponding to a spark stage attempts for a Spark Application.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:searchStageAttempts", + "httpMethod": "GET", + "id": "dataproc.projects.locations.sessions.sparkApplications.searchStageAttempts", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "Optional. 
Maximum number of stage attempts (paging based on stage_attempt_id) to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. A page token received from a previous SearchSessionSparkApplicationStageAttempts call. Provide this token to retrieve the subsequent page.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. Parent (Session) resource reference.", + "location": "query", + "type": "string" + }, + "stageId": { + "description": "Required. Stage ID for which attempts are to be fetched", + "format": "int64", + "location": "query", + "type": "string" + }, + "summaryMetricsMask": { + "description": "Optional. The list of summary metrics fields to include. Empty list will default to skip all summary metrics fields. Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:searchStageAttempts", + "response": { + "$ref": "SearchSessionSparkApplicationStageAttemptsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "searchStages": { + "description": "Obtain data corresponding to stages for a Spark Application.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:searchStages", + "httpMethod": "GET", + "id": "dataproc.projects.locations.sessions.sparkApplications.searchStages", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "Optional. Maximum number of stages (paging based on stage_id) to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. A page token received from a previous SearchSessionSparkApplicationStages call. Provide this token to retrieve the subsequent page.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. Parent (Session) resource reference.", + "location": "query", + "type": "string" + }, + "stageStatus": { + "description": "Optional. List only stages in the given state.", + "enum": [ + "STAGE_STATUS_UNSPECIFIED", + "STAGE_STATUS_ACTIVE", + "STAGE_STATUS_COMPLETE", + "STAGE_STATUS_FAILED", + "STAGE_STATUS_PENDING", + "STAGE_STATUS_SKIPPED" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "" + ], + "location": "query", + "type": "string" + }, + "summaryMetricsMask": { + "description": "Optional. The list of summary metrics fields to include. Empty list will default to skip all summary metrics fields. 
Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:searchStages", + "response": { + "$ref": "SearchSessionSparkApplicationStagesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "summarizeExecutors": { + "description": "Obtain summary of Executor Summary for a Spark Application", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:summarizeExecutors", + "httpMethod": "GET", + "id": "dataproc.projects.locations.sessions.sparkApplications.summarizeExecutors", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "parent": { + "description": "Required. Parent (Session) resource reference.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:summarizeExecutors", + "response": { + "$ref": "SummarizeSessionSparkApplicationExecutorsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "summarizeJobs": { + "description": "Obtain summary of Jobs for a Spark Application", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:summarizeJobs", + "httpMethod": "GET", + "id": "dataproc.projects.locations.sessions.sparkApplications.summarizeJobs", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "parent": { + "description": "Required. Parent (Session) resource reference.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:summarizeJobs", + "response": { + "$ref": "SummarizeSessionSparkApplicationJobsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "summarizeStageAttemptTasks": { + "description": "Obtain summary of Tasks for a Spark Application Stage Attempt", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:summarizeStageAttemptTasks", + "httpMethod": "GET", + "id": "dataproc.projects.locations.sessions.sparkApplications.summarizeStageAttemptTasks", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "parent": { + "description": "Required. 
Parent (Session) resource reference.", + "location": "query", + "type": "string" + }, + "stageAttemptId": { + "description": "Required. Stage Attempt ID", + "format": "int32", + "location": "query", + "type": "integer" + }, + "stageId": { + "description": "Required. Stage ID", + "format": "int64", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:summarizeStageAttemptTasks", + "response": { + "$ref": "SummarizeSessionSparkApplicationStageAttemptTasksResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "summarizeStages": { + "description": "Obtain summary of Stages for a Spark Application", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:summarizeStages", + "httpMethod": "GET", + "id": "dataproc.projects.locations.sessions.sparkApplications.summarizeStages", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", + "required": true, + "type": "string" + }, + "parent": { + "description": "Required. Parent (Session) resource reference.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}:summarizeStages", + "response": { + "$ref": "SummarizeSessionSparkApplicationStagesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "write": { + "description": "Write wrapper objects from dataplane to spanner", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:write", "httpMethod": "POST", - "id": "dataproc.projects.regions.clusters.nodeGroups.resize", + "id": "dataproc.projects.locations.sessions.sparkApplications.write", "parameterOrder": [ "name" ], "parameters": { "name": { - "description": "Required. The name of the node group to resize. Format: projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}", + "description": "Required. The fully qualified name of the spark application to write data about in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+/nodeGroups/[^/]+$", + "pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" } }, - "path": "v1/{+name}:resize", + "path": "v1/{+name}:write", "request": { - "$ref": "ResizeNodeGroupRequest" + "$ref": "WriteSessionSparkApplicationContextRequest" }, "response": { - "$ref": "Operation" + "$ref": "WriteSessionSparkApplicationContextResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" @@ -2173,80 +2865,60 @@ } } }, - "jobs": { + "workflowTemplates": { "methods": { - "cancel": { - "description": "Starts a job cancellation request. 
To access the job resource after cancellation, call regions/{region}/jobs.list (https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or regions/{region}/jobs.get (https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).", - "flatPath": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}:cancel", + "create": { + "description": "Creates new workflow template.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates", "httpMethod": "POST", - "id": "dataproc.projects.regions.jobs.cancel", + "id": "dataproc.projects.locations.workflowTemplates.create", "parameterOrder": [ - "projectId", - "region", - "jobId" + "parent" ], "parameters": { - "jobId": { - "description": "Required. The job ID.", - "location": "path", - "required": true, - "type": "string" - }, - "projectId": { - "description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", - "location": "path", - "required": true, - "type": "string" - }, - "region": { - "description": "Required. The Dataproc region in which to handle the request.", + "parent": { + "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.create, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, "type": "string" } }, - "path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}:cancel", + "path": "v1/{+parent}/workflowTemplates", "request": { - "$ref": "CancelJobRequest" + "$ref": "WorkflowTemplate" }, "response": { - "$ref": "Job" + "$ref": "WorkflowTemplate" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, "delete": { - "description": "Deletes the job from the project. If the job is active, the delete fails, and the response returns FAILED_PRECONDITION.", - "flatPath": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", + "description": "Deletes a workflow template. It does not cancel in-progress workflows.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}", "httpMethod": "DELETE", - "id": "dataproc.projects.regions.jobs.delete", + "id": "dataproc.projects.locations.workflowTemplates.delete", "parameterOrder": [ - "projectId", - "region", - "jobId" + "name" ], "parameters": { - "jobId": { - "description": "Required. The job ID.", - "location": "path", - "required": true, - "type": "string" - }, - "projectId": { - "description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", + "name": { + "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}",
                       "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$",
                       "required": true,
                       "type": "string"
                     },
-                    "region": {
-                      "description": "Required. The Dataproc region in which to handle the request.",
-                      "location": "path",
-                      "required": true,
-                      "type": "string"
+                    "version": {
+                      "description": "Optional. The version of workflow template to delete. If specified, will only delete the template if the current server version matches specified version.",
+                      "format": "int32",
+                      "location": "query",
+                      "type": "integer"
                     }
                   },
-                  "path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}",
+                  "path": "v1/{+name}",
                   "response": {
                     "$ref": "Empty"
                   },
@@ -2255,38 +2927,31 @@
                   ]
                 },
                 "get": {
-                  "description": "Gets the resource representation for a job in a project.",
-                  "flatPath": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}",
+                  "description": "Retrieves the latest workflow template.Can retrieve previously instantiated template by specifying optional version parameter.",
+                  "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}",
                   "httpMethod": "GET",
-                  "id": "dataproc.projects.regions.jobs.get",
+                  "id": "dataproc.projects.locations.workflowTemplates.get",
                   "parameterOrder": [
-                    "projectId",
-                    "region",
-                    "jobId"
+                    "name"
                   ],
                   "parameters": {
-                    "jobId": {
-                      "description": "Required. The job ID.",
-                      "location": "path",
-                      "required": true,
-                      "type": "string"
-                    },
-                    "projectId": {
-                      "description": "Required. The ID of the Google Cloud Platform project that the job belongs to.",
+                    "name": {
+                      "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}",
                       "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$",
                       "required": true,
                       "type": "string"
                     },
-                    "region": {
-                      "description": "Required. The Dataproc region in which to handle the request.",
-                      "location": "path",
-                      "required": true,
-                      "type": "string"
+                    "version": {
+                      "description": "Optional. The version of workflow template to retrieve. Only previously instantiated versions can be retrieved.If unspecified, retrieves the current version.",
+                      "format": "int32",
+                      "location": "query",
+                      "type": "integer"
                     }
                   },
-                  "path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}",
+                  "path": "v1/{+name}",
                   "response": {
-                    "$ref": "Job"
+                    "$ref": "WorkflowTemplate"
                   },
                   "scopes": [
                     "https://www.googleapis.com/auth/cloud-platform"
@@ -2294,9 +2959,9 @@
                 },
                 "getIamPolicy": {
                   "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.",
-                  "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/jobs/{jobsId}:getIamPolicy",
+                  "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}:getIamPolicy",
                   "httpMethod": "POST",
-                  "id": "dataproc.projects.regions.jobs.getIamPolicy",
+                  "id": "dataproc.projects.locations.workflowTemplates.getIamPolicy",
                   "parameterOrder": [
                     "resource"
                   ],
@@ -2304,7 +2969,7 @@
                     "resource": {
                       "description": "REQUIRED: The resource for which the policy is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.",
                       "location": "path",
-                      "pattern": "^projects/[^/]+/regions/[^/]+/jobs/[^/]+$",
+                      "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$",
                       "required": true,
                       "type": "string"
                     }
@@ -2320,115 +2985,98 @@
                     "https://www.googleapis.com/auth/cloud-platform"
                   ]
                 },
-                "list": {
-                  "description": "Lists regions/{region}/jobs in a project.",
-                  "flatPath": "v1/projects/{projectId}/regions/{region}/jobs",
-                  "httpMethod": "GET",
-                  "id": "dataproc.projects.regions.jobs.list",
+                "instantiate": {
+                  "description": "Instantiates a template and begins execution.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). Also see Using WorkflowMetadata (https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).On successful completion, Operation.response will be Empty.",
+                  "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}:instantiate",
+                  "httpMethod": "POST",
+                  "id": "dataproc.projects.locations.workflowTemplates.instantiate",
                   "parameterOrder": [
-                    "projectId",
-                    "region"
+                    "name"
                   ],
                   "parameters": {
-                    "clusterName": {
-                      "description": "Optional. If set, the returned jobs list includes only jobs that were submitted to the named cluster.",
-                      "location": "query",
-                      "type": "string"
-                    },
-                    "filter": {
-                      "description": "Optional. A filter constraining the jobs to list. Filters are case-sensitive and have the following syntax:field = value AND field = value ...where field is status.state or labels.[KEY], and [KEY] is a label key. value can be * to match all values. status.state can be either ACTIVE or NON_ACTIVE. Only the logical AND operator is supported; space-separated items are treated as having an implicit AND operator.Example filter:status.state = ACTIVE AND labels.env = staging AND labels.starred = *",
-                      "location": "query",
-                      "type": "string"
-                    },
-                    "jobStateMatcher": {
-                      "description": "Optional. Specifies enumerated categories of jobs to list. (default = match ALL jobs).If filter is provided, jobStateMatcher will be ignored.",
-                      "enum": [
-                        "ALL",
-                        "ACTIVE",
-                        "NON_ACTIVE"
-                      ],
-                      "enumDescriptions": [
-                        "Match all jobs, regardless of state.",
-                        "Only match jobs in non-terminal states: PENDING, RUNNING, or CANCEL_PENDING.",
-                        "Only match jobs in terminal states: CANCELLED, DONE, or ERROR."
-                      ],
-                      "location": "query",
-                      "type": "string"
-                    },
-                    "pageSize": {
-                      "description": "Optional. The number of results to return in each response.",
-                      "format": "int32",
-                      "location": "query",
-                      "type": "integer"
-                    },
-                    "pageToken": {
-                      "description": "Optional. The page token, returned by a previous call, to request the next page of results.",
-                      "location": "query",
-                      "type": "string"
-                    },
-                    "projectId": {
-                      "description": "Required. The ID of the Google Cloud Platform project that the job belongs to.",
-                      "location": "path",
-                      "required": true,
-                      "type": "string"
-                    },
-                    "region": {
-                      "description": "Required. The Dataproc region in which to handle the request.",
+                    "name": {
+                      "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}",
                       "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$",
                       "required": true,
                       "type": "string"
                     }
                   },
-                  "path": "v1/projects/{projectId}/regions/{region}/jobs",
+                  "path": "v1/{+name}:instantiate",
+                  "request": {
+                    "$ref": "InstantiateWorkflowTemplateRequest"
+                  },
                   "response": {
-                    "$ref": "ListJobsResponse"
+                    "$ref": "Operation"
                   },
                   "scopes": [
                     "https://www.googleapis.com/auth/cloud-platform"
                   ]
                 },
-                "patch": {
-                  "description": "Updates a job in a project.",
-                  "flatPath": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}",
-                  "httpMethod": "PATCH",
-                  "id": "dataproc.projects.regions.jobs.patch",
+                "instantiateInline": {
+                  "description": "Instantiates a template and begins execution.This method is equivalent to executing the sequence CreateWorkflowTemplate, InstantiateWorkflowTemplate, DeleteWorkflowTemplate.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). Also see Using WorkflowMetadata (https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).On successful completion, Operation.response will be Empty.",
+                  "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates:instantiateInline",
+                  "httpMethod": "POST",
+                  "id": "dataproc.projects.locations.workflowTemplates.instantiateInline",
                   "parameterOrder": [
-                    "projectId",
-                    "region",
-                    "jobId"
+                    "parent"
                   ],
                   "parameters": {
-                    "jobId": {
-                      "description": "Required. The job ID.",
+                    "parent": {
+                      "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.instantiateInline, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.instantiateInline, the resource name of the location has the following format: projects/{project_id}/locations/{location}",
                       "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+$",
                       "required": true,
                       "type": "string"
                     },
-                    "projectId": {
-                      "description": "Required. The ID of the Google Cloud Platform project that the job belongs to.",
-                      "location": "path",
-                      "required": true,
+                    "requestId": {
+                      "description": "Optional. A tag that prevents multiple concurrent workflow instances with the same tag from running. This mitigates risk of concurrent instances started due to retries.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.",
+                      "location": "query",
                       "type": "string"
+                    }
+                  },
+                  "path": "v1/{+parent}/workflowTemplates:instantiateInline",
+                  "request": {
+                    "$ref": "WorkflowTemplate"
+                  },
+                  "response": {
+                    "$ref": "Operation"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
+                "list": {
+                  "description": "Lists workflows that match the specified filter in the request.",
+                  "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates",
+                  "httpMethod": "GET",
+                  "id": "dataproc.projects.locations.workflowTemplates.list",
+                  "parameterOrder": [
+                    "parent"
+                  ],
+                  "parameters": {
+                    "pageSize": {
+                      "description": "Optional. The maximum number of results to return in each response.",
+                      "format": "int32",
+                      "location": "query",
+                      "type": "integer"
                     },
-                    "region": {
-                      "description": "Required. The Dataproc region in which to handle the request.",
-                      "location": "path",
-                      "required": true,
+                    "pageToken": {
+                      "description": "Optional. The page token, returned by a previous call, to request the next page of results.",
+                      "location": "query",
                       "type": "string"
                     },
-                    "updateMask": {
-                      "description": "Required. Specifies the path, relative to Job, of the field to update. For example, to update the labels of a Job the update_mask parameter would be specified as labels, and the PATCH request body would specify the new value. *Note:* Currently, labels is the only field that can be updated.",
-                      "format": "google-fieldmask",
-                      "location": "query",
+                    "parent": {
+                      "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}",
                       "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+$",
                       "required": true,
                       "type": "string"
                     }
                   },
-                  "path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}",
-                  "request": {
-                    "$ref": "Job"
-                  },
+                  "path": "v1/{+parent}/workflowTemplates",
                   "response": {
-                    "$ref": "Job"
+                    "$ref": "ListWorkflowTemplatesResponse"
                   },
                   "scopes": [
                     "https://www.googleapis.com/auth/cloud-platform"
@@ -2436,9 +3084,9 @@
                 },
                 "setIamPolicy": {
                   "description": "Sets the access control policy on the specified resource.
Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/jobs/{jobsId}:setIamPolicy", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}:setIamPolicy", "httpMethod": "POST", - "id": "dataproc.projects.regions.jobs.setIamPolicy", + "id": "dataproc.projects.locations.workflowTemplates.setIamPolicy", "parameterOrder": [ "resource" ], @@ -2446,7 +3094,7 @@ "resource": { "description": "REQUIRED: The resource for which the policy is being specified. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/jobs/[^/]+$", + "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", "required": true, "type": "string" } @@ -2462,144 +3110,111 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, - "submit": { - "description": "Submits a job to a cluster.", - "flatPath": "v1/projects/{projectId}/regions/{region}/jobs:submit", + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}:testIamPermissions", "httpMethod": "POST", - "id": "dataproc.projects.regions.jobs.submit", + "id": "dataproc.projects.locations.workflowTemplates.testIamPermissions", "parameterOrder": [ - "projectId", - "region" + "resource" ], "parameters": { - "projectId": { - "description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", - "location": "path", - "required": true, - "type": "string" - }, - "region": { - "description": "Required. The Dataproc region in which to handle the request.", + "resource": { + "description": "REQUIRED: The resource for which the policy detail is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", "required": true, "type": "string" } }, - "path": "v1/projects/{projectId}/regions/{region}/jobs:submit", + "path": "v1/{+resource}:testIamPermissions", "request": { - "$ref": "SubmitJobRequest" + "$ref": "TestIamPermissionsRequest" }, "response": { - "$ref": "Job" + "$ref": "TestIamPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, - "submitAsOperation": { - "description": "Submits job to a cluster.", - "flatPath": "v1/projects/{projectId}/regions/{region}/jobs:submitAsOperation", - "httpMethod": "POST", - "id": "dataproc.projects.regions.jobs.submitAsOperation", + "update": { + "description": "Updates (replaces) workflow template. The updated template must contain version that matches the current server version.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}", + "httpMethod": "PUT", + "id": "dataproc.projects.locations.workflowTemplates.update", "parameterOrder": [ - "projectId", - "region" - ], - "parameters": { - "projectId": { - "description": "Required. 
The ID of the Google Cloud Platform project that the job belongs to.", - "location": "path", - "required": true, - "type": "string" - }, - "region": { - "description": "Required. The Dataproc region in which to handle the request.", - "location": "path", - "required": true, - "type": "string" - } - }, - "path": "v1/projects/{projectId}/regions/{region}/jobs:submitAsOperation", - "request": { - "$ref": "SubmitJobRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/jobs/{jobsId}:testIamPermissions", - "httpMethod": "POST", - "id": "dataproc.projects.regions.jobs.testIamPermissions", - "parameterOrder": [ - "resource" + "name" ], "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "name": { + "description": "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/jobs/[^/]+$", + "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", "required": true, "type": "string" } }, - "path": "v1/{+resource}:testIamPermissions", + "path": "v1/{+name}", "request": { - "$ref": "TestIamPermissionsRequest" + "$ref": "WorkflowTemplate" }, "response": { - "$ref": "TestIamPermissionsResponse" + "$ref": "WorkflowTemplate" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] } } - }, - "operations": { + } + } + }, + "regions": { + "resources": { + "autoscalingPolicies": { "methods": { - "cancel": { - "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. 
On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:cancel", + "create": { + "description": "Creates new autoscaling policy.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies", "httpMethod": "POST", - "id": "dataproc.projects.regions.operations.cancel", + "id": "dataproc.projects.regions.autoscalingPolicies.create", "parameterOrder": [ - "name" + "parent" ], "parameters": { - "name": { - "description": "The name of the operation resource to be cancelled.", + "parent": { + "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.create, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.autoscalingPolicies.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", + "pattern": "^projects/[^/]+/regions/[^/]+$", "required": true, "type": "string" } }, - "path": "v1/{+name}:cancel", + "path": "v1/{+parent}/autoscalingPolicies", + "request": { + "$ref": "AutoscalingPolicy" + }, "response": { - "$ref": "Empty" + "$ref": "AutoscalingPolicy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, "delete": { - "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}", + "description": "Deletes an autoscaling policy. It is an error to delete an autoscaling policy that is in use by one or more clusters.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}", "httpMethod": "DELETE", - "id": "dataproc.projects.regions.operations.delete", + "id": "dataproc.projects.regions.autoscalingPolicies.delete", "parameterOrder": [ "name" ], "parameters": { "name": { - "description": "The name of the operation resource to be deleted.", + "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", + "pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", "required": true, "type": "string" } @@ -2613,25 +3228,25 @@ ] }, "get": { - "description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}", + "description": "Retrieves autoscaling policy.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}", "httpMethod": "GET", - "id": "dataproc.projects.regions.operations.get", + "id": "dataproc.projects.regions.autoscalingPolicies.get", "parameterOrder": [ "name" ], "parameters": { "name": { - "description": "The name of the operation resource.", + "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", + "pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", "required": true, "type": "string" } }, "path": "v1/{+name}", "response": { - "$ref": "Operation" + "$ref": "AutoscalingPolicy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" @@ -2639,9 +3254,9 @@ }, "getIamPolicy": { "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:getIamPolicy", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}:getIamPolicy", "httpMethod": "POST", - "id": "dataproc.projects.regions.operations.getIamPolicy", + "id": "dataproc.projects.regions.autoscalingPolicies.getIamPolicy", "parameterOrder": [ "resource" ], @@ -2649,7 +3264,7 @@ "resource": { "description": "REQUIRED: The resource for which the policy is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", + "pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", "required": true, "type": "string" } @@ -2666,41 +3281,36 @@ ] }, "list": { - "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations", + "description": "Lists autoscaling policies in the project.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies", "httpMethod": "GET", - "id": "dataproc.projects.regions.operations.list", + "id": "dataproc.projects.regions.autoscalingPolicies.list", "parameterOrder": [ - "name" + "parent" ], "parameters": { - "filter": { - "description": "The standard list filter.", - "location": "query", - "type": "string" - }, - "name": { - "description": "The name of the operation's parent resource.", - "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/operations$", - "required": true, - "type": "string" - }, "pageSize": { - "description": "The standard list page size.", + "description": "Optional. The maximum number of results to return in each response. 
Must be less than or equal to 1000. Defaults to 100.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "The standard list page token.", + "description": "Optional. The page token, returned by a previous call, to request the next page of results.", "location": "query", "type": "string" + }, + "parent": { + "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.autoscalingPolicies.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+$", + "required": true, + "type": "string" } }, - "path": "v1/{+name}", + "path": "v1/{+parent}/autoscalingPolicies", "response": { - "$ref": "ListOperationsResponse" + "$ref": "ListAutoscalingPoliciesResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" @@ -2708,9 +3318,9 @@ }, "setIamPolicy": { "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:setIamPolicy", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}:setIamPolicy", "httpMethod": "POST", - "id": "dataproc.projects.regions.operations.setIamPolicy", + "id": "dataproc.projects.regions.autoscalingPolicies.setIamPolicy", "parameterOrder": [ "resource" ], @@ -2718,7 +3328,7 @@ "resource": { "description": "REQUIRED: The resource for which the policy is being specified. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", + "pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", "required": true, "type": "string" } @@ -2736,9 +3346,9 @@ }, "testIamPermissions": { "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:testIamPermissions", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}:testIamPermissions", "httpMethod": "POST", - "id": "dataproc.projects.regions.operations.testIamPermissions", + "id": "dataproc.projects.regions.autoscalingPolicies.testIamPermissions", "parameterOrder": [ "resource" ], @@ -2746,7 +3356,7 @@ "resource": { "description": "REQUIRED: The resource for which the policy detail is being requested. 
See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", + "pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", "required": true, "type": "string" } @@ -2761,150 +3371,140 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] - } - } - }, - "workflowTemplates": { - "methods": { - "create": { - "description": "Creates new workflow template.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates", - "httpMethod": "POST", - "id": "dataproc.projects.regions.workflowTemplates.create", + }, + "update": { + "description": "Updates (replaces) autoscaling policy.Disabled check for update_mask, because all updates will be full replacements.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}", + "httpMethod": "PUT", + "id": "dataproc.projects.regions.autoscalingPolicies.update", "parameterOrder": [ - "parent" + "name" ], "parameters": { - "parent": { - "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.create, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "name": { + "description": "Output only. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+$", + "pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", "required": true, "type": "string" } }, - "path": "v1/{+parent}/workflowTemplates", + "path": "v1/{+name}", "request": { - "$ref": "WorkflowTemplate" + "$ref": "AutoscalingPolicy" }, "response": { - "$ref": "WorkflowTemplate" + "$ref": "AutoscalingPolicy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] - }, - "delete": { - "description": "Deletes a workflow template. It does not cancel in-progress workflows.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}", - "httpMethod": "DELETE", - "id": "dataproc.projects.regions.workflowTemplates.delete", + } + } + }, + "clusters": { + "methods": { + "create": { + "description": "Creates a cluster in a project. The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).", + "flatPath": "v1/projects/{projectId}/regions/{region}/clusters", + "httpMethod": "POST", + "id": "dataproc.projects.regions.clusters.create", "parameterOrder": [ - "name" + "projectId", + "region" ], "parameters": { - "name": { - "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "actionOnFailedPrimaryWorkers": { + "description": "Optional. Failure action when primary worker creation fails.", + "enum": [ + "FAILURE_ACTION_UNSPECIFIED", + "NO_ACTION", + "DELETE" + ], + "enumDescriptions": [ + "When FailureAction is unspecified, failure action defaults to NO_ACTION.", + "Take no action on failure to create a cluster resource. NO_ACTION is the default.", + "Delete the failed cluster resource." + ], + "location": "query", + "type": "string" + }, + "projectId": { + "description": "Required. The ID of the Google Cloud Platform project that the cluster belongs to.", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", "required": true, "type": "string" }, - "version": { - "description": "Optional. The version of workflow template to delete. If specified, will only delete the template if the current server version matches specified version.", - "format": "int32", + "region": { + "description": "Required. The Dataproc region in which to handle the request.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "Optional. A unique ID used to identify the request. If the server receives two CreateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", "location": "query", - "type": "integer" + "type": "string" } }, - "path": "v1/{+name}", + "path": "v1/projects/{projectId}/regions/{region}/clusters", + "request": { + "$ref": "Cluster" + }, "response": { - "$ref": "Empty" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, - "get": { - "description": "Retrieves the latest workflow template.Can retrieve previously instantiated template by specifying optional version parameter.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}", - "httpMethod": "GET", - "id": "dataproc.projects.regions.workflowTemplates.get", + "delete": { + "description": "Deletes a cluster in a project. The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).", + "flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", + "httpMethod": "DELETE", + "id": "dataproc.projects.regions.clusters.delete", "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "projectId", + "region", + "clusterName" + ], + "parameters": { + "clusterName": { + "description": "Required. The cluster name.", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", "required": true, "type": "string" }, - "version": { - "description": "Optional. The version of workflow template to retrieve. Only previously instantiated versions can be retrieved.If unspecified, retrieves the current version.", - "format": "int32", + "clusterUuid": { + "description": "Optional. Specifying the cluster_uuid means the RPC should fail (with error NOT_FOUND) if cluster with specified UUID does not exist.", "location": "query", - "type": "integer" - } - }, - "path": "v1/{+name}", - "response": { - "$ref": "WorkflowTemplate" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "getIamPolicy": { - "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}:getIamPolicy", - "httpMethod": "POST", - "id": "dataproc.projects.regions.workflowTemplates.getIamPolicy", - "parameterOrder": [ - "resource" - ], - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "type": "string" + }, + "gracefulTerminationTimeout": { + "description": "Optional. The graceful termination timeout for the deletion of the cluster. Indicate the time the request will wait to complete the running jobs on the cluster before its forceful deletion. Default value is 0 indicating that the user has not enabled the graceful termination. Value can be between 60 second and 6 Hours, in case the graceful termination is enabled. (There is no separate flag to check the enabling or disabling of graceful termination, it can be checked by the values in the field).", + "format": "google-duration", + "location": "query", + "type": "string" + }, + "projectId": { + "description": "Required. The ID of the Google Cloud Platform project that the cluster belongs to.", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", "required": true, "type": "string" - } - }, - "path": "v1/{+resource}:getIamPolicy", - "request": { - "$ref": "GetIamPolicyRequest" - }, - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "instantiate": { - "description": "Instantiates a template and begins execution.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). 
Also see Using WorkflowMetadata (https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).On successful completion, Operation.response will be Empty.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}:instantiate", - "httpMethod": "POST", - "id": "dataproc.projects.regions.workflowTemplates.instantiate", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + }, + "region": { + "description": "Required. The Dataproc region in which to handle the request.", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", "required": true, "type": "string" + }, + "requestId": { + "description": "Optional. A unique ID used to identify the request. If the server receives two DeleteClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", + "location": "query", + "type": "string" } }, - "path": "v1/{+name}:instantiate", - "request": { - "$ref": "InstantiateWorkflowTemplateRequest" - }, + "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", "response": { "$ref": "Operation" }, @@ -2912,31 +3512,39 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, - "instantiateInline": { - "description": "Instantiates a template and begins execution.This method is equivalent to executing the sequence CreateWorkflowTemplate, InstantiateWorkflowTemplate, DeleteWorkflowTemplate.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). Also see Using WorkflowMetadata (https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).On successful completion, Operation.response will be Empty.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates:instantiateInline", + "diagnose": { + "description": "Gets cluster diagnostic information. The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). 
After the operation completes, Operation.response contains DiagnoseClusterResults (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults).", + "flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:diagnose", "httpMethod": "POST", - "id": "dataproc.projects.regions.workflowTemplates.instantiateInline", + "id": "dataproc.projects.regions.clusters.diagnose", "parameterOrder": [ - "parent" + "projectId", + "region", + "clusterName" ], "parameters": { - "parent": { - "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,instantiateinline, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.instantiateinline, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "clusterName": { + "description": "Required. The cluster name.", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+$", "required": true, "type": "string" }, - "requestId": { - "description": "Optional. A tag that prevents multiple concurrent workflow instances with the same tag from running. This mitigates risk of concurrent instances started due to retries.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", - "location": "query", + "projectId": { + "description": "Required. The ID of the Google Cloud Platform project that the cluster belongs to.", + "location": "path", + "required": true, + "type": "string" + }, + "region": { + "description": "Required. The Dataproc region in which to handle the request.", + "location": "path", + "required": true, "type": "string" } }, - "path": "v1/{+parent}/workflowTemplates:instantiateInline", + "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:diagnose", "request": { - "$ref": "WorkflowTemplate" + "$ref": "DiagnoseClusterRequest" }, "response": { "$ref": "Operation" @@ -2945,62 +3553,64 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, - "list": { - "description": "Lists workflows that match the specified filter in the request.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates", + "get": { + "description": "Gets the resource representation for a cluster in a project.", + "flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", "httpMethod": "GET", - "id": "dataproc.projects.regions.workflowTemplates.list", + "id": "dataproc.projects.regions.clusters.get", "parameterOrder": [ - "parent" + "projectId", + "region", + "clusterName" ], "parameters": { - "pageSize": { - "description": "Optional. The maximum number of results to return in each response.", - "format": "int32", - "location": "query", - "type": "integer" + "clusterName": { + "description": "Required. The cluster name.", + "location": "path", + "required": true, + "type": "string" }, - "pageToken": { - "description": "Optional. The page token, returned by a previous call, to request the next page of results.", - "location": "query", + "projectId": { + "description": "Required. 
The ID of the Google Cloud Platform project that the cluster belongs to.", + "location": "path", + "required": true, "type": "string" }, - "parent": { - "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "region": { + "description": "Required. The Dataproc region in which to handle the request.", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+$", "required": true, "type": "string" } }, - "path": "v1/{+parent}/workflowTemplates", + "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", "response": { - "$ref": "ListWorkflowTemplatesResponse" + "$ref": "Cluster" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}:setIamPolicy", + "getIamPolicy": { + "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:getIamPolicy", "httpMethod": "POST", - "id": "dataproc.projects.regions.workflowTemplates.setIamPolicy", + "id": "dataproc.projects.regions.clusters.getIamPolicy", "parameterOrder": [ "resource" ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", + "pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+$", "required": true, "type": "string" } }, - "path": "v1/{+resource}:setIamPolicy", + "path": "v1/{+resource}:getIamPolicy", "request": { - "$ref": "SetIamPolicyRequest" + "$ref": "GetIamPolicyRequest" }, "response": { "$ref": "Policy" @@ -3009,2860 +3619,7260 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. 
This operation may \"fail open\" without warning.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}:testIamPermissions", + "injectCredentials": { + "description": "Inject encrypted credentials into all of the VMs in a cluster.The target cluster must be a personal auth cluster assigned to the user who is issuing the RPC.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:injectCredentials", "httpMethod": "POST", - "id": "dataproc.projects.regions.workflowTemplates.testIamPermissions", + "id": "dataproc.projects.regions.clusters.injectCredentials", "parameterOrder": [ - "resource" + "project", + "region", + "cluster" ], "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "cluster": { + "description": "Required. The cluster, in the form clusters/.", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", + "pattern": "^clusters/[^/]+$", + "required": true, + "type": "string" + }, + "project": { + "description": "Required. The ID of the Google Cloud Platform project the cluster belongs to, of the form projects/.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + }, + "region": { + "description": "Required. The region containing the cluster, of the form regions/.", + "location": "path", + "pattern": "^regions/[^/]+$", "required": true, "type": "string" } }, - "path": "v1/{+resource}:testIamPermissions", + "path": "v1/{+project}/{+region}/{+cluster}:injectCredentials", "request": { - "$ref": "TestIamPermissionsRequest" + "$ref": "InjectCredentialsRequest" }, "response": { - "$ref": "TestIamPermissionsResponse" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, - "update": { - "description": "Updates (replaces) workflow template. The updated template must contain version that matches the current server version.", - "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}", - "httpMethod": "PUT", - "id": "dataproc.projects.regions.workflowTemplates.update", + "list": { + "description": "Lists all regions/{region}/clusters in a project alphabetically.", + "flatPath": "v1/projects/{projectId}/regions/{region}/clusters", + "httpMethod": "GET", + "id": "dataproc.projects.regions.clusters.list", "parameterOrder": [ - "name" + "projectId", + "region" ], "parameters": { - "name": { - "description": "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "filter": { + "description": "Optional. A filter constraining the clusters to list. Filters are case-sensitive and have the following syntax:field = value AND field = value ...where field is one of status.state, clusterName, or labels.[KEY], and [KEY] is a label key. value can be * to match all values. 
status.state can be one of the following: ACTIVE, INACTIVE, CREATING, RUNNING, ERROR, DELETING, UPDATING, STOPPING, or STOPPED. ACTIVE contains the CREATING, UPDATING, and RUNNING states. INACTIVE contains the DELETING, ERROR, STOPPING, and STOPPED states. clusterName is the name of the cluster provided at creation time. Only the logical AND operator is supported; space-separated items are treated as having an implicit AND operator.Example filter:status.state = ACTIVE AND clusterName = mycluster AND labels.env = staging AND labels.starred = *", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "Optional. The standard List page size.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. The standard List page token.", + "location": "query", + "type": "string" + }, + "projectId": { + "description": "Required. The ID of the Google Cloud Platform project that the cluster belongs to.", + "location": "path", + "required": true, + "type": "string" + }, + "region": { + "description": "Required. The Dataproc region in which to handle the request.", "location": "path", - "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", "required": true, "type": "string" } }, - "path": "v1/{+name}", - "request": { - "$ref": "WorkflowTemplate" - }, + "path": "v1/projects/{projectId}/regions/{region}/clusters", "response": { - "$ref": "WorkflowTemplate" + "$ref": "ListClustersResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates a cluster in a project. The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). The cluster must be in a RUNNING state or an error is returned.", + "flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", + "httpMethod": "PATCH", + "id": "dataproc.projects.regions.clusters.patch", + "parameterOrder": [ + "projectId", + "region", + "clusterName" + ], + "parameters": { + "clusterName": { + "description": "Required. The cluster name.", + "location": "path", + "required": true, + "type": "string" + }, + "gracefulDecommissionTimeout": { + "description": "Optional. Timeout for graceful YARN decommissioning. Graceful decommissioning allows removing nodes from the cluster without interrupting jobs in progress. Timeout specifies how long to wait for jobs in progress to finish before forcefully removing nodes (and potentially interrupting jobs). Default timeout is 0 (for forceful decommission), and the maximum allowed timeout is 1 day. (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).Only supported on Dataproc image versions 1.2 and higher.", + "format": "google-duration", + "location": "query", + "type": "string" + }, + "projectId": { + "description": "Required. The ID of the Google Cloud Platform project the cluster belongs to.", + "location": "path", + "required": true, + "type": "string" + }, + "region": { + "description": "Required. The Dataproc region in which to handle the request.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "Optional. A unique ID used to identify the request. 
If the server receives two UpdateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", + "location": "query", + "type": "string" + }, + "updateMask": { + "description": "Required. Specifies the path, relative to Cluster, of the field to update. For example, to change the number of workers in a cluster to 5, the update_mask parameter would be specified as config.worker_config.num_instances, and the PATCH request body would specify the new value, as follows: { \"config\":{ \"workerConfig\":{ \"numInstances\":\"5\" } } } Similarly, to change the number of preemptible workers in a cluster to 5, the update_mask parameter would be config.secondary_worker_config.num_instances, and the PATCH request body would be set as follows: { \"config\":{ \"secondaryWorkerConfig\":{ \"numInstances\":\"5\" } } } *Note:* Currently, only the following fields can be updated: *Mask* *Purpose* *labels* Update labels *config.worker_config.num_instances* Resize primary worker group *config.secondary_worker_config.num_instances* Resize secondary worker group config.autoscaling_config.policy_uri Use, stop using, or change autoscaling policies ", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", + "request": { + "$ref": "Cluster" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "repair": { + "description": "Repairs a cluster.", + "flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:repair", + "httpMethod": "POST", + "id": "dataproc.projects.regions.clusters.repair", + "parameterOrder": [ + "projectId", + "region", + "clusterName" + ], + "parameters": { + "clusterName": { + "description": "Required. The cluster name.", + "location": "path", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Required. The ID of the Google Cloud Platform project the cluster belongs to.", + "location": "path", + "required": true, + "type": "string" + }, + "region": { + "description": "Required. The Dataproc region in which to handle the request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:repair", + "request": { + "$ref": "RepairClusterRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:setIamPolicy", + "httpMethod": "POST", + "id": "dataproc.projects.regions.clusters.setIamPolicy", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy is being specified. 
See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+resource}:setIamPolicy", + "request": { + "$ref": "SetIamPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "start": { + "description": "Starts a cluster in a project.", + "flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:start", + "httpMethod": "POST", + "id": "dataproc.projects.regions.clusters.start", + "parameterOrder": [ + "projectId", + "region", + "clusterName" + ], + "parameters": { + "clusterName": { + "description": "Required. The cluster name.", + "location": "path", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Required. The ID of the Google Cloud Platform project the cluster belongs to.", + "location": "path", + "required": true, + "type": "string" + }, + "region": { + "description": "Required. The Dataproc region in which to handle the request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:start", + "request": { + "$ref": "StartClusterRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "stop": { + "description": "Stops a cluster in a project.", + "flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:stop", + "httpMethod": "POST", + "id": "dataproc.projects.regions.clusters.stop", + "parameterOrder": [ + "projectId", + "region", + "clusterName" + ], + "parameters": { + "clusterName": { + "description": "Required. The cluster name.", + "location": "path", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Required. The ID of the Google Cloud Platform project the cluster belongs to.", + "location": "path", + "required": true, + "type": "string" + }, + "region": { + "description": "Required. The Dataproc region in which to handle the request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:stop", + "request": { + "$ref": "StopClusterRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:testIamPermissions", + "httpMethod": "POST", + "id": "dataproc.projects.regions.clusters.testIamPermissions", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy detail is being requested. 
See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+resource}:testIamPermissions", + "request": { + "$ref": "TestIamPermissionsRequest" + }, + "response": { + "$ref": "TestIamPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] } - } - } - } - } - } - } - }, - "revision": "20240605", - "rootUrl": "https://dataproc.googleapis.com/", - "schemas": { - "AcceleratorConfig": { - "description": "Specifies the type and number of accelerator cards attached to the instances of an instance. See GPUs on Compute Engine (https://cloud.google.com/compute/docs/gpus/).", - "id": "AcceleratorConfig", - "properties": { - "acceleratorCount": { - "description": "The number of the accelerator cards of this type exposed to this instance.", - "format": "int32", + }, + "resources": { + "nodeGroups": { + "methods": { + "create": { + "description": "Creates a node group in a cluster. The returned Operation.metadata is NodeGroupOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata).", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}/nodeGroups", + "httpMethod": "POST", + "id": "dataproc.projects.regions.clusters.nodeGroups.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "nodeGroupId": { + "description": "Optional. An optional node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The parent resource where this node group will be created. Format: projects/{project}/regions/{region}/clusters/{cluster}", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + }, + "parentOperationId": { + "description": "Optional. operation id of the parent operation sending the create request", + "location": "query", + "type": "string" + }, + "requestId": { + "description": "Optional. A unique ID used to identify the request. If the server receives two CreateNodeGroupRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateNodeGroupRequest) with the same ID, the second request is ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). 
The maximum length is 40 characters.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+parent}/nodeGroups", + "request": { + "$ref": "NodeGroup" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets the resource representation for a node group in a cluster.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}/nodeGroups/{nodeGroupsId}", + "httpMethod": "GET", + "id": "dataproc.projects.regions.clusters.nodeGroups.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the node group to retrieve. Format: projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+/nodeGroups/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "NodeGroup" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "repair": { + "description": "Repair nodes in a node group.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}/nodeGroups/{nodeGroupsId}:repair", + "httpMethod": "POST", + "id": "dataproc.projects.regions.clusters.nodeGroups.repair", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the node group to resize. Format: projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+/nodeGroups/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:repair", + "request": { + "$ref": "RepairNodeGroupRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "resize": { + "description": "Resizes a node group in a cluster. The returned Operation.metadata is NodeGroupOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata).", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}/nodeGroups/{nodeGroupsId}:resize", + "httpMethod": "POST", + "id": "dataproc.projects.regions.clusters.nodeGroups.resize", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the node group to resize. Format: projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+/nodeGroups/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:resize", + "request": { + "$ref": "ResizeNodeGroupRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + }, + "jobs": { + "methods": { + "cancel": { + "description": "Starts a job cancellation request. 
To access the job resource after cancellation, call regions/{region}/jobs.list (https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or regions/{region}/jobs.get (https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).", + "flatPath": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}:cancel", + "httpMethod": "POST", + "id": "dataproc.projects.regions.jobs.cancel", + "parameterOrder": [ + "projectId", + "region", + "jobId" + ], + "parameters": { + "jobId": { + "description": "Required. The job ID.", + "location": "path", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", + "location": "path", + "required": true, + "type": "string" + }, + "region": { + "description": "Required. The Dataproc region in which to handle the request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}:cancel", + "request": { + "$ref": "CancelJobRequest" + }, + "response": { + "$ref": "Job" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes the job from the project. If the job is active, the delete fails, and the response returns FAILED_PRECONDITION.", + "flatPath": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", + "httpMethod": "DELETE", + "id": "dataproc.projects.regions.jobs.delete", + "parameterOrder": [ + "projectId", + "region", + "jobId" + ], + "parameters": { + "jobId": { + "description": "Required. The job ID.", + "location": "path", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", + "location": "path", + "required": true, + "type": "string" + }, + "region": { + "description": "Required. The Dataproc region in which to handle the request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets the resource representation for a job in a project.", + "flatPath": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", + "httpMethod": "GET", + "id": "dataproc.projects.regions.jobs.get", + "parameterOrder": [ + "projectId", + "region", + "jobId" + ], + "parameters": { + "jobId": { + "description": "Required. The job ID.", + "location": "path", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", + "location": "path", + "required": true, + "type": "string" + }, + "region": { + "description": "Required. The Dataproc region in which to handle the request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", + "response": { + "$ref": "Job" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "getIamPolicy": { + "description": "Gets the access control policy for a resource. 
Returns an empty policy if the resource exists and does not have a policy set.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/jobs/{jobsId}:getIamPolicy", + "httpMethod": "POST", + "id": "dataproc.projects.regions.jobs.getIamPolicy", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+/jobs/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+resource}:getIamPolicy", + "request": { + "$ref": "GetIamPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists regions/{region}/jobs in a project.", + "flatPath": "v1/projects/{projectId}/regions/{region}/jobs", + "httpMethod": "GET", + "id": "dataproc.projects.regions.jobs.list", + "parameterOrder": [ + "projectId", + "region" + ], + "parameters": { + "clusterName": { + "description": "Optional. If set, the returned jobs list includes only jobs that were submitted to the named cluster.", + "location": "query", + "type": "string" + }, + "filter": { + "description": "Optional. A filter constraining the jobs to list. Filters are case-sensitive and have the following syntax:field = value AND field = value ...where field is status.state or labels.[KEY], and [KEY] is a label key. value can be * to match all values. status.state can be either ACTIVE or NON_ACTIVE. Only the logical AND operator is supported; space-separated items are treated as having an implicit AND operator.Example filter:status.state = ACTIVE AND labels.env = staging AND labels.starred = *", + "location": "query", + "type": "string" + }, + "jobStateMatcher": { + "description": "Optional. Specifies enumerated categories of jobs to list. (default = match ALL jobs).If filter is provided, jobStateMatcher will be ignored.", + "enum": [ + "ALL", + "ACTIVE", + "NON_ACTIVE" + ], + "enumDescriptions": [ + "Match all jobs, regardless of state.", + "Only match jobs in non-terminal states: PENDING, RUNNING, or CANCEL_PENDING.", + "Only match jobs in terminal states: CANCELLED, DONE, or ERROR." + ], + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "Optional. The number of results to return in each response.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. The page token, returned by a previous call, to request the next page of results.", + "location": "query", + "type": "string" + }, + "projectId": { + "description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", + "location": "path", + "required": true, + "type": "string" + }, + "region": { + "description": "Required. 
The Dataproc region in which to handle the request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "v1/projects/{projectId}/regions/{region}/jobs", + "response": { + "$ref": "ListJobsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates a job in a project.", + "flatPath": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", + "httpMethod": "PATCH", + "id": "dataproc.projects.regions.jobs.patch", + "parameterOrder": [ + "projectId", + "region", + "jobId" + ], + "parameters": { + "jobId": { + "description": "Required. The job ID.", + "location": "path", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", + "location": "path", + "required": true, + "type": "string" + }, + "region": { + "description": "Required. The Dataproc region in which to handle the request.", + "location": "path", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Required. Specifies the path, relative to Job, of the field to update. For example, to update the labels of a Job the update_mask parameter would be specified as labels, and the PATCH request body would specify the new value. *Note:* Currently, labels is the only field that can be updated.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", + "request": { + "$ref": "Job" + }, + "response": { + "$ref": "Job" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/jobs/{jobsId}:setIamPolicy", + "httpMethod": "POST", + "id": "dataproc.projects.regions.jobs.setIamPolicy", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy is being specified. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+/jobs/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+resource}:setIamPolicy", + "request": { + "$ref": "SetIamPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "submit": { + "description": "Submits a job to a cluster.", + "flatPath": "v1/projects/{projectId}/regions/{region}/jobs:submit", + "httpMethod": "POST", + "id": "dataproc.projects.regions.jobs.submit", + "parameterOrder": [ + "projectId", + "region" + ], + "parameters": { + "projectId": { + "description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", + "location": "path", + "required": true, + "type": "string" + }, + "region": { + "description": "Required. 
The Dataproc region in which to handle the request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "v1/projects/{projectId}/regions/{region}/jobs:submit", + "request": { + "$ref": "SubmitJobRequest" + }, + "response": { + "$ref": "Job" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "submitAsOperation": { + "description": "Submits job to a cluster.", + "flatPath": "v1/projects/{projectId}/regions/{region}/jobs:submitAsOperation", + "httpMethod": "POST", + "id": "dataproc.projects.regions.jobs.submitAsOperation", + "parameterOrder": [ + "projectId", + "region" + ], + "parameters": { + "projectId": { + "description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", + "location": "path", + "required": true, + "type": "string" + }, + "region": { + "description": "Required. The Dataproc region in which to handle the request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "v1/projects/{projectId}/regions/{region}/jobs:submitAsOperation", + "request": { + "$ref": "SubmitJobRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/jobs/{jobsId}:testIamPermissions", + "httpMethod": "POST", + "id": "dataproc.projects.regions.jobs.testIamPermissions", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy detail is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+/jobs/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+resource}:testIamPermissions", + "request": { + "$ref": "TestIamPermissionsRequest" + }, + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "operations": { + "methods": { + "cancel": { + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. 
On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:cancel", + "httpMethod": "POST", + "id": "dataproc.projects.regions.operations.cancel", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource to be cancelled.", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:cancel", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}", + "httpMethod": "DELETE", + "id": "dataproc.projects.regions.operations.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource to be deleted.", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}", + "httpMethod": "GET", + "id": "dataproc.projects.regions.operations.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource.", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "getIamPolicy": { + "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:getIamPolicy", + "httpMethod": "POST", + "id": "dataproc.projects.regions.operations.getIamPolicy", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+resource}:getIamPolicy", + "request": { + "$ref": "GetIamPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns UNIMPLEMENTED.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations", + "httpMethod": "GET", + "id": "dataproc.projects.regions.operations.list", + "parameterOrder": [ + "name" + ], + "parameters": { + "filter": { + "description": "The standard list filter.", + "location": "query", + "type": "string" + }, + "name": { + "description": "The name of the operation's parent resource.", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+/operations$", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "The standard list page size.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The standard list page token.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "ListOperationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:setIamPolicy", + "httpMethod": "POST", + "id": "dataproc.projects.regions.operations.setIamPolicy", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy is being specified. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+resource}:setIamPolicy", + "request": { + "$ref": "SetIamPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:testIamPermissions", + "httpMethod": "POST", + "id": "dataproc.projects.regions.operations.testIamPermissions", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy detail is being requested. 
See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+resource}:testIamPermissions", + "request": { + "$ref": "TestIamPermissionsRequest" + }, + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "workflowTemplates": { + "methods": { + "create": { + "description": "Creates a new workflow template.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates", + "httpMethod": "POST", + "id": "dataproc.projects.regions.workflowTemplates.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.create, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/workflowTemplates", + "request": { + "$ref": "WorkflowTemplate" + }, + "response": { + "$ref": "WorkflowTemplate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes a workflow template. It does not cancel in-progress workflows.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}", + "httpMethod": "DELETE", + "id": "dataproc.projects.regions.workflowTemplates.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", + "required": true, + "type": "string" + }, + "version": { + "description": "Optional. The version of workflow template to delete. If specified, will only delete the template if the current server version matches specified version.", + "format": "int32", + "location": "query", + "type": "integer" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Retrieves the latest workflow template.Can retrieve previously instantiated template by specifying optional version parameter.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}", + "httpMethod": "GET", + "id": "dataproc.projects.regions.workflowTemplates.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. 
The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", + "required": true, + "type": "string" + }, + "version": { + "description": "Optional. The version of workflow template to retrieve. Only previously instantiated versions can be retrieved.If unspecified, retrieves the current version.", + "format": "int32", + "location": "query", + "type": "integer" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "WorkflowTemplate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "getIamPolicy": { + "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}:getIamPolicy", + "httpMethod": "POST", + "id": "dataproc.projects.regions.workflowTemplates.getIamPolicy", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+resource}:getIamPolicy", + "request": { + "$ref": "GetIamPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "instantiate": { + "description": "Instantiates a template and begins execution.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). Also see Using WorkflowMetadata (https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).On successful completion, Operation.response will be Empty.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}:instantiate", + "httpMethod": "POST", + "id": "dataproc.projects.regions.workflowTemplates.instantiate", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:instantiate", + "request": { + "$ref": "InstantiateWorkflowTemplateRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "instantiateInline": { + "description": "Instantiates a template and begins execution.This method is equivalent to executing the sequence CreateWorkflowTemplate, InstantiateWorkflowTemplate, DeleteWorkflowTemplate.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). Also see Using WorkflowMetadata (https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).On successful completion, Operation.response will be Empty.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates:instantiateInline", + "httpMethod": "POST", + "id": "dataproc.projects.regions.workflowTemplates.instantiateInline", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.instantiateInline, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.instantiateInline, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+$", + "required": true, + "type": "string" + }, + "requestId": { + "description": "Optional. A tag that prevents multiple concurrent workflow instances with the same tag from running. This mitigates risk of concurrent instances started due to retries.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+parent}/workflowTemplates:instantiateInline", + "request": { + "$ref": "WorkflowTemplate" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists workflows that match the specified filter in the request.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates", + "httpMethod": "GET", + "id": "dataproc.projects.regions.workflowTemplates.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. 
The maximum number of results to return in each response.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. The page token, returned by a previous call, to request the next page of results.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/workflowTemplates", + "response": { + "$ref": "ListWorkflowTemplatesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}:setIamPolicy", + "httpMethod": "POST", + "id": "dataproc.projects.regions.workflowTemplates.setIamPolicy", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy is being specified. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+resource}:setIamPolicy", + "request": { + "$ref": "SetIamPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}:testIamPermissions", + "httpMethod": "POST", + "id": "dataproc.projects.regions.workflowTemplates.testIamPermissions", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy detail is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+resource}:testIamPermissions", + "request": { + "$ref": "TestIamPermissionsRequest" + }, + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "update": { + "description": "Updates (replaces) workflow template. 
The updated template must contain a version that matches the current server version.", + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}", + "httpMethod": "PUT", + "id": "dataproc.projects.regions.workflowTemplates.update", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "location": "path", + "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "request": { + "$ref": "WorkflowTemplate" + }, + "response": { + "$ref": "WorkflowTemplate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + } + } + } + }, + "revision": "20240928", + "rootUrl": "https://dataproc.googleapis.com/", + "schemas": { + "AcceleratorConfig": { + "description": "Specifies the type and number of accelerator cards attached to the instances of an instance group. See GPUs on Compute Engine (https://cloud.google.com/compute/docs/gpus/).", + "id": "AcceleratorConfig", + "properties": { + "acceleratorCount": { + "description": "The number of the accelerator cards of this type exposed to this instance.", + "format": "int32", + "type": "integer" + }, + "acceleratorTypeUri": { + "description": "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4.", + "type": "string" + } + }, + "type": "object" + }, + "AccessSessionSparkApplicationEnvironmentInfoResponse": { + "description": "Environment details of a Spark Application.", + "id": "AccessSessionSparkApplicationEnvironmentInfoResponse", + "properties": { + "applicationEnvironmentInfo": { + "$ref": "ApplicationEnvironmentInfo", + "description": "Details about the Environment that the application is running in." + } + }, + "type": "object" + }, + "AccessSessionSparkApplicationJobResponse": { + "description": "Details of a particular job associated with Spark Application", + "id": "AccessSessionSparkApplicationJobResponse", + "properties": { + "jobData": { + "$ref": "JobData", + "description": "Output only. Data corresponding to a spark job.", + "readOnly": true + } + }, + "type": "object" + }, + "AccessSessionSparkApplicationResponse": { + "description": "A summary of Spark Application", + "id": "AccessSessionSparkApplicationResponse", + "properties": { + "application": { + "$ref": "ApplicationInfo", + "description": "Output only. 
High level information corresponding to an application.", + "readOnly": true + } + }, + "type": "object" + }, + "AccessSessionSparkApplicationSqlQueryResponse": { + "description": "Details of a query for a Spark Application", + "id": "AccessSessionSparkApplicationSqlQueryResponse", + "properties": { + "executionData": { + "$ref": "SqlExecutionUiData", + "description": "SQL Execution Data" + } + }, + "type": "object" + }, + "AccessSessionSparkApplicationSqlSparkPlanGraphResponse": { + "description": "SparkPlanGraph for a Spark Application execution limited to maximum 10000 clusters.", + "id": "AccessSessionSparkApplicationSqlSparkPlanGraphResponse", + "properties": { + "sparkPlanGraph": { + "$ref": "SparkPlanGraph", + "description": "SparkPlanGraph for a Spark Application execution." + } + }, + "type": "object" + }, + "AccessSessionSparkApplicationStageAttemptResponse": { + "description": "Stage Attempt for a Stage of a Spark Application", + "id": "AccessSessionSparkApplicationStageAttemptResponse", + "properties": { + "stageData": { + "$ref": "StageData", + "description": "Output only. Data corresponding to a stage.", + "readOnly": true + } + }, + "type": "object" + }, + "AccessSessionSparkApplicationStageRddOperationGraphResponse": { + "description": "RDD operation graph for a Spark Application Stage limited to maximum 10000 clusters.", + "id": "AccessSessionSparkApplicationStageRddOperationGraphResponse", + "properties": { + "rddOperationGraph": { + "$ref": "RddOperationGraph", + "description": "RDD operation graph for a Spark Application Stage." + } + }, + "type": "object" + }, + "AccessSparkApplicationEnvironmentInfoResponse": { + "description": "Environment details of a Saprk Application.", + "id": "AccessSparkApplicationEnvironmentInfoResponse", + "properties": { + "applicationEnvironmentInfo": { + "$ref": "ApplicationEnvironmentInfo", + "description": "Details about the Environment that the application is running in." + } + }, + "type": "object" + }, + "AccessSparkApplicationJobResponse": { + "description": "Details of a particular job associated with Spark Application", + "id": "AccessSparkApplicationJobResponse", + "properties": { + "jobData": { + "$ref": "JobData", + "description": "Output only. Data corresponding to a spark job.", + "readOnly": true + } + }, + "type": "object" + }, + "AccessSparkApplicationResponse": { + "description": "A summary of Spark Application", + "id": "AccessSparkApplicationResponse", + "properties": { + "application": { + "$ref": "ApplicationInfo", + "description": "Output only. High level information corresponding to an application.", + "readOnly": true + } + }, + "type": "object" + }, + "AccessSparkApplicationSqlQueryResponse": { + "description": "Details of a query for a Spark Application", + "id": "AccessSparkApplicationSqlQueryResponse", + "properties": { + "executionData": { + "$ref": "SqlExecutionUiData", + "description": "SQL Execution Data" + } + }, + "type": "object" + }, + "AccessSparkApplicationSqlSparkPlanGraphResponse": { + "description": "SparkPlanGraph for a Spark Application execution limited to maximum 10000 clusters.", + "id": "AccessSparkApplicationSqlSparkPlanGraphResponse", + "properties": { + "sparkPlanGraph": { + "$ref": "SparkPlanGraph", + "description": "SparkPlanGraph for a Spark Application execution." 
+ } + }, + "type": "object" + }, + "AccessSparkApplicationStageAttemptResponse": { + "description": "Stage Attempt for a Stage of a Spark Application", + "id": "AccessSparkApplicationStageAttemptResponse", + "properties": { + "stageData": { + "$ref": "StageData", + "description": "Output only. Data corresponding to a stage.", + "readOnly": true + } + }, + "type": "object" + }, + "AccessSparkApplicationStageRddOperationGraphResponse": { + "description": "RDD operation graph for a Spark Application Stage limited to maximum 10000 clusters.", + "id": "AccessSparkApplicationStageRddOperationGraphResponse", + "properties": { + "rddOperationGraph": { + "$ref": "RddOperationGraph", + "description": "RDD operation graph for a Spark Application Stage." + } + }, + "type": "object" + }, + "AccumulableInfo": { + "id": "AccumulableInfo", + "properties": { + "accumullableInfoId": { + "format": "int64", + "type": "string" + }, + "name": { + "type": "string" + }, + "update": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + "AnalyzeBatchRequest": { + "description": "A request to analyze a batch workload.", + "id": "AnalyzeBatchRequest", + "properties": { + "requestId": { + "description": "Optional. A unique ID used to identify the request. If the service receives two AnalyzeBatchRequest (http://cloud/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.AnalyzeBatchRequest)s with the same request_id, the second request is ignored and the Operation that corresponds to the first request created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", + "type": "string" + } + }, + "type": "object" + }, + "AnalyzeOperationMetadata": { + "description": "Metadata describing the Analyze operation.", + "id": "AnalyzeOperationMetadata", + "properties": { + "analyzedWorkloadName": { + "description": "Output only. Name of the workload being analyzed.", + "readOnly": true, + "type": "string" + }, + "analyzedWorkloadType": { + "description": "Output only. Type of the workload being analyzed.", + "enum": [ + "WORKLOAD_TYPE_UNSPECIFIED", + "BATCH" + ], + "enumDescriptions": [ + "Undefined option", + "Serverless batch job" + ], + "readOnly": true, + "type": "string" + }, + "analyzedWorkloadUuid": { + "description": "Output only. Unique identifier of the workload, typically generated by the control plane. E.g., batch UUID.", + "readOnly": true, + "type": "string" + }, + "createTime": { + "description": "Output only. The time when the operation was created.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "description": { + "description": "Output only. Short description of the operation.", + "readOnly": true, + "type": "string" + }, + "doneTime": { + "description": "Output only. The time when the operation finished.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Output only. Labels associated with the operation.", + "readOnly": true, + "type": "object" + }, + "warnings": { + "description": "Output only. 
Warnings encountered during operation execution.", + "items": { + "type": "string" + }, + "readOnly": true, + "type": "array" + } + }, + "type": "object" + }, + "AppSummary": { + "id": "AppSummary", + "properties": { + "numCompletedJobs": { + "format": "int32", + "type": "integer" + }, + "numCompletedStages": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "ApplicationAttemptInfo": { + "description": "Specific attempt of an application.", + "id": "ApplicationAttemptInfo", + "properties": { + "appSparkVersion": { + "type": "string" + }, + "attemptId": { + "type": "string" + }, + "completed": { + "type": "boolean" + }, + "durationMillis": { + "format": "int64", + "type": "string" + }, + "endTime": { + "format": "google-datetime", + "type": "string" + }, + "lastUpdated": { + "format": "google-datetime", + "type": "string" + }, + "sparkUser": { + "type": "string" + }, + "startTime": { + "format": "google-datetime", + "type": "string" + } + }, + "type": "object" + }, + "ApplicationEnvironmentInfo": { + "description": "Details about the Environment that the application is running in.", + "id": "ApplicationEnvironmentInfo", + "properties": { + "classpathEntries": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "hadoopProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "metricsProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "resourceProfiles": { + "items": { + "$ref": "ResourceProfileInfo" + }, + "type": "array" + }, + "runtime": { + "$ref": "SparkRuntimeInfo" + }, + "sparkProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "systemProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "ApplicationInfo": { + "description": "High level information corresponding to an application.", + "id": "ApplicationInfo", + "properties": { + "applicationContextIngestionStatus": { + "enum": [ + "APPLICATION_CONTEXT_INGESTION_STATUS_UNSPECIFIED", + "APPLICATION_CONTEXT_INGESTION_STATUS_COMPLETED" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "applicationId": { + "type": "string" + }, + "attempts": { + "items": { + "$ref": "ApplicationAttemptInfo" + }, + "type": "array" + }, + "coresGranted": { + "format": "int32", + "type": "integer" + }, + "coresPerExecutor": { + "format": "int32", + "type": "integer" + }, + "maxCores": { + "format": "int32", + "type": "integer" + }, + "memoryPerExecutorMb": { + "format": "int32", + "type": "integer" + }, + "name": { + "type": "string" + }, + "quantileDataStatus": { + "enum": [ + "QUANTILE_DATA_STATUS_UNSPECIFIED", + "QUANTILE_DATA_STATUS_COMPLETED", + "QUANTILE_DATA_STATUS_FAILED" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "AutoscalingConfig": { + "description": "Autoscaling Policy config associated with the cluster.", + "id": "AutoscalingConfig", + "properties": { + "policyUri": { + "description": "Optional. The autoscaling policy used by the cluster.Only resource names including projectid and location (region) are valid. 
Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]Note that the policy must be in the same project and Dataproc region.", + "type": "string" + } + }, + "type": "object" + }, + "AutoscalingPolicy": { + "description": "Describes an autoscaling policy for Dataproc cluster autoscaler.", + "id": "AutoscalingPolicy", + "properties": { + "basicAlgorithm": { + "$ref": "BasicAutoscalingAlgorithm" + }, + "id": { + "description": "Required. The policy id.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. The labels to associate with this autoscaling policy. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with an autoscaling policy.", + "type": "object" + }, + "name": { + "description": "Output only. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + "readOnly": true, + "type": "string" + }, + "secondaryWorkerConfig": { + "$ref": "InstanceGroupAutoscalingPolicyConfig", + "description": "Optional. Describes how the autoscaler will operate for secondary workers." + }, + "workerConfig": { + "$ref": "InstanceGroupAutoscalingPolicyConfig", + "description": "Required. Describes how the autoscaler will operate for primary workers." + } + }, + "type": "object" + }, + "AutotuningConfig": { + "description": "Autotuning configuration of the workload.", + "id": "AutotuningConfig", + "properties": { + "scenarios": { + "description": "Optional. Scenarios for which tunings are applied.", + "items": { + "enum": [ + "SCENARIO_UNSPECIFIED", + "SCALING", + "BROADCAST_HASH_JOIN", + "MEMORY" + ], + "enumDescriptions": [ + "Default value.", + "Scaling recommendations such as initialExecutors.", + "Adding hints for potential relation broadcasts.", + "Memory management for workloads." + ], + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "AuxiliaryNodeGroup": { + "description": "Node group identification and configuration information.", + "id": "AuxiliaryNodeGroup", + "properties": { + "nodeGroup": { + "$ref": "NodeGroup", + "description": "Required. Node group configuration." + }, + "nodeGroupId": { + "description": "Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. 
Must consist of from 3 to 33 characters.", + "type": "string" + } + }, + "type": "object" + }, + "AuxiliaryServicesConfig": { + "description": "Auxiliary services configuration for a Cluster.", + "id": "AuxiliaryServicesConfig", + "properties": { + "metastoreConfig": { + "$ref": "MetastoreConfig", + "description": "Optional. The Hive Metastore configuration for this workload." + }, + "sparkHistoryServerConfig": { + "$ref": "SparkHistoryServerConfig", + "description": "Optional. The Spark History Server configuration for the workload." + } + }, + "type": "object" + }, + "BasicAutoscalingAlgorithm": { + "description": "Basic algorithm for autoscaling.", + "id": "BasicAutoscalingAlgorithm", + "properties": { + "cooldownPeriod": { + "description": "Optional. Duration between scaling events. A scaling period starts after the update operation from the previous event has completed.Bounds: 2m, 1d. Default: 2m.", + "format": "google-duration", + "type": "string" + }, + "sparkStandaloneConfig": { + "$ref": "SparkStandaloneAutoscalingConfig", + "description": "Optional. Spark Standalone autoscaling configuration" + }, + "yarnConfig": { + "$ref": "BasicYarnAutoscalingConfig", + "description": "Optional. YARN autoscaling configuration." + } + }, + "type": "object" + }, + "BasicYarnAutoscalingConfig": { + "description": "Basic autoscaling configurations for YARN.", + "id": "BasicYarnAutoscalingConfig", + "properties": { + "gracefulDecommissionTimeout": { + "description": "Required. Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations.Bounds: 0s, 1d.", + "format": "google-duration", + "type": "string" + }, + "scaleDownFactor": { + "description": "Required. Fraction of average YARN pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0.", + "format": "double", + "type": "number" + }, + "scaleDownMinWorkerFraction": { + "description": "Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.", + "format": "double", + "type": "number" + }, + "scaleUpFactor": { + "description": "Required. Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0.", + "format": "double", + "type": "number" + }, + "scaleUpMinWorkerFraction": { + "description": "Optional. 
Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.", + "format": "double", + "type": "number" + } + }, + "type": "object" + }, + "Batch": { + "description": "A representation of a batch workload in the service.", + "id": "Batch", + "properties": { + "createTime": { + "description": "Output only. The time when the batch was created.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "creator": { + "description": "Output only. The email address of the user who created the batch.", + "readOnly": true, + "type": "string" + }, + "environmentConfig": { + "$ref": "EnvironmentConfig", + "description": "Optional. Environment configuration for the batch execution." + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. The labels to associate with this batch. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a batch.", + "type": "object" + }, + "name": { + "description": "Output only. The resource name of the batch.", + "readOnly": true, + "type": "string" + }, + "operation": { + "description": "Output only. The resource name of the operation associated with this batch.", + "readOnly": true, + "type": "string" + }, + "pysparkBatch": { + "$ref": "PySparkBatch", + "description": "Optional. PySpark batch config." + }, + "runtimeConfig": { + "$ref": "RuntimeConfig", + "description": "Optional. Runtime configuration for the batch execution." + }, + "runtimeInfo": { + "$ref": "RuntimeInfo", + "description": "Output only. Runtime information about batch execution.", + "readOnly": true + }, + "sparkBatch": { + "$ref": "SparkBatch", + "description": "Optional. Spark batch config." + }, + "sparkRBatch": { + "$ref": "SparkRBatch", + "description": "Optional. SparkR batch config." + }, + "sparkSqlBatch": { + "$ref": "SparkSqlBatch", + "description": "Optional. SparkSql batch config." + }, + "state": { + "description": "Output only. The state of the batch.", + "enum": [ + "STATE_UNSPECIFIED", + "PENDING", + "RUNNING", + "CANCELLING", + "CANCELLED", + "SUCCEEDED", + "FAILED" + ], + "enumDescriptions": [ + "The batch state is unknown.", + "The batch is created before running.", + "The batch is running.", + "The batch is cancelling.", + "The batch cancellation was successful.", + "The batch completed successfully.", + "The batch is no longer running due to an error." + ], + "readOnly": true, + "type": "string" + }, + "stateHistory": { + "description": "Output only. Historical state information for the batch.", + "items": { + "$ref": "StateHistory" + }, + "readOnly": true, + "type": "array" + }, + "stateMessage": { + "description": "Output only. Batch state details, such as a failure description if the state is FAILED.", + "readOnly": true, + "type": "string" + }, + "stateTime": { + "description": "Output only. The time when the batch entered a current state.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "uuid": { + "description": "Output only. 
A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "BatchOperationMetadata": { + "description": "Metadata describing the Batch operation.", + "id": "BatchOperationMetadata", + "properties": { + "batch": { + "description": "Name of the batch for the operation.", + "type": "string" + }, + "batchUuid": { + "description": "Batch UUID for the operation.", + "type": "string" + }, + "createTime": { + "description": "The time when the operation was created.", + "format": "google-datetime", + "type": "string" + }, + "description": { + "description": "Short description of the operation.", + "type": "string" + }, + "doneTime": { + "description": "The time when the operation finished.", + "format": "google-datetime", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels associated with the operation.", + "type": "object" + }, + "operationType": { + "description": "The operation type.", + "enum": [ + "BATCH_OPERATION_TYPE_UNSPECIFIED", + "BATCH" + ], + "enumDescriptions": [ + "Batch operation type is unknown.", + "Batch operation type." + ], + "type": "string" + }, + "warnings": { + "description": "Warnings encountered during operation execution.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "Binding": { + "description": "Associates members, or principals, with a role.", + "id": "Binding", + "properties": { + "condition": { + "$ref": "Expr", + "description": "The condition that is associated with this binding.If the condition evaluates to true, then this binding applies to the current request.If the condition evaluates to false, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies)." + }, + "members": { + "description": "Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}: A single identity in a workforce identity pool. 
principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}: All workforce identities in a group. principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}: All workforce identities with a specific attribute value. principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*: All identities in a workforce identity pool. principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}: A single identity in a workload identity pool. principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}: A workload identity pool group. principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}: All identities in a workload identity pool with a certain attribute. principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*: All identities in a workload identity pool. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding. deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}: Deleted single identity in a workforce identity pool. For example, deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value.", + "items": { + "type": "string" + }, + "type": "array" + }, + "role": { + "description": "Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.For an overview of the IAM roles and permissions, see the IAM documentation (https://cloud.google.com/iam/docs/roles-overview). For a list of the available pre-defined roles, see here (https://cloud.google.com/iam/docs/understanding-roles).", + "type": "string" + } + }, + "type": "object" + }, + "CancelJobRequest": { + "description": "A request to cancel a job.", + "id": "CancelJobRequest", + "properties": {}, + "type": "object" + }, + "Cluster": { + "description": "Describes the identifying information, config, and status of a Dataproc cluster", + "id": "Cluster", + "properties": { + "clusterName": { + "description": "Required. The cluster name, which must be unique within a project. The name must start with a lowercase letter, and can contain up to 51 lowercase letters, numbers, and hyphens. It cannot end with a hyphen. 
The name of a deleted cluster can be reused.", + "type": "string" + }, + "clusterUuid": { + "description": "Output only. A cluster UUID (Unique Universal Identifier). Dataproc generates this value when it creates the cluster.", + "readOnly": true, + "type": "string" + }, + "config": { + "$ref": "ClusterConfig", + "description": "Optional. The cluster config for a cluster of Compute Engine Instances. Note that Dataproc may set default values, and values may change when clusters are updated.Exactly one of ClusterConfig or VirtualClusterConfig must be specified." + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. The labels to associate with this cluster. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster.", + "type": "object" + }, + "metrics": { + "$ref": "ClusterMetrics", + "description": "Output only. Contains cluster daemon metrics such as HDFS and YARN stats.Beta Feature: This report is available for testing purposes only. It may be changed before final release.", + "readOnly": true + }, + "projectId": { + "description": "Required. The Google Cloud Platform project ID that the cluster belongs to.", + "type": "string" + }, + "status": { + "$ref": "ClusterStatus", + "description": "Output only. Cluster status.", + "readOnly": true + }, + "statusHistory": { + "description": "Output only. The previous cluster status.", + "items": { + "$ref": "ClusterStatus" + }, + "readOnly": true, + "type": "array" + }, + "virtualClusterConfig": { + "$ref": "VirtualClusterConfig", + "description": "Optional. The virtual cluster config is used when creating a Dataproc cluster that does not directly control the underlying compute resources, for example, when creating a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview). Dataproc may set default values, and values may change when clusters are updated. Exactly one of config or virtual_cluster_config must be specified." + } + }, + "type": "object" + }, + "ClusterConfig": { + "description": "The cluster config.", + "id": "ClusterConfig", + "properties": { + "autoscalingConfig": { + "$ref": "AutoscalingConfig", + "description": "Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset." + }, + "auxiliaryNodeGroups": { + "description": "Optional. The node group settings.", + "items": { + "$ref": "AuxiliaryNodeGroup" + }, + "type": "array" + }, + "configBucket": { + "description": "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", + "type": "string" + }, + "dataprocMetricConfig": { + "$ref": "DataprocMetricConfig", + "description": "Optional. The config for Dataproc metrics." 
+ }, + "encryptionConfig": { + "$ref": "EncryptionConfig", + "description": "Optional. Encryption settings for the cluster." + }, + "endpointConfig": { + "$ref": "EndpointConfig", + "description": "Optional. Port/endpoint configuration for this cluster" + }, + "gceClusterConfig": { + "$ref": "GceClusterConfig", + "description": "Optional. The shared Compute Engine config settings for all instances in a cluster." + }, + "gkeClusterConfig": { + "$ref": "GkeClusterConfig", + "deprecated": true, + "description": "Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. These config settings are mutually exclusive with Compute Engine-based options, such as gce_cluster_config, master_config, worker_config, secondary_worker_config, and autoscaling_config." + }, + "initializationActions": { + "description": "Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ \"${ROLE}\" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi ", + "items": { + "$ref": "NodeInitializationAction" + }, + "type": "array" + }, + "lifecycleConfig": { + "$ref": "LifecycleConfig", + "description": "Optional. Lifecycle setting for the cluster." + }, + "masterConfig": { + "$ref": "InstanceGroupConfig", + "description": "Optional. The Compute Engine config settings for the cluster's master instance." + }, + "metastoreConfig": { + "$ref": "MetastoreConfig", + "description": "Optional. Metastore configuration." + }, + "secondaryWorkerConfig": { + "$ref": "InstanceGroupConfig", + "description": "Optional. The Compute Engine config settings for a cluster's secondary worker instances" + }, + "securityConfig": { + "$ref": "SecurityConfig", + "description": "Optional. Security settings for the cluster." + }, + "softwareConfig": { + "$ref": "SoftwareConfig", + "description": "Optional. The config settings for cluster software." + }, + "tempBucket": { + "description": "Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", + "type": "string" + }, + "workerConfig": { + "$ref": "InstanceGroupConfig", + "description": "Optional. The Compute Engine config settings for the cluster's worker instances." + } + }, + "type": "object" + }, + "ClusterMetrics": { + "description": "Contains cluster daemon metrics, such as HDFS and YARN stats.Beta Feature: This report is available for testing purposes only. 
It may be changed before final release.", + "id": "ClusterMetrics", + "properties": { + "hdfsMetrics": { + "additionalProperties": { + "format": "int64", + "type": "string" + }, + "description": "The HDFS metrics.", + "type": "object" + }, + "yarnMetrics": { + "additionalProperties": { + "format": "int64", + "type": "string" + }, + "description": "YARN metrics.", + "type": "object" + } + }, + "type": "object" + }, + "ClusterOperation": { + "description": "The cluster operation triggered by a workflow.", + "id": "ClusterOperation", + "properties": { + "done": { + "description": "Output only. Indicates the operation is done.", + "readOnly": true, + "type": "boolean" + }, + "error": { + "description": "Output only. Error, if operation failed.", + "readOnly": true, + "type": "string" + }, + "operationId": { + "description": "Output only. The id of the cluster operation.", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "ClusterOperationMetadata": { + "description": "Metadata describing the operation.", + "id": "ClusterOperationMetadata", + "properties": { + "childOperationIds": { + "description": "Output only. Child operation ids", + "items": { + "type": "string" + }, + "readOnly": true, + "type": "array" + }, + "clusterName": { + "description": "Output only. Name of the cluster for the operation.", + "readOnly": true, + "type": "string" + }, + "clusterUuid": { + "description": "Output only. Cluster UUID for the operation.", + "readOnly": true, + "type": "string" + }, + "description": { + "description": "Output only. Short description of operation.", + "readOnly": true, + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Output only. Labels associated with the operation", + "readOnly": true, + "type": "object" + }, + "operationType": { + "description": "Output only. The operation type.", + "readOnly": true, + "type": "string" + }, + "status": { + "$ref": "ClusterOperationStatus", + "description": "Output only. Current operation status.", + "readOnly": true + }, + "statusHistory": { + "description": "Output only. The previous operation status.", + "items": { + "$ref": "ClusterOperationStatus" + }, + "readOnly": true, + "type": "array" + }, + "warnings": { + "description": "Output only. Errors encountered during operation execution.", + "items": { + "type": "string" + }, + "readOnly": true, + "type": "array" + } + }, + "type": "object" + }, + "ClusterOperationStatus": { + "description": "The status of the operation.", + "id": "ClusterOperationStatus", + "properties": { + "details": { + "description": "Output only. A message containing any operation metadata details.", + "readOnly": true, + "type": "string" + }, + "innerState": { + "description": "Output only. A message containing the detailed operation state.", + "readOnly": true, + "type": "string" + }, + "state": { + "description": "Output only. A message containing the operation state.", + "enum": [ + "UNKNOWN", + "PENDING", + "RUNNING", + "DONE" + ], + "enumDescriptions": [ + "Unused.", + "The operation has been created.", + "The operation is running.", + "The operation is done; either cancelled or completed." + ], + "readOnly": true, + "type": "string" + }, + "stateStartTime": { + "description": "Output only. 
The time this state was entered.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "ClusterSelector": { + "description": "A selector that chooses target cluster for jobs based on metadata.", + "id": "ClusterSelector", + "properties": { + "clusterLabels": { + "additionalProperties": { + "type": "string" + }, + "description": "Required. The cluster labels. Cluster must have all labels to match.", + "type": "object" + }, + "zone": { + "description": "Optional. The zone where workflow process executes. This parameter does not affect the selection of the cluster.If unspecified, the zone of the first cluster matching the selector is used.", + "type": "string" + } + }, + "type": "object" + }, + "ClusterStatus": { + "description": "The status of a cluster and its instances.", + "id": "ClusterStatus", + "properties": { + "detail": { + "description": "Optional. Output only. Details of cluster's state.", + "readOnly": true, + "type": "string" + }, + "state": { + "description": "Output only. The cluster's state.", + "enum": [ + "UNKNOWN", + "CREATING", + "RUNNING", + "ERROR", + "ERROR_DUE_TO_UPDATE", + "DELETING", + "UPDATING", + "STOPPING", + "STOPPED", + "STARTING", + "REPAIRING" + ], + "enumDescriptions": [ + "The cluster state is unknown.", + "The cluster is being created and set up. It is not ready for use.", + "The cluster is currently running and healthy. It is ready for use.Note: The cluster state changes from \"creating\" to \"running\" status after the master node(s), first two primary worker nodes (and the last primary worker node if primary workers \u003e 2) are running.", + "The cluster encountered an error. It is not ready for use.", + "The cluster has encountered an error while being updated. Jobs can be submitted to the cluster, but the cluster cannot be updated.", + "The cluster is being deleted. It cannot be used.", + "The cluster is being updated. It continues to accept and process jobs.", + "The cluster is being stopped. It cannot be used.", + "The cluster is currently stopped. It is not ready for use.", + "The cluster is being started. It is not ready for use.", + "The cluster is being repaired. It is not ready for use." + ], + "readOnly": true, + "type": "string" + }, + "stateStartTime": { + "description": "Output only. Time when this state was entered (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "substate": { + "description": "Output only. Additional state information that includes status reported by the agent.", + "enum": [ + "UNSPECIFIED", + "UNHEALTHY", + "STALE_STATUS" + ], + "enumDescriptions": [ + "The cluster substate is unknown.", + "The cluster is known to be in an unhealthy state (for example, critical daemons are not running or HDFS capacity is exhausted).Applies to RUNNING state.", + "The agent-reported status is out of date (may occur if Dataproc loses communication with Agent).Applies to RUNNING state." + ], + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "ClusterToRepair": { + "description": "Cluster to be repaired", + "id": "ClusterToRepair", + "properties": { + "clusterRepairAction": { + "description": "Required. 
Repair action to take on the cluster resource.", + "enum": [ + "CLUSTER_REPAIR_ACTION_UNSPECIFIED", + "REPAIR_ERROR_DUE_TO_UPDATE_CLUSTER" + ], + "enumDescriptions": [ + "No action will be taken by default.", + "Repair cluster in ERROR_DUE_TO_UPDATE states." + ], + "type": "string" + } + }, + "type": "object" + }, + "ConfidentialInstanceConfig": { + "description": "Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs)", + "id": "ConfidentialInstanceConfig", + "properties": { + "enableConfidentialCompute": { + "description": "Optional. Defines whether the instance should have confidential compute enabled.", + "type": "boolean" + } + }, + "type": "object" + }, + "ConsolidatedExecutorSummary": { + "description": "Consolidated summary about executors used by the application.", + "id": "ConsolidatedExecutorSummary", + "properties": { + "activeTasks": { + "format": "int32", + "type": "integer" + }, + "completedTasks": { + "format": "int32", + "type": "integer" + }, + "count": { + "format": "int32", + "type": "integer" + }, + "diskUsed": { + "format": "int64", + "type": "string" + }, + "failedTasks": { + "format": "int32", + "type": "integer" + }, + "isExcluded": { + "format": "int32", + "type": "integer" + }, + "maxMemory": { + "format": "int64", + "type": "string" + }, + "memoryMetrics": { + "$ref": "MemoryMetrics" + }, + "memoryUsed": { + "format": "int64", + "type": "string" + }, + "rddBlocks": { + "format": "int32", + "type": "integer" + }, + "totalCores": { + "format": "int32", + "type": "integer" + }, + "totalDurationMillis": { + "format": "int64", + "type": "string" + }, + "totalGcTimeMillis": { + "format": "int64", + "type": "string" + }, + "totalInputBytes": { + "format": "int64", + "type": "string" + }, + "totalShuffleRead": { + "format": "int64", + "type": "string" + }, + "totalShuffleWrite": { + "format": "int64", + "type": "string" + }, + "totalTasks": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "DataprocMetricConfig": { + "description": "Dataproc metric config.", + "id": "DataprocMetricConfig", + "properties": { + "metrics": { + "description": "Required. Metrics sources to enable.", + "items": { + "$ref": "Metric" + }, + "type": "array" + } + }, + "type": "object" + }, + "DiagnoseClusterRequest": { + "description": "A request to collect cluster diagnostic information.", + "id": "DiagnoseClusterRequest", + "properties": { + "diagnosisInterval": { + "$ref": "Interval", + "description": "Optional. Time interval in which diagnosis should be carried out on the cluster." + }, + "job": { + "deprecated": true, + "description": "Optional. DEPRECATED Specifies the job on which diagnosis is to be performed. Format: projects/{project}/regions/{region}/jobs/{job}", + "type": "string" + }, + "jobs": { + "description": "Optional. Specifies a list of jobs on which diagnosis is to be performed. Format: projects/{project}/regions/{region}/jobs/{job}", + "items": { + "type": "string" + }, + "type": "array" + }, + "tarballAccess": { + "description": "Optional. The access type to the diagnostic tarball. If not specified, falls back to default access of the bucket.", + "enum": [ + "TARBALL_ACCESS_UNSPECIFIED", + "GOOGLE_CLOUD_SUPPORT", + "GOOGLE_DATAPROC_DIAGNOSE" + ], + "enumDescriptions": [ + "Tarball Access unspecified.
Falls back to default access of the bucket.", + "Google Cloud Support group has read access to the diagnostic tarball", + "Google Cloud Dataproc Diagnose service account has read access to the diagnostic tarball" + ], + "type": "string" + }, + "tarballGcsDir": { + "description": "Optional. The output Cloud Storage directory for the diagnostic tarball. If not specified, a task-specific directory in the cluster's staging bucket will be used.", + "type": "string" + }, + "yarnApplicationId": { + "deprecated": true, + "description": "Optional. DEPRECATED Specifies the yarn application on which diagnosis is to be performed.", + "type": "string" + }, + "yarnApplicationIds": { + "description": "Optional. Specifies a list of yarn applications on which diagnosis is to be performed.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "DiagnoseClusterResults": { + "description": "The location of diagnostic output.", + "id": "DiagnoseClusterResults", + "properties": { + "outputUri": { + "description": "Output only. The Cloud Storage URI of the diagnostic output. The output report is a plain text file with a summary of collected diagnostics.", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "DiskConfig": { + "description": "Specifies the config of disk options for a group of VM instances.", + "id": "DiskConfig", + "properties": { + "bootDiskProvisionedIops": { + "description": "Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. Note: This field is only supported if boot_disk_type is hyperdisk-balanced.", + "format": "int64", + "type": "string" + }, + "bootDiskProvisionedThroughput": { + "description": "Optional. Indicates how much throughput to provision for the disk. This sets the throughput in MB per second that the disk can handle. Values must be greater than or equal to 1. Note: This field is only supported if boot_disk_type is hyperdisk-balanced.", + "format": "int64", + "type": "string" + }, + "bootDiskSizeGb": { + "description": "Optional. Size in GB of the boot disk (default is 500GB).", + "format": "int32", + "type": "integer" + }, + "bootDiskType": { + "description": "Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).", + "type": "string" + }, + "localSsdInterface": { + "description": "Optional. Interface type of local SSDs (default is \"scsi\"). Valid values: \"scsi\" (Small Computer System Interface), \"nvme\" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).", + "type": "string" + }, + "numLocalSsds": { + "description": "Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data.
If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "DriverSchedulingConfig": { + "description": "Driver scheduling configuration.", + "id": "DriverSchedulingConfig", + "properties": { + "memoryMb": { + "description": "Required. The amount of memory in MB the driver is requesting.", + "format": "int32", + "type": "integer" + }, + "vcores": { + "description": "Required. The number of vCPUs the driver is requesting.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } ", + "id": "Empty", + "properties": {}, + "type": "object" + }, + "EncryptionConfig": { + "description": "Encryption settings for the cluster.", + "id": "EncryptionConfig", + "properties": { + "gcePdKmsKeyName": { + "description": "Optional. The Cloud KMS key resource name to use for persistent disk encryption for all instances in the cluster. See Use CMEK with cluster data (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_cluster_data) for more information.", + "type": "string" + }, + "kmsKey": { + "description": "Optional. The Cloud KMS key resource name to use for cluster persistent disk and job argument encryption. See Use CMEK with cluster data (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_cluster_data) for more information.When this key resource name is provided, the job arguments of the following job types submitted to the cluster are encrypted using CMEK: FlinkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob) HadoopJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob) SparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob) SparkRJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob) PySparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob) SparkSqlJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob) scriptVariables and queryList.queries HiveJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob) scriptVariables and queryList.queries PigJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob) scriptVariables and queryList.queries PrestoJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob) scriptVariables and queryList.queries", + "type": "string" + } + }, + "type": "object" + }, + "EndpointConfig": { + "description": "Endpoint config for this cluster.", + "id": "EndpointConfig", + "properties": { + "enableHttpPortAccess": { + "description": "Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false.", + "type": "boolean" + }, + "httpPorts": { + "additionalProperties": { + "type": "string" + }, + "description": "Output only. The map of port descriptions to URLs.
Will only be populated if enable_http_port_access is true.", + "readOnly": true, + "type": "object" + } + }, + "type": "object" + }, + "EnvironmentConfig": { + "description": "Environment configuration for a workload.", + "id": "EnvironmentConfig", + "properties": { + "executionConfig": { + "$ref": "ExecutionConfig", + "description": "Optional. Execution configuration for a workload." + }, + "peripheralsConfig": { + "$ref": "PeripheralsConfig", + "description": "Optional. Peripherals configuration that the workload has access to." + } + }, + "type": "object" + }, + "ExecutionConfig": { + "description": "Execution configuration for a workload.", + "id": "ExecutionConfig", + "properties": { + "idleTtl": { + "description": "Optional. Applies to sessions only. The duration to keep the session alive while it's idling. Exceeding this threshold causes the session to terminate. This field cannot be set on a batch workload. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). Defaults to 1 hour if not set. If both ttl and idle_ttl are specified for an interactive session, the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first.", + "format": "google-duration", + "type": "string" + }, + "kmsKey": { + "description": "Optional. The Cloud KMS key to use for encryption.", + "type": "string" + }, + "networkTags": { + "description": "Optional. Tags used for network traffic control.", + "items": { + "type": "string" + }, + "type": "array" + }, + "networkUri": { + "description": "Optional. Network URI to connect workload to.", + "type": "string" + }, + "serviceAccount": { + "description": "Optional. Service account used to execute the workload.", + "type": "string" + }, + "stagingBucket": { + "description": "Optional. A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", + "type": "string" + }, + "subnetworkUri": { + "description": "Optional. Subnetwork URI to connect workload to.", + "type": "string" + }, + "ttl": { + "description": "Optional. The duration after which the workload will be terminated, specified as the JSON representation for Duration (https://protobuf.dev/programming-guides/proto3/#json). When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days.
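As a concrete reading of the ttl and idle_ttl semantics described here, a minimal sketch using the same generated Go client as above; the duration values are arbitrary placeholders in the google-duration string form.

package main

import (
	"encoding/json"
	"fmt"

	dataproc "google.golang.org/api/dataproc/v1"
)

func main() {
	// For an interactive session, setting both limits makes them OR conditions:
	// the session ends after 1 hour of idleness or 8 hours of total lifetime,
	// whichever is reached first.
	env := &dataproc.EnvironmentConfig{
		ExecutionConfig: &dataproc.ExecutionConfig{
			IdleTtl: "3600s",
			Ttl:     "28800s",
		},
	}
	b, _ := json.MarshalIndent(env, "", "  ")
	fmt.Println(string(b)) // wire form matches this discovery schema
}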
If both ttl and idle_ttl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first.", + "format": "google-duration", + "type": "string" + } + }, + "type": "object" + }, + "ExecutorMetrics": { + "id": "ExecutorMetrics", + "properties": { + "metrics": { + "additionalProperties": { + "format": "int64", + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "ExecutorMetricsDistributions": { + "id": "ExecutorMetricsDistributions", + "properties": { + "diskBytesSpilled": { + "items": { + "format": "double", + "type": "number" + }, + "type": "array" + }, + "failedTasks": { + "items": { + "format": "double", + "type": "number" + }, + "type": "array" + }, + "inputBytes": { + "items": { + "format": "double", + "type": "number" + }, + "type": "array" + }, + "inputRecords": { + "items": { + "format": "double", + "type": "number" + }, + "type": "array" + }, + "killedTasks": { + "items": { + "format": "double", + "type": "number" + }, + "type": "array" + }, + "memoryBytesSpilled": { + "items": { + "format": "double", + "type": "number" + }, + "type": "array" + }, + "outputBytes": { + "items": { + "format": "double", + "type": "number" + }, + "type": "array" + }, + "outputRecords": { + "items": { + "format": "double", + "type": "number" + }, + "type": "array" + }, + "peakMemoryMetrics": { + "$ref": "ExecutorPeakMetricsDistributions" + }, + "quantiles": { + "items": { + "format": "double", + "type": "number" + }, + "type": "array" + }, + "shuffleRead": { + "items": { + "format": "double", + "type": "number" + }, + "type": "array" + }, + "shuffleReadRecords": { + "items": { + "format": "double", + "type": "number" + }, + "type": "array" + }, + "shuffleWrite": { + "items": { + "format": "double", + "type": "number" + }, + "type": "array" + }, + "shuffleWriteRecords": { + "items": { + "format": "double", + "type": "number" + }, + "type": "array" + }, + "succeededTasks": { + "items": { + "format": "double", + "type": "number" + }, + "type": "array" + }, + "taskTimeMillis": { + "items": { + "format": "double", + "type": "number" + }, + "type": "array" + } + }, + "type": "object" + }, + "ExecutorPeakMetricsDistributions": { + "id": "ExecutorPeakMetricsDistributions", + "properties": { + "executorMetrics": { + "items": { + "$ref": "ExecutorMetrics" + }, + "type": "array" + }, + "quantiles": { + "items": { + "format": "double", + "type": "number" + }, + "type": "array" + } + }, + "type": "object" + }, + "ExecutorResourceRequest": { + "description": "Resources used per executor used by the application.", + "id": "ExecutorResourceRequest", + "properties": { + "amount": { + "format": "int64", + "type": "string" + }, + "discoveryScript": { + "type": "string" + }, + "resourceName": { + "type": "string" + }, + "vendor": { + "type": "string" + } + }, + "type": "object" + }, + "ExecutorStageSummary": { + "description": "Executor resources consumed by a stage.", + "id": "ExecutorStageSummary", + "properties": { + "diskBytesSpilled": { + "format": "int64", + "type": "string" + }, + "executorId": { + "type": "string" + }, + "failedTasks": { + "format": "int32", + "type": "integer" + }, + "inputBytes": { + "format": "int64", + "type": "string" + }, + "inputRecords": { + "format": "int64", + "type": "string" + }, + "isExcludedForStage": { + "type": "boolean" + }, + "killedTasks": { + "format": "int32", + "type": "integer" + }, + "memoryBytesSpilled": { + 
"format": "int64", + "type": "string" + }, + "outputBytes": { + "format": "int64", + "type": "string" + }, + "outputRecords": { + "format": "int64", + "type": "string" + }, + "peakMemoryMetrics": { + "$ref": "ExecutorMetrics" + }, + "shuffleRead": { + "format": "int64", + "type": "string" + }, + "shuffleReadRecords": { + "format": "int64", + "type": "string" + }, + "shuffleWrite": { + "format": "int64", + "type": "string" + }, + "shuffleWriteRecords": { + "format": "int64", + "type": "string" + }, + "stageAttemptId": { + "format": "int32", + "type": "integer" + }, + "stageId": { + "format": "int64", + "type": "string" + }, + "succeededTasks": { + "format": "int32", + "type": "integer" + }, + "taskTimeMillis": { + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "ExecutorSummary": { + "description": "Details about executors used by the application.", + "id": "ExecutorSummary", + "properties": { + "activeTasks": { + "format": "int32", + "type": "integer" + }, + "addTime": { + "format": "google-datetime", + "type": "string" + }, + "attributes": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "completedTasks": { + "format": "int32", + "type": "integer" + }, + "diskUsed": { + "format": "int64", + "type": "string" + }, + "excludedInStages": { + "items": { + "format": "int64", + "type": "string" + }, + "type": "array" + }, + "executorId": { + "type": "string" + }, + "executorLogs": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "failedTasks": { + "format": "int32", + "type": "integer" + }, + "hostPort": { + "type": "string" + }, + "isActive": { + "type": "boolean" + }, + "isExcluded": { + "type": "boolean" + }, + "maxMemory": { + "format": "int64", + "type": "string" + }, + "maxTasks": { + "format": "int32", + "type": "integer" + }, + "memoryMetrics": { + "$ref": "MemoryMetrics" + }, + "memoryUsed": { + "format": "int64", + "type": "string" + }, + "peakMemoryMetrics": { + "$ref": "ExecutorMetrics" + }, + "rddBlocks": { + "format": "int32", + "type": "integer" + }, + "removeReason": { + "type": "string" + }, + "removeTime": { + "format": "google-datetime", + "type": "string" + }, + "resourceProfileId": { + "format": "int32", + "type": "integer" + }, + "resources": { + "additionalProperties": { + "$ref": "ResourceInformation" + }, + "type": "object" + }, + "totalCores": { + "format": "int32", + "type": "integer" + }, + "totalDurationMillis": { + "format": "int64", + "type": "string" + }, + "totalGcTimeMillis": { + "format": "int64", + "type": "string" + }, + "totalInputBytes": { + "format": "int64", + "type": "string" + }, + "totalShuffleRead": { + "format": "int64", + "type": "string" + }, + "totalShuffleWrite": { + "format": "int64", + "type": "string" + }, + "totalTasks": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "Expr": { + "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. 
The syntax and semantics of CEL are documented at https://github.com/google/cel-spec.Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() \u003c 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", + "id": "Expr", + "properties": { + "description": { + "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", + "type": "string" + }, + "expression": { + "description": "Textual representation of an expression in Common Expression Language syntax.", + "type": "string" + }, + "location": { + "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", + "type": "string" + }, + "title": { + "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", + "type": "string" + } + }, + "type": "object" + }, + "FlinkJob": { + "description": "A Dataproc job for running Apache Flink applications on YARN.", + "id": "FlinkJob", + "properties": { + "args": { + "description": "Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision might occur that causes an incorrect job submission.", + "items": { + "type": "string" + }, + "type": "array" + }, + "jarFileUris": { + "description": "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Flink driver and tasks.", + "items": { + "type": "string" + }, + "type": "array" + }, + "loggingConfig": { + "$ref": "LoggingConfig", + "description": "Optional. The runtime log config for job execution." + }, + "mainClass": { + "description": "The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris.", + "type": "string" + }, + "mainJarFileUri": { + "description": "The HCFS URI of the jar file that contains the main class.", + "type": "string" + }, + "properties": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code.", + "type": "object" + }, + "savepointUri": { + "description": "Optional. 
HCFS URI of the savepoint, which contains the last saved progress for starting the current job.", + "type": "string" + } + }, + "type": "object" + }, + "GceClusterConfig": { + "description": "Common config settings for resources of Compute Engine cluster instances, applicable to all instances in the cluster.", + "id": "GceClusterConfig", + "properties": { + "confidentialInstanceConfig": { + "$ref": "ConfidentialInstanceConfig", + "description": "Optional. Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs)." + }, + "internalIpOnly": { + "description": "Optional. This setting applies to subnetwork-enabled networks. It is set to true by default in clusters created with image versions 2.2.x.When set to true: All cluster VMs have internal IP addresses. Private Google Access (https://cloud.google.com/vpc/docs/private-google-access) must be enabled to access Dataproc and other Google Cloud APIs. Off-cluster dependencies must be configured to be accessible without external IP addresses.When set to false: Cluster VMs are not restricted to internal IP addresses. Ephemeral external IP addresses are assigned to each cluster VM.", + "type": "boolean" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. The Compute Engine metadata entries to add to all instances (see Project and instance metadata (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).", + "type": "object" + }, + "networkUri": { + "description": "Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information).A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default projects/[project_id]/global/networks/default default", + "type": "string" + }, + "nodeGroupAffinity": { + "$ref": "NodeGroupAffinity", + "description": "Optional. Node Group Affinity for sole-tenant clusters." + }, + "privateIpv6GoogleAccess": { + "description": "Optional. The type of IPv6 access for a cluster.", + "enum": [ + "PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED", + "INHERIT_FROM_SUBNETWORK", + "OUTBOUND", + "BIDIRECTIONAL" + ], + "enumDescriptions": [ + "If unspecified, Compute Engine default behavior will apply, which is the same as INHERIT_FROM_SUBNETWORK.", + "Private access to and from Google Services configuration inherited from the subnetwork configuration. This is the default Compute Engine behavior.", + "Enables outbound private IPv6 access to Google Services from the Dataproc cluster.", + "Enables bidirectional private IPv6 access between Google Services and the Dataproc cluster." + ], + "type": "string" + }, + "reservationAffinity": { + "$ref": "ReservationAffinity", + "description": "Optional. Reservation Affinity for consuming Zonal reservation." + }, + "serviceAccount": { + "description": "Optional.
The Dataproc service account (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see VM Data Plane identity (https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services.If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.", + "type": "string" + }, + "serviceAccountScopes": { + "description": "Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: https://www.googleapis.com/auth/cloud.useraccounts.readonly https://www.googleapis.com/auth/devstorage.read_write https://www.googleapis.com/auth/logging.writeIf no scopes are specified, the following defaults are also provided: https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/bigtable.admin.table https://www.googleapis.com/auth/bigtable.data https://www.googleapis.com/auth/devstorage.full_control", + "items": { + "type": "string" + }, + "type": "array" + }, + "shieldedInstanceConfig": { + "$ref": "ShieldedInstanceConfig", + "description": "Optional. Shielded Instance Config for clusters using Compute Engine Shielded VMs (https://cloud.google.com/security/shielded-cloud/shielded-vm)." + }, + "subnetworkUri": { + "description": "Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/[region]/subnetworks/sub0 projects/[project_id]/regions/[region]/subnetworks/sub0 sub0", + "type": "string" + }, + "tags": { + "description": "The Compute Engine network tags to add to all instances (see Tagging instances (https://cloud.google.com/vpc/docs/add-remove-network-tags)).", + "items": { + "type": "string" + }, + "type": "array" + }, + "zoneUri": { + "description": "Optional. The Compute Engine zone where the Dataproc cluster will be located. If omitted, the service will pick a zone in the cluster's Compute Engine region. On a get request, zone will always be present.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone] projects/[project_id]/zones/[zone] [zone]", + "type": "string" + } + }, + "type": "object" + }, + "GetIamPolicyRequest": { + "description": "Request message for GetIamPolicy method.", + "id": "GetIamPolicyRequest", + "properties": { + "options": { + "$ref": "GetPolicyOptions", + "description": "OPTIONAL: A GetPolicyOptions object for specifying options to GetIamPolicy." + } + }, + "type": "object" + }, + "GetPolicyOptions": { + "description": "Encapsulates settings provided to GetIamPolicy.", + "id": "GetPolicyOptions", + "properties": { + "requestedPolicyVersion": { + "description": "Optional. The maximum policy version that will be used to format the policy.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional role bindings must specify version 3. Policies with no conditional role bindings may specify any valid value or leave the field unset.The policy in the response might use the policy version that you specified, or it might use a lower policy version. 
For example, if you specify version 3, but the policy has no conditional role bindings, the response uses version 1.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "GkeClusterConfig": { + "description": "The cluster's GKE config.", + "id": "GkeClusterConfig", + "properties": { + "gkeClusterTarget": { + "description": "Optional. A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional). Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'", + "type": "string" + }, + "namespacedGkeDeploymentTarget": { + "$ref": "NamespacedGkeDeploymentTarget", + "deprecated": true, + "description": "Optional. Deprecated. Use gkeClusterTarget. Used only for the deprecated beta. A target for the deployment." + }, + "nodePoolTarget": { + "description": "Optional. GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings.", + "items": { + "$ref": "GkeNodePoolTarget" + }, + "type": "array" + } + }, + "type": "object" + }, + "GkeNodeConfig": { + "description": "Parameters that describe cluster nodes.", + "id": "GkeNodeConfig", + "properties": { + "accelerators": { + "description": "Optional. A list of hardware accelerators (https://cloud.google.com/compute/docs/gpus) to attach to each node.", + "items": { + "$ref": "GkeNodePoolAcceleratorConfig" + }, + "type": "array" + }, + "bootDiskKmsKey": { + "description": "Optional. The Customer Managed Encryption Key (CMEK) (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek) used to encrypt the boot disk attached to each node in the node pool. Specify the key using the following format: projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}", + "type": "string" + }, + "localSsdCount": { + "description": "Optional. The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone (see Adding Local SSDs (https://cloud.google.com/compute/docs/disks/local-ssd)).", + "format": "int32", + "type": "integer" + }, + "machineType": { + "description": "Optional. The name of a Compute Engine machine type (https://cloud.google.com/compute/docs/machine-types).", + "type": "string" + }, + "minCpuPlatform": { + "description": "Optional. Minimum CPU platform (https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. Specify the friendly names of CPU platforms, such as \"Intel Haswell\" or \"Intel Sandy Bridge\".", + "type": "string" + }, + "preemptible": { + "description": "Optional. Whether the nodes are created as legacy preemptible VM instances (https://cloud.google.com/compute/docs/instances/preemptible). Also see Spot VMs, preemptible VM instances without a maximum lifetime.
Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).", + "type": "boolean" + }, + "spot": { + "description": "Optional. Whether the nodes are created as Spot VM instances (https://cloud.google.com/compute/docs/instances/spot). Spot VMs are the latest update to legacy preemptible VMs. Spot VMs do not have a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).", + "type": "boolean" + } + }, + "type": "object" + }, + "GkeNodePoolAcceleratorConfig": { + "description": "A GkeNodeConfigAcceleratorConfig represents a Hardware Accelerator request for a node pool.", + "id": "GkeNodePoolAcceleratorConfig", + "properties": { + "acceleratorCount": { + "description": "The number of accelerator cards exposed to an instance.", + "format": "int64", + "type": "string" + }, + "acceleratorType": { + "description": "The accelerator type resource name (see GPUs on Compute Engine).", + "type": "string" + }, + "gpuPartitionSize": { + "description": "Size of partitions to create on the GPU. Valid values are described in the NVIDIA MIG user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning).", + "type": "string" + } + }, + "type": "object" + }, + "GkeNodePoolAutoscalingConfig": { + "description": "GkeNodePoolAutoscaling contains information the cluster autoscaler needs to adjust the size of the node pool to the current cluster usage.", + "id": "GkeNodePoolAutoscalingConfig", + "properties": { + "maxNodeCount": { + "description": "The maximum number of nodes in the node pool. Must be \u003e= min_node_count, and must be \u003e 0. Note: Quota must be sufficient to scale up the cluster.", + "format": "int32", + "type": "integer" + }, + "minNodeCount": { + "description": "The minimum number of nodes in the node pool. Must be \u003e= 0 and \u003c= max_node_count.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "GkeNodePoolConfig": { + "description": "The configuration of a GKE node pool used by a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster).", + "id": "GkeNodePoolConfig", + "properties": { + "autoscaling": { + "$ref": "GkeNodePoolAutoscalingConfig", + "description": "Optional. The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present." + }, + "config": { + "$ref": "GkeNodeConfig", + "description": "Optional. The node pool configuration." + }, + "locations": { + "description": "Optional.
The list of Compute Engine zones (https://cloud.google.com/compute/docs/zones#available) where node pool nodes associated with a Dataproc on GKE virtual cluster will be located.Note: All node pools associated with a virtual cluster must be located in the same region as the virtual cluster, and they must be located in the same zone within that region.If a location is not specified during node pool creation, Dataproc on GKE will choose the zone.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GkeNodePoolTarget": { + "description": "GKE node pools that Dataproc workloads run on.", + "id": "GkeNodePoolTarget", + "properties": { + "nodePool": { + "description": "Required. The target GKE node pool. Format: 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'", + "type": "string" + }, + "nodePoolConfig": { + "$ref": "GkeNodePoolConfig", + "description": "Input only. The configuration for the GKE node pool.If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail.If omitted, any node pool with the specified name is used. If a node pool with the specified name does not exist, Dataproc creates a node pool with default values.This is an input only field. It will not be returned by the API." + }, + "roles": { + "description": "Required. The roles associated with the GKE node pool.", + "items": { + "enum": [ + "ROLE_UNSPECIFIED", + "DEFAULT", + "CONTROLLER", + "SPARK_DRIVER", + "SPARK_EXECUTOR" + ], + "enumDescriptions": [ + "Role is unspecified.", + "At least one node pool must have the DEFAULT role. Work assigned to a role that is not associated with a node pool is assigned to the node pool with the DEFAULT role. For example, work assigned to the CONTROLLER role will be assigned to the node pool with the DEFAULT role if no node pool has the CONTROLLER role.", + "Run work associated with the Dataproc control plane (for example, controllers and webhooks). Very low resource requirements.", + "Run work associated with a Spark driver of a job.", + "Run work associated with a Spark executor of a job." + ], + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig": { + "description": "Encryption settings for encrypting workflow template job arguments.", + "id": "GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig", + "properties": { + "kmsKey": { + "description": "Optional.
The Cloud KMS key name to use for encrypting workflow template job arguments.When this key is provided, the following workflow template job arguments (https://cloud.google.com/dataproc/docs/concepts/workflows/use-workflows#adding_jobs_to_a_template), if present, are CMEK encrypted (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_workflow_template_data): FlinkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob) HadoopJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob) SparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob) SparkRJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob) PySparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob) SparkSqlJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob) scriptVariables and queryList.queries HiveJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob) scriptVariables and queryList.queries PigJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob) scriptVariables and queryList.queries PrestoJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob) scriptVariables and queryList.queries", + "type": "string" + } + }, + "type": "object" + }, + "HadoopJob": { + "description": "A Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).", + "id": "HadoopJob", + "properties": { + "archiveUris": { + "description": "Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.", + "items": { + "type": "string" + }, + "type": "array" + }, + "args": { + "description": "Optional. The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision might occur that causes an incorrect job submission.", + "items": { + "type": "string" + }, + "type": "array" + }, + "fileUris": { + "description": "Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.", + "items": { + "type": "string" + }, + "type": "array" + }, + "jarFileUris": { + "description": "Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.", + "items": { + "type": "string" + }, + "type": "array" + }, + "loggingConfig": { + "$ref": "LoggingConfig", + "description": "Optional. The runtime log config for job execution." + }, + "mainClass": { + "description": "The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.", + "type": "string" + }, + "mainJarFileUri": { + "description": "The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'", + "type": "string" + }, + "properties": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. A mapping of property names to values, used to configure Hadoop.
Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.", + "type": "object" + } + }, + "type": "object" + }, + "HiveJob": { + "description": "A Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN.", + "id": "HiveJob", + "properties": { + "continueOnFailure": { + "description": "Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.", + "type": "boolean" + }, + "jarFileUris": { + "description": "Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.", + "items": { + "type": "string" + }, + "type": "array" + }, + "properties": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.", + "type": "object" + }, + "queryFileUri": { + "description": "The HCFS URI of the script that contains Hive queries.", + "type": "string" + }, + "queryList": { + "$ref": "QueryList", + "description": "A list of queries." + }, + "scriptVariables": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. Mapping of query variable names to values (equivalent to the Hive command: SET name=\"value\";).", + "type": "object" + } + }, + "type": "object" + }, + "IdentityConfig": { + "description": "Identity related configuration, including service account based secure multi-tenancy user mappings.", + "id": "IdentityConfig", + "properties": { + "userServiceAccountMapping": { + "additionalProperties": { + "type": "string" + }, + "description": "Required. Map of user to service account.", + "type": "object" + } + }, + "type": "object" + }, + "InjectCredentialsRequest": { + "description": "A request to inject credentials into a cluster.", + "id": "InjectCredentialsRequest", + "properties": { + "clusterUuid": { + "description": "Required. The cluster UUID.", + "type": "string" + }, + "credentialsCiphertext": { + "description": "Required. The encrypted credentials being injected into the cluster.The client is responsible for encrypting the credentials in a way that is supported by the cluster.A wrapped value is used here so that the actual contents of the encrypted credentials are not written to audit logs.", + "type": "string" + } + }, + "type": "object" + }, + "InputMetrics": { + "description": "Metrics about the input data read by the task.", + "id": "InputMetrics", + "properties": { + "bytesRead": { + "format": "int64", + "type": "string" + }, + "recordsRead": { + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "InputQuantileMetrics": { + "id": "InputQuantileMetrics", + "properties": { + "bytesRead": { + "$ref": "Quantiles" + }, + "recordsRead": { + "$ref": "Quantiles" + } + }, + "type": "object" + }, + "InstanceFlexibilityPolicy": { + "description": "Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.", + "id": "InstanceFlexibilityPolicy", + "properties": { + "instanceSelectionList": { + "description": "Optional.
List of instance selection options that the group will use when creating new VMs.", + "items": { + "$ref": "InstanceSelection" + }, + "type": "array" + }, + "instanceSelectionResults": { + "description": "Output only. A list of instance selection results in the group.", + "items": { + "$ref": "InstanceSelectionResult" + }, + "readOnly": true, + "type": "array" + }, + "provisioningModelMix": { + "$ref": "ProvisioningModelMix", + "description": "Optional. Defines how the Group selects the provisioning model to ensure required reliability." + } + }, + "type": "object" + }, + "InstanceGroupAutoscalingPolicyConfig": { + "description": "Configuration for the size bounds of an instance group, including its proportional size to other groups.", + "id": "InstanceGroupAutoscalingPolicyConfig", + "properties": { + "maxInstances": { + "description": "Required. Maximum number of instances for this group. Required for primary workers. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set.Primary workers - Bounds: [min_instances, ). Secondary workers - Bounds: [min_instances, ). Default: 0.", + "format": "int32", + "type": "integer" + }, + "minInstances": { + "description": "Optional. Minimum number of instances for this group.Primary workers - Bounds: 2, max_instances. Default: 2. Secondary workers - Bounds: 0, max_instances. Default: 0.", + "format": "int32", + "type": "integer" + }, + "weight": { + "description": "Optional. Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker.The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if max_instances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created.If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "InstanceGroupConfig": { + "description": "The config settings for Compute Engine resources in an instance group, such as a master or worker group.", + "id": "InstanceGroupConfig", + "properties": { + "accelerators": { + "description": "Optional. The Compute Engine accelerator configuration for these instances.", + "items": { + "$ref": "AcceleratorConfig" + }, + "type": "array" + }, + "diskConfig": { + "$ref": "DiskConfig", + "description": "Optional. Disk option config settings." + }, + "imageUri": { + "description": "Optional. The Compute Engine image resource used for cluster instances.The URI can represent an image or image family.Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id] projects/[project_id]/global/images/[image-id] image-idImage family examples. 
Dataproc will use the most recent image from the family: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name] projects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.", + "type": "string" + }, + "instanceFlexibilityPolicy": { + "$ref": "InstanceFlexibilityPolicy", + "description": "Optional. Instance flexibility Policy allowing a mixture of VM shapes and provisioning models." + }, + "instanceNames": { + "description": "Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.", + "items": { + "type": "string" + }, + "readOnly": true, + "type": "array" + }, + "instanceReferences": { + "description": "Output only. List of references to Compute Engine instances.", + "items": { + "$ref": "InstanceReference" + }, + "readOnly": true, + "type": "array" + }, + "isPreemptible": { + "description": "Output only. Specifies that this instance group contains preemptible instances.", + "readOnly": true, + "type": "boolean" + }, + "machineTypeUri": { + "description": "Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2.", + "type": "string" + }, + "managedGroupConfig": { + "$ref": "ManagedGroupConfig", + "description": "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", + "readOnly": true + }, + "minCpuPlatform": { + "description": "Optional. Specifies the minimum CPU platform for the Instance Group. See Dataproc -\u003e Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", + "type": "string" + }, + "minNumInstances": { + "description": "Optional. The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster is placed in an ERROR state. The failed VMs are not deleted.", + "format": "int32", + "type": "integer" + }, + "numInstances": { + "description": "Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1.", + "format": "int32", + "type": "integer" + }, + "preemptibility": { + "description": "Optional. Specifies the preemptibility of the instance group.The default value for master and worker groups is NON_PREEMPTIBLE.
This default cannot be changed.The default value for secondary instances is PREEMPTIBLE.", + "enum": [ + "PREEMPTIBILITY_UNSPECIFIED", + "NON_PREEMPTIBLE", + "PREEMPTIBLE", + "SPOT" + ], + "enumDescriptions": [ + "Preemptibility is unspecified, the system will choose the appropriate setting for each instance group.", + "Instances are non-preemptible.This option is allowed for all instance groups and is the only valid value for Master and Worker instance groups.", + "Instances are preemptible (https://cloud.google.com/compute/docs/instances/preemptible).This option is allowed only for secondary worker (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-vms) groups.", + "Instances are Spot VMs (https://cloud.google.com/compute/docs/instances/spot).This option is allowed only for secondary worker (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-vms) groups. Spot VMs are the latest version of preemptible VMs (https://cloud.google.com/compute/docs/instances/preemptible), and provide additional features." + ], + "type": "string" + }, + "startupConfig": { + "$ref": "StartupConfig", + "description": "Optional. Configuration to handle the startup of instances during cluster create and update process." + } + }, + "type": "object" + }, + "InstanceReference": { + "description": "A reference to a Compute Engine instance.", + "id": "InstanceReference", + "properties": { + "instanceId": { + "description": "The unique identifier of the Compute Engine instance.", + "type": "string" + }, + "instanceName": { + "description": "The user-friendly name of the Compute Engine instance.", + "type": "string" + }, + "publicEciesKey": { + "description": "The public ECIES key used for sharing data with this instance.", + "type": "string" + }, + "publicKey": { + "description": "The public RSA key used for sharing data with this instance.", + "type": "string" + } + }, + "type": "object" + }, + "InstanceSelection": { + "description": "Defines machine types and a rank to which the machine types belong.", + "id": "InstanceSelection", + "properties": { + "machineTypes": { + "description": "Optional. Full machine-type names, e.g. \"n1-standard-16\".", + "items": { + "type": "string" + }, + "type": "array" + }, + "rank": { + "description": "Optional. Preference of this instance selection. Lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fall back to the next rank based on availability. Machine types and instance selections with the same priority have the same preference.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "InstanceSelectionResult": { + "description": "Defines a mapping from machine types to the number of VMs that are created with each machine type.", + "id": "InstanceSelectionResult", + "properties": { + "machineType": { + "description": "Output only. Full machine-type names, e.g. \"n1-standard-16\".", + "readOnly": true, + "type": "string" + }, + "vmCount": { + "description": "Output only. Number of VMs provisioned with the machine_type.", + "format": "int32", + "readOnly": true, + "type": "integer" + } + }, + "type": "object" + }, + "InstantiateWorkflowTemplateRequest": { + "description": "A request to instantiate a workflow template.", + "id": "InstantiateWorkflowTemplateRequest", + "properties": { + "parameters": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. Map from parameter names to values that should be used for those parameters. 
Values may not exceed 1000 characters.", + "type": "object" + }, + "requestId": { + "description": "Optional. A tag that prevents multiple concurrent workflow instances with the same tag from running. This mitigates risk of concurrent instances started due to retries.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", + "type": "string" + }, + "version": { + "description": "Optional. The version of workflow template to instantiate. If specified, the workflow will be instantiated only if the current version of the workflow template has the supplied version.This option cannot be used to instantiate a previous version of workflow template.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "Interval": { + "description": "Represents a time interval, encoded as a Timestamp start (inclusive) and a Timestamp end (exclusive).The start must be less than or equal to the end. When the start equals the end, the interval is empty (matches no time). When both start and end are unspecified, the interval matches any time.", + "id": "Interval", + "properties": { + "endTime": { + "description": "Optional. Exclusive end of the interval.If specified, a Timestamp matching this interval will have to be before the end.", + "format": "google-datetime", + "type": "string" + }, + "startTime": { + "description": "Optional. Inclusive start of the interval.If specified, a Timestamp matching this interval will have to be the same or after the start.", + "format": "google-datetime", + "type": "string" + } + }, + "type": "object" + }, + "Job": { + "description": "A Dataproc job resource.", + "id": "Job", + "properties": { + "done": { + "description": "Output only. Indicates whether the job is completed. If the value is false, the job is still in progress. If true, the job is completed, and status.state field will indicate if it was successful, failed, or cancelled.", + "readOnly": true, + "type": "boolean" + }, + "driverControlFilesUri": { + "description": "Output only. If present, the location of miscellaneous control files which can be used as part of job setup and handling. If not present, control files might be placed in the same location as driver_output_uri.", + "readOnly": true, + "type": "string" + }, + "driverOutputResourceUri": { + "description": "Output only. A URI pointing to the location of the stdout of the job's driver program.", + "readOnly": true, + "type": "string" + }, + "driverSchedulingConfig": { + "$ref": "DriverSchedulingConfig", + "description": "Optional. Driver scheduling configuration." + }, + "flinkJob": { + "$ref": "FlinkJob", + "description": "Optional. Job is a Flink job." + }, + "hadoopJob": { + "$ref": "HadoopJob", + "description": "Optional. Job is a Hadoop job." + }, + "hiveJob": { + "$ref": "HiveJob", + "description": "Optional. Job is a Hive job." + }, + "jobUuid": { + "description": "Output only. A UUID that uniquely identifies a job within the project over time. This is in contrast to a user-settable reference.job_id that might be reused over time.", + "readOnly": true, + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). 
Label values can be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.", + "type": "object" + }, + "pigJob": { + "$ref": "PigJob", + "description": "Optional. Job is a Pig job." + }, + "placement": { + "$ref": "JobPlacement", + "description": "Required. Job information, including how, when, and where to run the job." + }, + "prestoJob": { + "$ref": "PrestoJob", + "description": "Optional. Job is a Presto job." + }, + "pysparkJob": { + "$ref": "PySparkJob", + "description": "Optional. Job is a PySpark job." + }, + "reference": { + "$ref": "JobReference", + "description": "Optional. The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id." + }, + "scheduling": { + "$ref": "JobScheduling", + "description": "Optional. Job scheduling configuration." + }, + "sparkJob": { + "$ref": "SparkJob", + "description": "Optional. Job is a Spark job." + }, + "sparkRJob": { + "$ref": "SparkRJob", + "description": "Optional. Job is a SparkR job." + }, + "sparkSqlJob": { + "$ref": "SparkSqlJob", + "description": "Optional. Job is a SparkSql job." + }, + "status": { + "$ref": "JobStatus", + "description": "Output only. The job status. Additional application-specific status information might be contained in the type_job and yarn_applications fields.", + "readOnly": true + }, + "statusHistory": { + "description": "Output only. The previous job status.", + "items": { + "$ref": "JobStatus" + }, + "readOnly": true, + "type": "array" + }, + "trinoJob": { + "$ref": "TrinoJob", + "description": "Optional. Job is a Trino job." + }, + "yarnApplications": { + "description": "Output only. The collection of YARN applications spun up by this job.Beta Feature: This report is available for testing purposes only. 
It might be changed before final release.", + "items": { + "$ref": "YarnApplication" + }, + "readOnly": true, + "type": "array" + } + }, + "type": "object" + }, + "JobData": { + "description": "Data corresponding to a spark job.", + "id": "JobData", + "properties": { + "completionTime": { + "format": "google-datetime", + "type": "string" + }, + "description": { + "type": "string" + }, + "jobGroup": { + "type": "string" + }, + "jobId": { + "format": "int64", + "type": "string" + }, + "killTasksSummary": { + "additionalProperties": { + "format": "int32", + "type": "integer" + }, + "type": "object" + }, + "name": { + "type": "string" + }, + "numActiveStages": { + "format": "int32", + "type": "integer" + }, + "numActiveTasks": { + "format": "int32", + "type": "integer" + }, + "numCompletedIndices": { + "format": "int32", + "type": "integer" + }, + "numCompletedStages": { + "format": "int32", + "type": "integer" + }, + "numCompletedTasks": { + "format": "int32", + "type": "integer" + }, + "numFailedStages": { + "format": "int32", + "type": "integer" + }, + "numFailedTasks": { + "format": "int32", + "type": "integer" + }, + "numKilledTasks": { + "format": "int32", + "type": "integer" + }, + "numSkippedStages": { + "format": "int32", + "type": "integer" + }, + "numSkippedTasks": { + "format": "int32", + "type": "integer" + }, + "numTasks": { + "format": "int32", + "type": "integer" + }, + "skippedStages": { + "items": { + "format": "int32", + "type": "integer" + }, + "type": "array" + }, + "sqlExecutionId": { + "format": "int64", + "type": "string" + }, + "stageIds": { + "items": { + "format": "int64", + "type": "string" + }, + "type": "array" + }, + "status": { + "enum": [ + "JOB_EXECUTION_STATUS_UNSPECIFIED", + "JOB_EXECUTION_STATUS_RUNNING", + "JOB_EXECUTION_STATUS_SUCCEEDED", + "JOB_EXECUTION_STATUS_FAILED", + "JOB_EXECUTION_STATUS_UNKNOWN" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "submissionTime": { + "format": "google-datetime", + "type": "string" + } + }, + "type": "object" + }, + "JobMetadata": { + "description": "Job Operation metadata.", + "id": "JobMetadata", + "properties": { + "jobId": { + "description": "Output only. The job id.", + "readOnly": true, + "type": "string" + }, + "operationType": { + "description": "Output only. Operation type.", + "readOnly": true, + "type": "string" + }, + "startTime": { + "description": "Output only. Job submission time.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "status": { + "$ref": "JobStatus", + "description": "Output only. Most recent job status.", + "readOnly": true + } + }, + "type": "object" + }, + "JobPlacement": { + "description": "Dataproc job config.", + "id": "JobPlacement", + "properties": { + "clusterLabels": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. Cluster labels to identify a cluster where the job will be submitted.", + "type": "object" + }, + "clusterName": { + "description": "Required. The name of the cluster where the job will be submitted.", + "type": "string" + }, + "clusterUuid": { + "description": "Output only. A cluster UUID generated by the Dataproc service when the job is submitted.", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "JobReference": { + "description": "Encapsulates the full scoping used to reference a job.", + "id": "JobReference", + "properties": { + "jobId": { + "description": "Optional. 
The job ID, which must be unique within the project.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters.If not specified by the caller, the job ID will be provided by the server.", + "type": "string" + }, + "projectId": { + "description": "Optional. The ID of the Google Cloud Platform project that the job belongs to. If specified, must match the request project ID.", + "type": "string" + } + }, + "type": "object" + }, + "JobScheduling": { + "description": "Job scheduling options.", + "id": "JobScheduling", + "properties": { + "maxFailuresPerHour": { + "description": "Optional. Maximum number of times per hour a driver can be restarted as a result of driver exiting with non-zero code before job is reported failed.A job might be reported as thrashing if the driver exits with a non-zero code four times within a 10-minute window.Maximum value is 10.Note: This restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).", + "format": "int32", + "type": "integer" + }, + "maxFailuresTotal": { + "description": "Optional. Maximum total number of times a driver can be restarted as a result of the driver exiting with a non-zero code. After the maximum number is reached, the job will be reported as failed.Maximum value is 240.Note: Currently, this restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "JobStatus": { + "description": "Dataproc job status.", + "id": "JobStatus", + "properties": { + "details": { + "description": "Optional. Output only. Job state details, such as an error description if the state is ERROR.", + "readOnly": true, + "type": "string" + }, + "state": { + "description": "Output only. A state message specifying the overall job state.", + "enum": [ + "STATE_UNSPECIFIED", + "PENDING", + "SETUP_DONE", + "RUNNING", + "CANCEL_PENDING", + "CANCEL_STARTED", + "CANCELLED", + "DONE", + "ERROR", + "ATTEMPT_FAILURE" + ], + "enumDescriptions": [ + "The job state is unknown.", + "The job is pending; it has been submitted, but is not yet running.", + "Job has been received by the service and completed initial setup; it will soon be submitted to the cluster.", + "The job is running on the cluster.", + "A CancelJob request has been received, but is pending.", + "Transient in-flight resources have been canceled, and the request to cancel the running job has been issued to the cluster.", + "The job cancellation was successful.", + "The job has completed successfully.", + "The job has completed, but encountered an error.", + "Job attempt has failed. The detail field contains failure details for this attempt.Applies to restartable jobs only." + ], + "readOnly": true, + "type": "string" + }, + "stateStartTime": { + "description": "Output only. The time when this state was entered.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "substate": { + "description": "Output only. 
Additional state information, which includes status reported by the agent.", + "enum": [ + "UNSPECIFIED", + "SUBMITTED", + "QUEUED", + "STALE_STATUS" + ], + "enumDescriptions": [ + "The job substate is unknown.", + "The Job is submitted to the agent.Applies to RUNNING state.", + "The Job has been received and is awaiting execution (it might be waiting for a condition to be met). See the \"details\" field for the reason for the delay.Applies to RUNNING state.", + "The agent-reported status is out of date, which can be caused by a loss of communication between the agent and Dataproc. If the agent does not send a timely update, the job will fail.Applies to RUNNING state." + ], + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "JobsSummary": { + "description": "Data related to Jobs page summary", + "id": "JobsSummary", + "properties": { + "activeJobs": { + "description": "Number of active jobs", + "format": "int32", + "type": "integer" + }, + "applicationId": { + "description": "Spark Application Id", + "type": "string" + }, + "attempts": { + "description": "Attempts info", + "items": { + "$ref": "ApplicationAttemptInfo" + }, + "type": "array" + }, + "completedJobs": { + "description": "Number of completed jobs", + "format": "int32", + "type": "integer" + }, + "failedJobs": { + "description": "Number of failed jobs", + "format": "int32", + "type": "integer" + }, + "schedulingMode": { + "description": "Spark Scheduling mode", + "type": "string" + } + }, + "type": "object" + }, + "JupyterConfig": { + "description": "Jupyter configuration for an interactive session.", + "id": "JupyterConfig", + "properties": { + "displayName": { + "description": "Optional. Display name, shown in the Jupyter kernelspec card.", + "type": "string" + }, + "kernel": { + "description": "Optional. Kernel", + "enum": [ + "KERNEL_UNSPECIFIED", + "PYTHON", + "SCALA" + ], + "enumDescriptions": [ + "The kernel is unknown.", + "Python kernel.", + "Scala kernel." + ], + "type": "string" + } + }, + "type": "object" + }, + "KerberosConfig": { + "description": "Specifies Kerberos related configuration.", + "id": "KerberosConfig", + "properties": { + "crossRealmTrustAdminServer": { + "description": "Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", + "type": "string" + }, + "crossRealmTrustKdc": { + "description": "Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", + "type": "string" + }, + "crossRealmTrustRealm": { + "description": "Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.", + "type": "string" + }, + "crossRealmTrustSharedPasswordUri": { + "description": "Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.", + "type": "string" + }, + "enableKerberos": { + "description": "Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.", + "type": "boolean" + }, + "kdcDbKeyUri": { + "description": "Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.", + "type": "string" + }, + "keyPasswordUri": { + "description": "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. 
For the self-signed certificate, this password is generated by Dataproc.", + "type": "string" + }, + "keystorePasswordUri": { + "description": "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.", + "type": "string" + }, + "keystoreUri": { + "description": "Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.", + "type": "string" + }, + "kmsKeyUri": { + "description": "Optional. The URI of the KMS key used to encrypt sensitive files.", + "type": "string" + }, + "realm": { + "description": "Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.", + "type": "string" + }, + "rootPrincipalPasswordUri": { + "description": "Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password.", + "type": "string" + }, + "tgtLifetimeHours": { + "description": "Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used.", + "format": "int32", "type": "integer" }, - "acceleratorTypeUri": { - "description": "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4.", + "truststorePasswordUri": { + "description": "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.", + "type": "string" + }, + "truststoreUri": { + "description": "Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.", + "type": "string" + } + }, + "type": "object" + }, + "KubernetesClusterConfig": { + "description": "The configuration for running the Dataproc cluster on Kubernetes.", + "id": "KubernetesClusterConfig", + "properties": { + "gkeClusterConfig": { + "$ref": "GkeClusterConfig", + "description": "Required. The configuration for running the Dataproc cluster on GKE." + }, + "kubernetesNamespace": { + "description": "Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used.", + "type": "string" + }, + "kubernetesSoftwareConfig": { + "$ref": "KubernetesSoftwareConfig", + "description": "Optional. The software configuration for this Dataproc cluster running on Kubernetes." 
+ } + }, + "type": "object" + }, + "KubernetesSoftwareConfig": { + "description": "The software configuration for this Dataproc cluster running on Kubernetes.", + "id": "KubernetesSoftwareConfig", + "properties": { + "componentVersion": { + "additionalProperties": { + "type": "string" + }, + "description": "The components that should be installed in this Dataproc cluster. The key must be a string from the KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified.", + "type": "object" + }, + "properties": { + "additionalProperties": { + "type": "string" + }, + "description": "The properties to set on daemon config files.Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image. The following are supported prefixes and their mappings: spark: spark-defaults.confFor more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).", + "type": "object" + } + }, + "type": "object" + }, + "LifecycleConfig": { + "description": "Specifies the cluster auto-delete schedule configuration.", + "id": "LifecycleConfig", + "properties": { + "autoDeleteTime": { + "description": "Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).", + "format": "google-datetime", + "type": "string" + }, + "autoDeleteTtl": { + "description": "Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).", + "format": "google-duration", + "type": "string" + }, + "idleDeleteTtl": { + "description": "Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).", + "format": "google-duration", + "type": "string" + }, + "idleStartTime": { + "description": "Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).", + "format": "google-datetime", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "ListAutoscalingPoliciesResponse": { + "description": "A response to a request to list autoscaling policies in a project.", + "id": "ListAutoscalingPoliciesResponse", + "properties": { + "nextPageToken": { + "description": "Output only. This token is included in the response if there are more results to fetch.", + "readOnly": true, + "type": "string" + }, + "policies": { + "description": "Output only. Autoscaling policies list.", + "items": { + "$ref": "AutoscalingPolicy" + }, + "readOnly": true, + "type": "array" + } + }, + "type": "object" + }, + "ListBatchesResponse": { + "description": "A list of batch workloads.", + "id": "ListBatchesResponse", + "properties": { + "batches": { + "description": "Output only. The batches from the specified collection.", + "items": { + "$ref": "Batch" + }, + "readOnly": true, + "type": "array" + }, + "nextPageToken": { + "description": "A token, which can be sent as page_token to retrieve the next page. 
If this field is omitted, there are no subsequent pages.", + "type": "string" + }, + "unreachable": { + "description": "Output only. List of Batches that could not be included in the response. Attempting to get one of these resources may indicate why it was not included in the list response.", + "items": { + "type": "string" + }, + "readOnly": true, + "type": "array" + } + }, + "type": "object" + }, + "ListClustersResponse": { + "description": "The list of all clusters in a project.", + "id": "ListClustersResponse", + "properties": { + "clusters": { + "description": "Output only. The clusters in the project.", + "items": { + "$ref": "Cluster" + }, + "readOnly": true, + "type": "array" + }, + "nextPageToken": { + "description": "Output only. This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListClustersRequest.", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "ListJobsResponse": { + "description": "A list of jobs in a project.", + "id": "ListJobsResponse", + "properties": { + "jobs": { + "description": "Output only. Jobs list.", + "items": { + "$ref": "Job" + }, + "readOnly": true, + "type": "array" + }, + "nextPageToken": { + "description": "Optional. This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListJobsRequest.", + "type": "string" + }, + "unreachable": { + "description": "Output only. List of jobs with kms_key-encrypted parameters that could not be decrypted. A response to a jobs.get request may indicate the reason for the decryption failure for a specific job.", + "items": { + "type": "string" + }, + "readOnly": true, + "type": "array" + } + }, + "type": "object" + }, + "ListOperationsResponse": { + "description": "The response message for Operations.ListOperations.", + "id": "ListOperationsResponse", + "properties": { + "nextPageToken": { + "description": "The standard List next-page token.", + "type": "string" + }, + "operations": { + "description": "A list of operations that matches the specified filter in the request.", + "items": { + "$ref": "Operation" + }, + "type": "array" + } + }, + "type": "object" + }, + "ListSessionTemplatesResponse": { + "description": "A list of session templates.", + "id": "ListSessionTemplatesResponse", + "properties": { + "nextPageToken": { + "description": "A token, which can be sent as page_token to retrieve the next page. If this field is omitted, there are no subsequent pages.", + "type": "string" + }, + "sessionTemplates": { + "description": "Output only. Session template list.", + "items": { + "$ref": "SessionTemplate" + }, + "readOnly": true, + "type": "array" + } + }, + "type": "object" + }, + "ListSessionsResponse": { + "description": "A list of interactive sessions.", + "id": "ListSessionsResponse", + "properties": { + "nextPageToken": { + "description": "A token, which can be sent as page_token, to retrieve the next page. If this field is omitted, there are no subsequent pages.", + "type": "string" + }, + "sessions": { + "description": "Output only. 
The sessions from the specified collection.", + "items": { + "$ref": "Session" + }, + "readOnly": true, + "type": "array" + } + }, + "type": "object" + }, + "ListWorkflowTemplatesResponse": { + "description": "A response to a request to list workflow templates in a project.", + "id": "ListWorkflowTemplatesResponse", + "properties": { + "nextPageToken": { + "description": "Output only. This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListWorkflowTemplatesRequest.", + "readOnly": true, + "type": "string" + }, + "templates": { + "description": "Output only. WorkflowTemplates list.", + "items": { + "$ref": "WorkflowTemplate" + }, + "readOnly": true, + "type": "array" + }, + "unreachable": { + "description": "Output only. List of workflow templates that could not be included in the response. Attempting to get one of these resources may indicate why it was not included in the list response.", + "items": { + "type": "string" + }, + "readOnly": true, + "type": "array" + } + }, + "type": "object" + }, + "LoggingConfig": { + "description": "The runtime logging config of the job.", + "id": "LoggingConfig", + "properties": { + "driverLogLevels": { + "additionalProperties": { + "enum": [ + "LEVEL_UNSPECIFIED", + "ALL", + "TRACE", + "DEBUG", + "INFO", + "WARN", + "ERROR", + "FATAL", + "OFF" + ], + "enumDescriptions": [ + "Level is unspecified. Use default level for log4j.", + "Use ALL level for log4j.", + "Use TRACE level for log4j.", + "Use DEBUG level for log4j.", + "Use INFO level for log4j.", + "Use WARN level for log4j.", + "Use ERROR level for log4j.", + "Use FATAL level for log4j.", + "Turn off log4j." + ], + "type": "string" + }, + "description": "The per-package log levels for the driver. This can include \"root\" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG'", + "type": "object" + } + }, + "type": "object" + }, + "ManagedCluster": { + "description": "Cluster that is managed by the workflow.", + "id": "ManagedCluster", + "properties": { + "clusterName": { + "description": "Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix.The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.", + "type": "string" + }, + "config": { + "$ref": "ClusterConfig", + "description": "Required. The cluster configuration." + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. The labels to associate with this cluster.Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \\p{Ll}\\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \\p{Ll}\\p{Lo}\\p{N}_-{0,63}No more than 32 labels can be associated with a given cluster.", + "type": "object" + } + }, + "type": "object" + }, + "ManagedGroupConfig": { + "description": "Specifies the resources used to actively manage an instance group.", + "id": "ManagedGroupConfig", + "properties": { + "instanceGroupManagerName": { + "description": "Output only. The name of the Instance Group Manager for this group.", + "readOnly": true, + "type": "string" + }, + "instanceGroupManagerUri": { + "description": "Output only. 
The partial URI to the instance group manager for this group. E.g. projects/my-project/regions/us-central1/instanceGroupManagers/my-igm.", + "readOnly": true, + "type": "string" + }, + "instanceTemplateName": { + "description": "Output only. The name of the Instance Template used for the Managed Instance Group.", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "MemoryMetrics": { + "id": "MemoryMetrics", + "properties": { + "totalOffHeapStorageMemory": { + "format": "int64", + "type": "string" + }, + "totalOnHeapStorageMemory": { + "format": "int64", + "type": "string" + }, + "usedOffHeapStorageMemory": { + "format": "int64", + "type": "string" + }, + "usedOnHeapStorageMemory": { + "format": "int64", "type": "string" } }, "type": "object" }, - "AnalyzeBatchRequest": { - "description": "A request to analyze a batch workload.", - "id": "AnalyzeBatchRequest", + "MetastoreConfig": { + "description": "Specifies a Metastore configuration.", + "id": "MetastoreConfig", "properties": { - "requestId": { - "description": "Optional. A unique ID used to identify the request. If the service receives two AnalyzeBatchRequest (http://cloud/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.AnalyzeBatchRequest)s with the same request_id, the second request is ignored and the Operation that corresponds to the first request created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", + "dataprocMetastoreService": { + "description": "Required. Resource name of an existing Dataproc Metastore service.Example: projects/[project_id]/locations/[dataproc_region]/services/[service-name]", "type": "string" } }, "type": "object" }, - "AnalyzeOperationMetadata": { - "description": "Metadata describing the Analyze operation.", - "id": "AnalyzeOperationMetadata", + "Metric": { + "description": "A Dataproc custom metric.", + "id": "Metric", "properties": { - "analyzedWorkloadName": { - "description": "Output only. name of the workload being analyzed.", - "readOnly": true, - "type": "string" + "metricOverrides": { + "description": "Optional. Specify one or more Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) to collect for the metric source (for the SPARK metric source, any Spark metric (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be specified).Provide metrics in the following format: METRIC_SOURCE: INSTANCE:GROUP:METRIC Use camelcase as appropriate.Examples: yarn:ResourceManager:QueueMetrics:AppsCompleted spark:driver:DAGScheduler:job.allJobs sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed hiveserver2:JVM:Memory:NonHeapMemoryUsage.used Notes: Only the specified overridden metrics are collected for the metric source. For example, if one or more spark:executor metrics are listed as metric overrides, other SPARK metrics are not collected. The collection of the metrics for other enabled custom metric sources is unaffected. For example, if both SPARK and YARN metric sources are enabled, and overrides are provided for Spark metrics only, all YARN metrics are collected.", + "items": { + "type": "string" + }, + "type": "array" }, - "analyzedWorkloadType": { - "description": "Output only. Type of the workload being analyzed.", + "metricSource": { + "description": "Required. 
A standard set of metrics is collected unless metricOverrides are specified for the metric source (see Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) for more information).", "enum": [ - "WORKLOAD_TYPE_UNSPECIFIED", - "BATCH" + "METRIC_SOURCE_UNSPECIFIED", + "MONITORING_AGENT_DEFAULTS", + "HDFS", + "SPARK", + "YARN", + "SPARK_HISTORY_SERVER", + "HIVESERVER2", + "HIVEMETASTORE", + "FLINK" ], "enumDescriptions": [ - "Undefined option", - "Serverless batch job" + "Required unspecified metric source.", + "Monitoring agent metrics. If this source is enabled, Dataproc enables the monitoring agent in Compute Engine, and collects monitoring agent metrics, which are published with an agent.googleapis.com prefix.", + "HDFS metric source.", + "Spark metric source.", + "YARN metric source.", + "Spark History Server metric source.", + "Hiveserver2 metric source.", + "Hive Metastore metric source.", + "Flink metric source." ], - "readOnly": true, - "type": "string" - }, - "analyzedWorkloadUuid": { - "description": "Output only. unique identifier of the workload typically generated by control plane. E.g. batch uuid.", - "readOnly": true, "type": "string" - }, - "createTime": { - "description": "Output only. The time when the operation was created.", - "format": "google-datetime", - "readOnly": true, - "type": "string" - }, - "description": { - "description": "Output only. Short description of the operation.", - "readOnly": true, + } + }, + "type": "object" + }, + "NamespacedGkeDeploymentTarget": { + "deprecated": true, + "description": "Deprecated. Used only for the deprecated beta. A full, namespace-isolated deployment target for an existing GKE cluster.", + "id": "NamespacedGkeDeploymentTarget", + "properties": { + "clusterNamespace": { + "description": "Optional. A namespace within the GKE cluster to deploy into.", "type": "string" }, - "doneTime": { - "description": "Output only. The time when the operation finished.", - "format": "google-datetime", - "readOnly": true, + "targetGkeCluster": { + "description": "Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'", "type": "string" - }, + } + }, + "type": "object" + }, + "NodeGroup": { + "description": "Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource.", + "id": "NodeGroup", + "properties": { "labels": { "additionalProperties": { "type": "string" }, - "description": "Output only. Labels associated with the operation.", - "readOnly": true, + "description": "Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels.", "type": "object" }, - "warnings": { - "description": "Output only. Warnings encountered during operation execution.", + "name": { + "description": "The Node group resource name (https://aip.dev/122).", + "type": "string" + }, + "nodeGroupConfig": { + "$ref": "InstanceGroupConfig", + "description": "Optional. The node group instance group configuration." + }, + "roles": { + "description": "Required. Node group roles.", "items": { + "enum": [ + "ROLE_UNSPECIFIED", + "DRIVER" + ], + "enumDescriptions": [ + "Required unspecified role.", + "Job drivers run on the node pool." 
+ ], "type": "string" }, - "readOnly": true, "type": "array" } }, "type": "object" }, - "AutoscalingConfig": { - "description": "Autoscaling Policy config associated with the cluster.", - "id": "AutoscalingConfig", + "NodeGroupAffinity": { + "description": "Node Group Affinity for clusters using sole-tenant node groups. The Dataproc NodeGroupAffinity resource is not related to the Dataproc NodeGroup resource.", + "id": "NodeGroupAffinity", "properties": { - "policyUri": { - "description": "Optional. The autoscaling policy used by the cluster.Only resource names including projectid and location (region) are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]Note that the policy must be in the same project and Dataproc region.", + "nodeGroupUri": { + "description": "Required. The URI of a sole-tenant node group resource (https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that the cluster will be created on.A full URL, partial URI, or node group name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 node-group-1", "type": "string" } }, "type": "object" }, - "AutoscalingPolicy": { - "description": "Describes an autoscaling policy for Dataproc cluster autoscaler.", - "id": "AutoscalingPolicy", + "NodeGroupOperationMetadata": { + "description": "Metadata describing the node group operation.", + "id": "NodeGroupOperationMetadata", "properties": { - "basicAlgorithm": { - "$ref": "BasicAutoscalingAlgorithm" + "clusterUuid": { + "description": "Output only. Cluster UUID associated with the node group operation.", + "readOnly": true, + "type": "string" }, - "id": { - "description": "Required. The policy id.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.", + "description": { + "description": "Output only. Short description of operation.", + "readOnly": true, "type": "string" }, "labels": { "additionalProperties": { "type": "string" }, - "description": "Optional. The labels to associate with this autoscaling policy. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with an autoscaling policy.", + "description": "Output only. Labels associated with the operation.", + "readOnly": true, "type": "object" }, - "name": { - "description": "Output only. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + "nodeGroupId": { + "description": "Output only. 
Node group ID for the operation.", "readOnly": true, "type": "string" }, - "secondaryWorkerConfig": { - "$ref": "InstanceGroupAutoscalingPolicyConfig", - "description": "Optional. Describes how the autoscaler will operate for secondary workers." + "operationType": { + "description": "The operation type.", + "enum": [ + "NODE_GROUP_OPERATION_TYPE_UNSPECIFIED", + "CREATE", + "UPDATE", + "DELETE", + "RESIZE", + "REPAIR", + "UPDATE_LABELS", + "START", + "STOP" + ], + "enumDescriptions": [ + "Node group operation type is unknown.", + "Create node group operation type.", + "Update node group operation type.", + "Delete node group operation type.", + "Resize node group operation type.", + "Repair node group operation type.", + "Update node group label operation type.", + "Start node group operation type.", + "Stop node group operation type." + ], + "type": "string" }, - "workerConfig": { - "$ref": "InstanceGroupAutoscalingPolicyConfig", - "description": "Required. Describes how the autoscaler will operate for primary workers." - } - }, - "type": "object" - }, - "AutotuningConfig": { - "description": "Autotuning configuration of the workload.", - "id": "AutotuningConfig", - "properties": { - "scenarios": { - "description": "Optional. Scenarios for which tunings are applied.", + "status": { + "$ref": "ClusterOperationStatus", + "description": "Output only. Current operation status.", + "readOnly": true + }, + "statusHistory": { + "description": "Output only. The previous operation status.", "items": { - "enum": [ - "SCENARIO_UNSPECIFIED", - "SCALING", - "BROADCAST_HASH_JOIN", - "MEMORY" - ], - "enumDescriptions": [ - "Default value.", - "Scaling recommendations such as initialExecutors.", - "Adding hints for potential relation broadcasts.", - "Memory management for workloads." - ], - "type": "string" + "$ref": "ClusterOperationStatus" }, + "readOnly": true, "type": "array" - } - }, - "type": "object" - }, - "AuxiliaryNodeGroup": { - "description": "Node group identification and configuration information.", - "id": "AuxiliaryNodeGroup", - "properties": { - "nodeGroup": { - "$ref": "NodeGroup", - "description": "Required. Node group configuration." }, - "nodeGroupId": { - "description": "Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters.", - "type": "string" + "warnings": { + "description": "Output only. Errors encountered during operation execution.", + "items": { + "type": "string" + }, + "readOnly": true, + "type": "array" } }, "type": "object" }, - "AuxiliaryServicesConfig": { - "description": "Auxiliary services configuration for a Cluster.", - "id": "AuxiliaryServicesConfig", + "NodeInitializationAction": { + "description": "Specifies an executable to run on a fully configured node and a timeout period for executable completion.", + "id": "NodeInitializationAction", "properties": { - "metastoreConfig": { - "$ref": "MetastoreConfig", - "description": "Optional. The Hive Metastore configuration for this workload." - }, - "sparkHistoryServerConfig": { - "$ref": "SparkHistoryServerConfig", - "description": "Optional. The Spark History Server configuration for the workload." + "executableFile": { + "description": "Required. Cloud Storage URI of executable file.", + "type": "string" + }, + "executionTimeout": { + "description": "Optional. Amount of time executable has to complete. 
Default is 10 minutes (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at the end of the timeout period.", + "format": "google-duration", + "type": "string" + } + }, + "type": "object" + }, - "BasicAutoscalingAlgorithm": { - "description": "Basic algorithm for autoscaling.", - "id": "BasicAutoscalingAlgorithm", + "NodePool": { + "description": "Indicates a list of workers of the same type.", + "id": "NodePool", "properties": { - "cooldownPeriod": { - "description": "Optional. Duration between scaling events. A scaling period starts after the update operation from the previous event has completed.Bounds: 2m, 1d. Default: 2m.", - "format": "google-duration", + "id": { + "description": "Required. A unique id of the node pool. Primary and Secondary workers can be specified using special reserved ids PRIMARY_WORKER_POOL and SECONDARY_WORKER_POOL respectively. Aux node pools can be referenced using corresponding pool id.", "type": "string" }, - "sparkStandaloneConfig": { - "$ref": "SparkStandaloneAutoscalingConfig", - "description": "Optional. Spark Standalone autoscaling configuration" + "instanceNames": { + "description": "Name of instances to be repaired. These instances must belong to specified node pool.", + "items": { + "type": "string" + }, + "type": "array" }, - "yarnConfig": { - "$ref": "BasicYarnAutoscalingConfig", - "description": "Optional. YARN autoscaling configuration." + "repairAction": { + "description": "Required. Repair action to take on specified resources of the node pool.", + "enum": [ + "REPAIR_ACTION_UNSPECIFIED", + "DELETE" + ], + "enumDescriptions": [ + "No action will be taken by default.", + "Delete the specified list of nodes." + ], + "type": "string" } }, "type": "object" }, - "BasicYarnAutoscalingConfig": { - "description": "Basic autoscaling configurations for YARN.", - "id": "BasicYarnAutoscalingConfig", + "Operation": { + "description": "This resource represents a long-running operation that is the result of a network API call.", + "id": "Operation", "properties": { - "gracefulDecommissionTimeout": { - "description": "Required. Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations.Bounds: 0s, 1d.", - "format": "google-duration", - "type": "string" + "done": { + "description": "If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.", + "type": "boolean" }, - "scaleDownFactor": { - "description": "Required. Fraction of average YARN pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0.", - "format": "double", - "type": "number" + "error": { + "$ref": "Status", + "description": "The error result of the operation in case of failure or cancellation." 
}, - "scaleDownMinWorkerFraction": { - "description": "Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.", - "format": "double", - "type": "number" + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", + "type": "object" }, - "scaleUpFactor": { - "description": "Required. Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0.", - "format": "double", - "type": "number" + "name": { + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}.", + "type": "string" }, - "scaleUpMinWorkerFraction": { - "description": "Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.", - "format": "double", - "type": "number" + "response": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "The normal, successful response of the operation. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.", + "type": "object" } }, "type": "object" }, - "Batch": { - "description": "A representation of a batch workload in the service.", - "id": "Batch", + "OrderedJob": { + "description": "A job executed by the workflow.", + "id": "OrderedJob", "properties": { - "createTime": { - "description": "Output only. The time when the batch was created.", - "format": "google-datetime", - "readOnly": true, - "type": "string" + "flinkJob": { + "$ref": "FlinkJob", + "description": "Optional. Job is a Flink job." }, - "creator": { - "description": "Output only. 
The email address of the user who created the batch.", - "readOnly": true, - "type": "string" + "hadoopJob": { + "$ref": "HadoopJob", + "description": "Optional. Job is a Hadoop job." }, - "environmentConfig": { - "$ref": "EnvironmentConfig", - "description": "Optional. Environment configuration for the batch execution." + "hiveJob": { + "$ref": "HiveJob", + "description": "Optional. Job is a Hive job." }, "labels": { "additionalProperties": { "type": "string" }, - "description": "Optional. The labels to associate with this batch. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a batch.", + "description": "Optional. The labels to associate with this job.Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \\p{Ll}\\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \\p{Ll}\\p{Lo}\\p{N}_-{0,63}No more than 32 labels can be associated with a given job.", "type": "object" }, - "name": { - "description": "Output only. The resource name of the batch.", - "readOnly": true, - "type": "string" + "pigJob": { + "$ref": "PigJob", + "description": "Optional. Job is a Pig job." }, - "operation": { - "description": "Output only. The resource name of the operation associated with this batch.", - "readOnly": true, - "type": "string" + "prerequisiteStepIds": { + "description": "Optional. The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow.", + "items": { + "type": "string" + }, + "type": "array" }, - "pysparkBatch": { - "$ref": "PySparkBatch", - "description": "Optional. PySpark batch config." + "prestoJob": { + "$ref": "PrestoJob", + "description": "Optional. Job is a Presto job." }, - "runtimeConfig": { - "$ref": "RuntimeConfig", - "description": "Optional. Runtime configuration for the batch execution." + "pysparkJob": { + "$ref": "PySparkJob", + "description": "Optional. Job is a PySpark job." }, - "runtimeInfo": { - "$ref": "RuntimeInfo", - "description": "Output only. Runtime information about batch execution.", - "readOnly": true + "scheduling": { + "$ref": "JobScheduling", + "description": "Optional. Job scheduling configuration." }, - "sparkBatch": { - "$ref": "SparkBatch", - "description": "Optional. Spark batch config." + "sparkJob": { + "$ref": "SparkJob", + "description": "Optional. Job is a Spark job." }, - "sparkRBatch": { - "$ref": "SparkRBatch", - "description": "Optional. SparkR batch config." + "sparkRJob": { + "$ref": "SparkRJob", + "description": "Optional. Job is a SparkR job." }, - "sparkSqlBatch": { - "$ref": "SparkSqlBatch", - "description": "Optional. SparkSql batch config." + "sparkSqlJob": { + "$ref": "SparkSqlJob", + "description": "Optional. Job is a SparkSql job." }, - "state": { - "description": "Output only. The state of the batch.", - "enum": [ - "STATE_UNSPECIFIED", - "PENDING", - "RUNNING", - "CANCELLING", - "CANCELLED", - "SUCCEEDED", - "FAILED" - ], - "enumDescriptions": [ - "The batch state is unknown.", - "The batch is created before running.", - "The batch is running.", - "The batch is cancelling.", - "The batch cancellation was successful.", - "The batch completed successfully.", - "The batch is no longer running due to an error." 
- ], - "readOnly": true, + "stepId": { + "description": "Required. The step id. The id must be unique among all jobs within the template.The step id is used as prefix for job id, as job goog-dataproc-workflow-step-id label, and in prerequisiteStepIds field from other steps.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.", "type": "string" }, - "stateHistory": { - "description": "Output only. Historical state information for the batch.", - "items": { - "$ref": "StateHistory" - }, - "readOnly": true, - "type": "array" - }, - "stateMessage": { - "description": "Output only. Batch state details, such as a failure description if the state is FAILED.", - "readOnly": true, + "trinoJob": { + "$ref": "TrinoJob", + "description": "Optional. Job is a Trino job." + } + }, + "type": "object" + }, + "OutputMetrics": { + "description": "Metrics about the data written by the task.", + "id": "OutputMetrics", + "properties": { + "bytesWritten": { + "format": "int64", "type": "string" }, - "stateTime": { - "description": "Output only. The time when the batch entered a current state.", - "format": "google-datetime", - "readOnly": true, + "recordsWritten": { + "format": "int64", "type": "string" + } + }, + "type": "object" + }, + "OutputQuantileMetrics": { + "id": "OutputQuantileMetrics", + "properties": { + "bytesWritten": { + "$ref": "Quantiles" }, - "uuid": { - "description": "Output only. A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.", - "readOnly": true, - "type": "string" + "recordsWritten": { + "$ref": "Quantiles" } }, "type": "object" }, - "BatchOperationMetadata": { - "description": "Metadata describing the Batch operation.", - "id": "BatchOperationMetadata", + "ParameterValidation": { + "description": "Configuration for parameter validation.", + "id": "ParameterValidation", "properties": { - "batch": { - "description": "Name of the batch for the operation.", - "type": "string" + "regex": { + "$ref": "RegexValidation", + "description": "Validation based on regular expressions." }, - "batchUuid": { - "description": "Batch UUID for the operation.", + "values": { + "$ref": "ValueValidation", + "description": "Validation based on a list of allowed values." + } + }, + "type": "object" + }, + "PeripheralsConfig": { + "description": "Auxiliary services configuration for a workload.", + "id": "PeripheralsConfig", + "properties": { + "metastoreService": { + "description": "Optional. Resource name of an existing Dataproc Metastore service.Example: projects/[project_id]/locations/[region]/services/[service_id]", "type": "string" }, - "createTime": { - "description": "The time when the operation was created.", - "format": "google-datetime", - "type": "string" + "sparkHistoryServerConfig": { + "$ref": "SparkHistoryServerConfig", + "description": "Optional. The Spark History Server configuration for the workload." + } + }, + "type": "object" + }, + "PigJob": { + "description": "A Dataproc job for running Apache Pig (https://pig.apache.org/) queries on YARN.", + "id": "PigJob", + "properties": { + "continueOnFailure": { + "description": "Optional. Whether to continue executing queries if a query fails. The default value is false. 
Setting to true can be useful when executing independent parallel queries.", + "type": "boolean" }, - "description": { - "description": "Short description of the operation.", - "type": "string" + "jarFileUris": { + "description": "Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.", + "items": { + "type": "string" + }, + "type": "array" }, - "doneTime": { - "description": "The time when the operation finished.", - "format": "google-datetime", - "type": "string" + "loggingConfig": { + "$ref": "LoggingConfig", + "description": "Optional. The runtime log config for job execution." }, - "labels": { + "properties": { "additionalProperties": { "type": "string" }, - "description": "Labels associated with the operation.", + "description": "Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.", "type": "object" }, - "operationType": { - "description": "The operation type.", - "enum": [ - "BATCH_OPERATION_TYPE_UNSPECIFIED", - "BATCH" - ], - "enumDescriptions": [ - "Batch operation type is unknown.", - "Batch operation type." - ], + "queryFileUri": { + "description": "The HCFS URI of the script that contains the Pig queries.", "type": "string" }, - "warnings": { - "description": "Warnings encountered during operation execution.", - "items": { + "queryList": { + "$ref": "QueryList", + "description": "A list of queries." + }, + "scriptVariables": { + "additionalProperties": { "type": "string" }, - "type": "array" + "description": "Optional. Mapping of query variable names to values (equivalent to the Pig command: name=[value]).", + "type": "object" } }, "type": "object" }, - "Binding": { - "description": "Associates members, or principals, with a role.", - "id": "Binding", + "Policy": { + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources.A Policy is a collection of bindings. A binding binds one or more members, or principals, to a single role. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A role is a named list of permissions; each role can be an IAM predefined role or a user-created custom role.For some types of Google Cloud resources, a binding can also specify a condition, which is a logical expression that allows access to a resource only if the expression evaluates to true. A condition can add constraints based on attributes of the request, the resource, or both. 
To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).JSON example: { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } YAML example: bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 For a description of IAM and its features, see the IAM documentation (https://cloud.google.com/iam/docs/).", + "id": "Policy", "properties": { - "condition": { - "$ref": "Expr", - "description": "The condition that is associated with this binding.If the condition evaluates to true, then this binding applies to the current request.If the condition evaluates to false, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies)." - }, - "members": { - "description": "Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}: A single identity in a workforce identity pool. principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}: All workforce identities in a group. 
principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}: All workforce identities with a specific attribute value. principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*: All identities in a workforce identity pool. principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}: A single identity in a workload identity pool. principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}: A workload identity pool group. principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}: All identities in a workload identity pool with a certain attribute. principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*: All identities in a workload identity pool. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding. deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}: Deleted single identity in a workforce identity pool. For example, deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value.", + "bindings": { + "description": "Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.", "items": { - "type": "string" + "$ref": "Binding" }, "type": "array" }, - "role": { - "description": "Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.For an overview of the IAM roles and permissions, see the IAM documentation (https://cloud.google.com/iam/docs/roles-overview). 
For a list of the available pre-defined roles, see here (https://cloud.google.com/iam/docs/understanding-roles).", + "etag": { + "description": "etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.", + "format": "byte", "type": "string" + }, + "version": { + "description": "Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", + "format": "int32", + "type": "integer" } }, "type": "object" }, - "CancelJobRequest": { - "description": "A request to cancel a job.", - "id": "CancelJobRequest", - "properties": {}, - "type": "object" - }, - "Cluster": { - "description": "Describes the identifying information, config, and status of a Dataproc cluster", - "id": "Cluster", + "PoolData": { + "description": "Pool Data", + "id": "PoolData", "properties": { - "clusterName": { - "description": "Required. The cluster name, which must be unique within a project. The name must start with a lowercase letter, and can contain up to 51 lowercase letters, numbers, and hyphens. It cannot end with a hyphen. The name of a deleted cluster can be reused.", - "type": "string" - }, - "clusterUuid": { - "description": "Output only. A cluster UUID (Unique Universal Identifier). Dataproc generates this value when it creates the cluster.", - "readOnly": true, - "type": "string" - }, - "config": { - "$ref": "ClusterConfig", - "description": "Optional. The cluster config for a cluster of Compute Engine Instances. Note that Dataproc may set default values, and values may change when clusters are updated.Exactly one of ClusterConfig or VirtualClusterConfig must be specified." - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Optional. The labels to associate with this cluster. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). 
Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster.", - "type": "object" - }, - "metrics": { - "$ref": "ClusterMetrics", - "description": "Output only. Contains cluster daemon metrics such as HDFS and YARN stats.Beta Feature: This report is available for testing purposes only. It may be changed before final release.", - "readOnly": true - }, - "projectId": { - "description": "Required. The Google Cloud Platform project ID that the cluster belongs to.", + "name": { "type": "string" }, - "status": { - "$ref": "ClusterStatus", - "description": "Output only. Cluster status.", - "readOnly": true - }, - "statusHistory": { - "description": "Output only. The previous cluster status.", + "stageIds": { "items": { - "$ref": "ClusterStatus" + "format": "int64", + "type": "string" }, - "readOnly": true, "type": "array" - }, - "virtualClusterConfig": { - "$ref": "VirtualClusterConfig", - "description": "Optional. The virtual cluster config is used when creating a Dataproc cluster that does not directly control the underlying compute resources, for example, when creating a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview). Dataproc may set default values, and values may change when clusters are updated. Exactly one of config or virtual_cluster_config must be specified." } }, "type": "object" }, - "ClusterConfig": { - "description": "The cluster config.", - "id": "ClusterConfig", + "PrestoJob": { + "description": "A Dataproc job for running Presto (https://prestosql.io/) queries. IMPORTANT: The Dataproc Presto Optional Component (https://cloud.google.com/dataproc/docs/concepts/components/presto) must be enabled when the cluster is created to submit a Presto job to the cluster.", + "id": "PrestoJob", "properties": { - "autoscalingConfig": { - "$ref": "AutoscalingConfig", - "description": "Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset." - }, - "auxiliaryNodeGroups": { - "description": "Optional. The node group settings.", + "clientTags": { + "description": "Optional. Presto client tags to attach to this query", "items": { - "$ref": "AuxiliaryNodeGroup" + "type": "string" }, "type": "array" }, - "configBucket": { - "description": "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", - "type": "string" - }, - "dataprocMetricConfig": { - "$ref": "DataprocMetricConfig", - "description": "Optional. The config for Dataproc metrics." - }, - "encryptionConfig": { - "$ref": "EncryptionConfig", - "description": "Optional. Encryption settings for the cluster." - }, - "endpointConfig": { - "$ref": "EndpointConfig", - "description": "Optional. Port/endpoint configuration for this cluster" + "continueOnFailure": { + "description": "Optional. 
Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.", + "type": "boolean" }, - "gceClusterConfig": { - "$ref": "GceClusterConfig", - "description": "Optional. The shared Compute Engine config settings for all instances in a cluster." + "loggingConfig": { + "$ref": "LoggingConfig", + "description": "Optional. The runtime log config for job execution." }, - "gkeClusterConfig": { - "$ref": "GkeClusterConfig", - "deprecated": true, - "description": "Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. These config settings are mutually exclusive with Compute Engine-based options, such as gce_cluster_config, master_config, worker_config, secondary_worker_config, and autoscaling_config." + "outputFormat": { + "description": "Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats", + "type": "string" }, - "initializationActions": { - "description": "Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ \"${ROLE}\" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi ", - "items": { - "$ref": "NodeInitializationAction" + "properties": { + "additionalProperties": { + "type": "string" }, - "type": "array" - }, - "lifecycleConfig": { - "$ref": "LifecycleConfig", - "description": "Optional. Lifecycle setting for the cluster." - }, - "masterConfig": { - "$ref": "InstanceGroupConfig", - "description": "Optional. The Compute Engine config settings for the cluster's master instance." - }, - "metastoreConfig": { - "$ref": "MetastoreConfig", - "description": "Optional. Metastore configuration." - }, - "secondaryWorkerConfig": { - "$ref": "InstanceGroupConfig", - "description": "Optional. The Compute Engine config settings for a cluster's secondary worker instances" - }, - "securityConfig": { - "$ref": "SecurityConfig", - "description": "Optional. Security settings for the cluster." - }, - "softwareConfig": { - "$ref": "SoftwareConfig", - "description": "Optional. The config settings for cluster software." + "description": "Optional. A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html) Equivalent to using the --session flag in the Presto CLI", + "type": "object" }, - "tempBucket": { - "description": "Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... 
URI to a Cloud Storage bucket.", + "queryFileUri": { + "description": "The HCFS URI of the script that contains SQL queries.", "type": "string" }, - "workerConfig": { - "$ref": "InstanceGroupConfig", - "description": "Optional. The Compute Engine config settings for the cluster's worker instances." + "queryList": { + "$ref": "QueryList", + "description": "A list of queries." } }, "type": "object" }, - "ClusterMetrics": { - "description": "Contains cluster daemon metrics, such as HDFS and YARN stats.Beta Feature: This report is available for testing purposes only. It may be changed before final release.", - "id": "ClusterMetrics", + "ProcessSummary": { + "description": "Process Summary", + "id": "ProcessSummary", "properties": { - "hdfsMetrics": { - "additionalProperties": { - "format": "int64", - "type": "string" - }, - "description": "The HDFS metrics.", - "type": "object" + "addTime": { + "format": "google-datetime", + "type": "string" }, - "yarnMetrics": { + "hostPort": { + "type": "string" + }, + "isActive": { + "type": "boolean" + }, + "processId": { + "type": "string" + }, + "processLogs": { "additionalProperties": { - "format": "int64", "type": "string" }, - "description": "YARN metrics.", "type": "object" + }, + "removeTime": { + "format": "google-datetime", + "type": "string" + }, + "totalCores": { + "format": "int32", + "type": "integer" } }, "type": "object" }, - "ClusterOperation": { - "description": "The cluster operation triggered by a workflow.", - "id": "ClusterOperation", + "ProvisioningModelMix": { + "description": "Defines how Dataproc should create VMs with a mixture of provisioning models.", + "id": "ProvisioningModelMix", "properties": { - "done": { - "description": "Output only. Indicates the operation is done.", - "readOnly": true, - "type": "boolean" - }, - "error": { - "description": "Output only. Error, if operation failed.", - "readOnly": true, - "type": "string" + "standardCapacityBase": { + "description": "Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances.", + "format": "int32", + "type": "integer" }, - "operationId": { - "description": "Output only. The id of the cluster operation.", - "readOnly": true, + "standardCapacityPercentAboveBase": { + "description": "Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "PyPiRepositoryConfig": { + "description": "Configuration for PyPi repository", + "id": "PyPiRepositoryConfig", + "properties": { + "pypiRepository": { + "description": "Optional. 
PyPi repository address", "type": "string" } }, "type": "object" }, - "ClusterOperationMetadata": { - "description": "Metadata describing the operation.", - "id": "ClusterOperationMetadata", + "PySparkBatch": { + "description": "A configuration for running an Apache PySpark (https://spark.apache.org/docs/latest/api/python/getting_started/quickstart.html) batch workload.", + "id": "PySparkBatch", "properties": { - "childOperationIds": { - "description": "Output only. Child operation ids", + "archiveUris": { + "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", "items": { "type": "string" }, - "readOnly": true, "type": "array" }, - "clusterName": { - "description": "Output only. Name of the cluster for the operation.", - "readOnly": true, - "type": "string" - }, - "clusterUuid": { - "description": "Output only. Cluster UUID for the operation.", - "readOnly": true, - "type": "string" - }, - "description": { - "description": "Output only. Short description of operation.", - "readOnly": true, - "type": "string" - }, - "labels": { - "additionalProperties": { + "args": { + "description": "Optional. The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.", + "items": { "type": "string" }, - "description": "Output only. Labels associated with the operation", - "readOnly": true, - "type": "object" - }, - "operationType": { - "description": "Output only. The operation type.", - "readOnly": true, - "type": "string" + "type": "array" }, - "status": { - "$ref": "ClusterOperationStatus", - "description": "Output only. Current operation status.", - "readOnly": true + "fileUris": { + "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor.", + "items": { + "type": "string" + }, + "type": "array" }, - "statusHistory": { - "description": "Output only. The previous operation status.", + "jarFileUris": { + "description": "Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.", "items": { - "$ref": "ClusterOperationStatus" + "type": "string" }, - "readOnly": true, "type": "array" }, - "warnings": { - "description": "Output only. Errors encountered during operation execution.", + "mainPythonFileUri": { + "description": "Required. The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.", + "type": "string" + }, + "pythonFileUris": { + "description": "Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.", "items": { "type": "string" }, - "readOnly": true, "type": "array" } }, "type": "object" }, - "ClusterOperationStatus": { - "description": "The status of the operation.", - "id": "ClusterOperationStatus", + "PySparkJob": { + "description": "A Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN.", + "id": "PySparkJob", "properties": { - "details": { - "description": "Output only. A message containing any operation metadata details.", - "readOnly": true, - "type": "string" + "archiveUris": { + "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. 
Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + "items": { + "type": "string" + }, + "type": "array" }, - "innerState": { - "description": "Output only. A message containing the detailed operation state.", - "readOnly": true, - "type": "string" + "args": { + "description": "Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + "items": { + "type": "string" + }, + "type": "array" }, - "state": { - "description": "Output only. A message containing the operation state.", - "enum": [ - "UNKNOWN", - "PENDING", - "RUNNING", - "DONE" - ], - "enumDescriptions": [ - "Unused.", - "The operation has been created.", - "The operation is running.", - "The operation is done; either cancelled or completed." - ], - "readOnly": true, - "type": "string" + "fileUris": { + "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", + "items": { + "type": "string" + }, + "type": "array" }, - "stateStartTime": { - "description": "Output only. The time this state was entered.", - "format": "google-datetime", - "readOnly": true, + "jarFileUris": { + "description": "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.", + "items": { + "type": "string" + }, + "type": "array" + }, + "loggingConfig": { + "$ref": "LoggingConfig", + "description": "Optional. The runtime log config for job execution." + }, + "mainPythonFileUri": { + "description": "Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.", "type": "string" - } - }, - "type": "object" - }, - "ClusterSelector": { - "description": "A selector that chooses target cluster for jobs based on metadata.", - "id": "ClusterSelector", - "properties": { - "clusterLabels": { + }, + "properties": { "additionalProperties": { "type": "string" }, - "description": "Required. The cluster labels. Cluster must have all labels to match.", + "description": "Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", "type": "object" }, - "zone": { - "description": "Optional. The zone where workflow process executes. This parameter does not affect the selection of the cluster.If unspecified, the zone of the first cluster matching the selector is used.", - "type": "string" + "pythonFileUris": { + "description": "Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" }, - "ClusterStatus": { - "description": "The status of a cluster and its instances.", - "id": "ClusterStatus", + "Quantiles": { + "description": "Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.", + "id": "Quantiles", "properties": { - "detail": { - "description": "Optional. Output only. Details of cluster's state.", - "readOnly": true, + "count": { + "format": "int64", "type": "string" }, - "state": { - "description": "Output only. 
The cluster's state.", - "enum": [ - "UNKNOWN", - "CREATING", - "RUNNING", - "ERROR", - "ERROR_DUE_TO_UPDATE", - "DELETING", - "UPDATING", - "STOPPING", - "STOPPED", - "STARTING", - "REPAIRING" - ], - "enumDescriptions": [ - "The cluster state is unknown.", - "The cluster is being created and set up. It is not ready for use.", - "The cluster is currently running and healthy. It is ready for use.Note: The cluster state changes from \"creating\" to \"running\" status after the master node(s), first two primary worker nodes (and the last primary worker node if primary workers \u003e 2) are running.", - "The cluster encountered an error. It is not ready for use.", - "The cluster has encountered an error while being updated. Jobs can be submitted to the cluster, but the cluster cannot be updated.", - "The cluster is being deleted. It cannot be used.", - "The cluster is being updated. It continues to accept and process jobs.", - "The cluster is being stopped. It cannot be used.", - "The cluster is currently stopped. It is not ready for use.", - "The cluster is being started. It is not ready for use.", - "The cluster is being repaired. It is not ready for use." - ], - "readOnly": true, + "maximum": { + "format": "int64", "type": "string" }, - "stateStartTime": { - "description": "Output only. Time when this state was entered (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).", - "format": "google-datetime", - "readOnly": true, + "minimum": { + "format": "int64", "type": "string" }, - "substate": { - "description": "Output only. Additional state information that includes status reported by the agent.", - "enum": [ - "UNSPECIFIED", - "UNHEALTHY", - "STALE_STATUS" - ], - "enumDescriptions": [ - "The cluster substate is unknown.", - "The cluster is known to be in an unhealthy state (for example, critical daemons are not running or HDFS capacity is exhausted).Applies to RUNNING state.", - "The agent-reported status is out of date (may occur if Dataproc loses communication with Agent).Applies to RUNNING state." - ], - "readOnly": true, + "percentile25": { + "format": "int64", + "type": "string" + }, + "percentile50": { + "format": "int64", + "type": "string" + }, + "percentile75": { + "format": "int64", + "type": "string" + }, + "sum": { + "format": "int64", "type": "string" } }, "type": "object" }, - "ConfidentialInstanceConfig": { - "description": "Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs)", - "id": "ConfidentialInstanceConfig", - "properties": { - "enableConfidentialCompute": { - "description": "Optional. Defines whether the instance should have confidential compute enabled.", - "type": "boolean" - } - }, - "type": "object" - }, - "DataprocMetricConfig": { - "description": "Dataproc metric config.", - "id": "DataprocMetricConfig", + "QueryList": { + "description": "A list of queries to run on a cluster.", + "id": "QueryList", "properties": { - "metrics": { - "description": "Required. Metrics sources to enable.", + "queries": { + "description": "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } } ", "items": { - "$ref": "Metric" + "type": "string" }, "type": "array" } }, "type": "object" }, - "DiagnoseClusterRequest": { - "description": "A request to collect cluster diagnostic information.", - "id": "DiagnoseClusterRequest", + "RddDataDistribution": { + "description": "Details about RDD usage.", + "id": "RddDataDistribution", "properties": { - "diagnosisInterval": { - "$ref": "Interval", - "description": "Optional. Time interval in which diagnosis should be carried out on the cluster." + "address": { + "type": "string" }, - "job": { - "deprecated": true, - "description": "Optional. DEPRECATED Specifies the job on which diagnosis is to be performed. Format: projects/{project}/regions/{region}/jobs/{job}", + "diskUsed": { + "format": "int64", "type": "string" }, - "jobs": { - "description": "Optional. Specifies a list of jobs on which diagnosis is to be performed. Format: projects/{project}/regions/{region}/jobs/{job}", - "items": { - "type": "string" - }, - "type": "array" + "memoryRemaining": { + "format": "int64", + "type": "string" }, - "tarballAccess": { - "description": "Optional. (Optional) The access type to the diagnostic tarball. If not specified, falls back to default access of the bucket", - "enum": [ - "TARBALL_ACCESS_UNSPECIFIED", - "GOOGLE_CLOUD_SUPPORT", - "GOOGLE_DATAPROC_DIAGNOSE" - ], - "enumDescriptions": [ - "Tarball Access unspecified. Falls back to default access of the bucket", - "Google Cloud Support group has read access to the diagnostic tarball", - "Google Cloud Dataproc Diagnose service account has read access to the diagnostic tarball" - ], + "memoryUsed": { + "format": "int64", "type": "string" }, - "tarballGcsDir": { - "description": "Optional. (Optional) The output Cloud Storage directory for the diagnostic tarball. If not specified, a task-specific directory in the cluster's staging bucket will be used.", + "offHeapMemoryRemaining": { + "format": "int64", "type": "string" }, - "yarnApplicationId": { - "deprecated": true, - "description": "Optional. DEPRECATED Specifies the yarn application on which diagnosis is to be performed.", + "offHeapMemoryUsed": { + "format": "int64", "type": "string" }, - "yarnApplicationIds": { - "description": "Optional. Specifies a list of yarn applications on which diagnosis is to be performed.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "DiagnoseClusterResults": { - "description": "The location of diagnostic output.", - "id": "DiagnoseClusterResults", - "properties": { - "outputUri": { - "description": "Output only. The Cloud Storage URI of the diagnostic output. The output report is a plain text file with a summary of collected diagnostics.", - "readOnly": true, + "onHeapMemoryRemaining": { + "format": "int64", + "type": "string" + }, + "onHeapMemoryUsed": { + "format": "int64", "type": "string" } }, "type": "object" }, - "DiskConfig": { - "description": "Specifies the config of disk options for a group of VM instances.", - "id": "DiskConfig", + "RddOperationCluster": { + "description": "A grouping of nodes representing higher level constructs (stage, job etc.).", + "id": "RddOperationCluster", "properties": { - "bootDiskSizeGb": { - "description": "Optional. 
Size in GB of the boot disk (default is 500GB).", - "format": "int32", - "type": "integer" + "childClusters": { + "items": { + "$ref": "RddOperationCluster" + }, + "type": "array" }, - "bootDiskType": { - "description": "Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).", - "type": "string" + "childNodes": { + "items": { + "$ref": "RddOperationNode" + }, + "type": "array" }, - "localSsdInterface": { - "description": "Optional. Interface type of local SSDs (default is \"scsi\"). Valid values: \"scsi\" (Small Computer System Interface), \"nvme\" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).", + "name": { "type": "string" }, - "numLocalSsds": { - "description": "Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected.", - "format": "int32", - "type": "integer" + "rddClusterId": { + "type": "string" } }, "type": "object" }, - "DriverSchedulingConfig": { - "description": "Driver scheduling configuration.", - "id": "DriverSchedulingConfig", + "RddOperationEdge": { + "description": "A directed edge representing dependency between two RDDs.", + "id": "RddOperationEdge", "properties": { - "memoryMb": { - "description": "Required. The amount of memory in MB the driver is requesting.", + "fromId": { "format": "int32", "type": "integer" }, - "vcores": { - "description": "Required. The number of vCPUs the driver is requesting.", + "toId": { "format": "int32", "type": "integer" } }, "type": "object" }, - "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } ", - "id": "Empty", - "properties": {}, - "type": "object" - }, - "EncryptionConfig": { - "description": "Encryption settings for the cluster.", - "id": "EncryptionConfig", + "RddOperationGraph": { + "description": "Graph representing RDD dependencies. Consists of edges and a root cluster.", + "id": "RddOperationGraph", "properties": { - "gcePdKmsKeyName": { - "description": "Optional. The Cloud KMS key resource name to use for persistent disk encryption for all instances in the cluster. See Use CMEK with cluster data (https://cloud.google.com//dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_cluster_data) for more information.", - "type": "string" + "edges": { + "items": { + "$ref": "RddOperationEdge" + }, + "type": "array" }, - "kmsKey": { - "description": "Optional. The Cloud KMS key resource name to use for cluster persistent disk and job argument encryption. 
See Use CMEK with cluster data (https://cloud.google.com//dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_cluster_data) for more information.When this key resource name is provided, the following job arguments of the following job types submitted to the cluster are encrypted using CMEK: FlinkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob) HadoopJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob) SparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob) SparkRJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob) PySparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob) SparkSqlJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob) scriptVariables and queryList.queries HiveJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob) scriptVariables and queryList.queries PigJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob) scriptVariables and queryList.queries PrestoJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob) scriptVariables and queryList.queries", + "incomingEdges": { + "items": { + "$ref": "RddOperationEdge" + }, + "type": "array" + }, + "outgoingEdges": { + "items": { + "$ref": "RddOperationEdge" + }, + "type": "array" + }, + "rootCluster": { + "$ref": "RddOperationCluster" + }, + "stageId": { + "format": "int64", "type": "string" } }, "type": "object" }, - "EndpointConfig": { - "description": "Endpoint config for this cluster", - "id": "EndpointConfig", + "RddOperationNode": { + "description": "A node in the RDD operation graph. Corresponds to a single RDD.", + "id": "RddOperationNode", "properties": { - "enableHttpPortAccess": { - "description": "Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false.", + "barrier": { "type": "boolean" }, - "httpPorts": { - "additionalProperties": { - "type": "string" - }, - "description": "Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.", - "readOnly": true, - "type": "object" - } - }, - "type": "object" - }, - "EnvironmentConfig": { - "description": "Environment configuration for a workload.", - "id": "EnvironmentConfig", - "properties": { - "executionConfig": { - "$ref": "ExecutionConfig", - "description": "Optional. Execution configuration for a workload." + "cached": { + "type": "boolean" }, - "peripheralsConfig": { - "$ref": "PeripheralsConfig", - "description": "Optional. Peripherals configuration that workload has access to." + "callsite": { + "type": "string" + }, + "name": { + "type": "string" + }, + "nodeId": { + "format": "int32", + "type": "integer" + }, + "outputDeterministicLevel": { + "enum": [ + "DETERMINISTIC_LEVEL_UNSPECIFIED", + "DETERMINISTIC_LEVEL_DETERMINATE", + "DETERMINISTIC_LEVEL_UNORDERED", + "DETERMINISTIC_LEVEL_INDETERMINATE" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" } }, "type": "object" }, - "ExecutionConfig": { - "description": "Execution configuration for a workload.", - "id": "ExecutionConfig", + "RddPartitionInfo": { + "description": "Information about RDD partitions.", + "id": "RddPartitionInfo", "properties": { - "idleTtl": { - "description": "Optional. Applies to sessions only. The duration to keep the session alive while it's idling. Exceeding this threshold causes the session to terminate. 
This field cannot be set on a batch workload. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). Defaults to 1 hour if not set. If both ttl and idle_ttl are specified for an interactive session, the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first.", - "format": "google-duration", + "blockName": { "type": "string" }, - "kmsKey": { - "description": "Optional. The Cloud KMS key to use for encryption.", + "diskUsed": { + "format": "int64", "type": "string" }, - "networkTags": { - "description": "Optional. Tags used for network traffic control.", + "executors": { "items": { "type": "string" }, "type": "array" }, - "networkUri": { - "description": "Optional. Network URI to connect workload to.", + "memoryUsed": { + "format": "int64", "type": "string" }, - "serviceAccount": { - "description": "Optional. Service account that used to execute workload.", + "storageLevel": { "type": "string" + } + }, + "type": "object" + }, + "RddStorageInfo": { + "description": "Overall data about RDD storage.", + "id": "RddStorageInfo", + "properties": { + "dataDistribution": { + "items": { + "$ref": "RddDataDistribution" + }, + "type": "array" }, - "stagingBucket": { - "description": "Optional. A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", + "diskUsed": { + "format": "int64", "type": "string" }, - "subnetworkUri": { - "description": "Optional. Subnetwork URI to connect workload to.", + "memoryUsed": { + "format": "int64", "type": "string" }, - "ttl": { - "description": "Optional. The duration after which the workload will be terminated, specified as the JSON representation for Duration (https://protobuf.dev/programming-guides/proto3/#json). When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. 
If both ttl and idle_ttl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first.", - "format": "google-duration", + "name": { + "type": "string" + }, + "numCachedPartitions": { + "format": "int32", + "type": "integer" + }, + "numPartitions": { + "format": "int32", + "type": "integer" + }, + "partitions": { + "items": { + "$ref": "RddPartitionInfo" + }, + "type": "array" + }, + "rddStorageId": { + "format": "int32", + "type": "integer" + }, + "storageLevel": { "type": "string" } }, "type": "object" }, - "Expr": { - "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec.Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() \u003c 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", - "id": "Expr", + "RegexValidation": { + "description": "Validation based on regular expressions.", + "id": "RegexValidation", "properties": { - "description": { - "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", + "regexes": { + "description": "Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient).", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "RepairClusterRequest": { + "description": "A request to repair a cluster.", + "id": "RepairClusterRequest", + "properties": { + "cluster": { + "$ref": "ClusterToRepair", + "description": "Optional. Cluster to be repaired" + }, + "clusterUuid": { + "description": "Optional. Specifying the cluster_uuid means the RPC will fail (with error NOT_FOUND) if a cluster with the specified UUID does not exist.", + "type": "string" + }, + "gracefulDecommissionTimeout": { + "description": "Optional. Timeout for graceful YARN decommissioning. Graceful decommissioning facilitates the removal of cluster nodes without interrupting jobs in progress. The timeout specifies the amount of time to wait for jobs finish before forcefully removing nodes. The default timeout is 0 for forceful decommissioning, and the maximum timeout period is 1 day. 
(see JSON Mapping—Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).graceful_decommission_timeout is supported in Dataproc image versions 1.2+.", + "format": "google-duration", "type": "string" }, - "expression": { - "description": "Textual representation of an expression in Common Expression Language syntax.", - "type": "string" + "nodePools": { + "description": "Optional. Node pools and corresponding repair action to be taken. All node pools should be unique in this request. i.e. Multiple entries for the same node pool id are not allowed.", + "items": { + "$ref": "NodePool" + }, + "type": "array" }, - "location": { - "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", + "parentOperationId": { + "description": "Optional. operation id of the parent operation sending the repair request", "type": "string" }, - "title": { - "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", + "requestId": { + "description": "Optional. A unique ID used to identify the request. If the server receives two RepairClusterRequests with the same ID, the second request is ignored, and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", "type": "string" } }, "type": "object" }, - "FlinkJob": { - "description": "A Dataproc job for running Apache Flink applications on YARN.", - "id": "FlinkJob", + "RepairNodeGroupRequest": { + "id": "RepairNodeGroupRequest", "properties": { - "args": { - "description": "Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision might occur that causes an incorrect job submission.", - "items": { - "type": "string" - }, - "type": "array" - }, - "jarFileUris": { - "description": "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Flink driver and tasks.", + "instanceNames": { + "description": "Required. Name of instances to be repaired. These instances must belong to specified node pool.", "items": { "type": "string" }, "type": "array" }, - "loggingConfig": { - "$ref": "LoggingConfig", - "description": "Optional. The runtime log config for job execution." - }, - "mainClass": { - "description": "The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris.", - "type": "string" - }, - "mainJarFileUri": { - "description": "The HCFS URI of the jar file that contains the main class.", + "repairAction": { + "description": "Required. Repair action to take on specified resources of the node pool.", + "enum": [ + "REPAIR_ACTION_UNSPECIFIED", + "REPLACE" + ], + "enumDescriptions": [ + "No action will be taken by default.", + "replace the specified list of nodes." + ], "type": "string" }, - "properties": { - "additionalProperties": { - "type": "string" - }, - "description": "Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. 
Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code.", - "type": "object" - }, - "savepointUri": { - "description": "Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job.", + "requestId": { + "description": "Optional. A unique ID used to identify the request. If the server receives two RepairNodeGroupRequest with the same ID, the second request is ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", "type": "string" } }, "type": "object" }, - "GceClusterConfig": { - "description": "Common config settings for resources of Compute Engine cluster instances, applicable to all instances in the cluster.", - "id": "GceClusterConfig", + "RepositoryConfig": { + "description": "Configuration for dependency repositories", + "id": "RepositoryConfig", "properties": { - "confidentialInstanceConfig": { - "$ref": "ConfidentialInstanceConfig", - "description": "Optional. Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs)." - }, - "internalIpOnly": { - "description": "Optional. This setting applies to subnetwork-enabled networks. It is set to true by default in clusters created with image versions 2.2.x.When set to true: All cluster VMs have internal IP addresses. Google Private Access (https://cloud.google.com/vpc/docs/private-google-access) must be enabled to access Dataproc and other Google Cloud APIs. Off-cluster dependencies must be configured to be accessible without external IP addresses.When set to false: Cluster VMs are not restricted to internal IP addresses. Ephemeral external IP addresses are assigned to each cluster VM.", - "type": "boolean" - }, - "metadata": { - "additionalProperties": { - "type": "string" - }, - "description": "Optional. The Compute Engine metadata entries to add to all instances (see Project and instance metadata (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).", - "type": "object" - }, - "networkUri": { - "description": "Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information).A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default projects/[project_id]/global/networks/default default", - "type": "string" - }, - "nodeGroupAffinity": { - "$ref": "NodeGroupAffinity", - "description": "Optional. Node Group Affinity for sole-tenant clusters." - }, - "privateIpv6GoogleAccess": { - "description": "Optional. The type of IPv6 access for a cluster.", + "pypiRepositoryConfig": { + "$ref": "PyPiRepositoryConfig", + "description": "Optional. Configuration for PyPi repository." 
+ } + }, + "type": "object" + }, + "ReservationAffinity": { + "description": "Reservation Affinity for consuming Zonal reservation.", + "id": "ReservationAffinity", + "properties": { + "consumeReservationType": { + "description": "Optional. Type of reservation to consume", "enum": [ - "PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED", - "INHERIT_FROM_SUBNETWORK", - "OUTBOUND", - "BIDIRECTIONAL" + "TYPE_UNSPECIFIED", + "NO_RESERVATION", + "ANY_RESERVATION", + "SPECIFIC_RESERVATION" ], "enumDescriptions": [ - "If unspecified, Compute Engine default behavior will apply, which is the same as INHERIT_FROM_SUBNETWORK.", - "Private access to and from Google Services configuration inherited from the subnetwork configuration. This is the default Compute Engine behavior.", - "Enables outbound private IPv6 access to Google Services from the Dataproc cluster.", - "Enables bidirectional private IPv6 access between Google Services and the Dataproc cluster." + "", + "Do not consume from any allocated capacity.", + "Consume any reservation available.", + "Must consume from a specific reservation. Must specify key value fields for specifying the reservations." ], "type": "string" }, - "reservationAffinity": { - "$ref": "ReservationAffinity", - "description": "Optional. Reservation Affinity for consuming Zonal reservation." - }, - "serviceAccount": { - "description": "Optional. The Dataproc service account (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see VM Data Plane identity (https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services.If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.", + "key": { + "description": "Optional. Corresponds to the label key of reservation resource.", "type": "string" }, - "serviceAccountScopes": { - "description": "Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: https://www.googleapis.com/auth/cloud.useraccounts.readonly https://www.googleapis.com/auth/devstorage.read_write https://www.googleapis.com/auth/logging.writeIf no scopes are specified, the following defaults are also provided: https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/bigtable.admin.table https://www.googleapis.com/auth/bigtable.data https://www.googleapis.com/auth/devstorage.full_control", + "values": { + "description": "Optional. Corresponds to the label values of reservation resource.", "items": { "type": "string" }, "type": "array" + } + }, + "type": "object" + }, + "ResizeNodeGroupRequest": { + "description": "A request to resize a node group.", + "id": "ResizeNodeGroupRequest", + "properties": { + "gracefulDecommissionTimeout": { + "description": "Optional. Timeout for graceful YARN decommissioning. Graceful decommissioning (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/scaling-clusters#graceful_decommissioning) allows the removal of nodes from the Compute Engine node group without interrupting jobs in progress. This timeout specifies how long to wait for jobs in progress to finish before forcefully removing nodes (and potentially interrupting jobs). Default timeout is 0 (for forceful decommission), and the maximum allowed timeout is 1 day. 
(see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). Only supported on Dataproc image versions 1.2 and higher.", + "format": "google-duration", + "type": "string" }, - "shieldedInstanceConfig": { - "$ref": "ShieldedInstanceConfig", - "description": "Optional. Shielded Instance Config for clusters using Compute Engine Shielded VMs (https://cloud.google.com/security/shielded-cloud/shielded-vm)." + "parentOperationId": { + "description": "Optional. Operation ID of the parent operation sending the resize request.", + "type": "string" }, - "subnetworkUri": { - "description": "Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/[region]/subnetworks/sub0 projects/[project_id]/regions/[region]/subnetworks/sub0 sub0", + "requestId": { + "description": "Optional. A unique ID used to identify the request. If the server receives two ResizeNodeGroupRequests (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.ResizeNodeGroupRequests) with the same ID, the second request is ignored and the first google.longrunning.Operation created and stored in the backend is returned. Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", "type": "string" }, - "tags": { - "description": "The Compute Engine tags to add to all instances (see Tagging instances (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).", + "size": { + "description": "Required. The number of running instances for the node group to maintain. The group adds or removes instances to maintain the number of instances specified by this parameter.", + "format": "int32", + "type": "integer" } }, "type": "object" }, + "ResourceInformation": { + "id": "ResourceInformation", + "properties": { + "addresses": { "items": { "type": "string" }, "type": "array" }, - "zoneUri": { - "description": "Optional. The Compute Engine zone where the Dataproc cluster will be located. If omitted, the service will pick a zone in the cluster's Compute Engine region. On a get request, zone will always be present.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone] projects/[project_id]/zones/[zone] [zone]", + "name": { "type": "string" } }, "type": "object" }, - "GetIamPolicyRequest": { - "description": "Request message for GetIamPolicy method.", - "id": "GetIamPolicyRequest", - "properties": { - "options": { - "$ref": "GetPolicyOptions", - "description": "OPTIONAL: A GetPolicyOptions object for specifying options to GetIamPolicy." - } - }, - "type": "object" - }, - "GetPolicyOptions": { - "description": "Encapsulates settings provided to GetIamPolicy.", - "id": "GetPolicyOptions", + "ResourceProfileInfo": { + "description": "Resource profile that contains information about all the resources required by executors and tasks.", + "id": "ResourceProfileInfo", "properties": { - "requestedPolicyVersion": { - "description": "Optional. The maximum policy version that will be used to format the policy.Valid values are 0, 1, and 3. 
Requests specifying an invalid value will be rejected.Requests for policies with any conditional role bindings must specify version 3. Policies with no conditional role bindings may specify any valid value or leave the field unset.The policy in the response might use the policy version that you specified, or it might use a lower policy version. For example, if you specify version 3, but the policy has no conditional role bindings, the response uses version 1.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", + "executorResources": { + "additionalProperties": { + "$ref": "ExecutorResourceRequest" + }, + "type": "object" + }, + "resourceProfileId": { "format": "int32", "type": "integer" - } - }, - "type": "object" - }, - "GkeClusterConfig": { - "description": "The cluster's GKE config.", - "id": "GkeClusterConfig", - "properties": { - "gkeClusterTarget": { - "description": "Optional. A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional). Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'", - "type": "string" - }, - "namespacedGkeDeploymentTarget": { - "$ref": "NamespacedGkeDeploymentTarget", - "deprecated": true, - "description": "Optional. Deprecated. Use gkeClusterTarget. Used only for the deprecated beta. A target for the deployment." }, - "nodePoolTarget": { - "description": "Optional. GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings.", - "items": { - "$ref": "GkeNodePoolTarget" + "taskResources": { + "additionalProperties": { + "$ref": "TaskResourceRequest" }, - "type": "array" + "type": "object" } }, "type": "object" }, - "GkeNodeConfig": { - "description": "Parameters that describe cluster nodes.", - "id": "GkeNodeConfig", + "RuntimeConfig": { + "description": "Runtime configuration for a workload.", + "id": "RuntimeConfig", "properties": { - "accelerators": { - "description": "Optional. A list of hardware accelerators (https://cloud.google.com/compute/docs/gpus) to attach to each node.", - "items": { - "$ref": "GkeNodePoolAcceleratorConfig" - }, - "type": "array" + "autotuningConfig": { + "$ref": "AutotuningConfig", + "description": "Optional. Autotuning configuration of the workload." }, - "bootDiskKmsKey": { - "description": "Optional. The Customer Managed Encryption Key (CMEK) (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek) used to encrypt the boot disk attached to each node in the node pool. Specify the key using the following format: projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}", + "cohort": { + "description": "Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs.", "type": "string" }, - "localSsdCount": { - "description": "Optional. The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone (see Adding Local SSDs (https://cloud.google.com/compute/docs/disks/local-ssd)).", - "format": "int32", - "type": "integer" - }, - "machineType": { - "description": "Optional. 
The name of a Compute Engine machine type (https://cloud.google.com/compute/docs/machine-types).", + "containerImage": { + "description": "Optional. Custom container image for the job runtime environment. If not specified, a default container image will be used.", "type": "string" }, - "minCpuPlatform": { - "description": "Optional. Minimum CPU platform (https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. Specify the friendly names of CPU platforms, such as \"Intel Haswell\"` or Intel Sandy Bridge\".", - "type": "string" + "properties": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. A mapping of property names to values, which are used to configure workload execution.", + "type": "object" }, - "preemptible": { - "description": "Optional. Whether the nodes are created as legacy preemptible VM instances (https://cloud.google.com/compute/docs/instances/preemptible). Also see Spot VMs, preemptible VM instances without a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).", - "type": "boolean" + "repositoryConfig": { + "$ref": "RepositoryConfig", + "description": "Optional. Dependency repository configuration." }, - "spot": { - "description": "Optional. Whether the nodes are created as Spot VM instances (https://cloud.google.com/compute/docs/instances/spot). Spot VMs are the latest update to legacy preemptible VMs. Spot VMs do not have a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).", - "type": "boolean" + "version": { + "description": "Optional. Version of the batch runtime.", + "type": "string" } }, "type": "object" }, - "GkeNodePoolAcceleratorConfig": { - "description": "A GkeNodeConfigAcceleratorConfig represents a Hardware Accelerator request for a node pool.", - "id": "GkeNodePoolAcceleratorConfig", + "RuntimeInfo": { + "description": "Runtime information about workload execution.", + "id": "RuntimeInfo", "properties": { - "acceleratorCount": { - "description": "The number of accelerator cards exposed to an instance.", - "format": "int64", - "type": "string" + "approximateUsage": { + "$ref": "UsageMetrics", + "description": "Output only. Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). Note: This metric calculation may change in the future, for example, to capture cumulative workload resource consumption during workload execution (see the Dataproc Serverless release notes (https://cloud.google.com/dataproc-serverless/docs/release-notes) for announcements, changes, fixes and other Dataproc developments).", + "readOnly": true }, - "acceleratorType": { - "description": "The accelerator type resource namename (see GPUs on Compute Engine).", + "currentUsage": { + "$ref": "UsageSnapshot", + "description": "Output only. Snapshot of current workload resource usage.", + "readOnly": true + }, + "diagnosticOutputUri": { + "description": "Output only. 
A URI pointing to the location of the diagnostics tarball.", + "readOnly": true, "type": "string" }, - "gpuPartitionSize": { - "description": "Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning).", + "endpoints": { + "additionalProperties": { + "type": "string" + }, + "description": "Output only. Map of remote access endpoints (such as web interfaces and APIs) to their URIs.", + "readOnly": true, + "type": "object" + }, + "outputUri": { + "description": "Output only. A URI pointing to the location of the stdout and stderr of the workload.", + "readOnly": true, "type": "string" } }, "type": "object" }, - "GkeNodePoolAutoscalingConfig": { - "description": "GkeNodePoolAutoscaling contains information the cluster autoscaler needs to adjust the size of the node pool to the current cluster usage.", - "id": "GkeNodePoolAutoscalingConfig", + "SearchSessionSparkApplicationExecutorStageSummaryResponse": { + "description": "List of Executors associated with a Spark Application Stage.", + "id": "SearchSessionSparkApplicationExecutorStageSummaryResponse", "properties": { - "maxNodeCount": { - "description": "The maximum number of nodes in the node pool. Must be \u003e= min_node_count, and must be \u003e 0. Note: Quota must be sufficient to scale up the cluster.", - "format": "int32", - "type": "integer" + "nextPageToken": { + "description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationExecutorStageSummaryRequest.", + "type": "string" }, - "minNodeCount": { - "description": "The minimum number of nodes in the node pool. Must be \u003e= 0 and \u003c= max_node_count.", - "format": "int32", - "type": "integer" + "sparkApplicationStageExecutors": { + "description": "Details about executors used by the application stage.", + "items": { + "$ref": "ExecutorStageSummary" + }, + "type": "array" } }, "type": "object" }, - "GkeNodePoolConfig": { - "description": "The configuration of a GKE node pool used by a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster).", - "id": "GkeNodePoolConfig", + "SearchSessionSparkApplicationExecutorsResponse": { + "description": "List of Executors associated with a Spark Application.", + "id": "SearchSessionSparkApplicationExecutorsResponse", "properties": { - "autoscaling": { - "$ref": "GkeNodePoolAutoscalingConfig", - "description": "Optional. The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present." - }, - "config": { - "$ref": "GkeNodeConfig", - "description": "Optional. The node pool configuration." + "nextPageToken": { + "description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationExecutorsRequest.", + "type": "string" }, - "locations": { - "description": "Optional. 
The list of Compute Engine zones (https://cloud.google.com/compute/docs/zones#available) where node pool nodes associated with a Dataproc on GKE virtual cluster will be located.Note: All node pools associated with a virtual cluster must be located in the same region as the virtual cluster, and they must be located in the same zone within that region.If a location is not specified during node pool creation, Dataproc on GKE will choose the zone.", + "sparkApplicationExecutors": { + "description": "Details about executors used by the application.", "items": { - "type": "string" + "$ref": "ExecutorSummary" }, "type": "array" } }, "type": "object" }, - "GkeNodePoolTarget": { - "description": "GKE node pools that Dataproc workloads run on.", - "id": "GkeNodePoolTarget", + "SearchSessionSparkApplicationJobsResponse": { + "description": "A list of Jobs associated with a Spark Application.", + "id": "SearchSessionSparkApplicationJobsResponse", "properties": { - "nodePool": { - "description": "Required. The target GKE node pool. Format: 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'", + "nextPageToken": { + "description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationJobsRequest.", "type": "string" }, - "nodePoolConfig": { - "$ref": "GkeNodePoolConfig", - "description": "Input only. The configuration for the GKE node pool.If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail.If omitted, any node pool with the specified name is used. If a node pool with the specified name does not exist, Dataproc create a node pool with default values.This is an input only field. It will not be returned by the API." - }, - "roles": { - "description": "Required. The roles associated with the GKE node pool.", + "sparkApplicationJobs": { + "description": "Output only. Data corresponding to a spark job.", "items": { - "enum": [ - "ROLE_UNSPECIFIED", - "DEFAULT", - "CONTROLLER", - "SPARK_DRIVER", - "SPARK_EXECUTOR" - ], - "enumDescriptions": [ - "Role is unspecified.", - "At least one node pool must have the DEFAULT role. Work assigned to a role that is not associated with a node pool is assigned to the node pool with the DEFAULT role. For example, work assigned to the CONTROLLER role will be assigned to the node pool with the DEFAULT role if no node pool has the CONTROLLER role.", - "Run work associated with the Dataproc control plane (for example, controllers and webhooks). Very low resource requirements.", - "Run work associated with a Spark driver of a job.", - "Run work associated with a Spark executor of a job." - ], - "type": "string" + "$ref": "JobData" }, + "readOnly": true, "type": "array" } }, "type": "object" }, - "GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig": { - "description": "Encryption settings for encrypting workflow template job arguments.", - "id": "GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig", + "SearchSessionSparkApplicationSqlQueriesResponse": { + "description": "List of all queries for a Spark Application.", + "id": "SearchSessionSparkApplicationSqlQueriesResponse", "properties": { - "kmsKey": { - "description": "Optional. 
The Cloud KMS key name to use for encrypting workflow template job arguments.When this this key is provided, the following workflow template job arguments (https://cloud.google.com/dataproc/docs/concepts/workflows/use-workflows#adding_jobs_to_a_template), if present, are CMEK encrypted (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_workflow_template_data): FlinkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob) HadoopJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob) SparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob) SparkRJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob) PySparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob) SparkSqlJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob) scriptVariables and queryList.queries HiveJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob) scriptVariables and queryList.queries PigJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob) scriptVariables and queryList.queries PrestoJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob) scriptVariables and queryList.queries", + "nextPageToken": { + "description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationSqlQueriesRequest.", "type": "string" + }, + "sparkApplicationSqlQueries": { + "description": "Output only. SQL Execution Data", + "items": { + "$ref": "SqlExecutionUiData" + }, + "readOnly": true, + "type": "array" } }, "type": "object" }, - "HadoopJob": { - "description": "A Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).", - "id": "HadoopJob", + "SearchSessionSparkApplicationStageAttemptTasksResponse": { + "description": "List of tasks for a stage of a Spark Application", + "id": "SearchSessionSparkApplicationStageAttemptTasksResponse", "properties": { - "archiveUris": { - "description": "Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.", - "items": { - "type": "string" - }, - "type": "array" - }, - "args": { - "description": "Optional. The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision might occur that causes an incorrect job submission.", - "items": { - "type": "string" - }, - "type": "array" - }, - "fileUris": { - "description": "Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.", - "items": { - "type": "string" - }, - "type": "array" + "nextPageToken": { + "description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationStageAttemptTasksRequest.", + "type": "string" }, - "jarFileUris": { - "description": "Optional. 
Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.", + "sparkApplicationStageAttemptTasks": { + "description": "Output only. Data corresponding to tasks created by spark.", "items": { - "type": "string" + "$ref": "TaskData" }, + "readOnly": true, "type": "array" - }, - "loggingConfig": { - "$ref": "LoggingConfig", - "description": "Optional. The runtime log config for job execution." - }, - "mainClass": { - "description": "The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.", - "type": "string" - }, - "mainJarFileUri": { - "description": "The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'", - "type": "string" - }, - "properties": { - "additionalProperties": { - "type": "string" - }, - "description": "Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.", - "type": "object" } }, "type": "object" }, - "HiveJob": { - "description": "A Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN.", - "id": "HiveJob", + "SearchSessionSparkApplicationStageAttemptsResponse": { + "description": "A list of Stage Attempts for a Stage of a Spark Application.", + "id": "SearchSessionSparkApplicationStageAttemptsResponse", "properties": { - "continueOnFailure": { - "description": "Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.", - "type": "boolean" + "nextPageToken": { + "description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationStageAttemptsRequest.", + "type": "string" }, - "jarFileUris": { - "description": "Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.", + "sparkApplicationStageAttempts": { + "description": "Output only. Data corresponding to stage attempts.", "items": { - "type": "string" + "$ref": "StageData" }, + "readOnly": true, "type": "array" - }, - "properties": { - "additionalProperties": { - "type": "string" - }, - "description": "Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.", - "type": "object" - }, - "queryFileUri": { - "description": "The HCFS URI of the script that contains Hive queries.", - "type": "string" - }, - "queryList": { - "$ref": "QueryList", - "description": "A list of queries." - }, - "scriptVariables": { - "additionalProperties": { - "type": "string" - }, - "description": "Optional. 
Mapping of query variable names to values (equivalent to the Hive command: SET name=\"value\";).", - "type": "object" } }, "type": "object" }, - "IdentityConfig": { - "description": "Identity related configuration, including service account based secure multi-tenancy user mappings.", - "id": "IdentityConfig", + "SearchSessionSparkApplicationStagesResponse": { + "description": "A list of stages associated with a Spark Application.", + "id": "SearchSessionSparkApplicationStagesResponse", "properties": { - "userServiceAccountMapping": { - "additionalProperties": { - "type": "string" + "nextPageToken": { + "description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationStages.", + "type": "string" + }, + "sparkApplicationStages": { + "description": "Output only. Data corresponding to a stage.", + "items": { + "$ref": "StageData" }, - "description": "Required. Map of user to service account.", - "type": "object" + "readOnly": true, + "type": "array" } }, "type": "object" }, - "InjectCredentialsRequest": { - "description": "A request to inject credentials into a cluster.", - "id": "InjectCredentialsRequest", + "SearchSessionSparkApplicationsResponse": { + "description": "A list of summaries of Spark Applications.", + "id": "SearchSessionSparkApplicationsResponse", "properties": { - "clusterUuid": { - "description": "Required. The cluster UUID.", + "nextPageToken": { + "description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationsRequest.", "type": "string" }, - "credentialsCiphertext": { - "description": "Required. The encrypted credentials being injected in to the cluster.The client is responsible for encrypting the credentials in a way that is supported by the cluster.A wrapped value is used here so that the actual contents of the encrypted credentials are not written to audit logs.", - "type": "string" + "sparkApplications": { + "description": "Output only. High level information corresponding to an application.", + "items": { + "$ref": "SparkApplication" + }, + "readOnly": true, + "type": "array" } }, "type": "object" }, - "InstanceFlexibilityPolicy": { - "description": "Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.", - "id": "InstanceFlexibilityPolicy", + "SearchSparkApplicationExecutorStageSummaryResponse": { + "description": "List of Executors associated with a Spark Application Stage.", + "id": "SearchSparkApplicationExecutorStageSummaryResponse", "properties": { - "instanceSelectionList": { - "description": "Optional. List of instance selection options that the group will use when creating new VMs.", - "items": { - "$ref": "InstanceSelection" - }, - "type": "array" + "nextPageToken": { + "description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationExecutorsListRequest.", + "type": "string" }, - "instanceSelectionResults": { - "description": "Output only. 
A list of instance selection results in the group.", + "sparkApplicationStageExecutors": { + "description": "Details about executors used by the application stage.", "items": { - "$ref": "InstanceSelectionResult" + "$ref": "ExecutorStageSummary" }, - "readOnly": true, "type": "array" } }, "type": "object" }, - "InstanceGroupAutoscalingPolicyConfig": { - "description": "Configuration for the size bounds of an instance group, including its proportional size to other groups.", - "id": "InstanceGroupAutoscalingPolicyConfig", + "SearchSparkApplicationExecutorsResponse": { + "description": "List of Executors associated with a Spark Application.", + "id": "SearchSparkApplicationExecutorsResponse", "properties": { - "maxInstances": { - "description": "Required. Maximum number of instances for this group. Required for primary workers. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set.Primary workers - Bounds: [min_instances, ). Secondary workers - Bounds: [min_instances, ). Default: 0.", - "format": "int32", - "type": "integer" - }, - "minInstances": { - "description": "Optional. Minimum number of instances for this group.Primary workers - Bounds: 2, max_instances. Default: 2. Secondary workers - Bounds: 0, max_instances. Default: 0.", - "format": "int32", - "type": "integer" - }, - "weight": { - "description": "Optional. Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker.The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if max_instances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created.If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.", - "format": "int32", - "type": "integer" + "nextPageToken": { + "description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationExecutorsListRequest.", + "type": "string" + }, + "sparkApplicationExecutors": { + "description": "Details about executors used by the application.", + "items": { + "$ref": "ExecutorSummary" + }, + "type": "array" } }, "type": "object" }, - "InstanceGroupConfig": { - "description": "The config settings for Compute Engine resources in an instance group, such as a master or worker group.", - "id": "InstanceGroupConfig", + "SearchSparkApplicationJobsResponse": { + "description": "A list of Jobs associated with a Spark Application.", + "id": "SearchSparkApplicationJobsResponse", "properties": { - "accelerators": { - "description": "Optional. The Compute Engine accelerator configuration for these instances.", - "items": { - "$ref": "AcceleratorConfig" - }, - "type": "array" - }, - "diskConfig": { - "$ref": "DiskConfig", - "description": "Optional. 
Disk option config settings." - }, - "imageUri": { - "description": "Optional. The Compute Engine image resource used for cluster instances.The URI can represent an image or image family.Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id] projects/[project_id]/global/images/[image-id] image-idImage family examples. Dataproc will use the most recent image from the family: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name] projects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.", + "nextPageToken": { + "description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationJobsRequest.", "type": "string" }, - "instanceFlexibilityPolicy": { - "$ref": "InstanceFlexibilityPolicy", - "description": "Optional. Instance flexibility Policy allowing a mixture of VM shapes and provisioning models." - }, - "instanceNames": { - "description": "Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.", + "sparkApplicationJobs": { + "description": "Output only. Data corresponding to a spark job.", "items": { - "type": "string" + "$ref": "JobData" }, "readOnly": true, "type": "array" + } + }, + "type": "object" + }, + "SearchSparkApplicationSqlQueriesResponse": { + "description": "List of all queries for a Spark Application.", + "id": "SearchSparkApplicationSqlQueriesResponse", + "properties": { + "nextPageToken": { + "description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationSqlQueriesRequest.", + "type": "string" }, - "instanceReferences": { - "description": "Output only. List of references to Compute Engine instances.", + "sparkApplicationSqlQueries": { + "description": "Output only. SQL Execution Data", "items": { - "$ref": "InstanceReference" + "$ref": "SqlExecutionUiData" }, "readOnly": true, "type": "array" - }, - "isPreemptible": { - "description": "Output only. Specifies that this instance group contains preemptible instances.", - "readOnly": true, - "type": "boolean" - }, - "machineTypeUri": { - "description": "Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2.", - "type": "string" - }, - "managedGroupConfig": { - "$ref": "ManagedGroupConfig", - "description": "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", - "readOnly": true - }, - "minCpuPlatform": { - "description": "Optional. Specifies the minimum cpu platform for the Instance Group. 
See Dataproc -\u003e Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", - "type": "string" - }, - "minNumInstances": { - "description": "Optional. The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster in placed in an ERROR state. The failed VMs are not deleted.", - "format": "int32", - "type": "integer" - }, - "numInstances": { - "description": "Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1.", - "format": "int32", - "type": "integer" - }, - "preemptibility": { - "description": "Optional. Specifies the preemptibility of the instance group.The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed.The default value for secondary instances is PREEMPTIBLE.", - "enum": [ - "PREEMPTIBILITY_UNSPECIFIED", - "NON_PREEMPTIBLE", - "PREEMPTIBLE", - "SPOT" - ], - "enumDescriptions": [ - "Preemptibility is unspecified, the system will choose the appropriate setting for each instance group.", - "Instances are non-preemptible.This option is allowed for all instance groups and is the only valid value for Master and Worker instance groups.", - "Instances are preemptible (https://cloud.google.com/compute/docs/instances/preemptible).This option is allowed only for secondary worker (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-vms) groups.", - "Instances are Spot VMs (https://cloud.google.com/compute/docs/instances/spot).This option is allowed only for secondary worker (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-vms) groups. Spot VMs are the latest version of preemptible VMs (https://cloud.google.com/compute/docs/instances/preemptible), and provide additional features." - ], - "type": "string" - }, - "startupConfig": { - "$ref": "StartupConfig", - "description": "Optional. Configuration to handle the startup of instances during cluster create and update process." } }, "type": "object" }, - "InstanceReference": { - "description": "A reference to a Compute Engine instance.", - "id": "InstanceReference", + "SearchSparkApplicationStageAttemptTasksResponse": { + "description": "List of tasks for a stage of a Spark Application", + "id": "SearchSparkApplicationStageAttemptTasksResponse", "properties": { - "instanceId": { - "description": "The unique identifier of the Compute Engine instance.", - "type": "string" - }, - "instanceName": { - "description": "The user-friendly name of the Compute Engine instance.", - "type": "string" - }, - "publicEciesKey": { - "description": "The public ECIES key used for sharing data with this instance.", + "nextPageToken": { + "description": "This token is included in the response if there are more results to fetch. 
To fetch additional results, provide this value as the page_token in a subsequent ListSparkApplicationStageAttemptTasksRequest.", "type": "string" }, - "publicKey": { - "description": "The public RSA key used for sharing data with this instance.", - "type": "string" + "sparkApplicationStageAttemptTasks": { + "description": "Output only. Data corresponding to tasks created by spark.", + "items": { + "$ref": "TaskData" + }, + "readOnly": true, + "type": "array" } }, "type": "object" }, - "InstanceSelection": { - "description": "Defines machines types and a rank to which the machines types belong.", - "id": "InstanceSelection", + "SearchSparkApplicationStageAttemptsResponse": { + "description": "A list of Stage Attempts for a Stage of a Spark Application.", + "id": "SearchSparkApplicationStageAttemptsResponse", "properties": { - "machineTypes": { - "description": "Optional. Full machine-type names, e.g. \"n1-standard-16\".", + "nextPageToken": { + "description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListSparkApplicationStageAttemptsRequest.", + "type": "string" + }, + "sparkApplicationStageAttempts": { + "description": "Output only. Data corresponding to stage attempts.", "items": { - "type": "string" + "$ref": "StageData" }, + "readOnly": true, "type": "array" - }, - "rank": { - "description": "Optional. Preference of this instance selection. Lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference.", - "format": "int32", - "type": "integer" } }, "type": "object" }, - "InstanceSelectionResult": { - "description": "Defines a mapping from machine types to the number of VMs that are created with each machine type.", - "id": "InstanceSelectionResult", + "SearchSparkApplicationStagesResponse": { + "description": "A list of stages associated with a Spark Application.", + "id": "SearchSparkApplicationStagesResponse", "properties": { - "machineType": { - "description": "Output only. Full machine-type names, e.g. \"n1-standard-16\".", - "readOnly": true, + "nextPageToken": { + "description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationStages.", "type": "string" }, - "vmCount": { - "description": "Output only. Number of VM provisioned with the machine_type.", - "format": "int32", + "sparkApplicationStages": { + "description": "Output only. Data corresponding to a stage.", + "items": { + "$ref": "StageData" + }, "readOnly": true, - "type": "integer" + "type": "array" } }, "type": "object" }, - "InstantiateWorkflowTemplateRequest": { - "description": "A request to instantiate a workflow template.", - "id": "InstantiateWorkflowTemplateRequest", + "SearchSparkApplicationsResponse": { + "description": "A list of summaries of Spark Applications.", + "id": "SearchSparkApplicationsResponse", "properties": { - "parameters": { - "additionalProperties": { - "type": "string" - }, - "description": "Optional. Map from parameter names to values that should be used for those parameters. Values may not exceed 1000 characters.", - "type": "object" - }, - "requestId": { - "description": "Optional. 
A tag that prevents multiple concurrent workflow instances with the same tag from running. This mitigates risk of concurrent instances started due to retries.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", + "nextPageToken": { + "description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationsRequest.", "type": "string" }, - "version": { - "description": "Optional. The version of workflow template to instantiate. If specified, the workflow will be instantiated only if the current version of the workflow template has the supplied version.This option cannot be used to instantiate a previous version of workflow template.", - "format": "int32", - "type": "integer" + "sparkApplications": { + "description": "Output only. High level information corresponding to an application.", + "items": { + "$ref": "SparkApplication" + }, + "readOnly": true, + "type": "array" } }, "type": "object" }, - "Interval": { - "description": "Represents a time interval, encoded as a Timestamp start (inclusive) and a Timestamp end (exclusive).The start must be less than or equal to the end. When the start equals the end, the interval is empty (matches no time). When both start and end are unspecified, the interval matches any time.", - "id": "Interval", + "SecurityConfig": { + "description": "Security related configuration, including encryption, Kerberos, etc.", + "id": "SecurityConfig", "properties": { - "endTime": { - "description": "Optional. Exclusive end of the interval.If specified, a Timestamp matching this interval will have to be before the end.", - "format": "google-datetime", - "type": "string" + "identityConfig": { + "$ref": "IdentityConfig", + "description": "Optional. Identity related configuration, including service account based secure multi-tenancy user mappings." }, - "startTime": { - "description": "Optional. Inclusive start of the interval.If specified, a Timestamp matching this interval will have to be the same or after the start.", - "format": "google-datetime", - "type": "string" + "kerberosConfig": { + "$ref": "KerberosConfig", + "description": "Optional. Kerberos related configuration." } }, "type": "object" }, - "Job": { - "description": "A Dataproc job resource.", - "id": "Job", + "Session": { + "description": "A representation of a session.", + "id": "Session", "properties": { - "done": { - "description": "Output only. Indicates whether the job is completed. If the value is false, the job is still in progress. If true, the job is completed, and status.state field will indicate if it was successful, failed, or cancelled.", - "readOnly": true, - "type": "boolean" - }, - "driverControlFilesUri": { - "description": "Output only. If present, the location of miscellaneous control files which can be used as part of job setup and handling. If not present, control files might be placed in the same location as driver_output_uri.", + "createTime": { + "description": "Output only. The time when the session was created.", + "format": "google-datetime", "readOnly": true, "type": "string" }, - "driverOutputResourceUri": { - "description": "Output only. A URI pointing to the location of the stdout of the job's driver program.", + "creator": { + "description": "Output only. 
The email address of the user who created the session.", "readOnly": true, "type": "string" }, - "driverSchedulingConfig": { - "$ref": "DriverSchedulingConfig", - "description": "Optional. Driver scheduling configuration." - }, - "flinkJob": { - "$ref": "FlinkJob", - "description": "Optional. Job is a Flink job." - }, - "hadoopJob": { - "$ref": "HadoopJob", - "description": "Optional. Job is a Hadoop job." - }, - "hiveJob": { - "$ref": "HiveJob", - "description": "Optional. Job is a Hive job." + "environmentConfig": { + "$ref": "EnvironmentConfig", + "description": "Optional. Environment configuration for the session execution." }, - "jobUuid": { - "description": "Output only. A UUID that uniquely identifies a job within the project over time. This is in contrast to a user-settable reference.job_id that might be reused over time.", - "readOnly": true, - "type": "string" + "jupyterSession": { + "$ref": "JupyterConfig", + "description": "Optional. Jupyter session config." }, "labels": { "additionalProperties": { "type": "string" }, - "description": "Optional. The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.", + "description": "Optional. The labels to associate with the session. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a session.", "type": "object" }, - "pigJob": { - "$ref": "PigJob", - "description": "Optional. Job is a Pig job." - }, - "placement": { - "$ref": "JobPlacement", - "description": "Required. Job information, including how, when, and where to run the job." - }, - "prestoJob": { - "$ref": "PrestoJob", - "description": "Optional. Job is a Presto job." - }, - "pysparkJob": { - "$ref": "PySparkJob", - "description": "Optional. Job is a PySpark job." - }, - "reference": { - "$ref": "JobReference", - "description": "Optional. The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id." + "name": { + "description": "Required. The resource name of the session.", + "type": "string" }, - "scheduling": { - "$ref": "JobScheduling", - "description": "Optional. Job scheduling configuration." + "runtimeConfig": { + "$ref": "RuntimeConfig", + "description": "Optional. Runtime configuration for the session execution." }, - "sparkJob": { - "$ref": "SparkJob", - "description": "Optional. Job is a Spark job." + "runtimeInfo": { + "$ref": "RuntimeInfo", + "description": "Output only. Runtime information about session execution.", + "readOnly": true }, - "sparkRJob": { - "$ref": "SparkRJob", - "description": "Optional. Job is a SparkR job." + "sessionTemplate": { + "description": "Optional. 
The session template used by the session. Only resource names, including project ID and location, are valid. Example: * https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id] * projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id] The template must be in the same project and Dataproc region as the session.", + "type": "string" }, + "sparkConnectSession": { + "$ref": "SparkConnectConfig", + "description": "Optional. Spark connect session config." }, + "state": { + "description": "Output only. A state of the session.", + "enum": [ + "STATE_UNSPECIFIED", + "CREATING", + "ACTIVE", + "TERMINATING", + "TERMINATED", + "FAILED" + ], + "enumDescriptions": [ + "The session state is unknown.", + "The session is created prior to running.", + "The session is running.", + "The session is terminating.", + "The session is terminated successfully.", + "The session is no longer running due to an error." + ], + "readOnly": true, + "type": "string" }, + "stateHistory": { + "description": "Output only. Historical state information for the session.", + "items": { + "$ref": "SessionStateHistory" + }, + "readOnly": true, + "type": "array" }, + "stateMessage": { + "description": "Output only. Session state details, such as the failure description if the state is FAILED.", + "readOnly": true, + "type": "string" }, + "stateTime": { + "description": "Output only. The time when the session entered the current state.", + "format": "google-datetime", + "readOnly": true, + "type": "string" }, + "user": { + "description": "Optional. The email address of the user who owns the session.", + "type": "string" }, + "uuid": { + "description": "Output only. A session UUID (Unique Universal Identifier). The service generates this value when it creates the session.", + "readOnly": true, + "type": "string" } }, "type": "object" }, - "JobMetadata": { - "description": "Job Operation metadata.", - "id": "JobMetadata", + "SessionOperationMetadata": { + "description": "Metadata describing the Session operation.", + "id": "SessionOperationMetadata", "properties": { - "jobId": { - "description": "Output only. The job id.", - "readOnly": true, + "createTime": { + "description": "The time when the operation was created.", + "format": "google-datetime", "type": "string" }, - "operationType": { - "description": "Output only. Operation type.", - "readOnly": true, + "description": { + "description": "Short description of the operation.", "type": "string" }, - "startTime": { - "description": "Output only. 
Job submission time.", + "doneTime": { + "description": "The time when the operation was finished.", "format": "google-datetime", - "readOnly": true, "type": "string" }, - "status": { - "$ref": "JobStatus", - "description": "Output only. Most recent job status.", - "readOnly": true - } - }, - "type": "object" - }, - "JobPlacement": { - "description": "Dataproc job config.", - "id": "JobPlacement", - "properties": { - "clusterLabels": { + "labels": { "additionalProperties": { "type": "string" }, - "description": "Optional. Cluster labels to identify a cluster where the job will be submitted.", + "description": "Labels associated with the operation.", "type": "object" }, - "clusterName": { - "description": "Required. The name of the cluster where the job will be submitted.", + "operationType": { + "description": "The operation type.", + "enum": [ + "SESSION_OPERATION_TYPE_UNSPECIFIED", + "CREATE", + "TERMINATE", + "DELETE" + ], + "enumDescriptions": [ + "Session operation type is unknown.", + "Create Session operation type.", + "Terminate Session operation type.", + "Delete Session operation type." + ], "type": "string" }, - "clusterUuid": { - "description": "Output only. A cluster UUID generated by the Dataproc service when the job is submitted.", - "readOnly": true, + "session": { + "description": "Name of the session for the operation.", + "type": "string" + }, + "sessionUuid": { + "description": "Session UUID for the operation.", "type": "string" + }, + "warnings": { + "description": "Warnings encountered during operation execution.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" }, - "JobReference": { - "description": "Encapsulates the full scoping used to reference a job.", - "id": "JobReference", + "SessionStateHistory": { + "description": "Historical state information.", + "id": "SessionStateHistory", "properties": { - "jobId": { - "description": "Optional. The job ID, which must be unique within the project.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters.If not specified by the caller, the job ID will be provided by the server.", + "state": { + "description": "Output only. The state of the session at this point in the session history.", + "enum": [ + "STATE_UNSPECIFIED", + "CREATING", + "ACTIVE", + "TERMINATING", + "TERMINATED", + "FAILED" + ], + "enumDescriptions": [ + "The session state is unknown.", + "The session is created prior to running.", + "The session is running.", + "The session is terminating.", + "The session is terminated successfully.", + "The session is no longer running due to an error." + ], + "readOnly": true, "type": "string" }, - "projectId": { - "description": "Optional. The ID of the Google Cloud Platform project that the job belongs to. If specified, must match the request project ID.", + "stateMessage": { + "description": "Output only. Details about the state at this point in the session history.", + "readOnly": true, "type": "string" - } - }, - "type": "object" - }, - "JobScheduling": { - "description": "Job scheduling options.", - "id": "JobScheduling", - "properties": { - "maxFailuresPerHour": { - "description": "Optional. 
Maximum number of times per hour a driver can be restarted as a result of driver exiting with non-zero code before job is reported failed.A job might be reported as thrashing if the driver exits with a non-zero code four times within a 10-minute window.Maximum value is 10.Note: This restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).", - "format": "int32", - "type": "integer" }, - "maxFailuresTotal": { - "description": "Optional. Maximum total number of times a driver can be restarted as a result of the driver exiting with a non-zero code. After the maximum number is reached, the job will be reported as failed.Maximum value is 240.Note: Currently, this restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).", - "format": "int32", - "type": "integer" + "stateStartTime": { + "description": "Output only. The time when the session entered the historical state.", + "format": "google-datetime", + "readOnly": true, + "type": "string" } }, "type": "object" }, - "JobStatus": { - "description": "Dataproc job status.", - "id": "JobStatus", + "SessionTemplate": { + "description": "A representation of a session template.", + "id": "SessionTemplate", "properties": { - "details": { - "description": "Optional. Output only. Job state details, such as an error description if the state is ERROR.", + "createTime": { + "description": "Output only. The time when the template was created.", + "format": "google-datetime", "readOnly": true, "type": "string" }, - "state": { - "description": "Output only. A state message specifying the overall job state.", - "enum": [ - "STATE_UNSPECIFIED", - "PENDING", - "SETUP_DONE", - "RUNNING", - "CANCEL_PENDING", - "CANCEL_STARTED", - "CANCELLED", - "DONE", - "ERROR", - "ATTEMPT_FAILURE" - ], - "enumDescriptions": [ - "The job state is unknown.", - "The job is pending; it has been submitted, but is not yet running.", - "Job has been received by the service and completed initial setup; it will soon be submitted to the cluster.", - "The job is running on the cluster.", - "A CancelJob request has been received, but is pending.", - "Transient in-flight resources have been canceled, and the request to cancel the running job has been issued to the cluster.", - "The job cancellation was successful.", - "The job has completed successfully.", - "The job has completed, but encountered an error.", - "Job attempt has failed. The detail field contains failure details for this attempt.Applies to restartable jobs only." - ], + "creator": { + "description": "Output only. The email address of the user who created the template.", "readOnly": true, "type": "string" }, - "stateStartTime": { - "description": "Output only. The time when this state was entered.", + "description": { + "description": "Optional. Brief description of the template.", + "type": "string" + }, + "environmentConfig": { + "$ref": "EnvironmentConfig", + "description": "Optional. Environment configuration for session execution." + }, + "jupyterSession": { + "$ref": "JupyterConfig", + "description": "Optional. Jupyter session config." + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. Labels to associate with sessions created using this template. 
Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty, but, if present, must contain 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a session.", + "type": "object" + }, + "name": { + "description": "Required. The resource name of the session template.", + "type": "string" + }, + "runtimeConfig": { + "$ref": "RuntimeConfig", + "description": "Optional. Runtime configuration for session execution." + }, + "sparkConnectSession": { + "$ref": "SparkConnectConfig", + "description": "Optional. Spark connect session config." + }, + "updateTime": { + "description": "Output only. The time the template was last updated.", "format": "google-datetime", "readOnly": true, "type": "string" }, - "substate": { - "description": "Output only. Additional state information, which includes status reported by the agent.", - "enum": [ - "UNSPECIFIED", - "SUBMITTED", - "QUEUED", - "STALE_STATUS" - ], - "enumDescriptions": [ - "The job substate is unknown.", - "The Job is submitted to the agent.Applies to RUNNING state.", - "The Job has been received and is awaiting execution (it might be waiting for a condition to be met). See the \"details\" field for the reason for the delay.Applies to RUNNING state.", - "The agent-reported status is out of date, which can be caused by a loss of communication between the agent and Dataproc. If the agent does not send a timely update, the job will fail.Applies to RUNNING state." - ], + "uuid": { + "description": "Output only. A session template UUID (Unique Universal Identifier). The service generates this value when it creates the session template.", "readOnly": true, "type": "string" } }, "type": "object" }, - "JupyterConfig": { - "description": "Jupyter configuration for an interactive session.", - "id": "JupyterConfig", + "SetIamPolicyRequest": { + "description": "Request message for SetIamPolicy method.", + "id": "SetIamPolicyRequest", "properties": { - "displayName": { - "description": "Optional. Display name, shown in the Jupyter kernelspec card.", - "type": "string" - }, - "kernel": { - "description": "Optional. Kernel", - "enum": [ - "KERNEL_UNSPECIFIED", - "PYTHON", - "SCALA" - ], - "enumDescriptions": [ - "The kernel is unknown.", - "Python kernel.", - "Scala kernel." - ], - "type": "string" + "policy": { + "$ref": "Policy", + "description": "REQUIRED: The complete policy to be applied to the resource. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Google Cloud services (such as Projects) might reject them." } }, "type": "object" }, - "KerberosConfig": { - "description": "Specifies Kerberos related configuration.", - "id": "KerberosConfig", + "ShieldedInstanceConfig": { + "description": "Shielded Instance Config for clusters using Compute Engine Shielded VMs (https://cloud.google.com/security/shielded-cloud/shielded-vm).", + "id": "ShieldedInstanceConfig", "properties": { - "crossRealmTrustAdminServer": { - "description": "Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", - "type": "string" - }, - "crossRealmTrustKdc": { - "description": "Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", - "type": "string" - }, - "crossRealmTrustRealm": { - "description": "Optional. 
The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.", - "type": "string" - }, - "crossRealmTrustSharedPasswordUri": { - "description": "Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.", - "type": "string" + "enableIntegrityMonitoring": { + "description": "Optional. Defines whether instances have integrity monitoring enabled.", + "type": "boolean" }, - "enableKerberos": { - "description": "Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.", + "enableSecureBoot": { + "description": "Optional. Defines whether instances have Secure Boot enabled.", "type": "boolean" }, - "kdcDbKeyUri": { - "description": "Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.", + "enableVtpm": { + "description": "Optional. Defines whether instances have the vTPM enabled.", + "type": "boolean" + } + }, + "type": "object" + }, + "ShufflePushReadMetrics": { + "id": "ShufflePushReadMetrics", + "properties": { + "corruptMergedBlockChunks": { + "format": "int64", "type": "string" }, - "keyPasswordUri": { - "description": "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.", + "localMergedBlocksFetched": { + "format": "int64", "type": "string" }, - "keystorePasswordUri": { - "description": "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.", + "localMergedBytesRead": { + "format": "int64", "type": "string" }, - "keystoreUri": { - "description": "Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.", + "localMergedChunksFetched": { + "format": "int64", "type": "string" }, - "kmsKeyUri": { - "description": "Optional. The URI of the KMS key used to encrypt sensitive files.", + "mergedFetchFallbackCount": { + "format": "int64", "type": "string" }, - "realm": { - "description": "Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.", + "remoteMergedBlocksFetched": { + "format": "int64", "type": "string" }, - "rootPrincipalPasswordUri": { - "description": "Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password.", + "remoteMergedBytesRead": { + "format": "int64", "type": "string" }, - "tgtLifetimeHours": { - "description": "Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used.", - "format": "int32", - "type": "integer" - }, - "truststorePasswordUri": { - "description": "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.", + "remoteMergedChunksFetched": { + "format": "int64", "type": "string" }, - "truststoreUri": { - "description": "Optional. The Cloud Storage URI of the truststore file used for SSL encryption. 
If not provided, Dataproc will provide a self-signed certificate.", + "remoteMergedReqsDuration": { + "format": "int64", "type": "string" } }, "type": "object" }, - "KubernetesClusterConfig": { - "description": "The configuration for running the Dataproc cluster on Kubernetes.", - "id": "KubernetesClusterConfig", + "ShufflePushReadQuantileMetrics": { + "id": "ShufflePushReadQuantileMetrics", "properties": { - "gkeClusterConfig": { - "$ref": "GkeClusterConfig", - "description": "Required. The configuration for running the Dataproc cluster on GKE." + "corruptMergedBlockChunks": { + "$ref": "Quantiles" }, - "kubernetesNamespace": { - "description": "Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used.", - "type": "string" + "localMergedBlocksFetched": { + "$ref": "Quantiles" }, - "kubernetesSoftwareConfig": { - "$ref": "KubernetesSoftwareConfig", - "description": "Optional. The software configuration for this Dataproc cluster running on Kubernetes." - } - }, - "type": "object" - }, - "KubernetesSoftwareConfig": { - "description": "The software configuration for this Dataproc cluster running on Kubernetes.", - "id": "KubernetesSoftwareConfig", - "properties": { - "componentVersion": { - "additionalProperties": { - "type": "string" - }, - "description": "The components that should be installed in this Dataproc cluster. The key must be a string from the KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified.", - "type": "object" + "localMergedBytesRead": { + "$ref": "Quantiles" }, - "properties": { - "additionalProperties": { - "type": "string" - }, - "description": "The properties to set on daemon config files.Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image. The following are supported prefixes and their mappings: spark: spark-defaults.confFor more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).", - "type": "object" + "localMergedChunksFetched": { + "$ref": "Quantiles" + }, + "mergedFetchFallbackCount": { + "$ref": "Quantiles" + }, + "remoteMergedBlocksFetched": { + "$ref": "Quantiles" + }, + "remoteMergedBytesRead": { + "$ref": "Quantiles" + }, + "remoteMergedChunksFetched": { + "$ref": "Quantiles" + }, + "remoteMergedReqsDuration": { + "$ref": "Quantiles" } }, "type": "object" }, - "LifecycleConfig": { - "description": "Specifies the cluster auto-delete schedule configuration.", - "id": "LifecycleConfig", + "ShuffleReadMetrics": { + "description": "Shuffle data read by the task.", + "id": "ShuffleReadMetrics", "properties": { - "autoDeleteTime": { - "description": "Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).", - "format": "google-datetime", + "fetchWaitTimeMillis": { + "format": "int64", "type": "string" }, - "autoDeleteTtl": { - "description": "Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. 
Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).", - "format": "google-duration", + "localBlocksFetched": { + "format": "int64", "type": "string" }, - "idleDeleteTtl": { - "description": "Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).", - "format": "google-duration", + "localBytesRead": { + "format": "int64", "type": "string" }, - "idleStartTime": { - "description": "Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).", - "format": "google-datetime", - "readOnly": true, - "type": "string" - } - }, - "type": "object" - }, - "ListAutoscalingPoliciesResponse": { - "description": "A response to a request to list autoscaling policies in a project.", - "id": "ListAutoscalingPoliciesResponse", - "properties": { - "nextPageToken": { - "description": "Output only. This token is included in the response if there are more results to fetch.", - "readOnly": true, + "recordsRead": { + "format": "int64", "type": "string" }, - "policies": { - "description": "Output only. Autoscaling policies list.", - "items": { - "$ref": "AutoscalingPolicy" - }, - "readOnly": true, - "type": "array" - } - }, - "type": "object" - }, - "ListBatchesResponse": { - "description": "A list of batch workloads.", - "id": "ListBatchesResponse", - "properties": { - "batches": { - "description": "Output only. The batches from the specified collection.", - "items": { - "$ref": "Batch" - }, - "readOnly": true, - "type": "array" - }, - "nextPageToken": { - "description": "A token, which can be sent as page_token to retrieve the next page. If this field is omitted, there are no subsequent pages.", + "remoteBlocksFetched": { + "format": "int64", "type": "string" }, - "unreachable": { - "description": "Output only. List of Batches that could not be included in the response. Attempting to get one of these resources may indicate why it was not included in the list response.", - "items": { - "type": "string" - }, - "readOnly": true, - "type": "array" - } - }, - "type": "object" - }, - "ListClustersResponse": { - "description": "The list of all clusters in a project.", - "id": "ListClustersResponse", - "properties": { - "clusters": { - "description": "Output only. The clusters in the project.", - "items": { - "$ref": "Cluster" - }, - "readOnly": true, - "type": "array" + "remoteBytesRead": { + "format": "int64", + "type": "string" }, - "nextPageToken": { - "description": "Output only. This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListClustersRequest.", - "readOnly": true, + "remoteBytesReadToDisk": { + "format": "int64", "type": "string" - } - }, - "type": "object" - }, - "ListJobsResponse": { - "description": "A list of jobs in a project.", - "id": "ListJobsResponse", - "properties": { - "jobs": { - "description": "Output only. Jobs list.", - "items": { - "$ref": "Job" - }, - "readOnly": true, - "type": "array" }, - "nextPageToken": { - "description": "Optional. 
This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListJobsRequest.", + "remoteReqsDuration": { + "format": "int64", "type": "string" }, - "unreachable": { - "description": "Output only. List of jobs with kms_key-encrypted parameters that could not be decrypted. A response to a jobs.get request may indicate the reason for the decryption failure for a specific job.", - "items": { - "type": "string" - }, - "readOnly": true, - "type": "array" + "shufflePushReadMetrics": { + "$ref": "ShufflePushReadMetrics" } }, "type": "object" }, - "ListOperationsResponse": { - "description": "The response message for Operations.ListOperations.", - "id": "ListOperationsResponse", + "ShuffleReadQuantileMetrics": { + "id": "ShuffleReadQuantileMetrics", "properties": { - "nextPageToken": { - "description": "The standard List next-page token.", - "type": "string" + "fetchWaitTimeMillis": { + "$ref": "Quantiles" }, - "operations": { - "description": "A list of operations that matches the specified filter in the request.", - "items": { - "$ref": "Operation" - }, - "type": "array" + "localBlocksFetched": { + "$ref": "Quantiles" + }, + "readBytes": { + "$ref": "Quantiles" + }, + "readRecords": { + "$ref": "Quantiles" + }, + "remoteBlocksFetched": { + "$ref": "Quantiles" + }, + "remoteBytesRead": { + "$ref": "Quantiles" + }, + "remoteBytesReadToDisk": { + "$ref": "Quantiles" + }, + "remoteReqsDuration": { + "$ref": "Quantiles" + }, + "shufflePushReadMetrics": { + "$ref": "ShufflePushReadQuantileMetrics" + }, + "totalBlocksFetched": { + "$ref": "Quantiles" } }, "type": "object" }, - "ListSessionTemplatesResponse": { - "description": "A list of session templates.", - "id": "ListSessionTemplatesResponse", + "ShuffleWriteMetrics": { + "description": "Shuffle data written by task.", + "id": "ShuffleWriteMetrics", "properties": { - "nextPageToken": { - "description": "A token, which can be sent as page_token to retrieve the next page. If this field is omitted, there are no subsequent pages.", + "bytesWritten": { + "format": "int64", "type": "string" }, - "sessionTemplates": { - "description": "Output only. Session template list", - "items": { - "$ref": "SessionTemplate" - }, - "readOnly": true, - "type": "array" + "recordsWritten": { + "format": "int64", + "type": "string" + }, + "writeTimeNanos": { + "format": "int64", + "type": "string" } }, "type": "object" }, - "ListSessionsResponse": { - "description": "A list of interactive sessions.", - "id": "ListSessionsResponse", + "ShuffleWriteQuantileMetrics": { + "id": "ShuffleWriteQuantileMetrics", "properties": { - "nextPageToken": { - "description": "A token, which can be sent as page_token, to retrieve the next page. If this field is omitted, there are no subsequent pages.", - "type": "string" + "writeBytes": { + "$ref": "Quantiles" }, - "sessions": { - "description": "Output only. The sessions from the specified collection.", - "items": { - "$ref": "Session" - }, - "readOnly": true, - "type": "array" + "writeRecords": { + "$ref": "Quantiles" + }, + "writeTimeNanos": { + "$ref": "Quantiles" } }, "type": "object" }, - "ListWorkflowTemplatesResponse": { - "description": "A response to a request to list workflow templates in a project.", - "id": "ListWorkflowTemplatesResponse", + "SinkProgress": { + "id": "SinkProgress", "properties": { - "nextPageToken": { - "description": "Output only. This token is included in the response if there are more results to fetch. 
To fetch additional results, provide this value as the page_token in a subsequent ListWorkflowTemplatesRequest.", - "readOnly": true, + "description": { "type": "string" }, - "templates": { - "description": "Output only. WorkflowTemplates list.", - "items": { - "$ref": "WorkflowTemplate" - }, - "readOnly": true, - "type": "array" - }, - "unreachable": { - "description": "Output only. List of workflow templates that could not be included in the response. Attempting to get one of these resources may indicate why it was not included in the list response.", - "items": { + "metrics": { + "additionalProperties": { "type": "string" }, - "readOnly": true, - "type": "array" + "type": "object" + }, + "numOutputRows": { + "format": "int64", + "type": "string" } }, "type": "object" }, - "LoggingConfig": { - "description": "The runtime logging config of the job.", - "id": "LoggingConfig", + "SoftwareConfig": { + "description": "Specifies the selection and config of software inside the cluster.", + "id": "SoftwareConfig", "properties": { - "driverLogLevels": { - "additionalProperties": { + "imageVersion": { + "description": "Optional. The version of software inside the cluster. It must be one of the supported Dataproc Versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported-dataproc-image-versions), such as \"1.2\" (including a subminor version, such as \"1.2.29\"), or the \"preview\" version (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.", + "type": "string" + }, + "optionalComponents": { + "description": "Optional. The set of components to activate on the cluster.", + "items": { "enum": [ - "LEVEL_UNSPECIFIED", - "ALL", - "TRACE", - "DEBUG", - "INFO", - "WARN", - "ERROR", - "FATAL", - "OFF" + "COMPONENT_UNSPECIFIED", + "ANACONDA", + "DOCKER", + "DRUID", + "FLINK", + "HBASE", + "HIVE_WEBHCAT", + "HUDI", + "JUPYTER", + "PRESTO", + "TRINO", + "RANGER", + "SOLR", + "ZEPPELIN", + "ZOOKEEPER" ], "enumDescriptions": [ - "Level is unspecified. Use default level for log4j.", - "Use ALL level for log4j.", - "Use TRACE level for log4j.", - "Use DEBUG level for log4j.", - "Use INFO level for log4j.", - "Use WARN level for log4j.", - "Use ERROR level for log4j.", - "Use FATAL level for log4j.", - "Turn off log4j." + "Unspecified component. Specifying this will cause Cluster creation to fail.", + "The Anaconda component is no longer supported or applicable to supported Dataproc on Compute Engine image versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-version-clusters#supported-dataproc-image-versions). It cannot be activated on clusters created with supported Dataproc on Compute Engine image versions.", + "Docker", + "The Druid query engine. (alpha)", + "Flink", + "HBase. (beta)", + "The Hive Web HCatalog (the REST service for accessing HCatalog).", + "Hudi.", + "The Jupyter Notebook.", + "The Presto query engine.", + "The Trino query engine.", + "The Ranger service.", + "The Solr service.", + "The Zeppelin notebook.", + "The Zookeeper service." ], "type": "string" }, - "description": "The per-package log levels for the driver. This can include \"root\" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG'", + "type": "array" + }, + "properties": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. 
The properties to set on daemon config files.Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. The following are supported prefixes and their mappings: capacity-scheduler: capacity-scheduler.xml core: core-site.xml distcp: distcp-default.xml hdfs: hdfs-site.xml hive: hive-site.xml mapred: mapred-site.xml pig: pig.properties spark: spark-defaults.conf yarn: yarn-site.xmlFor more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).", "type": "object" } }, "type": "object" }, - "ManagedCluster": { - "description": "Cluster that is managed by the workflow.", - "id": "ManagedCluster", + "SourceProgress": { + "id": "SourceProgress", "properties": { - "clusterName": { - "description": "Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix.The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.", + "description": { "type": "string" }, - "config": { - "$ref": "ClusterConfig", - "description": "Required. The cluster configuration." + "endOffset": { + "type": "string" }, - "labels": { + "inputRowsPerSecond": { + "format": "double", + "type": "number" + }, + "latestOffset": { + "type": "string" + }, + "metrics": { "additionalProperties": { "type": "string" }, - "description": "Optional. The labels to associate with this cluster.Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \\p{Ll}\\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \\p{Ll}\\p{Lo}\\p{N}_-{0,63}No more than 32 labels can be associated with a given cluster.", "type": "object" - } - }, - "type": "object" - }, - "ManagedGroupConfig": { - "description": "Specifies the resources used to actively manage an instance group.", - "id": "ManagedGroupConfig", - "properties": { - "instanceGroupManagerName": { - "description": "Output only. The name of the Instance Group Manager for this group.", - "readOnly": true, - "type": "string" }, - "instanceGroupManagerUri": { - "description": "Output only. The partial URI to the instance group manager for this group. E.g. projects/my-project/regions/us-central1/instanceGroupManagers/my-igm.", - "readOnly": true, + "numInputRows": { + "format": "int64", "type": "string" }, - "instanceTemplateName": { - "description": "Output only. The name of the Instance Template used for the Managed Instance Group.", - "readOnly": true, + "processedRowsPerSecond": { + "format": "double", + "type": "number" + }, + "startOffset": { "type": "string" } }, "type": "object" }, - "MetastoreConfig": { - "description": "Specifies a Metastore configuration.", - "id": "MetastoreConfig", + "SparkApplication": { + "description": "A summary of Spark Application", + "id": "SparkApplication", "properties": { - "dataprocMetastoreService": { - "description": "Required. Resource name of an existing Dataproc Metastore service.Example: projects/[project_id]/locations/[dataproc_region]/services/[service-name]", + "application": { + "$ref": "ApplicationInfo", + "description": "Output only. High level information corresponding to an application.", + "readOnly": true + }, + "name": { + "description": "Identifier. 
Name of the spark application", "type": "string" } }, "type": "object" }, - "Metric": { - "description": "A Dataproc custom metric.", - "id": "Metric", + "SparkBatch": { + "description": "A configuration for running an Apache Spark (https://spark.apache.org/) batch workload.", + "id": "SparkBatch", "properties": { - "metricOverrides": { - "description": "Optional. Specify one or more Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) to collect for the metric course (for the SPARK metric source (any Spark metric (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be specified).Provide metrics in the following format: METRIC_SOURCE: INSTANCE:GROUP:METRIC Use camelcase as appropriate.Examples: yarn:ResourceManager:QueueMetrics:AppsCompleted spark:driver:DAGScheduler:job.allJobs sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed hiveserver2:JVM:Memory:NonHeapMemoryUsage.used Notes: Only the specified overridden metrics are collected for the metric source. For example, if one or more spark:executive metrics are listed as metric overrides, other SPARK metrics are not collected. The collection of the metrics for other enabled custom metric sources is unaffected. For example, if both SPARK andd YARN metric sources are enabled, and overrides are provided for Spark metrics only, all YARN metrics are collected.", + "archiveUris": { + "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", "items": { "type": "string" }, "type": "array" }, - "metricSource": { - "description": "Required. A standard set of metrics is collected unless metricOverrides are specified for the metric source (see Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) for more information).", - "enum": [ - "METRIC_SOURCE_UNSPECIFIED", - "MONITORING_AGENT_DEFAULTS", - "HDFS", - "SPARK", - "YARN", - "SPARK_HISTORY_SERVER", - "HIVESERVER2", - "HIVEMETASTORE", - "FLINK" - ], - "enumDescriptions": [ - "Required unspecified metric source.", - "Monitoring agent metrics. If this source is enabled, Dataproc enables the monitoring agent in Compute Engine, and collects monitoring agent metrics, which are published with an agent.googleapis.com prefix.", - "HDFS metric source.", - "Spark metric source.", - "YARN metric source.", - "Spark History Server metric source.", - "Hiveserver2 metric source.", - "hivemetastore metric source", - "flink metric source" - ], + "args": { + "description": "Optional. The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.", + "items": { + "type": "string" + }, + "type": "array" + }, + "fileUris": { + "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor.", + "items": { + "type": "string" + }, + "type": "array" + }, + "jarFileUris": { + "description": "Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.", + "items": { + "type": "string" + }, + "type": "array" + }, + "mainClass": { + "description": "Optional. The name of the driver main class. The jar file that contains the class must be in the classpath or specified in jar_file_uris.", + "type": "string" + }, + "mainJarFileUri": { + "description": "Optional. 
The HCFS URI of the jar file that contains the main class.", "type": "string" } }, "type": "object" }, - "NamespacedGkeDeploymentTarget": { - "deprecated": true, - "description": "Deprecated. Used only for the deprecated beta. A full, namespace-isolated deployment target for an existing GKE cluster.", - "id": "NamespacedGkeDeploymentTarget", + "SparkConnectConfig": { + "description": "Spark connect configuration for an interactive session.", + "id": "SparkConnectConfig", + "properties": {}, + "type": "object" + }, + "SparkHistoryServerConfig": { + "description": "Spark History Server configuration for the workload.", + "id": "SparkHistoryServerConfig", "properties": { - "clusterNamespace": { - "description": "Optional. A namespace within the GKE cluster to deploy into.", - "type": "string" - }, - "targetGkeCluster": { - "description": "Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'", + "dataprocCluster": { + "description": "Optional. Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.Example: projects/[project_id]/regions/[region]/clusters/[cluster_name]", "type": "string" } }, "type": "object" }, - "NodeGroup": { - "description": "Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource.", - "id": "NodeGroup", + "SparkJob": { + "description": "A Dataproc job for running Apache Spark (https://spark.apache.org/) applications on YARN.", + "id": "SparkJob", "properties": { - "labels": { - "additionalProperties": { + "archiveUris": { + "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + "items": { "type": "string" }, - "description": "Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labelsn.", - "type": "object" + "type": "array" }, - "name": { - "description": "The Node group resource name (https://aip.dev/122).", - "type": "string" + "args": { + "description": "Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + "items": { + "type": "string" + }, + "type": "array" }, - "nodeGroupConfig": { - "$ref": "InstanceGroupConfig", - "description": "Optional. The node group instance group configuration." + "fileUris": { + "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", + "items": { + "type": "string" + }, + "type": "array" }, - "roles": { - "description": "Required. Node group roles.", + "jarFileUris": { + "description": "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.", "items": { - "enum": [ - "ROLE_UNSPECIFIED", - "DRIVER" - ], - "enumDescriptions": [ - "Required unspecified role.", - "Job drivers run on the node pool." - ], "type": "string" }, "type": "array" + }, + "loggingConfig": { + "$ref": "LoggingConfig", + "description": "Optional. The runtime log config for job execution." + }, + "mainClass": { + "description": "The name of the driver's main class. 
The jar file that contains the class must be in the default CLASSPATH or specified in SparkJob.jar_file_uris.", + "type": "string" + }, + "mainJarFileUri": { + "description": "The HCFS URI of the jar file that contains the main class.", + "type": "string" + }, + "properties": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", + "type": "object" } }, "type": "object" }, - "NodeGroupAffinity": { - "description": "Node Group Affinity for clusters using sole-tenant node groups. The Dataproc NodeGroupAffinity resource is not related to the Dataproc NodeGroup resource.", - "id": "NodeGroupAffinity", + "SparkPlanGraph": { + "description": "A graph used for storing information of an executionPlan of DataFrame.", + "id": "SparkPlanGraph", "properties": { - "nodeGroupUri": { - "description": "Required. The URI of a sole-tenant node group resource (https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that the cluster will be created on.A full URL, partial URI, or node group name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 node-group-1", + "edges": { + "items": { + "$ref": "SparkPlanGraphEdge" + }, + "type": "array" + }, + "executionId": { + "format": "int64", "type": "string" + }, + "nodes": { + "items": { + "$ref": "SparkPlanGraphNodeWrapper" + }, + "type": "array" } }, "type": "object" }, - "NodeGroupOperationMetadata": { - "description": "Metadata describing the node group operation.", - "id": "NodeGroupOperationMetadata", + "SparkPlanGraphCluster": { + "description": "Represents a tree of spark plan.", + "id": "SparkPlanGraphCluster", "properties": { - "clusterUuid": { - "description": "Output only. Cluster UUID associated with the node group operation.", - "readOnly": true, - "type": "string" - }, - "description": { - "description": "Output only. Short description of operation.", - "readOnly": true, + "desc": { "type": "string" }, - "labels": { - "additionalProperties": { - "type": "string" + "metrics": { + "items": { + "$ref": "SqlPlanMetric" }, - "description": "Output only. Labels associated with the operation.", - "readOnly": true, - "type": "object" - }, - "nodeGroupId": { - "description": "Output only. Node group ID for the operation.", - "readOnly": true, - "type": "string" + "type": "array" }, - "operationType": { - "description": "The operation type.", - "enum": [ - "NODE_GROUP_OPERATION_TYPE_UNSPECIFIED", - "CREATE", - "UPDATE", - "DELETE", - "RESIZE", - "REPAIR", - "UPDATE_LABELS", - "START", - "STOP" - ], - "enumDescriptions": [ - "Node group operation type is unknown.", - "Create node group operation type.", - "Update node group operation type.", - "Delete node group operation type.", - "Resize node group operation type.", - "Repair node group operation type.", - "Update node group label operation type.", - "Start node group operation type.", - "Stop node group operation type." - ], + "name": { "type": "string" }, - "status": { - "$ref": "ClusterOperationStatus", - "description": "Output only. Current operation status.", - "readOnly": true - }, - "statusHistory": { - "description": "Output only. 
The previous operation status.", + "nodes": { "items": { - "$ref": "ClusterOperationStatus" + "$ref": "SparkPlanGraphNodeWrapper" }, - "readOnly": true, "type": "array" }, - "warnings": { - "description": "Output only. Errors encountered during operation execution.", - "items": { - "type": "string" - }, - "readOnly": true, - "type": "array" + "sparkPlanGraphClusterId": { + "format": "int64", + "type": "string" } }, "type": "object" }, - "NodeInitializationAction": { - "description": "Specifies an executable to run on a fully configured node and a timeout period for executable completion.", - "id": "NodeInitializationAction", + "SparkPlanGraphEdge": { + "description": "Represents a directed edge in the spark plan tree from child to parent.", + "id": "SparkPlanGraphEdge", "properties": { - "executableFile": { - "description": "Required. Cloud Storage URI of executable file.", + "fromId": { + "format": "int64", "type": "string" }, - "executionTimeout": { - "description": "Optional. Amount of time executable has to complete. Default is 10 minutes (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period.", - "format": "google-duration", + "toId": { + "format": "int64", "type": "string" } }, "type": "object" }, - "NodePool": { - "description": "indicating a list of workers of same type", - "id": "NodePool", + "SparkPlanGraphNode": { + "description": "Represents a node in the spark plan tree.", + "id": "SparkPlanGraphNode", "properties": { - "id": { - "description": "Required. A unique id of the node pool. Primary and Secondary workers can be specified using special reserved ids PRIMARY_WORKER_POOL and SECONDARY_WORKER_POOL respectively. Aux node pools can be referenced using corresponding pool id.", + "desc": { "type": "string" }, - "instanceNames": { - "description": "Name of instances to be repaired. These instances must belong to specified node pool.", + "metrics": { "items": { - "type": "string" + "$ref": "SqlPlanMetric" }, "type": "array" }, - "repairAction": { - "description": "Required. Repair action to take on specified resources of the node pool.", - "enum": [ - "REPAIR_ACTION_UNSPECIFIED", - "DELETE" - ], - "enumDescriptions": [ - "No action will be taken by default.", - "delete the specified list of nodes." - ], + "name": { + "type": "string" + }, + "sparkPlanGraphNodeId": { + "format": "int64", "type": "string" } }, "type": "object" }, - "Operation": { - "description": "This resource represents a long-running operation that is the result of a network API call.", - "id": "Operation", + "SparkPlanGraphNodeWrapper": { + "description": "Wrapper user to represent either a node or a cluster.", + "id": "SparkPlanGraphNodeWrapper", "properties": { - "done": { - "description": "If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.", - "type": "boolean" - }, - "error": { - "$ref": "Status", - "description": "The error result of the operation in case of failure or cancellation." + "cluster": { + "$ref": "SparkPlanGraphCluster" }, - "metadata": { - "additionalProperties": { - "description": "Properties of the object. 
Contains field @type with type URL.", - "type": "any" + "node": { + "$ref": "SparkPlanGraphNode" + } + }, + "type": "object" + }, + "SparkRBatch": { + "description": "A configuration for running an Apache SparkR (https://spark.apache.org/docs/latest/sparkr.html) batch workload.", + "id": "SparkRBatch", + "properties": { + "archiveUris": { + "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + "items": { + "type": "string" }, - "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", - "type": "object" + "type": "array" }, - "name": { - "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}.", - "type": "string" + "args": { + "description": "Optional. The arguments to pass to the Spark driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.", + "items": { + "type": "string" + }, + "type": "array" }, - "response": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" + "fileUris": { + "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor.", + "items": { + "type": "string" }, - "description": "The normal, successful response of the operation. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.", - "type": "object" + "type": "array" + }, + "mainRFileUri": { + "description": "Required. The HCFS URI of the main R file to use as the driver. Must be a .R or .r file.", + "type": "string" } }, "type": "object" }, - "OrderedJob": { - "description": "A job executed by the workflow.", - "id": "OrderedJob", + "SparkRJob": { + "description": "A Dataproc job for running Apache SparkR (https://spark.apache.org/docs/latest/sparkr.html) applications on YARN.", + "id": "SparkRJob", "properties": { - "flinkJob": { - "$ref": "FlinkJob", - "description": "Optional. Job is a Flink job." - }, - "hadoopJob": { - "$ref": "HadoopJob", - "description": "Optional. Job is a Hadoop job." - }, - "hiveJob": { - "$ref": "HiveJob", - "description": "Optional. Job is a Hive job." - }, - "labels": { - "additionalProperties": { + "archiveUris": { + "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + "items": { "type": "string" }, - "description": "Optional. 
The labels to associate with this job.Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \\p{Ll}\\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \\p{Ll}\\p{Lo}\\p{N}_-{0,63}No more than 32 labels can be associated with a given job.", - "type": "object" - }, - "pigJob": { - "$ref": "PigJob", - "description": "Optional. Job is a Pig job." + "type": "array" }, - "prerequisiteStepIds": { - "description": "Optional. The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow.", + "args": { + "description": "Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", "items": { "type": "string" }, "type": "array" }, - "prestoJob": { - "$ref": "PrestoJob", - "description": "Optional. Job is a Presto job." - }, - "pysparkJob": { - "$ref": "PySparkJob", - "description": "Optional. Job is a PySpark job." - }, - "scheduling": { - "$ref": "JobScheduling", - "description": "Optional. Job scheduling configuration." - }, - "sparkJob": { - "$ref": "SparkJob", - "description": "Optional. Job is a Spark job." - }, - "sparkRJob": { - "$ref": "SparkRJob", - "description": "Optional. Job is a SparkR job." + "fileUris": { + "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", + "items": { + "type": "string" + }, + "type": "array" }, - "sparkSqlJob": { - "$ref": "SparkSqlJob", - "description": "Optional. Job is a SparkSql job." + "loggingConfig": { + "$ref": "LoggingConfig", + "description": "Optional. The runtime log config for job execution." }, - "stepId": { - "description": "Required. The step id. The id must be unique among all jobs within the template.The step id is used as prefix for job id, as job goog-dataproc-workflow-step-id label, and in prerequisiteStepIds field from other steps.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.", + "mainRFileUri": { + "description": "Required. The HCFS URI of the main R file to use as the driver. Must be a .R file.", "type": "string" }, - "trinoJob": { - "$ref": "TrinoJob", - "description": "Optional. Job is a Trino job." + "properties": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", + "type": "object" } }, "type": "object" }, - "ParameterValidation": { - "description": "Configuration for parameter validation.", - "id": "ParameterValidation", + "SparkRuntimeInfo": { + "id": "SparkRuntimeInfo", "properties": { - "regex": { - "$ref": "RegexValidation", - "description": "Validation based on regular expressions." + "javaHome": { + "type": "string" }, - "values": { - "$ref": "ValueValidation", - "description": "Validation based on a list of allowed values." 
+ "javaVersion": { + "type": "string" + }, + "scalaVersion": { + "type": "string" } }, "type": "object" }, - "PeripheralsConfig": { - "description": "Auxiliary services configuration for a workload.", - "id": "PeripheralsConfig", + "SparkSqlBatch": { + "description": "A configuration for running Apache Spark SQL (https://spark.apache.org/sql/) queries as a batch workload.", + "id": "SparkSqlBatch", "properties": { - "metastoreService": { - "description": "Optional. Resource name of an existing Dataproc Metastore service.Example: projects/[project_id]/locations/[region]/services/[service_id]", + "jarFileUris": { + "description": "Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.", + "items": { + "type": "string" + }, + "type": "array" + }, + "queryFileUri": { + "description": "Required. The HCFS URI of the script that contains Spark SQL queries to execute.", "type": "string" }, - "sparkHistoryServerConfig": { - "$ref": "SparkHistoryServerConfig", - "description": "Optional. The Spark History Server configuration for the workload." + "queryVariables": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";).", + "type": "object" } }, "type": "object" }, - "PigJob": { - "description": "A Dataproc job for running Apache Pig (https://pig.apache.org/) queries on YARN.", - "id": "PigJob", + "SparkSqlJob": { + "description": "A Dataproc job for running Apache Spark SQL (https://spark.apache.org/sql/) queries.", + "id": "SparkSqlJob", "properties": { - "continueOnFailure": { - "description": "Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.", - "type": "boolean" - }, "jarFileUris": { - "description": "Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.", + "description": "Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.", "items": { "type": "string" }, @@ -5876,11 +10886,11 @@ "additionalProperties": { "type": "string" }, - "description": "Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.", + "description": "Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API might be overwritten.", "type": "object" }, "queryFileUri": { - "description": "The HCFS URI of the script that contains the Pig queries.", + "description": "The HCFS URI of the script that contains SQL queries.", "type": "string" }, "queryList": { @@ -5891,623 +10901,750 @@ "additionalProperties": { "type": "string" }, - "description": "Optional. Mapping of query variable names to values (equivalent to the Pig command: name=[value]).", + "description": "Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";).", "type": "object" } }, "type": "object" }, - "Policy": { - "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources.A Policy is a collection of bindings. A binding binds one or more members, or principals, to a single role. 
Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A role is a named list of permissions; each role can be an IAM predefined role or a user-created custom role.For some types of Google Cloud resources, a binding can also specify a condition, which is a logical expression that allows access to a resource only if the expression evaluates to true. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).JSON example: { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } YAML example: bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 For a description of IAM and its features, see the IAM documentation (https://cloud.google.com/iam/docs/).", - "id": "Policy", + "SparkStandaloneAutoscalingConfig": { + "description": "Basic autoscaling configurations for Spark Standalone.", + "id": "SparkStandaloneAutoscalingConfig", "properties": { - "bindings": { - "description": "Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.", - "items": { - "$ref": "Binding" - }, - "type": "array" + "gracefulDecommissionTimeout": { + "description": "Required. Timeout for Spark graceful decommissioning of spark workers. Specifies the duration to wait for spark worker to complete spark decommissioning tasks before forcefully removing workers. Only applicable to downscaling operations.Bounds: 0s, 1d.", + "format": "google-duration", + "type": "string" }, - "etag": { - "description": "etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. 
It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.",
-        "format": "byte",
+      "removeOnlyIdleWorkers": {
+        "description": "Optional. Remove only idle workers when scaling down the cluster.",
+        "type": "boolean"
+      },
+      "scaleDownFactor": {
+        "description": "Required. Fraction of required executors to remove from Spark Serverless clusters. A scale-down factor of 1.0 will result in scaling down so that there are no more executors for the Spark Job (more aggressive scaling). A scale-down factor closer to 0 will result in a smaller magnitude of scaling down (less aggressive scaling).Bounds: 0.0, 1.0.",
+        "format": "double",
+        "type": "number"
+      },
+      "scaleDownMinWorkerFraction": {
+        "description": "Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.",
+        "format": "double",
+        "type": "number"
+      },
+      "scaleUpFactor": {
+        "description": "Required. Fraction of required workers to add to Spark Standalone clusters. A scale-up factor of 1.0 will result in scaling up so that there are no more required workers for the Spark Job (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling).Bounds: 0.0, 1.0.",
+        "format": "double",
+        "type": "number"
+      },
+      "scaleUpMinWorkerFraction": {
+        "description": "Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.",
+        "format": "double",
+        "type": "number"
+      }
+    },
+    "type": "object"
+  },
+  "SparkWrapperObject": {
+    "description": "Outer message that contains the data obtained from spark listener, packaged with information that is required to process it.",
+    "id": "SparkWrapperObject",
+    "properties": {
+      "appSummary": {
+        "$ref": "AppSummary"
+      },
+      "applicationEnvironmentInfo": {
+        "$ref": "ApplicationEnvironmentInfo"
+      },
+      "applicationId": {
+        "description": "Application Id created by Spark.",
+        "type": "string"
+      },
+      "applicationInfo": {
+        "$ref": "ApplicationInfo"
+      },
+      "eventTimestamp": {
+        "description": "VM Timestamp associated with the data object.",
+        "format": "google-datetime",
+        "type": "string"
+      },
+      "executorStageSummary": {
+        "$ref": "ExecutorStageSummary"
+      },
+      "executorSummary": {
+        "$ref": "ExecutorSummary"
+      },
+      "jobData": {
+        "$ref": "JobData"
+      },
+      "poolData": {
+        "$ref": "PoolData"
+      },
+      "processSummary": {
+        "$ref": "ProcessSummary"
+      },
+      "rddOperationGraph": {
+        "$ref": "RddOperationGraph"
+      },
+      "rddStorageInfo": {
+        "$ref": "RddStorageInfo"
+      },
+      "resourceProfileInfo": {
+        "$ref": "ResourceProfileInfo"
+      },
+      "sparkPlanGraph": {
+        "$ref": "SparkPlanGraph"
+      },
+      "speculationStageSummary": {
+        "$ref": "SpeculationStageSummary"
+      },
+      "sqlExecutionUiData": {
+        "$ref": "SqlExecutionUiData"
+      },
+      "stageData": {
+        "$ref": "StageData"
+      },
+      "streamBlockData": {
+        "$ref": "StreamBlockData"
+      },
+      "streamingQueryData": {
+        "$ref": "StreamingQueryData"
+      },
+      "streamingQueryProgress": {
+        "$ref": "StreamingQueryProgress"
+      },
+      "taskData": {
+        "$ref": "TaskData"
+      }
+    },
+    "type": "object"
+  },
+  "SpeculationStageSummary": {
+    "description": "Details of the speculation task when speculative execution is enabled.",
+    "id": "SpeculationStageSummary",
+    "properties": {
+      "numActiveTasks": {
+        "format": "int32",
+        "type": "integer"
+      },
+      "numCompletedTasks": {
+        "format": "int32",
+        "type": "integer"
+      },
+      "numFailedTasks": {
+        "format": "int32",
+        "type": "integer"
+      },
+      "numKilledTasks": {
+        "format": "int32",
+        "type": "integer"
+      },
+      "numTasks": {
+        "format": "int32",
+        "type": "integer"
+      },
+      "stageAttemptId": {
+        "format": "int32",
+        "type": "integer"
+      },
+      "stageId": {
+        "format": "int64",
+        "type": "string"
+      }
+    },
+    "type": "object"
+  },
-      "version": {
-        "description": "Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. 
This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", + "applicationInfo": { + "$ref": "ApplicationInfo" + }, + "eventTimestamp": { + "description": "VM Timestamp associated with the data object.", + "format": "google-datetime", + "type": "string" + }, + "executorStageSummary": { + "$ref": "ExecutorStageSummary" + }, + "executorSummary": { + "$ref": "ExecutorSummary" + }, + "jobData": { + "$ref": "JobData" + }, + "poolData": { + "$ref": "PoolData" + }, + "processSummary": { + "$ref": "ProcessSummary" + }, + "rddOperationGraph": { + "$ref": "RddOperationGraph" + }, + "rddStorageInfo": { + "$ref": "RddStorageInfo" + }, + "resourceProfileInfo": { + "$ref": "ResourceProfileInfo" + }, + "sparkPlanGraph": { + "$ref": "SparkPlanGraph" + }, + "speculationStageSummary": { + "$ref": "SpeculationStageSummary" + }, + "sqlExecutionUiData": { + "$ref": "SqlExecutionUiData" + }, + "stageData": { + "$ref": "StageData" + }, + "streamBlockData": { + "$ref": "StreamBlockData" + }, + "streamingQueryData": { + "$ref": "StreamingQueryData" + }, + "streamingQueryProgress": { + "$ref": "StreamingQueryProgress" + }, + "taskData": { + "$ref": "TaskData" + } + }, + "type": "object" + }, + "SpeculationStageSummary": { + "description": "Details of the speculation task when speculative execution is enabled.", + "id": "SpeculationStageSummary", + "properties": { + "numActiveTasks": { + "format": "int32", + "type": "integer" + }, + "numCompletedTasks": { + "format": "int32", + "type": "integer" + }, + "numFailedTasks": { + "format": "int32", + "type": "integer" + }, + "numKilledTasks": { + "format": "int32", + "type": "integer" + }, + "numTasks": { "format": "int32", "type": "integer" + }, + "stageAttemptId": { + "format": "int32", + "type": "integer" + }, + "stageId": { + "format": "int64", + "type": "string" } }, "type": "object" }, - "PrestoJob": { - "description": "A Dataproc job for running Presto (https://prestosql.io/) queries. IMPORTANT: The Dataproc Presto Optional Component (https://cloud.google.com/dataproc/docs/concepts/components/presto) must be enabled when the cluster is created to submit a Presto job to the cluster.", - "id": "PrestoJob", + "SqlExecutionUiData": { + "description": "SQL Execution Data", + "id": "SqlExecutionUiData", "properties": { - "clientTags": { - "description": "Optional. 
Presto client tags to attach to this query", - "items": { + "completionTime": { + "format": "google-datetime", + "type": "string" + }, + "description": { + "type": "string" + }, + "details": { + "type": "string" + }, + "errorMessage": { + "type": "string" + }, + "executionId": { + "format": "int64", + "type": "string" + }, + "jobs": { + "additionalProperties": { + "enum": [ + "JOB_EXECUTION_STATUS_UNSPECIFIED", + "JOB_EXECUTION_STATUS_RUNNING", + "JOB_EXECUTION_STATUS_SUCCEEDED", + "JOB_EXECUTION_STATUS_FAILED", + "JOB_EXECUTION_STATUS_UNKNOWN" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ], "type": "string" }, - "type": "array" + "type": "object" }, - "continueOnFailure": { - "description": "Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.", - "type": "boolean" + "metricValues": { + "additionalProperties": { + "type": "string" + }, + "type": "object" }, - "loggingConfig": { - "$ref": "LoggingConfig", - "description": "Optional. The runtime log config for job execution." + "metricValuesIsNull": { + "type": "boolean" }, - "outputFormat": { - "description": "Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats", - "type": "string" + "metrics": { + "items": { + "$ref": "SqlPlanMetric" + }, + "type": "array" }, - "properties": { + "modifiedConfigs": { "additionalProperties": { "type": "string" }, - "description": "Optional. A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html) Equivalent to using the --session flag in the Presto CLI", "type": "object" }, - "queryFileUri": { - "description": "The HCFS URI of the script that contains SQL queries.", + "physicalPlanDescription": { "type": "string" }, - "queryList": { - "$ref": "QueryList", - "description": "A list of queries." + "rootExecutionId": { + "format": "int64", + "type": "string" + }, + "stages": { + "items": { + "format": "int64", + "type": "string" + }, + "type": "array" + }, + "submissionTime": { + "format": "google-datetime", + "type": "string" } }, "type": "object" }, - "PyPiRepositoryConfig": { - "description": "Configuration for PyPi repository", - "id": "PyPiRepositoryConfig", + "SqlPlanMetric": { + "description": "Metrics related to SQL execution.", + "id": "SqlPlanMetric", "properties": { - "pypiRepository": { - "description": "Optional. PyPi repository address", + "accumulatorId": { + "format": "int64", + "type": "string" + }, + "metricType": { + "type": "string" + }, + "name": { "type": "string" } }, "type": "object" }, - "PySparkBatch": { - "description": "A configuration for running an Apache PySpark (https://spark.apache.org/docs/latest/api/python/getting_started/quickstart.html) batch workload.", - "id": "PySparkBatch", + "StageAttemptTasksSummary": { + "description": "Data related to tasks summary for a Spark Stage Attempt", + "id": "StageAttemptTasksSummary", "properties": { - "archiveUris": { - "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", - "items": { - "type": "string" - }, - "type": "array" + "applicationId": { + "type": "string" }, - "args": { - "description": "Optional. The arguments to pass to the driver. 
Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.", - "items": { - "type": "string" - }, - "type": "array" + "numFailedTasks": { + "format": "int32", + "type": "integer" }, - "fileUris": { - "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor.", - "items": { - "type": "string" - }, - "type": "array" + "numKilledTasks": { + "format": "int32", + "type": "integer" }, - "jarFileUris": { - "description": "Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.", - "items": { - "type": "string" - }, - "type": "array" + "numPendingTasks": { + "format": "int32", + "type": "integer" }, - "mainPythonFileUri": { - "description": "Required. The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.", - "type": "string" + "numRunningTasks": { + "format": "int32", + "type": "integer" }, - "pythonFileUris": { - "description": "Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.", - "items": { - "type": "string" - }, - "type": "array" + "numSuccessTasks": { + "format": "int32", + "type": "integer" + }, + "numTasks": { + "format": "int32", + "type": "integer" + }, + "stageAttemptId": { + "format": "int32", + "type": "integer" + }, + "stageId": { + "format": "int64", + "type": "string" } }, "type": "object" }, - "PySparkJob": { - "description": "A Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN.", - "id": "PySparkJob", + "StageData": { + "description": "Data corresponding to a stage.", + "id": "StageData", "properties": { - "archiveUris": { - "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", - "items": { - "type": "string" - }, - "type": "array" - }, - "args": { - "description": "Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", - "items": { - "type": "string" - }, - "type": "array" - }, - "fileUris": { - "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", + "accumulatorUpdates": { "items": { - "type": "string" + "$ref": "AccumulableInfo" }, "type": "array" }, - "jarFileUris": { - "description": "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.", - "items": { - "type": "string" - }, - "type": "array" + "completionTime": { + "format": "google-datetime", + "type": "string" }, - "loggingConfig": { - "$ref": "LoggingConfig", - "description": "Optional. The runtime log config for job execution." + "description": { + "type": "string" }, - "mainPythonFileUri": { - "description": "Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.", + "details": { "type": "string" }, - "properties": { + "executorMetricsDistributions": { + "$ref": "ExecutorMetricsDistributions" + }, + "executorSummary": { "additionalProperties": { - "type": "string" + "$ref": "ExecutorStageSummary" }, - "description": "Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API might be overwritten. 
Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", "type": "object" }, - "pythonFileUris": { - "description": "Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.", + "failureReason": { + "type": "string" + }, + "firstTaskLaunchedTime": { + "format": "google-datetime", + "type": "string" + }, + "isShufflePushEnabled": { + "type": "boolean" + }, + "jobIds": { "items": { + "format": "int64", "type": "string" }, "type": "array" - } - }, - "type": "object" - }, - "QueryList": { - "description": "A list of queries to run on a cluster.", - "id": "QueryList", - "properties": { - "queries": { - "description": "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } } ", - "items": { - "type": "string" + }, + "killedTasksSummary": { + "additionalProperties": { + "format": "int32", + "type": "integer" }, - "type": "array" - } - }, - "type": "object" - }, - "RegexValidation": { - "description": "Validation based on regular expressions.", - "id": "RegexValidation", - "properties": { - "regexes": { - "description": "Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient).", - "items": { + "type": "object" + }, + "locality": { + "additionalProperties": { + "format": "int64", "type": "string" }, - "type": "array" - } - }, - "type": "object" - }, - "RepairClusterRequest": { - "description": "A request to repair a cluster.", - "id": "RepairClusterRequest", - "properties": { - "clusterUuid": { - "description": "Optional. Specifying the cluster_uuid means the RPC will fail (with error NOT_FOUND) if a cluster with the specified UUID does not exist.", - "type": "string" + "type": "object" }, - "gracefulDecommissionTimeout": { - "description": "Optional. Timeout for graceful YARN decommissioning. Graceful decommissioning facilitates the removal of cluster nodes without interrupting jobs in progress. The timeout specifies the amount of time to wait for jobs finish before forcefully removing nodes. The default timeout is 0 for forceful decommissioning, and the maximum timeout period is 1 day. (see JSON Mapping—Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).graceful_decommission_timeout is supported in Dataproc image versions 1.2+.", - "format": "google-duration", + "name": { "type": "string" }, - "nodePools": { - "description": "Optional. Node pools and corresponding repair action to be taken. All node pools should be unique in this request. i.e. Multiple entries for the same node pool id are not allowed.", + "numActiveTasks": { + "format": "int32", + "type": "integer" + }, + "numCompleteTasks": { + "format": "int32", + "type": "integer" + }, + "numCompletedIndices": { + "format": "int32", + "type": "integer" + }, + "numFailedTasks": { + "format": "int32", + "type": "integer" + }, + "numKilledTasks": { + "format": "int32", + "type": "integer" + }, + "numTasks": { + "format": "int32", + "type": "integer" + }, + "parentStageIds": { "items": { - "$ref": "NodePool" + "format": "int64", + "type": "string" }, "type": "array" }, - "parentOperationId": { - "description": "Optional. 
operation id of the parent operation sending the repair request", - "type": "string" + "peakExecutorMetrics": { + "$ref": "ExecutorMetrics" }, - "requestId": { - "description": "Optional. A unique ID used to identify the request. If the server receives two RepairClusterRequests with the same ID, the second request is ignored, and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", - "type": "string" - } - }, - "type": "object" - }, - "RepairNodeGroupRequest": { - "id": "RepairNodeGroupRequest", - "properties": { - "instanceNames": { - "description": "Required. Name of instances to be repaired. These instances must belong to specified node pool.", + "rddIds": { "items": { + "format": "int64", "type": "string" }, "type": "array" }, - "repairAction": { - "description": "Required. Repair action to take on specified resources of the node pool.", - "enum": [ - "REPAIR_ACTION_UNSPECIFIED", - "REPLACE" - ], - "enumDescriptions": [ - "No action will be taken by default.", - "replace the specified list of nodes." - ], + "resourceProfileId": { + "format": "int32", + "type": "integer" + }, + "schedulingPool": { "type": "string" }, - "requestId": { - "description": "Optional. A unique ID used to identify the request. If the server receives two RepairNodeGroupRequest with the same ID, the second request is ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", + "shuffleMergersCount": { + "format": "int32", + "type": "integer" + }, + "speculationSummary": { + "$ref": "SpeculationStageSummary" + }, + "stageAttemptId": { + "format": "int32", + "type": "integer" + }, + "stageId": { + "format": "int64", "type": "string" - } - }, - "type": "object" - }, - "RepositoryConfig": { - "description": "Configuration for dependency repositories", - "id": "RepositoryConfig", - "properties": { - "pypiRepositoryConfig": { - "$ref": "PyPiRepositoryConfig", - "description": "Optional. Configuration for PyPi repository." - } - }, - "type": "object" - }, - "ReservationAffinity": { - "description": "Reservation Affinity for consuming Zonal reservation.", - "id": "ReservationAffinity", - "properties": { - "consumeReservationType": { - "description": "Optional. Type of reservation to consume", + }, + "stageMetrics": { + "$ref": "StageMetrics" + }, + "status": { "enum": [ - "TYPE_UNSPECIFIED", - "NO_RESERVATION", - "ANY_RESERVATION", - "SPECIFIC_RESERVATION" + "STAGE_STATUS_UNSPECIFIED", + "STAGE_STATUS_ACTIVE", + "STAGE_STATUS_COMPLETE", + "STAGE_STATUS_FAILED", + "STAGE_STATUS_PENDING", + "STAGE_STATUS_SKIPPED" ], "enumDescriptions": [ "", - "Do not consume from any allocated capacity.", - "Consume any reservation available.", - "Must consume from a specific reservation. Must specify key value fields for specifying the reservations." + "", + "", + "", + "", + "" ], "type": "string" }, - "key": { - "description": "Optional. 
Corresponds to the label key of reservation resource.", + "submissionTime": { + "format": "google-datetime", "type": "string" }, - "values": { - "description": "Optional. Corresponds to the label values of reservation resource.", - "items": { - "type": "string" + "taskQuantileMetrics": { + "$ref": "TaskQuantileMetrics", + "description": "Summary metrics fields. These are included in response only if present in summary_metrics_mask field in request" + }, + "tasks": { + "additionalProperties": { + "$ref": "TaskData" }, - "type": "array" + "type": "object" } }, "type": "object" }, - "ResizeNodeGroupRequest": { - "description": "A request to resize a node group.", - "id": "ResizeNodeGroupRequest", + "StageInputMetrics": { + "description": "Metrics about the input read by the stage.", + "id": "StageInputMetrics", "properties": { - "gracefulDecommissionTimeout": { - "description": "Optional. Timeout for graceful YARN decommissioning. Graceful decommissioning (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/scaling-clusters#graceful_decommissioning) allows the removal of nodes from the Compute Engine node group without interrupting jobs in progress. This timeout specifies how long to wait for jobs in progress to finish before forcefully removing nodes (and potentially interrupting jobs). Default timeout is 0 (for forceful decommission), and the maximum allowed timeout is 1 day. (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).Only supported on Dataproc image versions 1.2 and higher.", - "format": "google-duration", - "type": "string" - }, - "parentOperationId": { - "description": "Optional. operation id of the parent operation sending the resize request", + "bytesRead": { + "format": "int64", "type": "string" }, - "requestId": { - "description": "Optional. A unique ID used to identify the request. If the server receives two ResizeNodeGroupRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.ResizeNodeGroupRequests) with the same ID, the second request is ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", + "recordsRead": { + "format": "int64", "type": "string" - }, - "size": { - "description": "Required. The number of running instances for the node group to maintain. The group adds or removes instances to maintain the number of instances specified by this parameter.", - "format": "int32", - "type": "integer" } }, "type": "object" }, - "RuntimeConfig": { - "description": "Runtime configuration for a workload.", - "id": "RuntimeConfig", + "StageMetrics": { + "description": "Stage Level Aggregated Metrics", + "id": "StageMetrics", "properties": { - "autotuningConfig": { - "$ref": "AutotuningConfig", - "description": "Optional. Autotuning configuration of the workload." + "diskBytesSpilled": { + "format": "int64", + "type": "string" }, - "cohort": { - "description": "Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs.", + "executorCpuTimeNanos": { + "format": "int64", "type": "string" }, - "containerImage": { - "description": "Optional. Optional custom container image for the job runtime environment. 
If not specified, a default container image will be used.", + "executorDeserializeCpuTimeNanos": { + "format": "int64", "type": "string" }, - "properties": { - "additionalProperties": { - "type": "string" - }, - "description": "Optional. A mapping of property names to values, which are used to configure workload execution.", - "type": "object" + "executorDeserializeTimeMillis": { + "format": "int64", + "type": "string" }, - "repositoryConfig": { - "$ref": "RepositoryConfig", - "description": "Optional. Dependency repository configuration." + "executorRunTimeMillis": { + "format": "int64", + "type": "string" }, - "version": { - "description": "Optional. Version of the batch runtime.", + "jvmGcTimeMillis": { + "format": "int64", "type": "string" - } - }, - "type": "object" - }, - "RuntimeInfo": { - "description": "Runtime information about workload execution.", - "id": "RuntimeInfo", - "properties": { - "approximateUsage": { - "$ref": "UsageMetrics", - "description": "Output only. Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).Note: This metric calculation may change in the future, for example, to capture cumulative workload resource consumption during workload execution (see the Dataproc Serverless release notes (https://cloud.google.com/dataproc-serverless/docs/release-notes) for announcements, changes, fixes and other Dataproc developments).", - "readOnly": true }, - "currentUsage": { - "$ref": "UsageSnapshot", - "description": "Output only. Snapshot of current workload resource usage.", - "readOnly": true + "memoryBytesSpilled": { + "format": "int64", + "type": "string" }, - "diagnosticOutputUri": { - "description": "Output only. A URI pointing to the location of the diagnostics tarball.", - "readOnly": true, + "peakExecutionMemoryBytes": { + "format": "int64", "type": "string" }, - "endpoints": { - "additionalProperties": { - "type": "string" - }, - "description": "Output only. Map of remote access endpoints (such as web interfaces and APIs) to their URIs.", - "readOnly": true, - "type": "object" + "resultSerializationTimeMillis": { + "format": "int64", + "type": "string" }, - "outputUri": { - "description": "Output only. A URI pointing to the location of the stdout and stderr of the workload.", - "readOnly": true, + "resultSize": { + "format": "int64", "type": "string" + }, + "stageInputMetrics": { + "$ref": "StageInputMetrics" + }, + "stageOutputMetrics": { + "$ref": "StageOutputMetrics" + }, + "stageShuffleReadMetrics": { + "$ref": "StageShuffleReadMetrics" + }, + "stageShuffleWriteMetrics": { + "$ref": "StageShuffleWriteMetrics" } }, "type": "object" }, - "SecurityConfig": { - "description": "Security related configuration, including encryption, Kerberos, etc.", - "id": "SecurityConfig", + "StageOutputMetrics": { + "description": "Metrics about the output written by the stage.", + "id": "StageOutputMetrics", "properties": { - "identityConfig": { - "$ref": "IdentityConfig", - "description": "Optional. Identity related configuration, including service account based secure multi-tenancy user mappings." + "bytesWritten": { + "format": "int64", + "type": "string" }, - "kerberosConfig": { - "$ref": "KerberosConfig", - "description": "Optional. Kerberos related configuration." 
+ "recordsWritten": { + "format": "int64", + "type": "string" } }, "type": "object" }, - "Session": { - "description": "A representation of a session.", - "id": "Session", + "StageShufflePushReadMetrics": { + "id": "StageShufflePushReadMetrics", "properties": { - "createTime": { - "description": "Output only. The time when the session was created.", - "format": "google-datetime", - "readOnly": true, + "corruptMergedBlockChunks": { + "format": "int64", "type": "string" }, - "creator": { - "description": "Output only. The email address of the user who created the session.", - "readOnly": true, + "localMergedBlocksFetched": { + "format": "int64", "type": "string" }, - "environmentConfig": { - "$ref": "EnvironmentConfig", - "description": "Optional. Environment configuration for the session execution." + "localMergedBytesRead": { + "format": "int64", + "type": "string" }, - "jupyterSession": { - "$ref": "JupyterConfig", - "description": "Optional. Jupyter session config." + "localMergedChunksFetched": { + "format": "int64", + "type": "string" }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Optional. The labels to associate with the session. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a session.", - "type": "object" + "mergedFetchFallbackCount": { + "format": "int64", + "type": "string" }, - "name": { - "description": "Required. The resource name of the session.", + "remoteMergedBlocksFetched": { + "format": "int64", "type": "string" }, - "runtimeConfig": { - "$ref": "RuntimeConfig", - "description": "Optional. Runtime configuration for the session execution." + "remoteMergedBytesRead": { + "format": "int64", + "type": "string" }, - "runtimeInfo": { - "$ref": "RuntimeInfo", - "description": "Output only. Runtime information about session execution.", - "readOnly": true + "remoteMergedChunksFetched": { + "format": "int64", + "type": "string" }, - "sessionTemplate": { - "description": "Optional. The session template used by the session.Only resource names, including project ID and location, are valid.Example: * https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id] * projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id]The template must be in the same project and Dataproc region as the session.", + "remoteMergedReqsDuration": { + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "StageShuffleReadMetrics": { + "description": "Shuffle data read for the stage.", + "id": "StageShuffleReadMetrics", + "properties": { + "bytesRead": { + "format": "int64", "type": "string" }, - "state": { - "description": "Output only. A state of the session.", - "enum": [ - "STATE_UNSPECIFIED", - "CREATING", - "ACTIVE", - "TERMINATING", - "TERMINATED", - "FAILED" - ], - "enumDescriptions": [ - "The session state is unknown.", - "The session is created prior to running.", - "The session is running.", - "The session is terminating.", - "The session is terminated successfully.", - "The session is no longer running due to an error." - ], - "readOnly": true, + "fetchWaitTimeMillis": { + "format": "int64", "type": "string" }, - "stateHistory": { - "description": "Output only. 
Historical state information for the session.", - "items": { - "$ref": "SessionStateHistory" - }, - "readOnly": true, - "type": "array" + "localBlocksFetched": { + "format": "int64", + "type": "string" }, - "stateMessage": { - "description": "Output only. Session state details, such as the failure description if the state is FAILED.", - "readOnly": true, + "localBytesRead": { + "format": "int64", "type": "string" }, - "stateTime": { - "description": "Output only. The time when the session entered the current state.", - "format": "google-datetime", - "readOnly": true, + "recordsRead": { + "format": "int64", "type": "string" }, - "user": { - "description": "Optional. The email address of the user who owns the session.", + "remoteBlocksFetched": { + "format": "int64", + "type": "string" + }, + "remoteBytesRead": { + "format": "int64", + "type": "string" + }, + "remoteBytesReadToDisk": { + "format": "int64", "type": "string" }, - "uuid": { - "description": "Output only. A session UUID (Unique Universal Identifier). The service generates this value when it creates the session.", - "readOnly": true, + "remoteReqsDuration": { + "format": "int64", "type": "string" + }, + "stageShufflePushReadMetrics": { + "$ref": "StageShufflePushReadMetrics" } }, "type": "object" }, - "SessionOperationMetadata": { - "description": "Metadata describing the Session operation.", - "id": "SessionOperationMetadata", + "StageShuffleWriteMetrics": { + "description": "Shuffle data written for the stage.", + "id": "StageShuffleWriteMetrics", "properties": { - "createTime": { - "description": "The time when the operation was created.", - "format": "google-datetime", + "bytesWritten": { + "format": "int64", "type": "string" }, - "description": { - "description": "Short description of the operation.", + "recordsWritten": { + "format": "int64", "type": "string" }, - "doneTime": { - "description": "The time when the operation was finished.", - "format": "google-datetime", + "writeTimeNanos": { + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "StagesSummary": { + "description": "Data related to Stages page summary", + "id": "StagesSummary", + "properties": { + "applicationId": { "type": "string" }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Labels associated with the operation.", - "type": "object" + "numActiveStages": { + "format": "int32", + "type": "integer" }, - "operationType": { - "description": "The operation type.", - "enum": [ - "SESSION_OPERATION_TYPE_UNSPECIFIED", - "CREATE", - "TERMINATE", - "DELETE" - ], - "enumDescriptions": [ - "Session operation type is unknown.", - "Create Session operation type.", - "Terminate Session operation type.", - "Delete Session operation type." - ], - "type": "string" + "numCompletedStages": { + "format": "int32", + "type": "integer" }, - "session": { - "description": "Name of the session for the operation.", - "type": "string" + "numFailedStages": { + "format": "int32", + "type": "integer" }, - "sessionUuid": { - "description": "Session UUID for the operation.", + "numPendingStages": { + "format": "int32", + "type": "integer" + }, + "numSkippedStages": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "StartClusterRequest": { + "description": "A request to start a cluster.", + "id": "StartClusterRequest", + "properties": { + "clusterUuid": { + "description": "Optional. 
Specifying the cluster_uuid means the RPC will fail (with error NOT_FOUND) if a cluster with the specified UUID does not exist.", "type": "string" }, "requestId": { "description": "Optional. A unique ID used to identify the request. If the server receives two StartClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", "type": "string" } }, "type": "object" }, "StartupConfig": { "description": "Configuration to handle the startup of instances during cluster create and update process.", "id": "StartupConfig", "properties": { "requiredRegistrationFraction": { "description": "Optional. The config setting to enable cluster creation/update to be successful only after required_registration_fraction of instances are up and running. This configuration is applicable only to secondary workers for now. The cluster will fail if required_registration_fraction of instances are not available. This will include instance creation, agent registration, and service registration (if enabled).", "format": "double", "type": "number" } }, "type": "object" }, "StateHistory": { "description": "Historical state information.", "id": "StateHistory", "properties": { "state": { "description": "Output only. The state of the batch at this point in history.", "enum": [ "STATE_UNSPECIFIED", "PENDING", "RUNNING", "CANCELLING", "CANCELLED", "SUCCEEDED", "FAILED" ], "enumDescriptions": [ "The batch state is unknown.", "The batch is created before running.", "The batch is running.", "The batch is cancelling.", "The batch cancellation was successful.", "The batch completed successfully.", "The batch is no longer running due to an error." ], "readOnly": true, "type": "string" }, "stateMessage": { "description": "Output only. Details about the state at this point in history.", "readOnly": true, "type": "string" }, "stateStartTime": { "description": "Output only. 
The time when the batch entered the historical state.", "format": "google-datetime", "readOnly": true, "type": "string" @@ -6515,555 +11652,569 @@ }, "type": "object" }, - "SessionTemplate": { - "description": "A representation of a session template.", - "id": "SessionTemplate", + "StateOperatorProgress": { + "id": "StateOperatorProgress", "properties": { - "createTime": { - "description": "Output only. The time when the template was created.", - "format": "google-datetime", - "readOnly": true, + "allRemovalsTimeMs": { + "format": "int64", "type": "string" }, - "creator": { - "description": "Output only. The email address of the user who created the template.", - "readOnly": true, + "allUpdatesTimeMs": { + "format": "int64", "type": "string" }, - "description": { - "description": "Optional. Brief description of the template.", + "commitTimeMs": { + "format": "int64", "type": "string" }, - "environmentConfig": { - "$ref": "EnvironmentConfig", - "description": "Optional. Environment configuration for session execution." - }, - "jupyterSession": { - "$ref": "JupyterConfig", - "description": "Optional. Jupyter session config." - }, - "labels": { + "customMetrics": { "additionalProperties": { + "format": "int64", "type": "string" }, - "description": "Optional. Labels to associate with sessions created using this template. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty, but, if present, must contain 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a session.", "type": "object" }, - "name": { - "description": "Required. The resource name of the session template.", + "memoryUsedBytes": { + "format": "int64", "type": "string" }, - "runtimeConfig": { - "$ref": "RuntimeConfig", - "description": "Optional. Runtime configuration for session execution." + "numRowsDroppedByWatermark": { + "format": "int64", + "type": "string" }, - "updateTime": { - "description": "Output only. The time the template was last updated.", - "format": "google-datetime", - "readOnly": true, + "numRowsRemoved": { + "format": "int64", "type": "string" }, - "uuid": { - "description": "Output only. A session template UUID (Unique Universal Identifier). The service generates this value when it creates the session template.", - "readOnly": true, + "numRowsTotal": { + "format": "int64", "type": "string" - } - }, - "type": "object" - }, - "SetIamPolicyRequest": { - "description": "Request message for SetIamPolicy method.", - "id": "SetIamPolicyRequest", - "properties": { - "policy": { - "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the resource. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Google Cloud services (such as Projects) might reject them." - } - }, - "type": "object" - }, - "ShieldedInstanceConfig": { - "description": "Shielded Instance Config for clusters using Compute Engine Shielded VMs (https://cloud.google.com/security/shielded-cloud/shielded-vm).", - "id": "ShieldedInstanceConfig", - "properties": { - "enableIntegrityMonitoring": { - "description": "Optional. Defines whether instances have integrity monitoring enabled.", - "type": "boolean" }, - "enableSecureBoot": { - "description": "Optional. 
Defines whether instances have Secure Boot enabled.", - "type": "boolean" + "numRowsUpdated": { + "format": "int64", + "type": "string" }, - "enableVtpm": { - "description": "Optional. Defines whether instances have the vTPM enabled.", - "type": "boolean" + "numShufflePartitions": { + "format": "int64", + "type": "string" + }, + "numStateStoreInstances": { + "format": "int64", + "type": "string" + }, + "operatorName": { + "type": "string" } }, "type": "object" }, - "SoftwareConfig": { - "description": "Specifies the selection and config of software inside the cluster.", - "id": "SoftwareConfig", + "Status": { + "description": "The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors).", + "id": "Status", "properties": { - "imageVersion": { - "description": "Optional. The version of software inside the cluster. It must be one of the supported Dataproc Versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as \"1.2\" (including a subminor version, such as \"1.2.29\"), or the \"preview\" version (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.", - "type": "string" + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" }, - "optionalComponents": { - "description": "Optional. The set of components to activate on the cluster.", - "items": { - "enum": [ - "COMPONENT_UNSPECIFIED", - "ANACONDA", - "DOCKER", - "DRUID", - "FLINK", - "HBASE", - "HIVE_WEBHCAT", - "HUDI", - "JUPYTER", - "PRESTO", - "TRINO", - "RANGER", - "SOLR", - "ZEPPELIN", - "ZOOKEEPER" - ], - "enumDescriptions": [ - "Unspecified component. Specifying this will cause Cluster creation to fail.", - "The Anaconda python distribution. The Anaconda component is not supported in the Dataproc 2.0 image. The 2.0 image is pre-installed with Miniconda.", - "Docker", - "The Druid query engine. (alpha)", - "Flink", - "HBase. (beta)", - "The Hive Web HCatalog (the REST service for accessing HCatalog).", - "Hudi.", - "The Jupyter Notebook.", - "The Presto query engine.", - "The Trino query engine.", - "The Ranger service.", - "The Solr service.", - "The Zeppelin notebook.", - "The Zookeeper service." - ], - "type": "string" + "details": { + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", + "items": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" }, "type": "array" }, - "properties": { - "additionalProperties": { - "type": "string" - }, - "description": "Optional. The properties to set on daemon config files.Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. 
The following are supported prefixes and their mappings: capacity-scheduler: capacity-scheduler.xml core: core-site.xml distcp: distcp-default.xml hdfs: hdfs-site.xml hive: hive-site.xml mapred: mapred-site.xml pig: pig.properties spark: spark-defaults.conf yarn: yarn-site.xmlFor more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).", - "type": "object" + "message": { + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", + "type": "string" } }, "type": "object" }, - "SparkBatch": { - "description": "A configuration for running an Apache Spark (https://spark.apache.org/) batch workload.", - "id": "SparkBatch", + "StopClusterRequest": { + "description": "A request to stop a cluster.", + "id": "StopClusterRequest", "properties": { - "archiveUris": { - "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", - "items": { - "type": "string" - }, - "type": "array" + "clusterUuid": { + "description": "Optional. Specifying the cluster_uuid means the RPC will fail (with error NOT_FOUND) if a cluster with the specified UUID does not exist.", + "type": "string" }, - "args": { - "description": "Optional. The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.", - "items": { - "type": "string" - }, - "type": "array" + "requestId": { + "description": "Optional. A unique ID used to identify the request. If the server receives two StopClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", + "type": "string" + } + }, + "type": "object" + }, + "StreamBlockData": { + "description": "Stream Block Data.", + "id": "StreamBlockData", + "properties": { + "deserialized": { + "type": "boolean" }, - "fileUris": { - "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor.", - "items": { - "type": "string" - }, - "type": "array" + "diskSize": { + "format": "int64", + "type": "string" }, - "jarFileUris": { - "description": "Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.", - "items": { - "type": "string" - }, - "type": "array" + "executorId": { + "type": "string" }, - "mainClass": { - "description": "Optional. The name of the driver main class. The jar file that contains the class must be in the classpath or specified in jar_file_uris.", + "hostPort": { "type": "string" }, - "mainJarFileUri": { - "description": "Optional. 
The HCFS URI of the jar file that contains the main class.", + "memSize": { + "format": "int64", + "type": "string" + }, + "name": { "type": "string" + }, + "storageLevel": { + "type": "string" + }, + "useDisk": { + "type": "boolean" + }, + "useMemory": { + "type": "boolean" } }, "type": "object" }, - "SparkHistoryServerConfig": { - "description": "Spark History Server configuration for the workload.", - "id": "SparkHistoryServerConfig", + "StreamingQueryData": { + "description": "Streaming", + "id": "StreamingQueryData", "properties": { - "dataprocCluster": { - "description": "Optional. Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.Example: projects/[project_id]/regions/[region]/clusters/[cluster_name]", + "endTimestamp": { + "format": "int64", + "type": "string" + }, + "exception": { + "type": "string" + }, + "isActive": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "runId": { + "type": "string" + }, + "startTimestamp": { + "format": "int64", + "type": "string" + }, + "streamingQueryId": { "type": "string" } }, "type": "object" }, - "SparkJob": { - "description": "A Dataproc job for running Apache Spark (https://spark.apache.org/) applications on YARN.", - "id": "SparkJob", + "StreamingQueryProgress": { + "id": "StreamingQueryProgress", "properties": { - "archiveUris": { - "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", - "items": { - "type": "string" - }, - "type": "array" + "batchDuration": { + "format": "int64", + "type": "string" }, - "args": { - "description": "Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", - "items": { - "type": "string" - }, - "type": "array" + "batchId": { + "format": "int64", + "type": "string" }, - "fileUris": { - "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", - "items": { + "durationMillis": { + "additionalProperties": { + "format": "int64", "type": "string" }, - "type": "array" + "type": "object" }, - "jarFileUris": { - "description": "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.", - "items": { + "eventTime": { + "additionalProperties": { "type": "string" }, - "type": "array" - }, - "loggingConfig": { - "$ref": "LoggingConfig", - "description": "Optional. The runtime log config for job execution." - }, - "mainClass": { - "description": "The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in SparkJob.jar_file_uris.", - "type": "string" + "type": "object" }, - "mainJarFileUri": { - "description": "The HCFS URI of the jar file that contains the main class.", + "name": { "type": "string" }, - "properties": { + "observedMetrics": { "additionalProperties": { "type": "string" }, - "description": "Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API might be overwritten. 
Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", "type": "object" - } - }, - "type": "object" - }, - "SparkRBatch": { - "description": "A configuration for running an Apache SparkR (https://spark.apache.org/docs/latest/sparkr.html) batch workload.", - "id": "SparkRBatch", - "properties": { - "archiveUris": { - "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", - "items": { - "type": "string" - }, - "type": "array" }, - "args": { - "description": "Optional. The arguments to pass to the Spark driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.", + "runId": { + "type": "string" + }, + "sink": { + "$ref": "SinkProgress" + }, + "sources": { "items": { - "type": "string" + "$ref": "SourceProgress" }, "type": "array" }, - "fileUris": { - "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor.", + "stateOperators": { "items": { - "type": "string" + "$ref": "StateOperatorProgress" }, "type": "array" }, - "mainRFileUri": { - "description": "Required. The HCFS URI of the main R file to use as the driver. Must be a .R or .r file.", + "streamingQueryProgressId": { + "type": "string" + }, + "timestamp": { "type": "string" } }, "type": "object" }, - "SparkRJob": { - "description": "A Dataproc job for running Apache SparkR (https://spark.apache.org/docs/latest/sparkr.html) applications on YARN.", - "id": "SparkRJob", + "SubmitJobRequest": { + "description": "A request to submit a job.", + "id": "SubmitJobRequest", "properties": { - "archiveUris": { - "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", - "items": { - "type": "string" - }, - "type": "array" - }, - "args": { - "description": "Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", - "items": { - "type": "string" - }, - "type": "array" - }, - "fileUris": { - "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", - "items": { - "type": "string" - }, - "type": "array" + "job": { + "$ref": "Job", + "description": "Required. The job resource." }, - "loggingConfig": { - "$ref": "LoggingConfig", - "description": "Optional. The runtime log config for job execution." + "requestId": { + "description": "Optional. A unique id used to identify the request. If the server receives two SubmitJobRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s with the same id, then the second request will be ignored and the first Job created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). 
The maximum length is 40 characters.", + "type": "string" + } + }, + "type": "object" + }, + "SummarizeSessionSparkApplicationExecutorsResponse": { + "description": "Consolidated summary of executors for a Spark Application.", + "id": "SummarizeSessionSparkApplicationExecutorsResponse", + "properties": { + "activeExecutorSummary": { + "$ref": "ConsolidatedExecutorSummary", + "description": "Consolidated summary for active executors." }, - "mainRFileUri": { - "description": "Required. The HCFS URI of the main R file to use as the driver. Must be a .R file.", + "applicationId": { + "description": "Spark Application Id", "type": "string" }, - "properties": { - "additionalProperties": { - "type": "string" - }, - "description": "Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", - "type": "object" + "deadExecutorSummary": { + "$ref": "ConsolidatedExecutorSummary", + "description": "Consolidated summary for dead executors." + }, + "totalExecutorSummary": { + "$ref": "ConsolidatedExecutorSummary", + "description": "Overall consolidated summary for all executors." } }, "type": "object" }, - "SparkSqlBatch": { - "description": "A configuration for running Apache Spark SQL (https://spark.apache.org/sql/) queries as a batch workload.", - "id": "SparkSqlBatch", + "SummarizeSessionSparkApplicationJobsResponse": { + "description": "Summary of a Spark Application jobs.", + "id": "SummarizeSessionSparkApplicationJobsResponse", "properties": { - "jarFileUris": { - "description": "Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.", - "items": { - "type": "string" - }, - "type": "array" + "jobsSummary": { + "$ref": "JobsSummary", + "description": "Summary of a Spark Application Jobs" + } + }, + "type": "object" + }, + "SummarizeSessionSparkApplicationStageAttemptTasksResponse": { + "description": "Summary of tasks for a Spark Application stage attempt.", + "id": "SummarizeSessionSparkApplicationStageAttemptTasksResponse", + "properties": { + "stageAttemptTasksSummary": { + "$ref": "StageAttemptTasksSummary", + "description": "Summary of tasks for a Spark Application Stage Attempt" + } + }, + "type": "object" + }, + "SummarizeSessionSparkApplicationStagesResponse": { + "description": "Summary of a Spark Application stages.", + "id": "SummarizeSessionSparkApplicationStagesResponse", + "properties": { + "stagesSummary": { + "$ref": "StagesSummary", + "description": "Summary of a Spark Application Stages" + } + }, + "type": "object" + }, + "SummarizeSparkApplicationExecutorsResponse": { + "description": "Consolidated summary of executors for a Spark Application.", + "id": "SummarizeSparkApplicationExecutorsResponse", + "properties": { + "activeExecutorSummary": { + "$ref": "ConsolidatedExecutorSummary", + "description": "Consolidated summary for active executors." }, - "queryFileUri": { - "description": "Required. The HCFS URI of the script that contains Spark SQL queries to execute.", + "applicationId": { + "description": "Spark Application Id", "type": "string" }, - "queryVariables": { - "additionalProperties": { - "type": "string" - }, - "description": "Optional. 
Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";).", - "type": "object" + "deadExecutorSummary": { + "$ref": "ConsolidatedExecutorSummary", + "description": "Consolidated summary for dead executors." + }, + "totalExecutorSummary": { + "$ref": "ConsolidatedExecutorSummary", + "description": "Overall consolidated summary for all executors." + } + }, + "type": "object" + }, + "SummarizeSparkApplicationJobsResponse": { + "description": "Summary of a Spark Application jobs.", + "id": "SummarizeSparkApplicationJobsResponse", + "properties": { + "jobsSummary": { + "$ref": "JobsSummary", + "description": "Summary of a Spark Application Jobs" + } + }, + "type": "object" + }, + "SummarizeSparkApplicationStageAttemptTasksResponse": { + "description": "Summary of tasks for a Spark Application stage attempt.", + "id": "SummarizeSparkApplicationStageAttemptTasksResponse", + "properties": { + "stageAttemptTasksSummary": { + "$ref": "StageAttemptTasksSummary", + "description": "Summary of tasks for a Spark Application Stage Attempt" } }, "type": "object" }, - "SparkSqlJob": { - "description": "A Dataproc job for running Apache Spark SQL (https://spark.apache.org/sql/) queries.", - "id": "SparkSqlJob", + "SummarizeSparkApplicationStagesResponse": { + "description": "Summary of a Spark Application stages.", + "id": "SummarizeSparkApplicationStagesResponse", "properties": { - "jarFileUris": { - "description": "Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.", + "stagesSummary": { + "$ref": "StagesSummary", + "description": "Summary of a Spark Application Stages" + } + }, + "type": "object" + }, + "TaskData": { + "description": "Data corresponding to tasks created by spark.", + "id": "TaskData", + "properties": { + "accumulatorUpdates": { "items": { - "type": "string" + "$ref": "AccumulableInfo" }, "type": "array" }, - "loggingConfig": { - "$ref": "LoggingConfig", - "description": "Optional. The runtime log config for job execution." + "attempt": { + "format": "int32", + "type": "integer" }, - "properties": { - "additionalProperties": { - "type": "string" - }, - "description": "Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API might be overwritten.", - "type": "object" + "durationMillis": { + "format": "int64", + "type": "string" }, - "queryFileUri": { - "description": "The HCFS URI of the script that contains SQL queries.", + "errorMessage": { "type": "string" }, - "queryList": { - "$ref": "QueryList", - "description": "A list of queries." + "executorId": { + "type": "string" }, - "scriptVariables": { + "executorLogs": { "additionalProperties": { "type": "string" }, - "description": "Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";).", "type": "object" - } - }, - "type": "object" - }, - "SparkStandaloneAutoscalingConfig": { - "description": "Basic autoscaling configurations for Spark Standalone.", - "id": "SparkStandaloneAutoscalingConfig", - "properties": { - "gracefulDecommissionTimeout": { - "description": "Required. Timeout for Spark graceful decommissioning of spark workers. Specifies the duration to wait for spark worker to complete spark decommissioning tasks before forcefully removing workers. 
Only applicable to downscaling operations.Bounds: 0s, 1d.", - "format": "google-duration", + }, + "gettingResultTimeMillis": { + "format": "int64", "type": "string" }, - "removeOnlyIdleWorkers": { - "description": "Optional. Remove only idle workers when scaling down cluster", + "hasMetrics": { "type": "boolean" }, - "scaleDownFactor": { - "description": "Required. Fraction of required executors to remove from Spark Serverless clusters. A scale-down factor of 1.0 will result in scaling down so that there are no more executors for the Spark Job.(more aggressive scaling). A scale-down factor closer to 0 will result in a smaller magnitude of scaling donw (less aggressive scaling).Bounds: 0.0, 1.0.", - "format": "double", - "type": "number" + "host": { + "type": "string" }, - "scaleDownMinWorkerFraction": { - "description": "Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.", - "format": "double", - "type": "number" + "index": { + "format": "int32", + "type": "integer" }, - "scaleUpFactor": { - "description": "Required. Fraction of required workers to add to Spark Standalone clusters. A scale-up factor of 1.0 will result in scaling up so that there are no more required workers for the Spark Job (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling).Bounds: 0.0, 1.0.", - "format": "double", - "type": "number" + "launchTime": { + "format": "google-datetime", + "type": "string" }, - "scaleUpMinWorkerFraction": { - "description": "Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.", - "format": "double", - "type": "number" - } - }, - "type": "object" - }, - "StartClusterRequest": { - "description": "A request to start a cluster.", - "id": "StartClusterRequest", - "properties": { - "clusterUuid": { - "description": "Optional. Specifying the cluster_uuid means the RPC will fail (with error NOT_FOUND) if a cluster with the specified UUID does not exist.", + "partitionId": { + "format": "int32", + "type": "integer" + }, + "resultFetchStart": { + "format": "google-datetime", "type": "string" }, - "requestId": { - "description": "Optional. A unique ID used to identify the request. If the server receives two StartClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). 
The maximum length is 40 characters.", + "schedulerDelayMillis": { + "format": "int64", "type": "string" - } - }, - "type": "object" - }, - "StartupConfig": { - "description": "Configuration to handle the startup of instances during cluster create and update process.", - "id": "StartupConfig", - "properties": { - "requiredRegistrationFraction": { - "description": "Optional. The config setting to enable cluster creation/ updation to be successful only after required_registration_fraction of instances are up and running. This configuration is applicable to only secondary workers for now. The cluster will fail if required_registration_fraction of instances are not available. This will include instance creation, agent registration, and service registration (if enabled).", - "format": "double", - "type": "number" - } - }, - "type": "object" - }, - "StateHistory": { - "description": "Historical state information.", - "id": "StateHistory", - "properties": { - "state": { - "description": "Output only. The state of the batch at this point in history.", - "enum": [ - "STATE_UNSPECIFIED", - "PENDING", - "RUNNING", - "CANCELLING", - "CANCELLED", - "SUCCEEDED", - "FAILED" - ], - "enumDescriptions": [ - "The batch state is unknown.", - "The batch is created before running.", - "The batch is running.", - "The batch is cancelling.", - "The batch cancellation was successful.", - "The batch completed successfully.", - "The batch is no longer running due to an error." - ], - "readOnly": true, + }, + "speculative": { + "type": "boolean" + }, + "stageAttemptId": { + "format": "int32", + "type": "integer" + }, + "stageId": { + "format": "int64", "type": "string" }, - "stateMessage": { - "description": "Output only. Details about the state at this point in history.", - "readOnly": true, + "status": { "type": "string" }, - "stateStartTime": { - "description": "Output only. The time when the batch entered the historical state.", - "format": "google-datetime", - "readOnly": true, + "taskId": { + "format": "int64", "type": "string" + }, + "taskLocality": { + "type": "string" + }, + "taskMetrics": { + "$ref": "TaskMetrics" } }, "type": "object" }, - "Status": { - "description": "The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors).", - "id": "Status", + "TaskMetrics": { + "description": "Executor Task Metrics", + "id": "TaskMetrics", "properties": { - "code": { - "description": "The status code, which should be an enum value of google.rpc.Code.", - "format": "int32", - "type": "integer" + "diskBytesSpilled": { + "format": "int64", + "type": "string" }, - "details": { - "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", - "items": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "type": "object" - }, - "type": "array" + "executorCpuTimeNanos": { + "format": "int64", + "type": "string" }, - "message": { - "description": "A developer-facing error message, which should be in English. 
Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", + "executorDeserializeCpuTimeNanos": { + "format": "int64", + "type": "string" + }, + "executorDeserializeTimeMillis": { + "format": "int64", + "type": "string" + }, + "executorRunTimeMillis": { + "format": "int64", + "type": "string" + }, + "inputMetrics": { + "$ref": "InputMetrics" + }, + "jvmGcTimeMillis": { + "format": "int64", + "type": "string" + }, + "memoryBytesSpilled": { + "format": "int64", + "type": "string" + }, + "outputMetrics": { + "$ref": "OutputMetrics" + }, + "peakExecutionMemoryBytes": { + "format": "int64", + "type": "string" + }, + "resultSerializationTimeMillis": { + "format": "int64", + "type": "string" + }, + "resultSize": { + "format": "int64", "type": "string" + }, + "shuffleReadMetrics": { + "$ref": "ShuffleReadMetrics" + }, + "shuffleWriteMetrics": { + "$ref": "ShuffleWriteMetrics" } }, "type": "object" }, - "StopClusterRequest": { - "description": "A request to stop a cluster.", - "id": "StopClusterRequest", + "TaskQuantileMetrics": { + "id": "TaskQuantileMetrics", "properties": { - "clusterUuid": { - "description": "Optional. Specifying the cluster_uuid means the RPC will fail (with error NOT_FOUND) if a cluster with the specified UUID does not exist.", - "type": "string" + "diskBytesSpilled": { + "$ref": "Quantiles" }, - "requestId": { - "description": "Optional. A unique ID used to identify the request. If the server receives two StopClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", - "type": "string" + "durationMillis": { + "$ref": "Quantiles" + }, + "executorCpuTimeNanos": { + "$ref": "Quantiles" + }, + "executorDeserializeCpuTimeNanos": { + "$ref": "Quantiles" + }, + "executorDeserializeTimeMillis": { + "$ref": "Quantiles" + }, + "executorRunTimeMillis": { + "$ref": "Quantiles" + }, + "gettingResultTimeMillis": { + "$ref": "Quantiles" + }, + "inputMetrics": { + "$ref": "InputQuantileMetrics" + }, + "jvmGcTimeMillis": { + "$ref": "Quantiles" + }, + "memoryBytesSpilled": { + "$ref": "Quantiles" + }, + "outputMetrics": { + "$ref": "OutputQuantileMetrics" + }, + "peakExecutionMemoryBytes": { + "$ref": "Quantiles" + }, + "resultSerializationTimeMillis": { + "$ref": "Quantiles" + }, + "resultSize": { + "$ref": "Quantiles" + }, + "schedulerDelayMillis": { + "$ref": "Quantiles" + }, + "shuffleReadMetrics": { + "$ref": "ShuffleReadQuantileMetrics" + }, + "shuffleWriteMetrics": { + "$ref": "ShuffleWriteQuantileMetrics" } }, "type": "object" }, - "SubmitJobRequest": { - "description": "A request to submit a job.", - "id": "SubmitJobRequest", + "TaskResourceRequest": { + "description": "Resources used per task created by the application.", + "id": "TaskResourceRequest", "properties": { - "job": { - "$ref": "Job", - "description": "Required. The job resource." + "amount": { + "format": "double", + "type": "number" }, - "requestId": { - "description": "Optional. A unique id used to identify the request. 
If the server receives two SubmitJobRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s with the same id, then the second request will be ignored and the first Job created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", + "resourceName": { "type": "string" } }, @@ -7519,6 +12670,53 @@ }, "type": "object" }, + "WriteSessionSparkApplicationContextRequest": { + "description": "Write Spark Application data to internal storage systems", + "id": "WriteSessionSparkApplicationContextRequest", + "properties": { + "parent": { + "description": "Required. Parent (Batch) resource reference.", + "type": "string" + }, + "sparkWrapperObjects": { + "description": "Required. The batch of spark application context objects sent for ingestion.", + "items": { + "$ref": "SparkWrapperObject" + }, + "type": "array" + } + }, + "type": "object" + }, + "WriteSessionSparkApplicationContextResponse": { + "description": "Response returned as an acknowledgement of receipt of data.", + "id": "WriteSessionSparkApplicationContextResponse", + "properties": {}, + "type": "object" + }, + "WriteSparkApplicationContextRequest": { + "description": "Write Spark Application data to internal storage systems", + "id": "WriteSparkApplicationContextRequest", + "properties": { + "parent": { + "description": "Required. Parent (Batch) resource reference.", + "type": "string" + }, + "sparkWrapperObjects": { + "items": { + "$ref": "SparkWrapperObject" + }, + "type": "array" + } + }, + "type": "object" + }, + "WriteSparkApplicationContextResponse": { + "description": "Response returned as an acknowledgement of receipt of data.", + "id": "WriteSparkApplicationContextResponse", + "properties": {}, + "type": "object" + }, "YarnApplication": { "description": "A YARN application created by a job. Application information is a subset of org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto.Beta Feature: This report is available for testing purposes only. 
It may be changed before final release.", "id": "YarnApplication", diff --git a/terraform/providers/google/vendor/google.golang.org/api/dataproc/v1/dataproc-gen.go b/terraform/providers/google/vendor/google.golang.org/api/dataproc/v1/dataproc-gen.go index bb21c2d084b..4df61dc2b2e 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/dataproc/v1/dataproc-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/dataproc/v1/dataproc-gen.go @@ -209,11 +209,23 @@ type ProjectsLocationsAutoscalingPoliciesService struct { func NewProjectsLocationsBatchesService(s *Service) *ProjectsLocationsBatchesService { rs := &ProjectsLocationsBatchesService{s: s} + rs.SparkApplications = NewProjectsLocationsBatchesSparkApplicationsService(s) return rs } type ProjectsLocationsBatchesService struct { s *Service + + SparkApplications *ProjectsLocationsBatchesSparkApplicationsService +} + +func NewProjectsLocationsBatchesSparkApplicationsService(s *Service) *ProjectsLocationsBatchesSparkApplicationsService { + rs := &ProjectsLocationsBatchesSparkApplicationsService{s: s} + return rs +} + +type ProjectsLocationsBatchesSparkApplicationsService struct { + s *Service } func NewProjectsLocationsOperationsService(s *Service) *ProjectsLocationsOperationsService { @@ -236,11 +248,23 @@ type ProjectsLocationsSessionTemplatesService struct { func NewProjectsLocationsSessionsService(s *Service) *ProjectsLocationsSessionsService { rs := &ProjectsLocationsSessionsService{s: s} + rs.SparkApplications = NewProjectsLocationsSessionsSparkApplicationsService(s) return rs } type ProjectsLocationsSessionsService struct { s *Service + + SparkApplications *ProjectsLocationsSessionsSparkApplicationsService +} + +func NewProjectsLocationsSessionsSparkApplicationsService(s *Service) *ProjectsLocationsSessionsSparkApplicationsService { + rs := &ProjectsLocationsSessionsSparkApplicationsService{s: s} + return rs +} + +type ProjectsLocationsSessionsSparkApplicationsService struct { + s *Service } func NewProjectsLocationsWorkflowTemplatesService(s *Service) *ProjectsLocationsWorkflowTemplatesService { @@ -365,5702 +389,14140 @@ type AcceleratorConfig struct { NullFields []string `json:"-"` } -func (s *AcceleratorConfig) MarshalJSON() ([]byte, error) { +func (s AcceleratorConfig) MarshalJSON() ([]byte, error) { type NoMethod AcceleratorConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// AnalyzeBatchRequest: A request to analyze a batch workload. -type AnalyzeBatchRequest struct { - // RequestId: Optional. A unique ID used to identify the request. If the - // service receives two AnalyzeBatchRequest - // (http://cloud/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.AnalyzeBatchRequest)s - // with the same request_id, the second request is ignored and the Operation - // that corresponds to the first request created and stored in the backend is - // returned.Recommendation: Set this value to a UUID - // (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must - // contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens - // (-). The maximum length is 40 characters. - RequestId string `json:"requestId,omitempty"` - // ForceSendFields is a list of field names (e.g. "RequestId") to - // unconditionally include in API requests. 
By default, fields with empty or +// AccessSessionSparkApplicationEnvironmentInfoResponse: Environment details of +// a Spark Application. +type AccessSessionSparkApplicationEnvironmentInfoResponse struct { + // ApplicationEnvironmentInfo: Details about the Environment that the + // application is running in. + ApplicationEnvironmentInfo *ApplicationEnvironmentInfo `json:"applicationEnvironmentInfo,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "ApplicationEnvironmentInfo") + // to unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "RequestId") to include in API + // NullFields is a list of field names (e.g. "ApplicationEnvironmentInfo") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s AccessSessionSparkApplicationEnvironmentInfoResponse) MarshalJSON() ([]byte, error) { + type NoMethod AccessSessionSparkApplicationEnvironmentInfoResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// AccessSessionSparkApplicationJobResponse: Details of a particular job +// associated with Spark Application +type AccessSessionSparkApplicationJobResponse struct { + // JobData: Output only. Data corresponding to a spark job. + JobData *JobData `json:"jobData,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "JobData") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "JobData") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *AnalyzeBatchRequest) MarshalJSON() ([]byte, error) { - type NoMethod AnalyzeBatchRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s AccessSessionSparkApplicationJobResponse) MarshalJSON() ([]byte, error) { + type NoMethod AccessSessionSparkApplicationJobResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// AnalyzeOperationMetadata: Metadata describing the Analyze operation. -type AnalyzeOperationMetadata struct { - // AnalyzedWorkloadName: Output only. name of the workload being analyzed. - AnalyzedWorkloadName string `json:"analyzedWorkloadName,omitempty"` - // AnalyzedWorkloadType: Output only. Type of the workload being analyzed. - // - // Possible values: - // "WORKLOAD_TYPE_UNSPECIFIED" - Undefined option - // "BATCH" - Serverless batch job - AnalyzedWorkloadType string `json:"analyzedWorkloadType,omitempty"` - // AnalyzedWorkloadUuid: Output only.
unique identifier of the workload - // typically generated by control plane. E.g. batch uuid. - AnalyzedWorkloadUuid string `json:"analyzedWorkloadUuid,omitempty"` - // CreateTime: Output only. The time when the operation was created. - CreateTime string `json:"createTime,omitempty"` - // Description: Output only. Short description of the operation. - Description string `json:"description,omitempty"` - // DoneTime: Output only. The time when the operation finished. - DoneTime string `json:"doneTime,omitempty"` - // Labels: Output only. Labels associated with the operation. - Labels map[string]string `json:"labels,omitempty"` - // Warnings: Output only. Warnings encountered during operation execution. - Warnings []string `json:"warnings,omitempty"` - // ForceSendFields is a list of field names (e.g. "AnalyzedWorkloadName") to +// AccessSessionSparkApplicationResponse: A summary of Spark Application +type AccessSessionSparkApplicationResponse struct { + // Application: Output only. High level information corresponding to an + // application. + Application *ApplicationInfo `json:"application,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Application") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AnalyzedWorkloadName") to include - // in API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. See + // NullFields is a list of field names (e.g. "Application") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *AnalyzeOperationMetadata) MarshalJSON() ([]byte, error) { - type NoMethod AnalyzeOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s AccessSessionSparkApplicationResponse) MarshalJSON() ([]byte, error) { + type NoMethod AccessSessionSparkApplicationResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// AutoscalingConfig: Autoscaling Policy config associated with the cluster. -type AutoscalingConfig struct { - // PolicyUri: Optional. The autoscaling policy used by the cluster.Only - // resource names including projectid and location (region) are valid. - // Examples: - // https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] - // projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy - // _id]Note that the policy must be in the same project and Dataproc region. - PolicyUri string `json:"policyUri,omitempty"` - // ForceSendFields is a list of field names (e.g. "PolicyUri") to +// AccessSessionSparkApplicationSqlQueryResponse: Details of a query for a +// Spark Application +type AccessSessionSparkApplicationSqlQueryResponse struct { + // ExecutionData: SQL Execution Data + ExecutionData *SqlExecutionUiData `json:"executionData,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. 
+ googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "ExecutionData") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "PolicyUri") to include in API + // NullFields is a list of field names (e.g. "ExecutionData") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *AutoscalingConfig) MarshalJSON() ([]byte, error) { - type NoMethod AutoscalingConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s AccessSessionSparkApplicationSqlQueryResponse) MarshalJSON() ([]byte, error) { + type NoMethod AccessSessionSparkApplicationSqlQueryResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// AutoscalingPolicy: Describes an autoscaling policy for Dataproc cluster -// autoscaler. -type AutoscalingPolicy struct { - BasicAlgorithm *BasicAutoscalingAlgorithm `json:"basicAlgorithm,omitempty"` - // Id: Required. The policy id.The id must contain only letters (a-z, A-Z), - // numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with - // underscore or hyphen. Must consist of between 3 and 50 characters. - Id string `json:"id,omitempty"` - // Labels: Optional. The labels to associate with this autoscaling policy. - // Label keys must contain 1 to 63 characters, and must conform to RFC 1035 - // (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if - // present, must contain 1 to 63 characters, and must conform to RFC 1035 - // (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be - // associated with an autoscaling policy. - Labels map[string]string `json:"labels,omitempty"` - // Name: Output only. The "resource name" of the autoscaling policy, as - // described in https://cloud.google.com/apis/design/resource_names. For - // projects.regions.autoscalingPolicies, the resource name of the policy has - // the following format: - // projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For - // projects.locations.autoscalingPolicies, the resource name of the policy has - // the following format: - // projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id} - Name string `json:"name,omitempty"` - // SecondaryWorkerConfig: Optional. Describes how the autoscaler will operate - // for secondary workers. - SecondaryWorkerConfig *InstanceGroupAutoscalingPolicyConfig `json:"secondaryWorkerConfig,omitempty"` - // WorkerConfig: Required. Describes how the autoscaler will operate for - // primary workers. - WorkerConfig *InstanceGroupAutoscalingPolicyConfig `json:"workerConfig,omitempty"` +// AccessSessionSparkApplicationSqlSparkPlanGraphResponse: SparkPlanGraph for a +// Spark Application execution limited to maximum 10000 clusters. +type AccessSessionSparkApplicationSqlSparkPlanGraphResponse struct { + // SparkPlanGraph: SparkPlanGraph for a Spark Application execution. + SparkPlanGraph *SparkPlanGraph `json:"sparkPlanGraph,omitempty"` // ServerResponse contains the HTTP response code and headers from the server. 
googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "BasicAlgorithm") to + // ForceSendFields is a list of field names (e.g. "SparkPlanGraph") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "BasicAlgorithm") to include in + // NullFields is a list of field names (e.g. "SparkPlanGraph") to include in // API requests with the JSON null value. By default, fields with empty values // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *AutoscalingPolicy) MarshalJSON() ([]byte, error) { - type NoMethod AutoscalingPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s AccessSessionSparkApplicationSqlSparkPlanGraphResponse) MarshalJSON() ([]byte, error) { + type NoMethod AccessSessionSparkApplicationSqlSparkPlanGraphResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// AutotuningConfig: Autotuning configuration of the workload. -type AutotuningConfig struct { - // Scenarios: Optional. Scenarios for which tunings are applied. - // - // Possible values: - // "SCENARIO_UNSPECIFIED" - Default value. - // "SCALING" - Scaling recommendations such as initialExecutors. - // "BROADCAST_HASH_JOIN" - Adding hints for potential relation broadcasts. - // "MEMORY" - Memory management for workloads. - Scenarios []string `json:"scenarios,omitempty"` - // ForceSendFields is a list of field names (e.g. "Scenarios") to +// AccessSessionSparkApplicationStageAttemptResponse: Stage Attempt for a Stage +// of a Spark Application +type AccessSessionSparkApplicationStageAttemptResponse struct { + // StageData: Output only. Data corresponding to a stage. + StageData *StageData `json:"stageData,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "StageData") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Scenarios") to include in API + // NullFields is a list of field names (e.g. "StageData") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *AutotuningConfig) MarshalJSON() ([]byte, error) { - type NoMethod AutotuningConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s AccessSessionSparkApplicationStageAttemptResponse) MarshalJSON() ([]byte, error) { + type NoMethod AccessSessionSparkApplicationStageAttemptResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// AuxiliaryNodeGroup: Node group identification and configuration information. -type AuxiliaryNodeGroup struct { - // NodeGroup: Required. Node group configuration. - NodeGroup *NodeGroup `json:"nodeGroup,omitempty"` - // NodeGroupId: Optional. 
A node group ID. Generated if not specified.The ID - // must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and - // hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of - // from 3 to 33 characters. - NodeGroupId string `json:"nodeGroupId,omitempty"` - // ForceSendFields is a list of field names (e.g. "NodeGroup") to +// AccessSessionSparkApplicationStageRddOperationGraphResponse: RDD operation +// graph for a Spark Application Stage limited to maximum 10000 clusters. +type AccessSessionSparkApplicationStageRddOperationGraphResponse struct { + // RddOperationGraph: RDD operation graph for a Spark Application Stage. + RddOperationGraph *RddOperationGraph `json:"rddOperationGraph,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "RddOperationGraph") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NodeGroup") to include in API + // NullFields is a list of field names (e.g. "RddOperationGraph") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s AccessSessionSparkApplicationStageRddOperationGraphResponse) MarshalJSON() ([]byte, error) { + type NoMethod AccessSessionSparkApplicationStageRddOperationGraphResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// AccessSparkApplicationEnvironmentInfoResponse: Environment details of a +// Spark Application. +type AccessSparkApplicationEnvironmentInfoResponse struct { + // ApplicationEnvironmentInfo: Details about the Environment that the + // application is running in. + ApplicationEnvironmentInfo *ApplicationEnvironmentInfo `json:"applicationEnvironmentInfo,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "ApplicationEnvironmentInfo") + // to unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "ApplicationEnvironmentInfo") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s AccessSparkApplicationEnvironmentInfoResponse) MarshalJSON() ([]byte, error) { + type NoMethod AccessSparkApplicationEnvironmentInfoResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// AccessSparkApplicationJobResponse: Details of a particular job associated +// with Spark Application +type AccessSparkApplicationJobResponse struct { + // JobData: Output only. Data corresponding to a spark job.
+ JobData *JobData `json:"jobData,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "JobData") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "JobData") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *AuxiliaryNodeGroup) MarshalJSON() ([]byte, error) { - type NoMethod AuxiliaryNodeGroup - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s AccessSparkApplicationJobResponse) MarshalJSON() ([]byte, error) { + type NoMethod AccessSparkApplicationJobResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// AuxiliaryServicesConfig: Auxiliary services configuration for a Cluster. -type AuxiliaryServicesConfig struct { - // MetastoreConfig: Optional. The Hive Metastore configuration for this - // workload. - MetastoreConfig *MetastoreConfig `json:"metastoreConfig,omitempty"` - // SparkHistoryServerConfig: Optional. The Spark History Server configuration - // for the workload. - SparkHistoryServerConfig *SparkHistoryServerConfig `json:"sparkHistoryServerConfig,omitempty"` - // ForceSendFields is a list of field names (e.g. "MetastoreConfig") to +// AccessSparkApplicationResponse: A summary of Spark Application +type AccessSparkApplicationResponse struct { + // Application: Output only. High level information corresponding to an + // application. + Application *ApplicationInfo `json:"application,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Application") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "MetastoreConfig") to include in - // API requests with the JSON null value. By default, fields with empty values - // are omitted from API requests. See + // NullFields is a list of field names (e.g. "Application") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *AuxiliaryServicesConfig) MarshalJSON() ([]byte, error) { - type NoMethod AuxiliaryServicesConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s AccessSparkApplicationResponse) MarshalJSON() ([]byte, error) { + type NoMethod AccessSparkApplicationResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// BasicAutoscalingAlgorithm: Basic algorithm for autoscaling. -type BasicAutoscalingAlgorithm struct { - // CooldownPeriod: Optional. Duration between scaling events. 
A scaling period - // starts after the update operation from the previous event has - // completed.Bounds: 2m, 1d. Default: 2m. - CooldownPeriod string `json:"cooldownPeriod,omitempty"` - // SparkStandaloneConfig: Optional. Spark Standalone autoscaling configuration - SparkStandaloneConfig *SparkStandaloneAutoscalingConfig `json:"sparkStandaloneConfig,omitempty"` - // YarnConfig: Optional. YARN autoscaling configuration. - YarnConfig *BasicYarnAutoscalingConfig `json:"yarnConfig,omitempty"` - // ForceSendFields is a list of field names (e.g. "CooldownPeriod") to +// AccessSparkApplicationSqlQueryResponse: Details of a query for a Spark +// Application +type AccessSparkApplicationSqlQueryResponse struct { + // ExecutionData: SQL Execution Data + ExecutionData *SqlExecutionUiData `json:"executionData,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "ExecutionData") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CooldownPeriod") to include in + // NullFields is a list of field names (e.g. "ExecutionData") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s AccessSparkApplicationSqlQueryResponse) MarshalJSON() ([]byte, error) { + type NoMethod AccessSparkApplicationSqlQueryResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// AccessSparkApplicationSqlSparkPlanGraphResponse: SparkPlanGraph for a Spark +// Application execution limited to maximum 10000 clusters. +type AccessSparkApplicationSqlSparkPlanGraphResponse struct { + // SparkPlanGraph: SparkPlanGraph for a Spark Application execution. + SparkPlanGraph *SparkPlanGraph `json:"sparkPlanGraph,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "SparkPlanGraph") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "SparkPlanGraph") to include in // API requests with the JSON null value. By default, fields with empty values // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BasicAutoscalingAlgorithm) MarshalJSON() ([]byte, error) { - type NoMethod BasicAutoscalingAlgorithm - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s AccessSparkApplicationSqlSparkPlanGraphResponse) MarshalJSON() ([]byte, error) { + type NoMethod AccessSparkApplicationSqlSparkPlanGraphResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// BasicYarnAutoscalingConfig: Basic autoscaling configurations for YARN. 
-type BasicYarnAutoscalingConfig struct { - // GracefulDecommissionTimeout: Required. Timeout for YARN graceful - // decommissioning of Node Managers. Specifies the duration to wait for jobs to - // complete before forcefully removing workers (and potentially interrupting - // jobs). Only applicable to downscaling operations.Bounds: 0s, 1d. - GracefulDecommissionTimeout string `json:"gracefulDecommissionTimeout,omitempty"` - // ScaleDownFactor: Required. Fraction of average YARN pending memory in the - // last cooldown period for which to remove workers. A scale-down factor of 1 - // will result in scaling down so that there is no available memory remaining - // after the update (more aggressive scaling). A scale-down factor of 0 - // disables removing workers, which can be beneficial for autoscaling a single - // job. See How autoscaling works - // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) - // for more information.Bounds: 0.0, 1.0. - ScaleDownFactor float64 `json:"scaleDownFactor,omitempty"` - // ScaleDownMinWorkerFraction: Optional. Minimum scale-down threshold as a - // fraction of total cluster size before scaling occurs. For example, in a - // 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at - // least a 2 worker scale-down for the cluster to scale. A threshold of 0 means - // the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. - // Default: 0.0. - ScaleDownMinWorkerFraction float64 `json:"scaleDownMinWorkerFraction,omitempty"` - // ScaleUpFactor: Required. Fraction of average YARN pending memory in the last - // cooldown period for which to add workers. A scale-up factor of 1.0 will - // result in scaling up so that there is no pending memory remaining after the - // update (more aggressive scaling). A scale-up factor closer to 0 will result - // in a smaller magnitude of scaling up (less aggressive scaling). See How - // autoscaling works - // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) - // for more information.Bounds: 0.0, 1.0. - ScaleUpFactor float64 `json:"scaleUpFactor,omitempty"` - // ScaleUpMinWorkerFraction: Optional. Minimum scale-up threshold as a fraction - // of total cluster size before scaling occurs. For example, in a 20-worker - // cluster, a threshold of 0.1 means the autoscaler must recommend at least a - // 2-worker scale-up for the cluster to scale. A threshold of 0 means the - // autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. - // Default: 0.0. - ScaleUpMinWorkerFraction float64 `json:"scaleUpMinWorkerFraction,omitempty"` - // ForceSendFields is a list of field names (e.g. - // "GracefulDecommissionTimeout") to unconditionally include in API requests. - // By default, fields with empty or default values are omitted from API - // requests. See https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields - // for more details. +// AccessSparkApplicationStageAttemptResponse: Stage Attempt for a Stage of a +// Spark Application +type AccessSparkApplicationStageAttemptResponse struct { + // StageData: Output only. Data corresponding to a stage. + StageData *StageData `json:"stageData,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "StageData") to + // unconditionally include in API requests. 
By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "GracefulDecommissionTimeout") to - // include in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. See + // NullFields is a list of field names (e.g. "StageData") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BasicYarnAutoscalingConfig) MarshalJSON() ([]byte, error) { - type NoMethod BasicYarnAutoscalingConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -func (s *BasicYarnAutoscalingConfig) UnmarshalJSON(data []byte) error { - type NoMethod BasicYarnAutoscalingConfig - var s1 struct { - ScaleDownFactor gensupport.JSONFloat64 `json:"scaleDownFactor"` - ScaleDownMinWorkerFraction gensupport.JSONFloat64 `json:"scaleDownMinWorkerFraction"` - ScaleUpFactor gensupport.JSONFloat64 `json:"scaleUpFactor"` - ScaleUpMinWorkerFraction gensupport.JSONFloat64 `json:"scaleUpMinWorkerFraction"` - *NoMethod - } - s1.NoMethod = (*NoMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.ScaleDownFactor = float64(s1.ScaleDownFactor) - s.ScaleDownMinWorkerFraction = float64(s1.ScaleDownMinWorkerFraction) - s.ScaleUpFactor = float64(s1.ScaleUpFactor) - s.ScaleUpMinWorkerFraction = float64(s1.ScaleUpMinWorkerFraction) - return nil +func (s AccessSparkApplicationStageAttemptResponse) MarshalJSON() ([]byte, error) { + type NoMethod AccessSparkApplicationStageAttemptResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// Batch: A representation of a batch workload in the service. -type Batch struct { - // CreateTime: Output only. The time when the batch was created. - CreateTime string `json:"createTime,omitempty"` - // Creator: Output only. The email address of the user who created the batch. - Creator string `json:"creator,omitempty"` - // EnvironmentConfig: Optional. Environment configuration for the batch - // execution. - EnvironmentConfig *EnvironmentConfig `json:"environmentConfig,omitempty"` - // Labels: Optional. The labels to associate with this batch. Label keys must - // contain 1 to 63 characters, and must conform to RFC 1035 - // (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if - // present, must contain 1 to 63 characters, and must conform to RFC 1035 - // (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be - // associated with a batch. - Labels map[string]string `json:"labels,omitempty"` - // Name: Output only. The resource name of the batch. - Name string `json:"name,omitempty"` - // Operation: Output only. The resource name of the operation associated with - // this batch. - Operation string `json:"operation,omitempty"` - // PysparkBatch: Optional. PySpark batch config. - PysparkBatch *PySparkBatch `json:"pysparkBatch,omitempty"` - // RuntimeConfig: Optional. Runtime configuration for the batch execution. - RuntimeConfig *RuntimeConfig `json:"runtimeConfig,omitempty"` - // RuntimeInfo: Output only. Runtime information about batch execution. - RuntimeInfo *RuntimeInfo `json:"runtimeInfo,omitempty"` - // SparkBatch: Optional. 
Spark batch config. - SparkBatch *SparkBatch `json:"sparkBatch,omitempty"` - // SparkRBatch: Optional. SparkR batch config. - SparkRBatch *SparkRBatch `json:"sparkRBatch,omitempty"` - // SparkSqlBatch: Optional. SparkSql batch config. - SparkSqlBatch *SparkSqlBatch `json:"sparkSqlBatch,omitempty"` - // State: Output only. The state of the batch. - // - // Possible values: - // "STATE_UNSPECIFIED" - The batch state is unknown. - // "PENDING" - The batch is created before running. - // "RUNNING" - The batch is running. - // "CANCELLING" - The batch is cancelling. - // "CANCELLED" - The batch cancellation was successful. - // "SUCCEEDED" - The batch completed successfully. - // "FAILED" - The batch is no longer running due to an error. - State string `json:"state,omitempty"` - // StateHistory: Output only. Historical state information for the batch. - StateHistory []*StateHistory `json:"stateHistory,omitempty"` - // StateMessage: Output only. Batch state details, such as a failure - // description if the state is FAILED. - StateMessage string `json:"stateMessage,omitempty"` - // StateTime: Output only. The time when the batch entered a current state. - StateTime string `json:"stateTime,omitempty"` - // Uuid: Output only. A batch UUID (Unique Universal Identifier). The service - // generates this value when it creates the batch. - Uuid string `json:"uuid,omitempty"` +// AccessSparkApplicationStageRddOperationGraphResponse: RDD operation graph +// for a Spark Application Stage limited to maximum 10000 clusters. +type AccessSparkApplicationStageRddOperationGraphResponse struct { + // RddOperationGraph: RDD operation graph for a Spark Application Stage. + RddOperationGraph *RddOperationGraph `json:"rddOperationGraph,omitempty"` // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreateTime") to + // ForceSendFields is a list of field names (e.g. "RddOperationGraph") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreateTime") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "RddOperationGraph") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *Batch) MarshalJSON() ([]byte, error) { - type NoMethod Batch - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s AccessSparkApplicationStageRddOperationGraphResponse) MarshalJSON() ([]byte, error) { + type NoMethod AccessSparkApplicationStageRddOperationGraphResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// BatchOperationMetadata: Metadata describing the Batch operation. -type BatchOperationMetadata struct { - // Batch: Name of the batch for the operation. - Batch string `json:"batch,omitempty"` - // BatchUuid: Batch UUID for the operation. - BatchUuid string `json:"batchUuid,omitempty"` - // CreateTime: The time when the operation was created. 
- CreateTime string `json:"createTime,omitempty"` - // Description: Short description of the operation. - Description string `json:"description,omitempty"` - // DoneTime: The time when the operation finished. - DoneTime string `json:"doneTime,omitempty"` - // Labels: Labels associated with the operation. - Labels map[string]string `json:"labels,omitempty"` - // OperationType: The operation type. - // - // Possible values: - // "BATCH_OPERATION_TYPE_UNSPECIFIED" - Batch operation type is unknown. - // "BATCH" - Batch operation type. - OperationType string `json:"operationType,omitempty"` - // Warnings: Warnings encountered during operation execution. - Warnings []string `json:"warnings,omitempty"` - // ForceSendFields is a list of field names (e.g. "Batch") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See +type AccumulableInfo struct { + AccumullableInfoId int64 `json:"accumullableInfoId,omitempty,string"` + Name string `json:"name,omitempty"` + Update string `json:"update,omitempty"` + Value string `json:"value,omitempty"` + // ForceSendFields is a list of field names (e.g. "AccumullableInfoId") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Batch") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "AccumullableInfoId") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BatchOperationMetadata) MarshalJSON() ([]byte, error) { - type NoMethod BatchOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s AccumulableInfo) MarshalJSON() ([]byte, error) { + type NoMethod AccumulableInfo + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// Binding: Associates members, or principals, with a role. -type Binding struct { - // Condition: The condition that is associated with this binding.If the - // condition evaluates to true, then this binding applies to the current - // request.If the condition evaluates to false, then this binding does not - // apply to the current request. However, a different role binding might grant - // the same role to one or more of the principals in this binding.To learn - // which resources support conditions in their IAM policies, see the IAM - // documentation - // (https://cloud.google.com/iam/help/conditions/resource-policies). - Condition *Expr `json:"condition,omitempty"` - // Members: Specifies the principals requesting access for a Google Cloud - // resource. members can have the following values: allUsers: A special - // identifier that represents anyone who is on the internet; with or without a - // Google account. allAuthenticatedUsers: A special identifier that represents - // anyone who is authenticated with a Google account or a service account. Does - // not include identities that come from external identity providers (IdPs) - // through identity federation. 
user:{emailid}: An email address that - // represents a specific Google account. For example, alice@example.com . - // serviceAccount:{emailid}: An email address that represents a Google service - // account. For example, my-other-app@appspot.gserviceaccount.com. - // serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An - // identifier for a Kubernetes service account - // (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). - // For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. - // group:{emailid}: An email address that represents a Google group. For - // example, admins@example.com. domain:{domain}: The G Suite domain (primary) - // that represents all the users of that domain. For example, google.com or - // example.com. - // principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subj - // ect/{subject_attribute_value}: A single identity in a workforce identity - // pool. - // principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/g - // roup/{group_id}: All workforce identities in a group. - // principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/a - // ttribute.{attribute_name}/{attribute_value}: All workforce identities with a - // specific attribute value. - // principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/* - // : All identities in a workforce identity pool. - // principal://iam.googleapis.com/projects/{project_number}/locations/global/wor - // kloadIdentityPools/{pool_id}/subject/{subject_attribute_value}: A single - // identity in a workload identity pool. - // principalSet://iam.googleapis.com/projects/{project_number}/locations/global/ - // workloadIdentityPools/{pool_id}/group/{group_id}: A workload identity pool - // group. - // principalSet://iam.googleapis.com/projects/{project_number}/locations/global/ - // workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}: - // All identities in a workload identity pool with a certain attribute. - // principalSet://iam.googleapis.com/projects/{project_number}/locations/global/ - // workloadIdentityPools/{pool_id}/*: All identities in a workload identity - // pool. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique - // identifier) representing a user that has been recently deleted. For example, - // alice@example.com?uid=123456789012345678901. If the user is recovered, this - // value reverts to user:{emailid} and the recovered user retains the role in - // the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email - // address (plus unique identifier) representing a service account that has - // been recently deleted. For example, - // my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the - // service account is undeleted, this value reverts to serviceAccount:{emailid} - // and the undeleted service account retains the role in the binding. - // deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique - // identifier) representing a Google group that has been recently deleted. For - // example, admins@example.com?uid=123456789012345678901. If the group is - // recovered, this value reverts to group:{emailid} and the recovered group - // retains the role in the binding. - // deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_ - // id}/subject/{subject_attribute_value}: Deleted single identity in a - // workforce identity pool. 
For example, - // deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-poo - // l-id/subject/my-subject-attribute-value. - Members []string `json:"members,omitempty"` - // Role: Role that is assigned to the list of members, or principals. For - // example, roles/viewer, roles/editor, or roles/owner.For an overview of the - // IAM roles and permissions, see the IAM documentation - // (https://cloud.google.com/iam/docs/roles-overview). For a list of the - // available pre-defined roles, see here - // (https://cloud.google.com/iam/docs/understanding-roles). - Role string `json:"role,omitempty"` - // ForceSendFields is a list of field names (e.g. "Condition") to +// AnalyzeBatchRequest: A request to analyze a batch workload. +type AnalyzeBatchRequest struct { + // RequestId: Optional. A unique ID used to identify the request. If the + // service receives two AnalyzeBatchRequest + // (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.AnalyzeBatchRequest)s + // with the same request_id, the second request is ignored and the Operation + // that corresponds to the first request created and stored in the backend is + // returned.Recommendation: Set this value to a UUID + // (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must + // contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens + // (-). The maximum length is 40 characters. + RequestId string `json:"requestId,omitempty"` + // ForceSendFields is a list of field names (e.g. "RequestId") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Condition") to include in API + // NullFields is a list of field names (e.g. "RequestId") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *Binding) MarshalJSON() ([]byte, error) { - type NoMethod Binding - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s AnalyzeBatchRequest) MarshalJSON() ([]byte, error) { + type NoMethod AnalyzeBatchRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// CancelJobRequest: A request to cancel a job. -type CancelJobRequest struct { -} - -// Cluster: Describes the identifying information, config, and status of a -// Dataproc cluster -type Cluster struct { - // ClusterName: Required. The cluster name, which must be unique within a - // project. The name must start with a lowercase letter, and can contain up to - // 51 lowercase letters, numbers, and hyphens. It cannot end with a hyphen. The - // name of a deleted cluster can be reused. - ClusterName string `json:"clusterName,omitempty"` - // ClusterUuid: Output only. A cluster UUID (Unique Universal Identifier). - // Dataproc generates this value when it creates the cluster. - ClusterUuid string `json:"clusterUuid,omitempty"` - // Config: Optional. The cluster config for a cluster of Compute Engine - // Instances. Note that Dataproc may set default values, and values may change - // when clusters are updated.Exactly one of ClusterConfig or - // VirtualClusterConfig must be specified. 
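// Illustrative sketch, not part of the vendored patch: how the new
// AnalyzeBatchRequest is meant to be used, going only by the field docs
// above. The "dataproc" import alias and the literal UUID are assumptions.
// Note that the generated MarshalJSON goes through gensupport.MarshalJSON,
// so a field listed in ForceSendFields is sent even at its zero value.
package main

import (
	"encoding/json"
	"fmt"

	dataproc "google.golang.org/api/dataproc/v1"
)

func main() {
	req := &dataproc.AnalyzeBatchRequest{
		// Reusing one UUID across retries makes the call idempotent: the
		// service ignores a duplicate request_id and returns the original
		// Operation.
		RequestId: "123e4567-e89b-12d3-a456-426614174000",
	}
	b, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"requestId":"123e4567-e89b-12d3-a456-426614174000"}
}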
- Config *ClusterConfig `json:"config,omitempty"` - // Labels: Optional. The labels to associate with this cluster. Label keys must - // contain 1 to 63 characters, and must conform to RFC 1035 - // (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if - // present, must contain 1 to 63 characters, and must conform to RFC 1035 - // (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be - // associated with a cluster. +// AnalyzeOperationMetadata: Metadata describing the Analyze operation. +type AnalyzeOperationMetadata struct { + // AnalyzedWorkloadName: Output only. Name of the workload being analyzed. + AnalyzedWorkloadName string `json:"analyzedWorkloadName,omitempty"` + // AnalyzedWorkloadType: Output only. Type of the workload being analyzed. + // + // Possible values: + // "WORKLOAD_TYPE_UNSPECIFIED" - Undefined option + // "BATCH" - Serverless batch job + AnalyzedWorkloadType string `json:"analyzedWorkloadType,omitempty"` + // AnalyzedWorkloadUuid: Output only. Unique identifier of the workload, + // typically generated by the control plane (e.g., the batch UUID). + AnalyzedWorkloadUuid string `json:"analyzedWorkloadUuid,omitempty"` + // CreateTime: Output only. The time when the operation was created. + CreateTime string `json:"createTime,omitempty"` + // Description: Output only. Short description of the operation. + Description string `json:"description,omitempty"` + // DoneTime: Output only. The time when the operation finished. + DoneTime string `json:"doneTime,omitempty"` + // Labels: Output only. Labels associated with the operation. Labels map[string]string `json:"labels,omitempty"` - // Metrics: Output only. Contains cluster daemon metrics such as HDFS and YARN - // stats.Beta Feature: This report is available for testing purposes only. It - // may be changed before final release. - Metrics *ClusterMetrics `json:"metrics,omitempty"` - // ProjectId: Required. The Google Cloud Platform project ID that the cluster - // belongs to. - ProjectId string `json:"projectId,omitempty"` - // Status: Output only. Cluster status. - Status *ClusterStatus `json:"status,omitempty"` - // StatusHistory: Output only. The previous cluster status. - StatusHistory []*ClusterStatus `json:"statusHistory,omitempty"` - // VirtualClusterConfig: Optional. The virtual cluster config is used when - // creating a Dataproc cluster that does not directly control the underlying - // compute resources, for example, when creating a Dataproc-on-GKE cluster - // (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview). - // Dataproc may set default values, and values may change when clusters are - // updated. Exactly one of config or virtual_cluster_config must be specified. - VirtualClusterConfig *VirtualClusterConfig `json:"virtualClusterConfig,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the server. - googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "ClusterName") to + // Warnings: Output only. Warnings encountered during operation execution. + Warnings []string `json:"warnings,omitempty"` + // ForceSendFields is a list of field names (e.g. "AnalyzedWorkloadName") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. 
"ClusterName") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "AnalyzedWorkloadName") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *Cluster) MarshalJSON() ([]byte, error) { - type NoMethod Cluster - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s AnalyzeOperationMetadata) MarshalJSON() ([]byte, error) { + type NoMethod AnalyzeOperationMetadata + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ClusterConfig: The cluster config. -type ClusterConfig struct { - // AutoscalingConfig: Optional. Autoscaling config for the policy associated - // with the cluster. Cluster does not autoscale if this field is unset. - AutoscalingConfig *AutoscalingConfig `json:"autoscalingConfig,omitempty"` - // AuxiliaryNodeGroups: Optional. The node group settings. - AuxiliaryNodeGroups []*AuxiliaryNodeGroup `json:"auxiliaryNodeGroups,omitempty"` - // ConfigBucket: Optional. A Cloud Storage bucket used to stage job - // dependencies, config files, and job driver console output. If you do not - // specify a staging bucket, Cloud Dataproc will determine a Cloud Storage - // location (US, ASIA, or EU) for your cluster's staging bucket according to - // the Compute Engine zone where your cluster is deployed, and then create and - // manage this project-level, per-location bucket (see Dataproc staging and - // temp buckets - // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). - // This field requires a Cloud Storage bucket name, not a gs://... URI to a - // Cloud Storage bucket. - ConfigBucket string `json:"configBucket,omitempty"` - // DataprocMetricConfig: Optional. The config for Dataproc metrics. - DataprocMetricConfig *DataprocMetricConfig `json:"dataprocMetricConfig,omitempty"` - // EncryptionConfig: Optional. Encryption settings for the cluster. - EncryptionConfig *EncryptionConfig `json:"encryptionConfig,omitempty"` - // EndpointConfig: Optional. Port/endpoint configuration for this cluster - EndpointConfig *EndpointConfig `json:"endpointConfig,omitempty"` - // GceClusterConfig: Optional. The shared Compute Engine config settings for - // all instances in a cluster. - GceClusterConfig *GceClusterConfig `json:"gceClusterConfig,omitempty"` - // GkeClusterConfig: Optional. BETA. The Kubernetes Engine config for Dataproc - // clusters deployed to The Kubernetes Engine config for Dataproc clusters - // deployed to Kubernetes. These config settings are mutually exclusive with - // Compute Engine-based options, such as gce_cluster_config, master_config, - // worker_config, secondary_worker_config, and autoscaling_config. - GkeClusterConfig *GkeClusterConfig `json:"gkeClusterConfig,omitempty"` - // InitializationActions: Optional. Commands to execute on each node after - // config is completed. By default, executables are run on master and all - // worker nodes. You can test a node's role metadata to run an executable on a - // master or worker node, as shown below using curl (you can also use wget): - // ROLE=$(curl -H Metadata-Flavor:Google - // http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ - // "${ROLE}" == 'Master' ]]; then ... 
master specific actions ... else ... - // worker specific actions ... fi - InitializationActions []*NodeInitializationAction `json:"initializationActions,omitempty"` - // LifecycleConfig: Optional. Lifecycle setting for the cluster. - LifecycleConfig *LifecycleConfig `json:"lifecycleConfig,omitempty"` - // MasterConfig: Optional. The Compute Engine config settings for the cluster's - // master instance. - MasterConfig *InstanceGroupConfig `json:"masterConfig,omitempty"` - // MetastoreConfig: Optional. Metastore configuration. - MetastoreConfig *MetastoreConfig `json:"metastoreConfig,omitempty"` - // SecondaryWorkerConfig: Optional. The Compute Engine config settings for a - // cluster's secondary worker instances - SecondaryWorkerConfig *InstanceGroupConfig `json:"secondaryWorkerConfig,omitempty"` - // SecurityConfig: Optional. Security settings for the cluster. - SecurityConfig *SecurityConfig `json:"securityConfig,omitempty"` - // SoftwareConfig: Optional. The config settings for cluster software. - SoftwareConfig *SoftwareConfig `json:"softwareConfig,omitempty"` - // TempBucket: Optional. A Cloud Storage bucket used to store ephemeral cluster - // and jobs data, such as Spark and MapReduce history files. If you do not - // specify a temp bucket, Dataproc will determine a Cloud Storage location (US, - // ASIA, or EU) for your cluster's temp bucket according to the Compute Engine - // zone where your cluster is deployed, and then create and manage this - // project-level, per-location bucket. The default bucket has a TTL of 90 days, - // but you can use any TTL (or none) if you specify a bucket (see Dataproc - // staging and temp buckets - // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). - // This field requires a Cloud Storage bucket name, not a gs://... URI to a - // Cloud Storage bucket. - TempBucket string `json:"tempBucket,omitempty"` - // WorkerConfig: Optional. The Compute Engine config settings for the cluster's - // worker instances. - WorkerConfig *InstanceGroupConfig `json:"workerConfig,omitempty"` - // ForceSendFields is a list of field names (e.g. "AutoscalingConfig") to +type AppSummary struct { + NumCompletedJobs int64 `json:"numCompletedJobs,omitempty"` + NumCompletedStages int64 `json:"numCompletedStages,omitempty"` + // ForceSendFields is a list of field names (e.g. "NumCompletedJobs") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AutoscalingConfig") to include in + // NullFields is a list of field names (e.g. "NumCompletedJobs") to include in // API requests with the JSON null value. By default, fields with empty values // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ClusterConfig) MarshalJSON() ([]byte, error) { - type NoMethod ClusterConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s AppSummary) MarshalJSON() ([]byte, error) { + type NoMethod AppSummary + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ClusterMetrics: Contains cluster daemon metrics, such as HDFS and YARN -// stats.Beta Feature: This report is available for testing purposes only. It -// may be changed before final release. 
-type ClusterMetrics struct { - // HdfsMetrics: The HDFS metrics. - HdfsMetrics map[string]string `json:"hdfsMetrics,omitempty"` - // YarnMetrics: YARN metrics. - YarnMetrics map[string]string `json:"yarnMetrics,omitempty"` - // ForceSendFields is a list of field names (e.g. "HdfsMetrics") to +// ApplicationAttemptInfo: Specific attempt of an application. +type ApplicationAttemptInfo struct { + AppSparkVersion string `json:"appSparkVersion,omitempty"` + AttemptId string `json:"attemptId,omitempty"` + Completed bool `json:"completed,omitempty"` + DurationMillis int64 `json:"durationMillis,omitempty,string"` + EndTime string `json:"endTime,omitempty"` + LastUpdated string `json:"lastUpdated,omitempty"` + SparkUser string `json:"sparkUser,omitempty"` + StartTime string `json:"startTime,omitempty"` + // ForceSendFields is a list of field names (e.g. "AppSparkVersion") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "HdfsMetrics") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "AppSparkVersion") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ClusterMetrics) MarshalJSON() ([]byte, error) { - type NoMethod ClusterMetrics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s ApplicationAttemptInfo) MarshalJSON() ([]byte, error) { + type NoMethod ApplicationAttemptInfo + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ClusterOperation: The cluster operation triggered by a workflow. -type ClusterOperation struct { - // Done: Output only. Indicates the operation is done. - Done bool `json:"done,omitempty"` - // Error: Output only. Error, if operation failed. - Error string `json:"error,omitempty"` - // OperationId: Output only. The id of the cluster operation. - OperationId string `json:"operationId,omitempty"` - // ForceSendFields is a list of field names (e.g. "Done") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See +// ApplicationEnvironmentInfo: Details about the Environment that the +// application is running in. +type ApplicationEnvironmentInfo struct { + ClasspathEntries map[string]string `json:"classpathEntries,omitempty"` + HadoopProperties map[string]string `json:"hadoopProperties,omitempty"` + MetricsProperties map[string]string `json:"metricsProperties,omitempty"` + ResourceProfiles []*ResourceProfileInfo `json:"resourceProfiles,omitempty"` + Runtime *SparkRuntimeInfo `json:"runtime,omitempty"` + SparkProperties map[string]string `json:"sparkProperties,omitempty"` + SystemProperties map[string]string `json:"systemProperties,omitempty"` + // ForceSendFields is a list of field names (e.g. "ClasspathEntries") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. 
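// The 64-bit fields above (AccumullableInfoId, DurationMillis) carry the
// ",string" JSON option, so they travel as quoted strings. A self-contained
// sketch of why (the "attempt" type is a stand-in, not the generated one):
// integers above 2^53 would lose precision in clients that decode JSON
// numbers as float64.
package main

import (
	"encoding/json"
	"fmt"
)

type attempt struct {
	DurationMillis int64 `json:"durationMillis,omitempty,string"`
}

func main() {
	b, _ := json.Marshal(attempt{DurationMillis: 1<<53 + 1})
	fmt.Println(string(b)) // {"durationMillis":"9007199254740993"}

	var a attempt
	_ = json.Unmarshal([]byte(`{"durationMillis":"42"}`), &a)
	fmt.Println(a.DurationMillis) // 42
}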
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Done") to include in API requests - // with the JSON null value. By default, fields with empty values are omitted - // from API requests. See + // NullFields is a list of field names (e.g. "ClasspathEntries") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ClusterOperation) MarshalJSON() ([]byte, error) { - type NoMethod ClusterOperation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s ApplicationEnvironmentInfo) MarshalJSON() ([]byte, error) { + type NoMethod ApplicationEnvironmentInfo + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ClusterOperationMetadata: Metadata describing the operation. -type ClusterOperationMetadata struct { - // ChildOperationIds: Output only. Child operation ids - ChildOperationIds []string `json:"childOperationIds,omitempty"` - // ClusterName: Output only. Name of the cluster for the operation. - ClusterName string `json:"clusterName,omitempty"` - // ClusterUuid: Output only. Cluster UUID for the operation. - ClusterUuid string `json:"clusterUuid,omitempty"` - // Description: Output only. Short description of operation. - Description string `json:"description,omitempty"` - // Labels: Output only. Labels associated with the operation - Labels map[string]string `json:"labels,omitempty"` - // OperationType: Output only. The operation type. - OperationType string `json:"operationType,omitempty"` - // Status: Output only. Current operation status. - Status *ClusterOperationStatus `json:"status,omitempty"` - // StatusHistory: Output only. The previous operation status. - StatusHistory []*ClusterOperationStatus `json:"statusHistory,omitempty"` - // Warnings: Output only. Errors encountered during operation execution. - Warnings []string `json:"warnings,omitempty"` - // ForceSendFields is a list of field names (e.g. "ChildOperationIds") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See +// ApplicationInfo: High level information corresponding to an application. +type ApplicationInfo struct { + // Possible values: + // "APPLICATION_CONTEXT_INGESTION_STATUS_UNSPECIFIED" + // "APPLICATION_CONTEXT_INGESTION_STATUS_COMPLETED" + ApplicationContextIngestionStatus string `json:"applicationContextIngestionStatus,omitempty"` + ApplicationId string `json:"applicationId,omitempty"` + Attempts []*ApplicationAttemptInfo `json:"attempts,omitempty"` + CoresGranted int64 `json:"coresGranted,omitempty"` + CoresPerExecutor int64 `json:"coresPerExecutor,omitempty"` + MaxCores int64 `json:"maxCores,omitempty"` + MemoryPerExecutorMb int64 `json:"memoryPerExecutorMb,omitempty"` + Name string `json:"name,omitempty"` + // Possible values: + // "QUANTILE_DATA_STATUS_UNSPECIFIED" + // "QUANTILE_DATA_STATUS_COMPLETED" + // "QUANTILE_DATA_STATUS_FAILED" + QuantileDataStatus string `json:"quantileDataStatus,omitempty"` + // ForceSendFields is a list of field names (e.g. + // "ApplicationContextIngestionStatus") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted from + // API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. 
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ChildOperationIds") to include in - // API requests with the JSON null value. By default, fields with empty values - // are omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + // NullFields is a list of field names (e.g. + // "ApplicationContextIngestionStatus") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted from API + // requests. See https://pkg.go.dev/google.golang.org/api#hdr-NullFields for + // more details. NullFields []string `json:"-"` } -func (s *ClusterOperationMetadata) MarshalJSON() ([]byte, error) { - type NoMethod ClusterOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s ApplicationInfo) MarshalJSON() ([]byte, error) { + type NoMethod ApplicationInfo + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ClusterOperationStatus: The status of the operation. -type ClusterOperationStatus struct { - // Details: Output only. A message containing any operation metadata details. - Details string `json:"details,omitempty"` - // InnerState: Output only. A message containing the detailed operation state. - InnerState string `json:"innerState,omitempty"` - // State: Output only. A message containing the operation state. - // - // Possible values: - // "UNKNOWN" - Unused. - // "PENDING" - The operation has been created. - // "RUNNING" - The operation is running. - // "DONE" - The operation is done; either cancelled or completed. - State string `json:"state,omitempty"` - // StateStartTime: Output only. The time this state was entered. - StateStartTime string `json:"stateStartTime,omitempty"` - // ForceSendFields is a list of field names (e.g. "Details") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See +// AutoscalingConfig: Autoscaling Policy config associated with the cluster. +type AutoscalingConfig struct { + // PolicyUri: Optional. The autoscaling policy used by the cluster.Only + // resource names including projectid and location (region) are valid. + // Examples: + // https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] + // projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy + // _id]Note that the policy must be in the same project and Dataproc region. + PolicyUri string `json:"policyUri,omitempty"` + // ForceSendFields is a list of field names (e.g. "PolicyUri") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Details") to include in API + // NullFields is a list of field names (e.g. "PolicyUri") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
NullFields []string `json:"-"` } -func (s *ClusterOperationStatus) MarshalJSON() ([]byte, error) { - type NoMethod ClusterOperationStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s AutoscalingConfig) MarshalJSON() ([]byte, error) { + type NoMethod AutoscalingConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ClusterSelector: A selector that chooses target cluster for jobs based on -// metadata. -type ClusterSelector struct { - // ClusterLabels: Required. The cluster labels. Cluster must have all labels to - // match. - ClusterLabels map[string]string `json:"clusterLabels,omitempty"` - // Zone: Optional. The zone where workflow process executes. This parameter - // does not affect the selection of the cluster.If unspecified, the zone of the - // first cluster matching the selector is used. - Zone string `json:"zone,omitempty"` - // ForceSendFields is a list of field names (e.g. "ClusterLabels") to +// AutoscalingPolicy: Describes an autoscaling policy for Dataproc cluster +// autoscaler. +type AutoscalingPolicy struct { + BasicAlgorithm *BasicAutoscalingAlgorithm `json:"basicAlgorithm,omitempty"` + // Id: Required. The policy id.The id must contain only letters (a-z, A-Z), + // numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with + // underscore or hyphen. Must consist of between 3 and 50 characters. + Id string `json:"id,omitempty"` + // Labels: Optional. The labels to associate with this autoscaling policy. + // Label keys must contain 1 to 63 characters, and must conform to RFC 1035 + // (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if + // present, must contain 1 to 63 characters, and must conform to RFC 1035 + // (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + // associated with an autoscaling policy. + Labels map[string]string `json:"labels,omitempty"` + // Name: Output only. The "resource name" of the autoscaling policy, as + // described in https://cloud.google.com/apis/design/resource_names. For + // projects.regions.autoscalingPolicies, the resource name of the policy has + // the following format: + // projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For + // projects.locations.autoscalingPolicies, the resource name of the policy has + // the following format: + // projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id} + Name string `json:"name,omitempty"` + // SecondaryWorkerConfig: Optional. Describes how the autoscaler will operate + // for secondary workers. + SecondaryWorkerConfig *InstanceGroupAutoscalingPolicyConfig `json:"secondaryWorkerConfig,omitempty"` + // WorkerConfig: Required. Describes how the autoscaler will operate for + // primary workers. + WorkerConfig *InstanceGroupAutoscalingPolicyConfig `json:"workerConfig,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "BasicAlgorithm") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ClusterLabels") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. 
See + // NullFields is a list of field names (e.g. "BasicAlgorithm") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ClusterSelector) MarshalJSON() ([]byte, error) { - type NoMethod ClusterSelector - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s AutoscalingPolicy) MarshalJSON() ([]byte, error) { + type NoMethod AutoscalingPolicy + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ClusterStatus: The status of a cluster and its instances. -type ClusterStatus struct { - // Detail: Optional. Output only. Details of cluster's state. - Detail string `json:"detail,omitempty"` - // State: Output only. The cluster's state. - // - // Possible values: - // "UNKNOWN" - The cluster state is unknown. - // "CREATING" - The cluster is being created and set up. It is not ready for - // use. - // "RUNNING" - The cluster is currently running and healthy. It is ready for - // use.Note: The cluster state changes from "creating" to "running" status - // after the master node(s), first two primary worker nodes (and the last - // primary worker node if primary workers > 2) are running. - // "ERROR" - The cluster encountered an error. It is not ready for use. - // "ERROR_DUE_TO_UPDATE" - The cluster has encountered an error while being - // updated. Jobs can be submitted to the cluster, but the cluster cannot be - // updated. - // "DELETING" - The cluster is being deleted. It cannot be used. - // "UPDATING" - The cluster is being updated. It continues to accept and - // process jobs. - // "STOPPING" - The cluster is being stopped. It cannot be used. - // "STOPPED" - The cluster is currently stopped. It is not ready for use. - // "STARTING" - The cluster is being started. It is not ready for use. - // "REPAIRING" - The cluster is being repaired. It is not ready for use. - State string `json:"state,omitempty"` - // StateStartTime: Output only. Time when this state was entered (see JSON - // representation of Timestamp - // (https://developers.google.com/protocol-buffers/docs/proto3#json)). - StateStartTime string `json:"stateStartTime,omitempty"` - // Substate: Output only. Additional state information that includes status - // reported by the agent. +// AutotuningConfig: Autotuning configuration of the workload. +type AutotuningConfig struct { + // Scenarios: Optional. Scenarios for which tunings are applied. // // Possible values: - // "UNSPECIFIED" - The cluster substate is unknown. - // "UNHEALTHY" - The cluster is known to be in an unhealthy state (for - // example, critical daemons are not running or HDFS capacity is - // exhausted).Applies to RUNNING state. - // "STALE_STATUS" - The agent-reported status is out of date (may occur if - // Dataproc loses communication with Agent).Applies to RUNNING state. - Substate string `json:"substate,omitempty"` - // ForceSendFields is a list of field names (e.g. "Detail") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See + // "SCENARIO_UNSPECIFIED" - Default value. + // "SCALING" - Scaling recommendations such as initialExecutors. + // "BROADCAST_HASH_JOIN" - Adding hints for potential relation broadcasts. + // "MEMORY" - Memory management for workloads. 
+ Scenarios []string `json:"scenarios,omitempty"` + // ForceSendFields is a list of field names (e.g. "Scenarios") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Detail") to include in API + // NullFields is a list of field names (e.g. "Scenarios") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ClusterStatus) MarshalJSON() ([]byte, error) { - type NoMethod ClusterStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s AutotuningConfig) MarshalJSON() ([]byte, error) { + type NoMethod AutotuningConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ConfidentialInstanceConfig: Confidential Instance Config for clusters using -// Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs) -type ConfidentialInstanceConfig struct { - // EnableConfidentialCompute: Optional. Defines whether the instance should - // have confidential compute enabled. - EnableConfidentialCompute bool `json:"enableConfidentialCompute,omitempty"` - // ForceSendFields is a list of field names (e.g. "EnableConfidentialCompute") - // to unconditionally include in API requests. By default, fields with empty or +// AuxiliaryNodeGroup: Node group identification and configuration information. +type AuxiliaryNodeGroup struct { + // NodeGroup: Required. Node group configuration. + NodeGroup *NodeGroup `json:"nodeGroup,omitempty"` + // NodeGroupId: Optional. A node group ID. Generated if not specified.The ID + // must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and + // hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of + // from 3 to 33 characters. + NodeGroupId string `json:"nodeGroupId,omitempty"` + // ForceSendFields is a list of field names (e.g. "NodeGroup") to + // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "EnableConfidentialCompute") to - // include in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. - NullFields []string `json:"-"` -} - -func (s *ConfidentialInstanceConfig) MarshalJSON() ([]byte, error) { - type NoMethod ConfidentialInstanceConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// DataprocMetricConfig: Dataproc metric config. -type DataprocMetricConfig struct { - // Metrics: Required. Metrics sources to enable. - Metrics []*Metric `json:"metrics,omitempty"` - // ForceSendFields is a list of field names (e.g. "Metrics") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more - // details. 
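// A minimal sketch of selecting autotuning scenarios with the
// AutotuningConfig type added above; the enum strings come from its
// "Possible values" list. The "dataproc" alias is an assumption, and wiring
// the config into a batch's runtime configuration is omitted because that
// plumbing is not shown in this hunk.
package main

import (
	"encoding/json"
	"fmt"

	dataproc "google.golang.org/api/dataproc/v1"
)

func main() {
	at := &dataproc.AutotuningConfig{
		Scenarios: []string{"SCALING", "MEMORY"},
	}
	b, _ := json.Marshal(at)
	fmt.Println(string(b)) // {"scenarios":["SCALING","MEMORY"]}
}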
- ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Metrics") to include in API + // NullFields is a list of field names (e.g. "NodeGroup") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *DataprocMetricConfig) MarshalJSON() ([]byte, error) { - type NoMethod DataprocMetricConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s AuxiliaryNodeGroup) MarshalJSON() ([]byte, error) { + type NoMethod AuxiliaryNodeGroup + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// DiagnoseClusterRequest: A request to collect cluster diagnostic information. -type DiagnoseClusterRequest struct { - // DiagnosisInterval: Optional. Time interval in which diagnosis should be - // carried out on the cluster. - DiagnosisInterval *Interval `json:"diagnosisInterval,omitempty"` - // Job: Optional. DEPRECATED Specifies the job on which diagnosis is to be - // performed. Format: projects/{project}/regions/{region}/jobs/{job} - Job string `json:"job,omitempty"` - // Jobs: Optional. Specifies a list of jobs on which diagnosis is to be - // performed. Format: projects/{project}/regions/{region}/jobs/{job} - Jobs []string `json:"jobs,omitempty"` - // TarballAccess: Optional. (Optional) The access type to the diagnostic - // tarball. If not specified, falls back to default access of the bucket - // - // Possible values: - // "TARBALL_ACCESS_UNSPECIFIED" - Tarball Access unspecified. Falls back to - // default access of the bucket - // "GOOGLE_CLOUD_SUPPORT" - Google Cloud Support group has read access to the - // diagnostic tarball - // "GOOGLE_DATAPROC_DIAGNOSE" - Google Cloud Dataproc Diagnose service - // account has read access to the diagnostic tarball - TarballAccess string `json:"tarballAccess,omitempty"` - // TarballGcsDir: Optional. (Optional) The output Cloud Storage directory for - // the diagnostic tarball. If not specified, a task-specific directory in the - // cluster's staging bucket will be used. - TarballGcsDir string `json:"tarballGcsDir,omitempty"` - // YarnApplicationId: Optional. DEPRECATED Specifies the yarn application on - // which diagnosis is to be performed. - YarnApplicationId string `json:"yarnApplicationId,omitempty"` - // YarnApplicationIds: Optional. Specifies a list of yarn applications on which - // diagnosis is to be performed. - YarnApplicationIds []string `json:"yarnApplicationIds,omitempty"` - // ForceSendFields is a list of field names (e.g. "DiagnosisInterval") to +// AuxiliaryServicesConfig: Auxiliary services configuration for a Cluster. +type AuxiliaryServicesConfig struct { + // MetastoreConfig: Optional. The Hive Metastore configuration for this + // workload. + MetastoreConfig *MetastoreConfig `json:"metastoreConfig,omitempty"` + // SparkHistoryServerConfig: Optional. The Spark History Server configuration + // for the workload. + SparkHistoryServerConfig *SparkHistoryServerConfig `json:"sparkHistoryServerConfig,omitempty"` + // ForceSendFields is a list of field names (e.g. "MetastoreConfig") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. 
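// The NodeGroupId rules documented above (letters, digits, underscores, and
// hyphens; no leading or trailing underscore or hyphen; 3 to 33 characters)
// can be checked client-side before a request is sent. The regexp below is
// an illustrative reading of those rules, not code from this patch.
package main

import (
	"fmt"
	"regexp"
)

var nodeGroupID = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_-]{1,31}[a-zA-Z0-9]$`)

func main() {
	fmt.Println(nodeGroupID.MatchString("my-group-1")) // true
	fmt.Println(nodeGroupID.MatchString("-bad-start")) // false: leading hyphen
	fmt.Println(nodeGroupID.MatchString("ab"))         // false: under 3 characters
}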
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DiagnosisInterval") to include in + // NullFields is a list of field names (e.g. "MetastoreConfig") to include in // API requests with the JSON null value. By default, fields with empty values // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *DiagnoseClusterRequest) MarshalJSON() ([]byte, error) { - type NoMethod DiagnoseClusterRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// DiagnoseClusterResults: The location of diagnostic output. -type DiagnoseClusterResults struct { - // OutputUri: Output only. The Cloud Storage URI of the diagnostic output. The - // output report is a plain text file with a summary of collected diagnostics. - OutputUri string `json:"outputUri,omitempty"` - // ForceSendFields is a list of field names (e.g. "OutputUri") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more - // details. - ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "OutputUri") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. - NullFields []string `json:"-"` -} - -func (s *DiagnoseClusterResults) MarshalJSON() ([]byte, error) { - type NoMethod DiagnoseClusterResults - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s AuxiliaryServicesConfig) MarshalJSON() ([]byte, error) { + type NoMethod AuxiliaryServicesConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// DiskConfig: Specifies the config of disk options for a group of VM -// instances. -type DiskConfig struct { - // BootDiskSizeGb: Optional. Size in GB of the boot disk (default is 500GB). - BootDiskSizeGb int64 `json:"bootDiskSizeGb,omitempty"` - // BootDiskType: Optional. Type of the boot disk (default is "pd-standard"). - // Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), - // "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent - // Disk Hard Disk Drive). See Disk types - // (https://cloud.google.com/compute/docs/disks#disk-types). - BootDiskType string `json:"bootDiskType,omitempty"` - // LocalSsdInterface: Optional. Interface type of local SSDs (default is - // "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" - // (Non-Volatile Memory Express). See local SSD performance - // (https://cloud.google.com/compute/docs/disks/local-ssd#performance). - LocalSsdInterface string `json:"localSsdInterface,omitempty"` - // NumLocalSsds: Optional. Number of attached SSDs, from 0 to 8 (default is 0). - // If SSDs are not attached, the boot disk is used to store runtime logs and - // HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If - // one or more SSDs are attached, this runtime bulk data is spread across them, - // and the boot disk contains only basic config and installed binaries.Note: - // Local SSD options may vary by machine type and number of vCPUs selected. - NumLocalSsds int64 `json:"numLocalSsds,omitempty"` - // ForceSendFields is a list of field names (e.g. 
"BootDiskSizeGb") to +// BasicAutoscalingAlgorithm: Basic algorithm for autoscaling. +type BasicAutoscalingAlgorithm struct { + // CooldownPeriod: Optional. Duration between scaling events. A scaling period + // starts after the update operation from the previous event has + // completed.Bounds: 2m, 1d. Default: 2m. + CooldownPeriod string `json:"cooldownPeriod,omitempty"` + // SparkStandaloneConfig: Optional. Spark Standalone autoscaling configuration + SparkStandaloneConfig *SparkStandaloneAutoscalingConfig `json:"sparkStandaloneConfig,omitempty"` + // YarnConfig: Optional. YARN autoscaling configuration. + YarnConfig *BasicYarnAutoscalingConfig `json:"yarnConfig,omitempty"` + // ForceSendFields is a list of field names (e.g. "CooldownPeriod") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "BootDiskSizeGb") to include in + // NullFields is a list of field names (e.g. "CooldownPeriod") to include in // API requests with the JSON null value. By default, fields with empty values // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *DiskConfig) MarshalJSON() ([]byte, error) { - type NoMethod DiskConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s BasicAutoscalingAlgorithm) MarshalJSON() ([]byte, error) { + type NoMethod BasicAutoscalingAlgorithm + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// DriverSchedulingConfig: Driver scheduling configuration. -type DriverSchedulingConfig struct { - // MemoryMb: Required. The amount of memory in MB the driver is requesting. - MemoryMb int64 `json:"memoryMb,omitempty"` - // Vcores: Required. The number of vCPUs the driver is requesting. - Vcores int64 `json:"vcores,omitempty"` - // ForceSendFields is a list of field names (e.g. "MemoryMb") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more - // details. +// BasicYarnAutoscalingConfig: Basic autoscaling configurations for YARN. +type BasicYarnAutoscalingConfig struct { + // GracefulDecommissionTimeout: Required. Timeout for YARN graceful + // decommissioning of Node Managers. Specifies the duration to wait for jobs to + // complete before forcefully removing workers (and potentially interrupting + // jobs). Only applicable to downscaling operations.Bounds: 0s, 1d. + GracefulDecommissionTimeout string `json:"gracefulDecommissionTimeout,omitempty"` + // ScaleDownFactor: Required. Fraction of average YARN pending memory in the + // last cooldown period for which to remove workers. A scale-down factor of 1 + // will result in scaling down so that there is no available memory remaining + // after the update (more aggressive scaling). A scale-down factor of 0 + // disables removing workers, which can be beneficial for autoscaling a single + // job. See How autoscaling works + // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) + // for more information.Bounds: 0.0, 1.0. 
+ ScaleDownFactor float64 `json:"scaleDownFactor,omitempty"` + // ScaleDownMinWorkerFraction: Optional. Minimum scale-down threshold as a + // fraction of total cluster size before scaling occurs. For example, in a + // 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at + // least a 2 worker scale-down for the cluster to scale. A threshold of 0 means + // the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. + // Default: 0.0. + ScaleDownMinWorkerFraction float64 `json:"scaleDownMinWorkerFraction,omitempty"` + // ScaleUpFactor: Required. Fraction of average YARN pending memory in the last + // cooldown period for which to add workers. A scale-up factor of 1.0 will + // result in scaling up so that there is no pending memory remaining after the + // update (more aggressive scaling). A scale-up factor closer to 0 will result + // in a smaller magnitude of scaling up (less aggressive scaling). See How + // autoscaling works + // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) + // for more information.Bounds: 0.0, 1.0. + ScaleUpFactor float64 `json:"scaleUpFactor,omitempty"` + // ScaleUpMinWorkerFraction: Optional. Minimum scale-up threshold as a fraction + // of total cluster size before scaling occurs. For example, in a 20-worker + // cluster, a threshold of 0.1 means the autoscaler must recommend at least a + // 2-worker scale-up for the cluster to scale. A threshold of 0 means the + // autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. + // Default: 0.0. + ScaleUpMinWorkerFraction float64 `json:"scaleUpMinWorkerFraction,omitempty"` + // ForceSendFields is a list of field names (e.g. + // "GracefulDecommissionTimeout") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. See https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields + // for more details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "MemoryMb") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "GracefulDecommissionTimeout") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *DriverSchedulingConfig) MarshalJSON() ([]byte, error) { - type NoMethod DriverSchedulingConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// Empty: A generic empty message that you can re-use to avoid defining -// duplicated empty messages in your APIs. A typical example is to use it as -// the request or the response type of an API method. For instance: service Foo -// { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } -type Empty struct { - // ServerResponse contains the HTTP response code and headers from the server. - googleapi.ServerResponse `json:"-"` +func (s BasicYarnAutoscalingConfig) MarshalJSON() ([]byte, error) { + type NoMethod BasicYarnAutoscalingConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// EncryptionConfig: Encryption settings for the cluster. -type EncryptionConfig struct { - // GcePdKmsKeyName: Optional. 
The Cloud KMS key resource name to use for - // persistent disk encryption for all instances in the cluster. See Use CMEK - // with cluster data - // (https://cloud.google.com//dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_cluster_data) - // for more information. - GcePdKmsKeyName string `json:"gcePdKmsKeyName,omitempty"` - // KmsKey: Optional. The Cloud KMS key resource name to use for cluster - // persistent disk and job argument encryption. See Use CMEK with cluster data - // (https://cloud.google.com//dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_cluster_data) - // for more information.When this key resource name is provided, the following - // job arguments of the following job types submitted to the cluster are - // encrypted using CMEK: FlinkJob args - // (https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob) - // HadoopJob args - // (https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob) - // SparkJob args - // (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob) - // SparkRJob args - // (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob) - // PySparkJob args - // (https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob) - // SparkSqlJob - // (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob) - // scriptVariables and queryList.queries HiveJob - // (https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob) - // scriptVariables and queryList.queries PigJob - // (https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob) - // scriptVariables and queryList.queries PrestoJob - // (https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob) - // scriptVariables and queryList.queries - KmsKey string `json:"kmsKey,omitempty"` - // ForceSendFields is a list of field names (e.g. "GcePdKmsKeyName") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more - // details. - ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "GcePdKmsKeyName") to include in - // API requests with the JSON null value. By default, fields with empty values - // are omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. - NullFields []string `json:"-"` +func (s *BasicYarnAutoscalingConfig) UnmarshalJSON(data []byte) error { + type NoMethod BasicYarnAutoscalingConfig + var s1 struct { + ScaleDownFactor gensupport.JSONFloat64 `json:"scaleDownFactor"` + ScaleDownMinWorkerFraction gensupport.JSONFloat64 `json:"scaleDownMinWorkerFraction"` + ScaleUpFactor gensupport.JSONFloat64 `json:"scaleUpFactor"` + ScaleUpMinWorkerFraction gensupport.JSONFloat64 `json:"scaleUpMinWorkerFraction"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.ScaleDownFactor = float64(s1.ScaleDownFactor) + s.ScaleDownMinWorkerFraction = float64(s1.ScaleDownMinWorkerFraction) + s.ScaleUpFactor = float64(s1.ScaleUpFactor) + s.ScaleUpMinWorkerFraction = float64(s1.ScaleUpMinWorkerFraction) + return nil } -func (s *EncryptionConfig) MarshalJSON() ([]byte, error) { - type NoMethod EncryptionConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} +// Batch: A representation of a batch workload in the service. 
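// The hand-written UnmarshalJSON above exists because the API may serialize
// doubles as JSON strings; gensupport.JSONFloat64 accepts both encodings. A
// small demonstration with assumed input values (the mixed string/number
// payload is illustrative):
package main

import (
	"encoding/json"
	"fmt"

	dataproc "google.golang.org/api/dataproc/v1"
)

func main() {
	var cfg dataproc.BasicYarnAutoscalingConfig
	data := []byte(`{"scaleUpFactor":"0.5","scaleDownFactor":0.25}`)
	if err := json.Unmarshal(data, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.ScaleUpFactor, cfg.ScaleDownFactor) // 0.5 0.25
}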
+type Batch struct { + // CreateTime: Output only. The time when the batch was created. + CreateTime string `json:"createTime,omitempty"` + // Creator: Output only. The email address of the user who created the batch. + Creator string `json:"creator,omitempty"` + // EnvironmentConfig: Optional. Environment configuration for the batch + // execution. + EnvironmentConfig *EnvironmentConfig `json:"environmentConfig,omitempty"` + // Labels: Optional. The labels to associate with this batch. Label keys must + // contain 1 to 63 characters, and must conform to RFC 1035 + // (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if + // present, must contain 1 to 63 characters, and must conform to RFC 1035 + // (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + // associated with a batch. + Labels map[string]string `json:"labels,omitempty"` + // Name: Output only. The resource name of the batch. + Name string `json:"name,omitempty"` + // Operation: Output only. The resource name of the operation associated with + // this batch. + Operation string `json:"operation,omitempty"` + // PysparkBatch: Optional. PySpark batch config. + PysparkBatch *PySparkBatch `json:"pysparkBatch,omitempty"` + // RuntimeConfig: Optional. Runtime configuration for the batch execution. + RuntimeConfig *RuntimeConfig `json:"runtimeConfig,omitempty"` + // RuntimeInfo: Output only. Runtime information about batch execution. + RuntimeInfo *RuntimeInfo `json:"runtimeInfo,omitempty"` + // SparkBatch: Optional. Spark batch config. + SparkBatch *SparkBatch `json:"sparkBatch,omitempty"` + // SparkRBatch: Optional. SparkR batch config. + SparkRBatch *SparkRBatch `json:"sparkRBatch,omitempty"` + // SparkSqlBatch: Optional. SparkSql batch config. + SparkSqlBatch *SparkSqlBatch `json:"sparkSqlBatch,omitempty"` + // State: Output only. The state of the batch. + // + // Possible values: + // "STATE_UNSPECIFIED" - The batch state is unknown. + // "PENDING" - The batch is created before running. + // "RUNNING" - The batch is running. + // "CANCELLING" - The batch is cancelling. + // "CANCELLED" - The batch cancellation was successful. + // "SUCCEEDED" - The batch completed successfully. + // "FAILED" - The batch is no longer running due to an error. + State string `json:"state,omitempty"` + // StateHistory: Output only. Historical state information for the batch. + StateHistory []*StateHistory `json:"stateHistory,omitempty"` + // StateMessage: Output only. Batch state details, such as a failure + // description if the state is FAILED. + StateMessage string `json:"stateMessage,omitempty"` + // StateTime: Output only. The time when the batch entered a current state. + StateTime string `json:"stateTime,omitempty"` + // Uuid: Output only. A batch UUID (Unique Universal Identifier). The service + // generates this value when it creates the batch. + Uuid string `json:"uuid,omitempty"` -// EndpointConfig: Endpoint config for this cluster -type EndpointConfig struct { - // EnableHttpPortAccess: Optional. If true, enable http access to specific - // ports on the cluster from external sources. Defaults to false. - EnableHttpPortAccess bool `json:"enableHttpPortAccess,omitempty"` - // HttpPorts: Output only. The map of port descriptions to URLs. Will only be - // populated if enable_http_port_access is true. - HttpPorts map[string]string `json:"httpPorts,omitempty"` - // ForceSendFields is a list of field names (e.g. 
"EnableHttpPortAccess") to + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "CreateTime") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "EnableHttpPortAccess") to include - // in API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. See + // NullFields is a list of field names (e.g. "CreateTime") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *EndpointConfig) MarshalJSON() ([]byte, error) { - type NoMethod EndpointConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s Batch) MarshalJSON() ([]byte, error) { + type NoMethod Batch + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// EnvironmentConfig: Environment configuration for a workload. -type EnvironmentConfig struct { - // ExecutionConfig: Optional. Execution configuration for a workload. - ExecutionConfig *ExecutionConfig `json:"executionConfig,omitempty"` - // PeripheralsConfig: Optional. Peripherals configuration that workload has - // access to. - PeripheralsConfig *PeripheralsConfig `json:"peripheralsConfig,omitempty"` - // ForceSendFields is a list of field names (e.g. "ExecutionConfig") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See +// BatchOperationMetadata: Metadata describing the Batch operation. +type BatchOperationMetadata struct { + // Batch: Name of the batch for the operation. + Batch string `json:"batch,omitempty"` + // BatchUuid: Batch UUID for the operation. + BatchUuid string `json:"batchUuid,omitempty"` + // CreateTime: The time when the operation was created. + CreateTime string `json:"createTime,omitempty"` + // Description: Short description of the operation. + Description string `json:"description,omitempty"` + // DoneTime: The time when the operation finished. + DoneTime string `json:"doneTime,omitempty"` + // Labels: Labels associated with the operation. + Labels map[string]string `json:"labels,omitempty"` + // OperationType: The operation type. + // + // Possible values: + // "BATCH_OPERATION_TYPE_UNSPECIFIED" - Batch operation type is unknown. + // "BATCH" - Batch operation type. + OperationType string `json:"operationType,omitempty"` + // Warnings: Warnings encountered during operation execution. + Warnings []string `json:"warnings,omitempty"` + // ForceSendFields is a list of field names (e.g. "Batch") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ExecutionConfig") to include in - // API requests with the JSON null value. By default, fields with empty values - // are omitted from API requests. See + // NullFields is a list of field names (e.g. 
"Batch") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *EnvironmentConfig) MarshalJSON() ([]byte, error) { - type NoMethod EnvironmentConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s BatchOperationMetadata) MarshalJSON() ([]byte, error) { + type NoMethod BatchOperationMetadata + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ExecutionConfig: Execution configuration for a workload. -type ExecutionConfig struct { - // IdleTtl: Optional. Applies to sessions only. The duration to keep the - // session alive while it's idling. Exceeding this threshold causes the session - // to terminate. This field cannot be set on a batch workload. Minimum value is - // 10 minutes; maximum value is 14 days (see JSON representation of Duration - // (https://developers.google.com/protocol-buffers/docs/proto3#json)). Defaults - // to 1 hour if not set. If both ttl and idle_ttl are specified for an - // interactive session, the conditions are treated as OR conditions: the - // workload will be terminated when it has been idle for idle_ttl or when ttl - // has been exceeded, whichever occurs first. - IdleTtl string `json:"idleTtl,omitempty"` - // KmsKey: Optional. The Cloud KMS key to use for encryption. - KmsKey string `json:"kmsKey,omitempty"` - // NetworkTags: Optional. Tags used for network traffic control. - NetworkTags []string `json:"networkTags,omitempty"` - // NetworkUri: Optional. Network URI to connect workload to. - NetworkUri string `json:"networkUri,omitempty"` - // ServiceAccount: Optional. Service account that used to execute workload. - ServiceAccount string `json:"serviceAccount,omitempty"` - // StagingBucket: Optional. A Cloud Storage bucket used to stage workload - // dependencies, config files, and store workload output and other ephemeral - // data, such as Spark history files. If you do not specify a staging bucket, - // Cloud Dataproc will determine a Cloud Storage location according to the - // region where your workload is running, and then create and manage - // project-level, per-location staging and temporary buckets. This field - // requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage - // bucket. - StagingBucket string `json:"stagingBucket,omitempty"` - // SubnetworkUri: Optional. Subnetwork URI to connect workload to. - SubnetworkUri string `json:"subnetworkUri,omitempty"` - // Ttl: Optional. The duration after which the workload will be terminated, - // specified as the JSON representation for Duration - // (https://protobuf.dev/programming-guides/proto3/#json). When the workload - // exceeds this duration, it will be unconditionally terminated without waiting - // for ongoing work to finish. If ttl is not specified for a batch workload, - // the workload will be allowed to run until it exits naturally (or run forever - // without exiting). If ttl is not specified for an interactive session, it - // defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ - // runtime version, it defaults to 4 hours. Minimum value is 10 minutes; - // maximum value is 14 days. 
If both ttl and idle_ttl are specified (for an - // interactive session), the conditions are treated as OR conditions: the - // workload will be terminated when it has been idle for idle_ttl or when ttl - // has been exceeded, whichever occurs first. - Ttl string `json:"ttl,omitempty"` - // ForceSendFields is a list of field names (e.g. "IdleTtl") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See +// Binding: Associates members, or principals, with a role. +type Binding struct { + // Condition: The condition that is associated with this binding.If the + // condition evaluates to true, then this binding applies to the current + // request.If the condition evaluates to false, then this binding does not + // apply to the current request. However, a different role binding might grant + // the same role to one or more of the principals in this binding.To learn + // which resources support conditions in their IAM policies, see the IAM + // documentation + // (https://cloud.google.com/iam/help/conditions/resource-policies). + Condition *Expr `json:"condition,omitempty"` + // Members: Specifies the principals requesting access for a Google Cloud + // resource. members can have the following values: allUsers: A special + // identifier that represents anyone who is on the internet; with or without a + // Google account. allAuthenticatedUsers: A special identifier that represents + // anyone who is authenticated with a Google account or a service account. Does + // not include identities that come from external identity providers (IdPs) + // through identity federation. user:{emailid}: An email address that + // represents a specific Google account. For example, alice@example.com . + // serviceAccount:{emailid}: An email address that represents a Google service + // account. For example, my-other-app@appspot.gserviceaccount.com. + // serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An + // identifier for a Kubernetes service account + // (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). + // For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. + // group:{emailid}: An email address that represents a Google group. For + // example, admins@example.com. domain:{domain}: The G Suite domain (primary) + // that represents all the users of that domain. For example, google.com or + // example.com. + // principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subj + // ect/{subject_attribute_value}: A single identity in a workforce identity + // pool. + // principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/g + // roup/{group_id}: All workforce identities in a group. + // principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/a + // ttribute.{attribute_name}/{attribute_value}: All workforce identities with a + // specific attribute value. + // principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/* + // : All identities in a workforce identity pool. + // principal://iam.googleapis.com/projects/{project_number}/locations/global/wor + // kloadIdentityPools/{pool_id}/subject/{subject_attribute_value}: A single + // identity in a workload identity pool. + // principalSet://iam.googleapis.com/projects/{project_number}/locations/global/ + // workloadIdentityPools/{pool_id}/group/{group_id}: A workload identity pool + // group. 
+ // principalSet://iam.googleapis.com/projects/{project_number}/locations/global/ + // workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}: + // All identities in a workload identity pool with a certain attribute. + // principalSet://iam.googleapis.com/projects/{project_number}/locations/global/ + // workloadIdentityPools/{pool_id}/*: All identities in a workload identity + // pool. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique + // identifier) representing a user that has been recently deleted. For example, + // alice@example.com?uid=123456789012345678901. If the user is recovered, this + // value reverts to user:{emailid} and the recovered user retains the role in + // the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email + // address (plus unique identifier) representing a service account that has + // been recently deleted. For example, + // my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the + // service account is undeleted, this value reverts to serviceAccount:{emailid} + // and the undeleted service account retains the role in the binding. + // deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique + // identifier) representing a Google group that has been recently deleted. For + // example, admins@example.com?uid=123456789012345678901. If the group is + // recovered, this value reverts to group:{emailid} and the recovered group + // retains the role in the binding. + // deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_ + // id}/subject/{subject_attribute_value}: Deleted single identity in a + // workforce identity pool. For example, + // deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-poo + // l-id/subject/my-subject-attribute-value. + Members []string `json:"members,omitempty"` + // Role: Role that is assigned to the list of members, or principals. For + // example, roles/viewer, roles/editor, or roles/owner.For an overview of the + // IAM roles and permissions, see the IAM documentation + // (https://cloud.google.com/iam/docs/roles-overview). For a list of the + // available pre-defined roles, see here + // (https://cloud.google.com/iam/docs/understanding-roles). + Role string `json:"role,omitempty"` + // ForceSendFields is a list of field names (e.g. "Condition") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "IdleTtl") to include in API + // NullFields is a list of field names (e.g. "Condition") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ExecutionConfig) MarshalJSON() ([]byte, error) { - type NoMethod ExecutionConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s Binding) MarshalJSON() ([]byte, error) { + type NoMethod Binding + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// Expr: Represents a textual expression in the Common Expression Language -// (CEL) syntax. CEL is a C-like expression language. 
The syntax and semantics -// of CEL are documented at https://github.com/google/cel-spec.Example -// (Comparison): title: "Summary size limit" description: "Determines if a -// summary is less than 100 chars" expression: "document.summary.size() < 100" -// Example (Equality): title: "Requestor is owner" description: "Determines if -// requestor is the document owner" expression: "document.owner == -// request.auth.claims.email" Example (Logic): title: "Public documents" -// description: "Determine whether the document should be publicly visible" -// expression: "document.type != 'private' && document.type != 'internal'" -// Example (Data Manipulation): title: "Notification string" description: -// "Create a notification string with a timestamp." expression: "'New message -// received at ' + string(document.create_time)" The exact variables and -// functions that may be referenced within an expression are determined by the -// service that evaluates it. See the service documentation for additional -// information. -type Expr struct { - // Description: Optional. Description of the expression. This is a longer text - // which describes the expression, e.g. when hovered over it in a UI. - Description string `json:"description,omitempty"` - // Expression: Textual representation of an expression in Common Expression - // Language syntax. - Expression string `json:"expression,omitempty"` - // Location: Optional. String indicating the location of the expression for - // error reporting, e.g. a file name and a position in the file. - Location string `json:"location,omitempty"` - // Title: Optional. Title for the expression, i.e. a short string describing - // its purpose. This can be used e.g. in UIs which allow to enter the - // expression. - Title string `json:"title,omitempty"` - // ForceSendFields is a list of field names (e.g. "Description") to +// CancelJobRequest: A request to cancel a job. +type CancelJobRequest struct { +} + +// Cluster: Describes the identifying information, config, and status of a +// Dataproc cluster +type Cluster struct { + // ClusterName: Required. The cluster name, which must be unique within a + // project. The name must start with a lowercase letter, and can contain up to + // 51 lowercase letters, numbers, and hyphens. It cannot end with a hyphen. The + // name of a deleted cluster can be reused. + ClusterName string `json:"clusterName,omitempty"` + // ClusterUuid: Output only. A cluster UUID (Unique Universal Identifier). + // Dataproc generates this value when it creates the cluster. + ClusterUuid string `json:"clusterUuid,omitempty"` + // Config: Optional. The cluster config for a cluster of Compute Engine + // Instances. Note that Dataproc may set default values, and values may change + // when clusters are updated.Exactly one of ClusterConfig or + // VirtualClusterConfig must be specified. + Config *ClusterConfig `json:"config,omitempty"` + // Labels: Optional. The labels to associate with this cluster. Label keys must + // contain 1 to 63 characters, and must conform to RFC 1035 + // (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if + // present, must contain 1 to 63 characters, and must conform to RFC 1035 + // (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + // associated with a cluster. + Labels map[string]string `json:"labels,omitempty"` + // Metrics: Output only. Contains cluster daemon metrics such as HDFS and YARN + // stats.Beta Feature: This report is available for testing purposes only. 
It
+ // may be changed before final release.
+ Metrics *ClusterMetrics `json:"metrics,omitempty"`
+ // ProjectId: Required. The Google Cloud Platform project ID that the cluster
+ // belongs to.
+ ProjectId string `json:"projectId,omitempty"`
+ // Status: Output only. Cluster status.
+ Status *ClusterStatus `json:"status,omitempty"`
+ // StatusHistory: Output only. The previous cluster status.
+ StatusHistory []*ClusterStatus `json:"statusHistory,omitempty"`
+ // VirtualClusterConfig: Optional. The virtual cluster config is used when
+ // creating a Dataproc cluster that does not directly control the underlying
+ // compute resources, for example, when creating a Dataproc-on-GKE cluster
+ // (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview).
+ // Dataproc may set default values, and values may change when clusters are
+ // updated. Exactly one of config or virtual_cluster_config must be specified.
+ VirtualClusterConfig *VirtualClusterConfig `json:"virtualClusterConfig,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the server.
+ googleapi.ServerResponse `json:"-"`
+ // ForceSendFields is a list of field names (e.g. "ClusterName") to
 // unconditionally include in API requests. By default, fields with empty or
 // default values are omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
 // details.
 ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Description") to include in API
+ // NullFields is a list of field names (e.g. "ClusterName") to include in API
 // requests with the JSON null value. By default, fields with empty values are
 // omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
 NullFields []string `json:"-"`
}

-func (s *Expr) MarshalJSON() ([]byte, error) {
- type NoMethod Expr
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
-}
-
-// FlinkJob: A Dataproc job for running Apache Flink applications on YARN.
-type FlinkJob struct {
- // Args: Optional. The arguments to pass to the driver. Do not include
- // arguments, such as --conf, that can be set as job properties, since a
- // collision might occur that causes an incorrect job submission.
- Args []string `json:"args,omitempty"`
- // JarFileUris: Optional. HCFS URIs of jar files to add to the CLASSPATHs of
- // the Flink driver and tasks.
- JarFileUris []string `json:"jarFileUris,omitempty"`
- // LoggingConfig: Optional. The runtime log config for job execution.
- LoggingConfig *LoggingConfig `json:"loggingConfig,omitempty"`
- // MainClass: The name of the driver's main class. The jar file that contains
- // the class must be in the default CLASSPATH or specified in jarFileUris.
- MainClass string `json:"mainClass,omitempty"`
- // MainJarFileUri: The HCFS URI of the jar file that contains the main class.
- MainJarFileUri string `json:"mainJarFileUri,omitempty"`
- // Properties: Optional. A mapping of property names to values, used to
- // configure Flink. Properties that conflict with values set by the Dataproc
- // API might be overwritten. Can include properties set in
- // /etc/flink/conf/flink-defaults.conf and classes in user code.
- Properties map[string]string `json:"properties,omitempty"`
- // SavepointUri: Optional. HCFS URI of the savepoint, which contains the last
- // saved progress for starting the current job.
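Every generated struct in this diff pairs `omitempty` tags with ForceSendFields/NullFields escape hatches, and every MarshalJSON uses a `type NoMethod X` alias. A minimal, self-contained sketch of why that alias is needed (a trimmed stand-in type and plain encoding/json instead of the vendored gensupport helper; not part of the generated file):

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed stand-in for the generated Cluster type above.
type Cluster struct {
	ClusterName string            `json:"clusterName,omitempty"`
	Labels      map[string]string `json:"labels,omitempty"`
}

func (s Cluster) MarshalJSON() ([]byte, error) {
	// NoMethod has Cluster's fields but none of its methods, so the nested
	// json.Marshal call cannot recurse back into this MarshalJSON.
	type NoMethod Cluster
	return json.Marshal(NoMethod(s))
}

func main() {
	b, err := json.Marshal(Cluster{ClusterName: "example"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"clusterName":"example"}
}

The vendored gensupport.MarshalJSON does the same aliasing internally, then re-adds fields named in ForceSendFields and emits JSON null for fields named in NullFields.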
- SavepointUri string `json:"savepointUri,omitempty"` - // ForceSendFields is a list of field names (e.g. "Args") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more - // details. - ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Args") to include in API requests - // with the JSON null value. By default, fields with empty values are omitted - // from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. - NullFields []string `json:"-"` -} - -func (s *FlinkJob) MarshalJSON() ([]byte, error) { - type NoMethod FlinkJob - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s Cluster) MarshalJSON() ([]byte, error) { + type NoMethod Cluster + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// GceClusterConfig: Common config settings for resources of Compute Engine -// cluster instances, applicable to all instances in the cluster. -type GceClusterConfig struct { - // ConfidentialInstanceConfig: Optional. Confidential Instance Config for - // clusters using Confidential VMs - // (https://cloud.google.com/compute/confidential-vm/docs). - ConfidentialInstanceConfig *ConfidentialInstanceConfig `json:"confidentialInstanceConfig,omitempty"` - // InternalIpOnly: Optional. This setting applies to subnetwork-enabled - // networks. It is set to true by default in clusters created with image - // versions 2.2.x.When set to true: All cluster VMs have internal IP addresses. - // Google Private Access - // (https://cloud.google.com/vpc/docs/private-google-access) must be enabled to - // access Dataproc and other Google Cloud APIs. Off-cluster dependencies must - // be configured to be accessible without external IP addresses.When set to - // false: Cluster VMs are not restricted to internal IP addresses. Ephemeral - // external IP addresses are assigned to each cluster VM. - InternalIpOnly bool `json:"internalIpOnly,omitempty"` - // Metadata: Optional. The Compute Engine metadata entries to add to all - // instances (see Project and instance metadata - // (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). - Metadata map[string]string `json:"metadata,omitempty"` - // NetworkUri: Optional. The Compute Engine network to be used for machine - // communications. Cannot be specified with subnetwork_uri. If neither - // network_uri nor subnetwork_uri is specified, the "default" network of the - // project is used, if it exists. Cannot be a "Custom Subnet Network" (see - // Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for - // more information).A full URL, partial URI, or short name are valid. - // Examples: - // https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default - // projects/[project_id]/global/networks/default default - NetworkUri string `json:"networkUri,omitempty"` - // NodeGroupAffinity: Optional. Node Group Affinity for sole-tenant clusters. - NodeGroupAffinity *NodeGroupAffinity `json:"nodeGroupAffinity,omitempty"` - // PrivateIpv6GoogleAccess: Optional. The type of IPv6 access for a cluster. - // - // Possible values: - // "PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED" - If unspecified, Compute Engine - // default behavior will apply, which is the same as INHERIT_FROM_SUBNETWORK. 
- // "INHERIT_FROM_SUBNETWORK" - Private access to and from Google Services - // configuration inherited from the subnetwork configuration. This is the - // default Compute Engine behavior. - // "OUTBOUND" - Enables outbound private IPv6 access to Google Services from - // the Dataproc cluster. - // "BIDIRECTIONAL" - Enables bidirectional private IPv6 access between Google - // Services and the Dataproc cluster. - PrivateIpv6GoogleAccess string `json:"privateIpv6GoogleAccess,omitempty"` - // ReservationAffinity: Optional. Reservation Affinity for consuming Zonal - // reservation. - ReservationAffinity *ReservationAffinity `json:"reservationAffinity,omitempty"` - // ServiceAccount: Optional. The Dataproc service account - // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) - // (also see VM Data Plane identity - // (https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) - // used by Dataproc cluster VM instances to access Google Cloud Platform - // services.If not specified, the Compute Engine default service account - // (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) - // is used. - ServiceAccount string `json:"serviceAccount,omitempty"` - // ServiceAccountScopes: Optional. The URIs of service account scopes to be - // included in Compute Engine instances. The following base set of scopes is - // always included: https://www.googleapis.com/auth/cloud.useraccounts.readonly - // https://www.googleapis.com/auth/devstorage.read_write - // https://www.googleapis.com/auth/logging.writeIf no scopes are specified, the - // following defaults are also provided: - // https://www.googleapis.com/auth/bigquery - // https://www.googleapis.com/auth/bigtable.admin.table - // https://www.googleapis.com/auth/bigtable.data - // https://www.googleapis.com/auth/devstorage.full_control - ServiceAccountScopes []string `json:"serviceAccountScopes,omitempty"` - // ShieldedInstanceConfig: Optional. Shielded Instance Config for clusters - // using Compute Engine Shielded VMs - // (https://cloud.google.com/security/shielded-cloud/shielded-vm). - ShieldedInstanceConfig *ShieldedInstanceConfig `json:"shieldedInstanceConfig,omitempty"` - // SubnetworkUri: Optional. The Compute Engine subnetwork to be used for - // machine communications. Cannot be specified with network_uri.A full URL, - // partial URI, or short name are valid. Examples: - // https://www.googleapis.com/compute/v1/projects/[project_id]/regions/[region]/subnetworks/sub0 - // projects/[project_id]/regions/[region]/subnetworks/sub0 sub0 - SubnetworkUri string `json:"subnetworkUri,omitempty"` - // Tags: The Compute Engine tags to add to all instances (see Tagging instances - // (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). - Tags []string `json:"tags,omitempty"` - // ZoneUri: Optional. The Compute Engine zone where the Dataproc cluster will - // be located. If omitted, the service will pick a zone in the cluster's - // Compute Engine region. On a get request, zone will always be present.A full - // URL, partial URI, or short name are valid. Examples: - // https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone] - // projects/[project_id]/zones/[zone] [zone] - ZoneUri string `json:"zoneUri,omitempty"` - // ForceSendFields is a list of field names (e.g. "ConfidentialInstanceConfig") - // to unconditionally include in API requests. 
By default, fields with empty or
+// ClusterConfig: The cluster config.
+type ClusterConfig struct {
+ // AutoscalingConfig: Optional. Autoscaling config for the policy associated
+ // with the cluster. Cluster does not autoscale if this field is unset.
+ AutoscalingConfig *AutoscalingConfig `json:"autoscalingConfig,omitempty"`
+ // AuxiliaryNodeGroups: Optional. The node group settings.
+ AuxiliaryNodeGroups []*AuxiliaryNodeGroup `json:"auxiliaryNodeGroups,omitempty"`
+ // ConfigBucket: Optional. A Cloud Storage bucket used to stage job
+ // dependencies, config files, and job driver console output. If you do not
+ // specify a staging bucket, Cloud Dataproc will determine a Cloud Storage
+ // location (US, ASIA, or EU) for your cluster's staging bucket according to
+ // the Compute Engine zone where your cluster is deployed, and then create and
+ // manage this project-level, per-location bucket (see Dataproc staging and
+ // temp buckets
+ // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
+ // This field requires a Cloud Storage bucket name, not a gs://... URI to a
+ // Cloud Storage bucket.
+ ConfigBucket string `json:"configBucket,omitempty"`
+ // DataprocMetricConfig: Optional. The config for Dataproc metrics.
+ DataprocMetricConfig *DataprocMetricConfig `json:"dataprocMetricConfig,omitempty"`
+ // EncryptionConfig: Optional. Encryption settings for the cluster.
+ EncryptionConfig *EncryptionConfig `json:"encryptionConfig,omitempty"`
+ // EndpointConfig: Optional. Port/endpoint configuration for this cluster
+ EndpointConfig *EndpointConfig `json:"endpointConfig,omitempty"`
+ // GceClusterConfig: Optional. The shared Compute Engine config settings for
+ // all instances in a cluster.
+ GceClusterConfig *GceClusterConfig `json:"gceClusterConfig,omitempty"`
+ // GkeClusterConfig: Optional. BETA. The Kubernetes Engine config for Dataproc
+ // clusters deployed to Kubernetes. These config settings are mutually
+ // exclusive with Compute Engine-based options, such as gce_cluster_config,
+ // master_config, worker_config, secondary_worker_config, and
+ // autoscaling_config.
+ GkeClusterConfig *GkeClusterConfig `json:"gkeClusterConfig,omitempty"`
+ // InitializationActions: Optional. Commands to execute on each node after
+ // config is completed. By default, executables are run on master and all
+ // worker nodes. You can test a node's role metadata to run an executable on a
+ // master or worker node, as shown below using curl (you can also use wget):
+ // ROLE=$(curl -H Metadata-Flavor:Google
+ // http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[
+ // "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ...
+ // worker specific actions ... fi
+ InitializationActions []*NodeInitializationAction `json:"initializationActions,omitempty"`
+ // LifecycleConfig: Optional. Lifecycle setting for the cluster.
+ LifecycleConfig *LifecycleConfig `json:"lifecycleConfig,omitempty"`
+ // MasterConfig: Optional. The Compute Engine config settings for the cluster's
+ // master instance.
+ MasterConfig *InstanceGroupConfig `json:"masterConfig,omitempty"`
+ // MetastoreConfig: Optional. Metastore configuration.
+ MetastoreConfig *MetastoreConfig `json:"metastoreConfig,omitempty"`
+ // SecondaryWorkerConfig: Optional.
The Compute Engine config settings for a + // cluster's secondary worker instances + SecondaryWorkerConfig *InstanceGroupConfig `json:"secondaryWorkerConfig,omitempty"` + // SecurityConfig: Optional. Security settings for the cluster. + SecurityConfig *SecurityConfig `json:"securityConfig,omitempty"` + // SoftwareConfig: Optional. The config settings for cluster software. + SoftwareConfig *SoftwareConfig `json:"softwareConfig,omitempty"` + // TempBucket: Optional. A Cloud Storage bucket used to store ephemeral cluster + // and jobs data, such as Spark and MapReduce history files. If you do not + // specify a temp bucket, Dataproc will determine a Cloud Storage location (US, + // ASIA, or EU) for your cluster's temp bucket according to the Compute Engine + // zone where your cluster is deployed, and then create and manage this + // project-level, per-location bucket. The default bucket has a TTL of 90 days, + // but you can use any TTL (or none) if you specify a bucket (see Dataproc + // staging and temp buckets + // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + // This field requires a Cloud Storage bucket name, not a gs://... URI to a + // Cloud Storage bucket. + TempBucket string `json:"tempBucket,omitempty"` + // WorkerConfig: Optional. The Compute Engine config settings for the cluster's + // worker instances. + WorkerConfig *InstanceGroupConfig `json:"workerConfig,omitempty"` + // ForceSendFields is a list of field names (e.g. "AutoscalingConfig") to + // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ConfidentialInstanceConfig") to - // include in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. See + // NullFields is a list of field names (e.g. "AutoscalingConfig") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *GceClusterConfig) MarshalJSON() ([]byte, error) { - type NoMethod GceClusterConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s ClusterConfig) MarshalJSON() ([]byte, error) { + type NoMethod ClusterConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// GetIamPolicyRequest: Request message for GetIamPolicy method. -type GetIamPolicyRequest struct { - // Options: OPTIONAL: A GetPolicyOptions object for specifying options to - // GetIamPolicy. - Options *GetPolicyOptions `json:"options,omitempty"` - // ForceSendFields is a list of field names (e.g. "Options") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See +// ClusterMetrics: Contains cluster daemon metrics, such as HDFS and YARN +// stats.Beta Feature: This report is available for testing purposes only. It +// may be changed before final release. +type ClusterMetrics struct { + // HdfsMetrics: The HDFS metrics. + HdfsMetrics map[string]string `json:"hdfsMetrics,omitempty"` + // YarnMetrics: YARN metrics. 
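A hedged usage sketch of the new ClusterConfig: the ConfigBucket and TempBucket comments above stress that these fields take bucket names, not gs://... URIs. The import path matches the vendored client this diff touches; the bucket and machine names are placeholders, and InstanceGroupConfig fields are assumed from the same generated package:

package main

import (
	"fmt"

	dataproc "google.golang.org/api/dataproc/v1"
)

func main() {
	cfg := &dataproc.ClusterConfig{
		// Bucket *names* only; a gs://... URI is rejected for these fields.
		ConfigBucket: "my-staging-bucket",
		TempBucket:   "my-temp-bucket",
		MasterConfig: &dataproc.InstanceGroupConfig{
			NumInstances:   1,
			MachineTypeUri: "n1-standard-4",
		},
	}
	fmt.Println(cfg.ConfigBucket, cfg.TempBucket)
}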
+ YarnMetrics map[string]string `json:"yarnMetrics,omitempty"` + // ForceSendFields is a list of field names (e.g. "HdfsMetrics") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Options") to include in API + // NullFields is a list of field names (e.g. "HdfsMetrics") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *GetIamPolicyRequest) MarshalJSON() ([]byte, error) { - type NoMethod GetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s ClusterMetrics) MarshalJSON() ([]byte, error) { + type NoMethod ClusterMetrics + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// GetPolicyOptions: Encapsulates settings provided to GetIamPolicy. -type GetPolicyOptions struct { - // RequestedPolicyVersion: Optional. The maximum policy version that will be - // used to format the policy.Valid values are 0, 1, and 3. Requests specifying - // an invalid value will be rejected.Requests for policies with any conditional - // role bindings must specify version 3. Policies with no conditional role - // bindings may specify any valid value or leave the field unset.The policy in - // the response might use the policy version that you specified, or it might - // use a lower policy version. For example, if you specify version 3, but the - // policy has no conditional role bindings, the response uses version 1.To - // learn which resources support conditions in their IAM policies, see the IAM - // documentation - // (https://cloud.google.com/iam/help/conditions/resource-policies). - RequestedPolicyVersion int64 `json:"requestedPolicyVersion,omitempty"` - // ForceSendFields is a list of field names (e.g. "RequestedPolicyVersion") to +// ClusterOperation: The cluster operation triggered by a workflow. +type ClusterOperation struct { + // Done: Output only. Indicates the operation is done. + Done bool `json:"done,omitempty"` + // Error: Output only. Error, if operation failed. + Error string `json:"error,omitempty"` + // OperationId: Output only. The id of the cluster operation. + OperationId string `json:"operationId,omitempty"` + // ForceSendFields is a list of field names (e.g. "Done") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Done") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ClusterOperation) MarshalJSON() ([]byte, error) { + type NoMethod ClusterOperation + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ClusterOperationMetadata: Metadata describing the operation. +type ClusterOperationMetadata struct { + // ChildOperationIds: Output only. 
Child operation ids + ChildOperationIds []string `json:"childOperationIds,omitempty"` + // ClusterName: Output only. Name of the cluster for the operation. + ClusterName string `json:"clusterName,omitempty"` + // ClusterUuid: Output only. Cluster UUID for the operation. + ClusterUuid string `json:"clusterUuid,omitempty"` + // Description: Output only. Short description of operation. + Description string `json:"description,omitempty"` + // Labels: Output only. Labels associated with the operation + Labels map[string]string `json:"labels,omitempty"` + // OperationType: Output only. The operation type. + OperationType string `json:"operationType,omitempty"` + // Status: Output only. Current operation status. + Status *ClusterOperationStatus `json:"status,omitempty"` + // StatusHistory: Output only. The previous operation status. + StatusHistory []*ClusterOperationStatus `json:"statusHistory,omitempty"` + // Warnings: Output only. Errors encountered during operation execution. + Warnings []string `json:"warnings,omitempty"` + // ForceSendFields is a list of field names (e.g. "ChildOperationIds") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "RequestedPolicyVersion") to - // include in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. See + // NullFields is a list of field names (e.g. "ChildOperationIds") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *GetPolicyOptions) MarshalJSON() ([]byte, error) { - type NoMethod GetPolicyOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s ClusterOperationMetadata) MarshalJSON() ([]byte, error) { + type NoMethod ClusterOperationMetadata + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// GkeClusterConfig: The cluster's GKE config. -type GkeClusterConfig struct { - // GkeClusterTarget: Optional. A target GKE cluster to deploy to. It must be in - // the same project and region as the Dataproc cluster (the GKE cluster can be - // zonal or regional). Format: - // 'projects/{project}/locations/{location}/clusters/{cluster_id}' - GkeClusterTarget string `json:"gkeClusterTarget,omitempty"` - // NamespacedGkeDeploymentTarget: Optional. Deprecated. Use gkeClusterTarget. - // Used only for the deprecated beta. A target for the deployment. - NamespacedGkeDeploymentTarget *NamespacedGkeDeploymentTarget `json:"namespacedGkeDeploymentTarget,omitempty"` - // NodePoolTarget: Optional. GKE node pools where workloads will be scheduled. - // At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. - // If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT - // GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All - // node pools must have the same location settings. - NodePoolTarget []*GkeNodePoolTarget `json:"nodePoolTarget,omitempty"` - // ForceSendFields is a list of field names (e.g. "GkeClusterTarget") to - // unconditionally include in API requests. 
By default, fields with empty or - // default values are omitted from API requests. See +// ClusterOperationStatus: The status of the operation. +type ClusterOperationStatus struct { + // Details: Output only. A message containing any operation metadata details. + Details string `json:"details,omitempty"` + // InnerState: Output only. A message containing the detailed operation state. + InnerState string `json:"innerState,omitempty"` + // State: Output only. A message containing the operation state. + // + // Possible values: + // "UNKNOWN" - Unused. + // "PENDING" - The operation has been created. + // "RUNNING" - The operation is running. + // "DONE" - The operation is done; either cancelled or completed. + State string `json:"state,omitempty"` + // StateStartTime: Output only. The time this state was entered. + StateStartTime string `json:"stateStartTime,omitempty"` + // ForceSendFields is a list of field names (e.g. "Details") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "GkeClusterTarget") to include in - // API requests with the JSON null value. By default, fields with empty values - // are omitted from API requests. See + // NullFields is a list of field names (e.g. "Details") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *GkeClusterConfig) MarshalJSON() ([]byte, error) { - type NoMethod GkeClusterConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s ClusterOperationStatus) MarshalJSON() ([]byte, error) { + type NoMethod ClusterOperationStatus + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// GkeNodeConfig: Parameters that describe cluster nodes. -type GkeNodeConfig struct { - // Accelerators: Optional. A list of hardware accelerators - // (https://cloud.google.com/compute/docs/gpus) to attach to each node. - Accelerators []*GkeNodePoolAcceleratorConfig `json:"accelerators,omitempty"` - // BootDiskKmsKey: Optional. The Customer Managed Encryption Key (CMEK) - // (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek) used to - // encrypt the boot disk attached to each node in the node pool. Specify the - // key using the following format: - // projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypt - // o_key} - BootDiskKmsKey string `json:"bootDiskKmsKey,omitempty"` - // LocalSsdCount: Optional. The number of local SSD disks to attach to the - // node, which is limited by the maximum number of disks allowable per zone - // (see Adding Local SSDs - // (https://cloud.google.com/compute/docs/disks/local-ssd)). - LocalSsdCount int64 `json:"localSsdCount,omitempty"` - // MachineType: Optional. The name of a Compute Engine machine type - // (https://cloud.google.com/compute/docs/machine-types). - MachineType string `json:"machineType,omitempty"` - // MinCpuPlatform: Optional. Minimum CPU platform - // (https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) - // to be used by this instance. The instance may be scheduled on the specified - // or a newer CPU platform. 
Specify the friendly names of CPU platforms, such
- // as "Intel Haswell" or "Intel Sandy Bridge".
- MinCpuPlatform string `json:"minCpuPlatform,omitempty"`
- // Preemptible: Optional. Whether the nodes are created as legacy preemptible
- // VM instances (https://cloud.google.com/compute/docs/instances/preemptible).
- // Also see Spot VMs, preemptible VM instances without a maximum lifetime.
- // Legacy and Spot preemptible nodes cannot be used in a node pool with the
- // CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not
- // assigned (the DEFAULT node pool will assume the CONTROLLER role).
- Preemptible bool `json:"preemptible,omitempty"`
- // Spot: Optional. Whether the nodes are created as Spot VM instances
- // (https://cloud.google.com/compute/docs/instances/spot). Spot VMs are the
- // latest update to legacy preemptible VMs. Spot VMs do not have a maximum
- // lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool
- // with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role
- // is not assigned (the DEFAULT node pool will assume the CONTROLLER role).
- Spot bool `json:"spot,omitempty"`
- // ForceSendFields is a list of field names (e.g. "Accelerators") to
+// ClusterSelector: A selector that chooses target cluster for jobs based on
+// metadata.
+type ClusterSelector struct {
+ // ClusterLabels: Required. The cluster labels. Cluster must have all labels to
+ // match.
+ ClusterLabels map[string]string `json:"clusterLabels,omitempty"`
+ // Zone: Optional. The zone where workflow process executes. This parameter
+ // does not affect the selection of the cluster.If unspecified, the zone of the
+ // first cluster matching the selector is used.
+ Zone string `json:"zone,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "ClusterLabels") to
 // unconditionally include in API requests. By default, fields with empty or
 // default values are omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
 // details.
 ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Accelerators") to include in API
+ // NullFields is a list of field names (e.g. "ClusterLabels") to include in API
 // requests with the JSON null value. By default, fields with empty values are
 // omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
 NullFields []string `json:"-"`
}

-func (s *GkeNodeConfig) MarshalJSON() ([]byte, error) {
- type NoMethod GkeNodeConfig
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+func (s ClusterSelector) MarshalJSON() ([]byte, error) {
+ type NoMethod ClusterSelector
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

-// GkeNodePoolAcceleratorConfig: A GkeNodeConfigAcceleratorConfig represents a
-// Hardware Accelerator request for a node pool.
-type GkeNodePoolAcceleratorConfig struct {
- // AcceleratorCount: The number of accelerator cards exposed to an instance.
- AcceleratorCount int64 `json:"acceleratorCount,omitempty,string"`
- // AcceleratorType: The accelerator type resource name (see GPUs on Compute
- // Engine).
- AcceleratorType string `json:"acceleratorType,omitempty"`
- // GpuPartitionSize: Size of partitions to create on the GPU. Valid values are
- // described in the NVIDIA mig user guide
- // (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning).
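Several int64 fields in this file, such as acceleratorCount above and the ConsolidatedExecutorSummary byte counters later in the diff, carry the `,string` struct-tag option. A self-contained sketch of what that option does (stand-in type; not part of the vendored file): 64-bit integers round-trip as JSON strings so JavaScript clients do not lose precision.

package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in mirroring the acceleratorCount tag seen above.
type acceleratorConfig struct {
	AcceleratorCount int64  `json:"acceleratorCount,omitempty,string"`
	AcceleratorType  string `json:"acceleratorType,omitempty"`
}

func main() {
	b, _ := json.Marshal(acceleratorConfig{AcceleratorCount: 2, AcceleratorType: "nvidia-tesla-t4"})
	fmt.Println(string(b)) // {"acceleratorCount":"2","acceleratorType":"nvidia-tesla-t4"}

	var c acceleratorConfig
	_ = json.Unmarshal([]byte(`{"acceleratorCount":"8"}`), &c)
	fmt.Println(c.AcceleratorCount) // 8
}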
- GpuPartitionSize string `json:"gpuPartitionSize,omitempty"` - // ForceSendFields is a list of field names (e.g. "AcceleratorCount") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See +// ClusterStatus: The status of a cluster and its instances. +type ClusterStatus struct { + // Detail: Optional. Output only. Details of cluster's state. + Detail string `json:"detail,omitempty"` + // State: Output only. The cluster's state. + // + // Possible values: + // "UNKNOWN" - The cluster state is unknown. + // "CREATING" - The cluster is being created and set up. It is not ready for + // use. + // "RUNNING" - The cluster is currently running and healthy. It is ready for + // use.Note: The cluster state changes from "creating" to "running" status + // after the master node(s), first two primary worker nodes (and the last + // primary worker node if primary workers > 2) are running. + // "ERROR" - The cluster encountered an error. It is not ready for use. + // "ERROR_DUE_TO_UPDATE" - The cluster has encountered an error while being + // updated. Jobs can be submitted to the cluster, but the cluster cannot be + // updated. + // "DELETING" - The cluster is being deleted. It cannot be used. + // "UPDATING" - The cluster is being updated. It continues to accept and + // process jobs. + // "STOPPING" - The cluster is being stopped. It cannot be used. + // "STOPPED" - The cluster is currently stopped. It is not ready for use. + // "STARTING" - The cluster is being started. It is not ready for use. + // "REPAIRING" - The cluster is being repaired. It is not ready for use. + State string `json:"state,omitempty"` + // StateStartTime: Output only. Time when this state was entered (see JSON + // representation of Timestamp + // (https://developers.google.com/protocol-buffers/docs/proto3#json)). + StateStartTime string `json:"stateStartTime,omitempty"` + // Substate: Output only. Additional state information that includes status + // reported by the agent. + // + // Possible values: + // "UNSPECIFIED" - The cluster substate is unknown. + // "UNHEALTHY" - The cluster is known to be in an unhealthy state (for + // example, critical daemons are not running or HDFS capacity is + // exhausted).Applies to RUNNING state. + // "STALE_STATUS" - The agent-reported status is out of date (may occur if + // Dataproc loses communication with Agent).Applies to RUNNING state. + Substate string `json:"substate,omitempty"` + // ForceSendFields is a list of field names (e.g. "Detail") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AcceleratorCount") to include in - // API requests with the JSON null value. By default, fields with empty values - // are omitted from API requests. See + // NullFields is a list of field names (e.g. "Detail") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
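The ClusterStatus.State enum above is what callers poll while waiting for a cluster to converge. A hedged, illustrative helper (not part of the vendored package) covering a subset of the documented states:

package main

import "fmt"

// clusterReady maps a ClusterStatus.State string to (terminal?, error).
// The cases mirror the generated "Possible values" list above.
func clusterReady(state string) (done bool, err error) {
	switch state {
	case "RUNNING":
		return true, nil
	case "ERROR", "ERROR_DUE_TO_UPDATE":
		return true, fmt.Errorf("cluster entered state %s", state)
	case "CREATING", "STARTING", "UPDATING", "REPAIRING":
		return false, nil // still converging; keep polling
	default:
		return false, nil // UNKNOWN, STOPPING, STOPPED, DELETING, etc.
	}
}

func main() {
	for _, s := range []string{"CREATING", "RUNNING", "ERROR"} {
		done, err := clusterReady(s)
		fmt.Println(s, done, err)
	}
}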
NullFields []string `json:"-"` } -func (s *GkeNodePoolAcceleratorConfig) MarshalJSON() ([]byte, error) { - type NoMethod GkeNodePoolAcceleratorConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s ClusterStatus) MarshalJSON() ([]byte, error) { + type NoMethod ClusterStatus + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// GkeNodePoolAutoscalingConfig: GkeNodePoolAutoscaling contains information -// the cluster autoscaler needs to adjust the size of the node pool to the -// current cluster usage. -type GkeNodePoolAutoscalingConfig struct { - // MaxNodeCount: The maximum number of nodes in the node pool. Must be >= - // min_node_count, and must be > 0. Note: Quota must be sufficient to scale up - // the cluster. - MaxNodeCount int64 `json:"maxNodeCount,omitempty"` - // MinNodeCount: The minimum number of nodes in the node pool. Must be >= 0 and - // <= max_node_count. - MinNodeCount int64 `json:"minNodeCount,omitempty"` - // ForceSendFields is a list of field names (e.g. "MaxNodeCount") to +// ClusterToRepair: Cluster to be repaired +type ClusterToRepair struct { + // ClusterRepairAction: Required. Repair action to take on the cluster + // resource. + // + // Possible values: + // "CLUSTER_REPAIR_ACTION_UNSPECIFIED" - No action will be taken by default. + // "REPAIR_ERROR_DUE_TO_UPDATE_CLUSTER" - Repair cluster in + // ERROR_DUE_TO_UPDATE states. + ClusterRepairAction string `json:"clusterRepairAction,omitempty"` + // ForceSendFields is a list of field names (e.g. "ClusterRepairAction") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "MaxNodeCount") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "ClusterRepairAction") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *GkeNodePoolAutoscalingConfig) MarshalJSON() ([]byte, error) { - type NoMethod GkeNodePoolAutoscalingConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s ClusterToRepair) MarshalJSON() ([]byte, error) { + type NoMethod ClusterToRepair + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// GkeNodePoolConfig: The configuration of a GKE node pool used by a -// Dataproc-on-GKE cluster -// (https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster). -type GkeNodePoolConfig struct { - // Autoscaling: Optional. The autoscaler configuration for this node pool. The - // autoscaler is enabled only when a valid configuration is present. - Autoscaling *GkeNodePoolAutoscalingConfig `json:"autoscaling,omitempty"` - // Config: Optional. The node pool configuration. - Config *GkeNodeConfig `json:"config,omitempty"` - // Locations: Optional. 
The list of Compute Engine zones - // (https://cloud.google.com/compute/docs/zones#available) where node pool - // nodes associated with a Dataproc on GKE virtual cluster will be - // located.Note: All node pools associated with a virtual cluster must be - // located in the same region as the virtual cluster, and they must be located - // in the same zone within that region.If a location is not specified during - // node pool creation, Dataproc on GKE will choose the zone. - Locations []string `json:"locations,omitempty"` - // ForceSendFields is a list of field names (e.g. "Autoscaling") to - // unconditionally include in API requests. By default, fields with empty or +// ConfidentialInstanceConfig: Confidential Instance Config for clusters using +// Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs) +type ConfidentialInstanceConfig struct { + // EnableConfidentialCompute: Optional. Defines whether the instance should + // have confidential compute enabled. + EnableConfidentialCompute bool `json:"enableConfidentialCompute,omitempty"` + // ForceSendFields is a list of field names (e.g. "EnableConfidentialCompute") + // to unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Autoscaling") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "EnableConfidentialCompute") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *GkeNodePoolConfig) MarshalJSON() ([]byte, error) { - type NoMethod GkeNodePoolConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// GkeNodePoolTarget: GKE node pools that Dataproc workloads run on. -type GkeNodePoolTarget struct { - // NodePool: Required. The target GKE node pool. Format: - // 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_p - // ool}' - NodePool string `json:"nodePool,omitempty"` - // NodePoolConfig: Input only. The configuration for the GKE node pool.If - // specified, Dataproc attempts to create a node pool with the specified shape. - // If one with the same name already exists, it is verified against all - // specified fields. If a field differs, the virtual cluster creation will - // fail.If omitted, any node pool with the specified name is used. If a node - // pool with the specified name does not exist, Dataproc create a node pool - // with default values.This is an input only field. It will not be returned by - // the API. - NodePoolConfig *GkeNodePoolConfig `json:"nodePoolConfig,omitempty"` - // Roles: Required. The roles associated with the GKE node pool. - // - // Possible values: - // "ROLE_UNSPECIFIED" - Role is unspecified. - // "DEFAULT" - At least one node pool must have the DEFAULT role. Work - // assigned to a role that is not associated with a node pool is assigned to - // the node pool with the DEFAULT role. For example, work assigned to the - // CONTROLLER role will be assigned to the node pool with the DEFAULT role if - // no node pool has the CONTROLLER role. 
- // "CONTROLLER" - Run work associated with the Dataproc control plane (for - // example, controllers and webhooks). Very low resource requirements. - // "SPARK_DRIVER" - Run work associated with a Spark driver of a job. - // "SPARK_EXECUTOR" - Run work associated with a Spark executor of a job. - Roles []string `json:"roles,omitempty"` - // ForceSendFields is a list of field names (e.g. "NodePool") to +func (s ConfidentialInstanceConfig) MarshalJSON() ([]byte, error) { + type NoMethod ConfidentialInstanceConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ConsolidatedExecutorSummary: Consolidated summary about executors used by +// the application. +type ConsolidatedExecutorSummary struct { + ActiveTasks int64 `json:"activeTasks,omitempty"` + CompletedTasks int64 `json:"completedTasks,omitempty"` + Count int64 `json:"count,omitempty"` + DiskUsed int64 `json:"diskUsed,omitempty,string"` + FailedTasks int64 `json:"failedTasks,omitempty"` + IsExcluded int64 `json:"isExcluded,omitempty"` + MaxMemory int64 `json:"maxMemory,omitempty,string"` + MemoryMetrics *MemoryMetrics `json:"memoryMetrics,omitempty"` + MemoryUsed int64 `json:"memoryUsed,omitempty,string"` + RddBlocks int64 `json:"rddBlocks,omitempty"` + TotalCores int64 `json:"totalCores,omitempty"` + TotalDurationMillis int64 `json:"totalDurationMillis,omitempty,string"` + TotalGcTimeMillis int64 `json:"totalGcTimeMillis,omitempty,string"` + TotalInputBytes int64 `json:"totalInputBytes,omitempty,string"` + TotalShuffleRead int64 `json:"totalShuffleRead,omitempty,string"` + TotalShuffleWrite int64 `json:"totalShuffleWrite,omitempty,string"` + TotalTasks int64 `json:"totalTasks,omitempty"` + // ForceSendFields is a list of field names (e.g. "ActiveTasks") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NodePool") to include in API + // NullFields is a list of field names (e.g. "ActiveTasks") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *GkeNodePoolTarget) MarshalJSON() ([]byte, error) { - type NoMethod GkeNodePoolTarget - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s ConsolidatedExecutorSummary) MarshalJSON() ([]byte, error) { + type NoMethod ConsolidatedExecutorSummary + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig: Encryption settings -// for encrypting workflow template job arguments. -type GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig struct { - // KmsKey: Optional. 
The Cloud KMS key name to use for encrypting workflow - // template job arguments.When this key is provided, the following - // workflow template job arguments - // (https://cloud.google.com/dataproc/docs/concepts/workflows/use-workflows#adding_jobs_to_a_template), - // if present, are CMEK encrypted - // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_workflow_template_data): - // FlinkJob args - // (https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob) - // HadoopJob args - // (https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob) - // SparkJob args - // (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob) - // SparkRJob args - // (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob) - // PySparkJob args - // (https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob) - // SparkSqlJob - // (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob) - // scriptVariables and queryList.queries HiveJob - // (https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob) - // scriptVariables and queryList.queries PigJob - // (https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob) - // scriptVariables and queryList.queries PrestoJob - // (https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob) - // scriptVariables and queryList.queries - KmsKey string `json:"kmsKey,omitempty"` - // ForceSendFields is a list of field names (e.g. "KmsKey") to unconditionally +// DataprocMetricConfig: Dataproc metric config. +type DataprocMetricConfig struct { + // Metrics: Required. Metrics sources to enable. + Metrics []*Metric `json:"metrics,omitempty"` + // ForceSendFields is a list of field names (e.g. "Metrics") to unconditionally // include in API requests. By default, fields with empty or default values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "KmsKey") to include in API + // NullFields is a list of field names (e.g. "Metrics") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig) MarshalJSON() ([]byte, error) { - type NoMethod GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s DataprocMetricConfig) MarshalJSON() ([]byte, error) { + type NoMethod DataprocMetricConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// HadoopJob: A Dataproc job for running Apache Hadoop MapReduce -// (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) -// jobs on Apache Hadoop YARN -// (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). -type HadoopJob struct { - // ArchiveUris: Optional. HCFS URIs of archives to be extracted in the working - // directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, - // .tar.gz, .tgz, or .zip. - ArchiveUris []string `json:"archiveUris,omitempty"` - // Args: Optional. The arguments to pass to the driver.
Do not include - // arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, - // since a collision might occur that causes an incorrect job submission. - Args []string `json:"args,omitempty"` - // FileUris: Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be - // copied to the working directory of Hadoop drivers and distributed tasks. - // Useful for naively parallel tasks. - FileUris []string `json:"fileUris,omitempty"` - // JarFileUris: Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop - // driver and tasks. - JarFileUris []string `json:"jarFileUris,omitempty"` - // LoggingConfig: Optional. The runtime log config for job execution. - LoggingConfig *LoggingConfig `json:"loggingConfig,omitempty"` - // MainClass: The name of the driver's main class. The jar file containing the - // class must be in the default CLASSPATH or specified in jar_file_uris. - MainClass string `json:"mainClass,omitempty"` - // MainJarFileUri: The HCFS URI of the jar file containing the main class. - // Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' - // 'hdfs:/tmp/test-samples/custom-wordcount.jar' - // 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar' - MainJarFileUri string `json:"mainJarFileUri,omitempty"` - // Properties: Optional. A mapping of property names to values, used to - // configure Hadoop. Properties that conflict with values set by the Dataproc - // API might be overwritten. Can include properties set in - // /etc/hadoop/conf/*-site and classes in user code. - Properties map[string]string `json:"properties,omitempty"` - // ForceSendFields is a list of field names (e.g. "ArchiveUris") to +// DiagnoseClusterRequest: A request to collect cluster diagnostic information. +type DiagnoseClusterRequest struct { + // DiagnosisInterval: Optional. Time interval in which diagnosis should be + // carried out on the cluster. + DiagnosisInterval *Interval `json:"diagnosisInterval,omitempty"` + // Job: Optional. DEPRECATED Specifies the job on which diagnosis is to be + // performed. Format: projects/{project}/regions/{region}/jobs/{job} + Job string `json:"job,omitempty"` + // Jobs: Optional. Specifies a list of jobs on which diagnosis is to be + // performed. Format: projects/{project}/regions/{region}/jobs/{job} + Jobs []string `json:"jobs,omitempty"` + // TarballAccess: Optional. The access type to the diagnostic + // tarball. If not specified, falls back to default access of the bucket + // + // Possible values: + // "TARBALL_ACCESS_UNSPECIFIED" - Tarball Access unspecified. Falls back to + // default access of the bucket + // "GOOGLE_CLOUD_SUPPORT" - Google Cloud Support group has read access to the + // diagnostic tarball + // "GOOGLE_DATAPROC_DIAGNOSE" - Google Cloud Dataproc Diagnose service + // account has read access to the diagnostic tarball + TarballAccess string `json:"tarballAccess,omitempty"` + // TarballGcsDir: Optional. The output Cloud Storage directory for + // the diagnostic tarball. If not specified, a task-specific directory in the + // cluster's staging bucket will be used. + TarballGcsDir string `json:"tarballGcsDir,omitempty"` + // YarnApplicationId: Optional. DEPRECATED Specifies the yarn application on + // which diagnosis is to be performed. + YarnApplicationId string `json:"yarnApplicationId,omitempty"` + // YarnApplicationIds: Optional. Specifies a list of yarn applications on which + // diagnosis is to be performed.
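A short sketch of building the DiagnoseClusterRequest defined above, preferring the plural Jobs/YarnApplicationIds fields over their DEPRECATED singular counterparts. The import path is assumed to be the vendored google.golang.org/api/dataproc/v1 package, and the project, region, and job names are placeholders:

package main

import (
	"encoding/json"
	"fmt"

	dataproc "google.golang.org/api/dataproc/v1" // assumed import path
)

func main() {
	req := &dataproc.DiagnoseClusterRequest{
		// The RFC 3339 window the diagnosis should cover.
		DiagnosisInterval: &dataproc.Interval{
			StartTime: "2026-02-11T00:00:00Z",
			EndTime:   "2026-02-12T00:00:00Z",
		},
		// Hypothetical job name in the documented format.
		Jobs:          []string{"projects/my-project/regions/us-central1/jobs/my-job"},
		TarballAccess: "GOOGLE_DATAPROC_DIAGNOSE",
	}
	b, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}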
+ YarnApplicationIds []string `json:"yarnApplicationIds,omitempty"` + // ForceSendFields is a list of field names (e.g. "DiagnosisInterval") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ArchiveUris") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "DiagnosisInterval") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *HadoopJob) MarshalJSON() ([]byte, error) { - type NoMethod HadoopJob - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s DiagnoseClusterRequest) MarshalJSON() ([]byte, error) { + type NoMethod DiagnoseClusterRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// HiveJob: A Dataproc job for running Apache Hive (https://hive.apache.org/) -// queries on YARN. -type HiveJob struct { - // ContinueOnFailure: Optional. Whether to continue executing queries if a - // query fails. The default value is false. Setting to true can be useful when - // executing independent parallel queries. - ContinueOnFailure bool `json:"continueOnFailure,omitempty"` - // JarFileUris: Optional. HCFS URIs of jar files to add to the CLASSPATH of the - // Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and - // UDFs. - JarFileUris []string `json:"jarFileUris,omitempty"` - // Properties: Optional. A mapping of property names and values, used to - // configure Hive. Properties that conflict with values set by the Dataproc API - // might be overwritten. Can include properties set in - // /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in - // user code. - Properties map[string]string `json:"properties,omitempty"` - // QueryFileUri: The HCFS URI of the script that contains Hive queries. - QueryFileUri string `json:"queryFileUri,omitempty"` - // QueryList: A list of queries. - QueryList *QueryList `json:"queryList,omitempty"` - // ScriptVariables: Optional. Mapping of query variable names to values - // (equivalent to the Hive command: SET name="value";). - ScriptVariables map[string]string `json:"scriptVariables,omitempty"` - // ForceSendFields is a list of field names (e.g. "ContinueOnFailure") to +// DiagnoseClusterResults: The location of diagnostic output. +type DiagnoseClusterResults struct { + // OutputUri: Output only. The Cloud Storage URI of the diagnostic output. The + // output report is a plain text file with a summary of collected diagnostics. + OutputUri string `json:"outputUri,omitempty"` + // ForceSendFields is a list of field names (e.g. "OutputUri") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ContinueOnFailure") to include in - // API requests with the JSON null value. By default, fields with empty values - // are omitted from API requests. 
See + // NullFields is a list of field names (e.g. "OutputUri") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *HiveJob) MarshalJSON() ([]byte, error) { - type NoMethod HiveJob - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s DiagnoseClusterResults) MarshalJSON() ([]byte, error) { + type NoMethod DiagnoseClusterResults + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// IdentityConfig: Identity related configuration, including service account -// based secure multi-tenancy user mappings. -type IdentityConfig struct { - // UserServiceAccountMapping: Required. Map of user to service account. - UserServiceAccountMapping map[string]string `json:"userServiceAccountMapping,omitempty"` - // ForceSendFields is a list of field names (e.g. "UserServiceAccountMapping") - // to unconditionally include in API requests. By default, fields with empty or +// DiskConfig: Specifies the config of disk options for a group of VM +// instances. +type DiskConfig struct { + // BootDiskProvisionedIops: Optional. Indicates how many IOPS to provision for + // the disk. This sets the number of I/O operations per second that the disk + // can handle. Note: This field is only supported if boot_disk_type is + // hyperdisk-balanced. + BootDiskProvisionedIops int64 `json:"bootDiskProvisionedIops,omitempty,string"` + // BootDiskProvisionedThroughput: Optional. Indicates how much throughput to + // provision for the disk. This sets the number of throughput mb per second + // that the disk can handle. Values must be greater than or equal to 1. Note: + // This field is only supported if boot_disk_type is hyperdisk-balanced. + BootDiskProvisionedThroughput int64 `json:"bootDiskProvisionedThroughput,omitempty,string"` + // BootDiskSizeGb: Optional. Size in GB of the boot disk (default is 500GB). + BootDiskSizeGb int64 `json:"bootDiskSizeGb,omitempty"` + // BootDiskType: Optional. Type of the boot disk (default is "pd-standard"). + // Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), + // "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent + // Disk Hard Disk Drive). See Disk types + // (https://cloud.google.com/compute/docs/disks#disk-types). + BootDiskType string `json:"bootDiskType,omitempty"` + // LocalSsdInterface: Optional. Interface type of local SSDs (default is + // "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" + // (Non-Volatile Memory Express). See local SSD performance + // (https://cloud.google.com/compute/docs/disks/local-ssd#performance). + LocalSsdInterface string `json:"localSsdInterface,omitempty"` + // NumLocalSsds: Optional. Number of attached SSDs, from 0 to 8 (default is 0). + // If SSDs are not attached, the boot disk is used to store runtime logs and + // HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If + // one or more SSDs are attached, this runtime bulk data is spread across them, + // and the boot disk contains only basic config and installed binaries.Note: + // Local SSD options may vary by machine type and number of vCPUs selected. + NumLocalSsds int64 `json:"numLocalSsds,omitempty"` + // ForceSendFields is a list of field names (e.g. "BootDiskProvisionedIops") to + // unconditionally include in API requests. 
By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "UserServiceAccountMapping") to + // NullFields is a list of field names (e.g. "BootDiskProvisionedIops") to // include in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *IdentityConfig) MarshalJSON() ([]byte, error) { - type NoMethod IdentityConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s DiskConfig) MarshalJSON() ([]byte, error) { + type NoMethod DiskConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// InjectCredentialsRequest: A request to inject credentials into a cluster. -type InjectCredentialsRequest struct { - // ClusterUuid: Required. The cluster UUID. - ClusterUuid string `json:"clusterUuid,omitempty"` - // CredentialsCiphertext: Required. The encrypted credentials being injected in - // to the cluster.The client is responsible for encrypting the credentials in a - // way that is supported by the cluster.A wrapped value is used here so that - // the actual contents of the encrypted credentials are not written to audit - // logs. - CredentialsCiphertext string `json:"credentialsCiphertext,omitempty"` - // ForceSendFields is a list of field names (e.g. "ClusterUuid") to +// DriverSchedulingConfig: Driver scheduling configuration. +type DriverSchedulingConfig struct { + // MemoryMb: Required. The amount of memory in MB the driver is requesting. + MemoryMb int64 `json:"memoryMb,omitempty"` + // Vcores: Required. The number of vCPUs the driver is requesting. + Vcores int64 `json:"vcores,omitempty"` + // ForceSendFields is a list of field names (e.g. "MemoryMb") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ClusterUuid") to include in API + // NullFields is a list of field names (e.g. "MemoryMb") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *InjectCredentialsRequest) MarshalJSON() ([]byte, error) { - type NoMethod InjectCredentialsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// InstanceFlexibilityPolicy: Instance flexibility Policy allowing a mixture of -// VM shapes and provisioning models. -type InstanceFlexibilityPolicy struct { - // InstanceSelectionList: Optional. List of instance selection options that the - // group will use when creating new VMs. - InstanceSelectionList []*InstanceSelection `json:"instanceSelectionList,omitempty"` - // InstanceSelectionResults: Output only. A list of instance selection results - // in the group. - InstanceSelectionResults []*InstanceSelectionResult `json:"instanceSelectionResults,omitempty"` - // ForceSendFields is a list of field names (e.g. "InstanceSelectionList") to - // unconditionally include in API requests. 
By default, fields with empty or - // default values are omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more - // details. - ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "InstanceSelectionList") to - // include in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. - NullFields []string `json:"-"` +func (s DriverSchedulingConfig) MarshalJSON() ([]byte, error) { + type NoMethod DriverSchedulingConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -func (s *InstanceFlexibilityPolicy) MarshalJSON() ([]byte, error) { - type NoMethod InstanceFlexibilityPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +// Empty: A generic empty message that you can re-use to avoid defining +// duplicated empty messages in your APIs. A typical example is to use it as +// the request or the response type of an API method. For instance: service Foo +// { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } +type Empty struct { + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` } -// InstanceGroupAutoscalingPolicyConfig: Configuration for the size bounds of -// an instance group, including its proportional size to other groups. -type InstanceGroupAutoscalingPolicyConfig struct { - // MaxInstances: Required. Maximum number of instances for this group. Required - // for primary workers. Note that by default, clusters will not use secondary - // workers. Required for secondary workers if the minimum secondary instances - // is set.Primary workers - Bounds: [min_instances, ). Secondary workers - - // Bounds: [min_instances, ). Default: 0. - MaxInstances int64 `json:"maxInstances,omitempty"` - // MinInstances: Optional. Minimum number of instances for this group.Primary - // workers - Bounds: 2, max_instances. Default: 2. Secondary workers - Bounds: - // 0, max_instances. Default: 0. - MinInstances int64 `json:"minInstances,omitempty"` - // Weight: Optional. Weight for the instance group, which is used to determine - // the fraction of total workers in the cluster from this instance group. For - // example, if primary workers have weight 2, and secondary workers have weight - // 1, the cluster will have approximately 2 primary workers for each secondary - // worker.The cluster may not reach the specified balance if constrained by - // min/max bounds or other autoscaling settings. For example, if max_instances - // for secondary workers is 0, then only primary workers will be added. The - // cluster can also be out of balance when created.If weight is not set on any - // instance group, the cluster will default to equal weight for all groups: the - // cluster will attempt to maintain an equal number of workers in each group - // within the configured size bounds for each group. If weight is set for one - // group only, the cluster will default to zero weight on the unset group. For - // example if weight is set only on primary workers, the cluster will use - // primary workers only and no secondary workers. - Weight int64 `json:"weight,omitempty"` - // ForceSendFields is a list of field names (e.g. "MaxInstances") to +// EncryptionConfig: Encryption settings for the cluster. +type EncryptionConfig struct { + // GcePdKmsKeyName: Optional. 
The Cloud KMS key resource name to use for + // persistent disk encryption for all instances in the cluster. See Use CMEK + // with cluster data + // (https://cloud.google.com//dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_cluster_data) + // for more information. + GcePdKmsKeyName string `json:"gcePdKmsKeyName,omitempty"` + // KmsKey: Optional. The Cloud KMS key resource name to use for cluster + // persistent disk and job argument encryption. See Use CMEK with cluster data + // (https://cloud.google.com//dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_cluster_data) + // for more information.When this key resource name is provided, the following + // job arguments of the following job types submitted to the cluster are + // encrypted using CMEK: FlinkJob args + // (https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob) + // HadoopJob args + // (https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob) + // SparkJob args + // (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob) + // SparkRJob args + // (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob) + // PySparkJob args + // (https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob) + // SparkSqlJob + // (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob) + // scriptVariables and queryList.queries HiveJob + // (https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob) + // scriptVariables and queryList.queries PigJob + // (https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob) + // scriptVariables and queryList.queries PrestoJob + // (https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob) + // scriptVariables and queryList.queries + KmsKey string `json:"kmsKey,omitempty"` + // ForceSendFields is a list of field names (e.g. "GcePdKmsKeyName") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "MaxInstances") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "GcePdKmsKeyName") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *InstanceGroupAutoscalingPolicyConfig) MarshalJSON() ([]byte, error) { - type NoMethod InstanceGroupAutoscalingPolicyConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s EncryptionConfig) MarshalJSON() ([]byte, error) { + type NoMethod EncryptionConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// InstanceGroupConfig: The config settings for Compute Engine resources in an -// instance group, such as a master or worker group. -type InstanceGroupConfig struct { - // Accelerators: Optional. The Compute Engine accelerator configuration for - // these instances. - Accelerators []*AcceleratorConfig `json:"accelerators,omitempty"` - // DiskConfig: Optional. Disk option config settings. - DiskConfig *DiskConfig `json:"diskConfig,omitempty"` - // ImageUri: Optional. 
The Compute Engine image resource used for cluster - // instances.The URI can represent an image or image family.Image examples: - // https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id] - // projects/[project_id]/global/images/[image-id] image-idImage family - // examples. Dataproc will use the most recent image from the family: - // https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name] - // projects/[project_id]/global/images/family/[custom-image-family-name]If the - // URI is unspecified, it will be inferred from SoftwareConfig.image_version or - // the system default. - ImageUri string `json:"imageUri,omitempty"` - // InstanceFlexibilityPolicy: Optional. Instance flexibility Policy allowing a - // mixture of VM shapes and provisioning models. - InstanceFlexibilityPolicy *InstanceFlexibilityPolicy `json:"instanceFlexibilityPolicy,omitempty"` - // InstanceNames: Output only. The list of instance names. Dataproc derives the - // names from cluster_name, num_instances, and the instance group. - InstanceNames []string `json:"instanceNames,omitempty"` - // InstanceReferences: Output only. List of references to Compute Engine - // instances. - InstanceReferences []*InstanceReference `json:"instanceReferences,omitempty"` - // IsPreemptible: Output only. Specifies that this instance group contains - // preemptible instances. - IsPreemptible bool `json:"isPreemptible,omitempty"` - // MachineTypeUri: Optional. The Compute Engine machine type used for cluster - // instances.A full URL, partial URI, or short name are valid. Examples: - // https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 - // projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 - // n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone - // Placement - // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) - // feature, you must use the short name of the machine type resource, for - // example, n1-standard-2. - MachineTypeUri string `json:"machineTypeUri,omitempty"` - // ManagedGroupConfig: Output only. The config for Compute Engine Instance - // Group Manager that manages this group. This is only used for preemptible - // instance groups. - ManagedGroupConfig *ManagedGroupConfig `json:"managedGroupConfig,omitempty"` - // MinCpuPlatform: Optional. Specifies the minimum cpu platform for the - // Instance Group. See Dataproc -> Minimum CPU Platform - // (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). - MinCpuPlatform string `json:"minCpuPlatform,omitempty"` - // MinNumInstances: Optional. The minimum number of primary worker instances to - // create. If min_num_instances is set, cluster creation will succeed if the - // number of primary workers created is at least equal to the min_num_instances - // number.Example: Cluster creation request with num_instances = 5 and - // min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed - // VM is deleted. The cluster is resized to 4 instances and placed in a RUNNING - // state. If 2 instances are created and 3 instances fail, the cluster is - // placed in an ERROR state. The failed VMs are not deleted. - MinNumInstances int64 `json:"minNumInstances,omitempty"` - // NumInstances: Optional. The number of VM instances in the instance group. - // For HA cluster master_config groups, must be set to 3.
For standard cluster - // master_config groups, must be set to 1. - NumInstances int64 `json:"numInstances,omitempty"` - // Preemptibility: Optional. Specifies the preemptibility of the instance - // group.The default value for master and worker groups is NON_PREEMPTIBLE. - // This default cannot be changed.The default value for secondary instances is - // PREEMPTIBLE. - // - // Possible values: - // "PREEMPTIBILITY_UNSPECIFIED" - Preemptibility is unspecified, the system - // will choose the appropriate setting for each instance group. - // "NON_PREEMPTIBLE" - Instances are non-preemptible.This option is allowed - // for all instance groups and is the only valid value for Master and Worker - // instance groups. - // "PREEMPTIBLE" - Instances are preemptible - // (https://cloud.google.com/compute/docs/instances/preemptible).This option is - // allowed only for secondary worker - // (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-vms) - // groups. - // "SPOT" - Instances are Spot VMs - // (https://cloud.google.com/compute/docs/instances/spot).This option is - // allowed only for secondary worker - // (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-vms) - // groups. Spot VMs are the latest version of preemptible VMs - // (https://cloud.google.com/compute/docs/instances/preemptible), and provide - // additional features. - Preemptibility string `json:"preemptibility,omitempty"` - // StartupConfig: Optional. Configuration to handle the startup of instances - // during cluster create and update process. - StartupConfig *StartupConfig `json:"startupConfig,omitempty"` - // ForceSendFields is a list of field names (e.g. "Accelerators") to +// EndpointConfig: Endpoint config for this cluster +type EndpointConfig struct { + // EnableHttpPortAccess: Optional. If true, enable http access to specific + // ports on the cluster from external sources. Defaults to false. + EnableHttpPortAccess bool `json:"enableHttpPortAccess,omitempty"` + // HttpPorts: Output only. The map of port descriptions to URLs. Will only be + // populated if enable_http_port_access is true. + HttpPorts map[string]string `json:"httpPorts,omitempty"` + // ForceSendFields is a list of field names (e.g. "EnableHttpPortAccess") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Accelerators") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "EnableHttpPortAccess") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *InstanceGroupConfig) MarshalJSON() ([]byte, error) { - type NoMethod InstanceGroupConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s EndpointConfig) MarshalJSON() ([]byte, error) { + type NoMethod EndpointConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// InstanceReference: A reference to a Compute Engine instance. -type InstanceReference struct { - // InstanceId: The unique identifier of the Compute Engine instance. 
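The EndpointConfig added above has one writable knob (EnableHttpPortAccess) and one output-only map (HttpPorts). A minimal sketch, again assuming the vendored google.golang.org/api/dataproc/v1 import path; the HttpPorts contents below are fabricated placeholders, since the real map is populated by the service:

package main

import (
	"encoding/json"
	"fmt"

	dataproc "google.golang.org/api/dataproc/v1" // assumed import path
)

func main() {
	// Request side: only EnableHttpPortAccess is sent.
	cfg := &dataproc.EndpointConfig{EnableHttpPortAccess: true}
	b, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"enableHttpPortAccess":true}

	// Response side: the service fills HttpPorts with description->URL
	// entries when enable_http_port_access is true (values faked here).
	got := dataproc.EndpointConfig{
		HttpPorts: map[string]string{"YARN ResourceManager": "https://example.invalid/yarn"},
	}
	for name, url := range got.HttpPorts {
		fmt.Printf("%s: %s\n", name, url)
	}
}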
- InstanceId string `json:"instanceId,omitempty"` - // InstanceName: The user-friendly name of the Compute Engine instance. - InstanceName string `json:"instanceName,omitempty"` - // PublicEciesKey: The public ECIES key used for sharing data with this - // instance. - PublicEciesKey string `json:"publicEciesKey,omitempty"` - // PublicKey: The public RSA key used for sharing data with this instance. - PublicKey string `json:"publicKey,omitempty"` - // ForceSendFields is a list of field names (e.g. "InstanceId") to +// EnvironmentConfig: Environment configuration for a workload. +type EnvironmentConfig struct { + // ExecutionConfig: Optional. Execution configuration for a workload. + ExecutionConfig *ExecutionConfig `json:"executionConfig,omitempty"` + // PeripheralsConfig: Optional. Peripherals configuration that workload has + // access to. + PeripheralsConfig *PeripheralsConfig `json:"peripheralsConfig,omitempty"` + // ForceSendFields is a list of field names (e.g. "ExecutionConfig") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "InstanceId") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "ExecutionConfig") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *InstanceReference) MarshalJSON() ([]byte, error) { - type NoMethod InstanceReference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s EnvironmentConfig) MarshalJSON() ([]byte, error) { + type NoMethod EnvironmentConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// InstanceSelection: Defines machines types and a rank to which the machines -// types belong. -type InstanceSelection struct { - // MachineTypes: Optional. Full machine-type names, e.g. "n1-standard-16". - MachineTypes []string `json:"machineTypes,omitempty"` - // Rank: Optional. Preference of this instance selection. Lower number means - // higher preference. Dataproc will first try to create a VM based on the - // machine-type with priority rank and fallback to next rank based on - // availability. Machine types and instance selections with the same priority - // have the same preference. - Rank int64 `json:"rank,omitempty"` - // ForceSendFields is a list of field names (e.g. "MachineTypes") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See +// ExecutionConfig: Execution configuration for a workload. +type ExecutionConfig struct { + // IdleTtl: Optional. Applies to sessions only. The duration to keep the + // session alive while it's idling. Exceeding this threshold causes the session + // to terminate. This field cannot be set on a batch workload. Minimum value is + // 10 minutes; maximum value is 14 days (see JSON representation of Duration + // (https://developers.google.com/protocol-buffers/docs/proto3#json)). Defaults + // to 1 hour if not set. 
If both ttl and idle_ttl are specified for an + // interactive session, the conditions are treated as OR conditions: the + // workload will be terminated when it has been idle for idle_ttl or when ttl + // has been exceeded, whichever occurs first. + IdleTtl string `json:"idleTtl,omitempty"` + // KmsKey: Optional. The Cloud KMS key to use for encryption. + KmsKey string `json:"kmsKey,omitempty"` + // NetworkTags: Optional. Tags used for network traffic control. + NetworkTags []string `json:"networkTags,omitempty"` + // NetworkUri: Optional. Network URI to connect workload to. + NetworkUri string `json:"networkUri,omitempty"` + // ServiceAccount: Optional. Service account used to execute the workload. + ServiceAccount string `json:"serviceAccount,omitempty"` + // StagingBucket: Optional. A Cloud Storage bucket used to stage workload + // dependencies, config files, and store workload output and other ephemeral + // data, such as Spark history files. If you do not specify a staging bucket, + // Cloud Dataproc will determine a Cloud Storage location according to the + // region where your workload is running, and then create and manage + // project-level, per-location staging and temporary buckets. This field + // requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage + // bucket. + StagingBucket string `json:"stagingBucket,omitempty"` + // SubnetworkUri: Optional. Subnetwork URI to connect workload to. + SubnetworkUri string `json:"subnetworkUri,omitempty"` + // Ttl: Optional. The duration after which the workload will be terminated, + // specified as the JSON representation for Duration + // (https://protobuf.dev/programming-guides/proto3/#json). When the workload + // exceeds this duration, it will be unconditionally terminated without waiting + // for ongoing work to finish. If ttl is not specified for a batch workload, + // the workload will be allowed to run until it exits naturally (or run forever + // without exiting). If ttl is not specified for an interactive session, it + // defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ + // runtime version, it defaults to 4 hours. Minimum value is 10 minutes; + // maximum value is 14 days. If both ttl and idle_ttl are specified (for an + // interactive session), the conditions are treated as OR conditions: the + // workload will be terminated when it has been idle for idle_ttl or when ttl + // has been exceeded, whichever occurs first. + Ttl string `json:"ttl,omitempty"` + // ForceSendFields is a list of field names (e.g. "IdleTtl") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "MachineTypes") to include in API + // NullFields is a list of field names (e.g. "IdleTtl") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
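The Ttl and IdleTtl fields above are JSON-encoded protobuf Durations (a decimal number of seconds with an "s" suffix), and when both are set on an interactive session they combine as OR: whichever threshold is crossed first terminates the workload. A sketch with placeholder values, assuming the vendored google.golang.org/api/dataproc/v1 import path:

package main

import (
	"encoding/json"
	"fmt"

	dataproc "google.golang.org/api/dataproc/v1" // assumed import path
)

func main() {
	cfg := &dataproc.ExecutionConfig{
		IdleTtl:       "1800s",             // terminate after 30 minutes idle...
		Ttl:           "14400s",            // ...or after 4 hours total, whichever comes first
		StagingBucket: "my-staging-bucket", // placeholder; a bucket name, not a gs:// URI
	}
	b, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}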
NullFields []string `json:"-"` } -func (s *InstanceSelection) MarshalJSON() ([]byte, error) { - type NoMethod InstanceSelection - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s ExecutionConfig) MarshalJSON() ([]byte, error) { + type NoMethod ExecutionConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// InstanceSelectionResult: Defines a mapping from machine types to the number -// of VMs that are created with each machine type. -type InstanceSelectionResult struct { - // MachineType: Output only. Full machine-type names, e.g. "n1-standard-16". - MachineType string `json:"machineType,omitempty"` - // VmCount: Output only. Number of VM provisioned with the machine_type. - VmCount int64 `json:"vmCount,omitempty"` - // ForceSendFields is a list of field names (e.g. "MachineType") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See +type ExecutorMetrics struct { + Metrics map[string]string `json:"metrics,omitempty"` + // ForceSendFields is a list of field names (e.g. "Metrics") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "MachineType") to include in API + // NullFields is a list of field names (e.g. "Metrics") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *InstanceSelectionResult) MarshalJSON() ([]byte, error) { - type NoMethod InstanceSelectionResult - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// InstantiateWorkflowTemplateRequest: A request to instantiate a workflow -// template. -type InstantiateWorkflowTemplateRequest struct { - // Parameters: Optional. Map from parameter names to values that should be used - // for those parameters. Values may not exceed 1000 characters. - Parameters map[string]string `json:"parameters,omitempty"` - // RequestId: Optional. A tag that prevents multiple concurrent workflow - // instances with the same tag from running. This mitigates risk of concurrent - // instances started due to retries.It is recommended to always set this value - // to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The - // tag must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), - // and hyphens (-). The maximum length is 40 characters. - RequestId string `json:"requestId,omitempty"` - // Version: Optional. The version of workflow template to instantiate. If - // specified, the workflow will be instantiated only if the current version of - // the workflow template has the supplied version.This option cannot be used to - // instantiate a previous version of workflow template. - Version int64 `json:"version,omitempty"` - // ForceSendFields is a list of field names (e.g. 
"Parameters") to +func (s ExecutorMetrics) MarshalJSON() ([]byte, error) { + type NoMethod ExecutorMetrics + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type ExecutorMetricsDistributions struct { + DiskBytesSpilled []float64 `json:"diskBytesSpilled,omitempty"` + FailedTasks []float64 `json:"failedTasks,omitempty"` + InputBytes []float64 `json:"inputBytes,omitempty"` + InputRecords []float64 `json:"inputRecords,omitempty"` + KilledTasks []float64 `json:"killedTasks,omitempty"` + MemoryBytesSpilled []float64 `json:"memoryBytesSpilled,omitempty"` + OutputBytes []float64 `json:"outputBytes,omitempty"` + OutputRecords []float64 `json:"outputRecords,omitempty"` + PeakMemoryMetrics *ExecutorPeakMetricsDistributions `json:"peakMemoryMetrics,omitempty"` + Quantiles []float64 `json:"quantiles,omitempty"` + ShuffleRead []float64 `json:"shuffleRead,omitempty"` + ShuffleReadRecords []float64 `json:"shuffleReadRecords,omitempty"` + ShuffleWrite []float64 `json:"shuffleWrite,omitempty"` + ShuffleWriteRecords []float64 `json:"shuffleWriteRecords,omitempty"` + SucceededTasks []float64 `json:"succeededTasks,omitempty"` + TaskTimeMillis []float64 `json:"taskTimeMillis,omitempty"` + // ForceSendFields is a list of field names (e.g. "DiskBytesSpilled") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Parameters") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "DiskBytesSpilled") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *InstantiateWorkflowTemplateRequest) MarshalJSON() ([]byte, error) { - type NoMethod InstantiateWorkflowTemplateRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s ExecutorMetricsDistributions) MarshalJSON() ([]byte, error) { + type NoMethod ExecutorMetricsDistributions + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// Interval: Represents a time interval, encoded as a Timestamp start -// (inclusive) and a Timestamp end (exclusive).The start must be less than or -// equal to the end. When the start equals the end, the interval is empty -// (matches no time). When both start and end are unspecified, the interval -// matches any time. -type Interval struct { - // EndTime: Optional. Exclusive end of the interval.If specified, a Timestamp - // matching this interval will have to be before the end. - EndTime string `json:"endTime,omitempty"` - // StartTime: Optional. Inclusive start of the interval.If specified, a - // Timestamp matching this interval will have to be the same or after the - // start. - StartTime string `json:"startTime,omitempty"` - // ForceSendFields is a list of field names (e.g. "EndTime") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. 
See +func (s *ExecutorMetricsDistributions) UnmarshalJSON(data []byte) error { + type NoMethod ExecutorMetricsDistributions + var s1 struct { + DiskBytesSpilled []gensupport.JSONFloat64 `json:"diskBytesSpilled"` + FailedTasks []gensupport.JSONFloat64 `json:"failedTasks"` + InputBytes []gensupport.JSONFloat64 `json:"inputBytes"` + InputRecords []gensupport.JSONFloat64 `json:"inputRecords"` + KilledTasks []gensupport.JSONFloat64 `json:"killedTasks"` + MemoryBytesSpilled []gensupport.JSONFloat64 `json:"memoryBytesSpilled"` + OutputBytes []gensupport.JSONFloat64 `json:"outputBytes"` + OutputRecords []gensupport.JSONFloat64 `json:"outputRecords"` + Quantiles []gensupport.JSONFloat64 `json:"quantiles"` + ShuffleRead []gensupport.JSONFloat64 `json:"shuffleRead"` + ShuffleReadRecords []gensupport.JSONFloat64 `json:"shuffleReadRecords"` + ShuffleWrite []gensupport.JSONFloat64 `json:"shuffleWrite"` + ShuffleWriteRecords []gensupport.JSONFloat64 `json:"shuffleWriteRecords"` + SucceededTasks []gensupport.JSONFloat64 `json:"succeededTasks"` + TaskTimeMillis []gensupport.JSONFloat64 `json:"taskTimeMillis"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.DiskBytesSpilled = make([]float64, len(s1.DiskBytesSpilled)) + for i := range s1.DiskBytesSpilled { + s.DiskBytesSpilled[i] = float64(s1.DiskBytesSpilled[i]) + } + s.FailedTasks = make([]float64, len(s1.FailedTasks)) + for i := range s1.FailedTasks { + s.FailedTasks[i] = float64(s1.FailedTasks[i]) + } + s.InputBytes = make([]float64, len(s1.InputBytes)) + for i := range s1.InputBytes { + s.InputBytes[i] = float64(s1.InputBytes[i]) + } + s.InputRecords = make([]float64, len(s1.InputRecords)) + for i := range s1.InputRecords { + s.InputRecords[i] = float64(s1.InputRecords[i]) + } + s.KilledTasks = make([]float64, len(s1.KilledTasks)) + for i := range s1.KilledTasks { + s.KilledTasks[i] = float64(s1.KilledTasks[i]) + } + s.MemoryBytesSpilled = make([]float64, len(s1.MemoryBytesSpilled)) + for i := range s1.MemoryBytesSpilled { + s.MemoryBytesSpilled[i] = float64(s1.MemoryBytesSpilled[i]) + } + s.OutputBytes = make([]float64, len(s1.OutputBytes)) + for i := range s1.OutputBytes { + s.OutputBytes[i] = float64(s1.OutputBytes[i]) + } + s.OutputRecords = make([]float64, len(s1.OutputRecords)) + for i := range s1.OutputRecords { + s.OutputRecords[i] = float64(s1.OutputRecords[i]) + } + s.Quantiles = make([]float64, len(s1.Quantiles)) + for i := range s1.Quantiles { + s.Quantiles[i] = float64(s1.Quantiles[i]) + } + s.ShuffleRead = make([]float64, len(s1.ShuffleRead)) + for i := range s1.ShuffleRead { + s.ShuffleRead[i] = float64(s1.ShuffleRead[i]) + } + s.ShuffleReadRecords = make([]float64, len(s1.ShuffleReadRecords)) + for i := range s1.ShuffleReadRecords { + s.ShuffleReadRecords[i] = float64(s1.ShuffleReadRecords[i]) + } + s.ShuffleWrite = make([]float64, len(s1.ShuffleWrite)) + for i := range s1.ShuffleWrite { + s.ShuffleWrite[i] = float64(s1.ShuffleWrite[i]) + } + s.ShuffleWriteRecords = make([]float64, len(s1.ShuffleWriteRecords)) + for i := range s1.ShuffleWriteRecords { + s.ShuffleWriteRecords[i] = float64(s1.ShuffleWriteRecords[i]) + } + s.SucceededTasks = make([]float64, len(s1.SucceededTasks)) + for i := range s1.SucceededTasks { + s.SucceededTasks[i] = float64(s1.SucceededTasks[i]) + } + s.TaskTimeMillis = make([]float64, len(s1.TaskTimeMillis)) + for i := range s1.TaskTimeMillis { + s.TaskTimeMillis[i] = float64(s1.TaskTimeMillis[i]) + } + return nil +} + +type 
ExecutorPeakMetricsDistributions struct { + ExecutorMetrics []*ExecutorMetrics `json:"executorMetrics,omitempty"` + Quantiles []float64 `json:"quantiles,omitempty"` + // ForceSendFields is a list of field names (e.g. "ExecutorMetrics") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "EndTime") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "ExecutorMetrics") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *Interval) MarshalJSON() ([]byte, error) { - type NoMethod Interval - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s ExecutorPeakMetricsDistributions) MarshalJSON() ([]byte, error) { + type NoMethod ExecutorPeakMetricsDistributions + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// Job: A Dataproc job resource. -type Job struct { - // Done: Output only. Indicates whether the job is completed. If the value is - // false, the job is still in progress. If true, the job is completed, and - // status.state field will indicate if it was successful, failed, or cancelled. - Done bool `json:"done,omitempty"` - // DriverControlFilesUri: Output only. If present, the location of - // miscellaneous control files which can be used as part of job setup and - // handling. If not present, control files might be placed in the same location - // as driver_output_uri. - DriverControlFilesUri string `json:"driverControlFilesUri,omitempty"` - // DriverOutputResourceUri: Output only. A URI pointing to the location of the - // stdout of the job's driver program. - DriverOutputResourceUri string `json:"driverOutputResourceUri,omitempty"` - // DriverSchedulingConfig: Optional. Driver scheduling configuration. - DriverSchedulingConfig *DriverSchedulingConfig `json:"driverSchedulingConfig,omitempty"` - // FlinkJob: Optional. Job is a Flink job. - FlinkJob *FlinkJob `json:"flinkJob,omitempty"` - // HadoopJob: Optional. Job is a Hadoop job. - HadoopJob *HadoopJob `json:"hadoopJob,omitempty"` - // HiveJob: Optional. Job is a Hive job. - HiveJob *HiveJob `json:"hiveJob,omitempty"` - // JobUuid: Output only. A UUID that uniquely identifies a job within the - // project over time. This is in contrast to a user-settable reference.job_id - // that might be reused over time. - JobUuid string `json:"jobUuid,omitempty"` - // Labels: Optional. The labels to associate with this job. Label keys must - // contain 1 to 63 characters, and must conform to RFC 1035 - // (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty, but, if - // present, must contain 1 to 63 characters, and must conform to RFC 1035 - // (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be - // associated with a job. - Labels map[string]string `json:"labels,omitempty"` - // PigJob: Optional. Job is a Pig job. - PigJob *PigJob `json:"pigJob,omitempty"` - // Placement: Required. Job information, including how, when, and where to run - // the job. 
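The UnmarshalJSON methods above funnel every quantile array through gensupport.JSONFloat64 so that values Spark reports as JSON strings (including "NaN" and "Infinity") still decode into []float64. gensupport lives under an internal path, so as a rough self-contained illustration of the same trick, with a hypothetical jsonFloat64 type standing in for the library's:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// jsonFloat64 accepts a JSON number, or a string containing a number or
// one of the special values NaN/Infinity/-Infinity.
type jsonFloat64 float64

func (f *jsonFloat64) UnmarshalJSON(data []byte) error {
	var v interface{}
	if err := json.Unmarshal(data, &v); err != nil {
		return err
	}
	switch val := v.(type) {
	case float64:
		*f = jsonFloat64(val)
		return nil
	case string:
		parsed, err := strconv.ParseFloat(val, 64) // ParseFloat understands "NaN" and "Inf"
		if err != nil {
			return err
		}
		*f = jsonFloat64(parsed)
		return nil
	}
	return fmt.Errorf("cannot decode %v as float64", v)
}

func main() {
	var quantiles []jsonFloat64
	if err := json.Unmarshal([]byte(`[0.5, "0.95", "NaN"]`), &quantiles); err != nil {
		panic(err)
	}
	fmt.Println(quantiles) // [0.5 0.95 NaN]
}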
- Placement *JobPlacement `json:"placement,omitempty"` - // PrestoJob: Optional. Job is a Presto job. - PrestoJob *PrestoJob `json:"prestoJob,omitempty"` - // PysparkJob: Optional. Job is a PySpark job. - PysparkJob *PySparkJob `json:"pysparkJob,omitempty"` - // Reference: Optional. The fully qualified reference to the job, which can be - // used to obtain the equivalent REST path of the job resource. If this - // property is not specified when a job is created, the server generates a - // job_id. - Reference *JobReference `json:"reference,omitempty"` - // Scheduling: Optional. Job scheduling configuration. - Scheduling *JobScheduling `json:"scheduling,omitempty"` - // SparkJob: Optional. Job is a Spark job. - SparkJob *SparkJob `json:"sparkJob,omitempty"` - // SparkRJob: Optional. Job is a SparkR job. - SparkRJob *SparkRJob `json:"sparkRJob,omitempty"` - // SparkSqlJob: Optional. Job is a SparkSql job. - SparkSqlJob *SparkSqlJob `json:"sparkSqlJob,omitempty"` - // Status: Output only. The job status. Additional application-specific status - // information might be contained in the type_job and yarn_applications fields. - Status *JobStatus `json:"status,omitempty"` - // StatusHistory: Output only. The previous job status. - StatusHistory []*JobStatus `json:"statusHistory,omitempty"` - // TrinoJob: Optional. Job is a Trino job. - TrinoJob *TrinoJob `json:"trinoJob,omitempty"` - // YarnApplications: Output only. The collection of YARN applications spun up - // by this job.Beta Feature: This report is available for testing purposes - // only. It might be changed before final release. - YarnApplications []*YarnApplication `json:"yarnApplications,omitempty"` +func (s *ExecutorPeakMetricsDistributions) UnmarshalJSON(data []byte) error { + type NoMethod ExecutorPeakMetricsDistributions + var s1 struct { + Quantiles []gensupport.JSONFloat64 `json:"quantiles"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Quantiles = make([]float64, len(s1.Quantiles)) + for i := range s1.Quantiles { + s.Quantiles[i] = float64(s1.Quantiles[i]) + } + return nil +} - // ServerResponse contains the HTTP response code and headers from the server. - googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Done") to unconditionally +// ExecutorResourceRequest: Resources used per executor used by the +// application. +type ExecutorResourceRequest struct { + Amount int64 `json:"amount,omitempty,string"` + DiscoveryScript string `json:"discoveryScript,omitempty"` + ResourceName string `json:"resourceName,omitempty"` + Vendor string `json:"vendor,omitempty"` + // ForceSendFields is a list of field names (e.g. "Amount") to unconditionally // include in API requests. By default, fields with empty or default values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Done") to include in API requests - // with the JSON null value. By default, fields with empty values are omitted - // from API requests. See + // NullFields is a list of field names (e.g. "Amount") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
NullFields []string `json:"-"` } -func (s *Job) MarshalJSON() ([]byte, error) { - type NoMethod Job - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s ExecutorResourceRequest) MarshalJSON() ([]byte, error) { + type NoMethod ExecutorResourceRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ExecutorStageSummary: Executor resources consumed by a stage. +type ExecutorStageSummary struct { + DiskBytesSpilled int64 `json:"diskBytesSpilled,omitempty,string"` + ExecutorId string `json:"executorId,omitempty"` + FailedTasks int64 `json:"failedTasks,omitempty"` + InputBytes int64 `json:"inputBytes,omitempty,string"` + InputRecords int64 `json:"inputRecords,omitempty,string"` + IsExcludedForStage bool `json:"isExcludedForStage,omitempty"` + KilledTasks int64 `json:"killedTasks,omitempty"` + MemoryBytesSpilled int64 `json:"memoryBytesSpilled,omitempty,string"` + OutputBytes int64 `json:"outputBytes,omitempty,string"` + OutputRecords int64 `json:"outputRecords,omitempty,string"` + PeakMemoryMetrics *ExecutorMetrics `json:"peakMemoryMetrics,omitempty"` + ShuffleRead int64 `json:"shuffleRead,omitempty,string"` + ShuffleReadRecords int64 `json:"shuffleReadRecords,omitempty,string"` + ShuffleWrite int64 `json:"shuffleWrite,omitempty,string"` + ShuffleWriteRecords int64 `json:"shuffleWriteRecords,omitempty,string"` + StageAttemptId int64 `json:"stageAttemptId,omitempty"` + StageId int64 `json:"stageId,omitempty,string"` + SucceededTasks int64 `json:"succeededTasks,omitempty"` + TaskTimeMillis int64 `json:"taskTimeMillis,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "DiskBytesSpilled") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "DiskBytesSpilled") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` } -// JobMetadata: Job Operation metadata. -type JobMetadata struct { - // JobId: Output only. The job id. - JobId string `json:"jobId,omitempty"` - // OperationType: Output only. Operation type. - OperationType string `json:"operationType,omitempty"` - // StartTime: Output only. Job submission time. - StartTime string `json:"startTime,omitempty"` - // Status: Output only. Most recent job status. - Status *JobStatus `json:"status,omitempty"` - // ForceSendFields is a list of field names (e.g. "JobId") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See +func (s ExecutorStageSummary) MarshalJSON() ([]byte, error) { + type NoMethod ExecutorStageSummary + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ExecutorSummary: Details about executors used by the application. 
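Most of the 64-bit counters in the executor summaries above carry the `json:"...,omitempty,string"` tag option, which makes encoding/json write the int64 as a quoted decimal string; the API uses this to keep large values exact for clients whose JSON numbers are IEEE 754 doubles. A tiny sketch with a hypothetical struct mirroring that tag style:

package main

import (
	"encoding/json"
	"fmt"
)

// metrics is a stand-in for the generated summaries: DiskUsed uses the
// ",string" option, TotalTasks does not.
type metrics struct {
	TotalTasks int64 `json:"totalTasks,omitempty"`
	DiskUsed   int64 `json:"diskUsed,omitempty,string"`
}

func main() {
	b, err := json.Marshal(metrics{TotalTasks: 42, DiskUsed: 1 << 40})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"totalTasks":42,"diskUsed":"1099511627776"}
}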
+type ExecutorSummary struct { + ActiveTasks int64 `json:"activeTasks,omitempty"` + AddTime string `json:"addTime,omitempty"` + Attributes map[string]string `json:"attributes,omitempty"` + CompletedTasks int64 `json:"completedTasks,omitempty"` + DiskUsed int64 `json:"diskUsed,omitempty,string"` + ExcludedInStages googleapi.Int64s `json:"excludedInStages,omitempty"` + ExecutorId string `json:"executorId,omitempty"` + ExecutorLogs map[string]string `json:"executorLogs,omitempty"` + FailedTasks int64 `json:"failedTasks,omitempty"` + HostPort string `json:"hostPort,omitempty"` + IsActive bool `json:"isActive,omitempty"` + IsExcluded bool `json:"isExcluded,omitempty"` + MaxMemory int64 `json:"maxMemory,omitempty,string"` + MaxTasks int64 `json:"maxTasks,omitempty"` + MemoryMetrics *MemoryMetrics `json:"memoryMetrics,omitempty"` + MemoryUsed int64 `json:"memoryUsed,omitempty,string"` + PeakMemoryMetrics *ExecutorMetrics `json:"peakMemoryMetrics,omitempty"` + RddBlocks int64 `json:"rddBlocks,omitempty"` + RemoveReason string `json:"removeReason,omitempty"` + RemoveTime string `json:"removeTime,omitempty"` + ResourceProfileId int64 `json:"resourceProfileId,omitempty"` + Resources map[string]ResourceInformation `json:"resources,omitempty"` + TotalCores int64 `json:"totalCores,omitempty"` + TotalDurationMillis int64 `json:"totalDurationMillis,omitempty,string"` + TotalGcTimeMillis int64 `json:"totalGcTimeMillis,omitempty,string"` + TotalInputBytes int64 `json:"totalInputBytes,omitempty,string"` + TotalShuffleRead int64 `json:"totalShuffleRead,omitempty,string"` + TotalShuffleWrite int64 `json:"totalShuffleWrite,omitempty,string"` + TotalTasks int64 `json:"totalTasks,omitempty"` + // ForceSendFields is a list of field names (e.g. "ActiveTasks") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "JobId") to include in API + // NullFields is a list of field names (e.g. "ActiveTasks") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *JobMetadata) MarshalJSON() ([]byte, error) { - type NoMethod JobMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s ExecutorSummary) MarshalJSON() ([]byte, error) { + type NoMethod ExecutorSummary + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// JobPlacement: Dataproc job config. -type JobPlacement struct { - // ClusterLabels: Optional. Cluster labels to identify a cluster where the job - // will be submitted. - ClusterLabels map[string]string `json:"clusterLabels,omitempty"` - // ClusterName: Required. The name of the cluster where the job will be - // submitted. - ClusterName string `json:"clusterName,omitempty"` - // ClusterUuid: Output only. A cluster UUID generated by the Dataproc service - // when the job is submitted. - ClusterUuid string `json:"clusterUuid,omitempty"` - // ForceSendFields is a list of field names (e.g. "ClusterLabels") to +// Expr: Represents a textual expression in the Common Expression Language +// (CEL) syntax. CEL is a C-like expression language. 
The syntax and semantics
+// of CEL are documented at https://github.com/google/cel-spec.Example
+// (Comparison): title: "Summary size limit" description: "Determines if a
+// summary is less than 100 chars" expression: "document.summary.size() < 100"
+// Example (Equality): title: "Requestor is owner" description: "Determines if
+// requestor is the document owner" expression: "document.owner ==
+// request.auth.claims.email" Example (Logic): title: "Public documents"
+// description: "Determine whether the document should be publicly visible"
+// expression: "document.type != 'private' && document.type != 'internal'"
+// Example (Data Manipulation): title: "Notification string" description:
+// "Create a notification string with a timestamp." expression: "'New message
+// received at ' + string(document.create_time)" The exact variables and
+// functions that may be referenced within an expression are determined by the
+// service that evaluates it. See the service documentation for additional
+// information.
+type Expr struct {
+ // Description: Optional. Description of the expression. This is a longer text
+ // which describes the expression, e.g. when hovered over it in a UI.
+ Description string `json:"description,omitempty"`
+ // Expression: Textual representation of an expression in Common Expression
+ // Language syntax.
+ Expression string `json:"expression,omitempty"`
+ // Location: Optional. String indicating the location of the expression for
+ // error reporting, e.g. a file name and a position in the file.
+ Location string `json:"location,omitempty"`
+ // Title: Optional. Title for the expression, i.e. a short string describing
+ // its purpose. This can be used e.g. in UIs which allow entering the
+ // expression.
+ Title string `json:"title,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "Description") to
// unconditionally include in API requests. By default, fields with empty or
// default values are omitted from API requests. See
// https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
// details.
ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "ClusterLabels") to include in API
+ // NullFields is a list of field names (e.g. "Description") to include in API
// requests with the JSON null value. By default, fields with empty values are
// omitted from API requests. See
// https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
NullFields []string `json:"-"`
}

-func (s *JobPlacement) MarshalJSON() ([]byte, error) {
- type NoMethod JobPlacement
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+func (s Expr) MarshalJSON() ([]byte, error) {
+ type NoMethod Expr
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

-// JobReference: Encapsulates the full scoping used to reference a job.
-type JobReference struct {
- // JobId: Optional. The job ID, which must be unique within the project.The ID
- // must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
- // hyphens (-). The maximum length is 100 characters.If not specified by the
- // caller, the job ID will be provided by the server.
- JobId string `json:"jobId,omitempty"`
- // ProjectId: Optional. The ID of the Google Cloud Platform project that the
- // job belongs to. If specified, must match the request project ID.
- ProjectId string `json:"projectId,omitempty"`
- // ForceSendFields is a list of field names (e.g.
"JobId") to unconditionally +// FlinkJob: A Dataproc job for running Apache Flink applications on YARN. +type FlinkJob struct { + // Args: Optional. The arguments to pass to the driver. Do not include + // arguments, such as --conf, that can be set as job properties, since a + // collision might occur that causes an incorrect job submission. + Args []string `json:"args,omitempty"` + // JarFileUris: Optional. HCFS URIs of jar files to add to the CLASSPATHs of + // the Flink driver and tasks. + JarFileUris []string `json:"jarFileUris,omitempty"` + // LoggingConfig: Optional. The runtime log config for job execution. + LoggingConfig *LoggingConfig `json:"loggingConfig,omitempty"` + // MainClass: The name of the driver's main class. The jar file that contains + // the class must be in the default CLASSPATH or specified in jarFileUris. + MainClass string `json:"mainClass,omitempty"` + // MainJarFileUri: The HCFS URI of the jar file that contains the main class. + MainJarFileUri string `json:"mainJarFileUri,omitempty"` + // Properties: Optional. A mapping of property names to values, used to + // configure Flink. Properties that conflict with values set by the Dataproc + // API might be overwritten. Can include properties set in + // /etc/flink/conf/flink-defaults.conf and classes in user code. + Properties map[string]string `json:"properties,omitempty"` + // SavepointUri: Optional. HCFS URI of the savepoint, which contains the last + // saved progress for starting the current job. + SavepointUri string `json:"savepointUri,omitempty"` + // ForceSendFields is a list of field names (e.g. "Args") to unconditionally // include in API requests. By default, fields with empty or default values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "JobId") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "Args") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *JobReference) MarshalJSON() ([]byte, error) { - type NoMethod JobReference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s FlinkJob) MarshalJSON() ([]byte, error) { + type NoMethod FlinkJob + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// JobScheduling: Job scheduling options. -type JobScheduling struct { - // MaxFailuresPerHour: Optional. Maximum number of times per hour a driver can - // be restarted as a result of driver exiting with non-zero code before job is - // reported failed.A job might be reported as thrashing if the driver exits - // with a non-zero code four times within a 10-minute window.Maximum value is - // 10.Note: This restartable job option is not supported in Dataproc workflow - // templates - // (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template). - MaxFailuresPerHour int64 `json:"maxFailuresPerHour,omitempty"` - // MaxFailuresTotal: Optional. Maximum total number of times a driver can be - // restarted as a result of the driver exiting with a non-zero code. 
After the
- // maximum number is reached, the job will be reported as failed.Maximum value
- // is 240.Note: Currently, this restartable job option is not supported in
- // Dataproc workflow templates
- // (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).
- MaxFailuresTotal int64 `json:"maxFailuresTotal,omitempty"`
- // ForceSendFields is a list of field names (e.g. "MaxFailuresPerHour") to
- // unconditionally include in API requests. By default, fields with empty or
+// GceClusterConfig: Common config settings for resources of Compute Engine
+// cluster instances, applicable to all instances in the cluster.
+type GceClusterConfig struct {
+ // ConfidentialInstanceConfig: Optional. Confidential Instance Config for
+ // clusters using Confidential VMs
+ // (https://cloud.google.com/compute/confidential-vm/docs).
+ ConfidentialInstanceConfig *ConfidentialInstanceConfig `json:"confidentialInstanceConfig,omitempty"`
+ // InternalIpOnly: Optional. This setting applies to subnetwork-enabled
+ // networks. It is set to true by default in clusters created with image
+ // versions 2.2.x.When set to true: All cluster VMs have internal IP addresses.
+ // Private Google Access
+ // (https://cloud.google.com/vpc/docs/private-google-access) must be enabled to
+ // access Dataproc and other Google Cloud APIs. Off-cluster dependencies must
+ // be configured to be accessible without external IP addresses.When set to
+ // false: Cluster VMs are not restricted to internal IP addresses. Ephemeral
+ // external IP addresses are assigned to each cluster VM.
+ InternalIpOnly bool `json:"internalIpOnly,omitempty"`
+ // Metadata: Optional. The Compute Engine metadata entries to add to all
+ // instances (see Project and instance metadata
+ // (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+ Metadata map[string]string `json:"metadata,omitempty"`
+ // NetworkUri: Optional. The Compute Engine network to be used for machine
+ // communications. Cannot be specified with subnetwork_uri. If neither
+ // network_uri nor subnetwork_uri is specified, the "default" network of the
+ // project is used, if it exists. Cannot be a "Custom Subnet Network" (see
+ // Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for
+ // more information).A full URL, partial URI, or short name are valid.
+ // Examples:
+ // https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default
+ // projects/[project_id]/global/networks/default default
+ NetworkUri string `json:"networkUri,omitempty"`
+ // NodeGroupAffinity: Optional. Node Group Affinity for sole-tenant clusters.
+ NodeGroupAffinity *NodeGroupAffinity `json:"nodeGroupAffinity,omitempty"`
+ // PrivateIpv6GoogleAccess: Optional. The type of IPv6 access for a cluster.
+ //
+ // Possible values:
+ // "PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED" - If unspecified, Compute Engine
+ // default behavior will apply, which is the same as INHERIT_FROM_SUBNETWORK.
+ // "INHERIT_FROM_SUBNETWORK" - Private access to and from Google Services
+ // configuration inherited from the subnetwork configuration. This is the
+ // default Compute Engine behavior.
+ // "OUTBOUND" - Enables outbound private IPv6 access to Google Services from
+ // the Dataproc cluster.
+ // "BIDIRECTIONAL" - Enables bidirectional private IPv6 access between Google
+ // Services and the Dataproc cluster.
+ PrivateIpv6GoogleAccess string `json:"privateIpv6GoogleAccess,omitempty"` + // ReservationAffinity: Optional. Reservation Affinity for consuming Zonal + // reservation. + ReservationAffinity *ReservationAffinity `json:"reservationAffinity,omitempty"` + // ServiceAccount: Optional. The Dataproc service account + // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) + // (also see VM Data Plane identity + // (https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) + // used by Dataproc cluster VM instances to access Google Cloud Platform + // services.If not specified, the Compute Engine default service account + // (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) + // is used. + ServiceAccount string `json:"serviceAccount,omitempty"` + // ServiceAccountScopes: Optional. The URIs of service account scopes to be + // included in Compute Engine instances. The following base set of scopes is + // always included: https://www.googleapis.com/auth/cloud.useraccounts.readonly + // https://www.googleapis.com/auth/devstorage.read_write + // https://www.googleapis.com/auth/logging.writeIf no scopes are specified, the + // following defaults are also provided: + // https://www.googleapis.com/auth/bigquery + // https://www.googleapis.com/auth/bigtable.admin.table + // https://www.googleapis.com/auth/bigtable.data + // https://www.googleapis.com/auth/devstorage.full_control + ServiceAccountScopes []string `json:"serviceAccountScopes,omitempty"` + // ShieldedInstanceConfig: Optional. Shielded Instance Config for clusters + // using Compute Engine Shielded VMs + // (https://cloud.google.com/security/shielded-cloud/shielded-vm). + ShieldedInstanceConfig *ShieldedInstanceConfig `json:"shieldedInstanceConfig,omitempty"` + // SubnetworkUri: Optional. The Compute Engine subnetwork to be used for + // machine communications. Cannot be specified with network_uri.A full URL, + // partial URI, or short name are valid. Examples: + // https://www.googleapis.com/compute/v1/projects/[project_id]/regions/[region]/subnetworks/sub0 + // projects/[project_id]/regions/[region]/subnetworks/sub0 sub0 + SubnetworkUri string `json:"subnetworkUri,omitempty"` + // Tags: The Compute Engine network tags to add to all instances (see Tagging + // instances (https://cloud.google.com/vpc/docs/add-remove-network-tags)). + Tags []string `json:"tags,omitempty"` + // ZoneUri: Optional. The Compute Engine zone where the Dataproc cluster will + // be located. If omitted, the service will pick a zone in the cluster's + // Compute Engine region. On a get request, zone will always be present.A full + // URL, partial URI, or short name are valid. Examples: + // https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone] + // projects/[project_id]/zones/[zone] [zone] + ZoneUri string `json:"zoneUri,omitempty"` + // ForceSendFields is a list of field names (e.g. "ConfidentialInstanceConfig") + // to unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "MaxFailuresPerHour") to include - // in API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
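
Given the GceClusterConfig field comments above, a hypothetical construction that respects the documented constraints: subnetwork_uri set instead of the mutually exclusive network_uri, internal-IP-only VMs, and an explicit Dataproc VM service account rather than the Compute Engine default. Every identifier below is a placeholder.

	gce := &GceClusterConfig{
		InternalIpOnly: true, // requires Private Google Access on the subnetwork
		ServiceAccount: "dataproc-vm@my-project.iam.gserviceaccount.com",
		SubnetworkUri:  "projects/my-project/regions/us-central1/subnetworks/sub0",
		ZoneUri:        "us-central1-a",
		Tags:           []string{"dataproc"},
	}
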
See + // NullFields is a list of field names (e.g. "ConfidentialInstanceConfig") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *JobScheduling) MarshalJSON() ([]byte, error) { - type NoMethod JobScheduling - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s GceClusterConfig) MarshalJSON() ([]byte, error) { + type NoMethod GceClusterConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// JobStatus: Dataproc job status. -type JobStatus struct { - // Details: Optional. Output only. Job state details, such as an error - // description if the state is ERROR. - Details string `json:"details,omitempty"` - // State: Output only. A state message specifying the overall job state. - // - // Possible values: - // "STATE_UNSPECIFIED" - The job state is unknown. - // "PENDING" - The job is pending; it has been submitted, but is not yet - // running. - // "SETUP_DONE" - Job has been received by the service and completed initial - // setup; it will soon be submitted to the cluster. - // "RUNNING" - The job is running on the cluster. - // "CANCEL_PENDING" - A CancelJob request has been received, but is pending. - // "CANCEL_STARTED" - Transient in-flight resources have been canceled, and - // the request to cancel the running job has been issued to the cluster. - // "CANCELLED" - The job cancellation was successful. - // "DONE" - The job has completed successfully. - // "ERROR" - The job has completed, but encountered an error. - // "ATTEMPT_FAILURE" - Job attempt has failed. The detail field contains - // failure details for this attempt.Applies to restartable jobs only. - State string `json:"state,omitempty"` - // StateStartTime: Output only. The time when this state was entered. - StateStartTime string `json:"stateStartTime,omitempty"` - // Substate: Output only. Additional state information, which includes status - // reported by the agent. - // - // Possible values: - // "UNSPECIFIED" - The job substate is unknown. - // "SUBMITTED" - The Job is submitted to the agent.Applies to RUNNING state. - // "QUEUED" - The Job has been received and is awaiting execution (it might - // be waiting for a condition to be met). See the "details" field for the - // reason for the delay.Applies to RUNNING state. - // "STALE_STATUS" - The agent-reported status is out of date, which can be - // caused by a loss of communication between the agent and Dataproc. If the - // agent does not send a timely update, the job will fail.Applies to RUNNING - // state. - Substate string `json:"substate,omitempty"` - // ForceSendFields is a list of field names (e.g. "Details") to unconditionally +// GetIamPolicyRequest: Request message for GetIamPolicy method. +type GetIamPolicyRequest struct { + // Options: OPTIONAL: A GetPolicyOptions object for specifying options to + // GetIamPolicy. + Options *GetPolicyOptions `json:"options,omitempty"` + // ForceSendFields is a list of field names (e.g. "Options") to unconditionally // include in API requests. By default, fields with empty or default values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. 
"Details") to include in API + // NullFields is a list of field names (e.g. "Options") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *JobStatus) MarshalJSON() ([]byte, error) { - type NoMethod JobStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s GetIamPolicyRequest) MarshalJSON() ([]byte, error) { + type NoMethod GetIamPolicyRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// JupyterConfig: Jupyter configuration for an interactive session. -type JupyterConfig struct { - // DisplayName: Optional. Display name, shown in the Jupyter kernelspec card. - DisplayName string `json:"displayName,omitempty"` - // Kernel: Optional. Kernel - // - // Possible values: - // "KERNEL_UNSPECIFIED" - The kernel is unknown. - // "PYTHON" - Python kernel. - // "SCALA" - Scala kernel. - Kernel string `json:"kernel,omitempty"` - // ForceSendFields is a list of field names (e.g. "DisplayName") to +// GetPolicyOptions: Encapsulates settings provided to GetIamPolicy. +type GetPolicyOptions struct { + // RequestedPolicyVersion: Optional. The maximum policy version that will be + // used to format the policy.Valid values are 0, 1, and 3. Requests specifying + // an invalid value will be rejected.Requests for policies with any conditional + // role bindings must specify version 3. Policies with no conditional role + // bindings may specify any valid value or leave the field unset.The policy in + // the response might use the policy version that you specified, or it might + // use a lower policy version. For example, if you specify version 3, but the + // policy has no conditional role bindings, the response uses version 1.To + // learn which resources support conditions in their IAM policies, see the IAM + // documentation + // (https://cloud.google.com/iam/help/conditions/resource-policies). + RequestedPolicyVersion int64 `json:"requestedPolicyVersion,omitempty"` + // ForceSendFields is a list of field names (e.g. "RequestedPolicyVersion") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DisplayName") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "RequestedPolicyVersion") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *JupyterConfig) MarshalJSON() ([]byte, error) { - type NoMethod JupyterConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s GetPolicyOptions) MarshalJSON() ([]byte, error) { + type NoMethod GetPolicyOptions + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// KerberosConfig: Specifies Kerberos related configuration. -type KerberosConfig struct { - // CrossRealmTrustAdminServer: Optional. 
The admin server (IP or hostname) for - // the remote trusted realm in a cross realm trust relationship. - CrossRealmTrustAdminServer string `json:"crossRealmTrustAdminServer,omitempty"` - // CrossRealmTrustKdc: Optional. The KDC (IP or hostname) for the remote - // trusted realm in a cross realm trust relationship. - CrossRealmTrustKdc string `json:"crossRealmTrustKdc,omitempty"` - // CrossRealmTrustRealm: Optional. The remote realm the Dataproc on-cluster KDC - // will trust, should the user enable cross realm trust. - CrossRealmTrustRealm string `json:"crossRealmTrustRealm,omitempty"` - // CrossRealmTrustSharedPasswordUri: Optional. The Cloud Storage URI of a KMS - // encrypted file containing the shared password between the on-cluster - // Kerberos realm and the remote trusted realm, in a cross realm trust - // relationship. - CrossRealmTrustSharedPasswordUri string `json:"crossRealmTrustSharedPasswordUri,omitempty"` - // EnableKerberos: Optional. Flag to indicate whether to Kerberize the cluster - // (default: false). Set this field to true to enable Kerberos on a cluster. - EnableKerberos bool `json:"enableKerberos,omitempty"` - // KdcDbKeyUri: Optional. The Cloud Storage URI of a KMS encrypted file - // containing the master key of the KDC database. - KdcDbKeyUri string `json:"kdcDbKeyUri,omitempty"` - // KeyPasswordUri: Optional. The Cloud Storage URI of a KMS encrypted file - // containing the password to the user provided key. For the self-signed - // certificate, this password is generated by Dataproc. - KeyPasswordUri string `json:"keyPasswordUri,omitempty"` - // KeystorePasswordUri: Optional. The Cloud Storage URI of a KMS encrypted file - // containing the password to the user provided keystore. For the self-signed - // certificate, this password is generated by Dataproc. - KeystorePasswordUri string `json:"keystorePasswordUri,omitempty"` - // KeystoreUri: Optional. The Cloud Storage URI of the keystore file used for - // SSL encryption. If not provided, Dataproc will provide a self-signed - // certificate. - KeystoreUri string `json:"keystoreUri,omitempty"` - // KmsKeyUri: Optional. The URI of the KMS key used to encrypt sensitive files. - KmsKeyUri string `json:"kmsKeyUri,omitempty"` - // Realm: Optional. The name of the on-cluster Kerberos realm. If not - // specified, the uppercased domain of hostnames will be the realm. - Realm string `json:"realm,omitempty"` - // RootPrincipalPasswordUri: Optional. The Cloud Storage URI of a KMS encrypted - // file containing the root principal password. - RootPrincipalPasswordUri string `json:"rootPrincipalPasswordUri,omitempty"` - // TgtLifetimeHours: Optional. The lifetime of the ticket granting ticket, in - // hours. If not specified, or user specifies 0, then default value 10 will be - // used. - TgtLifetimeHours int64 `json:"tgtLifetimeHours,omitempty"` - // TruststorePasswordUri: Optional. The Cloud Storage URI of a KMS encrypted - // file containing the password to the user provided truststore. For the - // self-signed certificate, this password is generated by Dataproc. - TruststorePasswordUri string `json:"truststorePasswordUri,omitempty"` - // TruststoreUri: Optional. The Cloud Storage URI of the truststore file used - // for SSL encryption. If not provided, Dataproc will provide a self-signed - // certificate. - TruststoreUri string `json:"truststoreUri,omitempty"` - // ForceSendFields is a list of field names (e.g. "CrossRealmTrustAdminServer") - // to unconditionally include in API requests. 
By default, fields with empty or +// GkeClusterConfig: The cluster's GKE config. +type GkeClusterConfig struct { + // GkeClusterTarget: Optional. A target GKE cluster to deploy to. It must be in + // the same project and region as the Dataproc cluster (the GKE cluster can be + // zonal or regional). Format: + // 'projects/{project}/locations/{location}/clusters/{cluster_id}' + GkeClusterTarget string `json:"gkeClusterTarget,omitempty"` + // NamespacedGkeDeploymentTarget: Optional. Deprecated. Use gkeClusterTarget. + // Used only for the deprecated beta. A target for the deployment. + NamespacedGkeDeploymentTarget *NamespacedGkeDeploymentTarget `json:"namespacedGkeDeploymentTarget,omitempty"` + // NodePoolTarget: Optional. GKE node pools where workloads will be scheduled. + // At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. + // If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT + // GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All + // node pools must have the same location settings. + NodePoolTarget []*GkeNodePoolTarget `json:"nodePoolTarget,omitempty"` + // ForceSendFields is a list of field names (e.g. "GkeClusterTarget") to + // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CrossRealmTrustAdminServer") to - // include in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. See + // NullFields is a list of field names (e.g. "GkeClusterTarget") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *KerberosConfig) MarshalJSON() ([]byte, error) { - type NoMethod KerberosConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s GkeClusterConfig) MarshalJSON() ([]byte, error) { + type NoMethod GkeClusterConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// KubernetesClusterConfig: The configuration for running the Dataproc cluster -// on Kubernetes. -type KubernetesClusterConfig struct { - // GkeClusterConfig: Required. The configuration for running the Dataproc - // cluster on GKE. - GkeClusterConfig *GkeClusterConfig `json:"gkeClusterConfig,omitempty"` - // KubernetesNamespace: Optional. A namespace within the Kubernetes cluster to - // deploy into. If this namespace does not exist, it is created. If it exists, - // Dataproc verifies that another Dataproc VirtualCluster is not installed into - // it. If not specified, the name of the Dataproc Cluster is used. - KubernetesNamespace string `json:"kubernetesNamespace,omitempty"` - // KubernetesSoftwareConfig: Optional. The software configuration for this - // Dataproc cluster running on Kubernetes. - KubernetesSoftwareConfig *KubernetesSoftwareConfig `json:"kubernetesSoftwareConfig,omitempty"` - // ForceSendFields is a list of field names (e.g. "GkeClusterConfig") to +// GkeNodeConfig: Parameters that describe cluster nodes. +type GkeNodeConfig struct { + // Accelerators: Optional. A list of hardware accelerators + // (https://cloud.google.com/compute/docs/gpus) to attach to each node. 
+ Accelerators []*GkeNodePoolAcceleratorConfig `json:"accelerators,omitempty"`
+ // BootDiskKmsKey: Optional. The Customer Managed Encryption Key (CMEK)
+ // (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek) used to
+ // encrypt the boot disk attached to each node in the node pool. Specify the
+ // key using the following format:
+ // projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypt
+ // o_key}
+ BootDiskKmsKey string `json:"bootDiskKmsKey,omitempty"`
+ // LocalSsdCount: Optional. The number of local SSD disks to attach to the
+ // node, which is limited by the maximum number of disks allowable per zone
+ // (see Adding Local SSDs
+ // (https://cloud.google.com/compute/docs/disks/local-ssd)).
+ LocalSsdCount int64 `json:"localSsdCount,omitempty"`
+ // MachineType: Optional. The name of a Compute Engine machine type
+ // (https://cloud.google.com/compute/docs/machine-types).
+ MachineType string `json:"machineType,omitempty"`
+ // MinCpuPlatform: Optional. Minimum CPU platform
+ // (https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
+ // to be used by this instance. The instance may be scheduled on the specified
+ // or a newer CPU platform. Specify the friendly names of CPU platforms, such
+ // as "Intel Haswell" or "Intel Sandy Bridge".
+ MinCpuPlatform string `json:"minCpuPlatform,omitempty"`
+ // Preemptible: Optional. Whether the nodes are created as legacy preemptible
+ // VM instances (https://cloud.google.com/compute/docs/instances/preemptible).
+ // Also see Spot VMs, preemptible VM instances without a maximum lifetime.
+ // Legacy and Spot preemptible nodes cannot be used in a node pool with the
+ // CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not
+ // assigned (the DEFAULT node pool will assume the CONTROLLER role).
+ Preemptible bool `json:"preemptible,omitempty"`
+ // Spot: Optional. Whether the nodes are created as Spot VM instances
+ // (https://cloud.google.com/compute/docs/instances/spot). Spot VMs are the
+ // latest update to legacy preemptible VMs. Spot VMs do not have a maximum
+ // lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool
+ // with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role
+ // is not assigned (the DEFAULT node pool will assume the CONTROLLER role).
+ Spot bool `json:"spot,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "Accelerators") to
// unconditionally include in API requests. By default, fields with empty or
// default values are omitted from API requests. See
// https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
// details.
ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "GkeClusterConfig") to include in
- // API requests with the JSON null value. By default, fields with empty values
- // are omitted from API requests. See
+ // NullFields is a list of field names (e.g. "Accelerators") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
// https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
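
A hypothetical GkeNodeConfig that follows the Spot/preemptible constraint repeated above (such nodes cannot serve the CONTROLLER role); the machine and accelerator types are placeholders.

	node := &GkeNodeConfig{
		MachineType:   "n1-standard-4",
		Spot:          true, // therefore unusable for a CONTROLLER node pool
		LocalSsdCount: 1,
		Accelerators: []*GkeNodePoolAcceleratorConfig{
			{AcceleratorCount: 1, AcceleratorType: "nvidia-tesla-t4"},
		},
	}
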
NullFields []string `json:"-"`
}

-func (s *KubernetesClusterConfig) MarshalJSON() ([]byte, error) {
- type NoMethod KubernetesClusterConfig
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+func (s GkeNodeConfig) MarshalJSON() ([]byte, error) {
+ type NoMethod GkeNodeConfig
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

-// KubernetesSoftwareConfig: The software configuration for this Dataproc
-// cluster running on Kubernetes.
-type KubernetesSoftwareConfig struct {
- // ComponentVersion: The components that should be installed in this Dataproc
- // cluster. The key must be a string from the KubernetesComponent enumeration.
- // The value is the version of the software to be installed. At least one entry
- // must be specified.
- ComponentVersion map[string]string `json:"componentVersion,omitempty"`
- // Properties: The properties to set on daemon config files.Property keys are
- // specified in prefix:property format, for example
- // spark:spark.kubernetes.container.image. The following are supported prefixes
- // and their mappings: spark: spark-defaults.confFor more information, see
- // Cluster properties
- // (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
- Properties map[string]string `json:"properties,omitempty"`
- // ForceSendFields is a list of field names (e.g. "ComponentVersion") to
+// GkeNodePoolAcceleratorConfig: A GkeNodePoolAcceleratorConfig represents a
+// Hardware Accelerator request for a node pool.
+type GkeNodePoolAcceleratorConfig struct {
+ // AcceleratorCount: The number of accelerator cards exposed to an instance.
+ AcceleratorCount int64 `json:"acceleratorCount,omitempty,string"`
+ // AcceleratorType: The accelerator type resource name (see GPUs on Compute
+ // Engine).
+ AcceleratorType string `json:"acceleratorType,omitempty"`
+ // GpuPartitionSize: Size of partitions to create on the GPU. Valid values are
+ // described in the NVIDIA MIG user guide
+ // (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning).
+ GpuPartitionSize string `json:"gpuPartitionSize,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "AcceleratorCount") to
// unconditionally include in API requests. By default, fields with empty or
// default values are omitted from API requests. See
// https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
// details.
ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "ComponentVersion") to include in
+ // NullFields is a list of field names (e.g. "AcceleratorCount") to include in
// API requests with the JSON null value. By default, fields with empty values
// are omitted from API requests. See
// https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
NullFields []string `json:"-"`
}

-func (s *KubernetesSoftwareConfig) MarshalJSON() ([]byte, error) {
- type NoMethod KubernetesSoftwareConfig
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+func (s GkeNodePoolAcceleratorConfig) MarshalJSON() ([]byte, error) {
+ type NoMethod GkeNodePoolAcceleratorConfig
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

-// LifecycleConfig: Specifies the cluster auto-delete schedule configuration.
-type LifecycleConfig struct {
- // AutoDeleteTime: Optional. The time when cluster will be auto-deleted (see
- // JSON representation of Timestamp
- // (https://developers.google.com/protocol-buffers/docs/proto3#json)).
- AutoDeleteTime string `json:"autoDeleteTime,omitempty"` - // AutoDeleteTtl: Optional. The lifetime duration of cluster. The cluster will - // be auto-deleted at the end of this period. Minimum value is 10 minutes; - // maximum value is 14 days (see JSON representation of Duration - // (https://developers.google.com/protocol-buffers/docs/proto3#json)). - AutoDeleteTtl string `json:"autoDeleteTtl,omitempty"` - // IdleDeleteTtl: Optional. The duration to keep the cluster alive while idling - // (when no jobs are running). Passing this threshold will cause the cluster to - // be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON - // representation of Duration - // (https://developers.google.com/protocol-buffers/docs/proto3#json)). - IdleDeleteTtl string `json:"idleDeleteTtl,omitempty"` - // IdleStartTime: Output only. The time when cluster became idle (most recent - // job finished) and became eligible for deletion due to idleness (see JSON - // representation of Timestamp - // (https://developers.google.com/protocol-buffers/docs/proto3#json)). - IdleStartTime string `json:"idleStartTime,omitempty"` - // ForceSendFields is a list of field names (e.g. "AutoDeleteTime") to +// GkeNodePoolAutoscalingConfig: GkeNodePoolAutoscaling contains information +// the cluster autoscaler needs to adjust the size of the node pool to the +// current cluster usage. +type GkeNodePoolAutoscalingConfig struct { + // MaxNodeCount: The maximum number of nodes in the node pool. Must be >= + // min_node_count, and must be > 0. Note: Quota must be sufficient to scale up + // the cluster. + MaxNodeCount int64 `json:"maxNodeCount,omitempty"` + // MinNodeCount: The minimum number of nodes in the node pool. Must be >= 0 and + // <= max_node_count. + MinNodeCount int64 `json:"minNodeCount,omitempty"` + // ForceSendFields is a list of field names (e.g. "MaxNodeCount") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AutoDeleteTime") to include in - // API requests with the JSON null value. By default, fields with empty values - // are omitted from API requests. See + // NullFields is a list of field names (e.g. "MaxNodeCount") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *LifecycleConfig) MarshalJSON() ([]byte, error) { - type NoMethod LifecycleConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s GkeNodePoolAutoscalingConfig) MarshalJSON() ([]byte, error) { + type NoMethod GkeNodePoolAutoscalingConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ListAutoscalingPoliciesResponse: A response to a request to list autoscaling -// policies in a project. -type ListAutoscalingPoliciesResponse struct { - // NextPageToken: Output only. This token is included in the response if there - // are more results to fetch. - NextPageToken string `json:"nextPageToken,omitempty"` - // Policies: Output only. Autoscaling policies list. - Policies []*AutoscalingPolicy `json:"policies,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the server. 
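
Combining the node config sketched earlier with the autoscaling bounds documented above (0 <= min_node_count <= max_node_count, and max_node_count > 0); the zone is a placeholder.

	pool := &GkeNodePoolConfig{
		Config: node, // the GkeNodeConfig sketch from above
		Autoscaling: &GkeNodePoolAutoscalingConfig{
			MinNodeCount: 1,
			MaxNodeCount: 8,
		},
		Locations: []string{"us-central1-a"},
	}
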
- googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "NextPageToken") to +// GkeNodePoolConfig: The configuration of a GKE node pool used by a +// Dataproc-on-GKE cluster +// (https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster). +type GkeNodePoolConfig struct { + // Autoscaling: Optional. The autoscaler configuration for this node pool. The + // autoscaler is enabled only when a valid configuration is present. + Autoscaling *GkeNodePoolAutoscalingConfig `json:"autoscaling,omitempty"` + // Config: Optional. The node pool configuration. + Config *GkeNodeConfig `json:"config,omitempty"` + // Locations: Optional. The list of Compute Engine zones + // (https://cloud.google.com/compute/docs/zones#available) where node pool + // nodes associated with a Dataproc on GKE virtual cluster will be + // located.Note: All node pools associated with a virtual cluster must be + // located in the same region as the virtual cluster, and they must be located + // in the same zone within that region.If a location is not specified during + // node pool creation, Dataproc on GKE will choose the zone. + Locations []string `json:"locations,omitempty"` + // ForceSendFields is a list of field names (e.g. "Autoscaling") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NextPageToken") to include in API + // NullFields is a list of field names (e.g. "Autoscaling") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ListAutoscalingPoliciesResponse) MarshalJSON() ([]byte, error) { - type NoMethod ListAutoscalingPoliciesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s GkeNodePoolConfig) MarshalJSON() ([]byte, error) { + type NoMethod GkeNodePoolConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ListBatchesResponse: A list of batch workloads. -type ListBatchesResponse struct { - // Batches: Output only. The batches from the specified collection. - Batches []*Batch `json:"batches,omitempty"` - // NextPageToken: A token, which can be sent as page_token to retrieve the next - // page. If this field is omitted, there are no subsequent pages. - NextPageToken string `json:"nextPageToken,omitempty"` - // Unreachable: Output only. List of Batches that could not be included in the - // response. Attempting to get one of these resources may indicate why it was - // not included in the list response. - Unreachable []string `json:"unreachable,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the server. - googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Batches") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more +// GkeNodePoolTarget: GKE node pools that Dataproc workloads run on. +type GkeNodePoolTarget struct { + // NodePool: Required. The target GKE node pool. 
Format:
+ // 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_p
+ // ool}'
+ NodePool string `json:"nodePool,omitempty"`
+ // NodePoolConfig: Input only. The configuration for the GKE node pool.If
+ // specified, Dataproc attempts to create a node pool with the specified shape.
+ // If one with the same name already exists, it is verified against all
+ // specified fields. If a field differs, the virtual cluster creation will
+ // fail.If omitted, any node pool with the specified name is used. If a node
+ // pool with the specified name does not exist, Dataproc creates a node pool
+ // with default values.This is an input only field. It will not be returned by
+ // the API.
+ NodePoolConfig *GkeNodePoolConfig `json:"nodePoolConfig,omitempty"`
+ // Roles: Required. The roles associated with the GKE node pool.
+ //
+ // Possible values:
+ // "ROLE_UNSPECIFIED" - Role is unspecified.
+ // "DEFAULT" - At least one node pool must have the DEFAULT role. Work
+ // assigned to a role that is not associated with a node pool is assigned to
+ // the node pool with the DEFAULT role. For example, work assigned to the
+ // CONTROLLER role will be assigned to the node pool with the DEFAULT role if
+ // no node pool has the CONTROLLER role.
+ // "CONTROLLER" - Run work associated with the Dataproc control plane (for
+ // example, controllers and webhooks). Very low resource requirements.
+ // "SPARK_DRIVER" - Run work associated with a Spark driver of a job.
+ // "SPARK_EXECUTOR" - Run work associated with a Spark executor of a job.
+ Roles []string `json:"roles,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "NodePool") to
+ // unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
// details.
ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Batches") to include in API
+ // NullFields is a list of field names (e.g. "NodePool") to include in API
// requests with the JSON null value. By default, fields with empty values are
// omitted from API requests. See
// https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
NullFields []string `json:"-"`
}

-func (s *ListBatchesResponse) MarshalJSON() ([]byte, error) {
- type NoMethod ListBatchesResponse
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+func (s GkeNodePoolTarget) MarshalJSON() ([]byte, error) {
+ type NoMethod GkeNodePoolTarget
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

-// ListClustersResponse: The list of all clusters in a project.
-type ListClustersResponse struct {
- // Clusters: Output only. The clusters in the project.
- Clusters []*Cluster `json:"clusters,omitempty"`
- // NextPageToken: Output only. This token is included in the response if there
- // are more results to fetch. To fetch additional results, provide this value
- // as the page_token in a subsequent ListClustersRequest.
- NextPageToken string `json:"nextPageToken,omitempty"`
+// GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig: Encryption settings
+// for encrypting workflow template job arguments.
+type GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig struct {
+ // KmsKey: Optional.
The Cloud KMS key name to use for encrypting workflow
+ // template job arguments.When this key is provided, the following
+ // workflow template job arguments
+ // (https://cloud.google.com/dataproc/docs/concepts/workflows/use-workflows#adding_jobs_to_a_template),
+ // if present, are CMEK encrypted
+ // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_workflow_template_data):
+ // FlinkJob args
+ // (https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob)
+ // HadoopJob args
+ // (https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob)
+ // SparkJob args
+ // (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob)
+ // SparkRJob args
+ // (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob)
+ // PySparkJob args
+ // (https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob)
+ // SparkSqlJob
+ // (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob)
+ // scriptVariables and queryList.queries HiveJob
+ // (https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob)
+ // scriptVariables and queryList.queries PigJob
+ // (https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob)
+ // scriptVariables and queryList.queries PrestoJob
+ // (https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob)
+ // scriptVariables and queryList.queries
+ KmsKey string `json:"kmsKey,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "KmsKey") to unconditionally
+ // include in API requests. By default, fields with empty or default values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "KmsKey") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}

- // ServerResponse contains the HTTP response code and headers from the server.
- googleapi.ServerResponse `json:"-"`
- // ForceSendFields is a list of field names (e.g. "Clusters") to
+func (s GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig) MarshalJSON() ([]byte, error) {
+ type NoMethod GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// HadoopJob: A Dataproc job for running Apache Hadoop MapReduce
+// (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html)
+// jobs on Apache Hadoop YARN
+// (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
+type HadoopJob struct {
+ // ArchiveUris: Optional. HCFS URIs of archives to be extracted in the working
+ // directory of Hadoop drivers and tasks. Supported file types: .jar, .tar,
+ // .tar.gz, .tgz, or .zip.
+ ArchiveUris []string `json:"archiveUris,omitempty"`
+ // Args: Optional. The arguments to pass to the driver. Do not include
+ // arguments, such as -libjars or -Dfoo=bar, that can be set as job properties,
+ // since a collision might occur that causes an incorrect job submission.
+ Args []string `json:"args,omitempty"`
+ // FileUris: Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be
+ // copied to the working directory of Hadoop drivers and distributed tasks.
+ // Useful for naively parallel tasks. + FileUris []string `json:"fileUris,omitempty"` + // JarFileUris: Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop + // driver and tasks. + JarFileUris []string `json:"jarFileUris,omitempty"` + // LoggingConfig: Optional. The runtime log config for job execution. + LoggingConfig *LoggingConfig `json:"loggingConfig,omitempty"` + // MainClass: The name of the driver's main class. The jar file containing the + // class must be in the default CLASSPATH or specified in jar_file_uris. + MainClass string `json:"mainClass,omitempty"` + // MainJarFileUri: The HCFS URI of the jar file containing the main class. + // Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' + // 'hdfs:/tmp/test-samples/custom-wordcount.jar' + // 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar' + MainJarFileUri string `json:"mainJarFileUri,omitempty"` + // Properties: Optional. A mapping of property names to values, used to + // configure Hadoop. Properties that conflict with values set by the Dataproc + // API might be overwritten. Can include properties set in + // /etc/hadoop/conf/*-site and classes in user code. + Properties map[string]string `json:"properties,omitempty"` + // ForceSendFields is a list of field names (e.g. "ArchiveUris") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Clusters") to include in API + // NullFields is a list of field names (e.g. "ArchiveUris") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ListClustersResponse) MarshalJSON() ([]byte, error) { - type NoMethod ListClustersResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s HadoopJob) MarshalJSON() ([]byte, error) { + type NoMethod HadoopJob + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ListJobsResponse: A list of jobs in a project. -type ListJobsResponse struct { - // Jobs: Output only. Jobs list. - Jobs []*Job `json:"jobs,omitempty"` - // NextPageToken: Optional. This token is included in the response if there are - // more results to fetch. To fetch additional results, provide this value as - // the page_token in a subsequent ListJobsRequest. - NextPageToken string `json:"nextPageToken,omitempty"` - // Unreachable: Output only. List of jobs with kms_key-encrypted parameters - // that could not be decrypted. A response to a jobs.get request may indicate - // the reason for the decryption failure for a specific job. - Unreachable []string `json:"unreachable,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the server. - googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Jobs") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See +// HiveJob: A Dataproc job for running Apache Hive (https://hive.apache.org/) +// queries on YARN. +type HiveJob struct { + // ContinueOnFailure: Optional. Whether to continue executing queries if a + // query fails. 
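
The HadoopJob comment above warns against smuggling -libjars/-D flags through Args; a hypothetical submission that keeps program arguments in Args and engine settings in Properties. The bucket paths are placeholders, and the jar URI echoes the example in the MainJarFileUri field comment.

	hadoop := &HadoopJob{
		MainJarFileUri: "file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar",
		Args:           []string{"wordcount", "gs://in-bucket/input/", "gs://out-bucket/output/"},
		Properties: map[string]string{
			"mapreduce.job.maps": "8", // rather than -Dmapreduce.job.maps=8 in Args
		},
	}
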
The default value is false. Setting to true can be useful when + // executing independent parallel queries. + ContinueOnFailure bool `json:"continueOnFailure,omitempty"` + // JarFileUris: Optional. HCFS URIs of jar files to add to the CLASSPATH of the + // Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and + // UDFs. + JarFileUris []string `json:"jarFileUris,omitempty"` + // Properties: Optional. A mapping of property names and values, used to + // configure Hive. Properties that conflict with values set by the Dataproc API + // might be overwritten. Can include properties set in + // /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in + // user code. + Properties map[string]string `json:"properties,omitempty"` + // QueryFileUri: The HCFS URI of the script that contains Hive queries. + QueryFileUri string `json:"queryFileUri,omitempty"` + // QueryList: A list of queries. + QueryList *QueryList `json:"queryList,omitempty"` + // ScriptVariables: Optional. Mapping of query variable names to values + // (equivalent to the Hive command: SET name="value";). + ScriptVariables map[string]string `json:"scriptVariables,omitempty"` + // ForceSendFields is a list of field names (e.g. "ContinueOnFailure") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Jobs") to include in API requests - // with the JSON null value. By default, fields with empty values are omitted - // from API requests. See + // NullFields is a list of field names (e.g. "ContinueOnFailure") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ListJobsResponse) MarshalJSON() ([]byte, error) { - type NoMethod ListJobsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s HiveJob) MarshalJSON() ([]byte, error) { + type NoMethod HiveJob + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ListOperationsResponse: The response message for Operations.ListOperations. -type ListOperationsResponse struct { - // NextPageToken: The standard List next-page token. - NextPageToken string `json:"nextPageToken,omitempty"` - // Operations: A list of operations that matches the specified filter in the - // request. - Operations []*Operation `json:"operations,omitempty"` +// IdentityConfig: Identity related configuration, including service account +// based secure multi-tenancy user mappings. +type IdentityConfig struct { + // UserServiceAccountMapping: Required. Map of user to service account. + UserServiceAccountMapping map[string]string `json:"userServiceAccountMapping,omitempty"` + // ForceSendFields is a list of field names (e.g. "UserServiceAccountMapping") + // to unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "UserServiceAccountMapping") to + // include in API requests with the JSON null value. 
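
For the HiveJob above, ScriptVariables is shorthand for Hive SET commands; a sketch with placeholder values:

	hive := &HiveJob{
		QueryFileUri:      "gs://my-bucket/queries/daily.q",
		ContinueOnFailure: true, // keep independent queries running after a failure
		ScriptVariables: map[string]string{
			"dt": "2026-02-12", // equivalent to: SET dt="2026-02-12";
		},
	}
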
By default, fields with + // empty values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} - // ServerResponse contains the HTTP response code and headers from the server. - googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "NextPageToken") to +func (s IdentityConfig) MarshalJSON() ([]byte, error) { + type NoMethod IdentityConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// InjectCredentialsRequest: A request to inject credentials into a cluster. +type InjectCredentialsRequest struct { + // ClusterUuid: Required. The cluster UUID. + ClusterUuid string `json:"clusterUuid,omitempty"` + // CredentialsCiphertext: Required. The encrypted credentials being injected + // into the cluster. The client is responsible for encrypting the credentials + // in a way that is supported by the cluster. A wrapped value is used here so + // that the actual contents of the encrypted credentials are not written to + // audit logs. + CredentialsCiphertext string `json:"credentialsCiphertext,omitempty"` + // ForceSendFields is a list of field names (e.g. "ClusterUuid") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NextPageToken") to include in API + // NullFields is a list of field names (e.g. "ClusterUuid") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { - type NoMethod ListOperationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s InjectCredentialsRequest) MarshalJSON() ([]byte, error) { + type NoMethod InjectCredentialsRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ListSessionTemplatesResponse: A list of session templates. -type ListSessionTemplatesResponse struct { - // NextPageToken: A token, which can be sent as page_token to retrieve the next - // page. If this field is omitted, there are no subsequent pages. - NextPageToken string `json:"nextPageToken,omitempty"` - // SessionTemplates: Output only. Session template list - SessionTemplates []*SessionTemplate `json:"sessionTemplates,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the server. - googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "NextPageToken") to +// InputMetrics: Metrics about the input data read by the task. +type InputMetrics struct { + BytesRead int64 `json:"bytesRead,omitempty,string"` + RecordsRead int64 `json:"recordsRead,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "BytesRead") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NextPageToken") to include in API + // NullFields is a list of field names (e.g.
"BytesRead") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ListSessionTemplatesResponse) MarshalJSON() ([]byte, error) { - type NoMethod ListSessionTemplatesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s InputMetrics) MarshalJSON() ([]byte, error) { + type NoMethod InputMetrics + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ListSessionsResponse: A list of interactive sessions. -type ListSessionsResponse struct { - // NextPageToken: A token, which can be sent as page_token, to retrieve the - // next page. If this field is omitted, there are no subsequent pages. - NextPageToken string `json:"nextPageToken,omitempty"` - // Sessions: Output only. The sessions from the specified collection. - Sessions []*Session `json:"sessions,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the server. - googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "NextPageToken") to +type InputQuantileMetrics struct { + BytesRead *Quantiles `json:"bytesRead,omitempty"` + RecordsRead *Quantiles `json:"recordsRead,omitempty"` + // ForceSendFields is a list of field names (e.g. "BytesRead") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NextPageToken") to include in API + // NullFields is a list of field names (e.g. "BytesRead") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ListSessionsResponse) MarshalJSON() ([]byte, error) { - type NoMethod ListSessionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s InputQuantileMetrics) MarshalJSON() ([]byte, error) { + type NoMethod InputQuantileMetrics + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ListWorkflowTemplatesResponse: A response to a request to list workflow -// templates in a project. -type ListWorkflowTemplatesResponse struct { - // NextPageToken: Output only. This token is included in the response if there - // are more results to fetch. To fetch additional results, provide this value - // as the page_token in a subsequent ListWorkflowTemplatesRequest. - NextPageToken string `json:"nextPageToken,omitempty"` - // Templates: Output only. WorkflowTemplates list. - Templates []*WorkflowTemplate `json:"templates,omitempty"` - // Unreachable: Output only. List of workflow templates that could not be - // included in the response. Attempting to get one of these resources may - // indicate why it was not included in the list response. - Unreachable []string `json:"unreachable,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the server. - googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. 
"NextPageToken") to +// InstanceFlexibilityPolicy: Instance flexibility Policy allowing a mixture of +// VM shapes and provisioning models. +type InstanceFlexibilityPolicy struct { + // InstanceSelectionList: Optional. List of instance selection options that the + // group will use when creating new VMs. + InstanceSelectionList []*InstanceSelection `json:"instanceSelectionList,omitempty"` + // InstanceSelectionResults: Output only. A list of instance selection results + // in the group. + InstanceSelectionResults []*InstanceSelectionResult `json:"instanceSelectionResults,omitempty"` + // ProvisioningModelMix: Optional. Defines how the Group selects the + // provisioning model to ensure required reliability. + ProvisioningModelMix *ProvisioningModelMix `json:"provisioningModelMix,omitempty"` + // ForceSendFields is a list of field names (e.g. "InstanceSelectionList") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NextPageToken") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "InstanceSelectionList") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ListWorkflowTemplatesResponse) MarshalJSON() ([]byte, error) { - type NoMethod ListWorkflowTemplatesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s InstanceFlexibilityPolicy) MarshalJSON() ([]byte, error) { + type NoMethod InstanceFlexibilityPolicy + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// LoggingConfig: The runtime logging config of the job. -type LoggingConfig struct { - // DriverLogLevels: The per-package log levels for the driver. This can include - // "root" package name to configure rootLogger. Examples: - 'com.google = - // FATAL' - 'root = INFO' - 'org.apache = DEBUG' - DriverLogLevels map[string]string `json:"driverLogLevels,omitempty"` - // ForceSendFields is a list of field names (e.g. "DriverLogLevels") to +// InstanceGroupAutoscalingPolicyConfig: Configuration for the size bounds of +// an instance group, including its proportional size to other groups. +type InstanceGroupAutoscalingPolicyConfig struct { + // MaxInstances: Required. Maximum number of instances for this group. Required + // for primary workers. Note that by default, clusters will not use secondary + // workers. Required for secondary workers if the minimum secondary instances + // is set.Primary workers - Bounds: [min_instances, ). Secondary workers - + // Bounds: [min_instances, ). Default: 0. + MaxInstances int64 `json:"maxInstances,omitempty"` + // MinInstances: Optional. Minimum number of instances for this group.Primary + // workers - Bounds: 2, max_instances. Default: 2. Secondary workers - Bounds: + // 0, max_instances. Default: 0. + MinInstances int64 `json:"minInstances,omitempty"` + // Weight: Optional. Weight for the instance group, which is used to determine + // the fraction of total workers in the cluster from this instance group. 
For + // example, if primary workers have weight 2, and secondary workers have weight + // 1, the cluster will have approximately 2 primary workers for each secondary + // worker.The cluster may not reach the specified balance if constrained by + // min/max bounds or other autoscaling settings. For example, if max_instances + // for secondary workers is 0, then only primary workers will be added. The + // cluster can also be out of balance when created.If weight is not set on any + // instance group, the cluster will default to equal weight for all groups: the + // cluster will attempt to maintain an equal number of workers in each group + // within the configured size bounds for each group. If weight is set for one + // group only, the cluster will default to zero weight on the unset group. For + // example if weight is set only on primary workers, the cluster will use + // primary workers only and no secondary workers. + Weight int64 `json:"weight,omitempty"` + // ForceSendFields is a list of field names (e.g. "MaxInstances") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DriverLogLevels") to include in - // API requests with the JSON null value. By default, fields with empty values - // are omitted from API requests. See + // NullFields is a list of field names (e.g. "MaxInstances") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *LoggingConfig) MarshalJSON() ([]byte, error) { - type NoMethod LoggingConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s InstanceGroupAutoscalingPolicyConfig) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupAutoscalingPolicyConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ManagedCluster: Cluster that is managed by the workflow. -type ManagedCluster struct { - // ClusterName: Required. The cluster name prefix. A unique cluster name will - // be formed by appending a random suffix.The name must contain only lower-case - // letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. - // Cannot begin or end with hyphen. Must consist of between 2 and 35 - // characters. - ClusterName string `json:"clusterName,omitempty"` - // Config: Required. The cluster configuration. - Config *ClusterConfig `json:"config,omitempty"` - // Labels: Optional. The labels to associate with this cluster.Label keys must - // be between 1 and 63 characters long, and must conform to the following PCRE - // regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63 - // characters long, and must conform to the following PCRE regular expression: - // \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be associated with a - // given cluster. - Labels map[string]string `json:"labels,omitempty"` - // ForceSendFields is a list of field names (e.g. "ClusterName") to +// InstanceGroupConfig: The config settings for Compute Engine resources in an +// instance group, such as a master or worker group. +type InstanceGroupConfig struct { + // Accelerators: Optional. 
The Compute Engine accelerator configuration for + // these instances. + Accelerators []*AcceleratorConfig `json:"accelerators,omitempty"` + // DiskConfig: Optional. Disk option config settings. + DiskConfig *DiskConfig `json:"diskConfig,omitempty"` + // ImageUri: Optional. The Compute Engine image resource used for cluster + // instances.The URI can represent an image or image family.Image examples: + // https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id] + // projects/[project_id]/global/images/[image-id] image-idImage family + // examples. Dataproc will use the most recent image from the family: + // https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name] + // projects/[project_id]/global/images/family/[custom-image-family-name]If the + // URI is unspecified, it will be inferred from SoftwareConfig.image_version or + // the system default. + ImageUri string `json:"imageUri,omitempty"` + // InstanceFlexibilityPolicy: Optional. Instance flexibility Policy allowing a + // mixture of VM shapes and provisioning models. + InstanceFlexibilityPolicy *InstanceFlexibilityPolicy `json:"instanceFlexibilityPolicy,omitempty"` + // InstanceNames: Output only. The list of instance names. Dataproc derives the + // names from cluster_name, num_instances, and the instance group. + InstanceNames []string `json:"instanceNames,omitempty"` + // InstanceReferences: Output only. List of references to Compute Engine + // instances. + InstanceReferences []*InstanceReference `json:"instanceReferences,omitempty"` + // IsPreemptible: Output only. Specifies that this instance group contains + // preemptible instances. + IsPreemptible bool `json:"isPreemptible,omitempty"` + // MachineTypeUri: Optional. The Compute Engine machine type used for cluster + // instances.A full URL, partial URI, or short name are valid. Examples: + // https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 + // projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 + // n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone + // Placement + // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + // feature, you must use the short name of the machine type resource, for + // example, n1-standard-2. + MachineTypeUri string `json:"machineTypeUri,omitempty"` + // ManagedGroupConfig: Output only. The config for Compute Engine Instance + // Group Manager that manages this group. This is only used for preemptible + // instance groups. + ManagedGroupConfig *ManagedGroupConfig `json:"managedGroupConfig,omitempty"` + // MinCpuPlatform: Optional. Specifies the minimum cpu platform for the + // Instance Group. See Dataproc -> Minimum CPU Platform + // (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + MinCpuPlatform string `json:"minCpuPlatform,omitempty"` + // MinNumInstances: Optional. The minimum number of primary worker instances to + // create. If min_num_instances is set, cluster creation will succeed if the + // number of primary workers created is at least equal to the min_num_instances + // number.Example: Cluster creation request with num_instances = 5 and + // min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed + // VM is deleted. The cluster is resized to 4 instances and placed in a RUNNING + // state. 
If 2 instances are created and 3 instances fail, the cluster is + // placed in an ERROR state. The failed VMs are not deleted. + MinNumInstances int64 `json:"minNumInstances,omitempty"` + // NumInstances: Optional. The number of VM instances in the instance group. + // For HA cluster master_config groups, must be set to 3. For standard cluster + // master_config groups, must be set to 1. + NumInstances int64 `json:"numInstances,omitempty"` + // Preemptibility: Optional. Specifies the preemptibility of the instance + // group.The default value for master and worker groups is NON_PREEMPTIBLE. + // This default cannot be changed.The default value for secondary instances is + // PREEMPTIBLE. + // + // Possible values: + // "PREEMPTIBILITY_UNSPECIFIED" - Preemptibility is unspecified, the system + // will choose the appropriate setting for each instance group. + // "NON_PREEMPTIBLE" - Instances are non-preemptible.This option is allowed + // for all instance groups and is the only valid value for Master and Worker + // instance groups. + // "PREEMPTIBLE" - Instances are preemptible + // (https://cloud.google.com/compute/docs/instances/preemptible).This option is + // allowed only for secondary worker + // (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-vms) + // groups. + // "SPOT" - Instances are Spot VMs + // (https://cloud.google.com/compute/docs/instances/spot).This option is + // allowed only for secondary worker + // (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-vms) + // groups. Spot VMs are the latest version of preemptible VMs + // (https://cloud.google.com/compute/docs/instances/preemptible), and provide + // additional features. + Preemptibility string `json:"preemptibility,omitempty"` + // StartupConfig: Optional. Configuration to handle the startup of instances + // during the cluster create and update process. + StartupConfig *StartupConfig `json:"startupConfig,omitempty"` + // ForceSendFields is a list of field names (e.g. "Accelerators") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ClusterName") to include in API + // NullFields is a list of field names (e.g. "Accelerators") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ManagedCluster) MarshalJSON() ([]byte, error) { - type NoMethod ManagedCluster - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s InstanceGroupConfig) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ManagedGroupConfig: Specifies the resources used to actively manage an -// instance group. -type ManagedGroupConfig struct { - // InstanceGroupManagerName: Output only. The name of the Instance Group - // Manager for this group. - InstanceGroupManagerName string `json:"instanceGroupManagerName,omitempty"` - // InstanceGroupManagerUri: Output only. The partial URI to the instance group - // manager for this group. E.g. - // projects/my-project/regions/us-central1/instanceGroupManagers/my-igm.
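+
+// Editor's note: the sketch below is illustrative, not generated API surface.
+// Per the NumInstances doc on InstanceGroupConfig above, an HA cluster's
+// master group pins NumInstances to 3; the machine type short name here is a
+// hypothetical choice:
+//
+//	master := &InstanceGroupConfig{
+//		NumInstances:   3,               // required value for HA master groups
+//		MachineTypeUri: "n1-standard-2", // short-name form
+//	}
+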
- InstanceGroupManagerUri string `json:"instanceGroupManagerUri,omitempty"` - // InstanceTemplateName: Output only. The name of the Instance Template used - // for the Managed Instance Group. - InstanceTemplateName string `json:"instanceTemplateName,omitempty"` - // ForceSendFields is a list of field names (e.g. "InstanceGroupManagerName") - // to unconditionally include in API requests. By default, fields with empty or +// InstanceReference: A reference to a Compute Engine instance. +type InstanceReference struct { + // InstanceId: The unique identifier of the Compute Engine instance. + InstanceId string `json:"instanceId,omitempty"` + // InstanceName: The user-friendly name of the Compute Engine instance. + InstanceName string `json:"instanceName,omitempty"` + // PublicEciesKey: The public ECIES key used for sharing data with this + // instance. + PublicEciesKey string `json:"publicEciesKey,omitempty"` + // PublicKey: The public RSA key used for sharing data with this instance. + PublicKey string `json:"publicKey,omitempty"` + // ForceSendFields is a list of field names (e.g. "InstanceId") to + // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "InstanceGroupManagerName") to - // include in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. See + // NullFields is a list of field names (e.g. "InstanceId") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ManagedGroupConfig) MarshalJSON() ([]byte, error) { - type NoMethod ManagedGroupConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s InstanceReference) MarshalJSON() ([]byte, error) { + type NoMethod InstanceReference + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// MetastoreConfig: Specifies a Metastore configuration. -type MetastoreConfig struct { - // DataprocMetastoreService: Required. Resource name of an existing Dataproc - // Metastore service.Example: - // projects/[project_id]/locations/[dataproc_region]/services/[service-name] - DataprocMetastoreService string `json:"dataprocMetastoreService,omitempty"` - // ForceSendFields is a list of field names (e.g. "DataprocMetastoreService") - // to unconditionally include in API requests. By default, fields with empty or +// InstanceSelection: Defines machine types and a rank to which the machine +// types belong. +type InstanceSelection struct { + // MachineTypes: Optional. Full machine-type names, e.g. "n1-standard-16". + MachineTypes []string `json:"machineTypes,omitempty"` + // Rank: Optional. Preference of this instance selection. Lower number means + // higher preference. Dataproc will first try to create a VM based on the + // machine-type with priority rank and fall back to the next rank based + // on availability. Machine types and instance selections with the same priority + // have the same preference. + Rank int64 `json:"rank,omitempty"` + // ForceSendFields is a list of field names (e.g. "MachineTypes") to + // unconditionally include in API requests.
By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DataprocMetastoreService") to - // include in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. See + // NullFields is a list of field names (e.g. "MachineTypes") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *MetastoreConfig) MarshalJSON() ([]byte, error) { - type NoMethod MetastoreConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s InstanceSelection) MarshalJSON() ([]byte, error) { + type NoMethod InstanceSelection + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// Metric: A Dataproc custom metric. -type Metric struct { - // MetricOverrides: Optional. Specify one or more Custom metrics - // (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) - // to collect for the metric course (for the SPARK metric source (any Spark - // metric (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be - // specified).Provide metrics in the following format: METRIC_SOURCE: - // INSTANCE:GROUP:METRIC Use camelcase as appropriate.Examples: - // yarn:ResourceManager:QueueMetrics:AppsCompleted - // spark:driver:DAGScheduler:job.allJobs - // sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed - // hiveserver2:JVM:Memory:NonHeapMemoryUsage.used Notes: Only the specified - // overridden metrics are collected for the metric source. For example, if one - // or more spark:executive metrics are listed as metric overrides, other SPARK - // metrics are not collected. The collection of the metrics for other enabled - // custom metric sources is unaffected. For example, if both SPARK andd YARN - // metric sources are enabled, and overrides are provided for Spark metrics - // only, all YARN metrics are collected. - MetricOverrides []string `json:"metricOverrides,omitempty"` - // MetricSource: Required. A standard set of metrics is collected unless - // metricOverrides are specified for the metric source (see Custom metrics - // (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) - // for more information). - // - // Possible values: - // "METRIC_SOURCE_UNSPECIFIED" - Required unspecified metric source. - // "MONITORING_AGENT_DEFAULTS" - Monitoring agent metrics. If this source is - // enabled, Dataproc enables the monitoring agent in Compute Engine, and - // collects monitoring agent metrics, which are published with an - // agent.googleapis.com prefix. - // "HDFS" - HDFS metric source. - // "SPARK" - Spark metric source. - // "YARN" - YARN metric source. - // "SPARK_HISTORY_SERVER" - Spark History Server metric source. - // "HIVESERVER2" - Hiveserver2 metric source. - // "HIVEMETASTORE" - hivemetastore metric source - // "FLINK" - flink metric source - MetricSource string `json:"metricSource,omitempty"` - // ForceSendFields is a list of field names (e.g. "MetricOverrides") to +// InstanceSelectionResult: Defines a mapping from machine types to the number +// of VMs that are created with each machine type. 
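+
+// Editor's note: an illustrative sketch of the ForceSendFields contract
+// described above, using the InstanceSelection type. Rank 0 is the int64 zero
+// value, so omitempty would normally drop it; listing "Rank" in
+// ForceSendFields keeps it in the request body (JSON key order may vary):
+//
+//	sel := &InstanceSelection{MachineTypes: []string{"n1-standard-16"}}
+//	sel.ForceSendFields = []string{"Rank"}
+//	b, _ := sel.MarshalJSON() // roughly {"machineTypes":["n1-standard-16"],"rank":0}
+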
+type InstanceSelectionResult struct { + // MachineType: Output only. Full machine-type name, e.g. "n1-standard-16". + MachineType string `json:"machineType,omitempty"` + // VmCount: Output only. Number of VMs provisioned with the machine_type. + VmCount int64 `json:"vmCount,omitempty"` + // ForceSendFields is a list of field names (e.g. "MachineType") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "MetricOverrides") to include in - // API requests with the JSON null value. By default, fields with empty values - // are omitted from API requests. See + // NullFields is a list of field names (e.g. "MachineType") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *Metric) MarshalJSON() ([]byte, error) { - type NoMethod Metric - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s InstanceSelectionResult) MarshalJSON() ([]byte, error) { + type NoMethod InstanceSelectionResult + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// NamespacedGkeDeploymentTarget: Deprecated. Used only for the deprecated -// beta. A full, namespace-isolated deployment target for an existing GKE -// cluster. -type NamespacedGkeDeploymentTarget struct { - // ClusterNamespace: Optional. A namespace within the GKE cluster to deploy - // into. - ClusterNamespace string `json:"clusterNamespace,omitempty"` - // TargetGkeCluster: Optional. The target GKE cluster to deploy to. Format: - // 'projects/{project}/locations/{location}/clusters/{cluster_id}' - TargetGkeCluster string `json:"targetGkeCluster,omitempty"` - // ForceSendFields is a list of field names (e.g. "ClusterNamespace") to +// InstantiateWorkflowTemplateRequest: A request to instantiate a workflow +// template. +type InstantiateWorkflowTemplateRequest struct { + // Parameters: Optional. Map from parameter names to values that should be used + // for those parameters. Values may not exceed 1000 characters. + Parameters map[string]string `json:"parameters,omitempty"` + // RequestId: Optional. A tag that prevents multiple concurrent workflow + // instances with the same tag from running. This mitigates the risk of concurrent + // instances started due to retries.It is recommended to always set this value + // to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The + // tag must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), + // and hyphens (-). The maximum length is 40 characters. + RequestId string `json:"requestId,omitempty"` + // Version: Optional. The version of workflow template to instantiate. If + // specified, the workflow will be instantiated only if the current version of + // the workflow template has the supplied version.This option cannot be used to + // instantiate a previous version of workflow template. + Version int64 `json:"version,omitempty"` + // ForceSendFields is a list of field names (e.g. "Parameters") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests.
See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ClusterNamespace") to include in - // API requests with the JSON null value. By default, fields with empty values - // are omitted from API requests. See + // NullFields is a list of field names (e.g. "Parameters") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *NamespacedGkeDeploymentTarget) MarshalJSON() ([]byte, error) { - type NoMethod NamespacedGkeDeploymentTarget - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s InstantiateWorkflowTemplateRequest) MarshalJSON() ([]byte, error) { + type NoMethod InstantiateWorkflowTemplateRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// NodeGroup: Dataproc Node Group. The Dataproc NodeGroup resource is not -// related to the Dataproc NodeGroupAffinity resource. -type NodeGroup struct { - // Labels: Optional. Node group labels. Label keys must consist of from 1 to 63 - // characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). - // Label values can be empty. If specified, they must consist of from 1 to 63 - // characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). - // The node group must have no more than 32 labelsn. - Labels map[string]string `json:"labels,omitempty"` - // Name: The Node group resource name (https://aip.dev/122). - Name string `json:"name,omitempty"` - // NodeGroupConfig: Optional. The node group instance group configuration. - NodeGroupConfig *InstanceGroupConfig `json:"nodeGroupConfig,omitempty"` - // Roles: Required. Node group roles. - // - // Possible values: - // "ROLE_UNSPECIFIED" - Required unspecified role. - // "DRIVER" - Job drivers run on the node pool. - Roles []string `json:"roles,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the server. - googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Labels") to unconditionally +// Interval: Represents a time interval, encoded as a Timestamp start +// (inclusive) and a Timestamp end (exclusive).The start must be less than or +// equal to the end. When the start equals the end, the interval is empty +// (matches no time). When both start and end are unspecified, the interval +// matches any time. +type Interval struct { + // EndTime: Optional. Exclusive end of the interval.If specified, a Timestamp + // matching this interval will have to be before the end. + EndTime string `json:"endTime,omitempty"` + // StartTime: Optional. Inclusive start of the interval.If specified, a + // Timestamp matching this interval will have to be the same or after the + // start. + StartTime string `json:"startTime,omitempty"` + // ForceSendFields is a list of field names (e.g. "EndTime") to unconditionally // include in API requests. By default, fields with empty or default values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Labels") to include in API + // NullFields is a list of field names (e.g. "EndTime") to include in API // requests with the JSON null value. 
By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *NodeGroup) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroup - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s Interval) MarshalJSON() ([]byte, error) { + type NoMethod Interval + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// NodeGroupAffinity: Node Group Affinity for clusters using sole-tenant node -// groups. The Dataproc NodeGroupAffinity resource is not related to the -// Dataproc NodeGroup resource. -type NodeGroupAffinity struct { - // NodeGroupUri: Required. The URI of a sole-tenant node group resource - // (https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that - // the cluster will be created on.A full URL, partial URI, or node group name - // are valid. Examples: - // https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 - // projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 node-group-1 - NodeGroupUri string `json:"nodeGroupUri,omitempty"` - // ForceSendFields is a list of field names (e.g. "NodeGroupUri") to +// Job: A Dataproc job resource. +type Job struct { + // Done: Output only. Indicates whether the job is completed. If the value is + // false, the job is still in progress. If true, the job is completed, and + // status.state field will indicate if it was successful, failed, or cancelled. + Done bool `json:"done,omitempty"` + // DriverControlFilesUri: Output only. If present, the location of + // miscellaneous control files which can be used as part of job setup and + // handling. If not present, control files might be placed in the same location + // as driver_output_uri. + DriverControlFilesUri string `json:"driverControlFilesUri,omitempty"` + // DriverOutputResourceUri: Output only. A URI pointing to the location of the + // stdout of the job's driver program. + DriverOutputResourceUri string `json:"driverOutputResourceUri,omitempty"` + // DriverSchedulingConfig: Optional. Driver scheduling configuration. + DriverSchedulingConfig *DriverSchedulingConfig `json:"driverSchedulingConfig,omitempty"` + // FlinkJob: Optional. Job is a Flink job. + FlinkJob *FlinkJob `json:"flinkJob,omitempty"` + // HadoopJob: Optional. Job is a Hadoop job. + HadoopJob *HadoopJob `json:"hadoopJob,omitempty"` + // HiveJob: Optional. Job is a Hive job. + HiveJob *HiveJob `json:"hiveJob,omitempty"` + // JobUuid: Output only. A UUID that uniquely identifies a job within the + // project over time. This is in contrast to a user-settable reference.job_id + // that might be reused over time. + JobUuid string `json:"jobUuid,omitempty"` + // Labels: Optional. The labels to associate with this job. Label keys must + // contain 1 to 63 characters, and must conform to RFC 1035 + // (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty, but, if + // present, must contain 1 to 63 characters, and must conform to RFC 1035 + // (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + // associated with a job. + Labels map[string]string `json:"labels,omitempty"` + // PigJob: Optional. Job is a Pig job. + PigJob *PigJob `json:"pigJob,omitempty"` + // Placement: Required. Job information, including how, when, and where to run + // the job. + Placement *JobPlacement `json:"placement,omitempty"` + // PrestoJob: Optional. Job is a Presto job. 
+ PrestoJob *PrestoJob `json:"prestoJob,omitempty"` + // PysparkJob: Optional. Job is a PySpark job. + PysparkJob *PySparkJob `json:"pysparkJob,omitempty"` + // Reference: Optional. The fully qualified reference to the job, which can be + // used to obtain the equivalent REST path of the job resource. If this + // property is not specified when a job is created, the server generates a + // job_id. + Reference *JobReference `json:"reference,omitempty"` + // Scheduling: Optional. Job scheduling configuration. + Scheduling *JobScheduling `json:"scheduling,omitempty"` + // SparkJob: Optional. Job is a Spark job. + SparkJob *SparkJob `json:"sparkJob,omitempty"` + // SparkRJob: Optional. Job is a SparkR job. + SparkRJob *SparkRJob `json:"sparkRJob,omitempty"` + // SparkSqlJob: Optional. Job is a SparkSql job. + SparkSqlJob *SparkSqlJob `json:"sparkSqlJob,omitempty"` + // Status: Output only. The job status. Additional application-specific status + // information might be contained in the type_job and yarn_applications fields. + Status *JobStatus `json:"status,omitempty"` + // StatusHistory: Output only. The previous job status. + StatusHistory []*JobStatus `json:"statusHistory,omitempty"` + // TrinoJob: Optional. Job is a Trino job. + TrinoJob *TrinoJob `json:"trinoJob,omitempty"` + // YarnApplications: Output only. The collection of YARN applications spun up + // by this job.Beta Feature: This report is available for testing purposes + // only. It might be changed before final release. + YarnApplications []*YarnApplication `json:"yarnApplications,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Done") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Done") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s Job) MarshalJSON() ([]byte, error) { + type NoMethod Job + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// JobData: Data corresponding to a spark job. 
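+
+// Editor's note: an illustrative sketch, not generated code. A Job carries
+// one of the typed job fields above; the cluster name here is hypothetical,
+// and QueryList is the query-list type defined elsewhere in this file:
+//
+//	job := &Job{
+//		Placement: &JobPlacement{ClusterName: "example-cluster"},
+//		HiveJob:   &HiveJob{QueryList: &QueryList{Queries: []string{"SHOW TABLES;"}}},
+//	}
+//	body, _ := job.MarshalJSON()
+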
+type JobData struct { + CompletionTime string `json:"completionTime,omitempty"` + Description string `json:"description,omitempty"` + JobGroup string `json:"jobGroup,omitempty"` + JobId int64 `json:"jobId,omitempty,string"` + KillTasksSummary map[string]int64 `json:"killTasksSummary,omitempty"` + Name string `json:"name,omitempty"` + NumActiveStages int64 `json:"numActiveStages,omitempty"` + NumActiveTasks int64 `json:"numActiveTasks,omitempty"` + NumCompletedIndices int64 `json:"numCompletedIndices,omitempty"` + NumCompletedStages int64 `json:"numCompletedStages,omitempty"` + NumCompletedTasks int64 `json:"numCompletedTasks,omitempty"` + NumFailedStages int64 `json:"numFailedStages,omitempty"` + NumFailedTasks int64 `json:"numFailedTasks,omitempty"` + NumKilledTasks int64 `json:"numKilledTasks,omitempty"` + NumSkippedStages int64 `json:"numSkippedStages,omitempty"` + NumSkippedTasks int64 `json:"numSkippedTasks,omitempty"` + NumTasks int64 `json:"numTasks,omitempty"` + SkippedStages []int64 `json:"skippedStages,omitempty"` + SqlExecutionId int64 `json:"sqlExecutionId,omitempty,string"` + StageIds googleapi.Int64s `json:"stageIds,omitempty"` + // Possible values: + // "JOB_EXECUTION_STATUS_UNSPECIFIED" + // "JOB_EXECUTION_STATUS_RUNNING" + // "JOB_EXECUTION_STATUS_SUCCEEDED" + // "JOB_EXECUTION_STATUS_FAILED" + // "JOB_EXECUTION_STATUS_UNKNOWN" + Status string `json:"status,omitempty"` + SubmissionTime string `json:"submissionTime,omitempty"` + // ForceSendFields is a list of field names (e.g. "CompletionTime") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NodeGroupUri") to include in API + // NullFields is a list of field names (e.g. "CompletionTime") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s JobData) MarshalJSON() ([]byte, error) { + type NoMethod JobData + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// JobMetadata: Job Operation metadata. +type JobMetadata struct { + // JobId: Output only. The job id. + JobId string `json:"jobId,omitempty"` + // OperationType: Output only. Operation type. + OperationType string `json:"operationType,omitempty"` + // StartTime: Output only. Job submission time. + StartTime string `json:"startTime,omitempty"` + // Status: Output only. Most recent job status. + Status *JobStatus `json:"status,omitempty"` + // ForceSendFields is a list of field names (e.g. "JobId") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "JobId") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
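+
+	// Editor's note: an illustrative sketch, not generated code. Fields tagged
+	// with the ",string" option, such as JobData.JobId above, marshal as quoted
+	// JSON strings so 64-bit values survive JavaScript-side parsing:
+	//
+	//	d := &JobData{JobId: 9007199254740993}
+	//	b, _ := d.MarshalJSON() // {"jobId":"9007199254740993"}
+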
NullFields []string `json:"-"` } -func (s *NodeGroupAffinity) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupAffinity - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s JobMetadata) MarshalJSON() ([]byte, error) { + type NoMethod JobMetadata + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// NodeGroupOperationMetadata: Metadata describing the node group operation. -type NodeGroupOperationMetadata struct { - // ClusterUuid: Output only. Cluster UUID associated with the node group - // operation. +// JobPlacement: Dataproc job config. +type JobPlacement struct { + // ClusterLabels: Optional. Cluster labels to identify a cluster where the job + // will be submitted. + ClusterLabels map[string]string `json:"clusterLabels,omitempty"` + // ClusterName: Required. The name of the cluster where the job will be + // submitted. + ClusterName string `json:"clusterName,omitempty"` + // ClusterUuid: Output only. A cluster UUID generated by the Dataproc service + // when the job is submitted. ClusterUuid string `json:"clusterUuid,omitempty"` - // Description: Output only. Short description of operation. - Description string `json:"description,omitempty"` - // Labels: Output only. Labels associated with the operation. - Labels map[string]string `json:"labels,omitempty"` - // NodeGroupId: Output only. Node group ID for the operation. - NodeGroupId string `json:"nodeGroupId,omitempty"` - // OperationType: The operation type. - // - // Possible values: - // "NODE_GROUP_OPERATION_TYPE_UNSPECIFIED" - Node group operation type is - // unknown. - // "CREATE" - Create node group operation type. - // "UPDATE" - Update node group operation type. - // "DELETE" - Delete node group operation type. - // "RESIZE" - Resize node group operation type. - // "REPAIR" - Repair node group operation type. - // "UPDATE_LABELS" - Update node group label operation type. - // "START" - Start node group operation type. - // "STOP" - Stop node group operation type. - OperationType string `json:"operationType,omitempty"` - // Status: Output only. Current operation status. - Status *ClusterOperationStatus `json:"status,omitempty"` - // StatusHistory: Output only. The previous operation status. - StatusHistory []*ClusterOperationStatus `json:"statusHistory,omitempty"` - // Warnings: Output only. Errors encountered during operation execution. - Warnings []string `json:"warnings,omitempty"` - // ForceSendFields is a list of field names (e.g. "ClusterUuid") to + // ForceSendFields is a list of field names (e.g. "ClusterLabels") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ClusterUuid") to include in API + // NullFields is a list of field names (e.g. "ClusterLabels") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
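+
+	// Editor's note: an illustrative sketch of the NullFields contract described
+	// above. Listing "ClusterLabels" sends an explicit JSON null, which is how a
+	// PATCH-style update can clear a server-side value rather than leave it
+	// untouched; the cluster name is hypothetical:
+	//
+	//	p := &JobPlacement{ClusterName: "example-cluster"}
+	//	p.NullFields = []string{"ClusterLabels"}
+	//	b, _ := p.MarshalJSON() // roughly {"clusterName":"example-cluster","clusterLabels":null}
+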
NullFields []string `json:"-"` } -func (s *NodeGroupOperationMetadata) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s JobPlacement) MarshalJSON() ([]byte, error) { + type NoMethod JobPlacement + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// NodeInitializationAction: Specifies an executable to run on a fully -// configured node and a timeout period for executable completion. -type NodeInitializationAction struct { - // ExecutableFile: Required. Cloud Storage URI of executable file. - ExecutableFile string `json:"executableFile,omitempty"` - // ExecutionTimeout: Optional. Amount of time executable has to complete. - // Default is 10 minutes (see JSON representation of Duration - // (https://developers.google.com/protocol-buffers/docs/proto3#json)).Cluster - // creation fails with an explanatory error message (the name of the executable - // that caused the error and the exceeded timeout period) if the executable is - // not completed at end of the timeout period. - ExecutionTimeout string `json:"executionTimeout,omitempty"` - // ForceSendFields is a list of field names (e.g. "ExecutableFile") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See +// JobReference: Encapsulates the full scoping used to reference a job. +type JobReference struct { + // JobId: Optional. The job ID, which must be unique within the project.The ID + // must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or + // hyphens (-). The maximum length is 100 characters.If not specified by the + // caller, the job ID will be provided by the server. + JobId string `json:"jobId,omitempty"` + // ProjectId: Optional. The ID of the Google Cloud Platform project that the + // job belongs to. If specified, must match the request project ID. + ProjectId string `json:"projectId,omitempty"` + // ForceSendFields is a list of field names (e.g. "JobId") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ExecutableFile") to include in - // API requests with the JSON null value. By default, fields with empty values - // are omitted from API requests. See + // NullFields is a list of field names (e.g. "JobId") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *NodeInitializationAction) MarshalJSON() ([]byte, error) { - type NoMethod NodeInitializationAction - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s JobReference) MarshalJSON() ([]byte, error) { + type NoMethod JobReference + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// NodePool: indicating a list of workers of same type -type NodePool struct { - // Id: Required. A unique id of the node pool. Primary and Secondary workers - // can be specified using special reserved ids PRIMARY_WORKER_POOL and - // SECONDARY_WORKER_POOL respectively. Aux node pools can be referenced using - // corresponding pool id. 
- Id string `json:"id,omitempty"` - // InstanceNames: Name of instances to be repaired. These instances must belong - // to specified node pool. - InstanceNames []string `json:"instanceNames,omitempty"` - // RepairAction: Required. Repair action to take on specified resources of the - // node pool. - // - // Possible values: - // "REPAIR_ACTION_UNSPECIFIED" - No action will be taken by default. - // "DELETE" - delete the specified list of nodes. - RepairAction string `json:"repairAction,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See +// JobScheduling: Job scheduling options. +type JobScheduling struct { + // MaxFailuresPerHour: Optional. Maximum number of times per hour a driver can + // be restarted as a result of driver exiting with non-zero code before job is + // reported failed.A job might be reported as thrashing if the driver exits + // with a non-zero code four times within a 10-minute window.Maximum value is + // 10.Note: This restartable job option is not supported in Dataproc workflow + // templates + // (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template). + MaxFailuresPerHour int64 `json:"maxFailuresPerHour,omitempty"` + // MaxFailuresTotal: Optional. Maximum total number of times a driver can be + // restarted as a result of the driver exiting with a non-zero code. After the + // maximum number is reached, the job will be reported as failed.Maximum value + // is 240.Note: Currently, this restartable job option is not supported in + // Dataproc workflow templates + // (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template). + MaxFailuresTotal int64 `json:"maxFailuresTotal,omitempty"` + // ForceSendFields is a list of field names (e.g. "MaxFailuresPerHour") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API requests - // with the JSON null value. By default, fields with empty values are omitted - // from API requests. See + // NullFields is a list of field names (e.g. "MaxFailuresPerHour") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *NodePool) MarshalJSON() ([]byte, error) { - type NoMethod NodePool - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s JobScheduling) MarshalJSON() ([]byte, error) { + type NoMethod JobScheduling + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// Operation: This resource represents a long-running operation that is the -// result of a network API call. -type Operation struct { - // Done: If the value is false, it means the operation is still in progress. If - // true, the operation is completed, and either error or response is available. - Done bool `json:"done,omitempty"` - // Error: The error result of the operation in case of failure or cancellation. 
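+
+// Editor's note: an illustrative sketch, not generated code. Per the
+// JobScheduling doc above, a restartable job might allow a handful of driver
+// restarts per hour (the documented maximum is 10):
+//
+//	sched := &JobScheduling{MaxFailuresPerHour: 5}
+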
- Error *Status `json:"error,omitempty"` - // Metadata: Service-specific metadata associated with the operation. It - // typically contains progress information and common metadata such as create - // time. Some services might not provide such metadata. Any method that returns - // a long-running operation should document the metadata type, if any. - Metadata googleapi.RawMessage `json:"metadata,omitempty"` - // Name: The server-assigned name, which is only unique within the same service - // that originally returns it. If you use the default HTTP mapping, the name - // should be a resource name ending with operations/{unique_id}. - Name string `json:"name,omitempty"` - // Response: The normal, successful response of the operation. If the original - // method returns no data on success, such as Delete, the response is - // google.protobuf.Empty. If the original method is standard Get/Create/Update, - // the response should be the resource. For other methods, the response should - // have the type XxxResponse, where Xxx is the original method name. For - // example, if the original method name is TakeSnapshot(), the inferred - // response type is TakeSnapshotResponse. - Response googleapi.RawMessage `json:"response,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the server. - googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Done") to unconditionally +// JobStatus: Dataproc job status. +type JobStatus struct { + // Details: Optional. Output only. Job state details, such as an error + // description if the state is ERROR. + Details string `json:"details,omitempty"` + // State: Output only. A state message specifying the overall job state. + // + // Possible values: + // "STATE_UNSPECIFIED" - The job state is unknown. + // "PENDING" - The job is pending; it has been submitted, but is not yet + // running. + // "SETUP_DONE" - Job has been received by the service and completed initial + // setup; it will soon be submitted to the cluster. + // "RUNNING" - The job is running on the cluster. + // "CANCEL_PENDING" - A CancelJob request has been received, but is pending. + // "CANCEL_STARTED" - Transient in-flight resources have been canceled, and + // the request to cancel the running job has been issued to the cluster. + // "CANCELLED" - The job cancellation was successful. + // "DONE" - The job has completed successfully. + // "ERROR" - The job has completed, but encountered an error. + // "ATTEMPT_FAILURE" - Job attempt has failed. The detail field contains + // failure details for this attempt.Applies to restartable jobs only. + State string `json:"state,omitempty"` + // StateStartTime: Output only. The time when this state was entered. + StateStartTime string `json:"stateStartTime,omitempty"` + // Substate: Output only. Additional state information, which includes status + // reported by the agent. + // + // Possible values: + // "UNSPECIFIED" - The job substate is unknown. + // "SUBMITTED" - The Job is submitted to the agent.Applies to RUNNING state. + // "QUEUED" - The Job has been received and is awaiting execution (it might + // be waiting for a condition to be met). See the "details" field for the + // reason for the delay.Applies to RUNNING state. + // "STALE_STATUS" - The agent-reported status is out of date, which can be + // caused by a loss of communication between the agent and Dataproc. If the + // agent does not send a timely update, the job will fail.Applies to RUNNING + // state. 
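+
+	// Editor's note: an illustrative sketch, not generated code. Of the State
+	// values above, DONE, CANCELLED, and ERROR are the terminal ones, so a
+	// hypothetical poller can stop on them:
+	//
+	//	func isTerminal(s *JobStatus) bool {
+	//		switch s.State {
+	//		case "DONE", "CANCELLED", "ERROR":
+	//			return true
+	//		}
+	//		return false
+	//	}
+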
+ Substate string `json:"substate,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "Details") to unconditionally
 // include in API requests. By default, fields with empty or default values are
 // omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
 // details.
 ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Done") to include in API requests
- // with the JSON null value. By default, fields with empty values are omitted
- // from API requests. See
+ // NullFields is a list of field names (e.g. "Details") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
 NullFields []string `json:"-"`
}

-func (s *Operation) MarshalJSON() ([]byte, error) {
- type NoMethod Operation
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
-}
-
-// OrderedJob: A job executed by the workflow.
-type OrderedJob struct {
- // FlinkJob: Optional. Job is a Flink job.
- FlinkJob *FlinkJob `json:"flinkJob,omitempty"`
- // HadoopJob: Optional. Job is a Hadoop job.
- HadoopJob *HadoopJob `json:"hadoopJob,omitempty"`
- // HiveJob: Optional. Job is a Hive job.
- HiveJob *HiveJob `json:"hiveJob,omitempty"`
- // Labels: Optional. The labels to associate with this job.Label keys must be
- // between 1 and 63 characters long, and must conform to the following regular
- // expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63
- // characters long, and must conform to the following regular expression:
- // \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be associated with a
- // given job.
- Labels map[string]string `json:"labels,omitempty"`
- // PigJob: Optional. Job is a Pig job.
- PigJob *PigJob `json:"pigJob,omitempty"`
- // PrerequisiteStepIds: Optional. The optional list of prerequisite job
- // step_ids. If not specified, the job will start at the beginning of workflow.
- PrerequisiteStepIds []string `json:"prerequisiteStepIds,omitempty"`
- // PrestoJob: Optional. Job is a Presto job.
- PrestoJob *PrestoJob `json:"prestoJob,omitempty"`
- // PysparkJob: Optional. Job is a PySpark job.
- PysparkJob *PySparkJob `json:"pysparkJob,omitempty"`
- // Scheduling: Optional. Job scheduling configuration.
- Scheduling *JobScheduling `json:"scheduling,omitempty"`
- // SparkJob: Optional. Job is a Spark job.
- SparkJob *SparkJob `json:"sparkJob,omitempty"`
- // SparkRJob: Optional. Job is a SparkR job.
- SparkRJob *SparkRJob `json:"sparkRJob,omitempty"`
- // SparkSqlJob: Optional. Job is a SparkSql job.
- SparkSqlJob *SparkSqlJob `json:"sparkSqlJob,omitempty"`
- // StepId: Required. The step id. The id must be unique among all jobs within
- // the template.The step id is used as prefix for job id, as job
- // goog-dataproc-workflow-step-id label, and in prerequisiteStepIds field from
- // other steps.The id must contain only letters (a-z, A-Z), numbers (0-9),
- // underscores (_), and hyphens (-). Cannot begin or end with underscore or
- // hyphen. Must consist of between 3 and 50 characters.
- StepId string `json:"stepId,omitempty"`
- // TrinoJob: Optional. Job is a Trino job.
- TrinoJob *TrinoJob `json:"trinoJob,omitempty"`
- // ForceSendFields is a list of field names (e.g. "FlinkJob") to
+func (s JobStatus) MarshalJSON() ([]byte, error) {
+ type NoMethod JobStatus
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// JobsSummary: Data related to Jobs page summary
+type JobsSummary struct {
+ // ActiveJobs: Number of active jobs
+ ActiveJobs int64 `json:"activeJobs,omitempty"`
+ // ApplicationId: Spark Application Id
+ ApplicationId string `json:"applicationId,omitempty"`
+ // Attempts: Attempts info
+ Attempts []*ApplicationAttemptInfo `json:"attempts,omitempty"`
+ // CompletedJobs: Number of completed jobs
+ CompletedJobs int64 `json:"completedJobs,omitempty"`
+ // FailedJobs: Number of failed jobs
+ FailedJobs int64 `json:"failedJobs,omitempty"`
+ // SchedulingMode: Spark Scheduling mode
+ SchedulingMode string `json:"schedulingMode,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "ActiveJobs") to
 // unconditionally include in API requests. By default, fields with empty or
 // default values are omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
 // details.
 ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "FlinkJob") to include in API
+ // NullFields is a list of field names (e.g. "ActiveJobs") to include in API
 // requests with the JSON null value. By default, fields with empty values are
 // omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
 NullFields []string `json:"-"`
}

-func (s *OrderedJob) MarshalJSON() ([]byte, error) {
- type NoMethod OrderedJob
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+func (s JobsSummary) MarshalJSON() ([]byte, error) {
+ type NoMethod JobsSummary
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}
-// ParameterValidation: Configuration for parameter validation.
-type ParameterValidation struct {
- // Regex: Validation based on regular expressions.
- Regex *RegexValidation `json:"regex,omitempty"`
- // Values: Validation based on a list of allowed values.
- Values *ValueValidation `json:"values,omitempty"`
- // ForceSendFields is a list of field names (e.g. "Regex") to unconditionally
- // include in API requests. By default, fields with empty or default values are
- // omitted from API requests. See
+// JupyterConfig: Jupyter configuration for an interactive session.
+type JupyterConfig struct {
+ // DisplayName: Optional. Display name, shown in the Jupyter kernelspec card.
+ DisplayName string `json:"displayName,omitempty"`
+ // Kernel: Optional. Kernel
+ //
+ // Possible values:
+ // "KERNEL_UNSPECIFIED" - The kernel is unknown.
+ // "PYTHON" - Python kernel.
+ // "SCALA" - Scala kernel.
+ Kernel string `json:"kernel,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "DisplayName") to
+ // unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
 // details.
 ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Regex") to include in API
+ // NullFields is a list of field names (e.g. "DisplayName") to include in API
 // requests with the JSON null value. By default, fields with empty values are
 // omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
 NullFields []string `json:"-"`
}

-func (s *ParameterValidation) MarshalJSON() ([]byte, error) {
- type NoMethod ParameterValidation
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+func (s JupyterConfig) MarshalJSON() ([]byte, error) {
+ type NoMethod JupyterConfig
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

-// PeripheralsConfig: Auxiliary services configuration for a workload.
-type PeripheralsConfig struct {
- // MetastoreService: Optional. Resource name of an existing Dataproc Metastore
- // service.Example:
- // projects/[project_id]/locations/[region]/services/[service_id]
- MetastoreService string `json:"metastoreService,omitempty"`
- // SparkHistoryServerConfig: Optional. The Spark History Server configuration
- // for the workload.
- SparkHistoryServerConfig *SparkHistoryServerConfig `json:"sparkHistoryServerConfig,omitempty"`
- // ForceSendFields is a list of field names (e.g. "MetastoreService") to
- // unconditionally include in API requests. By default, fields with empty or
+// KerberosConfig: Specifies Kerberos related configuration.
+type KerberosConfig struct {
+ // CrossRealmTrustAdminServer: Optional. The admin server (IP or hostname) for
+ // the remote trusted realm in a cross realm trust relationship.
+ CrossRealmTrustAdminServer string `json:"crossRealmTrustAdminServer,omitempty"`
+ // CrossRealmTrustKdc: Optional. The KDC (IP or hostname) for the remote
+ // trusted realm in a cross realm trust relationship.
+ CrossRealmTrustKdc string `json:"crossRealmTrustKdc,omitempty"`
+ // CrossRealmTrustRealm: Optional. The remote realm the Dataproc on-cluster KDC
+ // will trust, should the user enable cross realm trust.
+ CrossRealmTrustRealm string `json:"crossRealmTrustRealm,omitempty"`
+ // CrossRealmTrustSharedPasswordUri: Optional. The Cloud Storage URI of a KMS
+ // encrypted file containing the shared password between the on-cluster
+ // Kerberos realm and the remote trusted realm, in a cross realm trust
+ // relationship.
+ CrossRealmTrustSharedPasswordUri string `json:"crossRealmTrustSharedPasswordUri,omitempty"`
+ // EnableKerberos: Optional. Flag to indicate whether to Kerberize the cluster
+ // (default: false). Set this field to true to enable Kerberos on a cluster.
+ EnableKerberos bool `json:"enableKerberos,omitempty"`
+ // KdcDbKeyUri: Optional. The Cloud Storage URI of a KMS encrypted file
+ // containing the master key of the KDC database.
+ KdcDbKeyUri string `json:"kdcDbKeyUri,omitempty"`
+ // KeyPasswordUri: Optional. The Cloud Storage URI of a KMS encrypted file
+ // containing the password to the user provided key. For the self-signed
+ // certificate, this password is generated by Dataproc.
+ KeyPasswordUri string `json:"keyPasswordUri,omitempty"`
+ // KeystorePasswordUri: Optional. The Cloud Storage URI of a KMS encrypted file
+ // containing the password to the user provided keystore. For the self-signed
+ // certificate, this password is generated by Dataproc.
+ KeystorePasswordUri string `json:"keystorePasswordUri,omitempty"`
+ // KeystoreUri: Optional. The Cloud Storage URI of the keystore file used for
+ // SSL encryption. If not provided, Dataproc will provide a self-signed
+ // certificate.
+ KeystoreUri string `json:"keystoreUri,omitempty"`
+ // KmsKeyUri: Optional. The URI of the KMS key used to encrypt sensitive files.
+ KmsKeyUri string `json:"kmsKeyUri,omitempty"`
+ // Realm: Optional. The name of the on-cluster Kerberos realm. If not
+ // specified, the uppercased domain of hostnames will be the realm.
+ Realm string `json:"realm,omitempty"`
+ // RootPrincipalPasswordUri: Optional. The Cloud Storage URI of a KMS encrypted
+ // file containing the root principal password.
+ RootPrincipalPasswordUri string `json:"rootPrincipalPasswordUri,omitempty"`
+ // TgtLifetimeHours: Optional. The lifetime of the ticket granting ticket, in
+ // hours. If not specified, or user specifies 0, then default value 10 will be
+ // used.
+ TgtLifetimeHours int64 `json:"tgtLifetimeHours,omitempty"`
+ // TruststorePasswordUri: Optional. The Cloud Storage URI of a KMS encrypted
+ // file containing the password to the user provided truststore. For the
+ // self-signed certificate, this password is generated by Dataproc.
+ TruststorePasswordUri string `json:"truststorePasswordUri,omitempty"`
+ // TruststoreUri: Optional. The Cloud Storage URI of the truststore file used
+ // for SSL encryption. If not provided, Dataproc will provide a self-signed
+ // certificate.
+ TruststoreUri string `json:"truststoreUri,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "CrossRealmTrustAdminServer")
+ // to unconditionally include in API requests. By default, fields with empty or
 // default values are omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
 // details.
 ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "MetastoreService") to include in
- // API requests with the JSON null value. By default, fields with empty values
- // are omitted from API requests. See
+ // NullFields is a list of field names (e.g. "CrossRealmTrustAdminServer") to
+ // include in API requests with the JSON null value. By default, fields with
+ // empty values are omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
 NullFields []string `json:"-"`
}

-func (s *PeripheralsConfig) MarshalJSON() ([]byte, error) {
- type NoMethod PeripheralsConfig
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+func (s KerberosConfig) MarshalJSON() ([]byte, error) {
+ type NoMethod KerberosConfig
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}
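A sketch of populating the KerberosConfig defined above, under the assumption that the KMS key and Cloud Storage URIs below are placeholders for real resources:

package main

import (
	"encoding/json"
	"fmt"

	dataproc "google.golang.org/api/dataproc/v1"
)

func main() {
	// Placeholder KMS key and Cloud Storage URIs; substitute real resources.
	kerberos := &dataproc.KerberosConfig{
		EnableKerberos:           true,
		KmsKeyUri:                "projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key",
		RootPrincipalPasswordUri: "gs://my-secrets/root-password.encrypted",
		TgtLifetimeHours:         10, // the documented default when unset or 0
	}
	b, _ := json.Marshal(kerberos)
	fmt.Println(string(b))
}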
-// PigJob: A Dataproc job for running Apache Pig (https://pig.apache.org/)
-// queries on YARN.
-type PigJob struct {
- // ContinueOnFailure: Optional. Whether to continue executing queries if a
- // query fails. The default value is false. Setting to true can be useful when
- // executing independent parallel queries.
- ContinueOnFailure bool `json:"continueOnFailure,omitempty"`
- // JarFileUris: Optional. HCFS URIs of jar files to add to the CLASSPATH of the
- // Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
- JarFileUris []string `json:"jarFileUris,omitempty"`
- // LoggingConfig: Optional. The runtime log config for job execution.
- LoggingConfig *LoggingConfig `json:"loggingConfig,omitempty"`
- // Properties: Optional. A mapping of property names to values, used to
- // configure Pig. Properties that conflict with values set by the Dataproc API
- // might be overwritten. Can include properties set in
- // /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in
- // user code.
- Properties map[string]string `json:"properties,omitempty"`
- // QueryFileUri: The HCFS URI of the script that contains the Pig queries.
- QueryFileUri string `json:"queryFileUri,omitempty"`
- // QueryList: A list of queries.
- QueryList *QueryList `json:"queryList,omitempty"`
- // ScriptVariables: Optional. Mapping of query variable names to values
- // (equivalent to the Pig command: name=[value]).
- ScriptVariables map[string]string `json:"scriptVariables,omitempty"`
- // ForceSendFields is a list of field names (e.g. "ContinueOnFailure") to
+// KubernetesClusterConfig: The configuration for running the Dataproc cluster
+// on Kubernetes.
+type KubernetesClusterConfig struct {
+ // GkeClusterConfig: Required. The configuration for running the Dataproc
+ // cluster on GKE.
+ GkeClusterConfig *GkeClusterConfig `json:"gkeClusterConfig,omitempty"`
+ // KubernetesNamespace: Optional. A namespace within the Kubernetes cluster to
+ // deploy into. If this namespace does not exist, it is created. If it exists,
+ // Dataproc verifies that another Dataproc VirtualCluster is not installed into
+ // it. If not specified, the name of the Dataproc Cluster is used.
+ KubernetesNamespace string `json:"kubernetesNamespace,omitempty"`
+ // KubernetesSoftwareConfig: Optional. The software configuration for this
+ // Dataproc cluster running on Kubernetes.
+ KubernetesSoftwareConfig *KubernetesSoftwareConfig `json:"kubernetesSoftwareConfig,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "GkeClusterConfig") to
 // unconditionally include in API requests. By default, fields with empty or
 // default values are omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
 // details.
 ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "ContinueOnFailure") to include in
+ // NullFields is a list of field names (e.g. "GkeClusterConfig") to include in
 // API requests with the JSON null value. By default, fields with empty values
 // are omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
 NullFields []string `json:"-"`
}

-func (s *PigJob) MarshalJSON() ([]byte, error) {
- type NoMethod PigJob
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+func (s KubernetesClusterConfig) MarshalJSON() ([]byte, error) {
+ type NoMethod KubernetesClusterConfig
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

-// Policy: An Identity and Access Management (IAM) policy, which specifies
-// access controls for Google Cloud resources.A Policy is a collection of
-// bindings. A binding binds one or more members, or principals, to a single
-// role. Principals can be user accounts, service accounts, Google groups, and
-// domains (such as G Suite). A role is a named list of permissions; each role
-// can be an IAM predefined role or a user-created custom role.For some types
-// of Google Cloud resources, a binding can also specify a condition, which is
-// a logical expression that allows access to a resource only if the expression
-// evaluates to true. A condition can add constraints based on attributes of
-// the request, the resource, or both. To learn which resources support
-// conditions in their IAM policies, see the IAM documentation
-// (https://cloud.google.com/iam/help/conditions/resource-policies).JSON
-// example: { "bindings": [ { "role":
-// "roles/resourcemanager.organizationAdmin", "members": [
-// "user:mike@example.com", "group:admins@example.com", "domain:google.com",
-// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role":
-// "roles/resourcemanager.organizationViewer", "members": [
-// "user:eve@example.com" ], "condition": { "title": "expirable access",
-// "description": "Does not grant access after Sep 2020", "expression":
-// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag":
-// "BwWWja0YfJA=", "version": 3 } YAML example: bindings: - members: -
-// user:mike@example.com - group:admins@example.com - domain:google.com -
-// serviceAccount:my-project-id@appspot.gserviceaccount.com role:
-// roles/resourcemanager.organizationAdmin - members: - user:eve@example.com
-// role: roles/resourcemanager.organizationViewer condition: title: expirable
-// access description: Does not grant access after Sep 2020 expression:
-// request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA=
-// version: 3 For a description of IAM and its features, see the IAM
-// documentation (https://cloud.google.com/iam/docs/).
-type Policy struct {
- // Bindings: Associates a list of members, or principals, with a role.
- // Optionally, may specify a condition that determines how and when the
- // bindings are applied. Each of the bindings must contain at least one
- // principal.The bindings in a Policy can refer to up to 1,500 principals; up
- // to 250 of these principals can be Google groups. Each occurrence of a
- // principal counts towards these limits. For example, if the bindings grant 50
- // different roles to user:alice@example.com, and not to any other principal,
- // then you can add another 1,450 principals to the bindings in the Policy.
- Bindings []*Binding `json:"bindings,omitempty"`
- // Etag: etag is used for optimistic concurrency control as a way to help
- // prevent simultaneous updates of a policy from overwriting each other. It is
- // strongly suggested that systems make use of the etag in the
- // read-modify-write cycle to perform policy updates in order to avoid race
- // conditions: An etag is returned in the response to getIamPolicy, and systems
- // are expected to put that etag in the request to setIamPolicy to ensure that
- // their change will be applied to the same version of the policy.Important: If
- // you use IAM Conditions, you must include the etag field whenever you call
- // setIamPolicy. If you omit this field, then IAM allows you to overwrite a
- // version 3 policy with a version 1 policy, and all of the conditions in the
- // version 3 policy are lost.
- Etag string `json:"etag,omitempty"`
- // Version: Specifies the format of the policy.Valid values are 0, 1, and 3.
- // Requests that specify an invalid value are rejected.Any operation that
- // affects conditional role bindings must specify version 3. This requirement
- // applies to the following operations: Getting a policy that includes a
- // conditional role binding Adding a conditional role binding to a policy
- // Changing a conditional role binding in a policy Removing any role binding,
- // with or without a condition, from a policy that includes
- // conditionsImportant: If you use IAM Conditions, you must include the etag
- // field whenever you call setIamPolicy. If you omit this field, then IAM
- // allows you to overwrite a version 3 policy with a version 1 policy, and all
- // of the conditions in the version 3 policy are lost.If a policy does not
- // include any conditions, operations on that policy may specify any valid
- // version or leave the field unset.To learn which resources support conditions
- // in their IAM policies, see the IAM documentation
- // (https://cloud.google.com/iam/help/conditions/resource-policies).
- Version int64 `json:"version,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the server.
- googleapi.ServerResponse `json:"-"`
- // ForceSendFields is a list of field names (e.g. "Bindings") to
+// KubernetesSoftwareConfig: The software configuration for this Dataproc
+// cluster running on Kubernetes.
+type KubernetesSoftwareConfig struct {
+ // ComponentVersion: The components that should be installed in this Dataproc
+ // cluster. The key must be a string from the KubernetesComponent enumeration.
+ // The value is the version of the software to be installed. At least one entry
+ // must be specified.
+ ComponentVersion map[string]string `json:"componentVersion,omitempty"`
+ // Properties: The properties to set on daemon config files. Property keys are
+ // specified in prefix:property format, for example
+ // spark:spark.kubernetes.container.image. The following are supported prefixes
+ // and their mappings: spark: spark-defaults.conf. For more information, see
+ // Cluster properties
+ // (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
+ Properties map[string]string `json:"properties,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "ComponentVersion") to
 // unconditionally include in API requests. By default, fields with empty or
 // default values are omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
 // details.
 ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Bindings") to include in API
- // requests with the JSON null value. By default, fields with empty values are
- // omitted from API requests. See
+ // NullFields is a list of field names (e.g. "ComponentVersion") to include in
+ // API requests with the JSON null value. By default, fields with empty values
+ // are omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
 NullFields []string `json:"-"`
}

-func (s *Policy) MarshalJSON() ([]byte, error) {
- type NoMethod Policy
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+func (s KubernetesSoftwareConfig) MarshalJSON() ([]byte, error) {
+ type NoMethod KubernetesSoftwareConfig
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}
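An illustrative KubernetesClusterConfig value for the types above; the component key, image, version string, and namespace are assumptions of the example, and the required GkeClusterConfig is omitted here for brevity:

package main

import (
	"encoding/json"
	"fmt"

	dataproc "google.golang.org/api/dataproc/v1"
)

func main() {
	// GkeClusterConfig is required by the API but left out of this sketch.
	cfg := &dataproc.KubernetesClusterConfig{
		KubernetesNamespace: "dataproc-ns",
		KubernetesSoftwareConfig: &dataproc.KubernetesSoftwareConfig{
			// ComponentVersion needs at least one entry per the field docs.
			ComponentVersion: map[string]string{"SPARK": "3.1-dataproc-7"},
			Properties: map[string]string{
				// prefix:property form, mapped onto spark-defaults.conf.
				"spark:spark.kubernetes.container.image": "gcr.io/my-project/my-spark:latest",
			},
		},
	}
	b, _ := json.Marshal(cfg)
	fmt.Println(string(b))
}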
-// PrestoJob: A Dataproc job for running Presto (https://prestosql.io/)
-// queries. IMPORTANT: The Dataproc Presto Optional Component
-// (https://cloud.google.com/dataproc/docs/concepts/components/presto) must be
-// enabled when the cluster is created to submit a Presto job to the cluster.
-type PrestoJob struct {
- // ClientTags: Optional. Presto client tags to attach to this query
- ClientTags []string `json:"clientTags,omitempty"`
- // ContinueOnFailure: Optional. Whether to continue executing queries if a
- // query fails. The default value is false. Setting to true can be useful when
- // executing independent parallel queries.
- ContinueOnFailure bool `json:"continueOnFailure,omitempty"`
- // LoggingConfig: Optional. The runtime log config for job execution.
- LoggingConfig *LoggingConfig `json:"loggingConfig,omitempty"`
- // OutputFormat: Optional. The format in which query output will be displayed.
- // See the Presto documentation for supported output formats
- OutputFormat string `json:"outputFormat,omitempty"`
- // Properties: Optional. A mapping of property names to values. Used to set
- // Presto session properties
- // (https://prestodb.io/docs/current/sql/set-session.html) Equivalent to using
- // the --session flag in the Presto CLI
- Properties map[string]string `json:"properties,omitempty"`
- // QueryFileUri: The HCFS URI of the script that contains SQL queries.
- QueryFileUri string `json:"queryFileUri,omitempty"`
- // QueryList: A list of queries.
- QueryList *QueryList `json:"queryList,omitempty"`
- // ForceSendFields is a list of field names (e.g. "ClientTags") to
- // unconditionally include in API requests. By default, fields with empty or
- // default values are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "ClientTags") to include in API
- // requests with the JSON null value. By default, fields with empty values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s *PrestoJob) MarshalJSON() ([]byte, error) {
- type NoMethod PrestoJob
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
-}
-
-// PyPiRepositoryConfig: Configuration for PyPi repository
-type PyPiRepositoryConfig struct {
- // PypiRepository: Optional. PyPi repository address
- PypiRepository string `json:"pypiRepository,omitempty"`
- // ForceSendFields is a list of field names (e.g. "PypiRepository") to
+// LifecycleConfig: Specifies the cluster auto-delete schedule configuration.
+type LifecycleConfig struct {
+ // AutoDeleteTime: Optional. The time when cluster will be auto-deleted (see
+ // JSON representation of Timestamp
+ // (https://developers.google.com/protocol-buffers/docs/proto3#json)).
+ AutoDeleteTime string `json:"autoDeleteTime,omitempty"`
+ // AutoDeleteTtl: Optional. The lifetime duration of cluster. The cluster will
+ // be auto-deleted at the end of this period. Minimum value is 10 minutes;
+ // maximum value is 14 days (see JSON representation of Duration
+ // (https://developers.google.com/protocol-buffers/docs/proto3#json)).
+ AutoDeleteTtl string `json:"autoDeleteTtl,omitempty"`
+ // IdleDeleteTtl: Optional. The duration to keep the cluster alive while idling
+ // (when no jobs are running). Passing this threshold will cause the cluster to
+ // be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON
+ // representation of Duration
+ // (https://developers.google.com/protocol-buffers/docs/proto3#json)).
+ IdleDeleteTtl string `json:"idleDeleteTtl,omitempty"`
+ // IdleStartTime: Output only. The time when cluster became idle (most recent
+ // job finished) and became eligible for deletion due to idleness (see JSON
+ // representation of Timestamp
+ // (https://developers.google.com/protocol-buffers/docs/proto3#json)).
+ IdleStartTime string `json:"idleStartTime,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "AutoDeleteTime") to
+ // unconditionally include in API requests. By default, fields with empty or
 // default values are omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
 // details.
 ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "PypiRepository") to include in
+ // NullFields is a list of field names (e.g. "AutoDeleteTime") to include in
 // API requests with the JSON null value. By default, fields with empty values
 // are omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
 NullFields []string `json:"-"`
}

-func (s *PyPiRepositoryConfig) MarshalJSON() ([]byte, error) {
- type NoMethod PyPiRepositoryConfig
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+func (s LifecycleConfig) MarshalJSON() ([]byte, error) {
+ type NoMethod LifecycleConfig
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

-// PySparkBatch: A configuration for running an Apache PySpark
-// (https://spark.apache.org/docs/latest/api/python/getting_started/quickstart.html)
-// batch workload.
-type PySparkBatch struct {
- // ArchiveUris: Optional. HCFS URIs of archives to be extracted into the
- // working directory of each executor. Supported file types: .jar, .tar,
- // .tar.gz, .tgz, and .zip.
- ArchiveUris []string `json:"archiveUris,omitempty"`
- // Args: Optional. The arguments to pass to the driver. Do not include
- // arguments that can be set as batch properties, such as --conf, since a
- // collision can occur that causes an incorrect batch submission.
- Args []string `json:"args,omitempty"`
- // FileUris: Optional. HCFS URIs of files to be placed in the working directory
- // of each executor.
- FileUris []string `json:"fileUris,omitempty"`
- // JarFileUris: Optional. HCFS URIs of jar files to add to the classpath of the
- // Spark driver and tasks.
- JarFileUris []string `json:"jarFileUris,omitempty"`
- // MainPythonFileUri: Required. The HCFS URI of the main Python file to use as
- // the Spark driver. Must be a .py file.
- MainPythonFileUri string `json:"mainPythonFileUri,omitempty"`
- // PythonFileUris: Optional. HCFS file URIs of Python files to pass to the
- // PySpark framework. Supported file types: .py, .egg, and .zip.
- PythonFileUris []string `json:"pythonFileUris,omitempty"`
- // ForceSendFields is a list of field names (e.g. "ArchiveUris") to
+// ListAutoscalingPoliciesResponse: A response to a request to list autoscaling
+// policies in a project.
+type ListAutoscalingPoliciesResponse struct {
+ // NextPageToken: Output only. This token is included in the response if there
+ // are more results to fetch.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+ // Policies: Output only. Autoscaling policies list.
+ Policies []*AutoscalingPolicy `json:"policies,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the server.
+ googleapi.ServerResponse `json:"-"`
+ // ForceSendFields is a list of field names (e.g. "NextPageToken") to
 // unconditionally include in API requests. By default, fields with empty or
 // default values are omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
 // details.
 ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "ArchiveUris") to include in API
+ // NullFields is a list of field names (e.g. "NextPageToken") to include in API
 // requests with the JSON null value. By default, fields with empty values are
 // omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
 NullFields []string `json:"-"`
}

-func (s *PySparkBatch) MarshalJSON() ([]byte, error) {
- type NoMethod PySparkBatch
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+func (s ListAutoscalingPoliciesResponse) MarshalJSON() ([]byte, error) {
+ type NoMethod ListAutoscalingPoliciesResponse
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

-// PySparkJob: A Dataproc job for running Apache PySpark
-// (https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
-// applications on YARN.
-type PySparkJob struct {
- // ArchiveUris: Optional. HCFS URIs of archives to be extracted into the
- // working directory of each executor. Supported file types: .jar, .tar,
- // .tar.gz, .tgz, and .zip.
- ArchiveUris []string `json:"archiveUris,omitempty"`
- // Args: Optional. The arguments to pass to the driver. Do not include
- // arguments, such as --conf, that can be set as job properties, since a
- // collision may occur that causes an incorrect job submission.
- Args []string `json:"args,omitempty"`
- // FileUris: Optional. HCFS URIs of files to be placed in the working directory
- // of each executor. Useful for naively parallel tasks.
- FileUris []string `json:"fileUris,omitempty"`
- // JarFileUris: Optional. HCFS URIs of jar files to add to the CLASSPATHs of
- // the Python driver and tasks.
- JarFileUris []string `json:"jarFileUris,omitempty"`
- // LoggingConfig: Optional. The runtime log config for job execution.
- LoggingConfig *LoggingConfig `json:"loggingConfig,omitempty"`
- // MainPythonFileUri: Required. The HCFS URI of the main Python file to use as
- // the driver. Must be a .py file.
- MainPythonFileUri string `json:"mainPythonFileUri,omitempty"`
- // Properties: Optional. A mapping of property names to values, used to
- // configure PySpark. Properties that conflict with values set by the Dataproc
- // API might be overwritten. Can include properties set in
- // /etc/spark/conf/spark-defaults.conf and classes in user code.
- Properties map[string]string `json:"properties,omitempty"`
- // PythonFileUris: Optional. HCFS file URIs of Python files to pass to the
- // PySpark framework. Supported file types: .py, .egg, and .zip.
- PythonFileUris []string `json:"pythonFileUris,omitempty"`
- // ForceSendFields is a list of field names (e.g. "ArchiveUris") to
- // unconditionally include in API requests. By default, fields with empty or
- // default values are omitted from API requests. See
+// ListBatchesResponse: A list of batch workloads.
+type ListBatchesResponse struct {
+ // Batches: Output only. The batches from the specified collection.
+ Batches []*Batch `json:"batches,omitempty"`
+ // NextPageToken: A token, which can be sent as page_token to retrieve the next
+ // page. If this field is omitted, there are no subsequent pages.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+ // Unreachable: Output only. List of Batches that could not be included in the
+ // response. Attempting to get one of these resources may indicate why it was
+ // not included in the list response.
+ Unreachable []string `json:"unreachable,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the server.
+ googleapi.ServerResponse `json:"-"`
+ // ForceSendFields is a list of field names (e.g. "Batches") to unconditionally
+ // include in API requests. By default, fields with empty or default values are
+ // omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
 // details.
 ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "ArchiveUris") to include in API
+ // NullFields is a list of field names (e.g. "Batches") to include in API
 // requests with the JSON null value. By default, fields with empty values are
 // omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
 NullFields []string `json:"-"`
}

-func (s *PySparkJob) MarshalJSON() ([]byte, error) {
- type NoMethod PySparkJob
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+func (s ListBatchesResponse) MarshalJSON() ([]byte, error) {
+ type NoMethod ListBatchesResponse
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

-// QueryList: A list of queries to run on a cluster.
-type QueryList struct {
- // Queries: Required. The queries to execute. You do not need to end a query
- // expression with a semicolon. Multiple queries can be specified in one string
- // by separating each with a semicolon. Here is an example of a Dataproc API
- // snippet that uses a QueryList to specify a HiveJob: "hiveJob": {
- // "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }
- Queries []string `json:"queries,omitempty"`
- // ForceSendFields is a list of field names (e.g. "Queries") to unconditionally
- // include in API requests. By default, fields with empty or default values are
- // omitted from API requests. See
+// ListClustersResponse: The list of all clusters in a project.
+type ListClustersResponse struct {
+ // Clusters: Output only. The clusters in the project.
+ Clusters []*Cluster `json:"clusters,omitempty"`
+ // NextPageToken: Output only. This token is included in the response if there
+ // are more results to fetch. To fetch additional results, provide this value
+ // as the page_token in a subsequent ListClustersRequest.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the server.
+ googleapi.ServerResponse `json:"-"`
+ // ForceSendFields is a list of field names (e.g. "Clusters") to
+ // unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
 // details.
 ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Queries") to include in API
+ // NullFields is a list of field names (e.g. "Clusters") to include in API
 // requests with the JSON null value. By default, fields with empty values are
 // omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
 NullFields []string `json:"-"`
}

-func (s *QueryList) MarshalJSON() ([]byte, error) {
- type NoMethod QueryList
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+func (s ListClustersResponse) MarshalJSON() ([]byte, error) {
+ type NoMethod ListClustersResponse
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

-// RegexValidation: Validation based on regular expressions.
-type RegexValidation struct {
- // Regexes: Required. RE2 regular expressions used to validate the parameter's
- // value. The value must match the regex in its entirety (substring matches are
- // not sufficient).
- Regexes []string `json:"regexes,omitempty"`
- // ForceSendFields is a list of field names (e.g. "Regexes") to unconditionally
+// ListJobsResponse: A list of jobs in a project.
+type ListJobsResponse struct {
+ // Jobs: Output only. Jobs list.
+ Jobs []*Job `json:"jobs,omitempty"`
+ // NextPageToken: Optional. This token is included in the response if there are
+ // more results to fetch. To fetch additional results, provide this value as
+ // the page_token in a subsequent ListJobsRequest.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+ // Unreachable: Output only. List of jobs with kms_key-encrypted parameters
+ // that could not be decrypted. A response to a jobs.get request may indicate
+ // the reason for the decryption failure for a specific job.
+ Unreachable []string `json:"unreachable,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the server.
+ googleapi.ServerResponse `json:"-"`
+ // ForceSendFields is a list of field names (e.g. "Jobs") to unconditionally
 // include in API requests. By default, fields with empty or default values are
 // omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
 // details.
 ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Regexes") to include in API
- // requests with the JSON null value. By default, fields with empty values are
- // omitted from API requests. See
+ // NullFields is a list of field names (e.g. "Jobs") to include in API requests
+ // with the JSON null value. By default, fields with empty values are omitted
+ // from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
 NullFields []string `json:"-"`
}

-func (s *RegexValidation) MarshalJSON() ([]byte, error) {
- type NoMethod RegexValidation
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+func (s ListJobsResponse) MarshalJSON() ([]byte, error) {
+ type NoMethod ListJobsResponse
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

-// RepairClusterRequest: A request to repair a cluster.
-type RepairClusterRequest struct {
- // ClusterUuid: Optional. Specifying the cluster_uuid means the RPC will fail
- // (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
- ClusterUuid string `json:"clusterUuid,omitempty"`
- // GracefulDecommissionTimeout: Optional. Timeout for graceful YARN
- // decommissioning. Graceful decommissioning facilitates the removal of cluster
- // nodes without interrupting jobs in progress. The timeout specifies the
- // amount of time to wait for jobs finish before forcefully removing nodes. The
- // default timeout is 0 for forceful decommissioning, and the maximum timeout
- // period is 1 day. (see JSON Mapping—Duration
- // (https://developers.google.com/protocol-buffers/docs/proto3#json)).graceful_decommission_timeout
- // is supported in Dataproc image versions 1.2+.
- GracefulDecommissionTimeout string `json:"gracefulDecommissionTimeout,omitempty"`
- // NodePools: Optional. Node pools and corresponding repair action to be taken.
- // All node pools should be unique in this request. i.e. Multiple entries for
- // the same node pool id are not allowed.
- NodePools []*NodePool `json:"nodePools,omitempty"`
- // ParentOperationId: Optional. operation id of the parent operation sending
- // the repair request
- ParentOperationId string `json:"parentOperationId,omitempty"`
- // RequestId: Optional. A unique ID used to identify the request. If the server
- // receives two RepairClusterRequests with the same ID, the second request is
- // ignored, and the first google.longrunning.Operation created and stored in
- // the backend is returned.Recommendation: Set this value to a UUID
- // (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must
- // contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens
- // (-). The maximum length is 40 characters.
- RequestId string `json:"requestId,omitempty"`
- // ForceSendFields is a list of field names (e.g. "ClusterUuid") to
+// ListOperationsResponse: The response message for Operations.ListOperations.
+type ListOperationsResponse struct {
+ // NextPageToken: The standard List next-page token.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+ // Operations: A list of operations that matches the specified filter in the
+ // request.
+ Operations []*Operation `json:"operations,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the server.
+ googleapi.ServerResponse `json:"-"`
+ // ForceSendFields is a list of field names (e.g. "NextPageToken") to
 // unconditionally include in API requests. By default, fields with empty or
 // default values are omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
 // details.
 ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "ClusterUuid") to include in API
+ // NullFields is a list of field names (e.g. "NextPageToken") to include in API
 // requests with the JSON null value. By default, fields with empty values are
 // omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
 NullFields []string `json:"-"`
}

-func (s *RepairClusterRequest) MarshalJSON() ([]byte, error) {
- type NoMethod RepairClusterRequest
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+func (s ListOperationsResponse) MarshalJSON() ([]byte, error) {
+ type NoMethod ListOperationsResponse
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

-type RepairNodeGroupRequest struct {
- // InstanceNames: Required. Name of instances to be repaired. These instances
- // must belong to specified node pool.
- InstanceNames []string `json:"instanceNames,omitempty"`
- // RepairAction: Required. Repair action to take on specified resources of the
- // node pool.
- //
- // Possible values:
- // "REPAIR_ACTION_UNSPECIFIED" - No action will be taken by default.
- // "REPLACE" - replace the specified list of nodes.
- RepairAction string `json:"repairAction,omitempty"`
- // RequestId: Optional. A unique ID used to identify the request. If the server
- // receives two RepairNodeGroupRequest with the same ID, the second request is
- // ignored and the first google.longrunning.Operation created and stored in the
- // backend is returned.Recommendation: Set this value to a UUID
- // (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must
- // contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens
- // (-). The maximum length is 40 characters.
- RequestId string `json:"requestId,omitempty"`
- // ForceSendFields is a list of field names (e.g. "InstanceNames") to
+// ListSessionTemplatesResponse: A list of session templates.
+type ListSessionTemplatesResponse struct {
+ // NextPageToken: A token, which can be sent as page_token to retrieve the next
+ // page. If this field is omitted, there are no subsequent pages.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+ // SessionTemplates: Output only. Session template list
+ SessionTemplates []*SessionTemplate `json:"sessionTemplates,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the server.
+ googleapi.ServerResponse `json:"-"`
+ // ForceSendFields is a list of field names (e.g. "NextPageToken") to
 // unconditionally include in API requests. By default, fields with empty or
 // default values are omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
 // details.
 ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "InstanceNames") to include in API
+ // NullFields is a list of field names (e.g. "NextPageToken") to include in API
 // requests with the JSON null value. By default, fields with empty values are
 // omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
 NullFields []string `json:"-"`
}

-func (s *RepairNodeGroupRequest) MarshalJSON() ([]byte, error) {
- type NoMethod RepairNodeGroupRequest
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+func (s ListSessionTemplatesResponse) MarshalJSON() ([]byte, error) {
+ type NoMethod ListSessionTemplatesResponse
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}
-// RepositoryConfig: Configuration for dependency repositories
-type RepositoryConfig struct {
- // PypiRepositoryConfig: Optional. Configuration for PyPi repository.
- PypiRepositoryConfig *PyPiRepositoryConfig `json:"pypiRepositoryConfig,omitempty"`
- // ForceSendFields is a list of field names (e.g. "PypiRepositoryConfig") to
+// ListSessionsResponse: A list of interactive sessions.
+type ListSessionsResponse struct {
+ // NextPageToken: A token, which can be sent as page_token, to retrieve the
+ // next page. If this field is omitted, there are no subsequent pages.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+ // Sessions: Output only. The sessions from the specified collection.
+ Sessions []*Session `json:"sessions,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the server.
+ googleapi.ServerResponse `json:"-"`
+ // ForceSendFields is a list of field names (e.g. "NextPageToken") to
 // unconditionally include in API requests. By default, fields with empty or
 // default values are omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
 // details.
 ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "PypiRepositoryConfig") to include
- // in API requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. See
+ // NullFields is a list of field names (e.g. "NextPageToken") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
 NullFields []string `json:"-"`
}

-func (s *RepositoryConfig) MarshalJSON() ([]byte, error) {
- type NoMethod RepositoryConfig
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+func (s ListSessionsResponse) MarshalJSON() ([]byte, error) {
+ type NoMethod ListSessionsResponse
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

-// ReservationAffinity: Reservation Affinity for consuming Zonal reservation.
-type ReservationAffinity struct {
- // ConsumeReservationType: Optional. Type of reservation to consume
- //
- // Possible values:
- // "TYPE_UNSPECIFIED"
- // "NO_RESERVATION" - Do not consume from any allocated capacity.
- // "ANY_RESERVATION" - Consume any reservation available.
- // "SPECIFIC_RESERVATION" - Must consume from a specific reservation. Must
- // specify key value fields for specifying the reservations.
- ConsumeReservationType string `json:"consumeReservationType,omitempty"`
- // Key: Optional. Corresponds to the label key of reservation resource.
- Key string `json:"key,omitempty"`
- // Values: Optional. Corresponds to the label values of reservation resource.
- Values []string `json:"values,omitempty"`
- // ForceSendFields is a list of field names (e.g. "ConsumeReservationType") to
+// ListWorkflowTemplatesResponse: A response to a request to list workflow
+// templates in a project.
+type ListWorkflowTemplatesResponse struct {
+ // NextPageToken: Output only. This token is included in the response if there
+ // are more results to fetch. To fetch additional results, provide this value
+ // as the page_token in a subsequent ListWorkflowTemplatesRequest.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+ // Templates: Output only. WorkflowTemplates list.
+ Templates []*WorkflowTemplate `json:"templates,omitempty"`
+ // Unreachable: Output only. List of workflow templates that could not be
+ // included in the response. Attempting to get one of these resources may
+ // indicate why it was not included in the list response.
+ Unreachable []string `json:"unreachable,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the server.
+ googleapi.ServerResponse `json:"-"`
+ // ForceSendFields is a list of field names (e.g. "NextPageToken") to
 // unconditionally include in API requests. By default, fields with empty or
 // default values are omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
 // details.
 ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "ConsumeReservationType") to
- // include in API requests with the JSON null value. By default, fields with
- // empty values are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s *ReservationAffinity) MarshalJSON() ([]byte, error) {
- type NoMethod ReservationAffinity
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
-}
-
-// ResizeNodeGroupRequest: A request to resize a node group.
-type ResizeNodeGroupRequest struct {
- // GracefulDecommissionTimeout: Optional. Timeout for graceful YARN
- // decommissioning. Graceful decommissioning
- // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/scaling-clusters#graceful_decommissioning)
- // allows the removal of nodes from the Compute Engine node group without
- // interrupting jobs in progress. This timeout specifies how long to wait for
- // jobs in progress to finish before forcefully removing nodes (and potentially
- // interrupting jobs). Default timeout is 0 (for forceful decommission), and
- // the maximum allowed timeout is 1 day. (see JSON representation of Duration
- // (https://developers.google.com/protocol-buffers/docs/proto3#json)).Only
- // supported on Dataproc image versions 1.2 and higher.
- GracefulDecommissionTimeout string `json:"gracefulDecommissionTimeout,omitempty"`
- // ParentOperationId: Optional. operation id of the parent operation sending
- // the resize request
- ParentOperationId string `json:"parentOperationId,omitempty"`
- // RequestId: Optional. A unique ID used to identify the request. If the server
- // receives two ResizeNodeGroupRequest
- // (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.ResizeNodeGroupRequests)
- // with the same ID, the second request is ignored and the first
- // google.longrunning.Operation created and stored in the backend is
- // returned.Recommendation: Set this value to a UUID
- // (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must
- // contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens
- // (-). The maximum length is 40 characters.
- RequestId string `json:"requestId,omitempty"`
- // Size: Required. The number of running instances for the node group to
- // maintain. The group adds or removes instances to maintain the number of
- // instances specified by this parameter.
- Size int64 `json:"size,omitempty"`
- // ForceSendFields is a list of field names (e.g.
- // "GracefulDecommissionTimeout") to unconditionally include in API requests.
- // By default, fields with empty or default values are omitted from API
- // requests. See https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields
- // for more details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "GracefulDecommissionTimeout") to
- // include in API requests with the JSON null value. By default, fields with
- // empty values are omitted from API requests. See
+ // NullFields is a list of field names (e.g. "NextPageToken") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
 NullFields []string `json:"-"`
}

-func (s *ResizeNodeGroupRequest) MarshalJSON() ([]byte, error) {
- type NoMethod ResizeNodeGroupRequest
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+func (s ListWorkflowTemplatesResponse) MarshalJSON() ([]byte, error) {
+ type NoMethod ListWorkflowTemplatesResponse
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

-// RuntimeConfig: Runtime configuration for a workload.
-type RuntimeConfig struct {
- // AutotuningConfig: Optional. Autotuning configuration of the workload.
- AutotuningConfig *AutotuningConfig `json:"autotuningConfig,omitempty"`
- // Cohort: Optional. Cohort identifier. Identifies families of the workloads
- // having the same shape, e.g. daily ETL jobs.
- Cohort string `json:"cohort,omitempty"`
- // ContainerImage: Optional. Optional custom container image for the job
- // runtime environment. If not specified, a default container image will be
- // used.
- ContainerImage string `json:"containerImage,omitempty"`
- // Properties: Optional. A mapping of property names to values, which are used
- // to configure workload execution.
- Properties map[string]string `json:"properties,omitempty"`
- // RepositoryConfig: Optional. Dependency repository configuration.
- RepositoryConfig *RepositoryConfig `json:"repositoryConfig,omitempty"`
- // Version: Optional. Version of the batch runtime.
- Version string `json:"version,omitempty"`
- // ForceSendFields is a list of field names (e.g. "AutotuningConfig") to
+// LoggingConfig: The runtime logging config of the job.
+type LoggingConfig struct {
+ // DriverLogLevels: The per-package log levels for the driver. This can include
+ // "root" package name to configure rootLogger. Examples: - 'com.google =
+ // FATAL' - 'root = INFO' - 'org.apache = DEBUG'
+ DriverLogLevels map[string]string `json:"driverLogLevels,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "DriverLogLevels") to
 // unconditionally include in API requests. By default, fields with empty or
 // default values are omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
 // details.
 ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "AutotuningConfig") to include in
+ // NullFields is a list of field names (e.g. "DriverLogLevels") to include in
 // API requests with the JSON null value. By default, fields with empty values
 // are omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
 NullFields []string `json:"-"`
}

-func (s *RuntimeConfig) MarshalJSON() ([]byte, error) {
- type NoMethod RuntimeConfig
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+func (s LoggingConfig) MarshalJSON() ([]byte, error) {
+ type NoMethod LoggingConfig
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

-// RuntimeInfo: Runtime information about workload execution.
-type RuntimeInfo struct {
- // ApproximateUsage: Output only. Approximate workload resource usage,
- // calculated when the workload completes (see Dataproc Serverless pricing
- // (https://cloud.google.com/dataproc-serverless/pricing)).Note: This metric
- // calculation may change in the future, for example, to capture cumulative
- // workload resource consumption during workload execution (see the Dataproc
- // Serverless release notes
- // (https://cloud.google.com/dataproc-serverless/docs/release-notes) for
- // announcements, changes, fixes and other Dataproc developments).
- ApproximateUsage *UsageMetrics `json:"approximateUsage,omitempty"`
- // CurrentUsage: Output only. Snapshot of current workload resource usage.
- CurrentUsage *UsageSnapshot `json:"currentUsage,omitempty"`
- // DiagnosticOutputUri: Output only. A URI pointing to the location of the
- // diagnostics tarball.
- DiagnosticOutputUri string `json:"diagnosticOutputUri,omitempty"`
- // Endpoints: Output only. Map of remote access endpoints (such as web
- // interfaces and APIs) to their URIs.
- Endpoints map[string]string `json:"endpoints,omitempty"`
- // OutputUri: Output only. A URI pointing to the location of the stdout and
- // stderr of the workload.
- OutputUri string `json:"outputUri,omitempty"`
- // ForceSendFields is a list of field names (e.g. "ApproximateUsage") to
+// ManagedCluster: Cluster that is managed by the workflow.
+type ManagedCluster struct {
+ // ClusterName: Required. The cluster name prefix. A unique cluster name will
+ // be formed by appending a random suffix. The name must contain only
+ // lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a
+ // letter. Cannot begin or end with hyphen. Must consist of between 2 and 35
+ // characters.
+ ClusterName string `json:"clusterName,omitempty"`
+ // Config: Required. The cluster configuration.
+ Config *ClusterConfig `json:"config,omitempty"`
+ // Labels: Optional. The labels to associate with this cluster. Label keys must
+ // be between 1 and 63 characters long, and must conform to the following PCRE
+ // regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63
+ // characters long, and must conform to the following PCRE regular expression:
+ // \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be associated with a
+ // given cluster.
+ Labels map[string]string `json:"labels,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "ClusterName") to
 // unconditionally include in API requests. By default, fields with empty or
 // default values are omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
 // details.
 ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "ApproximateUsage") to include in
- // API requests with the JSON null value. By default, fields with empty values
- // are omitted from API requests. See
+ // NullFields is a list of field names (e.g. "ClusterName") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
 NullFields []string `json:"-"`
}

-func (s *RuntimeInfo) MarshalJSON() ([]byte, error) {
- type NoMethod RuntimeInfo
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+func (s ManagedCluster) MarshalJSON() ([]byte, error) {
+ type NoMethod ManagedCluster
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

-// SecurityConfig: Security related configuration, including encryption,
-// Kerberos, etc.
-type SecurityConfig struct {
- // IdentityConfig: Optional. Identity related configuration, including service
- // account based secure multi-tenancy user mappings.
- IdentityConfig *IdentityConfig `json:"identityConfig,omitempty"`
- // KerberosConfig: Optional. Kerberos related configuration.
- KerberosConfig *KerberosConfig `json:"kerberosConfig,omitempty"`
- // ForceSendFields is a list of field names (e.g. "IdentityConfig") to
- // unconditionally include in API requests. By default, fields with empty or
+// ManagedGroupConfig: Specifies the resources used to actively manage an
+// instance group.
+type ManagedGroupConfig struct {
+ // InstanceGroupManagerName: Output only. The name of the Instance Group
+ // Manager for this group.
+ InstanceGroupManagerName string `json:"instanceGroupManagerName,omitempty"`
+ // InstanceGroupManagerUri: Output only. The partial URI to the instance group
+ // manager for this group. E.g.
+ // projects/my-project/regions/us-central1/instanceGroupManagers/my-igm.
+ InstanceGroupManagerUri string `json:"instanceGroupManagerUri,omitempty"`
+ // InstanceTemplateName: Output only. The name of the Instance Template used
+ // for the Managed Instance Group.
+ InstanceTemplateName string `json:"instanceTemplateName,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "InstanceGroupManagerName")
+ // to unconditionally include in API requests. By default, fields with empty or
 // default values are omitted from API requests. See
 // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
 // details.
 ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "IdentityConfig") to include in
- // API requests with the JSON null value. By default, fields with empty values
- // are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "InstanceGroupManagerName") to
+ // include in API requests with the JSON null value. By default, fields with
+ // empty values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
}

-func (s *SecurityConfig) MarshalJSON() ([]byte, error) {
- type NoMethod SecurityConfig
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+func (s ManagedGroupConfig) MarshalJSON() ([]byte, error) {
+ type NoMethod ManagedGroupConfig
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

-// Session: A representation of a session.
-type Session struct {
- // CreateTime: Output only. The time when the session was created.
- CreateTime string `json:"createTime,omitempty"`
- // Creator: Output only. The email address of the user who created the session.
- Creator string `json:"creator,omitempty"`
- // EnvironmentConfig: Optional. Environment configuration for the session
- // execution.
- EnvironmentConfig *EnvironmentConfig `json:"environmentConfig,omitempty"`
- // JupyterSession: Optional. Jupyter session config.
- JupyterSession *JupyterConfig `json:"jupyterSession,omitempty"`
- // Labels: Optional. The labels to associate with the session. Label keys must
- // contain 1 to 63 characters, and must conform to RFC 1035
- // (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if
- // present, must contain 1 to 63 characters, and must conform to RFC 1035
- // (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
- // associated with a session.
- Labels map[string]string `json:"labels,omitempty"`
- // Name: Required. The resource name of the session.
- Name string `json:"name,omitempty"`
- // RuntimeConfig: Optional. Runtime configuration for the session execution.
- RuntimeConfig *RuntimeConfig `json:"runtimeConfig,omitempty"`
- // RuntimeInfo: Output only. Runtime information about session execution.
- RuntimeInfo *RuntimeInfo `json:"runtimeInfo,omitempty"`
- // SessionTemplate: Optional. The session template used by the session.Only
- // resource names, including project ID and location, are valid.Example: *
- // https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id]
- // *
- // projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_
- // id]The template must be in the same project and Dataproc region as the
- // session.
- SessionTemplate string `json:"sessionTemplate,omitempty"`
- // State: Output only. A state of the session.
- //
- // Possible values:
- // "STATE_UNSPECIFIED" - The session state is unknown.
- // "CREATING" - The session is created prior to running.
- // "ACTIVE" - The session is running.
- // "TERMINATING" - The session is terminating.
- // "TERMINATED" - The session is terminated successfully.
- // "FAILED" - The session is no longer running due to an error. - State string `json:"state,omitempty"` - // StateHistory: Output only. Historical state information for the session. - StateHistory []*SessionStateHistory `json:"stateHistory,omitempty"` - // StateMessage: Output only. Session state details, such as the failure - // description if the state is FAILED. - StateMessage string `json:"stateMessage,omitempty"` - // StateTime: Output only. The time when the session entered the current state. - StateTime string `json:"stateTime,omitempty"` - // User: Optional. The email address of the user who owns the session. - User string `json:"user,omitempty"` - // Uuid: Output only. A session UUID (Unique Universal Identifier). The service - // generates this value when it creates the session. - Uuid string `json:"uuid,omitempty"` +type MemoryMetrics struct { + TotalOffHeapStorageMemory int64 `json:"totalOffHeapStorageMemory,omitempty,string"` + TotalOnHeapStorageMemory int64 `json:"totalOnHeapStorageMemory,omitempty,string"` + UsedOffHeapStorageMemory int64 `json:"usedOffHeapStorageMemory,omitempty,string"` + UsedOnHeapStorageMemory int64 `json:"usedOnHeapStorageMemory,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "TotalOffHeapStorageMemory") + // to unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "TotalOffHeapStorageMemory") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} - // ServerResponse contains the HTTP response code and headers from the server. - googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreateTime") to - // unconditionally include in API requests. By default, fields with empty or +func (s MemoryMetrics) MarshalJSON() ([]byte, error) { + type NoMethod MemoryMetrics + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// MetastoreConfig: Specifies a Metastore configuration. +type MetastoreConfig struct { + // DataprocMetastoreService: Required. Resource name of an existing Dataproc + // Metastore service.Example: + // projects/[project_id]/locations/[dataproc_region]/services/[service-name] + DataprocMetastoreService string `json:"dataprocMetastoreService,omitempty"` + // ForceSendFields is a list of field names (e.g. "DataprocMetastoreService") + // to unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreateTime") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "DataprocMetastoreService") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
NullFields []string `json:"-"` } -func (s *Session) MarshalJSON() ([]byte, error) { - type NoMethod Session - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s MetastoreConfig) MarshalJSON() ([]byte, error) { + type NoMethod MetastoreConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// SessionOperationMetadata: Metadata describing the Session operation. -type SessionOperationMetadata struct { - // CreateTime: The time when the operation was created. - CreateTime string `json:"createTime,omitempty"` - // Description: Short description of the operation. - Description string `json:"description,omitempty"` - // DoneTime: The time when the operation was finished. - DoneTime string `json:"doneTime,omitempty"` - // Labels: Labels associated with the operation. - Labels map[string]string `json:"labels,omitempty"` - // OperationType: The operation type. +// Metric: A Dataproc custom metric. +type Metric struct { + // MetricOverrides: Optional. Specify one or more Custom metrics + // (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) + // to collect for the metric source (for the SPARK metric source, any Spark + // metric (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be + // specified).Provide metrics in the following format: METRIC_SOURCE: + // INSTANCE:GROUP:METRIC Use camelcase as appropriate.Examples: + // yarn:ResourceManager:QueueMetrics:AppsCompleted + // spark:driver:DAGScheduler:job.allJobs + // sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed + // hiveserver2:JVM:Memory:NonHeapMemoryUsage.used Notes: Only the specified + // overridden metrics are collected for the metric source. For example, if one + // or more spark:executor metrics are listed as metric overrides, other SPARK + // metrics are not collected. The collection of the metrics for other enabled + // custom metric sources is unaffected. For example, if both SPARK and YARN + // metric sources are enabled, and overrides are provided for Spark metrics + // only, all YARN metrics are collected. + MetricOverrides []string `json:"metricOverrides,omitempty"` + // MetricSource: Required. A standard set of metrics is collected unless + // metricOverrides are specified for the metric source (see Custom metrics + // (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) + // for more information). // // Possible values: - // "SESSION_OPERATION_TYPE_UNSPECIFIED" - Session operation type is unknown. - // "CREATE" - Create Session operation type. - // "TERMINATE" - Terminate Session operation type. - // "DELETE" - Delete Session operation type. - OperationType string `json:"operationType,omitempty"` - // Session: Name of the session for the operation. - Session string `json:"session,omitempty"` - // SessionUuid: Session UUID for the operation. - SessionUuid string `json:"sessionUuid,omitempty"` - // Warnings: Warnings encountered during operation execution. - Warnings []string `json:"warnings,omitempty"` - // ForceSendFields is a list of field names (e.g. "CreateTime") to + // "METRIC_SOURCE_UNSPECIFIED" - Required unspecified metric source. + // "MONITORING_AGENT_DEFAULTS" - Monitoring agent metrics. If this source is + // enabled, Dataproc enables the monitoring agent in Compute Engine, and + // collects monitoring agent metrics, which are published with an + // agent.googleapis.com prefix. + // "HDFS" - HDFS metric source. + // "SPARK" - Spark metric source.
+ // "YARN" - YARN metric source. + // "SPARK_HISTORY_SERVER" - Spark History Server metric source. + // "HIVESERVER2" - Hiveserver2 metric source. + // "HIVEMETASTORE" - hivemetastore metric source + // "FLINK" - flink metric source + MetricSource string `json:"metricSource,omitempty"` + // ForceSendFields is a list of field names (e.g. "MetricOverrides") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreateTime") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "MetricOverrides") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *SessionOperationMetadata) MarshalJSON() ([]byte, error) { - type NoMethod SessionOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s Metric) MarshalJSON() ([]byte, error) { + type NoMethod Metric + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// SessionStateHistory: Historical state information. -type SessionStateHistory struct { - // State: Output only. The state of the session at this point in the session - // history. - // - // Possible values: - // "STATE_UNSPECIFIED" - The session state is unknown. - // "CREATING" - The session is created prior to running. - // "ACTIVE" - The session is running. - // "TERMINATING" - The session is terminating. - // "TERMINATED" - The session is terminated successfully. - // "FAILED" - The session is no longer running due to an error. - State string `json:"state,omitempty"` - // StateMessage: Output only. Details about the state at this point in the - // session history. - StateMessage string `json:"stateMessage,omitempty"` - // StateStartTime: Output only. The time when the session entered the - // historical state. - StateStartTime string `json:"stateStartTime,omitempty"` - // ForceSendFields is a list of field names (e.g. "State") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See +// NamespacedGkeDeploymentTarget: Deprecated. Used only for the deprecated +// beta. A full, namespace-isolated deployment target for an existing GKE +// cluster. +type NamespacedGkeDeploymentTarget struct { + // ClusterNamespace: Optional. A namespace within the GKE cluster to deploy + // into. + ClusterNamespace string `json:"clusterNamespace,omitempty"` + // TargetGkeCluster: Optional. The target GKE cluster to deploy to. Format: + // 'projects/{project}/locations/{location}/clusters/{cluster_id}' + TargetGkeCluster string `json:"targetGkeCluster,omitempty"` + // ForceSendFields is a list of field names (e.g. "ClusterNamespace") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. 
"State") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "ClusterNamespace") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *SessionStateHistory) MarshalJSON() ([]byte, error) { - type NoMethod SessionStateHistory - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s NamespacedGkeDeploymentTarget) MarshalJSON() ([]byte, error) { + type NoMethod NamespacedGkeDeploymentTarget + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// SessionTemplate: A representation of a session template. -type SessionTemplate struct { - // CreateTime: Output only. The time when the template was created. - CreateTime string `json:"createTime,omitempty"` - // Creator: Output only. The email address of the user who created the - // template. - Creator string `json:"creator,omitempty"` - // Description: Optional. Brief description of the template. - Description string `json:"description,omitempty"` - // EnvironmentConfig: Optional. Environment configuration for session - // execution. - EnvironmentConfig *EnvironmentConfig `json:"environmentConfig,omitempty"` - // JupyterSession: Optional. Jupyter session config. - JupyterSession *JupyterConfig `json:"jupyterSession,omitempty"` - // Labels: Optional. Labels to associate with sessions created using this - // template. Label keys must contain 1 to 63 characters, and must conform to - // RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty, - // but, if present, must contain 1 to 63 characters and conform to RFC 1035 - // (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be - // associated with a session. +// NodeGroup: Dataproc Node Group. The Dataproc NodeGroup resource is not +// related to the Dataproc NodeGroupAffinity resource. +type NodeGroup struct { + // Labels: Optional. Node group labels. Label keys must consist of from 1 to 63 + // characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). + // Label values can be empty. If specified, they must consist of from 1 to 63 + // characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). + // The node group must have no more than 32 labels. Labels map[string]string `json:"labels,omitempty"` - // Name: Required. The resource name of the session template. + // Name: The Node group resource name (https://aip.dev/122). Name string `json:"name,omitempty"` - // RuntimeConfig: Optional. Runtime configuration for session execution. - RuntimeConfig *RuntimeConfig `json:"runtimeConfig,omitempty"` - // UpdateTime: Output only. The time the template was last updated. - UpdateTime string `json:"updateTime,omitempty"` - // Uuid: Output only. A session template UUID (Unique Universal Identifier). - // The service generates this value when it creates the session template. - Uuid string `json:"uuid,omitempty"` + // NodeGroupConfig: Optional. The node group instance group configuration. + NodeGroupConfig *InstanceGroupConfig `json:"nodeGroupConfig,omitempty"` + // Roles: Required. Node group roles. + // + // Possible values: + // "ROLE_UNSPECIFIED" - Required unspecified role. + // "DRIVER" - Job drivers run on the node pool. 
+ Roles []string `json:"roles,omitempty"` // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreateTime") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See + // ForceSendFields is a list of field names (e.g. "Labels") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreateTime") to include in API + // NullFields is a list of field names (e.g. "Labels") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *SessionTemplate) MarshalJSON() ([]byte, error) { - type NoMethod SessionTemplate - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s NodeGroup) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroup + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// SetIamPolicyRequest: Request message for SetIamPolicy method. -type SetIamPolicyRequest struct { - // Policy: REQUIRED: The complete policy to be applied to the resource. The - // size of the policy is limited to a few 10s of KB. An empty policy is a valid - // policy but certain Google Cloud services (such as Projects) might reject - // them. - Policy *Policy `json:"policy,omitempty"` - // ForceSendFields is a list of field names (e.g. "Policy") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See +// NodeGroupAffinity: Node Group Affinity for clusters using sole-tenant node +// groups. The Dataproc NodeGroupAffinity resource is not related to the +// Dataproc NodeGroup resource. +type NodeGroupAffinity struct { + // NodeGroupUri: Required. The URI of a sole-tenant node group resource + // (https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that + // the cluster will be created on.A full URL, partial URI, or node group name + // are valid. Examples: + // https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 + // projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 node-group-1 + NodeGroupUri string `json:"nodeGroupUri,omitempty"` + // ForceSendFields is a list of field names (e.g. "NodeGroupUri") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Policy") to include in API + // NullFields is a list of field names (e.g. "NodeGroupUri") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
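The NodeGroupUri comment above accepts three spellings; a brief sketch using the partial-URI form (project and zone names are placeholders, import path assumed as before):

package main

import dataproc "google.golang.org/api/dataproc/v1"

func main() {
	// A full URL or the bare name "node-group-1" would also be accepted.
	_ = &dataproc.NodeGroupAffinity{
		NodeGroupUri: "projects/my-project/zones/us-central1-a/nodeGroups/node-group-1",
	}
}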
NullFields []string `json:"-"` } -func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { - type NoMethod SetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s NodeGroupAffinity) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupAffinity + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ShieldedInstanceConfig: Shielded Instance Config for clusters using Compute -// Engine Shielded VMs -// (https://cloud.google.com/security/shielded-cloud/shielded-vm). -type ShieldedInstanceConfig struct { - // EnableIntegrityMonitoring: Optional. Defines whether instances have - // integrity monitoring enabled. - EnableIntegrityMonitoring bool `json:"enableIntegrityMonitoring,omitempty"` - // EnableSecureBoot: Optional. Defines whether instances have Secure Boot - // enabled. - EnableSecureBoot bool `json:"enableSecureBoot,omitempty"` - // EnableVtpm: Optional. Defines whether instances have the vTPM enabled. - EnableVtpm bool `json:"enableVtpm,omitempty"` - // ForceSendFields is a list of field names (e.g. "EnableIntegrityMonitoring") - // to unconditionally include in API requests. By default, fields with empty or +// NodeGroupOperationMetadata: Metadata describing the node group operation. +type NodeGroupOperationMetadata struct { + // ClusterUuid: Output only. Cluster UUID associated with the node group + // operation. + ClusterUuid string `json:"clusterUuid,omitempty"` + // Description: Output only. Short description of operation. + Description string `json:"description,omitempty"` + // Labels: Output only. Labels associated with the operation. + Labels map[string]string `json:"labels,omitempty"` + // NodeGroupId: Output only. Node group ID for the operation. + NodeGroupId string `json:"nodeGroupId,omitempty"` + // OperationType: The operation type. + // + // Possible values: + // "NODE_GROUP_OPERATION_TYPE_UNSPECIFIED" - Node group operation type is + // unknown. + // "CREATE" - Create node group operation type. + // "UPDATE" - Update node group operation type. + // "DELETE" - Delete node group operation type. + // "RESIZE" - Resize node group operation type. + // "REPAIR" - Repair node group operation type. + // "UPDATE_LABELS" - Update node group label operation type. + // "START" - Start node group operation type. + // "STOP" - Stop node group operation type. + OperationType string `json:"operationType,omitempty"` + // Status: Output only. Current operation status. + Status *ClusterOperationStatus `json:"status,omitempty"` + // StatusHistory: Output only. The previous operation status. + StatusHistory []*ClusterOperationStatus `json:"statusHistory,omitempty"` + // Warnings: Output only. Errors encountered during operation execution. + Warnings []string `json:"warnings,omitempty"` + // ForceSendFields is a list of field names (e.g. "ClusterUuid") to + // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "EnableIntegrityMonitoring") to - // include in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. See + // NullFields is a list of field names (e.g. "ClusterUuid") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. 
See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ShieldedInstanceConfig) MarshalJSON() ([]byte, error) { - type NoMethod ShieldedInstanceConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s NodeGroupOperationMetadata) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupOperationMetadata + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// SoftwareConfig: Specifies the selection and config of software inside the -// cluster. -type SoftwareConfig struct { - // ImageVersion: Optional. The version of software inside the cluster. It must - // be one of the supported Dataproc Versions - // (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), - // such as "1.2" (including a subminor version, such as "1.2.29"), or the - // "preview" version - // (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). - // If unspecified, it defaults to the latest Debian version. - ImageVersion string `json:"imageVersion,omitempty"` - // OptionalComponents: Optional. The set of components to activate on the - // cluster. - // - // Possible values: - // "COMPONENT_UNSPECIFIED" - Unspecified component. Specifying this will - // cause Cluster creation to fail. - // "ANACONDA" - The Anaconda python distribution. The Anaconda component is - // not supported in the Dataproc 2.0 image. The 2.0 image is pre-installed with - // Miniconda. - // "DOCKER" - Docker - // "DRUID" - The Druid query engine. (alpha) - // "FLINK" - Flink - // "HBASE" - HBase. (beta) - // "HIVE_WEBHCAT" - The Hive Web HCatalog (the REST service for accessing - // HCatalog). - // "HUDI" - Hudi. - // "JUPYTER" - The Jupyter Notebook. - // "PRESTO" - The Presto query engine. - // "TRINO" - The Trino query engine. - // "RANGER" - The Ranger service. - // "SOLR" - The Solr service. - // "ZEPPELIN" - The Zeppelin notebook. - // "ZOOKEEPER" - The Zookeeper service. - OptionalComponents []string `json:"optionalComponents,omitempty"` - // Properties: Optional. The properties to set on daemon config files.Property - // keys are specified in prefix:property format, for example - // core:hadoop.tmp.dir. The following are supported prefixes and their - // mappings: capacity-scheduler: capacity-scheduler.xml core: core-site.xml - // distcp: distcp-default.xml hdfs: hdfs-site.xml hive: hive-site.xml mapred: - // mapred-site.xml pig: pig.properties spark: spark-defaults.conf yarn: - // yarn-site.xmlFor more information, see Cluster properties - // (https://cloud.google.com/dataproc/docs/concepts/cluster-properties). - Properties map[string]string `json:"properties,omitempty"` - // ForceSendFields is a list of field names (e.g. "ImageVersion") to +// NodeInitializationAction: Specifies an executable to run on a fully +// configured node and a timeout period for executable completion. +type NodeInitializationAction struct { + // ExecutableFile: Required. Cloud Storage URI of executable file. + ExecutableFile string `json:"executableFile,omitempty"` + // ExecutionTimeout: Optional. Amount of time executable has to complete. 
+ // Default is 10 minutes (see JSON representation of Duration + // (https://developers.google.com/protocol-buffers/docs/proto3#json)).Cluster + // creation fails with an explanatory error message (the name of the executable + // that caused the error and the exceeded timeout period) if the executable is + // not completed at the end of the timeout period. + ExecutionTimeout string `json:"executionTimeout,omitempty"` + // ForceSendFields is a list of field names (e.g. "ExecutableFile") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ImageVersion") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "ExecutableFile") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *SoftwareConfig) MarshalJSON() ([]byte, error) { - type NoMethod SoftwareConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s NodeInitializationAction) MarshalJSON() ([]byte, error) { + type NoMethod NodeInitializationAction + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// SparkBatch: A configuration for running an Apache Spark -// (https://spark.apache.org/) batch workload. -type SparkBatch struct { - // ArchiveUris: Optional. HCFS URIs of archives to be extracted into the - // working directory of each executor. Supported file types: .jar, .tar, - // .tar.gz, .tgz, and .zip. - ArchiveUris []string `json:"archiveUris,omitempty"` - // Args: Optional. The arguments to pass to the driver. Do not include - // arguments that can be set as batch properties, such as --conf, since a - // collision can occur that causes an incorrect batch submission. - Args []string `json:"args,omitempty"` - // FileUris: Optional. HCFS URIs of files to be placed in the working directory - // of each executor. - FileUris []string `json:"fileUris,omitempty"` - // JarFileUris: Optional. HCFS URIs of jar files to add to the classpath of the - // Spark driver and tasks. - JarFileUris []string `json:"jarFileUris,omitempty"` - // MainClass: Optional. The name of the driver main class. The jar file that - // contains the class must be in the classpath or specified in jar_file_uris. - MainClass string `json:"mainClass,omitempty"` - // MainJarFileUri: Optional. The HCFS URI of the jar file that contains the - // main class. - MainJarFileUri string `json:"mainJarFileUri,omitempty"` - // ForceSendFields is a list of field names (e.g. "ArchiveUris") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See +// NodePool: A list of workers of the same type. +type NodePool struct { + // Id: Required. A unique id of the node pool. Primary and Secondary workers + // can be specified using special reserved ids PRIMARY_WORKER_POOL and + // SECONDARY_WORKER_POOL respectively. Aux node pools can be referenced using + // the corresponding pool id. + Id string `json:"id,omitempty"` + // InstanceNames: Names of instances to be repaired.
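Stepping back to the NodeInitializationAction completed above, a minimal sketch mainly showing that ExecutionTimeout is a JSON-encoded Duration string; the bucket path is a placeholder and the import path is assumed as before.

package main

import dataproc "google.golang.org/api/dataproc/v1"

func main() {
	_ = &dataproc.NodeInitializationAction{
		ExecutableFile:   "gs://my-bucket/scripts/bootstrap.sh", // placeholder Cloud Storage URI
		ExecutionTimeout: "600s",                                // JSON Duration; 10 minutes if omitted
	}
}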
These instances must belong + // to the specified node pool. + InstanceNames []string `json:"instanceNames,omitempty"` + // RepairAction: Required. Repair action to take on specified resources of the + // node pool. + // + // Possible values: + // "REPAIR_ACTION_UNSPECIFIED" - No action will be taken by default. + // "DELETE" - Delete the specified list of nodes. + RepairAction string `json:"repairAction,omitempty"` + // ForceSendFields is a list of field names (e.g. "Id") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ArchiveUris") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "Id") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *SparkBatch) MarshalJSON() ([]byte, error) { - type NoMethod SparkBatch - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s NodePool) MarshalJSON() ([]byte, error) { + type NoMethod NodePool + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// SparkHistoryServerConfig: Spark History Server configuration for the -// workload. -type SparkHistoryServerConfig struct { - // DataprocCluster: Optional. Resource name of an existing Dataproc Cluster to - // act as a Spark History Server for the workload.Example: - // projects/[project_id]/regions/[region]/clusters/[cluster_name] - DataprocCluster string `json:"dataprocCluster,omitempty"` - // ForceSendFields is a list of field names (e.g. "DataprocCluster") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See +// Operation: This resource represents a long-running operation that is the +// result of a network API call. +type Operation struct { + // Done: If the value is false, it means the operation is still in progress. If + // true, the operation is completed, and either error or response is available. + Done bool `json:"done,omitempty"` + // Error: The error result of the operation in case of failure or cancellation. + Error *Status `json:"error,omitempty"` + // Metadata: Service-specific metadata associated with the operation. It + // typically contains progress information and common metadata such as create + // time. Some services might not provide such metadata. Any method that returns + // a long-running operation should document the metadata type, if any. + Metadata googleapi.RawMessage `json:"metadata,omitempty"` + // Name: The server-assigned name, which is only unique within the same service + // that originally returns it. If you use the default HTTP mapping, the name + // should be a resource name ending with operations/{unique_id}. + Name string `json:"name,omitempty"` + // Response: The normal, successful response of the operation. If the original + // method returns no data on success, such as Delete, the response is + // google.protobuf.Empty. If the original method is standard Get/Create/Update, + // the response should be the resource.
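For the NodePool repair spec just completed, a minimal sketch using the reserved primary-worker pool id; the instance names are placeholders, import path assumed as before.

package main

import dataproc "google.golang.org/api/dataproc/v1"

func main() {
	// "DELETE" is the only concrete repair action documented above.
	_ = &dataproc.NodePool{
		Id:            "PRIMARY_WORKER_POOL", // reserved id; SECONDARY_WORKER_POOL also exists
		InstanceNames: []string{"my-cluster-w-0", "my-cluster-w-1"},
		RepairAction:  "DELETE",
	}
}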
For other methods, the response should + // have the type XxxResponse, where Xxx is the original method name. For + // example, if the original method name is TakeSnapshot(), the inferred + // response type is TakeSnapshotResponse. + Response googleapi.RawMessage `json:"response,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Done") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DataprocCluster") to include in - // API requests with the JSON null value. By default, fields with empty values - // are omitted from API requests. See + // NullFields is a list of field names (e.g. "Done") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *SparkHistoryServerConfig) MarshalJSON() ([]byte, error) { - type NoMethod SparkHistoryServerConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s Operation) MarshalJSON() ([]byte, error) { + type NoMethod Operation + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// SparkJob: A Dataproc job for running Apache Spark -// (https://spark.apache.org/) applications on YARN. -type SparkJob struct { - // ArchiveUris: Optional. HCFS URIs of archives to be extracted into the - // working directory of each executor. Supported file types: .jar, .tar, - // .tar.gz, .tgz, and .zip. - ArchiveUris []string `json:"archiveUris,omitempty"` - // Args: Optional. The arguments to pass to the driver. Do not include - // arguments, such as --conf, that can be set as job properties, since a - // collision may occur that causes an incorrect job submission. - Args []string `json:"args,omitempty"` - // FileUris: Optional. HCFS URIs of files to be placed in the working directory - // of each executor. Useful for naively parallel tasks. - FileUris []string `json:"fileUris,omitempty"` - // JarFileUris: Optional. HCFS URIs of jar files to add to the CLASSPATHs of - // the Spark driver and tasks. - JarFileUris []string `json:"jarFileUris,omitempty"` - // LoggingConfig: Optional. The runtime log config for job execution. - LoggingConfig *LoggingConfig `json:"loggingConfig,omitempty"` - // MainClass: The name of the driver's main class. The jar file that contains - // the class must be in the default CLASSPATH or specified in - // SparkJob.jar_file_uris. - MainClass string `json:"mainClass,omitempty"` - // MainJarFileUri: The HCFS URI of the jar file that contains the main class. - MainJarFileUri string `json:"mainJarFileUri,omitempty"` - // Properties: Optional. A mapping of property names to values, used to - // configure Spark. Properties that conflict with values set by the Dataproc - // API might be overwritten. Can include properties set in - // /etc/spark/conf/spark-defaults.conf and classes in user code. - Properties map[string]string `json:"properties,omitempty"` - // ForceSendFields is a list of field names (e.g. "ArchiveUris") to +// OrderedJob: A job executed by the workflow. 
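Before the OrderedJob definition, a short sketch of how the Operation type above is typically inspected once Done is true: exactly one of Error or Response is set, and Response is raw JSON whose concrete type depends on the originating method (a cluster create is assumed here; import path as before).

package main

import (
	"encoding/json"
	"fmt"

	dataproc "google.golang.org/api/dataproc/v1"
)

// checkDone reports the outcome of a completed long-running operation.
func checkDone(op *dataproc.Operation) error {
	if !op.Done {
		return fmt.Errorf("operation %s still in progress", op.Name)
	}
	if op.Error != nil {
		return fmt.Errorf("operation %s failed: %d: %s", op.Name, op.Error.Code, op.Error.Message)
	}
	var c dataproc.Cluster // assumed response type for a cluster create
	return json.Unmarshal(op.Response, &c)
}

func main() { _ = checkDone }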
+type OrderedJob struct { + // FlinkJob: Optional. Job is a Flink job. + FlinkJob *FlinkJob `json:"flinkJob,omitempty"` + // HadoopJob: Optional. Job is a Hadoop job. + HadoopJob *HadoopJob `json:"hadoopJob,omitempty"` + // HiveJob: Optional. Job is a Hive job. + HiveJob *HiveJob `json:"hiveJob,omitempty"` + // Labels: Optional. The labels to associate with this job.Label keys must be + // between 1 and 63 characters long, and must conform to the following regular + // expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63 + // characters long, and must conform to the following regular expression: + // \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be associated with a + // given job. + Labels map[string]string `json:"labels,omitempty"` + // PigJob: Optional. Job is a Pig job. + PigJob *PigJob `json:"pigJob,omitempty"` + // PrerequisiteStepIds: Optional. The optional list of prerequisite job + // step_ids. If not specified, the job will start at the beginning of workflow. + PrerequisiteStepIds []string `json:"prerequisiteStepIds,omitempty"` + // PrestoJob: Optional. Job is a Presto job. + PrestoJob *PrestoJob `json:"prestoJob,omitempty"` + // PysparkJob: Optional. Job is a PySpark job. + PysparkJob *PySparkJob `json:"pysparkJob,omitempty"` + // Scheduling: Optional. Job scheduling configuration. + Scheduling *JobScheduling `json:"scheduling,omitempty"` + // SparkJob: Optional. Job is a Spark job. + SparkJob *SparkJob `json:"sparkJob,omitempty"` + // SparkRJob: Optional. Job is a SparkR job. + SparkRJob *SparkRJob `json:"sparkRJob,omitempty"` + // SparkSqlJob: Optional. Job is a SparkSql job. + SparkSqlJob *SparkSqlJob `json:"sparkSqlJob,omitempty"` + // StepId: Required. The step id. The id must be unique among all jobs within + // the template.The step id is used as prefix for job id, as job + // goog-dataproc-workflow-step-id label, and in prerequisiteStepIds field from + // other steps.The id must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), and hyphens (-). Cannot begin or end with underscore or + // hyphen. Must consist of between 3 and 50 characters. + StepId string `json:"stepId,omitempty"` + // TrinoJob: Optional. Job is a Trino job. + TrinoJob *TrinoJob `json:"trinoJob,omitempty"` + // ForceSendFields is a list of field names (e.g. "FlinkJob") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ArchiveUris") to include in API + // NullFields is a list of field names (e.g. "FlinkJob") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *SparkJob) MarshalJSON() ([]byte, error) { - type NoMethod SparkJob - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s OrderedJob) MarshalJSON() ([]byte, error) { + type NoMethod OrderedJob + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// SparkRBatch: A configuration for running an Apache SparkR -// (https://spark.apache.org/docs/latest/sparkr.html) batch workload. -type SparkRBatch struct { - // ArchiveUris: Optional. 
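The OrderedJob message completed above encodes the workflow DAG through StepId and PrerequisiteStepIds; a hedged two-step sketch (class names and step ids are placeholders, import path as before):

package main

import dataproc "google.golang.org/api/dataproc/v1"

func main() {
	ingest := &dataproc.OrderedJob{
		StepId:   "ingest",
		SparkJob: &dataproc.SparkJob{MainClass: "com.example.Ingest"},
	}
	aggregate := &dataproc.OrderedJob{
		StepId:              "aggregate",
		PrerequisiteStepIds: []string{"ingest"}, // runs only after "ingest" finishes
		SparkJob:            &dataproc.SparkJob{MainClass: "com.example.Aggregate"},
	}
	_ = []*dataproc.OrderedJob{ingest, aggregate}
}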
HCFS URIs of archives to be extracted into the - // working directory of each executor. Supported file types: .jar, .tar, - // .tar.gz, .tgz, and .zip. - ArchiveUris []string `json:"archiveUris,omitempty"` - // Args: Optional. The arguments to pass to the Spark driver. Do not include - // arguments that can be set as batch properties, such as --conf, since a - // collision can occur that causes an incorrect batch submission. - Args []string `json:"args,omitempty"` - // FileUris: Optional. HCFS URIs of files to be placed in the working directory - // of each executor. - FileUris []string `json:"fileUris,omitempty"` - // MainRFileUri: Required. The HCFS URI of the main R file to use as the - // driver. Must be a .R or .r file. - MainRFileUri string `json:"mainRFileUri,omitempty"` - // ForceSendFields is a list of field names (e.g. "ArchiveUris") to +// OutputMetrics: Metrics about the data written by the task. +type OutputMetrics struct { + BytesWritten int64 `json:"bytesWritten,omitempty,string"` + RecordsWritten int64 `json:"recordsWritten,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "BytesWritten") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ArchiveUris") to include in API + // NullFields is a list of field names (e.g. "BytesWritten") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *SparkRBatch) MarshalJSON() ([]byte, error) { - type NoMethod SparkRBatch - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s OutputMetrics) MarshalJSON() ([]byte, error) { + type NoMethod OutputMetrics + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// SparkRJob: A Dataproc job for running Apache SparkR -// (https://spark.apache.org/docs/latest/sparkr.html) applications on YARN. -type SparkRJob struct { - // ArchiveUris: Optional. HCFS URIs of archives to be extracted into the - // working directory of each executor. Supported file types: .jar, .tar, - // .tar.gz, .tgz, and .zip. - ArchiveUris []string `json:"archiveUris,omitempty"` - // Args: Optional. The arguments to pass to the driver. Do not include - // arguments, such as --conf, that can be set as job properties, since a - // collision may occur that causes an incorrect job submission. - Args []string `json:"args,omitempty"` - // FileUris: Optional. HCFS URIs of files to be placed in the working directory - // of each executor. Useful for naively parallel tasks. - FileUris []string `json:"fileUris,omitempty"` - // LoggingConfig: Optional. The runtime log config for job execution. - LoggingConfig *LoggingConfig `json:"loggingConfig,omitempty"` - // MainRFileUri: Required. The HCFS URI of the main R file to use as the - // driver. Must be a .R file. - MainRFileUri string `json:"mainRFileUri,omitempty"` - // Properties: Optional. A mapping of property names to values, used to - // configure SparkR. Properties that conflict with values set by the Dataproc - // API might be overwritten. Can include properties set in - // /etc/spark/conf/spark-defaults.conf and classes in user code. 
- Properties map[string]string `json:"properties,omitempty"` - // ForceSendFields is a list of field names (e.g. "ArchiveUris") to +type OutputQuantileMetrics struct { + BytesWritten *Quantiles `json:"bytesWritten,omitempty"` + RecordsWritten *Quantiles `json:"recordsWritten,omitempty"` + // ForceSendFields is a list of field names (e.g. "BytesWritten") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ArchiveUris") to include in API + // NullFields is a list of field names (e.g. "BytesWritten") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *SparkRJob) MarshalJSON() ([]byte, error) { - type NoMethod SparkRJob - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s OutputQuantileMetrics) MarshalJSON() ([]byte, error) { + type NoMethod OutputQuantileMetrics + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// SparkSqlBatch: A configuration for running Apache Spark SQL -// (https://spark.apache.org/sql/) queries as a batch workload. -type SparkSqlBatch struct { - // JarFileUris: Optional. HCFS URIs of jar files to be added to the Spark - // CLASSPATH. - JarFileUris []string `json:"jarFileUris,omitempty"` - // QueryFileUri: Required. The HCFS URI of the script that contains Spark SQL - // queries to execute. - QueryFileUri string `json:"queryFileUri,omitempty"` - // QueryVariables: Optional. Mapping of query variable names to values - // (equivalent to the Spark SQL command: SET name="value";). - QueryVariables map[string]string `json:"queryVariables,omitempty"` - // ForceSendFields is a list of field names (e.g. "JarFileUris") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See +// ParameterValidation: Configuration for parameter validation. +type ParameterValidation struct { + // Regex: Validation based on regular expressions. + Regex *RegexValidation `json:"regex,omitempty"` + // Values: Validation based on a list of allowed values. + Values *ValueValidation `json:"values,omitempty"` + // ForceSendFields is a list of field names (e.g. "Regex") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "JarFileUris") to include in API + // NullFields is a list of field names (e.g. "Regex") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
NullFields []string `json:"-"` } -func (s *SparkSqlBatch) MarshalJSON() ([]byte, error) { - type NoMethod SparkSqlBatch - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s ParameterValidation) MarshalJSON() ([]byte, error) { + type NoMethod ParameterValidation + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// SparkSqlJob: A Dataproc job for running Apache Spark SQL -// (https://spark.apache.org/sql/) queries. -type SparkSqlJob struct { - // JarFileUris: Optional. HCFS URIs of jar files to be added to the Spark - // CLASSPATH. +// PeripheralsConfig: Auxiliary services configuration for a workload. +type PeripheralsConfig struct { + // MetastoreService: Optional. Resource name of an existing Dataproc Metastore + // service.Example: + // projects/[project_id]/locations/[region]/services/[service_id] + MetastoreService string `json:"metastoreService,omitempty"` + // SparkHistoryServerConfig: Optional. The Spark History Server configuration + // for the workload. + SparkHistoryServerConfig *SparkHistoryServerConfig `json:"sparkHistoryServerConfig,omitempty"` + // ForceSendFields is a list of field names (e.g. "MetastoreService") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "MetastoreService") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s PeripheralsConfig) MarshalJSON() ([]byte, error) { + type NoMethod PeripheralsConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// PigJob: A Dataproc job for running Apache Pig (https://pig.apache.org/) +// queries on YARN. +type PigJob struct { + // ContinueOnFailure: Optional. Whether to continue executing queries if a + // query fails. The default value is false. Setting to true can be useful when + // executing independent parallel queries. + ContinueOnFailure bool `json:"continueOnFailure,omitempty"` + // JarFileUris: Optional. HCFS URIs of jar files to add to the CLASSPATH of the + // Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. JarFileUris []string `json:"jarFileUris,omitempty"` // LoggingConfig: Optional. The runtime log config for job execution. LoggingConfig *LoggingConfig `json:"loggingConfig,omitempty"` // Properties: Optional. A mapping of property names to values, used to - // configure Spark SQL's SparkConf. Properties that conflict with values set by - // the Dataproc API might be overwritten. + // configure Pig. Properties that conflict with values set by the Dataproc API + // might be overwritten. Can include properties set in + // /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in + // user code. Properties map[string]string `json:"properties,omitempty"` - // QueryFileUri: The HCFS URI of the script that contains SQL queries. + // QueryFileUri: The HCFS URI of the script that contains the Pig queries. QueryFileUri string `json:"queryFileUri,omitempty"` // QueryList: A list of queries. QueryList *QueryList `json:"queryList,omitempty"` // ScriptVariables: Optional. 
Mapping of query variable names to values - // (equivalent to the Spark SQL command: SET name="value";). + // (equivalent to the Pig command: name=[value]). ScriptVariables map[string]string `json:"scriptVariables,omitempty"` - // ForceSendFields is a list of field names (e.g. "JarFileUris") to + // ForceSendFields is a list of field names (e.g. "ContinueOnFailure") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "JarFileUris") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "ContinueOnFailure") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *SparkSqlJob) MarshalJSON() ([]byte, error) { - type NoMethod SparkSqlJob - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// SparkStandaloneAutoscalingConfig: Basic autoscaling configurations for Spark -// Standalone. -type SparkStandaloneAutoscalingConfig struct { - // GracefulDecommissionTimeout: Required. Timeout for Spark graceful - // decommissioning of spark workers. Specifies the duration to wait for spark - // worker to complete spark decommissioning tasks before forcefully removing - // workers. Only applicable to downscaling operations.Bounds: 0s, 1d. - GracefulDecommissionTimeout string `json:"gracefulDecommissionTimeout,omitempty"` - // RemoveOnlyIdleWorkers: Optional. Remove only idle workers when scaling down - // cluster - RemoveOnlyIdleWorkers bool `json:"removeOnlyIdleWorkers,omitempty"` - // ScaleDownFactor: Required. Fraction of required executors to remove from - // Spark Serverless clusters. A scale-down factor of 1.0 will result in scaling - // down so that there are no more executors for the Spark Job.(more aggressive - // scaling). A scale-down factor closer to 0 will result in a smaller magnitude - // of scaling donw (less aggressive scaling).Bounds: 0.0, 1.0. - ScaleDownFactor float64 `json:"scaleDownFactor,omitempty"` - // ScaleDownMinWorkerFraction: Optional. Minimum scale-down threshold as a - // fraction of total cluster size before scaling occurs. For example, in a - // 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at - // least a 2 worker scale-down for the cluster to scale. A threshold of 0 means - // the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. - // Default: 0.0. - ScaleDownMinWorkerFraction float64 `json:"scaleDownMinWorkerFraction,omitempty"` - // ScaleUpFactor: Required. Fraction of required workers to add to Spark - // Standalone clusters. A scale-up factor of 1.0 will result in scaling up so - // that there are no more required workers for the Spark Job (more aggressive - // scaling). A scale-up factor closer to 0 will result in a smaller magnitude - // of scaling up (less aggressive scaling).Bounds: 0.0, 1.0. - ScaleUpFactor float64 `json:"scaleUpFactor,omitempty"` - // ScaleUpMinWorkerFraction: Optional. Minimum scale-up threshold as a fraction - // of total cluster size before scaling occurs. 
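To make the PigJob fields above concrete, a small sketch; per the ScriptVariables comment, each map entry reaches Pig as name=[value]. Bucket and variable values are placeholders, import path as before.

package main

import dataproc "google.golang.org/api/dataproc/v1"

func main() {
	_ = &dataproc.PigJob{
		ContinueOnFailure: true, // keep running independent queries after one fails
		QueryFileUri:      "gs://my-bucket/queries/etl.pig",
		ScriptVariables:   map[string]string{"run_date": "2026-02-12"}, // run_date=[2026-02-12]
	}
}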
For example, in a 20-worker - // cluster, a threshold of 0.1 means the autoscaler must recommend at least a - // 2-worker scale-up for the cluster to scale. A threshold of 0 means the - // autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. - // Default: 0.0. - ScaleUpMinWorkerFraction float64 `json:"scaleUpMinWorkerFraction,omitempty"` - // ForceSendFields is a list of field names (e.g. - // "GracefulDecommissionTimeout") to unconditionally include in API requests. - // By default, fields with empty or default values are omitted from API - // requests. See https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields - // for more details. - ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "GracefulDecommissionTimeout") to - // include in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. - NullFields []string `json:"-"` +func (s PigJob) MarshalJSON() ([]byte, error) { + type NoMethod PigJob + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -func (s *SparkStandaloneAutoscalingConfig) MarshalJSON() ([]byte, error) { - type NoMethod SparkStandaloneAutoscalingConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -func (s *SparkStandaloneAutoscalingConfig) UnmarshalJSON(data []byte) error { - type NoMethod SparkStandaloneAutoscalingConfig - var s1 struct { - ScaleDownFactor gensupport.JSONFloat64 `json:"scaleDownFactor"` - ScaleDownMinWorkerFraction gensupport.JSONFloat64 `json:"scaleDownMinWorkerFraction"` - ScaleUpFactor gensupport.JSONFloat64 `json:"scaleUpFactor"` - ScaleUpMinWorkerFraction gensupport.JSONFloat64 `json:"scaleUpMinWorkerFraction"` - *NoMethod - } - s1.NoMethod = (*NoMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.ScaleDownFactor = float64(s1.ScaleDownFactor) - s.ScaleDownMinWorkerFraction = float64(s1.ScaleDownMinWorkerFraction) - s.ScaleUpFactor = float64(s1.ScaleUpFactor) - s.ScaleUpMinWorkerFraction = float64(s1.ScaleUpMinWorkerFraction) - return nil -} +// Policy: An Identity and Access Management (IAM) policy, which specifies +// access controls for Google Cloud resources.A Policy is a collection of +// bindings. A binding binds one or more members, or principals, to a single +// role. Principals can be user accounts, service accounts, Google groups, and +// domains (such as G Suite). A role is a named list of permissions; each role +// can be an IAM predefined role or a user-created custom role.For some types +// of Google Cloud resources, a binding can also specify a condition, which is +// a logical expression that allows access to a resource only if the expression +// evaluates to true. A condition can add constraints based on attributes of +// the request, the resource, or both. 
To learn which resources support +// conditions in their IAM policies, see the IAM documentation +// (https://cloud.google.com/iam/help/conditions/resource-policies).JSON +// example: { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": +// "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": +// "BwWWja0YfJA=", "version": 3 } YAML example: bindings: - members: - +// user:mike@example.com - group:admins@example.com - domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - user:eve@example.com +// role: roles/resourcemanager.organizationViewer condition: title: expirable +// access description: Does not grant access after Sep 2020 expression: +// request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= +// version: 3 For a description of IAM and its features, see the IAM +// documentation (https://cloud.google.com/iam/docs/). +type Policy struct { + // Bindings: Associates a list of members, or principals, with a role. + // Optionally, may specify a condition that determines how and when the + // bindings are applied. Each of the bindings must contain at least one + // principal.The bindings in a Policy can refer to up to 1,500 principals; up + // to 250 of these principals can be Google groups. Each occurrence of a + // principal counts towards these limits. For example, if the bindings grant 50 + // different roles to user:alice@example.com, and not to any other principal, + // then you can add another 1,450 principals to the bindings in the Policy. + Bindings []*Binding `json:"bindings,omitempty"` + // Etag: etag is used for optimistic concurrency control as a way to help + // prevent simultaneous updates of a policy from overwriting each other. It is + // strongly suggested that systems make use of the etag in the + // read-modify-write cycle to perform policy updates in order to avoid race + // conditions: An etag is returned in the response to getIamPolicy, and systems + // are expected to put that etag in the request to setIamPolicy to ensure that + // their change will be applied to the same version of the policy.Important: If + // you use IAM Conditions, you must include the etag field whenever you call + // setIamPolicy. If you omit this field, then IAM allows you to overwrite a + // version 3 policy with a version 1 policy, and all of the conditions in the + // version 3 policy are lost. + Etag string `json:"etag,omitempty"` + // Version: Specifies the format of the policy.Valid values are 0, 1, and 3. + // Requests that specify an invalid value are rejected.Any operation that + // affects conditional role bindings must specify version 3. This requirement + // applies to the following operations: Getting a policy that includes a + // conditional role binding Adding a conditional role binding to a policy + // Changing a conditional role binding in a policy Removing any role binding, + // with or without a condition, from a policy that includes + // conditionsImportant: If you use IAM Conditions, you must include the etag + // field whenever you call setIamPolicy. 
If you omit this field, then IAM + // allows you to overwrite a version 3 policy with a version 1 policy, and all + // of the conditions in the version 3 policy are lost.If a policy does not + // include any conditions, operations on that policy may specify any valid + // version or leave the field unset.To learn which resources support conditions + // in their IAM policies, see the IAM documentation + // (https://cloud.google.com/iam/help/conditions/resource-policies). + Version int64 `json:"version,omitempty"` -// StartClusterRequest: A request to start a cluster. -type StartClusterRequest struct { - // ClusterUuid: Optional. Specifying the cluster_uuid means the RPC will fail - // (with error NOT_FOUND) if a cluster with the specified UUID does not exist. - ClusterUuid string `json:"clusterUuid,omitempty"` - // RequestId: Optional. A unique ID used to identify the request. If the server - // receives two StartClusterRequest - // (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s - // with the same id, then the second request will be ignored and the first - // google.longrunning.Operation created and stored in the backend is - // returned.Recommendation: Set this value to a UUID - // (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must - // contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens - // (-). The maximum length is 40 characters. - RequestId string `json:"requestId,omitempty"` - // ForceSendFields is a list of field names (e.g. "ClusterUuid") to + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Bindings") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ClusterUuid") to include in API + // NullFields is a list of field names (e.g. "Bindings") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *StartClusterRequest) MarshalJSON() ([]byte, error) { - type NoMethod StartClusterRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s Policy) MarshalJSON() ([]byte, error) { + type NoMethod Policy + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// StartupConfig: Configuration to handle the startup of instances during -// cluster create and update process. -type StartupConfig struct { - // RequiredRegistrationFraction: Optional. The config setting to enable cluster - // creation/ updation to be successful only after - // required_registration_fraction of instances are up and running. This - // configuration is applicable to only secondary workers for now. The cluster - // will fail if required_registration_fraction of instances are not available. - // This will include instance creation, agent registration, and service - // registration (if enabled). - RequiredRegistrationFraction float64 `json:"requiredRegistrationFraction,omitempty"` - // ForceSendFields is a list of field names (e.g. 
- // "RequiredRegistrationFraction") to unconditionally include in API requests. - // By default, fields with empty or default values are omitted from API - // requests. See https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields - // for more details. +// PoolData: Pool Data +type PoolData struct { + Name string `json:"name,omitempty"` + StageIds googleapi.Int64s `json:"stageIds,omitempty"` + // ForceSendFields is a list of field names (e.g. "Name") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "RequiredRegistrationFraction") to - // include in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. See + // NullFields is a list of field names (e.g. "Name") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *StartupConfig) MarshalJSON() ([]byte, error) { - type NoMethod StartupConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -func (s *StartupConfig) UnmarshalJSON(data []byte) error { - type NoMethod StartupConfig - var s1 struct { - RequiredRegistrationFraction gensupport.JSONFloat64 `json:"requiredRegistrationFraction"` - *NoMethod - } - s1.NoMethod = (*NoMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.RequiredRegistrationFraction = float64(s1.RequiredRegistrationFraction) - return nil +func (s PoolData) MarshalJSON() ([]byte, error) { + type NoMethod PoolData + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// StateHistory: Historical state information. -type StateHistory struct { - // State: Output only. The state of the batch at this point in history. - // - // Possible values: - // "STATE_UNSPECIFIED" - The batch state is unknown. - // "PENDING" - The batch is created before running. - // "RUNNING" - The batch is running. - // "CANCELLING" - The batch is cancelling. - // "CANCELLED" - The batch cancellation was successful. - // "SUCCEEDED" - The batch completed successfully. - // "FAILED" - The batch is no longer running due to an error. - State string `json:"state,omitempty"` - // StateMessage: Output only. Details about the state at this point in history. - StateMessage string `json:"stateMessage,omitempty"` - // StateStartTime: Output only. The time when the batch entered the historical - // state. - StateStartTime string `json:"stateStartTime,omitempty"` - // ForceSendFields is a list of field names (e.g. "State") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See +// PrestoJob: A Dataproc job for running Presto (https://prestosql.io/) +// queries. IMPORTANT: The Dataproc Presto Optional Component +// (https://cloud.google.com/dataproc/docs/concepts/components/presto) must be +// enabled when the cluster is created to submit a Presto job to the cluster. +type PrestoJob struct { + // ClientTags: Optional. Presto client tags to attach to this query + ClientTags []string `json:"clientTags,omitempty"` + // ContinueOnFailure: Optional. 
Whether to continue executing queries if a + // query fails. The default value is false. Setting to true can be useful when + // executing independent parallel queries. + ContinueOnFailure bool `json:"continueOnFailure,omitempty"` + // LoggingConfig: Optional. The runtime log config for job execution. + LoggingConfig *LoggingConfig `json:"loggingConfig,omitempty"` + // OutputFormat: Optional. The format in which query output will be displayed. + // See the Presto documentation for supported output formats + OutputFormat string `json:"outputFormat,omitempty"` + // Properties: Optional. A mapping of property names to values. Used to set + // Presto session properties + // (https://prestodb.io/docs/current/sql/set-session.html) Equivalent to using + // the --session flag in the Presto CLI + Properties map[string]string `json:"properties,omitempty"` + // QueryFileUri: The HCFS URI of the script that contains SQL queries. + QueryFileUri string `json:"queryFileUri,omitempty"` + // QueryList: A list of queries. + QueryList *QueryList `json:"queryList,omitempty"` + // ForceSendFields is a list of field names (e.g. "ClientTags") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "State") to include in API + // NullFields is a list of field names (e.g. "ClientTags") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *StateHistory) MarshalJSON() ([]byte, error) { - type NoMethod StateHistory - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// Status: The Status type defines a logical error model that is suitable for -// different programming environments, including REST APIs and RPC APIs. It is -// used by gRPC (https://github.com/grpc). Each Status message contains three -// pieces of data: error code, error message, and error details.You can find -// out more about this error model and how to work with it in the API Design -// Guide (https://cloud.google.com/apis/design/errors). -type Status struct { - // Code: The status code, which should be an enum value of google.rpc.Code. - Code int64 `json:"code,omitempty"` - // Details: A list of messages that carry the error details. There is a common - // set of message types for APIs to use. - Details []googleapi.RawMessage `json:"details,omitempty"` - // Message: A developer-facing error message, which should be in English. Any - // user-facing error message should be localized and sent in the - // google.rpc.Status.details field, or localized by the client. - Message string `json:"message,omitempty"` - // ForceSendFields is a list of field names (e.g. 
"Code") to unconditionally +func (s PrestoJob) MarshalJSON() ([]byte, error) { + type NoMethod PrestoJob + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ProcessSummary: Process Summary +type ProcessSummary struct { + AddTime string `json:"addTime,omitempty"` + HostPort string `json:"hostPort,omitempty"` + IsActive bool `json:"isActive,omitempty"` + ProcessId string `json:"processId,omitempty"` + ProcessLogs map[string]string `json:"processLogs,omitempty"` + RemoveTime string `json:"removeTime,omitempty"` + TotalCores int64 `json:"totalCores,omitempty"` + // ForceSendFields is a list of field names (e.g. "AddTime") to unconditionally // include in API requests. By default, fields with empty or default values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API requests - // with the JSON null value. By default, fields with empty values are omitted - // from API requests. See + // NullFields is a list of field names (e.g. "AddTime") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *Status) MarshalJSON() ([]byte, error) { - type NoMethod Status - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// StopClusterRequest: A request to stop a cluster. -type StopClusterRequest struct { - // ClusterUuid: Optional. Specifying the cluster_uuid means the RPC will fail - // (with error NOT_FOUND) if a cluster with the specified UUID does not exist. - ClusterUuid string `json:"clusterUuid,omitempty"` - // RequestId: Optional. A unique ID used to identify the request. If the server - // receives two StopClusterRequest - // (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s - // with the same id, then the second request will be ignored and the first - // google.longrunning.Operation created and stored in the backend is - // returned.Recommendation: Set this value to a UUID - // (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must - // contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens - // (-). The maximum length is 40 characters. - RequestId string `json:"requestId,omitempty"` - // ForceSendFields is a list of field names (e.g. "ClusterUuid") to +func (s ProcessSummary) MarshalJSON() ([]byte, error) { + type NoMethod ProcessSummary + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ProvisioningModelMix: Defines how Dataproc should create VMs with a mixture +// of provisioning models. +type ProvisioningModelMix struct { + // StandardCapacityBase: Optional. The base capacity that will always use + // Standard VMs to avoid risk of more preemption than the minimum capacity you + // need. Dataproc will create only standard VMs until it reaches + // standard_capacity_base, then it will start using + // standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If + // 15 instances are requested and standard_capacity_base is 5, Dataproc will + // create 5 standard VMs and then start mixing spot and standard VMs for + // remaining 10 instances. 
+ StandardCapacityBase int64 `json:"standardCapacityBase,omitempty"` + // StandardCapacityPercentAboveBase: Optional. The percentage of target + // capacity that should use Standard VM. The remaining percentage will use Spot + // VMs. The percentage applies only to the capacity above + // standard_capacity_base. eg. If 15 instances are requested and + // standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, + // Dataproc will create 5 standard VMs and then start mixing spot and standard + // VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + StandardCapacityPercentAboveBase int64 `json:"standardCapacityPercentAboveBase,omitempty"` + // ForceSendFields is a list of field names (e.g. "StandardCapacityBase") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ClusterUuid") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "StandardCapacityBase") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *StopClusterRequest) MarshalJSON() ([]byte, error) { - type NoMethod StopClusterRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s ProvisioningModelMix) MarshalJSON() ([]byte, error) { + type NoMethod ProvisioningModelMix + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// SubmitJobRequest: A request to submit a job. -type SubmitJobRequest struct { - // Job: Required. The job resource. - Job *Job `json:"job,omitempty"` - // RequestId: Optional. A unique id used to identify the request. If the server - // receives two SubmitJobRequest - // (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s - // with the same id, then the second request will be ignored and the first Job - // created and stored in the backend is returned.It is recommended to always - // set this value to a UUID - // (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must - // contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens - // (-). The maximum length is 40 characters. - RequestId string `json:"requestId,omitempty"` - // ForceSendFields is a list of field names (e.g. "Job") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See +// PyPiRepositoryConfig: Configuration for PyPi repository +type PyPiRepositoryConfig struct { + // PypiRepository: Optional. PyPi repository address + PypiRepository string `json:"pypiRepository,omitempty"` + // ForceSendFields is a list of field names (e.g. "PypiRepository") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. 
"Job") to include in API requests - // with the JSON null value. By default, fields with empty values are omitted - // from API requests. See + // NullFields is a list of field names (e.g. "PypiRepository") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *SubmitJobRequest) MarshalJSON() ([]byte, error) { - type NoMethod SubmitJobRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s PyPiRepositoryConfig) MarshalJSON() ([]byte, error) { + type NoMethod PyPiRepositoryConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// TemplateParameter: A configurable parameter that replaces one or more fields -// in the template. Parameterizable fields: - Labels - File uris - Job -// properties - Job arguments - Script variables - Main class (in HadoopJob and -// SparkJob) - Zone (in ClusterSelector) -type TemplateParameter struct { - // Description: Optional. Brief description of the parameter. Must not exceed - // 1024 characters. - Description string `json:"description,omitempty"` - // Fields: Required. Paths to all fields that the parameter replaces. A field - // is allowed to appear in at most one parameter's list of field paths.A field - // path is similar in syntax to a google.protobuf.FieldMask. For example, a - // field path that references the zone field of a workflow template's cluster - // selector would be specified as placement.clusterSelector.zone.Also, field - // paths can reference fields using the following syntax: Values in maps can be - // referenced by key: labels'key' placement.clusterSelector.clusterLabels'key' - // placement.managedCluster.labels'key' - // placement.clusterSelector.clusterLabels'key' jobs'step-id'.labels'key' Jobs - // in the jobs list can be referenced by step-id: - // jobs'step-id'.hadoopJob.mainJarFileUri jobs'step-id'.hiveJob.queryFileUri - // jobs'step-id'.pySparkJob.mainPythonFileUri - // jobs'step-id'.hadoopJob.jarFileUris0 jobs'step-id'.hadoopJob.archiveUris0 - // jobs'step-id'.hadoopJob.fileUris0 jobs'step-id'.pySparkJob.pythonFileUris0 - // Items in repeated fields can be referenced by a zero-based index: - // jobs'step-id'.sparkJob.args0 Other examples: - // jobs'step-id'.hadoopJob.properties'key' jobs'step-id'.hadoopJob.args0 - // jobs'step-id'.hiveJob.scriptVariables'key' - // jobs'step-id'.hadoopJob.mainJarFileUri placement.clusterSelector.zoneIt may - // not be possible to parameterize maps and repeated fields in their entirety - // since only individual map values and individual items in repeated fields can - // be referenced. For example, the following field paths are invalid: - // placement.clusterSelector.clusterLabels jobs'step-id'.sparkJob.args - Fields []string `json:"fields,omitempty"` - // Name: Required. Parameter name. The parameter name is used as the key, and - // paired with the parameter value, which are passed to the template when the - // template is instantiated. The name must contain only capital letters (A-Z), - // numbers (0-9), and underscores (_), and must not start with a number. The - // maximum length is 40 characters. - Name string `json:"name,omitempty"` - // Validation: Optional. Validation rules to be applied to this parameter's - // value. 
- Validation *ParameterValidation `json:"validation,omitempty"` - // ForceSendFields is a list of field names (e.g. "Description") to +// PySparkBatch: A configuration for running an Apache PySpark +// (https://spark.apache.org/docs/latest/api/python/getting_started/quickstart.html) +// batch workload. +type PySparkBatch struct { + // ArchiveUris: Optional. HCFS URIs of archives to be extracted into the + // working directory of each executor. Supported file types: .jar, .tar, + // .tar.gz, .tgz, and .zip. + ArchiveUris []string `json:"archiveUris,omitempty"` + // Args: Optional. The arguments to pass to the driver. Do not include + // arguments that can be set as batch properties, such as --conf, since a + // collision can occur that causes an incorrect batch submission. + Args []string `json:"args,omitempty"` + // FileUris: Optional. HCFS URIs of files to be placed in the working directory + // of each executor. + FileUris []string `json:"fileUris,omitempty"` + // JarFileUris: Optional. HCFS URIs of jar files to add to the classpath of the + // Spark driver and tasks. + JarFileUris []string `json:"jarFileUris,omitempty"` + // MainPythonFileUri: Required. The HCFS URI of the main Python file to use as + // the Spark driver. Must be a .py file. + MainPythonFileUri string `json:"mainPythonFileUri,omitempty"` + // PythonFileUris: Optional. HCFS file URIs of Python files to pass to the + // PySpark framework. Supported file types: .py, .egg, and .zip. + PythonFileUris []string `json:"pythonFileUris,omitempty"` + // ForceSendFields is a list of field names (e.g. "ArchiveUris") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Description") to include in API + // NullFields is a list of field names (e.g. "ArchiveUris") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *TemplateParameter) MarshalJSON() ([]byte, error) { - type NoMethod TemplateParameter - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s PySparkBatch) MarshalJSON() ([]byte, error) { + type NoMethod PySparkBatch + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// TerminateSessionRequest: A request to terminate an interactive session. -type TerminateSessionRequest struct { - // RequestId: Optional. A unique ID used to identify the request. If the - // service receives two TerminateSessionRequest - // (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.TerminateSessionRequest)s - // with the same ID, the second request is ignored.Recommendation: Set this - // value to a UUID - // (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must - // contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens - // (-). The maximum length is 40 characters. - RequestId string `json:"requestId,omitempty"` - // ForceSendFields is a list of field names (e.g. "RequestId") to +// PySparkJob: A Dataproc job for running Apache PySpark +// (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) +// applications on YARN. 
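+//
+// A minimal construction sketch (illustrative; the bucket URI is a
+// hypothetical placeholder, and MainPythonFileUri is the only required
+// field):
+//
+//	job := &PySparkJob{
+//		MainPythonFileUri: "gs://example-bucket/jobs/wordcount.py",
+//		Args:              []string{"gs://example-bucket/input/"},
+//	}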
+type PySparkJob struct { + // ArchiveUris: Optional. HCFS URIs of archives to be extracted into the + // working directory of each executor. Supported file types: .jar, .tar, + // .tar.gz, .tgz, and .zip. + ArchiveUris []string `json:"archiveUris,omitempty"` + // Args: Optional. The arguments to pass to the driver. Do not include + // arguments, such as --conf, that can be set as job properties, since a + // collision may occur that causes an incorrect job submission. + Args []string `json:"args,omitempty"` + // FileUris: Optional. HCFS URIs of files to be placed in the working directory + // of each executor. Useful for naively parallel tasks. + FileUris []string `json:"fileUris,omitempty"` + // JarFileUris: Optional. HCFS URIs of jar files to add to the CLASSPATHs of + // the Python driver and tasks. + JarFileUris []string `json:"jarFileUris,omitempty"` + // LoggingConfig: Optional. The runtime log config for job execution. + LoggingConfig *LoggingConfig `json:"loggingConfig,omitempty"` + // MainPythonFileUri: Required. The HCFS URI of the main Python file to use as + // the driver. Must be a .py file. + MainPythonFileUri string `json:"mainPythonFileUri,omitempty"` + // Properties: Optional. A mapping of property names to values, used to + // configure PySpark. Properties that conflict with values set by the Dataproc + // API might be overwritten. Can include properties set in + // /etc/spark/conf/spark-defaults.conf and classes in user code. + Properties map[string]string `json:"properties,omitempty"` + // PythonFileUris: Optional. HCFS file URIs of Python files to pass to the + // PySpark framework. Supported file types: .py, .egg, and .zip. + PythonFileUris []string `json:"pythonFileUris,omitempty"` + // ForceSendFields is a list of field names (e.g. "ArchiveUris") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "RequestId") to include in API + // NullFields is a list of field names (e.g. "ArchiveUris") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *TerminateSessionRequest) MarshalJSON() ([]byte, error) { - type NoMethod TerminateSessionRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// TestIamPermissionsRequest: Request message for TestIamPermissions method. -type TestIamPermissionsRequest struct { - // Permissions: The set of permissions to check for the resource. Permissions - // with wildcards (such as * or storage.*) are not allowed. For more - // information see IAM Overview - // (https://cloud.google.com/iam/docs/overview#permissions). - Permissions []string `json:"permissions,omitempty"` - // ForceSendFields is a list of field names (e.g. "Permissions") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See +func (s PySparkJob) MarshalJSON() ([]byte, error) { + type NoMethod PySparkJob + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// Quantiles: Quantile metrics data related to Tasks. Units can be seconds, +// bytes, milliseconds, etc depending on the message type. 
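+//
+// A minimal read sketch (illustrative, assuming a populated *Quantiles
+// value q for a duration metric reported in milliseconds):
+//
+//	fmt.Printf("p25=%d p50=%d p75=%d max=%d\n",
+//		q.Percentile25, q.Percentile50, q.Percentile75, q.Maximum)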
+type Quantiles struct { + Count int64 `json:"count,omitempty,string"` + Maximum int64 `json:"maximum,omitempty,string"` + Minimum int64 `json:"minimum,omitempty,string"` + Percentile25 int64 `json:"percentile25,omitempty,string"` + Percentile50 int64 `json:"percentile50,omitempty,string"` + Percentile75 int64 `json:"percentile75,omitempty,string"` + Sum int64 `json:"sum,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "Count") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Permissions") to include in API + // NullFields is a list of field names (e.g. "Count") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { - type NoMethod TestIamPermissionsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s Quantiles) MarshalJSON() ([]byte, error) { + type NoMethod Quantiles + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// TestIamPermissionsResponse: Response message for TestIamPermissions method. -type TestIamPermissionsResponse struct { - // Permissions: A subset of TestPermissionsRequest.permissions that the caller - // is allowed. - Permissions []string `json:"permissions,omitempty"` +// QueryList: A list of queries to run on a cluster. +type QueryList struct { + // Queries: Required. The queries to execute. You do not need to end a query + // expression with a semicolon. Multiple queries can be specified in one string + // by separating each with a semicolon. Here is an example of a Dataproc API + // snippet that uses a QueryList to specify a HiveJob: "hiveJob": { + // "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + Queries []string `json:"queries,omitempty"` + // ForceSendFields is a list of field names (e.g. "Queries") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Queries") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} - // ServerResponse contains the HTTP response code and headers from the server. - googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Permissions") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See +func (s QueryList) MarshalJSON() ([]byte, error) { + type NoMethod QueryList + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// RddDataDistribution: Details about RDD usage. 
+type RddDataDistribution struct { + Address string `json:"address,omitempty"` + DiskUsed int64 `json:"diskUsed,omitempty,string"` + MemoryRemaining int64 `json:"memoryRemaining,omitempty,string"` + MemoryUsed int64 `json:"memoryUsed,omitempty,string"` + OffHeapMemoryRemaining int64 `json:"offHeapMemoryRemaining,omitempty,string"` + OffHeapMemoryUsed int64 `json:"offHeapMemoryUsed,omitempty,string"` + OnHeapMemoryRemaining int64 `json:"onHeapMemoryRemaining,omitempty,string"` + OnHeapMemoryUsed int64 `json:"onHeapMemoryUsed,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "Address") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Permissions") to include in API + // NullFields is a list of field names (e.g. "Address") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { - type NoMethod TestIamPermissionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s RddDataDistribution) MarshalJSON() ([]byte, error) { + type NoMethod RddDataDistribution + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// TrinoJob: A Dataproc job for running Trino (https://trino.io/) queries. -// IMPORTANT: The Dataproc Trino Optional Component -// (https://cloud.google.com/dataproc/docs/concepts/components/trino) must be -// enabled when the cluster is created to submit a Trino job to the cluster. -type TrinoJob struct { - // ClientTags: Optional. Trino client tags to attach to this query - ClientTags []string `json:"clientTags,omitempty"` - // ContinueOnFailure: Optional. Whether to continue executing queries if a - // query fails. The default value is false. Setting to true can be useful when - // executing independent parallel queries. - ContinueOnFailure bool `json:"continueOnFailure,omitempty"` - // LoggingConfig: Optional. The runtime log config for job execution. - LoggingConfig *LoggingConfig `json:"loggingConfig,omitempty"` - // OutputFormat: Optional. The format in which query output will be displayed. - // See the Trino documentation for supported output formats - OutputFormat string `json:"outputFormat,omitempty"` - // Properties: Optional. A mapping of property names to values. Used to set - // Trino session properties - // (https://trino.io/docs/current/sql/set-session.html) Equivalent to using the - // --session flag in the Trino CLI - Properties map[string]string `json:"properties,omitempty"` - // QueryFileUri: The HCFS URI of the script that contains SQL queries. - QueryFileUri string `json:"queryFileUri,omitempty"` - // QueryList: A list of queries. - QueryList *QueryList `json:"queryList,omitempty"` - // ForceSendFields is a list of field names (e.g. "ClientTags") to +// RddOperationCluster: A grouping of nodes representing higher level +// constructs (stage, job etc.). 
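+//
+// Because clusters nest via ChildClusters, consumers typically walk the
+// hierarchy recursively; a minimal sketch (illustrative only):
+//
+//	var walk func(c *RddOperationCluster)
+//	walk = func(c *RddOperationCluster) {
+//		fmt.Println(c.Name)
+//		for _, child := range c.ChildClusters {
+//			walk(child)
+//		}
+//	}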
+type RddOperationCluster struct { + ChildClusters []*RddOperationCluster `json:"childClusters,omitempty"` + ChildNodes []*RddOperationNode `json:"childNodes,omitempty"` + Name string `json:"name,omitempty"` + RddClusterId string `json:"rddClusterId,omitempty"` + // ForceSendFields is a list of field names (e.g. "ChildClusters") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ClientTags") to include in API + // NullFields is a list of field names (e.g. "ChildClusters") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *TrinoJob) MarshalJSON() ([]byte, error) { - type NoMethod TrinoJob - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s RddOperationCluster) MarshalJSON() ([]byte, error) { + type NoMethod RddOperationCluster + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// UsageMetrics: Usage metrics represent approximate total resources consumed -// by a workload. -type UsageMetrics struct { - // AcceleratorType: Optional. Accelerator type being used, if any - AcceleratorType string `json:"acceleratorType,omitempty"` - // MilliAcceleratorSeconds: Optional. Accelerator usage in (milliAccelerator x - // seconds) (see Dataproc Serverless pricing - // (https://cloud.google.com/dataproc-serverless/pricing)). - MilliAcceleratorSeconds int64 `json:"milliAcceleratorSeconds,omitempty,string"` - // MilliDcuSeconds: Optional. DCU (Dataproc Compute Units) usage in (milliDCU x - // seconds) (see Dataproc Serverless pricing - // (https://cloud.google.com/dataproc-serverless/pricing)). - MilliDcuSeconds int64 `json:"milliDcuSeconds,omitempty,string"` - // ShuffleStorageGbSeconds: Optional. Shuffle storage usage in (GB x seconds) - // (see Dataproc Serverless pricing - // (https://cloud.google.com/dataproc-serverless/pricing)). - ShuffleStorageGbSeconds int64 `json:"shuffleStorageGbSeconds,omitempty,string"` - // ForceSendFields is a list of field names (e.g. "AcceleratorType") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See +// RddOperationEdge: A directed edge representing dependency between two RDDs. +type RddOperationEdge struct { + FromId int64 `json:"fromId,omitempty"` + ToId int64 `json:"toId,omitempty"` + // ForceSendFields is a list of field names (e.g. "FromId") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AcceleratorType") to include in - // API requests with the JSON null value. By default, fields with empty values - // are omitted from API requests. See + // NullFields is a list of field names (e.g. "FromId") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
NullFields []string `json:"-"` } -func (s *UsageMetrics) MarshalJSON() ([]byte, error) { - type NoMethod UsageMetrics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s RddOperationEdge) MarshalJSON() ([]byte, error) { + type NoMethod RddOperationEdge + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// UsageSnapshot: The usage snapshot represents the resources consumed by a -// workload at a specified time. -type UsageSnapshot struct { - // AcceleratorType: Optional. Accelerator type being used, if any - AcceleratorType string `json:"acceleratorType,omitempty"` - // MilliAccelerator: Optional. Milli (one-thousandth) accelerator. (see - // Dataproc Serverless pricing - // (https://cloud.google.com/dataproc-serverless/pricing)) - MilliAccelerator int64 `json:"milliAccelerator,omitempty,string"` - // MilliDcu: Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) - // (see Dataproc Serverless pricing - // (https://cloud.google.com/dataproc-serverless/pricing)). - MilliDcu int64 `json:"milliDcu,omitempty,string"` - // MilliDcuPremium: Optional. Milli (one-thousandth) Dataproc Compute Units - // (DCUs) charged at premium tier (see Dataproc Serverless pricing - // (https://cloud.google.com/dataproc-serverless/pricing)). - MilliDcuPremium int64 `json:"milliDcuPremium,omitempty,string"` - // ShuffleStorageGb: Optional. Shuffle Storage in gigabytes (GB). (see Dataproc - // Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) - ShuffleStorageGb int64 `json:"shuffleStorageGb,omitempty,string"` - // ShuffleStorageGbPremium: Optional. Shuffle Storage in gigabytes (GB) charged - // at premium tier. (see Dataproc Serverless pricing - // (https://cloud.google.com/dataproc-serverless/pricing)) - ShuffleStorageGbPremium int64 `json:"shuffleStorageGbPremium,omitempty,string"` - // SnapshotTime: Optional. The timestamp of the usage snapshot. - SnapshotTime string `json:"snapshotTime,omitempty"` - // ForceSendFields is a list of field names (e.g. "AcceleratorType") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See +// RddOperationGraph: Graph representing RDD dependencies. Consists of edges +// and a root cluster. +type RddOperationGraph struct { + Edges []*RddOperationEdge `json:"edges,omitempty"` + IncomingEdges []*RddOperationEdge `json:"incomingEdges,omitempty"` + OutgoingEdges []*RddOperationEdge `json:"outgoingEdges,omitempty"` + RootCluster *RddOperationCluster `json:"rootCluster,omitempty"` + StageId int64 `json:"stageId,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "Edges") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AcceleratorType") to include in - // API requests with the JSON null value. By default, fields with empty values - // are omitted from API requests. See + // NullFields is a list of field names (e.g. "Edges") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
NullFields []string `json:"-"` } -func (s *UsageSnapshot) MarshalJSON() ([]byte, error) { - type NoMethod UsageSnapshot - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s RddOperationGraph) MarshalJSON() ([]byte, error) { + type NoMethod RddOperationGraph + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ValueValidation: Validation based on a list of allowed values. -type ValueValidation struct { - // Values: Required. List of allowed values for the parameter. - Values []string `json:"values,omitempty"` - // ForceSendFields is a list of field names (e.g. "Values") to unconditionally +// RddOperationNode: A node in the RDD operation graph. Corresponds to a single +// RDD. +type RddOperationNode struct { + Barrier bool `json:"barrier,omitempty"` + Cached bool `json:"cached,omitempty"` + Callsite string `json:"callsite,omitempty"` + Name string `json:"name,omitempty"` + NodeId int64 `json:"nodeId,omitempty"` + // Possible values: + // "DETERMINISTIC_LEVEL_UNSPECIFIED" + // "DETERMINISTIC_LEVEL_DETERMINATE" + // "DETERMINISTIC_LEVEL_UNORDERED" + // "DETERMINISTIC_LEVEL_INDETERMINATE" + OutputDeterministicLevel string `json:"outputDeterministicLevel,omitempty"` + // ForceSendFields is a list of field names (e.g. "Barrier") to unconditionally // include in API requests. By default, fields with empty or default values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Values") to include in API + // NullFields is a list of field names (e.g. "Barrier") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ValueValidation) MarshalJSON() ([]byte, error) { - type NoMethod ValueValidation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s RddOperationNode) MarshalJSON() ([]byte, error) { + type NoMethod RddOperationNode + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// VirtualClusterConfig: The Dataproc cluster config for a cluster that does -// not directly control the underlying compute resources, such as a -// Dataproc-on-GKE cluster -// (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview). -type VirtualClusterConfig struct { - // AuxiliaryServicesConfig: Optional. Configuration of auxiliary services used - // by this cluster. - AuxiliaryServicesConfig *AuxiliaryServicesConfig `json:"auxiliaryServicesConfig,omitempty"` - // KubernetesClusterConfig: Required. The configuration for running the - // Dataproc cluster on Kubernetes. - KubernetesClusterConfig *KubernetesClusterConfig `json:"kubernetesClusterConfig,omitempty"` - // StagingBucket: Optional. A Cloud Storage bucket used to stage job - // dependencies, config files, and job driver console output. If you do not - // specify a staging bucket, Cloud Dataproc will determine a Cloud Storage - // location (US, ASIA, or EU) for your cluster's staging bucket according to - // the Compute Engine zone where your cluster is deployed, and then create and - // manage this project-level, per-location bucket (see Dataproc staging and - // temp buckets - // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). 
- // This field requires a Cloud Storage bucket name, not a gs://... URI to a - // Cloud Storage bucket. - StagingBucket string `json:"stagingBucket,omitempty"` - // ForceSendFields is a list of field names (e.g. "AuxiliaryServicesConfig") to +// RddPartitionInfo: Information about RDD partitions. +type RddPartitionInfo struct { + BlockName string `json:"blockName,omitempty"` + DiskUsed int64 `json:"diskUsed,omitempty,string"` + Executors []string `json:"executors,omitempty"` + MemoryUsed int64 `json:"memoryUsed,omitempty,string"` + StorageLevel string `json:"storageLevel,omitempty"` + // ForceSendFields is a list of field names (e.g. "BlockName") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AuxiliaryServicesConfig") to - // include in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. See + // NullFields is a list of field names (e.g. "BlockName") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *VirtualClusterConfig) MarshalJSON() ([]byte, error) { - type NoMethod VirtualClusterConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s RddPartitionInfo) MarshalJSON() ([]byte, error) { + type NoMethod RddPartitionInfo + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// WorkflowGraph: The workflow graph. -type WorkflowGraph struct { - // Nodes: Output only. The workflow nodes. - Nodes []*WorkflowNode `json:"nodes,omitempty"` - // ForceSendFields is a list of field names (e.g. "Nodes") to unconditionally +// RddStorageInfo: Overall data about RDD storage. +type RddStorageInfo struct { + DataDistribution []*RddDataDistribution `json:"dataDistribution,omitempty"` + DiskUsed int64 `json:"diskUsed,omitempty,string"` + MemoryUsed int64 `json:"memoryUsed,omitempty,string"` + Name string `json:"name,omitempty"` + NumCachedPartitions int64 `json:"numCachedPartitions,omitempty"` + NumPartitions int64 `json:"numPartitions,omitempty"` + Partitions []*RddPartitionInfo `json:"partitions,omitempty"` + RddStorageId int64 `json:"rddStorageId,omitempty"` + StorageLevel string `json:"storageLevel,omitempty"` + // ForceSendFields is a list of field names (e.g. "DataDistribution") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "DataDistribution") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s RddStorageInfo) MarshalJSON() ([]byte, error) { + type NoMethod RddStorageInfo + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// RegexValidation: Validation based on regular expressions. 
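+//
+// The full-match requirement can be mirrored client-side by anchoring the
+// pattern; a minimal sketch (illustrative, using the standard regexp
+// package, where pattern and value are placeholders):
+//
+//	re := regexp.MustCompile("^(?:" + pattern + ")$")
+//	ok := re.MatchString(value)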
+type RegexValidation struct { + // Regexes: Required. RE2 regular expressions used to validate the parameter's + // value. The value must match the regex in its entirety (substring matches are + // not sufficient). + Regexes []string `json:"regexes,omitempty"` + // ForceSendFields is a list of field names (e.g. "Regexes") to unconditionally // include in API requests. By default, fields with empty or default values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Nodes") to include in API + // NullFields is a list of field names (e.g. "Regexes") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *WorkflowGraph) MarshalJSON() ([]byte, error) { - type NoMethod WorkflowGraph - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s RegexValidation) MarshalJSON() ([]byte, error) { + type NoMethod RegexValidation + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// WorkflowMetadata: A Dataproc workflow template resource. -type WorkflowMetadata struct { - // ClusterName: Output only. The name of the target cluster. - ClusterName string `json:"clusterName,omitempty"` - // ClusterUuid: Output only. The UUID of target cluster. +// RepairClusterRequest: A request to repair a cluster. +type RepairClusterRequest struct { + // Cluster: Optional. Cluster to be repaired + Cluster *ClusterToRepair `json:"cluster,omitempty"` + // ClusterUuid: Optional. Specifying the cluster_uuid means the RPC will fail + // (with error NOT_FOUND) if a cluster with the specified UUID does not exist. ClusterUuid string `json:"clusterUuid,omitempty"` - // CreateCluster: Output only. The create cluster operation metadata. - CreateCluster *ClusterOperation `json:"createCluster,omitempty"` - // DagEndTime: Output only. DAG end time, only set for workflows with - // dag_timeout when DAG ends. - DagEndTime string `json:"dagEndTime,omitempty"` - // DagStartTime: Output only. DAG start time, only set for workflows with - // dag_timeout when DAG begins. - DagStartTime string `json:"dagStartTime,omitempty"` - // DagTimeout: Output only. The timeout duration for the DAG of jobs, expressed - // in seconds (see JSON representation of duration - // (https://developers.google.com/protocol-buffers/docs/proto3#json)). - DagTimeout string `json:"dagTimeout,omitempty"` - // DeleteCluster: Output only. The delete cluster operation metadata. - DeleteCluster *ClusterOperation `json:"deleteCluster,omitempty"` - // EndTime: Output only. Workflow end time. - EndTime string `json:"endTime,omitempty"` - // Graph: Output only. The workflow graph. - Graph *WorkflowGraph `json:"graph,omitempty"` - // Parameters: Map from parameter names to values that were used for those - // parameters. - Parameters map[string]string `json:"parameters,omitempty"` - // StartTime: Output only. Workflow start time. - StartTime string `json:"startTime,omitempty"` - // State: Output only. The workflow state. - // - // Possible values: - // "UNKNOWN" - Unused. - // "PENDING" - The operation has been created. - // "RUNNING" - The operation is running. - // "DONE" - The operation is done; either cancelled or completed. 
- State string `json:"state,omitempty"` - // Template: Output only. The resource name of the workflow template as - // described in https://cloud.google.com/apis/design/resource_names. For - // projects.regions.workflowTemplates, the resource name of the template has - // the following format: - // projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For - // projects.locations.workflowTemplates, the resource name of the template has - // the following format: - // projects/{project_id}/locations/{location}/workflowTemplates/{template_id} - Template string `json:"template,omitempty"` - // Version: Output only. The version of template at the time of workflow - // instantiation. - Version int64 `json:"version,omitempty"` - // ForceSendFields is a list of field names (e.g. "ClusterName") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See + // GracefulDecommissionTimeout: Optional. Timeout for graceful YARN + // decommissioning. Graceful decommissioning facilitates the removal of cluster + // nodes without interrupting jobs in progress. The timeout specifies the + // amount of time to wait for jobs to finish before forcefully removing nodes. + // The default timeout is 0 for forceful decommissioning, and the maximum + // timeout period is 1 day (see JSON representation of Duration + // (https://developers.google.com/protocol-buffers/docs/proto3#json)). + // graceful_decommission_timeout is supported in Dataproc image versions 1.2+. + GracefulDecommissionTimeout string `json:"gracefulDecommissionTimeout,omitempty"` + // NodePools: Optional. Node pools and the corresponding repair action to be + // taken. All node pools in this request must be unique; multiple entries for + // the same node pool ID are not allowed. + NodePools []*NodePool `json:"nodePools,omitempty"` + // ParentOperationId: Optional. Operation ID of the parent operation sending + // the repair request. + ParentOperationId string `json:"parentOperationId,omitempty"` + // RequestId: Optional. A unique ID used to identify the request. If the server + // receives two RepairClusterRequests with the same ID, the second request is + // ignored, and the first google.longrunning.Operation created and stored in + // the backend is returned.Recommendation: Set this value to a UUID + // (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must + // contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens + // (-). The maximum length is 40 characters. + RequestId string `json:"requestId,omitempty"` + // ForceSendFields is a list of field names (e.g. "Cluster") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ClusterName") to include in API + // NullFields is a list of field names (e.g. "Cluster") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
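A minimal sketch of populating the RepairClusterRequest described above, assuming the generated dataproc/v1 package this vendored file belongs to; the timeout value is a placeholder, and github.com/google/uuid is just one way to produce the recommended UUID request ID:

package main

import (
	"fmt"

	"github.com/google/uuid"
	dataproc "google.golang.org/api/dataproc/v1"
)

func main() {
	req := &dataproc.RepairClusterRequest{
		// A UUID makes retries idempotent: a second request with the same
		// ID is ignored and the first Operation is returned.
		RequestId: uuid.NewString(),
		// Duration JSON mapping: seconds with an "s" suffix. 0 means
		// forceful decommissioning; the maximum is 1 day ("86400s").
		GracefulDecommissionTimeout: "3600s",
	}
	b, _ := req.MarshalJSON()
	fmt.Println(string(b))
}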
NullFields []string `json:"-"` } -func (s *WorkflowMetadata) MarshalJSON() ([]byte, error) { - type NoMethod WorkflowMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s RepairClusterRequest) MarshalJSON() ([]byte, error) { + type NoMethod RepairClusterRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// WorkflowNode: The workflow node. -type WorkflowNode struct { - // Error: Output only. The error detail. - Error string `json:"error,omitempty"` - // JobId: Output only. The job id; populated after the node enters RUNNING - // state. - JobId string `json:"jobId,omitempty"` - // PrerequisiteStepIds: Output only. Node's prerequisite nodes. - PrerequisiteStepIds []string `json:"prerequisiteStepIds,omitempty"` - // State: Output only. The node state. +type RepairNodeGroupRequest struct { + // InstanceNames: Required. Names of the instances to be repaired. These + // instances must belong to the specified node pool. + InstanceNames []string `json:"instanceNames,omitempty"` + // RepairAction: Required. Repair action to take on the specified resources of + // the node pool. // // Possible values: - // "NODE_STATE_UNSPECIFIED" - State is unspecified. - // "BLOCKED" - The node is awaiting prerequisite node to finish. - // "RUNNABLE" - The node is runnable but not running. - // "RUNNING" - The node is running. - // "COMPLETED" - The node completed successfully. - // "FAILED" - The node failed. A node can be marked FAILED because its - // ancestor or peer failed. - State string `json:"state,omitempty"` - // StepId: Output only. The name of the node. - StepId string `json:"stepId,omitempty"` - // ForceSendFields is a list of field names (e.g. "Error") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See + // "REPAIR_ACTION_UNSPECIFIED" - No action will be taken by default. + // "REPLACE" - Replace the specified list of nodes. + RepairAction string `json:"repairAction,omitempty"` + // RequestId: Optional. A unique ID used to identify the request. If the server + // receives two RepairNodeGroupRequests with the same ID, the second request is + // ignored and the first google.longrunning.Operation created and stored in the + // backend is returned.Recommendation: Set this value to a UUID + // (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must + // contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens + // (-). The maximum length is 40 characters. + RequestId string `json:"requestId,omitempty"` + // ForceSendFields is a list of field names (e.g. "InstanceNames") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Error") to include in API + // NullFields is a list of field names (e.g. "InstanceNames") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
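The ForceSendFields/NullFields pattern repeated throughout these generated types is easiest to see in the marshaled output. A sketch under the same dataproc/v1 package assumption; the field values are illustrative:

package main

import (
	"fmt"

	dataproc "google.golang.org/api/dataproc/v1"
)

func main() {
	req := &dataproc.RepairNodeGroupRequest{
		InstanceNames: []string{"node-0", "node-1"},
		RepairAction:  "REPLACE",
	}
	// Without ForceSendFields, the empty RequestId is omitted entirely.
	b, _ := req.MarshalJSON()
	fmt.Println(string(b)) // {"instanceNames":["node-0","node-1"],"repairAction":"REPLACE"}

	// Forcing the field sends its zero value explicitly, so the output
	// now also contains "requestId":"".
	req.ForceSendFields = []string{"RequestId"}
	b, _ = req.MarshalJSON()
	fmt.Println(string(b))
}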
NullFields []string `json:"-"` } -func (s *WorkflowNode) MarshalJSON() ([]byte, error) { - type NoMethod WorkflowNode - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s RepairNodeGroupRequest) MarshalJSON() ([]byte, error) { + type NoMethod RepairNodeGroupRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// WorkflowTemplate: A Dataproc workflow template resource. -type WorkflowTemplate struct { - // CreateTime: Output only. The time template was created. - CreateTime string `json:"createTime,omitempty"` - // DagTimeout: Optional. Timeout duration for the DAG of jobs, expressed in - // seconds (see JSON representation of duration - // (https://developers.google.com/protocol-buffers/docs/proto3#json)). The - // timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). - // The timer begins when the first job is submitted. If the workflow is running - // at the end of the timeout period, any remaining jobs are cancelled, the - // workflow is ended, and if the workflow was running on a managed cluster, the - // cluster is deleted. - DagTimeout string `json:"dagTimeout,omitempty"` - // EncryptionConfig: Optional. Encryption settings for encrypting workflow - // template job arguments. - EncryptionConfig *GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig `json:"encryptionConfig,omitempty"` - Id string `json:"id,omitempty"` - // Jobs: Required. The Directed Acyclic Graph of Jobs to submit. - Jobs []*OrderedJob `json:"jobs,omitempty"` - // Labels: Optional. The labels to associate with this template. These labels - // will be propagated to all jobs and clusters created by the workflow - // instance.Label keys must contain 1 to 63 characters, and must conform to RFC - // 1035 (https://www.ietf.org/rfc/rfc1035.txt).Label values may be empty, but, - // if present, must contain 1 to 63 characters, and must conform to RFC 1035 - // (https://www.ietf.org/rfc/rfc1035.txt).No more than 32 labels can be - // associated with a template. - Labels map[string]string `json:"labels,omitempty"` - // Name: Output only. The resource name of the workflow template, as described - // in https://cloud.google.com/apis/design/resource_names. For - // projects.regions.workflowTemplates, the resource name of the template has - // the following format: - // projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For - // projects.locations.workflowTemplates, the resource name of the template has - // the following format: - // projects/{project_id}/locations/{location}/workflowTemplates/{template_id} - Name string `json:"name,omitempty"` - // Parameters: Optional. Template parameters whose values are substituted into - // the template. Values for parameters must be provided when the template is - // instantiated. - Parameters []*TemplateParameter `json:"parameters,omitempty"` - // Placement: Required. WorkflowTemplate scheduling information. - Placement *WorkflowTemplatePlacement `json:"placement,omitempty"` - // UpdateTime: Output only. The time template was last updated. - UpdateTime string `json:"updateTime,omitempty"` - // Version: Optional. Used to perform a consistent read-modify-write.This field - // should be left blank for a CreateWorkflowTemplate request. It is required - // for an UpdateWorkflowTemplate request, and must match the current server - // version. 
A typical update template flow would fetch the current template - // with a GetWorkflowTemplate request, which will return the current template - // with the version field filled in with the current server version. The user - // updates other fields in the template, then returns it as part of the - // UpdateWorkflowTemplate request. - Version int64 `json:"version,omitempty"` +// RepositoryConfig: Configuration for dependency repositories. +type RepositoryConfig struct { + // PypiRepositoryConfig: Optional. Configuration for PyPi repository. + PypiRepositoryConfig *PyPiRepositoryConfig `json:"pypiRepositoryConfig,omitempty"` + // ForceSendFields is a list of field names (e.g. "PypiRepositoryConfig") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "PypiRepositoryConfig") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} - // ServerResponse contains the HTTP response code and headers from the server. - googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreateTime") to +func (s RepositoryConfig) MarshalJSON() ([]byte, error) { + type NoMethod RepositoryConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ReservationAffinity: Reservation Affinity for consuming Zonal reservation. +type ReservationAffinity struct { + // ConsumeReservationType: Optional. Type of reservation to consume. + // + // Possible values: + // "TYPE_UNSPECIFIED" + // "NO_RESERVATION" - Do not consume from any allocated capacity. + // "ANY_RESERVATION" - Consume any reservation available. + // "SPECIFIC_RESERVATION" - Must consume from a specific reservation. The key + // and value fields must be specified to identify the reservation. + ConsumeReservationType string `json:"consumeReservationType,omitempty"` + // Key: Optional. Corresponds to the label key of the reservation resource. + Key string `json:"key,omitempty"` + // Values: Optional. Corresponds to the label values of the reservation + // resource. + Values []string `json:"values,omitempty"` + // ForceSendFields is a list of field names (e.g. "ConsumeReservationType") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreateTime") to include in API + // NullFields is a list of field names (e.g. "ConsumeReservationType") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ReservationAffinity) MarshalJSON() ([]byte, error) { + type NoMethod ReservationAffinity + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ResizeNodeGroupRequest: A request to resize a node group. +type ResizeNodeGroupRequest struct { + // GracefulDecommissionTimeout: Optional.
Timeout for graceful YARN + decommissioning. Graceful decommissioning + (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/scaling-clusters#graceful_decommissioning) + allows the removal of nodes from the Compute Engine node group without + interrupting jobs in progress. This timeout specifies how long to wait for + jobs in progress to finish before forcefully removing nodes (and potentially + interrupting jobs). The default timeout is 0 (for forceful decommission), and + the maximum allowed timeout is 1 day (see JSON representation of Duration + (https://developers.google.com/protocol-buffers/docs/proto3#json)). Only + supported on Dataproc image versions 1.2 and higher. + GracefulDecommissionTimeout string `json:"gracefulDecommissionTimeout,omitempty"` + // ParentOperationId: Optional. Operation ID of the parent operation sending + // the resize request. + ParentOperationId string `json:"parentOperationId,omitempty"` + // RequestId: Optional. A unique ID used to identify the request. If the server + // receives two ResizeNodeGroupRequests + // (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.ResizeNodeGroupRequests) + // with the same ID, the second request is ignored and the first + // google.longrunning.Operation created and stored in the backend is + // returned.Recommendation: Set this value to a UUID + // (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must + // contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens + // (-). The maximum length is 40 characters. + RequestId string `json:"requestId,omitempty"` + // Size: Required. The number of running instances for the node group to + // maintain. The group adds or removes instances to maintain the number of + // instances specified by this parameter. + Size int64 `json:"size,omitempty"` + // ForceSendFields is a list of field names (e.g. + // "GracefulDecommissionTimeout") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. See https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields + // for more details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "GracefulDecommissionTimeout") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ResizeNodeGroupRequest) MarshalJSON() ([]byte, error) { + type NoMethod ResizeNodeGroupRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type ResourceInformation struct { + Addresses []string `json:"addresses,omitempty"` + Name string `json:"name,omitempty"` + // ForceSendFields is a list of field names (e.g. "Addresses") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Addresses") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
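A sketch of the resize request documented above, under the same dataproc/v1 package assumption; the target size and the Duration-encoded timeout are placeholders:

package main

import (
	"fmt"

	dataproc "google.golang.org/api/dataproc/v1"
)

func main() {
	req := &dataproc.ResizeNodeGroupRequest{
		// Target number of running instances for the node group; the
		// service adds or removes instances to reach this count.
		Size: 5,
		// Wait up to one hour for in-progress jobs before forcefully
		// removing nodes ("0s" would decommission immediately).
		GracefulDecommissionTimeout: "3600s",
	}
	b, _ := req.MarshalJSON()
	fmt.Println(string(b))
}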
NullFields []string `json:"-"` } -func (s *WorkflowTemplate) MarshalJSON() ([]byte, error) { - type NoMethod WorkflowTemplate - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s ResourceInformation) MarshalJSON() ([]byte, error) { + type NoMethod ResourceInformation + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// WorkflowTemplatePlacement: Specifies workflow execution target.Either -// managed_cluster or cluster_selector is required. -type WorkflowTemplatePlacement struct { - // ClusterSelector: Optional. A selector that chooses target cluster for jobs - // based on metadata.The selector is evaluated at the time each job is - // submitted. - ClusterSelector *ClusterSelector `json:"clusterSelector,omitempty"` - // ManagedCluster: A cluster that is managed by the workflow. - ManagedCluster *ManagedCluster `json:"managedCluster,omitempty"` - // ForceSendFields is a list of field names (e.g. "ClusterSelector") to +// ResourceProfileInfo: Resource profile that contains information about all +// the resources required by executors and tasks. +type ResourceProfileInfo struct { + ExecutorResources map[string]ExecutorResourceRequest `json:"executorResources,omitempty"` + ResourceProfileId int64 `json:"resourceProfileId,omitempty"` + TaskResources map[string]TaskResourceRequest `json:"taskResources,omitempty"` + // ForceSendFields is a list of field names (e.g. "ExecutorResources") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ClusterSelector") to include in + // NullFields is a list of field names (e.g. "ExecutorResources") to include in // API requests with the JSON null value. By default, fields with empty values // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *WorkflowTemplatePlacement) MarshalJSON() ([]byte, error) { - type NoMethod WorkflowTemplatePlacement - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s ResourceProfileInfo) MarshalJSON() ([]byte, error) { + type NoMethod ResourceProfileInfo + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// YarnApplication: A YARN application created by a job. Application -// information is a subset of -// org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto.Beta Feature: -// This report is available for testing purposes only. It may be changed before -// final release. -type YarnApplication struct { - // Name: Required. The application name. - Name string `json:"name,omitempty"` - // Progress: Required. The numerical progress of the application, from 1 to - // 100. - Progress float64 `json:"progress,omitempty"` - // State: Required. The application state. - // - // Possible values: - // "STATE_UNSPECIFIED" - Status is unspecified. - // "NEW" - Status is NEW. - // "NEW_SAVING" - Status is NEW_SAVING. - // "SUBMITTED" - Status is SUBMITTED. - // "ACCEPTED" - Status is ACCEPTED. - // "RUNNING" - Status is RUNNING. - // "FINISHED" - Status is FINISHED. - // "FAILED" - Status is FAILED. - // "KILLED" - Status is KILLED. - State string `json:"state,omitempty"` - // TrackingUrl: Optional. 
The HTTP URL of the ApplicationMaster, HistoryServer, - or TimelineServer that provides application-specific information. The URL - uses the internal hostname, and requires a proxy server for resolution and, - possibly, access. - TrackingUrl string `json:"trackingUrl,omitempty"` - // ForceSendFields is a list of field names (e.g. "Name") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See +// RuntimeConfig: Runtime configuration for a workload. +type RuntimeConfig struct { + // AutotuningConfig: Optional. Autotuning configuration of the workload. + AutotuningConfig *AutotuningConfig `json:"autotuningConfig,omitempty"` + // Cohort: Optional. Cohort identifier. Identifies families of workloads + // having the same shape, e.g. daily ETL jobs. + Cohort string `json:"cohort,omitempty"` + // ContainerImage: Optional. Custom container image for the job + // runtime environment. If not specified, a default container image will be + // used. + ContainerImage string `json:"containerImage,omitempty"` + // Properties: Optional. A mapping of property names to values, which are used + // to configure workload execution. + Properties map[string]string `json:"properties,omitempty"` + // RepositoryConfig: Optional. Dependency repository configuration. + RepositoryConfig *RepositoryConfig `json:"repositoryConfig,omitempty"` + // Version: Optional. Version of the batch runtime. + Version string `json:"version,omitempty"` + // ForceSendFields is a list of field names (e.g. "AutotuningConfig") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Name") to include in API requests - // with the JSON null value. By default, fields with empty values are omitted - // from API requests. See + // NullFields is a list of field names (e.g. "AutotuningConfig") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *YarnApplication) MarshalJSON() ([]byte, error) { - type NoMethod YarnApplication - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +func (s RuntimeConfig) MarshalJSON() ([]byte, error) { + type NoMethod RuntimeConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -func (s *YarnApplication) UnmarshalJSON(data []byte) error { - type NoMethod YarnApplication - var s1 struct { - Progress gensupport.JSONFloat64 `json:"progress"` - *NoMethod - } - s1.NoMethod = (*NoMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.Progress = float64(s1.Progress) - return nil +// RuntimeInfo: Runtime information about workload execution. +type RuntimeInfo struct { + // ApproximateUsage: Output only.
Approximate workload resource usage, + // calculated when the workload completes (see Dataproc Serverless pricing + // (https://cloud.google.com/dataproc-serverless/pricing)).Note: This metric + // calculation may change in the future, for example, to capture cumulative + // workload resource consumption during workload execution (see the Dataproc + // Serverless release notes + // (https://cloud.google.com/dataproc-serverless/docs/release-notes) for + // announcements, changes, fixes and other Dataproc developments). + ApproximateUsage *UsageMetrics `json:"approximateUsage,omitempty"` + // CurrentUsage: Output only. Snapshot of current workload resource usage. + CurrentUsage *UsageSnapshot `json:"currentUsage,omitempty"` + // DiagnosticOutputUri: Output only. A URI pointing to the location of the + // diagnostics tarball. + DiagnosticOutputUri string `json:"diagnosticOutputUri,omitempty"` + // Endpoints: Output only. Map of remote access endpoints (such as web + // interfaces and APIs) to their URIs. + Endpoints map[string]string `json:"endpoints,omitempty"` + // OutputUri: Output only. A URI pointing to the location of the stdout and + // stderr of the workload. + OutputUri string `json:"outputUri,omitempty"` + // ForceSendFields is a list of field names (e.g. "ApproximateUsage") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "ApproximateUsage") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` } -type ProjectsLocationsAutoscalingPoliciesCreateCall struct { - s *Service - parent string - autoscalingpolicy *AutoscalingPolicy - urlParams_ gensupport.URLParams +func (s RuntimeInfo) MarshalJSON() ([]byte, error) { + type NoMethod RuntimeInfo + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SearchSessionSparkApplicationExecutorStageSummaryResponse: List of Executors +// associated with a Spark Application Stage. +type SearchSessionSparkApplicationExecutorStageSummaryResponse struct { + // NextPageToken: This token is included in the response if there are more + // results to fetch. To fetch additional results, provide this value as the + // page_token in a subsequent + // SearchSessionSparkApplicationExecutorStageSummaryRequest. + NextPageToken string `json:"nextPageToken,omitempty"` + // SparkApplicationStageExecutors: Details about executors used by the + // application stage. + SparkApplicationStageExecutors []*ExecutorStageSummary `json:"sparkApplicationStageExecutors,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "NextPageToken") to include in API + // requests with the JSON null value. 
By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SearchSessionSparkApplicationExecutorStageSummaryResponse) MarshalJSON() ([]byte, error) { + type NoMethod SearchSessionSparkApplicationExecutorStageSummaryResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SearchSessionSparkApplicationExecutorsResponse: List of Executors associated +// with a Spark Application. +type SearchSessionSparkApplicationExecutorsResponse struct { + // NextPageToken: This token is included in the response if there are more + // results to fetch. To fetch additional results, provide this value as the + // page_token in a subsequent SearchSessionSparkApplicationExecutorsRequest. + NextPageToken string `json:"nextPageToken,omitempty"` + // SparkApplicationExecutors: Details about executors used by the application. + SparkApplicationExecutors []*ExecutorSummary `json:"sparkApplicationExecutors,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "NextPageToken") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SearchSessionSparkApplicationExecutorsResponse) MarshalJSON() ([]byte, error) { + type NoMethod SearchSessionSparkApplicationExecutorsResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SearchSessionSparkApplicationJobsResponse: A list of Jobs associated with a +// Spark Application. +type SearchSessionSparkApplicationJobsResponse struct { + // NextPageToken: This token is included in the response if there are more + // results to fetch. To fetch additional results, provide this value as the + // page_token in a subsequent SearchSessionSparkApplicationJobsRequest. + NextPageToken string `json:"nextPageToken,omitempty"` + // SparkApplicationJobs: Output only. Data corresponding to a spark job. + SparkApplicationJobs []*JobData `json:"sparkApplicationJobs,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "NextPageToken") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s SearchSessionSparkApplicationJobsResponse) MarshalJSON() ([]byte, error) { + type NoMethod SearchSessionSparkApplicationJobsResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SearchSessionSparkApplicationSqlQueriesResponse: List of all queries for a +// Spark Application. +type SearchSessionSparkApplicationSqlQueriesResponse struct { + // NextPageToken: This token is included in the response if there are more + // results to fetch. To fetch additional results, provide this value as the + // page_token in a subsequent SearchSessionSparkApplicationSqlQueriesRequest. + NextPageToken string `json:"nextPageToken,omitempty"` + // SparkApplicationSqlQueries: Output only. SQL Execution Data + SparkApplicationSqlQueries []*SqlExecutionUiData `json:"sparkApplicationSqlQueries,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "NextPageToken") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SearchSessionSparkApplicationSqlQueriesResponse) MarshalJSON() ([]byte, error) { + type NoMethod SearchSessionSparkApplicationSqlQueriesResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SearchSessionSparkApplicationStageAttemptTasksResponse: List of tasks for a +// stage of a Spark Application +type SearchSessionSparkApplicationStageAttemptTasksResponse struct { + // NextPageToken: This token is included in the response if there are more + // results to fetch. To fetch additional results, provide this value as the + // page_token in a subsequent + // SearchSessionSparkApplicationStageAttemptTasksRequest. + NextPageToken string `json:"nextPageToken,omitempty"` + // SparkApplicationStageAttemptTasks: Output only. Data corresponding to tasks + // created by spark. + SparkApplicationStageAttemptTasks []*TaskData `json:"sparkApplicationStageAttemptTasks,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "NextPageToken") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s SearchSessionSparkApplicationStageAttemptTasksResponse) MarshalJSON() ([]byte, error) { + type NoMethod SearchSessionSparkApplicationStageAttemptTasksResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SearchSessionSparkApplicationStageAttemptsResponse: A list of Stage Attempts +// for a Stage of a Spark Application. +type SearchSessionSparkApplicationStageAttemptsResponse struct { + // NextPageToken: This token is included in the response if there are more + // results to fetch. To fetch additional results, provide this value as the + // page_token in a subsequent + // SearchSessionSparkApplicationStageAttemptsRequest. + NextPageToken string `json:"nextPageToken,omitempty"` + // SparkApplicationStageAttempts: Output only. Data corresponding to stage + // attempts. + SparkApplicationStageAttempts []*StageData `json:"sparkApplicationStageAttempts,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "NextPageToken") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SearchSessionSparkApplicationStageAttemptsResponse) MarshalJSON() ([]byte, error) { + type NoMethod SearchSessionSparkApplicationStageAttemptsResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SearchSessionSparkApplicationStagesResponse: A list of stages associated +// with a Spark Application. +type SearchSessionSparkApplicationStagesResponse struct { + // NextPageToken: This token is included in the response if there are more + // results to fetch. To fetch additional results, provide this value as the + // page_token in a subsequent SearchSessionSparkApplicationStagesRequest. + NextPageToken string `json:"nextPageToken,omitempty"` + // SparkApplicationStages: Output only. Data corresponding to a stage. + SparkApplicationStages []*StageData `json:"sparkApplicationStages,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "NextPageToken") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"` +} + +func (s SearchSessionSparkApplicationStagesResponse) MarshalJSON() ([]byte, error) { + type NoMethod SearchSessionSparkApplicationStagesResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SearchSessionSparkApplicationsResponse: A list of summaries of Spark +// Applications +type SearchSessionSparkApplicationsResponse struct { + // NextPageToken: This token is included in the response if there are more + // results to fetch. To fetch additional results, provide this value as the + // page_token in a subsequent SearchSessionSparkApplicationsRequest. + NextPageToken string `json:"nextPageToken,omitempty"` + // SparkApplications: Output only. High level information corresponding to an + // application. + SparkApplications []*SparkApplication `json:"sparkApplications,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "NextPageToken") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SearchSessionSparkApplicationsResponse) MarshalJSON() ([]byte, error) { + type NoMethod SearchSessionSparkApplicationsResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SearchSparkApplicationExecutorStageSummaryResponse: List of Executors +// associated with a Spark Application Stage. +type SearchSparkApplicationExecutorStageSummaryResponse struct { + // NextPageToken: This token is included in the response if there are more + // results to fetch. To fetch additional results, provide this value as the + // page_token in a subsequent SearchSparkApplicationExecutorsListRequest. + NextPageToken string `json:"nextPageToken,omitempty"` + // SparkApplicationStageExecutors: Details about executors used by the + // application stage. + SparkApplicationStageExecutors []*ExecutorStageSummary `json:"sparkApplicationStageExecutors,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "NextPageToken") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"` +} + +func (s SearchSparkApplicationExecutorStageSummaryResponse) MarshalJSON() ([]byte, error) { + type NoMethod SearchSparkApplicationExecutorStageSummaryResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SearchSparkApplicationExecutorsResponse: List of Executors associated with a +// Spark Application. +type SearchSparkApplicationExecutorsResponse struct { + // NextPageToken: This token is included in the response if there are more + // results to fetch. To fetch additional results, provide this value as the + // page_token in a subsequent SearchSparkApplicationExecutorsListRequest. + NextPageToken string `json:"nextPageToken,omitempty"` + // SparkApplicationExecutors: Details about executors used by the application. + SparkApplicationExecutors []*ExecutorSummary `json:"sparkApplicationExecutors,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "NextPageToken") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SearchSparkApplicationExecutorsResponse) MarshalJSON() ([]byte, error) { + type NoMethod SearchSparkApplicationExecutorsResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SearchSparkApplicationJobsResponse: A list of Jobs associated with a Spark +// Application. +type SearchSparkApplicationJobsResponse struct { + // NextPageToken: This token is included in the response if there are more + // results to fetch. To fetch additional results, provide this value as the + // page_token in a subsequent SearchSparkApplicationJobsRequest. + NextPageToken string `json:"nextPageToken,omitempty"` + // SparkApplicationJobs: Output only. Data corresponding to a spark job. + SparkApplicationJobs []*JobData `json:"sparkApplicationJobs,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "NextPageToken") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SearchSparkApplicationJobsResponse) MarshalJSON() ([]byte, error) { + type NoMethod SearchSparkApplicationJobsResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SearchSparkApplicationSqlQueriesResponse: List of all queries for a Spark +// Application. 
+type SearchSparkApplicationSqlQueriesResponse struct { + // NextPageToken: This token is included in the response if there are more + // results to fetch. To fetch additional results, provide this value as the + // page_token in a subsequent SearchSparkApplicationSqlQueriesRequest. + NextPageToken string `json:"nextPageToken,omitempty"` + // SparkApplicationSqlQueries: Output only. SQL Execution Data + SparkApplicationSqlQueries []*SqlExecutionUiData `json:"sparkApplicationSqlQueries,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "NextPageToken") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SearchSparkApplicationSqlQueriesResponse) MarshalJSON() ([]byte, error) { + type NoMethod SearchSparkApplicationSqlQueriesResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SearchSparkApplicationStageAttemptTasksResponse: List of tasks for a stage +// of a Spark Application +type SearchSparkApplicationStageAttemptTasksResponse struct { + // NextPageToken: This token is included in the response if there are more + // results to fetch. To fetch additional results, provide this value as the + // page_token in a subsequent ListSparkApplicationStageAttemptTasksRequest. + NextPageToken string `json:"nextPageToken,omitempty"` + // SparkApplicationStageAttemptTasks: Output only. Data corresponding to tasks + // created by spark. + SparkApplicationStageAttemptTasks []*TaskData `json:"sparkApplicationStageAttemptTasks,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "NextPageToken") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SearchSparkApplicationStageAttemptTasksResponse) MarshalJSON() ([]byte, error) { + type NoMethod SearchSparkApplicationStageAttemptTasksResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SearchSparkApplicationStageAttemptsResponse: A list of Stage Attempts for a +// Stage of a Spark Application. +type SearchSparkApplicationStageAttemptsResponse struct { + // NextPageToken: This token is included in the response if there are more + // results to fetch. 
To fetch additional results, provide this value as the + page_token in a subsequent ListSparkApplicationStageAttemptsRequest. + NextPageToken string `json:"nextPageToken,omitempty"` + // SparkApplicationStageAttempts: Output only. Data corresponding to stage + // attempts. + SparkApplicationStageAttempts []*StageData `json:"sparkApplicationStageAttempts,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "NextPageToken") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SearchSparkApplicationStageAttemptsResponse) MarshalJSON() ([]byte, error) { + type NoMethod SearchSparkApplicationStageAttemptsResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SearchSparkApplicationStagesResponse: A list of stages associated with a +// Spark Application. +type SearchSparkApplicationStagesResponse struct { + // NextPageToken: This token is included in the response if there are more + // results to fetch. To fetch additional results, provide this value as the + // page_token in a subsequent SearchSparkApplicationStagesRequest. + NextPageToken string `json:"nextPageToken,omitempty"` + // SparkApplicationStages: Output only. Data corresponding to a stage. + SparkApplicationStages []*StageData `json:"sparkApplicationStages,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "NextPageToken") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SearchSparkApplicationStagesResponse) MarshalJSON() ([]byte, error) { + type NoMethod SearchSparkApplicationStagesResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SearchSparkApplicationsResponse: A list of summaries of Spark Applications +type SearchSparkApplicationsResponse struct { + // NextPageToken: This token is included in the response if there are more + // results to fetch. To fetch additional results, provide this value as the + // page_token in a subsequent SearchSparkApplicationsRequest. + NextPageToken string `json:"nextPageToken,omitempty"` + // SparkApplications: Output only. High level information corresponding to an + // application.
+ SparkApplications []*SparkApplication `json:"sparkApplications,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "NextPageToken") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SearchSparkApplicationsResponse) MarshalJSON() ([]byte, error) { + type NoMethod SearchSparkApplicationsResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SecurityConfig: Security related configuration, including encryption, +// Kerberos, etc. +type SecurityConfig struct { + // IdentityConfig: Optional. Identity related configuration, including service + // account based secure multi-tenancy user mappings. + IdentityConfig *IdentityConfig `json:"identityConfig,omitempty"` + // KerberosConfig: Optional. Kerberos related configuration. + KerberosConfig *KerberosConfig `json:"kerberosConfig,omitempty"` + // ForceSendFields is a list of field names (e.g. "IdentityConfig") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "IdentityConfig") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SecurityConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// Session: A representation of a session. +type Session struct { + // CreateTime: Output only. The time when the session was created. + CreateTime string `json:"createTime,omitempty"` + // Creator: Output only. The email address of the user who created the session. + Creator string `json:"creator,omitempty"` + // EnvironmentConfig: Optional. Environment configuration for the session + // execution. + EnvironmentConfig *EnvironmentConfig `json:"environmentConfig,omitempty"` + // JupyterSession: Optional. Jupyter session config. + JupyterSession *JupyterConfig `json:"jupyterSession,omitempty"` + // Labels: Optional. The labels to associate with the session. Label keys must + // contain 1 to 63 characters, and must conform to RFC 1035 + // (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if + // present, must contain 1 to 63 characters, and must conform to RFC 1035 + // (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + // associated with a session. + Labels map[string]string `json:"labels,omitempty"` + // Name: Required. The resource name of the session. + Name string `json:"name,omitempty"` + // RuntimeConfig: Optional. 
Runtime configuration for the session execution. + RuntimeConfig *RuntimeConfig `json:"runtimeConfig,omitempty"` + // RuntimeInfo: Output only. Runtime information about session execution. + RuntimeInfo *RuntimeInfo `json:"runtimeInfo,omitempty"` + // SessionTemplate: Optional. The session template used by the session.Only + // resource names, including project ID and location, are valid.Example: * + // https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id] + // * + // projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_ + // id]The template must be in the same project and Dataproc region as the + // session. + SessionTemplate string `json:"sessionTemplate,omitempty"` + // SparkConnectSession: Optional. Spark connect session config. + SparkConnectSession *SparkConnectConfig `json:"sparkConnectSession,omitempty"` + // State: Output only. A state of the session. + // + // Possible values: + // "STATE_UNSPECIFIED" - The session state is unknown. + // "CREATING" - The session is created prior to running. + // "ACTIVE" - The session is running. + // "TERMINATING" - The session is terminating. + // "TERMINATED" - The session is terminated successfully. + // "FAILED" - The session is no longer running due to an error. + State string `json:"state,omitempty"` + // StateHistory: Output only. Historical state information for the session. + StateHistory []*SessionStateHistory `json:"stateHistory,omitempty"` + // StateMessage: Output only. Session state details, such as the failure + // description if the state is FAILED. + StateMessage string `json:"stateMessage,omitempty"` + // StateTime: Output only. The time when the session entered the current state. + StateTime string `json:"stateTime,omitempty"` + // User: Optional. The email address of the user who owns the session. + User string `json:"user,omitempty"` + // Uuid: Output only. A session UUID (Unique Universal Identifier). The service + // generates this value when it creates the session. + Uuid string `json:"uuid,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "CreateTime") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s Session) MarshalJSON() ([]byte, error) { + type NoMethod Session + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SessionOperationMetadata: Metadata describing the Session operation. +type SessionOperationMetadata struct { + // CreateTime: The time when the operation was created. + CreateTime string `json:"createTime,omitempty"` + // Description: Short description of the operation. + Description string `json:"description,omitempty"` + // DoneTime: The time when the operation was finished. + DoneTime string `json:"doneTime,omitempty"` + // Labels: Labels associated with the operation. 
+ Labels map[string]string `json:"labels,omitempty"` + // OperationType: The operation type. + // + // Possible values: + // "SESSION_OPERATION_TYPE_UNSPECIFIED" - Session operation type is unknown. + // "CREATE" - Create Session operation type. + // "TERMINATE" - Terminate Session operation type. + // "DELETE" - Delete Session operation type. + OperationType string `json:"operationType,omitempty"` + // Session: Name of the session for the operation. + Session string `json:"session,omitempty"` + // SessionUuid: Session UUID for the operation. + SessionUuid string `json:"sessionUuid,omitempty"` + // Warnings: Warnings encountered during operation execution. + Warnings []string `json:"warnings,omitempty"` + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "CreateTime") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SessionOperationMetadata) MarshalJSON() ([]byte, error) { + type NoMethod SessionOperationMetadata + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SessionStateHistory: Historical state information. +type SessionStateHistory struct { + // State: Output only. The state of the session at this point in the session + // history. + // + // Possible values: + // "STATE_UNSPECIFIED" - The session state is unknown. + // "CREATING" - The session is created prior to running. + // "ACTIVE" - The session is running. + // "TERMINATING" - The session is terminating. + // "TERMINATED" - The session is terminated successfully. + // "FAILED" - The session is no longer running due to an error. + State string `json:"state,omitempty"` + // StateMessage: Output only. Details about the state at this point in the + // session history. + StateMessage string `json:"stateMessage,omitempty"` + // StateStartTime: Output only. The time when the session entered the + // historical state. + StateStartTime string `json:"stateStartTime,omitempty"` + // ForceSendFields is a list of field names (e.g. "State") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "State") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SessionStateHistory) MarshalJSON() ([]byte, error) { + type NoMethod SessionStateHistory + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SessionTemplate: A representation of a session template. +type SessionTemplate struct { + // CreateTime: Output only. The time when the template was created. + CreateTime string `json:"createTime,omitempty"` + // Creator: Output only. The email address of the user who created the + // template. 
+ Creator string `json:"creator,omitempty"`
+ // Description: Optional. Brief description of the template.
+ Description string `json:"description,omitempty"`
+ // EnvironmentConfig: Optional. Environment configuration for session
+ // execution.
+ EnvironmentConfig *EnvironmentConfig `json:"environmentConfig,omitempty"`
+ // JupyterSession: Optional. Jupyter session config.
+ JupyterSession *JupyterConfig `json:"jupyterSession,omitempty"`
+ // Labels: Optional. Labels to associate with sessions created using this
+ // template. Label keys must contain 1 to 63 characters, and must conform to
+ // RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty,
+ // but, if present, must contain 1 to 63 characters and conform to RFC 1035
+ // (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
+ // associated with a session.
+ Labels map[string]string `json:"labels,omitempty"`
+ // Name: Required. The resource name of the session template.
+ Name string `json:"name,omitempty"`
+ // RuntimeConfig: Optional. Runtime configuration for session execution.
+ RuntimeConfig *RuntimeConfig `json:"runtimeConfig,omitempty"`
+ // SparkConnectSession: Optional. Spark connect session config.
+ SparkConnectSession *SparkConnectConfig `json:"sparkConnectSession,omitempty"`
+ // UpdateTime: Output only. The time the template was last updated.
+ UpdateTime string `json:"updateTime,omitempty"`
+ // Uuid: Output only. A session template UUID (Universally Unique
+ // Identifier). The service generates this value when it creates the session
+ // template.
+ Uuid string `json:"uuid,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the server.
+ googleapi.ServerResponse `json:"-"`
+ // ForceSendFields is a list of field names (e.g. "CreateTime") to
+ // unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "CreateTime") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s SessionTemplate) MarshalJSON() ([]byte, error) {
+ type NoMethod SessionTemplate
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// SetIamPolicyRequest: Request message for SetIamPolicy method.
+type SetIamPolicyRequest struct {
+ // Policy: REQUIRED: The complete policy to be applied to the resource. The
+ // size of the policy is limited to a few tens of kilobytes. An empty policy is
+ // a valid policy, but certain Google Cloud services (such as Projects) might
+ // reject it.
+ Policy *Policy `json:"policy,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "Policy") to unconditionally
+ // include in API requests. By default, fields with empty or default values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "Policy") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
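+ // Illustrative sketch (hypothetical, not part of the generated file): with
+ //   req := &SetIamPolicyRequest{NullFields: []string{"Policy"}}
+ // MarshalJSON emits {"policy":null}, explicitly clearing the field, whereas
+ // an unset Policy with no NullFields entry is omitted from the request body.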
+ NullFields []string `json:"-"` +} + +func (s SetIamPolicyRequest) MarshalJSON() ([]byte, error) { + type NoMethod SetIamPolicyRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ShieldedInstanceConfig: Shielded Instance Config for clusters using Compute +// Engine Shielded VMs +// (https://cloud.google.com/security/shielded-cloud/shielded-vm). +type ShieldedInstanceConfig struct { + // EnableIntegrityMonitoring: Optional. Defines whether instances have + // integrity monitoring enabled. + EnableIntegrityMonitoring bool `json:"enableIntegrityMonitoring,omitempty"` + // EnableSecureBoot: Optional. Defines whether instances have Secure Boot + // enabled. + EnableSecureBoot bool `json:"enableSecureBoot,omitempty"` + // EnableVtpm: Optional. Defines whether instances have the vTPM enabled. + EnableVtpm bool `json:"enableVtpm,omitempty"` + // ForceSendFields is a list of field names (e.g. "EnableIntegrityMonitoring") + // to unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "EnableIntegrityMonitoring") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ShieldedInstanceConfig) MarshalJSON() ([]byte, error) { + type NoMethod ShieldedInstanceConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type ShufflePushReadMetrics struct { + CorruptMergedBlockChunks int64 `json:"corruptMergedBlockChunks,omitempty,string"` + LocalMergedBlocksFetched int64 `json:"localMergedBlocksFetched,omitempty,string"` + LocalMergedBytesRead int64 `json:"localMergedBytesRead,omitempty,string"` + LocalMergedChunksFetched int64 `json:"localMergedChunksFetched,omitempty,string"` + MergedFetchFallbackCount int64 `json:"mergedFetchFallbackCount,omitempty,string"` + RemoteMergedBlocksFetched int64 `json:"remoteMergedBlocksFetched,omitempty,string"` + RemoteMergedBytesRead int64 `json:"remoteMergedBytesRead,omitempty,string"` + RemoteMergedChunksFetched int64 `json:"remoteMergedChunksFetched,omitempty,string"` + RemoteMergedReqsDuration int64 `json:"remoteMergedReqsDuration,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "CorruptMergedBlockChunks") + // to unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "CorruptMergedBlockChunks") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
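+ // Note (illustrative, not part of the generated file): the int64 fields above
+ // use the encoding/json ",string" option, so they are marshaled as quoted
+ // JSON strings, e.g. LocalMergedBytesRead = 42 is encoded as
+ //   "localMergedBytesRead":"42"
+ // which preserves 64-bit precision for clients that read JSON numbers as
+ // float64.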
+ NullFields []string `json:"-"` +} + +func (s ShufflePushReadMetrics) MarshalJSON() ([]byte, error) { + type NoMethod ShufflePushReadMetrics + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type ShufflePushReadQuantileMetrics struct { + CorruptMergedBlockChunks *Quantiles `json:"corruptMergedBlockChunks,omitempty"` + LocalMergedBlocksFetched *Quantiles `json:"localMergedBlocksFetched,omitempty"` + LocalMergedBytesRead *Quantiles `json:"localMergedBytesRead,omitempty"` + LocalMergedChunksFetched *Quantiles `json:"localMergedChunksFetched,omitempty"` + MergedFetchFallbackCount *Quantiles `json:"mergedFetchFallbackCount,omitempty"` + RemoteMergedBlocksFetched *Quantiles `json:"remoteMergedBlocksFetched,omitempty"` + RemoteMergedBytesRead *Quantiles `json:"remoteMergedBytesRead,omitempty"` + RemoteMergedChunksFetched *Quantiles `json:"remoteMergedChunksFetched,omitempty"` + RemoteMergedReqsDuration *Quantiles `json:"remoteMergedReqsDuration,omitempty"` + // ForceSendFields is a list of field names (e.g. "CorruptMergedBlockChunks") + // to unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "CorruptMergedBlockChunks") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ShufflePushReadQuantileMetrics) MarshalJSON() ([]byte, error) { + type NoMethod ShufflePushReadQuantileMetrics + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ShuffleReadMetrics: Shuffle data read by the task. +type ShuffleReadMetrics struct { + FetchWaitTimeMillis int64 `json:"fetchWaitTimeMillis,omitempty,string"` + LocalBlocksFetched int64 `json:"localBlocksFetched,omitempty,string"` + LocalBytesRead int64 `json:"localBytesRead,omitempty,string"` + RecordsRead int64 `json:"recordsRead,omitempty,string"` + RemoteBlocksFetched int64 `json:"remoteBlocksFetched,omitempty,string"` + RemoteBytesRead int64 `json:"remoteBytesRead,omitempty,string"` + RemoteBytesReadToDisk int64 `json:"remoteBytesReadToDisk,omitempty,string"` + RemoteReqsDuration int64 `json:"remoteReqsDuration,omitempty,string"` + ShufflePushReadMetrics *ShufflePushReadMetrics `json:"shufflePushReadMetrics,omitempty"` + // ForceSendFields is a list of field names (e.g. "FetchWaitTimeMillis") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "FetchWaitTimeMillis") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s ShuffleReadMetrics) MarshalJSON() ([]byte, error) { + type NoMethod ShuffleReadMetrics + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type ShuffleReadQuantileMetrics struct { + FetchWaitTimeMillis *Quantiles `json:"fetchWaitTimeMillis,omitempty"` + LocalBlocksFetched *Quantiles `json:"localBlocksFetched,omitempty"` + ReadBytes *Quantiles `json:"readBytes,omitempty"` + ReadRecords *Quantiles `json:"readRecords,omitempty"` + RemoteBlocksFetched *Quantiles `json:"remoteBlocksFetched,omitempty"` + RemoteBytesRead *Quantiles `json:"remoteBytesRead,omitempty"` + RemoteBytesReadToDisk *Quantiles `json:"remoteBytesReadToDisk,omitempty"` + RemoteReqsDuration *Quantiles `json:"remoteReqsDuration,omitempty"` + ShufflePushReadMetrics *ShufflePushReadQuantileMetrics `json:"shufflePushReadMetrics,omitempty"` + TotalBlocksFetched *Quantiles `json:"totalBlocksFetched,omitempty"` + // ForceSendFields is a list of field names (e.g. "FetchWaitTimeMillis") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "FetchWaitTimeMillis") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ShuffleReadQuantileMetrics) MarshalJSON() ([]byte, error) { + type NoMethod ShuffleReadQuantileMetrics + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ShuffleWriteMetrics: Shuffle data written by task. +type ShuffleWriteMetrics struct { + BytesWritten int64 `json:"bytesWritten,omitempty,string"` + RecordsWritten int64 `json:"recordsWritten,omitempty,string"` + WriteTimeNanos int64 `json:"writeTimeNanos,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "BytesWritten") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "BytesWritten") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ShuffleWriteMetrics) MarshalJSON() ([]byte, error) { + type NoMethod ShuffleWriteMetrics + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type ShuffleWriteQuantileMetrics struct { + WriteBytes *Quantiles `json:"writeBytes,omitempty"` + WriteRecords *Quantiles `json:"writeRecords,omitempty"` + WriteTimeNanos *Quantiles `json:"writeTimeNanos,omitempty"` + // ForceSendFields is a list of field names (e.g. "WriteBytes") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. 
"WriteBytes") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ShuffleWriteQuantileMetrics) MarshalJSON() ([]byte, error) { + type NoMethod ShuffleWriteQuantileMetrics + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type SinkProgress struct { + Description string `json:"description,omitempty"` + Metrics map[string]string `json:"metrics,omitempty"` + NumOutputRows int64 `json:"numOutputRows,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Description") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SinkProgress) MarshalJSON() ([]byte, error) { + type NoMethod SinkProgress + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SoftwareConfig: Specifies the selection and config of software inside the +// cluster. +type SoftwareConfig struct { + // ImageVersion: Optional. The version of software inside the cluster. It must + // be one of the supported Dataproc Versions + // (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported-dataproc-image-versions), + // such as "1.2" (including a subminor version, such as "1.2.29"), or the + // "preview" version + // (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). + // If unspecified, it defaults to the latest Debian version. + ImageVersion string `json:"imageVersion,omitempty"` + // OptionalComponents: Optional. The set of components to activate on the + // cluster. + // + // Possible values: + // "COMPONENT_UNSPECIFIED" - Unspecified component. Specifying this will + // cause Cluster creation to fail. + // "ANACONDA" - The Anaconda component is no longer supported or applicable + // to supported Dataproc on Compute Engine image versions + // (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-version-clusters#supported-dataproc-image-versions). + // It cannot be activated on clusters created with supported Dataproc on + // Compute Engine image versions. + // "DOCKER" - Docker + // "DRUID" - The Druid query engine. (alpha) + // "FLINK" - Flink + // "HBASE" - HBase. (beta) + // "HIVE_WEBHCAT" - The Hive Web HCatalog (the REST service for accessing + // HCatalog). + // "HUDI" - Hudi. + // "JUPYTER" - The Jupyter Notebook. + // "PRESTO" - The Presto query engine. + // "TRINO" - The Trino query engine. + // "RANGER" - The Ranger service. + // "SOLR" - The Solr service. + // "ZEPPELIN" - The Zeppelin notebook. + // "ZOOKEEPER" - The Zookeeper service. + OptionalComponents []string `json:"optionalComponents,omitempty"` + // Properties: Optional. The properties to set on daemon config files.Property + // keys are specified in prefix:property format, for example + // core:hadoop.tmp.dir. 
The following are supported prefixes and their + // mappings: capacity-scheduler: capacity-scheduler.xml core: core-site.xml + // distcp: distcp-default.xml hdfs: hdfs-site.xml hive: hive-site.xml mapred: + // mapred-site.xml pig: pig.properties spark: spark-defaults.conf yarn: + // yarn-site.xmlFor more information, see Cluster properties + // (https://cloud.google.com/dataproc/docs/concepts/cluster-properties). + Properties map[string]string `json:"properties,omitempty"` + // ForceSendFields is a list of field names (e.g. "ImageVersion") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "ImageVersion") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SoftwareConfig) MarshalJSON() ([]byte, error) { + type NoMethod SoftwareConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type SourceProgress struct { + Description string `json:"description,omitempty"` + EndOffset string `json:"endOffset,omitempty"` + InputRowsPerSecond float64 `json:"inputRowsPerSecond,omitempty"` + LatestOffset string `json:"latestOffset,omitempty"` + Metrics map[string]string `json:"metrics,omitempty"` + NumInputRows int64 `json:"numInputRows,omitempty,string"` + ProcessedRowsPerSecond float64 `json:"processedRowsPerSecond,omitempty"` + StartOffset string `json:"startOffset,omitempty"` + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Description") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SourceProgress) MarshalJSON() ([]byte, error) { + type NoMethod SourceProgress + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +func (s *SourceProgress) UnmarshalJSON(data []byte) error { + type NoMethod SourceProgress + var s1 struct { + InputRowsPerSecond gensupport.JSONFloat64 `json:"inputRowsPerSecond"` + ProcessedRowsPerSecond gensupport.JSONFloat64 `json:"processedRowsPerSecond"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.InputRowsPerSecond = float64(s1.InputRowsPerSecond) + s.ProcessedRowsPerSecond = float64(s1.ProcessedRowsPerSecond) + return nil +} + +// SparkApplication: A summary of Spark Application +type SparkApplication struct { + // Application: Output only. High level information corresponding to an + // application. + Application *ApplicationInfo `json:"application,omitempty"` + // Name: Identifier. Name of the spark application + Name string `json:"name,omitempty"` + // ForceSendFields is a list of field names (e.g. 
"Application") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Application") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SparkApplication) MarshalJSON() ([]byte, error) { + type NoMethod SparkApplication + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SparkBatch: A configuration for running an Apache Spark +// (https://spark.apache.org/) batch workload. +type SparkBatch struct { + // ArchiveUris: Optional. HCFS URIs of archives to be extracted into the + // working directory of each executor. Supported file types: .jar, .tar, + // .tar.gz, .tgz, and .zip. + ArchiveUris []string `json:"archiveUris,omitempty"` + // Args: Optional. The arguments to pass to the driver. Do not include + // arguments that can be set as batch properties, such as --conf, since a + // collision can occur that causes an incorrect batch submission. + Args []string `json:"args,omitempty"` + // FileUris: Optional. HCFS URIs of files to be placed in the working directory + // of each executor. + FileUris []string `json:"fileUris,omitempty"` + // JarFileUris: Optional. HCFS URIs of jar files to add to the classpath of the + // Spark driver and tasks. + JarFileUris []string `json:"jarFileUris,omitempty"` + // MainClass: Optional. The name of the driver main class. The jar file that + // contains the class must be in the classpath or specified in jar_file_uris. + MainClass string `json:"mainClass,omitempty"` + // MainJarFileUri: Optional. The HCFS URI of the jar file that contains the + // main class. + MainJarFileUri string `json:"mainJarFileUri,omitempty"` + // ForceSendFields is a list of field names (e.g. "ArchiveUris") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "ArchiveUris") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SparkBatch) MarshalJSON() ([]byte, error) { + type NoMethod SparkBatch + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SparkConnectConfig: Spark connect configuration for an interactive session. +type SparkConnectConfig struct { +} + +// SparkHistoryServerConfig: Spark History Server configuration for the +// workload. +type SparkHistoryServerConfig struct { + // DataprocCluster: Optional. Resource name of an existing Dataproc Cluster to + // act as a Spark History Server for the workload.Example: + // projects/[project_id]/regions/[region]/clusters/[cluster_name] + DataprocCluster string `json:"dataprocCluster,omitempty"` + // ForceSendFields is a list of field names (e.g. "DataprocCluster") to + // unconditionally include in API requests. 
By default, fields with empty or
+ // default values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "DataprocCluster") to include in
+ // API requests with the JSON null value. By default, fields with empty values
+ // are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s SparkHistoryServerConfig) MarshalJSON() ([]byte, error) {
+ type NoMethod SparkHistoryServerConfig
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// SparkJob: A Dataproc job for running Apache Spark
+// (https://spark.apache.org/) applications on YARN.
+type SparkJob struct {
+ // ArchiveUris: Optional. HCFS URIs of archives to be extracted into the
+ // working directory of each executor. Supported file types: .jar, .tar,
+ // .tar.gz, .tgz, and .zip.
+ ArchiveUris []string `json:"archiveUris,omitempty"`
+ // Args: Optional. The arguments to pass to the driver. Do not include
+ // arguments, such as --conf, that can be set as job properties, since a
+ // collision may occur that causes an incorrect job submission.
+ Args []string `json:"args,omitempty"`
+ // FileUris: Optional. HCFS URIs of files to be placed in the working directory
+ // of each executor. Useful for naively parallel tasks.
+ FileUris []string `json:"fileUris,omitempty"`
+ // JarFileUris: Optional. HCFS URIs of jar files to add to the CLASSPATHs of
+ // the Spark driver and tasks.
+ JarFileUris []string `json:"jarFileUris,omitempty"`
+ // LoggingConfig: Optional. The runtime log config for job execution.
+ LoggingConfig *LoggingConfig `json:"loggingConfig,omitempty"`
+ // MainClass: The name of the driver's main class. The jar file that contains
+ // the class must be in the default CLASSPATH or specified in
+ // SparkJob.jar_file_uris.
+ MainClass string `json:"mainClass,omitempty"`
+ // MainJarFileUri: The HCFS URI of the jar file that contains the main class.
+ MainJarFileUri string `json:"mainJarFileUri,omitempty"`
+ // Properties: Optional. A mapping of property names to values, used to
+ // configure Spark. Properties that conflict with values set by the Dataproc
+ // API might be overwritten. Can include properties set in
+ // /etc/spark/conf/spark-defaults.conf and classes in user code.
+ Properties map[string]string `json:"properties,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "ArchiveUris") to
+ // unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "ArchiveUris") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s SparkJob) MarshalJSON() ([]byte, error) {
+ type NoMethod SparkJob
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// SparkPlanGraph: A graph used for storing information about the execution
+// plan of a DataFrame.
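+// For illustration (hypothetical IDs, not part of the generated file): a plan
+// in which node 1 feeds node 2 would be encoded as
+//   Edges: []*SparkPlanGraphEdge{{FromId: 1, ToId: 2}}
+// with both nodes carried in Nodes as SparkPlanGraphNodeWrapper entries.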
+type SparkPlanGraph struct {
+ Edges []*SparkPlanGraphEdge `json:"edges,omitempty"`
+ ExecutionId int64 `json:"executionId,omitempty,string"`
+ Nodes []*SparkPlanGraphNodeWrapper `json:"nodes,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "Edges") to unconditionally
+ // include in API requests. By default, fields with empty or default values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "Edges") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s SparkPlanGraph) MarshalJSON() ([]byte, error) {
+ type NoMethod SparkPlanGraph
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// SparkPlanGraphCluster: Represents a subtree of a Spark plan.
+type SparkPlanGraphCluster struct {
+ Desc string `json:"desc,omitempty"`
+ Metrics []*SqlPlanMetric `json:"metrics,omitempty"`
+ Name string `json:"name,omitempty"`
+ Nodes []*SparkPlanGraphNodeWrapper `json:"nodes,omitempty"`
+ SparkPlanGraphClusterId int64 `json:"sparkPlanGraphClusterId,omitempty,string"`
+ // ForceSendFields is a list of field names (e.g. "Desc") to unconditionally
+ // include in API requests. By default, fields with empty or default values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "Desc") to include in API requests
+ // with the JSON null value. By default, fields with empty values are omitted
+ // from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s SparkPlanGraphCluster) MarshalJSON() ([]byte, error) {
+ type NoMethod SparkPlanGraphCluster
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// SparkPlanGraphEdge: Represents a directed edge in the Spark plan tree from
+// child to parent.
+type SparkPlanGraphEdge struct {
+ FromId int64 `json:"fromId,omitempty,string"`
+ ToId int64 `json:"toId,omitempty,string"`
+ // ForceSendFields is a list of field names (e.g. "FromId") to unconditionally
+ // include in API requests. By default, fields with empty or default values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "FromId") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s SparkPlanGraphEdge) MarshalJSON() ([]byte, error) {
+ type NoMethod SparkPlanGraphEdge
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// SparkPlanGraphNode: Represents a node in the Spark plan tree.
+type SparkPlanGraphNode struct {
+ Desc string `json:"desc,omitempty"`
+ Metrics []*SqlPlanMetric `json:"metrics,omitempty"`
+ Name string `json:"name,omitempty"`
+ SparkPlanGraphNodeId int64 `json:"sparkPlanGraphNodeId,omitempty,string"`
+ // ForceSendFields is a list of field names (e.g. "Desc") to unconditionally
+ // include in API requests. By default, fields with empty or default values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "Desc") to include in API requests
+ // with the JSON null value. By default, fields with empty values are omitted
+ // from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s SparkPlanGraphNode) MarshalJSON() ([]byte, error) {
+ type NoMethod SparkPlanGraphNode
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// SparkPlanGraphNodeWrapper: Wrapper used to represent either a node or a
+// cluster.
+type SparkPlanGraphNodeWrapper struct {
+ Cluster *SparkPlanGraphCluster `json:"cluster,omitempty"`
+ Node *SparkPlanGraphNode `json:"node,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "Cluster") to unconditionally
+ // include in API requests. By default, fields with empty or default values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "Cluster") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s SparkPlanGraphNodeWrapper) MarshalJSON() ([]byte, error) {
+ type NoMethod SparkPlanGraphNodeWrapper
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// SparkRBatch: A configuration for running an Apache SparkR
+// (https://spark.apache.org/docs/latest/sparkr.html) batch workload.
+type SparkRBatch struct {
+ // ArchiveUris: Optional. HCFS URIs of archives to be extracted into the
+ // working directory of each executor. Supported file types: .jar, .tar,
+ // .tar.gz, .tgz, and .zip.
+ ArchiveUris []string `json:"archiveUris,omitempty"`
+ // Args: Optional. The arguments to pass to the Spark driver. Do not include
+ // arguments that can be set as batch properties, such as --conf, since a
+ // collision can occur that causes an incorrect batch submission.
+ Args []string `json:"args,omitempty"`
+ // FileUris: Optional. HCFS URIs of files to be placed in the working directory
+ // of each executor.
+ FileUris []string `json:"fileUris,omitempty"`
+ // MainRFileUri: Required. The HCFS URI of the main R file to use as the
+ // driver. Must be a .R or .r file.
+ MainRFileUri string `json:"mainRFileUri,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "ArchiveUris") to
+ // unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "ArchiveUris") to include in API
+ // requests with the JSON null value.
By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SparkRBatch) MarshalJSON() ([]byte, error) { + type NoMethod SparkRBatch + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SparkRJob: A Dataproc job for running Apache SparkR +// (https://spark.apache.org/docs/latest/sparkr.html) applications on YARN. +type SparkRJob struct { + // ArchiveUris: Optional. HCFS URIs of archives to be extracted into the + // working directory of each executor. Supported file types: .jar, .tar, + // .tar.gz, .tgz, and .zip. + ArchiveUris []string `json:"archiveUris,omitempty"` + // Args: Optional. The arguments to pass to the driver. Do not include + // arguments, such as --conf, that can be set as job properties, since a + // collision may occur that causes an incorrect job submission. + Args []string `json:"args,omitempty"` + // FileUris: Optional. HCFS URIs of files to be placed in the working directory + // of each executor. Useful for naively parallel tasks. + FileUris []string `json:"fileUris,omitempty"` + // LoggingConfig: Optional. The runtime log config for job execution. + LoggingConfig *LoggingConfig `json:"loggingConfig,omitempty"` + // MainRFileUri: Required. The HCFS URI of the main R file to use as the + // driver. Must be a .R file. + MainRFileUri string `json:"mainRFileUri,omitempty"` + // Properties: Optional. A mapping of property names to values, used to + // configure SparkR. Properties that conflict with values set by the Dataproc + // API might be overwritten. Can include properties set in + // /etc/spark/conf/spark-defaults.conf and classes in user code. + Properties map[string]string `json:"properties,omitempty"` + // ForceSendFields is a list of field names (e.g. "ArchiveUris") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "ArchiveUris") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SparkRJob) MarshalJSON() ([]byte, error) { + type NoMethod SparkRJob + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type SparkRuntimeInfo struct { + JavaHome string `json:"javaHome,omitempty"` + JavaVersion string `json:"javaVersion,omitempty"` + ScalaVersion string `json:"scalaVersion,omitempty"` + // ForceSendFields is a list of field names (e.g. "JavaHome") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "JavaHome") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s SparkRuntimeInfo) MarshalJSON() ([]byte, error) { + type NoMethod SparkRuntimeInfo + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SparkSqlBatch: A configuration for running Apache Spark SQL +// (https://spark.apache.org/sql/) queries as a batch workload. +type SparkSqlBatch struct { + // JarFileUris: Optional. HCFS URIs of jar files to be added to the Spark + // CLASSPATH. + JarFileUris []string `json:"jarFileUris,omitempty"` + // QueryFileUri: Required. The HCFS URI of the script that contains Spark SQL + // queries to execute. + QueryFileUri string `json:"queryFileUri,omitempty"` + // QueryVariables: Optional. Mapping of query variable names to values + // (equivalent to the Spark SQL command: SET name="value";). + QueryVariables map[string]string `json:"queryVariables,omitempty"` + // ForceSendFields is a list of field names (e.g. "JarFileUris") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "JarFileUris") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SparkSqlBatch) MarshalJSON() ([]byte, error) { + type NoMethod SparkSqlBatch + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SparkSqlJob: A Dataproc job for running Apache Spark SQL +// (https://spark.apache.org/sql/) queries. +type SparkSqlJob struct { + // JarFileUris: Optional. HCFS URIs of jar files to be added to the Spark + // CLASSPATH. + JarFileUris []string `json:"jarFileUris,omitempty"` + // LoggingConfig: Optional. The runtime log config for job execution. + LoggingConfig *LoggingConfig `json:"loggingConfig,omitempty"` + // Properties: Optional. A mapping of property names to values, used to + // configure Spark SQL's SparkConf. Properties that conflict with values set by + // the Dataproc API might be overwritten. + Properties map[string]string `json:"properties,omitempty"` + // QueryFileUri: The HCFS URI of the script that contains SQL queries. + QueryFileUri string `json:"queryFileUri,omitempty"` + // QueryList: A list of queries. + QueryList *QueryList `json:"queryList,omitempty"` + // ScriptVariables: Optional. Mapping of query variable names to values + // (equivalent to the Spark SQL command: SET name="value";). + ScriptVariables map[string]string `json:"scriptVariables,omitempty"` + // ForceSendFields is a list of field names (e.g. "JarFileUris") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "JarFileUris") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
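+ // Illustrative sketch (hypothetical values, not part of the generated file):
+ //   job := &SparkSqlJob{
+ //       QueryFileUri:    "gs://my-bucket/queries.sql",
+ //       ScriptVariables: map[string]string{"name": "value"},
+ //   }
+ // QueryFileUri and QueryList appear to be alternative ways to supply queries;
+ // typically exactly one of them is set.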
+ NullFields []string `json:"-"`
+}
+
+func (s SparkSqlJob) MarshalJSON() ([]byte, error) {
+ type NoMethod SparkSqlJob
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// SparkStandaloneAutoscalingConfig: Basic autoscaling configurations for Spark
+// Standalone.
+type SparkStandaloneAutoscalingConfig struct {
+ // GracefulDecommissionTimeout: Required. Timeout for graceful decommissioning
+ // of Spark workers. Specifies the duration to wait for Spark workers to
+ // complete Spark decommissioning tasks before forcefully removing workers.
+ // Only applicable to downscaling operations. Bounds: 0s, 1d.
+ GracefulDecommissionTimeout string `json:"gracefulDecommissionTimeout,omitempty"`
+ // RemoveOnlyIdleWorkers: Optional. Remove only idle workers when scaling down
+ // the cluster.
+ RemoveOnlyIdleWorkers bool `json:"removeOnlyIdleWorkers,omitempty"`
+ // ScaleDownFactor: Required. Fraction of required executors to remove from
+ // Spark Standalone clusters. A scale-down factor of 1.0 will result in scaling
+ // down so that there are no more executors for the Spark Job (more aggressive
+ // scaling). A scale-down factor closer to 0 will result in a smaller magnitude
+ // of scaling down (less aggressive scaling). Bounds: 0.0, 1.0.
+ ScaleDownFactor float64 `json:"scaleDownFactor,omitempty"`
+ // ScaleDownMinWorkerFraction: Optional. Minimum scale-down threshold as a
+ // fraction of total cluster size before scaling occurs. For example, in a
+ // 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at
+ // least a 2-worker scale-down for the cluster to scale. A threshold of 0 means
+ // the autoscaler will scale down on any recommended change. Bounds: 0.0, 1.0.
+ // Default: 0.0.
+ ScaleDownMinWorkerFraction float64 `json:"scaleDownMinWorkerFraction,omitempty"`
+ // ScaleUpFactor: Required. Fraction of required workers to add to Spark
+ // Standalone clusters. A scale-up factor of 1.0 will result in scaling up so
+ // that there are no more required workers for the Spark Job (more aggressive
+ // scaling). A scale-up factor closer to 0 will result in a smaller magnitude
+ // of scaling up (less aggressive scaling). Bounds: 0.0, 1.0.
+ ScaleUpFactor float64 `json:"scaleUpFactor,omitempty"`
+ // ScaleUpMinWorkerFraction: Optional. Minimum scale-up threshold as a fraction
+ // of total cluster size before scaling occurs. For example, in a 20-worker
+ // cluster, a threshold of 0.1 means the autoscaler must recommend at least a
+ // 2-worker scale-up for the cluster to scale. A threshold of 0 means the
+ // autoscaler will scale up on any recommended change. Bounds: 0.0, 1.0.
+ // Default: 0.0.
+ ScaleUpMinWorkerFraction float64 `json:"scaleUpMinWorkerFraction,omitempty"`
+ // ForceSendFields is a list of field names (e.g.
+ // "GracefulDecommissionTimeout") to unconditionally include in API requests.
+ // By default, fields with empty or default values are omitted from API
+ // requests. See https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields
+ // for more details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "GracefulDecommissionTimeout") to
+ // include in API requests with the JSON null value. By default, fields with
+ // empty values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
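+ // Worked example (illustrative, not part of the generated file): with
+ // ScaleUpFactor = 0.5, if a job requires 10 additional workers, the
+ // autoscaler recommends on the order of 10 * 0.5 = 5 workers; with
+ // ScaleUpMinWorkerFraction = 0.1 on a 20-worker cluster, any recommendation
+ // smaller than 2 workers is ignored, per the field documentation above.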
+ NullFields []string `json:"-"` +} + +func (s SparkStandaloneAutoscalingConfig) MarshalJSON() ([]byte, error) { + type NoMethod SparkStandaloneAutoscalingConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +func (s *SparkStandaloneAutoscalingConfig) UnmarshalJSON(data []byte) error { + type NoMethod SparkStandaloneAutoscalingConfig + var s1 struct { + ScaleDownFactor gensupport.JSONFloat64 `json:"scaleDownFactor"` + ScaleDownMinWorkerFraction gensupport.JSONFloat64 `json:"scaleDownMinWorkerFraction"` + ScaleUpFactor gensupport.JSONFloat64 `json:"scaleUpFactor"` + ScaleUpMinWorkerFraction gensupport.JSONFloat64 `json:"scaleUpMinWorkerFraction"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.ScaleDownFactor = float64(s1.ScaleDownFactor) + s.ScaleDownMinWorkerFraction = float64(s1.ScaleDownMinWorkerFraction) + s.ScaleUpFactor = float64(s1.ScaleUpFactor) + s.ScaleUpMinWorkerFraction = float64(s1.ScaleUpMinWorkerFraction) + return nil +} + +// SparkWrapperObject: Outer message that contains the data obtained from spark +// listener, packaged with information that is required to process it. +type SparkWrapperObject struct { + AppSummary *AppSummary `json:"appSummary,omitempty"` + ApplicationEnvironmentInfo *ApplicationEnvironmentInfo `json:"applicationEnvironmentInfo,omitempty"` + // ApplicationId: Application Id created by Spark. + ApplicationId string `json:"applicationId,omitempty"` + ApplicationInfo *ApplicationInfo `json:"applicationInfo,omitempty"` + // EventTimestamp: VM Timestamp associated with the data object. + EventTimestamp string `json:"eventTimestamp,omitempty"` + ExecutorStageSummary *ExecutorStageSummary `json:"executorStageSummary,omitempty"` + ExecutorSummary *ExecutorSummary `json:"executorSummary,omitempty"` + JobData *JobData `json:"jobData,omitempty"` + PoolData *PoolData `json:"poolData,omitempty"` + ProcessSummary *ProcessSummary `json:"processSummary,omitempty"` + RddOperationGraph *RddOperationGraph `json:"rddOperationGraph,omitempty"` + RddStorageInfo *RddStorageInfo `json:"rddStorageInfo,omitempty"` + ResourceProfileInfo *ResourceProfileInfo `json:"resourceProfileInfo,omitempty"` + SparkPlanGraph *SparkPlanGraph `json:"sparkPlanGraph,omitempty"` + SpeculationStageSummary *SpeculationStageSummary `json:"speculationStageSummary,omitempty"` + SqlExecutionUiData *SqlExecutionUiData `json:"sqlExecutionUiData,omitempty"` + StageData *StageData `json:"stageData,omitempty"` + StreamBlockData *StreamBlockData `json:"streamBlockData,omitempty"` + StreamingQueryData *StreamingQueryData `json:"streamingQueryData,omitempty"` + StreamingQueryProgress *StreamingQueryProgress `json:"streamingQueryProgress,omitempty"` + TaskData *TaskData `json:"taskData,omitempty"` + // ForceSendFields is a list of field names (e.g. "AppSummary") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AppSummary") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
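+ // Note (an assumption, not stated in the generated docs): each wrapper
+ // carries a single Spark listener payload, so typically exactly one of the
+ // pointer fields above is non-nil, with ApplicationId and EventTimestamp
+ // identifying the event.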
+ NullFields []string `json:"-"` +} + +func (s SparkWrapperObject) MarshalJSON() ([]byte, error) { + type NoMethod SparkWrapperObject + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SpeculationStageSummary: Details of the speculation task when speculative +// execution is enabled. +type SpeculationStageSummary struct { + NumActiveTasks int64 `json:"numActiveTasks,omitempty"` + NumCompletedTasks int64 `json:"numCompletedTasks,omitempty"` + NumFailedTasks int64 `json:"numFailedTasks,omitempty"` + NumKilledTasks int64 `json:"numKilledTasks,omitempty"` + NumTasks int64 `json:"numTasks,omitempty"` + StageAttemptId int64 `json:"stageAttemptId,omitempty"` + StageId int64 `json:"stageId,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "NumActiveTasks") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "NumActiveTasks") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SpeculationStageSummary) MarshalJSON() ([]byte, error) { + type NoMethod SpeculationStageSummary + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SqlExecutionUiData: SQL Execution Data +type SqlExecutionUiData struct { + CompletionTime string `json:"completionTime,omitempty"` + Description string `json:"description,omitempty"` + Details string `json:"details,omitempty"` + ErrorMessage string `json:"errorMessage,omitempty"` + ExecutionId int64 `json:"executionId,omitempty,string"` + Jobs map[string]string `json:"jobs,omitempty"` + MetricValues map[string]string `json:"metricValues,omitempty"` + MetricValuesIsNull bool `json:"metricValuesIsNull,omitempty"` + Metrics []*SqlPlanMetric `json:"metrics,omitempty"` + ModifiedConfigs map[string]string `json:"modifiedConfigs,omitempty"` + PhysicalPlanDescription string `json:"physicalPlanDescription,omitempty"` + RootExecutionId int64 `json:"rootExecutionId,omitempty,string"` + Stages googleapi.Int64s `json:"stages,omitempty"` + SubmissionTime string `json:"submissionTime,omitempty"` + // ForceSendFields is a list of field names (e.g. "CompletionTime") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "CompletionTime") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SqlExecutionUiData) MarshalJSON() ([]byte, error) { + type NoMethod SqlExecutionUiData + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SqlPlanMetric: Metrics related to SQL execution. 
+type SqlPlanMetric struct { + AccumulatorId int64 `json:"accumulatorId,omitempty,string"` + MetricType string `json:"metricType,omitempty"` + Name string `json:"name,omitempty"` + // ForceSendFields is a list of field names (e.g. "AccumulatorId") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AccumulatorId") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SqlPlanMetric) MarshalJSON() ([]byte, error) { + type NoMethod SqlPlanMetric + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// StageAttemptTasksSummary: Data related to tasks summary for a Spark Stage +// Attempt +type StageAttemptTasksSummary struct { + ApplicationId string `json:"applicationId,omitempty"` + NumFailedTasks int64 `json:"numFailedTasks,omitempty"` + NumKilledTasks int64 `json:"numKilledTasks,omitempty"` + NumPendingTasks int64 `json:"numPendingTasks,omitempty"` + NumRunningTasks int64 `json:"numRunningTasks,omitempty"` + NumSuccessTasks int64 `json:"numSuccessTasks,omitempty"` + NumTasks int64 `json:"numTasks,omitempty"` + StageAttemptId int64 `json:"stageAttemptId,omitempty"` + StageId int64 `json:"stageId,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "ApplicationId") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "ApplicationId") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s StageAttemptTasksSummary) MarshalJSON() ([]byte, error) { + type NoMethod StageAttemptTasksSummary + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// StageData: Data corresponding to a stage. 
+type StageData struct { + AccumulatorUpdates []*AccumulableInfo `json:"accumulatorUpdates,omitempty"` + CompletionTime string `json:"completionTime,omitempty"` + Description string `json:"description,omitempty"` + Details string `json:"details,omitempty"` + ExecutorMetricsDistributions *ExecutorMetricsDistributions `json:"executorMetricsDistributions,omitempty"` + ExecutorSummary map[string]ExecutorStageSummary `json:"executorSummary,omitempty"` + FailureReason string `json:"failureReason,omitempty"` + FirstTaskLaunchedTime string `json:"firstTaskLaunchedTime,omitempty"` + IsShufflePushEnabled bool `json:"isShufflePushEnabled,omitempty"` + JobIds googleapi.Int64s `json:"jobIds,omitempty"` + KilledTasksSummary map[string]int64 `json:"killedTasksSummary,omitempty"` + Locality map[string]string `json:"locality,omitempty"` + Name string `json:"name,omitempty"` + NumActiveTasks int64 `json:"numActiveTasks,omitempty"` + NumCompleteTasks int64 `json:"numCompleteTasks,omitempty"` + NumCompletedIndices int64 `json:"numCompletedIndices,omitempty"` + NumFailedTasks int64 `json:"numFailedTasks,omitempty"` + NumKilledTasks int64 `json:"numKilledTasks,omitempty"` + NumTasks int64 `json:"numTasks,omitempty"` + ParentStageIds googleapi.Int64s `json:"parentStageIds,omitempty"` + PeakExecutorMetrics *ExecutorMetrics `json:"peakExecutorMetrics,omitempty"` + RddIds googleapi.Int64s `json:"rddIds,omitempty"` + ResourceProfileId int64 `json:"resourceProfileId,omitempty"` + SchedulingPool string `json:"schedulingPool,omitempty"` + ShuffleMergersCount int64 `json:"shuffleMergersCount,omitempty"` + SpeculationSummary *SpeculationStageSummary `json:"speculationSummary,omitempty"` + StageAttemptId int64 `json:"stageAttemptId,omitempty"` + StageId int64 `json:"stageId,omitempty,string"` + StageMetrics *StageMetrics `json:"stageMetrics,omitempty"` + // Possible values: + // "STAGE_STATUS_UNSPECIFIED" + // "STAGE_STATUS_ACTIVE" + // "STAGE_STATUS_COMPLETE" + // "STAGE_STATUS_FAILED" + // "STAGE_STATUS_PENDING" + // "STAGE_STATUS_SKIPPED" + Status string `json:"status,omitempty"` + SubmissionTime string `json:"submissionTime,omitempty"` + // TaskQuantileMetrics: Summary metrics fields. These are included in response + // only if present in summary_metrics_mask field in request + TaskQuantileMetrics *TaskQuantileMetrics `json:"taskQuantileMetrics,omitempty"` + Tasks map[string]TaskData `json:"tasks,omitempty"` + // ForceSendFields is a list of field names (e.g. "AccumulatorUpdates") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AccumulatorUpdates") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s StageData) MarshalJSON() ([]byte, error) { + type NoMethod StageData + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// StageInputMetrics: Metrics about the input read by the stage. +type StageInputMetrics struct { + BytesRead int64 `json:"bytesRead,omitempty,string"` + RecordsRead int64 `json:"recordsRead,omitempty,string"` + // ForceSendFields is a list of field names (e.g. 
"BytesRead") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "BytesRead") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s StageInputMetrics) MarshalJSON() ([]byte, error) { + type NoMethod StageInputMetrics + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// StageMetrics: Stage Level Aggregated Metrics +type StageMetrics struct { + DiskBytesSpilled int64 `json:"diskBytesSpilled,omitempty,string"` + ExecutorCpuTimeNanos int64 `json:"executorCpuTimeNanos,omitempty,string"` + ExecutorDeserializeCpuTimeNanos int64 `json:"executorDeserializeCpuTimeNanos,omitempty,string"` + ExecutorDeserializeTimeMillis int64 `json:"executorDeserializeTimeMillis,omitempty,string"` + ExecutorRunTimeMillis int64 `json:"executorRunTimeMillis,omitempty,string"` + JvmGcTimeMillis int64 `json:"jvmGcTimeMillis,omitempty,string"` + MemoryBytesSpilled int64 `json:"memoryBytesSpilled,omitempty,string"` + PeakExecutionMemoryBytes int64 `json:"peakExecutionMemoryBytes,omitempty,string"` + ResultSerializationTimeMillis int64 `json:"resultSerializationTimeMillis,omitempty,string"` + ResultSize int64 `json:"resultSize,omitempty,string"` + StageInputMetrics *StageInputMetrics `json:"stageInputMetrics,omitempty"` + StageOutputMetrics *StageOutputMetrics `json:"stageOutputMetrics,omitempty"` + StageShuffleReadMetrics *StageShuffleReadMetrics `json:"stageShuffleReadMetrics,omitempty"` + StageShuffleWriteMetrics *StageShuffleWriteMetrics `json:"stageShuffleWriteMetrics,omitempty"` + // ForceSendFields is a list of field names (e.g. "DiskBytesSpilled") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "DiskBytesSpilled") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s StageMetrics) MarshalJSON() ([]byte, error) { + type NoMethod StageMetrics + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// StageOutputMetrics: Metrics about the output written by the stage. +type StageOutputMetrics struct { + BytesWritten int64 `json:"bytesWritten,omitempty,string"` + RecordsWritten int64 `json:"recordsWritten,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "BytesWritten") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "BytesWritten") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. 
See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s StageOutputMetrics) MarshalJSON() ([]byte, error) { + type NoMethod StageOutputMetrics + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type StageShufflePushReadMetrics struct { + CorruptMergedBlockChunks int64 `json:"corruptMergedBlockChunks,omitempty,string"` + LocalMergedBlocksFetched int64 `json:"localMergedBlocksFetched,omitempty,string"` + LocalMergedBytesRead int64 `json:"localMergedBytesRead,omitempty,string"` + LocalMergedChunksFetched int64 `json:"localMergedChunksFetched,omitempty,string"` + MergedFetchFallbackCount int64 `json:"mergedFetchFallbackCount,omitempty,string"` + RemoteMergedBlocksFetched int64 `json:"remoteMergedBlocksFetched,omitempty,string"` + RemoteMergedBytesRead int64 `json:"remoteMergedBytesRead,omitempty,string"` + RemoteMergedChunksFetched int64 `json:"remoteMergedChunksFetched,omitempty,string"` + RemoteMergedReqsDuration int64 `json:"remoteMergedReqsDuration,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "CorruptMergedBlockChunks") + // to unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "CorruptMergedBlockChunks") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s StageShufflePushReadMetrics) MarshalJSON() ([]byte, error) { + type NoMethod StageShufflePushReadMetrics + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// StageShuffleReadMetrics: Shuffle data read for the stage. +type StageShuffleReadMetrics struct { + BytesRead int64 `json:"bytesRead,omitempty,string"` + FetchWaitTimeMillis int64 `json:"fetchWaitTimeMillis,omitempty,string"` + LocalBlocksFetched int64 `json:"localBlocksFetched,omitempty,string"` + LocalBytesRead int64 `json:"localBytesRead,omitempty,string"` + RecordsRead int64 `json:"recordsRead,omitempty,string"` + RemoteBlocksFetched int64 `json:"remoteBlocksFetched,omitempty,string"` + RemoteBytesRead int64 `json:"remoteBytesRead,omitempty,string"` + RemoteBytesReadToDisk int64 `json:"remoteBytesReadToDisk,omitempty,string"` + RemoteReqsDuration int64 `json:"remoteReqsDuration,omitempty,string"` + StageShufflePushReadMetrics *StageShufflePushReadMetrics `json:"stageShufflePushReadMetrics,omitempty"` + // ForceSendFields is a list of field names (e.g. "BytesRead") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "BytesRead") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s StageShuffleReadMetrics) MarshalJSON() ([]byte, error) { + type NoMethod StageShuffleReadMetrics + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// StageShuffleWriteMetrics: Shuffle data written for the stage. +type StageShuffleWriteMetrics struct { + BytesWritten int64 `json:"bytesWritten,omitempty,string"` + RecordsWritten int64 `json:"recordsWritten,omitempty,string"` + WriteTimeNanos int64 `json:"writeTimeNanos,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "BytesWritten") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "BytesWritten") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s StageShuffleWriteMetrics) MarshalJSON() ([]byte, error) { + type NoMethod StageShuffleWriteMetrics + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// StagesSummary: Data related to Stages page summary +type StagesSummary struct { + ApplicationId string `json:"applicationId,omitempty"` + NumActiveStages int64 `json:"numActiveStages,omitempty"` + NumCompletedStages int64 `json:"numCompletedStages,omitempty"` + NumFailedStages int64 `json:"numFailedStages,omitempty"` + NumPendingStages int64 `json:"numPendingStages,omitempty"` + NumSkippedStages int64 `json:"numSkippedStages,omitempty"` + // ForceSendFields is a list of field names (e.g. "ApplicationId") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "ApplicationId") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s StagesSummary) MarshalJSON() ([]byte, error) { + type NoMethod StagesSummary + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// StartClusterRequest: A request to start a cluster. +type StartClusterRequest struct { + // ClusterUuid: Optional. Specifying the cluster_uuid means the RPC will fail + // (with error NOT_FOUND) if a cluster with the specified UUID does not exist. + ClusterUuid string `json:"clusterUuid,omitempty"` + // RequestId: Optional. A unique ID used to identify the request. If the server + // receives two StartClusterRequest + // (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s + // with the same id, then the second request will be ignored and the first + // google.longrunning.Operation created and stored in the backend is + // returned.Recommendation: Set this value to a UUID + // (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must + // contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens + // (-). 
The maximum length is 40 characters.
+ RequestId string `json:"requestId,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "ClusterUuid") to
+ // unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "ClusterUuid") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s StartClusterRequest) MarshalJSON() ([]byte, error) {
+ type NoMethod StartClusterRequest
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// StartupConfig: Configuration to handle the startup of instances during the
+// cluster create and update process.
+type StartupConfig struct {
+ // RequiredRegistrationFraction: Optional. The config setting that makes
+ // cluster creation/update succeed only after
+ // required_registration_fraction of instances are up and running. For now,
+ // this configuration applies only to secondary workers. The cluster
+ // will fail if required_registration_fraction of instances are not available.
+ // This will include instance creation, agent registration, and service
+ // registration (if enabled).
+ RequiredRegistrationFraction float64 `json:"requiredRegistrationFraction,omitempty"`
+ // ForceSendFields is a list of field names (e.g.
+ // "RequiredRegistrationFraction") to unconditionally include in API requests.
+ // By default, fields with empty or default values are omitted from API
+ // requests. See https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields
+ // for more details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "RequiredRegistrationFraction") to
+ // include in API requests with the JSON null value. By default, fields with
+ // empty values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s StartupConfig) MarshalJSON() ([]byte, error) {
+ type NoMethod StartupConfig
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+func (s *StartupConfig) UnmarshalJSON(data []byte) error {
+ type NoMethod StartupConfig
+ var s1 struct {
+ RequiredRegistrationFraction gensupport.JSONFloat64 `json:"requiredRegistrationFraction"`
+ *NoMethod
+ }
+ s1.NoMethod = (*NoMethod)(s)
+ if err := json.Unmarshal(data, &s1); err != nil {
+ return err
+ }
+ s.RequiredRegistrationFraction = float64(s1.RequiredRegistrationFraction)
+ return nil
+}
+
+// StateHistory: Historical state information.
+type StateHistory struct {
+ // State: Output only. The state of the batch at this point in history.
+ //
+ // Possible values:
+ // "STATE_UNSPECIFIED" - The batch state is unknown.
+ // "PENDING" - The batch is created before running.
+ // "RUNNING" - The batch is running.
+ // "CANCELLING" - The batch is cancelling.
+ // "CANCELLED" - The batch cancellation was successful.
+ // "SUCCEEDED" - The batch completed successfully.
+ // "FAILED" - The batch is no longer running due to an error.
+ State string `json:"state,omitempty"`
+ // StateMessage: Output only. Details about the state at this point in history.
+ StateMessage string `json:"stateMessage,omitempty"` + // StateStartTime: Output only. The time when the batch entered the historical + // state. + StateStartTime string `json:"stateStartTime,omitempty"` + // ForceSendFields is a list of field names (e.g. "State") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "State") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s StateHistory) MarshalJSON() ([]byte, error) { + type NoMethod StateHistory + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type StateOperatorProgress struct { + AllRemovalsTimeMs int64 `json:"allRemovalsTimeMs,omitempty,string"` + AllUpdatesTimeMs int64 `json:"allUpdatesTimeMs,omitempty,string"` + CommitTimeMs int64 `json:"commitTimeMs,omitempty,string"` + CustomMetrics map[string]string `json:"customMetrics,omitempty"` + MemoryUsedBytes int64 `json:"memoryUsedBytes,omitempty,string"` + NumRowsDroppedByWatermark int64 `json:"numRowsDroppedByWatermark,omitempty,string"` + NumRowsRemoved int64 `json:"numRowsRemoved,omitempty,string"` + NumRowsTotal int64 `json:"numRowsTotal,omitempty,string"` + NumRowsUpdated int64 `json:"numRowsUpdated,omitempty,string"` + NumShufflePartitions int64 `json:"numShufflePartitions,omitempty,string"` + NumStateStoreInstances int64 `json:"numStateStoreInstances,omitempty,string"` + OperatorName string `json:"operatorName,omitempty"` + // ForceSendFields is a list of field names (e.g. "AllRemovalsTimeMs") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AllRemovalsTimeMs") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s StateOperatorProgress) MarshalJSON() ([]byte, error) { + type NoMethod StateOperatorProgress + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// Status: The Status type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by gRPC (https://github.com/grpc). Each Status message contains three +// pieces of data: error code, error message, and error details.You can find +// out more about this error model and how to work with it in the API Design +// Guide (https://cloud.google.com/apis/design/errors). +type Status struct { + // Code: The status code, which should be an enum value of google.rpc.Code. + Code int64 `json:"code,omitempty"` + // Details: A list of messages that carry the error details. There is a common + // set of message types for APIs to use. + Details []googleapi.RawMessage `json:"details,omitempty"` + // Message: A developer-facing error message, which should be in English. 
Any + // user-facing error message should be localized and sent in the + // google.rpc.Status.details field, or localized by the client. + Message string `json:"message,omitempty"` + // ForceSendFields is a list of field names (e.g. "Code") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Code") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s Status) MarshalJSON() ([]byte, error) { + type NoMethod Status + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// StopClusterRequest: A request to stop a cluster. +type StopClusterRequest struct { + // ClusterUuid: Optional. Specifying the cluster_uuid means the RPC will fail + // (with error NOT_FOUND) if a cluster with the specified UUID does not exist. + ClusterUuid string `json:"clusterUuid,omitempty"` + // RequestId: Optional. A unique ID used to identify the request. If the server + // receives two StopClusterRequest + // (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s + // with the same id, then the second request will be ignored and the first + // google.longrunning.Operation created and stored in the backend is + // returned.Recommendation: Set this value to a UUID + // (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must + // contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens + // (-). The maximum length is 40 characters. + RequestId string `json:"requestId,omitempty"` + // ForceSendFields is a list of field names (e.g. "ClusterUuid") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "ClusterUuid") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s StopClusterRequest) MarshalJSON() ([]byte, error) { + type NoMethod StopClusterRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// StreamBlockData: Stream Block Data. +type StreamBlockData struct { + Deserialized bool `json:"deserialized,omitempty"` + DiskSize int64 `json:"diskSize,omitempty,string"` + ExecutorId string `json:"executorId,omitempty"` + HostPort string `json:"hostPort,omitempty"` + MemSize int64 `json:"memSize,omitempty,string"` + Name string `json:"name,omitempty"` + StorageLevel string `json:"storageLevel,omitempty"` + UseDisk bool `json:"useDisk,omitempty"` + UseMemory bool `json:"useMemory,omitempty"` + // ForceSendFields is a list of field names (e.g. "Deserialized") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. 
See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Deserialized") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s StreamBlockData) MarshalJSON() ([]byte, error) { + type NoMethod StreamBlockData + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// StreamingQueryData: Streaming +type StreamingQueryData struct { + EndTimestamp int64 `json:"endTimestamp,omitempty,string"` + Exception string `json:"exception,omitempty"` + IsActive bool `json:"isActive,omitempty"` + Name string `json:"name,omitempty"` + RunId string `json:"runId,omitempty"` + StartTimestamp int64 `json:"startTimestamp,omitempty,string"` + StreamingQueryId string `json:"streamingQueryId,omitempty"` + // ForceSendFields is a list of field names (e.g. "EndTimestamp") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "EndTimestamp") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s StreamingQueryData) MarshalJSON() ([]byte, error) { + type NoMethod StreamingQueryData + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type StreamingQueryProgress struct { + BatchDuration int64 `json:"batchDuration,omitempty,string"` + BatchId int64 `json:"batchId,omitempty,string"` + DurationMillis map[string]string `json:"durationMillis,omitempty"` + EventTime map[string]string `json:"eventTime,omitempty"` + Name string `json:"name,omitempty"` + ObservedMetrics map[string]string `json:"observedMetrics,omitempty"` + RunId string `json:"runId,omitempty"` + Sink *SinkProgress `json:"sink,omitempty"` + Sources []*SourceProgress `json:"sources,omitempty"` + StateOperators []*StateOperatorProgress `json:"stateOperators,omitempty"` + StreamingQueryProgressId string `json:"streamingQueryProgressId,omitempty"` + Timestamp string `json:"timestamp,omitempty"` + // ForceSendFields is a list of field names (e.g. "BatchDuration") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "BatchDuration") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s StreamingQueryProgress) MarshalJSON() ([]byte, error) { + type NoMethod StreamingQueryProgress + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SubmitJobRequest: A request to submit a job. +type SubmitJobRequest struct { + // Job: Required. The job resource. 
+ Job *Job `json:"job,omitempty"`
+ // RequestId: Optional. A unique ID used to identify the request. If the server
+ // receives two SubmitJobRequest
+ // (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s
+ // with the same ID, then the second request will be ignored and the first Job
+ // created and stored in the backend is returned. It is recommended to always
+ // set this value to a UUID
+ // (https://en.wikipedia.org/wiki/Universally_unique_identifier). The ID must
+ // contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens
+ // (-). The maximum length is 40 characters.
+ RequestId string `json:"requestId,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "Job") to unconditionally
+ // include in API requests. By default, fields with empty or default values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "Job") to include in API requests
+ // with the JSON null value. By default, fields with empty values are omitted
+ // from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s SubmitJobRequest) MarshalJSON() ([]byte, error) {
+ type NoMethod SubmitJobRequest
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// SummarizeSessionSparkApplicationExecutorsResponse: Consolidated summary of
+// executors for a Spark Application.
+type SummarizeSessionSparkApplicationExecutorsResponse struct {
+ // ActiveExecutorSummary: Consolidated summary for active executors.
+ ActiveExecutorSummary *ConsolidatedExecutorSummary `json:"activeExecutorSummary,omitempty"`
+ // ApplicationId: Spark Application Id
+ ApplicationId string `json:"applicationId,omitempty"`
+ // DeadExecutorSummary: Consolidated summary for dead executors.
+ DeadExecutorSummary *ConsolidatedExecutorSummary `json:"deadExecutorSummary,omitempty"`
+ // TotalExecutorSummary: Overall consolidated summary for all executors.
+ TotalExecutorSummary *ConsolidatedExecutorSummary `json:"totalExecutorSummary,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the server.
+ googleapi.ServerResponse `json:"-"`
+ // ForceSendFields is a list of field names (e.g. "ActiveExecutorSummary") to
+ // unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "ActiveExecutorSummary") to
+ // include in API requests with the JSON null value. By default, fields with
+ // empty values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s SummarizeSessionSparkApplicationExecutorsResponse) MarshalJSON() ([]byte, error) {
+ type NoMethod SummarizeSessionSparkApplicationExecutorsResponse
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// SummarizeSessionSparkApplicationJobsResponse: Summary of a Spark
+// Application's jobs.
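+//
+// Editor's sketch of the RequestId idempotency pattern documented on
+// SubmitJobRequest above (assumed usage; the github.com/google/uuid package is
+// illustrative, not a dependency of this file): reuse one ID across retries so
+// a repeated submission returns the originally created Job instead of creating
+// a second one:
+//
+//	req := &SubmitJobRequest{Job: job, RequestId: uuid.NewString()}
+//	// Retrying with the same req.RequestId will not create a duplicate job.
+//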
+type SummarizeSessionSparkApplicationJobsResponse struct { + // JobsSummary: Summary of a Spark Application Jobs + JobsSummary *JobsSummary `json:"jobsSummary,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "JobsSummary") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "JobsSummary") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SummarizeSessionSparkApplicationJobsResponse) MarshalJSON() ([]byte, error) { + type NoMethod SummarizeSessionSparkApplicationJobsResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SummarizeSessionSparkApplicationStageAttemptTasksResponse: Summary of tasks +// for a Spark Application stage attempt. +type SummarizeSessionSparkApplicationStageAttemptTasksResponse struct { + // StageAttemptTasksSummary: Summary of tasks for a Spark Application Stage + // Attempt + StageAttemptTasksSummary *StageAttemptTasksSummary `json:"stageAttemptTasksSummary,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "StageAttemptTasksSummary") + // to unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "StageAttemptTasksSummary") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SummarizeSessionSparkApplicationStageAttemptTasksResponse) MarshalJSON() ([]byte, error) { + type NoMethod SummarizeSessionSparkApplicationStageAttemptTasksResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SummarizeSessionSparkApplicationStagesResponse: Summary of a Spark +// Application stages. +type SummarizeSessionSparkApplicationStagesResponse struct { + // StagesSummary: Summary of a Spark Application Stages + StagesSummary *StagesSummary `json:"stagesSummary,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "StagesSummary") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "StagesSummary") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. 
See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s SummarizeSessionSparkApplicationStagesResponse) MarshalJSON() ([]byte, error) {
+ type NoMethod SummarizeSessionSparkApplicationStagesResponse
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// SummarizeSparkApplicationExecutorsResponse: Consolidated summary of
+// executors for a Spark Application.
+type SummarizeSparkApplicationExecutorsResponse struct {
+ // ActiveExecutorSummary: Consolidated summary for active executors.
+ ActiveExecutorSummary *ConsolidatedExecutorSummary `json:"activeExecutorSummary,omitempty"`
+ // ApplicationId: Spark Application Id
+ ApplicationId string `json:"applicationId,omitempty"`
+ // DeadExecutorSummary: Consolidated summary for dead executors.
+ DeadExecutorSummary *ConsolidatedExecutorSummary `json:"deadExecutorSummary,omitempty"`
+ // TotalExecutorSummary: Overall consolidated summary for all executors.
+ TotalExecutorSummary *ConsolidatedExecutorSummary `json:"totalExecutorSummary,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the server.
+ googleapi.ServerResponse `json:"-"`
+ // ForceSendFields is a list of field names (e.g. "ActiveExecutorSummary") to
+ // unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "ActiveExecutorSummary") to
+ // include in API requests with the JSON null value. By default, fields with
+ // empty values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s SummarizeSparkApplicationExecutorsResponse) MarshalJSON() ([]byte, error) {
+ type NoMethod SummarizeSparkApplicationExecutorsResponse
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// SummarizeSparkApplicationJobsResponse: Summary of a Spark Application's jobs.
+type SummarizeSparkApplicationJobsResponse struct {
+ // JobsSummary: Summary of a Spark Application Jobs
+ JobsSummary *JobsSummary `json:"jobsSummary,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the server.
+ googleapi.ServerResponse `json:"-"`
+ // ForceSendFields is a list of field names (e.g. "JobsSummary") to
+ // unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "JobsSummary") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s SummarizeSparkApplicationJobsResponse) MarshalJSON() ([]byte, error) {
+ type NoMethod SummarizeSparkApplicationJobsResponse
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// SummarizeSparkApplicationStageAttemptTasksResponse: Summary of tasks for a
+// Spark Application stage attempt.
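+//
+// Editor's illustrative sketch (not generated code): the wrapped
+// StageAttemptTasksSummary counters combine into simple derived figures, e.g.
+// a success ratio for the stage attempt:
+//
+//	if s := resp.StageAttemptTasksSummary; s != nil && s.NumTasks > 0 {
+//		ratio := float64(s.NumSuccessTasks) / float64(s.NumTasks)
+//		fmt.Printf("stage %d: %.0f%% of tasks succeeded\n", s.StageId, ratio*100)
+//	}
+//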
+type SummarizeSparkApplicationStageAttemptTasksResponse struct {
+ // StageAttemptTasksSummary: Summary of tasks for a Spark Application Stage
+ // Attempt
+ StageAttemptTasksSummary *StageAttemptTasksSummary `json:"stageAttemptTasksSummary,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the server.
+ googleapi.ServerResponse `json:"-"`
+ // ForceSendFields is a list of field names (e.g. "StageAttemptTasksSummary")
+ // to unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "StageAttemptTasksSummary") to
+ // include in API requests with the JSON null value. By default, fields with
+ // empty values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s SummarizeSparkApplicationStageAttemptTasksResponse) MarshalJSON() ([]byte, error) {
+ type NoMethod SummarizeSparkApplicationStageAttemptTasksResponse
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// SummarizeSparkApplicationStagesResponse: Summary of a Spark Application's
+// stages.
+type SummarizeSparkApplicationStagesResponse struct {
+ // StagesSummary: Summary of a Spark Application Stages
+ StagesSummary *StagesSummary `json:"stagesSummary,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the server.
+ googleapi.ServerResponse `json:"-"`
+ // ForceSendFields is a list of field names (e.g. "StagesSummary") to
+ // unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "StagesSummary") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s SummarizeSparkApplicationStagesResponse) MarshalJSON() ([]byte, error) {
+ type NoMethod SummarizeSparkApplicationStagesResponse
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// TaskData: Data corresponding to tasks created by Spark.
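+//
+// Editor's note (assumed usage, not generated code): the *Millis fields are
+// plain millisecond counts, so they convert directly to time.Duration:
+//
+//	d := time.Duration(td.DurationMillis) * time.Millisecond
+//	fmt.Printf("task %d on %s took %s\n", td.TaskId, td.Host, d)
+//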
+type TaskData struct { + AccumulatorUpdates []*AccumulableInfo `json:"accumulatorUpdates,omitempty"` + Attempt int64 `json:"attempt,omitempty"` + DurationMillis int64 `json:"durationMillis,omitempty,string"` + ErrorMessage string `json:"errorMessage,omitempty"` + ExecutorId string `json:"executorId,omitempty"` + ExecutorLogs map[string]string `json:"executorLogs,omitempty"` + GettingResultTimeMillis int64 `json:"gettingResultTimeMillis,omitempty,string"` + HasMetrics bool `json:"hasMetrics,omitempty"` + Host string `json:"host,omitempty"` + Index int64 `json:"index,omitempty"` + LaunchTime string `json:"launchTime,omitempty"` + PartitionId int64 `json:"partitionId,omitempty"` + ResultFetchStart string `json:"resultFetchStart,omitempty"` + SchedulerDelayMillis int64 `json:"schedulerDelayMillis,omitempty,string"` + Speculative bool `json:"speculative,omitempty"` + StageAttemptId int64 `json:"stageAttemptId,omitempty"` + StageId int64 `json:"stageId,omitempty,string"` + Status string `json:"status,omitempty"` + TaskId int64 `json:"taskId,omitempty,string"` + TaskLocality string `json:"taskLocality,omitempty"` + TaskMetrics *TaskMetrics `json:"taskMetrics,omitempty"` + // ForceSendFields is a list of field names (e.g. "AccumulatorUpdates") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AccumulatorUpdates") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s TaskData) MarshalJSON() ([]byte, error) { + type NoMethod TaskData + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// TaskMetrics: Executor Task Metrics +type TaskMetrics struct { + DiskBytesSpilled int64 `json:"diskBytesSpilled,omitempty,string"` + ExecutorCpuTimeNanos int64 `json:"executorCpuTimeNanos,omitempty,string"` + ExecutorDeserializeCpuTimeNanos int64 `json:"executorDeserializeCpuTimeNanos,omitempty,string"` + ExecutorDeserializeTimeMillis int64 `json:"executorDeserializeTimeMillis,omitempty,string"` + ExecutorRunTimeMillis int64 `json:"executorRunTimeMillis,omitempty,string"` + InputMetrics *InputMetrics `json:"inputMetrics,omitempty"` + JvmGcTimeMillis int64 `json:"jvmGcTimeMillis,omitempty,string"` + MemoryBytesSpilled int64 `json:"memoryBytesSpilled,omitempty,string"` + OutputMetrics *OutputMetrics `json:"outputMetrics,omitempty"` + PeakExecutionMemoryBytes int64 `json:"peakExecutionMemoryBytes,omitempty,string"` + ResultSerializationTimeMillis int64 `json:"resultSerializationTimeMillis,omitempty,string"` + ResultSize int64 `json:"resultSize,omitempty,string"` + ShuffleReadMetrics *ShuffleReadMetrics `json:"shuffleReadMetrics,omitempty"` + ShuffleWriteMetrics *ShuffleWriteMetrics `json:"shuffleWriteMetrics,omitempty"` + // ForceSendFields is a list of field names (e.g. "DiskBytesSpilled") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. 
"DiskBytesSpilled") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s TaskMetrics) MarshalJSON() ([]byte, error) { + type NoMethod TaskMetrics + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type TaskQuantileMetrics struct { + DiskBytesSpilled *Quantiles `json:"diskBytesSpilled,omitempty"` + DurationMillis *Quantiles `json:"durationMillis,omitempty"` + ExecutorCpuTimeNanos *Quantiles `json:"executorCpuTimeNanos,omitempty"` + ExecutorDeserializeCpuTimeNanos *Quantiles `json:"executorDeserializeCpuTimeNanos,omitempty"` + ExecutorDeserializeTimeMillis *Quantiles `json:"executorDeserializeTimeMillis,omitempty"` + ExecutorRunTimeMillis *Quantiles `json:"executorRunTimeMillis,omitempty"` + GettingResultTimeMillis *Quantiles `json:"gettingResultTimeMillis,omitempty"` + InputMetrics *InputQuantileMetrics `json:"inputMetrics,omitempty"` + JvmGcTimeMillis *Quantiles `json:"jvmGcTimeMillis,omitempty"` + MemoryBytesSpilled *Quantiles `json:"memoryBytesSpilled,omitempty"` + OutputMetrics *OutputQuantileMetrics `json:"outputMetrics,omitempty"` + PeakExecutionMemoryBytes *Quantiles `json:"peakExecutionMemoryBytes,omitempty"` + ResultSerializationTimeMillis *Quantiles `json:"resultSerializationTimeMillis,omitempty"` + ResultSize *Quantiles `json:"resultSize,omitempty"` + SchedulerDelayMillis *Quantiles `json:"schedulerDelayMillis,omitempty"` + ShuffleReadMetrics *ShuffleReadQuantileMetrics `json:"shuffleReadMetrics,omitempty"` + ShuffleWriteMetrics *ShuffleWriteQuantileMetrics `json:"shuffleWriteMetrics,omitempty"` + // ForceSendFields is a list of field names (e.g. "DiskBytesSpilled") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "DiskBytesSpilled") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s TaskQuantileMetrics) MarshalJSON() ([]byte, error) { + type NoMethod TaskQuantileMetrics + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// TaskResourceRequest: Resources used per task created by the application. +type TaskResourceRequest struct { + Amount float64 `json:"amount,omitempty"` + ResourceName string `json:"resourceName,omitempty"` + // ForceSendFields is a list of field names (e.g. "Amount") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Amount") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"`
+}
+
+func (s TaskResourceRequest) MarshalJSON() ([]byte, error) {
+ type NoMethod TaskResourceRequest
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+func (s *TaskResourceRequest) UnmarshalJSON(data []byte) error {
+ type NoMethod TaskResourceRequest
+ var s1 struct {
+ Amount gensupport.JSONFloat64 `json:"amount"`
+ *NoMethod
+ }
+ s1.NoMethod = (*NoMethod)(s)
+ if err := json.Unmarshal(data, &s1); err != nil {
+ return err
+ }
+ s.Amount = float64(s1.Amount)
+ return nil
+}
+
+// TemplateParameter: A configurable parameter that replaces one or more fields
+// in the template. Parameterizable fields: - Labels - File uris - Job
+// properties - Job arguments - Script variables - Main class (in HadoopJob and
+// SparkJob) - Zone (in ClusterSelector)
+type TemplateParameter struct {
+ // Description: Optional. Brief description of the parameter. Must not exceed
+ // 1024 characters.
+ Description string `json:"description,omitempty"`
+ // Fields: Required. Paths to all fields that the parameter replaces. A field
+ // is allowed to appear in at most one parameter's list of field paths. A field
+ // path is similar in syntax to a google.protobuf.FieldMask. For example, a
+ // field path that references the zone field of a workflow template's cluster
+ // selector would be specified as placement.clusterSelector.zone. Also, field
+ // paths can reference fields using the following syntax: Values in maps can be
+ // referenced by key: labels['key']
+ // placement.clusterSelector.clusterLabels['key']
+ // placement.managedCluster.labels['key']
+ // placement.clusterSelector.clusterLabels['key'] jobs['step-id'].labels['key']
+ // Jobs in the jobs list can be referenced by step-id:
+ // jobs['step-id'].hadoopJob.mainJarFileUri
+ // jobs['step-id'].hiveJob.queryFileUri
+ // jobs['step-id'].pySparkJob.mainPythonFileUri
+ // jobs['step-id'].hadoopJob.jarFileUris[0]
+ // jobs['step-id'].hadoopJob.archiveUris[0]
+ // jobs['step-id'].hadoopJob.fileUris[0]
+ // jobs['step-id'].pySparkJob.pythonFileUris[0] Items in repeated fields can
+ // be referenced by a zero-based index: jobs['step-id'].sparkJob.args[0] Other
+ // examples: jobs['step-id'].hadoopJob.properties['key']
+ // jobs['step-id'].hadoopJob.args[0]
+ // jobs['step-id'].hiveJob.scriptVariables['key']
+ // jobs['step-id'].hadoopJob.mainJarFileUri placement.clusterSelector.zone. It
+ // may not be possible to parameterize maps and repeated fields in their
+ // entirety since only individual map values and individual items in repeated
+ // fields can be referenced. For example, the following field paths are
+ // invalid: placement.clusterSelector.clusterLabels
+ // jobs['step-id'].sparkJob.args
+ Fields []string `json:"fields,omitempty"`
+ // Name: Required. Parameter name. The parameter name is used as the key, and
+ // paired with the parameter value, which are passed to the template when the
+ // template is instantiated. The name must contain only capital letters (A-Z),
+ // numbers (0-9), and underscores (_), and must not start with a number. The
+ // maximum length is 40 characters.
+ Name string `json:"name,omitempty"`
+ // Validation: Optional. Validation rules to be applied to this parameter's
+ // value.
+ Validation *ParameterValidation `json:"validation,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "Description") to
+ // unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests.
See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Description") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s TemplateParameter) MarshalJSON() ([]byte, error) { + type NoMethod TemplateParameter + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// TerminateSessionRequest: A request to terminate an interactive session. +type TerminateSessionRequest struct { + // RequestId: Optional. A unique ID used to identify the request. If the + // service receives two TerminateSessionRequest + // (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.TerminateSessionRequest)s + // with the same ID, the second request is ignored.Recommendation: Set this + // value to a UUID + // (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must + // contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens + // (-). The maximum length is 40 characters. + RequestId string `json:"requestId,omitempty"` + // ForceSendFields is a list of field names (e.g. "RequestId") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "RequestId") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s TerminateSessionRequest) MarshalJSON() ([]byte, error) { + type NoMethod TerminateSessionRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// TestIamPermissionsRequest: Request message for TestIamPermissions method. +type TestIamPermissionsRequest struct { + // Permissions: The set of permissions to check for the resource. Permissions + // with wildcards (such as * or storage.*) are not allowed. For more + // information see IAM Overview + // (https://cloud.google.com/iam/docs/overview#permissions). + Permissions []string `json:"permissions,omitempty"` + // ForceSendFields is a list of field names (e.g. "Permissions") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Permissions") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { + type NoMethod TestIamPermissionsRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// TestIamPermissionsResponse: Response message for TestIamPermissions method. 
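+//
+// Editor's illustrative sketch (not generated code): because Permissions here
+// is the allowed subset of TestIamPermissionsRequest.Permissions, diffing the
+// two reveals which permissions the caller lacks:
+//
+//	granted := make(map[string]bool)
+//	for _, p := range resp.Permissions {
+//		granted[p] = true
+//	}
+//	for _, p := range req.Permissions {
+//		if !granted[p] {
+//			fmt.Printf("missing permission: %s\n", p)
+//		}
+//	}
+//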
+type TestIamPermissionsResponse struct { + // Permissions: A subset of TestPermissionsRequest.permissions that the caller + // is allowed. + Permissions []string `json:"permissions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Permissions") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Permissions") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { + type NoMethod TestIamPermissionsResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// TrinoJob: A Dataproc job for running Trino (https://trino.io/) queries. +// IMPORTANT: The Dataproc Trino Optional Component +// (https://cloud.google.com/dataproc/docs/concepts/components/trino) must be +// enabled when the cluster is created to submit a Trino job to the cluster. +type TrinoJob struct { + // ClientTags: Optional. Trino client tags to attach to this query + ClientTags []string `json:"clientTags,omitempty"` + // ContinueOnFailure: Optional. Whether to continue executing queries if a + // query fails. The default value is false. Setting to true can be useful when + // executing independent parallel queries. + ContinueOnFailure bool `json:"continueOnFailure,omitempty"` + // LoggingConfig: Optional. The runtime log config for job execution. + LoggingConfig *LoggingConfig `json:"loggingConfig,omitempty"` + // OutputFormat: Optional. The format in which query output will be displayed. + // See the Trino documentation for supported output formats + OutputFormat string `json:"outputFormat,omitempty"` + // Properties: Optional. A mapping of property names to values. Used to set + // Trino session properties + // (https://trino.io/docs/current/sql/set-session.html) Equivalent to using the + // --session flag in the Trino CLI + Properties map[string]string `json:"properties,omitempty"` + // QueryFileUri: The HCFS URI of the script that contains SQL queries. + QueryFileUri string `json:"queryFileUri,omitempty"` + // QueryList: A list of queries. + QueryList *QueryList `json:"queryList,omitempty"` + // ForceSendFields is a list of field names (e.g. "ClientTags") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "ClientTags") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s TrinoJob) MarshalJSON() ([]byte, error) { + type NoMethod TrinoJob + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// UsageMetrics: Usage metrics represent approximate total resources consumed +// by a workload. +type UsageMetrics struct { + // AcceleratorType: Optional. Accelerator type being used, if any + AcceleratorType string `json:"acceleratorType,omitempty"` + // MilliAcceleratorSeconds: Optional. Accelerator usage in (milliAccelerator x + // seconds) (see Dataproc Serverless pricing + // (https://cloud.google.com/dataproc-serverless/pricing)). + MilliAcceleratorSeconds int64 `json:"milliAcceleratorSeconds,omitempty,string"` + // MilliDcuSeconds: Optional. DCU (Dataproc Compute Units) usage in (milliDCU x + // seconds) (see Dataproc Serverless pricing + // (https://cloud.google.com/dataproc-serverless/pricing)). + MilliDcuSeconds int64 `json:"milliDcuSeconds,omitempty,string"` + // ShuffleStorageGbSeconds: Optional. Shuffle storage usage in (GB x seconds) + // (see Dataproc Serverless pricing + // (https://cloud.google.com/dataproc-serverless/pricing)). + ShuffleStorageGbSeconds int64 `json:"shuffleStorageGbSeconds,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "AcceleratorType") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AcceleratorType") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s UsageMetrics) MarshalJSON() ([]byte, error) { + type NoMethod UsageMetrics + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// UsageSnapshot: The usage snapshot represents the resources consumed by a +// workload at a specified time. +type UsageSnapshot struct { + // AcceleratorType: Optional. Accelerator type being used, if any + AcceleratorType string `json:"acceleratorType,omitempty"` + // MilliAccelerator: Optional. Milli (one-thousandth) accelerator. (see + // Dataproc Serverless pricing + // (https://cloud.google.com/dataproc-serverless/pricing)) + MilliAccelerator int64 `json:"milliAccelerator,omitempty,string"` + // MilliDcu: Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) + // (see Dataproc Serverless pricing + // (https://cloud.google.com/dataproc-serverless/pricing)). + MilliDcu int64 `json:"milliDcu,omitempty,string"` + // MilliDcuPremium: Optional. Milli (one-thousandth) Dataproc Compute Units + // (DCUs) charged at premium tier (see Dataproc Serverless pricing + // (https://cloud.google.com/dataproc-serverless/pricing)). + MilliDcuPremium int64 `json:"milliDcuPremium,omitempty,string"` + // ShuffleStorageGb: Optional. Shuffle Storage in gigabytes (GB). (see Dataproc + // Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) + ShuffleStorageGb int64 `json:"shuffleStorageGb,omitempty,string"` + // ShuffleStorageGbPremium: Optional. Shuffle Storage in gigabytes (GB) charged + // at premium tier. 
(see Dataproc Serverless pricing + // (https://cloud.google.com/dataproc-serverless/pricing)) + ShuffleStorageGbPremium int64 `json:"shuffleStorageGbPremium,omitempty,string"` + // SnapshotTime: Optional. The timestamp of the usage snapshot. + SnapshotTime string `json:"snapshotTime,omitempty"` + // ForceSendFields is a list of field names (e.g. "AcceleratorType") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AcceleratorType") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s UsageSnapshot) MarshalJSON() ([]byte, error) { + type NoMethod UsageSnapshot + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ValueValidation: Validation based on a list of allowed values. +type ValueValidation struct { + // Values: Required. List of allowed values for the parameter. + Values []string `json:"values,omitempty"` + // ForceSendFields is a list of field names (e.g. "Values") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Values") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ValueValidation) MarshalJSON() ([]byte, error) { + type NoMethod ValueValidation + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// VirtualClusterConfig: The Dataproc cluster config for a cluster that does +// not directly control the underlying compute resources, such as a +// Dataproc-on-GKE cluster +// (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview). +type VirtualClusterConfig struct { + // AuxiliaryServicesConfig: Optional. Configuration of auxiliary services used + // by this cluster. + AuxiliaryServicesConfig *AuxiliaryServicesConfig `json:"auxiliaryServicesConfig,omitempty"` + // KubernetesClusterConfig: Required. The configuration for running the + // Dataproc cluster on Kubernetes. + KubernetesClusterConfig *KubernetesClusterConfig `json:"kubernetesClusterConfig,omitempty"` + // StagingBucket: Optional. A Cloud Storage bucket used to stage job + // dependencies, config files, and job driver console output. If you do not + // specify a staging bucket, Cloud Dataproc will determine a Cloud Storage + // location (US, ASIA, or EU) for your cluster's staging bucket according to + // the Compute Engine zone where your cluster is deployed, and then create and + // manage this project-level, per-location bucket (see Dataproc staging and + // temp buckets + // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + // This field requires a Cloud Storage bucket name, not a gs://... URI to a + // Cloud Storage bucket. 
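// For example, "my-staging-bucket" is accepted here, while
// "gs://my-staging-bucket" is not (the bucket name is illustrative).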
+ StagingBucket string `json:"stagingBucket,omitempty"` + // ForceSendFields is a list of field names (e.g. "AuxiliaryServicesConfig") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AuxiliaryServicesConfig") to + // include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s VirtualClusterConfig) MarshalJSON() ([]byte, error) { + type NoMethod VirtualClusterConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// WorkflowGraph: The workflow graph. +type WorkflowGraph struct { + // Nodes: Output only. The workflow nodes. + Nodes []*WorkflowNode `json:"nodes,omitempty"` + // ForceSendFields is a list of field names (e.g. "Nodes") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Nodes") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s WorkflowGraph) MarshalJSON() ([]byte, error) { + type NoMethod WorkflowGraph + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// WorkflowMetadata: A Dataproc workflow template resource. +type WorkflowMetadata struct { + // ClusterName: Output only. The name of the target cluster. + ClusterName string `json:"clusterName,omitempty"` + // ClusterUuid: Output only. The UUID of target cluster. + ClusterUuid string `json:"clusterUuid,omitempty"` + // CreateCluster: Output only. The create cluster operation metadata. + CreateCluster *ClusterOperation `json:"createCluster,omitempty"` + // DagEndTime: Output only. DAG end time, only set for workflows with + // dag_timeout when DAG ends. + DagEndTime string `json:"dagEndTime,omitempty"` + // DagStartTime: Output only. DAG start time, only set for workflows with + // dag_timeout when DAG begins. + DagStartTime string `json:"dagStartTime,omitempty"` + // DagTimeout: Output only. The timeout duration for the DAG of jobs, expressed + // in seconds (see JSON representation of duration + // (https://developers.google.com/protocol-buffers/docs/proto3#json)). + DagTimeout string `json:"dagTimeout,omitempty"` + // DeleteCluster: Output only. The delete cluster operation metadata. + DeleteCluster *ClusterOperation `json:"deleteCluster,omitempty"` + // EndTime: Output only. Workflow end time. + EndTime string `json:"endTime,omitempty"` + // Graph: Output only. The workflow graph. + Graph *WorkflowGraph `json:"graph,omitempty"` + // Parameters: Map from parameter names to values that were used for those + // parameters. + Parameters map[string]string `json:"parameters,omitempty"` + // StartTime: Output only. Workflow start time. + StartTime string `json:"startTime,omitempty"` + // State: Output only. The workflow state. 
+ // + // Possible values: + // "UNKNOWN" - Unused. + // "PENDING" - The operation has been created. + // "RUNNING" - The operation is running. + // "DONE" - The operation is done; either cancelled or completed. + State string `json:"state,omitempty"` + // Template: Output only. The resource name of the workflow template as + // described in https://cloud.google.com/apis/design/resource_names. For + // projects.regions.workflowTemplates, the resource name of the template has + // the following format: + // projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For + // projects.locations.workflowTemplates, the resource name of the template has + // the following format: + // projects/{project_id}/locations/{location}/workflowTemplates/{template_id} + Template string `json:"template,omitempty"` + // Version: Output only. The version of template at the time of workflow + // instantiation. + Version int64 `json:"version,omitempty"` + // ForceSendFields is a list of field names (e.g. "ClusterName") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "ClusterName") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s WorkflowMetadata) MarshalJSON() ([]byte, error) { + type NoMethod WorkflowMetadata + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// WorkflowNode: The workflow node. +type WorkflowNode struct { + // Error: Output only. The error detail. + Error string `json:"error,omitempty"` + // JobId: Output only. The job id; populated after the node enters RUNNING + // state. + JobId string `json:"jobId,omitempty"` + // PrerequisiteStepIds: Output only. Node's prerequisite nodes. + PrerequisiteStepIds []string `json:"prerequisiteStepIds,omitempty"` + // State: Output only. The node state. + // + // Possible values: + // "NODE_STATE_UNSPECIFIED" - State is unspecified. + // "BLOCKED" - The node is awaiting prerequisite node to finish. + // "RUNNABLE" - The node is runnable but not running. + // "RUNNING" - The node is running. + // "COMPLETED" - The node completed successfully. + // "FAILED" - The node failed. A node can be marked FAILED because its + // ancestor or peer failed. + State string `json:"state,omitempty"` + // StepId: Output only. The name of the node. + StepId string `json:"stepId,omitempty"` + // ForceSendFields is a list of field names (e.g. "Error") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Error") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s WorkflowNode) MarshalJSON() ([]byte, error) { + type NoMethod WorkflowNode + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// WorkflowTemplate: A Dataproc workflow template resource. +type WorkflowTemplate struct { + // CreateTime: Output only. The time template was created. + CreateTime string `json:"createTime,omitempty"` + // DagTimeout: Optional. Timeout duration for the DAG of jobs, expressed in + // seconds (see JSON representation of duration + // (https://developers.google.com/protocol-buffers/docs/proto3#json)). The + // timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). + // The timer begins when the first job is submitted. If the workflow is running + // at the end of the timeout period, any remaining jobs are cancelled, the + // workflow is ended, and if the workflow was running on a managed cluster, the + // cluster is deleted. + DagTimeout string `json:"dagTimeout,omitempty"` + // EncryptionConfig: Optional. Encryption settings for encrypting workflow + // template job arguments. + EncryptionConfig *GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig `json:"encryptionConfig,omitempty"` + Id string `json:"id,omitempty"` + // Jobs: Required. The Directed Acyclic Graph of Jobs to submit. + Jobs []*OrderedJob `json:"jobs,omitempty"` + // Labels: Optional. The labels to associate with this template. These labels + // will be propagated to all jobs and clusters created by the workflow + // instance.Label keys must contain 1 to 63 characters, and must conform to RFC + // 1035 (https://www.ietf.org/rfc/rfc1035.txt).Label values may be empty, but, + // if present, must contain 1 to 63 characters, and must conform to RFC 1035 + // (https://www.ietf.org/rfc/rfc1035.txt).No more than 32 labels can be + // associated with a template. + Labels map[string]string `json:"labels,omitempty"` + // Name: Output only. The resource name of the workflow template, as described + // in https://cloud.google.com/apis/design/resource_names. For + // projects.regions.workflowTemplates, the resource name of the template has + // the following format: + // projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For + // projects.locations.workflowTemplates, the resource name of the template has + // the following format: + // projects/{project_id}/locations/{location}/workflowTemplates/{template_id} + Name string `json:"name,omitempty"` + // Parameters: Optional. Template parameters whose values are substituted into + // the template. Values for parameters must be provided when the template is + // instantiated. + Parameters []*TemplateParameter `json:"parameters,omitempty"` + // Placement: Required. WorkflowTemplate scheduling information. + Placement *WorkflowTemplatePlacement `json:"placement,omitempty"` + // UpdateTime: Output only. The time template was last updated. + UpdateTime string `json:"updateTime,omitempty"` + // Version: Optional. Used to perform a consistent read-modify-write.This field + // should be left blank for a CreateWorkflowTemplate request. It is required + // for an UpdateWorkflowTemplate request, and must match the current server + // version. A typical update template flow would fetch the current template + // with a GetWorkflowTemplate request, which will return the current template + // with the version field filled in with the current server version. 
The user + // updates other fields in the template, then returns it as part of the + // UpdateWorkflowTemplate request. + Version int64 `json:"version,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "CreateTime") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s WorkflowTemplate) MarshalJSON() ([]byte, error) { + type NoMethod WorkflowTemplate + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// WorkflowTemplatePlacement: Specifies workflow execution target.Either +// managed_cluster or cluster_selector is required. +type WorkflowTemplatePlacement struct { + // ClusterSelector: Optional. A selector that chooses target cluster for jobs + // based on metadata.The selector is evaluated at the time each job is + // submitted. + ClusterSelector *ClusterSelector `json:"clusterSelector,omitempty"` + // ManagedCluster: A cluster that is managed by the workflow. + ManagedCluster *ManagedCluster `json:"managedCluster,omitempty"` + // ForceSendFields is a list of field names (e.g. "ClusterSelector") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "ClusterSelector") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s WorkflowTemplatePlacement) MarshalJSON() ([]byte, error) { + type NoMethod WorkflowTemplatePlacement + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// WriteSessionSparkApplicationContextRequest: Write Spark Application data to +// internal storage systems +type WriteSessionSparkApplicationContextRequest struct { + // Parent: Required. Parent (Batch) resource reference. + Parent string `json:"parent,omitempty"` + // SparkWrapperObjects: Required. The batch of spark application context + // objects sent for ingestion. + SparkWrapperObjects []*SparkWrapperObject `json:"sparkWrapperObjects,omitempty"` + // ForceSendFields is a list of field names (e.g. "Parent") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Parent") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s WriteSessionSparkApplicationContextRequest) MarshalJSON() ([]byte, error) { + type NoMethod WriteSessionSparkApplicationContextRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// WriteSessionSparkApplicationContextResponse: Response returned as an +// acknowledgement of receipt of data. +type WriteSessionSparkApplicationContextResponse struct { + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` +} + +// WriteSparkApplicationContextRequest: Write Spark Application data to +// internal storage systems +type WriteSparkApplicationContextRequest struct { + // Parent: Required. Parent (Batch) resource reference. + Parent string `json:"parent,omitempty"` + SparkWrapperObjects []*SparkWrapperObject `json:"sparkWrapperObjects,omitempty"` + // ForceSendFields is a list of field names (e.g. "Parent") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Parent") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s WriteSparkApplicationContextRequest) MarshalJSON() ([]byte, error) { + type NoMethod WriteSparkApplicationContextRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// WriteSparkApplicationContextResponse: Response returned as an +// acknowledgement of receipt of data. +type WriteSparkApplicationContextResponse struct { + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` +} + +// YarnApplication: A YARN application created by a job. Application +// information is a subset of +// org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto.Beta Feature: +// This report is available for testing purposes only. It may be changed before +// final release. +type YarnApplication struct { + // Name: Required. The application name. + Name string `json:"name,omitempty"` + // Progress: Required. The numerical progress of the application, from 1 to + // 100. + Progress float64 `json:"progress,omitempty"` + // State: Required. The application state. + // + // Possible values: + // "STATE_UNSPECIFIED" - Status is unspecified. + // "NEW" - Status is NEW. + // "NEW_SAVING" - Status is NEW_SAVING. + // "SUBMITTED" - Status is SUBMITTED. + // "ACCEPTED" - Status is ACCEPTED. + // "RUNNING" - Status is RUNNING. + // "FINISHED" - Status is FINISHED. + // "FAILED" - Status is FAILED. + // "KILLED" - Status is KILLED. + State string `json:"state,omitempty"` + // TrackingUrl: Optional. The HTTP URL of the ApplicationMaster, HistoryServer, + // or TimelineServer that provides application-specific information. The URL + // uses the internal hostname, and requires a proxy server for resolution and, + // possibly, access. + TrackingUrl string `json:"trackingUrl,omitempty"` + // ForceSendFields is a list of field names (e.g. "Name") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. 
See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Name") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s YarnApplication) MarshalJSON() ([]byte, error) { + type NoMethod YarnApplication + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +func (s *YarnApplication) UnmarshalJSON(data []byte) error { + type NoMethod YarnApplication + var s1 struct { + Progress gensupport.JSONFloat64 `json:"progress"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Progress = float64(s1.Progress) + return nil +} + +type ProjectsLocationsAutoscalingPoliciesCreateCall struct { + s *Service + parent string + autoscalingpolicy *AutoscalingPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates new autoscaling policy. +// +// - parent: The "resource name" of the region or location, as described in +// https://cloud.google.com/apis/design/resource_names. For +// projects.regions.autoscalingPolicies.create, the resource name of the +// region has the following format: projects/{project_id}/regions/{region} +// For projects.locations.autoscalingPolicies.create, the resource name of +// the location has the following format: +// projects/{project_id}/locations/{location}. +func (r *ProjectsLocationsAutoscalingPoliciesService) Create(parent string, autoscalingpolicy *AutoscalingPolicy) *ProjectsLocationsAutoscalingPoliciesCreateCall { + c := &ProjectsLocationsAutoscalingPoliciesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.autoscalingpolicy = autoscalingpolicy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsAutoscalingPoliciesCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsAutoscalingPoliciesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsAutoscalingPoliciesCreateCall) Context(ctx context.Context) *ProjectsLocationsAutoscalingPoliciesCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsAutoscalingPoliciesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsAutoscalingPoliciesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscalingpolicy) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/autoscalingPolicies") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.autoscalingPolicies.create" call. +// Any non-2xx status code is an error. Response headers are in either +// *AutoscalingPolicy.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsLocationsAutoscalingPoliciesCreateCall) Do(opts ...googleapi.CallOption) (*AutoscalingPolicy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AutoscalingPolicy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsAutoscalingPoliciesDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes an autoscaling policy. It is an error to delete an +// autoscaling policy that is in use by one or more clusters. +// +// - name: The "resource name" of the autoscaling policy, as described in +// https://cloud.google.com/apis/design/resource_names. For +// projects.regions.autoscalingPolicies.delete, the resource name of the +// policy has the following format: +// projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For +// projects.locations.autoscalingPolicies.delete, the resource name of the +// policy has the following format: +// projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}. +func (r *ProjectsLocationsAutoscalingPoliciesService) Delete(name string) *ProjectsLocationsAutoscalingPoliciesDeleteCall { + c := &ProjectsLocationsAutoscalingPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsAutoscalingPoliciesDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsAutoscalingPoliciesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsAutoscalingPoliciesDeleteCall) Context(ctx context.Context) *ProjectsLocationsAutoscalingPoliciesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
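// Every generated call in this package follows the same builder shape:
// construct the call, optionally chain Fields, Context, or Header, then
// invoke Do. A minimal sketch for Create, assuming a configured *Service
// named svc and illustrative parent and policy values:
//
//	policy := &AutoscalingPolicy{
//		Id: "my-policy", // remaining policy fields elided
//	}
//	created, err := svc.Projects.Locations.AutoscalingPolicies.
//		Create("projects/my-project/locations/us-central1", policy).
//		Context(ctx).
//		Do()
//	if err != nil {
//		// handle err
//	}
//	_ = created // server-populated AutoscalingPolicy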
+func (c *ProjectsLocationsAutoscalingPoliciesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsAutoscalingPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.autoscalingPolicies.delete" call. +// Any non-2xx status code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsAutoscalingPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsAutoscalingPoliciesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves autoscaling policy. +// +// - name: The "resource name" of the autoscaling policy, as described in +// https://cloud.google.com/apis/design/resource_names. For +// projects.regions.autoscalingPolicies.get, the resource name of the policy +// has the following format: +// projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For +// projects.locations.autoscalingPolicies.get, the resource name of the +// policy has the following format: +// projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}. +func (r *ProjectsLocationsAutoscalingPoliciesService) Get(name string) *ProjectsLocationsAutoscalingPoliciesGetCall { + c := &ProjectsLocationsAutoscalingPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsAutoscalingPoliciesGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsAutoscalingPoliciesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. 
This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsAutoscalingPoliciesGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsAutoscalingPoliciesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsAutoscalingPoliciesGetCall) Context(ctx context.Context) *ProjectsLocationsAutoscalingPoliciesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsAutoscalingPoliciesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsAutoscalingPoliciesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.autoscalingPolicies.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *AutoscalingPolicy.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsLocationsAutoscalingPoliciesGetCall) Do(opts ...googleapi.CallOption) (*AutoscalingPolicy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AutoscalingPolicy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall struct { + s *Service + resource string + getiampolicyrequest *GetIamPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for a resource. Returns an +// empty policy if the resource exists and does not have a policy set. +// +// - resource: REQUIRED: The resource for which the policy is being requested. +// See Resource names (https://cloud.google.com/apis/design/resource_names) +// for the appropriate value for this field. 
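// A sketch of the conditional-read pattern enabled by IfNoneMatch, assuming
// svc as above and an etag retained from an earlier response (both
// illustrative); googleapi.IsNotModified distinguishes the 304 case:
//
//	call := svc.Projects.Locations.AutoscalingPolicies.
//		Get("projects/my-project/locations/us-central1/autoscalingPolicies/my-policy")
//	call.IfNoneMatch(prevEtag)
//	policy, err := call.Do()
//	if googleapi.IsNotModified(err) {
//		// the cached copy is still current; reuse it
//	} else if err != nil {
//		// handle err
//	}
//	_ = policy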
+func (r *ProjectsLocationsAutoscalingPoliciesService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall { + c := &ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.getiampolicyrequest = getiampolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.getiampolicyrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.autoscalingPolicies.getIamPolicy" call. +// Any non-2xx status code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsAutoscalingPoliciesListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists autoscaling policies in the project. 
+// +// - parent: The "resource name" of the region or location, as described in +// https://cloud.google.com/apis/design/resource_names. For +// projects.regions.autoscalingPolicies.list, the resource name of the region +// has the following format: projects/{project_id}/regions/{region} For +// projects.locations.autoscalingPolicies.list, the resource name of the +// location has the following format: +// projects/{project_id}/locations/{location}. +func (r *ProjectsLocationsAutoscalingPoliciesService) List(parent string) *ProjectsLocationsAutoscalingPoliciesListCall { + c := &ProjectsLocationsAutoscalingPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number of +// results to return in each response. Must be less than or equal to 1000. +// Defaults to 100. +func (c *ProjectsLocationsAutoscalingPoliciesListCall) PageSize(pageSize int64) *ProjectsLocationsAutoscalingPoliciesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The page token, returned +// by a previous call, to request the next page of results. +func (c *ProjectsLocationsAutoscalingPoliciesListCall) PageToken(pageToken string) *ProjectsLocationsAutoscalingPoliciesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsAutoscalingPoliciesListCall) Fields(s ...googleapi.Field) *ProjectsLocationsAutoscalingPoliciesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsAutoscalingPoliciesListCall) IfNoneMatch(entityTag string) *ProjectsLocationsAutoscalingPoliciesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsAutoscalingPoliciesListCall) Context(ctx context.Context) *ProjectsLocationsAutoscalingPoliciesListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsAutoscalingPoliciesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsAutoscalingPoliciesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/autoscalingPolicies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.autoscalingPolicies.list" call. +// Any non-2xx status code is an error. 
Response headers are in either +// *ListAutoscalingPoliciesResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsAutoscalingPoliciesListCall) Do(opts ...googleapi.CallOption) (*ListAutoscalingPoliciesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ListAutoscalingPoliciesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsAutoscalingPoliciesListCall) Pages(ctx context.Context, f func(*ListAutoscalingPoliciesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall struct { + s *Service + resource string + setiampolicyrequest *SetIamPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified resource. +// Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and +// PERMISSION_DENIED errors. +// +// - resource: REQUIRED: The resource for which the policy is being specified. +// See Resource names (https://cloud.google.com/apis/design/resource_names) +// for the appropriate value for this field. +func (r *ProjectsLocationsAutoscalingPoliciesService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall { + c := &ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.setiampolicyrequest = setiampolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
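// A sketch of page iteration with the Pages helper above, assuming svc and
// an illustrative parent; Pages advances pageToken automatically, stops when
// NextPageToken is empty, and a non-nil error from the callback halts
// iteration (the Policies field name is taken from the generated
// ListAutoscalingPoliciesResponse elsewhere in this file):
//
//	err := svc.Projects.Locations.AutoscalingPolicies.
//		List("projects/my-project/locations/us-central1").
//		PageSize(100).
//		Pages(ctx, func(page *ListAutoscalingPoliciesResponse) error {
//			for _, p := range page.Policies {
//				_ = p.Id
//			}
//			return nil
//		})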
+func (c *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.autoscalingPolicies.setIamPolicy" call. +// Any non-2xx status code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall struct { + s *Service + resource string + testiampermissionsrequest *TestIamPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the specified +// resource. If the resource does not exist, this will return an empty set of +// permissions, not a NOT_FOUND error.Note: This operation is designed to be +// used for building permission-aware UIs and command-line tools, not for +// authorization checking. This operation may "fail open" without warning. +// +// - resource: REQUIRED: The resource for which the policy detail is being +// requested. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the appropriate +// value for this field. +func (r *ProjectsLocationsAutoscalingPoliciesService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall { + c := &ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.testiampermissionsrequest = testiampermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall) Context(ctx context.Context) *ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.autoscalingPolicies.testIamPermissions" call. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TestIamPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsAutoscalingPoliciesUpdateCall struct { + s *Service + name string + autoscalingpolicy *AutoscalingPolicy + urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Create: Creates new autoscaling policy. -// -// - parent: The "resource name" of the region or location, as described in -// https://cloud.google.com/apis/design/resource_names. 
For -// projects.regions.autoscalingPolicies.create, the resource name of the -// region has the following format: projects/{project_id}/regions/{region} -// For projects.locations.autoscalingPolicies.create, the resource name of -// the location has the following format: -// projects/{project_id}/locations/{location}. -func (r *ProjectsLocationsAutoscalingPoliciesService) Create(parent string, autoscalingpolicy *AutoscalingPolicy) *ProjectsLocationsAutoscalingPoliciesCreateCall { - c := &ProjectsLocationsAutoscalingPoliciesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.autoscalingpolicy = autoscalingpolicy +// Update: Updates (replaces) autoscaling policy.Disabled check for +// update_mask, because all updates will be full replacements. +// +// - name: Output only. The "resource name" of the autoscaling policy, as +// described in https://cloud.google.com/apis/design/resource_names. For +// projects.regions.autoscalingPolicies, the resource name of the policy has +// the following format: +// projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For +// projects.locations.autoscalingPolicies, the resource name of the policy +// has the following format: +// projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}. +func (r *ProjectsLocationsAutoscalingPoliciesService) Update(name string, autoscalingpolicy *AutoscalingPolicy) *ProjectsLocationsAutoscalingPoliciesUpdateCall { + c := &ProjectsLocationsAutoscalingPoliciesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.autoscalingpolicy = autoscalingpolicy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsAutoscalingPoliciesUpdateCall) Fields(s ...googleapi.Field) *ProjectsLocationsAutoscalingPoliciesUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsAutoscalingPoliciesUpdateCall) Context(ctx context.Context) *ProjectsLocationsAutoscalingPoliciesUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsAutoscalingPoliciesUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsAutoscalingPoliciesUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscalingpolicy) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.autoscalingPolicies.update" call. +// Any non-2xx status code is an error. Response headers are in either +// *AutoscalingPolicy.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsLocationsAutoscalingPoliciesUpdateCall) Do(opts ...googleapi.CallOption) (*AutoscalingPolicy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AutoscalingPolicy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsBatchesAnalyzeCall struct { + s *Service + name string + analyzebatchrequest *AnalyzeBatchRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Analyze: Analyze a Batch for possible recommendations and insights. +// +// - name: The fully qualified name of the batch to analyze in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID". +func (r *ProjectsLocationsBatchesService) Analyze(name string, analyzebatchrequest *AnalyzeBatchRequest) *ProjectsLocationsBatchesAnalyzeCall { + c := &ProjectsLocationsBatchesAnalyzeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.analyzebatchrequest = analyzebatchrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsBatchesAnalyzeCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesAnalyzeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsBatchesAnalyzeCall) Context(ctx context.Context) *ProjectsLocationsBatchesAnalyzeCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsBatchesAnalyzeCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBatchesAnalyzeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.analyzebatchrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:analyze") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.batches.analyze" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsBatchesAnalyzeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsBatchesCreateCall struct { + s *Service + parent string + batch *Batch + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a batch workload that executes asynchronously. +// +// - parent: The parent resource where this batch will be created. +func (r *ProjectsLocationsBatchesService) Create(parent string, batch *Batch) *ProjectsLocationsBatchesCreateCall { + c := &ProjectsLocationsBatchesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.batch = batch + return c +} + +// BatchId sets the optional parameter "batchId": The ID to use for the batch, +// which will become the final component of the batch's resource name.This +// value must be 4-63 characters. Valid characters are /[a-z][0-9]-/. +func (c *ProjectsLocationsBatchesCreateCall) BatchId(batchId string) *ProjectsLocationsBatchesCreateCall { + c.urlParams_.Set("batchId", batchId) + return c +} + +// RequestId sets the optional parameter "requestId": A unique ID used to +// identify the request. If the service receives two CreateBatchRequest +// (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s +// with the same request_id, the second request is ignored and the Operation +// that corresponds to the first Batch created and stored in the backend is +// returned.Recommendation: Set this value to a UUID +// (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must +// contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens +// (-). The maximum length is 40 characters. +func (c *ProjectsLocationsBatchesCreateCall) RequestId(requestId string) *ProjectsLocationsBatchesCreateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsBatchesCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsBatchesCreateCall) Context(ctx context.Context) *ProjectsLocationsBatchesCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
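+//
+// For illustration only (an editorial sketch, not part of the generated
+// code): creating a batch with an idempotency token, assuming svc is a
+// *Service built with NewService; the header name below is hypothetical:
+//
+//	call := svc.Projects.Locations.Batches.Create(parent, batch).
+//		BatchId("example-batch").
+//		RequestId("123e4567-e89b-12d3-a456-426614174000")
+//	call.Header().Set("X-Example-Debug", "on") // hypothetical header
+//	op, err := call.Do()
+//	// handle err; poll the returned long-running Operation until op.Done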
+func (c *ProjectsLocationsBatchesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBatchesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.batch) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/batches") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.batches.create" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsBatchesCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsBatchesDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the batch workload resource. If the batch is not in a +// CANCELLED, SUCCEEDED or FAILED State, the delete operation fails and the +// response returns FAILED_PRECONDITION. +// +// - name: The fully qualified name of the batch to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID". +func (r *ProjectsLocationsBatchesService) Delete(name string) *ProjectsLocationsBatchesDeleteCall { + c := &ProjectsLocationsBatchesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsBatchesDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsBatchesDeleteCall) Context(ctx context.Context) *ProjectsLocationsBatchesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
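+//
+// For illustration only (editorial sketch): deleting a batch that has already
+// reached a terminal state; per the method doc, a batch that is not
+// CANCELLED, SUCCEEDED or FAILED surfaces FAILED_PRECONDITION as a
+// *googleapi.Error (the errors package is assumed imported by the caller):
+//
+//	if _, err := svc.Projects.Locations.Batches.Delete(name).Do(); err != nil {
+//		var gerr *googleapi.Error
+//		if errors.As(err, &gerr) {
+//			// inspect gerr.Code and gerr.Message
+//		}
+//	}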
+func (c *ProjectsLocationsBatchesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBatchesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.batches.delete" call. +// Any non-2xx status code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsBatchesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsBatchesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the batch workload resource representation. +// +// - name: The fully qualified name of the batch to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID". +func (r *ProjectsLocationsBatchesService) Get(name string) *ProjectsLocationsBatchesGetCall { + c := &ProjectsLocationsBatchesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsBatchesGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsBatchesGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsBatchesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. 
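+//
+// For illustration only (editorial sketch): bounding a Get call with a
+// deadline, assuming svc is a constructed *Service:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	batch, err := svc.Projects.Locations.Batches.Get(name).Context(ctx).Do()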
+func (c *ProjectsLocationsBatchesGetCall) Context(ctx context.Context) *ProjectsLocationsBatchesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsBatchesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBatchesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.batches.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *Batch.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsBatchesGetCall) Do(opts ...googleapi.CallOption) (*Batch, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Batch{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsBatchesListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists batch workloads. +// +// - parent: The parent, which owns this collection of batches. +func (r *ProjectsLocationsBatchesService) List(parent string) *ProjectsLocationsBatchesListCall { + c := &ProjectsLocationsBatchesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// Filter sets the optional parameter "filter": A filter for the batches to +// return in the response.A filter is a logical expression constraining the +// values of various fields in each batch resource. Filters are case sensitive, +// and may contain multiple clauses combined with logical operators (AND/OR). +// Supported fields are batch_id, batch_uuid, state, create_time, and +// labels.e.g. state = RUNNING and create_time < "2023-01-01T00:00:00Z" filters +// for batches in state RUNNING that were created before 2023-01-01. 
state = +// RUNNING and labels.environment=production filters for batches in a +// RUNNING state that have a production environment label. See +// https://google.aip.dev/assets/misc/ebnf-filtering.txt for a detailed +// description of the filter syntax and a list of supported comparisons. +func (c *ProjectsLocationsBatchesListCall) Filter(filter string) *ProjectsLocationsBatchesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// OrderBy sets the optional parameter "orderBy": Field(s) on which to sort the +// list of batches. Currently the only supported sort orders are unspecified +// (empty) and create_time desc to sort by most recently created batches +// first. See https://google.aip.dev/132#ordering for more details. +func (c *ProjectsLocationsBatchesListCall) OrderBy(orderBy string) *ProjectsLocationsBatchesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number of +// batches to return in each response. The service may return fewer than this +// value. The default page size is 20; the maximum page size is 1000. +func (c *ProjectsLocationsBatchesListCall) PageSize(pageSize int64) *ProjectsLocationsBatchesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A page token received +// from a previous ListBatches call. Provide this token to retrieve the +// subsequent page. +func (c *ProjectsLocationsBatchesListCall) PageToken(pageToken string) *ProjectsLocationsBatchesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsBatchesListCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsBatchesListCall) IfNoneMatch(entityTag string) *ProjectsLocationsBatchesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsBatchesListCall) Context(ctx context.Context) *ProjectsLocationsBatchesListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsBatchesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBatchesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/batches") + urls += "?"
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.batches.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *ListBatchesResponse.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsLocationsBatchesListCall) Do(opts ...googleapi.CallOption) (*ListBatchesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ListBatchesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsBatchesListCall) Pages(ctx context.Context, f func(*ListBatchesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsLocationsBatchesSparkApplicationsAccessCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Access: Obtain high level information corresponding to a single Spark +// Application. +// +// - name: The fully qualified name of the batch to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplic +// ations/APPLICATION_ID". +func (r *ProjectsLocationsBatchesSparkApplicationsService) Access(name string) *ProjectsLocationsBatchesSparkApplicationsAccessCall { + c := &ProjectsLocationsBatchesSparkApplicationsAccessCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Parent sets the optional parameter "parent": Required. Parent (Batch) +// resource reference. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessCall) Parent(parent string) *ProjectsLocationsBatchesSparkApplicationsAccessCall { + c.urlParams_.Set("parent", parent) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. 
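+//
+// For illustration only (editorial sketch): the field name passed to Fields
+// below is hypothetical and only shows the shape of a partial-response call:
+//
+//	resp, err := svc.Projects.Locations.Batches.SparkApplications.
+//		Access(name).
+//		Parent(parent).
+//		Fields("application"). // hypothetical field name
+//		Do()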
+func (c *ProjectsLocationsBatchesSparkApplicationsAccessCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesSparkApplicationsAccessCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessCall) IfNoneMatch(entityTag string) *ProjectsLocationsBatchesSparkApplicationsAccessCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessCall) Context(ctx context.Context) *ProjectsLocationsBatchesSparkApplicationsAccessCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBatchesSparkApplicationsAccessCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:access") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.batches.sparkApplications.access" call. +// Any non-2xx status code is an error. Response headers are in either +// *AccessSparkApplicationResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessCall) Do(opts ...googleapi.CallOption) (*AccessSparkApplicationResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
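+	// A 304 Not Modified response (possible when IfNoneMatch was set) carries
+	// no body worth decoding, so it is converted into a *googleapi.Error
+	// before the transport error itself is examined.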
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AccessSparkApplicationResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsBatchesSparkApplicationsAccessEnvironmentInfoCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AccessEnvironmentInfo: Obtain environment details for a Spark Application +// +// - name: The fully qualified name of the batch to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplic +// ations/APPLICATION_ID". +func (r *ProjectsLocationsBatchesSparkApplicationsService) AccessEnvironmentInfo(name string) *ProjectsLocationsBatchesSparkApplicationsAccessEnvironmentInfoCall { + c := &ProjectsLocationsBatchesSparkApplicationsAccessEnvironmentInfoCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Parent sets the optional parameter "parent": Required. Parent (Batch) +// resource reference. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessEnvironmentInfoCall) Parent(parent string) *ProjectsLocationsBatchesSparkApplicationsAccessEnvironmentInfoCall { + c.urlParams_.Set("parent", parent) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessEnvironmentInfoCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesSparkApplicationsAccessEnvironmentInfoCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessEnvironmentInfoCall) IfNoneMatch(entityTag string) *ProjectsLocationsBatchesSparkApplicationsAccessEnvironmentInfoCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessEnvironmentInfoCall) Context(ctx context.Context) *ProjectsLocationsBatchesSparkApplicationsAccessEnvironmentInfoCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
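+//
+// For illustration only (editorial sketch): fetching environment details for
+// a single Spark application of a batch, assuming svc is a constructed
+// *Service:
+//
+//	info, err := svc.Projects.Locations.Batches.SparkApplications.
+//		AccessEnvironmentInfo(name).
+//		Parent(parent).
+//		Do()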
+func (c *ProjectsLocationsBatchesSparkApplicationsAccessEnvironmentInfoCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBatchesSparkApplicationsAccessEnvironmentInfoCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:accessEnvironmentInfo") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.batches.sparkApplications.accessEnvironmentInfo" call. +// Any non-2xx status code is an error. Response headers are in either +// *AccessSparkApplicationEnvironmentInfoResponse.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessEnvironmentInfoCall) Do(opts ...googleapi.CallOption) (*AccessSparkApplicationEnvironmentInfoResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AccessSparkApplicationEnvironmentInfoResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsBatchesSparkApplicationsAccessJobCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AccessJob: Obtain data corresponding to a spark job for a Spark Application. +// +// - name: The fully qualified name of the batch to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplic +// ations/APPLICATION_ID". +func (r *ProjectsLocationsBatchesSparkApplicationsService) AccessJob(name string) *ProjectsLocationsBatchesSparkApplicationsAccessJobCall { + c := &ProjectsLocationsBatchesSparkApplicationsAccessJobCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// JobId sets the optional parameter "jobId": Required. Job ID to fetch data +// for. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessJobCall) JobId(jobId int64) *ProjectsLocationsBatchesSparkApplicationsAccessJobCall { + c.urlParams_.Set("jobId", fmt.Sprint(jobId)) + return c +} + +// Parent sets the optional parameter "parent": Required. Parent (Batch) +// resource reference. 
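+//
+// For illustration only (editorial sketch): despite the optional-style
+// setters, jobId and parent are documented as required, so a typical call
+// sets both:
+//
+//	job, err := svc.Projects.Locations.Batches.SparkApplications.
+//		AccessJob(name).
+//		JobId(42).
+//		Parent(parent).
+//		Do()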
+func (c *ProjectsLocationsBatchesSparkApplicationsAccessJobCall) Parent(parent string) *ProjectsLocationsBatchesSparkApplicationsAccessJobCall { + c.urlParams_.Set("parent", parent) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessJobCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesSparkApplicationsAccessJobCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessJobCall) IfNoneMatch(entityTag string) *ProjectsLocationsBatchesSparkApplicationsAccessJobCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessJobCall) Context(ctx context.Context) *ProjectsLocationsBatchesSparkApplicationsAccessJobCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessJobCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBatchesSparkApplicationsAccessJobCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:accessJob") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.batches.sparkApplications.accessJob" call. +// Any non-2xx status code is an error. Response headers are in either +// *AccessSparkApplicationJobResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessJobCall) Do(opts ...googleapi.CallOption) (*AccessSparkApplicationJobResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AccessSparkApplicationJobResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsBatchesSparkApplicationsAccessSqlPlanCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AccessSqlPlan: Obtain Spark Plan Graph for a Spark Application SQL +// execution. Limits the number of clusters returned as part of the graph to +// 10000. +// +// - name: The fully qualified name of the batch to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplic +// ations/APPLICATION_ID". +func (r *ProjectsLocationsBatchesSparkApplicationsService) AccessSqlPlan(name string) *ProjectsLocationsBatchesSparkApplicationsAccessSqlPlanCall { + c := &ProjectsLocationsBatchesSparkApplicationsAccessSqlPlanCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// ExecutionId sets the optional parameter "executionId": Required. Execution +// ID +func (c *ProjectsLocationsBatchesSparkApplicationsAccessSqlPlanCall) ExecutionId(executionId int64) *ProjectsLocationsBatchesSparkApplicationsAccessSqlPlanCall { + c.urlParams_.Set("executionId", fmt.Sprint(executionId)) + return c +} + +// Parent sets the optional parameter "parent": Required. Parent (Batch) +// resource reference. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessSqlPlanCall) Parent(parent string) *ProjectsLocationsBatchesSparkApplicationsAccessSqlPlanCall { + c.urlParams_.Set("parent", parent) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessSqlPlanCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesSparkApplicationsAccessSqlPlanCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessSqlPlanCall) IfNoneMatch(entityTag string) *ProjectsLocationsBatchesSparkApplicationsAccessSqlPlanCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessSqlPlanCall) Context(ctx context.Context) *ProjectsLocationsBatchesSparkApplicationsAccessSqlPlanCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
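+//
+// For illustration only (editorial sketch): fetching the Spark plan graph of
+// one SQL execution; per the method doc the graph is capped at 10000 clusters:
+//
+//	graph, err := svc.Projects.Locations.Batches.SparkApplications.
+//		AccessSqlPlan(name).
+//		ExecutionId(7).
+//		Parent(parent).
+//		Do()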
+func (c *ProjectsLocationsBatchesSparkApplicationsAccessSqlPlanCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBatchesSparkApplicationsAccessSqlPlanCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:accessSqlPlan") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.batches.sparkApplications.accessSqlPlan" call. +// Any non-2xx status code is an error. Response headers are in either +// *AccessSparkApplicationSqlSparkPlanGraphResponse.ServerResponse.Header or +// (if a response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessSqlPlanCall) Do(opts ...googleapi.CallOption) (*AccessSparkApplicationSqlSparkPlanGraphResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AccessSparkApplicationSqlSparkPlanGraphResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsBatchesSparkApplicationsAccessSqlQueryCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AccessSqlQuery: Obtain data corresponding to a particular SQL Query for a +// Spark Application. +// +// - name: The fully qualified name of the batch to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplic +// ations/APPLICATION_ID". +func (r *ProjectsLocationsBatchesSparkApplicationsService) AccessSqlQuery(name string) *ProjectsLocationsBatchesSparkApplicationsAccessSqlQueryCall { + c := &ProjectsLocationsBatchesSparkApplicationsAccessSqlQueryCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Details sets the optional parameter "details": Lists or hides details of +// Spark plan nodes. Set true to list details and false to hide them. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessSqlQueryCall) Details(details bool) *ProjectsLocationsBatchesSparkApplicationsAccessSqlQueryCall { + c.urlParams_.Set("details", fmt.Sprint(details)) + return c +} + +// ExecutionId sets the optional parameter "executionId": Required.
Execution +// ID +func (c *ProjectsLocationsBatchesSparkApplicationsAccessSqlQueryCall) ExecutionId(executionId int64) *ProjectsLocationsBatchesSparkApplicationsAccessSqlQueryCall { + c.urlParams_.Set("executionId", fmt.Sprint(executionId)) + return c +} + +// Parent sets the optional parameter "parent": Required. Parent (Batch) +// resource reference. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessSqlQueryCall) Parent(parent string) *ProjectsLocationsBatchesSparkApplicationsAccessSqlQueryCall { + c.urlParams_.Set("parent", parent) + return c +} + +// PlanDescription sets the optional parameter "planDescription": Enables/ +// disables physical plan description on demand +func (c *ProjectsLocationsBatchesSparkApplicationsAccessSqlQueryCall) PlanDescription(planDescription bool) *ProjectsLocationsBatchesSparkApplicationsAccessSqlQueryCall { + c.urlParams_.Set("planDescription", fmt.Sprint(planDescription)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessSqlQueryCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesSparkApplicationsAccessSqlQueryCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessSqlQueryCall) IfNoneMatch(entityTag string) *ProjectsLocationsBatchesSparkApplicationsAccessSqlQueryCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessSqlQueryCall) Context(ctx context.Context) *ProjectsLocationsBatchesSparkApplicationsAccessSqlQueryCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessSqlQueryCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBatchesSparkApplicationsAccessSqlQueryCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:accessSqlQuery") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.batches.sparkApplications.accessSqlQuery" call. +// Any non-2xx status code is an error. Response headers are in either +// *AccessSparkApplicationSqlQueryResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. 
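+//
+// For illustration only (editorial sketch):
+//
+//	q, err := svc.Projects.Locations.Batches.SparkApplications.
+//		AccessSqlQuery(name).
+//		ExecutionId(7).
+//		Details(true).
+//		PlanDescription(false).
+//		Parent(parent).
+//		Do()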
+func (c *ProjectsLocationsBatchesSparkApplicationsAccessSqlQueryCall) Do(opts ...googleapi.CallOption) (*AccessSparkApplicationSqlQueryResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AccessSparkApplicationSqlQueryResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsBatchesSparkApplicationsAccessStageAttemptCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AccessStageAttempt: Obtain data corresponding to a spark stage attempt for a +// Spark Application. +// +// - name: The fully qualified name of the batch to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplic +// ations/APPLICATION_ID". +func (r *ProjectsLocationsBatchesSparkApplicationsService) AccessStageAttempt(name string) *ProjectsLocationsBatchesSparkApplicationsAccessStageAttemptCall { + c := &ProjectsLocationsBatchesSparkApplicationsAccessStageAttemptCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Parent sets the optional parameter "parent": Required. Parent (Batch) +// resource reference. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessStageAttemptCall) Parent(parent string) *ProjectsLocationsBatchesSparkApplicationsAccessStageAttemptCall { + c.urlParams_.Set("parent", parent) + return c +} + +// StageAttemptId sets the optional parameter "stageAttemptId": Required. Stage +// Attempt ID +func (c *ProjectsLocationsBatchesSparkApplicationsAccessStageAttemptCall) StageAttemptId(stageAttemptId int64) *ProjectsLocationsBatchesSparkApplicationsAccessStageAttemptCall { + c.urlParams_.Set("stageAttemptId", fmt.Sprint(stageAttemptId)) + return c +} + +// StageId sets the optional parameter "stageId": Required. Stage ID +func (c *ProjectsLocationsBatchesSparkApplicationsAccessStageAttemptCall) StageId(stageId int64) *ProjectsLocationsBatchesSparkApplicationsAccessStageAttemptCall { + c.urlParams_.Set("stageId", fmt.Sprint(stageId)) + return c +} + +// SummaryMetricsMask sets the optional parameter "summaryMetricsMask": The +// list of summary metrics fields to include. An empty list defaults to +// skipping all summary metrics fields. For example, if the response should +// include TaskQuantileMetrics, the request should have task_quantile_metrics +// in the summary_metrics_mask field. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessStageAttemptCall) SummaryMetricsMask(summaryMetricsMask string) *ProjectsLocationsBatchesSparkApplicationsAccessStageAttemptCall { + c.urlParams_.Set("summaryMetricsMask", summaryMetricsMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details.
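+//
+// For illustration only (editorial sketch): requesting one stage attempt with
+// task quantile metrics included via the summary metrics mask:
+//
+//	sa, err := svc.Projects.Locations.Batches.SparkApplications.
+//		AccessStageAttempt(name).
+//		StageId(3).
+//		StageAttemptId(0).
+//		SummaryMetricsMask("task_quantile_metrics").
+//		Parent(parent).
+//		Do()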
+func (c *ProjectsLocationsBatchesSparkApplicationsAccessStageAttemptCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesSparkApplicationsAccessStageAttemptCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessStageAttemptCall) IfNoneMatch(entityTag string) *ProjectsLocationsBatchesSparkApplicationsAccessStageAttemptCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessStageAttemptCall) Context(ctx context.Context) *ProjectsLocationsBatchesSparkApplicationsAccessStageAttemptCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessStageAttemptCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBatchesSparkApplicationsAccessStageAttemptCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:accessStageAttempt") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.batches.sparkApplications.accessStageAttempt" call. +// Any non-2xx status code is an error. Response headers are in either +// *AccessSparkApplicationStageAttemptResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessStageAttemptCall) Do(opts ...googleapi.CallOption) (*AccessSparkApplicationStageAttemptResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AccessSparkApplicationStageAttemptResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsBatchesSparkApplicationsAccessStageRddGraphCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AccessStageRddGraph: Obtain RDD operation graph for a Spark Application +// Stage. Limits the number of clusters returned as part of the graph to 10000. +// +// - name: The fully qualified name of the batch to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplic +// ations/APPLICATION_ID". +func (r *ProjectsLocationsBatchesSparkApplicationsService) AccessStageRddGraph(name string) *ProjectsLocationsBatchesSparkApplicationsAccessStageRddGraphCall { + c := &ProjectsLocationsBatchesSparkApplicationsAccessStageRddGraphCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Parent sets the optional parameter "parent": Required. Parent (Batch) +// resource reference. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessStageRddGraphCall) Parent(parent string) *ProjectsLocationsBatchesSparkApplicationsAccessStageRddGraphCall { + c.urlParams_.Set("parent", parent) + return c +} + +// StageId sets the optional parameter "stageId": Required. Stage ID +func (c *ProjectsLocationsBatchesSparkApplicationsAccessStageRddGraphCall) StageId(stageId int64) *ProjectsLocationsBatchesSparkApplicationsAccessStageRddGraphCall { + c.urlParams_.Set("stageId", fmt.Sprint(stageId)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessStageRddGraphCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesSparkApplicationsAccessStageRddGraphCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessStageRddGraphCall) IfNoneMatch(entityTag string) *ProjectsLocationsBatchesSparkApplicationsAccessStageRddGraphCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessStageRddGraphCall) Context(ctx context.Context) *ProjectsLocationsBatchesSparkApplicationsAccessStageRddGraphCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *ProjectsLocationsBatchesSparkApplicationsAccessStageRddGraphCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBatchesSparkApplicationsAccessStageRddGraphCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:accessStageRddGraph") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.batches.sparkApplications.accessStageRddGraph" call. +// Any non-2xx status code is an error. Response headers are in either +// *AccessSparkApplicationStageRddOperationGraphResponse.ServerResponse.Header +// or (if a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBatchesSparkApplicationsAccessStageRddGraphCall) Do(opts ...googleapi.CallOption) (*AccessSparkApplicationStageRddOperationGraphResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AccessSparkApplicationStageRddOperationGraphResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsBatchesSparkApplicationsSearchCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Search: Obtain high level information and list of Spark Applications +// corresponding to a batch +// +// - parent: The fully qualified name of the batch to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID". +func (r *ProjectsLocationsBatchesSparkApplicationsService) Search(parent string) *ProjectsLocationsBatchesSparkApplicationsSearchCall { + c := &ProjectsLocationsBatchesSparkApplicationsSearchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// ApplicationStatus sets the optional parameter "applicationStatus": Search +// only applications in the chosen state. 
+// +// Possible values: +// +// "APPLICATION_STATUS_UNSPECIFIED" +// "APPLICATION_STATUS_RUNNING" +// "APPLICATION_STATUS_COMPLETED" +func (c *ProjectsLocationsBatchesSparkApplicationsSearchCall) ApplicationStatus(applicationStatus string) *ProjectsLocationsBatchesSparkApplicationsSearchCall { + c.urlParams_.Set("applicationStatus", applicationStatus) + return c +} + +// MaxEndTime sets the optional parameter "maxEndTime": Latest end timestamp to +// list. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchCall) MaxEndTime(maxEndTime string) *ProjectsLocationsBatchesSparkApplicationsSearchCall { + c.urlParams_.Set("maxEndTime", maxEndTime) + return c +} + +// MaxTime sets the optional parameter "maxTime": Latest start timestamp to +// list. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchCall) MaxTime(maxTime string) *ProjectsLocationsBatchesSparkApplicationsSearchCall { + c.urlParams_.Set("maxTime", maxTime) + return c +} + +// MinEndTime sets the optional parameter "minEndTime": Earliest end timestamp +// to list. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchCall) MinEndTime(minEndTime string) *ProjectsLocationsBatchesSparkApplicationsSearchCall { + c.urlParams_.Set("minEndTime", minEndTime) + return c +} + +// MinTime sets the optional parameter "minTime": Earliest start timestamp to +// list. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchCall) MinTime(minTime string) *ProjectsLocationsBatchesSparkApplicationsSearchCall { + c.urlParams_.Set("minTime", minTime) + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of +// applications to return in each response. The service may return fewer than +// this. The default page size is 10; the maximum page size is 100. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchCall) PageSize(pageSize int64) *ProjectsLocationsBatchesSparkApplicationsSearchCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A page token received +// from a previous SearchSparkApplications call. Provide this token to retrieve +// the subsequent page. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchCall) PageToken(pageToken string) *ProjectsLocationsBatchesSparkApplicationsSearchCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesSparkApplicationsSearchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchCall) IfNoneMatch(entityTag string) *ProjectsLocationsBatchesSparkApplicationsSearchCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchCall) Context(ctx context.Context) *ProjectsLocationsBatchesSparkApplicationsSearchCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBatchesSparkApplicationsSearchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/sparkApplications:search") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.batches.sparkApplications.search" call. +// Any non-2xx status code is an error. Response headers are in either +// *SearchSparkApplicationsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchCall) Do(opts ...googleapi.CallOption) (*SearchSparkApplicationsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SearchSparkApplicationsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchCall) Pages(ctx context.Context, f func(*SearchSparkApplicationsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSummaryCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// SearchExecutorStageSummary: Obtain executor summary with respect to a spark +// stage attempt. +// +// - name: The fully qualified name of the batch to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplic +// ations/APPLICATION_ID". 
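+//
+// A minimal usage sketch (svc, the resource names, and the IDs below are
+// illustrative assumptions, not values defined in this file). Although
+// generated as optional setters, parent, stageId, and stageAttemptId are
+// documented as required and must be set before calling Do:
+//
+//	summary, err := svc.Projects.Locations.Batches.SparkApplications.
+//		SearchExecutorStageSummary(name).
+//		Parent(parent).
+//		StageId(3).
+//		StageAttemptId(0).
+//		Do()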
+func (r *ProjectsLocationsBatchesSparkApplicationsService) SearchExecutorStageSummary(name string) *ProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSummaryCall { + c := &ProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSummaryCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of executors +// to return in each response. The service may return fewer than this. The +// default page size is 10; the maximum page size is 100. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSummaryCall) PageSize(pageSize int64) *ProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSummaryCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A page token received +// from a previous AccessSparkApplicationExecutorsList call. Provide this token +// to retrieve the subsequent page. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSummaryCall) PageToken(pageToken string) *ProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSummaryCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Parent sets the optional parameter "parent": Required. Parent (Batch) +// resource reference. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSummaryCall) Parent(parent string) *ProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSummaryCall { + c.urlParams_.Set("parent", parent) + return c +} + +// StageAttemptId sets the optional parameter "stageAttemptId": Required. Stage +// Attempt ID +func (c *ProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSummaryCall) StageAttemptId(stageAttemptId int64) *ProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSummaryCall { + c.urlParams_.Set("stageAttemptId", fmt.Sprint(stageAttemptId)) + return c +} + +// StageId sets the optional parameter "stageId": Required. Stage ID +func (c *ProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSummaryCall) StageId(stageId int64) *ProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSummaryCall { + c.urlParams_.Set("stageId", fmt.Sprint(stageId)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSummaryCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSummaryCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSummaryCall) IfNoneMatch(entityTag string) *ProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSummaryCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSummaryCall) Context(ctx context.Context) *ProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSummaryCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSummaryCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSummaryCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:searchExecutorStageSummary") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.batches.sparkApplications.searchExecutorStageSummary" call. +// Any non-2xx status code is an error. Response headers are in either +// *SearchSparkApplicationExecutorStageSummaryResponse.ServerResponse.Header or +// (if a response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSummaryCall) Do(opts ...googleapi.CallOption) (*SearchSparkApplicationExecutorStageSummaryResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SearchSparkApplicationExecutorStageSummaryResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSummaryCall) Pages(ctx context.Context, f func(*SearchSparkApplicationExecutorStageSummaryResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsLocationsBatchesSparkApplicationsSearchExecutorsCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// SearchExecutors: Obtain data corresponding to executors for a Spark +// Application. +// +// - name: The fully qualified name of the batch to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplic +// ations/APPLICATION_ID". 
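+//
+// A sketch of paging through dead executors (svc, ctx, name, and parent are
+// assumed, illustrative identifiers):
+//
+//	err := svc.Projects.Locations.Batches.SparkApplications.
+//		SearchExecutors(name).
+//		Parent(parent).
+//		ExecutorStatus("EXECUTOR_STATUS_DEAD").
+//		PageSize(100).
+//		Pages(ctx, func(page *SearchSparkApplicationExecutorsResponse) error {
+//			// Inspect each page here; returning a non-nil error stops paging.
+//			return nil
+//		})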
+func (r *ProjectsLocationsBatchesSparkApplicationsService) SearchExecutors(name string) *ProjectsLocationsBatchesSparkApplicationsSearchExecutorsCall {
+	c := &ProjectsLocationsBatchesSparkApplicationsSearchExecutorsCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.name = name
+	return c
+}
+
+// ExecutorStatus sets the optional parameter "executorStatus": Filter to
+// select whether active, dead, or all executors should be selected.
+//
+// Possible values:
+//
+//	"EXECUTOR_STATUS_UNSPECIFIED"
+//	"EXECUTOR_STATUS_ACTIVE"
+//	"EXECUTOR_STATUS_DEAD"
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchExecutorsCall) ExecutorStatus(executorStatus string) *ProjectsLocationsBatchesSparkApplicationsSearchExecutorsCall {
+	c.urlParams_.Set("executorStatus", executorStatus)
+	return c
+}
+
+// PageSize sets the optional parameter "pageSize": Maximum number of executors
+// to return in each response. The service may return fewer than this. The
+// default page size is 10; the maximum page size is 100.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchExecutorsCall) PageSize(pageSize int64) *ProjectsLocationsBatchesSparkApplicationsSearchExecutorsCall {
+	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": A page token received
+// from a previous AccessSparkApplicationExecutorsList call. Provide this token
+// to retrieve the subsequent page.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchExecutorsCall) PageToken(pageToken string) *ProjectsLocationsBatchesSparkApplicationsSearchExecutorsCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Parent sets the optional parameter "parent": Required. Parent (Batch)
+// resource reference.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchExecutorsCall) Parent(parent string) *ProjectsLocationsBatchesSparkApplicationsSearchExecutorsCall {
+	c.urlParams_.Set("parent", parent)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchExecutorsCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesSparkApplicationsSearchExecutorsCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets an optional parameter which makes the operation fail if the
+// object's ETag matches the given value. This is useful for getting updates
+// only after the object has changed since the last request.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchExecutorsCall) IfNoneMatch(entityTag string) *ProjectsLocationsBatchesSparkApplicationsSearchExecutorsCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchExecutorsCall) Context(ctx context.Context) *ProjectsLocationsBatchesSparkApplicationsSearchExecutorsCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchExecutorsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBatchesSparkApplicationsSearchExecutorsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:searchExecutors") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.batches.sparkApplications.searchExecutors" call. +// Any non-2xx status code is an error. Response headers are in either +// *SearchSparkApplicationExecutorsResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchExecutorsCall) Do(opts ...googleapi.CallOption) (*SearchSparkApplicationExecutorsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SearchSparkApplicationExecutorsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchExecutorsCall) Pages(ctx context.Context, f func(*SearchSparkApplicationExecutorsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsLocationsBatchesSparkApplicationsSearchJobsCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// SearchJobs: Obtain list of spark jobs corresponding to a Spark Application. +// +// - name: The fully qualified name of the batch to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplic +// ations/APPLICATION_ID". 
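+//
+// A sketch of manual pagination, which the Pages helper below automates
+// (svc, name, and parent are assumed, illustrative identifiers):
+//
+//	call := svc.Projects.Locations.Batches.SparkApplications.
+//		SearchJobs(name).
+//		Parent(parent).
+//		JobStatus("JOB_EXECUTION_STATUS_FAILED")
+//	for {
+//		resp, err := call.Do()
+//		if err != nil {
+//			return err
+//		}
+//		// Consume resp here.
+//		if resp.NextPageToken == "" {
+//			break
+//		}
+//		call.PageToken(resp.NextPageToken)
+//	}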
+func (r *ProjectsLocationsBatchesSparkApplicationsService) SearchJobs(name string) *ProjectsLocationsBatchesSparkApplicationsSearchJobsCall {
+	c := &ProjectsLocationsBatchesSparkApplicationsSearchJobsCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.name = name
+	return c
+}
+
+// JobStatus sets the optional parameter "jobStatus": List only jobs in the
+// specified state.
+//
+// Possible values:
+//
+//	"JOB_EXECUTION_STATUS_UNSPECIFIED"
+//	"JOB_EXECUTION_STATUS_RUNNING"
+//	"JOB_EXECUTION_STATUS_SUCCEEDED"
+//	"JOB_EXECUTION_STATUS_FAILED"
+//	"JOB_EXECUTION_STATUS_UNKNOWN"
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchJobsCall) JobStatus(jobStatus string) *ProjectsLocationsBatchesSparkApplicationsSearchJobsCall {
+	c.urlParams_.Set("jobStatus", jobStatus)
+	return c
+}
+
+// PageSize sets the optional parameter "pageSize": Maximum number of jobs to
+// return in each response. The service may return fewer than this. The default
+// page size is 10; the maximum page size is 100.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchJobsCall) PageSize(pageSize int64) *ProjectsLocationsBatchesSparkApplicationsSearchJobsCall {
+	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": A page token received
+// from a previous SearchSparkApplicationJobs call. Provide this token to
+// retrieve the subsequent page.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchJobsCall) PageToken(pageToken string) *ProjectsLocationsBatchesSparkApplicationsSearchJobsCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Parent sets the optional parameter "parent": Required. Parent (Batch)
+// resource reference.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchJobsCall) Parent(parent string) *ProjectsLocationsBatchesSparkApplicationsSearchJobsCall {
+	c.urlParams_.Set("parent", parent)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchJobsCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesSparkApplicationsSearchJobsCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets an optional parameter which makes the operation fail if the
+// object's ETag matches the given value. This is useful for getting updates
+// only after the object has changed since the last request.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchJobsCall) IfNoneMatch(entityTag string) *ProjectsLocationsBatchesSparkApplicationsSearchJobsCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchJobsCall) Context(ctx context.Context) *ProjectsLocationsBatchesSparkApplicationsSearchJobsCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchJobsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBatchesSparkApplicationsSearchJobsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:searchJobs") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.batches.sparkApplications.searchJobs" call. +// Any non-2xx status code is an error. Response headers are in either +// *SearchSparkApplicationJobsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchJobsCall) Do(opts ...googleapi.CallOption) (*SearchSparkApplicationJobsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SearchSparkApplicationJobsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchJobsCall) Pages(ctx context.Context, f func(*SearchSparkApplicationJobsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// SearchSqlQueries: Obtain data corresponding to SQL Queries for a Spark +// Application. +// +// - name: The fully qualified name of the batch to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplic +// ations/APPLICATION_ID". 
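+//
+// A minimal sketch (svc, name, and parent are assumed identifiers), requesting
+// plan-node details while leaving the physical plan description off:
+//
+//	resp, err := svc.Projects.Locations.Batches.SparkApplications.
+//		SearchSqlQueries(name).
+//		Parent(parent).
+//		Details(true).
+//		PlanDescription(false).
+//		Do()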
+func (r *ProjectsLocationsBatchesSparkApplicationsService) SearchSqlQueries(name string) *ProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesCall {
+	c := &ProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.name = name
+	return c
+}
+
+// Details sets the optional parameter "details": Lists or hides details of
+// Spark plan nodes. Set to true to list details and false to hide them.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesCall) Details(details bool) *ProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesCall {
+	c.urlParams_.Set("details", fmt.Sprint(details))
+	return c
+}
+
+// PageSize sets the optional parameter "pageSize": Maximum number of queries
+// to return in each response. The service may return fewer than this. The
+// default page size is 10; the maximum page size is 100.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesCall) PageSize(pageSize int64) *ProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesCall {
+	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": A page token received
+// from a previous SearchSparkApplicationSqlQueries call. Provide this token to
+// retrieve the subsequent page.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesCall) PageToken(pageToken string) *ProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Parent sets the optional parameter "parent": Required. Parent (Batch)
+// resource reference.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesCall) Parent(parent string) *ProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesCall {
+	c.urlParams_.Set("parent", parent)
+	return c
+}
+
+// PlanDescription sets the optional parameter "planDescription": Enables or
+// disables the physical plan description on demand.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesCall) PlanDescription(planDescription bool) *ProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesCall {
+	c.urlParams_.Set("planDescription", fmt.Sprint(planDescription))
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets an optional parameter which makes the operation fail if the
+// object's ETag matches the given value. This is useful for getting updates
+// only after the object has changed since the last request.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesCall) IfNoneMatch(entityTag string) *ProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesCall) Context(ctx context.Context) *ProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:searchSqlQueries") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.batches.sparkApplications.searchSqlQueries" call. +// Any non-2xx status code is an error. Response headers are in either +// *SearchSparkApplicationSqlQueriesResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesCall) Do(opts ...googleapi.CallOption) (*SearchSparkApplicationSqlQueriesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SearchSparkApplicationSqlQueriesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesCall) Pages(ctx context.Context, f func(*SearchSparkApplicationSqlQueriesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// SearchStageAttemptTasks: Obtain data corresponding to tasks for a spark +// stage attempt for a Spark Application. +// +// - name: The fully qualified name of the batch to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplic +// ations/APPLICATION_ID". 
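+//
+// A hedged sketch combining the task filters defined below (svc, name, parent,
+// and the stage/attempt IDs are illustrative assumptions):
+//
+//	resp, err := svc.Projects.Locations.Batches.SparkApplications.
+//		SearchStageAttemptTasks(name).
+//		Parent(parent).
+//		StageId(7).
+//		StageAttemptId(1).
+//		TaskStatus("TASK_STATUS_FAILED").
+//		SortRuntime(true).
+//		Do()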
+func (r *ProjectsLocationsBatchesSparkApplicationsService) SearchStageAttemptTasks(name string) *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall {
+	c := &ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.name = name
+	return c
+}
+
+// PageSize sets the optional parameter "pageSize": Maximum number of tasks to
+// return in each response. The service may return fewer than this. The default
+// page size is 10; the maximum page size is 100.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall) PageSize(pageSize int64) *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall {
+	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": A page token received
+// from a previous ListSparkApplicationStageAttemptTasks call. Provide this
+// token to retrieve the subsequent page.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall) PageToken(pageToken string) *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Parent sets the optional parameter "parent": Required. Parent (Batch)
+// resource reference.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall) Parent(parent string) *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall {
+	c.urlParams_.Set("parent", parent)
+	return c
+}
+
+// SortRuntime sets the optional parameter "sortRuntime": Sort the tasks by
+// runtime.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall) SortRuntime(sortRuntime bool) *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall {
+	c.urlParams_.Set("sortRuntime", fmt.Sprint(sortRuntime))
+	return c
+}
+
+// StageAttemptId sets the optional parameter "stageAttemptId": Stage Attempt
+// ID
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall) StageAttemptId(stageAttemptId int64) *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall {
+	c.urlParams_.Set("stageAttemptId", fmt.Sprint(stageAttemptId))
+	return c
+}
+
+// StageId sets the optional parameter "stageId": Stage ID
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall) StageId(stageId int64) *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall {
+	c.urlParams_.Set("stageId", fmt.Sprint(stageId))
+	return c
+}
+
+// TaskStatus sets the optional parameter "taskStatus": List only tasks in the
+// given state.
+//
+// Possible values:
+//
+//	"TASK_STATUS_UNSPECIFIED"
+//	"TASK_STATUS_RUNNING"
+//	"TASK_STATUS_SUCCESS"
+//	"TASK_STATUS_FAILED"
+//	"TASK_STATUS_KILLED"
+//	"TASK_STATUS_PENDING"
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall) TaskStatus(taskStatus string) *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall {
+	c.urlParams_.Set("taskStatus", taskStatus)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
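+//
+// For instance, a caller that only needs paging metadata could request a
+// partial response (the field path here is an illustrative assumption):
+//
+//	call.Fields("nextPageToken")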
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall) IfNoneMatch(entityTag string) *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall) Context(ctx context.Context) *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:searchStageAttemptTasks") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.batches.sparkApplications.searchStageAttemptTasks" call. +// Any non-2xx status code is an error. Response headers are in either +// *SearchSparkApplicationStageAttemptTasksResponse.ServerResponse.Header or +// (if a response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall) Do(opts ...googleapi.CallOption) (*SearchSparkApplicationStageAttemptTasksResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SearchSparkApplicationStageAttemptTasksResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. 
+// The provided context supersedes any context provided to the Context method.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksCall) Pages(ctx context.Context, f func(*SearchSparkApplicationStageAttemptTasksResponse) error) error {
+	c.ctx_ = ctx
+	defer c.PageToken(c.urlParams_.Get("pageToken"))
+	for {
+		x, err := c.Do()
+		if err != nil {
+			return err
+		}
+		if err := f(x); err != nil {
+			return err
+		}
+		if x.NextPageToken == "" {
+			return nil
+		}
+		c.PageToken(x.NextPageToken)
+	}
+}
+
+type ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsCall struct {
+	s            *Service
+	name         string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+	header_      http.Header
+}
+
+// SearchStageAttempts: Obtain data corresponding to spark stage attempts for
+// a Spark Application.
+//
+// - name: The fully qualified name of the batch to retrieve in the format
+//   "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplic
+//   ations/APPLICATION_ID".
+func (r *ProjectsLocationsBatchesSparkApplicationsService) SearchStageAttempts(name string) *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsCall {
+	c := &ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.name = name
+	return c
+}
+
+// PageSize sets the optional parameter "pageSize": Maximum number of stage
+// attempts (paging based on stage_attempt_id) to return in each response. The
+// service may return fewer than this. The default page size is 10; the maximum
+// page size is 100.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsCall) PageSize(pageSize int64) *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsCall {
+	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": A page token received
+// from a previous SearchSparkApplicationStageAttempts call. Provide this token
+// to retrieve the subsequent page.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsCall) PageToken(pageToken string) *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Parent sets the optional parameter "parent": Required. Parent (Batch)
+// resource reference.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsCall) Parent(parent string) *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsCall {
+	c.urlParams_.Set("parent", parent)
+	return c
+}
+
+// StageId sets the optional parameter "stageId": Required. Stage ID for which
+// attempts are to be fetched.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsCall) StageId(stageId int64) *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsCall {
+	c.urlParams_.Set("stageId", fmt.Sprint(stageId))
+	return c
+}
+
+// SummaryMetricsMask sets the optional parameter "summaryMetricsMask": The
+// list of summary metrics fields to include. An empty list defaults to
+// skipping all summary metrics fields. For example, if the response should
+// include TaskQuantileMetrics, the request should have task_quantile_metrics
+// in the summary_metrics_mask field.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsCall) SummaryMetricsMask(summaryMetricsMask string) *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsCall {
+	c.urlParams_.Set("summaryMetricsMask", summaryMetricsMask)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets an optional parameter which makes the operation fail if the
+// object's ETag matches the given value. This is useful for getting updates
+// only after the object has changed since the last request.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsCall) IfNoneMatch(entityTag string) *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsCall) Context(ctx context.Context) *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:searchStageAttempts")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"name": c.name,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "dataproc.projects.locations.batches.sparkApplications.searchStageAttempts" call.
+// Any non-2xx status code is an error. Response headers are in either
+// *SearchSparkApplicationStageAttemptsResponse.ServerResponse.Header or (if a
+// response was returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsCall) Do(opts ...googleapi.CallOption) (*SearchSparkApplicationStageAttemptsResponse, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SearchSparkApplicationStageAttemptsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsCall) Pages(ctx context.Context, f func(*SearchSparkApplicationStageAttemptsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsLocationsBatchesSparkApplicationsSearchStagesCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// SearchStages: Obtain data corresponding to stages for a Spark Application. +// +// - name: The fully qualified name of the batch to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplic +// ations/APPLICATION_ID". +func (r *ProjectsLocationsBatchesSparkApplicationsService) SearchStages(name string) *ProjectsLocationsBatchesSparkApplicationsSearchStagesCall { + c := &ProjectsLocationsBatchesSparkApplicationsSearchStagesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of stages +// (paging based on stage_id) to return in each response. The service may +// return fewer than this. The default page size is 10; the maximum page size +// is 100. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchStagesCall) PageSize(pageSize int64) *ProjectsLocationsBatchesSparkApplicationsSearchStagesCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A page token received +// from a previous FetchSparkApplicationStagesList call. Provide this token to +// retrieve the subsequent page. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchStagesCall) PageToken(pageToken string) *ProjectsLocationsBatchesSparkApplicationsSearchStagesCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Parent sets the optional parameter "parent": Required. Parent (Batch) +// resource reference. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchStagesCall) Parent(parent string) *ProjectsLocationsBatchesSparkApplicationsSearchStagesCall { + c.urlParams_.Set("parent", parent) + return c +} + +// StageStatus sets the optional parameter "stageStatus": List only stages in +// the given state. 
+//
+// Possible values:
+//
+//	"STAGE_STATUS_UNSPECIFIED"
+//	"STAGE_STATUS_ACTIVE"
+//	"STAGE_STATUS_COMPLETE"
+//	"STAGE_STATUS_FAILED"
+//	"STAGE_STATUS_PENDING"
+//	"STAGE_STATUS_SKIPPED"
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStagesCall) StageStatus(stageStatus string) *ProjectsLocationsBatchesSparkApplicationsSearchStagesCall {
+	c.urlParams_.Set("stageStatus", stageStatus)
+	return c
+}
+
+// SummaryMetricsMask sets the optional parameter "summaryMetricsMask": The
+// list of summary metrics fields to include. An empty list defaults to
+// skipping all summary metrics fields. For example, if the response should
+// include TaskQuantileMetrics, the request should have task_quantile_metrics
+// in the summary_metrics_mask field.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStagesCall) SummaryMetricsMask(summaryMetricsMask string) *ProjectsLocationsBatchesSparkApplicationsSearchStagesCall {
+	c.urlParams_.Set("summaryMetricsMask", summaryMetricsMask)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStagesCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesSparkApplicationsSearchStagesCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets an optional parameter which makes the operation fail if the
+// object's ETag matches the given value. This is useful for getting updates
+// only after the object has changed since the last request.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStagesCall) IfNoneMatch(entityTag string) *ProjectsLocationsBatchesSparkApplicationsSearchStagesCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStagesCall) Context(ctx context.Context) *ProjectsLocationsBatchesSparkApplicationsSearchStagesCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStagesCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStagesCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:searchStages")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"name": c.name,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "dataproc.projects.locations.batches.sparkApplications.searchStages" call.
+// Any non-2xx status code is an error. Response headers are in either
+// *SearchSparkApplicationStagesResponse.ServerResponse.Header or (if a
+// response was returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was because
+// http.StatusNotModified was returned.
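+//
+// A minimal sketch of the conditional-request flow described above (call is an
+// already-built search call, and etag is assumed to hold a value cached from
+// an earlier response's headers):
+//
+//	resp, err := call.IfNoneMatch(etag).Do()
+//	if googleapi.IsNotModified(err) {
+//		// The cached copy is still current; reuse it.
+//	} else if err != nil {
+//		return err
+//	}
+//	_ = resp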
+func (c *ProjectsLocationsBatchesSparkApplicationsSearchStagesCall) Do(opts ...googleapi.CallOption) (*SearchSparkApplicationStagesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SearchSparkApplicationStagesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsBatchesSparkApplicationsSearchStagesCall) Pages(ctx context.Context, f func(*SearchSparkApplicationStagesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsLocationsBatchesSparkApplicationsSummarizeExecutorsCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// SummarizeExecutors: Obtain summary of Executor Summary for a Spark +// Application +// +// - name: The fully qualified name of the batch to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplic +// ations/APPLICATION_ID". +func (r *ProjectsLocationsBatchesSparkApplicationsService) SummarizeExecutors(name string) *ProjectsLocationsBatchesSparkApplicationsSummarizeExecutorsCall { + c := &ProjectsLocationsBatchesSparkApplicationsSummarizeExecutorsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Parent sets the optional parameter "parent": Required. Parent (Batch) +// resource reference. +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeExecutorsCall) Parent(parent string) *ProjectsLocationsBatchesSparkApplicationsSummarizeExecutorsCall { + c.urlParams_.Set("parent", parent) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeExecutorsCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesSparkApplicationsSummarizeExecutorsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeExecutorsCall) IfNoneMatch(entityTag string) *ProjectsLocationsBatchesSparkApplicationsSummarizeExecutorsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. 
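+//
+// For example, to bound the call with a deadline (a sketch; call is an
+// already-built call value and context setup is the caller's responsibility):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	resp, err := call.Context(ctx).Do()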
+func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeExecutorsCall) Context(ctx context.Context) *ProjectsLocationsBatchesSparkApplicationsSummarizeExecutorsCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeExecutorsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeExecutorsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:summarizeExecutors") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.batches.sparkApplications.summarizeExecutors" call. +// Any non-2xx status code is an error. Response headers are in either +// *SummarizeSparkApplicationExecutorsResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeExecutorsCall) Do(opts ...googleapi.CallOption) (*SummarizeSparkApplicationExecutorsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SummarizeSparkApplicationExecutorsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsBatchesSparkApplicationsSummarizeJobsCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// SummarizeJobs: Obtain summary of Jobs for a Spark Application +// +// - name: The fully qualified name of the batch to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplic +// ations/APPLICATION_ID". +func (r *ProjectsLocationsBatchesSparkApplicationsService) SummarizeJobs(name string) *ProjectsLocationsBatchesSparkApplicationsSummarizeJobsCall { + c := &ProjectsLocationsBatchesSparkApplicationsSummarizeJobsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Parent sets the optional parameter "parent": Required. Parent (Batch) +// resource reference. 
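The IfNoneMatch plumbing above yields the familiar conditional-GET pattern: Do converts a 304 into a *googleapi.Error that googleapi.IsNotModified recognizes. A sketch under those assumptions, with the usual imports (google.golang.org/api/dataproc/v1 as dataproc, plus google.golang.org/api/googleapi) and placeholder variable names throughout:

// refreshExecutorSummary re-fetches the executor summary only when the
// server-side object has changed; lastETag is whatever a previous response
// carried in its Etag header.
func refreshExecutorSummary(svc *dataproc.Service, name, lastETag string) (string, error) {
    res, err := svc.Projects.Locations.Batches.SparkApplications.
        SummarizeExecutors(name).
        IfNoneMatch(lastETag).
        Do()
    if googleapi.IsNotModified(err) {
        // 304: keep using the cached summary and the old tag.
        return lastETag, nil
    }
    if err != nil {
        return "", err
    }
    // The response embeds googleapi.ServerResponse, so headers are available.
    return res.Header.Get("Etag"), nil
}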
+func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeJobsCall) Parent(parent string) *ProjectsLocationsBatchesSparkApplicationsSummarizeJobsCall { + c.urlParams_.Set("parent", parent) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeJobsCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesSparkApplicationsSummarizeJobsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeJobsCall) IfNoneMatch(entityTag string) *ProjectsLocationsBatchesSparkApplicationsSummarizeJobsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeJobsCall) Context(ctx context.Context) *ProjectsLocationsBatchesSparkApplicationsSummarizeJobsCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeJobsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeJobsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:summarizeJobs") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.batches.sparkApplications.summarizeJobs" call. +// Any non-2xx status code is an error. Response headers are in either +// *SummarizeSparkApplicationJobsResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeJobsCall) Do(opts ...googleapi.CallOption) (*SummarizeSparkApplicationJobsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SummarizeSparkApplicationJobsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsBatchesSparkApplicationsSummarizeStageAttemptTasksCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// SummarizeStageAttemptTasks: Obtain summary of Tasks for a Spark Application +// Stage Attempt +// +// - name: The fully qualified name of the batch to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplic +// ations/APPLICATION_ID". +func (r *ProjectsLocationsBatchesSparkApplicationsService) SummarizeStageAttemptTasks(name string) *ProjectsLocationsBatchesSparkApplicationsSummarizeStageAttemptTasksCall { + c := &ProjectsLocationsBatchesSparkApplicationsSummarizeStageAttemptTasksCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Parent sets the optional parameter "parent": Required. Parent (Batch) +// resource reference. +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeStageAttemptTasksCall) Parent(parent string) *ProjectsLocationsBatchesSparkApplicationsSummarizeStageAttemptTasksCall { + c.urlParams_.Set("parent", parent) + return c +} + +// StageAttemptId sets the optional parameter "stageAttemptId": Required. Stage +// Attempt ID +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeStageAttemptTasksCall) StageAttemptId(stageAttemptId int64) *ProjectsLocationsBatchesSparkApplicationsSummarizeStageAttemptTasksCall { + c.urlParams_.Set("stageAttemptId", fmt.Sprint(stageAttemptId)) + return c +} + +// StageId sets the optional parameter "stageId": Required. Stage ID +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeStageAttemptTasksCall) StageId(stageId int64) *ProjectsLocationsBatchesSparkApplicationsSummarizeStageAttemptTasksCall { + c.urlParams_.Set("stageId", fmt.Sprint(stageId)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeStageAttemptTasksCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesSparkApplicationsSummarizeStageAttemptTasksCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeStageAttemptTasksCall) IfNoneMatch(entityTag string) *ProjectsLocationsBatchesSparkApplicationsSummarizeStageAttemptTasksCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. 
+func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeStageAttemptTasksCall) Context(ctx context.Context) *ProjectsLocationsBatchesSparkApplicationsSummarizeStageAttemptTasksCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeStageAttemptTasksCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeStageAttemptTasksCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:summarizeStageAttemptTasks") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.batches.sparkApplications.summarizeStageAttemptTasks" call. +// Any non-2xx status code is an error. Response headers are in either +// *SummarizeSparkApplicationStageAttemptTasksResponse.ServerResponse.Header or +// (if a response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeStageAttemptTasksCall) Do(opts ...googleapi.CallOption) (*SummarizeSparkApplicationStageAttemptTasksResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SummarizeSparkApplicationStageAttemptTasksResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsBatchesSparkApplicationsSummarizeStagesCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// SummarizeStages: Obtain summary of Stages for a Spark Application +// +// - name: The fully qualified name of the batch to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplic +// ations/APPLICATION_ID". +func (r *ProjectsLocationsBatchesSparkApplicationsService) SummarizeStages(name string) *ProjectsLocationsBatchesSparkApplicationsSummarizeStagesCall { + c := &ProjectsLocationsBatchesSparkApplicationsSummarizeStagesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Parent sets the optional parameter "parent": Required. Parent (Batch) +// resource reference. 
+func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeStagesCall) Parent(parent string) *ProjectsLocationsBatchesSparkApplicationsSummarizeStagesCall { + c.urlParams_.Set("parent", parent) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeStagesCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesSparkApplicationsSummarizeStagesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeStagesCall) IfNoneMatch(entityTag string) *ProjectsLocationsBatchesSparkApplicationsSummarizeStagesCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeStagesCall) Context(ctx context.Context) *ProjectsLocationsBatchesSparkApplicationsSummarizeStagesCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeStagesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeStagesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:summarizeStages") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.batches.sparkApplications.summarizeStages" call. +// Any non-2xx status code is an error. Response headers are in either +// *SummarizeSparkApplicationStagesResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBatchesSparkApplicationsSummarizeStagesCall) Do(opts ...googleapi.CallOption) (*SummarizeSparkApplicationStagesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SummarizeSparkApplicationStagesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsBatchesSparkApplicationsWriteCall struct { + s *Service + name string + writesparkapplicationcontextrequest *WriteSparkApplicationContextRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Write: Write wrapper objects from dataplane to spanner +// +// - name: The fully qualified name of the spark application to write data +// about in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplic +// ations/APPLICATION_ID". +func (r *ProjectsLocationsBatchesSparkApplicationsService) Write(name string, writesparkapplicationcontextrequest *WriteSparkApplicationContextRequest) *ProjectsLocationsBatchesSparkApplicationsWriteCall { + c := &ProjectsLocationsBatchesSparkApplicationsWriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.writesparkapplicationcontextrequest = writesparkapplicationcontextrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsBatchesSparkApplicationsWriteCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesSparkApplicationsWriteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsBatchesSparkApplicationsWriteCall) Context(ctx context.Context) *ProjectsLocationsBatchesSparkApplicationsWriteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsBatchesSparkApplicationsWriteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBatchesSparkApplicationsWriteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.writesparkapplicationcontextrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:write") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.batches.sparkApplications.write" call. +// Any non-2xx status code is an error. 
Response headers are in either +// *WriteSparkApplicationContextResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBatchesSparkApplicationsWriteCall) Do(opts ...googleapi.CallOption) (*WriteSparkApplicationContextResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &WriteSparkApplicationContextResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsOperationsCancelCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Cancel: Starts asynchronous cancellation on a long-running operation. The +// server makes a best effort to cancel the operation, but success is not +// guaranteed. If the server doesn't support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or +// other methods to check whether the cancellation succeeded or whether the +// operation completed despite cancellation. On successful cancellation, the +// operation is not deleted; instead, it becomes an operation with an +// Operation.error value with a google.rpc.Status.code of 1, corresponding to +// Code.CANCELLED. +// +// - name: The name of the operation resource to be cancelled. +func (r *ProjectsLocationsOperationsService) Cancel(name string) *ProjectsLocationsOperationsCancelCall { + c := &ProjectsLocationsOperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsOperationsCancelCall) Fields(s ...googleapi.Field) *ProjectsLocationsOperationsCancelCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsOperationsCancelCall) Context(ctx context.Context) *ProjectsLocationsOperationsCancelCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsOperationsCancelCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsOperationsCancelCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:cancel") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.operations.cancel" call. +// Any non-2xx status code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsOperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsOperationsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a long-running operation. This method indicates that the +// client is no longer interested in the operation result. It does not cancel +// the operation. If the server doesn't support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. +// +// - name: The name of the operation resource to be deleted. +func (r *ProjectsLocationsOperationsService) Delete(name string) *ProjectsLocationsOperationsDeleteCall { + c := &ProjectsLocationsOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsOperationsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsOperationsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsOperationsDeleteCall) Context(ctx context.Context) *ProjectsLocationsOperationsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsOperationsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.operations.delete" call. +// Any non-2xx status code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsOperationsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsOperationsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the latest state of a long-running operation. Clients can use this +// method to poll the operation result at intervals as recommended by the API +// service. +// +// - name: The name of the operation resource. +func (r *ProjectsLocationsOperationsService) Get(name string) *ProjectsLocationsOperationsGetCall { + c := &ProjectsLocationsOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsOperationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsOperationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsOperationsGetCall) Context(ctx context.Context) *ProjectsLocationsOperationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
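Because Cancel is only best-effort, as its doc comment says, a caller confirms the outcome through Get: a cancelled operation is not deleted but completes with an Operation.error whose code is 1 (CANCELLED). A sketch with a placeholder operation name and the same imports as the previous sketch; since cancellation is asynchronous, real code would poll rather than issue a single Get:

// cancelled requests cancellation and then checks whether it has taken
// effect yet; opName is a placeholder operation resource name.
func cancelled(svc *dataproc.Service, opName string) (bool, error) {
    if _, err := svc.Projects.Locations.Operations.Cancel(opName).Do(); err != nil {
        return false, err
    }
    op, err := svc.Projects.Locations.Operations.Get(opName).Do()
    if err != nil {
        return false, err
    }
    // A successfully cancelled operation finishes with an error whose code
    // is 1 (google.rpc.Code.CANCELLED), per the Cancel doc comment above.
    return op.Done && op.Error != nil && op.Error.Code == 1, nil
}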
+func (c *ProjectsLocationsOperationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsOperationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.operations.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsOperationsListCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists operations that match the specified filter in the request. If +// the server doesn't support this method, it returns UNIMPLEMENTED. +// +// - name: The name of the operation's parent resource. +func (r *ProjectsLocationsOperationsService) List(name string) *ProjectsLocationsOperationsListCall { + c := &ProjectsLocationsOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Filter sets the optional parameter "filter": The standard list filter. +func (c *ProjectsLocationsOperationsListCall) Filter(filter string) *ProjectsLocationsOperationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// PageSize sets the optional parameter "pageSize": The standard list page +// size. +func (c *ProjectsLocationsOperationsListCall) PageSize(pageSize int64) *ProjectsLocationsOperationsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The standard list page +// token. +func (c *ProjectsLocationsOperationsListCall) PageToken(pageToken string) *ProjectsLocationsOperationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsOperationsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsOperationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsOperationsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsOperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsOperationsListCall) Context(ctx context.Context) *ProjectsLocationsOperationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsOperationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsOperationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.operations.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *ListOperationsResponse.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsLocationsOperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ListOperationsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
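Operations.Get's doc comment recommends polling at intervals, and the asynchronous creates later in this file (Sessions.Create returns an Operation) are the natural consumers of such a loop. A minimal sketch assuming a fixed 5-second interval (an arbitrary placeholder; real code would back off) and the time package alongside the imports above:

// waitForOperation polls Get until the operation reports Done, honoring
// ctx for cancellation.
func waitForOperation(ctx context.Context, svc *dataproc.Service, opName string) (*dataproc.Operation, error) {
    for {
        op, err := svc.Projects.Locations.Operations.Get(opName).Context(ctx).Do()
        if err != nil {
            return nil, err
        }
        if op.Done {
            return op, nil
        }
        select {
        case <-ctx.Done():
            return nil, ctx.Err()
        case <-time.After(5 * time.Second):
        }
    }
}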
+func (c *ProjectsLocationsOperationsListCall) Pages(ctx context.Context, f func(*ListOperationsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsLocationsSessionTemplatesCreateCall struct { + s *Service + parent string + sessiontemplate *SessionTemplate + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Create a session template synchronously. +// +// - parent: The parent resource where this session template will be created. +func (r *ProjectsLocationsSessionTemplatesService) Create(parent string, sessiontemplate *SessionTemplate) *ProjectsLocationsSessionTemplatesCreateCall { + c := &ProjectsLocationsSessionTemplatesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.sessiontemplate = sessiontemplate + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsSessionTemplatesCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionTemplatesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsSessionTemplatesCreateCall) Context(ctx context.Context) *ProjectsLocationsSessionTemplatesCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsSessionTemplatesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsSessionTemplatesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sessiontemplate) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/sessionTemplates") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.sessionTemplates.create" call. +// Any non-2xx status code is an error. Response headers are in either +// *SessionTemplate.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsLocationsSessionTemplatesCreateCall) Do(opts ...googleapi.CallOption) (*SessionTemplate, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SessionTemplate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsSessionTemplatesDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a session template. +// +// - name: The name of the session template resource to delete. +func (r *ProjectsLocationsSessionTemplatesService) Delete(name string) *ProjectsLocationsSessionTemplatesDeleteCall { + c := &ProjectsLocationsSessionTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsSessionTemplatesDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionTemplatesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsSessionTemplatesDeleteCall) Context(ctx context.Context) *ProjectsLocationsSessionTemplatesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsSessionTemplatesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsSessionTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.sessionTemplates.delete" call. +// Any non-2xx status code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsSessionTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsSessionTemplatesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the resource representation for a session template. +// +// - name: The name of the session template to retrieve. +func (r *ProjectsLocationsSessionTemplatesService) Get(name string) *ProjectsLocationsSessionTemplatesGetCall { + c := &ProjectsLocationsSessionTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsSessionTemplatesGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionTemplatesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsSessionTemplatesGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionTemplatesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsSessionTemplatesGetCall) Context(ctx context.Context) *ProjectsLocationsSessionTemplatesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsSessionTemplatesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsSessionTemplatesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.locations.sessionTemplates.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *SessionTemplate.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. 
+func (c *ProjectsLocationsSessionTemplatesGetCall) Do(opts ...googleapi.CallOption) (*SessionTemplate, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SessionTemplate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsSessionTemplatesListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists session templates. +// +// - parent: The parent that owns this collection of session templates. +func (r *ProjectsLocationsSessionTemplatesService) List(parent string) *ProjectsLocationsSessionTemplatesListCall { + c := &ProjectsLocationsSessionTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// Filter sets the optional parameter "filter": A filter for the session +// templates to return in the response. Filters are case sensitive and have the +// following syntax:field = value AND field = value ... +func (c *ProjectsLocationsSessionTemplatesListCall) Filter(filter string) *ProjectsLocationsSessionTemplatesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number of +// sessions to return in each response. The service may return fewer than this +// value. +func (c *ProjectsLocationsSessionTemplatesListCall) PageSize(pageSize int64) *ProjectsLocationsSessionTemplatesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A page token received +// from a previous ListSessions call. Provide this token to retrieve the +// subsequent page. +func (c *ProjectsLocationsSessionTemplatesListCall) PageToken(pageToken string) *ProjectsLocationsSessionTemplatesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsAutoscalingPoliciesCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsAutoscalingPoliciesCreateCall { +func (c *ProjectsLocationsSessionTemplatesListCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionTemplatesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsSessionTemplatesListCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionTemplatesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. 
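The filter grammar the List doc comment describes ("field = value AND field = value ...") plugs straight into the builder. A sketch with a placeholder parent; which fields are filterable is defined by the Dataproc API, so the expression below is purely illustrative, and the slice field name follows the generated ListSessionTemplatesResponse and should be checked against the vendored source:

// Placeholder parent and an illustrative filter; svc and ctx as in the
// earlier sketches.
err := svc.Projects.Locations.SessionTemplates.
    List("projects/my-project/locations/us-central1").
    Filter(`name = "projects/my-project/locations/us-central1/sessionTemplates/my-template"`).
    PageSize(25).
    Pages(ctx, func(page *dataproc.ListSessionTemplatesResponse) error {
        for _, tmpl := range page.SessionTemplates {
            fmt.Println(tmpl.Name)
        }
        return nil
    })
if err != nil {
    log.Fatal(err)
}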
-func (c *ProjectsLocationsAutoscalingPoliciesCreateCall) Context(ctx context.Context) *ProjectsLocationsAutoscalingPoliciesCreateCall { +func (c *ProjectsLocationsSessionTemplatesListCall) Context(ctx context.Context) *ProjectsLocationsSessionTemplatesListCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsAutoscalingPoliciesCreateCall) Header() http.Header { +func (c *ProjectsLocationsSessionTemplatesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsAutoscalingPoliciesCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscalingpolicy) - if err != nil { - return nil, err +func (c *ProjectsLocationsSessionTemplatesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/autoscalingPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/sessionTemplates") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -6071,13 +14533,13 @@ func (c *ProjectsLocationsAutoscalingPoliciesCreateCall) doRequest(alt string) ( return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.autoscalingPolicies.create" call. +// Do executes the "dataproc.projects.locations.sessionTemplates.list" call. // Any non-2xx status code is an error. Response headers are in either -// *AutoscalingPolicy.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified was -// returned. -func (c *ProjectsLocationsAutoscalingPoliciesCreateCall) Do(opts ...googleapi.CallOption) (*AutoscalingPolicy, error) { +// *ListSessionTemplatesResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsSessionTemplatesListCall) Do(opts ...googleapi.CallOption) (*ListSessionTemplatesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6096,7 +14558,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesCreateCall) Do(opts ...googleapi.Ca if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &AutoscalingPolicy{ + ret := &ListSessionTemplatesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6109,62 +14571,81 @@ func (c *ProjectsLocationsAutoscalingPoliciesCreateCall) Do(opts ...googleapi.Ca return ret, nil } -type ProjectsLocationsAutoscalingPoliciesDeleteCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsSessionTemplatesListCall) Pages(ctx context.Context, f func(*ListSessionTemplatesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// Delete: Deletes an autoscaling policy. It is an error to delete an -// autoscaling policy that is in use by one or more clusters. +type ProjectsLocationsSessionTemplatesPatchCall struct { + s *Service + name string + sessiontemplate *SessionTemplate + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the session template synchronously. // -// - name: The "resource name" of the autoscaling policy, as described in -// https://cloud.google.com/apis/design/resource_names. For -// projects.regions.autoscalingPolicies.delete, the resource name of the -// policy has the following format: -// projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For -// projects.locations.autoscalingPolicies.delete, the resource name of the -// policy has the following format: -// projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}. -func (r *ProjectsLocationsAutoscalingPoliciesService) Delete(name string) *ProjectsLocationsAutoscalingPoliciesDeleteCall { - c := &ProjectsLocationsAutoscalingPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The resource name of the session template. +func (r *ProjectsLocationsSessionTemplatesService) Patch(name string, sessiontemplate *SessionTemplate) *ProjectsLocationsSessionTemplatesPatchCall { + c := &ProjectsLocationsSessionTemplatesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name + c.sessiontemplate = sessiontemplate return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsAutoscalingPoliciesDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsAutoscalingPoliciesDeleteCall { +func (c *ProjectsLocationsSessionTemplatesPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionTemplatesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. 
-func (c *ProjectsLocationsAutoscalingPoliciesDeleteCall) Context(ctx context.Context) *ProjectsLocationsAutoscalingPoliciesDeleteCall { +func (c *ProjectsLocationsSessionTemplatesPatchCall) Context(ctx context.Context) *ProjectsLocationsSessionTemplatesPatchCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsAutoscalingPoliciesDeleteCall) Header() http.Header { +func (c *ProjectsLocationsSessionTemplatesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsAutoscalingPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) +func (c *ProjectsLocationsSessionTemplatesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sessiontemplate) + if err != nil { + return nil, err + } c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -6175,12 +14656,13 @@ func (c *ProjectsLocationsAutoscalingPoliciesDeleteCall) doRequest(alt string) ( return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.autoscalingPolicies.delete" call. +// Do executes the "dataproc.projects.locations.sessionTemplates.patch" call. // Any non-2xx status code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was returned. -func (c *ProjectsLocationsAutoscalingPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { +// *SessionTemplate.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsLocationsSessionTemplatesPatchCall) Do(opts ...googleapi.CallOption) (*SessionTemplate, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6199,7 +14681,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesDeleteCall) Do(opts ...googleapi.Ca if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Empty{ + ret := &SessionTemplate{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6212,90 +14694,97 @@ func (c *ProjectsLocationsAutoscalingPoliciesDeleteCall) Do(opts ...googleapi.Ca return ret, nil } -type ProjectsLocationsAutoscalingPoliciesGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ProjectsLocationsSessionsCreateCall struct { + s *Service + parent string + session *Session + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Retrieves autoscaling policy. 
+// Create: Create an interactive session asynchronously. // -// - name: The "resource name" of the autoscaling policy, as described in -// https://cloud.google.com/apis/design/resource_names. For -// projects.regions.autoscalingPolicies.get, the resource name of the policy -// has the following format: -// projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For -// projects.locations.autoscalingPolicies.get, the resource name of the -// policy has the following format: -// projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}. -func (r *ProjectsLocationsAutoscalingPoliciesService) Get(name string) *ProjectsLocationsAutoscalingPoliciesGetCall { - c := &ProjectsLocationsAutoscalingPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name +// - parent: The parent resource where this session will be created. +func (r *ProjectsLocationsSessionsService) Create(parent string, session *Session) *ProjectsLocationsSessionsCreateCall { + c := &ProjectsLocationsSessionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.session = session + return c +} + +// RequestId sets the optional parameter "requestId": A unique ID used to +// identify the request. If the service receives two CreateSessionRequests +// (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateSessionRequest)s +// with the same ID, the second request is ignored, and the first Session is +// created and stored in the backend.Recommendation: Set this value to a UUID +// (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must +// contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens +// (-). The maximum length is 40 characters. +func (c *ProjectsLocationsSessionsCreateCall) RequestId(requestId string) *ProjectsLocationsSessionsCreateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// SessionId sets the optional parameter "sessionId": Required. The ID to use +// for the session, which becomes the final component of the session's resource +// name.This value must be 4-63 characters. Valid characters are /a-z-/. +func (c *ProjectsLocationsSessionsCreateCall) SessionId(sessionId string) *ProjectsLocationsSessionsCreateCall { + c.urlParams_.Set("sessionId", sessionId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsAutoscalingPoliciesGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsAutoscalingPoliciesGetCall { +func (c *ProjectsLocationsSessionsCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets an optional parameter which makes the operation fail if the -// object's ETag matches the given value. This is useful for getting updates -// only after the object has changed since the last request. -func (c *ProjectsLocationsAutoscalingPoliciesGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsAutoscalingPoliciesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. 
-func (c *ProjectsLocationsAutoscalingPoliciesGetCall) Context(ctx context.Context) *ProjectsLocationsAutoscalingPoliciesGetCall { +func (c *ProjectsLocationsSessionsCreateCall) Context(ctx context.Context) *ProjectsLocationsSessionsCreateCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsAutoscalingPoliciesGetCall) Header() http.Header { +func (c *ProjectsLocationsSessionsCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsAutoscalingPoliciesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } +func (c *ProjectsLocationsSessionsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.session) + if err != nil { + return nil, err + } c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/sessions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "name": c.name, + "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.autoscalingPolicies.get" call. +// Do executes the "dataproc.projects.locations.sessions.create" call. // Any non-2xx status code is an error. Response headers are in either -// *AutoscalingPolicy.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified was -// returned. -func (c *ProjectsLocationsAutoscalingPoliciesGetCall) Do(opts ...googleapi.CallOption) (*AutoscalingPolicy, error) { +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsSessionsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6314,7 +14803,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesGetCall) Do(opts ...googleapi.CallO if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &AutoscalingPolicy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6327,79 +14816,84 @@ func (c *ProjectsLocationsAutoscalingPoliciesGetCall) Do(opts ...googleapi.CallO return ret, nil } -type ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall struct { - s *Service - resource string - getiampolicyrequest *GetIamPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsSessionsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the interactive session resource. If the session is not in +// terminal state, it is terminated, and then deleted. +// +// - name: The name of the session resource to delete. +func (r *ProjectsLocationsSessionsService) Delete(name string) *ProjectsLocationsSessionsDeleteCall { + c := &ProjectsLocationsSessionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c } -// GetIamPolicy: Gets the access control policy for a resource. Returns an -// empty policy if the resource exists and does not have a policy set. -// -// - resource: REQUIRED: The resource for which the policy is being requested. -// See Resource names (https://cloud.google.com/apis/design/resource_names) -// for the appropriate value for this field. -func (r *ProjectsLocationsAutoscalingPoliciesService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall { - c := &ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.getiampolicyrequest = getiampolicyrequest +// RequestId sets the optional parameter "requestId": A unique ID used to +// identify the request. If the service receives two DeleteSessionRequest +// (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteSessionRequest)s +// with the same ID, the second request is ignored.Recommendation: Set this +// value to a UUID +// (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must +// contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens +// (-). The maximum length is 40 characters. +func (c *ProjectsLocationsSessionsDeleteCall) RequestId(requestId string) *ProjectsLocationsSessionsDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall { +func (c *ProjectsLocationsSessionsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. 
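// Usage sketch (illustrative; svc, ctx and parent are assumed): Create is
// asynchronous and returns an Operation, and setting RequestId to a UUID
// makes the call safe to retry, since the service ignores a duplicate
// request ID:
//
//	op, err := svc.Projects.Locations.Sessions.
//		Create(parent, &Session{}).
//		SessionId("my-session").        // 4-63 chars, /a-z-/
//		RequestId("a0b1c2d3-...-uuid"). // placeholder UUID value
//		Context(ctx).
//		Do()
//	if err != nil { /* handle error */ }
//	_ = op // poll op until done to obtain the created Session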
-func (c *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall { +func (c *ProjectsLocationsSessionsDeleteCall) Context(ctx context.Context) *ProjectsLocationsSessionsDeleteCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall) Header() http.Header { +func (c *ProjectsLocationsSessionsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +func (c *ProjectsLocationsSessionsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.getiampolicyrequest) - if err != nil { - return nil, err - } c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.autoscalingPolicies.getIamPolicy" call. +// Do executes the "dataproc.projects.locations.sessions.delete" call. // Any non-2xx status code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) in +// *Operation.ServerResponse.Header or (if a response was returned at all) in // error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check // whether the returned error was because http.StatusNotModified was returned. -func (c *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +func (c *ProjectsLocationsSessionsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6418,7 +14912,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall) Do(opts ...google if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6431,49 +14925,28 @@ func (c *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall) Do(opts ...google return ret, nil } -type ProjectsLocationsAutoscalingPoliciesListCall struct { +type ProjectsLocationsSessionsGetCall struct { s *Service - parent string + name string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Lists autoscaling policies in the project. +// Get: Gets the resource representation for an interactive session. // -// - parent: The "resource name" of the region or location, as described in -// https://cloud.google.com/apis/design/resource_names. 
For -// projects.regions.autoscalingPolicies.list, the resource name of the region -// has the following format: projects/{project_id}/regions/{region} For -// projects.locations.autoscalingPolicies.list, the resource name of the -// location has the following format: -// projects/{project_id}/locations/{location}. -func (r *ProjectsLocationsAutoscalingPoliciesService) List(parent string) *ProjectsLocationsAutoscalingPoliciesListCall { - c := &ProjectsLocationsAutoscalingPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} - -// PageSize sets the optional parameter "pageSize": The maximum number of -// results to return in each response. Must be less than or equal to 1000. -// Defaults to 100. -func (c *ProjectsLocationsAutoscalingPoliciesListCall) PageSize(pageSize int64) *ProjectsLocationsAutoscalingPoliciesListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": The page token, returned -// by a previous call, to request the next page of results. -func (c *ProjectsLocationsAutoscalingPoliciesListCall) PageToken(pageToken string) *ProjectsLocationsAutoscalingPoliciesListCall { - c.urlParams_.Set("pageToken", pageToken) +// - name: The name of the session to retrieve. +func (r *ProjectsLocationsSessionsService) Get(name string) *ProjectsLocationsSessionsGetCall { + c := &ProjectsLocationsSessionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsAutoscalingPoliciesListCall) Fields(s ...googleapi.Field) *ProjectsLocationsAutoscalingPoliciesListCall { +func (c *ProjectsLocationsSessionsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -6481,27 +14954,27 @@ func (c *ProjectsLocationsAutoscalingPoliciesListCall) Fields(s ...googleapi.Fie // IfNoneMatch sets an optional parameter which makes the operation fail if the // object's ETag matches the given value. This is useful for getting updates // only after the object has changed since the last request. -func (c *ProjectsLocationsAutoscalingPoliciesListCall) IfNoneMatch(entityTag string) *ProjectsLocationsAutoscalingPoliciesListCall { +func (c *ProjectsLocationsSessionsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionsGetCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsAutoscalingPoliciesListCall) Context(ctx context.Context) *ProjectsLocationsAutoscalingPoliciesListCall { +func (c *ProjectsLocationsSessionsGetCall) Context(ctx context.Context) *ProjectsLocationsSessionsGetCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. 
-func (c *ProjectsLocationsAutoscalingPoliciesListCall) Header() http.Header { +func (c *ProjectsLocationsSessionsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsAutoscalingPoliciesListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsSessionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -6509,7 +14982,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesListCall) doRequest(alt string) (*h var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/autoscalingPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -6517,18 +14990,17 @@ func (c *ProjectsLocationsAutoscalingPoliciesListCall) doRequest(alt string) (*h } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.autoscalingPolicies.list" call. +// Do executes the "dataproc.projects.locations.sessions.get" call. // Any non-2xx status code is an error. Response headers are in either -// *ListAutoscalingPoliciesResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsLocationsAutoscalingPoliciesListCall) Do(opts ...googleapi.CallOption) (*ListAutoscalingPoliciesResponse, error) { +// *Session.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsSessionsGetCall) Do(opts ...googleapi.CallOption) (*Session, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6547,7 +15019,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesListCall) Do(opts ...googleapi.Call if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ListAutoscalingPoliciesResponse{ + ret := &Session{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6560,101 +15032,116 @@ func (c *ProjectsLocationsAutoscalingPoliciesListCall) Do(opts ...googleapi.Call return ret, nil } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
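// Usage sketch (illustrative): Get supports conditional reads through
// IfNoneMatch; if the stored ETag still matches, the server returns
// http.StatusNotModified, which googleapi.IsNotModified detects:
//
//	sess, err := svc.Projects.Locations.Sessions.Get(name).
//		IfNoneMatch(lastETag). // lastETag captured from an earlier response
//		Context(ctx).Do()
//	if googleapi.IsNotModified(err) {
//		// the cached Session is still current
//	}
//	_ = sess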
-func (c *ProjectsLocationsAutoscalingPoliciesListCall) Pages(ctx context.Context, f func(*ListAutoscalingPoliciesResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } +type ProjectsLocationsSessionsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -type ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall struct { - s *Service - resource string - setiampolicyrequest *SetIamPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// List: Lists interactive sessions. +// +// - parent: The parent, which owns this collection of sessions. +func (r *ProjectsLocationsSessionsService) List(parent string) *ProjectsLocationsSessionsListCall { + c := &ProjectsLocationsSessionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c } -// SetIamPolicy: Sets the access control policy on the specified resource. -// Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and -// PERMISSION_DENIED errors. -// -// - resource: REQUIRED: The resource for which the policy is being specified. -// See Resource names (https://cloud.google.com/apis/design/resource_names) -// for the appropriate value for this field. -func (r *ProjectsLocationsAutoscalingPoliciesService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall { - c := &ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.setiampolicyrequest = setiampolicyrequest +// Filter sets the optional parameter "filter": A filter for the sessions to +// return in the response.A filter is a logical expression constraining the +// values of various fields in each session resource. Filters are case +// sensitive, and may contain multiple clauses combined with logical operators +// (AND, OR). Supported fields are session_id, session_uuid, state, +// create_time, and labels.Example: state = ACTIVE and create_time < +// "2023-01-01T00:00:00Z" is a filter for sessions in an ACTIVE state that were +// created before 2023-01-01. state = ACTIVE and labels.environment=production +// is a filter for sessions in an ACTIVE state that have a production +// environment label.See https://google.aip.dev/assets/misc/ebnf-filtering.txt +// for a detailed description of the filter syntax and a list of supported +// comparators. +func (c *ProjectsLocationsSessionsListCall) Filter(filter string) *ProjectsLocationsSessionsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number of +// sessions to return in each response. The service may return fewer than this +// value. +func (c *ProjectsLocationsSessionsListCall) PageSize(pageSize int64) *ProjectsLocationsSessionsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A page token received +// from a previous ListSessions call. Provide this token to retrieve the +// subsequent page. 
+func (c *ProjectsLocationsSessionsListCall) PageToken(pageToken string) *ProjectsLocationsSessionsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall { +func (c *ProjectsLocationsSessionsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsSessionsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall { +func (c *ProjectsLocationsSessionsListCall) Context(ctx context.Context) *ProjectsLocationsSessionsListCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall) Header() http.Header { +func (c *ProjectsLocationsSessionsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) - if err != nil { - return nil, err +func (c *ProjectsLocationsSessionsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/sessions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, + "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.autoscalingPolicies.setIamPolicy" call. +// Do executes the "dataproc.projects.locations.sessions.list" call. // Any non-2xx status code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was returned. 
-func (c *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// *ListSessionsResponse.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsLocationsSessionsListCall) Do(opts ...googleapi.CallOption) (*ListSessionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6673,7 +15160,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall) Do(opts ...google if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &ListSessionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6683,68 +15170,82 @@ func (c *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall) Do(opts ...google if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } - return ret, nil + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsSessionsListCall) Pages(ctx context.Context, f func(*ListSessionsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -type ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall struct { - s *Service - resource string - testiampermissionsrequest *TestIamPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsSessionsTerminateCall struct { + s *Service + name string + terminatesessionrequest *TerminateSessionRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the specified -// resource. If the resource does not exist, this will return an empty set of -// permissions, not a NOT_FOUND error.Note: This operation is designed to be -// used for building permission-aware UIs and command-line tools, not for -// authorization checking. This operation may "fail open" without warning. +// Terminate: Terminates the interactive session. // -// - resource: REQUIRED: The resource for which the policy detail is being -// requested. See Resource names -// (https://cloud.google.com/apis/design/resource_names) for the appropriate -// value for this field. -func (r *ProjectsLocationsAutoscalingPoliciesService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall { - c := &ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.testiampermissionsrequest = testiampermissionsrequest +// - name: The name of the session resource to terminate. 
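// Usage sketch (illustrative): the filter grammar documented above can be
// combined with the Pages helper to walk every matching session without
// manual pageToken bookkeeping:
//
//	err := svc.Projects.Locations.Sessions.List(parent).
//		Filter(`state = ACTIVE AND labels.environment=production`).
//		PageSize(100).
//		Pages(ctx, func(page *ListSessionsResponse) error {
//			for _, s := range page.Sessions {
//				fmt.Println(s.Name, s.State)
//			}
//			return nil
//		})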
+func (r *ProjectsLocationsSessionsService) Terminate(name string, terminatesessionrequest *TerminateSessionRequest) *ProjectsLocationsSessionsTerminateCall { + c := &ProjectsLocationsSessionsTerminateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.terminatesessionrequest = terminatesessionrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall { +func (c *ProjectsLocationsSessionsTerminateCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsTerminateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall) Context(ctx context.Context) *ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall { +func (c *ProjectsLocationsSessionsTerminateCall) Context(ctx context.Context) *ProjectsLocationsSessionsTerminateCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall) Header() http.Header { +func (c *ProjectsLocationsSessionsTerminateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsSessionsTerminateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.terminatesessionrequest) if err != nil { return nil, err } c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:terminate") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -6752,18 +15253,17 @@ func (c *ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall) doRequest(a } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.autoscalingPolicies.testIamPermissions" call. +// Do executes the "dataproc.projects.locations.sessions.terminate" call. // Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. 
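// Usage sketch (illustrative): Terminate and Delete both return an
// Operation; per the Delete doc above, deleting a non-terminal session
// terminates it first, so an explicit Terminate is only needed when the
// session should be stopped but its resource kept around:
//
//	op, err := svc.Projects.Locations.Sessions.
//		Terminate(name, &TerminateSessionRequest{}).
//		Context(ctx).Do()
//	if err != nil { /* handle error */ }
//	_ = op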
+func (c *ProjectsLocationsSessionsTerminateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6782,7 +15282,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall) Do(opts ... if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &TestIamPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6795,68 +15295,76 @@ func (c *ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall) Do(opts ... return ret, nil } -type ProjectsLocationsAutoscalingPoliciesUpdateCall struct { - s *Service - name string - autoscalingpolicy *AutoscalingPolicy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsSessionsSparkApplicationsAccessCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Update: Updates (replaces) autoscaling policy.Disabled check for -// update_mask, because all updates will be full replacements. +// Access: Obtain high level information corresponding to a single Spark +// Application. // -// - name: Output only. The "resource name" of the autoscaling policy, as -// described in https://cloud.google.com/apis/design/resource_names. For -// projects.regions.autoscalingPolicies, the resource name of the policy has -// the following format: -// projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For -// projects.locations.autoscalingPolicies, the resource name of the policy -// has the following format: -// projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}. -func (r *ProjectsLocationsAutoscalingPoliciesService) Update(name string, autoscalingpolicy *AutoscalingPolicy) *ProjectsLocationsAutoscalingPoliciesUpdateCall { - c := &ProjectsLocationsAutoscalingPoliciesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The fully qualified name of the session to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApp +// lications/APPLICATION_ID". +func (r *ProjectsLocationsSessionsSparkApplicationsService) Access(name string) *ProjectsLocationsSessionsSparkApplicationsAccessCall { + c := &ProjectsLocationsSessionsSparkApplicationsAccessCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name - c.autoscalingpolicy = autoscalingpolicy + return c +} + +// Parent sets the optional parameter "parent": Required. Parent (Session) +// resource reference. +func (c *ProjectsLocationsSessionsSparkApplicationsAccessCall) Parent(parent string) *ProjectsLocationsSessionsSparkApplicationsAccessCall { + c.urlParams_.Set("parent", parent) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsAutoscalingPoliciesUpdateCall) Fields(s ...googleapi.Field) *ProjectsLocationsAutoscalingPoliciesUpdateCall { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsSparkApplicationsAccessCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. 
This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsSessionsSparkApplicationsAccessCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionsSparkApplicationsAccessCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsAutoscalingPoliciesUpdateCall) Context(ctx context.Context) *ProjectsLocationsAutoscalingPoliciesUpdateCall { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessCall) Context(ctx context.Context) *ProjectsLocationsSessionsSparkApplicationsAccessCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsAutoscalingPoliciesUpdateCall) Header() http.Header { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsAutoscalingPoliciesUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscalingpolicy) - if err != nil { - return nil, err +func (c *ProjectsLocationsSessionsSparkApplicationsAccessCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:access") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -6867,13 +15375,13 @@ func (c *ProjectsLocationsAutoscalingPoliciesUpdateCall) doRequest(alt string) ( return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.autoscalingPolicies.update" call. +// Do executes the "dataproc.projects.locations.sessions.sparkApplications.access" call. // Any non-2xx status code is an error. Response headers are in either -// *AutoscalingPolicy.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified was -// returned. -func (c *ProjectsLocationsAutoscalingPoliciesUpdateCall) Do(opts ...googleapi.CallOption) (*AutoscalingPolicy, error) { +// *AccessSessionSparkApplicationResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsSessionsSparkApplicationsAccessCall) Do(opts ...googleapi.CallOption) (*AccessSessionSparkApplicationResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6892,7 +15400,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesUpdateCall) Do(opts ...googleapi.Ca if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &AutoscalingPolicy{ + ret := &AccessSessionSparkApplicationResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6905,61 +15413,75 @@ func (c *ProjectsLocationsAutoscalingPoliciesUpdateCall) Do(opts ...googleapi.Ca return ret, nil } -type ProjectsLocationsBatchesAnalyzeCall struct { - s *Service - name string - analyzebatchrequest *AnalyzeBatchRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsSessionsSparkApplicationsAccessEnvironmentInfoCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Analyze: Analyze a Batch for possible recommendations and insights. +// AccessEnvironmentInfo: Obtain environment details for a Spark Application // -// - name: The fully qualified name of the batch to analyze in the format -// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID". -func (r *ProjectsLocationsBatchesService) Analyze(name string, analyzebatchrequest *AnalyzeBatchRequest) *ProjectsLocationsBatchesAnalyzeCall { - c := &ProjectsLocationsBatchesAnalyzeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The fully qualified name of the session to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApp +// lications/APPLICATION_ID". +func (r *ProjectsLocationsSessionsSparkApplicationsService) AccessEnvironmentInfo(name string) *ProjectsLocationsSessionsSparkApplicationsAccessEnvironmentInfoCall { + c := &ProjectsLocationsSessionsSparkApplicationsAccessEnvironmentInfoCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name - c.analyzebatchrequest = analyzebatchrequest + return c +} + +// Parent sets the optional parameter "parent": Required. Parent (Session) +// resource reference. +func (c *ProjectsLocationsSessionsSparkApplicationsAccessEnvironmentInfoCall) Parent(parent string) *ProjectsLocationsSessionsSparkApplicationsAccessEnvironmentInfoCall { + c.urlParams_.Set("parent", parent) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsBatchesAnalyzeCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesAnalyzeCall { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessEnvironmentInfoCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsSparkApplicationsAccessEnvironmentInfoCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsSessionsSparkApplicationsAccessEnvironmentInfoCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionsSparkApplicationsAccessEnvironmentInfoCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. 
-func (c *ProjectsLocationsBatchesAnalyzeCall) Context(ctx context.Context) *ProjectsLocationsBatchesAnalyzeCall { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessEnvironmentInfoCall) Context(ctx context.Context) *ProjectsLocationsSessionsSparkApplicationsAccessEnvironmentInfoCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsBatchesAnalyzeCall) Header() http.Header { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessEnvironmentInfoCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsBatchesAnalyzeCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.analyzebatchrequest) - if err != nil { - return nil, err +func (c *ProjectsLocationsSessionsSparkApplicationsAccessEnvironmentInfoCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:analyze") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:accessEnvironmentInfo") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -6970,12 +15492,13 @@ func (c *ProjectsLocationsBatchesAnalyzeCall) doRequest(alt string) (*http.Respo return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.batches.analyze" call. +// Do executes the "dataproc.projects.locations.sessions.sparkApplications.accessEnvironmentInfo" call. // Any non-2xx status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was returned. -func (c *ProjectsLocationsBatchesAnalyzeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *AccessSessionSparkApplicationEnvironmentInfoResponse.ServerResponse.Header +// or (if a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsSessionsSparkApplicationsAccessEnvironmentInfoCall) Do(opts ...googleapi.CallOption) (*AccessSessionSparkApplicationEnvironmentInfoResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6994,7 +15517,7 @@ func (c *ProjectsLocationsBatchesAnalyzeCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &AccessSessionSparkApplicationEnvironmentInfoResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7007,98 +15530,99 @@ func (c *ProjectsLocationsBatchesAnalyzeCall) Do(opts ...googleapi.CallOption) ( return ret, nil } -type ProjectsLocationsBatchesCreateCall struct { - s *Service - parent string - batch *Batch - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsSessionsSparkApplicationsAccessJobCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Create: Creates a batch workload that executes asynchronously. +// AccessJob: Obtain data corresponding to a spark job for a Spark Application. // -// - parent: The parent resource where this batch will be created. -func (r *ProjectsLocationsBatchesService) Create(parent string, batch *Batch) *ProjectsLocationsBatchesCreateCall { - c := &ProjectsLocationsBatchesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.batch = batch +// - name: The fully qualified name of the session to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApp +// lications/APPLICATION_ID". +func (r *ProjectsLocationsSessionsSparkApplicationsService) AccessJob(name string) *ProjectsLocationsSessionsSparkApplicationsAccessJobCall { + c := &ProjectsLocationsSessionsSparkApplicationsAccessJobCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } -// BatchId sets the optional parameter "batchId": The ID to use for the batch, -// which will become the final component of the batch's resource name.This -// value must be 4-63 characters. Valid characters are /[a-z][0-9]-/. -func (c *ProjectsLocationsBatchesCreateCall) BatchId(batchId string) *ProjectsLocationsBatchesCreateCall { - c.urlParams_.Set("batchId", batchId) +// JobId sets the optional parameter "jobId": Required. Job ID to fetch data +// for. +func (c *ProjectsLocationsSessionsSparkApplicationsAccessJobCall) JobId(jobId int64) *ProjectsLocationsSessionsSparkApplicationsAccessJobCall { + c.urlParams_.Set("jobId", fmt.Sprint(jobId)) return c } -// RequestId sets the optional parameter "requestId": A unique ID used to -// identify the request. If the service receives two CreateBatchRequest -// (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s -// with the same request_id, the second request is ignored and the Operation -// that corresponds to the first Batch created and stored in the backend is -// returned.Recommendation: Set this value to a UUID -// (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must -// contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens -// (-). The maximum length is 40 characters. -func (c *ProjectsLocationsBatchesCreateCall) RequestId(requestId string) *ProjectsLocationsBatchesCreateCall { - c.urlParams_.Set("requestId", requestId) +// Parent sets the optional parameter "parent": Required. Parent (Session) +// resource reference. 
+func (c *ProjectsLocationsSessionsSparkApplicationsAccessJobCall) Parent(parent string) *ProjectsLocationsSessionsSparkApplicationsAccessJobCall { + c.urlParams_.Set("parent", parent) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsBatchesCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesCreateCall { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessJobCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsSparkApplicationsAccessJobCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsSessionsSparkApplicationsAccessJobCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionsSparkApplicationsAccessJobCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsBatchesCreateCall) Context(ctx context.Context) *ProjectsLocationsBatchesCreateCall { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessJobCall) Context(ctx context.Context) *ProjectsLocationsSessionsSparkApplicationsAccessJobCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsBatchesCreateCall) Header() http.Header { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessJobCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsBatchesCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.batch) - if err != nil { - return nil, err +func (c *ProjectsLocationsSessionsSparkApplicationsAccessJobCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/batches") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:accessJob") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.batches.create" call. +// Do executes the "dataproc.projects.locations.sessions.sparkApplications.accessJob" call. // Any non-2xx status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was returned. 
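// Usage sketch (illustrative): the sparkApplications accessors are
// read-only GETs keyed by the fully qualified application name, with the
// owning session passed as a required "parent" query parameter and, for
// AccessJob, the numeric Spark job ID:
//
//	apps := svc.Projects.Locations.Sessions.SparkApplications
//	info, err := apps.Access(appName).Parent(sessionName).Context(ctx).Do()
//	if err != nil { /* handle error */ }
//	_ = info
//
//	job, err := apps.AccessJob(appName).
//		Parent(sessionName).
//		JobId(42). // hypothetical Spark job ID
//		Context(ctx).Do()
//	if err != nil { /* handle error */ }
//	_ = job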
-func (c *ProjectsLocationsBatchesCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *AccessSessionSparkApplicationJobResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsSessionsSparkApplicationsAccessJobCall) Do(opts ...googleapi.CallOption) (*AccessSessionSparkApplicationJobResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7117,7 +15641,7 @@ func (c *ProjectsLocationsBatchesCreateCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &AccessSessionSparkApplicationJobResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7130,57 +15654,84 @@ func (c *ProjectsLocationsBatchesCreateCall) Do(opts ...googleapi.CallOption) (* return ret, nil } -type ProjectsLocationsBatchesDeleteCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsSessionsSparkApplicationsAccessSqlPlanCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the batch workload resource. If the batch is not in a -// CANCELLED, SUCCEEDED or FAILED State, the delete operation fails and the -// response returns FAILED_PRECONDITION. +// AccessSqlPlan: Obtain Spark Plan Graph for a Spark Application SQL +// execution. Limits the number of clusters returned as part of the graph to +// 10000. // -// - name: The fully qualified name of the batch to retrieve in the format -// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID". -func (r *ProjectsLocationsBatchesService) Delete(name string) *ProjectsLocationsBatchesDeleteCall { - c := &ProjectsLocationsBatchesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The fully qualified name of the session to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApp +// lications/APPLICATION_ID". +func (r *ProjectsLocationsSessionsSparkApplicationsService) AccessSqlPlan(name string) *ProjectsLocationsSessionsSparkApplicationsAccessSqlPlanCall { + c := &ProjectsLocationsSessionsSparkApplicationsAccessSqlPlanCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } +// ExecutionId sets the optional parameter "executionId": Required. Execution +// ID +func (c *ProjectsLocationsSessionsSparkApplicationsAccessSqlPlanCall) ExecutionId(executionId int64) *ProjectsLocationsSessionsSparkApplicationsAccessSqlPlanCall { + c.urlParams_.Set("executionId", fmt.Sprint(executionId)) + return c +} + +// Parent sets the optional parameter "parent": Required. Parent (Session) +// resource reference. +func (c *ProjectsLocationsSessionsSparkApplicationsAccessSqlPlanCall) Parent(parent string) *ProjectsLocationsSessionsSparkApplicationsAccessSqlPlanCall { + c.urlParams_.Set("parent", parent) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. 
-func (c *ProjectsLocationsBatchesDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesDeleteCall { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessSqlPlanCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsSparkApplicationsAccessSqlPlanCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsSessionsSparkApplicationsAccessSqlPlanCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionsSparkApplicationsAccessSqlPlanCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsBatchesDeleteCall) Context(ctx context.Context) *ProjectsLocationsBatchesDeleteCall { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessSqlPlanCall) Context(ctx context.Context) *ProjectsLocationsSessionsSparkApplicationsAccessSqlPlanCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsBatchesDeleteCall) Header() http.Header { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessSqlPlanCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsBatchesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessSqlPlanCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:accessSqlPlan") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -7191,12 +15742,15 @@ func (c *ProjectsLocationsBatchesDeleteCall) doRequest(alt string) (*http.Respon return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.batches.delete" call. +// Do executes the "dataproc.projects.locations.sessions.sparkApplications.accessSqlPlan" call. // Any non-2xx status code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was returned. -func (c *ProjectsLocationsBatchesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { +// *AccessSessionSparkApplicationSqlSparkPlanGraphResponse.ServerResponse.Header +// +// or (if a response was returned at all) in error.(*googleapi.Error).Header. +// +// Use googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsSessionsSparkApplicationsAccessSqlPlanCall) Do(opts ...googleapi.CallOption) (*AccessSessionSparkApplicationSqlSparkPlanGraphResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7215,7 +15769,7 @@ func (c *ProjectsLocationsBatchesDeleteCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Empty{ + ret := &AccessSessionSparkApplicationSqlSparkPlanGraphResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7228,7 +15782,7 @@ func (c *ProjectsLocationsBatchesDeleteCall) Do(opts ...googleapi.CallOption) (* return ret, nil } -type ProjectsLocationsBatchesGetCall struct { +type ProjectsLocationsSessionsSparkApplicationsAccessSqlQueryCall struct { s *Service name string urlParams_ gensupport.URLParams @@ -7237,20 +15791,50 @@ type ProjectsLocationsBatchesGetCall struct { header_ http.Header } -// Get: Gets the batch workload resource representation. +// AccessSqlQuery: Obtain data corresponding to a particular SQL Query for a +// Spark Application. // -// - name: The fully qualified name of the batch to retrieve in the format -// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID". -func (r *ProjectsLocationsBatchesService) Get(name string) *ProjectsLocationsBatchesGetCall { - c := &ProjectsLocationsBatchesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The fully qualified name of the session to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApp +// lications/APPLICATION_ID". +func (r *ProjectsLocationsSessionsSparkApplicationsService) AccessSqlQuery(name string) *ProjectsLocationsSessionsSparkApplicationsAccessSqlQueryCall { + c := &ProjectsLocationsSessionsSparkApplicationsAccessSqlQueryCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } +// Details sets the optional parameter "details": Lists/ hides details of Spark +// plan nodes. True is set to list and false to hide. +func (c *ProjectsLocationsSessionsSparkApplicationsAccessSqlQueryCall) Details(details bool) *ProjectsLocationsSessionsSparkApplicationsAccessSqlQueryCall { + c.urlParams_.Set("details", fmt.Sprint(details)) + return c +} + +// ExecutionId sets the optional parameter "executionId": Required. Execution +// ID +func (c *ProjectsLocationsSessionsSparkApplicationsAccessSqlQueryCall) ExecutionId(executionId int64) *ProjectsLocationsSessionsSparkApplicationsAccessSqlQueryCall { + c.urlParams_.Set("executionId", fmt.Sprint(executionId)) + return c +} + +// Parent sets the optional parameter "parent": Required. Parent (Session) +// resource reference. +func (c *ProjectsLocationsSessionsSparkApplicationsAccessSqlQueryCall) Parent(parent string) *ProjectsLocationsSessionsSparkApplicationsAccessSqlQueryCall { + c.urlParams_.Set("parent", parent) + return c +} + +// PlanDescription sets the optional parameter "planDescription": Enables/ +// disables physical plan description on demand +func (c *ProjectsLocationsSessionsSparkApplicationsAccessSqlQueryCall) PlanDescription(planDescription bool) *ProjectsLocationsSessionsSparkApplicationsAccessSqlQueryCall { + c.urlParams_.Set("planDescription", fmt.Sprint(planDescription)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. 
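// A minimal end-to-end sketch of the accessSqlPlan call added above, assuming
// Application Default Credentials are available; my-project, my-session and
// app-1234 are placeholder resource names, not real values:
//
//	package main
//
//	import (
//		"context"
//		"fmt"
//		"log"
//
//		dataproc "google.golang.org/api/dataproc/v1"
//	)
//
//	func main() {
//		ctx := context.Background()
//		svc, err := dataproc.NewService(ctx) // picks up Application Default Credentials
//		if err != nil {
//			log.Fatal(err)
//		}
//		name := "projects/my-project/locations/us-central1/sessions/my-session/sparkApplications/app-1234"
//		parent := "projects/my-project/locations/us-central1/sessions/my-session"
//		// executionId and parent are required query parameters despite the
//		// "optional parameter" boilerplate on their setters.
//		resp, err := svc.Projects.Locations.Sessions.SparkApplications.
//			AccessSqlPlan(name).
//			ExecutionId(1).
//			Parent(parent).
//			Context(ctx).
//			Do()
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println(resp.HTTPStatusCode) // via the embedded googleapi.ServerResponse
//	}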
-func (c *ProjectsLocationsBatchesGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesGetCall { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessSqlQueryCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsSparkApplicationsAccessSqlQueryCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -7258,27 +15842,27 @@ func (c *ProjectsLocationsBatchesGetCall) Fields(s ...googleapi.Field) *Projects // IfNoneMatch sets an optional parameter which makes the operation fail if the // object's ETag matches the given value. This is useful for getting updates // only after the object has changed since the last request. -func (c *ProjectsLocationsBatchesGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsBatchesGetCall { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessSqlQueryCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionsSparkApplicationsAccessSqlQueryCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsBatchesGetCall) Context(ctx context.Context) *ProjectsLocationsBatchesGetCall { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessSqlQueryCall) Context(ctx context.Context) *ProjectsLocationsSessionsSparkApplicationsAccessSqlQueryCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsBatchesGetCall) Header() http.Header { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessSqlQueryCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsBatchesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessSqlQueryCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -7286,7 +15870,7 @@ func (c *ProjectsLocationsBatchesGetCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:accessSqlQuery") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -7299,12 +15883,13 @@ func (c *ProjectsLocationsBatchesGetCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.batches.get" call. +// Do executes the "dataproc.projects.locations.sessions.sparkApplications.accessSqlQuery" call. // Any non-2xx status code is an error. Response headers are in either -// *Batch.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was returned. -func (c *ProjectsLocationsBatchesGetCall) Do(opts ...googleapi.CallOption) (*Batch, error) { +// *AccessSessionSparkApplicationSqlQueryResponse.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *ProjectsLocationsSessionsSparkApplicationsAccessSqlQueryCall) Do(opts ...googleapi.CallOption) (*AccessSessionSparkApplicationSqlQueryResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7323,7 +15908,7 @@ func (c *ProjectsLocationsBatchesGetCall) Do(opts ...googleapi.CallOption) (*Bat if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Batch{ + ret := &AccessSessionSparkApplicationSqlQueryResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7336,69 +15921,61 @@ func (c *ProjectsLocationsBatchesGetCall) Do(opts ...googleapi.CallOption) (*Bat return ret, nil } -type ProjectsLocationsBatchesListCall struct { +type ProjectsLocationsSessionsSparkApplicationsAccessStageAttemptCall struct { s *Service - parent string + name string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Lists batch workloads. +// AccessStageAttempt: Obtain data corresponding to a spark stage attempt for a +// Spark Application. // -// - parent: The parent, which owns this collection of batches. -func (r *ProjectsLocationsBatchesService) List(parent string) *ProjectsLocationsBatchesListCall { - c := &ProjectsLocationsBatchesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent +// - name: The fully qualified name of the session to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApp +// lications/APPLICATION_ID". +func (r *ProjectsLocationsSessionsSparkApplicationsService) AccessStageAttempt(name string) *ProjectsLocationsSessionsSparkApplicationsAccessStageAttemptCall { + c := &ProjectsLocationsSessionsSparkApplicationsAccessStageAttemptCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } -// Filter sets the optional parameter "filter": A filter for the batches to -// return in the response.A filter is a logical expression constraining the -// values of various fields in each batch resource. Filters are case sensitive, -// and may contain multiple clauses combined with logical operators (AND/OR). -// Supported fields are batch_id, batch_uuid, state, create_time, and -// labels.e.g. state = RUNNING and create_time < "2023-01-01T00:00:00Z" filters -// for batches in state RUNNING that were created before 2023-01-01. state = -// RUNNING and labels.environment=production filters for batches in state in a -// RUNNING state that have a production environment label.See -// https://google.aip.dev/assets/misc/ebnf-filtering.txt for a detailed -// description of the filter syntax and a list of supported comparisons. -func (c *ProjectsLocationsBatchesListCall) Filter(filter string) *ProjectsLocationsBatchesListCall { - c.urlParams_.Set("filter", filter) +// Parent sets the optional parameter "parent": Required. Parent (Session) +// resource reference. +func (c *ProjectsLocationsSessionsSparkApplicationsAccessStageAttemptCall) Parent(parent string) *ProjectsLocationsSessionsSparkApplicationsAccessStageAttemptCall { + c.urlParams_.Set("parent", parent) return c } -// OrderBy sets the optional parameter "orderBy": Field(s) on which to sort the -// list of batches.Currently the only supported sort orders are unspecified -// (empty) and create_time desc to sort by most recently created batches -// first.See https://google.aip.dev/132#ordering for more details. 
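// Building on the sketch above (same svc, ctx, name and parent), the
// accessSqlQuery call completed earlier in this hunk supports conditional
// reads through IfNoneMatch; etag is a placeholder for a previously returned
// ETag header value, and googleapi is "google.golang.org/api/googleapi":
//
//	resp, err := svc.Projects.Locations.Sessions.SparkApplications.
//		AccessSqlQuery(name).
//		ExecutionId(1).
//		Parent(parent).
//		Details(true).          // list Spark plan node details
//		PlanDescription(false). // omit the physical plan description
//		IfNoneMatch(etag).
//		Context(ctx).
//		Do()
//	if googleapi.IsNotModified(err) {
//		// the copy identified by etag is still current; reuse it
//	} else if err != nil {
//		log.Fatal(err)
//	} else {
//		_ = resp
//	}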
-func (c *ProjectsLocationsBatchesListCall) OrderBy(orderBy string) *ProjectsLocationsBatchesListCall { - c.urlParams_.Set("orderBy", orderBy) +// StageAttemptId sets the optional parameter "stageAttemptId": Required. Stage +// Attempt ID +func (c *ProjectsLocationsSessionsSparkApplicationsAccessStageAttemptCall) StageAttemptId(stageAttemptId int64) *ProjectsLocationsSessionsSparkApplicationsAccessStageAttemptCall { + c.urlParams_.Set("stageAttemptId", fmt.Sprint(stageAttemptId)) return c } -// PageSize sets the optional parameter "pageSize": The maximum number of -// batches to return in each response. The service may return fewer than this -// value. The default page size is 20; the maximum page size is 1000. -func (c *ProjectsLocationsBatchesListCall) PageSize(pageSize int64) *ProjectsLocationsBatchesListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) +// StageId sets the optional parameter "stageId": Required. Stage ID +func (c *ProjectsLocationsSessionsSparkApplicationsAccessStageAttemptCall) StageId(stageId int64) *ProjectsLocationsSessionsSparkApplicationsAccessStageAttemptCall { + c.urlParams_.Set("stageId", fmt.Sprint(stageId)) return c } -// PageToken sets the optional parameter "pageToken": A page token received -// from a previous ListBatches call. Provide this token to retrieve the -// subsequent page. -func (c *ProjectsLocationsBatchesListCall) PageToken(pageToken string) *ProjectsLocationsBatchesListCall { - c.urlParams_.Set("pageToken", pageToken) +// SummaryMetricsMask sets the optional parameter "summaryMetricsMask": The +// list of summary metrics fields to include. Empty list will default to skip +// all summary metrics fields. Example, if the response should include +// TaskQuantileMetrics, the request should have task_quantile_metrics in +// summary_metrics_mask field +func (c *ProjectsLocationsSessionsSparkApplicationsAccessStageAttemptCall) SummaryMetricsMask(summaryMetricsMask string) *ProjectsLocationsSessionsSparkApplicationsAccessStageAttemptCall { + c.urlParams_.Set("summaryMetricsMask", summaryMetricsMask) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsBatchesListCall) Fields(s ...googleapi.Field) *ProjectsLocationsBatchesListCall { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessStageAttemptCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsSparkApplicationsAccessStageAttemptCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -7406,27 +15983,27 @@ func (c *ProjectsLocationsBatchesListCall) Fields(s ...googleapi.Field) *Project // IfNoneMatch sets an optional parameter which makes the operation fail if the // object's ETag matches the given value. This is useful for getting updates // only after the object has changed since the last request. -func (c *ProjectsLocationsBatchesListCall) IfNoneMatch(entityTag string) *ProjectsLocationsBatchesListCall { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessStageAttemptCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionsSparkApplicationsAccessStageAttemptCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. 
-func (c *ProjectsLocationsBatchesListCall) Context(ctx context.Context) *ProjectsLocationsBatchesListCall { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessStageAttemptCall) Context(ctx context.Context) *ProjectsLocationsSessionsSparkApplicationsAccessStageAttemptCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsBatchesListCall) Header() http.Header { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessStageAttemptCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsBatchesListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessStageAttemptCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -7434,7 +16011,7 @@ func (c *ProjectsLocationsBatchesListCall) doRequest(alt string) (*http.Response var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/batches") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:accessStageAttempt") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -7442,18 +16019,18 @@ func (c *ProjectsLocationsBatchesListCall) doRequest(alt string) (*http.Response } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.batches.list" call. +// Do executes the "dataproc.projects.locations.sessions.sparkApplications.accessStageAttempt" call. // Any non-2xx status code is an error. Response headers are in either -// *ListBatchesResponse.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified was -// returned. -func (c *ProjectsLocationsBatchesListCall) Do(opts ...googleapi.CallOption) (*ListBatchesResponse, error) { +// *AccessSessionSparkApplicationStageAttemptResponse.ServerResponse.Header or +// (if a response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsSessionsSparkApplicationsAccessStageAttemptCall) Do(opts ...googleapi.CallOption) (*AccessSessionSparkApplicationStageAttemptResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7472,7 +16049,7 @@ func (c *ProjectsLocationsBatchesListCall) Do(opts ...googleapi.CallOption) (*Li if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ListBatchesResponse{ + ret := &AccessSessionSparkApplicationStageAttemptResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7485,83 +16062,82 @@ func (c *ProjectsLocationsBatchesListCall) Do(opts ...googleapi.CallOption) (*Li return ret, nil } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. 
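// The accessStageAttempt call whose Do method closes above takes the stage
// coordinates as query parameters; a sketch under the same assumptions as the
// earlier examples (svc, ctx, name, parent), with illustrative stage IDs:
//
//	attempt, err := svc.Projects.Locations.Sessions.SparkApplications.
//		AccessStageAttempt(name).
//		Parent(parent).
//		StageId(3).
//		StageAttemptId(0).
//		SummaryMetricsMask("task_quantile_metrics"). // include TaskQuantileMetrics, per the field doc
//		Context(ctx).
//		Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	_ = attempt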
-// The provided context supersedes any context provided to the Context method. -func (c *ProjectsLocationsBatchesListCall) Pages(ctx context.Context, f func(*ListBatchesResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -type ProjectsLocationsOperationsCancelCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsSessionsSparkApplicationsAccessStageRddGraphCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Cancel: Starts asynchronous cancellation on a long-running operation. The -// server makes a best effort to cancel the operation, but success is not -// guaranteed. If the server doesn't support this method, it returns -// google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or -// other methods to check whether the cancellation succeeded or whether the -// operation completed despite cancellation. On successful cancellation, the -// operation is not deleted; instead, it becomes an operation with an -// Operation.error value with a google.rpc.Status.code of 1, corresponding to -// Code.CANCELLED. +// AccessStageRddGraph: Obtain RDD operation graph for a Spark Application +// Stage. Limits the number of clusters returned as part of the graph to 10000. // -// - name: The name of the operation resource to be cancelled. -func (r *ProjectsLocationsOperationsService) Cancel(name string) *ProjectsLocationsOperationsCancelCall { - c := &ProjectsLocationsOperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The fully qualified name of the session to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApp +// lications/APPLICATION_ID". +func (r *ProjectsLocationsSessionsSparkApplicationsService) AccessStageRddGraph(name string) *ProjectsLocationsSessionsSparkApplicationsAccessStageRddGraphCall { + c := &ProjectsLocationsSessionsSparkApplicationsAccessStageRddGraphCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } +// Parent sets the optional parameter "parent": Required. Parent (Session) +// resource reference. +func (c *ProjectsLocationsSessionsSparkApplicationsAccessStageRddGraphCall) Parent(parent string) *ProjectsLocationsSessionsSparkApplicationsAccessStageRddGraphCall { + c.urlParams_.Set("parent", parent) + return c +} + +// StageId sets the optional parameter "stageId": Required. Stage ID +func (c *ProjectsLocationsSessionsSparkApplicationsAccessStageRddGraphCall) StageId(stageId int64) *ProjectsLocationsSessionsSparkApplicationsAccessStageRddGraphCall { + c.urlParams_.Set("stageId", fmt.Sprint(stageId)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. 
-func (c *ProjectsLocationsOperationsCancelCall) Fields(s ...googleapi.Field) *ProjectsLocationsOperationsCancelCall { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessStageRddGraphCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsSparkApplicationsAccessStageRddGraphCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsSessionsSparkApplicationsAccessStageRddGraphCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionsSparkApplicationsAccessStageRddGraphCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsOperationsCancelCall) Context(ctx context.Context) *ProjectsLocationsOperationsCancelCall { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessStageRddGraphCall) Context(ctx context.Context) *ProjectsLocationsSessionsSparkApplicationsAccessStageRddGraphCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsOperationsCancelCall) Header() http.Header { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessStageRddGraphCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsOperationsCancelCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessStageRddGraphCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:cancel") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:accessStageRddGraph") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -7572,12 +16148,13 @@ func (c *ProjectsLocationsOperationsCancelCall) doRequest(alt string) (*http.Res return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.operations.cancel" call. +// Do executes the "dataproc.projects.locations.sessions.sparkApplications.accessStageRddGraph" call. // Any non-2xx status code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) in +// *AccessSessionSparkApplicationStageRddOperationGraphResponse.ServerResponse.H +// eader or (if a response was returned at all) in // error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check // whether the returned error was because http.StatusNotModified was returned. -func (c *ProjectsLocationsOperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, error) { +func (c *ProjectsLocationsSessionsSparkApplicationsAccessStageRddGraphCall) Do(opts ...googleapi.CallOption) (*AccessSessionSparkApplicationStageRddOperationGraphResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7596,7 +16173,7 @@ func (c *ProjectsLocationsOperationsCancelCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Empty{ + ret := &AccessSessionSparkApplicationStageRddOperationGraphResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7609,73 +16186,142 @@ func (c *ProjectsLocationsOperationsCancelCall) Do(opts ...googleapi.CallOption) return ret, nil } -type ProjectsLocationsOperationsDeleteCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsSessionsSparkApplicationsSearchCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes a long-running operation. This method indicates that the -// client is no longer interested in the operation result. It does not cancel -// the operation. If the server doesn't support this method, it returns -// google.rpc.Code.UNIMPLEMENTED. +// Search: Obtain high level information and list of Spark Applications +// corresponding to a batch // -// - name: The name of the operation resource to be deleted. -func (r *ProjectsLocationsOperationsService) Delete(name string) *ProjectsLocationsOperationsDeleteCall { - c := &ProjectsLocationsOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name +// - parent: The fully qualified name of the session to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID". +func (r *ProjectsLocationsSessionsSparkApplicationsService) Search(parent string) *ProjectsLocationsSessionsSparkApplicationsSearchCall { + c := &ProjectsLocationsSessionsSparkApplicationsSearchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// ApplicationStatus sets the optional parameter "applicationStatus": Search +// only applications in the chosen state. +// +// Possible values: +// +// "APPLICATION_STATUS_UNSPECIFIED" +// "APPLICATION_STATUS_RUNNING" +// "APPLICATION_STATUS_COMPLETED" +func (c *ProjectsLocationsSessionsSparkApplicationsSearchCall) ApplicationStatus(applicationStatus string) *ProjectsLocationsSessionsSparkApplicationsSearchCall { + c.urlParams_.Set("applicationStatus", applicationStatus) + return c +} + +// MaxEndTime sets the optional parameter "maxEndTime": Latest end timestamp to +// list. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchCall) MaxEndTime(maxEndTime string) *ProjectsLocationsSessionsSparkApplicationsSearchCall { + c.urlParams_.Set("maxEndTime", maxEndTime) + return c +} + +// MaxTime sets the optional parameter "maxTime": Latest start timestamp to +// list. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchCall) MaxTime(maxTime string) *ProjectsLocationsSessionsSparkApplicationsSearchCall { + c.urlParams_.Set("maxTime", maxTime) + return c +} + +// MinEndTime sets the optional parameter "minEndTime": Earliest end timestamp +// to list. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchCall) MinEndTime(minEndTime string) *ProjectsLocationsSessionsSparkApplicationsSearchCall { + c.urlParams_.Set("minEndTime", minEndTime) + return c +} + +// MinTime sets the optional parameter "minTime": Earliest start timestamp to +// list. 
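// The accessStageRddGraph call completed above follows the same shape; only
// parent and stageId are needed (same placeholder values as before):
//
//	graph, err := svc.Projects.Locations.Sessions.SparkApplications.
//		AccessStageRddGraph(name).
//		Parent(parent).
//		StageId(3).
//		Context(ctx).
//		Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	_ = graph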
+func (c *ProjectsLocationsSessionsSparkApplicationsSearchCall) MinTime(minTime string) *ProjectsLocationsSessionsSparkApplicationsSearchCall { + c.urlParams_.Set("minTime", minTime) + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of +// applications to return in each response. The service may return fewer than +// this. The default page size is 10; the maximum page size is 100. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchCall) PageSize(pageSize int64) *ProjectsLocationsSessionsSparkApplicationsSearchCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A page token received +// from a previous SearchSessionSparkApplications call. Provide this token to +// retrieve the subsequent page. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchCall) PageToken(pageToken string) *ProjectsLocationsSessionsSparkApplicationsSearchCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsOperationsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsOperationsDeleteCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsSparkApplicationsSearchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionsSparkApplicationsSearchCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsOperationsDeleteCall) Context(ctx context.Context) *ProjectsLocationsOperationsDeleteCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchCall) Context(ctx context.Context) *ProjectsLocationsSessionsSparkApplicationsSearchCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsOperationsDeleteCall) Header() http.Header { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/sparkApplications:search") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "name": c.name, + "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.operations.delete" call. +// Do executes the "dataproc.projects.locations.sessions.sparkApplications.search" call. // Any non-2xx status code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was returned. -func (c *ProjectsLocationsOperationsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { +// *SearchSessionSparkApplicationsResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchCall) Do(opts ...googleapi.CallOption) (*SearchSessionSparkApplicationsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7694,7 +16340,7 @@ func (c *ProjectsLocationsOperationsDeleteCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Empty{ + ret := &SearchSessionSparkApplicationsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7707,7 +16353,28 @@ func (c *ProjectsLocationsOperationsDeleteCall) Do(opts ...googleapi.CallOption) return ret, nil } -type ProjectsLocationsOperationsGetCall struct { +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchCall) Pages(ctx context.Context, f func(*SearchSessionSparkApplicationsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsLocationsSessionsSparkApplicationsSearchExecutorStageSummaryCall struct { s *Service name string urlParams_ gensupport.URLParams @@ -7716,21 +16383,58 @@ type ProjectsLocationsOperationsGetCall struct { header_ http.Header } -// Get: Gets the latest state of a long-running operation. Clients can use this -// method to poll the operation result at intervals as recommended by the API -// service. +// SearchExecutorStageSummary: Obtain executor summary with respect to a spark +// stage attempt. // -// - name: The name of the operation resource. -func (r *ProjectsLocationsOperationsService) Get(name string) *ProjectsLocationsOperationsGetCall { - c := &ProjectsLocationsOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The fully qualified name of the session to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApp +// lications/APPLICATION_ID". 
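// The Search call and its Pages helper above can be combined to walk every
// page of matching applications; a sketch with an illustrative RFC 3339
// timestamp and the same svc/ctx/parent assumptions:
//
//	err := svc.Projects.Locations.Sessions.SparkApplications.
//		Search(parent).
//		ApplicationStatus("APPLICATION_STATUS_RUNNING").
//		MinTime("2024-01-01T00:00:00Z").
//		PageSize(50). // default 10, maximum 100
//		Pages(ctx, func(page *dataproc.SearchSessionSparkApplicationsResponse) error {
//			fmt.Println("next page token:", page.NextPageToken)
//			return nil // a non-nil return halts the iteration
//		})
//	if err != nil {
//		log.Fatal(err)
//	}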
+func (r *ProjectsLocationsSessionsSparkApplicationsService) SearchExecutorStageSummary(name string) *ProjectsLocationsSessionsSparkApplicationsSearchExecutorStageSummaryCall { + c := &ProjectsLocationsSessionsSparkApplicationsSearchExecutorStageSummaryCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } +// PageSize sets the optional parameter "pageSize": Maximum number of executors +// to return in each response. The service may return fewer than this. The +// default page size is 10; the maximum page size is 100. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchExecutorStageSummaryCall) PageSize(pageSize int64) *ProjectsLocationsSessionsSparkApplicationsSearchExecutorStageSummaryCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A page token received +// from a previous SearchSessionSparkApplicationExecutorStageSummary call. +// Provide this token to retrieve the subsequent page. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchExecutorStageSummaryCall) PageToken(pageToken string) *ProjectsLocationsSessionsSparkApplicationsSearchExecutorStageSummaryCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Parent sets the optional parameter "parent": Required. Parent (Session) +// resource reference. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchExecutorStageSummaryCall) Parent(parent string) *ProjectsLocationsSessionsSparkApplicationsSearchExecutorStageSummaryCall { + c.urlParams_.Set("parent", parent) + return c +} + +// StageAttemptId sets the optional parameter "stageAttemptId": Required. Stage +// Attempt ID +func (c *ProjectsLocationsSessionsSparkApplicationsSearchExecutorStageSummaryCall) StageAttemptId(stageAttemptId int64) *ProjectsLocationsSessionsSparkApplicationsSearchExecutorStageSummaryCall { + c.urlParams_.Set("stageAttemptId", fmt.Sprint(stageAttemptId)) + return c +} + +// StageId sets the optional parameter "stageId": Required. Stage ID +func (c *ProjectsLocationsSessionsSparkApplicationsSearchExecutorStageSummaryCall) StageId(stageId int64) *ProjectsLocationsSessionsSparkApplicationsSearchExecutorStageSummaryCall { + c.urlParams_.Set("stageId", fmt.Sprint(stageId)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsOperationsGetCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchExecutorStageSummaryCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsSparkApplicationsSearchExecutorStageSummaryCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -7738,27 +16442,27 @@ func (c *ProjectsLocationsOperationsGetCall) Fields(s ...googleapi.Field) *Proje // IfNoneMatch sets an optional parameter which makes the operation fail if the // object's ETag matches the given value. This is useful for getting updates // only after the object has changed since the last request. -func (c *ProjectsLocationsOperationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsOperationsGetCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchExecutorStageSummaryCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionsSparkApplicationsSearchExecutorStageSummaryCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. 
-func (c *ProjectsLocationsOperationsGetCall) Context(ctx context.Context) *ProjectsLocationsOperationsGetCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchExecutorStageSummaryCall) Context(ctx context.Context) *ProjectsLocationsSessionsSparkApplicationsSearchExecutorStageSummaryCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsOperationsGetCall) Header() http.Header { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchExecutorStageSummaryCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsOperationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchExecutorStageSummaryCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -7766,7 +16470,7 @@ func (c *ProjectsLocationsOperationsGetCall) doRequest(alt string) (*http.Respon var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:searchExecutorStageSummary") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -7779,12 +16483,13 @@ func (c *ProjectsLocationsOperationsGetCall) doRequest(alt string) (*http.Respon return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.operations.get" call. +// Do executes the "dataproc.projects.locations.sessions.sparkApplications.searchExecutorStageSummary" call. // Any non-2xx status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at all) in +// *SearchSessionSparkApplicationExecutorStageSummaryResponse.ServerResponse.Hea +// der or (if a response was returned at all) in // error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check // whether the returned error was because http.StatusNotModified was returned. -func (c *ProjectsLocationsOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchExecutorStageSummaryCall) Do(opts ...googleapi.CallOption) (*SearchSessionSparkApplicationExecutorStageSummaryResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7803,7 +16508,7 @@ func (c *ProjectsLocationsOperationsGetCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &SearchSessionSparkApplicationExecutorStageSummaryResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7816,7 +16521,28 @@ func (c *ProjectsLocationsOperationsGetCall) Do(opts ...googleapi.CallOption) (* return ret, nil } -type ProjectsLocationsOperationsListCall struct { +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *ProjectsLocationsSessionsSparkApplicationsSearchExecutorStageSummaryCall) Pages(ctx context.Context, f func(*SearchSessionSparkApplicationExecutorStageSummaryResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsLocationsSessionsSparkApplicationsSearchExecutorsCall struct { s *Service name string urlParams_ gensupport.URLParams @@ -7825,40 +16551,58 @@ type ProjectsLocationsOperationsListCall struct { header_ http.Header } -// List: Lists operations that match the specified filter in the request. If -// the server doesn't support this method, it returns UNIMPLEMENTED. +// SearchExecutors: Obtain data corresponding to executors for a Spark +// Application. // -// - name: The name of the operation's parent resource. -func (r *ProjectsLocationsOperationsService) List(name string) *ProjectsLocationsOperationsListCall { - c := &ProjectsLocationsOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The fully qualified name of the session to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApp +// lications/APPLICATION_ID". +func (r *ProjectsLocationsSessionsSparkApplicationsService) SearchExecutors(name string) *ProjectsLocationsSessionsSparkApplicationsSearchExecutorsCall { + c := &ProjectsLocationsSessionsSparkApplicationsSearchExecutorsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } -// Filter sets the optional parameter "filter": The standard list filter. -func (c *ProjectsLocationsOperationsListCall) Filter(filter string) *ProjectsLocationsOperationsListCall { - c.urlParams_.Set("filter", filter) +// ExecutorStatus sets the optional parameter "executorStatus": Filter to +// select whether active/ dead or all executors should be selected. +// +// Possible values: +// +// "EXECUTOR_STATUS_UNSPECIFIED" +// "EXECUTOR_STATUS_ACTIVE" +// "EXECUTOR_STATUS_DEAD" +func (c *ProjectsLocationsSessionsSparkApplicationsSearchExecutorsCall) ExecutorStatus(executorStatus string) *ProjectsLocationsSessionsSparkApplicationsSearchExecutorsCall { + c.urlParams_.Set("executorStatus", executorStatus) return c } -// PageSize sets the optional parameter "pageSize": The standard list page -// size. -func (c *ProjectsLocationsOperationsListCall) PageSize(pageSize int64) *ProjectsLocationsOperationsListCall { +// PageSize sets the optional parameter "pageSize": Maximum number of executors +// to return in each response. The service may return fewer than this. The +// default page size is 10; the maximum page size is 100. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchExecutorsCall) PageSize(pageSize int64) *ProjectsLocationsSessionsSparkApplicationsSearchExecutorsCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } -// PageToken sets the optional parameter "pageToken": The standard list page -// token. -func (c *ProjectsLocationsOperationsListCall) PageToken(pageToken string) *ProjectsLocationsOperationsListCall { +// PageToken sets the optional parameter "pageToken": A page token received +// from a previous SearchSessionSparkApplicationExecutors call. Provide this +// token to retrieve the subsequent page. 
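// The searchExecutorStageSummary Pages helper defined at the top of this hunk
// pages through per-stage executor summaries the same way; the stage IDs are
// illustrative:
//
//	err := svc.Projects.Locations.Sessions.SparkApplications.
//		SearchExecutorStageSummary(name).
//		Parent(parent).
//		StageId(3).
//		StageAttemptId(0).
//		Pages(ctx, func(page *dataproc.SearchSessionSparkApplicationExecutorStageSummaryResponse) error {
//			fmt.Println("next page token:", page.NextPageToken)
//			return nil
//		})
//	if err != nil {
//		log.Fatal(err)
//	}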
+func (c *ProjectsLocationsSessionsSparkApplicationsSearchExecutorsCall) PageToken(pageToken string) *ProjectsLocationsSessionsSparkApplicationsSearchExecutorsCall { c.urlParams_.Set("pageToken", pageToken) return c } +// Parent sets the optional parameter "parent": Required. Parent (Session) +// resource reference. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchExecutorsCall) Parent(parent string) *ProjectsLocationsSessionsSparkApplicationsSearchExecutorsCall { + c.urlParams_.Set("parent", parent) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsOperationsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsOperationsListCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchExecutorsCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsSparkApplicationsSearchExecutorsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -7866,27 +16610,27 @@ func (c *ProjectsLocationsOperationsListCall) Fields(s ...googleapi.Field) *Proj // IfNoneMatch sets an optional parameter which makes the operation fail if the // object's ETag matches the given value. This is useful for getting updates // only after the object has changed since the last request. -func (c *ProjectsLocationsOperationsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsOperationsListCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchExecutorsCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionsSparkApplicationsSearchExecutorsCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsOperationsListCall) Context(ctx context.Context) *ProjectsLocationsOperationsListCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchExecutorsCall) Context(ctx context.Context) *ProjectsLocationsSessionsSparkApplicationsSearchExecutorsCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsOperationsListCall) Header() http.Header { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchExecutorsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsOperationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchExecutorsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -7894,7 +16638,7 @@ func (c *ProjectsLocationsOperationsListCall) doRequest(alt string) (*http.Respo var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:searchExecutors") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -7907,13 +16651,13 @@ func (c *ProjectsLocationsOperationsListCall) doRequest(alt string) (*http.Respo return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.operations.list" call. +// Do executes the "dataproc.projects.locations.sessions.sparkApplications.searchExecutors" call. 
// Any non-2xx status code is an error. Response headers are in either -// *ListOperationsResponse.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified was -// returned. -func (c *ProjectsLocationsOperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) { +// *SearchSessionSparkApplicationExecutorsResponse.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchExecutorsCall) Do(opts ...googleapi.CallOption) (*SearchSessionSparkApplicationExecutorsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7932,7 +16676,7 @@ func (c *ProjectsLocationsOperationsListCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ListOperationsResponse{ + ret := &SearchSessionSparkApplicationExecutorsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7948,7 +16692,7 @@ func (c *ProjectsLocationsOperationsListCall) Do(opts ...googleapi.CallOption) ( // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *ProjectsLocationsOperationsListCall) Pages(ctx context.Context, f func(*ListOperationsResponse) error) error { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchExecutorsCall) Pages(ctx context.Context, f func(*SearchSessionSparkApplicationExecutorsResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) for { @@ -7966,77 +16710,123 @@ func (c *ProjectsLocationsOperationsListCall) Pages(ctx context.Context, f func( } } -type ProjectsLocationsSessionTemplatesCreateCall struct { - s *Service - parent string - sessiontemplate *SessionTemplate - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsSessionsSparkApplicationsSearchJobsCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Create: Create a session template synchronously. +// SearchJobs: Obtain list of spark jobs corresponding to a Spark Application. // -// - parent: The parent resource where this session template will be created. -func (r *ProjectsLocationsSessionTemplatesService) Create(parent string, sessiontemplate *SessionTemplate) *ProjectsLocationsSessionTemplatesCreateCall { - c := &ProjectsLocationsSessionTemplatesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.sessiontemplate = sessiontemplate +// - name: The fully qualified name of the session to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApp +// lications/APPLICATION_ID". 
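// The searchExecutors call completed above filters on executor liveness via
// the executorStatus enum; same assumptions as the earlier sketches:
//
//	err := svc.Projects.Locations.Sessions.SparkApplications.
//		SearchExecutors(name).
//		Parent(parent).
//		ExecutorStatus("EXECUTOR_STATUS_ACTIVE").
//		Pages(ctx, func(page *dataproc.SearchSessionSparkApplicationExecutorsResponse) error {
//			fmt.Println("next page token:", page.NextPageToken)
//			return nil
//		})
//	if err != nil {
//		log.Fatal(err)
//	}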
+func (r *ProjectsLocationsSessionsSparkApplicationsService) SearchJobs(name string) *ProjectsLocationsSessionsSparkApplicationsSearchJobsCall { + c := &ProjectsLocationsSessionsSparkApplicationsSearchJobsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// JobStatus sets the optional parameter "jobStatus": List only jobs in the +// specific state. +// +// Possible values: +// +// "JOB_EXECUTION_STATUS_UNSPECIFIED" +// "JOB_EXECUTION_STATUS_RUNNING" +// "JOB_EXECUTION_STATUS_SUCCEEDED" +// "JOB_EXECUTION_STATUS_FAILED" +// "JOB_EXECUTION_STATUS_UNKNOWN" +func (c *ProjectsLocationsSessionsSparkApplicationsSearchJobsCall) JobStatus(jobStatus string) *ProjectsLocationsSessionsSparkApplicationsSearchJobsCall { + c.urlParams_.Set("jobStatus", jobStatus) + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of jobs to +// return in each response. The service may return fewer than this. The default +// page size is 10; the maximum page size is 100. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchJobsCall) PageSize(pageSize int64) *ProjectsLocationsSessionsSparkApplicationsSearchJobsCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A page token received +// from a previous SearchSessionSparkApplicationJobs call. Provide this token +// to retrieve the subsequent page. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchJobsCall) PageToken(pageToken string) *ProjectsLocationsSessionsSparkApplicationsSearchJobsCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Parent sets the optional parameter "parent": Required. Parent (Session) +// resource reference. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchJobsCall) Parent(parent string) *ProjectsLocationsSessionsSparkApplicationsSearchJobsCall { + c.urlParams_.Set("parent", parent) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsSessionTemplatesCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionTemplatesCreateCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchJobsCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsSparkApplicationsSearchJobsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchJobsCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionsSparkApplicationsSearchJobsCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsSessionTemplatesCreateCall) Context(ctx context.Context) *ProjectsLocationsSessionTemplatesCreateCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchJobsCall) Context(ctx context.Context) *ProjectsLocationsSessionsSparkApplicationsSearchJobsCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. 
-func (c *ProjectsLocationsSessionTemplatesCreateCall) Header() http.Header { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchJobsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsSessionTemplatesCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.sessiontemplate) - if err != nil { - return nil, err +func (c *ProjectsLocationsSessionsSparkApplicationsSearchJobsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/sessionTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:searchJobs") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.sessionTemplates.create" call. +// Do executes the "dataproc.projects.locations.sessions.sparkApplications.searchJobs" call. // Any non-2xx status code is an error. Response headers are in either -// *SessionTemplate.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified was -// returned. -func (c *ProjectsLocationsSessionTemplatesCreateCall) Do(opts ...googleapi.CallOption) (*SessionTemplate, error) { +// *SearchSessionSparkApplicationJobsResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchJobsCall) Do(opts ...googleapi.CallOption) (*SearchSessionSparkApplicationJobsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8055,7 +16845,7 @@ func (c *ProjectsLocationsSessionTemplatesCreateCall) Do(opts ...googleapi.CallO if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &SessionTemplate{ + ret := &SearchSessionSparkApplicationJobsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8068,54 +16858,127 @@ func (c *ProjectsLocationsSessionTemplatesCreateCall) Do(opts ...googleapi.CallO return ret, nil } -type ProjectsLocationsSessionTemplatesDeleteCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *ProjectsLocationsSessionsSparkApplicationsSearchJobsCall) Pages(ctx context.Context, f func(*SearchSessionSparkApplicationJobsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// SearchSqlQueries: Obtain data corresponding to SQL Queries for a Spark +// Application. +// +// - name: The fully qualified name of the session to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApp +// lications/APPLICATION_ID". +func (r *ProjectsLocationsSessionsSparkApplicationsService) SearchSqlQueries(name string) *ProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesCall { + c := &ProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Details sets the optional parameter "details": Lists/ hides details of Spark +// plan nodes. True is set to list and false to hide. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesCall) Details(details bool) *ProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesCall { + c.urlParams_.Set("details", fmt.Sprint(details)) + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of queries +// to return in each response. The service may return fewer than this. The +// default page size is 10; the maximum page size is 100. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesCall) PageSize(pageSize int64) *ProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A page token received +// from a previous SearchSessionSparkApplicationSqlQueries call. Provide this +// token to retrieve the subsequent page. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesCall) PageToken(pageToken string) *ProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesCall { + c.urlParams_.Set("pageToken", pageToken) + return c } -// Delete: Deletes a session template. -// -// - name: The name of the session template resource to delete. -func (r *ProjectsLocationsSessionTemplatesService) Delete(name string) *ProjectsLocationsSessionTemplatesDeleteCall { - c := &ProjectsLocationsSessionTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name +// Parent sets the optional parameter "parent": Required. Parent (Session) +// resource reference. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesCall) Parent(parent string) *ProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesCall { + c.urlParams_.Set("parent", parent) + return c +} + +// PlanDescription sets the optional parameter "planDescription": Enables/ +// disables physical plan description on demand +func (c *ProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesCall) PlanDescription(planDescription bool) *ProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesCall { + c.urlParams_.Set("planDescription", fmt.Sprint(planDescription)) return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsSessionTemplatesDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionTemplatesDeleteCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsSessionTemplatesDeleteCall) Context(ctx context.Context) *ProjectsLocationsSessionTemplatesDeleteCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesCall) Context(ctx context.Context) *ProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsSessionTemplatesDeleteCall) Header() http.Header { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsSessionTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:searchSqlQueries") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -8126,12 +16989,13 @@ func (c *ProjectsLocationsSessionTemplatesDeleteCall) doRequest(alt string) (*ht return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.sessionTemplates.delete" call. +// Do executes the "dataproc.projects.locations.sessions.sparkApplications.searchSqlQueries" call. // Any non-2xx status code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was returned. -func (c *ProjectsLocationsSessionTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { +// *SearchSessionSparkApplicationSqlQueriesResponse.ServerResponse.Header or +// (if a response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. 
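[Illustrative usage: the SearchSqlQueries call adds two boolean toggles on top of the shared paging parameters. A sketch reusing svc, ctx, name, and parent from the first example.]

	call := svc.Projects.Locations.Sessions.SparkApplications.
		SearchSqlQueries(name).
		Parent(parent).
		Details(true).         // list Spark plan node details
		PlanDescription(true). // include the physical plan description
		PageSize(25)

	resp, err := call.Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("next page token:", resp.NextPageToken)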
+func (c *ProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesCall) Do(opts ...googleapi.CallOption) (*SearchSessionSparkApplicationSqlQueriesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8150,7 +17014,7 @@ func (c *ProjectsLocationsSessionTemplatesDeleteCall) Do(opts ...googleapi.CallO if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Empty{ + ret := &SearchSessionSparkApplicationSqlQueriesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8163,7 +17027,28 @@ func (c *ProjectsLocationsSessionTemplatesDeleteCall) Do(opts ...googleapi.CallO return ret, nil } -type ProjectsLocationsSessionTemplatesGetCall struct { +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesCall) Pages(ctx context.Context, f func(*SearchSessionSparkApplicationSqlQueriesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall struct { s *Service name string urlParams_ gensupport.URLParams @@ -8172,19 +17057,81 @@ type ProjectsLocationsSessionTemplatesGetCall struct { header_ http.Header } -// Get: Gets the resource representation for a session template. +// SearchStageAttemptTasks: Obtain data corresponding to tasks for a spark +// stage attempt for a Spark Application. // -// - name: The name of the session template to retrieve. -func (r *ProjectsLocationsSessionTemplatesService) Get(name string) *ProjectsLocationsSessionTemplatesGetCall { - c := &ProjectsLocationsSessionTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The fully qualified name of the session to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApp +// lications/APPLICATION_ID". +func (r *ProjectsLocationsSessionsSparkApplicationsService) SearchStageAttemptTasks(name string) *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall { + c := &ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } +// PageSize sets the optional parameter "pageSize": Maximum number of tasks to +// return in each response. The service may return fewer than this. The default +// page size is 10; the maximum page size is 100. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall) PageSize(pageSize int64) *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A page token received +// from a previous SearchSessionSparkApplicationStageAttemptTasks call. Provide +// this token to retrieve the subsequent page. 
+func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall) PageToken(pageToken string) *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Parent sets the optional parameter "parent": Required. Parent (Session) +// resource reference. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall) Parent(parent string) *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall { + c.urlParams_.Set("parent", parent) + return c +} + +// SortRuntime sets the optional parameter "sortRuntime": Sort the tasks by +// runtime. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall) SortRuntime(sortRuntime bool) *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall { + c.urlParams_.Set("sortRuntime", fmt.Sprint(sortRuntime)) + return c +} + +// StageAttemptId sets the optional parameter "stageAttemptId": Stage Attempt +// ID +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall) StageAttemptId(stageAttemptId int64) *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall { + c.urlParams_.Set("stageAttemptId", fmt.Sprint(stageAttemptId)) + return c +} + +// StageId sets the optional parameter "stageId": Stage ID +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall) StageId(stageId int64) *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall { + c.urlParams_.Set("stageId", fmt.Sprint(stageId)) + return c +} + +// TaskStatus sets the optional parameter "taskStatus": List only tasks in the +// state. +// +// Possible values: +// +// "TASK_STATUS_UNSPECIFIED" +// "TASK_STATUS_RUNNING" +// "TASK_STATUS_SUCCESS" +// "TASK_STATUS_FAILED" +// "TASK_STATUS_KILLED" +// "TASK_STATUS_PENDING" +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall) TaskStatus(taskStatus string) *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall { + c.urlParams_.Set("taskStatus", taskStatus) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsSessionTemplatesGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionTemplatesGetCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -8192,27 +17139,27 @@ func (c *ProjectsLocationsSessionTemplatesGetCall) Fields(s ...googleapi.Field) // IfNoneMatch sets an optional parameter which makes the operation fail if the // object's ETag matches the given value. This is useful for getting updates // only after the object has changed since the last request. -func (c *ProjectsLocationsSessionTemplatesGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionTemplatesGetCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. 
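[Illustrative usage: SearchStageAttemptTasks narrows the task listing by stage, attempt, and task state, and can sort by runtime. The stage and attempt IDs below are placeholders; the Pages helper follows the same shape as the one shown for SearchJobs.]

	err = svc.Projects.Locations.Sessions.SparkApplications.
		SearchStageAttemptTasks(name).
		Parent(parent).
		StageId(3).        // placeholder stage ID
		StageAttemptId(0). // placeholder attempt ID
		TaskStatus("TASK_STATUS_FAILED").
		SortRuntime(true).
		Pages(ctx, func(page *dataproc.SearchSessionSparkApplicationStageAttemptTasksResponse) error {
			fmt.Printf("page received, next token %q\n", page.NextPageToken)
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}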
-func (c *ProjectsLocationsSessionTemplatesGetCall) Context(ctx context.Context) *ProjectsLocationsSessionTemplatesGetCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall) Context(ctx context.Context) *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsSessionTemplatesGetCall) Header() http.Header { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsSessionTemplatesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -8220,7 +17167,7 @@ func (c *ProjectsLocationsSessionTemplatesGetCall) doRequest(alt string) (*http. var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:searchStageAttemptTasks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -8233,13 +17180,15 @@ func (c *ProjectsLocationsSessionTemplatesGetCall) doRequest(alt string) (*http. return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.sessionTemplates.get" call. +// Do executes the "dataproc.projects.locations.sessions.sparkApplications.searchStageAttemptTasks" call. // Any non-2xx status code is an error. Response headers are in either -// *SessionTemplate.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified was -// returned. -func (c *ProjectsLocationsSessionTemplatesGetCall) Do(opts ...googleapi.CallOption) (*SessionTemplate, error) { +// *SearchSessionSparkApplicationStageAttemptTasksResponse.ServerResponse.Header +// +// or (if a response was returned at all) in error.(*googleapi.Error).Header. +// +// Use googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall) Do(opts ...googleapi.CallOption) (*SearchSessionSparkApplicationStageAttemptTasksResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8258,7 +17207,7 @@ func (c *ProjectsLocationsSessionTemplatesGetCall) Do(opts ...googleapi.CallOpti if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &SessionTemplate{ + ret := &SearchSessionSparkApplicationStageAttemptTasksResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8271,52 +17220,93 @@ func (c *ProjectsLocationsSessionTemplatesGetCall) Do(opts ...googleapi.CallOpti return ret, nil } -type ProjectsLocationsSessionTemplatesListCall struct { +// Pages invokes f for each page of results. 
+// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksCall) Pages(ctx context.Context, f func(*SearchSessionSparkApplicationStageAttemptTasksResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsCall struct { s *Service - parent string + name string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Lists session templates. +// SearchStageAttempts: Obtain data corresponding to a spark stage attempts for +// a Spark Application. // -// - parent: The parent that owns this collection of session templates. -func (r *ProjectsLocationsSessionTemplatesService) List(parent string) *ProjectsLocationsSessionTemplatesListCall { - c := &ProjectsLocationsSessionTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} - -// Filter sets the optional parameter "filter": A filter for the session -// templates to return in the response. Filters are case sensitive and have the -// following syntax:field = value AND field = value ... -func (c *ProjectsLocationsSessionTemplatesListCall) Filter(filter string) *ProjectsLocationsSessionTemplatesListCall { - c.urlParams_.Set("filter", filter) +// - name: The fully qualified name of the session to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApp +// lications/APPLICATION_ID". +func (r *ProjectsLocationsSessionsSparkApplicationsService) SearchStageAttempts(name string) *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsCall { + c := &ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } -// PageSize sets the optional parameter "pageSize": The maximum number of -// sessions to return in each response. The service may return fewer than this -// value. -func (c *ProjectsLocationsSessionTemplatesListCall) PageSize(pageSize int64) *ProjectsLocationsSessionTemplatesListCall { +// PageSize sets the optional parameter "pageSize": Maximum number of stage +// attempts (paging based on stage_attempt_id) to return in each response. The +// service may return fewer than this. The default page size is 10; the maximum +// page size is 100. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsCall) PageSize(pageSize int64) *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": A page token received -// from a previous ListSessions call. Provide this token to retrieve the -// subsequent page. -func (c *ProjectsLocationsSessionTemplatesListCall) PageToken(pageToken string) *ProjectsLocationsSessionTemplatesListCall { +// from a previous SearchSessionSparkApplicationStageAttempts call. Provide +// this token to retrieve the subsequent page. 
+func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsCall) PageToken(pageToken string) *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsCall { c.urlParams_.Set("pageToken", pageToken) return c } +// Parent sets the optional parameter "parent": Required. Parent (Session) +// resource reference. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsCall) Parent(parent string) *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsCall { + c.urlParams_.Set("parent", parent) + return c +} + +// StageId sets the optional parameter "stageId": Required. Stage ID for which +// attempts are to be fetched +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsCall) StageId(stageId int64) *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsCall { + c.urlParams_.Set("stageId", fmt.Sprint(stageId)) + return c +} + +// SummaryMetricsMask sets the optional parameter "summaryMetricsMask": The +// list of summary metrics fields to include. Empty list will default to skip +// all summary metrics fields. Example, if the response should include +// TaskQuantileMetrics, the request should have task_quantile_metrics in +// summary_metrics_mask field +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsCall) SummaryMetricsMask(summaryMetricsMask string) *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsCall { + c.urlParams_.Set("summaryMetricsMask", summaryMetricsMask) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsSessionTemplatesListCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionTemplatesListCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -8324,27 +17314,27 @@ func (c *ProjectsLocationsSessionTemplatesListCall) Fields(s ...googleapi.Field) // IfNoneMatch sets an optional parameter which makes the operation fail if the // object's ETag matches the given value. This is useful for getting updates // only after the object has changed since the last request. -func (c *ProjectsLocationsSessionTemplatesListCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionTemplatesListCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsSessionTemplatesListCall) Context(ctx context.Context) *ProjectsLocationsSessionTemplatesListCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsCall) Context(ctx context.Context) *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. 
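[Illustrative usage: SearchStageAttempts requires a stage ID and lets the caller opt in to summary metrics via the mask described above. The stage ID is a placeholder.]

	attempts, err := svc.Projects.Locations.Sessions.SparkApplications.
		SearchStageAttempts(name).
		Parent(parent).
		StageId(7). // required, per the parameter comment above
		SummaryMetricsMask("task_quantile_metrics"). // opt in to TaskQuantileMetrics
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("next page token:", attempts.NextPageToken)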
-func (c *ProjectsLocationsSessionTemplatesListCall) Header() http.Header { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsSessionTemplatesListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -8352,7 +17342,7 @@ func (c *ProjectsLocationsSessionTemplatesListCall) doRequest(alt string) (*http var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/sessionTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:searchStageAttempts") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -8360,18 +17350,18 @@ func (c *ProjectsLocationsSessionTemplatesListCall) doRequest(alt string) (*http } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.sessionTemplates.list" call. +// Do executes the "dataproc.projects.locations.sessions.sparkApplications.searchStageAttempts" call. // Any non-2xx status code is an error. Response headers are in either -// *ListSessionTemplatesResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// *SearchSessionSparkApplicationStageAttemptsResponse.ServerResponse.Header or +// (if a response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsLocationsSessionTemplatesListCall) Do(opts ...googleapi.CallOption) (*ListSessionTemplatesResponse, error) { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsCall) Do(opts ...googleapi.CallOption) (*SearchSessionSparkApplicationStageAttemptsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8390,7 +17380,7 @@ func (c *ProjectsLocationsSessionTemplatesListCall) Do(opts ...googleapi.CallOpt if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ListSessionTemplatesResponse{ + ret := &SearchSessionSparkApplicationStageAttemptsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8406,7 +17396,7 @@ func (c *ProjectsLocationsSessionTemplatesListCall) Do(opts ...googleapi.CallOpt // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *ProjectsLocationsSessionTemplatesListCall) Pages(ctx context.Context, f func(*ListSessionTemplatesResponse) error) error { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsCall) Pages(ctx context.Context, f func(*SearchSessionSparkApplicationStageAttemptsResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) for { @@ -8424,60 +17414,118 @@ func (c *ProjectsLocationsSessionTemplatesListCall) Pages(ctx context.Context, f } } -type ProjectsLocationsSessionTemplatesPatchCall struct { - s *Service - name string - sessiontemplate *SessionTemplate - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsSessionsSparkApplicationsSearchStagesCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Patch: Updates the session template synchronously. +// SearchStages: Obtain data corresponding to stages for a Spark Application. // -// - name: The resource name of the session template. -func (r *ProjectsLocationsSessionTemplatesService) Patch(name string, sessiontemplate *SessionTemplate) *ProjectsLocationsSessionTemplatesPatchCall { - c := &ProjectsLocationsSessionTemplatesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The fully qualified name of the session to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApp +// lications/APPLICATION_ID". +func (r *ProjectsLocationsSessionsSparkApplicationsService) SearchStages(name string) *ProjectsLocationsSessionsSparkApplicationsSearchStagesCall { + c := &ProjectsLocationsSessionsSparkApplicationsSearchStagesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name - c.sessiontemplate = sessiontemplate + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of stages +// (paging based on stage_id) to return in each response. The service may +// return fewer than this. The default page size is 10; the maximum page size +// is 100. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStagesCall) PageSize(pageSize int64) *ProjectsLocationsSessionsSparkApplicationsSearchStagesCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A page token received +// from a previous SearchSessionSparkApplicationStages call. Provide this token +// to retrieve the subsequent page. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStagesCall) PageToken(pageToken string) *ProjectsLocationsSessionsSparkApplicationsSearchStagesCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Parent sets the optional parameter "parent": Required. Parent (Session) +// resource reference. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStagesCall) Parent(parent string) *ProjectsLocationsSessionsSparkApplicationsSearchStagesCall { + c.urlParams_.Set("parent", parent) + return c +} + +// StageStatus sets the optional parameter "stageStatus": List only stages in +// the given state. 
+// +// Possible values: +// +// "STAGE_STATUS_UNSPECIFIED" +// "STAGE_STATUS_ACTIVE" +// "STAGE_STATUS_COMPLETE" +// "STAGE_STATUS_FAILED" +// "STAGE_STATUS_PENDING" +// "STAGE_STATUS_SKIPPED" +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStagesCall) StageStatus(stageStatus string) *ProjectsLocationsSessionsSparkApplicationsSearchStagesCall { + c.urlParams_.Set("stageStatus", stageStatus) + return c +} + +// SummaryMetricsMask sets the optional parameter "summaryMetricsMask": The +// list of summary metrics fields to include. Empty list will default to skip +// all summary metrics fields. Example, if the response should include +// TaskQuantileMetrics, the request should have task_quantile_metrics in +// summary_metrics_mask field +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStagesCall) SummaryMetricsMask(summaryMetricsMask string) *ProjectsLocationsSessionsSparkApplicationsSearchStagesCall { + c.urlParams_.Set("summaryMetricsMask", summaryMetricsMask) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsSessionTemplatesPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionTemplatesPatchCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStagesCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsSparkApplicationsSearchStagesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStagesCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionsSparkApplicationsSearchStagesCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsSessionTemplatesPatchCall) Context(ctx context.Context) *ProjectsLocationsSessionTemplatesPatchCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStagesCall) Context(ctx context.Context) *ProjectsLocationsSessionsSparkApplicationsSearchStagesCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsSessionTemplatesPatchCall) Header() http.Header { +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStagesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsSessionTemplatesPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.sessiontemplate) - if err != nil { - return nil, err +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStagesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:searchStages") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -8488,13 +17536,13 @@ func (c *ProjectsLocationsSessionTemplatesPatchCall) doRequest(alt string) (*htt return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.sessionTemplates.patch" call. +// Do executes the "dataproc.projects.locations.sessions.sparkApplications.searchStages" call. // Any non-2xx status code is an error. Response headers are in either -// *SessionTemplate.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified was -// returned. -func (c *ProjectsLocationsSessionTemplatesPatchCall) Do(opts ...googleapi.CallOption) (*SessionTemplate, error) { +// *SearchSessionSparkApplicationStagesResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStagesCall) Do(opts ...googleapi.CallOption) (*SearchSessionSparkApplicationStagesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8513,7 +17561,7 @@ func (c *ProjectsLocationsSessionTemplatesPatchCall) Do(opts ...googleapi.CallOp if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &SessionTemplate{ + ret := &SearchSessionSparkApplicationStagesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8523,100 +17571,117 @@ func (c *ProjectsLocationsSessionTemplatesPatchCall) Do(opts ...googleapi.CallOp if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } - return ret, nil + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsSessionsSparkApplicationsSearchStagesCall) Pages(ctx context.Context, f func(*SearchSessionSparkApplicationStagesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -type ProjectsLocationsSessionsCreateCall struct { - s *Service - parent string - session *Session - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsSessionsSparkApplicationsSummarizeExecutorsCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Create: Create an interactive session asynchronously. +// SummarizeExecutors: Obtain summary of Executor Summary for a Spark +// Application // -// - parent: The parent resource where this session will be created. 
-func (r *ProjectsLocationsSessionsService) Create(parent string, session *Session) *ProjectsLocationsSessionsCreateCall { - c := &ProjectsLocationsSessionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.session = session - return c -} - -// RequestId sets the optional parameter "requestId": A unique ID used to -// identify the request. If the service receives two CreateSessionRequests -// (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateSessionRequest)s -// with the same ID, the second request is ignored, and the first Session is -// created and stored in the backend.Recommendation: Set this value to a UUID -// (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must -// contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens -// (-). The maximum length is 40 characters. -func (c *ProjectsLocationsSessionsCreateCall) RequestId(requestId string) *ProjectsLocationsSessionsCreateCall { - c.urlParams_.Set("requestId", requestId) +// - name: The fully qualified name of the session to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApp +// lications/APPLICATION_ID". +func (r *ProjectsLocationsSessionsSparkApplicationsService) SummarizeExecutors(name string) *ProjectsLocationsSessionsSparkApplicationsSummarizeExecutorsCall { + c := &ProjectsLocationsSessionsSparkApplicationsSummarizeExecutorsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } -// SessionId sets the optional parameter "sessionId": Required. The ID to use -// for the session, which becomes the final component of the session's resource -// name.This value must be 4-63 characters. Valid characters are /a-z-/. -func (c *ProjectsLocationsSessionsCreateCall) SessionId(sessionId string) *ProjectsLocationsSessionsCreateCall { - c.urlParams_.Set("sessionId", sessionId) +// Parent sets the optional parameter "parent": Required. Parent (Session) +// resource reference. +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeExecutorsCall) Parent(parent string) *ProjectsLocationsSessionsSparkApplicationsSummarizeExecutorsCall { + c.urlParams_.Set("parent", parent) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsSessionsCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsCreateCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeExecutorsCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsSparkApplicationsSummarizeExecutorsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeExecutorsCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionsSparkApplicationsSummarizeExecutorsCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. 
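[Illustrative usage for the SearchStages call completed above: filter by stage state and page through results. The concrete element fields of the response are not shown in this hunk, so the sketch marshals each page via the generated MarshalJSON rather than naming fields; add encoding/json to the imports.]

	err = svc.Projects.Locations.Sessions.SparkApplications.
		SearchStages(name).
		Parent(parent).
		StageStatus("STAGE_STATUS_FAILED").
		PageSize(100).
		Pages(ctx, func(page *dataproc.SearchSessionSparkApplicationStagesResponse) error {
			b, merr := json.MarshalIndent(page, "", "  ")
			if merr != nil {
				return merr
			}
			fmt.Println(string(b))
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}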
-func (c *ProjectsLocationsSessionsCreateCall) Context(ctx context.Context) *ProjectsLocationsSessionsCreateCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeExecutorsCall) Context(ctx context.Context) *ProjectsLocationsSessionsSparkApplicationsSummarizeExecutorsCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsSessionsCreateCall) Header() http.Header { +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeExecutorsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsSessionsCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.session) - if err != nil { - return nil, err +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeExecutorsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/sessions") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:summarizeExecutors") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.sessions.create" call. +// Do executes the "dataproc.projects.locations.sessions.sparkApplications.summarizeExecutors" call. // Any non-2xx status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was returned. -func (c *ProjectsLocationsSessionsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *SummarizeSessionSparkApplicationExecutorsResponse.ServerResponse.Header or +// (if a response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeExecutorsCall) Do(opts ...googleapi.CallOption) (*SummarizeSessionSparkApplicationExecutorsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8635,7 +17700,7 @@ func (c *ProjectsLocationsSessionsCreateCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &SummarizeSessionSparkApplicationExecutorsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8648,68 +17713,75 @@ func (c *ProjectsLocationsSessionsCreateCall) Do(opts ...googleapi.CallOption) ( return ret, nil } -type ProjectsLocationsSessionsDeleteCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsSessionsSparkApplicationsSummarizeJobsCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the interactive session resource. If the session is not in -// terminal state, it is terminated, and then deleted. +// SummarizeJobs: Obtain summary of Jobs for a Spark Application // -// - name: The name of the session resource to delete. -func (r *ProjectsLocationsSessionsService) Delete(name string) *ProjectsLocationsSessionsDeleteCall { - c := &ProjectsLocationsSessionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The fully qualified name of the session to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApp +// lications/APPLICATION_ID". +func (r *ProjectsLocationsSessionsSparkApplicationsService) SummarizeJobs(name string) *ProjectsLocationsSessionsSparkApplicationsSummarizeJobsCall { + c := &ProjectsLocationsSessionsSparkApplicationsSummarizeJobsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } -// RequestId sets the optional parameter "requestId": A unique ID used to -// identify the request. If the service receives two DeleteSessionRequest -// (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteSessionRequest)s -// with the same ID, the second request is ignored.Recommendation: Set this -// value to a UUID -// (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must -// contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens -// (-). The maximum length is 40 characters. -func (c *ProjectsLocationsSessionsDeleteCall) RequestId(requestId string) *ProjectsLocationsSessionsDeleteCall { - c.urlParams_.Set("requestId", requestId) +// Parent sets the optional parameter "parent": Required. Parent (Session) +// resource reference. +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeJobsCall) Parent(parent string) *ProjectsLocationsSessionsSparkApplicationsSummarizeJobsCall { + c.urlParams_.Set("parent", parent) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsSessionsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsDeleteCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeJobsCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsSparkApplicationsSummarizeJobsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. 
This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeJobsCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionsSparkApplicationsSummarizeJobsCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsSessionsDeleteCall) Context(ctx context.Context) *ProjectsLocationsSessionsDeleteCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeJobsCall) Context(ctx context.Context) *ProjectsLocationsSessionsSparkApplicationsSummarizeJobsCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsSessionsDeleteCall) Header() http.Header { +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeJobsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsSessionsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeJobsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:summarizeJobs") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -8720,12 +17792,13 @@ func (c *ProjectsLocationsSessionsDeleteCall) doRequest(alt string) (*http.Respo return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.sessions.delete" call. +// Do executes the "dataproc.projects.locations.sessions.sparkApplications.summarizeJobs" call. // Any non-2xx status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was returned. -func (c *ProjectsLocationsSessionsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *SummarizeSessionSparkApplicationJobsResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeJobsCall) Do(opts ...googleapi.CallOption) (*SummarizeSessionSparkApplicationJobsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8744,7 +17817,7 @@ func (c *ProjectsLocationsSessionsDeleteCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &SummarizeSessionSparkApplicationJobsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8757,7 +17830,7 @@ func (c *ProjectsLocationsSessionsDeleteCall) Do(opts ...googleapi.CallOption) ( return ret, nil } -type ProjectsLocationsSessionsGetCall struct { +type ProjectsLocationsSessionsSparkApplicationsSummarizeStageAttemptTasksCall struct { s *Service name string urlParams_ gensupport.URLParams @@ -8766,19 +17839,42 @@ type ProjectsLocationsSessionsGetCall struct { header_ http.Header } -// Get: Gets the resource representation for an interactive session. +// SummarizeStageAttemptTasks: Obtain summary of Tasks for a Spark Application +// Stage Attempt // -// - name: The name of the session to retrieve. -func (r *ProjectsLocationsSessionsService) Get(name string) *ProjectsLocationsSessionsGetCall { - c := &ProjectsLocationsSessionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The fully qualified name of the session to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApp +// lications/APPLICATION_ID". +func (r *ProjectsLocationsSessionsSparkApplicationsService) SummarizeStageAttemptTasks(name string) *ProjectsLocationsSessionsSparkApplicationsSummarizeStageAttemptTasksCall { + c := &ProjectsLocationsSessionsSparkApplicationsSummarizeStageAttemptTasksCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } +// Parent sets the optional parameter "parent": Required. Parent (Session) +// resource reference. +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeStageAttemptTasksCall) Parent(parent string) *ProjectsLocationsSessionsSparkApplicationsSummarizeStageAttemptTasksCall { + c.urlParams_.Set("parent", parent) + return c +} + +// StageAttemptId sets the optional parameter "stageAttemptId": Required. Stage +// Attempt ID +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeStageAttemptTasksCall) StageAttemptId(stageAttemptId int64) *ProjectsLocationsSessionsSparkApplicationsSummarizeStageAttemptTasksCall { + c.urlParams_.Set("stageAttemptId", fmt.Sprint(stageAttemptId)) + return c +} + +// StageId sets the optional parameter "stageId": Required. Stage ID +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeStageAttemptTasksCall) StageId(stageId int64) *ProjectsLocationsSessionsSparkApplicationsSummarizeStageAttemptTasksCall { + c.urlParams_.Set("stageId", fmt.Sprint(stageId)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. 
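[Illustrative usage: the summarize variants are one-shot GETs with no paging. Stage and attempt IDs are placeholders; ServerResponse is embedded in each result, as the Do bodies show, so the HTTP status is available on the returned structs.]

	execSummary, err := svc.Projects.Locations.Sessions.SparkApplications.
		SummarizeExecutors(name).Parent(parent).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	jobSummary, err := svc.Projects.Locations.Sessions.SparkApplications.
		SummarizeJobs(name).Parent(parent).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	taskSummary, err := svc.Projects.Locations.Sessions.SparkApplications.
		SummarizeStageAttemptTasks(name).
		Parent(parent).
		StageId(3).        // placeholder
		StageAttemptId(0). // placeholder
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(execSummary.HTTPStatusCode, jobSummary.HTTPStatusCode, taskSummary.HTTPStatusCode)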
-func (c *ProjectsLocationsSessionsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsGetCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeStageAttemptTasksCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsSparkApplicationsSummarizeStageAttemptTasksCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -8786,27 +17882,27 @@ func (c *ProjectsLocationsSessionsGetCall) Fields(s ...googleapi.Field) *Project // IfNoneMatch sets an optional parameter which makes the operation fail if the // object's ETag matches the given value. This is useful for getting updates // only after the object has changed since the last request. -func (c *ProjectsLocationsSessionsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionsGetCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeStageAttemptTasksCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionsSparkApplicationsSummarizeStageAttemptTasksCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsSessionsGetCall) Context(ctx context.Context) *ProjectsLocationsSessionsGetCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeStageAttemptTasksCall) Context(ctx context.Context) *ProjectsLocationsSessionsSparkApplicationsSummarizeStageAttemptTasksCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsSessionsGetCall) Header() http.Header { +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeStageAttemptTasksCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsSessionsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeStageAttemptTasksCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -8814,7 +17910,7 @@ func (c *ProjectsLocationsSessionsGetCall) doRequest(alt string) (*http.Response var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:summarizeStageAttemptTasks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -8827,12 +17923,13 @@ func (c *ProjectsLocationsSessionsGetCall) doRequest(alt string) (*http.Response return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.sessions.get" call. +// Do executes the "dataproc.projects.locations.sessions.sparkApplications.summarizeStageAttemptTasks" call. // Any non-2xx status code is an error. Response headers are in either -// *Session.ServerResponse.Header or (if a response was returned at all) in +// *SummarizeSessionSparkApplicationStageAttemptTasksResponse.ServerResponse.Hea +// der or (if a response was returned at all) in // error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check // whether the returned error was because http.StatusNotModified was returned. 
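[Illustrative usage of the If-None-Match flow these GET calls expose: pass the ETag from a previous response and treat http.StatusNotModified as "no change". This assumes the server returns an ETag header for the call; add google.golang.org/api/googleapi to the imports.]

	first, err := svc.Projects.Locations.Sessions.SparkApplications.
		SummarizeStageAttemptTasks(name).
		Parent(parent).StageId(3).StageAttemptId(0).
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	etag := first.Header.Get("Etag")

	_, err = svc.Projects.Locations.Sessions.SparkApplications.
		SummarizeStageAttemptTasks(name).
		Parent(parent).StageId(3).StageAttemptId(0).
		IfNoneMatch(etag).
		Context(ctx).Do()
	switch {
	case googleapi.IsNotModified(err):
		fmt.Println("summary unchanged since last fetch")
	case err != nil:
		log.Fatal(err)
	}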
-func (c *ProjectsLocationsSessionsGetCall) Do(opts ...googleapi.CallOption) (*Session, error) { +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeStageAttemptTasksCall) Do(opts ...googleapi.CallOption) (*SummarizeSessionSparkApplicationStageAttemptTasksResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8851,7 +17948,7 @@ func (c *ProjectsLocationsSessionsGetCall) Do(opts ...googleapi.CallOption) (*Se if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Session{ + ret := &SummarizeSessionSparkApplicationStageAttemptTasksResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8864,61 +17961,37 @@ func (c *ProjectsLocationsSessionsGetCall) Do(opts ...googleapi.CallOption) (*Se return ret, nil } -type ProjectsLocationsSessionsListCall struct { +type ProjectsLocationsSessionsSparkApplicationsSummarizeStagesCall struct { s *Service - parent string + name string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Lists interactive sessions. +// SummarizeStages: Obtain summary of Stages for a Spark Application // -// - parent: The parent, which owns this collection of sessions. -func (r *ProjectsLocationsSessionsService) List(parent string) *ProjectsLocationsSessionsListCall { - c := &ProjectsLocationsSessionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} - -// Filter sets the optional parameter "filter": A filter for the sessions to -// return in the response.A filter is a logical expression constraining the -// values of various fields in each session resource. Filters are case -// sensitive, and may contain multiple clauses combined with logical operators -// (AND, OR). Supported fields are session_id, session_uuid, state, -// create_time, and labels.Example: state = ACTIVE and create_time < -// "2023-01-01T00:00:00Z" is a filter for sessions in an ACTIVE state that were -// created before 2023-01-01. state = ACTIVE and labels.environment=production -// is a filter for sessions in an ACTIVE state that have a production -// environment label.See https://google.aip.dev/assets/misc/ebnf-filtering.txt -// for a detailed description of the filter syntax and a list of supported -// comparators. -func (c *ProjectsLocationsSessionsListCall) Filter(filter string) *ProjectsLocationsSessionsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// PageSize sets the optional parameter "pageSize": The maximum number of -// sessions to return in each response. The service may return fewer than this -// value. -func (c *ProjectsLocationsSessionsListCall) PageSize(pageSize int64) *ProjectsLocationsSessionsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) +// - name: The fully qualified name of the session to retrieve in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApp +// lications/APPLICATION_ID". +func (r *ProjectsLocationsSessionsSparkApplicationsService) SummarizeStages(name string) *ProjectsLocationsSessionsSparkApplicationsSummarizeStagesCall { + c := &ProjectsLocationsSessionsSparkApplicationsSummarizeStagesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } -// PageToken sets the optional parameter "pageToken": A page token received -// from a previous ListSessions call. 
Provide this token to retrieve the -// subsequent page. -func (c *ProjectsLocationsSessionsListCall) PageToken(pageToken string) *ProjectsLocationsSessionsListCall { - c.urlParams_.Set("pageToken", pageToken) +// Parent sets the optional parameter "parent": Required. Parent (Session) +// resource reference. +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeStagesCall) Parent(parent string) *ProjectsLocationsSessionsSparkApplicationsSummarizeStagesCall { + c.urlParams_.Set("parent", parent) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsSessionsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsListCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeStagesCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsSparkApplicationsSummarizeStagesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -8926,27 +17999,27 @@ func (c *ProjectsLocationsSessionsListCall) Fields(s ...googleapi.Field) *Projec // IfNoneMatch sets an optional parameter which makes the operation fail if the // object's ETag matches the given value. This is useful for getting updates // only after the object has changed since the last request. -func (c *ProjectsLocationsSessionsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionsListCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeStagesCall) IfNoneMatch(entityTag string) *ProjectsLocationsSessionsSparkApplicationsSummarizeStagesCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsSessionsListCall) Context(ctx context.Context) *ProjectsLocationsSessionsListCall { +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeStagesCall) Context(ctx context.Context) *ProjectsLocationsSessionsSparkApplicationsSummarizeStagesCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsSessionsListCall) Header() http.Header { +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeStagesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsSessionsListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeStagesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -8954,7 +18027,7 @@ func (c *ProjectsLocationsSessionsListCall) doRequest(alt string) (*http.Respons var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/sessions") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:summarizeStages") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -8962,18 +18035,18 @@ func (c *ProjectsLocationsSessionsListCall) doRequest(alt string) (*http.Respons } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.sessions.list" call. 
+// Do executes the "dataproc.projects.locations.sessions.sparkApplications.summarizeStages" call. // Any non-2xx status code is an error. Response headers are in either -// *ListSessionsResponse.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified was -// returned. -func (c *ProjectsLocationsSessionsListCall) Do(opts ...googleapi.CallOption) (*ListSessionsResponse, error) { +// *SummarizeSessionSparkApplicationStagesResponse.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsSessionsSparkApplicationsSummarizeStagesCall) Do(opts ...googleapi.CallOption) (*SummarizeSessionSparkApplicationStagesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8992,7 +18065,7 @@ func (c *ProjectsLocationsSessionsListCall) Do(opts ...googleapi.CallOption) (*L if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ListSessionsResponse{ + ret := &SummarizeSessionSparkApplicationStagesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -9005,79 +18078,61 @@ func (c *ProjectsLocationsSessionsListCall) Do(opts ...googleapi.CallOption) (*L return ret, nil } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsLocationsSessionsListCall) Pages(ctx context.Context, f func(*ListSessionsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -type ProjectsLocationsSessionsTerminateCall struct { - s *Service - name string - terminatesessionrequest *TerminateSessionRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsSessionsSparkApplicationsWriteCall struct { + s *Service + name string + writesessionsparkapplicationcontextrequest *WriteSessionSparkApplicationContextRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Terminate: Terminates the interactive session. +// Write: Write wrapper objects from dataplane to spanner // -// - name: The name of the session resource to terminate. -func (r *ProjectsLocationsSessionsService) Terminate(name string, terminatesessionrequest *TerminateSessionRequest) *ProjectsLocationsSessionsTerminateCall { - c := &ProjectsLocationsSessionsTerminateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The fully qualified name of the spark application to write data +// about in the format +// "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApp +// lications/APPLICATION_ID". 
+func (r *ProjectsLocationsSessionsSparkApplicationsService) Write(name string, writesessionsparkapplicationcontextrequest *WriteSessionSparkApplicationContextRequest) *ProjectsLocationsSessionsSparkApplicationsWriteCall { + c := &ProjectsLocationsSessionsSparkApplicationsWriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name - c.terminatesessionrequest = terminatesessionrequest + c.writesessionsparkapplicationcontextrequest = writesessionsparkapplicationcontextrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsSessionsTerminateCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsTerminateCall { +func (c *ProjectsLocationsSessionsSparkApplicationsWriteCall) Fields(s ...googleapi.Field) *ProjectsLocationsSessionsSparkApplicationsWriteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsSessionsTerminateCall) Context(ctx context.Context) *ProjectsLocationsSessionsTerminateCall { +func (c *ProjectsLocationsSessionsSparkApplicationsWriteCall) Context(ctx context.Context) *ProjectsLocationsSessionsSparkApplicationsWriteCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsSessionsTerminateCall) Header() http.Header { +func (c *ProjectsLocationsSessionsSparkApplicationsWriteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsSessionsTerminateCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsSessionsSparkApplicationsWriteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.terminatesessionrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.writesessionsparkapplicationcontextrequest) if err != nil { return nil, err } c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:terminate") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:write") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -9090,12 +18145,13 @@ func (c *ProjectsLocationsSessionsTerminateCall) doRequest(alt string) (*http.Re return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataproc.projects.locations.sessions.terminate" call. +// Do executes the "dataproc.projects.locations.sessions.sparkApplications.write" call. // Any non-2xx status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was returned. -func (c *ProjectsLocationsSessionsTerminateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *WriteSessionSparkApplicationContextResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *ProjectsLocationsSessionsSparkApplicationsWriteCall) Do(opts ...googleapi.CallOption) (*WriteSessionSparkApplicationContextResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -9114,7 +18170,7 @@ func (c *ProjectsLocationsSessionsTerminateCall) Do(opts ...googleapi.CallOption if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &WriteSessionSparkApplicationContextResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, diff --git a/terraform/providers/google/vendor/google.golang.org/api/datastream/v1/datastream-api.json b/terraform/providers/google/vendor/google.golang.org/api/datastream/v1/datastream-api.json index c53eb2a522b..da4c8ba6259 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/datastream/v1/datastream-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/datastream/v1/datastream-api.json @@ -1065,7 +1065,7 @@ ] }, "run": { - "description": "Use this method to start, resume or recover a stream with a non default CDC strategy. NOTE: This feature is currently experimental.", + "description": "Use this method to start, resume or recover a stream with a non default CDC strategy.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/streams/{streamsId}:run", "httpMethod": "POST", "id": "datastream.projects.locations.streams.run", @@ -1250,7 +1250,7 @@ } } }, - "revision": "20240529", + "revision": "20240930", "rootUrl": "https://datastream.googleapis.com/", "schemas": { "AppendOnly": { @@ -2067,7 +2067,7 @@ "type": "object" }, "MysqlProfile": { - "description": "MySQL database profile.", + "description": "MySQL database profile. Next ID: 7.", "id": "MysqlProfile", "properties": { "hostname": { @@ -2075,7 +2075,7 @@ "type": "string" }, "password": { - "description": "Required. Input only. Password for the MySQL connection.", + "description": "Optional. Input only. Password for the MySQL connection. Mutually exclusive with the `secret_manager_stored_password` field.", "type": "string" }, "port": { @@ -2338,7 +2338,7 @@ "type": "object" }, "OracleProfile": { - "description": "Oracle database profile.", + "description": "Oracle database profile. Next ID: 10.", "id": "OracleProfile", "properties": { "connectionAttributes": { @@ -2361,7 +2361,7 @@ "description": "Optional. SSL configuration for the Oracle connection." }, "password": { - "description": "Required. Password for the Oracle connection.", + "description": "Optional. Password for the Oracle connection. Mutually exclusive with the `secret_manager_stored_password` field.", "type": "string" }, "port": { @@ -2546,7 +2546,7 @@ "type": "object" }, "PostgresqlProfile": { - "description": "PostgreSQL database profile.", + "description": "PostgreSQL database profile. Next ID: 7.", "id": "PostgresqlProfile", "properties": { "database": { @@ -2558,7 +2558,7 @@ "type": "string" }, "password": { - "description": "Required. Password for the PostgreSQL connection.", + "description": "Optional. Password for the PostgreSQL connection. Mutually exclusive with the `secret_manager_stored_password` field.", "type": "string" }, "port": { @@ -2778,6 +2778,10 @@ "cdcStrategy": { "$ref": "CdcStrategy", "description": "Optional. The CDC strategy of the stream. If not set, the system's default value will be used." + }, + "force": { + "description": "Optional. 
Update the stream without validating it.", + "type": "boolean" } }, "type": "object" @@ -2934,7 +2938,7 @@ "type": "object" }, "SqlServerProfile": { - "description": "SQLServer database profile", + "description": "SQLServer database profile. Next ID: 8.", "id": "SqlServerProfile", "properties": { "database": { @@ -2946,7 +2950,7 @@ "type": "string" }, "password": { - "description": "Required. Password for the SQLServer connection.", + "description": "Optional. Password for the SQLServer connection. Mutually exclusive with the `secret_manager_stored_password` field.", "type": "string" }, "port": { diff --git a/terraform/providers/google/vendor/google.golang.org/api/datastream/v1/datastream-gen.go b/terraform/providers/google/vendor/google.golang.org/api/datastream/v1/datastream-gen.go index 5ef17259496..77ff9c4afb0 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/datastream/v1/datastream-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/datastream/v1/datastream-gen.go @@ -281,9 +281,9 @@ type BackfillAllStrategy struct { NullFields []string `json:"-"` } -func (s *BackfillAllStrategy) MarshalJSON() ([]byte, error) { +func (s BackfillAllStrategy) MarshalJSON() ([]byte, error) { type NoMethod BackfillAllStrategy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackfillJob: Represents a backfill job on a specific stream object. @@ -331,9 +331,9 @@ type BackfillJob struct { NullFields []string `json:"-"` } -func (s *BackfillJob) MarshalJSON() ([]byte, error) { +func (s BackfillJob) MarshalJSON() ([]byte, error) { type NoMethod BackfillJob - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackfillNoneStrategy: Backfill strategy to disable automatic backfill for @@ -370,9 +370,9 @@ type BigQueryDestinationConfig struct { NullFields []string `json:"-"` } -func (s *BigQueryDestinationConfig) MarshalJSON() ([]byte, error) { +func (s BigQueryDestinationConfig) MarshalJSON() ([]byte, error) { type NoMethod BigQueryDestinationConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BigQueryProfile: BigQuery warehouse profile. @@ -407,9 +407,9 @@ type CdcStrategy struct { NullFields []string `json:"-"` } -func (s *CdcStrategy) MarshalJSON() ([]byte, error) { +func (s CdcStrategy) MarshalJSON() ([]byte, error) { type NoMethod CdcStrategy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ConnectionProfile: A set of reusable connection configurations to be used as @@ -459,9 +459,9 @@ type ConnectionProfile struct { NullFields []string `json:"-"` } -func (s *ConnectionProfile) MarshalJSON() ([]byte, error) { +func (s ConnectionProfile) MarshalJSON() ([]byte, error) { type NoMethod ConnectionProfile - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DatasetTemplate: Dataset template used for dynamic dataset creation. 
@@ -495,9 +495,9 @@ type DatasetTemplate struct { NullFields []string `json:"-"` } -func (s *DatasetTemplate) MarshalJSON() ([]byte, error) { +func (s DatasetTemplate) MarshalJSON() ([]byte, error) { type NoMethod DatasetTemplate - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DestinationConfig: The configuration of the stream destination. @@ -524,9 +524,9 @@ type DestinationConfig struct { NullFields []string `json:"-"` } -func (s *DestinationConfig) MarshalJSON() ([]byte, error) { +func (s DestinationConfig) MarshalJSON() ([]byte, error) { type NoMethod DestinationConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DiscoverConnectionProfileRequest: Request message for 'discover' @@ -565,9 +565,9 @@ type DiscoverConnectionProfileRequest struct { NullFields []string `json:"-"` } -func (s *DiscoverConnectionProfileRequest) MarshalJSON() ([]byte, error) { +func (s DiscoverConnectionProfileRequest) MarshalJSON() ([]byte, error) { type NoMethod DiscoverConnectionProfileRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DiscoverConnectionProfileResponse: Response from a discover request. @@ -596,9 +596,9 @@ type DiscoverConnectionProfileResponse struct { NullFields []string `json:"-"` } -func (s *DiscoverConnectionProfileResponse) MarshalJSON() ([]byte, error) { +func (s DiscoverConnectionProfileResponse) MarshalJSON() ([]byte, error) { type NoMethod DiscoverConnectionProfileResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DropLargeObjects: Configuration to drop large object values. @@ -641,9 +641,9 @@ type Error struct { NullFields []string `json:"-"` } -func (s *Error) MarshalJSON() ([]byte, error) { +func (s Error) MarshalJSON() ([]byte, error) { type NoMethod Error - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FetchStaticIpsResponse: Response message for a 'FetchStaticIps' response. @@ -669,9 +669,9 @@ type FetchStaticIpsResponse struct { NullFields []string `json:"-"` } -func (s *FetchStaticIpsResponse) MarshalJSON() ([]byte, error) { +func (s FetchStaticIpsResponse) MarshalJSON() ([]byte, error) { type NoMethod FetchStaticIpsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ForwardSshTunnelConnectivity: Forward SSH Tunnel connectivity. 
@@ -699,9 +699,9 @@ type ForwardSshTunnelConnectivity struct { NullFields []string `json:"-"` } -func (s *ForwardSshTunnelConnectivity) MarshalJSON() ([]byte, error) { +func (s ForwardSshTunnelConnectivity) MarshalJSON() ([]byte, error) { type NoMethod ForwardSshTunnelConnectivity - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GcsDestinationConfig: Google Cloud Storage destination configuration @@ -731,9 +731,9 @@ type GcsDestinationConfig struct { NullFields []string `json:"-"` } -func (s *GcsDestinationConfig) MarshalJSON() ([]byte, error) { +func (s GcsDestinationConfig) MarshalJSON() ([]byte, error) { type NoMethod GcsDestinationConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GcsProfile: Cloud Storage bucket profile. @@ -755,9 +755,9 @@ type GcsProfile struct { NullFields []string `json:"-"` } -func (s *GcsProfile) MarshalJSON() ([]byte, error) { +func (s GcsProfile) MarshalJSON() ([]byte, error) { type NoMethod GcsProfile - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JsonFileFormat: JSON file format configuration. @@ -789,9 +789,9 @@ type JsonFileFormat struct { NullFields []string `json:"-"` } -func (s *JsonFileFormat) MarshalJSON() ([]byte, error) { +func (s JsonFileFormat) MarshalJSON() ([]byte, error) { type NoMethod JsonFileFormat - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListConnectionProfilesResponse: Response message for listing connection @@ -820,9 +820,9 @@ type ListConnectionProfilesResponse struct { NullFields []string `json:"-"` } -func (s *ListConnectionProfilesResponse) MarshalJSON() ([]byte, error) { +func (s ListConnectionProfilesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListConnectionProfilesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListLocationsResponse: The response message for Locations.ListLocations. @@ -848,9 +848,9 @@ type ListLocationsResponse struct { NullFields []string `json:"-"` } -func (s *ListLocationsResponse) MarshalJSON() ([]byte, error) { +func (s ListLocationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListLocationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListOperationsResponse: The response message for Operations.ListOperations. 
@@ -876,9 +876,9 @@ type ListOperationsResponse struct { NullFields []string `json:"-"` } -func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { +func (s ListOperationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListOperationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListPrivateConnectionsResponse: Response containing a list of private @@ -907,9 +907,9 @@ type ListPrivateConnectionsResponse struct { NullFields []string `json:"-"` } -func (s *ListPrivateConnectionsResponse) MarshalJSON() ([]byte, error) { +func (s ListPrivateConnectionsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListPrivateConnectionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListRoutesResponse: Route list response. @@ -937,9 +937,9 @@ type ListRoutesResponse struct { NullFields []string `json:"-"` } -func (s *ListRoutesResponse) MarshalJSON() ([]byte, error) { +func (s ListRoutesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListRoutesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListStreamObjectsResponse: Response containing the objects for a stream. @@ -965,9 +965,9 @@ type ListStreamObjectsResponse struct { NullFields []string `json:"-"` } -func (s *ListStreamObjectsResponse) MarshalJSON() ([]byte, error) { +func (s ListStreamObjectsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListStreamObjectsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListStreamsResponse: Response message for listing streams. @@ -995,9 +995,9 @@ type ListStreamsResponse struct { NullFields []string `json:"-"` } -func (s *ListStreamsResponse) MarshalJSON() ([]byte, error) { +func (s ListStreamsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListStreamsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Location: A resource that represents a Google Cloud location. 
@@ -1033,9 +1033,9 @@ type Location struct { NullFields []string `json:"-"` } -func (s *Location) MarshalJSON() ([]byte, error) { +func (s Location) MarshalJSON() ([]byte, error) { type NoMethod Location - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LookupStreamObjectRequest: Request for looking up a specific stream object @@ -1057,9 +1057,9 @@ type LookupStreamObjectRequest struct { NullFields []string `json:"-"` } -func (s *LookupStreamObjectRequest) MarshalJSON() ([]byte, error) { +func (s LookupStreamObjectRequest) MarshalJSON() ([]byte, error) { type NoMethod LookupStreamObjectRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Merge: Merge mode defines that all changes to a table will be merged at the @@ -1106,9 +1106,9 @@ type MysqlColumn struct { NullFields []string `json:"-"` } -func (s *MysqlColumn) MarshalJSON() ([]byte, error) { +func (s MysqlColumn) MarshalJSON() ([]byte, error) { type NoMethod MysqlColumn - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MysqlDatabase: MySQL database. @@ -1130,9 +1130,9 @@ type MysqlDatabase struct { NullFields []string `json:"-"` } -func (s *MysqlDatabase) MarshalJSON() ([]byte, error) { +func (s MysqlDatabase) MarshalJSON() ([]byte, error) { type NoMethod MysqlDatabase - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MysqlLogPosition: MySQL log position @@ -1155,9 +1155,9 @@ type MysqlLogPosition struct { NullFields []string `json:"-"` } -func (s *MysqlLogPosition) MarshalJSON() ([]byte, error) { +func (s MysqlLogPosition) MarshalJSON() ([]byte, error) { type NoMethod MysqlLogPosition - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MysqlObjectIdentifier: Mysql data source object identifier. @@ -1179,16 +1179,17 @@ type MysqlObjectIdentifier struct { NullFields []string `json:"-"` } -func (s *MysqlObjectIdentifier) MarshalJSON() ([]byte, error) { +func (s MysqlObjectIdentifier) MarshalJSON() ([]byte, error) { type NoMethod MysqlObjectIdentifier - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// MysqlProfile: MySQL database profile. +// MysqlProfile: MySQL database profile. Next ID: 7. type MysqlProfile struct { // Hostname: Required. Hostname for the MySQL connection. Hostname string `json:"hostname,omitempty"` - // Password: Required. Input only. Password for the MySQL connection. + // Password: Optional. Input only. Password for the MySQL connection. Mutually + // exclusive with the `secret_manager_stored_password` field. Password string `json:"password,omitempty"` // Port: Port for the MySQL connection, default value is 3306. 
Port int64 `json:"port,omitempty"` @@ -1209,9 +1210,9 @@ type MysqlProfile struct { NullFields []string `json:"-"` } -func (s *MysqlProfile) MarshalJSON() ([]byte, error) { +func (s MysqlProfile) MarshalJSON() ([]byte, error) { type NoMethod MysqlProfile - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MysqlRdbms: MySQL database structure @@ -1231,9 +1232,9 @@ type MysqlRdbms struct { NullFields []string `json:"-"` } -func (s *MysqlRdbms) MarshalJSON() ([]byte, error) { +func (s MysqlRdbms) MarshalJSON() ([]byte, error) { type NoMethod MysqlRdbms - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MysqlSourceConfig: MySQL source configuration @@ -1263,9 +1264,9 @@ type MysqlSourceConfig struct { NullFields []string `json:"-"` } -func (s *MysqlSourceConfig) MarshalJSON() ([]byte, error) { +func (s MysqlSourceConfig) MarshalJSON() ([]byte, error) { type NoMethod MysqlSourceConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MysqlSslConfig: MySQL SSL configuration information. @@ -1303,9 +1304,9 @@ type MysqlSslConfig struct { NullFields []string `json:"-"` } -func (s *MysqlSslConfig) MarshalJSON() ([]byte, error) { +func (s MysqlSslConfig) MarshalJSON() ([]byte, error) { type NoMethod MysqlSslConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MysqlTable: MySQL table. @@ -1328,9 +1329,9 @@ type MysqlTable struct { NullFields []string `json:"-"` } -func (s *MysqlTable) MarshalJSON() ([]byte, error) { +func (s MysqlTable) MarshalJSON() ([]byte, error) { type NoMethod MysqlTable - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NextAvailableStartPosition: CDC strategy to resume replication from the next @@ -1380,9 +1381,9 @@ type Operation struct { NullFields []string `json:"-"` } -func (s *Operation) MarshalJSON() ([]byte, error) { +func (s Operation) MarshalJSON() ([]byte, error) { type NoMethod Operation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationMetadata: Represents the metadata of the long-running operation. @@ -1421,9 +1422,9 @@ type OperationMetadata struct { NullFields []string `json:"-"` } -func (s *OperationMetadata) MarshalJSON() ([]byte, error) { +func (s OperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod OperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OracleColumn: Oracle Column. @@ -1459,9 +1460,9 @@ type OracleColumn struct { NullFields []string `json:"-"` } -func (s *OracleColumn) MarshalJSON() ([]byte, error) { +func (s OracleColumn) MarshalJSON() ([]byte, error) { type NoMethod OracleColumn - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OracleObjectIdentifier: Oracle data source object identifier. 
@@ -1483,12 +1484,12 @@ type OracleObjectIdentifier struct { NullFields []string `json:"-"` } -func (s *OracleObjectIdentifier) MarshalJSON() ([]byte, error) { +func (s OracleObjectIdentifier) MarshalJSON() ([]byte, error) { type NoMethod OracleObjectIdentifier - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// OracleProfile: Oracle database profile. +// OracleProfile: Oracle database profile. Next ID: 10. type OracleProfile struct { // ConnectionAttributes: Connection string attributes ConnectionAttributes map[string]string `json:"connectionAttributes,omitempty"` @@ -1498,7 +1499,8 @@ type OracleProfile struct { Hostname string `json:"hostname,omitempty"` // OracleSslConfig: Optional. SSL configuration for the Oracle connection. OracleSslConfig *OracleSslConfig `json:"oracleSslConfig,omitempty"` - // Password: Required. Password for the Oracle connection. + // Password: Optional. Password for the Oracle connection. Mutually exclusive + // with the `secret_manager_stored_password` field. Password string `json:"password,omitempty"` // Port: Port for the Oracle connection, default value is 1521. Port int64 `json:"port,omitempty"` @@ -1517,9 +1519,9 @@ type OracleProfile struct { NullFields []string `json:"-"` } -func (s *OracleProfile) MarshalJSON() ([]byte, error) { +func (s OracleProfile) MarshalJSON() ([]byte, error) { type NoMethod OracleProfile - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OracleRdbms: Oracle database structure. @@ -1539,9 +1541,9 @@ type OracleRdbms struct { NullFields []string `json:"-"` } -func (s *OracleRdbms) MarshalJSON() ([]byte, error) { +func (s OracleRdbms) MarshalJSON() ([]byte, error) { type NoMethod OracleRdbms - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OracleSchema: Oracle schema. @@ -1563,9 +1565,9 @@ type OracleSchema struct { NullFields []string `json:"-"` } -func (s *OracleSchema) MarshalJSON() ([]byte, error) { +func (s OracleSchema) MarshalJSON() ([]byte, error) { type NoMethod OracleSchema - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OracleScnPosition: Oracle SCN position @@ -1585,9 +1587,9 @@ type OracleScnPosition struct { NullFields []string `json:"-"` } -func (s *OracleScnPosition) MarshalJSON() ([]byte, error) { +func (s OracleScnPosition) MarshalJSON() ([]byte, error) { type NoMethod OracleScnPosition - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OracleSourceConfig: Oracle data source configuration @@ -1621,9 +1623,9 @@ type OracleSourceConfig struct { NullFields []string `json:"-"` } -func (s *OracleSourceConfig) MarshalJSON() ([]byte, error) { +func (s OracleSourceConfig) MarshalJSON() ([]byte, error) { type NoMethod OracleSourceConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OracleSslConfig: Oracle SSL configuration information. 
@@ -1647,9 +1649,9 @@ type OracleSslConfig struct { NullFields []string `json:"-"` } -func (s *OracleSslConfig) MarshalJSON() ([]byte, error) { +func (s OracleSslConfig) MarshalJSON() ([]byte, error) { type NoMethod OracleSslConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OracleTable: Oracle table. @@ -1672,9 +1674,9 @@ type OracleTable struct { NullFields []string `json:"-"` } -func (s *OracleTable) MarshalJSON() ([]byte, error) { +func (s OracleTable) MarshalJSON() ([]byte, error) { type NoMethod OracleTable - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PostgresqlColumn: PostgreSQL Column. @@ -1708,9 +1710,9 @@ type PostgresqlColumn struct { NullFields []string `json:"-"` } -func (s *PostgresqlColumn) MarshalJSON() ([]byte, error) { +func (s PostgresqlColumn) MarshalJSON() ([]byte, error) { type NoMethod PostgresqlColumn - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PostgresqlObjectIdentifier: PostgreSQL data source object identifier. @@ -1732,18 +1734,19 @@ type PostgresqlObjectIdentifier struct { NullFields []string `json:"-"` } -func (s *PostgresqlObjectIdentifier) MarshalJSON() ([]byte, error) { +func (s PostgresqlObjectIdentifier) MarshalJSON() ([]byte, error) { type NoMethod PostgresqlObjectIdentifier - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// PostgresqlProfile: PostgreSQL database profile. +// PostgresqlProfile: PostgreSQL database profile. Next ID: 7. type PostgresqlProfile struct { // Database: Required. Database for the PostgreSQL connection. Database string `json:"database,omitempty"` // Hostname: Required. Hostname for the PostgreSQL connection. Hostname string `json:"hostname,omitempty"` - // Password: Required. Password for the PostgreSQL connection. + // Password: Optional. Password for the PostgreSQL connection. Mutually + // exclusive with the `secret_manager_stored_password` field. Password string `json:"password,omitempty"` // Port: Port for the PostgreSQL connection, default value is 5432. Port int64 `json:"port,omitempty"` @@ -1762,9 +1765,9 @@ type PostgresqlProfile struct { NullFields []string `json:"-"` } -func (s *PostgresqlProfile) MarshalJSON() ([]byte, error) { +func (s PostgresqlProfile) MarshalJSON() ([]byte, error) { type NoMethod PostgresqlProfile - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PostgresqlRdbms: PostgreSQL database structure. @@ -1784,9 +1787,9 @@ type PostgresqlRdbms struct { NullFields []string `json:"-"` } -func (s *PostgresqlRdbms) MarshalJSON() ([]byte, error) { +func (s PostgresqlRdbms) MarshalJSON() ([]byte, error) { type NoMethod PostgresqlRdbms - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PostgresqlSchema: PostgreSQL schema. 
@@ -1808,9 +1811,9 @@ type PostgresqlSchema struct { NullFields []string `json:"-"` } -func (s *PostgresqlSchema) MarshalJSON() ([]byte, error) { +func (s PostgresqlSchema) MarshalJSON() ([]byte, error) { type NoMethod PostgresqlSchema - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PostgresqlSourceConfig: PostgreSQL data source configuration @@ -1842,9 +1845,9 @@ type PostgresqlSourceConfig struct { NullFields []string `json:"-"` } -func (s *PostgresqlSourceConfig) MarshalJSON() ([]byte, error) { +func (s PostgresqlSourceConfig) MarshalJSON() ([]byte, error) { type NoMethod PostgresqlSourceConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PostgresqlTable: PostgreSQL table. @@ -1867,9 +1870,9 @@ type PostgresqlTable struct { NullFields []string `json:"-"` } -func (s *PostgresqlTable) MarshalJSON() ([]byte, error) { +func (s PostgresqlTable) MarshalJSON() ([]byte, error) { type NoMethod PostgresqlTable - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PrivateConnection: The PrivateConnection resource is used to establish @@ -1919,9 +1922,9 @@ type PrivateConnection struct { NullFields []string `json:"-"` } -func (s *PrivateConnection) MarshalJSON() ([]byte, error) { +func (s PrivateConnection) MarshalJSON() ([]byte, error) { type NoMethod PrivateConnection - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PrivateConnectivity: Private Connectivity @@ -1942,9 +1945,9 @@ type PrivateConnectivity struct { NullFields []string `json:"-"` } -func (s *PrivateConnectivity) MarshalJSON() ([]byte, error) { +func (s PrivateConnectivity) MarshalJSON() ([]byte, error) { type NoMethod PrivateConnectivity - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Route: The route resource is the child of the private connection resource, @@ -1980,9 +1983,9 @@ type Route struct { NullFields []string `json:"-"` } -func (s *Route) MarshalJSON() ([]byte, error) { +func (s Route) MarshalJSON() ([]byte, error) { type NoMethod Route - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RunStreamRequest: Request message for running a stream. @@ -1990,6 +1993,8 @@ type RunStreamRequest struct { // CdcStrategy: Optional. The CDC strategy of the stream. If not set, the // system's default value will be used. CdcStrategy *CdcStrategy `json:"cdcStrategy,omitempty"` + // Force: Optional. Update the stream without validating it. + Force bool `json:"force,omitempty"` // ForceSendFields is a list of field names (e.g. "CdcStrategy") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. 
See @@ -2003,9 +2008,9 @@ type RunStreamRequest struct { NullFields []string `json:"-"` } -func (s *RunStreamRequest) MarshalJSON() ([]byte, error) { +func (s RunStreamRequest) MarshalJSON() ([]byte, error) { type NoMethod RunStreamRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SingleTargetDataset: A single target dataset to which all data will be @@ -2028,9 +2033,9 @@ type SingleTargetDataset struct { NullFields []string `json:"-"` } -func (s *SingleTargetDataset) MarshalJSON() ([]byte, error) { +func (s SingleTargetDataset) MarshalJSON() ([]byte, error) { type NoMethod SingleTargetDataset - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SourceConfig: The configuration of the stream source. @@ -2059,9 +2064,9 @@ type SourceConfig struct { NullFields []string `json:"-"` } -func (s *SourceConfig) MarshalJSON() ([]byte, error) { +func (s SourceConfig) MarshalJSON() ([]byte, error) { type NoMethod SourceConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SourceHierarchyDatasets: Destination datasets are created so that hierarchy @@ -2082,9 +2087,9 @@ type SourceHierarchyDatasets struct { NullFields []string `json:"-"` } -func (s *SourceHierarchyDatasets) MarshalJSON() ([]byte, error) { +func (s SourceHierarchyDatasets) MarshalJSON() ([]byte, error) { type NoMethod SourceHierarchyDatasets - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SourceObjectIdentifier: Represents an identifier of an object in the data @@ -2111,9 +2116,9 @@ type SourceObjectIdentifier struct { NullFields []string `json:"-"` } -func (s *SourceObjectIdentifier) MarshalJSON() ([]byte, error) { +func (s SourceObjectIdentifier) MarshalJSON() ([]byte, error) { type NoMethod SourceObjectIdentifier - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SpecificStartPosition: CDC strategy to start replicating from a specific @@ -2136,9 +2141,9 @@ type SpecificStartPosition struct { NullFields []string `json:"-"` } -func (s *SpecificStartPosition) MarshalJSON() ([]byte, error) { +func (s SpecificStartPosition) MarshalJSON() ([]byte, error) { type NoMethod SpecificStartPosition - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SqlServerChangeTables: Configuration to use Change Tables CDC read method. @@ -2176,9 +2181,9 @@ type SqlServerColumn struct { NullFields []string `json:"-"` } -func (s *SqlServerColumn) MarshalJSON() ([]byte, error) { +func (s SqlServerColumn) MarshalJSON() ([]byte, error) { type NoMethod SqlServerColumn - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SqlServerObjectIdentifier: SQLServer data source object identifier. 
@@ -2200,18 +2205,19 @@ type SqlServerObjectIdentifier struct { NullFields []string `json:"-"` } -func (s *SqlServerObjectIdentifier) MarshalJSON() ([]byte, error) { +func (s SqlServerObjectIdentifier) MarshalJSON() ([]byte, error) { type NoMethod SqlServerObjectIdentifier - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// SqlServerProfile: SQLServer database profile +// SqlServerProfile: SQLServer database profile. Next ID: 8. type SqlServerProfile struct { // Database: Required. Database for the SQLServer connection. Database string `json:"database,omitempty"` // Hostname: Required. Hostname for the SQLServer connection. Hostname string `json:"hostname,omitempty"` - // Password: Required. Password for the SQLServer connection. + // Password: Optional. Password for the SQLServer connection. Mutually + // exclusive with the `secret_manager_stored_password` field. Password string `json:"password,omitempty"` // Port: Port for the SQLServer connection, default value is 1433. Port int64 `json:"port,omitempty"` @@ -2230,9 +2236,9 @@ type SqlServerProfile struct { NullFields []string `json:"-"` } -func (s *SqlServerProfile) MarshalJSON() ([]byte, error) { +func (s SqlServerProfile) MarshalJSON() ([]byte, error) { type NoMethod SqlServerProfile - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SqlServerRdbms: SQLServer database structure. @@ -2252,9 +2258,9 @@ type SqlServerRdbms struct { NullFields []string `json:"-"` } -func (s *SqlServerRdbms) MarshalJSON() ([]byte, error) { +func (s SqlServerRdbms) MarshalJSON() ([]byte, error) { type NoMethod SqlServerRdbms - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SqlServerSchema: SQLServer schema. @@ -2276,9 +2282,9 @@ type SqlServerSchema struct { NullFields []string `json:"-"` } -func (s *SqlServerSchema) MarshalJSON() ([]byte, error) { +func (s SqlServerSchema) MarshalJSON() ([]byte, error) { type NoMethod SqlServerSchema - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SqlServerSourceConfig: SQLServer data source configuration @@ -2308,9 +2314,9 @@ type SqlServerSourceConfig struct { NullFields []string `json:"-"` } -func (s *SqlServerSourceConfig) MarshalJSON() ([]byte, error) { +func (s SqlServerSourceConfig) MarshalJSON() ([]byte, error) { type NoMethod SqlServerSourceConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SqlServerTable: SQLServer table. 
@@ -2333,9 +2339,9 @@ type SqlServerTable struct { NullFields []string `json:"-"` } -func (s *SqlServerTable) MarshalJSON() ([]byte, error) { +func (s SqlServerTable) MarshalJSON() ([]byte, error) { type NoMethod SqlServerTable - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SqlServerTransactionLogs: Configuration to use Transaction Logs CDC read @@ -2369,9 +2375,9 @@ type StartBackfillJobResponse struct { NullFields []string `json:"-"` } -func (s *StartBackfillJobResponse) MarshalJSON() ([]byte, error) { +func (s StartBackfillJobResponse) MarshalJSON() ([]byte, error) { type NoMethod StartBackfillJobResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StaticServiceIpConnectivity: Static IP address connectivity. Used when the @@ -2410,9 +2416,9 @@ type Status struct { NullFields []string `json:"-"` } -func (s *Status) MarshalJSON() ([]byte, error) { +func (s Status) MarshalJSON() ([]byte, error) { type NoMethod Status - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StopBackfillJobRequest: Request for manually stopping a running backfill job @@ -2441,9 +2447,9 @@ type StopBackfillJobResponse struct { NullFields []string `json:"-"` } -func (s *StopBackfillJobResponse) MarshalJSON() ([]byte, error) { +func (s StopBackfillJobResponse) MarshalJSON() ([]byte, error) { type NoMethod StopBackfillJobResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Stream: A resource representing streaming data from a source to a @@ -2511,9 +2517,9 @@ type Stream struct { NullFields []string `json:"-"` } -func (s *Stream) MarshalJSON() ([]byte, error) { +func (s Stream) MarshalJSON() ([]byte, error) { type NoMethod Stream - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StreamLargeObjects: Configuration to stream large object values. @@ -2553,9 +2559,9 @@ type StreamObject struct { NullFields []string `json:"-"` } -func (s *StreamObject) MarshalJSON() ([]byte, error) { +func (s StreamObject) MarshalJSON() ([]byte, error) { type NoMethod StreamObject - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Validation: A validation to perform on a stream. @@ -2588,9 +2594,9 @@ type Validation struct { NullFields []string `json:"-"` } -func (s *Validation) MarshalJSON() ([]byte, error) { +func (s Validation) MarshalJSON() ([]byte, error) { type NoMethod Validation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ValidationMessage: Represent user-facing validation result message. 
@@ -2621,9 +2627,9 @@ type ValidationMessage struct { NullFields []string `json:"-"` } -func (s *ValidationMessage) MarshalJSON() ([]byte, error) { +func (s ValidationMessage) MarshalJSON() ([]byte, error) { type NoMethod ValidationMessage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ValidationResult: Contains the current validation results. @@ -2644,9 +2650,9 @@ type ValidationResult struct { NullFields []string `json:"-"` } -func (s *ValidationResult) MarshalJSON() ([]byte, error) { +func (s ValidationResult) MarshalJSON() ([]byte, error) { type NoMethod ValidationResult - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VpcPeeringConfig: The VPC Peering configuration is used to create VPC @@ -2670,9 +2676,9 @@ type VpcPeeringConfig struct { NullFields []string `json:"-"` } -func (s *VpcPeeringConfig) MarshalJSON() ([]byte, error) { +func (s VpcPeeringConfig) MarshalJSON() ([]byte, error) { type NoMethod VpcPeeringConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ProjectsLocationsFetchStaticIpsCall struct { @@ -6003,7 +6009,7 @@ type ProjectsLocationsStreamsRunCall struct { } // Run: Use this method to start, resume or recover a stream with a non default -// CDC strategy. NOTE: This feature is currently experimental. +// CDC strategy. // // - name: Name of the stream resource to start, in the format: // projects/{project_id}/locations/{location}/streams/{stream_name}. diff --git a/terraform/providers/google/vendor/google.golang.org/api/dns/v1/dns-api.json b/terraform/providers/google/vendor/google.golang.org/api/dns/v1/dns-api.json index 780a1140f46..ba51c634b1d 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/dns/v1/dns-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/dns/v1/dns-api.json @@ -1824,7 +1824,7 @@ } } }, - "revision": "20240531", + "revision": "20240719", "rootUrl": "https://dns.googleapis.com/", "schemas": { "Change": { @@ -2893,6 +2893,10 @@ "format": "int32", "type": "integer" }, + "internetHealthChecksPerManagedZone": { + "format": "int32", + "type": "integer" + }, "itemsPerRoutingPolicy": { "description": "Maximum allowed number of items per routing policy.", "format": "int32", @@ -3009,6 +3013,10 @@ "geo": { "$ref": "RRSetRoutingPolicyGeoPolicy" }, + "healthCheck": { + "description": "The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks", + "type": "string" + }, "kind": { "default": "dns#rRSetRoutingPolicy", "type": "string" @@ -3080,6 +3088,13 @@ "description": "HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response.", "id": "RRSetRoutingPolicyHealthCheckTargets", "properties": { + "externalEndpoints": { + "description": "The Internet IP addresses to be health checked. 
The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1)", + "items": { + "type": "string" + }, + "type": "array" + }, "internalLoadBalancers": { "description": "Configuration for internal load balancers to be health checked.", "items": { diff --git a/terraform/providers/google/vendor/google.golang.org/api/dns/v1/dns-gen.go b/terraform/providers/google/vendor/google.golang.org/api/dns/v1/dns-gen.go index 74ff17c75b0..6068a09d91d 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/dns/v1/dns-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/dns/v1/dns-gen.go @@ -322,9 +322,9 @@ type Change struct { NullFields []string `json:"-"` } -func (s *Change) MarshalJSON() ([]byte, error) { +func (s Change) MarshalJSON() ([]byte, error) { type NoMethod Change - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ChangesListResponse: The response to a request to enumerate Changes to a @@ -359,9 +359,9 @@ type ChangesListResponse struct { NullFields []string `json:"-"` } -func (s *ChangesListResponse) MarshalJSON() ([]byte, error) { +func (s ChangesListResponse) MarshalJSON() ([]byte, error) { type NoMethod ChangesListResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DnsKey: A DNSSEC key pair. @@ -432,9 +432,9 @@ type DnsKey struct { NullFields []string `json:"-"` } -func (s *DnsKey) MarshalJSON() ([]byte, error) { +func (s DnsKey) MarshalJSON() ([]byte, error) { type NoMethod DnsKey - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type DnsKeyDigest struct { @@ -461,9 +461,9 @@ type DnsKeyDigest struct { NullFields []string `json:"-"` } -func (s *DnsKeyDigest) MarshalJSON() ([]byte, error) { +func (s DnsKeyDigest) MarshalJSON() ([]byte, error) { type NoMethod DnsKeyDigest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DnsKeySpec: Parameters for DnsKey key generation. 
Used for generating @@ -504,9 +504,9 @@ type DnsKeySpec struct { NullFields []string `json:"-"` } -func (s *DnsKeySpec) MarshalJSON() ([]byte, error) { +func (s DnsKeySpec) MarshalJSON() ([]byte, error) { type NoMethod DnsKeySpec - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DnsKeysListResponse: The response to a request to enumerate DnsKeys in a @@ -541,9 +541,9 @@ type DnsKeysListResponse struct { NullFields []string `json:"-"` } -func (s *DnsKeysListResponse) MarshalJSON() ([]byte, error) { +func (s DnsKeysListResponse) MarshalJSON() ([]byte, error) { type NoMethod DnsKeysListResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Expr: Represents a textual expression in the Common Expression Language @@ -589,9 +589,9 @@ type Expr struct { NullFields []string `json:"-"` } -func (s *Expr) MarshalJSON() ([]byte, error) { +func (s Expr) MarshalJSON() ([]byte, error) { type NoMethod Expr - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleIamV1AuditConfig: Specifies the audit configuration for a service. The @@ -630,9 +630,9 @@ type GoogleIamV1AuditConfig struct { NullFields []string `json:"-"` } -func (s *GoogleIamV1AuditConfig) MarshalJSON() ([]byte, error) { +func (s GoogleIamV1AuditConfig) MarshalJSON() ([]byte, error) { type NoMethod GoogleIamV1AuditConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleIamV1AuditLogConfig: Provides the configuration for logging a type of @@ -665,9 +665,9 @@ type GoogleIamV1AuditLogConfig struct { NullFields []string `json:"-"` } -func (s *GoogleIamV1AuditLogConfig) MarshalJSON() ([]byte, error) { +func (s GoogleIamV1AuditLogConfig) MarshalJSON() ([]byte, error) { type NoMethod GoogleIamV1AuditLogConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleIamV1Binding: Associates `members`, or principals, with a `role`. @@ -764,9 +764,9 @@ type GoogleIamV1Binding struct { NullFields []string `json:"-"` } -func (s *GoogleIamV1Binding) MarshalJSON() ([]byte, error) { +func (s GoogleIamV1Binding) MarshalJSON() ([]byte, error) { type NoMethod GoogleIamV1Binding - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleIamV1GetIamPolicyRequest: Request message for `GetIamPolicy` method. @@ -787,9 +787,9 @@ type GoogleIamV1GetIamPolicyRequest struct { NullFields []string `json:"-"` } -func (s *GoogleIamV1GetIamPolicyRequest) MarshalJSON() ([]byte, error) { +func (s GoogleIamV1GetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod GoogleIamV1GetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleIamV1GetPolicyOptions: Encapsulates settings provided to GetIamPolicy. 
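The receiver rewrite repeated throughout these generated files, from `func (s *T) MarshalJSON()` to `func (s T) MarshalJSON()`, is behavioral rather than cosmetic: encoding/json only invokes a pointer-receiver marshaller when it has an addressable value, so non-addressable values (map elements, for example) silently fell back to the default field encoder and the ForceSendFields/NullFields handling was skipped. A value receiver is always in the type's method set. A minimal sketch with toy types (the types and values here are assumptions for illustration; only the MarshalJSON/NoMethod pattern itself comes from the surrounding diff):

package main

import (
	"encoding/json"
	"fmt"
)

// ptrRecv customizes its encoding only through a pointer receiver,
// as the generated types did before this update.
type ptrRecv struct{ A string }

func (p *ptrRecv) MarshalJSON() ([]byte, error) { return []byte(`"custom"`), nil }

// valRecv uses a value receiver, as the generated types do now.
type valRecv struct{ A string }

func (v valRecv) MarshalJSON() ([]byte, error) { return []byte(`"custom"`), nil }

func main() {
	// Map elements are not addressable, so the pointer-receiver
	// marshaller is skipped and plain field encoding is emitted.
	b, _ := json.Marshal(map[string]ptrRecv{"k": {A: "x"}})
	fmt.Println(string(b)) // {"k":{"A":"x"}}

	// The value-receiver marshaller applies regardless of addressability.
	b, _ = json.Marshal(map[string]valRecv{"k": {A: "x"}})
	fmt.Println(string(b)) // {"k":"custom"}
}

The NoMethod alias seen in each generated method serves a separate purpose: it strips the custom marshaller from the type so the call into gensupport.MarshalJSON does not recurse back into MarshalJSON itself.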
@@ -819,9 +819,9 @@ type GoogleIamV1GetPolicyOptions struct { NullFields []string `json:"-"` } -func (s *GoogleIamV1GetPolicyOptions) MarshalJSON() ([]byte, error) { +func (s GoogleIamV1GetPolicyOptions) MarshalJSON() ([]byte, error) { type NoMethod GoogleIamV1GetPolicyOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleIamV1Policy: An Identity and Access Management (IAM) policy, which @@ -911,9 +911,9 @@ type GoogleIamV1Policy struct { NullFields []string `json:"-"` } -func (s *GoogleIamV1Policy) MarshalJSON() ([]byte, error) { +func (s GoogleIamV1Policy) MarshalJSON() ([]byte, error) { type NoMethod GoogleIamV1Policy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleIamV1SetIamPolicyRequest: Request message for `SetIamPolicy` method. @@ -940,9 +940,9 @@ type GoogleIamV1SetIamPolicyRequest struct { NullFields []string `json:"-"` } -func (s *GoogleIamV1SetIamPolicyRequest) MarshalJSON() ([]byte, error) { +func (s GoogleIamV1SetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod GoogleIamV1SetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleIamV1TestIamPermissionsRequest: Request message for @@ -966,9 +966,9 @@ type GoogleIamV1TestIamPermissionsRequest struct { NullFields []string `json:"-"` } -func (s *GoogleIamV1TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { +func (s GoogleIamV1TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { type NoMethod GoogleIamV1TestIamPermissionsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleIamV1TestIamPermissionsResponse: Response message for @@ -993,9 +993,9 @@ type GoogleIamV1TestIamPermissionsResponse struct { NullFields []string `json:"-"` } -func (s *GoogleIamV1TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { +func (s GoogleIamV1TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleIamV1TestIamPermissionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ManagedZone: A zone is a subtree of the DNS namespace under one @@ -1076,9 +1076,9 @@ type ManagedZone struct { NullFields []string `json:"-"` } -func (s *ManagedZone) MarshalJSON() ([]byte, error) { +func (s ManagedZone) MarshalJSON() ([]byte, error) { type NoMethod ManagedZone - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ManagedZoneCloudLoggingConfig: Cloud Logging configurations for publicly @@ -1101,9 +1101,9 @@ type ManagedZoneCloudLoggingConfig struct { NullFields []string `json:"-"` } -func (s *ManagedZoneCloudLoggingConfig) MarshalJSON() ([]byte, error) { +func (s ManagedZoneCloudLoggingConfig) MarshalJSON() ([]byte, error) { type NoMethod ManagedZoneCloudLoggingConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ManagedZoneDnsSecConfig struct { @@ -1142,9 +1142,9 @@ type 
ManagedZoneDnsSecConfig struct { NullFields []string `json:"-"` } -func (s *ManagedZoneDnsSecConfig) MarshalJSON() ([]byte, error) { +func (s ManagedZoneDnsSecConfig) MarshalJSON() ([]byte, error) { type NoMethod ManagedZoneDnsSecConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ManagedZoneForwardingConfig struct { @@ -1165,9 +1165,9 @@ type ManagedZoneForwardingConfig struct { NullFields []string `json:"-"` } -func (s *ManagedZoneForwardingConfig) MarshalJSON() ([]byte, error) { +func (s ManagedZoneForwardingConfig) MarshalJSON() ([]byte, error) { type NoMethod ManagedZoneForwardingConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ManagedZoneForwardingConfigNameServerTarget struct { @@ -1202,9 +1202,9 @@ type ManagedZoneForwardingConfigNameServerTarget struct { NullFields []string `json:"-"` } -func (s *ManagedZoneForwardingConfigNameServerTarget) MarshalJSON() ([]byte, error) { +func (s ManagedZoneForwardingConfigNameServerTarget) MarshalJSON() ([]byte, error) { type NoMethod ManagedZoneForwardingConfigNameServerTarget - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ManagedZoneOperationsListResponse struct { @@ -1237,9 +1237,9 @@ type ManagedZoneOperationsListResponse struct { NullFields []string `json:"-"` } -func (s *ManagedZoneOperationsListResponse) MarshalJSON() ([]byte, error) { +func (s ManagedZoneOperationsListResponse) MarshalJSON() ([]byte, error) { type NoMethod ManagedZoneOperationsListResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ManagedZonePeeringConfig struct { @@ -1259,9 +1259,9 @@ type ManagedZonePeeringConfig struct { NullFields []string `json:"-"` } -func (s *ManagedZonePeeringConfig) MarshalJSON() ([]byte, error) { +func (s ManagedZonePeeringConfig) MarshalJSON() ([]byte, error) { type NoMethod ManagedZonePeeringConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ManagedZonePeeringConfigTargetNetwork struct { @@ -1290,9 +1290,9 @@ type ManagedZonePeeringConfigTargetNetwork struct { NullFields []string `json:"-"` } -func (s *ManagedZonePeeringConfigTargetNetwork) MarshalJSON() ([]byte, error) { +func (s ManagedZonePeeringConfigTargetNetwork) MarshalJSON() ([]byte, error) { type NoMethod ManagedZonePeeringConfigTargetNetwork - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ManagedZonePrivateVisibilityConfig struct { @@ -1315,9 +1315,9 @@ type ManagedZonePrivateVisibilityConfig struct { NullFields []string `json:"-"` } -func (s *ManagedZonePrivateVisibilityConfig) MarshalJSON() ([]byte, error) { +func (s ManagedZonePrivateVisibilityConfig) MarshalJSON() ([]byte, error) { type NoMethod ManagedZonePrivateVisibilityConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ManagedZonePrivateVisibilityConfigGKECluster struct { @@ -1341,9 +1341,9 @@ type 
ManagedZonePrivateVisibilityConfigGKECluster struct { NullFields []string `json:"-"` } -func (s *ManagedZonePrivateVisibilityConfigGKECluster) MarshalJSON() ([]byte, error) { +func (s ManagedZonePrivateVisibilityConfigGKECluster) MarshalJSON() ([]byte, error) { type NoMethod ManagedZonePrivateVisibilityConfigGKECluster - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ManagedZonePrivateVisibilityConfigNetwork struct { @@ -1366,9 +1366,9 @@ type ManagedZonePrivateVisibilityConfigNetwork struct { NullFields []string `json:"-"` } -func (s *ManagedZonePrivateVisibilityConfigNetwork) MarshalJSON() ([]byte, error) { +func (s ManagedZonePrivateVisibilityConfigNetwork) MarshalJSON() ([]byte, error) { type NoMethod ManagedZonePrivateVisibilityConfigNetwork - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ManagedZoneReverseLookupConfig struct { @@ -1386,9 +1386,9 @@ type ManagedZoneReverseLookupConfig struct { NullFields []string `json:"-"` } -func (s *ManagedZoneReverseLookupConfig) MarshalJSON() ([]byte, error) { +func (s ManagedZoneReverseLookupConfig) MarshalJSON() ([]byte, error) { type NoMethod ManagedZoneReverseLookupConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ManagedZoneServiceDirectoryConfig: Contains information about Service @@ -1411,9 +1411,9 @@ type ManagedZoneServiceDirectoryConfig struct { NullFields []string `json:"-"` } -func (s *ManagedZoneServiceDirectoryConfig) MarshalJSON() ([]byte, error) { +func (s ManagedZoneServiceDirectoryConfig) MarshalJSON() ([]byte, error) { type NoMethod ManagedZoneServiceDirectoryConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ManagedZoneServiceDirectoryConfigNamespace struct { @@ -1440,9 +1440,9 @@ type ManagedZoneServiceDirectoryConfigNamespace struct { NullFields []string `json:"-"` } -func (s *ManagedZoneServiceDirectoryConfigNamespace) MarshalJSON() ([]byte, error) { +func (s ManagedZoneServiceDirectoryConfigNamespace) MarshalJSON() ([]byte, error) { type NoMethod ManagedZoneServiceDirectoryConfigNamespace - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ManagedZonesListResponse struct { @@ -1475,9 +1475,9 @@ type ManagedZonesListResponse struct { NullFields []string `json:"-"` } -func (s *ManagedZonesListResponse) MarshalJSON() ([]byte, error) { +func (s ManagedZonesListResponse) MarshalJSON() ([]byte, error) { type NoMethod ManagedZonesListResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Operation: An operation represents a successful mutation performed on a @@ -1533,9 +1533,9 @@ type Operation struct { NullFields []string `json:"-"` } -func (s *Operation) MarshalJSON() ([]byte, error) { +func (s Operation) MarshalJSON() ([]byte, error) { type NoMethod Operation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type OperationDnsKeyContext 
struct { @@ -1556,9 +1556,9 @@ type OperationDnsKeyContext struct { NullFields []string `json:"-"` } -func (s *OperationDnsKeyContext) MarshalJSON() ([]byte, error) { +func (s OperationDnsKeyContext) MarshalJSON() ([]byte, error) { type NoMethod OperationDnsKeyContext - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type OperationManagedZoneContext struct { @@ -1579,9 +1579,9 @@ type OperationManagedZoneContext struct { NullFields []string `json:"-"` } -func (s *OperationManagedZoneContext) MarshalJSON() ([]byte, error) { +func (s OperationManagedZoneContext) MarshalJSON() ([]byte, error) { type NoMethod OperationManagedZoneContext - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PoliciesListResponse struct { @@ -1614,9 +1614,9 @@ type PoliciesListResponse struct { NullFields []string `json:"-"` } -func (s *PoliciesListResponse) MarshalJSON() ([]byte, error) { +func (s PoliciesListResponse) MarshalJSON() ([]byte, error) { type NoMethod PoliciesListResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PoliciesPatchResponse struct { @@ -1637,9 +1637,9 @@ type PoliciesPatchResponse struct { NullFields []string `json:"-"` } -func (s *PoliciesPatchResponse) MarshalJSON() ([]byte, error) { +func (s PoliciesPatchResponse) MarshalJSON() ([]byte, error) { type NoMethod PoliciesPatchResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PoliciesUpdateResponse struct { @@ -1660,9 +1660,9 @@ type PoliciesUpdateResponse struct { NullFields []string `json:"-"` } -func (s *PoliciesUpdateResponse) MarshalJSON() ([]byte, error) { +func (s PoliciesUpdateResponse) MarshalJSON() ([]byte, error) { type NoMethod PoliciesUpdateResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Policy: A policy is a collection of DNS rules applied to one or more Virtual @@ -1709,9 +1709,9 @@ type Policy struct { NullFields []string `json:"-"` } -func (s *Policy) MarshalJSON() ([]byte, error) { +func (s Policy) MarshalJSON() ([]byte, error) { type NoMethod Policy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PolicyAlternativeNameServerConfig struct { @@ -1734,9 +1734,9 @@ type PolicyAlternativeNameServerConfig struct { NullFields []string `json:"-"` } -func (s *PolicyAlternativeNameServerConfig) MarshalJSON() ([]byte, error) { +func (s PolicyAlternativeNameServerConfig) MarshalJSON() ([]byte, error) { type NoMethod PolicyAlternativeNameServerConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PolicyAlternativeNameServerConfigTargetNameServer struct { @@ -1771,9 +1771,9 @@ type PolicyAlternativeNameServerConfigTargetNameServer struct { NullFields []string `json:"-"` } -func (s *PolicyAlternativeNameServerConfigTargetNameServer) MarshalJSON() ([]byte, error) { +func (s PolicyAlternativeNameServerConfigTargetNameServer) 
MarshalJSON() ([]byte, error) { type NoMethod PolicyAlternativeNameServerConfigTargetNameServer - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PolicyNetwork struct { @@ -1795,9 +1795,9 @@ type PolicyNetwork struct { NullFields []string `json:"-"` } -func (s *PolicyNetwork) MarshalJSON() ([]byte, error) { +func (s PolicyNetwork) MarshalJSON() ([]byte, error) { type NoMethod PolicyNetwork - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Project: A project resource. The project is a top level container for @@ -1828,9 +1828,9 @@ type Project struct { NullFields []string `json:"-"` } -func (s *Project) MarshalJSON() ([]byte, error) { +func (s Project) MarshalJSON() ([]byte, error) { type NoMethod Project - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Quota: Limits associated with a Project. @@ -1844,7 +1844,8 @@ type Quota struct { GkeClustersPerPolicy int64 `json:"gkeClustersPerPolicy,omitempty"` // GkeClustersPerResponsePolicy: Maximum allowed number of GKE clusters per // response policy. - GkeClustersPerResponsePolicy int64 `json:"gkeClustersPerResponsePolicy,omitempty"` + GkeClustersPerResponsePolicy int64 `json:"gkeClustersPerResponsePolicy,omitempty"` + InternetHealthChecksPerManagedZone int64 `json:"internetHealthChecksPerManagedZone,omitempty"` // ItemsPerRoutingPolicy: Maximum allowed number of items per routing policy. ItemsPerRoutingPolicy int64 `json:"itemsPerRoutingPolicy,omitempty"` Kind string `json:"kind,omitempty"` @@ -1914,16 +1915,20 @@ type Quota struct { NullFields []string `json:"-"` } -func (s *Quota) MarshalJSON() ([]byte, error) { +func (s Quota) MarshalJSON() ([]byte, error) { type NoMethod Quota - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RRSetRoutingPolicy: A RRSetRoutingPolicy represents ResourceRecordSet data // that is returned dynamically with the response varying based on configured // properties such as geolocation or by weighted random selection. type RRSetRoutingPolicy struct { - Geo *RRSetRoutingPolicyGeoPolicy `json:"geo,omitempty"` + Geo *RRSetRoutingPolicyGeoPolicy `json:"geo,omitempty"` + // HealthCheck: The selfLink attribute of the HealthCheck resource to use for + // this RRSetRoutingPolicy. 
+ // https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks + HealthCheck string `json:"healthCheck,omitempty"` Kind string `json:"kind,omitempty"` PrimaryBackup *RRSetRoutingPolicyPrimaryBackupPolicy `json:"primaryBackup,omitempty"` Wrr *RRSetRoutingPolicyWrrPolicy `json:"wrr,omitempty"` @@ -1940,9 +1945,9 @@ type RRSetRoutingPolicy struct { NullFields []string `json:"-"` } -func (s *RRSetRoutingPolicy) MarshalJSON() ([]byte, error) { +func (s RRSetRoutingPolicy) MarshalJSON() ([]byte, error) { type NoMethod RRSetRoutingPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RRSetRoutingPolicyGeoPolicy: Configures a `RRSetRoutingPolicy` that routes @@ -1973,9 +1978,9 @@ type RRSetRoutingPolicyGeoPolicy struct { NullFields []string `json:"-"` } -func (s *RRSetRoutingPolicyGeoPolicy) MarshalJSON() ([]byte, error) { +func (s RRSetRoutingPolicyGeoPolicy) MarshalJSON() ([]byte, error) { type NoMethod RRSetRoutingPolicyGeoPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RRSetRoutingPolicyGeoPolicyGeoPolicyItem: ResourceRecordSet data for one geo @@ -2008,34 +2013,38 @@ type RRSetRoutingPolicyGeoPolicyGeoPolicyItem struct { NullFields []string `json:"-"` } -func (s *RRSetRoutingPolicyGeoPolicyGeoPolicyItem) MarshalJSON() ([]byte, error) { +func (s RRSetRoutingPolicyGeoPolicyGeoPolicyItem) MarshalJSON() ([]byte, error) { type NoMethod RRSetRoutingPolicyGeoPolicyGeoPolicyItem - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RRSetRoutingPolicyHealthCheckTargets: HealthCheckTargets describes endpoints // to health-check when responding to Routing Policy queries. Only the healthy // endpoints will be included in the response. type RRSetRoutingPolicyHealthCheckTargets struct { + // ExternalEndpoints: The Internet IP addresses to be health checked. The + // format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 + // (section 5) and RFC 1034 (section 3.6.1) + ExternalEndpoints []string `json:"externalEndpoints,omitempty"` // InternalLoadBalancers: Configuration for internal load balancers to be // health checked. InternalLoadBalancers []*RRSetRoutingPolicyLoadBalancerTarget `json:"internalLoadBalancers,omitempty"` - // ForceSendFields is a list of field names (e.g. "InternalLoadBalancers") to + // ForceSendFields is a list of field names (e.g. "ExternalEndpoints") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "InternalLoadBalancers") to - // include in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. See + // NullFields is a list of field names (e.g. "ExternalEndpoints") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
NullFields []string `json:"-"` } -func (s *RRSetRoutingPolicyHealthCheckTargets) MarshalJSON() ([]byte, error) { +func (s RRSetRoutingPolicyHealthCheckTargets) MarshalJSON() ([]byte, error) { type NoMethod RRSetRoutingPolicyHealthCheckTargets - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RRSetRoutingPolicyLoadBalancerTarget: The configuration for an individual @@ -2091,9 +2100,9 @@ type RRSetRoutingPolicyLoadBalancerTarget struct { NullFields []string `json:"-"` } -func (s *RRSetRoutingPolicyLoadBalancerTarget) MarshalJSON() ([]byte, error) { +func (s RRSetRoutingPolicyLoadBalancerTarget) MarshalJSON() ([]byte, error) { type NoMethod RRSetRoutingPolicyLoadBalancerTarget - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RRSetRoutingPolicyPrimaryBackupPolicy: Configures a RRSetRoutingPolicy such @@ -2126,9 +2135,9 @@ type RRSetRoutingPolicyPrimaryBackupPolicy struct { NullFields []string `json:"-"` } -func (s *RRSetRoutingPolicyPrimaryBackupPolicy) MarshalJSON() ([]byte, error) { +func (s RRSetRoutingPolicyPrimaryBackupPolicy) MarshalJSON() ([]byte, error) { type NoMethod RRSetRoutingPolicyPrimaryBackupPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *RRSetRoutingPolicyPrimaryBackupPolicy) UnmarshalJSON(data []byte) error { @@ -2163,9 +2172,9 @@ type RRSetRoutingPolicyWrrPolicy struct { NullFields []string `json:"-"` } -func (s *RRSetRoutingPolicyWrrPolicy) MarshalJSON() ([]byte, error) { +func (s RRSetRoutingPolicyWrrPolicy) MarshalJSON() ([]byte, error) { type NoMethod RRSetRoutingPolicyWrrPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RRSetRoutingPolicyWrrPolicyWrrPolicyItem: A routing block which contains the @@ -2202,9 +2211,9 @@ type RRSetRoutingPolicyWrrPolicyWrrPolicyItem struct { NullFields []string `json:"-"` } -func (s *RRSetRoutingPolicyWrrPolicyWrrPolicyItem) MarshalJSON() ([]byte, error) { +func (s RRSetRoutingPolicyWrrPolicyWrrPolicyItem) MarshalJSON() ([]byte, error) { type NoMethod RRSetRoutingPolicyWrrPolicyWrrPolicyItem - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *RRSetRoutingPolicyWrrPolicyWrrPolicyItem) UnmarshalJSON(data []byte) error { @@ -2258,9 +2267,9 @@ type ResourceRecordSet struct { NullFields []string `json:"-"` } -func (s *ResourceRecordSet) MarshalJSON() ([]byte, error) { +func (s ResourceRecordSet) MarshalJSON() ([]byte, error) { type NoMethod ResourceRecordSet - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ResourceRecordSetsDeleteResponse struct { @@ -2298,9 +2307,9 @@ type ResourceRecordSetsListResponse struct { NullFields []string `json:"-"` } -func (s *ResourceRecordSetsListResponse) MarshalJSON() ([]byte, error) { +func (s ResourceRecordSetsListResponse) MarshalJSON() ([]byte, error) { type NoMethod ResourceRecordSetsListResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), 
s.ForceSendFields, s.NullFields) } type ResponsePoliciesListResponse struct { @@ -2331,9 +2340,9 @@ type ResponsePoliciesListResponse struct { NullFields []string `json:"-"` } -func (s *ResponsePoliciesListResponse) MarshalJSON() ([]byte, error) { +func (s ResponsePoliciesListResponse) MarshalJSON() ([]byte, error) { type NoMethod ResponsePoliciesListResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ResponsePoliciesPatchResponse struct { @@ -2354,9 +2363,9 @@ type ResponsePoliciesPatchResponse struct { NullFields []string `json:"-"` } -func (s *ResponsePoliciesPatchResponse) MarshalJSON() ([]byte, error) { +func (s ResponsePoliciesPatchResponse) MarshalJSON() ([]byte, error) { type NoMethod ResponsePoliciesPatchResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ResponsePoliciesUpdateResponse struct { @@ -2377,9 +2386,9 @@ type ResponsePoliciesUpdateResponse struct { NullFields []string `json:"-"` } -func (s *ResponsePoliciesUpdateResponse) MarshalJSON() ([]byte, error) { +func (s ResponsePoliciesUpdateResponse) MarshalJSON() ([]byte, error) { type NoMethod ResponsePoliciesUpdateResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResponsePolicy: A Response Policy is a collection of selectors that apply to @@ -2416,9 +2425,9 @@ type ResponsePolicy struct { NullFields []string `json:"-"` } -func (s *ResponsePolicy) MarshalJSON() ([]byte, error) { +func (s ResponsePolicy) MarshalJSON() ([]byte, error) { type NoMethod ResponsePolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ResponsePolicyGKECluster struct { @@ -2442,9 +2451,9 @@ type ResponsePolicyGKECluster struct { NullFields []string `json:"-"` } -func (s *ResponsePolicyGKECluster) MarshalJSON() ([]byte, error) { +func (s ResponsePolicyGKECluster) MarshalJSON() ([]byte, error) { type NoMethod ResponsePolicyGKECluster - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ResponsePolicyNetwork struct { @@ -2467,9 +2476,9 @@ type ResponsePolicyNetwork struct { NullFields []string `json:"-"` } -func (s *ResponsePolicyNetwork) MarshalJSON() ([]byte, error) { +func (s ResponsePolicyNetwork) MarshalJSON() ([]byte, error) { type NoMethod ResponsePolicyNetwork - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResponsePolicyRule: A Response Policy Rule is a selector that applies its @@ -2524,9 +2533,9 @@ type ResponsePolicyRule struct { NullFields []string `json:"-"` } -func (s *ResponsePolicyRule) MarshalJSON() ([]byte, error) { +func (s ResponsePolicyRule) MarshalJSON() ([]byte, error) { type NoMethod ResponsePolicyRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ResponsePolicyRuleLocalData struct { @@ -2546,9 +2555,9 @@ type ResponsePolicyRuleLocalData struct { NullFields []string `json:"-"` } -func (s *ResponsePolicyRuleLocalData) 
MarshalJSON() ([]byte, error) { +func (s ResponsePolicyRuleLocalData) MarshalJSON() ([]byte, error) { type NoMethod ResponsePolicyRuleLocalData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ResponsePolicyRulesListResponse struct { @@ -2579,9 +2588,9 @@ type ResponsePolicyRulesListResponse struct { NullFields []string `json:"-"` } -func (s *ResponsePolicyRulesListResponse) MarshalJSON() ([]byte, error) { +func (s ResponsePolicyRulesListResponse) MarshalJSON() ([]byte, error) { type NoMethod ResponsePolicyRulesListResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ResponsePolicyRulesPatchResponse struct { @@ -2602,9 +2611,9 @@ type ResponsePolicyRulesPatchResponse struct { NullFields []string `json:"-"` } -func (s *ResponsePolicyRulesPatchResponse) MarshalJSON() ([]byte, error) { +func (s ResponsePolicyRulesPatchResponse) MarshalJSON() ([]byte, error) { type NoMethod ResponsePolicyRulesPatchResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ResponsePolicyRulesUpdateResponse struct { @@ -2625,9 +2634,9 @@ type ResponsePolicyRulesUpdateResponse struct { NullFields []string `json:"-"` } -func (s *ResponsePolicyRulesUpdateResponse) MarshalJSON() ([]byte, error) { +func (s ResponsePolicyRulesUpdateResponse) MarshalJSON() ([]byte, error) { type NoMethod ResponsePolicyRulesUpdateResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ChangesCreateCall struct { diff --git a/terraform/providers/google/vendor/google.golang.org/api/healthcare/v1/healthcare-api.json b/terraform/providers/google/vendor/google.golang.org/api/healthcare/v1/healthcare-api.json index 26505f53657..c687007865e 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/healthcare/v1/healthcare-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/healthcare/v1/healthcare-api.json @@ -2117,6 +2117,35 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, + "setBlobStorageSettings": { + "description": "SetBlobStorageSettings sets the blob storage settings of the specified resources.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}:setBlobStorageSettings", + "httpMethod": "POST", + "id": "healthcare.projects.locations.datasets.dicomStores.setBlobStorageSettings", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "Required. The path of the resource to update the blob storage settings in the format of `projects/{projectID}/locations/{locationID}/datasets/{datasetID}/dicomStores/{dicomStoreID}/dicomWeb/studies/{studyUID}`, `projects/{projectID}/locations/{locationID}/datasets/{datasetID}/dicomStores/{dicomStoreID}/dicomWeb/studies/{studyUID}/series/{seriesUID}/`, or `projects/{projectID}/locations/{locationID}/datasets/{datasetID}/dicomStores/{dicomStoreID}/dicomWeb/studies/{studyUID}/series/{seriesUID}/instances/{instanceUID}`. 
If `filter_config` is specified, set the value of `resource` to the resource name of a DICOM store in the format `projects/{projectID}/locations/{locationID}/datasets/{datasetID}/dicomStores/{dicomStoreID}`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+resource}:setBlobStorageSettings", + "request": { + "$ref": "SetBlobStorageSettingsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-healthcare", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, "setIamPolicy": { "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}:setIamPolicy", @@ -2243,6 +2272,35 @@ "https://www.googleapis.com/auth/cloud-healthcare", "https://www.googleapis.com/auth/cloud-platform" ] + }, + "setBlobStorageSettings": { + "description": "SetBlobStorageSettings sets the blob storage settings of the specified resources.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}:setBlobStorageSettings", + "httpMethod": "POST", + "id": "healthcare.projects.locations.datasets.dicomStores.dicomWeb.studies.setBlobStorageSettings", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "Required. The path of the resource to update the blob storage settings in the format of `projects/{projectID}/locations/{locationID}/datasets/{datasetID}/dicomStores/{dicomStoreID}/dicomWeb/studies/{studyUID}`, `projects/{projectID}/locations/{locationID}/datasets/{datasetID}/dicomStores/{dicomStoreID}/dicomWeb/studies/{studyUID}/series/{seriesUID}/`, or `projects/{projectID}/locations/{locationID}/datasets/{datasetID}/dicomStores/{dicomStoreID}/dicomWeb/studies/{studyUID}/series/{seriesUID}/instances/{instanceUID}`. If `filter_config` is specified, set the value of `resource` to the resource name of a DICOM store in the format `projects/{projectID}/locations/{locationID}/datasets/{datasetID}/dicomStores/{dicomStoreID}`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+/dicomWeb/studies/.*$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+resource}:setBlobStorageSettings", + "request": { + "$ref": "SetBlobStorageSettingsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-healthcare", + "https://www.googleapis.com/auth/cloud-platform" + ] } }, "resources": { @@ -2274,6 +2332,38 @@ "https://www.googleapis.com/auth/cloud-platform" ] } + }, + "resources": { + "instances": { + "methods": { + "getStorageInfo": { + "description": "GetStorageInfo returns the storage info of the specified resource.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/series/{seriesId}/instances/{instancesId}:getStorageInfo", + "httpMethod": "GET", + "id": "healthcare.projects.locations.datasets.dicomStores.dicomWeb.studies.series.instances.getStorageInfo", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "Required. 
The path of the instance to return storage info for, in the form: `projects/{projectID}/locations/{locationID}/datasets/{datasetID}/dicomStores/{dicomStoreID}/dicomWeb/studies/{studyUID}/series/{seriesUID}/instances/{instanceUID}`", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+/dicomWeb/studies/[^/]+/series/[^/]+/instances/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+resource}:getStorageInfo", + "response": { + "$ref": "StorageInfo" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-healthcare", + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } } } } @@ -3251,6 +3341,116 @@ "resources": { "fhir": { "methods": { + "Binary-create": { + "description": "Creates a FHIR Binary resource. This method can be used to create a Binary resource either by using one of the accepted FHIR JSON content types, or as a raw data stream. If a resource is created with this method using the FHIR content type this method's behavior is the same as [`fhir.create`](https://cloud.google.com/healthcare-api/docs/reference/rest/v1/projects.locations.datasets.fhirStores.fhir/create). If a resource type other than Binary is used in the request it's treated in the same way as non-FHIR data (e.g., images, zip archives, pdf files, documents). When a non-FHIR content type is used in the request, a Binary resource will be generated, and the uploaded data will be stored in the `content` field (`DSTU2` and `STU3`), or the `data` field (`R4`). The Binary resource's `contentType` will be filled in using the value of the `Content-Type` header, and the `securityContext` field (not present in `DSTU2`) will be populated from the `X-Security-Context` header if it exists. At this time `securityContext` has no special behavior in the Cloud Healthcare API. Note: the limit on data ingested through this method is 2 GB. For best performance, use a non-FHIR data type instead of wrapping the data in a Binary resource. Some of the Healthcare API features, such as [exporting to BigQuery](https://cloud.google.com/healthcare-api/docs/how-tos/fhir-export-bigquery) or [Pub/Sub notifications](https://cloud.google.com/healthcare-api/docs/fhir-pubsub#behavior_when_a_fhir_resource_is_too_large_or_traffic_is_high) with full resource content, do not support Binary resources that are larger than 10 MB. In these cases the resource's `data` field will be omitted. Instead, the \"http://hl7.org/fhir/StructureDefinition/data-absent-reason\" extension will be present to indicate that including the data is `unsupported`. On success, an empty `201 Created` response is returned. The newly created resource's ID and version are returned in the Location header. Using `Prefer: representation=resource` is not allowed for this method. The definition of the Binary REST API can be found at https://hl7.org/fhir/binary.html#rest.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/Binary", + "httpMethod": "POST", + "id": "healthcare.projects.locations.datasets.fhirStores.fhir.Binary-create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. 
The name of the FHIR store this resource belongs to.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/fhirStores/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/fhir/Binary", + "request": { + "$ref": "HttpBody" + }, + "response": { + "$ref": "HttpBody" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-healthcare", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "Binary-read": { + "description": "Gets the contents of a FHIR Binary resource. This method can be used to retrieve a Binary resource either by using the FHIR JSON mimetype as the value for the Accept header, or as a raw data stream. If the FHIR Accept type is used this method will return a Binary resource with the data base64-encoded, regardless of how the resource was created. The resource data can be retrieved in base64-decoded form if the Accept type of the request matches the value of the resource's `contentType` field. The definition of the Binary REST API can be found at https://hl7.org/fhir/binary.html#rest.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/Binary/{BinaryId}", + "httpMethod": "GET", + "id": "healthcare.projects.locations.datasets.fhirStores.fhir.Binary-read", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the Binary resource to retrieve.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/fhirStores/[^/]+/fhir/Binary/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "HttpBody" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-healthcare", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "Binary-update": { + "description": "Updates the entire contents of a Binary resource. If the specified resource does not exist and the FHIR store has enable_update_create set, creates the resource with the client-specified ID. It is strongly advised not to include or encode any sensitive data such as patient identifiers in client-specified resource IDs. Those IDs are part of the FHIR resource path recorded in Cloud Audit Logs and Pub/Sub notifications. Those IDs can also be contained in reference fields within other resources. This method can be used to update a Binary resource either by using one of the accepted FHIR JSON content types, or as a raw data stream. If a resource is updated with this method using the FHIR content type this method's behavior is the same as `update`. If a resource type other than Binary is used in the request it will be treated in the same way as non-FHIR data. When a non-FHIR content type is used in the request, a Binary resource will be generated using the ID from the resource path, and the uploaded data will be stored in the `content` field (`DSTU2` and `STU3`), or the `data` field (`R4`). The Binary resource's `contentType` will be filled in using the value of the `Content-Type` header, and the `securityContext` field (not present in `DSTU2`) will be populated from the `X-Security-Context` header if it exists. At this time `securityContext` has no special behavior in the Cloud Healthcare API. Note: the limit on data ingested through this method is 2 GB. For best performance, use a non-FHIR data type instead of wrapping the data in a Binary resource. 
Some of the Healthcare API features, such as [exporting to BigQuery](https://cloud.google.com/healthcare-api/docs/how-tos/fhir-export-bigquery) or [Pub/Sub notifications](https://cloud.google.com/healthcare-api/docs/fhir-pubsub#behavior_when_a_fhir_resource_is_too_large_or_traffic_is_high) with full resource content, do not support Binary resources that are larger than 10 MB. In these cases the resource's `data` field will be omitted. Instead, the \"http://hl7.org/fhir/StructureDefinition/data-absent-reason\" extension will be present to indicate that including the data is `unsupported`. On success, an empty 200 OK response will be returned, or a 201 Created if the resource did not exist. The resource's ID and version are returned in the Location header. Using `Prefer: representation=resource` is not allowed for this method. The definition of the Binary REST API can be found at https://hl7.org/fhir/binary.html#rest.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/Binary/{BinaryId}", + "httpMethod": "PUT", + "id": "healthcare.projects.locations.datasets.fhirStores.fhir.Binary-update", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the resource to update.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/fhirStores/[^/]+/fhir/Binary/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "request": { + "$ref": "HttpBody" + }, + "response": { + "$ref": "HttpBody" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-healthcare", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "Binary-vread": { + "description": "Gets the contents of a version (current or historical) of a FHIR Binary resource by version ID. This method can be used to retrieve a Binary resource version either by using the FHIR JSON mimetype as the value for the Accept header, or as a raw data stream. If the FHIR Accept type is used this method will return a Binary resource with the data base64-encoded, regardless of how the resource version was created. The resource data can be retrieved in base64-decoded form if the Accept type of the request matches the value of the resource version's `contentType` field. The definition of the Binary REST API can be found at https://hl7.org/fhir/binary.html#rest.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/Binary/{BinaryId}/_history/{_historyId}", + "httpMethod": "GET", + "id": "healthcare.projects.locations.datasets.fhirStores.fhir.Binary-vread", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the Binary resource version to retrieve.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/fhirStores/[^/]+/fhir/Binary/[^/]+/_history/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "HttpBody" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-healthcare", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "Patient-everything": { + "description": "Retrieves a Patient resource and resources related to that patient. 
Implements the FHIR extended operation Patient-everything ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/patient-operations.html#everything), [STU3](http://hl7.org/implement/standards/fhir/STU3/patient-operations.html#everything), [R4](http://hl7.org/implement/standards/fhir/R4/patient-operations.html#everything)). On success, the response body contains a JSON-encoded representation of a `Bundle` resource of type `searchset`, containing the results of the operation. Errors generated by the FHIR store contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. The resources in scope for the response are: * The patient resource itself. * All the resources directly referenced by the patient resource. * Resources directly referencing the patient resource that meet the inclusion criteria. The inclusion criteria are based on the membership rules in the patient compartment definition ([DSTU2](http://hl7.org/fhir/DSTU2/compartment-patient.html), [STU3](http://www.hl7.org/fhir/stu3/compartmentdefinition-patient.html), [R4](http://hl7.org/fhir/R4/compartmentdefinition-patient.html)), which details the eligible resource types and referencing search parameters. For samples that show how to call `Patient-everything`, see [Getting all patient compartment resources](https://cloud.google.com/healthcare/docs/how-tos/fhir-resources#getting_all_patient_compartment_resources).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/Patient/{PatientId}/$everything", @@ -4109,6 +4309,35 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, + "rollback": { + "description": "Rolls back messages from the HL7v2 store to the specified time. This method returns an Operation that can be used to track the status of the rollback by calling GetOperation. Immediate fatal errors appear in the error field, errors are also logged to Cloud Logging (see [Viewing error logs in Cloud Logging](https://cloud.google.com/healthcare/docs/how-tos/logging)). Otherwise, when the operation finishes, a detailed response of type RollbackHl7V2MessagesResponse is returned in the response field. The metadata field type for this operation is OperationMetadata.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/hl7V2Stores/{hl7V2StoresId}:rollback", + "httpMethod": "POST", + "id": "healthcare.projects.locations.datasets.hl7V2Stores.rollback", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the HL7v2 store to rollback, in the format of \"projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/hl7V2Stores/{hl7v2_store_id}\".", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/hl7V2Stores/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:rollback", + "request": { + "$ref": "RollbackHl7V2MessagesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-healthcare", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, "setIamPolicy": { "description": "Sets the access control policy on the specified resource. Replaces any existing policy. 
Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/hl7V2Stores/{hl7V2StoresId}:setIamPolicy", @@ -4380,7 +4609,7 @@ ], "parameters": { "name": { - "description": "Output only. Resource name of the Message, of the form `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/hl7V2Stores/{hl7_v2_store_id}/messages/{message_id}`. Assigned by the server.", + "description": "Output only. Resource name of the Message, of the form `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/hl7V2Stores/{hl7_v2_store_id}/messages/{message_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/hl7V2Stores/[^/]+/messages/[^/]+$", "required": true, @@ -4554,7 +4783,7 @@ } } }, - "revision": "20240429", + "revision": "20240910", "rootUrl": "https://healthcare.googleapis.com/", "schemas": { "ActivateConsentRequest": { @@ -4797,6 +5026,66 @@ }, "type": "object" }, + "BlobStorageInfo": { + "description": "BlobStorageInfo contains details about the data stored in Blob Storage for the referenced resource. Note: Storage class is only valid for DICOM and hence will only be populated for DICOM resources.", + "id": "BlobStorageInfo", + "properties": { + "sizeBytes": { + "description": "Size in bytes of data stored in Blob Storage.", + "format": "int64", + "type": "string" + }, + "storageClass": { + "description": "The storage class in which the Blob data is stored.", + "enum": [ + "BLOB_STORAGE_CLASS_UNSPECIFIED", + "STANDARD", + "NEARLINE", + "COLDLINE", + "ARCHIVE" + ], + "enumDescriptions": [ + "If unspecified in CreateDataset, the StorageClass defaults to STANDARD. If unspecified in UpdateDataset and the StorageClass is set in the field mask, an InvalidRequest error is thrown.", + "This stores the Object in Blob Standard Storage: https://cloud.google.com/storage/docs/storage-classes#standard", + "This stores the Object in Blob Nearline Storage: https://cloud.google.com/storage/docs/storage-classes#nearline", + "This stores the Object in Blob Coldline Storage: https://cloud.google.com/storage/docs/storage-classes#coldline", + "This stores the Object in Blob Archive Storage: https://cloud.google.com/storage/docs/storage-classes#archive" + ], + "type": "string" + }, + "storageClassUpdateTime": { + "description": "The time at which the storage class was updated. This is used to compute early deletion fees of the resource.", + "format": "google-datetime", + "type": "string" + } + }, + "type": "object" + }, + "BlobStorageSettings": { + "description": "Settings for data stored in Blob storage.", + "id": "BlobStorageSettings", + "properties": { + "blobStorageClass": { + "description": "The Storage class in which the Blob data is stored.", + "enum": [ + "BLOB_STORAGE_CLASS_UNSPECIFIED", + "STANDARD", + "NEARLINE", + "COLDLINE", + "ARCHIVE" + ], + "enumDescriptions": [ + "If unspecified in CreateDataset, the StorageClass defaults to STANDARD. 
If unspecified in UpdateDataset and the StorageClass is set in the field mask, an InvalidRequest error is thrown.", + "This stores the Object in Blob Standard Storage: https://cloud.google.com/storage/docs/storage-classes#standard", + "This stores the Object in Blob Nearline Storage: https://cloud.google.com/storage/docs/storage-classes#nearline", + "This stores the Object in Blob Coldline Storage: https://cloud.google.com/storage/docs/storage-classes#coldline", + "This stores the Object in Blob Archive Storage: https://cloud.google.com/storage/docs/storage-classes#archive" + ], + "type": "string" + } + }, + "type": "object" + }, "CancelOperationRequest": { "description": "The request message for Operations.CancelOperation.", "id": "CancelOperationRequest", @@ -5084,12 +5373,16 @@ "description": "A message representing a health dataset. A health dataset represents a collection of healthcare data pertaining to one or more patients. This may include multiple modalities of healthcare data, such as electronic medical records or medical imaging data.", "id": "Dataset", "properties": { + "encryptionSpec": { + "$ref": "EncryptionSpec", + "description": "Optional. Customer-managed encryption key spec for a Dataset. If set, this Dataset and all of its sub-resources will be secured by this key. If empty, the Dataset is secured by the default Google encryption key." + }, "name": { "description": "Identifier. Resource name of the dataset, of the form `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`.", "type": "string" }, "timeZone": { - "description": "The default timezone used by this dataset. Must be a either a valid IANA time zone name such as \"America/New_York\" or empty, which defaults to UTC. This is used for parsing times in resources, such as HL7 messages, where no explicit timezone is specified.", + "description": "Optional. The default timezone used by this dataset. Must be either a valid IANA time zone name such as \"America/New_York\" or empty, which defaults to UTC. This is used for parsing times in resources, such as HL7 messages, where no explicit timezone is specified.", "type": "string" } }, @@ -5147,7 +5440,7 @@ "description": "Configures de-identification of text wherever it is found in the source_dataset." }, "useRegionalDataProcessing": { - "description": "Ensures in-flight data remains in the region of origin during de-identification. Using this option results in a significant reduction of throughput, and is not compatible with `LOCATION` or `ORGANIZATION_NAME` infoTypes. `LOCATION` must be excluded within TextConfig, and must also be excluded within ImageConfig if image redaction is required.", + "description": "Ensures in-flight data remains in the region of origin during de-identification. The default value is false. Using this option results in a significant reduction of throughput, and is not compatible with `LOCATION` or `ORGANIZATION_NAME` infoTypes. `LOCATION` must be excluded within TextConfig, and must also be excluded within ImageConfig if image redaction is required.", "type": "boolean" } }, @@ -5293,7 +5586,7 @@ }, "notificationConfig": { "$ref": "NotificationConfig", - "description": "Notification destination for new DICOM instances. Supplied by the client." + "description": "Optional. Notification destination for new DICOM instances. Supplied by the client." }, "streamConfigs": { "description": "Optional. A list of streaming configs used to configure the destination of streaming exports for every DICOM instance insertion in this DICOM store. 
After a new config is added to `stream_configs`, DICOM instance insertions are streamed to the new destination. When a config is removed from `stream_configs`, the server stops streaming to that destination. Each config must contain a unique destination.", @@ -5347,6 +5640,17 @@ "properties": {}, "type": "object" }, + "EncryptionSpec": { + "description": "Represents a customer-managed encryption key spec that can be applied to a resource.", + "id": "EncryptionSpec", + "properties": { + "kmsKeyName": { + "description": "Required. The resource name of customer-managed encryption key that is used to secure a resource and its sub-resources. Only the key in the same location as this Dataset is allowed to be used for encryption. Format is: `projects/{project}/locations/{location}/keyRings/{keyRing}/cryptoKeys/{key}`", + "type": "string" + } + }, + "type": "object" + }, "Entity": { "description": "The candidate entities that an entity mention could link to.", "id": "Entity", @@ -5659,15 +5963,15 @@ "id": "FhirNotificationConfig", "properties": { "pubsubTopic": { - "description": "The [Pub/Sub](https://cloud.google.com/pubsub/docs/) topic that notifications of changes are published on. Supplied by the client. The notification is a `PubsubMessage` with the following fields: * `PubsubMessage.Data` contains the resource name. * `PubsubMessage.MessageId` is the ID of this notification. It is guaranteed to be unique within the topic. * `PubsubMessage.PublishTime` is the time when the message was published. Note that notifications are only sent if the topic is non-empty. [Topic names](https://cloud.google.com/pubsub/docs/overview#names) must be scoped to a project. The Cloud Healthcare API service account, service-@gcp-sa-healthcare.iam.gserviceaccount.com, must have publisher permissions on the given Pub/Sub topic. Not having adequate permissions causes the calls that send notifications to fail (https://cloud.google.com/healthcare-api/docs/permissions-healthcare-api-gcp-products#dicom_fhir_and_hl7v2_store_cloud_pubsub_permissions). If a notification can't be published to Pub/Sub, errors are logged to Cloud Logging. For more information, see [Viewing error logs in Cloud Logging](https://cloud.google.com/healthcare-api/docs/how-tos/logging).", + "description": "Optional. The [Pub/Sub](https://cloud.google.com/pubsub/docs/) topic that notifications of changes are published on. Supplied by the client. The notification is a `PubsubMessage` with the following fields: * `PubsubMessage.Data` contains the resource name. * `PubsubMessage.MessageId` is the ID of this notification. It is guaranteed to be unique within the topic. * `PubsubMessage.PublishTime` is the time when the message was published. Note that notifications are only sent if the topic is non-empty. [Topic names](https://cloud.google.com/pubsub/docs/overview#names) must be scoped to a project. The Cloud Healthcare API service account, service-@gcp-sa-healthcare.iam.gserviceaccount.com, must have publisher permissions on the given Pub/Sub topic. Not having adequate permissions causes the calls that send notifications to fail (https://cloud.google.com/healthcare-api/docs/permissions-healthcare-api-gcp-products#dicom_fhir_and_hl7v2_store_cloud_pubsub_permissions). If a notification can't be published to Pub/Sub, errors are logged to Cloud Logging. 
For more information, see [Viewing error logs in Cloud Logging](https://cloud.google.com/healthcare-api/docs/how-tos/logging).", "type": "string" }, "sendFullResource": { - "description": "Whether to send full FHIR resource to this Pub/Sub topic.", + "description": "Optional. Whether to send full FHIR resource to this Pub/Sub topic. The default value is false.", "type": "boolean" }, "sendPreviousResourceOnDelete": { - "description": "Whether to send full FHIR resource to this Pub/Sub topic for deleting FHIR resource. Note that setting this to true does not guarantee that all previous resources will be sent in the format of full FHIR resource. When a resource change is too large or during heavy traffic, only the resource name will be sent. Clients should always check the \"payloadType\" label from a Pub/Sub message to determine whether it needs to fetch the full previous resource as a separate operation.", + "description": "Optional. Whether to send full FHIR resource to this Pub/Sub topic for deleting FHIR resource. The default value is false. Note that setting this to true does not guarantee that all previous resources will be sent in the format of full FHIR resource. When a resource change is too large or during heavy traffic, only the resource name will be sent. Clients should always check the \"payloadType\" label from a Pub/Sub message to determine whether it needs to fetch the full previous resource as a separate operation.", "type": "boolean" } }, @@ -5692,7 +5996,7 @@ "type": "string" }, "defaultSearchHandlingStrict": { - "description": "If true, overrides the default search behavior for this FHIR store to `handling=strict` which returns an error for unrecognized search parameters. If false, uses the FHIR specification default `handling=lenient` which ignores unrecognized search parameters. The handling can always be changed from the default on an individual API call by setting the HTTP header `Prefer: handling=strict` or `Prefer: handling=lenient`.", + "description": "Optional. If true, overrides the default search behavior for this FHIR store to `handling=strict` which returns an error for unrecognized search parameters. If false, uses the FHIR specification default `handling=lenient` which ignores unrecognized search parameters. The handling can always be changed from the default on an individual API call by setting the HTTP header `Prefer: handling=strict` or `Prefer: handling=lenient`. Defaults to false.", "type": "boolean" }, "disableReferentialIntegrity": { @@ -5700,11 +6004,11 @@ "type": "boolean" }, "disableResourceVersioning": { - "description": "Immutable. Whether to disable resource versioning for this FHIR store. This field can not be changed after the creation of FHIR store. If set to false, which is the default behavior, all write operations cause historical versions to be recorded automatically. The historical versions can be fetched through the history APIs, but cannot be updated. If set to true, no historical versions are kept. The server sends errors for attempts to read the historical versions.", + "description": "Immutable. Whether to disable resource versioning for this FHIR store. This field can not be changed after the creation of FHIR store. If set to false, all write operations cause historical versions to be recorded automatically. The historical versions can be fetched through the history APIs, but cannot be updated. If set to true, no historical versions are kept. The server sends errors for attempts to read the historical versions. 
Defaults to false.", "type": "boolean" }, "enableUpdateCreate": { - "description": "Whether this FHIR store has the [updateCreate capability](https://www.hl7.org/fhir/capabilitystatement-definitions.html#CapabilityStatement.rest.resource.updateCreate). This determines if the client can use an Update operation to create a new resource with a client-specified ID. If false, all IDs are server-assigned through the Create operation and attempts to update a non-existent resource return errors. It is strongly advised not to include or encode any sensitive data such as patient identifiers in client-specified resource IDs. Those IDs are part of the FHIR resource path recorded in Cloud audit logs and Pub/Sub notifications. Those IDs can also be contained in reference fields within other resources.", + "description": "Optional. Whether this FHIR store has the [updateCreate capability](https://www.hl7.org/fhir/capabilitystatement-definitions.html#CapabilityStatement.rest.resource.updateCreate). This determines if the client can use an Update operation to create a new resource with a client-specified ID. If false, all IDs are server-assigned through the Create operation and attempts to update a non-existent resource return errors. It is strongly advised not to include or encode any sensitive data such as patient identifiers in client-specified resource IDs. Those IDs are part of the FHIR resource path recorded in Cloud audit logs and Pub/Sub notifications. Those IDs can also be contained in reference fields within other resources. Defaults to false.", "type": "boolean" }, "labels": { @@ -5724,14 +6028,14 @@ "description": "Deprecated. Use `notification_configs` instead. If non-empty, publish all resource modifications of this FHIR store to this destination. The Pub/Sub message attributes contain a map with a string describing the action that has triggered the notification. For example, \"action\":\"CreateResource\"." }, "notificationConfigs": { - "description": "Specifies where and whether to send notifications upon changes to a FHIR store.", + "description": "Optional. Specifies where and whether to send notifications upon changes to a FHIR store.", "items": { "$ref": "FhirNotificationConfig" }, "type": "array" }, "streamConfigs": { - "description": "A list of streaming configs that configure the destinations of streaming export for every resource mutation in this FHIR store. Each store is allowed to have up to 10 streaming configs. After a new config is added, the next resource mutation is streamed to the new location in addition to the existing ones. When a location is removed from the list, the server stops streaming to that location. Before adding a new config, you must add the required [`bigquery.dataEditor`](https://cloud.google.com/bigquery/docs/access-control#bigquery.dataEditor) role to your project's **Cloud Healthcare Service Agent** [service account](https://cloud.google.com/iam/docs/service-accounts). Some lag (typically on the order of dozens of seconds) is expected before the results show up in the streaming destination.", + "description": "Optional. A list of streaming configs that configure the destinations of streaming export for every resource mutation in this FHIR store. Each store is allowed to have up to 10 streaming configs. After a new config is added, the next resource mutation is streamed to the new location in addition to the existing ones. When a location is removed from the list, the server stops streaming to that location. 
Before adding a new config, you must add the required [`bigquery.dataEditor`](https://cloud.google.com/bigquery/docs/access-control#bigquery.dataEditor) role to your project's **Cloud Healthcare Service Agent** [service account](https://cloud.google.com/iam/docs/service-accounts). Some lag (typically on the order of dozens of seconds) is expected before the results show up in the streaming destination.", "items": { "$ref": "StreamConfig" }, @@ -6026,19 +6330,19 @@ "id": "GoogleCloudHealthcareV1FhirBigQueryDestination", "properties": { "datasetUri": { - "description": "BigQuery URI to an existing dataset, up to 2000 characters long, in the format `bq://projectId.bqDatasetId`.", + "description": "Optional. BigQuery URI to an existing dataset, up to 2000 characters long, in the format `bq://projectId.bqDatasetId`.", "type": "string" }, "force": { - "description": "If this flag is `TRUE`, all tables are deleted from the dataset before the new exported tables are written. If the flag is not set and the destination dataset contains tables, the export call returns an error. If `write_disposition` is specified, this parameter is ignored. force=false is equivalent to write_disposition=WRITE_EMPTY and force=true is equivalent to write_disposition=WRITE_TRUNCATE.", + "description": "Optional. The default value is false. If this flag is `TRUE`, all tables are deleted from the dataset before the new exported tables are written. If the flag is not set and the destination dataset contains tables, the export call returns an error. If `write_disposition` is specified, this parameter is ignored. force=false is equivalent to write_disposition=WRITE_EMPTY and force=true is equivalent to write_disposition=WRITE_TRUNCATE.", "type": "boolean" }, "schemaConfig": { "$ref": "SchemaConfig", - "description": "The configuration for the exported BigQuery schema." + "description": "Optional. The configuration for the exported BigQuery schema." }, "writeDisposition": { - "description": "Determines if existing data in the destination dataset is overwritten, appended to, or not written if the tables contain data. If a write_disposition is specified, the `force` parameter is ignored.", + "description": "Optional. Determines if existing data in the destination dataset is overwritten, appended to, or not written if the tables contain data. If a write_disposition is specified, the `force` parameter is ignored.", "enum": [ "WRITE_DISPOSITION_UNSPECIFIED", "WRITE_EMPTY", @@ -6138,7 +6442,7 @@ "id": "Hl7V2NotificationConfig", "properties": { "filter": { - "description": "Restricts notifications sent for messages matching a filter. If this is empty, all messages are matched. The following syntax is available: * A string field value can be written as text inside quotation marks, for example `\"query text\"`. The only valid relational operation for text fields is equality (`=`), where text is searched within the field, rather than having the field be equal to the text. For example, `\"Comment = great\"` returns messages with `great` in the comment field. * A number field value can be written as an integer, a decimal, or an exponential. The valid relational operators for number fields are the equality operator (`=`), along with the less than/greater than operators (`\u003c`, `\u003c=`, `\u003e`, `\u003e=`). Note that there is no inequality (`!=`) operator. You can prepend the `NOT` operator to an expression to negate it. * A date field value must be written in `yyyy-mm-dd` form. Fields with date and time use the RFC3339 time format. 
Leading zeros are required for one-digit months and days. The valid relational operators for date fields are the equality operator (`=`) , along with the less than/greater than operators (`\u003c`, `\u003c=`, `\u003e`, `\u003e=`). Note that there is no inequality (`!=`) operator. You can prepend the `NOT` operator to an expression to negate it. * Multiple field query expressions can be combined in one query by adding `AND` or `OR` operators between the expressions. If a boolean operator appears within a quoted string, it is not treated as special, it's just another part of the character string to be matched. You can prepend the `NOT` operator to an expression to negate it. The following fields and functions are available for filtering: * `message_type`, from the MSH-9.1 field. For example, `NOT message_type = \"ADT\"`. * `send_date` or `sendDate`, the YYYY-MM-DD date the message was sent in the dataset's time_zone, from the MSH-7 segment. For example, `send_date \u003c \"2017-01-02\"`. * `send_time`, the timestamp when the message was sent, using the RFC3339 time format for comparisons, from the MSH-7 segment. For example, `send_time \u003c \"2017-01-02T00:00:00-05:00\"`. * `create_time`, the timestamp when the message was created in the HL7v2 store. Use the RFC3339 time format for comparisons. For example, `create_time \u003c \"2017-01-02T00:00:00-05:00\"`. * `send_facility`, the care center that the message came from, from the MSH-4 segment. For example, `send_facility = \"ABC\"`. * `PatientId(value, type)`, which matches if the message lists a patient having an ID of the given value and type in the PID-2, PID-3, or PID-4 segments. For example, `PatientId(\"123456\", \"MRN\")`. * `labels.x`, a string value of the label with key `x` as set using the Message.labels map. For example, `labels.\"priority\"=\"high\"`. The operator `:*` can be used to assert the existence of a label. For example, `labels.\"priority\":*`.", + "description": "Optional. Restricts notifications sent for messages matching a filter. If this is empty, all messages are matched. The following syntax is available: * A string field value can be written as text inside quotation marks, for example `\"query text\"`. The only valid relational operation for text fields is equality (`=`), where text is searched within the field, rather than having the field be equal to the text. For example, `\"Comment = great\"` returns messages with `great` in the comment field. * A number field value can be written as an integer, a decimal, or an exponential. The valid relational operators for number fields are the equality operator (`=`), along with the less than/greater than operators (`\u003c`, `\u003c=`, `\u003e`, `\u003e=`). Note that there is no inequality (`!=`) operator. You can prepend the `NOT` operator to an expression to negate it. * A date field value must be written in `yyyy-mm-dd` form. Fields with date and time use the RFC3339 time format. Leading zeros are required for one-digit months and days. The valid relational operators for date fields are the equality operator (`=`) , along with the less than/greater than operators (`\u003c`, `\u003c=`, `\u003e`, `\u003e=`). Note that there is no inequality (`!=`) operator. You can prepend the `NOT` operator to an expression to negate it. * Multiple field query expressions can be combined in one query by adding `AND` or `OR` operators between the expressions. 
If a boolean operator appears within a quoted string, it is not treated as special, it's just another part of the character string to be matched. You can prepend the `NOT` operator to an expression to negate it. The following fields and functions are available for filtering: * `message_type`, from the MSH-9.1 field. For example, `NOT message_type = \"ADT\"`. * `send_date` or `sendDate`, the YYYY-MM-DD date the message was sent in the dataset's time_zone, from the MSH-7 segment. For example, `send_date \u003c \"2017-01-02\"`. * `send_time`, the timestamp when the message was sent, using the RFC3339 time format for comparisons, from the MSH-7 segment. For example, `send_time \u003c \"2017-01-02T00:00:00-05:00\"`. * `create_time`, the timestamp when the message was created in the HL7v2 store. Use the RFC3339 time format for comparisons. For example, `create_time \u003c \"2017-01-02T00:00:00-05:00\"`. * `send_facility`, the care center that the message came from, from the MSH-4 segment. For example, `send_facility = \"ABC\"`. * `PatientId(value, type)`, which matches if the message lists a patient having an ID of the given value and type in the PID-2, PID-3, or PID-4 segments. For example, `PatientId(\"123456\", \"MRN\")`. * `labels.x`, a string value of the label with key `x` as set using the Message.labels map. For example, `labels.\"priority\"=\"high\"`. The operator `:*` can be used to assert the existence of a label. For example, `labels.\"priority\":*`.", "type": "string" }, "pubsubTopic": { @@ -6164,7 +6468,7 @@ "type": "string" }, "notificationConfigs": { - "description": "A list of notification configs. Each configuration uses a filter to determine whether to publish a message (both Ingest \u0026 Create) on the corresponding notification destination. Only the message name is sent as part of the notification. Supplied by the client.", + "description": "Optional. A list of notification configs. Each configuration uses a filter to determine whether to publish a message (both Ingest \u0026 Create) on the corresponding notification destination. Only the message name is sent as part of the notification. Supplied by the client.", "items": { "$ref": "Hl7V2NotificationConfig" }, @@ -6172,10 +6476,10 @@ }, "parserConfig": { "$ref": "ParserConfig", - "description": "The configuration for the parser. It determines how the server parses the messages." + "description": "Optional. The configuration for the parser. It determines how the server parses the messages." }, "rejectDuplicateMessage": { - "description": "Determines whether to reject duplicate messages. A duplicate message is a message with the same raw bytes as a message that has already been ingested/created in this HL7v2 store. The default value is false, meaning that the store accepts the duplicate messages and it also returns the same ACK message in the IngestMessageResponse as has been returned previously. Note that only one resource is created in the store. When this field is set to true, CreateMessage/IngestMessage requests with a duplicate message will be rejected by the store, and IngestMessageErrorDetail returns a NACK message upon rejection.", + "description": "Optional. Determines whether to reject duplicate messages. A duplicate message is a message with the same raw bytes as a message that has already been ingested/created in this HL7v2 store. The default value is false, meaning that the store accepts the duplicate messages and it also returns the same ACK message in the IngestMessageResponse as has been returned previously. 
Note that only one resource is created in the store. When this field is set to true, CreateMessage/IngestMessage requests with a duplicate message will be rejected by the store, and IngestMessageErrorDetail returns a NACK message upon rejection.", "type": "boolean" } }, @@ -6290,6 +6594,10 @@ "description": "Imports data into the specified DICOM store. Returns an error if any of the files to import are not DICOM files. This API accepts duplicate DICOM instances by ignoring the newly-pushed instance. It does not overwrite.", "id": "ImportDicomDataRequest", "properties": { + "blobStorageSettings": { + "$ref": "BlobStorageSettings", + "description": "Optional. The blob storage settings for the data imported by this operation." + }, "gcsSource": { "$ref": "GoogleCloudHealthcareV1DicomGcsSource", "description": "Cloud Storage source data location and import configuration. The Cloud Healthcare Service Agent requires the `roles/storage.objectViewer` Cloud IAM roles on the Cloud Storage location." @@ -6728,11 +7036,12 @@ "type": "object" }, "messageType": { - "description": "The message type for this message. MSH-9.1.", + "description": "Output only. The message type for this message. MSH-9.1.", + "readOnly": true, "type": "string" }, "name": { - "description": "Output only. Resource name of the Message, of the form `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/hl7V2Stores/{hl7_v2_store_id}/messages/{message_id}`. Assigned by the server.", + "description": "Output only. Resource name of the Message, of the form `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/hl7V2Stores/{hl7_v2_store_id}/messages/{message_id}`.", "readOnly": true, "type": "string" }, @@ -6742,23 +7051,27 @@ "readOnly": true }, "patientIds": { - "description": "All patient IDs listed in the PID-2, PID-3, and PID-4 segments of this message.", + "description": "Output only. All patient IDs listed in the PID-2, PID-3, and PID-4 segments of this message.", "items": { "$ref": "PatientId" }, + "readOnly": true, "type": "array" }, "schematizedData": { "$ref": "SchematizedData", - "description": "The parsed version of the raw message data schematized according to this store's schemas and type definitions." + "description": "Output only. The parsed version of the raw message data schematized according to this store's schemas and type definitions.", + "readOnly": true }, "sendFacility": { - "description": "The hospital that this message came from. MSH-4.", + "description": "Output only. The hospital that this message came from. MSH-4.", + "readOnly": true, "type": "string" }, "sendTime": { - "description": "The datetime the sending application sent this message. MSH-7.", + "description": "Output only. The datetime the sending application sent this message. MSH-7.", "format": "google-datetime", + "readOnly": true, "type": "string" } }, @@ -6864,12 +7177,12 @@ "id": "ParserConfig", "properties": { "allowNullHeader": { - "description": "Determines whether messages with no header are allowed.", + "description": "Optional. Determines whether messages with no header are allowed.", "type": "boolean" }, "schema": { "$ref": "SchemaPackage", - "description": "Schemas used to parse messages in this store, if schematized parsing is desired." + "description": "Optional. Schemas used to parse messages in this store, if schematized parsing is desired." }, "segmentTerminator": { "description": "Byte(s) to use as the segment terminator. 
If this is unset, '\\r' is used as segment terminator, matching the HL7 version 2 specification.", @@ -7168,6 +7481,82 @@ }, "type": "object" }, + "RollbackHL7MessagesFilteringFields": { + "description": "Filtering fields for an HL7v2 rollback. Currently only supports a list of operation IDs to roll back.", + "id": "RollbackHL7MessagesFilteringFields", + "properties": { + "operationIds": { + "description": "Optional. A list of operation IDs to roll back.", + "items": { + "format": "uint64", + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "RollbackHl7V2MessagesRequest": { + "description": "Point in time recovery rollback request.", + "id": "RollbackHl7V2MessagesRequest", + "properties": { + "changeType": { + "description": "Optional. CREATE/UPDATE/DELETE/ALL for reverting all transactions of a certain type.", + "enum": [ + "CHANGE_TYPE_UNSPECIFIED", + "ALL", + "CREATE", + "UPDATE", + "DELETE" + ], + "enumDescriptions": [ + "When unspecified, revert all transactions", + "All transactions", + "Revert only CREATE transactions", + "Revert only UPDATE transactions", + "Revert only DELETE transactions" + ], + "type": "string" + }, + "excludeRollbacks": { + "description": "Optional. Specifies whether to exclude earlier rollbacks.", + "type": "boolean" + }, + "filteringFields": { + "$ref": "RollbackHL7MessagesFilteringFields", + "description": "Optional. Parameters for filtering." + }, + "force": { + "description": "Optional. When enabled, changes will be reverted without explicit confirmation.", + "type": "boolean" + }, + "inputGcsObject": { + "description": "Optional. Cloud Storage object containing a list of {resourceId} lines, identifying the resources to be reverted.", + "type": "string" + }, + "resultGcsBucket": { + "description": "Required. Bucket in which to deposit the result.", + "type": "string" + }, + "rollbackTime": { + "description": "Required. Time point to roll back to.", + "format": "google-datetime", + "type": "string" + } + }, + "type": "object" + }, + "RollbackHl7V2MessagesResponse": { + "description": "Final response of rollback HL7v2 messages request.", + "id": "RollbackHl7V2MessagesResponse", + "properties": { + "hl7v2Store": { + "description": "The name of the HL7v2 store to roll back, in the format of \"projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/hl7v2Stores/{hl7v2_store_id}\".", + "type": "string" + } + }, + "type": "object" + }, "SchemaConfig": { "description": "Configuration for the FHIR BigQuery schema. Determines how the server generates the schema.", "id": "SchemaConfig", @@ -7235,18 +7624,18 @@ "id": "SchemaPackage", "properties": { "ignoreMinOccurs": { - "description": "Flag to ignore all min_occurs restrictions in the schema. This means that incoming messages can omit any group, segment, field, component, or subcomponent.", + "description": "Optional. Flag to ignore all min_occurs restrictions in the schema. This means that incoming messages can omit any group, segment, field, component, or subcomponent.", "type": "boolean" }, "schemas": { - "description": "Schema configs that are layered based on their VersionSources that match the incoming message. Schema configs present in higher indices override those in lower indices with the same message type and trigger event if their VersionSources all match an incoming message.", + "description": "Optional. Schema configs that are layered based on their VersionSources that match the incoming message. 
Schema configs present in higher indices override those in lower indices with the same message type and trigger event if their VersionSources all match an incoming message.", "items": { "$ref": "Hl7SchemaConfig" }, "type": "array" }, "schematizedParsingType": { - "description": "Determines how messages that fail to parse are handled.", + "description": "Optional. Determines how messages that fail to parse are handled.", "enum": [ "SCHEMATIZED_PARSING_TYPE_UNSPECIFIED", "SOFT_FAIL", @@ -7260,14 +7649,14 @@ "type": "string" }, "types": { - "description": "Schema type definitions that are layered based on their VersionSources that match the incoming message. Type definitions present in higher indices override those in lower indices with the same type name if their VersionSources all match an incoming message.", + "description": "Optional. Schema type definitions that are layered based on their VersionSources that match the incoming message. Type definitions present in higher indices override those in lower indices with the same type name if their VersionSources all match an incoming message.", "items": { "$ref": "Hl7TypesConfig" }, "type": "array" }, "unexpectedSegmentHandling": { - "description": "Determines how unexpected segments (segments not matched to the schema) are handled.", + "description": "Optional. Determines how unexpected segments (segments not matched to the schema) are handled.", "enum": [ "UNEXPECTED_SEGMENT_HANDLING_MODE_UNSPECIFIED", "FAIL", @@ -7380,6 +7769,27 @@ }, "type": "object" }, + "SetBlobStorageSettingsRequest": { + "description": "Request message for `SetBlobStorageSettings` method.", + "id": "SetBlobStorageSettingsRequest", + "properties": { + "blobStorageSettings": { + "$ref": "BlobStorageSettings", + "description": "The blob storage settings to update for the specified resources. Only fields listed in `update_mask` are applied." + }, + "filterConfig": { + "$ref": "DicomFilterConfig", + "description": "Optional. A filter configuration. If `filter_config` is specified, set the value of `resource` to the resource name of a DICOM store in the format `projects/{projectID}/locations/{locationID}/datasets/{datasetID}/dicomStores/{dicomStoreID}`." + } + }, + "type": "object" + }, + "SetBlobStorageSettingsResponse": { + "description": "Returns additional info in regards to a completed set blob storage settings API.", + "id": "SetBlobStorageSettingsResponse", + "properties": {}, + "type": "object" + }, "SetIamPolicyRequest": { "description": "Request message for `SetIamPolicy` method.", "id": "SetIamPolicyRequest", @@ -7450,20 +7860,39 @@ }, "type": "object" }, + "StorageInfo": { + "description": "StorageInfo encapsulates all the storage info of a resource.", + "id": "StorageInfo", + "properties": { + "blobStorageInfo": { + "$ref": "BlobStorageInfo", + "description": "Info about the data stored in blob storage for the resource." + }, + "referencedResource": { + "description": "The resource whose storage info is returned. For example: `projects/{projectID}/locations/{locationID}/datasets/{datasetID}/dicomStores/{dicomStoreID}/dicomWeb/studies/{studyUID}/series/{seriesUID}/instances/{instanceUID}`", + "type": "string" + }, + "structuredStorageInfo": { + "$ref": "StructuredStorageInfo", + "description": "Info about the data stored in structured storage for the resource." 
+ } + }, + "type": "object" + }, "StreamConfig": { "description": "Contains configuration for streaming FHIR export.", "id": "StreamConfig", "properties": { "bigqueryDestination": { "$ref": "GoogleCloudHealthcareV1FhirBigQueryDestination", - "description": "The destination BigQuery structure that contains both the dataset location and corresponding schema config. The output is organized in one table per resource type. The server reuses the existing tables (if any) that are named after the resource types. For example, \"Patient\", \"Observation\". When there is no existing table for a given resource type, the server attempts to create one. When a table schema doesn't align with the schema config, either because of existing incompatible schema or out of band incompatible modification, the server does not stream in new data. BigQuery imposes a 1 MB limit on streaming insert row size, therefore any resource mutation that generates more than 1 MB of BigQuery data is not streamed. One resolution in this case is to delete the incompatible table and let the server recreate one, though the newly created table only contains data after the table recreation. Results are written to BigQuery tables according to the parameters in BigQueryDestination.WriteDisposition. Different versions of the same resource are distinguishable by the meta.versionId and meta.lastUpdated columns. The operation (CREATE/UPDATE/DELETE) that results in the new version is recorded in the meta.tag. The tables contain all historical resource versions since streaming was enabled. For query convenience, the server also creates one view per table of the same name containing only the current resource version. The streamed data in the BigQuery dataset is not guaranteed to be completely unique. The combination of the id and meta.versionId columns should ideally identify a single unique row. But in rare cases, duplicates may exist. At query time, users may use the SQL select statement to keep only one of the duplicate rows given an id and meta.versionId pair. Alternatively, the server created view mentioned above also filters out duplicates. If a resource mutation cannot be streamed to BigQuery, errors are logged to Cloud Logging. For more information, see [Viewing error logs in Cloud Logging](https://cloud.google.com/healthcare/docs/how-tos/logging))." + "description": "Optional. The destination BigQuery structure that contains both the dataset location and corresponding schema config. The output is organized in one table per resource type. The server reuses the existing tables (if any) that are named after the resource types. For example, \"Patient\", \"Observation\". When there is no existing table for a given resource type, the server attempts to create one. When a table schema doesn't align with the schema config, either because of existing incompatible schema or out of band incompatible modification, the server does not stream in new data. BigQuery imposes a 1 MB limit on streaming insert row size, therefore any resource mutation that generates more than 1 MB of BigQuery data is not streamed. One resolution in this case is to delete the incompatible table and let the server recreate one, though the newly created table only contains data after the table recreation. Results are written to BigQuery tables according to the parameters in BigQueryDestination.WriteDisposition. Different versions of the same resource are distinguishable by the meta.versionId and meta.lastUpdated columns. 
The operation (CREATE/UPDATE/DELETE) that results in the new version is recorded in the meta.tag. The tables contain all historical resource versions since streaming was enabled. For query convenience, the server also creates one view per table of the same name containing only the current resource version. The streamed data in the BigQuery dataset is not guaranteed to be completely unique. The combination of the id and meta.versionId columns should ideally identify a single unique row. But in rare cases, duplicates may exist. At query time, users may use the SQL select statement to keep only one of the duplicate rows given an id and meta.versionId pair. Alternatively, the server created view mentioned above also filters out duplicates. If a resource mutation cannot be streamed to BigQuery, errors are logged to Cloud Logging. For more information, see [Viewing error logs in Cloud Logging](https://cloud.google.com/healthcare/docs/how-tos/logging))." }, "deidentifiedStoreDestination": { "$ref": "DeidentifiedStoreDestination", "description": "The destination FHIR store for de-identified resources. After this field is added, all subsequent creates/updates/patches to the source store will be de-identified using the provided configuration and applied to the destination store. Importing resources to the source store will not trigger the streaming. If the source store already contains resources when this option is enabled, those resources will not be copied to the destination store unless they are subsequently updated. This may result in invalid references in the destination store. Before adding this config, you must grant the healthcare.fhirResources.update permission on the destination store to your project's **Cloud Healthcare Service Agent** [service account](https://cloud.google.com/healthcare/docs/how-tos/permissions-healthcare-api-gcp-products#the_cloud_healthcare_service_agent). The destination store must set enable_update_create to true. The destination store must have disable_referential_integrity set to true. If a resource cannot be de-identified, errors will be logged to Cloud Logging (see [Viewing error logs in Cloud Logging](https://cloud.google.com/healthcare/docs/how-tos/logging))." }, "resourceTypes": { - "description": "Supply a FHIR resource type (such as \"Patient\" or \"Observation\"). See https://www.hl7.org/fhir/valueset-resource-types.html for a list of all FHIR resource types. The server treats an empty list as an intent to stream all the supported resource types in this FHIR store.", + "description": "Optional. Supply a FHIR resource type (such as \"Patient\" or \"Observation\"). See https://www.hl7.org/fhir/valueset-resource-types.html for a list of all FHIR resource types. 
The server treats an empty list as an intent to stream all the supported resource types in this FHIR store.", "items": { "type": "string" }, @@ -7472,6 +7901,18 @@ }, "type": "object" }, + "StructuredStorageInfo": { + "description": "StructuredStorageInfo contains details about the data stored in Structured Storage for the referenced resource.", + "id": "StructuredStorageInfo", + "properties": { + "sizeBytes": { + "description": "Size in bytes of data stored in structured storage.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, "StudyMetrics": { "description": "StudyMetrics contains metrics describing a DICOM study.", "id": "StudyMetrics", @@ -7694,19 +8135,19 @@ "id": "ValidationConfig", "properties": { "disableFhirpathValidation": { - "description": "Whether to disable FHIRPath validation for incoming resources. Set this to true to disable checking incoming resources for conformance against FHIRPath requirement defined in the FHIR specification. This property only affects resource types that do not have profiles configured for them, any rules in enabled implementation guides will still be enforced.", + "description": "Whether to disable FHIRPath validation for incoming resources. The default value is false. Set this to true to disable checking incoming resources for conformance against FHIRPath requirement defined in the FHIR specification. This property only affects resource types that do not have profiles configured for them, any rules in enabled implementation guides will still be enforced.", "type": "boolean" }, "disableProfileValidation": { - "description": "Whether to disable profile validation for this FHIR store. Set this to true to disable checking incoming resources for conformance against structure definitions in this FHIR store.", + "description": "Whether to disable profile validation for this FHIR store. The default value is false. Set this to true to disable checking incoming resources for conformance against structure definitions in this FHIR store.", "type": "boolean" }, "disableReferenceTypeValidation": { - "description": "Whether to disable reference type validation for incoming resources. Set this to true to disable checking incoming resources for conformance against reference type requirement defined in the FHIR specification. This property only affects resource types that do not have profiles configured for them, any rules in enabled implementation guides will still be enforced.", + "description": "Whether to disable reference type validation for incoming resources. The default value is false. Set this to true to disable checking incoming resources for conformance against reference type requirement defined in the FHIR specification. This property only affects resource types that do not have profiles configured for them, any rules in enabled implementation guides will still be enforced.", "type": "boolean" }, "disableRequiredFieldValidation": { - "description": "Whether to disable required fields validation for incoming resources. Set this to true to disable checking incoming resources for conformance against required fields requirement defined in the FHIR specification. This property only affects resource types that do not have profiles configured for them, any rules in enabled implementation guides will still be enforced.", + "description": "Whether to disable required fields validation for incoming resources. The default value is false. 
Set this to true to disable checking incoming resources for conformance against required fields requirement defined in the FHIR specification. This property only affects resource types that do not have profiles configured for them, any rules in enabled implementation guides will still be enforced.", "type": "boolean" }, "enabledImplementationGuides": { diff --git a/terraform/providers/google/vendor/google.golang.org/api/healthcare/v1/healthcare-gen.go b/terraform/providers/google/vendor/google.golang.org/api/healthcare/v1/healthcare-gen.go index ff06cf23cb6..ae77e8cffae 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/healthcare/v1/healthcare-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/healthcare/v1/healthcare-gen.go @@ -324,11 +324,23 @@ type ProjectsLocationsDatasetsDicomStoresDicomWebStudiesService struct { func NewProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesService(s *Service) *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesService { rs := &ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesService{s: s} + rs.Instances = NewProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesInstancesService(s) return rs } type ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesService struct { s *Service + + Instances *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesInstancesService +} + +func NewProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesInstancesService(s *Service) *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesInstancesService { + rs := &ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesInstancesService{s: s} + return rs +} + +type ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesInstancesService struct { + s *Service } func NewProjectsLocationsDatasetsDicomStoresStudiesService(s *Service) *ProjectsLocationsDatasetsDicomStoresStudiesService { @@ -477,9 +489,9 @@ type ActivateConsentRequest struct { NullFields []string `json:"-"` } -func (s *ActivateConsentRequest) MarshalJSON() ([]byte, error) { +func (s ActivateConsentRequest) MarshalJSON() ([]byte, error) { type NoMethod ActivateConsentRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AnalyzeEntitiesRequest: The request to analyze healthcare entities in a @@ -516,9 +528,9 @@ type AnalyzeEntitiesRequest struct { NullFields []string `json:"-"` } -func (s *AnalyzeEntitiesRequest) MarshalJSON() ([]byte, error) { +func (s AnalyzeEntitiesRequest) MarshalJSON() ([]byte, error) { type NoMethod AnalyzeEntitiesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AnalyzeEntitiesResponse: Includes recognized entity mentions and @@ -554,9 +566,9 @@ type AnalyzeEntitiesResponse struct { NullFields []string `json:"-"` } -func (s *AnalyzeEntitiesResponse) MarshalJSON() ([]byte, error) { +func (s AnalyzeEntitiesResponse) MarshalJSON() ([]byte, error) { type NoMethod AnalyzeEntitiesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ArchiveUserDataMappingRequest: Archives the specified User data mapping. 
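Across these hunks the generator switches every MarshalJSON from a pointer receiver (func (s *T)) to a value receiver (func (s T)) and drops the NoMethod(*s) dereference accordingly. The NoMethod alias is what keeps these methods from recursing into themselves. A minimal, self-contained sketch of the pattern follows; the Example type and the hard-coded ForceSendFields handling are invented stand-ins for the vendored gensupport.MarshalJSON helper, not code from this patch.

package main

import (
	"encoding/json"
	"fmt"
)

type Example struct {
	Name  string `json:"name,omitempty"`
	Count int64  `json:"count,omitempty"`
	// ForceSendFields mirrors the generated structs: fields listed here are
	// sent even when they hold their zero value.
	ForceSendFields []string `json:"-"`
}

// Value receiver, matching the updated generator output; a bare Example and
// an *Example now both route through this method.
func (s Example) MarshalJSON() ([]byte, error) {
	// NoMethod has Example's fields but none of its methods, so the
	// json.Marshal call below uses the default struct encoding instead of
	// recursing back into this MarshalJSON.
	type NoMethod Example
	raw, err := json.Marshal(NoMethod(s))
	if err != nil {
		return nil, err
	}
	// Simplified stand-in for gensupport.MarshalJSON: re-insert zero-valued
	// fields named in ForceSendFields (only Count is handled here).
	var m map[string]json.RawMessage
	if err := json.Unmarshal(raw, &m); err != nil {
		return nil, err
	}
	for _, f := range s.ForceSendFields {
		if f == "Count" {
			m["count"] = json.RawMessage("0")
		}
	}
	return json.Marshal(m)
}

func main() {
	b, _ := json.Marshal(Example{Name: "n", ForceSendFields: []string{"Count"}})
	fmt.Println(string(b)) // {"count":0,"name":"n"}
}

One observable effect of the receiver change: with the old pointer receiver, marshaling a non-pointer struct value bypassed the custom method entirely, whereas the value receiver makes both forms take the same path.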
@@ -595,9 +607,9 @@ type Attribute struct { NullFields []string `json:"-"` } -func (s *Attribute) MarshalJSON() ([]byte, error) { +func (s Attribute) MarshalJSON() ([]byte, error) { type NoMethod Attribute - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AttributeDefinition: A client-defined consent attribute. @@ -648,9 +660,9 @@ type AttributeDefinition struct { NullFields []string `json:"-"` } -func (s *AttributeDefinition) MarshalJSON() ([]byte, error) { +func (s AttributeDefinition) MarshalJSON() ([]byte, error) { type NoMethod AttributeDefinition - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuditConfig: Specifies the audit configuration for a service. The @@ -689,9 +701,9 @@ type AuditConfig struct { NullFields []string `json:"-"` } -func (s *AuditConfig) MarshalJSON() ([]byte, error) { +func (s AuditConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuditLogConfig: Provides the configuration for logging a type of @@ -724,9 +736,9 @@ type AuditLogConfig struct { NullFields []string `json:"-"` } -func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { +func (s AuditLogConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditLogConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Binding: Associates `members`, or principals, with a `role`. @@ -823,9 +835,86 @@ type Binding struct { NullFields []string `json:"-"` } -func (s *Binding) MarshalJSON() ([]byte, error) { +func (s Binding) MarshalJSON() ([]byte, error) { type NoMethod Binding - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BlobStorageInfo: BlobStorageInfo contains details about the data stored in +// Blob Storage for the referenced resource. Note: Storage class is only valid +// for DICOM and hence will only be populated for DICOM resources. +type BlobStorageInfo struct { + // SizeBytes: Size in bytes of data stored in Blob Storage. + SizeBytes int64 `json:"sizeBytes,omitempty,string"` + // StorageClass: The storage class in which the Blob data is stored. + // + // Possible values: + // "BLOB_STORAGE_CLASS_UNSPECIFIED" - If unspecified in CreateDataset, the + // StorageClass defaults to STANDARD. If unspecified in UpdateDataset and the + // StorageClass is set in the field mask, an InvalidRequest error is thrown. + // "STANDARD" - This stores the Object in Blob Standard Storage: + // https://cloud.google.com/storage/docs/storage-classes#standard + // "NEARLINE" - This stores the Object in Blob Nearline Storage: + // https://cloud.google.com/storage/docs/storage-classes#nearline + // "COLDLINE" - This stores the Object in Blob Coldline Storage: + // https://cloud.google.com/storage/docs/storage-classes#coldline + // "ARCHIVE" - This stores the Object in Blob Archive Storage: + // https://cloud.google.com/storage/docs/storage-classes#archive + StorageClass string `json:"storageClass,omitempty"` + // StorageClassUpdateTime: The time at which the storage class was updated. 
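The hunk above introduces BlobStorageSettings alongside BlobStorageInfo. A hedged sketch of populating it follows: the type, its BlobStorageClass field, and the legal enum strings come straight from this patch, while the choice of ARCHIVE and the round trip through encoding/json are illustrative only.

package main

import (
	"encoding/json"
	"fmt"

	healthcare "google.golang.org/api/healthcare/v1"
)

func main() {
	// Request that blob data be kept in Archive storage; the valid values
	// are STANDARD, NEARLINE, COLDLINE, and ARCHIVE per the enum above.
	settings := &healthcare.BlobStorageSettings{
		BlobStorageClass: "ARCHIVE",
	}
	b, err := json.Marshal(settings)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"blobStorageClass":"ARCHIVE"}
}

This is the same payload type that the discovery document earlier in the patch wires into ImportDicomDataRequest.blobStorageSettings and SetBlobStorageSettingsRequest.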
+ // This is used to compute early deletion fees of the resource. + StorageClassUpdateTime string `json:"storageClassUpdateTime,omitempty"` + // ForceSendFields is a list of field names (e.g. "SizeBytes") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "SizeBytes") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BlobStorageInfo) MarshalJSON() ([]byte, error) { + type NoMethod BlobStorageInfo + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BlobStorageSettings: Settings for data stored in Blob storage. +type BlobStorageSettings struct { + // BlobStorageClass: The Storage class in which the Blob data is stored. + // + // Possible values: + // "BLOB_STORAGE_CLASS_UNSPECIFIED" - If unspecified in CreateDataset, the + // StorageClass defaults to STANDARD. If unspecified in UpdateDataset and the + // StorageClass is set in the field mask, an InvalidRequest error is thrown. + // "STANDARD" - This stores the Object in Blob Standard Storage: + // https://cloud.google.com/storage/docs/storage-classes#standard + // "NEARLINE" - This stores the Object in Blob Nearline Storage: + // https://cloud.google.com/storage/docs/storage-classes#nearline + // "COLDLINE" - This stores the Object in Blob Coldline Storage: + // https://cloud.google.com/storage/docs/storage-classes#coldline + // "ARCHIVE" - This stores the Object in Blob Archive Storage: + // https://cloud.google.com/storage/docs/storage-classes#archive + BlobStorageClass string `json:"blobStorageClass,omitempty"` + // ForceSendFields is a list of field names (e.g. "BlobStorageClass") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "BlobStorageClass") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BlobStorageSettings) MarshalJSON() ([]byte, error) { + type NoMethod BlobStorageSettings + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CancelOperationRequest: The request message for Operations.CancelOperation. 
@@ -851,9 +940,9 @@ type CharacterMaskConfig struct { NullFields []string `json:"-"` } -func (s *CharacterMaskConfig) MarshalJSON() ([]byte, error) { +func (s CharacterMaskConfig) MarshalJSON() ([]byte, error) { type NoMethod CharacterMaskConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CheckDataAccessRequest: Checks if a particular data_id of a User data @@ -900,9 +989,9 @@ type CheckDataAccessRequest struct { NullFields []string `json:"-"` } -func (s *CheckDataAccessRequest) MarshalJSON() ([]byte, error) { +func (s CheckDataAccessRequest) MarshalJSON() ([]byte, error) { type NoMethod CheckDataAccessRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CheckDataAccessResponse: Checks if a particular data_id of a User data @@ -929,9 +1018,9 @@ type CheckDataAccessResponse struct { NullFields []string `json:"-"` } -func (s *CheckDataAccessResponse) MarshalJSON() ([]byte, error) { +func (s CheckDataAccessResponse) MarshalJSON() ([]byte, error) { type NoMethod CheckDataAccessResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Consent: Represents a user's consent. @@ -1004,9 +1093,9 @@ type Consent struct { NullFields []string `json:"-"` } -func (s *Consent) MarshalJSON() ([]byte, error) { +func (s Consent) MarshalJSON() ([]byte, error) { type NoMethod Consent - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ConsentArtifact: Documentation of a user's consent. @@ -1049,9 +1138,9 @@ type ConsentArtifact struct { NullFields []string `json:"-"` } -func (s *ConsentArtifact) MarshalJSON() ([]byte, error) { +func (s ConsentArtifact) MarshalJSON() ([]byte, error) { type NoMethod ConsentArtifact - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ConsentEvaluation: The detailed evaluation of a particular Consent. @@ -1087,9 +1176,9 @@ type ConsentEvaluation struct { NullFields []string `json:"-"` } -func (s *ConsentEvaluation) MarshalJSON() ([]byte, error) { +func (s ConsentEvaluation) MarshalJSON() ([]byte, error) { type NoMethod ConsentEvaluation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ConsentList: List of resource names of Consent resources. @@ -1112,9 +1201,9 @@ type ConsentList struct { NullFields []string `json:"-"` } -func (s *ConsentList) MarshalJSON() ([]byte, error) { +func (s ConsentList) MarshalJSON() ([]byte, error) { type NoMethod ConsentList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ConsentStore: Represents a consent store. 
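Every struct in these hunks repeats the ForceSendFields/NullFields contract, and the Dataset hunk just below regenerates those comments around the new EncryptionSpec field. As a hedged illustration of what NullFields does on the wire, here is a sketch against the Dataset type from this patch; the resource name is a placeholder, and the printed output assumes gensupport's map-based encoding when NullFields is non-empty.

package main

import (
	"encoding/json"
	"fmt"

	healthcare "google.golang.org/api/healthcare/v1"
)

func main() {
	ds := &healthcare.Dataset{
		Name: "projects/my-project/locations/us-central1/datasets/my-dataset",
		// TimeZone is empty, so `omitempty` would normally drop it; listing
		// it in NullFields makes the request body carry an explicit null,
		// which is typically how an update clears the field.
		NullFields: []string{"TimeZone"},
	}
	b, err := json.Marshal(ds)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"name":"projects/my-project/locations/us-central1/datasets/my-dataset","timeZone":null}
}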
@@ -1156,9 +1245,9 @@ type ConsentStore struct { NullFields []string `json:"-"` } -func (s *ConsentStore) MarshalJSON() ([]byte, error) { +func (s ConsentStore) MarshalJSON() ([]byte, error) { type NoMethod ConsentStore - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateMessageRequest: Creates a new message. @@ -1178,9 +1267,9 @@ type CreateMessageRequest struct { NullFields []string `json:"-"` } -func (s *CreateMessageRequest) MarshalJSON() ([]byte, error) { +func (s CreateMessageRequest) MarshalJSON() ([]byte, error) { type NoMethod CreateMessageRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CryptoHashConfig: Pseudonymization method that generates surrogates via @@ -1208,9 +1297,9 @@ type CryptoHashConfig struct { NullFields []string `json:"-"` } -func (s *CryptoHashConfig) MarshalJSON() ([]byte, error) { +func (s CryptoHashConfig) MarshalJSON() ([]byte, error) { type NoMethod CryptoHashConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Dataset: A message representing a health dataset. A health dataset @@ -1218,33 +1307,38 @@ func (s *CryptoHashConfig) MarshalJSON() ([]byte, error) { // patients. This may include multiple modalities of healthcare data, such as // electronic medical records or medical imaging data. type Dataset struct { + // EncryptionSpec: Optional. Customer-managed encryption key spec for a + // Dataset. If set, this Dataset and all of its sub-resources will be secured + // by this key. If empty, the Dataset is secured by the default Google + // encryption key. + EncryptionSpec *EncryptionSpec `json:"encryptionSpec,omitempty"` // Name: Identifier. Resource name of the dataset, of the form // `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`. Name string `json:"name,omitempty"` - // TimeZone: The default timezone used by this dataset. Must be a either a - // valid IANA time zone name such as "America/New_York" or empty, which - // defaults to UTC. This is used for parsing times in resources, such as HL7 - // messages, where no explicit timezone is specified. + // TimeZone: Optional. The default timezone used by this dataset. Must be + // either a valid IANA time zone name such as "America/New_York" or empty, + // which defaults to UTC. This is used for parsing times in resources, such as + // HL7 messages, where no explicit timezone is specified. TimeZone string `json:"timeZone,omitempty"` // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Name") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See + // ForceSendFields is a list of field names (e.g. "EncryptionSpec") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Name") to include in API requests - // with the JSON null value. 
By default, fields with empty values are omitted - // from API requests. See + // NullFields is a list of field names (e.g. "EncryptionSpec") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *Dataset) MarshalJSON() ([]byte, error) { +func (s Dataset) MarshalJSON() ([]byte, error) { type NoMethod Dataset - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DateShiftConfig: Shift a date forward or backward in time by a random amount @@ -1275,9 +1369,9 @@ type DateShiftConfig struct { NullFields []string `json:"-"` } -func (s *DateShiftConfig) MarshalJSON() ([]byte, error) { +func (s DateShiftConfig) MarshalJSON() ([]byte, error) { type NoMethod DateShiftConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DeidentifiedStoreDestination: Contains configuration for streaming @@ -1303,9 +1397,9 @@ type DeidentifiedStoreDestination struct { NullFields []string `json:"-"` } -func (s *DeidentifiedStoreDestination) MarshalJSON() ([]byte, error) { +func (s DeidentifiedStoreDestination) MarshalJSON() ([]byte, error) { type NoMethod DeidentifiedStoreDestination - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DeidentifyConfig: Configures de-id options specific to different types of @@ -1324,11 +1418,11 @@ type DeidentifyConfig struct { // source_dataset. Text *TextConfig `json:"text,omitempty"` // UseRegionalDataProcessing: Ensures in-flight data remains in the region of - // origin during de-identification. Using this option results in a significant - // reduction of throughput, and is not compatible with `LOCATION` or - // `ORGANIZATION_NAME` infoTypes. `LOCATION` must be excluded within - // TextConfig, and must also be excluded within ImageConfig if image redaction - // is required. + // origin during de-identification. The default value is false. Using this + // option results in a significant reduction of throughput, and is not + // compatible with `LOCATION` or `ORGANIZATION_NAME` infoTypes. `LOCATION` must + // be excluded within TextConfig, and must also be excluded within ImageConfig + // if image redaction is required. UseRegionalDataProcessing bool `json:"useRegionalDataProcessing,omitempty"` // ForceSendFields is a list of field names (e.g. "Dicom") to unconditionally // include in API requests. 
By default, fields with empty or default values are @@ -1343,9 +1437,9 @@ type DeidentifyConfig struct { NullFields []string `json:"-"` } -func (s *DeidentifyConfig) MarshalJSON() ([]byte, error) { +func (s DeidentifyConfig) MarshalJSON() ([]byte, error) { type NoMethod DeidentifyConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DeidentifyDatasetRequest: Redacts identifying information from the specified @@ -1379,9 +1473,9 @@ type DeidentifyDatasetRequest struct { NullFields []string `json:"-"` } -func (s *DeidentifyDatasetRequest) MarshalJSON() ([]byte, error) { +func (s DeidentifyDatasetRequest) MarshalJSON() ([]byte, error) { type NoMethod DeidentifyDatasetRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DeidentifyDicomStoreRequest: Creates a new DICOM store with sensitive @@ -1421,9 +1515,9 @@ type DeidentifyDicomStoreRequest struct { NullFields []string `json:"-"` } -func (s *DeidentifyDicomStoreRequest) MarshalJSON() ([]byte, error) { +func (s DeidentifyDicomStoreRequest) MarshalJSON() ([]byte, error) { type NoMethod DeidentifyDicomStoreRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DeidentifyFhirStoreRequest: Creates a new FHIR store with sensitive @@ -1468,9 +1562,9 @@ type DeidentifyFhirStoreRequest struct { NullFields []string `json:"-"` } -func (s *DeidentifyFhirStoreRequest) MarshalJSON() ([]byte, error) { +func (s DeidentifyFhirStoreRequest) MarshalJSON() ([]byte, error) { type NoMethod DeidentifyFhirStoreRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DeidentifySummary: Contains a summary of the Deidentify operation. @@ -1524,9 +1618,9 @@ type DicomConfig struct { NullFields []string `json:"-"` } -func (s *DicomConfig) MarshalJSON() ([]byte, error) { +func (s DicomConfig) MarshalJSON() ([]byte, error) { type NoMethod DicomConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DicomFilterConfig: Specifies the filter configuration for DICOM resources. @@ -1552,9 +1646,9 @@ type DicomFilterConfig struct { NullFields []string `json:"-"` } -func (s *DicomFilterConfig) MarshalJSON() ([]byte, error) { +func (s DicomFilterConfig) MarshalJSON() ([]byte, error) { type NoMethod DicomFilterConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DicomStore: Represents a DICOM store. @@ -1572,8 +1666,8 @@ type DicomStore struct { // `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomSto // res/{dicom_store_id}`. Name string `json:"name,omitempty"` - // NotificationConfig: Notification destination for new DICOM instances. - // Supplied by the client. + // NotificationConfig: Optional. Notification destination for new DICOM + // instances. Supplied by the client. NotificationConfig *NotificationConfig `json:"notificationConfig,omitempty"` // StreamConfigs: Optional. 
A list of streaming configs used to configure the // destination of streaming exports for every DICOM instance insertion in this @@ -1598,9 +1692,9 @@ type DicomStore struct { NullFields []string `json:"-"` } -func (s *DicomStore) MarshalJSON() ([]byte, error) { +func (s DicomStore) MarshalJSON() ([]byte, error) { type NoMethod DicomStore - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DicomStoreMetrics: DicomStoreMetrics contains metrics describing a DICOM @@ -1638,9 +1732,9 @@ type DicomStoreMetrics struct { NullFields []string `json:"-"` } -func (s *DicomStoreMetrics) MarshalJSON() ([]byte, error) { +func (s DicomStoreMetrics) MarshalJSON() ([]byte, error) { type NoMethod DicomStoreMetrics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Empty: A generic empty message that you can re-use to avoid defining @@ -1652,6 +1746,33 @@ type Empty struct { googleapi.ServerResponse `json:"-"` } +// EncryptionSpec: Represents a customer-managed encryption key spec that can +// be applied to a resource. +type EncryptionSpec struct { + // KmsKeyName: Required. The resource name of customer-managed encryption key + // that is used to secure a resource and its sub-resources. Only the key in the + // same location as this Dataset is allowed to be used for encryption. Format + // is: + // `projects/{project}/locations/{location}/keyRings/{keyRing}/cryptoKeys/{key}` + KmsKeyName string `json:"kmsKeyName,omitempty"` + // ForceSendFields is a list of field names (e.g. "KmsKeyName") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "KmsKeyName") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s EncryptionSpec) MarshalJSON() ([]byte, error) { + type NoMethod EncryptionSpec + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + // Entity: The candidate entities that an entity mention could link to. type Entity struct { // EntityId: entity_id is a first class field entity_id uniquely identifies @@ -1681,9 +1802,9 @@ type Entity struct { NullFields []string `json:"-"` } -func (s *Entity) MarshalJSON() ([]byte, error) { +func (s Entity) MarshalJSON() ([]byte, error) { type NoMethod Entity - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // EntityMention: An entity mention in the document. 
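// Illustrative sketch, not part of the vendored diff: the EncryptionSpec type
// introduced above lets a Dataset carry a customer-managed encryption key
// (CMEK) instead of the default Google-managed key. The project, location,
// and key names below are hypothetical placeholders, and the import path
// assumes the usual layout of this generated client.
package main

import (
	"encoding/json"
	"fmt"

	healthcare "google.golang.org/api/healthcare/v1"
)

func main() {
	// Per the field docs above, the key must live in the same location as
	// the Dataset it secures.
	ds := &healthcare.Dataset{
		Name: "projects/my-proj/locations/us-central1/datasets/ds1",
		EncryptionSpec: &healthcare.EncryptionSpec{
			KmsKeyName: "projects/my-proj/locations/us-central1/keyRings/kr/cryptoKeys/k1",
		},
	}
	b, _ := json.Marshal(ds)
	fmt.Println(string(b)) // {"encryptionSpec":{"kmsKeyName":"..."},"name":"..."}
}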
@@ -1732,9 +1853,9 @@ type EntityMention struct { NullFields []string `json:"-"` } -func (s *EntityMention) MarshalJSON() ([]byte, error) { +func (s EntityMention) MarshalJSON() ([]byte, error) { type NoMethod EntityMention - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *EntityMention) UnmarshalJSON(data []byte) error { @@ -1774,9 +1895,9 @@ type EntityMentionRelationship struct { NullFields []string `json:"-"` } -func (s *EntityMentionRelationship) MarshalJSON() ([]byte, error) { +func (s EntityMentionRelationship) MarshalJSON() ([]byte, error) { type NoMethod EntityMentionRelationship - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *EntityMentionRelationship) UnmarshalJSON(data []byte) error { @@ -1846,9 +1967,9 @@ type EvaluateUserConsentsRequest struct { NullFields []string `json:"-"` } -func (s *EvaluateUserConsentsRequest) MarshalJSON() ([]byte, error) { +func (s EvaluateUserConsentsRequest) MarshalJSON() ([]byte, error) { type NoMethod EvaluateUserConsentsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type EvaluateUserConsentsResponse struct { @@ -1874,9 +1995,9 @@ type EvaluateUserConsentsResponse struct { NullFields []string `json:"-"` } -func (s *EvaluateUserConsentsResponse) MarshalJSON() ([]byte, error) { +func (s EvaluateUserConsentsResponse) MarshalJSON() ([]byte, error) { type NoMethod EvaluateUserConsentsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExportDicomDataRequest: Exports data from the specified DICOM store. If a @@ -1908,9 +2029,9 @@ type ExportDicomDataRequest struct { NullFields []string `json:"-"` } -func (s *ExportDicomDataRequest) MarshalJSON() ([]byte, error) { +func (s ExportDicomDataRequest) MarshalJSON() ([]byte, error) { type NoMethod ExportDicomDataRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExportDicomDataResponse: Returns additional information in regards to a @@ -1991,9 +2112,9 @@ type ExportMessagesRequest struct { NullFields []string `json:"-"` } -func (s *ExportMessagesRequest) MarshalJSON() ([]byte, error) { +func (s ExportMessagesRequest) MarshalJSON() ([]byte, error) { type NoMethod ExportMessagesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExportMessagesResponse: Final response for the export operation. This @@ -2036,9 +2157,9 @@ type ExportResourcesRequest struct { NullFields []string `json:"-"` } -func (s *ExportResourcesRequest) MarshalJSON() ([]byte, error) { +func (s ExportResourcesRequest) MarshalJSON() ([]byte, error) { type NoMethod ExportResourcesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExportResourcesResponse: Response when all resources export successfully. 
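// Illustrative sketch, not part of the vendored diff: the hunks above change
// only MarshalJSON; the generated UnmarshalJSON methods (e.g. on
// EntityMention and Feature) keep their pointer receivers. That asymmetry is
// deliberate: unmarshaling must write into the receiver, so only a pointer
// receiver lets the caller observe the decoded values. "Score" below is a
// hypothetical stand-in (the real client also routes numeric fields through
// gensupport.JSONFloat64, omitted here).
package main

import (
	"encoding/json"
	"fmt"
)

type Score struct {
	Confidence float64 `json:"confidence"`
}

// Pointer receiver: mutations to *s are visible to the caller.
func (s *Score) UnmarshalJSON(data []byte) error {
	type NoMethod Score // strip methods to avoid recursive Unmarshal
	var aux NoMethod
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}
	*s = Score(aux)
	return nil
}

func main() {
	var sc Score
	_ = json.Unmarshal([]byte(`{"confidence":0.97}`), &sc)
	fmt.Println(sc.Confidence) // 0.97
}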
@@ -2090,9 +2211,9 @@ type Expr struct { NullFields []string `json:"-"` } -func (s *Expr) MarshalJSON() ([]byte, error) { +func (s Expr) MarshalJSON() ([]byte, error) { type NoMethod Expr - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Feature: A feature of an entity mention. @@ -2116,9 +2237,9 @@ type Feature struct { NullFields []string `json:"-"` } -func (s *Feature) MarshalJSON() ([]byte, error) { +func (s Feature) MarshalJSON() ([]byte, error) { type NoMethod Feature - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *Feature) UnmarshalJSON(data []byte) error { @@ -2160,9 +2281,9 @@ type FhirConfig struct { NullFields []string `json:"-"` } -func (s *FhirConfig) MarshalJSON() ([]byte, error) { +func (s FhirConfig) MarshalJSON() ([]byte, error) { type NoMethod FhirConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FhirFilter: Filter configuration. @@ -2183,16 +2304,16 @@ type FhirFilter struct { NullFields []string `json:"-"` } -func (s *FhirFilter) MarshalJSON() ([]byte, error) { +func (s FhirFilter) MarshalJSON() ([]byte, error) { type NoMethod FhirFilter - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FhirNotificationConfig: Contains the configuration for FHIR notifications. type FhirNotificationConfig struct { - // PubsubTopic: The Pub/Sub (https://cloud.google.com/pubsub/docs/) topic that - // notifications of changes are published on. Supplied by the client. The - // notification is a `PubsubMessage` with the following fields: * + // PubsubTopic: Optional. The Pub/Sub (https://cloud.google.com/pubsub/docs/) + // topic that notifications of changes are published on. Supplied by the + // client. The notification is a `PubsubMessage` with the following fields: * // `PubsubMessage.Data` contains the resource name. * `PubsubMessage.MessageId` // is the ID of this notification. It is guaranteed to be unique within the // topic. * `PubsubMessage.PublishTime` is the time when the message was @@ -2207,15 +2328,17 @@ type FhirNotificationConfig struct { // Logging. For more information, see Viewing error logs in Cloud Logging // (https://cloud.google.com/healthcare-api/docs/how-tos/logging). PubsubTopic string `json:"pubsubTopic,omitempty"` - // SendFullResource: Whether to send full FHIR resource to this Pub/Sub topic. + // SendFullResource: Optional. Whether to send full FHIR resource to this + // Pub/Sub topic. The default value is false. SendFullResource bool `json:"sendFullResource,omitempty"` - // SendPreviousResourceOnDelete: Whether to send full FHIR resource to this - // Pub/Sub topic for deleting FHIR resource. Note that setting this to true - // does not guarantee that all previous resources will be sent in the format of - // full FHIR resource. When a resource change is too large or during heavy - // traffic, only the resource name will be sent. Clients should always check - // the "payloadType" label from a Pub/Sub message to determine whether it needs - // to fetch the full previous resource as a separate operation. + // SendPreviousResourceOnDelete: Optional. 
Whether to send full FHIR resource + // to this Pub/Sub topic for deleting FHIR resource. The default value is + // false. Note that setting this to true does not guarantee that all previous + // resources will be sent in the format of full FHIR resource. When a resource + // change is too large or during heavy traffic, only the resource name will be + // sent. Clients should always check the "payloadType" label from a Pub/Sub + // message to determine whether it needs to fetch the full previous resource as + // a separate operation. SendPreviousResourceOnDelete bool `json:"sendPreviousResourceOnDelete,omitempty"` // ForceSendFields is a list of field names (e.g. "PubsubTopic") to // unconditionally include in API requests. By default, fields with empty or @@ -2230,9 +2353,9 @@ type FhirNotificationConfig struct { NullFields []string `json:"-"` } -func (s *FhirNotificationConfig) MarshalJSON() ([]byte, error) { +func (s FhirNotificationConfig) MarshalJSON() ([]byte, error) { type NoMethod FhirNotificationConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FhirStore: Represents a FHIR store. @@ -2252,13 +2375,13 @@ type FhirStore struct { // "DISABLED" - References in complex data types are ignored. // "ENABLED" - References in complex data types are parsed. ComplexDataTypeReferenceParsing string `json:"complexDataTypeReferenceParsing,omitempty"` - // DefaultSearchHandlingStrict: If true, overrides the default search behavior - // for this FHIR store to `handling=strict` which returns an error for + // DefaultSearchHandlingStrict: Optional. If true, overrides the default search + // behavior for this FHIR store to `handling=strict` which returns an error for // unrecognized search parameters. If false, uses the FHIR specification // default `handling=lenient` which ignores unrecognized search parameters. The // handling can always be changed from the default on an individual API call by // setting the HTTP header `Prefer: handling=strict` or `Prefer: - // handling=lenient`. + // handling=lenient`. Defaults to false. DefaultSearchHandlingStrict bool `json:"defaultSearchHandlingStrict,omitempty"` // DisableReferentialIntegrity: Immutable. Whether to disable referential // integrity in this FHIR store. This field is immutable after FHIR store @@ -2271,13 +2394,14 @@ type FhirStore struct { DisableReferentialIntegrity bool `json:"disableReferentialIntegrity,omitempty"` // DisableResourceVersioning: Immutable. Whether to disable resource versioning // for this FHIR store. This field can not be changed after the creation of - // FHIR store. If set to false, which is the default behavior, all write - // operations cause historical versions to be recorded automatically. The - // historical versions can be fetched through the history APIs, but cannot be - // updated. If set to true, no historical versions are kept. The server sends - // errors for attempts to read the historical versions. + // FHIR store. If set to false, all write operations cause historical versions + // to be recorded automatically. The historical versions can be fetched through + // the history APIs, but cannot be updated. If set to true, no historical + // versions are kept. The server sends errors for attempts to read the + // historical versions. Defaults to false. 
DisableResourceVersioning bool `json:"disableResourceVersioning,omitempty"` - // EnableUpdateCreate: Whether this FHIR store has the updateCreate capability + // EnableUpdateCreate: Optional. Whether this FHIR store has the updateCreate + // capability // (https://www.hl7.org/fhir/capabilitystatement-definitions.html#CapabilityStatement.rest.resource.updateCreate). // This determines if the client can use an Update operation to create a new // resource with a client-specified ID. If false, all IDs are server-assigned @@ -2286,7 +2410,7 @@ type FhirStore struct { // data such as patient identifiers in client-specified resource IDs. Those IDs // are part of the FHIR resource path recorded in Cloud audit logs and Pub/Sub // notifications. Those IDs can also be contained in reference fields within - // other resources. + // other resources. Defaults to false. EnableUpdateCreate bool `json:"enableUpdateCreate,omitempty"` // Labels: User-supplied key-value pairs used to organize FHIR stores. Label // keys must be between 1 and 63 characters long, have a UTF-8 encoding of @@ -2307,15 +2431,15 @@ type FhirStore struct { // describing the action that has triggered the notification. For example, // "action":"CreateResource". NotificationConfig *NotificationConfig `json:"notificationConfig,omitempty"` - // NotificationConfigs: Specifies where and whether to send notifications upon - // changes to a FHIR store. + // NotificationConfigs: Optional. Specifies where and whether to send + // notifications upon changes to a FHIR store. NotificationConfigs []*FhirNotificationConfig `json:"notificationConfigs,omitempty"` - // StreamConfigs: A list of streaming configs that configure the destinations - // of streaming export for every resource mutation in this FHIR store. Each - // store is allowed to have up to 10 streaming configs. After a new config is - // added, the next resource mutation is streamed to the new location in - // addition to the existing ones. When a location is removed from the list, the - // server stops streaming to that location. Before adding a new config, you + // StreamConfigs: Optional. A list of streaming configs that configure the + // destinations of streaming export for every resource mutation in this FHIR + // store. Each store is allowed to have up to 10 streaming configs. After a new + // config is added, the next resource mutation is streamed to the new location + // in addition to the existing ones. When a location is removed from the list, + // the server stops streaming to that location. 
Before adding a new config, you // must add the required `bigquery.dataEditor` // (https://cloud.google.com/bigquery/docs/access-control#bigquery.dataEditor) // role to your project's **Cloud Healthcare Service Agent** service account @@ -2357,9 +2481,9 @@ type FhirStore struct { NullFields []string `json:"-"` } -func (s *FhirStore) MarshalJSON() ([]byte, error) { +func (s FhirStore) MarshalJSON() ([]byte, error) { type NoMethod FhirStore - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FhirStoreMetric: Count of resources and total storage size by type for a @@ -2385,9 +2509,9 @@ type FhirStoreMetric struct { NullFields []string `json:"-"` } -func (s *FhirStoreMetric) MarshalJSON() ([]byte, error) { +func (s FhirStoreMetric) MarshalJSON() ([]byte, error) { type NoMethod FhirStoreMetric - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FhirStoreMetrics: List of metrics for a given FHIR store. @@ -2413,9 +2537,9 @@ type FhirStoreMetrics struct { NullFields []string `json:"-"` } -func (s *FhirStoreMetrics) MarshalJSON() ([]byte, error) { +func (s FhirStoreMetrics) MarshalJSON() ([]byte, error) { type NoMethod FhirStoreMetrics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Field: A (sub) field of a type. @@ -2446,9 +2570,9 @@ type Field struct { NullFields []string `json:"-"` } -func (s *Field) MarshalJSON() ([]byte, error) { +func (s Field) MarshalJSON() ([]byte, error) { type NoMethod Field - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FieldMetadata: Specifies FHIR paths to match, and how to handle @@ -2485,9 +2609,9 @@ type FieldMetadata struct { NullFields []string `json:"-"` } -func (s *FieldMetadata) MarshalJSON() ([]byte, error) { +func (s FieldMetadata) MarshalJSON() ([]byte, error) { type NoMethod FieldMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GcsDestination: The Cloud Storage output destination. 
The Cloud Healthcare @@ -2534,9 +2658,9 @@ type GcsDestination struct { NullFields []string `json:"-"` } -func (s *GcsDestination) MarshalJSON() ([]byte, error) { +func (s GcsDestination) MarshalJSON() ([]byte, error) { type NoMethod GcsDestination - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GcsSource: Specifies the configuration for importing data from Cloud @@ -2568,9 +2692,9 @@ type GcsSource struct { NullFields []string `json:"-"` } -func (s *GcsSource) MarshalJSON() ([]byte, error) { +func (s GcsSource) MarshalJSON() ([]byte, error) { type NoMethod GcsSource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudHealthcareV1ConsentGcsDestination: The Cloud Storage location for @@ -2595,9 +2719,9 @@ type GoogleCloudHealthcareV1ConsentGcsDestination struct { NullFields []string `json:"-"` } -func (s *GoogleCloudHealthcareV1ConsentGcsDestination) MarshalJSON() ([]byte, error) { +func (s GoogleCloudHealthcareV1ConsentGcsDestination) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudHealthcareV1ConsentGcsDestination - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudHealthcareV1ConsentPolicy: Represents a user's consent in terms @@ -2625,9 +2749,9 @@ type GoogleCloudHealthcareV1ConsentPolicy struct { NullFields []string `json:"-"` } -func (s *GoogleCloudHealthcareV1ConsentPolicy) MarshalJSON() ([]byte, error) { +func (s GoogleCloudHealthcareV1ConsentPolicy) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudHealthcareV1ConsentPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudHealthcareV1DeidentifyDeidentifyDicomStoreSummary: Contains a @@ -2676,9 +2800,9 @@ type GoogleCloudHealthcareV1DicomBigQueryDestination struct { NullFields []string `json:"-"` } -func (s *GoogleCloudHealthcareV1DicomBigQueryDestination) MarshalJSON() ([]byte, error) { +func (s GoogleCloudHealthcareV1DicomBigQueryDestination) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudHealthcareV1DicomBigQueryDestination - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudHealthcareV1DicomGcsDestination: The Cloud Storage location where @@ -2727,9 +2851,9 @@ type GoogleCloudHealthcareV1DicomGcsDestination struct { NullFields []string `json:"-"` } -func (s *GoogleCloudHealthcareV1DicomGcsDestination) MarshalJSON() ([]byte, error) { +func (s GoogleCloudHealthcareV1DicomGcsDestination) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudHealthcareV1DicomGcsDestination - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudHealthcareV1DicomGcsSource: Specifies the configuration for @@ -2761,9 +2885,9 @@ type GoogleCloudHealthcareV1DicomGcsSource struct { NullFields []string `json:"-"` } -func (s *GoogleCloudHealthcareV1DicomGcsSource) MarshalJSON() ([]byte, error) { +func (s GoogleCloudHealthcareV1DicomGcsSource) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudHealthcareV1DicomGcsSource 
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudHealthcareV1DicomStreamConfig: StreamConfig specifies @@ -2804,29 +2928,30 @@ type GoogleCloudHealthcareV1DicomStreamConfig struct { NullFields []string `json:"-"` } -func (s *GoogleCloudHealthcareV1DicomStreamConfig) MarshalJSON() ([]byte, error) { +func (s GoogleCloudHealthcareV1DicomStreamConfig) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudHealthcareV1DicomStreamConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudHealthcareV1FhirBigQueryDestination: The configuration for // exporting to BigQuery. type GoogleCloudHealthcareV1FhirBigQueryDestination struct { - // DatasetUri: BigQuery URI to an existing dataset, up to 2000 characters long, - // in the format `bq://projectId.bqDatasetId`. + // DatasetUri: Optional. BigQuery URI to an existing dataset, up to 2000 + // characters long, in the format `bq://projectId.bqDatasetId`. DatasetUri string `json:"datasetUri,omitempty"` - // Force: If this flag is `TRUE`, all tables are deleted from the dataset - // before the new exported tables are written. If the flag is not set and the - // destination dataset contains tables, the export call returns an error. If - // `write_disposition` is specified, this parameter is ignored. force=false is - // equivalent to write_disposition=WRITE_EMPTY and force=true is equivalent to + // Force: Optional. The default value is false. If this flag is `TRUE`, all + // tables are deleted from the dataset before the new exported tables are + // written. If the flag is not set and the destination dataset contains tables, + // the export call returns an error. If `write_disposition` is specified, this + // parameter is ignored. force=false is equivalent to + // write_disposition=WRITE_EMPTY and force=true is equivalent to // write_disposition=WRITE_TRUNCATE. Force bool `json:"force,omitempty"` - // SchemaConfig: The configuration for the exported BigQuery schema. + // SchemaConfig: Optional. The configuration for the exported BigQuery schema. SchemaConfig *SchemaConfig `json:"schemaConfig,omitempty"` - // WriteDisposition: Determines if existing data in the destination dataset is - // overwritten, appended to, or not written if the tables contain data. If a - // write_disposition is specified, the `force` parameter is ignored. + // WriteDisposition: Optional. Determines if existing data in the destination + // dataset is overwritten, appended to, or not written if the tables contain + // data. If a write_disposition is specified, the `force` parameter is ignored. 
// // Possible values: // "WRITE_DISPOSITION_UNSPECIFIED" - Default behavior is the same as @@ -2849,9 +2974,9 @@ type GoogleCloudHealthcareV1FhirBigQueryDestination struct { NullFields []string `json:"-"` } -func (s *GoogleCloudHealthcareV1FhirBigQueryDestination) MarshalJSON() ([]byte, error) { +func (s GoogleCloudHealthcareV1FhirBigQueryDestination) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudHealthcareV1FhirBigQueryDestination - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudHealthcareV1FhirGcsDestination: The configuration for exporting @@ -2876,9 +3001,9 @@ type GoogleCloudHealthcareV1FhirGcsDestination struct { NullFields []string `json:"-"` } -func (s *GoogleCloudHealthcareV1FhirGcsDestination) MarshalJSON() ([]byte, error) { +func (s GoogleCloudHealthcareV1FhirGcsDestination) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudHealthcareV1FhirGcsDestination - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudHealthcareV1FhirGcsSource: Specifies the configuration for @@ -2910,9 +3035,9 @@ type GoogleCloudHealthcareV1FhirGcsSource struct { NullFields []string `json:"-"` } -func (s *GoogleCloudHealthcareV1FhirGcsSource) MarshalJSON() ([]byte, error) { +func (s GoogleCloudHealthcareV1FhirGcsSource) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudHealthcareV1FhirGcsSource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GroupOrSegment: Construct representing a logical group or a segment. @@ -2932,9 +3057,9 @@ type GroupOrSegment struct { NullFields []string `json:"-"` } -func (s *GroupOrSegment) MarshalJSON() ([]byte, error) { +func (s GroupOrSegment) MarshalJSON() ([]byte, error) { type NoMethod GroupOrSegment - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Hl7SchemaConfig: Root config message for HL7v2 schema. This contains a @@ -2960,9 +3085,9 @@ type Hl7SchemaConfig struct { NullFields []string `json:"-"` } -func (s *Hl7SchemaConfig) MarshalJSON() ([]byte, error) { +func (s Hl7SchemaConfig) MarshalJSON() ([]byte, error) { type NoMethod Hl7SchemaConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Hl7TypesConfig: Root config for HL7v2 datatype definitions for a specific @@ -2986,22 +3111,22 @@ type Hl7TypesConfig struct { NullFields []string `json:"-"` } -func (s *Hl7TypesConfig) MarshalJSON() ([]byte, error) { +func (s Hl7TypesConfig) MarshalJSON() ([]byte, error) { type NoMethod Hl7TypesConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Hl7V2NotificationConfig: Specifies where and whether to send notifications // upon changes to a data store. type Hl7V2NotificationConfig struct { - // Filter: Restricts notifications sent for messages matching a filter. If this - // is empty, all messages are matched. The following syntax is available: * A - // string field value can be written as text inside quotation marks, for - // example "query text". 
The only valid relational operation for text fields - // is equality (`=`), where text is searched within the field, rather than - // having the field be equal to the text. For example, "Comment = great" - // returns messages with `great` in the comment field. * A number field value - // can be written as an integer, a decimal, or an exponential. The valid + // Filter: Optional. Restricts notifications sent for messages matching a + // filter. If this is empty, all messages are matched. The following syntax is + // available: * A string field value can be written as text inside quotation + // marks, for example "query text". The only valid relational operation for + // text fields is equality (`=`), where text is searched within the field, + // rather than having the field be equal to the text. For example, "Comment = + // great" returns messages with `great` in the comment field. * A number field + // value can be written as an integer, a decimal, or an exponential. The valid // relational operators for number fields are the equality operator (`=`), // along with the less than/greater than operators (`<`, `<=`, `>`, `>=`). Note // that there is no inequality (`!=`) operator. You can prepend the `NOT` @@ -3064,9 +3189,9 @@ type Hl7V2NotificationConfig struct { NullFields []string `json:"-"` } -func (s *Hl7V2NotificationConfig) MarshalJSON() ([]byte, error) { +func (s Hl7V2NotificationConfig) MarshalJSON() ([]byte, error) { type NoMethod Hl7V2NotificationConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Hl7V2Store: Represents an HL7v2 store. @@ -3084,23 +3209,24 @@ type Hl7V2Store struct { // `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/hl7V2Sto // res/{hl7v2_store_id}`. Name string `json:"name,omitempty"` - // NotificationConfigs: A list of notification configs. Each configuration uses - // a filter to determine whether to publish a message (both Ingest & Create) on - // the corresponding notification destination. Only the message name is sent as - // part of the notification. Supplied by the client. + // NotificationConfigs: Optional. A list of notification configs. Each + // configuration uses a filter to determine whether to publish a message (both + // Ingest & Create) on the corresponding notification destination. Only the + // message name is sent as part of the notification. Supplied by the client. NotificationConfigs []*Hl7V2NotificationConfig `json:"notificationConfigs,omitempty"` - // ParserConfig: The configuration for the parser. It determines how the server - // parses the messages. + // ParserConfig: Optional. The configuration for the parser. It determines how + // the server parses the messages. ParserConfig *ParserConfig `json:"parserConfig,omitempty"` - // RejectDuplicateMessage: Determines whether to reject duplicate messages. A - // duplicate message is a message with the same raw bytes as a message that has - // already been ingested/created in this HL7v2 store. The default value is - // false, meaning that the store accepts the duplicate messages and it also - // returns the same ACK message in the IngestMessageResponse as has been - // returned previously. Note that only one resource is created in the store. - // When this field is set to true, CreateMessage/IngestMessage requests with a - // duplicate message will be rejected by the store, and - // IngestMessageErrorDetail returns a NACK message upon rejection. 
+ // RejectDuplicateMessage: Optional. Determines whether to reject duplicate + // messages. A duplicate message is a message with the same raw bytes as a + // message that has already been ingested/created in this HL7v2 store. The + // default value is false, meaning that the store accepts the duplicate + // messages and it also returns the same ACK message in the + // IngestMessageResponse as has been returned previously. Note that only one + // resource is created in the store. When this field is set to true, + // CreateMessage/IngestMessage requests with a duplicate message will be + // rejected by the store, and IngestMessageErrorDetail returns a NACK message + // upon rejection. RejectDuplicateMessage bool `json:"rejectDuplicateMessage,omitempty"` // ServerResponse contains the HTTP response code and headers from the server. @@ -3118,9 +3244,9 @@ type Hl7V2Store struct { NullFields []string `json:"-"` } -func (s *Hl7V2Store) MarshalJSON() ([]byte, error) { +func (s Hl7V2Store) MarshalJSON() ([]byte, error) { type NoMethod Hl7V2Store - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Hl7V2StoreMetric: Count of messages and total storage size by type for a @@ -3148,9 +3274,9 @@ type Hl7V2StoreMetric struct { NullFields []string `json:"-"` } -func (s *Hl7V2StoreMetric) MarshalJSON() ([]byte, error) { +func (s Hl7V2StoreMetric) MarshalJSON() ([]byte, error) { type NoMethod Hl7V2StoreMetric - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Hl7V2StoreMetrics: List of metrics for a given HL7v2 store. @@ -3176,9 +3302,9 @@ type Hl7V2StoreMetrics struct { NullFields []string `json:"-"` } -func (s *Hl7V2StoreMetrics) MarshalJSON() ([]byte, error) { +func (s Hl7V2StoreMetrics) MarshalJSON() ([]byte, error) { type NoMethod Hl7V2StoreMetrics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpBody: Message that represents an arbitrary HTTP body. It should only be @@ -3223,9 +3349,9 @@ type HttpBody struct { NullFields []string `json:"-"` } -func (s *HttpBody) MarshalJSON() ([]byte, error) { +func (s HttpBody) MarshalJSON() ([]byte, error) { type NoMethod HttpBody - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Image: Raw bytes representing consent artifact content. @@ -3255,9 +3381,9 @@ type Image struct { NullFields []string `json:"-"` } -func (s *Image) MarshalJSON() ([]byte, error) { +func (s Image) MarshalJSON() ([]byte, error) { type NoMethod Image - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ImageConfig: Specifies how to handle de-identification of image pixels. @@ -3287,9 +3413,9 @@ type ImageConfig struct { NullFields []string `json:"-"` } -func (s *ImageConfig) MarshalJSON() ([]byte, error) { +func (s ImageConfig) MarshalJSON() ([]byte, error) { type NoMethod ImageConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ImportDicomDataRequest: Imports data into the specified DICOM store. 
Returns @@ -3297,26 +3423,29 @@ func (s *ImageConfig) MarshalJSON() ([]byte, error) { // duplicate DICOM instances by ignoring the newly-pushed instance. It does not // overwrite. type ImportDicomDataRequest struct { + // BlobStorageSettings: Optional. The blob storage settings for the data + // imported by this operation. + BlobStorageSettings *BlobStorageSettings `json:"blobStorageSettings,omitempty"` // GcsSource: Cloud Storage source data location and import configuration. The // Cloud Healthcare Service Agent requires the `roles/storage.objectViewer` // Cloud IAM roles on the Cloud Storage location. GcsSource *GoogleCloudHealthcareV1DicomGcsSource `json:"gcsSource,omitempty"` - // ForceSendFields is a list of field names (e.g. "GcsSource") to + // ForceSendFields is a list of field names (e.g. "BlobStorageSettings") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "GcsSource") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "BlobStorageSettings") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ImportDicomDataRequest) MarshalJSON() ([]byte, error) { +func (s ImportDicomDataRequest) MarshalJSON() ([]byte, error) { type NoMethod ImportDicomDataRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ImportDicomDataResponse: Returns additional information in regards to a @@ -3343,9 +3472,9 @@ type ImportMessagesRequest struct { NullFields []string `json:"-"` } -func (s *ImportMessagesRequest) MarshalJSON() ([]byte, error) { +func (s ImportMessagesRequest) MarshalJSON() ([]byte, error) { type NoMethod ImportMessagesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ImportMessagesResponse: Final response of importing messages. This structure @@ -3389,9 +3518,9 @@ type ImportResourcesRequest struct { NullFields []string `json:"-"` } -func (s *ImportResourcesRequest) MarshalJSON() ([]byte, error) { +func (s ImportResourcesRequest) MarshalJSON() ([]byte, error) { type NoMethod ImportResourcesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ImportResourcesResponse: Final response of importing resources. This @@ -3429,9 +3558,9 @@ type InfoTypeTransformation struct { NullFields []string `json:"-"` } -func (s *InfoTypeTransformation) MarshalJSON() ([]byte, error) { +func (s InfoTypeTransformation) MarshalJSON() ([]byte, error) { type NoMethod InfoTypeTransformation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // IngestMessageRequest: Ingests a message into the specified HL7v2 store. 
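// Illustrative sketch, not part of the vendored diff: the
// ForceSendFields/NullFields boilerplate repeated throughout these structs is
// how callers distinguish "unset" from an explicit zero value, since
// `omitempty` would otherwise drop the field. The topic name and field choice
// below are hypothetical.
package main

import (
	"encoding/json"
	"fmt"

	healthcare "google.golang.org/api/healthcare/v1"
)

func main() {
	cfg := healthcare.FhirNotificationConfig{
		PubsubTopic: "projects/my-proj/topics/fhir-changes",
		// SendFullResource is false, so omitempty would normally drop it;
		// listing it in ForceSendFields makes the generated MarshalJSON
		// (via gensupport) emit an explicit "sendFullResource": false.
		SendFullResource: false,
		ForceSendFields:  []string{"SendFullResource"},
	}
	b, _ := json.Marshal(cfg)
	fmt.Println(string(b)) // {"pubsubTopic":"...","sendFullResource":false}
}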
@@ -3451,9 +3580,9 @@ type IngestMessageRequest struct { NullFields []string `json:"-"` } -func (s *IngestMessageRequest) MarshalJSON() ([]byte, error) { +func (s IngestMessageRequest) MarshalJSON() ([]byte, error) { type NoMethod IngestMessageRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // IngestMessageResponse: Acknowledges that a message has been ingested into @@ -3479,9 +3608,9 @@ type IngestMessageResponse struct { NullFields []string `json:"-"` } -func (s *IngestMessageResponse) MarshalJSON() ([]byte, error) { +func (s IngestMessageResponse) MarshalJSON() ([]byte, error) { type NoMethod IngestMessageResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // KmsWrappedCryptoKey: Include to use an existing data crypto key wrapped by @@ -3511,9 +3640,9 @@ type KmsWrappedCryptoKey struct { NullFields []string `json:"-"` } -func (s *KmsWrappedCryptoKey) MarshalJSON() ([]byte, error) { +func (s KmsWrappedCryptoKey) MarshalJSON() ([]byte, error) { type NoMethod KmsWrappedCryptoKey - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LinkedEntity: EntityMentions can be linked to multiple entities using a @@ -3538,9 +3667,9 @@ type LinkedEntity struct { NullFields []string `json:"-"` } -func (s *LinkedEntity) MarshalJSON() ([]byte, error) { +func (s LinkedEntity) MarshalJSON() ([]byte, error) { type NoMethod LinkedEntity - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ListAttributeDefinitionsResponse struct { @@ -3567,9 +3696,9 @@ type ListAttributeDefinitionsResponse struct { NullFields []string `json:"-"` } -func (s *ListAttributeDefinitionsResponse) MarshalJSON() ([]byte, error) { +func (s ListAttributeDefinitionsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListAttributeDefinitionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ListConsentArtifactsResponse struct { @@ -3596,9 +3725,9 @@ type ListConsentArtifactsResponse struct { NullFields []string `json:"-"` } -func (s *ListConsentArtifactsResponse) MarshalJSON() ([]byte, error) { +func (s ListConsentArtifactsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListConsentArtifactsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ListConsentRevisionsResponse struct { @@ -3625,9 +3754,9 @@ type ListConsentRevisionsResponse struct { NullFields []string `json:"-"` } -func (s *ListConsentRevisionsResponse) MarshalJSON() ([]byte, error) { +func (s ListConsentRevisionsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListConsentRevisionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ListConsentStoresResponse struct { @@ -3654,9 +3783,9 @@ type ListConsentStoresResponse struct { NullFields []string `json:"-"` } -func (s *ListConsentStoresResponse) MarshalJSON() ([]byte, error) { +func (s 
ListConsentStoresResponse) MarshalJSON() ([]byte, error) { type NoMethod ListConsentStoresResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ListConsentsResponse struct { @@ -3682,9 +3811,9 @@ type ListConsentsResponse struct { NullFields []string `json:"-"` } -func (s *ListConsentsResponse) MarshalJSON() ([]byte, error) { +func (s ListConsentsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListConsentsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListDatasetsResponse: Lists the available datasets. @@ -3710,9 +3839,9 @@ type ListDatasetsResponse struct { NullFields []string `json:"-"` } -func (s *ListDatasetsResponse) MarshalJSON() ([]byte, error) { +func (s ListDatasetsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListDatasetsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListDicomStoresResponse: Lists the DICOM stores in the given dataset. @@ -3739,9 +3868,9 @@ type ListDicomStoresResponse struct { NullFields []string `json:"-"` } -func (s *ListDicomStoresResponse) MarshalJSON() ([]byte, error) { +func (s ListDicomStoresResponse) MarshalJSON() ([]byte, error) { type NoMethod ListDicomStoresResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListFhirStoresResponse: Lists the FHIR stores in the given dataset. @@ -3768,9 +3897,9 @@ type ListFhirStoresResponse struct { NullFields []string `json:"-"` } -func (s *ListFhirStoresResponse) MarshalJSON() ([]byte, error) { +func (s ListFhirStoresResponse) MarshalJSON() ([]byte, error) { type NoMethod ListFhirStoresResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListHl7V2StoresResponse: Lists the HL7v2 stores in the given dataset. @@ -3797,9 +3926,9 @@ type ListHl7V2StoresResponse struct { NullFields []string `json:"-"` } -func (s *ListHl7V2StoresResponse) MarshalJSON() ([]byte, error) { +func (s ListHl7V2StoresResponse) MarshalJSON() ([]byte, error) { type NoMethod ListHl7V2StoresResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListLocationsResponse: The response message for Locations.ListLocations. @@ -3825,9 +3954,9 @@ type ListLocationsResponse struct { NullFields []string `json:"-"` } -func (s *ListLocationsResponse) MarshalJSON() ([]byte, error) { +func (s ListLocationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListLocationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListMessagesResponse: Lists the messages in the specified HL7v2 store. 
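// Illustrative sketch, not part of the vendored diff: the List*Response types
// above all follow the same paging contract, where a caller loops until
// NextPageToken comes back empty. The parent path is a hypothetical
// placeholder, and the call chain assumes the usual generated surface
// (List/PageToken/Do) plus Application Default Credentials at runtime.
package main

import (
	"context"
	"fmt"
	"log"

	healthcare "google.golang.org/api/healthcare/v1"
)

func main() {
	ctx := context.Background()
	svc, err := healthcare.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	parent := "projects/my-proj/locations/us-central1"
	tok := ""
	for {
		resp, err := svc.Projects.Locations.Datasets.List(parent).PageToken(tok).Do()
		if err != nil {
			log.Fatal(err)
		}
		for _, ds := range resp.Datasets {
			fmt.Println(ds.Name)
		}
		// An empty NextPageToken means the listing is complete.
		if tok = resp.NextPageToken; tok == "" {
			break
		}
	}
}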
@@ -3854,9 +3983,9 @@ type ListMessagesResponse struct { NullFields []string `json:"-"` } -func (s *ListMessagesResponse) MarshalJSON() ([]byte, error) { +func (s ListMessagesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListMessagesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListOperationsResponse: The response message for Operations.ListOperations. @@ -3882,9 +4011,9 @@ type ListOperationsResponse struct { NullFields []string `json:"-"` } -func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { +func (s ListOperationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListOperationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ListUserDataMappingsResponse struct { @@ -3911,9 +4040,9 @@ type ListUserDataMappingsResponse struct { NullFields []string `json:"-"` } -func (s *ListUserDataMappingsResponse) MarshalJSON() ([]byte, error) { +func (s ListUserDataMappingsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListUserDataMappingsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Location: A resource that represents a Google Cloud location. @@ -3949,9 +4078,9 @@ type Location struct { NullFields []string `json:"-"` } -func (s *Location) MarshalJSON() ([]byte, error) { +func (s Location) MarshalJSON() ([]byte, error) { type NoMethod Location - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Message: A complete HL7v2 message. See [Introduction to HL7 Standards] @@ -3972,23 +4101,24 @@ type Message struct { // [\p{Ll}\p{Lo}\p{N}_-]{0,63} No more than 64 labels can be associated with a // given store. Labels map[string]string `json:"labels,omitempty"` - // MessageType: The message type for this message. MSH-9.1. + // MessageType: Output only. The message type for this message. MSH-9.1. MessageType string `json:"messageType,omitempty"` // Name: Output only. Resource name of the Message, of the form // `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/hl7V2Sto - // res/{hl7_v2_store_id}/messages/{message_id}`. Assigned by the server. + // res/{hl7_v2_store_id}/messages/{message_id}`. Name string `json:"name,omitempty"` // ParsedData: Output only. The parsed version of the raw message data. ParsedData *ParsedData `json:"parsedData,omitempty"` - // PatientIds: All patient IDs listed in the PID-2, PID-3, and PID-4 segments - // of this message. + // PatientIds: Output only. All patient IDs listed in the PID-2, PID-3, and + // PID-4 segments of this message. PatientIds []*PatientId `json:"patientIds,omitempty"` - // SchematizedData: The parsed version of the raw message data schematized - // according to this store's schemas and type definitions. + // SchematizedData: Output only. The parsed version of the raw message data + // schematized according to this store's schemas and type definitions. SchematizedData *SchematizedData `json:"schematizedData,omitempty"` - // SendFacility: The hospital that this message came from. MSH-4. + // SendFacility: Output only. The hospital that this message came from. MSH-4. 
SendFacility string `json:"sendFacility,omitempty"` - // SendTime: The datetime the sending application sent this message. MSH-7. + // SendTime: Output only. The datetime the sending application sent this + // message. MSH-7. SendTime string `json:"sendTime,omitempty"` // ServerResponse contains the HTTP response code and headers from the server. @@ -4006,9 +4136,9 @@ type Message struct { NullFields []string `json:"-"` } -func (s *Message) MarshalJSON() ([]byte, error) { +func (s Message) MarshalJSON() ([]byte, error) { type NoMethod Message - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NotificationConfig: Specifies where to send notifications upon changes to a @@ -4048,9 +4178,9 @@ type NotificationConfig struct { NullFields []string `json:"-"` } -func (s *NotificationConfig) MarshalJSON() ([]byte, error) { +func (s NotificationConfig) MarshalJSON() ([]byte, error) { type NoMethod NotificationConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Operation: This resource represents a long-running operation that is the @@ -4095,9 +4225,9 @@ type Operation struct { NullFields []string `json:"-"` } -func (s *Operation) MarshalJSON() ([]byte, error) { +func (s Operation) MarshalJSON() ([]byte, error) { type NoMethod Operation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationMetadata: OperationMetadata provides information about the @@ -4130,9 +4260,9 @@ type OperationMetadata struct { NullFields []string `json:"-"` } -func (s *OperationMetadata) MarshalJSON() ([]byte, error) { +func (s OperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod OperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ParsedData: The content of a HL7v2 message in a structured format. @@ -4151,18 +4281,19 @@ type ParsedData struct { NullFields []string `json:"-"` } -func (s *ParsedData) MarshalJSON() ([]byte, error) { +func (s ParsedData) MarshalJSON() ([]byte, error) { type NoMethod ParsedData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ParserConfig: The configuration for the parser. It determines how the server // parses the messages. type ParserConfig struct { - // AllowNullHeader: Determines whether messages with no header are allowed. + // AllowNullHeader: Optional. Determines whether messages with no header are + // allowed. AllowNullHeader bool `json:"allowNullHeader,omitempty"` - // Schema: Schemas used to parse messages in this store, if schematized parsing - // is desired. + // Schema: Optional. Schemas used to parse messages in this store, if + // schematized parsing is desired. Schema *SchemaPackage `json:"schema,omitempty"` // SegmentTerminator: Byte(s) to use as the segment terminator. 
If this is // unset, '\r' is used as segment terminator, matching the HL7 version 2 @@ -4200,9 +4331,9 @@ type ParserConfig struct { NullFields []string `json:"-"` } -func (s *ParserConfig) MarshalJSON() ([]byte, error) { +func (s ParserConfig) MarshalJSON() ([]byte, error) { type NoMethod ParserConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PatientId: A patient identifier and associated type. @@ -4224,9 +4355,9 @@ type PatientId struct { NullFields []string `json:"-"` } -func (s *PatientId) MarshalJSON() ([]byte, error) { +func (s PatientId) MarshalJSON() ([]byte, error) { type NoMethod PatientId - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Policy: An Identity and Access Management (IAM) policy, which specifies @@ -4316,9 +4447,9 @@ type Policy struct { NullFields []string `json:"-"` } -func (s *Policy) MarshalJSON() ([]byte, error) { +func (s Policy) MarshalJSON() ([]byte, error) { type NoMethod Policy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ProgressCounter: ProgressCounter provides counters to describe an @@ -4343,9 +4474,9 @@ type ProgressCounter struct { NullFields []string `json:"-"` } -func (s *ProgressCounter) MarshalJSON() ([]byte, error) { +func (s ProgressCounter) MarshalJSON() ([]byte, error) { type NoMethod ProgressCounter - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PubsubDestination: The Pub/Sub output destination. The Cloud Healthcare @@ -4377,9 +4508,9 @@ type PubsubDestination struct { NullFields []string `json:"-"` } -func (s *PubsubDestination) MarshalJSON() ([]byte, error) { +func (s PubsubDestination) MarshalJSON() ([]byte, error) { type NoMethod PubsubDestination - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // QueryAccessibleDataRequest: Queries all data_ids that are consented for a @@ -4414,9 +4545,9 @@ type QueryAccessibleDataRequest struct { NullFields []string `json:"-"` } -func (s *QueryAccessibleDataRequest) MarshalJSON() ([]byte, error) { +func (s QueryAccessibleDataRequest) MarshalJSON() ([]byte, error) { type NoMethod QueryAccessibleDataRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // QueryAccessibleDataResponse: Response for successful QueryAccessibleData @@ -4439,9 +4570,9 @@ type QueryAccessibleDataResponse struct { NullFields []string `json:"-"` } -func (s *QueryAccessibleDataResponse) MarshalJSON() ([]byte, error) { +func (s QueryAccessibleDataResponse) MarshalJSON() ([]byte, error) { type NoMethod QueryAccessibleDataResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RedactConfig: Define how to redact sensitive values. 
Default behaviour is @@ -4474,9 +4605,9 @@ type RejectConsentRequest struct { NullFields []string `json:"-"` } -func (s *RejectConsentRequest) MarshalJSON() ([]byte, error) { +func (s RejectConsentRequest) MarshalJSON() ([]byte, error) { type NoMethod RejectConsentRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ReplaceWithInfoTypeConfig: When using the INSPECT_AND_TRANSFORM action, each @@ -4503,9 +4634,9 @@ type Resources struct { NullFields []string `json:"-"` } -func (s *Resources) MarshalJSON() ([]byte, error) { +func (s Resources) MarshalJSON() ([]byte, error) { type NoMethod Resources - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Result: The consent evaluation result for a single `data_id`. @@ -4530,9 +4661,9 @@ type Result struct { NullFields []string `json:"-"` } -func (s *Result) MarshalJSON() ([]byte, error) { +func (s Result) MarshalJSON() ([]byte, error) { type NoMethod Result - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RevokeConsentRequest: Revokes the latest revision of the specified Consent @@ -4558,9 +4689,9 @@ type RevokeConsentRequest struct { NullFields []string `json:"-"` } -func (s *RevokeConsentRequest) MarshalJSON() ([]byte, error) { +func (s RevokeConsentRequest) MarshalJSON() ([]byte, error) { type NoMethod RevokeConsentRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RollbackFhirResourceFilteringFields struct { @@ -4587,9 +4718,9 @@ type RollbackFhirResourceFilteringFields struct { NullFields []string `json:"-"` } -func (s *RollbackFhirResourceFilteringFields) MarshalJSON() ([]byte, error) { +func (s RollbackFhirResourceFilteringFields) MarshalJSON() ([]byte, error) { type NoMethod RollbackFhirResourceFilteringFields - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type RollbackFhirResourcesRequest struct { @@ -4632,9 +4763,9 @@ type RollbackFhirResourcesRequest struct { NullFields []string `json:"-"` } -func (s *RollbackFhirResourcesRequest) MarshalJSON() ([]byte, error) { +func (s RollbackFhirResourcesRequest) MarshalJSON() ([]byte, error) { type NoMethod RollbackFhirResourcesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RollbackFhirResourcesResponse: Final response of rollback FIHR resources @@ -4657,9 +4788,101 @@ type RollbackFhirResourcesResponse struct { NullFields []string `json:"-"` } -func (s *RollbackFhirResourcesResponse) MarshalJSON() ([]byte, error) { +func (s RollbackFhirResourcesResponse) MarshalJSON() ([]byte, error) { type NoMethod RollbackFhirResourcesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// RollbackHL7MessagesFilteringFields: Filtering fields for an HL7v2 rollback. +// Currently only supports a list of operation ids to roll back. +type RollbackHL7MessagesFilteringFields struct { + // OperationIds: Optional. 
A list of operation IDs to roll back.
+ OperationIds googleapi.Uint64s `json:"operationIds,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "OperationIds") to
+ // unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "OperationIds") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s RollbackHL7MessagesFilteringFields) MarshalJSON() ([]byte, error) {
+ type NoMethod RollbackHL7MessagesFilteringFields
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// RollbackHl7V2MessagesRequest: Point in time recovery rollback request.
+type RollbackHl7V2MessagesRequest struct {
+ // ChangeType: Optional. CREATE/UPDATE/DELETE/ALL for reverting all
+ // transactions of a certain type.
+ //
+ // Possible values:
+ //   "CHANGE_TYPE_UNSPECIFIED" - When unspecified, revert all transactions
+ //   "ALL" - All transactions
+ //   "CREATE" - Revert only CREATE transactions
+ //   "UPDATE" - Revert only UPDATE transactions
+ //   "DELETE" - Revert only DELETE transactions
+ ChangeType string `json:"changeType,omitempty"`
+ // ExcludeRollbacks: Optional. Specifies whether to exclude earlier rollbacks.
+ ExcludeRollbacks bool `json:"excludeRollbacks,omitempty"`
+ // FilteringFields: Optional. Parameters for filtering.
+ FilteringFields *RollbackHL7MessagesFilteringFields `json:"filteringFields,omitempty"`
+ // Force: Optional. When enabled, changes will be reverted without explicit
+ // confirmation.
+ Force bool `json:"force,omitempty"`
+ // InputGcsObject: Optional. Cloud Storage object containing a list of
+ // {resourceId} lines, identifying the resources to be reverted.
+ InputGcsObject string `json:"inputGcsObject,omitempty"`
+ // ResultGcsBucket: Required. Bucket in which to deposit the result.
+ ResultGcsBucket string `json:"resultGcsBucket,omitempty"`
+ // RollbackTime: Required. Time point to roll back to.
+ RollbackTime string `json:"rollbackTime,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "ChangeType") to
+ // unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "ChangeType") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s RollbackHl7V2MessagesRequest) MarshalJSON() ([]byte, error) {
+ type NoMethod RollbackHl7V2MessagesRequest
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// RollbackHl7V2MessagesResponse: Final response of rollback HL7v2 messages
+// request.
+type RollbackHl7V2MessagesResponse struct {
+ // Hl7v2Store: The name of the HL7v2 store to roll back, in the format of
+ // "projects/{project_id}/locations/{location_id}/datasets/{dataset_id}
+ // /hl7v2Stores/{hl7v2_store_id}".
+ Hl7v2Store string `json:"hl7v2Store,omitempty"` + // ForceSendFields is a list of field names (e.g. "Hl7v2Store") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Hl7v2Store") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s RollbackHl7V2MessagesResponse) MarshalJSON() ([]byte, error) { + type NoMethod RollbackHl7V2MessagesResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SchemaConfig: Configuration for the FHIR BigQuery schema. Determines how the @@ -4711,9 +4934,9 @@ type SchemaConfig struct { NullFields []string `json:"-"` } -func (s *SchemaConfig) MarshalJSON() ([]byte, error) { +func (s SchemaConfig) MarshalJSON() ([]byte, error) { type NoMethod SchemaConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SchemaGroup: An HL7v2 logical group construct. @@ -4743,25 +4966,25 @@ type SchemaGroup struct { NullFields []string `json:"-"` } -func (s *SchemaGroup) MarshalJSON() ([]byte, error) { +func (s SchemaGroup) MarshalJSON() ([]byte, error) { type NoMethod SchemaGroup - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SchemaPackage: A schema package contains a set of schemas and type // definitions. type SchemaPackage struct { - // IgnoreMinOccurs: Flag to ignore all min_occurs restrictions in the schema. - // This means that incoming messages can omit any group, segment, field, - // component, or subcomponent. + // IgnoreMinOccurs: Optional. Flag to ignore all min_occurs restrictions in the + // schema. This means that incoming messages can omit any group, segment, + // field, component, or subcomponent. IgnoreMinOccurs bool `json:"ignoreMinOccurs,omitempty"` - // Schemas: Schema configs that are layered based on their VersionSources that - // match the incoming message. Schema configs present in higher indices - // override those in lower indices with the same message type and trigger event - // if their VersionSources all match an incoming message. + // Schemas: Optional. Schema configs that are layered based on their + // VersionSources that match the incoming message. Schema configs present in + // higher indices override those in lower indices with the same message type + // and trigger event if their VersionSources all match an incoming message. Schemas []*Hl7SchemaConfig `json:"schemas,omitempty"` - // SchematizedParsingType: Determines how messages that fail to parse are - // handled. + // SchematizedParsingType: Optional. Determines how messages that fail to parse + // are handled. // // Possible values: // "SCHEMATIZED_PARSING_TYPE_UNSPECIFIED" - Unspecified schematized parsing @@ -4771,13 +4994,13 @@ type SchemaPackage struct { // "HARD_FAIL" - Messages that fail to parse are rejected from // ingestion/insertion and return an error code. 
SchematizedParsingType string `json:"schematizedParsingType,omitempty"` - // Types: Schema type definitions that are layered based on their + // Types: Optional. Schema type definitions that are layered based on their // VersionSources that match the incoming message. Type definitions present in // higher indices override those in lower indices with the same type name if // their VersionSources all match an incoming message. Types []*Hl7TypesConfig `json:"types,omitempty"` - // UnexpectedSegmentHandling: Determines how unexpected segments (segments not - // matched to the schema) are handled. + // UnexpectedSegmentHandling: Optional. Determines how unexpected segments + // (segments not matched to the schema) are handled. // // Possible values: // "UNEXPECTED_SEGMENT_HANDLING_MODE_UNSPECIFIED" - Unspecified handling @@ -4801,9 +5024,9 @@ type SchemaPackage struct { NullFields []string `json:"-"` } -func (s *SchemaPackage) MarshalJSON() ([]byte, error) { +func (s SchemaPackage) MarshalJSON() ([]byte, error) { type NoMethod SchemaPackage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SchemaSegment: An HL7v2 Segment. @@ -4829,9 +5052,9 @@ type SchemaSegment struct { NullFields []string `json:"-"` } -func (s *SchemaSegment) MarshalJSON() ([]byte, error) { +func (s SchemaSegment) MarshalJSON() ([]byte, error) { type NoMethod SchemaSegment - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SchematizedData: The content of an HL7v2 message in a structured format as @@ -4854,9 +5077,9 @@ type SchematizedData struct { NullFields []string `json:"-"` } -func (s *SchematizedData) MarshalJSON() ([]byte, error) { +func (s SchematizedData) MarshalJSON() ([]byte, error) { type NoMethod SchematizedData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SearchResourcesRequest: Request to search the resources in the specified @@ -4881,9 +5104,9 @@ type SearchResourcesRequest struct { NullFields []string `json:"-"` } -func (s *SearchResourcesRequest) MarshalJSON() ([]byte, error) { +func (s SearchResourcesRequest) MarshalJSON() ([]byte, error) { type NoMethod SearchResourcesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Segment: A segment in a structured format. @@ -4917,9 +5140,9 @@ type Segment struct { NullFields []string `json:"-"` } -func (s *Segment) MarshalJSON() ([]byte, error) { +func (s Segment) MarshalJSON() ([]byte, error) { type NoMethod Segment - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SeriesMetrics: SeriesMetrics contains metrics describing a DICOM series. @@ -4952,9 +5175,44 @@ type SeriesMetrics struct { NullFields []string `json:"-"` } -func (s *SeriesMetrics) MarshalJSON() ([]byte, error) { +func (s SeriesMetrics) MarshalJSON() ([]byte, error) { type NoMethod SeriesMetrics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SetBlobStorageSettingsRequest: Request message for `SetBlobStorageSettings` +// method. 
+type SetBlobStorageSettingsRequest struct { + // BlobStorageSettings: The blob storage settings to update for the specified + // resources. Only fields listed in `update_mask` are applied. + BlobStorageSettings *BlobStorageSettings `json:"blobStorageSettings,omitempty"` + // FilterConfig: Optional. A filter configuration. If `filter_config` is + // specified, set the value of `resource` to the resource name of a DICOM store + // in the format + // `projects/{projectID}/locations/{locationID}/datasets/{datasetID}/dicomStores + // /{dicomStoreID}`. + FilterConfig *DicomFilterConfig `json:"filterConfig,omitempty"` + // ForceSendFields is a list of field names (e.g. "BlobStorageSettings") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "BlobStorageSettings") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SetBlobStorageSettingsRequest) MarshalJSON() ([]byte, error) { + type NoMethod SetBlobStorageSettingsRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SetBlobStorageSettingsResponse: Returns additional info in regards to a +// completed set blob storage settings API. +type SetBlobStorageSettingsResponse struct { } // SetIamPolicyRequest: Request message for `SetIamPolicy` method. @@ -4981,9 +5239,9 @@ type SetIamPolicyRequest struct { NullFields []string `json:"-"` } -func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { +func (s SetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod SetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Signature: User signature. @@ -5010,9 +5268,9 @@ type Signature struct { NullFields []string `json:"-"` } -func (s *Signature) MarshalJSON() ([]byte, error) { +func (s Signature) MarshalJSON() ([]byte, error) { type NoMethod Signature - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Status: The `Status` type defines a logical error model that is suitable for @@ -5044,19 +5302,54 @@ type Status struct { NullFields []string `json:"-"` } -func (s *Status) MarshalJSON() ([]byte, error) { +func (s Status) MarshalJSON() ([]byte, error) { type NoMethod Status - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// StorageInfo: StorageInfo encapsulates all the storage info of a resource. +type StorageInfo struct { + // BlobStorageInfo: Info about the data stored in blob storage for the + // resource. + BlobStorageInfo *BlobStorageInfo `json:"blobStorageInfo,omitempty"` + // ReferencedResource: The resource whose storage info is returned. 
For + // example: + // `projects/{projectID}/locations/{locationID}/datasets/{datasetID}/dicomStores + // /{dicomStoreID}/dicomWeb/studies/{studyUID}/series/{seriesUID}/instances/{ins + // tanceUID}` + ReferencedResource string `json:"referencedResource,omitempty"` + // StructuredStorageInfo: Info about the data stored in structured storage for + // the resource. + StructuredStorageInfo *StructuredStorageInfo `json:"structuredStorageInfo,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "BlobStorageInfo") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "BlobStorageInfo") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s StorageInfo) MarshalJSON() ([]byte, error) { + type NoMethod StorageInfo + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StreamConfig: Contains configuration for streaming FHIR export. type StreamConfig struct { - // BigqueryDestination: The destination BigQuery structure that contains both - // the dataset location and corresponding schema config. The output is - // organized in one table per resource type. The server reuses the existing - // tables (if any) that are named after the resource types. For example, - // "Patient", "Observation". When there is no existing table for a given - // resource type, the server attempts to create one. When a table schema + // BigqueryDestination: Optional. The destination BigQuery structure that + // contains both the dataset location and corresponding schema config. The + // output is organized in one table per resource type. The server reuses the + // existing tables (if any) that are named after the resource types. For + // example, "Patient", "Observation". When there is no existing table for a + // given resource type, the server attempts to create one. When a table schema // doesn't align with the schema config, either because of existing // incompatible schema or out of band incompatible modification, the server // does not stream in new data. BigQuery imposes a 1 MB limit on streaming @@ -5100,7 +5393,7 @@ type StreamConfig struct { // error logs in Cloud Logging // (https://cloud.google.com/healthcare/docs/how-tos/logging)). DeidentifiedStoreDestination *DeidentifiedStoreDestination `json:"deidentifiedStoreDestination,omitempty"` - // ResourceTypes: Supply a FHIR resource type (such as "Patient" or + // ResourceTypes: Optional. Supply a FHIR resource type (such as "Patient" or // "Observation"). See https://www.hl7.org/fhir/valueset-resource-types.html // for a list of all FHIR resource types. The server treats an empty list as an // intent to stream all the supported resource types in this FHIR store. 
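The receiver change that repeats through the hunks above follows a single pattern: each generated MarshalJSON moves from a *T pointer receiver to a T value receiver, so both T and *T satisfy json.Marshaler, and the local NoMethod alias strips the method set before re-marshaling to avoid infinite recursion. A minimal standalone sketch of that pattern, with plain encoding/json standing in for the vendored gensupport helper and a trimmed two-field Location used purely for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// Location mirrors a cut-down generated struct.
type Location struct {
	Name   string            `json:"name,omitempty"`
	Labels map[string]string `json:"labels,omitempty"`
}

// Value receiver: both Location and *Location now satisfy json.Marshaler,
// which is why the generator dropped the "*" in the hunks above.
func (s Location) MarshalJSON() ([]byte, error) {
	// NoMethod shares Location's fields and tags but none of its methods,
	// so the inner json.Marshal cannot re-enter this MarshalJSON.
	type NoMethod Location
	return json.Marshal(NoMethod(s))
}

func main() {
	b, _ := json.Marshal(Location{Name: "projects/p/locations/us-central1"})
	fmt.Println(string(b)) // {"name":"projects/p/locations/us-central1"}
}

One practical effect of the value receiver is that struct values held in maps or slices can be marshaled directly, without first taking their address.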
@@ -5118,9 +5411,32 @@ type StreamConfig struct { NullFields []string `json:"-"` } -func (s *StreamConfig) MarshalJSON() ([]byte, error) { +func (s StreamConfig) MarshalJSON() ([]byte, error) { type NoMethod StreamConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// StructuredStorageInfo: StructuredStorageInfo contains details about the data +// stored in Structured Storage for the referenced resource. +type StructuredStorageInfo struct { + // SizeBytes: Size in bytes of data stored in structured storage. + SizeBytes int64 `json:"sizeBytes,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "SizeBytes") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "SizeBytes") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s StructuredStorageInfo) MarshalJSON() ([]byte, error) { + type NoMethod StructuredStorageInfo + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StudyMetrics: StudyMetrics contains metrics describing a DICOM study. @@ -5155,9 +5471,9 @@ type StudyMetrics struct { NullFields []string `json:"-"` } -func (s *StudyMetrics) MarshalJSON() ([]byte, error) { +func (s StudyMetrics) MarshalJSON() ([]byte, error) { type NoMethod StudyMetrics - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TagFilterList: List of tags to be filtered. @@ -5181,9 +5497,9 @@ type TagFilterList struct { NullFields []string `json:"-"` } -func (s *TagFilterList) MarshalJSON() ([]byte, error) { +func (s TagFilterList) MarshalJSON() ([]byte, error) { type NoMethod TagFilterList - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsRequest: Request message for `TestIamPermissions` method. 
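SetBlobStorageSettingsRequest, SetBlobStorageSettingsResponse, StorageInfo, and StructuredStorageInfo above back the new setBlobStorageSettings and getStorageInfo RPCs whose call plumbing appears further down in this file. A sketch of how a caller might drive the study-level variant; the project, dataset, store, and study identifiers are placeholders, and the BlobStorageClass field with its "ARCHIVE" value is an assumption about the BlobStorageSettings type, which is defined elsewhere in this file:

package main

import (
	"context"
	"log"

	healthcare "google.golang.org/api/healthcare/v1"
)

func main() {
	ctx := context.Background()
	svc, err := healthcare.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Per the method comment, `resource` may name a study, a series, an
	// instance, or (when filter_config is set) the DICOM store itself.
	resource := "projects/my-proj/locations/us-central1/datasets/my-ds/dicomStores/my-store/dicomWeb/studies/1.2.3.4"
	req := &healthcare.SetBlobStorageSettingsRequest{
		BlobStorageSettings: &healthcare.BlobStorageSettings{
			BlobStorageClass: "ARCHIVE", // assumed storage class, for illustration only
		},
	}
	op, err := svc.Projects.Locations.Datasets.DicomStores.SetBlobStorageSettings(resource, req).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	// The call returns a long-running Operation, not the updated settings.
	log.Printf("started operation %s", op.Name)
}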
@@ -5206,9 +5522,9 @@ type TestIamPermissionsRequest struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsResponse: Response message for `TestIamPermissions` @@ -5233,9 +5549,9 @@ type TestIamPermissionsResponse struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type TextConfig struct { @@ -5261,9 +5577,9 @@ type TextConfig struct { NullFields []string `json:"-"` } -func (s *TextConfig) MarshalJSON() ([]byte, error) { +func (s TextConfig) MarshalJSON() ([]byte, error) { type NoMethod TextConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TextSpan: A span of text in the provided document. @@ -5285,9 +5601,9 @@ type TextSpan struct { NullFields []string `json:"-"` } -func (s *TextSpan) MarshalJSON() ([]byte, error) { +func (s TextSpan) MarshalJSON() ([]byte, error) { type NoMethod TextSpan - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TimePartitioning: Configuration for FHIR BigQuery time-partitioned tables. @@ -5317,9 +5633,9 @@ type TimePartitioning struct { NullFields []string `json:"-"` } -func (s *TimePartitioning) MarshalJSON() ([]byte, error) { +func (s TimePartitioning) MarshalJSON() ([]byte, error) { type NoMethod TimePartitioning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Type: A type definition for some HL7v2 type (incl. Segments and Datatypes). @@ -5352,9 +5668,9 @@ type Type struct { NullFields []string `json:"-"` } -func (s *Type) MarshalJSON() ([]byte, error) { +func (s Type) MarshalJSON() ([]byte, error) { type NoMethod Type - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UserDataMapping: Maps a resource to the associated user and Attributes. @@ -5393,38 +5709,39 @@ type UserDataMapping struct { NullFields []string `json:"-"` } -func (s *UserDataMapping) MarshalJSON() ([]byte, error) { +func (s UserDataMapping) MarshalJSON() ([]byte, error) { type NoMethod UserDataMapping - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ValidationConfig: Contains the configuration for FHIR profiles and // validation. type ValidationConfig struct { // DisableFhirpathValidation: Whether to disable FHIRPath validation for - // incoming resources. Set this to true to disable checking incoming resources - // for conformance against FHIRPath requirement defined in the FHIR - // specification. 
This property only affects resource types that do not have - // profiles configured for them, any rules in enabled implementation guides - // will still be enforced. + // incoming resources. The default value is false. Set this to true to disable + // checking incoming resources for conformance against FHIRPath requirement + // defined in the FHIR specification. This property only affects resource types + // that do not have profiles configured for them, any rules in enabled + // implementation guides will still be enforced. DisableFhirpathValidation bool `json:"disableFhirpathValidation,omitempty"` // DisableProfileValidation: Whether to disable profile validation for this - // FHIR store. Set this to true to disable checking incoming resources for - // conformance against structure definitions in this FHIR store. + // FHIR store. The default value is false. Set this to true to disable checking + // incoming resources for conformance against structure definitions in this + // FHIR store. DisableProfileValidation bool `json:"disableProfileValidation,omitempty"` // DisableReferenceTypeValidation: Whether to disable reference type validation - // for incoming resources. Set this to true to disable checking incoming - // resources for conformance against reference type requirement defined in the - // FHIR specification. This property only affects resource types that do not - // have profiles configured for them, any rules in enabled implementation - // guides will still be enforced. + // for incoming resources. The default value is false. Set this to true to + // disable checking incoming resources for conformance against reference type + // requirement defined in the FHIR specification. This property only affects + // resource types that do not have profiles configured for them, any rules in + // enabled implementation guides will still be enforced. DisableReferenceTypeValidation bool `json:"disableReferenceTypeValidation,omitempty"` // DisableRequiredFieldValidation: Whether to disable required fields - // validation for incoming resources. Set this to true to disable checking - // incoming resources for conformance against required fields requirement - // defined in the FHIR specification. This property only affects resource types - // that do not have profiles configured for them, any rules in enabled - // implementation guides will still be enforced. + // validation for incoming resources. The default value is false. Set this to + // true to disable checking incoming resources for conformance against required + // fields requirement defined in the FHIR specification. This property only + // affects resource types that do not have profiles configured for them, any + // rules in enabled implementation guides will still be enforced. DisableRequiredFieldValidation bool `json:"disableRequiredFieldValidation,omitempty"` // EnabledImplementationGuides: A list of implementation guide URLs in this // FHIR store that are used to configure the profiles to use for validation. 
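The reworked comments above make the defaults explicit: every Disable* flag is false unless set. Because each generated field is tagged omitempty, a deliberate false never reaches the wire unless the field is named in ForceSendFields, which the generated MarshalJSON forwards to gensupport. A small sketch of that interaction, assuming the healthcare/v1 package from this vendor tree:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	healthcare "google.golang.org/api/healthcare/v1"
)

func main() {
	vc := &healthcare.ValidationConfig{
		DisableProfileValidation: false, // zero value: normally dropped by omitempty
	}
	// Name the field explicitly so the server still receives the false value.
	vc.ForceSendFields = []string{"DisableProfileValidation"}
	b, err := json.Marshal(vc) // routes through the generated value-receiver MarshalJSON
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // {"disableProfileValidation":false}
}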
@@ -5454,9 +5771,9 @@ type ValidationConfig struct { NullFields []string `json:"-"` } -func (s *ValidationConfig) MarshalJSON() ([]byte, error) { +func (s ValidationConfig) MarshalJSON() ([]byte, error) { type NoMethod ValidationConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // VersionSource: Describes a selector for extracting and matching an MSH field @@ -5481,9 +5798,9 @@ type VersionSource struct { NullFields []string `json:"-"` } -func (s *VersionSource) MarshalJSON() ([]byte, error) { +func (s VersionSource) MarshalJSON() ([]byte, error) { type NoMethod VersionSource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ProjectsLocationsGetCall struct { @@ -12797,62 +13114,70 @@ func (c *ProjectsLocationsDatasetsDicomStoresSearchForStudiesCall) Do(opts ...go return c.doRequest("") } -type ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall struct { - s *Service - resource string - setiampolicyrequest *SetIamPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsDatasetsDicomStoresSetBlobStorageSettingsCall struct { + s *Service + resource string + setblobstoragesettingsrequest *SetBlobStorageSettingsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified resource. -// Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, -// and `PERMISSION_DENIED` errors. +// SetBlobStorageSettings: SetBlobStorageSettings sets the blob storage +// settings of the specified resources. // -// - resource: REQUIRED: The resource for which the policy is being specified. -// See Resource names (https://cloud.google.com/apis/design/resource_names) -// for the appropriate value for this field. -func (r *ProjectsLocationsDatasetsDicomStoresService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall { - c := &ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - resource: The path of the resource to update the blob storage settings in +// the format of +// `projects/{projectID}/locations/{locationID}/datasets/{datasetID}/dicomStor +// es/{dicomStoreID}/dicomWeb/studies/{studyUID}`, +// `projects/{projectID}/locations/{locationID}/datasets/{datasetID}/dicomStor +// es/{dicomStoreID}/dicomWeb/studies/{studyUID}/series/{seriesUID}/`, or +// `projects/{projectID}/locations/{locationID}/datasets/{datasetID}/dicomStor +// es/{dicomStoreID}/dicomWeb/studies/{studyUID}/series/{seriesUID}/instances/ +// {instanceUID}`. If `filter_config` is specified, set the value of +// `resource` to the resource name of a DICOM store in the format +// `projects/{projectID}/locations/{locationID}/datasets/{datasetID}/dicomStor +// es/{dicomStoreID}`. 
+func (r *ProjectsLocationsDatasetsDicomStoresService) SetBlobStorageSettings(resource string, setblobstoragesettingsrequest *SetBlobStorageSettingsRequest) *ProjectsLocationsDatasetsDicomStoresSetBlobStorageSettingsCall { + c := &ProjectsLocationsDatasetsDicomStoresSetBlobStorageSettingsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource - c.setiampolicyrequest = setiampolicyrequest + c.setblobstoragesettingsrequest = setblobstoragesettingsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall { +func (c *ProjectsLocationsDatasetsDicomStoresSetBlobStorageSettingsCall) Fields(s ...googleapi.Field) *ProjectsLocationsDatasetsDicomStoresSetBlobStorageSettingsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall { +func (c *ProjectsLocationsDatasetsDicomStoresSetBlobStorageSettingsCall) Context(ctx context.Context) *ProjectsLocationsDatasetsDicomStoresSetBlobStorageSettingsCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall) Header() http.Header { +func (c *ProjectsLocationsDatasetsDicomStoresSetBlobStorageSettingsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsDatasetsDicomStoresSetBlobStorageSettingsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setblobstoragesettingsrequest) if err != nil { return nil, err } c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setBlobStorageSettings") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -12865,12 +13190,12 @@ func (c *ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall) doRequest(alt str return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "healthcare.projects.locations.datasets.dicomStores.setIamPolicy" call. +// Do executes the "healthcare.projects.locations.datasets.dicomStores.setBlobStorageSettings" call. // Any non-2xx status code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) in +// *Operation.ServerResponse.Header or (if a response was returned at all) in // error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check // whether the returned error was because http.StatusNotModified was returned. 
-func (c *ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +func (c *ProjectsLocationsDatasetsDicomStoresSetBlobStorageSettingsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -12889,7 +13214,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall) Do(opts ...google if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -12902,36 +13227,141 @@ func (c *ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall) Do(opts ...google return ret, nil } -type ProjectsLocationsDatasetsDicomStoresStoreInstancesCall struct { - s *Service - parent string - dicomWebPath string - body_ io.Reader - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall struct { + s *Service + resource string + setiampolicyrequest *SetIamPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// StoreInstances: StoreInstances stores DICOM instances associated with study -// instance unique identifiers (SUID). See [Store Transaction] -// (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.5). -// For details on the implementation of StoreInstances, see Store transaction -// (https://cloud.google.com/healthcare/docs/dicom#store_transaction) in the -// Cloud Healthcare API conformance statement. For samples that show how to -// call StoreInstances, see Store DICOM data -// (https://cloud.google.com/healthcare/docs/how-tos/dicomweb#store-dicom). +// SetIamPolicy: Sets the access control policy on the specified resource. +// Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, +// and `PERMISSION_DENIED` errors. // -// - dicomWebPath: The path of the StoreInstances DICOMweb request. For -// example, `studies/[{study_uid}]`. Note that the `study_uid` is optional. -// - parent: The name of the DICOM store that is being accessed. For example, -// `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomS -// tores/{dicom_store_id}`. -func (r *ProjectsLocationsDatasetsDicomStoresService) StoreInstances(parent string, dicomWebPath string, body_ io.Reader) *ProjectsLocationsDatasetsDicomStoresStoreInstancesCall { - c := &ProjectsLocationsDatasetsDicomStoresStoreInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.dicomWebPath = dicomWebPath - c.body_ = body_ - return c +// - resource: REQUIRED: The resource for which the policy is being specified. +// See Resource names (https://cloud.google.com/apis/design/resource_names) +// for the appropriate value for this field. +func (r *ProjectsLocationsDatasetsDicomStoresService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall { + c := &ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.setiampolicyrequest = setiampolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. 
+func (c *ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "healthcare.projects.locations.datasets.dicomStores.setIamPolicy" call. +// Any non-2xx status code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsDatasetsDicomStoresStoreInstancesCall struct { + s *Service + parent string + dicomWebPath string + body_ io.Reader + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// StoreInstances: StoreInstances stores DICOM instances associated with study +// instance unique identifiers (SUID). See [Store Transaction] +// (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.5). +// For details on the implementation of StoreInstances, see Store transaction +// (https://cloud.google.com/healthcare/docs/dicom#store_transaction) in the +// Cloud Healthcare API conformance statement. 
For samples that show how to +// call StoreInstances, see Store DICOM data +// (https://cloud.google.com/healthcare/docs/how-tos/dicomweb#store-dicom). +// +// - dicomWebPath: The path of the StoreInstances DICOMweb request. For +// example, `studies/[{study_uid}]`. Note that the `study_uid` is optional. +// - parent: The name of the DICOM store that is being accessed. For example, +// `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomS +// tores/{dicom_store_id}`. +func (r *ProjectsLocationsDatasetsDicomStoresService) StoreInstances(parent string, dicomWebPath string, body_ io.Reader) *ProjectsLocationsDatasetsDicomStoresStoreInstancesCall { + c := &ProjectsLocationsDatasetsDicomStoresStoreInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.dicomWebPath = dicomWebPath + c.body_ = body_ + return c } // Fields allows partial responses to be retrieved. See @@ -13199,6 +13629,119 @@ func (c *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesGetStudyMetricsCall) return ret, nil } +type ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSetBlobStorageSettingsCall struct { + s *Service + resource string + setblobstoragesettingsrequest *SetBlobStorageSettingsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetBlobStorageSettings: SetBlobStorageSettings sets the blob storage +// settings of the specified resources. +// +// - resource: The path of the resource to update the blob storage settings in +// the format of +// `projects/{projectID}/locations/{locationID}/datasets/{datasetID}/dicomStor +// es/{dicomStoreID}/dicomWeb/studies/{studyUID}`, +// `projects/{projectID}/locations/{locationID}/datasets/{datasetID}/dicomStor +// es/{dicomStoreID}/dicomWeb/studies/{studyUID}/series/{seriesUID}/`, or +// `projects/{projectID}/locations/{locationID}/datasets/{datasetID}/dicomStor +// es/{dicomStoreID}/dicomWeb/studies/{studyUID}/series/{seriesUID}/instances/ +// {instanceUID}`. If `filter_config` is specified, set the value of +// `resource` to the resource name of a DICOM store in the format +// `projects/{projectID}/locations/{locationID}/datasets/{datasetID}/dicomStor +// es/{dicomStoreID}`. +func (r *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesService) SetBlobStorageSettings(resource string, setblobstoragesettingsrequest *SetBlobStorageSettingsRequest) *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSetBlobStorageSettingsCall { + c := &ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSetBlobStorageSettingsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.setblobstoragesettingsrequest = setblobstoragesettingsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSetBlobStorageSettingsCall) Fields(s ...googleapi.Field) *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSetBlobStorageSettingsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSetBlobStorageSettingsCall) Context(ctx context.Context) *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSetBlobStorageSettingsCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSetBlobStorageSettingsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSetBlobStorageSettingsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setblobstoragesettingsrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setBlobStorageSettings") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "healthcare.projects.locations.datasets.dicomStores.dicomWeb.studies.setBlobStorageSettings" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSetBlobStorageSettingsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + type ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesGetSeriesMetricsCall struct { s *Service series string @@ -13308,82 +13851,87 @@ func (c *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesGetSeriesMetri return ret, nil } -type ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall struct { +type ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesInstancesGetStorageInfoCall struct { s *Service - parent string - dicomWebPath string + resource string urlParams_ gensupport.URLParams + ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Delete: DeleteStudy deletes all instances within the given study. Delete -// requests are equivalent to the GET requests specified in the Retrieve -// transaction. The method returns an Operation which will be marked successful -// when the deletion is complete. Warning: Instances cannot be inserted into a -// study that is being deleted by an operation until the operation completes. -// For samples that show how to call DeleteStudy, see Delete a study, series, -// or instance -// (https://cloud.google.com/healthcare/docs/how-tos/dicomweb#delete-dicom). 
+// GetStorageInfo: GetStorageInfo returns the storage info of the specified +// resource. // -// - dicomWebPath: The path of the DeleteStudy request. For example, -// `studies/{study_uid}`. -// - parent: . -func (r *ProjectsLocationsDatasetsDicomStoresStudiesService) Delete(parent string, dicomWebPath string) *ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall { - c := &ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.dicomWebPath = dicomWebPath +// - resource: The path of the instance to return storage info for, in the +// form: +// `projects/{projectID}/locations/{locationID}/datasets/{datasetID}/dicomStor +// es/{dicomStoreID}/dicomWeb/studies/{studyUID}/series/{seriesUID}/instances/ +// {instanceUID}`. +func (r *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesInstancesService) GetStorageInfo(resource string) *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesInstancesGetStorageInfoCall { + c := &ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesInstancesGetStorageInfoCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall { +func (c *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesInstancesGetStorageInfoCall) Fields(s ...googleapi.Field) *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesInstancesGetStorageInfoCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesInstancesGetStorageInfoCall) IfNoneMatch(entityTag string) *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesInstancesGetStorageInfoCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall) Context(ctx context.Context) *ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall { +func (c *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesInstancesGetStorageInfoCall) Context(ctx context.Context) *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesInstancesGetStorageInfoCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. 
-func (c *ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall) Header() http.Header { +func (c *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesInstancesGetStorageInfoCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesInstancesGetStorageInfoCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/dicomWeb/{+dicomWebPath}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getStorageInfo") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - "dicomWebPath": c.dicomWebPath, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "healthcare.projects.locations.datasets.dicomStores.studies.delete" call. +// Do executes the "healthcare.projects.locations.datasets.dicomStores.dicomWeb.studies.series.instances.getStorageInfo" call. // Any non-2xx status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at all) in +// *StorageInfo.ServerResponse.Header or (if a response was returned at all) in // error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check // whether the returned error was because http.StatusNotModified was returned. -func (c *ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsLocationsDatasetsDicomStoresDicomWebStudiesSeriesInstancesGetStorageInfoCall) Do(opts ...googleapi.CallOption) (*StorageInfo, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -13402,7 +13950,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall) Do(opts ...googl if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &StorageInfo{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -13415,17 +13963,124 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall) Do(opts ...googl return ret, nil } -type ProjectsLocationsDatasetsDicomStoresStudiesRetrieveMetadataCall struct { +type ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall struct { s *Service parent string dicomWebPath string urlParams_ gensupport.URLParams - ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// RetrieveMetadata: RetrieveStudyMetadata returns instance associated with the +// Delete: DeleteStudy deletes all instances within the given study. Delete +// requests are equivalent to the GET requests specified in the Retrieve +// transaction. The method returns an Operation which will be marked successful +// when the deletion is complete. 
Warning: Instances cannot be inserted into a +// study that is being deleted by an operation until the operation completes. +// For samples that show how to call DeleteStudy, see Delete a study, series, +// or instance +// (https://cloud.google.com/healthcare/docs/how-tos/dicomweb#delete-dicom). +// +// - dicomWebPath: The path of the DeleteStudy request. For example, +// `studies/{study_uid}`. +// - parent: . +func (r *ProjectsLocationsDatasetsDicomStoresStudiesService) Delete(parent string, dicomWebPath string) *ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall { + c := &ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.dicomWebPath = dicomWebPath + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall) Context(ctx context.Context) *ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/dicomWeb/{+dicomWebPath}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + "dicomWebPath": c.dicomWebPath, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "healthcare.projects.locations.datasets.dicomStores.studies.delete" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsDatasetsDicomStoresStudiesRetrieveMetadataCall struct { + s *Service + parent string + dicomWebPath string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// RetrieveMetadata: RetrieveStudyMetadata returns instance associated with the // given study presented as metadata with the bulk data removed. See // [RetrieveTransaction] // (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4). @@ -16145,209 +16800,571 @@ type ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall struct { header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified resource. -// Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, -// and `PERMISSION_DENIED` errors. +// SetIamPolicy: Sets the access control policy on the specified resource. +// Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, +// and `PERMISSION_DENIED` errors. +// +// - resource: REQUIRED: The resource for which the policy is being specified. +// See Resource names (https://cloud.google.com/apis/design/resource_names) +// for the appropriate value for this field. +func (r *ProjectsLocationsDatasetsFhirStoresService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall { + c := &ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.setiampolicyrequest = setiampolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "healthcare.projects.locations.datasets.fhirStores.setIamPolicy" call. +// Any non-2xx status code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall struct { + s *Service + resource string + testiampermissionsrequest *TestIamPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the specified +// resource. If the resource does not exist, this will return an empty set of +// permissions, not a `NOT_FOUND` error. Note: This operation is designed to be +// used for building permission-aware UIs and command-line tools, not for +// authorization checking. This operation may "fail open" without warning. +// +// - resource: REQUIRED: The resource for which the policy detail is being +// requested. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the appropriate +// value for this field. +func (r *ProjectsLocationsDatasetsFhirStoresService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall { + c := &ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.testiampermissionsrequest = testiampermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall) Context(ctx context.Context) *ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "healthcare.projects.locations.datasets.fhirStores.testIamPermissions" call. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TestIamPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsDatasetsFhirStoresFhirBinaryCreateCall struct { + s *Service + parent string + body_ io.Reader + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// BinaryCreate: Creates a FHIR Binary resource. This method can be used to +// create a Binary resource either by using one of the accepted FHIR JSON +// content types, or as a raw data stream. 
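Together with the SetIamPolicy call above, the TestIamPermissions binding supports permission probing on FHIR stores. A short sketch, reusing svc and ctx from the earlier example with a hypothetical store path; the permission names are illustrative:

store := "projects/my-proj/locations/us-central1/datasets/my-ds/fhirStores/my-store"
resp, err := svc.Projects.Locations.Datasets.FhirStores.TestIamPermissions(store,
	&healthcare.TestIamPermissionsRequest{
		// Ask only for the permissions the UI actually needs to render.
		Permissions: []string{"healthcare.fhirStores.get", "healthcare.fhirResources.update"},
	}).Context(ctx).Do()
if err != nil {
	log.Fatalf("testIamPermissions: %v", err)
}
fmt.Println("granted:", resp.Permissions) // the subset the caller actually holds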
If a resource is created with this +// method using the FHIR content type this method's behavior is the same as +// `fhir.create` +// (https://cloud.google.com/healthcare-api/docs/reference/rest/v1/projects.locations.datasets.fhirStores.fhir/create). +// If a resource type other than Binary is used in the request it's treated in +// the same way as non-FHIR data (e.g., images, zip archives, pdf files, +// documents). When a non-FHIR content type is used in the request, a Binary +// resource will be generated, and the uploaded data will be stored in the +// `content` field (`DSTU2` and `STU3`), or the `data` field (`R4`). The Binary +// resource's `contentType` will be filled in using the value of the +// `Content-Type` header, and the `securityContext` field (not present in +// `DSTU2`) will be populated from the `X-Security-Context` header if it +// exists. At this time `securityContext` has no special behavior in the Cloud +// Healthcare API. Note: the limit on data ingested through this method is 2 +// GB. For best performance, use a non-FHIR data type instead of wrapping the +// data in a Binary resource. Some of the Healthcare API features, such as +// exporting to BigQuery +// (https://cloud.google.com/healthcare-api/docs/how-tos/fhir-export-bigquery) +// or Pub/Sub notifications +// (https://cloud.google.com/healthcare-api/docs/fhir-pubsub#behavior_when_a_fhir_resource_is_too_large_or_traffic_is_high) +// with full resource content, do not support Binary resources that are larger +// than 10 MB. In these cases the resource's `data` field will be omitted. +// Instead, the "http://hl7.org/fhir/StructureDefinition/data-absent-reason" +// extension will be present to indicate that including the data is +// `unsupported`. On success, an empty `201 Created` response is returned. The +// newly created resource's ID and version are returned in the Location header. +// Using `Prefer: representation=resource` is not allowed for this method. The +// definition of the Binary REST API can be found at +// https://hl7.org/fhir/binary.html#rest. +// +// - parent: The name of the FHIR store this resource belongs to. +func (r *ProjectsLocationsDatasetsFhirStoresFhirService) BinaryCreate(parent string, body_ io.Reader) *ProjectsLocationsDatasetsFhirStoresFhirBinaryCreateCall { + c := &ProjectsLocationsDatasetsFhirStoresFhirBinaryCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.body_ = body_ + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsDatasetsFhirStoresFhirBinaryCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsDatasetsFhirStoresFhirBinaryCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsDatasetsFhirStoresFhirBinaryCreateCall) Context(ctx context.Context) *ProjectsLocationsDatasetsFhirStoresFhirBinaryCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *ProjectsLocationsDatasetsFhirStoresFhirBinaryCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsDatasetsFhirStoresFhirBinaryCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + body = c.body_ + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/fhir/Binary") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "healthcare.projects.locations.datasets.fhirStores.fhir.Binary-create" call. +func (c *ProjectsLocationsDatasetsFhirStoresFhirBinaryCreateCall) Do(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) + return c.doRequest("") +} + +type ProjectsLocationsDatasetsFhirStoresFhirBinaryReadCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// BinaryRead: Gets the contents of a FHIR Binary resource. This method can be +// used to retrieve a Binary resource either by using the FHIR JSON mimetype as +// the value for the Accept header, or as a raw data stream. If the FHIR Accept +// type is used this method will return a Binary resource with the data +// base64-encoded, regardless of how the resource was created. The resource +// data can be retrieved in base64-decoded form if the Accept type of the +// request matches the value of the resource's `contentType` field. The +// definition of the Binary REST API can be found at +// https://hl7.org/fhir/binary.html#rest. +// +// - name: The name of the Binary resource to retrieve. +func (r *ProjectsLocationsDatasetsFhirStoresFhirService) BinaryRead(name string) *ProjectsLocationsDatasetsFhirStoresFhirBinaryReadCall { + c := &ProjectsLocationsDatasetsFhirStoresFhirBinaryReadCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsDatasetsFhirStoresFhirBinaryReadCall) Fields(s ...googleapi.Field) *ProjectsLocationsDatasetsFhirStoresFhirBinaryReadCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsDatasetsFhirStoresFhirBinaryReadCall) IfNoneMatch(entityTag string) *ProjectsLocationsDatasetsFhirStoresFhirBinaryReadCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsDatasetsFhirStoresFhirBinaryReadCall) Context(ctx context.Context) *ProjectsLocationsDatasetsFhirStoresFhirBinaryReadCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *ProjectsLocationsDatasetsFhirStoresFhirBinaryReadCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsDatasetsFhirStoresFhirBinaryReadCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "healthcare.projects.locations.datasets.fhirStores.fhir.Binary-read" call. +func (c *ProjectsLocationsDatasetsFhirStoresFhirBinaryReadCall) Do(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) + return c.doRequest("") +} + +type ProjectsLocationsDatasetsFhirStoresFhirBinaryUpdateCall struct { + s *Service + name string + body_ io.Reader + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// BinaryUpdate: Updates the entire contents of a Binary resource. If the +// specified resource does not exist and the FHIR store has +// enable_update_create set, creates the resource with the client-specified ID. +// It is strongly advised not to include or encode any sensitive data such as +// patient identifiers in client-specified resource IDs. Those IDs are part of +// the FHIR resource path recorded in Cloud Audit Logs and Pub/Sub +// notifications. Those IDs can also be contained in reference fields within +// other resources. This method can be used to update a Binary resource either +// by using one of the accepted FHIR JSON content types, or as a raw data +// stream. If a resource is updated with this method using the FHIR content +// type this method's behavior is the same as `update`. If a resource type +// other than Binary is used in the request it will be treated in the same way +// as non-FHIR data. When a non-FHIR content type is used in the request, a +// Binary resource will be generated using the ID from the resource path, and +// the uploaded data will be stored in the `content` field (`DSTU2` and +// `STU3`), or the `data` field (`R4`). The Binary resource's `contentType` +// will be filled in using the value of the `Content-Type` header, and the +// `securityContext` field (not present in `DSTU2`) will be populated from the +// `X-Security-Context` header if it exists. At this time `securityContext` has +// no special behavior in the Cloud Healthcare API. Note: the limit on data +// ingested through this method is 2 GB. For best performance, use a non-FHIR +// data type instead of wrapping the data in a Binary resource. Some of the +// Healthcare API features, such as exporting to BigQuery +// (https://cloud.google.com/healthcare-api/docs/how-tos/fhir-export-bigquery) +// or Pub/Sub notifications +// (https://cloud.google.com/healthcare-api/docs/fhir-pubsub#behavior_when_a_fhir_resource_is_too_large_or_traffic_is_high) +// with full resource content, do not support Binary resources that are larger +// than 10 MB. In these cases the resource's `data` field will be omitted. 
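The Binary-create and Binary-read bindings return raw *http.Response values rather than typed structs, so the caller drives content negotiation through headers. A sketch reusing svc/ctx from above, assuming the obvious extra imports (bytes, io), a hypothetical R4 store path, an assumed reportPDF []byte payload, and a placeholder resource ID ("abc123") standing in for the one returned in the create response's Location header:

store := "projects/my-proj/locations/us-central1/datasets/my-ds/fhirStores/my-store"

// Upload non-FHIR bytes; a Binary resource is generated and, on an R4 store,
// the payload lands in its `data` field.
create := svc.Projects.Locations.Datasets.FhirStores.Fhir.BinaryCreate(store, bytes.NewReader(reportPDF))
create.Header().Set("Content-Type", "application/pdf")
resp, err := create.Context(ctx).Do()
if err != nil {
	log.Fatalf("Binary-create: %v", err)
}
resp.Body.Close() // empty 201 Created; new ID/version are in resp.Header.Get("Location")

// Read it back decoded by sending a matching Accept type.
read := svc.Projects.Locations.Datasets.FhirStores.Fhir.BinaryRead(store + "/fhir/Binary/abc123")
read.Header().Set("Accept", "application/pdf")
got, err := read.Context(ctx).Do()
if err != nil {
	log.Fatalf("Binary-read: %v", err)
}
defer got.Body.Close()
pdf, _ := io.ReadAll(got.Body) // raw bytes, not base64
_ = pdf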
+// Instead, the "http://hl7.org/fhir/StructureDefinition/data-absent-reason" +// extension will be present to indicate that including the data is +// `unsupported`. On success, an empty 200 OK response will be returned, or a +// 201 Created if the resource did not exist. The resource's ID and version are +// returned in the Location header. Using `Prefer: representation=resource` is +// not allowed for this method. The definition of the Binary REST API can be +// found at https://hl7.org/fhir/binary.html#rest. // -// - resource: REQUIRED: The resource for which the policy is being specified. -// See Resource names (https://cloud.google.com/apis/design/resource_names) -// for the appropriate value for this field. -func (r *ProjectsLocationsDatasetsFhirStoresService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall { - c := &ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.setiampolicyrequest = setiampolicyrequest +// - name: The name of the resource to update. +func (r *ProjectsLocationsDatasetsFhirStoresFhirService) BinaryUpdate(name string, body_ io.Reader) *ProjectsLocationsDatasetsFhirStoresFhirBinaryUpdateCall { + c := &ProjectsLocationsDatasetsFhirStoresFhirBinaryUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.body_ = body_ return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall { +func (c *ProjectsLocationsDatasetsFhirStoresFhirBinaryUpdateCall) Fields(s ...googleapi.Field) *ProjectsLocationsDatasetsFhirStoresFhirBinaryUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall { +func (c *ProjectsLocationsDatasetsFhirStoresFhirBinaryUpdateCall) Context(ctx context.Context) *ProjectsLocationsDatasetsFhirStoresFhirBinaryUpdateCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall) Header() http.Header { +func (c *ProjectsLocationsDatasetsFhirStoresFhirBinaryUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +func (c *ProjectsLocationsDatasetsFhirStoresFhirBinaryUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) - if err != nil { - return nil, err - } - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy") + body = c.body_ + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?"
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "healthcare.projects.locations.datasets.fhirStores.setIamPolicy" call. -// Any non-2xx status code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was returned. -func (c *ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "healthcare.projects.locations.datasets.fhirStores.fhir.Binary-update" call. +func (c *ProjectsLocationsDatasetsFhirStoresFhirBinaryUpdateCall) Do(opts ...googleapi.CallOption) (*http.Response, error) { gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil + return c.doRequest("") } -type ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall struct { - s *Service - resource string - testiampermissionsrequest *TestIamPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsDatasetsFhirStoresFhirBinaryVreadCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the specified -// resource. If the resource does not exist, this will return an empty set of -// permissions, not a `NOT_FOUND` error. Note: This operation is designed to be -// used for building permission-aware UIs and command-line tools, not for -// authorization checking. This operation may "fail open" without warning. +// BinaryVread: Gets the contents of a version (current or historical) of a +// FHIR Binary resource by version ID. This method can be used to retrieve a +// Binary resource version either by using the FHIR JSON mimetype as the value +// for the Accept header, or as a raw data stream. If the FHIR Accept type is +// used this method will return a Binary resource with the data base64-encoded, +// regardless of how the resource version was created. The resource data can be +// retrieved in base64-decoded form if the Accept type of the request matches +// the value of the resource version's `contentType` field. The definition of +// the Binary REST API can be found at https://hl7.org/fhir/binary.html#rest. // -// - resource: REQUIRED: The resource for which the policy detail is being -// requested. 
See Resource names -// (https://cloud.google.com/apis/design/resource_names) for the appropriate -// value for this field. -func (r *ProjectsLocationsDatasetsFhirStoresService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall { - c := &ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.testiampermissionsrequest = testiampermissionsrequest +// - name: The name of the Binary resource version to retrieve. +func (r *ProjectsLocationsDatasetsFhirStoresFhirService) BinaryVread(name string) *ProjectsLocationsDatasetsFhirStoresFhirBinaryVreadCall { + c := &ProjectsLocationsDatasetsFhirStoresFhirBinaryVreadCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall { +func (c *ProjectsLocationsDatasetsFhirStoresFhirBinaryVreadCall) Fields(s ...googleapi.Field) *ProjectsLocationsDatasetsFhirStoresFhirBinaryVreadCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsDatasetsFhirStoresFhirBinaryVreadCall) IfNoneMatch(entityTag string) *ProjectsLocationsDatasetsFhirStoresFhirBinaryVreadCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall) Context(ctx context.Context) *ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall { +func (c *ProjectsLocationsDatasetsFhirStoresFhirBinaryVreadCall) Context(ctx context.Context) *ProjectsLocationsDatasetsFhirStoresFhirBinaryVreadCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall) Header() http.Header { +func (c *ProjectsLocationsDatasetsFhirStoresFhirBinaryVreadCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) - if err != nil { - return nil, err +func (c *ProjectsLocationsDatasetsFhirStoresFhirBinaryVreadCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "healthcare.projects.locations.datasets.fhirStores.testIamPermissions" call. -// Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { +// Do executes the "healthcare.projects.locations.datasets.fhirStores.fhir.Binary-vread" call. +func (c *ProjectsLocationsDatasetsFhirStoresFhirBinaryVreadCall) Do(opts ...googleapi.CallOption) (*http.Response, error) { gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &TestIamPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil + return c.doRequest("") } type ProjectsLocationsDatasetsFhirStoresFhirPatientEverythingCall struct { @@ -19199,6 +20216,116 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresPatchCall) Do(opts ...googleapi.Cal return ret, nil } +type ProjectsLocationsDatasetsHl7V2StoresRollbackCall struct { + s *Service + name string + rollbackhl7v2messagesrequest *RollbackHl7V2MessagesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Rollback: Rolls back messages from the HL7v2 store to the specified time. +// This method returns an Operation that can be used to track the status of the +// rollback by calling GetOperation. Immediate fatal errors appear in the error +// field, errors are also logged to Cloud Logging (see Viewing error logs in +// Cloud Logging (https://cloud.google.com/healthcare/docs/how-tos/logging)). +// Otherwise, when the operation finishes, a detailed response of type +// RollbackHl7V2MessagesResponse is returned in the response field. The +// metadata field type for this operation is OperationMetadata. +// +// - name: The name of the HL7v2 store to rollback, in the format of +// "projects/{project_id}/locations/{location_id}/datasets/{dataset_id} +// /hl7V2Stores/{hl7v2_store_id}". +func (r *ProjectsLocationsDatasetsHl7V2StoresService) Rollback(name string, rollbackhl7v2messagesrequest *RollbackHl7V2MessagesRequest) *ProjectsLocationsDatasetsHl7V2StoresRollbackCall { + c := &ProjectsLocationsDatasetsHl7V2StoresRollbackCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.rollbackhl7v2messagesrequest = rollbackhl7v2messagesrequest + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsDatasetsHl7V2StoresRollbackCall) Fields(s ...googleapi.Field) *ProjectsLocationsDatasetsHl7V2StoresRollbackCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsDatasetsHl7V2StoresRollbackCall) Context(ctx context.Context) *ProjectsLocationsDatasetsHl7V2StoresRollbackCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsDatasetsHl7V2StoresRollbackCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsDatasetsHl7V2StoresRollbackCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.rollbackhl7v2messagesrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:rollback") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "healthcare.projects.locations.datasets.hl7V2Stores.rollback" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsDatasetsHl7V2StoresRollbackCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + type ProjectsLocationsDatasetsHl7V2StoresSetIamPolicyCall struct { s *Service resource string @@ -20100,7 +21227,7 @@ type ProjectsLocationsDatasetsHl7V2StoresMessagesPatchCall struct { // // - name: Output only. Resource name of the Message, of the form // `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/hl7V2S -// tores/{hl7_v2_store_id}/messages/{message_id}`. Assigned by the server. +// tores/{hl7_v2_store_id}/messages/{message_id}`. 
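The new HL7v2 rollback binding returns a long-running Operation. A sketch reusing svc/ctx from above; the request fields shown (RollbackTime, ResultGcsBucket) follow the RollbackHl7V2MessagesRequest schema and the values are placeholders:

op, err := svc.Projects.Locations.Datasets.Hl7V2Stores.Rollback(
	"projects/my-proj/locations/us-central1/datasets/my-ds/hl7V2Stores/my-store",
	&healthcare.RollbackHl7V2MessagesRequest{
		RollbackTime:    "2024-09-18T00:00:00Z", // messages after this instant are rolled back
		ResultGcsBucket: "gs://my-rollback-results",
	}).Context(ctx).Do()
if err != nil {
	log.Fatalf("rollback: %v", err)
}
// Poll the returned Operation via the operations service until op.Done; the
// detailed outcome arrives as a RollbackHl7V2MessagesResponse in op.Response.
fmt.Println("started:", op.Name)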
func (r *ProjectsLocationsDatasetsHl7V2StoresMessagesService) Patch(name string, message *Message) *ProjectsLocationsDatasetsHl7V2StoresMessagesPatchCall { c := &ProjectsLocationsDatasetsHl7V2StoresMessagesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name diff --git a/terraform/providers/google/vendor/google.golang.org/api/iam/v1/iam-api.json b/terraform/providers/google/vendor/google.golang.org/api/iam/v1/iam-api.json index aacbc83fc93..2dd20fbd4d1 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/iam/v1/iam-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/iam/v1/iam-api.json @@ -988,7 +988,7 @@ ] }, "delete": { - "description": "Deletes a custom Role. When you delete a custom role, the following changes occur immediately: * You cannot bind a principal to the custom role in an IAM Policy. * Existing bindings to the custom role are not changed, but they have no effect. * By default, the response from ListRoles does not include the custom role. You have 7 days to undelete the custom role. After 7 days, the following changes occur: * The custom role is permanently deleted and cannot be recovered. * If an IAM policy contains a binding to the custom role, the binding is permanently removed.", + "description": "Deletes a custom Role. When you delete a custom role, the following changes occur immediately: * You cannot bind a principal to the custom role in an IAM Policy. * Existing bindings to the custom role are not changed, but they have no effect. * By default, the response from ListRoles does not include the custom role. A deleted custom role still counts toward the [custom role limit](https://cloud.google.com/iam/help/limits) until it is permanently deleted. You have 7 days to undelete the custom role. After 7 days, the following changes occur: * The custom role is permanently deleted and cannot be recovered. * If an IAM policy contains a binding to the custom role, the binding is permanently removed. * The custom role no longer counts toward your custom role limit.", "flatPath": "v1/organizations/{organizationsId}/roles/{rolesId}", "httpMethod": "DELETE", "id": "iam.organizations.roles.delete", @@ -2288,7 +2288,7 @@ ] }, "delete": { - "description": "Deletes a custom Role. When you delete a custom role, the following changes occur immediately: * You cannot bind a principal to the custom role in an IAM Policy. * Existing bindings to the custom role are not changed, but they have no effect. * By default, the response from ListRoles does not include the custom role. You have 7 days to undelete the custom role. After 7 days, the following changes occur: * The custom role is permanently deleted and cannot be recovered. * If an IAM policy contains a binding to the custom role, the binding is permanently removed.", + "description": "Deletes a custom Role. When you delete a custom role, the following changes occur immediately: * You cannot bind a principal to the custom role in an IAM Policy. * Existing bindings to the custom role are not changed, but they have no effect. * By default, the response from ListRoles does not include the custom role. A deleted custom role still counts toward the [custom role limit](https://cloud.google.com/iam/help/limits) until it is permanently deleted. You have 7 days to undelete the custom role. After 7 days, the following changes occur: * The custom role is permanently deleted and cannot be recovered. * If an IAM policy contains a binding to the custom role, the binding is permanently removed. 
* The custom role no longer counts toward your custom role limit.", "flatPath": "v1/projects/{projectsId}/roles/{rolesId}", "httpMethod": "DELETE", "id": "iam.projects.roles.delete", @@ -3056,34 +3056,6 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, - "patch": { - "description": "Patches a ServiceAccountKey.", - "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys/{keysId}:patch", - "httpMethod": "POST", - "id": "iam.projects.serviceAccounts.keys.patch", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "The resource name of the service account key in the following format `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`.", - "location": "path", - "pattern": "^projects/[^/]+/serviceAccounts/[^/]+/keys/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}:patch", - "request": { - "$ref": "PatchServiceAccountKeyRequest" - }, - "response": { - "$ref": "ServiceAccountKey" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, "upload": { "description": "Uploads the public key portion of a key pair that you manage, and associates the public key with a ServiceAccount. After you upload the public key, you can use the private key from the key pair as a service account key.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys:upload", @@ -3216,7 +3188,7 @@ } } }, - "revision": "20240530", + "revision": "20240918", "rootUrl": "https://iam.googleapis.com/", "schemas": { "AccessRestrictions": { @@ -3583,7 +3555,7 @@ ], "enumDescriptions": [ "No AttributesType specified.", - "Used to get the user's group claims from the Azure AD identity provider using configuration provided in ExtraAttributesOAuth2Client and `mail` property of the `microsoft.graph.group` object is used for claim mapping. See https://learn.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0#properties for more details on `microsoft.graph.group` properties. The attributes obtained from idntity provider are mapped to `assertion.groups`." + "Used to get the user's group claims from the Microsoft Entra ID identity provider using configuration provided in ExtraAttributesOAuth2Client and `mail` property of the `microsoft.graph.group` object is used for claim mapping. See https://learn.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0#properties for more details on `microsoft.graph.group` properties. The attributes obtained from the identity provider are mapped to `assertion.groups`." ], "type": "string" }, @@ -3718,7 +3690,7 @@ "id": "GoogleIamAdminV1WorkforcePoolProviderSaml", "properties": { "idpMetadataXml": { - "description": "Required. SAML Identity provider configuration metadata xml doc. The xml document should comply with [SAML 2.0 specification](https://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf). The max size of the acceptable xml document will be bounded to 128k characters. The metadata xml document should satisfy the following constraints: 1) Must contain an Identity Provider Entity ID. 2) Must contain at least one non-expired signing key certificate. 3) For each signing key: a) Valid from should be no more than 7 days from now. b) Valid to should be no more than 15 years in the future. 4) Up to 3 IdP signing keys are allowed in the metadata xml. When updating the provider's metadata xml, at least one non-expired signing key must overlap with the existing metadata.
This requirement is skipped if there are no non-expired signing keys present in the existing metadata.", + "description": "Required. SAML Identity provider configuration metadata xml doc. The xml document should comply with [SAML 2.0 specification](https://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf). The max size of the acceptable xml document will be bounded to 128k characters. The metadata xml document should satisfy the following constraints: 1) Must contain an Identity Provider Entity ID. 2) Must contain at least one non-expired signing key certificate. 3) For each signing key: a) Valid from should be no more than 7 days from now. b) Valid to should be no more than 20 years in the future. 4) Up to 3 IdP signing keys are allowed in the metadata xml. When updating the provider's metadata xml, at least one non-expired signing key must overlap with the existing metadata. This requirement is skipped if there are no non-expired signing keys present in the existing metadata.", "type": "string" } }, @@ -4271,22 +4243,6 @@ }, "type": "object" }, - "PatchServiceAccountKeyRequest": { - "description": "The service account key patch request.", - "id": "PatchServiceAccountKeyRequest", - "properties": { - "serviceAccountKey": { - "$ref": "ServiceAccountKey", - "description": "Required. The service account key to update." - }, - "updateMask": { - "description": "Required. The update mask to apply to the service account key. Only the following fields are eligible for patching: - contact - description", - "format": "google-fieldmask", - "type": "string" - } - }, - "type": "object" - }, "PatchServiceAccountRequest": { "description": "The service account patch request. You can patch only the `display_name` and `description` fields. You must use the `update_mask` field to specify which of these fields you want to patch. Only the fields specified in the request are guaranteed to be returned in the response. Other fields may be empty in the response.", "id": "PatchServiceAccountRequest", @@ -4462,7 +4418,7 @@ "type": "string" }, "pageSize": { - "description": "Optional limit on the number of roles to include in the response. The default is 300, and the maximum is 1,000.", + "description": "Optional limit on the number of roles to include in the response. The default is 300, and the maximum is 2,000.", "format": "int32", "type": "integer" }, @@ -4631,7 +4587,7 @@ "id": "Saml", "properties": { "idpMetadataXml": { - "description": "Required. SAML identity provider (IdP) configuration metadata XML doc. The XML document must comply with the [SAML 2.0 specification](https://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf). The maximum size of an acceptable XML document is 128K characters. The SAML metadata XML document must satisfy the following constraints: * Must contain an IdP Entity ID. * Must contain at least one non-expired signing certificate. * For each signing certificate, the expiration must be: * From no more than 7 days in the future. * To no more than 15 years in the future. * Up to three IdP signing keys are allowed. When updating the provider's metadata XML, at least one non-expired signing key must overlap with the existing metadata. This requirement is skipped if there are no non-expired signing keys present in the existing metadata.", + "description": "Required. SAML identity provider (IdP) configuration metadata XML doc. The XML document must comply with the [SAML 2.0 specification](https://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf). 
The maximum size of an acceptable XML document is 128K characters. The SAML metadata XML document must satisfy the following constraints: * Must contain an IdP Entity ID. * Must contain at least one non-expired signing certificate. * For each signing certificate, the expiration must be: * From no more than 7 days in the future. * To no more than 20 years in the future. * Up to three IdP signing keys are allowed. When updating the provider's metadata XML, at least one non-expired signing key must overlap with the existing metadata. This requirement is skipped if there are no non-expired signing keys present in the existing metadata.", "type": "string" } }, @@ -4691,19 +4647,6 @@ "description": "Represents a service account key. A service account has two sets of key-pairs: user-managed, and system-managed. User-managed key-pairs can be created and deleted by users. Users are responsible for rotating these keys periodically to ensure security of their service accounts. Users retain the private key of these key-pairs, and Google retains ONLY the public key. System-managed keys are automatically rotated by Google, and are used for signing for a maximum of two weeks. The rotation process is probabilistic, and usage of the new key will gradually ramp up and down over the key's lifetime. If you cache the public key set for a service account, we recommend that you update the cache every 15 minutes. User-managed keys can be added and removed at any time, so it is important to update the cache frequently. For Google-managed keys, Google will publish a key at least 6 hours before it is first used for signing and will keep publishing it for at least 6 hours after it was last used for signing. Public keys for all service accounts are also published at the OAuth2 Service Account API.", "id": "ServiceAccountKey", "properties": { - "contact": { - "description": "Optional. A user provided email address as the point of contact for this service account key. Must be an email address. Limit 64 characters.", - "type": "string" - }, - "creator": { - "description": "Output only. The cloud identity that created this service account key. Populated automatically when the key is created and not editable by the user.", - "readOnly": true, - "type": "string" - }, - "description": { - "description": "Optional. A user provided description of this service account key.", - "type": "string" - }, "disableReason": { "description": "Output only. optional. If the key is disabled, it may have a DisableReason describing why it was disabled.", "enum": [ diff --git a/terraform/providers/google/vendor/google.golang.org/api/iam/v1/iam-gen.go b/terraform/providers/google/vendor/google.golang.org/api/iam/v1/iam-gen.go index 2b457fc84e4..a769cfefba7 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/iam/v1/iam-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/iam/v1/iam-gen.go @@ -569,9 +569,9 @@ type AccessRestrictions struct { NullFields []string `json:"-"` } -func (s *AccessRestrictions) MarshalJSON() ([]byte, error) { +func (s AccessRestrictions) MarshalJSON() ([]byte, error) { type NoMethod AccessRestrictions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AdminAuditData: Audit log information specific to Cloud IAM admin APIs. 
This @@ -593,9 +593,9 @@ type AdminAuditData struct { NullFields []string `json:"-"` } -func (s *AdminAuditData) MarshalJSON() ([]byte, error) { +func (s AdminAuditData) MarshalJSON() ([]byte, error) { type NoMethod AdminAuditData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuditConfig: Specifies the audit configuration for a service. The @@ -634,9 +634,9 @@ type AuditConfig struct { NullFields []string `json:"-"` } -func (s *AuditConfig) MarshalJSON() ([]byte, error) { +func (s AuditConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuditData: Audit log information specific to Cloud IAM. This message is @@ -659,9 +659,9 @@ type AuditData struct { NullFields []string `json:"-"` } -func (s *AuditData) MarshalJSON() ([]byte, error) { +func (s AuditData) MarshalJSON() ([]byte, error) { type NoMethod AuditData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuditLogConfig: Provides the configuration for logging a type of @@ -694,9 +694,9 @@ type AuditLogConfig struct { NullFields []string `json:"-"` } -func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { +func (s AuditLogConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditLogConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuditableService: Contains information about an auditable service. @@ -717,9 +717,9 @@ type AuditableService struct { NullFields []string `json:"-"` } -func (s *AuditableService) MarshalJSON() ([]byte, error) { +func (s AuditableService) MarshalJSON() ([]byte, error) { type NoMethod AuditableService - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Aws: Represents an Amazon Web Services identity provider. @@ -739,9 +739,9 @@ type Aws struct { NullFields []string `json:"-"` } -func (s *Aws) MarshalJSON() ([]byte, error) { +func (s Aws) MarshalJSON() ([]byte, error) { type NoMethod Aws - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Binding: Associates `members`, or principals, with a `role`. @@ -838,9 +838,9 @@ type Binding struct { NullFields []string `json:"-"` } -func (s *Binding) MarshalJSON() ([]byte, error) { +func (s Binding) MarshalJSON() ([]byte, error) { type NoMethod Binding - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BindingDelta: One delta entry for Binding. Each individual change (only one @@ -874,9 +874,9 @@ type BindingDelta struct { NullFields []string `json:"-"` } -func (s *BindingDelta) MarshalJSON() ([]byte, error) { +func (s BindingDelta) MarshalJSON() ([]byte, error) { type NoMethod BindingDelta - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateRoleRequest: The request to create a new role. 
@@ -900,9 +900,9 @@ type CreateRoleRequest struct { NullFields []string `json:"-"` } -func (s *CreateRoleRequest) MarshalJSON() ([]byte, error) { +func (s CreateRoleRequest) MarshalJSON() ([]byte, error) { type NoMethod CreateRoleRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateServiceAccountKeyRequest: The service account key create request. @@ -938,9 +938,9 @@ type CreateServiceAccountKeyRequest struct { NullFields []string `json:"-"` } -func (s *CreateServiceAccountKeyRequest) MarshalJSON() ([]byte, error) { +func (s CreateServiceAccountKeyRequest) MarshalJSON() ([]byte, error) { type NoMethod CreateServiceAccountKeyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateServiceAccountRequest: The service account create request. @@ -966,9 +966,9 @@ type CreateServiceAccountRequest struct { NullFields []string `json:"-"` } -func (s *CreateServiceAccountRequest) MarshalJSON() ([]byte, error) { +func (s CreateServiceAccountRequest) MarshalJSON() ([]byte, error) { type NoMethod CreateServiceAccountRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DisableServiceAccountKeyRequest: The service account key disable request. @@ -1005,9 +1005,9 @@ type DisableServiceAccountKeyRequest struct { NullFields []string `json:"-"` } -func (s *DisableServiceAccountKeyRequest) MarshalJSON() ([]byte, error) { +func (s DisableServiceAccountKeyRequest) MarshalJSON() ([]byte, error) { type NoMethod DisableServiceAccountKeyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DisableServiceAccountRequest: The service account disable request. @@ -1074,9 +1074,9 @@ type Expr struct { NullFields []string `json:"-"` } -func (s *Expr) MarshalJSON() ([]byte, error) { +func (s Expr) MarshalJSON() ([]byte, error) { type NoMethod Expr - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExtendedStatus: Extended status can store additional metadata. For example, @@ -1110,9 +1110,9 @@ type ExtendedStatus struct { NullFields []string `json:"-"` } -func (s *ExtendedStatus) MarshalJSON() ([]byte, error) { +func (s ExtendedStatus) MarshalJSON() ([]byte, error) { type NoMethod ExtendedStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetIamPolicyRequest: Request message for `GetIamPolicy` method. @@ -1133,9 +1133,9 @@ type GetIamPolicyRequest struct { NullFields []string `json:"-"` } -func (s *GetIamPolicyRequest) MarshalJSON() ([]byte, error) { +func (s GetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod GetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetPolicyOptions: Encapsulates settings provided to GetIamPolicy. 
@@ -1165,9 +1165,9 @@ type GetPolicyOptions struct { NullFields []string `json:"-"` } -func (s *GetPolicyOptions) MarshalJSON() ([]byte, error) { +func (s GetPolicyOptions) MarshalJSON() ([]byte, error) { type NoMethod GetPolicyOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleIamAdminV1WorkforcePoolProviderExtraAttributesOAuth2Client: Represents @@ -1183,7 +1183,7 @@ type GoogleIamAdminV1WorkforcePoolProviderExtraAttributesOAuth2Client struct { // Possible values: // "ATTRIBUTES_TYPE_UNSPECIFIED" - No AttributesType specified. // "AZURE_AD_GROUPS_MAIL" - Used to get the user's group claims from the - // Azure AD identity provider using configuration provided in + // Microsoft Entra ID identity provider using configuration provided in // ExtraAttributesOAuth2Client and `mail` property of the // `microsoft.graph.group` object is used for claim mapping. See // https://learn.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0#properties @@ -1218,9 +1218,9 @@ type GoogleIamAdminV1WorkforcePoolProviderExtraAttributesOAuth2Client struct { NullFields []string `json:"-"` } -func (s *GoogleIamAdminV1WorkforcePoolProviderExtraAttributesOAuth2Client) MarshalJSON() ([]byte, error) { +func (s GoogleIamAdminV1WorkforcePoolProviderExtraAttributesOAuth2Client) MarshalJSON() ([]byte, error) { type NoMethod GoogleIamAdminV1WorkforcePoolProviderExtraAttributesOAuth2Client - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleIamAdminV1WorkforcePoolProviderExtraAttributesOAuth2ClientQueryParamete @@ -1248,9 +1248,9 @@ type GoogleIamAdminV1WorkforcePoolProviderExtraAttributesOAuth2ClientQueryParame NullFields []string `json:"-"` } -func (s *GoogleIamAdminV1WorkforcePoolProviderExtraAttributesOAuth2ClientQueryParameters) MarshalJSON() ([]byte, error) { +func (s GoogleIamAdminV1WorkforcePoolProviderExtraAttributesOAuth2ClientQueryParameters) MarshalJSON() ([]byte, error) { type NoMethod GoogleIamAdminV1WorkforcePoolProviderExtraAttributesOAuth2ClientQueryParameters - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleIamAdminV1WorkforcePoolProviderOidc: Represents an OpenId Connect 1.0 @@ -1290,9 +1290,9 @@ type GoogleIamAdminV1WorkforcePoolProviderOidc struct { NullFields []string `json:"-"` } -func (s *GoogleIamAdminV1WorkforcePoolProviderOidc) MarshalJSON() ([]byte, error) { +func (s GoogleIamAdminV1WorkforcePoolProviderOidc) MarshalJSON() ([]byte, error) { type NoMethod GoogleIamAdminV1WorkforcePoolProviderOidc - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleIamAdminV1WorkforcePoolProviderOidcClientSecret: Representation of a @@ -1313,9 +1313,9 @@ type GoogleIamAdminV1WorkforcePoolProviderOidcClientSecret struct { NullFields []string `json:"-"` } -func (s *GoogleIamAdminV1WorkforcePoolProviderOidcClientSecret) MarshalJSON() ([]byte, error) { +func (s GoogleIamAdminV1WorkforcePoolProviderOidcClientSecret) MarshalJSON() ([]byte, error) { type NoMethod GoogleIamAdminV1WorkforcePoolProviderOidcClientSecret - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), 
s.ForceSendFields, s.NullFields) } // GoogleIamAdminV1WorkforcePoolProviderOidcClientSecretValue: Representation @@ -1341,9 +1341,9 @@ type GoogleIamAdminV1WorkforcePoolProviderOidcClientSecretValue struct { NullFields []string `json:"-"` } -func (s *GoogleIamAdminV1WorkforcePoolProviderOidcClientSecretValue) MarshalJSON() ([]byte, error) { +func (s GoogleIamAdminV1WorkforcePoolProviderOidcClientSecretValue) MarshalJSON() ([]byte, error) { type NoMethod GoogleIamAdminV1WorkforcePoolProviderOidcClientSecretValue - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleIamAdminV1WorkforcePoolProviderOidcWebSsoConfig: Configuration for web @@ -1391,9 +1391,9 @@ type GoogleIamAdminV1WorkforcePoolProviderOidcWebSsoConfig struct { NullFields []string `json:"-"` } -func (s *GoogleIamAdminV1WorkforcePoolProviderOidcWebSsoConfig) MarshalJSON() ([]byte, error) { +func (s GoogleIamAdminV1WorkforcePoolProviderOidcWebSsoConfig) MarshalJSON() ([]byte, error) { type NoMethod GoogleIamAdminV1WorkforcePoolProviderOidcWebSsoConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleIamAdminV1WorkforcePoolProviderSaml: Represents a SAML identity @@ -1407,7 +1407,7 @@ type GoogleIamAdminV1WorkforcePoolProviderSaml struct { // constraints: 1) Must contain an Identity Provider Entity ID. 2) Must contain // at least one non-expired signing key certificate. 3) For each signing key: // a) Valid from should be no more than 7 days from now. b) Valid to should be - // no more than 15 years in the future. 4) Up to 3 IdP signing keys are allowed + // no more than 20 years in the future. 4) Up to 3 IdP signing keys are allowed // in the metadata xml. When updating the provider's metadata xml, at least one // non-expired signing key must overlap with the existing metadata. This // requirement is skipped if there are no non-expired signing keys present in @@ -1426,9 +1426,9 @@ type GoogleIamAdminV1WorkforcePoolProviderSaml struct { NullFields []string `json:"-"` } -func (s *GoogleIamAdminV1WorkforcePoolProviderSaml) MarshalJSON() ([]byte, error) { +func (s GoogleIamAdminV1WorkforcePoolProviderSaml) MarshalJSON() ([]byte, error) { type NoMethod GoogleIamAdminV1WorkforcePoolProviderSaml - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // KeyData: Represents a public key data along with its format. @@ -1475,9 +1475,9 @@ type KeyData struct { NullFields []string `json:"-"` } -func (s *KeyData) MarshalJSON() ([]byte, error) { +func (s KeyData) MarshalJSON() ([]byte, error) { type NoMethod KeyData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LintPolicyRequest: The request to lint an IAM policy object. @@ -1504,9 +1504,9 @@ type LintPolicyRequest struct { NullFields []string `json:"-"` } -func (s *LintPolicyRequest) MarshalJSON() ([]byte, error) { +func (s LintPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod LintPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LintPolicyResponse: The response of a lint operation. 
An empty response @@ -1531,9 +1531,9 @@ type LintPolicyResponse struct { NullFields []string `json:"-"` } -func (s *LintPolicyResponse) MarshalJSON() ([]byte, error) { +func (s LintPolicyResponse) MarshalJSON() ([]byte, error) { type NoMethod LintPolicyResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LintResult: Structured response of a single validation unit. @@ -1597,9 +1597,9 @@ type LintResult struct { NullFields []string `json:"-"` } -func (s *LintResult) MarshalJSON() ([]byte, error) { +func (s LintResult) MarshalJSON() ([]byte, error) { type NoMethod LintResult - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListOauthClientCredentialsResponse: Response message for @@ -1623,9 +1623,9 @@ type ListOauthClientCredentialsResponse struct { NullFields []string `json:"-"` } -func (s *ListOauthClientCredentialsResponse) MarshalJSON() ([]byte, error) { +func (s ListOauthClientCredentialsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListOauthClientCredentialsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListOauthClientsResponse: Response message for ListOauthClients. @@ -1652,9 +1652,9 @@ type ListOauthClientsResponse struct { NullFields []string `json:"-"` } -func (s *ListOauthClientsResponse) MarshalJSON() ([]byte, error) { +func (s ListOauthClientsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListOauthClientsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListRolesResponse: The response containing the roles defined under a @@ -1681,9 +1681,9 @@ type ListRolesResponse struct { NullFields []string `json:"-"` } -func (s *ListRolesResponse) MarshalJSON() ([]byte, error) { +func (s ListRolesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListRolesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListServiceAccountKeysResponse: The service account keys list response. @@ -1706,9 +1706,9 @@ type ListServiceAccountKeysResponse struct { NullFields []string `json:"-"` } -func (s *ListServiceAccountKeysResponse) MarshalJSON() ([]byte, error) { +func (s ListServiceAccountKeysResponse) MarshalJSON() ([]byte, error) { type NoMethod ListServiceAccountKeysResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListServiceAccountsResponse: The service account list response. 
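Reviewer note: every hunk above is the same mechanical change from the generator, moving MarshalJSON from a pointer receiver to a value receiver. A minimal, self-contained sketch of the pattern (the Example type is hypothetical, not part of the generated surface) shows why the NoMethod alias exists and why the value receiver is safe:

package main

import (
	"encoding/json"
	"fmt"
)

// Example stands in for any generated struct in this file.
type Example struct {
	Name string `json:"name,omitempty"`
}

// With a value receiver the method belongs to the method sets of both
// Example and *Example, so json.Marshal behaves identically for values and
// pointers. The NoMethod alias keeps the fields but drops the MarshalJSON
// method, preventing infinite recursion when the alias is marshaled.
func (s Example) MarshalJSON() ([]byte, error) {
	type NoMethod Example
	return json.Marshal(NoMethod(s))
}

func main() {
	v := Example{Name: "demo"}
	byValue, _ := json.Marshal(v)    // method found on the value
	byPointer, _ := json.Marshal(&v) // and on the pointer
	fmt.Println(string(byValue), string(byPointer)) // identical output
}

The generated code routes through gensupport.MarshalJSON instead of json.Marshal so that ForceSendFields and NullFields are honored, but the receiver mechanics are the same.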
@@ -1734,9 +1734,9 @@ type ListServiceAccountsResponse struct { NullFields []string `json:"-"` } -func (s *ListServiceAccountsResponse) MarshalJSON() ([]byte, error) { +func (s ListServiceAccountsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListServiceAccountsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListWorkforcePoolProviderKeysResponse: Response message for @@ -1763,9 +1763,9 @@ type ListWorkforcePoolProviderKeysResponse struct { NullFields []string `json:"-"` } -func (s *ListWorkforcePoolProviderKeysResponse) MarshalJSON() ([]byte, error) { +func (s ListWorkforcePoolProviderKeysResponse) MarshalJSON() ([]byte, error) { type NoMethod ListWorkforcePoolProviderKeysResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListWorkforcePoolProvidersResponse: Response message for @@ -1792,9 +1792,9 @@ type ListWorkforcePoolProvidersResponse struct { NullFields []string `json:"-"` } -func (s *ListWorkforcePoolProvidersResponse) MarshalJSON() ([]byte, error) { +func (s ListWorkforcePoolProvidersResponse) MarshalJSON() ([]byte, error) { type NoMethod ListWorkforcePoolProvidersResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListWorkforcePoolsResponse: Response message for ListWorkforcePools. @@ -1820,9 +1820,9 @@ type ListWorkforcePoolsResponse struct { NullFields []string `json:"-"` } -func (s *ListWorkforcePoolsResponse) MarshalJSON() ([]byte, error) { +func (s ListWorkforcePoolsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListWorkforcePoolsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListWorkloadIdentityPoolProviderKeysResponse: Response message for @@ -1849,9 +1849,9 @@ type ListWorkloadIdentityPoolProviderKeysResponse struct { NullFields []string `json:"-"` } -func (s *ListWorkloadIdentityPoolProviderKeysResponse) MarshalJSON() ([]byte, error) { +func (s ListWorkloadIdentityPoolProviderKeysResponse) MarshalJSON() ([]byte, error) { type NoMethod ListWorkloadIdentityPoolProviderKeysResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListWorkloadIdentityPoolProvidersResponse: Response message for @@ -1878,9 +1878,9 @@ type ListWorkloadIdentityPoolProvidersResponse struct { NullFields []string `json:"-"` } -func (s *ListWorkloadIdentityPoolProvidersResponse) MarshalJSON() ([]byte, error) { +func (s ListWorkloadIdentityPoolProvidersResponse) MarshalJSON() ([]byte, error) { type NoMethod ListWorkloadIdentityPoolProvidersResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListWorkloadIdentityPoolsResponse: Response message for @@ -1907,9 +1907,9 @@ type ListWorkloadIdentityPoolsResponse struct { NullFields []string `json:"-"` } -func (s *ListWorkloadIdentityPoolsResponse) MarshalJSON() ([]byte, error) { +func (s ListWorkloadIdentityPoolsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListWorkloadIdentityPoolsResponse - return 
gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OauthClient: Represents an OauthClient. Used to access Google Cloud @@ -1984,9 +1984,9 @@ type OauthClient struct { NullFields []string `json:"-"` } -func (s *OauthClient) MarshalJSON() ([]byte, error) { +func (s OauthClient) MarshalJSON() ([]byte, error) { type NoMethod OauthClient - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OauthClientCredential: Represents an OauthClientCredential. Used to @@ -2025,9 +2025,9 @@ type OauthClientCredential struct { NullFields []string `json:"-"` } -func (s *OauthClientCredential) MarshalJSON() ([]byte, error) { +func (s OauthClientCredential) MarshalJSON() ([]byte, error) { type NoMethod OauthClientCredential - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Oidc: Represents an OpenId Connect 1.0 identity provider. @@ -2066,9 +2066,9 @@ type Oidc struct { NullFields []string `json:"-"` } -func (s *Oidc) MarshalJSON() ([]byte, error) { +func (s Oidc) MarshalJSON() ([]byte, error) { type NoMethod Oidc - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Operation: This resource represents a long-running operation that is the @@ -2113,9 +2113,9 @@ type Operation struct { NullFields []string `json:"-"` } -func (s *Operation) MarshalJSON() ([]byte, error) { +func (s Operation) MarshalJSON() ([]byte, error) { type NoMethod Operation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationMetadata: Represents the metadata of the long-running operation. @@ -2151,34 +2151,9 @@ type OperationMetadata struct { NullFields []string `json:"-"` } -func (s *OperationMetadata) MarshalJSON() ([]byte, error) { +func (s OperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod OperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// PatchServiceAccountKeyRequest: The service account key patch request. -type PatchServiceAccountKeyRequest struct { - // ServiceAccountKey: Required. The service account key to update. - ServiceAccountKey *ServiceAccountKey `json:"serviceAccountKey,omitempty"` - // UpdateMask: Required. The update mask to apply to the service account key. - // Only the following fields are eligible for patching: - contact - description - UpdateMask string `json:"updateMask,omitempty"` - // ForceSendFields is a list of field names (e.g. "ServiceAccountKey") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more - // details. - ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ServiceAccountKey") to include in - // API requests with the JSON null value. By default, fields with empty values - // are omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
- NullFields []string `json:"-"` -} - -func (s *PatchServiceAccountKeyRequest) MarshalJSON() ([]byte, error) { - type NoMethod PatchServiceAccountKeyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PatchServiceAccountRequest: The service account patch request. You can patch @@ -2202,9 +2177,9 @@ type PatchServiceAccountRequest struct { NullFields []string `json:"-"` } -func (s *PatchServiceAccountRequest) MarshalJSON() ([]byte, error) { +func (s PatchServiceAccountRequest) MarshalJSON() ([]byte, error) { type NoMethod PatchServiceAccountRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Permission: A permission which can be included by a role. @@ -2252,9 +2227,9 @@ type Permission struct { NullFields []string `json:"-"` } -func (s *Permission) MarshalJSON() ([]byte, error) { +func (s Permission) MarshalJSON() ([]byte, error) { type NoMethod Permission - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PermissionDelta: A PermissionDelta message to record the added_permissions @@ -2277,9 +2252,9 @@ type PermissionDelta struct { NullFields []string `json:"-"` } -func (s *PermissionDelta) MarshalJSON() ([]byte, error) { +func (s PermissionDelta) MarshalJSON() ([]byte, error) { type NoMethod PermissionDelta - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Policy: An Identity and Access Management (IAM) policy, which specifies @@ -2369,9 +2344,9 @@ type Policy struct { NullFields []string `json:"-"` } -func (s *Policy) MarshalJSON() ([]byte, error) { +func (s Policy) MarshalJSON() ([]byte, error) { type NoMethod Policy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PolicyDelta: The difference delta between two policies. 
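Reviewer note: the ForceSendFields/NullFields boilerplate repeated in these structs is the input that gensupport.MarshalJSON consumes. A hedged usage sketch against the vendored iam/v1 package (the title text is a placeholder):

package main

import (
	"fmt"
	"log"

	iam "google.golang.org/api/iam/v1"
)

func main() {
	role := &iam.Role{
		Title:       "Example viewer",
		Description: "", // zero value: dropped from the JSON by default
		// Listing the field name forces `"description": ""` onto the wire,
		// which is how a caller clears a previously set description.
		ForceSendFields: []string{"Description"},
	}
	b, err := role.MarshalJSON()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // {"description":"","title":"Example viewer"}
}

NullFields works the same way but emits an explicit JSON null instead of the field's zero value.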
@@ -2391,9 +2366,9 @@ type PolicyDelta struct { NullFields []string `json:"-"` } -func (s *PolicyDelta) MarshalJSON() ([]byte, error) { +func (s PolicyDelta) MarshalJSON() ([]byte, error) { type NoMethod PolicyDelta - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // QueryAuditableServicesRequest: A request to get the list of auditable @@ -2417,9 +2392,9 @@ type QueryAuditableServicesRequest struct { NullFields []string `json:"-"` } -func (s *QueryAuditableServicesRequest) MarshalJSON() ([]byte, error) { +func (s QueryAuditableServicesRequest) MarshalJSON() ([]byte, error) { type NoMethod QueryAuditableServicesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // QueryAuditableServicesResponse: A response containing a list of auditable @@ -2443,9 +2418,9 @@ type QueryAuditableServicesResponse struct { NullFields []string `json:"-"` } -func (s *QueryAuditableServicesResponse) MarshalJSON() ([]byte, error) { +func (s QueryAuditableServicesResponse) MarshalJSON() ([]byte, error) { type NoMethod QueryAuditableServicesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // QueryGrantableRolesRequest: The grantable role query request. @@ -2456,7 +2431,7 @@ type QueryGrantableRolesRequest struct { // `//cloudresourcemanager.googleapis.com/projects/my-project`. FullResourceName string `json:"fullResourceName,omitempty"` // PageSize: Optional limit on the number of roles to include in the response. - // The default is 300, and the maximum is 1,000. + // The default is 300, and the maximum is 2,000. PageSize int64 `json:"pageSize,omitempty"` // PageToken: Optional pagination token returned in an earlier // QueryGrantableRolesResponse. @@ -2479,9 +2454,9 @@ type QueryGrantableRolesRequest struct { NullFields []string `json:"-"` } -func (s *QueryGrantableRolesRequest) MarshalJSON() ([]byte, error) { +func (s QueryGrantableRolesRequest) MarshalJSON() ([]byte, error) { type NoMethod QueryGrantableRolesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // QueryGrantableRolesResponse: The grantable role query response. 
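Reviewer note: the doc change above raises the documented QueryGrantableRoles page-size maximum from 1,000 to 2,000. A pagination sketch clarifies how the PageToken/NextPageToken fields in these request/response types fit together (the resource name is a placeholder and Application Default Credentials are assumed):

package main

import (
	"context"
	"fmt"
	"log"

	iam "google.golang.org/api/iam/v1"
)

func main() {
	ctx := context.Background()
	svc, err := iam.NewService(ctx) // picks up Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	req := &iam.QueryGrantableRolesRequest{
		FullResourceName: "//cloudresourcemanager.googleapis.com/projects/my-project",
		PageSize:         2000, // the documented maximum after this change
	}
	// Pages feeds each NextPageToken back into the request's PageToken until
	// the server returns an empty token.
	err = svc.Roles.QueryGrantableRoles(req).Pages(ctx, func(page *iam.QueryGrantableRolesResponse) error {
		for _, r := range page.Roles {
			fmt.Println(r.Name)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}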
@@ -2507,9 +2482,9 @@ type QueryGrantableRolesResponse struct { NullFields []string `json:"-"` } -func (s *QueryGrantableRolesResponse) MarshalJSON() ([]byte, error) { +func (s QueryGrantableRolesResponse) MarshalJSON() ([]byte, error) { type NoMethod QueryGrantableRolesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // QueryTestablePermissionsRequest: A request to get permissions which can be @@ -2539,9 +2514,9 @@ type QueryTestablePermissionsRequest struct { NullFields []string `json:"-"` } -func (s *QueryTestablePermissionsRequest) MarshalJSON() ([]byte, error) { +func (s QueryTestablePermissionsRequest) MarshalJSON() ([]byte, error) { type NoMethod QueryTestablePermissionsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // QueryTestablePermissionsResponse: The response containing permissions which @@ -2568,9 +2543,9 @@ type QueryTestablePermissionsResponse struct { NullFields []string `json:"-"` } -func (s *QueryTestablePermissionsResponse) MarshalJSON() ([]byte, error) { +func (s QueryTestablePermissionsResponse) MarshalJSON() ([]byte, error) { type NoMethod QueryTestablePermissionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ReconciliationOperationMetadata: Operation metadata returned by the CLH @@ -2603,9 +2578,9 @@ type ReconciliationOperationMetadata struct { NullFields []string `json:"-"` } -func (s *ReconciliationOperationMetadata) MarshalJSON() ([]byte, error) { +func (s ReconciliationOperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod ReconciliationOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Role: A role in the Identity and Access Management API. @@ -2662,9 +2637,9 @@ type Role struct { NullFields []string `json:"-"` } -func (s *Role) MarshalJSON() ([]byte, error) { +func (s Role) MarshalJSON() ([]byte, error) { type NoMethod Role - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Saml: Represents an SAML 2.0 identity provider. @@ -2677,7 +2652,7 @@ type Saml struct { // metadata XML document must satisfy the following constraints: * Must contain // an IdP Entity ID. * Must contain at least one non-expired signing // certificate. * For each signing certificate, the expiration must be: * From - // no more than 7 days in the future. * To no more than 15 years in the future. + // no more than 7 days in the future. * To no more than 20 years in the future. // * Up to three IdP signing keys are allowed. When updating the provider's // metadata XML, at least one non-expired signing key must overlap with the // existing metadata. This requirement is skipped if there are no non-expired @@ -2696,9 +2671,9 @@ type Saml struct { NullFields []string `json:"-"` } -func (s *Saml) MarshalJSON() ([]byte, error) { +func (s Saml) MarshalJSON() ([]byte, error) { type NoMethod Saml - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ServiceAccount: An IAM service account. 
A service account is an account for @@ -2762,9 +2737,9 @@ type ServiceAccount struct { NullFields []string `json:"-"` } -func (s *ServiceAccount) MarshalJSON() ([]byte, error) { +func (s ServiceAccount) MarshalJSON() ([]byte, error) { type NoMethod ServiceAccount - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ServiceAccountKey: Represents a service account key. A service account has @@ -2784,16 +2759,6 @@ func (s *ServiceAccount) MarshalJSON() ([]byte, error) { // Public keys for all service accounts are also published at the OAuth2 // Service Account API. type ServiceAccountKey struct { - // Contact: Optional. A user provided email address as the point of contact for - // this service account key. Must be an email address. Limit 64 characters. - Contact string `json:"contact,omitempty"` - // Creator: Output only. The cloud identity that created this service account - // key. Populated automatically when the key is created and not editable by the - // user. - Creator string `json:"creator,omitempty"` - // Description: Optional. A user provided description of this service account - // key. - Description string `json:"description,omitempty"` // DisableReason: Output only. optional. If the key is disabled, it may have a // DisableReason describing why it was disabled. // @@ -2870,22 +2835,22 @@ type ServiceAccountKey struct { // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Contact") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See + // ForceSendFields is a list of field names (e.g. "DisableReason") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Contact") to include in API + // NullFields is a list of field names (e.g. "DisableReason") to include in API // requests with the JSON null value. By default, fields with empty values are // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ServiceAccountKey) MarshalJSON() ([]byte, error) { +func (s ServiceAccountKey) MarshalJSON() ([]byte, error) { type NoMethod ServiceAccountKey - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ServiceConfig: Configuration for a service. @@ -2905,9 +2870,9 @@ type ServiceConfig struct { NullFields []string `json:"-"` } -func (s *ServiceConfig) MarshalJSON() ([]byte, error) { +func (s ServiceConfig) MarshalJSON() ([]byte, error) { type NoMethod ServiceConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SetIamPolicyRequest: Request message for `SetIamPolicy` method. 
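Reviewer note: this hunk deletes Contact, Creator and Description from ServiceAccountKey (the matching PatchServiceAccountKey surface is removed further down), which is a compile-time break for any caller that touched those fields. A sketch of what still compiles after the sync (the service-account email is a placeholder):

package main

import (
	"context"
	"fmt"
	"log"

	iam "google.golang.org/api/iam/v1"
)

func main() {
	ctx := context.Background()
	svc, err := iam.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// The "-" wildcard lets the server infer the project from the email.
	name := "projects/-/serviceAccounts/sa@my-project.iam.gserviceaccount.com"
	key, err := svc.Projects.ServiceAccounts.Keys.
		Create(name, &iam.CreateServiceAccountKeyRequest{}).
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	// key.Contact = "team@example.com" // removed field: no longer compiles
	fmt.Println(key.Name, key.KeyType) // the surviving fields are unchanged
}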
@@ -2934,9 +2899,9 @@ type SetIamPolicyRequest struct { NullFields []string `json:"-"` } -func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { +func (s SetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod SetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SignBlobRequest: Deprecated. Migrate to Service Account Credentials API @@ -2960,9 +2925,9 @@ type SignBlobRequest struct { NullFields []string `json:"-"` } -func (s *SignBlobRequest) MarshalJSON() ([]byte, error) { +func (s SignBlobRequest) MarshalJSON() ([]byte, error) { type NoMethod SignBlobRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SignBlobResponse: Deprecated. Migrate to Service Account Credentials API @@ -2993,9 +2958,9 @@ type SignBlobResponse struct { NullFields []string `json:"-"` } -func (s *SignBlobResponse) MarshalJSON() ([]byte, error) { +func (s SignBlobResponse) MarshalJSON() ([]byte, error) { type NoMethod SignBlobResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SignJwtRequest: Deprecated. Migrate to Service Account Credentials API @@ -3025,9 +2990,9 @@ type SignJwtRequest struct { NullFields []string `json:"-"` } -func (s *SignJwtRequest) MarshalJSON() ([]byte, error) { +func (s SignJwtRequest) MarshalJSON() ([]byte, error) { type NoMethod SignJwtRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SignJwtResponse: Deprecated. Migrate to Service Account Credentials API @@ -3057,9 +3022,9 @@ type SignJwtResponse struct { NullFields []string `json:"-"` } -func (s *SignJwtResponse) MarshalJSON() ([]byte, error) { +func (s SignJwtResponse) MarshalJSON() ([]byte, error) { type NoMethod SignJwtResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Status: The `Status` type defines a logical error model that is suitable for @@ -3091,9 +3056,9 @@ type Status struct { NullFields []string `json:"-"` } -func (s *Status) MarshalJSON() ([]byte, error) { +func (s Status) MarshalJSON() ([]byte, error) { type NoMethod Status - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsRequest: Request message for `TestIamPermissions` method. 
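Reviewer note: the SignBlob/SignJwt types above stay flagged as deprecated in favor of the Service Account Credentials API, whose generated client this patch also updates (iamcredentials-gen.go below). A minimal migration sketch (email and payload are placeholders):

package main

import (
	"context"
	"fmt"
	"log"

	iamcredentials "google.golang.org/api/iamcredentials/v1"
)

func main() {
	ctx := context.Background()
	svc, err := iamcredentials.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// The "-" wildcard lets the server infer the project from the email.
	name := "projects/-/serviceAccounts/sa@my-project.iam.gserviceaccount.com"
	resp, err := svc.Projects.ServiceAccounts.SignJwt(name, &iamcredentials.SignJwtRequest{
		Payload: `{"iss":"sa@my-project.iam.gserviceaccount.com","aud":"https://example.com/"}`,
	}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.KeyId, resp.SignedJwt)
}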
@@ -3116,9 +3081,9 @@ type TestIamPermissionsRequest struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsResponse: Response message for `TestIamPermissions` @@ -3143,9 +3108,9 @@ type TestIamPermissionsResponse struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UndeleteOauthClientRequest: Request message for UndeleteOauthClient. @@ -3169,9 +3134,9 @@ type UndeleteRoleRequest struct { NullFields []string `json:"-"` } -func (s *UndeleteRoleRequest) MarshalJSON() ([]byte, error) { +func (s UndeleteRoleRequest) MarshalJSON() ([]byte, error) { type NoMethod UndeleteRoleRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UndeleteServiceAccountRequest: The service account undelete request. @@ -3197,9 +3162,9 @@ type UndeleteServiceAccountResponse struct { NullFields []string `json:"-"` } -func (s *UndeleteServiceAccountResponse) MarshalJSON() ([]byte, error) { +func (s UndeleteServiceAccountResponse) MarshalJSON() ([]byte, error) { type NoMethod UndeleteServiceAccountResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UndeleteWorkforcePoolProviderKeyRequest: Request message for @@ -3256,9 +3221,9 @@ type UploadServiceAccountKeyRequest struct { NullFields []string `json:"-"` } -func (s *UploadServiceAccountKeyRequest) MarshalJSON() ([]byte, error) { +func (s UploadServiceAccountKeyRequest) MarshalJSON() ([]byte, error) { type NoMethod UploadServiceAccountKeyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkforcePool: Represents a collection of external workforces. Provides @@ -3324,9 +3289,9 @@ type WorkforcePool struct { NullFields []string `json:"-"` } -func (s *WorkforcePool) MarshalJSON() ([]byte, error) { +func (s WorkforcePool) MarshalJSON() ([]byte, error) { type NoMethod WorkforcePool - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkforcePoolProvider: A configuration for an external identity provider. 
@@ -3443,9 +3408,9 @@ type WorkforcePoolProvider struct { NullFields []string `json:"-"` } -func (s *WorkforcePoolProvider) MarshalJSON() ([]byte, error) { +func (s WorkforcePoolProvider) MarshalJSON() ([]byte, error) { type NoMethod WorkforcePoolProvider - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkforcePoolProviderKey: Represents a public key configuration for a @@ -3492,9 +3457,9 @@ type WorkforcePoolProviderKey struct { NullFields []string `json:"-"` } -func (s *WorkforcePoolProviderKey) MarshalJSON() ([]byte, error) { +func (s WorkforcePoolProviderKey) MarshalJSON() ([]byte, error) { type NoMethod WorkforcePoolProviderKey - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkloadIdentityPool: Represents a collection of workload identities. You @@ -3542,9 +3507,9 @@ type WorkloadIdentityPool struct { NullFields []string `json:"-"` } -func (s *WorkloadIdentityPool) MarshalJSON() ([]byte, error) { +func (s WorkloadIdentityPool) MarshalJSON() ([]byte, error) { type NoMethod WorkloadIdentityPool - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkloadIdentityPoolOperationMetadata: Metadata for long-running @@ -3656,9 +3621,9 @@ type WorkloadIdentityPoolProvider struct { NullFields []string `json:"-"` } -func (s *WorkloadIdentityPoolProvider) MarshalJSON() ([]byte, error) { +func (s WorkloadIdentityPoolProvider) MarshalJSON() ([]byte, error) { type NoMethod WorkloadIdentityPoolProvider - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WorkloadIdentityPoolProviderKey: Represents a public key configuration for @@ -3706,9 +3671,9 @@ type WorkloadIdentityPoolProviderKey struct { NullFields []string `json:"-"` } -func (s *WorkloadIdentityPoolProviderKey) MarshalJSON() ([]byte, error) { +func (s WorkloadIdentityPoolProviderKey) MarshalJSON() ([]byte, error) { type NoMethod WorkloadIdentityPoolProviderKey - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type IamPoliciesLintPolicyCall struct { @@ -6967,10 +6932,13 @@ type OrganizationsRolesDeleteCall struct { // changes occur immediately: * You cannot bind a principal to the custom role // in an IAM Policy. * Existing bindings to the custom role are not changed, // but they have no effect. * By default, the response from ListRoles does not -// include the custom role. You have 7 days to undelete the custom role. After -// 7 days, the following changes occur: * The custom role is permanently -// deleted and cannot be recovered. * If an IAM policy contains a binding to -// the custom role, the binding is permanently removed. +// include the custom role. A deleted custom role still counts toward the +// custom role limit (https://cloud.google.com/iam/help/limits) until it is +// permanently deleted. You have 7 days to undelete the custom role. After 7 +// days, the following changes occur: * The custom role is permanently deleted +// and cannot be recovered. * If an IAM policy contains a binding to the custom +// role, the binding is permanently removed. 
* The custom role no longer counts +// toward your custom role limit. // // - name: The `name` parameter's value depends on the target resource for the // request, namely projects @@ -11739,10 +11707,13 @@ type ProjectsRolesDeleteCall struct { // changes occur immediately: * You cannot bind a principal to the custom role // in an IAM Policy. * Existing bindings to the custom role are not changed, // but they have no effect. * By default, the response from ListRoles does not -// include the custom role. You have 7 days to undelete the custom role. After -// 7 days, the following changes occur: * The custom role is permanently -// deleted and cannot be recovered. * If an IAM policy contains a binding to -// the custom role, the binding is permanently removed. +// include the custom role. A deleted custom role still counts toward the +// custom role limit (https://cloud.google.com/iam/help/limits) until it is +// permanently deleted. You have 7 days to undelete the custom role. After 7 +// days, the following changes occur: * The custom role is permanently deleted +// and cannot be recovered. * If an IAM policy contains a binding to the custom +// role, the binding is permanently removed. * The custom role no longer counts +// toward your custom role limit. // // - name: The `name` parameter's value depends on the target resource for the // request, namely projects @@ -14793,109 +14764,6 @@ func (c *ProjectsServiceAccountsKeysListCall) Do(opts ...googleapi.CallOption) ( return ret, nil } -type ProjectsServiceAccountsKeysPatchCall struct { - s *Service - name string - patchserviceaccountkeyrequest *PatchServiceAccountKeyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Patches a ServiceAccountKey. -// -// - name: The resource name of the service account key in the following format -// `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`. -func (r *ProjectsServiceAccountsKeysService) Patch(name string, patchserviceaccountkeyrequest *PatchServiceAccountKeyRequest) *ProjectsServiceAccountsKeysPatchCall { - c := &ProjectsServiceAccountsKeysPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.patchserviceaccountkeyrequest = patchserviceaccountkeyrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more -// details. -func (c *ProjectsServiceAccountsKeysPatchCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsKeysPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. -func (c *ProjectsServiceAccountsKeysPatchCall) Context(ctx context.Context) *ProjectsServiceAccountsKeysPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns a http.Header that can be modified by the caller to add -// headers to the request. 
-func (c *ProjectsServiceAccountsKeysPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsServiceAccountsKeysPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.patchserviceaccountkeyrequest) - if err != nil { - return nil, err - } - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:patch") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "iam.projects.serviceAccounts.keys.patch" call. -// Any non-2xx status code is an error. Response headers are in either -// *ServiceAccountKey.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified was -// returned. -func (c *ProjectsServiceAccountsKeysPatchCall) Do(opts ...googleapi.CallOption) (*ServiceAccountKey, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &ServiceAccountKey{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil -} - type ProjectsServiceAccountsKeysUploadCall struct { s *Service name string diff --git a/terraform/providers/google/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go b/terraform/providers/google/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go index 70b57dea6a4..f0c69484583 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go @@ -217,9 +217,9 @@ type GenerateAccessTokenRequest struct { NullFields []string `json:"-"` } -func (s *GenerateAccessTokenRequest) MarshalJSON() ([]byte, error) { +func (s GenerateAccessTokenRequest) MarshalJSON() ([]byte, error) { type NoMethod GenerateAccessTokenRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type GenerateAccessTokenResponse struct { @@ -243,9 +243,9 @@ type GenerateAccessTokenResponse struct { NullFields []string `json:"-"` } -func (s *GenerateAccessTokenResponse) MarshalJSON() ([]byte, error) { +func (s GenerateAccessTokenResponse) MarshalJSON() ([]byte, error) { type NoMethod GenerateAccessTokenResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), 
s.ForceSendFields, s.NullFields) } type GenerateIdTokenRequest struct { @@ -277,9 +277,9 @@ type GenerateIdTokenRequest struct { NullFields []string `json:"-"` } -func (s *GenerateIdTokenRequest) MarshalJSON() ([]byte, error) { +func (s GenerateIdTokenRequest) MarshalJSON() ([]byte, error) { type NoMethod GenerateIdTokenRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type GenerateIdTokenResponse struct { @@ -301,9 +301,9 @@ type GenerateIdTokenResponse struct { NullFields []string `json:"-"` } -func (s *GenerateIdTokenResponse) MarshalJSON() ([]byte, error) { +func (s GenerateIdTokenResponse) MarshalJSON() ([]byte, error) { type NoMethod GenerateIdTokenResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SignBlobRequest struct { @@ -331,9 +331,9 @@ type SignBlobRequest struct { NullFields []string `json:"-"` } -func (s *SignBlobRequest) MarshalJSON() ([]byte, error) { +func (s SignBlobRequest) MarshalJSON() ([]byte, error) { type NoMethod SignBlobRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SignBlobResponse struct { @@ -368,9 +368,9 @@ type SignBlobResponse struct { NullFields []string `json:"-"` } -func (s *SignBlobResponse) MarshalJSON() ([]byte, error) { +func (s SignBlobResponse) MarshalJSON() ([]byte, error) { type NoMethod SignBlobResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SignJwtRequest struct { @@ -402,9 +402,9 @@ type SignJwtRequest struct { NullFields []string `json:"-"` } -func (s *SignJwtRequest) MarshalJSON() ([]byte, error) { +func (s SignJwtRequest) MarshalJSON() ([]byte, error) { type NoMethod SignJwtRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SignJwtResponse struct { @@ -441,9 +441,9 @@ type SignJwtResponse struct { NullFields []string `json:"-"` } -func (s *SignJwtResponse) MarshalJSON() ([]byte, error) { +func (s SignJwtResponse) MarshalJSON() ([]byte, error) { type NoMethod SignJwtResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ProjectsServiceAccountsGenerateAccessTokenCall struct { diff --git a/terraform/providers/google/vendor/google.golang.org/api/internal/creds.go b/terraform/providers/google/vendor/google.golang.org/api/internal/creds.go index e6c4fe90d42..4ebeb61c1a2 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/internal/creds.go +++ b/terraform/providers/google/vendor/google.golang.org/api/internal/creds.go @@ -100,18 +100,12 @@ func credsNewAuth(ctx context.Context, settings *DialSettings) (*google.Credenti aud = settings.DefaultAudience } - tokenURL, oauth2Client, err := GetOAuth2Configuration(ctx, settings) - if err != nil { - return nil, err - } creds, err := credentials.DetectDefault(&credentials.DetectOptions{ Scopes: scopes, Audience: aud, CredentialsFile: settings.CredentialsFile, CredentialsJSON: settings.CredentialsJSON, UseSelfSignedJWT: useSelfSignedJWT, - TokenURL: tokenURL, - Client: 
oauth2Client, }) if err != nil { return nil, err @@ -127,7 +121,7 @@ func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, erro if ds.Credentials != nil { return ds.Credentials, nil } - if ds.CredentialsJSON != nil { + if len(ds.CredentialsJSON) > 0 { return credentialsFromJSON(ctx, ds.CredentialsJSON, ds) } if ds.CredentialsFile != "" { @@ -302,14 +296,3 @@ func baseTransport() *http.Transport { ExpectContinueTimeout: 1 * time.Second, } } - -// ErrUniverseNotMatch composes an error string from the provided universe -// domain sources (DialSettings and Credentials, respectively). -func ErrUniverseNotMatch(settingsUD, credsUD string) error { - return fmt.Errorf( - "the configured universe domain (%q) does not match the universe "+ - "domain found in the credentials (%q). If you haven't configured "+ - "WithUniverseDomain explicitly, \"googleapis.com\" is the default", - settingsUD, - credsUD) -} diff --git a/terraform/providers/google/vendor/google.golang.org/api/internal/gensupport/resumable.go b/terraform/providers/google/vendor/google.golang.org/api/internal/gensupport/resumable.go index 08e7aacefb6..f828ddb60e6 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/internal/gensupport/resumable.go +++ b/terraform/providers/google/vendor/google.golang.org/api/internal/gensupport/resumable.go @@ -171,6 +171,10 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err if resp != nil && resp.Body != nil { resp.Body.Close() } + // If there were retries, indicate this in the error message and wrap the final error. + if rx.attempts > 1 { + return nil, fmt.Errorf("chunk upload failed after %d attempts; final error: %w", rx.attempts, err) + } return nil, err } // This case is very unlikely but possible only if rx.ChunkRetryDeadline is diff --git a/terraform/providers/google/vendor/google.golang.org/api/internal/settings.go b/terraform/providers/google/vendor/google.golang.org/api/internal/settings.go index 6d0c18e5a87..32949cccbd1 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/internal/settings.go +++ b/terraform/providers/google/vendor/google.golang.org/api/internal/settings.go @@ -126,7 +126,7 @@ func (ds *DialSettings) Validate() error { if ds.Credentials != nil { nCreds++ } - if ds.CredentialsJSON != nil { + if len(ds.CredentialsJSON) > 0 { nCreds++ } if ds.CredentialsFile != "" { @@ -204,8 +204,7 @@ func (ds *DialSettings) IsUniverseDomainGDU() bool { } // GetUniverseDomain returns the default service domain for a given Cloud -// universe, from google.Credentials, for comparison with the value returned by -// (*DialSettings).GetUniverseDomain. This wrapper function should be removed +// universe, from google.Credentials. This wrapper function should be removed // to close https://github.com/googleapis/google-api-go-client/issues/2399. func GetUniverseDomain(creds *google.Credentials) (string, error) { timer := time.NewTimer(time.Second) diff --git a/terraform/providers/google/vendor/google.golang.org/api/internal/version.go b/terraform/providers/google/vendor/google.golang.org/api/internal/version.go index 720cba16a44..3bfd292b39a 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/internal/version.go +++ b/terraform/providers/google/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library.
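Reviewer note: the resumable.go hunk above now wraps the final chunk error with %w, so the underlying cause stays matchable after retries are exhausted. A hedged sketch of how a caller can inspect such an error (the wrapped value here is constructed by hand to stand in for what a media-upload Do() would return):

package main

import (
	"errors"
	"fmt"
	"log"

	"google.golang.org/api/googleapi"
)

// inspect shows that a wrapped "chunk upload failed after N attempts" error
// still unwraps to the original *googleapi.Error.
func inspect(err error) {
	var gerr *googleapi.Error
	if errors.As(err, &gerr) { // errors.As walks the %w chain
		log.Printf("upload failed with HTTP %d: %v", gerr.Code, err)
		return
	}
	log.Printf("upload failed: %v", err)
}

func main() {
	cause := &googleapi.Error{Code: 503, Message: "backend unavailable"}
	err := fmt.Errorf("chunk upload failed after 3 attempts; final error: %w", cause)
	inspect(err)
}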
-const Version = "0.185.0" +const Version = "0.201.0" diff --git a/terraform/providers/google/vendor/google.golang.org/api/logging/v2/logging-api.json b/terraform/providers/google/vendor/google.golang.org/api/logging/v2/logging-api.json index 4ef28e6247f..487b58fbc47 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/logging/v2/logging-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/logging/v2/logging-api.json @@ -1226,6 +1226,11 @@ "parent" ], "parameters": { + "filter": { + "description": "Optional. Specifies the type (\"Logging\" or \"OpsAnalytics\") of the recent queries to list. The only valid value for this field is one of the two allowable type function calls, which are the following: type(\"Logging\") type(\"OpsAnalytics\")", + "location": "query", + "type": "string" + }, "pageSize": { "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", "format": "int32", @@ -1320,6 +1325,34 @@ "https://www.googleapis.com/auth/logging.admin" ] }, + "get": { + "description": "Returns all data associated with the requested query.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/savedQueries/{savedQueriesId}", + "httpMethod": "GET", + "id": "logging.billingAccounts.locations.savedQueries.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the saved query. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" For example: \"projects/my-project/locations/global/savedQueries/my-saved-query\" ", + "location": "path", + "pattern": "^billingAccounts/[^/]+/locations/[^/]+/savedQueries/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "SavedQuery" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, "list": { "description": "Lists the SavedQueries that were created by the user making the request.", "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/savedQueries", @@ -1329,6 +1362,11 @@ "parent" ], "parameters": { + "filter": { + "description": "Optional. Specifies the type (\"Logging\" or \"OpsAnalytics\") and the visibility (PRIVATE or SHARED) of the saved queries to list. If provided, the filter must contain either the type function or a visibility token, or both. If both are chosen, they can be placed in any order, but they must be joined by the AND operator or the empty character.The two supported type function calls are: type(\"Logging\") type(\"OpsAnalytics\")The two supported visibility tokens are: visibility = PRIVATE visibility = SHAREDFor example:type(\"Logging\") AND visibility = PRIVATE visibility=SHARED type(\"OpsAnalytics\") type(\"OpsAnalytics)\" visibility = PRIVATE visibility = SHARED", + "location": "query", + "type": "string" + }, "pageSize": { "description": "Optional. The maximum number of results to return from this request.Non-positive values are ignored. 
The presence of nextPageToken in the response indicates that more results might be available.", "format": "int32", @@ -1358,6 +1396,41 @@ "https://www.googleapis.com/auth/logging.admin", "https://www.googleapis.com/auth/logging.read" ] + }, + "patch": { + "description": "Updates an existing SavedQuery.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/savedQueries/{savedQueriesId}", + "httpMethod": "PATCH", + "id": "logging.billingAccounts.locations.savedQueries.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Output only. Resource name of the saved query.In the format: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" For a list of supported locations, see Supported Regions (https://cloud.google.com/logging/docs/region-support#bucket-regions)After the saved query is created, the location cannot be changed.If the user doesn't provide a QUERY_ID, the system will generate an alphanumeric ID.", + "location": "path", + "pattern": "^billingAccounts/[^/]+/locations/[^/]+/savedQueries/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Required. A non-empty list of fields to change in the existing saved query. Fields are relative to the saved_query and new values for the fields are taken from the corresponding fields in the SavedQuery included in this request. Fields not mentioned in update_mask are not changed and are ignored in the request.To update all mutable fields, specify an update_mask of *.For example, to change the description and query filter text of a saved query, specify an update_mask of \"description, query.filter\".", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}", + "request": { + "$ref": "SavedQuery" + }, + "response": { + "$ref": "SavedQuery" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] } } } @@ -1450,7 +1523,7 @@ ], "parameters": { "customWriterIdentity": { - "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "description": "Optional. The service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified when you are routing logs to a log bucket that is in a different project than the sink. When not specified, a Logging service account will automatically be generated.", "location": "query", "type": "string" }, @@ -1587,7 +1660,7 @@ ], "parameters": { "customWriterIdentity": { - "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "description": "Optional. The service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified when you are routing logs to a log bucket that is in a different project than the sink. 
When not specified, a Logging service account will automatically be generated.", "location": "query", "type": "string" }, @@ -1632,7 +1705,7 @@ ], "parameters": { "customWriterIdentity": { - "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "description": "Optional. The service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified when you are routing logs to a log bucket that is in a different project than the sink. When not specified, a Logging service account will automatically be generated.", "location": "query", "type": "string" }, @@ -1985,7 +2058,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name for the settings to update. \"organizations/[ORGANIZATION_ID]/settings\" For example:\"organizations/12345/settings\"", + "description": "Required. The resource name for the settings to update. \"organizations/[ORGANIZATION_ID]/settings\" \"folders/[FOLDER_ID]/settings\" For example:\"organizations/12345/settings\"", "location": "path", "pattern": "^folders/[^/]+$", "required": true, @@ -2954,6 +3027,172 @@ } } }, + "logScopes": { + "methods": { + "create": { + "description": "Creates a log scope.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/logScopes", + "httpMethod": "POST", + "id": "logging.folders.locations.logScopes.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "logScopeId": { + "description": "Required. A client-assigned identifier such as \"log-scope\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods. First character has to be alphanumeric.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The parent project in which to create the log scope \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" For example:\"projects/my-project/locations/global\"", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/logScopes", + "request": { + "$ref": "LogScope" + }, + "response": { + "$ref": "LogScope" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "delete": { + "description": "Deletes a log scope.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/logScopes/{logScopesId}", + "httpMethod": "DELETE", + "id": "logging.folders.locations.logScopes.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. 
The resource name of the log scope to delete: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/logScopes/[LOG_SCOPE_ID]\" For example:\"projects/my-project/locations/global/logScopes/my-log-scope\"", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+/logScopes/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "get": { + "description": "Gets a log scope.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/logScopes/{logScopesId}", + "httpMethod": "GET", + "id": "logging.folders.locations.logScopes.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the log scope: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/logScopes/[LOG_SCOPE_ID]\" For example:\"projects/my-project/locations/global/logScopes/my-log-scope\"", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+/logScopes/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "LogScope" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, + "list": { + "description": "Lists log scopes.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/logScopes", + "httpMethod": "GET", + "id": "logging.folders.locations.logScopes.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of results to return from this request.Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The parent resource whose log scopes are to be listed: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" ", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/logScopes", + "response": { + "$ref": "ListLogScopesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, + "patch": { + "description": "Updates a log scope.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/logScopes/{logScopesId}", + "httpMethod": "PATCH", + "id": "logging.folders.locations.logScopes.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Output only. The resource name of the log scope.For example:projects/my-project/locations/global/logScopes/my-log-scope", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+/logScopes/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Optional. 
Field mask that specifies the fields in log_scope that need an update. A field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskFor example: updateMask=description", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}", + "request": { + "$ref": "LogScope" + }, + "response": { + "$ref": "LogScope" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + } + } + }, "operations": { "methods": { "cancel": { @@ -3070,6 +3309,11 @@ "parent" ], "parameters": { + "filter": { + "description": "Optional. Specifies the type (\"Logging\" or \"OpsAnalytics\") of the recent queries to list. The only valid value for this field is one of the two allowable type function calls, which are the following: type(\"Logging\") type(\"OpsAnalytics\")", + "location": "query", + "type": "string" + }, "pageSize": { "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", "format": "int32", @@ -3164,6 +3408,34 @@ "https://www.googleapis.com/auth/logging.admin" ] }, + "get": { + "description": "Returns all data associated with the requested query.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/savedQueries/{savedQueriesId}", + "httpMethod": "GET", + "id": "logging.folders.locations.savedQueries.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the saved query. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" For example: \"projects/my-project/locations/global/savedQueries/my-saved-query\" ", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+/savedQueries/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "SavedQuery" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, "list": { "description": "Lists the SavedQueries that were created by the user making the request.", "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/savedQueries", @@ -3173,6 +3445,11 @@ "parent" ], "parameters": { + "filter": { + "description": "Optional. Specifies the type (\"Logging\" or \"OpsAnalytics\") and the visibility (PRIVATE or SHARED) of the saved queries to list. If provided, the filter must contain either the type function or a visibility token, or both. 
If both are chosen, they can be placed in any order, but they must be joined by the AND operator or the empty character.The two supported type function calls are: type(\"Logging\") type(\"OpsAnalytics\")The two supported visibility tokens are: visibility = PRIVATE visibility = SHAREDFor example:type(\"Logging\") AND visibility = PRIVATE visibility=SHARED type(\"OpsAnalytics\") type(\"OpsAnalytics)\" visibility = PRIVATE visibility = SHARED", + "location": "query", + "type": "string" + }, "pageSize": { "description": "Optional. The maximum number of results to return from this request.Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", "format": "int32", @@ -3202,6 +3479,41 @@ "https://www.googleapis.com/auth/logging.admin", "https://www.googleapis.com/auth/logging.read" ] + }, + "patch": { + "description": "Updates an existing SavedQuery.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/savedQueries/{savedQueriesId}", + "httpMethod": "PATCH", + "id": "logging.folders.locations.savedQueries.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Output only. Resource name of the saved query.In the format: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" For a list of supported locations, see Supported Regions (https://cloud.google.com/logging/docs/region-support#bucket-regions)After the saved query is created, the location cannot be changed.If the user doesn't provide a QUERY_ID, the system will generate an alphanumeric ID.", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+/savedQueries/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Required. A non-empty list of fields to change in the existing saved query. Fields are relative to the saved_query and new values for the fields are taken from the corresponding fields in the SavedQuery included in this request. Fields not mentioned in update_mask are not changed and are ignored in the request.To update all mutable fields, specify an update_mask of *.For example, to change the description and query filter text of a saved query, specify an update_mask of \"description, query.filter\".", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}", + "request": { + "$ref": "SavedQuery" + }, + "response": { + "$ref": "SavedQuery" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] } } } @@ -3294,7 +3606,7 @@ ], "parameters": { "customWriterIdentity": { - "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "description": "Optional. The service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified when you are routing logs to a log bucket that is in a different project than the sink. When not specified, a Logging service account will automatically be generated.", "location": "query", "type": "string" }, @@ -3431,7 +3743,7 @@ ], "parameters": { "customWriterIdentity": { - "description": "Optional. 
A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "description": "Optional. The service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified when you are routing logs to a log bucket that is in a different project than the sink. When not specified, a Logging service account will automatically be generated.", "location": "query", "type": "string" }, @@ -3476,7 +3788,7 @@ ], "parameters": { "customWriterIdentity": { - "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "description": "Optional. The service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified when you are routing logs to a log bucket that is in a different project than the sink. When not specified, a Logging service account will automatically be generated.", "location": "query", "type": "string" }, @@ -4564,7 +4876,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name for the settings to update. \"organizations/[ORGANIZATION_ID]/settings\" For example:\"organizations/12345/settings\"", + "description": "Required. The resource name for the settings to update. \"organizations/[ORGANIZATION_ID]/settings\" \"folders/[FOLDER_ID]/settings\" For example:\"organizations/12345/settings\"", "location": "path", "pattern": "^organizations/[^/]+$", "required": true, @@ -5533,6 +5845,172 @@ } } }, + "logScopes": { + "methods": { + "create": { + "description": "Creates a log scope.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/logScopes", + "httpMethod": "POST", + "id": "logging.organizations.locations.logScopes.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "logScopeId": { + "description": "Required. A client-assigned identifier such as \"log-scope\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods. First character has to be alphanumeric.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. 
The parent project in which to create the log scope \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" For example:\"projects/my-project/locations/global\"", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/logScopes", + "request": { + "$ref": "LogScope" + }, + "response": { + "$ref": "LogScope" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "delete": { + "description": "Deletes a log scope.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/logScopes/{logScopesId}", + "httpMethod": "DELETE", + "id": "logging.organizations.locations.logScopes.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the log scope to delete: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/logScopes/[LOG_SCOPE_ID]\" For example:\"projects/my-project/locations/global/logScopes/my-log-scope\"", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+/logScopes/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "get": { + "description": "Gets a log scope.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/logScopes/{logScopesId}", + "httpMethod": "GET", + "id": "logging.organizations.locations.logScopes.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the log scope: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/logScopes/[LOG_SCOPE_ID]\" For example:\"projects/my-project/locations/global/logScopes/my-log-scope\"", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+/logScopes/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "LogScope" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, + "list": { + "description": "Lists log scopes.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/logScopes", + "httpMethod": "GET", + "id": "logging.organizations.locations.logScopes.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of results to return from this request.Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. 
The parent resource whose log scopes are to be listed: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" ", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/logScopes", + "response": { + "$ref": "ListLogScopesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, + "patch": { + "description": "Updates a log scope.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/logScopes/{logScopesId}", + "httpMethod": "PATCH", + "id": "logging.organizations.locations.logScopes.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Output only. The resource name of the log scope.For example:projects/my-project/locations/global/logScopes/my-log-scope", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+/logScopes/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Optional. Field mask that specifies the fields in log_scope that need an update. A field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskFor example: updateMask=description", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}", + "request": { + "$ref": "LogScope" + }, + "response": { + "$ref": "LogScope" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + } + } + }, "operations": { "methods": { "cancel": { @@ -5649,6 +6127,11 @@ "parent" ], "parameters": { + "filter": { + "description": "Optional. Specifies the type (\"Logging\" or \"OpsAnalytics\") of the recent queries to list. The only valid value for this field is one of the two allowable type function calls, which are the following: type(\"Logging\") type(\"OpsAnalytics\")", + "location": "query", + "type": "string" + }, "pageSize": { "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", "format": "int32", @@ -5743,6 +6226,34 @@ "https://www.googleapis.com/auth/logging.admin" ] }, + "get": { + "description": "Returns all data associated with the requested query.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/savedQueries/{savedQueriesId}", + "httpMethod": "GET", + "id": "logging.organizations.locations.savedQueries.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the saved query. 
\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" For example: \"projects/my-project/locations/global/savedQueries/my-saved-query\" ", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+/savedQueries/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "SavedQuery" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, "list": { "description": "Lists the SavedQueries that were created by the user making the request.", "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/savedQueries", @@ -5752,6 +6263,11 @@ "parent" ], "parameters": { + "filter": { + "description": "Optional. Specifies the type (\"Logging\" or \"OpsAnalytics\") and the visibility (PRIVATE or SHARED) of the saved queries to list. If provided, the filter must contain either the type function or a visibility token, or both. If both are chosen, they can be placed in any order, but they must be joined by the AND operator or the empty character.The two supported type function calls are: type(\"Logging\") type(\"OpsAnalytics\")The two supported visibility tokens are: visibility = PRIVATE visibility = SHAREDFor example:type(\"Logging\") AND visibility = PRIVATE visibility=SHARED type(\"OpsAnalytics\") type(\"OpsAnalytics)\" visibility = PRIVATE visibility = SHARED", + "location": "query", + "type": "string" + }, "pageSize": { "description": "Optional. The maximum number of results to return from this request.Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", "format": "int32", @@ -5771,15 +6287,50 @@ "type": "string" } }, - "path": "v2/{+parent}/savedQueries", + "path": "v2/{+parent}/savedQueries", + "response": { + "$ref": "ListSavedQueriesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, + "patch": { + "description": "Updates an existing SavedQuery.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/savedQueries/{savedQueriesId}", + "httpMethod": "PATCH", + "id": "logging.organizations.locations.savedQueries.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Output only. Resource name of the saved query.In the format: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" For a list of supported locations, see Supported Regions (https://cloud.google.com/logging/docs/region-support#bucket-regions)After the saved query is created, the location cannot be changed.If the user doesn't provide a QUERY_ID, the system will generate an alphanumeric ID.", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+/savedQueries/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Required. A non-empty list of fields to change in the existing saved query. 
Fields are relative to the saved_query and new values for the fields are taken from the corresponding fields in the SavedQuery included in this request. Fields not mentioned in update_mask are not changed and are ignored in the request.To update all mutable fields, specify an update_mask of *.For example, to change the description and query filter text of a saved query, specify an update_mask of \"description, query.filter\".", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}", + "request": { + "$ref": "SavedQuery" + }, "response": { - "$ref": "ListSavedQueriesResponse" + "$ref": "SavedQuery" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" + "https://www.googleapis.com/auth/logging.admin" ] } } @@ -5873,7 +6424,7 @@ ], "parameters": { "customWriterIdentity": { - "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "description": "Optional. The service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified when you are routing logs to a log bucket that is in a different project than the sink. When not specified, a Logging service account will automatically be generated.", "location": "query", "type": "string" }, @@ -6010,7 +6561,7 @@ ], "parameters": { "customWriterIdentity": { - "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "description": "Optional. The service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified when you are routing logs to a log bucket that is in a different project than the sink. When not specified, a Logging service account will automatically be generated.", "location": "query", "type": "string" }, @@ -6055,7 +6606,7 @@ ], "parameters": { "customWriterIdentity": { - "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "description": "Optional. The service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified when you are routing logs to a log bucket that is in a different project than the sink. 
When not specified, a Logging service account will automatically be generated.", "location": "query", "type": "string" }, @@ -7096,6 +7647,172 @@ } } }, + "logScopes": { + "methods": { + "create": { + "description": "Creates a log scope.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/logScopes", + "httpMethod": "POST", + "id": "logging.projects.locations.logScopes.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "logScopeId": { + "description": "Required. A client-assigned identifier such as \"log-scope\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods. First character has to be alphanumeric.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The parent project in which to create the log scope \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" For example:\"projects/my-project/locations/global\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/logScopes", + "request": { + "$ref": "LogScope" + }, + "response": { + "$ref": "LogScope" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "delete": { + "description": "Deletes a log scope.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/logScopes/{logScopesId}", + "httpMethod": "DELETE", + "id": "logging.projects.locations.logScopes.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the log scope to delete: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/logScopes/[LOG_SCOPE_ID]\" For example:\"projects/my-project/locations/global/logScopes/my-log-scope\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/logScopes/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "get": { + "description": "Gets a log scope.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/logScopes/{logScopesId}", + "httpMethod": "GET", + "id": "logging.projects.locations.logScopes.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the log scope: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/logScopes/[LOG_SCOPE_ID]\" For example:\"projects/my-project/locations/global/logScopes/my-log-scope\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/logScopes/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "LogScope" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, + "list": { + "description": "Lists log scopes.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/logScopes", + "httpMethod": "GET", + "id": "logging.projects.locations.logScopes.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of results to return from this request.Non-positive values are ignored. 
The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The parent resource whose log scopes are to be listed: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" ", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/logScopes", + "response": { + "$ref": "ListLogScopesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, + "patch": { + "description": "Updates a log scope.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/logScopes/{logScopesId}", + "httpMethod": "PATCH", + "id": "logging.projects.locations.logScopes.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Output only. The resource name of the log scope.For example:projects/my-project/locations/global/logScopes/my-log-scope", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/logScopes/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Optional. Field mask that specifies the fields in log_scope that need an update. A field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskFor example: updateMask=description", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}", + "request": { + "$ref": "LogScope" + }, + "response": { + "$ref": "LogScope" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + } + } + }, "operations": { "methods": { "cancel": { @@ -7212,6 +7929,11 @@ "parent" ], "parameters": { + "filter": { + "description": "Optional. Specifies the type (\"Logging\" or \"OpsAnalytics\") of the recent queries to list. The only valid value for this field is one of the two allowable type function calls, which are the following: type(\"Logging\") type(\"OpsAnalytics\")", + "location": "query", + "type": "string" + }, "pageSize": { "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", "format": "int32", @@ -7306,6 +8028,34 @@ "https://www.googleapis.com/auth/logging.admin" ] }, + "get": { + "description": "Returns all data associated with the requested query.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/savedQueries/{savedQueriesId}", + "httpMethod": "GET", + "id": "logging.projects.locations.savedQueries.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the saved query. 
\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" For example: \"projects/my-project/locations/global/savedQueries/my-saved-query\" ", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/savedQueries/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "SavedQuery" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, "list": { "description": "Lists the SavedQueries that were created by the user making the request.", "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/savedQueries", @@ -7315,6 +8065,11 @@ "parent" ], "parameters": { + "filter": { + "description": "Optional. Specifies the type (\"Logging\" or \"OpsAnalytics\") and the visibility (PRIVATE or SHARED) of the saved queries to list. If provided, the filter must contain either the type function or a visibility token, or both. If both are chosen, they can be placed in any order, but they must be joined by the AND operator or the empty character.The two supported type function calls are: type(\"Logging\") type(\"OpsAnalytics\")The two supported visibility tokens are: visibility = PRIVATE visibility = SHAREDFor example:type(\"Logging\") AND visibility = PRIVATE visibility=SHARED type(\"OpsAnalytics\") type(\"OpsAnalytics)\" visibility = PRIVATE visibility = SHARED", + "location": "query", + "type": "string" + }, "pageSize": { "description": "Optional. The maximum number of results to return from this request.Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", "format": "int32", @@ -7344,6 +8099,41 @@ "https://www.googleapis.com/auth/logging.admin", "https://www.googleapis.com/auth/logging.read" ] + }, + "patch": { + "description": "Updates an existing SavedQuery.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/savedQueries/{savedQueriesId}", + "httpMethod": "PATCH", + "id": "logging.projects.locations.savedQueries.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Output only. Resource name of the saved query.In the format: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" For a list of supported locations, see Supported Regions (https://cloud.google.com/logging/docs/region-support#bucket-regions)After the saved query is created, the location cannot be changed.If the user doesn't provide a QUERY_ID, the system will generate an alphanumeric ID.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/savedQueries/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Required. A non-empty list of fields to change in the existing saved query. Fields are relative to the saved_query and new values for the fields are taken from the corresponding fields in the SavedQuery included in this request. 
Fields not mentioned in update_mask are not changed and are ignored in the request.To update all mutable fields, specify an update_mask of *.For example, to change the description and query filter text of a saved query, specify an update_mask of \"description, query.filter\".", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}", + "request": { + "$ref": "SavedQuery" + }, + "response": { + "$ref": "SavedQuery" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] } } } @@ -7594,7 +8384,7 @@ ], "parameters": { "customWriterIdentity": { - "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "description": "Optional. The service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified when you are routing logs to a log bucket that is in a different project than the sink. When not specified, a Logging service account will automatically be generated.", "location": "query", "type": "string" }, @@ -7731,7 +8521,7 @@ ], "parameters": { "customWriterIdentity": { - "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "description": "Optional. The service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified when you are routing logs to a log bucket that is in a different project than the sink. When not specified, a Logging service account will automatically be generated.", "location": "query", "type": "string" }, @@ -7776,7 +8566,7 @@ ], "parameters": { "customWriterIdentity": { - "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "description": "Optional. The service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified when you are routing logs to a log bucket that is in a different project than the sink. When not specified, a Logging service account will automatically be generated.", "location": "query", "type": "string" }, @@ -7827,7 +8617,7 @@ ], "parameters": { "customWriterIdentity": { - "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "description": "Optional. 
The service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified when you are routing logs to a log bucket that is in a different project than the sink. When not specified, a Logging service account will automatically be generated.", "location": "query", "type": "string" }, @@ -7964,7 +8754,7 @@ ], "parameters": { "customWriterIdentity": { - "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "description": "Optional. The service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified when you are routing logs to a log bucket that is in a different project than the sink. When not specified, a Logging service account will automatically be generated.", "location": "query", "type": "string" }, @@ -8104,7 +8894,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name for the settings to update. \"organizations/[ORGANIZATION_ID]/settings\" For example:\"organizations/12345/settings\"", + "description": "Required. The resource name for the settings to update. \"organizations/[ORGANIZATION_ID]/settings\" \"folders/[FOLDER_ID]/settings\" For example:\"organizations/12345/settings\"", "location": "path", "pattern": "^[^/]+/[^/]+$", "required": true, @@ -8132,7 +8922,7 @@ } } }, - "revision": "20240503", + "revision": "20240913", "rootUrl": "https://logging.googleapis.com/", "schemas": { "AuditConfig": { @@ -8629,12 +9419,12 @@ "type": "boolean" }, "latency": { - "description": "The request processing latency on the server, from the time the request was received until the response was sent.", + "description": "The request processing latency on the server, from the time the request was received until the response was sent. For WebSocket connections, this field refers to the entire time duration of the connection.", "format": "google-duration", "type": "string" }, "protocol": { - "description": "Protocol used for the request. Examples: \"HTTP/1.1\", \"HTTP/2\", \"websocket\"", + "description": "Protocol used for the request. Examples: \"HTTP/1.1\", \"HTTP/2\"", "type": "string" }, "referer": { @@ -8690,7 +9480,7 @@ "type": "string" }, "fieldPath": { - "description": "Required. The LogEntry field path to index.Note that some paths are automatically indexed, and other paths are not eligible for indexing. See indexing documentation( https://cloud.google.com/logging/docs/view/advanced-queries#indexed-fields) for details.For example: jsonPayload.request.status", + "description": "Required. The LogEntry field path to index.Note that some paths are automatically indexed, and other paths are not eligible for indexing. See indexing documentation( https://cloud.google.com/logging/docs/analyze/custom-index) for details.For example: jsonPayload.request.status", "type": "string" }, "type": { @@ -8960,7 +9750,7 @@ "type": "array" }, "resourceNames": { - "description": "Required. 
Names of one or more parent resources from which to retrieve log entries: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]May alternatively be one or more views: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]Projects listed in the project_ids field are added to this list. A maximum of 100 resources may be specified in a single request.", + "description": "Required. Names of one or more parent resources from which to retrieve log entries. Resources may either be resource containers or specific LogViews. For the case of resource containers, all logs ingested into that container will be returned regardless of which LogBuckets they are actually stored in - i.e. these queries may fan out to multiple regions. In the event of region unavailability, specify a specific set of LogViews that do not include the unavailable region. projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID] projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]Projects listed in the project_ids field are added to this list. A maximum of 100 resources may be specified in a single request.", "items": { "type": "string" }, @@ -9005,6 +9795,24 @@ }, "type": "object" }, + "ListLogScopesResponse": { + "description": "The response from ListLogScopes. Every project has a _Default log scope that cannot be modified or deleted.", + "id": "ListLogScopesResponse", + "properties": { + "logScopes": { + "description": "A list of log scopes.", + "items": { + "$ref": "LogScope" + }, + "type": "array" + }, + "nextPageToken": { + "description": "If there might be more results than appear in this response, then nextPageToken is included. To get the next set of results, call the same method again using the value of nextPageToken as pageToken.", + "type": "string" + } + }, + "type": "object" + }, "ListLogsResponse": { "description": "Result returned from ListLogs.", "id": "ListLogsResponse", @@ -9195,7 +10003,7 @@ "id": "LogBucket", "properties": { "analyticsEnabled": { - "description": "Whether log analytics is enabled for this bucket.Once enabled, log analytics features cannot be disabled.", + "description": "Optional. Whether log analytics is enabled for this bucket.Once enabled, log analytics features cannot be disabled.", "type": "boolean" }, "cmekSettings": { @@ -9607,6 +10415,41 @@ }, "type": "object" }, + "LogScope": { + "description": "Describes a group of resources to read log entries from.", + "id": "LogScope", + "properties": { + "createTime": { + "description": "Output only. The creation timestamp of the log scope.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "description": { + "description": "Optional. Describes this log scope.The maximum length of the description is 8000 characters.", + "type": "string" + }, + "name": { + "description": "Output only. 
The resource name of the log scope.For example:projects/my-project/locations/global/logScopes/my-log-scope", + "readOnly": true, + "type": "string" + }, + "resourceNames": { + "description": "Required. Names of one or more parent resources: projects/[PROJECT_ID]May alternatively be one or more views: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]A log scope can include a maximum of 50 projects and a maximum of 100 resources in total.", + "items": { + "type": "string" + }, + "type": "array" + }, + "updateTime": { + "description": "Output only. The last update timestamp of the log scope.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, "LogSink": { "description": "Describes a sink used to export log entries to one of the following destinations: a Cloud Logging log bucket, a Cloud Storage bucket, a BigQuery dataset, a Pub/Sub topic, a Cloud project.A logs filter controls which log entries are exported. The sink must be created within a project, organization, billing account, or folder.", "id": "LogSink", @@ -9917,6 +10760,25 @@ "description": "The sampling period of metric data points. For metrics which are written periodically, consecutive data points are stored at this time interval, excluding data loss due to errors. Metrics with a higher granularity have a smaller sampling period.", "format": "google-duration", "type": "string" + }, + "timeSeriesResourceHierarchyLevel": { + "description": "The scope of the timeseries data of the metric.", + "items": { + "enum": [ + "TIME_SERIES_RESOURCE_HIERARCHY_LEVEL_UNSPECIFIED", + "PROJECT", + "ORGANIZATION", + "FOLDER" + ], + "enumDescriptions": [ + "Do not use this default value.", + "Scopes a metric to a project.", + "Scopes a metric to an organization.", + "Scopes a metric to a folder." 
+ ], + "type": "string" + }, + "type": "array" } }, "type": "object" diff --git a/terraform/providers/google/vendor/google.golang.org/api/logging/v2/logging-gen.go b/terraform/providers/google/vendor/google.golang.org/api/logging/v2/logging-gen.go index 0acca196a04..b82d1b829f9 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/logging/v2/logging-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/logging/v2/logging-gen.go @@ -400,6 +400,7 @@ type FoldersExclusionsService struct { func NewFoldersLocationsService(s *Service) *FoldersLocationsService { rs := &FoldersLocationsService{s: s} rs.Buckets = NewFoldersLocationsBucketsService(s) + rs.LogScopes = NewFoldersLocationsLogScopesService(s) rs.Operations = NewFoldersLocationsOperationsService(s) rs.RecentQueries = NewFoldersLocationsRecentQueriesService(s) rs.SavedQueries = NewFoldersLocationsSavedQueriesService(s) @@ -411,6 +412,8 @@ type FoldersLocationsService struct { Buckets *FoldersLocationsBucketsService + LogScopes *FoldersLocationsLogScopesService + Operations *FoldersLocationsOperationsService RecentQueries *FoldersLocationsRecentQueriesService @@ -463,6 +466,15 @@ type FoldersLocationsBucketsViewsLogsService struct { s *Service } +func NewFoldersLocationsLogScopesService(s *Service) *FoldersLocationsLogScopesService { + rs := &FoldersLocationsLogScopesService{s: s} + return rs +} + +type FoldersLocationsLogScopesService struct { + s *Service +} + func NewFoldersLocationsOperationsService(s *Service) *FoldersLocationsOperationsService { rs := &FoldersLocationsOperationsService{s: s} return rs @@ -616,6 +628,7 @@ type OrganizationsExclusionsService struct { func NewOrganizationsLocationsService(s *Service) *OrganizationsLocationsService { rs := &OrganizationsLocationsService{s: s} rs.Buckets = NewOrganizationsLocationsBucketsService(s) + rs.LogScopes = NewOrganizationsLocationsLogScopesService(s) rs.Operations = NewOrganizationsLocationsOperationsService(s) rs.RecentQueries = NewOrganizationsLocationsRecentQueriesService(s) rs.SavedQueries = NewOrganizationsLocationsSavedQueriesService(s) @@ -627,6 +640,8 @@ type OrganizationsLocationsService struct { Buckets *OrganizationsLocationsBucketsService + LogScopes *OrganizationsLocationsLogScopesService + Operations *OrganizationsLocationsOperationsService RecentQueries *OrganizationsLocationsRecentQueriesService @@ -679,6 +694,15 @@ type OrganizationsLocationsBucketsViewsLogsService struct { s *Service } +func NewOrganizationsLocationsLogScopesService(s *Service) *OrganizationsLocationsLogScopesService { + rs := &OrganizationsLocationsLogScopesService{s: s} + return rs +} + +type OrganizationsLocationsLogScopesService struct { + s *Service +} + func NewOrganizationsLocationsOperationsService(s *Service) *OrganizationsLocationsOperationsService { rs := &OrganizationsLocationsOperationsService{s: s} return rs @@ -760,6 +784,7 @@ type ProjectsExclusionsService struct { func NewProjectsLocationsService(s *Service) *ProjectsLocationsService { rs := &ProjectsLocationsService{s: s} rs.Buckets = NewProjectsLocationsBucketsService(s) + rs.LogScopes = NewProjectsLocationsLogScopesService(s) rs.Operations = NewProjectsLocationsOperationsService(s) rs.RecentQueries = NewProjectsLocationsRecentQueriesService(s) rs.SavedQueries = NewProjectsLocationsSavedQueriesService(s) @@ -771,6 +796,8 @@ type ProjectsLocationsService struct { Buckets *ProjectsLocationsBucketsService + LogScopes *ProjectsLocationsLogScopesService + Operations *ProjectsLocationsOperationsService 
RecentQueries *ProjectsLocationsRecentQueriesService @@ -823,6 +850,15 @@ type ProjectsLocationsBucketsViewsLogsService struct { s *Service } +func NewProjectsLocationsLogScopesService(s *Service) *ProjectsLocationsLogScopesService { + rs := &ProjectsLocationsLogScopesService{s: s} + return rs +} + +type ProjectsLocationsLogScopesService struct { + s *Service +} + func NewProjectsLocationsOperationsService(s *Service) *ProjectsLocationsOperationsService { rs := &ProjectsLocationsOperationsService{s: s} return rs @@ -931,9 +967,9 @@ type AuditConfig struct { NullFields []string `json:"-"` } -func (s *AuditConfig) MarshalJSON() ([]byte, error) { +func (s AuditConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuditLogConfig: Provides the configuration for logging a type of @@ -966,9 +1002,9 @@ type AuditLogConfig struct { NullFields []string `json:"-"` } -func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { +func (s AuditLogConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditLogConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BigQueryDataset: Describes a BigQuery dataset that was created by a link. @@ -992,9 +1028,9 @@ type BigQueryDataset struct { NullFields []string `json:"-"` } -func (s *BigQueryDataset) MarshalJSON() ([]byte, error) { +func (s BigQueryDataset) MarshalJSON() ([]byte, error) { type NoMethod BigQueryDataset - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BigQueryOptions: Options that change functionality of a sink exporting data @@ -1028,9 +1064,9 @@ type BigQueryOptions struct { NullFields []string `json:"-"` } -func (s *BigQueryOptions) MarshalJSON() ([]byte, error) { +func (s BigQueryOptions) MarshalJSON() ([]byte, error) { type NoMethod BigQueryOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Binding: Associates members, or principals, with a role. @@ -1124,9 +1160,9 @@ type Binding struct { NullFields []string `json:"-"` } -func (s *Binding) MarshalJSON() ([]byte, error) { +func (s Binding) MarshalJSON() ([]byte, error) { type NoMethod Binding - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketMetadata: Metadata for LongRunningUpdateBucket Operations. 
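Reviewer note on the marshaler hunks above: the generator switched every MarshalJSON from a pointer receiver (`func (s *AuditConfig)`) to a value receiver (`func (s AuditConfig)`), and the `type NoMethod X` alias is what prevents encoding from recursing back into MarshalJSON. A minimal, self-contained sketch of that pattern, using encoding/json rather than the vendored gensupport helper (AuditConfigLike is a stand-in type, not part of this package):

package main

import (
	"encoding/json"
	"fmt"
)

// AuditConfigLike stands in for any of the generated structs above.
type AuditConfigLike struct {
	Service string `json:"service,omitempty"`
}

// MarshalJSON uses the same trick as the generated code: NoMethod has the
// same fields but no methods, so json.Marshal does not recurse into this
// method when encoding the converted value.
func (s AuditConfigLike) MarshalJSON() ([]byte, error) {
	type NoMethod AuditConfigLike
	return json.Marshal(NoMethod(s))
}

func main() {
	b, err := json.Marshal(AuditConfigLike{Service: "allServices"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"service":"allServices"}
}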
@@ -1165,9 +1201,9 @@ type BucketMetadata struct { NullFields []string `json:"-"` } -func (s *BucketMetadata) MarshalJSON() ([]byte, error) { +func (s BucketMetadata) MarshalJSON() ([]byte, error) { type NoMethod BucketMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketOptions: BucketOptions describes the bucket boundaries used to create @@ -1204,9 +1240,9 @@ type BucketOptions struct { NullFields []string `json:"-"` } -func (s *BucketOptions) MarshalJSON() ([]byte, error) { +func (s BucketOptions) MarshalJSON() ([]byte, error) { type NoMethod BucketOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CancelOperationRequest: The request message for Operations.CancelOperation. @@ -1278,9 +1314,9 @@ type CmekSettings struct { NullFields []string `json:"-"` } -func (s *CmekSettings) MarshalJSON() ([]byte, error) { +func (s CmekSettings) MarshalJSON() ([]byte, error) { type NoMethod CmekSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CopyLogEntriesMetadata: Metadata for CopyLogEntries long running operations. @@ -1335,9 +1371,9 @@ type CopyLogEntriesMetadata struct { NullFields []string `json:"-"` } -func (s *CopyLogEntriesMetadata) MarshalJSON() ([]byte, error) { +func (s CopyLogEntriesMetadata) MarshalJSON() ([]byte, error) { type NoMethod CopyLogEntriesMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CopyLogEntriesRequest: The parameters to CopyLogEntries. @@ -1364,9 +1400,9 @@ type CopyLogEntriesRequest struct { NullFields []string `json:"-"` } -func (s *CopyLogEntriesRequest) MarshalJSON() ([]byte, error) { +func (s CopyLogEntriesRequest) MarshalJSON() ([]byte, error) { type NoMethod CopyLogEntriesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CopyLogEntriesResponse: Response type for CopyLogEntries long running @@ -1387,9 +1423,9 @@ type CopyLogEntriesResponse struct { NullFields []string `json:"-"` } -func (s *CopyLogEntriesResponse) MarshalJSON() ([]byte, error) { +func (s CopyLogEntriesResponse) MarshalJSON() ([]byte, error) { type NoMethod CopyLogEntriesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateBucketRequest: The parameters to CreateBucket. @@ -1420,9 +1456,9 @@ type CreateBucketRequest struct { NullFields []string `json:"-"` } -func (s *CreateBucketRequest) MarshalJSON() ([]byte, error) { +func (s CreateBucketRequest) MarshalJSON() ([]byte, error) { type NoMethod CreateBucketRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateLinkRequest: The parameters to CreateLink. 
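For orientation, the CopyLogEntries hunks above only show the marshaler change. A hedged sketch of the request message follows; the field names (Name, Destination, Filter) come from the upstream logging/v2 API and are not visible in this hunk, and all resource names are placeholders:

package sketch

import logging "google.golang.org/api/logging/v2"

// copyRequest sketches copying older entries from a log bucket to a Cloud
// Storage bucket. Bucket names and the cutoff timestamp are placeholders.
func copyRequest() *logging.CopyLogEntriesRequest {
	return &logging.CopyLogEntriesRequest{
		Name:        "projects/my-project/locations/global/buckets/my-bucket",
		Destination: "storage.googleapis.com/my-archive-bucket",
		Filter:      `timestamp < "2024-06-01T00:00:00Z"`,
	}
}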
@@ -1453,9 +1489,9 @@ type CreateLinkRequest struct { NullFields []string `json:"-"` } -func (s *CreateLinkRequest) MarshalJSON() ([]byte, error) { +func (s CreateLinkRequest) MarshalJSON() ([]byte, error) { type NoMethod CreateLinkRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DefaultSinkConfig: Describes the custom _Default sink configuration that is @@ -1500,9 +1536,9 @@ type DefaultSinkConfig struct { NullFields []string `json:"-"` } -func (s *DefaultSinkConfig) MarshalJSON() ([]byte, error) { +func (s DefaultSinkConfig) MarshalJSON() ([]byte, error) { type NoMethod DefaultSinkConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DeleteLinkRequest: The parameters to DeleteLink. @@ -1530,9 +1566,9 @@ type DeleteLinkRequest struct { NullFields []string `json:"-"` } -func (s *DeleteLinkRequest) MarshalJSON() ([]byte, error) { +func (s DeleteLinkRequest) MarshalJSON() ([]byte, error) { type NoMethod DeleteLinkRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Empty: A generic empty message that you can re-use to avoid defining @@ -1566,9 +1602,9 @@ type Explicit struct { NullFields []string `json:"-"` } -func (s *Explicit) MarshalJSON() ([]byte, error) { +func (s Explicit) MarshalJSON() ([]byte, error) { type NoMethod Explicit - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *Explicit) UnmarshalJSON(data []byte) error { @@ -1614,9 +1650,9 @@ type Exponential struct { NullFields []string `json:"-"` } -func (s *Exponential) MarshalJSON() ([]byte, error) { +func (s Exponential) MarshalJSON() ([]byte, error) { type NoMethod Exponential - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *Exponential) UnmarshalJSON(data []byte) error { @@ -1678,9 +1714,9 @@ type Expr struct { NullFields []string `json:"-"` } -func (s *Expr) MarshalJSON() ([]byte, error) { +func (s Expr) MarshalJSON() ([]byte, error) { type NoMethod Expr - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetIamPolicyRequest: Request message for GetIamPolicy method. @@ -1701,9 +1737,9 @@ type GetIamPolicyRequest struct { NullFields []string `json:"-"` } -func (s *GetIamPolicyRequest) MarshalJSON() ([]byte, error) { +func (s GetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod GetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetPolicyOptions: Encapsulates settings provided to GetIamPolicy. 
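The Expr type whose marshaler changes above carries a CEL expression and is what Binding's Condition field (see the Binding hunk earlier) points at. A hedged sketch of a conditional IAM binding; the principal, title, and timestamp are placeholders:

package sketch

import logging "google.golang.org/api/logging/v2"

// conditionalBinding sketches a role binding that expires via a CEL
// condition evaluated against request.time.
func conditionalBinding() *logging.Binding {
	return &logging.Binding{
		Role:    "roles/logging.viewer",
		Members: []string{"user:example@example.com"},
		Condition: &logging.Expr{
			Title:      "expires-2027",
			Expression: `request.time < timestamp("2027-01-01T00:00:00Z")`,
		},
	}
}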
@@ -1733,9 +1769,9 @@ type GetPolicyOptions struct { NullFields []string `json:"-"` } -func (s *GetPolicyOptions) MarshalJSON() ([]byte, error) { +func (s GetPolicyOptions) MarshalJSON() ([]byte, error) { type NoMethod GetPolicyOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpRequest: A common proto for logging HTTP requests. Only contains @@ -1755,10 +1791,10 @@ type HttpRequest struct { // meaningful if cache_hit is True. CacheValidatedWithOriginServer bool `json:"cacheValidatedWithOriginServer,omitempty"` // Latency: The request processing latency on the server, from the time the - // request was received until the response was sent. + // request was received until the response was sent. For WebSocket connections, + // this field refers to the entire time duration of the connection. Latency string `json:"latency,omitempty"` - // Protocol: Protocol used for the request. Examples: "HTTP/1.1", "HTTP/2", - // "websocket" + // Protocol: Protocol used for the request. Examples: "HTTP/1.1", "HTTP/2" Protocol string `json:"protocol,omitempty"` // Referer: The referer URL of the request, as defined in HTTP/1.1 Header Field // Definitions (https://datatracker.ietf.org/doc/html/rfc2616#section-14.36). @@ -1802,9 +1838,9 @@ type HttpRequest struct { NullFields []string `json:"-"` } -func (s *HttpRequest) MarshalJSON() ([]byte, error) { +func (s HttpRequest) MarshalJSON() ([]byte, error) { type NoMethod HttpRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // IndexConfig: Configuration for an indexed field. @@ -1816,8 +1852,8 @@ type IndexConfig struct { // FieldPath: Required. The LogEntry field path to index.Note that some paths // are automatically indexed, and other paths are not eligible for indexing. // See indexing documentation( - // https://cloud.google.com/logging/docs/view/advanced-queries#indexed-fields) - // for details.For example: jsonPayload.request.status + // https://cloud.google.com/logging/docs/analyze/custom-index) for details.For + // example: jsonPayload.request.status FieldPath string `json:"fieldPath,omitempty"` // Type: Required. The type of data in this index. // @@ -1839,9 +1875,9 @@ type IndexConfig struct { NullFields []string `json:"-"` } -func (s *IndexConfig) MarshalJSON() ([]byte, error) { +func (s IndexConfig) MarshalJSON() ([]byte, error) { type NoMethod IndexConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LabelDescriptor: A description of a label. 
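The IndexConfig hunk above updates the documentation link and keeps jsonPayload.request.status as its example field path. A hedged sketch of such an index; INDEX_TYPE_STRING is assumed from the upstream enum for Type, whose value list this hunk elides:

package sketch

import logging "google.golang.org/api/logging/v2"

// statusIndex sketches indexing a JSON payload field as a string, using the
// documentation's own example path.
func statusIndex() *logging.IndexConfig {
	return &logging.IndexConfig{
		FieldPath: "jsonPayload.request.status",
		Type:      "INDEX_TYPE_STRING",
	}
}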
@@ -1870,9 +1906,9 @@ type LabelDescriptor struct { NullFields []string `json:"-"` } -func (s *LabelDescriptor) MarshalJSON() ([]byte, error) { +func (s LabelDescriptor) MarshalJSON() ([]byte, error) { type NoMethod LabelDescriptor - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Linear: Specifies a linear sequence of buckets that all have the same width @@ -1901,9 +1937,9 @@ type Linear struct { NullFields []string `json:"-"` } -func (s *Linear) MarshalJSON() ([]byte, error) { +func (s Linear) MarshalJSON() ([]byte, error) { type NoMethod Linear - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *Linear) UnmarshalJSON(data []byte) error { @@ -1978,9 +2014,9 @@ type Link struct { NullFields []string `json:"-"` } -func (s *Link) MarshalJSON() ([]byte, error) { +func (s Link) MarshalJSON() ([]byte, error) { type NoMethod Link - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LinkMetadata: Metadata for long running Link operations. @@ -2019,9 +2055,9 @@ type LinkMetadata struct { NullFields []string `json:"-"` } -func (s *LinkMetadata) MarshalJSON() ([]byte, error) { +func (s LinkMetadata) MarshalJSON() ([]byte, error) { type NoMethod LinkMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListBucketsResponse: The response from ListBuckets. @@ -2048,9 +2084,9 @@ type ListBucketsResponse struct { NullFields []string `json:"-"` } -func (s *ListBucketsResponse) MarshalJSON() ([]byte, error) { +func (s ListBucketsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListBucketsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListExclusionsResponse: Result returned from ListExclusions. @@ -2077,9 +2113,9 @@ type ListExclusionsResponse struct { NullFields []string `json:"-"` } -func (s *ListExclusionsResponse) MarshalJSON() ([]byte, error) { +func (s ListExclusionsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListExclusionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListLinksResponse: The response from ListLinks. @@ -2106,9 +2142,9 @@ type ListLinksResponse struct { NullFields []string `json:"-"` } -func (s *ListLinksResponse) MarshalJSON() ([]byte, error) { +func (s ListLinksResponse) MarshalJSON() ([]byte, error) { type NoMethod ListLinksResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListLocationsResponse: The response message for Locations.ListLocations. 
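Linear (above) describes equal-width histogram buckets: finite bucket i spans [offset + width*i, offset + width*(i+1)). A quick runnable sketch of those boundaries, mirroring the Linear fields with plain values:

package main

import "fmt"

func main() {
	// Mirrors logging.Linear: 4 finite buckets of width 10 starting at 0.
	numFiniteBuckets, width, offset := 4, 10.0, 0.0
	for i := 0; i < numFiniteBuckets; i++ {
		lo := offset + width*float64(i)
		hi := offset + width*float64(i+1)
		fmt.Printf("bucket %d: [%g, %g)\n", i+1, lo, hi)
	}
}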
@@ -2134,9 +2170,9 @@ type ListLocationsResponse struct { NullFields []string `json:"-"` } -func (s *ListLocationsResponse) MarshalJSON() ([]byte, error) { +func (s ListLocationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListLocationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListLogEntriesRequest: The parameters to ListLogEntries. @@ -2179,9 +2215,14 @@ type ListLogEntriesRequest struct { // Example: "my-project-1A". ProjectIds []string `json:"projectIds,omitempty"` // ResourceNames: Required. Names of one or more parent resources from which to - // retrieve log entries: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] - // billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]May alternatively be - // one or more views: + // retrieve log entries. Resources may either be resource containers or + // specific LogViews. For the case of resource containers, all logs ingested + // into that container will be returned regardless of which LogBuckets they are + // actually stored in - i.e. these queries may fan out to multiple regions. In + // the event of region unavailability, specify a specific set of LogViews that + // do not include the unavailable region. projects/[PROJECT_ID] + // organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] + // folders/[FOLDER_ID] // projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW // _ID] // organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/v @@ -2205,9 +2246,9 @@ type ListLogEntriesRequest struct { NullFields []string `json:"-"` } -func (s *ListLogEntriesRequest) MarshalJSON() ([]byte, error) { +func (s ListLogEntriesRequest) MarshalJSON() ([]byte, error) { type NoMethod ListLogEntriesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListLogEntriesResponse: Result returned from ListLogEntries. @@ -2242,9 +2283,9 @@ type ListLogEntriesResponse struct { NullFields []string `json:"-"` } -func (s *ListLogEntriesResponse) MarshalJSON() ([]byte, error) { +func (s ListLogEntriesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListLogEntriesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListLogMetricsResponse: Result returned from ListLogMetrics. @@ -2271,9 +2312,39 @@ type ListLogMetricsResponse struct { NullFields []string `json:"-"` } -func (s *ListLogMetricsResponse) MarshalJSON() ([]byte, error) { +func (s ListLogMetricsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListLogMetricsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ListLogScopesResponse: The response from ListLogScopes. Every project has a +// _Default log scope that cannot be modified or deleted. +type ListLogScopesResponse struct { + // LogScopes: A list of log scopes. + LogScopes []*LogScope `json:"logScopes,omitempty"` + // NextPageToken: If there might be more results than appear in this response, + // then nextPageToken is included. To get the next set of results, call the + // same method again using the value of nextPageToken as pageToken. 
+ NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "LogScopes") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "LogScopes") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ListLogScopesResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListLogScopesResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListLogsResponse: Result returned from ListLogs. @@ -2302,9 +2373,9 @@ type ListLogsResponse struct { NullFields []string `json:"-"` } -func (s *ListLogsResponse) MarshalJSON() ([]byte, error) { +func (s ListLogsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListLogsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListMonitoredResourceDescriptorsResponse: Result returned from @@ -2332,9 +2403,9 @@ type ListMonitoredResourceDescriptorsResponse struct { NullFields []string `json:"-"` } -func (s *ListMonitoredResourceDescriptorsResponse) MarshalJSON() ([]byte, error) { +func (s ListMonitoredResourceDescriptorsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListMonitoredResourceDescriptorsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListOperationsResponse: The response message for Operations.ListOperations. @@ -2360,9 +2431,9 @@ type ListOperationsResponse struct { NullFields []string `json:"-"` } -func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { +func (s ListOperationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListOperationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListRecentQueriesResponse: The response from ListRecentQueries. @@ -2399,9 +2470,9 @@ type ListRecentQueriesResponse struct { NullFields []string `json:"-"` } -func (s *ListRecentQueriesResponse) MarshalJSON() ([]byte, error) { +func (s ListRecentQueriesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListRecentQueriesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListSavedQueriesResponse: The response from ListSavedQueries. @@ -2438,9 +2509,9 @@ type ListSavedQueriesResponse struct { NullFields []string `json:"-"` } -func (s *ListSavedQueriesResponse) MarshalJSON() ([]byte, error) { +func (s ListSavedQueriesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListSavedQueriesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListSinksResponse: Result returned from ListSinks. 
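ListLogScopesResponse above pages via nextPageToken, re-issuing the same call with the token as pageToken. A hedged sketch of draining all pages; the List call on ProjectsLocationsLogScopesService and its PageToken setter are not shown in this hunk, so their shapes are assumed to match the generator's other List calls:

package sketch

import (
	"context"

	logging "google.golang.org/api/logging/v2"
)

// allLogScopes collects every LogScope under a parent such as
// "projects/my-project/locations/global" (placeholder), following
// nextPageToken until the server returns an empty token.
func allLogScopes(ctx context.Context, svc *logging.Service, parent string) ([]*logging.LogScope, error) {
	var out []*logging.LogScope
	token := ""
	for {
		call := svc.Projects.Locations.LogScopes.List(parent).Context(ctx)
		if token != "" {
			call = call.PageToken(token)
		}
		resp, err := call.Do()
		if err != nil {
			return nil, err
		}
		out = append(out, resp.LogScopes...)
		if resp.NextPageToken == "" {
			return out, nil
		}
		token = resp.NextPageToken
	}
}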
@@ -2467,9 +2538,9 @@ type ListSinksResponse struct { NullFields []string `json:"-"` } -func (s *ListSinksResponse) MarshalJSON() ([]byte, error) { +func (s ListSinksResponse) MarshalJSON() ([]byte, error) { type NoMethod ListSinksResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListViewsResponse: The response from ListViews. @@ -2496,9 +2567,9 @@ type ListViewsResponse struct { NullFields []string `json:"-"` } -func (s *ListViewsResponse) MarshalJSON() ([]byte, error) { +func (s ListViewsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListViewsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Location: A resource that represents a Google Cloud location. @@ -2533,9 +2604,9 @@ type Location struct { NullFields []string `json:"-"` } -func (s *Location) MarshalJSON() ([]byte, error) { +func (s Location) MarshalJSON() ([]byte, error) { type NoMethod Location - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LocationMetadata: Cloud Logging specific location metadata. @@ -2556,15 +2627,15 @@ type LocationMetadata struct { NullFields []string `json:"-"` } -func (s *LocationMetadata) MarshalJSON() ([]byte, error) { +func (s LocationMetadata) MarshalJSON() ([]byte, error) { type NoMethod LocationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LogBucket: Describes a repository in which log entries are stored. type LogBucket struct { - // AnalyticsEnabled: Whether log analytics is enabled for this bucket.Once - // enabled, log analytics features cannot be disabled. + // AnalyticsEnabled: Optional. Whether log analytics is enabled for this + // bucket.Once enabled, log analytics features cannot be disabled. AnalyticsEnabled bool `json:"analyticsEnabled,omitempty"` // CmekSettings: Optional. The CMEK settings of the log bucket. If present, new // log entries written to this log bucket are encrypted using the CMEK key @@ -2635,9 +2706,9 @@ type LogBucket struct { NullFields []string `json:"-"` } -func (s *LogBucket) MarshalJSON() ([]byte, error) { +func (s LogBucket) MarshalJSON() ([]byte, error) { type NoMethod LogBucket - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LogEntry: An individual entry in a log. 
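The LogBucket hunk above marks AnalyticsEnabled as Optional while keeping its one-way semantics: once analytics is enabled it cannot be disabled. A hedged sketch of a bucket definition; RetentionDays is an upstream LogBucket field not visible in this hunk:

package sketch

import logging "google.golang.org/api/logging/v2"

// analyticsBucket sketches a bucket with log analytics on. Per the doc
// above this is irreversible, so set the flag deliberately.
func analyticsBucket() *logging.LogBucket {
	return &logging.LogBucket{
		AnalyticsEnabled: true,
		RetentionDays:    30,
	}
}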
@@ -2796,9 +2867,9 @@ type LogEntry struct { NullFields []string `json:"-"` } -func (s *LogEntry) MarshalJSON() ([]byte, error) { +func (s LogEntry) MarshalJSON() ([]byte, error) { type NoMethod LogEntry - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LogEntryOperation: Additional information about a potentially long-running @@ -2830,9 +2901,9 @@ type LogEntryOperation struct { NullFields []string `json:"-"` } -func (s *LogEntryOperation) MarshalJSON() ([]byte, error) { +func (s LogEntryOperation) MarshalJSON() ([]byte, error) { type NoMethod LogEntryOperation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LogEntrySourceLocation: Additional information about the source code @@ -2864,9 +2935,9 @@ type LogEntrySourceLocation struct { NullFields []string `json:"-"` } -func (s *LogEntrySourceLocation) MarshalJSON() ([]byte, error) { +func (s LogEntrySourceLocation) MarshalJSON() ([]byte, error) { type NoMethod LogEntrySourceLocation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LogErrorGroup: Contains metadata that associates the LogEntry to Error @@ -2892,9 +2963,9 @@ type LogErrorGroup struct { NullFields []string `json:"-"` } -func (s *LogErrorGroup) MarshalJSON() ([]byte, error) { +func (s LogErrorGroup) MarshalJSON() ([]byte, error) { type NoMethod LogErrorGroup - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LogExclusion: Specifies a set of log entries that are filtered out by a @@ -2945,9 +3016,9 @@ type LogExclusion struct { NullFields []string `json:"-"` } -func (s *LogExclusion) MarshalJSON() ([]byte, error) { +func (s LogExclusion) MarshalJSON() ([]byte, error) { type NoMethod LogExclusion - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LogLine: Application log line emitted while processing a request. @@ -2985,9 +3056,9 @@ type LogLine struct { NullFields []string `json:"-"` } -func (s *LogLine) MarshalJSON() ([]byte, error) { +func (s LogLine) MarshalJSON() ([]byte, error) { type NoMethod LogLine - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LogMetric: Describes a logs-based metric. The value of the metric is the @@ -3102,9 +3173,48 @@ type LogMetric struct { NullFields []string `json:"-"` } -func (s *LogMetric) MarshalJSON() ([]byte, error) { +func (s LogMetric) MarshalJSON() ([]byte, error) { type NoMethod LogMetric - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// LogScope: Describes a group of resources to read log entries from. +type LogScope struct { + // CreateTime: Output only. The creation timestamp of the log scope. + CreateTime string `json:"createTime,omitempty"` + // Description: Optional. Describes this log scope.The maximum length of the + // description is 8000 characters. + Description string `json:"description,omitempty"` + // Name: Output only. 
The resource name of the log scope.For + // example:projects/my-project/locations/global/logScopes/my-log-scope + Name string `json:"name,omitempty"` + // ResourceNames: Required. Names of one or more parent resources: + // projects/[PROJECT_ID]May alternatively be one or more views: + // projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW + // _ID]A log scope can include a maximum of 50 projects and a maximum of 100 + // resources in total. + ResourceNames []string `json:"resourceNames,omitempty"` + // UpdateTime: Output only. The last update timestamp of the log scope. + UpdateTime string `json:"updateTime,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "CreateTime") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s LogScope) MarshalJSON() ([]byte, error) { + type NoMethod LogScope + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LogSink: Describes a sink used to export log entries to one of the following @@ -3222,9 +3332,9 @@ type LogSink struct { NullFields []string `json:"-"` } -func (s *LogSink) MarshalJSON() ([]byte, error) { +func (s LogSink) MarshalJSON() ([]byte, error) { type NoMethod LogSink - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LogSplit: Additional information used to correlate multiple log entries. @@ -3255,9 +3365,9 @@ type LogSplit struct { NullFields []string `json:"-"` } -func (s *LogSplit) MarshalJSON() ([]byte, error) { +func (s LogSplit) MarshalJSON() ([]byte, error) { type NoMethod LogSplit - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LogView: Describes a view over log entries in a bucket. @@ -3296,9 +3406,9 @@ type LogView struct { NullFields []string `json:"-"` } -func (s *LogView) MarshalJSON() ([]byte, error) { +func (s LogView) MarshalJSON() ([]byte, error) { type NoMethod LogView - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LoggingQuery: Describes a Cloud Logging query that can be run in Logs @@ -3330,9 +3440,9 @@ type LoggingQuery struct { NullFields []string `json:"-"` } -func (s *LoggingQuery) MarshalJSON() ([]byte, error) { +func (s LoggingQuery) MarshalJSON() ([]byte, error) { type NoMethod LoggingQuery - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MetricDescriptor: Defines a metric type and its schema. 
Once a metric @@ -3491,9 +3601,9 @@ type MetricDescriptor struct { NullFields []string `json:"-"` } -func (s *MetricDescriptor) MarshalJSON() ([]byte, error) { +func (s MetricDescriptor) MarshalJSON() ([]byte, error) { type NoMethod MetricDescriptor - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MetricDescriptorMetadata: Additional annotations that can be used to guide @@ -3543,6 +3653,16 @@ type MetricDescriptorMetadata struct { // interval, excluding data loss due to errors. Metrics with a higher // granularity have a smaller sampling period. SamplePeriod string `json:"samplePeriod,omitempty"` + // TimeSeriesResourceHierarchyLevel: The scope of the timeseries data of the + // metric. + // + // Possible values: + // "TIME_SERIES_RESOURCE_HIERARCHY_LEVEL_UNSPECIFIED" - Do not use this + // default value. + // "PROJECT" - Scopes a metric to a project. + // "ORGANIZATION" - Scopes a metric to an organization. + // "FOLDER" - Scopes a metric to a folder. + TimeSeriesResourceHierarchyLevel []string `json:"timeSeriesResourceHierarchyLevel,omitempty"` // ForceSendFields is a list of field names (e.g. "IngestDelay") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See @@ -3556,9 +3676,9 @@ type MetricDescriptorMetadata struct { NullFields []string `json:"-"` } -func (s *MetricDescriptorMetadata) MarshalJSON() ([]byte, error) { +func (s MetricDescriptorMetadata) MarshalJSON() ([]byte, error) { type NoMethod MetricDescriptorMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MonitoredResource: An object representing a resource that can be used for @@ -3596,9 +3716,9 @@ type MonitoredResource struct { NullFields []string `json:"-"` } -func (s *MonitoredResource) MarshalJSON() ([]byte, error) { +func (s MonitoredResource) MarshalJSON() ([]byte, error) { type NoMethod MonitoredResource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MonitoredResourceDescriptor: An object that describes the schema of a @@ -3683,9 +3803,9 @@ type MonitoredResourceDescriptor struct { NullFields []string `json:"-"` } -func (s *MonitoredResourceDescriptor) MarshalJSON() ([]byte, error) { +func (s MonitoredResourceDescriptor) MarshalJSON() ([]byte, error) { type NoMethod MonitoredResourceDescriptor - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MonitoredResourceMetadata: Auxiliary metadata for a MonitoredResource @@ -3717,9 +3837,9 @@ type MonitoredResourceMetadata struct { NullFields []string `json:"-"` } -func (s *MonitoredResourceMetadata) MarshalJSON() ([]byte, error) { +func (s MonitoredResourceMetadata) MarshalJSON() ([]byte, error) { type NoMethod MonitoredResourceMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Operation: This resource represents a long-running operation that is the @@ -3763,9 +3883,9 @@ type Operation struct { NullFields []string `json:"-"` } -func (s *Operation) MarshalJSON() ([]byte, error) { +func (s Operation) MarshalJSON() ([]byte, error) { 
type NoMethod Operation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OpsAnalyticsQuery: Describes an analytics query that can be run in the Log @@ -3789,9 +3909,9 @@ type OpsAnalyticsQuery struct { NullFields []string `json:"-"` } -func (s *OpsAnalyticsQuery) MarshalJSON() ([]byte, error) { +func (s OpsAnalyticsQuery) MarshalJSON() ([]byte, error) { type NoMethod OpsAnalyticsQuery - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Policy: An Identity and Access Management (IAM) policy, which specifies @@ -3879,9 +3999,9 @@ type Policy struct { NullFields []string `json:"-"` } -func (s *Policy) MarshalJSON() ([]byte, error) { +func (s Policy) MarshalJSON() ([]byte, error) { type NoMethod Policy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RecentQuery: Describes a recent query executed on the Logs Explorer or Log @@ -3913,9 +4033,9 @@ type RecentQuery struct { NullFields []string `json:"-"` } -func (s *RecentQuery) MarshalJSON() ([]byte, error) { +func (s RecentQuery) MarshalJSON() ([]byte, error) { type NoMethod RecentQuery - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RequestLog: Complete log information about a single HTTP request to an App @@ -4020,9 +4140,9 @@ type RequestLog struct { NullFields []string `json:"-"` } -func (s *RequestLog) MarshalJSON() ([]byte, error) { +func (s RequestLog) MarshalJSON() ([]byte, error) { type NoMethod RequestLog - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *RequestLog) UnmarshalJSON(data []byte) error { @@ -4087,9 +4207,9 @@ type SavedQuery struct { NullFields []string `json:"-"` } -func (s *SavedQuery) MarshalJSON() ([]byte, error) { +func (s SavedQuery) MarshalJSON() ([]byte, error) { type NoMethod SavedQuery - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SetIamPolicyRequest: Request message for SetIamPolicy method. @@ -4116,9 +4236,9 @@ type SetIamPolicyRequest struct { NullFields []string `json:"-"` } -func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { +func (s SetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod SetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Settings: Describes the settings associated with a project, folder, @@ -4184,9 +4304,9 @@ type Settings struct { NullFields []string `json:"-"` } -func (s *Settings) MarshalJSON() ([]byte, error) { +func (s Settings) MarshalJSON() ([]byte, error) { type NoMethod Settings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SourceLocation: Specifies a location in a source code file. 
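Tying together the new LogScope message introduced a few hunks back: its doc caps a scope at 50 projects and 100 resources in total, and ResourceNames may mix projects with bucket views. A hedged sketch with placeholder IDs:

package sketch

import logging "google.golang.org/api/logging/v2"

// exampleScope sketches a log scope spanning a whole project plus one
// bucket view. All IDs are placeholders; Name, CreateTime, and UpdateTime
// are output-only, so they are omitted here.
func exampleScope() *logging.LogScope {
	return &logging.LogScope{
		Description: "app logs across the prod project and its audit view",
		ResourceNames: []string{
			"projects/my-project",
			"projects/my-project/locations/global/buckets/my-bucket/views/my-view",
		},
	}
}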
@@ -4215,9 +4335,9 @@ type SourceLocation struct { NullFields []string `json:"-"` } -func (s *SourceLocation) MarshalJSON() ([]byte, error) { +func (s SourceLocation) MarshalJSON() ([]byte, error) { type NoMethod SourceLocation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SourceReference: A reference to a particular snapshot of the source tree @@ -4242,9 +4362,9 @@ type SourceReference struct { NullFields []string `json:"-"` } -func (s *SourceReference) MarshalJSON() ([]byte, error) { +func (s SourceReference) MarshalJSON() ([]byte, error) { type NoMethod SourceReference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Status: The Status type defines a logical error model that is suitable for @@ -4276,9 +4396,9 @@ type Status struct { NullFields []string `json:"-"` } -func (s *Status) MarshalJSON() ([]byte, error) { +func (s Status) MarshalJSON() ([]byte, error) { type NoMethod Status - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SummaryField: A field from the LogEntry that is added to the summary line @@ -4301,9 +4421,9 @@ type SummaryField struct { NullFields []string `json:"-"` } -func (s *SummaryField) MarshalJSON() ([]byte, error) { +func (s SummaryField) MarshalJSON() ([]byte, error) { type NoMethod SummaryField - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SuppressionInfo: Information about entries that were omitted from the @@ -4335,9 +4455,9 @@ type SuppressionInfo struct { NullFields []string `json:"-"` } -func (s *SuppressionInfo) MarshalJSON() ([]byte, error) { +func (s SuppressionInfo) MarshalJSON() ([]byte, error) { type NoMethod SuppressionInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TailLogEntriesRequest: The parameters to TailLogEntries. @@ -4379,9 +4499,9 @@ type TailLogEntriesRequest struct { NullFields []string `json:"-"` } -func (s *TailLogEntriesRequest) MarshalJSON() ([]byte, error) { +func (s TailLogEntriesRequest) MarshalJSON() ([]byte, error) { type NoMethod TailLogEntriesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TailLogEntriesResponse: Result returned from TailLogEntries. @@ -4412,9 +4532,9 @@ type TailLogEntriesResponse struct { NullFields []string `json:"-"` } -func (s *TailLogEntriesResponse) MarshalJSON() ([]byte, error) { +func (s TailLogEntriesResponse) MarshalJSON() ([]byte, error) { type NoMethod TailLogEntriesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsRequest: Request message for TestIamPermissions method. 
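TailLogEntriesRequest/Response above drive live tailing, with SuppressionInfo reporting entries dropped by rate limits or the filter. A hedged sketch of the request message; BufferWindow, Filter, and ResourceNames are upstream fields whose bodies this hunk elides, and the values are placeholders:

package sketch

import logging "google.golang.org/api/logging/v2"

// tailRequest sketches tailing ERROR-and-above entries for one project,
// buffering two seconds so late-arriving entries stay ordered.
func tailRequest() *logging.TailLogEntriesRequest {
	return &logging.TailLogEntriesRequest{
		ResourceNames: []string{"projects/my-project"},
		Filter:        "severity>=ERROR",
		BufferWindow:  "2s",
	}
}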
@@ -4437,9 +4557,9 @@ type TestIamPermissionsRequest struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsResponse: Response message for TestIamPermissions method. @@ -4463,9 +4583,9 @@ type TestIamPermissionsResponse struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UndeleteBucketRequest: The parameters to UndeleteBucket. @@ -4504,9 +4624,9 @@ type UpdateBucketRequest struct { NullFields []string `json:"-"` } -func (s *UpdateBucketRequest) MarshalJSON() ([]byte, error) { +func (s UpdateBucketRequest) MarshalJSON() ([]byte, error) { type NoMethod UpdateBucketRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WriteLogEntriesRequest: The parameters to WriteLogEntries. @@ -4578,9 +4698,9 @@ type WriteLogEntriesRequest struct { NullFields []string `json:"-"` } -func (s *WriteLogEntriesRequest) MarshalJSON() ([]byte, error) { +func (s WriteLogEntriesRequest) MarshalJSON() ([]byte, error) { type NoMethod WriteLogEntriesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // WriteLogEntriesResponse: Result returned from WriteLogEntries. @@ -8255,6 +8375,15 @@ func (r *BillingAccountsLocationsRecentQueriesService) List(parent string) *Bill return c } +// Filter sets the optional parameter "filter": Specifies the type ("Logging" +// or "OpsAnalytics") of the recent queries to list. The only valid value for +// this field is one of the two allowable type function calls, which are the +// following: type("Logging") type("OpsAnalytics") +func (c *BillingAccountsLocationsRecentQueriesListCall) Filter(filter string) *BillingAccountsLocationsRecentQueriesListCall { + c.urlParams_.Set("filter", filter) + return c +} + // PageSize sets the optional parameter "pageSize": The maximum number of // results to return from this request. Non-positive values are ignored. The // presence of nextPageToken in the response indicates that more results might @@ -8607,6 +8736,121 @@ func (c *BillingAccountsLocationsSavedQueriesDeleteCall) Do(opts ...googleapi.Ca return ret, nil } +type BillingAccountsLocationsSavedQueriesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns all data associated with the requested query. +// +// - name: The resource name of the saved query. 
+// "projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]" +// "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/savedQueries/[QUER +// Y_ID]" +// "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/savedQueries/ +// [QUERY_ID]" +// "folders/[FOLDER_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]" For +// example: +// "projects/my-project/locations/global/savedQueries/my-saved-query". +func (r *BillingAccountsLocationsSavedQueriesService) Get(name string) *BillingAccountsLocationsSavedQueriesGetCall { + c := &BillingAccountsLocationsSavedQueriesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BillingAccountsLocationsSavedQueriesGetCall) Fields(s ...googleapi.Field) *BillingAccountsLocationsSavedQueriesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *BillingAccountsLocationsSavedQueriesGetCall) IfNoneMatch(entityTag string) *BillingAccountsLocationsSavedQueriesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BillingAccountsLocationsSavedQueriesGetCall) Context(ctx context.Context) *BillingAccountsLocationsSavedQueriesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *BillingAccountsLocationsSavedQueriesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BillingAccountsLocationsSavedQueriesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.billingAccounts.locations.savedQueries.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *SavedQuery.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *BillingAccountsLocationsSavedQueriesGetCall) Do(opts ...googleapi.CallOption) (*SavedQuery, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SavedQuery{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + type BillingAccountsLocationsSavedQueriesListCall struct { s *Service parent string @@ -8634,6 +8878,21 @@ func (r *BillingAccountsLocationsSavedQueriesService) List(parent string) *Billi return c } +// Filter sets the optional parameter "filter": Specifies the type ("Logging" +// or "OpsAnalytics") and the visibility (PRIVATE or SHARED) of the saved +// queries to list. If provided, the filter must contain either the type +// function or a visibility token, or both. If both are chosen, they can be +// placed in any order, but they must be joined by the AND operator or the +// empty character.The two supported type function calls are: type("Logging") +// type("OpsAnalytics")The two supported visibility tokens are: visibility = +// PRIVATE visibility = SHAREDFor example:type("Logging") AND visibility = +// PRIVATE visibility=SHARED type("OpsAnalytics") type("OpsAnalytics)" +// visibility = PRIVATE visibility = SHARED +func (c *BillingAccountsLocationsSavedQueriesListCall) Filter(filter string) *BillingAccountsLocationsSavedQueriesListCall { + c.urlParams_.Set("filter", filter) + return c +} + // PageSize sets the optional parameter "pageSize": The maximum number of // results to return from this request.Non-positive values are ignored. The // presence of nextPageToken in the response indicates that more results might @@ -8763,6 +9022,125 @@ func (c *BillingAccountsLocationsSavedQueriesListCall) Pages(ctx context.Context } } +type BillingAccountsLocationsSavedQueriesPatchCall struct { + s *Service + name string + savedquery *SavedQuery + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates an existing SavedQuery. +// +// - name: Output only. Resource name of the saved query.In the format: +// "projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]" +// For a list of supported locations, see Supported Regions +// (https://cloud.google.com/logging/docs/region-support#bucket-regions)After +// the saved query is created, the location cannot be changed.If the user +// doesn't provide a QUERY_ID, the system will generate an alphanumeric ID. +func (r *BillingAccountsLocationsSavedQueriesService) Patch(name string, savedquery *SavedQuery) *BillingAccountsLocationsSavedQueriesPatchCall { + c := &BillingAccountsLocationsSavedQueriesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.savedquery = savedquery + return c +} + +// UpdateMask sets the optional parameter "updateMask": Required. A non-empty +// list of fields to change in the existing saved query. Fields are relative to +// the saved_query and new values for the fields are taken from the +// corresponding fields in the SavedQuery included in this request. 
Fields not +// mentioned in update_mask are not changed and are ignored in the request.To +// update all mutable fields, specify an update_mask of *.For example, to +// change the description and query filter text of a saved query, specify an +// update_mask of "description, query.filter". +func (c *BillingAccountsLocationsSavedQueriesPatchCall) UpdateMask(updateMask string) *BillingAccountsLocationsSavedQueriesPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BillingAccountsLocationsSavedQueriesPatchCall) Fields(s ...googleapi.Field) *BillingAccountsLocationsSavedQueriesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BillingAccountsLocationsSavedQueriesPatchCall) Context(ctx context.Context) *BillingAccountsLocationsSavedQueriesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *BillingAccountsLocationsSavedQueriesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BillingAccountsLocationsSavedQueriesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.savedquery) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.billingAccounts.locations.savedQueries.patch" call. +// Any non-2xx status code is an error. Response headers are in either +// *SavedQuery.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *BillingAccountsLocationsSavedQueriesPatchCall) Do(opts ...googleapi.CallOption) (*SavedQuery, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SavedQuery{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + type BillingAccountsLogsDeleteCall struct { s *Service logName string @@ -9061,12 +9439,12 @@ func (r *BillingAccountsSinksService) Create(parent string, logsink *LogSink) *B return c } -// CustomWriterIdentity sets the optional parameter "customWriterIdentity": A +// CustomWriterIdentity sets the optional parameter "customWriterIdentity": The // service account provided by the caller that will be used to write the log // entries. The format must be serviceAccount:some@email. This field can only -// be specified if you are routing logs to a destination outside this sink's -// project. If not specified, a Logging service account will automatically be -// generated. +// be specified when you are routing logs to a log bucket that is in a +// different project than the sink. When not specified, a Logging service +// account will automatically be generated. func (c *BillingAccountsSinksCreateCall) CustomWriterIdentity(customWriterIdentity string) *BillingAccountsSinksCreateCall { c.urlParams_.Set("customWriterIdentity", customWriterIdentity) return c @@ -9575,12 +9953,12 @@ func (r *BillingAccountsSinksService) Patch(sinkNameid string, logsink *LogSink) return c } -// CustomWriterIdentity sets the optional parameter "customWriterIdentity": A +// CustomWriterIdentity sets the optional parameter "customWriterIdentity": The // service account provided by the caller that will be used to write the log // entries. The format must be serviceAccount:some@email. This field can only -// be specified if you are routing logs to a destination outside this sink's -// project. If not specified, a Logging service account will automatically be -// generated. +// be specified when you are routing logs to a log bucket that is in a +// different project than the sink. When not specified, a Logging service +// account will automatically be generated. func (c *BillingAccountsSinksPatchCall) CustomWriterIdentity(customWriterIdentity string) *BillingAccountsSinksPatchCall { c.urlParams_.Set("customWriterIdentity", customWriterIdentity) return c @@ -9726,12 +10104,12 @@ func (r *BillingAccountsSinksService) Update(sinkNameid string, logsink *LogSink return c } -// CustomWriterIdentity sets the optional parameter "customWriterIdentity": A +// CustomWriterIdentity sets the optional parameter "customWriterIdentity": The // service account provided by the caller that will be used to write the log // entries. The format must be serviceAccount:some@email. This field can only -// be specified if you are routing logs to a destination outside this sink's -// project. If not specified, a Logging service account will automatically be -// generated. +// be specified when you are routing logs to a log bucket that is in a +// different project than the sink. When not specified, a Logging service +// account will automatically be generated. 
func (c *BillingAccountsSinksUpdateCall) CustomWriterIdentity(customWriterIdentity string) *BillingAccountsSinksUpdateCall { c.urlParams_.Set("customWriterIdentity", customWriterIdentity) return c @@ -11105,8 +11483,8 @@ type FoldersUpdateSettingsCall struct { // information. // // - name: The resource name for the settings to update. -// "organizations/[ORGANIZATION_ID]/settings" For -// example:"organizations/12345/settings". +// "organizations/[ORGANIZATION_ID]/settings" "folders/[FOLDER_ID]/settings" +// For example:"organizations/12345/settings". func (r *FoldersService) UpdateSettings(name string, settings *Settings) *FoldersUpdateSettingsCall { c := &FoldersUpdateSettingsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -14558,66 +14936,69 @@ func (c *FoldersLocationsBucketsViewsLogsListCall) Pages(ctx context.Context, f } } -type FoldersLocationsOperationsCancelCall struct { - s *Service - name string - canceloperationrequest *CancelOperationRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FoldersLocationsLogScopesCreateCall struct { + s *Service + parent string + logscope *LogScope + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Cancel: Starts asynchronous cancellation on a long-running operation. The -// server makes a best effort to cancel the operation, but success is not -// guaranteed. If the server doesn't support this method, it returns -// google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or -// other methods to check whether the cancellation succeeded or whether the -// operation completed despite cancellation. On successful cancellation, the -// operation is not deleted; instead, it becomes an operation with an -// Operation.error value with a google.rpc.Status.code of 1, corresponding to -// Code.CANCELLED. +// Create: Creates a log scope. // -// - name: The name of the operation resource to be cancelled. -func (r *FoldersLocationsOperationsService) Cancel(name string, canceloperationrequest *CancelOperationRequest) *FoldersLocationsOperationsCancelCall { - c := &FoldersLocationsOperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.canceloperationrequest = canceloperationrequest +// - parent: The parent project in which to create the log scope +// "projects/[PROJECT_ID]/locations/[LOCATION_ID]" For +// example:"projects/my-project/locations/global". +func (r *FoldersLocationsLogScopesService) Create(parent string, logscope *LogScope) *FoldersLocationsLogScopesCreateCall { + c := &FoldersLocationsLogScopesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.logscope = logscope + return c +} + +// LogScopeId sets the optional parameter "logScopeId": Required. A +// client-assigned identifier such as "log-scope". Identifiers are limited to +// 100 characters and can include only letters, digits, underscores, hyphens, +// and periods. First character has to be alphanumeric. +func (c *FoldersLocationsLogScopesCreateCall) LogScopeId(logScopeId string) *FoldersLocationsLogScopesCreateCall { + c.urlParams_.Set("logScopeId", logScopeId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. 
-func (c *FoldersLocationsOperationsCancelCall) Fields(s ...googleapi.Field) *FoldersLocationsOperationsCancelCall { +func (c *FoldersLocationsLogScopesCreateCall) Fields(s ...googleapi.Field) *FoldersLocationsLogScopesCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. -func (c *FoldersLocationsOperationsCancelCall) Context(ctx context.Context) *FoldersLocationsOperationsCancelCall { +func (c *FoldersLocationsLogScopesCreateCall) Context(ctx context.Context) *FoldersLocationsLogScopesCreateCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *FoldersLocationsOperationsCancelCall) Header() http.Header { +func (c *FoldersLocationsLogScopesCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FoldersLocationsOperationsCancelCall) doRequest(alt string) (*http.Response, error) { +func (c *FoldersLocationsLogScopesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.canceloperationrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logscope) if err != nil { return nil, err } c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}:cancel") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/logScopes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -14625,17 +15006,17 @@ func (c *FoldersLocationsOperationsCancelCall) doRequest(alt string) (*http.Resp } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "name": c.name, + "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.folders.locations.operations.cancel" call. +// Do executes the "logging.folders.locations.logScopes.create" call. // Any non-2xx status code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) in +// *LogScope.ServerResponse.Header or (if a response was returned at all) in // error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check // whether the returned error was because http.StatusNotModified was returned. -func (c *FoldersLocationsOperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, error) { +func (c *FoldersLocationsLogScopesCreateCall) Do(opts ...googleapi.CallOption) (*LogScope, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -14654,7 +15035,7 @@ func (c *FoldersLocationsOperationsCancelCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Empty{ + ret := &LogScope{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -14667,22 +15048,21 @@ func (c *FoldersLocationsOperationsCancelCall) Do(opts ...googleapi.CallOption) return ret, nil } -type FoldersLocationsOperationsGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type FoldersLocationsLogScopesDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Gets the latest state of a long-running operation. Clients can use this -// method to poll the operation result at intervals as recommended by the API -// service. +// Delete: Deletes a log scope. // -// - name: The name of the operation resource. -func (r *FoldersLocationsOperationsService) Get(name string) *FoldersLocationsOperationsGetCall { - c := &FoldersLocationsOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The resource name of the log scope to delete: +// "projects/[PROJECT_ID]/locations/[LOCATION_ID]/logScopes/[LOG_SCOPE_ID]" +// For example:"projects/my-project/locations/global/logScopes/my-log-scope". +func (r *FoldersLocationsLogScopesService) Delete(name string) *FoldersLocationsLogScopesDeleteCall { + c := &FoldersLocationsLogScopesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } @@ -14690,45 +15070,34 @@ func (r *FoldersLocationsOperationsService) Get(name string) *FoldersLocationsOp // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *FoldersLocationsOperationsGetCall) Fields(s ...googleapi.Field) *FoldersLocationsOperationsGetCall { +func (c *FoldersLocationsLogScopesDeleteCall) Fields(s ...googleapi.Field) *FoldersLocationsLogScopesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets an optional parameter which makes the operation fail if the -// object's ETag matches the given value. This is useful for getting updates -// only after the object has changed since the last request. -func (c *FoldersLocationsOperationsGetCall) IfNoneMatch(entityTag string) *FoldersLocationsOperationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. -func (c *FoldersLocationsOperationsGetCall) Context(ctx context.Context) *FoldersLocationsOperationsGetCall { +func (c *FoldersLocationsLogScopesDeleteCall) Context(ctx context.Context) *FoldersLocationsLogScopesDeleteCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. 
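The logScopeId rules quoted above (client-assigned, at most 100 characters, only letters, digits, underscores, hyphens, and periods, alphanumeric first character) are easiest to read off a call sketch. Illustrative only: ctx, svc, and imports are as in the earlier sink sketch, and the Description/ResourceNames fields on LogScope are assumed from the v2 REST resource rather than shown in this diff:

    func createLogScope(ctx context.Context, svc *logging.Service) (*logging.LogScope, error) {
        scope := &logging.LogScope{
            Description:   "scope over two projects",                                 // assumed field
            ResourceNames: []string{"projects/my-project", "projects/other-project"}, // assumed field
        }
        return svc.Folders.Locations.LogScopes.
            Create("projects/my-project/locations/global", scope). // parent format per the Create doc comment
            LogScopeId("log-scope").                               // must start with an alphanumeric character
            Context(ctx).
            Do()
    }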
-func (c *FoldersLocationsOperationsGetCall) Header() http.Header { +func (c *FoldersLocationsLogScopesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FoldersLocationsOperationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *FoldersLocationsLogScopesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -14739,12 +15108,12 @@ func (c *FoldersLocationsOperationsGetCall) doRequest(alt string) (*http.Respons return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.folders.locations.operations.get" call. +// Do executes the "logging.folders.locations.logScopes.delete" call. // Any non-2xx status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at all) in +// *Empty.ServerResponse.Header or (if a response was returned at all) in // error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check // whether the returned error was because http.StatusNotModified was returned. -func (c *FoldersLocationsOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *FoldersLocationsLogScopesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -14763,7 +15132,7 @@ func (c *FoldersLocationsOperationsGetCall) Do(opts ...googleapi.CallOption) (*O if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -14776,7 +15145,7 @@ func (c *FoldersLocationsOperationsGetCall) Do(opts ...googleapi.CallOption) (*O return ret, nil } -type FoldersLocationsOperationsListCall struct { +type FoldersLocationsLogScopesGetCall struct { s *Service name string urlParams_ gensupport.URLParams @@ -14785,40 +15154,21 @@ type FoldersLocationsOperationsListCall struct { header_ http.Header } -// List: Lists operations that match the specified filter in the request. If -// the server doesn't support this method, it returns UNIMPLEMENTED. +// Get: Gets a log scope. // -// - name: The name of the operation's parent resource. -func (r *FoldersLocationsOperationsService) List(name string) *FoldersLocationsOperationsListCall { - c := &FoldersLocationsOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The resource name of the log scope: +// "projects/[PROJECT_ID]/locations/[LOCATION_ID]/logScopes/[LOG_SCOPE_ID]" +// For example:"projects/my-project/locations/global/logScopes/my-log-scope". +func (r *FoldersLocationsLogScopesService) Get(name string) *FoldersLocationsLogScopesGetCall { + c := &FoldersLocationsLogScopesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } -// Filter sets the optional parameter "filter": The standard list filter. 
-func (c *FoldersLocationsOperationsListCall) Filter(filter string) *FoldersLocationsOperationsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// PageSize sets the optional parameter "pageSize": The standard list page -// size. -func (c *FoldersLocationsOperationsListCall) PageSize(pageSize int64) *FoldersLocationsOperationsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": The standard list page -// token. -func (c *FoldersLocationsOperationsListCall) PageToken(pageToken string) *FoldersLocationsOperationsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *FoldersLocationsOperationsListCall) Fields(s ...googleapi.Field) *FoldersLocationsOperationsListCall { +func (c *FoldersLocationsLogScopesGetCall) Fields(s ...googleapi.Field) *FoldersLocationsLogScopesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -14826,27 +15176,27 @@ func (c *FoldersLocationsOperationsListCall) Fields(s ...googleapi.Field) *Folde // IfNoneMatch sets an optional parameter which makes the operation fail if the // object's ETag matches the given value. This is useful for getting updates // only after the object has changed since the last request. -func (c *FoldersLocationsOperationsListCall) IfNoneMatch(entityTag string) *FoldersLocationsOperationsListCall { +func (c *FoldersLocationsLogScopesGetCall) IfNoneMatch(entityTag string) *FoldersLocationsLogScopesGetCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. -func (c *FoldersLocationsOperationsListCall) Context(ctx context.Context) *FoldersLocationsOperationsListCall { +func (c *FoldersLocationsLogScopesGetCall) Context(ctx context.Context) *FoldersLocationsLogScopesGetCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *FoldersLocationsOperationsListCall) Header() http.Header { +func (c *FoldersLocationsLogScopesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FoldersLocationsOperationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *FoldersLocationsLogScopesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -14854,7 +15204,7 @@ func (c *FoldersLocationsOperationsListCall) doRequest(alt string) (*http.Respon var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}/operations") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -14867,13 +15217,12 @@ func (c *FoldersLocationsOperationsListCall) doRequest(alt string) (*http.Respon return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.folders.locations.operations.list" call. +// Do executes the "logging.folders.locations.logScopes.get" call. // Any non-2xx status code is an error. 
Response headers are in either -// *ListOperationsResponse.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified was -// returned. -func (c *FoldersLocationsOperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) { +// *LogScope.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *FoldersLocationsLogScopesGetCall) Do(opts ...googleapi.CallOption) (*LogScope, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -14892,7 +15241,7 @@ func (c *FoldersLocationsOperationsListCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ListOperationsResponse{ + ret := &LogScope{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -14905,28 +15254,7 @@ func (c *FoldersLocationsOperationsListCall) Do(opts ...googleapi.CallOption) (* return ret, nil } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *FoldersLocationsOperationsListCall) Pages(ctx context.Context, f func(*ListOperationsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -type FoldersLocationsRecentQueriesListCall struct { +type FoldersLocationsLogScopesListCall struct { s *Service parent string urlParams_ gensupport.URLParams @@ -14935,28 +15263,21 @@ type FoldersLocationsRecentQueriesListCall struct { header_ http.Header } -// List: Lists the RecentQueries that were created by the user making the -// request. +// List: Lists log scopes. // -// - parent: The resource to which the listed queries belong. -// "projects/[PROJECT_ID]/locations/[LOCATION_ID]" -// "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]" -// "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]" -// "folders/[FOLDER_ID]/locations/[LOCATION_ID]" For -// example:projects/my-project/locations/us-central1Note: The location -// portion of the resource must be specified, but supplying the character - -// in place of LOCATION_ID will return all recent queries. -func (r *FoldersLocationsRecentQueriesService) List(parent string) *FoldersLocationsRecentQueriesListCall { - c := &FoldersLocationsRecentQueriesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - parent: The parent resource whose log scopes are to be listed: +// "projects/[PROJECT_ID]/locations/[LOCATION_ID]". +func (r *FoldersLocationsLogScopesService) List(parent string) *FoldersLocationsLogScopesListCall { + c := &FoldersLocationsLogScopesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent return c } // PageSize sets the optional parameter "pageSize": The maximum number of -// results to return from this request. Non-positive values are ignored. 
The +// results to return from this request.Non-positive values are ignored. The // presence of nextPageToken in the response indicates that more results might // be available. -func (c *FoldersLocationsRecentQueriesListCall) PageSize(pageSize int64) *FoldersLocationsRecentQueriesListCall { +func (c *FoldersLocationsLogScopesListCall) PageSize(pageSize int64) *FoldersLocationsLogScopesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } @@ -14965,7 +15286,7 @@ func (c *FoldersLocationsRecentQueriesListCall) PageSize(pageSize int64) *Folder // the next batch of results from the preceding call to this method. pageToken // must be the value of nextPageToken from the previous response. The values of // other method parameters should be identical to those in the previous call. -func (c *FoldersLocationsRecentQueriesListCall) PageToken(pageToken string) *FoldersLocationsRecentQueriesListCall { +func (c *FoldersLocationsLogScopesListCall) PageToken(pageToken string) *FoldersLocationsLogScopesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -14973,7 +15294,7 @@ func (c *FoldersLocationsRecentQueriesListCall) PageToken(pageToken string) *Fol // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *FoldersLocationsRecentQueriesListCall) Fields(s ...googleapi.Field) *FoldersLocationsRecentQueriesListCall { +func (c *FoldersLocationsLogScopesListCall) Fields(s ...googleapi.Field) *FoldersLocationsLogScopesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -14981,27 +15302,27 @@ func (c *FoldersLocationsRecentQueriesListCall) Fields(s ...googleapi.Field) *Fo // IfNoneMatch sets an optional parameter which makes the operation fail if the // object's ETag matches the given value. This is useful for getting updates // only after the object has changed since the last request. -func (c *FoldersLocationsRecentQueriesListCall) IfNoneMatch(entityTag string) *FoldersLocationsRecentQueriesListCall { +func (c *FoldersLocationsLogScopesListCall) IfNoneMatch(entityTag string) *FoldersLocationsLogScopesListCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. -func (c *FoldersLocationsRecentQueriesListCall) Context(ctx context.Context) *FoldersLocationsRecentQueriesListCall { +func (c *FoldersLocationsLogScopesListCall) Context(ctx context.Context) *FoldersLocationsLogScopesListCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. 
-func (c *FoldersLocationsRecentQueriesListCall) Header() http.Header { +func (c *FoldersLocationsLogScopesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FoldersLocationsRecentQueriesListCall) doRequest(alt string) (*http.Response, error) { +func (c *FoldersLocationsLogScopesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -15009,7 +15330,7 @@ func (c *FoldersLocationsRecentQueriesListCall) doRequest(alt string) (*http.Res var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/recentQueries") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/logScopes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -15022,13 +15343,13 @@ func (c *FoldersLocationsRecentQueriesListCall) doRequest(alt string) (*http.Res return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.folders.locations.recentQueries.list" call. +// Do executes the "logging.folders.locations.logScopes.list" call. // Any non-2xx status code is an error. Response headers are in either -// *ListRecentQueriesResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FoldersLocationsRecentQueriesListCall) Do(opts ...googleapi.CallOption) (*ListRecentQueriesResponse, error) { +// *ListLogScopesResponse.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *FoldersLocationsLogScopesListCall) Do(opts ...googleapi.CallOption) (*ListLogScopesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -15047,7 +15368,7 @@ func (c *FoldersLocationsRecentQueriesListCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ListRecentQueriesResponse{ + ret := &ListLogScopesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -15063,7 +15384,7 @@ func (c *FoldersLocationsRecentQueriesListCall) Do(opts ...googleapi.CallOption) // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *FoldersLocationsRecentQueriesListCall) Pages(ctx context.Context, f func(*ListRecentQueriesResponse) error) error { +func (c *FoldersLocationsLogScopesListCall) Pages(ctx context.Context, f func(*ListLogScopesResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) for { @@ -15081,94 +15402,88 @@ func (c *FoldersLocationsRecentQueriesListCall) Pages(ctx context.Context, f fun } } -type FoldersLocationsSavedQueriesCreateCall struct { +type FoldersLocationsLogScopesPatchCall struct { s *Service - parent string - savedquery *SavedQuery + name string + logscope *LogScope urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Create: Creates a new SavedQuery for the user making the request. +// Patch: Updates a log scope. // -// - parent: The parent resource in which to create the saved query: -// "projects/[PROJECT_ID]/locations/[LOCATION_ID]" -// "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]" -// "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]" -// "folders/[FOLDER_ID]/locations/[LOCATION_ID]" For example: -// "projects/my-project/locations/global" -// "organizations/123456789/locations/us-central1". -func (r *FoldersLocationsSavedQueriesService) Create(parent string, savedquery *SavedQuery) *FoldersLocationsSavedQueriesCreateCall { - c := &FoldersLocationsSavedQueriesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.savedquery = savedquery +// - name: Output only. The resource name of the log scope.For +// example:projects/my-project/locations/global/logScopes/my-log-scope. +func (r *FoldersLocationsLogScopesService) Patch(name string, logscope *LogScope) *FoldersLocationsLogScopesPatchCall { + c := &FoldersLocationsLogScopesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.logscope = logscope return c } -// SavedQueryId sets the optional parameter "savedQueryId": The ID to use for -// the saved query, which will become the final component of the saved query's -// resource name.If the saved_query_id is not provided, the system will -// generate an alphanumeric ID.The saved_query_id is limited to 100 characters -// and can include only the following characters: upper and lower-case -// alphanumeric characters, underscores, hyphens, periods.First character has -// to be alphanumeric. -func (c *FoldersLocationsSavedQueriesCreateCall) SavedQueryId(savedQueryId string) *FoldersLocationsSavedQueriesCreateCall { - c.urlParams_.Set("savedQueryId", savedQueryId) +// UpdateMask sets the optional parameter "updateMask": Field mask that +// specifies the fields in log_scope that need an update. A field will be +// overwritten if, and only if, it is in the update mask. name and output only +// fields cannot be updated.For a detailed FieldMask definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskFor +// example: updateMask=description +func (c *FoldersLocationsLogScopesPatchCall) UpdateMask(updateMask string) *FoldersLocationsLogScopesPatchCall { + c.urlParams_.Set("updateMask", updateMask) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. 
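The UpdateMask semantics documented above (only masked fields are overwritten; name and output-only fields are immutable) pair with the new Patch call like so. A sketch under the same assumptions as the earlier snippets, reusing the updateMask=description example from the doc comment:

    func updateLogScopeDescription(ctx context.Context, svc *logging.Service) error {
        _, err := svc.Folders.Locations.LogScopes.
            Patch("projects/my-project/locations/global/logScopes/my-log-scope", // name format per the Patch doc comment
                &logging.LogScope{Description: "updated description"}). // assumed field; only masked fields are written
            UpdateMask("description").
            Context(ctx).
            Do()
        return err
    }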
-func (c *FoldersLocationsSavedQueriesCreateCall) Fields(s ...googleapi.Field) *FoldersLocationsSavedQueriesCreateCall { +func (c *FoldersLocationsLogScopesPatchCall) Fields(s ...googleapi.Field) *FoldersLocationsLogScopesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. -func (c *FoldersLocationsSavedQueriesCreateCall) Context(ctx context.Context) *FoldersLocationsSavedQueriesCreateCall { +func (c *FoldersLocationsLogScopesPatchCall) Context(ctx context.Context) *FoldersLocationsLogScopesPatchCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *FoldersLocationsSavedQueriesCreateCall) Header() http.Header { +func (c *FoldersLocationsLogScopesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FoldersLocationsSavedQueriesCreateCall) doRequest(alt string) (*http.Response, error) { +func (c *FoldersLocationsLogScopesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.savedquery) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logscope) if err != nil { return nil, err } c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/savedQueries") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.folders.locations.savedQueries.create" call. +// Do executes the "logging.folders.locations.logScopes.patch" call. // Any non-2xx status code is an error. Response headers are in either -// *SavedQuery.ServerResponse.Header or (if a response was returned at all) in +// *LogScope.ServerResponse.Header or (if a response was returned at all) in // error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check // whether the returned error was because http.StatusNotModified was returned. -func (c *FoldersLocationsSavedQueriesCreateCall) Do(opts ...googleapi.CallOption) (*SavedQuery, error) { +func (c *FoldersLocationsLogScopesPatchCall) Do(opts ...googleapi.CallOption) (*LogScope, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -15187,7 +15502,7 @@ func (c *FoldersLocationsSavedQueriesCreateCall) Do(opts ...googleapi.CallOption if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &SavedQuery{ + ret := &LogScope{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -15200,63 +15515,68 @@ func (c *FoldersLocationsSavedQueriesCreateCall) Do(opts ...googleapi.CallOption return ret, nil } -type FoldersLocationsSavedQueriesDeleteCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FoldersLocationsOperationsCancelCall struct { + s *Service + name string + canceloperationrequest *CancelOperationRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes an existing SavedQuery that was created by the user making -// the request. +// Cancel: Starts asynchronous cancellation on a long-running operation. The +// server makes a best effort to cancel the operation, but success is not +// guaranteed. If the server doesn't support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or +// other methods to check whether the cancellation succeeded or whether the +// operation completed despite cancellation. On successful cancellation, the +// operation is not deleted; instead, it becomes an operation with an +// Operation.error value with a google.rpc.Status.code of 1, corresponding to +// Code.CANCELLED. // -// - name: The full resource name of the saved query to delete. -// "projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]" -// "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/savedQueries/[QUER -// Y_ID]" -// "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/savedQueries/ -// [QUERY_ID]" -// "folders/[FOLDER_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]" For -// example: -// "projects/my-project/locations/global/savedQueries/my-saved-query". -func (r *FoldersLocationsSavedQueriesService) Delete(name string) *FoldersLocationsSavedQueriesDeleteCall { - c := &FoldersLocationsSavedQueriesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The name of the operation resource to be cancelled. +func (r *FoldersLocationsOperationsService) Cancel(name string, canceloperationrequest *CancelOperationRequest) *FoldersLocationsOperationsCancelCall { + c := &FoldersLocationsOperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name + c.canceloperationrequest = canceloperationrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *FoldersLocationsSavedQueriesDeleteCall) Fields(s ...googleapi.Field) *FoldersLocationsSavedQueriesDeleteCall { +func (c *FoldersLocationsOperationsCancelCall) Fields(s ...googleapi.Field) *FoldersLocationsOperationsCancelCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. 
-func (c *FoldersLocationsSavedQueriesDeleteCall) Context(ctx context.Context) *FoldersLocationsSavedQueriesDeleteCall { +func (c *FoldersLocationsOperationsCancelCall) Context(ctx context.Context) *FoldersLocationsOperationsCancelCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *FoldersLocationsSavedQueriesDeleteCall) Header() http.Header { +func (c *FoldersLocationsOperationsCancelCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FoldersLocationsSavedQueriesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) +func (c *FoldersLocationsOperationsCancelCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.canceloperationrequest) + if err != nil { + return nil, err + } c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}:cancel") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -15267,12 +15587,12 @@ func (c *FoldersLocationsSavedQueriesDeleteCall) doRequest(alt string) (*http.Re return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.folders.locations.savedQueries.delete" call. +// Do executes the "logging.folders.locations.operations.cancel" call. // Any non-2xx status code is an error. Response headers are in either // *Empty.ServerResponse.Header or (if a response was returned at all) in // error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check // whether the returned error was because http.StatusNotModified was returned. -func (c *FoldersLocationsSavedQueriesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { +func (c *FoldersLocationsOperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -15304,6 +15624,767 @@ func (c *FoldersLocationsSavedQueriesDeleteCall) Do(opts ...googleapi.CallOption return ret, nil } +type FoldersLocationsOperationsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the latest state of a long-running operation. Clients can use this +// method to poll the operation result at intervals as recommended by the API +// service. +// +// - name: The name of the operation resource. +func (r *FoldersLocationsOperationsService) Get(name string) *FoldersLocationsOperationsGetCall { + c := &FoldersLocationsOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. 
+func (c *FoldersLocationsOperationsGetCall) Fields(s ...googleapi.Field) *FoldersLocationsOperationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *FoldersLocationsOperationsGetCall) IfNoneMatch(entityTag string) *FoldersLocationsOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *FoldersLocationsOperationsGetCall) Context(ctx context.Context) *FoldersLocationsOperationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *FoldersLocationsOperationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersLocationsOperationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.folders.locations.operations.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *FoldersLocationsOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type FoldersLocationsOperationsListCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists operations that match the specified filter in the request. If +// the server doesn't support this method, it returns UNIMPLEMENTED. +// +// - name: The name of the operation's parent resource. 
+func (r *FoldersLocationsOperationsService) List(name string) *FoldersLocationsOperationsListCall { + c := &FoldersLocationsOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Filter sets the optional parameter "filter": The standard list filter. +func (c *FoldersLocationsOperationsListCall) Filter(filter string) *FoldersLocationsOperationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// PageSize sets the optional parameter "pageSize": The standard list page +// size. +func (c *FoldersLocationsOperationsListCall) PageSize(pageSize int64) *FoldersLocationsOperationsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The standard list page +// token. +func (c *FoldersLocationsOperationsListCall) PageToken(pageToken string) *FoldersLocationsOperationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *FoldersLocationsOperationsListCall) Fields(s ...googleapi.Field) *FoldersLocationsOperationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *FoldersLocationsOperationsListCall) IfNoneMatch(entityTag string) *FoldersLocationsOperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *FoldersLocationsOperationsListCall) Context(ctx context.Context) *FoldersLocationsOperationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *FoldersLocationsOperationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersLocationsOperationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}/operations") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.folders.locations.operations.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *ListOperationsResponse.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *FoldersLocationsOperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ListOperationsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *FoldersLocationsOperationsListCall) Pages(ctx context.Context, f func(*ListOperationsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type FoldersLocationsRecentQueriesListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists the RecentQueries that were created by the user making the +// request. +// +// - parent: The resource to which the listed queries belong. +// "projects/[PROJECT_ID]/locations/[LOCATION_ID]" +// "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]" +// "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]" +// "folders/[FOLDER_ID]/locations/[LOCATION_ID]" For +// example:projects/my-project/locations/us-central1Note: The location +// portion of the resource must be specified, but supplying the character - +// in place of LOCATION_ID will return all recent queries. +func (r *FoldersLocationsRecentQueriesService) List(parent string) *FoldersLocationsRecentQueriesListCall { + c := &FoldersLocationsRecentQueriesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// Filter sets the optional parameter "filter": Specifies the type ("Logging" +// or "OpsAnalytics") of the recent queries to list. The only valid value for +// this field is one of the two allowable type function calls, which are the +// following: type("Logging") type("OpsAnalytics") +func (c *FoldersLocationsRecentQueriesListCall) Filter(filter string) *FoldersLocationsRecentQueriesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number of +// results to return from this request. Non-positive values are ignored. The +// presence of nextPageToken in the response indicates that more results might +// be available. +func (c *FoldersLocationsRecentQueriesListCall) PageSize(pageSize int64) *FoldersLocationsRecentQueriesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If present, then retrieve +// the next batch of results from the preceding call to this method. pageToken +// must be the value of nextPageToken from the previous response. The values of +// other method parameters should be identical to those in the previous call. 
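The new filter parameter on the recent-queries list accepts only the two type() calls named above and composes with the standard pageSize/pageToken plumbing. A sketch, assuming ListRecentQueriesResponse carries a RecentQueries slice (the field is not shown in this hunk):

    func countLoggingRecentQueries(ctx context.Context, svc *logging.Service) error {
        call := svc.Folders.Locations.RecentQueries.
            List("folders/123/locations/-"). // "-" in place of LOCATION_ID returns all locations, per the doc comment
            Filter(`type("Logging")`).
            PageSize(100)
        return call.Pages(ctx, func(page *logging.ListRecentQueriesResponse) error {
            fmt.Printf("page with %d recent queries\n", len(page.RecentQueries)) // assumed field
            return nil
        })
    }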
+func (c *FoldersLocationsRecentQueriesListCall) PageToken(pageToken string) *FoldersLocationsRecentQueriesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *FoldersLocationsRecentQueriesListCall) Fields(s ...googleapi.Field) *FoldersLocationsRecentQueriesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *FoldersLocationsRecentQueriesListCall) IfNoneMatch(entityTag string) *FoldersLocationsRecentQueriesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *FoldersLocationsRecentQueriesListCall) Context(ctx context.Context) *FoldersLocationsRecentQueriesListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *FoldersLocationsRecentQueriesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersLocationsRecentQueriesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/recentQueries") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.folders.locations.recentQueries.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *ListRecentQueriesResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FoldersLocationsRecentQueriesListCall) Do(opts ...googleapi.CallOption) (*ListRecentQueriesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ListRecentQueriesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *FoldersLocationsRecentQueriesListCall) Pages(ctx context.Context, f func(*ListRecentQueriesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type FoldersLocationsSavedQueriesCreateCall struct { + s *Service + parent string + savedquery *SavedQuery + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a new SavedQuery for the user making the request. +// +// - parent: The parent resource in which to create the saved query: +// "projects/[PROJECT_ID]/locations/[LOCATION_ID]" +// "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]" +// "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]" +// "folders/[FOLDER_ID]/locations/[LOCATION_ID]" For example: +// "projects/my-project/locations/global" +// "organizations/123456789/locations/us-central1". +func (r *FoldersLocationsSavedQueriesService) Create(parent string, savedquery *SavedQuery) *FoldersLocationsSavedQueriesCreateCall { + c := &FoldersLocationsSavedQueriesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.savedquery = savedquery + return c +} + +// SavedQueryId sets the optional parameter "savedQueryId": The ID to use for +// the saved query, which will become the final component of the saved query's +// resource name.If the saved_query_id is not provided, the system will +// generate an alphanumeric ID.The saved_query_id is limited to 100 characters +// and can include only the following characters: upper and lower-case +// alphanumeric characters, underscores, hyphens, periods.First character has +// to be alphanumeric. +func (c *FoldersLocationsSavedQueriesCreateCall) SavedQueryId(savedQueryId string) *FoldersLocationsSavedQueriesCreateCall { + c.urlParams_.Set("savedQueryId", savedQueryId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *FoldersLocationsSavedQueriesCreateCall) Fields(s ...googleapi.Field) *FoldersLocationsSavedQueriesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *FoldersLocationsSavedQueriesCreateCall) Context(ctx context.Context) *FoldersLocationsSavedQueriesCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *FoldersLocationsSavedQueriesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersLocationsSavedQueriesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.savedquery) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/savedQueries") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.folders.locations.savedQueries.create" call. +// Any non-2xx status code is an error. Response headers are in either +// *SavedQuery.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *FoldersLocationsSavedQueriesCreateCall) Do(opts ...googleapi.CallOption) (*SavedQuery, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SavedQuery{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type FoldersLocationsSavedQueriesDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes an existing SavedQuery that was created by the user making +// the request. +// +// - name: The full resource name of the saved query to delete. +// "projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]" +// "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/savedQueries/[QUER +// Y_ID]" +// "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/savedQueries/ +// [QUERY_ID]" +// "folders/[FOLDER_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]" For +// example: +// "projects/my-project/locations/global/savedQueries/my-saved-query". +func (r *FoldersLocationsSavedQueriesService) Delete(name string) *FoldersLocationsSavedQueriesDeleteCall { + c := &FoldersLocationsSavedQueriesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *FoldersLocationsSavedQueriesDeleteCall) Fields(s ...googleapi.Field) *FoldersLocationsSavedQueriesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *FoldersLocationsSavedQueriesDeleteCall) Context(ctx context.Context) *FoldersLocationsSavedQueriesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *FoldersLocationsSavedQueriesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersLocationsSavedQueriesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.folders.locations.savedQueries.delete" call. +// Any non-2xx status code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *FoldersLocationsSavedQueriesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type FoldersLocationsSavedQueriesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns all data associated with the requested query. +// +// - name: The resource name of the saved query. +// "projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]" +// "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/savedQueries/[QUER +// Y_ID]" +// "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/savedQueries/ +// [QUERY_ID]" +// "folders/[FOLDER_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]" For +// example: +// "projects/my-project/locations/global/savedQueries/my-saved-query". +func (r *FoldersLocationsSavedQueriesService) Get(name string) *FoldersLocationsSavedQueriesGetCall { + c := &FoldersLocationsSavedQueriesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *FoldersLocationsSavedQueriesGetCall) Fields(s ...googleapi.Field) *FoldersLocationsSavedQueriesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. 
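+//
+// Editorial sketch, not generated code: paired with googleapi.IsNotModified
+// this supports cheap change polling. svc, name, and lastETag are assumed to
+// exist:
+//
+//	sq, err := svc.Folders.Locations.SavedQueries.Get(name).
+//		IfNoneMatch(lastETag).
+//		Context(ctx).Do()
+//	if googleapi.IsNotModified(err) {
+//		// Unchanged since lastETag; keep the cached copy.
+//	}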
+func (c *FoldersLocationsSavedQueriesGetCall) IfNoneMatch(entityTag string) *FoldersLocationsSavedQueriesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *FoldersLocationsSavedQueriesGetCall) Context(ctx context.Context) *FoldersLocationsSavedQueriesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *FoldersLocationsSavedQueriesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersLocationsSavedQueriesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.folders.locations.savedQueries.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *SavedQuery.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *FoldersLocationsSavedQueriesGetCall) Do(opts ...googleapi.CallOption) (*SavedQuery, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SavedQuery{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + type FoldersLocationsSavedQueriesListCall struct { s *Service parent string @@ -15331,6 +16412,21 @@ func (r *FoldersLocationsSavedQueriesService) List(parent string) *FoldersLocati return c } +// Filter sets the optional parameter "filter": Specifies the type ("Logging" +// or "OpsAnalytics") and the visibility (PRIVATE or SHARED) of the saved +// queries to list. If provided, the filter must contain either the type +// function or a visibility token, or both. 
If both are chosen, they can be
+// placed in any order, but they must be joined by the AND operator or the
+// empty character. The two supported type function calls are type("Logging")
+// and type("OpsAnalytics"). The two supported visibility tokens are
+// visibility = PRIVATE and visibility = SHARED. For example: type("Logging")
+// AND visibility = PRIVATE, visibility=SHARED type("OpsAnalytics"),
+// type("OpsAnalytics") visibility = PRIVATE, and visibility = SHARED.
+func (c *FoldersLocationsSavedQueriesListCall) Filter(filter string) *FoldersLocationsSavedQueriesListCall {
+	c.urlParams_.Set("filter", filter)
+	return c
+}
+
 // PageSize sets the optional parameter "pageSize": The maximum number of
 // results to return from this request.Non-positive values are ignored. The
 // presence of nextPageToken in the response indicates that more results might
 // be available.
@@ -15460,6 +16556,125 @@ func (c *FoldersLocationsSavedQueriesListCall) Pages(ctx context.Context, f func
 	}
 }
 
+type FoldersLocationsSavedQueriesPatchCall struct {
+	s          *Service
+	name       string
+	savedquery *SavedQuery
+	urlParams_ gensupport.URLParams
+	ctx_       context.Context
+	header_    http.Header
+}
+
+// Patch: Updates an existing SavedQuery.
+//
+//   - name: Output only. Resource name of the saved query. In the format:
+//     "projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]"
+//     For a list of supported locations, see Supported Regions
+//     (https://cloud.google.com/logging/docs/region-support#bucket-regions).
+//     After the saved query is created, the location cannot be changed. If the
+//     user doesn't provide a QUERY_ID, the system will generate an
+//     alphanumeric ID.
+func (r *FoldersLocationsSavedQueriesService) Patch(name string, savedquery *SavedQuery) *FoldersLocationsSavedQueriesPatchCall {
+	c := &FoldersLocationsSavedQueriesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.name = name
+	c.savedquery = savedquery
+	return c
+}
+
+// UpdateMask sets the optional parameter "updateMask": Required. A non-empty
+// list of fields to change in the existing saved query. Fields are relative to
+// the saved_query and new values for the fields are taken from the
+// corresponding fields in the SavedQuery included in this request. Fields not
+// mentioned in update_mask are not changed and are ignored in the request. To
+// update all mutable fields, specify an update_mask of *. For example, to
+// change the description and query filter text of a saved query, specify an
+// update_mask of "description, query.filter".
+func (c *FoldersLocationsSavedQueriesPatchCall) UpdateMask(updateMask string) *FoldersLocationsSavedQueriesPatchCall {
+	c.urlParams_.Set("updateMask", updateMask)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *FoldersLocationsSavedQueriesPatchCall) Fields(s ...googleapi.Field) *FoldersLocationsSavedQueriesPatchCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// Context sets the context to be used in this call's Do method.
+func (c *FoldersLocationsSavedQueriesPatchCall) Context(ctx context.Context) *FoldersLocationsSavedQueriesPatchCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
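+//
+// Editorial sketch, not generated code, with hypothetical identifiers and the
+// package assumed to be imported as logging: a partial update that touches
+// only the description, followed by a filtered list using the syntax
+// documented on Filter above:
+//
+//	updated, err := svc.Folders.Locations.SavedQueries.
+//		Patch(name, &logging.SavedQuery{Description: "updated"}).
+//		UpdateMask("description").
+//		Context(ctx).Do()
+//
+//	err = svc.Folders.Locations.SavedQueries.
+//		List("folders/123/locations/global").
+//		Filter(`type("Logging") AND visibility = SHARED`).
+//		Pages(ctx, func(page *logging.ListSavedQueriesResponse) error {
+//			return nil // inspect page.SavedQueries here
+//		})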
+func (c *FoldersLocationsSavedQueriesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersLocationsSavedQueriesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.savedquery) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.folders.locations.savedQueries.patch" call. +// Any non-2xx status code is an error. Response headers are in either +// *SavedQuery.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *FoldersLocationsSavedQueriesPatchCall) Do(opts ...googleapi.CallOption) (*SavedQuery, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SavedQuery{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + type FoldersLogsDeleteCall struct { s *Service logName string @@ -15758,12 +16973,12 @@ func (r *FoldersSinksService) Create(parent string, logsink *LogSink) *FoldersSi return c } -// CustomWriterIdentity sets the optional parameter "customWriterIdentity": A +// CustomWriterIdentity sets the optional parameter "customWriterIdentity": The // service account provided by the caller that will be used to write the log // entries. The format must be serviceAccount:some@email. This field can only -// be specified if you are routing logs to a destination outside this sink's -// project. If not specified, a Logging service account will automatically be -// generated. +// be specified when you are routing logs to a log bucket that is in a +// different project than the sink. When not specified, a Logging service +// account will automatically be generated. func (c *FoldersSinksCreateCall) CustomWriterIdentity(customWriterIdentity string) *FoldersSinksCreateCall { c.urlParams_.Set("customWriterIdentity", customWriterIdentity) return c @@ -16272,12 +17487,12 @@ func (r *FoldersSinksService) Patch(sinkNameid string, logsink *LogSink) *Folder return c } -// CustomWriterIdentity sets the optional parameter "customWriterIdentity": A +// CustomWriterIdentity sets the optional parameter "customWriterIdentity": The // service account provided by the caller that will be used to write the log // entries. 
The format must be serviceAccount:some@email. This field can only -// be specified if you are routing logs to a destination outside this sink's -// project. If not specified, a Logging service account will automatically be -// generated. +// be specified when you are routing logs to a log bucket that is in a +// different project than the sink. When not specified, a Logging service +// account will automatically be generated. func (c *FoldersSinksPatchCall) CustomWriterIdentity(customWriterIdentity string) *FoldersSinksPatchCall { c.urlParams_.Set("customWriterIdentity", customWriterIdentity) return c @@ -16423,12 +17638,12 @@ func (r *FoldersSinksService) Update(sinkNameid string, logsink *LogSink) *Folde return c } -// CustomWriterIdentity sets the optional parameter "customWriterIdentity": A +// CustomWriterIdentity sets the optional parameter "customWriterIdentity": The // service account provided by the caller that will be used to write the log // entries. The format must be serviceAccount:some@email. This field can only -// be specified if you are routing logs to a destination outside this sink's -// project. If not specified, a Logging service account will automatically be -// generated. +// be specified when you are routing logs to a log bucket that is in a +// different project than the sink. When not specified, a Logging service +// account will automatically be generated. func (c *FoldersSinksUpdateCall) CustomWriterIdentity(customWriterIdentity string) *FoldersSinksUpdateCall { c.urlParams_.Set("customWriterIdentity", customWriterIdentity) return c @@ -20314,8 +21529,8 @@ type OrganizationsUpdateSettingsCall struct { // information. // // - name: The resource name for the settings to update. -// "organizations/[ORGANIZATION_ID]/settings" For -// example:"organizations/12345/settings". +// "organizations/[ORGANIZATION_ID]/settings" "folders/[FOLDER_ID]/settings" +// For example:"organizations/12345/settings". func (r *OrganizationsService) UpdateSettings(name string, settings *Settings) *OrganizationsUpdateSettingsCall { c := &OrganizationsUpdateSettingsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -23171,21 +24386,523 @@ func (c *OrganizationsLocationsBucketsViewsListCall) IfNoneMatch(entityTag strin } // Context sets the context to be used in this call's Do method. -func (c *OrganizationsLocationsBucketsViewsListCall) Context(ctx context.Context) *OrganizationsLocationsBucketsViewsListCall { +func (c *OrganizationsLocationsBucketsViewsListCall) Context(ctx context.Context) *OrganizationsLocationsBucketsViewsListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *OrganizationsLocationsBucketsViewsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsLocationsBucketsViewsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/views") + urls += "?" 
+ c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"parent": c.parent,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "logging.organizations.locations.buckets.views.list" call.
+// Any non-2xx status code is an error. Response headers are in either
+// *ListViewsResponse.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified was
+// returned.
+func (c *OrganizationsLocationsBucketsViewsListCall) Do(opts ...googleapi.CallOption) (*ListViewsResponse, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, gensupport.WrapError(&googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		})
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, gensupport.WrapError(err)
+	}
+	ret := &ListViewsResponse{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// Pages invokes f for each page of results.
+// A non-nil error returned from f will halt the iteration.
+// The provided context supersedes any context provided to the Context method.
+func (c *OrganizationsLocationsBucketsViewsListCall) Pages(ctx context.Context, f func(*ListViewsResponse) error) error {
+	c.ctx_ = ctx
+	defer c.PageToken(c.urlParams_.Get("pageToken"))
+	for {
+		x, err := c.Do()
+		if err != nil {
+			return err
+		}
+		if err := f(x); err != nil {
+			return err
+		}
+		if x.NextPageToken == "" {
+			return nil
+		}
+		c.PageToken(x.NextPageToken)
+	}
+}
+
+type OrganizationsLocationsBucketsViewsPatchCall struct {
+	s          *Service
+	name       string
+	logview    *LogView
+	urlParams_ gensupport.URLParams
+	ctx_       context.Context
+	header_    http.Header
+}
+
+// Patch: Updates a view on a log bucket. This method replaces the value of the
+// filter field from the existing view with the corresponding value from the
+// new view. If an UNAVAILABLE error is returned, this indicates that the
+// system is not in a state where it can update the view. If this occurs,
+// please try again in a few minutes.
+//
+//   - name: The full resource name of the view to update
+//     "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[V
+//     IEW_ID]" For
+//     example:"projects/my-project/locations/global/buckets/my-bucket/views/my-vi
+//     ew".
+func (r *OrganizationsLocationsBucketsViewsService) Patch(name string, logview *LogView) *OrganizationsLocationsBucketsViewsPatchCall {
+	c := &OrganizationsLocationsBucketsViewsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.name = name
+	c.logview = logview
+	return c
+}
+
+// UpdateMask sets the optional parameter "updateMask": Field mask that
+// specifies the fields in view that need an update. A field will be
+// overwritten if, and only if, it is in the update mask.
name and output only +// fields cannot be updated.For a detailed FieldMask definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskFor +// example: updateMask=filter +func (c *OrganizationsLocationsBucketsViewsPatchCall) UpdateMask(updateMask string) *OrganizationsLocationsBucketsViewsPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *OrganizationsLocationsBucketsViewsPatchCall) Fields(s ...googleapi.Field) *OrganizationsLocationsBucketsViewsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *OrganizationsLocationsBucketsViewsPatchCall) Context(ctx context.Context) *OrganizationsLocationsBucketsViewsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *OrganizationsLocationsBucketsViewsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsLocationsBucketsViewsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logview) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.organizations.locations.buckets.views.patch" call. +// Any non-2xx status code is an error. Response headers are in either +// *LogView.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *OrganizationsLocationsBucketsViewsPatchCall) Do(opts ...googleapi.CallOption) (*LogView, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &LogView{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type OrganizationsLocationsBucketsViewsSetIamPolicyCall struct { + s *Service + resource string + setiampolicyrequest *SetIamPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified resource. 
+// Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and +// PERMISSION_DENIED errors. +// +// - resource: REQUIRED: The resource for which the policy is being specified. +// See Resource names (https://cloud.google.com/apis/design/resource_names) +// for the appropriate value for this field. +func (r *OrganizationsLocationsBucketsViewsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *OrganizationsLocationsBucketsViewsSetIamPolicyCall { + c := &OrganizationsLocationsBucketsViewsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.setiampolicyrequest = setiampolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *OrganizationsLocationsBucketsViewsSetIamPolicyCall) Fields(s ...googleapi.Field) *OrganizationsLocationsBucketsViewsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *OrganizationsLocationsBucketsViewsSetIamPolicyCall) Context(ctx context.Context) *OrganizationsLocationsBucketsViewsSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *OrganizationsLocationsBucketsViewsSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsLocationsBucketsViewsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+resource}:setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.organizations.locations.buckets.views.setIamPolicy" call. +// Any non-2xx status code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *OrganizationsLocationsBucketsViewsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
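+	// Editorial note: a 304 Not Modified reply is a valid HTTP response, not a
+	// transport error, so it is checked first and converted into a
+	// *googleapi.Error that callers can detect with googleapi.IsNotModified.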
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type OrganizationsLocationsBucketsViewsTestIamPermissionsCall struct { + s *Service + resource string + testiampermissionsrequest *TestIamPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the specified +// resource. If the resource does not exist, this will return an empty set of +// permissions, not a NOT_FOUND error.Note: This operation is designed to be +// used for building permission-aware UIs and command-line tools, not for +// authorization checking. This operation may "fail open" without warning. +// +// - resource: REQUIRED: The resource for which the policy detail is being +// requested. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the appropriate +// value for this field. +func (r *OrganizationsLocationsBucketsViewsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *OrganizationsLocationsBucketsViewsTestIamPermissionsCall { + c := &OrganizationsLocationsBucketsViewsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.testiampermissionsrequest = testiampermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *OrganizationsLocationsBucketsViewsTestIamPermissionsCall) Fields(s ...googleapi.Field) *OrganizationsLocationsBucketsViewsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *OrganizationsLocationsBucketsViewsTestIamPermissionsCall) Context(ctx context.Context) *OrganizationsLocationsBucketsViewsTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *OrganizationsLocationsBucketsViewsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsLocationsBucketsViewsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+resource}:testIamPermissions") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.organizations.locations.buckets.views.testIamPermissions" call. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *OrganizationsLocationsBucketsViewsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TestIamPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type OrganizationsLocationsBucketsViewsLogsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists the logs in projects, organizations, folders, or billing +// accounts. Only logs that have entries are listed. +// +// - parent: The resource name to list logs for: projects/[PROJECT_ID] +// organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] +// folders/[FOLDER_ID]. +func (r *OrganizationsLocationsBucketsViewsLogsService) List(parent string) *OrganizationsLocationsBucketsViewsLogsListCall { + c := &OrganizationsLocationsBucketsViewsLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number of +// results to return from this request. Non-positive values are ignored. The +// presence of nextPageToken in the response indicates that more results might +// be available. +func (c *OrganizationsLocationsBucketsViewsLogsListCall) PageSize(pageSize int64) *OrganizationsLocationsBucketsViewsLogsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If present, then retrieve +// the next batch of results from the preceding call to this method. pageToken +// must be the value of nextPageToken from the previous response. The values of +// other method parameters should be identical to those in the previous call. 
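+//
+// Editorial sketch, not generated code: manual pagination with PageToken; the
+// Pages helper on this call automates the same loop. svc and viewParent are
+// assumed to exist:
+//
+//	call := svc.Organizations.Locations.Buckets.Views.Logs.List(viewParent)
+//	for {
+//		resp, err := call.Context(ctx).Do()
+//		if err != nil {
+//			break // handle the error in real code
+//		}
+//		_ = resp.LogNames // consume this page
+//		if resp.NextPageToken == "" {
+//			break
+//		}
+//		call.PageToken(resp.NextPageToken)
+//	}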
+func (c *OrganizationsLocationsBucketsViewsLogsListCall) PageToken(pageToken string) *OrganizationsLocationsBucketsViewsLogsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ResourceNames sets the optional parameter "resourceNames": List of resource +// names to list logs for: +// projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW +// _ID] +// organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/v +// iews/[VIEW_ID] +// billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ +// ID]/views/[VIEW_ID] +// folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_I +// D]To support legacy queries, it could also be: projects/[PROJECT_ID] +// organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] +// folders/[FOLDER_ID]The resource name in the parent field is added to this +// list. +func (c *OrganizationsLocationsBucketsViewsLogsListCall) ResourceNames(resourceNames ...string) *OrganizationsLocationsBucketsViewsLogsListCall { + c.urlParams_.SetMulti("resourceNames", append([]string{}, resourceNames...)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *OrganizationsLocationsBucketsViewsLogsListCall) Fields(s ...googleapi.Field) *OrganizationsLocationsBucketsViewsLogsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *OrganizationsLocationsBucketsViewsLogsListCall) IfNoneMatch(entityTag string) *OrganizationsLocationsBucketsViewsLogsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *OrganizationsLocationsBucketsViewsLogsListCall) Context(ctx context.Context) *OrganizationsLocationsBucketsViewsLogsListCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *OrganizationsLocationsBucketsViewsListCall) Header() http.Header { +func (c *OrganizationsLocationsBucketsViewsLogsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsLocationsBucketsViewsListCall) doRequest(alt string) (*http.Response, error) { +func (c *OrganizationsLocationsBucketsViewsLogsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -23193,7 +24910,7 @@ func (c *OrganizationsLocationsBucketsViewsListCall) doRequest(alt string) (*htt var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/views") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/logs") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -23206,13 +24923,13 @@ func (c *OrganizationsLocationsBucketsViewsListCall) doRequest(alt string) (*htt return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.organizations.locations.buckets.views.list" call. 
+// Do executes the "logging.organizations.locations.buckets.views.logs.list" call. // Any non-2xx status code is an error. Response headers are in either -// *ListViewsResponse.ServerResponse.Header or (if a response was returned at +// *ListLogsResponse.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified was // returned. -func (c *OrganizationsLocationsBucketsViewsListCall) Do(opts ...googleapi.CallOption) (*ListViewsResponse, error) { +func (c *OrganizationsLocationsBucketsViewsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -23231,7 +24948,7 @@ func (c *OrganizationsLocationsBucketsViewsListCall) Do(opts ...googleapi.CallOp if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ListViewsResponse{ + ret := &ListLogsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -23247,7 +24964,7 @@ func (c *OrganizationsLocationsBucketsViewsListCall) Do(opts ...googleapi.CallOp // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *OrganizationsLocationsBucketsViewsListCall) Pages(ctx context.Context, f func(*ListViewsResponse) error) error { +func (c *OrganizationsLocationsBucketsViewsLogsListCall) Pages(ctx context.Context, f func(*ListLogsResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) for { @@ -23265,95 +24982,87 @@ func (c *OrganizationsLocationsBucketsViewsListCall) Pages(ctx context.Context, } } -type OrganizationsLocationsBucketsViewsPatchCall struct { +type OrganizationsLocationsLogScopesCreateCall struct { s *Service - name string - logview *LogView + parent string + logscope *LogScope urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Patch: Updates a view on a log bucket. This method replaces the value of the -// filter field from the existing view with the corresponding value from the -// new view. If an UNAVAILABLE error is returned, this indicates that system is -// not in a state where it can update the view. If this occurs, please try -// again in a few minutes. +// Create: Creates a log scope. // -// - name: The full resource name of the view to update -// "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[V -// IEW_ID]" For -// example:"projects/my-project/locations/global/buckets/my-bucket/views/my-vi -// ew". -func (r *OrganizationsLocationsBucketsViewsService) Patch(name string, logview *LogView) *OrganizationsLocationsBucketsViewsPatchCall { - c := &OrganizationsLocationsBucketsViewsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.logview = logview +// - parent: The parent project in which to create the log scope +// "projects/[PROJECT_ID]/locations/[LOCATION_ID]" For +// example:"projects/my-project/locations/global". 
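+//
+// Editorial sketch, not generated code: the LogScopeId option is described
+// below. The parent value and the LogScope field are illustrative, and svc is
+// assumed to be a *logging.Service:
+//
+//	scope := &logging.LogScope{
+//		ResourceNames: []string{"projects/p1", "projects/p2"},
+//	}
+//	created, err := svc.Organizations.Locations.LogScopes.
+//		Create("organizations/123/locations/global", scope).
+//		LogScopeId("my-log-scope").
+//		Context(ctx).Do()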
+func (r *OrganizationsLocationsLogScopesService) Create(parent string, logscope *LogScope) *OrganizationsLocationsLogScopesCreateCall { + c := &OrganizationsLocationsLogScopesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.logscope = logscope return c } -// UpdateMask sets the optional parameter "updateMask": Field mask that -// specifies the fields in view that need an update. A field will be -// overwritten if, and only if, it is in the update mask. name and output only -// fields cannot be updated.For a detailed FieldMask definition, see -// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskFor -// example: updateMask=filter -func (c *OrganizationsLocationsBucketsViewsPatchCall) UpdateMask(updateMask string) *OrganizationsLocationsBucketsViewsPatchCall { - c.urlParams_.Set("updateMask", updateMask) +// LogScopeId sets the optional parameter "logScopeId": Required. A +// client-assigned identifier such as "log-scope". Identifiers are limited to +// 100 characters and can include only letters, digits, underscores, hyphens, +// and periods. First character has to be alphanumeric. +func (c *OrganizationsLocationsLogScopesCreateCall) LogScopeId(logScopeId string) *OrganizationsLocationsLogScopesCreateCall { + c.urlParams_.Set("logScopeId", logScopeId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *OrganizationsLocationsBucketsViewsPatchCall) Fields(s ...googleapi.Field) *OrganizationsLocationsBucketsViewsPatchCall { +func (c *OrganizationsLocationsLogScopesCreateCall) Fields(s ...googleapi.Field) *OrganizationsLocationsLogScopesCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. -func (c *OrganizationsLocationsBucketsViewsPatchCall) Context(ctx context.Context) *OrganizationsLocationsBucketsViewsPatchCall { +func (c *OrganizationsLocationsLogScopesCreateCall) Context(ctx context.Context) *OrganizationsLocationsLogScopesCreateCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *OrganizationsLocationsBucketsViewsPatchCall) Header() http.Header { +func (c *OrganizationsLocationsLogScopesCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsLocationsBucketsViewsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *OrganizationsLocationsLogScopesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.logview) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logscope) if err != nil { return nil, err } c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/logScopes") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "name": c.name, + "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.organizations.locations.buckets.views.patch" call. +// Do executes the "logging.organizations.locations.logScopes.create" call. // Any non-2xx status code is an error. Response headers are in either -// *LogView.ServerResponse.Header or (if a response was returned at all) in +// *LogScope.ServerResponse.Header or (if a response was returned at all) in // error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check // whether the returned error was because http.StatusNotModified was returned. -func (c *OrganizationsLocationsBucketsViewsPatchCall) Do(opts ...googleapi.CallOption) (*LogView, error) { +func (c *OrganizationsLocationsLogScopesCreateCall) Do(opts ...googleapi.CallOption) (*LogScope, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -23372,7 +25081,7 @@ func (c *OrganizationsLocationsBucketsViewsPatchCall) Do(opts ...googleapi.CallO if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &LogView{ + ret := &LogScope{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -23385,80 +25094,72 @@ func (c *OrganizationsLocationsBucketsViewsPatchCall) Do(opts ...googleapi.CallO return ret, nil } -type OrganizationsLocationsBucketsViewsSetIamPolicyCall struct { - s *Service - resource string - setiampolicyrequest *SetIamPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type OrganizationsLocationsLogScopesDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified resource. -// Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and -// PERMISSION_DENIED errors. +// Delete: Deletes a log scope. // -// - resource: REQUIRED: The resource for which the policy is being specified. -// See Resource names (https://cloud.google.com/apis/design/resource_names) -// for the appropriate value for this field. -func (r *OrganizationsLocationsBucketsViewsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *OrganizationsLocationsBucketsViewsSetIamPolicyCall { - c := &OrganizationsLocationsBucketsViewsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.setiampolicyrequest = setiampolicyrequest +// - name: The resource name of the log scope to delete: +// "projects/[PROJECT_ID]/locations/[LOCATION_ID]/logScopes/[LOG_SCOPE_ID]" +// For example:"projects/my-project/locations/global/logScopes/my-log-scope". +func (r *OrganizationsLocationsLogScopesService) Delete(name string) *OrganizationsLocationsLogScopesDeleteCall { + c := &OrganizationsLocationsLogScopesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. 
-func (c *OrganizationsLocationsBucketsViewsSetIamPolicyCall) Fields(s ...googleapi.Field) *OrganizationsLocationsBucketsViewsSetIamPolicyCall { +func (c *OrganizationsLocationsLogScopesDeleteCall) Fields(s ...googleapi.Field) *OrganizationsLocationsLogScopesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. -func (c *OrganizationsLocationsBucketsViewsSetIamPolicyCall) Context(ctx context.Context) *OrganizationsLocationsBucketsViewsSetIamPolicyCall { +func (c *OrganizationsLocationsLogScopesDeleteCall) Context(ctx context.Context) *OrganizationsLocationsLogScopesDeleteCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *OrganizationsLocationsBucketsViewsSetIamPolicyCall) Header() http.Header { +func (c *OrganizationsLocationsLogScopesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsLocationsBucketsViewsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +func (c *OrganizationsLocationsLogScopesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) - if err != nil { - return nil, err - } c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+resource}:setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.organizations.locations.buckets.views.setIamPolicy" call. +// Do executes the "logging.organizations.locations.logScopes.delete" call. // Any non-2xx status code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) in +// *Empty.ServerResponse.Header or (if a response was returned at all) in // error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check // whether the returned error was because http.StatusNotModified was returned. -func (c *OrganizationsLocationsBucketsViewsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +func (c *OrganizationsLocationsLogScopesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -23477,7 +25178,7 @@ func (c *OrganizationsLocationsBucketsViewsSetIamPolicyCall) Do(opts ...googleap if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -23490,84 +25191,84 @@ func (c *OrganizationsLocationsBucketsViewsSetIamPolicyCall) Do(opts ...googleap return ret, nil } -type OrganizationsLocationsBucketsViewsTestIamPermissionsCall struct { - s *Service - resource string - testiampermissionsrequest *TestIamPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type OrganizationsLocationsLogScopesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the specified -// resource. If the resource does not exist, this will return an empty set of -// permissions, not a NOT_FOUND error.Note: This operation is designed to be -// used for building permission-aware UIs and command-line tools, not for -// authorization checking. This operation may "fail open" without warning. +// Get: Gets a log scope. // -// - resource: REQUIRED: The resource for which the policy detail is being -// requested. See Resource names -// (https://cloud.google.com/apis/design/resource_names) for the appropriate -// value for this field. -func (r *OrganizationsLocationsBucketsViewsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *OrganizationsLocationsBucketsViewsTestIamPermissionsCall { - c := &OrganizationsLocationsBucketsViewsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.testiampermissionsrequest = testiampermissionsrequest +// - name: The resource name of the log scope: +// "projects/[PROJECT_ID]/locations/[LOCATION_ID]/logScopes/[LOG_SCOPE_ID]" +// For example:"projects/my-project/locations/global/logScopes/my-log-scope". +func (r *OrganizationsLocationsLogScopesService) Get(name string) *OrganizationsLocationsLogScopesGetCall { + c := &OrganizationsLocationsLogScopesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *OrganizationsLocationsBucketsViewsTestIamPermissionsCall) Fields(s ...googleapi.Field) *OrganizationsLocationsBucketsViewsTestIamPermissionsCall { +func (c *OrganizationsLocationsLogScopesGetCall) Fields(s ...googleapi.Field) *OrganizationsLocationsLogScopesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *OrganizationsLocationsLogScopesGetCall) IfNoneMatch(entityTag string) *OrganizationsLocationsLogScopesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. 
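+//
+// Editorial sketch, not generated code: bounding a get call with a deadline,
+// assuming svc and name exist:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	scope, err := svc.Organizations.Locations.LogScopes.Get(name).
+//		Context(ctx).Do()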
-func (c *OrganizationsLocationsBucketsViewsTestIamPermissionsCall) Context(ctx context.Context) *OrganizationsLocationsBucketsViewsTestIamPermissionsCall { +func (c *OrganizationsLocationsLogScopesGetCall) Context(ctx context.Context) *OrganizationsLocationsLogScopesGetCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *OrganizationsLocationsBucketsViewsTestIamPermissionsCall) Header() http.Header { +func (c *OrganizationsLocationsLogScopesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsLocationsBucketsViewsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) - if err != nil { - return nil, err +func (c *OrganizationsLocationsLogScopesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+resource}:testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.organizations.locations.buckets.views.testIamPermissions" call. +// Do executes the "logging.organizations.locations.logScopes.get" call. // Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *OrganizationsLocationsBucketsViewsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { +// *LogScope.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *OrganizationsLocationsLogScopesGetCall) Do(opts ...googleapi.CallOption) (*LogScope, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -23586,7 +25287,7 @@ func (c *OrganizationsLocationsBucketsViewsTestIamPermissionsCall) Do(opts ...go if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &TestIamPermissionsResponse{ + ret := &LogScope{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -23599,7 +25300,7 @@ func (c *OrganizationsLocationsBucketsViewsTestIamPermissionsCall) Do(opts ...go return ret, nil } -type OrganizationsLocationsBucketsViewsLogsListCall struct { +type OrganizationsLocationsLogScopesListCall struct { s *Service parent string urlParams_ gensupport.URLParams @@ -23608,23 +25309,21 @@ type OrganizationsLocationsBucketsViewsLogsListCall struct { header_ http.Header } -// List: Lists the logs in projects, organizations, folders, or billing -// accounts. Only logs that have entries are listed. +// List: Lists log scopes. // -// - parent: The resource name to list logs for: projects/[PROJECT_ID] -// organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] -// folders/[FOLDER_ID]. -func (r *OrganizationsLocationsBucketsViewsLogsService) List(parent string) *OrganizationsLocationsBucketsViewsLogsListCall { - c := &OrganizationsLocationsBucketsViewsLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - parent: The parent resource whose log scopes are to be listed: +// "projects/[PROJECT_ID]/locations/[LOCATION_ID]". +func (r *OrganizationsLocationsLogScopesService) List(parent string) *OrganizationsLocationsLogScopesListCall { + c := &OrganizationsLocationsLogScopesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent return c } // PageSize sets the optional parameter "pageSize": The maximum number of -// results to return from this request. Non-positive values are ignored. The +// results to return from this request.Non-positive values are ignored. The // presence of nextPageToken in the response indicates that more results might // be available. -func (c *OrganizationsLocationsBucketsViewsLogsListCall) PageSize(pageSize int64) *OrganizationsLocationsBucketsViewsLogsListCall { +func (c *OrganizationsLocationsLogScopesListCall) PageSize(pageSize int64) *OrganizationsLocationsLogScopesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } @@ -23633,33 +25332,15 @@ func (c *OrganizationsLocationsBucketsViewsLogsListCall) PageSize(pageSize int64 // the next batch of results from the preceding call to this method. pageToken // must be the value of nextPageToken from the previous response. The values of // other method parameters should be identical to those in the previous call. 
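+//
+// Editorial sketch, not generated code: the Pages helper on this call drives
+// the token handling below automatically. The parent value is illustrative:
+//
+//	err := svc.Organizations.Locations.LogScopes.
+//		List("organizations/123/locations/global").
+//		Pages(ctx, func(page *logging.ListLogScopesResponse) error {
+//			for _, ls := range page.LogScopes {
+//				_ = ls // process each log scope
+//			}
+//			return nil
+//		})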
-func (c *OrganizationsLocationsBucketsViewsLogsListCall) PageToken(pageToken string) *OrganizationsLocationsBucketsViewsLogsListCall { +func (c *OrganizationsLocationsLogScopesListCall) PageToken(pageToken string) *OrganizationsLocationsLogScopesListCall { c.urlParams_.Set("pageToken", pageToken) return c } -// ResourceNames sets the optional parameter "resourceNames": List of resource -// names to list logs for: -// projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW -// _ID] -// organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/v -// iews/[VIEW_ID] -// billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ -// ID]/views/[VIEW_ID] -// folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_I -// D]To support legacy queries, it could also be: projects/[PROJECT_ID] -// organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] -// folders/[FOLDER_ID]The resource name in the parent field is added to this -// list. -func (c *OrganizationsLocationsBucketsViewsLogsListCall) ResourceNames(resourceNames ...string) *OrganizationsLocationsBucketsViewsLogsListCall { - c.urlParams_.SetMulti("resourceNames", append([]string{}, resourceNames...)) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *OrganizationsLocationsBucketsViewsLogsListCall) Fields(s ...googleapi.Field) *OrganizationsLocationsBucketsViewsLogsListCall { +func (c *OrganizationsLocationsLogScopesListCall) Fields(s ...googleapi.Field) *OrganizationsLocationsLogScopesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -23667,27 +25348,27 @@ func (c *OrganizationsLocationsBucketsViewsLogsListCall) Fields(s ...googleapi.F // IfNoneMatch sets an optional parameter which makes the operation fail if the // object's ETag matches the given value. This is useful for getting updates // only after the object has changed since the last request. -func (c *OrganizationsLocationsBucketsViewsLogsListCall) IfNoneMatch(entityTag string) *OrganizationsLocationsBucketsViewsLogsListCall { +func (c *OrganizationsLocationsLogScopesListCall) IfNoneMatch(entityTag string) *OrganizationsLocationsLogScopesListCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. -func (c *OrganizationsLocationsBucketsViewsLogsListCall) Context(ctx context.Context) *OrganizationsLocationsBucketsViewsLogsListCall { +func (c *OrganizationsLocationsLogScopesListCall) Context(ctx context.Context) *OrganizationsLocationsLogScopesListCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. 
-func (c *OrganizationsLocationsBucketsViewsLogsListCall) Header() http.Header { +func (c *OrganizationsLocationsLogScopesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsLocationsBucketsViewsLogsListCall) doRequest(alt string) (*http.Response, error) { +func (c *OrganizationsLocationsLogScopesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -23695,7 +25376,7 @@ func (c *OrganizationsLocationsBucketsViewsLogsListCall) doRequest(alt string) ( var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/logs") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/logScopes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -23708,13 +25389,13 @@ func (c *OrganizationsLocationsBucketsViewsLogsListCall) doRequest(alt string) ( return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.organizations.locations.buckets.views.logs.list" call. +// Do executes the "logging.organizations.locations.logScopes.list" call. // Any non-2xx status code is an error. Response headers are in either -// *ListLogsResponse.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// *ListLogScopesResponse.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified was // returned. -func (c *OrganizationsLocationsBucketsViewsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsResponse, error) { +func (c *OrganizationsLocationsLogScopesListCall) Do(opts ...googleapi.CallOption) (*ListLogScopesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -23733,7 +25414,7 @@ func (c *OrganizationsLocationsBucketsViewsLogsListCall) Do(opts ...googleapi.Ca if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ListLogsResponse{ + ret := &ListLogScopesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -23749,7 +25430,7 @@ func (c *OrganizationsLocationsBucketsViewsLogsListCall) Do(opts ...googleapi.Ca // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *OrganizationsLocationsBucketsViewsLogsListCall) Pages(ctx context.Context, f func(*ListLogsResponse) error) error { +func (c *OrganizationsLocationsLogScopesListCall) Pages(ctx context.Context, f func(*ListLogScopesResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) for { @@ -23767,6 +25448,119 @@ func (c *OrganizationsLocationsBucketsViewsLogsListCall) Pages(ctx context.Conte } } +type OrganizationsLocationsLogScopesPatchCall struct { + s *Service + name string + logscope *LogScope + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a log scope. +// +// - name: Output only. 
The resource name of the log scope.For +// example:projects/my-project/locations/global/logScopes/my-log-scope. +func (r *OrganizationsLocationsLogScopesService) Patch(name string, logscope *LogScope) *OrganizationsLocationsLogScopesPatchCall { + c := &OrganizationsLocationsLogScopesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.logscope = logscope + return c +} + +// UpdateMask sets the optional parameter "updateMask": Field mask that +// specifies the fields in log_scope that need an update. A field will be +// overwritten if, and only if, it is in the update mask. name and output only +// fields cannot be updated.For a detailed FieldMask definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskFor +// example: updateMask=description +func (c *OrganizationsLocationsLogScopesPatchCall) UpdateMask(updateMask string) *OrganizationsLocationsLogScopesPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *OrganizationsLocationsLogScopesPatchCall) Fields(s ...googleapi.Field) *OrganizationsLocationsLogScopesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *OrganizationsLocationsLogScopesPatchCall) Context(ctx context.Context) *OrganizationsLocationsLogScopesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *OrganizationsLocationsLogScopesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsLocationsLogScopesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logscope) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.organizations.locations.logScopes.patch" call. +// Any non-2xx status code is an error. Response headers are in either +// *LogScope.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *OrganizationsLocationsLogScopesPatchCall) Do(opts ...googleapi.CallOption) (*LogScope, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
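+	// doRequest sends the JSON-encoded LogScope as the PATCH body; the
+	// updateMask query parameter set via UpdateMask above controls which
+	// fields the server actually overwrites.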
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &LogScope{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + type OrganizationsLocationsOperationsCancelCall struct { s *Service name string @@ -24161,6 +25955,15 @@ func (r *OrganizationsLocationsRecentQueriesService) List(parent string) *Organi return c } +// Filter sets the optional parameter "filter": Specifies the type ("Logging" +// or "OpsAnalytics") of the recent queries to list. The only valid value for +// this field is one of the two allowable type function calls, which are the +// following: type("Logging") type("OpsAnalytics") +func (c *OrganizationsLocationsRecentQueriesListCall) Filter(filter string) *OrganizationsLocationsRecentQueriesListCall { + c.urlParams_.Set("filter", filter) + return c +} + // PageSize sets the optional parameter "pageSize": The maximum number of // results to return from this request. Non-positive values are ignored. The // presence of nextPageToken in the response indicates that more results might @@ -24513,6 +26316,121 @@ func (c *OrganizationsLocationsSavedQueriesDeleteCall) Do(opts ...googleapi.Call return ret, nil } +type OrganizationsLocationsSavedQueriesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns all data associated with the requested query. +// +// - name: The resource name of the saved query. +// "projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]" +// "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/savedQueries/[QUER +// Y_ID]" +// "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/savedQueries/ +// [QUERY_ID]" +// "folders/[FOLDER_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]" For +// example: +// "projects/my-project/locations/global/savedQueries/my-saved-query". +func (r *OrganizationsLocationsSavedQueriesService) Get(name string) *OrganizationsLocationsSavedQueriesGetCall { + c := &OrganizationsLocationsSavedQueriesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *OrganizationsLocationsSavedQueriesGetCall) Fields(s ...googleapi.Field) *OrganizationsLocationsSavedQueriesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *OrganizationsLocationsSavedQueriesGetCall) IfNoneMatch(entityTag string) *OrganizationsLocationsSavedQueriesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. 
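+//
+// For example, a caller might bound the lookup with a deadline (a hypothetical
+// caller-side sketch, not part of the generated surface):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	sq, err := call.Context(ctx).Do()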
+func (c *OrganizationsLocationsSavedQueriesGetCall) Context(ctx context.Context) *OrganizationsLocationsSavedQueriesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *OrganizationsLocationsSavedQueriesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsLocationsSavedQueriesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.organizations.locations.savedQueries.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *SavedQuery.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *OrganizationsLocationsSavedQueriesGetCall) Do(opts ...googleapi.CallOption) (*SavedQuery, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SavedQuery{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + type OrganizationsLocationsSavedQueriesListCall struct { s *Service parent string @@ -24540,6 +26458,21 @@ func (r *OrganizationsLocationsSavedQueriesService) List(parent string) *Organiz return c } +// Filter sets the optional parameter "filter": Specifies the type ("Logging" +// or "OpsAnalytics") and the visibility (PRIVATE or SHARED) of the saved +// queries to list. If provided, the filter must contain either the type +// function or a visibility token, or both. 
If both are chosen, they can be +// placed in any order, but they must be joined by the AND operator or the +// empty character.The two supported type function calls are: type("Logging") +// type("OpsAnalytics")The two supported visibility tokens are: visibility = +// PRIVATE visibility = SHAREDFor example:type("Logging") AND visibility = +// PRIVATE visibility=SHARED type("OpsAnalytics") type("OpsAnalytics") +// visibility = PRIVATE visibility = SHARED +func (c *OrganizationsLocationsSavedQueriesListCall) Filter(filter string) *OrganizationsLocationsSavedQueriesListCall { + c.urlParams_.Set("filter", filter) + return c +} + // PageSize sets the optional parameter "pageSize": The maximum number of // results to return from this request.Non-positive values are ignored. The // presence of nextPageToken in the response indicates that more results might @@ -24669,6 +26602,125 @@ func (c *OrganizationsLocationsSavedQueriesListCall) Pages(ctx context.Context, } } +type OrganizationsLocationsSavedQueriesPatchCall struct { + s *Service + name string + savedquery *SavedQuery + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates an existing SavedQuery. +// +// - name: Output only. Resource name of the saved query.In the format: +// "projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]" +// For a list of supported locations, see Supported Regions +// (https://cloud.google.com/logging/docs/region-support#bucket-regions)After +// the saved query is created, the location cannot be changed.If the user +// doesn't provide a QUERY_ID, the system will generate an alphanumeric ID. +func (r *OrganizationsLocationsSavedQueriesService) Patch(name string, savedquery *SavedQuery) *OrganizationsLocationsSavedQueriesPatchCall { + c := &OrganizationsLocationsSavedQueriesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.savedquery = savedquery + return c +} + +// UpdateMask sets the optional parameter "updateMask": Required. A non-empty +// list of fields to change in the existing saved query. Fields are relative to +// the saved_query and new values for the fields are taken from the +// corresponding fields in the SavedQuery included in this request. Fields not +// mentioned in update_mask are not changed and are ignored in the request.To +// update all mutable fields, specify an update_mask of *.For example, to +// change the description and query filter text of a saved query, specify an +// update_mask of "description, query.filter". +func (c *OrganizationsLocationsSavedQueriesPatchCall) UpdateMask(updateMask string) *OrganizationsLocationsSavedQueriesPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *OrganizationsLocationsSavedQueriesPatchCall) Fields(s ...googleapi.Field) *OrganizationsLocationsSavedQueriesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *OrganizationsLocationsSavedQueriesPatchCall) Context(ctx context.Context) *OrganizationsLocationsSavedQueriesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request.
+func (c *OrganizationsLocationsSavedQueriesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsLocationsSavedQueriesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.savedquery) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.organizations.locations.savedQueries.patch" call. +// Any non-2xx status code is an error. Response headers are in either +// *SavedQuery.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *OrganizationsLocationsSavedQueriesPatchCall) Do(opts ...googleapi.CallOption) (*SavedQuery, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SavedQuery{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + type OrganizationsLogsDeleteCall struct { s *Service logName string @@ -24967,12 +27019,12 @@ func (r *OrganizationsSinksService) Create(parent string, logsink *LogSink) *Org return c } -// CustomWriterIdentity sets the optional parameter "customWriterIdentity": A +// CustomWriterIdentity sets the optional parameter "customWriterIdentity": The // service account provided by the caller that will be used to write the log // entries. The format must be serviceAccount:some@email. This field can only -// be specified if you are routing logs to a destination outside this sink's -// project. If not specified, a Logging service account will automatically be -// generated. +// be specified when you are routing logs to a log bucket that is in a +// different project than the sink. When not specified, a Logging service +// account will automatically be generated. 
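+//
+// A hypothetical value illustrating the expected serviceAccount:some@email
+// format:
+//
+//	serviceAccount:log-writer@other-project.iam.gserviceaccount.com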
func (c *OrganizationsSinksCreateCall) CustomWriterIdentity(customWriterIdentity string) *OrganizationsSinksCreateCall { c.urlParams_.Set("customWriterIdentity", customWriterIdentity) return c @@ -25481,12 +27533,12 @@ func (r *OrganizationsSinksService) Patch(sinkNameid string, logsink *LogSink) * return c } -// CustomWriterIdentity sets the optional parameter "customWriterIdentity": A +// CustomWriterIdentity sets the optional parameter "customWriterIdentity": The // service account provided by the caller that will be used to write the log // entries. The format must be serviceAccount:some@email. This field can only -// be specified if you are routing logs to a destination outside this sink's -// project. If not specified, a Logging service account will automatically be -// generated. +// be specified when you are routing logs to a log bucket that is in a +// different project than the sink. When not specified, a Logging service +// account will automatically be generated. func (c *OrganizationsSinksPatchCall) CustomWriterIdentity(customWriterIdentity string) *OrganizationsSinksPatchCall { c.urlParams_.Set("customWriterIdentity", customWriterIdentity) return c @@ -25632,12 +27684,12 @@ func (r *OrganizationsSinksService) Update(sinkNameid string, logsink *LogSink) return c } -// CustomWriterIdentity sets the optional parameter "customWriterIdentity": A +// CustomWriterIdentity sets the optional parameter "customWriterIdentity": The // service account provided by the caller that will be used to write the log // entries. The format must be serviceAccount:some@email. This field can only -// be specified if you are routing logs to a destination outside this sink's -// project. If not specified, a Logging service account will automatically be -// generated. +// be specified when you are routing logs to a log bucket that is in a +// different project than the sink. When not specified, a Logging service +// account will automatically be generated. func (c *OrganizationsSinksUpdateCall) CustomWriterIdentity(customWriterIdentity string) *OrganizationsSinksUpdateCall { c.urlParams_.Set("customWriterIdentity", customWriterIdentity) return c @@ -28749,21 +30801,523 @@ func (c *ProjectsLocationsBucketsViewsListCall) IfNoneMatch(entityTag string) *P } // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsBucketsViewsListCall) Context(ctx context.Context) *ProjectsLocationsBucketsViewsListCall { +func (c *ProjectsLocationsBucketsViewsListCall) Context(ctx context.Context) *ProjectsLocationsBucketsViewsListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsBucketsViewsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBucketsViewsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/views") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.projects.locations.buckets.views.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *ListViewsResponse.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsLocationsBucketsViewsListCall) Do(opts ...googleapi.CallOption) (*ListViewsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ListViewsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsBucketsViewsListCall) Pages(ctx context.Context, f func(*ListViewsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsLocationsBucketsViewsPatchCall struct { + s *Service + name string + logview *LogView + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a view on a log bucket. This method replaces the value of the +// filter field from the existing view with the corresponding value from the +// new view. If an UNAVAILABLE error is returned, this indicates that system is +// not in a state where it can update the view. If this occurs, please try +// again in a few minutes. +// +// - name: The full resource name of the view to update +// "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[V +// IEW_ID]" For +// example:"projects/my-project/locations/global/buckets/my-bucket/views/my-vi +// ew". +func (r *ProjectsLocationsBucketsViewsService) Patch(name string, logview *LogView) *ProjectsLocationsBucketsViewsPatchCall { + c := &ProjectsLocationsBucketsViewsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.logview = logview + return c +} + +// UpdateMask sets the optional parameter "updateMask": Field mask that +// specifies the fields in view that need an update. A field will be +// overwritten if, and only if, it is in the update mask. 
name and output only +// fields cannot be updated.For a detailed FieldMask definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskFor +// example: updateMask=filter +func (c *ProjectsLocationsBucketsViewsPatchCall) UpdateMask(updateMask string) *ProjectsLocationsBucketsViewsPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsBucketsViewsPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsBucketsViewsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsBucketsViewsPatchCall) Context(ctx context.Context) *ProjectsLocationsBucketsViewsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsBucketsViewsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBucketsViewsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logview) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.projects.locations.buckets.views.patch" call. +// Any non-2xx status code is an error. Response headers are in either +// *LogView.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsBucketsViewsPatchCall) Do(opts ...googleapi.CallOption) (*LogView, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &LogView{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsBucketsViewsSetIamPolicyCall struct { + s *Service + resource string + setiampolicyrequest *SetIamPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified resource. 
+// Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and +// PERMISSION_DENIED errors. +// +// - resource: REQUIRED: The resource for which the policy is being specified. +// See Resource names (https://cloud.google.com/apis/design/resource_names) +// for the appropriate value for this field. +func (r *ProjectsLocationsBucketsViewsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsBucketsViewsSetIamPolicyCall { + c := &ProjectsLocationsBucketsViewsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.setiampolicyrequest = setiampolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsBucketsViewsSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsBucketsViewsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsBucketsViewsSetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsBucketsViewsSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsBucketsViewsSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBucketsViewsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+resource}:setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.projects.locations.buckets.views.setIamPolicy" call. +// Any non-2xx status code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsBucketsViewsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
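+	// doRequest POSTs the JSON-encoded SetIamPolicyRequest; any non-2xx status
+	// is converted to a *googleapi.Error by googleapi.CheckResponse below and
+	// wrapped via gensupport.WrapError.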
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsBucketsViewsTestIamPermissionsCall struct { + s *Service + resource string + testiampermissionsrequest *TestIamPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the specified +// resource. If the resource does not exist, this will return an empty set of +// permissions, not a NOT_FOUND error.Note: This operation is designed to be +// used for building permission-aware UIs and command-line tools, not for +// authorization checking. This operation may "fail open" without warning. +// +// - resource: REQUIRED: The resource for which the policy detail is being +// requested. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the appropriate +// value for this field. +func (r *ProjectsLocationsBucketsViewsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsBucketsViewsTestIamPermissionsCall { + c := &ProjectsLocationsBucketsViewsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.testiampermissionsrequest = testiampermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsBucketsViewsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsLocationsBucketsViewsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsBucketsViewsTestIamPermissionsCall) Context(ctx context.Context) *ProjectsLocationsBucketsViewsTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsBucketsViewsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBucketsViewsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+resource}:testIamPermissions") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.projects.locations.buckets.views.testIamPermissions" call. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBucketsViewsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TestIamPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsBucketsViewsLogsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists the logs in projects, organizations, folders, or billing +// accounts. Only logs that have entries are listed. +// +// - parent: The resource name to list logs for: projects/[PROJECT_ID] +// organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] +// folders/[FOLDER_ID]. +func (r *ProjectsLocationsBucketsViewsLogsService) List(parent string) *ProjectsLocationsBucketsViewsLogsListCall { + c := &ProjectsLocationsBucketsViewsLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number of +// results to return from this request. Non-positive values are ignored. The +// presence of nextPageToken in the response indicates that more results might +// be available. +func (c *ProjectsLocationsBucketsViewsLogsListCall) PageSize(pageSize int64) *ProjectsLocationsBucketsViewsLogsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If present, then retrieve +// the next batch of results from the preceding call to this method. pageToken +// must be the value of nextPageToken from the previous response. The values of +// other method parameters should be identical to those in the previous call. 
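+// Setting pageToken manually is rarely needed: the Pages helper defined later
+// for this call advances it automatically from each response's nextPageToken.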
+func (c *ProjectsLocationsBucketsViewsLogsListCall) PageToken(pageToken string) *ProjectsLocationsBucketsViewsLogsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ResourceNames sets the optional parameter "resourceNames": List of resource +// names to list logs for: +// projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW +// _ID] +// organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/v +// iews/[VIEW_ID] +// billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ +// ID]/views/[VIEW_ID] +// folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_I +// D]To support legacy queries, it could also be: projects/[PROJECT_ID] +// organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] +// folders/[FOLDER_ID]The resource name in the parent field is added to this +// list. +func (c *ProjectsLocationsBucketsViewsLogsListCall) ResourceNames(resourceNames ...string) *ProjectsLocationsBucketsViewsLogsListCall { + c.urlParams_.SetMulti("resourceNames", append([]string{}, resourceNames...)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsBucketsViewsLogsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsBucketsViewsLogsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsBucketsViewsLogsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsBucketsViewsLogsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsBucketsViewsLogsListCall) Context(ctx context.Context) *ProjectsLocationsBucketsViewsLogsListCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsBucketsViewsListCall) Header() http.Header { +func (c *ProjectsLocationsBucketsViewsLogsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsBucketsViewsListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsBucketsViewsLogsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -28771,7 +31325,7 @@ func (c *ProjectsLocationsBucketsViewsListCall) doRequest(alt string) (*http.Res var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/views") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/logs") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -28784,13 +31338,13 @@ func (c *ProjectsLocationsBucketsViewsListCall) doRequest(alt string) (*http.Res return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.projects.locations.buckets.views.list" call. +// Do executes the "logging.projects.locations.buckets.views.logs.list" call. 
// Any non-2xx status code is an error. Response headers are in either -// *ListViewsResponse.ServerResponse.Header or (if a response was returned at +// *ListLogsResponse.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified was // returned. -func (c *ProjectsLocationsBucketsViewsListCall) Do(opts ...googleapi.CallOption) (*ListViewsResponse, error) { +func (c *ProjectsLocationsBucketsViewsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -28809,7 +31363,7 @@ func (c *ProjectsLocationsBucketsViewsListCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ListViewsResponse{ + ret := &ListLogsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -28825,7 +31379,7 @@ func (c *ProjectsLocationsBucketsViewsListCall) Do(opts ...googleapi.CallOption) // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *ProjectsLocationsBucketsViewsListCall) Pages(ctx context.Context, f func(*ListViewsResponse) error) error { +func (c *ProjectsLocationsBucketsViewsLogsListCall) Pages(ctx context.Context, f func(*ListLogsResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) for { @@ -28843,95 +31397,87 @@ func (c *ProjectsLocationsBucketsViewsListCall) Pages(ctx context.Context, f fun } } -type ProjectsLocationsBucketsViewsPatchCall struct { +type ProjectsLocationsLogScopesCreateCall struct { s *Service - name string - logview *LogView + parent string + logscope *LogScope urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Patch: Updates a view on a log bucket. This method replaces the value of the -// filter field from the existing view with the corresponding value from the -// new view. If an UNAVAILABLE error is returned, this indicates that system is -// not in a state where it can update the view. If this occurs, please try -// again in a few minutes. +// Create: Creates a log scope. // -// - name: The full resource name of the view to update -// "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[V -// IEW_ID]" For -// example:"projects/my-project/locations/global/buckets/my-bucket/views/my-vi -// ew". -func (r *ProjectsLocationsBucketsViewsService) Patch(name string, logview *LogView) *ProjectsLocationsBucketsViewsPatchCall { - c := &ProjectsLocationsBucketsViewsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.logview = logview +// - parent: The parent project in which to create the log scope +// "projects/[PROJECT_ID]/locations/[LOCATION_ID]" For +// example:"projects/my-project/locations/global". +func (r *ProjectsLocationsLogScopesService) Create(parent string, logscope *LogScope) *ProjectsLocationsLogScopesCreateCall { + c := &ProjectsLocationsLogScopesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.logscope = logscope return c } -// UpdateMask sets the optional parameter "updateMask": Field mask that -// specifies the fields in view that need an update. 
A field will be -// overwritten if, and only if, it is in the update mask. name and output only -// fields cannot be updated.For a detailed FieldMask definition, see -// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskFor -// example: updateMask=filter -func (c *ProjectsLocationsBucketsViewsPatchCall) UpdateMask(updateMask string) *ProjectsLocationsBucketsViewsPatchCall { - c.urlParams_.Set("updateMask", updateMask) +// LogScopeId sets the optional parameter "logScopeId": Required. A +// client-assigned identifier such as "log-scope". Identifiers are limited to +// 100 characters and can include only letters, digits, underscores, hyphens, +// and periods. First character has to be alphanumeric. +func (c *ProjectsLocationsLogScopesCreateCall) LogScopeId(logScopeId string) *ProjectsLocationsLogScopesCreateCall { + c.urlParams_.Set("logScopeId", logScopeId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsBucketsViewsPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsBucketsViewsPatchCall { +func (c *ProjectsLocationsLogScopesCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsLogScopesCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsBucketsViewsPatchCall) Context(ctx context.Context) *ProjectsLocationsBucketsViewsPatchCall { +func (c *ProjectsLocationsLogScopesCreateCall) Context(ctx context.Context) *ProjectsLocationsLogScopesCreateCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsBucketsViewsPatchCall) Header() http.Header { +func (c *ProjectsLocationsLogScopesCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsBucketsViewsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsLogScopesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.logview) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logscope) if err != nil { return nil, err } c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/logScopes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "name": c.name, + "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.projects.locations.buckets.views.patch" call. +// Do executes the "logging.projects.locations.logScopes.create" call. // Any non-2xx status code is an error. Response headers are in either -// *LogView.ServerResponse.Header or (if a response was returned at all) in +// *LogScope.ServerResponse.Header or (if a response was returned at all) in // error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to check // whether the returned error was because http.StatusNotModified was returned. -func (c *ProjectsLocationsBucketsViewsPatchCall) Do(opts ...googleapi.CallOption) (*LogView, error) { +func (c *ProjectsLocationsLogScopesCreateCall) Do(opts ...googleapi.CallOption) (*LogScope, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -28950,7 +31496,7 @@ func (c *ProjectsLocationsBucketsViewsPatchCall) Do(opts ...googleapi.CallOption if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &LogView{ + ret := &LogScope{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -28963,80 +31509,72 @@ func (c *ProjectsLocationsBucketsViewsPatchCall) Do(opts ...googleapi.CallOption return ret, nil } -type ProjectsLocationsBucketsViewsSetIamPolicyCall struct { - s *Service - resource string - setiampolicyrequest *SetIamPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsLogScopesDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified resource. -// Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and -// PERMISSION_DENIED errors. +// Delete: Deletes a log scope. // -// - resource: REQUIRED: The resource for which the policy is being specified. -// See Resource names (https://cloud.google.com/apis/design/resource_names) -// for the appropriate value for this field. -func (r *ProjectsLocationsBucketsViewsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsBucketsViewsSetIamPolicyCall { - c := &ProjectsLocationsBucketsViewsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.setiampolicyrequest = setiampolicyrequest +// - name: The resource name of the log scope to delete: +// "projects/[PROJECT_ID]/locations/[LOCATION_ID]/logScopes/[LOG_SCOPE_ID]" +// For example:"projects/my-project/locations/global/logScopes/my-log-scope". +func (r *ProjectsLocationsLogScopesService) Delete(name string) *ProjectsLocationsLogScopesDeleteCall { + c := &ProjectsLocationsLogScopesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsBucketsViewsSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsBucketsViewsSetIamPolicyCall { +func (c *ProjectsLocationsLogScopesDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsLogScopesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsBucketsViewsSetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsBucketsViewsSetIamPolicyCall { +func (c *ProjectsLocationsLogScopesDeleteCall) Context(ctx context.Context) *ProjectsLocationsLogScopesDeleteCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. 
-func (c *ProjectsLocationsBucketsViewsSetIamPolicyCall) Header() http.Header { +func (c *ProjectsLocationsLogScopesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsBucketsViewsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +func (c *ProjectsLocationsLogScopesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) - if err != nil { - return nil, err - } c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+resource}:setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.projects.locations.buckets.views.setIamPolicy" call. +// Do executes the "logging.projects.locations.logScopes.delete" call. // Any non-2xx status code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) in +// *Empty.ServerResponse.Header or (if a response was returned at all) in // error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check // whether the returned error was because http.StatusNotModified was returned. -func (c *ProjectsLocationsBucketsViewsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +func (c *ProjectsLocationsLogScopesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -29055,7 +31593,7 @@ func (c *ProjectsLocationsBucketsViewsSetIamPolicyCall) Do(opts ...googleapi.Cal if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -29068,84 +31606,84 @@ func (c *ProjectsLocationsBucketsViewsSetIamPolicyCall) Do(opts ...googleapi.Cal return ret, nil } -type ProjectsLocationsBucketsViewsTestIamPermissionsCall struct { - s *Service - resource string - testiampermissionsrequest *TestIamPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsLogScopesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the specified -// resource. If the resource does not exist, this will return an empty set of -// permissions, not a NOT_FOUND error.Note: This operation is designed to be -// used for building permission-aware UIs and command-line tools, not for -// authorization checking. This operation may "fail open" without warning. +// Get: Gets a log scope. // -// - resource: REQUIRED: The resource for which the policy detail is being -// requested. 
See Resource names -// (https://cloud.google.com/apis/design/resource_names) for the appropriate -// value for this field. -func (r *ProjectsLocationsBucketsViewsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsBucketsViewsTestIamPermissionsCall { - c := &ProjectsLocationsBucketsViewsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.testiampermissionsrequest = testiampermissionsrequest +// - name: The resource name of the log scope: +// "projects/[PROJECT_ID]/locations/[LOCATION_ID]/logScopes/[LOG_SCOPE_ID]" +// For example:"projects/my-project/locations/global/logScopes/my-log-scope". +func (r *ProjectsLocationsLogScopesService) Get(name string) *ProjectsLocationsLogScopesGetCall { + c := &ProjectsLocationsLogScopesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsBucketsViewsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsLocationsBucketsViewsTestIamPermissionsCall { +func (c *ProjectsLocationsLogScopesGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsLogScopesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsLogScopesGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsLogScopesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsBucketsViewsTestIamPermissionsCall) Context(ctx context.Context) *ProjectsLocationsBucketsViewsTestIamPermissionsCall { +func (c *ProjectsLocationsLogScopesGetCall) Context(ctx context.Context) *ProjectsLocationsLogScopesGetCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. -func (c *ProjectsLocationsBucketsViewsTestIamPermissionsCall) Header() http.Header { +func (c *ProjectsLocationsLogScopesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsBucketsViewsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) - if err != nil { - return nil, err +func (c *ProjectsLocationsLogScopesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+resource}:testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.projects.locations.buckets.views.testIamPermissions" call. +// Do executes the "logging.projects.locations.logScopes.get" call. // Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsLocationsBucketsViewsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { +// *LogScope.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsLogScopesGetCall) Do(opts ...googleapi.CallOption) (*LogScope, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -29164,7 +31702,7 @@ func (c *ProjectsLocationsBucketsViewsTestIamPermissionsCall) Do(opts ...googlea if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &TestIamPermissionsResponse{ + ret := &LogScope{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -29177,7 +31715,7 @@ func (c *ProjectsLocationsBucketsViewsTestIamPermissionsCall) Do(opts ...googlea return ret, nil } -type ProjectsLocationsBucketsViewsLogsListCall struct { +type ProjectsLocationsLogScopesListCall struct { s *Service parent string urlParams_ gensupport.URLParams @@ -29186,23 +31724,21 @@ type ProjectsLocationsBucketsViewsLogsListCall struct { header_ http.Header } -// List: Lists the logs in projects, organizations, folders, or billing -// accounts. Only logs that have entries are listed. +// List: Lists log scopes. // -// - parent: The resource name to list logs for: projects/[PROJECT_ID] -// organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] -// folders/[FOLDER_ID]. -func (r *ProjectsLocationsBucketsViewsLogsService) List(parent string) *ProjectsLocationsBucketsViewsLogsListCall { - c := &ProjectsLocationsBucketsViewsLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - parent: The parent resource whose log scopes are to be listed: +// "projects/[PROJECT_ID]/locations/[LOCATION_ID]". +func (r *ProjectsLocationsLogScopesService) List(parent string) *ProjectsLocationsLogScopesListCall { + c := &ProjectsLocationsLogScopesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent return c } // PageSize sets the optional parameter "pageSize": The maximum number of -// results to return from this request. Non-positive values are ignored. The +// results to return from this request.Non-positive values are ignored. The // presence of nextPageToken in the response indicates that more results might // be available. 
-func (c *ProjectsLocationsBucketsViewsLogsListCall) PageSize(pageSize int64) *ProjectsLocationsBucketsViewsLogsListCall { +func (c *ProjectsLocationsLogScopesListCall) PageSize(pageSize int64) *ProjectsLocationsLogScopesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } @@ -29211,33 +31747,15 @@ func (c *ProjectsLocationsBucketsViewsLogsListCall) PageSize(pageSize int64) *Pr // the next batch of results from the preceding call to this method. pageToken // must be the value of nextPageToken from the previous response. The values of // other method parameters should be identical to those in the previous call. -func (c *ProjectsLocationsBucketsViewsLogsListCall) PageToken(pageToken string) *ProjectsLocationsBucketsViewsLogsListCall { +func (c *ProjectsLocationsLogScopesListCall) PageToken(pageToken string) *ProjectsLocationsLogScopesListCall { c.urlParams_.Set("pageToken", pageToken) return c } -// ResourceNames sets the optional parameter "resourceNames": List of resource -// names to list logs for: -// projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW -// _ID] -// organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/v -// iews/[VIEW_ID] -// billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ -// ID]/views/[VIEW_ID] -// folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_I -// D]To support legacy queries, it could also be: projects/[PROJECT_ID] -// organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] -// folders/[FOLDER_ID]The resource name in the parent field is added to this -// list. -func (c *ProjectsLocationsBucketsViewsLogsListCall) ResourceNames(resourceNames ...string) *ProjectsLocationsBucketsViewsLogsListCall { - c.urlParams_.SetMulti("resourceNames", append([]string{}, resourceNames...)) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. -func (c *ProjectsLocationsBucketsViewsLogsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsBucketsViewsLogsListCall { +func (c *ProjectsLocationsLogScopesListCall) Fields(s ...googleapi.Field) *ProjectsLocationsLogScopesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -29245,27 +31763,27 @@ func (c *ProjectsLocationsBucketsViewsLogsListCall) Fields(s ...googleapi.Field) // IfNoneMatch sets an optional parameter which makes the operation fail if the // object's ETag matches the given value. This is useful for getting updates // only after the object has changed since the last request. -func (c *ProjectsLocationsBucketsViewsLogsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsBucketsViewsLogsListCall { +func (c *ProjectsLocationsLogScopesListCall) IfNoneMatch(entityTag string) *ProjectsLocationsLogScopesListCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. -func (c *ProjectsLocationsBucketsViewsLogsListCall) Context(ctx context.Context) *ProjectsLocationsBucketsViewsLogsListCall { +func (c *ProjectsLocationsLogScopesListCall) Context(ctx context.Context) *ProjectsLocationsLogScopesListCall { c.ctx_ = ctx return c } // Header returns a http.Header that can be modified by the caller to add // headers to the request. 
-func (c *ProjectsLocationsBucketsViewsLogsListCall) Header() http.Header { +func (c *ProjectsLocationsLogScopesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsBucketsViewsLogsListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsLogScopesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) @@ -29273,7 +31791,7 @@ func (c *ProjectsLocationsBucketsViewsLogsListCall) doRequest(alt string) (*http var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/logs") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/logScopes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -29286,13 +31804,13 @@ func (c *ProjectsLocationsBucketsViewsLogsListCall) doRequest(alt string) (*http return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.projects.locations.buckets.views.logs.list" call. +// Do executes the "logging.projects.locations.logScopes.list" call. // Any non-2xx status code is an error. Response headers are in either -// *ListLogsResponse.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// *ListLogScopesResponse.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified was // returned. -func (c *ProjectsLocationsBucketsViewsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsResponse, error) { +func (c *ProjectsLocationsLogScopesListCall) Do(opts ...googleapi.CallOption) (*ListLogScopesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -29311,7 +31829,7 @@ func (c *ProjectsLocationsBucketsViewsLogsListCall) Do(opts ...googleapi.CallOpt if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ListLogsResponse{ + ret := &ListLogScopesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -29327,7 +31845,7 @@ func (c *ProjectsLocationsBucketsViewsLogsListCall) Do(opts ...googleapi.CallOpt // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *ProjectsLocationsBucketsViewsLogsListCall) Pages(ctx context.Context, f func(*ListLogsResponse) error) error { +func (c *ProjectsLocationsLogScopesListCall) Pages(ctx context.Context, f func(*ListLogScopesResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) for { @@ -29345,6 +31863,119 @@ func (c *ProjectsLocationsBucketsViewsLogsListCall) Pages(ctx context.Context, f } } +type ProjectsLocationsLogScopesPatchCall struct { + s *Service + name string + logscope *LogScope + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a log scope. +// +// - name: Output only. 
The resource name of the log scope.For +// example:projects/my-project/locations/global/logScopes/my-log-scope. +func (r *ProjectsLocationsLogScopesService) Patch(name string, logscope *LogScope) *ProjectsLocationsLogScopesPatchCall { + c := &ProjectsLocationsLogScopesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.logscope = logscope + return c +} + +// UpdateMask sets the optional parameter "updateMask": Field mask that +// specifies the fields in log_scope that need an update. A field will be +// overwritten if, and only if, it is in the update mask. name and output only +// fields cannot be updated.For a detailed FieldMask definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskFor +// example: updateMask=description +func (c *ProjectsLocationsLogScopesPatchCall) UpdateMask(updateMask string) *ProjectsLocationsLogScopesPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsLogScopesPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsLogScopesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsLogScopesPatchCall) Context(ctx context.Context) *ProjectsLocationsLogScopesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsLogScopesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsLogScopesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logscope) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.projects.locations.logScopes.patch" call. +// Any non-2xx status code is an error. Response headers are in either +// *LogScope.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsLogScopesPatchCall) Do(opts ...googleapi.CallOption) (*LogScope, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &LogScope{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + type ProjectsLocationsOperationsCancelCall struct { s *Service name string @@ -29739,6 +32370,15 @@ func (r *ProjectsLocationsRecentQueriesService) List(parent string) *ProjectsLoc return c } +// Filter sets the optional parameter "filter": Specifies the type ("Logging" +// or "OpsAnalytics") of the recent queries to list. The only valid value for +// this field is one of the two allowable type function calls, which are the +// following: type("Logging") type("OpsAnalytics") +func (c *ProjectsLocationsRecentQueriesListCall) Filter(filter string) *ProjectsLocationsRecentQueriesListCall { + c.urlParams_.Set("filter", filter) + return c +} + // PageSize sets the optional parameter "pageSize": The maximum number of // results to return from this request. Non-positive values are ignored. The // presence of nextPageToken in the response indicates that more results might @@ -30091,6 +32731,121 @@ func (c *ProjectsLocationsSavedQueriesDeleteCall) Do(opts ...googleapi.CallOptio return ret, nil } +type ProjectsLocationsSavedQueriesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns all data associated with the requested query. +// +// - name: The resource name of the saved query. +// "projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]" +// "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/savedQueries/[QUER +// Y_ID]" +// "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/savedQueries/ +// [QUERY_ID]" +// "folders/[FOLDER_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]" For +// example: +// "projects/my-project/locations/global/savedQueries/my-saved-query". +func (r *ProjectsLocationsSavedQueriesService) Get(name string) *ProjectsLocationsSavedQueriesGetCall { + c := &ProjectsLocationsSavedQueriesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsSavedQueriesGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsSavedQueriesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsSavedQueriesGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsSavedQueriesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. 
+func (c *ProjectsLocationsSavedQueriesGetCall) Context(ctx context.Context) *ProjectsLocationsSavedQueriesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsSavedQueriesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsSavedQueriesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.projects.locations.savedQueries.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *SavedQuery.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsSavedQueriesGetCall) Do(opts ...googleapi.CallOption) (*SavedQuery, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SavedQuery{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + type ProjectsLocationsSavedQueriesListCall struct { s *Service parent string @@ -30118,6 +32873,21 @@ func (r *ProjectsLocationsSavedQueriesService) List(parent string) *ProjectsLoca return c } +// Filter sets the optional parameter "filter": Specifies the type ("Logging" +// or "OpsAnalytics") and the visibility (PRIVATE or SHARED) of the saved +// queries to list. If provided, the filter must contain either the type +// function or a visibility token, or both. If both are chosen, they can be +// placed in any order, but they must be joined by the AND operator or the +// empty character.The two supported type function calls are: type("Logging") +// type("OpsAnalytics")The two supported visibility tokens are: visibility = +// PRIVATE visibility = SHAREDFor example:type("Logging") AND visibility = +// PRIVATE visibility=SHARED type("OpsAnalytics") type("OpsAnalytics)" +// visibility = PRIVATE visibility = SHARED +func (c *ProjectsLocationsSavedQueriesListCall) Filter(filter string) *ProjectsLocationsSavedQueriesListCall { + c.urlParams_.Set("filter", filter) + return c +} + // PageSize sets the optional parameter "pageSize": The maximum number of // results to return from this request.Non-positive values are ignored. 
The // presence of nextPageToken in the response indicates that more results might @@ -30247,6 +33017,125 @@ func (c *ProjectsLocationsSavedQueriesListCall) Pages(ctx context.Context, f fun } } +type ProjectsLocationsSavedQueriesPatchCall struct { + s *Service + name string + savedquery *SavedQuery + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates an existing SavedQuery. +// +// - name: Output only. Resource name of the saved query.In the format: +// "projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]" +// For a list of supported locations, see Supported Regions +// (https://cloud.google.com/logging/docs/region-support#bucket-regions)After +// the saved query is created, the location cannot be changed.If the user +// doesn't provide a QUERY_ID, the system will generate an alphanumeric ID. +func (r *ProjectsLocationsSavedQueriesService) Patch(name string, savedquery *SavedQuery) *ProjectsLocationsSavedQueriesPatchCall { + c := &ProjectsLocationsSavedQueriesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.savedquery = savedquery + return c +} + +// UpdateMask sets the optional parameter "updateMask": Required. A non-empty +// list of fields to change in the existing saved query. Fields are relative to +// the saved_query and new values for the fields are taken from the +// corresponding fields in the SavedQuery included in this request. Fields not +// mentioned in update_mask are not changed and are ignored in the request.To +// update all mutable fields, specify an update_mask of *.For example, to +// change the description and query filter text of a saved query, specify an +// update_mask of "description, query.filter". +func (c *ProjectsLocationsSavedQueriesPatchCall) UpdateMask(updateMask string) *ProjectsLocationsSavedQueriesPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsSavedQueriesPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsSavedQueriesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsSavedQueriesPatchCall) Context(ctx context.Context) *ProjectsLocationsSavedQueriesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsSavedQueriesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsSavedQueriesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.savedquery) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.projects.locations.savedQueries.patch" call. +// Any non-2xx status code is an error. Response headers are in either +// *SavedQuery.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsLocationsSavedQueriesPatchCall) Do(opts ...googleapi.CallOption) (*SavedQuery, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SavedQuery{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + type ProjectsLogsDeleteCall struct { s *Service logName string @@ -31104,12 +33993,12 @@ func (r *ProjectsSinksService) Create(parent string, logsink *LogSink) *Projects return c } -// CustomWriterIdentity sets the optional parameter "customWriterIdentity": A +// CustomWriterIdentity sets the optional parameter "customWriterIdentity": The // service account provided by the caller that will be used to write the log // entries. The format must be serviceAccount:some@email. This field can only -// be specified if you are routing logs to a destination outside this sink's -// project. If not specified, a Logging service account will automatically be -// generated. +// be specified when you are routing logs to a log bucket that is in a +// different project than the sink. When not specified, a Logging service +// account will automatically be generated. func (c *ProjectsSinksCreateCall) CustomWriterIdentity(customWriterIdentity string) *ProjectsSinksCreateCall { c.urlParams_.Set("customWriterIdentity", customWriterIdentity) return c @@ -31618,12 +34507,12 @@ func (r *ProjectsSinksService) Patch(sinkNameid string, logsink *LogSink) *Proje return c } -// CustomWriterIdentity sets the optional parameter "customWriterIdentity": A +// CustomWriterIdentity sets the optional parameter "customWriterIdentity": The // service account provided by the caller that will be used to write the log // entries. The format must be serviceAccount:some@email. This field can only -// be specified if you are routing logs to a destination outside this sink's -// project. If not specified, a Logging service account will automatically be -// generated. +// be specified when you are routing logs to a log bucket that is in a +// different project than the sink. When not specified, a Logging service +// account will automatically be generated. 
func (c *ProjectsSinksPatchCall) CustomWriterIdentity(customWriterIdentity string) *ProjectsSinksPatchCall { c.urlParams_.Set("customWriterIdentity", customWriterIdentity) return c @@ -31769,12 +34658,12 @@ func (r *ProjectsSinksService) Update(sinkNameid string, logsink *LogSink) *Proj return c } -// CustomWriterIdentity sets the optional parameter "customWriterIdentity": A +// CustomWriterIdentity sets the optional parameter "customWriterIdentity": The // service account provided by the caller that will be used to write the log // entries. The format must be serviceAccount:some@email. This field can only -// be specified if you are routing logs to a destination outside this sink's -// project. If not specified, a Logging service account will automatically be -// generated. +// be specified when you are routing logs to a log bucket that is in a +// different project than the sink. When not specified, a Logging service +// account will automatically be generated. func (c *ProjectsSinksUpdateCall) CustomWriterIdentity(customWriterIdentity string) *ProjectsSinksUpdateCall { c.urlParams_.Set("customWriterIdentity", customWriterIdentity) return c @@ -31917,12 +34806,12 @@ func (r *SinksService) Create(parent string, logsink *LogSink) *SinksCreateCall return c } -// CustomWriterIdentity sets the optional parameter "customWriterIdentity": A +// CustomWriterIdentity sets the optional parameter "customWriterIdentity": The // service account provided by the caller that will be used to write the log // entries. The format must be serviceAccount:some@email. This field can only -// be specified if you are routing logs to a destination outside this sink's -// project. If not specified, a Logging service account will automatically be -// generated. +// be specified when you are routing logs to a log bucket that is in a +// different project than the sink. When not specified, a Logging service +// account will automatically be generated. func (c *SinksCreateCall) CustomWriterIdentity(customWriterIdentity string) *SinksCreateCall { c.urlParams_.Set("customWriterIdentity", customWriterIdentity) return c @@ -32431,12 +35320,12 @@ func (r *SinksService) Update(sinkNameid string, logsink *LogSink) *SinksUpdateC return c } -// CustomWriterIdentity sets the optional parameter "customWriterIdentity": A +// CustomWriterIdentity sets the optional parameter "customWriterIdentity": The // service account provided by the caller that will be used to write the log // entries. The format must be serviceAccount:some@email. This field can only -// be specified if you are routing logs to a destination outside this sink's -// project. If not specified, a Logging service account will automatically be -// generated. +// be specified when you are routing logs to a log bucket that is in a +// different project than the sink. When not specified, a Logging service +// account will automatically be generated. func (c *SinksUpdateCall) CustomWriterIdentity(customWriterIdentity string) *SinksUpdateCall { c.urlParams_.Set("customWriterIdentity", customWriterIdentity) return c @@ -32942,8 +35831,8 @@ type V2UpdateSettingsCall struct { // information. // // - name: The resource name for the settings to update. -// "organizations/[ORGANIZATION_ID]/settings" For -// example:"organizations/12345/settings". +// "organizations/[ORGANIZATION_ID]/settings" "folders/[FOLDER_ID]/settings" +// For example:"organizations/12345/settings". 
func (r *V2Service) UpdateSettings(name string, settings *Settings) *V2UpdateSettingsCall { c := &V2UpdateSettingsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name diff --git a/terraform/providers/google/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json b/terraform/providers/google/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json index ed38964e05c..b638bd10d63 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json @@ -29,10 +29,65 @@ "endpointUrl": "https://pubsub.europe-west3.rep.googleapis.com/", "location": "europe-west3" }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://pubsub.europe-west8.rep.googleapis.com/", + "location": "europe-west8" + }, { "description": "Regional Endpoint", "endpointUrl": "https://pubsub.europe-west9.rep.googleapis.com/", "location": "europe-west9" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://pubsub.us-central1.rep.googleapis.com/", + "location": "us-central1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://pubsub.us-central2.rep.googleapis.com/", + "location": "us-central2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://pubsub.us-east1.rep.googleapis.com/", + "location": "us-east1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://pubsub.us-east4.rep.googleapis.com/", + "location": "us-east4" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://pubsub.us-east5.rep.googleapis.com/", + "location": "us-east5" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://pubsub.us-south1.rep.googleapis.com/", + "location": "us-south1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://pubsub.us-west1.rep.googleapis.com/", + "location": "us-west1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://pubsub.us-west2.rep.googleapis.com/", + "location": "us-west2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://pubsub.us-west3.rep.googleapis.com/", + "location": "us-west3" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://pubsub.us-west4.rep.googleapis.com/", + "location": "us-west4" } ], "icons": { @@ -1583,7 +1638,7 @@ } } }, - "revision": "20240607", + "revision": "20240918", "rootUrl": "https://pubsub.googleapis.com/", "schemas": { "AcknowledgeRequest": { @@ -1600,6 +1655,21 @@ }, "type": "object" }, + "AnalyticsHubSubscriptionInfo": { + "description": "Information about an associated Analytics Hub subscription (https://cloud.google.com/bigquery/docs/analytics-hub-manage-subscriptions).", + "id": "AnalyticsHubSubscriptionInfo", + "properties": { + "listing": { + "description": "Optional. The name of the associated Analytics Hub listing resource. Pattern: \"projects/{project}/locations/{location}/dataExchanges/{data_exchange}/listings/{listing}\"", + "type": "string" + }, + "subscription": { + "description": "Optional. The name of the associated Analytics Hub subscription resource. Pattern: \"projects/{project}/locations/{location}/subscriptions/{subscription}\"", + "type": "string" + } + }, + "type": "object" + }, "AvroConfig": { "description": "Configuration for writing message data in Avro format. 
Message payloads and metadata will be written to files as an Avro binary.", "id": "AvroConfig", @@ -1615,6 +1685,12 @@ }, "type": "object" }, + "AvroFormat": { + "description": "Configuration for reading Cloud Storage data in Avro binary format. The bytes of each object will be set to the `data` field of a Pub/Sub message.", + "id": "AvroFormat", + "properties": {}, + "type": "object" + }, "AwsKinesis": { "description": "Ingestion settings for Amazon Kinesis Data Streams.", "id": "AwsKinesis", @@ -1733,6 +1809,59 @@ }, "type": "object" }, + "CloudStorage": { + "description": "Ingestion settings for Cloud Storage.", + "id": "CloudStorage", + "properties": { + "avroFormat": { + "$ref": "AvroFormat", + "description": "Optional. Data from Cloud Storage will be interpreted in Avro format." + }, + "bucket": { + "description": "Optional. Cloud Storage bucket. The bucket name must be without any prefix like \"gs://\". See the [bucket naming requirements] (https://cloud.google.com/storage/docs/buckets#naming).", + "type": "string" + }, + "matchGlob": { + "description": "Optional. Glob pattern used to match objects that will be ingested. If unset, all objects will be ingested. See the [supported patterns](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob).", + "type": "string" + }, + "minimumObjectCreateTime": { + "description": "Optional. Only objects with a larger or equal creation timestamp will be ingested.", + "format": "google-datetime", + "type": "string" + }, + "pubsubAvroFormat": { + "$ref": "PubSubAvroFormat", + "description": "Optional. It will be assumed data from Cloud Storage was written via [Cloud Storage subscriptions](https://cloud.google.com/pubsub/docs/cloudstorage)." + }, + "state": { + "description": "Output only. An output-only field that indicates the state of the Cloud Storage ingestion source.", + "enum": [ + "STATE_UNSPECIFIED", + "ACTIVE", + "CLOUD_STORAGE_PERMISSION_DENIED", + "PUBLISH_PERMISSION_DENIED", + "BUCKET_NOT_FOUND", + "TOO_MANY_OBJECTS" + ], + "enumDescriptions": [ + "Default value. This value is unused.", + "Ingestion is active.", + "Permission denied encountered while calling the Cloud Storage API. This can happen if the Pub/Sub SA has not been granted the [appropriate permissions](https://cloud.google.com/storage/docs/access-control/iam-permissions): - storage.objects.list: to list the objects in a bucket. - storage.objects.get: to read the objects in a bucket. - storage.buckets.get: to verify the bucket exists.", + "Permission denied encountered while publishing to the topic. This can happen if the Pub/Sub SA has not been granted the [appropriate publish permissions](https://cloud.google.com/pubsub/docs/access-control#pubsub.publisher)", + "The provided Cloud Storage bucket doesn't exist.", + "The Cloud Storage bucket has too many objects, ingestion will be paused." + ], + "readOnly": true, + "type": "string" + }, + "textFormat": { + "$ref": "TextFormat", + "description": "Optional. Data from Cloud Storage will be interpreted as text." + } + }, + "type": "object" + }, "CloudStorageConfig": { "description": "Configuration for a Cloud Storage subscription.", "id": "CloudStorageConfig", @@ -1767,6 +1896,11 @@ "format": "google-duration", "type": "string" }, + "maxMessages": { + "description": "Optional. The maximum number of messages that can be written to a Cloud Storage file before a new file is created. 
Min 1000 messages.", + "format": "int64", + "type": "string" + }, "serviceAccountEmail": { "description": "Optional. The service account to use to write to Cloud Storage. The subscription creator or updater that specifies this field must have `iam.serviceAccounts.actAs` permission on the service account. If not specified, the Pub/Sub [service agent](https://cloud.google.com/iam/docs/service-agents), service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used.", "type": "string" @@ -1898,6 +2032,14 @@ "awsKinesis": { "$ref": "AwsKinesis", "description": "Optional. Amazon Kinesis Data Streams." + }, + "cloudStorage": { + "$ref": "CloudStorage", + "description": "Optional. Cloud Storage." + }, + "platformLogsSettings": { + "$ref": "PlatformLogsSettings", + "description": "Optional. Platform Logs settings. If unset, no Platform Logs will be generated." } }, "type": "object" @@ -2102,6 +2244,33 @@ }, "type": "object" }, + "PlatformLogsSettings": { + "description": "Settings for Platform Logs produced by Pub/Sub.", + "id": "PlatformLogsSettings", + "properties": { + "severity": { + "description": "Optional. The minimum severity level of Platform Logs that will be written.", + "enum": [ + "SEVERITY_UNSPECIFIED", + "DISABLED", + "DEBUG", + "INFO", + "WARNING", + "ERROR" + ], + "enumDescriptions": [ + "Default value. Logs level is unspecified. Logs will be disabled.", + "Logs will be disabled.", + "Debug logs and higher-severity logs will be written.", + "Info logs and higher-severity logs will be written.", + "Warning logs and higher-severity logs will be written.", + "Only error logs will be written." + ], + "type": "string" + } + }, + "type": "object" + }, "Policy": { "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** ``` { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", "id": "Policy", @@ -2126,6 +2295,12 @@ }, "type": "object" }, + "PubSubAvroFormat": { + "description": "Configuration for reading Cloud Storage data written via [Cloud Storage subscriptions](https://cloud.google.com/pubsub/docs/cloudstorage). The data and attributes fields of the originally exported Pub/Sub message will be restored when publishing.", + "id": "PubSubAvroFormat", + "properties": {}, + "type": "object" + }, "PublishRequest": { "description": "Request for the Publish method.", "id": "PublishRequest", @@ -2443,6 +2618,11 @@ "format": "int32", "type": "integer" }, + "analyticsHubSubscriptionInfo": { + "$ref": "AnalyticsHubSubscriptionInfo", + "description": "Output only. Information about the associated Analytics Hub subscription. Only set if the subscritpion is created by Analytics Hub.", + "readOnly": true + }, "bigqueryConfig": { "$ref": "BigQueryConfig", "description": "Optional. If delivery to BigQuery is used with this subscription, this field is used to configure it." @@ -2483,7 +2663,7 @@ "type": "object" }, "messageRetentionDuration": { - "description": "Optional. How long to retain unacknowledged messages in the subscription's backlog, from the moment a message is published. If `retain_acked_messages` is true, then this also configures the retention of acknowledged messages, and thus configures how far back in time a `Seek` can be done. Defaults to 7 days. Cannot be more than 7 days or less than 10 minutes.", + "description": "Optional. How long to retain unacknowledged messages in the subscription's backlog, from the moment a message is published. If `retain_acked_messages` is true, then this also configures the retention of acknowledged messages, and thus configures how far back in time a `Seek` can be done. Defaults to 7 days. Cannot be more than 31 days or less than 10 minutes.", "format": "google-duration", "type": "string" }, @@ -2565,6 +2745,17 @@ "properties": {}, "type": "object" }, + "TextFormat": { + "description": "Configuration for reading Cloud Storage data in text format. Each line of text as specified by the delimiter will be set to the `data` field of a Pub/Sub message.", + "id": "TextFormat", + "properties": { + "delimiter": { + "description": "Optional. 
When unset, '\\n' is used.", + "type": "string" + } + }, + "type": "object" + }, "Topic": { "description": "A topic resource.", "id": "Topic", diff --git a/terraform/providers/google/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go b/terraform/providers/google/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go index 5a27833f3b3..bedf9465b3c 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go @@ -265,9 +265,40 @@ type AcknowledgeRequest struct { NullFields []string `json:"-"` } -func (s *AcknowledgeRequest) MarshalJSON() ([]byte, error) { +func (s AcknowledgeRequest) MarshalJSON() ([]byte, error) { type NoMethod AcknowledgeRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// AnalyticsHubSubscriptionInfo: Information about an associated Analytics Hub +// subscription +// (https://cloud.google.com/bigquery/docs/analytics-hub-manage-subscriptions). +type AnalyticsHubSubscriptionInfo struct { + // Listing: Optional. The name of the associated Analytics Hub listing + // resource. Pattern: + // "projects/{project}/locations/{location}/dataExchanges/{data_exchange}/listin + // gs/{listing}" + Listing string `json:"listing,omitempty"` + // Subscription: Optional. The name of the associated Analytics Hub + // subscription resource. Pattern: + // "projects/{project}/locations/{location}/subscriptions/{subscription}" + Subscription string `json:"subscription,omitempty"` + // ForceSendFields is a list of field names (e.g. "Listing") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Listing") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s AnalyticsHubSubscriptionInfo) MarshalJSON() ([]byte, error) { + type NoMethod AnalyticsHubSubscriptionInfo + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AvroConfig: Configuration for writing message data in Avro format. Message @@ -296,9 +327,15 @@ type AvroConfig struct { NullFields []string `json:"-"` } -func (s *AvroConfig) MarshalJSON() ([]byte, error) { +func (s AvroConfig) MarshalJSON() ([]byte, error) { type NoMethod AvroConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// AvroFormat: Configuration for reading Cloud Storage data in Avro binary +// format. The bytes of each object will be set to the `data` field of a +// Pub/Sub message. +type AvroFormat struct { } // AwsKinesis: Ingestion settings for Amazon Kinesis Data Streams. 
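Editor's note (illustration, not part of the patch): the hunks above and below switch every generated MarshalJSON helper from a pointer receiver to a value receiver while keeping the "type NoMethod T" alias. A minimal, self-contained sketch of why that pattern avoids recursion; the type name is illustrative, and plain encoding/json stands in for the package's internal gensupport.MarshalJSON:

package main

import (
	"encoding/json"
	"fmt"
)

// AckRequest mirrors the shape of the generated request types in this file.
type AckRequest struct {
	AckIds []string `json:"ackIds,omitempty"`
}

// Value receiver: both AckRequest and *AckRequest now satisfy json.Marshaler.
func (s AckRequest) MarshalJSON() ([]byte, error) {
	// NoMethod has identical fields but no MarshalJSON method, so the call
	// below uses default struct encoding instead of recursing back here.
	type NoMethod AckRequest
	return json.Marshal(NoMethod(s))
}

func main() {
	b, _ := json.Marshal(AckRequest{AckIds: []string{"ack-1"}})
	fmt.Println(string(b)) // {"ackIds":["ack-1"]}
}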
@@ -352,9 +389,9 @@ type AwsKinesis struct { NullFields []string `json:"-"` } -func (s *AwsKinesis) MarshalJSON() ([]byte, error) { +func (s AwsKinesis) MarshalJSON() ([]byte, error) { type NoMethod AwsKinesis - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BigQueryConfig: Configuration for a BigQuery subscription. @@ -423,9 +460,9 @@ type BigQueryConfig struct { NullFields []string `json:"-"` } -func (s *BigQueryConfig) MarshalJSON() ([]byte, error) { +func (s BigQueryConfig) MarshalJSON() ([]byte, error) { type NoMethod BigQueryConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Binding: Associates `members`, or principals, with a `role`. @@ -522,9 +559,71 @@ type Binding struct { NullFields []string `json:"-"` } -func (s *Binding) MarshalJSON() ([]byte, error) { +func (s Binding) MarshalJSON() ([]byte, error) { type NoMethod Binding - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// CloudStorage: Ingestion settings for Cloud Storage. +type CloudStorage struct { + // AvroFormat: Optional. Data from Cloud Storage will be interpreted in Avro + // format. + AvroFormat *AvroFormat `json:"avroFormat,omitempty"` + // Bucket: Optional. Cloud Storage bucket. The bucket name must be without any + // prefix like "gs://". See the [bucket naming requirements] + // (https://cloud.google.com/storage/docs/buckets#naming). + Bucket string `json:"bucket,omitempty"` + // MatchGlob: Optional. Glob pattern used to match objects that will be + // ingested. If unset, all objects will be ingested. See the supported patterns + // (https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob). + MatchGlob string `json:"matchGlob,omitempty"` + // MinimumObjectCreateTime: Optional. Only objects with a larger or equal + // creation timestamp will be ingested. + MinimumObjectCreateTime string `json:"minimumObjectCreateTime,omitempty"` + // PubsubAvroFormat: Optional. It will be assumed data from Cloud Storage was + // written via Cloud Storage subscriptions + // (https://cloud.google.com/pubsub/docs/cloudstorage). + PubsubAvroFormat *PubSubAvroFormat `json:"pubsubAvroFormat,omitempty"` + // State: Output only. An output-only field that indicates the state of the + // Cloud Storage ingestion source. + // + // Possible values: + // "STATE_UNSPECIFIED" - Default value. This value is unused. + // "ACTIVE" - Ingestion is active. + // "CLOUD_STORAGE_PERMISSION_DENIED" - Permission denied encountered while + // calling the Cloud Storage API. This can happen if the Pub/Sub SA has not + // been granted the [appropriate + // permissions](https://cloud.google.com/storage/docs/access-control/iam-permiss + // ions): - storage.objects.list: to list the objects in a bucket. - + // storage.objects.get: to read the objects in a bucket. - storage.buckets.get: + // to verify the bucket exists. + // "PUBLISH_PERMISSION_DENIED" - Permission denied encountered while + // publishing to the topic. This can happen if the Pub/Sub SA has not been + // granted the [appropriate publish + // permissions](https://cloud.google.com/pubsub/docs/access-control#pubsub.publi + // sher) + // "BUCKET_NOT_FOUND" - The provided Cloud Storage bucket doesn't exist. 
+ // "TOO_MANY_OBJECTS" - The Cloud Storage bucket has too many objects, + // ingestion will be paused. + State string `json:"state,omitempty"` + // TextFormat: Optional. Data from Cloud Storage will be interpreted as text. + TextFormat *TextFormat `json:"textFormat,omitempty"` + // ForceSendFields is a list of field names (e.g. "AvroFormat") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AvroFormat") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s CloudStorage) MarshalJSON() ([]byte, error) { + type NoMethod CloudStorage + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CloudStorageConfig: Configuration for a Cloud Storage subscription. @@ -558,6 +657,9 @@ type CloudStorageConfig struct { // Cloud Storage file is created. Min 1 minute, max 10 minutes, default 5 // minutes. May not exceed the subscription's acknowledgement deadline. MaxDuration string `json:"maxDuration,omitempty"` + // MaxMessages: Optional. The maximum number of messages that can be written to + // a Cloud Storage file before a new file is created. Min 1000 messages. + MaxMessages int64 `json:"maxMessages,omitempty,string"` // ServiceAccountEmail: Optional. The service account to use to write to Cloud // Storage. The subscription creator or updater that specifies this field must // have `iam.serviceAccounts.actAs` permission on the service account. If not @@ -597,9 +699,9 @@ type CloudStorageConfig struct { NullFields []string `json:"-"` } -func (s *CloudStorageConfig) MarshalJSON() ([]byte, error) { +func (s CloudStorageConfig) MarshalJSON() ([]byte, error) { type NoMethod CloudStorageConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CommitSchemaRequest: Request for CommitSchema method. @@ -619,9 +721,9 @@ type CommitSchemaRequest struct { NullFields []string `json:"-"` } -func (s *CommitSchemaRequest) MarshalJSON() ([]byte, error) { +func (s CommitSchemaRequest) MarshalJSON() ([]byte, error) { type NoMethod CommitSchemaRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateSnapshotRequest: Request for the `CreateSnapshot` method. @@ -651,9 +753,9 @@ type CreateSnapshotRequest struct { NullFields []string `json:"-"` } -func (s *CreateSnapshotRequest) MarshalJSON() ([]byte, error) { +func (s CreateSnapshotRequest) MarshalJSON() ([]byte, error) { type NoMethod CreateSnapshotRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DeadLetterPolicy: Dead lettering is done on a best effort basis. 
The same @@ -692,9 +794,9 @@ type DeadLetterPolicy struct { NullFields []string `json:"-"` } -func (s *DeadLetterPolicy) MarshalJSON() ([]byte, error) { +func (s DeadLetterPolicy) MarshalJSON() ([]byte, error) { type NoMethod DeadLetterPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DetachSubscriptionResponse: Response for the DetachSubscription method. @@ -736,9 +838,9 @@ type ExpirationPolicy struct { NullFields []string `json:"-"` } -func (s *ExpirationPolicy) MarshalJSON() ([]byte, error) { +func (s ExpirationPolicy) MarshalJSON() ([]byte, error) { type NoMethod ExpirationPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Expr: Represents a textual expression in the Common Expression Language @@ -784,9 +886,9 @@ type Expr struct { NullFields []string `json:"-"` } -func (s *Expr) MarshalJSON() ([]byte, error) { +func (s Expr) MarshalJSON() ([]byte, error) { type NoMethod Expr - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // IngestionDataSourceSettings: Settings for an ingestion data source on a @@ -794,6 +896,11 @@ func (s *Expr) MarshalJSON() ([]byte, error) { type IngestionDataSourceSettings struct { // AwsKinesis: Optional. Amazon Kinesis Data Streams. AwsKinesis *AwsKinesis `json:"awsKinesis,omitempty"` + // CloudStorage: Optional. Cloud Storage. + CloudStorage *CloudStorage `json:"cloudStorage,omitempty"` + // PlatformLogsSettings: Optional. Platform Logs settings. If unset, no + // Platform Logs will be generated. + PlatformLogsSettings *PlatformLogsSettings `json:"platformLogsSettings,omitempty"` // ForceSendFields is a list of field names (e.g. "AwsKinesis") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See @@ -807,9 +914,9 @@ type IngestionDataSourceSettings struct { NullFields []string `json:"-"` } -func (s *IngestionDataSourceSettings) MarshalJSON() ([]byte, error) { +func (s IngestionDataSourceSettings) MarshalJSON() ([]byte, error) { type NoMethod IngestionDataSourceSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListSchemaRevisionsResponse: Response for the `ListSchemaRevisions` method. @@ -835,9 +942,9 @@ type ListSchemaRevisionsResponse struct { NullFields []string `json:"-"` } -func (s *ListSchemaRevisionsResponse) MarshalJSON() ([]byte, error) { +func (s ListSchemaRevisionsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListSchemaRevisionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListSchemasResponse: Response for the `ListSchemas` method. 
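Editor's note (illustration, not part of the patch): the hunks above add the CloudStorage and PlatformLogsSettings fields to IngestionDataSourceSettings. A hedged sketch of how a caller might wire the new fields through this generated client once the patch is vendored; the project, topic, bucket, and glob values are placeholders:

package main

import (
	"context"
	"log"

	pubsub "google.golang.org/api/pubsub/v1"
)

func main() {
	ctx := context.Background()
	svc, err := pubsub.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	topic := &pubsub.Topic{
		Name: "projects/my-project/topics/gcs-ingest", // placeholder
		IngestionDataSourceSettings: &pubsub.IngestionDataSourceSettings{
			CloudStorage: &pubsub.CloudStorage{
				Bucket:     "my-ingest-bucket", // no "gs://" prefix, per the field doc
				MatchGlob:  "logs/**/*.txt",
				TextFormat: &pubsub.TextFormat{Delimiter: "\n"}, // one message per line
			},
			// Emit Platform Logs at WARNING and above for ingestion problems.
			PlatformLogsSettings: &pubsub.PlatformLogsSettings{Severity: "WARNING"},
		},
	}
	if _, err := svc.Projects.Topics.Create(topic.Name, topic).Do(); err != nil {
		log.Fatal(err)
	}
}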
@@ -864,9 +971,9 @@ type ListSchemasResponse struct { NullFields []string `json:"-"` } -func (s *ListSchemasResponse) MarshalJSON() ([]byte, error) { +func (s ListSchemasResponse) MarshalJSON() ([]byte, error) { type NoMethod ListSchemasResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListSnapshotsResponse: Response for the `ListSnapshots` method. @@ -893,9 +1000,9 @@ type ListSnapshotsResponse struct { NullFields []string `json:"-"` } -func (s *ListSnapshotsResponse) MarshalJSON() ([]byte, error) { +func (s ListSnapshotsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListSnapshotsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListSubscriptionsResponse: Response for the `ListSubscriptions` method. @@ -922,9 +1029,9 @@ type ListSubscriptionsResponse struct { NullFields []string `json:"-"` } -func (s *ListSubscriptionsResponse) MarshalJSON() ([]byte, error) { +func (s ListSubscriptionsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListSubscriptionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListTopicSnapshotsResponse: Response for the `ListTopicSnapshots` method. @@ -951,9 +1058,9 @@ type ListTopicSnapshotsResponse struct { NullFields []string `json:"-"` } -func (s *ListTopicSnapshotsResponse) MarshalJSON() ([]byte, error) { +func (s ListTopicSnapshotsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListTopicSnapshotsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListTopicSubscriptionsResponse: Response for the `ListTopicSubscriptions` @@ -982,9 +1089,9 @@ type ListTopicSubscriptionsResponse struct { NullFields []string `json:"-"` } -func (s *ListTopicSubscriptionsResponse) MarshalJSON() ([]byte, error) { +func (s ListTopicSubscriptionsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListTopicSubscriptionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListTopicsResponse: Response for the `ListTopics` method. @@ -1011,9 +1118,9 @@ type ListTopicsResponse struct { NullFields []string `json:"-"` } -func (s *ListTopicsResponse) MarshalJSON() ([]byte, error) { +func (s ListTopicsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListTopicsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MessageStoragePolicy: A policy constraining the storage of messages @@ -1045,9 +1152,9 @@ type MessageStoragePolicy struct { NullFields []string `json:"-"` } -func (s *MessageStoragePolicy) MarshalJSON() ([]byte, error) { +func (s MessageStoragePolicy) MarshalJSON() ([]byte, error) { type NoMethod MessageStoragePolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ModifyAckDeadlineRequest: Request for the ModifyAckDeadline method. 
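Beyond the receiver change, the CloudStorageConfig hunk earlier in this file adds MaxMessages as a second file-rollover trigger next to the existing MaxDuration. A hedged sketch of how the two combine on a Cloud Storage subscription, using the generated pubsub/v1 types (the Bucket field and the Subscription.CloudStorageConfig wiring come from parts of the types not shown in these hunks, and all names are illustrative):

    // A new output file is started when either limit is hit first.
    cfg := &pubsub.CloudStorageConfig{
        Bucket:      "example-export-bucket", // assumed field, not in this hunk
        MaxDuration: "300s",                  // 1-10 minutes, default 5
        MaxMessages: 1000,                    // new field; minimum 1000
    }
    sub := &pubsub.Subscription{CloudStorageConfig: cfg} // assumed wiring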
@@ -1077,9 +1184,9 @@ type ModifyAckDeadlineRequest struct { NullFields []string `json:"-"` } -func (s *ModifyAckDeadlineRequest) MarshalJSON() ([]byte, error) { +func (s ModifyAckDeadlineRequest) MarshalJSON() ([]byte, error) { type NoMethod ModifyAckDeadlineRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ModifyPushConfigRequest: Request for the ModifyPushConfig method. @@ -1103,9 +1210,9 @@ type ModifyPushConfigRequest struct { NullFields []string `json:"-"` } -func (s *ModifyPushConfigRequest) MarshalJSON() ([]byte, error) { +func (s ModifyPushConfigRequest) MarshalJSON() ([]byte, error) { type NoMethod ModifyPushConfigRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NoWrapper: Sets the `data` field as the HTTP body for delivery. @@ -1127,9 +1234,9 @@ type NoWrapper struct { NullFields []string `json:"-"` } -func (s *NoWrapper) MarshalJSON() ([]byte, error) { +func (s NoWrapper) MarshalJSON() ([]byte, error) { type NoMethod NoWrapper - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OidcToken: Contains information needed for generating an OpenID Connect @@ -1160,9 +1267,41 @@ type OidcToken struct { NullFields []string `json:"-"` } -func (s *OidcToken) MarshalJSON() ([]byte, error) { +func (s OidcToken) MarshalJSON() ([]byte, error) { type NoMethod OidcToken - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// PlatformLogsSettings: Settings for Platform Logs produced by Pub/Sub. +type PlatformLogsSettings struct { + // Severity: Optional. The minimum severity level of Platform Logs that will be + // written. + // + // Possible values: + // "SEVERITY_UNSPECIFIED" - Default value. Logs level is unspecified. Logs + // will be disabled. + // "DISABLED" - Logs will be disabled. + // "DEBUG" - Debug logs and higher-severity logs will be written. + // "INFO" - Info logs and higher-severity logs will be written. + // "WARNING" - Warning logs and higher-severity logs will be written. + // "ERROR" - Only error logs will be written. + Severity string `json:"severity,omitempty"` + // ForceSendFields is a list of field names (e.g. "Severity") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Severity") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s PlatformLogsSettings) MarshalJSON() ([]byte, error) { + type NoMethod PlatformLogsSettings + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Policy: An Identity and Access Management (IAM) policy, which specifies @@ -1250,9 +1389,17 @@ type Policy struct { NullFields []string `json:"-"` } -func (s *Policy) MarshalJSON() ([]byte, error) { +func (s Policy) MarshalJSON() ([]byte, error) { type NoMethod Policy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// PubSubAvroFormat: Configuration for reading Cloud Storage data written via +// Cloud Storage subscriptions +// (https://cloud.google.com/pubsub/docs/cloudstorage). The data and attributes +// fields of the originally exported Pub/Sub message will be restored when +// publishing. +type PubSubAvroFormat struct { } // PublishRequest: Request for the Publish method. @@ -1272,9 +1419,9 @@ type PublishRequest struct { NullFields []string `json:"-"` } -func (s *PublishRequest) MarshalJSON() ([]byte, error) { +func (s PublishRequest) MarshalJSON() ([]byte, error) { type NoMethod PublishRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PublishResponse: Response for the `Publish` method. @@ -1299,9 +1446,9 @@ type PublishResponse struct { NullFields []string `json:"-"` } -func (s *PublishResponse) MarshalJSON() ([]byte, error) { +func (s PublishResponse) MarshalJSON() ([]byte, error) { type NoMethod PublishResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PubsubMessage: A message that is published by publishers and consumed by @@ -1351,9 +1498,9 @@ type PubsubMessage struct { NullFields []string `json:"-"` } -func (s *PubsubMessage) MarshalJSON() ([]byte, error) { +func (s PubsubMessage) MarshalJSON() ([]byte, error) { type NoMethod PubsubMessage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PubsubWrapper: The payload to the push endpoint is in the form of the JSON @@ -1389,9 +1536,9 @@ type PullRequest struct { NullFields []string `json:"-"` } -func (s *PullRequest) MarshalJSON() ([]byte, error) { +func (s PullRequest) MarshalJSON() ([]byte, error) { type NoMethod PullRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PullResponse: Response for the `Pull` method. @@ -1419,9 +1566,9 @@ type PullResponse struct { NullFields []string `json:"-"` } -func (s *PullResponse) MarshalJSON() ([]byte, error) { +func (s PullResponse) MarshalJSON() ([]byte, error) { type NoMethod PullResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PushConfig: Configuration for a push delivery endpoint. 
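PubSubAvroFormat above is deliberately field-less, like the existing TextConfig: in this generated surface an empty struct works as a format selector, and opting in just means assigning a non-nil pointer to it. Because such types have no fields (and the generator emits no MarshalJSON for them), they serialize as an empty JSON object, as this small sketch shows:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Mirrors the generated field-less selector types.
    type PubSubAvroFormat struct{}

    func main() {
        b, _ := json.Marshal(PubSubAvroFormat{})
        fmt.Println(string(b)) // {}
    }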
@@ -1469,9 +1616,9 @@ type PushConfig struct { NullFields []string `json:"-"` } -func (s *PushConfig) MarshalJSON() ([]byte, error) { +func (s PushConfig) MarshalJSON() ([]byte, error) { type NoMethod PushConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ReceivedMessage: A message and its corresponding acknowledgment ID. @@ -1505,9 +1652,9 @@ type ReceivedMessage struct { NullFields []string `json:"-"` } -func (s *ReceivedMessage) MarshalJSON() ([]byte, error) { +func (s ReceivedMessage) MarshalJSON() ([]byte, error) { type NoMethod ReceivedMessage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RetryPolicy: A policy that specifies how Pub/Sub retries message delivery. @@ -1539,9 +1686,9 @@ type RetryPolicy struct { NullFields []string `json:"-"` } -func (s *RetryPolicy) MarshalJSON() ([]byte, error) { +func (s RetryPolicy) MarshalJSON() ([]byte, error) { type NoMethod RetryPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RollbackSchemaRequest: Request for the `RollbackSchema` method. @@ -1562,9 +1709,9 @@ type RollbackSchemaRequest struct { NullFields []string `json:"-"` } -func (s *RollbackSchemaRequest) MarshalJSON() ([]byte, error) { +func (s RollbackSchemaRequest) MarshalJSON() ([]byte, error) { type NoMethod RollbackSchemaRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Schema: A schema resource. @@ -1604,9 +1751,9 @@ type Schema struct { NullFields []string `json:"-"` } -func (s *Schema) MarshalJSON() ([]byte, error) { +func (s Schema) MarshalJSON() ([]byte, error) { type NoMethod Schema - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SchemaSettings: Settings for validating messages published against a schema. @@ -1645,9 +1792,9 @@ type SchemaSettings struct { NullFields []string `json:"-"` } -func (s *SchemaSettings) MarshalJSON() ([]byte, error) { +func (s SchemaSettings) MarshalJSON() ([]byte, error) { type NoMethod SchemaSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SeekRequest: Request for the `Seek` method. @@ -1680,9 +1827,9 @@ type SeekRequest struct { NullFields []string `json:"-"` } -func (s *SeekRequest) MarshalJSON() ([]byte, error) { +func (s SeekRequest) MarshalJSON() ([]byte, error) { type NoMethod SeekRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SeekResponse: Response for the `Seek` method (this response is empty). 
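SeekRequest above ties into the retention change in the next hunk: a subscription can be rewound to a timestamp or to a named snapshot, bounded by how long messages are retained. A hedged sketch against the generated client (the Time field and the Seek call shape are assumed from the full generated surface, not quoted from this diff; names and the timestamp are illustrative):

    svc, err := pubsub.NewService(ctx) // ctx supplied by the caller
    if err != nil {
        log.Fatal(err)
    }
    // Rewind delivery: retained messages at or after this instant become
    // deliverable again.
    seekReq := &pubsub.SeekRequest{Time: "2026-01-15T00:00:00Z"}
    name := "projects/example-project/subscriptions/example-sub"
    _, err = svc.Projects.Subscriptions.Seek(name, seekReq).Do()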
@@ -1711,9 +1858,9 @@ type SetIamPolicyRequest struct { NullFields []string `json:"-"` } -func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { +func (s SetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod SetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Snapshot: A snapshot resource. Snapshots are used in Seek @@ -1757,9 +1904,9 @@ type Snapshot struct { NullFields []string `json:"-"` } -func (s *Snapshot) MarshalJSON() ([]byte, error) { +func (s Snapshot) MarshalJSON() ([]byte, error) { type NoMethod Snapshot - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Subscription: A subscription resource. If none of `push_config`, @@ -1783,6 +1930,10 @@ type Subscription struct { // the push endpoint. If the subscriber never acknowledges the message, the // Pub/Sub system will eventually redeliver the message. AckDeadlineSeconds int64 `json:"ackDeadlineSeconds,omitempty"` + // AnalyticsHubSubscriptionInfo: Output only. Information about the associated + // Analytics Hub subscription. Only set if the subscription is created by + // Analytics Hub. + AnalyticsHubSubscriptionInfo *AnalyticsHubSubscriptionInfo `json:"analyticsHubSubscriptionInfo,omitempty"` // BigqueryConfig: Optional. If delivery to BigQuery is used with this // subscription, this field is used to configure it. BigqueryConfig *BigQueryConfig `json:"bigqueryConfig,omitempty"` @@ -1838,8 +1989,8 @@ type Subscription struct { // messages in the subscription's backlog, from the moment a message is // published. If `retain_acked_messages` is true, then this also configures the // retention of acknowledged messages, and thus configures how far back in time - // a `Seek` can be done. Defaults to 7 days. Cannot be more than 7 days or less - // than 10 minutes. + // a `Seek` can be done. Defaults to 7 days. Cannot be more than 31 days or + // less than 10 minutes. MessageRetentionDuration string `json:"messageRetentionDuration,omitempty"` // Name: Required. The name of the subscription. It must have the format // "projects/{project}/subscriptions/{subscription}". `{subscription}` must @@ -1903,9 +2054,9 @@ type Subscription struct { NullFields []string `json:"-"` } -func (s *Subscription) MarshalJSON() ([]byte, error) { +func (s Subscription) MarshalJSON() ([]byte, error) { type NoMethod Subscription - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsRequest: Request message for `TestIamPermissions` method.
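The retention hunk above raises the documented ceiling for message_retention_duration from 7 days to 31, while the default stays at 7. A sketch of a subscription that opts into the full window so a `Seek` can reach a month back (MessageRetentionDuration and Name appear in the hunk above; Topic and RetainAckedMessages are assumed from the full type, and values are illustrative):

    sub := &pubsub.Subscription{
        Name:  "projects/example-project/subscriptions/example-sub",
        Topic: "projects/example-project/topics/example-topic", // assumed field
        // 31 days, the new documented maximum; minimum is 10 minutes.
        MessageRetentionDuration: "2678400s",
        // Also retain acknowledged messages so Seek can replay them.
        RetainAckedMessages: true, // assumed field
    }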
@@ -1928,9 +2079,9 @@ type TestIamPermissionsRequest struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsResponse: Response message for `TestIamPermissions` @@ -1955,9 +2106,9 @@ type TestIamPermissionsResponse struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TextConfig: Configuration for writing message data in text format. Message @@ -1965,6 +2116,30 @@ func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { type TextConfig struct { } +// TextFormat: Configuration for reading Cloud Storage data in text format. +// Each line of text as specified by the delimiter will be set to the `data` +// field of a Pub/Sub message. +type TextFormat struct { + // Delimiter: Optional. When unset, '\n' is used. + Delimiter string `json:"delimiter,omitempty"` + // ForceSendFields is a list of field names (e.g. "Delimiter") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Delimiter") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s TextFormat) MarshalJSON() ([]byte, error) { + type NoMethod TextFormat + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + // Topic: A topic resource. type Topic struct { // IngestionDataSourceSettings: Optional. Settings for ingestion from a data @@ -2029,9 +2204,9 @@ type Topic struct { NullFields []string `json:"-"` } -func (s *Topic) MarshalJSON() ([]byte, error) { +func (s Topic) MarshalJSON() ([]byte, error) { type NoMethod Topic - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateSnapshotRequest: Request for the UpdateSnapshot method. @@ -2054,9 +2229,9 @@ type UpdateSnapshotRequest struct { NullFields []string `json:"-"` } -func (s *UpdateSnapshotRequest) MarshalJSON() ([]byte, error) { +func (s UpdateSnapshotRequest) MarshalJSON() ([]byte, error) { type NoMethod UpdateSnapshotRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateSubscriptionRequest: Request for the UpdateSubscription method. 
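With CloudStorage, PlatformLogsSettings, and TextFormat all defined, the new ingestion surface composes into a Topic like this (a sketch only: the Bucket and Name fields are assumed from parts of the types not shown in these hunks, and all values are illustrative):

    topic := &pubsub.Topic{
        Name: "projects/example-project/topics/example-ingest", // assumed field
        IngestionDataSourceSettings: &pubsub.IngestionDataSourceSettings{
            CloudStorage: &pubsub.CloudStorage{
                Bucket: "example-source-bucket", // assumed field
                // Each delimiter-separated line becomes the data field of
                // one Pub/Sub message; '\n' is also the default.
                TextFormat: &pubsub.TextFormat{Delimiter: "\n"},
            },
            // Emit Platform Logs at WARNING and above; leaving this unset
            // disables them.
            PlatformLogsSettings: &pubsub.PlatformLogsSettings{Severity: "WARNING"},
        },
    }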
@@ -2079,9 +2254,9 @@ type UpdateSubscriptionRequest struct { NullFields []string `json:"-"` } -func (s *UpdateSubscriptionRequest) MarshalJSON() ([]byte, error) { +func (s UpdateSubscriptionRequest) MarshalJSON() ([]byte, error) { type NoMethod UpdateSubscriptionRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateTopicRequest: Request for the UpdateTopic method. @@ -2107,9 +2282,9 @@ type UpdateTopicRequest struct { NullFields []string `json:"-"` } -func (s *UpdateTopicRequest) MarshalJSON() ([]byte, error) { +func (s UpdateTopicRequest) MarshalJSON() ([]byte, error) { type NoMethod UpdateTopicRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ValidateMessageRequest: Request for the `ValidateMessage` method. @@ -2142,9 +2317,9 @@ type ValidateMessageRequest struct { NullFields []string `json:"-"` } -func (s *ValidateMessageRequest) MarshalJSON() ([]byte, error) { +func (s ValidateMessageRequest) MarshalJSON() ([]byte, error) { type NoMethod ValidateMessageRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ValidateMessageResponse: Response for the `ValidateMessage` method. Empty @@ -2171,9 +2346,9 @@ type ValidateSchemaRequest struct { NullFields []string `json:"-"` } -func (s *ValidateSchemaRequest) MarshalJSON() ([]byte, error) { +func (s ValidateSchemaRequest) MarshalJSON() ([]byte, error) { type NoMethod ValidateSchemaRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ValidateSchemaResponse: Response for the `ValidateSchema` method. Empty for diff --git a/terraform/providers/google/vendor/google.golang.org/api/run/v2/run-api.json b/terraform/providers/google/vendor/google.golang.org/api/run/v2/run-api.json index 2942fe2ff5a..4a7889ee9d0 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/run/v2/run-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/run/v2/run-api.json @@ -388,9 +388,66 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] + }, + "exportProjectMetadata": { + "description": "Export generated customer metadata for a given project.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}:exportProjectMetadata", + "httpMethod": "GET", + "id": "run.projects.locations.exportProjectMetadata", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the project of which metadata should be exported. 
Format: `projects/{project_id_or_number}/locations/{location}` for Project in a given location.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}:exportProjectMetadata", + "response": { + "$ref": "GoogleCloudRunV2Metadata" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] } }, "resources": { + "builds": { + "methods": { + "submit": { + "description": "Submits a build in a given project.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/builds:submit", + "httpMethod": "POST", + "id": "run.projects.locations.builds.submit", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The project and location to build in. Location must be a region, e.g., 'us-central1' or 'global' if the global builder is to be used. Format: `projects/{project}/locations/{location}`", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/builds:submit", + "request": { + "$ref": "GoogleCloudRunV2SubmitBuildRequest" + }, + "response": { + "$ref": "GoogleCloudRunV2SubmitBuildResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, "jobs": { "methods": { "create": { @@ -1469,7 +1526,7 @@ } } }, - "revision": "20240607", + "revision": "20241004", "rootUrl": "https://run.googleapis.com/", "schemas": { "GoogleCloudRunV2BinaryAuthorization": { @@ -1481,7 +1538,7 @@ "type": "string" }, "policy": { - "description": "Optional. The path to a binary authorization policy. Format: projects/{project}/platforms/cloudRun/{policy-name}", + "description": "Optional. The path to a binary authorization policy. Format: `projects/{project}/platforms/cloudRun/{policy-name}`", "type": "string" }, "useDefault": { @@ -1491,6 +1548,41 @@ }, "type": "object" }, + "GoogleCloudRunV2BuildpacksBuild": { + "description": "Build the source using Buildpacks.", + "id": "GoogleCloudRunV2BuildpacksBuild", + "properties": { + "baseImage": { + "description": "Optional. The base image used to opt into automatic base image updates.", + "type": "string" + }, + "cacheImageUri": { + "description": "Optional. cache_image_uri is the GCR/AR URL where the cache image will be stored. cache_image_uri is optional and omitting it will disable caching. This URL must be stable across builds. It is used to derive a build-specific temporary URL by substituting the tag with the build ID. The build will clean up the temporary image on a best-effort basis.", + "type": "string" + }, + "enableAutomaticUpdates": { + "description": "Optional. Whether or not the application container will be enrolled in automatic base image updates. When true, the application will be built on a scratch base image, so the base layers can be appended at run time.", + "type": "boolean" + }, + "environmentVariables": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. User-provided build-time environment variables.", + "type": "object" + }, + "functionTarget": { + "description": "Optional. Name of the function target if the source is a function source. Required for function builds.", + "type": "string" + }, + "runtime": { + "deprecated": true, + "description": "The runtime name, e.g. 'go113'. 
Leave blank for generic builds.", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudRunV2CancelExecutionRequest": { "description": "Request message for deleting an Execution.", "id": "GoogleCloudRunV2CancelExecutionRequest", @@ -1787,6 +1879,12 @@ }, "type": "object" }, + "GoogleCloudRunV2DockerBuild": { + "description": "Build the source using Docker. This means the source has a Dockerfile.", + "id": "GoogleCloudRunV2DockerBuild", + "properties": {}, + "type": "object" + }, "GoogleCloudRunV2EmptyDirVolumeSource": { "description": "In memory (tmpfs) ephemeral storage. It is ephemeral in the sense that when the sandbox is taken down, the data is destroyed with it (it does not persist across sandbox runs).", "id": "GoogleCloudRunV2EmptyDirVolumeSource", @@ -1819,7 +1917,7 @@ "type": "string" }, "value": { - "description": "Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\", and the maximum length is 32768 bytes.", + "description": "Literal value of the environment variable. Defaults to \"\", and the maximum length is 32768 bytes. Variable references are not supported in Cloud Run.", "type": "string" }, "valueSource": { @@ -2029,6 +2127,26 @@ "description": "Reference to an Execution. Use /Executions.GetExecution with the given name to get full execution including the latest status.", "id": "GoogleCloudRunV2ExecutionReference", "properties": { + "completionStatus": { + "description": "Status for the execution completion.", + "enum": [ + "COMPLETION_STATUS_UNSPECIFIED", + "EXECUTION_SUCCEEDED", + "EXECUTION_FAILED", + "EXECUTION_RUNNING", + "EXECUTION_PENDING", + "EXECUTION_CANCELLED" + ], + "enumDescriptions": [ + "The default value. This value is used if the state is omitted.", + "Job execution has succeeded.", + "Job execution has failed.", + "Job execution is running normally.", + "Waiting for backing resources to be provisioned.", + "Job execution has been cancelled by the user." + ], + "type": "string" + }, "completionTime": { "description": "Creation timestamp of the execution.", "format": "google-datetime", @@ -2039,6 +2157,11 @@ "format": "google-datetime", "type": "string" }, + "deleteTime": { + "description": "The deletion time of the execution. It is only populated as a response to a Delete request.", + "format": "google-datetime", + "type": "string" + }, "name": { "description": "Name of the execution.", "type": "string" @@ -2141,7 +2264,7 @@ "id": "GoogleCloudRunV2GCSVolumeSource", "properties": { "bucket": { - "description": "Cloud Storage Bucket name. TODO (b/344678062) Fix the error validation once dynamic mounting is public.", + "description": "Cloud Storage Bucket name.", "type": "string" }, "readOnly": { @@ -2282,7 +2405,7 @@ "type": "string" }, "deleteTime": { - "description": "Output only. The deletion time.", + "description": "Output only. The deletion time. It is only populated as a response to a Delete request.", "format": "google-datetime", "readOnly": true, "type": "string" @@ -2818,6 +2941,10 @@ "description": "Email address of the IAM service account associated with the revision of the service. 
The service account represents the identity of the running revision, and determines what permissions the revision has.", "type": "string" }, + "serviceMesh": { + "$ref": "GoogleCloudRunV2ServiceMesh", + "description": "Enables service mesh connectivity." + }, "sessionAffinity": { "description": "Enable session affinity.", "type": "boolean" @@ -2857,7 +2984,7 @@ "id": "GoogleCloudRunV2RevisionScaling", "properties": { "maxInstanceCount": { - "description": "Optional. Maximum number of serving instances that this resource should have.", + "description": "Optional. Maximum number of serving instances that this resource should have. When unspecified, the field is set to the server default value of 100. For more information see https://cloud.google.com/run/docs/configuring/max-instances", "format": "int32", "type": "integer" }, @@ -2929,7 +3056,7 @@ "type": "object" }, "maxInstanceRequestConcurrency": { - "description": "Optional. Sets the maximum number of requests that each serving instance can receive.", + "description": "Optional. Sets the maximum number of requests that each serving instance can receive. If not specified or 0, defaults to 80 when requested `CPU \u003e= 1` and defaults to 1 when requested `CPU \u003c 1`.", "format": "int32", "type": "integer" }, @@ -2949,6 +3076,10 @@ "description": "Optional. Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account.", "type": "string" }, + "serviceMesh": { + "$ref": "GoogleCloudRunV2ServiceMesh", + "description": "Optional. Enables service mesh connectivity." + }, "sessionAffinity": { "description": "Optional. Enable session affinity.", "type": "boolean" @@ -3083,7 +3214,7 @@ "type": "boolean" }, "deleteTime": { - "description": "Output only. The deletion time.", + "description": "Output only. The deletion time. It is only populated as a response to a Delete request.", "format": "google-datetime", "readOnly": true, "type": "string" @@ -3127,6 +3258,10 @@ ], "type": "string" }, + "invokerIamDisabled": { + "description": "Optional. Disables IAM permission check for run.routes.invoke for callers of this service. This setting should not be used with external ingress.", + "type": "boolean" + }, "labels": { "additionalProperties": { "type": "string" @@ -3236,6 +3371,25 @@ "description": "Output only. The main URI in which this Service is serving traffic.", "readOnly": true, "type": "string" + }, + "urls": { + "description": "Output only. All URLs serving traffic for this Service.", + "items": { + "type": "string" + }, + "readOnly": true, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudRunV2ServiceMesh": { + "description": "Settings for Cloud Service Mesh. For more information see https://cloud.google.com/service-mesh/docs/overview.", + "id": "GoogleCloudRunV2ServiceMesh", + "properties": { + "mesh": { + "description": "The Mesh resource name. Format: `projects/{project}/locations/global/meshes/{mesh}`, where `{project}` can be project id or number.", + "type": "string" } }, "type": "object" @@ -3245,9 +3399,100 @@ "id": "GoogleCloudRunV2ServiceScaling", "properties": { "minInstanceCount": { - "description": "Optional. total min instances for the service. This number of instances is divided among all revisions with specified traffic based on the percent of traffic they are receiving. 
(BETA)", + "description": "Optional. total min instances for the service. This number of instances is divided among all revisions with specified traffic based on the percent of traffic they are receiving.", "format": "int32", "type": "integer" + }, + "scalingMode": { + "description": "Optional. The scaling mode for the service.", + "enum": [ + "SCALING_MODE_UNSPECIFIED", + "AUTOMATIC", + "MANUAL" + ], + "enumDescriptions": [ + "Unspecified.", + "Scale based on traffic between min and max instances.", + "Scale to exactly min instances and ignore max instances." + ], + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudRunV2StorageSource": { + "description": "Location of the source in an archive file in Google Cloud Storage.", + "id": "GoogleCloudRunV2StorageSource", + "properties": { + "bucket": { + "description": "Required. Google Cloud Storage bucket containing the source (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)).", + "type": "string" + }, + "generation": { + "description": "Optional. Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used.", + "format": "int64", + "type": "string" + }, + "object": { + "description": "Required. Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudRunV2SubmitBuildRequest": { + "description": "Request message for submitting a Build.", + "id": "GoogleCloudRunV2SubmitBuildRequest", + "properties": { + "buildpackBuild": { + "$ref": "GoogleCloudRunV2BuildpacksBuild", + "description": "Build the source using Buildpacks." + }, + "dockerBuild": { + "$ref": "GoogleCloudRunV2DockerBuild", + "description": "Build the source using Docker. This means the source has a Dockerfile." + }, + "imageUri": { + "description": "Required. Artifact Registry URI to store the built image.", + "type": "string" + }, + "serviceAccount": { + "description": "Optional. The service account to use for the build. If not set, the default Cloud Build service account for the project will be used.", + "type": "string" + }, + "storageSource": { + "$ref": "GoogleCloudRunV2StorageSource", + "description": "Required. Source for the build." + }, + "tags": { + "description": "Optional. Additional tags to annotate the build.", + "items": { + "type": "string" + }, + "type": "array" + }, + "workerPool": { + "description": "Optional. Name of the Cloud Build Custom Worker Pool that should be used to build the function. The format of this field is `projects/{project}/locations/{region}/workerPools/{workerPool}` where `{project}` and `{region}` are the project id and region respectively where the worker pool is defined and `{workerPool}` is the short name of the worker pool.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudRunV2SubmitBuildResponse": { + "description": "Response message for submitting a Build.", + "id": "GoogleCloudRunV2SubmitBuildResponse", + "properties": { + "baseImageUri": { + "description": "URI of the base builder image in Artifact Registry being used in the build. Used to opt into automatic base image updates.", + "type": "string" + }, + "baseImageWarning": { + "description": "Warning message for the base image.", + "type": "string" + }, + "buildOperation": { + "$ref": "GoogleLongrunningOperation", + "description": "Cloud Build operation to be polled via CloudBuild API." 
} }, "type": "object" @@ -3676,7 +3921,7 @@ "id": "GoogleCloudRunV2VpcAccess", "properties": { "connector": { - "description": "VPC Access connector name. Format: projects/{project}/locations/{location}/connectors/{connector}, where {project} can be project id or number. For more information on sending traffic to a VPC network via a connector, visit https://cloud.google.com/run/docs/configuring/vpc-connectors.", + "description": "VPC Access connector name. Format: `projects/{project}/locations/{location}/connectors/{connector}`, where `{project}` can be project id or number. For more information on sending traffic to a VPC network via a connector, visit https://cloud.google.com/run/docs/configuring/vpc-connectors.", "type": "string" }, "egress": { @@ -4068,11 +4313,13 @@ "description": "Optional. Option to specify how default logs buckets are setup.", "enum": [ "DEFAULT_LOGS_BUCKET_BEHAVIOR_UNSPECIFIED", - "REGIONAL_USER_OWNED_BUCKET" + "REGIONAL_USER_OWNED_BUCKET", + "LEGACY_BUCKET" ], "enumDescriptions": [ "Unspecified.", - "Bucket is located in user-owned project in the same region as the build. The builder service account must have access to create and write to Cloud Storage buckets in the build project." + "Bucket is located in user-owned project in the same region as the build. The builder service account must have access to create and write to Cloud Storage buckets in the build project.", + "Bucket is located in a Google-owned project and is not regionalized." ], "type": "string" }, @@ -4464,26 +4711,6 @@ }, "type": "object" }, - "GoogleDevtoolsCloudbuildV1GCSLocation": { - "description": "Represents a storage location in Cloud Storage", - "id": "GoogleDevtoolsCloudbuildV1GCSLocation", - "properties": { - "bucket": { - "description": "Cloud Storage bucket. See https://cloud.google.com/storage/docs/naming#requirements", - "type": "string" - }, - "generation": { - "description": "Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used.", - "format": "int64", - "type": "string" - }, - "object": { - "description": "Cloud Storage object. See https://cloud.google.com/storage/docs/naming#objectnames", - "type": "string" - } - }, - "type": "object" - }, "GoogleDevtoolsCloudbuildV1GitConfig": { "description": "GitConfig is a configuration for git operations.", "id": "GoogleDevtoolsCloudbuildV1GitConfig", @@ -4547,12 +4774,8 @@ "id": "GoogleDevtoolsCloudbuildV1HttpConfig", "properties": { "proxySecretVersionName": { - "description": "SecretVersion resource of the HTTP proxy URL. The proxy URL should be in format protocol://@]proxyhost[:port].", + "description": "SecretVersion resource of the HTTP proxy URL. The Service Account used in the build (either the default Service Account or user-specified Service Account) should have `secretmanager.versions.access` permissions on this secret. The proxy URL should be in format `protocol://@]proxyhost[:port]`.", "type": "string" - }, - "proxySslCaInfo": { - "$ref": "GoogleDevtoolsCloudbuildV1GCSLocation", - "description": "Optional. Cloud Storage object storing the certificate to use with the HTTP proxy." 
} }, "type": "object" diff --git a/terraform/providers/google/vendor/google.golang.org/api/run/v2/run-gen.go b/terraform/providers/google/vendor/google.golang.org/api/run/v2/run-gen.go index b0ac182b37b..641ce717b8a 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/run/v2/run-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/run/v2/run-gen.go @@ -168,6 +168,7 @@ type ProjectsService struct { func NewProjectsLocationsService(s *Service) *ProjectsLocationsService { rs := &ProjectsLocationsService{s: s} + rs.Builds = NewProjectsLocationsBuildsService(s) rs.Jobs = NewProjectsLocationsJobsService(s) rs.Operations = NewProjectsLocationsOperationsService(s) rs.Services = NewProjectsLocationsServicesService(s) @@ -177,6 +178,8 @@ func NewProjectsLocationsService(s *Service) *ProjectsLocationsService { type ProjectsLocationsService struct { s *Service + Builds *ProjectsLocationsBuildsService + Jobs *ProjectsLocationsJobsService Operations *ProjectsLocationsOperationsService @@ -184,6 +187,15 @@ type ProjectsLocationsService struct { Services *ProjectsLocationsServicesService } +func NewProjectsLocationsBuildsService(s *Service) *ProjectsLocationsBuildsService { + rs := &ProjectsLocationsBuildsService{s: s} + return rs +} + +type ProjectsLocationsBuildsService struct { + s *Service +} + func NewProjectsLocationsJobsService(s *Service) *ProjectsLocationsJobsService { rs := &ProjectsLocationsJobsService{s: s} rs.Executions = NewProjectsLocationsJobsExecutionsService(s) @@ -256,7 +268,7 @@ type GoogleCloudRunV2BinaryAuthorization struct { // https://cloud.google.com/binary-authorization/docs/using-breakglass BreakglassJustification string `json:"breakglassJustification,omitempty"` // Policy: Optional. The path to a binary authorization policy. Format: - // projects/{project}/platforms/cloudRun/{policy-name} + // `projects/{project}/platforms/cloudRun/{policy-name}` Policy string `json:"policy,omitempty"` // UseDefault: Optional. If True, indicates to use the default project's binary // authorization policy. If False, binary authorization will be disabled. @@ -274,9 +286,51 @@ type GoogleCloudRunV2BinaryAuthorization struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2BinaryAuthorization) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2BinaryAuthorization) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2BinaryAuthorization - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// GoogleCloudRunV2BuildpacksBuild: Build the source using Buildpacks. +type GoogleCloudRunV2BuildpacksBuild struct { + // BaseImage: Optional. The base image used to opt into automatic base image + // updates. + BaseImage string `json:"baseImage,omitempty"` + // CacheImageUri: Optional. cache_image_uri is the GCR/AR URL where the cache + // image will be stored. cache_image_uri is optional and omitting it will + // disable caching. This URL must be stable across builds. It is used to derive + // a build-specific temporary URL by substituting the tag with the build ID. + // The build will clean up the temporary image on a best-effort basis. + CacheImageUri string `json:"cacheImageUri,omitempty"` + // EnableAutomaticUpdates: Optional. Whether or not the application container + // will be enrolled in automatic base image updates. When true, the application + // will be built on a scratch base image, so the base layers can be appended at + // run time. 
+ EnableAutomaticUpdates bool `json:"enableAutomaticUpdates,omitempty"` + // EnvironmentVariables: Optional. User-provided build-time environment + // variables. + EnvironmentVariables map[string]string `json:"environmentVariables,omitempty"` + // FunctionTarget: Optional. Name of the function target if the source is a + // function source. Required for function builds. + FunctionTarget string `json:"functionTarget,omitempty"` + // Runtime: The runtime name, e.g. 'go113'. Leave blank for generic builds. + Runtime string `json:"runtime,omitempty"` + // ForceSendFields is a list of field names (e.g. "BaseImage") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "BaseImage") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleCloudRunV2BuildpacksBuild) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudRunV2BuildpacksBuild + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2CancelExecutionRequest: Request message for deleting an @@ -301,9 +355,9 @@ type GoogleCloudRunV2CancelExecutionRequest struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2CancelExecutionRequest) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2CancelExecutionRequest) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2CancelExecutionRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2CloudSqlInstance: Represents a set of Cloud SQL instances. @@ -330,9 +384,9 @@ type GoogleCloudRunV2CloudSqlInstance struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2CloudSqlInstance) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2CloudSqlInstance) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2CloudSqlInstance - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2Condition: Defines a status condition for a resource. @@ -444,9 +498,9 @@ type GoogleCloudRunV2Condition struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2Condition) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2Condition) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2Condition - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2Container: A single application container. 
This specifies @@ -504,9 +558,9 @@ type GoogleCloudRunV2Container struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2Container) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2Container) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2Container - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2ContainerOverride: Per-container override specification. @@ -535,9 +589,9 @@ type GoogleCloudRunV2ContainerOverride struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2ContainerOverride) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2ContainerOverride) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2ContainerOverride - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2ContainerPort: ContainerPort represents a network port in a @@ -562,9 +616,14 @@ type GoogleCloudRunV2ContainerPort struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2ContainerPort) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2ContainerPort) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2ContainerPort - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// GoogleCloudRunV2DockerBuild: Build the source using Docker. This means the +// source has a Dockerfile. +type GoogleCloudRunV2DockerBuild struct { } // GoogleCloudRunV2EmptyDirVolumeSource: In memory (tmpfs) ephemeral storage. @@ -602,9 +661,9 @@ type GoogleCloudRunV2EmptyDirVolumeSource struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2EmptyDirVolumeSource) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2EmptyDirVolumeSource) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2EmptyDirVolumeSource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2EnvVar: EnvVar represents an environment variable present in @@ -613,13 +672,9 @@ type GoogleCloudRunV2EnvVar struct { // Name: Required. Name of the environment variable. Must not exceed 32768 // characters. Name string `json:"name,omitempty"` - // Value: Variable references $(VAR_NAME) are expanded using the previous - // defined environment variables in the container and any route environment - // variables. If a variable cannot be resolved, the reference in the input - // string will be unchanged. The $(VAR_NAME) syntax can be escaped with a - // double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. Defaults to "", and the - // maximum length is 32768 bytes. + // Value: Literal value of the environment variable. Defaults to "", and the + // maximum length is 32768 bytes. Variable references are not supported in + // Cloud Run. Value string `json:"value,omitempty"` // ValueSource: Source for the environment variable's value. 
ValueSource *GoogleCloudRunV2EnvVarSource `json:"valueSource,omitempty"` @@ -636,9 +691,9 @@ type GoogleCloudRunV2EnvVar struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2EnvVar) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2EnvVar) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2EnvVar - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2EnvVarSource: EnvVarSource represents a source for the value @@ -660,9 +715,9 @@ type GoogleCloudRunV2EnvVarSource struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2EnvVarSource) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2EnvVarSource) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2EnvVarSource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2Execution: Execution represents the configuration of a @@ -813,37 +868,51 @@ type GoogleCloudRunV2Execution struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2Execution) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2Execution) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2Execution - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2ExecutionReference: Reference to an Execution. Use // /Executions.GetExecution with the given name to get full execution including // the latest status. type GoogleCloudRunV2ExecutionReference struct { + // CompletionStatus: Status for the execution completion. + // + // Possible values: + // "COMPLETION_STATUS_UNSPECIFIED" - The default value. This value is used if + // the state is omitted. + // "EXECUTION_SUCCEEDED" - Job execution has succeeded. + // "EXECUTION_FAILED" - Job execution has failed. + // "EXECUTION_RUNNING" - Job execution is running normally. + // "EXECUTION_PENDING" - Waiting for backing resources to be provisioned. + // "EXECUTION_CANCELLED" - Job execution has been cancelled by the user. + CompletionStatus string `json:"completionStatus,omitempty"` // CompletionTime: Creation timestamp of the execution. CompletionTime string `json:"completionTime,omitempty"` // CreateTime: Creation timestamp of the execution. CreateTime string `json:"createTime,omitempty"` + // DeleteTime: The deletion time of the execution. It is only populated as a + // response to a Delete request. + DeleteTime string `json:"deleteTime,omitempty"` // Name: Name of the execution. Name string `json:"name,omitempty"` - // ForceSendFields is a list of field names (e.g. "CompletionTime") to + // ForceSendFields is a list of field names (e.g. "CompletionStatus") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CompletionTime") to include in + // NullFields is a list of field names (e.g. "CompletionStatus") to include in // API requests with the JSON null value. By default, fields with empty values // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2ExecutionReference) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2ExecutionReference) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2ExecutionReference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2ExecutionTemplate: ExecutionTemplate describes the data an @@ -895,9 +964,9 @@ type GoogleCloudRunV2ExecutionTemplate struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2ExecutionTemplate) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2ExecutionTemplate) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2ExecutionTemplate - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2ExportImageRequest: Request message for exporting Cloud Run @@ -919,9 +988,9 @@ type GoogleCloudRunV2ExportImageRequest struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2ExportImageRequest) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2ExportImageRequest) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2ExportImageRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2ExportImageResponse: ExportImageResponse contains an @@ -946,9 +1015,9 @@ type GoogleCloudRunV2ExportImageResponse struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2ExportImageResponse) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2ExportImageResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2ExportImageResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2ExportStatusResponse: ExportStatusResponse contains the @@ -981,16 +1050,15 @@ type GoogleCloudRunV2ExportStatusResponse struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2ExportStatusResponse) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2ExportStatusResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2ExportStatusResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2GCSVolumeSource: Represents a volume backed by a Cloud // Storage bucket using Cloud Storage FUSE. type GoogleCloudRunV2GCSVolumeSource struct { - // Bucket: Cloud Storage Bucket name. TODO (b/344678062) Fix the error - // validation once dynamic mounting is public. + // Bucket: Cloud Storage Bucket name. Bucket string `json:"bucket,omitempty"` // ReadOnly: If true, the volume will be mounted as read only for all mounts. 
ReadOnly bool `json:"readOnly,omitempty"` @@ -1007,9 +1075,9 @@ type GoogleCloudRunV2GCSVolumeSource struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2GCSVolumeSource) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2GCSVolumeSource) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2GCSVolumeSource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2GRPCAction: GRPCAction describes an action involving a GRPC @@ -1037,9 +1105,9 @@ type GoogleCloudRunV2GRPCAction struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2GRPCAction) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2GRPCAction) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2GRPCAction - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2HTTPGetAction: HTTPGetAction describes an action based on @@ -1067,9 +1135,9 @@ type GoogleCloudRunV2HTTPGetAction struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2HTTPGetAction) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2HTTPGetAction) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2HTTPGetAction - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2HTTPHeader: HTTPHeader describes a custom header to be used @@ -1092,9 +1160,9 @@ type GoogleCloudRunV2HTTPHeader struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2HTTPHeader) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2HTTPHeader) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2HTTPHeader - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2ImageExportStatus: The status of an image export job. @@ -1127,9 +1195,9 @@ type GoogleCloudRunV2ImageExportStatus struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2ImageExportStatus) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2ImageExportStatus) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2ImageExportStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2Job: Job represents the configuration of a single job, which @@ -1159,7 +1227,8 @@ type GoogleCloudRunV2Job struct { CreateTime string `json:"createTime,omitempty"` // Creator: Output only. Email address of the authenticated creator. Creator string `json:"creator,omitempty"` - // DeleteTime: Output only. The deletion time. + // DeleteTime: Output only. The deletion time. It is only populated as a + // response to a Delete request. DeleteTime string `json:"deleteTime,omitempty"` // Etag: Output only. A system-generated fingerprint for this version of the // resource. May be used to detect modification conflict during updates. 
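The run/v2 hunks above add a complete source-build surface: a builds:submit method plus the BuildpacksBuild, DockerBuild, and StorageSource schemas, wired into the Go client as Projects.Locations.Builds. A hedged sketch of how the pieces compose (the Submit call shape follows the generator's usual parent-plus-request pattern rather than being quoted from this diff, and every concrete name below is illustrative):

    package main

    import (
        "context"
        "fmt"
        "log"

        run "google.golang.org/api/run/v2"
    )

    func main() {
        ctx := context.Background()
        svc, err := run.NewService(ctx) // Application Default Credentials
        if err != nil {
            log.Fatal(err)
        }
        req := &run.GoogleCloudRunV2SubmitBuildRequest{
            // Source must be a gzipped tarball (.tar.gz) in Cloud Storage.
            StorageSource: &run.GoogleCloudRunV2StorageSource{
                Bucket: "example-build-sources",
                Object: "app-src.tar.gz",
            },
            // Build strategy: Buildpacks here; set DockerBuild instead when
            // the source carries its own Dockerfile.
            BuildpackBuild: &run.GoogleCloudRunV2BuildpacksBuild{
                FunctionTarget:       "HelloHTTP", // required for function builds
                EnvironmentVariables: map[string]string{"GOFLAGS": "-mod=vendor"},
            },
            ImageUri: "us-central1-docker.pkg.dev/example-project/app/app:latest",
        }
        parent := "projects/example-project/locations/us-central1"
        resp, err := svc.Projects.Locations.Builds.Submit(parent, req).Do()
        if err != nil {
            log.Fatal(err)
        }
        // The build itself runs in Cloud Build; poll resp.BuildOperation there.
        fmt.Println(resp.BaseImageUri)
    }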
@@ -1293,9 +1362,9 @@ type GoogleCloudRunV2Job struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2Job) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2Job) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2Job - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2ListExecutionsResponse: Response message containing a list @@ -1322,9 +1391,9 @@ type GoogleCloudRunV2ListExecutionsResponse struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2ListExecutionsResponse) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2ListExecutionsResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2ListExecutionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2ListJobsResponse: Response message containing a list of @@ -1351,9 +1420,9 @@ type GoogleCloudRunV2ListJobsResponse struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2ListJobsResponse) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2ListJobsResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2ListJobsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2ListRevisionsResponse: Response message containing a list of @@ -1380,9 +1449,9 @@ type GoogleCloudRunV2ListRevisionsResponse struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2ListRevisionsResponse) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2ListRevisionsResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2ListRevisionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2ListServicesResponse: Response message containing a list of @@ -1409,9 +1478,9 @@ type GoogleCloudRunV2ListServicesResponse struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2ListServicesResponse) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2ListServicesResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2ListServicesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2ListTasksResponse: Response message containing a list of @@ -1438,9 +1507,9 @@ type GoogleCloudRunV2ListTasksResponse struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2ListTasksResponse) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2ListTasksResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2ListTasksResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2Metadata: Metadata represents the JSON encoded generated @@ -1465,9 +1534,9 @@ type GoogleCloudRunV2Metadata struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2Metadata) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2Metadata) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2Metadata - return gensupport.MarshalJSON(NoMethod(*s), 
s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2NFSVolumeSource: Represents an NFS mount. @@ -1491,9 +1560,9 @@ type GoogleCloudRunV2NFSVolumeSource struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2NFSVolumeSource) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2NFSVolumeSource) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2NFSVolumeSource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2NetworkInterface: Direct VPC egress settings. @@ -1525,9 +1594,9 @@ type GoogleCloudRunV2NetworkInterface struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2NetworkInterface) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2NetworkInterface) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2NetworkInterface - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2NodeSelector: Hardware constraints configuration. @@ -1547,9 +1616,9 @@ type GoogleCloudRunV2NodeSelector struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2NodeSelector) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2NodeSelector) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2NodeSelector - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2Overrides: RunJob Overrides that contains Execution fields @@ -1577,9 +1646,9 @@ type GoogleCloudRunV2Overrides struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2Overrides) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2Overrides) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2Overrides - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2Probe: Probe describes a health check to be performed @@ -1625,9 +1694,9 @@ type GoogleCloudRunV2Probe struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2Probe) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2Probe) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2Probe - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2ResourceRequirements: ResourceRequirements describes the @@ -1661,9 +1730,9 @@ type GoogleCloudRunV2ResourceRequirements struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2ResourceRequirements) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2ResourceRequirements) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2ResourceRequirements - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2Revision: A Revision is an immutable snapshot of code and @@ -1798,6 +1867,8 @@ type GoogleCloudRunV2Revision struct { // revision of the service. The service account represents the identity of the // running revision, and determines what permissions the revision has. 
ServiceAccount string `json:"serviceAccount,omitempty"` + // ServiceMesh: Enables service mesh connectivity. + ServiceMesh *GoogleCloudRunV2ServiceMesh `json:"serviceMesh,omitempty"` // SessionAffinity: Enable session affinity. SessionAffinity bool `json:"sessionAffinity,omitempty"` // Timeout: Max allowed time for an instance to respond to a request. @@ -1829,16 +1900,18 @@ type GoogleCloudRunV2Revision struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2Revision) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2Revision) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2Revision - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2RevisionScaling: Settings for revision-level scaling // settings. type GoogleCloudRunV2RevisionScaling struct { // MaxInstanceCount: Optional. Maximum number of serving instances that this - // resource should have. + // resource should have. When unspecified, the field is set to the server + // default value of 100. For more information see + // https://cloud.google.com/run/docs/configuring/max-instances MaxInstanceCount int64 `json:"maxInstanceCount,omitempty"` // MinInstanceCount: Optional. Minimum number of serving instances that this // resource should have. @@ -1856,9 +1929,9 @@ type GoogleCloudRunV2RevisionScaling struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2RevisionScaling) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2RevisionScaling) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2RevisionScaling - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2RevisionScalingStatus: Effective settings for the current @@ -1880,9 +1953,9 @@ type GoogleCloudRunV2RevisionScalingStatus struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2RevisionScalingStatus) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2RevisionScalingStatus) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2RevisionScalingStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2RevisionTemplate: RevisionTemplate describes the data a @@ -1927,7 +2000,8 @@ type GoogleCloudRunV2RevisionTemplate struct { // v2 RevisionTemplate. Labels map[string]string `json:"labels,omitempty"` // MaxInstanceRequestConcurrency: Optional. Sets the maximum number of requests - // that each serving instance can receive. + // that each serving instance can receive. If not specified or 0, defaults to + // 80 when requested `CPU >= 1` and defaults to 1 when requested `CPU < 1`. MaxInstanceRequestConcurrency int64 `json:"maxInstanceRequestConcurrency,omitempty"` // NodeSelector: Optional. The node selector for the revision template. NodeSelector *GoogleCloudRunV2NodeSelector `json:"nodeSelector,omitempty"` @@ -1942,6 +2016,8 @@ type GoogleCloudRunV2RevisionTemplate struct { // revision has. If not provided, the revision will use the project's default // service account. ServiceAccount string `json:"serviceAccount,omitempty"` + // ServiceMesh: Optional. Enables service mesh connectivity. + ServiceMesh *GoogleCloudRunV2ServiceMesh `json:"serviceMesh,omitempty"` // SessionAffinity: Optional. Enable session affinity. 
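
The updated comments pin down server-side defaults that were previously unstated: MaxInstanceCount falls back to 100 when unset, and MaxInstanceRequestConcurrency falls back to 80 when the revision requests at least one CPU and to 1 below that. A small sketch encoding those documented defaults (the `effectiveConcurrency` helper is hypothetical, not part of the generated client):

    package main

    import "fmt"

    // effectiveConcurrency mirrors the documented server defaults for
    // maxInstanceRequestConcurrency: when unset or 0, Cloud Run uses 80
    // for revisions requesting CPU >= 1 and 1 for revisions below that.
    func effectiveConcurrency(requested int64, cpu float64) int64 {
        if requested > 0 {
            return requested
        }
        if cpu >= 1 {
            return 80
        }
        return 1
    }

    func main() {
        fmt.Println(effectiveConcurrency(0, 2))   // 80
        fmt.Println(effectiveConcurrency(0, 0.5)) // 1
        fmt.Println(effectiveConcurrency(25, 2))  // 25
    }
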
SessionAffinity bool `json:"sessionAffinity,omitempty"` // Timeout: Optional. Max allowed time for an instance to respond to a request. @@ -1965,9 +2041,9 @@ type GoogleCloudRunV2RevisionTemplate struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2RevisionTemplate) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2RevisionTemplate) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2RevisionTemplate - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2RunJobRequest: Request message to create a new Execution of @@ -1995,9 +2071,9 @@ type GoogleCloudRunV2RunJobRequest struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2RunJobRequest) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2RunJobRequest) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2RunJobRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2SecretKeySelector: SecretEnvVarSource represents a source @@ -2024,9 +2100,9 @@ type GoogleCloudRunV2SecretKeySelector struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2SecretKeySelector) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2SecretKeySelector) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2SecretKeySelector - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2SecretVolumeSource: The secret's value will be presented as @@ -2070,9 +2146,9 @@ type GoogleCloudRunV2SecretVolumeSource struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2SecretVolumeSource) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2SecretVolumeSource) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2SecretVolumeSource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2Service: Service acts as a top-level container that manages @@ -2115,7 +2191,8 @@ type GoogleCloudRunV2Service struct { // DefaultUriDisabled: Optional. Disables public resolution of the default URI // of this service. DefaultUriDisabled bool `json:"defaultUriDisabled,omitempty"` - // DeleteTime: Output only. The deletion time. + // DeleteTime: Output only. The deletion time. It is only populated as a + // response to a Delete request. DeleteTime string `json:"deleteTime,omitempty"` // Description: User-provided description of the Service. This field currently // has a 512-character limit. @@ -2143,6 +2220,10 @@ type GoogleCloudRunV2Service struct { // Load Balancer traffic is allowed. // "INGRESS_TRAFFIC_NONE" - No ingress traffic is allowed. Ingress string `json:"ingress,omitempty"` + // InvokerIamDisabled: Optional. Disables IAM permission check for + // run.routes.invoke for callers of this service. This setting should not be + // used with external ingress. + InvokerIamDisabled bool `json:"invokerIamDisabled,omitempty"` // Labels: Optional. Unstructured key value map that can be used to organize // and categorize objects. 
User-provided labels are shared with Google's // billing system, so they can be used to filter, or break down billing charges @@ -2262,6 +2343,8 @@ type GoogleCloudRunV2Service struct { UpdateTime string `json:"updateTime,omitempty"` // Uri: Output only. The main URI in which this Service is serving traffic. Uri string `json:"uri,omitempty"` + // Urls: Output only. All URLs serving traffic for this Service. + Urls []string `json:"urls,omitempty"` // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` @@ -2278,9 +2361,34 @@ type GoogleCloudRunV2Service struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2Service) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2Service) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2Service - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// GoogleCloudRunV2ServiceMesh: Settings for Cloud Service Mesh. For more +// information see https://cloud.google.com/service-mesh/docs/overview. +type GoogleCloudRunV2ServiceMesh struct { + // Mesh: The Mesh resource name. Format: + // `projects/{project}/locations/global/meshes/{mesh}`, where `{project}` can + // be project id or number. + Mesh string `json:"mesh,omitempty"` + // ForceSendFields is a list of field names (e.g. "Mesh") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Mesh") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleCloudRunV2ServiceMesh) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudRunV2ServiceMesh + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2ServiceScaling: Scaling settings applied at the service @@ -2288,8 +2396,15 @@ func (s *GoogleCloudRunV2Service) MarshalJSON() ([]byte, error) { type GoogleCloudRunV2ServiceScaling struct { // MinInstanceCount: Optional. total min instances for the service. This number // of instances is divided among all revisions with specified traffic based on - // the percent of traffic they are receiving. (BETA) + // the percent of traffic they are receiving. MinInstanceCount int64 `json:"minInstanceCount,omitempty"` + // ScalingMode: Optional. The scaling mode for the service. + // + // Possible values: + // "SCALING_MODE_UNSPECIFIED" - Unspecified. + // "AUTOMATIC" - Scale based on traffic between min and max instances. + // "MANUAL" - Scale to exactly min instances and ignore max instances. + ScalingMode string `json:"scalingMode,omitempty"` // ForceSendFields is a list of field names (e.g. "MinInstanceCount") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. 
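
This hunk introduces the GoogleCloudRunV2ServiceMesh message and a ScalingMode enum on service-level scaling. A sketch of how the two new knobs compose on a Service, assuming the vendored package is imported as `run` and that GoogleCloudRunV2Service carries the usual Template and Scaling fields (project and mesh names are placeholders):

    package example

    import run "google.golang.org/api/run/v2"

    // meshEnabledService sketches a Service that joins a Cloud Service Mesh
    // and pins scaling to exactly MinInstanceCount instances ("MANUAL").
    func meshEnabledService() *run.GoogleCloudRunV2Service {
        return &run.GoogleCloudRunV2Service{
            Template: &run.GoogleCloudRunV2RevisionTemplate{
                ServiceMesh: &run.GoogleCloudRunV2ServiceMesh{
                    Mesh: "projects/my-project/locations/global/meshes/my-mesh", // placeholder
                },
            },
            Scaling: &run.GoogleCloudRunV2ServiceScaling{
                MinInstanceCount: 2,
                ScalingMode:      "MANUAL", // ignore max instances, hold exactly min
            },
        }
    }
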
See @@ -2303,9 +2418,113 @@ type GoogleCloudRunV2ServiceScaling struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2ServiceScaling) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2ServiceScaling) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2ServiceScaling - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// GoogleCloudRunV2StorageSource: Location of the source in an archive file in +// Google Cloud Storage. +type GoogleCloudRunV2StorageSource struct { + // Bucket: Required. Google Cloud Storage bucket containing the source (see + // Bucket Name Requirements + // (https://cloud.google.com/storage/docs/bucket-naming#requirements)). + Bucket string `json:"bucket,omitempty"` + // Generation: Optional. Google Cloud Storage generation for the object. If the + // generation is omitted, the latest generation will be used. + Generation int64 `json:"generation,omitempty,string"` + // Object: Required. Google Cloud Storage object containing the source. This + // object must be a gzipped archive file (`.tar.gz`) containing source to + // build. + Object string `json:"object,omitempty"` + // ForceSendFields is a list of field names (e.g. "Bucket") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Bucket") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleCloudRunV2StorageSource) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudRunV2StorageSource + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// GoogleCloudRunV2SubmitBuildRequest: Request message for submitting a Build. +type GoogleCloudRunV2SubmitBuildRequest struct { + // BuildpackBuild: Build the source using Buildpacks. + BuildpackBuild *GoogleCloudRunV2BuildpacksBuild `json:"buildpackBuild,omitempty"` + // DockerBuild: Build the source using Docker. This means the source has a + // Dockerfile. + DockerBuild *GoogleCloudRunV2DockerBuild `json:"dockerBuild,omitempty"` + // ImageUri: Required. Artifact Registry URI to store the built image. + ImageUri string `json:"imageUri,omitempty"` + // ServiceAccount: Optional. The service account to use for the build. If not + // set, the default Cloud Build service account for the project will be used. + ServiceAccount string `json:"serviceAccount,omitempty"` + // StorageSource: Required. Source for the build. + StorageSource *GoogleCloudRunV2StorageSource `json:"storageSource,omitempty"` + // Tags: Optional. Additional tags to annotate the build. + Tags []string `json:"tags,omitempty"` + // WorkerPool: Optional. Name of the Cloud Build Custom Worker Pool that should + // be used to build the function. The format of this field is + // `projects/{project}/locations/{region}/workerPools/{workerPool}` where + // `{project}` and `{region}` are the project id and region respectively where + // the worker pool is defined and `{workerPool}` is the short name of the + // worker pool. 
+ WorkerPool string `json:"workerPool,omitempty"` + // ForceSendFields is a list of field names (e.g. "BuildpackBuild") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "BuildpackBuild") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleCloudRunV2SubmitBuildRequest) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudRunV2SubmitBuildRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// GoogleCloudRunV2SubmitBuildResponse: Response message for submitting a +// Build. +type GoogleCloudRunV2SubmitBuildResponse struct { + // BaseImageUri: URI of the base builder image in Artifact Registry being used + // in the build. Used to opt into automatic base image updates. + BaseImageUri string `json:"baseImageUri,omitempty"` + // BaseImageWarning: Warning message for the base image. + BaseImageWarning string `json:"baseImageWarning,omitempty"` + // BuildOperation: Cloud Build operation to be polled via CloudBuild API. + BuildOperation *GoogleLongrunningOperation `json:"buildOperation,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "BaseImageUri") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "BaseImageUri") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
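
GoogleCloudRunV2StorageSource and GoogleCloudRunV2SubmitBuildRequest are new surface for submitting source builds directly through the Run API. A sketch of a Docker-based request, assuming the vendored package is imported as `run` (bucket, object, and image URI are placeholders; the object must be a gzipped tar archive per the field docs); the Submit call that consumes it appears further down in this diff:

    package example

    import run "google.golang.org/api/run/v2"

    // dockerBuildRequest sketches a build of a source archive that
    // contains a Dockerfile, pushed to Artifact Registry on success.
    func dockerBuildRequest() *run.GoogleCloudRunV2SubmitBuildRequest {
        return &run.GoogleCloudRunV2SubmitBuildRequest{
            ImageUri: "us-docker.pkg.dev/my-project/my-repo/app:latest", // placeholder
            StorageSource: &run.GoogleCloudRunV2StorageSource{
                Bucket: "my-build-bucket", // placeholder
                Object: "source.tar.gz",   // must be a .tar.gz archive
            },
            DockerBuild: &run.GoogleCloudRunV2DockerBuild{}, // source has a Dockerfile
        }
    }
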
+ NullFields []string `json:"-"` +} + +func (s GoogleCloudRunV2SubmitBuildResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudRunV2SubmitBuildResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2TCPSocketAction: TCPSocketAction describes an action based @@ -2328,9 +2547,9 @@ type GoogleCloudRunV2TCPSocketAction struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2TCPSocketAction) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2TCPSocketAction) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2TCPSocketAction - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2Task: Task represents a single run of a container to @@ -2462,9 +2681,9 @@ type GoogleCloudRunV2Task struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2Task) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2Task) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2Task - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2TaskAttemptResult: Result of a task attempt. @@ -2489,9 +2708,9 @@ type GoogleCloudRunV2TaskAttemptResult struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2TaskAttemptResult) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2TaskAttemptResult) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2TaskAttemptResult - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2TaskTemplate: TaskTemplate describes the data a task should @@ -2544,9 +2763,9 @@ type GoogleCloudRunV2TaskTemplate struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2TaskTemplate) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2TaskTemplate) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2TaskTemplate - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2TrafficTarget: Holds a single traffic routing entry for the @@ -2585,9 +2804,9 @@ type GoogleCloudRunV2TrafficTarget struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2TrafficTarget) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2TrafficTarget) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2TrafficTarget - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2TrafficTargetStatus: Represents the observed state of a @@ -2625,9 +2844,9 @@ type GoogleCloudRunV2TrafficTargetStatus struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2TrafficTargetStatus) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2TrafficTargetStatus) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2TrafficTargetStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2VersionToPath: VersionToPath maps a specific version of a @@ -2662,9 +2881,9 @@ type GoogleCloudRunV2VersionToPath struct { NullFields []string 
`json:"-"` } -func (s *GoogleCloudRunV2VersionToPath) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2VersionToPath) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2VersionToPath - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2Volume: Volume represents a named volume in a container. @@ -2697,9 +2916,9 @@ type GoogleCloudRunV2Volume struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2Volume) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2Volume) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2Volume - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2VolumeMount: VolumeMount describes a mounting of a Volume @@ -2726,9 +2945,9 @@ type GoogleCloudRunV2VolumeMount struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2VolumeMount) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2VolumeMount) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2VolumeMount - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudRunV2VpcAccess: VPC Access settings. For more information on @@ -2736,8 +2955,8 @@ func (s *GoogleCloudRunV2VolumeMount) MarshalJSON() ([]byte, error) { // https://cloud.google.com/run/docs/configuring/connecting-vpc. type GoogleCloudRunV2VpcAccess struct { // Connector: VPC Access connector name. Format: - // projects/{project}/locations/{location}/connectors/{connector}, where - // {project} can be project id or number. For more information on sending + // `projects/{project}/locations/{location}/connectors/{connector}`, where + // `{project}` can be project id or number. For more information on sending // traffic to a VPC network via a connector, visit // https://cloud.google.com/run/docs/configuring/vpc-connectors. 
Connector string `json:"connector,omitempty"` @@ -2766,9 +2985,9 @@ type GoogleCloudRunV2VpcAccess struct { NullFields []string `json:"-"` } -func (s *GoogleCloudRunV2VpcAccess) MarshalJSON() ([]byte, error) { +func (s GoogleCloudRunV2VpcAccess) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudRunV2VpcAccess - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1ApprovalConfig: ApprovalConfig describes @@ -2791,9 +3010,9 @@ type GoogleDevtoolsCloudbuildV1ApprovalConfig struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1ApprovalConfig) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1ApprovalConfig) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1ApprovalConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1ApprovalResult: ApprovalResult describes the @@ -2831,9 +3050,9 @@ type GoogleDevtoolsCloudbuildV1ApprovalResult struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1ApprovalResult) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1ApprovalResult) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1ApprovalResult - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1ArtifactObjects: Files in the workspace to upload @@ -2863,9 +3082,9 @@ type GoogleDevtoolsCloudbuildV1ArtifactObjects struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1ArtifactObjects) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1ArtifactObjects) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1ArtifactObjects - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1Artifacts: Artifacts produced by a build that @@ -2915,9 +3134,9 @@ type GoogleDevtoolsCloudbuildV1Artifacts struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1Artifacts) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1Artifacts) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1Artifacts - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1Build: A build resource in the Cloud Build API. 
At @@ -3054,9 +3273,9 @@ type GoogleDevtoolsCloudbuildV1Build struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1Build) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1Build) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1Build - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1BuildApproval: BuildApproval describes a build's @@ -3088,9 +3307,9 @@ type GoogleDevtoolsCloudbuildV1BuildApproval struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1BuildApproval) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1BuildApproval) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1BuildApproval - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1BuildOperationMetadata: Metadata for build @@ -3111,9 +3330,9 @@ type GoogleDevtoolsCloudbuildV1BuildOperationMetadata struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1BuildOperationMetadata) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1BuildOperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1BuildOperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1BuildOptions: Optional arguments to enable @@ -3130,6 +3349,8 @@ type GoogleDevtoolsCloudbuildV1BuildOptions struct { // "REGIONAL_USER_OWNED_BUCKET" - Bucket is located in user-owned project in // the same region as the build. The builder service account must have access // to create and write to Cloud Storage buckets in the build project. + // "LEGACY_BUCKET" - Bucket is located in a Google-owned project and is not + // regionalized. DefaultLogsBucketBehavior string `json:"defaultLogsBucketBehavior,omitempty"` // DiskSizeGb: Requested disk size for the VM that runs the build. Note that // this is *NOT* "disk free"; some of the space will be used by the operating @@ -3238,9 +3459,9 @@ type GoogleDevtoolsCloudbuildV1BuildOptions struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1BuildOptions) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1BuildOptions) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1BuildOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1BuildStep: A step in the build pipeline. @@ -3358,9 +3579,9 @@ type GoogleDevtoolsCloudbuildV1BuildStep struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1BuildStep) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1BuildStep) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1BuildStep - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1BuiltImage: An image built by the pipeline. 
@@ -3386,9 +3607,9 @@ type GoogleDevtoolsCloudbuildV1BuiltImage struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1BuiltImage) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1BuiltImage) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1BuiltImage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1ConnectedRepository: Location of the source in a @@ -3416,9 +3637,9 @@ type GoogleDevtoolsCloudbuildV1ConnectedRepository struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1ConnectedRepository) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1ConnectedRepository) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1ConnectedRepository - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1DeveloperConnectConfig: This config defines the @@ -3446,9 +3667,9 @@ type GoogleDevtoolsCloudbuildV1DeveloperConnectConfig struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1DeveloperConnectConfig) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1DeveloperConnectConfig) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1DeveloperConnectConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1FailureInfo: A fatal problem encountered during @@ -3480,9 +3701,9 @@ type GoogleDevtoolsCloudbuildV1FailureInfo struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1FailureInfo) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1FailureInfo) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1FailureInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1FileHashes: Container message for hashes of byte @@ -3504,39 +3725,9 @@ type GoogleDevtoolsCloudbuildV1FileHashes struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1FileHashes) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1FileHashes) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1FileHashes - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) -} - -// GoogleDevtoolsCloudbuildV1GCSLocation: Represents a storage location in -// Cloud Storage -type GoogleDevtoolsCloudbuildV1GCSLocation struct { - // Bucket: Cloud Storage bucket. See - // https://cloud.google.com/storage/docs/naming#requirements - Bucket string `json:"bucket,omitempty"` - // Generation: Cloud Storage generation for the object. If the generation is - // omitted, the latest generation will be used. - Generation int64 `json:"generation,omitempty,string"` - // Object: Cloud Storage object. See - // https://cloud.google.com/storage/docs/naming#objectnames - Object string `json:"object,omitempty"` - // ForceSendFields is a list of field names (e.g. "Bucket") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. 
See - // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more - // details. - ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Bucket") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See - // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. - NullFields []string `json:"-"` -} - -func (s *GoogleDevtoolsCloudbuildV1GCSLocation) MarshalJSON() ([]byte, error) { - type NoMethod GoogleDevtoolsCloudbuildV1GCSLocation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1GitConfig: GitConfig is a configuration for git @@ -3557,9 +3748,9 @@ type GoogleDevtoolsCloudbuildV1GitConfig struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1GitConfig) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1GitConfig) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1GitConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1GitSource: Location of the source in any @@ -3593,9 +3784,9 @@ type GoogleDevtoolsCloudbuildV1GitSource struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1GitSource) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1GitSource) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1GitSource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1Hash: Container message for hash values. @@ -3623,20 +3814,20 @@ type GoogleDevtoolsCloudbuildV1Hash struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1Hash) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1Hash) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1Hash - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1HttpConfig: HttpConfig is a configuration for HTTP // related git operations. type GoogleDevtoolsCloudbuildV1HttpConfig struct { // ProxySecretVersionName: SecretVersion resource of the HTTP proxy URL. The - // proxy URL should be in format protocol://@]proxyhost[:port]. + // Service Account used in the build (either the default Service Account or + // user-specified Service Account) should have `secretmanager.versions.access` + // permissions on this secret. The proxy URL should be in format + // `protocol://@]proxyhost[:port]`. ProxySecretVersionName string `json:"proxySecretVersionName,omitempty"` - // ProxySslCaInfo: Optional. Cloud Storage object storing the certificate to - // use with the HTTP proxy. - ProxySslCaInfo *GoogleDevtoolsCloudbuildV1GCSLocation `json:"proxySslCaInfo,omitempty"` // ForceSendFields is a list of field names (e.g. "ProxySecretVersionName") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. 
See @@ -3650,9 +3841,9 @@ type GoogleDevtoolsCloudbuildV1HttpConfig struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1HttpConfig) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1HttpConfig) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1HttpConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1InlineSecret: Pairs a set of secret environment @@ -3681,9 +3872,9 @@ type GoogleDevtoolsCloudbuildV1InlineSecret struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1InlineSecret) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1InlineSecret) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1InlineSecret - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1MavenArtifact: A Maven artifact to upload to @@ -3721,9 +3912,9 @@ type GoogleDevtoolsCloudbuildV1MavenArtifact struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1MavenArtifact) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1MavenArtifact) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1MavenArtifact - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1NpmPackage: Npm package to upload to Artifact @@ -3749,9 +3940,9 @@ type GoogleDevtoolsCloudbuildV1NpmPackage struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1NpmPackage) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1NpmPackage) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1NpmPackage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1PoolOption: Details about how a build should be @@ -3776,9 +3967,9 @@ type GoogleDevtoolsCloudbuildV1PoolOption struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1PoolOption) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1PoolOption) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1PoolOption - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1PythonPackage: Python package to upload to @@ -3806,9 +3997,9 @@ type GoogleDevtoolsCloudbuildV1PythonPackage struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1PythonPackage) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1PythonPackage) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1PythonPackage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1RepoSource: Location of the source in a Google @@ -3852,9 +4043,9 @@ type GoogleDevtoolsCloudbuildV1RepoSource struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1RepoSource) MarshalJSON() ([]byte, error) { +func (s 
GoogleDevtoolsCloudbuildV1RepoSource) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1RepoSource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1Results: Artifacts created by the build pipeline. @@ -3902,9 +4093,9 @@ type GoogleDevtoolsCloudbuildV1Results struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1Results) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1Results) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1Results - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1Secret: Pairs a set of secret environment @@ -3934,9 +4125,9 @@ type GoogleDevtoolsCloudbuildV1Secret struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1Secret) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1Secret) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1Secret - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1SecretManagerSecret: Pairs a secret environment @@ -3962,9 +4153,9 @@ type GoogleDevtoolsCloudbuildV1SecretManagerSecret struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1SecretManagerSecret) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1SecretManagerSecret) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1SecretManagerSecret - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1Secrets: Secrets and secret environment variables. @@ -3988,9 +4179,9 @@ type GoogleDevtoolsCloudbuildV1Secrets struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1Secrets) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1Secrets) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1Secrets - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1Source: Location of the source in a supported @@ -4027,9 +4218,9 @@ type GoogleDevtoolsCloudbuildV1Source struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1Source) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1Source) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1Source - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1SourceProvenance: Provenance of the source. 
Ways @@ -4073,9 +4264,9 @@ type GoogleDevtoolsCloudbuildV1SourceProvenance struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1SourceProvenance) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1SourceProvenance) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1SourceProvenance - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1StorageSource: Location of the source in an @@ -4114,9 +4305,9 @@ type GoogleDevtoolsCloudbuildV1StorageSource struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1StorageSource) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1StorageSource) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1StorageSource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1StorageSourceManifest: Location of the source @@ -4146,9 +4337,9 @@ type GoogleDevtoolsCloudbuildV1StorageSourceManifest struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1StorageSourceManifest) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1StorageSourceManifest) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1StorageSourceManifest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1TimeSpan: Start and end times for a build @@ -4171,9 +4362,9 @@ type GoogleDevtoolsCloudbuildV1TimeSpan struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1TimeSpan) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1TimeSpan) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1TimeSpan - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1UploadedMavenArtifact: A Maven artifact uploaded @@ -4199,9 +4390,9 @@ type GoogleDevtoolsCloudbuildV1UploadedMavenArtifact struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1UploadedMavenArtifact) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1UploadedMavenArtifact) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1UploadedMavenArtifact - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1UploadedNpmPackage: An npm package uploaded to @@ -4227,9 +4418,9 @@ type GoogleDevtoolsCloudbuildV1UploadedNpmPackage struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1UploadedNpmPackage) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1UploadedNpmPackage) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1UploadedNpmPackage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1UploadedPythonPackage: Artifact uploaded using the @@ -4255,9 +4446,9 @@ type GoogleDevtoolsCloudbuildV1UploadedPythonPackage struct { NullFields 
[]string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1UploadedPythonPackage) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1UploadedPythonPackage) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1UploadedPythonPackage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1Volume: Volume describes a Docker container volume @@ -4285,9 +4476,9 @@ type GoogleDevtoolsCloudbuildV1Volume struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1Volume) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1Volume) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1Volume - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleDevtoolsCloudbuildV1Warning: A non-fatal problem encountered during @@ -4316,9 +4507,9 @@ type GoogleDevtoolsCloudbuildV1Warning struct { NullFields []string `json:"-"` } -func (s *GoogleDevtoolsCloudbuildV1Warning) MarshalJSON() ([]byte, error) { +func (s GoogleDevtoolsCloudbuildV1Warning) MarshalJSON() ([]byte, error) { type NoMethod GoogleDevtoolsCloudbuildV1Warning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleIamV1AuditConfig: Specifies the audit configuration for a service. The @@ -4357,9 +4548,9 @@ type GoogleIamV1AuditConfig struct { NullFields []string `json:"-"` } -func (s *GoogleIamV1AuditConfig) MarshalJSON() ([]byte, error) { +func (s GoogleIamV1AuditConfig) MarshalJSON() ([]byte, error) { type NoMethod GoogleIamV1AuditConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleIamV1AuditLogConfig: Provides the configuration for logging a type of @@ -4392,9 +4583,9 @@ type GoogleIamV1AuditLogConfig struct { NullFields []string `json:"-"` } -func (s *GoogleIamV1AuditLogConfig) MarshalJSON() ([]byte, error) { +func (s GoogleIamV1AuditLogConfig) MarshalJSON() ([]byte, error) { type NoMethod GoogleIamV1AuditLogConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleIamV1Binding: Associates `members`, or principals, with a `role`. @@ -4491,9 +4682,9 @@ type GoogleIamV1Binding struct { NullFields []string `json:"-"` } -func (s *GoogleIamV1Binding) MarshalJSON() ([]byte, error) { +func (s GoogleIamV1Binding) MarshalJSON() ([]byte, error) { type NoMethod GoogleIamV1Binding - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleIamV1Policy: An Identity and Access Management (IAM) policy, which @@ -4583,9 +4774,9 @@ type GoogleIamV1Policy struct { NullFields []string `json:"-"` } -func (s *GoogleIamV1Policy) MarshalJSON() ([]byte, error) { +func (s GoogleIamV1Policy) MarshalJSON() ([]byte, error) { type NoMethod GoogleIamV1Policy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleIamV1SetIamPolicyRequest: Request message for `SetIamPolicy` method. 
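
The IAM types in this hunk are what the new InvokerIamDisabled flag on GoogleCloudRunV2Service opts out of: when the flag is unset, callers need roles/run.invoker granted through these bindings. A sketch of the corresponding SetIamPolicy payload, assuming the vendored package is imported as `run` (the service-account member is a placeholder):

    package example

    import run "google.golang.org/api/run/v2"

    // invokerPolicyRequest sketches granting run.routes.invoke via
    // roles/run.invoker, the permission check InvokerIamDisabled bypasses.
    func invokerPolicyRequest() *run.GoogleIamV1SetIamPolicyRequest {
        return &run.GoogleIamV1SetIamPolicyRequest{
            Policy: &run.GoogleIamV1Policy{
                Bindings: []*run.GoogleIamV1Binding{{
                    Role:    "roles/run.invoker",
                    Members: []string{"serviceAccount:caller@my-project.iam.gserviceaccount.com"}, // placeholder
                }},
            },
        }
    }
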
@@ -4612,9 +4803,9 @@ type GoogleIamV1SetIamPolicyRequest struct { NullFields []string `json:"-"` } -func (s *GoogleIamV1SetIamPolicyRequest) MarshalJSON() ([]byte, error) { +func (s GoogleIamV1SetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod GoogleIamV1SetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleIamV1TestIamPermissionsRequest: Request message for @@ -4638,9 +4829,9 @@ type GoogleIamV1TestIamPermissionsRequest struct { NullFields []string `json:"-"` } -func (s *GoogleIamV1TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { +func (s GoogleIamV1TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { type NoMethod GoogleIamV1TestIamPermissionsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleIamV1TestIamPermissionsResponse: Response message for @@ -4665,9 +4856,9 @@ type GoogleIamV1TestIamPermissionsResponse struct { NullFields []string `json:"-"` } -func (s *GoogleIamV1TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { +func (s GoogleIamV1TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleIamV1TestIamPermissionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleLongrunningListOperationsResponse: The response message for @@ -4694,9 +4885,9 @@ type GoogleLongrunningListOperationsResponse struct { NullFields []string `json:"-"` } -func (s *GoogleLongrunningListOperationsResponse) MarshalJSON() ([]byte, error) { +func (s GoogleLongrunningListOperationsResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleLongrunningListOperationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleLongrunningOperation: This resource represents a long-running @@ -4741,9 +4932,9 @@ type GoogleLongrunningOperation struct { NullFields []string `json:"-"` } -func (s *GoogleLongrunningOperation) MarshalJSON() ([]byte, error) { +func (s GoogleLongrunningOperation) MarshalJSON() ([]byte, error) { type NoMethod GoogleLongrunningOperation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleLongrunningWaitOperationRequest: The request message for @@ -4766,9 +4957,9 @@ type GoogleLongrunningWaitOperationRequest struct { NullFields []string `json:"-"` } -func (s *GoogleLongrunningWaitOperationRequest) MarshalJSON() ([]byte, error) { +func (s GoogleLongrunningWaitOperationRequest) MarshalJSON() ([]byte, error) { type NoMethod GoogleLongrunningWaitOperationRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleProtobufEmpty: A generic empty message that you can re-use to avoid @@ -4810,9 +5001,9 @@ type GoogleRpcStatus struct { NullFields []string `json:"-"` } -func (s *GoogleRpcStatus) MarshalJSON() ([]byte, error) { +func (s GoogleRpcStatus) MarshalJSON() ([]byte, error) { type NoMethod GoogleRpcStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return 
gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleTypeExpr: Represents a textual expression in the Common Expression @@ -4858,9 +5049,9 @@ type GoogleTypeExpr struct { NullFields []string `json:"-"` } -func (s *GoogleTypeExpr) MarshalJSON() ([]byte, error) { +func (s GoogleTypeExpr) MarshalJSON() ([]byte, error) { type NoMethod GoogleTypeExpr - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Proto2BridgeMessageSet: This is proto2's version of MessageSet. @@ -4897,9 +5088,9 @@ type UtilStatusProto struct { NullFields []string `json:"-"` } -func (s *UtilStatusProto) MarshalJSON() ([]byte, error) { +func (s UtilStatusProto) MarshalJSON() ([]byte, error) { type NoMethod UtilStatusProto - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ProjectsLocationsExportImageCall struct { @@ -5237,6 +5428,221 @@ func (c *ProjectsLocationsExportMetadataCall) Do(opts ...googleapi.CallOption) ( return ret, nil } +type ProjectsLocationsExportProjectMetadataCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// ExportProjectMetadata: Export generated customer metadata for a given +// project. +// +// - name: The name of the project of which metadata should be exported. +// Format: `projects/{project_id_or_number}/locations/{location}` for Project +// in a given location. +func (r *ProjectsLocationsService) ExportProjectMetadata(name string) *ProjectsLocationsExportProjectMetadataCall { + c := &ProjectsLocationsExportProjectMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsExportProjectMetadataCall) Fields(s ...googleapi.Field) *ProjectsLocationsExportProjectMetadataCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsLocationsExportProjectMetadataCall) IfNoneMatch(entityTag string) *ProjectsLocationsExportProjectMetadataCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsLocationsExportProjectMetadataCall) Context(ctx context.Context) *ProjectsLocationsExportProjectMetadataCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
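Each struct in these hunks carries ForceSendFields and NullFields, which the marshal helper consults: with `omitempty`, a zero value is indistinguishable from "unset", so callers list fields they want sent anyway, or sent as an explicit JSON null. A toy illustration of those semantics with a hypothetical struct, not the real gensupport implementation:

package main

import (
	"encoding/json"
	"fmt"
)

// Scaling is a hypothetical request struct in the generated style.
type Scaling struct {
	Replicas        int64    `json:"replicas,omitempty"`
	ForceSendFields []string `json:"-"`
	NullFields      []string `json:"-"`
}

// marshal mimics the observable behavior: start from the omitempty
// encoding, then re-add zero-valued fields named in ForceSendFields
// and emit explicit nulls for fields named in NullFields.
func marshal(s Scaling) ([]byte, error) {
	m := map[string]any{}
	if s.Replicas != 0 {
		m["replicas"] = s.Replicas
	}
	for _, f := range s.ForceSendFields {
		if f == "Replicas" {
			m["replicas"] = s.Replicas // sent even when zero
		}
	}
	for _, f := range s.NullFields {
		if f == "Replicas" {
			m["replicas"] = nil // explicit JSON null clears the field server-side
		}
	}
	return json.Marshal(m)
}

func main() {
	b, _ := marshal(Scaling{ForceSendFields: []string{"Replicas"}})
	fmt.Println(string(b)) // {"replicas":0}
}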
+func (c *ProjectsLocationsExportProjectMetadataCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsExportProjectMetadataCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}:exportProjectMetadata") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "run.projects.locations.exportProjectMetadata" call. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleCloudRunV2Metadata.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsExportProjectMetadataCall) Do(opts ...googleapi.CallOption) (*GoogleCloudRunV2Metadata, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GoogleCloudRunV2Metadata{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsLocationsBuildsSubmitCall struct { + s *Service + parent string + googlecloudrunv2submitbuildrequest *GoogleCloudRunV2SubmitBuildRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Submit: Submits a build in a given project. +// +// - parent: The project and location to build in. Location must be a region, +// e.g., 'us-central1' or 'global' if the global builder is to be used. +// Format: `projects/{project}/locations/{location}`. +func (r *ProjectsLocationsBuildsService) Submit(parent string, googlecloudrunv2submitbuildrequest *GoogleCloudRunV2SubmitBuildRequest) *ProjectsLocationsBuildsSubmitCall { + c := &ProjectsLocationsBuildsSubmitCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googlecloudrunv2submitbuildrequest = googlecloudrunv2submitbuildrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsLocationsBuildsSubmitCall) Fields(s ...googleapi.Field) *ProjectsLocationsBuildsSubmitCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
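The new ExportProjectMetadata call above follows the usual generated builder shape: optional setters return the call, Do executes it, and a 304 triggered by an IfNoneMatch precondition surfaces as a googleapi.Error that googleapi.IsNotModified recognizes. A usage sketch against this run/v2 surface; the project, location, and ETag are placeholders, and ambient Application Default Credentials are assumed:

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/api/googleapi"
	run "google.golang.org/api/run/v2"
)

func main() {
	ctx := context.Background()
	svc, err := run.NewService(ctx) // picks up Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	name := "projects/my-project/locations/us-central1" // hypothetical
	md, err := svc.Projects.Locations.ExportProjectMetadata(name).
		IfNoneMatch(`W/"etag-from-previous-poll"`). // hypothetical ETag
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("metadata unchanged since last export")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("exported metadata, HTTP %d\n", md.HTTPStatusCode)
}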
+func (c *ProjectsLocationsBuildsSubmitCall) Context(ctx context.Context) *ProjectsLocationsBuildsSubmitCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsLocationsBuildsSubmitCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBuildsSubmitCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googlecloudrunv2submitbuildrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/builds:submit") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "run.projects.locations.builds.submit" call. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleCloudRunV2SubmitBuildResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBuildsSubmitCall) Do(opts ...googleapi.CallOption) (*GoogleCloudRunV2SubmitBuildResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GoogleCloudRunV2SubmitBuildResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + type ProjectsLocationsJobsCreateCall struct { s *Service parent string diff --git a/terraform/providers/google/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-api.json b/terraform/providers/google/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-api.json index 3bb2cc61c41..06f683fb598 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-api.json @@ -830,7 +830,7 @@ } } }, - "revision": "20240607", + "revision": "20240927", "rootUrl": "https://servicemanagement.googleapis.com/", "schemas": { "Advice": { @@ -1283,6 +1283,10 @@ "deprecated": true, "description": "Link to automatically generated reference documentation. Example: https://cloud.google.com/nodejs/docs/reference/asset/latest", "type": "string" + }, + "selectiveGapicGeneration": { + "$ref": "SelectiveGapicGeneration", + "description": "Configuration for which RPCs should be generated in the GAPIC client." 
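The builds:submit surface added above is a plain POST: Submit takes the parent and a GoogleCloudRunV2SubmitBuildRequest, serializes it as the JSON body, and decodes a GoogleCloudRunV2SubmitBuildResponse. A hedged sketch of invoking it; the parent is a placeholder, and the request is left empty because its fields are defined outside this hunk:

package main

import (
	"context"
	"fmt"
	"log"

	run "google.golang.org/api/run/v2"
)

func main() {
	ctx := context.Background()
	svc, err := run.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	parent := "projects/my-project/locations/us-central1" // hypothetical
	// An empty request will be rejected by the API; populate the build
	// source and image fields from the generated struct before
	// submitting for real.
	req := &run.GoogleCloudRunV2SubmitBuildRequest{}
	resp, err := svc.Projects.Locations.Builds.Submit(parent, req).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("build submitted, HTTP %d\n", resp.HTTPStatusCode)
}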
} }, "type": "object" @@ -1427,14 +1431,14 @@ "type": "array" }, "provided": { - "description": "A list of full type names of provided contexts.", + "description": "A list of full type names of provided contexts. It is used to support propagating HTTP headers and ETags from the response extension.", "items": { "type": "string" }, "type": "array" }, "requested": { - "description": "A list of full type names of requested contexts.", + "description": "A list of full type names of requested contexts, only the requested context will be made available to the backend.", "items": { "type": "string" }, @@ -1775,6 +1779,17 @@ }, "type": "object" }, + "ExperimentalFeatures": { + "description": "Experimental features to be included during client library generation. These fields will be deprecated once the feature graduates and is enabled by default.", + "id": "ExperimentalFeatures", + "properties": { + "restAsyncIoEnabled": { + "description": "Enables generation of asynchronous REST clients if `rest` transport is enabled. By default, asynchronous REST clients will not be generated. This feature will be enabled by default 1 month after launching the feature in preview packages.", + "type": "boolean" + } + }, + "type": "object" + }, "Expr": { "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() \u003c 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", "id": "Expr", @@ -2043,7 +2058,7 @@ "type": "object" }, "HttpRule": { - "description": "gRPC Transcoding gRPC Transcoding is a feature for mapping between a gRPC method and one or more HTTP REST endpoints. It allows developers to build a single API service that supports both gRPC APIs and REST APIs. Many systems, including [Google APIs](https://github.com/googleapis/googleapis), [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC Gateway](https://github.com/grpc-ecosystem/grpc-gateway), and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature and use it for large scale production services. `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies how different portions of the gRPC request message are mapped to the URL path, URL query parameters, and HTTP request body. It also controls how the gRPC response message is mapped to the HTTP response body. `HttpRule` is typically specified as an `google.api.http` annotation on the gRPC method. Each mapping specifies a URL path template and an HTTP method. 
The path template may refer to one or more fields in the gRPC request message, as long as each field is a non-repeated field with a primitive (non-message) type. The path template controls how fields of the request message are mapped to the URL path. Example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get: \"/v1/{name=messages/*}\" }; } } message GetMessageRequest { string name = 1; // Mapped to URL path. } message Message { string text = 1; // The resource content. } This enables an HTTP REST to gRPC mapping as below: - HTTP: `GET /v1/messages/123456` - gRPC: `GetMessage(name: \"messages/123456\")` Any fields in the request message which are not bound by the path template automatically become HTTP query parameters if there is no HTTP request body. For example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get:\"/v1/messages/{message_id}\" }; } } message GetMessageRequest { message SubMessage { string subfield = 1; } string message_id = 1; // Mapped to URL path. int64 revision = 2; // Mapped to URL query parameter `revision`. SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. } This enables a HTTP JSON to RPC mapping as below: - HTTP: `GET /v1/messages/123456?revision=2\u0026sub.subfield=foo` - gRPC: `GetMessage(message_id: \"123456\" revision: 2 sub: SubMessage(subfield: \"foo\"))` Note that fields which are mapped to URL query parameters must have a primitive type or a repeated primitive type or a non-repeated message type. In the case of a repeated type, the parameter can be repeated in the URL as `...?param=A\u0026param=B`. In the case of a message type, each field of the message is mapped to a separate parameter, such as `...?foo.a=A\u0026foo.b=B\u0026foo.c=C`. For HTTP methods that allow a request body, the `body` field specifies the mapping. Consider a REST update method on the message resource collection: service Messaging { rpc UpdateMessage(UpdateMessageRequest) returns (Message) { option (google.api.http) = { patch: \"/v1/messages/{message_id}\" body: \"message\" }; } } message UpdateMessageRequest { string message_id = 1; // mapped to the URL Message message = 2; // mapped to the body } The following HTTP JSON to RPC mapping is enabled, where the representation of the JSON in the request body is determined by protos JSON encoding: - HTTP: `PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` - gRPC: `UpdateMessage(message_id: \"123456\" message { text: \"Hi!\" })` The special name `*` can be used in the body mapping to define that every field not bound by the path template should be mapped to the request body. This enables the following alternative definition of the update method: service Messaging { rpc UpdateMessage(Message) returns (Message) { option (google.api.http) = { patch: \"/v1/messages/{message_id}\" body: \"*\" }; } } message Message { string message_id = 1; string text = 2; } The following HTTP JSON to RPC mapping is enabled: - HTTP: `PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` - gRPC: `UpdateMessage(message_id: \"123456\" text: \"Hi!\")` Note that when using `*` in the body mapping, it is not possible to have HTTP parameters, as all fields not bound by the path end in the body. This makes this option more rarely used in practice when defining REST APIs. The common usage of `*` is in custom methods which don't use the URL at all for transferring data. 
It is possible to define multiple HTTP methods for one RPC by using the `additional_bindings` option. Example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get: \"/v1/messages/{message_id}\" additional_bindings { get: \"/v1/users/{user_id}/messages/{message_id}\" } }; } } message GetMessageRequest { string message_id = 1; string user_id = 2; } This enables the following two alternative HTTP JSON to RPC mappings: - HTTP: `GET /v1/messages/123456` - gRPC: `GetMessage(message_id: \"123456\")` - HTTP: `GET /v1/users/me/messages/123456` - gRPC: `GetMessage(user_id: \"me\" message_id: \"123456\")` Rules for HTTP mapping 1. Leaf request fields (recursive expansion nested messages in the request message) are classified into three categories: - Fields referred by the path template. They are passed via the URL path. - Fields referred by the HttpRule.body. They are passed via the HTTP request body. - All other fields are passed via the URL query parameters, and the parameter name is the field path in the request message. A repeated field can be represented as multiple query parameters under the same name. 2. If HttpRule.body is \"*\", there is no URL query parameter, all fields are passed via URL path and HTTP request body. 3. If HttpRule.body is omitted, there is no HTTP request body, all fields are passed via URL path and URL query parameters. Path template syntax Template = \"/\" Segments [ Verb ] ; Segments = Segment { \"/\" Segment } ; Segment = \"*\" | \"**\" | LITERAL | Variable ; Variable = \"{\" FieldPath [ \"=\" Segments ] \"}\" ; FieldPath = IDENT { \".\" IDENT } ; Verb = \":\" LITERAL ; The syntax `*` matches a single URL path segment. The syntax `**` matches zero or more URL path segments, which must be the last part of the URL path except the `Verb`. The syntax `Variable` matches part of the URL path as specified by its template. A variable template must not contain other variables. If a variable matches a single path segment, its template may be omitted, e.g. `{var}` is equivalent to `{var=*}`. The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` contains any reserved character, such characters should be percent-encoded before the matching. If a variable contains exactly one path segment, such as `\"{var}\"` or `\"{var=*}\"`, when such a variable is expanded into a URL path on the client side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The server side does the reverse decoding. Such variables show up in the [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) as `{var}`. If a variable contains multiple path segments, such as `\"{var=foo/*}\"` or `\"{var=**}\"`, when such a variable is expanded into a URL path on the client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. The server side does the reverse decoding, except \"%2F\" and \"%2f\" are left unchanged. Such variables show up in the [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) as `{+var}`. Using gRPC API Service Configuration gRPC API Service Configuration (service config) is a configuration language for configuring a gRPC service to become a user-facing product. The service config is simply the YAML representation of the `google.api.Service` proto message. As an alternative to annotating your proto file, you can configure gRPC transcoding in your service config YAML files. 
You do this by specifying a `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same effect as the proto annotation. This can be particularly useful if you have a proto that is reused in multiple services. Note that any transcoding specified in the service config will override any matching transcoding configuration in the proto. Example below selects a gRPC method and applies HttpRule to it. http: rules: - selector: example.v1.Messaging.GetMessage get: /v1/messages/{message_id}/{sub.subfield} Special notes When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the proto to JSON conversion must follow the [proto3 specification](https://developers.google.com/protocol-buffers/docs/proto3#json). While the single segment variable follows the semantics of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String Expansion, the multi segment variable **does not** follow RFC 6570 Section 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion does not expand special characters like `?` and `#`, which would lead to invalid URLs. As the result, gRPC Transcoding uses a custom encoding for multi segment variables. The path variables **must not** refer to any repeated or mapped field, because client libraries are not capable of handling such variable expansion. The path variables **must not** capture the leading \"/\" character. The reason is that the most common use case \"{var}\" does not capture the leading \"/\" character. For consistency, all path variables must share the same behavior. Repeated message fields must not be mapped to URL query parameters, because no client library can support such complicated mapping. If an API needs to use a JSON array for request or response body, it can map the request or response body to a repeated field. However, some gRPC Transcoding implementations may not support this feature.", + "description": "gRPC Transcoding gRPC Transcoding is a feature for mapping between a gRPC method and one or more HTTP REST endpoints. It allows developers to build a single API service that supports both gRPC APIs and REST APIs. Many systems, including [Google APIs](https://github.com/googleapis/googleapis), [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC Gateway](https://github.com/grpc-ecosystem/grpc-gateway), and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature and use it for large scale production services. `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies how different portions of the gRPC request message are mapped to the URL path, URL query parameters, and HTTP request body. It also controls how the gRPC response message is mapped to the HTTP response body. `HttpRule` is typically specified as an `google.api.http` annotation on the gRPC method. Each mapping specifies a URL path template and an HTTP method. The path template may refer to one or more fields in the gRPC request message, as long as each field is a non-repeated field with a primitive (non-message) type. The path template controls how fields of the request message are mapped to the URL path. Example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get: \"/v1/{name=messages/*}\" }; } } message GetMessageRequest { string name = 1; // Mapped to URL path. } message Message { string text = 1; // The resource content. 
} This enables an HTTP REST to gRPC mapping as below: - HTTP: `GET /v1/messages/123456` - gRPC: `GetMessage(name: \"messages/123456\")` Any fields in the request message which are not bound by the path template automatically become HTTP query parameters if there is no HTTP request body. For example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get:\"/v1/messages/{message_id}\" }; } } message GetMessageRequest { message SubMessage { string subfield = 1; } string message_id = 1; // Mapped to URL path. int64 revision = 2; // Mapped to URL query parameter `revision`. SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. } This enables a HTTP JSON to RPC mapping as below: - HTTP: `GET /v1/messages/123456?revision=2\u0026sub.subfield=foo` - gRPC: `GetMessage(message_id: \"123456\" revision: 2 sub: SubMessage(subfield: \"foo\"))` Note that fields which are mapped to URL query parameters must have a primitive type or a repeated primitive type or a non-repeated message type. In the case of a repeated type, the parameter can be repeated in the URL as `...?param=A\u0026param=B`. In the case of a message type, each field of the message is mapped to a separate parameter, such as `...?foo.a=A\u0026foo.b=B\u0026foo.c=C`. For HTTP methods that allow a request body, the `body` field specifies the mapping. Consider a REST update method on the message resource collection: service Messaging { rpc UpdateMessage(UpdateMessageRequest) returns (Message) { option (google.api.http) = { patch: \"/v1/messages/{message_id}\" body: \"message\" }; } } message UpdateMessageRequest { string message_id = 1; // mapped to the URL Message message = 2; // mapped to the body } The following HTTP JSON to RPC mapping is enabled, where the representation of the JSON in the request body is determined by protos JSON encoding: - HTTP: `PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` - gRPC: `UpdateMessage(message_id: \"123456\" message { text: \"Hi!\" })` The special name `*` can be used in the body mapping to define that every field not bound by the path template should be mapped to the request body. This enables the following alternative definition of the update method: service Messaging { rpc UpdateMessage(Message) returns (Message) { option (google.api.http) = { patch: \"/v1/messages/{message_id}\" body: \"*\" }; } } message Message { string message_id = 1; string text = 2; } The following HTTP JSON to RPC mapping is enabled: - HTTP: `PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` - gRPC: `UpdateMessage(message_id: \"123456\" text: \"Hi!\")` Note that when using `*` in the body mapping, it is not possible to have HTTP parameters, as all fields not bound by the path end in the body. This makes this option more rarely used in practice when defining REST APIs. The common usage of `*` is in custom methods which don't use the URL at all for transferring data. It is possible to define multiple HTTP methods for one RPC by using the `additional_bindings` option. 
Example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get: \"/v1/messages/{message_id}\" additional_bindings { get: \"/v1/users/{user_id}/messages/{message_id}\" } }; } } message GetMessageRequest { string message_id = 1; string user_id = 2; } This enables the following two alternative HTTP JSON to RPC mappings: - HTTP: `GET /v1/messages/123456` - gRPC: `GetMessage(message_id: \"123456\")` - HTTP: `GET /v1/users/me/messages/123456` - gRPC: `GetMessage(user_id: \"me\" message_id: \"123456\")` Rules for HTTP mapping 1. Leaf request fields (recursive expansion nested messages in the request message) are classified into three categories: - Fields referred by the path template. They are passed via the URL path. - Fields referred by the HttpRule.body. They are passed via the HTTP request body. - All other fields are passed via the URL query parameters, and the parameter name is the field path in the request message. A repeated field can be represented as multiple query parameters under the same name. 2. If HttpRule.body is \"*\", there is no URL query parameter, all fields are passed via URL path and HTTP request body. 3. If HttpRule.body is omitted, there is no HTTP request body, all fields are passed via URL path and URL query parameters. Path template syntax Template = \"/\" Segments [ Verb ] ; Segments = Segment { \"/\" Segment } ; Segment = \"*\" | \"**\" | LITERAL | Variable ; Variable = \"{\" FieldPath [ \"=\" Segments ] \"}\" ; FieldPath = IDENT { \".\" IDENT } ; Verb = \":\" LITERAL ; The syntax `*` matches a single URL path segment. The syntax `**` matches zero or more URL path segments, which must be the last part of the URL path except the `Verb`. The syntax `Variable` matches part of the URL path as specified by its template. A variable template must not contain other variables. If a variable matches a single path segment, its template may be omitted, e.g. `{var}` is equivalent to `{var=*}`. The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` contains any reserved character, such characters should be percent-encoded before the matching. If a variable contains exactly one path segment, such as `\"{var}\"` or `\"{var=*}\"`, when such a variable is expanded into a URL path on the client side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The server side does the reverse decoding. Such variables show up in the [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) as `{var}`. If a variable contains multiple path segments, such as `\"{var=foo/*}\"` or `\"{var=**}\"`, when such a variable is expanded into a URL path on the client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. The server side does the reverse decoding, except \"%2F\" and \"%2f\" are left unchanged. Such variables show up in the [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) as `{+var}`. Using gRPC API Service Configuration gRPC API Service Configuration (service config) is a configuration language for configuring a gRPC service to become a user-facing product. The service config is simply the YAML representation of the `google.api.Service` proto message. As an alternative to annotating your proto file, you can configure gRPC transcoding in your service config YAML files. You do this by specifying a `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same effect as the proto annotation. 
This can be particularly useful if you have a proto that is reused in multiple services. Note that any transcoding specified in the service config will override any matching transcoding configuration in the proto. The following example selects a gRPC method and applies an `HttpRule` to it: http: rules: - selector: example.v1.Messaging.GetMessage get: /v1/messages/{message_id}/{sub.subfield} Special notes When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the proto to JSON conversion must follow the [proto3 specification](https://developers.google.com/protocol-buffers/docs/proto3#json). While the single segment variable follows the semantics of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String Expansion, the multi segment variable **does not** follow RFC 6570 Section 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion does not expand special characters like `?` and `#`, which would lead to invalid URLs. As the result, gRPC Transcoding uses a custom encoding for multi segment variables. The path variables **must not** refer to any repeated or mapped field, because client libraries are not capable of handling such variable expansion. The path variables **must not** capture the leading \"/\" character. The reason is that the most common use case \"{var}\" does not capture the leading \"/\" character. For consistency, all path variables must share the same behavior. Repeated message fields must not be mapped to URL query parameters, because no client library can support such complicated mapping. If an API needs to use a JSON array for request or response body, it can map the request or response body to a repeated field. However, some gRPC Transcoding implementations may not support this feature.", "id": "HttpRule", "properties": { "additionalBindings": { @@ -2578,6 +2593,25 @@ "description": "The sampling period of metric data points. For metrics which are written periodically, consecutive data points are stored at this time interval, excluding data loss due to errors. Metrics with a higher granularity have a smaller sampling period.", "format": "google-duration", "type": "string" + }, + "timeSeriesResourceHierarchyLevel": { + "description": "The scope of the timeseries data of the metric.", + "items": { + "enum": [ + "TIME_SERIES_RESOURCE_HIERARCHY_LEVEL_UNSPECIFIED", + "PROJECT", + "ORGANIZATION", + "FOLDER" + ], + "enumDescriptions": [ + "Do not use this default value.", + "Scopes a metric to a project.", + "Scopes a metric to an organization.", + "Scopes a metric to a folder." + ], + "type": "string" + }, + "type": "array" } }, "type": "object" @@ -2602,7 +2636,7 @@ "type": "object" }, "Mixin": { - "description": "Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. 
rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inherting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... }", + "description": "Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. 
rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... }", "id": "Mixin", "properties": { "name": { @@ -2982,6 +3016,10 @@ "common": { "$ref": "CommonLanguageSettings", "description": "Some settings." + }, + "experimentalFeatures": { + "$ref": "ExperimentalFeatures", + "description": "Experimental features to be included during client library generation." } }, "type": "object" @@ -3141,6 +3179,20 @@ }, "type": "object" }, + "SelectiveGapicGeneration": { + "description": "This message is used to configure the generation of a subset of the RPCs in a service for client libraries.", + "id": "SelectiveGapicGeneration", + "properties": { + "methods": { + "description": "An allowlist of the fully qualified names of RPCs that should be included on public client surfaces.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "Service": { "description": "`Service` is the root object of Google API service configuration (service config). It describes the basic information about a logical service, such as the service name and the user-facing title, and delegates other aspects to sub-sections. Each sub-section is either a proto message or a repeated proto message that configures a specific aspect, such as auth. For more information, see each proto message definition. Example: type: google.api.Service name: calendar.googleapis.com title: Google Calendar API apis: - name: google.calendar.v3.Calendar visibility: rules: - selector: \"google.calendar.v3.*\" restriction: PREVIEW backend: rules: - selector: \"google.calendar.v3.*\" address: calendar.example.com authentication: providers: - id: google_calendar_auth jwks_uri: https://www.googleapis.com/oauth2/v1/certs issuer: https://securetoken.google.com rules: - selector: \"*\" requirements: provider_id: google_calendar_auth", "id": "Service", diff --git a/terraform/providers/google/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-gen.go b/terraform/providers/google/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-gen.go index 51b9dbdf0d5..0335684a836 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-gen.go @@ -248,9 +248,9 @@ type Advice struct { NullFields []string `json:"-"` } -func (s *Advice) MarshalJSON() ([]byte, error) { +func (s Advice) MarshalJSON() ([]byte, error) { type NoMethod Advice - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Api: Api is a light-weight descriptor for an API Interface. Interfaces are @@ -309,9 +309,9 @@ type Api struct { NullFields []string `json:"-"` } -func (s *Api) MarshalJSON() ([]byte, error) { +func (s Api) MarshalJSON() ([]byte, error) { type NoMethod Api - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuditConfig: Specifies the audit configuration for a service. 
The @@ -350,9 +350,9 @@ type AuditConfig struct { NullFields []string `json:"-"` } -func (s *AuditConfig) MarshalJSON() ([]byte, error) { +func (s AuditConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuditLogConfig: Provides the configuration for logging a type of @@ -385,9 +385,9 @@ type AuditLogConfig struct { NullFields []string `json:"-"` } -func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { +func (s AuditLogConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditLogConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuthProvider: Configuration for an authentication provider, including @@ -451,9 +451,9 @@ type AuthProvider struct { NullFields []string `json:"-"` } -func (s *AuthProvider) MarshalJSON() ([]byte, error) { +func (s AuthProvider) MarshalJSON() ([]byte, error) { type NoMethod AuthProvider - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuthRequirement: User-defined authentication requirements, including support @@ -489,9 +489,9 @@ type AuthRequirement struct { NullFields []string `json:"-"` } -func (s *AuthRequirement) MarshalJSON() ([]byte, error) { +func (s AuthRequirement) MarshalJSON() ([]byte, error) { type NoMethod AuthRequirement - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Authentication: `Authentication` defines the authentication configuration @@ -521,9 +521,9 @@ type Authentication struct { NullFields []string `json:"-"` } -func (s *Authentication) MarshalJSON() ([]byte, error) { +func (s Authentication) MarshalJSON() ([]byte, error) { type NoMethod Authentication - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuthenticationRule: Authentication rules for the service. By default, if a @@ -555,9 +555,9 @@ type AuthenticationRule struct { NullFields []string `json:"-"` } -func (s *AuthenticationRule) MarshalJSON() ([]byte, error) { +func (s AuthenticationRule) MarshalJSON() ([]byte, error) { type NoMethod AuthenticationRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Backend: `Backend` defines the backend configuration for a service. 
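Note that only MarshalJSON changes receiver kind in these hunks; UnmarshalJSON (for example on BackendRule just below) keeps its pointer receiver, because decoding must write through the receiver, and a value receiver would fill in a copy that is discarded. A minimal sketch of that asymmetry, with a hypothetical struct:

package main

import (
	"encoding/json"
	"fmt"
)

type Rule struct {
	Deadline float64 `json:"deadline"`
}

// Value receiver: fine for encoding, which only reads the struct.
func (r Rule) MarshalJSON() ([]byte, error) {
	type NoMethod Rule
	return json.Marshal(NoMethod(r))
}

// Pointer receiver: required for decoding, which mutates the struct.
// The (*NoMethod)(r) conversion reuses the same memory while shedding
// the methods, so json.Unmarshal cannot recurse into this method.
func (r *Rule) UnmarshalJSON(data []byte) error {
	type NoMethod Rule
	return json.Unmarshal(data, (*NoMethod)(r))
}

func main() {
	var r Rule
	_ = json.Unmarshal([]byte(`{"deadline": 30}`), &r)
	fmt.Println(r.Deadline) // 30
}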
@@ -578,9 +578,9 @@ type Backend struct { NullFields []string `json:"-"` } -func (s *Backend) MarshalJSON() ([]byte, error) { +func (s Backend) MarshalJSON() ([]byte, error) { type NoMethod Backend - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendRule: A backend rule provides configuration for an individual API @@ -665,9 +665,9 @@ type BackendRule struct { NullFields []string `json:"-"` } -func (s *BackendRule) MarshalJSON() ([]byte, error) { +func (s BackendRule) MarshalJSON() ([]byte, error) { type NoMethod BackendRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *BackendRule) UnmarshalJSON(data []byte) error { @@ -723,9 +723,9 @@ type Billing struct { NullFields []string `json:"-"` } -func (s *Billing) MarshalJSON() ([]byte, error) { +func (s Billing) MarshalJSON() ([]byte, error) { type NoMethod Billing - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BillingDestination: Configuration of a specific billing destination @@ -750,9 +750,9 @@ type BillingDestination struct { NullFields []string `json:"-"` } -func (s *BillingDestination) MarshalJSON() ([]byte, error) { +func (s BillingDestination) MarshalJSON() ([]byte, error) { type NoMethod BillingDestination - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Binding: Associates `members`, or principals, with a `role`. @@ -849,9 +849,9 @@ type Binding struct { NullFields []string `json:"-"` } -func (s *Binding) MarshalJSON() ([]byte, error) { +func (s Binding) MarshalJSON() ([]byte, error) { type NoMethod Binding - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ChangeReport: Change report associated with a particular service @@ -877,9 +877,9 @@ type ChangeReport struct { NullFields []string `json:"-"` } -func (s *ChangeReport) MarshalJSON() ([]byte, error) { +func (s ChangeReport) MarshalJSON() ([]byte, error) { type NoMethod ChangeReport - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ClientLibrarySettings: Details about how and where to publish client @@ -957,9 +957,9 @@ type ClientLibrarySettings struct { NullFields []string `json:"-"` } -func (s *ClientLibrarySettings) MarshalJSON() ([]byte, error) { +func (s ClientLibrarySettings) MarshalJSON() ([]byte, error) { type NoMethod ClientLibrarySettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CommonLanguageSettings: Required information for every language. @@ -978,6 +978,9 @@ type CommonLanguageSettings struct { // ReferenceDocsUri: Link to automatically generated reference documentation. // Example: https://cloud.google.com/nodejs/docs/reference/asset/latest ReferenceDocsUri string `json:"referenceDocsUri,omitempty"` + // SelectiveGapicGeneration: Configuration for which RPCs should be generated + // in the GAPIC client. 
+ SelectiveGapicGeneration *SelectiveGapicGeneration `json:"selectiveGapicGeneration,omitempty"` // ForceSendFields is a list of field names (e.g. "Destinations") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See @@ -991,9 +994,9 @@ type CommonLanguageSettings struct { NullFields []string `json:"-"` } -func (s *CommonLanguageSettings) MarshalJSON() ([]byte, error) { +func (s CommonLanguageSettings) MarshalJSON() ([]byte, error) { type NoMethod CommonLanguageSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ConfigChange: Output generated from semantically comparing two versions of a @@ -1043,9 +1046,9 @@ type ConfigChange struct { NullFields []string `json:"-"` } -func (s *ConfigChange) MarshalJSON() ([]byte, error) { +func (s ConfigChange) MarshalJSON() ([]byte, error) { type NoMethod ConfigChange - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ConfigFile: Generic specification of a source configuration file @@ -1085,9 +1088,9 @@ type ConfigFile struct { NullFields []string `json:"-"` } -func (s *ConfigFile) MarshalJSON() ([]byte, error) { +func (s ConfigFile) MarshalJSON() ([]byte, error) { type NoMethod ConfigFile - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ConfigRef: Represents a service configuration with its name and id. @@ -1108,9 +1111,9 @@ type ConfigRef struct { NullFields []string `json:"-"` } -func (s *ConfigRef) MarshalJSON() ([]byte, error) { +func (s ConfigRef) MarshalJSON() ([]byte, error) { type NoMethod ConfigRef - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ConfigSource: Represents a source file which is used to generate the service @@ -1136,9 +1139,9 @@ type ConfigSource struct { NullFields []string `json:"-"` } -func (s *ConfigSource) MarshalJSON() ([]byte, error) { +func (s ConfigSource) MarshalJSON() ([]byte, error) { type NoMethod ConfigSource - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Context: `Context` defines which contexts an API requests. Example: context: @@ -1172,9 +1175,9 @@ type Context struct { NullFields []string `json:"-"` } -func (s *Context) MarshalJSON() ([]byte, error) { +func (s Context) MarshalJSON() ([]byte, error) { type NoMethod Context - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ContextRule: A context rule provides information about the context for an @@ -1186,9 +1189,11 @@ type ContextRule struct { // AllowedResponseExtensions: A list of full type names or extension IDs of // extensions allowed in grpc side channel from backend to client. AllowedResponseExtensions []string `json:"allowedResponseExtensions,omitempty"` - // Provided: A list of full type names of provided contexts. + // Provided: A list of full type names of provided contexts. It is used to + // support propagating HTTP headers and ETags from the response extension. 
Provided []string `json:"provided,omitempty"` - // Requested: A list of full type names of requested contexts. + // Requested: A list of full type names of requested contexts, only the + // requested context will be made available to the backend. Requested []string `json:"requested,omitempty"` // Selector: Selects the methods to which this rule applies. Refer to selector // for syntax details. @@ -1206,9 +1211,9 @@ type ContextRule struct { NullFields []string `json:"-"` } -func (s *ContextRule) MarshalJSON() ([]byte, error) { +func (s ContextRule) MarshalJSON() ([]byte, error) { type NoMethod ContextRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Control: Selects and configures the service controller used by the service. @@ -1233,9 +1238,9 @@ type Control struct { NullFields []string `json:"-"` } -func (s *Control) MarshalJSON() ([]byte, error) { +func (s Control) MarshalJSON() ([]byte, error) { type NoMethod Control - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CppSettings: Settings for C++ client libraries. @@ -1255,9 +1260,9 @@ type CppSettings struct { NullFields []string `json:"-"` } -func (s *CppSettings) MarshalJSON() ([]byte, error) { +func (s CppSettings) MarshalJSON() ([]byte, error) { type NoMethod CppSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CustomError: Customize service error responses. For example, list any @@ -1284,9 +1289,9 @@ type CustomError struct { NullFields []string `json:"-"` } -func (s *CustomError) MarshalJSON() ([]byte, error) { +func (s CustomError) MarshalJSON() ([]byte, error) { type NoMethod CustomError - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CustomErrorRule: A custom error rule. @@ -1311,9 +1316,9 @@ type CustomErrorRule struct { NullFields []string `json:"-"` } -func (s *CustomErrorRule) MarshalJSON() ([]byte, error) { +func (s CustomErrorRule) MarshalJSON() ([]byte, error) { type NoMethod CustomErrorRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CustomHttpPattern: A custom pattern is used for defining custom HTTP verb. @@ -1335,9 +1340,9 @@ type CustomHttpPattern struct { NullFields []string `json:"-"` } -func (s *CustomHttpPattern) MarshalJSON() ([]byte, error) { +func (s CustomHttpPattern) MarshalJSON() ([]byte, error) { type NoMethod CustomHttpPattern - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DeleteServiceStrategy: Strategy used to delete a service. 
This strategy is a @@ -1370,9 +1375,9 @@ type Diagnostic struct { NullFields []string `json:"-"` } -func (s *Diagnostic) MarshalJSON() ([]byte, error) { +func (s Diagnostic) MarshalJSON() ([]byte, error) { type NoMethod Diagnostic - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Documentation: `Documentation` provides the information for describing a @@ -1443,9 +1448,9 @@ type Documentation struct { NullFields []string `json:"-"` } -func (s *Documentation) MarshalJSON() ([]byte, error) { +func (s Documentation) MarshalJSON() ([]byte, error) { type NoMethod Documentation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DocumentationRule: A documentation rule provides information about @@ -1483,9 +1488,9 @@ type DocumentationRule struct { NullFields []string `json:"-"` } -func (s *DocumentationRule) MarshalJSON() ([]byte, error) { +func (s DocumentationRule) MarshalJSON() ([]byte, error) { type NoMethod DocumentationRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DotnetSettings: Settings for Dotnet client libraries. @@ -1527,9 +1532,9 @@ type DotnetSettings struct { NullFields []string `json:"-"` } -func (s *DotnetSettings) MarshalJSON() ([]byte, error) { +func (s DotnetSettings) MarshalJSON() ([]byte, error) { type NoMethod DotnetSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // EnableServiceResponse: Operation payload for EnableService method. @@ -1582,9 +1587,9 @@ type Endpoint struct { NullFields []string `json:"-"` } -func (s *Endpoint) MarshalJSON() ([]byte, error) { +func (s Endpoint) MarshalJSON() ([]byte, error) { type NoMethod Endpoint - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Enum: Enum type definition. @@ -1620,9 +1625,9 @@ type Enum struct { NullFields []string `json:"-"` } -func (s *Enum) MarshalJSON() ([]byte, error) { +func (s Enum) MarshalJSON() ([]byte, error) { type NoMethod Enum - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // EnumValue: Enum value definition. @@ -1646,9 +1651,36 @@ type EnumValue struct { NullFields []string `json:"-"` } -func (s *EnumValue) MarshalJSON() ([]byte, error) { +func (s EnumValue) MarshalJSON() ([]byte, error) { type NoMethod EnumValue - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ExperimentalFeatures: Experimental features to be included during client +// library generation. These fields will be deprecated once the feature +// graduates and is enabled by default. +type ExperimentalFeatures struct { + // RestAsyncIoEnabled: Enables generation of asynchronous REST clients if + // `rest` transport is enabled. By default, asynchronous REST clients will not + // be generated. This feature will be enabled by default 1 month after + // launching the feature in preview packages. 
+ RestAsyncIoEnabled bool `json:"restAsyncIoEnabled,omitempty"` + // ForceSendFields is a list of field names (e.g. "RestAsyncIoEnabled") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "RestAsyncIoEnabled") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ExperimentalFeatures) MarshalJSON() ([]byte, error) { + type NoMethod ExperimentalFeatures + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Expr: Represents a textual expression in the Common Expression Language @@ -1694,9 +1726,9 @@ type Expr struct { NullFields []string `json:"-"` } -func (s *Expr) MarshalJSON() ([]byte, error) { +func (s Expr) MarshalJSON() ([]byte, error) { type NoMethod Expr - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Field: A single field of a message type. @@ -1765,9 +1797,9 @@ type Field struct { NullFields []string `json:"-"` } -func (s *Field) MarshalJSON() ([]byte, error) { +func (s Field) MarshalJSON() ([]byte, error) { type NoMethod Field - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FieldPolicy: Google API Policy Annotation This message defines a simple API @@ -1806,9 +1838,9 @@ type FieldPolicy struct { NullFields []string `json:"-"` } -func (s *FieldPolicy) MarshalJSON() ([]byte, error) { +func (s FieldPolicy) MarshalJSON() ([]byte, error) { type NoMethod FieldPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FlowErrorDetails: Encapsulation of flow-specific error details for @@ -1832,9 +1864,9 @@ type FlowErrorDetails struct { NullFields []string `json:"-"` } -func (s *FlowErrorDetails) MarshalJSON() ([]byte, error) { +func (s FlowErrorDetails) MarshalJSON() ([]byte, error) { type NoMethod FlowErrorDetails - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GenerateConfigReportRequest: Request message for GenerateConfigReport @@ -1863,9 +1895,9 @@ type GenerateConfigReportRequest struct { NullFields []string `json:"-"` } -func (s *GenerateConfigReportRequest) MarshalJSON() ([]byte, error) { +func (s GenerateConfigReportRequest) MarshalJSON() ([]byte, error) { type NoMethod GenerateConfigReportRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GenerateConfigReportResponse: Response message for GenerateConfigReport @@ -1897,9 +1929,9 @@ type GenerateConfigReportResponse struct { NullFields []string `json:"-"` } -func (s *GenerateConfigReportResponse) MarshalJSON() ([]byte, error) { +func (s GenerateConfigReportResponse) MarshalJSON() ([]byte, error) { type NoMethod GenerateConfigReportResponse - return 
gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetIamPolicyRequest: Request message for `GetIamPolicy` method. @@ -1920,9 +1952,9 @@ type GetIamPolicyRequest struct { NullFields []string `json:"-"` } -func (s *GetIamPolicyRequest) MarshalJSON() ([]byte, error) { +func (s GetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod GetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetPolicyOptions: Encapsulates settings provided to GetIamPolicy. @@ -1952,9 +1984,9 @@ type GetPolicyOptions struct { NullFields []string `json:"-"` } -func (s *GetPolicyOptions) MarshalJSON() ([]byte, error) { +func (s GetPolicyOptions) MarshalJSON() ([]byte, error) { type NoMethod GetPolicyOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoSettings: Settings for Go client libraries. @@ -1974,9 +2006,9 @@ type GoSettings struct { NullFields []string `json:"-"` } -func (s *GoSettings) MarshalJSON() ([]byte, error) { +func (s GoSettings) MarshalJSON() ([]byte, error) { type NoMethod GoSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Http: Defines the HTTP configuration for an API service. It contains a list @@ -2005,9 +2037,9 @@ type Http struct { NullFields []string `json:"-"` } -func (s *Http) MarshalJSON() ([]byte, error) { +func (s Http) MarshalJSON() ([]byte, error) { type NoMethod Http - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpRule: gRPC Transcoding gRPC Transcoding is a feature for mapping between @@ -2122,9 +2154,9 @@ func (s *Http) MarshalJSON() ([]byte, error) { // to a REST endpoint, achieving the same effect as the proto annotation. This // can be particularly useful if you have a proto that is reused in multiple // services. Note that any transcoding specified in the service config will -// override any matching transcoding configuration in the proto. Example below -// selects a gRPC method and applies HttpRule to it. http: rules: - selector: -// example.v1.Messaging.GetMessage get: +// override any matching transcoding configuration in the proto. The following +// example selects a gRPC method and applies an `HttpRule` to it: http: rules: +// - selector: example.v1.Messaging.GetMessage get: // /v1/messages/{message_id}/{sub.subfield} Special notes When gRPC Transcoding // is used to map a gRPC to JSON REST endpoints, the proto to JSON conversion // must follow the proto3 specification @@ -2194,9 +2226,9 @@ type HttpRule struct { NullFields []string `json:"-"` } -func (s *HttpRule) MarshalJSON() ([]byte, error) { +func (s HttpRule) MarshalJSON() ([]byte, error) { type NoMethod HttpRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JavaSettings: Settings for Java client libraries. 
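// Editor's note (not part of the patch): every hunk above rewrites a generated
// MarshalJSON from a pointer receiver `func (s *T)` to a value receiver
// `func (s T)`. A minimal, self-contained sketch of the pattern, using plain
// encoding/json as a stand-in for the vendored gensupport helper (which also
// applies ForceSendFields/NullFields):
package main

import (
	"encoding/json"
	"fmt"
)

type Diagnostic struct {
	Kind            string   `json:"kind,omitempty"`
	Message         string   `json:"message,omitempty"`
	ForceSendFields []string `json:"-"`
	NullFields      []string `json:"-"`
}

// Value receiver: the method is in the method sets of both Diagnostic and
// *Diagnostic, so json.Marshal sees it whether the caller holds a value or a
// pointer, and there is no pointer to dereference (the old `NoMethod(*s)`
// form required a non-nil *s).
func (s Diagnostic) MarshalJSON() ([]byte, error) {
	// NoMethod copies Diagnostic's fields but not its methods, so the inner
	// json.Marshal call cannot re-enter this MarshalJSON and recurse forever.
	type NoMethod Diagnostic
	return json.Marshal(NoMethod(s))
}

func main() {
	d := Diagnostic{Kind: "WARNING", Message: "deprecated field"}
	byVal, _ := json.Marshal(d)
	byPtr, _ := json.Marshal(&d)
	fmt.Println(string(byVal) == string(byPtr)) // true
}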
@@ -2232,9 +2264,9 @@ type JavaSettings struct { NullFields []string `json:"-"` } -func (s *JavaSettings) MarshalJSON() ([]byte, error) { +func (s JavaSettings) MarshalJSON() ([]byte, error) { type NoMethod JavaSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JwtLocation: Specifies a location to extract JWT from an API request. @@ -2265,9 +2297,9 @@ type JwtLocation struct { NullFields []string `json:"-"` } -func (s *JwtLocation) MarshalJSON() ([]byte, error) { +func (s JwtLocation) MarshalJSON() ([]byte, error) { type NoMethod JwtLocation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LabelDescriptor: A description of a label. @@ -2296,9 +2328,9 @@ type LabelDescriptor struct { NullFields []string `json:"-"` } -func (s *LabelDescriptor) MarshalJSON() ([]byte, error) { +func (s LabelDescriptor) MarshalJSON() ([]byte, error) { type NoMethod LabelDescriptor - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListOperationsResponse: The response message for Operations.ListOperations. @@ -2324,9 +2356,9 @@ type ListOperationsResponse struct { NullFields []string `json:"-"` } -func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { +func (s ListOperationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListOperationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListServiceConfigsResponse: Response message for ListServiceConfigs method. @@ -2351,9 +2383,9 @@ type ListServiceConfigsResponse struct { NullFields []string `json:"-"` } -func (s *ListServiceConfigsResponse) MarshalJSON() ([]byte, error) { +func (s ListServiceConfigsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListServiceConfigsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListServiceRolloutsResponse: Response message for ListServiceRollouts @@ -2379,9 +2411,9 @@ type ListServiceRolloutsResponse struct { NullFields []string `json:"-"` } -func (s *ListServiceRolloutsResponse) MarshalJSON() ([]byte, error) { +func (s ListServiceRolloutsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListServiceRolloutsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListServicesResponse: Response message for `ListServices` method. @@ -2407,9 +2439,9 @@ type ListServicesResponse struct { NullFields []string `json:"-"` } -func (s *ListServicesResponse) MarshalJSON() ([]byte, error) { +func (s ListServicesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListServicesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LogDescriptor: A description of a log type. 
Example in YAML format: - name: @@ -2445,9 +2477,9 @@ type LogDescriptor struct { NullFields []string `json:"-"` } -func (s *LogDescriptor) MarshalJSON() ([]byte, error) { +func (s LogDescriptor) MarshalJSON() ([]byte, error) { type NoMethod LogDescriptor - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Logging: Logging configuration of the service. The following example shows @@ -2486,9 +2518,9 @@ type Logging struct { NullFields []string `json:"-"` } -func (s *Logging) MarshalJSON() ([]byte, error) { +func (s Logging) MarshalJSON() ([]byte, error) { type NoMethod Logging - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LoggingDestination: Configuration of a specific logging destination (the @@ -2515,9 +2547,9 @@ type LoggingDestination struct { NullFields []string `json:"-"` } -func (s *LoggingDestination) MarshalJSON() ([]byte, error) { +func (s LoggingDestination) MarshalJSON() ([]byte, error) { type NoMethod LoggingDestination - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LongRunning: Describes settings to use when generating API methods that use @@ -2549,9 +2581,9 @@ type LongRunning struct { NullFields []string `json:"-"` } -func (s *LongRunning) MarshalJSON() ([]byte, error) { +func (s LongRunning) MarshalJSON() ([]byte, error) { type NoMethod LongRunning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *LongRunning) UnmarshalJSON(data []byte) error { @@ -2593,9 +2625,9 @@ type ManagedService struct { NullFields []string `json:"-"` } -func (s *ManagedService) MarshalJSON() ([]byte, error) { +func (s ManagedService) MarshalJSON() ([]byte, error) { type NoMethod ManagedService - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Method: Method represents a method of an API interface. @@ -2632,9 +2664,9 @@ type Method struct { NullFields []string `json:"-"` } -func (s *Method) MarshalJSON() ([]byte, error) { +func (s Method) MarshalJSON() ([]byte, error) { type NoMethod Method - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MethodPolicy: Defines policies applying to an RPC method. @@ -2659,9 +2691,9 @@ type MethodPolicy struct { NullFields []string `json:"-"` } -func (s *MethodPolicy) MarshalJSON() ([]byte, error) { +func (s MethodPolicy) MarshalJSON() ([]byte, error) { type NoMethod MethodPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MethodSettings: Describes the generator configuration for a method. 
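// Editor's note (not part of the patch): each generated struct above carries
// ForceSendFields and NullFields lists. A hedged, standalone approximation of
// the contract the vendored gensupport.MarshalJSON implements (the real helper
// lives in google.golang.org/api/internal/gensupport; this sketch only
// illustrates the behavior, and expects a struct value, not a pointer):
package main

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strings"
)

// marshalWithFieldLists marshals v, then re-adds zero-valued fields named in
// force and sets fields named in null to JSON null. Names are Go field names,
// resolved to JSON keys via struct tags, as in the generated code's docs.
func marshalWithFieldLists(v interface{}, force, null []string) ([]byte, error) {
	b, err := json.Marshal(v)
	if err != nil {
		return nil, err
	}
	m := map[string]json.RawMessage{}
	if err := json.Unmarshal(b, &m); err != nil {
		return nil, err
	}
	rv := reflect.ValueOf(v)
	rt := rv.Type()
	jsonKey := func(goName string) (string, bool) {
		f, ok := rt.FieldByName(goName)
		if !ok {
			return "", false
		}
		tag := strings.Split(f.Tag.Get("json"), ",")[0]
		return tag, tag != "" && tag != "-"
	}
	for _, name := range force {
		if key, ok := jsonKey(name); ok {
			if _, present := m[key]; !present {
				fb, _ := json.Marshal(rv.FieldByName(name).Interface())
				m[key] = fb // send the zero value instead of omitting it
			}
		}
	}
	for _, name := range null {
		if key, ok := jsonKey(name); ok {
			m[key] = json.RawMessage(`null`) // explicit JSON null
		}
	}
	return json.Marshal(m)
}

type GetPolicyOptions struct {
	RequestedPolicyVersion int64 `json:"requestedPolicyVersion,omitempty"`
}

func main() {
	o := GetPolicyOptions{} // zero value would normally be omitted by omitempty
	b, _ := marshalWithFieldLists(o, []string{"RequestedPolicyVersion"}, nil)
	fmt.Println(string(b)) // {"requestedPolicyVersion":0}
}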
@@ -2700,9 +2732,9 @@ type MethodSettings struct { NullFields []string `json:"-"` } -func (s *MethodSettings) MarshalJSON() ([]byte, error) { +func (s MethodSettings) MarshalJSON() ([]byte, error) { type NoMethod MethodSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MetricDescriptor: Defines a metric type and its schema. Once a metric @@ -2865,9 +2897,9 @@ type MetricDescriptor struct { NullFields []string `json:"-"` } -func (s *MetricDescriptor) MarshalJSON() ([]byte, error) { +func (s MetricDescriptor) MarshalJSON() ([]byte, error) { type NoMethod MetricDescriptor - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MetricDescriptorMetadata: Additional annotations that can be used to guide @@ -2917,6 +2949,16 @@ type MetricDescriptorMetadata struct { // interval, excluding data loss due to errors. Metrics with a higher // granularity have a smaller sampling period. SamplePeriod string `json:"samplePeriod,omitempty"` + // TimeSeriesResourceHierarchyLevel: The scope of the timeseries data of the + // metric. + // + // Possible values: + // "TIME_SERIES_RESOURCE_HIERARCHY_LEVEL_UNSPECIFIED" - Do not use this + // default value. + // "PROJECT" - Scopes a metric to a project. + // "ORGANIZATION" - Scopes a metric to an organization. + // "FOLDER" - Scopes a metric to a folder. + TimeSeriesResourceHierarchyLevel []string `json:"timeSeriesResourceHierarchyLevel,omitempty"` // ForceSendFields is a list of field names (e.g. "IngestDelay") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See @@ -2930,9 +2972,9 @@ type MetricDescriptorMetadata struct { NullFields []string `json:"-"` } -func (s *MetricDescriptorMetadata) MarshalJSON() ([]byte, error) { +func (s MetricDescriptorMetadata) MarshalJSON() ([]byte, error) { type NoMethod MetricDescriptorMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MetricRule: Bind API methods to metrics. Binding a method to a metric causes @@ -2959,9 +3001,9 @@ type MetricRule struct { NullFields []string `json:"-"` } -func (s *MetricRule) MarshalJSON() ([]byte, error) { +func (s MetricRule) MarshalJSON() ([]byte, error) { type NoMethod MetricRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Mixin: Declares an API Interface to be included in this interface. The @@ -2984,7 +3026,7 @@ func (s *MetricRule) MarshalJSON() ([]byte, error) { // mixin construct implies that all methods in `AccessControl` are also // declared with same name and request/response types in `Storage`. A // documentation generator or annotation processor will see the effective -// `Storage.GetAcl` method after inherting documentation and annotations as +// `Storage.GetAcl` method after inheriting documentation and annotations as // follows: service Storage { // Get the underlying ACL object. rpc // GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = // "/v2/{resource=**}:getAcl"; } ... 
} Note how the version in the path pattern @@ -3014,9 +3056,9 @@ type Mixin struct { NullFields []string `json:"-"` } -func (s *Mixin) MarshalJSON() ([]byte, error) { +func (s Mixin) MarshalJSON() ([]byte, error) { type NoMethod Mixin - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MonitoredResourceDescriptor: An object that describes the schema of a @@ -3103,9 +3145,9 @@ type MonitoredResourceDescriptor struct { NullFields []string `json:"-"` } -func (s *MonitoredResourceDescriptor) MarshalJSON() ([]byte, error) { +func (s MonitoredResourceDescriptor) MarshalJSON() ([]byte, error) { type NoMethod MonitoredResourceDescriptor - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Monitoring: Monitoring configuration of the service. The example below shows @@ -3160,9 +3202,9 @@ type Monitoring struct { NullFields []string `json:"-"` } -func (s *Monitoring) MarshalJSON() ([]byte, error) { +func (s Monitoring) MarshalJSON() ([]byte, error) { type NoMethod Monitoring - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MonitoringDestination: Configuration of a specific monitoring destination @@ -3187,9 +3229,9 @@ type MonitoringDestination struct { NullFields []string `json:"-"` } -func (s *MonitoringDestination) MarshalJSON() ([]byte, error) { +func (s MonitoringDestination) MarshalJSON() ([]byte, error) { type NoMethod MonitoringDestination - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeSettings: Settings for Node client libraries. 
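// Editor's note (not part of the patch): the MetricDescriptorMetadata hunk
// above gains a repeated enum field, TimeSeriesResourceHierarchyLevel. A
// hedged sketch of how a caller would populate it; the struct here is reduced
// to the two relevant fields rather than importing the vendored package.
package main

import (
	"encoding/json"
	"fmt"
)

type MetricDescriptorMetadata struct {
	SamplePeriod string `json:"samplePeriod,omitempty"`
	// Allowed values per the schema: "PROJECT", "ORGANIZATION", "FOLDER"
	// ("TIME_SERIES_RESOURCE_HIERARCHY_LEVEL_UNSPECIFIED" is the unusable default).
	TimeSeriesResourceHierarchyLevel []string `json:"timeSeriesResourceHierarchyLevel,omitempty"`
}

func main() {
	md := MetricDescriptorMetadata{
		SamplePeriod:                     "60s",
		TimeSeriesResourceHierarchyLevel: []string{"PROJECT"},
	}
	b, _ := json.Marshal(md)
	fmt.Println(string(b))
	// {"samplePeriod":"60s","timeSeriesResourceHierarchyLevel":["PROJECT"]}
}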
@@ -3209,9 +3251,9 @@ type NodeSettings struct { NullFields []string `json:"-"` } -func (s *NodeSettings) MarshalJSON() ([]byte, error) { +func (s NodeSettings) MarshalJSON() ([]byte, error) { type NoMethod NodeSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OAuthRequirements: OAuth scopes are a way to define data and permissions on @@ -3248,9 +3290,9 @@ type OAuthRequirements struct { NullFields []string `json:"-"` } -func (s *OAuthRequirements) MarshalJSON() ([]byte, error) { +func (s OAuthRequirements) MarshalJSON() ([]byte, error) { type NoMethod OAuthRequirements - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Operation: This resource represents a long-running operation that is the @@ -3295,9 +3337,9 @@ type Operation struct { NullFields []string `json:"-"` } -func (s *Operation) MarshalJSON() ([]byte, error) { +func (s Operation) MarshalJSON() ([]byte, error) { type NoMethod Operation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationInfo: A message representing the message types used by a @@ -3330,9 +3372,9 @@ type OperationInfo struct { NullFields []string `json:"-"` } -func (s *OperationInfo) MarshalJSON() ([]byte, error) { +func (s OperationInfo) MarshalJSON() ([]byte, error) { type NoMethod OperationInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationMetadata: The metadata associated with a long running operation @@ -3361,9 +3403,9 @@ type OperationMetadata struct { NullFields []string `json:"-"` } -func (s *OperationMetadata) MarshalJSON() ([]byte, error) { +func (s OperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod OperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Option: A protocol buffer option, which can be attached to a message, field, @@ -3393,9 +3435,9 @@ type Option struct { NullFields []string `json:"-"` } -func (s *Option) MarshalJSON() ([]byte, error) { +func (s Option) MarshalJSON() ([]byte, error) { type NoMethod Option - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Page: Represents a documentation page. A page can contain subpages to @@ -3429,9 +3471,9 @@ type Page struct { NullFields []string `json:"-"` } -func (s *Page) MarshalJSON() ([]byte, error) { +func (s Page) MarshalJSON() ([]byte, error) { type NoMethod Page - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PhpSettings: Settings for Php client libraries. 
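// Editor's note (not part of the patch): the Operation hunk above is the
// generic long-running-operation envelope. A hedged sketch of the usual
// consumption pattern, with types reduced to the fields the pattern needs;
// in the vendored package the poll would go through operations.Get (the
// OperationsGetCall type declared later in this file) rather than the stub
// fetch function used here.
package main

import (
	"encoding/json"
	"fmt"
)

type Status struct {
	Code    int64  `json:"code,omitempty"`
	Message string `json:"message,omitempty"`
}

type Operation struct {
	Name     string          `json:"name,omitempty"`
	Done     bool            `json:"done,omitempty"`
	Error    *Status         `json:"error,omitempty"`
	Response json.RawMessage `json:"response,omitempty"`
}

// wait polls fetch until the operation completes, then surfaces either the
// terminal error or the raw response payload.
func wait(fetch func(name string) (*Operation, error), name string) (json.RawMessage, error) {
	for {
		op, err := fetch(name)
		if err != nil {
			return nil, err
		}
		if !op.Done {
			continue // a real caller would sleep with backoff between polls
		}
		if op.Error != nil {
			return nil, fmt.Errorf("operation %s failed: code %d: %s", op.Name, op.Error.Code, op.Error.Message)
		}
		return op.Response, nil
	}
}

func main() {
	calls := 0
	fetch := func(name string) (*Operation, error) {
		calls++
		if calls < 3 {
			return &Operation{Name: name}, nil // not done yet
		}
		return &Operation{Name: name, Done: true, Response: json.RawMessage(`{"ok":true}`)}, nil
	}
	resp, err := wait(fetch, "operations/abc123")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(string(resp)) // {"ok":true}
}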
@@ -3451,9 +3493,9 @@ type PhpSettings struct { NullFields []string `json:"-"` } -func (s *PhpSettings) MarshalJSON() ([]byte, error) { +func (s PhpSettings) MarshalJSON() ([]byte, error) { type NoMethod PhpSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Policy: An Identity and Access Management (IAM) policy, which specifies @@ -3543,9 +3585,9 @@ type Policy struct { NullFields []string `json:"-"` } -func (s *Policy) MarshalJSON() ([]byte, error) { +func (s Policy) MarshalJSON() ([]byte, error) { type NoMethod Policy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Publishing: This message configures the settings for publishing Google Cloud @@ -3612,15 +3654,18 @@ type Publishing struct { NullFields []string `json:"-"` } -func (s *Publishing) MarshalJSON() ([]byte, error) { +func (s Publishing) MarshalJSON() ([]byte, error) { type NoMethod Publishing - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PythonSettings: Settings for Python client libraries. type PythonSettings struct { // Common: Some settings. Common *CommonLanguageSettings `json:"common,omitempty"` + // ExperimentalFeatures: Experimental features to be included during client + // library generation. + ExperimentalFeatures *ExperimentalFeatures `json:"experimentalFeatures,omitempty"` // ForceSendFields is a list of field names (e.g. "Common") to unconditionally // include in API requests. By default, fields with empty or default values are // omitted from API requests. 
See @@ -3634,9 +3679,9 @@ type PythonSettings struct { NullFields []string `json:"-"` } -func (s *PythonSettings) MarshalJSON() ([]byte, error) { +func (s PythonSettings) MarshalJSON() ([]byte, error) { type NoMethod PythonSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Quota: Quota configuration helps to achieve fairness and budgeting in @@ -3679,9 +3724,9 @@ type Quota struct { NullFields []string `json:"-"` } -func (s *Quota) MarshalJSON() ([]byte, error) { +func (s Quota) MarshalJSON() ([]byte, error) { type NoMethod Quota - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // QuotaLimit: `QuotaLimit` defines a specific limit that applies over a @@ -3753,9 +3798,9 @@ type QuotaLimit struct { NullFields []string `json:"-"` } -func (s *QuotaLimit) MarshalJSON() ([]byte, error) { +func (s QuotaLimit) MarshalJSON() ([]byte, error) { type NoMethod QuotaLimit - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResourceReference: Defines a proto annotation that describes a string field @@ -3787,9 +3832,9 @@ type ResourceReference struct { NullFields []string `json:"-"` } -func (s *ResourceReference) MarshalJSON() ([]byte, error) { +func (s ResourceReference) MarshalJSON() ([]byte, error) { type NoMethod ResourceReference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Rollout: A rollout resource that defines how service configuration versions @@ -3847,9 +3892,9 @@ type Rollout struct { NullFields []string `json:"-"` } -func (s *Rollout) MarshalJSON() ([]byte, error) { +func (s Rollout) MarshalJSON() ([]byte, error) { type NoMethod Rollout - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RubySettings: Settings for Ruby client libraries. @@ -3869,9 +3914,33 @@ type RubySettings struct { NullFields []string `json:"-"` } -func (s *RubySettings) MarshalJSON() ([]byte, error) { +func (s RubySettings) MarshalJSON() ([]byte, error) { type NoMethod RubySettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SelectiveGapicGeneration: This message is used to configure the generation +// of a subset of the RPCs in a service for client libraries. +type SelectiveGapicGeneration struct { + // Methods: An allowlist of the fully qualified names of RPCs that should be + // included on public client surfaces. + Methods []string `json:"methods,omitempty"` + // ForceSendFields is a list of field names (e.g. "Methods") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Methods") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. 
See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SelectiveGapicGeneration) MarshalJSON() ([]byte, error) { + type NoMethod SelectiveGapicGeneration + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Service: `Service` is the root object of Google API service configuration @@ -3992,9 +4061,9 @@ type Service struct { NullFields []string `json:"-"` } -func (s *Service) MarshalJSON() ([]byte, error) { +func (s Service) MarshalJSON() ([]byte, error) { type NoMethod Service - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SetIamPolicyRequest: Request message for `SetIamPolicy` method. @@ -4021,9 +4090,9 @@ type SetIamPolicyRequest struct { NullFields []string `json:"-"` } -func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { +func (s SetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod SetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SourceContext: `SourceContext` represents information about the source of a @@ -4046,9 +4115,9 @@ type SourceContext struct { NullFields []string `json:"-"` } -func (s *SourceContext) MarshalJSON() ([]byte, error) { +func (s SourceContext) MarshalJSON() ([]byte, error) { type NoMethod SourceContext - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SourceInfo: Source information used to create a Service Config @@ -4068,9 +4137,9 @@ type SourceInfo struct { NullFields []string `json:"-"` } -func (s *SourceInfo) MarshalJSON() ([]byte, error) { +func (s SourceInfo) MarshalJSON() ([]byte, error) { type NoMethod SourceInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Status: The `Status` type defines a logical error model that is suitable for @@ -4102,9 +4171,9 @@ type Status struct { NullFields []string `json:"-"` } -func (s *Status) MarshalJSON() ([]byte, error) { +func (s Status) MarshalJSON() ([]byte, error) { type NoMethod Status - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Step: Represents the status of one operation step. @@ -4135,9 +4204,9 @@ type Step struct { NullFields []string `json:"-"` } -func (s *Step) MarshalJSON() ([]byte, error) { +func (s Step) MarshalJSON() ([]byte, error) { type NoMethod Step - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SubmitConfigSourceRequest: Request message for SubmitConfigSource method. @@ -4161,9 +4230,9 @@ type SubmitConfigSourceRequest struct { NullFields []string `json:"-"` } -func (s *SubmitConfigSourceRequest) MarshalJSON() ([]byte, error) { +func (s SubmitConfigSourceRequest) MarshalJSON() ([]byte, error) { type NoMethod SubmitConfigSourceRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SubmitConfigSourceResponse: Response message for SubmitConfigSource method. 
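// Editor's note (not part of the patch): two new client-library-generation
// knobs land in this file, ExperimentalFeatures (referenced from
// PythonSettings) and SelectiveGapicGeneration. A hedged sketch of what a
// populated configuration looks like on the wire, with the structs reduced
// to the fields this patch adds (the RPC name reuses the Messaging example
// from the HttpRule docs above):
package main

import (
	"encoding/json"
	"fmt"
)

type ExperimentalFeatures struct {
	RestAsyncIoEnabled bool `json:"restAsyncIoEnabled,omitempty"`
}

type PythonSettings struct {
	ExperimentalFeatures *ExperimentalFeatures `json:"experimentalFeatures,omitempty"`
}

type SelectiveGapicGeneration struct {
	// Fully qualified RPC names to expose on public client surfaces.
	Methods []string `json:"methods,omitempty"`
}

func main() {
	py := PythonSettings{
		ExperimentalFeatures: &ExperimentalFeatures{RestAsyncIoEnabled: true},
	}
	sel := SelectiveGapicGeneration{
		Methods: []string{"example.v1.Messaging.GetMessage"},
	}
	pb, _ := json.Marshal(py)
	sb, _ := json.Marshal(sel)
	fmt.Println(string(pb)) // {"experimentalFeatures":{"restAsyncIoEnabled":true}}
	fmt.Println(string(sb)) // {"methods":["example.v1.Messaging.GetMessage"]}
}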
@@ -4183,9 +4252,9 @@ type SubmitConfigSourceResponse struct { NullFields []string `json:"-"` } -func (s *SubmitConfigSourceResponse) MarshalJSON() ([]byte, error) { +func (s SubmitConfigSourceResponse) MarshalJSON() ([]byte, error) { type NoMethod SubmitConfigSourceResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SystemParameter: Define a parameter's name and location. The parameter may @@ -4214,9 +4283,9 @@ type SystemParameter struct { NullFields []string `json:"-"` } -func (s *SystemParameter) MarshalJSON() ([]byte, error) { +func (s SystemParameter) MarshalJSON() ([]byte, error) { type NoMethod SystemParameter - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SystemParameterRule: Define a system parameter rule mapping system parameter @@ -4243,9 +4312,9 @@ type SystemParameterRule struct { NullFields []string `json:"-"` } -func (s *SystemParameterRule) MarshalJSON() ([]byte, error) { +func (s SystemParameterRule) MarshalJSON() ([]byte, error) { type NoMethod SystemParameterRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SystemParameters: ### System parameter configuration A system parameter is a @@ -4278,9 +4347,9 @@ type SystemParameters struct { NullFields []string `json:"-"` } -func (s *SystemParameters) MarshalJSON() ([]byte, error) { +func (s SystemParameters) MarshalJSON() ([]byte, error) { type NoMethod SystemParameters - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsRequest: Request message for `TestIamPermissions` method. @@ -4303,9 +4372,9 @@ type TestIamPermissionsRequest struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsResponse: Response message for `TestIamPermissions` @@ -4330,9 +4399,9 @@ type TestIamPermissionsResponse struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TrafficPercentStrategy: Strategy that specifies how clients of Google @@ -4363,9 +4432,9 @@ type TrafficPercentStrategy struct { NullFields []string `json:"-"` } -func (s *TrafficPercentStrategy) MarshalJSON() ([]byte, error) { +func (s TrafficPercentStrategy) MarshalJSON() ([]byte, error) { type NoMethod TrafficPercentStrategy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Type: A protocol buffer message type. 
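// Editor's note (not part of the patch): the servicenetworking discovery
// document below adds an optional skipRequestedAddressValidation boolean to
// AddSubnetworkRequest. A hedged sketch of the request shape that schema
// change implies; the struct is reduced to the fields mentioned here, the
// field names follow the usual discovery-to-Go mapping rather than the
// regenerated client, and the subnet name is illustrative.
package main

import (
	"encoding/json"
	"fmt"
)

type AddSubnetworkRequest struct {
	Subnetwork string `json:"subnetwork,omitempty"`
	// Skips the Service Networking check that requested_address is unused in
	// the VPC peering group; per the schema text, Compute Engine still
	// validates and can fail the request, and overlapping dynamic routes
	// remain the caller's responsibility.
	SkipRequestedAddressValidation bool `json:"skipRequestedAddressValidation,omitempty"`
}

func main() {
	req := AddSubnetworkRequest{
		Subnetwork:                     "my-reserved-subnet", // hypothetical name
		SkipRequestedAddressValidation: true,
	}
	b, _ := json.Marshal(req)
	fmt.Println(string(b))
	// {"subnetwork":"my-reserved-subnet","skipRequestedAddressValidation":true}
}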
@@ -4403,9 +4472,9 @@ type Type struct { NullFields []string `json:"-"` } -func (s *Type) MarshalJSON() ([]byte, error) { +func (s Type) MarshalJSON() ([]byte, error) { type NoMethod Type - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UndeleteServiceResponse: Response message for UndeleteService method. @@ -4425,9 +4494,9 @@ type UndeleteServiceResponse struct { NullFields []string `json:"-"` } -func (s *UndeleteServiceResponse) MarshalJSON() ([]byte, error) { +func (s UndeleteServiceResponse) MarshalJSON() ([]byte, error) { type NoMethod UndeleteServiceResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Usage: Configuration controlling usage of a service. @@ -4464,9 +4533,9 @@ type Usage struct { NullFields []string `json:"-"` } -func (s *Usage) MarshalJSON() ([]byte, error) { +func (s Usage) MarshalJSON() ([]byte, error) { type NoMethod Usage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UsageRule: Usage configuration rules for the service. NOTE: Under @@ -4505,9 +4574,9 @@ type UsageRule struct { NullFields []string `json:"-"` } -func (s *UsageRule) MarshalJSON() ([]byte, error) { +func (s UsageRule) MarshalJSON() ([]byte, error) { type NoMethod UsageRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type OperationsGetCall struct { diff --git a/terraform/providers/google/vendor/google.golang.org/api/servicenetworking/v1/servicenetworking-api.json b/terraform/providers/google/vendor/google.golang.org/api/servicenetworking/v1/servicenetworking-api.json index 48f90c298c1..88feacde98f 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/servicenetworking/v1/servicenetworking-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/servicenetworking/v1/servicenetworking-api.json @@ -1029,7 +1029,7 @@ } } }, - "revision": "20240529", + "revision": "20240925", "rootUrl": "https://servicenetworking.googleapis.com/", "schemas": { "AddDnsRecordSetMetadata": { @@ -1211,6 +1211,10 @@ }, "type": "array" }, + "skipRequestedAddressValidation": { + "description": "Optional. Skips validating if the requested_address is in use by SN VPC’s peering group. Compute Engine will still perform this check and fail the request if the requested_address is in use. Note that Compute Engine does not check for the existence of dynamic routes when performing this check. Caller of this API should make sure that there are no dynamic routes overlapping with the requested_address/prefix_length IP address range otherwise the created subnet could cause misrouting.", + "type": "boolean" + }, "subnetwork": { "description": "Required. A name for the new subnet. For information about the naming requirements, see [subnetwork](/compute/docs/reference/rest/v1/subnetworks) in the Compute API documentation.", "type": "string" @@ -1610,6 +1614,10 @@ "deprecated": true, "description": "Link to automatically generated reference documentation. 
Example: https://cloud.google.com/nodejs/docs/reference/asset/latest", "type": "string" + }, + "selectiveGapicGeneration": { + "$ref": "SelectiveGapicGeneration", + "description": "Configuration for which RPCs should be generated in the GAPIC client." } }, "type": "object" @@ -1765,14 +1773,14 @@ "type": "array" }, "provided": { - "description": "A list of full type names of provided contexts.", + "description": "A list of full type names of provided contexts. It is used to support propagating HTTP headers and ETags from the response extension.", "items": { "type": "string" }, "type": "array" }, "requested": { - "description": "A list of full type names of requested contexts.", + "description": "A list of full type names of requested contexts, only the requested context will be made available to the backend.", "items": { "type": "string" }, @@ -2182,6 +2190,17 @@ }, "type": "object" }, + "ExperimentalFeatures": { + "description": "Experimental features to be included during client library generation. These fields will be deprecated once the feature graduates and is enabled by default.", + "id": "ExperimentalFeatures", + "properties": { + "restAsyncIoEnabled": { + "description": "Enables generation of asynchronous REST clients if `rest` transport is enabled. By default, asynchronous REST clients will not be generated. This feature will be enabled by default 1 month after launching the feature in preview packages.", + "type": "boolean" + } + }, + "type": "object" + }, "Field": { "description": "A single field of a message type.", "id": "Field", @@ -2421,7 +2440,7 @@ "type": "object" }, "HttpRule": { - "description": "gRPC Transcoding gRPC Transcoding is a feature for mapping between a gRPC method and one or more HTTP REST endpoints. It allows developers to build a single API service that supports both gRPC APIs and REST APIs. Many systems, including [Google APIs](https://github.com/googleapis/googleapis), [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC Gateway](https://github.com/grpc-ecosystem/grpc-gateway), and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature and use it for large scale production services. `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies how different portions of the gRPC request message are mapped to the URL path, URL query parameters, and HTTP request body. It also controls how the gRPC response message is mapped to the HTTP response body. `HttpRule` is typically specified as an `google.api.http` annotation on the gRPC method. Each mapping specifies a URL path template and an HTTP method. The path template may refer to one or more fields in the gRPC request message, as long as each field is a non-repeated field with a primitive (non-message) type. The path template controls how fields of the request message are mapped to the URL path. Example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get: \"/v1/{name=messages/*}\" }; } } message GetMessageRequest { string name = 1; // Mapped to URL path. } message Message { string text = 1; // The resource content. } This enables an HTTP REST to gRPC mapping as below: - HTTP: `GET /v1/messages/123456` - gRPC: `GetMessage(name: \"messages/123456\")` Any fields in the request message which are not bound by the path template automatically become HTTP query parameters if there is no HTTP request body. 
For example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get:\"/v1/messages/{message_id}\" }; } } message GetMessageRequest { message SubMessage { string subfield = 1; } string message_id = 1; // Mapped to URL path. int64 revision = 2; // Mapped to URL query parameter `revision`. SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. } This enables a HTTP JSON to RPC mapping as below: - HTTP: `GET /v1/messages/123456?revision=2\u0026sub.subfield=foo` - gRPC: `GetMessage(message_id: \"123456\" revision: 2 sub: SubMessage(subfield: \"foo\"))` Note that fields which are mapped to URL query parameters must have a primitive type or a repeated primitive type or a non-repeated message type. In the case of a repeated type, the parameter can be repeated in the URL as `...?param=A\u0026param=B`. In the case of a message type, each field of the message is mapped to a separate parameter, such as `...?foo.a=A\u0026foo.b=B\u0026foo.c=C`. For HTTP methods that allow a request body, the `body` field specifies the mapping. Consider a REST update method on the message resource collection: service Messaging { rpc UpdateMessage(UpdateMessageRequest) returns (Message) { option (google.api.http) = { patch: \"/v1/messages/{message_id}\" body: \"message\" }; } } message UpdateMessageRequest { string message_id = 1; // mapped to the URL Message message = 2; // mapped to the body } The following HTTP JSON to RPC mapping is enabled, where the representation of the JSON in the request body is determined by protos JSON encoding: - HTTP: `PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` - gRPC: `UpdateMessage(message_id: \"123456\" message { text: \"Hi!\" })` The special name `*` can be used in the body mapping to define that every field not bound by the path template should be mapped to the request body. This enables the following alternative definition of the update method: service Messaging { rpc UpdateMessage(Message) returns (Message) { option (google.api.http) = { patch: \"/v1/messages/{message_id}\" body: \"*\" }; } } message Message { string message_id = 1; string text = 2; } The following HTTP JSON to RPC mapping is enabled: - HTTP: `PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` - gRPC: `UpdateMessage(message_id: \"123456\" text: \"Hi!\")` Note that when using `*` in the body mapping, it is not possible to have HTTP parameters, as all fields not bound by the path end in the body. This makes this option more rarely used in practice when defining REST APIs. The common usage of `*` is in custom methods which don't use the URL at all for transferring data. It is possible to define multiple HTTP methods for one RPC by using the `additional_bindings` option. Example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get: \"/v1/messages/{message_id}\" additional_bindings { get: \"/v1/users/{user_id}/messages/{message_id}\" } }; } } message GetMessageRequest { string message_id = 1; string user_id = 2; } This enables the following two alternative HTTP JSON to RPC mappings: - HTTP: `GET /v1/messages/123456` - gRPC: `GetMessage(message_id: \"123456\")` - HTTP: `GET /v1/users/me/messages/123456` - gRPC: `GetMessage(user_id: \"me\" message_id: \"123456\")` Rules for HTTP mapping 1. Leaf request fields (recursive expansion nested messages in the request message) are classified into three categories: - Fields referred by the path template. They are passed via the URL path. 
- Fields referred by the HttpRule.body. They are passed via the HTTP request body. - All other fields are passed via the URL query parameters, and the parameter name is the field path in the request message. A repeated field can be represented as multiple query parameters under the same name. 2. If HttpRule.body is \"*\", there is no URL query parameter, all fields are passed via URL path and HTTP request body. 3. If HttpRule.body is omitted, there is no HTTP request body, all fields are passed via URL path and URL query parameters. Path template syntax Template = \"/\" Segments [ Verb ] ; Segments = Segment { \"/\" Segment } ; Segment = \"*\" | \"**\" | LITERAL | Variable ; Variable = \"{\" FieldPath [ \"=\" Segments ] \"}\" ; FieldPath = IDENT { \".\" IDENT } ; Verb = \":\" LITERAL ; The syntax `*` matches a single URL path segment. The syntax `**` matches zero or more URL path segments, which must be the last part of the URL path except the `Verb`. The syntax `Variable` matches part of the URL path as specified by its template. A variable template must not contain other variables. If a variable matches a single path segment, its template may be omitted, e.g. `{var}` is equivalent to `{var=*}`. The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` contains any reserved character, such characters should be percent-encoded before the matching. If a variable contains exactly one path segment, such as `\"{var}\"` or `\"{var=*}\"`, when such a variable is expanded into a URL path on the client side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The server side does the reverse decoding. Such variables show up in the [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) as `{var}`. If a variable contains multiple path segments, such as `\"{var=foo/*}\"` or `\"{var=**}\"`, when such a variable is expanded into a URL path on the client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. The server side does the reverse decoding, except \"%2F\" and \"%2f\" are left unchanged. Such variables show up in the [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) as `{+var}`. Using gRPC API Service Configuration gRPC API Service Configuration (service config) is a configuration language for configuring a gRPC service to become a user-facing product. The service config is simply the YAML representation of the `google.api.Service` proto message. As an alternative to annotating your proto file, you can configure gRPC transcoding in your service config YAML files. You do this by specifying a `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same effect as the proto annotation. This can be particularly useful if you have a proto that is reused in multiple services. Note that any transcoding specified in the service config will override any matching transcoding configuration in the proto. Example below selects a gRPC method and applies HttpRule to it. http: rules: - selector: example.v1.Messaging.GetMessage get: /v1/messages/{message_id}/{sub.subfield} Special notes When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the proto to JSON conversion must follow the [proto3 specification](https://developers.google.com/protocol-buffers/docs/proto3#json). 
While the single segment variable follows the semantics of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String Expansion, the multi segment variable **does not** follow RFC 6570 Section 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion does not expand special characters like `?` and `#`, which would lead to invalid URLs. As the result, gRPC Transcoding uses a custom encoding for multi segment variables. The path variables **must not** refer to any repeated or mapped field, because client libraries are not capable of handling such variable expansion. The path variables **must not** capture the leading \"/\" character. The reason is that the most common use case \"{var}\" does not capture the leading \"/\" character. For consistency, all path variables must share the same behavior. Repeated message fields must not be mapped to URL query parameters, because no client library can support such complicated mapping. If an API needs to use a JSON array for request or response body, it can map the request or response body to a repeated field. However, some gRPC Transcoding implementations may not support this feature.", + "description": "gRPC Transcoding gRPC Transcoding is a feature for mapping between a gRPC method and one or more HTTP REST endpoints. It allows developers to build a single API service that supports both gRPC APIs and REST APIs. Many systems, including [Google APIs](https://github.com/googleapis/googleapis), [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC Gateway](https://github.com/grpc-ecosystem/grpc-gateway), and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature and use it for large scale production services. `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies how different portions of the gRPC request message are mapped to the URL path, URL query parameters, and HTTP request body. It also controls how the gRPC response message is mapped to the HTTP response body. `HttpRule` is typically specified as an `google.api.http` annotation on the gRPC method. Each mapping specifies a URL path template and an HTTP method. The path template may refer to one or more fields in the gRPC request message, as long as each field is a non-repeated field with a primitive (non-message) type. The path template controls how fields of the request message are mapped to the URL path. Example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get: \"/v1/{name=messages/*}\" }; } } message GetMessageRequest { string name = 1; // Mapped to URL path. } message Message { string text = 1; // The resource content. } This enables an HTTP REST to gRPC mapping as below: - HTTP: `GET /v1/messages/123456` - gRPC: `GetMessage(name: \"messages/123456\")` Any fields in the request message which are not bound by the path template automatically become HTTP query parameters if there is no HTTP request body. For example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get:\"/v1/messages/{message_id}\" }; } } message GetMessageRequest { message SubMessage { string subfield = 1; } string message_id = 1; // Mapped to URL path. int64 revision = 2; // Mapped to URL query parameter `revision`. SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. 
} This enables a HTTP JSON to RPC mapping as below: - HTTP: `GET /v1/messages/123456?revision=2\u0026sub.subfield=foo` - gRPC: `GetMessage(message_id: \"123456\" revision: 2 sub: SubMessage(subfield: \"foo\"))` Note that fields which are mapped to URL query parameters must have a primitive type or a repeated primitive type or a non-repeated message type. In the case of a repeated type, the parameter can be repeated in the URL as `...?param=A\u0026param=B`. In the case of a message type, each field of the message is mapped to a separate parameter, such as `...?foo.a=A\u0026foo.b=B\u0026foo.c=C`. For HTTP methods that allow a request body, the `body` field specifies the mapping. Consider a REST update method on the message resource collection: service Messaging { rpc UpdateMessage(UpdateMessageRequest) returns (Message) { option (google.api.http) = { patch: \"/v1/messages/{message_id}\" body: \"message\" }; } } message UpdateMessageRequest { string message_id = 1; // mapped to the URL Message message = 2; // mapped to the body } The following HTTP JSON to RPC mapping is enabled, where the representation of the JSON in the request body is determined by protos JSON encoding: - HTTP: `PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` - gRPC: `UpdateMessage(message_id: \"123456\" message { text: \"Hi!\" })` The special name `*` can be used in the body mapping to define that every field not bound by the path template should be mapped to the request body. This enables the following alternative definition of the update method: service Messaging { rpc UpdateMessage(Message) returns (Message) { option (google.api.http) = { patch: \"/v1/messages/{message_id}\" body: \"*\" }; } } message Message { string message_id = 1; string text = 2; } The following HTTP JSON to RPC mapping is enabled: - HTTP: `PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` - gRPC: `UpdateMessage(message_id: \"123456\" text: \"Hi!\")` Note that when using `*` in the body mapping, it is not possible to have HTTP parameters, as all fields not bound by the path end in the body. This makes this option more rarely used in practice when defining REST APIs. The common usage of `*` is in custom methods which don't use the URL at all for transferring data. It is possible to define multiple HTTP methods for one RPC by using the `additional_bindings` option. Example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get: \"/v1/messages/{message_id}\" additional_bindings { get: \"/v1/users/{user_id}/messages/{message_id}\" } }; } } message GetMessageRequest { string message_id = 1; string user_id = 2; } This enables the following two alternative HTTP JSON to RPC mappings: - HTTP: `GET /v1/messages/123456` - gRPC: `GetMessage(message_id: \"123456\")` - HTTP: `GET /v1/users/me/messages/123456` - gRPC: `GetMessage(user_id: \"me\" message_id: \"123456\")` Rules for HTTP mapping 1. Leaf request fields (recursive expansion nested messages in the request message) are classified into three categories: - Fields referred by the path template. They are passed via the URL path. - Fields referred by the HttpRule.body. They are passed via the HTTP request body. - All other fields are passed via the URL query parameters, and the parameter name is the field path in the request message. A repeated field can be represented as multiple query parameters under the same name. 2. If HttpRule.body is \"*\", there is no URL query parameter, all fields are passed via URL path and HTTP request body. 3. 
If HttpRule.body is omitted, there is no HTTP request body, all fields are passed via URL path and URL query parameters. Path template syntax Template = \"/\" Segments [ Verb ] ; Segments = Segment { \"/\" Segment } ; Segment = \"*\" | \"**\" | LITERAL | Variable ; Variable = \"{\" FieldPath [ \"=\" Segments ] \"}\" ; FieldPath = IDENT { \".\" IDENT } ; Verb = \":\" LITERAL ; The syntax `*` matches a single URL path segment. The syntax `**` matches zero or more URL path segments, which must be the last part of the URL path except the `Verb`. The syntax `Variable` matches part of the URL path as specified by its template. A variable template must not contain other variables. If a variable matches a single path segment, its template may be omitted, e.g. `{var}` is equivalent to `{var=*}`. The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` contains any reserved character, such characters should be percent-encoded before the matching. If a variable contains exactly one path segment, such as `\"{var}\"` or `\"{var=*}\"`, when such a variable is expanded into a URL path on the client side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The server side does the reverse decoding. Such variables show up in the [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) as `{var}`. If a variable contains multiple path segments, such as `\"{var=foo/*}\"` or `\"{var=**}\"`, when such a variable is expanded into a URL path on the client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. The server side does the reverse decoding, except \"%2F\" and \"%2f\" are left unchanged. Such variables show up in the [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) as `{+var}`. Using gRPC API Service Configuration gRPC API Service Configuration (service config) is a configuration language for configuring a gRPC service to become a user-facing product. The service config is simply the YAML representation of the `google.api.Service` proto message. As an alternative to annotating your proto file, you can configure gRPC transcoding in your service config YAML files. You do this by specifying a `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same effect as the proto annotation. This can be particularly useful if you have a proto that is reused in multiple services. Note that any transcoding specified in the service config will override any matching transcoding configuration in the proto. The following example selects a gRPC method and applies an `HttpRule` to it: http: rules: - selector: example.v1.Messaging.GetMessage get: /v1/messages/{message_id}/{sub.subfield} Special notes When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the proto to JSON conversion must follow the [proto3 specification](https://developers.google.com/protocol-buffers/docs/proto3#json). While the single segment variable follows the semantics of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String Expansion, the multi segment variable **does not** follow RFC 6570 Section 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion does not expand special characters like `?` and `#`, which would lead to invalid URLs. As the result, gRPC Transcoding uses a custom encoding for multi segment variables. The path variables **must not** refer to any repeated or mapped field, because client libraries are not capable of handling such variable expansion. 
The path variables **must not** capture the leading \"/\" character. The reason is that the most common use case \"{var}\" does not capture the leading \"/\" character. For consistency, all path variables must share the same behavior. Repeated message fields must not be mapped to URL query parameters, because no client library can support such complicated mapping. If an API needs to use a JSON array for request or response body, it can map the request or response body to a repeated field. However, some gRPC Transcoding implementations may not support this feature.", "id": "HttpRule", "properties": { "additionalBindings": { @@ -2943,6 +2962,25 @@ "description": "The sampling period of metric data points. For metrics which are written periodically, consecutive data points are stored at this time interval, excluding data loss due to errors. Metrics with a higher granularity have a smaller sampling period.", "format": "google-duration", "type": "string" + }, + "timeSeriesResourceHierarchyLevel": { + "description": "The scope of the timeseries data of the metric.", + "items": { + "enum": [ + "TIME_SERIES_RESOURCE_HIERARCHY_LEVEL_UNSPECIFIED", + "PROJECT", + "ORGANIZATION", + "FOLDER" + ], + "enumDescriptions": [ + "Do not use this default value.", + "Scopes a metric to a project.", + "Scopes a metric to an organization.", + "Scopes a metric to a folder." + ], + "type": "string" + }, + "type": "array" } }, "type": "object" @@ -2967,7 +3005,7 @@ "type": "object" }, "Mixin": { - "description": "Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inherting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. 
Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... }", + "description": "Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... }", "id": "Mixin", "properties": { "name": { @@ -3219,7 +3257,7 @@ "type": "string" }, "role": { - "description": "Required. Role to apply. Only allowlisted roles can be used at the specified granularity. The role must be one of the following: - 'roles/container.hostServiceAgentUser' applied on the shared VPC host project - 'roles/compute.securityAdmin' applied on the shared VPC host project - 'roles/compute.networkAdmin' applied on the shared VPC host project - 'roles/compute.xpnAdmin' applied on the shared VPC host project - 'roles/dns.admin' applied on the shared VPC host project - 'roles/logging.admin' applied on the shared VPC host project", + "description": "Required. Role to apply. Only allowlisted roles can be used at the specified granularity. 
The role must be one of the following: - 'roles/container.hostServiceAgentUser' applied on the shared VPC host project - 'roles/compute.securityAdmin' applied on the shared VPC host project - 'roles/compute.networkAdmin' applied on the shared VPC host project - 'roles/tpu.xpnAgent' applied on the shared VPC host project - 'roles/dns.admin' applied on the shared VPC host project - 'roles/logging.admin' applied on the shared VPC host project - 'roles/monitoring.viewer' applied on the shared VPC host project - 'roles/servicemanagement.quotaViewer' applied on the shared VPC host project", "type": "string" } }, @@ -3312,6 +3350,10 @@ "common": { "$ref": "CommonLanguageSettings", "description": "Some settings." + }, + "experimentalFeatures": { + "$ref": "ExperimentalFeatures", + "description": "Experimental features to be included during client library generation." } }, "type": "object" @@ -3585,6 +3627,20 @@ }, "type": "object" }, + "SelectiveGapicGeneration": { + "description": "This message is used to configure the generation of a subset of the RPCs in a service for client libraries.", + "id": "SelectiveGapicGeneration", + "properties": { + "methods": { + "description": "An allowlist of the fully qualified names of RPCs that should be included on public client surfaces.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "Service": { "description": "`Service` is the root object of Google API service configuration (service config). It describes the basic information about a logical service, such as the service name and the user-facing title, and delegates other aspects to sub-sections. Each sub-section is either a proto message or a repeated proto message that configures a specific aspect, such as auth. For more information, see each proto message definition. 
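[Editor's aside, not part of the patch: the SelectiveGapicGeneration schema just added is an allowlist of fully qualified RPC names; an illustrative Go struct matching that JSON shape follows (the corresponding generated type is added to servicenetworking-gen.go further down in this same patch). The RPC name shown is hypothetical.]

package main

import (
	"encoding/json"
	"fmt"
)

type SelectiveGapicGeneration struct {
	// Fully qualified RPC names to expose on public client surfaces.
	Methods []string `json:"methods,omitempty"`
}

func main() {
	g := SelectiveGapicGeneration{
		Methods: []string{"example.v1.Messaging.GetMessage"}, // hypothetical RPC name
	}
	b, _ := json.Marshal(g)
	fmt.Println(string(b)) // {"methods":["example.v1.Messaging.GetMessage"]}
}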
Example: type: google.api.Service name: calendar.googleapis.com title: Google Calendar API apis: - name: google.calendar.v3.Calendar visibility: rules: - selector: \"google.calendar.v3.*\" restriction: PREVIEW backend: rules: - selector: \"google.calendar.v3.*\" address: calendar.example.com authentication: providers: - id: google_calendar_auth jwks_uri: https://www.googleapis.com/oauth2/v1/certs issuer: https://securetoken.google.com rules: - selector: \"*\" requirements: provider_id: google_calendar_auth", "id": "Service", diff --git a/terraform/providers/google/vendor/google.golang.org/api/servicenetworking/v1/servicenetworking-gen.go b/terraform/providers/google/vendor/google.golang.org/api/servicenetworking/v1/servicenetworking-gen.go index 9075c85c3fe..3a7474063a0 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/servicenetworking/v1/servicenetworking-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/servicenetworking/v1/servicenetworking-gen.go @@ -323,9 +323,9 @@ type AddDnsRecordSetRequest struct { NullFields []string `json:"-"` } -func (s *AddDnsRecordSetRequest) MarshalJSON() ([]byte, error) { +func (s AddDnsRecordSetRequest) MarshalJSON() ([]byte, error) { type NoMethod AddDnsRecordSetRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AddDnsZoneMetadata: Metadata provided through GetOperation request for the @@ -364,9 +364,9 @@ type AddDnsZoneRequest struct { NullFields []string `json:"-"` } -func (s *AddDnsZoneRequest) MarshalJSON() ([]byte, error) { +func (s AddDnsZoneRequest) MarshalJSON() ([]byte, error) { type NoMethod AddDnsZoneRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AddDnsZoneResponse: Represents managed DNS zones created in the shared @@ -390,9 +390,9 @@ type AddDnsZoneResponse struct { NullFields []string `json:"-"` } -func (s *AddDnsZoneResponse) MarshalJSON() ([]byte, error) { +func (s AddDnsZoneResponse) MarshalJSON() ([]byte, error) { type NoMethod AddDnsZoneResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AddRolesMetadata: Metadata provided through GetOperation request for the LRO @@ -424,9 +424,9 @@ type AddRolesRequest struct { NullFields []string `json:"-"` } -func (s *AddRolesRequest) MarshalJSON() ([]byte, error) { +func (s AddRolesRequest) MarshalJSON() ([]byte, error) { type NoMethod AddRolesRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AddRolesResponse: Represents IAM roles added to the shared VPC host project. @@ -447,9 +447,9 @@ type AddRolesResponse struct { NullFields []string `json:"-"` } -func (s *AddRolesResponse) MarshalJSON() ([]byte, error) { +func (s AddRolesResponse) MarshalJSON() ([]byte, error) { type NoMethod AddRolesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AddSubnetworkRequest: Request to create a subnetwork in a previously peered @@ -542,6 +542,15 @@ type AddSubnetworkRequest struct { // SecondaryIpRangeSpecs: Optional. 
A list of secondary IP ranges to be created // within the new subnetwork. SecondaryIpRangeSpecs []*SecondaryIpRangeSpec `json:"secondaryIpRangeSpecs,omitempty"` + // SkipRequestedAddressValidation: Optional. Skips validating if the + // requested_address is in use by SN VPC’s peering group. Compute Engine will + // still perform this check and fail the request if the requested_address is in + // use. Note that Compute Engine does not check for the existence of dynamic + // routes when performing this check. Caller of this API should make sure that + // there are no dynamic routes overlapping with the + // requested_address/prefix_length IP address range otherwise the created + // subnet could cause misrouting. + SkipRequestedAddressValidation bool `json:"skipRequestedAddressValidation,omitempty"` // Subnetwork: Required. A name for the new subnet. For information about the // naming requirements, see subnetwork // (/compute/docs/reference/rest/v1/subnetworks) in the Compute API @@ -570,9 +579,9 @@ type AddSubnetworkRequest struct { NullFields []string `json:"-"` } -func (s *AddSubnetworkRequest) MarshalJSON() ([]byte, error) { +func (s AddSubnetworkRequest) MarshalJSON() ([]byte, error) { type NoMethod AddSubnetworkRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Api: Api is a light-weight descriptor for an API Interface. Interfaces are @@ -631,9 +640,9 @@ type Api struct { NullFields []string `json:"-"` } -func (s *Api) MarshalJSON() ([]byte, error) { +func (s Api) MarshalJSON() ([]byte, error) { type NoMethod Api - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuthProvider: Configuration for an authentication provider, including @@ -697,9 +706,9 @@ type AuthProvider struct { NullFields []string `json:"-"` } -func (s *AuthProvider) MarshalJSON() ([]byte, error) { +func (s AuthProvider) MarshalJSON() ([]byte, error) { type NoMethod AuthProvider - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuthRequirement: User-defined authentication requirements, including support @@ -735,9 +744,9 @@ type AuthRequirement struct { NullFields []string `json:"-"` } -func (s *AuthRequirement) MarshalJSON() ([]byte, error) { +func (s AuthRequirement) MarshalJSON() ([]byte, error) { type NoMethod AuthRequirement - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Authentication: `Authentication` defines the authentication configuration @@ -767,9 +776,9 @@ type Authentication struct { NullFields []string `json:"-"` } -func (s *Authentication) MarshalJSON() ([]byte, error) { +func (s Authentication) MarshalJSON() ([]byte, error) { type NoMethod Authentication - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AuthenticationRule: Authentication rules for the service. 
By default, if a @@ -801,9 +810,9 @@ type AuthenticationRule struct { NullFields []string `json:"-"` } -func (s *AuthenticationRule) MarshalJSON() ([]byte, error) { +func (s AuthenticationRule) MarshalJSON() ([]byte, error) { type NoMethod AuthenticationRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Backend: `Backend` defines the backend configuration for a service. @@ -824,9 +833,9 @@ type Backend struct { NullFields []string `json:"-"` } -func (s *Backend) MarshalJSON() ([]byte, error) { +func (s Backend) MarshalJSON() ([]byte, error) { type NoMethod Backend - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackendRule: A backend rule provides configuration for an individual API @@ -911,9 +920,9 @@ type BackendRule struct { NullFields []string `json:"-"` } -func (s *BackendRule) MarshalJSON() ([]byte, error) { +func (s BackendRule) MarshalJSON() ([]byte, error) { type NoMethod BackendRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *BackendRule) UnmarshalJSON(data []byte) error { @@ -969,9 +978,9 @@ type Billing struct { NullFields []string `json:"-"` } -func (s *Billing) MarshalJSON() ([]byte, error) { +func (s Billing) MarshalJSON() ([]byte, error) { type NoMethod Billing - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BillingDestination: Configuration of a specific billing destination @@ -996,9 +1005,9 @@ type BillingDestination struct { NullFields []string `json:"-"` } -func (s *BillingDestination) MarshalJSON() ([]byte, error) { +func (s BillingDestination) MarshalJSON() ([]byte, error) { type NoMethod BillingDestination - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CancelOperationRequest: The request message for Operations.CancelOperation. @@ -1080,9 +1089,9 @@ type ClientLibrarySettings struct { NullFields []string `json:"-"` } -func (s *ClientLibrarySettings) MarshalJSON() ([]byte, error) { +func (s ClientLibrarySettings) MarshalJSON() ([]byte, error) { type NoMethod ClientLibrarySettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CloudSQLConfig: Cloud SQL configuration. @@ -1107,9 +1116,9 @@ type CloudSQLConfig struct { NullFields []string `json:"-"` } -func (s *CloudSQLConfig) MarshalJSON() ([]byte, error) { +func (s CloudSQLConfig) MarshalJSON() ([]byte, error) { type NoMethod CloudSQLConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CommonLanguageSettings: Required information for every language. @@ -1128,6 +1137,9 @@ type CommonLanguageSettings struct { // ReferenceDocsUri: Link to automatically generated reference documentation. // Example: https://cloud.google.com/nodejs/docs/reference/asset/latest ReferenceDocsUri string `json:"referenceDocsUri,omitempty"` + // SelectiveGapicGeneration: Configuration for which RPCs should be generated + // in the GAPIC client. 
+ SelectiveGapicGeneration *SelectiveGapicGeneration `json:"selectiveGapicGeneration,omitempty"` // ForceSendFields is a list of field names (e.g. "Destinations") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See @@ -1141,9 +1153,9 @@ type CommonLanguageSettings struct { NullFields []string `json:"-"` } -func (s *CommonLanguageSettings) MarshalJSON() ([]byte, error) { +func (s CommonLanguageSettings) MarshalJSON() ([]byte, error) { type NoMethod CommonLanguageSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Connection: Represents a private connection resource. A private connection @@ -1183,9 +1195,9 @@ type Connection struct { NullFields []string `json:"-"` } -func (s *Connection) MarshalJSON() ([]byte, error) { +func (s Connection) MarshalJSON() ([]byte, error) { type NoMethod Connection - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ConsumerConfig: Configuration information for a private service access @@ -1248,9 +1260,9 @@ type ConsumerConfig struct { NullFields []string `json:"-"` } -func (s *ConsumerConfig) MarshalJSON() ([]byte, error) { +func (s ConsumerConfig) MarshalJSON() ([]byte, error) { type NoMethod ConsumerConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ConsumerConfigMetadata: Metadata provided through GetOperation request for @@ -1277,9 +1289,9 @@ type ConsumerProject struct { NullFields []string `json:"-"` } -func (s *ConsumerProject) MarshalJSON() ([]byte, error) { +func (s ConsumerProject) MarshalJSON() ([]byte, error) { type NoMethod ConsumerProject - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Context: `Context` defines which contexts an API requests. Example: context: @@ -1313,9 +1325,9 @@ type Context struct { NullFields []string `json:"-"` } -func (s *Context) MarshalJSON() ([]byte, error) { +func (s Context) MarshalJSON() ([]byte, error) { type NoMethod Context - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ContextRule: A context rule provides information about the context for an @@ -1327,9 +1339,11 @@ type ContextRule struct { // AllowedResponseExtensions: A list of full type names or extension IDs of // extensions allowed in grpc side channel from backend to client. AllowedResponseExtensions []string `json:"allowedResponseExtensions,omitempty"` - // Provided: A list of full type names of provided contexts. + // Provided: A list of full type names of provided contexts. It is used to + // support propagating HTTP headers and ETags from the response extension. Provided []string `json:"provided,omitempty"` - // Requested: A list of full type names of requested contexts. + // Requested: A list of full type names of requested contexts, only the + // requested context will be made available to the backend. Requested []string `json:"requested,omitempty"` // Selector: Selects the methods to which this rule applies. Refer to selector // for syntax details. 
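[Editor's aside on the receiver change repeated throughout this generated file (here and in every MarshalJSON hunk above and below): moving from `func (s *T)` to `func (s T)` puts MarshalJSON in the method set of both T and *T, so custom encoding runs whether callers hold a value or a pointer. A minimal sketch of the pattern, including the NoMethod alias that prevents infinite recursion — a simplified stand-in, not the gensupport implementation.]

package main

import (
	"encoding/json"
	"fmt"
)

type Foo struct {
	Name string `json:"name,omitempty"`
}

// Value receiver: both Foo and *Foo now satisfy json.Marshaler. With the old
// pointer receiver, json.Marshal(Foo{...}) would fall back to default field
// encoding, because a plain value stored in an interface is not addressable.
func (s Foo) MarshalJSON() ([]byte, error) {
	type NoMethod Foo // identical fields, no methods: the inner Marshal cannot recurse
	return json.Marshal(NoMethod(s))
}

func main() {
	b, _ := json.Marshal(Foo{Name: "x"})
	fmt.Println(string(b)) // {"name":"x"} — the custom marshaler runs for the value too
}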
@@ -1347,9 +1361,9 @@ type ContextRule struct { NullFields []string `json:"-"` } -func (s *ContextRule) MarshalJSON() ([]byte, error) { +func (s ContextRule) MarshalJSON() ([]byte, error) { type NoMethod ContextRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Control: Selects and configures the service controller used by the service. @@ -1374,9 +1388,9 @@ type Control struct { NullFields []string `json:"-"` } -func (s *Control) MarshalJSON() ([]byte, error) { +func (s Control) MarshalJSON() ([]byte, error) { type NoMethod Control - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CppSettings: Settings for C++ client libraries. @@ -1396,9 +1410,9 @@ type CppSettings struct { NullFields []string `json:"-"` } -func (s *CppSettings) MarshalJSON() ([]byte, error) { +func (s CppSettings) MarshalJSON() ([]byte, error) { type NoMethod CppSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CustomError: Customize service error responses. For example, list any @@ -1425,9 +1439,9 @@ type CustomError struct { NullFields []string `json:"-"` } -func (s *CustomError) MarshalJSON() ([]byte, error) { +func (s CustomError) MarshalJSON() ([]byte, error) { type NoMethod CustomError - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CustomErrorRule: A custom error rule. @@ -1452,9 +1466,9 @@ type CustomErrorRule struct { NullFields []string `json:"-"` } -func (s *CustomErrorRule) MarshalJSON() ([]byte, error) { +func (s CustomErrorRule) MarshalJSON() ([]byte, error) { type NoMethod CustomErrorRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CustomHttpPattern: A custom pattern is used for defining custom HTTP verb. 
@@ -1476,9 +1490,9 @@ type CustomHttpPattern struct { NullFields []string `json:"-"` } -func (s *CustomHttpPattern) MarshalJSON() ([]byte, error) { +func (s CustomHttpPattern) MarshalJSON() ([]byte, error) { type NoMethod CustomHttpPattern - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DeleteConnectionMetadata: Metadata provided through GetOperation request for @@ -1508,9 +1522,9 @@ type DeleteConnectionRequest struct { NullFields []string `json:"-"` } -func (s *DeleteConnectionRequest) MarshalJSON() ([]byte, error) { +func (s DeleteConnectionRequest) MarshalJSON() ([]byte, error) { type NoMethod DeleteConnectionRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DeletePeeredDnsDomainMetadata: Metadata provided through GetOperation @@ -1538,9 +1552,9 @@ type DisableVpcServiceControlsRequest struct { NullFields []string `json:"-"` } -func (s *DisableVpcServiceControlsRequest) MarshalJSON() ([]byte, error) { +func (s DisableVpcServiceControlsRequest) MarshalJSON() ([]byte, error) { type NoMethod DisableVpcServiceControlsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DnsRecordSet: Represents a DNS record set resource. @@ -1573,9 +1587,9 @@ type DnsRecordSet struct { NullFields []string `json:"-"` } -func (s *DnsRecordSet) MarshalJSON() ([]byte, error) { +func (s DnsRecordSet) MarshalJSON() ([]byte, error) { type NoMethod DnsRecordSet - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DnsZone: Represents a DNS zone resource. @@ -1601,9 +1615,9 @@ type DnsZone struct { NullFields []string `json:"-"` } -func (s *DnsZone) MarshalJSON() ([]byte, error) { +func (s DnsZone) MarshalJSON() ([]byte, error) { type NoMethod DnsZone - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DnsZonePair: * Represents a pair of private and peering DNS zone resources. 
@@ -1627,9 +1641,9 @@ type DnsZonePair struct { NullFields []string `json:"-"` } -func (s *DnsZonePair) MarshalJSON() ([]byte, error) { +func (s DnsZonePair) MarshalJSON() ([]byte, error) { type NoMethod DnsZonePair - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Documentation: `Documentation` provides the information for describing a @@ -1700,9 +1714,9 @@ type Documentation struct { NullFields []string `json:"-"` } -func (s *Documentation) MarshalJSON() ([]byte, error) { +func (s Documentation) MarshalJSON() ([]byte, error) { type NoMethod Documentation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DocumentationRule: A documentation rule provides information about @@ -1740,9 +1754,9 @@ type DocumentationRule struct { NullFields []string `json:"-"` } -func (s *DocumentationRule) MarshalJSON() ([]byte, error) { +func (s DocumentationRule) MarshalJSON() ([]byte, error) { type NoMethod DocumentationRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DotnetSettings: Settings for Dotnet client libraries. @@ -1784,9 +1798,9 @@ type DotnetSettings struct { NullFields []string `json:"-"` } -func (s *DotnetSettings) MarshalJSON() ([]byte, error) { +func (s DotnetSettings) MarshalJSON() ([]byte, error) { type NoMethod DotnetSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Empty: A generic empty message that you can re-use to avoid defining @@ -1818,9 +1832,9 @@ type EnableVpcServiceControlsRequest struct { NullFields []string `json:"-"` } -func (s *EnableVpcServiceControlsRequest) MarshalJSON() ([]byte, error) { +func (s EnableVpcServiceControlsRequest) MarshalJSON() ([]byte, error) { type NoMethod EnableVpcServiceControlsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Endpoint: `Endpoint` describes a network address of a service that serves a @@ -1869,9 +1883,9 @@ type Endpoint struct { NullFields []string `json:"-"` } -func (s *Endpoint) MarshalJSON() ([]byte, error) { +func (s Endpoint) MarshalJSON() ([]byte, error) { type NoMethod Endpoint - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Enum: Enum type definition. @@ -1907,9 +1921,9 @@ type Enum struct { NullFields []string `json:"-"` } -func (s *Enum) MarshalJSON() ([]byte, error) { +func (s Enum) MarshalJSON() ([]byte, error) { type NoMethod Enum - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // EnumValue: Enum value definition. 
@@ -1933,9 +1947,36 @@ type EnumValue struct { NullFields []string `json:"-"` } -func (s *EnumValue) MarshalJSON() ([]byte, error) { +func (s EnumValue) MarshalJSON() ([]byte, error) { type NoMethod EnumValue - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ExperimentalFeatures: Experimental features to be included during client +// library generation. These fields will be deprecated once the feature +// graduates and is enabled by default. +type ExperimentalFeatures struct { + // RestAsyncIoEnabled: Enables generation of asynchronous REST clients if + // `rest` transport is enabled. By default, asynchronous REST clients will not + // be generated. This feature will be enabled by default 1 month after + // launching the feature in preview packages. + RestAsyncIoEnabled bool `json:"restAsyncIoEnabled,omitempty"` + // ForceSendFields is a list of field names (e.g. "RestAsyncIoEnabled") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "RestAsyncIoEnabled") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ExperimentalFeatures) MarshalJSON() ([]byte, error) { + type NoMethod ExperimentalFeatures + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Field: A single field of a message type. @@ -2004,9 +2045,9 @@ type Field struct { NullFields []string `json:"-"` } -func (s *Field) MarshalJSON() ([]byte, error) { +func (s Field) MarshalJSON() ([]byte, error) { type NoMethod Field - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FieldPolicy: Google API Policy Annotation This message defines a simple API @@ -2045,9 +2086,9 @@ type FieldPolicy struct { NullFields []string `json:"-"` } -func (s *FieldPolicy) MarshalJSON() ([]byte, error) { +func (s FieldPolicy) MarshalJSON() ([]byte, error) { type NoMethod FieldPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetDnsZoneResponse: Represents managed DNS zones created in the shared @@ -2074,9 +2115,9 @@ type GetDnsZoneResponse struct { NullFields []string `json:"-"` } -func (s *GetDnsZoneResponse) MarshalJSON() ([]byte, error) { +func (s GetDnsZoneResponse) MarshalJSON() ([]byte, error) { type NoMethod GetDnsZoneResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoSettings: Settings for Go client libraries. 
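[Editor's aside, not part of the patch: why the new ExperimentalFeatures type carries a ForceSendFields list. RestAsyncIoEnabled is a bool tagged `omitempty`, so a false value vanishes from the request body unless it is force-sent; the sketch below mirrors with a plain map what gensupport.MarshalJSON does for every type in this file.]

package main

import (
	"encoding/json"
	"fmt"
	"slices"
)

type ExperimentalFeatures struct {
	RestAsyncIoEnabled bool     `json:"restAsyncIoEnabled,omitempty"`
	ForceSendFields    []string `json:"-"`
}

// marshal is a simplified stand-in for gensupport.MarshalJSON: a zero-value
// field is emitted only when it appears in ForceSendFields.
func marshal(f ExperimentalFeatures) ([]byte, error) {
	m := map[string]any{}
	if f.RestAsyncIoEnabled || slices.Contains(f.ForceSendFields, "RestAsyncIoEnabled") {
		m["restAsyncIoEnabled"] = f.RestAsyncIoEnabled
	}
	return json.Marshal(m)
}

func main() {
	b, _ := marshal(ExperimentalFeatures{})
	fmt.Println(string(b)) // {} — an unset false is omitted entirely
	b, _ = marshal(ExperimentalFeatures{ForceSendFields: []string{"RestAsyncIoEnabled"}})
	fmt.Println(string(b)) // {"restAsyncIoEnabled":false} — explicitly disabled
}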
@@ -2096,9 +2137,9 @@ type GoSettings struct { NullFields []string `json:"-"` } -func (s *GoSettings) MarshalJSON() ([]byte, error) { +func (s GoSettings) MarshalJSON() ([]byte, error) { type NoMethod GoSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudServicenetworkingV1ConsumerConfigReservedRange: Allocated IP @@ -2125,9 +2166,9 @@ type GoogleCloudServicenetworkingV1ConsumerConfigReservedRange struct { NullFields []string `json:"-"` } -func (s *GoogleCloudServicenetworkingV1ConsumerConfigReservedRange) MarshalJSON() ([]byte, error) { +func (s GoogleCloudServicenetworkingV1ConsumerConfigReservedRange) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudServicenetworkingV1ConsumerConfigReservedRange - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudServicenetworkingV1betaConnection: Represents a private @@ -2165,9 +2206,9 @@ type GoogleCloudServicenetworkingV1betaConnection struct { NullFields []string `json:"-"` } -func (s *GoogleCloudServicenetworkingV1betaConnection) MarshalJSON() ([]byte, error) { +func (s GoogleCloudServicenetworkingV1betaConnection) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudServicenetworkingV1betaConnection - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleCloudServicenetworkingV1betaSubnetwork: Represents a subnet that was @@ -2197,9 +2238,9 @@ type GoogleCloudServicenetworkingV1betaSubnetwork struct { NullFields []string `json:"-"` } -func (s *GoogleCloudServicenetworkingV1betaSubnetwork) MarshalJSON() ([]byte, error) { +func (s GoogleCloudServicenetworkingV1betaSubnetwork) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudServicenetworkingV1betaSubnetwork - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Http: Defines the HTTP configuration for an API service. It contains a list @@ -2228,9 +2269,9 @@ type Http struct { NullFields []string `json:"-"` } -func (s *Http) MarshalJSON() ([]byte, error) { +func (s Http) MarshalJSON() ([]byte, error) { type NoMethod Http - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpRule: gRPC Transcoding gRPC Transcoding is a feature for mapping between @@ -2345,9 +2386,9 @@ func (s *Http) MarshalJSON() ([]byte, error) { // to a REST endpoint, achieving the same effect as the proto annotation. This // can be particularly useful if you have a proto that is reused in multiple // services. Note that any transcoding specified in the service config will -// override any matching transcoding configuration in the proto. Example below -// selects a gRPC method and applies HttpRule to it. http: rules: - selector: -// example.v1.Messaging.GetMessage get: +// override any matching transcoding configuration in the proto. 
The following +// example selects a gRPC method and applies an `HttpRule` to it: http: rules: +// - selector: example.v1.Messaging.GetMessage get: // /v1/messages/{message_id}/{sub.subfield} Special notes When gRPC Transcoding // is used to map a gRPC to JSON REST endpoints, the proto to JSON conversion // must follow the proto3 specification @@ -2417,9 +2458,9 @@ type HttpRule struct { NullFields []string `json:"-"` } -func (s *HttpRule) MarshalJSON() ([]byte, error) { +func (s HttpRule) MarshalJSON() ([]byte, error) { type NoMethod HttpRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JavaSettings: Settings for Java client libraries. @@ -2455,9 +2496,9 @@ type JavaSettings struct { NullFields []string `json:"-"` } -func (s *JavaSettings) MarshalJSON() ([]byte, error) { +func (s JavaSettings) MarshalJSON() ([]byte, error) { type NoMethod JavaSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JwtLocation: Specifies a location to extract JWT from an API request. @@ -2488,9 +2529,9 @@ type JwtLocation struct { NullFields []string `json:"-"` } -func (s *JwtLocation) MarshalJSON() ([]byte, error) { +func (s JwtLocation) MarshalJSON() ([]byte, error) { type NoMethod JwtLocation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LabelDescriptor: A description of a label. @@ -2519,9 +2560,9 @@ type LabelDescriptor struct { NullFields []string `json:"-"` } -func (s *LabelDescriptor) MarshalJSON() ([]byte, error) { +func (s LabelDescriptor) MarshalJSON() ([]byte, error) { type NoMethod LabelDescriptor - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListConnectionsResponse: ListConnectionsResponse is the response to list @@ -2545,9 +2586,9 @@ type ListConnectionsResponse struct { NullFields []string `json:"-"` } -func (s *ListConnectionsResponse) MarshalJSON() ([]byte, error) { +func (s ListConnectionsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListConnectionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListDnsRecordSetsResponse: Represents all DNS RecordSets associated with the @@ -2571,9 +2612,9 @@ type ListDnsRecordSetsResponse struct { NullFields []string `json:"-"` } -func (s *ListDnsRecordSetsResponse) MarshalJSON() ([]byte, error) { +func (s ListDnsRecordSetsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListDnsRecordSetsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListDnsZonesResponse: Represents all DNS zones in the shared producer host @@ -2598,9 +2639,9 @@ type ListDnsZonesResponse struct { NullFields []string `json:"-"` } -func (s *ListDnsZonesResponse) MarshalJSON() ([]byte, error) { +func (s ListDnsZonesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListDnsZonesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListOperationsResponse: The 
response message for Operations.ListOperations. @@ -2626,9 +2667,9 @@ type ListOperationsResponse struct { NullFields []string `json:"-"` } -func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { +func (s ListOperationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListOperationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListPeeredDnsDomainsResponse: Response to list peered DNS domains for a @@ -2652,9 +2693,9 @@ type ListPeeredDnsDomainsResponse struct { NullFields []string `json:"-"` } -func (s *ListPeeredDnsDomainsResponse) MarshalJSON() ([]byte, error) { +func (s ListPeeredDnsDomainsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListPeeredDnsDomainsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LogDescriptor: A description of a log type. Example in YAML format: - name: @@ -2690,9 +2731,9 @@ type LogDescriptor struct { NullFields []string `json:"-"` } -func (s *LogDescriptor) MarshalJSON() ([]byte, error) { +func (s LogDescriptor) MarshalJSON() ([]byte, error) { type NoMethod LogDescriptor - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Logging: Logging configuration of the service. The following example shows @@ -2731,9 +2772,9 @@ type Logging struct { NullFields []string `json:"-"` } -func (s *Logging) MarshalJSON() ([]byte, error) { +func (s Logging) MarshalJSON() ([]byte, error) { type NoMethod Logging - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LoggingDestination: Configuration of a specific logging destination (the @@ -2760,9 +2801,9 @@ type LoggingDestination struct { NullFields []string `json:"-"` } -func (s *LoggingDestination) MarshalJSON() ([]byte, error) { +func (s LoggingDestination) MarshalJSON() ([]byte, error) { type NoMethod LoggingDestination - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LongRunning: Describes settings to use when generating API methods that use @@ -2794,9 +2835,9 @@ type LongRunning struct { NullFields []string `json:"-"` } -func (s *LongRunning) MarshalJSON() ([]byte, error) { +func (s LongRunning) MarshalJSON() ([]byte, error) { type NoMethod LongRunning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *LongRunning) UnmarshalJSON(data []byte) error { @@ -2847,9 +2888,9 @@ type Method struct { NullFields []string `json:"-"` } -func (s *Method) MarshalJSON() ([]byte, error) { +func (s Method) MarshalJSON() ([]byte, error) { type NoMethod Method - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MethodPolicy: Defines policies applying to an RPC method. 
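[Editor's aside, not from this file: the List*Response types whose receivers change above (ListConnectionsResponse, ListDnsZonesResponse, ListOperationsResponse, ...) conventionally pair an items slice with a NextPageToken. A generic drain loop under that assumption — listPage and Item are hypothetical stand-ins for a real client call.]

package main

import "fmt"

type Item struct{ Name string }

type listResponse struct {
	Items         []Item
	NextPageToken string
}

// listPage fakes one page of results; a real client would issue an HTTP call
// carrying pageToken and return the server's response.
func listPage(pageToken string) listResponse {
	if pageToken == "" {
		return listResponse{Items: []Item{{"a"}, {"b"}}, NextPageToken: "t1"}
	}
	return listResponse{Items: []Item{{"c"}}} // empty token ends the walk
}

func main() {
	var all []Item
	for token := ""; ; {
		resp := listPage(token)
		all = append(all, resp.Items...)
		if resp.NextPageToken == "" {
			break
		}
		token = resp.NextPageToken
	}
	fmt.Println(len(all)) // 3
}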
@@ -2874,9 +2915,9 @@ type MethodPolicy struct { NullFields []string `json:"-"` } -func (s *MethodPolicy) MarshalJSON() ([]byte, error) { +func (s MethodPolicy) MarshalJSON() ([]byte, error) { type NoMethod MethodPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MethodSettings: Describes the generator configuration for a method. @@ -2915,9 +2956,9 @@ type MethodSettings struct { NullFields []string `json:"-"` } -func (s *MethodSettings) MarshalJSON() ([]byte, error) { +func (s MethodSettings) MarshalJSON() ([]byte, error) { type NoMethod MethodSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MetricDescriptor: Defines a metric type and its schema. Once a metric @@ -3080,9 +3121,9 @@ type MetricDescriptor struct { NullFields []string `json:"-"` } -func (s *MetricDescriptor) MarshalJSON() ([]byte, error) { +func (s MetricDescriptor) MarshalJSON() ([]byte, error) { type NoMethod MetricDescriptor - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MetricDescriptorMetadata: Additional annotations that can be used to guide @@ -3132,6 +3173,16 @@ type MetricDescriptorMetadata struct { // interval, excluding data loss due to errors. Metrics with a higher // granularity have a smaller sampling period. SamplePeriod string `json:"samplePeriod,omitempty"` + // TimeSeriesResourceHierarchyLevel: The scope of the timeseries data of the + // metric. + // + // Possible values: + // "TIME_SERIES_RESOURCE_HIERARCHY_LEVEL_UNSPECIFIED" - Do not use this + // default value. + // "PROJECT" - Scopes a metric to a project. + // "ORGANIZATION" - Scopes a metric to an organization. + // "FOLDER" - Scopes a metric to a folder. + TimeSeriesResourceHierarchyLevel []string `json:"timeSeriesResourceHierarchyLevel,omitempty"` // ForceSendFields is a list of field names (e.g. "IngestDelay") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See @@ -3145,9 +3196,9 @@ type MetricDescriptorMetadata struct { NullFields []string `json:"-"` } -func (s *MetricDescriptorMetadata) MarshalJSON() ([]byte, error) { +func (s MetricDescriptorMetadata) MarshalJSON() ([]byte, error) { type NoMethod MetricDescriptorMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MetricRule: Bind API methods to metrics. Binding a method to a metric causes @@ -3174,9 +3225,9 @@ type MetricRule struct { NullFields []string `json:"-"` } -func (s *MetricRule) MarshalJSON() ([]byte, error) { +func (s MetricRule) MarshalJSON() ([]byte, error) { type NoMethod MetricRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Mixin: Declares an API Interface to be included in this interface. The @@ -3199,7 +3250,7 @@ func (s *MetricRule) MarshalJSON() ([]byte, error) { // mixin construct implies that all methods in `AccessControl` are also // declared with same name and request/response types in `Storage`. 
A // documentation generator or annotation processor will see the effective -// `Storage.GetAcl` method after inherting documentation and annotations as +// `Storage.GetAcl` method after inheriting documentation and annotations as // follows: service Storage { // Get the underlying ACL object. rpc // GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = // "/v2/{resource=**}:getAcl"; } ... } Note how the version in the path pattern @@ -3229,9 +3280,9 @@ type Mixin struct { NullFields []string `json:"-"` } -func (s *Mixin) MarshalJSON() ([]byte, error) { +func (s Mixin) MarshalJSON() ([]byte, error) { type NoMethod Mixin - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MonitoredResourceDescriptor: An object that describes the schema of a @@ -3318,9 +3369,9 @@ type MonitoredResourceDescriptor struct { NullFields []string `json:"-"` } -func (s *MonitoredResourceDescriptor) MarshalJSON() ([]byte, error) { +func (s MonitoredResourceDescriptor) MarshalJSON() ([]byte, error) { type NoMethod MonitoredResourceDescriptor - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Monitoring: Monitoring configuration of the service. The example below shows @@ -3375,9 +3426,9 @@ type Monitoring struct { NullFields []string `json:"-"` } -func (s *Monitoring) MarshalJSON() ([]byte, error) { +func (s Monitoring) MarshalJSON() ([]byte, error) { type NoMethod Monitoring - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MonitoringDestination: Configuration of a specific monitoring destination @@ -3402,9 +3453,9 @@ type MonitoringDestination struct { NullFields []string `json:"-"` } -func (s *MonitoringDestination) MarshalJSON() ([]byte, error) { +func (s MonitoringDestination) MarshalJSON() ([]byte, error) { type NoMethod MonitoringDestination - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeSettings: Settings for Node client libraries. 
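[Editor's aside, not part of the patch: the Mixin doc comment a few hunks above describes how an inherited HTTP path is rewritten — the version prefix is replaced by the including interface's version, and paths are placed under `root` when it is set. A sketch of that rewrite, with the worked example from the comment.]

package main

import (
	"fmt"
	"strings"
)

// inheritPath applies the Mixin rewrite described above: swap the leading
// version segment for newVersion and, if root is non-empty, nest the
// remainder under it. Illustrative only, not library code.
func inheritPath(path, newVersion, root string) string {
	parts := strings.SplitN(strings.TrimPrefix(path, "/"), "/", 2)
	rest := ""
	if len(parts) == 2 {
		rest = parts[1]
	}
	if root != "" {
		rest = root + "/" + rest
	}
	return "/" + newVersion + "/" + rest
}

func main() {
	fmt.Println(inheritPath("/v1/{resource=**}:getAcl", "v2", ""))     // /v2/{resource=**}:getAcl
	fmt.Println(inheritPath("/v1/{resource=**}:getAcl", "v2", "acls")) // /v2/acls/{resource=**}:getAcl
}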
@@ -3424,9 +3475,9 @@ type NodeSettings struct { NullFields []string `json:"-"` } -func (s *NodeSettings) MarshalJSON() ([]byte, error) { +func (s NodeSettings) MarshalJSON() ([]byte, error) { type NoMethod NodeSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OAuthRequirements: OAuth scopes are a way to define data and permissions on @@ -3463,9 +3514,9 @@ type OAuthRequirements struct { NullFields []string `json:"-"` } -func (s *OAuthRequirements) MarshalJSON() ([]byte, error) { +func (s OAuthRequirements) MarshalJSON() ([]byte, error) { type NoMethod OAuthRequirements - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Operation: This resource represents a long-running operation that is the @@ -3510,9 +3561,9 @@ type Operation struct { NullFields []string `json:"-"` } -func (s *Operation) MarshalJSON() ([]byte, error) { +func (s Operation) MarshalJSON() ([]byte, error) { type NoMethod Operation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Option: A protocol buffer option, which can be attached to a message, field, @@ -3542,9 +3593,9 @@ type Option struct { NullFields []string `json:"-"` } -func (s *Option) MarshalJSON() ([]byte, error) { +func (s Option) MarshalJSON() ([]byte, error) { type NoMethod Option - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Page: Represents a documentation page. A page can contain subpages to @@ -3578,9 +3629,9 @@ type Page struct { NullFields []string `json:"-"` } -func (s *Page) MarshalJSON() ([]byte, error) { +func (s Page) MarshalJSON() ([]byte, error) { type NoMethod Page - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PartialDeleteConnectionMetadata: Metadata provided through GetOperation @@ -3612,9 +3663,9 @@ type PeeredDnsDomain struct { NullFields []string `json:"-"` } -func (s *PeeredDnsDomain) MarshalJSON() ([]byte, error) { +func (s PeeredDnsDomain) MarshalJSON() ([]byte, error) { type NoMethod PeeredDnsDomain - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PeeredDnsDomainMetadata: Metadata provided through GetOperation request for @@ -3639,9 +3690,9 @@ type PhpSettings struct { NullFields []string `json:"-"` } -func (s *PhpSettings) MarshalJSON() ([]byte, error) { +func (s PhpSettings) MarshalJSON() ([]byte, error) { type NoMethod PhpSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PolicyBinding: Grouping of IAM role and IAM member. 
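[Editor's aside: the PolicyBinding hunk just below swaps 'roles/compute.xpnAdmin' for 'roles/tpu.xpnAgent' and extends the allowlist with monitoring.viewer and servicemanagement.quotaViewer. A hypothetical client-side mirror of that list, not part of the library — the server performs the real validation.]

package main

import "fmt"

// allowedHostProjectRoles mirrors the allowlist in the updated
// PolicyBinding.Role description; purely illustrative.
var allowedHostProjectRoles = map[string]bool{
	"roles/container.hostServiceAgentUser": true,
	"roles/compute.securityAdmin":          true,
	"roles/compute.networkAdmin":           true,
	"roles/tpu.xpnAgent":                   true,
	"roles/dns.admin":                      true,
	"roles/logging.admin":                  true,
	"roles/monitoring.viewer":              true,
	"roles/servicemanagement.quotaViewer":  true,
}

func main() {
	for _, r := range []string{"roles/compute.xpnAdmin", "roles/tpu.xpnAgent"} {
		fmt.Printf("%s allowed: %v\n", r, allowedHostProjectRoles[r]) // false, then true
	}
}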
@@ -3656,9 +3707,11 @@ type PolicyBinding struct { // 'roles/container.hostServiceAgentUser' applied on the shared VPC host // project - 'roles/compute.securityAdmin' applied on the shared VPC host // project - 'roles/compute.networkAdmin' applied on the shared VPC host - // project - 'roles/compute.xpnAdmin' applied on the shared VPC host project - + // project - 'roles/tpu.xpnAgent' applied on the shared VPC host project - // 'roles/dns.admin' applied on the shared VPC host project - - // 'roles/logging.admin' applied on the shared VPC host project + // 'roles/logging.admin' applied on the shared VPC host project - + // 'roles/monitoring.viewer' applied on the shared VPC host project - + // 'roles/servicemanagement.quotaViewer' applied on the shared VPC host project Role string `json:"role,omitempty"` // ForceSendFields is a list of field names (e.g. "Member") to unconditionally // include in API requests. By default, fields with empty or default values are @@ -3673,9 +3726,9 @@ type PolicyBinding struct { NullFields []string `json:"-"` } -func (s *PolicyBinding) MarshalJSON() ([]byte, error) { +func (s PolicyBinding) MarshalJSON() ([]byte, error) { type NoMethod PolicyBinding - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Publishing: This message configures the settings for publishing Google Cloud @@ -3742,15 +3795,18 @@ type Publishing struct { NullFields []string `json:"-"` } -func (s *Publishing) MarshalJSON() ([]byte, error) { +func (s Publishing) MarshalJSON() ([]byte, error) { type NoMethod Publishing - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PythonSettings: Settings for Python client libraries. type PythonSettings struct { // Common: Some settings. Common *CommonLanguageSettings `json:"common,omitempty"` + // ExperimentalFeatures: Experimental features to be included during client + // library generation. + ExperimentalFeatures *ExperimentalFeatures `json:"experimentalFeatures,omitempty"` // ForceSendFields is a list of field names (e.g. "Common") to unconditionally // include in API requests. By default, fields with empty or default values are // omitted from API requests. 
See @@ -3764,9 +3820,9 @@ type PythonSettings struct { NullFields []string `json:"-"` } -func (s *PythonSettings) MarshalJSON() ([]byte, error) { +func (s PythonSettings) MarshalJSON() ([]byte, error) { type NoMethod PythonSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Quota: Quota configuration helps to achieve fairness and budgeting in @@ -3809,9 +3865,9 @@ type Quota struct { NullFields []string `json:"-"` } -func (s *Quota) MarshalJSON() ([]byte, error) { +func (s Quota) MarshalJSON() ([]byte, error) { type NoMethod Quota - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // QuotaLimit: `QuotaLimit` defines a specific limit that applies over a @@ -3883,9 +3939,9 @@ type QuotaLimit struct { NullFields []string `json:"-"` } -func (s *QuotaLimit) MarshalJSON() ([]byte, error) { +func (s QuotaLimit) MarshalJSON() ([]byte, error) { type NoMethod QuotaLimit - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Range: Represents a found unused range. @@ -3910,9 +3966,9 @@ type Range struct { NullFields []string `json:"-"` } -func (s *Range) MarshalJSON() ([]byte, error) { +func (s Range) MarshalJSON() ([]byte, error) { type NoMethod Range - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RangeReservation: Represents a range reservation. @@ -3954,9 +4010,9 @@ type RangeReservation struct { NullFields []string `json:"-"` } -func (s *RangeReservation) MarshalJSON() ([]byte, error) { +func (s RangeReservation) MarshalJSON() ([]byte, error) { type NoMethod RangeReservation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RemoveDnsRecordSetMetadata: Metadata provided through GetOperation request @@ -3992,9 +4048,9 @@ type RemoveDnsRecordSetRequest struct { NullFields []string `json:"-"` } -func (s *RemoveDnsRecordSetRequest) MarshalJSON() ([]byte, error) { +func (s RemoveDnsRecordSetRequest) MarshalJSON() ([]byte, error) { type NoMethod RemoveDnsRecordSetRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RemoveDnsRecordSetResponse: Blank message response type for @@ -4032,9 +4088,9 @@ type RemoveDnsZoneRequest struct { NullFields []string `json:"-"` } -func (s *RemoveDnsZoneRequest) MarshalJSON() ([]byte, error) { +func (s RemoveDnsZoneRequest) MarshalJSON() ([]byte, error) { type NoMethod RemoveDnsZoneRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RemoveDnsZoneResponse: Blank message response type for RemoveDnsZone API @@ -4069,9 +4125,9 @@ type Route struct { NullFields []string `json:"-"` } -func (s *Route) MarshalJSON() ([]byte, error) { +func (s Route) MarshalJSON() ([]byte, error) { type NoMethod Route - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RubySettings: Settings for Ruby client 
@@ -4091,9 +4147,9 @@ type RubySettings struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *RubySettings) MarshalJSON() ([]byte, error) {
+func (s RubySettings) MarshalJSON() ([]byte, error) {
 	type NoMethod RubySettings
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // SearchRangeRequest: Request to search for an unused range within allocated
@@ -4122,9 +4178,9 @@ type SearchRangeRequest struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *SearchRangeRequest) MarshalJSON() ([]byte, error) {
+func (s SearchRangeRequest) MarshalJSON() ([]byte, error) {
 	type NoMethod SearchRangeRequest
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 type SecondaryIpRange struct {
@@ -4145,9 +4201,9 @@ type SecondaryIpRange struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *SecondaryIpRange) MarshalJSON() ([]byte, error) {
+func (s SecondaryIpRange) MarshalJSON() ([]byte, error) {
 	type NoMethod SecondaryIpRange
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 type SecondaryIpRangeSpec struct {
@@ -4184,9 +4240,33 @@ type SecondaryIpRangeSpec struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *SecondaryIpRangeSpec) MarshalJSON() ([]byte, error) {
+func (s SecondaryIpRangeSpec) MarshalJSON() ([]byte, error) {
 	type NoMethod SecondaryIpRangeSpec
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// SelectiveGapicGeneration: This message is used to configure the generation
+// of a subset of the RPCs in a service for client libraries.
+type SelectiveGapicGeneration struct {
+	// Methods: An allowlist of the fully qualified names of RPCs that should be
+	// included on public client surfaces.
+	Methods []string `json:"methods,omitempty"`
+	// ForceSendFields is a list of field names (e.g. "Methods") to unconditionally
+	// include in API requests. By default, fields with empty or default values are
+	// omitted from API requests. See
+	// https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+	// details.
+	ForceSendFields []string `json:"-"`
+	// NullFields is a list of field names (e.g. "Methods") to include in API
+	// requests with the JSON null value. By default, fields with empty values are
+	// omitted from API requests. See
+	// https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+	NullFields []string `json:"-"`
+}
+
+func (s SelectiveGapicGeneration) MarshalJSON() ([]byte, error) {
+	type NoMethod SelectiveGapicGeneration
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // Service: `Service` is the root object of Google API service configuration
@@ -4304,9 +4384,9 @@ type Service struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *Service) MarshalJSON() ([]byte, error) {
+func (s Service) MarshalJSON() ([]byte, error) {
 	type NoMethod Service
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // SourceContext: `SourceContext` represents information about the source of a
@@ -4329,9 +4409,9 @@ type SourceContext struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *SourceContext) MarshalJSON() ([]byte, error) {
+func (s SourceContext) MarshalJSON() ([]byte, error) {
 	type NoMethod SourceContext
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // SourceInfo: Source information used to create a Service Config
@@ -4351,9 +4431,9 @@ type SourceInfo struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *SourceInfo) MarshalJSON() ([]byte, error) {
+func (s SourceInfo) MarshalJSON() ([]byte, error) {
 	type NoMethod SourceInfo
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // Status: The `Status` type defines a logical error model that is suitable for
@@ -4385,9 +4465,9 @@ type Status struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *Status) MarshalJSON() ([]byte, error) {
+func (s Status) MarshalJSON() ([]byte, error) {
 	type NoMethod Status
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // Subnetwork: Represents a subnet that was created or discovered by a private
@@ -4421,9 +4501,9 @@ type Subnetwork struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *Subnetwork) MarshalJSON() ([]byte, error) {
+func (s Subnetwork) MarshalJSON() ([]byte, error) {
 	type NoMethod Subnetwork
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // SystemParameter: Define a parameter's name and location. The parameter may
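The new `SelectiveGapicGeneration` type above follows the same generated shape as every other struct in this file: an `omitempty` payload field plus the `ForceSendFields`/`NullFields` escape hatches. A hypothetical usage sketch, not taken from this patch, of what `ForceSendFields` buys a caller; the real serialization lives in the internal `gensupport` package and is approximated here with a map to stay self-contained:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type SelectiveGapicGeneration struct {
	Methods         []string `json:"methods,omitempty"`
	ForceSendFields []string `json:"-"`
}

func (s SelectiveGapicGeneration) MarshalJSON() ([]byte, error) {
	m := map[string]any{}
	// omitempty semantics: an empty methods list is normally dropped...
	if len(s.Methods) > 0 {
		m["methods"] = s.Methods
	}
	// ...unless the caller explicitly forces it onto the wire, which is
	// the documented purpose of ForceSendFields in the generated structs.
	for _, f := range s.ForceSendFields {
		if f == "Methods" && len(s.Methods) == 0 {
			m["methods"] = []string{}
		}
	}
	return json.Marshal(m)
}

func main() {
	a, _ := json.Marshal(SelectiveGapicGeneration{})
	b, _ := json.Marshal(SelectiveGapicGeneration{ForceSendFields: []string{"Methods"}})
	fmt.Println(string(a), string(b)) // {} {"methods":[]}
}
```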
@@ -4452,9 +4532,9 @@ type SystemParameter struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *SystemParameter) MarshalJSON() ([]byte, error) {
+func (s SystemParameter) MarshalJSON() ([]byte, error) {
 	type NoMethod SystemParameter
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // SystemParameterRule: Define a system parameter rule mapping system parameter
@@ -4481,9 +4561,9 @@ type SystemParameterRule struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *SystemParameterRule) MarshalJSON() ([]byte, error) {
+func (s SystemParameterRule) MarshalJSON() ([]byte, error) {
 	type NoMethod SystemParameterRule
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // SystemParameters: ### System parameter configuration A system parameter is a
@@ -4516,9 +4596,9 @@ type SystemParameters struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *SystemParameters) MarshalJSON() ([]byte, error) {
+func (s SystemParameters) MarshalJSON() ([]byte, error) {
 	type NoMethod SystemParameters
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // Type: A protocol buffer message type.
@@ -4556,9 +4636,9 @@ type Type struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *Type) MarshalJSON() ([]byte, error) {
+func (s Type) MarshalJSON() ([]byte, error) {
 	type NoMethod Type
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // UpdateConsumerConfigRequest: Request to update the configuration of a
@@ -4580,9 +4660,9 @@ type UpdateConsumerConfigRequest struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *UpdateConsumerConfigRequest) MarshalJSON() ([]byte, error) {
+func (s UpdateConsumerConfigRequest) MarshalJSON() ([]byte, error) {
 	type NoMethod UpdateConsumerConfigRequest
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // UpdateDnsRecordSetMetadata: Metadata provided through GetOperation request
@@ -4621,9 +4701,9 @@ type UpdateDnsRecordSetRequest struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *UpdateDnsRecordSetRequest) MarshalJSON() ([]byte, error) {
+func (s UpdateDnsRecordSetRequest) MarshalJSON() ([]byte, error) {
 	type NoMethod UpdateDnsRecordSetRequest
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // Usage: Configuration controlling usage of a service.
@@ -4660,9 +4740,9 @@ type Usage struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *Usage) MarshalJSON() ([]byte, error) {
+func (s Usage) MarshalJSON() ([]byte, error) {
 	type NoMethod Usage
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // UsageRule: Usage configuration rules for the service. NOTE: Under
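For orientation, the `SystemParameter*` types touched above describe where a system parameter such as an API key may travel on a request. The sketch below wires them together the way a service config would; the field names mirror the generated structs, the values are made up:

```go
package main

import "fmt"

type SystemParameter struct {
	Name              string `json:"name,omitempty"`
	HttpHeader        string `json:"httpHeader,omitempty"`
	UrlQueryParameter string `json:"urlQueryParameter,omitempty"`
}

type SystemParameterRule struct {
	Selector   string             `json:"selector,omitempty"`
	Parameters []*SystemParameter `json:"parameters,omitempty"`
}

type SystemParameters struct {
	Rules []*SystemParameterRule `json:"rules,omitempty"`
}

func main() {
	// One rule per method selector; each parameter lists the locations
	// in which it is accepted.
	cfg := SystemParameters{Rules: []*SystemParameterRule{{
		Selector: "*", // every method
		Parameters: []*SystemParameter{{
			Name:              "api_key",
			HttpHeader:        "Api-Key", // accepted as a header...
			UrlQueryParameter: "api_key", // ...or as a query parameter
		}},
	}}}
	fmt.Println(len(cfg.Rules))
}
```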
@@ -4701,9 +4781,9 @@ type UsageRule struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *UsageRule) MarshalJSON() ([]byte, error) {
+func (s UsageRule) MarshalJSON() ([]byte, error) {
 	type NoMethod UsageRule
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 type ValidateConsumerConfigRequest struct {
@@ -4746,9 +4826,9 @@ type ValidateConsumerConfigRequest struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *ValidateConsumerConfigRequest) MarshalJSON() ([]byte, error) {
+func (s ValidateConsumerConfigRequest) MarshalJSON() ([]byte, error) {
 	type NoMethod ValidateConsumerConfigRequest
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 type ValidateConsumerConfigResponse struct {
@@ -4803,9 +4883,9 @@ type ValidateConsumerConfigResponse struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *ValidateConsumerConfigResponse) MarshalJSON() ([]byte, error) {
+func (s ValidateConsumerConfigResponse) MarshalJSON() ([]byte, error) {
 	type NoMethod ValidateConsumerConfigResponse
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // VpcServiceControls: Response for the get VPC Service Controls request.
@@ -4831,9 +4911,9 @@ type VpcServiceControls struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *VpcServiceControls) MarshalJSON() ([]byte, error) {
+func (s VpcServiceControls) MarshalJSON() ([]byte, error) {
 	type NoMethod VpcServiceControls
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 type OperationsCancelCall struct {
diff --git a/terraform/providers/google/vendor/google.golang.org/api/serviceusage/v1/serviceusage-api.json b/terraform/providers/google/vendor/google.golang.org/api/serviceusage/v1/serviceusage-api.json
index bd43982f322..a004d52f04a 100644
--- a/terraform/providers/google/vendor/google.golang.org/api/serviceusage/v1/serviceusage-api.json
+++ b/terraform/providers/google/vendor/google.golang.org/api/serviceusage/v1/serviceusage-api.json
@@ -426,7 +426,7 @@
       }
     }
   },
-  "revision": "20240602",
+  "revision": "20240929",
   "rootUrl": "https://serviceusage.googleapis.com/",
   "schemas": {
     "AddEnableRulesMetadata": {
@@ -488,6 +488,83 @@
       },
       "type": "object"
     },
+    "Analysis": {
+      "description": "A message to group the analysis information.",
+      "id": "Analysis",
+      "properties": {
+        "analysis": {
+          "$ref": "AnalysisResult",
+          "description": "Output only. Analysis result of updating a policy.",
+          "readOnly": true
+        },
+        "analysisType": {
+          "description": "Output only. The type of analysis.",
+          "enum": [
+            "ANALYSIS_TYPE_UNSPECIFIED",
+            "ANALYSIS_TYPE_DEPENDENCY",
+            "ANALYSIS_TYPE_RESOURCE_USAGE"
+          ],
+          "enumDescriptions": [
+            "Unspecified analysis type. Do not use.",
+            "The analysis of service dependencies.",
+            "The analysis of service resource usage."
+          ],
+          "readOnly": true,
+          "type": "string"
+        },
+        "displayName": {
+          "description": "Output only. The user friendly display name of the analysis type. E.g. service dependency analysis, service resource usage analysis, etc.",
+          "readOnly": true,
+          "type": "string"
+        },
+        "service": {
+          "description": "The names of the service that has analysis result of warnings or blockers. Example: `services/storage.googleapis.com`.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "AnalysisResult": {
+      "description": "An analysis result including blockers and warnings.",
+      "id": "AnalysisResult",
+      "properties": {
+        "blockers": {
+          "description": "Blocking information that would prevent the policy changes at runtime.",
+          "items": {
+            "$ref": "Impact"
+          },
+          "type": "array"
+        },
+        "warnings": {
+          "description": "Warning information indicating that the policy changes might be unsafe, but will not block the changes at runtime.",
+          "items": {
+            "$ref": "Impact"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "AnalyzeConsumerPolicyMetadata": {
+      "description": "Metadata for the `AnalyzeConsumerPolicy` method.",
+      "id": "AnalyzeConsumerPolicyMetadata",
+      "properties": {},
+      "type": "object"
+    },
+    "AnalyzeConsumerPolicyResponse": {
+      "description": "The response of analyzing a consumer policy update.",
+      "id": "AnalyzeConsumerPolicyResponse",
+      "properties": {
+        "analysis": {
+          "description": "The list of analyses returned from performing the intended policy update analysis. The analysis is grouped by service name and different analysis types. The empty analysis list means that the consumer policy can be updated without any warnings or blockers.",
+          "items": {
+            "$ref": "Analysis"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
     "Api": {
       "description": "Api is a light-weight descriptor for an API Interface. Interfaces are also described as \"protocol buffer services\" in some contexts, such as by the \"service\" keyword in a .proto file, but they are different from API Services, which represent a concrete implementation of an interface as opposed to simply a description of methods and bindings. They are also sometimes simply referred to as \"APIs\" in other contexts, such as the name of this message itself. See https://cloud.google.com/apis/design/glossary for detailed terminology.",
       "id": "Api",
@@ -926,6 +1003,10 @@
         "deprecated": true,
         "description": "Link to automatically generated reference documentation. Example: https://cloud.google.com/nodejs/docs/reference/asset/latest",
         "type": "string"
+      },
+      "selectiveGapicGeneration": {
+        "$ref": "SelectiveGapicGeneration",
+        "description": "Configuration for which RPCs should be generated in the GAPIC client."
       }
     },
     "type": "object"
   },
@@ -998,14 +1079,14 @@
         "type": "array"
       },
       "provided": {
-        "description": "A list of full type names of provided contexts.",
+        "description": "A list of full type names of provided contexts. It is used to support propagating HTTP headers and ETags from the response extension.",
         "items": {
           "type": "string"
        },
        "type": "array"
      },
      "requested": {
-        "description": "A list of full type names of requested contexts.",
+        "description": "A list of full type names of requested contexts, only the requested context will be made available to the backend.",
         "items": {
           "type": "string"
         },
         "type": "array"
       },
@@ -1439,6 +1520,17 @@
     },
     "type": "object"
   },
+    "ExperimentalFeatures": {
+      "description": "Experimental features to be included during client library generation. These fields will be deprecated once the feature graduates and is enabled by default.",
+      "id": "ExperimentalFeatures",
+      "properties": {
+        "restAsyncIoEnabled": {
+          "description": "Enables generation of asynchronous REST clients if `rest` transport is enabled. By default, asynchronous REST clients will not be generated.
This feature will be enabled by default 1 month after launching the feature in preview packages.", + "type": "boolean" + } + }, + "type": "object" + }, "Field": { "description": "A single field of a message type.", "id": "Field", @@ -1971,7 +2063,7 @@ "type": "object" }, "HttpRule": { - "description": "gRPC Transcoding gRPC Transcoding is a feature for mapping between a gRPC method and one or more HTTP REST endpoints. It allows developers to build a single API service that supports both gRPC APIs and REST APIs. Many systems, including [Google APIs](https://github.com/googleapis/googleapis), [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC Gateway](https://github.com/grpc-ecosystem/grpc-gateway), and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature and use it for large scale production services. `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies how different portions of the gRPC request message are mapped to the URL path, URL query parameters, and HTTP request body. It also controls how the gRPC response message is mapped to the HTTP response body. `HttpRule` is typically specified as an `google.api.http` annotation on the gRPC method. Each mapping specifies a URL path template and an HTTP method. The path template may refer to one or more fields in the gRPC request message, as long as each field is a non-repeated field with a primitive (non-message) type. The path template controls how fields of the request message are mapped to the URL path. Example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get: \"/v1/{name=messages/*}\" }; } } message GetMessageRequest { string name = 1; // Mapped to URL path. } message Message { string text = 1; // The resource content. } This enables an HTTP REST to gRPC mapping as below: - HTTP: `GET /v1/messages/123456` - gRPC: `GetMessage(name: \"messages/123456\")` Any fields in the request message which are not bound by the path template automatically become HTTP query parameters if there is no HTTP request body. For example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get:\"/v1/messages/{message_id}\" }; } } message GetMessageRequest { message SubMessage { string subfield = 1; } string message_id = 1; // Mapped to URL path. int64 revision = 2; // Mapped to URL query parameter `revision`. SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. } This enables a HTTP JSON to RPC mapping as below: - HTTP: `GET /v1/messages/123456?revision=2\u0026sub.subfield=foo` - gRPC: `GetMessage(message_id: \"123456\" revision: 2 sub: SubMessage(subfield: \"foo\"))` Note that fields which are mapped to URL query parameters must have a primitive type or a repeated primitive type or a non-repeated message type. In the case of a repeated type, the parameter can be repeated in the URL as `...?param=A\u0026param=B`. In the case of a message type, each field of the message is mapped to a separate parameter, such as `...?foo.a=A\u0026foo.b=B\u0026foo.c=C`. For HTTP methods that allow a request body, the `body` field specifies the mapping. 
Consider a REST update method on the message resource collection: service Messaging { rpc UpdateMessage(UpdateMessageRequest) returns (Message) { option (google.api.http) = { patch: \"/v1/messages/{message_id}\" body: \"message\" }; } } message UpdateMessageRequest { string message_id = 1; // mapped to the URL Message message = 2; // mapped to the body } The following HTTP JSON to RPC mapping is enabled, where the representation of the JSON in the request body is determined by protos JSON encoding: - HTTP: `PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` - gRPC: `UpdateMessage(message_id: \"123456\" message { text: \"Hi!\" })` The special name `*` can be used in the body mapping to define that every field not bound by the path template should be mapped to the request body. This enables the following alternative definition of the update method: service Messaging { rpc UpdateMessage(Message) returns (Message) { option (google.api.http) = { patch: \"/v1/messages/{message_id}\" body: \"*\" }; } } message Message { string message_id = 1; string text = 2; } The following HTTP JSON to RPC mapping is enabled: - HTTP: `PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` - gRPC: `UpdateMessage(message_id: \"123456\" text: \"Hi!\")` Note that when using `*` in the body mapping, it is not possible to have HTTP parameters, as all fields not bound by the path end in the body. This makes this option more rarely used in practice when defining REST APIs. The common usage of `*` is in custom methods which don't use the URL at all for transferring data. It is possible to define multiple HTTP methods for one RPC by using the `additional_bindings` option. Example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get: \"/v1/messages/{message_id}\" additional_bindings { get: \"/v1/users/{user_id}/messages/{message_id}\" } }; } } message GetMessageRequest { string message_id = 1; string user_id = 2; } This enables the following two alternative HTTP JSON to RPC mappings: - HTTP: `GET /v1/messages/123456` - gRPC: `GetMessage(message_id: \"123456\")` - HTTP: `GET /v1/users/me/messages/123456` - gRPC: `GetMessage(user_id: \"me\" message_id: \"123456\")` Rules for HTTP mapping 1. Leaf request fields (recursive expansion nested messages in the request message) are classified into three categories: - Fields referred by the path template. They are passed via the URL path. - Fields referred by the HttpRule.body. They are passed via the HTTP request body. - All other fields are passed via the URL query parameters, and the parameter name is the field path in the request message. A repeated field can be represented as multiple query parameters under the same name. 2. If HttpRule.body is \"*\", there is no URL query parameter, all fields are passed via URL path and HTTP request body. 3. If HttpRule.body is omitted, there is no HTTP request body, all fields are passed via URL path and URL query parameters. Path template syntax Template = \"/\" Segments [ Verb ] ; Segments = Segment { \"/\" Segment } ; Segment = \"*\" | \"**\" | LITERAL | Variable ; Variable = \"{\" FieldPath [ \"=\" Segments ] \"}\" ; FieldPath = IDENT { \".\" IDENT } ; Verb = \":\" LITERAL ; The syntax `*` matches a single URL path segment. The syntax `**` matches zero or more URL path segments, which must be the last part of the URL path except the `Verb`. The syntax `Variable` matches part of the URL path as specified by its template. A variable template must not contain other variables. 
If a variable matches a single path segment, its template may be omitted, e.g. `{var}` is equivalent to `{var=*}`. The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` contains any reserved character, such characters should be percent-encoded before the matching. If a variable contains exactly one path segment, such as `\"{var}\"` or `\"{var=*}\"`, when such a variable is expanded into a URL path on the client side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The server side does the reverse decoding. Such variables show up in the [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) as `{var}`. If a variable contains multiple path segments, such as `\"{var=foo/*}\"` or `\"{var=**}\"`, when such a variable is expanded into a URL path on the client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. The server side does the reverse decoding, except \"%2F\" and \"%2f\" are left unchanged. Such variables show up in the [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) as `{+var}`. Using gRPC API Service Configuration gRPC API Service Configuration (service config) is a configuration language for configuring a gRPC service to become a user-facing product. The service config is simply the YAML representation of the `google.api.Service` proto message. As an alternative to annotating your proto file, you can configure gRPC transcoding in your service config YAML files. You do this by specifying a `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same effect as the proto annotation. This can be particularly useful if you have a proto that is reused in multiple services. Note that any transcoding specified in the service config will override any matching transcoding configuration in the proto. Example below selects a gRPC method and applies HttpRule to it. http: rules: - selector: example.v1.Messaging.GetMessage get: /v1/messages/{message_id}/{sub.subfield} Special notes When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the proto to JSON conversion must follow the [proto3 specification](https://developers.google.com/protocol-buffers/docs/proto3#json). While the single segment variable follows the semantics of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String Expansion, the multi segment variable **does not** follow RFC 6570 Section 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion does not expand special characters like `?` and `#`, which would lead to invalid URLs. As the result, gRPC Transcoding uses a custom encoding for multi segment variables. The path variables **must not** refer to any repeated or mapped field, because client libraries are not capable of handling such variable expansion. The path variables **must not** capture the leading \"/\" character. The reason is that the most common use case \"{var}\" does not capture the leading \"/\" character. For consistency, all path variables must share the same behavior. Repeated message fields must not be mapped to URL query parameters, because no client library can support such complicated mapping. If an API needs to use a JSON array for request or response body, it can map the request or response body to a repeated field. However, some gRPC Transcoding implementations may not support this feature.", + "description": "gRPC Transcoding gRPC Transcoding is a feature for mapping between a gRPC method and one or more HTTP REST endpoints. 
It allows developers to build a single API service that supports both gRPC APIs and REST APIs. Many systems, including [Google APIs](https://github.com/googleapis/googleapis), [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC Gateway](https://github.com/grpc-ecosystem/grpc-gateway), and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature and use it for large scale production services. `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies how different portions of the gRPC request message are mapped to the URL path, URL query parameters, and HTTP request body. It also controls how the gRPC response message is mapped to the HTTP response body. `HttpRule` is typically specified as an `google.api.http` annotation on the gRPC method. Each mapping specifies a URL path template and an HTTP method. The path template may refer to one or more fields in the gRPC request message, as long as each field is a non-repeated field with a primitive (non-message) type. The path template controls how fields of the request message are mapped to the URL path. Example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get: \"/v1/{name=messages/*}\" }; } } message GetMessageRequest { string name = 1; // Mapped to URL path. } message Message { string text = 1; // The resource content. } This enables an HTTP REST to gRPC mapping as below: - HTTP: `GET /v1/messages/123456` - gRPC: `GetMessage(name: \"messages/123456\")` Any fields in the request message which are not bound by the path template automatically become HTTP query parameters if there is no HTTP request body. For example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get:\"/v1/messages/{message_id}\" }; } } message GetMessageRequest { message SubMessage { string subfield = 1; } string message_id = 1; // Mapped to URL path. int64 revision = 2; // Mapped to URL query parameter `revision`. SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. } This enables a HTTP JSON to RPC mapping as below: - HTTP: `GET /v1/messages/123456?revision=2\u0026sub.subfield=foo` - gRPC: `GetMessage(message_id: \"123456\" revision: 2 sub: SubMessage(subfield: \"foo\"))` Note that fields which are mapped to URL query parameters must have a primitive type or a repeated primitive type or a non-repeated message type. In the case of a repeated type, the parameter can be repeated in the URL as `...?param=A\u0026param=B`. In the case of a message type, each field of the message is mapped to a separate parameter, such as `...?foo.a=A\u0026foo.b=B\u0026foo.c=C`. For HTTP methods that allow a request body, the `body` field specifies the mapping. 
Consider a REST update method on the message resource collection: service Messaging { rpc UpdateMessage(UpdateMessageRequest) returns (Message) { option (google.api.http) = { patch: \"/v1/messages/{message_id}\" body: \"message\" }; } } message UpdateMessageRequest { string message_id = 1; // mapped to the URL Message message = 2; // mapped to the body } The following HTTP JSON to RPC mapping is enabled, where the representation of the JSON in the request body is determined by protos JSON encoding: - HTTP: `PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` - gRPC: `UpdateMessage(message_id: \"123456\" message { text: \"Hi!\" })` The special name `*` can be used in the body mapping to define that every field not bound by the path template should be mapped to the request body. This enables the following alternative definition of the update method: service Messaging { rpc UpdateMessage(Message) returns (Message) { option (google.api.http) = { patch: \"/v1/messages/{message_id}\" body: \"*\" }; } } message Message { string message_id = 1; string text = 2; } The following HTTP JSON to RPC mapping is enabled: - HTTP: `PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` - gRPC: `UpdateMessage(message_id: \"123456\" text: \"Hi!\")` Note that when using `*` in the body mapping, it is not possible to have HTTP parameters, as all fields not bound by the path end in the body. This makes this option more rarely used in practice when defining REST APIs. The common usage of `*` is in custom methods which don't use the URL at all for transferring data. It is possible to define multiple HTTP methods for one RPC by using the `additional_bindings` option. Example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get: \"/v1/messages/{message_id}\" additional_bindings { get: \"/v1/users/{user_id}/messages/{message_id}\" } }; } } message GetMessageRequest { string message_id = 1; string user_id = 2; } This enables the following two alternative HTTP JSON to RPC mappings: - HTTP: `GET /v1/messages/123456` - gRPC: `GetMessage(message_id: \"123456\")` - HTTP: `GET /v1/users/me/messages/123456` - gRPC: `GetMessage(user_id: \"me\" message_id: \"123456\")` Rules for HTTP mapping 1. Leaf request fields (recursive expansion nested messages in the request message) are classified into three categories: - Fields referred by the path template. They are passed via the URL path. - Fields referred by the HttpRule.body. They are passed via the HTTP request body. - All other fields are passed via the URL query parameters, and the parameter name is the field path in the request message. A repeated field can be represented as multiple query parameters under the same name. 2. If HttpRule.body is \"*\", there is no URL query parameter, all fields are passed via URL path and HTTP request body. 3. If HttpRule.body is omitted, there is no HTTP request body, all fields are passed via URL path and URL query parameters. Path template syntax Template = \"/\" Segments [ Verb ] ; Segments = Segment { \"/\" Segment } ; Segment = \"*\" | \"**\" | LITERAL | Variable ; Variable = \"{\" FieldPath [ \"=\" Segments ] \"}\" ; FieldPath = IDENT { \".\" IDENT } ; Verb = \":\" LITERAL ; The syntax `*` matches a single URL path segment. The syntax `**` matches zero or more URL path segments, which must be the last part of the URL path except the `Verb`. The syntax `Variable` matches part of the URL path as specified by its template. A variable template must not contain other variables. 
If a variable matches a single path segment, its template may be omitted, e.g. `{var}` is equivalent to `{var=*}`. The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` contains any reserved character, such characters should be percent-encoded before the matching. If a variable contains exactly one path segment, such as `\"{var}\"` or `\"{var=*}\"`, when such a variable is expanded into a URL path on the client side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The server side does the reverse decoding. Such variables show up in the [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) as `{var}`. If a variable contains multiple path segments, such as `\"{var=foo/*}\"` or `\"{var=**}\"`, when such a variable is expanded into a URL path on the client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. The server side does the reverse decoding, except \"%2F\" and \"%2f\" are left unchanged. Such variables show up in the [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) as `{+var}`. Using gRPC API Service Configuration gRPC API Service Configuration (service config) is a configuration language for configuring a gRPC service to become a user-facing product. The service config is simply the YAML representation of the `google.api.Service` proto message. As an alternative to annotating your proto file, you can configure gRPC transcoding in your service config YAML files. You do this by specifying a `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same effect as the proto annotation. This can be particularly useful if you have a proto that is reused in multiple services. Note that any transcoding specified in the service config will override any matching transcoding configuration in the proto. The following example selects a gRPC method and applies an `HttpRule` to it: http: rules: - selector: example.v1.Messaging.GetMessage get: /v1/messages/{message_id}/{sub.subfield} Special notes When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the proto to JSON conversion must follow the [proto3 specification](https://developers.google.com/protocol-buffers/docs/proto3#json). While the single segment variable follows the semantics of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String Expansion, the multi segment variable **does not** follow RFC 6570 Section 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion does not expand special characters like `?` and `#`, which would lead to invalid URLs. As the result, gRPC Transcoding uses a custom encoding for multi segment variables. The path variables **must not** refer to any repeated or mapped field, because client libraries are not capable of handling such variable expansion. The path variables **must not** capture the leading \"/\" character. The reason is that the most common use case \"{var}\" does not capture the leading \"/\" character. For consistency, all path variables must share the same behavior. Repeated message fields must not be mapped to URL query parameters, because no client library can support such complicated mapping. If an API needs to use a JSON array for request or response body, it can map the request or response body to a repeated field. 
However, some gRPC Transcoding implementations may not support this feature.", "id": "HttpRule", "properties": { "additionalBindings": { @@ -2020,6 +2112,31 @@ }, "type": "object" }, + "Impact": { + "description": "A message to group impacts of updating a policy.", + "id": "Impact", + "properties": { + "detail": { + "description": "Output only. User friendly impact detail in a free form message.", + "readOnly": true, + "type": "string" + }, + "impactType": { + "description": "Output only. The type of impact.", + "enum": [ + "IMPACT_TYPE_UNSPECIFIED", + "DEPENDENCY_MISSING_DEPENDENCIES" + ], + "enumDescriptions": [ + "Reserved Blocks (Block n contains codes from 100n to 100(n+1) -1 Block 0 - Special/Admin codes Block 1 - Impact Type of ANALYSIS_TYPE_DEPENDENCY Block 2 - Impact Type of ANALYSIS_TYPE_RESOURCE_USAGE ...", + "Block 1 - Impact Type of ANALYSIS_TYPE_DEPENDENCY" + ], + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, "ImportAdminOverridesMetadata": { "description": "Metadata message that provides information such as progress, partial failures, and similar information on each GetOperation call of LRO returned by ImportAdminOverrides.", "id": "ImportAdminOverridesMetadata", @@ -2515,6 +2632,25 @@ "description": "The sampling period of metric data points. For metrics which are written periodically, consecutive data points are stored at this time interval, excluding data loss due to errors. Metrics with a higher granularity have a smaller sampling period.", "format": "google-duration", "type": "string" + }, + "timeSeriesResourceHierarchyLevel": { + "description": "The scope of the timeseries data of the metric.", + "items": { + "enum": [ + "TIME_SERIES_RESOURCE_HIERARCHY_LEVEL_UNSPECIFIED", + "PROJECT", + "ORGANIZATION", + "FOLDER" + ], + "enumDescriptions": [ + "Do not use this default value.", + "Scopes a metric to a project.", + "Scopes a metric to an organization.", + "Scopes a metric to a folder." + ], + "type": "string" + }, + "type": "array" } }, "type": "object" @@ -2539,7 +2675,7 @@ "type": "object" }, "Mixin": { - "description": "Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. 
A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inherting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... }", + "description": "Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... }", "id": "Mixin", "properties": { "name": { @@ -2856,6 +2992,10 @@ "common": { "$ref": "CommonLanguageSettings", "description": "Some settings." + }, + "experimentalFeatures": { + "$ref": "ExperimentalFeatures", + "description": "Experimental features to be included during client library generation." 
       }
     },
     "type": "object"
@@ -3006,6 +3146,20 @@
     },
     "type": "object"
   },
+    "SelectiveGapicGeneration": {
+      "description": "This message is used to configure the generation of a subset of the RPCs in a service for client libraries.",
+      "id": "SelectiveGapicGeneration",
+      "properties": {
+        "methods": {
+          "description": "An allowlist of the fully qualified names of RPCs that should be included on public client surfaces.",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
     "ServiceIdentity": {
       "description": "Service identity for a service. This is the identity that service producer should use to access consumer resources.",
       "id": "ServiceIdentity",
diff --git a/terraform/providers/google/vendor/google.golang.org/api/serviceusage/v1/serviceusage-gen.go b/terraform/providers/google/vendor/google.golang.org/api/serviceusage/v1/serviceusage-gen.go
index 6ffc593dcba..16bfdd400ea 100644
--- a/terraform/providers/google/vendor/google.golang.org/api/serviceusage/v1/serviceusage-gen.go
+++ b/terraform/providers/google/vendor/google.golang.org/api/serviceusage/v1/serviceusage-gen.go
@@ -215,9 +215,9 @@ type AddEnableRulesResponse struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *AddEnableRulesResponse) MarshalJSON() ([]byte, error) {
+func (s AddEnableRulesResponse) MarshalJSON() ([]byte, error) {
 	type NoMethod AddEnableRulesResponse
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // AdminQuotaPolicy: Quota policy created by quota administrator.
@@ -264,9 +264,102 @@ type AdminQuotaPolicy struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *AdminQuotaPolicy) MarshalJSON() ([]byte, error) {
+func (s AdminQuotaPolicy) MarshalJSON() ([]byte, error) {
 	type NoMethod AdminQuotaPolicy
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// Analysis: A message to group the analysis information.
+type Analysis struct {
+	// Analysis: Output only. Analysis result of updating a policy.
+	Analysis *AnalysisResult `json:"analysis,omitempty"`
+	// AnalysisType: Output only. The type of analysis.
+	//
+	// Possible values:
+	//   "ANALYSIS_TYPE_UNSPECIFIED" - Unspecified analysis type. Do not use.
+	//   "ANALYSIS_TYPE_DEPENDENCY" - The analysis of service dependencies.
+	//   "ANALYSIS_TYPE_RESOURCE_USAGE" - The analysis of service resource usage.
+	AnalysisType string `json:"analysisType,omitempty"`
+	// DisplayName: Output only. The user friendly display name of the analysis
+	// type. E.g. service dependency analysis, service resource usage analysis,
+	// etc.
+	DisplayName string `json:"displayName,omitempty"`
+	// Service: The names of the service that has analysis result of warnings or
+	// blockers. Example: `services/storage.googleapis.com`.
+	Service string `json:"service,omitempty"`
+	// ForceSendFields is a list of field names (e.g. "Analysis") to
+	// unconditionally include in API requests. By default, fields with empty or
+	// default values are omitted from API requests. See
+	// https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+	// details.
+	ForceSendFields []string `json:"-"`
+	// NullFields is a list of field names (e.g. "Analysis") to include in API
+	// requests with the JSON null value. By default, fields with empty values are
+	// omitted from API requests. See
+	// https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+	NullFields []string `json:"-"`
+}
+
+func (s Analysis) MarshalJSON() ([]byte, error) {
+	type NoMethod Analysis
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// AnalysisResult: An analysis result including blockers and warnings.
+type AnalysisResult struct {
+	// Blockers: Blocking information that would prevent the policy changes at
+	// runtime.
+	Blockers []*Impact `json:"blockers,omitempty"`
+	// Warnings: Warning information indicating that the policy changes might be
+	// unsafe, but will not block the changes at runtime.
+	Warnings []*Impact `json:"warnings,omitempty"`
+	// ForceSendFields is a list of field names (e.g. "Blockers") to
+	// unconditionally include in API requests. By default, fields with empty or
+	// default values are omitted from API requests. See
+	// https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+	// details.
+	ForceSendFields []string `json:"-"`
+	// NullFields is a list of field names (e.g. "Blockers") to include in API
+	// requests with the JSON null value. By default, fields with empty values are
+	// omitted from API requests. See
+	// https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+	NullFields []string `json:"-"`
+}
+
+func (s AnalysisResult) MarshalJSON() ([]byte, error) {
+	type NoMethod AnalysisResult
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// AnalyzeConsumerPolicyMetadata: Metadata for the `AnalyzeConsumerPolicy`
+// method.
+type AnalyzeConsumerPolicyMetadata struct {
+}
+
+// AnalyzeConsumerPolicyResponse: The response of analyzing a consumer policy
+// update.
+type AnalyzeConsumerPolicyResponse struct {
+	// Analysis: The list of analyses returned from performing the intended policy
+	// update analysis. The analysis is grouped by service name and different
+	// analysis types. The empty analysis list means that the consumer policy can
+	// be updated without any warnings or blockers.
+	Analysis []*Analysis `json:"analysis,omitempty"`
+	// ForceSendFields is a list of field names (e.g. "Analysis") to
+	// unconditionally include in API requests. By default, fields with empty or
+	// default values are omitted from API requests. See
+	// https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+	// details.
+	ForceSendFields []string `json:"-"`
+	// NullFields is a list of field names (e.g. "Analysis") to include in API
+	// requests with the JSON null value. By default, fields with empty values are
+	// omitted from API requests. See
+	// https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+	NullFields []string `json:"-"`
+}
+
+func (s AnalyzeConsumerPolicyResponse) MarshalJSON() ([]byte, error) {
+	type NoMethod AnalyzeConsumerPolicyResponse
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // Api: Api is a light-weight descriptor for an API Interface. Interfaces are
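The new `Analyze*` types above form a small hierarchy: a response carries zero or more `Analysis` entries per service, each holding an `AnalysisResult` whose `Blockers` stop a policy change at runtime and whose `Warnings` do not; per the schema text, an empty list means the update is clean. A sketch of how a caller might fold that into a yes/no decision. The `safeToApply` helper is illustrative, not part of the generated API, and the types are trimmed to the fields used:

```go
package main

import "fmt"

type Impact struct {
	Detail     string `json:"detail,omitempty"`
	ImpactType string `json:"impactType,omitempty"`
}

type AnalysisResult struct {
	Blockers []*Impact `json:"blockers,omitempty"`
	Warnings []*Impact `json:"warnings,omitempty"`
}

type Analysis struct {
	Analysis *AnalysisResult `json:"analysis,omitempty"`
	Service  string          `json:"service,omitempty"`
}

type AnalyzeConsumerPolicyResponse struct {
	Analysis []*Analysis `json:"analysis,omitempty"`
}

// safeToApply reports whether the proposed policy update has no blockers.
// An empty analysis list is trivially safe; warnings alone do not block
// the change at runtime.
func safeToApply(resp *AnalyzeConsumerPolicyResponse) bool {
	for _, a := range resp.Analysis {
		if a.Analysis != nil && len(a.Analysis.Blockers) > 0 {
			return false
		}
	}
	return true
}

func main() {
	resp := &AnalyzeConsumerPolicyResponse{Analysis: []*Analysis{{
		Service:  "services/storage.googleapis.com",
		Analysis: &AnalysisResult{Warnings: []*Impact{{Detail: "dependency warning"}}},
	}}}
	fmt.Println(safeToApply(resp)) // true: warnings only, no blockers
}
```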
@@ -325,9 +418,9 @@ type Api struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *Api) MarshalJSON() ([]byte, error) {
+func (s Api) MarshalJSON() ([]byte, error) {
 	type NoMethod Api
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // AuthProvider: Configuration for an authentication provider, including
@@ -391,9 +484,9 @@ type AuthProvider struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *AuthProvider) MarshalJSON() ([]byte, error) {
+func (s AuthProvider) MarshalJSON() ([]byte, error) {
 	type NoMethod AuthProvider
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // AuthRequirement: User-defined authentication requirements, including support
@@ -429,9 +522,9 @@ type AuthRequirement struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *AuthRequirement) MarshalJSON() ([]byte, error) {
+func (s AuthRequirement) MarshalJSON() ([]byte, error) {
 	type NoMethod AuthRequirement
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // Authentication: `Authentication` defines the authentication configuration
@@ -461,9 +554,9 @@ type Authentication struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *Authentication) MarshalJSON() ([]byte, error) {
+func (s Authentication) MarshalJSON() ([]byte, error) {
 	type NoMethod Authentication
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // AuthenticationRule: Authentication rules for the service. By default, if a
@@ -495,9 +588,9 @@ type AuthenticationRule struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *AuthenticationRule) MarshalJSON() ([]byte, error) {
+func (s AuthenticationRule) MarshalJSON() ([]byte, error) {
 	type NoMethod AuthenticationRule
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // Backend: `Backend` defines the backend configuration for a service.
@@ -518,9 +611,9 @@ type Backend struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *Backend) MarshalJSON() ([]byte, error) {
+func (s Backend) MarshalJSON() ([]byte, error) {
 	type NoMethod Backend
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // BackendRule: A backend rule provides configuration for an individual API
@@ -605,9 +698,9 @@ type BackendRule struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *BackendRule) MarshalJSON() ([]byte, error) {
+func (s BackendRule) MarshalJSON() ([]byte, error) {
 	type NoMethod BackendRule
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 func (s *BackendRule) UnmarshalJSON(data []byte) error {
@@ -646,9 +739,9 @@ type BatchCreateAdminOverridesResponse struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *BatchCreateAdminOverridesResponse) MarshalJSON() ([]byte, error) {
+func (s BatchCreateAdminOverridesResponse) MarshalJSON() ([]byte, error) {
 	type NoMethod BatchCreateAdminOverridesResponse
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // BatchCreateConsumerOverridesResponse: Response message for
@@ -669,9 +762,9 @@ type BatchCreateConsumerOverridesResponse struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *BatchCreateConsumerOverridesResponse) MarshalJSON() ([]byte, error) {
+func (s BatchCreateConsumerOverridesResponse) MarshalJSON() ([]byte, error) {
 	type NoMethod BatchCreateConsumerOverridesResponse
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // BatchEnableServicesRequest: Request message for the `BatchEnableServices`
@@ -697,9 +790,9 @@ type BatchEnableServicesRequest struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *BatchEnableServicesRequest) MarshalJSON() ([]byte, error) {
+func (s BatchEnableServicesRequest) MarshalJSON() ([]byte, error) {
 	type NoMethod BatchEnableServicesRequest
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // BatchEnableServicesResponse: Response message for the `BatchEnableServices`
@@ -724,9 +817,9 @@ type BatchEnableServicesResponse struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *BatchEnableServicesResponse) MarshalJSON() ([]byte, error) {
+func (s BatchEnableServicesResponse) MarshalJSON() ([]byte, error) {
 	type NoMethod BatchEnableServicesResponse
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // BatchGetServicesResponse: Response message for the `BatchGetServices`
@@ -750,9 +843,9 @@ type BatchGetServicesResponse struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *BatchGetServicesResponse) MarshalJSON() ([]byte, error) {
+func (s BatchGetServicesResponse) MarshalJSON() ([]byte, error) {
 	type NoMethod BatchGetServicesResponse
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // Billing: Billing related configuration of the service. The following example
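Note the asymmetry the `BackendRule` hunk leaves in place: `MarshalJSON` moves to a value receiver while the existing `UnmarshalJSON` keeps its pointer receiver. That is required rather than an oversight, since an unmarshaler must mutate its receiver and a value receiver would only write into a copy. A trimmed sketch; `Deadline` mirrors the real `BackendRule` field, and the real `UnmarshalJSON` additionally coerces stringified numbers via `gensupport`, which is omitted here:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type BackendRule struct {
	Deadline float64 `json:"deadline,omitempty"`
}

func (s BackendRule) MarshalJSON() ([]byte, error) {
	type NoMethod BackendRule // breaks recursion, as in the generated code
	return json.Marshal(NoMethod(s))
}

func (s *BackendRule) UnmarshalJSON(data []byte) error {
	type NoMethod BackendRule
	var v NoMethod
	if err := json.Unmarshal(data, &v); err != nil {
		return err
	}
	*s = BackendRule(v) // pointer receiver: the write is visible to the caller
	return nil
}

func main() {
	var r BackendRule
	_ = json.Unmarshal([]byte(`{"deadline": 30}`), &r)
	b, _ := json.Marshal(r)
	fmt.Println(r.Deadline, string(b)) // 30 {"deadline":30}
}
```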
@@ -790,9 +883,9 @@ type Billing struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *Billing) MarshalJSON() ([]byte, error) {
+func (s Billing) MarshalJSON() ([]byte, error) {
 	type NoMethod Billing
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // BillingDestination: Configuration of a specific billing destination
@@ -817,9 +910,9 @@ type BillingDestination struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *BillingDestination) MarshalJSON() ([]byte, error) {
+func (s BillingDestination) MarshalJSON() ([]byte, error) {
 	type NoMethod BillingDestination
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // CancelOperationRequest: The request message for Operations.CancelOperation.
@@ -901,9 +994,9 @@ type ClientLibrarySettings struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *ClientLibrarySettings) MarshalJSON() ([]byte, error) {
+func (s ClientLibrarySettings) MarshalJSON() ([]byte, error) {
 	type NoMethod ClientLibrarySettings
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // CommonLanguageSettings: Required information for every language.
@@ -922,6 +1015,9 @@ type CommonLanguageSettings struct {
 	// ReferenceDocsUri: Link to automatically generated reference documentation.
 	// Example: https://cloud.google.com/nodejs/docs/reference/asset/latest
 	ReferenceDocsUri string `json:"referenceDocsUri,omitempty"`
+	// SelectiveGapicGeneration: Configuration for which RPCs should be generated
+	// in the GAPIC client.
+	SelectiveGapicGeneration *SelectiveGapicGeneration `json:"selectiveGapicGeneration,omitempty"`
 	// ForceSendFields is a list of field names (e.g. "Destinations") to
 	// unconditionally include in API requests. By default, fields with empty or
 	// default values are omitted from API requests. See
@@ -935,9 +1031,9 @@ type CommonLanguageSettings struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *CommonLanguageSettings) MarshalJSON() ([]byte, error) {
+func (s CommonLanguageSettings) MarshalJSON() ([]byte, error) {
 	type NoMethod CommonLanguageSettings
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // ConsumerPolicy: Consumer Policy is a set of rules that define what services
@@ -973,9 +1069,9 @@ type ConsumerPolicy struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *ConsumerPolicy) MarshalJSON() ([]byte, error) {
+func (s ConsumerPolicy) MarshalJSON() ([]byte, error) {
 	type NoMethod ConsumerPolicy
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // Context: `Context` defines which contexts an API requests. Example: context:
@@ -1009,9 +1105,9 @@ type Context struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *Context) MarshalJSON() ([]byte, error) {
+func (s Context) MarshalJSON() ([]byte, error) {
 	type NoMethod Context
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // ContextRule: A context rule provides information about the context for an
@@ -1023,9 +1119,11 @@ type ContextRule struct {
 	// AllowedResponseExtensions: A list of full type names or extension IDs of
 	// extensions allowed in grpc side channel from backend to client.
 	AllowedResponseExtensions []string `json:"allowedResponseExtensions,omitempty"`
-	// Provided: A list of full type names of provided contexts.
+	// Provided: A list of full type names of provided contexts. It is used to
+	// support propagating HTTP headers and ETags from the response extension.
 	Provided []string `json:"provided,omitempty"`
-	// Requested: A list of full type names of requested contexts.
+	// Requested: A list of full type names of requested contexts, only the
+	// requested context will be made available to the backend.
 	Requested []string `json:"requested,omitempty"`
 	// Selector: Selects the methods to which this rule applies. Refer to selector
 	// for syntax details.
@@ -1043,9 +1141,9 @@ type ContextRule struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *ContextRule) MarshalJSON() ([]byte, error) {
+func (s ContextRule) MarshalJSON() ([]byte, error) {
 	type NoMethod ContextRule
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // Control: Selects and configures the service controller used by the service.
@@ -1070,9 +1168,9 @@ type Control struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *Control) MarshalJSON() ([]byte, error) {
+func (s Control) MarshalJSON() ([]byte, error) {
 	type NoMethod Control
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // CppSettings: Settings for C++ client libraries.
@@ -1092,9 +1190,9 @@ type CppSettings struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *CppSettings) MarshalJSON() ([]byte, error) {
+func (s CppSettings) MarshalJSON() ([]byte, error) {
 	type NoMethod CppSettings
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // CreateAdminQuotaPolicyMetadata: Metadata message that provides information
@@ -1127,9 +1225,9 @@ type CustomError struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *CustomError) MarshalJSON() ([]byte, error) {
+func (s CustomError) MarshalJSON() ([]byte, error) {
 	type NoMethod CustomError
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // CustomErrorRule: A custom error rule.
@@ -1154,9 +1252,9 @@ type CustomErrorRule struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *CustomErrorRule) MarshalJSON() ([]byte, error) {
+func (s CustomErrorRule) MarshalJSON() ([]byte, error) {
 	type NoMethod CustomErrorRule
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // CustomHttpPattern: A custom pattern is used for defining custom HTTP verb.
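The `ContextRule` doc updates above pin down direction: only `requested` contexts are surfaced to the backend, while `provided` contexts flow back out, e.g. HTTP headers and ETags via the response extension. An illustrative rule follows; the type name matches the generated code, but the selector and context names are the usual google.api documentation examples, not taken from this patch:

```go
package main

import "fmt"

type ContextRule struct {
	Selector  string   `json:"selector,omitempty"`
	Requested []string `json:"requested,omitempty"` // made available to the backend
	Provided  []string `json:"provided,omitempty"`  // propagated back on the response
}

func main() {
	rule := ContextRule{
		Selector:  "google.example.library.v1.LibraryService.CreateBook",
		Requested: []string{"google.rpc.context.ProjectContext"},
		Provided:  []string{"google.rpc.context.OriginContext"},
	}
	fmt.Printf("%+v\n", rule)
}
```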
@@ -1178,9 +1276,9 @@ type CustomHttpPattern struct { NullFields []string `json:"-"` } -func (s *CustomHttpPattern) MarshalJSON() ([]byte, error) { +func (s CustomHttpPattern) MarshalJSON() ([]byte, error) { type NoMethod CustomHttpPattern - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DeleteAdminQuotaPolicyMetadata: Metadata message that provides information @@ -1221,9 +1319,9 @@ type DisableServiceRequest struct { NullFields []string `json:"-"` } -func (s *DisableServiceRequest) MarshalJSON() ([]byte, error) { +func (s DisableServiceRequest) MarshalJSON() ([]byte, error) { type NoMethod DisableServiceRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DisableServiceResponse: Response message for the `DisableService` method. @@ -1245,9 +1343,9 @@ type DisableServiceResponse struct { NullFields []string `json:"-"` } -func (s *DisableServiceResponse) MarshalJSON() ([]byte, error) { +func (s DisableServiceResponse) MarshalJSON() ([]byte, error) { type NoMethod DisableServiceResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Documentation: `Documentation` provides the information for describing a @@ -1318,9 +1416,9 @@ type Documentation struct { NullFields []string `json:"-"` } -func (s *Documentation) MarshalJSON() ([]byte, error) { +func (s Documentation) MarshalJSON() ([]byte, error) { type NoMethod Documentation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DocumentationRule: A documentation rule provides information about @@ -1358,9 +1456,9 @@ type DocumentationRule struct { NullFields []string `json:"-"` } -func (s *DocumentationRule) MarshalJSON() ([]byte, error) { +func (s DocumentationRule) MarshalJSON() ([]byte, error) { type NoMethod DocumentationRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DotnetSettings: Settings for Dotnet client libraries. 
@@ -1402,9 +1500,9 @@ type DotnetSettings struct { NullFields []string `json:"-"` } -func (s *DotnetSettings) MarshalJSON() ([]byte, error) { +func (s DotnetSettings) MarshalJSON() ([]byte, error) { type NoMethod DotnetSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Empty: A generic empty message that you can re-use to avoid defining @@ -1436,9 +1534,9 @@ type EnableFailure struct { NullFields []string `json:"-"` } -func (s *EnableFailure) MarshalJSON() ([]byte, error) { +func (s EnableFailure) MarshalJSON() ([]byte, error) { type NoMethod EnableFailure - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // EnableRule: The consumer policy rule that defines usable services and @@ -1483,9 +1581,9 @@ type EnableRule struct { NullFields []string `json:"-"` } -func (s *EnableRule) MarshalJSON() ([]byte, error) { +func (s EnableRule) MarshalJSON() ([]byte, error) { type NoMethod EnableRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // EnableServiceRequest: Request message for the `EnableService` method. @@ -1511,9 +1609,9 @@ type EnableServiceResponse struct { NullFields []string `json:"-"` } -func (s *EnableServiceResponse) MarshalJSON() ([]byte, error) { +func (s EnableServiceResponse) MarshalJSON() ([]byte, error) { type NoMethod EnableServiceResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Endpoint: `Endpoint` describes a network address of a service that serves a @@ -1562,9 +1660,9 @@ type Endpoint struct { NullFields []string `json:"-"` } -func (s *Endpoint) MarshalJSON() ([]byte, error) { +func (s Endpoint) MarshalJSON() ([]byte, error) { type NoMethod Endpoint - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Enum: Enum type definition. @@ -1600,9 +1698,9 @@ type Enum struct { NullFields []string `json:"-"` } -func (s *Enum) MarshalJSON() ([]byte, error) { +func (s Enum) MarshalJSON() ([]byte, error) { type NoMethod Enum - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // EnumValue: Enum value definition. @@ -1626,9 +1724,36 @@ type EnumValue struct { NullFields []string `json:"-"` } -func (s *EnumValue) MarshalJSON() ([]byte, error) { +func (s EnumValue) MarshalJSON() ([]byte, error) { type NoMethod EnumValue - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ExperimentalFeatures: Experimental features to be included during client +// library generation. These fields will be deprecated once the feature +// graduates and is enabled by default. +type ExperimentalFeatures struct { + // RestAsyncIoEnabled: Enables generation of asynchronous REST clients if + // `rest` transport is enabled. By default, asynchronous REST clients will not + // be generated. This feature will be enabled by default 1 month after + // launching the feature in preview packages. 
+ RestAsyncIoEnabled bool `json:"restAsyncIoEnabled,omitempty"` + // ForceSendFields is a list of field names (e.g. "RestAsyncIoEnabled") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "RestAsyncIoEnabled") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ExperimentalFeatures) MarshalJSON() ([]byte, error) { + type NoMethod ExperimentalFeatures + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Field: A single field of a message type. @@ -1697,9 +1822,9 @@ type Field struct { NullFields []string `json:"-"` } -func (s *Field) MarshalJSON() ([]byte, error) { +func (s Field) MarshalJSON() ([]byte, error) { type NoMethod Field - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FieldPolicy: Google API Policy Annotation This message defines a simple API @@ -1738,9 +1863,9 @@ type FieldPolicy struct { NullFields []string `json:"-"` } -func (s *FieldPolicy) MarshalJSON() ([]byte, error) { +func (s FieldPolicy) MarshalJSON() ([]byte, error) { type NoMethod FieldPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetServiceIdentityMetadata: Metadata for the `GetServiceIdentity` method. @@ -1773,9 +1898,9 @@ type GetServiceIdentityResponse struct { NullFields []string `json:"-"` } -func (s *GetServiceIdentityResponse) MarshalJSON() ([]byte, error) { +func (s GetServiceIdentityResponse) MarshalJSON() ([]byte, error) { type NoMethod GetServiceIdentityResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoSettings: Settings for Go client libraries. 
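Since RestAsyncIoEnabled is a plain bool with `omitempty`, its false value never reaches the wire unless the caller lists the field in ForceSendFields; that is the machinery the surrounding boilerplate keeps repeating. gensupport.MarshalJSON itself lives in an internal package, but its observable behavior can be approximated with reflection. The helper below is a simplified stand-in under that assumption, not the vendored implementation:

    package main

    import (
        "encoding/json"
        "fmt"
        "reflect"
        "strings"
    )

    type ExperimentalFeatures struct {
        RestAsyncIoEnabled bool     `json:"restAsyncIoEnabled,omitempty"`
        ForceSendFields    []string `json:"-"`
        NullFields         []string `json:"-"`
    }

    // marshalWithOverrides marshals v normally, then re-adds zero-valued
    // fields named in force and writes explicit JSON nulls for fields named
    // in null. Names are Go field names; JSON keys come from the tags.
    func marshalWithOverrides(v any, force, null []string) ([]byte, error) {
        b, err := json.Marshal(v)
        if err != nil {
            return nil, err
        }
        m := map[string]any{}
        if err := json.Unmarshal(b, &m); err != nil {
            return nil, err
        }
        rv := reflect.ValueOf(v)
        key := func(goName string) (string, bool) {
            f, ok := rv.Type().FieldByName(goName)
            if !ok {
                return "", false
            }
            tag := strings.Split(f.Tag.Get("json"), ",")[0]
            return tag, tag != "" && tag != "-"
        }
        for _, name := range force {
            if k, ok := key(name); ok {
                m[k] = rv.FieldByName(name).Interface()
            }
        }
        for _, name := range null {
            if k, ok := key(name); ok {
                m[k] = nil
            }
        }
        return json.Marshal(m)
    }

    func main() {
        f := ExperimentalFeatures{ForceSendFields: []string{"RestAsyncIoEnabled"}}
        b, _ := marshalWithOverrides(f, f.ForceSendFields, f.NullFields)
        fmt.Println(string(b)) // {"restAsyncIoEnabled":false}
    }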
@@ -1795,9 +1920,9 @@ type GoSettings struct { NullFields []string `json:"-"` } -func (s *GoSettings) MarshalJSON() ([]byte, error) { +func (s GoSettings) MarshalJSON() ([]byte, error) { type NoMethod GoSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleApiService: `Service` is the root object of Google API service @@ -1915,9 +2040,9 @@ type GoogleApiService struct { NullFields []string `json:"-"` } -func (s *GoogleApiService) MarshalJSON() ([]byte, error) { +func (s GoogleApiService) MarshalJSON() ([]byte, error) { type NoMethod GoogleApiService - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleApiServiceusageV1OperationMetadata: The operation metadata returned @@ -1939,9 +2064,9 @@ type GoogleApiServiceusageV1OperationMetadata struct { NullFields []string `json:"-"` } -func (s *GoogleApiServiceusageV1OperationMetadata) MarshalJSON() ([]byte, error) { +func (s GoogleApiServiceusageV1OperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod GoogleApiServiceusageV1OperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleApiServiceusageV1Service: A service that is available for use by the @@ -1986,9 +2111,9 @@ type GoogleApiServiceusageV1Service struct { NullFields []string `json:"-"` } -func (s *GoogleApiServiceusageV1Service) MarshalJSON() ([]byte, error) { +func (s GoogleApiServiceusageV1Service) MarshalJSON() ([]byte, error) { type NoMethod GoogleApiServiceusageV1Service - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleApiServiceusageV1ServiceConfig: The configuration of the service. @@ -2033,9 +2158,9 @@ type GoogleApiServiceusageV1ServiceConfig struct { NullFields []string `json:"-"` } -func (s *GoogleApiServiceusageV1ServiceConfig) MarshalJSON() ([]byte, error) { +func (s GoogleApiServiceusageV1ServiceConfig) MarshalJSON() ([]byte, error) { type NoMethod GoogleApiServiceusageV1ServiceConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleApiServiceusageV1beta1GetServiceIdentityResponse: Response message for @@ -2065,9 +2190,9 @@ type GoogleApiServiceusageV1beta1GetServiceIdentityResponse struct { NullFields []string `json:"-"` } -func (s *GoogleApiServiceusageV1beta1GetServiceIdentityResponse) MarshalJSON() ([]byte, error) { +func (s GoogleApiServiceusageV1beta1GetServiceIdentityResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleApiServiceusageV1beta1GetServiceIdentityResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleApiServiceusageV1beta1ServiceIdentity: Service identity for a service. 
@@ -2093,9 +2218,9 @@ type GoogleApiServiceusageV1beta1ServiceIdentity struct { NullFields []string `json:"-"` } -func (s *GoogleApiServiceusageV1beta1ServiceIdentity) MarshalJSON() ([]byte, error) { +func (s GoogleApiServiceusageV1beta1ServiceIdentity) MarshalJSON() ([]byte, error) { type NoMethod GoogleApiServiceusageV1beta1ServiceIdentity - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleApiServiceusageV2alphaConsumerPolicy: Consumer Policy is a set of @@ -2137,9 +2262,9 @@ type GoogleApiServiceusageV2alphaConsumerPolicy struct { NullFields []string `json:"-"` } -func (s *GoogleApiServiceusageV2alphaConsumerPolicy) MarshalJSON() ([]byte, error) { +func (s GoogleApiServiceusageV2alphaConsumerPolicy) MarshalJSON() ([]byte, error) { type NoMethod GoogleApiServiceusageV2alphaConsumerPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleApiServiceusageV2alphaEnableRule: The consumer policy rule that @@ -2161,9 +2286,9 @@ type GoogleApiServiceusageV2alphaEnableRule struct { NullFields []string `json:"-"` } -func (s *GoogleApiServiceusageV2alphaEnableRule) MarshalJSON() ([]byte, error) { +func (s GoogleApiServiceusageV2alphaEnableRule) MarshalJSON() ([]byte, error) { type NoMethod GoogleApiServiceusageV2alphaEnableRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleApiServiceusageV2alphaUpdateConsumerPolicyMetadata: Metadata for the @@ -2197,9 +2322,9 @@ type Http struct { NullFields []string `json:"-"` } -func (s *Http) MarshalJSON() ([]byte, error) { +func (s Http) MarshalJSON() ([]byte, error) { type NoMethod Http - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpRule: gRPC Transcoding gRPC Transcoding is a feature for mapping between @@ -2314,9 +2439,9 @@ func (s *Http) MarshalJSON() ([]byte, error) { // to a REST endpoint, achieving the same effect as the proto annotation. This // can be particularly useful if you have a proto that is reused in multiple // services. Note that any transcoding specified in the service config will -// override any matching transcoding configuration in the proto. Example below -// selects a gRPC method and applies HttpRule to it. http: rules: - selector: -// example.v1.Messaging.GetMessage get: +// override any matching transcoding configuration in the proto. The following +// example selects a gRPC method and applies an `HttpRule` to it: http: rules: +// - selector: example.v1.Messaging.GetMessage get: // /v1/messages/{message_id}/{sub.subfield} Special notes When gRPC Transcoding // is used to map a gRPC to JSON REST endpoints, the proto to JSON conversion // must follow the proto3 specification @@ -2386,9 +2511,41 @@ type HttpRule struct { NullFields []string `json:"-"` } -func (s *HttpRule) MarshalJSON() ([]byte, error) { +func (s HttpRule) MarshalJSON() ([]byte, error) { type NoMethod HttpRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// Impact: A message to group impacts of updating a policy. +type Impact struct { + // Detail: Output only. 
User friendly impact detail in a free form message. + Detail string `json:"detail,omitempty"` + // ImpactType: Output only. The type of impact. + // + // Possible values: + // "IMPACT_TYPE_UNSPECIFIED" - Reserved Blocks (Block n contains codes from + // 100n to 100(n+1) -1 Block 0 - Special/Admin codes Block 1 - Impact Type of + // ANALYSIS_TYPE_DEPENDENCY Block 2 - Impact Type of + // ANALYSIS_TYPE_RESOURCE_USAGE ... + // "DEPENDENCY_MISSING_DEPENDENCIES" - Block 1 - Impact Type of + // ANALYSIS_TYPE_DEPENDENCY + ImpactType string `json:"impactType,omitempty"` + // ForceSendFields is a list of field names (e.g. "Detail") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Detail") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s Impact) MarshalJSON() ([]byte, error) { + type NoMethod Impact + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ImportAdminOverridesMetadata: Metadata message that provides information @@ -2414,9 +2571,9 @@ type ImportAdminOverridesResponse struct { NullFields []string `json:"-"` } -func (s *ImportAdminOverridesResponse) MarshalJSON() ([]byte, error) { +func (s ImportAdminOverridesResponse) MarshalJSON() ([]byte, error) { type NoMethod ImportAdminOverridesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ImportAdminQuotaPoliciesMetadata: Metadata message that provides information @@ -2443,9 +2600,9 @@ type ImportAdminQuotaPoliciesResponse struct { NullFields []string `json:"-"` } -func (s *ImportAdminQuotaPoliciesResponse) MarshalJSON() ([]byte, error) { +func (s ImportAdminQuotaPoliciesResponse) MarshalJSON() ([]byte, error) { type NoMethod ImportAdminQuotaPoliciesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ImportConsumerOverridesMetadata: Metadata message that provides information @@ -2472,9 +2629,9 @@ type ImportConsumerOverridesResponse struct { NullFields []string `json:"-"` } -func (s *ImportConsumerOverridesResponse) MarshalJSON() ([]byte, error) { +func (s ImportConsumerOverridesResponse) MarshalJSON() ([]byte, error) { type NoMethod ImportConsumerOverridesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JavaSettings: Settings for Java client libraries. @@ -2510,9 +2667,9 @@ type JavaSettings struct { NullFields []string `json:"-"` } -func (s *JavaSettings) MarshalJSON() ([]byte, error) { +func (s JavaSettings) MarshalJSON() ([]byte, error) { type NoMethod JavaSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // JwtLocation: Specifies a location to extract JWT from an API request. 
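Two additions from the hunks above are worth grounding. First, the rewritten transcoding comment on HttpRule corresponds directly to a rule value; second, the new Impact message is output-only, so client code only ever reads it (the block numbering in the enum comment just partitions the integer space behind the string constants). Sketches for both, with the same assumed serviceusage/v1 import and illustrative names:

    package example

    import (
        "log"

        serviceusage "google.golang.org/api/serviceusage/v1"
    )

    // The YAML example from the HttpRule comment, as a struct literal.
    var getMessageRule = serviceusage.HttpRule{
        Selector: "example.v1.Messaging.GetMessage",
        Get:      "/v1/messages/{message_id}/{sub.subfield}",
    }

    // logImpacts reads the output-only Impact values attached to a policy
    // update; the slice is supplied by whatever call returned them.
    func logImpacts(impacts []*serviceusage.Impact) {
        for _, imp := range impacts {
            switch imp.ImpactType {
            case "DEPENDENCY_MISSING_DEPENDENCIES":
                log.Printf("missing dependency: %s", imp.Detail)
            default:
                log.Printf("%s: %s", imp.ImpactType, imp.Detail)
            }
        }
    }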
@@ -2543,9 +2700,9 @@ type JwtLocation struct { NullFields []string `json:"-"` } -func (s *JwtLocation) MarshalJSON() ([]byte, error) { +func (s JwtLocation) MarshalJSON() ([]byte, error) { type NoMethod JwtLocation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LabelDescriptor: A description of a label. @@ -2574,9 +2731,9 @@ type LabelDescriptor struct { NullFields []string `json:"-"` } -func (s *LabelDescriptor) MarshalJSON() ([]byte, error) { +func (s LabelDescriptor) MarshalJSON() ([]byte, error) { type NoMethod LabelDescriptor - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListOperationsResponse: The response message for Operations.ListOperations. @@ -2602,9 +2759,9 @@ type ListOperationsResponse struct { NullFields []string `json:"-"` } -func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { +func (s ListOperationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListOperationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListServicesResponse: Response message for the `ListServices` method. @@ -2630,9 +2787,9 @@ type ListServicesResponse struct { NullFields []string `json:"-"` } -func (s *ListServicesResponse) MarshalJSON() ([]byte, error) { +func (s ListServicesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListServicesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LogDescriptor: A description of a log type. Example in YAML format: - name: @@ -2668,9 +2825,9 @@ type LogDescriptor struct { NullFields []string `json:"-"` } -func (s *LogDescriptor) MarshalJSON() ([]byte, error) { +func (s LogDescriptor) MarshalJSON() ([]byte, error) { type NoMethod LogDescriptor - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Logging: Logging configuration of the service. 
The following example shows @@ -2709,9 +2866,9 @@ type Logging struct { NullFields []string `json:"-"` } -func (s *Logging) MarshalJSON() ([]byte, error) { +func (s Logging) MarshalJSON() ([]byte, error) { type NoMethod Logging - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LoggingDestination: Configuration of a specific logging destination (the @@ -2738,9 +2895,9 @@ type LoggingDestination struct { NullFields []string `json:"-"` } -func (s *LoggingDestination) MarshalJSON() ([]byte, error) { +func (s LoggingDestination) MarshalJSON() ([]byte, error) { type NoMethod LoggingDestination - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LongRunning: Describes settings to use when generating API methods that use @@ -2772,9 +2929,9 @@ type LongRunning struct { NullFields []string `json:"-"` } -func (s *LongRunning) MarshalJSON() ([]byte, error) { +func (s LongRunning) MarshalJSON() ([]byte, error) { type NoMethod LongRunning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *LongRunning) UnmarshalJSON(data []byte) error { @@ -2825,9 +2982,9 @@ type Method struct { NullFields []string `json:"-"` } -func (s *Method) MarshalJSON() ([]byte, error) { +func (s Method) MarshalJSON() ([]byte, error) { type NoMethod Method - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MethodPolicy: Defines policies applying to an RPC method. @@ -2852,9 +3009,9 @@ type MethodPolicy struct { NullFields []string `json:"-"` } -func (s *MethodPolicy) MarshalJSON() ([]byte, error) { +func (s MethodPolicy) MarshalJSON() ([]byte, error) { type NoMethod MethodPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MethodSettings: Describes the generator configuration for a method. @@ -2893,9 +3050,9 @@ type MethodSettings struct { NullFields []string `json:"-"` } -func (s *MethodSettings) MarshalJSON() ([]byte, error) { +func (s MethodSettings) MarshalJSON() ([]byte, error) { type NoMethod MethodSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MetricDescriptor: Defines a metric type and its schema. Once a metric @@ -3058,9 +3215,9 @@ type MetricDescriptor struct { NullFields []string `json:"-"` } -func (s *MetricDescriptor) MarshalJSON() ([]byte, error) { +func (s MetricDescriptor) MarshalJSON() ([]byte, error) { type NoMethod MetricDescriptor - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MetricDescriptorMetadata: Additional annotations that can be used to guide @@ -3110,6 +3267,16 @@ type MetricDescriptorMetadata struct { // interval, excluding data loss due to errors. Metrics with a higher // granularity have a smaller sampling period. SamplePeriod string `json:"samplePeriod,omitempty"` + // TimeSeriesResourceHierarchyLevel: The scope of the timeseries data of the + // metric. 
+ // + // Possible values: + // "TIME_SERIES_RESOURCE_HIERARCHY_LEVEL_UNSPECIFIED" - Do not use this + // default value. + // "PROJECT" - Scopes a metric to a project. + // "ORGANIZATION" - Scopes a metric to an organization. + // "FOLDER" - Scopes a metric to a folder. + TimeSeriesResourceHierarchyLevel []string `json:"timeSeriesResourceHierarchyLevel,omitempty"` // ForceSendFields is a list of field names (e.g. "IngestDelay") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See @@ -3123,9 +3290,9 @@ type MetricDescriptorMetadata struct { NullFields []string `json:"-"` } -func (s *MetricDescriptorMetadata) MarshalJSON() ([]byte, error) { +func (s MetricDescriptorMetadata) MarshalJSON() ([]byte, error) { type NoMethod MetricDescriptorMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MetricRule: Bind API methods to metrics. Binding a method to a metric causes @@ -3152,9 +3319,9 @@ type MetricRule struct { NullFields []string `json:"-"` } -func (s *MetricRule) MarshalJSON() ([]byte, error) { +func (s MetricRule) MarshalJSON() ([]byte, error) { type NoMethod MetricRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Mixin: Declares an API Interface to be included in this interface. The @@ -3177,7 +3344,7 @@ func (s *MetricRule) MarshalJSON() ([]byte, error) { // mixin construct implies that all methods in `AccessControl` are also // declared with same name and request/response types in `Storage`. A // documentation generator or annotation processor will see the effective -// `Storage.GetAcl` method after inherting documentation and annotations as +// `Storage.GetAcl` method after inheriting documentation and annotations as // follows: service Storage { // Get the underlying ACL object. rpc // GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = // "/v2/{resource=**}:getAcl"; } ... } Note how the version in the path pattern @@ -3207,9 +3374,9 @@ type Mixin struct { NullFields []string `json:"-"` } -func (s *Mixin) MarshalJSON() ([]byte, error) { +func (s Mixin) MarshalJSON() ([]byte, error) { type NoMethod Mixin - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MonitoredResourceDescriptor: An object that describes the schema of a @@ -3296,9 +3463,9 @@ type MonitoredResourceDescriptor struct { NullFields []string `json:"-"` } -func (s *MonitoredResourceDescriptor) MarshalJSON() ([]byte, error) { +func (s MonitoredResourceDescriptor) MarshalJSON() ([]byte, error) { type NoMethod MonitoredResourceDescriptor - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Monitoring: Monitoring configuration of the service. 
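The new TimeSeriesResourceHierarchyLevel field is a repeated enum, so a single metric can be scoped to several hierarchy levels at once. A small literal, again assuming the serviceusage/v1 package path:

    package example

    import serviceusage "google.golang.org/api/serviceusage/v1"

    // A metric whose time series are scoped to both project and organization.
    var md = serviceusage.MetricDescriptorMetadata{
        SamplePeriod:                     "60s",
        TimeSeriesResourceHierarchyLevel: []string{"PROJECT", "ORGANIZATION"},
    }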
The example below shows @@ -3353,9 +3520,9 @@ type Monitoring struct { NullFields []string `json:"-"` } -func (s *Monitoring) MarshalJSON() ([]byte, error) { +func (s Monitoring) MarshalJSON() ([]byte, error) { type NoMethod Monitoring - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MonitoringDestination: Configuration of a specific monitoring destination @@ -3380,9 +3547,9 @@ type MonitoringDestination struct { NullFields []string `json:"-"` } -func (s *MonitoringDestination) MarshalJSON() ([]byte, error) { +func (s MonitoringDestination) MarshalJSON() ([]byte, error) { type NoMethod MonitoringDestination - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NodeSettings: Settings for Node client libraries. @@ -3402,9 +3569,9 @@ type NodeSettings struct { NullFields []string `json:"-"` } -func (s *NodeSettings) MarshalJSON() ([]byte, error) { +func (s NodeSettings) MarshalJSON() ([]byte, error) { type NoMethod NodeSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OAuthRequirements: OAuth scopes are a way to define data and permissions on @@ -3441,9 +3608,9 @@ type OAuthRequirements struct { NullFields []string `json:"-"` } -func (s *OAuthRequirements) MarshalJSON() ([]byte, error) { +func (s OAuthRequirements) MarshalJSON() ([]byte, error) { type NoMethod OAuthRequirements - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Operation: This resource represents a long-running operation that is the @@ -3488,9 +3655,9 @@ type Operation struct { NullFields []string `json:"-"` } -func (s *Operation) MarshalJSON() ([]byte, error) { +func (s Operation) MarshalJSON() ([]byte, error) { type NoMethod Operation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationMetadata: The operation metadata returned for the batchend services @@ -3512,9 +3679,9 @@ type OperationMetadata struct { NullFields []string `json:"-"` } -func (s *OperationMetadata) MarshalJSON() ([]byte, error) { +func (s OperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod OperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Option: A protocol buffer option, which can be attached to a message, field, @@ -3544,9 +3711,9 @@ type Option struct { NullFields []string `json:"-"` } -func (s *Option) MarshalJSON() ([]byte, error) { +func (s Option) MarshalJSON() ([]byte, error) { type NoMethod Option - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Page: Represents a documentation page. 
A page can contain subpages to @@ -3580,9 +3747,9 @@ type Page struct { NullFields []string `json:"-"` } -func (s *Page) MarshalJSON() ([]byte, error) { +func (s Page) MarshalJSON() ([]byte, error) { type NoMethod Page - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PhpSettings: Settings for Php client libraries. @@ -3602,9 +3769,9 @@ type PhpSettings struct { NullFields []string `json:"-"` } -func (s *PhpSettings) MarshalJSON() ([]byte, error) { +func (s PhpSettings) MarshalJSON() ([]byte, error) { type NoMethod PhpSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Publishing: This message configures the settings for publishing Google Cloud @@ -3671,15 +3838,18 @@ type Publishing struct { NullFields []string `json:"-"` } -func (s *Publishing) MarshalJSON() ([]byte, error) { +func (s Publishing) MarshalJSON() ([]byte, error) { type NoMethod Publishing - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PythonSettings: Settings for Python client libraries. type PythonSettings struct { // Common: Some settings. Common *CommonLanguageSettings `json:"common,omitempty"` + // ExperimentalFeatures: Experimental features to be included during client + // library generation. + ExperimentalFeatures *ExperimentalFeatures `json:"experimentalFeatures,omitempty"` // ForceSendFields is a list of field names (e.g. "Common") to unconditionally // include in API requests. By default, fields with empty or default values are // omitted from API requests. See @@ -3693,9 +3863,9 @@ type PythonSettings struct { NullFields []string `json:"-"` } -func (s *PythonSettings) MarshalJSON() ([]byte, error) { +func (s PythonSettings) MarshalJSON() ([]byte, error) { type NoMethod PythonSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Quota: Quota configuration helps to achieve fairness and budgeting in @@ -3738,9 +3908,9 @@ type Quota struct { NullFields []string `json:"-"` } -func (s *Quota) MarshalJSON() ([]byte, error) { +func (s Quota) MarshalJSON() ([]byte, error) { type NoMethod Quota - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // QuotaLimit: `QuotaLimit` defines a specific limit that applies over a @@ -3812,9 +3982,9 @@ type QuotaLimit struct { NullFields []string `json:"-"` } -func (s *QuotaLimit) MarshalJSON() ([]byte, error) { +func (s QuotaLimit) MarshalJSON() ([]byte, error) { type NoMethod QuotaLimit - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // QuotaOverride: A quota override @@ -3871,9 +4041,9 @@ type QuotaOverride struct { NullFields []string `json:"-"` } -func (s *QuotaOverride) MarshalJSON() ([]byte, error) { +func (s QuotaOverride) MarshalJSON() ([]byte, error) { type NoMethod QuotaOverride - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RemoveEnableRulesMetadata: Metadata for the `RemoveEnableRules` method. 
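PythonSettings now carries the ExperimentalFeatures message introduced earlier in this hunk series, so opting generated Python clients into async REST support is a one-field change (package path assumed as before):

    package example

    import serviceusage "google.golang.org/api/serviceusage/v1"

    var py = serviceusage.PythonSettings{
        ExperimentalFeatures: &serviceusage.ExperimentalFeatures{
            RestAsyncIoEnabled: true, // non-zero, so no ForceSendFields entry needed
        },
    }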
@@ -3903,9 +4073,9 @@ type RemoveEnableRulesResponse struct { NullFields []string `json:"-"` } -func (s *RemoveEnableRulesResponse) MarshalJSON() ([]byte, error) { +func (s RemoveEnableRulesResponse) MarshalJSON() ([]byte, error) { type NoMethod RemoveEnableRulesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RubySettings: Settings for Ruby client libraries. @@ -3925,9 +4095,33 @@ type RubySettings struct { NullFields []string `json:"-"` } -func (s *RubySettings) MarshalJSON() ([]byte, error) { +func (s RubySettings) MarshalJSON() ([]byte, error) { type NoMethod RubySettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// SelectiveGapicGeneration: This message is used to configure the generation +// of a subset of the RPCs in a service for client libraries. +type SelectiveGapicGeneration struct { + // Methods: An allowlist of the fully qualified names of RPCs that should be + // included on public client surfaces. + Methods []string `json:"methods,omitempty"` + // ForceSendFields is a list of field names (e.g. "Methods") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Methods") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SelectiveGapicGeneration) MarshalJSON() ([]byte, error) { + type NoMethod SelectiveGapicGeneration + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ServiceIdentity: Service identity for a service. 
This is the identity that @@ -3952,9 +4146,9 @@ type ServiceIdentity struct { NullFields []string `json:"-"` } -func (s *ServiceIdentity) MarshalJSON() ([]byte, error) { +func (s ServiceIdentity) MarshalJSON() ([]byte, error) { type NoMethod ServiceIdentity - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SourceContext: `SourceContext` represents information about the source of a @@ -3977,9 +4171,9 @@ type SourceContext struct { NullFields []string `json:"-"` } -func (s *SourceContext) MarshalJSON() ([]byte, error) { +func (s SourceContext) MarshalJSON() ([]byte, error) { type NoMethod SourceContext - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SourceInfo: Source information used to create a Service Config @@ -3999,9 +4193,9 @@ type SourceInfo struct { NullFields []string `json:"-"` } -func (s *SourceInfo) MarshalJSON() ([]byte, error) { +func (s SourceInfo) MarshalJSON() ([]byte, error) { type NoMethod SourceInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Status: The `Status` type defines a logical error model that is suitable for @@ -4033,9 +4227,9 @@ type Status struct { NullFields []string `json:"-"` } -func (s *Status) MarshalJSON() ([]byte, error) { +func (s Status) MarshalJSON() ([]byte, error) { type NoMethod Status - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SystemParameter: Define a parameter's name and location. The parameter may @@ -4064,9 +4258,9 @@ type SystemParameter struct { NullFields []string `json:"-"` } -func (s *SystemParameter) MarshalJSON() ([]byte, error) { +func (s SystemParameter) MarshalJSON() ([]byte, error) { type NoMethod SystemParameter - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SystemParameterRule: Define a system parameter rule mapping system parameter @@ -4093,9 +4287,9 @@ type SystemParameterRule struct { NullFields []string `json:"-"` } -func (s *SystemParameterRule) MarshalJSON() ([]byte, error) { +func (s SystemParameterRule) MarshalJSON() ([]byte, error) { type NoMethod SystemParameterRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SystemParameters: ### System parameter configuration A system parameter is a @@ -4128,9 +4322,9 @@ type SystemParameters struct { NullFields []string `json:"-"` } -func (s *SystemParameters) MarshalJSON() ([]byte, error) { +func (s SystemParameters) MarshalJSON() ([]byte, error) { type NoMethod SystemParameters - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Type: A protocol buffer message type. 
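SelectiveGapicGeneration, added in the previous hunk, plugs into the new CommonLanguageSettings.SelectiveGapicGeneration field from the top of this file, allowlisting the RPCs that surface in a generated client. Wiring the two together (the method name is illustrative):

    package example

    import serviceusage "google.golang.org/api/serviceusage/v1"

    var common = serviceusage.CommonLanguageSettings{
        SelectiveGapicGeneration: &serviceusage.SelectiveGapicGeneration{
            Methods: []string{"google.example.v1.ExampleService.GetThing"},
        },
    }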
@@ -4168,9 +4362,9 @@ type Type struct { NullFields []string `json:"-"` } -func (s *Type) MarshalJSON() ([]byte, error) { +func (s Type) MarshalJSON() ([]byte, error) { type NoMethod Type - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateAdminQuotaPolicyMetadata: Metadata message that provides information @@ -4218,9 +4412,9 @@ type Usage struct { NullFields []string `json:"-"` } -func (s *Usage) MarshalJSON() ([]byte, error) { +func (s Usage) MarshalJSON() ([]byte, error) { type NoMethod Usage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UsageRule: Usage configuration rules for the service. NOTE: Under @@ -4259,9 +4453,9 @@ type UsageRule struct { NullFields []string `json:"-"` } -func (s *UsageRule) MarshalJSON() ([]byte, error) { +func (s UsageRule) MarshalJSON() ([]byte, error) { type NoMethod UsageRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type OperationsCancelCall struct { diff --git a/terraform/providers/google/vendor/google.golang.org/api/spanner/v1/spanner-api.json b/terraform/providers/google/vendor/google.golang.org/api/spanner/v1/spanner-api.json index 356e00a76d6..c69e213bdf2 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/spanner/v1/spanner-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/spanner/v1/spanner-api.json @@ -22,10 +22,60 @@ "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/spanner/", "endpoints": [ + { + "description": "Regional Endpoint", + "endpointUrl": "https://spanner.europe-west8.rep.googleapis.com/", + "location": "europe-west8" + }, { "description": "Regional Endpoint", "endpointUrl": "https://spanner.me-central2.rep.googleapis.com/", "location": "me-central2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://spanner.us-central1.rep.googleapis.com/", + "location": "us-central1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://spanner.us-east1.rep.googleapis.com/", + "location": "us-east1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://spanner.us-east4.rep.googleapis.com/", + "location": "us-east4" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://spanner.us-east5.rep.googleapis.com/", + "location": "us-east5" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://spanner.us-south1.rep.googleapis.com/", + "location": "us-south1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://spanner.us-west1.rep.googleapis.com/", + "location": "us-west1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://spanner.us-west2.rep.googleapis.com/", + "location": "us-west2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://spanner.us-west4.rep.googleapis.com/", + "location": "us-west4" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://spanner.us-west8.rep.googleapis.com/", + "location": "us-west8" } ], "fullyEncodeReservedExpansion": true, @@ -123,7 +173,7 @@ "instanceConfigOperations": { "methods": { "list": { - "description": "Lists the user-managed instance config long-running operations in the given project. 
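The spanner-api.json hunk above only registers the new regional endpoints in the discovery document; a client has to opt into one explicitly. A sketch with the Cloud Spanner Go client (the database path is hypothetical; option.WithEndpoint is the standard client option):

    package main

    import (
        "context"
        "log"

        "cloud.google.com/go/spanner"
        "google.golang.org/api/option"
    )

    func main() {
        ctx := context.Background()
        // Pin the client to one of the newly listed regional endpoints.
        client, err := spanner.NewClient(ctx,
            "projects/my-project/instances/my-instance/databases/my-db",
            option.WithEndpoint("spanner.us-central1.rep.googleapis.com:443"),
        )
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()
    }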
An instance config operation has a name of the form `projects//instanceConfigs//operations/`. The long-running operation metadata field type `metadata.type_url` describes the type of the metadata. Operations returned include those that have completed/failed/canceled within the last 7 days, and pending operations. Operations returned are ordered by `operation.metadata.value.start_time` in descending order starting from the most recently started operation.", + "description": "Lists the user-managed instance configuration long-running operations in the given project. An instance configuration operation has a name of the form `projects//instanceConfigs//operations/`. The long-running operation metadata field type `metadata.type_url` describes the type of the metadata. Operations returned include those that have completed/failed/canceled within the last 7 days, and pending operations. Operations returned are ordered by `operation.metadata.value.start_time` in descending order starting from the most recently started operation.", "flatPath": "v1/projects/{projectsId}/instanceConfigOperations", "httpMethod": "GET", "id": "spanner.projects.instanceConfigOperations.list", @@ -132,7 +182,7 @@ ], "parameters": { "filter": { - "description": "An expression that filters the list of returned operations. A filter expression consists of a field name, a comparison operator, and a value for filtering. The value must be a string, a number, or a boolean. The comparison operator must be one of: `\u003c`, `\u003e`, `\u003c=`, `\u003e=`, `!=`, `=`, or `:`. Colon `:` is the contains operator. Filter rules are not case sensitive. The following fields in the Operation are eligible for filtering: * `name` - The name of the long-running operation * `done` - False if the operation is in progress, else true. * `metadata.@type` - the type of metadata. For example, the type string for CreateInstanceConfigMetadata is `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata`. * `metadata.` - any field in metadata.value. `metadata.@type` must be specified first, if filtering on metadata fields. * `error` - Error associated with the long-running operation. * `response.@type` - the type of response. * `response.` - any field in response.value. You can combine multiple expressions by enclosing each expression in parentheses. By default, expressions are combined with AND logic. However, you can specify AND, OR, and NOT logic explicitly. Here are a few examples: * `done:true` - The operation is complete. * `(metadata.@type=` \\ `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata) AND` \\ `(metadata.instance_config.name:custom-config) AND` \\ `(metadata.progress.start_time \u003c \\\"2021-03-28T14:50:00Z\\\") AND` \\ `(error:*)` - Return operations where: * The operation's metadata type is CreateInstanceConfigMetadata. * The instance config name contains \"custom-config\". * The operation started before 2021-03-28T14:50:00Z. * The operation resulted in an error.", + "description": "An expression that filters the list of returned operations. A filter expression consists of a field name, a comparison operator, and a value for filtering. The value must be a string, a number, or a boolean. The comparison operator must be one of: `\u003c`, `\u003e`, `\u003c=`, `\u003e=`, `!=`, `=`, or `:`. Colon `:` is the contains operator. Filter rules are not case sensitive. 
The following fields in the Operation are eligible for filtering: * `name` - The name of the long-running operation * `done` - False if the operation is in progress, else true. * `metadata.@type` - the type of metadata. For example, the type string for CreateInstanceConfigMetadata is `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata`. * `metadata.` - any field in metadata.value. `metadata.@type` must be specified first, if filtering on metadata fields. * `error` - Error associated with the long-running operation. * `response.@type` - the type of response. * `response.` - any field in response.value. You can combine multiple expressions by enclosing each expression in parentheses. By default, expressions are combined with AND logic. However, you can specify AND, OR, and NOT logic explicitly. Here are a few examples: * `done:true` - The operation is complete. * `(metadata.@type=` \\ `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata) AND` \\ `(metadata.instance_config.name:custom-config) AND` \\ `(metadata.progress.start_time \u003c \\\"2021-03-28T14:50:00Z\\\") AND` \\ `(error:*)` - Return operations where: * The operation's metadata type is CreateInstanceConfigMetadata. * The instance configuration name contains \"custom-config\". * The operation started before 2021-03-28T14:50:00Z. * The operation resulted in an error.", "location": "query", "type": "string" }, @@ -148,7 +198,7 @@ "type": "string" }, "parent": { - "description": "Required. The project of the instance config operations. Values are of the form `projects/`.", + "description": "Required. The project of the instance configuration operations. Values are of the form `projects/`.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -169,7 +219,7 @@ "instanceConfigs": { "methods": { "create": { - "description": "Creates an instance config and begins preparing it to be used. The returned long-running operation can be used to track the progress of preparing the new instance config. The instance config name is assigned by the caller. If the named instance config already exists, `CreateInstanceConfig` returns `ALREADY_EXISTS`. Immediately after the request returns: * The instance config is readable via the API, with all requested attributes. The instance config's reconciling field is set to true. Its state is `CREATING`. While the operation is pending: * Cancelling the operation renders the instance config immediately unreadable via the API. * Except for deleting the creating resource, all other attempts to modify the instance config are rejected. Upon completion of the returned operation: * Instances can be created using the instance configuration. * The instance config's reconciling field becomes false. Its state becomes `READY`. The returned long-running operation will have a name of the format `/operations/` and can be used to track creation of the instance config. The metadata field type is CreateInstanceConfigMetadata. The response field type is InstanceConfig, if successful. Authorization requires `spanner.instanceConfigs.create` permission on the resource parent.", + "description": "Creates an instance configuration and begins preparing it to be used. The returned long-running operation can be used to track the progress of preparing the new instance configuration. The instance configuration name is assigned by the caller. If the named instance configuration already exists, `CreateInstanceConfig` returns `ALREADY_EXISTS`. 
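The filter grammar described above is passed verbatim as the request's filter string. A sketch against the instance admin client, with a hypothetical project ID:

    package main

    import (
        "context"
        "fmt"
        "log"

        instance "cloud.google.com/go/spanner/admin/instance/apiv1"
        "cloud.google.com/go/spanner/admin/instance/apiv1/instancepb"
        "google.golang.org/api/iterator"
    )

    func main() {
        ctx := context.Background()
        admin, err := instance.NewInstanceAdminClient(ctx)
        if err != nil {
            log.Fatal(err)
        }
        defer admin.Close()

        // Failed attempts to create an instance configuration, newest first.
        it := admin.ListInstanceConfigOperations(ctx, &instancepb.ListInstanceConfigOperationsRequest{
            Parent: "projects/my-project", // hypothetical
            Filter: `(metadata.@type=type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata) AND (error:*)`,
        })
        for {
            op, err := it.Next()
            if err == iterator.Done {
                break
            }
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(op.GetName())
        }
    }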
Immediately after the request returns: * The instance configuration is readable via the API, with all requested attributes. The instance configuration's reconciling field is set to true. Its state is `CREATING`. While the operation is pending: * Cancelling the operation renders the instance configuration immediately unreadable via the API. * Except for deleting the creating resource, all other attempts to modify the instance configuration are rejected. Upon completion of the returned operation: * Instances can be created using the instance configuration. * The instance configuration's reconciling field becomes false. Its state becomes `READY`. The returned long-running operation will have a name of the format `/operations/` and can be used to track creation of the instance configuration. The metadata field type is CreateInstanceConfigMetadata. The response field type is InstanceConfig, if successful. Authorization requires `spanner.instanceConfigs.create` permission on the resource parent.", "flatPath": "v1/projects/{projectsId}/instanceConfigs", "httpMethod": "POST", "id": "spanner.projects.instanceConfigs.create", @@ -178,7 +228,7 @@ ], "parameters": { "parent": { - "description": "Required. The name of the project in which to create the instance config. Values are of the form `projects/`.", + "description": "Required. The name of the project in which to create the instance configuration. Values are of the form `projects/`.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -198,7 +248,7 @@ ] }, "delete": { - "description": "Deletes the instance config. Deletion is only allowed when no instances are using the configuration. If any instances are using the config, returns `FAILED_PRECONDITION`. Only user managed configurations can be deleted. Authorization requires `spanner.instanceConfigs.delete` permission on the resource name.", + "description": "Deletes the instance configuration. Deletion is only allowed when no instances are using the configuration. If any instances are using the configuration, returns `FAILED_PRECONDITION`. Only user-managed configurations can be deleted. Authorization requires `spanner.instanceConfigs.delete` permission on the resource name.", "flatPath": "v1/projects/{projectsId}/instanceConfigs/{instanceConfigsId}", "httpMethod": "DELETE", "id": "spanner.projects.instanceConfigs.delete", @@ -207,7 +257,7 @@ ], "parameters": { "etag": { - "description": "Used for optimistic concurrency control as a way to help prevent simultaneous deletes of an instance config from overwriting each other. If not empty, the API only deletes the instance config when the etag provided matches the current status of the requested instance config. Otherwise, deletes the instance config without checking the current status of the requested instance config.", + "description": "Used for optimistic concurrency control as a way to help prevent simultaneous deletes of an instance configuration from overwriting each other. If not empty, the API only deletes the instance configuration when the etag provided matches the current status of the requested instance configuration. Otherwise, deletes the instance configuration without checking the current status of the requested instance configuration.", "location": "query", "type": "string" }, @@ -260,7 +310,7 @@ ] }, "list": { - "description": "Lists the supported instance configurations for a given project. 
Returns both Google managed configs and user managed configs.", + "description": "Lists the supported instance configurations for a given project. Returns both Google-managed configurations and user-managed configurations.", "flatPath": "v1/projects/{projectsId}/instanceConfigs", "httpMethod": "GET", "id": "spanner.projects.instanceConfigs.list", @@ -297,7 +347,7 @@ ] }, "patch": { - "description": "Updates an instance config. The returned long-running operation can be used to track the progress of updating the instance. If the named instance config does not exist, returns `NOT_FOUND`. Only user managed configurations can be updated. Immediately after the request returns: * The instance config's reconciling field is set to true. While the operation is pending: * Cancelling the operation sets its metadata's cancel_time. The operation is guaranteed to succeed at undoing all changes, after which point it terminates with a `CANCELLED` status. * All other attempts to modify the instance config are rejected. * Reading the instance config via the API continues to give the pre-request values. Upon completion of the returned operation: * Creating instances using the instance configuration uses the new values. * The instance config's new values are readable via the API. * The instance config's reconciling field becomes false. The returned long-running operation will have a name of the format `/operations/` and can be used to track the instance config modification. The metadata field type is UpdateInstanceConfigMetadata. The response field type is InstanceConfig, if successful. Authorization requires `spanner.instanceConfigs.update` permission on the resource name.", + "description": "Updates an instance configuration. The returned long-running operation can be used to track the progress of updating the instance. If the named instance configuration does not exist, returns `NOT_FOUND`. Only user-managed configurations can be updated. Immediately after the request returns: * The instance configuration's reconciling field is set to true. While the operation is pending: * Cancelling the operation sets its metadata's cancel_time. The operation is guaranteed to succeed at undoing all changes, after which point it terminates with a `CANCELLED` status. * All other attempts to modify the instance configuration are rejected. * Reading the instance configuration via the API continues to give the pre-request values. Upon completion of the returned operation: * Creating instances using the instance configuration uses the new values. * The new values of the instance configuration are readable via the API. * The instance configuration's reconciling field becomes false. The returned long-running operation will have a name of the format `/operations/` and can be used to track the instance configuration modification. The metadata field type is UpdateInstanceConfigMetadata. The response field type is InstanceConfig, if successful. Authorization requires `spanner.instanceConfigs.update` permission on the resource name.", "flatPath": "v1/projects/{projectsId}/instanceConfigs/{instanceConfigsId}", "httpMethod": "PATCH", "id": "spanner.projects.instanceConfigs.patch", @@ -306,7 +356,7 @@ ], "parameters": { "name": { - "description": "A unique identifier for the instance configuration. Values are of the form `projects//instanceConfigs/a-z*`. User instance config must start with `custom-`.", + "description": "A unique identifier for the instance configuration. Values are of the form `projects//instanceConfigs/a-z*`. 
User instance configuration must start with `custom-`.", "location": "path", "pattern": "^projects/[^/]+/instanceConfigs/[^/]+$", "required": true, @@ -748,7 +798,7 @@ ] }, "move": { - "description": "Moves the instance to the target instance config. The returned long-running operation can be used to track the progress of moving the instance. `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of the following criteria: * Has an ongoing move to a different instance config * Has backups * Has an ongoing update * Is under free trial * Contains any CMEK-enabled databases While the operation is pending: * All other attempts to modify the instance, including changes to its compute capacity, are rejected. * The following database and backup admin operations are rejected: * DatabaseAdmin.CreateDatabase, * DatabaseAdmin.UpdateDatabaseDdl (Disabled if default_leader is specified in the request.) * DatabaseAdmin.RestoreDatabase * DatabaseAdmin.CreateBackup * DatabaseAdmin.CopyBackup * Both the source and target instance configs are subject to hourly compute and storage charges. * The instance may experience higher read-write latencies and a higher transaction abort rate. However, moving an instance does not cause any downtime. The returned long-running operation will have a name of the format `/operations/` and can be used to track the move instance operation. The metadata field type is MoveInstanceMetadata. The response field type is Instance, if successful. Cancelling the operation sets its metadata's cancel_time. Cancellation is not immediate since it involves moving any data previously moved to target instance config back to the original instance config. The same operation can be used to track the progress of the cancellation. Upon successful completion of the cancellation, the operation terminates with CANCELLED status. Upon completion(if not cancelled) of the returned operation: * Instance would be successfully moved to the target instance config. * You are billed for compute and storage in target instance config. Authorization requires `spanner.instances.update` permission on the resource instance. For more details, please see [documentation](https://cloud.google.com/spanner/docs/move-instance).", + "description": "Moves an instance to the target instance configuration. You can use the returned long-running operation to track the progress of moving the instance. `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of the following criteria: * Is undergoing a move to a different instance configuration * Has backups * Has an ongoing update * Contains any CMEK-enabled databases * Is a free trial instance While the operation is pending: * All other attempts to modify the instance, including changes to its compute capacity, are rejected. * The following database and backup admin operations are rejected: * `DatabaseAdmin.CreateDatabase` * `DatabaseAdmin.UpdateDatabaseDdl` (disabled if default_leader is specified in the request.) * `DatabaseAdmin.RestoreDatabase` * `DatabaseAdmin.CreateBackup` * `DatabaseAdmin.CopyBackup` * Both the source and target instance configurations are subject to hourly compute and storage charges. * The instance might experience higher read-write latencies and a higher transaction abort rate. However, moving an instance doesn't cause any downtime. The returned long-running operation has a name of the format `/operations/` and can be used to track the move instance operation. The metadata field type is MoveInstanceMetadata. 
The response field type is Instance, if successful. Cancelling the operation sets its metadata's cancel_time. Cancellation is not immediate because it involves moving any data previously moved to the target instance configuration back to the original instance configuration. You can use this operation to track the progress of the cancellation. Upon successful completion of the cancellation, the operation terminates with `CANCELLED` status. If not cancelled, upon completion of the returned operation: * The instance successfully moves to the target instance configuration. * You are billed for compute and storage in target instance configuration. Authorization requires the `spanner.instances.update` permission on the resource instance. For more details, see [Move an instance](https://cloud.google.com/spanner/docs/move-instance).", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}:move", "httpMethod": "POST", "id": "spanner.projects.instances.move", @@ -1095,7 +1145,7 @@ ], "parameters": { "filter": { - "description": "An expression that filters the list of returned backups. A filter expression consists of a field name, a comparison operator, and a value for filtering. The value must be a string, a number, or a boolean. The comparison operator must be one of: `\u003c`, `\u003e`, `\u003c=`, `\u003e=`, `!=`, `=`, or `:`. Colon `:` is the contains operator. Filter rules are not case sensitive. The following fields in the Backup are eligible for filtering: * `name` * `database` * `state` * `create_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * `size_bytes` You can combine multiple expressions by enclosing each expression in parentheses. By default, expressions are combined with AND logic, but you can specify AND, OR, and NOT logic explicitly. Here are a few examples: * `name:Howl` - The backup's name contains the string \"howl\". * `database:prod` - The database's name contains the string \"prod\". * `state:CREATING` - The backup is pending creation. * `state:READY` - The backup is fully created and ready for use. * `(name:howl) AND (create_time \u003c \\\"2018-03-28T14:50:00Z\\\")` - The backup name contains the string \"howl\" and `create_time` of the backup is before 2018-03-28T14:50:00Z. * `expire_time \u003c \\\"2018-03-28T14:50:00Z\\\"` - The backup `expire_time` is before 2018-03-28T14:50:00Z. * `size_bytes \u003e 10000000000` - The backup's size is greater than 10GB", + "description": "An expression that filters the list of returned backups. A filter expression consists of a field name, a comparison operator, and a value for filtering. The value must be a string, a number, or a boolean. The comparison operator must be one of: `\u003c`, `\u003e`, `\u003c=`, `\u003e=`, `!=`, `=`, or `:`. Colon `:` is the contains operator. Filter rules are not case sensitive. The following fields in the Backup are eligible for filtering: * `name` * `database` * `state` * `create_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * `size_bytes` * `backup_schedules` You can combine multiple expressions by enclosing each expression in parentheses. By default, expressions are combined with AND logic, but you can specify AND, OR, and NOT logic explicitly. 
Here are a few examples: * `name:Howl` - The backup's name contains the string \"howl\". * `database:prod` - The database's name contains the string \"prod\". * `state:CREATING` - The backup is pending creation. * `state:READY` - The backup is fully created and ready for use. * `(name:howl) AND (create_time \u003c \\\"2018-03-28T14:50:00Z\\\")` - The backup name contains the string \"howl\" and `create_time` of the backup is before 2018-03-28T14:50:00Z. * `expire_time \u003c \\\"2018-03-28T14:50:00Z\\\"` - The backup `expire_time` is before 2018-03-28T14:50:00Z. * `size_bytes \u003e 10000000000` - The backup's size is greater than 10GB * `backup_schedules:daily` - The backup is created from a schedule with \"daily\" in its name.", "location": "query", "type": "string" }, @@ -1397,7 +1447,7 @@ "databases": { "methods": { "changequorum": { - "description": "ChangeQuorum is strictly restricted to databases that use dual region instance configurations. Initiates a background operation to change quorum a database from dual-region mode to single-region mode and vice versa. The returned long-running operation will have a name of the format `projects//instances//databases//operations/` and can be used to track execution of the ChangeQuorum. The metadata field type is ChangeQuorumMetadata. Authorization requires `spanner.databases.changequorum` permission on the resource database.", + "description": "`ChangeQuorum` is strictly restricted to databases that use dual-region instance configurations. Initiates a background operation to change the quorum of a database from dual-region mode to single-region mode or vice versa. The returned long-running operation has a name of the format `projects//instances//databases//operations/` and can be used to track execution of the `ChangeQuorum`. The metadata field type is ChangeQuorumMetadata. Authorization requires `spanner.databases.changequorum` permission on the resource database.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}:changequorum", "httpMethod": "POST", "id": "spanner.projects.instances.databases.changequorum", @@ -1406,7 +1456,7 @@ ], "parameters": { "name": { - "description": "Required. Name of the database in which to apply the ChangeQuorum. Values are of the form `projects//instances//databases/`.", + "description": "Required. Name of the database in which to apply `ChangeQuorum`. Values are of the form `projects//instances//databases/`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", "required": true, @@ -1426,7 +1476,7 @@ ] }, "create": { - "description": "Creates a new Cloud Spanner database and starts to prepare it for serving. The returned long-running operation will have a name of the format `/operations/` and can be used to track preparation of the database. The metadata field type is CreateDatabaseMetadata. The response field type is Database, if successful.", + "description": "Creates a new Spanner database and starts to prepare it for serving. The returned long-running operation will have a name of the format `/operations/` and can be used to track preparation of the database. The metadata field type is CreateDatabaseMetadata. 
The response field type is Database, if successful.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases", "httpMethod": "POST", "id": "spanner.projects.instances.databases.create", @@ -1804,6 +1854,255 @@ } }, "resources": { + "backupSchedules": { + "methods": { + "create": { + "description": "Creates a new backup schedule.", + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/backupSchedules", + "httpMethod": "POST", + "id": "spanner.projects.instances.databases.backupSchedules.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "backupScheduleId": { + "description": "Required. The Id to use for the backup schedule. The `backup_schedule_id` appended to `parent` forms the full backup schedule name of the form `projects//instances//databases//backupSchedules/`.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The name of the database that this backup schedule applies to.", + "location": "path", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/backupSchedules", + "request": { + "$ref": "BackupSchedule" + }, + "response": { + "$ref": "BackupSchedule" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.admin" + ] + }, + "delete": { + "description": "Deletes a backup schedule.", + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/backupSchedules/{backupSchedulesId}", + "httpMethod": "DELETE", + "id": "spanner.projects.instances.databases.backupSchedules.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the schedule to delete. Values are of the form `projects//instances//databases//backupSchedules/`.", + "location": "path", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/backupSchedules/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.admin" + ] + }, + "get": { + "description": "Gets backup schedule for the input schedule name.", + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/backupSchedules/{backupSchedulesId}", + "httpMethod": "GET", + "id": "spanner.projects.instances.databases.backupSchedules.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the schedule to retrieve. Values are of the form `projects//instances//databases//backupSchedules/`.", + "location": "path", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/backupSchedules/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "BackupSchedule" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.admin" + ] + }, + "getIamPolicy": { + "description": "Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists but does not have a policy set. Authorization requires `spanner.databases.getIamPolicy` permission on resource. 
For backups, authorization requires `spanner.backups.getIamPolicy` permission on resource.", + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/backupSchedules/{backupSchedulesId}:getIamPolicy", + "httpMethod": "POST", + "id": "spanner.projects.instances.databases.backupSchedules.getIamPolicy", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The Cloud Spanner resource for which the policy is being retrieved. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for database resources.", + "location": "path", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/backupSchedules/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+resource}:getIamPolicy", + "request": { + "$ref": "GetIamPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.admin" + ] + }, + "list": { + "description": "Lists all the backup schedules for the database.", + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/backupSchedules", + "httpMethod": "GET", + "id": "spanner.projects.instances.databases.backupSchedules.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. Number of backup schedules to be returned in the response. If 0 or less, defaults to the server's maximum allowed page size.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. If non-empty, `page_token` should contain a next_page_token from a previous ListBackupSchedulesResponse to the same `parent`.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. Database is the parent resource whose backup schedules should be listed. Values are of the form projects//instances//databases/", + "location": "path", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/backupSchedules", + "response": { + "$ref": "ListBackupSchedulesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.admin" + ] + }, + "patch": { + "description": "Updates a backup schedule.", + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/backupSchedules/{backupSchedulesId}", + "httpMethod": "PATCH", + "id": "spanner.projects.instances.databases.backupSchedules.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Identifier. Output only for the CreateBackupSchedule operation. Required for the UpdateBackupSchedule operation. A globally unique identifier for the backup schedule which cannot be changed. Values are of the form `projects//instances//databases//backupSchedules/a-z*[a-z0-9]` The final segment of the name must be between 2 and 60 characters in length.", + "location": "path", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/backupSchedules/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Required. A mask specifying which fields in the BackupSchedule resource should be updated. This mask is relative to the BackupSchedule resource, not to the request message. 
The field mask must always be specified; this prevents any future fields from being erased accidentally.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}", + "request": { + "$ref": "BackupSchedule" + }, + "response": { + "$ref": "BackupSchedule" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.admin" + ] + }, + "setIamPolicy": { + "description": "Sets the access control policy on a database or backup resource. Replaces any existing policy. Authorization requires `spanner.databases.setIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.setIamPolicy` permission on resource.", + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/backupSchedules/{backupSchedulesId}:setIamPolicy", + "httpMethod": "POST", + "id": "spanner.projects.instances.databases.backupSchedules.setIamPolicy", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The Cloud Spanner resource for which the policy is being set. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for database resources.", + "location": "path", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/backupSchedules/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+resource}:setIamPolicy", + "request": { + "$ref": "SetIamPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.admin" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance.", + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/backupSchedules/{backupSchedulesId}:testIamPermissions", + "httpMethod": "POST", + "id": "spanner.projects.instances.databases.backupSchedules.testIamPermissions", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The Cloud Spanner resource for which permissions are being tested. 
The format is `projects//instances/` for instance resources and `projects//instances//databases/` for database resources.", + "location": "path", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/backupSchedules/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+resource}:testIamPermissions", + "request": { + "$ref": "TestIamPermissionsRequest" + }, + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.admin" + ] + } + } + }, "databaseRoles": { "methods": { "list": { @@ -3005,11 +3304,11 @@ } } }, - "revision": "20240605", + "revision": "20240930", "rootUrl": "https://spanner.googleapis.com/", "schemas": { "AutoscalingConfig": { - "description": "Autoscaling config for an instance.", + "description": "Autoscaling configuration for an instance.", "id": "AutoscalingConfig", "properties": { "autoscalingLimits": { @@ -3071,6 +3370,14 @@ "description": "A backup of a Cloud Spanner database.", "id": "Backup", "properties": { + "backupSchedules": { + "description": "Output only. List of backup schedule URIs that are associated with creating this backup. This is only applicable for scheduled backups, and is empty for on-demand backups. To optimize for storage, whenever possible, multiple schedules are collapsed together to create one backup. In such cases, this field captures the list of all backup schedule URIs that are associated with creating this backup. If collapsing is not done, then this field captures the single backup schedule URI associated with creating this backup.", + "items": { + "type": "string" + }, + "readOnly": true, + "type": "array" + }, "createTime": { "description": "Output only. The time the CreateBackup request is received. If the request does not specify `version_time`, the `version_time` of the backup will be equivalent to the `create_time`.", "format": "google-datetime", @@ -3109,11 +3416,28 @@ "readOnly": true, "type": "array" }, + "exclusiveSizeBytes": { + "description": "Output only. For a backup in an incremental backup chain, this is the storage space needed to keep the data that has changed since the previous backup. For all other backups, this is always the size of the backup. This value may change if backups on the same chain get deleted or expired. This field can be used to calculate the total storage space used by a set of backups. For example, the total space used by all backups of a database can be computed by summing up this field.", + "format": "int64", + "readOnly": true, + "type": "string" + }, "expireTime": { "description": "Required for the CreateBackup operation. The expiration time of the backup, with microseconds granularity that must be at least 6 hours and at most 366 days from the time the CreateBackup request is processed. Once the `expire_time` has passed, the backup is eligible to be automatically deleted by Cloud Spanner to free the resources used by the backup.", "format": "google-datetime", "type": "string" }, + "freeableSizeBytes": { + "description": "Output only. The number of bytes that will be freed by deleting this backup. This value will be zero if, for example, this backup is part of an incremental backup chain and younger backups in the chain require that we keep its data. For backups not in an incremental backup chain, this is always the size of the backup. 
This value may change if backups on the same chain get created, deleted or expired.", + "format": "int64", + "readOnly": true, + "type": "string" + }, + "incrementalBackupChainId": { + "description": "Output only. Populated only for backups in an incremental backup chain. Backups share the same chain id if and only if they belong to the same incremental backup chain. Use this field to determine which backups are part of the same incremental backup chain. The ordering of backups in the chain can be determined by ordering the backup `version_time`.", + "readOnly": true, + "type": "string" + }, "maxExpireTime": { "description": "Output only. The max allowed expiration time of the backup, with microseconds granularity. A backup's expiration time can be configured in multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or copying an existing backup, the expiration time specified must be less than `Backup.max_expire_time`.", "format": "google-datetime", @@ -3124,6 +3448,12 @@ "description": "Output only for the CreateBackup operation. Required for the UpdateBackup operation. A globally unique identifier for the backup which cannot be changed. Values are of the form `projects//instances//backups/a-z*[a-z0-9]` The final segment of the name must be between 2 and 60 characters in length. The backup is stored in the location(s) specified in the instance configuration of the instance containing the backup, identified by the prefix of the backup name of the form `projects//instances/`.", "type": "string" }, + "oldestVersionTime": { + "description": "Output only. Data deleted at a time older than this is guaranteed not to be retained in order to support this backup. For a backup in an incremental backup chain, this is the version time of the oldest backup that exists or ever existed in the chain. For all other backups, this is the version time of the backup. This field can be used to understand what data is being retained by the backup system.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, "referencingBackups": { "description": "Output only. The names of the destination backups being created by copying this source backup. The backup names are of the form `projects//instances//backups/`. Referencing backups may exist in different instances. The existence of any referencing backup prevents the backup from being deleted. When the copy operation is done (either successfully completed or cancelled or the destination backup is deleted), the reference to the backup is removed.", "items": { @@ -3141,7 +3471,7 @@ "type": "array" }, "sizeBytes": { - "description": "Output only. Size of the backup in bytes.", + "description": "Output only. Size of the backup in bytes. For a backup in an incremental backup chain, this is the sum of the `exclusive_size_bytes` of itself and all older backups in the chain.", "format": "int64", "readOnly": true, "type": "string" @@ -3194,6 +3524,55 @@ }, "type": "object" }, + "BackupSchedule": { + "description": "BackupSchedule expresses the automated backup creation specification for a Spanner database. Next ID: 10", + "id": "BackupSchedule", + "properties": { + "encryptionConfig": { + "$ref": "CreateBackupEncryptionConfig", + "description": "Optional. The encryption configuration that will be used to encrypt the backup. If this field is not specified, the backup will use the same encryption configuration as the database." + }, + "fullBackupSpec": { + "$ref": "FullBackupSpec", + "description": "The schedule creates only full backups." 
+ }, + "incrementalBackupSpec": { + "$ref": "IncrementalBackupSpec", + "description": "The schedule creates incremental backup chains." + }, + "name": { + "description": "Identifier. Output only for the CreateBackupSchedule operation. Required for the UpdateBackupSchedule operation. A globally unique identifier for the backup schedule which cannot be changed. Values are of the form `projects//instances//databases//backupSchedules/a-z*[a-z0-9]` The final segment of the name must be between 2 and 60 characters in length.", + "type": "string" + }, + "retentionDuration": { + "description": "Optional. The retention duration of a backup that must be at least 6 hours and at most 366 days. The backup is eligible to be automatically deleted once the retention period has elapsed.", + "format": "google-duration", + "type": "string" + }, + "spec": { + "$ref": "BackupScheduleSpec", + "description": "Optional. The schedule specification based on which the backup creations are triggered." + }, + "updateTime": { + "description": "Output only. The timestamp at which the schedule was last updated. If the schedule has never been updated, this field contains the timestamp when the schedule was first created.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "BackupScheduleSpec": { + "description": "Defines specifications of the backup schedule.", + "id": "BackupScheduleSpec", + "properties": { + "cronSpec": { + "$ref": "CrontabSpec", + "description": "Cron style schedule specification." + } + }, + "type": "object" + }, "BatchCreateSessionsRequest": { "description": "The request for BatchCreateSessions.", "id": "BatchCreateSessionsRequest", @@ -3333,16 +3712,16 @@ "id": "ChangeQuorumRequest", "properties": { "etag": { - "description": "Optional. The etag is the hash of the QuorumInfo. The ChangeQuorum operation will only be performed if the etag matches that of the QuorumInfo in the current database resource. Otherwise the API will return an `ABORTED` error. The etag is used for optimistic concurrency control as a way to help prevent simultaneous change quorum requests that could create a race condition.", + "description": "Optional. The etag is the hash of the `QuorumInfo`. The `ChangeQuorum` operation is only performed if the etag matches that of the `QuorumInfo` in the current database resource. Otherwise the API returns an `ABORTED` error. The etag is used for optimistic concurrency control as a way to help prevent simultaneous change quorum requests that could create a race condition.", "type": "string" }, "name": { - "description": "Required. Name of the database in which to apply the ChangeQuorum. Values are of the form `projects//instances//databases/`.", + "description": "Required. Name of the database in which to apply `ChangeQuorum`. Values are of the form `projects//instances//databases/`.", "type": "string" }, "quorumType": { "$ref": "QuorumType", - "description": "Required. The type of this Quorum." + "description": "Required. The type of this quorum." } }, "type": "object" @@ -3551,6 +3930,40 @@ }, "type": "object" }, + "CreateBackupEncryptionConfig": { + "description": "Encryption configuration for the backup to create.", + "id": "CreateBackupEncryptionConfig", + "properties": { + "encryptionType": { + "description": "Required. 
The encryption type of the backup.", + "enum": [ + "ENCRYPTION_TYPE_UNSPECIFIED", + "USE_DATABASE_ENCRYPTION", + "GOOGLE_DEFAULT_ENCRYPTION", + "CUSTOMER_MANAGED_ENCRYPTION" + ], + "enumDescriptions": [ + "Unspecified. Do not use.", + "Use the same encryption configuration as the database. This is the default option when encryption_config is empty. For example, if the database is using `Customer_Managed_Encryption`, the backup will be using the same Cloud KMS key as the database.", + "Use Google default encryption.", + "Use customer managed encryption. If specified, `kms_key_name` must contain a valid Cloud KMS key." + ], + "type": "string" + }, + "kmsKeyName": { + "description": "Optional. The Cloud KMS key that will be used to protect the backup. This field should be set only when encryption_type is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form `projects//locations//keyRings//cryptoKeys/`.", + "type": "string" + }, + "kmsKeyNames": { + "description": "Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the backup's instance configuration. Some examples: * For single region instance configs, specify a single regional location KMS key. * For multi-regional instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For an instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "CreateBackupMetadata": { "description": "Metadata type for the operation returned by CreateBackup.", "id": "CreateBackupMetadata", @@ -3620,7 +4033,7 @@ "type": "array" }, "protoDescriptors": { - "description": "Optional. Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements in 'extra_statements' above. Contains a protobuf-serialized [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto). To generate it, [install](https://grpc.io/docs/protoc-installation/) and run `protoc` with --include_imports and --descriptor_set_out. For example, to generate for moon/shot/app.proto, run ``` $protoc --proto_path=/app_path --proto_path=/lib_path \\ --include_imports \\ --descriptor_set_out=descriptors.data \\ moon/shot/app.proto ``` For more details, see protobuffer [self description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).", + "description": "Optional. Proto descriptors used by `CREATE/ALTER PROTO BUNDLE` statements in 'extra_statements'. Contains a protobuf-serialized [`google.protobuf.FileDescriptorSet`](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto) descriptor set. To generate it, [install](https://grpc.io/docs/protoc-installation/) and run `protoc` with --include_imports and --descriptor_set_out. 
For example, to generate for moon/shot/app.proto, run ``` $protoc --proto_path=/app_path --proto_path=/lib_path \\ --include_imports \\ --descriptor_set_out=descriptors.data \\ moon/shot/app.proto ``` For more details, see protobuffer [self description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).", "format": "byte", "type": "string" } @@ -3638,7 +4051,7 @@ }, "instanceConfig": { "$ref": "InstanceConfig", - "description": "The target instance config end state." + "description": "The target instance configuration end state." }, "progress": { "$ref": "InstanceOperationProgress", @@ -3653,10 +4066,10 @@ "properties": { "instanceConfig": { "$ref": "InstanceConfig", - "description": "Required. The InstanceConfig proto of the configuration to create. instance_config.name must be `/instanceConfigs/`. instance_config.base_config must be a Google managed configuration name, e.g. /instanceConfigs/us-east1, /instanceConfigs/nam3." + "description": "Required. The `InstanceConfig` proto of the configuration to create. `instance_config.name` must be `/instanceConfigs/`. `instance_config.base_config` must be a Google-managed configuration name, e.g. /instanceConfigs/us-east1, /instanceConfigs/nam3." }, "instanceConfigId": { - "description": "Required. The ID of the instance config to create. Valid identifiers are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64 characters in length. The `custom-` prefix is required to avoid name conflicts with Google managed configurations.", + "description": "Required. The ID of the instance configuration to create. Valid identifiers are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64 characters in length. The `custom-` prefix is required to avoid name conflicts with Google-managed configurations.", "type": "string" }, "validateOnly": { @@ -3773,6 +4186,28 @@ }, "type": "object" }, + "CrontabSpec": { + "description": "CrontabSpec can be used to specify the version time and frequency at which the backup should be created.", + "id": "CrontabSpec", + "properties": { + "creationWindow": { + "description": "Output only. Scheduled backups will contain an externally consistent copy of the database at the version time specified in `schedule_spec.cron_spec`. However, Spanner may not initiate the creation of the scheduled backups at that version time. Spanner will initiate the creation of scheduled backups within the time window bounded by the version_time specified in `schedule_spec.cron_spec` and version_time + `creation_window`.", + "format": "google-duration", + "readOnly": true, + "type": "string" + }, + "text": { + "description": "Required. Textual representation of the crontab. User can customize the backup frequency and the backup version time using the cron expression. The version time must be in UTC time zone. The backup will contain an externally consistent copy of the database at the version time. Allowed frequencies are 12 hour, 1 day, 1 week and 1 month. Examples of valid cron specifications: * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC. * `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC. * `0 2 * * * ` : once a day at 2 past midnight in UTC. * `0 2 * * 0 ` : once a week every Sunday at 2 past midnight in UTC. * `0 2 8 * * ` : once a month on 8th day at 2 past midnight in UTC.", + "type": "string" + }, + "timeZone": { + "description": "Output only. The time zone of the times in `CrontabSpec.text`. 
Currently only UTC is supported.", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, "Database": { "description": "A Cloud Spanner database.", "id": "Database", @@ -3810,7 +4245,7 @@ "type": "string" }, "enableDropProtection": { - "description": "Whether drop protection is enabled for this database. Defaults to false, if not set. For more details, please see how to [prevent accidental database deletion](https://cloud.google.com/spanner/docs/prevent-database-deletion).", + "description": "Optional. Whether drop protection is enabled for this database. Defaults to false, if not set. For more details, please see how to [prevent accidental database deletion](https://cloud.google.com/spanner/docs/prevent-database-deletion).", "type": "boolean" }, "encryptionConfig": { @@ -3832,7 +4267,7 @@ }, "quorumInfo": { "$ref": "QuorumInfo", - "description": "Output only. Applicable only for databases that use dual region instance configurations. Contains information about the quorum.", + "description": "Output only. Applicable only for databases that use dual-region instance configurations. Contains information about the quorum.", "readOnly": true }, "reconciling": { @@ -4152,7 +4587,7 @@ "enumDescriptions": [ "The default mode. Only the statement results are returned.", "This mode returns only the query plan, without any results or execution statistics information.", - "This mode returns both the query plan and the execution statistics along with the results." + "This mode returns the query plan, overall execution statistics, operator level execution statistics along with the results. This has a performance overhead compared to the other modes. It is not recommended to use this mode for production traffic." ], "type": "string" }, @@ -4256,6 +4691,12 @@ }, "type": "object" }, + "FullBackupSpec": { + "description": "The specification for full backups. A full backup stores the entire contents of the database at a given version time.", + "id": "FullBackupSpec", + "properties": {}, + "type": "object" + }, "GetDatabaseDdlResponse": { "description": "The response for GetDatabaseDdl.", "id": "GetDatabaseDdlResponse", @@ -4316,6 +4757,12 @@ }, "type": "object" }, + "IncrementalBackupSpec": { + "description": "The specification for incremental backup chains. An incremental backup stores the delta of changes between a previous backup and the database contents at a given version time. An incremental backup chain consists of a full backup and zero or more successive incremental backups. The first backup created for an incremental backup chain is always a full backup.", + "id": "IncrementalBackupSpec", + "properties": {}, + "type": "object" + }, "IndexAdvice": { "description": "Recommendation to add new indexes to run queries more efficiently.", "id": "IndexAdvice", @@ -4386,6 +4833,22 @@ "description": "Required. The descriptive name for this instance as it appears in UIs. Must be unique per project and between 4 and 30 characters in length.", "type": "string" }, + "edition": { + "description": "Optional. The `Edition` of the current instance.", + "enum": [ + "EDITION_UNSPECIFIED", + "STANDARD", + "ENTERPRISE", + "ENTERPRISE_PLUS" + ], + "enumDescriptions": [ + "Edition not specified.", + "Standard edition.", + "Enterprise edition.", + "Enterprise Plus edition." + ], + "type": "string" + }, "endpointUris": { "description": "Deprecated. 
This field is not populated.", "items": { @@ -4423,12 +4886,12 @@ "type": "string" }, "nodeCount": { - "description": "The number of nodes allocated to this instance. At most one of either node_count or processing_units should be present in the message. Users can set the node_count field to specify the target number of nodes allocated to the instance. If autoscaling is enabled, node_count is treated as an OUTPUT_ONLY field and reflects the current number of nodes allocated to the instance. This may be zero in API responses for instances that are not yet in state `READY`. See [the documentation](https://cloud.google.com/spanner/docs/compute-capacity) for more information about nodes and processing units.", + "description": "The number of nodes allocated to this instance. At most, one of either `node_count` or `processing_units` should be present in the message. Users can set the `node_count` field to specify the target number of nodes allocated to the instance. If autoscaling is enabled, `node_count` is treated as an `OUTPUT_ONLY` field and reflects the current number of nodes allocated to the instance. This might be zero in API responses for instances that are not yet in the `READY` state. For more information, see [Compute capacity, nodes, and processing units](https://cloud.google.com/spanner/docs/compute-capacity).", "format": "int32", "type": "integer" }, "processingUnits": { - "description": "The number of processing units allocated to this instance. At most one of processing_units or node_count should be present in the message. Users can set the processing_units field to specify the target number of processing units allocated to the instance. If autoscaling is enabled, processing_units is treated as an OUTPUT_ONLY field and reflects the current number of processing units allocated to the instance. This may be zero in API responses for instances that are not yet in state `READY`. See [the documentation](https://cloud.google.com/spanner/docs/compute-capacity) for more information about nodes and processing units.", + "description": "The number of processing units allocated to this instance. At most, one of either `processing_units` or `node_count` should be present in the message. Users can set the `processing_units` field to specify the target number of processing units allocated to the instance. If autoscaling is enabled, `processing_units` is treated as an `OUTPUT_ONLY` field and reflects the current number of processing units allocated to the instance. This might be zero in API responses for instances that are not yet in the `READY` state. For more information, see [Compute capacity, nodes and processing units](https://cloud.google.com/spanner/docs/compute-capacity).", "format": "int32", "type": "integer" }, @@ -4461,11 +4924,11 @@ "id": "InstanceConfig", "properties": { "baseConfig": { - "description": "Base configuration name, e.g. projects//instanceConfigs/nam3, based on which this configuration is created. Only set for user managed configurations. `base_config` must refer to a configuration of type GOOGLE_MANAGED in the same project as this configuration.", + "description": "Base configuration name, e.g. projects//instanceConfigs/nam3, based on which this configuration is created. Only set for user-managed configurations. `base_config` must refer to a configuration of type `GOOGLE_MANAGED` in the same project as this configuration.", "type": "string" }, "configType": { - "description": "Output only. 
Whether this instance config is a Google or User Managed Configuration.", + "description": "Output only. Whether this instance configuration is a Google-managed or user-managed configuration.", "enum": [ "TYPE_UNSPECIFIED", "GOOGLE_MANAGED", @@ -4473,8 +4936,8 @@ ], "enumDescriptions": [ "Unspecified.", - "Google managed configuration.", - "User managed configuration." + "Google-managed configuration.", + "User-managed configuration." ], "readOnly": true, "type": "string" @@ -4484,11 +4947,11 @@ "type": "string" }, "etag": { - "description": "etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a instance config from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform instance config updates in order to avoid race conditions: An etag is returned in the response which contains instance configs, and systems are expected to put that etag in the request to update instance config to ensure that their change will be applied to the same version of the instance config. If no etag is provided in the call to update instance config, then the existing instance config is overwritten blindly.", + "description": "etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a instance configuration from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform instance configuration updates in order to avoid race conditions: An etag is returned in the response which contains instance configurations, and systems are expected to put that etag in the request to update instance configuration to ensure that their change is applied to the same version of the instance configuration. If no etag is provided in the call to update the instance configuration, then the existing instance configuration is overwritten blindly.", "type": "string" }, "freeInstanceAvailability": { - "description": "Output only. Describes whether free instances are available to be created in this instance config.", + "description": "Output only. Describes whether free instances are available to be created in this instance configuration.", "enum": [ "FREE_INSTANCE_AVAILABILITY_UNSPECIFIED", "AVAILABLE", @@ -4498,10 +4961,10 @@ ], "enumDescriptions": [ "Not specified.", - "Indicates that free instances are available to be created in this instance config.", - "Indicates that free instances are not supported in this instance config.", - "Indicates that free instances are currently not available to be created in this instance config.", - "Indicates that additional free instances cannot be created in this instance config because the project has reached its limit of free instances." + "Indicates that free instances are available to be created in this instance configuration.", + "Indicates that free instances are not supported in this instance configuration.", + "Indicates that free instances are currently not available to be created in this instance configuration.", + "Indicates that additional free instances cannot be created in this instance configuration because the project has reached its limit of free instances." ], "readOnly": true, "type": "string" @@ -4521,11 +4984,11 @@ "type": "array" }, "name": { - "description": "A unique identifier for the instance configuration. Values are of the form `projects//instanceConfigs/a-z*`. 
User instance config must start with `custom-`.", + "description": "A unique identifier for the instance configuration. Values are of the form `projects//instanceConfigs/a-z*`. User instance configuration must start with `custom-`.", "type": "string" }, "optionalReplicas": { - "description": "Output only. The available optional replicas to choose from for user managed configurations. Populated for Google managed configurations.", + "description": "Output only. The available optional replicas to choose from for user-managed configurations. Populated for Google-managed configurations.", "items": { "$ref": "ReplicaInfo" }, @@ -4541,28 +5004,28 @@ "MULTI_REGION" ], "enumDescriptions": [ - "Not specified.", - "An instance configuration tagged with REGION quorum type forms a write quorum in a single region.", - "An instance configuration tagged with DUAL_REGION quorum type forms a write quorums with exactly two read-write regions in a multi-region configuration. This instance configurations requires reconfiguration in the event of regional failures.", - "An instance configuration tagged with MULTI_REGION quorum type forms a write quorums from replicas are spread across more than one region in a multi-region configuration." + "Quorum type not specified.", + "An instance configuration tagged with `REGION` quorum type forms a write quorum in a single region.", + "An instance configuration tagged with the `DUAL_REGION` quorum type forms a write quorum with exactly two read-write regions in a multi-region configuration. This instance configuration requires failover in the event of regional failures.", + "An instance configuration tagged with the `MULTI_REGION` quorum type forms a write quorum from replicas that are spread across more than one region in a multi-region configuration." ], "readOnly": true, "type": "string" }, "reconciling": { - "description": "Output only. If true, the instance config is being created or updated. If false, there are no ongoing operations for the instance config.", + "description": "Output only. If true, the instance configuration is being created or updated. If false, there are no ongoing operations for the instance configuration.", "readOnly": true, "type": "boolean" }, "replicas": { - "description": "The geographic placement of nodes in this instance configuration and their replication properties. To create user managed configurations, input `replicas` must include all replicas in `replicas` of the `base_config` and include one or more replicas in the `optional_replicas` of the `base_config`.", + "description": "The geographic placement of nodes in this instance configuration and their replication properties. To create user-managed configurations, input `replicas` must include all replicas in `replicas` of the `base_config` and include one or more replicas in the `optional_replicas` of the `base_config`.", "items": { "$ref": "ReplicaInfo" }, "type": "array" }, "state": { - "description": "Output only. The current instance config state. Applicable only for USER_MANAGED configs.", + "description": "Output only. The current instance configuration state. Applicable only for `USER_MANAGED` configurations.", "enum": [ "STATE_UNSPECIFIED", "CREATING", @@ -4570,8 +5033,8 @@ ], "enumDescriptions": [ "Not specified.", - "The instance config is still being created.", - "The instance config is fully created and ready to be used to create instances." 
+ "The instance configuration is still being created.", + "The instance configuration is fully created and ready to be used to create instances." ], "readOnly": true, "type": "string" @@ -4634,17 +5097,18 @@ "type": "string" }, "nodeCount": { - "description": "The number of nodes allocated to this instance partition. Users can set the node_count field to specify the target number of nodes allocated to the instance partition. This may be zero in API responses for instance partitions that are not yet in state `READY`.", + "description": "The number of nodes allocated to this instance partition. Users can set the `node_count` field to specify the target number of nodes allocated to the instance partition. This may be zero in API responses for instance partitions that are not yet in state `READY`.", "format": "int32", "type": "integer" }, "processingUnits": { - "description": "The number of processing units allocated to this instance partition. Users can set the processing_units field to specify the target number of processing units allocated to the instance partition. This may be zero in API responses for instance partitions that are not yet in state `READY`.", + "description": "The number of processing units allocated to this instance partition. Users can set the `processing_units` field to specify the target number of processing units allocated to the instance partition. This might be zero in API responses for instance partitions that are not yet in the `READY` state.", "format": "int32", "type": "integer" }, "referencingBackups": { - "description": "Output only. The names of the backups that reference this instance partition. Referencing backups should share the parent instance. The existence of any referencing backup prevents the instance partition from being deleted.", + "deprecated": true, + "description": "Output only. Deprecated: This field is not populated. Output only. The names of the backups that reference this instance partition. Referencing backups should share the parent instance. The existence of any referencing backup prevents the instance partition from being deleted.", "items": { "type": "string" }, @@ -4834,6 +5298,24 @@ }, "type": "object" }, + "ListBackupSchedulesResponse": { + "description": "The response for ListBackupSchedules.", + "id": "ListBackupSchedulesResponse", + "properties": { + "backupSchedules": { + "description": "The list of backup schedules for a database.", + "items": { + "$ref": "BackupSchedule" + }, + "type": "array" + }, + "nextPageToken": { + "description": "`next_page_token` can be sent in a subsequent ListBackupSchedules call to fetch more of the schedules.", + "type": "string" + } + }, + "type": "object" + }, "ListBackupsResponse": { "description": "The response for ListBackups.", "id": "ListBackupsResponse", @@ -4915,7 +5397,7 @@ "type": "string" }, "operations": { - "description": "The list of matching instance config long-running operations. Each operation's name will be prefixed by the instance config's name. The operation's metadata field type `metadata.type_url` describes the type of the metadata.", + "description": "The list of matching instance configuration long-running operations. Each operation's name will be prefixed by the name of the instance configuration. The operation's metadata field type `metadata.type_url` describes the type of the metadata.", "items": { "$ref": "Operation" }, @@ -5199,12 +5681,29 @@ "id": "MoveInstanceRequest", "properties": { "targetConfig": { - "description": "Required. 
The target instance config for the instance to move. Values are of the form `projects//instanceConfigs/`.", + "description": "Required. The target instance configuration to which to move the instance. Values are of the form `projects//instanceConfigs/`.", "type": "string" } }, "type": "object" }, + "MultiplexedSessionPrecommitToken": { + "description": "When a read-write transaction is executed on a multiplexed session, this precommit token is sent back to the client as a part of the [Transaction] message in the BeginTransaction response and also as a part of the [ResultSet] and [PartialResultSet] responses.", + "id": "MultiplexedSessionPrecommitToken", + "properties": { + "precommitToken": { + "description": "Opaque precommit token.", + "format": "byte", + "type": "string" + }, + "seqNum": { + "description": "An incrementing sequence number is generated on every precommit token that is returned. Clients should remember the precommit token with the highest sequence number from the current transaction attempt.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "Mutation": { "description": "A modification to one or more Cloud Spanner rows. Mutations can be applied to a Cloud Spanner database by sending them in a Commit call.", "id": "Mutation", @@ -5402,7 +5901,7 @@ "description": "Additional options that affect how many partitions are created." }, "sql": { - "description": "Required. The query request to generate partitions for. The request will fail if the query is not root partitionable. For a query to be root partitionable, it needs to satisfy a few conditions. For example, if the query execution plan contains a distributed union operator, then it must be the first operator in the plan. For more information about other conditions, see [Read data in parallel](https://cloud.google.com/spanner/docs/reads#read_data_in_parallel). The query request must not contain DML commands, such as INSERT, UPDATE, or DELETE. Use ExecuteStreamingSql with a PartitionedDml transaction for large, partition-friendly DML operations.", + "description": "Required. The query request to generate partitions for. The request fails if the query is not root partitionable. For a query to be root partitionable, it needs to satisfy a few conditions. For example, if the query execution plan contains a distributed union operator, then it must be the first operator in the plan. For more information about other conditions, see [Read data in parallel](https://cloud.google.com/spanner/docs/reads#read_data_in_parallel). The query request must not contain DML commands, such as `INSERT`, `UPDATE`, or `DELETE`. Use `ExecuteStreamingSql` with a PartitionedDml transaction for large, partition-friendly DML operations.", "type": "string" }, "transaction": { @@ -5629,16 +6128,16 @@ "type": "object" }, "QuorumInfo": { - "description": "Information about the dual region quorum.", + "description": "Information about the dual-region quorum.", "id": "QuorumInfo", "properties": { "etag": { - "description": "Output only. The etag is used for optimistic concurrency control as a way to help prevent simultaneous ChangeQuorum requests that could create a race condition.", + "description": "Output only. The etag is used for optimistic concurrency control as a way to help prevent simultaneous `ChangeQuorum` requests that might create a race condition.", "readOnly": true, "type": "string" }, "initiator": { - "description": "Output only. Whether this ChangeQuorum is a Google or User initiated.", + "description": "Output only. 
Whether this `ChangeQuorum` is Google or User initiated.", "enum": [ "INITIATOR_UNSPECIFIED", "GOOGLE", @@ -5646,8 +6145,8 @@ ], "enumDescriptions": [ "Unspecified.", - "ChangeQuorum initiated by Google.", - "ChangeQuorum initiated by User." + "`ChangeQuorum` initiated by Google.", + "`ChangeQuorum` initiated by User." ], "readOnly": true, "type": "string" @@ -5667,16 +6166,16 @@ "type": "object" }, "QuorumType": { - "description": "Information about the database quorum type. this applies only for dual region instance configs.", + "description": "Information about the database quorum type. This only applies to dual-region instance configs.", "id": "QuorumType", "properties": { "dualRegion": { "$ref": "DualRegionQuorum", - "description": "Dual region quorum type." + "description": "Dual-region quorum type." }, "singleRegion": { "$ref": "SingleRegionQuorum", - "description": "Single region quorum type." + "description": "Single-region quorum type." } }, "type": "object" @@ -5933,7 +6432,7 @@ "type": "string" }, "kmsKeyNames": { - "description": "Optional. Specifies the KMS configuration for the one or more keys used to encrypt the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the database instance configuration. Some examples: * For single region database instance configs, specify a single regional location KMS key. * For multi-regional database instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For a database instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs.", + "description": "Optional. Specifies the KMS configuration for the one or more keys used to encrypt the database. Values have the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the database instance configuration. Some examples: * For single region database instance configurations, specify a single regional location KMS key. * For multi-regional database instance configurations of type `GOOGLE_MANAGED`, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For a database instance configuration of type `USER_MANAGED`, please specify only regional location KMS keys to cover each region in the instance configuration. Multi-regional location KMS keys are not supported for USER_MANAGED instance configurations.", "items": { "type": "string" }, @@ -6238,7 +6737,7 @@ "id": "SingleRegionQuorum", "properties": { "servingLocation": { - "description": "Required. The location of the serving region, e.g. \"us-central1\". The location must be one of the regions within the dual region instance configuration of your database. The list of valid locations is available via [GetInstanceConfig[InstanceAdmin.GetInstanceConfig] API. This should only be used if you plan to change quorum in single-region quorum type.", + "description": "Required. The location of the serving region, e.g. \"us-central1\". The location must be one of the regions within the dual-region instance configuration of your database. The list of valid locations is available using the GetInstanceConfig API. 
This should only be used if you plan to change quorum to the single-region quorum type.", "type": "string" } }, @@ -6348,6 +6847,10 @@ "format": "byte", "type": "string" }, + "precommitToken": { + "$ref": "MultiplexedSessionPrecommitToken", + "description": "A precommit token will be included in the response of a BeginTransaction request if the read-write transaction is on a multiplexed session and a mutation_key was specified in the BeginTransaction. The precommit token with the highest sequence number from this transaction attempt should be passed to the Commit request for this transaction." + }, "readTimestamp": { "description": "For snapshot read-only transactions, the read timestamp chosen for the transaction. Not returned by default: see TransactionOptions.ReadOnly.return_read_timestamp. A timestamp in RFC3339 UTC \\\"Zulu\\\" format, accurate to nanoseconds. Example: `\"2014-10-02T15:01:23.045123456Z\"`.", "format": "google-datetime", @@ -6585,7 +7088,7 @@ }, "instanceConfig": { "$ref": "InstanceConfig", - "description": "The desired instance config after updating." + "description": "The desired instance configuration after updating." }, "progress": { "$ref": "InstanceOperationProgress", @@ -6600,7 +7103,7 @@ "properties": { "instanceConfig": { "$ref": "InstanceConfig", - "description": "Required. The user instance config to update, which must always include the instance config name. Otherwise, only fields mentioned in update_mask need be included. To prevent conflicts of concurrent updates, etag can be used." + "description": "Required. The user instance configuration to update, which must always include the instance configuration name. Otherwise, only fields mentioned in update_mask need be included. To prevent conflicts of concurrent updates, etag can be used." }, "updateMask": { "description": "Required. A mask specifying which fields in InstanceConfig should be updated. The field mask must always be specified; this prevents any future fields in InstanceConfig from being erased accidentally by clients that do not know about them. 
Only display_name and labels can be updated.", diff --git a/terraform/providers/google/vendor/google.golang.org/api/spanner/v1/spanner-gen.go b/terraform/providers/google/vendor/google.golang.org/api/spanner/v1/spanner-gen.go index 8d33865594e..444ab675982 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/spanner/v1/spanner-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/spanner/v1/spanner-gen.go @@ -315,6 +315,7 @@ type ProjectsInstancesDatabaseOperationsService struct { func NewProjectsInstancesDatabasesService(s *Service) *ProjectsInstancesDatabasesService { rs := &ProjectsInstancesDatabasesService{s: s} + rs.BackupSchedules = NewProjectsInstancesDatabasesBackupSchedulesService(s) rs.DatabaseRoles = NewProjectsInstancesDatabasesDatabaseRolesService(s) rs.Operations = NewProjectsInstancesDatabasesOperationsService(s) rs.Sessions = NewProjectsInstancesDatabasesSessionsService(s) @@ -324,6 +325,8 @@ func NewProjectsInstancesDatabasesService(s *Service) *ProjectsInstancesDatabase type ProjectsInstancesDatabasesService struct { s *Service + BackupSchedules *ProjectsInstancesDatabasesBackupSchedulesService + DatabaseRoles *ProjectsInstancesDatabasesDatabaseRolesService Operations *ProjectsInstancesDatabasesOperationsService @@ -331,6 +334,15 @@ type ProjectsInstancesDatabasesService struct { Sessions *ProjectsInstancesDatabasesSessionsService } +func NewProjectsInstancesDatabasesBackupSchedulesService(s *Service) *ProjectsInstancesDatabasesBackupSchedulesService { + rs := &ProjectsInstancesDatabasesBackupSchedulesService{s: s} + return rs +} + +type ProjectsInstancesDatabasesBackupSchedulesService struct { + s *Service +} + func NewProjectsInstancesDatabasesDatabaseRolesService(s *Service) *ProjectsInstancesDatabasesDatabaseRolesService { rs := &ProjectsInstancesDatabasesDatabaseRolesService{s: s} return rs @@ -406,7 +418,7 @@ type ScansService struct { s *Service } -// AutoscalingConfig: Autoscaling config for an instance. +// AutoscalingConfig: Autoscaling configuration for an instance. type AutoscalingConfig struct { // AutoscalingLimits: Required. Autoscaling limits for an instance. AutoscalingLimits *AutoscalingLimits `json:"autoscalingLimits,omitempty"` @@ -425,9 +437,9 @@ type AutoscalingConfig struct { NullFields []string `json:"-"` } -func (s *AutoscalingConfig) MarshalJSON() ([]byte, error) { +func (s AutoscalingConfig) MarshalJSON() ([]byte, error) { type NoMethod AutoscalingConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AutoscalingLimits: The autoscaling limits for the instance. Users can define @@ -462,9 +474,9 @@ type AutoscalingLimits struct { NullFields []string `json:"-"` } -func (s *AutoscalingLimits) MarshalJSON() ([]byte, error) { +func (s AutoscalingLimits) MarshalJSON() ([]byte, error) { type NoMethod AutoscalingLimits - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AutoscalingTargets: The autoscaling targets for an instance. 
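One mechanical change repeats through the rest of this file: every regenerated MarshalJSON moves from a pointer receiver (s *T) to a value receiver (s T), keeping the NoMethod alias the generator has always used to break recursion. A minimal standalone sketch of that pattern, using plain encoding/json and a hypothetical Payload type instead of the vendored gensupport helper:

package main

import (
	"encoding/json"
	"fmt"
)

// Payload stands in for any generated schema type.
type Payload struct {
	MaxNodes int64 `json:"maxNodes,omitempty"`
	MinNodes int64 `json:"minNodes,omitempty"`
}

// Value receiver: the method is in the method set of both Payload and
// *Payload, so json.Marshal picks it up for values and pointers alike.
func (s Payload) MarshalJSON() ([]byte, error) {
	// NoMethod has Payload's fields but none of its methods, so the
	// json.Marshal call below uses the default struct encoding instead
	// of recursing back into this function.
	type NoMethod Payload
	return json.Marshal(NoMethod(s))
}

func main() {
	b, err := json.Marshal(&Payload{MinNodes: 1, MaxNodes: 3})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"maxNodes":3,"minNodes":1}
}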
@@ -494,13 +506,22 @@ type AutoscalingTargets struct { NullFields []string `json:"-"` } -func (s *AutoscalingTargets) MarshalJSON() ([]byte, error) { +func (s AutoscalingTargets) MarshalJSON() ([]byte, error) { type NoMethod AutoscalingTargets - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Backup: A backup of a Cloud Spanner database. type Backup struct { + // BackupSchedules: Output only. List of backup schedule URIs that are + // associated with creating this backup. This is only applicable for scheduled + // backups, and is empty for on-demand backups. To optimize for storage, + // whenever possible, multiple schedules are collapsed together to create one + // backup. In such cases, this field captures the list of all backup schedule + // URIs that are associated with creating this backup. If collapsing is not + // done, then this field captures the single backup schedule URI associated + // with creating this backup. + BackupSchedules []string `json:"backupSchedules,omitempty"` // CreateTime: Output only. The time the CreateBackup request is received. If // the request does not specify `version_time`, the `version_time` of the // backup will be equivalent to the `create_time`. @@ -528,12 +549,34 @@ type Backup struct { // restored. If a key version is revoked in the middle of a restore, the // restore behavior is undefined. EncryptionInformation []*EncryptionInfo `json:"encryptionInformation,omitempty"` + // ExclusiveSizeBytes: Output only. For a backup in an incremental backup + // chain, this is the storage space needed to keep the data that has changed + // since the previous backup. For all other backups, this is always the size of + // the backup. This value may change if backups on the same chain get deleted + // or expired. This field can be used to calculate the total storage space used + // by a set of backups. For example, the total space used by all backups of a + // database can be computed by summing up this field. + ExclusiveSizeBytes int64 `json:"exclusiveSizeBytes,omitempty,string"` // ExpireTime: Required for the CreateBackup operation. The expiration time of // the backup, with microseconds granularity that must be at least 6 hours and // at most 366 days from the time the CreateBackup request is processed. Once // the `expire_time` has passed, the backup is eligible to be automatically // deleted by Cloud Spanner to free the resources used by the backup. ExpireTime string `json:"expireTime,omitempty"` + // FreeableSizeBytes: Output only. The number of bytes that will be freed by + // deleting this backup. This value will be zero if, for example, this backup + // is part of an incremental backup chain and younger backups in the chain + // require that we keep its data. For backups not in an incremental backup + // chain, this is always the size of the backup. This value may change if + // backups on the same chain get created, deleted or expired. + FreeableSizeBytes int64 `json:"freeableSizeBytes,omitempty,string"` + // IncrementalBackupChainId: Output only. Populated only for backups in an + // incremental backup chain. Backups share the same chain id if and only if + // they belong to the same incremental backup chain. Use this field to + // determine which backups are part of the same incremental backup chain. The + // ordering of backups in the chain can be determined by ordering the backup + // `version_time`. 
+ IncrementalBackupChainId string `json:"incrementalBackupChainId,omitempty"` // MaxExpireTime: Output only. The max allowed expiration time of the backup, // with microseconds granularity. A backup's expiration time can be configured // in multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or @@ -549,6 +592,13 @@ type Backup struct { // containing the backup, identified by the prefix of the backup name of the // form `projects//instances/`. Name string `json:"name,omitempty"` + // OldestVersionTime: Output only. Data deleted at a time older than this is + // guaranteed not to be retained in order to support this backup. For a backup + // in an incremental backup chain, this is the version time of the oldest + // backup that exists or ever existed in the chain. For all other backups, this + // is the version time of the backup. This field can be used to understand what + // data is being retained by the backup system. + OldestVersionTime string `json:"oldestVersionTime,omitempty"` // ReferencingBackups: Output only. The names of the destination backups being // created by copying this source backup. The backup names are of the form // `projects//instances//backups/`. Referencing backups may exist in different @@ -564,7 +614,9 @@ type Backup struct { // backup from being deleted. When a restored database from the backup enters // the `READY` state, the reference to the backup is removed. ReferencingDatabases []string `json:"referencingDatabases,omitempty"` - // SizeBytes: Output only. Size of the backup in bytes. + // SizeBytes: Output only. Size of the backup in bytes. For a backup in an + // incremental backup chain, this is the sum of the `exclusive_size_bytes` of + // itself and all older backups in the chain. SizeBytes int64 `json:"sizeBytes,omitempty,string"` // State: Output only. The current state of the backup. // @@ -582,22 +634,22 @@ type Backup struct { // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreateTime") to + // ForceSendFields is a list of field names (e.g. "BackupSchedules") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreateTime") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "BackupSchedules") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *Backup) MarshalJSON() ([]byte, error) { +func (s Backup) MarshalJSON() ([]byte, error) { type NoMethod Backup - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackupInfo: Information about a backup. 
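The new size fields above encode the incremental-chain accounting described in their comments: exclusive_size_bytes counts only the bytes no other backup retains, so summing it over a set of backups gives total storage without double counting, while a chained backup's size_bytes is the running sum over the chain. A small sketch against the regenerated Backup type; the backup names and byte counts are illustrative:

package main

import (
	"fmt"

	spanner "google.golang.org/api/spanner/v1"
)

// totalStorageBytes sums ExclusiveSizeBytes, which per the field docs yields
// the total storage consumed by a set of backups without double-counting
// data shared along an incremental backup chain.
func totalStorageBytes(backups []*spanner.Backup) int64 {
	var total int64
	for _, b := range backups {
		total += b.ExclusiveSizeBytes
	}
	return total
}

func main() {
	chain := []*spanner.Backup{
		{Name: "projects/p/instances/i/backups/full", IncrementalBackupChainId: "chain-1", ExclusiveSizeBytes: 10 << 30},
		{Name: "projects/p/instances/i/backups/incr-1", IncrementalBackupChainId: "chain-1", ExclusiveSizeBytes: 1 << 30},
	}
	fmt.Printf("chain storage: %d GiB\n", totalStorageBytes(chain)>>30)
}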
@@ -626,9 +678,80 @@ type BackupInfo struct { NullFields []string `json:"-"` } -func (s *BackupInfo) MarshalJSON() ([]byte, error) { +func (s BackupInfo) MarshalJSON() ([]byte, error) { type NoMethod BackupInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BackupSchedule: BackupSchedule expresses the automated backup creation +// specification for a Spanner database. Next ID: 10 +type BackupSchedule struct { + // EncryptionConfig: Optional. The encryption configuration that will be used + // to encrypt the backup. If this field is not specified, the backup will use + // the same encryption configuration as the database. + EncryptionConfig *CreateBackupEncryptionConfig `json:"encryptionConfig,omitempty"` + // FullBackupSpec: The schedule creates only full backups. + FullBackupSpec *FullBackupSpec `json:"fullBackupSpec,omitempty"` + // IncrementalBackupSpec: The schedule creates incremental backup chains. + IncrementalBackupSpec *IncrementalBackupSpec `json:"incrementalBackupSpec,omitempty"` + // Name: Identifier. Output only for the CreateBackupSchedule operation. + // Required for the UpdateBackupSchedule operation. A globally unique + // identifier for the backup schedule which cannot be changed. Values are of + // the form `projects//instances//databases//backupSchedules/a-z*[a-z0-9]` The + // final segment of the name must be between 2 and 60 characters in length. + Name string `json:"name,omitempty"` + // RetentionDuration: Optional. The retention duration of a backup that must be + // at least 6 hours and at most 366 days. The backup is eligible to be + // automatically deleted once the retention period has elapsed. + RetentionDuration string `json:"retentionDuration,omitempty"` + // Spec: Optional. The schedule specification based on which the backup + // creations are triggered. + Spec *BackupScheduleSpec `json:"spec,omitempty"` + // UpdateTime: Output only. The timestamp at which the schedule was last + // updated. If the schedule has never been updated, this field contains the + // timestamp when the schedule was first created. + UpdateTime string `json:"updateTime,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "EncryptionConfig") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "EncryptionConfig") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BackupSchedule) MarshalJSON() ([]byte, error) { + type NoMethod BackupSchedule + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BackupScheduleSpec: Defines specifications of the backup schedule. +type BackupScheduleSpec struct { + // CronSpec: Cron style schedule specification. + CronSpec *CrontabSpec `json:"cronSpec,omitempty"` + // ForceSendFields is a list of field names (e.g. "CronSpec") to + // unconditionally include in API requests. 
By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "CronSpec") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BackupScheduleSpec) MarshalJSON() ([]byte, error) { + type NoMethod BackupScheduleSpec + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BatchCreateSessionsRequest: The request for BatchCreateSessions. @@ -653,9 +776,9 @@ type BatchCreateSessionsRequest struct { NullFields []string `json:"-"` } -func (s *BatchCreateSessionsRequest) MarshalJSON() ([]byte, error) { +func (s BatchCreateSessionsRequest) MarshalJSON() ([]byte, error) { type NoMethod BatchCreateSessionsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BatchCreateSessionsResponse: The response for BatchCreateSessions. @@ -678,9 +801,9 @@ type BatchCreateSessionsResponse struct { NullFields []string `json:"-"` } -func (s *BatchCreateSessionsResponse) MarshalJSON() ([]byte, error) { +func (s BatchCreateSessionsResponse) MarshalJSON() ([]byte, error) { type NoMethod BatchCreateSessionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BatchWriteRequest: The request for BatchWrite. @@ -714,9 +837,9 @@ type BatchWriteRequest struct { NullFields []string `json:"-"` } -func (s *BatchWriteRequest) MarshalJSON() ([]byte, error) { +func (s BatchWriteRequest) MarshalJSON() ([]byte, error) { type NoMethod BatchWriteRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BatchWriteResponse: The result of applying a batch of mutations. @@ -746,9 +869,9 @@ type BatchWriteResponse struct { NullFields []string `json:"-"` } -func (s *BatchWriteResponse) MarshalJSON() ([]byte, error) { +func (s BatchWriteResponse) MarshalJSON() ([]byte, error) { type NoMethod BatchWriteResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BeginTransactionRequest: The request for BeginTransaction. @@ -773,9 +896,9 @@ type BeginTransactionRequest struct { NullFields []string `json:"-"` } -func (s *BeginTransactionRequest) MarshalJSON() ([]byte, error) { +func (s BeginTransactionRequest) MarshalJSON() ([]byte, error) { type NoMethod BeginTransactionRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Binding: Associates `members`, or principals, with a `role`. 
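BackupSchedule above is the heart of the new backupSchedules surface; the service methods themselves are not visible in this excerpt, so the sketch below only assembles and prints a schedule value. The schedule name is a placeholder, and exactly one of FullBackupSpec or IncrementalBackupSpec should be set:

package main

import (
	"encoding/json"
	"fmt"

	spanner "google.golang.org/api/spanner/v1"
)

func main() {
	sched := &spanner.BackupSchedule{
		// Name format per the field docs:
		// projects/<p>/instances/<i>/databases/<d>/backupSchedules/<id>
		Name:           "projects/p/instances/i/databases/d/backupSchedules/daily-full",
		FullBackupSpec: &spanner.FullBackupSpec{}, // full backups; IncrementalBackupSpec would start chains
		// Must be between 6 hours and 366 days; 7 days here.
		RetentionDuration: "604800s",
		// Spec (a BackupScheduleSpec wrapping a CrontabSpec, defined later
		// in this file) would set the cron cadence; omitted in this sketch.
	}
	b, err := json.MarshalIndent(sched, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}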
@@ -872,9 +995,9 @@ type Binding struct { NullFields []string `json:"-"` } -func (s *Binding) MarshalJSON() ([]byte, error) { +func (s Binding) MarshalJSON() ([]byte, error) { type NoMethod Binding - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ChangeQuorumMetadata: Metadata type for the long-running operation returned @@ -900,24 +1023,23 @@ type ChangeQuorumMetadata struct { NullFields []string `json:"-"` } -func (s *ChangeQuorumMetadata) MarshalJSON() ([]byte, error) { +func (s ChangeQuorumMetadata) MarshalJSON() ([]byte, error) { type NoMethod ChangeQuorumMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ChangeQuorumRequest: The request for ChangeQuorum. type ChangeQuorumRequest struct { - // Etag: Optional. The etag is the hash of the QuorumInfo. The ChangeQuorum - // operation will only be performed if the etag matches that of the QuorumInfo - // in the current database resource. Otherwise the API will return an `ABORTED` - // error. The etag is used for optimistic concurrency control as a way to help - // prevent simultaneous change quorum requests that could create a race - // condition. + // Etag: Optional. The etag is the hash of the `QuorumInfo`. The `ChangeQuorum` + // operation is only performed if the etag matches that of the `QuorumInfo` in + // the current database resource. Otherwise the API returns an `ABORTED` error. + // The etag is used for optimistic concurrency control as a way to help prevent + // simultaneous change quorum requests that could create a race condition. Etag string `json:"etag,omitempty"` - // Name: Required. Name of the database in which to apply the ChangeQuorum. + // Name: Required. Name of the database in which to apply `ChangeQuorum`. // Values are of the form `projects//instances//databases/`. Name string `json:"name,omitempty"` - // QuorumType: Required. The type of this Quorum. + // QuorumType: Required. The type of this quorum. QuorumType *QuorumType `json:"quorumType,omitempty"` // ForceSendFields is a list of field names (e.g. "Etag") to unconditionally // include in API requests. By default, fields with empty or default values are @@ -932,9 +1054,9 @@ type ChangeQuorumRequest struct { NullFields []string `json:"-"` } -func (s *ChangeQuorumRequest) MarshalJSON() ([]byte, error) { +func (s ChangeQuorumRequest) MarshalJSON() ([]byte, error) { type NoMethod ChangeQuorumRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ChildLink: Metadata associated with a parent-child relationship appearing in @@ -968,9 +1090,9 @@ type ChildLink struct { NullFields []string `json:"-"` } -func (s *ChildLink) MarshalJSON() ([]byte, error) { +func (s ChildLink) MarshalJSON() ([]byte, error) { type NoMethod ChildLink - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CommitRequest: The request for Commit. 
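ChangeQuorumRequest formalizes the optimistic-concurrency contract from its Etag comment: read the database's QuorumInfo, echo the etag back, and expect ABORTED if a concurrent ChangeQuorum won the race. A sketch of a dual-region to single-region move; the Go field names on QuorumType and SingleRegionQuorum are inferred from the discovery schema earlier in this patch, and the serving location must be one of the dual-region configuration's regions:

package main

import (
	spanner "google.golang.org/api/spanner/v1"
)

// newSingleRegionChangeQuorum prepares a conditional ChangeQuorumRequest.
// quorumEtag should be the etag read from the database's QuorumInfo so the
// server can reject the call with ABORTED if the quorum changed meanwhile.
func newSingleRegionChangeQuorum(dbName, quorumEtag, servingLocation string) *spanner.ChangeQuorumRequest {
	return &spanner.ChangeQuorumRequest{
		Name: dbName, // projects/<p>/instances/<i>/databases/<d>
		Etag: quorumEtag,
		QuorumType: &spanner.QuorumType{
			SingleRegion: &spanner.SingleRegionQuorum{ServingLocation: servingLocation},
		},
	}
}

func main() {
	_ = newSingleRegionChangeQuorum(
		"projects/p/instances/i/databases/d",
		"etag-read-from-QuorumInfo",
		"us-central1",
	)
}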
@@ -1012,9 +1134,9 @@ type CommitRequest struct { NullFields []string `json:"-"` } -func (s *CommitRequest) MarshalJSON() ([]byte, error) { +func (s CommitRequest) MarshalJSON() ([]byte, error) { type NoMethod CommitRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CommitResponse: The response for Commit. @@ -1041,9 +1163,9 @@ type CommitResponse struct { NullFields []string `json:"-"` } -func (s *CommitResponse) MarshalJSON() ([]byte, error) { +func (s CommitResponse) MarshalJSON() ([]byte, error) { type NoMethod CommitResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CommitStats: Additional statistics about a commit. @@ -1070,9 +1192,9 @@ type CommitStats struct { NullFields []string `json:"-"` } -func (s *CommitStats) MarshalJSON() ([]byte, error) { +func (s CommitStats) MarshalJSON() ([]byte, error) { type NoMethod CommitStats - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ContextValue: A message representing context for a KeyRangeInfo, including a @@ -1106,9 +1228,9 @@ type ContextValue struct { NullFields []string `json:"-"` } -func (s *ContextValue) MarshalJSON() ([]byte, error) { +func (s ContextValue) MarshalJSON() ([]byte, error) { type NoMethod ContextValue - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *ContextValue) UnmarshalJSON(data []byte) error { @@ -1171,9 +1293,9 @@ type CopyBackupEncryptionConfig struct { NullFields []string `json:"-"` } -func (s *CopyBackupEncryptionConfig) MarshalJSON() ([]byte, error) { +func (s CopyBackupEncryptionConfig) MarshalJSON() ([]byte, error) { type NoMethod CopyBackupEncryptionConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CopyBackupMetadata: Metadata type for the operation returned by CopyBackup. @@ -1209,9 +1331,9 @@ type CopyBackupMetadata struct { NullFields []string `json:"-"` } -func (s *CopyBackupMetadata) MarshalJSON() ([]byte, error) { +func (s CopyBackupMetadata) MarshalJSON() ([]byte, error) { type NoMethod CopyBackupMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CopyBackupRequest: The request for CopyBackup. @@ -1250,9 +1372,60 @@ type CopyBackupRequest struct { NullFields []string `json:"-"` } -func (s *CopyBackupRequest) MarshalJSON() ([]byte, error) { +func (s CopyBackupRequest) MarshalJSON() ([]byte, error) { type NoMethod CopyBackupRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// CreateBackupEncryptionConfig: Encryption configuration for the backup to +// create. +type CreateBackupEncryptionConfig struct { + // EncryptionType: Required. The encryption type of the backup. + // + // Possible values: + // "ENCRYPTION_TYPE_UNSPECIFIED" - Unspecified. Do not use. + // "USE_DATABASE_ENCRYPTION" - Use the same encryption configuration as the + // database. 
This is the default option when encryption_config is empty. For + // example, if the database is using `Customer_Managed_Encryption`, the backup + // will be using the same Cloud KMS key as the database. + // "GOOGLE_DEFAULT_ENCRYPTION" - Use Google default encryption. + // "CUSTOMER_MANAGED_ENCRYPTION" - Use customer managed encryption. If + // specified, `kms_key_name` must contain a valid Cloud KMS key. + EncryptionType string `json:"encryptionType,omitempty"` + // KmsKeyName: Optional. The Cloud KMS key that will be used to protect the + // backup. This field should be set only when encryption_type is + // `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form + // `projects//locations//keyRings//cryptoKeys/`. + KmsKeyName string `json:"kmsKeyName,omitempty"` + // KmsKeyNames: Optional. Specifies the KMS configuration for the one or more + // keys used to protect the backup. Values are of the form + // `projects//locations//keyRings//cryptoKeys/`. The keys referenced by + // kms_key_names must fully cover all regions of the backup's instance + // configuration. Some examples: * For single region instance configs, specify + // a single regional location KMS key. * For multi-regional instance configs of + // type GOOGLE_MANAGED, either specify a multi-regional location KMS key or + // multiple regional location KMS keys that cover all regions in the instance + // config. * For an instance config of type USER_MANAGED, please specify only + // regional location KMS keys to cover each region in the instance config. + // Multi-regional location KMS keys are not supported for USER_MANAGED instance + // configs. + KmsKeyNames []string `json:"kmsKeyNames,omitempty"` + // ForceSendFields is a list of field names (e.g. "EncryptionType") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "EncryptionType") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s CreateBackupEncryptionConfig) MarshalJSON() ([]byte, error) { + type NoMethod CreateBackupEncryptionConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateBackupMetadata: Metadata type for the operation returned by @@ -1287,9 +1460,9 @@ type CreateBackupMetadata struct { NullFields []string `json:"-"` } -func (s *CreateBackupMetadata) MarshalJSON() ([]byte, error) { +func (s CreateBackupMetadata) MarshalJSON() ([]byte, error) { type NoMethod CreateBackupMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateDatabaseMetadata: Metadata type for the operation returned by @@ -1310,9 +1483,9 @@ type CreateDatabaseMetadata struct { NullFields []string `json:"-"` } -func (s *CreateDatabaseMetadata) MarshalJSON() ([]byte, error) { +func (s CreateDatabaseMetadata) MarshalJSON() ([]byte, error) { type NoMethod CreateDatabaseMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateDatabaseRequest: The request for CreateDatabase. @@ -1340,13 +1513,14 @@ type CreateDatabaseRequest struct { // statements execute atomically with the creation of the database: if there is // an error in any statement, the database is not created. ExtraStatements []string `json:"extraStatements,omitempty"` - // ProtoDescriptors: Optional. Proto descriptors used by CREATE/ALTER PROTO - // BUNDLE statements in 'extra_statements' above. Contains a - // protobuf-serialized google.protobuf.FileDescriptorSet - // (https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto). - // To generate it, install (https://grpc.io/docs/protoc-installation/) and run - // `protoc` with --include_imports and --descriptor_set_out. For example, to - // generate for moon/shot/app.proto, run ``` $protoc --proto_path=/app_path + // ProtoDescriptors: Optional. Proto descriptors used by `CREATE/ALTER PROTO + // BUNDLE` statements in 'extra_statements'. Contains a protobuf-serialized + // `google.protobuf.FileDescriptorSet` + // (https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto) + // descriptor set. To generate it, install + // (https://grpc.io/docs/protoc-installation/) and run `protoc` with + // --include_imports and --descriptor_set_out. For example, to generate for + // moon/shot/app.proto, run ``` $protoc --proto_path=/app_path // --proto_path=/lib_path \ --include_imports \ // --descriptor_set_out=descriptors.data \ moon/shot/app.proto ``` For more // details, see protobuffer self description @@ -1365,9 +1539,9 @@ type CreateDatabaseRequest struct { NullFields []string `json:"-"` } -func (s *CreateDatabaseRequest) MarshalJSON() ([]byte, error) { +func (s CreateDatabaseRequest) MarshalJSON() ([]byte, error) { type NoMethod CreateDatabaseRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateInstanceConfigMetadata: Metadata type for the operation returned by @@ -1375,7 +1549,7 @@ func (s *CreateDatabaseRequest) MarshalJSON() ([]byte, error) { type CreateInstanceConfigMetadata struct { // CancelTime: The time at which this operation was cancelled. 
CancelTime string `json:"cancelTime,omitempty"` - // InstanceConfig: The target instance config end state. + // InstanceConfig: The target instance configuration end state. InstanceConfig *InstanceConfig `json:"instanceConfig,omitempty"` // Progress: The progress of the CreateInstanceConfig operation. Progress *InstanceOperationProgress `json:"progress,omitempty"` @@ -1392,22 +1566,22 @@ type CreateInstanceConfigMetadata struct { NullFields []string `json:"-"` } -func (s *CreateInstanceConfigMetadata) MarshalJSON() ([]byte, error) { +func (s CreateInstanceConfigMetadata) MarshalJSON() ([]byte, error) { type NoMethod CreateInstanceConfigMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateInstanceConfigRequest: The request for CreateInstanceConfigRequest. type CreateInstanceConfigRequest struct { - // InstanceConfig: Required. The InstanceConfig proto of the configuration to - // create. instance_config.name must be `/instanceConfigs/`. - // instance_config.base_config must be a Google managed configuration name, + // InstanceConfig: Required. The `InstanceConfig` proto of the configuration to + // create. `instance_config.name` must be `/instanceConfigs/`. + // `instance_config.base_config` must be a Google-managed configuration name, // e.g. /instanceConfigs/us-east1, /instanceConfigs/nam3. InstanceConfig *InstanceConfig `json:"instanceConfig,omitempty"` - // InstanceConfigId: Required. The ID of the instance config to create. Valid - // identifiers are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between - // 2 and 64 characters in length. The `custom-` prefix is required to avoid - // name conflicts with Google managed configurations. + // InstanceConfigId: Required. The ID of the instance configuration to create. + // Valid identifiers are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be + // between 2 and 64 characters in length. The `custom-` prefix is required to + // avoid name conflicts with Google-managed configurations. InstanceConfigId string `json:"instanceConfigId,omitempty"` // ValidateOnly: An option to validate, but not actually execute, a request, // and provide the same response. 
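The CreateInstanceConfigRequest comments pin down two easy-to-miss constraints: the id needs the custom- prefix, and base_config must name a Google-managed configuration. A sketch with placeholder project and configuration names; ValidateOnly makes it a dry run:

package main

import (
	spanner "google.golang.org/api/spanner/v1"
)

func newCustomConfigRequest() *spanner.CreateInstanceConfigRequest {
	return &spanner.CreateInstanceConfigRequest{
		// The `custom-` prefix is required to avoid clashing with
		// Google-managed configuration names.
		InstanceConfigId: "custom-nam3-plus",
		InstanceConfig: &spanner.InstanceConfig{
			Name:        "projects/p/instanceConfigs/custom-nam3-plus",
			BaseConfig:  "projects/p/instanceConfigs/nam3", // must be GOOGLE_MANAGED
			DisplayName: "nam3 plus optional replica",
			// Replicas must include all of base_config's replicas plus one
			// or more of its optional_replicas; omitted in this sketch.
		},
		ValidateOnly: true, // validate the request without creating anything
	}
}

func main() { _ = newCustomConfigRequest() }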
@@ -1425,9 +1599,9 @@ type CreateInstanceConfigRequest struct { NullFields []string `json:"-"` } -func (s *CreateInstanceConfigRequest) MarshalJSON() ([]byte, error) { +func (s CreateInstanceConfigRequest) MarshalJSON() ([]byte, error) { type NoMethod CreateInstanceConfigRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateInstanceMetadata: Metadata type for the operation returned by @@ -1467,9 +1641,9 @@ type CreateInstanceMetadata struct { NullFields []string `json:"-"` } -func (s *CreateInstanceMetadata) MarshalJSON() ([]byte, error) { +func (s CreateInstanceMetadata) MarshalJSON() ([]byte, error) { type NoMethod CreateInstanceMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateInstancePartitionMetadata: Metadata type for the operation returned by @@ -1500,9 +1674,9 @@ type CreateInstancePartitionMetadata struct { NullFields []string `json:"-"` } -func (s *CreateInstancePartitionMetadata) MarshalJSON() ([]byte, error) { +func (s CreateInstancePartitionMetadata) MarshalJSON() ([]byte, error) { type NoMethod CreateInstancePartitionMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateInstancePartitionRequest: The request for CreateInstancePartition. @@ -1528,9 +1702,9 @@ type CreateInstancePartitionRequest struct { NullFields []string `json:"-"` } -func (s *CreateInstancePartitionRequest) MarshalJSON() ([]byte, error) { +func (s CreateInstancePartitionRequest) MarshalJSON() ([]byte, error) { type NoMethod CreateInstancePartitionRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateInstanceRequest: The request for CreateInstance. @@ -1555,9 +1729,9 @@ type CreateInstanceRequest struct { NullFields []string `json:"-"` } -func (s *CreateInstanceRequest) MarshalJSON() ([]byte, error) { +func (s CreateInstanceRequest) MarshalJSON() ([]byte, error) { type NoMethod CreateInstanceRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CreateSessionRequest: The request for CreateSession. @@ -1577,9 +1751,52 @@ type CreateSessionRequest struct { NullFields []string `json:"-"` } -func (s *CreateSessionRequest) MarshalJSON() ([]byte, error) { +func (s CreateSessionRequest) MarshalJSON() ([]byte, error) { type NoMethod CreateSessionRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// CrontabSpec: CrontabSpec can be used to specify the version time and +// frequency at which the backup should be created. +type CrontabSpec struct { + // CreationWindow: Output only. Schedule backups will contain an externally + // consistent copy of the database at the version time specified in + // `schedule_spec.cron_spec`. However, Spanner may not initiate the creation of + // the scheduled backups at that version time. 
Spanner will initiate the + // creation of scheduled backups within the time window bounded by the + // version_time specified in `schedule_spec.cron_spec` and version_time + + // `creation_window`. + CreationWindow string `json:"creationWindow,omitempty"` + // Text: Required. Textual representation of the crontab. User can customize + // the backup frequency and the backup version time using the cron expression. + // The version time must be in UTC timezone. The backup will contain an + // externally consistent copy of the database at the version time. Allowed + // frequencies are 12 hour, 1 day, 1 week and 1 month. Examples of valid cron + // specifications: * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past + // midnight in UTC. * `0 2,14 * * * ` : every 12 hours at (2,14) hours past + // midnight in UTC. * `0 2 * * * ` : once a day at 2 past midnight in UTC. * `0 + // 2 * * 0 ` : once a week every Sunday at 2 past midnight in UTC. * `0 2 8 * * + // ` : once a month on 8th day at 2 past midnight in UTC. + Text string `json:"text,omitempty"` + // TimeZone: Output only. The time zone of the times in `CrontabSpec.text`. + // Currently only UTC is supported. + TimeZone string `json:"timeZone,omitempty"` + // ForceSendFields is a list of field names (e.g. "CreationWindow") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "CreationWindow") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s CrontabSpec) MarshalJSON() ([]byte, error) { + type NoMethod CrontabSpec + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Database: A Cloud Spanner database. @@ -1606,9 +1823,9 @@ type Database struct { // recover data, make sure to account for the time from the moment when the // value is queried to the moment when you initiate the recovery. EarliestVersionTime string `json:"earliestVersionTime,omitempty"` - // EnableDropProtection: Whether drop protection is enabled for this database. - // Defaults to false, if not set. For more details, please see how to prevent - // accidental database deletion + // EnableDropProtection: Optional. Whether drop protection is enabled for this + // database. Defaults to false, if not set. For more details, please see how to + // prevent accidental database deletion // (https://cloud.google.com/spanner/docs/prevent-database-deletion). EnableDropProtection bool `json:"enableDropProtection,omitempty"` // EncryptionConfig: Output only. For databases that are using customer managed @@ -1629,7 +1846,7 @@ type Database struct { // DATABASE` statement. This name can be passed to other API methods to // identify the database. Name string `json:"name,omitempty"` - // QuorumInfo: Output only. Applicable only for databases that use dual region + // QuorumInfo: Output only. Applicable only for databases that use dual-region // instance configurations. Contains information about the quorum. QuorumInfo *QuorumInfo `json:"quorumInfo,omitempty"` // Reconciling: Output only. If true, the database is being updated.
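CrontabSpec.Text is deliberately narrow: only the cadences listed above (every 12 hours, daily, weekly, monthly) are accepted, and times are always interpreted in UTC. A sketch pairing each documented expression with its meaning; the loop itself is illustrative:

package main

import (
	"fmt"

	spanner "google.golang.org/api/spanner/v1"
)

func main() {
	// Cron texts taken verbatim from the CrontabSpec.Text docs; times are
	// UTC, the only zone CrontabSpec.TimeZone currently supports.
	examples := []struct{ text, meaning string }{
		{"0 2/12 * * *", "every 12 hours, at 02:00 and 14:00 UTC"},
		{"0 2 * * *", "daily at 02:00 UTC"},
		{"0 2 * * 0", "weekly, Sundays at 02:00 UTC"},
		{"0 2 8 * *", "monthly, on the 8th at 02:00 UTC"},
	}
	for _, e := range examples {
		spec := spanner.CrontabSpec{Text: e.text}
		fmt.Printf("%-13s -> %s\n", spec.Text, e.meaning)
	}
}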
If false, @@ -1673,9 +1890,9 @@ type Database struct { NullFields []string `json:"-"` } -func (s *Database) MarshalJSON() ([]byte, error) { +func (s Database) MarshalJSON() ([]byte, error) { type NoMethod Database - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DatabaseRole: A Cloud Spanner database role. @@ -1697,9 +1914,9 @@ type DatabaseRole struct { NullFields []string `json:"-"` } -func (s *DatabaseRole) MarshalJSON() ([]byte, error) { +func (s DatabaseRole) MarshalJSON() ([]byte, error) { type NoMethod DatabaseRole - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DdlStatementActionInfo: Action information extracted from a DDL statement. @@ -1731,9 +1948,9 @@ type DdlStatementActionInfo struct { NullFields []string `json:"-"` } -func (s *DdlStatementActionInfo) MarshalJSON() ([]byte, error) { +func (s DdlStatementActionInfo) MarshalJSON() ([]byte, error) { type NoMethod DdlStatementActionInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Delete: Arguments to delete operations. @@ -1759,9 +1976,9 @@ type Delete struct { NullFields []string `json:"-"` } -func (s *Delete) MarshalJSON() ([]byte, error) { +func (s Delete) MarshalJSON() ([]byte, error) { type NoMethod Delete - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DerivedMetric: A message representing a derived metric. @@ -1783,9 +2000,9 @@ type DerivedMetric struct { NullFields []string `json:"-"` } -func (s *DerivedMetric) MarshalJSON() ([]byte, error) { +func (s DerivedMetric) MarshalJSON() ([]byte, error) { type NoMethod DerivedMetric - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DiagnosticMessage: A message representing the key visualizer diagnostic @@ -1827,9 +2044,9 @@ type DiagnosticMessage struct { NullFields []string `json:"-"` } -func (s *DiagnosticMessage) MarshalJSON() ([]byte, error) { +func (s DiagnosticMessage) MarshalJSON() ([]byte, error) { type NoMethod DiagnosticMessage - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DirectedReadOptions: The DirectedReadOptions can be used to indicate which @@ -1860,9 +2077,9 @@ type DirectedReadOptions struct { NullFields []string `json:"-"` } -func (s *DirectedReadOptions) MarshalJSON() ([]byte, error) { +func (s DirectedReadOptions) MarshalJSON() ([]byte, error) { type NoMethod DirectedReadOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DualRegionQuorum: Message type for a dual-region quorum. 
Currently this type @@ -1911,9 +2128,9 @@ type EncryptionConfig struct { NullFields []string `json:"-"` } -func (s *EncryptionConfig) MarshalJSON() ([]byte, error) { +func (s EncryptionConfig) MarshalJSON() ([]byte, error) { type NoMethod EncryptionConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // EncryptionInfo: Encryption information for a Cloud Spanner database or @@ -1952,9 +2169,9 @@ type EncryptionInfo struct { NullFields []string `json:"-"` } -func (s *EncryptionInfo) MarshalJSON() ([]byte, error) { +func (s EncryptionInfo) MarshalJSON() ([]byte, error) { type NoMethod EncryptionInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExcludeReplicas: An ExcludeReplicas contains a repeated set of @@ -1975,9 +2192,9 @@ type ExcludeReplicas struct { NullFields []string `json:"-"` } -func (s *ExcludeReplicas) MarshalJSON() ([]byte, error) { +func (s ExcludeReplicas) MarshalJSON() ([]byte, error) { type NoMethod ExcludeReplicas - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExecuteBatchDmlRequest: The request for ExecuteBatchDml. @@ -2016,9 +2233,9 @@ type ExecuteBatchDmlRequest struct { NullFields []string `json:"-"` } -func (s *ExecuteBatchDmlRequest) MarshalJSON() ([]byte, error) { +func (s ExecuteBatchDmlRequest) MarshalJSON() ([]byte, error) { type NoMethod ExecuteBatchDmlRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExecuteBatchDmlResponse: The response for ExecuteBatchDml. Contains a list @@ -2063,9 +2280,9 @@ type ExecuteBatchDmlResponse struct { NullFields []string `json:"-"` } -func (s *ExecuteBatchDmlResponse) MarshalJSON() ([]byte, error) { +func (s ExecuteBatchDmlResponse) MarshalJSON() ([]byte, error) { type NoMethod ExecuteBatchDmlResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExecuteSqlRequest: The request for ExecuteSql and ExecuteStreamingSql. @@ -2107,8 +2324,10 @@ type ExecuteSqlRequest struct { // "NORMAL" - The default mode. Only the statement results are returned. // "PLAN" - This mode returns only the query plan, without any results or // execution statistics information. - // "PROFILE" - This mode returns both the query plan and the execution - // statistics along with the results. + // "PROFILE" - This mode returns the query plan, overall execution + // statistics, operator level execution statistics along with the results. This + // has a performance overhead compared to the other modes. It is not + // recommended to use this mode for production traffic. QueryMode string `json:"queryMode,omitempty"` // QueryOptions: Query optimizer configuration to use for the given query. 
QueryOptions *QueryOptions `json:"queryOptions,omitempty"` @@ -2151,9 +2370,9 @@ type ExecuteSqlRequest struct { NullFields []string `json:"-"` } -func (s *ExecuteSqlRequest) MarshalJSON() ([]byte, error) { +func (s ExecuteSqlRequest) MarshalJSON() ([]byte, error) { type NoMethod ExecuteSqlRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Expr: Represents a textual expression in the Common Expression Language @@ -2199,9 +2418,9 @@ type Expr struct { NullFields []string `json:"-"` } -func (s *Expr) MarshalJSON() ([]byte, error) { +func (s Expr) MarshalJSON() ([]byte, error) { type NoMethod Expr - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Field: Message representing a single field of a struct. @@ -2228,9 +2447,9 @@ type Field struct { NullFields []string `json:"-"` } -func (s *Field) MarshalJSON() ([]byte, error) { +func (s Field) MarshalJSON() ([]byte, error) { type NoMethod Field - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FreeInstanceMetadata: Free instance specific metadata that is kept even @@ -2269,9 +2488,14 @@ type FreeInstanceMetadata struct { NullFields []string `json:"-"` } -func (s *FreeInstanceMetadata) MarshalJSON() ([]byte, error) { +func (s FreeInstanceMetadata) MarshalJSON() ([]byte, error) { type NoMethod FreeInstanceMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// FullBackupSpec: The specification for full backups. A full backup stores the +// entire contents of the database at a given version time. +type FullBackupSpec struct { } // GetDatabaseDdlResponse: The response for GetDatabaseDdl. @@ -2301,9 +2525,9 @@ type GetDatabaseDdlResponse struct { NullFields []string `json:"-"` } -func (s *GetDatabaseDdlResponse) MarshalJSON() ([]byte, error) { +func (s GetDatabaseDdlResponse) MarshalJSON() ([]byte, error) { type NoMethod GetDatabaseDdlResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetIamPolicyRequest: Request message for `GetIamPolicy` method. @@ -2324,9 +2548,9 @@ type GetIamPolicyRequest struct { NullFields []string `json:"-"` } -func (s *GetIamPolicyRequest) MarshalJSON() ([]byte, error) { +func (s GetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod GetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GetPolicyOptions: Encapsulates settings provided to GetIamPolicy. 
@@ -2356,9 +2580,9 @@ type GetPolicyOptions struct { NullFields []string `json:"-"` } -func (s *GetPolicyOptions) MarshalJSON() ([]byte, error) { +func (s GetPolicyOptions) MarshalJSON() ([]byte, error) { type NoMethod GetPolicyOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // IncludeReplicas: An IncludeReplicas contains a repeated set of @@ -2384,9 +2608,18 @@ type IncludeReplicas struct { NullFields []string `json:"-"` } -func (s *IncludeReplicas) MarshalJSON() ([]byte, error) { +func (s IncludeReplicas) MarshalJSON() ([]byte, error) { type NoMethod IncludeReplicas - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// IncrementalBackupSpec: The specification for incremental backup chains. An +// incremental backup stores the delta of changes between a previous backup and +// the database contents at a given version time. An incremental backup chain +// consists of a full backup and zero or more successive incremental backups. +// The first backup created for an incremental backup chain is always a full +// backup. +type IncrementalBackupSpec struct { } // IndexAdvice: Recommendation to add new indexes to run queries more @@ -2412,9 +2645,9 @@ type IndexAdvice struct { NullFields []string `json:"-"` } -func (s *IndexAdvice) MarshalJSON() ([]byte, error) { +func (s IndexAdvice) MarshalJSON() ([]byte, error) { type NoMethod IndexAdvice - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *IndexAdvice) UnmarshalJSON(data []byte) error { @@ -2452,9 +2685,9 @@ type IndexedHotKey struct { NullFields []string `json:"-"` } -func (s *IndexedHotKey) MarshalJSON() ([]byte, error) { +func (s IndexedHotKey) MarshalJSON() ([]byte, error) { type NoMethod IndexedHotKey - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // IndexedKeyRangeInfos: A message representing a (sparse) collection of @@ -2476,9 +2709,9 @@ type IndexedKeyRangeInfos struct { NullFields []string `json:"-"` } -func (s *IndexedKeyRangeInfos) MarshalJSON() ([]byte, error) { +func (s IndexedKeyRangeInfos) MarshalJSON() ([]byte, error) { type NoMethod IndexedKeyRangeInfos - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Instance: An isolated set of Cloud Spanner resources on which databases can @@ -2499,6 +2732,14 @@ type Instance struct { // in UIs. Must be unique per project and between 4 and 30 characters in // length. DisplayName string `json:"displayName,omitempty"` + // Edition: Optional. The `Edition` of the current instance. + // + // Possible values: + // "EDITION_UNSPECIFIED" - Edition not specified. + // "STANDARD" - Standard edition. + // "ENTERPRISE" - Enterprise edition. + // "ENTERPRISE_PLUS" - Enterprise Plus edition. + Edition string `json:"edition,omitempty"` // EndpointUris: Deprecated. This field is not populated. EndpointUris []string `json:"endpointUris,omitempty"` // FreeInstanceMetadata: Free instance metadata. Only populated for free @@ -2536,26 +2777,25 @@ type Instance struct { // `projects//instances/a-z*[a-z0-9]`. 
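The reworded PROFILE description above is a behavioral warning rather than doc polish: PROFILE now explicitly carries execution-statistics overhead and is discouraged for production traffic. A sketch keeping the two modes apart; only request values are built here, since the session plumbing is outside this excerpt, and the Sql field is defined elsewhere in this file:

package main

import (
	spanner "google.golang.org/api/spanner/v1"
)

// queryRequests builds the same statement twice: NORMAL (the default) for
// serving traffic, PROFILE only for ad-hoc tuning sessions because of its
// documented execution-statistics overhead.
func queryRequests(sql string) (serving, tuning *spanner.ExecuteSqlRequest) {
	serving = &spanner.ExecuteSqlRequest{Sql: sql} // empty QueryMode means NORMAL
	tuning = &spanner.ExecuteSqlRequest{Sql: sql, QueryMode: "PROFILE"}
	return serving, tuning
}

func main() {
	_, _ = queryRequests("SELECT 1")
}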
The final segment of the name must be // between 2 and 64 characters in length. Name string `json:"name,omitempty"` - // NodeCount: The number of nodes allocated to this instance. At most one of - // either node_count or processing_units should be present in the message. - // Users can set the node_count field to specify the target number of nodes - // allocated to the instance. If autoscaling is enabled, node_count is treated - // as an OUTPUT_ONLY field and reflects the current number of nodes allocated - // to the instance. This may be zero in API responses for instances that are - // not yet in state `READY`. See the documentation - // (https://cloud.google.com/spanner/docs/compute-capacity) for more - // information about nodes and processing units. + // NodeCount: The number of nodes allocated to this instance. At most, one of + // either `node_count` or `processing_units` should be present in the message. + // Users can set the `node_count` field to specify the target number of nodes + // allocated to the instance. If autoscaling is enabled, `node_count` is + // treated as an `OUTPUT_ONLY` field and reflects the current number of nodes + // allocated to the instance. This might be zero in API responses for instances + // that are not yet in the `READY` state. For more information, see Compute + // capacity, nodes, and processing units + // (https://cloud.google.com/spanner/docs/compute-capacity). NodeCount int64 `json:"nodeCount,omitempty"` // ProcessingUnits: The number of processing units allocated to this instance. - // At most one of processing_units or node_count should be present in the - // message. Users can set the processing_units field to specify the target - // number of processing units allocated to the instance. If autoscaling is - // enabled, processing_units is treated as an OUTPUT_ONLY field and reflects - // the current number of processing units allocated to the instance. This may - // be zero in API responses for instances that are not yet in state `READY`. - // See the documentation - // (https://cloud.google.com/spanner/docs/compute-capacity) for more - // information about nodes and processing units. + // At most, one of either `processing_units` or `node_count` should be present + // in the message. Users can set the `processing_units` field to specify the + // target number of processing units allocated to the instance. If autoscaling + // is enabled, `processing_units` is treated as an `OUTPUT_ONLY` field and + // reflects the current number of processing units allocated to the instance. + // This might be zero in API responses for instances that are not yet in the + // `READY` state. For more information, see Compute capacity, nodes and + // processing units (https://cloud.google.com/spanner/docs/compute-capacity). ProcessingUnits int64 `json:"processingUnits,omitempty"` // State: Output only. The current instance state. For CreateInstance, the // state must be either omitted or set to `CREATING`. For UpdateInstance, the @@ -2587,9 +2827,9 @@ type Instance struct { NullFields []string `json:"-"` } -func (s *Instance) MarshalJSON() ([]byte, error) { +func (s Instance) MarshalJSON() ([]byte, error) { type NoMethod Instance - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceConfig: A possible configuration for a Cloud Spanner instance. @@ -2597,44 +2837,45 @@ func (s *Instance) MarshalJSON() ([]byte, error) { // replication. 
type InstanceConfig struct { // BaseConfig: Base configuration name, e.g. projects//instanceConfigs/nam3, - based on which this configuration is created. Only set for user managed + based on which this configuration is created. Only set for user-managed // configurations. `base_config` must refer to a configuration of type - GOOGLE_MANAGED in the same project as this configuration. + `GOOGLE_MANAGED` in the same project as this configuration. BaseConfig string `json:"baseConfig,omitempty"` - // ConfigType: Output only. Whether this instance config is a Google or User - // Managed Configuration. + // ConfigType: Output only. Whether this instance configuration is a + // Google-managed or user-managed configuration. // // Possible values: // "TYPE_UNSPECIFIED" - Unspecified. - // "GOOGLE_MANAGED" - Google managed configuration. - // "USER_MANAGED" - User managed configuration. + // "GOOGLE_MANAGED" - Google-managed configuration. + // "USER_MANAGED" - User-managed configuration. ConfigType string `json:"configType,omitempty"` // DisplayName: The name of this instance configuration as it appears in UIs. DisplayName string `json:"displayName,omitempty"` // Etag: etag is used for optimistic concurrency control as a way to help - // prevent simultaneous updates of a instance config from overwriting each - // other. It is strongly suggested that systems make use of the etag in the - // read-modify-write cycle to perform instance config updates in order to avoid - // race conditions: An etag is returned in the response which contains instance - // configs, and systems are expected to put that etag in the request to update - // instance config to ensure that their change will be applied to the same - // version of the instance config. If no etag is provided in the call to update - // instance config, then the existing instance config is overwritten blindly. + // prevent simultaneous updates of an instance configuration from overwriting + // each other. It is strongly suggested that systems make use of the etag in + // the read-modify-write cycle to perform instance configuration updates in + // order to avoid race conditions: An etag is returned in the response which + // contains instance configurations, and systems are expected to put that etag + // in the request to update instance configuration to ensure that their change + // is applied to the same version of the instance configuration. If no etag is + // provided in the call to update the instance configuration, then the existing + // instance configuration is overwritten blindly. Etag string `json:"etag,omitempty"` // FreeInstanceAvailability: Output only. Describes whether free instances are - // available to be created in this instance config. + // available to be created in this instance configuration. // // Possible values: // "FREE_INSTANCE_AVAILABILITY_UNSPECIFIED" - Not specified. // "AVAILABLE" - Indicates that free instances are available to be created in - // this instance config. + // this instance configuration. // "UNSUPPORTED" - Indicates that free instances are not supported in this - // instance config. + // instance configuration. // "DISABLED" - Indicates that free instances are currently not available to - // be created in this instance config. + // be created in this instance configuration. // "QUOTA_EXCEEDED" - Indicates that additional free instances cannot be - // created in this instance config because the project has reached its limit of - // free instances.
+ // created in this instance configuration because the project has reached its + // limit of free instances. FreeInstanceAvailability string `json:"freeInstanceAvailability,omitempty"` // Labels: Cloud Labels are a flexible and lightweight mechanism for organizing // cloud resources into groups that reflect a customer's organizational needs @@ -2657,44 +2898,45 @@ type InstanceConfig struct { // databases in instances that use this instance configuration. LeaderOptions []string `json:"leaderOptions,omitempty"` // Name: A unique identifier for the instance configuration. Values are of the - // form `projects//instanceConfigs/a-z*`. User instance config must start with - // `custom-`. + // form `projects//instanceConfigs/a-z*`. User instance configuration must + // start with `custom-`. Name string `json:"name,omitempty"` // OptionalReplicas: Output only. The available optional replicas to choose - // from for user managed configurations. Populated for Google managed + // from for user-managed configurations. Populated for Google-managed // configurations. OptionalReplicas []*ReplicaInfo `json:"optionalReplicas,omitempty"` // QuorumType: Output only. The `QuorumType` of the instance configuration. // // Possible values: - // "QUORUM_TYPE_UNSPECIFIED" - Not specified. - // "REGION" - An instance configuration tagged with REGION quorum type forms - // a write quorum in a single region. - // "DUAL_REGION" - An instance configuration tagged with DUAL_REGION quorum - // type forms a write quorums with exactly two read-write regions in a - // multi-region configuration. This instance configurations requires - // reconfiguration in the event of regional failures. - // "MULTI_REGION" - An instance configuration tagged with MULTI_REGION quorum - // type forms a write quorums from replicas are spread across more than one - // region in a multi-region configuration. + // "QUORUM_TYPE_UNSPECIFIED" - Quorum type not specified. + // "REGION" - An instance configuration tagged with `REGION` quorum type + // forms a write quorum in a single region. + // "DUAL_REGION" - An instance configuration tagged with the `DUAL_REGION` + // quorum type forms a write quorum with exactly two read-write regions in a + // multi-region configuration. This instance configuration requires failover in + // the event of regional failures. + // "MULTI_REGION" - An instance configuration tagged with the `MULTI_REGION` + // quorum type forms a write quorum from replicas that are spread across more + // than one region in a multi-region configuration. QuorumType string `json:"quorumType,omitempty"` - // Reconciling: Output only. If true, the instance config is being created or - // updated. If false, there are no ongoing operations for the instance config. + // Reconciling: Output only. If true, the instance configuration is being + // created or updated. If false, there are no ongoing operations for the + // instance configuration. Reconciling bool `json:"reconciling,omitempty"` // Replicas: The geographic placement of nodes in this instance configuration - // and their replication properties. To create user managed configurations, + // and their replication properties. To create user-managed configurations, // input `replicas` must include all replicas in `replicas` of the // `base_config` and include one or more replicas in the `optional_replicas` of // the `base_config`. Replicas []*ReplicaInfo `json:"replicas,omitempty"` - // State: Output only. The current instance config state. Applicable only for - // USER_MANAGED configs. 
+ // State: Output only. The current instance configuration state. Applicable
+ // only for `USER_MANAGED` configurations.
 //
 // Possible values:
 // "STATE_UNSPECIFIED" - Not specified.
- // "CREATING" - The instance config is still being created.
- // "READY" - The instance config is fully created and ready to be used to
- // create instances.
+ // "CREATING" - The instance configuration is still being created.
+ // "READY" - The instance configuration is fully created and ready to be used
+ // to create instances.
 State string `json:"state,omitempty"`
 // StorageLimitPerProcessingUnit: Output only. The storage limit in bytes per
 // processing unit.
@@ -2715,9 +2957,9 @@ type InstanceConfig struct {
 NullFields []string `json:"-"`
}

-func (s *InstanceConfig) MarshalJSON() ([]byte, error) {
+func (s InstanceConfig) MarshalJSON() ([]byte, error) {
 type NoMethod InstanceConfig
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

// InstanceOperationProgress: Encapsulates progress related information for a
@@ -2744,9 +2986,9 @@ type InstanceOperationProgress struct {
 NullFields []string `json:"-"`
}

-func (s *InstanceOperationProgress) MarshalJSON() ([]byte, error) {
+func (s InstanceOperationProgress) MarshalJSON() ([]byte, error) {
 type NoMethod InstanceOperationProgress
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

// InstancePartition: An isolated set of Cloud Spanner resources that databases
@@ -2781,20 +3023,21 @@ type InstancePartition struct {
 // created.
 Name string `json:"name,omitempty"`
 // NodeCount: The number of nodes allocated to this instance partition. Users
- // can set the node_count field to specify the target number of nodes allocated
- // to the instance partition. This may be zero in API responses for instance
- // partitions that are not yet in state `READY`.
+ // can set the `node_count` field to specify the target number of nodes
+ // allocated to the instance partition. This might be zero in API responses for
+ // instance partitions that are not yet in the `READY` state.
 NodeCount int64 `json:"nodeCount,omitempty"`
 // ProcessingUnits: The number of processing units allocated to this instance
- // partition. Users can set the processing_units field to specify the target
- // number of processing units allocated to the instance partition. This may be
- // zero in API responses for instance partitions that are not yet in state
- // `READY`.
+ // partition. Users can set the `processing_units` field to specify the target
+ // number of processing units allocated to the instance partition. This might
+ // be zero in API responses for instance partitions that are not yet in the
+ // `READY` state.
 ProcessingUnits int64 `json:"processingUnits,omitempty"`
- // ReferencingBackups: Output only. The names of the backups that reference
- // this instance partition. Referencing backups should share the parent
- // instance. The existence of any referencing backup prevents the instance
- // partition from being deleted.
+ // ReferencingBackups: Output only. Deprecated: This field is not populated.
+ // The names of the backups that reference this instance partition.
+ // Referencing backups should share the parent instance. The existence of any
+ // referencing backup prevents the instance partition from being deleted.
ReferencingBackups []string `json:"referencingBackups,omitempty"` // ReferencingDatabases: Output only. The names of the databases that reference // this instance partition. Referencing databases should share the parent @@ -2830,9 +3073,9 @@ type InstancePartition struct { NullFields []string `json:"-"` } -func (s *InstancePartition) MarshalJSON() ([]byte, error) { +func (s InstancePartition) MarshalJSON() ([]byte, error) { type NoMethod InstancePartition - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // KeyRange: KeyRange represents a range of rows in a table or index. A range @@ -2895,9 +3138,9 @@ type KeyRange struct { NullFields []string `json:"-"` } -func (s *KeyRange) MarshalJSON() ([]byte, error) { +func (s KeyRange) MarshalJSON() ([]byte, error) { type NoMethod KeyRange - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // KeyRangeInfo: A message representing information for a key range (possibly @@ -2936,9 +3179,9 @@ type KeyRangeInfo struct { NullFields []string `json:"-"` } -func (s *KeyRangeInfo) MarshalJSON() ([]byte, error) { +func (s KeyRangeInfo) MarshalJSON() ([]byte, error) { type NoMethod KeyRangeInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *KeyRangeInfo) UnmarshalJSON(data []byte) error { @@ -2977,9 +3220,9 @@ type KeyRangeInfos struct { NullFields []string `json:"-"` } -func (s *KeyRangeInfos) MarshalJSON() ([]byte, error) { +func (s KeyRangeInfos) MarshalJSON() ([]byte, error) { type NoMethod KeyRangeInfos - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // KeySet: `KeySet` defines a collection of Cloud Spanner keys and/or key @@ -3013,9 +3256,9 @@ type KeySet struct { NullFields []string `json:"-"` } -func (s *KeySet) MarshalJSON() ([]byte, error) { +func (s KeySet) MarshalJSON() ([]byte, error) { type NoMethod KeySet - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListBackupOperationsResponse: The response for ListBackupOperations. @@ -3047,9 +3290,37 @@ type ListBackupOperationsResponse struct { NullFields []string `json:"-"` } -func (s *ListBackupOperationsResponse) MarshalJSON() ([]byte, error) { +func (s ListBackupOperationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListBackupOperationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ListBackupSchedulesResponse: The response for ListBackupSchedules. +type ListBackupSchedulesResponse struct { + // BackupSchedules: The list of backup schedules for a database. + BackupSchedules []*BackupSchedule `json:"backupSchedules,omitempty"` + // NextPageToken: `next_page_token` can be sent in a subsequent + // ListBackupSchedules call to fetch more of the schedules. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. 
"BackupSchedules") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "BackupSchedules") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ListBackupSchedulesResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListBackupSchedulesResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListBackupsResponse: The response for ListBackups. @@ -3077,9 +3348,9 @@ type ListBackupsResponse struct { NullFields []string `json:"-"` } -func (s *ListBackupsResponse) MarshalJSON() ([]byte, error) { +func (s ListBackupsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListBackupsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListDatabaseOperationsResponse: The response for ListDatabaseOperations. @@ -3107,9 +3378,9 @@ type ListDatabaseOperationsResponse struct { NullFields []string `json:"-"` } -func (s *ListDatabaseOperationsResponse) MarshalJSON() ([]byte, error) { +func (s ListDatabaseOperationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListDatabaseOperationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListDatabaseRolesResponse: The response for ListDatabaseRoles. @@ -3135,9 +3406,9 @@ type ListDatabaseRolesResponse struct { NullFields []string `json:"-"` } -func (s *ListDatabaseRolesResponse) MarshalJSON() ([]byte, error) { +func (s ListDatabaseRolesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListDatabaseRolesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListDatabasesResponse: The response for ListDatabases. @@ -3163,9 +3434,9 @@ type ListDatabasesResponse struct { NullFields []string `json:"-"` } -func (s *ListDatabasesResponse) MarshalJSON() ([]byte, error) { +func (s ListDatabasesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListDatabasesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListInstanceConfigOperationsResponse: The response for @@ -3174,10 +3445,10 @@ type ListInstanceConfigOperationsResponse struct { // NextPageToken: `next_page_token` can be sent in a subsequent // ListInstanceConfigOperations call to fetch more of the matching metadata. NextPageToken string `json:"nextPageToken,omitempty"` - // Operations: The list of matching instance config long-running operations. - // Each operation's name will be prefixed by the instance config's name. The - // operation's metadata field type `metadata.type_url` describes the type of - // the metadata. + // Operations: The list of matching instance configuration long-running + // operations. Each operation's name will be prefixed by the name of the + // instance configuration. 
The operation's metadata field type + // `metadata.type_url` describes the type of the metadata. Operations []*Operation `json:"operations,omitempty"` // ServerResponse contains the HTTP response code and headers from the server. @@ -3195,9 +3466,9 @@ type ListInstanceConfigOperationsResponse struct { NullFields []string `json:"-"` } -func (s *ListInstanceConfigOperationsResponse) MarshalJSON() ([]byte, error) { +func (s ListInstanceConfigOperationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListInstanceConfigOperationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListInstanceConfigsResponse: The response for ListInstanceConfigs. @@ -3224,9 +3495,9 @@ type ListInstanceConfigsResponse struct { NullFields []string `json:"-"` } -func (s *ListInstanceConfigsResponse) MarshalJSON() ([]byte, error) { +func (s ListInstanceConfigsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListInstanceConfigsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListInstancePartitionOperationsResponse: The response for @@ -3260,9 +3531,9 @@ type ListInstancePartitionOperationsResponse struct { NullFields []string `json:"-"` } -func (s *ListInstancePartitionOperationsResponse) MarshalJSON() ([]byte, error) { +func (s ListInstancePartitionOperationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListInstancePartitionOperationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListInstancePartitionsResponse: The response for ListInstancePartitions. @@ -3293,9 +3564,9 @@ type ListInstancePartitionsResponse struct { NullFields []string `json:"-"` } -func (s *ListInstancePartitionsResponse) MarshalJSON() ([]byte, error) { +func (s ListInstancePartitionsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListInstancePartitionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListInstancesResponse: The response for ListInstances. @@ -3324,9 +3595,9 @@ type ListInstancesResponse struct { NullFields []string `json:"-"` } -func (s *ListInstancesResponse) MarshalJSON() ([]byte, error) { +func (s ListInstancesResponse) MarshalJSON() ([]byte, error) { type NoMethod ListInstancesResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListOperationsResponse: The response message for Operations.ListOperations. @@ -3352,9 +3623,9 @@ type ListOperationsResponse struct { NullFields []string `json:"-"` } -func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { +func (s ListOperationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListOperationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListScansResponse: Response method from the ListScans method. 
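All of the List responses above share the same pagination contract: the `next_page_token` from one response is passed as the page token of the following request until it comes back empty. A minimal sketch against the generated surface in this file, assuming Application Default Credentials are available and using a placeholder project ID:

package main

import (
	"context"
	"fmt"
	"log"

	spanner "google.golang.org/api/spanner/v1"
)

func main() {
	ctx := context.Background()
	svc, err := spanner.NewService(ctx) // authenticates via Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	// Keep requesting pages until next_page_token comes back empty.
	token := ""
	for {
		resp, err := svc.Projects.InstanceConfigs.List("projects/my-project").
			PageToken(token).Do()
		if err != nil {
			log.Fatal(err)
		}
		for _, cfg := range resp.InstanceConfigs {
			fmt.Println(cfg.Name, cfg.ConfigType)
		}
		if resp.NextPageToken == "" {
			break // no more pages
		}
		token = resp.NextPageToken
	}
}

The same loop shape applies to the new ListBackupSchedulesResponse, swapping in its `BackupSchedules` field.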
@@ -3380,9 +3651,9 @@ type ListScansResponse struct { NullFields []string `json:"-"` } -func (s *ListScansResponse) MarshalJSON() ([]byte, error) { +func (s ListScansResponse) MarshalJSON() ([]byte, error) { type NoMethod ListScansResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListSessionsResponse: The response for ListSessions. @@ -3408,9 +3679,9 @@ type ListSessionsResponse struct { NullFields []string `json:"-"` } -func (s *ListSessionsResponse) MarshalJSON() ([]byte, error) { +func (s ListSessionsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListSessionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LocalizedString: A message representing a user-facing string whose value may @@ -3440,9 +3711,9 @@ type LocalizedString struct { NullFields []string `json:"-"` } -func (s *LocalizedString) MarshalJSON() ([]byte, error) { +func (s LocalizedString) MarshalJSON() ([]byte, error) { type NoMethod LocalizedString - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Metric: A message representing the actual monitoring data, values for each @@ -3498,9 +3769,9 @@ type Metric struct { NullFields []string `json:"-"` } -func (s *Metric) MarshalJSON() ([]byte, error) { +func (s Metric) MarshalJSON() ([]byte, error) { type NoMethod Metric - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *Metric) UnmarshalJSON(data []byte) error { @@ -3534,9 +3805,9 @@ type MetricMatrix struct { NullFields []string `json:"-"` } -func (s *MetricMatrix) MarshalJSON() ([]byte, error) { +func (s MetricMatrix) MarshalJSON() ([]byte, error) { type NoMethod MetricMatrix - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MetricMatrixRow: A message representing a row of a matrix of floats. @@ -3556,9 +3827,9 @@ type MetricMatrixRow struct { NullFields []string `json:"-"` } -func (s *MetricMatrixRow) MarshalJSON() ([]byte, error) { +func (s MetricMatrixRow) MarshalJSON() ([]byte, error) { type NoMethod MetricMatrixRow - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } func (s *MetricMatrixRow) UnmarshalJSON(data []byte) error { @@ -3580,8 +3851,8 @@ func (s *MetricMatrixRow) UnmarshalJSON(data []byte) error { // MoveInstanceRequest: The request for MoveInstance. type MoveInstanceRequest struct { - // TargetConfig: Required. The target instance config for the instance to move. - // Values are of the form `projects//instanceConfigs/`. + // TargetConfig: Required. The target instance configuration where to move the + // instance. Values are of the form `projects//instanceConfigs/`. TargetConfig string `json:"targetConfig,omitempty"` // ForceSendFields is a list of field names (e.g. "TargetConfig") to // unconditionally include in API requests. 
By default, fields with empty or @@ -3596,9 +3867,38 @@ type MoveInstanceRequest struct { NullFields []string `json:"-"` } -func (s *MoveInstanceRequest) MarshalJSON() ([]byte, error) { +func (s MoveInstanceRequest) MarshalJSON() ([]byte, error) { type NoMethod MoveInstanceRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// MultiplexedSessionPrecommitToken: When a read-write transaction is executed +// on a multiplexed session, this precommit token is sent back to the client as +// a part of the [Transaction] message in the BeginTransaction response and +// also as a part of the [ResultSet] and [PartialResultSet] responses. +type MultiplexedSessionPrecommitToken struct { + // PrecommitToken: Opaque precommit token. + PrecommitToken string `json:"precommitToken,omitempty"` + // SeqNum: An incrementing seq number is generated on every precommit token + // that is returned. Clients should remember the precommit token with the + // highest sequence number from the current transaction attempt. + SeqNum int64 `json:"seqNum,omitempty"` + // ForceSendFields is a list of field names (e.g. "PrecommitToken") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "PrecommitToken") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s MultiplexedSessionPrecommitToken) MarshalJSON() ([]byte, error) { + type NoMethod MultiplexedSessionPrecommitToken + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Mutation: A modification to one or more Cloud Spanner rows. Mutations can be @@ -3641,9 +3941,9 @@ type Mutation struct { NullFields []string `json:"-"` } -func (s *Mutation) MarshalJSON() ([]byte, error) { +func (s Mutation) MarshalJSON() ([]byte, error) { type NoMethod Mutation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MutationGroup: A group of mutations to be committed together. 
Related @@ -3666,9 +3966,9 @@ type MutationGroup struct { NullFields []string `json:"-"` } -func (s *MutationGroup) MarshalJSON() ([]byte, error) { +func (s MutationGroup) MarshalJSON() ([]byte, error) { type NoMethod MutationGroup - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Operation: This resource represents a long-running operation that is the @@ -3713,9 +4013,9 @@ type Operation struct { NullFields []string `json:"-"` } -func (s *Operation) MarshalJSON() ([]byte, error) { +func (s Operation) MarshalJSON() ([]byte, error) { type NoMethod Operation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationProgress: Encapsulates progress related information for a Cloud @@ -3742,9 +4042,9 @@ type OperationProgress struct { NullFields []string `json:"-"` } -func (s *OperationProgress) MarshalJSON() ([]byte, error) { +func (s OperationProgress) MarshalJSON() ([]byte, error) { type NoMethod OperationProgress - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OptimizeRestoredDatabaseMetadata: Metadata type for the long-running @@ -3770,9 +4070,9 @@ type OptimizeRestoredDatabaseMetadata struct { NullFields []string `json:"-"` } -func (s *OptimizeRestoredDatabaseMetadata) MarshalJSON() ([]byte, error) { +func (s OptimizeRestoredDatabaseMetadata) MarshalJSON() ([]byte, error) { type NoMethod OptimizeRestoredDatabaseMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PartialResultSet: Partial results from a streaming read or SQL query. @@ -3852,9 +4152,9 @@ type PartialResultSet struct { NullFields []string `json:"-"` } -func (s *PartialResultSet) MarshalJSON() ([]byte, error) { +func (s PartialResultSet) MarshalJSON() ([]byte, error) { type NoMethod PartialResultSet - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Partition: Information returned for each partition returned in a @@ -3877,9 +4177,9 @@ type Partition struct { NullFields []string `json:"-"` } -func (s *Partition) MarshalJSON() ([]byte, error) { +func (s Partition) MarshalJSON() ([]byte, error) { type NoMethod Partition - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PartitionOptions: Options for a PartitionQueryRequest and @@ -3911,9 +4211,9 @@ type PartitionOptions struct { NullFields []string `json:"-"` } -func (s *PartitionOptions) MarshalJSON() ([]byte, error) { +func (s PartitionOptions) MarshalJSON() ([]byte, error) { type NoMethod PartitionOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PartitionQueryRequest: The request for PartitionQuery @@ -3937,15 +4237,15 @@ type PartitionQueryRequest struct { // created. PartitionOptions *PartitionOptions `json:"partitionOptions,omitempty"` // Sql: Required. The query request to generate partitions for. The request - // will fail if the query is not root partitionable. 
For a query to be root + // fails if the query is not root partitionable. For a query to be root // partitionable, it needs to satisfy a few conditions. For example, if the // query execution plan contains a distributed union operator, then it must be // the first operator in the plan. For more information about other conditions, // see Read data in parallel // (https://cloud.google.com/spanner/docs/reads#read_data_in_parallel). The - // query request must not contain DML commands, such as INSERT, UPDATE, or - // DELETE. Use ExecuteStreamingSql with a PartitionedDml transaction for large, - // partition-friendly DML operations. + // query request must not contain DML commands, such as `INSERT`, `UPDATE`, or + // `DELETE`. Use `ExecuteStreamingSql` with a PartitionedDml transaction for + // large, partition-friendly DML operations. Sql string `json:"sql,omitempty"` // Transaction: Read only snapshot transactions are supported, read/write and // single use transactions are not. @@ -3963,9 +4263,9 @@ type PartitionQueryRequest struct { NullFields []string `json:"-"` } -func (s *PartitionQueryRequest) MarshalJSON() ([]byte, error) { +func (s PartitionQueryRequest) MarshalJSON() ([]byte, error) { type NoMethod PartitionQueryRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PartitionReadRequest: The request for PartitionRead @@ -4004,9 +4304,9 @@ type PartitionReadRequest struct { NullFields []string `json:"-"` } -func (s *PartitionReadRequest) MarshalJSON() ([]byte, error) { +func (s PartitionReadRequest) MarshalJSON() ([]byte, error) { type NoMethod PartitionReadRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PartitionResponse: The response for PartitionQuery or PartitionRead @@ -4031,9 +4331,9 @@ type PartitionResponse struct { NullFields []string `json:"-"` } -func (s *PartitionResponse) MarshalJSON() ([]byte, error) { +func (s PartitionResponse) MarshalJSON() ([]byte, error) { type NoMethod PartitionResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PartitionedDml: Message type to initiate a Partitioned DML transaction. 
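The `PartitionedDml` message has no fields; selecting it in `TransactionOptions` is what puts a transaction into Partitioned DML mode, and, per the `PartitionQueryRequest` comment above, the DML statement itself must then go through `ExecuteSql`/`ExecuteStreamingSql` rather than `PartitionQuery`. A sketch of the two request shapes, with a placeholder transaction ID and table name:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	spanner "google.golang.org/api/spanner/v1"
)

func main() {
	// Begin a transaction whose options carry the (empty) PartitionedDml message.
	begin := &spanner.BeginTransactionRequest{
		Options: &spanner.TransactionOptions{
			PartitionedDml: &spanner.PartitionedDml{},
		},
	}
	// Issue the DML as an ExecuteSqlRequest referencing the transaction ID
	// returned by BeginTransaction ("txn-id" is a stand-in).
	exec := &spanner.ExecuteSqlRequest{
		Sql:         "UPDATE Albums SET MarketingBudget = 0 WHERE TRUE",
		Transaction: &spanner.TransactionSelector{Id: "txn-id"},
	}
	for _, msg := range []interface{}{begin, exec} {
		b, err := json.Marshal(msg) // uses the value-receiver MarshalJSON shown in this diff
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s\n", b)
	}
}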
@@ -4090,9 +4390,9 @@ type PlanNode struct { NullFields []string `json:"-"` } -func (s *PlanNode) MarshalJSON() ([]byte, error) { +func (s PlanNode) MarshalJSON() ([]byte, error) { type NoMethod PlanNode - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Policy: An Identity and Access Management (IAM) policy, which specifies @@ -4180,9 +4480,9 @@ type Policy struct { NullFields []string `json:"-"` } -func (s *Policy) MarshalJSON() ([]byte, error) { +func (s Policy) MarshalJSON() ([]byte, error) { type NoMethod Policy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PrefixNode: A message representing a key prefix node in the key prefix @@ -4216,9 +4516,9 @@ type PrefixNode struct { NullFields []string `json:"-"` } -func (s *PrefixNode) MarshalJSON() ([]byte, error) { +func (s PrefixNode) MarshalJSON() ([]byte, error) { type NoMethod PrefixNode - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // QueryAdvisorResult: Output of query advisor analysis. @@ -4240,9 +4540,9 @@ type QueryAdvisorResult struct { NullFields []string `json:"-"` } -func (s *QueryAdvisorResult) MarshalJSON() ([]byte, error) { +func (s QueryAdvisorResult) MarshalJSON() ([]byte, error) { type NoMethod QueryAdvisorResult - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // QueryOptions: Query optimizer configuration. @@ -4288,9 +4588,9 @@ type QueryOptions struct { NullFields []string `json:"-"` } -func (s *QueryOptions) MarshalJSON() ([]byte, error) { +func (s QueryOptions) MarshalJSON() ([]byte, error) { type NoMethod QueryOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // QueryPlan: Contains an ordered list of nodes appearing in the query plan. @@ -4315,24 +4615,24 @@ type QueryPlan struct { NullFields []string `json:"-"` } -func (s *QueryPlan) MarshalJSON() ([]byte, error) { +func (s QueryPlan) MarshalJSON() ([]byte, error) { type NoMethod QueryPlan - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// QuorumInfo: Information about the dual region quorum. +// QuorumInfo: Information about the dual-region quorum. type QuorumInfo struct { // Etag: Output only. The etag is used for optimistic concurrency control as a - // way to help prevent simultaneous ChangeQuorum requests that could create a + // way to help prevent simultaneous `ChangeQuorum` requests that might create a // race condition. Etag string `json:"etag,omitempty"` - // Initiator: Output only. Whether this ChangeQuorum is a Google or User + // Initiator: Output only. Whether this `ChangeQuorum` is Google or User // initiated. // // Possible values: // "INITIATOR_UNSPECIFIED" - Unspecified. - // "GOOGLE" - ChangeQuorum initiated by Google. - // "USER" - ChangeQuorum initiated by User. + // "GOOGLE" - `ChangeQuorum` initiated by Google. + // "USER" - `ChangeQuorum` initiated by User. Initiator string `json:"initiator,omitempty"` // QuorumType: Output only. The type of this quorum. 
See QuorumType for more
 // information about quorum type specifications.
@@ -4352,17 +4652,17 @@ type QuorumInfo struct {
 NullFields []string `json:"-"`
}

-func (s *QuorumInfo) MarshalJSON() ([]byte, error) {
+func (s QuorumInfo) MarshalJSON() ([]byte, error) {
 type NoMethod QuorumInfo
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

-// QuorumType: Information about the database quorum type. this applies only
-// for dual region instance configs.
+// QuorumType: Information about the database quorum type. This only applies to
+// dual-region instance configurations.
type QuorumType struct {
- // DualRegion: Dual region quorum type.
+ // DualRegion: Dual-region quorum type.
 DualRegion *DualRegionQuorum `json:"dualRegion,omitempty"`
- // SingleRegion: Single region quorum type.
+ // SingleRegion: Single-region quorum type.
 SingleRegion *SingleRegionQuorum `json:"singleRegion,omitempty"`
 // ForceSendFields is a list of field names (e.g. "DualRegion") to
 // unconditionally include in API requests. By default, fields with empty or
@@ -4377,9 +4677,9 @@ type QuorumType struct {
 NullFields []string `json:"-"`
}

-func (s *QuorumType) MarshalJSON() ([]byte, error) {
+func (s QuorumType) MarshalJSON() ([]byte, error) {
 type NoMethod QuorumType
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

// ReadOnly: Message type to initiate a read-only transaction.
@@ -4436,9 +4736,9 @@ type ReadOnly struct {
 NullFields []string `json:"-"`
}

-func (s *ReadOnly) MarshalJSON() ([]byte, error) {
+func (s ReadOnly) MarshalJSON() ([]byte, error) {
 type NoMethod ReadOnly
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

// ReadRequest: The request for Read and StreamingRead.
@@ -4554,9 +4854,9 @@ type ReadRequest struct {
 NullFields []string `json:"-"`
}

-func (s *ReadRequest) MarshalJSON() ([]byte, error) {
+func (s ReadRequest) MarshalJSON() ([]byte, error) {
 type NoMethod ReadRequest
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

// ReadWrite: Message type to initiate a read-write transaction. Currently this
@@ -4587,9 +4887,9 @@ type ReadWrite struct {
 NullFields []string `json:"-"`
}

-func (s *ReadWrite) MarshalJSON() ([]byte, error) {
+func (s ReadWrite) MarshalJSON() ([]byte, error) {
 type NoMethod ReadWrite
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

type ReplicaInfo struct {
@@ -4630,9 +4930,9 @@ type ReplicaInfo struct {
 NullFields []string `json:"-"`
}

-func (s *ReplicaInfo) MarshalJSON() ([]byte, error) {
+func (s ReplicaInfo) MarshalJSON() ([]byte, error) {
 type NoMethod ReplicaInfo
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

// ReplicaSelection: The directed read replica selector.
Callers must provide
@@ -4668,9 +4968,9 @@ type ReplicaSelection struct {
 NullFields []string `json:"-"`
}

-func (s *ReplicaSelection) MarshalJSON() ([]byte, error) {
+func (s ReplicaSelection) MarshalJSON() ([]byte, error) {
 type NoMethod ReplicaSelection
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

// RequestOptions: Common request options for various APIs.
@@ -4716,9 +5016,9 @@ type RequestOptions struct {
 NullFields []string `json:"-"`
}

-func (s *RequestOptions) MarshalJSON() ([]byte, error) {
+func (s RequestOptions) MarshalJSON() ([]byte, error) {
 type NoMethod RequestOptions
- return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}

// RestoreDatabaseEncryptionConfig: Encryption configuration for the restored
@@ -4740,17 +5040,18 @@ type RestoreDatabaseEncryptionConfig struct {
 // `projects//locations//keyRings//cryptoKeys/`.
 KmsKeyName string `json:"kmsKeyName,omitempty"`
 // KmsKeyNames: Optional. Specifies the KMS configuration for the one or more
- // keys used to encrypt the database. Values are of the form
+ // keys used to encrypt the database. Values have the form
 // `projects//locations//keyRings//cryptoKeys/`. The keys referenced by
 // kms_key_names must fully cover all regions of the database instance
- // configuration. Some examples: * For single region database instance configs,
- // specify a single regional location KMS key. * For multi-regional database
- // instance configs of type GOOGLE_MANAGED, either specify a multi-regional
- // location KMS key or multiple regional location KMS keys that cover all
- // regions in the instance config. * For a database instance config of type
- // USER_MANAGED, please specify only regional location KMS keys to cover each
- // region in the instance config. Multi-regional location KMS keys are not
- // supported for USER_MANAGED instance configs.
+ // configuration. Some examples: * For single-region database instance
+ // configurations, specify a single regional location KMS key. * For
+ // multi-regional database instance configurations of type `GOOGLE_MANAGED`,
+ // either specify a multi-regional location KMS key or multiple regional
+ // location KMS keys that cover all regions in the instance configuration. *
+ // For a database instance configuration of type `USER_MANAGED`, specify only
+ // regional location KMS keys to cover each region in the instance
+ // configuration. Multi-regional location KMS keys are not supported for
+ // `USER_MANAGED` instance configurations.
 KmsKeyNames []string `json:"kmsKeyNames,omitempty"`
 // ForceSendFields is a list of field names (e.g. "EncryptionType") to
 // unconditionally include in API requests.
By default, fields with empty or @@ -4765,9 +5066,9 @@ type RestoreDatabaseEncryptionConfig struct { NullFields []string `json:"-"` } -func (s *RestoreDatabaseEncryptionConfig) MarshalJSON() ([]byte, error) { +func (s RestoreDatabaseEncryptionConfig) MarshalJSON() ([]byte, error) { type NoMethod RestoreDatabaseEncryptionConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RestoreDatabaseMetadata: Metadata type for the long-running operation @@ -4819,9 +5120,9 @@ type RestoreDatabaseMetadata struct { NullFields []string `json:"-"` } -func (s *RestoreDatabaseMetadata) MarshalJSON() ([]byte, error) { +func (s RestoreDatabaseMetadata) MarshalJSON() ([]byte, error) { type NoMethod RestoreDatabaseMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RestoreDatabaseRequest: The request for RestoreDatabase. @@ -4852,9 +5153,9 @@ type RestoreDatabaseRequest struct { NullFields []string `json:"-"` } -func (s *RestoreDatabaseRequest) MarshalJSON() ([]byte, error) { +func (s RestoreDatabaseRequest) MarshalJSON() ([]byte, error) { type NoMethod RestoreDatabaseRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RestoreInfo: Information about the database restore. @@ -4881,9 +5182,9 @@ type RestoreInfo struct { NullFields []string `json:"-"` } -func (s *RestoreInfo) MarshalJSON() ([]byte, error) { +func (s RestoreInfo) MarshalJSON() ([]byte, error) { type NoMethod RestoreInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResultSet: Results from Read or ExecuteSql. @@ -4917,9 +5218,9 @@ type ResultSet struct { NullFields []string `json:"-"` } -func (s *ResultSet) MarshalJSON() ([]byte, error) { +func (s ResultSet) MarshalJSON() ([]byte, error) { type NoMethod ResultSet - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResultSetMetadata: Metadata about a ResultSet or PartialResultSet. @@ -4953,9 +5254,9 @@ type ResultSetMetadata struct { NullFields []string `json:"-"` } -func (s *ResultSetMetadata) MarshalJSON() ([]byte, error) { +func (s ResultSetMetadata) MarshalJSON() ([]byte, error) { type NoMethod ResultSetMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResultSetStats: Additional statistics about a ResultSet or PartialResultSet. @@ -4986,9 +5287,9 @@ type ResultSetStats struct { NullFields []string `json:"-"` } -func (s *ResultSetStats) MarshalJSON() ([]byte, error) { +func (s ResultSetStats) MarshalJSON() ([]byte, error) { type NoMethod ResultSetStats - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RollbackRequest: The request for Rollback. 
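Tying the `RestoreDatabaseEncryptionConfig` rules above together: with `CUSTOMER_MANAGED_ENCRYPTION`, the keys listed in `kms_key_names` must cover every region of the target instance configuration. A sketch of the request body, with placeholder resource names throughout:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	spanner "google.golang.org/api/spanner/v1"
)

func main() {
	req := &spanner.RestoreDatabaseRequest{
		DatabaseId: "restored-db",
		Backup:     "projects/p/instances/i/backups/b",
		EncryptionConfig: &spanner.RestoreDatabaseEncryptionConfig{
			EncryptionType: "CUSTOMER_MANAGED_ENCRYPTION",
			// One key per region for a user-managed instance configuration;
			// a single regional key would suffice for a single-region one.
			KmsKeyNames: []string{
				"projects/p/locations/us-east1/keyRings/r/cryptoKeys/k1",
				"projects/p/locations/us-west1/keyRings/r/cryptoKeys/k2",
			},
		},
	}
	b, err := json.Marshal(req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", b)
}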
@@ -5008,9 +5309,9 @@ type RollbackRequest struct { NullFields []string `json:"-"` } -func (s *RollbackRequest) MarshalJSON() ([]byte, error) { +func (s RollbackRequest) MarshalJSON() ([]byte, error) { type NoMethod RollbackRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Scan: Scan is a structure which describes Cloud Key Visualizer scan @@ -5045,9 +5346,9 @@ type Scan struct { NullFields []string `json:"-"` } -func (s *Scan) MarshalJSON() ([]byte, error) { +func (s Scan) MarshalJSON() ([]byte, error) { type NoMethod Scan - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ScanData: ScanData contains Cloud Key Visualizer scan data used by the @@ -5075,9 +5376,9 @@ type ScanData struct { NullFields []string `json:"-"` } -func (s *ScanData) MarshalJSON() ([]byte, error) { +func (s ScanData) MarshalJSON() ([]byte, error) { type NoMethod ScanData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Session: A session in the Cloud Spanner API. @@ -5122,9 +5423,9 @@ type Session struct { NullFields []string `json:"-"` } -func (s *Session) MarshalJSON() ([]byte, error) { +func (s Session) MarshalJSON() ([]byte, error) { type NoMethod Session - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SetIamPolicyRequest: Request message for `SetIamPolicy` method. @@ -5147,9 +5448,9 @@ type SetIamPolicyRequest struct { NullFields []string `json:"-"` } -func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { +func (s SetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod SetIamPolicyRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ShortRepresentation: Condensed representation of a node and its subtree. @@ -5177,19 +5478,18 @@ type ShortRepresentation struct { NullFields []string `json:"-"` } -func (s *ShortRepresentation) MarshalJSON() ([]byte, error) { +func (s ShortRepresentation) MarshalJSON() ([]byte, error) { type NoMethod ShortRepresentation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SingleRegionQuorum: Message type for a single-region quorum. type SingleRegionQuorum struct { // ServingLocation: Required. The location of the serving region, e.g. - // "us-central1". The location must be one of the regions within the dual - // region instance configuration of your database. The list of valid locations - // is available via [GetInstanceConfig[InstanceAdmin.GetInstanceConfig] API. - // This should only be used if you plan to change quorum in single-region - // quorum type. + // "us-central1". The location must be one of the regions within the + // dual-region instance configuration of your database. The list of valid + // locations is available using the GetInstanceConfig API. This should only be + // used if you plan to change quorum to the single-region quorum type. ServingLocation string `json:"servingLocation,omitempty"` // ForceSendFields is a list of field names (e.g. 
"ServingLocation") to // unconditionally include in API requests. By default, fields with empty or @@ -5204,9 +5504,9 @@ type SingleRegionQuorum struct { NullFields []string `json:"-"` } -func (s *SingleRegionQuorum) MarshalJSON() ([]byte, error) { +func (s SingleRegionQuorum) MarshalJSON() ([]byte, error) { type NoMethod SingleRegionQuorum - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Statement: A single DML statement. @@ -5241,9 +5541,9 @@ type Statement struct { NullFields []string `json:"-"` } -func (s *Statement) MarshalJSON() ([]byte, error) { +func (s Statement) MarshalJSON() ([]byte, error) { type NoMethod Statement - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Status: The `Status` type defines a logical error model that is suitable for @@ -5275,9 +5575,9 @@ type Status struct { NullFields []string `json:"-"` } -func (s *Status) MarshalJSON() ([]byte, error) { +func (s Status) MarshalJSON() ([]byte, error) { type NoMethod Status - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // StructType: `StructType` defines the fields of a STRUCT type. @@ -5301,9 +5601,9 @@ type StructType struct { NullFields []string `json:"-"` } -func (s *StructType) MarshalJSON() ([]byte, error) { +func (s StructType) MarshalJSON() ([]byte, error) { type NoMethod StructType - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsRequest: Request message for `TestIamPermissions` method. @@ -5325,9 +5625,9 @@ type TestIamPermissionsRequest struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsResponse: Response message for `TestIamPermissions` @@ -5352,9 +5652,9 @@ type TestIamPermissionsResponse struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Transaction: A transaction. @@ -5364,6 +5664,12 @@ type Transaction struct { // not have IDs, because single-use transactions do not support multiple // requests. Id string `json:"id,omitempty"` + // PrecommitToken: A precommit token will be included in the response of a + // BeginTransaction request if the read-write transaction is on a multiplexed + // session and a mutation_key was specified in the BeginTransaction. The + // precommit token with the highest sequence number from this transaction + // attempt should be passed to the Commit request for this transaction. 
+ PrecommitToken *MultiplexedSessionPrecommitToken `json:"precommitToken,omitempty"` // ReadTimestamp: For snapshot read-only transactions, the read timestamp // chosen for the transaction. Not returned by default: see // TransactionOptions.ReadOnly.return_read_timestamp. A timestamp in RFC3339 @@ -5386,9 +5692,9 @@ type Transaction struct { NullFields []string `json:"-"` } -func (s *Transaction) MarshalJSON() ([]byte, error) { +func (s Transaction) MarshalJSON() ([]byte, error) { type NoMethod Transaction - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TransactionOptions: Transactions: Each session can have at most one active @@ -5630,9 +5936,9 @@ type TransactionOptions struct { NullFields []string `json:"-"` } -func (s *TransactionOptions) MarshalJSON() ([]byte, error) { +func (s TransactionOptions) MarshalJSON() ([]byte, error) { type NoMethod TransactionOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TransactionSelector: This message is used to select the transaction in which @@ -5662,9 +5968,9 @@ type TransactionSelector struct { NullFields []string `json:"-"` } -func (s *TransactionSelector) MarshalJSON() ([]byte, error) { +func (s TransactionSelector) MarshalJSON() ([]byte, error) { type NoMethod TransactionSelector - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Type: `Type` indicates the type of a Cloud Spanner value, as might be stored @@ -5751,9 +6057,9 @@ type Type struct { NullFields []string `json:"-"` } -func (s *Type) MarshalJSON() ([]byte, error) { +func (s Type) MarshalJSON() ([]byte, error) { type NoMethod Type - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateDatabaseDdlMetadata: Metadata type for the operation returned by @@ -5794,9 +6100,9 @@ type UpdateDatabaseDdlMetadata struct { NullFields []string `json:"-"` } -func (s *UpdateDatabaseDdlMetadata) MarshalJSON() ([]byte, error) { +func (s UpdateDatabaseDdlMetadata) MarshalJSON() ([]byte, error) { type NoMethod UpdateDatabaseDdlMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateDatabaseDdlRequest: Enqueues the given DDL statements to be applied, @@ -5850,9 +6156,9 @@ type UpdateDatabaseDdlRequest struct { NullFields []string `json:"-"` } -func (s *UpdateDatabaseDdlRequest) MarshalJSON() ([]byte, error) { +func (s UpdateDatabaseDdlRequest) MarshalJSON() ([]byte, error) { type NoMethod UpdateDatabaseDdlRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateDatabaseMetadata: Metadata type for the operation returned by @@ -5878,9 +6184,9 @@ type UpdateDatabaseMetadata struct { NullFields []string `json:"-"` } -func (s *UpdateDatabaseMetadata) MarshalJSON() ([]byte, error) { +func (s UpdateDatabaseMetadata) MarshalJSON() ([]byte, error) { type NoMethod UpdateDatabaseMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, 
s.NullFields) } // UpdateDatabaseRequest: The request for UpdateDatabase. @@ -5904,9 +6210,9 @@ type UpdateDatabaseRequest struct { NullFields []string `json:"-"` } -func (s *UpdateDatabaseRequest) MarshalJSON() ([]byte, error) { +func (s UpdateDatabaseRequest) MarshalJSON() ([]byte, error) { type NoMethod UpdateDatabaseRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateInstanceConfigMetadata: Metadata type for the operation returned by @@ -5914,7 +6220,7 @@ func (s *UpdateDatabaseRequest) MarshalJSON() ([]byte, error) { type UpdateInstanceConfigMetadata struct { // CancelTime: The time at which this operation was cancelled. CancelTime string `json:"cancelTime,omitempty"` - // InstanceConfig: The desired instance config after updating. + // InstanceConfig: The desired instance configuration after updating. InstanceConfig *InstanceConfig `json:"instanceConfig,omitempty"` // Progress: The progress of the UpdateInstanceConfig operation. Progress *InstanceOperationProgress `json:"progress,omitempty"` @@ -5931,17 +6237,17 @@ type UpdateInstanceConfigMetadata struct { NullFields []string `json:"-"` } -func (s *UpdateInstanceConfigMetadata) MarshalJSON() ([]byte, error) { +func (s UpdateInstanceConfigMetadata) MarshalJSON() ([]byte, error) { type NoMethod UpdateInstanceConfigMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateInstanceConfigRequest: The request for UpdateInstanceConfigRequest. type UpdateInstanceConfigRequest struct { - // InstanceConfig: Required. The user instance config to update, which must - // always include the instance config name. Otherwise, only fields mentioned in - // update_mask need be included. To prevent conflicts of concurrent updates, - // etag can be used. + // InstanceConfig: Required. The user instance configuration to update, which + // must always include the instance configuration name. Otherwise, only fields + // mentioned in update_mask need be included. To prevent conflicts of + // concurrent updates, etag can be used. InstanceConfig *InstanceConfig `json:"instanceConfig,omitempty"` // UpdateMask: Required. A mask specifying which fields in InstanceConfig // should be updated. 
The field mask must always be specified; this prevents @@ -5965,9 +6271,9 @@ type UpdateInstanceConfigRequest struct { NullFields []string `json:"-"` } -func (s *UpdateInstanceConfigRequest) MarshalJSON() ([]byte, error) { +func (s UpdateInstanceConfigRequest) MarshalJSON() ([]byte, error) { type NoMethod UpdateInstanceConfigRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateInstanceMetadata: Metadata type for the operation returned by @@ -6007,9 +6313,9 @@ type UpdateInstanceMetadata struct { NullFields []string `json:"-"` } -func (s *UpdateInstanceMetadata) MarshalJSON() ([]byte, error) { +func (s UpdateInstanceMetadata) MarshalJSON() ([]byte, error) { type NoMethod UpdateInstanceMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateInstancePartitionMetadata: Metadata type for the operation returned by @@ -6039,9 +6345,9 @@ type UpdateInstancePartitionMetadata struct { NullFields []string `json:"-"` } -func (s *UpdateInstancePartitionMetadata) MarshalJSON() ([]byte, error) { +func (s UpdateInstancePartitionMetadata) MarshalJSON() ([]byte, error) { type NoMethod UpdateInstancePartitionMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateInstancePartitionRequest: The request for UpdateInstancePartition. @@ -6068,9 +6374,9 @@ type UpdateInstancePartitionRequest struct { NullFields []string `json:"-"` } -func (s *UpdateInstancePartitionRequest) MarshalJSON() ([]byte, error) { +func (s UpdateInstancePartitionRequest) MarshalJSON() ([]byte, error) { type NoMethod UpdateInstancePartitionRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateInstanceRequest: The request for UpdateInstance. 
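Putting the `UpdateInstanceConfigRequest` contract above into practice: the body names the configuration, the mask limits which fields are applied, and the etag from a prior read guards against concurrent updates. A minimal sketch with placeholder names, assuming the field mask takes the JSON field name:

package main

import (
	"context"
	"fmt"
	"log"

	spanner "google.golang.org/api/spanner/v1"
)

func main() {
	ctx := context.Background()
	svc, err := spanner.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	name := "projects/my-project/instanceConfigs/custom-config"
	req := &spanner.UpdateInstanceConfigRequest{
		InstanceConfig: &spanner.InstanceConfig{
			Name:        name,                   // must always be included
			DisplayName: "renamed config",       // the field being changed
			Etag:        "etag-from-last-read",  // optimistic concurrency guard
		},
		UpdateMask: "displayName", // only masked fields are applied
	}
	op, err := svc.Projects.InstanceConfigs.Patch(name, req).Do()
	if err != nil {
		log.Fatal(err)
	}
	// Patch returns a long-running Operation; poll it until Done is true.
	fmt.Println("operation:", op.Name, "done:", op.Done)
}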
@@ -6097,9 +6403,9 @@ type UpdateInstanceRequest struct { NullFields []string `json:"-"` } -func (s *UpdateInstanceRequest) MarshalJSON() ([]byte, error) { +func (s UpdateInstanceRequest) MarshalJSON() ([]byte, error) { type NoMethod UpdateInstanceRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type VisualizationData struct { @@ -6147,9 +6453,9 @@ type VisualizationData struct { NullFields []string `json:"-"` } -func (s *VisualizationData) MarshalJSON() ([]byte, error) { +func (s VisualizationData) MarshalJSON() ([]byte, error) { type NoMethod VisualizationData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Write: Arguments to insert, update, insert_or_update, and replace @@ -6182,9 +6488,9 @@ type Write struct { NullFields []string `json:"-"` } -func (s *Write) MarshalJSON() ([]byte, error) { +func (s Write) MarshalJSON() ([]byte, error) { type NoMethod Write - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ProjectsInstanceConfigOperationsListCall struct { @@ -6196,17 +6502,17 @@ type ProjectsInstanceConfigOperationsListCall struct { header_ http.Header } -// List: Lists the user-managed instance config long-running operations in the -// given project. An instance config operation has a name of the form -// `projects//instanceConfigs//operations/`. The long-running operation +// List: Lists the user-managed instance configuration long-running operations +// in the given project. An instance configuration operation has a name of the +// form `projects//instanceConfigs//operations/`. The long-running operation // metadata field type `metadata.type_url` describes the type of the metadata. // Operations returned include those that have completed/failed/canceled within // the last 7 days, and pending operations. Operations returned are ordered by // `operation.metadata.value.start_time` in descending order starting from the // most recently started operation. // -// - parent: The project of the instance config operations. Values are of the -// form `projects/`. +// - parent: The project of the instance configuration operations. Values are +// of the form `projects/`. func (r *ProjectsInstanceConfigOperationsService) List(parent string) *ProjectsInstanceConfigOperationsListCall { c := &ProjectsInstanceConfigOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -6236,7 +6542,7 @@ func (r *ProjectsInstanceConfigOperationsService) List(parent string) *ProjectsI // adata) AND` \ `(metadata.instance_config.name:custom-config) AND` \ // `(metadata.progress.start_time < \"2021-03-28T14:50:00Z\") AND` \ // `(error:*)` - Return operations where: * The operation's metadata type is -// CreateInstanceConfigMetadata. * The instance config name contains +// CreateInstanceConfigMetadata. * The instance configuration name contains // "custom-config". * The operation started before 2021-03-28T14:50:00Z. * The // operation resulted in an error. func (c *ProjectsInstanceConfigOperationsListCall) Filter(filter string) *ProjectsInstanceConfigOperationsListCall { @@ -6381,27 +6687,28 @@ type ProjectsInstanceConfigsCreateCall struct { header_ http.Header } -// Create: Creates an instance config and begins preparing it to be used. 
The -// returned long-running operation can be used to track the progress of -// preparing the new instance config. The instance config name is assigned by -// the caller. If the named instance config already exists, -// `CreateInstanceConfig` returns `ALREADY_EXISTS`. Immediately after the -// request returns: * The instance config is readable via the API, with all -// requested attributes. The instance config's reconciling field is set to -// true. Its state is `CREATING`. While the operation is pending: * Cancelling -// the operation renders the instance config immediately unreadable via the -// API. * Except for deleting the creating resource, all other attempts to -// modify the instance config are rejected. Upon completion of the returned -// operation: * Instances can be created using the instance configuration. * -// The instance config's reconciling field becomes false. Its state becomes -// `READY`. The returned long-running operation will have a name of the format -// `/operations/` and can be used to track creation of the instance config. The -// metadata field type is CreateInstanceConfigMetadata. The response field type -// is InstanceConfig, if successful. Authorization requires +// Create: Creates an instance configuration and begins preparing it to be +// used. The returned long-running operation can be used to track the progress +// of preparing the new instance configuration. The instance configuration name +// is assigned by the caller. If the named instance configuration already +// exists, `CreateInstanceConfig` returns `ALREADY_EXISTS`. Immediately after +// the request returns: * The instance configuration is readable via the API, +// with all requested attributes. The instance configuration's reconciling +// field is set to true. Its state is `CREATING`. While the operation is +// pending: * Cancelling the operation renders the instance configuration +// immediately unreadable via the API. * Except for deleting the creating +// resource, all other attempts to modify the instance configuration are +// rejected. Upon completion of the returned operation: * Instances can be +// created using the instance configuration. * The instance configuration's +// reconciling field becomes false. Its state becomes `READY`. The returned +// long-running operation will have a name of the format `/operations/` and can +// be used to track creation of the instance configuration. The metadata field +// type is CreateInstanceConfigMetadata. The response field type is +// InstanceConfig, if successful. Authorization requires // `spanner.instanceConfigs.create` permission on the resource parent. // -// - parent: The name of the project in which to create the instance config. -// Values are of the form `projects/`. +// - parent: The name of the project in which to create the instance +// configuration. Values are of the form `projects/`. func (r *ProjectsInstanceConfigsService) Create(parent string, createinstanceconfigrequest *CreateInstanceConfigRequest) *ProjectsInstanceConfigsCreateCall { c := &ProjectsInstanceConfigsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -6499,11 +6806,11 @@ type ProjectsInstanceConfigsDeleteCall struct { header_ http.Header } -// Delete: Deletes the instance config. Deletion is only allowed when no +// Delete: Deletes the instance configuration. Deletion is only allowed when no // instances are using the configuration. If any instances are using the -// config, returns `FAILED_PRECONDITION`. 
Only user managed configurations can -// be deleted. Authorization requires `spanner.instanceConfigs.delete` -// permission on the resource name. +// configuration, returns `FAILED_PRECONDITION`. Only user-managed +// configurations can be deleted. Authorization requires +// `spanner.instanceConfigs.delete` permission on the resource name. // // - name: The name of the instance configuration to be deleted. Values are of // the form `projects//instanceConfigs/`. @@ -6514,11 +6821,12 @@ func (r *ProjectsInstanceConfigsService) Delete(name string) *ProjectsInstanceCo } // Etag sets the optional parameter "etag": Used for optimistic concurrency -// control as a way to help prevent simultaneous deletes of an instance config -// from overwriting each other. If not empty, the API only deletes the instance -// config when the etag provided matches the current status of the requested -// instance config. Otherwise, deletes the instance config without checking the -// current status of the requested instance config. +// control as a way to help prevent simultaneous deletes of an instance +// configuration from overwriting each other. If not empty, the API only +// deletes the instance configuration when the etag provided matches the +// current status of the requested instance configuration. Otherwise, deletes +// the instance configuration without checking the current status of the +// requested instance configuration. func (c *ProjectsInstanceConfigsDeleteCall) Etag(etag string) *ProjectsInstanceConfigsDeleteCall { c.urlParams_.Set("etag", etag) return c @@ -6728,7 +7036,7 @@ type ProjectsInstanceConfigsListCall struct { } // List: Lists the supported instance configurations for a given project. -// Returns both Google managed configs and user managed configs. +// Returns both Google-managed configurations and user-managed configurations. // // - parent: The name of the project for which a list of supported instance // configurations is requested. Values are of the form `projects/`. @@ -6874,28 +7182,29 @@ type ProjectsInstanceConfigsPatchCall struct { header_ http.Header } -// Patch: Updates an instance config. The returned long-running operation can -// be used to track the progress of updating the instance. If the named -// instance config does not exist, returns `NOT_FOUND`. Only user managed -// configurations can be updated. Immediately after the request returns: * The -// instance config's reconciling field is set to true. While the operation is -// pending: * Cancelling the operation sets its metadata's cancel_time. The -// operation is guaranteed to succeed at undoing all changes, after which point -// it terminates with a `CANCELLED` status. * All other attempts to modify the -// instance config are rejected. * Reading the instance config via the API -// continues to give the pre-request values. Upon completion of the returned -// operation: * Creating instances using the instance configuration uses the -// new values. * The instance config's new values are readable via the API. * -// The instance config's reconciling field becomes false. The returned +// Patch: Updates an instance configuration. The returned long-running +// operation can be used to track the progress of updating the instance. If the +// named instance configuration does not exist, returns `NOT_FOUND`. Only +// user-managed configurations can be updated. Immediately after the request +// returns: * The instance configuration's reconciling field is set to true. 
+// While the operation is pending: * Cancelling the operation sets its +// metadata's cancel_time. The operation is guaranteed to succeed at undoing +// all changes, after which point it terminates with a `CANCELLED` status. * +// All other attempts to modify the instance configuration are rejected. * +// Reading the instance configuration via the API continues to give the +// pre-request values. Upon completion of the returned operation: * Creating +// instances using the instance configuration uses the new values. * The new +// values of the instance configuration are readable via the API. * The +// instance configuration's reconciling field becomes false. The returned // long-running operation will have a name of the format `/operations/` and can -// be used to track the instance config modification. The metadata field type -// is UpdateInstanceConfigMetadata. The response field type is InstanceConfig, -// if successful. Authorization requires `spanner.instanceConfigs.update` -// permission on the resource name. +// be used to track the instance configuration modification. The metadata field +// type is UpdateInstanceConfigMetadata. The response field type is +// InstanceConfig, if successful. Authorization requires +// `spanner.instanceConfigs.update` permission on the resource name. // // - name: A unique identifier for the instance configuration. Values are of -// the form `projects//instanceConfigs/a-z*`. User instance config must start -// with `custom-`. +// the form `projects//instanceConfigs/a-z*`. User instance configuration +// must start with `custom-`. func (r *ProjectsInstanceConfigsService) Patch(nameid string, updateinstanceconfigrequest *UpdateInstanceConfigRequest) *ProjectsInstanceConfigsPatchCall { c := &ProjectsInstanceConfigsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.nameid = nameid @@ -8524,34 +8833,35 @@ type ProjectsInstancesMoveCall struct { header_ http.Header } -// Move: Moves the instance to the target instance config. The returned -// long-running operation can be used to track the progress of moving the +// Move: Moves an instance to the target instance configuration. You can use +// the returned long-running operation to track the progress of moving the // instance. `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets -// any of the following criteria: * Has an ongoing move to a different instance -// config * Has backups * Has an ongoing update * Is under free trial * -// Contains any CMEK-enabled databases While the operation is pending: * All -// other attempts to modify the instance, including changes to its compute -// capacity, are rejected. * The following database and backup admin operations -// are rejected: * DatabaseAdmin.CreateDatabase, * -// DatabaseAdmin.UpdateDatabaseDdl (Disabled if default_leader is specified in -// the request.) * DatabaseAdmin.RestoreDatabase * DatabaseAdmin.CreateBackup * -// DatabaseAdmin.CopyBackup * Both the source and target instance configs are -// subject to hourly compute and storage charges. * The instance may experience -// higher read-write latencies and a higher transaction abort rate. However, -// moving an instance does not cause any downtime. The returned long-running -// operation will have a name of the format `/operations/` and can be used to -// track the move instance operation. The metadata field type is -// MoveInstanceMetadata. The response field type is Instance, if successful. -// Cancelling the operation sets its metadata's cancel_time. 
Cancellation is -// not immediate since it involves moving any data previously moved to target -// instance config back to the original instance config. The same operation can -// be used to track the progress of the cancellation. Upon successful -// completion of the cancellation, the operation terminates with CANCELLED -// status. Upon completion(if not cancelled) of the returned operation: * -// Instance would be successfully moved to the target instance config. * You -// are billed for compute and storage in target instance config. Authorization -// requires `spanner.instances.update` permission on the resource instance. For -// more details, please see documentation +// any of the following criteria: * Is undergoing a move to a different +// instance configuration * Has backups * Has an ongoing update * Contains any +// CMEK-enabled databases * Is a free trial instance While the operation is +// pending: * All other attempts to modify the instance, including changes to +// its compute capacity, are rejected. * The following database and backup +// admin operations are rejected: * `DatabaseAdmin.CreateDatabase` * +// `DatabaseAdmin.UpdateDatabaseDdl` (disabled if default_leader is specified +// in the request.) * `DatabaseAdmin.RestoreDatabase` * +// `DatabaseAdmin.CreateBackup` * `DatabaseAdmin.CopyBackup` * Both the source +// and target instance configurations are subject to hourly compute and storage +// charges. * The instance might experience higher read-write latencies and a +// higher transaction abort rate. However, moving an instance doesn't cause any +// downtime. The returned long-running operation has a name of the format +// `/operations/` and can be used to track the move instance operation. The +// metadata field type is MoveInstanceMetadata. The response field type is +// Instance, if successful. Cancelling the operation sets its metadata's +// cancel_time. Cancellation is not immediate because it involves moving any +// data previously moved to the target instance configuration back to the +// original instance configuration. You can use this operation to track the +// progress of the cancellation. Upon successful completion of the +// cancellation, the operation terminates with `CANCELLED` status. If not +// cancelled, upon completion of the returned operation: * The instance +// successfully moves to the target instance configuration. * You are billed +// for compute and storage in target instance configuration. Authorization +// requires the `spanner.instances.update` permission on the resource instance. +// For more details, see Move an instance // (https://cloud.google.com/spanner/docs/move-instance). // // - name: The instance to move. Values are of the form `projects//instances/`. @@ -9807,18 +10117,20 @@ func (r *ProjectsInstancesBackupsService) List(parent string) *ProjectsInstances // eligible for filtering: * `name` * `database` * `state` * `create_time` (and // values are of the format YYYY-MM-DDTHH:MM:SSZ) * `expire_time` (and values // are of the format YYYY-MM-DDTHH:MM:SSZ) * `version_time` (and values are of -// the format YYYY-MM-DDTHH:MM:SSZ) * `size_bytes` You can combine multiple -// expressions by enclosing each expression in parentheses. By default, -// expressions are combined with AND logic, but you can specify AND, OR, and -// NOT logic explicitly. Here are a few examples: * `name:Howl` - The backup's -// name contains the string "howl". * `database:prod` - The database's name -// contains the string "prod". 
* `state:CREATING` - The backup is pending +// the format YYYY-MM-DDTHH:MM:SSZ) * `size_bytes` * `backup_schedules` You can +// combine multiple expressions by enclosing each expression in parentheses. By +// default, expressions are combined with AND logic, but you can specify AND, +// OR, and NOT logic explicitly. Here are a few examples: * `name:Howl` - The +// backup's name contains the string "howl". * `database:prod` - The database's +// name contains the string "prod". * `state:CREATING` - The backup is pending // creation. * `state:READY` - The backup is fully created and ready for use. * // `(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")` - The backup name // contains the string "howl" and `create_time` of the backup is before // 2018-03-28T14:50:00Z. * `expire_time < \"2018-03-28T14:50:00Z\" - The // backup `expire_time` is before 2018-03-28T14:50:00Z. * `size_bytes > -// 10000000000` - The backup's size is greater than 10GB +// 10000000000` - The backup's size is greater than 10GB * +// `backup_schedules:daily` - The backup is created from a schedule with +// "daily" in its name. func (c *ProjectsInstancesBackupsListCall) Filter(filter string) *ProjectsInstancesBackupsListCall { c.urlParams_.Set("filter", filter) return c @@ -10940,17 +11252,17 @@ type ProjectsInstancesDatabasesChangequorumCall struct { header_ http.Header } -// Changequorum: ChangeQuorum is strictly restricted to databases that use dual -// region instance configurations. Initiates a background operation to change -// quorum a database from dual-region mode to single-region mode and vice -// versa. The returned long-running operation will have a name of the format +// Changequorum: `ChangeQuorum` is strictly restricted to databases that use +// dual-region instance configurations. Initiates a background operation to +// change the quorum of a database from dual-region mode to single-region mode +// or vice versa. The returned long-running operation has a name of the format // `projects//instances//databases//operations/` and can be used to track -// execution of the ChangeQuorum. The metadata field type is +// execution of the `ChangeQuorum`. The metadata field type is // ChangeQuorumMetadata. Authorization requires // `spanner.databases.changequorum` permission on the resource database. // -// - name: Name of the database in which to apply the ChangeQuorum. Values are -// of the form `projects//instances//databases/`. +// - name: Name of the database in which to apply `ChangeQuorum`. Values are of +// the form `projects//instances//databases/`. func (r *ProjectsInstancesDatabasesService) Changequorum(name string, changequorumrequest *ChangeQuorumRequest) *ProjectsInstancesDatabasesChangequorumCall { c := &ProjectsInstancesDatabasesChangequorumCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -11049,8 +11361,8 @@ type ProjectsInstancesDatabasesCreateCall struct { header_ http.Header } -// Create: Creates a new Cloud Spanner database and starts to prepare it for -// serving. The returned long-running operation will have a name of the format +// Create: Creates a new Spanner database and starts to prepare it for serving. +// The returned long-running operation will have a name of the format // `/operations/` and can be used to track preparation of the database. The // metadata field type is CreateDatabaseMetadata. The response field type is // Database, if successful. 
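The List filter hunk above adds `backup_schedules` to the filterable backup fields, pairing with the new BackupSchedules service introduced in the next hunk. A hedged usage sketch against the regenerated client — the parent value is a placeholder, and NewService is assumed to find Application Default Credentials in the environment; Filter and Pages are the generated entry points shown in this diff:

package main

import (
	"context"
	"fmt"
	"log"

	spanner "google.golang.org/api/spanner/v1"
)

func main() {
	ctx := context.Background()
	// NewService picks up Application Default Credentials (assumption:
	// the environment is already authenticated).
	svc, err := spanner.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder parent; substitute a real project and instance.
	parent := "projects/my-project/instances/my-instance"
	// Combine an existing state filter with the newly filterable
	// backup_schedules field added in this revision.
	call := svc.Projects.Instances.Backups.List(parent).
		Filter(`(state:READY) AND (backup_schedules:daily)`)
	// Pages iterates using next_page_token, as in the generated code above.
	err = call.Pages(ctx, func(page *spanner.ListBackupsResponse) error {
		for _, b := range page.Backups {
			fmt.Println(b.Name, b.SizeBytes)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}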
@@ -12428,6 +12740,907 @@ func (c *ProjectsInstancesDatabasesUpdateDdlCall) Do(opts ...googleapi.CallOptio return ret, nil } +type ProjectsInstancesDatabasesBackupSchedulesCreateCall struct { + s *Service + parent string + backupschedule *BackupSchedule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a new backup schedule. +// +// - parent: The name of the database that this backup schedule applies to. +func (r *ProjectsInstancesDatabasesBackupSchedulesService) Create(parent string, backupschedule *BackupSchedule) *ProjectsInstancesDatabasesBackupSchedulesCreateCall { + c := &ProjectsInstancesDatabasesBackupSchedulesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.backupschedule = backupschedule + return c +} + +// BackupScheduleId sets the optional parameter "backupScheduleId": Required. +// The Id to use for the backup schedule. The `backup_schedule_id` appended to +// `parent` forms the full backup schedule name of the form +// `projects//instances//databases//backupSchedules/`. +func (c *ProjectsInstancesDatabasesBackupSchedulesCreateCall) BackupScheduleId(backupScheduleId string) *ProjectsInstancesDatabasesBackupSchedulesCreateCall { + c.urlParams_.Set("backupScheduleId", backupScheduleId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsInstancesDatabasesBackupSchedulesCreateCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesBackupSchedulesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsInstancesDatabasesBackupSchedulesCreateCall) Context(ctx context.Context) *ProjectsInstancesDatabasesBackupSchedulesCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsInstancesDatabasesBackupSchedulesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesBackupSchedulesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backupschedule) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/backupSchedules") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.backupSchedules.create" call. +// Any non-2xx status code is an error. Response headers are in either +// *BackupSchedule.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. 
+func (c *ProjectsInstancesDatabasesBackupSchedulesCreateCall) Do(opts ...googleapi.CallOption) (*BackupSchedule, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &BackupSchedule{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsInstancesDatabasesBackupSchedulesDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a backup schedule. +// +// - name: The name of the schedule to delete. Values are of the form +// `projects//instances//databases//backupSchedules/`. +func (r *ProjectsInstancesDatabasesBackupSchedulesService) Delete(name string) *ProjectsInstancesDatabasesBackupSchedulesDeleteCall { + c := &ProjectsInstancesDatabasesBackupSchedulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsInstancesDatabasesBackupSchedulesDeleteCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesBackupSchedulesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsInstancesDatabasesBackupSchedulesDeleteCall) Context(ctx context.Context) *ProjectsInstancesDatabasesBackupSchedulesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsInstancesDatabasesBackupSchedulesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesBackupSchedulesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.backupSchedules.delete" call. +// Any non-2xx status code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsInstancesDatabasesBackupSchedulesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsInstancesDatabasesBackupSchedulesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets backup schedule for the input schedule name. +// +// - name: The name of the schedule to retrieve. Values are of the form +// `projects//instances//databases//backupSchedules/`. +func (r *ProjectsInstancesDatabasesBackupSchedulesService) Get(name string) *ProjectsInstancesDatabasesBackupSchedulesGetCall { + c := &ProjectsInstancesDatabasesBackupSchedulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsInstancesDatabasesBackupSchedulesGetCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesBackupSchedulesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsInstancesDatabasesBackupSchedulesGetCall) IfNoneMatch(entityTag string) *ProjectsInstancesDatabasesBackupSchedulesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsInstancesDatabasesBackupSchedulesGetCall) Context(ctx context.Context) *ProjectsInstancesDatabasesBackupSchedulesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsInstancesDatabasesBackupSchedulesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesBackupSchedulesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.backupSchedules.get" call. +// Any non-2xx status code is an error. 
Response headers are in either +// *BackupSchedule.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsInstancesDatabasesBackupSchedulesGetCall) Do(opts ...googleapi.CallOption) (*BackupSchedule, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &BackupSchedule{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsInstancesDatabasesBackupSchedulesGetIamPolicyCall struct { + s *Service + resource string + getiampolicyrequest *GetIamPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for a database or backup +// resource. Returns an empty policy if a database or backup exists but does +// not have a policy set. Authorization requires +// `spanner.databases.getIamPolicy` permission on resource. For backups, +// authorization requires `spanner.backups.getIamPolicy` permission on +// resource. +// +// - resource: REQUIRED: The Cloud Spanner resource for which the policy is +// being retrieved. The format is `projects//instances/` for instance +// resources and `projects//instances//databases/` for database resources. +func (r *ProjectsInstancesDatabasesBackupSchedulesService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ProjectsInstancesDatabasesBackupSchedulesGetIamPolicyCall { + c := &ProjectsInstancesDatabasesBackupSchedulesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.getiampolicyrequest = getiampolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsInstancesDatabasesBackupSchedulesGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesBackupSchedulesGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsInstancesDatabasesBackupSchedulesGetIamPolicyCall) Context(ctx context.Context) *ProjectsInstancesDatabasesBackupSchedulesGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *ProjectsInstancesDatabasesBackupSchedulesGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesBackupSchedulesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.getiampolicyrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.backupSchedules.getIamPolicy" call. +// Any non-2xx status code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsInstancesDatabasesBackupSchedulesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsInstancesDatabasesBackupSchedulesListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists all the backup schedules for the database. +// +// - parent: Database is the parent resource whose backup schedules should be +// listed. Values are of the form projects//instances//databases/. +func (r *ProjectsInstancesDatabasesBackupSchedulesService) List(parent string) *ProjectsInstancesDatabasesBackupSchedulesListCall { + c := &ProjectsInstancesDatabasesBackupSchedulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": Number of backup schedules +// to be returned in the response. If 0 or less, defaults to the server's +// maximum allowed page size. +func (c *ProjectsInstancesDatabasesBackupSchedulesListCall) PageSize(pageSize int64) *ProjectsInstancesDatabasesBackupSchedulesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If non-empty, +// `page_token` should contain a next_page_token from a previous +// ListBackupSchedulesResponse to the same `parent`. 
+func (c *ProjectsInstancesDatabasesBackupSchedulesListCall) PageToken(pageToken string) *ProjectsInstancesDatabasesBackupSchedulesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsInstancesDatabasesBackupSchedulesListCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesBackupSchedulesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsInstancesDatabasesBackupSchedulesListCall) IfNoneMatch(entityTag string) *ProjectsInstancesDatabasesBackupSchedulesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsInstancesDatabasesBackupSchedulesListCall) Context(ctx context.Context) *ProjectsInstancesDatabasesBackupSchedulesListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsInstancesDatabasesBackupSchedulesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesBackupSchedulesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/backupSchedules") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.backupSchedules.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *ListBackupSchedulesResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsInstancesDatabasesBackupSchedulesListCall) Do(opts ...googleapi.CallOption) (*ListBackupSchedulesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ListBackupSchedulesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsInstancesDatabasesBackupSchedulesListCall) Pages(ctx context.Context, f func(*ListBackupSchedulesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsInstancesDatabasesBackupSchedulesPatchCall struct { + s *Service + nameid string + backupschedule *BackupSchedule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a backup schedule. +// +// - name: Identifier. Output only for the CreateBackupSchedule operation. +// Required for the UpdateBackupSchedule operation. A globally unique +// identifier for the backup schedule which cannot be changed. Values are of +// the form `projects//instances//databases//backupSchedules/a-z*[a-z0-9]` +// The final segment of the name must be between 2 and 60 characters in +// length. +func (r *ProjectsInstancesDatabasesBackupSchedulesService) Patch(nameid string, backupschedule *BackupSchedule) *ProjectsInstancesDatabasesBackupSchedulesPatchCall { + c := &ProjectsInstancesDatabasesBackupSchedulesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.nameid = nameid + c.backupschedule = backupschedule + return c +} + +// UpdateMask sets the optional parameter "updateMask": Required. A mask +// specifying which fields in the BackupSchedule resource should be updated. +// This mask is relative to the BackupSchedule resource, not to the request +// message. The field mask must always be specified; this prevents any future +// fields from being erased accidentally. +func (c *ProjectsInstancesDatabasesBackupSchedulesPatchCall) UpdateMask(updateMask string) *ProjectsInstancesDatabasesBackupSchedulesPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsInstancesDatabasesBackupSchedulesPatchCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesBackupSchedulesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsInstancesDatabasesBackupSchedulesPatchCall) Context(ctx context.Context) *ProjectsInstancesDatabasesBackupSchedulesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *ProjectsInstancesDatabasesBackupSchedulesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesBackupSchedulesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backupschedule) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.nameid, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.backupSchedules.patch" call. +// Any non-2xx status code is an error. Response headers are in either +// *BackupSchedule.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsInstancesDatabasesBackupSchedulesPatchCall) Do(opts ...googleapi.CallOption) (*BackupSchedule, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &BackupSchedule{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsInstancesDatabasesBackupSchedulesSetIamPolicyCall struct { + s *Service + resource string + setiampolicyrequest *SetIamPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on a database or backup +// resource. Replaces any existing policy. Authorization requires +// `spanner.databases.setIamPolicy` permission on resource. For backups, +// authorization requires `spanner.backups.setIamPolicy` permission on +// resource. +// +// - resource: REQUIRED: The Cloud Spanner resource for which the policy is +// being set. The format is `projects//instances/` for instance resources and +// `projects//instances//databases/` for databases resources. +func (r *ProjectsInstancesDatabasesBackupSchedulesService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsInstancesDatabasesBackupSchedulesSetIamPolicyCall { + c := &ProjectsInstancesDatabasesBackupSchedulesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.setiampolicyrequest = setiampolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. 
+func (c *ProjectsInstancesDatabasesBackupSchedulesSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesBackupSchedulesSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsInstancesDatabasesBackupSchedulesSetIamPolicyCall) Context(ctx context.Context) *ProjectsInstancesDatabasesBackupSchedulesSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsInstancesDatabasesBackupSchedulesSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesBackupSchedulesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.backupSchedules.setIamPolicy" call. +// Any non-2xx status code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsInstancesDatabasesBackupSchedulesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsInstancesDatabasesBackupSchedulesTestIamPermissionsCall struct { + s *Service + resource string + testiampermissionsrequest *TestIamPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that the caller has on the specified +// database or backup resource. Attempting this RPC on a non-existent Cloud +// Spanner database will result in a NOT_FOUND error if the user has +// `spanner.databases.list` permission on the containing Cloud Spanner +// instance. Otherwise returns an empty set of permissions. 
Calling this method +// on a backup that does not exist will result in a NOT_FOUND error if the user +// has `spanner.backups.list` permission on the containing instance. +// +// - resource: REQUIRED: The Cloud Spanner resource for which permissions are +// being tested. The format is `projects//instances/` for instance resources +// and `projects//instances//databases/` for database resources. +func (r *ProjectsInstancesDatabasesBackupSchedulesService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsInstancesDatabasesBackupSchedulesTestIamPermissionsCall { + c := &ProjectsInstancesDatabasesBackupSchedulesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.testiampermissionsrequest = testiampermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsInstancesDatabasesBackupSchedulesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesBackupSchedulesTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsInstancesDatabasesBackupSchedulesTestIamPermissionsCall) Context(ctx context.Context) *ProjectsInstancesDatabasesBackupSchedulesTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsInstancesDatabasesBackupSchedulesTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesBackupSchedulesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.backupSchedules.testIamPermissions" call. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsInstancesDatabasesBackupSchedulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TestIamPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + type ProjectsInstancesDatabasesDatabaseRolesListCall struct { s *Service parent string diff --git a/terraform/providers/google/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json b/terraform/providers/google/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json index 72c96abfa96..61ed8ad2d21 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json @@ -608,6 +608,73 @@ }, "instances": { "methods": { + "ListServerCertificates": { + "description": "Lists all versions of server certificates and certificate authorities (CAs) for the specified instance. There can be up to three sets of certs listed: the certificate that is currently in use, a future that has been added but not yet used to sign a certificate, and a certificate that has been rotated out.", + "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/listServerCertificates", + "httpMethod": "GET", + "id": "sql.instances.ListServerCertificates", + "parameterOrder": [ + "project", + "instance" + ], + "parameters": { + "instance": { + "description": "Required. Cloud SQL instance ID. This does not include the project ID.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Required. Project ID of the project that contains the instance.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "sql/v1beta4/projects/{project}/instances/{instance}/listServerCertificates", + "response": { + "$ref": "InstancesListServerCertificatesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, + "RotateServerCertificate": { + "description": "Rotates the server certificate version to one previously added with the addServerCertificate method. For instances not using Certificate Authority Service (CAS) server CA, use RotateServerCa instead.", + "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/rotateServerCertificate", + "httpMethod": "POST", + "id": "sql.instances.RotateServerCertificate", + "parameterOrder": [ + "project", + "instance" + ], + "parameters": { + "instance": { + "description": "Required. Cloud SQL instance ID. This does not include the project ID.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Required. 
Project ID of the project that contains the instance.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "sql/v1beta4/projects/{project}/instances/{instance}/rotateServerCertificate", + "request": { + "$ref": "InstancesRotateServerCertificateRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, "acquireSsrsLease": { "description": "Acquire a lease for the setup of SQL Server Reporting Services (SSRS).", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/acquireSsrsLease", @@ -644,7 +711,7 @@ ] }, "addServerCa": { - "description": "Add a new trusted Certificate Authority (CA) version for the specified instance. Required to prepare for a certificate rotation. If a CA version was previously added but never used in a certificate rotation, this operation replaces that version. There cannot be more than one CA version waiting to be rotated in.", + "description": "Add a new trusted Certificate Authority (CA) version for the specified instance. Required to prepare for a certificate rotation. If a CA version was previously added but never used in a certificate rotation, this operation replaces that version. There cannot be more than one CA version waiting to be rotated in. For instances that have enabled Certificate Authority Service (CAS) based server CA, use AddServerCertificate to add a new server certificate.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/addServerCa", "httpMethod": "POST", "id": "sql.instances.addServerCa", @@ -675,6 +742,38 @@ "https://www.googleapis.com/auth/sqlservice.admin" ] }, + "addServerCertificate": { + "description": "Add a new trusted server certificate version for the specified instance using Certificate Authority Service (CAS) server CA. Required to prepare for a certificate rotation. If a server certificate version was previously added but never used in a certificate rotation, this operation replaces that version. There cannot be more than one certificate version waiting to be rotated in. For instances not using CAS server CA, use AddServerCa instead.", + "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/addServerCertificate", + "httpMethod": "POST", + "id": "sql.instances.addServerCertificate", + "parameterOrder": [ + "project", + "instance" + ], + "parameters": { + "instance": { + "description": "Required. Cloud SQL instance ID. This does not include the project ID.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Required. Project ID of the project that contains the instance.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "sql/v1beta4/projects/{project}/instances/{instance}/addServerCertificate", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, "clone": { "description": "Creates a Cloud SQL instance as a clone of the source instance. Using this operation might cause your instance to restart.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/clone", @@ -720,6 +819,18 @@ "instance" ], "parameters": { + "finalBackupExpiryTime": { + "description": "Optional. Final Backup expiration time. 
Timestamp in UTC of when this resource is considered expired.", + "format": "google-datetime", + "location": "query", + "type": "string" + }, + "finalBackupTtlDays": { + "description": "Optional. Retention period of the final backup.", + "format": "int64", + "location": "query", + "type": "string" + }, "instance": { "description": "Cloud SQL instance ID. This does not include the project ID.", "location": "path", @@ -1287,7 +1398,7 @@ ] }, "rotateServerCa": { - "description": "Rotates the server certificate to one signed by the Certificate Authority (CA) version previously added with the addServerCA method.", + "description": "Rotates the server certificate to one signed by the Certificate Authority (CA) version previously added with the addServerCA method. For instances that have enabled Certificate Authority Service (CAS) based server CA, use RotateServerCertificate to rotate the server certificate.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/rotateServerCa", "httpMethod": "POST", "id": "sql.instances.rotateServerCa", @@ -2267,7 +2378,7 @@ } } }, - "revision": "20240529", + "revision": "20241004", "rootUrl": "https://sqladmin.googleapis.com/", "schemas": { "AclEntry": { @@ -2524,8 +2635,8 @@ ], "enumDescriptions": [ "This is an unknown BackupKind.", - "The snapshot based backups", - "Physical backups" + "Snapshot-based backups.", + "Physical backups." ], "type": "string" }, @@ -2572,6 +2683,12 @@ "description": "Location of the backups.", "type": "string" }, + "maxChargeableBytes": { + "description": "Output only. The maximum chargeable bytes for the backup.", + "format": "int64", + "readOnly": true, + "type": "string" + }, "selfLink": { "description": "The URI of this resource.", "type": "string" @@ -2771,6 +2888,7 @@ "POSTGRES_14", "POSTGRES_15", "POSTGRES_16", + "POSTGRES_17", "MYSQL_8_0", "MYSQL_8_0_18", "MYSQL_8_0_26", @@ -2789,7 +2907,6 @@ "MYSQL_8_0_39", "MYSQL_8_0_40", "MYSQL_8_4", - "MYSQL_8_4_0", "SQLSERVER_2019_STANDARD", "SQLSERVER_2019_ENTERPRISE", "SQLSERVER_2019_EXPRESS", @@ -2822,8 +2939,8 @@ false, false, false, - true, false, + true, false, false, false, @@ -2863,6 +2980,7 @@ "The database version is PostgreSQL 14.", "The database version is PostgreSQL 15.", "The database version is PostgreSQL 16.", + "The database version is PostgreSQL 17.", "The database version is MySQL 8.", "The database major version is MySQL 8.0 and the minor version is 18.", "The database major version is MySQL 8.0 and the minor version is 26.", @@ -2881,7 +2999,6 @@ "The database major version is MySQL 8.0 and the minor version is 39.", "The database major version is MySQL 8.0 and the minor version is 40.", "The database version is MySQL 8.4.", - "The database version is MySQL 8.4 and the patch version is 0.", "The database version is SQL Server 2019 Standard.", "The database version is SQL Server 2019 Enterprise.", "The database version is SQL Server 2019 Express.", @@ -2919,6 +3036,20 @@ "serverCaCert": { "$ref": "SslCert", "description": "SSL configuration." + }, + "serverCaMode": { + "description": "Specify what type of CA is used for the server certificate.", + "enum": [ + "CA_MODE_UNSPECIFIED", + "GOOGLE_MANAGED_INTERNAL_CA", + "GOOGLE_MANAGED_CAS_CA" + ], + "enumDescriptions": [ + "CA mode is unspecified. It is effectively the same as `GOOGLE_MANAGED_INTERNAL_CA`.", + "Google-managed self-signed internal CA.", + "Google-managed regional CA part of root CA hierarchy hosted on Google Cloud's Certificate Authority Service (CAS)." 
+ ], + "type": "string" } }, "type": "object" @@ -3066,6 +3197,7 @@ "POSTGRES_14", "POSTGRES_15", "POSTGRES_16", + "POSTGRES_17", "MYSQL_8_0", "MYSQL_8_0_18", "MYSQL_8_0_26", @@ -3084,7 +3216,6 @@ "MYSQL_8_0_39", "MYSQL_8_0_40", "MYSQL_8_4", - "MYSQL_8_4_0", "SQLSERVER_2019_STANDARD", "SQLSERVER_2019_ENTERPRISE", "SQLSERVER_2019_EXPRESS", @@ -3117,8 +3248,8 @@ false, false, false, - true, false, + true, false, false, false, @@ -3158,6 +3289,7 @@ "The database version is PostgreSQL 14.", "The database version is PostgreSQL 15.", "The database version is PostgreSQL 16.", + "The database version is PostgreSQL 17.", "The database version is MySQL 8.", "The database major version is MySQL 8.0 and the minor version is 18.", "The database major version is MySQL 8.0 and the minor version is 26.", @@ -3176,7 +3308,6 @@ "The database major version is MySQL 8.0 and the minor version is 39.", "The database major version is MySQL 8.0 and the minor version is 40.", "The database version is MySQL 8.4.", - "The database version is MySQL 8.4 and the patch version is 0.", "The database version is SQL Server 2019 Standard.", "The database version is SQL Server 2019 Enterprise.", "The database version is SQL Server 2019 Express.", @@ -3323,6 +3454,11 @@ "description": "Initial root password. Use only on creation. You must set root passwords before you can connect to PostgreSQL instances.", "type": "string" }, + "satisfiesPzi": { + "description": "Output only. This status indicates whether the instance satisfies PZI. The status is reserved for future use.", + "readOnly": true, + "type": "boolean" + }, "satisfiesPzs": { "description": "This status indicates whether the instance satisfies PZS. The status is reserved for future use.", "type": "boolean" @@ -3420,6 +3556,10 @@ }, "type": "array" }, + "switchTransactionLogsToCloudStorageEnabled": { + "description": "Input only. Whether Cloud SQL is enabled to switch storing point-in-time recovery log files from a data disk to Cloud Storage.", + "type": "boolean" + }, "upgradableDatabaseVersions": { "description": "Output only. All database versions that are available for upgrade.", "items": { @@ -3629,6 +3769,16 @@ "description": "Whether or not the backup can be used as a differential base copy_only backup can not be served as differential base", "type": "boolean" }, + "exportLogEndTime": { + "description": "Optional. The end timestamp when transaction log will be included in the export operation. [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`) in UTC. When omitted, all available logs until current time will be included. Only applied to Cloud SQL for SQL Server.", + "format": "google-datetime", + "type": "string" + }, + "exportLogStartTime": { + "description": "Optional. The begin timestamp when transaction log will be included in the export operation. [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`) in UTC. When omitted, all available logs from the beginning of retention period will be included. Only applied to Cloud SQL for SQL Server.", + "format": "google-datetime", + "type": "string" + }, "stripeCount": { "description": "Option for specifying how many stripes to use for the export. If blank, and the value of the striped field is true, the number of stripes is automatically chosen.", "format": "int32", @@ -3716,6 +3866,20 @@ "description": "Optional. 
Whether or not the export should be parallel.", "type": "boolean" }, + "postgresExportOptions": { + "description": "Options for exporting from a Cloud SQL for PostgreSQL instance.", + "properties": { + "clean": { + "description": "Optional. Use this option to include DROP SQL statements. These statements are used to delete database objects before running the import operation.", + "type": "boolean" + }, + "ifExists": { + "description": "Optional. Option to include an IF EXISTS SQL statement with each DROP statement produced by clean.", + "type": "boolean" + } + }, + "type": "object" + }, "schemaOnly": { "description": "Export only schemas.", "type": "boolean" @@ -3798,6 +3962,7 @@ "POSTGRES_14", "POSTGRES_15", "POSTGRES_16", + "POSTGRES_17", "MYSQL_8_0", "MYSQL_8_0_18", "MYSQL_8_0_26", @@ -3816,7 +3981,6 @@ "MYSQL_8_0_39", "MYSQL_8_0_40", "MYSQL_8_4", - "MYSQL_8_4_0", "SQLSERVER_2019_STANDARD", "SQLSERVER_2019_ENTERPRISE", "SQLSERVER_2019_EXPRESS", @@ -3849,8 +4013,8 @@ false, false, false, - true, false, + true, false, false, false, @@ -3890,6 +4054,7 @@ "The database version is PostgreSQL 14.", "The database version is PostgreSQL 15.", "The database version is PostgreSQL 16.", + "The database version is PostgreSQL 17.", "The database version is MySQL 8.", "The database major version is MySQL 8.0 and the minor version is 18.", "The database major version is MySQL 8.0 and the minor version is 26.", @@ -3908,7 +4073,6 @@ "The database major version is MySQL 8.0 and the minor version is 39.", "The database major version is MySQL 8.0 and the minor version is 40.", "The database version is MySQL 8.4.", - "The database version is MySQL 8.4 and the patch version is 0.", "The database version is SQL Server 2019 Standard.", "The database version is SQL Server 2019 Enterprise.", "The database version is SQL Server 2019 Express.", @@ -4198,6 +4362,20 @@ "description": "Optional. Whether or not the import should be parallel.", "type": "boolean" }, + "postgresImportOptions": { + "description": "Optional. Options for importing from a Cloud SQL for PostgreSQL instance.", + "properties": { + "clean": { + "description": "Optional. The --clean flag for the pg_restore utility. This flag applies only if you enabled Cloud SQL to import files in parallel.", + "type": "boolean" + }, + "ifExists": { + "description": "Optional. The --if-exists flag for the pg_restore utility. This flag applies only if you enabled Cloud SQL to import files in parallel.", + "type": "boolean" + } + }, + "type": "object" + }, "threads": { "description": "Optional. 
The number of threads to use for parallel import.", "format": "int32", @@ -4388,6 +4566,35 @@ }, "type": "object" }, + "InstancesListServerCertificatesResponse": { + "description": "Instances ListServerCertificatess response.", + "id": "InstancesListServerCertificatesResponse", + "properties": { + "activeVersion": { + "description": "The `sha1_fingerprint` of the active certificate from `server_certs`.", + "type": "string" + }, + "caCerts": { + "description": "List of server CA certificates for the instance.", + "items": { + "$ref": "SslCert" + }, + "type": "array" + }, + "kind": { + "description": "This is always `sql#instancesListServerCertificates`.", + "type": "string" + }, + "serverCerts": { + "description": "List of server certificates for the instance, signed by the corresponding CA from the `ca_certs` list.", + "items": { + "$ref": "SslCert" + }, + "type": "array" + } + }, + "type": "object" + }, "InstancesReencryptRequest": { "description": "Database Instance reencrypt request.", "id": "InstancesReencryptRequest", @@ -4421,6 +4628,17 @@ }, "type": "object" }, + "InstancesRotateServerCertificateRequest": { + "description": "Rotate Server Certificate request.", + "id": "InstancesRotateServerCertificateRequest", + "properties": { + "rotateServerCertificateContext": { + "$ref": "RotateServerCertificateContext", + "description": "Optional. Contains details about the rotate server CA operation." + } + }, + "type": "object" + }, "InstancesTruncateLogRequest": { "description": "Instance truncate log request.", "id": "InstancesTruncateLogRequest", @@ -4467,6 +4685,20 @@ "description": "Use `ssl_mode` instead. Whether SSL/TLS connections over IP are enforced. If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. If set to true, then only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, then use the `ssl_mode` flag instead of the legacy `require_ssl` flag.", "type": "boolean" }, + "serverCaMode": { + "description": "Specify what type of CA is used for the server certificate.", + "enum": [ + "CA_MODE_UNSPECIFIED", + "GOOGLE_MANAGED_INTERNAL_CA", + "GOOGLE_MANAGED_CAS_CA" + ], + "enumDescriptions": [ + "CA mode is unspecified. It is effectively the same as `GOOGLE_MANAGED_INTERNAL_CA`.", + "Google-managed self-signed internal CA.", + "Google-managed regional CA part of root CA hierarchy hosted on Google Cloud's Certificate Authority Service (CAS)." + ], + "type": "string" + }, "sslMode": { "description": "Specify how SSL/TLS is enforced in database connections. If you must use the `require_ssl` flag for backward compatibility, then only the following value pairs are valid: For PostgreSQL and MySQL: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` For SQL Server: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=true` The value of `ssl_mode` has priority over the value of `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false`, `ssl_mode=ENCRYPTED_ONLY` means accept only SSL connections, while `require_ssl=false` means accept both non-SSL and SSL connections. 
In this case, MySQL and PostgreSQL databases respect `ssl_mode` and accepts only SSL connections.", "enum": [ @@ -4549,12 +4781,12 @@ "id": "MaintenanceWindow", "properties": { "day": { - "description": "day of week (1-7), starting on Monday.", + "description": "Day of week - `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, or `SUNDAY`. Specify in the UTC time zone. Returned in output as an integer, 1 to 7, where `1` equals Monday.", "format": "int32", "type": "integer" }, "hour": { - "description": "hour of day - 0 to 23.", + "description": "Hour of day - 0 to 23. Specify in the UTC time zone.", "format": "int32", "type": "integer" }, @@ -4563,7 +4795,7 @@ "type": "string" }, "updateTrack": { - "description": "Maintenance timing setting: `canary` (Earlier) or `stable` (Later). [Learn more](https://cloud.google.com/sql/docs/mysql/instance-settings#maintenance-timing-2ndgen).", + "description": "Maintenance timing settings: `canary`, `stable`, or `week5`. For more information, see [About maintenance on Cloud SQL instances](https://cloud.google.com/sql/docs/mysql/maintenance).", "enum": [ "SQL_UPDATE_TRACK_UNSPECIFIED", "canary", @@ -4572,9 +4804,9 @@ ], "enumDescriptions": [ "This is an unknown maintenance timing preference.", - "For instance update that requires a restart, this update track indicates your instance prefer to restart for new version early in maintenance window.", - "For instance update that requires a restart, this update track indicates your instance prefer to let Cloud SQL choose the timing of restart (within its Maintenance window, if applicable).", - "For instance update that requires a restart, this update track indicates your instance prefer to let Cloud SQL choose the timing of restart (within its Maintenance window, if applicable) to be at least 5 weeks after the notification." + "For an instance with a scheduled maintenance window, this maintenance timing indicates that the maintenance update is scheduled 7 to 14 days after the notification is sent out. Also referred to as `Week 1` (Console) and `preview` (gcloud CLI).", + "For an instance with a scheduled maintenance window, this maintenance timing indicates that the maintenance update is scheduled 15 to 21 days after the notification is sent out. Also referred to as `Week 2` (Console) and `production` (gcloud CLI).", + "For instance with a scheduled maintenance window, this maintenance timing indicates that the maintenance update is scheduled 35 to 42 days after the notification is sent out." ], "type": "string" } @@ -4784,7 +5016,8 @@ "RECONFIGURE_OLD_PRIMARY", "CLUSTER_MAINTENANCE", "SELF_SERVICE_MAINTENANCE", - "SWITCHOVER_TO_REPLICA" + "SWITCHOVER_TO_REPLICA", + "MAJOR_VERSION_UPGRADE" ], "enumDeprecated": [ false, @@ -4829,7 +5062,8 @@ false, false, false, - false, + true, + true, false, false ], @@ -4878,7 +5112,8 @@ "Reconfigures old primary after a promote replica operation. Effect of a promote operation to the old primary is executed in this operation, asynchronously from the promote replica operation executed to the replica.", "Indicates that the instance, its read replicas, and its cascading replicas are in maintenance. Maintenance typically gets initiated on groups of replicas first, followed by the primary instance. For each instance, maintenance typically causes the instance to be unavailable for 1-3 minutes.", "Indicates that the instance (and any of its replicas) are currently in maintenance. This is initiated as a self-service request by using SSM. 
Maintenance typically causes the instance to be unavailable for 1-3 minutes.", - "Switches a primary instance to a replica. This operation runs as part of a switchover operation to the original primary instance." + "Switches a primary instance to a replica. This operation runs as part of a switchover operation to the original primary instance.", + "Updates the major version of a Cloud SQL instance." ], "type": "string" }, @@ -5103,6 +5338,33 @@ }, "type": "object" }, + "PscAutoConnectionConfig": { + "description": "Settings for an automatically-setup Private Service Connect consumer endpoint that is used to connect to a Cloud SQL instance.", + "id": "PscAutoConnectionConfig", + "properties": { + "consumerNetwork": { + "description": "The consumer network of this consumer endpoint. This must be a resource path that includes both the host project and the network name. For example, `projects/project1/global/networks/network1`. The consumer host project of this network might be different from the consumer service project.", + "type": "string" + }, + "consumerNetworkStatus": { + "description": "The connection policy status of the consumer network.", + "type": "string" + }, + "consumerProject": { + "description": "This is the project ID of consumer service project of this consumer endpoint. Optional. This is only applicable if consumer_network is a shared vpc network.", + "type": "string" + }, + "ipAddress": { + "description": "The IP address of the consumer endpoint.", + "type": "string" + }, + "status": { + "description": "The connection status of the consumer endpoint.", + "type": "string" + } + }, + "type": "object" + }, "PscConfig": { "description": "PSC settings for a Cloud SQL instance.", "id": "PscConfig", @@ -5114,6 +5376,13 @@ }, "type": "array" }, + "pscAutoConnections": { + "description": "Optional. The list of settings for requested Private Service Connect consumer endpoints that can be used to connect to this Cloud SQL instance.", + "items": { + "$ref": "PscAutoConnectionConfig" + }, + "type": "array" + }, "pscEnabled": { "description": "Whether PSC connectivity is enabled for this instance.", "type": "boolean" @@ -5231,6 +5500,21 @@ }, "type": "object" }, + "RotateServerCertificateContext": { + "description": "Instance rotate server certificate context.", + "id": "RotateServerCertificateContext", + "properties": { + "kind": { + "description": "Optional. This is always `sql#rotateServerCertificateContext`.", + "type": "string" + }, + "nextVersion": { + "description": "Optional. The fingerprint of the next version to be rotated to. If left unspecified, will be rotated to the most recently added server certificate version.", + "type": "string" + } + }, + "type": "object" + }, "Settings": { "description": "Database instance settings.", "id": "Settings", @@ -5381,6 +5665,10 @@ ], "type": "string" }, + "enableDataplexIntegration": { + "description": "Optional. By default, Cloud SQL instances have schema extraction disabled for Dataplex. When this parameter is set to true, schema extraction for Dataplex on Cloud SQL instances is activated.", + "type": "boolean" + }, "enableGoogleMlIntegration": { "description": "Optional. When this parameter is set to true, Cloud SQL instances can connect to Vertex AI to pass requests for real-time predictions and insights to the AI. The default value is false. 
This applies only to Cloud SQL for PostgreSQL instances.", "type": "boolean" @@ -5551,7 +5839,12 @@ "INSUFFICIENT_MACHINE_TIER", "UNSUPPORTED_EXTENSIONS_NOT_MIGRATED", "EXTENSIONS_NOT_MIGRATED", - "PG_CRON_FLAG_ENABLED_IN_REPLICA" + "PG_CRON_FLAG_ENABLED_IN_REPLICA", + "EXTENSIONS_NOT_ENABLED_IN_REPLICA", + "UNSUPPORTED_COLUMNS", + "USERS_NOT_CREATED_IN_REPLICA", + "UNSUPPORTED_SYSTEM_OBJECTS", + "UNSUPPORTED_TABLES_WITH_REPLICA_IDENTITY" ], "enumDescriptions": [ "", @@ -5574,7 +5867,7 @@ "The primary instance database parameter setup doesn't allow EM sync.", "The gtid_mode is not supported, applicable for MySQL.", "SQL Server Agent is not running.", - "The table definition is not support due to missing primary key or replica identity, applicable for postgres.", + "The table definition is not support due to missing primary key or replica identity, applicable for postgres. Note that this is a warning and won't block the migration.", "The customer has a definer that will break EM setup.", "SQL Server @@SERVERNAME does not match actual host name.", "The primary instance has been setup and will fail the setup.", @@ -5601,7 +5894,12 @@ "The data size of the source instance is greater than 1 TB, the number of cores of the replica instance is less than 8, and the memory of the replica is less than 32 GB.", "The warning message indicates the unsupported extensions will not be migrated to the destination.", "The warning message indicates the pg_cron extension and settings will not be migrated to the destination.", - "The error message indicates that pg_cron flags are enabled on the destination which is not supported during the migration." + "The error message indicates that pg_cron flags are enabled on the destination which is not supported during the migration.", + "This error message indicates that the specified extensions are not enabled on destination instance. For example, before you can migrate data to the destination instance, you must enable the PGAudit extension on the instance.", + "The source database has generated columns that can't be migrated. Please change them to regular columns before migration.", + "The source database has users that aren't created in the replica. First, create all users, which are in the pg_user_mappings table of the source database, in the destination instance. Then, perform the migration.", + "The selected objects include system objects that aren't supported for migration.", + "The source database has tables with the FULL or NOTHING replica identity. Before starting your migration, either remove the identity or change it to DEFAULT. Note that this is an error and will block the migration." ], "type": "string" } @@ -6214,9 +6512,9 @@ "The database's built-in user type.", "Cloud IAM user.", "Cloud IAM service account.", - "Cloud IAM group non-login user.", - "Cloud IAM group login user.", - "Cloud IAM group service account." + "Cloud IAM group. Not used for login.", + "Read-only. Login for a user that belongs to the Cloud IAM group.", + "Read-only. Login for a service account that belongs to the Cloud IAM group." 
], "type": "string" } diff --git a/terraform/providers/google/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go b/terraform/providers/google/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go index 901f43fbe11..34a3bec27d7 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go @@ -317,9 +317,9 @@ type AclEntry struct { NullFields []string `json:"-"` } -func (s *AclEntry) MarshalJSON() ([]byte, error) { +func (s AclEntry) MarshalJSON() ([]byte, error) { type NoMethod AclEntry - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AcquireSsrsLeaseContext: Acquire SSRS lease context. @@ -347,9 +347,9 @@ type AcquireSsrsLeaseContext struct { NullFields []string `json:"-"` } -func (s *AcquireSsrsLeaseContext) MarshalJSON() ([]byte, error) { +func (s AcquireSsrsLeaseContext) MarshalJSON() ([]byte, error) { type NoMethod AcquireSsrsLeaseContext - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AdvancedMachineFeatures: Specifies options for controlling advanced machine @@ -370,9 +370,9 @@ type AdvancedMachineFeatures struct { NullFields []string `json:"-"` } -func (s *AdvancedMachineFeatures) MarshalJSON() ([]byte, error) { +func (s AdvancedMachineFeatures) MarshalJSON() ([]byte, error) { type NoMethod AdvancedMachineFeatures - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ApiWarning: An Admin API warning message. @@ -409,9 +409,9 @@ type ApiWarning struct { NullFields []string `json:"-"` } -func (s *ApiWarning) MarshalJSON() ([]byte, error) { +func (s ApiWarning) MarshalJSON() ([]byte, error) { type NoMethod ApiWarning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AvailableDatabaseVersion: An available database version. It can be a major @@ -437,9 +437,9 @@ type AvailableDatabaseVersion struct { NullFields []string `json:"-"` } -func (s *AvailableDatabaseVersion) MarshalJSON() ([]byte, error) { +func (s AvailableDatabaseVersion) MarshalJSON() ([]byte, error) { type NoMethod AvailableDatabaseVersion - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackupConfiguration: Database instance backup configuration. @@ -494,9 +494,9 @@ type BackupConfiguration struct { NullFields []string `json:"-"` } -func (s *BackupConfiguration) MarshalJSON() ([]byte, error) { +func (s BackupConfiguration) MarshalJSON() ([]byte, error) { type NoMethod BackupConfiguration - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackupContext: Backup context. 
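The hunks above (and running through the rest of sqladmin-gen.go) mechanically convert every generated MarshalJSON from a pointer receiver to a value receiver, passing NoMethod(s) instead of NoMethod(*s). A minimal standalone sketch of why the pattern is safe, assuming only encoding/json semantics; the AclEntry here is a local stand-in rather than the generated type, and plain json.Marshal stands in for gensupport.MarshalJSON:

package main

import (
	"encoding/json"
	"fmt"
)

// Local stand-in for a generated struct.
type AclEntry struct {
	Name string `json:"name,omitempty"`
}

// Value receiver, as in the diff. The NoMethod alias has the same fields
// but no methods, so the nested Marshal call cannot recurse.
func (s AclEntry) MarshalJSON() ([]byte, error) {
	type NoMethod AclEntry
	return json.Marshal(NoMethod(s))
}

func main() {
	e := AclEntry{Name: "office"}
	// A value-receiver method is in the method set of both AclEntry and
	// *AclEntry, so values and pointers hit the custom marshaler alike.
	b1, _ := json.Marshal(e)
	b2, _ := json.Marshal(&e)
	fmt.Println(string(b1), string(b2)) // {"name":"office"} {"name":"office"}
}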
@@ -518,9 +518,9 @@ type BackupContext struct { NullFields []string `json:"-"` } -func (s *BackupContext) MarshalJSON() ([]byte, error) { +func (s BackupContext) MarshalJSON() ([]byte, error) { type NoMethod BackupContext - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackupReencryptionConfig: Backup Reencryption Config @@ -548,9 +548,9 @@ type BackupReencryptionConfig struct { NullFields []string `json:"-"` } -func (s *BackupReencryptionConfig) MarshalJSON() ([]byte, error) { +func (s BackupReencryptionConfig) MarshalJSON() ([]byte, error) { type NoMethod BackupReencryptionConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackupRetentionSettings: We currently only support backup retention by @@ -581,9 +581,9 @@ type BackupRetentionSettings struct { NullFields []string `json:"-"` } -func (s *BackupRetentionSettings) MarshalJSON() ([]byte, error) { +func (s BackupRetentionSettings) MarshalJSON() ([]byte, error) { type NoMethod BackupRetentionSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackupRun: A BackupRun resource. @@ -592,8 +592,8 @@ type BackupRun struct { // // Possible values: // "SQL_BACKUP_KIND_UNSPECIFIED" - This is an unknown BackupKind. - // "SNAPSHOT" - The snapshot based backups - // "PHYSICAL" - Physical backups + // "SNAPSHOT" - Snapshot-based backups. + // "PHYSICAL" - Physical backups. BackupKind string `json:"backupKind,omitempty"` // Description: The description of this run, only applicable to on-demand // backups. @@ -622,6 +622,9 @@ type BackupRun struct { Kind string `json:"kind,omitempty"` // Location: Location of the backups. Location string `json:"location,omitempty"` + // MaxChargeableBytes: Output only. The maximum chargeable bytes for the + // backup. + MaxChargeableBytes int64 `json:"maxChargeableBytes,omitempty,string"` // SelfLink: The URI of this resource. SelfLink string `json:"selfLink,omitempty"` // StartTime: The time the backup operation actually started in UTC timezone in @@ -677,9 +680,9 @@ type BackupRun struct { NullFields []string `json:"-"` } -func (s *BackupRun) MarshalJSON() ([]byte, error) { +func (s BackupRun) MarshalJSON() ([]byte, error) { type NoMethod BackupRun - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BackupRunsListResponse: Backup run list results. @@ -709,9 +712,9 @@ type BackupRunsListResponse struct { NullFields []string `json:"-"` } -func (s *BackupRunsListResponse) MarshalJSON() ([]byte, error) { +func (s BackupRunsListResponse) MarshalJSON() ([]byte, error) { type NoMethod BackupRunsListResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BinLogCoordinates: Binary log coordinates. 
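One substantive field lands in the hunk above: BackupRun.MaxChargeableBytes, an output-only int64 that the discovery document types as a string with format int64, hence the generated tag with the ",string" option. A self-contained sketch of what that tag option does, using a local stand-in struct:

package main

import (
	"encoding/json"
	"fmt"
)

// Local stand-in; the real field lives on the generated BackupRun.
type backupRun struct {
	// ",string" round-trips the int64 as a quoted JSON string, which is
	// how the API protects 64-bit values from JavaScript float precision.
	MaxChargeableBytes int64 `json:"maxChargeableBytes,omitempty,string"`
}

func main() {
	out, _ := json.Marshal(backupRun{MaxChargeableBytes: 5 << 30})
	fmt.Println(string(out)) // {"maxChargeableBytes":"5368709120"}

	var in backupRun
	_ = json.Unmarshal([]byte(`{"maxChargeableBytes":"1024"}`), &in)
	fmt.Println(in.MaxChargeableBytes) // 1024
}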
@@ -735,9 +738,9 @@ type BinLogCoordinates struct { NullFields []string `json:"-"` } -func (s *BinLogCoordinates) MarshalJSON() ([]byte, error) { +func (s BinLogCoordinates) MarshalJSON() ([]byte, error) { type NoMethod BinLogCoordinates - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CloneContext: Database instance clone context. @@ -788,9 +791,9 @@ type CloneContext struct { NullFields []string `json:"-"` } -func (s *CloneContext) MarshalJSON() ([]byte, error) { +func (s CloneContext) MarshalJSON() ([]byte, error) { type NoMethod CloneContext - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ConnectSettings: Connect settings retrieval response. @@ -838,6 +841,7 @@ type ConnectSettings struct { // "POSTGRES_14" - The database version is PostgreSQL 14. // "POSTGRES_15" - The database version is PostgreSQL 15. // "POSTGRES_16" - The database version is PostgreSQL 16. + // "POSTGRES_17" - The database version is PostgreSQL 17. // "MYSQL_8_0" - The database version is MySQL 8. // "MYSQL_8_0_18" - The database major version is MySQL 8.0 and the minor // version is 18. @@ -872,8 +876,6 @@ type ConnectSettings struct { // "MYSQL_8_0_40" - The database major version is MySQL 8.0 and the minor // version is 40. // "MYSQL_8_4" - The database version is MySQL 8.4. - // "MYSQL_8_4_0" - The database version is MySQL 8.4 and the patch version is - // 0. // "SQLSERVER_2019_STANDARD" - The database version is SQL Server 2019 // Standard. // "SQLSERVER_2019_ENTERPRISE" - The database version is SQL Server 2019 @@ -902,6 +904,15 @@ type ConnectSettings struct { Region string `json:"region,omitempty"` // ServerCaCert: SSL configuration. ServerCaCert *SslCert `json:"serverCaCert,omitempty"` + // ServerCaMode: Specify what type of CA is used for the server certificate. + // + // Possible values: + // "CA_MODE_UNSPECIFIED" - CA mode is unspecified. It is effectively the same + // as `GOOGLE_MANAGED_INTERNAL_CA`. + // "GOOGLE_MANAGED_INTERNAL_CA" - Google-managed self-signed internal CA. + // "GOOGLE_MANAGED_CAS_CA" - Google-managed regional CA part of root CA + // hierarchy hosted on Google Cloud's Certificate Authority Service (CAS). + ServerCaMode string `json:"serverCaMode,omitempty"` // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` @@ -918,9 +929,9 @@ type ConnectSettings struct { NullFields []string `json:"-"` } -func (s *ConnectSettings) MarshalJSON() ([]byte, error) { +func (s ConnectSettings) MarshalJSON() ([]byte, error) { type NoMethod ConnectSettings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DataCacheConfig: Data cache configurations. @@ -940,9 +951,9 @@ type DataCacheConfig struct { NullFields []string `json:"-"` } -func (s *DataCacheConfig) MarshalJSON() ([]byte, error) { +func (s DataCacheConfig) MarshalJSON() ([]byte, error) { type NoMethod DataCacheConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Database: Represents a SQL database on the Cloud SQL instance. 
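The ConnectSettings hunk above adds ServerCaMode with three possible enum strings. A hedged sketch of branching on it; describeCaMode is an illustrative helper, not part of the generated package, and only the enum strings come from the discovery document:

package main

import (
	"fmt"

	sqladmin "google.golang.org/api/sqladmin/v1beta4"
)

// describeCaMode is a hypothetical helper for logging or diagnostics.
func describeCaMode(cs *sqladmin.ConnectSettings) string {
	switch cs.ServerCaMode {
	case "GOOGLE_MANAGED_CAS_CA":
		return "regional CA hosted on Certificate Authority Service (CAS)"
	case "GOOGLE_MANAGED_INTERNAL_CA", "CA_MODE_UNSPECIFIED", "":
		// Per the enum descriptions, unspecified behaves the same as the
		// Google-managed self-signed internal CA.
		return "Google-managed self-signed internal CA"
	default:
		return "unrecognized CA mode: " + cs.ServerCaMode
	}
}

func main() {
	cs := &sqladmin.ConnectSettings{ServerCaMode: "GOOGLE_MANAGED_CAS_CA"}
	fmt.Println(describeCaMode(cs))
}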
@@ -984,9 +995,9 @@ type Database struct { NullFields []string `json:"-"` } -func (s *Database) MarshalJSON() ([]byte, error) { +func (s Database) MarshalJSON() ([]byte, error) { type NoMethod Database - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DatabaseFlags: Database flags for Cloud SQL instances. @@ -1014,9 +1025,9 @@ type DatabaseFlags struct { NullFields []string `json:"-"` } -func (s *DatabaseFlags) MarshalJSON() ([]byte, error) { +func (s DatabaseFlags) MarshalJSON() ([]byte, error) { type NoMethod DatabaseFlags - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DatabaseInstance: A Cloud SQL instance resource. @@ -1077,6 +1088,7 @@ type DatabaseInstance struct { // "POSTGRES_14" - The database version is PostgreSQL 14. // "POSTGRES_15" - The database version is PostgreSQL 15. // "POSTGRES_16" - The database version is PostgreSQL 16. + // "POSTGRES_17" - The database version is PostgreSQL 17. // "MYSQL_8_0" - The database version is MySQL 8. // "MYSQL_8_0_18" - The database major version is MySQL 8.0 and the minor // version is 18. @@ -1111,8 +1123,6 @@ type DatabaseInstance struct { // "MYSQL_8_0_40" - The database major version is MySQL 8.0 and the minor // version is 40. // "MYSQL_8_4" - The database version is MySQL 8.4. - // "MYSQL_8_4_0" - The database version is MySQL 8.4 and the patch version is - // 0. // "SQLSERVER_2019_STANDARD" - The database version is SQL Server 2019 // Standard. // "SQLSERVER_2019_ENTERPRISE" - The database version is SQL Server 2019 @@ -1207,6 +1217,9 @@ type DatabaseInstance struct { // RootPassword: Initial root password. Use only on creation. You must set root // passwords before you can connect to PostgreSQL instances. RootPassword string `json:"rootPassword,omitempty"` + // SatisfiesPzi: Output only. This status indicates whether the instance + // satisfies PZI. The status is reserved for future use. + SatisfiesPzi bool `json:"satisfiesPzi,omitempty"` // SatisfiesPzs: This status indicates whether the instance satisfies PZS. The // status is reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` @@ -1265,6 +1278,10 @@ type DatabaseInstance struct { // "KMS_KEY_ISSUE" - The KMS key used by the instance is either revoked or // denied access to SuspensionReason []string `json:"suspensionReason,omitempty"` + // SwitchTransactionLogsToCloudStorageEnabled: Input only. Whether Cloud SQL is + // enabled to switch storing point-in-time recovery log files from a data disk + // to Cloud Storage. + SwitchTransactionLogsToCloudStorageEnabled bool `json:"switchTransactionLogsToCloudStorageEnabled,omitempty"` // UpgradableDatabaseVersions: Output only. All database versions that are // available for upgrade. 
UpgradableDatabaseVersions []*AvailableDatabaseVersion `json:"upgradableDatabaseVersions,omitempty"` @@ -1287,9 +1304,9 @@ type DatabaseInstance struct { NullFields []string `json:"-"` } -func (s *DatabaseInstance) MarshalJSON() ([]byte, error) { +func (s DatabaseInstance) MarshalJSON() ([]byte, error) { type NoMethod DatabaseInstance - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DatabaseInstanceFailoverReplica: The name and status of the failover @@ -1316,9 +1333,9 @@ type DatabaseInstanceFailoverReplica struct { NullFields []string `json:"-"` } -func (s *DatabaseInstanceFailoverReplica) MarshalJSON() ([]byte, error) { +func (s DatabaseInstanceFailoverReplica) MarshalJSON() ([]byte, error) { type NoMethod DatabaseInstanceFailoverReplica - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DatabasesListResponse: Database list response. @@ -1343,9 +1360,9 @@ type DatabasesListResponse struct { NullFields []string `json:"-"` } -func (s *DatabasesListResponse) MarshalJSON() ([]byte, error) { +func (s DatabasesListResponse) MarshalJSON() ([]byte, error) { type NoMethod DatabasesListResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DemoteContext: This context is used to demote an existing standalone @@ -1369,9 +1386,9 @@ type DemoteContext struct { NullFields []string `json:"-"` } -func (s *DemoteContext) MarshalJSON() ([]byte, error) { +func (s DemoteContext) MarshalJSON() ([]byte, error) { type NoMethod DemoteContext - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DemoteMasterConfiguration: Read-replica configuration for connecting to the @@ -1399,9 +1416,9 @@ type DemoteMasterConfiguration struct { NullFields []string `json:"-"` } -func (s *DemoteMasterConfiguration) MarshalJSON() ([]byte, error) { +func (s DemoteMasterConfiguration) MarshalJSON() ([]byte, error) { type NoMethod DemoteMasterConfiguration - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DemoteMasterContext: Database instance demote primary instance context. @@ -1437,9 +1454,9 @@ type DemoteMasterContext struct { NullFields []string `json:"-"` } -func (s *DemoteMasterContext) MarshalJSON() ([]byte, error) { +func (s DemoteMasterContext) MarshalJSON() ([]byte, error) { type NoMethod DemoteMasterContext - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DemoteMasterMySqlReplicaConfiguration: Read-replica configuration specific @@ -1472,9 +1489,9 @@ type DemoteMasterMySqlReplicaConfiguration struct { NullFields []string `json:"-"` } -func (s *DemoteMasterMySqlReplicaConfiguration) MarshalJSON() ([]byte, error) { +func (s DemoteMasterMySqlReplicaConfiguration) MarshalJSON() ([]byte, error) { type NoMethod DemoteMasterMySqlReplicaConfiguration - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DenyMaintenancePeriod: Deny Maintenance Periods. 
This specifies a date range @@ -1506,9 +1523,9 @@ type DenyMaintenancePeriod struct { NullFields []string `json:"-"` } -func (s *DenyMaintenancePeriod) MarshalJSON() ([]byte, error) { +func (s DenyMaintenancePeriod) MarshalJSON() ([]byte, error) { type NoMethod DenyMaintenancePeriod - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DiskEncryptionConfiguration: Disk encryption configuration for an instance. @@ -1530,9 +1547,9 @@ type DiskEncryptionConfiguration struct { NullFields []string `json:"-"` } -func (s *DiskEncryptionConfiguration) MarshalJSON() ([]byte, error) { +func (s DiskEncryptionConfiguration) MarshalJSON() ([]byte, error) { type NoMethod DiskEncryptionConfiguration - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // DiskEncryptionStatus: Disk encryption status for an instance. @@ -1555,9 +1572,9 @@ type DiskEncryptionStatus struct { NullFields []string `json:"-"` } -func (s *DiskEncryptionStatus) MarshalJSON() ([]byte, error) { +func (s DiskEncryptionStatus) MarshalJSON() ([]byte, error) { type NoMethod DiskEncryptionStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Empty: A generic empty message that you can re-use to avoid defining @@ -1618,9 +1635,9 @@ type ExportContext struct { NullFields []string `json:"-"` } -func (s *ExportContext) MarshalJSON() ([]byte, error) { +func (s ExportContext) MarshalJSON() ([]byte, error) { type NoMethod ExportContext - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExportContextBakExportOptions: Options for exporting BAK files (SQL @@ -1639,6 +1656,19 @@ type ExportContextBakExportOptions struct { // DifferentialBase: Whether or not the backup can be used as a differential // base copy_only backup can not be served as differential base DifferentialBase bool `json:"differentialBase,omitempty"` + // ExportLogEndTime: Optional. The end timestamp when transaction log will be + // included in the export operation. RFC 3339 + // (https://tools.ietf.org/html/rfc3339) format (for example, + // `2023-10-01T16:19:00.094`) in UTC. When omitted, all available logs until + // current time will be included. Only applied to Cloud SQL for SQL Server. + ExportLogEndTime string `json:"exportLogEndTime,omitempty"` + // ExportLogStartTime: Optional. The begin timestamp when transaction log will + // be included in the export operation. RFC 3339 + // (https://tools.ietf.org/html/rfc3339) format (for example, + // `2023-10-01T16:19:00.094`) in UTC. When omitted, all available logs from the + // beginning of retention period will be included. Only applied to Cloud SQL + // for SQL Server. + ExportLogStartTime string `json:"exportLogStartTime,omitempty"` // StripeCount: Option for specifying how many stripes to use for the export. // If blank, and the value of the striped field is true, the number of stripes // is automatically chosen. 
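The BAK export options above gain ExportLogStartTime/ExportLogEndTime for SQL Server transaction-log exports, documented as RFC 3339 timestamps in UTC with the example `2023-10-01T16:19:00.094` (no zone suffix). A sketch of producing that shape; the 24-hour window is illustrative, and the layout string is an assumption matched to the documented example rather than plain time.RFC3339:

package main

import (
	"fmt"
	"time"

	sqladmin "google.golang.org/api/sqladmin/v1beta4"
)

func main() {
	// Millisecond precision, no zone suffix, formatted from a UTC time,
	// mirroring the documented example value.
	const stamp = "2006-01-02T15:04:05.000"
	end := time.Now().UTC()
	start := end.Add(-24 * time.Hour) // illustrative one-day log window

	opts := &sqladmin.ExportContextBakExportOptions{
		ExportLogStartTime: start.Format(stamp),
		ExportLogEndTime:   end.Format(stamp),
	}
	fmt.Println(opts.ExportLogStartTime, "->", opts.ExportLogEndTime)
}

Leaving both fields empty keeps the documented defaults: all available logs from the start of the retention period through the current time.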
@@ -1658,9 +1688,9 @@ type ExportContextBakExportOptions struct { NullFields []string `json:"-"` } -func (s *ExportContextBakExportOptions) MarshalJSON() ([]byte, error) { +func (s ExportContextBakExportOptions) MarshalJSON() ([]byte, error) { type NoMethod ExportContextBakExportOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExportContextCsvExportOptions: Options for exporting data as CSV. `MySQL` @@ -1693,9 +1723,9 @@ type ExportContextCsvExportOptions struct { NullFields []string `json:"-"` } -func (s *ExportContextCsvExportOptions) MarshalJSON() ([]byte, error) { +func (s ExportContextCsvExportOptions) MarshalJSON() ([]byte, error) { type NoMethod ExportContextCsvExportOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExportContextSqlExportOptions: Options for exporting data as SQL statements. @@ -1704,6 +1734,9 @@ type ExportContextSqlExportOptions struct { MysqlExportOptions *ExportContextSqlExportOptionsMysqlExportOptions `json:"mysqlExportOptions,omitempty"` // Parallel: Optional. Whether or not the export should be parallel. Parallel bool `json:"parallel,omitempty"` + // PostgresExportOptions: Options for exporting from a Cloud SQL for PostgreSQL + // instance. + PostgresExportOptions *ExportContextSqlExportOptionsPostgresExportOptions `json:"postgresExportOptions,omitempty"` // SchemaOnly: Export only schemas. SchemaOnly bool `json:"schemaOnly,omitempty"` // Tables: Tables to export, or that were exported, from the specified @@ -1725,9 +1758,9 @@ type ExportContextSqlExportOptions struct { NullFields []string `json:"-"` } -func (s *ExportContextSqlExportOptions) MarshalJSON() ([]byte, error) { +func (s ExportContextSqlExportOptions) MarshalJSON() ([]byte, error) { type NoMethod ExportContextSqlExportOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ExportContextSqlExportOptionsMysqlExportOptions: Options for exporting from @@ -1752,9 +1785,37 @@ type ExportContextSqlExportOptionsMysqlExportOptions struct { NullFields []string `json:"-"` } -func (s *ExportContextSqlExportOptionsMysqlExportOptions) MarshalJSON() ([]byte, error) { +func (s ExportContextSqlExportOptionsMysqlExportOptions) MarshalJSON() ([]byte, error) { type NoMethod ExportContextSqlExportOptionsMysqlExportOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ExportContextSqlExportOptionsPostgresExportOptions: Options for exporting +// from a Cloud SQL for PostgreSQL instance. +type ExportContextSqlExportOptionsPostgresExportOptions struct { + // Clean: Optional. Use this option to include DROP SQL statements. These + // statements are used to delete database objects before running the import + // operation. + Clean bool `json:"clean,omitempty"` + // IfExists: Optional. Option to include an IF EXISTS SQL statement with each + // DROP statement produced by clean. + IfExists bool `json:"ifExists,omitempty"` + // ForceSendFields is a list of field names (e.g. "Clean") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. 
See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Clean") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ExportContextSqlExportOptionsPostgresExportOptions) MarshalJSON() ([]byte, error) { + type NoMethod ExportContextSqlExportOptionsPostgresExportOptions + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FailoverContext: Database instance failover context. @@ -1777,9 +1838,9 @@ type FailoverContext struct { NullFields []string `json:"-"` } -func (s *FailoverContext) MarshalJSON() ([]byte, error) { +func (s FailoverContext) MarshalJSON() ([]byte, error) { type NoMethod FailoverContext - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Flag: A flag resource. @@ -1821,6 +1882,7 @@ type Flag struct { // "POSTGRES_14" - The database version is PostgreSQL 14. // "POSTGRES_15" - The database version is PostgreSQL 15. // "POSTGRES_16" - The database version is PostgreSQL 16. + // "POSTGRES_17" - The database version is PostgreSQL 17. // "MYSQL_8_0" - The database version is MySQL 8. // "MYSQL_8_0_18" - The database major version is MySQL 8.0 and the minor // version is 18. @@ -1855,8 +1917,6 @@ type Flag struct { // "MYSQL_8_0_40" - The database major version is MySQL 8.0 and the minor // version is 40. // "MYSQL_8_4" - The database version is MySQL 8.4. - // "MYSQL_8_4_0" - The database version is MySQL 8.4 and the patch version is - // 0. // "SQLSERVER_2019_STANDARD" - The database version is SQL Server 2019 // Standard. // "SQLSERVER_2019_ENTERPRISE" - The database version is SQL Server 2019 @@ -1915,9 +1975,9 @@ type Flag struct { NullFields []string `json:"-"` } -func (s *Flag) MarshalJSON() ([]byte, error) { +func (s Flag) MarshalJSON() ([]byte, error) { type NoMethod Flag - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FlagsListResponse: Flags list response. @@ -1942,9 +2002,9 @@ type FlagsListResponse struct { NullFields []string `json:"-"` } -func (s *FlagsListResponse) MarshalJSON() ([]byte, error) { +func (s FlagsListResponse) MarshalJSON() ([]byte, error) { type NoMethod FlagsListResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GeminiInstanceConfig: Gemini instance configuration. @@ -1977,9 +2037,9 @@ type GeminiInstanceConfig struct { NullFields []string `json:"-"` } -func (s *GeminiInstanceConfig) MarshalJSON() ([]byte, error) { +func (s GeminiInstanceConfig) MarshalJSON() ([]byte, error) { type NoMethod GeminiInstanceConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GenerateEphemeralCertRequest: Ephemeral certificate creation request. 
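The new PostgreSQL export options (Clean/IfExists) added a few hunks above plug into ExportContextSqlExportOptions. A sketch of populating them; note that ForceSendFields, documented in the struct comments above, is the mechanism for sending an explicit false instead of omitting the field:

package main

import (
	"fmt"

	sqladmin "google.golang.org/api/sqladmin/v1beta4"
)

func main() {
	pg := &sqladmin.ExportContextSqlExportOptionsPostgresExportOptions{
		Clean: true, // emit DROP statements ahead of each object
		// IfExists stays false here; listing it in ForceSendFields makes
		// the zero value serialize rather than being omitted.
		ForceSendFields: []string{"IfExists"},
	}
	opts := &sqladmin.ExportContextSqlExportOptions{
		Parallel:              true,
		PostgresExportOptions: pg,
	}
	fmt.Printf("%+v\n", opts.PostgresExportOptions)
}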
@@ -2006,9 +2066,9 @@ type GenerateEphemeralCertRequest struct { NullFields []string `json:"-"` } -func (s *GenerateEphemeralCertRequest) MarshalJSON() ([]byte, error) { +func (s GenerateEphemeralCertRequest) MarshalJSON() ([]byte, error) { type NoMethod GenerateEphemeralCertRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GenerateEphemeralCertResponse: Ephemeral certificate creation request. @@ -2031,9 +2091,9 @@ type GenerateEphemeralCertResponse struct { NullFields []string `json:"-"` } -func (s *GenerateEphemeralCertResponse) MarshalJSON() ([]byte, error) { +func (s GenerateEphemeralCertResponse) MarshalJSON() ([]byte, error) { type NoMethod GenerateEphemeralCertResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ImportContext: Database instance import context. @@ -2082,9 +2142,9 @@ type ImportContext struct { NullFields []string `json:"-"` } -func (s *ImportContext) MarshalJSON() ([]byte, error) { +func (s ImportContext) MarshalJSON() ([]byte, error) { type NoMethod ImportContext - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ImportContextBakImportOptions: Import parameters specific to SQL Server .BAK @@ -2132,9 +2192,9 @@ type ImportContextBakImportOptions struct { NullFields []string `json:"-"` } -func (s *ImportContextBakImportOptions) MarshalJSON() ([]byte, error) { +func (s ImportContextBakImportOptions) MarshalJSON() ([]byte, error) { type NoMethod ImportContextBakImportOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ImportContextBakImportOptionsEncryptionOptions struct { @@ -2161,9 +2221,9 @@ type ImportContextBakImportOptionsEncryptionOptions struct { NullFields []string `json:"-"` } -func (s *ImportContextBakImportOptionsEncryptionOptions) MarshalJSON() ([]byte, error) { +func (s ImportContextBakImportOptionsEncryptionOptions) MarshalJSON() ([]byte, error) { type NoMethod ImportContextBakImportOptionsEncryptionOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ImportContextCsvImportOptions: Options for importing data as CSV. @@ -2198,9 +2258,9 @@ type ImportContextCsvImportOptions struct { NullFields []string `json:"-"` } -func (s *ImportContextCsvImportOptions) MarshalJSON() ([]byte, error) { +func (s ImportContextCsvImportOptions) MarshalJSON() ([]byte, error) { type NoMethod ImportContextCsvImportOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ImportContextSqlImportOptions: Optional. Options for importing data from SQL @@ -2208,6 +2268,9 @@ func (s *ImportContextCsvImportOptions) MarshalJSON() ([]byte, error) { type ImportContextSqlImportOptions struct { // Parallel: Optional. Whether or not the import should be parallel. Parallel bool `json:"parallel,omitempty"` + // PostgresImportOptions: Optional. Options for importing from a Cloud SQL for + // PostgreSQL instance. 
+ PostgresImportOptions *ImportContextSqlImportOptionsPostgresImportOptions `json:"postgresImportOptions,omitempty"` // Threads: Optional. The number of threads to use for parallel import. Threads int64 `json:"threads,omitempty"` // ForceSendFields is a list of field names (e.g. "Parallel") to @@ -2223,9 +2286,36 @@ type ImportContextSqlImportOptions struct { NullFields []string `json:"-"` } -func (s *ImportContextSqlImportOptions) MarshalJSON() ([]byte, error) { +func (s ImportContextSqlImportOptions) MarshalJSON() ([]byte, error) { type NoMethod ImportContextSqlImportOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ImportContextSqlImportOptionsPostgresImportOptions: Optional. Options for +// importing from a Cloud SQL for PostgreSQL instance. +type ImportContextSqlImportOptionsPostgresImportOptions struct { + // Clean: Optional. The --clean flag for the pg_restore utility. This flag + // applies only if you enabled Cloud SQL to import files in parallel. + Clean bool `json:"clean,omitempty"` + // IfExists: Optional. The --if-exists flag for the pg_restore utility. This + // flag applies only if you enabled Cloud SQL to import files in parallel. + IfExists bool `json:"ifExists,omitempty"` + // ForceSendFields is a list of field names (e.g. "Clean") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Clean") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ImportContextSqlImportOptionsPostgresImportOptions) MarshalJSON() ([]byte, error) { + type NoMethod ImportContextSqlImportOptionsPostgresImportOptions + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InsightsConfig: Insights configuration. This specifies when Cloud SQL @@ -2260,9 +2350,9 @@ type InsightsConfig struct { NullFields []string `json:"-"` } -func (s *InsightsConfig) MarshalJSON() ([]byte, error) { +func (s InsightsConfig) MarshalJSON() ([]byte, error) { type NoMethod InsightsConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstanceReference: Reference to another Cloud SQL instance. 
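Mirroring the export side, the hunk above adds PostgresImportOptions, whose Clean and IfExists map to pg_restore's --clean and --if-exists and, per the field comments, apply only when the import runs in parallel. A sketch of a consistent combination (the thread count is illustrative):

package main

import (
	"fmt"

	sqladmin "google.golang.org/api/sqladmin/v1beta4"
)

func main() {
	opts := &sqladmin.ImportContextSqlImportOptions{
		Parallel: true, // Clean/IfExists are honored only for parallel imports
		Threads:  4,
		PostgresImportOptions: &sqladmin.ImportContextSqlImportOptionsPostgresImportOptions{
			Clean:    true, // pg_restore --clean
			IfExists: true, // pg_restore --if-exists, so DROPs tolerate absent objects
		},
	}
	fmt.Printf("%+v\n", opts)
}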
@@ -2288,9 +2378,9 @@ type InstanceReference struct { NullFields []string `json:"-"` } -func (s *InstanceReference) MarshalJSON() ([]byte, error) { +func (s InstanceReference) MarshalJSON() ([]byte, error) { type NoMethod InstanceReference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstancesAcquireSsrsLeaseRequest: Request to acquire an SSRS lease for an @@ -2312,9 +2402,9 @@ type InstancesAcquireSsrsLeaseRequest struct { NullFields []string `json:"-"` } -func (s *InstancesAcquireSsrsLeaseRequest) MarshalJSON() ([]byte, error) { +func (s InstancesAcquireSsrsLeaseRequest) MarshalJSON() ([]byte, error) { type NoMethod InstancesAcquireSsrsLeaseRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstancesCloneRequest: Database instance clone request. @@ -2334,9 +2424,9 @@ type InstancesCloneRequest struct { NullFields []string `json:"-"` } -func (s *InstancesCloneRequest) MarshalJSON() ([]byte, error) { +func (s InstancesCloneRequest) MarshalJSON() ([]byte, error) { type NoMethod InstancesCloneRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstancesDemoteMasterRequest: Database demote primary instance request. @@ -2356,9 +2446,9 @@ type InstancesDemoteMasterRequest struct { NullFields []string `json:"-"` } -func (s *InstancesDemoteMasterRequest) MarshalJSON() ([]byte, error) { +func (s InstancesDemoteMasterRequest) MarshalJSON() ([]byte, error) { type NoMethod InstancesDemoteMasterRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstancesDemoteRequest: This request is used to demote an existing @@ -2382,9 +2472,9 @@ type InstancesDemoteRequest struct { NullFields []string `json:"-"` } -func (s *InstancesDemoteRequest) MarshalJSON() ([]byte, error) { +func (s InstancesDemoteRequest) MarshalJSON() ([]byte, error) { type NoMethod InstancesDemoteRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstancesExportRequest: Database instance export request. @@ -2404,9 +2494,9 @@ type InstancesExportRequest struct { NullFields []string `json:"-"` } -func (s *InstancesExportRequest) MarshalJSON() ([]byte, error) { +func (s InstancesExportRequest) MarshalJSON() ([]byte, error) { type NoMethod InstancesExportRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstancesFailoverRequest: Instance failover request. @@ -2426,9 +2516,9 @@ type InstancesFailoverRequest struct { NullFields []string `json:"-"` } -func (s *InstancesFailoverRequest) MarshalJSON() ([]byte, error) { +func (s InstancesFailoverRequest) MarshalJSON() ([]byte, error) { type NoMethod InstancesFailoverRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstancesImportRequest: Database instance import request. 
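A change repeated throughout these hunks is the switch of every MarshalJSON receiver from *T to T. A self-contained illustration of the generated-code pattern with a hypothetical type name (the real code routes through gensupport.MarshalJSON to honor ForceSendFields and NullFields; plain json.Marshal stands in here):

package main

import (
	"encoding/json"
	"fmt"
)

type Example struct {
	Field string `json:"field,omitempty"`
}

// Value receiver: the method is in the method set of both Example and
// *Example, so callers can marshal either form.
func (s Example) MarshalJSON() ([]byte, error) {
	// NoMethod has Example's fields but not its methods, so the nested
	// json.Marshal call cannot recurse back into this method.
	type NoMethod Example
	return json.Marshal(NoMethod(s))
}

func main() {
	b, _ := json.Marshal(Example{Field: "x"})
	fmt.Println(string(b)) // {"field":"x"}
}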
@@ -2448,9 +2538,9 @@ type InstancesImportRequest struct { NullFields []string `json:"-"` } -func (s *InstancesImportRequest) MarshalJSON() ([]byte, error) { +func (s InstancesImportRequest) MarshalJSON() ([]byte, error) { type NoMethod InstancesImportRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstancesListResponse: Database instances list response. @@ -2481,9 +2571,9 @@ type InstancesListResponse struct { NullFields []string `json:"-"` } -func (s *InstancesListResponse) MarshalJSON() ([]byte, error) { +func (s InstancesListResponse) MarshalJSON() ([]byte, error) { type NoMethod InstancesListResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstancesListServerCasResponse: Instances ListServerCas response. @@ -2509,9 +2599,43 @@ type InstancesListServerCasResponse struct { NullFields []string `json:"-"` } -func (s *InstancesListServerCasResponse) MarshalJSON() ([]byte, error) { +func (s InstancesListServerCasResponse) MarshalJSON() ([]byte, error) { type NoMethod InstancesListServerCasResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// InstancesListServerCertificatesResponse: Instances ListServerCertificatess +// response. +type InstancesListServerCertificatesResponse struct { + // ActiveVersion: The `sha1_fingerprint` of the active certificate from + // `server_certs`. + ActiveVersion string `json:"activeVersion,omitempty"` + // CaCerts: List of server CA certificates for the instance. + CaCerts []*SslCert `json:"caCerts,omitempty"` + // Kind: This is always `sql#instancesListServerCertificates`. + Kind string `json:"kind,omitempty"` + // ServerCerts: List of server certificates for the instance, signed by the + // corresponding CA from the `ca_certs` list. + ServerCerts []*SslCert `json:"serverCerts,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "ActiveVersion") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "ActiveVersion") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s InstancesListServerCertificatesResponse) MarshalJSON() ([]byte, error) { + type NoMethod InstancesListServerCertificatesResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstancesReencryptRequest: Database Instance reencrypt request. 
@@ -2531,9 +2655,9 @@ type InstancesReencryptRequest struct { NullFields []string `json:"-"` } -func (s *InstancesReencryptRequest) MarshalJSON() ([]byte, error) { +func (s InstancesReencryptRequest) MarshalJSON() ([]byte, error) { type NoMethod InstancesReencryptRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstancesRestoreBackupRequest: Database instance restore backup request. @@ -2554,9 +2678,9 @@ type InstancesRestoreBackupRequest struct { NullFields []string `json:"-"` } -func (s *InstancesRestoreBackupRequest) MarshalJSON() ([]byte, error) { +func (s InstancesRestoreBackupRequest) MarshalJSON() ([]byte, error) { type NoMethod InstancesRestoreBackupRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstancesRotateServerCaRequest: Rotate Server CA request. @@ -2577,9 +2701,33 @@ type InstancesRotateServerCaRequest struct { NullFields []string `json:"-"` } -func (s *InstancesRotateServerCaRequest) MarshalJSON() ([]byte, error) { +func (s InstancesRotateServerCaRequest) MarshalJSON() ([]byte, error) { type NoMethod InstancesRotateServerCaRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// InstancesRotateServerCertificateRequest: Rotate Server Certificate request. +type InstancesRotateServerCertificateRequest struct { + // RotateServerCertificateContext: Optional. Contains details about the rotate + // server CA operation. + RotateServerCertificateContext *RotateServerCertificateContext `json:"rotateServerCertificateContext,omitempty"` + // ForceSendFields is a list of field names (e.g. + // "RotateServerCertificateContext") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted from + // API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "RotateServerCertificateContext") + // to include in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s InstancesRotateServerCertificateRequest) MarshalJSON() ([]byte, error) { + type NoMethod InstancesRotateServerCertificateRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // InstancesTruncateLogRequest: Instance truncate log request. @@ -2599,9 +2747,9 @@ type InstancesTruncateLogRequest struct { NullFields []string `json:"-"` } -func (s *InstancesTruncateLogRequest) MarshalJSON() ([]byte, error) { +func (s InstancesTruncateLogRequest) MarshalJSON() ([]byte, error) { type NoMethod InstancesTruncateLogRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // IpConfiguration: IP Management configuration. @@ -2637,6 +2785,15 @@ type IpConfiguration struct { // enforcing the requirement for valid client certificates, then use the // `ssl_mode` flag instead of the legacy `require_ssl` flag. 
RequireSsl bool `json:"requireSsl,omitempty"` + // ServerCaMode: Specify what type of CA is used for the server certificate. + // + // Possible values: + // "CA_MODE_UNSPECIFIED" - CA mode is unspecified. It is effectively the same + // as `GOOGLE_MANAGED_INTERNAL_CA`. + // "GOOGLE_MANAGED_INTERNAL_CA" - Google-managed self-signed internal CA. + // "GOOGLE_MANAGED_CAS_CA" - Google-managed regional CA part of root CA + // hierarchy hosted on Google Cloud's Certificate Authority Service (CAS). + ServerCaMode string `json:"serverCaMode,omitempty"` // SslMode: Specify how SSL/TLS is enforced in database connections. If you // must use the `require_ssl` flag for backward compatibility, then only the // following value pairs are valid: For PostgreSQL and MySQL: * @@ -2687,9 +2844,9 @@ type IpConfiguration struct { NullFields []string `json:"-"` } -func (s *IpConfiguration) MarshalJSON() ([]byte, error) { +func (s IpConfiguration) MarshalJSON() ([]byte, error) { type NoMethod IpConfiguration - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // IpMapping: Database instance IP mapping @@ -2732,9 +2889,9 @@ type IpMapping struct { NullFields []string `json:"-"` } -func (s *IpMapping) MarshalJSON() ([]byte, error) { +func (s IpMapping) MarshalJSON() ([]byte, error) { type NoMethod IpMapping - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LocationPreference: Preferred location. This specifies where a Cloud SQL @@ -2768,37 +2925,40 @@ type LocationPreference struct { NullFields []string `json:"-"` } -func (s *LocationPreference) MarshalJSON() ([]byte, error) { +func (s LocationPreference) MarshalJSON() ([]byte, error) { type NoMethod LocationPreference - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MaintenanceWindow: Maintenance window. This specifies when a Cloud SQL // instance is restarted for system maintenance purposes. type MaintenanceWindow struct { - // Day: day of week (1-7), starting on Monday. + // Day: Day of week - `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, + // `SATURDAY`, or `SUNDAY`. Specify in the UTC time zone. Returned in output as + // an integer, 1 to 7, where `1` equals Monday. Day int64 `json:"day,omitempty"` - // Hour: hour of day - 0 to 23. + // Hour: Hour of day - 0 to 23. Specify in the UTC time zone. Hour int64 `json:"hour,omitempty"` // Kind: This is always `sql#maintenanceWindow`. Kind string `json:"kind,omitempty"` - // UpdateTrack: Maintenance timing setting: `canary` (Earlier) or `stable` - // (Later). Learn more - // (https://cloud.google.com/sql/docs/mysql/instance-settings#maintenance-timing-2ndgen). + // UpdateTrack: Maintenance timing settings: `canary`, `stable`, or `week5`. + // For more information, see About maintenance on Cloud SQL instances + // (https://cloud.google.com/sql/docs/mysql/maintenance). // // Possible values: // "SQL_UPDATE_TRACK_UNSPECIFIED" - This is an unknown maintenance timing // preference. - // "canary" - For instance update that requires a restart, this update track - // indicates your instance prefer to restart for new version early in - // maintenance window. 
- // "stable" - For instance update that requires a restart, this update track - // indicates your instance prefer to let Cloud SQL choose the timing of restart - // (within its Maintenance window, if applicable). - // "week5" - For instance update that requires a restart, this update track - // indicates your instance prefer to let Cloud SQL choose the timing of restart - // (within its Maintenance window, if applicable) to be at least 5 weeks after - // the notification. + // "canary" - For an instance with a scheduled maintenance window, this + // maintenance timing indicates that the maintenance update is scheduled 7 to + // 14 days after the notification is sent out. Also referred to as `Week 1` + // (Console) and `preview` (gcloud CLI). + // "stable" - For an instance with a scheduled maintenance window, this + // maintenance timing indicates that the maintenance update is scheduled 15 to + // 21 days after the notification is sent out. Also referred to as `Week 2` + // (Console) and `production` (gcloud CLI). + // "week5" - For instance with a scheduled maintenance window, this + // maintenance timing indicates that the maintenance update is scheduled 35 to + // 42 days after the notification is sent out. UpdateTrack string `json:"updateTrack,omitempty"` // ForceSendFields is a list of field names (e.g. "Day") to unconditionally // include in API requests. By default, fields with empty or default values are @@ -2813,9 +2973,9 @@ type MaintenanceWindow struct { NullFields []string `json:"-"` } -func (s *MaintenanceWindow) MarshalJSON() ([]byte, error) { +func (s MaintenanceWindow) MarshalJSON() ([]byte, error) { type NoMethod MaintenanceWindow - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MySqlReplicaConfiguration: Read-replica configuration specific to MySQL @@ -2864,9 +3024,9 @@ type MySqlReplicaConfiguration struct { NullFields []string `json:"-"` } -func (s *MySqlReplicaConfiguration) MarshalJSON() ([]byte, error) { +func (s MySqlReplicaConfiguration) MarshalJSON() ([]byte, error) { type NoMethod MySqlReplicaConfiguration - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MySqlSyncConfig: MySQL-specific external server sync settings. @@ -2886,9 +3046,9 @@ type MySqlSyncConfig struct { NullFields []string `json:"-"` } -func (s *MySqlSyncConfig) MarshalJSON() ([]byte, error) { +func (s MySqlSyncConfig) MarshalJSON() ([]byte, error) { type NoMethod MySqlSyncConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OnPremisesConfiguration: On-premises instance configuration. @@ -2926,9 +3086,9 @@ type OnPremisesConfiguration struct { NullFields []string `json:"-"` } -func (s *OnPremisesConfiguration) MarshalJSON() ([]byte, error) { +func (s OnPremisesConfiguration) MarshalJSON() ([]byte, error) { type NoMethod OnPremisesConfiguration - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Operation: An Operation resource. For successful operations that return an @@ -3036,6 +3196,8 @@ type Operation struct { // "SWITCHOVER_TO_REPLICA" - Switches a primary instance to a replica. 
This // operation runs as part of a switchover operation to the original primary // instance. + // "MAJOR_VERSION_UPGRADE" - Updates the major version of a Cloud SQL + // instance. OperationType string `json:"operationType,omitempty"` // SelfLink: The URI of this resource. SelfLink string `json:"selfLink,omitempty"` @@ -3076,9 +3238,9 @@ type Operation struct { NullFields []string `json:"-"` } -func (s *Operation) MarshalJSON() ([]byte, error) { +func (s Operation) MarshalJSON() ([]byte, error) { type NoMethod Operation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationError: Database instance operation error. @@ -3102,9 +3264,9 @@ type OperationError struct { NullFields []string `json:"-"` } -func (s *OperationError) MarshalJSON() ([]byte, error) { +func (s OperationError) MarshalJSON() ([]byte, error) { type NoMethod OperationError - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationErrors: Database instance operation errors list wrapper. @@ -3126,9 +3288,9 @@ type OperationErrors struct { NullFields []string `json:"-"` } -func (s *OperationErrors) MarshalJSON() ([]byte, error) { +func (s OperationErrors) MarshalJSON() ([]byte, error) { type NoMethod OperationErrors - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationMetadata: Represents the metadata of the long-running operation. @@ -3164,9 +3326,9 @@ type OperationMetadata struct { NullFields []string `json:"-"` } -func (s *OperationMetadata) MarshalJSON() ([]byte, error) { +func (s OperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod OperationMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // OperationsListResponse: Operations list response. @@ -3195,9 +3357,9 @@ type OperationsListResponse struct { NullFields []string `json:"-"` } -func (s *OperationsListResponse) MarshalJSON() ([]byte, error) { +func (s OperationsListResponse) MarshalJSON() ([]byte, error) { type NoMethod OperationsListResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PasswordStatus: Read-only password status. @@ -3219,9 +3381,9 @@ type PasswordStatus struct { NullFields []string `json:"-"` } -func (s *PasswordStatus) MarshalJSON() ([]byte, error) { +func (s PasswordStatus) MarshalJSON() ([]byte, error) { type NoMethod PasswordStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PasswordValidationPolicy: Database instance local user password validation @@ -3261,9 +3423,9 @@ type PasswordValidationPolicy struct { NullFields []string `json:"-"` } -func (s *PasswordValidationPolicy) MarshalJSON() ([]byte, error) { +func (s PasswordValidationPolicy) MarshalJSON() ([]byte, error) { type NoMethod PasswordValidationPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PerformDiskShrinkContext: Perform disk shrink context. 
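Per the rewritten MaintenanceWindow documentation above, the update track now determines how long after notification maintenance lands: canary 7 to 14 days, stable 15 to 21 days, week5 35 to 42 days. A hedged sketch of setting the window through a standard instance patch; the project, instance, day, and hour are placeholders:

package main

import (
	"context"
	"log"

	sqladmin "google.golang.org/api/sql/v1beta4"
)

func main() {
	ctx := context.Background()
	svc, err := sqladmin.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	instance := &sqladmin.DatabaseInstance{
		Settings: &sqladmin.Settings{
			MaintenanceWindow: &sqladmin.MaintenanceWindow{
				Day:  7, // returned as 1-7, where 1 is Monday; 7 is Sunday
				Hour: 3, // 03:00 UTC
				// "stable": maintenance is scheduled 15-21 days after the
				// notification; "week5" would defer it to 35-42 days.
				UpdateTrack: "stable",
			},
		},
	}
	op, err := svc.Instances.Patch("example-project", "example-instance", instance).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("patch operation: %s", op.Name)
}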
@@ -3283,9 +3445,46 @@ type PerformDiskShrinkContext struct { NullFields []string `json:"-"` } -func (s *PerformDiskShrinkContext) MarshalJSON() ([]byte, error) { +func (s PerformDiskShrinkContext) MarshalJSON() ([]byte, error) { type NoMethod PerformDiskShrinkContext - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// PscAutoConnectionConfig: Settings for an automatically-setup Private Service +// Connect consumer endpoint that is used to connect to a Cloud SQL instance. +type PscAutoConnectionConfig struct { + // ConsumerNetwork: The consumer network of this consumer endpoint. This must + // be a resource path that includes both the host project and the network name. + // For example, `projects/project1/global/networks/network1`. The consumer host + // project of this network might be different from the consumer service + // project. + ConsumerNetwork string `json:"consumerNetwork,omitempty"` + // ConsumerNetworkStatus: The connection policy status of the consumer network. + ConsumerNetworkStatus string `json:"consumerNetworkStatus,omitempty"` + // ConsumerProject: This is the project ID of consumer service project of this + // consumer endpoint. Optional. This is only applicable if consumer_network is + // a shared vpc network. + ConsumerProject string `json:"consumerProject,omitempty"` + // IpAddress: The IP address of the consumer endpoint. + IpAddress string `json:"ipAddress,omitempty"` + // Status: The connection status of the consumer endpoint. + Status string `json:"status,omitempty"` + // ForceSendFields is a list of field names (e.g. "ConsumerNetwork") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "ConsumerNetwork") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s PscAutoConnectionConfig) MarshalJSON() ([]byte, error) { + type NoMethod PscAutoConnectionConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PscConfig: PSC settings for a Cloud SQL instance. @@ -3296,6 +3495,10 @@ type PscConfig struct { // project in this list may be represented by a project number (numeric) or by // a project id (alphanumeric). AllowedConsumerProjects []string `json:"allowedConsumerProjects,omitempty"` + // PscAutoConnections: Optional. The list of settings for requested Private + // Service Connect consumer endpoints that can be used to connect to this Cloud + // SQL instance. + PscAutoConnections []*PscAutoConnectionConfig `json:"pscAutoConnections,omitempty"` // PscEnabled: Whether PSC connectivity is enabled for this instance. PscEnabled bool `json:"pscEnabled,omitempty"` // ForceSendFields is a list of field names (e.g. 
"AllowedConsumerProjects") to @@ -3311,9 +3514,9 @@ type PscConfig struct { NullFields []string `json:"-"` } -func (s *PscConfig) MarshalJSON() ([]byte, error) { +func (s PscConfig) MarshalJSON() ([]byte, error) { type NoMethod PscConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ReplicaConfiguration: Read-replica configuration for connecting to the @@ -3352,9 +3555,9 @@ type ReplicaConfiguration struct { NullFields []string `json:"-"` } -func (s *ReplicaConfiguration) MarshalJSON() ([]byte, error) { +func (s ReplicaConfiguration) MarshalJSON() ([]byte, error) { type NoMethod ReplicaConfiguration - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ReplicationCluster: A primary instance and disaster recovery (DR) replica @@ -3395,9 +3598,9 @@ type ReplicationCluster struct { NullFields []string `json:"-"` } -func (s *ReplicationCluster) MarshalJSON() ([]byte, error) { +func (s ReplicationCluster) MarshalJSON() ([]byte, error) { type NoMethod ReplicationCluster - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type Reschedule struct { @@ -3428,9 +3631,9 @@ type Reschedule struct { NullFields []string `json:"-"` } -func (s *Reschedule) MarshalJSON() ([]byte, error) { +func (s Reschedule) MarshalJSON() ([]byte, error) { type NoMethod Reschedule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RestoreBackupContext: Database instance restore from backup context. Backup @@ -3457,9 +3660,9 @@ type RestoreBackupContext struct { NullFields []string `json:"-"` } -func (s *RestoreBackupContext) MarshalJSON() ([]byte, error) { +func (s RestoreBackupContext) MarshalJSON() ([]byte, error) { type NoMethod RestoreBackupContext - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RotateServerCaContext: Instance rotate server CA context. @@ -3482,9 +3685,35 @@ type RotateServerCaContext struct { NullFields []string `json:"-"` } -func (s *RotateServerCaContext) MarshalJSON() ([]byte, error) { +func (s RotateServerCaContext) MarshalJSON() ([]byte, error) { type NoMethod RotateServerCaContext - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// RotateServerCertificateContext: Instance rotate server certificate context. +type RotateServerCertificateContext struct { + // Kind: Optional. This is always `sql#rotateServerCertificateContext`. + Kind string `json:"kind,omitempty"` + // NextVersion: Optional. The fingerprint of the next version to be rotated to. + // If left unspecified, will be rotated to the most recently added server + // certificate version. + NextVersion string `json:"nextVersion,omitempty"` + // ForceSendFields is a list of field names (e.g. "Kind") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. 
+ ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Kind") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s RotateServerCertificateContext) MarshalJSON() ([]byte, error) { + type NoMethod RotateServerCertificateContext + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Settings: Database instance settings. @@ -3581,6 +3810,10 @@ type Settings struct { // "ENTERPRISE" - The instance is an enterprise edition. // "ENTERPRISE_PLUS" - The instance is an Enterprise Plus edition. Edition string `json:"edition,omitempty"` + // EnableDataplexIntegration: Optional. By default, Cloud SQL instances have + // schema extraction disabled for Dataplex. When this parameter is set to true, + // schema extraction for Dataplex on Cloud SQL instances is activated. + EnableDataplexIntegration bool `json:"enableDataplexIntegration,omitempty"` // EnableGoogleMlIntegration: Optional. When this parameter is set to true, // Cloud SQL instances can connect to Vertex AI to pass requests for real-time // predictions and insights to the AI. The default value is false. This applies @@ -3665,9 +3898,9 @@ type Settings struct { NullFields []string `json:"-"` } -func (s *Settings) MarshalJSON() ([]byte, error) { +func (s Settings) MarshalJSON() ([]byte, error) { type NoMethod Settings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SqlActiveDirectoryConfig: Active Directory configuration, relevant only for @@ -3690,9 +3923,9 @@ type SqlActiveDirectoryConfig struct { NullFields []string `json:"-"` } -func (s *SqlActiveDirectoryConfig) MarshalJSON() ([]byte, error) { +func (s SqlActiveDirectoryConfig) MarshalJSON() ([]byte, error) { type NoMethod SqlActiveDirectoryConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SqlExternalSyncSettingError: External primary instance migration setting @@ -3740,7 +3973,8 @@ type SqlExternalSyncSettingError struct { // MySQL. // "SQLSERVER_AGENT_NOT_RUNNING" - SQL Server Agent is not running. // "UNSUPPORTED_TABLE_DEFINITION" - The table definition is not support due - // to missing primary key or replica identity, applicable for postgres. + // to missing primary key or replica identity, applicable for postgres. Note + // that this is a warning and won't block the migration. // "UNSUPPORTED_DEFINER" - The customer has a definer that will break EM // setup. // "SQLSERVER_SERVERNAME_MISMATCH" - SQL Server @@SERVERNAME does not match @@ -3800,6 +4034,22 @@ type SqlExternalSyncSettingError struct { // "PG_CRON_FLAG_ENABLED_IN_REPLICA" - The error message indicates that // pg_cron flags are enabled on the destination which is not supported during // the migration. + // "EXTENSIONS_NOT_ENABLED_IN_REPLICA" - This error message indicates that + // the specified extensions are not enabled on destination instance. For + // example, before you can migrate data to the destination instance, you must + // enable the PGAudit extension on the instance. + // "UNSUPPORTED_COLUMNS" - The source database has generated columns that + // can't be migrated. Please change them to regular columns before migration. 
+ // "USERS_NOT_CREATED_IN_REPLICA" - The source database has users that aren't + // created in the replica. First, create all users, which are in the + // pg_user_mappings table of the source database, in the destination instance. + // Then, perform the migration. + // "UNSUPPORTED_SYSTEM_OBJECTS" - The selected objects include system objects + // that aren't supported for migration. + // "UNSUPPORTED_TABLES_WITH_REPLICA_IDENTITY" - The source database has + // tables with the FULL or NOTHING replica identity. Before starting your + // migration, either remove the identity or change it to DEFAULT. Note that + // this is an error and will block the migration. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "Detail") to unconditionally // include in API requests. By default, fields with empty or default values are @@ -3814,9 +4064,9 @@ type SqlExternalSyncSettingError struct { NullFields []string `json:"-"` } -func (s *SqlExternalSyncSettingError) MarshalJSON() ([]byte, error) { +func (s SqlExternalSyncSettingError) MarshalJSON() ([]byte, error) { type NoMethod SqlExternalSyncSettingError - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SqlInstancesAcquireSsrsLeaseResponse: Acquire SSRS lease response. @@ -3839,9 +4089,9 @@ type SqlInstancesAcquireSsrsLeaseResponse struct { NullFields []string `json:"-"` } -func (s *SqlInstancesAcquireSsrsLeaseResponse) MarshalJSON() ([]byte, error) { +func (s SqlInstancesAcquireSsrsLeaseResponse) MarshalJSON() ([]byte, error) { type NoMethod SqlInstancesAcquireSsrsLeaseResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SqlInstancesGetDiskShrinkConfigResponse: Instance get disk shrink config @@ -3870,9 +4120,9 @@ type SqlInstancesGetDiskShrinkConfigResponse struct { NullFields []string `json:"-"` } -func (s *SqlInstancesGetDiskShrinkConfigResponse) MarshalJSON() ([]byte, error) { +func (s SqlInstancesGetDiskShrinkConfigResponse) MarshalJSON() ([]byte, error) { type NoMethod SqlInstancesGetDiskShrinkConfigResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SqlInstancesGetLatestRecoveryTimeResponse: Instance get latest recovery time @@ -3899,9 +4149,9 @@ type SqlInstancesGetLatestRecoveryTimeResponse struct { NullFields []string `json:"-"` } -func (s *SqlInstancesGetLatestRecoveryTimeResponse) MarshalJSON() ([]byte, error) { +func (s SqlInstancesGetLatestRecoveryTimeResponse) MarshalJSON() ([]byte, error) { type NoMethod SqlInstancesGetLatestRecoveryTimeResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SqlInstancesReleaseSsrsLeaseResponse: The response for the release of the @@ -3925,9 +4175,9 @@ type SqlInstancesReleaseSsrsLeaseResponse struct { NullFields []string `json:"-"` } -func (s *SqlInstancesReleaseSsrsLeaseResponse) MarshalJSON() ([]byte, error) { +func (s SqlInstancesReleaseSsrsLeaseResponse) MarshalJSON() ([]byte, error) { type NoMethod SqlInstancesReleaseSsrsLeaseResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // 
SqlInstancesRescheduleMaintenanceRequestBody: Reschedule options for @@ -3948,9 +4198,9 @@ type SqlInstancesRescheduleMaintenanceRequestBody struct { NullFields []string `json:"-"` } -func (s *SqlInstancesRescheduleMaintenanceRequestBody) MarshalJSON() ([]byte, error) { +func (s SqlInstancesRescheduleMaintenanceRequestBody) MarshalJSON() ([]byte, error) { type NoMethod SqlInstancesRescheduleMaintenanceRequestBody - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SqlInstancesResetReplicaSizeRequest: Instance reset replica size request. @@ -4006,9 +4256,9 @@ type SqlInstancesStartExternalSyncRequest struct { NullFields []string `json:"-"` } -func (s *SqlInstancesStartExternalSyncRequest) MarshalJSON() ([]byte, error) { +func (s SqlInstancesStartExternalSyncRequest) MarshalJSON() ([]byte, error) { type NoMethod SqlInstancesStartExternalSyncRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SqlInstancesVerifyExternalSyncSettingsRequest struct { @@ -4063,9 +4313,9 @@ type SqlInstancesVerifyExternalSyncSettingsRequest struct { NullFields []string `json:"-"` } -func (s *SqlInstancesVerifyExternalSyncSettingsRequest) MarshalJSON() ([]byte, error) { +func (s SqlInstancesVerifyExternalSyncSettingsRequest) MarshalJSON() ([]byte, error) { type NoMethod SqlInstancesVerifyExternalSyncSettingsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SqlInstancesVerifyExternalSyncSettingsResponse: Instance verify external @@ -4093,9 +4343,9 @@ type SqlInstancesVerifyExternalSyncSettingsResponse struct { NullFields []string `json:"-"` } -func (s *SqlInstancesVerifyExternalSyncSettingsResponse) MarshalJSON() ([]byte, error) { +func (s SqlInstancesVerifyExternalSyncSettingsResponse) MarshalJSON() ([]byte, error) { type NoMethod SqlInstancesVerifyExternalSyncSettingsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SqlOutOfDiskReport: This message wraps up the information written by @@ -4130,9 +4380,9 @@ type SqlOutOfDiskReport struct { NullFields []string `json:"-"` } -func (s *SqlOutOfDiskReport) MarshalJSON() ([]byte, error) { +func (s SqlOutOfDiskReport) MarshalJSON() ([]byte, error) { type NoMethod SqlOutOfDiskReport - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SqlScheduledMaintenance: Any scheduled maintenance for this instance. @@ -4159,9 +4409,9 @@ type SqlScheduledMaintenance struct { NullFields []string `json:"-"` } -func (s *SqlScheduledMaintenance) MarshalJSON() ([]byte, error) { +func (s SqlScheduledMaintenance) MarshalJSON() ([]byte, error) { type NoMethod SqlScheduledMaintenance - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SqlServerAuditConfig: SQL Server specific audit configuration. 
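Two settings additions in the preceding hunks, IpConfiguration.ServerCaMode and PscConfig.PscAutoConnections, combine when creating a PSC-enabled instance with a CAS-backed server CA. An illustrative sketch under the same placeholder-name assumptions as the earlier examples; it also shows the ForceSendFields mechanism these structs document:

package main

import (
	"context"
	"log"

	sqladmin "google.golang.org/api/sql/v1beta4"
)

func main() {
	ctx := context.Background()
	svc, err := sqladmin.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	instance := &sqladmin.DatabaseInstance{
		Name:            "example-instance", // placeholder
		DatabaseVersion: "POSTGRES_16",
		Region:          "us-central1",
		Settings: &sqladmin.Settings{
			Tier: "db-custom-2-8192",
			IpConfiguration: &sqladmin.IpConfiguration{
				Ipv4Enabled: false,
				// false is a zero value and would be dropped from the
				// request body, so it is forced onto the wire.
				ForceSendFields: []string{"Ipv4Enabled"},
				// New field: use the CAS-backed regional CA rather than
				// the default self-signed internal CA.
				ServerCaMode: "GOOGLE_MANAGED_CAS_CA",
				PscConfig: &sqladmin.PscConfig{
					PscEnabled: true,
					// New field: ask Cloud SQL to set up the consumer-side
					// PSC endpoint automatically.
					PscAutoConnections: []*sqladmin.PscAutoConnectionConfig{{
						ConsumerNetwork: "projects/consumer-proj/global/networks/net1", // placeholder
						ConsumerProject: "consumer-proj",                               // placeholder
					}},
				},
			},
		},
	}
	op, err := svc.Instances.Insert("example-project", instance).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("create operation: %s", op.Name)
}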
@@ -4187,9 +4437,9 @@ type SqlServerAuditConfig struct { NullFields []string `json:"-"` } -func (s *SqlServerAuditConfig) MarshalJSON() ([]byte, error) { +func (s SqlServerAuditConfig) MarshalJSON() ([]byte, error) { type NoMethod SqlServerAuditConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SqlServerDatabaseDetails: Represents a Sql Server database on the Cloud SQL @@ -4213,9 +4463,9 @@ type SqlServerDatabaseDetails struct { NullFields []string `json:"-"` } -func (s *SqlServerDatabaseDetails) MarshalJSON() ([]byte, error) { +func (s SqlServerDatabaseDetails) MarshalJSON() ([]byte, error) { type NoMethod SqlServerDatabaseDetails - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SqlServerUserDetails: Represents a Sql Server user on the Cloud SQL @@ -4238,9 +4488,9 @@ type SqlServerUserDetails struct { NullFields []string `json:"-"` } -func (s *SqlServerUserDetails) MarshalJSON() ([]byte, error) { +func (s SqlServerUserDetails) MarshalJSON() ([]byte, error) { type NoMethod SqlServerUserDetails - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SslCert: SslCerts Resource @@ -4283,9 +4533,9 @@ type SslCert struct { NullFields []string `json:"-"` } -func (s *SslCert) MarshalJSON() ([]byte, error) { +func (s SslCert) MarshalJSON() ([]byte, error) { type NoMethod SslCert - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SslCertDetail: SslCertDetail. @@ -4308,9 +4558,9 @@ type SslCertDetail struct { NullFields []string `json:"-"` } -func (s *SslCertDetail) MarshalJSON() ([]byte, error) { +func (s SslCertDetail) MarshalJSON() ([]byte, error) { type NoMethod SslCertDetail - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SslCertsCreateEphemeralRequest: SslCerts create ephemeral certificate @@ -4333,9 +4583,9 @@ type SslCertsCreateEphemeralRequest struct { NullFields []string `json:"-"` } -func (s *SslCertsCreateEphemeralRequest) MarshalJSON() ([]byte, error) { +func (s SslCertsCreateEphemeralRequest) MarshalJSON() ([]byte, error) { type NoMethod SslCertsCreateEphemeralRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SslCertsInsertRequest: SslCerts insert request. @@ -4356,9 +4606,9 @@ type SslCertsInsertRequest struct { NullFields []string `json:"-"` } -func (s *SslCertsInsertRequest) MarshalJSON() ([]byte, error) { +func (s SslCertsInsertRequest) MarshalJSON() ([]byte, error) { type NoMethod SslCertsInsertRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SslCertsInsertResponse: SslCert insert response. 
@@ -4389,9 +4639,9 @@ type SslCertsInsertResponse struct { NullFields []string `json:"-"` } -func (s *SslCertsInsertResponse) MarshalJSON() ([]byte, error) { +func (s SslCertsInsertResponse) MarshalJSON() ([]byte, error) { type NoMethod SslCertsInsertResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SslCertsListResponse: SslCerts list response. @@ -4416,9 +4666,9 @@ type SslCertsListResponse struct { NullFields []string `json:"-"` } -func (s *SslCertsListResponse) MarshalJSON() ([]byte, error) { +func (s SslCertsListResponse) MarshalJSON() ([]byte, error) { type NoMethod SslCertsListResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // SyncFlags: Initial sync flags for certain Cloud SQL APIs. Currently used for @@ -4442,9 +4692,9 @@ type SyncFlags struct { NullFields []string `json:"-"` } -func (s *SyncFlags) MarshalJSON() ([]byte, error) { +func (s SyncFlags) MarshalJSON() ([]byte, error) { type NoMethod SyncFlags - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Tier: A Google Cloud SQL service tier resource. @@ -4473,9 +4723,9 @@ type Tier struct { NullFields []string `json:"-"` } -func (s *Tier) MarshalJSON() ([]byte, error) { +func (s Tier) MarshalJSON() ([]byte, error) { type NoMethod Tier - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TiersListResponse: Tiers list response. @@ -4500,9 +4750,9 @@ type TiersListResponse struct { NullFields []string `json:"-"` } -func (s *TiersListResponse) MarshalJSON() ([]byte, error) { +func (s TiersListResponse) MarshalJSON() ([]byte, error) { type NoMethod TiersListResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TruncateLogContext: Database Instance truncate log context. @@ -4525,9 +4775,9 @@ type TruncateLogContext struct { NullFields []string `json:"-"` } -func (s *TruncateLogContext) MarshalJSON() ([]byte, error) { +func (s TruncateLogContext) MarshalJSON() ([]byte, error) { type NoMethod TruncateLogContext - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // User: A Cloud SQL user resource. @@ -4575,9 +4825,11 @@ type User struct { // "BUILT_IN" - The database's built-in user type. // "CLOUD_IAM_USER" - Cloud IAM user. // "CLOUD_IAM_SERVICE_ACCOUNT" - Cloud IAM service account. - // "CLOUD_IAM_GROUP" - Cloud IAM group non-login user. - // "CLOUD_IAM_GROUP_USER" - Cloud IAM group login user. - // "CLOUD_IAM_GROUP_SERVICE_ACCOUNT" - Cloud IAM group service account. + // "CLOUD_IAM_GROUP" - Cloud IAM group. Not used for login. + // "CLOUD_IAM_GROUP_USER" - Read-only. Login for a user that belongs to the + // Cloud IAM group. + // "CLOUD_IAM_GROUP_SERVICE_ACCOUNT" - Read-only. Login for a service account + // that belongs to the Cloud IAM group. Type string `json:"type,omitempty"` // ServerResponse contains the HTTP response code and headers from the server. 
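The clarified User type values below distinguish the insertable CLOUD_IAM_GROUP entry from the read-only login records that Cloud SQL creates for group members. A short sketch of adding a group; the group address, project, and instance are placeholders:

package main

import (
	"context"
	"log"

	sqladmin "google.golang.org/api/sql/v1beta4"
)

func main() {
	ctx := context.Background()
	svc, err := sqladmin.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Per the enum docs, CLOUD_IAM_GROUP is not itself a login user;
	// CLOUD_IAM_GROUP_USER and CLOUD_IAM_GROUP_SERVICE_ACCOUNT appear as
	// read-only entries for members that have logged in.
	group := &sqladmin.User{
		Name: "db-admins@example.com", // placeholder group address
		Type: "CLOUD_IAM_GROUP",
	}
	op, err := svc.Users.Insert("example-project", "example-instance", group).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("user insert operation: %s", op.Name)
}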
@@ -4595,9 +4847,9 @@ type User struct { NullFields []string `json:"-"` } -func (s *User) MarshalJSON() ([]byte, error) { +func (s User) MarshalJSON() ([]byte, error) { type NoMethod User - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UserPasswordValidationPolicy: User level password validation policy. @@ -4629,9 +4881,9 @@ type UserPasswordValidationPolicy struct { NullFields []string `json:"-"` } -func (s *UserPasswordValidationPolicy) MarshalJSON() ([]byte, error) { +func (s UserPasswordValidationPolicy) MarshalJSON() ([]byte, error) { type NoMethod UserPasswordValidationPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UsersListResponse: User list response. @@ -4658,9 +4910,9 @@ type UsersListResponse struct { NullFields []string `json:"-"` } -func (s *UsersListResponse) MarshalJSON() ([]byte, error) { +func (s UsersListResponse) MarshalJSON() ([]byte, error) { type NoMethod UsersListResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type BackupRunsDeleteCall struct { @@ -6134,6 +6386,230 @@ func (c *FlagsListCall) Do(opts ...googleapi.CallOption) (*FlagsListResponse, er return ret, nil } +type InstancesListServerCertificatesCall struct { + s *Service + project string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// ListServerCertificates: Lists all versions of server certificates and +// certificate authorities (CAs) for the specified instance. There can be up to +// three sets of certs listed: the certificate that is currently in use, a +// future that has been added but not yet used to sign a certificate, and a +// certificate that has been rotated out. +// +// - instance: Cloud SQL instance ID. This does not include the project ID. +// - project: Project ID of the project that contains the instance. +func (r *InstancesService) ListServerCertificates(project string, instance string) *InstancesListServerCertificatesCall { + c := &InstancesListServerCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *InstancesListServerCertificatesCall) Fields(s ...googleapi.Field) *InstancesListServerCertificatesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *InstancesListServerCertificatesCall) IfNoneMatch(entityTag string) *InstancesListServerCertificatesCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *InstancesListServerCertificatesCall) Context(ctx context.Context) *InstancesListServerCertificatesCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *InstancesListServerCertificatesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesListServerCertificatesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "sql/v1beta4/projects/{project}/instances/{instance}/listServerCertificates") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "sql.instances.ListServerCertificates" call. +// Any non-2xx status code is an error. Response headers are in either +// *InstancesListServerCertificatesResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesListServerCertificatesCall) Do(opts ...googleapi.CallOption) (*InstancesListServerCertificatesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &InstancesListServerCertificatesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type InstancesRotateServerCertificateCall struct { + s *Service + project string + instance string + instancesrotateservercertificaterequest *InstancesRotateServerCertificateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// RotateServerCertificate: Rotates the server certificate version to one +// previously added with the addServerCertificate method. For instances not +// using Certificate Authority Service (CAS) server CA, use RotateServerCa +// instead. +// +// - instance: Cloud SQL instance ID. This does not include the project ID. +// - project: Project ID of the project that contains the instance. +func (r *InstancesService) RotateServerCertificate(project string, instance string, instancesrotateservercertificaterequest *InstancesRotateServerCertificateRequest) *InstancesRotateServerCertificateCall { + c := &InstancesRotateServerCertificateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + c.instancesrotateservercertificaterequest = instancesrotateservercertificaterequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. 
+func (c *InstancesRotateServerCertificateCall) Fields(s ...googleapi.Field) *InstancesRotateServerCertificateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *InstancesRotateServerCertificateCall) Context(ctx context.Context) *InstancesRotateServerCertificateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *InstancesRotateServerCertificateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesRotateServerCertificateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesrotateservercertificaterequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "sql/v1beta4/projects/{project}/instances/{instance}/rotateServerCertificate") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "sql.instances.RotateServerCertificate" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *InstancesRotateServerCertificateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + type InstancesAcquireSsrsLeaseCall struct { s *Service project string @@ -6258,7 +6734,9 @@ type InstancesAddServerCaCall struct { // specified instance. Required to prepare for a certificate rotation. If a CA // version was previously added but never used in a certificate rotation, this // operation replaces that version. There cannot be more than one CA version -// waiting to be rotated in. +// waiting to be rotated in. For instances that have enabled Certificate +// Authority Service (CAS) based server CA, use AddServerCertificate to add a +// new server certificate. // // - instance: Cloud SQL instance ID. This does not include the project ID. // - project: Project ID of the project that contains the instance. 
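The IfNoneMatch setter defined above enables ETag-conditional reads: Do reports a 304 as a wrapped googleapi.Error, which googleapi.IsNotModified recognizes. An illustrative round trip, assuming the response ETag is cached between calls and the usual placeholder project and instance names:

package main

import (
	"context"
	"log"

	"google.golang.org/api/googleapi"
	sqladmin "google.golang.org/api/sql/v1beta4"
)

func main() {
	ctx := context.Background()
	svc, err := sqladmin.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := svc.Instances.ListServerCertificates("example-project", "example-instance").Do()
	if err != nil {
		log.Fatal(err)
	}
	etag := resp.Header.Get("Etag") // cache alongside the response

	// Later: re-issue the call conditionally; an unchanged resource comes
	// back as an error that googleapi.IsNotModified detects.
	_, err = svc.Instances.ListServerCertificates("example-project", "example-instance").
		IfNoneMatch(etag).Do()
	if googleapi.IsNotModified(err) {
		log.Println("server certificates unchanged; cached copy is current")
	} else if err != nil {
		log.Fatal(err)
	}
}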
@@ -6348,6 +6826,111 @@ func (c *InstancesAddServerCaCall) Do(opts ...googleapi.CallOption) (*Operation, return ret, nil } +type InstancesAddServerCertificateCall struct { + s *Service + project string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AddServerCertificate: Add a new trusted server certificate version for the +// specified instance using Certificate Authority Service (CAS) server CA. +// Required to prepare for a certificate rotation. If a server certificate +// version was previously added but never used in a certificate rotation, this +// operation replaces that version. There cannot be more than one certificate +// version waiting to be rotated in. For instances not using CAS server CA, use +// AddServerCa instead. +// +// - instance: Cloud SQL instance ID. This does not include the project ID. +// - project: Project ID of the project that contains the instance. +func (r *InstancesService) AddServerCertificate(project string, instance string) *InstancesAddServerCertificateCall { + c := &InstancesAddServerCertificateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *InstancesAddServerCertificateCall) Fields(s ...googleapi.Field) *InstancesAddServerCertificateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *InstancesAddServerCertificateCall) Context(ctx context.Context) *InstancesAddServerCertificateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *InstancesAddServerCertificateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesAddServerCertificateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "sql/v1beta4/projects/{project}/instances/{instance}/addServerCertificate") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "sql.instances.addServerCertificate" call. +// Any non-2xx status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *InstancesAddServerCertificateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + type InstancesCloneCall struct { s *Service project string @@ -6476,6 +7059,21 @@ func (r *InstancesService) Delete(project string, instance string) *InstancesDel return c } +// FinalBackupExpiryTime sets the optional parameter "finalBackupExpiryTime": +// Final Backup expiration time. Timestamp in UTC of when this resource is +// considered expired. +func (c *InstancesDeleteCall) FinalBackupExpiryTime(finalBackupExpiryTime string) *InstancesDeleteCall { + c.urlParams_.Set("finalBackupExpiryTime", finalBackupExpiryTime) + return c +} + +// FinalBackupTtlDays sets the optional parameter "finalBackupTtlDays": +// Retention period of the final backup. +func (c *InstancesDeleteCall) FinalBackupTtlDays(finalBackupTtlDays int64) *InstancesDeleteCall { + c.urlParams_.Set("finalBackupTtlDays", fmt.Sprint(finalBackupTtlDays)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details. @@ -8327,7 +8925,9 @@ type InstancesRotateServerCaCall struct { // RotateServerCa: Rotates the server certificate to one signed by the // Certificate Authority (CA) version previously added with the addServerCA -// method. +// method. For instances that have enabled Certificate Authority Service (CAS) +// based server CA, use RotateServerCertificate to rotate the server +// certificate. // // - instance: Cloud SQL instance ID. This does not include the project ID. // - project: Project ID of the project that contains the instance. 
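Taken together, the calls above describe a two-step rotation for CAS-based server CAs, and instance deletion gains optional final-backup parameters. A sketch of both flows, assuming Instances.AddServerCertificate, Instances.RotateServerCertificate, and Instances.Delete as vendored in this patch; all identifiers are placeholders.

package main

import (
	"context"
	"log"

	sqladmin "google.golang.org/api/sqladmin/v1beta4"
)

// rotateViaCAS stages a new CAS-based server certificate and then swaps it
// in, mirroring the AddServerCertificate/RotateServerCertificate flow
// documented above.
func rotateViaCAS(ctx context.Context, svc *sqladmin.Service, project, instance string) error {
	// Stage the next certificate; this replaces any staged-but-unrotated version.
	if _, err := svc.Instances.AddServerCertificate(project, instance).Context(ctx).Do(); err != nil {
		return err
	}
	// Rotate to the staged certificate.
	_, err := svc.Instances.RotateServerCertificate(project, instance,
		&sqladmin.InstancesRotateServerCertificateRequest{}).Context(ctx).Do()
	return err
}

// deleteWithFinalBackup exercises the new optional delete parameters,
// retaining a final backup for seven days.
func deleteWithFinalBackup(ctx context.Context, svc *sqladmin.Service, project, instance string) error {
	_, err := svc.Instances.Delete(project, instance).FinalBackupTtlDays(7).Context(ctx).Do()
	return err
}

func main() {
	ctx := context.Background()
	svc, err := sqladmin.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if err := rotateViaCAS(ctx, svc, "my-project", "my-instance"); err != nil {
		log.Fatal(err)
	}
	if err := deleteWithFinalBackup(ctx, svc, "my-project", "old-instance"); err != nil {
		log.Fatal(err)
	}
}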
diff --git a/terraform/providers/google/vendor/google.golang.org/api/storage/v1/storage-api.json b/terraform/providers/google/vendor/google.golang.org/api/storage/v1/storage-api.json index 160800c2bb2..09b7f6487aa 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -27,13 +27,73 @@ "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", "endpoints": [ + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.europe-west3.rep.googleapis.com/", + "location": "europe-west3" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.europe-west8.rep.googleapis.com/", + "location": "europe-west8" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.europe-west9.rep.googleapis.com/", + "location": "europe-west9" + }, { "description": "Regional Endpoint", "endpointUrl": "https://storage.me-central2.rep.googleapis.com/", "location": "me-central2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-central1.rep.googleapis.com/", + "location": "us-central1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-east1.rep.googleapis.com/", + "location": "us-east1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-east4.rep.googleapis.com/", + "location": "us-east4" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-east5.rep.googleapis.com/", + "location": "us-east5" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-south1.rep.googleapis.com/", + "location": "us-south1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-west1.rep.googleapis.com/", + "location": "us-west1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-west2.rep.googleapis.com/", + "location": "us-west2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-west3.rep.googleapis.com/", + "location": "us-west3" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-west4.rep.googleapis.com/", + "location": "us-west4" } ], - "etag": "\"3132383134303835313436343635393933303731\"", + "etag": "\"3132333635343336333933383332343134323139\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -550,7 +610,7 @@ "buckets": { "methods": { "delete": { - "description": "Permanently deletes an empty bucket.", + "description": "Deletes an empty bucket. Deletions are permanent unless soft delete is enabled on the bucket.", "httpMethod": "DELETE", "id": "storage.buckets.delete", "parameterOrder": [ @@ -602,6 +662,12 @@ "required": true, "type": "string" }, + "generation": { + "description": "If present, specifies the generation of the bucket. This is required if softDeleted is true.", + "format": "int64", + "location": "query", + "type": "string" + }, "ifMetagenerationMatch": { "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", "format": "int64", @@ -627,6 +693,11 @@ "location": "query", "type": "string" }, + "softDeleted": { + "description": "If true, return the soft-deleted version of this bucket. The default is false. 
For more information, see [Soft Delete](https://cloud.google.com/storage/docs/soft-delete).", + "location": "query", + "type": "boolean" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -850,6 +921,11 @@ "location": "query", "type": "string" }, + "softDeleted": { + "description": "If true, only soft-deleted bucket versions will be returned. The default is false. For more information, see [Soft Delete](https://cloud.google.com/storage/docs/soft-delete).", + "location": "query", + "type": "boolean" + }, "userProject": { "description": "The project to be billed for this request.", "location": "query", @@ -1003,6 +1079,69 @@ "https://www.googleapis.com/auth/devstorage.full_control" ] }, + "relocate": { + "description": "Initiates a long-running Relocate Bucket operation on the specified bucket.", + "httpMethod": "POST", + "id": "storage.buckets.relocate", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket to be moved.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/relocate", + "request": { + "$ref": "RelocateBucketRequest" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "restore": { + "description": "Restores a soft-deleted bucket.", + "httpMethod": "POST", + "id": "storage.buckets.restore", + "parameterOrder": [ + "bucket", + "generation" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "Generation of a bucket.", + "format": "int64", + "location": "query", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/restore", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, "setIamPolicy": { "description": "Updates an IAM policy for the specified bucket.", "httpMethod": "PUT", @@ -2768,8 +2907,13 @@ "location": "query", "type": "string" }, + "restoreToken": { + "description": "Restore token used to differentiate soft-deleted objects with the same name and generation. Only applicable for hierarchical namespace buckets and if softDeleted is set to true. This parameter is optional, and is only required in the rare case when there are multiple soft-deleted objects with the same name and generation.", + "location": "query", + "type": "string" + }, "softDeleted": { - "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see Soft Delete.", + "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see [Soft Delete](https://cloud.google.com/storage/docs/soft-delete).", "location": "query", "type": "boolean" }, @@ -3031,7 +3175,7 @@ "type": "string" }, "softDeleted": { - "description": "If true, only soft-deleted object versions will be listed. The default is false. 
For more information, see Soft Delete.",
+          "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see [Soft Delete](https://cloud.google.com/storage/docs/soft-delete).",
           "location": "query",
           "type": "boolean"
         },
@@ -3046,7 +3190,7 @@
           "type": "string"
         },
         "versions": {
-          "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.",
+          "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see [Object Versioning](https://cloud.google.com/storage/docs/object-versioning).",
           "location": "query",
           "type": "boolean"
         }
@@ -3225,7 +3369,7 @@
           "type": "string"
         },
         "object": {
-          "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+          "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).",
           "location": "path",
           "required": true,
           "type": "string"
         },
@@ -3243,6 +3387,11 @@
           "location": "query",
           "type": "string"
         },
+        "restoreToken": {
+          "description": "Restore token used to differentiate soft-deleted objects with the same name and generation. Only applicable for hierarchical namespace buckets. This parameter is optional, and is only required in the rare case when there are multiple soft-deleted objects with the same name and generation.",
+          "location": "query",
+          "type": "string"
+        },
         "userProject": {
           "description": "The project to be billed for this request. Required for Requester Pays buckets.",
           "location": "query",
@@ -3694,7 +3843,7 @@
           "type": "string"
         },
         "versions": {
-          "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.",
+          "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see [Object Versioning](https://cloud.google.com/storage/docs/object-versioning).",
           "location": "query",
           "type": "boolean"
         }
@@ -3720,6 +3869,38 @@
     },
     "operations": {
       "methods": {
+        "advanceRelocateBucket": {
+          "description": "Starts asynchronous advancement of the relocate bucket operation in the case of required write downtime, to allow it to lock the bucket at the source location, and proceed with the bucket location swap. The server makes a best effort to advance the relocate bucket operation, but success is not guaranteed.",
+          "httpMethod": "POST",
+          "id": "storage.buckets.operations.advanceRelocateBucket",
+          "parameterOrder": [
+            "bucket",
+            "operationId"
+          ],
+          "parameters": {
+            "bucket": {
+              "description": "Name of the bucket to advance the relocate for.",
+              "location": "path",
+              "required": true,
+              "type": "string"
+            },
+            "operationId": {
+              "description": "ID of the operation resource.",
+              "location": "path",
+              "required": true,
+              "type": "string"
+            }
+          },
+          "path": "b/{bucket}/operations/{operationId}/advanceRelocateBucket",
+          "request": {
+            "$ref": "AdvanceRelocateBucketOperationRequest"
+          },
+          "scopes": [
+            "https://www.googleapis.com/auth/cloud-platform",
+            "https://www.googleapis.com/auth/devstorage.full_control",
+            "https://www.googleapis.com/auth/devstorage.read_write"
+          ]
+        },
        "cancel": {
          "description": "Starts asynchronous cancellation on a long-running operation. 
The server makes a best effort to cancel the operation, but success is not guaranteed.", "httpMethod": "POST", @@ -3996,7 +4177,7 @@ ] }, "update": { - "description": "Updates the state of an HMAC key. See the HMAC Key resource descriptor for valid states.", + "description": "Updates the state of an HMAC key. See the [HMAC Key resource descriptor](https://cloud.google.com/storage/docs/json_api/v1/projects/hmacKeys/update#request-body) for valid states.", "httpMethod": "PUT", "id": "storage.projects.hmacKeys.update", "parameterOrder": [ @@ -4075,9 +4256,26 @@ } } }, - "revision": "20240524", + "revision": "20241008", "rootUrl": "https://storage.googleapis.com/", "schemas": { + "AdvanceRelocateBucketOperationRequest": { + "description": "An AdvanceRelocateBucketOperation request.", + "id": "AdvanceRelocateBucketOperationRequest", + "properties": { + "expireTime": { + "description": "Specifies the time when the relocation will revert to the sync stage if the relocation hasn't succeeded.", + "format": "date-time", + "type": "string" + }, + "ttl": { + "description": "Specifies the duration after which the relocation will revert to the sync stage if the relocation hasn't succeeded. Optional, if not supplied, a default value of 12h will be used.", + "format": "google-duration", + "type": "string" + } + }, + "type": "object" + }, "AnywhereCache": { "description": "An Anywhere Cache instance.", "id": "AnywhereCache", @@ -4283,6 +4481,16 @@ "description": "HTTP 1.1 Entity tag for the bucket.", "type": "string" }, + "generation": { + "description": "The generation of this bucket.", + "format": "int64", + "type": "string" + }, + "hardDeleteTime": { + "description": "The hard delete time of the bucket in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, "hierarchicalNamespace": { "description": "The bucket's hierarchical namespace configuration.", "properties": { @@ -4337,6 +4545,49 @@ "description": "The ID of the bucket. For buckets, the id and name properties are the same.", "type": "string" }, + "ipFilter": { + "description": "The bucket's IP filter configuration. Specifies the network sources that are allowed to access the operations on the bucket, as well as its underlying objects. Only enforced when the mode is set to 'Enabled'.", + "properties": { + "mode": { + "description": "The mode of the IP filter. Valid values are 'Enabled' and 'Disabled'.", + "type": "string" + }, + "publicNetworkSource": { + "description": "The public network source of the bucket's IP filter.", + "properties": { + "allowedIpCidrRanges": { + "description": "The list of public IPv4, IPv6 cidr ranges that are allowed to access the bucket.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "vpcNetworkSources": { + "description": "The list of [VPC network](https://cloud.google.com/vpc/docs/vpc) sources of the bucket's IP filter.", + "items": { + "properties": { + "allowedIpCidrRanges": { + "description": "The list of IPv4, IPv6 cidr ranges subnetworks that are allowed to access the bucket.", + "items": { + "type": "string" + }, + "type": "array" + }, + "network": { + "description": "Name of the network. Format: projects/{PROJECT_ID}/global/networks/{NETWORK_NAME}", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, "kind": { "default": "storage#bucket", "description": "The kind of item this is. 
For buckets, this is always storage#bucket.", @@ -4351,7 +4602,7 @@ "type": "object" }, "lifecycle": { - "description": "The bucket's lifecycle configuration. See lifecycle management for more information.", + "description": "The bucket's lifecycle configuration. See [Lifecycle Management](https://cloud.google.com/storage/docs/lifecycle) for more information.", "properties": { "rule": { "description": "A lifecycle management rule, which is made of an action to take and the condition(s) under which the action will be taken.", @@ -4450,7 +4701,7 @@ "type": "object" }, "location": { - "description": "The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the developer's guide for the authoritative list.", + "description": "The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the [Developer's Guide](https://cloud.google.com/storage/docs/locations) for the authoritative list.", "type": "string" }, "locationType": { @@ -4538,6 +4789,10 @@ "description": "The Recovery Point Objective (RPO) of this bucket. Set to ASYNC_TURBO to turn on Turbo Replication on a bucket.", "type": "string" }, + "satisfiesPZI": { + "description": "Reserved for future use.", + "type": "boolean" + }, "satisfiesPZS": { "description": "Reserved for future use.", "type": "boolean" @@ -4562,8 +4817,13 @@ }, "type": "object" }, + "softDeleteTime": { + "description": "The soft delete time of the bucket in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, "storageClass": { - "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see storage classes.", + "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see [Storage Classes](https://cloud.google.com/storage/docs/storage-classes).", "type": "string" }, "timeCreated": { @@ -4587,7 +4847,7 @@ "type": "object" }, "website": { - "description": "The bucket's website configuration, controlling how the service behaves when accessing bucket contents as a web site. See the Static Website Examples for more information.", + "description": "The bucket's website configuration, controlling how the service behaves when accessing bucket contents as a web site. See the [Static Website Examples](https://cloud.google.com/storage/docs/static-website) for more information.", "properties": { "mainPageSuffix": { "description": "If the requested object path is missing, the service will ensure the path has a trailing '/', append this suffix, and attempt to retrieve the resulting object. 
This allows the creation of index.html objects to represent directory pages.", @@ -5007,6 +5267,11 @@ "description": "The response message for storage.buckets.operations.list.", "id": "GoogleLongrunningListOperationsResponse", "properties": { + "kind": { + "default": "storage#operations", + "description": "The kind of item this is. For lists of operations, this is always storage#operations.", + "type": "string" + }, "nextPageToken": { "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", "type": "string" @@ -5033,6 +5298,11 @@ "$ref": "GoogleRpcStatus", "description": "The error result of the operation in case of failure or cancellation." }, + "kind": { + "default": "storage#operation", + "description": "The kind of item this is. For operations, this is always storage#operation.", + "type": "string" + }, "metadata": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -5052,6 +5322,10 @@ }, "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as \"Delete\", the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type \"XxxResponse\", where \"Xxx\" is the original method name. For example, if the original method name is \"TakeSnapshot()\", the inferred response type is \"TakeSnapshotResponse\".", "type": "object" + }, + "selfLink": { + "description": "The link to this long running operation.", + "type": "string" } }, "type": "object" @@ -5368,7 +5642,7 @@ "type": "string" }, "crc32c": { - "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best Practices.", + "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see [Data Validation and Change Detection](https://cloud.google.com/storage/docs/data-validation).", "type": "string" }, "customTime": { @@ -5422,7 +5696,7 @@ "type": "string" }, "md5Hash": { - "description": "MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, see Hashes and ETags: Best Practices.", + "description": "MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, see [Data Validation and Change Detection](https://cloud.google.com/storage/docs/data-validation).", "type": "string" }, "mediaLink": { @@ -5460,6 +5734,10 @@ }, "type": "object" }, + "restoreToken": { + "description": "Restore token used to differentiate deleted objects with the same name and generation. 
This field is only returned for deleted objects in hierarchical namespace buckets.", + "type": "string" + }, "retention": { "description": "A collection of object level retention parameters.", "properties": { @@ -5728,6 +6006,34 @@ }, "type": "object" }, + "RelocateBucketRequest": { + "description": "A Relocate Bucket request.", + "id": "RelocateBucketRequest", + "properties": { + "destinationCustomPlacementConfig": { + "description": "The bucket's new custom placement configuration if relocating to a Custom Dual Region.", + "properties": { + "dataLocations": { + "description": "The list of regional locations in which data is placed.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "destinationLocation": { + "description": "The new location the bucket will be relocated to.", + "type": "string" + }, + "validateOnly": { + "description": "If true, validate the operation, but do not actually relocate the bucket.", + "type": "boolean" + } + }, + "type": "object" + }, "RewriteResponse": { "description": "A rewrite response.", "id": "RewriteResponse", diff --git a/terraform/providers/google/vendor/google.golang.org/api/storage/v1/storage-gen.go b/terraform/providers/google/vendor/google.golang.org/api/storage/v1/storage-gen.go index b4d425e59f4..2c11b2d8d61 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -93,6 +93,7 @@ var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint var _ = internal.Version +var _ = gax.Version const apiId = "storage:v1" const apiName = "storage" @@ -133,6 +134,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) opts = append(opts, internaloption.WithDefaultEndpointTemplate(basePathTemplate)) opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) + opts = append(opts, internaloption.EnableNewAuthLibrary()) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -341,6 +343,34 @@ type ProjectsServiceAccountService struct { s *Service } +// AdvanceRelocateBucketOperationRequest: An AdvanceRelocateBucketOperation +// request. +type AdvanceRelocateBucketOperationRequest struct { + // ExpireTime: Specifies the time when the relocation will revert to the sync + // stage if the relocation hasn't succeeded. + ExpireTime string `json:"expireTime,omitempty"` + // Ttl: Specifies the duration after which the relocation will revert to the + // sync stage if the relocation hasn't succeeded. Optional, if not supplied, a + // default value of 12h will be used. + Ttl string `json:"ttl,omitempty"` + // ForceSendFields is a list of field names (e.g. "ExpireTime") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "ExpireTime") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s AdvanceRelocateBucketOperationRequest) MarshalJSON() ([]byte, error) { + type NoMethod AdvanceRelocateBucketOperationRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + // AnywhereCache: An Anywhere Cache instance. type AnywhereCache struct { // AdmissionPolicy: The cache-level entry admission policy. @@ -388,9 +418,9 @@ type AnywhereCache struct { NullFields []string `json:"-"` } -func (s *AnywhereCache) MarshalJSON() ([]byte, error) { +func (s AnywhereCache) MarshalJSON() ([]byte, error) { type NoMethod AnywhereCache - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AnywhereCaches: A list of Anywhere Caches. @@ -420,9 +450,9 @@ type AnywhereCaches struct { NullFields []string `json:"-"` } -func (s *AnywhereCaches) MarshalJSON() ([]byte, error) { +func (s AnywhereCaches) MarshalJSON() ([]byte, error) { type NoMethod AnywhereCaches - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Bucket: A bucket. @@ -459,6 +489,10 @@ type Bucket struct { Encryption *BucketEncryption `json:"encryption,omitempty"` // Etag: HTTP 1.1 Entity tag for the bucket. Etag string `json:"etag,omitempty"` + // Generation: The generation of this bucket. + Generation int64 `json:"generation,omitempty,string"` + // HardDeleteTime: The hard delete time of the bucket in RFC 3339 format. + HardDeleteTime string `json:"hardDeleteTime,omitempty"` // HierarchicalNamespace: The bucket's hierarchical namespace configuration. HierarchicalNamespace *BucketHierarchicalNamespace `json:"hierarchicalNamespace,omitempty"` // IamConfiguration: The bucket's IAM configuration. @@ -466,16 +500,21 @@ type Bucket struct { // Id: The ID of the bucket. For buckets, the id and name properties are the // same. Id string `json:"id,omitempty"` + // IpFilter: The bucket's IP filter configuration. Specifies the network + // sources that are allowed to access the operations on the bucket, as well as + // its underlying objects. Only enforced when the mode is set to 'Enabled'. + IpFilter *BucketIpFilter `json:"ipFilter,omitempty"` // Kind: The kind of item this is. For buckets, this is always storage#bucket. Kind string `json:"kind,omitempty"` // Labels: User-provided labels, in key/value pairs. Labels map[string]string `json:"labels,omitempty"` - // Lifecycle: The bucket's lifecycle configuration. See lifecycle management - // for more information. + // Lifecycle: The bucket's lifecycle configuration. See Lifecycle Management + // (https://cloud.google.com/storage/docs/lifecycle) for more information. Lifecycle *BucketLifecycle `json:"lifecycle,omitempty"` // Location: The location of the bucket. Object data for objects in the bucket // resides in physical storage within this region. Defaults to US. See the - // developer's guide for the authoritative list. + // Developer's Guide (https://cloud.google.com/storage/docs/locations) for the + // authoritative list. Location string `json:"location,omitempty"` // LocationType: The type of the bucket location. LocationType string `json:"locationType,omitempty"` @@ -506,6 +545,8 @@ type Bucket struct { // Rpo: The Recovery Point Objective (RPO) of this bucket. Set to ASYNC_TURBO // to turn on Turbo Replication on a bucket. Rpo string `json:"rpo,omitempty"` + // SatisfiesPZI: Reserved for future use. 
+ SatisfiesPZI bool `json:"satisfiesPZI,omitempty"` // SatisfiesPZS: Reserved for future use. SatisfiesPZS bool `json:"satisfiesPZS,omitempty"` // SelfLink: The URI of this bucket. @@ -514,13 +555,16 @@ type Bucket struct { // of time that soft-deleted objects will be retained, and cannot be // permanently deleted. SoftDeletePolicy *BucketSoftDeletePolicy `json:"softDeletePolicy,omitempty"` + // SoftDeleteTime: The soft delete time of the bucket in RFC 3339 format. + SoftDeleteTime string `json:"softDeleteTime,omitempty"` // StorageClass: The bucket's default storage class, used whenever no // storageClass is specified for a newly-created object. This defines how // objects in the bucket are stored and determines the SLA and the cost of // storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, // COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is not // specified when the bucket is created, it will default to STANDARD. For more - // information, see storage classes. + // information, see Storage Classes + // (https://cloud.google.com/storage/docs/storage-classes). StorageClass string `json:"storageClass,omitempty"` // TimeCreated: The creation time of the bucket in RFC 3339 format. TimeCreated string `json:"timeCreated,omitempty"` @@ -530,7 +574,8 @@ type Bucket struct { Versioning *BucketVersioning `json:"versioning,omitempty"` // Website: The bucket's website configuration, controlling how the service // behaves when accessing bucket contents as a web site. See the Static Website - // Examples for more information. + // Examples (https://cloud.google.com/storage/docs/static-website) for more + // information. Website *BucketWebsite `json:"website,omitempty"` // ServerResponse contains the HTTP response code and headers from the server. @@ -548,9 +593,9 @@ type Bucket struct { NullFields []string `json:"-"` } -func (s *Bucket) MarshalJSON() ([]byte, error) { +func (s Bucket) MarshalJSON() ([]byte, error) { type NoMethod Bucket - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketAutoclass: The bucket's Autoclass configuration. @@ -580,9 +625,9 @@ type BucketAutoclass struct { NullFields []string `json:"-"` } -func (s *BucketAutoclass) MarshalJSON() ([]byte, error) { +func (s BucketAutoclass) MarshalJSON() ([]byte, error) { type NoMethod BucketAutoclass - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketBilling: The bucket's billing configuration. 
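The Bucket fields added above (generation, hardDeleteTime, softDeleteTime) pair with the softDeleted/generation parameters and the buckets.restore method introduced in the discovery document earlier in this patch. A minimal soft-delete round trip, assuming the generated SoftDeleted/Generation setters and Buckets.Restore mirror those discovery definitions; the bucket name and generation are placeholders.

package main

import (
	"context"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Fetch a soft-deleted bucket version; generation is required when
	// softDeleted is true. 1234567890 is a placeholder generation.
	b, err := svc.Buckets.Get("my-bucket").SoftDeleted(true).Generation(1234567890).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("soft-deleted %s, hard delete at %s", b.SoftDeleteTime, b.HardDeleteTime)
	// Restore the bucket before its hard delete time passes. buckets.restore
	// declares no response schema, so Do is assumed to return only an error.
	if err := svc.Buckets.Restore("my-bucket", b.Generation).Do(); err != nil {
		log.Fatal(err)
	}
}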
@@ -602,9 +647,9 @@ type BucketBilling struct { NullFields []string `json:"-"` } -func (s *BucketBilling) MarshalJSON() ([]byte, error) { +func (s BucketBilling) MarshalJSON() ([]byte, error) { type NoMethod BucketBilling - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type BucketCors struct { @@ -634,9 +679,9 @@ type BucketCors struct { NullFields []string `json:"-"` } -func (s *BucketCors) MarshalJSON() ([]byte, error) { +func (s BucketCors) MarshalJSON() ([]byte, error) { type NoMethod BucketCors - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketCustomPlacementConfig: The bucket's custom placement configuration for @@ -657,9 +702,9 @@ type BucketCustomPlacementConfig struct { NullFields []string `json:"-"` } -func (s *BucketCustomPlacementConfig) MarshalJSON() ([]byte, error) { +func (s BucketCustomPlacementConfig) MarshalJSON() ([]byte, error) { type NoMethod BucketCustomPlacementConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketEncryption: Encryption configuration for a bucket. @@ -680,9 +725,9 @@ type BucketEncryption struct { NullFields []string `json:"-"` } -func (s *BucketEncryption) MarshalJSON() ([]byte, error) { +func (s BucketEncryption) MarshalJSON() ([]byte, error) { type NoMethod BucketEncryption - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketHierarchicalNamespace: The bucket's hierarchical namespace @@ -704,9 +749,9 @@ type BucketHierarchicalNamespace struct { NullFields []string `json:"-"` } -func (s *BucketHierarchicalNamespace) MarshalJSON() ([]byte, error) { +func (s BucketHierarchicalNamespace) MarshalJSON() ([]byte, error) { type NoMethod BucketHierarchicalNamespace - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketIamConfiguration: The bucket's IAM configuration. 
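For relocation, the discovery additions earlier in this patch define RelocateBucketRequest, buckets.relocate, and operations.advanceRelocateBucket. A sketch assuming the generated surface exposes these as Buckets.Relocate and Operations.AdvanceRelocateBucket, and that the returned operation's Name is usable as the operation ID; the bucket, destination, and TTL are placeholders.

package main

import (
	"context"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Kick off the long-running relocation (set ValidateOnly to dry-run).
	op, err := svc.Buckets.Relocate("my-bucket", &storage.RelocateBucketRequest{
		DestinationLocation: "us-east1",
	}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	// Once prepared for write downtime, advance past the sync stage; without
	// an expiry the documented default TTL of 12h applies.
	req := &storage.AdvanceRelocateBucketOperationRequest{Ttl: "3600s"}
	if err := svc.Operations.AdvanceRelocateBucket("my-bucket", op.Name, req).Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}
}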
@@ -736,9 +781,9 @@ type BucketIamConfiguration struct { NullFields []string `json:"-"` } -func (s *BucketIamConfiguration) MarshalJSON() ([]byte, error) { +func (s BucketIamConfiguration) MarshalJSON() ([]byte, error) { type NoMethod BucketIamConfiguration - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketIamConfigurationBucketPolicyOnly: The bucket's uniform bucket-level @@ -768,9 +813,9 @@ type BucketIamConfigurationBucketPolicyOnly struct { NullFields []string `json:"-"` } -func (s *BucketIamConfigurationBucketPolicyOnly) MarshalJSON() ([]byte, error) { +func (s BucketIamConfigurationBucketPolicyOnly) MarshalJSON() ([]byte, error) { type NoMethod BucketIamConfigurationBucketPolicyOnly - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketIamConfigurationUniformBucketLevelAccess: The bucket's uniform @@ -798,13 +843,92 @@ type BucketIamConfigurationUniformBucketLevelAccess struct { NullFields []string `json:"-"` } -func (s *BucketIamConfigurationUniformBucketLevelAccess) MarshalJSON() ([]byte, error) { +func (s BucketIamConfigurationUniformBucketLevelAccess) MarshalJSON() ([]byte, error) { type NoMethod BucketIamConfigurationUniformBucketLevelAccess - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketIpFilter: The bucket's IP filter configuration. Specifies the network +// sources that are allowed to access the operations on the bucket, as well as +// its underlying objects. Only enforced when the mode is set to 'Enabled'. +type BucketIpFilter struct { + // Mode: The mode of the IP filter. Valid values are 'Enabled' and 'Disabled'. + Mode string `json:"mode,omitempty"` + // PublicNetworkSource: The public network source of the bucket's IP filter. + PublicNetworkSource *BucketIpFilterPublicNetworkSource `json:"publicNetworkSource,omitempty"` + // VpcNetworkSources: The list of VPC network + // (https://cloud.google.com/vpc/docs/vpc) sources of the bucket's IP filter. + VpcNetworkSources []*BucketIpFilterVpcNetworkSources `json:"vpcNetworkSources,omitempty"` + // ForceSendFields is a list of field names (e.g. "Mode") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Mode") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketIpFilter) MarshalJSON() ([]byte, error) { + type NoMethod BucketIpFilter + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketIpFilterPublicNetworkSource: The public network source of the bucket's +// IP filter. +type BucketIpFilterPublicNetworkSource struct { + // AllowedIpCidrRanges: The list of public IPv4, IPv6 cidr ranges that are + // allowed to access the bucket. + AllowedIpCidrRanges []string `json:"allowedIpCidrRanges,omitempty"` + // ForceSendFields is a list of field names (e.g. 
"AllowedIpCidrRanges") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AllowedIpCidrRanges") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` } -// BucketLifecycle: The bucket's lifecycle configuration. See lifecycle -// management for more information. +func (s BucketIpFilterPublicNetworkSource) MarshalJSON() ([]byte, error) { + type NoMethod BucketIpFilterPublicNetworkSource + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type BucketIpFilterVpcNetworkSources struct { + // AllowedIpCidrRanges: The list of IPv4, IPv6 cidr ranges subnetworks that are + // allowed to access the bucket. + AllowedIpCidrRanges []string `json:"allowedIpCidrRanges,omitempty"` + // Network: Name of the network. Format: + // projects/{PROJECT_ID}/global/networks/{NETWORK_NAME} + Network string `json:"network,omitempty"` + // ForceSendFields is a list of field names (e.g. "AllowedIpCidrRanges") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AllowedIpCidrRanges") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketIpFilterVpcNetworkSources) MarshalJSON() ([]byte, error) { + type NoMethod BucketIpFilterVpcNetworkSources + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketLifecycle: The bucket's lifecycle configuration. See Lifecycle +// Management (https://cloud.google.com/storage/docs/lifecycle) for more +// information. type BucketLifecycle struct { // Rule: A lifecycle management rule, which is made of an action to take and // the condition(s) under which the action will be taken. @@ -822,9 +946,9 @@ type BucketLifecycle struct { NullFields []string `json:"-"` } -func (s *BucketLifecycle) MarshalJSON() ([]byte, error) { +func (s BucketLifecycle) MarshalJSON() ([]byte, error) { type NoMethod BucketLifecycle - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type BucketLifecycleRule struct { @@ -845,9 +969,9 @@ type BucketLifecycleRule struct { NullFields []string `json:"-"` } -func (s *BucketLifecycleRule) MarshalJSON() ([]byte, error) { +func (s BucketLifecycleRule) MarshalJSON() ([]byte, error) { type NoMethod BucketLifecycleRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketLifecycleRuleAction: The action to take. 
@@ -871,9 +995,9 @@ type BucketLifecycleRuleAction struct { NullFields []string `json:"-"` } -func (s *BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) { +func (s BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) { type NoMethod BucketLifecycleRuleAction - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketLifecycleRuleCondition: The condition(s) under which the action will @@ -947,9 +1071,9 @@ type BucketLifecycleRuleCondition struct { NullFields []string `json:"-"` } -func (s *BucketLifecycleRuleCondition) MarshalJSON() ([]byte, error) { +func (s BucketLifecycleRuleCondition) MarshalJSON() ([]byte, error) { type NoMethod BucketLifecycleRuleCondition - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketLogging: The bucket's logging configuration, which defines the @@ -973,9 +1097,9 @@ type BucketLogging struct { NullFields []string `json:"-"` } -func (s *BucketLogging) MarshalJSON() ([]byte, error) { +func (s BucketLogging) MarshalJSON() ([]byte, error) { type NoMethod BucketLogging - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketObjectRetention: The bucket's object retention config. @@ -995,9 +1119,9 @@ type BucketObjectRetention struct { NullFields []string `json:"-"` } -func (s *BucketObjectRetention) MarshalJSON() ([]byte, error) { +func (s BucketObjectRetention) MarshalJSON() ([]byte, error) { type NoMethod BucketObjectRetention - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketOwner: The owner of the bucket. This is always the project team's @@ -1020,9 +1144,9 @@ type BucketOwner struct { NullFields []string `json:"-"` } -func (s *BucketOwner) MarshalJSON() ([]byte, error) { +func (s BucketOwner) MarshalJSON() ([]byte, error) { type NoMethod BucketOwner - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketRetentionPolicy: The bucket's retention policy. The retention policy @@ -1058,9 +1182,9 @@ type BucketRetentionPolicy struct { NullFields []string `json:"-"` } -func (s *BucketRetentionPolicy) MarshalJSON() ([]byte, error) { +func (s BucketRetentionPolicy) MarshalJSON() ([]byte, error) { type NoMethod BucketRetentionPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketSoftDeletePolicy: The bucket's soft delete policy, which defines the @@ -1087,9 +1211,9 @@ type BucketSoftDeletePolicy struct { NullFields []string `json:"-"` } -func (s *BucketSoftDeletePolicy) MarshalJSON() ([]byte, error) { +func (s BucketSoftDeletePolicy) MarshalJSON() ([]byte, error) { type NoMethod BucketSoftDeletePolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketVersioning: The bucket's versioning configuration. 
@@ -1109,14 +1233,15 @@ type BucketVersioning struct { NullFields []string `json:"-"` } -func (s *BucketVersioning) MarshalJSON() ([]byte, error) { +func (s BucketVersioning) MarshalJSON() ([]byte, error) { type NoMethod BucketVersioning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketWebsite: The bucket's website configuration, controlling how the // service behaves when accessing bucket contents as a web site. See the Static -// Website Examples for more information. +// Website Examples (https://cloud.google.com/storage/docs/static-website) for +// more information. type BucketWebsite struct { // MainPageSuffix: If the requested object path is missing, the service will // ensure the path has a trailing '/', append this suffix, and attempt to @@ -1140,9 +1265,9 @@ type BucketWebsite struct { NullFields []string `json:"-"` } -func (s *BucketWebsite) MarshalJSON() ([]byte, error) { +func (s BucketWebsite) MarshalJSON() ([]byte, error) { type NoMethod BucketWebsite - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketAccessControl: An access-control entry. @@ -1199,9 +1324,9 @@ type BucketAccessControl struct { NullFields []string `json:"-"` } -func (s *BucketAccessControl) MarshalJSON() ([]byte, error) { +func (s BucketAccessControl) MarshalJSON() ([]byte, error) { type NoMethod BucketAccessControl - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketAccessControlProjectTeam: The project team associated with the entity, @@ -1224,9 +1349,9 @@ type BucketAccessControlProjectTeam struct { NullFields []string `json:"-"` } -func (s *BucketAccessControlProjectTeam) MarshalJSON() ([]byte, error) { +func (s BucketAccessControlProjectTeam) MarshalJSON() ([]byte, error) { type NoMethod BucketAccessControlProjectTeam - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketAccessControls: An access-control list. @@ -1252,9 +1377,9 @@ type BucketAccessControls struct { NullFields []string `json:"-"` } -func (s *BucketAccessControls) MarshalJSON() ([]byte, error) { +func (s BucketAccessControls) MarshalJSON() ([]byte, error) { type NoMethod BucketAccessControls - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketStorageLayout: The storage layout configuration of a bucket. 
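Most of the churn in this file is mechanical: every MarshalJSON moves from a pointer to a value receiver while still delegating to gensupport.MarshalJSON. The change matters because encoding a struct value (rather than a pointer) previously bypassed the custom marshaler, silently dropping the ForceSendFields/NullFields handling. A small sketch of that contract using the BucketVersioning type above; the values are illustrative.

package main

import (
	"encoding/json"
	"fmt"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	v := storage.BucketVersioning{
		Enabled: false,
		// Enabled is a zero value and tagged omitempty, so it would normally
		// be dropped; ForceSendFields makes the wire payload carry an
		// explicit "enabled": false.
		ForceSendFields: []string{"Enabled"},
	}
	// With the value receiver, this works without taking &v.
	b, err := json.Marshal(v)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // {"enabled":false}
}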
@@ -1289,9 +1414,9 @@ type BucketStorageLayout struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *BucketStorageLayout) MarshalJSON() ([]byte, error) {
+func (s BucketStorageLayout) MarshalJSON() ([]byte, error) {
 	type NoMethod BucketStorageLayout
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // BucketStorageLayoutCustomPlacementConfig: The bucket's custom placement
@@ -1312,9 +1437,9 @@ type BucketStorageLayoutCustomPlacementConfig struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *BucketStorageLayoutCustomPlacementConfig) MarshalJSON() ([]byte, error) {
+func (s BucketStorageLayoutCustomPlacementConfig) MarshalJSON() ([]byte, error) {
 	type NoMethod BucketStorageLayoutCustomPlacementConfig
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // BucketStorageLayoutHierarchicalNamespace: The bucket's hierarchical
@@ -1336,9 +1461,9 @@ type BucketStorageLayoutHierarchicalNamespace struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *BucketStorageLayoutHierarchicalNamespace) MarshalJSON() ([]byte, error) {
+func (s BucketStorageLayoutHierarchicalNamespace) MarshalJSON() ([]byte, error) {
 	type NoMethod BucketStorageLayoutHierarchicalNamespace
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // Buckets: A list of buckets.
@@ -1368,9 +1493,9 @@ type Buckets struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *Buckets) MarshalJSON() ([]byte, error) {
+func (s Buckets) MarshalJSON() ([]byte, error) {
 	type NoMethod Buckets
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // BulkRestoreObjectsRequest: A bulk restore objects request.
@@ -1409,9 +1534,9 @@ type BulkRestoreObjectsRequest struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *BulkRestoreObjectsRequest) MarshalJSON() ([]byte, error) {
+func (s BulkRestoreObjectsRequest) MarshalJSON() ([]byte, error) {
 	type NoMethod BulkRestoreObjectsRequest
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // Channel: A notification channel used to watch for resource changes.
@@ -1457,9 +1582,9 @@ type Channel struct {
 	NullFields []string `json:"-"`
 }
 
-func (s *Channel) MarshalJSON() ([]byte, error) {
+func (s Channel) MarshalJSON() ([]byte, error) {
 	type NoMethod Channel
-	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
+	return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
 }
 
 // ComposeRequest: A Compose request. 
@@ -1484,9 +1609,9 @@ type ComposeRequest struct { NullFields []string `json:"-"` } -func (s *ComposeRequest) MarshalJSON() ([]byte, error) { +func (s ComposeRequest) MarshalJSON() ([]byte, error) { type NoMethod ComposeRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ComposeRequestSourceObjects struct { @@ -1511,9 +1636,9 @@ type ComposeRequestSourceObjects struct { NullFields []string `json:"-"` } -func (s *ComposeRequestSourceObjects) MarshalJSON() ([]byte, error) { +func (s ComposeRequestSourceObjects) MarshalJSON() ([]byte, error) { type NoMethod ComposeRequestSourceObjects - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ComposeRequestSourceObjectsObjectPreconditions: Conditions that must be met @@ -1537,9 +1662,9 @@ type ComposeRequestSourceObjectsObjectPreconditions struct { NullFields []string `json:"-"` } -func (s *ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, error) { +func (s ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, error) { type NoMethod ComposeRequestSourceObjectsObjectPreconditions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Expr: Represents an expression text. Example: title: "User account presence" @@ -1573,9 +1698,9 @@ type Expr struct { NullFields []string `json:"-"` } -func (s *Expr) MarshalJSON() ([]byte, error) { +func (s Expr) MarshalJSON() ([]byte, error) { type NoMethod Expr - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Folder: A folder. Only available in buckets with hierarchical namespace @@ -1618,9 +1743,9 @@ type Folder struct { NullFields []string `json:"-"` } -func (s *Folder) MarshalJSON() ([]byte, error) { +func (s Folder) MarshalJSON() ([]byte, error) { type NoMethod Folder - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FolderPendingRenameInfo: Only present if the folder is part of an ongoing @@ -1642,9 +1767,9 @@ type FolderPendingRenameInfo struct { NullFields []string `json:"-"` } -func (s *FolderPendingRenameInfo) MarshalJSON() ([]byte, error) { +func (s FolderPendingRenameInfo) MarshalJSON() ([]byte, error) { type NoMethod FolderPendingRenameInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Folders: A list of folders. @@ -1674,14 +1799,17 @@ type Folders struct { NullFields []string `json:"-"` } -func (s *Folders) MarshalJSON() ([]byte, error) { +func (s Folders) MarshalJSON() ([]byte, error) { type NoMethod Folders - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleLongrunningListOperationsResponse: The response message for // storage.buckets.operations.list. type GoogleLongrunningListOperationsResponse struct { + // Kind: The kind of item this is. For lists of operations, this is always + // storage#operations. 
+ Kind string `json:"kind,omitempty"` // NextPageToken: The continuation token, used to page through large result // sets. Provide this value in a subsequent request to return the next page of // results. @@ -1692,22 +1820,22 @@ type GoogleLongrunningListOperationsResponse struct { // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "NextPageToken") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See + // ForceSendFields is a list of field names (e.g. "Kind") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NextPageToken") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "Kind") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *GoogleLongrunningListOperationsResponse) MarshalJSON() ([]byte, error) { +func (s GoogleLongrunningListOperationsResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleLongrunningListOperationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleLongrunningOperation: This resource represents a long-running @@ -1719,6 +1847,9 @@ type GoogleLongrunningOperation struct { Done bool `json:"done,omitempty"` // Error: The error result of the operation in case of failure or cancellation. Error *GoogleRpcStatus `json:"error,omitempty"` + // Kind: The kind of item this is. For operations, this is always + // storage#operation. + Kind string `json:"kind,omitempty"` // Metadata: Service-specific metadata associated with the operation. It // typically contains progress information and common metadata such as create // time. Some services might not provide such metadata. Any method that returns @@ -1736,6 +1867,8 @@ type GoogleLongrunningOperation struct { // method name. For example, if the original method name is "TakeSnapshot()", // the inferred response type is "TakeSnapshotResponse". Response googleapi.RawMessage `json:"response,omitempty"` + // SelfLink: The link to this long running operation. + SelfLink string `json:"selfLink,omitempty"` // ServerResponse contains the HTTP response code and headers from the server. 
googleapi.ServerResponse `json:"-"` @@ -1752,9 +1885,9 @@ type GoogleLongrunningOperation struct { NullFields []string `json:"-"` } -func (s *GoogleLongrunningOperation) MarshalJSON() ([]byte, error) { +func (s GoogleLongrunningOperation) MarshalJSON() ([]byte, error) { type NoMethod GoogleLongrunningOperation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleRpcStatus: The "Status" type defines a logical error model that is @@ -1784,9 +1917,9 @@ type GoogleRpcStatus struct { NullFields []string `json:"-"` } -func (s *GoogleRpcStatus) MarshalJSON() ([]byte, error) { +func (s GoogleRpcStatus) MarshalJSON() ([]byte, error) { type NoMethod GoogleRpcStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HmacKey: JSON template to produce a JSON-style HMAC Key resource for Create @@ -1815,9 +1948,9 @@ type HmacKey struct { NullFields []string `json:"-"` } -func (s *HmacKey) MarshalJSON() ([]byte, error) { +func (s HmacKey) MarshalJSON() ([]byte, error) { type NoMethod HmacKey - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HmacKeyMetadata: JSON template to produce a JSON-style HMAC Key metadata @@ -1863,9 +1996,9 @@ type HmacKeyMetadata struct { NullFields []string `json:"-"` } -func (s *HmacKeyMetadata) MarshalJSON() ([]byte, error) { +func (s HmacKeyMetadata) MarshalJSON() ([]byte, error) { type NoMethod HmacKeyMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HmacKeysMetadata: A list of hmacKeys. @@ -1895,9 +2028,9 @@ type HmacKeysMetadata struct { NullFields []string `json:"-"` } -func (s *HmacKeysMetadata) MarshalJSON() ([]byte, error) { +func (s HmacKeysMetadata) MarshalJSON() ([]byte, error) { type NoMethod HmacKeysMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ManagedFolder: A managed folder. @@ -1939,9 +2072,9 @@ type ManagedFolder struct { NullFields []string `json:"-"` } -func (s *ManagedFolder) MarshalJSON() ([]byte, error) { +func (s ManagedFolder) MarshalJSON() ([]byte, error) { type NoMethod ManagedFolder - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ManagedFolders: A list of managed folders. @@ -1971,9 +2104,9 @@ type ManagedFolders struct { NullFields []string `json:"-"` } -func (s *ManagedFolders) MarshalJSON() ([]byte, error) { +func (s ManagedFolders) MarshalJSON() ([]byte, error) { type NoMethod ManagedFolders - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Notification: A subscription to receive Google PubSub notifications. 
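GoogleLongrunningOperation above gains Kind and SelfLink alongside the existing Done/Error contract. A polling sketch, assuming the pre-existing Operations.Get call on the storage.buckets.operations surface; the bucket and operation IDs are placeholders.

package main

import (
	"context"
	"log"
	"time"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for {
		op, err := svc.Operations.Get("my-bucket", "operation-id").Context(ctx).Do()
		if err != nil {
			log.Fatal(err)
		}
		if op.Done {
			if op.Error != nil {
				log.Fatalf("operation failed: %s", op.Error.Message)
			}
			log.Printf("finished: %s (%s)", op.SelfLink, op.Kind)
			return
		}
		time.Sleep(10 * time.Second)
	}
}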
@@ -2018,9 +2151,9 @@ type Notification struct { NullFields []string `json:"-"` } -func (s *Notification) MarshalJSON() ([]byte, error) { +func (s Notification) MarshalJSON() ([]byte, error) { type NoMethod Notification - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Notifications: A list of notification subscriptions. @@ -2046,9 +2179,9 @@ type Notifications struct { NullFields []string `json:"-"` } -func (s *Notifications) MarshalJSON() ([]byte, error) { +func (s Notifications) MarshalJSON() ([]byte, error) { type NoMethod Notifications - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Object: An object. @@ -2075,7 +2208,8 @@ type Object struct { ContentType string `json:"contentType,omitempty"` // Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; encoded using // base64 in big-endian byte order. For more information about using the CRC32c - // checksum, see Hashes and ETags: Best Practices. + // checksum, see Data Validation and Change Detection + // (https://cloud.google.com/storage/docs/data-validation). Crc32c string `json:"crc32c,omitempty"` // CustomTime: A timestamp in RFC 3339 format specified by the user for an // object. @@ -2113,7 +2247,8 @@ type Object struct { // request to fail with status code 400 - Bad Request. KmsKeyName string `json:"kmsKeyName,omitempty"` // Md5Hash: MD5 hash of the data; encoded using base64. For more information - // about using the MD5 hash, see Hashes and ETags: Best Practices. + // about using the MD5 hash, see Data Validation and Change Detection + // (https://cloud.google.com/storage/docs/data-validation). Md5Hash string `json:"md5Hash,omitempty"` // MediaLink: Media download link. MediaLink string `json:"mediaLink,omitempty"` @@ -2129,6 +2264,10 @@ type Object struct { // Owner: The owner of the object. This will always be the uploader of the // object. Owner *ObjectOwner `json:"owner,omitempty"` + // RestoreToken: Restore token used to differentiate deleted objects with the + // same name and generation. This field is only returned for deleted objects in + // hierarchical namespace buckets. + RestoreToken string `json:"restoreToken,omitempty"` // Retention: A collection of object level retention parameters. Retention *ObjectRetention `json:"retention,omitempty"` // RetentionExpirationTime: A server-determined value that specifies the @@ -2188,9 +2327,9 @@ type Object struct { NullFields []string `json:"-"` } -func (s *Object) MarshalJSON() ([]byte, error) { +func (s Object) MarshalJSON() ([]byte, error) { type NoMethod Object - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ObjectCustomerEncryption: Metadata of customer-supplied encryption key, if @@ -2213,9 +2352,9 @@ type ObjectCustomerEncryption struct { NullFields []string `json:"-"` } -func (s *ObjectCustomerEncryption) MarshalJSON() ([]byte, error) { +func (s ObjectCustomerEncryption) MarshalJSON() ([]byte, error) { type NoMethod ObjectCustomerEncryption - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ObjectOwner: The owner of the object. 
This will always be the uploader of @@ -2238,9 +2377,9 @@ type ObjectOwner struct { NullFields []string `json:"-"` } -func (s *ObjectOwner) MarshalJSON() ([]byte, error) { +func (s ObjectOwner) MarshalJSON() ([]byte, error) { type NoMethod ObjectOwner - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ObjectRetention: A collection of object level retention parameters. @@ -2263,9 +2402,9 @@ type ObjectRetention struct { NullFields []string `json:"-"` } -func (s *ObjectRetention) MarshalJSON() ([]byte, error) { +func (s ObjectRetention) MarshalJSON() ([]byte, error) { type NoMethod ObjectRetention - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ObjectAccessControl: An access-control entry. @@ -2326,9 +2465,9 @@ type ObjectAccessControl struct { NullFields []string `json:"-"` } -func (s *ObjectAccessControl) MarshalJSON() ([]byte, error) { +func (s ObjectAccessControl) MarshalJSON() ([]byte, error) { type NoMethod ObjectAccessControl - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ObjectAccessControlProjectTeam: The project team associated with the entity, @@ -2351,9 +2490,9 @@ type ObjectAccessControlProjectTeam struct { NullFields []string `json:"-"` } -func (s *ObjectAccessControlProjectTeam) MarshalJSON() ([]byte, error) { +func (s ObjectAccessControlProjectTeam) MarshalJSON() ([]byte, error) { type NoMethod ObjectAccessControlProjectTeam - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ObjectAccessControls: An access-control list. @@ -2379,9 +2518,9 @@ type ObjectAccessControls struct { NullFields []string `json:"-"` } -func (s *ObjectAccessControls) MarshalJSON() ([]byte, error) { +func (s ObjectAccessControls) MarshalJSON() ([]byte, error) { type NoMethod ObjectAccessControls - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Objects: A list of objects. @@ -2414,9 +2553,9 @@ type Objects struct { NullFields []string `json:"-"` } -func (s *Objects) MarshalJSON() ([]byte, error) { +func (s Objects) MarshalJSON() ([]byte, error) { type NoMethod Objects - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Policy: A bucket/object/managedFolder IAM policy. 
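The ForceSendFields/NullFields boilerplate repeated on each struct above is what gives callers control over zero values during serialization. A hedged usage sketch against the public storage/v1 package follows; the field choices are illustrative, and the printed form assumes gensupport's map-based marshaling, which sorts keys.

package main

import (
	"encoding/json"
	"fmt"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	obj := &storage.Object{
		// ContentLanguage holds its zero value, so omitempty would drop
		// it; listing it in ForceSendFields sends it anyway.
		ForceSendFields: []string{"ContentLanguage"},
		// NullFields emits an explicit JSON null, which patch-style
		// requests interpret as "clear this field server-side".
		NullFields: []string{"CustomTime"},
	}
	b, err := json.Marshal(obj)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // should print {"contentLanguage":"","customTime":null}
}

On a patch request this is the three-way distinction between leaving a field alone (omitted), setting it to its zero value (force-sent), and clearing it (null).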
@@ -2455,9 +2594,9 @@ type Policy struct { NullFields []string `json:"-"` } -func (s *Policy) MarshalJSON() ([]byte, error) { +func (s Policy) MarshalJSON() ([]byte, error) { type NoMethod Policy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PolicyBindings struct { @@ -2527,9 +2666,62 @@ type PolicyBindings struct { NullFields []string `json:"-"` } -func (s *PolicyBindings) MarshalJSON() ([]byte, error) { +func (s PolicyBindings) MarshalJSON() ([]byte, error) { type NoMethod PolicyBindings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// RelocateBucketRequest: A Relocate Bucket request. +type RelocateBucketRequest struct { + // DestinationCustomPlacementConfig: The bucket's new custom placement + // configuration if relocating to a Custom Dual Region. + DestinationCustomPlacementConfig *RelocateBucketRequestDestinationCustomPlacementConfig `json:"destinationCustomPlacementConfig,omitempty"` + // DestinationLocation: The new location the bucket will be relocated to. + DestinationLocation string `json:"destinationLocation,omitempty"` + // ValidateOnly: If true, validate the operation, but do not actually relocate + // the bucket. + ValidateOnly bool `json:"validateOnly,omitempty"` + // ForceSendFields is a list of field names (e.g. + // "DestinationCustomPlacementConfig") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted from + // API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. + // "DestinationCustomPlacementConfig") to include in API requests with the JSON + // null value. By default, fields with empty values are omitted from API + // requests. See https://pkg.go.dev/google.golang.org/api#hdr-NullFields for + // more details. + NullFields []string `json:"-"` +} + +func (s RelocateBucketRequest) MarshalJSON() ([]byte, error) { + type NoMethod RelocateBucketRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// RelocateBucketRequestDestinationCustomPlacementConfig: The bucket's new +// custom placement configuration if relocating to a Custom Dual Region. +type RelocateBucketRequestDestinationCustomPlacementConfig struct { + // DataLocations: The list of regional locations in which data is placed. + DataLocations []string `json:"dataLocations,omitempty"` + // ForceSendFields is a list of field names (e.g. "DataLocations") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "DataLocations") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s RelocateBucketRequestDestinationCustomPlacementConfig) MarshalJSON() ([]byte, error) { + type NoMethod RelocateBucketRequestDestinationCustomPlacementConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RewriteResponse: A rewrite response. @@ -2569,9 +2761,9 @@ type RewriteResponse struct { NullFields []string `json:"-"` } -func (s *RewriteResponse) MarshalJSON() ([]byte, error) { +func (s RewriteResponse) MarshalJSON() ([]byte, error) { type NoMethod RewriteResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ServiceAccount: A subscription to receive Google PubSub notifications. @@ -2597,9 +2789,9 @@ type ServiceAccount struct { NullFields []string `json:"-"` } -func (s *ServiceAccount) MarshalJSON() ([]byte, error) { +func (s ServiceAccount) MarshalJSON() ([]byte, error) { type NoMethod ServiceAccount - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsResponse: A @@ -2648,9 +2840,9 @@ type TestIamPermissionsResponse struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type AnywhereCachesDisableCall struct { @@ -4082,7 +4274,8 @@ type BucketsDeleteCall struct { header_ http.Header } -// Delete: Permanently deletes an empty bucket. +// Delete: Deletes an empty bucket. Deletions are permanent unless soft delete +// is enabled on the bucket. // // - bucket: Name of a bucket. func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall { @@ -4186,6 +4379,13 @@ func (r *BucketsService) Get(bucket string) *BucketsGetCall { return c } +// Generation sets the optional parameter "generation": If present, specifies +// the generation of the bucket. This is required if softDeleted is true. +func (c *BucketsGetCall) Generation(generation int64) *BucketsGetCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + // IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": // Makes the return of the bucket metadata conditional on whether the bucket's // current metageneration matches the given value. @@ -4215,6 +4415,15 @@ func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { return c } +// SoftDeleted sets the optional parameter "softDeleted": If true, return the +// soft-deleted version of this bucket. The default is false. For more +// information, see Soft Delete +// (https://cloud.google.com/storage/docs/soft-delete). +func (c *BucketsGetCall) SoftDeleted(softDeleted bool) *BucketsGetCall { + c.urlParams_.Set("softDeleted", fmt.Sprint(softDeleted)) + return c +} + // UserProject sets the optional parameter "userProject": The project to be // billed for this request. Required for Requester Pays buckets. 
func (c *BucketsGetCall) UserProject(userProject string) *BucketsGetCall { @@ -4783,6 +4992,15 @@ func (c *BucketsListCall) Projection(projection string) *BucketsListCall { return c } +// SoftDeleted sets the optional parameter "softDeleted": If true, only +// soft-deleted bucket versions will be returned. The default is false. For +// more information, see Soft Delete +// (https://cloud.google.com/storage/docs/soft-delete). +func (c *BucketsListCall) SoftDeleted(softDeleted bool) *BucketsListCall { + c.urlParams_.Set("softDeleted", fmt.Sprint(softDeleted)) + return c +} + // UserProject sets the optional parameter "userProject": The project to be // billed for this request. func (c *BucketsListCall) UserProject(userProject string) *BucketsListCall { @@ -5197,6 +5415,190 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { return ret, nil } +type BucketsRelocateCall struct { + s *Service + bucket string + relocatebucketrequest *RelocateBucketRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Relocate: Initiates a long-running Relocate Bucket operation on the +// specified bucket. +// +// - bucket: Name of the bucket to be moved. +func (r *BucketsService) Relocate(bucket string, relocatebucketrequest *RelocateBucketRequest) *BucketsRelocateCall { + c := &BucketsRelocateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.relocatebucketrequest = relocatebucketrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BucketsRelocateCall) Fields(s ...googleapi.Field) *BucketsRelocateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BucketsRelocateCall) Context(ctx context.Context) *BucketsRelocateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *BucketsRelocateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsRelocateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.relocatebucketrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/relocate") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.relocate" call. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BucketsRelocateCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type BucketsRestoreCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Restore: Restores a soft-deleted bucket. +// +// - bucket: Name of a bucket. +// - generation: Generation of a bucket. +func (r *BucketsService) Restore(bucket string, generation int64) *BucketsRestoreCall { + c := &BucketsRestoreCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *BucketsRestoreCall) UserProject(userProject string) *BucketsRestoreCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BucketsRestoreCall) Fields(s ...googleapi.Field) *BucketsRestoreCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BucketsRestoreCall) Context(ctx context.Context) *BucketsRestoreCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *BucketsRestoreCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsRestoreCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/restore") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.restore" call. +func (c *BucketsRestoreCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil +} + type BucketsSetIamPolicyCall struct { s *Service bucket string @@ -9747,9 +10149,21 @@ func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { return c } +// RestoreToken sets the optional parameter "restoreToken": Restore token used +// to differentiate soft-deleted objects with the same name and generation. +// Only applicable for hierarchical namespace buckets and if softDeleted is set +// to true. This parameter is optional, and is only required in the rare case +// when there are multiple soft-deleted objects with the same name and +// generation. +func (c *ObjectsGetCall) RestoreToken(restoreToken string) *ObjectsGetCall { + c.urlParams_.Set("restoreToken", restoreToken) + return c +} + // SoftDeleted sets the optional parameter "softDeleted": If true, only // soft-deleted object versions will be listed. The default is false. For more -// information, see Soft Delete. +// information, see Soft Delete +// (https://cloud.google.com/storage/docs/soft-delete). func (c *ObjectsGetCall) SoftDeleted(softDeleted bool) *ObjectsGetCall { c.urlParams_.Set("softDeleted", fmt.Sprint(softDeleted)) return c @@ -10412,7 +10826,8 @@ func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { // SoftDeleted sets the optional parameter "softDeleted": If true, only // soft-deleted object versions will be listed. The default is false. For more -// information, see Soft Delete. +// information, see Soft Delete +// (https://cloud.google.com/storage/docs/soft-delete). func (c *ObjectsListCall) SoftDeleted(softDeleted bool) *ObjectsListCall { c.urlParams_.Set("softDeleted", fmt.Sprint(softDeleted)) return c @@ -10436,7 +10851,8 @@ func (c *ObjectsListCall) UserProject(userProject string) *ObjectsListCall { // Versions sets the optional parameter "versions": If true, lists all versions // of an object as distinct results. The default is false. For more -// information, see Object Versioning. +// information, see Object Versioning +// (https://cloud.google.com/storage/docs/object-versioning). func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall { c.urlParams_.Set("versions", fmt.Sprint(versions)) return c @@ -10774,7 +11190,8 @@ type ObjectsRestoreCall struct { // - bucket: Name of the bucket in which the object resides. // - generation: Selects a specific revision of this object. // - object: Name of the object. For information about how to URL encode object -// names to be path safe, see Encoding URI Path Parts. +// names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Restore(bucket string, object string, generation int64) *ObjectsRestoreCall { c := &ObjectsRestoreCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -10838,6 +11255,16 @@ func (c *ObjectsRestoreCall) Projection(projection string) *ObjectsRestoreCall { return c } +// RestoreToken sets the optional parameter "restoreToken": Restore token used +// to differentiate soft-deleted objects with the same name and generation. Only +// applicable for hierarchical namespace buckets. This parameter is optional, +// and is only required in the rare case when there are multiple soft-deleted +// objects with the same name and generation. 
+func (c *ObjectsRestoreCall) RestoreToken(restoreToken string) *ObjectsRestoreCall { + c.urlParams_.Set("restoreToken", restoreToken) + return c +} + // UserProject sets the optional parameter "userProject": The project to be // billed for this request. Required for Requester Pays buckets. func (c *ObjectsRestoreCall) UserProject(userProject string) *ObjectsRestoreCall { @@ -11767,7 +12194,8 @@ func (c *ObjectsWatchAllCall) UserProject(userProject string) *ObjectsWatchAllCa // Versions sets the optional parameter "versions": If true, lists all versions // of an object as distinct results. The default is false. For more -// information, see Object Versioning. +// information, see Object Versioning +// (https://cloud.google.com/storage/docs/object-versioning). func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall { c.urlParams_.Set("versions", fmt.Sprint(versions)) return c @@ -11855,6 +12283,92 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) return ret, nil } +type OperationsAdvanceRelocateBucketCall struct { + s *Service + bucket string + operationId string + advancerelocatebucketoperationrequest *AdvanceRelocateBucketOperationRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AdvanceRelocateBucket: Starts asynchronous advancement of the relocate +// bucket operation in the case of required write downtime, to allow it to lock +// the bucket at the source location, and proceed with the bucket location +// swap. The server makes a best effort to advance the relocate bucket +// operation, but success is not guaranteed. +// +// - bucket: Name of the bucket to advance the relocate for. +// - operationId: ID of the operation resource. +func (r *OperationsService) AdvanceRelocateBucket(bucket string, operationId string, advancerelocatebucketoperationrequest *AdvanceRelocateBucketOperationRequest) *OperationsAdvanceRelocateBucketCall { + c := &OperationsAdvanceRelocateBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.operationId = operationId + c.advancerelocatebucketoperationrequest = advancerelocatebucketoperationrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *OperationsAdvanceRelocateBucketCall) Fields(s ...googleapi.Field) *OperationsAdvanceRelocateBucketCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *OperationsAdvanceRelocateBucketCall) Context(ctx context.Context) *OperationsAdvanceRelocateBucketCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *OperationsAdvanceRelocateBucketCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OperationsAdvanceRelocateBucketCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.advancerelocatebucketoperationrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations/{operationId}/advanceRelocateBucket") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "operationId": c.operationId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.operations.advanceRelocateBucket" call. +func (c *OperationsAdvanceRelocateBucketCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil +} + type OperationsCancelCall struct { s *Service bucket string @@ -12683,7 +13197,9 @@ type ProjectsHmacKeysUpdateCall struct { } // Update: Updates the state of an HMAC key. See the HMAC Key resource -// descriptor for valid states. +// descriptor +// (https://cloud.google.com/storage/docs/json_api/v1/projects/hmacKeys/update#request-body) +// for valid states. // // - accessId: Name of the HMAC key being updated. // - projectId: Project ID owning the service account of the updated key. diff --git a/terraform/providers/google/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-api.json b/terraform/providers/google/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-api.json index e439c9b7ca2..7e47fd67a92 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-api.json @@ -392,7 +392,7 @@ ], "parameters": { "filter": { - "description": "Required. A list of query parameters specified as JSON text in the form of: `{\"projectId\":\"my_project_id\", \"jobNames\":[\"jobid1\",\"jobid2\",...], \"jobStatuses\":[\"status1\",\"status2\",...]}` Since `jobNames` and `jobStatuses` support multiple values, their values must be specified with array notation. `projectId` is required. `jobNames` and `jobStatuses` are optional. The valid values for `jobStatuses` are case-insensitive: ENABLED, DISABLED, and DELETED.", + "description": "Required. A list of query parameters specified as JSON text in the form of: ``` { \"projectId\":\"my_project_id\", \"jobNames\":[\"jobid1\",\"jobid2\",...], \"jobStatuses\":[\"status1\",\"status2\",...], \"dataBackend\":\"QUERY_REPLICATION_CONFIGS\", \"sourceBucket\":\"source-bucket-name\", \"sinkBucket\":\"sink-bucket-name\", } ``` The JSON formatting in the example is for display only; provide the query parameters without spaces or line breaks. * `projectId` is required. * Since `jobNames` and `jobStatuses` support multiple values, their values must be specified with array notation. `jobNames` and `jobStatuses` are optional. Valid values are case-insensitive: * ENABLED * DISABLED * DELETED * Specify `\"dataBackend\":\"QUERY_REPLICATION_CONFIGS\"` to return a list of cross-bucket replication jobs. * Limit the results to jobs from a particular bucket with `sourceBucket` and/or to a particular bucket with `sinkBucket`.", "location": "query", "required": true, "type": "string" @@ -632,7 +632,7 @@ } } }, - "revision": "20240511", + "revision": "20241005", "rootUrl": "https://storagetransfer.googleapis.com/", "schemas": { "AgentPool": { @@ -1200,7 +1200,7 @@ "type": "string" }, "timeCreated": { - "description": "Specifies how each object's `timeCreated` metadata is preserved for transfers. 
If unspecified, the default behavior is the same as TIME_CREATED_SKIP. This behavior is supported for transfers to GCS buckets from GCS, S3, Azure, S3 Compatible, and Azure sources.", + "description": "Specifies how each object's `timeCreated` metadata is preserved for transfers. If unspecified, the default behavior is the same as TIME_CREATED_SKIP. This behavior is supported for transfers to Cloud Storage buckets from Cloud Storage, Amazon S3, S3-compatible storage, and Azure sources.", "enum": [ "TIME_CREATED_UNSPECIFIED", "TIME_CREATED_SKIP", @@ -1368,24 +1368,24 @@ "type": "object" }, "ReplicationSpec": { - "description": "Specifies the configuration for running a replication job.", + "description": "Specifies the configuration for a cross-bucket replication job. Cross-bucket replication copies new or updated objects from a source Cloud Storage bucket to a destination Cloud Storage bucket. Existing objects in the source bucket are not copied by a new cross-bucket replication job.", "id": "ReplicationSpec", "properties": { "gcsDataSink": { "$ref": "GcsData", - "description": "Specifies cloud Storage data sink." + "description": "The Cloud Storage bucket to which to replicate objects." }, "gcsDataSource": { "$ref": "GcsData", - "description": "Specifies cloud Storage data source." + "description": "The Cloud Storage bucket from which to replicate objects." }, "objectConditions": { "$ref": "ObjectConditions", - "description": "Specifies the object conditions to only include objects that satisfy these conditions in the set of data source objects. Object conditions based on objects' \"last modification time\" do not exclude objects in a data sink." + "description": "Object conditions that determine which objects are transferred. For replication jobs, only `include_prefixes` and `exclude_prefixes` are supported." }, "transferOptions": { "$ref": "TransferOptions", - "description": "Specifies the actions to be performed on the object during replication. Delete options are not supported for replication and when specified, the request fails with an INVALID_ARGUMENT error." + "description": "Specifies the metadata options to be applied during replication. Delete options are not supported. If a delete option is specified, the request fails with an INVALID_ARGUMENT error." } }, "type": "object" @@ -1530,22 +1530,22 @@ "id": "TimeOfDay", "properties": { "hours": { - "description": "Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", + "description": "Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", "format": "int32", "type": "integer" }, "minutes": { - "description": "Minutes of hour of day. Must be from 0 to 59.", + "description": "Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59.", "format": "int32", "type": "integer" }, "nanos": { - "description": "Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.", + "description": "Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999.", "format": "int32", "type": "integer" }, "seconds": { - "description": "Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.", + "description": "Seconds of a minute. 
Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds.", "format": "int32", "type": "integer" } diff --git a/terraform/providers/google/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-gen.go b/terraform/providers/google/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-gen.go index a34514171c3..55fe560d06e 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-gen.go @@ -249,9 +249,9 @@ type AgentPool struct { NullFields []string `json:"-"` } -func (s *AgentPool) MarshalJSON() ([]byte, error) { +func (s AgentPool) MarshalJSON() ([]byte, error) { type NoMethod AgentPool - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AwsAccessKey: AWS access key (see AWS Security Credentials @@ -277,9 +277,9 @@ type AwsAccessKey struct { NullFields []string `json:"-"` } -func (s *AwsAccessKey) MarshalJSON() ([]byte, error) { +func (s AwsAccessKey) MarshalJSON() ([]byte, error) { type NoMethod AwsAccessKey - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AwsS3CompatibleData: An AwsS3CompatibleData resource. @@ -310,9 +310,9 @@ type AwsS3CompatibleData struct { NullFields []string `json:"-"` } -func (s *AwsS3CompatibleData) MarshalJSON() ([]byte, error) { +func (s AwsS3CompatibleData) MarshalJSON() ([]byte, error) { type NoMethod AwsS3CompatibleData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AwsS3Data: An AwsS3Data resource can be a data source, but not a data sink. @@ -371,9 +371,9 @@ type AwsS3Data struct { NullFields []string `json:"-"` } -func (s *AwsS3Data) MarshalJSON() ([]byte, error) { +func (s AwsS3Data) MarshalJSON() ([]byte, error) { type NoMethod AwsS3Data - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AzureBlobStorageData: An AzureBlobStorageData resource can be a data source, @@ -420,9 +420,9 @@ type AzureBlobStorageData struct { NullFields []string `json:"-"` } -func (s *AzureBlobStorageData) MarshalJSON() ([]byte, error) { +func (s AzureBlobStorageData) MarshalJSON() ([]byte, error) { type NoMethod AzureBlobStorageData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AzureCredentials: Azure credentials For information on our data retention @@ -447,9 +447,9 @@ type AzureCredentials struct { NullFields []string `json:"-"` } -func (s *AzureCredentials) MarshalJSON() ([]byte, error) { +func (s AzureCredentials) MarshalJSON() ([]byte, error) { type NoMethod AzureCredentials - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BandwidthLimit: Specifies a bandwidth limit for an agent pool. 
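The updated transferJobs.list filter description above adds the dataBackend, sourceBucket, and sinkBucket keys and notes that the pretty-printed example is for display only. One way to get the required compact JSON is to marshal a map instead of hand-formatting the string; a sketch with placeholder values:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Keys follow the filter description in the discovery document above;
	// key order in the output does not matter to the API.
	filter := map[string]interface{}{
		"projectId":    "my_project_id",
		"jobStatuses":  []string{"ENABLED"},
		"dataBackend":  "QUERY_REPLICATION_CONFIGS",
		"sourceBucket": "source-bucket-name",
		"sinkBucket":   "sink-bucket-name",
	}
	// json.Marshal always emits the compact form, i.e. no spaces or line
	// breaks, which is what the API requires.
	b, err := json.Marshal(filter)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// The result is what a caller would pass to
	// svc.TransferJobs.List(string(b)).Do().
}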
@@ -470,9 +470,9 @@ type BandwidthLimit struct { NullFields []string `json:"-"` } -func (s *BandwidthLimit) MarshalJSON() ([]byte, error) { +func (s BandwidthLimit) MarshalJSON() ([]byte, error) { type NoMethod BandwidthLimit - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // CancelOperationRequest: The request message for Operations.CancelOperation. @@ -512,9 +512,9 @@ type Date struct { NullFields []string `json:"-"` } -func (s *Date) MarshalJSON() ([]byte, error) { +func (s Date) MarshalJSON() ([]byte, error) { type NoMethod Date - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Empty: A generic empty message that you can re-use to avoid defining @@ -546,9 +546,9 @@ type ErrorLogEntry struct { NullFields []string `json:"-"` } -func (s *ErrorLogEntry) MarshalJSON() ([]byte, error) { +func (s ErrorLogEntry) MarshalJSON() ([]byte, error) { type NoMethod ErrorLogEntry - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ErrorSummary: A summary of errors by error code, plus a count and sample @@ -654,9 +654,9 @@ type ErrorSummary struct { NullFields []string `json:"-"` } -func (s *ErrorSummary) MarshalJSON() ([]byte, error) { +func (s ErrorSummary) MarshalJSON() ([]byte, error) { type NoMethod ErrorSummary - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // EventStream: Specifies the Event-driven transfer options. Event-driven @@ -690,9 +690,9 @@ type EventStream struct { NullFields []string `json:"-"` } -func (s *EventStream) MarshalJSON() ([]byte, error) { +func (s EventStream) MarshalJSON() ([]byte, error) { type NoMethod EventStream - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GcsData: In a GcsData resource, an object's name is the Cloud Storage @@ -729,9 +729,9 @@ type GcsData struct { NullFields []string `json:"-"` } -func (s *GcsData) MarshalJSON() ([]byte, error) { +func (s GcsData) MarshalJSON() ([]byte, error) { type NoMethod GcsData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleServiceAccount: Google service account @@ -756,9 +756,9 @@ type GoogleServiceAccount struct { NullFields []string `json:"-"` } -func (s *GoogleServiceAccount) MarshalJSON() ([]byte, error) { +func (s GoogleServiceAccount) MarshalJSON() ([]byte, error) { type NoMethod GoogleServiceAccount - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HdfsData: An HdfsData resource specifies a path within an HDFS entity (e.g. 
@@ -781,9 +781,9 @@ type HdfsData struct { NullFields []string `json:"-"` } -func (s *HdfsData) MarshalJSON() ([]byte, error) { +func (s HdfsData) MarshalJSON() ([]byte, error) { type NoMethod HdfsData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HttpData: An HttpData resource specifies a list of objects on the web to be @@ -826,9 +826,9 @@ type HttpData struct { NullFields []string `json:"-"` } -func (s *HttpData) MarshalJSON() ([]byte, error) { +func (s HttpData) MarshalJSON() ([]byte, error) { type NoMethod HttpData - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListAgentPoolsResponse: Response from ListAgentPools. @@ -853,9 +853,9 @@ type ListAgentPoolsResponse struct { NullFields []string `json:"-"` } -func (s *ListAgentPoolsResponse) MarshalJSON() ([]byte, error) { +func (s ListAgentPoolsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListAgentPoolsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListOperationsResponse: The response message for Operations.ListOperations. @@ -881,9 +881,9 @@ type ListOperationsResponse struct { NullFields []string `json:"-"` } -func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { +func (s ListOperationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListOperationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ListTransferJobsResponse: Response from ListTransferJobs. @@ -908,9 +908,9 @@ type ListTransferJobsResponse struct { NullFields []string `json:"-"` } -func (s *ListTransferJobsResponse) MarshalJSON() ([]byte, error) { +func (s ListTransferJobsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListTransferJobsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // LoggingConfig: Specifies the logging behavior for transfer operations. Logs @@ -957,9 +957,9 @@ type LoggingConfig struct { NullFields []string `json:"-"` } -func (s *LoggingConfig) MarshalJSON() ([]byte, error) { +func (s LoggingConfig) MarshalJSON() ([]byte, error) { type NoMethod LoggingConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // MetadataOptions: Specifies the metadata options for running a transfer. @@ -1049,8 +1049,9 @@ type MetadataOptions struct { TemporaryHold string `json:"temporaryHold,omitempty"` // TimeCreated: Specifies how each object's `timeCreated` metadata is preserved // for transfers. If unspecified, the default behavior is the same as - // TIME_CREATED_SKIP. This behavior is supported for transfers to GCS buckets - // from GCS, S3, Azure, S3 Compatible, and Azure sources. + // TIME_CREATED_SKIP. This behavior is supported for transfers to Cloud Storage + // buckets from Cloud Storage, Amazon S3, S3-compatible storage, and Azure + // sources. // // Possible values: // "TIME_CREATED_UNSPECIFIED" - TimeCreated behavior is unspecified. 
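To make the reworded TimeCreated behavior concrete, here is a hedged sketch of a transfer spec that sets the option explicitly to TIME_CREATED_SKIP, the value the hunk above describes as the unspecified default. The TransferOptions-to-MetadataOptions nesting is recalled from the generated package rather than shown in this diff, and the bucket names are placeholders.

package main

import (
	"fmt"

	storagetransfer "google.golang.org/api/storagetransfer/v1"
)

func main() {
	// TIME_CREATED_SKIP: the source object's timeCreated is not preserved,
	// matching the documented default behavior.
	spec := &storagetransfer.TransferSpec{
		GcsDataSource: &storagetransfer.GcsData{BucketName: "source-bucket"},
		GcsDataSink:   &storagetransfer.GcsData{BucketName: "sink-bucket"},
		TransferOptions: &storagetransfer.TransferOptions{
			MetadataOptions: &storagetransfer.MetadataOptions{
				TimeCreated: "TIME_CREATED_SKIP",
			},
		},
	}
	fmt.Println(spec.TransferOptions.MetadataOptions.TimeCreated)
}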
@@ -1083,9 +1084,9 @@ type MetadataOptions struct { NullFields []string `json:"-"` } -func (s *MetadataOptions) MarshalJSON() ([]byte, error) { +func (s MetadataOptions) MarshalJSON() ([]byte, error) { type NoMethod MetadataOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // NotificationConfig: Specification to configure notifications published to @@ -1138,9 +1139,9 @@ type NotificationConfig struct { NullFields []string `json:"-"` } -func (s *NotificationConfig) MarshalJSON() ([]byte, error) { +func (s NotificationConfig) MarshalJSON() ([]byte, error) { type NoMethod NotificationConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ObjectConditions: Conditions that determine which objects are transferred. @@ -1232,9 +1233,9 @@ type ObjectConditions struct { NullFields []string `json:"-"` } -func (s *ObjectConditions) MarshalJSON() ([]byte, error) { +func (s ObjectConditions) MarshalJSON() ([]byte, error) { type NoMethod ObjectConditions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Operation: This resource represents a long-running operation that is the @@ -1276,9 +1277,9 @@ type Operation struct { NullFields []string `json:"-"` } -func (s *Operation) MarshalJSON() ([]byte, error) { +func (s Operation) MarshalJSON() ([]byte, error) { type NoMethod Operation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // PauseTransferOperationRequest: Request passed to PauseTransferOperation. @@ -1302,24 +1303,26 @@ type PosixFilesystem struct { NullFields []string `json:"-"` } -func (s *PosixFilesystem) MarshalJSON() ([]byte, error) { +func (s PosixFilesystem) MarshalJSON() ([]byte, error) { type NoMethod PosixFilesystem - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ReplicationSpec: Specifies the configuration for running a replication job. +// ReplicationSpec: Specifies the configuration for a cross-bucket replication +// job. Cross-bucket replication copies new or updated objects from a source +// Cloud Storage bucket to a destination Cloud Storage bucket. Existing objects +// in the source bucket are not copied by a new cross-bucket replication job. type ReplicationSpec struct { - // GcsDataSink: Specifies cloud Storage data sink. + // GcsDataSink: The Cloud Storage bucket to which to replicate objects. GcsDataSink *GcsData `json:"gcsDataSink,omitempty"` - // GcsDataSource: Specifies cloud Storage data source. + // GcsDataSource: The Cloud Storage bucket from which to replicate objects. GcsDataSource *GcsData `json:"gcsDataSource,omitempty"` - // ObjectConditions: Specifies the object conditions to only include objects - // that satisfy these conditions in the set of data source objects. Object - // conditions based on objects' "last modification time" do not exclude objects - // in a data sink. + // ObjectConditions: Object conditions that determine which objects are + // transferred. For replication jobs, only `include_prefixes` and + // `exclude_prefixes` are supported. 
ObjectConditions *ObjectConditions `json:"objectConditions,omitempty"` - // TransferOptions: Specifies the actions to be performed on the object during - // replication. Delete options are not supported for replication and when + // TransferOptions: Specifies the metadata options to be applied during + // replication. Delete options are not supported. If a delete option is // specified, the request fails with an INVALID_ARGUMENT error. TransferOptions *TransferOptions `json:"transferOptions,omitempty"` // ForceSendFields is a list of field names (e.g. "GcsDataSink") to @@ -1335,9 +1338,9 @@ type ReplicationSpec struct { NullFields []string `json:"-"` } -func (s *ReplicationSpec) MarshalJSON() ([]byte, error) { +func (s ReplicationSpec) MarshalJSON() ([]byte, error) { type NoMethod ReplicationSpec - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ResumeTransferOperationRequest: Request passed to ResumeTransferOperation. @@ -1362,9 +1365,9 @@ type RunTransferJobRequest struct { NullFields []string `json:"-"` } -func (s *RunTransferJobRequest) MarshalJSON() ([]byte, error) { +func (s RunTransferJobRequest) MarshalJSON() ([]byte, error) { type NoMethod RunTransferJobRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // S3CompatibleMetadata: S3CompatibleMetadata contains the metadata fields that @@ -1421,9 +1424,9 @@ type S3CompatibleMetadata struct { NullFields []string `json:"-"` } -func (s *S3CompatibleMetadata) MarshalJSON() ([]byte, error) { +func (s S3CompatibleMetadata) MarshalJSON() ([]byte, error) { type NoMethod S3CompatibleMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Schedule: Transfers can be scheduled to recur or to run just once. @@ -1483,9 +1486,9 @@ type Schedule struct { NullFields []string `json:"-"` } -func (s *Schedule) MarshalJSON() ([]byte, error) { +func (s Schedule) MarshalJSON() ([]byte, error) { type NoMethod Schedule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Status: The `Status` type defines a logical error model that is suitable for @@ -1517,25 +1520,28 @@ type Status struct { NullFields []string `json:"-"` } -func (s *Status) MarshalJSON() ([]byte, error) { +func (s Status) MarshalJSON() ([]byte, error) { type NoMethod Status - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TimeOfDay: Represents a time of day. The date and time zone are either not // significant or are specified elsewhere. An API may choose to allow leap // seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. type TimeOfDay struct { - // Hours: Hours of day in 24 hour format. Should be from 0 to 23. An API may - // choose to allow the value "24:00:00" for scenarios like business closing - // time. + // Hours: Hours of a day in 24 hour format. Must be greater than or equal to 0 + // and typically must be less than or equal to 23. An API may choose to allow + // the value "24:00:00" for scenarios like business closing time. Hours int64 `json:"hours,omitempty"` - // Minutes: Minutes of hour of day. 
Must be from 0 to 59. + // Minutes: Minutes of an hour. Must be greater than or equal to 0 and less + // than or equal to 59. Minutes int64 `json:"minutes,omitempty"` - // Nanos: Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + // Nanos: Fractions of seconds, in nanoseconds. Must be greater than or equal + // to 0 and less than or equal to 999,999,999. Nanos int64 `json:"nanos,omitempty"` - // Seconds: Seconds of minutes of the time. Must normally be from 0 to 59. An - // API may allow the value 60 if it allows leap-seconds. + // Seconds: Seconds of a minute. Must be greater than or equal to 0 and + // typically must be less than or equal to 59. An API may allow the value 60 if + // it allows leap-seconds. Seconds int64 `json:"seconds,omitempty"` // ForceSendFields is a list of field names (e.g. "Hours") to unconditionally // include in API requests. By default, fields with empty or default values are @@ -1550,9 +1556,9 @@ type TimeOfDay struct { NullFields []string `json:"-"` } -func (s *TimeOfDay) MarshalJSON() ([]byte, error) { +func (s TimeOfDay) MarshalJSON() ([]byte, error) { type NoMethod TimeOfDay - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TransferCounters: A collection of counters that report the progress of a @@ -1637,9 +1643,9 @@ type TransferCounters struct { NullFields []string `json:"-"` } -func (s *TransferCounters) MarshalJSON() ([]byte, error) { +func (s TransferCounters) MarshalJSON() ([]byte, error) { type NoMethod TransferCounters - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TransferJob: This resource represents the configuration of a transfer job @@ -1723,9 +1729,9 @@ type TransferJob struct { NullFields []string `json:"-"` } -func (s *TransferJob) MarshalJSON() ([]byte, error) { +func (s TransferJob) MarshalJSON() ([]byte, error) { type NoMethod TransferJob - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TransferManifest: Specifies where the manifest is located. @@ -1748,9 +1754,9 @@ type TransferManifest struct { NullFields []string `json:"-"` } -func (s *TransferManifest) MarshalJSON() ([]byte, error) { +func (s TransferManifest) MarshalJSON() ([]byte, error) { type NoMethod TransferManifest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TransferOperation: A description of the execution of a transfer. 
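The TimeOfDay rewording above replaces "should be from 0 to 23" with explicit bounds plus two carve-outs: "24:00:00" for closing-time scenarios and 60 seconds for leap seconds, both of which an API may but need not accept. A client-side validation sketch under exactly those rules (an individual API may still be stricter):

package main

import (
	"fmt"

	storagetransfer "google.golang.org/api/storagetransfer/v1"
)

// validTimeOfDay applies the documented bounds. It accepts the optional
// 24:00:00 and leap-second forms, so a stricter service could still reject
// a value this function passes.
func validTimeOfDay(t *storagetransfer.TimeOfDay) bool {
	if t == nil {
		return false
	}
	if t.Hours == 24 { // "24:00:00" only, per the business-closing-time carve-out
		return t.Minutes == 0 && t.Seconds == 0 && t.Nanos == 0
	}
	return t.Hours >= 0 && t.Hours <= 23 &&
		t.Minutes >= 0 && t.Minutes <= 59 &&
		t.Seconds >= 0 && t.Seconds <= 60 && // 60 only for leap seconds
		t.Nanos >= 0 && t.Nanos <= 999999999
}

func main() {
	fmt.Println(validTimeOfDay(&storagetransfer.TimeOfDay{Hours: 23, Minutes: 59, Seconds: 60})) // true
	fmt.Println(validTimeOfDay(&storagetransfer.TimeOfDay{Hours: 25}))                           // false
}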
@@ -1803,9 +1809,9 @@ type TransferOperation struct { NullFields []string `json:"-"` } -func (s *TransferOperation) MarshalJSON() ([]byte, error) { +func (s TransferOperation) MarshalJSON() ([]byte, error) { type NoMethod TransferOperation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TransferOptions: TransferOptions define the actions to be performed on @@ -1857,9 +1863,9 @@ type TransferOptions struct { NullFields []string `json:"-"` } -func (s *TransferOptions) MarshalJSON() ([]byte, error) { +func (s TransferOptions) MarshalJSON() ([]byte, error) { type NoMethod TransferOptions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TransferSpec: Configuration for running a transfer. @@ -1921,9 +1927,9 @@ type TransferSpec struct { NullFields []string `json:"-"` } -func (s *TransferSpec) MarshalJSON() ([]byte, error) { +func (s TransferSpec) MarshalJSON() ([]byte, error) { type NoMethod TransferSpec - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // UpdateTransferJobRequest: Request passed to UpdateTransferJob. @@ -1958,9 +1964,9 @@ type UpdateTransferJobRequest struct { NullFields []string `json:"-"` } -func (s *UpdateTransferJobRequest) MarshalJSON() ([]byte, error) { +func (s UpdateTransferJobRequest) MarshalJSON() ([]byte, error) { type NoMethod UpdateTransferJobRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type GoogleServiceAccountsGetCall struct { @@ -2970,12 +2976,19 @@ type TransferJobsListCall struct { // List: Lists transfer jobs. // // - filter: A list of query parameters specified as JSON text in the form of: -// `{"projectId":"my_project_id", "jobNames":["jobid1","jobid2",...], -// "jobStatuses":["status1","status2",...]}` Since `jobNames` and -// `jobStatuses` support multiple values, their values must be specified with -// array notation. `projectId` is required. `jobNames` and `jobStatuses` are -// optional. The valid values for `jobStatuses` are case-insensitive: -// ENABLED, DISABLED, and DELETED. +// ``` { "projectId":"my_project_id", "jobNames":["jobid1","jobid2",...], +// "jobStatuses":["status1","status2",...], +// "dataBackend":"QUERY_REPLICATION_CONFIGS", +// "sourceBucket":"source-bucket-name", "sinkBucket":"sink-bucket-name", } +// ``` The JSON formatting in the example is for display only; provide the +// query parameters without spaces or line breaks. * `projectId` is required. +// - Since `jobNames` and `jobStatuses` support multiple values, their values +// must be specified with array notation. `jobNames` and `jobStatuses` are +// optional. Valid values are case-insensitive: * ENABLED * DISABLED * +// DELETED * Specify "dataBackend":"QUERY_REPLICATION_CONFIGS" to return a +// list of cross-bucket replication jobs. * Limit the results to jobs from a +// particular bucket with `sourceBucket` and/or to a particular bucket with +// `sinkBucket`. 
func (r *TransferJobsService) List(filter string) *TransferJobsListCall { c := &TransferJobsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.urlParams_.Set("filter", filter) diff --git a/terraform/providers/google/vendor/google.golang.org/api/transport/grpc/dial.go b/terraform/providers/google/vendor/google.golang.org/api/transport/grpc/dial.go index 2d4f90c9c1f..ff3539d898f 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/terraform/providers/google/vendor/google.golang.org/api/transport/grpc/dial.go @@ -53,6 +53,9 @@ var logRateLimiter = rate.Sometimes{Interval: 1 * time.Second} // Assign to var for unit test replacement var dialContext = grpc.DialContext +// Assign to var for unit test replacement +var dialContextNewAuth = grpctransport.Dial + // otelStatsHandler is a singleton otelgrpc.clientHandler to be used across // all dial connections to avoid the memory leak documented in // https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4226 @@ -218,26 +221,20 @@ func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *interna defaultEndpointTemplate = ds.DefaultEndpoint } - tokenURL, oauth2Client, err := internal.GetOAuth2Configuration(ctx, ds) - if err != nil { - return nil, err - } - - pool, err := grpctransport.Dial(ctx, secure, &grpctransport.Options{ + pool, err := dialContextNewAuth(ctx, secure, &grpctransport.Options{ DisableTelemetry: ds.TelemetryDisabled, DisableAuthentication: ds.NoAuth, Endpoint: ds.Endpoint, Metadata: metadata, - GRPCDialOpts: ds.GRPCDialOpts, + GRPCDialOpts: prepareDialOptsNewAuth(ds), PoolSize: poolSize, Credentials: creds, + APIKey: ds.APIKey, DetectOpts: &credentials.DetectOptions{ Scopes: ds.Scopes, Audience: aud, CredentialsFile: ds.CredentialsFile, CredentialsJSON: ds.CredentialsJSON, - TokenURL: tokenURL, - Client: oauth2Client, }, InternalOptions: &grpctransport.InternalOptions{ EnableNonDefaultSAForDirectPath: ds.AllowNonDefaultServiceAccount, @@ -250,10 +247,20 @@ func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *interna DefaultScopes: ds.DefaultScopes, SkipValidation: skipValidation, }, + UniverseDomain: ds.UniverseDomain, }) return pool, err } +func prepareDialOptsNewAuth(ds *internal.DialSettings) []grpc.DialOption { + var opts []grpc.DialOption + if ds.UserAgent != "" { + opts = append(opts, grpc.WithUserAgent(ds.UserAgent)) + } + + return append(opts, ds.GRPCDialOpts...) +} + func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.ClientConn, error) { if o.HTTPClient != nil { return nil, errors.New("unsupported HTTP client specified") @@ -290,17 +297,6 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C if err != nil { return nil, err } - if o.TokenSource == nil { - // We only validate non-tokensource creds, as TokenSource-based credentials - // don't propagate universe. 
- credsUniverseDomain, err := internal.GetUniverseDomain(creds) - if err != nil { - return nil, err - } - if o.GetUniverseDomain() != credsUniverseDomain { - return nil, internal.ErrUniverseNotMatch(o.GetUniverseDomain(), credsUniverseDomain) - } - } grpcOpts = append(grpcOpts, grpc.WithPerRPCCredentials(grpcTokenSource{ TokenSource: oauth.TokenSource{TokenSource: creds.TokenSource}, quotaProject: internal.GetQuotaProject(creds, o.QuotaProject), diff --git a/terraform/providers/google/vendor/google.golang.org/api/transport/http/dial.go b/terraform/providers/google/vendor/google.golang.org/api/transport/http/dial.go index a36e24315ba..d5b213e0f08 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/transport/http/dial.go +++ b/terraform/providers/google/vendor/google.golang.org/api/transport/http/dial.go @@ -107,10 +107,6 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal. if ds.RequestReason != "" { headers.Set("X-goog-request-reason", ds.RequestReason) } - tokenURL, oauth2Client, err := internal.GetOAuth2Configuration(ctx, ds) - if err != nil { - return nil, err - } client, err := httptransport.NewClient(&httptransport.Options{ DisableTelemetry: ds.TelemetryDisabled, DisableAuthentication: ds.NoAuth, @@ -125,8 +121,6 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal. Audience: aud, CredentialsFile: ds.CredentialsFile, CredentialsJSON: ds.CredentialsJSON, - TokenURL: tokenURL, - Client: oauth2Client, }, InternalOptions: &httptransport.InternalOptions{ EnableJWTWithScope: ds.EnableJwtWithScope, @@ -136,6 +130,7 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal. DefaultScopes: ds.DefaultScopes, SkipValidation: skipValidation, }, + UniverseDomain: ds.UniverseDomain, }) if err != nil { return nil, err @@ -188,17 +183,6 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna if err != nil { return nil, err } - if settings.TokenSource == nil { - // We only validate non-tokensource creds, as TokenSource-based credentials - // don't propagate universe. - credsUniverseDomain, err := internal.GetUniverseDomain(creds) - if err != nil { - return nil, err - } - if settings.GetUniverseDomain() != credsUniverseDomain { - return nil, internal.ErrUniverseNotMatch(settings.GetUniverseDomain(), credsUniverseDomain) - } - } paramTransport.quotaProject = internal.GetQuotaProject(creds, settings.QuotaProject) ts := creds.TokenSource if settings.ImpersonationConfig == nil && settings.TokenSource != nil { diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index 636edb460a4..aa69fb4d509 100644 --- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -719,6 +719,8 @@ type PythonSettings struct { // Some settings. Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` + // Experimental features to be included during client library generation. 
+ ExperimentalFeatures *PythonSettings_ExperimentalFeatures `protobuf:"bytes,2,opt,name=experimental_features,json=experimentalFeatures,proto3" json:"experimental_features,omitempty"` } func (x *PythonSettings) Reset() { @@ -760,6 +762,13 @@ func (x *PythonSettings) GetCommon() *CommonLanguageSettings { return nil } +func (x *PythonSettings) GetExperimentalFeatures() *PythonSettings_ExperimentalFeatures { + if x != nil { + return x.ExperimentalFeatures + } + return nil +} + // Settings for Node client libraries. type NodeSettings struct { state protoimpl.MessageState @@ -1024,6 +1033,13 @@ type MethodSettings struct { // The fully qualified name of the method, for which the options below apply. // This is used to find the method to apply the options. + // + // Example: + // + // publishing: + // method_settings: + // - selector: google.storage.control.v2.StorageControl.CreateFolder + // # method settings for CreateFolder... Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` // Describes settings to use for long-running operations when generating // API methods for RPCs. Complements RPCs that use the annotations in @@ -1033,15 +1049,12 @@ type MethodSettings struct { // // publishing: // method_settings: - // - selector: google.cloud.speech.v2.Speech.BatchRecognize - // long_running: - // initial_poll_delay: - // seconds: 60 # 1 minute - // poll_delay_multiplier: 1.5 - // max_poll_delay: - // seconds: 360 # 6 minutes - // total_poll_timeout: - // seconds: 54000 # 90 minutes + // - selector: google.cloud.speech.v2.Speech.BatchRecognize + // long_running: + // initial_poll_delay: 60s # 1 minute + // poll_delay_multiplier: 1.5 + // max_poll_delay: 360s # 6 minutes + // total_poll_timeout: 54000s # 90 minutes LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"` // List of top-level fields of the request message, that should be // automatically populated by the client libraries based on their @@ -1051,9 +1064,9 @@ type MethodSettings struct { // // publishing: // method_settings: - // - selector: google.example.v1.ExampleService.CreateExample - // auto_populated_fields: - // - request_id + // - selector: google.example.v1.ExampleService.CreateExample + // auto_populated_fields: + // - request_id AutoPopulatedFields []string `protobuf:"bytes,3,rep,name=auto_populated_fields,json=autoPopulatedFields,proto3" json:"auto_populated_fields,omitempty"` } @@ -1110,6 +1123,60 @@ func (x *MethodSettings) GetAutoPopulatedFields() []string { return nil } +// Experimental features to be included during client library generation. +// These fields will be deprecated once the feature graduates and is enabled +// by default. +type PythonSettings_ExperimentalFeatures struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Enables generation of asynchronous REST clients if `rest` transport is + // enabled. By default, asynchronous REST clients will not be generated. + // This feature will be enabled by default 1 month after launching the + // feature in preview packages. 
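Because the generated getters (including GetExperimentalFeatures above and GetRestAsyncIoEnabled below) return their zero value on a nil receiver, callers can chain them without intermediate nil checks. A minimal sketch, assuming the import path matches the vendored annotations package:

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/annotations"
)

func main() {
	var settings *annotations.PythonSettings // may be nil or only partly set

	// Each generated getter returns its zero value on a nil receiver, so
	// this chain never panics even when experimental_features is unset.
	enabled := settings.GetExperimentalFeatures().GetRestAsyncIoEnabled()
	fmt.Println(enabled) // false
}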
+ RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"` +} + +func (x *PythonSettings_ExperimentalFeatures) Reset() { + *x = PythonSettings_ExperimentalFeatures{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PythonSettings_ExperimentalFeatures) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {} + +func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PythonSettings_ExperimentalFeatures.ProtoReflect.Descriptor instead. +func (*PythonSettings_ExperimentalFeatures) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool { + if x != nil { + return x.RestAsyncIoEnabled + } + return false +} + // Describes settings to use when generating API methods that use the // long-running operation pattern. // All default values below are from those used in the client library @@ -1138,7 +1205,7 @@ type MethodSettings_LongRunning struct { func (x *MethodSettings_LongRunning) Reset() { *x = MethodSettings_LongRunning{} if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[15] + mi := &file_google_api_client_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1151,7 +1218,7 @@ func (x *MethodSettings_LongRunning) String() string { func (*MethodSettings_LongRunning) ProtoMessage() {} func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[15] + mi := &file_google_api_client_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1456,132 +1523,143 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, - 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, - 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, - 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, - 
0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, - 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, - 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xfd, 0x01, + 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x64, 0x0a, 0x15, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x14, 0x65, 0x78, + 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x1a, 0x49, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x72, 0x65, + 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x41, + 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, + 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, + 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, + 0x72, 0x65, 0x73, 0x6f, 
0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, + 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, + 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, + 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, + 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, + 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, - 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, - 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 
0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, - 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, - 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, - 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, - 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, - 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, - 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, - 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xc2, - 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, - 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 
0x61, 0x70, 0x69, - 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, - 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, - 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, - 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, - 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, - 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, - 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, - 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, + 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, + 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, + 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, + 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, + 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, + 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, + 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, + 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, + 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, + 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, - 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, - 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, - 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, - 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, - 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, - 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, - 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, - 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, - 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53, - 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f, - 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x12, - 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, 0x0a, - 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, - 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, - 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, - 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, - 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 
0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, - 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, - 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, - 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75, - 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x69, - 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, + 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, + 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, + 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, + 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, + 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, + 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 
0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, + 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, + 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, + 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, + 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, + 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, + 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -1597,34 +1675,35 @@ func file_google_api_client_proto_rawDescGZIP() []byte { } var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 17) var file_google_api_client_proto_goTypes = []interface{}{ - (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization - (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination - 
(*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings - (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings - (*Publishing)(nil), // 4: google.api.Publishing - (*JavaSettings)(nil), // 5: google.api.JavaSettings - (*CppSettings)(nil), // 6: google.api.CppSettings - (*PhpSettings)(nil), // 7: google.api.PhpSettings - (*PythonSettings)(nil), // 8: google.api.PythonSettings - (*NodeSettings)(nil), // 9: google.api.NodeSettings - (*DotnetSettings)(nil), // 10: google.api.DotnetSettings - (*RubySettings)(nil), // 11: google.api.RubySettings - (*GoSettings)(nil), // 12: google.api.GoSettings - (*MethodSettings)(nil), // 13: google.api.MethodSettings - nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry - nil, // 15: google.api.DotnetSettings.RenamedServicesEntry - nil, // 16: google.api.DotnetSettings.RenamedResourcesEntry - (*MethodSettings_LongRunning)(nil), // 17: google.api.MethodSettings.LongRunning - (api.LaunchStage)(0), // 18: google.api.LaunchStage - (*durationpb.Duration)(nil), // 19: google.protobuf.Duration - (*descriptorpb.MethodOptions)(nil), // 20: google.protobuf.MethodOptions - (*descriptorpb.ServiceOptions)(nil), // 21: google.protobuf.ServiceOptions + (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization + (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination + (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings + (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings + (*Publishing)(nil), // 4: google.api.Publishing + (*JavaSettings)(nil), // 5: google.api.JavaSettings + (*CppSettings)(nil), // 6: google.api.CppSettings + (*PhpSettings)(nil), // 7: google.api.PhpSettings + (*PythonSettings)(nil), // 8: google.api.PythonSettings + (*NodeSettings)(nil), // 9: google.api.NodeSettings + (*DotnetSettings)(nil), // 10: google.api.DotnetSettings + (*RubySettings)(nil), // 11: google.api.RubySettings + (*GoSettings)(nil), // 12: google.api.GoSettings + (*MethodSettings)(nil), // 13: google.api.MethodSettings + nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry + (*PythonSettings_ExperimentalFeatures)(nil), // 15: google.api.PythonSettings.ExperimentalFeatures + nil, // 16: google.api.DotnetSettings.RenamedServicesEntry + nil, // 17: google.api.DotnetSettings.RenamedResourcesEntry + (*MethodSettings_LongRunning)(nil), // 18: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 19: google.api.LaunchStage + (*durationpb.Duration)(nil), // 20: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 21: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions } var file_google_api_client_proto_depIdxs = []int32{ 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination - 18, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 19, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings @@ -1641,25 +1720,26 @@ var file_google_api_client_proto_depIdxs = []int32{ 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings 2, // 16: google.api.PhpSettings.common:type_name -> 
google.api.CommonLanguageSettings 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings - 15, // 20: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry - 16, // 21: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry - 2, // 22: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 23: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings - 17, // 24: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning - 19, // 25: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration - 19, // 26: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration - 19, // 27: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration - 20, // 28: google.api.method_signature:extendee -> google.protobuf.MethodOptions - 21, // 29: google.api.default_host:extendee -> google.protobuf.ServiceOptions - 21, // 30: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 21, // 31: google.api.api_version:extendee -> google.protobuf.ServiceOptions - 32, // [32:32] is the sub-list for method output_type - 32, // [32:32] is the sub-list for method input_type - 32, // [32:32] is the sub-list for extension type_name - 28, // [28:32] is the sub-list for extension extendee - 0, // [0:28] is the sub-list for field type_name + 15, // 18: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures + 2, // 19: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 20: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 16, // 21: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry + 17, // 22: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry + 2, // 23: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 24: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 18, // 25: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 20, // 26: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 20, // 27: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 20, // 28: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 21, // 29: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 22, // 30: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 22, // 31: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 22, // 32: google.api.api_version:extendee -> google.protobuf.ServiceOptions + 33, // [33:33] is the sub-list for method output_type + 33, // [33:33] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 29, // [29:33] is the sub-list for extension extendee + 0, // [0:29] is the sub-list for field type_name } func init() { file_google_api_client_proto_init() } @@ -1812,7 +1892,19 @@ func 
file_google_api_client_proto_init() { return nil } } - file_google_api_client_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PythonSettings_ExperimentalFeatures); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MethodSettings_LongRunning); i { case 0: return &v.state @@ -1831,7 +1923,7 @@ func file_google_api_client_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_client_proto_rawDesc, NumEnums: 2, - NumMessages: 16, + NumMessages: 17, NumExtensions: 4, NumServices: 0, }, diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go index d339dfb02ac..a462e7d0132 100644 --- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go @@ -121,6 +121,11 @@ type FieldInfo struct { // any API consumer, just documents the API's format for the field it is // applied to. Format FieldInfo_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.api.FieldInfo_Format" json:"format,omitempty"` + // The type(s) that the annotated, generic field may represent. + // + // Currently, this must only be used on fields of type `google.protobuf.Any`. + // Supporting other generic types may be considered in the future. + ReferencedTypes []*TypeReference `protobuf:"bytes,2,rep,name=referenced_types,json=referencedTypes,proto3" json:"referenced_types,omitempty"` } func (x *FieldInfo) Reset() { @@ -162,6 +167,70 @@ func (x *FieldInfo) GetFormat() FieldInfo_Format { return FieldInfo_FORMAT_UNSPECIFIED } +func (x *FieldInfo) GetReferencedTypes() []*TypeReference { + if x != nil { + return x.ReferencedTypes + } + return nil +} + +// A reference to a message type, for use in [FieldInfo][google.api.FieldInfo]. +type TypeReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the type that the annotated, generic field may represent. + // If the type is in the same protobuf package, the value can be the simple + // message name e.g., `"MyMessage"`. Otherwise, the value must be the + // fully-qualified message name e.g., `"google.library.v1.Book"`. + // + // If the type(s) are unknown to the service (e.g. the field accepts generic + // user input), use the wildcard `"*"` to denote this behavior. + // + // See [AIP-202](https://google.aip.dev/202#type-references) for more details. 
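Consumers would read referenced_types through the field_info extension on a field's options. A minimal sketch using the protobuf-go extension API; the nil argument in main stands in for options that would normally come from a compiled descriptor:

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/annotations"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

// referencedTypeNames collects the type names declared in a field's
// (google.api.field_info).referenced_types annotation, if present.
func referencedTypeNames(opts *descriptorpb.FieldOptions) []string {
	if !proto.HasExtension(opts, annotations.E_FieldInfo) {
		return nil
	}
	info := proto.GetExtension(opts, annotations.E_FieldInfo).(*annotations.FieldInfo)
	var names []string
	for _, ref := range info.GetReferencedTypes() {
		names = append(names, ref.GetTypeName())
	}
	return names
}

func main() {
	// In real use opts comes from a FieldDescriptorProto; nil here just
	// shows the call is safe when no annotation exists.
	fmt.Println(referencedTypeNames(nil))
}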
+ TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` +} + +func (x *TypeReference) Reset() { + *x = TypeReference{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_field_info_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypeReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypeReference) ProtoMessage() {} + +func (x *TypeReference) ProtoReflect() protoreflect.Message { + mi := &file_google_api_field_info_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypeReference.ProtoReflect.Descriptor instead. +func (*TypeReference) Descriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{1} +} + +func (x *TypeReference) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + var file_google_api_field_info_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.FieldOptions)(nil), @@ -185,6 +254,13 @@ var ( // string actual_ip_address = 4 [ // (google.api.field_info).format = IPV4_OR_IPV6 // ]; + // google.protobuf.Any generic_field = 5 [ + // (google.api.field_info).referenced_types = {type_name: "ActualType"}, + // (google.api.field_info).referenced_types = {type_name: "OtherType"}, + // ]; + // google.protobuf.Any generic_user_input = 5 [ + // (google.api.field_info).referenced_types = {type_name: "*"}, + // ]; // // optional google.api.FieldInfo field_info = 291403980; E_FieldInfo = &file_google_api_field_info_proto_extTypes[0] @@ -197,30 +273,37 @@ var file_google_api_field_info_proto_rawDesc = []byte{ 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x09, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x01, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x34, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, - 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, - 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, - 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, - 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, - 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, - 0x10, 0x04, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, - 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x6c, 0x0a, 0x0e, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x44, 0x0a, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, + 0x54, 0x79, 0x70, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, + 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, + 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, + 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x04, 0x22, 0x2c, 0x0a, 0x0d, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, + 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, + 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x42, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -236,21 
+319,23 @@ func file_google_api_field_info_proto_rawDescGZIP() []byte { } var file_google_api_field_info_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_google_api_field_info_proto_goTypes = []interface{}{ (FieldInfo_Format)(0), // 0: google.api.FieldInfo.Format (*FieldInfo)(nil), // 1: google.api.FieldInfo - (*descriptorpb.FieldOptions)(nil), // 2: google.protobuf.FieldOptions + (*TypeReference)(nil), // 2: google.api.TypeReference + (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions } var file_google_api_field_info_proto_depIdxs = []int32{ 0, // 0: google.api.FieldInfo.format:type_name -> google.api.FieldInfo.Format - 2, // 1: google.api.field_info:extendee -> google.protobuf.FieldOptions - 1, // 2: google.api.field_info:type_name -> google.api.FieldInfo - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 2, // [2:3] is the sub-list for extension type_name - 1, // [1:2] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 2, // 1: google.api.FieldInfo.referenced_types:type_name -> google.api.TypeReference + 3, // 2: google.api.field_info:extendee -> google.protobuf.FieldOptions + 1, // 3: google.api.field_info:type_name -> google.api.FieldInfo + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 3, // [3:4] is the sub-list for extension type_name + 2, // [2:3] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_google_api_field_info_proto_init() } @@ -271,6 +356,18 @@ func file_google_api_field_info_proto_init() { return nil } } + file_google_api_field_info_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TypeReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -278,7 +375,7 @@ func file_google_api_field_info_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_field_info_proto_rawDesc, NumEnums: 1, - NumMessages: 1, + NumMessages: 2, NumExtensions: 1, NumServices: 0, }, diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index 76ea76df330..ffb5838cb18 100644 --- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -102,7 +102,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { return false } -// # gRPC Transcoding +// gRPC Transcoding // // gRPC Transcoding is a feature for mapping between a gRPC method and one or // more HTTP REST endpoints. 
It allows developers to build a single API service @@ -143,9 +143,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // This enables an HTTP REST to gRPC mapping as below: // -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(name: "messages/123456")` // // Any fields in the request message which are not bound by the path template // automatically become HTTP query parameters if there is no HTTP request body. @@ -169,11 +168,9 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // This enables a HTTP JSON to RPC mapping as below: // -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | -// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: -// "foo"))` +// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo` +// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub: +// SubMessage(subfield: "foo"))` // // Note that fields which are mapped to URL query parameters must have a // primitive type or a repeated primitive type or a non-repeated message type. @@ -203,10 +200,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // representation of the JSON in the request body is determined by // protos JSON encoding: // -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" message { text: "Hi!" })` +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })` // // The special name `*` can be used in the body mapping to define that // every field not bound by the path template should be mapped to the @@ -228,10 +223,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // The following HTTP JSON to RPC mapping is enabled: // -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" text: "Hi!")` +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")` // // Note that when using `*` in the body mapping, it is not possible to // have HTTP parameters, as all fields not bound by the path end in @@ -259,13 +252,13 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // This enables the following two alternative HTTP JSON to RPC mappings: // -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: -// "123456")` +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(message_id: "123456")` // -// ## Rules for HTTP mapping +// - HTTP: `GET /v1/users/me/messages/123456` +// - gRPC: `GetMessage(user_id: "me" message_id: "123456")` +// +// # Rules for HTTP mapping // // 1. Leaf request fields (recursive expansion nested messages in the request // message) are classified into three categories: @@ -284,7 +277,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // request body, all // fields are passed via URL path and URL query parameters. // -// ### Path template syntax +// Path template syntax // // Template = "/" Segments [ Verb ] ; // Segments = Segment { "/" Segment } ; @@ -323,7 +316,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // Document](https://developers.google.com/discovery/v1/reference/apis) as // `{+var}`. 
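The two alternative mappings shown earlier correspond to a single HttpRule with one additional binding. A minimal sketch constructing that message in Go, reusing the selector and path templates from the example:

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/annotations"
)

func main() {
	// One HttpRule with an additional binding expresses both alternative
	// GET mappings for the GetMessage method.
	rule := &annotations.HttpRule{
		Selector: "example.v1.Messaging.GetMessage",
		Pattern:  &annotations.HttpRule_Get{Get: "/v1/messages/{message_id}"},
		AdditionalBindings: []*annotations.HttpRule{{
			Pattern: &annotations.HttpRule_Get{Get: "/v1/users/{user_id}/messages/{message_id}"},
		}},
	}
	fmt.Println(rule.GetSelector(), rule.GetGet(), rule.GetAdditionalBindings()[0].GetGet())
}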
// -// ## Using gRPC API Service Configuration +// # Using gRPC API Service Configuration // // gRPC API Service Configuration (service config) is a configuration language // for configuring a gRPC service to become a user-facing product. The @@ -338,15 +331,14 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // specified in the service config will override any matching transcoding // configuration in the proto. // -// Example: +// The following example selects a gRPC method and applies an `HttpRule` to it: // // http: // rules: -// # Selects a gRPC method and applies HttpRule to it. // - selector: example.v1.Messaging.GetMessage // get: /v1/messages/{message_id}/{sub.subfield} // -// ## Special notes +// # Special notes // // When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the // proto to JSON conversion must follow the [proto3 diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go index 7a3fd93fcd9..b5db279aebf 100644 --- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -253,8 +253,13 @@ type ResourceDescriptor struct { History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"` // The plural name used in the resource name and permission names, such as // 'projects' for the resource name of 'projects/{project}' and the permission - // name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same - // concept of the `plural` field in k8s CRD spec + // name of 'cloudresourcemanager.googleapis.com/projects.get'. One exception + // to this is for Nested Collections that have stuttering names, as defined + // in [AIP-122](https://google.aip.dev/122#nested-collections), where the + // collection ID in the resource name pattern does not necessarily directly + // match the `plural` value. + // + // It is the same concept of the `plural` field in k8s CRD spec // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ // // Note: The plural form is required even for singleton resources. See diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go new file mode 100644 index 00000000000..6e01be017c8 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go @@ -0,0 +1,892 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
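As a rough sketch of the transcoding rule shown in the service-config example above, the equivalent `HttpRule` can be built programmatically from this `annotations` package; the selector and path are taken from the comment, while the `main`/`fmt` scaffolding is illustrative only and not part of the vendored change:

```
package main

import (
	"fmt"

	annotations "google.golang.org/genproto/googleapis/api/annotations"
)

func main() {
	// Mirrors the service-config example in the comment above:
	//   - selector: example.v1.Messaging.GetMessage
	//     get: /v1/messages/{message_id}/{sub.subfield}
	rule := &annotations.HttpRule{
		Selector: "example.v1.Messaging.GetMessage",
		// The HTTP pattern is a oneof; HttpRule_Get selects the GET mapping.
		Pattern: &annotations.HttpRule_Get{
			Get: "/v1/messages/{message_id}/{sub.subfield}",
		},
	}
	fmt.Println(rule.GetSelector(), rule.GetGet())
}
```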
+// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/distribution.proto + +package distribution + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// `Distribution` contains summary statistics for a population of values. It +// optionally contains a histogram representing the distribution of those values +// across a set of buckets. +// +// The summary statistics are the count, mean, sum of the squared deviation from +// the mean, the minimum, and the maximum of the set of population of values. +// The histogram is based on a sequence of buckets and gives a count of values +// that fall into each bucket. The boundaries of the buckets are given either +// explicitly or by formulas for buckets of fixed or exponentially increasing +// widths. +// +// Although it is not forbidden, it is generally a bad idea to include +// non-finite values (infinities or NaNs) in the population of values, as this +// will render the `mean` and `sum_of_squared_deviation` fields meaningless. +type Distribution struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of values in the population. Must be non-negative. This value + // must equal the sum of the values in `bucket_counts` if a histogram is + // provided. + Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + // The arithmetic mean of the values in the population. If `count` is zero + // then this field must be zero. + Mean float64 `protobuf:"fixed64,2,opt,name=mean,proto3" json:"mean,omitempty"` + // The sum of squared deviations from the mean of the values in the + // population. For values x_i this is: + // + // Sum[i=1..n]((x_i - mean)^2) + // + // Knuth, "The Art of Computer Programming", Vol. 2, page 232, 3rd edition + // describes Welford's method for accumulating this sum in one pass. + // + // If `count` is zero then this field must be zero. + SumOfSquaredDeviation float64 `protobuf:"fixed64,3,opt,name=sum_of_squared_deviation,json=sumOfSquaredDeviation,proto3" json:"sum_of_squared_deviation,omitempty"` + // If specified, contains the range of the population values. The field + // must not be present if the `count` is zero. + Range *Distribution_Range `protobuf:"bytes,4,opt,name=range,proto3" json:"range,omitempty"` + // Defines the histogram bucket boundaries. If the distribution does not + // contain a histogram, then omit this field. + BucketOptions *Distribution_BucketOptions `protobuf:"bytes,6,opt,name=bucket_options,json=bucketOptions,proto3" json:"bucket_options,omitempty"` + // The number of values in each bucket of the histogram, as described in + // `bucket_options`. If the distribution does not have a histogram, then omit + // this field. If there is a histogram, then the sum of the values in + // `bucket_counts` must equal the value in the `count` field of the + // distribution. 
+ // + // If present, `bucket_counts` should contain N values, where N is the number + // of buckets specified in `bucket_options`. If you supply fewer than N + // values, the remaining values are assumed to be 0. + // + // The order of the values in `bucket_counts` follows the bucket numbering + // schemes described for the three bucket types. The first value must be the + // count for the underflow bucket (number 0). The next N-2 values are the + // counts for the finite buckets (number 1 through N-2). The N'th value in + // `bucket_counts` is the count for the overflow bucket (number N-1). + BucketCounts []int64 `protobuf:"varint,7,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"` + // Must be in increasing order of `value` field. + Exemplars []*Distribution_Exemplar `protobuf:"bytes,10,rep,name=exemplars,proto3" json:"exemplars,omitempty"` +} + +func (x *Distribution) Reset() { + *x = Distribution{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_distribution_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Distribution) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Distribution) ProtoMessage() {} + +func (x *Distribution) ProtoReflect() protoreflect.Message { + mi := &file_google_api_distribution_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Distribution.ProtoReflect.Descriptor instead. +func (*Distribution) Descriptor() ([]byte, []int) { + return file_google_api_distribution_proto_rawDescGZIP(), []int{0} +} + +func (x *Distribution) GetCount() int64 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *Distribution) GetMean() float64 { + if x != nil { + return x.Mean + } + return 0 +} + +func (x *Distribution) GetSumOfSquaredDeviation() float64 { + if x != nil { + return x.SumOfSquaredDeviation + } + return 0 +} + +func (x *Distribution) GetRange() *Distribution_Range { + if x != nil { + return x.Range + } + return nil +} + +func (x *Distribution) GetBucketOptions() *Distribution_BucketOptions { + if x != nil { + return x.BucketOptions + } + return nil +} + +func (x *Distribution) GetBucketCounts() []int64 { + if x != nil { + return x.BucketCounts + } + return nil +} + +func (x *Distribution) GetExemplars() []*Distribution_Exemplar { + if x != nil { + return x.Exemplars + } + return nil +} + +// The range of the population values. +type Distribution_Range struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The minimum of the population values. + Min float64 `protobuf:"fixed64,1,opt,name=min,proto3" json:"min,omitempty"` + // The maximum of the population values. 
+ Max float64 `protobuf:"fixed64,2,opt,name=max,proto3" json:"max,omitempty"` +} + +func (x *Distribution_Range) Reset() { + *x = Distribution_Range{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_distribution_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Distribution_Range) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Distribution_Range) ProtoMessage() {} + +func (x *Distribution_Range) ProtoReflect() protoreflect.Message { + mi := &file_google_api_distribution_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Distribution_Range.ProtoReflect.Descriptor instead. +func (*Distribution_Range) Descriptor() ([]byte, []int) { + return file_google_api_distribution_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Distribution_Range) GetMin() float64 { + if x != nil { + return x.Min + } + return 0 +} + +func (x *Distribution_Range) GetMax() float64 { + if x != nil { + return x.Max + } + return 0 +} + +// `BucketOptions` describes the bucket boundaries used to create a histogram +// for the distribution. The buckets can be in a linear sequence, an +// exponential sequence, or each bucket can be specified explicitly. +// `BucketOptions` does not include the number of values in each bucket. +// +// A bucket has an inclusive lower bound and exclusive upper bound for the +// values that are counted for that bucket. The upper bound of a bucket must +// be strictly greater than the lower bound. The sequence of N buckets for a +// distribution consists of an underflow bucket (number 0), zero or more +// finite buckets (number 1 through N - 2) and an overflow bucket (number N - +// 1). The buckets are contiguous: the lower bound of bucket i (i > 0) is the +// same as the upper bound of bucket i - 1. The buckets span the whole range +// of finite values: lower bound of the underflow bucket is -infinity and the +// upper bound of the overflow bucket is +infinity. The finite buckets are +// so-called because both bounds are finite. +type Distribution_BucketOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Exactly one of these three fields must be set. 
+ // + // Types that are assignable to Options: + // + // *Distribution_BucketOptions_LinearBuckets + // *Distribution_BucketOptions_ExponentialBuckets + // *Distribution_BucketOptions_ExplicitBuckets + Options isDistribution_BucketOptions_Options `protobuf_oneof:"options"` +} + +func (x *Distribution_BucketOptions) Reset() { + *x = Distribution_BucketOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_distribution_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Distribution_BucketOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Distribution_BucketOptions) ProtoMessage() {} + +func (x *Distribution_BucketOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_api_distribution_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Distribution_BucketOptions.ProtoReflect.Descriptor instead. +func (*Distribution_BucketOptions) Descriptor() ([]byte, []int) { + return file_google_api_distribution_proto_rawDescGZIP(), []int{0, 1} +} + +func (m *Distribution_BucketOptions) GetOptions() isDistribution_BucketOptions_Options { + if m != nil { + return m.Options + } + return nil +} + +func (x *Distribution_BucketOptions) GetLinearBuckets() *Distribution_BucketOptions_Linear { + if x, ok := x.GetOptions().(*Distribution_BucketOptions_LinearBuckets); ok { + return x.LinearBuckets + } + return nil +} + +func (x *Distribution_BucketOptions) GetExponentialBuckets() *Distribution_BucketOptions_Exponential { + if x, ok := x.GetOptions().(*Distribution_BucketOptions_ExponentialBuckets); ok { + return x.ExponentialBuckets + } + return nil +} + +func (x *Distribution_BucketOptions) GetExplicitBuckets() *Distribution_BucketOptions_Explicit { + if x, ok := x.GetOptions().(*Distribution_BucketOptions_ExplicitBuckets); ok { + return x.ExplicitBuckets + } + return nil +} + +type isDistribution_BucketOptions_Options interface { + isDistribution_BucketOptions_Options() +} + +type Distribution_BucketOptions_LinearBuckets struct { + // The linear bucket. + LinearBuckets *Distribution_BucketOptions_Linear `protobuf:"bytes,1,opt,name=linear_buckets,json=linearBuckets,proto3,oneof"` +} + +type Distribution_BucketOptions_ExponentialBuckets struct { + // The exponential buckets. + ExponentialBuckets *Distribution_BucketOptions_Exponential `protobuf:"bytes,2,opt,name=exponential_buckets,json=exponentialBuckets,proto3,oneof"` +} + +type Distribution_BucketOptions_ExplicitBuckets struct { + // The explicit buckets. + ExplicitBuckets *Distribution_BucketOptions_Explicit `protobuf:"bytes,3,opt,name=explicit_buckets,json=explicitBuckets,proto3,oneof"` +} + +func (*Distribution_BucketOptions_LinearBuckets) isDistribution_BucketOptions_Options() {} + +func (*Distribution_BucketOptions_ExponentialBuckets) isDistribution_BucketOptions_Options() {} + +func (*Distribution_BucketOptions_ExplicitBuckets) isDistribution_BucketOptions_Options() {} + +// Exemplars are example points that may be used to annotate aggregated +// distribution values. They are metadata that gives information about a +// particular value added to a Distribution bucket, such as a trace ID that +// was active when a value was added. They may contain further information, +// such as a example values and timestamps, origin, etc. 
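Taken together, the field comments above pin down a fairly strict shape for a populated `Distribution`. A minimal sketch using only types declared in this file plus `timestamppb`; the numbers are illustrative and chosen so that `Count` equals the sum of `BucketCounts`, as the comments require:

```
package main

import (
	"fmt"

	distribution "google.golang.org/genproto/googleapis/api/distribution"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Linear buckets with offset=0, width=10, 3 finite buckets gives
	// N = 3+2 = 5 buckets: (-inf,0) [0,10) [10,20) [20,30) [30,+inf).
	d := &distribution.Distribution{
		Count: 6,    // must equal the sum of BucketCounts below
		Mean:  12.5, // illustrative value
		BucketOptions: &distribution.Distribution_BucketOptions{
			Options: &distribution.Distribution_BucketOptions_LinearBuckets{
				LinearBuckets: &distribution.Distribution_BucketOptions_Linear{
					NumFiniteBuckets: 3,
					Width:            10,
					Offset:           0,
				},
			},
		},
		// Underflow, three finite buckets, overflow -- in that order.
		BucketCounts: []int64{0, 2, 3, 1, 0},
		// Exemplars must be in increasing order of Value.
		Exemplars: []*distribution.Distribution_Exemplar{
			{Value: 4.2, Timestamp: timestamppb.Now()},
			{Value: 17.0, Timestamp: timestamppb.Now()},
		},
	}
	fmt.Println(d.GetCount(), len(d.GetBucketCounts()))
}
```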
+type Distribution_Exemplar struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Value of the exemplar point. This value determines to which bucket the + // exemplar belongs. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + // The observation (sampling) time of the above value. + Timestamp *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Contextual information about the example value. Examples are: + // + // Trace: type.googleapis.com/google.monitoring.v3.SpanContext + // + // Literal string: type.googleapis.com/google.protobuf.StringValue + // + // Labels dropped during aggregation: + // type.googleapis.com/google.monitoring.v3.DroppedLabels + // + // There may be only a single attachment of any given message type in a + // single exemplar, and this is enforced by the system. + Attachments []*anypb.Any `protobuf:"bytes,3,rep,name=attachments,proto3" json:"attachments,omitempty"` +} + +func (x *Distribution_Exemplar) Reset() { + *x = Distribution_Exemplar{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_distribution_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Distribution_Exemplar) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Distribution_Exemplar) ProtoMessage() {} + +func (x *Distribution_Exemplar) ProtoReflect() protoreflect.Message { + mi := &file_google_api_distribution_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Distribution_Exemplar.ProtoReflect.Descriptor instead. +func (*Distribution_Exemplar) Descriptor() ([]byte, []int) { + return file_google_api_distribution_proto_rawDescGZIP(), []int{0, 2} +} + +func (x *Distribution_Exemplar) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +func (x *Distribution_Exemplar) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp + } + return nil +} + +func (x *Distribution_Exemplar) GetAttachments() []*anypb.Any { + if x != nil { + return x.Attachments + } + return nil +} + +// Specifies a linear sequence of buckets that all have the same width +// (except overflow and underflow). Each bucket represents a constant +// absolute uncertainty on the specific value in the bucket. +// +// There are `num_finite_buckets + 2` (= N) buckets. Bucket `i` has the +// following boundaries: +// +// Upper bound (0 <= i < N-1): offset + (width * i). +// +// Lower bound (1 <= i < N): offset + (width * (i - 1)). +type Distribution_BucketOptions_Linear struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Must be greater than 0. + NumFiniteBuckets int32 `protobuf:"varint,1,opt,name=num_finite_buckets,json=numFiniteBuckets,proto3" json:"num_finite_buckets,omitempty"` + // Must be greater than 0. + Width float64 `protobuf:"fixed64,2,opt,name=width,proto3" json:"width,omitempty"` + // Lower bound of the first bucket. 
+ Offset float64 `protobuf:"fixed64,3,opt,name=offset,proto3" json:"offset,omitempty"` +} + +func (x *Distribution_BucketOptions_Linear) Reset() { + *x = Distribution_BucketOptions_Linear{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_distribution_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Distribution_BucketOptions_Linear) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Distribution_BucketOptions_Linear) ProtoMessage() {} + +func (x *Distribution_BucketOptions_Linear) ProtoReflect() protoreflect.Message { + mi := &file_google_api_distribution_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Distribution_BucketOptions_Linear.ProtoReflect.Descriptor instead. +func (*Distribution_BucketOptions_Linear) Descriptor() ([]byte, []int) { + return file_google_api_distribution_proto_rawDescGZIP(), []int{0, 1, 0} +} + +func (x *Distribution_BucketOptions_Linear) GetNumFiniteBuckets() int32 { + if x != nil { + return x.NumFiniteBuckets + } + return 0 +} + +func (x *Distribution_BucketOptions_Linear) GetWidth() float64 { + if x != nil { + return x.Width + } + return 0 +} + +func (x *Distribution_BucketOptions_Linear) GetOffset() float64 { + if x != nil { + return x.Offset + } + return 0 +} + +// Specifies an exponential sequence of buckets that have a width that is +// proportional to the value of the lower bound. Each bucket represents a +// constant relative uncertainty on a specific value in the bucket. +// +// There are `num_finite_buckets + 2` (= N) buckets. Bucket `i` has the +// following boundaries: +// +// Upper bound (0 <= i < N-1): scale * (growth_factor ^ i). +// +// Lower bound (1 <= i < N): scale * (growth_factor ^ (i - 1)). +type Distribution_BucketOptions_Exponential struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Must be greater than 0. + NumFiniteBuckets int32 `protobuf:"varint,1,opt,name=num_finite_buckets,json=numFiniteBuckets,proto3" json:"num_finite_buckets,omitempty"` + // Must be greater than 1. + GrowthFactor float64 `protobuf:"fixed64,2,opt,name=growth_factor,json=growthFactor,proto3" json:"growth_factor,omitempty"` + // Must be greater than 0. + Scale float64 `protobuf:"fixed64,3,opt,name=scale,proto3" json:"scale,omitempty"` +} + +func (x *Distribution_BucketOptions_Exponential) Reset() { + *x = Distribution_BucketOptions_Exponential{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_distribution_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Distribution_BucketOptions_Exponential) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Distribution_BucketOptions_Exponential) ProtoMessage() {} + +func (x *Distribution_BucketOptions_Exponential) ProtoReflect() protoreflect.Message { + mi := &file_google_api_distribution_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Distribution_BucketOptions_Exponential.ProtoReflect.Descriptor instead. 
+func (*Distribution_BucketOptions_Exponential) Descriptor() ([]byte, []int) { + return file_google_api_distribution_proto_rawDescGZIP(), []int{0, 1, 1} +} + +func (x *Distribution_BucketOptions_Exponential) GetNumFiniteBuckets() int32 { + if x != nil { + return x.NumFiniteBuckets + } + return 0 +} + +func (x *Distribution_BucketOptions_Exponential) GetGrowthFactor() float64 { + if x != nil { + return x.GrowthFactor + } + return 0 +} + +func (x *Distribution_BucketOptions_Exponential) GetScale() float64 { + if x != nil { + return x.Scale + } + return 0 +} + +// Specifies a set of buckets with arbitrary widths. +// +// There are `size(bounds) + 1` (= N) buckets. Bucket `i` has the following +// boundaries: +// +// Upper bound (0 <= i < N-1): bounds[i] +// Lower bound (1 <= i < N); bounds[i - 1] +// +// The `bounds` field must contain at least one element. If `bounds` has +// only one element, then there are no finite buckets, and that single +// element is the common boundary of the overflow and underflow buckets. +type Distribution_BucketOptions_Explicit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The values must be monotonically increasing. + Bounds []float64 `protobuf:"fixed64,1,rep,packed,name=bounds,proto3" json:"bounds,omitempty"` +} + +func (x *Distribution_BucketOptions_Explicit) Reset() { + *x = Distribution_BucketOptions_Explicit{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_distribution_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Distribution_BucketOptions_Explicit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Distribution_BucketOptions_Explicit) ProtoMessage() {} + +func (x *Distribution_BucketOptions_Explicit) ProtoReflect() protoreflect.Message { + mi := &file_google_api_distribution_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Distribution_BucketOptions_Explicit.ProtoReflect.Descriptor instead. 
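The Linear, Exponential, and Explicit comments above each give closed-form bucket boundaries. A minimal sketch of mapping a value to its bucket index (0 = underflow, N-1 = overflow) under each scheme, directly instantiating those formulas; the helper names and sample numbers are illustrative, not part of this package:

```
package main

import (
	"fmt"
	"math"
	"sort"
)

// linearBucket: per the Linear comment, bucket i's upper bound is
// offset + width*i for 0 <= i < N-1, with lower bounds inclusive.
func linearBucket(v float64, numFinite int32, width, offset float64) int {
	n := int(numFinite) + 2
	i := int(math.Floor((v-offset)/width)) + 1
	if i < 0 {
		return 0 // underflow bucket
	}
	if i > n-1 {
		return n - 1 // overflow bucket
	}
	return i
}

// exponentialBucket: per the Exponential comment, bucket i's upper
// bound is scale * growth_factor^i for 0 <= i < N-1.
func exponentialBucket(v float64, numFinite int32, growth, scale float64) int {
	n := int(numFinite) + 2
	if v < scale {
		return 0 // underflow bucket
	}
	i := int(math.Floor(math.Log(v/scale)/math.Log(growth))) + 1
	if i > n-1 {
		return n - 1 // overflow bucket
	}
	return i
}

// explicitBucket: per the Explicit comment, bucket i's upper bound is
// bounds[i]; lower bounds are inclusive, so the index is the first j
// with bounds[j] > v.
func explicitBucket(v float64, bounds []float64) int {
	return sort.Search(len(bounds), func(j int) bool { return bounds[j] > v })
}

func main() {
	fmt.Println(linearBucket(12, 3, 10, 0))             // 2: [10,20)
	fmt.Println(exponentialBucket(9, 3, 2, 4))          // 2: [8,16)
	fmt.Println(explicitBucket(5, []float64{1, 5, 25})) // 2: [5,25)
}
```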
+func (*Distribution_BucketOptions_Explicit) Descriptor() ([]byte, []int) { + return file_google_api_distribution_proto_rawDescGZIP(), []int{0, 1, 2} +} + +func (x *Distribution_BucketOptions_Explicit) GetBounds() []float64 { + if x != nil { + return x.Bounds + } + return nil +} + +var File_google_api_distribution_proto protoreflect.FileDescriptor + +var file_google_api_distribution_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x64, 0x69, 0x73, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x19, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x08, 0x0a, 0x0c, 0x44, 0x69, 0x73, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x6d, 0x65, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x04, 0x6d, 0x65, + 0x61, 0x6e, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x75, 0x6d, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x71, 0x75, + 0x61, 0x72, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x76, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x15, 0x73, 0x75, 0x6d, 0x4f, 0x66, 0x53, 0x71, 0x75, 0x61, 0x72, + 0x65, 0x64, 0x44, 0x65, 0x76, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x05, 0x72, + 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x4d, 0x0a, 0x0e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x03, 0x52, 0x0c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x3f, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x72, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x1a, 0x2b, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x10, 0x0a, 0x03, 0x6d, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x6d, 0x69, + 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, + 0x6d, 0x61, 0x78, 0x1a, 0xb9, 0x04, 0x0a, 0x0d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, + 0x74, 
0x69, 0x6f, 0x6e, 0x73, 0x12, 0x56, 0x0a, 0x0e, 0x6c, 0x69, 0x6e, 0x65, 0x61, 0x72, 0x5f, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4c, 0x69, 0x6e, 0x65, 0x61, 0x72, 0x48, 0x00, 0x52, 0x0d, + 0x6c, 0x69, 0x6e, 0x65, 0x61, 0x72, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x65, 0x0a, + 0x13, 0x65, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x48, 0x00, + 0x52, 0x12, 0x65, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x12, 0x5c, 0x0a, 0x10, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, + 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x48, + 0x00, 0x52, 0x0f, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x1a, 0x64, 0x0a, 0x06, 0x4c, 0x69, 0x6e, 0x65, 0x61, 0x72, 0x12, 0x2c, 0x0a, 0x12, + 0x6e, 0x75, 0x6d, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x46, 0x69, 0x6e, + 0x69, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x77, 0x69, + 0x64, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x77, 0x69, 0x64, 0x74, 0x68, + 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x1a, 0x76, 0x0a, 0x0b, 0x45, 0x78, 0x70, 0x6f, + 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x75, 0x6d, 0x5f, 0x66, + 0x69, 0x6e, 0x69, 0x74, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x46, 0x69, 0x6e, 0x69, 0x74, 0x65, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x67, 0x72, 0x6f, 0x77, 0x74, 0x68, 0x5f, + 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x67, 0x72, + 0x6f, 0x77, 0x74, 0x68, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, + 0x61, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x73, 0x63, 0x61, 0x6c, 0x65, + 0x1a, 0x22, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x01, 0x52, 0x06, 0x62, 0x6f, + 0x75, 0x6e, 0x64, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, + 0x92, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x36, 0x0a, 0x0b, + 0x61, 0x74, 0x74, 0x61, 0x63, 0x68, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x61, 0x63, 0x68, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x42, 0x71, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x11, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x43, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, + 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_distribution_proto_rawDescOnce sync.Once + file_google_api_distribution_proto_rawDescData = file_google_api_distribution_proto_rawDesc +) + +func file_google_api_distribution_proto_rawDescGZIP() []byte { + file_google_api_distribution_proto_rawDescOnce.Do(func() { + file_google_api_distribution_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_distribution_proto_rawDescData) + }) + return file_google_api_distribution_proto_rawDescData +} + +var file_google_api_distribution_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_google_api_distribution_proto_goTypes = []interface{}{ + (*Distribution)(nil), // 0: google.api.Distribution + (*Distribution_Range)(nil), // 1: google.api.Distribution.Range + (*Distribution_BucketOptions)(nil), // 2: google.api.Distribution.BucketOptions + (*Distribution_Exemplar)(nil), // 3: google.api.Distribution.Exemplar + (*Distribution_BucketOptions_Linear)(nil), // 4: google.api.Distribution.BucketOptions.Linear + (*Distribution_BucketOptions_Exponential)(nil), // 5: google.api.Distribution.BucketOptions.Exponential + (*Distribution_BucketOptions_Explicit)(nil), // 6: google.api.Distribution.BucketOptions.Explicit + (*timestamppb.Timestamp)(nil), // 7: google.protobuf.Timestamp + (*anypb.Any)(nil), // 8: google.protobuf.Any +} +var file_google_api_distribution_proto_depIdxs = []int32{ + 1, // 0: google.api.Distribution.range:type_name -> google.api.Distribution.Range + 2, // 1: google.api.Distribution.bucket_options:type_name -> google.api.Distribution.BucketOptions + 3, // 2: google.api.Distribution.exemplars:type_name -> google.api.Distribution.Exemplar + 4, // 3: google.api.Distribution.BucketOptions.linear_buckets:type_name -> google.api.Distribution.BucketOptions.Linear + 5, // 4: google.api.Distribution.BucketOptions.exponential_buckets:type_name -> google.api.Distribution.BucketOptions.Exponential + 6, // 5: google.api.Distribution.BucketOptions.explicit_buckets:type_name -> 
google.api.Distribution.BucketOptions.Explicit + 7, // 6: google.api.Distribution.Exemplar.timestamp:type_name -> google.protobuf.Timestamp + 8, // 7: google.api.Distribution.Exemplar.attachments:type_name -> google.protobuf.Any + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_google_api_distribution_proto_init() } +func file_google_api_distribution_proto_init() { + if File_google_api_distribution_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_distribution_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Distribution); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_distribution_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Distribution_Range); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_distribution_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Distribution_BucketOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_distribution_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Distribution_Exemplar); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_distribution_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Distribution_BucketOptions_Linear); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_distribution_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Distribution_BucketOptions_Exponential); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_distribution_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Distribution_BucketOptions_Explicit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_api_distribution_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Distribution_BucketOptions_LinearBuckets)(nil), + (*Distribution_BucketOptions_ExponentialBuckets)(nil), + (*Distribution_BucketOptions_ExplicitBuckets)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_distribution_proto_rawDesc, + NumEnums: 0, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_distribution_proto_goTypes, + DependencyIndexes: file_google_api_distribution_proto_depIdxs, + MessageInfos: file_google_api_distribution_proto_msgTypes, + }.Build() + File_google_api_distribution_proto = out.File + file_google_api_distribution_proto_rawDesc = nil + file_google_api_distribution_proto_goTypes = nil + 
file_google_api_distribution_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go index 6b867a46ede..c90c6015d26 100644 --- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go @@ -1105,25 +1105,66 @@ func (x *Expr_CreateStruct) GetEntries() []*Expr_CreateStruct_Entry { // messages `has(m.x)` is defined as 'defined, but not set`. For proto3, the // macro tests whether the property is set to its default. For map and struct // types, the macro tests whether the property `x` is defined on `m`. +// +// Comprehensions for the standard environment macros evaluation can be best +// visualized as the following pseudocode: +// +// ``` +// let `accu_var` = `accu_init` +// +// for (let `iter_var` in `iter_range`) { +// if (!`loop_condition`) { +// break +// } +// `accu_var` = `loop_step` +// } +// +// return `result` +// ``` +// +// Comprehensions for the optional V2 macros which support map-to-map +// translation differ slightly from the standard environment macros in that +// they expose both the key or index in addition to the value for each list +// or map entry: +// +// ``` +// let `accu_var` = `accu_init` +// +// for (let `iter_var`, `iter_var2` in `iter_range`) { +// if (!`loop_condition`) { +// break +// } +// `accu_var` = `loop_step` +// } +// +// return `result` +// ``` type Expr_Comprehension struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The name of the iteration variable. + // The name of the first iteration variable. + // When the iter_range is a list, this variable is the list element. + // When the iter_range is a map, this variable is the map entry key. IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"` - // The range over which var iterates. + // The name of the second iteration variable, empty if not set. + // When the iter_range is a list, this variable is the integer index. + // When the iter_range is a map, this variable is the map entry value. + // This field is only set for comprehension v2 macros. + IterVar2 string `protobuf:"bytes,8,opt,name=iter_var2,json=iterVar2,proto3" json:"iter_var2,omitempty"` + // The range over which the comprehension iterates. IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"` // The name of the variable used for accumulation of the result. AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"` // The initial value of the accumulator. AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"` - // An expression which can contain iter_var and accu_var. + // An expression which can contain iter_var, iter_var2, and accu_var. // // Returns false when the result has been computed and may be used as // a hint to short-circuit the remainder of the comprehension. LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"` - // An expression which can contain iter_var and accu_var. + // An expression which can contain iter_var, iter_var2, and accu_var. // // Computes the next value of accu_var. 
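The comprehension pseudocode above is easiest to ground with a concrete macro. A hedged sketch of roughly how `items.exists(x, x > 3)` could populate an `Expr_Comprehension` from this package; the helper constructors and expression IDs are illustrative, and the real CEL parser additionally wraps the loop condition in `@not_strictly_false`:

```
package main

import (
	"fmt"

	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

// Illustrative helpers (not part of the package) for building Expr nodes.
func ident(id int64, name string) *exprpb.Expr {
	return &exprpb.Expr{Id: id, ExprKind: &exprpb.Expr_IdentExpr{
		IdentExpr: &exprpb.Expr_Ident{Name: name}}}
}

func call(id int64, fn string, args ...*exprpb.Expr) *exprpb.Expr {
	return &exprpb.Expr{Id: id, ExprKind: &exprpb.Expr_CallExpr{
		CallExpr: &exprpb.Expr_Call{Function: fn, Args: args}}}
}

func boolConst(id int64, b bool) *exprpb.Expr {
	return &exprpb.Expr{Id: id, ExprKind: &exprpb.Expr_ConstExpr{
		ConstExpr: &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: b}}}}
}

func intConst(id int64, v int64) *exprpb.Expr {
	return &exprpb.Expr{Id: id, ExprKind: &exprpb.Expr_ConstExpr{
		ConstExpr: &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: v}}}}
}

func main() {
	// Following the pseudocode above:
	//   let __result__ = false
	//   for (let x in items) {
	//     if (!(!__result__)) { break }        // stop once __result__ is true
	//     __result__ = __result__ || (x > 3)
	//   }
	//   return __result__
	c := &exprpb.Expr_Comprehension{
		IterVar:       "x",
		IterRange:     ident(1, "items"),
		AccuVar:       "__result__",
		AccuInit:      boolConst(2, false),
		LoopCondition: call(3, "!_", ident(4, "__result__")),
		LoopStep: call(5, "_||_", ident(6, "__result__"),
			call(7, "_>_", ident(8, "x"), intConst(9, 3))),
		Result: ident(10, "__result__"),
	}
	fmt.Println(c.GetIterVar(), c.GetAccuVar())
}
```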
LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"` @@ -1172,6 +1213,13 @@ func (x *Expr_Comprehension) GetIterVar() string { return "" } +func (x *Expr_Comprehension) GetIterVar2() string { + if x != nil { + return x.IterVar2 + } + return "" +} + func (x *Expr_Comprehension) GetIterRange() *Expr { if x != nil { return x.IterRange @@ -1485,7 +1533,7 @@ var file_google_api_expr_v1alpha1_syntax_proto_rawDesc = []byte{ 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xae, 0x0d, 0x0a, 0x04, 0x45, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xcb, 0x0d, 0x0a, 0x04, 0x45, 0x78, 0x70, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x43, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, @@ -1567,132 +1615,134 @@ var file_google_api_expr_v1alpha1_syntax_proto_rawDesc = []byte{ 0x45, 0x78, 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0xfd, - 0x02, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x72, 0x79, 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0x9a, + 0x03, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x3d, 0x0a, 0x0a, 0x69, - 0x74, 0x65, 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, - 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, - 0x09, 0x69, 0x74, 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, - 0x63, 0x75, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, - 0x63, 0x75, 0x56, 0x61, 0x72, 0x12, 0x3b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, - 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, - 0x69, 0x74, 0x12, 0x45, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, - 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 
0x0a, 0x09, 0x6c, 0x6f, 0x6f, - 0x70, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, + 0x28, 0x09, 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x69, + 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x32, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x32, 0x12, 0x3d, 0x0a, 0x0a, 0x69, 0x74, 0x65, 0x72, + 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, - 0x6f, 0x70, 0x53, 0x74, 0x65, 0x70, 0x12, 0x36, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, - 0x0a, 0x09, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, - 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, - 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, - 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, - 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, - 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, - 0x00, 0x52, 0x0b, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, - 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, - 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, - 0x18, 0x01, 0x48, 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 
0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, - 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, - 0x8c, 0x07, 0x0a, 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, - 0x0a, 0x0e, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, - 0x73, 0x65, 0x74, 0x73, 0x12, 0x51, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x69, 0x74, + 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x75, 0x5f, + 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x63, 0x75, 0x56, + 0x61, 0x72, 0x12, 0x3b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, 0x69, 0x74, 0x12, + 0x45, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, - 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, - 0x66, 0x6f, 0x2e, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x4e, - 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x80, - 0x03, 0x0a, 0x09, 
0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x69, 0x0a, 0x13, - 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, - 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, 0x43, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x09, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x73, + 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, - 0x65, 0x6e, 0x74, 0x52, 0x12, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, - 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x50, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, - 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, - 0x22, 0x6f, 0x0a, 0x09, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, - 0x15, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, - 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, - 0x0a, 0x16, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, - 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, - 0x03, 0x1a, 0x3c, 0x0a, 0x0e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, - 0x5d, 0x0a, 0x0f, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, 0x6f, 0x70, 0x53, + 0x74, 0x65, 0x70, 0x12, 0x36, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, - 0x78, 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x70, - 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, - 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6f, 0x66, - 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, - 0x6d, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, - 0x42, 0x6e, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x65, + 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, 0x43, 0x6f, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, + 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, + 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, + 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01, 0x48, + 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d, 0x63, + 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x8c, 0x07, 0x0a, + 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e, 0x73, + 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, + 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, + 0x73, 0x12, 0x51, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x63, 0x61, + 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, + 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x4e, 0x0a, 0x0a, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x80, 0x03, 0x0a, 0x09, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x69, 0x0a, 0x13, 0x61, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x52, 0x12, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x50, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 
0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x42, 0x0b, 0x53, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x3c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x22, 0x6f, 0x0a, + 0x09, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, + 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, + 0x4e, 0x54, 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, + 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x48, + 0x45, 0x43, 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4d, 0x50, 0x4f, + 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x1a, 0x3c, + 0x0a, 0x0e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x5d, 0x0a, 0x0f, + 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x70, 0x0a, 0x0e, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, + 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, + 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 
0x75, 0x6d, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x42, 0x6e, 0x0a, + 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0b, 0x53, + 0x79, 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go new file mode 100644 index 00000000000..42bcacc3635 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go @@ -0,0 +1,249 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/label.proto + +package label + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Value types that can be used as label values. +type LabelDescriptor_ValueType int32 + +const ( + // A variable-length string. This is the default. + LabelDescriptor_STRING LabelDescriptor_ValueType = 0 + // Boolean; true or false. + LabelDescriptor_BOOL LabelDescriptor_ValueType = 1 + // A 64-bit signed integer. + LabelDescriptor_INT64 LabelDescriptor_ValueType = 2 +) + +// Enum value maps for LabelDescriptor_ValueType. 
+var ( + LabelDescriptor_ValueType_name = map[int32]string{ + 0: "STRING", + 1: "BOOL", + 2: "INT64", + } + LabelDescriptor_ValueType_value = map[string]int32{ + "STRING": 0, + "BOOL": 1, + "INT64": 2, + } +) + +func (x LabelDescriptor_ValueType) Enum() *LabelDescriptor_ValueType { + p := new(LabelDescriptor_ValueType) + *p = x + return p +} + +func (x LabelDescriptor_ValueType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (LabelDescriptor_ValueType) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_label_proto_enumTypes[0].Descriptor() +} + +func (LabelDescriptor_ValueType) Type() protoreflect.EnumType { + return &file_google_api_label_proto_enumTypes[0] +} + +func (x LabelDescriptor_ValueType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use LabelDescriptor_ValueType.Descriptor instead. +func (LabelDescriptor_ValueType) EnumDescriptor() ([]byte, []int) { + return file_google_api_label_proto_rawDescGZIP(), []int{0, 0} +} + +// A description of a label. +type LabelDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The label key. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The type of data that can be assigned to the label. + ValueType LabelDescriptor_ValueType `protobuf:"varint,2,opt,name=value_type,json=valueType,proto3,enum=google.api.LabelDescriptor_ValueType" json:"value_type,omitempty"` + // A human-readable description for the label. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *LabelDescriptor) Reset() { + *x = LabelDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_label_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LabelDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LabelDescriptor) ProtoMessage() {} + +func (x *LabelDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_google_api_label_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LabelDescriptor.ProtoReflect.Descriptor instead. 
+func (*LabelDescriptor) Descriptor() ([]byte, []int) { + return file_google_api_label_proto_rawDescGZIP(), []int{0} +} + +func (x *LabelDescriptor) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *LabelDescriptor) GetValueType() LabelDescriptor_ValueType { + if x != nil { + return x.ValueType + } + return LabelDescriptor_STRING +} + +func (x *LabelDescriptor) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +var File_google_api_label_proto protoreflect.FileDescriptor + +var file_google_api_label_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x22, 0xb9, 0x01, 0x0a, 0x0f, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x44, 0x0a, 0x0a, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x2c, 0x0a, 0x09, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, + 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, + 0x42, 0x5f, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x42, 0x0a, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x35, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x3b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, + 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_label_proto_rawDescOnce sync.Once + file_google_api_label_proto_rawDescData = file_google_api_label_proto_rawDesc +) + +func file_google_api_label_proto_rawDescGZIP() []byte { + file_google_api_label_proto_rawDescOnce.Do(func() { + file_google_api_label_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_label_proto_rawDescData) + }) + return file_google_api_label_proto_rawDescData +} + +var file_google_api_label_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_label_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_api_label_proto_goTypes = []interface{}{ + (LabelDescriptor_ValueType)(0), // 0: google.api.LabelDescriptor.ValueType + (*LabelDescriptor)(nil), // 1: google.api.LabelDescriptor +} +var file_google_api_label_proto_depIdxs = []int32{ + 0, // 0: google.api.LabelDescriptor.value_type:type_name -> google.api.LabelDescriptor.ValueType + 1, // [1:1] 
is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_api_label_proto_init() } +func file_google_api_label_proto_init() { + if File_google_api_label_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_label_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LabelDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_label_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_label_proto_goTypes, + DependencyIndexes: file_google_api_label_proto_depIdxs, + EnumInfos: file_google_api_label_proto_enumTypes, + MessageInfos: file_google_api_label_proto_msgTypes, + }.Build() + File_google_api_label_proto = out.File + file_google_api_label_proto_rawDesc = nil + file_google_api_label_proto_goTypes = nil + file_google_api_label_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go new file mode 100644 index 00000000000..d4b89c98d19 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go @@ -0,0 +1,771 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/metric.proto + +package metric + +import ( + reflect "reflect" + sync "sync" + + api "google.golang.org/genproto/googleapis/api" + label "google.golang.org/genproto/googleapis/api/label" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The kind of measurement. It describes how the data is reported. +// For information on setting the start time and end time based on +// the MetricKind, see [TimeInterval][google.monitoring.v3.TimeInterval]. +type MetricDescriptor_MetricKind int32 + +const ( + // Do not use this default value. + MetricDescriptor_METRIC_KIND_UNSPECIFIED MetricDescriptor_MetricKind = 0 + // An instantaneous measurement of a value. 
+ MetricDescriptor_GAUGE MetricDescriptor_MetricKind = 1 + // The change in a value during a time interval. + MetricDescriptor_DELTA MetricDescriptor_MetricKind = 2 + // A value accumulated over a time interval. Cumulative + // measurements in a time series should have the same start time + // and increasing end times, until an event resets the cumulative + // value to zero and sets a new start time for the following + // points. + MetricDescriptor_CUMULATIVE MetricDescriptor_MetricKind = 3 +) + +// Enum value maps for MetricDescriptor_MetricKind. +var ( + MetricDescriptor_MetricKind_name = map[int32]string{ + 0: "METRIC_KIND_UNSPECIFIED", + 1: "GAUGE", + 2: "DELTA", + 3: "CUMULATIVE", + } + MetricDescriptor_MetricKind_value = map[string]int32{ + "METRIC_KIND_UNSPECIFIED": 0, + "GAUGE": 1, + "DELTA": 2, + "CUMULATIVE": 3, + } +) + +func (x MetricDescriptor_MetricKind) Enum() *MetricDescriptor_MetricKind { + p := new(MetricDescriptor_MetricKind) + *p = x + return p +} + +func (x MetricDescriptor_MetricKind) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (MetricDescriptor_MetricKind) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_metric_proto_enumTypes[0].Descriptor() +} + +func (MetricDescriptor_MetricKind) Type() protoreflect.EnumType { + return &file_google_api_metric_proto_enumTypes[0] +} + +func (x MetricDescriptor_MetricKind) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use MetricDescriptor_MetricKind.Descriptor instead. +func (MetricDescriptor_MetricKind) EnumDescriptor() ([]byte, []int) { + return file_google_api_metric_proto_rawDescGZIP(), []int{0, 0} +} + +// The value type of a metric. +type MetricDescriptor_ValueType int32 + +const ( + // Do not use this default value. + MetricDescriptor_VALUE_TYPE_UNSPECIFIED MetricDescriptor_ValueType = 0 + // The value is a boolean. + // This value type can be used only if the metric kind is `GAUGE`. + MetricDescriptor_BOOL MetricDescriptor_ValueType = 1 + // The value is a signed 64-bit integer. + MetricDescriptor_INT64 MetricDescriptor_ValueType = 2 + // The value is a double precision floating point number. + MetricDescriptor_DOUBLE MetricDescriptor_ValueType = 3 + // The value is a text string. + // This value type can be used only if the metric kind is `GAUGE`. + MetricDescriptor_STRING MetricDescriptor_ValueType = 4 + // The value is a [`Distribution`][google.api.Distribution]. + MetricDescriptor_DISTRIBUTION MetricDescriptor_ValueType = 5 + // The value is money. + MetricDescriptor_MONEY MetricDescriptor_ValueType = 6 +) + +// Enum value maps for MetricDescriptor_ValueType. 
+var ( + MetricDescriptor_ValueType_name = map[int32]string{ + 0: "VALUE_TYPE_UNSPECIFIED", + 1: "BOOL", + 2: "INT64", + 3: "DOUBLE", + 4: "STRING", + 5: "DISTRIBUTION", + 6: "MONEY", + } + MetricDescriptor_ValueType_value = map[string]int32{ + "VALUE_TYPE_UNSPECIFIED": 0, + "BOOL": 1, + "INT64": 2, + "DOUBLE": 3, + "STRING": 4, + "DISTRIBUTION": 5, + "MONEY": 6, + } +) + +func (x MetricDescriptor_ValueType) Enum() *MetricDescriptor_ValueType { + p := new(MetricDescriptor_ValueType) + *p = x + return p +} + +func (x MetricDescriptor_ValueType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (MetricDescriptor_ValueType) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_metric_proto_enumTypes[1].Descriptor() +} + +func (MetricDescriptor_ValueType) Type() protoreflect.EnumType { + return &file_google_api_metric_proto_enumTypes[1] +} + +func (x MetricDescriptor_ValueType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use MetricDescriptor_ValueType.Descriptor instead. +func (MetricDescriptor_ValueType) EnumDescriptor() ([]byte, []int) { + return file_google_api_metric_proto_rawDescGZIP(), []int{0, 1} +} + +// Defines a metric type and its schema. Once a metric descriptor is created, +// deleting or altering it stops data collection and makes the metric type's +// existing data unusable. +type MetricDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource name of the metric descriptor. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The metric type, including its DNS name prefix. The type is not + // URL-encoded. All user-defined metric types have the DNS name + // `custom.googleapis.com` or `external.googleapis.com`. Metric types should + // use a natural hierarchical grouping. For example: + // + // "custom.googleapis.com/invoice/paid/amount" + // "external.googleapis.com/prometheus/up" + // "appengine.googleapis.com/http/server/response_latencies" + Type string `protobuf:"bytes,8,opt,name=type,proto3" json:"type,omitempty"` + // The set of labels that can be used to describe a specific + // instance of this metric type. For example, the + // `appengine.googleapis.com/http/server/response_latencies` metric + // type has a label for the HTTP response code, `response_code`, so + // you can look at latencies for successful responses or just + // for responses that failed. + Labels []*label.LabelDescriptor `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty"` + // Whether the metric records instantaneous values, changes to a value, etc. + // Some combinations of `metric_kind` and `value_type` might not be supported. + MetricKind MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"` + // Whether the measurement is an integer, a floating-point number, etc. + // Some combinations of `metric_kind` and `value_type` might not be supported. + ValueType MetricDescriptor_ValueType `protobuf:"varint,4,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"` + // The units in which the metric value is reported. It is only applicable + // if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` + // defines the representation of the stored metric values. 
+ // + // Different systems might scale the values to be more easily displayed (so a + // value of `0.02kBy` _might_ be displayed as `20By`, and a value of + // `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is + // `kBy`, then the value of the metric is always in thousands of bytes, no + // matter how it might be displayed. + // + // If you want a custom metric to record the exact number of CPU-seconds used + // by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is + // `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 + // CPU-seconds, then the value is written as `12005`. + // + // Alternatively, if you want a custom metric to record data in a more + // granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is + // `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), + // or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). + // + // The supported units are a subset of [The Unified Code for Units of + // Measure](https://unitsofmeasure.org/ucum.html) standard: + // + // **Basic units (UNIT)** + // + // * `bit` bit + // * `By` byte + // * `s` second + // * `min` minute + // * `h` hour + // * `d` day + // * `1` dimensionless + // + // **Prefixes (PREFIX)** + // + // * `k` kilo (10^3) + // * `M` mega (10^6) + // * `G` giga (10^9) + // * `T` tera (10^12) + // * `P` peta (10^15) + // * `E` exa (10^18) + // * `Z` zetta (10^21) + // * `Y` yotta (10^24) + // + // * `m` milli (10^-3) + // * `u` micro (10^-6) + // * `n` nano (10^-9) + // * `p` pico (10^-12) + // * `f` femto (10^-15) + // * `a` atto (10^-18) + // * `z` zepto (10^-21) + // * `y` yocto (10^-24) + // + // * `Ki` kibi (2^10) + // * `Mi` mebi (2^20) + // * `Gi` gibi (2^30) + // * `Ti` tebi (2^40) + // * `Pi` pebi (2^50) + // + // **Grammar** + // + // The grammar also includes these connectors: + // + // - `/` division or ratio (as an infix operator). For examples, + // `kBy/{email}` or `MiBy/10ms` (although you should almost never + // have `/s` in a metric `unit`; rates should always be computed at + // query time from the underlying cumulative or delta value). + // - `.` multiplication or composition (as an infix operator). For + // examples, `GBy.d` or `k{watt}.h`. + // + // The grammar for a unit is as follows: + // + // Expression = Component { "." Component } { "/" Component } ; + // + // Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] + // | Annotation + // | "1" + // ; + // + // Annotation = "{" NAME "}" ; + // + // Notes: + // + // - `Annotation` is just a comment if it follows a `UNIT`. If the annotation + // is used alone, then the unit is equivalent to `1`. For examples, + // `{request}/s == 1/s`, `By{transmitted}/s == By/s`. + // - `NAME` is a sequence of non-blank printable ASCII characters not + // containing `{` or `}`. + // - `1` represents a unitary [dimensionless + // unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such + // as in `1/s`. It is typically used when none of the basic units are + // appropriate. For example, "new users per day" can be represented as + // `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 new + // users). Alternatively, "thousands of page views per day" would be + // represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric + // value of `5.3` would mean "5300 page views per day"). 
+ // - `%` represents dimensionless value of 1/100, and annotates values giving + // a percentage (so the metric values are typically in the range of 0..100, + // and a metric value `3` means "3 percent"). + // - `10^2.%` indicates a metric contains a ratio, typically in the range + // 0..1, that will be multiplied by 100 and displayed as a percentage + // (so a metric value `0.03` means "3 percent"). + Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` + // A detailed description of the metric, which can be used in documentation. + Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` + // A concise name for the metric, which can be displayed in user interfaces. + // Use sentence case without an ending period, for example "Request count". + // This field is optional but it is recommended to be set for any metrics + // associated with user-visible concepts, such as Quota. + DisplayName string `protobuf:"bytes,7,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Optional. Metadata which can be used to guide usage of the metric. + Metadata *MetricDescriptor_MetricDescriptorMetadata `protobuf:"bytes,10,opt,name=metadata,proto3" json:"metadata,omitempty"` + // Optional. The launch stage of the metric definition. + LaunchStage api.LaunchStage `protobuf:"varint,12,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` + // Read-only. If present, then a [time + // series][google.monitoring.v3.TimeSeries], which is identified partially by + // a metric type and a + // [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor], that + // is associated with this metric type can only be associated with one of the + // monitored resource types listed here. + MonitoredResourceTypes []string `protobuf:"bytes,13,rep,name=monitored_resource_types,json=monitoredResourceTypes,proto3" json:"monitored_resource_types,omitempty"` +} + +func (x *MetricDescriptor) Reset() { + *x = MetricDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_metric_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetricDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetricDescriptor) ProtoMessage() {} + +func (x *MetricDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_google_api_metric_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetricDescriptor.ProtoReflect.Descriptor instead. 
+func (*MetricDescriptor) Descriptor() ([]byte, []int) { + return file_google_api_metric_proto_rawDescGZIP(), []int{0} +} + +func (x *MetricDescriptor) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *MetricDescriptor) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *MetricDescriptor) GetLabels() []*label.LabelDescriptor { + if x != nil { + return x.Labels + } + return nil +} + +func (x *MetricDescriptor) GetMetricKind() MetricDescriptor_MetricKind { + if x != nil { + return x.MetricKind + } + return MetricDescriptor_METRIC_KIND_UNSPECIFIED +} + +func (x *MetricDescriptor) GetValueType() MetricDescriptor_ValueType { + if x != nil { + return x.ValueType + } + return MetricDescriptor_VALUE_TYPE_UNSPECIFIED +} + +func (x *MetricDescriptor) GetUnit() string { + if x != nil { + return x.Unit + } + return "" +} + +func (x *MetricDescriptor) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *MetricDescriptor) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *MetricDescriptor) GetMetadata() *MetricDescriptor_MetricDescriptorMetadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *MetricDescriptor) GetLaunchStage() api.LaunchStage { + if x != nil { + return x.LaunchStage + } + return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED +} + +func (x *MetricDescriptor) GetMonitoredResourceTypes() []string { + if x != nil { + return x.MonitoredResourceTypes + } + return nil +} + +// A specific metric, identified by specifying values for all of the +// labels of a [`MetricDescriptor`][google.api.MetricDescriptor]. +type Metric struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An existing metric type, see + // [google.api.MetricDescriptor][google.api.MetricDescriptor]. For example, + // `custom.googleapis.com/invoice/paid/amount`. + Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"` + // The set of label values that uniquely identify this metric. All + // labels listed in the `MetricDescriptor` must be assigned values. + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Metric) Reset() { + *x = Metric{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_metric_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metric) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metric) ProtoMessage() {} + +func (x *Metric) ProtoReflect() protoreflect.Message { + mi := &file_google_api_metric_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metric.ProtoReflect.Descriptor instead. +func (*Metric) Descriptor() ([]byte, []int) { + return file_google_api_metric_proto_rawDescGZIP(), []int{1} +} + +func (x *Metric) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Metric) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +// Additional annotations that can be used to guide the usage of a metric. 
+type MetricDescriptor_MetricDescriptorMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Deprecated. Must use the + // [MetricDescriptor.launch_stage][google.api.MetricDescriptor.launch_stage] + // instead. + // + // Deprecated: Do not use. + LaunchStage api.LaunchStage `protobuf:"varint,1,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` + // The sampling period of metric data points. For metrics which are written + // periodically, consecutive data points are stored at this time interval, + // excluding data loss due to errors. Metrics with a higher granularity have + // a smaller sampling period. + SamplePeriod *durationpb.Duration `protobuf:"bytes,2,opt,name=sample_period,json=samplePeriod,proto3" json:"sample_period,omitempty"` + // The delay of data points caused by ingestion. Data points older than this + // age are guaranteed to be ingested and available to be read, excluding + // data loss due to errors. + IngestDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=ingest_delay,json=ingestDelay,proto3" json:"ingest_delay,omitempty"` +} + +func (x *MetricDescriptor_MetricDescriptorMetadata) Reset() { + *x = MetricDescriptor_MetricDescriptorMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_metric_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetricDescriptor_MetricDescriptorMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetricDescriptor_MetricDescriptorMetadata) ProtoMessage() {} + +func (x *MetricDescriptor_MetricDescriptorMetadata) ProtoReflect() protoreflect.Message { + mi := &file_google_api_metric_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetricDescriptor_MetricDescriptorMetadata.ProtoReflect.Descriptor instead. +func (*MetricDescriptor_MetricDescriptorMetadata) Descriptor() ([]byte, []int) { + return file_google_api_metric_proto_rawDescGZIP(), []int{0, 0} +} + +// Deprecated: Do not use. 
+func (x *MetricDescriptor_MetricDescriptorMetadata) GetLaunchStage() api.LaunchStage { + if x != nil { + return x.LaunchStage + } + return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED +} + +func (x *MetricDescriptor_MetricDescriptorMetadata) GetSamplePeriod() *durationpb.Duration { + if x != nil { + return x.SamplePeriod + } + return nil +} + +func (x *MetricDescriptor_MetricDescriptorMetadata) GetIngestDelay() *durationpb.Duration { + if x != nil { + return x.IngestDelay + } + return nil +} + +var File_google_api_metric_proto protoreflect.FileDescriptor + +var file_google_api_metric_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, + 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc1, 0x07, 0x0a, + 0x10, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x48, + 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0a, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x45, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, + 0x6e, 0x69, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, + 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, + 
0x64, 0x61, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, + 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, + 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, + 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x38, 0x0a, 0x18, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x73, 0x1a, 0xd8, 0x01, 0x0a, 0x18, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3e, + 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x3e, + 0x0a, 0x0d, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x3c, + 0x0a, 0x0c, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0b, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x22, 0x4f, 0x0a, 0x0a, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1b, 0x0a, 0x17, 0x4d, 0x45, + 0x54, 0x52, 0x49, 0x43, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, + 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x10, 0x02, 0x12, 0x0e, 0x0a, + 0x0a, 0x43, 0x55, 0x4d, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x56, 0x45, 0x10, 0x03, 0x22, 0x71, 0x0a, + 0x09, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x56, 0x41, + 0x4c, 0x55, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, + 0x12, 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x44, + 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, + 0x47, 0x10, 0x04, 0x12, 
0x10, 0x0a, 0x0c, 0x44, 0x49, 0x53, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, + 0x49, 0x4f, 0x4e, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x4f, 0x4e, 0x45, 0x59, 0x10, 0x06, + 0x22, 0x8f, 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x36, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x42, 0x5f, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x3b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0xa2, 0x02, 0x04, 0x47, + 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_metric_proto_rawDescOnce sync.Once + file_google_api_metric_proto_rawDescData = file_google_api_metric_proto_rawDesc +) + +func file_google_api_metric_proto_rawDescGZIP() []byte { + file_google_api_metric_proto_rawDescOnce.Do(func() { + file_google_api_metric_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_metric_proto_rawDescData) + }) + return file_google_api_metric_proto_rawDescData +} + +var file_google_api_metric_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_google_api_metric_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_api_metric_proto_goTypes = []interface{}{ + (MetricDescriptor_MetricKind)(0), // 0: google.api.MetricDescriptor.MetricKind + (MetricDescriptor_ValueType)(0), // 1: google.api.MetricDescriptor.ValueType + (*MetricDescriptor)(nil), // 2: google.api.MetricDescriptor + (*Metric)(nil), // 3: google.api.Metric + (*MetricDescriptor_MetricDescriptorMetadata)(nil), // 4: google.api.MetricDescriptor.MetricDescriptorMetadata + nil, // 5: google.api.Metric.LabelsEntry + (*label.LabelDescriptor)(nil), // 6: google.api.LabelDescriptor + (api.LaunchStage)(0), // 7: google.api.LaunchStage + (*durationpb.Duration)(nil), // 8: google.protobuf.Duration +} +var file_google_api_metric_proto_depIdxs = []int32{ + 6, // 0: google.api.MetricDescriptor.labels:type_name -> google.api.LabelDescriptor + 0, // 1: google.api.MetricDescriptor.metric_kind:type_name -> google.api.MetricDescriptor.MetricKind + 1, // 2: google.api.MetricDescriptor.value_type:type_name -> google.api.MetricDescriptor.ValueType + 4, // 3: google.api.MetricDescriptor.metadata:type_name -> google.api.MetricDescriptor.MetricDescriptorMetadata + 7, // 4: google.api.MetricDescriptor.launch_stage:type_name -> google.api.LaunchStage + 5, // 5: google.api.Metric.labels:type_name -> 
google.api.Metric.LabelsEntry + 7, // 6: google.api.MetricDescriptor.MetricDescriptorMetadata.launch_stage:type_name -> google.api.LaunchStage + 8, // 7: google.api.MetricDescriptor.MetricDescriptorMetadata.sample_period:type_name -> google.protobuf.Duration + 8, // 8: google.api.MetricDescriptor.MetricDescriptorMetadata.ingest_delay:type_name -> google.protobuf.Duration + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_google_api_metric_proto_init() } +func file_google_api_metric_proto_init() { + if File_google_api_metric_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_metric_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_metric_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metric); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_metric_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricDescriptor_MetricDescriptorMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_metric_proto_rawDesc, + NumEnums: 2, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_metric_proto_goTypes, + DependencyIndexes: file_google_api_metric_proto_depIdxs, + EnumInfos: file_google_api_metric_proto_enumTypes, + MessageInfos: file_google_api_metric_proto_msgTypes, + }.Build() + File_google_api_metric_proto = out.File + file_google_api_metric_proto_rawDesc = nil + file_google_api_metric_proto_goTypes = nil + file_google_api_metric_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go new file mode 100644 index 00000000000..b4cee29803c --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go @@ -0,0 +1,476 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/monitored_resource.proto + +package monitoredres + +import ( + reflect "reflect" + sync "sync" + + api "google.golang.org/genproto/googleapis/api" + label "google.golang.org/genproto/googleapis/api/label" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// An object that describes the schema of a +// [MonitoredResource][google.api.MonitoredResource] object using a type name +// and a set of labels. For example, the monitored resource descriptor for +// Google Compute Engine VM instances has a type of +// `"gce_instance"` and specifies the use of the labels `"instance_id"` and +// `"zone"` to identify particular VM instances. +// +// Different APIs can support different monitored resource types. APIs generally +// provide a `list` method that returns the monitored resource descriptors used +// by the API. +type MonitoredResourceDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. The resource name of the monitored resource descriptor: + // `"projects/{project_id}/monitoredResourceDescriptors/{type}"` where + // {type} is the value of the `type` field in this object and + // {project_id} is a project ID that provides API-specific context for + // accessing the type. APIs that do not use project information can use the + // resource name format `"monitoredResourceDescriptors/{type}"`. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // Required. The monitored resource type. For example, the type + // `"cloudsql_database"` represents databases in Google Cloud SQL. + // + // For a list of types, see [Monitored resource + // types](https://cloud.google.com/monitoring/api/resources) + // + // and [Logging resource + // types](https://cloud.google.com/logging/docs/api/v2/resource-list). + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Optional. A concise name for the monitored resource type that might be + // displayed in user interfaces. It should be a Title Cased Noun Phrase, + // without any article or other determiners. For example, + // `"Google Cloud SQL Database"`. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Optional. A detailed description of the monitored resource type that might + // be used in documentation. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Required. A set of labels used to describe instances of this monitored + // resource type. For example, an individual Google Cloud SQL database is + // identified by values for the labels `"database_id"` and `"zone"`. + Labels []*label.LabelDescriptor `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"` + // Optional. The launch stage of the monitored resource definition. 
+ LaunchStage api.LaunchStage `protobuf:"varint,7,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` +} + +func (x *MonitoredResourceDescriptor) Reset() { + *x = MonitoredResourceDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_monitored_resource_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MonitoredResourceDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MonitoredResourceDescriptor) ProtoMessage() {} + +func (x *MonitoredResourceDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_google_api_monitored_resource_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MonitoredResourceDescriptor.ProtoReflect.Descriptor instead. +func (*MonitoredResourceDescriptor) Descriptor() ([]byte, []int) { + return file_google_api_monitored_resource_proto_rawDescGZIP(), []int{0} +} + +func (x *MonitoredResourceDescriptor) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *MonitoredResourceDescriptor) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *MonitoredResourceDescriptor) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *MonitoredResourceDescriptor) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *MonitoredResourceDescriptor) GetLabels() []*label.LabelDescriptor { + if x != nil { + return x.Labels + } + return nil +} + +func (x *MonitoredResourceDescriptor) GetLaunchStage() api.LaunchStage { + if x != nil { + return x.LaunchStage + } + return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED +} + +// An object representing a resource that can be used for monitoring, logging, +// billing, or other purposes. Examples include virtual machine instances, +// databases, and storage devices such as disks. The `type` field identifies a +// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object +// that describes the resource's schema. Information in the `labels` field +// identifies the actual resource and its attributes according to the schema. +// For example, a particular Compute Engine VM instance could be represented by +// the following object, because the +// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] for +// `"gce_instance"` has labels +// `"project_id"`, `"instance_id"` and `"zone"`: +// +// { "type": "gce_instance", +// "labels": { "project_id": "my-project", +// "instance_id": "12345678901234", +// "zone": "us-central1-a" }} +type MonitoredResource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The monitored resource type. This field must match + // the `type` field of a + // [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] + // object. For example, the type of a Compute Engine VM instance is + // `gce_instance`. Some descriptors include the service name in the type; for + // example, the type of a Datastream stream is + // `datastream.googleapis.com/Stream`. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Required. 
Values for all of the labels listed in the associated monitored + // resource descriptor. For example, Compute Engine VM instances use the + // labels `"project_id"`, `"instance_id"`, and `"zone"`. + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *MonitoredResource) Reset() { + *x = MonitoredResource{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_monitored_resource_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MonitoredResource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MonitoredResource) ProtoMessage() {} + +func (x *MonitoredResource) ProtoReflect() protoreflect.Message { + mi := &file_google_api_monitored_resource_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MonitoredResource.ProtoReflect.Descriptor instead. +func (*MonitoredResource) Descriptor() ([]byte, []int) { + return file_google_api_monitored_resource_proto_rawDescGZIP(), []int{1} +} + +func (x *MonitoredResource) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *MonitoredResource) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +// Auxiliary metadata for a [MonitoredResource][google.api.MonitoredResource] +// object. [MonitoredResource][google.api.MonitoredResource] objects contain the +// minimum set of information to uniquely identify a monitored resource +// instance. There is some other useful auxiliary metadata. Monitoring and +// Logging use an ingestion pipeline to extract metadata for cloud resources of +// all types, and store the metadata in this message. +type MonitoredResourceMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. Values for predefined system metadata labels. + // System labels are a kind of metadata extracted by Google, including + // "machine_image", "vpc", "subnet_id", + // "security_group", "name", etc. + // System label values can be only strings, Boolean values, or a list of + // strings. For example: + // + // { "name": "my-test-instance", + // "security_group": ["a", "b", "c"], + // "spot_instance": false } + SystemLabels *structpb.Struct `protobuf:"bytes,1,opt,name=system_labels,json=systemLabels,proto3" json:"system_labels,omitempty"` + // Output only. A map of user-defined metadata labels. 
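+	// For example (illustrative values only, not part of any schema):
+	//
+	//	{ "owner": "storage-team", "environment": "prod" }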
+ UserLabels map[string]string `protobuf:"bytes,2,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *MonitoredResourceMetadata) Reset() { + *x = MonitoredResourceMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_monitored_resource_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MonitoredResourceMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MonitoredResourceMetadata) ProtoMessage() {} + +func (x *MonitoredResourceMetadata) ProtoReflect() protoreflect.Message { + mi := &file_google_api_monitored_resource_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MonitoredResourceMetadata.ProtoReflect.Descriptor instead. +func (*MonitoredResourceMetadata) Descriptor() ([]byte, []int) { + return file_google_api_monitored_resource_proto_rawDescGZIP(), []int{2} +} + +func (x *MonitoredResourceMetadata) GetSystemLabels() *structpb.Struct { + if x != nil { + return x.SystemLabels + } + return nil +} + +func (x *MonitoredResourceMetadata) GetUserLabels() map[string]string { + if x != nil { + return x.UserLabels + } + return nil +} + +var File_google_api_monitored_resource_proto protoreflect.FileDescriptor + +var file_google_api_monitored_resource_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x1a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, + 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xfb, 0x01, 0x0a, 0x1b, 0x4d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 
0x69, 0x70, 0x74, 0x6f, 0x72, + 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, + 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, + 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, + 0x74, 0x61, 0x67, 0x65, 0x22, 0xa5, 0x01, 0x0a, 0x11, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x41, + 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x4c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf0, 0x01, 0x0a, + 0x19, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3c, 0x0a, 0x0d, 0x73, 0x79, + 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0c, 0x73, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x56, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, + 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, + 0x79, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x42, 0x16, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x43, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, + 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 
0x64, + 0x72, 0x65, 0x73, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x72, 0x65, 0x73, + 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_google_api_monitored_resource_proto_rawDescOnce sync.Once + file_google_api_monitored_resource_proto_rawDescData = file_google_api_monitored_resource_proto_rawDesc +) + +func file_google_api_monitored_resource_proto_rawDescGZIP() []byte { + file_google_api_monitored_resource_proto_rawDescOnce.Do(func() { + file_google_api_monitored_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_monitored_resource_proto_rawDescData) + }) + return file_google_api_monitored_resource_proto_rawDescData +} + +var file_google_api_monitored_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_google_api_monitored_resource_proto_goTypes = []interface{}{ + (*MonitoredResourceDescriptor)(nil), // 0: google.api.MonitoredResourceDescriptor + (*MonitoredResource)(nil), // 1: google.api.MonitoredResource + (*MonitoredResourceMetadata)(nil), // 2: google.api.MonitoredResourceMetadata + nil, // 3: google.api.MonitoredResource.LabelsEntry + nil, // 4: google.api.MonitoredResourceMetadata.UserLabelsEntry + (*label.LabelDescriptor)(nil), // 5: google.api.LabelDescriptor + (api.LaunchStage)(0), // 6: google.api.LaunchStage + (*structpb.Struct)(nil), // 7: google.protobuf.Struct +} +var file_google_api_monitored_resource_proto_depIdxs = []int32{ + 5, // 0: google.api.MonitoredResourceDescriptor.labels:type_name -> google.api.LabelDescriptor + 6, // 1: google.api.MonitoredResourceDescriptor.launch_stage:type_name -> google.api.LaunchStage + 3, // 2: google.api.MonitoredResource.labels:type_name -> google.api.MonitoredResource.LabelsEntry + 7, // 3: google.api.MonitoredResourceMetadata.system_labels:type_name -> google.protobuf.Struct + 4, // 4: google.api.MonitoredResourceMetadata.user_labels:type_name -> google.api.MonitoredResourceMetadata.UserLabelsEntry + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_google_api_monitored_resource_proto_init() } +func file_google_api_monitored_resource_proto_init() { + if File_google_api_monitored_resource_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_monitored_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MonitoredResourceDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_monitored_resource_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MonitoredResource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_monitored_resource_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MonitoredResourceMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_google_api_monitored_resource_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_monitored_resource_proto_goTypes, + DependencyIndexes: file_google_api_monitored_resource_proto_depIdxs, + MessageInfos: file_google_api_monitored_resource_proto_msgTypes, + }.Build() + File_google_api_monitored_resource_proto = out.File + file_google_api_monitored_resource_proto_rawDesc = nil + file_google_api_monitored_resource_proto_goTypes = nil + file_google_api_monitored_resource_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/types.pb.go b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/types.pb.go deleted file mode 100644 index e30205d4510..00000000000 --- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/types.pb.go +++ /dev/null @@ -1,1166 +0,0 @@ -// Copyright 2024 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v4.24.4 -// source: google/bigtable/admin/v2/types.proto - -package admin - -import ( - reflect "reflect" - sync "sync" - - _ "google.golang.org/genproto/googleapis/api/annotations" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// `Type` represents the type of data that is written to, read from, or stored -// in Bigtable. It is heavily based on the GoogleSQL standard to help maintain -// familiarity and consistency across products and features. -// -// For compatibility with Bigtable's existing untyped APIs, each `Type` includes -// an `Encoding` which describes how to convert to/from the underlying data. -// This might involve composing a series of steps into an "encoding chain," for -// example to convert from INT64 -> STRING -> raw bytes. In most cases, a "link" -// in the encoding chain will be based an on existing GoogleSQL conversion -// function like `CAST`. -// -// Each link in the encoding chain also defines the following properties: -// - Natural sort: Does the encoded value sort consistently with the original -// typed value? Note that Bigtable will always sort data based on the raw -// encoded value, *not* the decoded type. -// - Example: BYTES values sort in the same order as their raw encodings. -// - Counterexample: Encoding INT64 to a fixed-width STRING does *not* -// preserve sort order when dealing with negative numbers. -// INT64(1) > INT64(-1), but STRING("-00001") > STRING("00001). -// - The overall encoding chain has this property if *every* link does. 
-// - Self-delimiting: If we concatenate two encoded values, can we always tell -// where the first one ends and the second one begins? -// - Example: If we encode INT64s to fixed-width STRINGs, the first value -// will always contain exactly N digits, possibly preceded by a sign. -// - Counterexample: If we concatenate two UTF-8 encoded STRINGs, we have -// no way to tell where the first one ends. -// - The overall encoding chain has this property if *any* link does. -// - Compatibility: Which other systems have matching encoding schemes? For -// example, does this encoding have a GoogleSQL equivalent? HBase? Java? -type Type struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The kind of type that this represents. - // - // Types that are assignable to Kind: - // - // *Type_BytesType - // *Type_StringType - // *Type_Int64Type - // *Type_AggregateType - Kind isType_Kind `protobuf_oneof:"kind"` -} - -func (x *Type) Reset() { - *x = Type{} - if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Type) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Type) ProtoMessage() {} - -func (x *Type) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Type.ProtoReflect.Descriptor instead. -func (*Type) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0} -} - -func (m *Type) GetKind() isType_Kind { - if m != nil { - return m.Kind - } - return nil -} - -func (x *Type) GetBytesType() *Type_Bytes { - if x, ok := x.GetKind().(*Type_BytesType); ok { - return x.BytesType - } - return nil -} - -func (x *Type) GetStringType() *Type_String { - if x, ok := x.GetKind().(*Type_StringType); ok { - return x.StringType - } - return nil -} - -func (x *Type) GetInt64Type() *Type_Int64 { - if x, ok := x.GetKind().(*Type_Int64Type); ok { - return x.Int64Type - } - return nil -} - -func (x *Type) GetAggregateType() *Type_Aggregate { - if x, ok := x.GetKind().(*Type_AggregateType); ok { - return x.AggregateType - } - return nil -} - -type isType_Kind interface { - isType_Kind() -} - -type Type_BytesType struct { - // Bytes - BytesType *Type_Bytes `protobuf:"bytes,1,opt,name=bytes_type,json=bytesType,proto3,oneof"` -} - -type Type_StringType struct { - // String - StringType *Type_String `protobuf:"bytes,2,opt,name=string_type,json=stringType,proto3,oneof"` -} - -type Type_Int64Type struct { - // Int64 - Int64Type *Type_Int64 `protobuf:"bytes,5,opt,name=int64_type,json=int64Type,proto3,oneof"` -} - -type Type_AggregateType struct { - // Aggregate - AggregateType *Type_Aggregate `protobuf:"bytes,6,opt,name=aggregate_type,json=aggregateType,proto3,oneof"` -} - -func (*Type_BytesType) isType_Kind() {} - -func (*Type_StringType) isType_Kind() {} - -func (*Type_Int64Type) isType_Kind() {} - -func (*Type_AggregateType) isType_Kind() {} - -// Bytes -// Values of type `Bytes` are stored in `Value.bytes_value`. 
-type Type_Bytes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The encoding to use when converting to/from lower level types. - Encoding *Type_Bytes_Encoding `protobuf:"bytes,1,opt,name=encoding,proto3" json:"encoding,omitempty"` -} - -func (x *Type_Bytes) Reset() { - *x = Type_Bytes{} - if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Type_Bytes) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Type_Bytes) ProtoMessage() {} - -func (x *Type_Bytes) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Type_Bytes.ProtoReflect.Descriptor instead. -func (*Type_Bytes) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *Type_Bytes) GetEncoding() *Type_Bytes_Encoding { - if x != nil { - return x.Encoding - } - return nil -} - -// String -// Values of type `String` are stored in `Value.string_value`. -type Type_String struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The encoding to use when converting to/from lower level types. - Encoding *Type_String_Encoding `protobuf:"bytes,1,opt,name=encoding,proto3" json:"encoding,omitempty"` -} - -func (x *Type_String) Reset() { - *x = Type_String{} - if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Type_String) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Type_String) ProtoMessage() {} - -func (x *Type_String) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Type_String.ProtoReflect.Descriptor instead. -func (*Type_String) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 1} -} - -func (x *Type_String) GetEncoding() *Type_String_Encoding { - if x != nil { - return x.Encoding - } - return nil -} - -// Int64 -// Values of type `Int64` are stored in `Value.int_value`. -type Type_Int64 struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The encoding to use when converting to/from lower level types. 
- Encoding *Type_Int64_Encoding `protobuf:"bytes,1,opt,name=encoding,proto3" json:"encoding,omitempty"` -} - -func (x *Type_Int64) Reset() { - *x = Type_Int64{} - if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Type_Int64) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Type_Int64) ProtoMessage() {} - -func (x *Type_Int64) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Type_Int64.ProtoReflect.Descriptor instead. -func (*Type_Int64) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 2} -} - -func (x *Type_Int64) GetEncoding() *Type_Int64_Encoding { - if x != nil { - return x.Encoding - } - return nil -} - -// A value that combines incremental updates into a summarized value. -// -// Data is never directly written or read using type `Aggregate`. Writes will -// provide either the `input_type` or `state_type`, and reads will always -// return the `state_type` . -type Type_Aggregate struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Type of the inputs that are accumulated by this `Aggregate`, which must - // specify a full encoding. - // Use `AddInput` mutations to accumulate new inputs. - InputType *Type `protobuf:"bytes,1,opt,name=input_type,json=inputType,proto3" json:"input_type,omitempty"` - // Output only. Type that holds the internal accumulator state for the - // `Aggregate`. This is a function of the `input_type` and `aggregator` - // chosen, and will always specify a full encoding. - StateType *Type `protobuf:"bytes,2,opt,name=state_type,json=stateType,proto3" json:"state_type,omitempty"` - // Which aggregator function to use. The configured types must match. - // - // Types that are assignable to Aggregator: - // - // *Type_Aggregate_Sum_ - Aggregator isType_Aggregate_Aggregator `protobuf_oneof:"aggregator"` -} - -func (x *Type_Aggregate) Reset() { - *x = Type_Aggregate{} - if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Type_Aggregate) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Type_Aggregate) ProtoMessage() {} - -func (x *Type_Aggregate) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Type_Aggregate.ProtoReflect.Descriptor instead. 
-func (*Type_Aggregate) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 3} -} - -func (x *Type_Aggregate) GetInputType() *Type { - if x != nil { - return x.InputType - } - return nil -} - -func (x *Type_Aggregate) GetStateType() *Type { - if x != nil { - return x.StateType - } - return nil -} - -func (m *Type_Aggregate) GetAggregator() isType_Aggregate_Aggregator { - if m != nil { - return m.Aggregator - } - return nil -} - -func (x *Type_Aggregate) GetSum() *Type_Aggregate_Sum { - if x, ok := x.GetAggregator().(*Type_Aggregate_Sum_); ok { - return x.Sum - } - return nil -} - -type isType_Aggregate_Aggregator interface { - isType_Aggregate_Aggregator() -} - -type Type_Aggregate_Sum_ struct { - // Sum aggregator. - Sum *Type_Aggregate_Sum `protobuf:"bytes,4,opt,name=sum,proto3,oneof"` -} - -func (*Type_Aggregate_Sum_) isType_Aggregate_Aggregator() {} - -// Rules used to convert to/from lower level types. -type Type_Bytes_Encoding struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Which encoding to use. - // - // Types that are assignable to Encoding: - // - // *Type_Bytes_Encoding_Raw_ - Encoding isType_Bytes_Encoding_Encoding `protobuf_oneof:"encoding"` -} - -func (x *Type_Bytes_Encoding) Reset() { - *x = Type_Bytes_Encoding{} - if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Type_Bytes_Encoding) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Type_Bytes_Encoding) ProtoMessage() {} - -func (x *Type_Bytes_Encoding) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Type_Bytes_Encoding.ProtoReflect.Descriptor instead. -func (*Type_Bytes_Encoding) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 0, 0} -} - -func (m *Type_Bytes_Encoding) GetEncoding() isType_Bytes_Encoding_Encoding { - if m != nil { - return m.Encoding - } - return nil -} - -func (x *Type_Bytes_Encoding) GetRaw() *Type_Bytes_Encoding_Raw { - if x, ok := x.GetEncoding().(*Type_Bytes_Encoding_Raw_); ok { - return x.Raw - } - return nil -} - -type isType_Bytes_Encoding_Encoding interface { - isType_Bytes_Encoding_Encoding() -} - -type Type_Bytes_Encoding_Raw_ struct { - // Use `Raw` encoding. - Raw *Type_Bytes_Encoding_Raw `protobuf:"bytes,1,opt,name=raw,proto3,oneof"` -} - -func (*Type_Bytes_Encoding_Raw_) isType_Bytes_Encoding_Encoding() {} - -// Leaves the value "as-is" -// * Natural sort? Yes -// * Self-delimiting? No -// * Compatibility? 
N/A -type Type_Bytes_Encoding_Raw struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *Type_Bytes_Encoding_Raw) Reset() { - *x = Type_Bytes_Encoding_Raw{} - if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Type_Bytes_Encoding_Raw) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Type_Bytes_Encoding_Raw) ProtoMessage() {} - -func (x *Type_Bytes_Encoding_Raw) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Type_Bytes_Encoding_Raw.ProtoReflect.Descriptor instead. -func (*Type_Bytes_Encoding_Raw) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 0, 0, 0} -} - -// Rules used to convert to/from lower level types. -type Type_String_Encoding struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Which encoding to use. - // - // Types that are assignable to Encoding: - // - // *Type_String_Encoding_Utf8Raw_ - Encoding isType_String_Encoding_Encoding `protobuf_oneof:"encoding"` -} - -func (x *Type_String_Encoding) Reset() { - *x = Type_String_Encoding{} - if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Type_String_Encoding) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Type_String_Encoding) ProtoMessage() {} - -func (x *Type_String_Encoding) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Type_String_Encoding.ProtoReflect.Descriptor instead. -func (*Type_String_Encoding) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 1, 0} -} - -func (m *Type_String_Encoding) GetEncoding() isType_String_Encoding_Encoding { - if m != nil { - return m.Encoding - } - return nil -} - -func (x *Type_String_Encoding) GetUtf8Raw() *Type_String_Encoding_Utf8Raw { - if x, ok := x.GetEncoding().(*Type_String_Encoding_Utf8Raw_); ok { - return x.Utf8Raw - } - return nil -} - -type isType_String_Encoding_Encoding interface { - isType_String_Encoding_Encoding() -} - -type Type_String_Encoding_Utf8Raw_ struct { - // Use `Utf8Raw` encoding. - Utf8Raw *Type_String_Encoding_Utf8Raw `protobuf:"bytes,1,opt,name=utf8_raw,json=utf8Raw,proto3,oneof"` -} - -func (*Type_String_Encoding_Utf8Raw_) isType_String_Encoding_Encoding() {} - -// UTF-8 encoding -// * Natural sort? No (ASCII characters only) -// * Self-delimiting? No -// * Compatibility? 
-// - BigQuery Federation `TEXT` encoding -// - HBase `Bytes.toBytes` -// - Java `String#getBytes(StandardCharsets.UTF_8)` -type Type_String_Encoding_Utf8Raw struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *Type_String_Encoding_Utf8Raw) Reset() { - *x = Type_String_Encoding_Utf8Raw{} - if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Type_String_Encoding_Utf8Raw) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Type_String_Encoding_Utf8Raw) ProtoMessage() {} - -func (x *Type_String_Encoding_Utf8Raw) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Type_String_Encoding_Utf8Raw.ProtoReflect.Descriptor instead. -func (*Type_String_Encoding_Utf8Raw) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 1, 0, 0} -} - -// Rules used to convert to/from lower level types. -type Type_Int64_Encoding struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Which encoding to use. - // - // Types that are assignable to Encoding: - // - // *Type_Int64_Encoding_BigEndianBytes_ - Encoding isType_Int64_Encoding_Encoding `protobuf_oneof:"encoding"` -} - -func (x *Type_Int64_Encoding) Reset() { - *x = Type_Int64_Encoding{} - if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Type_Int64_Encoding) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Type_Int64_Encoding) ProtoMessage() {} - -func (x *Type_Int64_Encoding) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Type_Int64_Encoding.ProtoReflect.Descriptor instead. -func (*Type_Int64_Encoding) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 2, 0} -} - -func (m *Type_Int64_Encoding) GetEncoding() isType_Int64_Encoding_Encoding { - if m != nil { - return m.Encoding - } - return nil -} - -func (x *Type_Int64_Encoding) GetBigEndianBytes() *Type_Int64_Encoding_BigEndianBytes { - if x, ok := x.GetEncoding().(*Type_Int64_Encoding_BigEndianBytes_); ok { - return x.BigEndianBytes - } - return nil -} - -type isType_Int64_Encoding_Encoding interface { - isType_Int64_Encoding_Encoding() -} - -type Type_Int64_Encoding_BigEndianBytes_ struct { - // Use `BigEndianBytes` encoding. - BigEndianBytes *Type_Int64_Encoding_BigEndianBytes `protobuf:"bytes,1,opt,name=big_endian_bytes,json=bigEndianBytes,proto3,oneof"` -} - -func (*Type_Int64_Encoding_BigEndianBytes_) isType_Int64_Encoding_Encoding() {} - -// Encodes the value as an 8-byte big endian twos complement `Bytes` -// value. -// * Natural sort? No (positive values only) -// * Self-delimiting? 
Yes -// * Compatibility? -// - BigQuery Federation `BINARY` encoding -// - HBase `Bytes.toBytes` -// - Java `ByteBuffer.putLong()` with `ByteOrder.BIG_ENDIAN` -type Type_Int64_Encoding_BigEndianBytes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The underlying `Bytes` type, which may be able to encode further. - BytesType *Type_Bytes `protobuf:"bytes,1,opt,name=bytes_type,json=bytesType,proto3" json:"bytes_type,omitempty"` -} - -func (x *Type_Int64_Encoding_BigEndianBytes) Reset() { - *x = Type_Int64_Encoding_BigEndianBytes{} - if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Type_Int64_Encoding_BigEndianBytes) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Type_Int64_Encoding_BigEndianBytes) ProtoMessage() {} - -func (x *Type_Int64_Encoding_BigEndianBytes) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Type_Int64_Encoding_BigEndianBytes.ProtoReflect.Descriptor instead. -func (*Type_Int64_Encoding_BigEndianBytes) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 2, 0, 0} -} - -func (x *Type_Int64_Encoding_BigEndianBytes) GetBytesType() *Type_Bytes { - if x != nil { - return x.BytesType - } - return nil -} - -// Computes the sum of the input values. -// Allowed input: `Int64` -// State: same as input -type Type_Aggregate_Sum struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *Type_Aggregate_Sum) Reset() { - *x = Type_Aggregate_Sum{} - if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Type_Aggregate_Sum) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Type_Aggregate_Sum) ProtoMessage() {} - -func (x *Type_Aggregate_Sum) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Type_Aggregate_Sum.ProtoReflect.Descriptor instead. 
-func (*Type_Aggregate_Sum) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_types_proto_rawDescGZIP(), []int{0, 3, 0} -} - -var File_google_bigtable_admin_v2_types_proto protoreflect.FileDescriptor - -var file_google_bigtable_admin_v2_types_proto_rawDesc = []byte{ - 0x0a, 0x24, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0xda, 0x09, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x42, - 0x79, 0x74, 0x65, 0x73, 0x48, 0x00, 0x52, 0x09, 0x62, 0x79, 0x74, 0x65, 0x73, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, - 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x48, 0x00, 0x52, - 0x0a, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x69, - 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, - 0x49, 0x6e, 0x74, 0x36, 0x34, 0x48, 0x00, 0x52, 0x09, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x51, 0x0a, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, - 0x67, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, - 0x65, 0x54, 0x79, 0x70, 0x65, 0x1a, 0xb8, 0x01, 0x0a, 0x05, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, - 0x49, 0x0a, 0x08, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, - 0x65, 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, - 0x52, 0x08, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x1a, 0x64, 0x0a, 0x08, 0x45, 0x6e, - 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x45, 0x0a, 0x03, 0x72, 0x61, 0x77, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 
0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, - 0x79, 0x70, 0x65, 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, - 0x6e, 0x67, 0x2e, 0x52, 0x61, 0x77, 0x48, 0x00, 0x52, 0x03, 0x72, 0x61, 0x77, 0x1a, 0x05, 0x0a, - 0x03, 0x52, 0x61, 0x77, 0x42, 0x0a, 0x0a, 0x08, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, - 0x1a, 0xcc, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x4a, 0x0a, 0x08, 0x65, - 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x53, 0x74, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x08, 0x65, - 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x1a, 0x76, 0x0a, 0x08, 0x45, 0x6e, 0x63, 0x6f, 0x64, - 0x69, 0x6e, 0x67, 0x12, 0x53, 0x0a, 0x08, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x72, 0x61, 0x77, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x52, 0x61, 0x77, 0x48, 0x00, 0x52, - 0x07, 0x75, 0x74, 0x66, 0x38, 0x52, 0x61, 0x77, 0x1a, 0x09, 0x0a, 0x07, 0x55, 0x74, 0x66, 0x38, - 0x52, 0x61, 0x77, 0x42, 0x0a, 0x0a, 0x08, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x1a, - 0xac, 0x02, 0x0a, 0x05, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x12, 0x49, 0x0a, 0x08, 0x65, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x49, 0x6e, 0x74, 0x36, - 0x34, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x08, 0x65, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x1a, 0xd7, 0x01, 0x0a, 0x08, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, - 0x67, 0x12, 0x68, 0x0a, 0x10, 0x62, 0x69, 0x67, 0x5f, 0x65, 0x6e, 0x64, 0x69, 0x61, 0x6e, 0x5f, - 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x49, 0x6e, 0x74, 0x36, - 0x34, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x2e, 0x42, 0x69, 0x67, 0x45, 0x6e, - 0x64, 0x69, 0x61, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x62, 0x69, 0x67, - 0x45, 0x6e, 0x64, 0x69, 0x61, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x1a, 0x55, 0x0a, 0x0e, 0x42, - 0x69, 0x67, 0x45, 0x6e, 0x64, 0x69, 0x61, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x43, 0x0a, - 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, - 0x65, 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x09, 0x62, 0x79, 0x74, 0x65, 0x73, 0x54, 0x79, - 0x70, 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x1a, 0xe5, - 0x01, 0x0a, 0x09, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 
0x61, 0x74, 0x65, 0x12, 0x3d, 0x0a, 0x0a, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, - 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x42, 0x0a, 0x0a, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x42, - 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x40, 0x0a, 0x03, 0x73, 0x75, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x67, 0x67, - 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x75, 0x6d, 0x48, 0x00, 0x52, 0x03, 0x73, 0x75, - 0x6d, 0x1a, 0x05, 0x0a, 0x03, 0x53, 0x75, 0x6d, 0x42, 0x0c, 0x0a, 0x0a, 0x61, 0x67, 0x67, 0x72, - 0x65, 0x67, 0x61, 0x74, 0x6f, 0x72, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x42, 0xd2, - 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, - 0x0a, 0x54, 0x79, 0x70, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, - 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, 0x47, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, - 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, - 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, - 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_bigtable_admin_v2_types_proto_rawDescOnce sync.Once - file_google_bigtable_admin_v2_types_proto_rawDescData = file_google_bigtable_admin_v2_types_proto_rawDesc -) - -func file_google_bigtable_admin_v2_types_proto_rawDescGZIP() []byte { - file_google_bigtable_admin_v2_types_proto_rawDescOnce.Do(func() { - file_google_bigtable_admin_v2_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_bigtable_admin_v2_types_proto_rawDescData) - }) - return file_google_bigtable_admin_v2_types_proto_rawDescData -} - -var file_google_bigtable_admin_v2_types_proto_msgTypes = make([]protoimpl.MessageInfo, 12) -var file_google_bigtable_admin_v2_types_proto_goTypes = []interface{}{ - (*Type)(nil), // 0: google.bigtable.admin.v2.Type - (*Type_Bytes)(nil), // 1: 
google.bigtable.admin.v2.Type.Bytes - (*Type_String)(nil), // 2: google.bigtable.admin.v2.Type.String - (*Type_Int64)(nil), // 3: google.bigtable.admin.v2.Type.Int64 - (*Type_Aggregate)(nil), // 4: google.bigtable.admin.v2.Type.Aggregate - (*Type_Bytes_Encoding)(nil), // 5: google.bigtable.admin.v2.Type.Bytes.Encoding - (*Type_Bytes_Encoding_Raw)(nil), // 6: google.bigtable.admin.v2.Type.Bytes.Encoding.Raw - (*Type_String_Encoding)(nil), // 7: google.bigtable.admin.v2.Type.String.Encoding - (*Type_String_Encoding_Utf8Raw)(nil), // 8: google.bigtable.admin.v2.Type.String.Encoding.Utf8Raw - (*Type_Int64_Encoding)(nil), // 9: google.bigtable.admin.v2.Type.Int64.Encoding - (*Type_Int64_Encoding_BigEndianBytes)(nil), // 10: google.bigtable.admin.v2.Type.Int64.Encoding.BigEndianBytes - (*Type_Aggregate_Sum)(nil), // 11: google.bigtable.admin.v2.Type.Aggregate.Sum -} -var file_google_bigtable_admin_v2_types_proto_depIdxs = []int32{ - 1, // 0: google.bigtable.admin.v2.Type.bytes_type:type_name -> google.bigtable.admin.v2.Type.Bytes - 2, // 1: google.bigtable.admin.v2.Type.string_type:type_name -> google.bigtable.admin.v2.Type.String - 3, // 2: google.bigtable.admin.v2.Type.int64_type:type_name -> google.bigtable.admin.v2.Type.Int64 - 4, // 3: google.bigtable.admin.v2.Type.aggregate_type:type_name -> google.bigtable.admin.v2.Type.Aggregate - 5, // 4: google.bigtable.admin.v2.Type.Bytes.encoding:type_name -> google.bigtable.admin.v2.Type.Bytes.Encoding - 7, // 5: google.bigtable.admin.v2.Type.String.encoding:type_name -> google.bigtable.admin.v2.Type.String.Encoding - 9, // 6: google.bigtable.admin.v2.Type.Int64.encoding:type_name -> google.bigtable.admin.v2.Type.Int64.Encoding - 0, // 7: google.bigtable.admin.v2.Type.Aggregate.input_type:type_name -> google.bigtable.admin.v2.Type - 0, // 8: google.bigtable.admin.v2.Type.Aggregate.state_type:type_name -> google.bigtable.admin.v2.Type - 11, // 9: google.bigtable.admin.v2.Type.Aggregate.sum:type_name -> google.bigtable.admin.v2.Type.Aggregate.Sum - 6, // 10: google.bigtable.admin.v2.Type.Bytes.Encoding.raw:type_name -> google.bigtable.admin.v2.Type.Bytes.Encoding.Raw - 8, // 11: google.bigtable.admin.v2.Type.String.Encoding.utf8_raw:type_name -> google.bigtable.admin.v2.Type.String.Encoding.Utf8Raw - 10, // 12: google.bigtable.admin.v2.Type.Int64.Encoding.big_endian_bytes:type_name -> google.bigtable.admin.v2.Type.Int64.Encoding.BigEndianBytes - 1, // 13: google.bigtable.admin.v2.Type.Int64.Encoding.BigEndianBytes.bytes_type:type_name -> google.bigtable.admin.v2.Type.Bytes - 14, // [14:14] is the sub-list for method output_type - 14, // [14:14] is the sub-list for method input_type - 14, // [14:14] is the sub-list for extension type_name - 14, // [14:14] is the sub-list for extension extendee - 0, // [0:14] is the sub-list for field type_name -} - -func init() { file_google_bigtable_admin_v2_types_proto_init() } -func file_google_bigtable_admin_v2_types_proto_init() { - if File_google_bigtable_admin_v2_types_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_bigtable_admin_v2_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_bigtable_admin_v2_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type_Bytes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - file_google_bigtable_admin_v2_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type_String); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_bigtable_admin_v2_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type_Int64); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_bigtable_admin_v2_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type_Aggregate); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_bigtable_admin_v2_types_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type_Bytes_Encoding); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_bigtable_admin_v2_types_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type_Bytes_Encoding_Raw); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_bigtable_admin_v2_types_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type_String_Encoding); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_bigtable_admin_v2_types_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type_String_Encoding_Utf8Raw); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_bigtable_admin_v2_types_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type_Int64_Encoding); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_bigtable_admin_v2_types_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type_Int64_Encoding_BigEndianBytes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_bigtable_admin_v2_types_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type_Aggregate_Sum); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_google_bigtable_admin_v2_types_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*Type_BytesType)(nil), - (*Type_StringType)(nil), - (*Type_Int64Type)(nil), - (*Type_AggregateType)(nil), - } - file_google_bigtable_admin_v2_types_proto_msgTypes[4].OneofWrappers = []interface{}{ - (*Type_Aggregate_Sum_)(nil), - } - file_google_bigtable_admin_v2_types_proto_msgTypes[5].OneofWrappers = []interface{}{ - (*Type_Bytes_Encoding_Raw_)(nil), - } - file_google_bigtable_admin_v2_types_proto_msgTypes[7].OneofWrappers = []interface{}{ - (*Type_String_Encoding_Utf8Raw_)(nil), - } - file_google_bigtable_admin_v2_types_proto_msgTypes[9].OneofWrappers = []interface{}{ - 
(*Type_Int64_Encoding_BigEndianBytes_)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_bigtable_admin_v2_types_proto_rawDesc, - NumEnums: 0, - NumMessages: 12, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_bigtable_admin_v2_types_proto_goTypes, - DependencyIndexes: file_google_bigtable_admin_v2_types_proto_depIdxs, - MessageInfos: file_google_bigtable_admin_v2_types_proto_msgTypes, - }.Build() - File_google_bigtable_admin_v2_types_proto = out.File - file_google_bigtable_admin_v2_types_proto_rawDesc = nil - file_google_bigtable_admin_v2_types_proto_goTypes = nil - file_google_bigtable_admin_v2_types_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go new file mode 100644 index 00000000000..cae02bce14f --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go @@ -0,0 +1,189 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/type/calendar_period.proto + +package calendarperiod + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A `CalendarPeriod` represents the abstract concept of a time period that has +// a canonical start. Grammatically, "the start of the current +// `CalendarPeriod`." All calendar times begin at midnight UTC. +type CalendarPeriod int32 + +const ( + // Undefined period, raises an error. + CalendarPeriod_CALENDAR_PERIOD_UNSPECIFIED CalendarPeriod = 0 + // A day. + CalendarPeriod_DAY CalendarPeriod = 1 + // A week. Weeks begin on Monday, following + // [ISO 8601](https://en.wikipedia.org/wiki/ISO_week_date). + CalendarPeriod_WEEK CalendarPeriod = 2 + // A fortnight. The first calendar fortnight of the year begins at the start + // of week 1 according to + // [ISO 8601](https://en.wikipedia.org/wiki/ISO_week_date). + CalendarPeriod_FORTNIGHT CalendarPeriod = 3 + // A month. + CalendarPeriod_MONTH CalendarPeriod = 4 + // A quarter. Quarters start on dates 1-Jan, 1-Apr, 1-Jul, and 1-Oct of each + // year. + CalendarPeriod_QUARTER CalendarPeriod = 5 + // A half-year. Half-years start on dates 1-Jan and 1-Jul. + CalendarPeriod_HALF CalendarPeriod = 6 + // A year. 
+ CalendarPeriod_YEAR CalendarPeriod = 7 +) + +// Enum value maps for CalendarPeriod. +var ( + CalendarPeriod_name = map[int32]string{ + 0: "CALENDAR_PERIOD_UNSPECIFIED", + 1: "DAY", + 2: "WEEK", + 3: "FORTNIGHT", + 4: "MONTH", + 5: "QUARTER", + 6: "HALF", + 7: "YEAR", + } + CalendarPeriod_value = map[string]int32{ + "CALENDAR_PERIOD_UNSPECIFIED": 0, + "DAY": 1, + "WEEK": 2, + "FORTNIGHT": 3, + "MONTH": 4, + "QUARTER": 5, + "HALF": 6, + "YEAR": 7, + } +) + +func (x CalendarPeriod) Enum() *CalendarPeriod { + p := new(CalendarPeriod) + *p = x + return p +} + +func (x CalendarPeriod) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CalendarPeriod) Descriptor() protoreflect.EnumDescriptor { + return file_google_type_calendar_period_proto_enumTypes[0].Descriptor() +} + +func (CalendarPeriod) Type() protoreflect.EnumType { + return &file_google_type_calendar_period_proto_enumTypes[0] +} + +func (x CalendarPeriod) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CalendarPeriod.Descriptor instead. +func (CalendarPeriod) EnumDescriptor() ([]byte, []int) { + return file_google_type_calendar_period_proto_rawDescGZIP(), []int{0} +} + +var File_google_type_calendar_period_proto protoreflect.FileDescriptor + +var file_google_type_calendar_period_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x63, 0x61, + 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2a, 0x7f, 0x0a, 0x0e, 0x43, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x50, 0x65, 0x72, 0x69, + 0x6f, 0x64, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x41, 0x4c, 0x45, 0x4e, 0x44, 0x41, 0x52, 0x5f, 0x50, + 0x45, 0x52, 0x49, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x41, 0x59, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, + 0x57, 0x45, 0x45, 0x4b, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x4f, 0x52, 0x54, 0x4e, 0x49, + 0x47, 0x48, 0x54, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x4f, 0x4e, 0x54, 0x48, 0x10, 0x04, + 0x12, 0x0b, 0x0a, 0x07, 0x51, 0x55, 0x41, 0x52, 0x54, 0x45, 0x52, 0x10, 0x05, 0x12, 0x08, 0x0a, + 0x04, 0x48, 0x41, 0x4c, 0x46, 0x10, 0x06, 0x12, 0x08, 0x0a, 0x04, 0x59, 0x45, 0x41, 0x52, 0x10, + 0x07, 0x42, 0x78, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x42, 0x13, 0x43, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x50, 0x65, + 0x72, 0x69, 0x6f, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, + 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, + 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x3b, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x70, + 0x65, 0x72, 0x69, 0x6f, 0x64, 0xa2, 0x02, 0x03, 0x47, 0x54, 0x50, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_google_type_calendar_period_proto_rawDescOnce sync.Once + file_google_type_calendar_period_proto_rawDescData = file_google_type_calendar_period_proto_rawDesc +) + +func file_google_type_calendar_period_proto_rawDescGZIP() []byte { + 
file_google_type_calendar_period_proto_rawDescOnce.Do(func() { + file_google_type_calendar_period_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_type_calendar_period_proto_rawDescData) + }) + return file_google_type_calendar_period_proto_rawDescData +} + +var file_google_type_calendar_period_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_type_calendar_period_proto_goTypes = []interface{}{ + (CalendarPeriod)(0), // 0: google.type.CalendarPeriod +} +var file_google_type_calendar_period_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_type_calendar_period_proto_init() } +func file_google_type_calendar_period_proto_init() { + if File_google_type_calendar_period_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_type_calendar_period_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_type_calendar_period_proto_goTypes, + DependencyIndexes: file_google_type_calendar_period_proto_depIdxs, + EnumInfos: file_google_type_calendar_period_proto_enumTypes, + }.Build() + File_google_type_calendar_period_proto = out.File + file_google_type_calendar_period_proto_rawDesc = nil + file_google_type_calendar_period_proto_goTypes = nil + file_google_type_calendar_period_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go new file mode 100644 index 00000000000..c7bef08aadd --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go @@ -0,0 +1,200 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/type/date.proto + +package date + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Represents a whole or partial calendar date, such as a birthday. The time of +// day and time zone are either specified elsewhere or are insignificant. The +// date is relative to the Gregorian Calendar. 
This can represent one of the +// following: +// +// * A full date, with non-zero year, month, and day values +// * A month and day value, with a zero year, such as an anniversary +// * A year on its own, with zero month and day values +// * A year and month value, with a zero day, such as a credit card expiration +// date +// +// Related types are [google.type.TimeOfDay][google.type.TimeOfDay] and +// `google.protobuf.Timestamp`. +type Date struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Year of the date. Must be from 1 to 9999, or 0 to specify a date without + // a year. + Year int32 `protobuf:"varint,1,opt,name=year,proto3" json:"year,omitempty"` + // Month of a year. Must be from 1 to 12, or 0 to specify a year without a + // month and day. + Month int32 `protobuf:"varint,2,opt,name=month,proto3" json:"month,omitempty"` + // Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 + // to specify a year by itself or a year and month where the day isn't + // significant. + Day int32 `protobuf:"varint,3,opt,name=day,proto3" json:"day,omitempty"` +} + +func (x *Date) Reset() { + *x = Date{} + if protoimpl.UnsafeEnabled { + mi := &file_google_type_date_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Date) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Date) ProtoMessage() {} + +func (x *Date) ProtoReflect() protoreflect.Message { + mi := &file_google_type_date_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Date.ProtoReflect.Descriptor instead. 
+func (*Date) Descriptor() ([]byte, []int) { + return file_google_type_date_proto_rawDescGZIP(), []int{0} +} + +func (x *Date) GetYear() int32 { + if x != nil { + return x.Year + } + return 0 +} + +func (x *Date) GetMonth() int32 { + if x != nil { + return x.Month + } + return 0 +} + +func (x *Date) GetDay() int32 { + if x != nil { + return x.Day + } + return 0 +} + +var File_google_type_date_proto protoreflect.FileDescriptor + +var file_google_type_date_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x22, 0x42, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x79, 0x65, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x79, 0x65, 0x61, + 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x6f, 0x6e, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x6d, 0x6f, 0x6e, 0x74, 0x68, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x61, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x64, 0x61, 0x79, 0x42, 0x5d, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x42, 0x09, 0x44, 0x61, + 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61, 0x74, 0x65, 0x3b, 0x64, 0x61, 0x74, 0x65, 0xf8, + 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x54, 0x50, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_type_date_proto_rawDescOnce sync.Once + file_google_type_date_proto_rawDescData = file_google_type_date_proto_rawDesc +) + +func file_google_type_date_proto_rawDescGZIP() []byte { + file_google_type_date_proto_rawDescOnce.Do(func() { + file_google_type_date_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_type_date_proto_rawDescData) + }) + return file_google_type_date_proto_rawDescData +} + +var file_google_type_date_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_type_date_proto_goTypes = []interface{}{ + (*Date)(nil), // 0: google.type.Date +} +var file_google_type_date_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_type_date_proto_init() } +func file_google_type_date_proto_init() { + if File_google_type_date_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_type_date_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Date); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_type_date_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_type_date_proto_goTypes, + DependencyIndexes: file_google_type_date_proto_depIdxs, + MessageInfos: file_google_type_date_proto_msgTypes, + }.Build() + 
File_google_type_date_proto = out.File + file_google_type_date_proto_rawDesc = nil + file_google_type_date_proto_goTypes = nil + file_google_type_date_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/protobuf/api/api.go b/terraform/providers/google/vendor/google.golang.org/genproto/protobuf/api/api.go new file mode 100644 index 00000000000..67697b2c5f1 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/genproto/protobuf/api/api.go @@ -0,0 +1,25 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package api aliases all exported identifiers in +// package "google.golang.org/protobuf/types/known/apipb". +package api + +import "google.golang.org/protobuf/types/known/apipb" + +type Api = apipb.Api +type Method = apipb.Method +type Mixin = apipb.Mixin + +var File_google_protobuf_api_proto = apipb.File_google_protobuf_api_proto diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/MAINTAINERS.md b/terraform/providers/google/vendor/google.golang.org/grpc/MAINTAINERS.md index 6a8a07781ae..5d4096d46a0 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/terraform/providers/google/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -9,21 +9,28 @@ for general contribution guidelines. ## Maintainers (in alphabetical order) +- [aranjans](https://github.com/aranjans), Google LLC +- [arjan-bal](https://github.com/arjan-bal), Google LLC +- [arvindbr8](https://github.com/arvindbr8), Google LLC - [atollena](https://github.com/atollena), Datadog, Inc. 
-- [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [menghanl](https://github.com/menghanl), Google LLC -- [srini100](https://github.com/srini100), Google LLC +- [erm-g](https://github.com/erm-g), Google LLC +- [gtcooke94](https://github.com/gtcooke94), Google LLC +- [purnesh42h](https://github.com/purnesh42h), Google LLC +- [zasweq](https://github.com/zasweq), Google LLC ## Emeritus Maintainers (in alphabetical order) -- [adelez](https://github.com/adelez), Google LLC -- [canguler](https://github.com/canguler), Google LLC -- [iamqizhao](https://github.com/iamqizhao), Google LLC -- [jadekler](https://github.com/jadekler), Google LLC -- [jtattermusch](https://github.com/jtattermusch), Google LLC -- [lyuxuan](https://github.com/lyuxuan), Google LLC -- [makmukhi](https://github.com/makmukhi), Google LLC -- [matt-kwong](https://github.com/matt-kwong), Google LLC -- [nicolasnoble](https://github.com/nicolasnoble), Google LLC -- [yongni](https://github.com/yongni), Google LLC +- [adelez](https://github.com/adelez) +- [canguler](https://github.com/canguler) +- [cesarghali](https://github.com/cesarghali) +- [iamqizhao](https://github.com/iamqizhao) +- [jeanbza](https://github.com/jeanbza) +- [jtattermusch](https://github.com/jtattermusch) +- [lyuxuan](https://github.com/lyuxuan) +- [makmukhi](https://github.com/makmukhi) +- [matt-kwong](https://github.com/matt-kwong) +- [menghanl](https://github.com/menghanl) +- [nicolasnoble](https://github.com/nicolasnoble) +- [srini100](https://github.com/srini100) +- [yongni](https://github.com/yongni) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/README.md b/terraform/providers/google/vendor/google.golang.org/grpc/README.md index ab0fbb79b86..b572707c623 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/README.md +++ b/terraform/providers/google/vendor/google.golang.org/grpc/README.md @@ -10,7 +10,7 @@ RPC framework that puts mobile and HTTP/2 first. For more information see the ## Prerequisites -- **[Go][]**: any one of the **three latest major** [releases][go-releases]. +- **[Go][]**: any one of the **two latest major** [releases][go-releases]. ## Installation diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/SECURITY.md b/terraform/providers/google/vendor/google.golang.org/grpc/SECURITY.md index be6e108705c..abab279379b 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/SECURITY.md +++ b/terraform/providers/google/vendor/google.golang.org/grpc/SECURITY.md @@ -1,3 +1,3 @@ # Security Policy -For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). +For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). 
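The backoff.go hunk below only corrects a doc-comment typo on DefaultConfig; none of its values change. For context, that config is normally handed to the dialer through grpc.WithConnectParams. A minimal sketch under that assumption (the target address and MinConnectTimeout below are placeholders, not part of this patch):

package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Reconnect using the documented backoff defaults, capping each
	// connection attempt at 20s. Target is a placeholder, illustrative only.
	conn, err := grpc.NewClient("dns:///example.com:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           backoff.DefaultConfig,
			MinConnectTimeout: 20 * time.Second,
		}),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}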
diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/backoff/backoff.go b/terraform/providers/google/vendor/google.golang.org/grpc/backoff/backoff.go index 0787d0b50ce..d7b40b7cb66 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/backoff/backoff.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/backoff/backoff.go @@ -39,7 +39,7 @@ type Config struct { MaxDelay time.Duration } -// DefaultConfig is a backoff configuration with the default values specfied +// DefaultConfig is a backoff configuration with the default values specified // at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. // // This should be useful for callers who want to configure backoff with diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/balancer.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/balancer.go index f391744f729..b181f386a1b 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" @@ -72,8 +73,21 @@ func unregisterForTesting(name string) { delete(m, name) } +// connectedAddress returns the connected address for a SubConnState. The +// address is only valid if the state is READY. +func connectedAddress(scs SubConnState) resolver.Address { + return scs.connectedAddress +} + +// setConnectedAddress sets the connected address for a SubConnState. +func setConnectedAddress(scs *SubConnState, addr resolver.Address) { + scs.connectedAddress = addr +} + func init() { internal.BalancerUnregister = unregisterForTesting + internal.ConnectedAddress = connectedAddress + internal.SetConnectedAddress = setConnectedAddress } // Get returns the resolver builder registered with the given name. @@ -243,6 +257,10 @@ type BuildOptions struct { // same resolver.Target as passed to the resolver. See the documentation for // the resolver.Target type for details about what it contains. Target resolver.Target + // MetricsRecorder is the metrics recorder that balancers can use to record + // metrics. Balancer implementations which do not register metrics on + // metrics registry and record on them can ignore this field. + MetricsRecorder estats.MetricsRecorder } // Builder creates a balancer. @@ -410,6 +428,9 @@ type SubConnState struct { // ConnectionError is set if the ConnectivityState is TransientFailure, // describing the reason the SubConn failed. Otherwise, it is nil. ConnectionError error + // connectedAddr contains the connected address when ConnectivityState is + // Ready. Otherwise, it is indeterminate. 
+ connectedAddress resolver.Address } // ClientConnState describes the state of a ClientConn relevant to the diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/base/balancer.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/base/balancer.go index a7f1eeec8e6..2b87bd79c75 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -36,7 +36,7 @@ type baseBuilder struct { config Config } -func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { bal := &baseBalancer{ cc: cc, pickerBuilder: bb.pickerBuilder, @@ -259,6 +259,6 @@ type errPicker struct { err error // Pick() always returns this err. } -func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return balancer.PickResult{}, p.err } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index bdf93dbfeff..52f54e6a016 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,8 +19,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/lb/v1/load_balancer.proto package grpc_lb_v1 @@ -780,7 +780,7 @@ func file_grpc_lb_v1_load_balancer_proto_rawDescGZIP() []byte { } var file_grpc_lb_v1_load_balancer_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_grpc_lb_v1_load_balancer_proto_goTypes = []interface{}{ +var file_grpc_lb_v1_load_balancer_proto_goTypes = []any{ (*LoadBalanceRequest)(nil), // 0: grpc.lb.v1.LoadBalanceRequest (*InitialLoadBalanceRequest)(nil), // 1: grpc.lb.v1.InitialLoadBalanceRequest (*ClientStatsPerToken)(nil), // 2: grpc.lb.v1.ClientStatsPerToken @@ -818,7 +818,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_lb_v1_load_balancer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*LoadBalanceRequest); i { case 0: return &v.state @@ -830,7 +830,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*InitialLoadBalanceRequest); i { case 0: return &v.state @@ -842,7 +842,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ClientStatsPerToken); i { case 0: return &v.state @@ -854,7 +854,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[3].Exporter = func(v interface{}, i int) 
interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ClientStats); i { case 0: return &v.state @@ -866,7 +866,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*LoadBalanceResponse); i { case 0: return &v.state @@ -878,7 +878,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*FallbackResponse); i { case 0: return &v.state @@ -890,7 +890,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*InitialLoadBalanceResponse); i { case 0: return &v.state @@ -902,7 +902,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*ServerList); i { case 0: return &v.state @@ -914,7 +914,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*Server); i { case 0: return &v.state @@ -927,11 +927,11 @@ func file_grpc_lb_v1_load_balancer_proto_init() { } } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpc_lb_v1_load_balancer_proto_msgTypes[0].OneofWrappers = []any{ (*LoadBalanceRequest_InitialRequest)(nil), (*LoadBalanceRequest_ClientStats)(nil), } - file_grpc_lb_v1_load_balancer_proto_msgTypes[4].OneofWrappers = []interface{}{ + file_grpc_lb_v1_load_balancer_proto_msgTypes[4].OneofWrappers = []any{ (*LoadBalanceResponse_InitialResponse)(nil), (*LoadBalanceResponse_ServerList)(nil), (*LoadBalanceResponse_FallbackResponse)(nil), diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go index c57857ac0e1..84e6a25056b 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -19,8 +19,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/lb/v1/load_balancer.proto package grpc_lb_v1 @@ -34,8 +34,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 +// Requires gRPC-Go v1.64.0 or later. 
+const _ = grpc.SupportPackageIsVersion9 const ( LoadBalancer_BalanceLoad_FullMethodName = "/grpc.lb.v1.LoadBalancer/BalanceLoad" @@ -46,7 +46,7 @@ const ( // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type LoadBalancerClient interface { // Bidirectional rpc to get a list of servers. - BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) + BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse], error) } type loadBalancerClient struct { @@ -57,53 +57,38 @@ func NewLoadBalancerClient(cc grpc.ClientConnInterface) LoadBalancerClient { return &loadBalancerClient{cc} } -func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) { +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], LoadBalancer_BalanceLoad_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &loadBalancerBalanceLoadClient{ClientStream: stream} + x := &grpc.GenericClientStream[LoadBalanceRequest, LoadBalanceResponse]{ClientStream: stream} return x, nil } -type LoadBalancer_BalanceLoadClient interface { - Send(*LoadBalanceRequest) error - Recv() (*LoadBalanceResponse, error) - grpc.ClientStream -} - -type loadBalancerBalanceLoadClient struct { - grpc.ClientStream -} - -func (x *loadBalancerBalanceLoadClient) Send(m *LoadBalanceRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *loadBalancerBalanceLoadClient) Recv() (*LoadBalanceResponse, error) { - m := new(LoadBalanceResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type LoadBalancer_BalanceLoadClient = grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse] // LoadBalancerServer is the server API for LoadBalancer service. // All implementations should embed UnimplementedLoadBalancerServer -// for forward compatibility +// for forward compatibility. type LoadBalancerServer interface { // Bidirectional rpc to get a list of servers. - BalanceLoad(LoadBalancer_BalanceLoadServer) error + BalanceLoad(grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse]) error } -// UnimplementedLoadBalancerServer should be embedded to have forward compatible implementations. -type UnimplementedLoadBalancerServer struct { -} +// UnimplementedLoadBalancerServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedLoadBalancerServer struct{} -func (UnimplementedLoadBalancerServer) BalanceLoad(LoadBalancer_BalanceLoadServer) error { +func (UnimplementedLoadBalancerServer) BalanceLoad(grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse]) error { return status.Errorf(codes.Unimplemented, "method BalanceLoad not implemented") } +func (UnimplementedLoadBalancerServer) testEmbeddedByValue() {} // UnsafeLoadBalancerServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to LoadBalancerServer will @@ -113,34 +98,22 @@ type UnsafeLoadBalancerServer interface { } func RegisterLoadBalancerServer(s grpc.ServiceRegistrar, srv LoadBalancerServer) { + // If the following call panics, it indicates UnimplementedLoadBalancerServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&LoadBalancer_ServiceDesc, srv) } func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{ServerStream: stream}) -} - -type LoadBalancer_BalanceLoadServer interface { - Send(*LoadBalanceResponse) error - Recv() (*LoadBalanceRequest, error) - grpc.ServerStream -} - -type loadBalancerBalanceLoadServer struct { - grpc.ServerStream + return srv.(LoadBalancerServer).BalanceLoad(&grpc.GenericServerStream[LoadBalanceRequest, LoadBalanceResponse]{ServerStream: stream}) } -func (x *loadBalancerBalanceLoadServer) Send(m *LoadBalanceResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *loadBalancerBalanceLoadServer) Recv() (*LoadBalanceRequest, error) { - m := new(LoadBalanceRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type LoadBalancer_BalanceLoadServer = grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse] // LoadBalancer_ServiceDesc is the grpc.ServiceDesc for LoadBalancer service. // It's only intended for direct use with grpc.RegisterService, diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go index 47a3e938dcf..c0987627413 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -219,7 +219,7 @@ type lbBalancer struct { // All backends addresses, with metadata set to nil. This list contains all // backend addresses in the same order and with the same duplicates as in // serverlist. When generating picker, a SubConn slice with the same order - // but with only READY SCs will be gerenated. + // but with only READY SCs will be generated. backendAddrsWithoutMetadata []resolver.Address // Roundrobin functionalities. 
state connectivity.State diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go index 8942c31310a..96a57c8c70c 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go @@ -21,14 +21,14 @@ package grpclb import ( "encoding/json" - "google.golang.org/grpc" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/serviceconfig" ) const ( roundRobinName = roundrobin.Name - pickFirstName = grpc.PickFirstBalancerName + pickFirstName = pickfirst.Name ) type grpclbServiceConfig struct { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go index 20c5f2ec396..671bc663fcb 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go @@ -19,13 +19,13 @@ package grpclb import ( + "math/rand" "sync" "sync/atomic" "google.golang.org/grpc/balancer" lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/status" ) @@ -112,7 +112,7 @@ type rrPicker struct { func newRRPicker(readySCs []balancer.SubConn) *rrPicker { return &rrPicker{ subConns: readySCs, - subConnsNext: grpcrand.Intn(len(readySCs)), + subConnsNext: rand.Intn(len(readySCs)), } } @@ -147,7 +147,7 @@ func newLBPicker(serverList []*lbpb.Server, readySCs []balancer.SubConn, stats * return &lbPicker{ serverList: serverList, subConns: readySCs, - subConnsNext: grpcrand.Intn(len(readySCs)), + subConnsNext: rand.Intn(len(readySCs)), stats: stats, } } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go index 3289f2869f8..ddd9bd269bf 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go @@ -22,17 +22,17 @@ package leastrequest import ( "encoding/json" "fmt" + "math/rand" "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/serviceconfig" ) -// grpcranduint32 is a global to stub out in tests. -var grpcranduint32 = grpcrand.Uint32 +// randuint32 is a global to stub out in tests. +var randuint32 = rand.Uint32 // Name is the name of the least request balancer. 
const Name = "least_request_experimental" @@ -112,7 +112,9 @@ type scWithRPCCount struct { } func (lrb *leastRequestBalancer) Build(info base.PickerBuildInfo) balancer.Picker { - logger.Infof("least-request: Build called with info: %v", info) + if logger.V(2) { + logger.Infof("least-request: Build called with info: %v", info) + } if len(info.ReadySCs) == 0 { return base.NewErrPicker(balancer.ErrNoSubConnAvailable) } @@ -157,7 +159,7 @@ func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { var pickedSC *scWithRPCCount var pickedSCNumRPCs int32 for i := 0; i < int(p.choiceCount); i++ { - index := grpcranduint32() % uint32(len(p.subConns)) + index := randuint32() % uint32(len(p.subConns)) sc := p.subConns[index] n := sc.numRPCs.Load() if pickedSC == nil || n < pickedSCNumRPCs { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/pickfirst.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go similarity index 87% rename from terraform/providers/google/vendor/google.golang.org/grpc/pickfirst.go rename to terraform/providers/google/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index 8853626614e..4d69b4052f8 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/pickfirst.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -16,38 +16,48 @@ * */ -package grpc +// Package pickfirst contains the pick_first load balancing policy. +package pickfirst import ( "encoding/json" "errors" "fmt" + "math/rand" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" internalgrpclog "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) +func init() { + balancer.Register(pickfirstBuilder{}) + internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } +} + +var logger = grpclog.Component("pick-first-lb") + const ( - // PickFirstBalancerName is the name of the pick_first balancer. - PickFirstBalancerName = "pick_first" - logPrefix = "[pick-first-lb %p] " + // Name is the name of the pick_first balancer. + Name = "pick_first" + logPrefix = "[pick-first-lb %p] " ) type pickfirstBuilder struct{} -func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { b := &pickfirstBalancer{cc: cc} b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b } func (pickfirstBuilder) Name() string { - return PickFirstBalancerName + return Name } type pfConfig struct { @@ -93,6 +103,12 @@ func (b *pickfirstBalancer) ResolverError(err error) { }) } +type Shuffler interface { + ShuffleAddressListForTesting(n int, swap func(i, j int)) +} + +func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } + func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { // The resolver reported an empty address list. Treat it like an error by @@ -124,7 +140,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // within each endpoint. 
- A61 if cfg.ShuffleAddressList { endpoints = append([]resolver.Endpoint{}, endpoints...) - grpcrand.Shuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) } // "Flatten the list by concatenating the ordered list of addresses for each @@ -139,13 +155,13 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // Endpoints not set, process addresses until we migrate resolver // emissions fully to Endpoints. The top channel does wrap emitted // addresses with endpoints, however some balancers such as weighted - // target do not forwarrd the corresponding correct endpoints down/split + // target do not forward the corresponding correct endpoints down/split // endpoints properly. Once all balancers correctly forward endpoints // down, can delete this else conditional. addrs = state.ResolverState.Addresses if cfg.ShuffleAddressList { addrs = append([]resolver.Address{}, addrs...) - grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) } } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/balancer.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/balancer.go index 3ac28271618..5ae4d2e1316 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/balancer.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/backoff" @@ -77,6 +78,42 @@ var ( clientConnUpdateHook = func() {} dataCachePurgeHook = func() {} resetBackoffHook = func() {} + + cacheEntriesMetric = estats.RegisterInt64Gauge(estats.MetricDescriptor{ + Name: "grpc.lb.rls.cache_entries", + Description: "EXPERIMENTAL. Number of entries in the RLS cache.", + Unit: "entry", + Labels: []string{"grpc.target", "grpc.lb.rls.server_target", "grpc.lb.rls.instance_uuid"}, + Default: false, + }) + cacheSizeMetric = estats.RegisterInt64Gauge(estats.MetricDescriptor{ + Name: "grpc.lb.rls.cache_size", + Description: "EXPERIMENTAL. The current size of the RLS cache.", + Unit: "By", + Labels: []string{"grpc.target", "grpc.lb.rls.server_target", "grpc.lb.rls.instance_uuid"}, + Default: false, + }) + defaultTargetPicksMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ + Name: "grpc.lb.rls.default_target_picks", + Description: "EXPERIMENTAL. Number of LB picks sent to the default target.", + Unit: "pick", + Labels: []string{"grpc.target", "grpc.lb.rls.server_target", "grpc.lb.rls.data_plane_target", "grpc.lb.pick_result"}, + Default: false, + }) + targetPicksMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ + Name: "grpc.lb.rls.target_picks", + Description: "EXPERIMENTAL. Number of LB picks sent to each RLS target. 
Note that if the default target is also returned by the RLS server, RPCs sent to that target from the cache will be counted in this metric, not in grpc.rls.default_target_picks.", + Unit: "pick", + Labels: []string{"grpc.target", "grpc.lb.rls.server_target", "grpc.lb.rls.data_plane_target", "grpc.lb.pick_result"}, + Default: false, + }) + failedPicksMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ + Name: "grpc.lb.rls.failed_picks", + Description: "EXPERIMENTAL. Number of LB picks failed due to either a failed RLS request or the RLS channel being throttled.", + Unit: "pick", + Labels: []string{"grpc.target", "grpc.lb.rls.server_target"}, + Default: false, + }) ) func init() { @@ -103,7 +140,7 @@ func (rlsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer. updateCh: buffer.NewUnbounded(), } lb.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-experimental-lb %p] ", lb)) - lb.dataCache = newDataCache(maxCacheSize, lb.logger) + lb.dataCache = newDataCache(maxCacheSize, lb.logger, opts.MetricsRecorder, opts.Target.String()) lb.bg = balancergroup.New(balancergroup.Options{ CC: cc, BuildOpts: opts, @@ -285,27 +322,27 @@ func (b *rlsBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error // Update the copy of the config in the LB policy before releasing the lock. b.lbCfg = newCfg - - // Enqueue an event which will notify us when the above update has been - // propagated to all child policies, and the child policies have all - // processed their updates, and we have sent a picker update. - done := make(chan struct{}) - b.updateCh.Put(resumePickerUpdates{done: done}) b.stateMu.Unlock() - <-done + // We cannot do cache operations above because `cacheMu` needs to be grabbed + // before `stateMu` if we are to hold both locks at the same time. + b.cacheMu.Lock() + b.dataCache.updateRLSServerTarget(newCfg.lookupService) if resizeCache { // If the new config changes reduces the size of the data cache, we // might have to evict entries to get the cache size down to the newly - // specified size. - // - // And we cannot do this operation above (where we compute the - // `resizeCache` boolean) because `cacheMu` needs to be grabbed before - // `stateMu` if we are to hold both locks at the same time. - b.cacheMu.Lock() + // specified size. If we do evict an entry with valid backoff timer, + // the new picker needs to be sent to the channel to re-process any + // RPCs queued as a result of this backoff timer. b.dataCache.resize(newCfg.cacheSizeBytes) - b.cacheMu.Unlock() } + b.cacheMu.Unlock() + // Enqueue an event which will notify us when the above update has been + // propagated to all child policies, and the child policies have all + // processed their updates, and we have sent a picker update. 
+ done := make(chan struct{}) + b.updateCh.Put(resumePickerUpdates{done: done}) + <-done return nil } @@ -490,15 +527,19 @@ func (b *rlsBalancer) sendNewPickerLocked() { if b.defaultPolicy != nil { b.defaultPolicy.acquireRef() } + picker := &rlsPicker{ - kbm: b.lbCfg.kbMap, - origEndpoint: b.bopts.Target.Endpoint(), - lb: b, - defaultPolicy: b.defaultPolicy, - ctrlCh: b.ctrlCh, - maxAge: b.lbCfg.maxAge, - staleAge: b.lbCfg.staleAge, - bg: b.bg, + kbm: b.lbCfg.kbMap, + origEndpoint: b.bopts.Target.Endpoint(), + lb: b, + defaultPolicy: b.defaultPolicy, + ctrlCh: b.ctrlCh, + maxAge: b.lbCfg.maxAge, + staleAge: b.lbCfg.staleAge, + bg: b.bg, + rlsServerTarget: b.lbCfg.lookupService, + grpcTarget: b.bopts.Target.String(), + metricsRecorder: b.bopts.MetricsRecorder, } picker.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-picker %p] ", picker)) state := balancer.State{ diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/cache.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/cache.go index d7a6a1a436c..7fe796c9587 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/cache.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/cache.go @@ -22,6 +22,8 @@ import ( "container/list" "time" + "github.com/google/uuid" + estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/internal/backoff" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" @@ -47,7 +49,7 @@ type cacheEntry struct { // headerData is received in the RLS response and is to be sent in the // X-Google-RLS-Data header for matching RPCs. headerData string - // expiryTime is the absolute time at which this cache entry entry stops + // expiryTime is the absolute time at which this cache entry stops // being valid. When an RLS request succeeds, this is set to the current // time plus the max_age field from the LB policy config. expiryTime time.Time @@ -163,24 +165,39 @@ func (l *lru) getLeastRecentlyUsed() cacheKey { // // It is not safe for concurrent access. type dataCache struct { - maxSize int64 // Maximum allowed size. - currentSize int64 // Current size. - keys *lru // Cache keys maintained in lru order. - entries map[cacheKey]*cacheEntry - logger *internalgrpclog.PrefixLogger - shutdown *grpcsync.Event + maxSize int64 // Maximum allowed size. + currentSize int64 // Current size. + keys *lru // Cache keys maintained in lru order. + entries map[cacheKey]*cacheEntry + logger *internalgrpclog.PrefixLogger + shutdown *grpcsync.Event + rlsServerTarget string + + // Read only after initialization. + grpcTarget string + uuid string + metricsRecorder estats.MetricsRecorder } -func newDataCache(size int64, logger *internalgrpclog.PrefixLogger) *dataCache { +func newDataCache(size int64, logger *internalgrpclog.PrefixLogger, metricsRecorder estats.MetricsRecorder, grpcTarget string) *dataCache { return &dataCache{ - maxSize: size, - keys: newLRU(), - entries: make(map[cacheKey]*cacheEntry), - logger: logger, - shutdown: grpcsync.NewEvent(), + maxSize: size, + keys: newLRU(), + entries: make(map[cacheKey]*cacheEntry), + logger: logger, + shutdown: grpcsync.NewEvent(), + grpcTarget: grpcTarget, + uuid: uuid.New().String(), + metricsRecorder: metricsRecorder, } } +// updateRLSServerTarget updates the RLS Server Target the RLS Balancer is +// configured with. 
+func (dc *dataCache) updateRLSServerTarget(rlsServerTarget string) { + dc.rlsServerTarget = rlsServerTarget +} + // resize changes the maximum allowed size of the data cache. // // The return value indicates if an entry with a valid backoff timer was @@ -223,7 +240,7 @@ func (dc *dataCache) resize(size int64) (backoffCancelled bool) { backoffCancelled = true } } - dc.deleteAndcleanup(key, entry) + dc.deleteAndCleanup(key, entry) } dc.maxSize = size return backoffCancelled @@ -249,7 +266,7 @@ func (dc *dataCache) evictExpiredEntries() bool { if entry.expiryTime.After(now) || entry.backoffExpiryTime.After(now) { continue } - dc.deleteAndcleanup(key, entry) + dc.deleteAndCleanup(key, entry) evicted = true } return evicted @@ -310,6 +327,8 @@ func (dc *dataCache) addEntry(key cacheKey, entry *cacheEntry) (backoffCancelled if dc.currentSize > dc.maxSize { backoffCancelled = dc.resize(dc.maxSize) } + cacheSizeMetric.Record(dc.metricsRecorder, dc.currentSize, dc.grpcTarget, dc.rlsServerTarget, dc.uuid) + cacheEntriesMetric.Record(dc.metricsRecorder, int64(len(dc.entries)), dc.grpcTarget, dc.rlsServerTarget, dc.uuid) return backoffCancelled, true } @@ -319,6 +338,7 @@ func (dc *dataCache) updateEntrySize(entry *cacheEntry, newSize int64) { dc.currentSize -= entry.size entry.size = newSize dc.currentSize += entry.size + cacheSizeMetric.Record(dc.metricsRecorder, dc.currentSize, dc.grpcTarget, dc.rlsServerTarget, dc.uuid) } func (dc *dataCache) getEntry(key cacheKey) *cacheEntry { @@ -339,7 +359,7 @@ func (dc *dataCache) removeEntryForTesting(key cacheKey) { if !ok { return } - dc.deleteAndcleanup(key, entry) + dc.deleteAndCleanup(key, entry) } // deleteAndCleanup performs actions required at the time of deleting an entry @@ -347,15 +367,17 @@ func (dc *dataCache) removeEntryForTesting(key cacheKey) { // - the entry is removed from the map of entries // - current size of the data cache is update // - the key is removed from the LRU -func (dc *dataCache) deleteAndcleanup(key cacheKey, entry *cacheEntry) { +func (dc *dataCache) deleteAndCleanup(key cacheKey, entry *cacheEntry) { delete(dc.entries, key) dc.currentSize -= entry.size dc.keys.removeEntry(key) + cacheSizeMetric.Record(dc.metricsRecorder, dc.currentSize, dc.grpcTarget, dc.rlsServerTarget, dc.uuid) + cacheEntriesMetric.Record(dc.metricsRecorder, int64(len(dc.entries)), dc.grpcTarget, dc.rlsServerTarget, dc.uuid) } func (dc *dataCache) stop() { for key, entry := range dc.entries { - dc.deleteAndcleanup(key, entry) + dc.deleteAndCleanup(key, entry) } dc.shutdown.Fire() } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/adaptive.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/adaptive.go index a3b0931b295..8b178604348 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/adaptive.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/adaptive.go @@ -20,16 +20,15 @@ package adaptive import ( + "math/rand" "sync" "time" - - "google.golang.org/grpc/internal/grpcrand" ) // For overriding in unittests. 
var ( timeNowFunc = func() time.Time { return time.Now() } - randFunc = func() float64 { return grpcrand.Float64() } + randFunc = func() float64 { return rand.Float64() } ) const ( diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/lookback.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/lookback.go index 13b316b7fa2..1ab874c356f 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/lookback.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/lookback.go @@ -82,10 +82,3 @@ func (l *lookback) advance(t time.Time) int64 { l.head = nh return nh } - -func min(x int64, y int64) int64 { - if x < y { - return x - } - return y -} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go index d010f74456f..cc5ce510ad9 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go @@ -218,7 +218,7 @@ type matcher struct { names []string } -// Equal reports if m and are are equivalent headerKeys. +// Equal reports if m and a are equivalent headerKeys. func (m matcher) Equal(a matcher) bool { if m.key != a.key { return false diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/picker.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/picker.go index 8f617a4e42e..e5c86f29068 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/picker.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/picker.go @@ -29,6 +29,7 @@ import ( "google.golang.org/grpc/balancer/rls/internal/keys" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" + estats "google.golang.org/grpc/experimental/stats" internalgrpclog "google.golang.org/grpc/internal/grpclog" rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/metadata" @@ -61,12 +62,15 @@ type rlsPicker struct { // The picker is given its own copy of the below fields from the RLS LB policy // to avoid having to grab the mutex on the latter. - defaultPolicy *childPolicyWrapper // Child policy for the default target. - ctrlCh *controlChannel // Control channel to the RLS server. - maxAge time.Duration // Cache max age from LB config. - staleAge time.Duration // Cache stale age from LB config. - bg exitIdler - logger *internalgrpclog.PrefixLogger + rlsServerTarget string + grpcTarget string + metricsRecorder estats.MetricsRecorder + defaultPolicy *childPolicyWrapper // Child policy for the default target. + ctrlCh *controlChannel // Control channel to the RLS server. + maxAge time.Duration // Cache max age from LB config. + staleAge time.Duration // Cache stale age from LB config. + bg exitIdler + logger *internalgrpclog.PrefixLogger } // isFullMethodNameValid return true if name is of the form `/service/method`. 
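The rlsPicker hunks in this file route every pick outcome through the experimental stats handles registered near the top of balancer.go (targetPicksMetric, defaultTargetPicksMetric, failedPicksMetric). For readers new to that API, the register-then-record pattern those hunks follow reduces to roughly the sketch below; the metric name and label are illustrative, not ones this patch registers:

package rlsmetricsexample

import (
	estats "google.golang.org/grpc/experimental/stats"
)

// Registered once at package init, mirroring the targetPicksMetric and
// failedPicksMetric registrations in this patch.
var examplePicksMetric = estats.RegisterInt64Count(estats.MetricDescriptor{
	Name:        "grpc.lb.example.picks", // hypothetical name, for illustration
	Description: "EXPERIMENTAL. Example pick counter.",
	Unit:        "pick",
	Labels:      []string{"grpc.target"},
	Default:     false,
})

// recordPick increments the counter by 1, passing one value per declared
// label: the same call shape as failedPicksMetric.Record in this file.
func recordPick(mr estats.MetricsRecorder, target string) {
	examplePicksMetric.Record(mr, 1, target)
}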
@@ -85,7 +89,17 @@ func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { reqKeys := p.kbm.RLSKey(md, p.origEndpoint, info.FullMethodName) p.lb.cacheMu.Lock() - defer p.lb.cacheMu.Unlock() + var pr balancer.PickResult + var err error + + // Record metrics without the cache mutex held, to prevent lock contention + // between concurrent RPC's and their Pick calls. Metrics Recording can + // potentially be expensive. + metricsCallback := func() {} + defer func() { + p.lb.cacheMu.Unlock() + metricsCallback() + }() // Lookup data cache and pending request map using request path and keys. cacheKey := cacheKey{path: info.FullMethodName, keys: reqKeys.Str} @@ -98,7 +112,8 @@ func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { case dcEntry == nil && pendingEntry == nil: throttled := p.sendRouteLookupRequestLocked(cacheKey, &backoffState{bs: defaultBackoffStrategy}, reqKeys.Map, rlspb.RouteLookupRequest_REASON_MISS, "") if throttled { - return p.useDefaultPickIfPossible(info, errRLSThrottled) + pr, metricsCallback, err = p.useDefaultPickIfPossible(info, errRLSThrottled) + return pr, err } return balancer.PickResult{}, balancer.ErrNoSubConnAvailable @@ -113,8 +128,8 @@ func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { p.sendRouteLookupRequestLocked(cacheKey, dcEntry.backoffState, reqKeys.Map, rlspb.RouteLookupRequest_REASON_STALE, dcEntry.headerData) } // Delegate to child policies. - res, err := p.delegateToChildPoliciesLocked(dcEntry, info) - return res, err + pr, metricsCallback, err = p.delegateToChildPoliciesLocked(dcEntry, info) + return pr, err } // We get here only if the data cache entry has expired. If entry is in @@ -126,67 +141,108 @@ func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { // message received from the control plane is still fine, as it could be // useful for debugging purposes. st := dcEntry.status - return p.useDefaultPickIfPossible(info, status.Error(codes.Unavailable, fmt.Sprintf("most recent error from RLS server: %v", st.Error()))) + pr, metricsCallback, err = p.useDefaultPickIfPossible(info, status.Error(codes.Unavailable, fmt.Sprintf("most recent error from RLS server: %v", st.Error()))) + return pr, err } // We get here only if the entry has expired and is not in backoff. throttled := p.sendRouteLookupRequestLocked(cacheKey, dcEntry.backoffState, reqKeys.Map, rlspb.RouteLookupRequest_REASON_MISS, "") if throttled { - return p.useDefaultPickIfPossible(info, errRLSThrottled) + pr, metricsCallback, err = p.useDefaultPickIfPossible(info, errRLSThrottled) + return pr, err } return balancer.PickResult{}, balancer.ErrNoSubConnAvailable // Data cache hit. Pending request exists. default: if dcEntry.expiryTime.After(now) { - res, err := p.delegateToChildPoliciesLocked(dcEntry, info) - return res, err + pr, metricsCallback, err = p.delegateToChildPoliciesLocked(dcEntry, info) + return pr, err } // Data cache entry has expired and pending request exists. Queue pick. return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } } +// errToPickResult is a helper function which converts the error value returned +// by Pick() to a string that represents the pick result. 
+func errToPickResult(err error) string { + if err == nil { + return "complete" + } + if errors.Is(err, balancer.ErrNoSubConnAvailable) { + return "queue" + } + if _, ok := status.FromError(err); ok { + return "drop" + } + return "fail" +} + // delegateToChildPoliciesLocked is a helper function which iterates through the // list of child policy wrappers in a cache entry and attempts to find a child // policy to which this RPC can be routed to. If all child policies are in -// TRANSIENT_FAILURE, we delegate to the last child policy arbitrarily. -func (p *rlsPicker) delegateToChildPoliciesLocked(dcEntry *cacheEntry, info balancer.PickInfo) (balancer.PickResult, error) { +// TRANSIENT_FAILURE, we delegate to the last child policy arbitrarily. Returns +// a function to be invoked to record metrics. +func (p *rlsPicker) delegateToChildPoliciesLocked(dcEntry *cacheEntry, info balancer.PickInfo) (balancer.PickResult, func(), error) { const rlsDataHeaderName = "x-google-rls-data" for i, cpw := range dcEntry.childPolicyWrappers { state := (*balancer.State)(atomic.LoadPointer(&cpw.state)) // Delegate to the child policy if it is not in TRANSIENT_FAILURE, or if // it is the last one (which handles the case of delegating to the last - // child picker if all child polcies are in TRANSIENT_FAILURE). + // child picker if all child policies are in TRANSIENT_FAILURE). if state.ConnectivityState != connectivity.TransientFailure || i == len(dcEntry.childPolicyWrappers)-1 { // Any header data received from the RLS server is stored in the // cache entry and needs to be sent to the actual backend in the // X-Google-RLS-Data header. res, err := state.Picker.Pick(info) if err != nil { - return res, err + pr := errToPickResult(err) + return res, func() { + if pr == "queue" { + // Don't record metrics for queued Picks. + return + } + targetPicksMetric.Record(p.metricsRecorder, 1, p.grpcTarget, p.rlsServerTarget, cpw.target, pr) + }, err } + if res.Metadata == nil { res.Metadata = metadata.Pairs(rlsDataHeaderName, dcEntry.headerData) } else { res.Metadata.Append(rlsDataHeaderName, dcEntry.headerData) } - return res, nil + return res, func() { + targetPicksMetric.Record(p.metricsRecorder, 1, p.grpcTarget, p.rlsServerTarget, cpw.target, "complete") + }, nil } } + // In the unlikely event that we have a cache entry with no targets, we end up // queueing the RPC. - return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + return balancer.PickResult{}, func() {}, balancer.ErrNoSubConnAvailable } // useDefaultPickIfPossible is a helper method which delegates to the default -// target if one is configured, or fails the pick with the given error. -func (p *rlsPicker) useDefaultPickIfPossible(info balancer.PickInfo, errOnNoDefault error) (balancer.PickResult, error) { +// target if one is configured, or fails the pick with the given error. Returns +// a function to be invoked to record metrics. +func (p *rlsPicker) useDefaultPickIfPossible(info balancer.PickInfo, errOnNoDefault error) (balancer.PickResult, func(), error) { if p.defaultPolicy != nil { state := (*balancer.State)(atomic.LoadPointer(&p.defaultPolicy.state)) - return state.Picker.Pick(info) + res, err := state.Picker.Pick(info) + pr := errToPickResult(err) + return res, func() { + if pr == "queue" { + // Don't record metrics for queued Picks. 
+ return + } + defaultTargetPicksMetric.Record(p.metricsRecorder, 1, p.grpcTarget, p.rlsServerTarget, p.defaultPolicy.target, pr) + }, err } - return balancer.PickResult{}, errOnNoDefault + + return balancer.PickResult{}, func() { + failedPicksMetric.Record(p.metricsRecorder, 1, p.grpcTarget, p.rlsServerTarget) + }, errOnNoDefault } // sendRouteLookupRequestLocked adds an entry to the pending request map and diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index f7031ad2251..260255d31b6 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,12 +22,12 @@ package roundrobin import ( + "math/rand" "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/grpcrand" ) // Name is the name of round_robin balancer. @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. - next: uint32(grpcrand.Intn(len(scs))), + next: uint32(rand.Intn(len(scs))), } } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go index 7e751722b7c..88bf64ec4ec 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go @@ -23,6 +23,7 @@ import ( "encoding/json" "errors" "fmt" + "math/rand" "sync" "sync/atomic" "time" @@ -31,9 +32,10 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/balancer/weightedroundrobin/internal" + "google.golang.org/grpc/balancer/weightedtarget" "google.golang.org/grpc/connectivity" + estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcrand" iserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/orca" "google.golang.org/grpc/resolver" @@ -45,6 +47,43 @@ import ( // Name is the name of the weighted round robin balancer. const Name = "weighted_round_robin" +var ( + rrFallbackMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ + Name: "grpc.lb.wrr.rr_fallback", + Description: "EXPERIMENTAL. Number of scheduler updates in which there were not enough endpoints with valid weight, which caused the WRR policy to fall back to RR behavior.", + Unit: "update", + Labels: []string{"grpc.target"}, + OptionalLabels: []string{"grpc.lb.locality"}, + Default: false, + }) + + endpointWeightNotYetUsableMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ + Name: "grpc.lb.wrr.endpoint_weight_not_yet_usable", + Description: "EXPERIMENTAL. 
Number of endpoints from each scheduler update that don't yet have usable weight information (i.e., either the load report has not yet been received, or it is within the blackout period).", + Unit: "endpoint", + Labels: []string{"grpc.target"}, + OptionalLabels: []string{"grpc.lb.locality"}, + Default: false, + }) + + endpointWeightStaleMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ + Name: "grpc.lb.wrr.endpoint_weight_stale", + Description: "EXPERIMENTAL. Number of endpoints from each scheduler update whose latest weight is older than the expiration period.", + Unit: "endpoint", + Labels: []string{"grpc.target"}, + OptionalLabels: []string{"grpc.lb.locality"}, + Default: false, + }) + endpointWeightsMetric = estats.RegisterFloat64Histo(estats.MetricDescriptor{ + Name: "grpc.lb.wrr.endpoint_weights", + Description: "EXPERIMENTAL. Weight of each endpoint, recorded on every scheduler update. Endpoints without usable weights will be recorded as weight 0.", + Unit: "endpoint", + Labels: []string{"grpc.target"}, + OptionalLabels: []string{"grpc.lb.locality"}, + Default: false, + }) +) + func init() { balancer.Register(bb{}) } @@ -58,7 +97,10 @@ func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba csEvltr: &balancer.ConnectivityStateEvaluator{}, scMap: make(map[balancer.SubConn]*weightedSubConn), connectivityState: connectivity.Connecting, + target: bOpts.Target.String(), + metricsRecorder: bOpts.MetricsRecorder, } + b.logger = prefixLogger(b) b.logger.Infof("Created") return b @@ -101,8 +143,11 @@ func (bb) Name() string { // wrrBalancer implements the weighted round robin LB policy. type wrrBalancer struct { - cc balancer.ClientConn - logger *grpclog.PrefixLogger + // The following fields are immutable. + cc balancer.ClientConn + logger *grpclog.PrefixLogger + target string + metricsRecorder estats.MetricsRecorder // The following fields are only accessed on calls into the LB policy, and // do not need a mutex. @@ -114,6 +159,7 @@ type wrrBalancer struct { resolverErr error // the last error reported by the resolver; cleared on successful resolution connErr error // the last connection error; cleared upon leaving TransientFailure stopPicker func() + locality string } func (b *wrrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { @@ -125,6 +171,7 @@ func (b *wrrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error } b.cfg = cfg + b.locality = weightedtarget.LocalityFromResolverState(ccs.ResolverState) b.updateAddresses(ccs.ResolverState.Addresses) if len(ccs.ResolverState.Addresses) == 0 { @@ -171,6 +218,10 @@ func (b *wrrBalancer) updateAddresses(addrs []resolver.Address) { // Initially, we set load reports to off, because they are not // running upon initial weightedSubConn creation. 
cfg: &lbConfig{EnableOOBLoadReport: false}, + + metricsRecorder: b.metricsRecorder, + target: b.target, + locality: b.locality, } b.subConns.Set(addr, wsc) b.scMap[sc] = wsc @@ -318,9 +369,12 @@ func (b *wrrBalancer) regeneratePicker() { } p := &picker{ - v: grpcrand.Uint32(), // start the scheduler at a random point - cfg: b.cfg, - subConns: b.readySubConns(), + v: rand.Uint32(), // start the scheduler at a random point + cfg: b.cfg, + subConns: b.readySubConns(), + metricsRecorder: b.metricsRecorder, + locality: b.locality, + target: b.target, } var ctx context.Context ctx, b.stopPicker = context.WithCancel(context.Background()) @@ -339,16 +393,20 @@ type picker struct { v uint32 // incrementing value used by the scheduler; accessed atomically cfg *lbConfig // active config when picker created subConns []*weightedSubConn // all READY subconns + + // The following fields are immutable. + target string + locality string + metricsRecorder estats.MetricsRecorder } -// scWeights returns a slice containing the weights from p.subConns in the same -// order as p.subConns. -func (p *picker) scWeights() []float64 { +func (p *picker) scWeights(recordMetrics bool) []float64 { ws := make([]float64, len(p.subConns)) now := internal.TimeNow() for i, wsc := range p.subConns { - ws[i] = wsc.weight(now, time.Duration(p.cfg.WeightExpirationPeriod), time.Duration(p.cfg.BlackoutPeriod)) + ws[i] = wsc.weight(now, time.Duration(p.cfg.WeightExpirationPeriod), time.Duration(p.cfg.BlackoutPeriod), recordMetrics) } + return ws } @@ -357,7 +415,7 @@ func (p *picker) inc() uint32 { } func (p *picker) regenerateScheduler() { - s := newScheduler(p.scWeights(), p.inc) + s := p.newScheduler(true) atomic.StorePointer(&p.scheduler, unsafe.Pointer(&s)) } @@ -367,6 +425,7 @@ func (p *picker) start(ctx context.Context) { // No need to regenerate weights with only one backend. return } + go func() { ticker := time.NewTicker(time.Duration(p.cfg.WeightUpdatePeriod)) defer ticker.Stop() @@ -381,7 +440,7 @@ func (p *picker) start(ctx context.Context) { }() } -func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { // Read the scheduler atomically. All scheduler operations are threadsafe, // and if the scheduler is replaced during this usage, we want to use the // scheduler that was live when the pick started. @@ -404,8 +463,12 @@ func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { // When needed, it also tracks connectivity state, listens for metrics updates // by implementing the orca.OOBListener interface and manages that listener. type weightedSubConn struct { + // The following fields are immutable. balancer.SubConn - logger *grpclog.PrefixLogger + logger *grpclog.PrefixLogger + target string + metricsRecorder estats.MetricsRecorder + locality string // The following fields are only accessed on calls into the LB policy, and // do not need a mutex. @@ -450,7 +513,7 @@ func (w *weightedSubConn) OnLoadReport(load *v3orcapb.OrcaLoadReport) { } w.lastUpdated = internal.TimeNow() - if w.nonEmptySince == (time.Time{}) { + if w.nonEmptySince.Equal(time.Time{}) { w.nonEmptySince = w.lastUpdated } } @@ -495,14 +558,17 @@ func (w *weightedSubConn) updateConnectivityState(cs connectivity.State) connect w.SubConn.Connect() case connectivity.Ready: // If we transition back to READY state, reset nonEmptySince so that we - // apply the blackout period after we start receiving load data. 
Note - // that we cannot guarantee that we will never receive lingering - // callbacks for backend metric reports from the previous connection - // after the new connection has been established, but they should be - // masked by new backend metric reports from the new connection by the - // time the blackout period ends. + // apply the blackout period after we start receiving load data. Also + // reset lastUpdated to trigger endpoint weight not yet usable in the + // case endpoint gets asked what weight it is before receiving a new + // load report. Note that we cannot guarantee that we will never receive + // lingering callbacks for backend metric reports from the previous + // connection after the new connection has been established, but they + // should be masked by new backend metric reports from the new + // connection by the time the blackout period ends. w.mu.Lock() w.nonEmptySince = time.Time{} + w.lastUpdated = time.Time{} w.mu.Unlock() case connectivity.Shutdown: if w.stopORCAListener != nil { @@ -527,21 +593,44 @@ func (w *weightedSubConn) updateConnectivityState(cs connectivity.State) connect // weight returns the current effective weight of the subconn, taking into // account the parameters. Returns 0 for blacked out or expired data, which -// will cause the backend weight to be treated as the mean of the weights of -// the other backends. -func (w *weightedSubConn) weight(now time.Time, weightExpirationPeriod, blackoutPeriod time.Duration) float64 { +// will cause the backend weight to be treated as the mean of the weights of the +// other backends. If forScheduler is set to true, this function will emit +// metrics through the metrics registry. +func (w *weightedSubConn) weight(now time.Time, weightExpirationPeriod, blackoutPeriod time.Duration, recordMetrics bool) (weight float64) { w.mu.Lock() defer w.mu.Unlock() + + if recordMetrics { + defer func() { + endpointWeightsMetric.Record(w.metricsRecorder, weight, w.target, w.locality) + }() + } + + // The SubConn has not received a load report (i.e. just turned READY with + // no load report). + if w.lastUpdated.Equal(time.Time{}) { + endpointWeightNotYetUsableMetric.Record(w.metricsRecorder, 1, w.target, w.locality) + return 0 + } + // If the most recent update was longer ago than the expiration period, // reset nonEmptySince so that we apply the blackout period again if we // start getting data again in the future, and return 0. if now.Sub(w.lastUpdated) >= weightExpirationPeriod { + if recordMetrics { + endpointWeightStaleMetric.Record(w.metricsRecorder, 1, w.target, w.locality) + } w.nonEmptySince = time.Time{} return 0 } + // If we don't have at least blackoutPeriod worth of data, return 0. 
- if blackoutPeriod != 0 && (w.nonEmptySince == (time.Time{}) || now.Sub(w.nonEmptySince) < blackoutPeriod) { + if blackoutPeriod != 0 && (w.nonEmptySince.Equal(time.Time{}) || now.Sub(w.nonEmptySince) < blackoutPeriod) { + if recordMetrics { + endpointWeightNotYetUsableMetric.Record(w.metricsRecorder, 1, w.target, w.locality) + } return 0 } + return w.weightVal } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go index f389678b4e8..56aa15da10d 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go @@ -31,13 +31,17 @@ type scheduler interface { // len(scWeights)-1 are zero or there is only a single subconn, otherwise it // will return an Earliest Deadline First (EDF) scheduler implementation that // selects the subchannels according to their weights. -func newScheduler(scWeights []float64, inc func() uint32) scheduler { +func (p *picker) newScheduler(recordMetrics bool) scheduler { + scWeights := p.scWeights(recordMetrics) n := len(scWeights) if n == 0 { return nil } if n == 1 { - return &rrScheduler{numSCs: 1, inc: inc} + if recordMetrics { + rrFallbackMetric.Record(p.metricsRecorder, 1, p.target, p.locality) + } + return &rrScheduler{numSCs: 1, inc: p.inc} } sum := float64(0) numZero := 0 @@ -51,8 +55,12 @@ func newScheduler(scWeights []float64, inc func() uint32) scheduler { numZero++ } } + if numZero >= n-1 { - return &rrScheduler{numSCs: uint32(n), inc: inc} + if recordMetrics { + rrFallbackMetric.Record(p.metricsRecorder, 1, p.target, p.locality) + } + return &rrScheduler{numSCs: uint32(n), inc: p.inc} } unscaledMean := sum / float64(n-numZero) scalingFactor := maxWeight / max @@ -74,11 +82,11 @@ func newScheduler(scWeights []float64, inc func() uint32) scheduler { } if allEqual { - return &rrScheduler{numSCs: uint32(n), inc: inc} + return &rrScheduler{numSCs: uint32(n), inc: p.inc} } logger.Infof("using edf scheduler with weights: %v", weights) - return &edfScheduler{weights: weights, inc: inc} + return &edfScheduler{weights: weights, inc: p.inc} } const maxWeight = math.MaxUint16 diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go index 27279257ed1..bcc8aca8b49 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go @@ -89,7 +89,7 @@ func New(cc balancer.ClientConn, logger *grpclog.PrefixLogger, newWRR func() wrr } // Start starts the aggregator. It can be called after Stop to restart the -// aggretator. +// aggregator. 
func (wbsa *Aggregator) Start() { wbsa.mu.Lock() defer wbsa.mu.Unlock() diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go index 220f4e55567..dfd1ef26dcb 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go @@ -84,6 +84,17 @@ type weightedTargetBalancer struct { targets map[string]Target } +type localityKeyType string + +const localityKey = localityKeyType("locality") + +// LocalityFromResolverState returns the locality from the resolver.State +// provided, or an empty string if not present. +func LocalityFromResolverState(state resolver.State) string { + locality, _ := state.Attributes.Value(localityKey).(string) + return locality +} + // UpdateClientConnState takes the new targets in balancer group, // creates/deletes sub-balancers and sends them update. addresses are split into // groups based on hierarchy path. @@ -142,7 +153,7 @@ func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat ResolverState: resolver.State{ Addresses: addressesSplit[name], ServiceConfig: s.ResolverState.ServiceConfig, - Attributes: s.ResolverState.Attributes, + Attributes: s.ResolverState.Attributes.WithValue(localityKey, name), }, BalancerConfig: newT.ChildPolicy.Config, }) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer_wrapper.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer_wrapper.go index af39b8a4c73..8ad6ce2f095 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -25,12 +25,15 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) +var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) + // ccBalancerWrapper sits between the ClientConn and the Balancer. // // ccBalancerWrapper implements methods corresponding to the ones on the @@ -79,6 +82,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { CustomUserAgent: cc.dopts.copts.UserAgent, ChannelzParent: cc.channelz, Target: cc.parsedTarget, + MetricsRecorder: cc.metricsRecorderList, }, serializer: grpcsync.NewCallbackSerializer(ctx), serializerCancel: cancel, @@ -92,7 +96,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { // it is safe to call into the balancer here. 
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { errCh := make(chan error) - ok := ccb.serializer.Schedule(func(ctx context.Context) { + uccs := func(ctx context.Context) { defer close(errCh) if ctx.Err() != nil || ccb.balancer == nil { return @@ -107,17 +111,23 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat logger.Infof("error from balancer.UpdateClientConnState: %v", err) } errCh <- err - }) - if !ok { - return nil } + onFailure := func() { close(errCh) } + + // UpdateClientConnState can race with Close, and when the latter wins, the + // serializer is closed, and the attempt to schedule the callback will fail. + // It is acceptable to ignore this failure. But since we want to handle the + // state update in a blocking fashion (when we successfully schedule the + // callback), we have to use the ScheduleOr method and not the TrySchedule + // method on the serializer. + ccb.serializer.ScheduleOr(uccs, onFailure) return <-errCh } // resolverError is invoked by grpc to push a resolver error to the underlying // balancer. The call to the balancer is executed from the serializer. func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.serializer.Schedule(func(ctx context.Context) { + ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccb.balancer == nil { return } @@ -133,7 +143,7 @@ func (ccb *ccBalancerWrapper) close() { ccb.closed = true ccb.mu.Unlock() channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing") - ccb.serializer.Schedule(func(context.Context) { + ccb.serializer.TrySchedule(func(context.Context) { if ccb.balancer == nil { return } @@ -145,7 +155,7 @@ func (ccb *ccBalancerWrapper) close() { // exitIdle invokes the balancer's exitIdle method in the serializer. func (ccb *ccBalancerWrapper) exitIdle() { - ccb.serializer.Schedule(func(ctx context.Context) { + ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccb.balancer == nil { return } @@ -182,7 +192,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer return acbw, nil } -func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { +func (ccb *ccBalancerWrapper) RemoveSubConn(balancer.SubConn) { // The graceful switch balancer will never call this. logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") } @@ -198,6 +208,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { ccb.cc.mu.Lock() defer ccb.cc.mu.Unlock() + if ccb.cc.conns == nil { + // The CC has been closed; ignore this update. + return + } ccb.mu.Lock() if ccb.closed { @@ -248,15 +262,29 @@ type acBalancerWrapper struct { // updateState is invoked by grpc to push a subConn state update to the // underlying balancer. -func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { - acbw.ccb.serializer.Schedule(func(ctx context.Context) { +func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) { + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || acbw.ccb.balancer == nil { return } // Even though it is optional for balancers, gracefulswitch ensures // opts.StateListener is set, so this cannot ever be nil. // TODO: delete this comment when UpdateSubConnState is removed.
- acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err} + if s == connectivity.Ready { + setConnectedAddress(&scs, curAddr) + } + acbw.stateListener(scs) + acbw.ac.mu.Lock() + defer acbw.ac.mu.Unlock() + if s == connectivity.Ready { + // When changing states to READY, reset stateReadyChan. Wait until + // after we notify the LB policy's listener(s) in order to prevent + // ac.getTransport() from unblocking before the LB policy starts + // tracking the subchannel as READY. + close(acbw.ac.stateReadyChan) + acbw.ac.stateReadyChan = make(chan struct{}) + } }) } @@ -314,8 +342,8 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) ( pData := acbw.producers[pb] if pData == nil { // Not found; create a new one and add it to the producers map. - p, close := pb.Build(acbw) - pData = &refCountedProducer{producer: p, close: close} + p, closeFn := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: closeFn} acbw.producers[pb] = pData } // Account for this new reference. diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 1afb1e84ac0..55bffaa77ef 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 @@ -1015,7 +1015,7 @@ func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{ +var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{ (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type @@ -1058,7 +1058,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*GrpcLogEntry); i { case 0: return &v.state @@ -1070,7 +1070,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ClientHeader); i { case 0: return &v.state @@ -1082,7 +1082,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ServerHeader); i { case 0: return &v.state @@ -1094,7 +1094,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - 
file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*Trailer); i { case 0: return &v.state @@ -1106,7 +1106,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*Message); i { case 0: return &v.state @@ -1118,7 +1118,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*Metadata); i { case 0: return &v.state @@ -1130,7 +1130,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*MetadataEntry); i { case 0: return &v.state @@ -1142,7 +1142,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*Address); i { case 0: return &v.state @@ -1155,7 +1155,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { } } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), (*GrpcLogEntry_Message)(nil), diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/clientconn.go b/terraform/providers/google/vendor/google.golang.org/grpc/clientconn.go index 2359f94b8a4..9c8850e3fdd 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/clientconn.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/clientconn.go @@ -24,6 +24,7 @@ import ( "fmt" "math" "net/url" + "slices" "strings" "sync" "sync/atomic" @@ -31,6 +32,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" @@ -38,6 +40,7 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/stats" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -72,6 +75,8 @@ var ( // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" + // PickFirstBalancerName is the name of the pick_first balancer. + PickFirstBalancerName = pickfirst.Name ) // The following errors are returned from Dial and DialContext @@ -152,6 +157,16 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) for _, opt := range opts { opt.apply(&cc.dopts) } + + // Determine the resolver to use. 
+ if err := cc.initParsedTargetAndResolverBuilder(); err != nil { + return nil, err + } + + for _, opt := range globalPerTargetDialOptions { + opt.DialOptionForTarget(cc.parsedTarget.URL).apply(&cc.dopts) + } + chainUnaryClientInterceptors(cc) chainStreamClientInterceptors(cc) @@ -160,7 +175,7 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) } if cc.dopts.defaultServiceConfigRawJSON != nil { - scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) + scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON, cc.dopts.maxCallAttempts) if scpr.Err != nil { return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) } @@ -168,30 +183,24 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) } cc.mkp = cc.dopts.copts.KeepaliveParams - // Register ClientConn with channelz. - cc.channelzRegistration(target) - - // TODO: Ideally it should be impossible to error from this function after - // channelz registration. This will require removing some channelz logs - // from the following functions that can error. Errors can be returned to - // the user, and successful logs can be emitted here, after the checks have - // passed and channelz is subsequently registered. - - // Determine the resolver to use. - if err := cc.parseTargetAndFindResolver(); err != nil { - channelz.RemoveEntry(cc.channelz.ID) - return nil, err - } - if err = cc.determineAuthority(); err != nil { - channelz.RemoveEntry(cc.channelz.ID) + if err = cc.initAuthority(); err != nil { return nil, err } + // Register ClientConn with channelz. Note that this is only done after + // channel creation cannot fail. + cc.channelzRegistration(target) + channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", cc.parsedTarget) + channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) + cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) + return cc, nil } @@ -586,13 +595,14 @@ type ClientConn struct { cancel context.CancelFunc // Cancelled on close. // The following are initialized at dial time, and are read-only after that. - target string // User's dial target. - parsedTarget resolver.Target // See parseTargetAndFindResolver(). - authority string // See determineAuthority(). - dopts dialOptions // Default and user specified dial options. - channelz *channelz.Channel // Channelz object. - resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). - idlenessMgr *idle.Manager + target string // User's dial target. + parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). + authority string // See initAuthority(). + dopts dialOptions // Default and user specified dial options. + channelz *channelz.Channel // Channelz object. + resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). + idlenessMgr *idle.Manager + metricsRecorderList *stats.MetricsRecorderList // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -622,11 +632,6 @@ type ClientConn struct { // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. 
A true value is returned in former case and false in latter. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { ch := cc.csMgr.getNotifyChan() if cc.csMgr.getState() != sourceState { @@ -641,11 +646,6 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec } // GetState returns the connectivity.State of ClientConn. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a later -// release. func (cc *ClientConn) GetState() connectivity.State { return cc.csMgr.getState() } @@ -692,8 +692,7 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { var emptyServiceConfig *ServiceConfig func init() { - balancer.Register(pickfirstBuilder{}) - cfg := parseServiceConfig("{}") + cfg := parseServiceConfig("{}", defaultMaxCallAttempts) if cfg.Err != nil { panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) } @@ -809,17 +808,11 @@ func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) { cc.csMgr.updateState(connectivity.TransientFailure) } -// Makes a copy of the input addresses slice and clears out the balancer -// attributes field. Addresses are passed during subconn creation and address -// update operations. In both cases, we will clear the balancer attributes by -// calling this function, and therefore we will be able to use the Equal method -// provided by the resolver.Address type for comparison. -func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { +// Makes a copy of the input addresses slice. Addresses are passed during +// subconn creation and address update operations. +func copyAddresses(in []resolver.Address) []resolver.Address { out := make([]resolver.Address, len(in)) - for i := range in { - out[i] = in[i] - out[i].BalancerAttributes = nil - } + copy(out, in) return out } @@ -832,14 +825,14 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer. } ac := &addrConn{ - state: connectivity.Idle, - cc: cc, - addrs: copyAddressesWithoutBalancerAttributes(addrs), - scopts: opts, - dopts: cc.dopts, - channelz: channelz.RegisterSubChannel(cc.channelz, ""), - resetBackoff: make(chan struct{}), - stateChan: make(chan struct{}), + state: connectivity.Idle, + cc: cc, + addrs: copyAddresses(addrs), + scopts: opts, + dopts: cc.dopts, + channelz: channelz.RegisterSubChannel(cc.channelz, ""), + resetBackoff: make(chan struct{}), + stateReadyChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Start with our address set to the first address; this may be updated if @@ -915,28 +908,29 @@ func (ac *addrConn) connect() error { ac.mu.Unlock() return nil } - ac.mu.Unlock() - ac.resetTransport() + ac.resetTransportAndUnlock() return nil } -func equalAddresses(a, b []resolver.Address) bool { - if len(a) != len(b) { - return false - } - for i, v := range a { - if !v.Equal(b[i]) { - return false - } - } - return true +// equalAddressIgnoringBalAttributes returns true if a and b are considered equal. +// This is different from the Equal method on the resolver.Address type which +// considers all fields to determine equality. Here, we only consider fields +// that are meaningful to the subConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { + return a.Addr == b.Addr && a.ServerName == b.ServerName && + a.Attributes.Equal(b.Attributes) && + a.Metadata == b.Metadata +} + +func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool { + return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) }) } // updateAddrs updates ac.addrs with the new addresses list and handles active // connections or connection attempts. func (ac *addrConn) updateAddrs(addrs []resolver.Address) { - addrs = copyAddressesWithoutBalancerAttributes(addrs) + addrs = copyAddresses(addrs) limit := len(addrs) if limit > 5 { limit = 5 @@ -944,7 +938,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit]) ac.mu.Lock() - if equalAddresses(ac.addrs, addrs) { + if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) { ac.mu.Unlock() return } @@ -963,7 +957,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { // Try to find the connected address. for _, a := range addrs { a.ServerName = ac.cc.getServerName(a) - if a.Equal(ac.curAddr) { + if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) { // We are connected to a valid address, so do nothing but // update the addresses. ac.mu.Unlock() @@ -989,11 +983,9 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.updateConnectivityState(connectivity.Idle, nil) } - ac.mu.Unlock() - // Since we were connecting/connected, we should start a new connection // attempt. - go ac.resetTransport() + go ac.resetTransportAndUnlock() } // getServerName determines the serverName to be used in the connection @@ -1187,8 +1179,8 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State - stateChan chan struct{} // closed and recreated on every state change. + state connectivity.State + stateReadyChan chan struct{} // closed and recreated on every READY state change. backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} @@ -1201,9 +1193,6 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } - // When changing states, reset the state change channel. - close(ac.stateChan) - ac.stateChan = make(chan struct{}) ac.state = s ac.channelz.ChannelMetrics.State.Store(&s) if lastErr == nil { @@ -1211,7 +1200,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) } else { channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } - ac.acbw.updateState(s, lastErr) + ac.acbw.updateState(s, ac.curAddr, lastErr) } // adjustParams updates parameters used to create transports upon @@ -1228,8 +1217,10 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { } } -func (ac *addrConn) resetTransport() { - ac.mu.Lock() +// resetTransportAndUnlock unconditionally connects the addrConn. +// +// ac.mu must be held by the caller, and this function will guarantee it is released. 
+func (ac *addrConn) resetTransportAndUnlock() { acCtx := ac.ctx if acCtx.Err() != nil { ac.mu.Unlock() @@ -1519,7 +1510,7 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { for ctx.Err() == nil { ac.mu.Lock() - t, state, sc := ac.transport, ac.state, ac.stateChan + t, state, sc := ac.transport, ac.state, ac.stateReadyChan ac.mu.Unlock() if state == connectivity.Ready { return t, nil @@ -1582,7 +1573,7 @@ func (ac *addrConn) tearDown(err error) { } else { // Hard close the transport when the channel is entering idle or is // being shutdown. In the case where the channel is being shutdown, - // closing of transports is also taken care of by cancelation of cc.ctx. + // closing of transports is also taken care of by cancellation of cc.ctx. // But in the case where the channel is entering idle, we need to // explicitly close the transports here. Instead of distinguishing // between these two cases, it is simpler to close the transport @@ -1673,22 +1664,19 @@ func (cc *ClientConn) connectionError() error { return cc.lastConnectionError } -// parseTargetAndFindResolver parses the user's dial target and stores the -// parsed target in `cc.parsedTarget`. +// initParsedTargetAndResolverBuilder parses the user's dial target and stores +// the parsed target in `cc.parsedTarget`. // // The resolver to use is determined based on the scheme in the parsed target // and the same is stored in `cc.resolverBuilder`. // // Doesn't grab cc.mu as this method is expected to be called only at Dial time. -func (cc *ClientConn) parseTargetAndFindResolver() error { - channelz.Infof(logger, cc.channelz, "original dial target is: %q", cc.target) +func (cc *ClientConn) initParsedTargetAndResolverBuilder() error { + logger.Infof("original dial target is: %q", cc.target) var rb resolver.Builder parsedTarget, err := parseTarget(cc.target) - if err != nil { - channelz.Infof(logger, cc.channelz, "dial target %q parse failed: %v", cc.target, err) - } else { - channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", parsedTarget) + if err == nil { rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget @@ -1707,15 +1695,12 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { defScheme = resolver.GetDefaultScheme() } - channelz.Infof(logger, cc.channelz, "fallback to scheme %q", defScheme) canonicalTarget := defScheme + ":///" + cc.target parsedTarget, err = parseTarget(canonicalTarget) if err != nil { - channelz.Infof(logger, cc.channelz, "dial target %q parse failed: %v", canonicalTarget, err) return err } - channelz.Infof(logger, cc.channelz, "parsed dial target is: %+v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) @@ -1805,7 +1790,7 @@ func encodeAuthority(authority string) string { // credentials do not match the authority configured through the dial option. // // Doesn't grab cc.mu as this method is expected to be called only at Dial time. -func (cc *ClientConn) determineAuthority() error { +func (cc *ClientConn) initAuthority() error { dopts := cc.dopts // Historically, we had two options for users to specify the serverName or // authority for a channel. 
One was through the transport credentials @@ -1838,6 +1823,5 @@ } else { cc.authority = encodeAuthority(endpoint) } - channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) return nil } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/codec.go b/terraform/providers/google/vendor/google.golang.org/grpc/codec.go index 411e3dfd47c..e840858b77b 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/codec.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/codec.go @@ -21,18 +21,73 @@ package grpc import ( "google.golang.org/grpc/encoding" _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" + "google.golang.org/grpc/mem" ) -// baseCodec contains the functionality of both Codec and encoding.Codec, but -// omits the name/string, which vary between the two and are not needed for -// anything besides the registry in the encoding package. +// baseCodec captures the new encoding.CodecV2 interface without the Name +// function, allowing it to be implemented by older Codec and encoding.Codec +// implementations. The omitted Name function is only needed for the registry in +// the encoding package and is not part of the core functionality. type baseCodec interface { - Marshal(v any) ([]byte, error) - Unmarshal(data []byte, v any) error + Marshal(v any) (mem.BufferSlice, error) + Unmarshal(data mem.BufferSlice, v any) error +} + +// getCodec returns an encoding.CodecV2 for the codec of the given name (if +// registered). It first checks the V1 registry with encoding.GetCodec and, if +// a codec is registered there, wraps it with newCodecV1Bridge to turn it into +// an encoding.CodecV2. Otherwise, it returns the result of looking up the V2 +// registry with encoding.GetCodecV2, which is nil if nothing is registered. +func getCodec(name string) encoding.CodecV2 { + if codecV1 := encoding.GetCodec(name); codecV1 != nil { + return newCodecV1Bridge(codecV1) + } + + return encoding.GetCodecV2(name) +} + +func newCodecV0Bridge(c Codec) baseCodec { + return codecV0Bridge{codec: c} +} + +func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 { + return codecV1Bridge{ + codecV0Bridge: codecV0Bridge{codec: c}, + name: c.Name(), + } +} + +var _ baseCodec = codecV0Bridge{} + +type codecV0Bridge struct { + codec interface { + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error + } +} + +func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) { + data, err := c.codec.Marshal(v) + if err != nil { + return nil, err + } + return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil +} + +func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) { + return c.codec.Unmarshal(data.Materialize(), v) } -var _ baseCodec = Codec(nil) -var _ baseCodec = encoding.Codec(nil) +var _ encoding.CodecV2 = codecV1Bridge{} + +type codecV1Bridge struct { + codecV0Bridge + name string +} + +func (c codecV1Bridge) Name() string { + return c.name +} // Codec defines the interface gRPC uses to encode and decode messages.
// Note that implementations of this interface must be thread safe; diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go index 43726e877b8..7e4bfee8886 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go @@ -49,7 +49,7 @@ func (k KeySizeError) Error() string { // newRekeyAEAD creates a new instance of aes128gcm with rekeying. // The key argument should be 44 bytes, the first 32 bytes are used as a key -// for HKDF-expand and the remainining 12 bytes are used as a random mask for +// for HKDF-expand and the remaining 12 bytes are used as a random mask for // the counter. func newRekeyAEAD(key []byte) (*rekeyAEAD, error) { k := len(key) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go index 6a9035ea254..b5bbb5497aa 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go @@ -51,7 +51,7 @@ type aes128gcmRekey struct { // NewAES128GCMRekey creates an instance that uses aes128gcm with rekeying // for ALTS record. The key argument should be 44 bytes, the first 32 bytes -// are used as a key for HKDF-expand and the remainining 12 bytes are used +// are used as a key for HKDF-expand and the remaining 12 bytes are used // as a random mask for the counter. func NewAES128GCMRekey(side core.Side, key []byte) (ALTSRecordCrypto, error) { inCounter := NewInCounter(side, overflowLenAES128GCMRekey) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go index 0d64fb37a12..f1ea7bb2081 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go @@ -266,10 +266,3 @@ func (p *conn) Write(b []byte) (n int, err error) { } return n, nil } - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go index 6c867dd8501..50721f690ac 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go @@ -128,7 +128,7 @@ type altsHandshaker struct { // NewClientHandshaker creates a core.Handshaker that performs a client-side // ALTS handshake by acting as a proxy between the peer and the ALTS handshaker // service in the metadata server. 
-func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) { +func NewClientHandshaker(_ context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) { return &altsHandshaker{ stream: nil, conn: c, @@ -141,7 +141,7 @@ func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, // NewServerHandshaker creates a core.Handshaker that performs a server-side // ALTS handshake by acting as a proxy between the peer and the ALTS handshaker // service in the metadata server. -func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) { +func NewServerHandshaker(_ context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) { return &altsHandshaker{ stream: nil, conn: c, diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go index e1cdafb980c..b3af0359072 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go @@ -34,8 +34,6 @@ var ( // to a corresponding connection to a hypervisor handshaker service // instance. hsConnMap = make(map[string]*grpc.ClientConn) - // hsDialer will be reassigned in tests. - hsDialer = grpc.Dial ) // Dial dials the handshake service in the hypervisor. If a connection has @@ -50,7 +48,7 @@ func Dial(hsAddress string) (*grpc.ClientConn, error) { // Create a new connection to the handshaker service. Note that // this connection stays open until the application is closed. var err error - hsConn, err = hsDialer(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) + hsConn, err = grpc.Dial(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index fe4488a95ed..b7de8f05b76 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.33.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/gcp/altscontext.proto package grpc_gcp @@ -201,7 +201,7 @@ func file_grpc_gcp_altscontext_proto_rawDescGZIP() []byte { } var file_grpc_gcp_altscontext_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_grpc_gcp_altscontext_proto_goTypes = []interface{}{ +var file_grpc_gcp_altscontext_proto_goTypes = []any{ (*AltsContext)(nil), // 0: grpc.gcp.AltsContext nil, // 1: grpc.gcp.AltsContext.PeerAttributesEntry (SecurityLevel)(0), // 2: grpc.gcp.SecurityLevel @@ -225,7 +225,7 @@ func file_grpc_gcp_altscontext_proto_init() { } file_grpc_gcp_transport_security_common_proto_init() if !protoimpl.UnsafeEnabled { - file_grpc_gcp_altscontext_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_altscontext_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*AltsContext); i { case 0: return &v.state diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index adbad6b2fa3..79b5dad476c 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/gcp/handshaker.proto package grpc_gcp @@ -533,7 +533,7 @@ type StartServerHandshakeReq struct { // to handshake_parameters is the integer value of HandshakeProtocol enum. HandshakeParameters map[int32]*ServerHandshakeParameters `protobuf:"bytes,2,rep,name=handshake_parameters,json=handshakeParameters,proto3" json:"handshake_parameters,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Bytes in out_frames returned from the peer's HandshakerResp. It is possible - // that the peer's out_frames are split into multiple HandshakReq messages. + // that the peer's out_frames are split into multiple HandshakeReq messages. InBytes []byte `protobuf:"bytes,3,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` // (Optional) Local endpoint information of the connection to the client, // such as local IP address, port number, and network protocol. 
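Earlier in this patch, service.Dial drops the test-only hsDialer indirection while keeping its one-connection-per-address cache: the ClientConn to the handshaker service is created on first use under a mutex and then reused for the lifetime of the process. Below is a minimal sketch of that mutex-guarded lazy-dial pattern, with a string standing in for *grpc.ClientConn so the example stays dependency-free; all names here are illustrative, not the vendored API.

package main

import (
	"fmt"
	"sync"
)

var (
	mu    sync.Mutex
	conns = make(map[string]string) // address -> cached "connection"
)

// dialOnce returns the cached connection for addr, creating it on first use.
// In the vendored code the cached value is a *grpc.ClientConn built by
// grpc.Dial; a string keeps this sketch self-contained.
func dialOnce(addr string) string {
	mu.Lock()
	defer mu.Unlock()
	if c, ok := conns[addr]; ok {
		return c // subsequent callers reuse the open connection
	}
	c := "conn-to-" + addr
	conns[addr] = c
	return c
}

func main() {
	fmt.Println(dialOnce("hs-address-1"))
	fmt.Println(dialOnce("hs-address-1")) // second call hits the cache
}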
@@ -1071,7 +1071,7 @@ var file_grpc_gcp_handshaker_proto_rawDesc = []byte{ 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, - 0x22, 0xf6, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x22, 0xfb, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x5b, 0x0a, 0x1b, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, @@ -1108,139 +1108,140 @@ var file_grpc_gcp_handshaker_proto_rawDesc = []byte{ 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, - 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xaa, 0x01, 0x0a, 0x19, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, - 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, - 0x73, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xa5, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x26, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0x80, 0x01, + 0x01, 0x52, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xaf, + 0x01, 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 
0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0x80, 0x01, 0x01, 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0xa5, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x33, 0x0a, 0x15, + 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x73, 0x12, 0x6d, 0x0a, 0x14, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, - 0x65, 0x71, 0x12, 0x33, 0x0a, 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x6d, 0x0a, 0x14, 0x68, 0x61, 0x6e, 0x64, 0x73, - 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, - 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, - 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, - 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x13, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, - 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0d, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0f, - 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, - 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x70, 0x63, - 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, - 0x72, 0x70, 
0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, - 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, + 0x65, 0x71, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x68, 0x61, 0x6e, + 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, + 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, + 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0x6b, 0x0a, 0x18, 0x48, + 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x67, 0x63, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, + 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x62, 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c, + 0x0a, 0x12, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4d, 0x73, 0x22, 0xe5, 0x01, 0x0a, + 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x46, + 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 
0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, + 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, + 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, + 0x0a, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, + 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x48, + 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x44, 0x61, 0x74, 0x61, + 0x12, 0x37, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, + 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61, + 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, + 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72, + 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, + 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 
0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, - 0x65, 0x1a, 0x6b, 0x0a, 0x18, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, - 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x62, - 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, - 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, - 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, - 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, - 0x4d, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, - 0x72, 0x52, 0x65, 0x71, 0x12, 0x46, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, - 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, - 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, - 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, - 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, - 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, - 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, - 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 
0x08, 0x6b, - 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, - 0x65, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x37, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, - 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, - 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, - 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, - 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, - 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72, 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, - 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, - 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, - 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, - 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, - 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, - 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, - 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, - 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, - 0x22, 0x0a, 
0x1e, 0x48, 0x41, 0x4e, 0x44, 0x53, 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, - 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, - 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02, 0x2a, 0x45, 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, - 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, - 0x43, 0x50, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, - 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x44, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, - 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, - 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, - 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, - 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, - 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x22, 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72, + 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46, + 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, + 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, + 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 
0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e, + 0x44, 0x53, 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, + 0x03, 0x54, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02, + 0x2a, 0x45, 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, + 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x07, + 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, + 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b, + 0x44, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, + 0x28, 0x01, 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48, + 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, + 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1257,7 +1258,7 @@ func file_grpc_gcp_handshaker_proto_rawDescGZIP() []byte { var file_grpc_gcp_handshaker_proto_enumTypes = make([]protoimpl.EnumInfo, 2) var file_grpc_gcp_handshaker_proto_msgTypes = make([]protoimpl.MessageInfo, 12) -var file_grpc_gcp_handshaker_proto_goTypes = []interface{}{ +var file_grpc_gcp_handshaker_proto_goTypes = []any{ (HandshakeProtocol)(0), // 0: grpc.gcp.HandshakeProtocol (NetworkProtocol)(0), // 1: grpc.gcp.NetworkProtocol (*Endpoint)(nil), // 2: grpc.gcp.Endpoint @@ -1313,7 +1314,7 @@ func file_grpc_gcp_handshaker_proto_init() { } file_grpc_gcp_transport_security_common_proto_init() if !protoimpl.UnsafeEnabled { - file_grpc_gcp_handshaker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Endpoint); i { case 0: return &v.state @@ -1325,7 +1326,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Identity); i { case 0: return &v.state @@ -1337,7 +1338,7 @@ func file_grpc_gcp_handshaker_proto_init() { return 
nil } } - file_grpc_gcp_handshaker_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*StartClientHandshakeReq); i { case 0: return &v.state @@ -1349,7 +1350,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ServerHandshakeParameters); i { case 0: return &v.state @@ -1361,7 +1362,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*StartServerHandshakeReq); i { case 0: return &v.state @@ -1373,7 +1374,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*NextHandshakeMessageReq); i { case 0: return &v.state @@ -1385,7 +1386,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*HandshakerReq); i { case 0: return &v.state @@ -1397,7 +1398,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*HandshakerResult); i { case 0: return &v.state @@ -1409,7 +1410,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*HandshakerStatus); i { case 0: return &v.state @@ -1421,7 +1422,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*HandshakerResp); i { case 0: return &v.state @@ -1434,12 +1435,12 @@ func file_grpc_gcp_handshaker_proto_init() { } } } - file_grpc_gcp_handshaker_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_grpc_gcp_handshaker_proto_msgTypes[1].OneofWrappers = []any{ (*Identity_ServiceAccount)(nil), (*Identity_Hostname)(nil), } - file_grpc_gcp_handshaker_proto_msgTypes[3].OneofWrappers = []interface{}{} - file_grpc_gcp_handshaker_proto_msgTypes[6].OneofWrappers = []interface{}{ + file_grpc_gcp_handshaker_proto_msgTypes[3].OneofWrappers = []any{} + file_grpc_gcp_handshaker_proto_msgTypes[6].OneofWrappers = []any{ (*HandshakerReq_ClientStart)(nil), (*HandshakerReq_ServerStart)(nil), (*HandshakerReq_Next)(nil), diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index d1af55260bd..34443b1d2dc 100644 --- 
a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/gcp/handshaker.proto package grpc_gcp @@ -75,7 +75,7 @@ type HandshakerService_DoHandshakeClient = grpc.BidiStreamingClient[HandshakerRe // HandshakerServiceServer is the server API for HandshakerService service. // All implementations must embed UnimplementedHandshakerServiceServer -// for forward compatibility +// for forward compatibility. type HandshakerServiceServer interface { // Handshaker service accepts a stream of handshaker request, returning a // stream of handshaker response. Client is expected to send exactly one @@ -87,14 +87,18 @@ type HandshakerServiceServer interface { mustEmbedUnimplementedHandshakerServiceServer() } -// UnimplementedHandshakerServiceServer must be embedded to have forward compatible implementations. -type UnimplementedHandshakerServiceServer struct { -} +// UnimplementedHandshakerServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedHandshakerServiceServer struct{} func (UnimplementedHandshakerServiceServer) DoHandshake(grpc.BidiStreamingServer[HandshakerReq, HandshakerResp]) error { return status.Errorf(codes.Unimplemented, "method DoHandshake not implemented") } func (UnimplementedHandshakerServiceServer) mustEmbedUnimplementedHandshakerServiceServer() {} +func (UnimplementedHandshakerServiceServer) testEmbeddedByValue() {} // UnsafeHandshakerServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to HandshakerServiceServer will @@ -104,6 +108,13 @@ type UnsafeHandshakerServiceServer interface { } func RegisterHandshakerServiceServer(s grpc.ServiceRegistrar, srv HandshakerServiceServer) { + // If the following call panics, it indicates UnimplementedHandshakerServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&HandshakerService_ServiceDesc, srv) } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index d65ffe6e7be..6956c14f6a9 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.33.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/gcp/transport_security_common.proto package grpc_gcp @@ -253,7 +253,7 @@ func file_grpc_gcp_transport_security_common_proto_rawDescGZIP() []byte { var file_grpc_gcp_transport_security_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_grpc_gcp_transport_security_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_grpc_gcp_transport_security_common_proto_goTypes = []interface{}{ +var file_grpc_gcp_transport_security_common_proto_goTypes = []any{ (SecurityLevel)(0), // 0: grpc.gcp.SecurityLevel (*RpcProtocolVersions)(nil), // 1: grpc.gcp.RpcProtocolVersions (*RpcProtocolVersions_Version)(nil), // 2: grpc.gcp.RpcProtocolVersions.Version @@ -274,7 +274,7 @@ func file_grpc_gcp_transport_security_common_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_gcp_transport_security_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_transport_security_common_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*RpcProtocolVersions); i { case 0: return &v.state @@ -286,7 +286,7 @@ func file_grpc_gcp_transport_security_common_proto_init() { return nil } } - file_grpc_gcp_transport_security_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_transport_security_common_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*RpcProtocolVersions_Version); i { case 0: return &v.state diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/insecure/insecure.go index 82bee1443bf..4c805c64462 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -40,7 +40,7 @@ func NewCredentials() credentials.TransportCredentials { // NoSecurity. type insecureTC struct{} -func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { +func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/oauth/oauth.go index d475cbc0894..328b838ed1f 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/oauth/oauth.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/oauth/oauth.go @@ -38,7 +38,7 @@ type TokenSource struct { } // GetRequestMetadata gets the request metadata as a map from a TokenSource. 
-func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { +func (ts TokenSource) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) { token, err := ts.Token() if err != nil { return nil, err @@ -127,7 +127,7 @@ func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials { return oauthAccess{token: *token} } -func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { +func (oa oauthAccess) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) { ri, _ := credentials.RequestInfoFromContext(ctx) if err := credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { return nil, fmt.Errorf("unable to transfer oauthAccess PerRPCCredentials: %v", err) @@ -156,7 +156,7 @@ type serviceAccount struct { t *oauth2.Token } -func (s *serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { +func (s *serviceAccount) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) { s.mu.Lock() defer s.mu.Unlock() if !s.t.Valid() { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/tls.go b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/tls.go index 5dafd34edf9..4114358545e 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/tls.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/tls.go @@ -27,9 +27,13 @@ import ( "net/url" "os" + "google.golang.org/grpc/grpclog" credinternal "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/envconfig" ) +var logger = grpclog.Component("credentials") + // TLSInfo contains the auth information for a TLS authenticated connection. // It implements the AuthInfo interface. type TLSInfo struct { @@ -112,6 +116,22 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon conn.Close() return nil, nil, ctx.Err() } + + // The negotiated protocol can be either of the following: + // 1. h2: When the server supports ALPN. Only HTTP/2 can be negotiated since + // it is the only protocol advertised by the client during the handshake. + // The tls library ensures that the server chooses a protocol advertised + // by the client. + // 2. "" (empty string): If the server doesn't support ALPN. ALPN is a requirement + // for using HTTP/2 over TLS. We can terminate the connection immediately. + np := conn.ConnectionState().NegotiatedProtocol + if np == "" { + if envconfig.EnforceALPNEnabled { + conn.Close() + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + } + logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName) + } tlsInfo := TLSInfo{ State: conn.ConnectionState(), CommonAuthInfo: CommonAuthInfo{ @@ -131,8 +151,20 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) conn.Close() return nil, nil, err } + cs := conn.ConnectionState() + // The negotiated application protocol can be empty only if the client doesn't + // support ALPN. In such cases, we can close the connection since ALPN is required + // for using HTTP/2 over TLS. 
+	if cs.NegotiatedProtocol == "" {
+		if envconfig.EnforceALPNEnabled {
+			conn.Close()
+			return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property")
+		} else if logger.V(2) {
+			logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases")
+		}
+	}
 	tlsInfo := TLSInfo{
-		State: conn.ConnectionState(),
+		State: cs,
 		CommonAuthInfo: CommonAuthInfo{
 			SecurityLevel: PrivacyAndIntegrity,
 		},
diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/tls/certprovider/store.go b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/tls/certprovider/store.go
index 5c72f192cc3..a4b99e3d4a2 100644
--- a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/tls/certprovider/store.go
+++ b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/tls/certprovider/store.go
@@ -19,8 +19,10 @@
 package certprovider
 
 import (
+	"context"
 	"fmt"
 	"sync"
+	"sync/atomic"
 )
 
 // provStore is the global singleton certificate provider store.
@@ -53,6 +55,22 @@ type wrappedProvider struct {
 	store *store
 }
 
+// closedProvider always returns an errProviderClosed error.
+type closedProvider struct{}
+
+func (c closedProvider) KeyMaterial(context.Context) (*KeyMaterial, error) {
+	return nil, errProviderClosed
+}
+
+func (c closedProvider) Close() {
+}
+
+// singleCloseWrappedProvider wraps a provider instance so that multiple calls
+// to Close are handled safely.
+type singleCloseWrappedProvider struct {
+	provider atomic.Pointer[Provider]
+}
+
 // store is a collection of provider instances, safe for concurrent access.
 type store struct {
 	mu sync.Mutex
@@ -75,6 +93,28 @@ func (wp *wrappedProvider) Close() {
 	}
 }
 
+// Close overrides the Close method of the wrapped provider to avoid releasing
+// an already released reference.
+func (w *singleCloseWrappedProvider) Close() {
+	newProvider := Provider(closedProvider{})
+	oldProvider := w.provider.Swap(&newProvider)
+	(*oldProvider).Close()
+}
+
+// KeyMaterial returns the key material sourced by the Provider.
+// Callers are expected to use the returned value as read-only.
+func (w *singleCloseWrappedProvider) KeyMaterial(ctx context.Context) (*KeyMaterial, error) {
+	return (*w.provider.Load()).KeyMaterial(ctx)
+}
+
+// newSingleCloseWrappedProvider creates a wrapper around a provider instance
+// to properly handle multiple calls to Close.
+func newSingleCloseWrappedProvider(provider Provider) *singleCloseWrappedProvider {
+	w := &singleCloseWrappedProvider{}
+	w.provider.Store(&provider)
+	return w
+}
+
 // BuildableConfig wraps parsed provider configuration and functionality to
 // instantiate provider instances.
 type BuildableConfig struct {
@@ -112,7 +152,7 @@ func (bc *BuildableConfig) Build(opts BuildOptions) (Provider, error) {
 	}
 	if wp, ok := provStore.providers[sk]; ok {
 		wp.refCount++
-		return wp, nil
+		return newSingleCloseWrappedProvider(wp), nil
 	}
 
 	provider := bc.starter(opts)
@@ -126,7 +166,7 @@ func (bc *BuildableConfig) Build(opts BuildOptions) (Provider, error) {
 		store: provStore,
 	}
 	provStore.providers[sk] = wp
-	return wp, nil
+	return newSingleCloseWrappedProvider(wp), nil
}
 
 // String returns the provider name and config as a colon separated string.
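The single-close wrapper added to store.go above works by an atomic pointer swap rather than a mutex: the first Close replaces the live provider with a closed stand-in and releases the old reference, so every later Close (and any concurrent KeyMaterial call) lands on the stand-in instead of double-releasing. Below is a minimal standalone sketch of the same pattern, assuming only the standard library; the resource, realResource, closedResource, and singleClose names are invented here for illustration and are not part of the certprovider API.

package main

import (
	"fmt"
	"sync/atomic"
)

// resource stands in for certprovider.Provider in this sketch.
type resource interface{ Close() }

type realResource struct{}

func (realResource) Close() { fmt.Println("released underlying resource") }

// closedResource mirrors closedProvider: closing it again is a no-op.
type closedResource struct{}

func (closedResource) Close() {}

// singleClose mirrors singleCloseWrappedProvider.
type singleClose struct{ r atomic.Pointer[resource] }

func newSingleClose(r resource) *singleClose {
	w := &singleClose{}
	w.r.Store(&r)
	return w
}

func (w *singleClose) Close() {
	closed := resource(closedResource{})
	old := w.r.Swap(&closed) // atomically retire the live resource
	(*old).Close()           // release exactly once
}

func main() {
	w := newSingleClose(realResource{})
	w.Close() // prints once
	w.Close() // safe no-op: hits closedResource
}

Because Swap publishes the stand-in atomically, a racing caller observes either the live resource or the closed one, never a half-released state; this is why Build() can now hand each caller its own wrapper around the shared, reference-counted provider.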
diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/dialoptions.go b/terraform/providers/google/vendor/google.golang.org/grpc/dialoptions.go
index 00273702b69..2b285beee37 100644
--- a/terraform/providers/google/vendor/google.golang.org/grpc/dialoptions.go
+++ b/terraform/providers/google/vendor/google.golang.org/grpc/dialoptions.go
@@ -21,6 +21,7 @@ package grpc
 import (
 	"context"
 	"net"
+	"net/url"
 	"time"
 
 	"google.golang.org/grpc/backoff"
@@ -32,10 +33,16 @@ import (
 	"google.golang.org/grpc/internal/binarylog"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/mem"
 	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/stats"
 )
 
+const (
+	// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#limits-on-retries-and-hedges
+	defaultMaxCallAttempts = 5
+)
+
 func init() {
 	internal.AddGlobalDialOptions = func(opt ...DialOption) {
 		globalDialOptions = append(globalDialOptions, opt...)
@@ -43,10 +50,18 @@ func init() {
 	internal.ClearGlobalDialOptions = func() {
 		globalDialOptions = nil
 	}
+	internal.AddGlobalPerTargetDialOptions = func(opt any) {
+		if ptdo, ok := opt.(perTargetDialOption); ok {
+			globalPerTargetDialOptions = append(globalPerTargetDialOptions, ptdo)
+		}
+	}
+	internal.ClearGlobalPerTargetDialOptions = func() {
+		globalPerTargetDialOptions = nil
+	}
 	internal.WithBinaryLogger = withBinaryLogger
 	internal.JoinDialOptions = newJoinDialOption
 	internal.DisableGlobalDialOptions = newDisableGlobalDialOptions
-	internal.WithRecvBufferPool = withRecvBufferPool
+	internal.WithBufferPool = withBufferPool
 }
 
 // dialOptions configure a Dial call. dialOptions are set by the DialOption
@@ -78,8 +93,8 @@ type dialOptions struct {
 	defaultServiceConfigRawJSON *string
 	resolvers                   []resolver.Builder
 	idleTimeout                 time.Duration
-	recvBufferPool              SharedBufferPool
 	defaultScheme               string
+	maxCallAttempts             int
 }
 
 // DialOption configures how we set up the connection.
@@ -89,6 +104,19 @@ type DialOption interface {
 
 var globalDialOptions []DialOption
 
+// perTargetDialOption takes a parsed target and returns a dial option to apply.
+//
+// This gets called after NewClient() parses the target, and allows per-target
+// configuration to be set through a returned DialOption. The DialOption will
+// not take effect if it specifies a resolver builder, as that DialOption is
+// factored in while parsing the target.
+type perTargetDialOption interface {
+	// DialOptionForTarget returns a DialOption to apply.
+	DialOptionForTarget(parsedTarget url.URL) DialOption
+}
+
+var globalPerTargetDialOptions []perTargetDialOption
+
 // EmptyDialOption does not alter the dial configuration. It can be embedded in
 // another structure to build custom dial options.
 //
@@ -490,6 +518,8 @@ func WithUserAgent(s string) DialOption {
 
 // WithKeepaliveParams returns a DialOption that specifies keepalive parameters
 // for the client transport.
+//
+// Keepalive is disabled by default.
func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { if kp.Time < internal.KeepaliveMinPingTime { logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) @@ -649,12 +679,13 @@ func defaultDialOptions() dialOptions { WriteBufferSize: defaultWriteBufSize, UseProxy: true, UserAgent: grpcUA, + BufferPool: mem.DefaultBufferPool(), }, bs: internalbackoff.DefaultExponential, healthCheckFunc: internal.HealthCheckFunc, idleTimeout: 30 * time.Minute, - recvBufferPool: nopBufferPool{}, defaultScheme: "dns", + maxCallAttempts: defaultMaxCallAttempts, } } @@ -712,25 +743,25 @@ func WithIdleTimeout(d time.Duration) DialOption { }) } -// WithRecvBufferPool returns a DialOption that configures the ClientConn -// to use the provided shared buffer pool for parsing incoming messages. Depending -// on the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: WithStatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. +// WithMaxCallAttempts returns a DialOption that configures the maximum number +// of attempts per call (including retries and hedging) using the channel. +// Service owners may specify a higher value for these parameters, but higher +// values will be treated as equal to the maximum value by the client +// implementation. This mitigates security concerns related to the service +// config being transferred to the client via DNS. // -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. -func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { - return withRecvBufferPool(bufferPool) +// A value of 5 will be used if this dial option is not set or n < 2. +func WithMaxCallAttempts(n int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + if n < 2 { + n = defaultMaxCallAttempts + } + o.maxCallAttempts = n + }) } -func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { +func withBufferPool(bufferPool mem.BufferPool) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.recvBufferPool = bufferPool + o.copts.BufferPool = bufferPool }) } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/doc.go b/terraform/providers/google/vendor/google.golang.org/grpc/doc.go index 0022859ad74..e7b532b6f80 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/doc.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/doc.go @@ -16,7 +16,7 @@ * */ -//go:generate ./regenerate.sh +//go:generate ./scripts/regenerate.sh /* Package grpc implements an RPC system called gRPC. diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/encoding/encoding.go b/terraform/providers/google/vendor/google.golang.org/grpc/encoding/encoding.go index 5ebf88d7147..11d0ae142c4 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/encoding/encoding.go @@ -94,7 +94,7 @@ type Codec interface { Name() string } -var registeredCodecs = make(map[string]Codec) +var registeredCodecs = make(map[string]any) // RegisterCodec registers the provided Codec for use with all gRPC clients and // servers. 
@@ -126,5 +126,6 @@ func RegisterCodec(codec Codec) { // // The content-subtype is expected to be lowercase. func GetCodec(contentSubtype string) Codec { - return registeredCodecs[contentSubtype] + c, _ := registeredCodecs[contentSubtype].(Codec) + return c } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/terraform/providers/google/vendor/google.golang.org/grpc/encoding/encoding_v2.go new file mode 100644 index 00000000000..074c5e234a7 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/encoding/encoding_v2.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package encoding + +import ( + "strings" + + "google.golang.org/grpc/mem" +) + +// CodecV2 defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a CodecV2's +// methods can be called from concurrent goroutines. +type CodecV2 interface { + // Marshal returns the wire format of v. The buffers in the returned + // [mem.BufferSlice] must have at least one reference each, which will be freed + // by gRPC when they are no longer needed. + Marshal(v any) (out mem.BufferSlice, err error) + // Unmarshal parses the wire format into v. Note that data will be freed as soon + // as this function returns. If the codec wishes to guarantee access to the data + // after this function, it must take its own reference that it frees when it is + // no longer needed. + Unmarshal(data mem.BufferSlice, v any) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. + Name() string +} + +// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and +// servers. +// +// The CodecV2 will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the CodecV2. This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodecV2 will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If both a Codec and CodecV2 are registered with the same name, the CodecV2 +// will be used. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Codecs are +// registered with the same name, the one registered last will take effect. 
+func RegisterCodecV2(codec CodecV2) {
+	if codec == nil {
+		panic("cannot register a nil CodecV2")
+	}
+	if codec.Name() == "" {
+		panic("cannot register CodecV2 with empty string result for Name()")
+	}
+	contentSubtype := strings.ToLower(codec.Name())
+	registeredCodecs[contentSubtype] = codec
+}
+
+// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is
+// registered for the content-subtype.
+//
+// The content-subtype is expected to be lowercase.
+func GetCodecV2(contentSubtype string) CodecV2 {
+	c, _ := registeredCodecs[contentSubtype].(CodecV2)
+	return c
+}
diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/encoding/proto/proto.go b/terraform/providers/google/vendor/google.golang.org/grpc/encoding/proto/proto.go
index 66d5cdf03ec..ceec319dd2f 100644
--- a/terraform/providers/google/vendor/google.golang.org/grpc/encoding/proto/proto.go
+++ b/terraform/providers/google/vendor/google.golang.org/grpc/encoding/proto/proto.go
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright 2018 gRPC authors.
+ * Copyright 2024 gRPC authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -24,6 +24,7 @@ import (
 	"fmt"
 
 	"google.golang.org/grpc/encoding"
+	"google.golang.org/grpc/mem"
 	"google.golang.org/protobuf/proto"
 	"google.golang.org/protobuf/protoadapt"
 )
@@ -32,28 +33,51 @@ import (
 const Name = "proto"
 
 func init() {
-	encoding.RegisterCodec(codec{})
+	encoding.RegisterCodecV2(&codecV2{})
 }
 
-// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
-type codec struct{}
+// codecV2 is a CodecV2 implementation with protobuf. It is the default codec
+// for gRPC.
+type codecV2 struct{}
 
-func (codec) Marshal(v any) ([]byte, error) {
+func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) {
 	vv := messageV2Of(v)
 	if vv == nil {
-		return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
+		return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v)
 	}
-	return proto.Marshal(vv)
+	size := proto.Size(vv)
+	if mem.IsBelowBufferPoolingThreshold(size) {
+		buf, err := proto.Marshal(vv)
+		if err != nil {
+			return nil, err
+		}
+		data = append(data, mem.SliceBuffer(buf))
+	} else {
+		pool := mem.DefaultBufferPool()
+		buf := pool.Get(size)
+		if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil {
+			pool.Put(buf)
+			return nil, err
+		}
+		data = append(data, mem.NewBuffer(buf, pool))
+	}
+
+	return data, nil
 }
 
-func (codec) Unmarshal(data []byte, v any) error {
+func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) {
 	vv := messageV2Of(v)
 	if vv == nil {
 		return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
 	}
-	return proto.Unmarshal(data, vv)
+	buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
+	defer buf.Free()
+	// TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not
+	// really possible without a major overhaul of the proto package, but the
+	// vtprotobuf library may be able to support this.
+	return proto.Unmarshal(buf.ReadOnlyData(), vv)
 }
 
 func messageV2Of(v any) proto.Message {
@@ -67,6 +91,6 @@ func messageV2Of(v any) proto.Message {
 	return nil
 }
 
-func (codec) Name() string {
+func (c *codecV2) Name() string {
 	return Name
 }
diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/terraform/providers/google/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
new file mode 100644
index 00000000000..1d827dd5d9d
--- /dev/null
+++ b/terraform/providers/google/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
@@ -0,0 +1,269 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package stats
+
+import (
+	"maps"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal"
+)
+
+func init() {
+	internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting
+}
+
+var logger = grpclog.Component("metrics-registry")
+
+// DefaultMetrics are the default metrics registered through global metrics
+// registry. This is written to at initialization time only, and is read only
+// after initialization.
+var DefaultMetrics = NewMetrics()
+
+// MetricDescriptor is the data for a registered metric.
+type MetricDescriptor struct {
+	// The name of this metric. This name must be unique across the whole binary
+	// (including any per call metrics). See
+	// https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions
+	// for metric naming conventions.
+	Name Metric
+	// The description of this metric.
+	Description string
+	// The unit (e.g. entries, seconds) of this metric.
+	Unit string
+	// The required label keys for this metric. These are intended to be
+	// attached to metrics emitted from a stats handler.
+	Labels []string
+	// The optional label keys for this metric. These are intended to be
+	// attached to metrics emitted from a stats handler if configured.
+	OptionalLabels []string
+	// Whether this metric is on by default.
+	Default bool
+	// The type of metric. This is set by the metric registry, and not intended
+	// to be set by a component registering a metric.
+	Type MetricType
+	// Bounds are the bounds of this metric. This only applies to histogram
+	// metrics. If unset or set with length 0, stats handlers will fall back to
+	// default bounds.
+	Bounds []float64
+}
+
+// MetricType is the type of metric.
+type MetricType int
+
+// Type of metric supported by this instrument registry.
+const (
+	MetricTypeIntCount MetricType = iota
+	MetricTypeFloatCount
+	MetricTypeIntHisto
+	MetricTypeFloatHisto
+	MetricTypeIntGauge
+)
+
+// Int64CountHandle is a typed handle for an int count metric. This handle
+// is passed at the recording point in order to know which metric to record
+// on.
+type Int64CountHandle MetricDescriptor
+
+// Descriptor returns the int64 count handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64CountHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 count value on the metrics recorder provided.
+func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+	recorder.RecordInt64Count(h, incr, labels...)
+}
+
+// Float64CountHandle is a typed handle for a float count metric. This handle is
+// passed at the recording point in order to know which metric to record on.
+type Float64CountHandle MetricDescriptor
+
+// Descriptor returns the float64 count handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64CountHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 count value on the metrics recorder provided.
+func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+	recorder.RecordFloat64Count(h, incr, labels...)
+}
+
+// Int64HistoHandle is a typed handle for an int histogram metric. This handle
+// is passed at the recording point in order to know which metric to record on.
+type Int64HistoHandle MetricDescriptor
+
+// Descriptor returns the int64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64HistoHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 histo value on the metrics recorder provided.
+func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+	recorder.RecordInt64Histo(h, incr, labels...)
+}
+
+// Float64HistoHandle is a typed handle for a float histogram metric. This
+// handle is passed at the recording point in order to know which metric to
+// record on.
+type Float64HistoHandle MetricDescriptor
+
+// Descriptor returns the float64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64HistoHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 histo value on the metrics recorder provided.
+func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+	recorder.RecordFloat64Histo(h, incr, labels...)
+}
+
+// Int64GaugeHandle is a typed handle for an int gauge metric. This handle is
+// passed at the recording point in order to know which metric to record on.
+type Int64GaugeHandle MetricDescriptor
+
+// Descriptor returns the int64 gauge handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 gauge value on the metrics recorder provided.
+func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+	recorder.RecordInt64Gauge(h, incr, labels...)
+}
+
+// registeredMetrics are the registered metric descriptor names.
+var registeredMetrics = make(map[Metric]bool)
+
+// metricsRegistry contains all of the registered metrics.
+//
+// This is written to only at init time, and read only after that.
+var metricsRegistry = make(map[Metric]*MetricDescriptor)
+
+// DescriptorForMetric returns the MetricDescriptor from the global registry.
+//
+// Returns nil if the MetricDescriptor is not present.
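As an aside, registering and recording against one of these handles might look like the sketch below; the metric name, unit, and label are invented for illustration, and RegisterInt64Count is the registration helper defined just below in this file.

package example

import estats "google.golang.org/grpc/experimental/stats"

// retryMetric is a hypothetical int64 count metric. Package-level variable
// initialization runs at init time, which is the only safe time to register.
var retryMetric = estats.RegisterInt64Count(estats.MetricDescriptor{
	Name:        "grpc.example.retries",
	Description: "Number of retries performed.",
	Unit:        "retry",
	Labels:      []string{"grpc.method"},
	Default:     false,
})

// onRetry records a single retry on whatever MetricsRecorder a stats handler
// supplies; the handle routes the value to RecordInt64Count.
func onRetry(recorder estats.MetricsRecorder, method string) {
	retryMetric.Record(recorder, 1, method)
}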
+func DescriptorForMetric(metric Metric) *MetricDescriptor {
+	return metricsRegistry[metric]
+}
+
+func registerMetric(name Metric, def bool) {
+	if registeredMetrics[name] {
+		logger.Fatalf("metric %v already registered", name)
+	}
+	registeredMetrics[name] = true
+	if def {
+		DefaultMetrics = DefaultMetrics.Add(name)
+	}
+}
+
+// RegisterInt64Count registers the metric description onto the global registry.
+// It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeIntCount
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64CountHandle)(descPtr)
+}
+
+// RegisterFloat64Count registers the metric description onto the global
+// registry. It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeFloatCount
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Float64CountHandle)(descPtr)
+}
+
+// RegisterInt64Histo registers the metric description onto the global registry.
+// It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeIntHisto
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64HistoHandle)(descPtr)
+}
+
+// RegisterFloat64Histo registers the metric description onto the global
+// registry. It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeFloatHisto
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Float64HistoHandle)(descPtr)
+}
+
+// RegisterInt64Gauge registers the metric description onto the global registry.
+// It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeIntGauge
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64GaugeHandle)(descPtr)
+}
+
+// snapshotMetricsRegistryForTesting snapshots the global data of the metrics
+// registry. Returns a cleanup function that sets the metrics registry to its
+// original state.
+func snapshotMetricsRegistryForTesting() func() {
+	oldDefaultMetrics := DefaultMetrics
+	oldRegisteredMetrics := registeredMetrics
+	oldMetricsRegistry := metricsRegistry
+
+	registeredMetrics = make(map[Metric]bool)
+	metricsRegistry = make(map[Metric]*MetricDescriptor)
+	maps.Copy(registeredMetrics, oldRegisteredMetrics)
+	maps.Copy(metricsRegistry, oldMetricsRegistry)
+
+	return func() {
+		DefaultMetrics = oldDefaultMetrics
+		registeredMetrics = oldRegisteredMetrics
+		metricsRegistry = oldMetricsRegistry
+	}
+}
diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/terraform/providers/google/vendor/google.golang.org/grpc/experimental/stats/metrics.go
new file mode 100644
index 00000000000..3221f7a633a
--- /dev/null
+++ b/terraform/providers/google/vendor/google.golang.org/grpc/experimental/stats/metrics.go
@@ -0,0 +1,114 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats contains experimental metrics/stats APIs.
+package stats
+
+import "maps"
+
+// MetricsRecorder records on metrics derived from metric registry.
+type MetricsRecorder interface {
+	// RecordInt64Count records the measurement alongside labels on the int
+	// count associated with the provided handle.
+	RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string)
+	// RecordFloat64Count records the measurement alongside labels on the float
+	// count associated with the provided handle.
+	RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string)
+	// RecordInt64Histo records the measurement alongside labels on the int
+	// histo associated with the provided handle.
+	RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string)
+	// RecordFloat64Histo records the measurement alongside labels on the float
+	// histo associated with the provided handle.
+	RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string)
+	// RecordInt64Gauge records the measurement alongside labels on the int
+	// gauge associated with the provided handle.
+	RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
+}
+
+// Metric is an identifier for a metric.
+type Metric string
+
+// Metrics is a set of metrics to record. Once created, Metrics is immutable;
+// however, Add and Remove can make copies with specific metrics added or
+// removed, respectively.
+//
+// Do not construct directly; use NewMetrics instead.
+type Metrics struct {
+	// metrics are the set of metrics to initialize.
+	metrics map[Metric]bool
+}
+
+// NewMetrics returns a Metrics containing the given metrics.
+func NewMetrics(metrics ...Metric) *Metrics {
+	newMetrics := make(map[Metric]bool)
+	for _, metric := range metrics {
+		newMetrics[metric] = true
+	}
+	return &Metrics{
+		metrics: newMetrics,
+	}
+}
+
+// Metrics returns the metrics set. The returned map is read-only and must not
+// be modified.
+func (m *Metrics) Metrics() map[Metric]bool {
+	return m.metrics
+}
+
+// Add adds the metrics to the metrics set and returns a new copy with the
+// additional metrics.
+func (m *Metrics) Add(metrics ...Metric) *Metrics {
+	newMetrics := make(map[Metric]bool)
+	for metric := range m.metrics {
+		newMetrics[metric] = true
+	}
+
+	for _, metric := range metrics {
+		newMetrics[metric] = true
+	}
+	return &Metrics{
+		metrics: newMetrics,
+	}
+}
+
+// Join joins the metrics passed in with the metrics set, and returns a new copy
+// with the merged metrics.
+func (m *Metrics) Join(metrics *Metrics) *Metrics {
+	newMetrics := make(map[Metric]bool)
+	maps.Copy(newMetrics, m.metrics)
+	maps.Copy(newMetrics, metrics.metrics)
+	return &Metrics{
+		metrics: newMetrics,
+	}
+}
+
+// Remove removes the metrics from the metrics set and returns a new copy with
+// the metrics removed.
+func (m *Metrics) Remove(metrics ...Metric) *Metrics {
+	newMetrics := make(map[Metric]bool)
+	for metric := range m.metrics {
+		newMetrics[metric] = true
+	}
+
+	for _, metric := range metrics {
+		delete(newMetrics, metric)
+	}
+	return &Metrics{
+		metrics: newMetrics,
+	}
+}
diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/grpclog/component.go b/terraform/providers/google/vendor/google.golang.org/grpc/grpclog/component.go
index ac73c9ced25..f1ae080dcb8 100644
--- a/terraform/providers/google/vendor/google.golang.org/grpc/grpclog/component.go
+++ b/terraform/providers/google/vendor/google.golang.org/grpc/grpclog/component.go
@@ -20,8 +20,6 @@ package grpclog
 
 import (
 	"fmt"
-
-	"google.golang.org/grpc/internal/grpclog"
 )
 
 // componentData records the settings for a component.
@@ -33,22 +31,22 @@ var cache = map[string]*componentData{}
 
 func (c *componentData) InfoDepth(depth int, args ...any) {
 	args = append([]any{"[" + string(c.name) + "]"}, args...)
-	grpclog.InfoDepth(depth+1, args...)
+	InfoDepth(depth+1, args...)
 }
 
 func (c *componentData) WarningDepth(depth int, args ...any) {
 	args = append([]any{"[" + string(c.name) + "]"}, args...)
-	grpclog.WarningDepth(depth+1, args...)
+	WarningDepth(depth+1, args...)
 }
 
 func (c *componentData) ErrorDepth(depth int, args ...any) {
 	args = append([]any{"[" + string(c.name) + "]"}, args...)
-	grpclog.ErrorDepth(depth+1, args...)
+	ErrorDepth(depth+1, args...)
 }
 
 func (c *componentData) FatalDepth(depth int, args ...any) {
 	args = append([]any{"[" + string(c.name) + "]"}, args...)
-	grpclog.FatalDepth(depth+1, args...)
+	FatalDepth(depth+1, args...)
 }
 
 func (c *componentData) Info(args ...any) {
diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/grpclog/grpclog.go b/terraform/providers/google/vendor/google.golang.org/grpc/grpclog/grpclog.go
index 16928c9cb99..db320105e64 100644
--- a/terraform/providers/google/vendor/google.golang.org/grpc/grpclog/grpclog.go
+++ b/terraform/providers/google/vendor/google.golang.org/grpc/grpclog/grpclog.go
@@ -18,18 +18,15 @@
 
 // Package grpclog defines logging for grpc.
 //
-// All logs in transport and grpclb packages only go to verbose level 2.
-// All logs in other packages in grpc are logged in spite of the verbosity level.
-// -// In the default logger, -// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, -// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. -package grpclog // import "google.golang.org/grpc/grpclog" +// In the default logger, severity level can be set by environment variable +// GRPC_GO_LOG_SEVERITY_LEVEL, verbosity level can be set by +// GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog import ( "os" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog/internal" ) func init() { @@ -38,58 +35,58 @@ func init() { // V reports whether verbosity level l is at least the requested verbose level. func V(l int) bool { - return grpclog.Logger.V(l) + return internal.LoggerV2Impl.V(l) } // Info logs to the INFO log. func Info(args ...any) { - grpclog.Logger.Info(args...) + internal.LoggerV2Impl.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. func Infof(format string, args ...any) { - grpclog.Logger.Infof(format, args...) + internal.LoggerV2Impl.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. func Infoln(args ...any) { - grpclog.Logger.Infoln(args...) + internal.LoggerV2Impl.Infoln(args...) } // Warning logs to the WARNING log. func Warning(args ...any) { - grpclog.Logger.Warning(args...) + internal.LoggerV2Impl.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. func Warningf(format string, args ...any) { - grpclog.Logger.Warningf(format, args...) + internal.LoggerV2Impl.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. func Warningln(args ...any) { - grpclog.Logger.Warningln(args...) + internal.LoggerV2Impl.Warningln(args...) } // Error logs to the ERROR log. func Error(args ...any) { - grpclog.Logger.Error(args...) + internal.LoggerV2Impl.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. func Errorf(format string, args ...any) { - grpclog.Logger.Errorf(format, args...) + internal.LoggerV2Impl.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. func Errorln(args ...any) { - grpclog.Logger.Errorln(args...) + internal.LoggerV2Impl.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. func Fatal(args ...any) { - grpclog.Logger.Fatal(args...) + internal.LoggerV2Impl.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -97,15 +94,15 @@ func Fatal(args ...any) { // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. func Fatalf(format string, args ...any) { - grpclog.Logger.Fatalf(format, args...) + internal.LoggerV2Impl.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) } // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. -// It calle os.Exit()) with exit code 1. +// It calls os.Exit() with exit code 1. func Fatalln(args ...any) { - grpclog.Logger.Fatalln(args...) + internal.LoggerV2Impl.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -114,19 +111,76 @@ func Fatalln(args ...any) { // // Deprecated: use Info. func Print(args ...any) { - grpclog.Logger.Info(args...) + internal.LoggerV2Impl.Info(args...) } // Printf prints to the logger. 
Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. func Printf(format string, args ...any) { - grpclog.Logger.Infof(format, args...) + internal.LoggerV2Impl.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. func Println(args ...any) { - grpclog.Logger.Infoln(args...) + internal.LoggerV2Impl.Infoln(args...) +} + +// InfoDepth logs to the INFO log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func InfoDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.InfoDepth(depth, args...) + } else { + internal.LoggerV2Impl.Infoln(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WarningDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.WarningDepth(depth, args...) + } else { + internal.LoggerV2Impl.Warningln(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ErrorDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.ErrorDepth(depth, args...) + } else { + internal.LoggerV2Impl.Errorln(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func FatalDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.FatalDepth(depth, args...) + } else { + internal.LoggerV2Impl.Fatalln(args...) + } + os.Exit(1) } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go b/terraform/providers/google/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go new file mode 100644 index 00000000000..59c03bc14c2 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go @@ -0,0 +1,26 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the grpclog package. +package internal + +// LoggerV2Impl is the logger used for the non-depth log functions. +var LoggerV2Impl LoggerV2 + +// DepthLoggerV2Impl is the logger used for the depth log functions. 
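To illustrate the fallback behavior of the exported depth helpers above: the sketch below installs a logger through the public grpclog API (SetLoggerV2 appears later in this patch). Because the default LoggerV2 built by NewLoggerV2WithVerbosity does not implement DepthLoggerV2, DepthLoggerV2Impl stays nil and InfoDepth and friends fall back to the plain ...ln variants. The package name is illustrative.

package example

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Route all severities to stderr with verbosity 2. This must run at init
	// time, before other gRPC calls; the logger globals are not
	// mutex-protected.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 2))
}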
+var DepthLoggerV2Impl DepthLoggerV2 diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/terraform/providers/google/vendor/google.golang.org/grpc/grpclog/internal/logger.go new file mode 100644 index 00000000000..e524fdd40b2 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/grpclog/internal/logger.go @@ -0,0 +1,87 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +// Logger mimics golang's standard Logger as an interface. +// +// Deprecated: use LoggerV2. +type Logger interface { + Fatal(args ...any) + Fatalf(format string, args ...any) + Fatalln(args ...any) + Print(args ...any) + Printf(format string, args ...any) + Println(args ...any) +} + +// LoggerWrapper wraps Logger into a LoggerV2. +type LoggerWrapper struct { + Logger +} + +// Info logs to INFO log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Info(args ...any) { + l.Logger.Print(args...) +} + +// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Infoln(args ...any) { + l.Logger.Println(args...) +} + +// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Infof(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Warning(args ...any) { + l.Logger.Print(args...) +} + +// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Warningln(args ...any) { + l.Logger.Println(args...) +} + +// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Warningf(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Error(args ...any) { + l.Logger.Print(args...) +} + +// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Errorln(args ...any) { + l.Logger.Println(args...) +} + +// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Errorf(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// V reports whether verbosity level l is at least the requested verbose level. +func (*LoggerWrapper) V(int) bool { + // Returns true for all verbose level. 
+ return true +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/terraform/providers/google/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go similarity index 52% rename from terraform/providers/google/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go rename to terraform/providers/google/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go index bfc45102ab2..07df71e98a8 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go @@ -1,6 +1,6 @@ /* * - * Copyright 2020 gRPC authors. + * Copyright 2024 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,59 +16,17 @@ * */ -// Package grpclog (internal) defines depth logging for grpc. -package grpclog +package internal import ( + "encoding/json" + "fmt" + "io" + "log" "os" ) -// Logger is the logger used for the non-depth log functions. -var Logger LoggerV2 - -// DepthLogger is the logger used for the depth log functions. -var DepthLogger DepthLoggerV2 - -// InfoDepth logs to the INFO log at the specified depth. -func InfoDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.InfoDepth(depth, args...) - } else { - Logger.Infoln(args...) - } -} - -// WarningDepth logs to the WARNING log at the specified depth. -func WarningDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.WarningDepth(depth, args...) - } else { - Logger.Warningln(args...) - } -} - -// ErrorDepth logs to the ERROR log at the specified depth. -func ErrorDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.ErrorDepth(depth, args...) - } else { - Logger.Errorln(args...) - } -} - -// FatalDepth logs to the FATAL log at the specified depth. -func FatalDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.FatalDepth(depth, args...) - } else { - Logger.Fatalln(args...) - } - os.Exit(1) -} - // LoggerV2 does underlying logging work for grpclog. -// This is a copy of the LoggerV2 defined in the external grpclog package. It -// is defined here to avoid a circular dependency. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. Info(args ...any) @@ -107,14 +65,13 @@ type LoggerV2 interface { // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements // DepthLoggerV2, the below functions will be called with the appropriate stack // depth set for trivial functions the logger may ignore. -// This is a copy of the DepthLoggerV2 defined in the external grpclog package. -// It is defined here to avoid a circular dependency. // // # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. type DepthLoggerV2 interface { + LoggerV2 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. @@ -124,3 +81,124 @@ type DepthLoggerV2 interface { // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...any) } + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. 
+ warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int + jsonFormat bool +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. + b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) +} + +func (g *loggerT) Info(args ...any) { + g.output(infoLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Infoln(args ...any) { + g.output(infoLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Infof(format string, args ...any) { + g.output(infoLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Warning(args ...any) { + g.output(warningLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Warningln(args ...any) { + g.output(warningLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Warningf(format string, args ...any) { + g.output(warningLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Error(args ...any) { + g.output(errorLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Errorln(args ...any) { + g.output(errorLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Errorf(format string, args ...any) { + g.output(errorLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Fatal(args ...any) { + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalln(args ...any) { + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalf(format string, args ...any) { + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} + +// LoggerV2Config configures the LoggerV2 implementation. +type LoggerV2Config struct { + // Verbosity sets the verbosity level of the logger. + Verbosity int + // FormatJSON controls whether the logger should output logs in JSON format. + FormatJSON bool +} + +// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration. +// The infoW, warningW, and errorW writers are used to write log messages of +// different severity levels. +func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { + var m []*log.Logger + flag := log.LstdFlags + if c.FormatJSON { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. 
+ m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON} +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/grpclog/logger.go b/terraform/providers/google/vendor/google.golang.org/grpc/grpclog/logger.go index b1674d8267c..4b203585707 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/grpclog/logger.go @@ -18,70 +18,17 @@ package grpclog -import "google.golang.org/grpc/internal/grpclog" +import "google.golang.org/grpc/grpclog/internal" // Logger mimics golang's standard Logger as an interface. // // Deprecated: use LoggerV2. -type Logger interface { - Fatal(args ...any) - Fatalf(format string, args ...any) - Fatalln(args ...any) - Print(args ...any) - Printf(format string, args ...any) - Println(args ...any) -} +type Logger internal.Logger // SetLogger sets the logger that is used in grpc. Call only from // init() functions. // // Deprecated: use SetLoggerV2. func SetLogger(l Logger) { - grpclog.Logger = &loggerWrapper{Logger: l} -} - -// loggerWrapper wraps Logger into a LoggerV2. -type loggerWrapper struct { - Logger -} - -func (g *loggerWrapper) Info(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Infoln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Infof(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Warning(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Warningln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Warningf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Error(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Errorln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Errorf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) V(l int) bool { - // Returns true for all verbose level. - return true + internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l} } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/terraform/providers/google/vendor/google.golang.org/grpc/grpclog/loggerv2.go index ecfd36d7130..892dc13d164 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -19,52 +19,16 @@ package grpclog import ( - "encoding/json" - "fmt" "io" - "log" "os" "strconv" "strings" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog/internal" ) // LoggerV2 does underlying logging work for grpclog. -type LoggerV2 interface { - // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...any) - // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...any) - // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...any) - // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...any) - // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...any) - // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...any) - // Error logs to ERROR log. 
Arguments are handled in the manner of fmt.Print. - Error(args ...any) - // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...any) - // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...any) - // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...any) - // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...any) - // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...any) - // V reports whether verbosity level l is at least the requested verbose level. - V(l int) bool -} +type LoggerV2 internal.LoggerV2 // SetLoggerV2 sets logger that is used in grpc to a V2 logger. // Not mutex-protected, should be called before any gRPC functions. @@ -72,34 +36,8 @@ func SetLoggerV2(l LoggerV2) { if _, ok := l.(*componentData); ok { panic("cannot use component logger as grpclog logger") } - grpclog.Logger = l - grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) -} - -const ( - // infoLog indicates Info severity. - infoLog int = iota - // warningLog indicates Warning severity. - warningLog - // errorLog indicates Error severity. - errorLog - // fatalLog indicates Fatal severity. - fatalLog -) - -// severityName contains the string representation of each severity. -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// loggerT is the default logger used by grpclog. -type loggerT struct { - m []*log.Logger - v int - jsonFormat bool + internal.LoggerV2Impl = l + internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2) } // NewLoggerV2 creates a loggerV2 with the provided writers. @@ -108,32 +46,13 @@ type loggerT struct { // Warning logs will be written to warningW and infoW. // Info logs will be written to infoW. func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{}) } // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and // verbosity level. func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) -} - -type loggerV2Config struct { - verbose int - jsonFormat bool -} - -func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { - var m []*log.Logger - flag := log.LstdFlags - if c.jsonFormat { - flag = 0 - } - m = append(m, log.New(infoW, "", flag)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. 
- m = append(m, log.New(ew, "", flag)) - m = append(m, log.New(ew, "", flag)) - return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v}) } // newLoggerV2 creates a loggerV2 to be used as default logger. @@ -161,80 +80,10 @@ func newLoggerV2() LoggerV2 { jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ - verbose: v, - jsonFormat: jsonFormat, - }) -} - -func (g *loggerT) output(severity int, s string) { - sevStr := severityName[severity] - if !g.jsonFormat { - g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) - return - } - // TODO: we can also include the logging component, but that needs more - // (API) changes. - b, _ := json.Marshal(map[string]string{ - "severity": sevStr, - "message": s, + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{ + Verbosity: v, + FormatJSON: jsonFormat, }) - g.m[severity].Output(2, string(b)) -} - -func (g *loggerT) Info(args ...any) { - g.output(infoLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Infoln(args ...any) { - g.output(infoLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Infof(format string, args ...any) { - g.output(infoLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Warning(args ...any) { - g.output(warningLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Warningln(args ...any) { - g.output(warningLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Warningf(format string, args ...any) { - g.output(warningLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Error(args ...any) { - g.output(errorLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Errorln(args ...any) { - g.output(errorLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Errorf(format string, args ...any) { - g.output(errorLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Fatal(args ...any) { - g.output(fatalLog, fmt.Sprint(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalln(args ...any) { - g.output(fatalLog, fmt.Sprintln(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalf(format string, args ...any) { - g.output(fatalLog, fmt.Sprintf(format, args...)) - os.Exit(1) -} - -func (g *loggerT) V(l int) bool { - return l <= g.v } // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements @@ -245,14 +94,4 @@ func (g *loggerT) V(l int) bool { // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. -type DepthLoggerV2 interface { - LoggerV2 - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...any) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...any) - // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...any) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. 
- FatalDepth(depth int, args ...any) -} +type DepthLoggerV2 internal.DepthLoggerV2 diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 6a93475a7fb..d92335445f6 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/health/v1/health.proto package grpc_health_v1 @@ -237,7 +237,7 @@ func file_grpc_health_v1_health_proto_rawDescGZIP() []byte { var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_grpc_health_v1_health_proto_goTypes = []interface{}{ +var file_grpc_health_v1_health_proto_goTypes = []any{ (HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus (*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest (*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse @@ -261,7 +261,7 @@ func file_grpc_health_v1_health_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*HealthCheckRequest); i { case 0: return &v.state @@ -273,7 +273,7 @@ func file_grpc_health_v1_health_proto_init() { return nil } } - file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*HealthCheckResponse); i { case 0: return &v.state diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index 8f793e6e89f..f96b8ab4927 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/health/v1/health.proto package grpc_health_v1 @@ -32,8 +32,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" @@ -43,6 +43,10 @@ const ( // HealthClient is the client API for Health service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// Health is gRPC's mechanism for checking whether a server is able to handle +// RPCs. 
Its semantics are documented in +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md. type HealthClient interface { // Check gets the health of the specified service. If the requested service // is unknown, the call will fail with status NOT_FOUND. If the caller does @@ -69,7 +73,7 @@ type HealthClient interface { // should assume this method is not supported and should not retry the // call. If the call terminates with any other status (including OK), // clients should retry the call with appropriate exponential backoff. - Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) + Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) } type healthClient struct { @@ -90,13 +94,13 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . return out, nil } -func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { +func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &healthWatchClient{ClientStream: stream} + x := &grpc.GenericClientStream[HealthCheckRequest, HealthCheckResponse]{ClientStream: stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -106,26 +110,16 @@ func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts . return x, nil } -type Health_WatchClient interface { - Recv() (*HealthCheckResponse, error) - grpc.ClientStream -} - -type healthWatchClient struct { - grpc.ClientStream -} - -func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { - m := new(HealthCheckResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type Health_WatchClient = grpc.ServerStreamingClient[HealthCheckResponse] // HealthServer is the server API for Health service. // All implementations should embed UnimplementedHealthServer -// for forward compatibility +// for forward compatibility. +// +// Health is gRPC's mechanism for checking whether a server is able to handle +// RPCs. Its semantics are documented in +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md. type HealthServer interface { // Check gets the health of the specified service. If the requested service // is unknown, the call will fail with status NOT_FOUND. If the caller does @@ -152,19 +146,23 @@ type HealthServer interface { // should assume this method is not supported and should not retry the // call. If the call terminates with any other status (including OK), // clients should retry the call with appropriate exponential backoff. - Watch(*HealthCheckRequest, Health_WatchServer) error + Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error } -// UnimplementedHealthServer should be embedded to have forward compatible implementations. -type UnimplementedHealthServer struct { -} +// UnimplementedHealthServer should be embedded to have +// forward compatible implementations. 
+// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedHealthServer struct{} func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") } -func (UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error { +func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error { return status.Errorf(codes.Unimplemented, "method Watch not implemented") } +func (UnimplementedHealthServer) testEmbeddedByValue() {} // UnsafeHealthServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to HealthServer will @@ -174,6 +172,13 @@ type UnsafeHealthServer interface { } func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) { + // If the following call panics, it indicates UnimplementedHealthServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&Health_ServiceDesc, srv) } @@ -200,21 +205,11 @@ func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { if err := stream.RecvMsg(m); err != nil { return err } - return srv.(HealthServer).Watch(m, &healthWatchServer{ServerStream: stream}) -} - -type Health_WatchServer interface { - Send(*HealthCheckResponse) error - grpc.ServerStream -} - -type healthWatchServer struct { - grpc.ServerStream + return srv.(HealthServer).Watch(m, &grpc.GenericServerStream[HealthCheckRequest, HealthCheckResponse]{ServerStream: stream}) } -func (x *healthWatchServer) Send(m *HealthCheckResponse) error { - return x.ServerStream.SendMsg(m) -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type Health_WatchServer = grpc.ServerStreamingServer[HealthCheckResponse] // Health_ServiceDesc is the grpc.ServiceDesc for Health service. // It's only intended for direct use with grpc.RegisterService, diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/health/server.go b/terraform/providers/google/vendor/google.golang.org/grpc/health/server.go index cce6312d77f..d4b4b708159 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/health/server.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/health/server.go @@ -51,7 +51,7 @@ func NewServer() *Server { } // Check implements `service Health`. 
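A minimal HealthServer written against the new generic stream types might look like the following sketch; the server type and its single-update Watch are illustrative assumptions, not part of this patch.

package example

import (
	"context"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// server embeds UnimplementedHealthServer by value, per the NOTE above, so
// RegisterHealthServer's init-time check passes.
type server struct {
	healthpb.UnimplementedHealthServer
}

func (server) Check(context.Context, *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
	return &healthpb.HealthCheckResponse{Status: healthpb.HealthCheckResponse_SERVING}, nil
}

// Watch uses the generic grpc.ServerStreamingServer type that the
// Health_WatchServer alias now points at. A real implementation would keep
// streaming updates until the RPC ends; this sketch sends one status.
func (server) Watch(_ *healthpb.HealthCheckRequest, stream grpc.ServerStreamingServer[healthpb.HealthCheckResponse]) error {
	return stream.Send(&healthpb.HealthCheckResponse{Status: healthpb.HealthCheckResponse_SERVING})
}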
-func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { +func (s *Server) Check(_ context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { s.mu.RLock() defer s.mu.RUnlock() if servingStatus, ok := s.statusMap[in.Service]; ok { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/backoff/backoff.go index fed1c011a32..b15cf482d29 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -25,10 +25,10 @@ package backoff import ( "context" "errors" + "math/rand" "time" grpcbackoff "google.golang.org/grpc/backoff" - "google.golang.org/grpc/internal/grpcrand" ) // Strategy defines the methodology for backing off after a grpc connection @@ -67,7 +67,7 @@ func (bc Exponential) Backoff(retries int) time.Duration { } // Randomize backoff delays so that if a cluster of requests start at // the same time, they won't operate in lockstep. - backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1) + backoff *= 1 + bc.Config.Jitter*(rand.Float64()*2-1) if backoff < 0 { return 0 } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go index 4cee66aeb6e..31c9cdc9d02 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go @@ -19,6 +19,7 @@ package balancergroup import ( + "encoding/json" "fmt" "sync" "time" @@ -29,6 +30,7 @@ import ( "google.golang.org/grpc/internal/cache" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" ) // subBalancerWrapper is used to keep the configurations that will be used to start @@ -97,7 +99,7 @@ func (sbc *subBalancerWrapper) startBalancer() { if sbc.balancer == nil { sbc.balancer = gracefulswitch.NewBalancer(sbc, sbc.buildOpts) } - sbc.group.logger.Infof("Creating child policy of type %q for locality %q", sbc.builder.Name(), sbc.id) + sbc.group.logger.Infof("Creating child policy of type %q for child %q", sbc.builder.Name(), sbc.id) sbc.balancer.SwitchTo(sbc.builder) if sbc.ccState != nil { sbc.balancer.UpdateClientConnState(*sbc.ccState) @@ -119,14 +121,11 @@ func (sbc *subBalancerWrapper) updateClientConnState(s balancer.ClientConnState) sbc.ccState = &s b := sbc.balancer if b == nil { - // This sub-balancer was closed. This should never happen because - // sub-balancers are closed when the locality is removed from EDS, or - // the balancer group is closed. There should be no further address - // updates when either of this happened. - // - // This will be a common case with priority support, because a - // sub-balancer (and the whole balancer group) could be closed because - // it's the lower priority, but it can still get address updates. + // A sub-balancer is closed when it is removed from the group or the + // group is closed as a whole, and is not expected to receive updates + // after that. 
But when used with the priority LB policy a sub-balancer + // (and the whole balancer group) could be closed because it's the lower + // priority, but it can still get address updates. return nil } return b.UpdateClientConnState(s) @@ -135,33 +134,16 @@ func (sbc *subBalancerWrapper) updateClientConnState(s balancer.ClientConnState) func (sbc *subBalancerWrapper) resolverError(err error) { b := sbc.balancer if b == nil { - // This sub-balancer was closed. This should never happen because - // sub-balancers are closed when the locality is removed from EDS, or - // the balancer group is closed. There should be no further address - // updates when either of this happened. - // - // This will be a common case with priority support, because a - // sub-balancer (and the whole balancer group) could be closed because - // it's the lower priority, but it can still get address updates. + // A sub-balancer is closed when it is removed from the group or the + // group is closed as a whole, and is not expected to receive updates + // after that. But when used with the priority LB policy a sub-balancer + // (and the whole balancer group) could be closed because it's the lower + // priority, but it can still get address updates. return } b.ResolverError(err) } -func (sbc *subBalancerWrapper) gracefulSwitch(builder balancer.Builder) { - sbc.builder = builder - b := sbc.balancer - // Even if you get an add and it persists builder but doesn't start - // balancer, this would leave graceful switch being nil, in which we are - // correctly overwriting with the recent builder here as well to use later. - // The graceful switch balancer's presence is an invariant of whether the - // balancer group is closed or not (if closed, nil, if started, present). - if sbc.balancer != nil { - sbc.group.logger.Infof("Switching child policy %v to type %v", sbc.id, sbc.builder.Name()) - b.SwitchTo(sbc.builder) - } -} - func (sbc *subBalancerWrapper) stopBalancer() { if sbc.balancer == nil { return @@ -170,7 +152,8 @@ func (sbc *subBalancerWrapper) stopBalancer() { sbc.balancer = nil } -// BalancerGroup takes a list of balancers, and make them into one balancer. +// BalancerGroup takes a list of balancers, each behind a gracefulswitch +// balancer, and make them into one balancer. // // Note that this struct doesn't implement balancer.Balancer, because it's not // intended to be used directly as a balancer. It's expected to be used as a @@ -221,7 +204,7 @@ type BalancerGroup struct { // after it's closed. // // We don't share the mutex to avoid deadlocks (e.g. a call to sub-balancer - // may call back to balancer group inline. It causes deaclock if they + // may call back to balancer group inline. It causes deadlock if they // require the same mutex). // // We should never need to hold multiple locks at the same time in this @@ -235,7 +218,7 @@ type BalancerGroup struct { // guards the map from SubConn to balancer ID, so updateSubConnState needs // to hold it shortly to potentially delete from the map. // - // UpdateState is called by the balancer state aggretator, and it will + // UpdateState is called by the balancer state aggregator, and it will // decide when and whether to call. // // The corresponding boolean incomingStarted is used to stop further updates @@ -309,11 +292,11 @@ func (bg *BalancerGroup) Start() { // AddWithClientConn adds a balancer with the given id to the group. The // balancer is built with a balancer builder registered with balancerName. 
The // given ClientConn is passed to the newly built balancer instead of the -// onepassed to balancergroup.New(). +// one passed to balancergroup.New(). // // TODO: Get rid of the existing Add() API and replace it with this. func (bg *BalancerGroup) AddWithClientConn(id, balancerName string, cc balancer.ClientConn) error { - bg.logger.Infof("Adding child policy of type %q for locality %q", balancerName, id) + bg.logger.Infof("Adding child policy of type %q for child %q", balancerName, id) builder := balancer.Get(balancerName) if builder == nil { return fmt.Errorf("unregistered balancer name %q", balancerName) @@ -329,7 +312,7 @@ func (bg *BalancerGroup) AddWithClientConn(id, balancerName string, cc balancer. if bg.outgoingStarted && bg.deletedBalancerCache != nil { if old, ok := bg.deletedBalancerCache.Remove(id); ok { if bg.logger.V(2) { - bg.logger.Infof("Removing and reusing child policy of type %q for locality %q from the balancer cache", balancerName, id) + bg.logger.Infof("Removing and reusing child policy of type %q for child %q from the balancer cache", balancerName, id) bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len()) } @@ -377,38 +360,19 @@ func (bg *BalancerGroup) Add(id string, builder balancer.Builder) { bg.AddWithClientConn(id, builder.Name(), bg.cc) } -// UpdateBuilder updates the builder for a current child, starting the Graceful -// Switch process for that child. -// -// TODO: update this API to take the name of the new builder instead. -func (bg *BalancerGroup) UpdateBuilder(id string, builder balancer.Builder) { - bg.outgoingMu.Lock() - // This does not deal with the balancer cache because this call should come - // after an Add call for a given child balancer. If the child is removed, - // the caller will call Add if the child balancer comes back which would - // then deal with the balancer cache. - sbc := bg.idToBalancerConfig[id] - if sbc == nil { - // simply ignore it if not present, don't error - return - } - sbc.gracefulSwitch(builder) - bg.outgoingMu.Unlock() -} - // Remove removes the balancer with id from the group. // // But doesn't close the balancer. The balancer is kept in a cache, and will be // closed after timeout. Cleanup work (closing sub-balancer and removing // subconns) will be done after timeout. 
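Aside on the Remove/cache semantics documented above: a removed child is parked in deletedBalancerCache and only closed when its timeout fires, which is why AddWithClientConn earlier in this hunk checks the cache before building a fresh child. A minimal, self-contained sketch of that park-then-expire pattern (the `cache` type and the 10s TTL are illustrative stand-ins, not the internal/cache API):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type cache struct {
	mu      sync.Mutex
	entries map[string]*time.Timer
}

// Add parks an entry; closeFn runs only if the TTL expires first.
func (c *cache) Add(id string, closeFn func()) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.entries[id] = time.AfterFunc(10*time.Second, func() {
		c.mu.Lock()
		delete(c.entries, id)
		c.mu.Unlock()
		closeFn() // cleanup happens after the timeout, as the doc above says
	})
}

// Remove reclaims a parked entry before its timer fires, mirroring the
// reuse path in AddWithClientConn.
func (c *cache) Remove(id string) bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	if t, ok := c.entries[id]; ok {
		t.Stop()
		delete(c.entries, id)
		return true
	}
	return false
}

func main() {
	c := &cache{entries: make(map[string]*time.Timer)}
	c.Add("child-a", func() { fmt.Println("closing child-a") })
	fmt.Println("reused:", c.Remove("child-a")) // reused: true
}
```

Stopping the timer on reuse is what makes a quick remove-then-add cycle cheap: the child balancer and its subconns never actually shut down.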
func (bg *BalancerGroup) Remove(id string) { - bg.logger.Infof("Removing child policy for locality %q", id) + bg.logger.Infof("Removing child policy for child %q", id) bg.outgoingMu.Lock() sbToRemove, ok := bg.idToBalancerConfig[id] if !ok { - bg.logger.Errorf("Child policy for locality %q does not exist in the balancer group", id) + bg.logger.Errorf("Child policy for child %q does not exist in the balancer group", id) bg.outgoingMu.Unlock() return } @@ -424,13 +388,13 @@ func (bg *BalancerGroup) Remove(id string) { if bg.deletedBalancerCache != nil { if bg.logger.V(2) { - bg.logger.Infof("Adding child policy for locality %q to the balancer cache", id) + bg.logger.Infof("Adding child policy for child %q to the balancer cache", id) bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len()) } bg.deletedBalancerCache.Add(id, sbToRemove, func() { if bg.logger.V(2) { - bg.logger.Infof("Removing child policy for locality %q from the balancer cache after timeout", id) + bg.logger.Infof("Removing child policy for child %q from the balancer cache after timeout", id) bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len()) } @@ -571,7 +535,7 @@ func (bg *BalancerGroup) newSubConn(config *subBalancerWrapper, addrs []resolver // aggregator will create an aggregated picker and an aggregated connectivity // state, then forward to ClientConn. func (bg *BalancerGroup) updateBalancerState(id string, state balancer.State) { - bg.logger.Infof("Balancer state update from locality %v, new state: %+v", id, state) + bg.logger.Infof("Balancer state update from child %v, new state: %+v", id, state) // Send new state to the aggregator, without holding the incomingMu. // incomingMu is to protect all calls to the parent ClientConn, this update @@ -636,3 +600,14 @@ func (bg *BalancerGroup) ExitIdleOne(id string) { } bg.outgoingMu.Unlock() } + +// ParseConfig parses a child config list and returns a LB config for the +// gracefulswitch Balancer. +// +// cfg is expected to be a json.RawMessage containing a JSON array of LB policy +// names + configs as the format of the "loadBalancingConfig" field in +// ServiceConfig. It returns a type that should be passed to +// UpdateClientConnState in the BalancerConfig field. +func ParseConfig(cfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return gracefulswitch.ParseConfig(cfg) +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index aa4505a871d..9669328914a 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -106,7 +106,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry } // Log creates a proto binary log entry, and logs it to the sink. 
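On the ParseConfig helper added to balancergroup above: it simply forwards to gracefulswitch.ParseConfig, so the input is the familiar service-config shape. A hedged sketch of a call site follows; it would only compile inside the grpc module (the package is internal), and the "round_robin" choice is illustrative:

```go
package main

import (
	"encoding/json"
	"log"

	_ "google.golang.org/grpc" // registers built-in LB policies such as round_robin
	"google.golang.org/grpc/internal/balancergroup"
)

func main() {
	// Same shape as a service config's "loadBalancingConfig" field: a JSON
	// array of {policy-name: policy-config} objects, tried in order.
	cfg := json.RawMessage(`[{"round_robin": {}}]`)
	lbCfg, err := balancergroup.ParseConfig(cfg)
	if err != nil {
		log.Fatalf("ParseConfig: %v", err) // unregistered policy name or bad JSON
	}
	// Per the doc comment above, lbCfg then travels to the group via
	// balancer.ClientConnState.BalancerConfig.
	_ = lbCfg
}
```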
-func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { +func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) { ml.sink.Write(ml.Build(c)) } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/channelz/channelmap.go index dfe18b08925..64c791953d0 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/channelz/channelmap.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/channelz/channelmap.go @@ -46,7 +46,7 @@ type entry interface { // channelMap is the storage data structure for channelz. // -// Methods of channelMap can be divided in two two categories with respect to +// Methods of channelMap can be divided into two categories with respect to // locking. // // 1. Methods acquire the global lock. @@ -234,13 +234,6 @@ func copyMap(m map[int64]string) map[int64]string { return n } -func min(a, b int) int { - if a < b { - return a - } - return b -} - func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) { if maxResults <= 0 { maxResults = EntriesPerPage diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 03e24e1507a..078bb81238b 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -33,7 +33,7 @@ var ( // outside this package except by tests. IDGen IDGenerator - db *channelMap = newChannelMap() + db = newChannelMap() // EntriesPerPage defines the number of channelz entries to be shown on a web page. EntriesPerPage = 50 curState int32 diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go index d1ed8df6a51..0e6e18e185c 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go @@ -35,13 +35,13 @@ type SocketOptionData struct { // Getsockopt defines the function to get socket options requested by channelz. // It is to be passed to syscall.RawConn.Control(). // Windows OS doesn't support Socket Option -func (s *SocketOptionData) Getsockopt(fd uintptr) { +func (s *SocketOptionData) Getsockopt(uintptr) { once.Do(func() { logger.Warning("Channelz: socket options are not supported on non-linux environments") }) } // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c any) *SocketOptionData { +func GetSocketOption(any) *SocketOptionData { return nil } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 9c915d9e4b2..452985f8d8f 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -40,6 +40,16 @@ var ( // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS // handshakes that can be performed. 
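One detail from the channelmap.go hunk above: the local min helper can be deleted because Go 1.21 made min and max predeclared builtins for ordered types, so per-package integer helpers are redundant once the module targets that release. A quick illustration (assumes Go >= 1.21):

```go
package main

import "fmt"

func main() {
	// min and max are predeclared since Go 1.21; no helper needed.
	maxResults, available := 50, 37
	fmt.Println(min(maxResults, available)) // 37
	fmt.Println(max(3.5, 2.0))              // 3.5
}
```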
ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) + // EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled + // should be rejected. The HTTP/2 protocol requires ALPN to be enabled, this + // option is present for backward compatibility. This option may be overridden + // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" + // or "false". + EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true) + // XDSFallbackSupport is the env variable that controls whether support for + // xDS fallback is turned on. If this is unset or is false, only the first + // xDS server in the list of server configs will be used. + XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/experimental.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/experimental.go index 7f7044e1731..7617be21589 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/experimental.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/experimental.go @@ -18,11 +18,11 @@ package internal var ( - // WithRecvBufferPool is implemented by the grpc package and returns a dial + // WithBufferPool is implemented by the grpc package and returns a dial // option to configure a shared buffer pool for a grpc.ClientConn. - WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption - // RecvBufferPool is implemented by the grpc package and returns a server + // BufferPool is implemented by the grpc package and returns a server // option to configure a shared buffer pool for a grpc.Server. 
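On the envconfig hunk just above: the comments for the two new knobs imply the usual tri-state convention, where a true default (EnforceALPNEnabled) stays on unless the variable is explicitly "false", and a false default (XDSFallbackSupport) stays off unless it is explicitly "true". A sketch of a helper with that behavior; this is an assumption about boolFromEnv's semantics, since its body is not shown in this hunk:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// envBool mirrors the behavior implied above: flip a true default only on
// an explicit "false", and a false default only on an explicit "true".
// (Hypothetical helper, not boolFromEnv's actual source.)
func envBool(name string, def bool) bool {
	v := os.Getenv(name)
	if def {
		return !strings.EqualFold(v, "false")
	}
	return strings.EqualFold(v, "true")
}

func main() {
	os.Setenv("GRPC_ENFORCE_ALPN_ENABLED", "false")
	fmt.Println(envBool("GRPC_ENFORCE_ALPN_ENABLED", true))       // false: opted out
	fmt.Println(envBool("GRPC_EXPERIMENTAL_XDS_FALLBACK", false)) // false: unset
}
```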
- RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption + BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption ) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go index 6717b757f80..43423d8ad9a 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go @@ -62,9 +62,9 @@ func isRunningOnGCE(manufacturer []byte, goos string) bool { name = strings.TrimSpace(name) return name == "Google" || name == "Google Compute Engine" case "windows": - name = strings.Replace(name, " ", "", -1) - name = strings.Replace(name, "\n", "", -1) - name = strings.Replace(name, "\r", "", -1) + name = strings.ReplaceAll(name, " ", "") + name = strings.ReplaceAll(name, "\n", "") + name = strings.ReplaceAll(name, "\r", "") return name == "Google" default: return false diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go similarity index 63% rename from terraform/providers/google/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go rename to terraform/providers/google/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go index faa998de763..092ad187a2c 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go @@ -16,17 +16,21 @@ * */ +// Package grpclog provides logging functionality for internal gRPC packages, +// outside of the functionality provided by the external `grpclog` package. package grpclog import ( "fmt" + + "google.golang.org/grpc/grpclog" ) // PrefixLogger does logging with a prefix. // // Logging method on a nil logs without any prefix. type PrefixLogger struct { - logger DepthLoggerV2 + logger grpclog.DepthLoggerV2 prefix string } @@ -38,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...any) { pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) return } - InfoDepth(1, fmt.Sprintf(format, args...)) + grpclog.InfoDepth(1, fmt.Sprintf(format, args...)) } // Warningf does warning logging. @@ -48,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...any) { pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) return } - WarningDepth(1, fmt.Sprintf(format, args...)) + grpclog.WarningDepth(1, fmt.Sprintf(format, args...)) } // Errorf does error logging. @@ -58,36 +62,18 @@ func (pl *PrefixLogger) Errorf(format string, args ...any) { pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) return } - ErrorDepth(1, fmt.Sprintf(format, args...)) -} - -// Debugf does info logging at verbose level 2. -func (pl *PrefixLogger) Debugf(format string, args ...any) { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - if !Logger.V(2) { - return - } - if pl != nil { - // Handle nil, so the tests can pass in a nil logger. 
- format = pl.prefix + format - pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) - return - } - InfoDepth(1, fmt.Sprintf(format, args...)) - + grpclog.ErrorDepth(1, fmt.Sprintf(format, args...)) } // V reports whether verbosity level l is at least the requested verbose level. func (pl *PrefixLogger) V(l int) bool { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - return Logger.V(l) + if pl != nil { + return pl.logger.V(l) + } + return true } // NewPrefixLogger creates a prefix logger with the given prefix. -func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger { +func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger { return &PrefixLogger{logger: logger, prefix: prefix} } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go deleted file mode 100644 index 0126d6b5108..00000000000 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ /dev/null @@ -1,100 +0,0 @@ -//go:build !go1.21 - -// TODO: when this file is deleted (after Go 1.20 support is dropped), delete -// all of grpcrand and call the rand package directly. - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcrand implements math/rand functions in a concurrent-safe way -// with a global random source, independent of math/rand's global source. -package grpcrand - -import ( - "math/rand" - "sync" - "time" -) - -var ( - r = rand.New(rand.NewSource(time.Now().UnixNano())) - mu sync.Mutex -) - -// Int implements rand.Int on the grpcrand global source. -func Int() int { - mu.Lock() - defer mu.Unlock() - return r.Int() -} - -// Int63n implements rand.Int63n on the grpcrand global source. -func Int63n(n int64) int64 { - mu.Lock() - defer mu.Unlock() - return r.Int63n(n) -} - -// Intn implements rand.Intn on the grpcrand global source. -func Intn(n int) int { - mu.Lock() - defer mu.Unlock() - return r.Intn(n) -} - -// Int31n implements rand.Int31n on the grpcrand global source. -func Int31n(n int32) int32 { - mu.Lock() - defer mu.Unlock() - return r.Int31n(n) -} - -// Float64 implements rand.Float64 on the grpcrand global source. -func Float64() float64 { - mu.Lock() - defer mu.Unlock() - return r.Float64() -} - -// Uint64 implements rand.Uint64 on the grpcrand global source. -func Uint64() uint64 { - mu.Lock() - defer mu.Unlock() - return r.Uint64() -} - -// Uint32 implements rand.Uint32 on the grpcrand global source. -func Uint32() uint32 { - mu.Lock() - defer mu.Unlock() - return r.Uint32() -} - -// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. 
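Context for the grpcrand deletions in progress here: the top-level math/rand functions are already safe for concurrent use, and once Go 1.20 support is dropped they also auto-seed, so the TODO in the file header is being carried out and callers (like the backoff change earlier in this patch) use the rand package directly. A small sketch of the jittered backoff computed there, assuming Go 1.21+:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitteredBackoff reproduces the formula from internal/backoff above:
// scale the exponential delay by a random factor in [1-jitter, 1+jitter]
// so that a burst of retries does not operate in lockstep.
func jitteredBackoff(base time.Duration, jitter float64) time.Duration {
	d := float64(base) * (1 + jitter*(rand.Float64()*2-1))
	if d < 0 {
		return 0
	}
	return time.Duration(d)
}

func main() {
	for range [3]struct{}{} {
		fmt.Println(jitteredBackoff(time.Second, 0.2)) // e.g. 934ms, 1.13s, ...
	}
}
```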
-func ExpFloat64() float64 { - mu.Lock() - defer mu.Unlock() - return r.ExpFloat64() -} - -// Shuffle implements rand.Shuffle on the grpcrand global source. -var Shuffle = func(n int, f func(int, int)) { - mu.Lock() - defer mu.Unlock() - r.Shuffle(n, f) -} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go deleted file mode 100644 index c37299af1ef..00000000000 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go +++ /dev/null @@ -1,73 +0,0 @@ -//go:build go1.21 - -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcrand implements math/rand functions in a concurrent-safe way -// with a global random source, independent of math/rand's global source. -package grpcrand - -import "math/rand" - -// This implementation will be used for Go version 1.21 or newer. -// For older versions, the original implementation with mutex will be used. - -// Int implements rand.Int on the grpcrand global source. -func Int() int { - return rand.Int() -} - -// Int63n implements rand.Int63n on the grpcrand global source. -func Int63n(n int64) int64 { - return rand.Int63n(n) -} - -// Intn implements rand.Intn on the grpcrand global source. -func Intn(n int) int { - return rand.Intn(n) -} - -// Int31n implements rand.Int31n on the grpcrand global source. -func Int31n(n int32) int32 { - return rand.Int31n(n) -} - -// Float64 implements rand.Float64 on the grpcrand global source. -func Float64() float64 { - return rand.Float64() -} - -// Uint64 implements rand.Uint64 on the grpcrand global source. -func Uint64() uint64 { - return rand.Uint64() -} - -// Uint32 implements rand.Uint32 on the grpcrand global source. -func Uint32() uint32 { - return rand.Uint32() -} - -// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. -func ExpFloat64() float64 { - return rand.ExpFloat64() -} - -// Shuffle implements rand.Shuffle on the grpcrand global source. -var Shuffle = func(n int, f func(int, int)) { - rand.Shuffle(n, f) -} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index f7f40a16ace..19b9d639275 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -53,16 +53,28 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { return cs } -// Schedule adds a callback to be scheduled after existing callbacks are run. +// TrySchedule tries to schedules the provided callback function f to be +// executed in the order it was added. This is a best-effort operation. 
If the +// context passed to NewCallbackSerializer was canceled before this method is +// called, the callback will not be scheduled. // // Callbacks are expected to honor the context when performing any blocking // operations, and should return early when the context is canceled. +func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) { + cs.callbacks.Put(f) +} + +// ScheduleOr schedules the provided callback function f to be executed in the +// order it was added. If the context passed to NewCallbackSerializer has been +// canceled before this method is called, the onFailure callback will be +// executed inline instead. // -// Return value indicates if the callback was successfully added to the list of -// callbacks to be executed by the serializer. It is not possible to add -// callbacks once the context passed to NewCallbackSerializer is cancelled. -func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { - return cs.callbacks.Put(f) == nil +// Callbacks are expected to honor the context when performing any blocking +// operations, and should return early when the context is canceled. +func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) { + if cs.callbacks.Put(f) != nil { + onFailure() + } } func (cs *CallbackSerializer) run(ctx context.Context) { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go index aef8cec1ab0..6d8c2f518df 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -77,7 +77,7 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { if ps.msg != nil { msg := ps.msg - ps.cs.Schedule(func(context.Context) { + ps.cs.TrySchedule(func(context.Context) { ps.mu.Lock() defer ps.mu.Unlock() if !ps.subscribers[sub] { @@ -103,7 +103,7 @@ func (ps *PubSub) Publish(msg any) { ps.msg = msg for sub := range ps.subscribers { s := sub - ps.cs.Schedule(func(context.Context) { + ps.cs.TrySchedule(func(context.Context) { ps.mu.Lock() defer ps.mu.Unlock() if !ps.subscribers[s] { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/internal.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/internal.go index 48d24bdb4e6..7aae9240ffc 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/internal.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/internal.go @@ -106,6 +106,14 @@ var ( // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. ClearGlobalDialOptions func() + + // AddGlobalPerTargetDialOptions adds a PerTargetDialOption that will be + // configured for newly created ClientConns. + AddGlobalPerTargetDialOptions any // func (opt any) + // ClearGlobalPerTargetDialOptions clears the slice of global late apply + // dial options. + ClearGlobalPerTargetDialOptions func() + // JoinDialOptions combines the dial options passed as arguments into a // single dial option. JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption @@ -126,7 +134,8 @@ var ( // deleted or changed. 
BinaryLogger any // func(binarylog.Logger) grpc.ServerOption - // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn + // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a + // provided grpc.ClientConn. SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using @@ -174,7 +183,7 @@ var ( // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra // metadata to RPCs. - GRPCResolverSchemeExtraMetadata string = "xds" + GRPCResolverSchemeExtraMetadata = "xds" // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. EnterIdleModeForTesting any // func(*grpc.ClientConn) @@ -184,25 +193,45 @@ var ( ChannelzTurnOffForTesting func() - // TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found - // error for a given resource type and name. This is usually triggered when - // the associated watch timer fires. For testing purposes, having this - // function makes events more predictable than relying on timer events. - TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error - - // TriggerXDSResourceNameNotFoundClient invokes the testing xDS Client - // singleton to invoke resource not found for a resource type name and - // resource name. - TriggerXDSResourceNameNotFoundClient any // func(string, string) error + // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to + // invoke resource-not-found error for the given resource type and name. + TriggerXDSResourceNotFoundForTesting any // func(xdsclient.XDSClient, xdsresource.Type, string) error - // FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD. + // FromOutgoingContextRaw returns the un-merged, intermediary contents of + // metadata.rawMD. FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool) - // UserSetDefaultScheme is set to true if the user has overridden the default resolver scheme. - UserSetDefaultScheme bool = false + // UserSetDefaultScheme is set to true if the user has overridden the + // default resolver scheme. + UserSetDefaultScheme = false + + // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n + // is the number of elements. swap swaps the elements with indexes i and j. + ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) + + // ConnectedAddress returns the connected address for a SubConnState. The + // address is only valid if the state is READY. + ConnectedAddress any // func (scs SubConnState) resolver.Address + + // SetConnectedAddress sets the connected address for a SubConnState. + SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address) + + // SnapshotMetricRegistryForTesting snapshots the global data of the metric + // registry. Returns a cleanup function that sets the metric registry to its + // original state. Only called in testing functions. + SnapshotMetricRegistryForTesting func() func() + + // SetDefaultBufferPoolForTesting updates the default buffer pool, for + // testing purposes. + SetDefaultBufferPoolForTesting any // func(mem.BufferPool) + + // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for + // testing purposes. + SetBufferPoolingThresholdForTesting any // func(int) ) -// HealthChecker defines the signature of the client-side LB channel health checking function. 
+// HealthChecker defines the signature of the client-side LB channel health +// checking function. // // The implementation is expected to create a health checking RPC stream by // calling newStream(), watch for the health status of serviceName, and report diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go index 70449ffd14c..703091047b4 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/lookup/v1/rls.proto package grpc_lookup_v1 @@ -313,7 +313,7 @@ func file_grpc_lookup_v1_rls_proto_rawDescGZIP() []byte { var file_grpc_lookup_v1_rls_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_grpc_lookup_v1_rls_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_grpc_lookup_v1_rls_proto_goTypes = []interface{}{ +var file_grpc_lookup_v1_rls_proto_goTypes = []any{ (RouteLookupRequest_Reason)(0), // 0: grpc.lookup.v1.RouteLookupRequest.Reason (*RouteLookupRequest)(nil), // 1: grpc.lookup.v1.RouteLookupRequest (*RouteLookupResponse)(nil), // 2: grpc.lookup.v1.RouteLookupResponse @@ -340,7 +340,7 @@ func file_grpc_lookup_v1_rls_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_lookup_v1_rls_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lookup_v1_rls_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*RouteLookupRequest); i { case 0: return &v.state @@ -352,7 +352,7 @@ func file_grpc_lookup_v1_rls_proto_init() { return nil } } - file_grpc_lookup_v1_rls_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lookup_v1_rls_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*RouteLookupResponse); i { case 0: return &v.state diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go index e3144f94c7c..a0be3c8cb26 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/lookup/v1/rls_config.proto package grpc_lookup_v1 @@ -271,6 +271,8 @@ type HttpKeyBuilder struct { // for example if you are suppressing a lot of information from the URL, but // need to separately cache and request URLs with that content. ConstantKeys map[string]string `protobuf:"bytes,5,rep,name=constant_keys,json=constantKeys,proto3" json:"constant_keys,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // If specified, the HTTP method/verb will be extracted under this key name. 
+ Method string `protobuf:"bytes,6,opt,name=method,proto3" json:"method,omitempty"` } func (x *HttpKeyBuilder) Reset() { @@ -340,6 +342,13 @@ func (x *HttpKeyBuilder) GetConstantKeys() map[string]string { return nil } +func (x *HttpKeyBuilder) GetMethod() string { + if x != nil { + return x.Method + } + return "" +} + type RouteLookupConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -707,7 +716,7 @@ var file_grpc_lookup_v1_rls_config_proto_rawDesc = []byte{ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0xf1, 0x02, 0x0a, 0x0e, 0x48, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x75, + 0x38, 0x01, 0x22, 0x89, 0x03, 0x0a, 0x0e, 0x48, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x68, 0x6f, 0x73, 0x74, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x61, @@ -726,58 +735,60 @@ var file_grpc_lookup_v1_rls_config_proto_rawDesc = []byte{ 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x74, 0x4b, 0x65, 0x79, 0x73, 0x1a, 0x3f, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x74, 0x4b, 0x65, 0x79, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa6, 0x04, 0x0a, 0x11, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x49, 0x0a, 0x10, - 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, - 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x42, - 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x52, 0x0f, 0x68, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x67, 0x72, 0x70, 0x63, 0x5f, - 0x6b, 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, - 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, - 0x72, 0x52, 0x0f, 0x67, 0x72, 0x70, 0x63, 0x4b, 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, - 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x6b, - 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4f, 0x0a, 0x16, 0x6c, 0x6f, 0x6f, - 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x61, - 0x78, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x12, 0x36, - 0x0a, 0x09, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x74, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a, 0x3f, 0x0a, + 0x11, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa6, + 0x04, 0x0a, 0x11, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x49, 0x0a, 0x10, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6b, 0x65, 0x79, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x52, 0x0f, + 0x68, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x12, + 0x49, 0x0a, 0x10, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4b, + 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x52, 0x0f, 0x67, 0x72, 0x70, 0x63, 0x4b, + 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x4f, 0x0a, 0x16, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x73, 0x74, - 0x61, 0x6c, 0x65, 0x41, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, - 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x0e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, - 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x54, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75, 
0x6c, 0x74, - 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4a, 0x04, 0x08, 0x0a, - 0x10, 0x0b, 0x52, 0x1b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x63, - 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x22, - 0x70, 0x0a, 0x1b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x51, - 0x0a, 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, - 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, - 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x42, 0x53, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, - 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x52, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, - 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, 0x6f, - 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x6c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, + 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x5f, + 0x61, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x41, 0x67, 0x65, 0x12, 0x28, + 0x0a, 0x10, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x25, 0x0a, + 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x52, 0x1b, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x73, + 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x22, 0x70, 0x0a, 0x1b, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x4c, 0x6f, 0x6f, 0x6b, 
0x75, 0x70, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, + 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x53, 0x0a, 0x11, 0x69, 0x6f, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, 0x0e, + 0x52, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, + 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -793,7 +804,7 @@ func file_grpc_lookup_v1_rls_config_proto_rawDescGZIP() []byte { } var file_grpc_lookup_v1_rls_config_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_grpc_lookup_v1_rls_config_proto_goTypes = []interface{}{ +var file_grpc_lookup_v1_rls_config_proto_goTypes = []any{ (*NameMatcher)(nil), // 0: grpc.lookup.v1.NameMatcher (*GrpcKeyBuilder)(nil), // 1: grpc.lookup.v1.GrpcKeyBuilder (*HttpKeyBuilder)(nil), // 2: grpc.lookup.v1.HttpKeyBuilder @@ -832,7 +843,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_lookup_v1_rls_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lookup_v1_rls_config_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*NameMatcher); i { case 0: return &v.state @@ -844,7 +855,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() { return nil } } - file_grpc_lookup_v1_rls_config_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lookup_v1_rls_config_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*GrpcKeyBuilder); i { case 0: return &v.state @@ -856,7 +867,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() { return nil } } - file_grpc_lookup_v1_rls_config_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lookup_v1_rls_config_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*HttpKeyBuilder); i { case 0: return &v.state @@ -868,7 +879,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() { return nil } } - file_grpc_lookup_v1_rls_config_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lookup_v1_rls_config_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*RouteLookupConfig); i { case 0: return &v.state @@ -880,7 +891,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() { return nil } } - file_grpc_lookup_v1_rls_config_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lookup_v1_rls_config_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*RouteLookupClusterSpecifier); i { case 0: return &v.state @@ -892,7 +903,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() { return nil } } - file_grpc_lookup_v1_rls_config_proto_msgTypes[5].Exporter = func(v 
interface{}, i int) interface{} { + file_grpc_lookup_v1_rls_config_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*GrpcKeyBuilder_Name); i { case 0: return &v.state @@ -904,7 +915,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() { return nil } } - file_grpc_lookup_v1_rls_config_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lookup_v1_rls_config_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*GrpcKeyBuilder_ExtraKeys); i { case 0: return &v.state diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go index 3ce28f68e0e..23dcb2100c3 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/lookup/v1/rls.proto package grpc_lookup_v1 @@ -64,21 +64,25 @@ func (c *routeLookupServiceClient) RouteLookup(ctx context.Context, in *RouteLoo // RouteLookupServiceServer is the server API for RouteLookupService service. // All implementations must embed UnimplementedRouteLookupServiceServer -// for forward compatibility +// for forward compatibility. type RouteLookupServiceServer interface { // Lookup returns a target for a single key. RouteLookup(context.Context, *RouteLookupRequest) (*RouteLookupResponse, error) mustEmbedUnimplementedRouteLookupServiceServer() } -// UnimplementedRouteLookupServiceServer must be embedded to have forward compatible implementations. -type UnimplementedRouteLookupServiceServer struct { -} +// UnimplementedRouteLookupServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedRouteLookupServiceServer struct{} func (UnimplementedRouteLookupServiceServer) RouteLookup(context.Context, *RouteLookupRequest) (*RouteLookupResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RouteLookup not implemented") } func (UnimplementedRouteLookupServiceServer) mustEmbedUnimplementedRouteLookupServiceServer() {} +func (UnimplementedRouteLookupServiceServer) testEmbeddedByValue() {} // UnsafeRouteLookupServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to RouteLookupServiceServer will @@ -88,6 +92,13 @@ type UnsafeRouteLookupServiceServer interface { } func RegisterRouteLookupServiceServer(s grpc.ServiceRegistrar, srv RouteLookupServiceServer) { + // If the following call panics, it indicates UnimplementedRouteLookupServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&RouteLookupService_ServiceDesc, srv) } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index f3f52a59a86..4552db16b02 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -24,6 +24,7 @@ import ( "context" "encoding/json" "fmt" + "math/rand" "net" "os" "strconv" @@ -35,7 +36,6 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/resolver/dns/internal" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" @@ -63,6 +63,8 @@ var ( func init() { resolver.Register(NewBuilder()) internal.TimeAfterFunc = time.After + internal.TimeNowFunc = time.Now + internal.TimeUntilFunc = time.Until internal.NewNetResolver = newNetResolver internal.AddressDialer = addressDialer } @@ -209,12 +211,12 @@ func (d *dnsResolver) watcher() { err = d.cc.UpdateState(*state) } - var waitTime time.Duration + var nextResolutionTime time.Time if err == nil { // Success resolving, wait for the next ResolveNow. However, also wait 30 // seconds at the very least to prevent constantly re-resolving. backoffIndex = 1 - waitTime = MinResolutionInterval + nextResolutionTime = internal.TimeNowFunc().Add(MinResolutionInterval) select { case <-d.ctx.Done(): return @@ -223,13 +225,13 @@ func (d *dnsResolver) watcher() { } else { // Poll on an error found in DNS Resolver or an error received from // ClientConn. - waitTime = backoff.DefaultExponential.Backoff(backoffIndex) + nextResolutionTime = internal.TimeNowFunc().Add(backoff.DefaultExponential.Backoff(backoffIndex)) backoffIndex++ } select { case <-d.ctx.Done(): return - case <-internal.TimeAfterFunc(waitTime): + case <-internal.TimeAfterFunc(internal.TimeUntilFunc(nextResolutionTime)): } } } @@ -423,7 +425,7 @@ func chosenByPercentage(a *int) bool { if a == nil { return true } - return grpcrand.Intn(100)+1 <= *a + return rand.Intn(100)+1 <= *a } func canaryingSC(js string) string { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go index a7ecaf8d522..c0eae4f5f83 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go @@ -51,11 +51,22 @@ var ( // The following vars are overridden from tests. var ( // TimeAfterFunc is used by the DNS resolver to wait for the given duration - // to elapse. In non-test code, this is implemented by time.After. In test + // to elapse. In non-test code, this is implemented by time.After. In test // code, this can be used to control the amount of time the resolver is // blocked waiting for the duration to elapse. TimeAfterFunc func(time.Duration) <-chan time.Time + // TimeNowFunc is used by the DNS resolver to get the current time. + // In non-test code, this is implemented by time.Now. 
In test code, + // this can be used to control the current time for the resolver. + TimeNowFunc func() time.Time + + // TimeUntilFunc is used by the DNS resolver to calculate the remaining + // wait time for re-resolution. In non-test code, this is implemented by + // time.Until. In test code, this can be used to control the remaining + // time for resolver to wait for re-resolution. + TimeUntilFunc func(time.Time) time.Duration + // NewNetResolver returns the net.Resolver instance for the given target. NewNetResolver func(string) (NetResolver, error) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go index afac56572ad..b901c7bace5 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -55,7 +55,7 @@ func (r *passthroughResolver) start() { r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) } -func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} +func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {} func (*passthroughResolver) Close() {} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go new file mode 100644 index 00000000000..be110d41f9a --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go @@ -0,0 +1,95 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package stats + +import ( + "fmt" + + estats "google.golang.org/grpc/experimental/stats" + "google.golang.org/grpc/stats" +) + +// MetricsRecorderList forwards Record calls to all of its metricsRecorders. +// +// It eats any record calls where the label values provided do not match the +// number of label keys. +type MetricsRecorderList struct { + // metricsRecorders are the metrics recorders this list will forward to. + metricsRecorders []estats.MetricsRecorder +} + +// NewMetricsRecorderList creates a new metric recorder list with all the stats +// handlers provided which implement the MetricsRecorder interface. +// If no stats handlers provided implement the MetricsRecorder interface, +// the MetricsRecorder list returned is a no-op. 
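Before the constructor below: the contract documented above (forward to every stats handler that also implements MetricsRecorder, and degrade to a no-op when none do) boils down to an interface-filtering fan-out. A toy reduction with stand-in types; `recorder` and `printRecorder` are not the estats API:

```go
package main

import "fmt"

type recorder interface{ record(name string, v int64) }

type printRecorder struct{ id string }

func (p printRecorder) record(name string, v int64) {
	fmt.Printf("[%s] %s += %d\n", p.id, name, v)
}

// newList keeps only the handlers that implement recorder, mirroring how
// NewMetricsRecorderList filters stats.Handlers; an empty list is a no-op.
func newList(handlers []any) []recorder {
	var rs []recorder
	for _, h := range handlers {
		if r, ok := h.(recorder); ok {
			rs = append(rs, r)
		}
	}
	return rs
}

func main() {
	list := newList([]any{printRecorder{"otel"}, "not-a-recorder"})
	for _, r := range list { // fan out, as RecordInt64Count does below
		r.record("grpc.client.attempt.started", 1)
	}
}
```

The verifyLabels panic in the real methods below is the one behavioral wrinkle this toy skips: a label-count mismatch is treated as a programming error rather than a recordable event.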
+func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList { + var mrs []estats.MetricsRecorder + for _, sh := range shs { + if mr, ok := sh.(estats.MetricsRecorder); ok { + mrs = append(mrs, mr) + } + } + return &MetricsRecorderList{ + metricsRecorders: mrs, + } +} + +func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) { + if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want { + panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want)) + } +} + +func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Count(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordFloat64Count(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Histo(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordFloat64Histo(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Gauge(handle, incr, labels...) + } +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/status/status.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/status/status.go index c7dbc820595..757925381fe 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/status/status.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/status/status.go @@ -138,11 +138,11 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) { // s.Code() != OK implies that s.Proto() != nil. p := s.Proto() for _, detail := range details { - any, err := anypb.New(protoadapt.MessageV2Of(detail)) + m, err := anypb.New(protoadapt.MessageV2Of(detail)) if err != nil { return nil, err } - p.Details = append(p.Details, any) + p.Details = append(p.Details, m) } return &Status{s: p}, nil } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go index 999f52cd75b..54c24c2ff38 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -58,20 +58,20 @@ func GetRusage() *Rusage { // CPUTimeDiff returns the differences of user CPU time and system CPU time used // between two Rusage structs. It a no-op function for non-linux environments. 
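Stepping back to the generated rls_grpc.pb.go earlier in this patch: the new testEmbeddedByValue probe exists because embedding *UnimplementedRouteLookupServiceServer (a nil pointer) compiles fine but panics on the first unimplemented call, and registration now forces that panic up front. A toy reproduction of the failure mode, using a hypothetical service type:

```go
package main

import "fmt"

type unimplementedServer struct{}

func (unimplementedServer) Lookup() string { return "unimplemented" }

// Embedding by pointer compiles, but the nil embedded value makes every
// promoted method call a nil-pointer dereference at runtime.
type badServer struct{ *unimplementedServer }

// Embedding by value is always safe to call.
type goodServer struct{ unimplementedServer }

func main() {
	fmt.Println(goodServer{}.Lookup()) // "unimplemented"

	defer func() { fmt.Println("recovered:", recover()) }()
	fmt.Println(badServer{}.Lookup()) // panics: nil pointer dereference
}
```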
-func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { +func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) { log() return 0, 0 } // SetTCPUserTimeout is a no-op function under non-linux environments. -func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { +func SetTCPUserTimeout(net.Conn, time.Duration) error { log() return nil } // GetTCPUserTimeout is a no-op function under non-linux environments. // A negative return value indicates the operation is not supported -func GetTCPUserTimeout(conn net.Conn) (int, error) { +func GetTCPUserTimeout(net.Conn) (int, error) { log() return -1, nil } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go index 078137b7fd7..7e7aaa54636 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. + // the TCP keepalive interval and time parameters. Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go index fd7d43a8907..d5c1085eeae 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. + // the TCP keepalive interval and time parameters. Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 3deadfb4a20..ef72fbb3a01 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -32,6 +32,7 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" "google.golang.org/grpc/status" ) @@ -148,9 +149,9 @@ type dataFrame struct { streamID uint32 endStream bool h []byte - d []byte + reader mem.Reader // onEachWrite is called every time - // a part of d is written out. + // a part of data is written out. 
onEachWrite func() } @@ -289,18 +290,22 @@ func (l *outStreamList) dequeue() *outStream { } // controlBuffer is a way to pass information to loopy. -// Information is passed as specific struct types called control frames. -// A control frame not only represents data, messages or headers to be sent out -// but can also be used to instruct loopy to update its internal state. -// It shouldn't be confused with an HTTP2 frame, although some of the control frames -// like dataFrame and headerFrame do go out on wire as HTTP2 frames. +// +// Information is passed as specific struct types called control frames. A +// control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. It +// shouldn't be confused with an HTTP2 frame, although some of the control +// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames. type controlBuffer struct { - ch chan struct{} - done <-chan struct{} + wakeupCh chan struct{} // Unblocks readers waiting for something to read. + done <-chan struct{} // Closed when the transport is done. + + // Mutex guards all the fields below, except trfChan which can be read + // atomically without holding mu. mu sync.Mutex - consumerWaiting bool - list *itemList - err error + consumerWaiting bool // True when readers are blocked waiting for new data. + closed bool // True when the controlbuf is finished. + list *itemList // List of queued control frames. // transportResponseFrames counts the number of queued items that represent // the response of an action initiated by the peer. trfChan is created @@ -308,47 +313,59 @@ type controlBuffer struct { // closed and nilled when transportResponseFrames drops below the // threshold. Both fields are protected by mu. transportResponseFrames int - trfChan atomic.Value // chan struct{} + trfChan atomic.Pointer[chan struct{}] } func newControlBuffer(done <-chan struct{}) *controlBuffer { return &controlBuffer{ - ch: make(chan struct{}, 1), - list: &itemList{}, - done: done, + wakeupCh: make(chan struct{}, 1), + list: &itemList{}, + done: done, } } -// throttle blocks if there are too many incomingSettings/cleanupStreams in the -// controlbuf. +// throttle blocks if there are too many frames in the control buf that +// represent the response of an action initiated by the peer, like +// incomingSettings cleanupStreams etc. func (c *controlBuffer) throttle() { - ch, _ := c.trfChan.Load().(chan struct{}) - if ch != nil { + if ch := c.trfChan.Load(); ch != nil { select { - case <-ch: + case <-(*ch): case <-c.done: } } } +// put adds an item to the controlbuf. func (c *controlBuffer) put(it cbItem) error { _, err := c.executeAndPut(nil, it) return err } +// executeAndPut runs f, and if the return value is true, adds the given item to +// the controlbuf. The item could be nil, in which case, this method simply +// executes f and does not add the item to the controlbuf. +// +// The first return value indicates whether the item was successfully added to +// the control buffer. A non-nil error, specifically ErrConnClosing, is returned +// if the control buffer is already closed. 
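The trfChan change in this hunk (an atomic.Value holding a chan struct{} becomes an atomic.Pointer[chan struct{}]) swaps an interface Load-and-assert for a typed, nil-checkable pointer. The gate itself is a familiar shape: install a channel when a threshold is crossed, let readers block on it in throttle(), and close-and-nil it to release all waiters at once. A reduced sketch of that gate, independent of the transport code:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// gate throttles callers while a pressure threshold is exceeded.
type gate struct{ ch atomic.Pointer[chan struct{}] }

// shut installs a fresh channel; subsequent waiters will block on it.
func (g *gate) shut() {
	ch := make(chan struct{})
	g.ch.Store(&ch)
}

// open closes the current channel (if any) to release all waiters;
// the Swap ensures the channel is closed exactly once.
func (g *gate) open() {
	if ch := g.ch.Swap(nil); ch != nil {
		close(*ch)
	}
}

// wait blocks only while a channel is installed, mirroring throttle().
func (g *gate) wait() {
	if ch := g.ch.Load(); ch != nil {
		<-(*ch)
	}
}

func main() {
	var g gate
	g.shut()
	var wg sync.WaitGroup
	wg.Add(1)
	go func() { defer wg.Done(); g.wait(); fmt.Println("released") }()
	g.open() // close-and-nil releases the waiter
	wg.Wait()
}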
func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { - var wakeUp bool c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err + defer c.mu.Unlock() + + if c.closed { + return false, ErrConnClosing } if f != nil { if !f() { // f wasn't successful - c.mu.Unlock() return false, nil } } + if it == nil { + return true, nil + } + + var wakeUp bool if c.consumerWaiting { wakeUp = true c.consumerWaiting = false @@ -359,98 +376,102 @@ func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are adding the frame that puts us over the threshold; create // a throttling channel. - c.trfChan.Store(make(chan struct{})) + ch := make(chan struct{}) + c.trfChan.Store(&ch) } } - c.mu.Unlock() if wakeUp { select { - case c.ch <- struct{}{}: + case c.wakeupCh <- struct{}{}: default: } } return true, nil } -// Note argument f should never be nil. -func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err - } - if !f(it) { // f wasn't successful - c.mu.Unlock() - return false, nil - } - c.mu.Unlock() - return true, nil -} - +// get returns the next control frame from the control buffer. If block is true +// **and** there are no control frames in the control buffer, the call blocks +// until one of the conditions is met: there is a frame to return or the +// transport is closed. func (c *controlBuffer) get(block bool) (any, error) { for { c.mu.Lock() - if c.err != nil { + frame, err := c.getOnceLocked() + if frame != nil || err != nil || !block { + // If we read a frame or an error, we can return to the caller. The + // call to getOnceLocked() returns a nil frame and a nil error if + // there is nothing to read, and in that case, if the caller asked + // us not to block, we can return now as well. c.mu.Unlock() - return nil, c.err - } - if !c.list.isEmpty() { - h := c.list.dequeue().(cbItem) - if h.isTransportResponseFrame() { - if c.transportResponseFrames == maxQueuedTransportResponseFrames { - // We are removing the frame that put us over the - // threshold; close and clear the throttling channel. - ch := c.trfChan.Load().(chan struct{}) - close(ch) - c.trfChan.Store((chan struct{})(nil)) - } - c.transportResponseFrames-- - } - c.mu.Unlock() - return h, nil - } - if !block { - c.mu.Unlock() - return nil, nil + return frame, err } c.consumerWaiting = true c.mu.Unlock() + + // Release the lock above and wait to be woken up. select { - case <-c.ch: + case <-c.wakeupCh: case <-c.done: return nil, errors.New("transport closed by client") } } } +// Callers must not use this method, but should instead use get(). +// +// Caller must hold c.mu. +func (c *controlBuffer) getOnceLocked() (any, error) { + if c.closed { + return false, ErrConnClosing + } + if c.list.isEmpty() { + return nil, nil + } + h := c.list.dequeue().(cbItem) + if h.isTransportResponseFrame() { + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are removing the frame that put us over the + // threshold; close and clear the throttling channel. + ch := c.trfChan.Swap(nil) + close(*ch) + } + c.transportResponseFrames-- + } + return h, nil +} + +// finish closes the control buffer, cleaning up any streams that have queued +// header frames. Once this method returns, no more frames can be added to the +// control buffer, and attempts to do so will return ErrConnClosing. 
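executeAndPut and get above coordinate through wakeupCh, a buffered channel of capacity one: the producer performs a non-blocking send only when a consumer has flagged that it is waiting, so wakeups are never lost and never accumulate. A minimal sketch of that handshake (simplified: no close or error paths, and an int queue instead of control frames):

package main

import (
	"fmt"
	"sync"
)

type queue struct {
	mu              sync.Mutex
	items           []int
	consumerWaiting bool
	wakeupCh        chan struct{} // capacity 1: at most one pending wakeup
}

func (q *queue) put(v int) {
	q.mu.Lock()
	q.items = append(q.items, v)
	wake := q.consumerWaiting
	q.consumerWaiting = false
	q.mu.Unlock()
	if wake {
		select {
		case q.wakeupCh <- struct{}{}: // non-blocking: the slot may be full
		default:
		}
	}
}

func (q *queue) get() int {
	for {
		q.mu.Lock()
		if len(q.items) > 0 {
			v := q.items[0]
			q.items = q.items[1:]
			q.mu.Unlock()
			return v
		}
		q.consumerWaiting = true
		q.mu.Unlock()
		<-q.wakeupCh // sleep until put signals new data
	}
}

func main() {
	q := &queue{wakeupCh: make(chan struct{}, 1)}
	go q.put(42)
	fmt.Println(q.get())
}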
func (c *controlBuffer) finish() { c.mu.Lock() - if c.err != nil { - c.mu.Unlock() + defer c.mu.Unlock() + + if c.closed { return } - c.err = ErrConnClosing + c.closed = true // There may be headers for streams in the control buffer. // These streams need to be cleaned out since the transport // is still not aware of these yet. for head := c.list.dequeueAll(); head != nil; head = head.next { - hdr, ok := head.it.(*headerFrame) - if !ok { - continue - } - if hdr.onOrphaned != nil { // It will be nil on the server-side. - hdr.onOrphaned(ErrConnClosing) + switch v := head.it.(type) { + case *headerFrame: + if v.onOrphaned != nil { // It will be nil on the server-side. + v.onOrphaned(ErrConnClosing) + } + case *dataFrame: + _ = v.reader.Close() } } + // In case throttle() is currently in flight, it needs to be unblocked. // Otherwise, the transport may not close, since the transport is closed by // the reader encountering the connection error. - ch, _ := c.trfChan.Load().(chan struct{}) + ch := c.trfChan.Swap(nil) if ch != nil { - close(ch) + close(*ch) } - c.trfChan.Store((chan struct{})(nil)) - c.mu.Unlock() } type side int @@ -466,7 +487,7 @@ const ( // stream maintains a queue of data frames; as loopy receives data frames // it gets added to the queue of the relevant stream. // Loopy goes over this list of active streams by processing one node every iteration, -// thereby closely resemebling to a round-robin scheduling over all streams. While +// thereby closely resembling a round-robin scheduling over all streams. While // processing a stream, loopy writes out data bytes from this stream capped by the min // of http2MaxFrameLen, connection-level flow control and stream-level flow control. type loopyWriter struct { @@ -490,12 +511,13 @@ type loopyWriter struct { draining bool conn net.Conn logger *grpclog.PrefixLogger + bufferPool mem.BufferPool // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error)) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ side: s, @@ -511,6 +533,7 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato conn: conn, logger: logger, ssGoAwayHandler: goAwayHandler, + bufferPool: bufferPool, } return l } @@ -768,6 +791,11 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { // not be established yet. delete(l.estdStreams, c.streamID) str.deleteSelf() + for head := str.itl.dequeueAll(); head != nil; head = head.next { + if df, ok := head.it.(*dataFrame); ok { + _ = df.reader.Close() + } + } } if c.rst { // If RST_STREAM needs to be sent. if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { @@ -903,16 +931,18 @@ func (l *loopyWriter) processData() (bool, error) { dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. - // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data. - // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the - // maximum possible HTTP2 frame size. 
+ // Every dataFrame has two buffers; h that keeps grpc-message header and data + // that is the actual message. As an optimization to keep wire traffic low, data + // from data is copied to h to make as big as the maximum possible HTTP2 frame + // size. - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame + if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame // Client sends out empty data frame with endStream = true if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { return false, err } str.itl.dequeue() // remove the empty data item from stream + _ = dataItem.reader.Close() if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. @@ -927,9 +957,7 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } - var ( - buf []byte - ) + // Figure out the maximum size we can send maxSize := http2MaxFrameLen if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. @@ -943,43 +971,50 @@ func (l *loopyWriter) processData() (bool, error) { } // Compute how much of the header and data we can send within quota and max frame length hSize := min(maxSize, len(dataItem.h)) - dSize := min(maxSize-hSize, len(dataItem.d)) - if hSize != 0 { - if dSize == 0 { - buf = dataItem.h - } else { - // We can add some data to grpc message header to distribute bytes more equally across frames. - // Copy on the stack to avoid generating garbage - var localBuf [http2MaxFrameLen]byte - copy(localBuf[:hSize], dataItem.h) - copy(localBuf[hSize:], dataItem.d[:dSize]) - buf = localBuf[:hSize+dSize] - } + dSize := min(maxSize-hSize, dataItem.reader.Remaining()) + remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize + size := hSize + dSize + + var buf *[]byte + + if hSize != 0 && dSize == 0 { + buf = &dataItem.h } else { - buf = dataItem.d - } + // Note: this is only necessary because the http2.Framer does not support + // partially writing a frame, so the sequence must be materialized into a buffer. + // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed. + pool := l.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + buf = pool.Get(size) + defer pool.Put(buf) - size := hSize + dSize + copy((*buf)[:hSize], dataItem.h) + _, _ = dataItem.reader.Read((*buf)[hSize:]) + } // Now that outgoing flow controls are checked we can replenish str's write quota str.wq.replenish(size) var endStream bool // If this is the last data message on this stream and all of it can be written in this iteration. - if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size { + if dataItem.endStream && remainingBytes == 0 { endStream = true } if dataItem.onEachWrite != nil { dataItem.onEachWrite() } - if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil { return false, err } str.bytesOutStanding += size l.sendQuota -= uint32(size) dataItem.h = dataItem.h[hSize:] - dataItem.d = dataItem.d[dSize:] - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. + if remainingBytes == 0 { // All the data from that message was written out. 
+ _ = dataItem.reader.Close() str.itl.dequeue() } if str.itl.isEmpty() { @@ -998,10 +1033,3 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 4a3ddce29a4..ce878693bd7 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -24,7 +24,6 @@ package transport import ( - "bytes" "context" "errors" "fmt" @@ -40,6 +39,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -50,7 +50,7 @@ import ( // NewServerHandlerTransport returns a ServerTransport handling gRPC from // inside an http.Handler, or writes an HTTP error to w and returns an error. // It requires that the http Server supports HTTP/2. -func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { if r.Method != http.MethodPost { w.Header().Set("Allow", http.MethodPost) msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) @@ -98,6 +98,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s contentType: contentType, contentSubtype: contentSubtype, stats: stats, + bufferPool: bufferPool, } st.logger = prefixLoggerForServerHandlerTransport(st) @@ -171,6 +172,8 @@ type serverHandlerTransport struct { stats []stats.Handler logger *grpclog.PrefixLogger + + bufferPool mem.BufferPool } func (ht *serverHandlerTransport) Close(err error) { @@ -244,6 +247,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } s.hdrMu.Lock() + defer s.hdrMu.Unlock() if p := st.Proto(); p != nil && len(p.Details) > 0 { delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) @@ -268,7 +272,6 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } - s.hdrMu.Unlock() }) if err == nil { // transport has not been closed @@ -330,16 +333,28 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { s.hdrMu.Unlock() } -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { + // Always take a reference because otherwise there is no guarantee the data will + // be available after this function returns. This is what callers to Write + // expect. 
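The comment above, together with the data.Ref() call that follows, states the ownership rule behind the new mem.BufferSlice plumbing: a writer that may touch the data after returning must take its own reference first and release it when done, whether the deferred work runs or an error short-circuits it. A toy sketch of reference-counted buffer ownership (the refBuf type below is illustrative, not the mem package API):

package main

import "fmt"

// refBuf is a toy stand-in for a reference-counted buffer: Free only
// releases the underlying data when the last reference drops.
type refBuf struct {
	refs int
	data []byte
}

func (b *refBuf) Ref() { b.refs++ }

func (b *refBuf) Free() {
	b.refs--
	if b.refs == 0 {
		fmt.Println("buffer released")
		b.data = nil
	}
}

// write hands the buffer to work that may run after write returns, so it
// takes its own reference and releases it only when the work completes.
func write(b *refBuf, schedule func(func())) {
	b.Ref()
	schedule(func() {
		defer b.Free()
		fmt.Printf("wrote %d bytes\n", len(b.data))
	})
}

func main() {
	b := &refBuf{refs: 1, data: make([]byte, 4)}
	var deferred func()
	write(b, func(f func()) { deferred = f }) // schedule, don't run yet
	b.Free()   // caller drops its reference; data must survive
	deferred() // the scheduled write still sees valid data, then releases
}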
+ data.Ref() headersWritten := s.updateHeaderSent() - return ht.do(func() { + err := ht.do(func() { + defer data.Free() if !headersWritten { ht.writePendingHeaders(s) } ht.rw.Write(hdr) - ht.rw.Write(data) + for _, b := range data { + _, _ = ht.rw.Write(b.ReadOnlyData()) + } ht.rw.(http.Flusher).Flush() }) + if err != nil { + data.Free() + return err + } + return nil } func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { @@ -406,7 +421,7 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream headerWireLength: 0, // won't have access to header wire length until golang/go#18997. } s.trReader = &transportReader{ - reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, windowHandler: func(int) {}, } @@ -415,21 +430,19 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream go func() { defer close(readerDone) - // TODO: minimize garbage, optimize recvBuffer code/ownership - const readSize = 8196 - for buf := make([]byte, readSize); ; { - n, err := req.Body.Read(buf) + for { + buf := ht.bufferPool.Get(http2MaxFrameLen) + n, err := req.Body.Read(*buf) if n > 0 { - s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) - buf = buf[n:] + *buf = (*buf)[:n] + s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)}) + } else { + ht.bufferPool.Put(buf) } if err != nil { s.buf.put(recvMsg{err: mapRecvMsgError(err)}) return } - if len(buf) == 0 { - buf = make([]byte, readSize) - } } }() @@ -462,7 +475,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {} func (ht *serverHandlerTransport) IncrMsgRecv() {} -func (ht *serverHandlerTransport) Drain(debugData string) { +func (ht *serverHandlerTransport) Drain(string) { panic("Drain() is not implemented") } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 3c63c706986..c769deab53c 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -47,6 +47,7 @@ import ( isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -59,6 +60,8 @@ import ( // atomically. var clientConnectionCounter uint64 +var goAwayLoopyWriterTimeout = 5 * time.Second + var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) // http2Client implements the ClientTransport interface with HTTP2. @@ -144,7 +147,7 @@ type http2Client struct { onClose func(GoAwayReason) - bufferPool *bufferPool + bufferPool mem.BufferPool connectionID uint64 logger *grpclog.PrefixLogger @@ -229,7 +232,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } }(conn) - // The following defer and goroutine monitor the connectCtx for cancelation + // The following defer and goroutine monitor the connectCtx for cancellation // and deadline. On context expiration, the connection is hard closed and // this function will naturally fail as a result. 
Otherwise, the defer // waits for the goroutine to exit to prevent the context from being @@ -346,7 +349,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), keepaliveEnabled: keepaliveEnabled, - bufferPool: newBufferPool(), + bufferPool: opts.BufferPool, onClose: onClose, } var czSecurity credentials.ChannelzSecurityValue @@ -463,7 +466,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return nil, err } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) if err := t.loopy.run(); !isIOError(err) { // Immediately close the connection, as the loopy writer returns // when there are no more active streams and we were draining (the @@ -504,7 +507,6 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { closeStream: func(err error) { t.CloseStream(s, err) }, - freeBuffer: t.bufferPool.put, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -770,7 +772,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, hdr := &headerFrame{ hf: headerFields, endStream: false, - initStream: func(id uint32) error { + initStream: func(uint32) error { t.mu.Lock() // TODO: handle transport closure in loopy instead and remove this // initStream is never called when transport is draining. @@ -983,6 +985,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // only once on a transport. Once it is called, the transport should not be // accessed anymore. func (t *http2Client) Close(err error) { + t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10)) t.mu.Lock() // Make sure we only close once. if t.state == closing { @@ -1006,10 +1009,20 @@ func (t *http2Client) Close(err error) { t.kpDormancyCond.Signal() } t.mu.Unlock() + // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the - // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. + // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It + // also waits for loopyWriter to be closed with a timer to avoid the + // long blocking in case the connection is blackholed, i.e. TCP is + // just stuck. t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err}) - <-t.writerDone + timer := time.NewTimer(goAwayLoopyWriterTimeout) + defer timer.Stop() + select { + case <-t.writerDone: // success + case <-timer.C: + t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout) + } t.cancel() t.conn.Close() channelz.RemoveEntry(t.channelz.ID) @@ -1065,27 +1078,36 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. -func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { + reader := data.Reader() + if opts.Last { // If it's the last message, update stream state. 
if !s.compareAndSwapState(streamActive, streamWriteDone) { + _ = reader.Close() return errStreamDone } } else if s.getState() != streamActive { + _ = reader.Close() return errStreamDone } df := &dataFrame{ streamID: s.id, endStream: opts.Last, h: hdr, - d: data, + reader: reader, } - if hdr != nil || data != nil { // If it's not an empty data frame, check quota. - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota. + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() return err } } - return t.controlBuf.put(df) + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + return nil } func (t *http2Client) getStream(f http2.Frame) *Stream { @@ -1190,10 +1212,13 @@ func (t *http2Client) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) } } // The server has closed the stream without sending trailers. Record that @@ -1222,7 +1247,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { if statusCode == codes.Canceled { if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { // Our deadline was already exceeded, and that was likely the cause - // of this cancelation. Alter the status code accordingly. + // of this cancellation. Alter the status code accordingly. statusCode = codes.DeadlineExceeded } } @@ -1307,7 +1332,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { id := f.LastStreamID if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id)) + t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)) return } // A client can receive multiple GoAways from the server (see @@ -1642,11 +1667,10 @@ func (t *http2Client) reader(errCh chan<- error) { t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) } continue - } else { - // Transport error. - t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) - return } + // Transport error. + t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) + return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: @@ -1671,13 +1695,6 @@ func (t *http2Client) reader(errCh chan<- error) { } } -func minTime(a, b time.Duration) time.Duration { - if a < b { - return a - } - return b -} - // keepalive running in a separate goroutine makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { p := &ping{data: [8]byte{}} @@ -1745,7 +1762,7 @@ func (t *http2Client) keepalive() { // timeoutLeft. This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). 
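The Close() rewrite earlier in this file replaces an unconditional <-t.writerDone with a select against a 5-second timer (goAwayLoopyWriterTimeout), so a blackholed connection cannot stall shutdown while the GOAWAY frame drains. A generic sketch of that bounded-wait shape, with illustrative names:

package main

import (
	"fmt"
	"time"
)

// waitBounded waits for done, but gives up after timeout so a blackholed
// peer (TCP stuck, packets silently dropped) cannot block forever.
func waitBounded(done <-chan struct{}, timeout time.Duration) bool {
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	select {
	case <-done:
		return true // e.g. loopy writer finished flushing
	case <-timer.C:
		return false // give up and close the transport anyway
	}
}

func main() {
	done := make(chan struct{})
	go func() {
		time.Sleep(10 * time.Millisecond)
		close(done)
	}()
	fmt.Println(waitBounded(done, time.Second))                        // true: finished in time
	fmt.Println(waitBounded(make(chan struct{}), 20*time.Millisecond)) // false: timed out
}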
- sleepDuration := minTime(t.kp.Time, timeoutLeft) + sleepDuration := min(t.kp.Time, timeoutLeft) timeoutLeft -= sleepDuration timer.Reset(sleepDuration) case <-t.ctx.Done(): diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/http2_server.go index cab0e2d3d44..584b50fe553 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -25,6 +25,7 @@ import ( "fmt" "io" "math" + "math/rand" "net" "net/http" "strconv" @@ -38,12 +39,12 @@ import ( "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/mem" "google.golang.org/protobuf/proto" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -119,7 +120,7 @@ type http2Server struct { // Fields below are for channelz metric collection. channelz *channelz.Socket - bufferPool *bufferPool + bufferPool mem.BufferPool connectionID uint64 @@ -261,7 +262,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, idle: time.Now(), kep: kep, initialWindowSize: iwz, - bufferPool: newBufferPool(), + bufferPool: config.BufferPool, } var czSecurity credentials.ChannelzSecurityValue if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { @@ -330,7 +331,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) err := t.loopy.run() close(t.loopyWriterDone) if !isIOError(err) { @@ -613,10 +614,9 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ reader: &recvBufferReader{ - ctx: s.ctx, - ctxDone: s.ctxDone, - recv: s.buf, - freeBuffer: t.bufferPool.put, + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -813,10 +813,13 @@ func (t *http2Server) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. 
+ pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) } } if f.StreamEnded() { @@ -1089,7 +1092,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { onWrite: t.setResetPingStrikes, } - success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) + success, err := t.controlBuf.executeAndPut(func() bool { + return t.checkForHeaderListSize(trailingHeader) + }, nil) if !success { if err != nil { return err @@ -1112,27 +1117,37 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { + reader := data.Reader() + if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { + _ = reader.Close() return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { + _ = reader.Close() return t.streamContextErr(s) } } + df := &dataFrame{ streamID: s.id, h: hdr, - d: data, + reader: reader, onEachWrite: t.setResetPingStrikes, } - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() return t.streamContextErr(s) } - return t.controlBuf.put(df) + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + return nil } // keepalive running in a separate goroutine does the following: @@ -1223,7 +1238,7 @@ func (t *http2Server) keepalive() { // timeoutLeft. This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). - sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) + sleepDuration := min(t.kp.Time, kpTimeoutLeft) kpTimeoutLeft -= sleepDuration kpTimer.Reset(sleepDuration) case <-t.done: @@ -1440,7 +1455,7 @@ func getJitter(v time.Duration) time.Duration { } // Generate a jitter between +/- 10% of the value. r := int64(v / 10) - j := grpcrand.Int63n(2*r) - r + j := rand.Int63n(2*r) - r return time.Duration(j) } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/http_util.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/http_util.go index 39cef3bd442..3613d7b6481 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -317,28 +317,32 @@ func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { return w } -func (w *bufWriter) Write(b []byte) (n int, err error) { +func (w *bufWriter) Write(b []byte) (int, error) { if w.err != nil { return 0, w.err } if w.batchSize == 0 { // Buffer has been disabled. 
- n, err = w.conn.Write(b) + n, err := w.conn.Write(b) return n, toIOError(err) } if w.buf == nil { b := w.pool.Get().(*[]byte) w.buf = *b } + written := 0 for len(b) > 0 { - nn := copy(w.buf[w.offset:], b) - b = b[nn:] - w.offset += nn - n += nn - if w.offset >= w.batchSize { - err = w.flushKeepBuffer() + copied := copy(w.buf[w.offset:], b) + b = b[copied:] + written += copied + w.offset += copied + if w.offset < w.batchSize { + continue + } + if err := w.flushKeepBuffer(); err != nil { + return written, err } } - return n, err + return written, nil } func (w *bufWriter) Flush() error { @@ -389,7 +393,7 @@ type framer struct { fr *http2.Framer } -var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) +var writeBufferPoolMap = make(map[int]*sync.Pool) var writeBufferMutex sync.Mutex func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/proxy.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/proxy.go index 24fa1032574..54b22443654 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -107,8 +107,14 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri } return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) } - - return &bufConn{Conn: conn, r: r}, nil + // The buffer could contain extra bytes from the target server, so we can't + // discard it. However, in many cases where the server waits for the client + // to send the first message (e.g. when TLS is being used), the buffer will + // be empty, so we can avoid the overhead of reading through this buffer. + if r.Buffered() != 0 { + return &bufConn{Conn: conn, r: r}, nil + } + return conn, nil } // proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/transport.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/transport.go index 4b39c0ade97..924ba4f3653 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -22,7 +22,6 @@ package transport import ( - "bytes" "context" "errors" "fmt" @@ -37,6 +36,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -47,32 +47,10 @@ import ( const logLevel = 2 -type bufferPool struct { - pool sync.Pool -} - -func newBufferPool() *bufferPool { - return &bufferPool{ - pool: sync.Pool{ - New: func() any { - return new(bytes.Buffer) - }, - }, - } -} - -func (p *bufferPool) get() *bytes.Buffer { - return p.pool.Get().(*bytes.Buffer) -} - -func (p *bufferPool) put(b *bytes.Buffer) { - p.pool.Put(b) -} - // recvMsg represents the received msg from the transport. All transport // protocol specific info has been removed. type recvMsg struct { - buffer *bytes.Buffer + buffer mem.Buffer // nil: received some data // io.EOF: stream is completed. data is nil. // other non-nil error: transport failure. data is nil. 
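The bufWriter.Write rewrite above restructures the batching loop: bytes are copied into a fixed-size buffer, the buffer is flushed whenever it fills, and the running byte count is returned if a flush fails. A standalone sketch of the same batching logic over a plain io.Writer (simplified: no buffer pooling and no sticky error field):

package main

import (
	"bytes"
	"fmt"
	"io"
)

// batchWriter coalesces small writes into batchSize-d flushes.
type batchWriter struct {
	conn      io.Writer
	buf       []byte
	offset    int
	batchSize int
}

func (w *batchWriter) Write(b []byte) (int, error) {
	written := 0
	for len(b) > 0 {
		copied := copy(w.buf[w.offset:], b)
		b = b[copied:]
		written += copied
		w.offset += copied
		if w.offset < w.batchSize {
			continue // keep buffering until the batch is full
		}
		if err := w.flush(); err != nil {
			return written, err
		}
	}
	return written, nil
}

func (w *batchWriter) flush() error {
	_, err := w.conn.Write(w.buf[:w.offset])
	w.offset = 0
	return err
}

func main() {
	var sink bytes.Buffer
	w := &batchWriter{conn: &sink, buf: make([]byte, 4), batchSize: 4}
	w.Write([]byte("hello world")) // flushes every 4 buffered bytes
	w.flush()                      // drain the tail
	fmt.Println(sink.String())
}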
@@ -102,6 +80,9 @@ func newRecvBuffer() *recvBuffer { func (b *recvBuffer) put(r recvMsg) { b.mu.Lock() if b.err != nil { + // drop the buffer on the floor. Since b.err is not nil, any subsequent reads + // will always return an error, making this buffer inaccessible. + r.buffer.Free() b.mu.Unlock() // An error had occurred earlier, don't accept more // data or errors. @@ -148,45 +129,97 @@ type recvBufferReader struct { ctx context.Context ctxDone <-chan struct{} // cache of ctx.Done() (for performance). recv *recvBuffer - last *bytes.Buffer // Stores the remaining data in the previous calls. + last mem.Buffer // Stores the remaining data in the previous calls. err error - freeBuffer func(*bytes.Buffer) } -// Read reads the next len(p) bytes from last. If last is drained, it tries to -// read additional data from recv. It blocks if there no additional data available -// in recv. If Read returns any non-nil error, it will continue to return that error. -func (r *recvBufferReader) Read(p []byte) (n int, err error) { +func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { if r.err != nil { return 0, r.err } if r.last != nil { - // Read remaining data left in last call. - copied, _ := r.last.Read(p) - if r.last.Len() == 0 { - r.freeBuffer(r.last) + n, r.last = mem.ReadUnsafe(header, r.last) + return n, nil + } + if r.closeStream != nil { + n, r.err = r.readHeaderClient(header) + } else { + n, r.err = r.readHeader(header) + } + return n, r.err +} + +// Read reads the next n bytes from last. If last is drained, it tries to read +// additional data from recv. It blocks if there no additional data available in +// recv. If Read returns any non-nil error, it will continue to return that +// error. +func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { + if r.err != nil { + return nil, r.err + } + if r.last != nil { + buf = r.last + if r.last.Len() > n { + buf, r.last = mem.SplitUnsafe(buf, n) + } else { r.last = nil } - return copied, nil + return buf, nil } if r.closeStream != nil { - n, r.err = r.readClient(p) + buf, r.err = r.readClient(n) } else { - n, r.err = r.read(p) + buf, r.err = r.read(n) } - return n, r.err + return buf, r.err } -func (r *recvBufferReader) read(p []byte) (n int, err error) { +func (r *recvBufferReader) readHeader(header []byte) (n int, err error) { select { case <-r.ctxDone: return 0, ContextErr(r.ctx.Err()) case m := <-r.recv.get(): - return r.readAdditional(m, p) + return r.readHeaderAdditional(m, header) + } +} + +func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) { + select { + case <-r.ctxDone: + return nil, ContextErr(r.ctx.Err()) + case m := <-r.recv.get(): + return r.readAdditional(m, n) + } +} + +func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { + // If the context is canceled, then closes the stream with nil metadata. + // closeStream writes its error parameter to r.recv as a recvMsg. + // r.readAdditional acts on that message and returns the necessary error. + select { + case <-r.ctxDone: + // Note that this adds the ctx error to the end of recv buffer, and + // reads from the head. This will delay the error until recv buffer is + // empty, thus will delay ctx cancellation in Recv(). + // + // It's done this way to fix a race between ctx cancel and trailer. The + // race was, stream.Recv() may return ctx error if ctxDone wins the + // race, but stream.Trailer() may return a non-nil md because the stream + // was not marked as done when trailer is received. 
This closeStream + // call will mark stream as done, thus fix the race. + // + // TODO: delaying ctx error seems like a unnecessary side effect. What + // we really want is to mark the stream as done, and return ctx error + // faster. + r.closeStream(ContextErr(r.ctx.Err())) + m := <-r.recv.get() + return r.readHeaderAdditional(m, header) + case m := <-r.recv.get(): + return r.readHeaderAdditional(m, header) } } -func (r *recvBufferReader) readClient(p []byte) (n int, err error) { +func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { // If the context is canceled, then closes the stream with nil metadata. // closeStream writes its error parameter to r.recv as a recvMsg. // r.readAdditional acts on that message and returns the necessary error. @@ -207,25 +240,40 @@ func (r *recvBufferReader) readClient(p []byte) (n int, err error) { // faster. r.closeStream(ContextErr(r.ctx.Err())) m := <-r.recv.get() - return r.readAdditional(m, p) + return r.readAdditional(m, n) case m := <-r.recv.get(): - return r.readAdditional(m, p) + return r.readAdditional(m, n) } } -func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) { +func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) { r.recv.load() if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } return 0, m.err } - copied, _ := m.buffer.Read(p) - if m.buffer.Len() == 0 { - r.freeBuffer(m.buffer) - r.last = nil - } else { - r.last = m.buffer + + n, r.last = mem.ReadUnsafe(header, m.buffer) + + return n, nil +} + +func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) { + r.recv.load() + if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } + return nil, m.err + } + + if m.buffer.Len() > n { + m.buffer, r.last = mem.SplitUnsafe(m.buffer, n) } - return copied, nil + + return m.buffer, nil } type streamState uint32 @@ -241,7 +289,7 @@ const ( type Stream struct { id uint32 st ServerTransport // nil for client side Stream - ct *http2Client // nil for server side Stream + ct ClientTransport // nil for server side Stream ctx context.Context // the associated context of the stream cancel context.CancelFunc // always nil for client side Stream done chan struct{} // closed at the end of stream to unblock writers. On the client side. @@ -251,7 +299,7 @@ type Stream struct { recvCompress string sendCompress string buf *recvBuffer - trReader io.Reader + trReader *transportReader fc *inFlow wq *writeQuota @@ -408,7 +456,7 @@ func (s *Stream) TrailersOnly() bool { return s.noHeaders } -// Trailer returns the cached trailer metedata. Note that if it is not called +// Trailer returns the cached trailer metadata. Note that if it is not called // after the entire stream is done, it could return an empty MD. Client // side only. // It can be safely read only after stream has ended that is either read @@ -499,36 +547,87 @@ func (s *Stream) write(m recvMsg) { s.buf.put(m) } -// Read reads all p bytes from the wire for this stream. 
-func (s *Stream) Read(p []byte) (n int, err error) { +func (s *Stream) ReadHeader(header []byte) (err error) { + // Don't request a read if there was an error earlier + if er := s.trReader.er; er != nil { + return er + } + s.requestRead(len(header)) + for len(header) != 0 { + n, err := s.trReader.ReadHeader(header) + header = header[n:] + if len(header) == 0 { + err = nil + } + if err != nil { + if n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + } + return nil +} + +// Read reads n bytes from the wire for this stream. +func (s *Stream) Read(n int) (data mem.BufferSlice, err error) { // Don't request a read if there was an error earlier - if er := s.trReader.(*transportReader).er; er != nil { - return 0, er + if er := s.trReader.er; er != nil { + return nil, er } - s.requestRead(len(p)) - return io.ReadFull(s.trReader, p) + s.requestRead(n) + for n != 0 { + buf, err := s.trReader.Read(n) + var bufLen int + if buf != nil { + bufLen = buf.Len() + } + n -= bufLen + if n == 0 { + err = nil + } + if err != nil { + if bufLen > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + data.Free() + return nil, err + } + data = append(data, buf) + } + return data, nil } -// tranportReader reads all the data available for this Stream from the transport and +// transportReader reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. type transportReader struct { - reader io.Reader + reader *recvBufferReader // The handler to control the window update procedure for both this // particular stream and the associated transport. windowHandler func(int) er error } -func (t *transportReader) Read(p []byte) (n int, err error) { - n, err = t.reader.Read(p) +func (t *transportReader) ReadHeader(header []byte) (int, error) { + n, err := t.reader.ReadHeader(header) if err != nil { t.er = err - return + return 0, err } t.windowHandler(n) - return + return n, nil +} + +func (t *transportReader) Read(n int) (mem.Buffer, error) { + buf, err := t.reader.Read(n) + if err != nil { + t.er = err + return buf, err + } + t.windowHandler(buf.Len()) + return buf, nil } // BytesReceived indicates whether any bytes have been received on this stream. @@ -574,6 +673,7 @@ type ServerConfig struct { ChannelzParent *channelz.Server MaxHeaderListSize *uint32 HeaderTableSize *uint32 + BufferPool mem.BufferPool } // ConnectOptions covers all relevant options for communicating with the server. @@ -612,6 +712,8 @@ type ConnectOptions struct { MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. UseProxy bool + // The mem.BufferPool to use when reading/writing to the wire. + BufferPool mem.BufferPool } // NewClientTransport establishes the transport with the required ConnectOptions @@ -673,7 +775,7 @@ type ClientTransport interface { // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error // NewStream creates a Stream for an RPC. NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) @@ -725,7 +827,7 @@ type ServerTransport interface { // Write sends the data for the given stream. // Write may not be called on all streams. 
- Write(s *Stream, hdr []byte, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error // WriteStatus sends the status of a stream to the client. WriteStatus is // the final call made on a stream and always occurs. @@ -798,7 +900,7 @@ var ( // connection is draining. This could be caused by goaway or balancer // removing the address. errStreamDrain = status.Error(codes.Unavailable, "the connection is draining") - // errStreamDone is returned from write at the client side to indiacte application + // errStreamDone is returned from write at the client side to indicate application // layer of an error. errStreamDone = errors.New("the stream is done") // StatusGoAway indicates that the server sent a GOAWAY that included this diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/wrr/random.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/wrr/random.go index 4d91fc6f580..3f611a35059 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/wrr/random.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/wrr/random.go @@ -19,9 +19,8 @@ package wrr import ( "fmt" + "math/rand" "sort" - - "google.golang.org/grpc/internal/grpcrand" ) // weightedItem is a wrapped weighted item that is used to implement weighted random algorithm. @@ -47,19 +46,19 @@ func NewRandom() WRR { return &randomWRR{} } -var grpcrandInt63n = grpcrand.Int63n +var randInt63n = rand.Int63n func (rw *randomWRR) Next() (item any) { if len(rw.items) == 0 { return nil } if rw.equalWeights { - return rw.items[grpcrandInt63n(int64(len(rw.items)))].item + return rw.items[randInt63n(int64(len(rw.items)))].item } sumOfWeights := rw.items[len(rw.items)-1].accumulatedWeight // Random number in [0, sumOfWeights). - randomWeight := grpcrandInt63n(sumOfWeights) + randomWeight := randInt63n(sumOfWeights) // Item's accumulated weights are in ascending order, because item's weight >= 0. // Binary search rw.items to find first item whose accumulatedWeight > randomWeight // The return i is guaranteed to be in range [0, len(rw.items)) because randomWeight < last item's accumulatedWeight diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/bootstrap/bootstrap.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/bootstrap/bootstrap.go index f89f03dd9ac..8317859e1e9 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/bootstrap/bootstrap.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/bootstrap/bootstrap.go @@ -24,35 +24,29 @@ import ( "bytes" "encoding/json" "fmt" + "maps" "net/url" "os" + "slices" "strings" + "sync" "google.golang.org/grpc" "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/bootstrap" - "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/structpb" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" ) const ( - // The "server_features" field in the bootstrap file contains a list of - // features supported by the server: - // - A value of "xds_v3" indicates that the server supports the v3 version of - // the xDS transport protocol. 
- // - A value of "ignore_resource_deletion" indicates that the client should - // ignore deletion of Listener and Cluster resources in updates from the - // server. - serverFeaturesV3 = "xds_v3" serverFeaturesIgnoreResourceDeletion = "ignore_resource_deletion" - - gRPCUserAgentName = "gRPC Go" - clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning" - clientFeatureResourceWrapper = "xds.config.resource-in-sotw" + gRPCUserAgentName = "gRPC Go" + clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning" + clientFeatureResourceWrapper = "xds.config.resource-in-sotw" ) // For overriding in unit tests. @@ -60,12 +54,15 @@ var bootstrapFileReadFunc = os.ReadFile // ChannelCreds contains the credentials to be used while communicating with an // xDS server. It is also used to dedup servers with the same server URI. +// +// This type does not implement custom JSON marshal/unmarshal logic because it +// is straightforward to accomplish the same with json struct tags. type ChannelCreds struct { // Type contains a unique name identifying the credentials type. The only // supported types currently are "google_default" and "insecure". - Type string + Type string `json:"type,omitempty"` // Config contains the JSON configuration associated with the credentials. - Config json.RawMessage + Config json.RawMessage `json:"config,omitempty"` } // Equal reports whether cc and other are considered equal. @@ -87,92 +84,208 @@ func (cc ChannelCreds) String() string { return cc.Type + "-" + string(b) } -// ServerConfig contains the configuration to connect to a server, including -// URI, creds, and transport API version (e.g. v2 or v3). +// ServerConfigs represents a collection of server configurations. +type ServerConfigs []*ServerConfig + +// Equal returns true if scs equals other. +func (scs *ServerConfigs) Equal(other *ServerConfigs) bool { + if len(*scs) != len(*other) { + return false + } + for i := range *scs { + if !(*scs)[i].Equal((*other)[i]) { + return false + } + } + return true +} + +// UnmarshalJSON takes the json data (a list of server configurations) and +// unmarshals it to the struct. +func (scs *ServerConfigs) UnmarshalJSON(data []byte) error { + servers := []*ServerConfig{} + if err := json.Unmarshal(data, &servers); err != nil { + return fmt.Errorf("xds: failed to JSON unmarshal server configurations during bootstrap: %v, config:\n%s", err, string(data)) + } + // Only use the first server config if fallback support is disabled. + if !envconfig.XDSFallbackSupport { + if len(servers) > 1 { + servers = servers[:1] + } + } + *scs = servers + return nil +} + +// String returns a string representation of the ServerConfigs, by concatenating +// the string representations of the underlying server configs. +func (scs *ServerConfigs) String() string { + ret := "" + for i, sc := range *scs { + if i > 0 { + ret += ", " + } + ret += sc.String() + } + return ret +} + +// Authority contains configuration for an xDS control plane authority. // -// It contains unexported fields that are initialized when unmarshaled from JSON -// using either the UnmarshalJSON() method or the ServerConfigFromJSON() -// function. Hence users are strongly encouraged not to use a literal struct -// initialization to create an instance of this type, but instead unmarshal from -// JSON using one of the two available options. -type ServerConfig struct { - // ServerURI is the management server to connect to. 
+// This type does not implement custom JSON marshal/unmarshal logic because it
+// is straightforward to accomplish the same with json struct tags.
+type Authority struct {
+	// ClientListenerResourceNameTemplate is template for the name of the
+	// Listener resource to subscribe to for a gRPC client channel. Used only
+	// when the channel is created using an "xds:" URI with this authority name.
	//
-	// The bootstrap file contains an ordered list of xDS servers to contact for
-	// this authority. The first one is picked.
-	ServerURI string
-	// Creds contains the credentials to be used while communicationg with this
-	// xDS server. It is also used to dedup servers with the same server URI.
-	Creds ChannelCreds
-	// ServerFeatures contains a list of features supported by this xDS server.
-	// It is also used to dedup servers with the same server URI and creds.
-	ServerFeatures []string
+	// The token "%s", if present in this string, will be replaced
+	// with %-encoded service authority (i.e., the path part of the target
+	// URI used to create the gRPC channel).
+	//
+	// Must start with "xdstp://<authority_name>/". If it does not,
+	// that is considered a bootstrap file parsing error.
+	//
+	// If not present in the bootstrap file, defaults to
+	// "xdstp://<authority_name>/envoy.config.listener.v3.Listener/%s".
+	ClientListenerResourceNameTemplate string `json:"client_listener_resource_name_template,omitempty"`
+	// XDSServers contains the list of server configurations for this authority.
+	XDSServers ServerConfigs `json:"xds_servers,omitempty"`
+}
+
+// Equal returns true if a equals other.
+func (a *Authority) Equal(other *Authority) bool {
+	switch {
+	case a == nil && other == nil:
+		return true
+	case (a != nil) != (other != nil):
+		return false
+	case a.ClientListenerResourceNameTemplate != other.ClientListenerResourceNameTemplate:
+		return false
+	case !a.XDSServers.Equal(&other.XDSServers):
+		return false
+	}
+	return true
+}
+
+// ServerConfig contains the configuration to connect to a server.
+type ServerConfig struct {
+	serverURI      string
+	channelCreds   []ChannelCreds
+	serverFeatures []string

 	// As part of unmarshalling the JSON config into this struct, we ensure that
 	// the credentials config is valid by building an instance of the specified
-	// credentials and store it here as a grpc.DialOption for easy access when
-	// dialing this xDS server.
+	// credentials and store it here for easy access.
+	selectedCreds   ChannelCreds
 	credsDialOption grpc.DialOption

-	// IgnoreResourceDeletion controls the behavior of the xDS client when the
-	// server deletes a previously sent Listener or Cluster resource. If set, the
-	// xDS client will not invoke the watchers' OnResourceDoesNotExist() method
-	// when a resource is deleted, nor will it remove the existing resource value
-	// from its cache.
-	IgnoreResourceDeletion bool
+	cleanups []func()
+}
+
+// ServerURI returns the URI of the management server to connect to.
+func (sc *ServerConfig) ServerURI() string {
+	return sc.serverURI
+}
+
+// ChannelCreds returns the credentials configuration to use when communicating
+// with this server. Also used to dedup servers with the same server URI.
+func (sc *ServerConfig) ChannelCreds() []ChannelCreds {
+	return sc.channelCreds
+}
+
+// ServerFeatures returns the list of features supported by this server. Also
+// used to dedup servers with the same server URI and channel creds.
+func (sc *ServerConfig) ServerFeatures() []string { + return sc.serverFeatures +} - // Cleanups are called when the xDS client for this server is closed. Allows - // cleaning up resources created specifically for this ServerConfig. - Cleanups []func() +// ServerFeaturesIgnoreResourceDeletion returns true if this server supports a +// feature where the xDS client can ignore resource deletions from this server, +// as described in gRFC A53. +// +// This feature controls the behavior of the xDS client when the server deletes +// a previously sent Listener or Cluster resource. If set, the xDS client will +// not invoke the watchers' OnResourceDoesNotExist() method when a resource is +// deleted, nor will it remove the existing resource value from its cache. +func (sc *ServerConfig) ServerFeaturesIgnoreResourceDeletion() bool { + for _, sf := range sc.serverFeatures { + if sf == serverFeaturesIgnoreResourceDeletion { + return true + } + } + return false } -// CredsDialOption returns the configured credentials as a grpc dial option. +// CredsDialOption returns the first supported transport credentials from the +// configuration, as a dial option. func (sc *ServerConfig) CredsDialOption() grpc.DialOption { return sc.credsDialOption } +// Cleanups returns a collection of functions to be called when the xDS client +// for this server is closed. Allows cleaning up resources created specifically +// for this server. +func (sc *ServerConfig) Cleanups() []func() { + return sc.cleanups +} + +// Equal reports whether sc and other are considered equal. +func (sc *ServerConfig) Equal(other *ServerConfig) bool { + switch { + case sc == nil && other == nil: + return true + case (sc != nil) != (other != nil): + return false + case sc.serverURI != other.serverURI: + return false + case !slices.EqualFunc(sc.channelCreds, other.channelCreds, func(a, b ChannelCreds) bool { return a.Equal(b) }): + return false + case !slices.Equal(sc.serverFeatures, other.serverFeatures): + return false + case !sc.selectedCreds.Equal(other.selectedCreds): + return false + } + return true +} + // String returns the string representation of the ServerConfig. -// -// This string representation will be used as map keys in federation -// (`map[ServerConfig]authority`), so that the xDS ClientConn and stream will be -// shared by authorities with different names but the same server config. -// -// It covers (almost) all the fields so the string can represent the config -// content. It doesn't cover NodeProto because NodeProto isn't used by -// federation. func (sc *ServerConfig) String() string { - features := strings.Join(sc.ServerFeatures, "-") - return strings.Join([]string{sc.ServerURI, sc.Creds.String(), features}, "-") + if len(sc.serverFeatures) == 0 { + return fmt.Sprintf("%s-%s", sc.serverURI, sc.selectedCreds.String()) + } + features := strings.Join(sc.serverFeatures, "-") + return strings.Join([]string{sc.serverURI, sc.selectedCreds.String(), features}, "-") } -// MarshalJSON marshals the ServerConfig to json. -func (sc ServerConfig) MarshalJSON() ([]byte, error) { - server := xdsServer{ - ServerURI: sc.ServerURI, - ChannelCreds: []channelCreds{{Type: sc.Creds.Type, Config: sc.Creds.Config}}, - ServerFeatures: sc.ServerFeatures, - } - server.ServerFeatures = []string{serverFeaturesV3} - if sc.IgnoreResourceDeletion { - server.ServerFeatures = append(server.ServerFeatures, serverFeaturesIgnoreResourceDeletion) +// The following fields correspond 1:1 with the JSON schema for ServerConfig. 
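
// Minimal sketch of the dedup-key idea behind ServerConfig.String above: two
// configs with the same URI, selected creds, and features produce the same
// key, letting authorities with different names share one transport. The
// values are hypothetical.
package main

import (
	"fmt"
	"strings"
)

func dedupKey(uri, creds string, features []string) string {
	if len(features) == 0 {
		return fmt.Sprintf("%s-%s", uri, creds)
	}
	return strings.Join([]string{uri, creds, strings.Join(features, "-")}, "-")
}

func main() {
	a := dedupKey("xds.example.com:443", "google_default", []string{"ignore_resource_deletion"})
	b := dedupKey("xds.example.com:443", "google_default", []string{"ignore_resource_deletion"})
	fmt.Println(a == b) // true: both configs map to the same server entry
}
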
+type serverConfigJSON struct { + ServerURI string `json:"server_uri,omitempty"` + ChannelCreds []ChannelCreds `json:"channel_creds,omitempty"` + ServerFeatures []string `json:"server_features,omitempty"` +} + +// MarshalJSON returns marshaled JSON bytes corresponding to this server config. +func (sc *ServerConfig) MarshalJSON() ([]byte, error) { + server := &serverConfigJSON{ + ServerURI: sc.serverURI, + ChannelCreds: sc.channelCreds, + ServerFeatures: sc.serverFeatures, } return json.Marshal(server) } // UnmarshalJSON takes the json data (a server) and unmarshals it to the struct. func (sc *ServerConfig) UnmarshalJSON(data []byte) error { - var server xdsServer + server := serverConfigJSON{} if err := json.Unmarshal(data, &server); err != nil { - return fmt.Errorf("xds: json.Unmarshal(data) for field ServerConfig failed during bootstrap: %v", err) + return fmt.Errorf("xds: failed to JSON unmarshal server configuration during bootstrap: %v, config:\n%s", err, string(data)) } - sc.ServerURI = server.ServerURI - sc.ServerFeatures = server.ServerFeatures - for _, f := range server.ServerFeatures { - if f == serverFeaturesIgnoreResourceDeletion { - sc.IgnoreResourceDeletion = true - } - } + sc.serverURI = server.ServerURI + sc.channelCreds = server.ChannelCreds + sc.serverFeatures = server.ServerFeatures + for _, cc := range server.ChannelCreds { // We stop at the first credential type that we support. c := bootstrap.GetCredentials(cc.Type) @@ -183,348 +296,533 @@ func (sc *ServerConfig) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("failed to build credentials bundle from bootstrap for %q: %v", cc.Type, err) } - sc.Creds = ChannelCreds(cc) + sc.selectedCreds = cc sc.credsDialOption = grpc.WithCredentialsBundle(bundle) - sc.Cleanups = append(sc.Cleanups, cancel) + sc.cleanups = append(sc.cleanups, cancel) break } + if sc.serverURI == "" { + return fmt.Errorf("xds: `server_uri` field in server config cannot be empty: %s", string(data)) + } + if sc.credsDialOption == nil { + return fmt.Errorf("xds: `channel_creds` field in server config cannot be empty: %s", string(data)) + } return nil } -// ServerConfigFromJSON creates a new ServerConfig from the given JSON -// configuration. This is the preferred way of creating a ServerConfig when -// hand-crafting the JSON configuration. -func ServerConfigFromJSON(data []byte) (*ServerConfig, error) { +// ServerConfigTestingOptions specifies options for creating a new ServerConfig +// for testing purposes. +// +// # Testing-Only +type ServerConfigTestingOptions struct { + // URI is the name of the server corresponding to this server config. + URI string + // ChannelCreds contains a list of channel credentials to use when talking + // to this server. If unspecified, `insecure` credentials will be used. + ChannelCreds []ChannelCreds + // ServerFeatures represents the list of features supported by this server. + ServerFeatures []string +} + +// ServerConfigForTesting creates a new ServerConfig from the passed in options, +// for testing purposes. 
+// +// # Testing-Only +func ServerConfigForTesting(opts ServerConfigTestingOptions) (*ServerConfig, error) { + cc := opts.ChannelCreds + if cc == nil { + cc = []ChannelCreds{{Type: "insecure"}} + } + scInternal := &serverConfigJSON{ + ServerURI: opts.URI, + ChannelCreds: cc, + ServerFeatures: opts.ServerFeatures, + } + scJSON, err := json.Marshal(scInternal) + if err != nil { + return nil, err + } + sc := new(ServerConfig) - if err := sc.UnmarshalJSON(data); err != nil { + if err := sc.UnmarshalJSON(scJSON); err != nil { return nil, err } return sc, nil } -// Equal reports whether sc and other are considered equal. -func (sc *ServerConfig) Equal(other *ServerConfig) bool { +// Config is the internal representation of the bootstrap configuration provided +// to the xDS client. +type Config struct { + xDSServers ServerConfigs + cpcs map[string]certproviderNameAndConfig + serverListenerResourceNameTemplate string + clientDefaultListenerResourceNameTemplate string + authorities map[string]*Authority + node node + + // A map from certprovider instance names to parsed buildable configs. + certProviderConfigs map[string]*certprovider.BuildableConfig +} + +// XDSServers returns the top-level list of management servers to connect to, +// ordered by priority. +func (c *Config) XDSServers() ServerConfigs { + return c.xDSServers +} + +// CertProviderConfigs returns a map from certificate provider plugin instance +// name to their configuration. Callers must not modify the returned map. +func (c *Config) CertProviderConfigs() map[string]*certprovider.BuildableConfig { + return c.certProviderConfigs +} + +// ServerListenerResourceNameTemplate returns template for the name of the +// Listener resource to subscribe to for a gRPC server. +// +// If starts with "xdstp:", will be interpreted as a new-style name, +// in which case the authority of the URI will be used to select the +// relevant configuration in the "authorities" map. +// +// The token "%s", if present in this string, will be replaced with the IP +// and port on which the server is listening. (e.g., "0.0.0.0:8080", +// "[::]:8080"). For example, a value of "example/resource/%s" could become +// "example/resource/0.0.0.0:8080". If the template starts with "xdstp:", +// the replaced string will be %-encoded. +// +// There is no default; if unset, xDS-based server creation fails. +func (c *Config) ServerListenerResourceNameTemplate() string { + return c.serverListenerResourceNameTemplate +} + +// ClientDefaultListenerResourceNameTemplate returns a template for the name of +// the Listener resource to subscribe to for a gRPC client channel. Used only +// when the channel is created with an "xds:" URI with no authority. +// +// If starts with "xdstp:", will be interpreted as a new-style name, +// in which case the authority of the URI will be used to select the +// relevant configuration in the "authorities" map. +// +// The token "%s", if present in this string, will be replaced with +// the service authority (i.e., the path part of the target URI +// used to create the gRPC channel). If the template starts with +// "xdstp:", the replaced string will be %-encoded. +// +// Defaults to "%s". +func (c *Config) ClientDefaultListenerResourceNameTemplate() string { + return c.clientDefaultListenerResourceNameTemplate +} + +// Authorities returns a map of authority name to corresponding configuration. +// Callers must not modify the returned map. 
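
// Hypothetical use of ServerConfigForTesting above, as it might appear in a
// test inside the grpc module (this package is internal, so it cannot be
// imported from elsewhere); the URI is made up.
package bootstrap_test

import (
	"testing"

	"google.golang.org/grpc/internal/xds/bootstrap"
)

func TestServerConfigForTesting(t *testing.T) {
	sc, err := bootstrap.ServerConfigForTesting(bootstrap.ServerConfigTestingOptions{
		URI: "xds.example.com:443",
		// ChannelCreds is omitted, so `insecure` credentials are filled in.
		ServerFeatures: []string{"ignore_resource_deletion"},
	})
	if err != nil {
		t.Fatal(err)
	}
	if got, want := sc.ServerURI(), "xds.example.com:443"; got != want {
		t.Fatalf("ServerURI() = %q, want %q", got, want)
	}
	if !sc.ServerFeaturesIgnoreResourceDeletion() {
		t.Error("ServerFeaturesIgnoreResourceDeletion() = false, want true")
	}
}
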
+// +// This is used in the following cases: +// - A gRPC client channel is created using an "xds:" URI that includes +// an authority. +// - A gRPC client channel is created using an "xds:" URI with no +// authority, but the "client_default_listener_resource_name_template" +// field above turns it into an "xdstp:" URI. +// - A gRPC server is created and the +// "server_listener_resource_name_template" field is an "xdstp:" URI. +// +// In any of those cases, it is an error if the specified authority is +// not present in this map. +func (c *Config) Authorities() map[string]*Authority { + return c.authorities +} + +// Node returns xDS a v3 Node proto corresponding to the node field in the +// bootstrap configuration, which identifies a specific gRPC instance. +func (c *Config) Node() *v3corepb.Node { + return c.node.toProto() +} + +// Equal returns true if c equals other. +func (c *Config) Equal(other *Config) bool { switch { - case sc == nil && other == nil: + case c == nil && other == nil: return true - case (sc != nil) != (other != nil): + case (c != nil) != (other != nil): + return false + case !c.xDSServers.Equal(&other.xDSServers): + return false + case !maps.EqualFunc(c.certProviderConfigs, other.certProviderConfigs, func(a, b *certprovider.BuildableConfig) bool { return a.String() == b.String() }): return false - case sc.ServerURI != other.ServerURI: + case c.serverListenerResourceNameTemplate != other.serverListenerResourceNameTemplate: return false - case !sc.Creds.Equal(other.Creds): + case c.clientDefaultListenerResourceNameTemplate != other.clientDefaultListenerResourceNameTemplate: return false - case !equalStringSlice(sc.ServerFeatures, other.ServerFeatures): + case !maps.EqualFunc(c.authorities, other.authorities, func(a, b *Authority) bool { return a.Equal(b) }): + return false + case !c.node.Equal(other.node): return false } return true } -func equalStringSlice(a, b []string) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if a[i] != b[i] { - return false - } - } - return true +// String returns a string representation of the Config. +func (c *Config) String() string { + s, _ := c.MarshalJSON() + return string(s) } -// unmarshalJSONServerConfigSlice unmarshals JSON to a slice. -func unmarshalJSONServerConfigSlice(data []byte) ([]*ServerConfig, error) { - var servers []*ServerConfig - if err := json.Unmarshal(data, &servers); err != nil { - return nil, fmt.Errorf("failed to unmarshal JSON to []*ServerConfig: %v", err) - } - if len(servers) < 1 { - return nil, fmt.Errorf("no management server found in JSON") - } - return servers, nil +// The following fields correspond 1:1 with the JSON schema for Config. +type configJSON struct { + XDSServers ServerConfigs `json:"xds_servers,omitempty"` + CertificateProviders map[string]certproviderNameAndConfig `json:"certificate_providers,omitempty"` + ServerListenerResourceNameTemplate string `json:"server_listener_resource_name_template,omitempty"` + ClientDefaultListenerResourceNameTemplate string `json:"client_default_listener_resource_name_template,omitempty"` + Authorities map[string]*Authority `json:"authorities,omitempty"` + Node node `json:"node,omitempty"` } -// Authority contains configuration for an Authority for an xDS control plane -// server. See the Authorities field in the Config struct for how it's used. -type Authority struct { - // ClientListenerResourceNameTemplate is template for the name of the - // Listener resource to subscribe to for a gRPC client channel. 
Used only
-	// when the channel is created using an "xds:" URI with this authority name.
-	//
-	// The token "%s", if present in this string, will be replaced
-	// with %-encoded service authority (i.e., the path part of the target
-	// URI used to create the gRPC channel).
-	//
-	// Must start with "xdstp://<authority_name>/". If it does not,
-	// that is considered a bootstrap file parsing error.
-	//
-	// If not present in the bootstrap file, defaults to
-	// "xdstp://<authority_name>/envoy.config.listener.v3.Listener/%s".
-	ClientListenerResourceNameTemplate string
-	// XDSServer contains the management server and config to connect to for
-	// this authority.
-	XDSServer *ServerConfig
+// MarshalJSON returns marshaled JSON bytes corresponding to this config.
+func (c *Config) MarshalJSON() ([]byte, error) {
+	config := &configJSON{
+		XDSServers:           c.xDSServers,
+		CertificateProviders: c.cpcs,
+		ServerListenerResourceNameTemplate:        c.serverListenerResourceNameTemplate,
+		ClientDefaultListenerResourceNameTemplate: c.clientDefaultListenerResourceNameTemplate,
+		Authorities: c.authorities,
+		Node:        c.node,
+	}
+	return json.MarshalIndent(config, " ", " ")
+}

-// UnmarshalJSON implement json unmarshaller.
-func (a *Authority) UnmarshalJSON(data []byte) error {
-	var jsonData map[string]json.RawMessage
-	if err := json.Unmarshal(data, &jsonData); err != nil {
-		return fmt.Errorf("xds: failed to parse authority: %v", err)
+// UnmarshalJSON takes the json data (the complete bootstrap configuration) and
+// unmarshals it to the struct.
+func (c *Config) UnmarshalJSON(data []byte) error {
+	// Initialize the node field with client controlled values. This ensures
+	// even if the bootstrap configuration did not contain the node field, we
+	// will have a node field with client controlled fields alone.
+	config := configJSON{Node: newNode()}
+	if err := json.Unmarshal(data, &config); err != nil {
+		return fmt.Errorf("xds: json.Unmarshal(%s) failed during bootstrap: %v", string(data), err)
 	}
-	for k, v := range jsonData {
-		switch k {
-		case "xds_servers":
-			servers, err := unmarshalJSONServerConfigSlice(v)
-			if err != nil {
-				return fmt.Errorf("xds: json.Unmarshal(data) for field %q failed during bootstrap: %v", k, err)
-			}
-			a.XDSServer = servers[0]
-		case "client_listener_resource_name_template":
-			if err := json.Unmarshal(v, &a.ClientListenerResourceNameTemplate); err != nil {
-				return fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
-			}
+	c.xDSServers = config.XDSServers
+	c.cpcs = config.CertificateProviders
+	c.serverListenerResourceNameTemplate = config.ServerListenerResourceNameTemplate
+	c.clientDefaultListenerResourceNameTemplate = config.ClientDefaultListenerResourceNameTemplate
+	c.authorities = config.Authorities
+	c.node = config.Node
+
+	// Build the certificate providers configuration to ensure that it is valid.
+	cpcCfgs := make(map[string]*certprovider.BuildableConfig)
+	getBuilder := internal.GetCertificateProviderBuilder.(func(string) certprovider.Builder)
+	for instance, nameAndConfig := range c.cpcs {
+		name := nameAndConfig.PluginName
+		parser := getBuilder(nameAndConfig.PluginName)
+		if parser == nil {
+			// We ignore plugins that we do not know about.
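			// For reference, a hypothetical `certificate_providers` stanza
			// that this loop accepts (the instance name "default" and the
			// plugin configuration below are illustrative, not prescribed):
			//
			//   "certificate_providers": {
			//     "default": {
			//       "plugin_name": "file_watcher",
			//       "config": {
			//         "certificate_file": "/certs/cert.pem",
			//         "private_key_file": "/certs/key.pem",
			//         "ca_certificate_file": "/certs/ca.pem",
			//         "refresh_interval": "600s"
			//       }
			//     }
			//   }
			//
			// An entry whose "plugin_name" has no registered builder is
			// skipped here rather than treated as a bootstrap error.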
+ continue } + bc, err := parser.ParseConfig(nameAndConfig.Config) + if err != nil { + return fmt.Errorf("xds: config parsing for certificate provider plugin %q failed during bootstrap: %v", name, err) + } + cpcCfgs[instance] = bc } - return nil -} + c.certProviderConfigs = cpcCfgs -// Config provides the xDS client with several key bits of information that it -// requires in its interaction with the management server. The Config is -// initialized from the bootstrap file. -type Config struct { - // XDSServer is the management server to connect to. - // - // The bootstrap file contains a list of servers (with name+creds), but we - // pick the first one. - XDSServer *ServerConfig - // CertProviderConfigs contains a mapping from certificate provider plugin - // instance names to parsed buildable configs. - CertProviderConfigs map[string]*certprovider.BuildableConfig - // ServerListenerResourceNameTemplate is a template for the name of the - // Listener resource to subscribe to for a gRPC server. - // - // If starts with "xdstp:", will be interpreted as a new-style name, - // in which case the authority of the URI will be used to select the - // relevant configuration in the "authorities" map. - // - // The token "%s", if present in this string, will be replaced with the IP - // and port on which the server is listening. (e.g., "0.0.0.0:8080", - // "[::]:8080"). For example, a value of "example/resource/%s" could become - // "example/resource/0.0.0.0:8080". If the template starts with "xdstp:", - // the replaced string will be %-encoded. - // - // There is no default; if unset, xDS-based server creation fails. - ServerListenerResourceNameTemplate string - // A template for the name of the Listener resource to subscribe to - // for a gRPC client channel. Used only when the channel is created - // with an "xds:" URI with no authority. - // - // If starts with "xdstp:", will be interpreted as a new-style name, - // in which case the authority of the URI will be used to select the - // relevant configuration in the "authorities" map. - // - // The token "%s", if present in this string, will be replaced with - // the service authority (i.e., the path part of the target URI - // used to create the gRPC channel). If the template starts with - // "xdstp:", the replaced string will be %-encoded. - // - // Defaults to "%s". - ClientDefaultListenerResourceNameTemplate string - // Authorities is a map of authority name to corresponding configuration. - // - // This is used in the following cases: - // - A gRPC client channel is created using an "xds:" URI that includes - // an authority. - // - A gRPC client channel is created using an "xds:" URI with no - // authority, but the "client_default_listener_resource_name_template" - // field above turns it into an "xdstp:" URI. - // - A gRPC server is created and the - // "server_listener_resource_name_template" field is an "xdstp:" URI. - // - // In any of those cases, it is an error if the specified authority is - // not present in this map. - Authorities map[string]*Authority - // NodeProto contains the Node proto to be used in xDS requests. This will be - // of type *v3corepb.Node. - NodeProto *v3corepb.Node -} - -type channelCreds struct { - Type string `json:"type"` - Config json.RawMessage `json:"config,omitempty"` -} + // Default value of the default client listener name template is "%s". 
+	if c.clientDefaultListenerResourceNameTemplate == "" {
+		c.clientDefaultListenerResourceNameTemplate = "%s"
+	}
+	if len(c.xDSServers) == 0 {
+		return fmt.Errorf("xds: required field `xds_servers` not found in bootstrap configuration: %s", string(data))
+	}

-type xdsServer struct {
-	ServerURI      string         `json:"server_uri"`
-	ChannelCreds   []channelCreds `json:"channel_creds"`
-	ServerFeatures []string       `json:"server_features"`
+	// Post-process the authorities' client listener resource template field:
+	// - if set, it must start with "xdstp://<authority_name>/"
+	// - if not set, it defaults to "xdstp://<authority_name>/envoy.config.listener.v3.Listener/%s"
+	for name, authority := range c.authorities {
+		prefix := fmt.Sprintf("xdstp://%s", url.PathEscape(name))
+		if authority.ClientListenerResourceNameTemplate == "" {
+			authority.ClientListenerResourceNameTemplate = prefix + "/envoy.config.listener.v3.Listener/%s"
+			continue
+		}
+		if !strings.HasPrefix(authority.ClientListenerResourceNameTemplate, prefix) {
+			return fmt.Errorf("xds: field clientListenerResourceNameTemplate %q of authority %q doesn't start with prefix %q", authority.ClientListenerResourceNameTemplate, name, prefix)
+		}
+	}
+	return nil
 }

-func bootstrapConfigFromEnvVariable() ([]byte, error) {
+// GetConfiguration returns the bootstrap configuration initialized by reading
+// the bootstrap file found at ${GRPC_XDS_BOOTSTRAP} or bootstrap contents
+// specified at ${GRPC_XDS_BOOTSTRAP_CONFIG}. If both env vars are set, the
+// former is preferred.
+//
+// If none of the env vars are set, this function returns the fallback
+// configuration if it is not nil. Else, it returns an error.
+//
+// This function tries to process as much of the bootstrap file as possible (in
+// the presence of the errors) and may return a Config object with certain
+// fields left unspecified, in which case the caller should use some sane
+// defaults.
+func GetConfiguration() (*Config, error) {
 	fName := envconfig.XDSBootstrapFileName
 	fContent := envconfig.XDSBootstrapFileContent
-	// Bootstrap file name has higher priority than bootstrap content.
 	if fName != "" {
-		// If file name is set
-		//  - If file not found (or other errors), fail
-		//  - Otherwise, use the content.
-		//
-		// Note that even if the content is invalid, we don't failover to the
-		// file content env variable.
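
// Small runnable sketch of the prefix rule enforced just above: an
// authority's client listener template must start with
// "xdstp://<authority_name>/", with the authority name %-encoded. The
// authority name is hypothetical.
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	name := "example.com/authority"
	prefix := fmt.Sprintf("xdstp://%s", url.PathEscape(name))
	// The default template used when the field is unset:
	tmpl := prefix + "/envoy.config.listener.v3.Listener/%s"
	fmt.Println(prefix)                          // xdstp://example.com%2Fauthority
	fmt.Println(strings.HasPrefix(tmpl, prefix)) // true
}
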
- logger.Debugf("Using bootstrap file with name %q", fName) - return bootstrapFileReadFunc(fName) + if logger.V(2) { + logger.Infof("Using bootstrap file with name %q from GRPC_XDS_BOOTSTRAP environment variable", fName) + } + cfg, err := bootstrapFileReadFunc(fName) + if err != nil { + return nil, fmt.Errorf("xds: failed to read bootstrap config from file %q: %v", fName, err) + } + return newConfigFromContents(cfg) } if fContent != "" { - return []byte(fContent), nil + if logger.V(2) { + logger.Infof("Using bootstrap contents from GRPC_XDS_BOOTSTRAP_CONFIG environment variable") + } + return newConfigFromContents([]byte(fContent)) + } + + if cfg := fallbackBootstrapConfig(); cfg != nil { + if logger.V(2) { + logger.Infof("Using bootstrap contents from fallback config") + } + return cfg, nil } - return nil, fmt.Errorf("none of the bootstrap environment variables (%q or %q) defined", - envconfig.XDSBootstrapFileNameEnv, envconfig.XDSBootstrapFileContentEnv) + return nil, fmt.Errorf("bootstrap environment variables (%q or %q) not defined, and no fallback config set", envconfig.XDSBootstrapFileNameEnv, envconfig.XDSBootstrapFileContentEnv) +} + +func newConfigFromContents(data []byte) (*Config, error) { + // Normalize the input configuration. + buf := bytes.Buffer{} + err := json.Indent(&buf, data, "", "") + if err != nil { + return nil, fmt.Errorf("xds: error normalizing JSON bootstrap configuration: %v", err) + } + data = bytes.TrimSpace(buf.Bytes()) + + config := &Config{} + if err := config.UnmarshalJSON(data); err != nil { + return nil, err + } + return config, nil } -// NewConfig returns a new instance of Config initialized by reading the -// bootstrap file found at ${GRPC_XDS_BOOTSTRAP} or bootstrap contents specified -// at ${GRPC_XDS_BOOTSTRAP_CONFIG}. If both env vars are set, the former is -// preferred. +// ConfigOptionsForTesting specifies options for creating a new bootstrap +// configuration for testing purposes. // -// We support a credential registration mechanism and only credentials -// registered through that mechanism will be accepted here. See package -// `xds/bootstrap` for details. +// # Testing-Only +type ConfigOptionsForTesting struct { + // Servers is the top-level xDS server configuration. It contains a list of + // server configurations. + Servers json.RawMessage + // CertificateProviders is the certificate providers configuration. + CertificateProviders map[string]json.RawMessage + // ServerListenerResourceNameTemplate is the listener resource name template + // to be used on the gRPC server. + ServerListenerResourceNameTemplate string + // ClientDefaultListenerResourceNameTemplate is the default listener + // resource name template to be used on the gRPC client. + ClientDefaultListenerResourceNameTemplate string + // Authorities is a list of non-default authorities. + Authorities map[string]json.RawMessage + // Node identifies the gRPC client/server node in the + // proxyless service mesh. + Node json.RawMessage +} + +// NewContentsForTesting creates a new bootstrap configuration from the passed in +// options, for testing purposes. // -// This function tries to process as much of the bootstrap file as possible (in -// the presence of the errors) and may return a Config object with certain -// fields left unspecified, in which case the caller should use some sane -// defaults. 
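
// Sketch of the normalization step in newConfigFromContents above:
// json.Indent with empty prefix and indent re-formats the input so that
// differently-spaced but equivalent bootstrap contents end up identical.
// The inputs here are made up.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	a := []byte(`{"id":"node-1"}`)
	b := []byte("{\n    \"id\":   \"node-1\"\n}")
	norm := func(in []byte) []byte {
		var buf bytes.Buffer
		if err := json.Indent(&buf, in, "", ""); err != nil {
			panic(err)
		}
		return bytes.TrimSpace(buf.Bytes())
	}
	fmt.Println(bytes.Equal(norm(a), norm(b))) // true
}
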
-func NewConfig() (*Config, error) { - // Examples of the bootstrap json can be found in the generator tests - // https://github.com/GoogleCloudPlatform/traffic-director-grpc-bootstrap/blob/master/main_test.go. - data, err := bootstrapConfigFromEnvVariable() +// # Testing-Only +func NewContentsForTesting(opts ConfigOptionsForTesting) ([]byte, error) { + var servers ServerConfigs + if err := json.Unmarshal(opts.Servers, &servers); err != nil { + return nil, err + } + certProviders := make(map[string]certproviderNameAndConfig) + for k, v := range opts.CertificateProviders { + cp := certproviderNameAndConfig{} + if err := json.Unmarshal(v, &cp); err != nil { + return nil, fmt.Errorf("failed to unmarshal certificate provider configuration for %s: %s", k, string(v)) + } + certProviders[k] = cp + } + authorities := make(map[string]*Authority) + for k, v := range opts.Authorities { + a := &Authority{} + if err := json.Unmarshal(v, a); err != nil { + return nil, fmt.Errorf("failed to unmarshal authority configuration for %s: %s", k, string(v)) + } + authorities[k] = a + } + node := newNode() + if err := json.Unmarshal(opts.Node, &node); err != nil { + return nil, fmt.Errorf("failed to unmarshal node configuration %s: %v", string(opts.Node), err) + } + cfgJSON := configJSON{ + XDSServers: servers, + CertificateProviders: certProviders, + ServerListenerResourceNameTemplate: opts.ServerListenerResourceNameTemplate, + ClientDefaultListenerResourceNameTemplate: opts.ClientDefaultListenerResourceNameTemplate, + Authorities: authorities, + Node: node, + } + contents, err := json.MarshalIndent(cfgJSON, " ", " ") if err != nil { - return nil, fmt.Errorf("xds: Failed to read bootstrap config: %v", err) + return nil, fmt.Errorf("failed to marshal bootstrap configuration for provided options %+v: %v", opts, err) } - return newConfigFromContents(data) + return contents, nil } -// NewConfigFromContents returns a new Config using the specified -// bootstrap file contents instead of reading the environment variable. -func NewConfigFromContents(data []byte) (*Config, error) { - return newConfigFromContents(data) +// NewConfigForTesting creates a new bootstrap configuration from the provided +// contents, for testing purposes. +// +// # Testing-Only +func NewConfigForTesting(contents []byte) (*Config, error) { + return newConfigFromContents(contents) } -func newConfigFromContents(data []byte) (*Config, error) { - config := &Config{} +// certproviderNameAndConfig is the internal representation of +// the`certificate_providers` field in the bootstrap configuration. +type certproviderNameAndConfig struct { + PluginName string `json:"plugin_name"` + Config json.RawMessage `json:"config"` +} - var jsonData map[string]json.RawMessage - if err := json.Unmarshal(data, &jsonData); err != nil { - return nil, fmt.Errorf("xds: failed to parse bootstrap config: %v", err) - } +// locality is the internal representation of the locality field within node. 
+type locality struct { + Region string `json:"region,omitempty"` + Zone string `json:"zone,omitempty"` + SubZone string `json:"sub_zone,omitempty"` +} - var node *v3corepb.Node - opts := protojson.UnmarshalOptions{DiscardUnknown: true} - for k, v := range jsonData { - switch k { - case "node": - node = &v3corepb.Node{} - if err := opts.Unmarshal(v, node); err != nil { - return nil, fmt.Errorf("xds: protojson.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) - } - case "xds_servers": - servers, err := unmarshalJSONServerConfigSlice(v) - if err != nil { - return nil, fmt.Errorf("xds: json.Unmarshal(data) for field %q failed during bootstrap: %v", k, err) - } - config.XDSServer = servers[0] - case "certificate_providers": - var providerInstances map[string]json.RawMessage - if err := json.Unmarshal(v, &providerInstances); err != nil { - return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) - } - configs := make(map[string]*certprovider.BuildableConfig) - getBuilder := internal.GetCertificateProviderBuilder.(func(string) certprovider.Builder) - for instance, data := range providerInstances { - var nameAndConfig struct { - PluginName string `json:"plugin_name"` - Config json.RawMessage `json:"config"` - } - if err := json.Unmarshal(data, &nameAndConfig); err != nil { - return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), instance, err) - } - - name := nameAndConfig.PluginName - parser := getBuilder(nameAndConfig.PluginName) - if parser == nil { - // We ignore plugins that we do not know about. - continue - } - bc, err := parser.ParseConfig(nameAndConfig.Config) - if err != nil { - return nil, fmt.Errorf("xds: config parsing for plugin %q failed: %v", name, err) - } - configs[instance] = bc - } - config.CertProviderConfigs = configs - case "server_listener_resource_name_template": - if err := json.Unmarshal(v, &config.ServerListenerResourceNameTemplate); err != nil { - return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) - } - case "client_default_listener_resource_name_template": - if err := json.Unmarshal(v, &config.ClientDefaultListenerResourceNameTemplate); err != nil { - return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) - } - case "authorities": - if err := json.Unmarshal(v, &config.Authorities); err != nil { - return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) - } - default: - logger.Warningf("Bootstrap content has unknown field: %s", k) - } - // Do not fail the xDS bootstrap when an unknown field is seen. This can - // happen when an older version client reads a newer version bootstrap - // file with new fields. - } +func (l locality) Equal(other locality) bool { + return l.Region == other.Region && l.Zone == other.Zone && l.SubZone == other.SubZone +} + +func (l locality) isEmpty() bool { + return l.Equal(locality{}) +} - if config.ClientDefaultListenerResourceNameTemplate == "" { - // Default value of the default client listener name template is "%s". - config.ClientDefaultListenerResourceNameTemplate = "%s" +type userAgentVersion struct { + UserAgentVersion string `json:"user_agent_version,omitempty"` +} + +// node is the internal representation of the node field in the bootstrap +// configuration. 
+type node struct {
+	ID       string           `json:"id,omitempty"`
+	Cluster  string           `json:"cluster,omitempty"`
+	Locality locality         `json:"locality,omitempty"`
+	Metadata *structpb.Struct `json:"metadata,omitempty"`
+
+	// The following fields are controlled by the client implementation and
+	// should not unmarshaled from JSON.
+	userAgentName        string
+	userAgentVersionType userAgentVersion
+	clientFeatures       []string
+}
+
+// newNode is a convenience function to create a new node instance with fields
+// controlled by the client implementation set to the desired values.
+func newNode() node {
+	return node{
+		userAgentName:        gRPCUserAgentName,
+		userAgentVersionType: userAgentVersion{UserAgentVersion: grpc.Version},
+		clientFeatures:       []string{clientFeatureNoOverprovisioning, clientFeatureResourceWrapper},
 	}
-	if config.XDSServer == nil {
-		return nil, fmt.Errorf("xds: required field %q not found in bootstrap %s", "xds_servers", jsonData["xds_servers"])
+}
+
+func (n node) Equal(other node) bool {
+	switch {
+	case n.ID != other.ID:
+		return false
+	case n.Cluster != other.Cluster:
+		return false
+	case !n.Locality.Equal(other.Locality):
+		return false
+	case n.userAgentName != other.userAgentName:
+		return false
+	case n.userAgentVersionType != other.userAgentVersionType:
+		return false
 	}
-	if config.XDSServer.ServerURI == "" {
-		return nil, fmt.Errorf("xds: required field %q not found in bootstrap %s", "xds_servers.server_uri", jsonData["xds_servers"])
+
+	// Consider failures in JSON marshaling as being unable to perform the
+	// comparison, and hence return false.
+	nMetadata, err := n.Metadata.MarshalJSON()
+	if err != nil {
+		return false
 	}
-	if config.XDSServer.CredsDialOption() == nil {
-		return nil, fmt.Errorf("xds: required field %q doesn't contain valid value in bootstrap %s", "xds_servers.channel_creds", jsonData["xds_servers"])
+	otherMetadata, err := other.Metadata.MarshalJSON()
+	if err != nil {
+		return false
 	}
-	// Post-process the authorities' client listener resource template field:
-	// - if set, it must start with "xdstp://<authority_name>/"
-	// - if not set, it defaults to "xdstp://<authority_name>/envoy.config.listener.v3.Listener/%s"
-	for name, authority := range config.Authorities {
-		prefix := fmt.Sprintf("xdstp://%s", url.PathEscape(name))
-		if authority.ClientListenerResourceNameTemplate == "" {
-			authority.ClientListenerResourceNameTemplate = prefix + "/envoy.config.listener.v3.Listener/%s"
-			continue
-		}
-		if !strings.HasPrefix(authority.ClientListenerResourceNameTemplate, prefix) {
-			return nil, fmt.Errorf("xds: field ClientListenerResourceNameTemplate %q of authority %q doesn't start with prefix %q", authority.ClientListenerResourceNameTemplate, name, prefix)
-		}
+	if !bytes.Equal(nMetadata, otherMetadata) {
+		return false
 	}
-	// Performing post-production on the node information. Some additional fields
-	// which are not expected to be set in the bootstrap file are populated here.
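
// Sketch of the metadata comparison in node.Equal above: two structpb.Struct
// values are considered equal when their JSON encodings match byte-for-byte.
// The key and value are hypothetical.
package main

import (
	"bytes"
	"fmt"

	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	a, _ := structpb.NewStruct(map[string]any{"network": "default"})
	b, _ := structpb.NewStruct(map[string]any{"network": "default"})
	aJSON, _ := a.MarshalJSON()
	bJSON, _ := b.MarshalJSON()
	fmt.Println(bytes.Equal(aJSON, bJSON)) // true within a single process
}
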
- if node == nil { - node = &v3corepb.Node{} + return slices.Equal(n.clientFeatures, other.clientFeatures) +} + +func (n node) toProto() *v3corepb.Node { + return &v3corepb.Node{ + Id: n.ID, + Cluster: n.Cluster, + Locality: func() *v3corepb.Locality { + if n.Locality.isEmpty() { + return nil + } + return &v3corepb.Locality{ + Region: n.Locality.Region, + Zone: n.Locality.Zone, + SubZone: n.Locality.SubZone, + } + }(), + Metadata: proto.Clone(n.Metadata).(*structpb.Struct), + UserAgentName: n.userAgentName, + UserAgentVersionType: &v3corepb.Node_UserAgentVersion{UserAgentVersion: n.userAgentVersionType.UserAgentVersion}, + ClientFeatures: slices.Clone(n.clientFeatures), } - node.UserAgentName = gRPCUserAgentName - node.UserAgentVersionType = &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version} - node.ClientFeatures = append(node.ClientFeatures, clientFeatureNoOverprovisioning, clientFeatureResourceWrapper) - config.NodeProto = node +} - if logger.V(2) { - logger.Infof("Bootstrap config for creating xds-client: %v", pretty.ToJSON(config)) +// SetFallbackBootstrapConfig sets the fallback bootstrap configuration to be +// used when the bootstrap environment variables are unset. +// +// The provided configuration must be valid JSON. Returns a non-nil error if +// parsing the provided configuration fails. +func SetFallbackBootstrapConfig(cfgJSON []byte) error { + config, err := newConfigFromContents(cfgJSON) + if err != nil { + return err } - return config, nil + + configMu.Lock() + defer configMu.Unlock() + fallbackBootstrapCfg = config + return nil +} + +// UnsetFallbackBootstrapConfigForTesting unsets the fallback bootstrap +// configuration to be used when the bootstrap environment variables are unset. +// +// # Testing-Only +func UnsetFallbackBootstrapConfigForTesting() { + configMu.Lock() + defer configMu.Unlock() + fallbackBootstrapCfg = nil } + +// fallbackBootstrapConfig returns the fallback bootstrap configuration +// that will be used by the xDS client when the bootstrap environment +// variables are unset. +func fallbackBootstrapConfig() *Config { + configMu.Lock() + defer configMu.Unlock() + return fallbackBootstrapCfg +} + +var ( + configMu sync.Mutex + fallbackBootstrapCfg *Config +) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/bootstrap/template.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/bootstrap/template.go index 9b51fcc8397..ec1a30919ec 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/bootstrap/template.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/bootstrap/template.go @@ -34,7 +34,7 @@ func PopulateResourceTemplate(template, target string) string { if strings.HasPrefix(template, "xdstp:") { target = percentEncode(target) } - return strings.Replace(template, "%s", target, -1) + return strings.ReplaceAll(template, "%s", target) } // percentEncode percent encode t, except for "/". See the tests for examples. 
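
// Runnable sketch of PopulateResourceTemplate above: "%s" is replaced with
// the target, and for new-style "xdstp:" templates the target is first
// %-encoded (keeping "/" intact, approximated here by un-escaping "%2F").
// The template and target are hypothetical.
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func populate(template, target string) string {
	if strings.HasPrefix(template, "xdstp:") {
		target = strings.ReplaceAll(url.PathEscape(target), "%2F", "/")
	}
	return strings.ReplaceAll(template, "%s", target)
}

func main() {
	tmpl := "xdstp://authority.example.com/envoy.config.listener.v3.Listener/%s"
	fmt.Println(populate(tmpl, "grpc/server?listening_address=0.0.0.0:8080"))
}
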
diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/rbac/converter.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/rbac/converter.go index 713e39cf31c..fb599954a6c 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/rbac/converter.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/rbac/converter.go @@ -59,11 +59,11 @@ func buildLogger(loggerConfig *v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConf } func getCustomConfig(config *anypb.Any) (json.RawMessage, string, error) { - any, err := config.UnmarshalNew() + c, err := config.UnmarshalNew() if err != nil { return nil, "", err } - switch m := any.(type) { + switch m := c.(type) { case *v1xdsudpatypepb.TypedStruct: return convertCustomConfig(m.TypeUrl, m.Value) case *v3xdsxdstypepb.TypedStruct: diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go index c9f71d32cbb..e1c15018bde 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go @@ -244,7 +244,7 @@ func (am *andMatcher) match(data *rpcData) bool { type alwaysMatcher struct { } -func (am *alwaysMatcher) match(data *rpcData) bool { +func (am *alwaysMatcher) match(*rpcData) bool { return true } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go index 33011726a6f..344052cb04f 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go @@ -237,12 +237,9 @@ func newRPCData(ctx context.Context) (*rpcData, error) { var authType string var peerCertificates []*x509.Certificate - if pi.AuthInfo != nil { - tlsInfo, ok := pi.AuthInfo.(credentials.TLSInfo) - if ok { - authType = pi.AuthInfo.AuthType() - peerCertificates = tlsInfo.State.PeerCertificates - } + if tlsInfo, ok := pi.AuthInfo.(credentials.TLSInfo); ok { + authType = pi.AuthInfo.AuthType() + peerCertificates = tlsInfo.State.PeerCertificates } return &rpcData{ @@ -281,11 +278,12 @@ func (e *engine) doAuditLogging(rpcData *rpcData, rule string, authorized bool) // In the RBAC world, we need to have a SPIFFE ID as the principal for this // to be meaningful principal := "" - if rpcData.peerInfo != nil && rpcData.peerInfo.AuthInfo != nil && rpcData.peerInfo.AuthInfo.AuthType() == "tls" { + if rpcData.peerInfo != nil { // If AuthType = tls, then we can cast AuthInfo to TLSInfo. 
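
// Standalone sketch of the comma-ok assertion pattern adopted in this hunk:
// TLS details are pulled from peer info without a prior AuthType() check,
// since a failed assertion simply yields ok == false. The helper name
// principalFromContext is hypothetical.
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/peer"
)

func principalFromContext(ctx context.Context) string {
	pi, ok := peer.FromContext(ctx)
	if !ok || pi.AuthInfo == nil {
		return ""
	}
	if tlsInfo, ok := pi.AuthInfo.(credentials.TLSInfo); ok && tlsInfo.SPIFFEID != nil {
		return tlsInfo.SPIFFEID.String()
	}
	return ""
}

func main() {
	// No peer info in a bare context, so no principal is found.
	fmt.Printf("%q\n", principalFromContext(context.Background()))
}
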
- tlsInfo := rpcData.peerInfo.AuthInfo.(credentials.TLSInfo) - if tlsInfo.SPIFFEID != nil { - principal = tlsInfo.SPIFFEID.String() + if tlsInfo, ok := rpcData.peerInfo.AuthInfo.(credentials.TLSInfo); ok { + if tlsInfo.SPIFFEID != nil { + principal = tlsInfo.SPIFFEID.String() + } } } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/keepalive/keepalive.go b/terraform/providers/google/vendor/google.golang.org/grpc/keepalive/keepalive.go index 34d31b5e7d3..eb42b19fb99 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/keepalive/keepalive.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -34,15 +34,29 @@ type ClientParameters struct { // After a duration of this time if the client doesn't see any activity it // pings the server to see if the transport is still alive. // If set below 10s, a minimum value of 10s will be used instead. - Time time.Duration // The current default value is infinity. + // + // Note that gRPC servers have a default EnforcementPolicy.MinTime of 5 + // minutes (which means the client shouldn't ping more frequently than every + // 5 minutes). + // + // Though not ideal, it's not a strong requirement for Time to be less than + // EnforcementPolicy.MinTime. Time will automatically double if the server + // disconnects due to its enforcement policy. + // + // For more details, see + // https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md + Time time.Duration // After having pinged for keepalive check, the client waits for a duration // of Timeout and if no activity is seen even after that the connection is // closed. - Timeout time.Duration // The current default value is 20 seconds. + // + // If keepalive is enabled, and this value is not explicitly set, the default + // is 20 seconds. + Timeout time.Duration // If true, client sends keepalive pings even with no active RPCs. If false, // when there are no active RPCs, Time and Timeout will be ignored and no // keepalive pings will be sent. - PermitWithoutStream bool // false by default. + PermitWithoutStream bool } // ServerParameters is used to set keepalive and max-age parameters on the diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/mem/buffer_pool.go b/terraform/providers/google/vendor/google.golang.org/grpc/mem/buffer_pool.go new file mode 100644 index 00000000000..c37c58c0233 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/mem/buffer_pool.go @@ -0,0 +1,194 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "sort" + "sync" + + "google.golang.org/grpc/internal" +) + +// BufferPool is a pool of buffers that can be shared and reused, resulting in +// decreased memory allocation. +type BufferPool interface { + // Get returns a buffer with specified length from the pool. + Get(length int) *[]byte + + // Put returns a buffer to the pool. 
+ Put(*[]byte) +} + +var defaultBufferPoolSizes = []int{ + 256, + 4 << 10, // 4KB (go page size) + 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC) + 32 << 10, // 32KB (default buffer size for io.Copy) + 1 << 20, // 1MB +} + +var defaultBufferPool BufferPool + +func init() { + defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...) + + internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) { + defaultBufferPool = pool + } + + internal.SetBufferPoolingThresholdForTesting = func(threshold int) { + bufferPoolingThreshold = threshold + } +} + +// DefaultBufferPool returns the current default buffer pool. It is a BufferPool +// created with NewBufferPool that uses a set of default sizes optimized for +// expected workflows. +func DefaultBufferPool() BufferPool { + return defaultBufferPool +} + +// NewTieredBufferPool returns a BufferPool implementation that uses multiple +// underlying pools of the given pool sizes. +func NewTieredBufferPool(poolSizes ...int) BufferPool { + sort.Ints(poolSizes) + pools := make([]*sizedBufferPool, len(poolSizes)) + for i, s := range poolSizes { + pools[i] = newSizedBufferPool(s) + } + return &tieredBufferPool{ + sizedPools: pools, + } +} + +// tieredBufferPool implements the BufferPool interface with multiple tiers of +// buffer pools for different sizes of buffers. +type tieredBufferPool struct { + sizedPools []*sizedBufferPool + fallbackPool simpleBufferPool +} + +func (p *tieredBufferPool) Get(size int) *[]byte { + return p.getPool(size).Get(size) +} + +func (p *tieredBufferPool) Put(buf *[]byte) { + p.getPool(cap(*buf)).Put(buf) +} + +func (p *tieredBufferPool) getPool(size int) BufferPool { + poolIdx := sort.Search(len(p.sizedPools), func(i int) bool { + return p.sizedPools[i].defaultSize >= size + }) + + if poolIdx == len(p.sizedPools) { + return &p.fallbackPool + } + + return p.sizedPools[poolIdx] +} + +// sizedBufferPool is a BufferPool implementation that is optimized for specific +// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size +// of 16kb and a sizedBufferPool can be configured to only return buffers with a +// capacity of 16kb. Note that however it does not support returning larger +// buffers and in fact panics if such a buffer is requested. Because of this, +// this BufferPool implementation is not meant to be used on its own and rather +// is intended to be embedded in a tieredBufferPool such that Get is only +// invoked when the required size is smaller than or equal to defaultSize. +type sizedBufferPool struct { + pool sync.Pool + defaultSize int +} + +func (p *sizedBufferPool) Get(size int) *[]byte { + buf := p.pool.Get().(*[]byte) + b := *buf + clear(b[:cap(b)]) + *buf = b[:size] + return buf +} + +func (p *sizedBufferPool) Put(buf *[]byte) { + if cap(*buf) < p.defaultSize { + // Ignore buffers that are too small to fit in the pool. Otherwise, when + // Get is called it will panic as it tries to index outside the bounds + // of the buffer. + return + } + p.pool.Put(buf) +} + +func newSizedBufferPool(size int) *sizedBufferPool { + return &sizedBufferPool{ + pool: sync.Pool{ + New: func() any { + buf := make([]byte, size) + return &buf + }, + }, + defaultSize: size, + } +} + +var _ BufferPool = (*simpleBufferPool)(nil) + +// simpleBufferPool is an implementation of the BufferPool interface that +// attempts to pool buffers with a sync.Pool. 
When Get is invoked, it tries to +// acquire a buffer from the pool but if that buffer is too small, it returns it +// to the pool and creates a new one. +type simpleBufferPool struct { + pool sync.Pool +} + +func (p *simpleBufferPool) Get(size int) *[]byte { + bs, ok := p.pool.Get().(*[]byte) + if ok && cap(*bs) >= size { + *bs = (*bs)[:size] + return bs + } + + // A buffer was pulled from the pool, but it is too small. Put it back in + // the pool and create one large enough. + if ok { + p.pool.Put(bs) + } + + b := make([]byte, size) + return &b +} + +func (p *simpleBufferPool) Put(buf *[]byte) { + p.pool.Put(buf) +} + +var _ BufferPool = NopBufferPool{} + +// NopBufferPool is a buffer pool that returns new buffers without pooling. +type NopBufferPool struct{} + +// Get returns a buffer with specified length from the pool. +func (NopBufferPool) Get(length int) *[]byte { + b := make([]byte, length) + return &b +} + +// Put returns a buffer to the pool. +func (NopBufferPool) Put(*[]byte) { +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/mem/buffer_slice.go b/terraform/providers/google/vendor/google.golang.org/grpc/mem/buffer_slice.go new file mode 100644 index 00000000000..228e9c2f20f --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -0,0 +1,226 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "io" +) + +// BufferSlice offers a means to represent data that spans one or more Buffer +// instances. A BufferSlice is meant to be immutable after creation, and methods +// like Ref create and return copies of the slice. This is why all methods have +// value receivers rather than pointer receivers. +// +// Note that any of the methods that read the underlying buffers such as Ref, +// Len or CopyTo etc., will panic if any underlying buffers have already been +// freed. It is recommended to not directly interact with any of the underlying +// buffers directly, rather such interactions should be mediated through the +// various methods on this type. +// +// By convention, any APIs that return (mem.BufferSlice, error) should reduce +// the burden on the caller by never returning a mem.BufferSlice that needs to +// be freed if the error is non-nil, unless explicitly stated. +type BufferSlice []Buffer + +// Len returns the sum of the length of all the Buffers in this slice. +// +// # Warning +// +// Invoking the built-in len on a BufferSlice will return the number of buffers +// in the slice, and *not* the value returned by this function. +func (s BufferSlice) Len() int { + var length int + for _, b := range s { + length += b.Len() + } + return length +} + +// Ref invokes Ref on each buffer in the slice. +func (s BufferSlice) Ref() { + for _, b := range s { + b.Ref() + } +} + +// Free invokes Buffer.Free() on each Buffer in the slice. 
+func (s BufferSlice) Free() { + for _, b := range s { + b.Free() + } +} + +// CopyTo copies each of the underlying Buffer's data into the given buffer, +// returning the number of bytes copied. Has the same semantics as the copy +// builtin in that it will copy as many bytes as it can, stopping when either dst +// is full or s runs out of data, returning the minimum of s.Len() and len(dst). +func (s BufferSlice) CopyTo(dst []byte) int { + off := 0 + for _, b := range s { + off += copy(dst[off:], b.ReadOnlyData()) + } + return off +} + +// Materialize concatenates all the underlying Buffer's data into a single +// contiguous buffer using CopyTo. +func (s BufferSlice) Materialize() []byte { + l := s.Len() + if l == 0 { + return nil + } + out := make([]byte, l) + s.CopyTo(out) + return out +} + +// MaterializeToBuffer functions like Materialize except that it writes the data +// to a single Buffer pulled from the given BufferPool. +// +// As a special case, if the input BufferSlice only actually has one Buffer, this +// function simply increases the refcount before returning said Buffer. Freeing this +// buffer won't release it until the BufferSlice is itself released. +func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer { + if len(s) == 1 { + s[0].Ref() + return s[0] + } + sLen := s.Len() + if sLen == 0 { + return emptyBuffer{} + } + buf := pool.Get(sLen) + s.CopyTo(*buf) + return NewBuffer(buf, pool) +} + +// Reader returns a new Reader for the input slice after taking references to +// each underlying buffer. +func (s BufferSlice) Reader() Reader { + s.Ref() + return &sliceReader{ + data: s, + len: s.Len(), + } +} + +// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface +// with other parts systems. It also provides an additional convenience method +// Remaining(), which returns the number of unread bytes remaining in the slice. +// Buffers will be freed as they are read. +type Reader interface { + io.Reader + io.ByteReader + // Close frees the underlying BufferSlice and never returns an error. Subsequent + // calls to Read will return (0, io.EOF). + Close() error + // Remaining returns the number of unread bytes remaining in the slice. + Remaining() int +} + +type sliceReader struct { + data BufferSlice + len int + // The index into data[0].ReadOnlyData(). + bufferIdx int +} + +func (r *sliceReader) Remaining() int { + return r.len +} + +func (r *sliceReader) Close() error { + r.data.Free() + r.data = nil + r.len = 0 + return nil +} + +func (r *sliceReader) freeFirstBufferIfEmpty() bool { + if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) { + return false + } + + r.data[0].Free() + r.data = r.data[1:] + r.bufferIdx = 0 + return true +} + +func (r *sliceReader) Read(buf []byte) (n int, _ error) { + if r.len == 0 { + return 0, io.EOF + } + + for len(buf) != 0 && r.len != 0 { + // Copy as much as possible from the first Buffer in the slice into the + // given byte slice. + data := r.data[0].ReadOnlyData() + copied := copy(buf, data[r.bufferIdx:]) + r.len -= copied // Reduce len by the number of bytes copied. + r.bufferIdx += copied // Increment the buffer index. + n += copied // Increment the total number of bytes read. + buf = buf[copied:] // Shrink the given byte slice. + + // If we have copied all the data from the first Buffer, free it and advance to + // the next in the slice. 
+		r.freeFirstBufferIfEmpty()
+	}
+
+	return n, nil
+}
+
+func (r *sliceReader) ReadByte() (byte, error) {
+	if r.len == 0 {
+		return 0, io.EOF
+	}
+
+	// There may be any number of empty buffers in the slice; clear them all until a
+	// non-empty buffer is reached. This is guaranteed to exit since r.len is not 0.
+	for r.freeFirstBufferIfEmpty() {
+	}
+
+	b := r.data[0].ReadOnlyData()[r.bufferIdx]
+	r.len--
+	r.bufferIdx++
+	// Free the first buffer in the slice if the last byte was read.
+	r.freeFirstBufferIfEmpty()
+	return b, nil
+}
+
+var _ io.Writer = (*writer)(nil)
+
+type writer struct {
+	buffers *BufferSlice
+	pool    BufferPool
+}
+
+func (w *writer) Write(p []byte) (n int, err error) {
+	b := Copy(p, w.pool)
+	*w.buffers = append(*w.buffers, b)
+	return b.Len(), nil
+}
+
+// NewWriter wraps the given BufferSlice and BufferPool to implement the
+// io.Writer interface. Every call to Write copies the contents of the given
+// buffer into a new Buffer pulled from the given pool, and the Buffer is added
+// to the given BufferSlice.
+func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer {
+	return &writer{buffers: buffers, pool: pool}
+}
diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/mem/buffers.go b/terraform/providers/google/vendor/google.golang.org/grpc/mem/buffers.go
new file mode 100644
index 00000000000..4d66b2ccc2b
--- /dev/null
+++ b/terraform/providers/google/vendor/google.golang.org/grpc/mem/buffers.go
@@ -0,0 +1,252 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package mem provides utilities that facilitate memory reuse in byte slices
+// that are used as buffers.
+//
+// # Experimental
+//
+// Notice: All APIs in this package are EXPERIMENTAL and may be changed or
+// removed in a later release.
+package mem
+
+import (
+	"fmt"
+	"sync"
+	"sync/atomic"
+)
+
+// A Buffer represents a reference counted piece of data (in bytes) that can be
+// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be
+// released by calling Free(), which frees the underlying memory only after all
+// references have been released.
+//
+// Note that a Buffer is not safe for concurrent access and instead each
+// goroutine should use its own reference to the data, which can be acquired via
+// a call to Ref().
+//
+// Attempts to access the underlying data after releasing the reference to the
+// Buffer will panic.
+type Buffer interface {
+	// ReadOnlyData returns the underlying byte slice. Note that it is undefined
+	// behavior to modify the contents of this slice in any way.
+	ReadOnlyData() []byte
+	// Ref increases the reference counter for this Buffer.
+	Ref()
+	// Free decrements this Buffer's reference counter and frees the underlying
+	// byte slice if the counter reaches 0 as a result of this call.
+	Free()
+	// Len returns the Buffer's size.
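+	// (For pooled buffers, Len also panics once the last reference has been
+	// freed, since it reads the underlying data.)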
+	Len() int
+
+	split(n int) (left, right Buffer)
+	read(buf []byte) (int, Buffer)
+}
+
+var (
+	bufferPoolingThreshold = 1 << 10
+
+	bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }}
+	refObjectPool    = sync.Pool{New: func() any { return new(atomic.Int32) }}
+)
+
+// IsBelowBufferPoolingThreshold reports whether the given size is at or below
+// the threshold under which buffers are allocated directly rather than pooled.
+func IsBelowBufferPoolingThreshold(size int) bool {
+	return size <= bufferPoolingThreshold
+}
+
+type buffer struct {
+	origData *[]byte
+	data     []byte
+	refs     *atomic.Int32
+	pool     BufferPool
+}
+
+func newBuffer() *buffer {
+	return bufferObjectPool.Get().(*buffer)
+}
+
+// NewBuffer creates a new Buffer from the given data, initializing the reference
+// counter to 1. The data will then be returned to the given pool when all
+// references to the returned Buffer are released. As a special case to avoid
+// additional allocations, if the given buffer pool is nil or the data is below
+// the pooling threshold, the returned buffer is a "no-op" SliceBuffer: invoking
+// Buffer.Free() does nothing and the underlying data is never returned to a
+// pool.
+//
+// Note that the backing array of the given data is not copied.
+func NewBuffer(data *[]byte, pool BufferPool) Buffer {
+	if pool == nil || IsBelowBufferPoolingThreshold(len(*data)) {
+		return (SliceBuffer)(*data)
+	}
+	b := newBuffer()
+	b.origData = data
+	b.data = *data
+	b.pool = pool
+	b.refs = refObjectPool.Get().(*atomic.Int32)
+	b.refs.Add(1)
+	return b
+}
+
+// Copy creates a new Buffer from the given data, initializing the reference
+// counter to 1.
+//
+// It acquires a []byte from the given pool and copies the given data into it.
+// The []byte acquired from the pool is returned to the pool when all
+// references to the returned Buffer are released. (Data below the pooling
+// threshold is instead copied into a plain SliceBuffer.)
+func Copy(data []byte, pool BufferPool) Buffer {
+	if IsBelowBufferPoolingThreshold(len(data)) {
+		buf := make(SliceBuffer, len(data))
+		copy(buf, data)
+		return buf
+	}
+
+	buf := pool.Get(len(data))
+	copy(*buf, data)
+	return NewBuffer(buf, pool)
+}
+
+func (b *buffer) ReadOnlyData() []byte {
+	if b.refs == nil {
+		panic("Cannot read freed buffer")
+	}
+	return b.data
+}
+
+func (b *buffer) Ref() {
+	if b.refs == nil {
+		panic("Cannot ref freed buffer")
+	}
+	b.refs.Add(1)
+}
+
+func (b *buffer) Free() {
+	if b.refs == nil {
+		panic("Cannot free freed buffer")
+	}
+
+	refs := b.refs.Add(-1)
+	switch {
+	case refs > 0:
+		return
+	case refs == 0:
+		if b.pool != nil {
+			b.pool.Put(b.origData)
+		}
+
+		refObjectPool.Put(b.refs)
+		b.origData = nil
+		b.data = nil
+		b.refs = nil
+		b.pool = nil
+		bufferObjectPool.Put(b)
+	default:
+		panic("Cannot free freed buffer")
+	}
+}
+
+func (b *buffer) Len() int {
+	return len(b.ReadOnlyData())
+}
+
+func (b *buffer) split(n int) (Buffer, Buffer) {
+	if b.refs == nil {
+		panic("Cannot split freed buffer")
+	}
+
+	b.refs.Add(1)
+	split := newBuffer()
+	split.origData = b.origData
+	split.data = b.data[n:]
+	split.refs = b.refs
+	split.pool = b.pool
+
+	b.data = b.data[:n]
+
+	return b, split
+}
+
+func (b *buffer) read(buf []byte) (int, Buffer) {
+	if b.refs == nil {
+		panic("Cannot read freed buffer")
+	}
+
+	n := copy(buf, b.data)
+	if n == len(b.data) {
+		b.Free()
+		return n, nil
+	}
+
+	b.data = b.data[n:]
+	return n, b
+}
+
+// String returns a string representation of the buffer. May be used for
+// debugging purposes.
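+// The %p verbs print the Buffer's own address and the address of its backing
+// array, which helps correlate shared references when debugging refcounts.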
+func (b *buffer) String() string {
+	return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData()))
+}
+
+// ReadUnsafe copies bytes from the given Buffer into dst. If the Buffer is
+// fully consumed it is freed and a nil Buffer is returned; otherwise the
+// returned Buffer holds the unread remainder.
+func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) {
+	return buf.read(dst)
+}
+
+// SplitUnsafe modifies the given Buffer to point to the first n bytes while it
+// returns a new reference to the remaining bytes. The returned Buffer functions
+// just like a normal reference acquired using Ref().
+func SplitUnsafe(buf Buffer, n int) (left, right Buffer) {
+	return buf.split(n)
+}
+
+type emptyBuffer struct{}
+
+func (e emptyBuffer) ReadOnlyData() []byte {
+	return nil
+}
+
+func (e emptyBuffer) Ref()  {}
+func (e emptyBuffer) Free() {}
+
+func (e emptyBuffer) Len() int {
+	return 0
+}
+
+func (e emptyBuffer) split(int) (left, right Buffer) {
+	return e, e
+}
+
+func (e emptyBuffer) read([]byte) (int, Buffer) {
+	return 0, e
+}
+
+// SliceBuffer is a Buffer implementation backed by a plain byte slice. It is
+// used for data below the pooling threshold (and for nil pools); Ref and Free
+// are no-ops.
+type SliceBuffer []byte
+
+func (s SliceBuffer) ReadOnlyData() []byte { return s }
+func (s SliceBuffer) Ref()                 {}
+func (s SliceBuffer) Free()                {}
+func (s SliceBuffer) Len() int             { return len(s) }
+
+func (s SliceBuffer) split(n int) (left, right Buffer) {
+	return s[:n], s[n:]
+}
+
+func (s SliceBuffer) read(buf []byte) (int, Buffer) {
+	n := copy(buf, s)
+	if n == len(s) {
+		return n, nil
+	}
+	return n, s[n:]
+}
diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/metadata/metadata.go b/terraform/providers/google/vendor/google.golang.org/grpc/metadata/metadata.go
index 6c01a9b359c..d2e15253bbf 100644
--- a/terraform/providers/google/vendor/google.golang.org/grpc/metadata/metadata.go
+++ b/terraform/providers/google/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -90,21 +90,6 @@ func Pairs(kv ...string) MD {
 	return md
 }
 
-// String implements the Stringer interface for pretty-printing a MD.
-// Ordering of the values is non-deterministic as it ranges over a map.
-func (md MD) String() string {
-	var sb strings.Builder
-	fmt.Fprintf(&sb, "MD{")
-	for k, v := range md {
-		if sb.Len() > 3 {
-			fmt.Fprintf(&sb, ", ")
-		}
-		fmt.Fprintf(&sb, "%s=[%s]", k, strings.Join(v, ", "))
-	}
-	fmt.Fprintf(&sb, "}")
-	return sb.String()
-}
-
 // Len returns the number of items in md.
 func (md MD) Len() int {
 	return len(md)
@@ -228,11 +213,6 @@ func FromIncomingContext(ctx context.Context) (MD, bool) {
 // ValueFromIncomingContext returns the metadata value corresponding to the metadata
 // key from the incoming metadata if it exists. Keys are matched in a case insensitive
 // manner.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
 func ValueFromIncomingContext(ctx context.Context, key string) []string {
 	md, ok := ctx.Value(mdIncomingKey{}).(MD)
 	if !ok {
@@ -243,7 +223,7 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string {
 		return copyOf(v)
 	}
 	for k, v := range md {
-		// Case insenitive comparison: MD is a map, and there's no guarantee
+		// Case insensitive comparison: MD is a map, and there's no guarantee
 		// that the MD attached to the context is created using our helper
 		// functions.
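 		// (strings.EqualFold compares keys case-insensitively without
 		// allocating lowercased copies.)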
if strings.EqualFold(k, key) { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/orca/call_metrics.go b/terraform/providers/google/vendor/google.golang.org/grpc/orca/call_metrics.go index 157dad49c65..9ae77214203 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/orca/call_metrics.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/orca/call_metrics.go @@ -156,7 +156,7 @@ func unaryInt(smp ServerMetricsProvider) func(ctx context.Context, req any, _ *g } func streamInt(smp ServerMetricsProvider) func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + return func(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { // We don't allocate the metric recorder here. It will be allocated the // first time the user calls CallMetricsRecorderFromContext(). rw := &recorderWrapper{smp: smp} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/orca/producer.go b/terraform/providers/google/vendor/google.golang.org/grpc/orca/producer.go index 04edae6de66..6e7c4c9f301 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/orca/producer.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/orca/producer.go @@ -72,7 +72,7 @@ type OOBListenerOptions struct { // returned stop function must be called when no longer needed. Do not // register a single OOBListener more than once per SubConn. func RegisterOOBListener(sc balancer.SubConn, l OOBListener, opts OOBListenerOptions) (stop func()) { - pr, close := sc.GetOrBuildProducer(producerBuilderSingleton) + pr, closeFn := sc.GetOrBuildProducer(producerBuilderSingleton) p := pr.(*producer) p.registerListener(l, opts.ReportInterval) @@ -84,7 +84,7 @@ func RegisterOOBListener(sc balancer.SubConn, l OOBListener, opts OOBListenerOpt // subsequent calls. return grpcsync.OnceFunc(func() { p.unregisterListener(l, opts.ReportInterval) - close() + closeFn() }) } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/orca/server_metrics.go b/terraform/providers/google/vendor/google.golang.org/grpc/orca/server_metrics.go index 67d1fa9d7f2..bb664d6a081 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/orca/server_metrics.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/orca/server_metrics.go @@ -108,7 +108,7 @@ type ServerMetricsRecorder interface { // SetMemoryUtilization sets the memory utilization server metric. Must be // in the range [0, 1]. SetMemoryUtilization(float64) - // DeleteMemoryUtilization deletes the memory utiliztion server metric to + // DeleteMemoryUtilization deletes the memory utilization server metric to // prevent it from being sent. 
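 	// (Deleting differs from setting the metric to 0: a deleted metric is
 	// omitted from the report entirely, while 0 is sent as a valid value.)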
	DeleteMemoryUtilization()
diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/picker_wrapper.go b/terraform/providers/google/vendor/google.golang.org/grpc/picker_wrapper.go
index 56e8aba783f..bdaa2130e48 100644
--- a/terraform/providers/google/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/terraform/providers/google/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -22,7 +22,7 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"sync"
+	"sync/atomic"
 
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/codes"
@@ -33,35 +33,43 @@ import (
 	"google.golang.org/grpc/status"
 )
 
+// pickerGeneration stores a picker and a channel used to signal that a picker
+// newer than this one is available.
+type pickerGeneration struct {
+	// picker is the picker produced by the LB policy. May be nil if a picker
+	// has never been produced.
+	picker balancer.Picker
+	// blockingCh is closed when the picker has been invalidated because there
+	// is a new one available.
+	blockingCh chan struct{}
+}
+
 // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
 // actions and unblocks when there's a picker update.
 type pickerWrapper struct {
-	mu         sync.Mutex
-	done       bool
-	blockingCh chan struct{}
-	picker     balancer.Picker
+	// If pickerGen holds a nil pointer, the pickerWrapper is closed.
+	pickerGen     atomic.Pointer[pickerGeneration]
 	statsHandlers []stats.Handler // to record blocking picker calls
 }
 
 func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper {
-	return &pickerWrapper{
-		blockingCh:    make(chan struct{}),
+	pw := &pickerWrapper{
 		statsHandlers: statsHandlers,
 	}
+	pw.pickerGen.Store(&pickerGeneration{
+		blockingCh: make(chan struct{}),
+	})
+	return pw
 }
 
-// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
+// updatePicker is called by UpdateState calls from the LB policy. It
+// unblocks all blocked picks.
 func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
-	pw.mu.Lock()
-	if pw.done {
-		pw.mu.Unlock()
-		return
-	}
-	pw.picker = p
-	// pw.blockingCh should never be nil.
-	close(pw.blockingCh)
-	pw.blockingCh = make(chan struct{})
-	pw.mu.Unlock()
+	old := pw.pickerGen.Swap(&pickerGeneration{
+		picker:     p,
+		blockingCh: make(chan struct{}),
+	})
+	close(old.blockingCh)
 }
 
 // doneChannelzWrapper performs the following:
@@ -98,20 +106,17 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
 	var lastPickErr error
 
 	for {
-		pw.mu.Lock()
-		if pw.done {
-			pw.mu.Unlock()
+		pg := pw.pickerGen.Load()
+		if pg == nil {
 			return nil, balancer.PickResult{}, ErrClientConnClosing
 		}
-
-		if pw.picker == nil {
-			ch = pw.blockingCh
+		if pg.picker == nil {
+			ch = pg.blockingCh
 		}
-		if ch == pw.blockingCh {
+		if ch == pg.blockingCh {
 			// This could happen when either:
 			//   - pw.picker is nil (the previous if condition), or
-			//   - has called pick on the current picker.
-			pw.mu.Unlock()
+			//   - we have already called pick on the current picker.
 			select {
 			case <-ctx.Done():
 				var errStr string
@@ -145,9 +150,8 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
 			}
 		}
 
-		ch = pw.blockingCh
-		p := pw.picker
-		pw.mu.Unlock()
+		ch = pg.blockingCh
+		p := pg.picker
 
 		pickResult, err := p.Pick(info)
 		if err != nil {
@@ -197,24 +201,15 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
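// Editorial aside: the hunk above replaces the mutex-guarded picker/blockingCh
// pair with an atomically swapped "generation" value. A minimal sketch of the
// same pattern, under hypothetical names (gen, cur, publish):
//
//	type gen struct {
//		value       int
//		invalidated chan struct{} // closed once a newer gen exists
//	}
//
//	var cur atomic.Pointer[gen] // must be seeded with an initial gen at
//	                            // startup, as newPickerWrapper does
//
//	func publish(v int) {
//		old := cur.Swap(&gen{value: v, invalidated: make(chan struct{})})
//		close(old.invalidated) // wake every reader blocked on the old gen
//	}
//
// Readers Load the current generation, act on it, and block on its channel
// only when they need a newer one; writers never block or take locks.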
 }
 
 func (pw *pickerWrapper) close() {
-	pw.mu.Lock()
-	defer pw.mu.Unlock()
-	if pw.done {
-		return
-	}
-	pw.done = true
-	close(pw.blockingCh)
+	old := pw.pickerGen.Swap(nil)
+	close(old.blockingCh)
 }
 
 // reset clears the pickerWrapper and prepares it for being used again when idle
 // mode is exited.
 func (pw *pickerWrapper) reset() {
-	pw.mu.Lock()
-	defer pw.mu.Unlock()
-	if pw.done {
-		return
-	}
-	pw.blockingCh = make(chan struct{})
+	old := pw.pickerGen.Swap(&pickerGeneration{blockingCh: make(chan struct{})})
+	close(old.blockingCh)
 }
 
 // dropError is a wrapper error that indicates the LB policy wishes to drop the
diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/preloader.go b/terraform/providers/google/vendor/google.golang.org/grpc/preloader.go
index 73bd6336433..e87a17f36a5 100644
--- a/terraform/providers/google/vendor/google.golang.org/grpc/preloader.go
+++ b/terraform/providers/google/vendor/google.golang.org/grpc/preloader.go
@@ -20,6 +20,7 @@ package grpc
 
 import (
 	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/mem"
 	"google.golang.org/grpc/status"
 )
 
@@ -31,9 +32,10 @@ import (
 // later release.
 type PreparedMsg struct {
 	// Fields for preparing the message before it is sent.
-	encodedData []byte
+	encodedData mem.BufferSlice
 	hdr         []byte
-	payload     []byte
+	payload     mem.BufferSlice
+	pf          payloadFormat
 }
 
 // Encode marshals and compresses the message using the codec and compressor for the stream.
@@ -57,11 +59,27 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error {
 	if err != nil {
 		return err
 	}
-	p.encodedData = data
-	compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp)
+
+	materializedData := data.Materialize()
+	data.Free()
+	p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)}
+
+	// TODO: it should be possible to grab the bufferPool from the underlying
+	// stream implementation with a type cast to its actual type (such as
+	// addrConnStream) and access the buffer pool directly.
+	var compData mem.BufferSlice
+	compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool())
 	if err != nil {
 		return err
 	}
-	p.hdr, p.payload = msgHeader(data, compData)
+
+	if p.pf.isCompressed() {
+		materializedCompData := compData.Materialize()
+		compData.Free()
+		compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)}
+	}
+
+	p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf)
+
 	return nil
 }
diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go
index 6c49c2333b1..e1f58104d85 100644
--- a/terraform/providers/google/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go
+++ b/terraform/providers/google/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go
@@ -21,8 +21,8 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
// versions: -// protoc-gen-go v1.33.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/reflection/v1/reflection.proto package grpc_reflection_v1 @@ -789,7 +789,7 @@ func file_grpc_reflection_v1_reflection_proto_rawDescGZIP() []byte { } var file_grpc_reflection_v1_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_grpc_reflection_v1_reflection_proto_goTypes = []interface{}{ +var file_grpc_reflection_v1_reflection_proto_goTypes = []any{ (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1.ServerReflectionRequest (*ExtensionRequest)(nil), // 1: grpc.reflection.v1.ExtensionRequest (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1.ServerReflectionResponse @@ -822,7 +822,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_reflection_v1_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1_reflection_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*ServerReflectionRequest); i { case 0: return &v.state @@ -834,7 +834,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { return nil } } - file_grpc_reflection_v1_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1_reflection_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ExtensionRequest); i { case 0: return &v.state @@ -846,7 +846,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { return nil } } - file_grpc_reflection_v1_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1_reflection_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ServerReflectionResponse); i { case 0: return &v.state @@ -858,7 +858,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { return nil } } - file_grpc_reflection_v1_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1_reflection_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*FileDescriptorResponse); i { case 0: return &v.state @@ -870,7 +870,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { return nil } } - file_grpc_reflection_v1_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1_reflection_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*ExtensionNumberResponse); i { case 0: return &v.state @@ -882,7 +882,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { return nil } } - file_grpc_reflection_v1_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1_reflection_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*ListServiceResponse); i { case 0: return &v.state @@ -894,7 +894,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { return nil } } - file_grpc_reflection_v1_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1_reflection_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*ServiceResponse); i { case 0: return &v.state @@ -906,7 +906,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { return nil } } - file_grpc_reflection_v1_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1_reflection_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := 
v.(*ErrorResponse); i { case 0: return &v.state @@ -919,14 +919,14 @@ func file_grpc_reflection_v1_reflection_proto_init() { } } } - file_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []any{ (*ServerReflectionRequest_FileByFilename)(nil), (*ServerReflectionRequest_FileContainingSymbol)(nil), (*ServerReflectionRequest_FileContainingExtension)(nil), (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), (*ServerReflectionRequest_ListServices)(nil), } - file_grpc_reflection_v1_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_grpc_reflection_v1_reflection_proto_msgTypes[2].OneofWrappers = []any{ (*ServerReflectionResponse_FileDescriptorResponse)(nil), (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), (*ServerReflectionResponse_ListServicesResponse)(nil), diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go index 6e544f81e4e..03108280767 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go @@ -21,8 +21,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/reflection/v1/reflection.proto package grpc_reflection_v1 @@ -36,8 +36,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1.ServerReflection/ServerReflectionInfo" @@ -49,7 +49,7 @@ const ( type ServerReflectionClient interface { // The reflection service is structured as a bidirectional stream, ensuring // all related requests go to a single server. - ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse], error) } type serverReflectionClient struct { @@ -60,54 +60,39 @@ func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClie return &serverReflectionClient{cc} } -func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, cOpts...) 
if err != nil { return nil, err } - x := &serverReflectionServerReflectionInfoClient{ClientStream: stream} + x := &grpc.GenericClientStream[ServerReflectionRequest, ServerReflectionResponse]{ClientStream: stream} return x, nil } -type ServerReflection_ServerReflectionInfoClient interface { - Send(*ServerReflectionRequest) error - Recv() (*ServerReflectionResponse, error) - grpc.ClientStream -} - -type serverReflectionServerReflectionInfoClient struct { - grpc.ClientStream -} - -func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { - m := new(ServerReflectionResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type ServerReflection_ServerReflectionInfoClient = grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse] // ServerReflectionServer is the server API for ServerReflection service. // All implementations should embed UnimplementedServerReflectionServer -// for forward compatibility +// for forward compatibility. type ServerReflectionServer interface { // The reflection service is structured as a bidirectional stream, ensuring // all related requests go to a single server. - ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error + ServerReflectionInfo(grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]) error } -// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. -type UnimplementedServerReflectionServer struct { -} +// UnimplementedServerReflectionServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedServerReflectionServer struct{} -func (UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { +func (UnimplementedServerReflectionServer) ServerReflectionInfo(grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]) error { return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") } +func (UnimplementedServerReflectionServer) testEmbeddedByValue() {} // UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to ServerReflectionServer will @@ -117,34 +102,22 @@ type UnsafeServerReflectionServer interface { } func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { + // If the following call panics, it indicates UnimplementedServerReflectionServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
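+	// For example (hypothetical embedding choices):
+	//
+	//	type badSrv struct{ *UnimplementedServerReflectionServer } // nil pointer: panics here
+	//	type okSrv struct{ UnimplementedServerReflectionServer }   // by value: safe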
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&ServerReflection_ServiceDesc, srv) } func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{ServerStream: stream}) -} - -type ServerReflection_ServerReflectionInfoServer interface { - Send(*ServerReflectionResponse) error - Recv() (*ServerReflectionRequest, error) - grpc.ServerStream -} - -type serverReflectionServerReflectionInfoServer struct { - grpc.ServerStream + return srv.(ServerReflectionServer).ServerReflectionInfo(&grpc.GenericServerStream[ServerReflectionRequest, ServerReflectionResponse]{ServerStream: stream}) } -func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { - m := new(ServerReflectionRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type ServerReflection_ServerReflectionInfoServer = grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse] // ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. // It's only intended for direct use with grpc.RegisterService, diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go index 909b24a19da..0582e16af2b 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. package grpc_reflection_v1alpha @@ -403,7 +403,7 @@ type ServerReflectionResponse_FileDescriptorResponse struct { } type ServerReflectionResponse_AllExtensionNumbersResponse struct { - // This message is used to answer all_extension_numbers_of_type requst. + // This message is used to answer all_extension_numbers_of_type request. // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` @@ -864,7 +864,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP() []byte { } var file_grpc_reflection_v1alpha_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_grpc_reflection_v1alpha_reflection_proto_goTypes = []interface{}{ +var file_grpc_reflection_v1alpha_reflection_proto_goTypes = []any{ (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1alpha.ServerReflectionRequest (*ExtensionRequest)(nil), // 1: grpc.reflection.v1alpha.ExtensionRequest (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1alpha.ServerReflectionResponse @@ -897,7 +897,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*ServerReflectionRequest); i { case 0: return &v.state @@ -909,7 +909,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ExtensionRequest); i { case 0: return &v.state @@ -921,7 +921,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ServerReflectionResponse); i { case 0: return &v.state @@ -933,7 +933,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*FileDescriptorResponse); i { case 0: return &v.state @@ -945,7 +945,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*ExtensionNumberResponse); i { case 0: return &v.state @@ -957,7 +957,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*ListServiceResponse); i { case 0: return &v.state @@ -969,7 +969,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*ServiceResponse); i { case 0: return &v.state @@ -981,7 +981,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) 
interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*ErrorResponse); i { case 0: return &v.state @@ -994,14 +994,14 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { } } } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []any{ (*ServerReflectionRequest_FileByFilename)(nil), (*ServerReflectionRequest_FileContainingSymbol)(nil), (*ServerReflectionRequest_FileContainingExtension)(nil), (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), (*ServerReflectionRequest_ListServices)(nil), } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].OneofWrappers = []any{ (*ServerReflectionResponse_FileDescriptorResponse)(nil), (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), (*ServerReflectionResponse_ListServicesResponse)(nil), diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go index 3de5dc354f6..80755d74d74 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. package grpc_reflection_v1alpha @@ -33,8 +33,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo" @@ -46,7 +46,7 @@ const ( type ServerReflectionClient interface { // The reflection service is structured as a bidirectional stream, ensuring // all related requests go to a single server. - ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse], error) } type serverReflectionClient struct { @@ -57,54 +57,39 @@ func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClie return &serverReflectionClient{cc} } -func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, cOpts...) 
if err != nil { return nil, err } - x := &serverReflectionServerReflectionInfoClient{ClientStream: stream} + x := &grpc.GenericClientStream[ServerReflectionRequest, ServerReflectionResponse]{ClientStream: stream} return x, nil } -type ServerReflection_ServerReflectionInfoClient interface { - Send(*ServerReflectionRequest) error - Recv() (*ServerReflectionResponse, error) - grpc.ClientStream -} - -type serverReflectionServerReflectionInfoClient struct { - grpc.ClientStream -} - -func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { - m := new(ServerReflectionResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type ServerReflection_ServerReflectionInfoClient = grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse] // ServerReflectionServer is the server API for ServerReflection service. // All implementations should embed UnimplementedServerReflectionServer -// for forward compatibility +// for forward compatibility. type ServerReflectionServer interface { // The reflection service is structured as a bidirectional stream, ensuring // all related requests go to a single server. - ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error + ServerReflectionInfo(grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]) error } -// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. -type UnimplementedServerReflectionServer struct { -} +// UnimplementedServerReflectionServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedServerReflectionServer struct{} -func (UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { +func (UnimplementedServerReflectionServer) ServerReflectionInfo(grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]) error { return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") } +func (UnimplementedServerReflectionServer) testEmbeddedByValue() {} // UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to ServerReflectionServer will @@ -114,34 +99,22 @@ type UnsafeServerReflectionServer interface { } func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { + // If the following call panics, it indicates UnimplementedServerReflectionServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&ServerReflection_ServiceDesc, srv) } func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{ServerStream: stream}) -} - -type ServerReflection_ServerReflectionInfoServer interface { - Send(*ServerReflectionResponse) error - Recv() (*ServerReflectionRequest, error) - grpc.ServerStream -} - -type serverReflectionServerReflectionInfoServer struct { - grpc.ServerStream + return srv.(ServerReflectionServer).ServerReflectionInfo(&grpc.GenericServerStream[ServerReflectionRequest, ServerReflectionResponse]{ServerStream: stream}) } -func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { - m := new(ServerReflectionRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type ServerReflection_ServerReflectionInfoServer = grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse] // ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. // It's only intended for direct use with grpc.RegisterService, diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/reflection/internal/internal.go b/terraform/providers/google/vendor/google.golang.org/grpc/reflection/internal/internal.go index 36ee6507507..902fc6d35c2 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/reflection/internal/internal.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/reflection/internal/internal.go @@ -18,7 +18,7 @@ // Package internal contains code that is shared by both reflection package and // the test package. The packages are split in this way inorder to avoid -// depenedency to deprecated package github.com/golang/protobuf. +// dependency to deprecated package github.com/golang/protobuf. package internal import ( diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/regenerate.sh b/terraform/providers/google/vendor/google.golang.org/grpc/regenerate.sh deleted file mode 100644 index 3edca296c22..00000000000 --- a/terraform/providers/google/vendor/google.golang.org/grpc/regenerate.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/bash -# Copyright 2020 gRPC authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -set -eu -o pipefail - -WORKDIR=$(mktemp -d) - -function finish { - rm -rf "$WORKDIR" -} -trap finish EXIT - -export GOBIN=${WORKDIR}/bin -export PATH=${GOBIN}:${PATH} -mkdir -p ${GOBIN} - -echo "remove existing generated files" -# grpc_testing_not_regenerate/*.pb.go is not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') - -echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" -(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) - -echo "go install cmd/protoc-gen-go-grpc" -(cd cmd/protoc-gen-go-grpc && go install .) - -echo "git clone https://github.com/grpc/grpc-proto" -git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto - -echo "git clone https://github.com/protocolbuffers/protobuf" -git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf - -# Pull in code.proto as a proto dependency -mkdir -p ${WORKDIR}/googleapis/google/rpc -echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" -curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto - -mkdir -p ${WORKDIR}/out - -# Generates sources without the embed requirement -LEGACY_SOURCES=( - ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto - ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto - ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto - ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto - profiling/proto/service.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto -) - -# Generates only the new gRPC Service symbols -SOURCES=( - $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^profiling/proto/service.proto$') - ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto - ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto - ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto - ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto - ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto - ${WORKDIR}/grpc-proto/grpc/testing/*.proto - ${WORKDIR}/grpc-proto/grpc/core/*.proto -) - -# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an -# import path of 'bar' in the generated code when 'foo.proto' is imported in -# one of the sources. -# -# Note that the protos listed here are all for testing purposes. All protos to -# be used externally should have a go_package option (and they don't need to be -# listed here). 
-OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ -Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing - -for src in ${SOURCES[@]}; do - echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},use_generic_streams_experimental=true:${WORKDIR}/out \ - -I"." \ - -I${WORKDIR}/grpc-proto \ - -I${WORKDIR}/googleapis \ - -I${WORKDIR}/protobuf/src \ - ${src} -done - -for src in ${LEGACY_SOURCES[@]}; do - echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \ - -I"." \ - -I${WORKDIR}/grpc-proto \ - -I${WORKDIR}/googleapis \ - -I${WORKDIR}/protobuf/src \ - ${src} -done - -# The go_package option in grpc/lookup/v1/rls.proto doesn't match the -# current location. Move it into the right place. -mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 - -# grpc_testing_not_regenerate/*.pb.go are not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go - -cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/resolver/manual/manual.go b/terraform/providers/google/vendor/google.golang.org/grpc/resolver/manual/manual.go index f2efa2a2cb5..09e864a89d3 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/resolver/manual/manual.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/resolver/manual/manual.go @@ -76,9 +76,11 @@ func (r *Resolver) InitialState(s resolver.State) { // Build returns itself for Resolver, because it's both a builder and a resolver. func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - r.BuildCallback(target, cc, opts) r.mu.Lock() defer r.mu.Unlock() + // Call BuildCallback after locking to avoid a race when UpdateState + // or ReportError is called before Build returns. + r.BuildCallback(target, cc, opts) r.CC = cc if r.lastSeenState != nil { err := r.CC.UpdateState(*r.lastSeenState) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/resolver_wrapper.go b/terraform/providers/google/vendor/google.golang.org/grpc/resolver_wrapper.go index 9dcc9780f89..23bb3fb2582 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -66,7 +66,7 @@ func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { // any newly created ccResolverWrapper, except that close may be called instead. 
func (ccr *ccResolverWrapper) start() error { errCh := make(chan error) - ccr.serializer.Schedule(func(ctx context.Context) { + ccr.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil { return } @@ -85,7 +85,7 @@ func (ccr *ccResolverWrapper) start() error { } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.serializer.Schedule(func(ctx context.Context) { + ccr.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccr.resolver == nil { return } @@ -102,7 +102,7 @@ func (ccr *ccResolverWrapper) close() { ccr.closed = true ccr.mu.Unlock() - ccr.serializer.Schedule(func(context.Context) { + ccr.serializer.TrySchedule(func(context.Context) { if ccr.resolver == nil { return } @@ -171,12 +171,15 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { // ParseServiceConfig is called by resolver implementations to parse a JSON // representation of the service config. func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { - return parseServiceConfig(scJSON) + return parseServiceConfig(scJSON, ccr.cc.dopts.maxCallAttempts) } // addChannelzTraceEvent adds a channelz trace event containing the new // state received from resolver implementations. func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + if !logger.V(0) && !channelz.IsOn() { + return + } var updates []string var oldSC, newSC *ServiceConfig var oldOK, newOK bool diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/rpc_util.go b/terraform/providers/google/vendor/google.golang.org/grpc/rpc_util.go index fdd49e6e915..2d96f1405e8 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/rpc_util.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/rpc_util.go @@ -19,7 +19,6 @@ package grpc import ( - "bytes" "compress/gzip" "context" "encoding/binary" @@ -35,6 +34,7 @@ import ( "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -220,8 +220,8 @@ type HeaderCallOption struct { HeaderAddr *metadata.MD } -func (o HeaderCallOption) before(c *callInfo) error { return nil } -func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) { +func (o HeaderCallOption) before(*callInfo) error { return nil } +func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) { *o.HeaderAddr, _ = attempt.s.Header() } @@ -242,8 +242,8 @@ type TrailerCallOption struct { TrailerAddr *metadata.MD } -func (o TrailerCallOption) before(c *callInfo) error { return nil } -func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) { +func (o TrailerCallOption) before(*callInfo) error { return nil } +func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) { *o.TrailerAddr = attempt.s.Trailer() } @@ -264,24 +264,20 @@ type PeerCallOption struct { PeerAddr *peer.Peer } -func (o PeerCallOption) before(c *callInfo) error { return nil } -func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { +func (o PeerCallOption) before(*callInfo) error { return nil } +func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) { if x, ok := peer.FromContext(attempt.s.Context()); ok { *o.PeerAddr = *x } } -// WaitForReady configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. 
If waitForReady is false and the -// connection is in the TRANSIENT_FAILURE state, the RPC will fail -// immediately. Otherwise, the RPC client will block the call until a -// connection is available (or the call is canceled or times out) and will -// retry the call if it fails due to a transient error. gRPC will not retry if -// data was written to the wire unless the server indicates it did not process -// the data. Please refer to -// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// WaitForReady configures the RPC's behavior when the client is in +// TRANSIENT_FAILURE, which occurs when all addresses fail to connect. If +// waitForReady is false, the RPC will fail immediately. Otherwise, the client +// will wait until a connection becomes available or the RPC's deadline is +// reached. // -// By default, RPCs don't "wait for ready". +// By default, RPCs do not "wait for ready". func WaitForReady(waitForReady bool) CallOption { return FailFastCallOption{FailFast: !waitForReady} } @@ -308,7 +304,7 @@ func (o FailFastCallOption) before(c *callInfo) error { c.failFast = o.FailFast return nil } -func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o FailFastCallOption) after(*callInfo, *csAttempt) {} // OnFinish returns a CallOption that configures a callback to be called when // the call completes. The error passed to the callback is the status of the @@ -343,7 +339,7 @@ func (o OnFinishCallOption) before(c *callInfo) error { return nil } -func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o OnFinishCallOption) after(*callInfo, *csAttempt) {} // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size // in bytes the client can receive. If this is not set, gRPC uses the default @@ -367,7 +363,7 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { c.maxReceiveMessageSize = &o.MaxRecvMsgSize return nil } -func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {} // MaxCallSendMsgSize returns a CallOption which sets the maximum message size // in bytes the client can send. If this is not set, gRPC uses the default @@ -391,7 +387,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { c.maxSendMessageSize = &o.MaxSendMsgSize return nil } -func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {} // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials // for a call. @@ -414,7 +410,7 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error { c.creds = o.Creds return nil } -func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {} // UseCompressor returns a CallOption which sets the compressor used when // sending the request. If WithCompressor is also set, UseCompressor has @@ -442,7 +438,7 @@ func (o CompressorCallOption) before(c *callInfo) error { c.compressorType = o.CompressorType return nil } -func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o CompressorCallOption) after(*callInfo, *csAttempt) {} // CallContentSubtype returns a CallOption that will set the content-subtype // for a call. 
For example, if content-subtype is "json", the Content-Type over @@ -479,7 +475,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { c.contentSubtype = o.ContentSubtype return nil } -func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {} // ForceCodec returns a CallOption that will set codec to be used for all // request and response messages for a call. The result of calling Name() will @@ -515,10 +511,50 @@ type ForceCodecCallOption struct { } func (o ForceCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec + c.codec = newCodecV1Bridge(o.Codec) return nil } -func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {} + +// ForceCodecV2 returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceCodecV2(codec encoding.CodecV2) CallOption { + return ForceCodecV2CallOption{CodecV2: codec} +} + +// ForceCodecV2CallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ForceCodecV2CallOption struct { + CodecV2 encoding.CodecV2 +} + +func (o ForceCodecV2CallOption) before(c *callInfo) error { + c.codec = o.CodecV2 + return nil +} + +func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {} // CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of // an encoding.Codec. @@ -540,10 +576,10 @@ type CustomCodecCallOption struct { } func (o CustomCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec + c.codec = newCodecV0Bridge(o.Codec) return nil } -func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. @@ -571,7 +607,7 @@ func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize return nil } -func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {} // The format of the payload: compressed or not? type payloadFormat uint8 @@ -581,19 +617,28 @@ const ( compressionMade payloadFormat = 1 // compressed ) +func (pf payloadFormat) isCompressed() bool { + return pf == compressionMade +} + +type streamReader interface { + ReadHeader(header []byte) error + Read(n int) (mem.BufferSlice, error) +} + // parser reads complete gRPC messages from the underlying reader. type parser struct { // r is the underlying reader. 
// See the comment on recvMsg for the permissible // error types. - r io.Reader + r streamReader // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte - // recvBufferPool is the pool of shared receive buffers. - recvBufferPool SharedBufferPool + // bufferPool is the pool of shared receive buffers. + bufferPool mem.BufferPool } // recvMsg reads a complete gRPC message from the stream. @@ -608,14 +653,15 @@ type parser struct { // - an error from the status package // // No other error values or types must be returned, which also means -// that the underlying io.Reader must not return an incompatible +// that the underlying streamReader must not return an incompatible // error. -func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { - if _, err := p.r.Read(p.header[:]); err != nil { +func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) { + err := p.r.ReadHeader(p.header[:]) + if err != nil { return 0, nil, err } - pf = payloadFormat(p.header[0]) + pf := payloadFormat(p.header[0]) length := binary.BigEndian.Uint32(p.header[1:]) if length == 0 { @@ -627,20 +673,21 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } - msg = p.recvBufferPool.Get(int(length)) - if _, err := p.r.Read(msg); err != nil { + + data, err := p.r.Read(int(length)) + if err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } return 0, nil, err } - return pf, msg, nil + return pf, data, nil } // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. -func encode(c baseCodec, msg any) ([]byte, error) { +func encode(c baseCodec, msg any) (mem.BufferSlice, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } @@ -648,7 +695,8 @@ func encode(c baseCodec, msg any) ([]byte, error) { if err != nil { return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) } - if uint(len(b)) > math.MaxUint32 { + if uint(b.Len()) > math.MaxUint32 { + b.Free() return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) } return b, nil @@ -659,34 +707,41 @@ func encode(c baseCodec, msg any) ([]byte, error) { // indicating no compression was done. // // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. 
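For orientation, the streamReader-based parser above still decodes the standard 5-byte gRPC message prefix: byte 0 is the compressed flag (payloadFormat) and bytes 1-4 are the big-endian payload length. A self-contained sketch of that framing, using a made-up frame:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// 5-byte prefix per PROTOCOL-HTTP2.md: flag byte, then uint32 length.
	frame := []byte{0x01, 0x00, 0x00, 0x00, 0x0b} // compressed; 11-byte payload follows
	compressed := frame[0] == 1                   // mirrors payloadFormat(p.header[0])
	length := binary.BigEndian.Uint32(frame[1:5]) // mirrors binary.BigEndian.Uint32(p.header[1:])
	fmt.Printf("compressed=%v length=%d\n", compressed, length)
}
```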
-func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { - if compressor == nil && cp == nil { - return nil, nil - } - if len(in) == 0 { - return nil, nil +func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) { + if (compressor == nil && cp == nil) || in.Len() == 0 { + return nil, compressionNone, nil } + var out mem.BufferSlice + w := mem.NewWriter(&out, pool) wrapErr := func(err error) error { + out.Free() return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } - cbuf := &bytes.Buffer{} if compressor != nil { - z, err := compressor.Compress(cbuf) + z, err := compressor.Compress(w) if err != nil { - return nil, wrapErr(err) + return nil, 0, wrapErr(err) } - if _, err := z.Write(in); err != nil { - return nil, wrapErr(err) + for _, b := range in { + if _, err := z.Write(b.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) + } } if err := z.Close(); err != nil { - return nil, wrapErr(err) + return nil, 0, wrapErr(err) } } else { - if err := cp.Do(cbuf, in); err != nil { - return nil, wrapErr(err) + // This is obviously really inefficient since it fully materializes the data, but + // there is no way around this with the old Compressor API. At least it attempts + // to return the buffer to the provider, in the hopes it can be reused (maybe + // even by a subsequent call to this very function). + buf := in.MaterializeToBuffer(pool) + defer buf.Free() + if err := cp.Do(w, buf.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) } } - return cbuf.Bytes(), nil + return out, compressionMade, nil } const ( @@ -697,33 +752,36 @@ const ( // msgHeader returns a 5-byte header for the message being transmitted and the // payload, which is compData if non-nil or data otherwise. 
-func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { +func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) { hdr = make([]byte, headerLen) - if compData != nil { - hdr[0] = byte(compressionMade) - data = compData + hdr[0] = byte(pf) + + var length uint32 + if pf.isCompressed() { + length = uint32(compData.Len()) + payload = compData } else { - hdr[0] = byte(compressionNone) + length = uint32(data.Len()) + payload = data } // Write length of payload into buf - binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) - return hdr, data + binary.BigEndian.PutUint32(hdr[payloadLen:], length) + return hdr, payload } -func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { +func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload { return &stats.OutPayload{ Client: client, Payload: msg, - Data: data, - Length: len(data), - WireLength: len(payload) + headerLen, - CompressedLength: len(payload), + Length: dataLength, + WireLength: payloadLength + headerLen, + CompressedLength: payloadLength, SentTime: t, } } -func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status { switch pf { case compressionNone: case compressionMade: @@ -731,7 +789,11 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") } if !haveCompressor { - return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + if isServer { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } else { + return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } } default: return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) @@ -741,104 +803,129 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool type payloadInfo struct { compressedLength int // The compressed length got from wire. - uncompressedBytes []byte + uncompressedBytes mem.BufferSlice +} + +func (p *payloadInfo) free() { + if p != nil && p.uncompressedBytes != nil { + p.uncompressedBytes.Free() + } } // recvAndDecompress reads a message from the stream, decompressing it if necessary. // // Cancelling the returned cancel function releases the buffer back to the pool. So the caller should cancel as soon as // the buffer is no longer needed. -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, -) (uncompressedBuf []byte, cancel func(), err error) { - pf, compressedBuf, err := p.recvMsg(maxReceiveMessageSize) +// TODO: Refactor this function to reduce the number of arguments. 
+// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +) (out mem.BufferSlice, err error) { + pf, compressed, err := p.recvMsg(maxReceiveMessageSize) if err != nil { - return nil, nil, err + return nil, err } - if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { - return nil, nil, st.Err() + compressedLength := compressed.Len() + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil { + compressed.Free() + return nil, st.Err() } var size int - if pf == compressionMade { + if pf.isCompressed() { + defer compressed.Free() + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - uncompressedBuf, err = dc.Do(bytes.NewReader(compressedBuf)) + var uncompressedBuf []byte + uncompressedBuf, err = dc.Do(compressed.Reader()) + if err == nil { + out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)} + } size = len(uncompressedBuf) } else { - uncompressedBuf, size, err = decompress(compressor, compressedBuf, maxReceiveMessageSize) + out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool) } if err != nil { - return nil, nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if size > maxReceiveMessageSize { + out.Free() // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } } else { - uncompressedBuf = compressedBuf + out = compressed } if payInfo != nil { - payInfo.compressedLength = len(compressedBuf) - payInfo.uncompressedBytes = uncompressedBuf - - cancel = func() {} - } else { - cancel = func() { - p.recvBufferPool.Put(&compressedBuf) - } + payInfo.compressedLength = compressedLength + out.Ref() + payInfo.uncompressedBytes = out } - return uncompressedBuf, cancel, nil + return out, nil } // Using compressor, decompress d, returning data and size. // Optionally, if data will be over maxReceiveMessageSize, just return the size. -func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { - dcReader, err := compressor.Decompress(bytes.NewReader(d)) +func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) { + dcReader, err := compressor.Decompress(d.Reader()) if err != nil { return nil, 0, err } - if sizer, ok := compressor.(interface { - DecompressedSize(compressedBytes []byte) int - }); ok { - if size := sizer.DecompressedSize(d); size >= 0 { - if size > maxReceiveMessageSize { - return nil, size, nil - } - // size is used as an estimate to size the buffer, but we - // will read more data if available. - // +MinRead so ReadFrom will not reallocate if size is correct. 
- // - // TODO: If we ensure that the buffer size is the same as the DecompressedSize, - // we can also utilize the recv buffer pool here. - buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) - bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - return buf.Bytes(), int(bytesRead), err - } + + // TODO: Can/should this still be preserved with the new BufferSlice API? Are + // there any actual benefits to allocating a single large buffer instead of + // multiple smaller ones? + //if sizer, ok := compressor.(interface { + // DecompressedSize(compressedBytes []byte) int + //}); ok { + // if size := sizer.DecompressedSize(d); size >= 0 { + // if size > maxReceiveMessageSize { + // return nil, size, nil + // } + // // size is used as an estimate to size the buffer, but we + // // will read more data if available. + // // +MinRead so ReadFrom will not reallocate if size is correct. + // // + // // TODO: If we ensure that the buffer size is the same as the DecompressedSize, + // // we can also utilize the recv buffer pool here. + // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + // return buf.Bytes(), int(bytesRead), err + // } + //} + + var out mem.BufferSlice + _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + if err != nil { + out.Free() + return nil, 0, err } - // Read from LimitReader with limit max+1. So if the underlying - // reader is over limit, the result will be bigger than max. - d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - return d, len(d), err + return out, out.Len(), nil } // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - buf, cancel, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { + data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) if err != nil { return err } - defer cancel() - if err := c.Unmarshal(buf, m); err != nil { + // If the codec wants its own reference to the data, it can get it. Otherwise, always + // free the buffers. + defer data.Free() + + if err := c.Unmarshal(data, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } + return nil } @@ -941,7 +1028,7 @@ func setCallInfoCodec(c *callInfo) error { // encoding.Codec (Name vs. String method name). We only support // setting content subtype from encoding.Codec to avoid a behavior // change with the deprecated version. - if ec, ok := c.codec.(encoding.Codec); ok { + if ec, ok := c.codec.(encoding.CodecV2); ok { c.contentSubtype = strings.ToLower(ec.Name()) } } @@ -950,12 +1037,12 @@ func setCallInfoCodec(c *callInfo) error { if c.contentSubtype == "" { // No codec specified in CallOptions; use proto by default. 
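The rewritten decompress above keeps the old size-guard trick: copy through an io.LimitReader with limit max+1, so a result longer than max proves the message exceeded the cap without reading unbounded data. A standalone sketch of the pattern, with a plain bytes.Buffer standing in for mem.BufferSlice (readCapped is an illustrative name):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// readCapped copies at most max+1 bytes; more than max means "too large".
func readCapped(r io.Reader, max int64) ([]byte, error) {
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, io.LimitReader(r, max+1)); err != nil {
		return nil, err
	}
	if int64(buf.Len()) > max {
		return nil, fmt.Errorf("message after decompression larger than max (%d vs. %d)", buf.Len(), max)
	}
	return buf.Bytes(), nil
}

func main() {
	_, err := readCapped(strings.NewReader("hello"), 4)
	fmt.Println(err) // larger than max (5 vs. 4)
}
```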
- c.codec = encoding.GetCodec(proto.Name) + c.codec = getCodec(proto.Name) return nil } // c.contentSubtype is already lowercased in CallContentSubtype - c.codec = encoding.GetCodec(c.contentSubtype) + c.codec = getCodec(c.contentSubtype) if c.codec == nil { return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/server.go b/terraform/providers/google/vendor/google.golang.org/grpc/server.go index 89f8e4792bf..d1e1415a40f 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/server.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/server.go @@ -45,6 +45,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -80,7 +81,7 @@ func init() { } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption - internal.RecvBufferPool = recvBufferPool + internal.BufferPool = bufferPool } var statusOK = status.New(codes.OK, "") @@ -170,7 +171,7 @@ type serverOptions struct { maxHeaderListSize *uint32 headerTableSize *uint32 numServerWorkers uint32 - recvBufferPool SharedBufferPool + bufferPool mem.BufferPool waitForHandlers bool } @@ -181,7 +182,7 @@ var defaultServerOptions = serverOptions{ connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, - recvBufferPool: nopBufferPool{}, + bufferPool: mem.DefaultBufferPool(), } var globalServerOptions []ServerOption @@ -313,7 +314,7 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { // Will be supported throughout 1.x. func CustomCodec(codec Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.codec = codec + o.codec = newCodecV0Bridge(codec) }) } @@ -342,7 +343,22 @@ func CustomCodec(codec Codec) ServerOption { // later release. func ForceServerCodec(codec encoding.Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.codec = codec + o.codec = newCodecV1Bridge(codec) + }) +} + +// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new +// CodecV2 interface. +// +// Will be supported throughout 1.x. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codecV2 }) } @@ -592,26 +608,9 @@ func WaitForHandlers(w bool) ServerOption { }) } -// RecvBufferPool returns a ServerOption that configures the server -// to use the provided shared buffer pool for parsing incoming messages. Depending -// on the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: StatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. -// -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. 
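ForceServerCodecV2 above accepts the new encoding.CodecV2, which marshals into mem.BufferSlice instead of []byte. A toy passthrough codec sketch, assuming the CodecV2 method set shown here; rawCodec and the "raw" name are invented for illustration:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding"
	"google.golang.org/grpc/mem"
)

// rawCodec is a toy CodecV2 that passes *[]byte messages straight through.
// Illustrative only: a real codec must handle its generated message types.
type rawCodec struct{}

var _ encoding.CodecV2 = rawCodec{}

func (rawCodec) Marshal(v any) (mem.BufferSlice, error) {
	b, ok := v.(*[]byte)
	if !ok {
		return nil, fmt.Errorf("rawCodec: cannot marshal %T", v)
	}
	// A nil pool keeps the buffer unpooled, as recvAndDecompress does for
	// the legacy Decompressor path.
	return mem.BufferSlice{mem.NewBuffer(b, nil)}, nil
}

func (rawCodec) Unmarshal(data mem.BufferSlice, v any) error {
	b, ok := v.(*[]byte)
	if !ok {
		return fmt.Errorf("rawCodec: cannot unmarshal into %T", v)
	}
	*b = data.Materialize() // copies out of the (possibly pooled) buffers
	return nil
}

func (rawCodec) Name() string { return "raw" }

func main() {
	s := grpc.NewServer(grpc.ForceServerCodecV2(rawCodec{}))
	defer s.Stop()
	fmt.Println("server configured with CodecV2:", rawCodec{}.Name())
}
```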
-func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { - return recvBufferPool(bufferPool) -} - -func recvBufferPool(bufferPool SharedBufferPool) ServerOption { +func bufferPool(bufferPool mem.BufferPool) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.recvBufferPool = bufferPool + o.bufferPool = bufferPool }) } @@ -622,7 +621,7 @@ func recvBufferPool(bufferPool SharedBufferPool) ServerOption { // workload (assuming a QPS of a few thousand requests/sec). const serverWorkerResetThreshold = 1 << 16 -// serverWorkers blocks on a *transport.Stream channel forever and waits for +// serverWorker blocks on a *transport.Stream channel forever and waits for // data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). @@ -980,6 +979,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { ChannelzParent: s.channelz, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, + BufferPool: s.opts.bufferPool, } st, err := transport.NewServerTransport(c, config) if err != nil { @@ -1072,7 +1072,7 @@ var _ http.Handler = (*Server)(nil) // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool) if err != nil { // Errors returned from transport.NewServerHandlerTransport have // already been written to w. @@ -1142,20 +1142,35 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) return err } - compData, err := compress(data, cp, comp) + + compData, pf, err := compress(data, cp, comp, s.opts.bufferPool) if err != nil { + data.Free() channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err) return err } - hdr, payload := msgHeader(data, compData) + + hdr, payload := msgHeader(data, compData, pf) + + defer func() { + compData.Free() + data.Free() + // payload does not need to be freed here, it is either data or compData, both of + // which are already freed. + }() + + dataLen := data.Len() + payloadLen := payload.Len() // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > s.opts.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize) + if payloadLen > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", payloadLen, s.opts.maxSendMessageSize) } err = t.Write(stream, hdr, payload, opts) if err == nil { - for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) + if len(s.opts.statsHandlers) != 0 { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now())) + } } } return err @@ -1334,37 +1349,37 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor var payInfo *payloadInfo if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} + defer payInfo.free() } - d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } + defer d.Free() if channelz.IsOn() { t.IncrMsgRecv() } df := func(v any) error { - defer cancel() - if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } + for _, sh := range shs { sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, - Length: len(d), + Length: d.Len(), WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, - Data: d, }) } if len(binlogs) != 0 { cm := &binarylog.ClientMessage{ - Message: d, + Message: d.Materialize(), } for _, binlog := range binlogs { binlog.Log(ctx, cm) @@ -1548,7 +1563,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ctx: ctx, t: t, s: stream, - p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, + p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, @@ -1963,12 +1978,12 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return s.opts.codec } if contentSubtype == "" { - return encoding.GetCodec(proto.Name) + return getCodec(proto.Name) } - codec := encoding.GetCodec(contentSubtype) + codec := getCodec(contentSubtype) if codec == nil { logger.Warningf("Unsupported codec %q. Defaulting to %q for now. 
This will start to fail in future releases.", contentSubtype, proto.Name) - return encoding.GetCodec(proto.Name) + return getCodec(proto.Name) } return codec } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/service_config.go b/terraform/providers/google/vendor/google.golang.org/grpc/service_config.go index 9da8fc8027d..2671c5ef69f 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/service_config.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/service_config.go @@ -26,6 +26,7 @@ import ( "time" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" @@ -163,9 +164,11 @@ type jsonSC struct { } func init() { - internal.ParseServiceConfig = parseServiceConfig + internal.ParseServiceConfig = func(js string) *serviceconfig.ParseResult { + return parseServiceConfig(js, defaultMaxCallAttempts) + } } -func parseServiceConfig(js string) *serviceconfig.ParseResult { +func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { if len(js) == 0 { return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} } @@ -183,12 +186,12 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { } c := rsc.LoadBalancingConfig if c == nil { - name := PickFirstBalancerName + name := pickfirst.Name if rsc.LoadBalancingPolicy != nil { name = *rsc.LoadBalancingPolicy } if balancer.Get(name) == nil { - name = PickFirstBalancerName + name = pickfirst.Name } cfg := []map[string]any{{name: struct{}{}}} strCfg, err := json.Marshal(cfg) @@ -218,7 +221,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { WaitForReady: m.WaitForReady, Timeout: (*time.Duration)(m.Timeout), } - if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { + if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy, maxAttempts); err != nil { logger.Warningf("grpc: unmarshalling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } @@ -264,7 +267,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { return &serviceconfig.ParseResult{Config: &sc} } -func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) { +func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) { if jrp == nil { return nil, nil } @@ -278,17 +281,16 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol return nil, nil } + if jrp.MaxAttempts < maxAttempts { + maxAttempts = jrp.MaxAttempts + } rp := &internalserviceconfig.RetryPolicy{ - MaxAttempts: jrp.MaxAttempts, + MaxAttempts: maxAttempts, InitialBackoff: time.Duration(jrp.InitialBackoff), MaxBackoff: time.Duration(jrp.MaxBackoff), BackoffMultiplier: jrp.BackoffMultiplier, RetryableStatusCodes: make(map[codes.Code]bool), } - if rp.MaxAttempts > 5 { - // TODO(retry): Make the max maxAttempts configurable. 
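convertRetryPolicy above replaces the hardcoded cap of five attempts with min(JSON maxAttempts, channel limit), where the limit arrives through the new maxCallAttempts plumbing. A hedged sketch of a client raising that limit; grpc.WithMaxCallAttempts is assumed to be the public knob behind cc.dopts.maxCallAttempts in this release, and the echo.Echo service is a placeholder:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// retryPolicy.maxAttempts of 10 is now clamped to the channel's limit
	// rather than a hardcoded 5.
	sc := `{
	  "methodConfig": [{
	    "name": [{"service": "echo.Echo"}],
	    "retryPolicy": {
	      "maxAttempts": 10,
	      "initialBackoff": "0.1s",
	      "maxBackoff": "1s",
	      "backoffMultiplier": 2,
	      "retryableStatusCodes": ["UNAVAILABLE"]
	    }
	  }]
	}`
	cc, err := grpc.NewClient("localhost:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(sc),
		grpc.WithMaxCallAttempts(10), // assumption: available alongside this change
	)
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
}
```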
- rp.MaxAttempts = 5 - } for _, code := range jrp.RetryableStatusCodes { rp.RetryableStatusCodes[code] = true } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/shared_buffer_pool.go b/terraform/providers/google/vendor/google.golang.org/grpc/shared_buffer_pool.go deleted file mode 100644 index 48a64cfe8e2..00000000000 --- a/terraform/providers/google/vendor/google.golang.org/grpc/shared_buffer_pool.go +++ /dev/null @@ -1,154 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import "sync" - -// SharedBufferPool is a pool of buffers that can be shared, resulting in -// decreased memory allocation. Currently, in gRPC-go, it is only utilized -// for parsing incoming messages. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -type SharedBufferPool interface { - // Get returns a buffer with specified length from the pool. - // - // The returned byte slice may be not zero initialized. - Get(length int) []byte - - // Put returns a buffer to the pool. - Put(*[]byte) -} - -// NewSharedBufferPool creates a simple SharedBufferPool with buckets -// of different sizes to optimize memory usage. This prevents the pool from -// wasting large amounts of memory, even when handling messages of varying sizes. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func NewSharedBufferPool() SharedBufferPool { - return &simpleSharedBufferPool{ - pools: [poolArraySize]simpleSharedBufferChildPool{ - newBytesPool(level0PoolMaxSize), - newBytesPool(level1PoolMaxSize), - newBytesPool(level2PoolMaxSize), - newBytesPool(level3PoolMaxSize), - newBytesPool(level4PoolMaxSize), - newBytesPool(0), - }, - } -} - -// simpleSharedBufferPool is a simple implementation of SharedBufferPool. 
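The deleted SharedBufferPool (its tiered sync.Pool buckets continue below) is superseded by the mem package. A small sketch of using the replacement pool directly; the Get/Put signatures on *[]byte are an assumption about mem.BufferPool, since only mem.DefaultBufferPool itself appears in this patch:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()
	buf := pool.Get(1024) // assumed: returns *[]byte with len 1024
	copy(*buf, "hello")
	fmt.Println(len(*buf))
	pool.Put(buf) // return the buffer for reuse
}
```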
-type simpleSharedBufferPool struct { - pools [poolArraySize]simpleSharedBufferChildPool -} - -func (p *simpleSharedBufferPool) Get(size int) []byte { - return p.pools[p.poolIdx(size)].Get(size) -} - -func (p *simpleSharedBufferPool) Put(bs *[]byte) { - p.pools[p.poolIdx(cap(*bs))].Put(bs) -} - -func (p *simpleSharedBufferPool) poolIdx(size int) int { - switch { - case size <= level0PoolMaxSize: - return level0PoolIdx - case size <= level1PoolMaxSize: - return level1PoolIdx - case size <= level2PoolMaxSize: - return level2PoolIdx - case size <= level3PoolMaxSize: - return level3PoolIdx - case size <= level4PoolMaxSize: - return level4PoolIdx - default: - return levelMaxPoolIdx - } -} - -const ( - level0PoolMaxSize = 16 // 16 B - level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B - level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB - level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB - level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB -) - -const ( - level0PoolIdx = iota - level1PoolIdx - level2PoolIdx - level3PoolIdx - level4PoolIdx - levelMaxPoolIdx - poolArraySize -) - -type simpleSharedBufferChildPool interface { - Get(size int) []byte - Put(any) -} - -type bufferPool struct { - sync.Pool - - defaultSize int -} - -func (p *bufferPool) Get(size int) []byte { - bs := p.Pool.Get().(*[]byte) - - if cap(*bs) < size { - p.Pool.Put(bs) - - return make([]byte, size) - } - - return (*bs)[:size] -} - -func newBytesPool(size int) simpleSharedBufferChildPool { - return &bufferPool{ - Pool: sync.Pool{ - New: func() any { - bs := make([]byte, size) - return &bs - }, - }, - defaultSize: size, - } -} - -// nopBufferPool is a buffer pool just makes new buffer without pooling. -type nopBufferPool struct { -} - -func (nopBufferPool) Get(length int) []byte { - return make([]byte, length) -} - -func (nopBufferPool) Put(*[]byte) { -} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/stats/stats.go b/terraform/providers/google/vendor/google.golang.org/grpc/stats/stats.go index fdb0bd65182..71195c4943d 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/stats/stats.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/stats/stats.go @@ -77,9 +77,6 @@ type InPayload struct { // the call to HandleRPC which provides the InPayload returns and must be // copied if needed later. Payload any - // Data is the serialized message payload. - // Deprecated: Data will be removed in the next release. - Data []byte // Length is the size of the uncompressed payload data. Does not include any // framing (gRPC or HTTP/2). @@ -150,9 +147,6 @@ type OutPayload struct { // the call to HandleRPC which provides the OutPayload returns and must be // copied if needed later. Payload any - // Data is the serialized message payload. - // Deprecated: Data will be removed in the next release. - Data []byte // Length is the size of the uncompressed payload data. Does not include any // framing (gRPC or HTTP/2). 
Length int diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/stream.go b/terraform/providers/google/vendor/google.golang.org/grpc/stream.go index b54563e81cd..bb2b2a216ce 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/stream.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/stream.go @@ -23,6 +23,7 @@ import ( "errors" "io" "math" + "math/rand" "strconv" "sync" "time" @@ -34,13 +35,13 @@ import ( "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/serviceconfig" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -359,7 +360,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client cs.attempt = a return nil } - if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil { return nil, err } @@ -517,7 +518,7 @@ func (a *csAttempt) newStream() error { } a.s = s a.ctx = s.Context() - a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} + a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} return nil } @@ -566,10 +567,15 @@ type clientStream struct { // place where we need to check if the attempt is nil. attempt *csAttempt // TODO(hedging): hedging will have multiple attempts simultaneously. - committed bool // active attempt committed for retry? - onCommit func() - buffer []func(a *csAttempt) error // operations to replay on retry - bufferSize int // current size of buffer + committed bool // active attempt committed for retry? + onCommit func() + replayBuffer []replayOp // operations to replay on retry + replayBufferSize int // current size of replayBuffer +} + +type replayOp struct { + op func(a *csAttempt) error + cleanup func() } // csAttempt implements a single transport stream attempt within a @@ -607,7 +613,12 @@ func (cs *clientStream) commitAttemptLocked() { cs.onCommit() } cs.committed = true - cs.buffer = nil + for _, op := range cs.replayBuffer { + if op.cleanup != nil { + op.cleanup() + } + } + cs.replayBuffer = nil } func (cs *clientStream) commitAttempt() { @@ -699,7 +710,7 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { if max := float64(rp.MaxBackoff); cur > max { cur = max } - dur = time.Duration(grpcrand.Int63n(int64(cur))) + dur = time.Duration(rand.Int63n(int64(cur))) cs.numRetriesSincePushback++ } @@ -732,7 +743,7 @@ func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { // the stream is canceled. return err } - // Note that the first op in the replay buffer always sets cs.attempt + // Note that the first op in replayBuffer always sets cs.attempt // if it is able to pick a transport and create a stream. if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil @@ -761,7 +772,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) // already be status errors. 
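The retry path above now draws its jitter from math/rand directly (internal/grpcrand was dropped). The backoff it computes is full jitter over an exponentially growing ceiling, capped at MaxBackoff. A standalone sketch; backoff is an illustrative helper, not the patch's code:

```go
package main

import (
	"fmt"
	"math"
	"math/rand"
	"time"
)

// backoff mirrors shouldRetry's computation: ceiling = initial * mult^retries,
// capped at max, then a uniformly random duration below the ceiling.
func backoff(initial, max time.Duration, mult float64, retries int) time.Duration {
	cur := float64(initial) * math.Pow(mult, float64(retries))
	if m := float64(max); cur > m {
		cur = m
	}
	return time.Duration(rand.Int63n(int64(cur)))
}

func main() {
	for i := 0; i < 4; i++ {
		fmt.Println(backoff(100*time.Millisecond, time.Second, 2, i))
	}
}
```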
return toRPCErr(op(cs.attempt)) } - if len(cs.buffer) == 0 { + if len(cs.replayBuffer) == 0 { // For the first op, which controls creation of the stream and // assigns cs.attempt, we need to create a new attempt inline // before executing the first op. On subsequent ops, the attempt @@ -851,25 +862,26 @@ func (cs *clientStream) Trailer() metadata.MD { } func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { - for _, f := range cs.buffer { - if err := f(attempt); err != nil { + for _, f := range cs.replayBuffer { + if err := f.op(attempt); err != nil { return err } } return nil } -func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { +func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) { // Note: we still will buffer if retry is disabled (for transparent retries). if cs.committed { return } - cs.bufferSize += sz - if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { + cs.replayBufferSize += sz + if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize { cs.commitAttemptLocked() + cleanup() return } - cs.buffer = append(cs.buffer, op) + cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup}) } func (cs *clientStream) SendMsg(m any) (err error) { @@ -891,23 +903,50 @@ func (cs *clientStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp) + hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + + dataLen := data.Len() + payloadLen := payload.Len() // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > *cs.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) + if payloadLen > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize) } + + // always take an extra ref in case data == payload (i.e. when the data isn't + // compressed). The original ref will always be freed by the deferred free above. + payload.Ref() op := func(a *csAttempt) error { - return a.sendMsg(m, hdr, payload, data) + return a.sendMsg(m, hdr, payload, dataLen, payloadLen) + } + + // onSuccess is invoked when the op is captured for a subsequent retry. If the + // stream was established by a previous message and therefore retries are + // disabled, onSuccess will not be invoked, and payloadRef can be freed + // immediately. 
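SendMsg above pins down buffer ownership: it takes an extra payload.Ref() before the retry replay buffer can become a second owner, and every owner calls Free exactly once. The same discipline in miniature, using only mem calls that appear in this patch:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	b := []byte("payload")
	// nil pool: the buffer is unpooled, as in the patch's legacy
	// Decompressor path (mem.NewBuffer(&uncompressedBuf, nil)).
	data := mem.BufferSlice{mem.NewBuffer(&b, nil)}

	data.Ref()              // second owner (e.g. a replay buffer) now shares it
	fmt.Println(data.Len()) // 7

	data.Free() // first owner done
	data.Free() // second owner done; 1 initial ref + 1 Ref = 2 Frees, balanced
}
```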
+ onSuccessCalled := false + err = cs.withRetry(op, func() { + cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free) + onSuccessCalled = true + }) + if !onSuccessCalled { + payload.Free() } - err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) if len(cs.binlogs) != 0 && err == nil { cm := &binarylog.ClientMessage{ OnClientSide: true, - Message: data, + Message: data.Materialize(), } for _, binlog := range cs.binlogs { binlog.Log(cs.ctx, cm) @@ -924,6 +963,7 @@ func (cs *clientStream) RecvMsg(m any) error { var recvInfo *payloadInfo if len(cs.binlogs) != 0 { recvInfo = &payloadInfo{} + defer recvInfo.free() } err := cs.withRetry(func(a *csAttempt) error { return a.recvMsg(m, recvInfo) @@ -931,7 +971,7 @@ func (cs *clientStream) RecvMsg(m any) error { if len(cs.binlogs) != 0 && err == nil { sm := &binarylog.ServerMessage{ OnClientSide: true, - Message: recvInfo.uncompressedBytes, + Message: recvInfo.uncompressedBytes.Materialize(), } for _, binlog := range cs.binlogs { binlog.Log(cs.ctx, sm) @@ -958,7 +998,7 @@ func (cs *clientStream) CloseSend() error { // RecvMsg. This also matches historical behavior. return nil } - cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }) if len(cs.binlogs) != 0 { chc := &binarylog.ClientHalfClose{ OnClientSide: true, @@ -1034,7 +1074,7 @@ func (cs *clientStream) finish(err error) { cs.cancel() } -func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { +func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() @@ -1052,8 +1092,10 @@ func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { } return io.EOF } - for _, sh := range a.statsHandlers { - sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) + if len(a.statsHandlers) != 0 { + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) + } } if channelz.IsOn() { a.t.IncrMsgSent() @@ -1065,6 +1107,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} + defer payInfo.free() } if !a.decompSet { @@ -1083,8 +1126,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { // Only initialize this state once per stream. a.decompSet = true } - err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) - if err != nil { + if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil { if err == io.EOF { if statusErr := a.s.Status().Err(); statusErr != nil { return statusErr @@ -1103,14 +1145,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } for _, sh := range a.statsHandlers { sh.HandleRPC(a.ctx, &stats.InPayload{ - Client: true, - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, + Client: true, + RecvTime: time.Now(), + Payload: m, WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, - Length: len(payInfo.uncompressedBytes), + Length: payInfo.uncompressedBytes.Len(), }) } if channelz.IsOn() { @@ -1122,14 +1162,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } // Special handling for non-server-stream rpcs. 
// This recv expects EOF or errors, so we don't collect inPayload. - err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp) - if err == nil { - return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) - } - if err == io.EOF { + if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF { return a.s.Status().Err() // non-server streaming Recv returns nil on success + } else if err != nil { + return toRPCErr(err) } - return toRPCErr(err) + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } func (a *csAttempt) finish(err error) { @@ -1185,12 +1223,12 @@ func (a *csAttempt) finish(err error) { a.mu.Unlock() } -// newClientStream creates a ClientStream with the specified transport, on the +// newNonRetryClientStream creates a ClientStream with the specified transport, on the // given addrConn. // // It's expected that the given transport is either the same one in addrConn, or // is already closed. To avoid race, transport is specified separately, instead -// of using ac.transpot. +// of using ac.transport. // // Main difference between this and ClientConn.NewStream: // - no retry @@ -1276,7 +1314,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, err } as.s = s - as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} + as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { // Listen on stream context to cleanup when the stream context is @@ -1373,17 +1411,26 @@ func (as *addrConnStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) + hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + // TODO(dfawley): should we be checking len(data) instead? - if len(payld) > *as.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) + if payload.Len() > *as.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize) } - if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { if !as.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1423,8 +1470,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Only initialize this state once per stream. 
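Note the precedence restated in the receive path above: a legacy Decompressor installed via WithDecompressor wins; otherwise the stream's grpc-encoding name selects a registered encoding.Compressor. A client sketch exercising the registered-compressor path with the stock gzip codec; target and method are placeholders:

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" encoding.Compressor
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	cc, err := grpc.NewClient("localhost:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// Compresses the request with the registered "gzip" compressor; the
	// response is then decompressed by name on the recv path shown above.
	err = cc.Invoke(ctx, "/demo.Demo/Ping", // hypothetical method
		&emptypb.Empty{}, &emptypb.Empty{}, grpc.UseCompressor("gzip"))
	log.Printf("rpc finished: %v", err)
}
```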
as.decompSet = true } - err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) - if err != nil { + if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil { if err == io.EOF { if statusErr := as.s.Status().Err(); statusErr != nil { return statusErr @@ -1444,14 +1490,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. - err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) - if err == nil { - return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) - } - if err == io.EOF { + if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF { return as.s.Status().Err() // non-server streaming Recv returns nil on success + } else if err != nil { + return toRPCErr(err) } - return toRPCErr(err) + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } func (as *addrConnStream) finish(err error) { @@ -1645,18 +1689,31 @@ func (ss *serverStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) + hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + + dataLen := data.Len() + payloadLen := payload.Len() + // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > ss.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize) + if payloadLen > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", payloadLen, ss.maxSendMessageSize) } if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } + if len(ss.binlogs) != 0 { if !ss.serverHeaderBinlogged { h, _ := ss.s.Header() @@ -1669,7 +1726,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } } sm := &binarylog.ServerMessage{ - Message: data, + Message: data.Materialize(), } for _, binlog := range ss.binlogs { binlog.Log(ss.ctx, sm) @@ -1677,7 +1734,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { - sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now())) } } return nil @@ -1714,8 +1771,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) { var payInfo *payloadInfo if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} + defer payInfo.free() } - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil { if err == io.EOF { if len(ss.binlogs) != 0 { chc := &binarylog.ClientHalfClose{} @@ -1733,11 +1791,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) { if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { sh.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - Length: len(payInfo.uncompressedBytes), + RecvTime: time.Now(), + Payload: m, + Length: payInfo.uncompressedBytes.Len(), WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, }) @@ -1745,7 +1801,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } if len(ss.binlogs) != 0 { cm := &binarylog.ClientMessage{ - Message: payInfo.uncompressedBytes, + Message: payInfo.uncompressedBytes.Materialize(), } for _, binlog := range ss.binlogs { binlog.Log(ss.ctx, cm) @@ -1760,23 +1816,26 @@ func MethodFromServerStream(stream ServerStream) (string, bool) { return Method(stream.Context()) } -// prepareMsg returns the hdr, payload and data -// using the compressors passed or using the -// passed preparedmsg -func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { +// prepareMsg returns the hdr, payload and data using the compressors passed or +// using the passed preparedmsg. The returned boolean indicates whether +// compression was made and therefore whether the payload needs to be freed in +// addition to the returned data. Freeing the payload if the returned boolean is +// false can lead to undefined behavior. +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) { if preparedMsg, ok := m.(*PreparedMsg); ok { - return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil + return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil } // The input interface is not a prepared msg. 
// Marshal and Compress the data at this point data, err = encode(codec, m) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, 0, err } - compData, err := compress(data, cp, comp) + compData, pf, err := compress(data, cp, comp, pool) if err != nil { - return nil, nil, nil, err + data.Free() + return nil, nil, nil, 0, err } - hdr, payload = msgHeader(data, compData) - return hdr, payload, data, nil + hdr, payload = msgHeader(data, compData, pf) + return hdr, data, payload, pf, nil } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/stream_interfaces.go b/terraform/providers/google/vendor/google.golang.org/grpc/stream_interfaces.go index 8b813529c0c..0037fee0bd7 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/stream_interfaces.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/stream_interfaces.go @@ -22,15 +22,35 @@ package grpc // request, many responses) RPC. It is generic over the type of the response // message. It is used in generated code. type ServerStreamingClient[Res any] interface { + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. ClientStream } // ServerStreamingServer represents the server side of a server-streaming (one // request, many responses) RPC. It is generic over the type of the response // message. It is used in generated code. +// +// To terminate the response stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. type ServerStreamingServer[Res any] interface { + // Send sends a response message to the client. The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } @@ -39,8 +59,22 @@ type ServerStreamingServer[Res any] interface { // message stream and the type of the unary response message. It is used in // generated code. type ClientStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using CloseAndRecv(). Send(*Req) error + + // CloseAndRecv closes the request stream and waits for the server's + // response. This method must be called once and only once after sending + // all request messages. Any error returned is implemented by the status + // package. CloseAndRecv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. 
ClientStream } @@ -48,9 +82,28 @@ type ClientStreamingClient[Req any, Res any] interface { // requests, one response) RPC. It is generic over both the type of the request // message stream and the type of the unary response message. It is used in // generated code. +// +// To terminate the RPC, call SendAndClose and return nil from the method +// handler or do not call SendAndClose and return an error from the status +// package. type ClientStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseAndRecv on its + // ClientStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. Recv() (*Req, error) + + // SendAndClose sends a single response message to the client and closes + // the stream. This method must be called once and only once after all + // request messages have been processed. Recv should not be called after + // calling SendAndClose. SendAndClose(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } @@ -59,8 +112,23 @@ type ClientStreamingServer[Req any, Res any] interface { // request message stream and the type of the response message stream. It is // used in generated code. type BidiStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using Recv(). Send(*Req) error + + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, Trailer, and + // CloseSend functionality. No other methods in the ClientStream should be + // called directly. ClientStream } @@ -68,9 +136,27 @@ type BidiStreamingClient[Req any, Res any] interface { // (many requests, many responses) RPC. It is generic over both the type of the // request message stream and the type of the response message stream. It is // used in generated code. +// +// To terminate the stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. type BidiStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseSend on its + // BidiStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. Recv() (*Req, error) + + // Send sends a response message to the client. 
The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/version.go b/terraform/providers/google/vendor/google.golang.org/grpc/version.go index e1806e76000..a96b6a6bff8 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/version.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.64.0" +const Version = "1.67.1" diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/bootstrap/credentials.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/bootstrap/credentials.go index cb022b45de1..578e1278970 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/bootstrap/credentials.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/bootstrap/credentials.go @@ -58,7 +58,7 @@ func (t *tlsCredsBuilder) Name() string { } // googleDefaultCredsBuilder implements the `Credentials` interface defined in -// package `xds/boostrap` and encapsulates a Google Default credential. +// package `xds/bootstrap` and encapsulates a Google Default credential. type googleDefaultCredsBuilder struct{} func (d *googleDefaultCredsBuilder) Build(json.RawMessage) (credentials.Bundle, func(), error) { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/csds/csds.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/csds/csds.go index 8d03124811a..3d8398a72ff 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/csds/csds.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/csds/csds.go @@ -27,17 +27,13 @@ import ( "context" "fmt" "io" - "sync" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/xdsclient" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/protobuf/types/known/timestamppb" - v3adminpb "github.com/envoyproxy/go-control-plane/envoy/admin/v3" v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" ) @@ -58,26 +54,18 @@ func prefixLogger(s *ClientStatusDiscoveryServer) *internalgrpclog.PrefixLogger // https://github.com/grpc/proposal/blob/master/A40-csds-support.md. type ClientStatusDiscoveryServer struct { logger *internalgrpclog.PrefixLogger - - mu sync.Mutex - xdsClient xdsclient.XDSClient - xdsClientClose func() } // NewClientStatusDiscoveryServer returns an implementation of the CSDS server // that can be registered on a gRPC server. 
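For illustration, a sketch of a handler written against the generic BidiStreamingServer interface documented above (EchoRequest, EchoResponse, and bidiEcho are hypothetical stand-ins, not grpc-go or generated types):

package main

import (
	"io"

	"google.golang.org/grpc"
)

type EchoRequest struct{ Message string }
type EchoResponse struct{ Message string }

// bidiEcho follows the documented contract: Recv until io.EOF (the client
// called CloseSend), Send one response per request, and return nil to end
// the stream with an OK status.
func bidiEcho(stream grpc.BidiStreamingServer[EchoRequest, EchoResponse]) error {
	for {
		req, err := stream.Recv()
		if err == io.EOF {
			return nil // client half-closed; terminate with OK
		}
		if err != nil {
			return err // stream no longer usable; propagate the status
		}
		if err := stream.Send(&EchoResponse{Message: req.Message}); err != nil {
			return err
		}
	}
}

func main() { _ = bidiEcho } // compile-only sketch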
func NewClientStatusDiscoveryServer() (*ClientStatusDiscoveryServer, error) { - c, close, err := xdsclient.New() - if err != nil { - logger.Warningf("Failed to create xDS client: %v", err) - } - s := &ClientStatusDiscoveryServer{xdsClient: c, xdsClientClose: close} + s := &ClientStatusDiscoveryServer{} s.logger = prefixLogger(s) - s.logger.Infof("Created CSDS server, with xdsClient %p", c) + s.logger.Infof("Created CSDS server") return s, nil } -// StreamClientStatus implementations interface ClientStatusDiscoveryServiceServer. +// StreamClientStatus implements interface ClientStatusDiscoveryServiceServer. func (s *ClientStatusDiscoveryServer) StreamClientStatus(stream v3statusgrpc.ClientStatusDiscoveryService_StreamClientStatusServer) error { for { req, err := stream.Recv() @@ -97,85 +85,24 @@ func (s *ClientStatusDiscoveryServer) StreamClientStatus(stream v3statusgrpc.Cli } } -// FetchClientStatus implementations interface ClientStatusDiscoveryServiceServer. +// FetchClientStatus implements interface ClientStatusDiscoveryServiceServer. func (s *ClientStatusDiscoveryServer) FetchClientStatus(_ context.Context, req *v3statuspb.ClientStatusRequest) (*v3statuspb.ClientStatusResponse, error) { return s.buildClientStatusRespForReq(req) } -// buildClientStatusRespForReq fetches the status from the client, and returns -// the response to be sent back to xdsclient. +// buildClientStatusRespForReq fetches the status of xDS resources from the +// xdsclient, and returns the response to be sent back to the csds client. // // If it returns an error, the error is a status error. func (s *ClientStatusDiscoveryServer) buildClientStatusRespForReq(req *v3statuspb.ClientStatusRequest) (*v3statuspb.ClientStatusResponse, error) { - s.mu.Lock() - defer s.mu.Unlock() - - if s.xdsClient == nil { - return &v3statuspb.ClientStatusResponse{}, nil - } // Field NodeMatchers is unsupported, by design // https://github.com/grpc/proposal/blob/master/A40-csds-support.md#detail-node-matching. if len(req.NodeMatchers) != 0 { return nil, status.Errorf(codes.InvalidArgument, "node_matchers are not supported, request contains node_matchers: %v", req.NodeMatchers) } - dump := s.xdsClient.DumpResources() - ret := &v3statuspb.ClientStatusResponse{ - Config: []*v3statuspb.ClientConfig{ - { - Node: s.xdsClient.BootstrapConfig().NodeProto, - GenericXdsConfigs: dumpToGenericXdsConfig(dump), - }, - }, - } - return ret, nil + return xdsclient.DumpResources(), nil } // Close cleans up the resources. 
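For illustration, the registration wiring the comments above imply, assuming the standard go-control-plane registration helper (a sketch, not part of the patch; with the xDS client now read process-wide, Close is a no-op but remains safe to defer):

package main

import (
	"net"

	v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3"
	"google.golang.org/grpc"
	"google.golang.org/grpc/xds/csds"
)

func main() {
	srv := grpc.NewServer()
	csdss, err := csds.NewClientStatusDiscoveryServer()
	if err != nil {
		panic(err)
	}
	defer csdss.Close()
	// The same server implements both StreamClientStatus and FetchClientStatus.
	v3statusgrpc.RegisterClientStatusDiscoveryServiceServer(srv, csdss)

	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		panic(err)
	}
	srv.Serve(lis)
}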
-func (s *ClientStatusDiscoveryServer) Close() { - if s.xdsClientClose != nil { - s.xdsClientClose() - } -} - -func dumpToGenericXdsConfig(dump map[string]map[string]xdsresource.UpdateWithMD) []*v3statuspb.ClientConfig_GenericXdsConfig { - var ret []*v3statuspb.ClientConfig_GenericXdsConfig - for typeURL, updates := range dump { - for name, update := range updates { - config := &v3statuspb.ClientConfig_GenericXdsConfig{ - TypeUrl: typeURL, - Name: name, - VersionInfo: update.MD.Version, - XdsConfig: update.Raw, - LastUpdated: timestamppb.New(update.MD.Timestamp), - ClientStatus: serviceStatusToProto(update.MD.Status), - } - if errState := update.MD.ErrState; errState != nil { - config.ErrorState = &v3adminpb.UpdateFailureState{ - LastUpdateAttempt: timestamppb.New(errState.Timestamp), - Details: errState.Err.Error(), - VersionInfo: errState.Version, - } - } - ret = append(ret, config) - } - } - return ret -} - -func serviceStatusToProto(serviceStatus xdsresource.ServiceStatus) v3adminpb.ClientResourceStatus { - switch serviceStatus { - case xdsresource.ServiceStatusUnknown: - return v3adminpb.ClientResourceStatus_UNKNOWN - case xdsresource.ServiceStatusRequested: - return v3adminpb.ClientResourceStatus_REQUESTED - case xdsresource.ServiceStatusNotExist: - return v3adminpb.ClientResourceStatus_DOES_NOT_EXIST - case xdsresource.ServiceStatusACKed: - return v3adminpb.ClientResourceStatus_ACKED - case xdsresource.ServiceStatusNACKed: - return v3adminpb.ClientResourceStatus_NACKED - default: - return v3adminpb.ClientResourceStatus_UNKNOWN - } -} +func (s *ClientStatusDiscoveryServer) Close() {} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go index 37b38799e78..936bf2da327 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go @@ -26,7 +26,9 @@ package googledirectpath import ( + "encoding/json" "fmt" + "math/rand" "net/url" "time" @@ -34,10 +36,8 @@ import ( "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/googlecloud" internalgrpclog "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/xds/bootstrap" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/xdsclient" _ "google.golang.org/grpc/xds" // To register xds resolvers and balancers. 
) @@ -46,30 +46,21 @@ const ( c2pScheme = "google-c2p" c2pAuthority = "traffic-director-c2p.xds.googleapis.com" - tdURL = "dns:///directpath-pa.googleapis.com" - httpReqTimeout = 10 * time.Second - zoneURL = "http://metadata.google.internal/computeMetadata/v1/instance/zone" - ipv6URL = "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ipv6s" - - gRPCUserAgentName = "gRPC Go" - clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning" - clientFeatureResourceWrapper = "xds.config.resource-in-sotw" - ipv6CapableMetadataName = "TRAFFICDIRECTOR_DIRECTPATH_C2P_IPV6_CAPABLE" - - logPrefix = "[google-c2p-resolver]" + tdURL = "dns:///directpath-pa.googleapis.com" + zoneURL = "http://metadata.google.internal/computeMetadata/v1/instance/zone" + ipv6URL = "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ipv6s" + ipv6CapableMetadataName = "TRAFFICDIRECTOR_DIRECTPATH_C2P_IPV6_CAPABLE" + httpReqTimeout = 10 * time.Second + logPrefix = "[google-c2p-resolver]" dnsName, xdsName = "dns", "xds" ) // For overriding in unittests. var ( - onGCE = googlecloud.OnGCE - - newClientWithConfig = func(config *bootstrap.Config) (xdsclient.XDSClient, func(), error) { - return xdsclient.NewWithConfig(config) - } - - logger = internalgrpclog.NewPrefixLogger(grpclog.Component("directpath"), logPrefix) + onGCE = googlecloud.OnGCE + randInt = rand.Int + logger = internalgrpclog.NewPrefixLogger(grpclog.Component("directpath"), logPrefix) ) func init() { @@ -108,23 +99,18 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts xdsServerCfg := newXdsServerConfig(xdsServerURI) authoritiesCfg := newAuthoritiesConfig(xdsServerCfg) - config, err := bootstrap.NewConfigFromContents([]byte(fmt.Sprintf(` - { - "xds_servers": [%s], - "client_default_listener_resource_name_template": "%%s", - "authorities": %s, - "node": %s - }`, xdsServerCfg, authoritiesCfg, nodeCfg))) - - if err != nil { - return nil, fmt.Errorf("failed to build bootstrap configuration: %v", err) + cfg := map[string]any{ + "xds_servers": []any{xdsServerCfg}, + "client_default_listener_resource_name_template": "%s", + "authorities": authoritiesCfg, + "node": nodeCfg, } - - // Create singleton xds client with this config. The xds client will be - // used by the xds resolver later. 
- _, close, err := newClientWithConfig(config) + cfgJSON, err := json.Marshal(cfg) if err != nil { - return nil, fmt.Errorf("failed to start xDS client: %v", err) + return nil, fmt.Errorf("failed to marshal bootstrap configuration: %v", err) + } + if err := bootstrap.SetFallbackBootstrapConfig(cfgJSON); err != nil { + return nil, fmt.Errorf("failed to set fallback bootstrap configuration: %v", err) } t = resolver.Target{ @@ -134,66 +120,36 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts Path: t.URL.Path, }, } - xdsR, err := resolver.Get(xdsName).Build(t, cc, opts) - if err != nil { - close() - return nil, err - } - return &c2pResolver{ - Resolver: xdsR, - clientCloseFunc: close, - }, nil + return resolver.Get(xdsName).Build(t, cc, opts) } func (b c2pResolverBuilder) Scheme() string { return c2pScheme } -type c2pResolver struct { - resolver.Resolver - clientCloseFunc func() -} - -func (r *c2pResolver) Close() { - r.Resolver.Close() - r.clientCloseFunc() -} - -var id = fmt.Sprintf("C2P-%d", grpcrand.Int()) - -func newNodeConfig(zone string, ipv6Capable bool) string { - metadata := "" +func newNodeConfig(zone string, ipv6Capable bool) map[string]any { + node := map[string]any{ + "id": fmt.Sprintf("C2P-%d", randInt()), + "locality": map[string]any{"zone": zone}, + } if ipv6Capable { - metadata = fmt.Sprintf(`, "metadata": { "%s": true }`, ipv6CapableMetadataName) + node["metadata"] = map[string]any{ipv6CapableMetadataName: true} } - - return fmt.Sprintf(` - { - "id": "%s", - "locality": { - "zone": "%s" - } - %s - }`, id, zone, metadata) + return node } -func newAuthoritiesConfig(xdsServer string) string { - return fmt.Sprintf(` - { - "%s": { - "xds_servers": [%s] - } +func newAuthoritiesConfig(serverCfg map[string]any) map[string]any { + return map[string]any{ + c2pAuthority: map[string]any{"xds_servers": []any{serverCfg}}, } - `, c2pAuthority, xdsServer) } -func newXdsServerConfig(xdsServerURI string) string { - return fmt.Sprintf(` - { - "server_uri": "%s", - "channel_creds": [{"type": "google_default"}], - "server_features": ["xds_v3", "ignore_resource_deletion", "xds.config.resource-in-sotw"] - }`, xdsServerURI) +func newXdsServerConfig(uri string) map[string]any { + return map[string]any{ + "server_uri": uri, + "channel_creds": []map[string]any{{"type": "google_default"}}, + "server_features": []any{"ignore_resource_deletion"}, + } } // runDirectPath returns whether this resolver should use direct path. diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 8e97e104ed4..9a112e27697 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -207,7 +207,7 @@ func (b *cdsBalancer) handleSecurityConfig(config *xdsresource.SecurityConfig) e } // A root provider is required whether we are using TLS or mTLS. - cpc := b.xdsClient.BootstrapConfig().CertProviderConfigs + cpc := b.xdsClient.BootstrapConfig().CertProviderConfigs() rootProvider, err := buildProvider(cpc, config.RootInstanceName, config.RootCertName, false, true) if err != nil { return err @@ -309,8 +309,8 @@ func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) erro b.lbCfg = lbCfg // Handle the update in a blocking fashion. 
- done := make(chan struct{}) - ok = b.serializer.Schedule(func(context.Context) { + errCh := make(chan error, 1) + callback := func(context.Context) { // A config update with a changed top-level cluster name means that none // of our old watchers make any sense any more. b.closeAllWatchers() @@ -319,20 +319,20 @@ func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) erro // could end up creating more watchers if turns out to be an aggregate // cluster. b.createAndAddWatcherForCluster(lbCfg.ClusterName) - close(done) - }) - if !ok { + errCh <- nil + } + onFailure := func() { // The call to Schedule returns false *only* if the serializer has been // closed, which happens only when we receive an update after close. - return errBalancerClosed + errCh <- errBalancerClosed } - <-done - return nil + b.serializer.ScheduleOr(callback, onFailure) + return <-errCh } // ResolverError handles errors reported by the xdsResolver. func (b *cdsBalancer) ResolverError(err error) { - b.serializer.Schedule(func(context.Context) { + b.serializer.TrySchedule(func(context.Context) { // Resource not found error is reported by the resolver when the // top-level cluster resource is removed by the management server. if xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound { @@ -351,7 +351,7 @@ func (b *cdsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Sub b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) } -// Closes all registered cluster wathers and removes them from the internal map. +// Closes all registered cluster watchers and removes them from the internal map. // // Only executed in the context of a serializer callback. func (b *cdsBalancer) closeAllWatchers() { @@ -364,7 +364,7 @@ func (b *cdsBalancer) closeAllWatchers() { // Close cancels the CDS watch, closes the child policy and closes the // cdsBalancer. 
func (b *cdsBalancer) Close() { - b.serializer.Schedule(func(ctx context.Context) { + b.serializer.TrySchedule(func(context.Context) { b.closeAllWatchers() if b.childLB != nil { @@ -384,7 +384,7 @@ func (b *cdsBalancer) Close() { } func (b *cdsBalancer) ExitIdle() { - b.serializer.Schedule(func(context.Context) { + b.serializer.TrySchedule(func(context.Context) { if b.childLB == nil { b.logger.Warningf("Received ExitIdle with no child policy") return diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go index 0b0d168376d..835461d0997 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go @@ -32,22 +32,19 @@ type clusterWatcher struct { parent *cdsBalancer } -func (cw *clusterWatcher) OnUpdate(u *xdsresource.ClusterResourceData) { - cw.parent.serializer.Schedule(func(context.Context) { - cw.parent.onClusterUpdate(cw.name, u.Resource) - }) +func (cw *clusterWatcher) OnUpdate(u *xdsresource.ClusterResourceData, onDone xdsresource.OnDoneFunc) { + handleUpdate := func(context.Context) { cw.parent.onClusterUpdate(cw.name, u.Resource); onDone() } + cw.parent.serializer.ScheduleOr(handleUpdate, onDone) } -func (cw *clusterWatcher) OnError(err error) { - cw.parent.serializer.Schedule(func(context.Context) { - cw.parent.onClusterError(cw.name, err) - }) +func (cw *clusterWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) { + handleError := func(context.Context) { cw.parent.onClusterError(cw.name, err); onDone() } + cw.parent.serializer.ScheduleOr(handleError, onDone) } -func (cw *clusterWatcher) OnResourceDoesNotExist() { - cw.parent.serializer.Schedule(func(context.Context) { - cw.parent.onClusterResourceNotFound(cw.name) - }) +func (cw *clusterWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) { + handleNotFound := func(context.Context) { cw.parent.onClusterResourceNotFound(cw.name); onDone() } + cw.parent.serializer.ScheduleOr(handleNotFound, onDone) } // watcherState groups the state associated with a clusterWatcher. 
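The watcher callbacks above hinge on two invariants: callbacks run serially, and onDone fires on every path, even when the serializer is already closed. grpcsync.CallbackSerializer is internal to grpc-go, so the self-contained stand-in below only mirrors the ScheduleOr contract; it is an illustrative sketch, not the internal implementation:

package main

import (
	"context"
	"fmt"
	"sync"
)

// serializer runs scheduled callbacks one at a time, in FIFO order, on a
// single goroutine.
type serializer struct {
	mu     sync.Mutex
	closed bool
	queue  chan func(context.Context)
	done   chan struct{}
}

func newSerializer() *serializer {
	s := &serializer{queue: make(chan func(context.Context), 16), done: make(chan struct{})}
	go func() {
		defer close(s.done)
		for cb := range s.queue {
			cb(context.Background())
		}
	}()
	return s
}

// ScheduleOr queues fn, or runs onFailure synchronously if the serializer is
// closed. The watchers above pass onDone as onFailure, so the xDS client is
// unblocked on both paths.
func (s *serializer) ScheduleOr(fn func(context.Context), onFailure func()) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.closed {
		onFailure()
		return
	}
	s.queue <- fn
}

func (s *serializer) Close() {
	s.mu.Lock()
	s.closed = true
	close(s.queue)
	s.mu.Unlock()
	<-s.done // wait for queued callbacks to drain
}

func main() {
	s := newSerializer()
	onDone := func() { fmt.Println("onDone: update released") }
	s.ScheduleOr(func(context.Context) {
		fmt.Println("update handled in order")
		onDone()
	}, onDone)
	s.Close()
	// After Close, the failure path still releases the update.
	s.ScheduleOr(func(context.Context) { fmt.Println("never runs") }, onDone)
}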
diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go index 164f3099d28..0dc71dfedeb 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -24,6 +24,7 @@ package clusterimpl import ( + "context" "encoding/json" "fmt" "sync" @@ -31,8 +32,8 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" - "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" @@ -52,6 +53,11 @@ const ( defaultRequestCountMax = 1024 ) +var ( + connectedAddress = internal.ConnectedAddress.(func(balancer.SubConnState) resolver.Address) + errBalancerClosed = fmt.Errorf("%s LB policy is closed", Name) +) + func init() { balancer.Register(bb{}) } @@ -59,18 +65,17 @@ func init() { type bb struct{} func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + ctx, cancel := context.WithCancel(context.Background()) b := &clusterImplBalancer{ - ClientConn: cc, - bOpts: bOpts, - closed: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), - loadWrapper: loadstore.NewWrapper(), - pickerUpdateCh: buffer.NewUnbounded(), - requestCountMax: defaultRequestCountMax, + ClientConn: cc, + bOpts: bOpts, + loadWrapper: loadstore.NewWrapper(), + requestCountMax: defaultRequestCountMax, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, } b.logger = prefixLogger(b) b.child = gracefulswitch.NewBalancer(b, bOpts) - go b.run() b.logger.Infof("Created") return b } @@ -86,18 +91,6 @@ func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, err type clusterImplBalancer struct { balancer.ClientConn - // mu guarantees mutual exclusion between Close() and handling of picker - // update to the parent ClientConn in run(). It's to make sure that the - // run() goroutine doesn't send picker update to parent after the balancer - // is closed. - // - // It's only used by the run() goroutine, but not the other exported - // functions. Because the exported functions are guaranteed to be - // synchronized with Close(). - mu sync.Mutex - closed *grpcsync.Event - done *grpcsync.Event - bOpts balancer.BuildOptions logger *grpclog.PrefixLogger xdsClient xdsclient.XDSClient @@ -112,10 +105,11 @@ type clusterImplBalancer struct { clusterNameMu sync.Mutex clusterName string + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + // childState/drops/requestCounter keeps the state used by the most recently - // generated picker. All fields can only be accessed in run(). And run() is - // the only goroutine that sends picker to the parent ClientConn. All - // requests to update picker need to be sent to pickerUpdateCh. + // generated picker. childState balancer.State dropCategories []DropConfig // The categories for drops. 
drops []*dropper @@ -124,7 +118,6 @@ type clusterImplBalancer struct { requestCounter *xdsclient.ClusterRequestsCounter requestCountMax uint32 telemetryLabels map[string]string - pickerUpdateCh *buffer.Unbounded } // updateLoadStore checks the config for load store, and decides whether it @@ -205,14 +198,9 @@ func (b *clusterImplBalancer) updateLoadStore(newConfig *LBConfig) error { return nil } -func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - if b.closed.HasFired() { - b.logger.Warningf("xds: received ClientConnState {%+v} after clusterImplBalancer was closed", s) - return nil - } - +func (b *clusterImplBalancer) updateClientConnState(s balancer.ClientConnState) error { if b.logger.V(2) { - b.logger.Infof("Received update from resolver, balancer config: %s", pretty.ToJSON(s.BalancerConfig)) + b.logger.Infof("Received configuration: %s", pretty.ToJSON(s.BalancerConfig)) } newConfig, ok := s.BalancerConfig.(*LBConfig) if !ok { @@ -224,7 +212,7 @@ func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) // it. bb := balancer.Get(newConfig.ChildPolicy.Name) if bb == nil { - return fmt.Errorf("balancer %q not registered", newConfig.ChildPolicy.Name) + return fmt.Errorf("child policy %q not registered", newConfig.ChildPolicy.Name) } if b.xdsClient == nil { @@ -250,9 +238,14 @@ func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) } b.config = newConfig - // Notify run() of this new config, in case drop and request counter need - // update (which means a new picker needs to be generated). - b.pickerUpdateCh.Put(newConfig) + b.telemetryLabels = newConfig.TelemetryLabels + dc := b.handleDropAndRequestCount(newConfig) + if dc != nil && b.childState.Picker != nil { + b.ClientConn.UpdateState(balancer.State{ + ConnectivityState: b.childState.ConnectivityState, + Picker: b.newPicker(dc), + }) + } // Addresses and sub-balancer config are sent to sub-balancer. return b.child.UpdateClientConnState(balancer.ClientConnState{ @@ -261,20 +254,28 @@ func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) }) } -func (b *clusterImplBalancer) ResolverError(err error) { - if b.closed.HasFired() { - b.logger.Warningf("xds: received resolver error {%+v} after clusterImplBalancer was closed", err) - return +func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + // Handle the update in a blocking fashion. + errCh := make(chan error, 1) + callback := func(context.Context) { + errCh <- b.updateClientConnState(s) + } + onFailure := func() { + // An attempt to schedule callback fails only when an update is received + // after Close(). + errCh <- errBalancerClosed } - b.child.ResolverError(err) + b.serializer.ScheduleOr(callback, onFailure) + return <-errCh } -func (b *clusterImplBalancer) updateSubConnState(sc balancer.SubConn, s balancer.SubConnState, cb func(balancer.SubConnState)) { - if b.closed.HasFired() { - b.logger.Warningf("xds: received subconn state change {%+v, %+v} after clusterImplBalancer was closed", sc, s) - return - } +func (b *clusterImplBalancer) ResolverError(err error) { + b.serializer.TrySchedule(func(context.Context) { + b.child.ResolverError(err) + }) +} +func (b *clusterImplBalancer) updateSubConnState(_ balancer.SubConn, s balancer.SubConnState, cb func(balancer.SubConnState)) { // Trigger re-resolution when a SubConn turns transient failure. This is // necessary for the LogicalDNS in cluster_resolver policy to re-resolve. 
// @@ -296,26 +297,40 @@ func (b *clusterImplBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer } func (b *clusterImplBalancer) Close() { - b.mu.Lock() - b.closed.Fire() - b.mu.Unlock() - - b.child.Close() - b.childState = balancer.State{} - b.pickerUpdateCh.Close() - <-b.done.Done() - b.logger.Infof("Shutdown") + b.serializer.TrySchedule(func(_ context.Context) { + b.child.Close() + b.childState = balancer.State{} + + if b.cancelLoadReport != nil { + b.cancelLoadReport() + b.cancelLoadReport = nil + } + b.logger.Infof("Shutdown") + }) + b.serializerCancel() + <-b.serializer.Done() } func (b *clusterImplBalancer) ExitIdle() { - b.child.ExitIdle() + b.serializer.TrySchedule(func(context.Context) { + b.child.ExitIdle() + }) } // Override methods to accept updates from the child LB. func (b *clusterImplBalancer) UpdateState(state balancer.State) { - // Instead of updating parent ClientConn inline, send state to run(). - b.pickerUpdateCh.Put(state) + b.serializer.TrySchedule(func(context.Context) { + b.childState = state + b.ClientConn.UpdateState(balancer.State{ + ConnectivityState: b.childState.ConnectivityState, + Picker: b.newPicker(&dropConfigs{ + drops: b.drops, + requestCounter: b.requestCounter, + requestCountMax: b.requestCountMax, + }), + }) + }) } func (b *clusterImplBalancer) setClusterName(n string) { @@ -360,22 +375,37 @@ func (scw *scWrapper) localityID() xdsinternal.LocalityID { func (b *clusterImplBalancer) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { clusterName := b.getClusterName() newAddrs := make([]resolver.Address, len(addrs)) - var lID xdsinternal.LocalityID for i, addr := range addrs { newAddrs[i] = xds.SetXDSHandshakeClusterName(addr, clusterName) - lID = xdsinternal.GetLocalityID(newAddrs[i]) } var sc balancer.SubConn + scw := &scWrapper{} oldListener := opts.StateListener - opts.StateListener = func(state balancer.SubConnState) { b.updateSubConnState(sc, state, oldListener) } + opts.StateListener = func(state balancer.SubConnState) { + b.serializer.TrySchedule(func(context.Context) { + b.updateSubConnState(sc, state, oldListener) + if state.ConnectivityState != connectivity.Ready { + return + } + // Read connected address and call updateLocalityID() based on the connected + // address's locality. https://github.com/grpc/grpc-go/issues/7339 + addr := connectedAddress(state) + lID := xdsinternal.GetLocalityID(addr) + if lID.Empty() { + if b.logger.V(2) { + b.logger.Infof("Locality ID for %s unexpectedly empty", addr) + } + return + } + scw.updateLocalityID(lID) + }) + } sc, err := b.ClientConn.NewSubConn(newAddrs, opts) if err != nil { return nil, err } - // Wrap this SubConn in a wrapper, and add it to the map. 
- ret := &scWrapper{SubConn: sc} - ret.updateLocalityID(lID) - return ret, nil + scw.SubConn = sc + return scw, nil } func (b *clusterImplBalancer) RemoveSubConn(sc balancer.SubConn) { @@ -448,49 +478,3 @@ func (b *clusterImplBalancer) handleDropAndRequestCount(newConfig *LBConfig) *dr requestCountMax: b.requestCountMax, } } - -func (b *clusterImplBalancer) run() { - defer b.done.Fire() - for { - select { - case update, ok := <-b.pickerUpdateCh.Get(): - if !ok { - return - } - b.pickerUpdateCh.Load() - b.mu.Lock() - if b.closed.HasFired() { - b.mu.Unlock() - return - } - switch u := update.(type) { - case balancer.State: - b.childState = u - b.ClientConn.UpdateState(balancer.State{ - ConnectivityState: b.childState.ConnectivityState, - Picker: b.newPicker(&dropConfigs{ - drops: b.drops, - requestCounter: b.requestCounter, - requestCountMax: b.requestCountMax, - }), - }) - case *LBConfig: - b.telemetryLabels = u.TelemetryLabels - dc := b.handleDropAndRequestCount(u) - if dc != nil && b.childState.Picker != nil { - b.ClientConn.UpdateState(balancer.State{ - ConnectivityState: b.childState.ConnectivityState, - Picker: b.newPicker(dc), - }) - } - } - b.mu.Unlock() - case <-b.closed.Done(): - if b.cancelLoadReport != nil { - b.cancelLoadReport() - b.cancelLoadReport = nil - } - return - } - } -} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go index d8cb8df1a81..fbadbb92ba3 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go @@ -19,6 +19,8 @@ package clusterimpl import ( + "context" + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" @@ -96,14 +98,23 @@ func (b *clusterImplBalancer) newPicker(config *dropConfigs) *picker { } } +func telemetryLabels(ctx context.Context) map[string]string { + if ctx == nil { + return nil + } + labels := stats.GetLabels(ctx) + if labels == nil { + return nil + } + return labels.TelemetryLabels +} + func (d *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { // Unconditionally set labels if present, even dropped or queued RPC's can // use these labels. 
- if info.Ctx != nil { - if labels := stats.GetLabels(info.Ctx); labels != nil && labels.TelemetryLabels != nil { - for key, value := range d.telemetryLabels { - labels.TelemetryLabels[key] = value - } + if labels := telemetryLabels(info.Ctx); labels != nil { + for key, value := range d.telemetryLabels { + labels[key] = value } } @@ -156,6 +167,10 @@ func (d *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { return pr, err } + if labels := telemetryLabels(info.Ctx); labels != nil { + labels["grpc.lb.locality"] = lIDStr + } + if d.loadStore != nil { d.loadStore.CallStarted(lIDStr) oldDone := pr.Done diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/balancerstateaggregator.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/balancerstateaggregator.go index 4b971a3e241..92c69f5e1fc 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/balancerstateaggregator.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/balancerstateaggregator.go @@ -45,17 +45,14 @@ func (s *subBalancerState) String() string { type balancerStateAggregator struct { cc balancer.ClientConn logger *grpclog.PrefixLogger + csEval *balancer.ConnectivityStateEvaluator mu sync.Mutex - // If started is false, no updates should be sent to the parent cc. A closed - // sub-balancer could still send pickers to this aggregator. This makes sure - // that no updates will be forwarded to parent when the whole balancer group - // and states aggregator is closed. - started bool - // All balancer IDs exist as keys in this map, even if balancer group is not - // started. - // - // If an ID is not in map, it's either removed or never added. + // This field is used to ensure that no updates are forwarded to the parent + // CC once the aggregator is closed. A closed sub-balancer could still send + // pickers to this aggregator. + closed bool + // Map from child policy name to last reported state. idToPickerState map[string]*subBalancerState // Set when UpdateState call propagation is paused. pauseUpdateState bool @@ -68,34 +65,24 @@ func newBalancerStateAggregator(cc balancer.ClientConn, logger *grpclog.PrefixLo return &balancerStateAggregator{ cc: cc, logger: logger, + csEval: &balancer.ConnectivityStateEvaluator{}, idToPickerState: make(map[string]*subBalancerState), } } -// Start starts the aggregator. It can be called after Close to restart the -// aggretator. -func (bsa *balancerStateAggregator) start() { - bsa.mu.Lock() - defer bsa.mu.Unlock() - bsa.started = true -} - -// Close closes the aggregator. When the aggregator is closed, it won't call -// parent ClientConn to update balancer state. func (bsa *balancerStateAggregator) close() { bsa.mu.Lock() defer bsa.mu.Unlock() - bsa.started = false - bsa.clearStates() + bsa.closed = true } -// add adds a sub-balancer state with weight. It adds a place holder, and waits -// for the real sub-balancer to update state. +// add adds a sub-balancer in CONNECTING state. // // This is called when there's a new child. 
func (bsa *balancerStateAggregator) add(id string) { bsa.mu.Lock() defer bsa.mu.Unlock() + bsa.idToPickerState[id] = &subBalancerState{ // Start everything in CONNECTING, so if one of the sub-balancers // reports TransientFailure, the RPCs will still wait for the other @@ -106,6 +93,8 @@ func (bsa *balancerStateAggregator) add(id string) { }, stateToAggregate: connectivity.Connecting, } + bsa.csEval.RecordTransition(connectivity.Shutdown, connectivity.Connecting) + bsa.buildAndUpdateLocked() } // remove removes the sub-balancer state. Future updates from this sub-balancer, @@ -118,9 +107,15 @@ func (bsa *balancerStateAggregator) remove(id string) { if _, ok := bsa.idToPickerState[id]; !ok { return } + // Setting the state of the deleted sub-balancer to Shutdown will get + // csEval to remove the previous state for any aggregated state + // evaluations. Transitions to and from connectivity.Shutdown are ignored + // by csEval. + bsa.csEval.RecordTransition(bsa.idToPickerState[id].stateToAggregate, connectivity.Shutdown) // Remove id and picker from picker map. This also results in future updates // for this ID to be ignored. delete(bsa.idToPickerState, id) + bsa.buildAndUpdateLocked() } // pauseStateUpdates causes UpdateState calls to not propagate to the parent @@ -140,7 +135,7 @@ func (bsa *balancerStateAggregator) resumeStateUpdates() { defer bsa.mu.Unlock() bsa.pauseUpdateState = false if bsa.needUpdateStateOnResume { - bsa.cc.UpdateState(bsa.build()) + bsa.cc.UpdateState(bsa.buildLocked()) } } @@ -149,6 +144,8 @@ func (bsa *balancerStateAggregator) resumeStateUpdates() { // // It calls parent ClientConn's UpdateState with the new aggregated state. func (bsa *balancerStateAggregator) UpdateState(id string, state balancer.State) { + bsa.logger.Infof("State update from sub-balancer %q: %+v", id, state) + bsa.mu.Lock() defer bsa.mu.Unlock() pickerSt, ok := bsa.idToPickerState[id] @@ -162,42 +159,17 @@ func (bsa *balancerStateAggregator) UpdateState(id string, state balancer.State) // update the state, to prevent the aggregated state from being always // CONNECTING. Otherwise, stateToAggregate is the same as // state.ConnectivityState. + bsa.csEval.RecordTransition(pickerSt.stateToAggregate, state.ConnectivityState) pickerSt.stateToAggregate = state.ConnectivityState } pickerSt.state = state - - if !bsa.started { - return - } - if bsa.pauseUpdateState { - // If updates are paused, do not call UpdateState, but remember that we - // need to call it when they are resumed. - bsa.needUpdateStateOnResume = true - return - } - bsa.cc.UpdateState(bsa.build()) -} - -// clearState Reset everything to init state (Connecting) but keep the entry in -// map (to keep the weight). -// -// Caller must hold bsa.mu. -func (bsa *balancerStateAggregator) clearStates() { - for _, pState := range bsa.idToPickerState { - pState.state = balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), - } - pState.stateToAggregate = connectivity.Connecting - } + bsa.buildAndUpdateLocked() } -// buildAndUpdate combines the sub-state from each sub-balancer into one state, -// and update it to parent ClientConn. -func (bsa *balancerStateAggregator) buildAndUpdate() { - bsa.mu.Lock() - defer bsa.mu.Unlock() - if !bsa.started { +// buildAndUpdateLocked combines the sub-state from each sub-balancer into one +// state, and sends a picker update to the parent ClientConn.
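For illustration, how the balancer.ConnectivityStateEvaluator used above aggregates child states (this is public grpc-go API: READY wins over CONNECTING, which wins over IDLE, with TRANSIENT_FAILURE as the fallback; SHUTDOWN transitions are not tallied, which is how children are added and removed):

package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

func main() {
	cse := &balancer.ConnectivityStateEvaluator{}

	// Two children join in CONNECTING; transitions from Shutdown introduce
	// new children without disturbing the tally of existing ones.
	cse.RecordTransition(connectivity.Shutdown, connectivity.Connecting)
	cse.RecordTransition(connectivity.Shutdown, connectivity.Connecting)
	fmt.Println(cse.CurrentState()) // CONNECTING

	// One child becomes READY: the aggregate becomes READY.
	cse.RecordTransition(connectivity.Connecting, connectivity.Ready)
	fmt.Println(cse.CurrentState()) // READY
}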
+func (bsa *balancerStateAggregator) buildAndUpdateLocked() { + if bsa.closed { return } if bsa.pauseUpdateState { @@ -206,47 +178,11 @@ func (bsa *balancerStateAggregator) buildAndUpdate() { bsa.needUpdateStateOnResume = true return } - bsa.cc.UpdateState(bsa.build()) + bsa.cc.UpdateState(bsa.buildLocked()) } -// build combines sub-states into one. The picker will do a child pick. -// -// Caller must hold bsa.mu. -func (bsa *balancerStateAggregator) build() balancer.State { - // TODO: the majority of this function (and UpdateState) is exactly the same - // as weighted_target's state aggregator. Try to make a general utility - // function/struct to handle the logic. - // - // One option: make a SubBalancerState that handles Update(State), including - // handling the special connecting after ready, as in UpdateState(). Then a - // function to calculate the aggregated connectivity state as in this - // function. - // - // TODO: use balancer.ConnectivityStateEvaluator to calculate the aggregated - // state. - var readyN, connectingN, idleN int - for _, ps := range bsa.idToPickerState { - switch ps.stateToAggregate { - case connectivity.Ready: - readyN++ - case connectivity.Connecting: - connectingN++ - case connectivity.Idle: - idleN++ - } - } - var aggregatedState connectivity.State - switch { - case readyN > 0: - aggregatedState = connectivity.Ready - case connectingN > 0: - aggregatedState = connectivity.Connecting - case idleN > 0: - aggregatedState = connectivity.Idle - default: - aggregatedState = connectivity.TransientFailure - } - +// buildLocked combines sub-states into one. +func (bsa *balancerStateAggregator) buildLocked() balancer.State { // The picker's return error might not be consistent with the // aggregatedState. Because for this LB policy, we want to always build // picker with all sub-pickers (not only ready sub-pickers), so even if the @@ -254,7 +190,7 @@ func (bsa *balancerStateAggregator) build() balancer.State { // or TransientFailure. bsa.logger.Infof("Child pickers: %+v", bsa.idToPickerState) return balancer.State{ - ConnectivityState: aggregatedState, + ConnectivityState: bsa.csEval.CurrentState(), Picker: newPickerGroup(bsa.idToPickerState), } } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go index db8332b90ea..e6d751ecbee 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go @@ -25,6 +25,8 @@ import ( "time" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/balancergroup" internalgrpclog "google.golang.org/grpc/internal/grpclog" @@ -46,7 +48,6 @@ func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Bal b := &bal{} b.logger = prefixLogger(b) b.stateAggregator = newBalancerStateAggregator(cc, b.logger) - b.stateAggregator.start() b.bg = balancergroup.New(balancergroup.Options{ CC: cc, BuildOpts: opts, @@ -68,59 +69,101 @@ func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, err } type bal struct { - logger *internalgrpclog.PrefixLogger - - // TODO: make this package not dependent on xds specific code. 
Same as for - // weighted target balancer. + logger *internalgrpclog.PrefixLogger bg *balancergroup.BalancerGroup stateAggregator *balancerStateAggregator children map[string]childConfig } -func (b *bal) updateChildren(s balancer.ClientConnState, newConfig *lbConfig) { - update := false +func (b *bal) setErrorPickerForChild(childName string, err error) { + b.stateAggregator.UpdateState(childName, balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(err), + }) +} + +func (b *bal) updateChildren(s balancer.ClientConnState, newConfig *lbConfig) error { + // TODO: Get rid of handling hierarchy in addresses. This LB policy never + // gets addresses from the resolver. addressesSplit := hierarchy.Group(s.ResolverState.Addresses) - // Remove sub-pickers and sub-balancers that are not in the new cluster list. + // Remove sub-balancers that are not in the new list from the aggregator and + // balancergroup. for name := range b.children { if _, ok := newConfig.Children[name]; !ok { b.stateAggregator.remove(name) b.bg.Remove(name) - update = true } } - // For sub-balancers in the new cluster list, - // - add to balancer group if it's new, - // - forward the address/balancer config update. - for name, newT := range newConfig.Children { - if _, ok := b.children[name]; !ok { - // If this is a new sub-balancer, add it to the picker map. - b.stateAggregator.add(name) - // Then add to the balancer group. - b.bg.Add(name, balancer.Get(newT.ChildPolicy.Name)) + var retErr error + for childName, childCfg := range newConfig.Children { + lbCfg := childCfg.ChildPolicy.Config + if _, ok := b.children[childName]; !ok { + // Add new sub-balancers to the aggregator and balancergroup. + b.stateAggregator.add(childName) + b.bg.Add(childName, balancer.Get(childCfg.ChildPolicy.Name)) } else { - // Already present, check for type change and if so send down a new builder. - if newT.ChildPolicy.Name != b.children[name].ChildPolicy.Name { - b.bg.UpdateBuilder(name, balancer.Get(newT.ChildPolicy.Name)) + // If the child policy type has changed for existing sub-balancers, + // parse the new config and send down the config update to the + // balancergroup, which will take care of gracefully switching the + // child over to the new policy. + // + // If we run into errors here, we need to ensure that RPCs to this + // child fail, while RPCs to other children with good configs + // continue to succeed. + newPolicyName, oldPolicyName := childCfg.ChildPolicy.Name, b.children[childName].ChildPolicy.Name + if newPolicyName != oldPolicyName { + var err error + var cfgJSON []byte + cfgJSON, err = childCfg.ChildPolicy.MarshalJSON() + if err != nil { + retErr = fmt.Errorf("failed to JSON marshal load balancing policy for child %q: %v", childName, err) + b.setErrorPickerForChild(childName, retErr) + continue + } + // This overwrites lbCfg to be in the format expected by the + // gracefulswitch balancer. So, when this config is pushed to + // the child (below), it will result in a graceful switch to the + // new child policy. + lbCfg, err = balancergroup.ParseConfig(cfgJSON) + if err != nil { + retErr = fmt.Errorf("failed to parse load balancing policy for child %q: %v", childName, err) + b.setErrorPickerForChild(childName, retErr) + continue + } } } - // TODO: handle error? How to aggregate errors and return? 
- _ = b.bg.UpdateClientConnState(name, balancer.ClientConnState{ + + if err := b.bg.UpdateClientConnState(childName, balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: addressesSplit[name], + Addresses: addressesSplit[childName], ServiceConfig: s.ResolverState.ServiceConfig, Attributes: s.ResolverState.Attributes, }, - BalancerConfig: newT.ChildPolicy.Config, - }) + BalancerConfig: lbCfg, + }); err != nil { + retErr = fmt.Errorf("failed to push new configuration %v to child %q", childCfg.ChildPolicy.Config, childName) + b.setErrorPickerForChild(childName, retErr) + } + + // Picker update is sent to the parent ClientConn only after the + // new child policy returns a picker. So, there is no need to + // set needUpdateStateOnResume to true here. } b.children = newConfig.Children - if update { - b.stateAggregator.buildAndUpdate() - } + + // If multiple sub-balancers run into errors, we will return only the last + // one, which is still good enough, since the grpc channel will anyway + // return this error as balancer.ErrBadResolverState to the name resolver, + // resulting in re-resolution attempts. + return retErr + + // Adding or removing a sub-balancer will set the + // needUpdateStateOnResume bit to true, which results in a picker update once + // resumeStateUpdates() is called. } func (b *bal) UpdateClientConnState(s balancer.ClientConnState) error { @@ -128,12 +171,11 @@ func (b *bal) UpdateClientConnState(s balancer.ClientConnState) error { if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) } - b.logger.Infof("update with config %+v, resolver state %+v", pretty.ToJSON(s.BalancerConfig), s.ResolverState) + b.logger.Infof("Update with config %+v, resolver state %+v", pretty.ToJSON(s.BalancerConfig), s.ResolverState) b.stateAggregator.pauseStateUpdates() defer b.stateAggregator.resumeStateUpdates() - b.updateChildren(s, newConfig) - return nil + return b.updateChildren(s, newConfig) } func (b *bal) ResolverError(err error) { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go index 83ead92a4a6..749945059b8 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -207,11 +207,6 @@ func (b *clusterResolverBalancer) handleClientConnUpdate(update *ccUpdate) { // handleResourceUpdate handles a resource update or error from the resource // resolver by propagating the same to the child LB policy. func (b *clusterResolverBalancer) handleResourceUpdate(update *resourceUpdate) { - if err := update.err; err != nil { - b.handleErrorFromUpdate(err, false) - return - } - b.watchUpdateReceived = true b.priorities = update.priorities @@ -219,6 +214,10 @@ func (b *clusterResolverBalancer) handleResourceUpdate(update *resourceUpdate) { // for all configured discovery mechanisms ordered by priority. This is used // to generate configuration for the priority LB policy.
b.updateChildConfig() + + if update.onDone != nil { + update.onDone() + } } // updateChildConfig builds child policy configuration using endpoint addresses diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go index 151c54dae6d..5bc64b86305 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go @@ -30,8 +30,14 @@ import ( // resourceUpdate is a combined update from all the resources, in the order of // priority. For example, it can be {EDS, EDS, DNS}. type resourceUpdate struct { + // A discovery mechanism would return an empty update when it runs into + // errors, and this would result in the priority LB policy reporting + // TRANSIENT_FAILURE (if there was a single discovery mechanism), or would + // fallback to the next highest priority that is available. priorities []priorityConfig - err error + // To be invoked once the update is completely processed, or is dropped in + // favor of a newer update. + onDone xdsresource.OnDoneFunc } // topLevelResolver is used by concrete endpointsResolver implementations for @@ -39,7 +45,11 @@ type resourceUpdate struct { // interface and takes appropriate actions upon receipt of updates and errors // from underlying concrete resolvers. type topLevelResolver interface { - onUpdate() + // onUpdate is called when a new update is received from the underlying + // endpointsResolver implementation. The onDone callback is to be invoked + // once the update is completely processed, or is dropped in favor of a + // newer update. + onUpdate(onDone xdsresource.OnDoneFunc) } // endpointsResolver wraps the functionality to resolve a given resource name to @@ -205,7 +215,7 @@ func (rr *resourceResolver) updateMechanisms(mechanisms []DiscoveryMechanism) { } // Regenerate even if there's no change in discovery mechanism, in case // priority order changed. - rr.generateLocked() + rr.generateLocked(func() {}) } // resolveNow is typically called to trigger re-resolve of DNS. The EDS @@ -252,7 +262,10 @@ func (rr *resourceResolver) stop(closing bool) { // after they are stopped. Therefore, we don't have to worry about another // write to this channel happening at the same time as this one. select { - case <-rr.updateChannel: + case ru := <-rr.updateChannel: + if ru.onDone != nil { + ru.onDone() + } default: } rr.updateChannel <- &resourceUpdate{} @@ -262,14 +275,20 @@ func (rr *resourceResolver) stop(closing bool) { // result on the update channel if all child resolvers have received at least // one update. Otherwise it returns early. // -// caller must hold rr.mu. -func (rr *resourceResolver) generateLocked() { +// The onDone callback is invoked inline if not all child resolvers have +// received at least one update. If all child resolvers have received at least +// one update, onDone is invoked when the combined update is processed by the +// clusterresolver LB policy. +// +// Caller must hold rr.mu. +func (rr *resourceResolver) generateLocked(onDone xdsresource.OnDoneFunc) { var ret []priorityConfig for _, rDM := range rr.children { u, ok := rDM.r.lastUpdate() if !ok { // Don't send updates to parent until all resolvers have update to // send. 
+ onDone() return } switch uu := u.(type) { @@ -280,16 +299,23 @@ func (rr *resourceResolver) generateLocked() { } } select { - case <-rr.updateChannel: + // A previously unprocessed update is dropped in favor of the new one, and + // the former's onDone callback is invoked to unblock the xDS client's + // receive path. + case ru := <-rr.updateChannel: + if ru.onDone != nil { + ru.onDone() + } default: } - rr.updateChannel <- &resourceUpdate{priorities: ret} + rr.updateChannel <- &resourceUpdate{priorities: ret, onDone: onDone} } -func (rr *resourceResolver) onUpdate() { - rr.serializer.Schedule(func(context.Context) { +func (rr *resourceResolver) onUpdate(onDone xdsresource.OnDoneFunc) { + handleUpdate := func(context.Context) { rr.mu.Lock() - rr.generateLocked() + rr.generateLocked(onDone) rr.mu.Unlock() - }) + } + rr.serializer.ScheduleOr(handleUpdate, func() { onDone() }) } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go index efdc3088a39..cfc871d3b59 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go @@ -79,7 +79,7 @@ func newDNSResolver(target string, topLevelResolver topLevelResolver, logger *gr ret.logger.Infof("Failed to parse dns hostname %q in clusterresolver LB policy", target) } ret.updateReceived = true - ret.topLevelResolver.onUpdate() + ret.topLevelResolver.onUpdate(func() {}) return ret } @@ -89,7 +89,7 @@ func newDNSResolver(target string, topLevelResolver topLevelResolver, logger *gr ret.logger.Infof("Failed to build DNS resolver for target %q: %v", target, err) } ret.updateReceived = true - ret.topLevelResolver.onUpdate() + ret.topLevelResolver.onUpdate(func() {}) return ret } ret.dnsR = r @@ -153,7 +153,7 @@ func (dr *dnsDiscoveryMechanism) UpdateState(state resolver.State) error { dr.updateReceived = true dr.mu.Unlock() - dr.topLevelResolver.onUpdate() + dr.topLevelResolver.onUpdate(func() {}) return nil } @@ -176,7 +176,7 @@ func (dr *dnsDiscoveryMechanism) ReportError(err error) { dr.updateReceived = true dr.mu.Unlock() - dr.topLevelResolver.onUpdate() + dr.topLevelResolver.onUpdate(func() {}) } func (dr *dnsDiscoveryMechanism) NewAddress(addresses []resolver.Address) { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go index 3d0ec356e93..ddb949019ee 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go @@ -76,8 +76,9 @@ func newEDSResolver(nameToWatch string, producer xdsresource.Producer, topLevelR } // OnUpdate is invoked to report an update for the resource being watched. 
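For illustration, the single-slot update-channel pattern generateLocked() relies on, as a self-contained sketch (resourceUpdate fields simplified): a pending unprocessed update is dropped in favor of a newer one, but its onDone still fires so the producer is never left blocked:

package main

import "fmt"

type resourceUpdate struct {
	priorities []string // stand-in for []priorityConfig
	onDone     func()
}

// publish mirrors the select/default dance above: drain a stale update, if
// any, invoke its onDone, then place the new update in the single slot.
func publish(ch chan *resourceUpdate, ru *resourceUpdate) {
	select {
	case stale := <-ch:
		if stale.onDone != nil {
			stale.onDone()
		}
	default:
	}
	ch <- ru
}

func main() {
	ch := make(chan *resourceUpdate, 1)
	publish(ch, &resourceUpdate{onDone: func() { fmt.Println("done #1 (dropped)") }})
	publish(ch, &resourceUpdate{onDone: func() { fmt.Println("done #2") }})
	u := <-ch
	u.onDone() // invoked by the consumer once processing completes
}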
-func (er *edsDiscoveryMechanism) OnUpdate(update *xdsresource.EndpointsResourceData) { +func (er *edsDiscoveryMechanism) OnUpdate(update *xdsresource.EndpointsResourceData, onDone xdsresource.OnDoneFunc) { if er.stopped.HasFired() { + onDone() return } @@ -85,11 +86,12 @@ func (er *edsDiscoveryMechanism) OnUpdate(update *xdsresource.EndpointsResourceD er.update = &update.Resource er.mu.Unlock() - er.topLevelResolver.onUpdate() + er.topLevelResolver.onUpdate(onDone) } -func (er *edsDiscoveryMechanism) OnError(err error) { +func (er *edsDiscoveryMechanism) OnError(err error, onDone xdsresource.OnDoneFunc) { if er.stopped.HasFired() { + onDone() return } @@ -102,6 +104,7 @@ func (er *edsDiscoveryMechanism) OnError(err error) { // Continue using a previously received good configuration if one // exists. er.mu.Unlock() + onDone() return } @@ -114,11 +117,12 @@ func (er *edsDiscoveryMechanism) OnError(err error) { er.update = &xdsresource.EndpointsUpdate{} er.mu.Unlock() - er.topLevelResolver.onUpdate() + er.topLevelResolver.onUpdate(onDone) } -func (er *edsDiscoveryMechanism) OnResourceDoesNotExist() { +func (er *edsDiscoveryMechanism) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) { if er.stopped.HasFired() { + onDone() return } @@ -136,5 +140,5 @@ func (er *edsDiscoveryMechanism) OnResourceDoesNotExist() { er.update = &xdsresource.EndpointsUpdate{} er.mu.Unlock() - er.topLevelResolver.onUpdate() + er.topLevelResolver.onUpdate(onDone) } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go index 8ce958d71ca..f5605df8327 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go @@ -36,7 +36,7 @@ func NewWrapper() *Wrapper { // update its internal perCluster store so that new stats will be added to the // correct perCluster. // -// Note that this struct is a temporary walkaround before we implement graceful +// Note that this struct is a temporary workaround before we implement graceful // switch for EDS. Any update to the clusterName and serviceName is too early, // the perfect timing is when the picker is updated with the new connection. 
// This early update could cause picks for the old SubConn being reported to the diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go index 34604318c31..53ba72c0813 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go @@ -25,6 +25,7 @@ import ( "encoding/json" "fmt" "math" + "math/rand" "strings" "sync" "sync/atomic" @@ -37,7 +38,6 @@ import ( "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" iserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" @@ -592,20 +592,6 @@ func (b *outlierDetectionBalancer) Target() string { return b.cc.Target() } -func max(x, y time.Duration) time.Duration { - if x < y { - return y - } - return x -} - -func min(x, y time.Duration) time.Duration { - if x < y { - return x - } - return y -} - // handleSubConnUpdate stores the recent state and forward the update // if the SubConn is not ejected. func (b *outlierDetectionBalancer) handleSubConnUpdate(u *scUpdate) { @@ -838,7 +824,7 @@ func (b *outlierDetectionBalancer) successRateAlgorithm() { requiredSuccessRate := mean - stddev*(float64(ejectionCfg.StdevFactor)/1000) if successRate < requiredSuccessRate { channelz.Infof(logger, b.channelzParent, "SuccessRate algorithm detected outlier: %s. Parameters: successRate=%f, mean=%f, stddev=%f, requiredSuccessRate=%f", addrInfo, successRate, mean, stddev, requiredSuccessRate) - if uint32(grpcrand.Int31n(100)) < ejectionCfg.EnforcementPercentage { + if uint32(rand.Int31n(100)) < ejectionCfg.EnforcementPercentage { b.ejectAddress(addrInfo) } } @@ -865,7 +851,7 @@ func (b *outlierDetectionBalancer) failurePercentageAlgorithm() { failurePercentage := (float64(bucket.numFailures) / float64(bucket.numSuccesses+bucket.numFailures)) * 100 if failurePercentage > float64(b.cfg.FailurePercentageEjection.Threshold) { channelz.Infof(logger, b.channelzParent, "FailurePercentage algorithm detected outlier: %s, failurePercentage=%f", addrInfo, failurePercentage) - if uint32(grpcrand.Int31n(100)) < ejectionCfg.EnforcementPercentage { + if uint32(rand.Int31n(100)) < ejectionCfg.EnforcementPercentage { b.ejectAddress(addrInfo) } } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go index 988ca280789..c17c62f23a5 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go @@ -270,6 +270,7 @@ func (b *priorityBalancer) run() { // deadlock. 
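The move from internal/grpcrand to the standard math/rand above relies on the standard library seeding its global source automatically (Go 1.20 and later), and the deleted local min/max helpers appear to be superseded by the Go 1.21 builtins. A rough, self-contained sketch of the enforcement-percentage gate used by both ejection algorithms, with invented names:

package main

import (
	"fmt"
	"math/rand"
)

// maybeEject returns true with probability enforcementPercentage/100,
// mirroring the shape of the outlier-detection check above.
func maybeEject(enforcementPercentage uint32) bool {
	return uint32(rand.Int31n(100)) < enforcementPercentage
}

func main() {
	ejected := 0
	for i := 0; i < 10000; i++ {
		if maybeEject(30) {
			ejected++
		}
	}
	fmt.Printf("ejected roughly %d%% of the time\n", ejected/100)
}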
b.mu.Lock() if b.done.HasFired() { + b.mu.Unlock() return } switch s := u.(type) { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go index 4655bf41847..0be807c134a 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go @@ -83,7 +83,9 @@ var ( // Caller must hold b.mu. func (b *priorityBalancer) syncPriority(childUpdating string) { if b.inhibitPickerUpdates { - b.logger.Debugf("Skipping update from child policy %q", childUpdating) + if b.logger.V(2) { + b.logger.Infof("Skipping update from child policy %q", childUpdating) + } return } for p, name := range b.priorities { @@ -99,12 +101,16 @@ func (b *priorityBalancer) syncPriority(childUpdating string) { (child.state.ConnectivityState == connectivity.Connecting && child.initTimer != nil) || p == len(b.priorities)-1 { if b.childInUse != child.name || child.name == childUpdating { - b.logger.Debugf("childInUse, childUpdating: %q, %q", b.childInUse, child.name) + if b.logger.V(2) { + b.logger.Infof("childInUse, childUpdating: %q, %q", b.childInUse, child.name) + } // If we switch children or the child in use just updated its // picker, push the child's picker to the parent. b.cc.UpdateState(child.state) } - b.logger.Debugf("Switching to (%q, %v) in syncPriority", child.name, p) + if b.logger.V(2) { + b.logger.Infof("Switching to (%q, %v) in syncPriority", child.name, p) + } b.switchToChild(child, p) break } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go index b450716fa0f..5ce72caded4 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go @@ -159,28 +159,3 @@ func nextSkippingDuplicates(ring *ring, entry *ringEntry) *ringEntry { // There's no qualifying next entry. return nil } - -// nextSkippingDuplicatesSubConn finds the next subconn in the ring, that's -// different from the given subconn. -func nextSkippingDuplicatesSubConn(ring *ring, sc *subConn) *subConn { - var entry *ringEntry - for _, it := range ring.items { - if it.sc == sc { - entry = it - break - } - } - if entry == nil { - // If the given subconn is not in the ring (e.g. it was deleted), return - // the first one. - if len(ring.items) > 0 { - return ring.items[0].sc - } - return nil - } - ee := nextSkippingDuplicates(ring, entry) - if ee == nil { - return nil - } - return ee.sc -} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go index 4d7fdb35e72..45dbb2d2a83 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go @@ -67,11 +67,15 @@ type ringEntry struct { // // Must be called with a non-empty subConns map. 
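The logging changes above replace Debugf with Infof guarded by logger.V(2), so verbose messages still reach the info log once verbosity is raised, and argument formatting is skipped entirely otherwise. A tiny sketch of the pattern, using a stand-in logger rather than grpclog:

package main

import "fmt"

type logger struct{ verbosity int }

func (l *logger) V(n int) bool                      { return l.verbosity >= n }
func (l *logger) Infof(format string, args ...any)  { fmt.Printf(format+"\n", args...) }

func main() {
	l := &logger{verbosity: 0}
	// The V(2) guard means the (potentially expensive) formatting below
	// only happens when verbose logging is actually enabled.
	if l.V(2) {
		l.Infof("Switching to (%q, %v) in syncPriority", "child-0", 0)
	}
	l.verbosity = 2
	if l.V(2) {
		l.Infof("Switching to (%q, %v) in syncPriority", "child-0", 0)
	}
}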
func newRing(subConns *resolver.AddressMap, minRingSize, maxRingSize uint64, logger *grpclog.PrefixLogger) *ring { - logger.Debugf("newRing: number of subConns is %d, minRingSize is %d, maxRingSize is %d", subConns.Len(), minRingSize, maxRingSize) + if logger.V(2) { + logger.Infof("newRing: number of subConns is %d, minRingSize is %d, maxRingSize is %d", subConns.Len(), minRingSize, maxRingSize) + } // https://github.com/envoyproxy/envoy/blob/765c970f06a4c962961a0e03a467e165b276d50f/source/common/upstream/ring_hash_lb.cc#L114 normalizedWeights, minWeight := normalizeWeights(subConns) - logger.Debugf("newRing: normalized subConn weights is %v", normalizedWeights) + if logger.V(2) { + logger.Infof("newRing: normalized subConn weights is %v", normalizedWeights) + } // Normalized weights for {3,3,4} is {0.3,0.3,0.4}. @@ -82,7 +86,9 @@ func newRing(subConns *resolver.AddressMap, minRingSize, maxRingSize uint64, log scale := math.Min(math.Ceil(minWeight*float64(minRingSize))/minWeight, float64(maxRingSize)) ringSize := math.Ceil(scale) items := make([]*ringEntry, 0, int(ringSize)) - logger.Debugf("newRing: creating new ring of size %v", ringSize) + if logger.V(2) { + logger.Infof("newRing: creating new ring of size %v", ringSize) + } // For each entry, scale*weight nodes are generated in the ring. // @@ -116,30 +122,37 @@ func newRing(subConns *resolver.AddressMap, minRingSize, maxRingSize uint64, log return &ring{items: items} } -// normalizeWeights divides all the weights by the sum, so that the total weight -// is 1. +// normalizeWeights calculates the normalized weights for each subConn in the +// given subConns map. It returns a slice of subConnWithWeight structs, where +// each struct contains a subConn and its corresponding weight. The function +// also returns the minimum weight among all subConns. +// +// The normalized weight of each subConn is calculated by dividing its weight +// attribute by the sum of all subConn weights. If the weight attribute is not +// found on the address, a default weight of 1 is used. +// +// The addresses are sorted in ascending order to ensure consistent results. // // Must be called with a non-empty subConns map. func normalizeWeights(subConns *resolver.AddressMap) ([]subConnWithWeight, float64) { var weightSum uint32 - keys := subConns.Keys() - for _, a := range keys { - weightSum += getWeightAttribute(a) + // Since attributes are explicitly ignored in the AddressMap key, we need to + // iterate over the values to get the weights. + scVals := subConns.Values() + for _, a := range scVals { + weightSum += a.(*subConn).weight } - ret := make([]subConnWithWeight, 0, len(keys)) - min := float64(1.0) - for _, a := range keys { - v, _ := subConns.Get(a) - scInfo := v.(*subConn) - // getWeightAttribute() returns 1 if the weight attribute is not found - // on the address. And since this function is guaranteed to be called - // with a non-empty subConns map, weightSum is guaranteed to be - // non-zero. So, we need not worry about divide a by zero error here. - nw := float64(getWeightAttribute(a)) / float64(weightSum) + ret := make([]subConnWithWeight, 0, subConns.Len()) + min := 1.0 + for _, a := range scVals { + scInfo := a.(*subConn) + // (*subConn).weight is set to 1 if the weight attribute is not found on + // the address. And since this function is guaranteed to be called with + // a non-empty subConns map, weightSum is guaranteed to be non-zero. So, + // we need not worry about divide by zero error here. 
+ nw := float64(scInfo.weight) / float64(weightSum) ret = append(ret, subConnWithWeight{sc: scInfo, weight: nw}) - if nw < min { - min = nw - } + min = math.Min(min, nw) } // Sort the addresses to return consistent results. // diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go index e63c6f65390..ef054d48aa4 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go @@ -44,12 +44,13 @@ func init() { type bb struct{} -func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { +func (bb) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { b := &ringhashBalancer{ - cc: cc, - subConns: resolver.NewAddressMap(), - scStates: make(map[balancer.SubConn]*subConn), - csEvltr: &connectivityStateEvaluator{}, + cc: cc, + subConns: resolver.NewAddressMap(), + scStates: make(map[balancer.SubConn]*subConn), + csEvltr: &connectivityStateEvaluator{}, + orderedSubConns: make([]*subConn, 0), } b.logger = prefixLogger(b) b.logger.Infof("Created") @@ -197,6 +198,14 @@ type ringhashBalancer struct { resolverErr error // the last error reported by the resolver; cleared on successful resolution connErr error // the last connection error; cleared upon leaving TransientFailure + + // orderedSubConns contains the list of subconns in the order that addresses + // appear from the resolver. Together with lastInternallyTriggeredSCIndex, + // this allows triggering connection attempts to all SubConns independently + // of the order they appear on the ring. Always in sync with ring and + // subConns. The index is reset when addresses change. + orderedSubConns []*subConn + lastInternallyTriggeredSCIndex int } // updateAddresses creates new SubConns and removes SubConns, based on the @@ -214,6 +223,9 @@ func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool { var addrsUpdated bool // addrsSet is the set converted from addrs, used for quick lookup. addrsSet := resolver.NewAddressMap() + + b.orderedSubConns = b.orderedSubConns[:0] // reuse the underlying array. + for _, addr := range addrs { addrsSet.Set(addr, true) newWeight := getWeightAttribute(addr) @@ -234,6 +246,7 @@ func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool { b.state = b.csEvltr.recordTransition(connectivity.Shutdown, connectivity.Idle) b.subConns.Set(addr, scs) b.scStates[sc] = scs + b.orderedSubConns = append(b.orderedSubConns, scs) addrsUpdated = true } else { // We have seen this address before and created a subConn for it. If the @@ -244,6 +257,7 @@ func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool { // since *only* the weight attribute has changed, and that does not affect // subConn uniqueness. scInfo := val.(*subConn) + b.orderedSubConns = append(b.orderedSubConns, scInfo) if oldWeight := scInfo.weight; oldWeight != newWeight { scInfo.weight = newWeight b.subConns.Set(addr, scInfo) @@ -264,6 +278,9 @@ func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool { // The entry will be deleted in updateSubConnState. 
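The rewritten normalizeWeights divides each subConn weight by the total so the results sum to 1, tracking the minimum as it goes. A stripped-down sketch over plain uint32 weights (the real code reads them from the subConn values of an AddressMap):

package main

import (
	"fmt"
	"math"
)

// normalize divides each weight by the total so the results sum to 1, and
// returns the minimum normalized weight. Callers must pass a non-empty
// slice with a non-zero sum (the balancer guarantees this by defaulting
// absent weights to 1).
func normalize(weights []uint32) ([]float64, float64) {
	var sum uint32
	for _, w := range weights {
		sum += w
	}
	out := make([]float64, 0, len(weights))
	min := 1.0
	for _, w := range weights {
		nw := float64(w) / float64(sum)
		out = append(out, nw)
		min = math.Min(min, nw)
	}
	return out, min
}

func main() {
	nws, min := normalize([]uint32{3, 3, 4})
	fmt.Println(nws, min) // [0.3 0.3 0.4] 0.3, matching the comment above
}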
} } + if addrsUpdated { + b.lastInternallyTriggeredSCIndex = 0 + } return addrsUpdated } @@ -399,19 +416,11 @@ func (b *ringhashBalancer) updateSubConnState(sc balancer.SubConn, state balance return } } - // Trigger a SubConn (this updated SubConn's next SubConn in the ring) - // to connect if nobody is attempting to connect. - sc := nextSkippingDuplicatesSubConn(b.ring, scs) - if sc != nil { - sc.queueConnect() - return - } - // This handles the edge case where we have a single subConn in the - // ring. nextSkippingDuplicatesSubCon() would have returned nil. We - // still need to ensure that some subConn is attempting to connect, in - // order to give the LB policy a chance to move out of - // TRANSIENT_FAILURE. Hence, we try connecting on the current subConn. - scs.queueConnect() + + // Trigger a SubConn (the next in the order addresses appear in the + // resolver) to connect if nobody is attempting to connect. + b.lastInternallyTriggeredSCIndex = (b.lastInternallyTriggeredSCIndex + 1) % len(b.orderedSubConns) + b.orderedSubConns[b.lastInternallyTriggeredSCIndex].queueConnect() } } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/rls/rls.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/rls/rls.go index 74abfec1fa8..89837605c1d 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/rls/rls.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/rls/rls.go @@ -65,13 +65,13 @@ func (rls) ParseClusterSpecifierConfig(cfg proto.Message) (clusterspecifier.Bala if cfg == nil { return nil, fmt.Errorf("rls_csp: nil configuration message provided") } - any, ok := cfg.(*anypb.Any) + m, ok := cfg.(*anypb.Any) if !ok { return nil, fmt.Errorf("rls_csp: error parsing config %v: unknown type %T", cfg, cfg) } rlcs := new(rlspb.RouteLookupClusterSpecifier) - if err := any.UnmarshalTo(rlcs); err != nil { + if err := m.UnmarshalTo(rlcs); err != nil { return nil, fmt.Errorf("rls_csp: error parsing config %v: %v", cfg, err) } rlcJSON, err := protojson.Marshal(rlcs.GetRouteLookupConfig()) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go index 3ad93e27b1d..5a82490598a 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go @@ -24,12 +24,12 @@ import ( "errors" "fmt" "io" + "math/rand" "strconv" "sync/atomic" "time" "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/grpcrand" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" @@ -81,12 +81,12 @@ func parseConfig(cfg proto.Message) (httpfilter.FilterConfig, error) { if cfg == nil { return nil, fmt.Errorf("fault: nil configuration message provided") } - any, ok := cfg.(*anypb.Any) + m, ok := cfg.(*anypb.Any) if !ok { return nil, fmt.Errorf("fault: error parsing config %v: unknown type %T", cfg, cfg) } msg := new(fpb.HTTPFault) - if err := any.UnmarshalTo(msg); err != nil { + if err := m.UnmarshalTo(msg); err != nil { return nil, fmt.Errorf("fault: error parsing config %v: %v", cfg, err) } return config{config: msg}, nil @@ -139,7 +139,7 @@ type interceptor struct { var activeFaults uint32 // global 
active faults; accessed atomically -func (i *interceptor) NewStream(ctx context.Context, ri iresolver.RPCInfo, done func(), newStream func(ctx context.Context, done func()) (iresolver.ClientStream, error)) (iresolver.ClientStream, error) { +func (i *interceptor) NewStream(ctx context.Context, _ iresolver.RPCInfo, done func(), newStream func(ctx context.Context, done func()) (iresolver.ClientStream, error)) (iresolver.ClientStream, error) { if maxAF := i.config.GetMaxActiveFaults(); maxAF != nil { defer atomic.AddUint32(&activeFaults, ^uint32(0)) // decrement counter if af := atomic.AddUint32(&activeFaults, 1); af > maxAF.GetValue() { @@ -162,7 +162,7 @@ func (i *interceptor) NewStream(ctx context.Context, ri iresolver.RPCInfo, done } // For overriding in tests -var randIntn = grpcrand.Intn +var randIntn = rand.Intn var newTimer = time.NewTimer func injectDelay(ctx context.Context, delayCfg *cpb.FaultDelay) error { @@ -296,5 +296,5 @@ func (*okStream) Header() (metadata.MD, error) { return nil, nil } func (*okStream) Trailer() metadata.MD { return nil } func (*okStream) CloseSend() error { return nil } func (o *okStream) Context() context.Context { return o.ctx } -func (*okStream) SendMsg(m any) error { return io.EOF } -func (*okStream) RecvMsg(m any) error { return io.EOF } +func (*okStream) SendMsg(any) error { return io.EOF } +func (*okStream) RecvMsg(any) error { return io.EOF } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go index 37de3a39b64..bcda2ab05fc 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go @@ -117,7 +117,7 @@ func parseConfig(rbacCfg *rpb.RBAC) (httpfilter.FilterConfig, error) { // "If absent, no enforcing RBAC policy will be applied" - RBAC // Documentation for Rules field. // "At this time, if the RBAC.action is Action.LOG then the policy will be - // completely ignored, as if RBAC was not configurated." - A41 + // completely ignored, as if RBAC was not configured." - A41 if rbacCfg.Rules == nil || rbacCfg.GetRules().GetAction() == v3rbacpb.RBAC_LOG { return config{}, nil } @@ -128,7 +128,7 @@ func parseConfig(rbacCfg *rpb.RBAC) (httpfilter.FilterConfig, error) { ce, err := rbac.NewChainEngine([]*v3rbacpb.RBAC{rbacCfg.GetRules()}, "") if err != nil { // "At this time, if the RBAC.action is Action.LOG then the policy will be - // completely ignored, as if RBAC was not configurated." - A41 + // completely ignored, as if RBAC was not configured." 
- A41 if rbacCfg.GetRules().GetAction() != v3rbacpb.RBAC_LOG { return nil, fmt.Errorf("rbac: error constructing matching engine: %v", err) } @@ -141,12 +141,12 @@ func (builder) ParseFilterConfig(cfg proto.Message) (httpfilter.FilterConfig, er if cfg == nil { return nil, fmt.Errorf("rbac: nil configuration message provided") } - any, ok := cfg.(*anypb.Any) + m, ok := cfg.(*anypb.Any) if !ok { return nil, fmt.Errorf("rbac: error parsing config %v: unknown type %T", cfg, cfg) } msg := new(rpb.RBAC) - if err := any.UnmarshalTo(msg); err != nil { + if err := m.UnmarshalTo(msg); err != nil { return nil, fmt.Errorf("rbac: error parsing config %v: %v", cfg, err) } return parseConfig(msg) @@ -156,12 +156,12 @@ func (builder) ParseFilterConfigOverride(override proto.Message) (httpfilter.Fil if override == nil { return nil, fmt.Errorf("rbac: nil configuration message provided") } - any, ok := override.(*anypb.Any) + m, ok := override.(*anypb.Any) if !ok { return nil, fmt.Errorf("rbac: error parsing override config %v: unknown type %T", override, override) } msg := new(rpb.RBACPerRoute) - if err := any.UnmarshalTo(msg); err != nil { + if err := m.UnmarshalTo(msg); err != nil { return nil, fmt.Errorf("rbac: error parsing override config %v: %v", override, err) } return parseConfig(msg.Rbac) @@ -198,7 +198,7 @@ func (builder) BuildServerInterceptor(cfg httpfilter.FilterConfig, override http // "If absent, no enforcing RBAC policy will be applied" - RBAC // Documentation for Rules field. // "At this time, if the RBAC.action is Action.LOG then the policy will be - // completely ignored, as if RBAC was not configurated." - A41 + // completely ignored, as if RBAC was not configured." - A41 if c.chainEngine == nil { return nil, nil } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/httpfilter/router/router.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/httpfilter/router/router.go index 1675ec86ec1..a781523d371 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/httpfilter/router/router.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/httpfilter/router/router.go @@ -54,12 +54,12 @@ func (builder) ParseFilterConfig(cfg proto.Message) (httpfilter.FilterConfig, er if cfg == nil { return nil, fmt.Errorf("router: nil configuration message provided") } - any, ok := cfg.(*anypb.Any) + m, ok := cfg.(*anypb.Any) if !ok { return nil, fmt.Errorf("router: error parsing config %v: unknown type %T", cfg, cfg) } msg := new(pb.Router) - if err := any.UnmarshalTo(msg); err != nil { + if err := m.UnmarshalTo(msg); err != nil { return nil, fmt.Errorf("router: error parsing config %v: %v", cfg, err) } return config{}, nil diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/internal.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/internal.go index fda4c7f5610..1d8a6b03f1b 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/internal.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/internal.go @@ -55,6 +55,11 @@ func (l LocalityID) Equal(o any) bool { return l.Region == ol.Region && l.Zone == ol.Zone && l.SubZone == ol.SubZone } +// Empty returns whether or not the locality ID is empty. +func (l LocalityID) Empty() bool { + return l.Region == "" && l.Zone == "" && l.SubZone == "" +} + // LocalityIDFromString converts a json representation of locality, into a // LocalityID struct. 
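The recurring rename from any to m in the parsers above avoids shadowing Go's builtin any alias (introduced in Go 1.18); the unmarshalling itself is the standard anypb two-step of asserting *anypb.Any and calling UnmarshalTo. A self-contained sketch using a well-known type in place of the xDS protos:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
)

// parse mirrors the filter-config parsing shape above: assert that the
// incoming proto.Message is an *anypb.Any, then unpack it into the
// expected concrete type.
func parse(cfg proto.Message) (*durationpb.Duration, error) {
	m, ok := cfg.(*anypb.Any) // named m, not any, to avoid shadowing the builtin
	if !ok {
		return nil, fmt.Errorf("error parsing config %v: unknown type %T", cfg, cfg)
	}
	d := new(durationpb.Duration)
	if err := m.UnmarshalTo(d); err != nil {
		return nil, fmt.Errorf("error parsing config %v: %v", cfg, err)
	}
	return d, nil
}

func main() {
	a, _ := anypb.New(durationpb.New(3 * time.Second))
	d, err := parse(a)
	fmt.Println(d.AsDuration(), err) // 3s <nil>
}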
func LocalityIDFromString(s string) (ret LocalityID, _ error) { @@ -83,3 +88,10 @@ func SetLocalityID(addr resolver.Address, l LocalityID) resolver.Address { // ResourceTypeMapForTesting maps TypeUrl to corresponding ResourceType. var ResourceTypeMapForTesting map[string]any + +// UnknownCSMLabels are TelemetryLabels emitted from CDS if CSM Telemetry Label +// data is not present in the CDS Resource. +var UnknownCSMLabels = map[string]string{ + "csm.service_name": "unknown", + "csm.service_namespace_name": "unknown", +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/resolver/internal/internal.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/resolver/internal/internal.go index f505eeb4394..d9c23278281 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/resolver/internal/internal.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/resolver/internal/internal.go @@ -26,5 +26,5 @@ var ( NewWRR any // func() wrr.WRR // NewXDSClient is the function used to create a new xDS client. - NewXDSClient any // func() (xdsclient.XDSClient, func(), error) + NewXDSClient any // func(string) (xdsclient.XDSClient, func(), error) ) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go index 88cb1d2a1fd..36776f3debd 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go @@ -23,13 +23,13 @@ import ( "encoding/json" "fmt" "math/bits" + "math/rand" "strings" "sync/atomic" "time" xxhash "github.com/cespare/xxhash/v2" "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/serviceconfig" @@ -182,7 +182,7 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP if v := atomic.AddInt32(ref, -1); v == 0 { // This entry will be removed from activeClusters when // producing the service config for the empty update. - cs.r.serializer.Schedule(func(context.Context) { + cs.r.serializer.TrySchedule(func(context.Context) { cs.r.onClusterRefDownToZero() }) } @@ -274,7 +274,7 @@ func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies [ } // If no generated hash return a random long. In the grand scheme of things // this logically will map to choosing a random backend to route request to. - return grpcrand.Uint64() + return rand.Uint64() } func (cs *configSelector) newInterceptor(rt *route, cluster *routeCluster) (iresolver.ClientInterceptor, error) { @@ -326,7 +326,7 @@ func (cs *configSelector) stop() { // selector; we need another update to delete clusters from the config (if // we don't have another update pending already). 
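The cluster bookkeeping in SelectConfig above decrements an atomic reference count and schedules cleanup only when the count reaches zero. A minimal sketch of that shape, with invented names (clusterRef, acquire, release):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// clusterRef counts active users of a cluster; the last release triggers
// cleanup exactly once.
type clusterRef struct{ refs atomic.Int32 }

func (c *clusterRef) acquire() { c.refs.Add(1) }

func (c *clusterRef) release(onZero func()) {
	if c.refs.Add(-1) == 0 {
		onZero() // e.g. schedule removal from activeClusters
	}
}

func main() {
	var wg sync.WaitGroup
	c := &clusterRef{}
	for i := 0; i < 5; i++ {
		c.acquire()
	}
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.release(func() { fmt.Println("last ref released; scheduling cleanup") })
		}()
	}
	wg.Wait()
}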
if needUpdate { - cs.r.serializer.Schedule(func(context.Context) { + cs.r.serializer.TrySchedule(func(context.Context) { cs.r.onClusterRefDownToZero() }) } @@ -336,7 +336,7 @@ type interceptorList struct { interceptors []iresolver.ClientInterceptor } -func (il *interceptorList) NewStream(ctx context.Context, ri iresolver.RPCInfo, done func(), newStream func(ctx context.Context, done func()) (iresolver.ClientStream, error)) (iresolver.ClientStream, error) { +func (il *interceptorList) NewStream(ctx context.Context, ri iresolver.RPCInfo, _ func(), newStream func(ctx context.Context, _ func()) (iresolver.ClientStream, error)) (iresolver.ClientStream, error) { for i := len(il.interceptors) - 1; i >= 0; i-- { ns := newStream interceptor := il.interceptors[i] diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go index abb3c2c5acf..0de6604484b 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go @@ -36,22 +36,19 @@ func newListenerWatcher(resourceName string, parent *xdsResolver) *listenerWatch return lw } -func (l *listenerWatcher) OnUpdate(update *xdsresource.ListenerResourceData) { - l.parent.serializer.Schedule(func(context.Context) { - l.parent.onListenerResourceUpdate(update.Resource) - }) +func (l *listenerWatcher) OnUpdate(update *xdsresource.ListenerResourceData, onDone xdsresource.OnDoneFunc) { + handleUpdate := func(context.Context) { l.parent.onListenerResourceUpdate(update.Resource); onDone() } + l.parent.serializer.ScheduleOr(handleUpdate, onDone) } -func (l *listenerWatcher) OnError(err error) { - l.parent.serializer.Schedule(func(context.Context) { - l.parent.onListenerResourceError(err) - }) +func (l *listenerWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) { + handleError := func(context.Context) { l.parent.onListenerResourceError(err); onDone() } + l.parent.serializer.ScheduleOr(handleError, onDone) } -func (l *listenerWatcher) OnResourceDoesNotExist() { - l.parent.serializer.Schedule(func(context.Context) { - l.parent.onListenerResourceNotFound() - }) +func (l *listenerWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) { + handleNotFound := func(context.Context) { l.parent.onListenerResourceNotFound(); onDone() } + l.parent.serializer.ScheduleOr(handleNotFound, onDone) } func (l *listenerWatcher) stop() { @@ -71,22 +68,22 @@ func newRouteConfigWatcher(resourceName string, parent *xdsResolver) *routeConfi return rw } -func (r *routeConfigWatcher) OnUpdate(update *xdsresource.RouteConfigResourceData) { - r.parent.serializer.Schedule(func(context.Context) { - r.parent.onRouteConfigResourceUpdate(r.resourceName, update.Resource) - }) +func (r *routeConfigWatcher) OnUpdate(u *xdsresource.RouteConfigResourceData, onDone xdsresource.OnDoneFunc) { + handleUpdate := func(context.Context) { + r.parent.onRouteConfigResourceUpdate(r.resourceName, u.Resource) + onDone() + } + r.parent.serializer.ScheduleOr(handleUpdate, onDone) } -func (r *routeConfigWatcher) OnError(err error) { - r.parent.serializer.Schedule(func(context.Context) { - r.parent.onRouteConfigResourceError(r.resourceName, err) - }) +func (r *routeConfigWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) { + handleError := func(context.Context) { 
r.parent.onRouteConfigResourceError(r.resourceName, err); onDone() } + r.parent.serializer.ScheduleOr(handleError, onDone) } -func (r *routeConfigWatcher) OnResourceDoesNotExist() { - r.parent.serializer.Schedule(func(context.Context) { - r.parent.onRouteConfigResourceNotFound(r.resourceName) - }) +func (r *routeConfigWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) { + handleNotFound := func(context.Context) { r.parent.onRouteConfigResourceNotFound(r.resourceName); onDone() } + r.parent.serializer.ScheduleOr(handleNotFound, onDone) } func (r *routeConfigWatcher) stop() { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go index 928decfa1f6..b5d24e4bf21 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go @@ -22,11 +22,11 @@ package resolver import ( "context" "fmt" + "math/rand" "sync/atomic" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" @@ -49,8 +49,8 @@ const Scheme = "xds" // ClientConns at the same time. func newBuilderForTesting(config []byte) (resolver.Builder, error) { return &xdsResolverBuilder{ - newXDSClient: func() (xdsclient.XDSClient, func(), error) { - return xdsclient.NewWithBootstrapContentsForTesting(config) + newXDSClient: func(name string) (xdsclient.XDSClient, func(), error) { + return xdsclient.NewForTesting(xdsclient.OptionsForTesting{Name: name, Contents: config}) }, }, nil } @@ -64,7 +64,7 @@ func init() { } type xdsResolverBuilder struct { - newXDSClient func() (xdsclient.XDSClient, func(), error) + newXDSClient func(string) (xdsclient.XDSClient, func(), error) } // Build helps implement the resolver.Builder interface. @@ -75,7 +75,7 @@ func (b *xdsResolverBuilder) Build(target resolver.Target, cc resolver.ClientCon r := &xdsResolver{ cc: cc, activeClusters: make(map[string]*clusterInfo), - channelID: grpcrand.Uint64(), + channelID: rand.Uint64(), } defer func() { if retErr != nil { @@ -97,16 +97,16 @@ func (b *xdsResolverBuilder) Build(target resolver.Target, cc resolver.ClientCon r.serializerCancel = cancel // Initialize the xDS client. - newXDSClient := rinternal.NewXDSClient.(func() (xdsclient.XDSClient, func(), error)) + newXDSClient := rinternal.NewXDSClient.(func(string) (xdsclient.XDSClient, func(), error)) if b.newXDSClient != nil { newXDSClient = b.newXDSClient } - client, close, err := newXDSClient() + client, closeFn, err := newXDSClient(target.String()) if err != nil { return nil, fmt.Errorf("xds: failed to create xds-client: %v", err) } r.xdsClient = client - r.xdsClientClose = close + r.xdsClientClose = closeFn // Determine the listener resource name and start a watcher for it. template, err := r.sanityChecksOnBootstrapConfig(target, opts, r.xdsClient) @@ -128,7 +128,7 @@ func (b *xdsResolverBuilder) Build(target resolver.Target, cc resolver.ClientCon // // Returns the listener resource name template to use. If any of the above // validations fail, a non-nil error is returned. 
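Each watcher above now bundles its handler and onDone into a closure handed to ScheduleOr, whose second argument runs when the serializer can no longer execute callbacks, so onDone fires on every path. A drastically simplified stand-in serializer illustrating that contract (not the grpcsync implementation):

package main

import (
	"fmt"
	"sync"
)

// serializer runs callbacks on a single goroutine. ScheduleOr runs f, or
// runs onFailure if the serializer has already been closed.
type serializer struct {
	mu     sync.Mutex
	closed bool
	ch     chan func()
	done   chan struct{}
}

func newSerializer() *serializer {
	s := &serializer{ch: make(chan func(), 16), done: make(chan struct{})}
	go func() {
		for f := range s.ch {
			f()
		}
		close(s.done)
	}()
	return s
}

func (s *serializer) ScheduleOr(f func(), onFailure func()) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.closed {
		onFailure()
		return
	}
	s.ch <- f
}

func (s *serializer) Close() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.closed = true
	close(s.ch)
}

func main() {
	onDone := func() { fmt.Println("onDone invoked") }
	s := newSerializer()
	s.ScheduleOr(func() { fmt.Println("update handled"); onDone() }, onDone)
	s.Close()
	<-s.done // wait for queued callbacks to drain
	// After Close, the fallback keeps onDone from being lost.
	s.ScheduleOr(func() { fmt.Println("never runs") }, onDone)
}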
-func (r *xdsResolver) sanityChecksOnBootstrapConfig(target resolver.Target, opts resolver.BuildOptions, client xdsclient.XDSClient) (string, error) { +func (r *xdsResolver) sanityChecksOnBootstrapConfig(target resolver.Target, _ resolver.BuildOptions, client xdsclient.XDSClient) (string, error) { bootstrapConfig := client.BootstrapConfig() if bootstrapConfig == nil { // This is never expected to happen after a successful xDS client @@ -139,9 +139,13 @@ func (r *xdsResolver) sanityChecksOnBootstrapConfig(target resolver.Target, opts // Find the client listener template to use from the bootstrap config: // - If authority is not set in the target, use the top level template // - If authority is set, use the template from the authority map. - template := bootstrapConfig.ClientDefaultListenerResourceNameTemplate + template := bootstrapConfig.ClientDefaultListenerResourceNameTemplate() if authority := target.URL.Host; authority != "" { - a := bootstrapConfig.Authorities[authority] + authorities := bootstrapConfig.Authorities() + if authorities == nil { + return "", fmt.Errorf("xds: authority %q specified in dial target %q is not found in the bootstrap file", authority, target) + } + a := authorities[authority] if a == nil { return "", fmt.Errorf("xds: authority %q specified in dial target %q is not found in the bootstrap file", authority, target) } @@ -210,7 +214,7 @@ type xdsResolver struct { } // ResolveNow is a no-op at this point. -func (*xdsResolver) ResolveNow(o resolver.ResolveNowOptions) {} +func (*xdsResolver) ResolveNow(resolver.ResolveNowOptions) {} func (r *xdsResolver) Close() { // Cancel the context passed to the serializer and wait for any scheduled diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go index fdba769294d..92d07e7fb6d 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go @@ -47,7 +47,7 @@ type connWrapper struct { // The specific filter chain picked for handling this connection. filterChain *xdsresource.FilterChain - // A reference fo the listenerWrapper on which this connection was accepted. + // A reference to the listenerWrapper on which this connection was accepted. parent *listenerWrapper // The certificate providers created for this connection. @@ -107,7 +107,7 @@ func (c *connWrapper) XDSHandshakeInfo() (*xdsinternal.HandshakeInfo, error) { return xdsinternal.NewHandshakeInfo(nil, nil, nil, false), nil } - cpc := c.parent.xdsC.BootstrapConfig().CertProviderConfigs + cpc := c.parent.xdsC.BootstrapConfig().CertProviderConfigs() // Identity provider name is mandatory on the server-side, and this is // enforced when the resource is received at the XDSClient layer. 
secCfg := c.filterChain.SecurityCfg @@ -161,6 +161,7 @@ func (c *connWrapper) Close() error { if c.rootProvider != nil { c.rootProvider.Close() } + c.parent.removeConn(c) return c.Conn.Close() } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go index 174b54c4411..09d320018ae 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go @@ -86,6 +86,7 @@ func NewListenerWrapper(params ListenerWrapperParams) net.Listener { xdsC: params.XDSClient, modeCallback: params.ModeCallback, isUnspecifiedAddr: params.Listener.Addr().(*net.TCPAddr).IP.IsUnspecified(), + conns: make(map[*connWrapper]bool), mode: connectivity.ServingModeNotServing, closed: grpcsync.NewEvent(), @@ -135,13 +136,13 @@ type listenerWrapper struct { // mu guards access to the current serving mode and the active filter chain // manager. - mu sync.RWMutex + mu sync.Mutex // Current serving mode. mode connectivity.ServingMode // Filter chain manager currently serving. activeFilterChainManager *xdsresource.FilterChainManager // conns accepted with configuration from activeFilterChainManager. - conns []*connWrapper + conns map[*connWrapper]bool // These fields are read/written to in the context of xDS updates, which are // guaranteed to be emitted synchronously from the xDS Client. Thus, they do @@ -202,17 +203,14 @@ func (l *listenerWrapper) maybeUpdateFilterChains() { // gracefully shut down with a grace period of 10 minutes for long-lived // RPC's, such that clients will reconnect and have the updated // configuration apply." - A36 - var connsToClose []*connWrapper - if l.activeFilterChainManager != nil { // If there is a filter chain manager to clean up. - connsToClose = l.conns - l.conns = nil - } + connsToClose := l.conns + l.conns = make(map[*connWrapper]bool) l.activeFilterChainManager = l.pendingFilterChainManager l.pendingFilterChainManager = nil l.instantiateFilterChainRoutingConfigurationsLocked() l.mu.Unlock() go func() { - for _, conn := range connsToClose { + for conn := range connsToClose { conn.Drain() } }() @@ -304,7 +302,7 @@ func (l *listenerWrapper) Accept() (net.Conn, error) { return nil, fmt.Errorf("received connection with non-TCP address (local: %T, remote %T)", conn.LocalAddr(), conn.RemoteAddr()) } - l.mu.RLock() + l.mu.Lock() if l.mode == connectivity.ServingModeNotServing { // Close connections as soon as we accept them when we are in // "not-serving" mode. Since we accept a net.Listener from the user @@ -312,7 +310,7 @@ func (l *listenerWrapper) Accept() (net.Conn, error) { // "not-serving". Closing the connection immediately upon accepting // is one of the other ways to implement the "not-serving" mode as // outlined in gRFC A36. - l.mu.RUnlock() + l.mu.Unlock() conn.Close() continue } @@ -324,7 +322,7 @@ func (l *listenerWrapper) Accept() (net.Conn, error) { SourcePort: srcAddr.Port, }) if err != nil { - l.mu.RUnlock() + l.mu.Unlock() // When a matching filter chain is not found, we close the // connection right away, but do not return an error back to // `grpc.Serve()` from where this Accept() was invoked. 
Returning an @@ -341,12 +339,18 @@ func (l *listenerWrapper) Accept() (net.Conn, error) { continue } cw := &connWrapper{Conn: conn, filterChain: fc, parent: l, urc: fc.UsableRouteConfiguration} - l.conns = append(l.conns, cw) - l.mu.RUnlock() + l.conns[cw] = true + l.mu.Unlock() return cw, nil } } +func (l *listenerWrapper) removeConn(conn *connWrapper) { + l.mu.Lock() + defer l.mu.Unlock() + delete(l.conns, conn) +} + // Close closes the underlying listener. It also cancels the xDS watch // registered in Serve() and closes any certificate provider instances created // based on security configuration received in the LDS response. @@ -376,9 +380,9 @@ func (l *listenerWrapper) switchModeLocked(newMode connectivity.ServingMode, err l.mode = newMode if l.mode == connectivity.ServingModeNotServing { connsToClose := l.conns - l.conns = nil + l.conns = make(map[*connWrapper]bool) go func() { - for _, conn := range connsToClose { + for conn := range connsToClose { conn.Drain() } }() @@ -410,7 +414,8 @@ type ldsWatcher struct { name string } -func (lw *ldsWatcher) OnUpdate(update *xdsresource.ListenerResourceData) { +func (lw *ldsWatcher) OnUpdate(update *xdsresource.ListenerResourceData, onDone xdsresource.OnDoneFunc) { + defer onDone() if lw.parent.closed.HasFired() { lw.logger.Warningf("Resource %q received update: %#v after listener was closed", lw.name, update) return @@ -421,7 +426,8 @@ func (lw *ldsWatcher) OnUpdate(update *xdsresource.ListenerResourceData) { lw.parent.handleLDSUpdate(update.Resource) } -func (lw *ldsWatcher) OnError(err error) { +func (lw *ldsWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) { + defer onDone() if lw.parent.closed.HasFired() { lw.logger.Warningf("Resource %q received error: %v after listener was closed", lw.name, err) return @@ -433,7 +439,8 @@ func (lw *ldsWatcher) OnError(err error) { // continue to use the old configuration. 
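Switching conns from a slice to a map above makes the per-connection removal performed by Close an O(1) delete, and draining swaps in a fresh map under the lock so the old set can be closed outside it. A self-contained sketch of that tracking, with illustrative types only:

package main

import (
	"fmt"
	"sync"
)

type conn struct{ id int }

func (c *conn) Drain() { fmt.Println("draining conn", c.id) }

// tracker holds the set of accepted connections, guarded by a mutex.
type tracker struct {
	mu    sync.Mutex
	conns map[*conn]bool
}

func newTracker() *tracker { return &tracker{conns: make(map[*conn]bool)} }

func (t *tracker) add(c *conn) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.conns[c] = true
}

func (t *tracker) remove(c *conn) {
	t.mu.Lock()
	defer t.mu.Unlock()
	delete(t.conns, c)
}

// drainAll atomically swaps in a fresh set and drains the old one outside
// the lock, mirroring the goroutine used on mode switches above.
func (t *tracker) drainAll() {
	t.mu.Lock()
	old := t.conns
	t.conns = make(map[*conn]bool)
	t.mu.Unlock()
	for c := range old {
		c.Drain()
	}
}

func main() {
	t := newTracker()
	c1, c2 := &conn{1}, &conn{2}
	t.add(c1)
	t.add(c2)
	t.remove(c1) // e.g. the connection closed itself
	t.drainAll() // mode switch: only c2 is still tracked
}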
} -func (lw *ldsWatcher) OnResourceDoesNotExist() { +func (lw *ldsWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) { + defer onDone() if lw.parent.closed.HasFired() { lw.logger.Warningf("Resource %q received resource-does-not-exist error after listener was closed", lw.name) return diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go index 67cde460289..bcd3938e6f1 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go @@ -147,7 +147,8 @@ type rdsWatcher struct { canceled bool // eats callbacks if true } -func (rw *rdsWatcher) OnUpdate(update *xdsresource.RouteConfigResourceData) { +func (rw *rdsWatcher) OnUpdate(update *xdsresource.RouteConfigResourceData, onDone xdsresource.OnDoneFunc) { + defer onDone() rw.mu.Lock() if rw.canceled { rw.mu.Unlock() @@ -160,7 +161,8 @@ func (rw *rdsWatcher) OnUpdate(update *xdsresource.RouteConfigResourceData) { rw.parent.handleRouteUpdate(rw.routeName, rdsWatcherUpdate{data: &update.Resource}) } -func (rw *rdsWatcher) OnError(err error) { +func (rw *rdsWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) { + defer onDone() rw.mu.Lock() if rw.canceled { rw.mu.Unlock() @@ -173,7 +175,8 @@ func (rw *rdsWatcher) OnError(err error) { rw.parent.handleRouteUpdate(rw.routeName, rdsWatcherUpdate{err: err}) } -func (rw *rdsWatcher) OnResourceDoesNotExist() { +func (rw *rdsWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) { + defer onDone() rw.mu.Lock() if rw.canceled { rw.mu.Unlock() diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go index 62d7a1756e4..3251737f181 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go @@ -23,6 +23,7 @@ import ( "fmt" "strings" "sync" + "sync/atomic" "time" "google.golang.org/grpc/internal/grpclog" @@ -32,6 +33,10 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient/transport" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/timestamppb" + + v3adminpb "github.com/envoyproxy/go-control-plane/envoy/admin/v3" + v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" ) type watchState int @@ -114,12 +119,12 @@ func newAuthority(args authorityArgs) (*authority, error) { } tr, err := transport.New(transport.Options{ - ServerCfg: *args.serverCfg, + ServerCfg: args.serverCfg, OnRecvHandler: ret.handleResourceUpdate, OnErrorHandler: ret.newConnectionError, OnSendHandler: ret.transportOnSendHandler, Logger: args.logger, - NodeProto: args.bootstrapCfg.NodeProto, + NodeProto: args.bootstrapCfg.Node(), }) if err != nil { return nil, fmt.Errorf("creating new transport to %q: %v", args.serverCfg, err) @@ -144,7 +149,7 @@ func (a *authority) transportOnSendHandler(u *transport.ResourceSendInfo) { a.startWatchTimersLocked(rType, u.ResourceNames) } -func (a *authority) handleResourceUpdate(resourceUpdate transport.ResourceUpdate) error { +func (a *authority) handleResourceUpdate(resourceUpdate 
transport.ResourceUpdate, onDone func()) error { rType := a.resourceTypeGetter(resourceUpdate.URL) if rType == nil { return xdsresource.NewErrorf(xdsresource.ErrorTypeResourceTypeUnsupported, "Resource URL %v unknown in response from server", resourceUpdate.URL) @@ -155,14 +160,40 @@ func (a *authority) handleResourceUpdate(resourceUpdate transport.ResourceUpdate ServerConfig: a.serverCfg, } updates, md, err := decodeAllResources(opts, rType, resourceUpdate) - a.updateResourceStateAndScheduleCallbacks(rType, updates, md) + a.updateResourceStateAndScheduleCallbacks(rType, updates, md, onDone) return err } -func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Type, updates map[string]resourceDataErrTuple, md xdsresource.UpdateMetadata) { +func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Type, updates map[string]resourceDataErrTuple, md xdsresource.UpdateMetadata, onDone func()) { a.resourcesMu.Lock() defer a.resourcesMu.Unlock() + // We build a list of callback funcs to invoke, and invoke them at the end + // of this method instead of inline (when handling the update for a + // particular resource), because we want to make sure that all calls to + // increment watcherCnt happen before any callbacks are invoked. This will + // ensure that the onDone callback is never invoked before all watcher + // callbacks are invoked, and the watchers have processed the update. + watcherCnt := new(atomic.Int64) + done := func() { + watcherCnt.Add(-1) + if watcherCnt.Load() == 0 { + onDone() + } + } + funcsToSchedule := []func(context.Context){} + defer func() { + if len(funcsToSchedule) == 0 { + // When there are no watchers for the resources received as part of + // this update, invoke onDone explicitly to unblock the next read on + // the ADS stream. + onDone() + } + for _, f := range funcsToSchedule { + a.serializer.ScheduleOr(f, onDone) + } + }() + resourceStates := a.resources[rType] for name, uErr := range updates { if state, ok := resourceStates[name]; ok { @@ -206,7 +237,8 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty for watcher := range state.watchers { watcher := watcher err := uErr.err - a.serializer.Schedule(func(context.Context) { watcher.OnError(err) }) + watcherCnt.Add(1) + funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnError(err, done) }) } continue } @@ -221,11 +253,14 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty for watcher := range state.watchers { watcher := watcher resource := uErr.resource - a.serializer.Schedule(func(context.Context) { watcher.OnUpdate(resource) }) + watcherCnt.Add(1) + funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnUpdate(resource, done) }) } } // Sync cache. - a.logger.Debugf("Resource type %q with name %q added to cache", rType.TypeName(), name) + if a.logger.V(2) { + a.logger.Infof("Resource type %q with name %q added to cache", rType.TypeName(), name) + } state.cache = uErr.resource // Set status to ACK, and clear error state. The metadata might be a // NACK metadata because some other resources in the same response @@ -279,7 +314,7 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty // resource deletion is to be ignored, the resource is not removed from // the cache and the corresponding OnResourceDoesNotExist() callback is // not invoked on the watchers. 
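The watcher countdown above ensures onDone fires exactly once, after the last watcher callback completes, and falls back to invoking it directly when no watchers exist. A sketch of the same pattern; note that testing the value returned by Add keeps the decrement-and-check atomic:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// notifyAll invokes every watcher and arranges for onDone to fire exactly
// once, after the last watcher signals completion. With no watchers, onDone
// is invoked directly so the caller is still unblocked.
func notifyAll(watchers []func(done func()), onDone func()) {
	if len(watchers) == 0 {
		onDone()
		return
	}
	cnt := new(atomic.Int64)
	cnt.Add(int64(len(watchers)))
	done := func() {
		// Add returns the new value, so only one caller can observe zero.
		if cnt.Add(-1) == 0 {
			onDone()
		}
	}
	for _, w := range watchers {
		w := w // shadow the loop variable, as the patch does with watcher := watcher
		go w(done)
	}
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	watchers := []func(done func()){
		func(done func()) { fmt.Println("watcher 1 processed update"); done() },
		func(done func()) { fmt.Println("watcher 2 processed update"); done() },
	}
	notifyAll(watchers, func() { fmt.Println("all watchers done"); wg.Done() })
	wg.Wait()
}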
- if a.serverCfg.IgnoreResourceDeletion { + if a.serverCfg.ServerFeaturesIgnoreResourceDeletion() { if !state.deletionIgnored { state.deletionIgnored = true a.logger.Warningf("Ignoring resource deletion for resource %q of type %q", name, rType.TypeName()) @@ -294,7 +329,8 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} for watcher := range state.watchers { watcher := watcher - a.serializer.Schedule(func(context.Context) { watcher.OnResourceDoesNotExist() }) + watcherCnt.Add(1) + funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnResourceDoesNotExist(done) }) } } } @@ -366,7 +402,9 @@ func (a *authority) startWatchTimersLocked(rType xdsresource.Type, resourceNames continue } state.wTimer = time.AfterFunc(a.watchExpiryTimeout, func() { - a.handleWatchTimerExpiry(rType, resourceName, state) + a.resourcesMu.Lock() + a.handleWatchTimerExpiryLocked(rType, resourceName, state) + a.resourcesMu.Unlock() }) state.wState = watchStateRequested } @@ -420,8 +458,8 @@ func (a *authority) newConnectionError(err error) { // Propagate the connection error from the transport layer to all watchers. for watcher := range state.watchers { watcher := watcher - a.serializer.Schedule(func(context.Context) { - watcher.OnError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err)) + a.serializer.TrySchedule(func(context.Context) { + watcher.OnError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err), func() {}) }) } } @@ -448,7 +486,9 @@ func (a *authority) close() { } func (a *authority) watchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) func() { - a.logger.Debugf("New watch for type %q, resource name %q", rType.TypeName(), resourceName) + if a.logger.V(2) { + a.logger.Infof("New watch for type %q, resource name %q", rType.TypeName(), resourceName) + } a.resourcesMu.Lock() defer a.resourcesMu.Unlock() @@ -465,7 +505,9 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w // instruct the transport layer to send a DiscoveryRequest for the same. state := resources[resourceName] if state == nil { - a.logger.Debugf("First watch for type %q, resource name %q", rType.TypeName(), resourceName) + if a.logger.V(2) { + a.logger.Infof("First watch for type %q, resource name %q", rType.TypeName(), resourceName) + } state = &resourceState{ watchers: make(map[xdsresource.ResourceWatcher]bool), md: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}, @@ -483,7 +525,7 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w a.logger.Infof("Resource type %q with resource name %q found in cache: %s", rType.TypeName(), resourceName, state.cache.ToJSON()) } resource := state.cache - a.serializer.Schedule(func(context.Context) { watcher.OnUpdate(resource) }) + a.serializer.TrySchedule(func(context.Context) { watcher.OnUpdate(resource, func() {}) }) } return func() { @@ -504,16 +546,15 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w // There are no more watchers for this resource, delete the state // associated with it, and instruct the transport to send a request // which does not include this resource name. 
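The expiry change above moves the locking into the timer callback and delegates to a *Locked helper, making the locking convention explicit at the call site. A small sketch of that shape, with invented names:

package main

import (
	"fmt"
	"sync"
	"time"
)

type watchState struct {
	mu      sync.Mutex
	expired bool
	timer   *time.Timer
}

// expireLocked marks the watch as expired. Caller must hold w.mu, as the
// Locked suffix advertises.
func (w *watchState) expireLocked() {
	w.expired = true
	fmt.Println("watch expired: resource does not exist")
}

func (w *watchState) start(timeout time.Duration) {
	w.timer = time.AfterFunc(timeout, func() {
		w.mu.Lock()
		w.expireLocked()
		w.mu.Unlock()
	})
}

func main() {
	w := &watchState{}
	w.start(10 * time.Millisecond)
	time.Sleep(50 * time.Millisecond)
	w.mu.Lock()
	fmt.Println("expired:", w.expired)
	w.mu.Unlock()
}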
- a.logger.Debugf("Removing last watch for type %q, resource name %q", rType.TypeName(), resourceName) + if a.logger.V(2) { + a.logger.Infof("Removing last watch for type %q, resource name %q", rType.TypeName(), resourceName) + } delete(resources, resourceName) a.sendDiscoveryRequestLocked(rType, resources) } } -func (a *authority) handleWatchTimerExpiry(rType xdsresource.Type, resourceName string, state *resourceState) { - a.resourcesMu.Lock() - defer a.resourcesMu.Unlock() - +func (a *authority) handleWatchTimerExpiryLocked(rType xdsresource.Type, resourceName string, state *resourceState) { if a.closed { return } @@ -537,7 +578,7 @@ func (a *authority) handleWatchTimerExpiry(rType xdsresource.Type, resourceName state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} for watcher := range state.watchers { watcher := watcher - a.serializer.Schedule(func(context.Context) { watcher.OnResourceDoesNotExist() }) + a.serializer.TrySchedule(func(context.Context) { watcher.OnResourceDoesNotExist(func() {}) }) } } @@ -563,13 +604,13 @@ func (a *authority) triggerResourceNotFoundForTesting(rType xdsresource.Type, re state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} for watcher := range state.watchers { watcher := watcher - a.serializer.Schedule(func(context.Context) { watcher.OnResourceDoesNotExist() }) + a.serializer.TrySchedule(func(context.Context) { watcher.OnResourceDoesNotExist(func() {}) }) } } // sendDiscoveryRequestLocked sends a discovery request for the specified // resource type and resource names. Even though this method does not directly -// access the resource cache, it is important that `resourcesMu` be beld when +// access the resource cache, it is important that `resourcesMu` be held when // calling this method to ensure that a consistent snapshot of resource names is // being requested. 
func (a *authority) sendDiscoveryRequestLocked(rType xdsresource.Type, resources map[string]*resourceState) { @@ -586,26 +627,54 @@ func (a *authority) reportLoad() (*load.Store, func()) { return a.transport.ReportLoad() } -func (a *authority) dumpResources() map[string]map[string]xdsresource.UpdateWithMD { +func (a *authority) dumpResources() []*v3statuspb.ClientConfig_GenericXdsConfig { a.resourcesMu.Lock() defer a.resourcesMu.Unlock() - dump := make(map[string]map[string]xdsresource.UpdateWithMD) + var ret []*v3statuspb.ClientConfig_GenericXdsConfig for rType, resourceStates := range a.resources { - states := make(map[string]xdsresource.UpdateWithMD) + typeURL := rType.TypeURL() for name, state := range resourceStates { var raw *anypb.Any if state.cache != nil { raw = state.cache.Raw() } - states[name] = xdsresource.UpdateWithMD{ - MD: state.md, - Raw: raw, + config := &v3statuspb.ClientConfig_GenericXdsConfig{ + TypeUrl: typeURL, + Name: name, + VersionInfo: state.md.Version, + XdsConfig: raw, + LastUpdated: timestamppb.New(state.md.Timestamp), + ClientStatus: serviceStatusToProto(state.md.Status), + } + if errState := state.md.ErrState; errState != nil { + config.ErrorState = &v3adminpb.UpdateFailureState{ + LastUpdateAttempt: timestamppb.New(errState.Timestamp), + Details: errState.Err.Error(), + VersionInfo: errState.Version, + } } + ret = append(ret, config) } - dump[rType.TypeURL()] = states } - return dump + return ret +} + +func serviceStatusToProto(serviceStatus xdsresource.ServiceStatus) v3adminpb.ClientResourceStatus { + switch serviceStatus { + case xdsresource.ServiceStatusUnknown: + return v3adminpb.ClientResourceStatus_UNKNOWN + case xdsresource.ServiceStatusRequested: + return v3adminpb.ClientResourceStatus_REQUESTED + case xdsresource.ServiceStatusNotExist: + return v3adminpb.ClientResourceStatus_DOES_NOT_EXIST + case xdsresource.ServiceStatusACKed: + return v3adminpb.ClientResourceStatus_ACKED + case xdsresource.ServiceStatusNACKed: + return v3adminpb.ClientResourceStatus_NACKED + default: + return v3adminpb.ClientResourceStatus_UNKNOWN + } } func combineErrors(rType string, topLevelErrors []error, perResourceErrors map[string]error) error { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go index 8f211238f52..144cb5bd768 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go @@ -46,10 +46,6 @@ type XDSClient interface { // the watcher is canceled. Callers need to handle this case. WatchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) (cancel func()) - // DumpResources returns the status of the xDS resources. Returns a map of - // resource type URLs to a map of resource names to resource state. 
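dumpResources above now emits CSDS GenericXdsConfig protos directly, with the cached status translated by serviceStatusToProto. A dependency-free sketch of that enum mapping, using stand-in types for the xdsresource and envoy admin/v3 enums:

package main

import "fmt"

// Stand-ins for xdsresource.ServiceStatus and the CSDS
// ClientResourceStatus enum targeted by the real mapping above.
type serviceStatus int

const (
	statusUnknown serviceStatus = iota
	statusRequested
	statusNotExist
	statusACKed
	statusNACKed
)

type clientResourceStatus int

const (
	crsUnknown clientResourceStatus = iota
	crsRequested
	crsDoesNotExist
	crsACKed
	crsNACKed
)

// toProtoStatus mirrors serviceStatusToProto above: every internal status
// maps to its CSDS counterpart, with UNKNOWN as the safe default.
func toProtoStatus(s serviceStatus) clientResourceStatus {
	switch s {
	case statusRequested:
		return crsRequested
	case statusNotExist:
		return crsDoesNotExist
	case statusACKed:
		return crsACKed
	case statusNACKed:
		return crsNACKed
	default:
		return crsUnknown
	}
}

func main() {
	fmt.Println(toProtoStatus(statusACKed) == crsACKed) // true
}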
- DumpResources() map[string]map[string]xdsresource.UpdateWithMD - ReportLoad(*bootstrap.ServerConfig) (*load.Store, func()) BootstrapConfig() *bootstrap.Config diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go index 81c14e2439f..6097e86925e 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go @@ -19,12 +19,9 @@ package xdsclient import ( - "bytes" "context" - "encoding/json" "fmt" "sync" - "sync/atomic" "time" "google.golang.org/grpc/internal" @@ -34,39 +31,33 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) -// New returns a new xDS client configured by the bootstrap file specified in env -// variable GRPC_XDS_BOOTSTRAP or GRPC_XDS_BOOTSTRAP_CONFIG. -// -// The returned client is a reference counted singleton instance. This function -// creates a new client only when one doesn't already exist. -// -// The second return value represents a close function which releases the -// caller's reference on the returned client. The caller is expected to invoke -// it once they are done using the client. The underlying client will be closed -// only when all references are released, and it is safe for the caller to -// invoke this close function multiple times. -func New() (XDSClient, func(), error) { - return newRefCountedWithConfig(nil) -} +// NameForServer represents the value to be passed as name when creating an xDS +// client from xDS-enabled gRPC servers. This is a well-known dedicated key +// value, and is defined in gRFC A71. +const NameForServer = "#server" -// NewWithConfig returns a new xDS client configured by the given config. +// New returns an xDS client configured with bootstrap configuration specified +// by the ordered list: +// - file name containing the configuration specified by GRPC_XDS_BOOTSTRAP +// - actual configuration specified by GRPC_XDS_BOOTSTRAP_CONFIG +// - fallback configuration set using bootstrap.SetFallbackBootstrapConfig +// +// gRPC client implementations are expected to pass the channel's target URI for +// the name field, while server implementations are expected to pass a dedicated +// well-known value "#server", as specified in gRFC A71. The returned client is +// a reference counted implementation shared among callers using the same name. // // The second return value represents a close function which releases the // caller's reference on the returned client. The caller is expected to invoke // it once they are done using the client. The underlying client will be closed // only when all references are released, and it is safe for the caller to // invoke this close function multiple times. -// -// # Internal/Testing Only -// -// This function should ONLY be used for internal (c2p resolver) and/or testing -// purposese. DO NOT use this elsewhere. Use New() instead. -func NewWithConfig(config *bootstrap.Config) (XDSClient, func(), error) { - return newRefCountedWithConfig(config) +func New(name string) (XDSClient, func(), error) { + return newRefCounted(name, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) } -// newWithConfig returns a new xdsClient with the given config. 
-func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, idleAuthorityDeleteTimeout time.Duration) (*clientImpl, error) { +// newClientImpl returns a new xdsClient with the given config. +func newClientImpl(config *bootstrap.Config, watchExpiryTimeout time.Duration, idleAuthorityDeleteTimeout time.Duration) (*clientImpl, error) { ctx, cancel := context.WithCancel(context.Background()) c := &clientImpl{ done: grpcsync.NewEvent(), @@ -80,12 +71,28 @@ func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, i } c.logger = prefixLogger(c) - c.logger.Infof("Created client to xDS management server: %s", config.XDSServer) return c, nil } -// NewWithConfigForTesting returns an xDS client for the specified bootstrap -// config, separate from the global singleton. +// OptionsForTesting contains options to configure xDS client creation for +// testing purposes only. +type OptionsForTesting struct { + // Name is a unique name for this xDS client. + Name string + // Contents contain a JSON representation of the bootstrap configuration to + // be used when creating the xDS client. + Contents []byte + + // WatchExpiryTimeout is the timeout for xDS resource watch expiry. If + // unspecified, uses the default value used in non-test code. + WatchExpiryTimeout time.Duration + + // AuthorityIdleTimeout is the timeout before idle authorities are deleted. + // If unspecified, uses the default value used in non-test code. + AuthorityIdleTimeout time.Duration +} + +// NewForTesting returns an xDS client configured with the provided options. // // The second return value represents a close function which the caller is // expected to invoke once they are done using the client. It is safe for the @@ -94,28 +101,25 @@ func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, i // # Testing Only // // This function should ONLY be used for testing purposes. -// TODO(easwars): Document the new close func. 
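Since the xdsclient package is internal to the gRPC module, the following sketch compiles only from within grpc-go itself; it merely illustrates the name-based New API described in the doc comment above, and the target URI is hypothetical:

package main

import (
	"log"

	"google.golang.org/grpc/xds/internal/xdsclient"
)

func main() {
	// gRPC channels pass their target URI as the name; xDS-enabled servers
	// would pass xdsclient.NameForServer ("#server") instead, per gRFC A71.
	client, release, err := xdsclient.New("xds:///inventory.example.com")
	if err != nil {
		log.Fatalf("Failed to create xDS client: %v", err)
	}
	// The client is shared: a second New() with the same name returns the
	// same instance. Release is safe to call multiple times; the underlying
	// client shuts down only when the last reference is gone.
	defer release()

	_ = client // e.g. client.WatchResource(...)
}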
-func NewWithConfigForTesting(config *bootstrap.Config, watchExpiryTimeout, authorityIdleTimeout time.Duration) (XDSClient, func(), error) { - cl, err := newWithConfig(config, watchExpiryTimeout, authorityIdleTimeout) - if err != nil { - return nil, nil, err +func NewForTesting(opts OptionsForTesting) (XDSClient, func(), error) { + if opts.Name == "" { + return nil, nil, fmt.Errorf("opts.Name field must be non-empty") + } + if opts.WatchExpiryTimeout == 0 { + opts.WatchExpiryTimeout = defaultWatchExpiryTimeout + } + if opts.AuthorityIdleTimeout == 0 { + opts.AuthorityIdleTimeout = defaultIdleAuthorityDeleteTimeout } - return cl, grpcsync.OnceFunc(cl.close), nil -} - -func init() { - internal.TriggerXDSResourceNameNotFoundClient = triggerXDSResourceNameNotFoundClient -} - -var singletonClientForTesting = atomic.Pointer[clientRefCounted]{} -func triggerXDSResourceNameNotFoundClient(resourceType, resourceName string) error { - c := singletonClientForTesting.Load() - return internal.TriggerXDSResourceNameNotFoundForTesting.(func(func(xdsresource.Type, string) error, string, string) error)(c.clientImpl.triggerResourceNotFoundForTesting, resourceType, resourceName) + if err := bootstrap.SetFallbackBootstrapConfig(opts.Contents); err != nil { + return nil, nil, err + } + client, cancel, err := newRefCounted(opts.Name, opts.WatchExpiryTimeout, opts.AuthorityIdleTimeout) + return client, func() { bootstrap.UnsetFallbackBootstrapConfigForTesting(); cancel() }, err } -// NewWithBootstrapContentsForTesting returns an xDS client for this config, -// separate from the global singleton. +// GetForTesting returns an xDS client created earlier using the given name. // // The second return value represents a close function which the caller is // expected to invoke once they are done using the client. It is safe for the @@ -124,56 +128,28 @@ func triggerXDSResourceNameNotFoundClient(resourceType, resourceName string) err // # Testing Only // // This function should ONLY be used for testing purposes. -func NewWithBootstrapContentsForTesting(contents []byte) (XDSClient, func(), error) { - // Normalize the contents - buf := bytes.Buffer{} - err := json.Indent(&buf, contents, "", "") - if err != nil { - return nil, nil, fmt.Errorf("xds: error normalizing JSON: %v", err) - } - contents = bytes.TrimSpace(buf.Bytes()) - - c, err := getOrMakeClientForTesting(contents) - if err != nil { - return nil, nil, err - } - singletonClientForTesting.Store(c) - return c, grpcsync.OnceFunc(func() { - clientsMu.Lock() - defer clientsMu.Unlock() - if c.decrRef() == 0 { - c.close() - delete(clients, string(contents)) - singletonClientForTesting.Store(nil) - } - }), nil -} - -// getOrMakeClientForTesting creates a new reference counted client (separate -// from the global singleton) for the given config, or returns an existing one. -// It takes care of incrementing the reference count for the returned client, -// and leaves the caller responsible for decrementing the reference count once -// the client is no longer needed. 
-func getOrMakeClientForTesting(config []byte) (*clientRefCounted, error) { +func GetForTesting(name string) (XDSClient, func(), error) { clientsMu.Lock() defer clientsMu.Unlock() - if c := clients[string(config)]; c != nil { - c.incrRef() - return c, nil + c, ok := clients[name] + if !ok { + return nil, nil, fmt.Errorf("xDS client with name %q not found", name) } + c.incrRef() + return c, grpcsync.OnceFunc(func() { clientRefCountedClose(name) }), nil +} - bcfg, err := bootstrap.NewConfigFromContents(config) - if err != nil { - return nil, fmt.Errorf("bootstrap config %s: %v", string(config), err) - } - cImpl, err := newWithConfig(bcfg, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) - if err != nil { - return nil, fmt.Errorf("creating xDS client: %v", err) +func init() { + internal.TriggerXDSResourceNotFoundForTesting = triggerXDSResourceNotFoundForTesting +} + +func triggerXDSResourceNotFoundForTesting(client XDSClient, typ xdsresource.Type, name string) error { + crc, ok := client.(*clientRefCounted) + if !ok { + return fmt.Errorf("xDS client is of type %T, want %T", client, &clientRefCounted{}) } - c := &clientRefCounted{clientImpl: cImpl, refCount: 1} - clients[string(config)] = c - return c, nil + return crc.clientImpl.triggerResourceNotFoundForTesting(typ, name) } var ( diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go new file mode 100644 index 00000000000..1efb4de42eb --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go @@ -0,0 +1,104 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient + +import ( + "fmt" + "sync/atomic" + "time" + + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/xds/bootstrap" +) + +const ( + defaultWatchExpiryTimeout = 15 * time.Second + defaultIdleAuthorityDeleteTimeout = 5 * time.Minute +) + +var ( + // The following functions are no-ops in the actual code, but can be + // overridden in tests to give them visibility into certain events. + xdsClientImplCreateHook = func(string) {} + xdsClientImplCloseHook = func(string) {} +) + +func clientRefCountedClose(name string) { + clientsMu.Lock() + defer clientsMu.Unlock() + + client, ok := clients[name] + if !ok { + logger.Errorf("Attempt to close a non-existent xDS client with name %s", name) + return + } + if client.decrRef() != 0 { + return + } + client.clientImpl.close() + xdsClientImplCloseHook(name) + delete(clients, name) + +} + +// newRefCounted creates a new reference counted xDS client implementation for +// name, if one does not exist already. If an xDS client for the given name +// exists, it gets a reference to it and returns it. 
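A hedged sketch of how a test might use the NewForTesting entry point: the helper name and the minimal bootstrap JSON below are invented for the example, and the package is importable only inside grpc-go:

package xdstest

import (
	"testing"

	"google.golang.org/grpc/xds/internal/xdsclient"
)

func setupXDSClient(t *testing.T) xdsclient.XDSClient {
	t.Helper()
	// Assumed minimal bootstrap contents; NewForTesting installs them as the
	// process-wide fallback configuration and unsets them again on cleanup.
	contents := []byte(`{
	  "xds_servers": [{"server_uri": "localhost:9999", "channel_creds": [{"type": "insecure"}]}],
	  "node": {"id": "test-node"}
	}`)
	client, cleanup, err := xdsclient.NewForTesting(xdsclient.OptionsForTesting{
		Name:     t.Name(), // unique per test; the Name field is required
		Contents: contents,
	})
	if err != nil {
		t.Fatalf("xdsclient.NewForTesting() failed: %v", err)
	}
	t.Cleanup(cleanup)
	return client
}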
+func newRefCounted(name string, watchExpiryTimeout, idleAuthorityTimeout time.Duration) (XDSClient, func(), error) { + clientsMu.Lock() + defer clientsMu.Unlock() + + if c := clients[name]; c != nil { + c.incrRef() + return c, grpcsync.OnceFunc(func() { clientRefCountedClose(name) }), nil + } + + // Create the new client implementation. + config, err := bootstrap.GetConfiguration() + if err != nil { + return nil, nil, fmt.Errorf("xds: failed to get xDS bootstrap config: %v", err) + } + c, err := newClientImpl(config, watchExpiryTimeout, idleAuthorityTimeout) + if err != nil { + return nil, nil, err + } + c.logger.Infof("Created client with name %q and bootstrap configuration:\n %s", name, config) + client := &clientRefCounted{clientImpl: c, refCount: 1} + clients[name] = client + xdsClientImplCreateHook(name) + + logger.Infof("xDS node ID: %s", config.Node().GetId()) + return client, grpcsync.OnceFunc(func() { clientRefCountedClose(name) }), nil +} + +// clientRefCounted is ref-counted, and to be shared by the xds resolver and +// balancer implementations, across multiple ClientConns and Servers. +type clientRefCounted struct { + *clientImpl + + refCount int32 // accessed atomically +} + +func (c *clientRefCounted) incrRef() int32 { + return atomic.AddInt32(&c.refCount, 1) +} + +func (c *clientRefCounted) decrRef() int32 { + return atomic.AddInt32(&c.refCount, -1) +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go index 7321250d6ab..9f619016a08 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go @@ -85,17 +85,17 @@ func (c *clientImpl) close() { c.authorityMu.Unlock() c.serializerClose() - for _, f := range c.config.XDSServer.Cleanups { - f() - } - for _, a := range c.config.Authorities { - if a.XDSServer == nil { - // The server for this authority is the top-level one, cleaned up above. 
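Stripped of the xDS specifics, client_refcounted.go is a named-singleton pattern: a mutex-guarded map, an atomic per-entry reference count, and a once-wrapped release. A self-contained sketch of just that pattern, using the standard library's sync.OnceFunc (Go 1.21+) where the patch uses grpcsync.OnceFunc:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// refCounted is a minimal stand-in for the clientRefCounted type: a named
// instance plus an atomically accessed reference count.
type refCounted struct {
	name     string
	refCount int32
}

var (
	mu        sync.Mutex
	instances = map[string]*refCounted{}
)

// acquire returns the instance for name, creating it on first use, and a
// release function that is safe to call multiple times.
func acquire(name string) (*refCounted, func()) {
	mu.Lock()
	defer mu.Unlock()
	c, ok := instances[name]
	if !ok {
		c = &refCounted{name: name}
		instances[name] = c
	}
	atomic.AddInt32(&c.refCount, 1)
	release := sync.OnceFunc(func() {
		mu.Lock()
		defer mu.Unlock()
		if atomic.AddInt32(&c.refCount, -1) == 0 {
			delete(instances, name)
			fmt.Printf("closed %q\n", c.name)
		}
	})
	return c, release
}

func main() {
	a, releaseA := acquire("foo")
	b, releaseB := acquire("foo")
	fmt.Println(a == b) // true: same underlying instance
	releaseA()
	releaseB() // last reference released: instance is closed and removed
}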
- continue - } - for _, f := range a.XDSServer.Cleanups { + for _, s := range c.config.XDSServers() { + for _, f := range s.Cleanups() { f() } } + for _, a := range c.config.Authorities() { + for _, s := range a.XDSServers { + for _, f := range s.Cleanups() { + f() + } + } + } c.logger.Infof("Shutdown") } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go index 69db79ee891..1ce20fabdf8 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go @@ -45,14 +45,18 @@ func (c *clientImpl) findAuthority(n *xdsresource.Name) (*authority, func(), err return nil, nil, errors.New("the xds-client is closed") } - config := c.config.XDSServer + config := c.config.XDSServers()[0] if scheme == xdsresource.FederationScheme { - cfg, ok := c.config.Authorities[authority] + authorities := c.config.Authorities() + if authorities == nil { + return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority) + } + cfg, ok := authorities[authority] if !ok { return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority) } - if cfg.XDSServer != nil { - config = cfg.XDSServer + if len(cfg.XDSServers) >= 1 { + config = cfg.XDSServers[0] } } @@ -110,7 +114,7 @@ func (c *clientImpl) newAuthorityLocked(config *bootstrap.ServerConfig) (_ *auth serializer: c.serializer, resourceTypeGetter: c.resourceTypes.get, watchExpiryTimeout: c.watchExpiryTimeout, - logger: grpclog.NewPrefixLogger(logger, authorityPrefix(c, config.ServerURI)), + logger: grpclog.NewPrefixLogger(logger, authorityPrefix(c, config.ServerURI())), }) if err != nil { return nil, fmt.Errorf("creating new authority for config %q: %v", config.String(), err) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go index b9d0499301a..f4d7b0a0115 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go @@ -19,35 +19,35 @@ package xdsclient import ( - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" ) -func appendMaps(dst, src map[string]map[string]xdsresource.UpdateWithMD) { - // Iterate through the resource types. - for rType, srcResources := range src { - // Lookup/create the resource type specific map in the destination. - dstResources := dst[rType] - if dstResources == nil { - dstResources = make(map[string]xdsresource.UpdateWithMD) - dst[rType] = dstResources - } +// dumpResources returns the status and contents of all xDS resources. +func (c *clientImpl) dumpResources() *v3statuspb.ClientConfig { + c.authorityMu.Lock() + defer c.authorityMu.Unlock() + + var retCfg []*v3statuspb.ClientConfig_GenericXdsConfig + for _, a := range c.authorities { + retCfg = append(retCfg, a.dumpResources()...) + } - // Iterate through the resources within the resource type in the source, - // and copy them over to the destination. 
- for name, update := range srcResources { - dstResources[name] = update - } + return &v3statuspb.ClientConfig{ + Node: c.config.Node(), + GenericXdsConfigs: retCfg, } } // DumpResources returns the status and contents of all xDS resources. -func (c *clientImpl) DumpResources() map[string]map[string]xdsresource.UpdateWithMD { - c.authorityMu.Lock() - defer c.authorityMu.Unlock() - dumps := make(map[string]map[string]xdsresource.UpdateWithMD) - for _, a := range c.authorities { - dump := a.dumpResources() - appendMaps(dumps, dump) +func DumpResources() *v3statuspb.ClientStatusResponse { + clientsMu.Lock() + defer clientsMu.Unlock() + + resp := &v3statuspb.ClientStatusResponse{} + for key, client := range clients { + cfg := client.dumpResources() + cfg.ClientScope = key + resp.Config = append(resp.Config, cfg) } - return dumps + return resp } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go index ff2f5e9d672..b42e43a5697 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go @@ -32,7 +32,7 @@ func (c *clientImpl) ReportLoad(server *bootstrap.ServerConfig) (*load.Store, fu a, err := c.newAuthorityLocked(server) if err != nil { c.authorityMu.Unlock() - c.logger.Infof("xds: failed to connect to the control plane to do load reporting for authority %q: %v", server, err) + c.logger.Warningf("Failed to connect to the management server to report load for authority %q: %v", server, err) return nil, func() {} } // Hold the ref before starting load reporting. diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go index f64124dad64..b9af85db63a 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go @@ -44,7 +44,7 @@ func (c *clientImpl) WatchResource(rType xdsresource.Type, resourceName string, if err := c.resourceTypes.maybeRegister(rType); err != nil { logger.Warningf("Watch registered for name %q of type %q which is already registered", rType.TypeName(), resourceName) - c.serializer.Schedule(func(context.Context) { watcher.OnError(err) }) + c.serializer.TrySchedule(func(context.Context) { watcher.OnError(err, func() {}) }) return func() {} } @@ -54,7 +54,7 @@ func (c *clientImpl) WatchResource(rType xdsresource.Type, resourceName string, a, unref, err := c.findAuthority(n) if err != nil { logger.Warningf("Watch registered for name %q of type %q, authority %q is not found", rType.TypeName(), resourceName, n.Authority) - c.serializer.Schedule(func(context.Context) { watcher.OnError(err) }) + c.serializer.TrySchedule(func(context.Context) { watcher.OnError(err, func() {}) }) return func() {} } cancelF := a.watchResource(rType, n.String(), watcher) @@ -96,7 +96,6 @@ func (r *resourceTypeRegistry) maybeRegister(rType xdsresource.Type) error { } func (c *clientImpl) triggerResourceNotFoundForTesting(rType xdsresource.Type, resourceName string) error { - // Return early if the client is already closed. 
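With DumpResources now returning a CSDS ClientStatusResponse, a debugging hook can serialize the aggregate view directly. An illustrative sketch (again compilable only inside grpc-go, since the package is internal):

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/encoding/protojson"

	"google.golang.org/grpc/xds/internal/xdsclient"
)

func main() {
	// One ClientConfig per live xDS client, keyed by client name through the
	// client_scope field set in the loop above.
	resp := xdsclient.DumpResources()
	out, err := protojson.MarshalOptions{Multiline: true}.Marshal(resp)
	if err != nil {
		log.Fatalf("Failed to marshal ClientStatusResponse: %v", err)
	}
	fmt.Println(string(out))
}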
if c == nil || c.done.HasFired() {
		return fmt.Errorf("attempt to trigger resource-not-found-error for resource %q of type %q, but client is closed", rType.TypeName(), resourceName)
	}
diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go
new file mode 100644
index 00000000000..e1261074410
--- /dev/null
+++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go
@@ -0,0 +1,25 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package internal contains functionality internal to the xdsclient package.
+package internal
+
+// The following vars can be overridden by tests.
+var (
+	// NewADSStream is a function that returns a new ADS stream.
+	NewADSStream any // func(context.Context, *grpc.ClientConn) (v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient, error)
+)
diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go
index 1f266ae2018..f1e265ee7dd 100644
--- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go
+++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go
@@ -174,6 +174,7 @@ func (ls *perClusterStore) CallStarted(locality string) {
 		p, _ = ls.localityRPCCount.LoadOrStore(locality, tp)
 	}
 	p.(*rpcCountData).incrInProgress()
+	p.(*rpcCountData).incrIssued()
 }

 // CallFinished adds one call finished record for the given locality.
@@ -248,6 +249,8 @@ type RequestData struct {
 	Errored uint64
 	// InProgress is the number of requests in flight.
 	InProgress uint64
+	// Issued is the total number of requests that were sent.
+	Issued uint64
 }

 // ServerLoadData contains server load data.
@@ -296,7 +299,8 @@ func (ls *perClusterStore) stats() *Data {
 		succeeded := countData.loadAndClearSucceeded()
 		inProgress := countData.loadInProgress()
 		errored := countData.loadAndClearErrored()
-		if succeeded == 0 && inProgress == 0 && errored == 0 {
+		issued := countData.loadAndClearIssued()
+		if succeeded == 0 && inProgress == 0 && errored == 0 && issued == 0 {
 			return true
 		}

@@ -305,6 +309,7 @@
 			Succeeded:  succeeded,
 			Errored:    errored,
 			InProgress: inProgress,
+			Issued:     issued,
 		},
 		LoadStats: make(map[string]ServerLoadData),
 	}
@@ -339,6 +344,7 @@ type rpcCountData struct {
 	succeeded  *uint64
 	errored    *uint64
 	inProgress *uint64
+	issued     *uint64

 	// Map from load desc to load data (sum+count).
Loading data from map is // atomic, but updating data takes a lock, which could cause contention when @@ -353,6 +359,7 @@ func newRPCCountData() *rpcCountData { succeeded: new(uint64), errored: new(uint64), inProgress: new(uint64), + issued: new(uint64), } } @@ -384,6 +391,14 @@ func (rcd *rpcCountData) loadInProgress() uint64 { return atomic.LoadUint64(rcd.inProgress) // InProgress count is not clear when reading. } +func (rcd *rpcCountData) incrIssued() { + atomic.AddUint64(rcd.issued, 1) +} + +func (rcd *rpcCountData) loadAndClearIssued() uint64 { + return atomic.SwapUint64(rcd.issued, 0) +} + func (rcd *rpcCountData) addServerLoad(name string, d float64) { loads, ok := rcd.serverLoads.Load(name) if !ok { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go deleted file mode 100644 index f981bfebb58..00000000000 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go +++ /dev/null @@ -1,115 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package xdsclient - -import ( - "fmt" - "sync" - "sync/atomic" - "time" - - "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/xds/bootstrap" -) - -const ( - defaultWatchExpiryTimeout = 15 * time.Second - defaultIdleAuthorityDeleteTimeout = 5 * time.Minute -) - -var ( - // This is the client returned by New(). It contains one client implementation, - // and maintains the refcount. - singletonMu sync.Mutex - singletonClient *clientRefCounted - - // The following functions are no-ops in the actual code, but can be - // overridden in tests to give them visibility into certain events. - singletonClientImplCreateHook = func() {} - singletonClientImplCloseHook = func() {} -) - -// To override in tests. -var bootstrapNewConfig = bootstrap.NewConfig - -func clientRefCountedClose() { - singletonMu.Lock() - defer singletonMu.Unlock() - - if singletonClient.decrRef() != 0 { - return - } - singletonClient.clientImpl.close() - singletonClientImplCloseHook() - singletonClient = nil -} - -func newRefCountedWithConfig(fallbackConfig *bootstrap.Config) (XDSClient, func(), error) { - singletonMu.Lock() - defer singletonMu.Unlock() - - if singletonClient != nil { - singletonClient.incrRef() - return singletonClient, grpcsync.OnceFunc(clientRefCountedClose), nil - - } - - // Use fallbackConfig only if bootstrap env vars are unspecified. 
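The Issued counter added to the load store follows its existing split between delta counters, which are cleared at each report via an atomic swap, and the in-progress gauge, which is only loaded. A minimal standalone sketch of those semantics:

package main

import (
	"fmt"
	"sync/atomic"
)

// counters mirrors the per-locality counters in load/store.go: issued is
// reported as a delta since the last report, while inProgress is a gauge.
type counters struct {
	inProgress uint64
	issued     uint64
}

func (c *counters) callStarted() {
	atomic.AddUint64(&c.inProgress, 1)
	atomic.AddUint64(&c.issued, 1) // total requests sent since the last report
}

func (c *counters) callFinished() {
	atomic.AddUint64(&c.inProgress, ^uint64(0)) // atomic decrement
}

func (c *counters) report() (inProgress, issued uint64) {
	// The swap clears the delta; the load leaves the gauge untouched.
	return atomic.LoadUint64(&c.inProgress), atomic.SwapUint64(&c.issued, 0)
}

func main() {
	var c counters
	c.callStarted()
	c.callStarted()
	c.callFinished()
	ip, issued := c.report()
	fmt.Println(ip, issued) // 1 2
	ip, issued = c.report()
	fmt.Println(ip, issued) // 1 0: issued resets each report, in-progress does not
}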
- var config *bootstrap.Config - if envconfig.XDSBootstrapFileName == "" && envconfig.XDSBootstrapFileContent == "" { - if fallbackConfig == nil { - return nil, nil, fmt.Errorf("xds: bootstrap env vars are unspecified and provided fallback config is nil") - } - config = fallbackConfig - } else { - var err error - config, err = bootstrapNewConfig() - if err != nil { - return nil, nil, fmt.Errorf("xds: failed to read bootstrap file: %v", err) - } - } - - // Create the new client implementation. - c, err := newWithConfig(config, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) - if err != nil { - return nil, nil, err - } - singletonClient = &clientRefCounted{clientImpl: c, refCount: 1} - singletonClientImplCreateHook() - - logger.Infof("xDS node ID: %s", config.NodeProto.GetId()) - return singletonClient, grpcsync.OnceFunc(clientRefCountedClose), nil -} - -// clientRefCounted is ref-counted, and to be shared by the xds resolver and -// balancer implementations, across multiple ClientConns and Servers. -type clientRefCounted struct { - *clientImpl - - refCount int32 // accessed atomically -} - -func (c *clientRefCounted) incrRef() int32 { - return atomic.AddInt32(&c.refCount, 1) -} - -func (c *clientRefCounted) decrRef() int32 { - return atomic.AddInt32(&c.refCount, -1) -} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go new file mode 100644 index 00000000000..9acc33cbbf8 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go @@ -0,0 +1,25 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package internal contains functionality internal to the transport package. +package internal + +// The following vars can be overridden by tests. +var ( + // GRPCNewClient creates a new gRPC Client. 
+ GRPCNewClient any // func(string, ...grpc.DialOption) (*grpc.ClientConn, error) +) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go index 289fd62cbc7..e47fdd9846b 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go @@ -223,6 +223,7 @@ func (t *Transport) sendLoadStatsRequest(stream lrsStream, loads []*load.Data) e TotalSuccessfulRequests: localityData.RequestStats.Succeeded, TotalRequestsInProgress: localityData.RequestStats.InProgress, TotalErrorRequests: localityData.RequestStats.Errored, + TotalIssuedRequests: localityData.RequestStats.Issued, LoadMetricStats: loadMetricStats, UpstreamEndpointStats: nil, // TODO: populate for per endpoint loads. }) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go index 421ba78074c..0bc0d386802 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go @@ -24,6 +24,7 @@ import ( "errors" "fmt" "sync" + "sync/atomic" "time" "google.golang.org/grpc" @@ -35,7 +36,9 @@ import ( "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/xds/bootstrap" "google.golang.org/grpc/keepalive" + xdsclientinternal "google.golang.org/grpc/xds/internal/xdsclient/internal" "google.golang.org/grpc/xds/internal/xdsclient/load" + transportinternal "google.golang.org/grpc/xds/internal/xdsclient/transport/internal" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/types/known/anypb" @@ -45,17 +48,23 @@ import ( statuspb "google.golang.org/genproto/googleapis/rpc/status" ) +type adsStream = v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient + +func init() { + transportinternal.GRPCNewClient = grpc.NewClient + xdsclientinternal.NewADSStream = func(ctx context.Context, cc *grpc.ClientConn) (adsStream, error) { + return v3adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx) + } +} + // Any per-RPC level logs which print complete request or response messages // should be gated at this verbosity level. Other per-RPC level logs which print -// terse output should be at `INFO` and verbosity 2, which corresponds to using -// the `Debugf` method on the logger. +// terse output should be at `INFO` and verbosity 2. const perRPCVerbosityLevel = 9 -type adsStream = v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient - // Transport provides a resource-type agnostic implementation of the xDS // transport protocol. At this layer, resource contents are supposed to be -// opaque blobs which should be be meaningful only to the xDS data model layer +// opaque blobs which should be meaningful only to the xDS data model layer // which is implemented by the `xdsresource` package. // // Under the hood, it owns the gRPC connection to a single management server and @@ -77,7 +86,7 @@ type Transport struct { lrsRunnerDoneCh chan struct{} // To notify exit of LRS goroutine. 
// These channels enable synchronization amongst the different goroutines - // spawned by the transport, and between asynchorous events resulting from + // spawned by the transport, and between asynchronous events resulting from // receipt of responses from the management server. adsStreamCh chan adsStream // New ADS streams are pushed here. adsRequestCh *buffer.Unbounded // Resource and ack requests are pushed here. @@ -112,7 +121,10 @@ type Transport struct { // cause the transport layer to send an ACK to the management server. A non-nil // error is returned from this function when the data model layer believes // otherwise, and this will cause the transport layer to send a NACK. -type OnRecvHandlerFunc func(update ResourceUpdate) error +// +// The implementation is expected to invoke onDone when local processing of the +// update is complete, i.e. it is consumed by all watchers. +type OnRecvHandlerFunc func(update ResourceUpdate, onDone func()) error // OnSendHandlerFunc is the implementation at the authority, which handles state // changes for the resource watch and stop watch timers accordingly. @@ -135,7 +147,7 @@ type ResourceUpdate struct { type Options struct { // ServerCfg contains all the configuration required to connect to the xDS // management server. - ServerCfg bootstrap.ServerConfig + ServerCfg *bootstrap.ServerConfig // OnRecvHandler is the component which makes ACK/NACK decisions based on // the received resources. // @@ -169,16 +181,9 @@ type Options struct { NodeProto *v3corepb.Node } -// For overriding in unit tests. -var grpcDial = grpc.Dial - // New creates a new Transport. func New(opts Options) (*Transport, error) { switch { - case opts.ServerCfg.ServerURI == "": - return nil, errors.New("missing server URI when creating a new transport") - case opts.ServerCfg.CredsDialOption() == nil: - return nil, errors.New("missing credentials when creating a new transport") case opts.OnRecvHandler == nil: return nil, errors.New("missing OnRecv callback handler when creating a new transport") case opts.OnErrorHandler == nil: @@ -197,11 +202,13 @@ func New(opts Options) (*Transport, error) { Timeout: 20 * time.Second, }), } - cc, err := grpcDial(opts.ServerCfg.ServerURI, dopts...) + grpcNewClient := transportinternal.GRPCNewClient.(func(string, ...grpc.DialOption) (*grpc.ClientConn, error)) + cc, err := grpcNewClient(opts.ServerCfg.ServerURI(), dopts...) if err != nil { // An error from a non-blocking dial indicates something serious. - return nil, fmt.Errorf("failed to create a transport to the management server %q: %v", opts.ServerCfg.ServerURI, err) + return nil, fmt.Errorf("failed to create a transport to the management server %q: %v", opts.ServerCfg.ServerURI(), err) } + cc.Connect() boff := opts.Backoff if boff == nil { @@ -209,7 +216,7 @@ func New(opts Options) (*Transport, error) { } ret := &Transport{ cc: cc, - serverURI: opts.ServerCfg.ServerURI, + serverURI: opts.ServerCfg.ServerURI(), onRecvHandler: opts.OnRecvHandler, onErrorHandler: opts.OnErrorHandler, onSendHandler: opts.OnSendHandler, @@ -263,12 +270,6 @@ func (t *Transport) SendRequest(url string, resources []string) { }) } -func (t *Transport) newAggregatedDiscoveryServiceStream(ctx context.Context, cc *grpc.ClientConn) (adsStream, error) { - // The transport retries the stream with an exponential backoff whenever the - // stream breaks without ever having seen a response. 
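The onDone parameter threaded through OnRecvHandlerFunc is what lets the flow-control changes further down block the next stream read until an update is fully consumed: the handler must invoke it exactly once, after the last watcher finishes. A minimal sketch of that fan-out, with invented types:

package main

import (
	"fmt"
	"sync"
)

// fanOut delivers an update to every watcher and arranges for onDone to be
// called exactly once, after the last watcher reports completion.
func fanOut(update string, watchers []func(update string, done func()), onDone func()) {
	var wg sync.WaitGroup
	for _, w := range watchers {
		wg.Add(1)
		w(update, wg.Done)
	}
	go func() {
		wg.Wait()
		onDone()
	}()
}

func main() {
	watcher := func(name string) func(string, func()) {
		return func(update string, done func()) {
			defer done()
			fmt.Printf("%s consumed %q\n", name, update)
		}
	}
	done := make(chan struct{})
	fanOut("listener-v1",
		[]func(string, func()){watcher("w1"), watcher("w2")},
		func() { close(done) })
	<-done
	fmt.Println("all watchers done; safe to read the next ADS message")
}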
- return v3adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx) -} - // ResourceSendInfo wraps the names and url of resources sent to the management // server. This is used by the `authority` type to start/stop the watch timer // associated with every resource in the update. @@ -298,7 +299,9 @@ func (t *Transport) sendAggregatedDiscoveryServiceRequest(stream adsStream, send if t.logger.V(perRPCVerbosityLevel) { t.logger.Infof("ADS request sent: %v", pretty.ToJSON(req)) } else { - t.logger.Debugf("ADS request sent for type %q, resources: %v, version %q, nonce %q", resourceURL, resourceNames, version, nonce) + if t.logger.V(2) { + t.logger.Infof("ADS request sent for type %q, resources: %v, version %q, nonce %q", resourceURL, resourceNames, version, nonce) + } } t.onSendHandler(&ResourceSendInfo{URL: resourceURL, ResourceNames: resourceNames}) return nil @@ -311,8 +314,8 @@ func (t *Transport) recvAggregatedDiscoveryServiceResponse(stream adsStream) (re } if t.logger.V(perRPCVerbosityLevel) { t.logger.Infof("ADS response received: %v", pretty.ToJSON(resp)) - } else { - t.logger.Debugf("ADS response received for type %q, version %q, nonce %q", resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce()) + } else if t.logger.V(2) { + t.logger.Infof("ADS response received for type %q, version %q, nonce %q", resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce()) } return resp.GetResources(), resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce(), nil } @@ -328,7 +331,8 @@ func (t *Transport) adsRunner(ctx context.Context) { // We reset backoff state when we successfully receive at least one // message from the server. runStreamWithBackoff := func() error { - stream, err := t.newAggregatedDiscoveryServiceStream(ctx, t.cc) + newStream := xdsclientinternal.NewADSStream.(func(context.Context, *grpc.ClientConn) (adsStream, error)) + stream, err := newStream(ctx, t.cc) if err != nil { t.onErrorHandler(err) t.logger.Warningf("Creating new ADS stream failed: %v", err) @@ -341,7 +345,7 @@ func (t *Transport) adsRunner(ctx context.Context) { default: } t.adsStreamCh <- stream - msgReceived := t.recv(stream) + msgReceived := t.recv(ctx, stream) if msgReceived { return backoff.ErrResetBackoff } @@ -461,9 +465,21 @@ func (t *Transport) sendExisting(stream adsStream) (sentNodeProto bool, err erro // recv receives xDS responses on the provided ADS stream and branches out to // message specific handlers. Returns true if at least one message was // successfully received. -func (t *Transport) recv(stream adsStream) bool { +func (t *Transport) recv(ctx context.Context, stream adsStream) bool { + // Initialize the flow control quota for the stream. This helps to block the + // next read until the previous one is consumed by all watchers. + fc := newADSFlowControl() + msgReceived := false for { + // Wait for ADS stream level flow control to be available. 
+ if !fc.wait(ctx) { + if t.logger.V(2) { + t.logger.Infof("ADS stream context canceled") + } + return msgReceived + } + resources, url, rVersion, nonce, err := t.recvAggregatedDiscoveryServiceResponse(stream) if err != nil { // Note that we do not consider it an error if the ADS stream was closed @@ -481,12 +497,13 @@ func (t *Transport) recv(stream adsStream) bool { } msgReceived = true - err = t.onRecvHandler(ResourceUpdate{ + u := ResourceUpdate{ Resources: resources, URL: url, Version: rVersion, - }) - if xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceTypeUnsupported { + } + fc.setPending() + if err = t.onRecvHandler(u, fc.onDone); xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceTypeUnsupported { t.logger.Warningf("%v", err) continue } @@ -512,7 +529,9 @@ func (t *Transport) recv(stream adsStream) bool { stream: stream, version: rVersion, }) - t.logger.Debugf("Sending ACK for resource type: %q, version: %q, nonce: %q", url, rVersion, nonce) + if t.logger.V(2) { + t.logger.Infof("Sending ACK for resource type: %q, version: %q, nonce: %q", url, rVersion, nonce) + } } } @@ -618,3 +637,68 @@ func (t *Transport) Close() { func (t *Transport) ChannelConnectivityStateForTesting() connectivity.State { return t.cc.GetState() } + +// adsFlowControl implements ADS stream level flow control that enables the +// transport to block the reading of the next message off of the stream until +// the previous update is consumed by all watchers. +// +// The lifetime of the flow control is tied to the lifetime of the stream. +type adsFlowControl struct { + logger *grpclog.PrefixLogger + + // Whether the most recent update is pending consumption by all watchers. + pending atomic.Bool + // Channel used to notify when all the watchers have consumed the most + // recent update. Wait() blocks on reading a value from this channel. + readyCh chan struct{} +} + +// newADSFlowControl returns a new adsFlowControl. +func newADSFlowControl() *adsFlowControl { + return &adsFlowControl{readyCh: make(chan struct{}, 1)} +} + +// setPending changes the internal state to indicate that there is an update +// pending consumption by all watchers. +func (fc *adsFlowControl) setPending() { + fc.pending.Store(true) +} + +// wait blocks until all the watchers have consumed the most recent update and +// returns true. If the context expires before that, it returns false. +func (fc *adsFlowControl) wait(ctx context.Context) bool { + // If there is no pending update, there is no need to block. + if !fc.pending.Load() { + // If all watchers finished processing the most recent update before the + // `recv` goroutine made the next call to `Wait()`, there would be an + // entry in the readyCh channel that needs to be drained to ensure that + // the next call to `Wait()` doesn't unblock before it actually should. + select { + case <-fc.readyCh: + default: + } + return true + } + + select { + case <-ctx.Done(): + return false + case <-fc.readyCh: + return true + } +} + +// onDone indicates that all watchers have consumed the most recent update. +func (fc *adsFlowControl) onDone() { + fc.pending.Store(false) + + select { + // Writes to the readyCh channel should not block ideally. The default + // branch here is to appease the paranoid mind. 
+ case fc.readyCh <- struct{}{}: + default: + if fc.logger.V(2) { + fc.logger.Infof("ADS stream flow control readyCh is full") + } + } +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter/converter.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter/converter.go index 076ae8644f8..3c48f1bdea3 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter/converter.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter/converter.go @@ -27,9 +27,9 @@ import ( "fmt" "strings" - "google.golang.org/grpc" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/leastrequest" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/balancer/weightedroundrobin" "google.golang.org/grpc/internal/envconfig" @@ -110,7 +110,7 @@ func convertPickFirstProtoToServiceConfig(rawProto []byte, _ int) (json.RawMessa if err != nil { return nil, fmt.Errorf("error marshaling JSON for type %T: %v", pfCfg, err) } - return makeBalancerConfigJSON(grpc.PickFirstBalancerName, js), nil + return makeBalancerConfigJSON(pickfirst.Name, js), nil } func convertRoundRobinProtoToServiceConfig([]byte, int) (json.RawMessage, error) { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go index 5ac7f031223..18d47cbc101 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go @@ -111,7 +111,7 @@ func (c *ClusterResourceData) Raw() *anypb.Any { // corresponding to the cluster resource being watched. type ClusterWatcher interface { // OnUpdate is invoked to report an update for the resource being watched. - OnUpdate(*ClusterResourceData) + OnUpdate(*ClusterResourceData, OnDoneFunc) // OnError is invoked under different error conditions including but not // limited to the following: @@ -121,28 +121,28 @@ type ClusterWatcher interface { // - resource validation error // - ADS stream failure // - connection failure - OnError(error) + OnError(error, OnDoneFunc) // OnResourceDoesNotExist is invoked for a specific error condition where // the requested resource is not found on the xDS management server. 
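Every watcher interface in this package gains the OnDoneFunc parameter. An illustrative implementation against the new ClusterWatcher signatures (the type and log messages are invented, and the internal package is importable only within grpc-go):

package watchers

import (
	"log"

	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
)

type loggingClusterWatcher struct{ name string }

func (w *loggingClusterWatcher) OnUpdate(d *xdsresource.ClusterResourceData, onDone xdsresource.OnDoneFunc) {
	// Skipping onDone would stall the ADS stream: the transport will not
	// read the next message until every watcher reports completion.
	defer onDone()
	log.Printf("cluster %q updated: %v", w.name, d != nil)
}

func (w *loggingClusterWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) {
	defer onDone()
	log.Printf("cluster %q watch error: %v", w.name, err)
}

func (w *loggingClusterWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) {
	defer onDone()
	log.Printf("cluster %q does not exist on the management server", w.name)
}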
- OnResourceDoesNotExist() + OnResourceDoesNotExist(OnDoneFunc) } type delegatingClusterWatcher struct { watcher ClusterWatcher } -func (d *delegatingClusterWatcher) OnUpdate(data ResourceData) { +func (d *delegatingClusterWatcher) OnUpdate(data ResourceData, onDone OnDoneFunc) { c := data.(*ClusterResourceData) - d.watcher.OnUpdate(c) + d.watcher.OnUpdate(c, onDone) } -func (d *delegatingClusterWatcher) OnError(err error) { - d.watcher.OnError(err) +func (d *delegatingClusterWatcher) OnError(err error, onDone OnDoneFunc) { + d.watcher.OnError(err, onDone) } -func (d *delegatingClusterWatcher) OnResourceDoesNotExist() { - d.watcher.OnResourceDoesNotExist() +func (d *delegatingClusterWatcher) OnResourceDoesNotExist(onDone OnDoneFunc) { + d.watcher.OnResourceDoesNotExist(onDone) } // WatchCluster uses xDS to discover the configuration associated with the diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go index 775a8aa1942..66c0ae0b202 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go @@ -54,7 +54,7 @@ type endpointsResourceType struct { // Decode deserializes and validates an xDS resource serialized inside the // provided `Any` proto, as received from the xDS management server. -func (endpointsResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { +func (endpointsResourceType) Decode(_ *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { name, rc, err := unmarshalEndpointsResource(resource) switch { case name == "": @@ -107,7 +107,7 @@ func (e *EndpointsResourceData) Raw() *anypb.Any { // events corresponding to the endpoints resource being watched. type EndpointsWatcher interface { // OnUpdate is invoked to report an update for the resource being watched. - OnUpdate(*EndpointsResourceData) + OnUpdate(*EndpointsResourceData, OnDoneFunc) // OnError is invoked under different error conditions including but not // limited to the following: @@ -117,28 +117,28 @@ type EndpointsWatcher interface { // - resource validation error // - ADS stream failure // - connection failure - OnError(error) + OnError(error, OnDoneFunc) // OnResourceDoesNotExist is invoked for a specific error condition where // the requested resource is not found on the xDS management server. 
- OnResourceDoesNotExist() + OnResourceDoesNotExist(OnDoneFunc) } type delegatingEndpointsWatcher struct { watcher EndpointsWatcher } -func (d *delegatingEndpointsWatcher) OnUpdate(data ResourceData) { +func (d *delegatingEndpointsWatcher) OnUpdate(data ResourceData, onDone OnDoneFunc) { e := data.(*EndpointsResourceData) - d.watcher.OnUpdate(e) + d.watcher.OnUpdate(e, onDone) } -func (d *delegatingEndpointsWatcher) OnError(err error) { - d.watcher.OnError(err) +func (d *delegatingEndpointsWatcher) OnError(err error, onDone OnDoneFunc) { + d.watcher.OnError(err, onDone) } -func (d *delegatingEndpointsWatcher) OnResourceDoesNotExist() { - d.watcher.OnResourceDoesNotExist() +func (d *delegatingEndpointsWatcher) OnResourceDoesNotExist(onDone OnDoneFunc) { + d.watcher.OnResourceDoesNotExist(onDone) } // WatchEndpoints uses xDS to discover the configuration associated with the diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/filter_chain.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/filter_chain.go index bef1277d220..196bb9f873f 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/filter_chain.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/filter_chain.go @@ -536,12 +536,12 @@ func (fcm *FilterChainManager) filterChainFromProto(fc *v3listenerpb.FilterChain if name := ts.GetName(); name != transportSocketName { return nil, fmt.Errorf("transport_socket field has unexpected name: %s", name) } - any := ts.GetTypedConfig() - if any == nil || any.TypeUrl != version.V3DownstreamTLSContextURL { - return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", any.TypeUrl) + tc := ts.GetTypedConfig() + if tc == nil || tc.TypeUrl != version.V3DownstreamTLSContextURL { + return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", tc.TypeUrl) } downstreamCtx := &v3tlspb.DownstreamTlsContext{} - if err := proto.Unmarshal(any.GetValue(), downstreamCtx); err != nil { + if err := proto.Unmarshal(tc.GetValue(), downstreamCtx); err != nil { return nil, fmt.Errorf("failed to unmarshal DownstreamTlsContext in LDS response: %v", err) } if downstreamCtx.GetRequireSni().GetValue() { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go index 4337e4e063f..80fa5e6a21e 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go @@ -60,12 +60,12 @@ func securityConfigValidator(bc *bootstrap.Config, sc *SecurityConfig) error { return nil } if sc.IdentityInstanceName != "" { - if _, ok := bc.CertProviderConfigs[sc.IdentityInstanceName]; !ok { + if _, ok := bc.CertProviderConfigs()[sc.IdentityInstanceName]; !ok { return fmt.Errorf("identity certificate provider instance name %q missing in bootstrap configuration", sc.IdentityInstanceName) } } if sc.RootInstanceName != "" { - if _, ok := bc.CertProviderConfigs[sc.RootInstanceName]; !ok { + if _, ok := bc.CertProviderConfigs()[sc.RootInstanceName]; !ok { return fmt.Errorf("root certificate provider instance name %q missing in bootstrap configuration", 
sc.RootInstanceName) } } @@ -144,7 +144,7 @@ func (l *ListenerResourceData) Raw() *anypb.Any { // events corresponding to the listener resource being watched. type ListenerWatcher interface { // OnUpdate is invoked to report an update for the resource being watched. - OnUpdate(*ListenerResourceData) + OnUpdate(*ListenerResourceData, OnDoneFunc) // OnError is invoked under different error conditions including but not // limited to the following: @@ -154,28 +154,28 @@ type ListenerWatcher interface { // - resource validation error // - ADS stream failure // - connection failure - OnError(error) + OnError(error, OnDoneFunc) // OnResourceDoesNotExist is invoked for a specific error condition where // the requested resource is not found on the xDS management server. - OnResourceDoesNotExist() + OnResourceDoesNotExist(OnDoneFunc) } type delegatingListenerWatcher struct { watcher ListenerWatcher } -func (d *delegatingListenerWatcher) OnUpdate(data ResourceData) { +func (d *delegatingListenerWatcher) OnUpdate(data ResourceData, onDone OnDoneFunc) { l := data.(*ListenerResourceData) - d.watcher.OnUpdate(l) + d.watcher.OnUpdate(l, onDone) } -func (d *delegatingListenerWatcher) OnError(err error) { - d.watcher.OnError(err) +func (d *delegatingListenerWatcher) OnError(err error, onDone OnDoneFunc) { + d.watcher.OnError(err, onDone) } -func (d *delegatingListenerWatcher) OnResourceDoesNotExist() { - d.watcher.OnResourceDoesNotExist() +func (d *delegatingListenerWatcher) OnResourceDoesNotExist(onDone OnDoneFunc) { + d.watcher.OnResourceDoesNotExist(onDone) } // WatchListener uses xDS to discover the configuration associated with the diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher.go index 77aa85b68e5..796e9e3008d 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher.go @@ -19,9 +19,9 @@ package xdsresource import ( "fmt" + "math/rand" "strings" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/xds/matcher" @@ -142,8 +142,8 @@ func newFractionMatcher(fraction uint32) *fractionMatcher { return &fractionMatcher{fraction: int64(fraction)} } -// RandInt63n overwrites grpcrand for control in tests. -var RandInt63n = grpcrand.Int63n +// RandInt63n overwrites rand for control in tests. 
+var RandInt63n = rand.Int63n func (fm *fractionMatcher) match() bool { t := RandInt63n(1000000) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go index a1e15e2d3e2..55cfd6fbb15 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go @@ -25,9 +25,6 @@ package xdsresource import ( - "fmt" - - "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/xds/bootstrap" xdsinternal "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" @@ -40,8 +37,6 @@ func init() { xdsinternal.ResourceTypeMapForTesting[version.V3RouteConfigURL] = routeConfigType xdsinternal.ResourceTypeMapForTesting[version.V3ClusterURL] = clusterType xdsinternal.ResourceTypeMapForTesting[version.V3EndpointsURL] = endpointsType - - internal.TriggerXDSResourceNameNotFoundForTesting = triggerResourceNotFoundForTesting } // Producer contains a single method to discover resource configuration from a @@ -57,13 +52,19 @@ type Producer interface { WatchResource(rType Type, resourceName string, watcher ResourceWatcher) (cancel func()) } +// OnDoneFunc is a function to be invoked by watcher implementations upon +// completing the processing of a callback from the xDS client. Failure to +// invoke this callback prevents the xDS client from reading further messages +// from the xDS server. +type OnDoneFunc func() + // ResourceWatcher wraps the callbacks to be invoked for different events // corresponding to the resource being watched. type ResourceWatcher interface { // OnUpdate is invoked to report an update for the resource being watched. // The ResourceData parameter needs to be type asserted to the appropriate // type for the resource being watched. - OnUpdate(ResourceData) + OnUpdate(ResourceData, OnDoneFunc) // OnError is invoked under different error conditions including but not // limited to the following: @@ -73,11 +74,11 @@ type ResourceWatcher interface { // - resource validation error // - ADS stream failure // - connection failure - OnError(error) + OnError(error, OnDoneFunc) // OnResourceDoesNotExist is invoked for a specific error condition where // the requested resource is not found on the xDS management server. 
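Because RandInt63n is a package-level variable, tests can pin the fraction matcher's randomness. A hypothetical in-package test fragment, assuming the matcher treats draws below the configured fraction as a match:

package xdsresource

import "testing"

func TestWithDeterministicFraction(t *testing.T) {
	origRandInt63n := RandInt63n
	RandInt63n = func(int64) int64 { return 0 } // every draw lands at 0
	defer func() { RandInt63n = origRandInt63n }()
	// Exercise route matching here: any non-zero runtime_fraction now
	// matches deterministically instead of probabilistically.
}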
- OnResourceDoesNotExist() + OnResourceDoesNotExist(OnDoneFunc) } // TODO: Once the implementation is complete, rename this interface as @@ -171,20 +172,3 @@ func (r resourceTypeState) TypeName() string { func (r resourceTypeState) AllResourcesRequiredInSotW() bool { return r.allResourcesRequiredInSotW } - -func triggerResourceNotFoundForTesting(cb func(Type, string) error, typeName, resourceName string) error { - var typ Type - switch typeName { - case ListenerResourceTypeName: - typ = listenerType - case RouteConfigTypeName: - typ = routeConfigType - case ClusterResourceTypeName: - typ = clusterType - case EndpointsResourceTypeName: - typ = endpointsType - default: - return fmt.Errorf("unknown type name %q", typeName) - } - return cb(typ, resourceName) -} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go index 8ce5cb28596..ed32abb8333 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go @@ -54,7 +54,7 @@ type routeConfigResourceType struct { // Decode deserializes and validates an xDS resource serialized inside the // provided `Any` proto, as received from the xDS management server. -func (routeConfigResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { +func (routeConfigResourceType) Decode(_ *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { name, rc, err := unmarshalRouteConfigResource(resource) switch { case name == "": @@ -108,7 +108,7 @@ func (r *RouteConfigResourceData) Raw() *anypb.Any { // events corresponding to the route configuration resource being watched. type RouteConfigWatcher interface { // OnUpdate is invoked to report an update for the resource being watched. - OnUpdate(*RouteConfigResourceData) + OnUpdate(*RouteConfigResourceData, OnDoneFunc) // OnError is invoked under different error conditions including but not // limited to the following: @@ -118,28 +118,28 @@ type RouteConfigWatcher interface { // - resource validation error // - ADS stream failure // - connection failure - OnError(error) + OnError(error, OnDoneFunc) // OnResourceDoesNotExist is invoked for a specific error condition where // the requested resource is not found on the xDS management server. 
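OnDoneFunc (defined in the resource_type.go hunk above) threads flow control through every watcher callback: until the watcher invokes it, the xDS client will not read further messages from the management server. A sketch of a watcher conforming to the new signatures; loggingRouteWatcher is an invented name, and since the xdsresource package is internal to grpc-go, this is illustrative rather than importable by outside code:

package watcher

import (
    "log"

    "google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
)

type loggingRouteWatcher struct{}

func (w *loggingRouteWatcher) OnUpdate(d *xdsresource.RouteConfigResourceData, onDone xdsresource.OnDoneFunc) {
    defer onDone() // unblocks the xDS client to read the next message
    log.Printf("route config update: %v", d)
}

func (w *loggingRouteWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) {
    defer onDone()
    log.Printf("route config watch error: %v", err)
}

func (w *loggingRouteWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) {
    defer onDone()
    log.Print("route config resource does not exist")
}

Deferring onDone at the top of each callback is the simplest way to satisfy the contract on every return path.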
- OnResourceDoesNotExist() + OnResourceDoesNotExist(OnDoneFunc) } type delegatingRouteConfigWatcher struct { watcher RouteConfigWatcher } -func (d *delegatingRouteConfigWatcher) OnUpdate(data ResourceData) { +func (d *delegatingRouteConfigWatcher) OnUpdate(data ResourceData, onDone OnDoneFunc) { rc := data.(*RouteConfigResourceData) - d.watcher.OnUpdate(rc) + d.watcher.OnUpdate(rc, onDone) } -func (d *delegatingRouteConfigWatcher) OnError(err error) { - d.watcher.OnError(err) +func (d *delegatingRouteConfigWatcher) OnError(err error, onDone OnDoneFunc) { + d.watcher.OnError(err, onDone) } -func (d *delegatingRouteConfigWatcher) OnResourceDoesNotExist() { - d.watcher.OnResourceDoesNotExist() +func (d *delegatingRouteConfigWatcher) OnResourceDoesNotExist(onDone OnDoneFunc) { + d.watcher.OnResourceDoesNotExist(onDone) } // WatchRouteConfig uses xDS to discover the configuration associated with the diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index de12f478bd0..ab024b57c46 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -89,17 +89,26 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster, serv if fields := val.GetFields(); fields != nil { if val, ok := fields["service_name"]; ok { if _, ok := val.GetKind().(*structpb.Value_StringValue); ok { - telemetryLabels["service_name"] = val.GetStringValue() + telemetryLabels["csm.service_name"] = val.GetStringValue() } } if val, ok := fields["service_namespace"]; ok { if _, ok := val.GetKind().(*structpb.Value_StringValue); ok { - telemetryLabels["service_namespace"] = val.GetStringValue() + telemetryLabels["csm.service_namespace_name"] = val.GetStringValue() } } } } } + // "The values for the service labels csm.service_name and + // csm.service_namespace_name come from xDS, “unknown” if not present." - + // CSM Design. + if _, ok := telemetryLabels["csm.service_name"]; !ok { + telemetryLabels["csm.service_name"] = "unknown" + } + if _, ok := telemetryLabels["csm.service_namespace_name"]; !ok { + telemetryLabels["csm.service_namespace_name"] = "unknown" + } var lbPolicy json.RawMessage var err error @@ -269,7 +278,7 @@ func dnsHostNameFromCluster(cluster *v3clusterpb.Cluster) (string, error) { // the received Cluster resource. func securityConfigFromCluster(cluster *v3clusterpb.Cluster) (*SecurityConfig, error) { if tsm := cluster.GetTransportSocketMatches(); len(tsm) != 0 { - return nil, fmt.Errorf("unsupport transport_socket_matches field is non-empty: %+v", tsm) + return nil, fmt.Errorf("unsupported transport_socket_matches field is non-empty: %+v", tsm) } // The Cluster resource contains a `transport_socket` field, which contains // a oneof `typed_config` field of type `protobuf.Any`. 
The any proto @@ -281,12 +290,12 @@ func securityConfigFromCluster(cluster *v3clusterpb.Cluster) (*SecurityConfig, e if name := ts.GetName(); name != transportSocketName { return nil, fmt.Errorf("transport_socket field has unexpected name: %s", name) } - any := ts.GetTypedConfig() - if any == nil || any.TypeUrl != version.V3UpstreamTLSContextURL { - return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", any.TypeUrl) + tc := ts.GetTypedConfig() + if tc == nil || tc.TypeUrl != version.V3UpstreamTLSContextURL { + return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", tc.TypeUrl) } upstreamCtx := &v3tlspb.UpstreamTlsContext{} - if err := proto.Unmarshal(any.GetValue(), upstreamCtx); err != nil { + if err := proto.Unmarshal(tc.GetValue(), upstreamCtx); err != nil { return nil, fmt.Errorf("failed to unmarshal UpstreamTlsContext in CDS response: %v", err) } // The following fields from `UpstreamTlsContext` are ignored: @@ -468,7 +477,7 @@ func securityConfigFromCommonTLSContextUsingNewFields(common *v3tlspb.CommonTlsC case len(validationCtx.GetVerifyCertificateHash()) != 0: return nil, fmt.Errorf("unsupported verify_certificate_hash field in CommonTlsContext message: %+v", common) case validationCtx.GetRequireSignedCertificateTimestamp().GetValue(): - return nil, fmt.Errorf("unsupported require_sugned_ceritificate_timestamp field in CommonTlsContext message: %+v", common) + return nil, fmt.Errorf("unsupported require_signed_certificate_timestamp field in CommonTlsContext message: %+v", common) case validationCtx.GetCrl() != nil: return nil, fmt.Errorf("unsupported crl field in CommonTlsContext message: %+v", common) case validationCtx.GetCustomValidatorConfig() != nil: diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/server.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/server.go index b5eb806207a..1fea8c83093 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/server.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/server.go @@ -43,8 +43,8 @@ const serverPrefix = "[xds-server %p] " var ( // These new functions will be overridden in unit tests. - newXDSClient = func() (xdsclient.XDSClient, func(), error) { - return xdsclient.New() + newXDSClient = func(name string) (xdsclient.XDSClient, func(), error) { + return xdsclient.New(name) } newGRPCServer = func(opts ...grpc.ServerOption) grpcServer { return grpc.NewServer(opts...) @@ -95,11 +95,14 @@ func NewGRPCServer(opts ...grpc.ServerOption) (*GRPCServer, error) { newXDSClient := newXDSClient if s.opts.bootstrapContentsForTesting != nil { // Bootstrap file contents may be specified as a server option for tests. - newXDSClient = func() (xdsclient.XDSClient, func(), error) { - return xdsclient.NewWithBootstrapContentsForTesting(s.opts.bootstrapContentsForTesting) + newXDSClient = func(name string) (xdsclient.XDSClient, func(), error) { + return xdsclient.NewForTesting(xdsclient.OptionsForTesting{ + Name: name, + Contents: s.opts.bootstrapContentsForTesting, + }) } } - xdsClient, xdsClientClose, err := newXDSClient() + xdsClient, xdsClientClose, err := newXDSClient(xdsclient.NameForServer) if err != nil { return nil, fmt.Errorf("xDS client creation failed: %v", err) } @@ -108,7 +111,7 @@ func NewGRPCServer(opts ...grpc.ServerOption) (*GRPCServer, error) { // Listener resource name template is mandatory on the server side. 
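Returning to the unmarshal_cds.go hunk above for a moment: it renames the telemetry keys to csm.service_name and csm.service_namespace_name and, per the quoted CSM design note, defaults both to "unknown" when the xDS metadata omits them. A standalone sketch of that defaulting rule (package csm and defaultCSMLabels are invented names):

package csm

// defaultCSMLabels applies the same rule as the CDS validation above:
// CSM service labels missing from xDS metadata are reported as "unknown".
func defaultCSMLabels(labels map[string]string) {
    for _, key := range []string{"csm.service_name", "csm.service_namespace_name"} {
        if _, ok := labels[key]; !ok {
            labels[key] = "unknown"
        }
    }
}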
cfg := xdsClient.BootstrapConfig() - if cfg.ServerListenerResourceNameTemplate == "" { + if cfg.ServerListenerResourceNameTemplate() == "" { xdsClientClose() return nil, errors.New("missing server_listener_resource_name_template in the bootstrap configuration") } @@ -191,7 +194,7 @@ func (s *GRPCServer) Serve(lis net.Listener) error { // string, it will be replaced with the server's listening "IP:port" (e.g., // "0.0.0.0:8080", "[::]:8080"). cfg := s.xdsC.BootstrapConfig() - name := bootstrap.PopulateResourceTemplate(cfg.ServerListenerResourceNameTemplate, lis.Addr().String()) + name := bootstrap.PopulateResourceTemplate(cfg.ServerListenerResourceNameTemplate(), lis.Addr().String()) // Create a listenerWrapper which handles all functionality required by // this particular instance of Serve(). diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/terraform/providers/google/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index bb2966e3b4c..8f9e592f870 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -351,7 +351,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect. panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) } - return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v field %v: %v", kind, fd.JSONName(), tok.RawString()) } func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/terraform/providers/google/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index 29846df222c..0e72d85378b 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -216,9 +216,7 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto } v := m.Get(fd) - isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() - isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil - if isProto2Scalar || isSingularMessage { + if fd.HasPresence() { if m.skipNull { continue } diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/descopts/options.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/descopts/options.go index 8401be8c84f..024ffebd3dd 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/descopts/options.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/descopts/options.go @@ -9,7 +9,7 @@ // dependency on the descriptor proto package). package descopts -import pref "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // These variables are set by the init function in descriptor.pb.go via logic // in internal/filetype. In other words, so long as the descriptor proto package @@ -17,13 +17,13 @@ import pref "google.golang.org/protobuf/reflect/protoreflect" // // Each variable is populated with a nil pointer to the options struct. 
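The protojson encode.go hunk above collapses the two hand-rolled checks (proto2 scalar with a default, singular message) into a single fd.HasPresence() call, which covers both cases. Assuming the observable behavior is unchanged, MarshalOptions.EmitUnpopulated should still render unset presence fields as explicit JSON nulls; a small sketch under that assumption, using the well-known Api message whose source_context field is a singular message:

package main

import (
    "fmt"

    "google.golang.org/protobuf/encoding/protojson"
    "google.golang.org/protobuf/types/known/apipb"
)

func main() {
    b, err := protojson.MarshalOptions{EmitUnpopulated: true}.Marshal(&apipb.Api{})
    if err != nil {
        panic(err)
    }
    // Expect "sourceContext":null among the output fields, since the unset
    // message field has presence.
    fmt.Println(string(b))
}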
var ( - File pref.ProtoMessage - Enum pref.ProtoMessage - EnumValue pref.ProtoMessage - Message pref.ProtoMessage - Field pref.ProtoMessage - Oneof pref.ProtoMessage - ExtensionRange pref.ProtoMessage - Service pref.ProtoMessage - Method pref.ProtoMessage + File protoreflect.ProtoMessage + Enum protoreflect.ProtoMessage + EnumValue protoreflect.ProtoMessage + Message protoreflect.ProtoMessage + Field protoreflect.ProtoMessage + Oneof protoreflect.ProtoMessage + ExtensionRange protoreflect.ProtoMessage + Service protoreflect.ProtoMessage + Method protoreflect.ProtoMessage ) diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go index 029a6a12d74..08dad7692c6 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go @@ -5,7 +5,7 @@ // Package editionssupport defines constants for editions that are supported. package editionssupport -import descriptorpb "google.golang.org/protobuf/types/descriptorpb" +import "google.golang.org/protobuf/types/descriptorpb" const ( Minimum = descriptorpb.Edition_EDITION_PROTO2 diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index df53ff40b25..fa790e0ff19 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -258,6 +258,7 @@ type ( StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions + IsLazy bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor @@ -351,6 +352,7 @@ func (fd *Field) IsPacked() bool { } func (fd *Field) IsExtension() bool { return false } func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsLazy() bool { return fd.L1.IsLazy } func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } func (fd *Field) MapKey() protoreflect.FieldDescriptor { @@ -425,6 +427,7 @@ type ( Extendee protoreflect.MessageDescriptor Cardinality protoreflect.Cardinality Kind protoreflect.Kind + IsLazy bool EditionFeatures EditionFeatures } ExtensionL2 struct { @@ -465,6 +468,7 @@ func (xd *Extension) IsPacked() bool { } func (xd *Extension) IsExtension() bool { return true } func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsLazy() bool { return xd.L1.IsLazy } func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } func (xd *Extension) IsMap() bool { return false } func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil } diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 8a57d60b08c..d2f549497eb 100644 --- 
a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -495,6 +495,8 @@ func (xd *Extension) unmarshalOptions(b []byte) { switch num { case genid.FieldOptions_Packed_field_number: xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + xd.L1.IsLazy = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index e56c91a8dbe..67a51b327c5 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -504,6 +504,8 @@ func (fd *Field) unmarshalOptions(b []byte) { fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) case genid.FieldOptions_Weak_field_number: fd.L1.IsWeak = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + fd.L1.IsLazy = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v) } diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index 11f5f356b66..fd4d0c83d25 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -68,7 +68,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + case genid.FeatureSet_Go_ext_number: parent = unmarshalGoFeature(v, parent) } } diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/genid/doc.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/genid/doc.go index 45ccd01211c..d9b9d916a20 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/genid/doc.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/genid/doc.go @@ -6,6 +6,6 @@ // and the well-known types. package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" const GoogleProtobuf_package protoreflect.FullName = "google.protobuf" diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go index 9a652a2b424..7f67cbb6e97 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -12,20 +12,25 @@ import ( const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto" -// Names for google.protobuf.GoFeatures. +// Names for pb.GoFeatures. 
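The editions.go hunk above now dispatches on the extension number of pb.GoFeatures within FeatureSet (FeatureSet_Go_ext_number, 1002, declared in the genid constants just below) instead of on GoFeatures' own field number. A sketch of that wire-level dispatch pattern; scanGoFeatures and featureSetGoExt are invented names:

package editions

import "google.golang.org/protobuf/encoding/protowire"

// featureSetGoExt mirrors genid.FeatureSet_Go_ext_number declared below.
const featureSetGoExt = protowire.Number(1002)

// scanGoFeatures walks a serialized FeatureSet, hands the payload of
// extension field 1002 (pb.GoFeatures) to handle, and skips everything else.
func scanGoFeatures(b []byte, handle func([]byte)) error {
    for len(b) > 0 {
        num, typ, n := protowire.ConsumeTag(b)
        if n < 0 {
            return protowire.ParseError(n)
        }
        b = b[n:]
        if num == featureSetGoExt && typ == protowire.BytesType {
            v, m := protowire.ConsumeBytes(b)
            if m < 0 {
                return protowire.ParseError(m)
            }
            handle(v)
            b = b[m:]
            continue
        }
        m := protowire.ConsumeFieldValue(num, typ, b)
        if m < 0 {
            return protowire.ParseError(m)
        }
        b = b[m:]
    }
    return nil
}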
const ( GoFeatures_message_name protoreflect.Name = "GoFeatures" - GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" + GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures" ) -// Field names for google.protobuf.GoFeatures. +// Field names for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" - GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" + GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum" ) -// Field numbers for google.protobuf.GoFeatures. +// Field numbers for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 ) + +// Extension numbers +const ( + FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002 +) diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/genid/map_entry.go index 8f9ea02ff2a..bef5a25fbbf 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/genid/map_entry.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/genid/map_entry.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field names and numbers for synthetic map entry messages. const ( diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/genid/wrappers.go index 429384b85b0..9404270de0b 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/genid/wrappers.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/genid/wrappers.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field name and number for messages in wrappers.proto. const ( diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 4bb0a7a20ce..0d5b546e0ee 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -67,7 +67,6 @@ type lazyExtensionValue struct { xi *extensionFieldInfo value protoreflect.Value b []byte - fn func() protoreflect.Value } type ExtensionField struct { @@ -158,10 +157,9 @@ func (f *ExtensionField) lazyInit() { } f.lazy.value = val } else { - f.lazy.value = f.lazy.fn() + panic("No support for lazy fns for ExtensionField") } f.lazy.xi = nil - f.lazy.fn = nil f.lazy.b = nil atomic.StoreUint32(&f.lazy.atomicOnce, 1) } @@ -174,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value) f.lazy = nil } -// SetLazy sets the type and a value that is to be lazily evaluated upon first use. -// This must not be called concurrently. 
-func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) { - f.typ = t - f.lazy = &lazyExtensionValue{fn: fn} -} - // Value returns the value of the extension field. // This may be called concurrently. func (f *ExtensionField) Value() protoreflect.Value { diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 78ee47e44b9..7c1f66c8c19 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -65,6 +65,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si if err != nil { return out, err } + if cf.funcs.isInit == nil { + out.initialized = true + } vi.Set(vw) return out, nil } diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 6b2fdbb739a..78be9df3420 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -189,6 +189,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { if mi.methods.Merge == nil { mi.methods.Merge = mi.merge } + if mi.methods.Equal == nil { + mi.methods.Equal = equal + } } // getUnknownBytes returns a *[]byte for the unknown fields. diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go deleted file mode 100644 index 145c577bd6b..00000000000 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "reflect" - - "google.golang.org/protobuf/encoding/protowire" -) - -func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { - v := p.v.Elem().Int() - return f.tagsize + protowire.SizeVarint(uint64(v)) -} - -func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := p.v.Elem().Int() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - return b, nil -} - -func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - p.v.Elem().SetInt(int64(v)) - out.n = n - return out, nil -} - -func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(src.v.Elem()) -} - -var coderEnum = pointerCoderFuncs{ - size: sizeEnum, - marshal: appendEnum, - unmarshal: consumeEnum, - merge: mergeEnum, -} - -func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - if p.v.Elem().Int() == 0 { - return 0 - } - return sizeEnum(p, f, opts) -} - -func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - if p.v.Elem().Int() == 0 { - return b, nil - } - return appendEnum(b, p, f, opts) -} - -func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if src.v.Elem().Int() != 0 { - dst.v.Elem().Set(src.v.Elem()) - } -} - -var coderEnumNoZero = pointerCoderFuncs{ - size: sizeEnumNoZero, - marshal: appendEnumNoZero, - unmarshal: consumeEnum, - merge: mergeEnumNoZero, -} - -func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - return sizeEnum(pointer{p.v.Elem()}, f, opts) -} - -func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - return appendEnum(b, pointer{p.v.Elem()}, f, opts) -} - -func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - if p.v.Elem().IsNil() { - p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem())) - } - return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts) -} - -func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if !src.v.Elem().IsNil() { - v := reflect.New(dst.v.Type().Elem().Elem()) - v.Elem().Set(src.v.Elem().Elem()) - dst.v.Elem().Set(v) - } -} - -var coderEnumPtr = pointerCoderFuncs{ - size: sizeEnumPtr, - marshal: appendEnumPtr, - unmarshal: consumeEnumPtr, - merge: mergeEnumPtr, -} - -func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize - } - return size -} - -func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - s := p.v.Elem() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n 
< 0 { - return out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - b = b[n:] - } - out.n = n - return out, nil - } - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - out.n = n - return out, nil -} - -func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem())) -} - -var coderEnumSlice = pointerCoderFuncs{ - size: sizeEnumSlice, - marshal: appendEnumSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} - -func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return 0 - } - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - return f.tagsize + protowire.SizeBytes(n) -} - -func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -var coderEnumPackedSlice = pointerCoderFuncs{ - size: sizeEnumPackedSlice, - marshal: appendEnumPackedSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go index 757642e23c9..077712c2c5a 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl // When using unsafe pointers, we can just treat enum values as int32s. diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/convert.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/convert.go index e06ece55a26..f72ddd882f3 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value { return protoreflect.ValueOfString(v.Convert(stringType).String()) } func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value { - // pref.Value.String never panics, so we go through an interface + // protoreflect.Value.String never panics, so we go through an interface // conversion here to check the type. 
s := v.Interface().(string) if c.goType.Kind() == reflect.Slice && s == "" { diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/encode.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/encode.go index febd2122472..6254f5de41f 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -10,7 +10,7 @@ import ( "sync/atomic" "google.golang.org/protobuf/internal/flags" - proto "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/proto" piface "google.golang.org/protobuf/runtime/protoiface" ) diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/equal.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/equal.go new file mode 100644 index 00000000000..9f6c32a7d8c --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/equal.go @@ -0,0 +1,224 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "bytes" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +func equal(in protoiface.EqualInput) protoiface.EqualOutput { + return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)} +} + +// equalMessage is a fast-path variant of protoreflect.equalMessage. +// It takes advantage of the internal messageState type to avoid +// unnecessary allocations, type assertions. +func equalMessage(mx, my protoreflect.Message) bool { + if mx == nil || my == nil { + return mx == my + } + if mx.Descriptor() != my.Descriptor() { + return false + } + + msx, ok := mx.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + msy, ok := my.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + + mi := msx.messageInfo() + miy := msy.messageInfo() + if mi != miy { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + mi.init() + // Compares regular fields + // Modified Message.Range code that compares two messages of the same type + // while going over the fields. + for _, ri := range mi.rangeInfos { + var fd protoreflect.FieldDescriptor + var vx, vy protoreflect.Value + + switch ri := ri.(type) { + case *fieldInfo: + hx := ri.has(msx.pointer()) + hy := ri.has(msy.pointer()) + if hx != hy { + return false + } + if !hx { + continue + } + fd = ri.fieldDesc + vx = ri.get(msx.pointer()) + vy = ri.get(msy.pointer()) + case *oneofInfo: + fnx := ri.which(msx.pointer()) + fny := ri.which(msy.pointer()) + if fnx != fny { + return false + } + if fnx <= 0 { + continue + } + fi := mi.fields[fnx] + fd = fi.fieldDesc + vx = fi.get(msx.pointer()) + vy = fi.get(msy.pointer()) + } + + if !equalValue(fd, vx, vy) { + return false + } + } + + // Compare extensions. + // This is more complicated because mx or my could have empty/nil extension maps, + // however some populated extension map values are equal to nil extension maps. 
+ emx := mi.extensionMap(msx.pointer()) + emy := mi.extensionMap(msy.pointer()) + if emx != nil { + for k, x := range *emx { + xd := x.Type().TypeDescriptor() + xv := x.Value() + var y ExtensionField + ok := false + if emy != nil { + y, ok = (*emy)[k] + } + // We need to treat empty lists as equal to nil values + if emy == nil || !ok { + if xd.IsList() && xv.List().Len() == 0 { + continue + } + return false + } + + if !equalValue(xd, xv, y.Value()) { + return false + } + } + } + if emy != nil { + // emy may have extensions emx does not have, need to check them as well + for k, y := range *emy { + if emx != nil { + // emx has the field, so we already checked it + if _, ok := (*emx)[k]; ok { + continue + } + } + // Empty lists are equal to nil + if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 { + continue + } + + // Cant be equal if the extension is populated + return false + } + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool { + // slow path + if fd.Kind() != protoreflect.MessageKind { + return vx.Equal(vy) + } + + // fast path special cases + if fd.IsMap() { + if fd.MapValue().Kind() == protoreflect.MessageKind { + return equalMessageMap(vx.Map(), vy.Map()) + } + return vx.Equal(vy) + } + + if fd.IsList() { + return equalMessageList(vx.List(), vy.List()) + } + + return equalMessage(vx.Message(), vy.Message()) +} + +// Mostly copied from protoreflect.equalMap. +// This variant only works for messages as map types. +// All other map types should be handled via Value.Equal. +func equalMessageMap(mx, my protoreflect.Map) bool { + if mx.Len() != my.Len() { + return false + } + equal := true + mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool { + if !my.Has(k) { + equal = false + return false + } + vy := my.Get(k) + equal = equalMessage(vx.Message(), vy.Message()) + return equal + }) + return equal +} + +// Mostly copied from protoreflect.equalList. +// The only change is the usage of equalImpl instead of protoreflect.equalValue. +func equalMessageList(lx, ly protoreflect.List) bool { + if lx.Len() != ly.Len() { + return false + } + for i := 0; i < lx.Len(); i++ { + // We only operate on messages here since equalImpl will not call us in any other case. + if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) { + return false + } + } + return true +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. +// Copied from protoreflect.equalUnknown. +func equalUnknown(x, y protoreflect.RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + my := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) 
+ y = y[n:] + } + if len(mx) != len(my) { + return false + } + + for k, v1 := range mx { + if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) { + return false + } + } + + return true +} diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 6e8677ee633..b6849d66927 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -160,6 +160,7 @@ func (x placeholderExtension) HasPresence() bool func (x placeholderExtension) HasOptionalKeyword() bool { return false } func (x placeholderExtension) IsExtension() bool { return true } func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsLazy() bool { return false } func (x placeholderExtension) IsPacked() bool { return false } func (x placeholderExtension) IsList() bool { return false } func (x placeholderExtension) IsMap() bool { return false } diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/message.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/message.go index 019399d454d..741b5ed29cf 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -30,8 +30,8 @@ type MessageInfo struct { // Desc is the underlying message descriptor type and must be populated. Desc protoreflect.MessageDescriptor - // Exporter must be provided in a purego environment in order to provide - // access to unexported fields. + // Deprecated: Exporter will be removed the next time we bump + // protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640 Exporter exporter // OneofWrappers is list of pointers to oneof wrapper struct types. diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go deleted file mode 100644 index da685e8a29d..00000000000 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "fmt" - "reflect" - "sync" -) - -const UnsafeEnabled = false - -// Pointer is an opaque pointer type. -type Pointer any - -// offset represents the offset to a struct field, accessible from a pointer. -// The offset is the field index into a struct. -type offset struct { - index int - export exporter -} - -// offsetOf returns a field offset for the struct field. -func offsetOf(f reflect.StructField, x exporter) offset { - if len(f.Index) != 1 { - panic("embedded structs are not supported") - } - if f.PkgPath == "" { - return offset{index: f.Index[0]} // field is already exported - } - if x == nil { - panic("exporter must be provided for unexported field") - } - return offset{index: f.Index[0], export: x} -} - -// IsValid reports whether the offset is valid. 
-func (f offset) IsValid() bool { return f.index >= 0 } - -// invalidOffset is an invalid field offset. -var invalidOffset = offset{index: -1} - -// zeroOffset is a noop when calling pointer.Apply. -var zeroOffset = offset{index: 0} - -// pointer is an abstract representation of a pointer to a struct or field. -type pointer struct{ v reflect.Value } - -// pointerOf returns p as a pointer. -func pointerOf(p Pointer) pointer { - return pointerOfIface(p) -} - -// pointerOfValue returns v as a pointer. -func pointerOfValue(v reflect.Value) pointer { - return pointer{v: v} -} - -// pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v any) pointer { - return pointer{v: reflect.ValueOf(v)} -} - -// IsNil reports whether the pointer is nil. -func (p pointer) IsNil() bool { - return p.v.IsNil() -} - -// Apply adds an offset to the pointer to derive a new pointer -// to a specified field. The current pointer must be pointing at a struct. -func (p pointer) Apply(f offset) pointer { - if f.export != nil { - if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() { - return pointer{v: v} - } - } - return pointer{v: p.v.Elem().Field(f.index).Addr()} -} - -// AsValueOf treats p as a pointer to an object of type t and returns the value. -// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) -func (p pointer) AsValueOf(t reflect.Type) reflect.Value { - if got := p.v.Type().Elem(); got != t { - panic(fmt.Sprintf("invalid type: got %v, want %v", got, t)) - } - return p.v -} - -// AsIfaceOf treats p as a pointer to an object of type t and returns the value. -// It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) any { - return p.AsValueOf(t).Interface() -} - -func (p pointer) Bool() *bool { return p.v.Interface().(*bool) } -func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) } -func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) } -func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) } -func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) } -func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) } -func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) } -func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) } -func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) } -func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) } -func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) } -func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) } -func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) } -func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) } -func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) } -func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) } -func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) } -func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) } -func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) } -func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) } -func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) } -func (p pointer) String() *string { return p.v.Interface().(*string) } -func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } -func (p pointer) StringSlice() *[]string { return 
p.v.Interface().(*[]string) } -func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) } -func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) } -func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } -func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) } -func (p pointer) Extensions() *map[int32]ExtensionField { - return p.v.Interface().(*map[int32]ExtensionField) -} - -func (p pointer) Elem() pointer { - return pointer{v: p.v.Elem()} -} - -// PointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) PointerSlice() []pointer { - // TODO: reconsider this - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// AppendPointerSlice appends v to p, which must be a []*T. -func (p pointer) AppendPointerSlice(v pointer) { - sp := p.v.Elem() - sp.Set(reflect.Append(sp, v.v)) -} - -// SetPointer sets *p to v. -func (p pointer) SetPointer(v pointer) { - p.v.Elem().Set(v.v) -} - -func growSlice(p pointer, addCap int) { - // TODO: Once we only support Go 1.20 and newer, use reflect.Grow. - in := p.v.Elem() - out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap) - reflect.Copy(out, in) - p.v.Elem().Set(out) -} - -func (p pointer) growBoolSlice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat32Slice(addCap int) { - growSlice(p, addCap) -} - -func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } -func (ms *messageState) pointer() pointer { panic("not supported") } -func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") } - -type atomicNilMessage struct { - once sync.Once - m messageReflectWrapper -} - -func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { - m.once.Do(func() { - m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface()) - m.m.mi = mi - }) - return &m.m -} diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 5f20ca5d8ab..79e186667b7 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build !purego && !appengine -// +build !purego,!appengine - package impl import ( diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go deleted file mode 100644 index a1f6f333860..00000000000 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package strs - -import pref "google.golang.org/protobuf/reflect/protoreflect" - -func UnsafeString(b []byte) string { - return string(b) -} - -func UnsafeBytes(s string) []byte { - return []byte(s) -} - -type Builder struct{} - -func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { - return prefix.Append(name) -} - -func (*Builder) MakeString(b []byte) string { - return string(b) -} diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go index a008acd0908..832a7988f14 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package strs diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go index 60166f2ba3c..1ffddf6877a 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package strs diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/version/version.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/version/version.go index dbbf1f6862c..fb8e15e8dad 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. 
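The strs and impl hunks above delete the purego/appengine fallbacks outright, leaving the unsafe-backed implementations gated only on Go version. For context, the zero-copy conversions the go1.21 strs variant relies on follow this shape (a sketch of the technique, not the vendored file; package strsketch is an invented name):

package strsketch

import "unsafe"

// unsafeString reinterprets b as a string without copying. The caller must
// not mutate b while the returned string is alive.
func unsafeString(b []byte) string {
    return unsafe.String(unsafe.SliceData(b), len(b))
}

// unsafeBytes is the inverse reinterpretation; the result must be treated
// as read-only, since strings are immutable.
func unsafeBytes(s string) []byte {
    return unsafe.Slice(unsafe.StringData(s), len(s))
}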
const ( Major = 1 - Minor = 34 - Patch = 2 + Minor = 35 + Patch = 1 PreRelease = "" ) diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/proto/equal.go b/terraform/providers/google/vendor/google.golang.org/protobuf/proto/equal.go index 1a0be1b03c7..c36d4a9cd75 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/proto/equal.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/proto/equal.go @@ -8,6 +8,7 @@ import ( "reflect" "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // Equal reports whether two messages are equal, @@ -51,6 +52,14 @@ func Equal(x, y Message) bool { if mx.IsValid() != my.IsValid() { return false } + + // Only one of the messages needs to implement the fast-path for it to work. + pmx := protoMethods(mx) + pmy := protoMethods(my) + if pmx != nil && pmy != nil && pmx.Equal != nil && pmy.Equal != nil { + return pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my}).Equal + } + vx := protoreflect.ValueOfMessage(mx) vy := protoreflect.ValueOfMessage(my) return vx.Equal(vy) diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/proto/extension.go b/terraform/providers/google/vendor/google.golang.org/protobuf/proto/extension.go index d248f292846..78445d116f7 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/proto/extension.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/proto/extension.go @@ -39,6 +39,48 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. // It panics if xt does not extend m. +// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// GetExtension, then the call should be followed immediately by a +// type assertion to the expected output value. For example: +// +// mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage) +// +// This pattern enables static analysis tools to verify that the asserted type +// matches the Go type associated with the extension field and +// also enables a possible future migration to a type-safe extension API. +// +// Since singular messages are the most common extension type, the pattern of +// calling HasExtension followed by GetExtension may be simplified to: +// +// if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil { +// ... // make use of mm +// } +// +// The mm variable is non-nil if and only if HasExtension reports true. 
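The expanded documentation above recommends asserting the extension's concrete Go type directly on the GetExtension result. A runnable round-trip using a real extension that also appears in this patch, pb.GoFeatures extending FeatureSet via gofeaturespb.E_Go:

package main

import (
    "fmt"

    "google.golang.org/protobuf/proto"
    "google.golang.org/protobuf/types/descriptorpb"
    "google.golang.org/protobuf/types/gofeaturespb"
)

func main() {
    fs := &descriptorpb.FeatureSet{}
    proto.SetExtension(fs, gofeaturespb.E_Go, &gofeaturespb.GoFeatures{
        LegacyUnmarshalJsonEnum: proto.Bool(true),
    })
    // Type-assert to the concrete Go type, as the table above prescribes
    // for message-kind extensions.
    gf := proto.GetExtension(fs, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures)
    fmt.Println(gf.GetLegacyUnmarshalJsonEnum()) // true
}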
func GetExtension(m Message, xt protoreflect.ExtensionType) any { // Treat nil message interface as an empty message; return the default. if m == nil { @@ -51,6 +93,35 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) any { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if type of v // is invalid for the specified extension field. +// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// SetExtension (e.g., foopb.E_MyExtension), then the value should be a +// concrete type that matches the expected Go type for the extension descriptor +// so that static analysis tools can verify type correctness. +// This also enables a possible future migration to a type-safe extension API. func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index 85617554272..ebcb4a8ab13 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -150,6 +150,7 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc opts = proto.Clone(opts).(*descriptorpb.FieldOptions) f.L1.Options = func() protoreflect.ProtoMessage { return opts } f.L1.IsWeak = opts.GetWeak() + f.L1.IsLazy = opts.GetLazy() if opts.Packed != nil { f.L1.EditionFeatures.IsPacked = opts.GetPacked() } @@ -214,6 +215,9 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript if xd.JsonName != nil { x.L2.StringName.InitJSON(xd.GetJsonName()) } + if x.L1.Kind == protoreflect.MessageKind && x.L1.EditionFeatures.IsDelimitedEncoded { + x.L1.Kind = protoreflect.GroupKind + } } return xs, nil } diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index 804830eda36..002e0047aea 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -14,7 +14,7 @@ import ( "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" - gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" + 
"google.golang.org/protobuf/types/gofeaturespb" ) var defaults = &descriptorpb.FeatureSetDefaults{} diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go index d5d5af6ebed..742cb518c40 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -23,6 +23,7 @@ type ( Unmarshal func(unmarshalInput) (unmarshalOutput, error) Merge func(mergeInput) mergeOutput CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error) + Equal func(equalInput) equalOutput } supportFlags = uint64 sizeInput = struct { @@ -75,4 +76,13 @@ type ( checkInitializedOutput = struct { pragma.NoUnkeyedLiterals } + equalInput = struct { + pragma.NoUnkeyedLiterals + MessageA Message + MessageB Message + } + equalOutput = struct { + pragma.NoUnkeyedLiterals + Equal bool + } ) diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go deleted file mode 100644 index 75f83a2af03..00000000000 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package protoreflect - -import "google.golang.org/protobuf/internal/pragma" - -type valueType int - -const ( - nilType valueType = iota - boolType - int32Type - int64Type - uint32Type - uint64Type - float32Type - float64Type - stringType - bytesType - enumType - ifaceType -) - -// value is a union where only one type can be represented at a time. -// This uses a distinct field for each type. This is type safe in Go, but -// occupies more memory than necessary (72B). -type value struct { - pragma.DoNotCompare // 0B - - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface any // 16B -} - -func valueOfString(v string) Value { - return Value{typ: stringType, str: v} -} -func valueOfBytes(v []byte) Value { - return Value{typ: bytesType, bin: v} -} -func valueOfIface(v any) Value { - return Value{typ: ifaceType, iface: v} -} - -func (v Value) getString() string { - return v.str -} -func (v Value) getBytes() []byte { - return v.bin -} -func (v Value) getIface() any { - return v.iface -} diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index 7f3583ead81..0015fcb35d8 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package protoreflect diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go index f7d386990a0..479527b58dd 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package protoreflect diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/terraform/providers/google/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go index 44cf467d884..246156561ce 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -39,6 +39,9 @@ type Methods = struct { // CheckInitialized returns an error if any required fields in the message are not set. CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error) + + // Equal compares two messages and returns EqualOutput.Equal == true if they are equal. + Equal func(EqualInput) EqualOutput } // SupportFlags indicate support for optional features. @@ -166,3 +169,18 @@ type CheckInitializedInput = struct { type CheckInitializedOutput = struct { pragma.NoUnkeyedLiterals } + +// EqualInput is input to the Equal method. +type EqualInput = struct { + pragma.NoUnkeyedLiterals + + MessageA protoreflect.Message + MessageB protoreflect.Message +} + +// EqualOutput is output from the Equal method. 
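EqualInput and EqualOutput (the latter completed just below) are the public protoiface mirror of the internal equalInput/equalOutput pair added to protoreflect above: proto.Equal now consults a generated Equal method when both messages supply one, so callers are unaffected. A runnable sketch using a well-known type that is already vendored in this patch:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/anypb"
)

func main() {
	a := &anypb.Any{TypeUrl: "type.googleapis.com/google.protobuf.Empty"}
	b := &anypb.Any{TypeUrl: "type.googleapis.com/google.protobuf.Empty"}
	// proto.Equal takes the fast path only when both messages expose
	// protoiface Methods with a non-nil Equal; otherwise it falls back
	// to the reflection-based comparison.
	fmt.Println(proto.Equal(a, b)) // true
}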
+type EqualOutput = struct { + pragma.NoUnkeyedLiterals + + Equal bool +} diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/terraform/providers/google/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 9403eb07507..6dea75cd5b1 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -1217,11 +1217,9 @@ type FileDescriptorSet struct { func (x *FileDescriptorSet) Reset() { *x = FileDescriptorSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorSet) String() string { @@ -1232,7 +1230,7 @@ func (*FileDescriptorSet) ProtoMessage() {} func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1291,11 +1289,9 @@ type FileDescriptorProto struct { func (x *FileDescriptorProto) Reset() { *x = FileDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorProto) String() string { @@ -1306,7 +1302,7 @@ func (*FileDescriptorProto) ProtoMessage() {} func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1434,11 +1430,9 @@ type DescriptorProto struct { func (x *DescriptorProto) Reset() { *x = DescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto) String() string { @@ -1449,7 +1443,7 @@ func (*DescriptorProto) ProtoMessage() {} func (x *DescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1561,11 +1555,9 @@ const ( func (x *ExtensionRangeOptions) Reset() { *x = ExtensionRangeOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRangeOptions) String() string { @@ -1576,7 +1568,7 
@@ func (*ExtensionRangeOptions) ProtoMessage() {} func (x *ExtensionRangeOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1680,11 +1672,9 @@ type FieldDescriptorProto struct { func (x *FieldDescriptorProto) Reset() { *x = FieldDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldDescriptorProto) String() string { @@ -1695,7 +1685,7 @@ func (*FieldDescriptorProto) ProtoMessage() {} func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1799,11 +1789,9 @@ type OneofDescriptorProto struct { func (x *OneofDescriptorProto) Reset() { *x = OneofDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OneofDescriptorProto) String() string { @@ -1814,7 +1802,7 @@ func (*OneofDescriptorProto) ProtoMessage() {} func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1863,11 +1851,9 @@ type EnumDescriptorProto struct { func (x *EnumDescriptorProto) Reset() { *x = EnumDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumDescriptorProto) String() string { @@ -1878,7 +1864,7 @@ func (*EnumDescriptorProto) ProtoMessage() {} func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1941,11 +1927,9 @@ type EnumValueDescriptorProto struct { func (x *EnumValueDescriptorProto) Reset() { *x = EnumValueDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValueDescriptorProto) String() string { @@ -1956,7 +1940,7 @@ func (*EnumValueDescriptorProto) ProtoMessage() {} func (x *EnumValueDescriptorProto) 
ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2005,11 +1989,9 @@ type ServiceDescriptorProto struct { func (x *ServiceDescriptorProto) Reset() { *x = ServiceDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceDescriptorProto) String() string { @@ -2020,7 +2002,7 @@ func (*ServiceDescriptorProto) ProtoMessage() {} func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2082,11 +2064,9 @@ const ( func (x *MethodDescriptorProto) Reset() { *x = MethodDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MethodDescriptorProto) String() string { @@ -2097,7 +2077,7 @@ func (*MethodDescriptorProto) ProtoMessage() {} func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2267,11 +2247,9 @@ const ( func (x *FileOptions) Reset() { *x = FileOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileOptions) String() string { @@ -2282,7 +2260,7 @@ func (*FileOptions) ProtoMessage() {} func (x *FileOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2534,11 +2512,9 @@ const ( func (x *MessageOptions) Reset() { *x = MessageOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MessageOptions) String() string { @@ -2549,7 +2525,7 @@ func (*MessageOptions) ProtoMessage() {} func (x *MessageOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == 
nil { ms.StoreMessageInfo(mi) @@ -2707,11 +2683,9 @@ const ( func (x *FieldOptions) Reset() { *x = FieldOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions) String() string { @@ -2722,7 +2696,7 @@ func (*FieldOptions) ProtoMessage() {} func (x *FieldOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2849,11 +2823,9 @@ type OneofOptions struct { func (x *OneofOptions) Reset() { *x = OneofOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OneofOptions) String() string { @@ -2864,7 +2836,7 @@ func (*OneofOptions) ProtoMessage() {} func (x *OneofOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2929,11 +2901,9 @@ const ( func (x *EnumOptions) Reset() { *x = EnumOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumOptions) String() string { @@ -2944,7 +2914,7 @@ func (*EnumOptions) ProtoMessage() {} func (x *EnumOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3026,11 +2996,9 @@ const ( func (x *EnumValueOptions) Reset() { *x = EnumValueOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValueOptions) String() string { @@ -3041,7 +3009,7 @@ func (*EnumValueOptions) ProtoMessage() {} func (x *EnumValueOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3115,11 +3083,9 @@ const ( func (x *ServiceOptions) Reset() { *x = ServiceOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := 
&file_google_protobuf_descriptor_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceOptions) String() string { @@ -3130,7 +3096,7 @@ func (*ServiceOptions) ProtoMessage() {} func (x *ServiceOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3192,11 +3158,9 @@ const ( func (x *MethodOptions) Reset() { *x = MethodOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MethodOptions) String() string { @@ -3207,7 +3171,7 @@ func (*MethodOptions) ProtoMessage() {} func (x *MethodOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3274,11 +3238,9 @@ type UninterpretedOption struct { func (x *UninterpretedOption) Reset() { *x = UninterpretedOption{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UninterpretedOption) String() string { @@ -3289,7 +3251,7 @@ func (*UninterpretedOption) ProtoMessage() {} func (x *UninterpretedOption) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3375,11 +3337,9 @@ type FeatureSet struct { func (x *FeatureSet) Reset() { *x = FeatureSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSet) String() string { @@ -3390,7 +3350,7 @@ func (*FeatureSet) ProtoMessage() {} func (x *FeatureSet) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3467,11 +3427,9 @@ type FeatureSetDefaults struct { func (x *FeatureSetDefaults) Reset() { *x = FeatureSetDefaults{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSetDefaults) String() string { @@ -3482,7 +3440,7 @@ func (*FeatureSetDefaults) 
ProtoMessage() {} func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3578,11 +3536,9 @@ type SourceCodeInfo struct { func (x *SourceCodeInfo) Reset() { *x = SourceCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceCodeInfo) String() string { @@ -3593,7 +3549,7 @@ func (*SourceCodeInfo) ProtoMessage() {} func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3630,11 +3586,9 @@ type GeneratedCodeInfo struct { func (x *GeneratedCodeInfo) Reset() { *x = GeneratedCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GeneratedCodeInfo) String() string { @@ -3645,7 +3599,7 @@ func (*GeneratedCodeInfo) ProtoMessage() {} func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3679,11 +3633,9 @@ type DescriptorProto_ExtensionRange struct { func (x *DescriptorProto_ExtensionRange) Reset() { *x = DescriptorProto_ExtensionRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto_ExtensionRange) String() string { @@ -3694,7 +3646,7 @@ func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3744,11 +3696,9 @@ type DescriptorProto_ReservedRange struct { func (x *DescriptorProto_ReservedRange) Reset() { *x = DescriptorProto_ReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto_ReservedRange) String() string { @@ -3759,7 +3709,7 @@ func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (x 
*DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3813,11 +3763,9 @@ type ExtensionRangeOptions_Declaration struct { func (x *ExtensionRangeOptions_Declaration) Reset() { *x = ExtensionRangeOptions_Declaration{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRangeOptions_Declaration) String() string { @@ -3828,7 +3776,7 @@ func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3895,11 +3843,9 @@ type EnumDescriptorProto_EnumReservedRange struct { func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumDescriptorProto_EnumReservedRange) String() string { @@ -3910,7 +3856,7 @@ func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3950,11 +3896,9 @@ type FieldOptions_EditionDefault struct { func (x *FieldOptions_EditionDefault) Reset() { *x = FieldOptions_EditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions_EditionDefault) String() string { @@ -3965,7 +3909,7 @@ func (*FieldOptions_EditionDefault) ProtoMessage() {} func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4018,11 +3962,9 @@ type FieldOptions_FeatureSupport struct { func (x *FieldOptions_FeatureSupport) Reset() { *x = FieldOptions_FeatureSupport{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions_FeatureSupport) String() string { @@ -4033,7 +3975,7 @@ func (*FieldOptions_FeatureSupport) ProtoMessage() {} func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4092,11 +4034,9 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UninterpretedOption_NamePart) String() string { @@ -4107,7 +4047,7 @@ func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4154,11 +4094,9 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct { func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { *x = FeatureSetDefaults_FeatureSetEditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { @@ -4169,7 +4107,7 @@ func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4305,11 +4243,9 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceCodeInfo_Location) String() string { @@ -4320,7 +4256,7 @@ func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4392,11 +4328,9 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} - if protoimpl.UnsafeEnabled { - mi := 
&file_google_protobuf_descriptor_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GeneratedCodeInfo_Annotation) String() string { @@ -4407,7 +4341,7 @@ func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5385,424 +5319,6 @@ func file_google_protobuf_descriptor_proto_init() { if File_google_protobuf_descriptor_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRangeOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*FieldDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*OneofDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*EnumDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*EnumValueDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*ServiceDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*MethodDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*FileOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*MessageOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*OneofOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*EnumOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*EnumValueOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*ServiceOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any { - switch v := v.(*MethodOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any { - switch v := v.(*UninterpretedOption); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSetDefaults); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any { - switch v := v.(*SourceCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any { - switch v := v.(*GeneratedCodeInfo); i { - case 0: - 
return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto_ExtensionRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto_ReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRangeOptions_Declaration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any { - switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions_EditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions_FeatureSupport); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any { - switch v := v.(*UninterpretedOption_NamePart); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any { - switch v := v.(*SourceCodeInfo_Location); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any { - switch v := v.(*GeneratedCodeInfo_Annotation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/terraform/providers/google/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index a2ca940c50f..c7e860fcd6d 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -29,11 +29,9 @@ type GoFeatures struct { func (x *GoFeatures) Reset() { *x = GoFeatures{} - if protoimpl.UnsafeEnabled { - mi := 
&file_google_protobuf_go_features_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_go_features_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GoFeatures) String() string { @@ -44,7 +42,7 @@ func (*GoFeatures) ProtoMessage() {} func (x *GoFeatures) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_go_features_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -145,20 +143,6 @@ func file_google_protobuf_go_features_proto_init() { if File_google_protobuf_go_features_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*GoFeatures); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 7172b43d383..87da199a386 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -368,11 +368,9 @@ func (x *Any) UnmarshalNew() (proto.Message, error) { func (x *Any) Reset() { *x = Any{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_any_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_any_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Any) String() string { @@ -383,7 +381,7 @@ func (*Any) ProtoMessage() {} func (x *Any) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_any_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -461,20 +459,6 @@ func file_google_protobuf_any_proto_init() { if File_google_protobuf_any_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Any); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/apipb/api.pb.go b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/apipb/api.pb.go new file mode 100644 index 00000000000..fdc3aef2c65 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/apipb/api.pb.go @@ -0,0 +1,531 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/api.proto + +package apipb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + sourcecontextpb "google.golang.org/protobuf/types/known/sourcecontextpb" + typepb "google.golang.org/protobuf/types/known/typepb" + reflect "reflect" + sync "sync" +) + +// Api is a light-weight descriptor for an API Interface. +// +// Interfaces are also described as "protocol buffer services" in some contexts, +// such as by the "service" keyword in a .proto file, but they are different +// from API Services, which represent a concrete implementation of an interface +// as opposed to simply a description of methods and bindings. They are also +// sometimes simply referred to as "APIs" in other contexts, such as the name of +// this message itself. See https://cloud.google.com/apis/design/glossary for +// detailed terminology. +type Api struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The fully qualified name of this interface, including package name + // followed by the interface's simple name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The methods of this interface, in unspecified order. + Methods []*Method `protobuf:"bytes,2,rep,name=methods,proto3" json:"methods,omitempty"` + // Any metadata attached to the interface. + Options []*typepb.Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + // A version string for this interface. If specified, must have the form + // `major-version.minor-version`, as in `1.10`. If the minor version is + // omitted, it defaults to zero. If the entire version field is empty, the + // major version is derived from the package name, as outlined below. 
If the + // field is not empty, the version in the package name will be verified to be + // consistent with what is provided here. + // + // The versioning schema uses [semantic + // versioning](http://semver.org) where the major version number + // indicates a breaking change and the minor version an additive, + // non-breaking change. Both version numbers are signals to users + // what to expect from different versions, and should be carefully + // chosen based on the product plan. + // + // The major version is also reflected in the package name of the + // interface, which must end in `v`, as in + // `google.feature.v1`. For major versions 0 and 1, the suffix can + // be omitted. Zero major versions must only be used for + // experimental, non-GA interfaces. + Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` + // Source context for the protocol buffer service represented by this + // message. + SourceContext *sourcecontextpb.SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // Included interfaces. See [Mixin][]. + Mixins []*Mixin `protobuf:"bytes,6,rep,name=mixins,proto3" json:"mixins,omitempty"` + // The source syntax of the service. + Syntax typepb.Syntax `protobuf:"varint,7,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` +} + +func (x *Api) Reset() { + *x = Api{} + mi := &file_google_protobuf_api_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Api) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Api) ProtoMessage() {} + +func (x *Api) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_api_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Api.ProtoReflect.Descriptor instead. +func (*Api) Descriptor() ([]byte, []int) { + return file_google_protobuf_api_proto_rawDescGZIP(), []int{0} +} + +func (x *Api) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Api) GetMethods() []*Method { + if x != nil { + return x.Methods + } + return nil +} + +func (x *Api) GetOptions() []*typepb.Option { + if x != nil { + return x.Options + } + return nil +} + +func (x *Api) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *Api) GetSourceContext() *sourcecontextpb.SourceContext { + if x != nil { + return x.SourceContext + } + return nil +} + +func (x *Api) GetMixins() []*Mixin { + if x != nil { + return x.Mixins + } + return nil +} + +func (x *Api) GetSyntax() typepb.Syntax { + if x != nil { + return x.Syntax + } + return typepb.Syntax(0) +} + +// Method represents a method of an API interface. +type Method struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The simple name of this method. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A URL of the input message type. + RequestTypeUrl string `protobuf:"bytes,2,opt,name=request_type_url,json=requestTypeUrl,proto3" json:"request_type_url,omitempty"` + // If true, the request is streamed. + RequestStreaming bool `protobuf:"varint,3,opt,name=request_streaming,json=requestStreaming,proto3" json:"request_streaming,omitempty"` + // The URL of the output message type. 
+ ResponseTypeUrl string `protobuf:"bytes,4,opt,name=response_type_url,json=responseTypeUrl,proto3" json:"response_type_url,omitempty"` + // If true, the response is streamed. + ResponseStreaming bool `protobuf:"varint,5,opt,name=response_streaming,json=responseStreaming,proto3" json:"response_streaming,omitempty"` + // Any metadata attached to the method. + Options []*typepb.Option `protobuf:"bytes,6,rep,name=options,proto3" json:"options,omitempty"` + // The source syntax of this method. + Syntax typepb.Syntax `protobuf:"varint,7,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` +} + +func (x *Method) Reset() { + *x = Method{} + mi := &file_google_protobuf_api_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Method) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Method) ProtoMessage() {} + +func (x *Method) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_api_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Method.ProtoReflect.Descriptor instead. +func (*Method) Descriptor() ([]byte, []int) { + return file_google_protobuf_api_proto_rawDescGZIP(), []int{1} +} + +func (x *Method) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Method) GetRequestTypeUrl() string { + if x != nil { + return x.RequestTypeUrl + } + return "" +} + +func (x *Method) GetRequestStreaming() bool { + if x != nil { + return x.RequestStreaming + } + return false +} + +func (x *Method) GetResponseTypeUrl() string { + if x != nil { + return x.ResponseTypeUrl + } + return "" +} + +func (x *Method) GetResponseStreaming() bool { + if x != nil { + return x.ResponseStreaming + } + return false +} + +func (x *Method) GetOptions() []*typepb.Option { + if x != nil { + return x.Options + } + return nil +} + +func (x *Method) GetSyntax() typepb.Syntax { + if x != nil { + return x.Syntax + } + return typepb.Syntax(0) +} + +// Declares an API Interface to be included in this interface. The including +// interface must redeclare all the methods from the included interface, but +// documentation and options are inherited as follows: +// +// - If after comment and whitespace stripping, the documentation +// string of the redeclared method is empty, it will be inherited +// from the original method. +// +// - Each annotation belonging to the service config (http, +// visibility) which is not set in the redeclared method will be +// inherited. +// +// - If an http annotation is inherited, the path pattern will be +// modified as follows. Any version prefix will be replaced by the +// version of the including interface plus the [root][] path if +// specified. +// +// Example of a simple mixin: +// +// package google.acl.v1; +// service AccessControl { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v1/{resource=**}:getAcl"; +// } +// } +// +// package google.storage.v2; +// service Storage { +// rpc GetAcl(GetAclRequest) returns (Acl); +// +// // Get a data record. 
+//	  rpc GetData(GetDataRequest) returns (Data) {
+//	    option (google.api.http).get = "/v2/{resource=**}";
+//	  }
+//	}
+//
+// Example of a mixin configuration:
+//
+//	apis:
+//	- name: google.storage.v2.Storage
+//	  mixins:
+//	  - name: google.acl.v1.AccessControl
+//
+// The mixin construct implies that all methods in `AccessControl` are
+// also declared with the same name and request/response types in
+// `Storage`. A documentation generator or annotation processor will
+// see the effective `Storage.GetAcl` method after inheriting
+// documentation and annotations as follows:
+//
+//	service Storage {
+//	  // Get the underlying ACL object.
+//	  rpc GetAcl(GetAclRequest) returns (Acl) {
+//	    option (google.api.http).get = "/v2/{resource=**}:getAcl";
+//	  }
+//	  ...
+//	}
+//
+// Note how the version in the path pattern changed from `v1` to `v2`.
+//
+// If the `root` field in the mixin is specified, it should be a
+// relative path under which inherited HTTP paths are placed. Example:
+//
+//	apis:
+//	- name: google.storage.v2.Storage
+//	  mixins:
+//	  - name: google.acl.v1.AccessControl
+//	    root: acls
+//
+// This implies the following inherited HTTP annotation:
+//
+//	service Storage {
+//	  // Get the underlying ACL object.
+//	  rpc GetAcl(GetAclRequest) returns (Acl) {
+//	    option (google.api.http).get = "/v2/acls/{resource=**}:getAcl";
+//	  }
+//	  ...
+//	}
+type Mixin struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The fully qualified name of the interface which is included.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// If non-empty, specifies a path under which inherited HTTP paths
+	// are rooted.
+	Root string `protobuf:"bytes,2,opt,name=root,proto3" json:"root,omitempty"`
+}
+
+func (x *Mixin) Reset() {
+	*x = Mixin{}
+	mi := &file_google_protobuf_api_proto_msgTypes[2]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Mixin) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Mixin) ProtoMessage() {}
+
+func (x *Mixin) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_api_proto_msgTypes[2]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Mixin.ProtoReflect.Descriptor instead.
+func (*Mixin) Descriptor() ([]byte, []int) { + return file_google_protobuf_api_proto_rawDescGZIP(), []int{2} +} + +func (x *Mixin) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Mixin) GetRoot() string { + if x != nil { + return x.Root + } + return "" +} + +var File_google_protobuf_api_proto protoreflect.FileDescriptor + +var file_google_protobuf_api_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x1a, 0x24, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc1, + 0x02, 0x0a, 0x03, 0x41, 0x70, 0x69, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x31, 0x0a, 0x07, 0x6d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x12, 0x31, 0x0a, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x45, 0x0a, 0x0e, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x12, 0x2e, 0x0a, 0x06, 0x6d, 0x69, 0x78, 0x69, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x69, 0x78, 0x69, 0x6e, 0x52, 0x06, 0x6d, 0x69, 0x78, 0x69, 0x6e, + 0x73, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, + 0x61, 0x78, 0x22, 0xb2, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x28, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x2b, 0x0a, 0x11, 0x72, + 
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x2a, 0x0a, 0x11, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x55, 0x72, 0x6c, 0x12, 0x2d, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x11, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x12, 0x31, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x52, + 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x22, 0x2f, 0x0a, 0x05, 0x4d, 0x69, 0x78, 0x69, 0x6e, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, + 0x08, 0x41, 0x70, 0x69, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x2f, 0x61, 0x70, 0x69, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, + 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_api_proto_rawDescOnce sync.Once + file_google_protobuf_api_proto_rawDescData = file_google_protobuf_api_proto_rawDesc +) + +func file_google_protobuf_api_proto_rawDescGZIP() []byte { + file_google_protobuf_api_proto_rawDescOnce.Do(func() { + file_google_protobuf_api_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_api_proto_rawDescData) + }) + return file_google_protobuf_api_proto_rawDescData +} + +var file_google_protobuf_api_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_google_protobuf_api_proto_goTypes = []any{ + (*Api)(nil), // 0: google.protobuf.Api + (*Method)(nil), // 1: google.protobuf.Method + (*Mixin)(nil), // 2: google.protobuf.Mixin + (*typepb.Option)(nil), // 3: google.protobuf.Option + (*sourcecontextpb.SourceContext)(nil), // 4: google.protobuf.SourceContext + (typepb.Syntax)(0), // 5: google.protobuf.Syntax +} +var file_google_protobuf_api_proto_depIdxs = []int32{ + 1, // 0: google.protobuf.Api.methods:type_name -> google.protobuf.Method + 3, // 1: 
google.protobuf.Api.options:type_name -> google.protobuf.Option + 4, // 2: google.protobuf.Api.source_context:type_name -> google.protobuf.SourceContext + 2, // 3: google.protobuf.Api.mixins:type_name -> google.protobuf.Mixin + 5, // 4: google.protobuf.Api.syntax:type_name -> google.protobuf.Syntax + 3, // 5: google.protobuf.Method.options:type_name -> google.protobuf.Option + 5, // 6: google.protobuf.Method.syntax:type_name -> google.protobuf.Syntax + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_google_protobuf_api_proto_init() } +func file_google_protobuf_api_proto_init() { + if File_google_protobuf_api_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_api_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_api_proto_goTypes, + DependencyIndexes: file_google_protobuf_api_proto_depIdxs, + MessageInfos: file_google_protobuf_api_proto_msgTypes, + }.Build() + File_google_protobuf_api_proto = out.File + file_google_protobuf_api_proto_rawDesc = nil + file_google_protobuf_api_proto_goTypes = nil + file_google_protobuf_api_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index 1b71bcd910a..b99d4d24109 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -245,11 +245,9 @@ func (x *Duration) check() uint { func (x *Duration) Reset() { *x = Duration{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_duration_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_duration_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Duration) String() string { @@ -260,7 +258,7 @@ func (*Duration) ProtoMessage() {} func (x *Duration) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_duration_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -339,20 +337,6 @@ func file_google_protobuf_duration_proto_init() { if File_google_protobuf_duration_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Duration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index d87b4fb8281..1761bc9c69a 100644 --- 
a/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -55,11 +55,9 @@ type Empty struct { func (x *Empty) Reset() { *x = Empty{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_empty_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_empty_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Empty) String() string { @@ -70,7 +68,7 @@ func (*Empty) ProtoMessage() {} func (x *Empty) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_empty_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -131,20 +129,6 @@ func file_google_protobuf_empty_proto_init() { if File_google_protobuf_empty_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Empty); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index ac1e91bb6dd..19de8d371fd 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -467,11 +467,9 @@ func rangeFields(path string, f func(field string) bool) bool { func (x *FieldMask) Reset() { *x = FieldMask{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_field_mask_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_field_mask_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldMask) String() string { @@ -482,7 +480,7 @@ func (*FieldMask) ProtoMessage() {} func (x *FieldMask) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_field_mask_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -553,20 +551,6 @@ func file_google_protobuf_field_mask_proto_init() { if File_google_protobuf_field_mask_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*FieldMask); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/sourcecontextpb/source_context.pb.go b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/sourcecontextpb/source_context.pb.go new file mode 100644 index 00000000000..4d15e9748c9 --- /dev/null +++ 
b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/sourcecontextpb/source_context.pb.go @@ -0,0 +1,160 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/source_context.proto + +package sourcecontextpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +// `SourceContext` represents information about the source of a +// protobuf element, like the file in which it is defined. +type SourceContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The path-qualified name of the .proto file that contained the associated + // protobuf element. For example: `"google/protobuf/source_context.proto"`. + FileName string `protobuf:"bytes,1,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` +} + +func (x *SourceContext) Reset() { + *x = SourceContext{} + mi := &file_google_protobuf_source_context_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SourceContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceContext) ProtoMessage() {} + +func (x *SourceContext) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_source_context_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceContext.ProtoReflect.Descriptor instead. 
+func (*SourceContext) Descriptor() ([]byte, []int) { + return file_google_protobuf_source_context_proto_rawDescGZIP(), []int{0} +} + +func (x *SourceContext) GetFileName() string { + if x != nil { + return x.FileName + } + return "" +} + +var File_google_protobuf_source_context_proto protoreflect.FileDescriptor + +var file_google_protobuf_source_context_proto_rawDesc = []byte{ + 0x0a, 0x24, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x2c, 0x0a, 0x0d, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x69, 0x6c, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x8a, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x12, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x36, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, + 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, + 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_source_context_proto_rawDescOnce sync.Once + file_google_protobuf_source_context_proto_rawDescData = file_google_protobuf_source_context_proto_rawDesc +) + +func file_google_protobuf_source_context_proto_rawDescGZIP() []byte { + file_google_protobuf_source_context_proto_rawDescOnce.Do(func() { + file_google_protobuf_source_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_source_context_proto_rawDescData) + }) + return file_google_protobuf_source_context_proto_rawDescData +} + +var file_google_protobuf_source_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_source_context_proto_goTypes = []any{ + (*SourceContext)(nil), // 0: google.protobuf.SourceContext +} +var file_google_protobuf_source_context_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_source_context_proto_init() } +func file_google_protobuf_source_context_proto_init() { + if File_google_protobuf_source_context_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_source_context_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_source_context_proto_goTypes, + DependencyIndexes: 
file_google_protobuf_source_context_proto_depIdxs, + MessageInfos: file_google_protobuf_source_context_proto_msgTypes, + }.Build() + File_google_protobuf_source_context_proto = out.File + file_google_protobuf_source_context_proto_rawDesc = nil + file_google_protobuf_source_context_proto_goTypes = nil + file_google_protobuf_source_context_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index d45361cbc72..8f206a66117 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -120,6 +120,7 @@ package structpb import ( base64 "encoding/base64" + json "encoding/json" protojson "google.golang.org/protobuf/encoding/protojson" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -233,11 +234,9 @@ func (x *Struct) UnmarshalJSON(b []byte) error { func (x *Struct) Reset() { *x = Struct{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_struct_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_struct_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Struct) String() string { @@ -248,7 +247,7 @@ func (*Struct) ProtoMessage() {} func (x *Struct) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_struct_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -296,19 +295,20 @@ type Value struct { // NewValue constructs a Value from a general-purpose Go interface. 
// -// ╔════════════════════════╤════════════════════════════════════════════╗ -// ║ Go type │ Conversion ║ -// ╠════════════════════════╪════════════════════════════════════════════╣ -// ║ nil │ stored as NullValue ║ -// ║ bool │ stored as BoolValue ║ -// ║ int, int32, int64 │ stored as NumberValue ║ -// ║ uint, uint32, uint64 │ stored as NumberValue ║ -// ║ float32, float64 │ stored as NumberValue ║ -// ║ string │ stored as StringValue; must be valid UTF-8 ║ -// ║ []byte │ stored as StringValue; base64-encoded ║ -// ║ map[string]any │ stored as StructValue ║ -// ║ []any │ stored as ListValue ║ -// ╚════════════════════════╧════════════════════════════════════════════╝ +// ╔═══════════════════════════════════════╤════════════════════════════════════════════╗ +// ║ Go type │ Conversion ║ +// ╠═══════════════════════════════════════╪════════════════════════════════════════════╣ +// ║ nil │ stored as NullValue ║ +// ║ bool │ stored as BoolValue ║ +// ║ int, int8, int16, int32, int64 │ stored as NumberValue ║ +// ║ uint, uint8, uint16, uint32, uint64 │ stored as NumberValue ║ +// ║ float32, float64 │ stored as NumberValue ║ +// ║ json.Number │ stored as NumberValue ║ +// ║ string │ stored as StringValue; must be valid UTF-8 ║ +// ║ []byte │ stored as StringValue; base64-encoded ║ +// ║ map[string]any │ stored as StructValue ║ +// ║ []any │ stored as ListValue ║ +// ╚═══════════════════════════════════════╧════════════════════════════════════════════╝ // // When converting an int64 or uint64 to a NumberValue, numeric precision loss // is possible since they are stored as a float64. @@ -320,12 +320,20 @@ func NewValue(v any) (*Value, error) { return NewBoolValue(v), nil case int: return NewNumberValue(float64(v)), nil + case int8: + return NewNumberValue(float64(v)), nil + case int16: + return NewNumberValue(float64(v)), nil case int32: return NewNumberValue(float64(v)), nil case int64: return NewNumberValue(float64(v)), nil case uint: return NewNumberValue(float64(v)), nil + case uint8: + return NewNumberValue(float64(v)), nil + case uint16: + return NewNumberValue(float64(v)), nil case uint32: return NewNumberValue(float64(v)), nil case uint64: @@ -334,6 +342,12 @@ func NewValue(v any) (*Value, error) { return NewNumberValue(float64(v)), nil case float64: return NewNumberValue(float64(v)), nil + case json.Number: + n, err := v.Float64() + if err != nil { + return nil, protoimpl.X.NewError("invalid number format %q, expected a float64: %v", v, err) + } + return NewNumberValue(n), nil case string: if !utf8.ValidString(v) { return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v) @@ -441,11 +455,9 @@ func (x *Value) UnmarshalJSON(b []byte) error { func (x *Value) Reset() { *x = Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_struct_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_struct_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Value) String() string { @@ -456,7 +468,7 @@ func (*Value) ProtoMessage() {} func (x *Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_struct_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -613,11 +625,9 @@ func (x *ListValue) UnmarshalJSON(b []byte) error { func (x *ListValue) Reset() { *x = ListValue{} - if 
protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_struct_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_struct_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListValue) String() string { @@ -628,7 +638,7 @@ func (*ListValue) ProtoMessage() {} func (x *ListValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_struct_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -742,44 +752,6 @@ func file_google_protobuf_struct_proto_init() { if File_google_protobuf_struct_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Struct); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ListValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{ (*Value_NullValue)(nil), (*Value_NumberValue)(nil), diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 83a5a645b08..0d20722d70b 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -254,11 +254,9 @@ func (x *Timestamp) check() uint { func (x *Timestamp) Reset() { *x = Timestamp{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Timestamp) String() string { @@ -269,7 +267,7 @@ func (*Timestamp) ProtoMessage() {} func (x *Timestamp) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -348,20 +346,6 @@ func file_google_protobuf_timestamp_proto_init() { if File_google_protobuf_timestamp_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Timestamp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git 
a/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/typepb/type.pb.go b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/typepb/type.pb.go new file mode 100644 index 00000000000..f0ca52a01b3 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/typepb/type.pb.go @@ -0,0 +1,918 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/type.proto + +package typepb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + sourcecontextpb "google.golang.org/protobuf/types/known/sourcecontextpb" + reflect "reflect" + sync "sync" +) + +// The syntax in which a protocol buffer element is defined. +type Syntax int32 + +const ( + // Syntax `proto2`. + Syntax_SYNTAX_PROTO2 Syntax = 0 + // Syntax `proto3`. + Syntax_SYNTAX_PROTO3 Syntax = 1 + // Syntax `editions`. + Syntax_SYNTAX_EDITIONS Syntax = 2 +) + +// Enum value maps for Syntax. +var ( + Syntax_name = map[int32]string{ + 0: "SYNTAX_PROTO2", + 1: "SYNTAX_PROTO3", + 2: "SYNTAX_EDITIONS", + } + Syntax_value = map[string]int32{ + "SYNTAX_PROTO2": 0, + "SYNTAX_PROTO3": 1, + "SYNTAX_EDITIONS": 2, + } +) + +func (x Syntax) Enum() *Syntax { + p := new(Syntax) + *p = x + return p +} + +func (x Syntax) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Syntax) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_type_proto_enumTypes[0].Descriptor() +} + +func (Syntax) Type() protoreflect.EnumType { + return &file_google_protobuf_type_proto_enumTypes[0] +} + +func (x Syntax) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Syntax.Descriptor instead. 
+func (Syntax) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_type_proto_rawDescGZIP(), []int{0} +} + +// Basic field types. +type Field_Kind int32 + +const ( + // Field type unknown. + Field_TYPE_UNKNOWN Field_Kind = 0 + // Field type double. + Field_TYPE_DOUBLE Field_Kind = 1 + // Field type float. + Field_TYPE_FLOAT Field_Kind = 2 + // Field type int64. + Field_TYPE_INT64 Field_Kind = 3 + // Field type uint64. + Field_TYPE_UINT64 Field_Kind = 4 + // Field type int32. + Field_TYPE_INT32 Field_Kind = 5 + // Field type fixed64. + Field_TYPE_FIXED64 Field_Kind = 6 + // Field type fixed32. + Field_TYPE_FIXED32 Field_Kind = 7 + // Field type bool. + Field_TYPE_BOOL Field_Kind = 8 + // Field type string. + Field_TYPE_STRING Field_Kind = 9 + // Field type group. Proto2 syntax only, and deprecated. + Field_TYPE_GROUP Field_Kind = 10 + // Field type message. + Field_TYPE_MESSAGE Field_Kind = 11 + // Field type bytes. + Field_TYPE_BYTES Field_Kind = 12 + // Field type uint32. + Field_TYPE_UINT32 Field_Kind = 13 + // Field type enum. + Field_TYPE_ENUM Field_Kind = 14 + // Field type sfixed32. + Field_TYPE_SFIXED32 Field_Kind = 15 + // Field type sfixed64. + Field_TYPE_SFIXED64 Field_Kind = 16 + // Field type sint32. + Field_TYPE_SINT32 Field_Kind = 17 + // Field type sint64. + Field_TYPE_SINT64 Field_Kind = 18 +) + +// Enum value maps for Field_Kind. +var ( + Field_Kind_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", + } + Field_Kind_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, + } +) + +func (x Field_Kind) Enum() *Field_Kind { + p := new(Field_Kind) + *p = x + return p +} + +func (x Field_Kind) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Field_Kind) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_type_proto_enumTypes[1].Descriptor() +} + +func (Field_Kind) Type() protoreflect.EnumType { + return &file_google_protobuf_type_proto_enumTypes[1] +} + +func (x Field_Kind) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Field_Kind.Descriptor instead. +func (Field_Kind) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_type_proto_rawDescGZIP(), []int{1, 0} +} + +// Whether a field is optional, required, or repeated. +type Field_Cardinality int32 + +const ( + // For fields with unknown cardinality. + Field_CARDINALITY_UNKNOWN Field_Cardinality = 0 + // For optional fields. + Field_CARDINALITY_OPTIONAL Field_Cardinality = 1 + // For required fields. Proto2 syntax only. + Field_CARDINALITY_REQUIRED Field_Cardinality = 2 + // For repeated fields. + Field_CARDINALITY_REPEATED Field_Cardinality = 3 +) + +// Enum value maps for Field_Cardinality. 
+var ( + Field_Cardinality_name = map[int32]string{ + 0: "CARDINALITY_UNKNOWN", + 1: "CARDINALITY_OPTIONAL", + 2: "CARDINALITY_REQUIRED", + 3: "CARDINALITY_REPEATED", + } + Field_Cardinality_value = map[string]int32{ + "CARDINALITY_UNKNOWN": 0, + "CARDINALITY_OPTIONAL": 1, + "CARDINALITY_REQUIRED": 2, + "CARDINALITY_REPEATED": 3, + } +) + +func (x Field_Cardinality) Enum() *Field_Cardinality { + p := new(Field_Cardinality) + *p = x + return p +} + +func (x Field_Cardinality) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Field_Cardinality) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_type_proto_enumTypes[2].Descriptor() +} + +func (Field_Cardinality) Type() protoreflect.EnumType { + return &file_google_protobuf_type_proto_enumTypes[2] +} + +func (x Field_Cardinality) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Field_Cardinality.Descriptor instead. +func (Field_Cardinality) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_type_proto_rawDescGZIP(), []int{1, 1} +} + +// A protocol buffer message type. +type Type struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The fully qualified message name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The list of fields. + Fields []*Field `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty"` + // The list of types appearing in `oneof` definitions in this type. + Oneofs []string `protobuf:"bytes,3,rep,name=oneofs,proto3" json:"oneofs,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,4,rep,name=options,proto3" json:"options,omitempty"` + // The source context. + SourceContext *sourcecontextpb.SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // The source syntax. + Syntax Syntax `protobuf:"varint,6,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + // The source edition string, only valid when syntax is SYNTAX_EDITIONS. + Edition string `protobuf:"bytes,7,opt,name=edition,proto3" json:"edition,omitempty"` +} + +func (x *Type) Reset() { + *x = Type{} + mi := &file_google_protobuf_type_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Type) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type) ProtoMessage() {} + +func (x *Type) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_type_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type.ProtoReflect.Descriptor instead. 
+func (*Type) Descriptor() ([]byte, []int) { + return file_google_protobuf_type_proto_rawDescGZIP(), []int{0} +} + +func (x *Type) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Type) GetFields() []*Field { + if x != nil { + return x.Fields + } + return nil +} + +func (x *Type) GetOneofs() []string { + if x != nil { + return x.Oneofs + } + return nil +} + +func (x *Type) GetOptions() []*Option { + if x != nil { + return x.Options + } + return nil +} + +func (x *Type) GetSourceContext() *sourcecontextpb.SourceContext { + if x != nil { + return x.SourceContext + } + return nil +} + +func (x *Type) GetSyntax() Syntax { + if x != nil { + return x.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (x *Type) GetEdition() string { + if x != nil { + return x.Edition + } + return "" +} + +// A single field of a message type. +type Field struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The field type. + Kind Field_Kind `protobuf:"varint,1,opt,name=kind,proto3,enum=google.protobuf.Field_Kind" json:"kind,omitempty"` + // The field cardinality. + Cardinality Field_Cardinality `protobuf:"varint,2,opt,name=cardinality,proto3,enum=google.protobuf.Field_Cardinality" json:"cardinality,omitempty"` + // The field number. + Number int32 `protobuf:"varint,3,opt,name=number,proto3" json:"number,omitempty"` + // The field name. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // The field type URL, without the scheme, for message or enumeration + // types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`. + TypeUrl string `protobuf:"bytes,6,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // The index of the field type in `Type.oneofs`, for message or enumeration + // types. The first type has index 1; zero means the type is not in the list. + OneofIndex int32 `protobuf:"varint,7,opt,name=oneof_index,json=oneofIndex,proto3" json:"oneof_index,omitempty"` + // Whether to use alternative packed wire representation. + Packed bool `protobuf:"varint,8,opt,name=packed,proto3" json:"packed,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,9,rep,name=options,proto3" json:"options,omitempty"` + // The field JSON name. + JsonName string `protobuf:"bytes,10,opt,name=json_name,json=jsonName,proto3" json:"json_name,omitempty"` + // The string value of the default value of this field. Proto2 syntax only. + DefaultValue string `protobuf:"bytes,11,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` +} + +func (x *Field) Reset() { + *x = Field{} + mi := &file_google_protobuf_type_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Field) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Field) ProtoMessage() {} + +func (x *Field) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_type_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Field.ProtoReflect.Descriptor instead. 
+func (*Field) Descriptor() ([]byte, []int) { + return file_google_protobuf_type_proto_rawDescGZIP(), []int{1} +} + +func (x *Field) GetKind() Field_Kind { + if x != nil { + return x.Kind + } + return Field_TYPE_UNKNOWN +} + +func (x *Field) GetCardinality() Field_Cardinality { + if x != nil { + return x.Cardinality + } + return Field_CARDINALITY_UNKNOWN +} + +func (x *Field) GetNumber() int32 { + if x != nil { + return x.Number + } + return 0 +} + +func (x *Field) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Field) GetTypeUrl() string { + if x != nil { + return x.TypeUrl + } + return "" +} + +func (x *Field) GetOneofIndex() int32 { + if x != nil { + return x.OneofIndex + } + return 0 +} + +func (x *Field) GetPacked() bool { + if x != nil { + return x.Packed + } + return false +} + +func (x *Field) GetOptions() []*Option { + if x != nil { + return x.Options + } + return nil +} + +func (x *Field) GetJsonName() string { + if x != nil { + return x.JsonName + } + return "" +} + +func (x *Field) GetDefaultValue() string { + if x != nil { + return x.DefaultValue + } + return "" +} + +// Enum type definition. +type Enum struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Enum type name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Enum value definitions. + Enumvalue []*EnumValue `protobuf:"bytes,2,rep,name=enumvalue,proto3" json:"enumvalue,omitempty"` + // Protocol buffer options. + Options []*Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + // The source context. + SourceContext *sourcecontextpb.SourceContext `protobuf:"bytes,4,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // The source syntax. + Syntax Syntax `protobuf:"varint,5,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + // The source edition string, only valid when syntax is SYNTAX_EDITIONS. + Edition string `protobuf:"bytes,6,opt,name=edition,proto3" json:"edition,omitempty"` +} + +func (x *Enum) Reset() { + *x = Enum{} + mi := &file_google_protobuf_type_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Enum) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Enum) ProtoMessage() {} + +func (x *Enum) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_type_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Enum.ProtoReflect.Descriptor instead. +func (*Enum) Descriptor() ([]byte, []int) { + return file_google_protobuf_type_proto_rawDescGZIP(), []int{2} +} + +func (x *Enum) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Enum) GetEnumvalue() []*EnumValue { + if x != nil { + return x.Enumvalue + } + return nil +} + +func (x *Enum) GetOptions() []*Option { + if x != nil { + return x.Options + } + return nil +} + +func (x *Enum) GetSourceContext() *sourcecontextpb.SourceContext { + if x != nil { + return x.SourceContext + } + return nil +} + +func (x *Enum) GetSyntax() Syntax { + if x != nil { + return x.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (x *Enum) GetEdition() string { + if x != nil { + return x.Edition + } + return "" +} + +// Enum value definition. 
+type EnumValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Enum value name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Enum value number. + Number int32 `protobuf:"varint,2,opt,name=number,proto3" json:"number,omitempty"` + // Protocol buffer options. + Options []*Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` +} + +func (x *EnumValue) Reset() { + *x = EnumValue{} + mi := &file_google_protobuf_type_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EnumValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumValue) ProtoMessage() {} + +func (x *EnumValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_type_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumValue.ProtoReflect.Descriptor instead. +func (*EnumValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_type_proto_rawDescGZIP(), []int{3} +} + +func (x *EnumValue) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *EnumValue) GetNumber() int32 { + if x != nil { + return x.Number + } + return 0 +} + +func (x *EnumValue) GetOptions() []*Option { + if x != nil { + return x.Options + } + return nil +} + +// A protocol buffer option, which can be attached to a message, field, +// enumeration, etc. +type Option struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The option's name. For protobuf built-in options (options defined in + // descriptor.proto), this is the short name. For example, `"map_entry"`. + // For custom options, it should be the fully-qualified name. For example, + // `"google.api.http"`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The option's value packed in an Any message. If the value is a primitive, + // the corresponding wrapper type defined in google/protobuf/wrappers.proto + // should be used. If the value is an enum, it should be stored as an int32 + // value using the google.protobuf.Int32Value type. + Value *anypb.Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Option) Reset() { + *x = Option{} + mi := &file_google_protobuf_type_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Option) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Option) ProtoMessage() {} + +func (x *Option) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_type_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Option.ProtoReflect.Descriptor instead. 
+func (*Option) Descriptor() ([]byte, []int) { + return file_google_protobuf_type_proto_rawDescGZIP(), []int{4} +} + +func (x *Option) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Option) GetValue() *anypb.Any { + if x != nil { + return x.Value + } + return nil +} + +var File_google_protobuf_type_proto protoreflect.FileDescriptor + +var file_google_protobuf_type_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x1a, 0x19, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, + 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa7, + 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x06, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x6e, 0x65, + 0x6f, 0x66, 0x73, 0x12, 0x31, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x0d, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x2f, 0x0a, + 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x53, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x18, + 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb4, 0x06, 0x0a, 0x05, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x12, 0x2f, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, + 0x69, 0x6e, 0x64, 0x12, 0x44, 0x0a, 0x0b, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, + 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 
0x6c, 0x64, + 0x2e, 0x43, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x0b, 0x63, 0x61, + 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, + 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, + 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x31, 0x0a, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1b, 0x0a, 0x09, + 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xc8, + 0x02, 0x0a, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, + 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, + 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, + 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, + 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, + 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, + 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, + 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, + 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, + 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, + 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x53, 0x49, 
0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x74, 0x0a, 0x0b, 0x43, 0x61, 0x72, + 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x17, 0x0a, 0x13, 0x43, 0x41, 0x52, 0x44, + 0x49, 0x4e, 0x41, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x41, 0x52, 0x44, 0x49, 0x4e, 0x41, 0x4c, 0x49, 0x54, 0x59, + 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x43, + 0x41, 0x52, 0x44, 0x49, 0x4e, 0x41, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, + 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x41, 0x52, 0x44, 0x49, 0x4e, 0x41, + 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x22, + 0x99, 0x02, 0x0a, 0x04, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x09, + 0x65, 0x6e, 0x75, 0x6d, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x65, 0x6e, 0x75, + 0x6d, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x0e, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, + 0x78, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6a, 0x0a, 0x09, 0x45, + 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x12, 0x31, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x48, 0x0a, 0x06, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x2a, 0x43, 0x0a, 0x06, 0x53, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x11, 0x0a, 0x0d, 0x53, + 0x59, 0x4e, 0x54, 0x41, 0x58, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0x00, 0x12, 0x11, + 0x0a, 0x0d, 0x53, 0x59, 0x4e, 0x54, 0x41, 0x58, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, + 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x59, 0x4e, 0x54, 0x41, 0x58, 0x5f, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x53, 0x10, 0x02, 0x42, 0x7b, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x09, 0x54, + 0x79, 0x70, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, + 0x77, 0x6e, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, + 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_type_proto_rawDescOnce sync.Once + file_google_protobuf_type_proto_rawDescData = file_google_protobuf_type_proto_rawDesc +) + +func file_google_protobuf_type_proto_rawDescGZIP() []byte { + file_google_protobuf_type_proto_rawDescOnce.Do(func() { + file_google_protobuf_type_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_type_proto_rawDescData) + }) + return file_google_protobuf_type_proto_rawDescData +} + +var file_google_protobuf_type_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_google_protobuf_type_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_google_protobuf_type_proto_goTypes = []any{ + (Syntax)(0), // 0: google.protobuf.Syntax + (Field_Kind)(0), // 1: google.protobuf.Field.Kind + (Field_Cardinality)(0), // 2: google.protobuf.Field.Cardinality + (*Type)(nil), // 3: google.protobuf.Type + (*Field)(nil), // 4: google.protobuf.Field + (*Enum)(nil), // 5: google.protobuf.Enum + (*EnumValue)(nil), // 6: google.protobuf.EnumValue + (*Option)(nil), // 7: google.protobuf.Option + (*sourcecontextpb.SourceContext)(nil), // 8: google.protobuf.SourceContext + (*anypb.Any)(nil), // 9: google.protobuf.Any +} +var file_google_protobuf_type_proto_depIdxs = []int32{ + 4, // 0: google.protobuf.Type.fields:type_name -> google.protobuf.Field + 7, // 1: google.protobuf.Type.options:type_name -> google.protobuf.Option + 8, // 2: google.protobuf.Type.source_context:type_name -> google.protobuf.SourceContext + 0, // 3: google.protobuf.Type.syntax:type_name -> google.protobuf.Syntax + 1, // 4: google.protobuf.Field.kind:type_name -> google.protobuf.Field.Kind + 2, // 5: google.protobuf.Field.cardinality:type_name -> google.protobuf.Field.Cardinality + 7, // 6: google.protobuf.Field.options:type_name -> google.protobuf.Option + 6, // 7: google.protobuf.Enum.enumvalue:type_name -> google.protobuf.EnumValue + 7, // 8: google.protobuf.Enum.options:type_name -> google.protobuf.Option + 8, // 9: google.protobuf.Enum.source_context:type_name -> google.protobuf.SourceContext + 0, // 10: google.protobuf.Enum.syntax:type_name -> google.protobuf.Syntax + 7, // 11: 
google.protobuf.EnumValue.options:type_name -> google.protobuf.Option + 9, // 12: google.protobuf.Option.value:type_name -> google.protobuf.Any + 13, // [13:13] is the sub-list for method output_type + 13, // [13:13] is the sub-list for method input_type + 13, // [13:13] is the sub-list for extension type_name + 13, // [13:13] is the sub-list for extension extendee + 0, // [0:13] is the sub-list for field type_name +} + +func init() { file_google_protobuf_type_proto_init() } +func file_google_protobuf_type_proto_init() { + if File_google_protobuf_type_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_type_proto_rawDesc, + NumEnums: 3, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_type_proto_goTypes, + DependencyIndexes: file_google_protobuf_type_proto_depIdxs, + EnumInfos: file_google_protobuf_type_proto_enumTypes, + MessageInfos: file_google_protobuf_type_proto_msgTypes, + }.Build() + File_google_protobuf_type_proto = out.File + file_google_protobuf_type_proto_rawDesc = nil + file_google_protobuf_type_proto_goTypes = nil + file_google_protobuf_type_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index e473f826aa3..006060e5695 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -69,11 +69,9 @@ func Double(v float64) *DoubleValue { func (x *DoubleValue) Reset() { *x = DoubleValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DoubleValue) String() string { @@ -84,7 +82,7 @@ func (*DoubleValue) ProtoMessage() {} func (x *DoubleValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -125,11 +123,9 @@ func Float(v float32) *FloatValue { func (x *FloatValue) Reset() { *x = FloatValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FloatValue) String() string { @@ -140,7 +136,7 @@ func (*FloatValue) ProtoMessage() {} func (x *FloatValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -181,11 +177,9 @@ func Int64(v int64) *Int64Value { func (x *Int64Value) Reset() { *x = Int64Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[2] - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Int64Value) String() string { @@ -196,7 +190,7 @@ func (*Int64Value) ProtoMessage() {} func (x *Int64Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -237,11 +231,9 @@ func UInt64(v uint64) *UInt64Value { func (x *UInt64Value) Reset() { *x = UInt64Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UInt64Value) String() string { @@ -252,7 +244,7 @@ func (*UInt64Value) ProtoMessage() {} func (x *UInt64Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -293,11 +285,9 @@ func Int32(v int32) *Int32Value { func (x *Int32Value) Reset() { *x = Int32Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Int32Value) String() string { @@ -308,7 +298,7 @@ func (*Int32Value) ProtoMessage() {} func (x *Int32Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -349,11 +339,9 @@ func UInt32(v uint32) *UInt32Value { func (x *UInt32Value) Reset() { *x = UInt32Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UInt32Value) String() string { @@ -364,7 +352,7 @@ func (*UInt32Value) ProtoMessage() {} func (x *UInt32Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -405,11 +393,9 @@ func Bool(v bool) *BoolValue { func (x *BoolValue) Reset() { *x = BoolValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BoolValue) String() string { @@ -420,7 +406,7 @@ func (*BoolValue) ProtoMessage() {} func (x *BoolValue) 
ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -461,11 +447,9 @@ func String(v string) *StringValue { func (x *StringValue) Reset() { *x = StringValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StringValue) String() string { @@ -476,7 +460,7 @@ func (*StringValue) ProtoMessage() {} func (x *StringValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -517,11 +501,9 @@ func Bytes(v []byte) *BytesValue { func (x *BytesValue) Reset() { *x = BytesValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BytesValue) String() string { @@ -532,7 +514,7 @@ func (*BytesValue) ProtoMessage() {} func (x *BytesValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -629,116 +611,6 @@ func file_google_protobuf_wrappers_proto_init() { if File_google_protobuf_wrappers_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*DoubleValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*FloatValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*Int64Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*UInt64Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*Int32Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*UInt32Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*BoolValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*StringValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*BytesValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/terraform/providers/google/vendor/modules.txt b/terraform/providers/google/vendor/modules.txt index b30cf1b58fc..edf34ea8f58 100644 --- a/terraform/providers/google/vendor/modules.txt +++ b/terraform/providers/google/vendor/modules.txt @@ -1,14 +1,17 @@ # bitbucket.org/creachadair/stringset v0.0.8 => bitbucket.org/creachadair/stringset v0.0.11 ## explicit; go 1.18 bitbucket.org/creachadair/stringset -# cloud.google.com/go v0.115.0 -## explicit; go 1.20 +# cel.dev/expr v0.16.0 +## explicit; go 1.18 +cel.dev/expr +# cloud.google.com/go v0.116.0 +## explicit; go 1.21 cloud.google.com/go cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -# cloud.google.com/go/auth v0.5.1 -## explicit; go 1.20 +# cloud.google.com/go/auth v0.9.8 +## explicit; go 1.21 cloud.google.com/go/auth cloud.google.com/go/auth/credentials cloud.google.com/go/auth/credentials/idtoken @@ -21,31 +24,39 @@ cloud.google.com/go/auth/credentials/internal/stsexchange cloud.google.com/go/auth/grpctransport cloud.google.com/go/auth/httptransport cloud.google.com/go/auth/internal +cloud.google.com/go/auth/internal/compute cloud.google.com/go/auth/internal/credsfile cloud.google.com/go/auth/internal/jwt cloud.google.com/go/auth/internal/transport cloud.google.com/go/auth/internal/transport/cert -# cloud.google.com/go/auth/oauth2adapt v0.2.2 -## explicit; go 1.19 -cloud.google.com/go/auth/oauth2adapt -# cloud.google.com/go/bigtable v1.24.0 +# cloud.google.com/go/auth/oauth2adapt v0.2.4 ## explicit; go 1.20 +cloud.google.com/go/auth/oauth2adapt +# cloud.google.com/go/bigtable v1.33.0 +## explicit; go 1.21 cloud.google.com/go/bigtable +cloud.google.com/go/bigtable/admin/apiv2/adminpb +cloud.google.com/go/bigtable/apiv2/bigtablepb cloud.google.com/go/bigtable/internal cloud.google.com/go/bigtable/internal/option -# cloud.google.com/go/compute/metadata v0.3.0 -## explicit; go 1.19 +# cloud.google.com/go/compute/metadata v0.5.2 +## explicit; go 1.21 cloud.google.com/go/compute/metadata -# cloud.google.com/go/iam v1.1.8 -## explicit; go 1.19 +# cloud.google.com/go/iam v1.2.1 +## explicit; go 1.21 cloud.google.com/go/iam cloud.google.com/go/iam/apiv1/iampb -# cloud.google.com/go/longrunning v0.5.7 -## explicit; go 1.19 +# cloud.google.com/go/longrunning v0.6.1 +## explicit; go 1.21 cloud.google.com/go/longrunning cloud.google.com/go/longrunning/autogen cloud.google.com/go/longrunning/autogen/longrunningpb -# github.com/GoogleCloudPlatform/declarative-resource-client-library v1.68.0 +# cloud.google.com/go/monitoring v1.21.1 +## explicit; go 1.21 +cloud.google.com/go/monitoring/apiv3/v2 +cloud.google.com/go/monitoring/apiv3/v2/monitoringpb 
+cloud.google.com/go/monitoring/internal +# github.com/GoogleCloudPlatform/declarative-resource-client-library v1.75.0 ## explicit; go 1.19 github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl/operations @@ -62,9 +73,8 @@ github.com/GoogleCloudPlatform/declarative-resource-client-library/services/goog github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub -github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise -# github.com/ProtonMail/go-crypto v1.1.0-alpha.0 +# github.com/ProtonMail/go-crypto v1.1.0-alpha.2 ## explicit; go 1.17 github.com/ProtonMail/go-crypto/bitcurves github.com/ProtonMail/go-crypto/brainpool @@ -104,7 +114,7 @@ github.com/cenkalti/backoff ## explicit; go 1.18 github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1 github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1 -# github.com/cespare/xxhash/v2 v2.2.0 +# github.com/cespare/xxhash/v2 v2.3.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 # github.com/cloudflare/circl v1.3.7 @@ -121,7 +131,7 @@ github.com/cloudflare/circl/math/mlsbset github.com/cloudflare/circl/sign github.com/cloudflare/circl/sign/ed25519 github.com/cloudflare/circl/sign/ed448 -# github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 +# github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59 ## explicit; go 1.19 github.com/cncf/xds/go/udpa/annotations github.com/cncf/xds/go/udpa/type/v1 @@ -134,8 +144,8 @@ github.com/cncf/xds/go/xds/type/v3 # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/envoyproxy/go-control-plane v0.12.0 -## explicit; go 1.17 +# github.com/envoyproxy/go-control-plane v0.13.0 +## explicit; go 1.21 github.com/envoyproxy/go-control-plane/envoy/admin/v3 github.com/envoyproxy/go-control-plane/envoy/annotations github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3 @@ -174,7 +184,7 @@ github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3 github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3 github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3 github.com/envoyproxy/go-control-plane/envoy/type/v3 -# github.com/envoyproxy/protoc-gen-validate v1.0.4 +# github.com/envoyproxy/protoc-gen-validate v1.1.0 ## explicit; go 1.19 github.com/envoyproxy/protoc-gen-validate/validate # github.com/fatih/color v1.16.0 @@ -189,14 +199,14 @@ github.com/gammazero/deque # github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92 ## explicit github.com/gammazero/workerpool -# github.com/go-logr/logr v1.4.1 +# github.com/go-logr/logr v1.4.2 ## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/funcr # github.com/go-logr/stdr v1.2.2 ## explicit; go 1.16 github.com/go-logr/stdr -# github.com/golang/glog v1.2.0 +# github.com/golang/glog v1.2.2 ## explicit; go 1.19 github.com/golang/glog github.com/golang/glog/internal/logsink @@ -208,12 +218,7 @@ github.com/golang/groupcache/lru ## explicit; go 1.17 github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto -github.com/golang/protobuf/ptypes/any -github.com/golang/protobuf/ptypes/duration 
github.com/golang/protobuf/ptypes/empty -github.com/golang/protobuf/ptypes/struct -github.com/golang/protobuf/ptypes/timestamp -github.com/golang/protobuf/ptypes/wrappers # github.com/google/go-cmp v0.6.0 ## explicit; go 1.13 github.com/google/go-cmp/cmp @@ -225,8 +230,8 @@ github.com/google/go-cmp/cmp/internal/value # github.com/google/go-cpy v0.0.0-20211218193943-a9c933c06932 ## explicit; go 1.17 github.com/google/go-cpy/cpy -# github.com/google/s2a-go v0.1.7 -## explicit; go 1.19 +# github.com/google/s2a-go v0.1.8 +## explicit; go 1.20 github.com/google/s2a-go github.com/google/s2a-go/fallback github.com/google/s2a-go/internal/authinfo @@ -251,17 +256,18 @@ github.com/google/s2a-go/stream # github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid -# github.com/googleapis/enterprise-certificate-proxy v0.3.2 +# github.com/googleapis/enterprise-certificate-proxy v0.3.4 ## explicit; go 1.19 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util -# github.com/googleapis/gax-go/v2 v2.12.4 -## explicit; go 1.19 +# github.com/googleapis/gax-go/v2 v2.13.0 +## explicit; go 1.20 github.com/googleapis/gax-go/v2 github.com/googleapis/gax-go/v2/apierror github.com/googleapis/gax-go/v2/apierror/internal/proto github.com/googleapis/gax-go/v2/callctx github.com/googleapis/gax-go/v2/internal +github.com/googleapis/gax-go/v2/iterator # github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 ## explicit; go 1.14 github.com/grpc-ecosystem/go-grpc-middleware @@ -286,7 +292,7 @@ github.com/hashicorp/go-cty/cty/gocty github.com/hashicorp/go-cty/cty/json github.com/hashicorp/go-cty/cty/msgpack github.com/hashicorp/go-cty/cty/set -# github.com/hashicorp/go-hclog v1.5.0 +# github.com/hashicorp/go-hclog v1.6.3 ## explicit; go 1.13 github.com/hashicorp/go-hclog # github.com/hashicorp/go-multierror v1.1.1 @@ -305,7 +311,7 @@ github.com/hashicorp/go-uuid # github.com/hashicorp/go-version v1.6.0 ## explicit github.com/hashicorp/go-version -# github.com/hashicorp/hc-install v0.6.3 +# github.com/hashicorp/hc-install v0.6.4 ## explicit; go 1.18 github.com/hashicorp/hc-install github.com/hashicorp/hc-install/checkpoint @@ -321,7 +327,7 @@ github.com/hashicorp/hc-install/product github.com/hashicorp/hc-install/releases github.com/hashicorp/hc-install/src github.com/hashicorp/hc-install/version -# github.com/hashicorp/hcl/v2 v2.19.1 +# github.com/hashicorp/hcl/v2 v2.20.1 ## explicit; go 1.18 github.com/hashicorp/hcl/v2 github.com/hashicorp/hcl/v2/ext/customdecode @@ -329,11 +335,11 @@ github.com/hashicorp/hcl/v2/hclsyntax # github.com/hashicorp/logutils v1.0.0 ## explicit github.com/hashicorp/logutils -# github.com/hashicorp/terraform-exec v0.20.0 +# github.com/hashicorp/terraform-exec v0.21.0 ## explicit; go 1.18 github.com/hashicorp/terraform-exec/internal/version github.com/hashicorp/terraform-exec/tfexec -# github.com/hashicorp/terraform-json v0.21.0 +# github.com/hashicorp/terraform-json v0.22.1 ## explicit; go 1.18 github.com/hashicorp/terraform-json # github.com/hashicorp/terraform-plugin-framework v1.7.0 @@ -379,7 +385,7 @@ github.com/hashicorp/terraform-plugin-framework/types/basetypes github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag github.com/hashicorp/terraform-plugin-framework-validators/internal/schemavalidator github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator -# github.com/hashicorp/terraform-plugin-go v0.22.1 +# github.com/hashicorp/terraform-plugin-go v0.23.0 ## explicit; go 1.21 
github.com/hashicorp/terraform-plugin-go/internal/logging github.com/hashicorp/terraform-plugin-go/tfprotov5 @@ -416,7 +422,6 @@ github.com/hashicorp/terraform-plugin-sdk/v2/diag github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff github.com/hashicorp/terraform-plugin-sdk/v2/helper/id github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging -github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure @@ -428,12 +433,27 @@ github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert -github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags github.com/hashicorp/terraform-plugin-sdk/v2/meta github.com/hashicorp/terraform-plugin-sdk/v2/plugin github.com/hashicorp/terraform-plugin-sdk/v2/terraform -# github.com/hashicorp/terraform-provider-google v1.20.1-0.20240708170355-9d7e7459a11c +# github.com/hashicorp/terraform-plugin-testing v1.5.1 +## explicit; go 1.19 +github.com/hashicorp/terraform-plugin-testing/config +github.com/hashicorp/terraform-plugin-testing/helper/resource +github.com/hashicorp/terraform-plugin-testing/internal/addrs +github.com/hashicorp/terraform-plugin-testing/internal/configs/configschema +github.com/hashicorp/terraform-plugin-testing/internal/configs/hcl2shim +github.com/hashicorp/terraform-plugin-testing/internal/errorshim +github.com/hashicorp/terraform-plugin-testing/internal/logging +github.com/hashicorp/terraform-plugin-testing/internal/plugintest +github.com/hashicorp/terraform-plugin-testing/internal/teststep +github.com/hashicorp/terraform-plugin-testing/internal/tfdiags +github.com/hashicorp/terraform-plugin-testing/plancheck +github.com/hashicorp/terraform-plugin-testing/terraform +github.com/hashicorp/terraform-plugin-testing/tfjsonpath +github.com/hashicorp/terraform-plugin-testing/tfversion +# github.com/hashicorp/terraform-provider-google v1.20.1-0.20240708170355-9d7e7459a11c => github.com/openshift/terraform-providers-terraform-provider-google v1.20.1-0.20260210200830-91107ee8cc5d ## explicit; go 1.21 github.com/hashicorp/terraform-provider-google github.com/hashicorp/terraform-provider-google/google/envvar @@ -497,7 +517,6 @@ github.com/hashicorp/terraform-provider-google/google/services/datapipeline github.com/hashicorp/terraform-provider-google/google/services/dataplex github.com/hashicorp/terraform-provider-google/google/services/dataproc github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore -github.com/hashicorp/terraform-provider-google/google/services/datastore github.com/hashicorp/terraform-provider-google/google/services/datastream github.com/hashicorp/terraform-provider-google/google/services/deploymentmanager github.com/hashicorp/terraform-provider-google/google/services/dialogflow @@ -539,10 +558,12 @@ github.com/hashicorp/terraform-provider-google/google/services/networkmanagement github.com/hashicorp/terraform-provider-google/google/services/networksecurity github.com/hashicorp/terraform-provider-google/google/services/networkservices github.com/hashicorp/terraform-provider-google/google/services/notebooks 
+github.com/hashicorp/terraform-provider-google/google/services/oracledatabase github.com/hashicorp/terraform-provider-google/google/services/orgpolicy github.com/hashicorp/terraform-provider-google/google/services/osconfig github.com/hashicorp/terraform-provider-google/google/services/oslogin github.com/hashicorp/terraform-provider-google/google/services/privateca +github.com/hashicorp/terraform-provider-google/google/services/privilegedaccessmanager github.com/hashicorp/terraform-provider-google/google/services/publicca github.com/hashicorp/terraform-provider-google/google/services/pubsub github.com/hashicorp/terraform-provider-google/google/services/pubsublite @@ -550,6 +571,7 @@ github.com/hashicorp/terraform-provider-google/google/services/recaptchaenterpri github.com/hashicorp/terraform-provider-google/google/services/redis github.com/hashicorp/terraform-provider-google/google/services/resourcemanager github.com/hashicorp/terraform-provider-google/google/services/secretmanager +github.com/hashicorp/terraform-provider-google/google/services/secretmanagerregional github.com/hashicorp/terraform-provider-google/google/services/securesourcemanager github.com/hashicorp/terraform-provider-google/google/services/securitycenter github.com/hashicorp/terraform-provider-google/google/services/securitycentermanagement @@ -558,6 +580,7 @@ github.com/hashicorp/terraform-provider-google/google/services/securityposture github.com/hashicorp/terraform-provider-google/google/services/servicemanagement github.com/hashicorp/terraform-provider-google/google/services/servicenetworking github.com/hashicorp/terraform-provider-google/google/services/serviceusage +github.com/hashicorp/terraform-provider-google/google/services/siteverification github.com/hashicorp/terraform-provider-google/google/services/sourcerepo github.com/hashicorp/terraform-provider-google/google/services/spanner github.com/hashicorp/terraform-provider-google/google/services/sql @@ -566,6 +589,7 @@ github.com/hashicorp/terraform-provider-google/google/services/storageinsights github.com/hashicorp/terraform-provider-google/google/services/storagetransfer github.com/hashicorp/terraform-provider-google/google/services/tags github.com/hashicorp/terraform-provider-google/google/services/tpu +github.com/hashicorp/terraform-provider-google/google/services/transcoder github.com/hashicorp/terraform-provider-google/google/services/vertexai github.com/hashicorp/terraform-provider-google/google/services/vmwareengine github.com/hashicorp/terraform-provider-google/google/services/vpcaccess @@ -621,6 +645,15 @@ github.com/mitchellh/reflectwalk # github.com/oklog/run v1.0.0 ## explicit github.com/oklog/run +# github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 +## explicit; go 1.20 +github.com/planetscale/vtprotobuf/protohelpers +github.com/planetscale/vtprotobuf/types/known/anypb +github.com/planetscale/vtprotobuf/types/known/durationpb +github.com/planetscale/vtprotobuf/types/known/emptypb +github.com/planetscale/vtprotobuf/types/known/structpb +github.com/planetscale/vtprotobuf/types/known/timestamppb +github.com/planetscale/vtprotobuf/types/known/wrapperspb # github.com/sirupsen/logrus v1.8.1 ## explicit; go 1.13 github.com/sirupsen/logrus @@ -637,7 +670,7 @@ github.com/vmihailenco/msgpack/v5/msgpcode github.com/vmihailenco/tagparser/v2 github.com/vmihailenco/tagparser/v2/internal github.com/vmihailenco/tagparser/v2/internal/parser -# github.com/zclconf/go-cty v1.14.2 +# github.com/zclconf/go-cty v1.14.4 ## explicit; go 1.18 
github.com/zclconf/go-cty/cty github.com/zclconf/go-cty/cty/convert @@ -666,16 +699,18 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 -## explicit; go 1.20 +# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 +## explicit; go 1.21 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 -## explicit; go 1.20 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 +## explicit; go 1.21 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.24.0 -## explicit; go 1.20 +# go.opentelemetry.io/otel v1.29.0 +## explicit; go 1.21 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/baggage @@ -687,20 +722,35 @@ go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.20.0 -# go.opentelemetry.io/otel/metric v1.24.0 -## explicit; go 1.20 +go.opentelemetry.io/otel/semconv/v1.26.0 +# go.opentelemetry.io/otel/metric v1.29.0 +## explicit; go 1.21 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/trace v1.24.0 -## explicit; go 1.20 +# go.opentelemetry.io/otel/sdk v1.29.0 +## explicit; go 1.21 +go.opentelemetry.io/otel/sdk +go.opentelemetry.io/otel/sdk/instrumentation +go.opentelemetry.io/otel/sdk/internal/x +go.opentelemetry.io/otel/sdk/resource +# go.opentelemetry.io/otel/sdk/metric v1.29.0 +## explicit; go 1.21 +go.opentelemetry.io/otel/sdk/metric +go.opentelemetry.io/otel/sdk/metric/internal +go.opentelemetry.io/otel/sdk/metric/internal/aggregate +go.opentelemetry.io/otel/sdk/metric/internal/exemplar +go.opentelemetry.io/otel/sdk/metric/internal/x +go.opentelemetry.io/otel/sdk/metric/metricdata +# go.opentelemetry.io/otel/trace v1.29.0 +## explicit; go 1.21 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded # go4.org/netipx v0.0.0-20231129151722-fdeea329fbba ## explicit; go 1.18 go4.org/netipx -# golang.org/x/crypto v0.24.0 -## explicit; go 1.18 +# golang.org/x/crypto v0.28.0 +## explicit; go 1.20 golang.org/x/crypto/argon2 golang.org/x/crypto/blake2b golang.org/x/crypto/cast5 @@ -714,6 +764,7 @@ golang.org/x/crypto/internal/poly1305 golang.org/x/crypto/sha3 # golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8 ## explicit; go 1.20 +golang.org/x/exp/constraints golang.org/x/exp/maps # golang.org/x/mod v0.17.0 ## explicit; go 1.18 @@ -721,7 +772,7 @@ golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.26.0 +# golang.org/x/net v0.30.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/http/httpguts @@ -730,7 +781,7 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/oauth2 v0.21.0 +# golang.org/x/oauth2 v0.23.0 ## explicit; go 1.18 
golang.org/x/oauth2 golang.org/x/oauth2/authhandler @@ -742,28 +793,51 @@ golang.org/x/oauth2/google/internal/stsexchange golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.7.0 +# golang.org/x/sync v0.8.0 ## explicit; go 1.18 +golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.21.0 +# golang.org/x/sys v0.26.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/text v0.16.0 +golang.org/x/sys/windows/registry +# golang.org/x/text v0.19.0 ## explicit; go 1.18 golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.5.0 +# golang.org/x/time v0.7.0 ## explicit; go 1.18 golang.org/x/time/rate -# google.golang.org/api v0.185.0 -## explicit; go 1.20 +# golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d +## explicit; go 1.19 +golang.org/x/tools/cmd/stringer +golang.org/x/tools/go/gcexportdata +golang.org/x/tools/go/internal/packagesdriver +golang.org/x/tools/go/packages +golang.org/x/tools/go/types/objectpath +golang.org/x/tools/internal/aliases +golang.org/x/tools/internal/event +golang.org/x/tools/internal/event/core +golang.org/x/tools/internal/event/keys +golang.org/x/tools/internal/event/label +golang.org/x/tools/internal/gcimporter +golang.org/x/tools/internal/gocommand +golang.org/x/tools/internal/packagesinternal +golang.org/x/tools/internal/pkgbits +golang.org/x/tools/internal/stdlib +golang.org/x/tools/internal/tokeninternal +golang.org/x/tools/internal/typesinternal +golang.org/x/tools/internal/versions +# google.golang.org/api v0.201.0 +## explicit; go 1.21 google.golang.org/api/appengine/v1 google.golang.org/api/bigquery/v2 google.golang.org/api/bigtableadmin/v2 +google.golang.org/api/certificatemanager/v1 google.golang.org/api/cloudbilling/v1 google.golang.org/api/cloudbuild/v1 google.golang.org/api/cloudfunctions/v1 @@ -822,23 +896,28 @@ google.golang.org/appengine/internal/datastore google.golang.org/appengine/internal/log google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api -# google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4 -## explicit; go 1.20 -google.golang.org/genproto/googleapis/bigtable/admin/v2 -google.golang.org/genproto/googleapis/bigtable/v2 +# google.golang.org/genproto v0.0.0-20241007155032-5fefd90f89a9 +## explicit; go 1.21 +google.golang.org/genproto/googleapis/type/calendarperiod +google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr -# google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3 -## explicit; go 1.20 +google.golang.org/genproto/protobuf/api +# google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f +## explicit; go 1.21 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations +google.golang.org/genproto/googleapis/api/distribution google.golang.org/genproto/googleapis/api/expr/v1alpha1 -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 -## explicit; go 1.20 +google.golang.org/genproto/googleapis/api/label +google.golang.org/genproto/googleapis/api/metric +google.golang.org/genproto/googleapis/api/monitoredres +# google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 +## explicit; go 1.21 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails 
google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.64.0 -## explicit; go 1.19 +# google.golang.org/grpc v1.67.1 +## explicit; go 1.21 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/authz/audit @@ -850,6 +929,7 @@ google.golang.org/grpc/balancer/grpclb google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/leastrequest +google.golang.org/grpc/balancer/pickfirst google.golang.org/grpc/balancer/rls google.golang.org/grpc/balancer/rls/internal/adaptive google.golang.org/grpc/balancer/rls/internal/keys @@ -877,7 +957,9 @@ google.golang.org/grpc/credentials/tls/certprovider google.golang.org/grpc/credentials/tls/certprovider/pemfile google.golang.org/grpc/encoding google.golang.org/grpc/encoding/proto +google.golang.org/grpc/experimental/stats google.golang.org/grpc/grpclog +google.golang.org/grpc/grpclog/internal google.golang.org/grpc/health google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal @@ -896,7 +978,6 @@ google.golang.org/grpc/internal/credentials/xds google.golang.org/grpc/internal/envconfig google.golang.org/grpc/internal/googlecloud google.golang.org/grpc/internal/grpclog -google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil google.golang.org/grpc/internal/hierarchy @@ -922,6 +1003,7 @@ google.golang.org/grpc/internal/xds/bootstrap/tlscreds google.golang.org/grpc/internal/xds/matcher google.golang.org/grpc/internal/xds/rbac google.golang.org/grpc/keepalive +google.golang.org/grpc/mem google.golang.org/grpc/metadata google.golang.org/grpc/orca google.golang.org/grpc/orca/internal @@ -962,14 +1044,16 @@ google.golang.org/grpc/xds/internal/resolver google.golang.org/grpc/xds/internal/resolver/internal google.golang.org/grpc/xds/internal/server google.golang.org/grpc/xds/internal/xdsclient +google.golang.org/grpc/xds/internal/xdsclient/internal google.golang.org/grpc/xds/internal/xdsclient/load google.golang.org/grpc/xds/internal/xdsclient/transport +google.golang.org/grpc/xds/internal/xdsclient/transport/internal google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter google.golang.org/grpc/xds/internal/xdsclient/xdsresource google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version -# google.golang.org/protobuf v1.34.2 -## explicit; go 1.20 +# google.golang.org/protobuf v1.35.1 +## explicit; go 1.21 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire @@ -1004,10 +1088,14 @@ google.golang.org/protobuf/runtime/protoimpl google.golang.org/protobuf/types/descriptorpb google.golang.org/protobuf/types/gofeaturespb google.golang.org/protobuf/types/known/anypb +google.golang.org/protobuf/types/known/apipb google.golang.org/protobuf/types/known/durationpb google.golang.org/protobuf/types/known/emptypb google.golang.org/protobuf/types/known/fieldmaskpb +google.golang.org/protobuf/types/known/sourcecontextpb google.golang.org/protobuf/types/known/structpb google.golang.org/protobuf/types/known/timestamppb +google.golang.org/protobuf/types/known/typepb google.golang.org/protobuf/types/known/wrapperspb # bitbucket.org/creachadair/stringset => bitbucket.org/creachadair/stringset v0.0.11 +# github.com/hashicorp/terraform-provider-google => github.com/openshift/terraform-providers-terraform-provider-google 
v1.20.1-0.20260210200830-91107ee8cc5d

From fe6f9e518cb0719beac6715e21e4a00ec5d0d0df Mon Sep 17 00:00:00 2001
From: Venkata Charan Sunku
Date: Wed, 18 Feb 2026 07:07:32 +0530
Subject: [PATCH 2/2] OCPBUGS-76586: fix gcp post-bootstrap balancing_mode for
 provider v6

Terraform Google provider v6 changed the default balancing_mode from
unset (letting the API decide) to an explicit UTILIZATION. GCP rejects
UTILIZATION for INTERNAL backend services, which require CONNECTION.
Set balancing_mode = "CONNECTION" explicitly on the api_internal
backend service to fix GCP IPI cluster installation failures.

---
 data/data/gcp/post-bootstrap/main.tf | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/data/data/gcp/post-bootstrap/main.tf b/data/data/gcp/post-bootstrap/main.tf
index 5e0f2a10cd4..8d89f0571af 100644
--- a/data/data/gcp/post-bootstrap/main.tf
+++ b/data/data/gcp/post-bootstrap/main.tf
@@ -22,7 +22,8 @@ resource "google_compute_region_backend_service" "api_internal" {
     for_each = var.gcp_bootstrap_lb ? concat(var.bootstrap_instance_groups, var.master_instance_groups) : var.master_instance_groups

     content {
-      group = backend.value
+      group          = backend.value
+      balancing_mode = "CONNECTION"
     }
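
For context, a minimal standalone sketch of the provider v6 behavior this
commit works around. Only the dynamic backend block and the explicit
balancing_mode mirror the api_internal change above; the resource names,
region, port, and health check wiring are illustrative assumptions, not
installer values.

# Sketch only: with provider v6, omitting balancing_mode makes Terraform
# send the new default UTILIZATION, which the GCP API rejects for
# INTERNAL backend services.

variable "master_instance_groups" {
  type        = list(string)
  description = "Self-links of the control-plane instance groups."
}

resource "google_compute_health_check" "api_internal" {
  name = "example-api-internal-hc" # illustrative name

  tcp_health_check {
    port = 6443 # illustrative port
  }
}

resource "google_compute_region_backend_service" "api_internal_example" {
  name                  = "example-api-internal" # illustrative name
  region                = "us-central1"          # illustrative region
  protocol              = "TCP"
  load_balancing_scheme = "INTERNAL"
  health_checks         = [google_compute_health_check.api_internal.id]

  dynamic "backend" {
    for_each = var.master_instance_groups
    content {
      group = backend.value
      # Explicit CONNECTION is required: INTERNAL (pass-through) backend
      # services reject the provider v6 default of UTILIZATION.
      balancing_mode = "CONNECTION"
    }
  }
}

Pinning balancing_mode in the config keeps the plan stable across provider
upgrades instead of depending on API-side defaulting.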